Diffstat (limited to 'js/src/jit')
-rw-r--r--js/src/jit/ABIArgGenerator.h109
-rw-r--r--js/src/jit/ABIFunctionList-inl.h252
-rw-r--r--js/src/jit/ABIFunctions.h73
-rw-r--r--js/src/jit/AliasAnalysis.cpp317
-rw-r--r--js/src/jit/AliasAnalysis.h64
-rw-r--r--js/src/jit/AlignmentMaskAnalysis.cpp97
-rw-r--r--js/src/jit/AlignmentMaskAnalysis.h27
-rw-r--r--js/src/jit/Assembler.h34
-rw-r--r--js/src/jit/AtomicOp.h98
-rw-r--r--js/src/jit/AtomicOperations.h352
-rw-r--r--js/src/jit/AutoWritableJitCode.h88
-rw-r--r--js/src/jit/BacktrackingAllocator.cpp4676
-rw-r--r--js/src/jit/BacktrackingAllocator.h844
-rw-r--r--js/src/jit/Bailouts.cpp352
-rw-r--r--js/src/jit/Bailouts.h227
-rw-r--r--js/src/jit/BaselineBailouts.cpp2092
-rw-r--r--js/src/jit/BaselineCacheIRCompiler.cpp4083
-rw-r--r--js/src/jit/BaselineCacheIRCompiler.h150
-rw-r--r--js/src/jit/BaselineCodeGen.cpp6897
-rw-r--r--js/src/jit/BaselineCodeGen.h521
-rw-r--r--js/src/jit/BaselineDebugModeOSR.cpp561
-rw-r--r--js/src/jit/BaselineDebugModeOSR.h30
-rw-r--r--js/src/jit/BaselineFrame-inl.h131
-rw-r--r--js/src/jit/BaselineFrame.cpp180
-rw-r--r--js/src/jit/BaselineFrame.h373
-rw-r--r--js/src/jit/BaselineFrameInfo-inl.h50
-rw-r--r--js/src/jit/BaselineFrameInfo.cpp239
-rw-r--r--js/src/jit/BaselineFrameInfo.h435
-rw-r--r--js/src/jit/BaselineIC.cpp2497
-rw-r--r--js/src/jit/BaselineIC.h439
-rw-r--r--js/src/jit/BaselineICList.h49
-rw-r--r--js/src/jit/BaselineJIT.cpp1008
-rw-r--r--js/src/jit/BaselineJIT.h593
-rw-r--r--js/src/jit/BitSet.cpp111
-rw-r--r--js/src/jit/BitSet.h167
-rw-r--r--js/src/jit/BytecodeAnalysis.cpp257
-rw-r--r--js/src/jit/BytecodeAnalysis.h83
-rw-r--r--js/src/jit/CacheIR.cpp13193
-rw-r--r--js/src/jit/CacheIR.h528
-rw-r--r--js/src/jit/CacheIRCloner.h82
-rw-r--r--js/src/jit/CacheIRCompiler.cpp9638
-rw-r--r--js/src/jit/CacheIRCompiler.h1314
-rw-r--r--js/src/jit/CacheIRGenerator.h912
-rw-r--r--js/src/jit/CacheIRHealth.cpp416
-rw-r--r--js/src/jit/CacheIRHealth.h109
-rw-r--r--js/src/jit/CacheIROps.yaml3086
-rw-r--r--js/src/jit/CacheIRReader.h155
-rw-r--r--js/src/jit/CacheIRSpewer.cpp441
-rw-r--r--js/src/jit/CacheIRSpewer.h116
-rw-r--r--js/src/jit/CacheIRWriter.h642
-rw-r--r--js/src/jit/CalleeToken.h66
-rw-r--r--js/src/jit/CodeGenerator.cpp18631
-rw-r--r--js/src/jit/CodeGenerator.h447
-rw-r--r--js/src/jit/CompactBuffer.h254
-rw-r--r--js/src/jit/CompileInfo.h382
-rw-r--r--js/src/jit/CompileWrappers.cpp219
-rw-r--r--js/src/jit/CompileWrappers.h176
-rw-r--r--js/src/jit/Disassemble.cpp109
-rw-r--r--js/src/jit/Disassemble.h24
-rw-r--r--js/src/jit/EdgeCaseAnalysis.cpp50
-rw-r--r--js/src/jit/EdgeCaseAnalysis.h28
-rw-r--r--js/src/jit/EffectiveAddressAnalysis.cpp256
-rw-r--r--js/src/jit/EffectiveAddressAnalysis.h33
-rw-r--r--js/src/jit/ExecutableAllocator.cpp329
-rw-r--r--js/src/jit/ExecutableAllocator.h205
-rw-r--r--js/src/jit/FixedList.h99
-rw-r--r--js/src/jit/FlushICache.cpp132
-rw-r--r--js/src/jit/FlushICache.h92
-rw-r--r--js/src/jit/FoldLinearArithConstants.cpp112
-rw-r--r--js/src/jit/FoldLinearArithConstants.h21
-rw-r--r--js/src/jit/GenerateAtomicOperations.py873
-rw-r--r--js/src/jit/GenerateCacheIRFiles.py539
-rw-r--r--js/src/jit/GenerateLIRFiles.py298
-rw-r--r--js/src/jit/GenerateMIRFiles.py404
-rw-r--r--js/src/jit/ICState.h216
-rw-r--r--js/src/jit/ICStubSpace.h66
-rw-r--r--js/src/jit/InlinableNatives.cpp300
-rw-r--r--js/src/jit/InlinableNatives.h240
-rw-r--r--js/src/jit/InlineList.h590
-rw-r--r--js/src/jit/InlineScriptTree-inl.h67
-rw-r--r--js/src/jit/InlineScriptTree.h112
-rw-r--r--js/src/jit/InstructionReordering.cpp248
-rw-r--r--js/src/jit/InstructionReordering.h20
-rw-r--r--js/src/jit/InterpreterEntryTrampoline.cpp269
-rw-r--r--js/src/jit/InterpreterEntryTrampoline.h79
-rw-r--r--js/src/jit/Invalidation.h59
-rw-r--r--js/src/jit/Ion.cpp2631
-rw-r--r--js/src/jit/Ion.h154
-rw-r--r--js/src/jit/IonAnalysis.cpp4934
-rw-r--r--js/src/jit/IonAnalysis.h193
-rw-r--r--js/src/jit/IonCacheIRCompiler.cpp2140
-rw-r--r--js/src/jit/IonCacheIRCompiler.h93
-rw-r--r--js/src/jit/IonCompileTask.cpp203
-rw-r--r--js/src/jit/IonCompileTask.h89
-rw-r--r--js/src/jit/IonIC.cpp727
-rw-r--r--js/src/jit/IonIC.h664
-rw-r--r--js/src/jit/IonOptimizationLevels.cpp141
-rw-r--r--js/src/jit/IonOptimizationLevels.h203
-rw-r--r--js/src/jit/IonScript.h590
-rw-r--r--js/src/jit/IonTypes.h1108
-rw-r--r--js/src/jit/JSJitFrameIter-inl.h65
-rw-r--r--js/src/jit/JSJitFrameIter.cpp798
-rw-r--r--js/src/jit/JSJitFrameIter.h802
-rw-r--r--js/src/jit/JSONSpewer.cpp287
-rw-r--r--js/src/jit/JSONSpewer.h48
-rw-r--r--js/src/jit/Jit.cpp214
-rw-r--r--js/src/jit/Jit.h41
-rw-r--r--js/src/jit/JitAllocPolicy.h179
-rw-r--r--js/src/jit/JitCode.h171
-rw-r--r--js/src/jit/JitCommon.h53
-rw-r--r--js/src/jit/JitContext.cpp161
-rw-r--r--js/src/jit/JitContext.h168
-rw-r--r--js/src/jit/JitFrames-inl.h32
-rw-r--r--js/src/jit/JitFrames.cpp2570
-rw-r--r--js/src/jit/JitFrames.h748
-rw-r--r--js/src/jit/JitHints-inl.h60
-rw-r--r--js/src/jit/JitHints.h56
-rw-r--r--js/src/jit/JitOptions.cpp414
-rw-r--r--js/src/jit/JitOptions.h184
-rw-r--r--js/src/jit/JitRealm.h189
-rw-r--r--js/src/jit/JitRuntime.h451
-rw-r--r--js/src/jit/JitScript-inl.h43
-rw-r--r--js/src/jit/JitScript.cpp732
-rw-r--r--js/src/jit/JitScript.h543
-rw-r--r--js/src/jit/JitSpewer.cpp660
-rw-r--r--js/src/jit/JitSpewer.h286
-rw-r--r--js/src/jit/JitZone.h208
-rw-r--r--js/src/jit/JitcodeMap.cpp1145
-rw-r--r--js/src/jit/JitcodeMap.h808
-rw-r--r--js/src/jit/Jitdump.h78
-rw-r--r--js/src/jit/KnownClass.cpp109
-rw-r--r--js/src/jit/KnownClass.h36
-rw-r--r--js/src/jit/LICM.cpp367
-rw-r--r--js/src/jit/LICM.h23
-rw-r--r--js/src/jit/LIR.cpp780
-rw-r--r--js/src/jit/LIR.h2000
-rw-r--r--js/src/jit/LIROps.yaml3972
-rw-r--r--js/src/jit/Label.cpp29
-rw-r--r--js/src/jit/Label.h106
-rw-r--r--js/src/jit/Linker.cpp79
-rw-r--r--js/src/jit/Linker.h52
-rw-r--r--js/src/jit/Lowering.cpp7172
-rw-r--r--js/src/jit/Lowering.h91
-rw-r--r--js/src/jit/MIR.cpp7261
-rw-r--r--js/src/jit/MIR.h11613
-rw-r--r--js/src/jit/MIRGenerator.h183
-rw-r--r--js/src/jit/MIRGraph.cpp1414
-rw-r--r--js/src/jit/MIRGraph.h901
-rw-r--r--js/src/jit/MIROps.yaml3064
-rw-r--r--js/src/jit/MachineState.h110
-rw-r--r--js/src/jit/MacroAssembler-inl.h1090
-rw-r--r--js/src/jit/MacroAssembler.cpp6671
-rw-r--r--js/src/jit/MacroAssembler.h5611
-rw-r--r--js/src/jit/MoveEmitter.h32
-rw-r--r--js/src/jit/MoveResolver.cpp443
-rw-r--r--js/src/jit/MoveResolver.h309
-rw-r--r--js/src/jit/PcScriptCache.h88
-rw-r--r--js/src/jit/PerfSpewer.cpp1218
-rw-r--r--js/src/jit/PerfSpewer.h207
-rw-r--r--js/src/jit/ProcessExecutableMemory.cpp935
-rw-r--r--js/src/jit/ProcessExecutableMemory.h109
-rw-r--r--js/src/jit/RangeAnalysis.cpp3679
-rw-r--r--js/src/jit/RangeAnalysis.h683
-rw-r--r--js/src/jit/ReciprocalMulConstants.cpp94
-rw-r--r--js/src/jit/ReciprocalMulConstants.h33
-rw-r--r--js/src/jit/Recover.cpp2116
-rw-r--r--js/src/jit/Recover.h964
-rw-r--r--js/src/jit/RegExpStubConstants.h36
-rw-r--r--js/src/jit/RegisterAllocator.cpp669
-rw-r--r--js/src/jit/RegisterAllocator.h314
-rw-r--r--js/src/jit/RegisterSets.h1332
-rw-r--r--js/src/jit/Registers.h299
-rw-r--r--js/src/jit/RematerializedFrame-inl.h23
-rw-r--r--js/src/jit/RematerializedFrame.cpp221
-rw-r--r--js/src/jit/RematerializedFrame.h222
-rw-r--r--js/src/jit/SafepointIndex-inl.h22
-rw-r--r--js/src/jit/SafepointIndex.cpp20
-rw-r--r--js/src/jit/SafepointIndex.h76
-rw-r--r--js/src/jit/Safepoints.cpp559
-rw-r--r--js/src/jit/Safepoints.h129
-rw-r--r--js/src/jit/ScalarReplacement.cpp3086
-rw-r--r--js/src/jit/ScalarReplacement.h22
-rw-r--r--js/src/jit/ScalarTypeUtils.h41
-rw-r--r--js/src/jit/ScriptFromCalleeToken.h33
-rw-r--r--js/src/jit/SharedICHelpers-inl.h36
-rw-r--r--js/src/jit/SharedICHelpers.h36
-rw-r--r--js/src/jit/SharedICRegisters.h38
-rw-r--r--js/src/jit/ShuffleAnalysis.cpp747
-rw-r--r--js/src/jit/ShuffleAnalysis.h147
-rw-r--r--js/src/jit/Simulator.h32
-rw-r--r--js/src/jit/Sink.cpp255
-rw-r--r--js/src/jit/Sink.h22
-rw-r--r--js/src/jit/Snapshots.cpp605
-rw-r--r--js/src/jit/Snapshots.h529
-rw-r--r--js/src/jit/StackSlotAllocator.h133
-rw-r--r--js/src/jit/TemplateObject-inl.h126
-rw-r--r--js/src/jit/TemplateObject.h77
-rw-r--r--js/src/jit/Trampoline.cpp260
-rw-r--r--js/src/jit/TrialInlining.cpp928
-rw-r--r--js/src/jit/TrialInlining.h194
-rw-r--r--js/src/jit/TypeData.h54
-rw-r--r--js/src/jit/TypePolicy.cpp1152
-rw-r--r--js/src/jit/TypePolicy.h557
-rw-r--r--js/src/jit/VMFunctionList-inl.h379
-rw-r--r--js/src/jit/VMFunctions.cpp2940
-rw-r--r--js/src/jit/VMFunctions.h713
-rw-r--r--js/src/jit/ValueNumbering.cpp1338
-rw-r--r--js/src/jit/ValueNumbering.h123
-rw-r--r--js/src/jit/WarpBuilder.cpp3576
-rw-r--r--js/src/jit/WarpBuilder.h326
-rw-r--r--js/src/jit/WarpBuilderShared.cpp99
-rw-r--r--js/src/jit/WarpBuilderShared.h425
-rw-r--r--js/src/jit/WarpCacheIRTranspiler.cpp5809
-rw-r--r--js/src/jit/WarpCacheIRTranspiler.h33
-rw-r--r--js/src/jit/WarpOracle.cpp1226
-rw-r--r--js/src/jit/WarpOracle.h68
-rw-r--r--js/src/jit/WarpSnapshot.cpp408
-rw-r--r--js/src/jit/WarpSnapshot.h627
-rw-r--r--js/src/jit/WasmBCE.cpp139
-rw-r--r--js/src/jit/WasmBCE.h33
-rw-r--r--js/src/jit/XrayJitInfo.cpp17
-rw-r--r--js/src/jit/arm/Architecture-arm.cpp540
-rw-r--r--js/src/jit/arm/Architecture-arm.h733
-rw-r--r--js/src/jit/arm/Assembler-arm.cpp2832
-rw-r--r--js/src/jit/arm/Assembler-arm.h2296
-rw-r--r--js/src/jit/arm/CodeGenerator-arm.cpp3154
-rw-r--r--js/src/jit/arm/CodeGenerator-arm.h172
-rw-r--r--js/src/jit/arm/DoubleEntryTable.tbl257
-rw-r--r--js/src/jit/arm/LIR-arm.h511
-rw-r--r--js/src/jit/arm/Lowering-arm.cpp1223
-rw-r--r--js/src/jit/arm/Lowering-arm.h118
-rw-r--r--js/src/jit/arm/MacroAssembler-arm-inl.h2582
-rw-r--r--js/src/jit/arm/MacroAssembler-arm.cpp6382
-rw-r--r--js/src/jit/arm/MacroAssembler-arm.h1392
-rw-r--r--js/src/jit/arm/MoveEmitter-arm.cpp413
-rw-r--r--js/src/jit/arm/MoveEmitter-arm.h70
-rw-r--r--js/src/jit/arm/SharedICHelpers-arm-inl.h79
-rw-r--r--js/src/jit/arm/SharedICHelpers-arm.h80
-rw-r--r--js/src/jit/arm/SharedICRegisters-arm.h52
-rw-r--r--js/src/jit/arm/Simulator-arm.cpp5472
-rw-r--r--js/src/jit/arm/Simulator-arm.h632
-rw-r--r--js/src/jit/arm/Trampoline-arm.cpp831
-rw-r--r--js/src/jit/arm/disasm/Constants-arm.cpp117
-rw-r--r--js/src/jit/arm/disasm/Constants-arm.h684
-rw-r--r--js/src/jit/arm/disasm/Disasm-arm.cpp2031
-rw-r--r--js/src/jit/arm/disasm/Disasm-arm.h141
-rw-r--r--js/src/jit/arm/gen-double-encoder-table.py35
-rw-r--r--js/src/jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S27
-rw-r--r--js/src/jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S28
-rw-r--r--js/src/jit/arm/llvm-compiler-rt/assembly.h67
-rw-r--r--js/src/jit/arm64/Architecture-arm64.cpp129
-rw-r--r--js/src/jit/arm64/Architecture-arm64.h773
-rw-r--r--js/src/jit/arm64/Assembler-arm64.cpp609
-rw-r--r--js/src/jit/arm64/Assembler-arm64.h793
-rw-r--r--js/src/jit/arm64/CodeGenerator-arm64.cpp4245
-rw-r--r--js/src/jit/arm64/CodeGenerator-arm64.h135
-rw-r--r--js/src/jit/arm64/LIR-arm64.h373
-rw-r--r--js/src/jit/arm64/Lowering-arm64.cpp1438
-rw-r--r--js/src/jit/arm64/Lowering-arm64.h135
-rw-r--r--js/src/jit/arm64/MacroAssembler-arm64-inl.h4079
-rw-r--r--js/src/jit/arm64/MacroAssembler-arm64.cpp3416
-rw-r--r--js/src/jit/arm64/MacroAssembler-arm64.h2206
-rw-r--r--js/src/jit/arm64/MoveEmitter-arm64.cpp329
-rw-r--r--js/src/jit/arm64/MoveEmitter-arm64.h99
-rw-r--r--js/src/jit/arm64/SharedICHelpers-arm64-inl.h79
-rw-r--r--js/src/jit/arm64/SharedICHelpers-arm64.h82
-rw-r--r--js/src/jit/arm64/SharedICRegisters-arm64.h51
-rw-r--r--js/src/jit/arm64/Trampoline-arm64.cpp840
-rw-r--r--js/src/jit/arm64/vixl/.clang-format4
-rw-r--r--js/src/jit/arm64/vixl/AUTHORS8
-rw-r--r--js/src/jit/arm64/vixl/Assembler-vixl.cpp5318
-rw-r--r--js/src/jit/arm64/vixl/Assembler-vixl.h4974
-rw-r--r--js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h179
-rw-r--r--js/src/jit/arm64/vixl/Constants-vixl.h2694
-rw-r--r--js/src/jit/arm64/vixl/Cpu-Features-vixl.cpp231
-rw-r--r--js/src/jit/arm64/vixl/Cpu-Features-vixl.h397
-rw-r--r--js/src/jit/arm64/vixl/Cpu-vixl.cpp256
-rw-r--r--js/src/jit/arm64/vixl/Cpu-vixl.h241
-rw-r--r--js/src/jit/arm64/vixl/Debugger-vixl.cpp1535
-rw-r--r--js/src/jit/arm64/vixl/Debugger-vixl.h117
-rw-r--r--js/src/jit/arm64/vixl/Decoder-vixl.cpp899
-rw-r--r--js/src/jit/arm64/vixl/Decoder-vixl.h276
-rw-r--r--js/src/jit/arm64/vixl/Disasm-vixl.cpp3741
-rw-r--r--js/src/jit/arm64/vixl/Disasm-vixl.h181
-rw-r--r--js/src/jit/arm64/vixl/Globals-vixl.h272
-rw-r--r--js/src/jit/arm64/vixl/Instructions-vixl.cpp627
-rw-r--r--js/src/jit/arm64/vixl/Instructions-vixl.h817
-rw-r--r--js/src/jit/arm64/vixl/Instrument-vixl.cpp850
-rw-r--r--js/src/jit/arm64/vixl/Instrument-vixl.h109
-rw-r--r--js/src/jit/arm64/vixl/Logic-vixl.cpp4738
-rw-r--r--js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp2027
-rw-r--r--js/src/jit/arm64/vixl/MacroAssembler-vixl.h2622
-rw-r--r--js/src/jit/arm64/vixl/MozAssembler-vixl.cpp610
-rw-r--r--js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h356
-rw-r--r--js/src/jit/arm64/vixl/MozCachingDecoder.h179
-rw-r--r--js/src/jit/arm64/vixl/MozCpu-vixl.cpp226
-rw-r--r--js/src/jit/arm64/vixl/MozInstructions-vixl.cpp211
-rw-r--r--js/src/jit/arm64/vixl/MozSimulator-vixl.cpp1258
-rw-r--r--js/src/jit/arm64/vixl/Platform-vixl.h39
-rw-r--r--js/src/jit/arm64/vixl/README.md7
-rw-r--r--js/src/jit/arm64/vixl/Simulator-Constants-vixl.h140
-rw-r--r--js/src/jit/arm64/vixl/Simulator-vixl.cpp4371
-rw-r--r--js/src/jit/arm64/vixl/Simulator-vixl.h2592
-rw-r--r--js/src/jit/arm64/vixl/Utils-vixl.cpp555
-rw-r--r--js/src/jit/arm64/vixl/Utils-vixl.h1283
-rw-r--r--js/src/jit/loong64/Architecture-loong64.cpp87
-rw-r--r--js/src/jit/loong64/Architecture-loong64.h522
-rw-r--r--js/src/jit/loong64/Assembler-loong64.cpp2478
-rw-r--r--js/src/jit/loong64/Assembler-loong64.h1884
-rw-r--r--js/src/jit/loong64/CodeGenerator-loong64.cpp2790
-rw-r--r--js/src/jit/loong64/CodeGenerator-loong64.h209
-rw-r--r--js/src/jit/loong64/LIR-loong64.h399
-rw-r--r--js/src/jit/loong64/Lowering-loong64.cpp1088
-rw-r--r--js/src/jit/loong64/Lowering-loong64.h110
-rw-r--r--js/src/jit/loong64/MacroAssembler-loong64-inl.h2131
-rw-r--r--js/src/jit/loong64/MacroAssembler-loong64.cpp5389
-rw-r--r--js/src/jit/loong64/MacroAssembler-loong64.h1037
-rw-r--r--js/src/jit/loong64/MoveEmitter-loong64.cpp326
-rw-r--r--js/src/jit/loong64/MoveEmitter-loong64.h76
-rw-r--r--js/src/jit/loong64/SharedICHelpers-loong64-inl.h83
-rw-r--r--js/src/jit/loong64/SharedICHelpers-loong64.h91
-rw-r--r--js/src/jit/loong64/SharedICRegisters-loong64.h42
-rw-r--r--js/src/jit/loong64/Simulator-loong64.cpp5238
-rw-r--r--js/src/jit/loong64/Simulator-loong64.h650
-rw-r--r--js/src/jit/loong64/Trampoline-loong64.cpp833
-rw-r--r--js/src/jit/mips-shared/Architecture-mips-shared.cpp121
-rw-r--r--js/src/jit/mips-shared/Architecture-mips-shared.h341
-rw-r--r--js/src/jit/mips-shared/Assembler-mips-shared.cpp2094
-rw-r--r--js/src/jit/mips-shared/Assembler-mips-shared.h1500
-rw-r--r--js/src/jit/mips-shared/AtomicOperations-mips-shared.h521
-rw-r--r--js/src/jit/mips-shared/BaselineIC-mips-shared.cpp37
-rw-r--r--js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp2448
-rw-r--r--js/src/jit/mips-shared/CodeGenerator-mips-shared.h157
-rw-r--r--js/src/jit/mips-shared/LIR-mips-shared.h360
-rw-r--r--js/src/jit/mips-shared/Lowering-mips-shared.cpp1024
-rw-r--r--js/src/jit/mips-shared/Lowering-mips-shared.h89
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h1307
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp3355
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared.h258
-rw-r--r--js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp207
-rw-r--r--js/src/jit/mips-shared/MoveEmitter-mips-shared.h73
-rw-r--r--js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h82
-rw-r--r--js/src/jit/mips-shared/SharedICHelpers-mips-shared.h88
-rw-r--r--js/src/jit/mips32/Architecture-mips32.cpp94
-rw-r--r--js/src/jit/mips32/Architecture-mips32.h282
-rw-r--r--js/src/jit/mips32/Assembler-mips32.cpp369
-rw-r--r--js/src/jit/mips32/Assembler-mips32.h265
-rw-r--r--js/src/jit/mips32/CodeGenerator-mips32.cpp507
-rw-r--r--js/src/jit/mips32/CodeGenerator-mips32.h60
-rw-r--r--js/src/jit/mips32/LIR-mips32.h197
-rw-r--r--js/src/jit/mips32/Lowering-mips32.cpp257
-rw-r--r--js/src/jit/mips32/Lowering-mips32.h54
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32-inl.h1027
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32.cpp2825
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32.h823
-rw-r--r--js/src/jit/mips32/MoveEmitter-mips32.cpp152
-rw-r--r--js/src/jit/mips32/MoveEmitter-mips32.h31
-rw-r--r--js/src/jit/mips32/SharedICRegisters-mips32.h42
-rw-r--r--js/src/jit/mips32/Simulator-mips32.cpp3629
-rw-r--r--js/src/jit/mips32/Simulator-mips32.h526
-rw-r--r--js/src/jit/mips32/Trampoline-mips32.cpp942
-rw-r--r--js/src/jit/mips64/Architecture-mips64.cpp88
-rw-r--r--js/src/jit/mips64/Architecture-mips64.h233
-rw-r--r--js/src/jit/mips64/Assembler-mips64.cpp371
-rw-r--r--js/src/jit/mips64/Assembler-mips64.h288
-rw-r--r--js/src/jit/mips64/CodeGenerator-mips64.cpp586
-rw-r--r--js/src/jit/mips64/CodeGenerator-mips64.h65
-rw-r--r--js/src/jit/mips64/LIR-mips64.h147
-rw-r--r--js/src/jit/mips64/Lowering-mips64.cpp201
-rw-r--r--js/src/jit/mips64/Lowering-mips64.h56
-rw-r--r--js/src/jit/mips64/MacroAssembler-mips64-inl.h845
-rw-r--r--js/src/jit/mips64/MacroAssembler-mips64.cpp2852
-rw-r--r--js/src/jit/mips64/MacroAssembler-mips64.h841
-rw-r--r--js/src/jit/mips64/MoveEmitter-mips64.cpp149
-rw-r--r--js/src/jit/mips64/MoveEmitter-mips64.h31
-rw-r--r--js/src/jit/mips64/SharedICRegisters-mips64.h45
-rw-r--r--js/src/jit/mips64/Simulator-mips64.cpp4402
-rw-r--r--js/src/jit/mips64/Simulator-mips64.h536
-rw-r--r--js/src/jit/mips64/Trampoline-mips64.cpp870
-rw-r--r--js/src/jit/moz.build295
-rw-r--r--js/src/jit/none/Architecture-none.h171
-rw-r--r--js/src/jit/none/Assembler-none.h211
-rw-r--r--js/src/jit/none/CodeGenerator-none.h78
-rw-r--r--js/src/jit/none/LIR-none.h111
-rw-r--r--js/src/jit/none/Lowering-none.h130
-rw-r--r--js/src/jit/none/MacroAssembler-none.h454
-rw-r--r--js/src/jit/none/MoveEmitter-none.h32
-rw-r--r--js/src/jit/none/SharedICHelpers-none-inl.h31
-rw-r--r--js/src/jit/none/SharedICHelpers-none.h32
-rw-r--r--js/src/jit/none/SharedICRegisters-none.h32
-rw-r--r--js/src/jit/none/Trampoline-none.cpp43
-rw-r--r--js/src/jit/riscv64/Architecture-riscv64.cpp100
-rw-r--r--js/src/jit/riscv64/Architecture-riscv64.h513
-rw-r--r--js/src/jit/riscv64/Assembler-riscv64.cpp1548
-rw-r--r--js/src/jit/riscv64/Assembler-riscv64.h685
-rw-r--r--js/src/jit/riscv64/AssemblerMatInt.cpp217
-rw-r--r--js/src/jit/riscv64/CodeGenerator-riscv64.cpp2871
-rw-r--r--js/src/jit/riscv64/CodeGenerator-riscv64.h210
-rw-r--r--js/src/jit/riscv64/LIR-riscv64.h399
-rw-r--r--js/src/jit/riscv64/Lowering-riscv64.cpp1087
-rw-r--r--js/src/jit/riscv64/Lowering-riscv64.h110
-rw-r--r--js/src/jit/riscv64/MacroAssembler-riscv64-inl.h2025
-rw-r--r--js/src/jit/riscv64/MacroAssembler-riscv64.cpp6515
-rw-r--r--js/src/jit/riscv64/MacroAssembler-riscv64.h1224
-rw-r--r--js/src/jit/riscv64/MoveEmitter-riscv64.cpp333
-rw-r--r--js/src/jit/riscv64/MoveEmitter-riscv64.h70
-rw-r--r--js/src/jit/riscv64/Register-riscv64.h186
-rw-r--r--js/src/jit/riscv64/SharedICHelpers-riscv64-inl.h80
-rw-r--r--js/src/jit/riscv64/SharedICHelpers-riscv64.h77
-rw-r--r--js/src/jit/riscv64/SharedICRegisters-riscv64.h38
-rw-r--r--js/src/jit/riscv64/Simulator-riscv64.cpp4718
-rw-r--r--js/src/jit/riscv64/Simulator-riscv64.h1281
-rw-r--r--js/src/jit/riscv64/Trampoline-riscv64.cpp856
-rw-r--r--js/src/jit/riscv64/constant/Base-constant-riscv.cpp247
-rw-r--r--js/src/jit/riscv64/constant/Base-constant-riscv.h1057
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-a.h43
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-c.h61
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-d.h55
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-f.h51
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-i.h73
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-m.h34
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-v.h508
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-zicsr.h30
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-zifencei.h15
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv64.h68
-rw-r--r--js/src/jit/riscv64/constant/util-riscv64.h82
-rw-r--r--js/src/jit/riscv64/disasm/Disasm-riscv64.cpp2155
-rw-r--r--js/src/jit/riscv64/disasm/Disasm-riscv64.h74
-rw-r--r--js/src/jit/riscv64/extension/base-assembler-riscv.cc517
-rw-r--r--js/src/jit/riscv64/extension/base-assembler-riscv.h219
-rw-r--r--js/src/jit/riscv64/extension/base-riscv-i.cc351
-rw-r--r--js/src/jit/riscv64/extension/base-riscv-i.h273
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-a.cc123
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-a.h46
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-c.cc275
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-c.h77
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-d.cc167
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-d.h68
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-f.cc158
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-f.h66
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-m.cc68
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-m.h37
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-v.cc891
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-v.h484
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-zicsr.cc44
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-zicsr.h57
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-zifencei.cc17
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-zifencei.h20
-rw-r--r--js/src/jit/shared/Architecture-shared.h18
-rw-r--r--js/src/jit/shared/Assembler-shared.cpp74
-rw-r--r--js/src/jit/shared/Assembler-shared.h716
-rw-r--r--js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h453
-rw-r--r--js/src/jit/shared/AtomicOperations-feeling-lucky.h16
-rw-r--r--js/src/jit/shared/AtomicOperations-shared-jit.cpp180
-rw-r--r--js/src/jit/shared/AtomicOperations-shared-jit.h490
-rw-r--r--js/src/jit/shared/CodeGenerator-shared-inl.h342
-rw-r--r--js/src/jit/shared/CodeGenerator-shared.cpp983
-rw-r--r--js/src/jit/shared/CodeGenerator-shared.h488
-rw-r--r--js/src/jit/shared/Disassembler-shared.cpp248
-rw-r--r--js/src/jit/shared/Disassembler-shared.h184
-rw-r--r--js/src/jit/shared/IonAssemblerBuffer.h438
-rw-r--r--js/src/jit/shared/IonAssemblerBufferWithConstantPools.h1197
-rw-r--r--js/src/jit/shared/LIR-shared.h4272
-rw-r--r--js/src/jit/shared/Lowering-shared-inl.h894
-rw-r--r--js/src/jit/shared/Lowering-shared.cpp319
-rw-r--r--js/src/jit/shared/Lowering-shared.h371
-rw-r--r--js/src/jit/wasm32/Architecture-wasm32.h174
-rw-r--r--js/src/jit/wasm32/Assembler-wasm32.h229
-rw-r--r--js/src/jit/wasm32/CodeGenerator-wasm32.cpp254
-rw-r--r--js/src/jit/wasm32/CodeGenerator-wasm32.h76
-rw-r--r--js/src/jit/wasm32/LIR-wasm32.h109
-rw-r--r--js/src/jit/wasm32/Lowering-wasm32.h128
-rw-r--r--js/src/jit/wasm32/MacroAssembler-wasm32-inl.h1176
-rw-r--r--js/src/jit/wasm32/MacroAssembler-wasm32.cpp502
-rw-r--r--js/src/jit/wasm32/MacroAssembler-wasm32.h528
-rw-r--r--js/src/jit/wasm32/MoveEmitter-wasm32.h30
-rw-r--r--js/src/jit/wasm32/SharedICHelpers-wasm32-inl.h32
-rw-r--r--js/src/jit/wasm32/SharedICHelpers-wasm32.h30
-rw-r--r--js/src/jit/wasm32/SharedICRegisters-wasm32.h36
-rw-r--r--js/src/jit/wasm32/Trampoline-wasm32.cpp46
-rw-r--r--js/src/jit/x64/Assembler-x64.cpp246
-rw-r--r--js/src/jit/x64/Assembler-x64.h1249
-rw-r--r--js/src/jit/x64/BaseAssembler-x64.h1373
-rw-r--r--js/src/jit/x64/CodeGenerator-x64.cpp984
-rw-r--r--js/src/jit/x64/CodeGenerator-x64.h41
-rw-r--r--js/src/jit/x64/LIR-x64.h170
-rw-r--r--js/src/jit/x64/Lowering-x64.cpp565
-rw-r--r--js/src/jit/x64/Lowering-x64.h70
-rw-r--r--js/src/jit/x64/MacroAssembler-x64-inl.h1099
-rw-r--r--js/src/jit/x64/MacroAssembler-x64.cpp1747
-rw-r--r--js/src/jit/x64/MacroAssembler-x64.h1218
-rw-r--r--js/src/jit/x64/SharedICHelpers-x64-inl.h80
-rw-r--r--js/src/jit/x64/SharedICHelpers-x64.h70
-rw-r--r--js/src/jit/x64/SharedICRegisters-x64.h33
-rw-r--r--js/src/jit/x64/Trampoline-x64.cpp888
-rw-r--r--js/src/jit/x86-shared/Architecture-x86-shared.cpp93
-rw-r--r--js/src/jit/x86-shared/Architecture-x86-shared.h467
-rw-r--r--js/src/jit/x86-shared/Assembler-x86-shared.cpp355
-rw-r--r--js/src/jit/x86-shared/Assembler-x86-shared.h4887
-rw-r--r--js/src/jit/x86-shared/AssemblerBuffer-x86-shared.cpp57
-rw-r--r--js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h256
-rw-r--r--js/src/jit/x86-shared/BaseAssembler-x86-shared.h6460
-rw-r--r--js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp3883
-rw-r--r--js/src/jit/x86-shared/CodeGenerator-x86-shared.h189
-rw-r--r--js/src/jit/x86-shared/Constants-x86-shared.h326
-rw-r--r--js/src/jit/x86-shared/Encoding-x86-shared.h508
-rw-r--r--js/src/jit/x86-shared/LIR-x86-shared.h304
-rw-r--r--js/src/jit/x86-shared/Lowering-x86-shared.cpp1863
-rw-r--r--js/src/jit/x86-shared/Lowering-x86-shared.h78
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp1484
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h3396
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp2132
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared.h998
-rw-r--r--js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp528
-rw-r--r--js/src/jit/x86-shared/MoveEmitter-x86-shared.h83
-rw-r--r--js/src/jit/x86-shared/Patching-x86-shared.h113
-rw-r--r--js/src/jit/x86/Assembler-x86.cpp85
-rw-r--r--js/src/jit/x86/Assembler-x86.h1079
-rw-r--r--js/src/jit/x86/BaseAssembler-x86.h190
-rw-r--r--js/src/jit/x86/CodeGenerator-x86.cpp1509
-rw-r--r--js/src/jit/x86/CodeGenerator-x86.h49
-rw-r--r--js/src/jit/x86/LIR-x86.h308
-rw-r--r--js/src/jit/x86/Lowering-x86.cpp840
-rw-r--r--js/src/jit/x86/Lowering-x86.h79
-rw-r--r--js/src/jit/x86/MacroAssembler-x86-inl.h1386
-rw-r--r--js/src/jit/x86/MacroAssembler-x86.cpp1829
-rw-r--r--js/src/jit/x86/MacroAssembler-x86.h1149
-rw-r--r--js/src/jit/x86/SharedICHelpers-x86-inl.h77
-rw-r--r--js/src/jit/x86/SharedICHelpers-x86.h70
-rw-r--r--js/src/jit/x86/SharedICRegisters-x86.h36
-rw-r--r--js/src/jit/x86/Trampoline-x86.cpp796
531 files changed, 466249 insertions, 0 deletions
diff --git a/js/src/jit/ABIArgGenerator.h b/js/src/jit/ABIArgGenerator.h
new file mode 100644
index 0000000000..d78a21e242
--- /dev/null
+++ b/js/src/jit/ABIArgGenerator.h
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ABIArgGenerator_h
+#define jit_ABIArgGenerator_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/Assembler.h"
+#include "jit/IonTypes.h"
+#include "jit/RegisterSets.h"
+#include "wasm/WasmFrame.h"
+
+namespace js::jit {
+
+static inline MIRType ToMIRType(MIRType t) { return t; }
+
+static inline MIRType ToMIRType(ABIArgType argType) {
+ switch (argType) {
+ case ArgType_General:
+ return MIRType::Pointer;
+ case ArgType_Float64:
+ return MIRType::Double;
+ case ArgType_Float32:
+ return MIRType::Float32;
+ case ArgType_Int32:
+ return MIRType::Int32;
+ case ArgType_Int64:
+ return MIRType::Int64;
+ default:
+ break;
+ }
+ MOZ_CRASH("unexpected argType");
+}
+
+template <class VecT, class ABIArgGeneratorT>
+class ABIArgIterBase {
+ ABIArgGeneratorT gen_;
+ const VecT& types_;
+ unsigned i_;
+
+ void settle() {
+ if (!done()) gen_.next(ToMIRType(types_[i_]));
+ }
+
+ public:
+ explicit ABIArgIterBase(const VecT& types) : types_(types), i_(0) {
+ settle();
+ }
+ void operator++(int) {
+ MOZ_ASSERT(!done());
+ i_++;
+ settle();
+ }
+ bool done() const { return i_ == types_.length(); }
+
+ ABIArg* operator->() {
+ MOZ_ASSERT(!done());
+ return &gen_.current();
+ }
+ ABIArg& operator*() {
+ MOZ_ASSERT(!done());
+ return gen_.current();
+ }
+
+ unsigned index() const {
+ MOZ_ASSERT(!done());
+ return i_;
+ }
+ MIRType mirType() const {
+ MOZ_ASSERT(!done());
+ return ToMIRType(types_[i_]);
+ }
+ uint32_t stackBytesConsumedSoFar() const {
+ return gen_.stackBytesConsumedSoFar();
+ }
+};
+
+// This is not an alias because we want to allow class template argument
+// deduction.
+template <class VecT>
+class ABIArgIter : public ABIArgIterBase<VecT, ABIArgGenerator> {
+ public:
+ explicit ABIArgIter(const VecT& types)
+ : ABIArgIterBase<VecT, ABIArgGenerator>(types) {}
+};
+
+class WasmABIArgGenerator : public ABIArgGenerator {
+ public:
+ WasmABIArgGenerator() {
+ increaseStackOffset(wasm::FrameWithInstances::sizeOfInstanceFields());
+ }
+};
+
+template <class VecT>
+class WasmABIArgIter : public ABIArgIterBase<VecT, WasmABIArgGenerator> {
+ public:
+ explicit WasmABIArgIter(const VecT& types)
+ : ABIArgIterBase<VecT, WasmABIArgGenerator>(types) {}
+};
+
+} // namespace js::jit
+
+#endif /* jit_ABIArgGenerator_h */
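
A usage sketch of the iterator above (not part of this patch): walk a vector of MIR types and let the platform ABIArgGenerator place each argument. Only done(), operator++(int), operator*, index(), mirType() and stackBytesConsumedSoFar() come from the header itself; `MIRTypeVector` (some Vector of MIRType) and what a caller does with each ABIArg are assumptions.

static uint32_t SummarizeOutgoingArgs(const MIRTypeVector& argTypes) {
  js::jit::ABIArgIter iter(argTypes);  // CTAD deduces ABIArgIter<MIRTypeVector>
  while (!iter.done()) {
    js::jit::ABIArg& arg = *iter;      // location of argument iter.index()
    MOZ_ASSERT(iter.mirType() != js::jit::MIRType::None);
    (void)arg;  // a real caller would emit a register or stack move here
    iter++;
  }
  // Valid after the loop: bytes of outgoing stack used by stack-passed args.
  return iter.stackBytesConsumedSoFar();
}
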
diff --git a/js/src/jit/ABIFunctionList-inl.h b/js/src/jit/ABIFunctionList-inl.h
new file mode 100644
index 0000000000..fd0c0085ec
--- /dev/null
+++ b/js/src/jit/ABIFunctionList-inl.h
@@ -0,0 +1,252 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ABIFunctionList_inl_h
+#define jit_ABIFunctionList_inl_h
+
+#include "jslibmath.h" // js::NumberMod
+#include "jsmath.h" // js::ecmaPow, js::ecmaHypot, js::hypot3, js::hypot4,
+ // js::ecmaAtan2, js::UnaryMathFunctionType, js::powi
+#include "jsnum.h" // js::StringToNumberPure, js::Int32ToStringPure,
+ // js::NumberToStringPure
+
+#include "builtin/Array.h" // js::ArrayShiftMoveElements
+#include "builtin/MapObject.h" // js::MapIteratorObject::next,
+ // js::SetIteratorObject::next
+#include "builtin/Object.h" // js::ObjectClassToString
+#include "builtin/RegExp.h" // js::RegExpPrototypeOptimizableRaw,
+ // js::RegExpInstanceOptimizableRaw
+#include "builtin/TestingFunctions.h" // js::FuzzilliHash*
+
+#include "irregexp/RegExpAPI.h"
+// js::irregexp::CaseInsensitiveCompareNonUnicode,
+// js::irregexp::CaseInsensitiveCompareUnicode,
+// js::irregexp::GrowBacktrackStack,
+// js::irregexp::IsCharacterInRangeArray
+
+#include "jit/ABIFunctions.h"
+#include "jit/Bailouts.h" // js::jit::FinishBailoutToBaseline, js::jit::Bailout,
+ // js::jit::InvalidationBailout
+
+#include "jit/Ion.h" // js::jit::LazyLinkTopActivation
+#include "jit/JitFrames.h" // HandleException
+#include "jit/VMFunctions.h" // Rest of js::jit::* functions.
+
+#include "js/CallArgs.h" // JSNative
+#include "js/Conversions.h" // JS::ToInt32
+// JSJitGetterOp, JSJitSetterOp, JSJitMethodOp
+#include "js/experimental/JitInfo.h"
+#include "js/Utility.h" // js_free
+
+#include "proxy/Proxy.h" // js::ProxyGetProperty
+
+#include "vm/ArgumentsObject.h" // js::ArgumentsObject::finishForIonPure
+#include "vm/Interpreter.h" // js::TypeOfObject
+#include "vm/NativeObject.h" // js::NativeObject
+#include "vm/RegExpShared.h" // js::ExecuteRegExpAtomRaw
+#include "wasm/WasmBuiltins.h" // js::wasm::*
+
+#include "builtin/Boolean-inl.h" // js::EmulatesUndefined
+
+namespace js {
+namespace jit {
+
+// List of all ABI functions to be used with callWithABI. Each entry stores
+// the fully qualified name of the C++ function. This list must be sorted.
+#if JS_GC_PROBES
+# define ABIFUNCTION_JS_GC_PROBES_LIST(_) _(js::jit::TraceCreateObject)
+#else
+# define ABIFUNCTION_JS_GC_PROBES_LIST(_)
+#endif
+
+#if defined(JS_CODEGEN_ARM)
+# define ABIFUNCTION_JS_CODEGEN_ARM_LIST(_) \
+ _(__aeabi_idivmod) \
+ _(__aeabi_uidivmod)
+#else
+# define ABIFUNCTION_JS_CODEGEN_ARM_LIST(_)
+#endif
+
+#ifdef WASM_CODEGEN_DEBUG
+# define ABIFUNCTION_WASM_CODEGEN_DEBUG_LIST(_) \
+ _(js::wasm::PrintF32) \
+ _(js::wasm::PrintF64) \
+ _(js::wasm::PrintI32) \
+ _(js::wasm::PrintPtr) \
+ _(js::wasm::PrintText)
+#else
+# define ABIFUNCTION_WASM_CODEGEN_DEBUG_LIST(_)
+#endif
+
+#ifdef FUZZING_JS_FUZZILLI
+# define ABIFUNCTION_FUZZILLI_LIST(_) _(js::FuzzilliHashBigInt)
+#else
+# define ABIFUNCTION_FUZZILLI_LIST(_)
+#endif
+
+#define ABIFUNCTION_LIST(_) \
+ ABIFUNCTION_JS_GC_PROBES_LIST(_) \
+ ABIFUNCTION_JS_CODEGEN_ARM_LIST(_) \
+ ABIFUNCTION_WASM_CODEGEN_DEBUG_LIST(_) \
+ _(js::ArgumentsObject::finishForIonPure) \
+ _(js::ArgumentsObject::finishInlineForIonPure) \
+ _(js::ArrayShiftMoveElements) \
+ _(js::ecmaAtan2) \
+ _(js::ecmaHypot) \
+ _(js::ecmaPow) \
+ _(js::EmulatesUndefined) \
+ _(js::ExecuteRegExpAtomRaw) \
+ _(js_free) \
+ _(js::hypot3) \
+ _(js::hypot4) \
+ _(js::Interpret) \
+ _(js::Int32ToStringPure) \
+ _(js::irregexp::CaseInsensitiveCompareNonUnicode) \
+ _(js::irregexp::CaseInsensitiveCompareUnicode) \
+ _(js::irregexp::GrowBacktrackStack) \
+ _(js::irregexp::IsCharacterInRangeArray) \
+ _(js::jit::AllocateAndInitTypedArrayBuffer) \
+ _(js::jit::AllocateBigIntNoGC) \
+ _(js::jit::AllocateFatInlineString) \
+ _(js::jit::AllocateDependentString) \
+ _(js::jit::ArrayPushDensePure) \
+ _(js::jit::AssertMapObjectHash) \
+ _(js::jit::AssertPropertyLookup) \
+ _(js::jit::AssertSetObjectHash) \
+ _(js::jit::AssertValidBigIntPtr) \
+ _(js::jit::AssertValidObjectPtr) \
+ _(js::jit::AssertValidStringPtr) \
+ _(js::jit::AssertValidSymbolPtr) \
+ _(js::jit::AssertValidValue) \
+ _(js::jit::AssumeUnreachable) \
+ _(js::jit::AtomicsStore64) \
+ _(js::jit::AtomizeStringNoGC) \
+ _(js::jit::Bailout) \
+ _(js::jit::BigIntNumberEqual<EqualityKind::Equal>) \
+ _(js::jit::BigIntNumberEqual<EqualityKind::NotEqual>) \
+ _(js::jit::BigIntNumberCompare<ComparisonKind::LessThan>) \
+ _(js::jit::NumberBigIntCompare<ComparisonKind::LessThan>) \
+ _(js::jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>) \
+ _(js::jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>) \
+ _(js::jit::CreateMatchResultFallbackFunc) \
+ _(js::jit::EqualStringsHelperPure) \
+ _(js::jit::FinishBailoutToBaseline) \
+ _(js::jit::FrameIsDebuggeeCheck) \
+ _(js::jit::GetContextSensitiveInterpreterStub) \
+ _(js::jit::GetIndexFromString) \
+ _(js::jit::GetInt32FromStringPure) \
+ _(js::jit::GetNativeDataPropertyPure) \
+ _(js::jit::GetNativeDataPropertyPureWithCacheLookup) \
+ _(js::jit::GetNativeDataPropertyByValuePure) \
+ _(js::jit::GlobalHasLiveOnDebuggerStatement) \
+ _(js::jit::HandleCodeCoverageAtPC) \
+ _(js::jit::HandleCodeCoverageAtPrologue) \
+ _(js::jit::HandleException) \
+ _(js::jit::HasNativeDataPropertyPure<false>) \
+ _(js::jit::HasNativeDataPropertyPure<true>) \
+ _(js::jit::HasNativeElementPure) \
+ _(js::jit::InitBaselineFrameForOsr) \
+ _(js::jit::InvalidationBailout) \
+ _(js::jit::InvokeFromInterpreterStub) \
+ _(js::jit::LazyLinkTopActivation) \
+ _(js::jit::LinearizeForCharAccessPure) \
+ _(js::jit::ObjectHasGetterSetterPure) \
+ _(js::jit::ObjectIsCallable) \
+ _(js::jit::ObjectIsConstructor) \
+ _(js::jit::PostGlobalWriteBarrier) \
+ _(js::jit::PostWriteBarrier) \
+ _(js::jit::PostWriteElementBarrier<IndexInBounds::Yes>) \
+ _(js::jit::PostWriteElementBarrier<IndexInBounds::Maybe>) \
+ _(js::jit::Printf0) \
+ _(js::jit::Printf1) \
+ _(js::jit::StringFromCharCodeNoGC) \
+ _(js::jit::TypeOfNameObject) \
+ _(js::jit::WrapObjectPure) \
+ ABIFUNCTION_FUZZILLI_LIST(_) \
+ _(js::MapIteratorObject::next) \
+ _(js::NativeObject::addDenseElementPure) \
+ _(js::NativeObject::growSlotsPure) \
+ _(js::NumberMod) \
+ _(js::NumberToStringPure) \
+ _(js::ObjectClassToString) \
+ _(js::powi) \
+ _(js::ProxyGetProperty) \
+ _(js::RegExpInstanceOptimizableRaw) \
+ _(js::RegExpPrototypeOptimizableRaw) \
+ _(js::SetIteratorObject::next) \
+ _(js::StringToNumberPure) \
+ _(js::TypeOfObject)
+
+// List of all ABI functions to be used with callWithABI, which are
+// overloaded. Each entry stores the fully qualified name of the C++ function,
+// followed by the signature of the function to be called. When the function
+// is not overloaded, you should prefer adding the function to
+// ABIFUNCTION_LIST instead. This list must be sorted with the name of the C++
+// function.
+#define ABIFUNCTION_AND_TYPE_LIST(_) _(JS::ToInt32, int32_t (*)(double))
+
+// List of all ABI function signatures which are used with a computed function
+// pointer instead of a statically known function pointer.
+#define ABIFUNCTIONSIG_LIST(_) \
+ _(AtomicsCompareExchangeFn) \
+ _(AtomicsReadWriteModifyFn) \
+ _(bool (*)(BigInt*, BigInt*)) \
+ _(bool (*)(BigInt*, double)) \
+ _(bool (*)(double, BigInt*)) \
+ _(float (*)(float)) \
+ _(JSJitGetterOp) \
+ _(JSJitMethodOp) \
+ _(JSJitSetterOp) \
+ _(JSNative) \
+ _(js::UnaryMathFunctionType) \
+ _(void (*)(js::gc::StoreBuffer*, js::gc::Cell**)) \
+ _(void (*)(JSRuntime * rt, JSObject * *objp)) \
+ _(void (*)(JSRuntime * rt, JSString * *stringp)) \
+ _(void (*)(JSRuntime * rt, Shape * *shapep)) \
+ _(void (*)(JSRuntime * rt, Value * vp))
+
+// GCC warns when the signature does not have matching attributes (for example
+// [[nodiscard]]). Squelch this warning to avoid a GCC-only footgun.
+#if MOZ_IS_GCC
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wignored-attributes"
+#endif
+
+// Note: the use of ::fp instead of fp is intentional to enforce use of
+// fully-qualified names in the list above.
+#define DEF_TEMPLATE(fp) \
+ template <> \
+ struct ABIFunctionData<decltype(&(::fp)), ::fp> { \
+ static constexpr bool registered = true; \
+ };
+ABIFUNCTION_LIST(DEF_TEMPLATE)
+#undef DEF_TEMPLATE
+
+#define DEF_TEMPLATE(fp, ...) \
+ template <> \
+ struct ABIFunctionData<__VA_ARGS__, ::fp> { \
+ static constexpr bool registered = true; \
+ };
+ABIFUNCTION_AND_TYPE_LIST(DEF_TEMPLATE)
+#undef DEF_TEMPLATE
+
+// Define a known list of function signatures.
+#define DEF_TEMPLATE(...) \
+ template <> \
+ struct ABIFunctionSignatureData<__VA_ARGS__> { \
+ static constexpr bool registered = true; \
+ };
+ABIFUNCTIONSIG_LIST(DEF_TEMPLATE)
+#undef DEF_TEMPLATE
+
+#if MOZ_IS_GCC
+# pragma GCC diagnostic pop
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif  // jit_ABIFunctionList_inl_h
diff --git a/js/src/jit/ABIFunctions.h b/js/src/jit/ABIFunctions.h
new file mode 100644
index 0000000000..d6bd15555f
--- /dev/null
+++ b/js/src/jit/ABIFunctions.h
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ABIFunctions_h
+#define jit_ABIFunctions_h
+
+#include "jstypes.h" // JS_FUNC_TO_DATA_PTR
+
+namespace js {
+namespace jit {
+
+// This class is used to ensure that all known targets of callWithABI are
+// registered here; otherwise, a static assertion is raised at compile time.
+template <typename Sig, Sig fun>
+struct ABIFunctionData {
+ static const bool registered = false;
+};
+
+template <typename Sig, Sig fun>
+struct ABIFunction {
+ void* address() const { return JS_FUNC_TO_DATA_PTR(void*, fun); }
+
+ // If this assertion fails, you are likely in the context of a
+ // `callWithABI<Sig, fn>()` call. This error indicates that ABIFunction has
+ // not been specialized for `<Sig, fn>` by the time of this call.
+ //
+  // This can be fixed by adding the function to either ABIFUNCTION_LIST or
+  // ABIFUNCTION_AND_TYPE_LIST (if overloaded) within `ABIFunctionList-inl.h`,
+  // and by adding an `#include` of that header to the file which makes the
+  // `callWithABI<Sig, fn>()` call.
+ static_assert(ABIFunctionData<Sig, fun>::registered,
+ "ABI function is not registered.");
+};
+
+template <typename Sig>
+struct ABIFunctionSignatureData {
+ static const bool registered = false;
+};
+
+template <typename Sig>
+struct ABIFunctionSignature {
+ void* address(Sig fun) const { return JS_FUNC_TO_DATA_PTR(void*, fun); }
+
+ // If this assertion fails, you are likely in the context of a
+ // `DynamicFunction<Sig>(fn)` call. This error indicates that
+ // ABIFunctionSignature has not been specialized for `Sig` by the time of this
+ // call.
+ //
+  // This can be fixed by adding the function signature to ABIFUNCTIONSIG_LIST
+  // within `ABIFunctionList-inl.h`, and by adding an `#include` of this header
+  // to the file which makes the `DynamicFunction<Sig>(fn)` call.
+ static_assert(ABIFunctionSignatureData<Sig>::registered,
+ "ABI function signature is not registered.");
+};
+
+// This is a structure created to ensure that the dynamically computed
+// function pointer is well typed.
+//
+// It is meant to be created only through DynamicFunction calls. In extremely
+// rare cases, such as VMFunctions, it might be produced as a result of
+// GetVMFunctionTarget.
+struct DynFn {
+ void* address;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ABIFunctions_h */
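
The two headers above pair an X-macro list with template specialization: each `_(name)` entry in ABIFUNCTION_LIST expands, through DEF_TEMPLATE, into an ABIFunctionData specialization whose `registered` flag satisfies the static_assert in ABIFunction. A stand-alone sketch of that mechanism (illustrative only, not SpiderMonkey code; reinterpret_cast stands in for JS_FUNC_TO_DATA_PTR):

#include <cstdio>

template <typename Sig, Sig fun>
struct ABIFunctionData {
  static const bool registered = false;
};

template <typename Sig, Sig fun>
struct ABIFunction {
  void* address() const { return reinterpret_cast<void*>(fun); }
  static_assert(ABIFunctionData<Sig, fun>::registered,
                "ABI function is not registered.");
};

double Twice(double a) { return 2 * a; }
double Mod(double a, double b) { return a - b * static_cast<int>(a / b); }

// The list of known targets, and its expansion into specializations.
#define MY_ABIFUNCTION_LIST(_) \
  _(Twice)                     \
  _(Mod)

#define DEF_TEMPLATE(fp)                            \
  template <>                                       \
  struct ABIFunctionData<decltype(&(::fp)), ::fp> { \
    static const bool registered = true;            \
  };
MY_ABIFUNCTION_LIST(DEF_TEMPLATE)
#undef DEF_TEMPLATE

int main() {
  // Compiles because Twice is in the list; removing the _(Twice) entry would
  // trip the static_assert inside ABIFunction.
  ABIFunction<decltype(&Twice), Twice> f;
  std::printf("%p\n", f.address());
}
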
diff --git a/js/src/jit/AliasAnalysis.cpp b/js/src/jit/AliasAnalysis.cpp
new file mode 100644
index 0000000000..8334d55dfe
--- /dev/null
+++ b/js/src/jit/AliasAnalysis.cpp
@@ -0,0 +1,317 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/AliasAnalysis.h"
+
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+#include "js/Printer.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+class LoopAliasInfo : public TempObject {
+ private:
+ LoopAliasInfo* outer_;
+ MBasicBlock* loopHeader_;
+ MInstructionVector invariantLoads_;
+
+ public:
+ LoopAliasInfo(TempAllocator& alloc, LoopAliasInfo* outer,
+ MBasicBlock* loopHeader)
+ : outer_(outer), loopHeader_(loopHeader), invariantLoads_(alloc) {}
+
+ MBasicBlock* loopHeader() const { return loopHeader_; }
+ LoopAliasInfo* outer() const { return outer_; }
+ bool addInvariantLoad(MInstruction* ins) {
+ return invariantLoads_.append(ins);
+ }
+ const MInstructionVector& invariantLoads() const { return invariantLoads_; }
+ MInstruction* firstInstruction() const { return *loopHeader_->begin(); }
+};
+
+} // namespace jit
+} // namespace js
+
+void AliasAnalysis::spewDependencyList() {
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_AliasSummaries)) {
+ Fprinter& print = JitSpewPrinter();
+ JitSpewHeader(JitSpew_AliasSummaries);
+ print.printf("Dependency list for other passes:\n");
+
+ for (ReversePostorderIterator block(graph_.rpoBegin());
+ block != graph_.rpoEnd(); block++) {
+ for (MInstructionIterator def(block->begin()),
+ end(block->begin(block->lastIns()));
+ def != end; ++def) {
+ if (!def->dependency()) {
+ continue;
+ }
+ if (!def->getAliasSet().isLoad()) {
+ continue;
+ }
+
+ JitSpewHeader(JitSpew_AliasSummaries);
+ print.printf(" ");
+ MDefinition::PrintOpcodeName(print, def->op());
+ print.printf("%u marked depending on ", def->id());
+ MDefinition::PrintOpcodeName(print, def->dependency()->op());
+ print.printf("%u\n", def->dependency()->id());
+ }
+ }
+ }
+#endif
+}
+
+// Whether there might be a path from src to dest, excluding loop backedges.
+// This is approximate and really ought to depend on precomputed reachability
+// information.
+static inline bool BlockMightReach(MBasicBlock* src, MBasicBlock* dest) {
+ while (src->id() <= dest->id()) {
+ if (src == dest) {
+ return true;
+ }
+ switch (src->numSuccessors()) {
+ case 0:
+ return false;
+ case 1: {
+ MBasicBlock* successor = src->getSuccessor(0);
+ if (successor->id() <= src->id()) {
+ return true; // Don't iloop.
+ }
+ src = successor;
+ break;
+ }
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
+static void IonSpewDependency(MInstruction* load, MInstruction* store,
+ const char* verb, const char* reason) {
+#ifdef JS_JITSPEW
+ if (!JitSpewEnabled(JitSpew_Alias)) {
+ return;
+ }
+
+ JitSpewHeader(JitSpew_Alias);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" Load ");
+ load->printName(out);
+ out.printf(" %s on store ", verb);
+ store->printName(out);
+ out.printf(" (%s)\n", reason);
+#endif
+}
+
+static void IonSpewAliasInfo(const char* pre, MInstruction* ins,
+ const char* post) {
+#ifdef JS_JITSPEW
+ if (!JitSpewEnabled(JitSpew_Alias)) {
+ return;
+ }
+
+ JitSpewHeader(JitSpew_Alias);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" %s ", pre);
+ ins->printName(out);
+ out.printf(" %s\n", post);
+#endif
+}
+
+// [SMDOC] IonMonkey Alias Analysis
+//
+// This pass annotates every load instruction with the last store instruction
+// on which it depends. The algorithm is optimistic in that it ignores explicit
+// dependencies and only considers loads and stores.
+//
+// Loads inside loops only have an implicit dependency on a store before the
+// loop header if no instruction inside the loop body aliases it. To calculate
+// this efficiently, we maintain a list of maybe-invariant loads and the
+// combined alias set for all stores inside the loop. When we see the loop's
+// backedge, this information is used to mark every load we wrongly assumed to
+// be loop invariant as having an implicit dependency on the last instruction of
+// the loop header, so that it's never moved before the loop header.
+//
+// The algorithm depends on the invariant that both control instructions and
+// effectful instructions (stores) are never hoisted.
+bool AliasAnalysis::analyze() {
+ JitSpew(JitSpew_Alias, "Begin");
+ Vector<MInstructionVector, AliasSet::NumCategories, JitAllocPolicy> stores(
+ alloc());
+
+ // Initialize to the first instruction.
+ MInstruction* firstIns = *graph_.entryBlock()->begin();
+ for (unsigned i = 0; i < AliasSet::NumCategories; i++) {
+ MInstructionVector defs(alloc());
+ if (!defs.append(firstIns)) {
+ return false;
+ }
+ if (!stores.append(std::move(defs))) {
+ return false;
+ }
+ }
+
+ // Type analysis may have inserted new instructions. Since this pass depends
+ // on the instruction number ordering, all instructions are renumbered.
+ uint32_t newId = 0;
+
+ for (ReversePostorderIterator block(graph_.rpoBegin());
+ block != graph_.rpoEnd(); block++) {
+ if (mir->shouldCancel("Alias Analysis (main loop)")) {
+ return false;
+ }
+
+ if (block->isLoopHeader()) {
+ JitSpew(JitSpew_Alias, "Processing loop header %u", block->id());
+ loop_ = new (alloc().fallible()) LoopAliasInfo(alloc(), loop_, *block);
+ if (!loop_) {
+ return false;
+ }
+ }
+
+ for (MPhiIterator def(block->phisBegin()), end(block->phisEnd());
+ def != end; ++def) {
+ def->setId(newId++);
+ }
+
+ for (MInstructionIterator def(block->begin()), end(block->end());
+ def != end; ++def) {
+ def->setId(newId++);
+
+ AliasSet set = def->getAliasSet();
+ if (set.isNone()) {
+ continue;
+ }
+
+ // For the purposes of alias analysis, all recoverable operations
+ // are treated as effect free as the memory represented by these
+ // operations cannot be aliased by others.
+ if (def->canRecoverOnBailout()) {
+ continue;
+ }
+
+ if (set.isStore()) {
+ for (AliasSetIterator iter(set); iter; iter++) {
+ if (!stores[*iter].append(*def)) {
+ return false;
+ }
+ }
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Alias)) {
+ JitSpewHeader(JitSpew_Alias);
+ Fprinter& out = JitSpewPrinter();
+ out.printf("Processing store ");
+ def->printName(out);
+ out.printf(" (flags %x)\n", set.flags());
+ }
+#endif
+ } else {
+ // Find the most recent store on which this instruction depends.
+ MInstruction* lastStore = firstIns;
+
+ for (AliasSetIterator iter(set); iter; iter++) {
+ MInstructionVector& aliasedStores = stores[*iter];
+ for (int i = aliasedStores.length() - 1; i >= 0; i--) {
+ MInstruction* store = aliasedStores[i];
+ if (def->mightAlias(store) != MDefinition::AliasType::NoAlias &&
+ BlockMightReach(store->block(), *block)) {
+ if (lastStore->id() < store->id()) {
+ lastStore = store;
+ }
+ break;
+ }
+ }
+ }
+
+ def->setDependency(lastStore);
+ IonSpewDependency(*def, lastStore, "depends", "");
+
+ // If the last store was before the current loop, we assume this load
+ // is loop invariant. If a later instruction writes to the same
+ // location, we will fix this at the end of the loop.
+ if (loop_ && lastStore->id() < loop_->firstInstruction()->id()) {
+ if (!loop_->addInvariantLoad(*def)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ if (block->isLoopBackedge()) {
+ MOZ_ASSERT(loop_->loopHeader() == block->loopHeaderOfBackedge());
+ JitSpew(JitSpew_Alias, "Processing loop backedge %u (header %u)",
+ block->id(), loop_->loopHeader()->id());
+ LoopAliasInfo* outerLoop = loop_->outer();
+ MInstruction* firstLoopIns = *loop_->loopHeader()->begin();
+
+ const MInstructionVector& invariant = loop_->invariantLoads();
+
+ for (unsigned i = 0; i < invariant.length(); i++) {
+ MInstruction* ins = invariant[i];
+ AliasSet set = ins->getAliasSet();
+ MOZ_ASSERT(set.isLoad());
+
+ bool hasAlias = false;
+ for (AliasSetIterator iter(set); iter; iter++) {
+ MInstructionVector& aliasedStores = stores[*iter];
+ for (int i = aliasedStores.length() - 1;; i--) {
+ MInstruction* store = aliasedStores[i];
+ if (store->id() < firstLoopIns->id()) {
+ break;
+ }
+ if (ins->mightAlias(store) != MDefinition::AliasType::NoAlias) {
+ hasAlias = true;
+ IonSpewDependency(ins, store, "aliases", "store in loop body");
+ break;
+ }
+ }
+ if (hasAlias) {
+ break;
+ }
+ }
+
+ if (hasAlias) {
+ // This instruction depends on stores inside the loop body. Mark it as
+ // having a dependency on the last instruction of the loop header. The
+ // last instruction is a control instruction and these are never
+ // hoisted.
+ MControlInstruction* controlIns = loop_->loopHeader()->lastIns();
+ IonSpewDependency(ins, controlIns, "depends",
+ "due to stores in loop body");
+ ins->setDependency(controlIns);
+ } else {
+ IonSpewAliasInfo("Load", ins,
+ "does not depend on any stores in this loop");
+
+ if (outerLoop &&
+ ins->dependency()->id() < outerLoop->firstInstruction()->id()) {
+ IonSpewAliasInfo("Load", ins, "may be invariant in outer loop");
+ if (!outerLoop->addInvariantLoad(ins)) {
+ return false;
+ }
+ }
+ }
+ }
+ loop_ = loop_->outer();
+ }
+ }
+
+ spewDependencyList();
+
+ MOZ_ASSERT(loop_ == nullptr);
+ return true;
+}
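
A compact model (plain C++, not MIR) of the central bookkeeping described in the [SMDOC] comment above: one "last store" slot per alias category, and each load records the newest store in any category it touches as its dependency. The real pass additionally filters through mightAlias() and BlockMightReach() and patches loop-invariant loads at backedges; ids and category bits below are made up.

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // lastStore[c] is the id of the newest store touching category c;
  // 0 stands for "the first instruction", as in the pass.
  std::vector<uint32_t> lastStore(4, 0);

  struct Ins { uint32_t id, flags; bool isStore; };
  // load#1 (cat 0), store#2 (cat 0), load#3 (cat 1), load#4 (cat 0)
  std::vector<Ins> body = {
      {1, 0x1, false}, {2, 0x1, true}, {3, 0x2, false}, {4, 0x1, false}};

  for (const Ins& ins : body) {
    if (ins.isStore) {
      for (unsigned c = 0; c < 4; c++) {
        if (ins.flags & (1u << c)) lastStore[c] = ins.id;
      }
      continue;
    }
    uint32_t dep = 0;
    for (unsigned c = 0; c < 4; c++) {
      if ((ins.flags & (1u << c)) && lastStore[c] > dep) dep = lastStore[c];
    }
    std::printf("load %u depends on %u\n", ins.id, dep);
  }
  // Prints: load 1 depends on 0, load 3 depends on 0, load 4 depends on 2.
}
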
diff --git a/js/src/jit/AliasAnalysis.h b/js/src/jit/AliasAnalysis.h
new file mode 100644
index 0000000000..49ddaee47c
--- /dev/null
+++ b/js/src/jit/AliasAnalysis.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AliasAnalysis_h
+#define jit_AliasAnalysis_h
+
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+class LoopAliasInfo;
+
+class AliasAnalysis {
+ MIRGenerator* mir;
+ MIRGraph& graph_;
+ LoopAliasInfo* loop_;
+
+ void spewDependencyList();
+
+ TempAllocator& alloc() const { return graph_.alloc(); }
+
+ public:
+ AliasAnalysis(MIRGenerator* mir, MIRGraph& graph)
+ : mir(mir), graph_(graph), loop_(nullptr) {}
+
+ [[nodiscard]] bool analyze();
+};
+
+// Iterates over the flags in an AliasSet.
+class AliasSetIterator {
+ private:
+ uint32_t flags;
+ unsigned pos;
+
+ public:
+ explicit AliasSetIterator(AliasSet set) : flags(set.flags()), pos(0) {
+ while (flags && (flags & 1) == 0) {
+ flags >>= 1;
+ pos++;
+ }
+ }
+ AliasSetIterator& operator++(int) {
+ do {
+ flags >>= 1;
+ pos++;
+ } while (flags && (flags & 1) == 0);
+ return *this;
+ }
+ explicit operator bool() const { return !!flags; }
+ unsigned operator*() const {
+ MOZ_ASSERT(pos < AliasSet::NumCategories);
+ return pos;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_AliasAnalysis_h */
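
The non-obvious part of AliasSetIterator is that both the constructor and operator++ skip over zero bits, so dereferencing always yields the index of a set bit. A minimal stand-alone equivalent of that walk, using an example flag word:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t flags = 0x25;  // categories 0, 2 and 5
  unsigned pos = 0;
  while (flags) {
    if (flags & 1) std::printf("category %u\n", pos);
    flags >>= 1;
    pos++;
  }
  // Prints 0, 2 and 5, the order in which the analysis visits store buckets.
}
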
diff --git a/js/src/jit/AlignmentMaskAnalysis.cpp b/js/src/jit/AlignmentMaskAnalysis.cpp
new file mode 100644
index 0000000000..5b19b0861c
--- /dev/null
+++ b/js/src/jit/AlignmentMaskAnalysis.cpp
@@ -0,0 +1,97 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/AlignmentMaskAnalysis.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace jit;
+
+static bool IsAlignmentMask(uint32_t m) {
+ // Test whether m is just leading ones and trailing zeros.
+ return (-m & ~m) == 0;
+}
+
+static void AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph) {
+ // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result,
+ // since the users of the BitAnd include heap accesses. This will expose
+ // the redundancy for GVN when expressions like this:
+ // a&m
+ // (a+1)&m,
+ // (a+2)&m,
+ // are transformed into this:
+ // a&m
+ // (a&m)+1
+ // (a&m)+2
+ // and it will allow the constants to be folded by the
+ // EffectiveAddressAnalysis pass.
+ //
+ // Putting the add on the outside might seem like it exposes other users of
+ // the expression to the possibility of i32 overflow, if we aren't in wasm
+ // and they aren't naturally truncating. However, since we use MAdd::New
+ // with MIRType::Int32, we make sure that the value is truncated, just as it
+ // would be by the MBitAnd.
+
+ MOZ_ASSERT(IsCompilingWasm());
+
+ if (!ptr->isBitAnd()) {
+ return;
+ }
+
+ MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
+ MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
+ if (lhs->isConstant()) {
+ std::swap(lhs, rhs);
+ }
+ if (!lhs->isAdd() || !rhs->isConstant()) {
+ return;
+ }
+
+ MDefinition* op0 = lhs->toAdd()->getOperand(0);
+ MDefinition* op1 = lhs->toAdd()->getOperand(1);
+ if (op0->isConstant()) {
+ std::swap(op0, op1);
+ }
+ if (!op1->isConstant()) {
+ return;
+ }
+
+ uint32_t i = op1->toConstant()->toInt32();
+ uint32_t m = rhs->toConstant()->toInt32();
+ if (!IsAlignmentMask(m) || (i & m) != i) {
+ return;
+ }
+
+ // The pattern was matched! Produce the replacement expression.
+ MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
+ ptr->block()->insertBefore(ptr->toBitAnd(), and_);
+ auto* add = MAdd::New(graph.alloc(), and_, op1, TruncateKind::Truncate);
+ ptr->block()->insertBefore(ptr->toBitAnd(), add);
+ ptr->replaceAllUsesWith(add);
+ ptr->block()->discard(ptr->toBitAnd());
+}
+
+bool AlignmentMaskAnalysis::analyze() {
+ for (ReversePostorderIterator block(graph_.rpoBegin());
+ block != graph_.rpoEnd(); block++) {
+ for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
+ if (!graph_.alloc().ensureBallast()) {
+ return false;
+ }
+
+ // Note that we don't check for MWasmCompareExchangeHeap
+ // or MWasmAtomicBinopHeap, because the backend and the OOB
+ // mechanism don't support non-zero offsets for them yet.
+ if (i->isAsmJSLoadHeap()) {
+ AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_);
+ } else if (i->isAsmJSStoreHeap()) {
+ AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_);
+ }
+ }
+ }
+ return true;
+}
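
A quick numeric check (not part of the pass) of the two facts the rewrite relies on: IsAlignmentMask accepts only "leading ones, trailing zeros" masks, and (a + i) & m == (a & m) + i whenever the constant offset satisfies (i & m) == i. The values below are arbitrary examples.

#include <cassert>
#include <cstdint>

static bool IsAlignmentMask(uint32_t m) { return (-m & ~m) == 0; }

int main() {
  assert(IsAlignmentMask(0xFFFFFFF8));   // ...111000: accepted
  assert(!IsAlignmentMask(0x000000F0));  // zeros above the ones: rejected

  uint32_t a = 0x1005, i = 8, m = 0xFFFFFFF8;
  assert((i & m) == i);                    // offset bits lie within the mask
  assert(((a + i) & m) == ((a & m) + i));  // so the reassociation is safe
  return 0;
}
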
diff --git a/js/src/jit/AlignmentMaskAnalysis.h b/js/src/jit/AlignmentMaskAnalysis.h
new file mode 100644
index 0000000000..4e46ca97d3
--- /dev/null
+++ b/js/src/jit/AlignmentMaskAnalysis.h
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AlignmentMaskAnalysis_h
+#define jit_AlignmentMaskAnalysis_h
+
+namespace js {
+namespace jit {
+
+class MIRGraph;
+
+class AlignmentMaskAnalysis {
+ MIRGraph& graph_;
+
+ public:
+ explicit AlignmentMaskAnalysis(MIRGraph& graph) : graph_(graph) {}
+
+ [[nodiscard]] bool analyze();
+};
+
+} /* namespace jit */
+} /* namespace js */
+
+#endif /* jit_AlignmentMaskAnalysis_h */
diff --git a/js/src/jit/Assembler.h b/js/src/jit/Assembler.h
new file mode 100644
index 0000000000..5003c351ac
--- /dev/null
+++ b/js/src/jit/Assembler.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Assembler_h
+#define jit_Assembler_h
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/Assembler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/Assembler-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/Assembler-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/Assembler-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Assembler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Assembler-mips64.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/Assembler-loong64.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/Assembler-riscv64.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/Assembler-wasm32.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/Assembler-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+#endif /* jit_Assembler_h */
diff --git a/js/src/jit/AtomicOp.h b/js/src/jit/AtomicOp.h
new file mode 100644
index 0000000000..90edb631cb
--- /dev/null
+++ b/js/src/jit/AtomicOp.h
@@ -0,0 +1,98 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AtomicOp_h
+#define jit_AtomicOp_h
+
+namespace js {
+namespace jit {
+
+// Types of atomic operation, shared by MIR and LIR.
+
+enum AtomicOp {
+ AtomicFetchAddOp,
+ AtomicFetchSubOp,
+ AtomicFetchAndOp,
+ AtomicFetchOrOp,
+ AtomicFetchXorOp
+};
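+
+// For example (a sketch of the mapping): the JS expression
+// Atomics.add(view, i, v) is carried through MIR and LIR as an atomic binop
+// tagged with AtomicFetchAddOp; the other enumerators correspond to
+// Atomics.sub/and/or/xor in the same way.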
+
+// Memory barrier types, shared by MIR and LIR.
+//
+// MembarSynchronizing is here because some platforms can make the
+// distinction (DSB vs DMB on ARM, SYNC vs parameterized SYNC on MIPS)
+// but there's been no reason to use it yet.
+
+enum MemoryBarrierBits {
+ MembarLoadLoad = 1,
+ MembarLoadStore = 2,
+ MembarStoreStore = 4,
+ MembarStoreLoad = 8,
+
+ MembarSynchronizing = 16,
+
+ // For validity testing
+ MembarNobits = 0,
+ MembarAllbits = 31,
+};
+
+static inline constexpr MemoryBarrierBits operator|(MemoryBarrierBits a,
+ MemoryBarrierBits b) {
+ return MemoryBarrierBits(int(a) | int(b));
+}
+
+static inline constexpr MemoryBarrierBits operator&(MemoryBarrierBits a,
+ MemoryBarrierBits b) {
+ return MemoryBarrierBits(int(a) & int(b));
+}
+
+static inline constexpr MemoryBarrierBits operator~(MemoryBarrierBits a) {
+ return MemoryBarrierBits(~int(a));
+}
+
+// Standard barrier bits for a full barrier.
+static constexpr MemoryBarrierBits MembarFull =
+ MembarLoadLoad | MembarLoadStore | MembarStoreLoad | MembarStoreStore;
+
+// Standard sets of barrier bits for atomic loads and stores.
+// See http://gee.cs.oswego.edu/dl/jmm/cookbook.html for more.
+static constexpr MemoryBarrierBits MembarBeforeLoad = MembarNobits;
+static constexpr MemoryBarrierBits MembarAfterLoad =
+ MembarLoadLoad | MembarLoadStore;
+static constexpr MemoryBarrierBits MembarBeforeStore = MembarStoreStore;
+static constexpr MemoryBarrierBits MembarAfterStore = MembarStoreLoad;
+
+struct Synchronization {
+ const MemoryBarrierBits barrierBefore;
+ const MemoryBarrierBits barrierAfter;
+
+ constexpr Synchronization(MemoryBarrierBits before, MemoryBarrierBits after)
+ : barrierBefore(before), barrierAfter(after) {}
+
+ static Synchronization None() {
+ return Synchronization(MemoryBarrierBits(MembarNobits),
+ MemoryBarrierBits(MembarNobits));
+ }
+
+ static Synchronization Full() {
+ return Synchronization(MembarFull, MembarFull);
+ }
+
+ static Synchronization Load() {
+ return Synchronization(MembarBeforeLoad, MembarAfterLoad);
+ }
+
+ static Synchronization Store() {
+ return Synchronization(MembarBeforeStore, MembarAfterStore);
+ }
+
+ bool isNone() const { return (barrierBefore | barrierAfter) == MembarNobits; }
+};
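+
+// A sketch of the intended use: a caller picks a Synchronization and emits
+// its two barrier masks around the access it generates, e.g. for a seq-cst
+// load
+//
+//   Synchronization sync = Synchronization::Load();
+//   // emit sync.barrierBefore (MembarNobits here)
+//   // ...emit the load itself...
+//   // emit sync.barrierAfter (MembarLoadLoad | MembarLoadStore)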
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_AtomicOp_h */
diff --git a/js/src/jit/AtomicOperations.h b/js/src/jit/AtomicOperations.h
new file mode 100644
index 0000000000..8ad2839b36
--- /dev/null
+++ b/js/src/jit/AtomicOperations.h
@@ -0,0 +1,352 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AtomicOperations_h
+#define jit_AtomicOperations_h
+
+#include "mozilla/Types.h"
+
+#include <string.h>
+
+#include "jit/AtomicOperationsGenerated.h"
+#include "vm/SharedMem.h"
+
+namespace js {
+namespace jit {
+
+/*
+ * [SMDOC] Atomic Operations
+ *
+ * The atomic operations layer defines types and functions for
+ * JIT-compatible atomic operations.
+ *
+ * The fundamental constraints on the functions are:
+ *
+ * - That their realization here MUST be compatible with code the JIT
+ * generates for its Atomics operations, so that an atomic access
+ * from the interpreter or runtime - from any C++ code - really is
+ * atomic relative to a concurrent, compatible atomic access from
+ * jitted code. That is, these primitives expose JIT-compatible
+ * atomicity functionality to C++.
+ *
+ * - That accesses may race without creating C++ undefined behavior:
+ * atomic accesses (marked "SeqCst") may race with non-atomic
+ * accesses (marked "SafeWhenRacy"); overlapping but non-matching,
+ * and hence incompatible, atomic accesses may race; and non-atomic
+ * accesses may race. The effects of races need not be predictable,
+ * so garbage can be produced by a read or written by a write, but
+ * the effects must be benign: the program must continue to run, and
+ * only the memory in the union of addresses named in the racing
+ * accesses may be affected.
+ *
+ * The compatibility constraint means that if the JIT makes dynamic
+ * decisions about how to implement atomic operations then
+ * corresponding dynamic decisions MUST be made in the implementations
+ * of the functions below.
+ *
+ * The safe-for-races constraint means that by and large, it is hard
+ * to implement these primitives in C++. See "Implementation notes"
+ * below.
+ *
+ * The "SeqCst" suffix on operations means "sequentially consistent"
+ * and means such a function's operation must have "sequentially
+ * consistent" memory ordering. See mfbt/Atomics.h for an explanation
+ * of this memory ordering.
+ *
+ * Note that a "SafeWhenRacy" access does not provide the atomicity of
+ * a "relaxed atomic" access: it can read or write garbage if there's
+ * a race.
+ *
+ *
+ * Implementation notes.
+ *
+ * It's not a requirement that these functions be inlined; performance
+ * is not a great concern. On some platforms these functions may call
+ * functions that use inline assembly. See GenerateAtomicOperations.py.
+ *
+ * In principle these functions will not be written in C++, thus
+ * making races defined behavior if all racy accesses from C++ go via
+ * these functions. (Jitted code will always be safe for races and
+ * provides the same guarantees as these functions.)
+ *
+ * The appropriate implementations will be platform-specific and
+ * there are some obvious implementation strategies to choose
+ * from; sometimes a combination is appropriate:
+ *
+ * - generating the code at run-time with the JIT;
+ * - hand-written assembler (maybe inline); or
+ * - using special compiler intrinsics or directives.
+ *
+ * Trusting the compiler not to generate code that blows up on a
+ * race definitely won't work in the presence of TSan, or even of
+ * optimizing compilers in seemingly-"innocuous" conditions. (See
+ * https://www.usenix.org/legacy/event/hotpar11/tech/final_files/Boehm.pdf
+ * for details.)
+ */
+class AtomicOperations {
+ // The following functions are defined for T = int8_t, uint8_t,
+ // int16_t, uint16_t, int32_t, uint32_t, int64_t, and uint64_t.
+
+ // Atomically read *addr.
+ template <typename T>
+ static inline T loadSeqCst(T* addr);
+
+ // Atomically store val in *addr.
+ template <typename T>
+ static inline void storeSeqCst(T* addr, T val);
+
+ // Atomically store val in *addr and return the old value of *addr.
+ template <typename T>
+ static inline T exchangeSeqCst(T* addr, T val);
+
+ // Atomically check that *addr contains oldval and if so replace it
+ // with newval, in any case returning the old contents of *addr.
+ template <typename T>
+ static inline T compareExchangeSeqCst(T* addr, T oldval, T newval);
+
+ // Atomically add, subtract, bitwise-AND, bitwise-OR, or bitwise-XOR
+ // val into *addr and return the old value of *addr.
+ template <typename T>
+ static inline T fetchAddSeqCst(T* addr, T val);
+
+ template <typename T>
+ static inline T fetchSubSeqCst(T* addr, T val);
+
+ template <typename T>
+ static inline T fetchAndSeqCst(T* addr, T val);
+
+ template <typename T>
+ static inline T fetchOrSeqCst(T* addr, T val);
+
+ template <typename T>
+ static inline T fetchXorSeqCst(T* addr, T val);
+
+ // The SafeWhenRacy functions are to be used when C++ code has to access
+ // memory without synchronization and can't guarantee that there won't be a
+ // race on the access. But they are access-atomic for integer data so long
+ // as any racing writes are of the same size and to the same address.
+
+ // Defined for all the integral types as well as for float32 and float64,
+ // but not access-atomic for floats, nor for int64 and uint64 on 32-bit
+ // platforms.
+ template <typename T>
+ static inline T loadSafeWhenRacy(T* addr);
+
+ // Defined for all the integral types as well as for float32 and float64,
+ // but not access-atomic for floats, nor for int64 and uint64 on 32-bit
+ // platforms.
+ template <typename T>
+ static inline void storeSafeWhenRacy(T* addr, T val);
+
+ // Replacement for memcpy(). No access-atomicity guarantees.
+ static inline void memcpySafeWhenRacy(void* dest, const void* src,
+ size_t nbytes);
+
+ // Replacement for memmove(). No access-atomicity guarantees.
+ static inline void memmoveSafeWhenRacy(void* dest, const void* src,
+ size_t nbytes);
+
+ public:
+ // Test lock-freedom for any int32 value. This implements the
+ // Atomics::isLockFree() operation in the ECMAScript Shared Memory and
+ // Atomics specification, as follows:
+ //
+ // 4-byte accesses are always lock free (in the spec).
+ // 1-, 2-, and 8-byte accesses are always lock free (in SpiderMonkey).
+ //
+ // There is no lock-freedom for JS for any other values on any platform.
+ static constexpr inline bool isLockfreeJS(int32_t n);
+
+ // If the return value is true then the templated functions below are
+ // supported for int64_t and uint64_t. If the return value is false then
+ // those functions will MOZ_CRASH. The value of this call does not change
+ // during execution.
+ static inline bool hasAtomic8();
+
+ // If the return value is true then hasAtomic8() is true and the atomic
+ // operations are indeed lock-free. The value of this call does not change
+ // during execution.
+ static inline bool isLockfree8();
+
+ // Execute a full memory barrier (LoadLoad+LoadStore+StoreLoad+StoreStore).
+ static inline void fenceSeqCst();
+
+ // All clients should use the APIs that take SharedMem pointers.
+ // See above for semantics and acceptable types.
+
+ template <typename T>
+ static T loadSeqCst(SharedMem<T*> addr) {
+ return loadSeqCst(addr.unwrap());
+ }
+
+ template <typename T>
+ static void storeSeqCst(SharedMem<T*> addr, T val) {
+ return storeSeqCst(addr.unwrap(), val);
+ }
+
+ template <typename T>
+ static T exchangeSeqCst(SharedMem<T*> addr, T val) {
+ return exchangeSeqCst(addr.unwrap(), val);
+ }
+
+ template <typename T>
+ static T compareExchangeSeqCst(SharedMem<T*> addr, T oldval, T newval) {
+ return compareExchangeSeqCst(addr.unwrap(), oldval, newval);
+ }
+
+ template <typename T>
+ static T fetchAddSeqCst(SharedMem<T*> addr, T val) {
+ return fetchAddSeqCst(addr.unwrap(), val);
+ }
+
+ template <typename T>
+ static T fetchSubSeqCst(SharedMem<T*> addr, T val) {
+ return fetchSubSeqCst(addr.unwrap(), val);
+ }
+
+ template <typename T>
+ static T fetchAndSeqCst(SharedMem<T*> addr, T val) {
+ return fetchAndSeqCst(addr.unwrap(), val);
+ }
+
+ template <typename T>
+ static T fetchOrSeqCst(SharedMem<T*> addr, T val) {
+ return fetchOrSeqCst(addr.unwrap(), val);
+ }
+
+ template <typename T>
+ static T fetchXorSeqCst(SharedMem<T*> addr, T val) {
+ return fetchXorSeqCst(addr.unwrap(), val);
+ }
+
+ template <typename T>
+ static T loadSafeWhenRacy(SharedMem<T*> addr) {
+ return loadSafeWhenRacy(addr.unwrap());
+ }
+
+ template <typename T>
+ static void storeSafeWhenRacy(SharedMem<T*> addr, T val) {
+ return storeSafeWhenRacy(addr.unwrap(), val);
+ }
+
+ template <typename T>
+ static void memcpySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src,
+ size_t nbytes) {
+ memcpySafeWhenRacy(dest.template cast<void*>().unwrap(),
+ src.template cast<void*>().unwrap(), nbytes);
+ }
+
+ template <typename T>
+ static void memcpySafeWhenRacy(SharedMem<T*> dest, T* src, size_t nbytes) {
+ memcpySafeWhenRacy(dest.template cast<void*>().unwrap(),
+ static_cast<void*>(src), nbytes);
+ }
+
+ template <typename T>
+ static void memcpySafeWhenRacy(T* dest, SharedMem<T*> src, size_t nbytes) {
+ memcpySafeWhenRacy(static_cast<void*>(dest),
+ src.template cast<void*>().unwrap(), nbytes);
+ }
+
+ template <typename T>
+ static void memmoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src,
+ size_t nbytes) {
+ memmoveSafeWhenRacy(dest.template cast<void*>().unwrap(),
+ src.template cast<void*>().unwrap(), nbytes);
+ }
+
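+  // Replacement for memset(). No access-atomicity guarantees. The fill value
+  // is staged in a small on-stack buffer and then written out in chunks with
+  // memcpySafeWhenRacy().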
+ static void memsetSafeWhenRacy(SharedMem<uint8_t*> dest, int value,
+ size_t nbytes) {
+ uint8_t buf[1024];
+ size_t iterations = nbytes / sizeof(buf);
+ size_t tail = nbytes % sizeof(buf);
+ size_t offs = 0;
+ if (iterations > 0) {
+ memset(buf, value, sizeof(buf));
+ while (iterations--) {
+ memcpySafeWhenRacy(dest + offs, SharedMem<uint8_t*>::unshared(buf),
+ sizeof(buf));
+ offs += sizeof(buf);
+ }
+ } else {
+ memset(buf, value, tail);
+ }
+ memcpySafeWhenRacy(dest + offs, SharedMem<uint8_t*>::unshared(buf), tail);
+ }
+
+ template <typename T>
+ static void podCopySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src,
+ size_t nelem) {
+ memcpySafeWhenRacy(dest, src, nelem * sizeof(T));
+ }
+
+ template <typename T>
+ static void podMoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src,
+ size_t nelem) {
+ memmoveSafeWhenRacy(dest, src, nelem * sizeof(T));
+ }
+};
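+
+// A sketch of typical use from C++ runtime code that must interoperate with
+// jitted Atomics (the surrounding variables are illustrative only):
+//
+//   SharedMem<int32_t*> addr = ...;
+//   int32_t old = AtomicOperations::fetchAddSeqCst(addr, int32_t(1));
+//   AtomicOperations::storeSafeWhenRacy(addr, int32_t(0));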
+
+constexpr inline bool AtomicOperations::isLockfreeJS(int32_t size) {
+ // Keep this in sync with atomicIsLockFreeJS() in jit/MacroAssembler.cpp.
+
+ switch (size) {
+ case 1:
+ return true;
+ case 2:
+ return true;
+ case 4:
+ // The spec requires Atomics.isLockFree(4) to return true.
+ return true;
+ case 8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace jit
+} // namespace js
+
+// As explained above, our atomic operations are not portable even in principle,
+// so we must include platform+compiler specific definitions here.
+//
+// x86, x64, arm, and arm64 are maintained by Mozilla. All other platform
+// setups are by platform maintainers' request and are not maintained by
+// Mozilla.
+//
+// If you are using a platform+compiler combination that causes an error below
+// (and if the problem isn't just that the compiler uses a different name for a
+// known architecture), you have basically three options:
+//
+// - find an already-supported compiler for the platform and use that instead
+//
+// - write your own support code for the platform+compiler and create a new
+// case below
+//
+// - include jit/shared/AtomicOperations-feeling-lucky.h in a case for the
+// platform below, if you have a gcc-compatible compiler and truly feel
+// lucky. You may have to add a little code to that file, too.
+//
+// Simulators are confusing. These atomic primitives must be compatible with
+// the code that the JIT emits, but of course for an ARM simulator running on
+// x86 the primitives here will be for x86, not for ARM, while the JIT emits ARM
+// code. Our ARM simulator solves that the easy way: by using these primitives
+// to implement its atomic operations. For other simulators there may need to
+// be special cases below to provide simulator-compatible primitives, for
+// example, for our ARM64 simulator the primitives could in principle
+// participate in the memory exclusivity monitors implemented by the simulator.
+// Such a solution is likely to be difficult.
+
+#ifdef JS_HAVE_GENERATED_ATOMIC_OPS
+# include "jit/shared/AtomicOperations-shared-jit.h"
+#elif defined(JS_SIMULATOR_MIPS32) || defined(__mips__)
+# include "jit/mips-shared/AtomicOperations-mips-shared.h"
+#else
+# include "jit/shared/AtomicOperations-feeling-lucky.h"
+#endif
+
+#endif // jit_AtomicOperations_h
diff --git a/js/src/jit/AutoWritableJitCode.h b/js/src/jit/AutoWritableJitCode.h
new file mode 100644
index 0000000000..ab5b35a54f
--- /dev/null
+++ b/js/src/jit/AutoWritableJitCode.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AutoWritableJitCode_h
+#define jit_AutoWritableJitCode_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/TimeStamp.h"
+
+#include <stddef.h>
+
+#include "jit/ExecutableAllocator.h"
+#include "jit/JitCode.h"
+#include "jit/ProcessExecutableMemory.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Runtime.h"
+
+namespace js::jit {
+
+// This class ensures JIT code is executable on its destruction. Creators
+// must call makeWritable(), and not attempt to write to the buffer if it fails.
+//
+// AutoWritableJitCodeFallible may only fail to make code writable; it cannot
+// fail to make JIT code executable (because the creating code has no chance to
+// recover from a failed destructor).
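+//
+// Intended usage pattern (a sketch):
+//
+//   AutoWritableJitCodeFallible awjc(code);
+//   if (!awjc.makeWritable()) {
+//     return false;
+//   }
+//   // ... write to the code buffer; it becomes executable again when
+//   // awjc goes out of scope ...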
+class MOZ_RAII AutoWritableJitCodeFallible {
+ JSRuntime* rt_;
+ void* addr_;
+ size_t size_;
+
+ public:
+ AutoWritableJitCodeFallible(JSRuntime* rt, void* addr, size_t size)
+ : rt_(rt), addr_(addr), size_(size) {
+ rt_->toggleAutoWritableJitCodeActive(true);
+ }
+
+ AutoWritableJitCodeFallible(void* addr, size_t size)
+ : AutoWritableJitCodeFallible(TlsContext.get()->runtime(), addr, size) {}
+
+ explicit AutoWritableJitCodeFallible(JitCode* code)
+ : AutoWritableJitCodeFallible(code->runtimeFromMainThread(), code->raw(),
+ code->bufferSize()) {}
+
+ [[nodiscard]] bool makeWritable() {
+ return ExecutableAllocator::makeWritable(addr_, size_);
+ }
+
+ ~AutoWritableJitCodeFallible() {
+ mozilla::TimeStamp startTime = mozilla::TimeStamp::Now();
+ auto timer = mozilla::MakeScopeExit([&] {
+ if (Realm* realm = rt_->mainContextFromOwnThread()->realm()) {
+ realm->timers.protectTime += mozilla::TimeStamp::Now() - startTime;
+ }
+ });
+
+ if (!ExecutableAllocator::makeExecutableAndFlushICache(addr_, size_)) {
+ MOZ_CRASH();
+ }
+ rt_->toggleAutoWritableJitCodeActive(false);
+ }
+};
+
+// Infallible variant of AutoWritableJitCodeFallible: it makes the code
+// writable during construction and release-asserts if that fails.
+class MOZ_RAII AutoWritableJitCode : private AutoWritableJitCodeFallible {
+ public:
+ AutoWritableJitCode(JSRuntime* rt, void* addr, size_t size)
+ : AutoWritableJitCodeFallible(rt, addr, size) {
+ MOZ_RELEASE_ASSERT(makeWritable());
+ }
+
+ AutoWritableJitCode(void* addr, size_t size)
+ : AutoWritableJitCode(TlsContext.get()->runtime(), addr, size) {}
+
+ explicit AutoWritableJitCode(JitCode* code)
+ : AutoWritableJitCode(code->runtimeFromMainThread(), code->raw(),
+ code->bufferSize()) {}
+};
+
+} // namespace js::jit
+
+#endif /* jit_AutoWritableJitCode_h */
diff --git a/js/src/jit/BacktrackingAllocator.cpp b/js/src/jit/BacktrackingAllocator.cpp
new file mode 100644
index 0000000000..d93431795a
--- /dev/null
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -0,0 +1,4676 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Documentation. Code starts about 670 lines down from here. //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+// [SMDOC] An overview of Ion's register allocator
+//
+// The intent of this documentation is to give maintainers a map with which to
+// navigate the allocator. As further understanding is obtained, it should be
+// added to this overview.
+//
+// Where possible, invariants are stated and are marked "(INVAR)". Many
+// details are omitted because their workings are currently unknown. In
+// particular, this overview doesn't explain how Intel-style "modify" (tied)
+// operands are handled. Facts or invariants that are speculative -- believed
+// to be true, but not verified at the time of writing -- are marked "(SPEC)".
+//
+// The various concepts are interdependent, so a single forwards reading of the
+// following won't make much sense. Many concepts are explained only after
+// they are mentioned.
+//
+// Where possible examples are shown. Without those the description is
+// excessively abstract.
+//
+// Names of the form ::name mean BacktrackingAllocator::name.
+//
+// The description falls into two sections:
+//
+// * Section 1: A tour of the data structures
+// * Section 2: The core allocation loop, and bundle splitting
+//
+// The allocator sometimes produces poor allocations, with excessive spilling
+// and register-to-register moves (bugs 1752520, 1714280 and 1746596).
+// Work in bug 1752582 shows we can get better quality allocations from this
+// framework without having to make any large (conceptual) changes, by having
+// better splitting heuristics.
+//
+// At https://bugzilla.mozilla.org/show_bug.cgi?id=1758274#c17
+// (https://bugzilla.mozilla.org/attachment.cgi?id=9288467) is a document
+// written at the same time as these comments. It describes some improvements
+// we could make to our splitting heuristics, particularly in the presence of
+// loops and calls, and shows why the current implementation sometimes produces
+// excessive spilling. It builds on the commentary in this SMDOC.
+//
+//
+// Top level pipeline
+// ~~~~~~~~~~~~~~~~~~
+// There are three major phases in allocation. They run sequentially, at a
+// per-function granularity.
+//
+// (1) Liveness analysis and bundle formation
+// (2) Bundle allocation and last-chance allocation
+// (3) Rewriting the function to create MoveGroups and to "install"
+// the allocation
+//
+// The input language (LIR) is in SSA form, and phases (1) and (3) depend on
+// that SSAness. Without it the allocator wouldn't work.
+//
+// The top level function is ::go. The phases are divided into functions as
+// follows:
+//
+// (1) ::buildLivenessInfo, ::mergeAndQueueRegisters
+// (2) ::processBundle, ::tryAllocatingRegistersForSpillBundles,
+// ::pickStackSlots
+// (3) ::createMoveGroupsFromLiveRangeTransitions, ::installAllocationsInLIR,
+// ::populateSafepoints, ::annotateMoveGroups
+//
+// The code in this file is structured as much as possible in the same sequence
+// as flow through the pipeline. Hence, top level function ::go is right at
+// the end. Where a function depends on helper function(s), the helpers appear
+// first.
+//
+//
+// ========================================================================
+// ==== ====
+// ==== Section 1: A tour of the data structures ====
+// ==== ====
+// ========================================================================
+//
+// Here are the key data structures necessary for understanding what follows.
+//
+// Some basic data structures
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// CodePosition
+// ------------
+// A CodePosition is an unsigned 32-bit int that indicates an instruction index
+// in the incoming LIR. Each LIR actually has two code positions, one to
+// denote the "input" point (where, one might imagine, the operands are read,
+// at least useAtStart ones) and the "output" point, where operands are
+// written. Eg:
+//
+// Block 0 [successor 2] [successor 1]
+// 2-3 WasmParameter [def v1<g>:r14]
+// 4-5 WasmCall [use v1:F:r14]
+// 6-7 WasmLoadTls [def v2<o>] [use v1:R]
+// 8-9 WasmNullConstant [def v3<o>]
+// 10-11 Compare [def v4<i>] [use v2:R] [use v3:A]
+// 12-13 TestIAndBranch [use v4:R]
+//
+// So for example the WasmLoadTls insn has its input CodePosition as 6 and
+// output point as 7. Input points are even numbered, output points are odd
+// numbered. CodePositions 0 and 1 never appear, because LIR instruction IDs
+// start at 1. Indeed, CodePosition 0 is assumed to be invalid and hence is
+// used as a marker for "unusual conditions" in various places.
+//
+// Phi nodes exist in the instruction stream too. They always appear at the
+// start of blocks (of course) (SPEC), but their start and end points are
+// printed for the group as a whole. This is to emphasise that they are really
+// parallel assignments and that printing them sequentially would misleadingly
+// imply that they are executed sequentially. Example:
+//
+// Block 6 [successor 7] [successor 8]
+// 56-59 Phi [def v19<o>] [use v2:A] [use v5:A] [use v13:A]
+// 56-59 Phi [def v20<o>] [use v7:A] [use v14:A] [use v12:A]
+// 60-61 WasmLoadSlot [def v21<o>] [use v1:R]
+// 62-63 Compare [def v22<i>] [use v20:R] [use v21:A]
+// 64-65 TestIAndBranch [use v22:R]
+//
+// See that both Phis are printed with limits 56-59, even though they are
+// stored in the LIR array like regular LIRs and so have code points 56-57 and
+// 58-59 in reality.
+//
+// The process of allocation adds MoveGroup LIRs to the function. Each
+// incoming LIR has its own private list of MoveGroups (actually, 3 lists; two
+// for moves that conceptually take place before the instruction, and one for
+// moves after it). Hence the CodePositions for LIRs (the "62-63", etc, above)
+// do not change as a result of allocation.
+//
+// Virtual registers (vregs) in LIR
+// --------------------------------
+// The MIR from which the LIR is derived, is standard SSA. That SSAness is
+// carried through into the LIR (SPEC). In the examples here, LIR SSA names
+// (virtual registers, a.k.a. vregs) are printed as "v<number>". v0 never
+// appears and is presumed to be a special value, perhaps "invalid" (SPEC).
+//
+// The allocator core has a type VirtualRegister, but this is private to the
+// allocator and not part of the LIR. It carries far more information than
+// merely the name of the vreg. The allocator creates one VirtualRegister
+// structure for each vreg in the LIR.
+//
+// LDefinition and LUse
+// --------------------
+// These are part of the incoming LIR. Each LIR instruction defines zero or
+// more values, and contains one LDefinition for each defined value (SPEC).
+// Each instruction has zero or more input operands, each of which has its own
+// LUse (SPEC).
+//
+// Both LDefinition and LUse hold both a virtual register name and, in general,
+// a real (physical) register identity. The incoming LIR has the real register
+// fields unset, except in places where the incoming LIR has fixed register
+// constraints (SPEC). Phase 3 of allocation will visit all of the
+// LDefinitions and LUses so as to write into the real register fields the
+// decisions made by the allocator. For LUses, this is done by overwriting the
+// complete LUse with a different LAllocation, for example LStackSlot. That's
+// possible because LUse is a child class of LAllocation.
+//
+// This action of reading and then later updating LDefinition/LUses is the core
+// of the allocator's interface to the outside world.
+//
+// To make visiting of LDefinitions/LUses possible, the allocator doesn't work
+// with LDefinition and LUse directly. Rather it has pointers to them
+// (VirtualRegister::def_, UsePosition::use_). Hence Phase 3 can modify the
+// LIR in-place.
+//
+// (INVARs, all SPEC):
+//
+// - The collective VirtualRegister::def_ values should be unique, and there
+// should be a 1:1 mapping between the VirtualRegister::def_ values and the
+// LDefinitions in the LIR. (So that the LIR LDefinition has exactly one
+// VirtualRegister::def_ to track it). But only for the valid LDefinitions.
+// If isBogusTemp() is true, the definition is invalid and doesn't have a
+// vreg.
+//
+// - The same for uses: there must be a 1:1 correspondence between the
+//   UsePosition::use_ values and the LIR LUses.
+//
+// - The allocation process must preserve these 1:1 mappings. That implies
+// (weaker) that the number of VirtualRegisters and of UsePositions must
+// remain constant through allocation. (Eg: losing them would mean that some
+// LIR def or use would necessarily not get annotated with its final
+// allocation decision. Duplicating them would lead to the possibility of
+// conflicting allocation decisions.)
+//
+// Other comments regarding LIR
+// ----------------------------
+// The incoming LIR is structured into basic blocks and a CFG, as one would
+// expect. These (insns, block boundaries, block edges etc) are available
+// through the BacktrackingAllocator object. They are important for Phases 1
+// and 3 but not for Phase 2.
+//
+// Phase 3 "rewrites" the input LIR so as to "install" the final allocation.
+// It has to insert MoveGroup instructions, but that isn't done by pushing them
+// into the instruction array. Rather, each LIR has 3 auxiliary sets of
+// MoveGroups (SPEC): two that "happen" conceptually before the LIR, and one
+// that happens after it. The rewriter inserts MoveGroups into one of these 3
+// sets, and later code generation phases presumably insert the sets (suitably
+// translated) into the final machine code (SPEC).
+//
+//
+// Key data structures: LiveRange, VirtualRegister and LiveBundle
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// These three have central roles in allocation. Of them, LiveRange is the
+// most central. VirtualRegister is conceptually important throughout, but
+// appears less frequently in the allocator code. LiveBundle is important only
+// in Phase 2 (where it is central) and at the end of Phase 1, but plays no
+// role in Phase 3.
+//
+// It's important to understand that LiveRange and VirtualRegister correspond
+// to concepts visible in the incoming LIR, which is in SSA form. LiveBundle
+// by comparison is related to neither the structure of LIR nor its SSA
+// properties.  Instead, LiveBundle is an essentially administrative structure
+// used to accelerate allocation and to implement a crude form of
+// move-coalescing.
+//
+// VirtualRegisters and LiveRanges are (almost) static throughout the process,
+// because they reflect aspects of the incoming LIR, which does not change.
+// LiveBundles by contrast come and go; they are created, but may be split up
+// into new bundles, and old ones abandoned.
+//
+// Each LiveRange is a member of two different linked lists, chained through
+// fields registerLink and bundleLink.
+//
+// A VirtualRegister (described in detail below) has a list of LiveRanges that
+// it "owns". These are chained through LiveRange::registerLink.
+//
+// A LiveBundle (also described below) also has a list LiveRanges that it
+// "owns", chained through LiveRange::bundleLink.
+//
+// Hence each LiveRange is "owned" by one VirtualRegister and one LiveBundle.
+// LiveRanges may have their owning LiveBundle changed as a result of
+// splitting. By contrast a LiveRange stays with its "owning" VirtualRegister
+// for ever.
+//
+// A few LiveRanges have no VirtualRegister. This is used to implement
+// register spilling for calls. Each physical register that's not preserved
+// across a call has a small range that covers the call. It is
+// ::buildLivenessInfo that adds these small ranges.
+//
+// Iterating over every VirtualRegister in the system is a common operation and
+// is straightforward because (somewhat redundantly?) the LIRGraph knows the
+// number of vregs, and more importantly because BacktrackingAllocator::vregs
+// is a vector of all VirtualRegisters. By contrast iterating over every
+// LiveBundle in the system is more complex because there is no top-level
+// registry of them. It is still possible though. See ::dumpLiveRangesByVReg
+// and ::dumpLiveRangesByBundle for example code.
+//
+// LiveRange
+// ---------
+// Fundamentally, a LiveRange (often written just "range") is a request for
+// storage of a LIR vreg for some contiguous sequence of LIRs. A LiveRange
+// generally covers only a fraction of a vreg's overall lifetime, so multiple
+// LiveRanges are generally needed to cover the whole lifetime.
+//
+// A LiveRange contains (amongst other things):
+//
+// * the vreg for which it is for, as a VirtualRegister*
+//
+// * the range of CodePositions for which it is for, as a LiveRange::Range
+//
+// * auxiliary information:
+//
+// - a boolean that indicates whether this LiveRange defines a value for the
+// vreg. If so, that definition is regarded as taking place at the first
+// CodePoint of the range.
+//
+// - a linked list of uses of the vreg within this range. Each use is a pair
+// of a CodePosition and an LUse*. (INVAR): the uses are maintained in
+// increasing order of CodePosition. Multiple uses at the same
+// CodePosition are permitted, since that is necessary to represent an LIR
+// that uses the same vreg in more than one of its operand slots.
+//
+// Some important facts about LiveRanges are best illustrated with examples:
+//
+// v25 75-82 { 75_def:R 78_v25:A 82_v25:A }
+//
+// This LiveRange is for vreg v25. The value is defined at CodePosition 75,
+// with the LIR requiring that it be in a register. It is used twice at
+// positions 78 and 82, both with no constraints (A meaning "any"). The range
+// runs from position 75 to 82 inclusive. Note however that LiveRange::Range
+// uses non-inclusive range ends; hence its .to field would be 83, not 82.
+//
+// v26 84-85 { 85_v26:R }
+//
+// This LiveRange is for vreg v26. Here, there's only a single use of it at
+// position 85. Presumably it is defined in some other LiveRange.
+//
+// v19 74-79 { }
+//
+// This LiveRange is for vreg v19. There is no def and no uses, so at first
+// glance this seems redundant. But it isn't: it still expresses a request for
+// storage for v19 across 74-79, because Phase 1 regards v19 as being live in
+// this range (meaning: having a value that, if changed in this range, would
+// cause the program to fail).
+//
+// Other points:
+//
+// * (INVAR) Each vreg/VirtualRegister has at least one LiveRange.
+//
+// * (INVAR) Exactly one LiveRange of a vreg gives a definition for the value.
+// All other LiveRanges must consist only of uses (including zero uses, for a
+// "flow-though" range as mentioned above). This requirement follows from
+// the fact that LIR is in SSA form.
+//
+// * It follows from this, that the LiveRanges for a VirtualRegister must form
+// a tree, where the parent-child relationship is "control flows directly
+// from a parent LiveRange (anywhere in the LiveRange) to a child LiveRange
+// (start)". The entire tree carries only one value. This is a use of
+// SSAness in the allocator which is fundamental: without SSA input, this
+// design would not work.
+//
+// The root node (LiveRange) in the tree must be one that defines the value,
+// and all other nodes must only use or be flow-throughs for the value. It's
+// OK for LiveRanges in the tree to overlap, providing that there is a unique
+// root node -- otherwise it would be unclear which LiveRange provides the
+// value.
+//
+// The function ::createMoveGroupsFromLiveRangeTransitions runs after all
+// LiveBundles have been allocated. It visits each VirtualRegister tree in
+// turn. For every parent->child edge in a tree, it creates a MoveGroup that
+// copies the value from the parent into the child -- this is how the
+// allocator decides where to put MoveGroups. There are various other
+// details not described here.
+//
+// * It's important to understand that a LiveRange carries no meaning about
+// control flow beyond that implied by the SSA (hence, dominance)
+// relationship between a def and its uses. In particular, there's no
+// implication that execution "flowing into" the start of the range implies
+// that it will "flow out" of the end. Or that any particular use will or
+// will not be executed.
+//
+// * (very SPEC) Indeed, even if a range has a def, there's no implication that
+// a use later in the range will have been immediately preceded by execution
+// of the def. It could be that the def is executed, flow jumps somewhere
+// else, and later jumps back into the middle of the range, where there are
+// then some uses.
+//
+// * Uses of a vreg by a phi node are not mentioned in the use list of a
+// LiveRange. The reasons for this are unknown, but it is speculated that
+// this is because we don't need to know about phi uses where we use the list
+// of positions. See comments on VirtualRegister::usedByPhi_.
+//
+// * Similarly, a definition of a vreg by a phi node is not regarded as being a
+//   definition point (why not?), at least from the point of view of
+// LiveRange::hasDefinition_.
+//
+// * LiveRanges that nevertheless include a phi-defined value have their first
+// point set to the first of the block of phis, even if the var isn't defined
+// by that specific phi. Eg:
+//
+// Block 6 [successor 7] [successor 8]
+// 56-59 Phi [def v19<o>] [use v2:A] [use v5:A] [use v13:A]
+// 56-59 Phi [def v20<o>] [use v7:A] [use v14:A] [use v12:A]
+// 60-61 WasmLoadSlot [def v21<o>] [use v1:R]
+// 62-63 Compare [def v22<i>] [use v20:R] [use v21:A]
+//
+// The relevant live range for v20 is
+//
+// v20 56-65 { 63_v20:R }
+//
+// Observe that it starts at 56, not 58.
+//
+// VirtualRegister
+// ---------------
+// Each VirtualRegister is associated with an SSA value created by the LIR.
+// Fundamentally it is a container to hold all of the LiveRanges that together
+// indicate where the value must be kept live. This is a linked list beginning
+// at VirtualRegister::ranges_, and which, as described above, is chained
+// through LiveRange::registerLink. The set of LiveRanges must logically form
+// a tree, rooted at the LiveRange which defines the value.
+//
+// For administrative convenience, the linked list must contain the LiveRanges
+// in order of increasing start point.
+//
+// There are various auxiliary fields, most importantly the LIR node and the
+// associated LDefinition that define the value.
+//
+// It is OK, and quite common, for LiveRanges of a VirtualRegister to overlap.
+// The effect will be that, in an overlapped area, there are two storage
+// locations holding the value. This is OK -- although wasteful of storage
+// resources -- because the SSAness means the value must be the same in both
+// locations.  Hence there are no questions like "which LiveRange holds the most
+// up-to-date value?", since it's all just one value anyway.
+//
+// Note by contrast, it is *not* OK for the LiveRanges of a LiveBundle to
+// overlap.
+//
+// LiveBundle
+// ----------
+// Similar to VirtualRegister, a LiveBundle is also, fundamentally, a container
+// for a set of LiveRanges. The set is stored as a linked list, rooted at
+// LiveBundle::ranges_ and chained through LiveRange::bundleLink.
+//
+// However, the similarity ends there:
+//
+// * The LiveRanges in a LiveBundle absolutely must not overlap. They must
+// indicate disjoint sets of CodePositions, and must be stored in the list in
+// order of increasing CodePosition. Because of the no-overlap requirement,
+// these ranges form a total ordering regardless of whether one uses the
+// LiveRange::Range::from_ or ::to_ fields for comparison.
+//
+// * The LiveRanges in a LiveBundle can otherwise be entirely arbitrary and
+// unrelated. They can be from different VirtualRegisters and can have no
+// particular mutual significance w.r.t. the SSAness or structure of the
+// input LIR.
+//
+// LiveBundles are the fundamental unit of allocation. The allocator attempts
+// to find a single storage location that will work for all LiveRanges in the
+// bundle. That's why the ranges must not overlap. If no such location can be
+// found, the allocator may decide to split the bundle into multiple smaller
+// bundles. Each of those may be allocated independently.
+//
+// The other really important field is LiveBundle::alloc_, indicating the
+// chosen storage location.
+//
+// Here's an example, for a LiveBundle before it has been allocated:
+//
+// LB2(parent=none v3 8-21 { 16_v3:A } ## v3 24-25 { 25_v3:F:xmm0 })
+//
+// LB merely indicates "LiveBundle", and the 2 is the debugId_ value (see
+// below). This bundle has two LiveRanges
+//
+// v3 8-21 { 16_v3:A }
+// v3 24-25 { 25_v3:F:xmm0 }
+//
+// both of which (coincidentally) are for the same VirtualRegister, v3.  The
+// second LiveRange has a fixed use in `xmm0`, whilst the first one doesn't
+// care (A meaning "any location") so the allocator *could* choose `xmm0` for
+// the bundle as a whole.
+//
+// One might ask: why bother with LiveBundle at all? After all, it would be
+// possible to get correct allocations by allocating each LiveRange
+// individually, then leaving ::createMoveGroupsFromLiveRangeTransitions to add
+// MoveGroups to join up LiveRanges that form each SSA value tree (that is,
+// LiveRanges belonging to each VirtualRegister).
+//
+// There are two reasons:
+//
+// (1) By putting multiple LiveRanges into each LiveBundle, we can end up with
+// many fewer LiveBundles than LiveRanges. Since the cost of allocating a
+// LiveBundle is substantially less than the cost of allocating each of its
+// LiveRanges individually, the allocator will run faster.
+//
+// (2) It provides a crude form of move-coalescing. There are situations where
+// it would be beneficial, although not mandatory, to have two LiveRanges
+// assigned to the same storage unit. Most importantly: (a) LiveRanges
+// that form all of the inputs, and the output, of a phi node. (b)
+// LiveRanges for the output and first-operand values in the case where we
+//     are targeting Intel-style instructions.
+//
+// In such cases, if the bundle can be allocated as-is, then no extra moves
+// are necessary. If not (and the bundle is split), then
+// ::createMoveGroupsFromLiveRangeTransitions will later fix things up by
+// inserting MoveGroups in the right places.
+//
+// Merging of LiveRanges into LiveBundles is done in Phase 1, by
+// ::mergeAndQueueRegisters, after liveness analysis (which generates only
+// LiveRanges).
+//
+// For the bundle mentioned above, viz
+//
+// LB2(parent=none v3 8-21 { 16_v3:A } ## v3 24-25 { 25_v3:F:xmm0 })
+//
+// the allocator did not in the end choose to allocate it to `xmm0`. Instead
+// it was split into two bundles, LB6 (a "spill parent", or root node in the
+// trees described above), and LB9, a leaf node that points to its parent LB6:
+//
+// LB6(parent=none v3 8-21 %xmm1.s { 16_v3:A } ## v3 24-25 %xmm1.s { })
+//   LB9(parent=LB6 v3 24-25 %xmm0.s { 25_v3:F:xmm0 })
+//
+// Note that both bundles now have an allocation, and that is printed,
+// redundantly, for each LiveRange in the bundle -- hence the repeated
+// `%xmm1.s` in the lines above. Since all LiveRanges in a LiveBundle must be
+// allocated to the same storage location, we never expect to see output like
+// this
+//
+// LB6(parent=none v3 8-21 %xmm1.s { 16_v3:A } ## v3 24-25 %xmm2.s { })
+//
+// and that is in any case impossible, since a LiveRange doesn't have an
+// LAllocation field. Instead it has a pointer to its LiveBundle, and the
+// LAllocation lives in the LiveBundle.
+//
+// For the resulting allocation (LB6 and LB9), all the LiveRanges are use-only
+// or flow-through. There are no definitions. But they are all for the same
+// VirtualReg, v3, so they all have the same value. An important question is
+// where they "get their value from". The answer is that
+// ::createMoveGroupsFromLiveRangeTransitions will have to insert suitable
+// MoveGroups so that each LiveRange for v3 can "acquire" the value from a
+// previously-executed LiveRange, except for the range that defines it. The
+// defining LiveRange is not visible here; either it is in some other
+// LiveBundle, not shown, or (more likely) the value is defined by a phi-node,
+// in which case, as mentioned previously, it is not shown as having an
+// explicit definition in any LiveRange.
+//
+// LiveBundles also have a `SpillSet* spill_` field (see below) and a
+// `LiveBundle* spillParent_`. The latter is used to ensure that all bundles
+// originating from an "original" bundle share the same spill slot. The
+// `spillParent_` pointers can be thought of creating a 1-level tree, where
+// each node points at its parent. Since the tree can be only 1-level, the
+// following invariant (INVAR) must be maintained:
+//
+// * if a LiveBundle has a non-null spillParent_ field, then it is a leaf node,
+// and no other LiveBundle can point at this one.
+//
+// * else (it has a null spillParent_ field) it is a root node, and so other
+// LiveBundles may point at it.
+//
+// When compiled with JS_JITSPEW, LiveBundle has a 32-bit `debugId_` field.
+// This is used only for debug printing, and makes it easier to see
+// parent-child relationships induced by the `spillParent_` pointers.
+//
+// The "life cycle" of LiveBundles is described in Section 2 below.
+//
+//
+// Secondary data structures: SpillSet, Requirement
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// SpillSet
+// --------
+// A SpillSet is a container for a set of LiveBundles that have been spilled,
+// all of which are assigned the same spill slot. The set is represented as a
+// vector of pointers to LiveBundles.  SpillSet also contains the identity of the
+// spill slot (its LAllocation).
+//
+// A LiveBundle, if it is to be spilled, has a pointer to the relevant
+// SpillSet, and the SpillSet in turn has a pointer back to the LiveBundle in
+// its vector thereof. So LiveBundles (that are to be spilled) and SpillSets
+// point at each other.
+//
+// (SPEC) LiveBundles that are not to be spilled (or for which the decision has
+// yet to be made) have their SpillSet pointers as null.  (/SPEC)
+//
+// Requirement
+// -----------
+// A Requirement is used transiently during the main allocation loop.  It
+// summarises the set of constraints on storage location (must be any register,
+// must be this specific register, must be stack, etc) for a LiveBundle. This
+// is so that the main allocation loop knows what kind of storage location it
+// must choose in order to satisfy all of the defs and uses within the bundle.
+//
+// What Requirement provides is (a) a partially ordered set of locations, and
+// (b) a constraint-merging method `merge`.
+//
+// Requirement needs a rewrite (and, in fact, that has already happened in
+// un-landed code in bug 1758274) for the following reasons:
+//
+// * it's potentially buggy (bug 1761654), although that doesn't currently
+// affect us, for reasons which are unclear.
+//
+// * the partially ordered set has a top point, meaning "no constraint", but it
+// doesn't have a corresponding bottom point, meaning "impossible
+// constraints". (So it's a partially ordered set, but not a lattice). This
+// leads to awkward coding in some places, which would be simplified if there
+// were an explicit way to indicate "impossible constraint".
+//
+//
+// Some ASCII art
+// ~~~~~~~~~~~~~~
+//
+// Here's some not-very-good ASCII art that tries to summarise the data
+// structures that persist for the entire allocation of a function:
+//
+// BacktrackingAllocator
+// |
+// (vregs)
+// |
+// v
+// |
+// VirtualRegister -->--(ins)--> LNode
+// | | `->--(def)--> LDefinition
+// v ^
+// | |
+// (ranges) |
+// | (vreg)
+// `--v->--. | ,-->--v-->-------------->--v-->--. ,--NULL
+// \ | / \ /
+// LiveRange LiveRange LiveRange
+// / | \ / \.
+// ,--b->--' / `-->--b-->--' `--NULL
+// | (bundle)
+// ^ /
+// | v
+// (ranges) /
+// | /
+// LiveBundle --s-->- LiveBundle
+// | \ / |
+// | \ / |
+// (spill) ^ ^ (spill)
+// | \ / |
+// v \ / ^
+// | (list) |
+// \ | /
+// `--->---> SpillSet <--'
+//
+// --b-- LiveRange::bundleLink: links in the list of LiveRanges that belong to
+// a LiveBundle
+//
+// --v-- LiveRange::registerLink: links in the list of LiveRanges that belong
+// to a VirtualRegister
+//
+// --s-- LiveBundle::spillParent: a possible link to my "spill parent bundle"
+//
+//
+// * LiveRange is in the center. Each LiveRange is a member of two different
+// linked lists, the --b-- list and the --v-- list.
+//
+// * VirtualRegister has a pointer `ranges` that points to the start of its
+// --v-- list of LiveRanges.
+//
+// * LiveBundle has a pointer `ranges` that points to the start of its --b--
+// list of LiveRanges.
+//
+// * LiveRange points back at both its owning VirtualRegister (`vreg`) and its
+// owning LiveBundle (`bundle`).
+//
+// * LiveBundle has a pointer --s-- `spillParent`, which may be null, to its
+// conceptual "spill parent bundle", as discussed in detail above.
+//
+// * LiveBundle has a pointer `spill` to its SpillSet.
+//
+// * SpillSet has a vector `list` of pointers back to the LiveBundles that
+// point at it.
+//
+// * VirtualRegister has pointers `ins` to the LNode that defines the value and
+// `def` to the LDefinition within that LNode.
+//
+// * BacktrackingAllocator has a vector `vregs` of pointers to all the
+// VirtualRegisters for the function. There is no equivalent top-level table
+// of all the LiveBundles for the function.
+//
+// Note that none of these pointers are "owning" in the C++-storage-management
+// sense. Rather, everything is stored in single arena which is freed when
+// compilation of the function is complete. For this reason,
+// BacktrackingAllocator.{h,cpp} is almost completely free of the usual C++
+// storage-management artefacts one would normally expect to see.
+//
+//
+// ========================================================================
+// ==== ====
+// ==== Section 2: The core allocation loop, and bundle splitting ====
+// ==== ====
+// ========================================================================
+//
+// Phase 1 of the allocator (described at the start of this SMDOC) computes
+// live ranges, merges them into bundles, and places the bundles in a priority
+// queue ::allocationQueue, ordered by what ::computePriority computes.
+//
+//
+// The allocation loops
+// ~~~~~~~~~~~~~~~~~~~~
+// The core of the allocation machinery consists of two loops followed by a
+// call to ::pickStackSlots. The latter is uninteresting. The two loops live
+// in ::go and are documented in detail there.
+//
+//
+// Bundle splitting
+// ~~~~~~~~~~~~~~~~
+// If the first of the two abovementioned loops cannot find a register for a
+// bundle, either directly or as a result of evicting conflicting bundles, then
+// it will have to either split or spill the bundle. The entry point to the
+// split/spill subsystem is ::chooseBundleSplit. See comments there.
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// End of documentation //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+#include "jit/BacktrackingAllocator.h"
+
+#include <algorithm>
+
+#include "jit/BitSet.h"
+#include "jit/CompileInfo.h"
+#include "js/Printf.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+// This is a big, complex file. Code is grouped into various sections, each
+// preceded by a box comment. Sections not marked as "Misc helpers" are
+// pretty much the top level flow, and are presented roughly in the same order
+// in which the allocation pipeline operates. BacktrackingAllocator::go,
+// right at the end of the file, is a good starting point.
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Misc helpers: linked-list management //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool SortBefore(UsePosition* a, UsePosition* b) {
+ return a->pos <= b->pos;
+}
+
+static inline bool SortBefore(LiveRange::BundleLink* a,
+ LiveRange::BundleLink* b) {
+ LiveRange* rangea = LiveRange::get(a);
+ LiveRange* rangeb = LiveRange::get(b);
+ MOZ_ASSERT(!rangea->intersects(rangeb));
+ return rangea->from() < rangeb->from();
+}
+
+static inline bool SortBefore(LiveRange::RegisterLink* a,
+ LiveRange::RegisterLink* b) {
+ return LiveRange::get(a)->from() <= LiveRange::get(b)->from();
+}
+
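+// Insert |value| into |list|, keeping the list sorted by SortBefore().  The
+// empty and append-at-the-back cases are handled up front because they are
+// the common ones; otherwise the list is walked to find the insertion point.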
+template <typename T>
+static inline void InsertSortedList(InlineForwardList<T>& list, T* value) {
+ if (list.empty()) {
+ list.pushFront(value);
+ return;
+ }
+
+ if (SortBefore(list.back(), value)) {
+ list.pushBack(value);
+ return;
+ }
+
+ T* prev = nullptr;
+ for (InlineForwardListIterator<T> iter = list.begin(); iter; iter++) {
+ if (SortBefore(value, *iter)) {
+ break;
+ }
+ prev = *iter;
+ }
+
+ if (prev) {
+ list.insertAfter(prev, value);
+ } else {
+ list.pushFront(value);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Misc helpers: methods for class SpillSet //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+void SpillSet::setAllocation(LAllocation alloc) {
+ for (size_t i = 0; i < numSpilledBundles(); i++) {
+ spilledBundle(i)->setAllocation(alloc);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Misc helpers: methods for class LiveRange //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+static size_t SpillWeightFromUsePolicy(LUse::Policy policy) {
+ switch (policy) {
+ case LUse::ANY:
+ return 1000;
+
+ case LUse::REGISTER:
+ case LUse::FIXED:
+ return 2000;
+
+ default:
+ return 0;
+ }
+}
+
+inline void LiveRange::noteAddedUse(UsePosition* use) {
+ LUse::Policy policy = use->usePolicy();
+ usesSpillWeight_ += SpillWeightFromUsePolicy(policy);
+ if (policy == LUse::FIXED) {
+ ++numFixedUses_;
+ }
+}
+
+inline void LiveRange::noteRemovedUse(UsePosition* use) {
+ LUse::Policy policy = use->usePolicy();
+ usesSpillWeight_ -= SpillWeightFromUsePolicy(policy);
+ if (policy == LUse::FIXED) {
+ --numFixedUses_;
+ }
+ MOZ_ASSERT_IF(!hasUses(), !usesSpillWeight_ && !numFixedUses_);
+}
+
+void LiveRange::addUse(UsePosition* use) {
+ MOZ_ASSERT(covers(use->pos));
+ InsertSortedList(uses_, use);
+ noteAddedUse(use);
+}
+
+UsePosition* LiveRange::popUse() {
+ UsePosition* ret = uses_.popFront();
+ noteRemovedUse(ret);
+ return ret;
+}
+
+void LiveRange::tryToMoveDefAndUsesInto(LiveRange* other) {
+ MOZ_ASSERT(&other->vreg() == &vreg());
+ MOZ_ASSERT(this != other);
+
+ // Move over all uses which fit in |other|'s boundaries.
+ for (UsePositionIterator iter = usesBegin(); iter;) {
+ UsePosition* use = *iter;
+ if (other->covers(use->pos)) {
+ uses_.removeAndIncrement(iter);
+ noteRemovedUse(use);
+ other->addUse(use);
+ } else {
+ iter++;
+ }
+ }
+
+ // Distribute the definition to |other| as well, if possible.
+ if (hasDefinition() && from() == other->from()) {
+ other->setHasDefinition();
+ }
+}
+
+bool LiveRange::contains(LiveRange* other) const {
+ return from() <= other->from() && to() >= other->to();
+}
+
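+// Compute the intersection of |this| with |other|, expressed as the part of
+// |this| that lies before |other| (*pre), the overlapping part (*inside) and
+// the part after |other| (*post).  For example, with illustrative positions,
+// intersecting this = [10,20) with other = [15,30) yields pre = [10,15),
+// inside = [15,20) and an empty post.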
+void LiveRange::intersect(LiveRange* other, Range* pre, Range* inside,
+ Range* post) const {
+ MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());
+
+ CodePosition innerFrom = from();
+ if (from() < other->from()) {
+ if (to() < other->from()) {
+ *pre = range_;
+ return;
+ }
+ *pre = Range(from(), other->from());
+ innerFrom = other->from();
+ }
+
+ CodePosition innerTo = to();
+ if (to() > other->to()) {
+ if (from() >= other->to()) {
+ *post = range_;
+ return;
+ }
+ *post = Range(other->to(), to());
+ innerTo = other->to();
+ }
+
+ if (innerFrom != innerTo) {
+ *inside = Range(innerFrom, innerTo);
+ }
+}
+
+bool LiveRange::intersects(LiveRange* other) const {
+ Range pre, inside, post;
+ intersect(other, &pre, &inside, &post);
+ return !inside.empty();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Misc helpers: methods for class LiveBundle //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef DEBUG
+size_t LiveBundle::numRanges() const {
+ size_t count = 0;
+ for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+ count++;
+ }
+ return count;
+}
+#endif
+
+LiveRange* LiveBundle::rangeFor(CodePosition pos) const {
+ for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (range->covers(pos)) {
+ return range;
+ }
+ }
+ return nullptr;
+}
+
+void LiveBundle::addRange(LiveRange* range) {
+ MOZ_ASSERT(!range->bundle());
+ range->setBundle(this);
+ InsertSortedList(ranges_, &range->bundleLink);
+}
+
+bool LiveBundle::addRange(TempAllocator& alloc, VirtualRegister* vreg,
+ CodePosition from, CodePosition to) {
+ LiveRange* range = LiveRange::FallibleNew(alloc, vreg, from, to);
+ if (!range) {
+ return false;
+ }
+ addRange(range);
+ return true;
+}
+
+bool LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc,
+ LiveRange* oldRange,
+ CodePosition from, CodePosition to) {
+ LiveRange* range = LiveRange::FallibleNew(alloc, &oldRange->vreg(), from, to);
+ if (!range) {
+ return false;
+ }
+ addRange(range);
+ oldRange->tryToMoveDefAndUsesInto(range);
+ return true;
+}
+
+LiveRange* LiveBundle::popFirstRange() {
+ LiveRange::BundleLinkIterator iter = rangesBegin();
+ if (!iter) {
+ return nullptr;
+ }
+
+ LiveRange* range = LiveRange::get(*iter);
+ ranges_.removeAt(iter);
+
+ range->setBundle(nullptr);
+ return range;
+}
+
+void LiveBundle::removeRange(LiveRange* range) {
+ for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+ LiveRange* existing = LiveRange::get(*iter);
+ if (existing == range) {
+ ranges_.removeAt(iter);
+ return;
+ }
+ }
+ MOZ_CRASH();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Misc helpers: methods for class VirtualRegister //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+bool VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from,
+ CodePosition to, size_t* numRanges) {
+ MOZ_ASSERT(from < to);
+
+ // Mark [from,to) as a live range for this register during the initial
+ // liveness analysis, coalescing with any existing overlapping ranges.
+
+ // On some pathological graphs there might be a huge number of different
+  // live ranges. Allow non-overlapping live ranges to be merged if the
+ // number of ranges exceeds the cap below.
+ static const size_t CoalesceLimit = 100000;
+
+ LiveRange* prev = nullptr;
+ LiveRange* merged = nullptr;
+ for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter;) {
+ LiveRange* existing = LiveRange::get(*iter);
+
+ if (from > existing->to() && *numRanges < CoalesceLimit) {
+ // The new range should go after this one.
+ prev = existing;
+ iter++;
+ continue;
+ }
+
+ if (to.next() < existing->from()) {
+ // The new range should go before this one.
+ break;
+ }
+
+ if (!merged) {
+ // This is the first old range we've found that overlaps the new
+ // range. Extend this one to cover its union with the new range.
+ merged = existing;
+
+ if (from < existing->from()) {
+ existing->setFrom(from);
+ }
+ if (to > existing->to()) {
+ existing->setTo(to);
+ }
+
+ // Continue searching to see if any other old ranges can be
+ // coalesced with the new merged range.
+ iter++;
+ continue;
+ }
+
+ // Coalesce this range into the previous range we merged into.
+ MOZ_ASSERT(existing->from() >= merged->from());
+ if (existing->to() > merged->to()) {
+ merged->setTo(existing->to());
+ }
+
+ MOZ_ASSERT(!existing->hasDefinition());
+ existing->tryToMoveDefAndUsesInto(merged);
+ MOZ_ASSERT(!existing->hasUses());
+
+ ranges_.removeAndIncrement(iter);
+ }
+
+ if (!merged) {
+ // The new range does not overlap any existing range for the vreg.
+ LiveRange* range = LiveRange::FallibleNew(alloc, this, from, to);
+ if (!range) {
+ return false;
+ }
+
+ if (prev) {
+ ranges_.insertAfter(&prev->registerLink, &range->registerLink);
+ } else {
+ ranges_.pushFront(&range->registerLink);
+ }
+
+ (*numRanges)++;
+ }
+
+ return true;
+}
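+
+// Worked example for the coalescing above (illustrative positions): if the
+// register already has ranges [2,6) and [10,14), adding [5,11) first extends
+// [2,6) to [2,11) and then absorbs [10,14), leaving a single range [2,14).
+// The CoalesceLimit only kicks in on pathological graphs, where ranges that
+// do not even overlap may be merged to cap the total number of ranges.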
+
+void VirtualRegister::addInitialUse(UsePosition* use) {
+ LiveRange::get(*rangesBegin())->addUse(use);
+}
+
+void VirtualRegister::setInitialDefinition(CodePosition from) {
+ LiveRange* first = LiveRange::get(*rangesBegin());
+ MOZ_ASSERT(from >= first->from());
+ first->setFrom(from);
+ first->setHasDefinition();
+}
+
+LiveRange* VirtualRegister::rangeFor(CodePosition pos,
+ bool preferRegister /* = false */) const {
+ LiveRange* found = nullptr;
+ for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (range->covers(pos)) {
+ if (!preferRegister || range->bundle()->allocation().isRegister()) {
+ return range;
+ }
+ if (!found) {
+ found = range;
+ }
+ }
+ }
+ return found;
+}
+
+void VirtualRegister::addRange(LiveRange* range) {
+ InsertSortedList(ranges_, &range->registerLink);
+}
+
+void VirtualRegister::removeRange(LiveRange* range) {
+ for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
+ LiveRange* existing = LiveRange::get(*iter);
+ if (existing == range) {
+ ranges_.removeAt(iter);
+ return;
+ }
+ }
+ MOZ_CRASH();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Misc helpers: queries about uses //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+static inline LDefinition* FindReusingDefOrTemp(LNode* node,
+ LAllocation* alloc) {
+ if (node->isPhi()) {
+ MOZ_ASSERT(node->toPhi()->numDefs() == 1);
+ MOZ_ASSERT(node->toPhi()->getDef(0)->policy() !=
+ LDefinition::MUST_REUSE_INPUT);
+ return nullptr;
+ }
+
+ LInstruction* ins = node->toInstruction();
+
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+ ins->getOperand(def->getReusedInput()) == alloc) {
+ return def;
+ }
+ }
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* def = ins->getTemp(i);
+ if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+ ins->getOperand(def->getReusedInput()) == alloc) {
+ return def;
+ }
+ }
+ return nullptr;
+}
+
+bool BacktrackingAllocator::isReusedInput(LUse* use, LNode* ins,
+ bool considerCopy) {
+ if (LDefinition* def = FindReusingDefOrTemp(ins, use)) {
+ return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
+ }
+ return false;
+}
+
+bool BacktrackingAllocator::isRegisterUse(UsePosition* use, LNode* ins,
+ bool considerCopy) {
+ switch (use->usePolicy()) {
+ case LUse::ANY:
+ return isReusedInput(use->use(), ins, considerCopy);
+
+ case LUse::REGISTER:
+ case LUse::FIXED:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool BacktrackingAllocator::isRegisterDefinition(LiveRange* range) {
+ if (!range->hasDefinition()) {
+ return false;
+ }
+
+ VirtualRegister& reg = range->vreg();
+ if (reg.ins()->isPhi()) {
+ return false;
+ }
+
+ if (reg.def()->policy() == LDefinition::FIXED &&
+ !reg.def()->output()->isRegister()) {
+ return false;
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Misc helpers: atomic LIR groups //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+// The following groupings contain implicit (invisible to ::buildLivenessInfo)
+// value flows, and therefore no split points may be requested inside them.
+// This is an otherwise unstated part of the contract between LIR generation
+// and the allocator.
+//
+// (1) (any insn) ; OsiPoint
+//
+// [Further group definitions and supporting code to come, pending rework
+// of the wasm atomic-group situation.]
+
+CodePosition RegisterAllocator::minimalDefEnd(LNode* ins) const {
+ // Compute the shortest interval that captures vregs defined by ins.
+ // Watch for instructions that are followed by an OSI point.
+ // If moves are introduced between the instruction and the OSI point then
+ // safepoint information for the instruction may be incorrect.
+ while (true) {
+ LNode* next = insData[ins->id() + 1];
+ if (!next->isOsiPoint()) {
+ break;
+ }
+ ins = next;
+ }
+
+ return outputOf(ins);
+}
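+
+// For instance (hypothetical group): if an instruction occupying positions
+// 12-13 is followed by an OsiPoint at 14-15, minimalDefEnd of the instruction
+// returns 15, so the shortest interval for its definitions extends across the
+// OsiPoint rather than stopping at 13.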
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Misc helpers: computation of bundle priorities and spill weights //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+size_t BacktrackingAllocator::computePriority(LiveBundle* bundle) {
+ // The priority of a bundle is its total length, so that longer lived
+ // bundles will be processed before shorter ones (even if the longer ones
+ // have a low spill weight). See processBundle().
+ size_t lifetimeTotal = 0;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ lifetimeTotal += range->to() - range->from();
+ }
+
+ return lifetimeTotal;
+}
+
+bool BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins) {
+ // Whether this is a minimal range capturing a definition at ins.
+ return (range->to() <= minimalDefEnd(ins).next()) &&
+ ((!ins->isPhi() && range->from() == inputOf(ins)) ||
+ range->from() == outputOf(ins));
+}
+
+bool BacktrackingAllocator::minimalUse(LiveRange* range, UsePosition* use) {
+ // Whether this is a minimal range capturing |use|.
+ LNode* ins = insData[use->pos];
+ return (range->from() == inputOf(ins)) &&
+ (range->to() ==
+ (use->use()->usedAtStart() ? outputOf(ins) : outputOf(ins).next()));
+}
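+
+// For example (illustrative positions): for a non-at-start use at an
+// instruction occupying positions 8-9, a minimal range runs from inputOf (8)
+// to outputOf(ins).next() (10); for an at-start use it ends at outputOf (9)
+// instead.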
+
+bool BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed) {
+ LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (!range->hasVreg()) {
+ *pfixed = true;
+ return true;
+ }
+
+ // If a bundle contains multiple ranges, splitAtAllRegisterUses will split
+ // each range into a separate bundle.
+ if (++iter) {
+ return false;
+ }
+
+ if (range->hasDefinition()) {
+ VirtualRegister& reg = range->vreg();
+ if (pfixed) {
+ *pfixed = reg.def()->policy() == LDefinition::FIXED &&
+ reg.def()->output()->isRegister();
+ }
+ return minimalDef(range, reg.ins());
+ }
+
+ bool fixed = false, minimal = false, multiple = false;
+
+ for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+ if (iter != range->usesBegin()) {
+ multiple = true;
+ }
+
+ switch (iter->usePolicy()) {
+ case LUse::FIXED:
+ if (fixed) {
+ return false;
+ }
+ fixed = true;
+ if (minimalUse(range, *iter)) {
+ minimal = true;
+ }
+ break;
+
+ case LUse::REGISTER:
+ if (minimalUse(range, *iter)) {
+ minimal = true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // If a range contains a fixed use and at least one other use,
+ // splitAtAllRegisterUses will split each use into a different bundle.
+ if (multiple && fixed) {
+ minimal = false;
+ }
+
+ if (pfixed) {
+ *pfixed = fixed;
+ }
+ return minimal;
+}
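+
+// For example (illustrative): a bundle whose single range covers just one
+// REGISTER use at one instruction is minimal, as is one covering just its
+// definition; a single range holding a FIXED use plus any other use is not
+// minimal, since splitting will separate those uses into different bundles
+// anyway.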
+
+size_t BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle) {
+ // Minimal bundles have an extremely high spill weight, to ensure they
+ // can evict any other bundles and be allocated to a register.
+ bool fixed;
+ if (minimalBundle(bundle, &fixed)) {
+ return fixed ? 2000000 : 1000000;
+ }
+
+ size_t usesTotal = 0;
+ fixed = false;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (range->hasDefinition()) {
+ VirtualRegister& reg = range->vreg();
+ if (reg.def()->policy() == LDefinition::FIXED &&
+ reg.def()->output()->isRegister()) {
+ usesTotal += 2000;
+ fixed = true;
+ } else if (!reg.ins()->isPhi()) {
+ usesTotal += 2000;
+ }
+ }
+
+ usesTotal += range->usesSpillWeight();
+ if (range->numFixedUses() > 0) {
+ fixed = true;
+ }
+ }
+
+ // Bundles with fixed uses are given a higher spill weight, since they must
+ // be allocated to a specific register.
+ if (testbed && fixed) {
+ usesTotal *= 2;
+ }
+
+ // Compute spill weight as a use density, lowering the weight for long
+ // lived bundles with relatively few uses.
+ size_t lifetimeTotal = computePriority(bundle);
+ return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
+}
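+
+// Illustrative arithmetic: for a non-minimal bundle with one non-phi
+// definition, three REGISTER uses and a total lifetime of 40 code positions:
+//
+//   usesTotal     = 2000 (def) + 3 * 2000 (uses) = 8000
+//   lifetimeTotal = 40
+//   spill weight  = 8000 / 40 = 200
+//
+// which is far below the 1000000 or 2000000 reserved for minimal bundles, so
+// such a bundle is much easier to evict.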
+
+size_t BacktrackingAllocator::maximumSpillWeight(
+ const LiveBundleVector& bundles) {
+ size_t maxWeight = 0;
+ for (size_t i = 0; i < bundles.length(); i++) {
+ maxWeight = std::max(maxWeight, computeSpillWeight(bundles[i]));
+ }
+ return maxWeight;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Initialization of the allocator //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+// This function pre-allocates and initializes as much global state as possible
+// to avoid littering the algorithms with memory management cruft.
+bool BacktrackingAllocator::init() {
+ if (!RegisterAllocator::init()) {
+ return false;
+ }
+
+ liveIn = mir->allocate<BitSet>(graph.numBlockIds());
+ if (!liveIn) {
+ return false;
+ }
+
+ size_t numVregs = graph.numVirtualRegisters();
+ if (!vregs.init(mir->alloc(), numVregs)) {
+ return false;
+ }
+ for (uint32_t i = 0; i < numVregs; i++) {
+ new (&vregs[i]) VirtualRegister();
+ }
+
+ // Build virtual register objects.
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ if (mir->shouldCancel("Create data structures (main loop)")) {
+ return false;
+ }
+
+ LBlock* block = graph.getBlock(i);
+ for (LInstructionIterator ins = block->begin(); ins != block->end();
+ ins++) {
+ if (mir->shouldCancel("Create data structures (inner loop 1)")) {
+ return false;
+ }
+
+ for (size_t j = 0; j < ins->numDefs(); j++) {
+ LDefinition* def = ins->getDef(j);
+ if (def->isBogusTemp()) {
+ continue;
+ }
+ vreg(def).init(*ins, def, /* isTemp = */ false);
+ }
+
+ for (size_t j = 0; j < ins->numTemps(); j++) {
+ LDefinition* def = ins->getTemp(j);
+ if (def->isBogusTemp()) {
+ continue;
+ }
+ vreg(def).init(*ins, def, /* isTemp = */ true);
+ }
+ }
+ for (size_t j = 0; j < block->numPhis(); j++) {
+ LPhi* phi = block->getPhi(j);
+ LDefinition* def = phi->getDef(0);
+ vreg(def).init(phi, def, /* isTemp = */ false);
+ }
+ }
+
+ LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
+ while (!remainingRegisters.emptyGeneral()) {
+ AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral());
+ registers[reg.code()].allocatable = true;
+ }
+ while (!remainingRegisters.emptyFloat()) {
+ AnyRegister reg =
+ AnyRegister(remainingRegisters.takeAnyFloat<RegTypeName::Any>());
+ registers[reg.code()].allocatable = true;
+ }
+
+ LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc();
+ for (size_t i = 0; i < AnyRegister::Total; i++) {
+ registers[i].reg = AnyRegister::FromCode(i);
+ registers[i].allocations.setAllocator(lifoAlloc);
+ }
+
+ hotcode.setAllocator(lifoAlloc);
+ callRanges.setAllocator(lifoAlloc);
+
+  // Partition the graph into hot and cold sections, to help make splitting
+  // decisions. Since we don't have any profiling data, this is a
+ // crapshoot, so just mark the bodies of inner loops as hot and everything
+ // else as cold.
+
+ LBlock* backedge = nullptr;
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ LBlock* block = graph.getBlock(i);
+
+ // If we see a loop header, mark the backedge so we know when we have
+ // hit the end of the loop. Don't process the loop immediately, so that
+ // if there is an inner loop we will ignore the outer backedge.
+ if (block->mir()->isLoopHeader()) {
+ backedge = block->mir()->backedge()->lir();
+ }
+
+ if (block == backedge) {
+ LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
+ LiveRange* range = LiveRange::FallibleNew(
+ alloc(), nullptr, entryOf(header), exitOf(block).next());
+ if (!range || !hotcode.insert(range)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Liveness analysis //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+// Helper for ::buildLivenessInfo
+bool BacktrackingAllocator::addInitialFixedRange(AnyRegister reg,
+ CodePosition from,
+ CodePosition to) {
+ LiveRange* range = LiveRange::FallibleNew(alloc(), nullptr, from, to);
+ if (!range) {
+ return false;
+ }
+ LiveRangePlus rangePlus(range);
+ return registers[reg.code()].allocations.insert(rangePlus);
+}
+
+// Helper for ::buildLivenessInfo
+#ifdef DEBUG
+// Returns true iff ins has a def/temp reusing the input allocation.
+static bool IsInputReused(LInstruction* ins, LUse* use) {
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
+ ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use) {
+ return true;
+ }
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
+ ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use) {
+ return true;
+ }
+ }
+
+ return false;
+}
+#endif
+
+/*
+ * This function builds up liveness ranges for all virtual registers
+ * defined in the function.
+ *
+ * The algorithm is based on the one published in:
+ *
+ * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
+ * SSA Form." Proceedings of the International Symposium on Code Generation
+ * and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
+ *
+ * The algorithm operates on blocks ordered such that dominators of a block
+ * are before the block itself, and such that all blocks of a loop are
+ * contiguous. It proceeds backwards over the instructions in this order,
+ * marking registers live at their uses, ending their live ranges at
+ * definitions, and recording which registers are live at the top of every
+ * block. To deal with loop backedges, registers live at the beginning of
+ * a loop gain a range covering the entire loop.
+ */
+bool BacktrackingAllocator::buildLivenessInfo() {
+ JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
+
+ Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
+ BitSet loopDone(graph.numBlockIds());
+ if (!loopDone.init(alloc())) {
+ return false;
+ }
+
+ size_t numRanges = 0;
+
+ for (size_t i = graph.numBlocks(); i > 0; i--) {
+ if (mir->shouldCancel("Build Liveness Info (main loop)")) {
+ return false;
+ }
+
+ LBlock* block = graph.getBlock(i - 1);
+ MBasicBlock* mblock = block->mir();
+
+ BitSet& live = liveIn[mblock->id()];
+ new (&live) BitSet(graph.numVirtualRegisters());
+ if (!live.init(alloc())) {
+ return false;
+ }
+
+ // Propagate liveIn from our successors to us.
+ for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
+ MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
+ // Skip backedges, as we fix them up at the loop header.
+ if (mblock->id() < successor->id()) {
+ live.insertAll(liveIn[successor->id()]);
+ }
+ }
+
+ // Add successor phis.
+ if (mblock->successorWithPhis()) {
+ LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
+ for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
+ LPhi* phi = phiSuccessor->getPhi(j);
+ LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
+ uint32_t reg = use->toUse()->virtualRegister();
+ live.insert(reg);
+ vreg(use).setUsedByPhi();
+ }
+ }
+
+    // Registers are assumed to be alive for the entire block; a definition
+    // shortens the range to the point of definition.
+    for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
+      if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block),
+                                             exitOf(block).next(), &numRanges)) {
+        return false;
+      }
+    }
+
+ // Shorten the front end of ranges for live variables to their point of
+ // definition, if found.
+ for (LInstructionReverseIterator ins = block->rbegin();
+ ins != block->rend(); ins++) {
+ // Calls may clobber registers, so force a spill and reload around the
+ // callsite.
+ if (ins->isCall()) {
+ for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more();
+ ++iter) {
+ bool found = false;
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ if (ins->getDef(i)->isFixed() &&
+ ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
+ found = true;
+ break;
+ }
+ }
+ // If this register doesn't have an explicit def above, mark
+ // it as clobbered by the call unless it is actually
+ // call-preserved.
+ if (!found && !ins->isCallPreserved(*iter)) {
+ if (!addInitialFixedRange(*iter, outputOf(*ins),
+ outputOf(*ins).next())) {
+ return false;
+ }
+ }
+ }
+
+ CallRange* callRange = new (alloc().fallible())
+ CallRange(outputOf(*ins), outputOf(*ins).next());
+ if (!callRange) {
+ return false;
+ }
+
+ callRangesList.pushFront(callRange);
+ if (!callRanges.insert(callRange)) {
+ return false;
+ }
+ }
+
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->isBogusTemp()) {
+ continue;
+ }
+
+ CodePosition from = outputOf(*ins);
+
+ if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
+ // MUST_REUSE_INPUT is implemented by allocating an output
+ // register and moving the input to it. Register hints are
+ // used to avoid unnecessary moves. We give the input an
+ // LUse::ANY policy to avoid allocating a register for the
+ // input.
+ LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
+ MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
+ MOZ_ASSERT(inputUse->usedAtStart());
+ *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY,
+ /* usedAtStart = */ true);
+ }
+
+ if (!vreg(def).addInitialRange(alloc(), from, from.next(),
+ &numRanges)) {
+ return false;
+ }
+ vreg(def).setInitialDefinition(from);
+ live.remove(def->virtualRegister());
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ if (temp->isBogusTemp()) {
+ continue;
+ }
+
+          // Normally temps are considered to cover both the input
+          // and output of the associated instruction. In some cases,
+          // though, we want to use a fixed register as both an input
+          // and a clobbered register in the instruction, so watch for
+          // this and shorten the temp to cover only the output.
+ CodePosition from = inputOf(*ins);
+ if (temp->policy() == LDefinition::FIXED) {
+ AnyRegister reg = temp->output()->toRegister();
+ for (LInstruction::InputIterator alloc(**ins); alloc.more();
+ alloc.next()) {
+ if (alloc->isUse()) {
+ LUse* use = alloc->toUse();
+ if (use->isFixedRegister()) {
+ if (GetFixedRegister(vreg(use).def(), use) == reg) {
+ from = outputOf(*ins);
+ }
+ }
+ }
+ }
+ }
+
+ // * For non-call instructions, temps cover both the input and output,
+ // so temps never alias uses (even at-start uses) or defs.
+ // * For call instructions, temps only cover the input (the output is
+ // used for the force-spill ranges added above). This means temps
+ // still don't alias uses but they can alias the (fixed) defs. For now
+ // we conservatively require temps to have a fixed register for call
+ // instructions to prevent a footgun.
+ MOZ_ASSERT_IF(ins->isCall(), temp->policy() == LDefinition::FIXED);
+ CodePosition to =
+ ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
+
+ if (!vreg(temp).addInitialRange(alloc(), from, to, &numRanges)) {
+ return false;
+ }
+ vreg(temp).setInitialDefinition(from);
+ }
+
+ DebugOnly<bool> hasUseRegister = false;
+ DebugOnly<bool> hasUseRegisterAtStart = false;
+
+ for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more();
+ inputAlloc.next()) {
+ if (inputAlloc->isUse()) {
+ LUse* use = inputAlloc->toUse();
+
+ // Call uses should always be at-start, since calls use all
+ // registers.
+ MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
+ use->usedAtStart());
+
+#ifdef DEBUG
+ // If there are both useRegisterAtStart(x) and useRegister(y)
+ // uses, we may assign the same register to both operands
+ // (bug 772830). Don't allow this for now.
+ if (use->policy() == LUse::REGISTER) {
+ if (use->usedAtStart()) {
+ if (!IsInputReused(*ins, use)) {
+ hasUseRegisterAtStart = true;
+ }
+ } else {
+ hasUseRegister = true;
+ }
+ }
+ MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
+#endif
+
+ // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
+ if (use->policy() == LUse::RECOVERED_INPUT) {
+ continue;
+ }
+
+ CodePosition to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
+ if (use->isFixedRegister()) {
+ LAllocation reg(AnyRegister::FromCode(use->registerCode()));
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->policy() == LDefinition::FIXED &&
+ *def->output() == reg) {
+ to = inputOf(*ins);
+ }
+ }
+ }
+
+ if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next(),
+ &numRanges)) {
+ return false;
+ }
+ UsePosition* usePosition =
+ new (alloc().fallible()) UsePosition(use, to);
+ if (!usePosition) {
+ return false;
+ }
+ vreg(use).addInitialUse(usePosition);
+ live.insert(use->virtualRegister());
+ }
+ }
+ }
+
+ // Phis have simultaneous assignment semantics at block begin, so at
+ // the beginning of the block we can be sure that liveIn does not
+ // contain any phi outputs.
+ for (unsigned int i = 0; i < block->numPhis(); i++) {
+ LDefinition* def = block->getPhi(i)->getDef(0);
+ if (live.contains(def->virtualRegister())) {
+ live.remove(def->virtualRegister());
+ } else {
+ // This is a dead phi, so add a dummy range over all phis. This
+ // can go away if we have an earlier dead code elimination pass.
+ CodePosition entryPos = entryOf(block);
+ if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next(),
+ &numRanges)) {
+ return false;
+ }
+ }
+ }
+
+ if (mblock->isLoopHeader()) {
+ // A divergence from the published algorithm is required here, as
+ // our block order does not guarantee that blocks of a loop are
+ // contiguous. As a result, a single live range spanning the
+ // loop is not possible. Additionally, we require liveIn in a later
+ // pass for resolution, so that must also be fixed up here.
+ MBasicBlock* loopBlock = mblock->backedge();
+ while (true) {
+ // Blocks must already have been visited to have a liveIn set.
+ MOZ_ASSERT(loopBlock->id() >= mblock->id());
+
+ // Add a range for this entire loop block
+ CodePosition from = entryOf(loopBlock->lir());
+ CodePosition to = exitOf(loopBlock->lir()).next();
+
+ for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
+ if (!vregs[*liveRegId].addInitialRange(alloc(), from, to,
+ &numRanges)) {
+ return false;
+ }
+ }
+
+ // Fix up the liveIn set.
+ liveIn[loopBlock->id()].insertAll(live);
+
+ // Make sure we don't visit this node again
+ loopDone.insert(loopBlock->id());
+
+ // If this is the loop header, any predecessors are either the
+ // backedge or out of the loop, so skip any predecessors of
+ // this block
+ if (loopBlock != mblock) {
+ for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
+ MBasicBlock* pred = loopBlock->getPredecessor(i);
+ if (loopDone.contains(pred->id())) {
+ continue;
+ }
+ if (!loopWorkList.append(pred)) {
+ return false;
+ }
+ }
+ }
+
+ // Terminate loop if out of work.
+ if (loopWorkList.empty()) {
+ break;
+ }
+
+ // Grab the next block off the work list, skipping any OSR block.
+ MBasicBlock* osrBlock = graph.mir().osrBlock();
+ while (!loopWorkList.empty()) {
+ loopBlock = loopWorkList.popCopy();
+ if (loopBlock != osrBlock) {
+ break;
+ }
+ }
+
+        // If the work list was exhausted without finding a non-OSR block,
+        // there are no more work items.
+ if (loopBlock == osrBlock) {
+ MOZ_ASSERT(loopWorkList.empty());
+ break;
+ }
+ }
+
+ // Clear the done set for other loops
+ loopDone.clear();
+ }
+
+ MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
+ }
+
+ JitSpew(JitSpew_RegAlloc, "Completed liveness analysis");
+ return true;
+}
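+
+// Illustrative flow for a single block: a vreg that is used at position |p|
+// and defined earlier in the same block first gets a range
+// [entryOf(block), p.next()) when its use is visited (we walk the block
+// backwards); when its definition at outputOf(def) is reached,
+// setInitialDefinition() pulls the front of that range up to outputOf(def)
+// and the vreg is removed from the live set, so no block-spanning range
+// survives into the predecessors.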
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Merging and queueing of LiveRange groups //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+// Helper for ::tryMergeBundles
+static bool IsArgumentSlotDefinition(LDefinition* def) {
+ return def->policy() == LDefinition::FIXED && def->output()->isArgument();
+}
+
+// Helper for ::tryMergeBundles
+static bool IsThisSlotDefinition(LDefinition* def) {
+ return IsArgumentSlotDefinition(def) &&
+ def->output()->toArgument()->index() <
+ THIS_FRAME_ARGSLOT + sizeof(Value);
+}
+
+// Helper for ::tryMergeBundles
+static bool HasStackPolicy(LDefinition* def) {
+ return def->policy() == LDefinition::STACK;
+}
+
+// Helper for ::tryMergeBundles
+static bool CanMergeTypesInBundle(LDefinition::Type a, LDefinition::Type b) {
+ // Fast path for the common case.
+ if (a == b) {
+ return true;
+ }
+
+ // Only merge if the sizes match, so that we don't get confused about the
+ // width of spill slots.
+ return StackSlotAllocator::width(a) == StackSlotAllocator::width(b);
+}
+
+// Helper for ::tryMergeReusedRegister
+bool BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0,
+ LiveBundle* bundle1) {
+ // See if bundle0 and bundle1 can be merged together.
+ if (bundle0 == bundle1) {
+ return true;
+ }
+
+ // Get a representative virtual register from each bundle.
+ VirtualRegister& reg0 = bundle0->firstRange()->vreg();
+ VirtualRegister& reg1 = bundle1->firstRange()->vreg();
+
+ MOZ_ASSERT(CanMergeTypesInBundle(reg0.type(), reg1.type()));
+ MOZ_ASSERT(reg0.isCompatible(reg1));
+
+ // Registers which might spill to the frame's |this| slot can only be
+ // grouped with other such registers. The frame's |this| slot must always
+ // hold the |this| value, as required by JitFrame tracing and by the Ion
+ // constructor calling convention.
+ if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) {
+ if (*reg0.def()->output() != *reg1.def()->output()) {
+ return true;
+ }
+ }
+
+ // Registers which might spill to the frame's argument slots can only be
+ // grouped with other such registers if the frame might access those
+ // arguments through a lazy arguments object or rest parameter.
+ if (IsArgumentSlotDefinition(reg0.def()) ||
+ IsArgumentSlotDefinition(reg1.def())) {
+ if (graph.mir().entryBlock()->info().mayReadFrameArgsDirectly()) {
+ if (*reg0.def()->output() != *reg1.def()->output()) {
+ return true;
+ }
+ }
+ }
+
+ // When we make a call to a WebAssembly function that returns multiple
+ // results, some of those results can go on the stack. The callee is passed a
+ // pointer to this stack area, which is represented as having policy
+ // LDefinition::STACK (with type LDefinition::STACKRESULTS). Individual
+ // results alias parts of the stack area with a value-appropriate type, but
+ // policy LDefinition::STACK. This aliasing between allocations makes it
+  // unsound to merge anything with an LDefinition::STACK policy.
+ if (HasStackPolicy(reg0.def()) || HasStackPolicy(reg1.def())) {
+ return true;
+ }
+
+ // Limit the number of times we compare ranges if there are many ranges in
+ // one of the bundles, to avoid quadratic behavior.
+ static const size_t MAX_RANGES = 200;
+
+ // Make sure that ranges in the bundles do not overlap.
+ LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(),
+ iter1 = bundle1->rangesBegin();
+ size_t count = 0;
+ while (iter0 && iter1) {
+ if (++count >= MAX_RANGES) {
+ return true;
+ }
+
+ LiveRange* range0 = LiveRange::get(*iter0);
+ LiveRange* range1 = LiveRange::get(*iter1);
+
+ if (range0->from() >= range1->to()) {
+ iter1++;
+ } else if (range1->from() >= range0->to()) {
+ iter0++;
+ } else {
+ return true;
+ }
+ }
+
+ // Move all ranges from bundle1 into bundle0.
+ while (LiveRange* range = bundle1->popFirstRange()) {
+ bundle0->addRange(range);
+ }
+
+ return true;
+}
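+
+// Example of the overlap walk above (illustrative ranges): for bundle0 ranges
+// [0,8), [16,24) and bundle1 ranges [8,12), [30,40), each comparison advances
+// one iterator ([8,12) starts at the end of [0,8), [16,24) starts after
+// [8,12) ends, [30,40) starts after [16,24) ends), so no overlap is found and
+// all of bundle1's ranges are moved into bundle0.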
+
+// Helper for ::mergeAndQueueRegisters
+void BacktrackingAllocator::allocateStackDefinition(VirtualRegister& reg) {
+ LInstruction* ins = reg.ins()->toInstruction();
+ if (reg.def()->type() == LDefinition::STACKRESULTS) {
+ LStackArea alloc(ins->toInstruction());
+ stackSlotAllocator.allocateStackArea(&alloc);
+ reg.def()->setOutput(alloc);
+ } else {
+ // Because the definitions are visited in order, the area has been allocated
+ // before we reach this result, so we know the operand is an LStackArea.
+ const LUse* use = ins->getOperand(0)->toUse();
+ VirtualRegister& area = vregs[use->virtualRegister()];
+ const LStackArea* areaAlloc = area.def()->output()->toStackArea();
+ reg.def()->setOutput(areaAlloc->resultAlloc(ins, reg.def()));
+ }
+}
+
+// Helper for ::mergeAndQueueRegisters
+bool BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def,
+ VirtualRegister& input) {
+ // def is a vreg which reuses input for its output physical register. Try
+ // to merge ranges for def with those of input if possible, as avoiding
+ // copies before def's instruction is crucial for generated code quality
+ // (MUST_REUSE_INPUT is used for all arithmetic on x86/x64).
+
+ if (def.rangeFor(inputOf(def.ins()))) {
+ MOZ_ASSERT(def.isTemp());
+ def.setMustCopyInput();
+ return true;
+ }
+
+ if (!CanMergeTypesInBundle(def.type(), input.type())) {
+ def.setMustCopyInput();
+ return true;
+ }
+
+ LiveRange* inputRange = input.rangeFor(outputOf(def.ins()));
+ if (!inputRange) {
+ // The input is not live after the instruction, either in a safepoint
+ // for the instruction or in subsequent code. The input and output
+ // can thus be in the same group.
+ return tryMergeBundles(def.firstBundle(), input.firstBundle());
+ }
+
+ // Avoid merging in very large live ranges as merging has non-linear
+ // complexity. The cutoff value is hard to gauge. 1M was chosen because it
+ // is "large" and yet usefully caps compile time on AutoCad-for-the-web to
+ // something reasonable on a 2017-era desktop system.
+ const uint32_t RANGE_SIZE_CUTOFF = 1000000;
+ if (inputRange->to() - inputRange->from() > RANGE_SIZE_CUTOFF) {
+ def.setMustCopyInput();
+ return true;
+ }
+
+ // The input is live afterwards, either in future instructions or in a
+ // safepoint for the reusing instruction. This is impossible to satisfy
+ // without copying the input.
+ //
+ // It may or may not be better to split the input into two bundles at the
+ // point of the definition, which may permit merging. One case where it is
+ // definitely better to split is if the input never has any register uses
+ // after the instruction. Handle this splitting eagerly.
+
+ LBlock* block = def.ins()->block();
+
+ // The input's lifetime must end within the same block as the definition,
+ // otherwise it could live on in phis elsewhere.
+ if (inputRange != input.lastRange() || inputRange->to() > exitOf(block)) {
+ def.setMustCopyInput();
+ return true;
+ }
+
+ // If we already split the input for some other register, don't make a
+ // third bundle.
+ if (inputRange->bundle() != input.firstRange()->bundle()) {
+ def.setMustCopyInput();
+ return true;
+ }
+
+ // If the input will start out in memory then adding a separate bundle for
+ // memory uses after the def won't help.
+ if (input.def()->isFixed() && !input.def()->output()->isRegister()) {
+ def.setMustCopyInput();
+ return true;
+ }
+
+ // The input cannot have register or reused uses after the definition.
+ for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
+ if (iter->pos <= inputOf(def.ins())) {
+ continue;
+ }
+
+ LUse* use = iter->use();
+ if (FindReusingDefOrTemp(insData[iter->pos], use)) {
+ def.setMustCopyInput();
+ return true;
+ }
+ if (iter->usePolicy() != LUse::ANY &&
+ iter->usePolicy() != LUse::KEEPALIVE) {
+ def.setMustCopyInput();
+ return true;
+ }
+ }
+
+ LiveRange* preRange = LiveRange::FallibleNew(
+ alloc(), &input, inputRange->from(), outputOf(def.ins()));
+ if (!preRange) {
+ return false;
+ }
+
+ // The new range starts at reg's input position, which means it overlaps
+ // with the old range at one position. This is what we want, because we
+ // need to copy the input before the instruction.
+ LiveRange* postRange = LiveRange::FallibleNew(
+ alloc(), &input, inputOf(def.ins()), inputRange->to());
+ if (!postRange) {
+ return false;
+ }
+
+ inputRange->tryToMoveDefAndUsesInto(preRange);
+ inputRange->tryToMoveDefAndUsesInto(postRange);
+ MOZ_ASSERT(!inputRange->hasUses());
+
+ JitSpewIfEnabled(JitSpew_RegAlloc,
+ " splitting reused input at %u to try to help grouping",
+ inputOf(def.ins()).bits());
+
+ LiveBundle* firstBundle = inputRange->bundle();
+ input.removeRange(inputRange);
+ input.addRange(preRange);
+ input.addRange(postRange);
+
+ firstBundle->removeRange(inputRange);
+ firstBundle->addRange(preRange);
+
+ // The new range goes in a separate bundle, where it will be spilled during
+ // allocation.
+ LiveBundle* secondBundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
+ if (!secondBundle) {
+ return false;
+ }
+ secondBundle->addRange(postRange);
+
+ return tryMergeBundles(def.firstBundle(), input.firstBundle());
+}
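+
+// Illustrative split (hypothetical positions): if the reused input is live
+// over [6,20) and the reusing instruction occupies positions 10-11, the input
+// range becomes preRange = [6,11) and postRange = [10,20). The one-position
+// overlap around the instruction is deliberate: it forces a copy of the input
+// into the output's register before the instruction, while postRange, placed
+// in its own to-be-spilled bundle, keeps the later uses (which the loop above
+// has verified are ANY or KEEPALIVE).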
+
+bool BacktrackingAllocator::mergeAndQueueRegisters() {
+ MOZ_ASSERT(!vregs[0u].hasRanges());
+
+ // Create a bundle for each register containing all its ranges.
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+ if (!reg.hasRanges()) {
+ continue;
+ }
+
+ LiveBundle* bundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
+ if (!bundle) {
+ return false;
+ }
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ bundle->addRange(range);
+ }
+ }
+
+ // If there is an OSR block, merge parameters in that block with the
+ // corresponding parameters in the initial block.
+ if (MBasicBlock* osr = graph.mir().osrBlock()) {
+ size_t original = 1;
+ for (LInstructionIterator iter = osr->lir()->begin();
+ iter != osr->lir()->end(); iter++) {
+ if (iter->isParameter()) {
+ for (size_t i = 0; i < iter->numDefs(); i++) {
+ DebugOnly<bool> found = false;
+ VirtualRegister& paramVreg = vreg(iter->getDef(i));
+ for (; original < paramVreg.vreg(); original++) {
+ VirtualRegister& originalVreg = vregs[original];
+ if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
+ MOZ_ASSERT(originalVreg.ins()->isParameter());
+ if (!tryMergeBundles(originalVreg.firstBundle(),
+ paramVreg.firstBundle())) {
+ return false;
+ }
+ found = true;
+ break;
+ }
+ }
+ MOZ_ASSERT(found);
+ }
+ }
+ }
+ }
+
+ // Try to merge registers with their reused inputs.
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+ if (!reg.hasRanges()) {
+ continue;
+ }
+
+ if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
+ LUse* use = reg.ins()
+ ->toInstruction()
+ ->getOperand(reg.def()->getReusedInput())
+ ->toUse();
+ if (!tryMergeReusedRegister(reg, vreg(use))) {
+ return false;
+ }
+ }
+ }
+
+ // Try to merge phis with their inputs.
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ LBlock* block = graph.getBlock(i);
+ for (size_t j = 0; j < block->numPhis(); j++) {
+ LPhi* phi = block->getPhi(j);
+ VirtualRegister& outputVreg = vreg(phi->getDef(0));
+ for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
+ VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse());
+ if (!tryMergeBundles(inputVreg.firstBundle(),
+ outputVreg.firstBundle())) {
+ return false;
+ }
+ }
+ }
+ }
+
+ // Add all bundles to the allocation queue, and create spill sets for them.
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+
+ // Eagerly allocate stack result areas and their component stack results.
+ if (reg.def() && reg.def()->policy() == LDefinition::STACK) {
+ allocateStackDefinition(reg);
+ }
+
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveBundle* bundle = range->bundle();
+ if (range == bundle->firstRange()) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ SpillSet* spill = SpillSet::New(alloc());
+ if (!spill) {
+ return false;
+ }
+ bundle->setSpillSet(spill);
+
+ size_t priority = computePriority(bundle);
+ if (!allocationQueue.insert(QueueItem(bundle, priority))) {
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Code for the splitting/spilling subsystem begins here. //
+// //
+// The code that follows is structured in the following sequence: //
+// //
+// (1) Routines that are helpers for ::splitAt. //
+// (2) ::splitAt itself, which implements splitting decisions. //
+// (3) heuristic routines (eg ::splitAcrossCalls), which decide where //
+// splits should be made. They then call ::splitAt to perform the //
+// chosen split. //
+// (4) The top level driver, ::chooseBundleSplit. //
+// //
+// There are further comments on ::splitAt and ::chooseBundleSplit below. //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Implementation of splitting decisions, but not the making of those //
+// decisions: various helper functions //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+bool BacktrackingAllocator::updateVirtualRegisterListsThenRequeueBundles(
+ LiveBundle* bundle, const LiveBundleVector& newBundles) {
+#ifdef DEBUG
+ if (newBundles.length() == 1) {
+ LiveBundle* newBundle = newBundles[0];
+ if (newBundle->numRanges() == bundle->numRanges() &&
+ computePriority(newBundle) == computePriority(bundle)) {
+ bool different = false;
+ LiveRange::BundleLinkIterator oldRanges = bundle->rangesBegin();
+ LiveRange::BundleLinkIterator newRanges = newBundle->rangesBegin();
+ while (oldRanges) {
+ LiveRange* oldRange = LiveRange::get(*oldRanges);
+ LiveRange* newRange = LiveRange::get(*newRanges);
+ if (oldRange->from() != newRange->from() ||
+ oldRange->to() != newRange->to()) {
+ different = true;
+ break;
+ }
+ oldRanges++;
+ newRanges++;
+ }
+
+ // This is likely to trigger an infinite loop in register allocation. This
+ // can be the result of invalid register constraints, making regalloc
+ // impossible; consider relaxing those.
+ MOZ_ASSERT(different,
+ "Split results in the same bundle with the same priority");
+ }
+ }
+#endif
+
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ JitSpew(JitSpew_RegAlloc, " .. into:");
+ for (size_t i = 0; i < newBundles.length(); i++) {
+ JitSpew(JitSpew_RegAlloc, " %s", newBundles[i]->toString().get());
+ }
+ }
+
+ // Remove all ranges in the old bundle from their register's list.
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ range->vreg().removeRange(range);
+ }
+
+ // Add all ranges in the new bundles to their register's list.
+ for (size_t i = 0; i < newBundles.length(); i++) {
+ LiveBundle* newBundle = newBundles[i];
+ for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ range->vreg().addRange(range);
+ }
+ }
+
+ // Queue the new bundles for register assignment.
+ for (size_t i = 0; i < newBundles.length(); i++) {
+ LiveBundle* newBundle = newBundles[i];
+ size_t priority = computePriority(newBundle);
+ if (!allocationQueue.insert(QueueItem(newBundle, priority))) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Helper for ::splitAt
+// When splitting a bundle according to a list of split positions, return
+// whether a use or range at |pos| should use a different bundle than the last
+// position this was called for.
+static bool UseNewBundle(const SplitPositionVector& splitPositions,
+ CodePosition pos, size_t* activeSplitPosition) {
+ if (splitPositions.empty()) {
+ // When the split positions are empty we are splitting at all uses.
+ return true;
+ }
+
+ if (*activeSplitPosition == splitPositions.length()) {
+ // We've advanced past all split positions.
+ return false;
+ }
+
+ if (splitPositions[*activeSplitPosition] > pos) {
+ // We haven't gotten to the next split position yet.
+ return false;
+ }
+
+ // We've advanced past the next split position, find the next one which we
+ // should split at.
+ while (*activeSplitPosition < splitPositions.length() &&
+ splitPositions[*activeSplitPosition] <= pos) {
+ (*activeSplitPosition)++;
+ }
+ return true;
+}
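+
+// For example (illustrative positions): with splitPositions = [10, 20],
+// successive calls at pos = 8, 12, 14, 22 return false, true, false, true: a
+// new bundle is started only when at least one split position has been
+// crossed since the previous call.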
+
+// Helper for ::splitAt
+static bool HasPrecedingRangeSharingVreg(LiveBundle* bundle, LiveRange* range) {
+ MOZ_ASSERT(range->bundle() == bundle);
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* prevRange = LiveRange::get(*iter);
+ if (prevRange == range) {
+ return false;
+ }
+ if (&prevRange->vreg() == &range->vreg()) {
+ return true;
+ }
+ }
+
+ MOZ_CRASH();
+}
+
+// Helper for ::splitAt
+static bool HasFollowingRangeSharingVreg(LiveBundle* bundle, LiveRange* range) {
+ MOZ_ASSERT(range->bundle() == bundle);
+
+ bool foundRange = false;
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* prevRange = LiveRange::get(*iter);
+ if (foundRange && &prevRange->vreg() == &range->vreg()) {
+ return true;
+ }
+ if (prevRange == range) {
+ foundRange = true;
+ }
+ }
+
+ MOZ_ASSERT(foundRange);
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Implementation of splitting decisions, but not the making of those //
+// decisions: //
+// ::splitAt //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+// ::splitAt
+// ---------
+// It would be nice to be able to interpret ::splitAt as simply performing
+// whatever split the heuristic routines decide on. Unfortunately it
+// tries to "improve" on the initial locations, which as
+// https://bugzilla.mozilla.org/show_bug.cgi?id=1758274#c17 shows, often
+// leads to excessive spilling. So there is no clean distinction between
+// policy (where to split, computed by the heuristic routines) and
+// implementation (done by ::splitAt).
+//
+// ::splitAt -- creation of spill parent bundles
+// ---------------------------------------------
+// To understand what ::splitAt does, we must refer back to Section 1's
+// description of LiveBundle::spillParent_.
+//
+// Initially (as created by Phase 1), all bundles have `spillParent_` being
+// NULL. If ::splitAt is asked to split such a bundle, it will first create a
+// "spill bundle" or "spill parent" bundle. This is a copy of the original,
+// with two changes:
+//
+// * all register uses have been removed, so that only stack-compatible uses
+// remain.
+//
+// * for all LiveRanges in the bundle that define a register, the start point
+// of the range is moved one CodePosition forwards, thusly:
+//
+// from = minimalDefEnd(insData[from]).next();
+//
+// The reason for the latter relates to the idea described in Section 1, that
+// all LiveRanges for any given VirtualRegister must form a tree rooted at the
+// defining LiveRange. If the spill-bundle definition range start points are
+// the same as those in the original bundle, then we will end up with two roots
+// for the tree, and it is then unclear which one should supply "the value".
+//
+// Putting the spill-bundle start point one CodePosition further along causes
+// the range containing the register def (after splitting) to still be the
+// defining point. ::createMoveGroupsFromLiveRangeTransitions will observe the
+// equivalent spill-bundle range starting one point later and add a MoveGroup
+// to move the value into it. Since the spill bundle is intended to be stack
+// resident, the effect is to force creation of the MoveGroup that will
+// actually spill this value onto the stack.
+//
+// If the bundle provided to ::splitAt already has a spill parent, then
+// ::splitAt doesn't create a new spill parent. This situation will happen
+// when the bundle to be split was itself created by splitting. The effect is
+// that *all* bundles created from an "original bundle" share the same spill
+// parent, and hence they will share the same spill slot, which guarantees that
+// all the spilled fragments of a VirtualRegister share the same spill slot,
+// which means we'll never have to move a VirtualRegister between different
+// spill slots during its lifetime.
+//
+// ::splitAt -- creation of other bundles
+// --------------------------------------
+// With the spill parent bundle question out of the way, ::splitAt then goes on
+// to create the remaining new bundles, using near-incomprehensible logic
+// steered by `UseNewBundle`.
+//
+// This supposedly splits the bundle at the positions given by the
+// `SplitPositionVector` parameter to ::splitAt, putting them in a temporary
+// vector `newBundles`. Whether it really splits at the requested positions or
+// not is hard to say; more important is what happens next.
+//
+// ::splitAt -- "improvement" ("filtering") of the split bundles
+// -------------------------------------------------------------
+// ::splitAt now tries to reduce the length of the LiveRanges that make up the
+// new bundles (not including the "spill parent"). I assume this is to remove
+// sections of the bundles that carry no useful value (eg, extending past the
+// last use in a range), thereby removing the demand for registers in those
+// parts. This does however mean that ::splitAt is no longer really splitting
+// where the heuristic routines wanted, and that can lead to a big increase in
+// spilling in loops, as
+// https://bugzilla.mozilla.org/show_bug.cgi?id=1758274#c17 describes.
+//
+// ::splitAt -- meaning of the incoming `SplitPositionVector`
+// ----------------------------------------------------------
+// ::splitAt has one last mystery which is important to document. The split
+// positions are specified as CodePositions, but this leads to ambiguity
+// because, in a sequence of N (LIR) instructions, there are 2N valid
+// CodePositions. For example:
+//
+// 6-7 WasmLoadTls [def v2<o>] [use v1:R]
+// 8-9 WasmNullConstant [def v3<o>]
+//
+// Consider splitting the range for `v2`, which starts at CodePosition 7.
+// What's the difference between saying "split it at 7" and "split it at 8" ?
+// Not much really, since in both cases what we intend is for the range to be
+// split in between the two instructions.
+//
+// Hence I believe the semantics is:
+//
+// * splitting at an even numbered CodePosition (eg, 8), which is an input-side
+// position, means "split before the instruction containing this position".
+//
+// * splitting at an odd numbered CodePosition (eg, 7), which is an output-side
+// position, means "split after the instruction containing this position".
+//
+// Hence in the example, we could specify either 7 or 8 to mean the same
+// placement of the split. Well, almost true, but actually:
+//
+// (SPEC) specifying 8 means
+//
+// "split between these two insns, and any resulting MoveGroup goes in the
+// list to be emitted before the start of the second insn"
+//
+// (SPEC) specifying 7 means
+//
+// "split between these two insns, and any resulting MoveGroup goes in the
+// list to be emitted after the end of the first insn"
+//
+// In most cases we don't care on which "side of the valley" the MoveGroup ends
+// up, in which case we can use either convention.
+//
+// (SPEC) I believe these semantics are implied by the logic in
+// ::createMoveGroupsFromLiveRangeTransitions. They are certainly not
+// documented anywhere in the code.
+
+bool BacktrackingAllocator::splitAt(LiveBundle* bundle,
+ const SplitPositionVector& splitPositions) {
+ // Split the bundle at the given split points. Register uses which have no
+ // intervening split points are consolidated into the same bundle. If the
+ // list of split points is empty, then all register uses are placed in
+ // minimal bundles.
+
+ // splitPositions should be sorted.
+ for (size_t i = 1; i < splitPositions.length(); ++i) {
+ MOZ_ASSERT(splitPositions[i - 1] < splitPositions[i]);
+ }
+
+ // We don't need to create a new spill bundle if there already is one.
+ bool spillBundleIsNew = false;
+ LiveBundle* spillBundle = bundle->spillParent();
+ if (!spillBundle) {
+ spillBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), nullptr);
+ if (!spillBundle) {
+ return false;
+ }
+ spillBundleIsNew = true;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ CodePosition from = range->from();
+ if (isRegisterDefinition(range)) {
+ from = minimalDefEnd(insData[from]).next();
+ }
+
+ if (from < range->to()) {
+ if (!spillBundle->addRange(alloc(), &range->vreg(), from,
+ range->to())) {
+ return false;
+ }
+
+ if (range->hasDefinition() && !isRegisterDefinition(range)) {
+ spillBundle->lastRange()->setHasDefinition();
+ }
+ }
+ }
+ }
+
+ LiveBundleVector newBundles;
+
+ // The bundle which ranges are currently being added to.
+ LiveBundle* activeBundle =
+ LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
+ if (!activeBundle || !newBundles.append(activeBundle)) {
+ return false;
+ }
+
+ // State for use by UseNewBundle.
+ size_t activeSplitPosition = 0;
+
+ // Make new bundles according to the split positions, and distribute ranges
+ // and uses to them.
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (UseNewBundle(splitPositions, range->from(), &activeSplitPosition)) {
+ activeBundle =
+ LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
+ if (!activeBundle || !newBundles.append(activeBundle)) {
+ return false;
+ }
+ }
+
+ LiveRange* activeRange = LiveRange::FallibleNew(alloc(), &range->vreg(),
+ range->from(), range->to());
+ if (!activeRange) {
+ return false;
+ }
+ activeBundle->addRange(activeRange);
+
+ if (isRegisterDefinition(range)) {
+ activeRange->setHasDefinition();
+ }
+
+ while (range->hasUses()) {
+ UsePosition* use = range->popUse();
+ LNode* ins = insData[use->pos];
+
+ // Any uses of a register that appear before its definition has
+ // finished must be associated with the range for that definition.
+ if (isRegisterDefinition(range) &&
+ use->pos <= minimalDefEnd(insData[range->from()])) {
+ activeRange->addUse(use);
+ } else if (isRegisterUse(use, ins)) {
+ // Place this register use into a different bundle from the
+ // last one if there are any split points between the two uses.
+ // UseNewBundle always returns true if we are splitting at all
+ // register uses, but we can still reuse the last range and
+ // bundle if they have uses at the same position, except when
+ // either use is fixed (the two uses might require incompatible
+ // registers.)
+ if (UseNewBundle(splitPositions, use->pos, &activeSplitPosition) &&
+ (!activeRange->hasUses() ||
+ activeRange->usesBegin()->pos != use->pos ||
+ activeRange->usesBegin()->usePolicy() == LUse::FIXED ||
+ use->usePolicy() == LUse::FIXED)) {
+ activeBundle =
+ LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
+ if (!activeBundle || !newBundles.append(activeBundle)) {
+ return false;
+ }
+ activeRange = LiveRange::FallibleNew(alloc(), &range->vreg(),
+ range->from(), range->to());
+ if (!activeRange) {
+ return false;
+ }
+ activeBundle->addRange(activeRange);
+ }
+
+ activeRange->addUse(use);
+ } else {
+ MOZ_ASSERT(spillBundleIsNew);
+ spillBundle->rangeFor(use->pos)->addUse(use);
+ }
+ }
+ }
+
+ LiveBundleVector filteredBundles;
+
+ // Trim the ends of ranges in each new bundle when there are no other
+ // earlier or later ranges in the same bundle with the same vreg.
+ for (size_t i = 0; i < newBundles.length(); i++) {
+ LiveBundle* bundle = newBundles[i];
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (!range->hasDefinition()) {
+ if (!HasPrecedingRangeSharingVreg(bundle, range)) {
+ if (range->hasUses()) {
+ UsePosition* use = *range->usesBegin();
+ range->setFrom(inputOf(insData[use->pos]));
+ } else {
+ bundle->removeRangeAndIncrementIterator(iter);
+ continue;
+ }
+ }
+ }
+
+ if (!HasFollowingRangeSharingVreg(bundle, range)) {
+ if (range->hasUses()) {
+ UsePosition* use = range->lastUse();
+ range->setTo(use->pos.next());
+ } else if (range->hasDefinition()) {
+ range->setTo(minimalDefEnd(insData[range->from()]).next());
+ } else {
+ bundle->removeRangeAndIncrementIterator(iter);
+ continue;
+ }
+ }
+
+ iter++;
+ }
+
+ if (bundle->hasRanges() && !filteredBundles.append(bundle)) {
+ return false;
+ }
+ }
+
+ if (spillBundleIsNew && !filteredBundles.append(spillBundle)) {
+ return false;
+ }
+
+ return updateVirtualRegisterListsThenRequeueBundles(bundle, filteredBundles);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Creation of splitting decisions, but not their implementation: //
+// ::splitAcrossCalls //
+// ::trySplitAcrossHotcode //
+// ::trySplitAfterLastRegisterUse //
+// ::trySplitBeforeFirstRegisterUse //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+bool BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle) {
+ // Split the bundle to separate register uses and non-register uses and
+ // allow the vreg to be spilled across its range.
+
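+  // For example (hypothetical positions): for a bundle covering 10-50 with
+  // calls at positions 22 and 41, the split list handed to ::splitAt is
+  // [22, 41], so the pieces of the bundle that span those calls can be
+  // spilled rather than held in a register across the call.
+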
+ // Find the locations of all calls in the bundle's range.
+ SplitPositionVector callPositions;
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ CallRange searchRange(range->from(), range->to());
+ CallRange* callRange;
+ if (!callRanges.contains(&searchRange, &callRange)) {
+ // There are no calls inside this range.
+ continue;
+ }
+ MOZ_ASSERT(range->covers(callRange->range.from));
+
+ // The search above returns an arbitrary call within the range. Walk
+ // backwards to find the first call in the range.
+ for (CallRangeList::reverse_iterator riter =
+ callRangesList.rbegin(callRange);
+ riter != callRangesList.rend(); ++riter) {
+ CodePosition pos = riter->range.from;
+ if (range->covers(pos)) {
+ callRange = *riter;
+ } else {
+ break;
+ }
+ }
+
+ // Add all call positions within the range, by walking forwards.
+ for (CallRangeList::iterator iter = callRangesList.begin(callRange);
+ iter != callRangesList.end(); ++iter) {
+ CodePosition pos = iter->range.from;
+ if (!range->covers(pos)) {
+ break;
+ }
+
+ // Calls at the beginning of the range are ignored; there is no splitting
+ // to do.
+ if (range->covers(pos.previous())) {
+ MOZ_ASSERT_IF(callPositions.length(), pos > callPositions.back());
+ if (!callPositions.append(pos)) {
+ return false;
+ }
+ }
+ }
+ }
+ MOZ_ASSERT(callPositions.length());
+
+#ifdef JS_JITSPEW
+ JitSpewStart(JitSpew_RegAlloc, " .. split across calls at ");
+ for (size_t i = 0; i < callPositions.length(); ++i) {
+ JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "",
+ callPositions[i].bits());
+ }
+ JitSpewFin(JitSpew_RegAlloc);
+#endif
+
+ return splitAt(bundle, callPositions);
+}
+
+bool BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle,
+ bool* success) {
+ // If this bundle has portions that are hot and portions that are cold,
+ // split it at the boundaries between hot and cold code.
+
+ LiveRange* hotRange = nullptr;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (hotcode.contains(range, &hotRange)) {
+ break;
+ }
+ }
+
+ // Don't split if there is no hot code in the bundle.
+ if (!hotRange) {
+ JitSpew(JitSpew_RegAlloc, " .. bundle does not contain hot code");
+ return true;
+ }
+
+ // Don't split if there is no cold code in the bundle.
+ bool coldCode = false;
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (!hotRange->contains(range)) {
+ coldCode = true;
+ break;
+ }
+ }
+ if (!coldCode) {
+ JitSpew(JitSpew_RegAlloc, " .. bundle does not contain cold code");
+ return true;
+ }
+
+ JitSpewIfEnabled(JitSpew_RegAlloc, " .. split across hot range %s",
+ hotRange->toString().get());
+
+ // Tweak the splitting method when compiling wasm code to look at actual
+ // uses within the hot/cold code. This heuristic is in place as the below
+ // mechanism regresses several asm.js tests. Hopefully this will be fixed
+ // soon and this special case removed. See bug 948838.
+ if (compilingWasm()) {
+ SplitPositionVector splitPositions;
+ if (!splitPositions.append(hotRange->from()) ||
+ !splitPositions.append(hotRange->to())) {
+ return false;
+ }
+ *success = true;
+ return splitAt(bundle, splitPositions);
+ }
+
+ LiveBundle* hotBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
+ bundle->spillParent());
+ if (!hotBundle) {
+ return false;
+ }
+ LiveBundle* preBundle = nullptr;
+ LiveBundle* postBundle = nullptr;
+ LiveBundle* coldBundle = nullptr;
+
+ if (testbed) {
+ coldBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
+ bundle->spillParent());
+ if (!coldBundle) {
+ return false;
+ }
+ }
+
+ // Accumulate the ranges of hot and cold code in the bundle. Note that
+ // we are only comparing with the single hot range found, so the cold code
+ // may contain separate hot ranges.
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveRange::Range hot, coldPre, coldPost;
+ range->intersect(hotRange, &coldPre, &hot, &coldPost);
+
+ if (!hot.empty()) {
+ if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from,
+ hot.to)) {
+ return false;
+ }
+ }
+
+ if (!coldPre.empty()) {
+ if (testbed) {
+ if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from,
+ coldPre.to)) {
+ return false;
+ }
+ } else {
+ if (!preBundle) {
+ preBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
+ bundle->spillParent());
+ if (!preBundle) {
+ return false;
+ }
+ }
+ if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from,
+ coldPre.to)) {
+ return false;
+ }
+ }
+ }
+
+ if (!coldPost.empty()) {
+ if (testbed) {
+ if (!coldBundle->addRangeAndDistributeUses(
+ alloc(), range, coldPost.from, coldPost.to)) {
+ return false;
+ }
+ } else {
+ if (!postBundle) {
+ postBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
+ bundle->spillParent());
+ if (!postBundle) {
+ return false;
+ }
+ }
+ if (!postBundle->addRangeAndDistributeUses(
+ alloc(), range, coldPost.from, coldPost.to)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ MOZ_ASSERT(hotBundle->numRanges() != 0);
+
+ LiveBundleVector newBundles;
+ if (!newBundles.append(hotBundle)) {
+ return false;
+ }
+
+ if (testbed) {
+ MOZ_ASSERT(coldBundle->numRanges() != 0);
+ if (!newBundles.append(coldBundle)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(preBundle || postBundle);
+ if (preBundle && !newBundles.append(preBundle)) {
+ return false;
+ }
+ if (postBundle && !newBundles.append(postBundle)) {
+ return false;
+ }
+ }
+
+ *success = true;
+ return updateVirtualRegisterListsThenRequeueBundles(bundle, newBundles);
+}
+
+bool BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle,
+ LiveBundle* conflict,
+ bool* success) {
+ // If this bundle's later uses do not require it to be in a register,
+ // split it after the last use which does require a register. If conflict
+ // is specified, only consider register uses before the conflict starts.
+
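+  // For example (hypothetical positions): if the last register-requiring use
+  // is at position 30 and every later use is ANY or KEEPALIVE, we split just
+  // after 30 so that the tail of the bundle can be spilled instead of
+  // occupying a register.
+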
+ CodePosition lastRegisterFrom, lastRegisterTo, lastUse;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ // If the range defines a register, consider that a register use for
+ // our purposes here.
+ if (isRegisterDefinition(range)) {
+ CodePosition spillStart = minimalDefEnd(insData[range->from()]).next();
+ if (!conflict || spillStart < conflict->firstRange()->from()) {
+ lastUse = lastRegisterFrom = range->from();
+ lastRegisterTo = spillStart;
+ }
+ }
+
+ for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+ LNode* ins = insData[iter->pos];
+
+ // Uses in the bundle should be sorted.
+ MOZ_ASSERT(iter->pos >= lastUse);
+ lastUse = inputOf(ins);
+
+ if (!conflict || outputOf(ins) < conflict->firstRange()->from()) {
+ if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
+ lastRegisterFrom = inputOf(ins);
+ lastRegisterTo = iter->pos.next();
+ }
+ }
+ }
+ }
+
+ // Can't trim non-register uses off the end by splitting.
+ if (!lastRegisterFrom.bits()) {
+ JitSpew(JitSpew_RegAlloc, " .. bundle has no register uses");
+ return true;
+ }
+ if (lastUse < lastRegisterTo) {
+ JitSpew(JitSpew_RegAlloc, " .. bundle's last use is a register use");
+ return true;
+ }
+
+ JitSpewIfEnabled(JitSpew_RegAlloc, " .. split after last register use at %u",
+ lastRegisterTo.bits());
+
+ SplitPositionVector splitPositions;
+ if (!splitPositions.append(lastRegisterTo)) {
+ return false;
+ }
+ *success = true;
+ return splitAt(bundle, splitPositions);
+}
+
+bool BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle,
+ LiveBundle* conflict,
+ bool* success) {
+ // If this bundle's earlier uses do not require it to be in a register,
+ // split it before the first use which does require a register. If conflict
+ // is specified, only consider register uses after the conflict ends.
+
+ if (isRegisterDefinition(bundle->firstRange())) {
+ JitSpew(JitSpew_RegAlloc, " .. bundle is defined by a register");
+ return true;
+ }
+ if (!bundle->firstRange()->hasDefinition()) {
+ JitSpew(JitSpew_RegAlloc, " .. bundle does not have definition");
+ return true;
+ }
+
+ CodePosition firstRegisterFrom;
+
+ CodePosition conflictEnd;
+ if (conflict) {
+ for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (range->to() > conflictEnd) {
+ conflictEnd = range->to();
+ }
+ }
+ }
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (!conflict || range->from() > conflictEnd) {
+ if (range->hasDefinition() && isRegisterDefinition(range)) {
+ firstRegisterFrom = range->from();
+ break;
+ }
+ }
+
+ for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+ LNode* ins = insData[iter->pos];
+
+ if (!conflict || outputOf(ins) >= conflictEnd) {
+ if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
+ firstRegisterFrom = inputOf(ins);
+ break;
+ }
+ }
+ }
+ if (firstRegisterFrom.bits()) {
+ break;
+ }
+ }
+
+ if (!firstRegisterFrom.bits()) {
+ // Can't trim non-register uses off the beginning by splitting.
+    JitSpew(JitSpew_RegAlloc, "  .. bundle has no register uses");
+ return true;
+ }
+
+ JitSpewIfEnabled(JitSpew_RegAlloc,
+ " .. split before first register use at %u",
+ firstRegisterFrom.bits());
+
+ SplitPositionVector splitPositions;
+ if (!splitPositions.append(firstRegisterFrom)) {
+ return false;
+ }
+ *success = true;
+ return splitAt(bundle, splitPositions);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// The top level driver for the splitting machinery //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+// ::chooseBundleSplit
+// -------------------
+// If the first allocation loop (in ::go) can't allocate a bundle, it hands it
+// off to ::chooseBundleSplit, which is the "entry point" of the bundle-split
+// machinery. This tries four heuristics in turn, to see if any can split the
+// bundle:
+//
+// * ::trySplitAcrossHotcode
+// * ::splitAcrossCalls (in some cases)
+// * ::trySplitBeforeFirstRegisterUse
+// * ::trySplitAfterLastRegisterUse
+//
+// These routines have similar structure: they try to decide on one or more
+// CodePositions at which to split the bundle, using whatever heuristics they
+// have to hand. If suitable CodePosition(s) are found, they are put into a
+// `SplitPositionVector`, and the bundle and the vector are handed off to
+// ::splitAt, which performs the split (at least in theory) at the chosen
+// positions. It also arranges for the new bundles to be added to
+// ::allocationQueue.
+//
+// ::trySplitAcrossHotcode has a special case for JS -- it modifies the
+// bundle(s) itself, rather than using ::splitAt.
+//
+// If none of the heuristic routines apply, then ::splitAt is called with an
+// empty vector of split points. This is interpreted to mean "split at all
+// register uses". When combined with how ::splitAt works, the effect is to
+// spill the bundle.
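+//
+// (Illustration: with an empty vector, ::splitAt's UseNewBundle check fires at
+// every register use, so the result is a set of small bundles around the
+// register uses plus a spill bundle holding everything else; in effect the
+// original bundle has been spilled.)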
+
+bool BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed,
+ LiveBundle* conflict) {
+ bool success = false;
+
+ JitSpewIfEnabled(JitSpew_RegAlloc, " Splitting %s ..",
+ bundle->toString().get());
+
+ if (!trySplitAcrossHotcode(bundle, &success)) {
+ return false;
+ }
+ if (success) {
+ return true;
+ }
+
+ if (fixed) {
+ return splitAcrossCalls(bundle);
+ }
+
+ if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success)) {
+ return false;
+ }
+ if (success) {
+ return true;
+ }
+
+ if (!trySplitAfterLastRegisterUse(bundle, conflict, &success)) {
+ return false;
+ }
+ if (success) {
+ return true;
+ }
+
+ // Split at all register uses.
+ SplitPositionVector emptyPositions;
+ return splitAt(bundle, emptyPositions);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Bundle allocation //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+static const size_t MAX_ATTEMPTS = 2;
+
+bool BacktrackingAllocator::computeRequirement(LiveBundle* bundle,
+ Requirement* requirement,
+ Requirement* hint) {
+ // Set any requirement or hint on bundle according to its definition and
+ // uses. Return false if there are conflicting requirements which will
+ // require the bundle to be split.
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ VirtualRegister& reg = range->vreg();
+
+ if (range->hasDefinition()) {
+ // Deal with any definition constraints/hints.
+ LDefinition::Policy policy = reg.def()->policy();
+ if (policy == LDefinition::FIXED || policy == LDefinition::STACK) {
+ // Fixed and stack policies get a FIXED requirement. (In the stack
+ // case, the allocation should have been performed already by
+ // mergeAndQueueRegisters.)
+ JitSpewIfEnabled(JitSpew_RegAlloc,
+ " Requirement %s, fixed by definition",
+ reg.def()->output()->toString().get());
+ if (!requirement->merge(Requirement(*reg.def()->output()))) {
+ return false;
+ }
+ } else if (reg.ins()->isPhi()) {
+ // Phis don't have any requirements, but they should prefer their
+ // input allocations. This is captured by the group hints above.
+ } else {
+ // Non-phis get a REGISTER requirement.
+ if (!requirement->merge(Requirement(Requirement::REGISTER))) {
+ return false;
+ }
+ }
+ }
+
+ // Search uses for requirements.
+ for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+ LUse::Policy policy = iter->usePolicy();
+ if (policy == LUse::FIXED) {
+ AnyRegister required = GetFixedRegister(reg.def(), iter->use());
+
+ JitSpewIfEnabled(JitSpew_RegAlloc, " Requirement %s, due to use at %u",
+ required.name(), iter->pos.bits());
+
+ // If there are multiple fixed registers which the bundle is
+ // required to use, fail. The bundle will need to be split before
+ // it can be allocated.
+ if (!requirement->merge(Requirement(LAllocation(required)))) {
+ return false;
+ }
+ } else if (policy == LUse::REGISTER) {
+ if (!requirement->merge(Requirement(Requirement::REGISTER))) {
+ return false;
+ }
+ } else if (policy == LUse::ANY) {
+ // ANY differs from KEEPALIVE by actively preferring a register.
+ if (!hint->merge(Requirement(Requirement::REGISTER))) {
+ return false;
+ }
+ }
+
+ // The only case of STACK use policies is individual stack results using
+ // their containing stack result area, which is given a fixed allocation
+ // above.
+ MOZ_ASSERT_IF(policy == LUse::STACK,
+ requirement->kind() == Requirement::FIXED);
+ MOZ_ASSERT_IF(policy == LUse::STACK,
+ requirement->allocation().isStackArea());
+ }
+ }
+
+ return true;
+}
+
+bool BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r,
+ LiveBundle* bundle,
+ bool* success, bool* pfixed,
+ LiveBundleVector& conflicting) {
+ *success = false;
+
+ if (!r.allocatable) {
+ return true;
+ }
+
+ LiveBundleVector aliasedConflicting;
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveRangePlus rangePlus(range);
+
+ // All ranges in the bundle must be compatible with the physical register.
+ MOZ_ASSERT(range->vreg().isCompatible(r.reg));
+
+ for (size_t a = 0; a < r.reg.numAliased(); a++) {
+ PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
+ LiveRangePlus existingPlus;
+ if (!rAlias.allocations.contains(rangePlus, &existingPlus)) {
+ continue;
+ }
+ const LiveRange* existing = existingPlus.liveRange();
+ if (existing->hasVreg()) {
+ MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
+ bool duplicate = false;
+ for (size_t i = 0; i < aliasedConflicting.length(); i++) {
+ if (aliasedConflicting[i] == existing->bundle()) {
+ duplicate = true;
+ break;
+ }
+ }
+ if (!duplicate && !aliasedConflicting.append(existing->bundle())) {
+ return false;
+ }
+ } else {
+ JitSpewIfEnabled(JitSpew_RegAlloc, " %s collides with fixed use %s",
+ rAlias.reg.name(), existing->toString().get());
+ *pfixed = true;
+ return true;
+ }
+ }
+ }
+
+ if (!aliasedConflicting.empty()) {
+    // One or more aliased registers are allocated to another bundle
+ // overlapping this one. Keep track of the conflicting set, and in the
+ // case of multiple conflicting sets keep track of the set with the
+ // lowest maximum spill weight.
+
+ // The #ifdef guards against "unused variable 'existing'" bustage.
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ if (aliasedConflicting.length() == 1) {
+ LiveBundle* existing = aliasedConflicting[0];
+ JitSpew(JitSpew_RegAlloc, " %s collides with %s [weight %zu]",
+ r.reg.name(), existing->toString().get(),
+ computeSpillWeight(existing));
+ } else {
+ JitSpew(JitSpew_RegAlloc, " %s collides with the following",
+ r.reg.name());
+ for (size_t i = 0; i < aliasedConflicting.length(); i++) {
+ LiveBundle* existing = aliasedConflicting[i];
+ JitSpew(JitSpew_RegAlloc, " %s [weight %zu]",
+ existing->toString().get(), computeSpillWeight(existing));
+ }
+ }
+ }
+#endif
+
+ if (conflicting.empty()) {
+ conflicting = std::move(aliasedConflicting);
+ } else {
+ if (maximumSpillWeight(aliasedConflicting) <
+ maximumSpillWeight(conflicting)) {
+ conflicting = std::move(aliasedConflicting);
+ }
+ }
+ return true;
+ }
+
+ JitSpewIfEnabled(JitSpew_RegAlloc, " allocated to %s", r.reg.name());
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ LiveRangePlus rangePlus(range);
+ if (!r.allocations.insert(rangePlus)) {
+ return false;
+ }
+ }
+
+ bundle->setAllocation(LAllocation(r.reg));
+ *success = true;
+ return true;
+}
+
+bool BacktrackingAllocator::tryAllocateAnyRegister(
+ LiveBundle* bundle, bool* success, bool* pfixed,
+ LiveBundleVector& conflicting) {
+ // Search for any available register which the bundle can be allocated to.
+
+ LDefinition::Type type = bundle->firstRange()->vreg().type();
+
+ if (LDefinition::isFloatReg(type)) {
+ for (size_t i = AnyRegister::FirstFloatReg; i < AnyRegister::Total; i++) {
+ if (!LDefinition::isFloatRegCompatible(type, registers[i].reg.fpu())) {
+ continue;
+ }
+ if (!tryAllocateRegister(registers[i], bundle, success, pfixed,
+ conflicting)) {
+ return false;
+ }
+ if (*success) {
+ break;
+ }
+ }
+ return true;
+ }
+
+ for (size_t i = 0; i < AnyRegister::FirstFloatReg; i++) {
+ if (!tryAllocateRegister(registers[i], bundle, success, pfixed,
+ conflicting)) {
+ return false;
+ }
+ if (*success) {
+ break;
+ }
+ }
+ return true;
+}
+
+bool BacktrackingAllocator::evictBundle(LiveBundle* bundle) {
+ JitSpewIfEnabled(JitSpew_RegAlloc,
+ " Evicting %s [priority %zu] [weight %zu]",
+ bundle->toString().get(), computePriority(bundle),
+ computeSpillWeight(bundle));
+
+ AnyRegister reg(bundle->allocation().toRegister());
+ PhysicalRegister& physical = registers[reg.code()];
+ MOZ_ASSERT(physical.reg == reg && physical.allocatable);
+
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveRangePlus rangePlus(range);
+ physical.allocations.remove(rangePlus);
+ }
+
+ bundle->setAllocation(LAllocation());
+
+ size_t priority = computePriority(bundle);
+ return allocationQueue.insert(QueueItem(bundle, priority));
+}
+
+bool BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle,
+ Requirement requirement,
+ bool* success, bool* pfixed,
+ LiveBundleVector& conflicting) {
+ // Spill bundles which are required to be in a certain stack slot.
+ if (!requirement.allocation().isRegister()) {
+ JitSpew(JitSpew_RegAlloc, " stack allocation requirement");
+ bundle->setAllocation(requirement.allocation());
+ *success = true;
+ return true;
+ }
+
+ AnyRegister reg = requirement.allocation().toRegister();
+ return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed,
+ conflicting);
+}
+
+bool BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle,
+ Requirement requirement,
+ Requirement hint, bool* success,
+ bool* pfixed,
+ LiveBundleVector& conflicting) {
+  // If we want, but do not require, a bundle to be in a specific register,
+ // only look at that register for allocating and evict or spill if it is
+ // not available. Picking a separate register may be even worse than
+ // spilling, as it will still necessitate moves and will tie up more
+ // registers than if we spilled.
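+  //
+  // Roughly: try the hinted register first (if any); postpone spilling when
+  // the bundle has no register requirement or hint; otherwise try every
+  // compatible register, and if nothing succeeds report failure so the caller
+  // can evict a conflicting bundle or split this one.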
+ if (hint.kind() == Requirement::FIXED) {
+ AnyRegister reg = hint.allocation().toRegister();
+ if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed,
+ conflicting)) {
+ return false;
+ }
+ if (*success) {
+ return true;
+ }
+ }
+
+ // Spill bundles which have no hint or register requirement.
+ if (requirement.kind() == Requirement::NONE &&
+ hint.kind() != Requirement::REGISTER) {
+ JitSpew(JitSpew_RegAlloc,
+ " postponed spill (no hint or register requirement)");
+ if (!spilledBundles.append(bundle)) {
+ return false;
+ }
+ *success = true;
+ return true;
+ }
+
+ if (conflicting.empty() || minimalBundle(bundle)) {
+ if (!tryAllocateAnyRegister(bundle, success, pfixed, conflicting)) {
+ return false;
+ }
+ if (*success) {
+ return true;
+ }
+ }
+
+ // Spill bundles which have no register requirement if they didn't get
+ // allocated.
+ if (requirement.kind() == Requirement::NONE) {
+ JitSpew(JitSpew_RegAlloc, " postponed spill (no register requirement)");
+ if (!spilledBundles.append(bundle)) {
+ return false;
+ }
+ *success = true;
+ return true;
+ }
+
+ // We failed to allocate this bundle.
+ MOZ_ASSERT(!*success);
+ return true;
+}
+
+bool BacktrackingAllocator::processBundle(MIRGenerator* mir,
+ LiveBundle* bundle) {
+ JitSpewIfEnabled(JitSpew_RegAlloc,
+ "Allocating %s [priority %zu] [weight %zu]",
+ bundle->toString().get(), computePriority(bundle),
+ computeSpillWeight(bundle));
+
+ // A bundle can be processed by doing any of the following:
+ //
+ // - Assigning the bundle a register. The bundle cannot overlap any other
+ // bundle allocated for that physical register.
+ //
+ // - Spilling the bundle, provided it has no register uses.
+ //
+ // - Splitting the bundle into two or more bundles which cover the original
+ // one. The new bundles are placed back onto the priority queue for later
+ // processing.
+ //
+ // - Evicting one or more existing allocated bundles, and then doing one
+ // of the above operations. Evicted bundles are placed back on the
+ // priority queue. Any evicted bundles must have a lower spill weight
+ // than the bundle being processed.
+ //
+ // As long as this structure is followed, termination is guaranteed.
+ // In general, we want to minimize the amount of bundle splitting (which
+ // generally necessitates spills), so allocate longer lived, lower weight
+ // bundles first and evict and split them later if they prevent allocation
+ // for higher weight bundles.
+
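+  // For instance (hypothetical): if this bundle needs a register but every
+  // candidate is occupied, and some occupying bundle has a lower spill weight,
+  // that bundle is evicted (requeued) and the allocation is retried; this is
+  // the backtracking that gives the allocator its name.
+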
+ Requirement requirement, hint;
+ bool canAllocate = computeRequirement(bundle, &requirement, &hint);
+
+ bool fixed;
+ LiveBundleVector conflicting;
+ for (size_t attempt = 0;; attempt++) {
+ if (mir->shouldCancel("Backtracking Allocation (processBundle loop)")) {
+ return false;
+ }
+
+ if (canAllocate) {
+ bool success = false;
+ fixed = false;
+ conflicting.clear();
+
+ // Ok, let's try allocating for this bundle.
+ if (requirement.kind() == Requirement::FIXED) {
+ if (!tryAllocateFixed(bundle, requirement, &success, &fixed,
+ conflicting)) {
+ return false;
+ }
+ } else {
+ if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed,
+ conflicting)) {
+ return false;
+ }
+ }
+
+ // If that worked, we're done!
+ if (success) {
+ return true;
+ }
+
+ // If that didn't work, but we have one or more non-fixed bundles
+ // known to be conflicting, maybe we can evict them and try again.
+ if ((attempt < MAX_ATTEMPTS || minimalBundle(bundle)) && !fixed &&
+ !conflicting.empty() &&
+ maximumSpillWeight(conflicting) < computeSpillWeight(bundle)) {
+ for (size_t i = 0; i < conflicting.length(); i++) {
+ if (!evictBundle(conflicting[i])) {
+ return false;
+ }
+ }
+ continue;
+ }
+ }
+
+    // A minimal bundle cannot be split any further. If we try to split it
+    // at this point we will just end up with the same bundle and will enter
+    // an infinite loop. Weights and the initial live ranges must
+ // be constructed so that any minimal bundle is allocatable.
+ MOZ_ASSERT(!minimalBundle(bundle));
+
+ LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0];
+ return chooseBundleSplit(bundle, canAllocate && fixed, conflict);
+ }
+}
+
+// Helper for ::tryAllocatingRegistersForSpillBundles
+bool BacktrackingAllocator::spill(LiveBundle* bundle) {
+ JitSpew(JitSpew_RegAlloc, " Spilling bundle");
+ MOZ_ASSERT(bundle->allocation().isBogus());
+
+ if (LiveBundle* spillParent = bundle->spillParent()) {
+ JitSpew(JitSpew_RegAlloc, " Using existing spill bundle");
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveRange* parentRange = spillParent->rangeFor(range->from());
+ MOZ_ASSERT(parentRange->contains(range));
+ MOZ_ASSERT(&range->vreg() == &parentRange->vreg());
+ range->tryToMoveDefAndUsesInto(parentRange);
+ MOZ_ASSERT(!range->hasUses());
+ range->vreg().removeRange(range);
+ }
+ return true;
+ }
+
+ return bundle->spillSet()->addSpilledBundle(bundle);
+}
+
+bool BacktrackingAllocator::tryAllocatingRegistersForSpillBundles() {
+ for (auto it = spilledBundles.begin(); it != spilledBundles.end(); it++) {
+ LiveBundle* bundle = *it;
+ LiveBundleVector conflicting;
+ bool fixed = false;
+ bool success = false;
+
+ if (mir->shouldCancel("Backtracking Try Allocating Spilled Bundles")) {
+ return false;
+ }
+
+ JitSpewIfEnabled(JitSpew_RegAlloc, "Spill or allocate %s",
+ bundle->toString().get());
+
+ if (!tryAllocateAnyRegister(bundle, &success, &fixed, conflicting)) {
+ return false;
+ }
+
+ // If the bundle still has no register, spill the bundle.
+ if (!success && !spill(bundle)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Rewriting of the LIR after bundle processing is done: //
+// ::pickStackSlots //
+// ::createMoveGroupsFromLiveRangeTransitions //
+// ::installAllocationsInLIR //
+// ::populateSafepoints //
+// ::annotateMoveGroups //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+// Helper for ::pickStackSlot
+bool BacktrackingAllocator::insertAllRanges(LiveRangePlusSet& set,
+ LiveBundle* bundle) {
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ LiveRangePlus rangePlus(range);
+ if (!set.insert(rangePlus)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Helper for ::pickStackSlots
+bool BacktrackingAllocator::pickStackSlot(SpillSet* spillSet) {
+ // Look through all ranges that have been spilled in this set for a
+ // register definition which is fixed to a stack or argument slot. If we
+ // find one, use it for all bundles that have been spilled. tryMergeBundles
+ // makes sure this reuse is possible when an initial bundle contains ranges
+ // from multiple virtual registers.
+ for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+ LiveBundle* bundle = spillSet->spilledBundle(i);
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ if (range->hasDefinition()) {
+ LDefinition* def = range->vreg().def();
+ if (def->policy() == LDefinition::FIXED) {
+ MOZ_ASSERT(!def->output()->isRegister());
+ MOZ_ASSERT(!def->output()->isStackSlot());
+ spillSet->setAllocation(*def->output());
+ return true;
+ }
+ }
+ }
+ }
+
+ LDefinition::Type type =
+ spillSet->spilledBundle(0)->firstRange()->vreg().type();
+
+ SpillSlotList* slotList;
+ switch (StackSlotAllocator::width(type)) {
+ case 4:
+ slotList = &normalSlots;
+ break;
+ case 8:
+ slotList = &doubleSlots;
+ break;
+ case 16:
+ slotList = &quadSlots;
+ break;
+ default:
+ MOZ_CRASH("Bad width");
+ }
+
+ // Maximum number of existing spill slots we will look at before giving up
+ // and allocating a new slot.
+ static const size_t MAX_SEARCH_COUNT = 10;
+
+ size_t searches = 0;
+ SpillSlot* stop = nullptr;
+ while (!slotList->empty()) {
+ SpillSlot* spillSlot = *slotList->begin();
+ if (!stop) {
+ stop = spillSlot;
+ } else if (stop == spillSlot) {
+ // We looked through every slot in the list.
+ break;
+ }
+
+ bool success = true;
+ for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+ LiveBundle* bundle = spillSet->spilledBundle(i);
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveRangePlus rangePlus(range);
+ LiveRangePlus existingPlus;
+ if (spillSlot->allocated.contains(rangePlus, &existingPlus)) {
+ success = false;
+ break;
+ }
+ }
+ if (!success) {
+ break;
+ }
+ }
+ if (success) {
+ // We can reuse this physical stack slot for the new bundles.
+ // Update the allocated ranges for the slot.
+ for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+ LiveBundle* bundle = spillSet->spilledBundle(i);
+ if (!insertAllRanges(spillSlot->allocated, bundle)) {
+ return false;
+ }
+ }
+ spillSet->setAllocation(spillSlot->alloc);
+ return true;
+ }
+
+    // On a miss, move the spill slot to the end of the list. This will cause
+    // us to make fewer attempts to allocate from slots with a large and
+    // highly contended range.
+ slotList->popFront();
+ slotList->pushBack(spillSlot);
+
+ if (++searches == MAX_SEARCH_COUNT) {
+ break;
+ }
+ }
+
+ // We need a new physical stack slot.
+ uint32_t stackSlot = stackSlotAllocator.allocateSlot(type);
+
+ SpillSlot* spillSlot =
+ new (alloc().fallible()) SpillSlot(stackSlot, alloc().lifoAlloc());
+ if (!spillSlot) {
+ return false;
+ }
+
+ for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+ LiveBundle* bundle = spillSet->spilledBundle(i);
+ if (!insertAllRanges(spillSlot->allocated, bundle)) {
+ return false;
+ }
+ }
+
+ spillSet->setAllocation(spillSlot->alloc);
+
+ slotList->pushFront(spillSlot);
+ return true;
+}
+
+bool BacktrackingAllocator::pickStackSlots() {
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+
+ if (mir->shouldCancel("Backtracking Pick Stack Slots")) {
+ return false;
+ }
+
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+ LiveBundle* bundle = range->bundle();
+
+ if (bundle->allocation().isBogus()) {
+ if (!pickStackSlot(bundle->spillSet())) {
+ return false;
+ }
+ MOZ_ASSERT(!bundle->allocation().isBogus());
+ }
+ }
+ }
+
+ return true;
+}
+
+// Helper for ::createMoveGroupsFromLiveRangeTransitions
+bool BacktrackingAllocator::moveAtEdge(LBlock* predecessor, LBlock* successor,
+ LiveRange* from, LiveRange* to,
+ LDefinition::Type type) {
+ if (successor->mir()->numPredecessors() > 1) {
+ MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
+ return moveAtExit(predecessor, from, to, type);
+ }
+
+ return moveAtEntry(successor, from, to, type);
+}
+
+// Helper for ::createMoveGroupsFromLiveRangeTransitions
+bool BacktrackingAllocator::deadRange(LiveRange* range) {
+ // Check for direct uses of this range.
+ if (range->hasUses() || range->hasDefinition()) {
+ return false;
+ }
+
+ CodePosition start = range->from();
+ LNode* ins = insData[start];
+ if (start == entryOf(ins->block())) {
+ return false;
+ }
+
+ VirtualRegister& reg = range->vreg();
+
+ // Check if there are later ranges for this vreg.
+ LiveRange::RegisterLinkIterator iter = reg.rangesBegin(range);
+ for (iter++; iter; iter++) {
+ LiveRange* laterRange = LiveRange::get(*iter);
+ if (laterRange->from() > range->from()) {
+ return false;
+ }
+ }
+
+ // Check if this range ends at a loop backedge.
+ LNode* last = insData[range->to().previous()];
+ if (last->isGoto() &&
+ last->toGoto()->target()->id() < last->block()->mir()->id()) {
+ return false;
+ }
+
+ // Check if there are phis which this vreg flows to.
+ if (reg.usedByPhi()) {
+ return false;
+ }
+
+ return true;
+}
+
+bool BacktrackingAllocator::createMoveGroupsFromLiveRangeTransitions() {
+ // Add moves to handle changing assignments for vregs over their lifetime.
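+  // This happens in three passes: adding moves within blocks where a vreg
+  // changes allocation mid-block, adding moves that implement phi nodes, and
+  // adding moves to fix up control flow edges whose source and target blocks
+  // use different allocations.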
+ JitSpew(JitSpew_RegAlloc, "ResolveControlFlow: begin");
+
+ JitSpew(JitSpew_RegAlloc,
+ " ResolveControlFlow: adding MoveGroups within blocks");
+
+ // Look for places where a register's assignment changes in the middle of a
+ // basic block.
+ MOZ_ASSERT(!vregs[0u].hasRanges());
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+
+ if (mir->shouldCancel(
+ "Backtracking Resolve Control Flow (vreg outer loop)")) {
+ return false;
+ }
+
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (mir->shouldCancel(
+ "Backtracking Resolve Control Flow (vreg inner loop)")) {
+ return false;
+ }
+
+ // Remove ranges which will never be used.
+ if (deadRange(range)) {
+ reg.removeRangeAndIncrement(iter);
+ continue;
+ }
+
+ // The range which defines the register does not have a predecessor
+ // to add moves from.
+ if (range->hasDefinition()) {
+ iter++;
+ continue;
+ }
+
+ // Ignore ranges that start at block boundaries. We will handle
+ // these in the next phase.
+ CodePosition start = range->from();
+ LNode* ins = insData[start];
+ if (start == entryOf(ins->block())) {
+ iter++;
+ continue;
+ }
+
+ // If we already saw a range which covers the start of this range
+ // and has the same allocation, we don't need an explicit move at
+ // the start of this range.
+ bool skip = false;
+ for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin();
+ prevIter != iter; prevIter++) {
+ LiveRange* prevRange = LiveRange::get(*prevIter);
+ if (prevRange->covers(start) && prevRange->bundle()->allocation() ==
+ range->bundle()->allocation()) {
+ skip = true;
+ break;
+ }
+ }
+ if (skip) {
+ iter++;
+ continue;
+ }
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ LiveRange* predecessorRange =
+ reg.rangeFor(start.previous(), /* preferRegister = */ true);
+ if (start.subpos() == CodePosition::INPUT) {
+ JitSpewIfEnabled(JitSpew_RegAlloc, " moveInput (%s) <- (%s)",
+ range->toString().get(),
+ predecessorRange->toString().get());
+ if (!moveInput(ins->toInstruction(), predecessorRange, range,
+ reg.type())) {
+ return false;
+ }
+ } else {
+ JitSpew(JitSpew_RegAlloc, " (moveAfter)");
+ if (!moveAfter(ins->toInstruction(), predecessorRange, range,
+ reg.type())) {
+ return false;
+ }
+ }
+
+ iter++;
+ }
+ }
+
+ JitSpew(JitSpew_RegAlloc,
+ " ResolveControlFlow: adding MoveGroups for phi nodes");
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)")) {
+ return false;
+ }
+
+ LBlock* successor = graph.getBlock(i);
+ MBasicBlock* mSuccessor = successor->mir();
+ if (mSuccessor->numPredecessors() < 1) {
+ continue;
+ }
+
+ // Resolve phis to moves.
+ for (size_t j = 0; j < successor->numPhis(); j++) {
+ LPhi* phi = successor->getPhi(j);
+ MOZ_ASSERT(phi->numDefs() == 1);
+ LDefinition* def = phi->getDef(0);
+ VirtualRegister& reg = vreg(def);
+ LiveRange* to = reg.rangeFor(entryOf(successor));
+ MOZ_ASSERT(to);
+
+ for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
+ LBlock* predecessor = mSuccessor->getPredecessor(k)->lir();
+ MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
+
+ LAllocation* input = phi->getOperand(k);
+ LiveRange* from = vreg(input).rangeFor(exitOf(predecessor),
+ /* preferRegister = */ true);
+ MOZ_ASSERT(from);
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ // Note: we have to use moveAtEdge both here and below (for edge
+ // resolution) to avoid conflicting moves. See bug 1493900.
+ JitSpew(JitSpew_RegAlloc, " (moveAtEdge#1)");
+ if (!moveAtEdge(predecessor, successor, from, to, def->type())) {
+ return false;
+ }
+ }
+ }
+ }
+
+ JitSpew(JitSpew_RegAlloc,
+ " ResolveControlFlow: adding MoveGroups to fix conflicted edges");
+
+ // Add moves to resolve graph edges with different allocations at their
+ // source and target.
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
+ iter++) {
+ LiveRange* targetRange = LiveRange::get(*iter);
+
+ size_t firstBlockId = insData[targetRange->from()]->block()->mir()->id();
+ if (!targetRange->covers(entryOf(graph.getBlock(firstBlockId)))) {
+ firstBlockId++;
+ }
+ for (size_t id = firstBlockId; id < graph.numBlocks(); id++) {
+ LBlock* successor = graph.getBlock(id);
+ if (!targetRange->covers(entryOf(successor))) {
+ break;
+ }
+
+ BitSet& live = liveIn[id];
+ if (!live.contains(i)) {
+ continue;
+ }
+
+ for (size_t j = 0; j < successor->mir()->numPredecessors(); j++) {
+ LBlock* predecessor = successor->mir()->getPredecessor(j)->lir();
+ if (targetRange->covers(exitOf(predecessor))) {
+ continue;
+ }
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ JitSpew(JitSpew_RegAlloc, " (moveAtEdge#2)");
+ LiveRange* from = reg.rangeFor(exitOf(predecessor), true);
+ if (!moveAtEdge(predecessor, successor, from, targetRange,
+ reg.type())) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ JitSpew(JitSpew_RegAlloc, "ResolveControlFlow: end");
+ return true;
+}
+
+// Helper for ::addLiveRegistersForRange
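+// Returns the index of the first non-call safepoint whose position is at or
+// after |from|, or the total count if there is none.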
+size_t BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from) {
+ size_t i = 0;
+ for (; i < graph.numNonCallSafepoints(); i++) {
+ const LInstruction* ins = graph.getNonCallSafepoint(i);
+ if (from <= inputOf(ins)) {
+ break;
+ }
+ }
+ return i;
+}
+
+// Helper for ::installAllocationsInLIR
+void BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg,
+ LiveRange* range) {
+ // Fill in the live register sets for all non-call safepoints.
+ LAllocation a = range->bundle()->allocation();
+ if (!a.isRegister()) {
+ return;
+ }
+
+ // Don't add output registers to the safepoint.
+ CodePosition start = range->from();
+ if (range->hasDefinition() && !reg.isTemp()) {
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // We don't add the output register to the safepoint,
+ // but it still might get added as one of the inputs.
+ // So eagerly add this reg to the safepoint clobbered registers.
+ if (reg.ins()->isInstruction()) {
+ if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint()) {
+ safepoint->addClobberedRegister(a.toRegister());
+ }
+ }
+#endif
+ start = start.next();
+ }
+
+ size_t i = findFirstNonCallSafepoint(start);
+ for (; i < graph.numNonCallSafepoints(); i++) {
+ LInstruction* ins = graph.getNonCallSafepoint(i);
+ CodePosition pos = inputOf(ins);
+
+ // Safepoints are sorted, so we can shortcut out of this loop
+ // if we go out of range.
+ if (range->to() <= pos) {
+ break;
+ }
+
+ MOZ_ASSERT(range->covers(pos));
+
+ LSafepoint* safepoint = ins->safepoint();
+ safepoint->addLiveRegister(a.toRegister());
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (reg.isTemp()) {
+ safepoint->addClobberedRegister(a.toRegister());
+ }
+#endif
+ }
+}
+
+// Helper for ::installAllocationsInLIR
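+// Counts how many of |ins|'s definitions have the MUST_REUSE_INPUT policy.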
+static inline size_t NumReusingDefs(LInstruction* ins) {
+ size_t num = 0;
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
+ num++;
+ }
+ }
+ return num;
+}
+
+bool BacktrackingAllocator::installAllocationsInLIR() {
+ JitSpew(JitSpew_RegAlloc, "Installing Allocations");
+
+ MOZ_ASSERT(!vregs[0u].hasRanges());
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+
+ if (mir->shouldCancel("Backtracking Install Allocations (main loop)")) {
+ return false;
+ }
+
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ if (range->hasDefinition()) {
+ reg.def()->setOutput(range->bundle()->allocation());
+ if (reg.ins()->recoversInput()) {
+ LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot();
+ for (size_t i = 0; i < snapshot->numEntries(); i++) {
+ LAllocation* entry = snapshot->getEntry(i);
+ if (entry->isUse() &&
+ entry->toUse()->policy() == LUse::RECOVERED_INPUT) {
+ *entry = *reg.def()->output();
+ }
+ }
+ }
+ }
+
+ for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+ LAllocation* alloc = iter->use();
+ *alloc = range->bundle()->allocation();
+
+ // For any uses which feed into MUST_REUSE_INPUT definitions,
+ // add copies if the use and def have different allocations.
+ LNode* ins = insData[iter->pos];
+ if (LDefinition* def = FindReusingDefOrTemp(ins, alloc)) {
+ LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins));
+ LAllocation res = outputRange->bundle()->allocation();
+ LAllocation sourceAlloc = range->bundle()->allocation();
+
+ if (res != *alloc) {
+ if (!this->alloc().ensureBallast()) {
+ return false;
+ }
+ if (NumReusingDefs(ins->toInstruction()) <= 1) {
+ LMoveGroup* group = getInputMoveGroup(ins->toInstruction());
+ if (!group->addAfter(sourceAlloc, res, reg.type())) {
+ return false;
+ }
+ } else {
+ LMoveGroup* group = getFixReuseMoveGroup(ins->toInstruction());
+ if (!group->add(sourceAlloc, res, reg.type())) {
+ return false;
+ }
+ }
+ *alloc = res;
+ }
+ }
+ }
+
+ addLiveRegistersForRange(reg, range);
+ }
+ }
+
+ graph.setLocalSlotsSize(stackSlotAllocator.stackHeight());
+ return true;
+}
+
+// Helper for ::populateSafepoints
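+// Returns the index of the first safepoint at or after |pos|, resuming the
+// search from |startFrom|; returns the total count if there is none.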
+size_t BacktrackingAllocator::findFirstSafepoint(CodePosition pos,
+ size_t startFrom) {
+ size_t i = startFrom;
+ for (; i < graph.numSafepoints(); i++) {
+ LInstruction* ins = graph.getSafepoint(i);
+ if (pos <= inputOf(ins)) {
+ break;
+ }
+ }
+ return i;
+}
+
+// Helper for ::populateSafepoints
+static inline bool IsNunbox(VirtualRegister& reg) {
+#ifdef JS_NUNBOX32
+ return reg.type() == LDefinition::TYPE || reg.type() == LDefinition::PAYLOAD;
+#else
+ return false;
+#endif
+}
+
+// Helper for ::populateSafepoints
+static inline bool IsSlotsOrElements(VirtualRegister& reg) {
+ return reg.type() == LDefinition::SLOTS;
+}
+
+// Helper for ::populateSafepoints
+static inline bool IsTraceable(VirtualRegister& reg) {
+ if (reg.type() == LDefinition::OBJECT) {
+ return true;
+ }
+#ifdef JS_PUNBOX64
+ if (reg.type() == LDefinition::BOX) {
+ return true;
+ }
+#endif
+ if (reg.type() == LDefinition::STACKRESULTS) {
+ MOZ_ASSERT(reg.def());
+ const LStackArea* alloc = reg.def()->output()->toStackArea();
+ for (auto iter = alloc->results(); iter; iter.next()) {
+ if (iter.isGcPointer()) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
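+// Record in each instruction's safepoint which allocations hold GC things
+// (objects, boxed values, nunbox halves, slots/elements pointers, GC-pointer
+// stack results) so the collector can trace them.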
+bool BacktrackingAllocator::populateSafepoints() {
+ JitSpew(JitSpew_RegAlloc, "Populating Safepoints");
+
+ size_t firstSafepoint = 0;
+
+ MOZ_ASSERT(!vregs[0u].def());
+ for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+
+ if (!reg.def() ||
+ (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg))) {
+ continue;
+ }
+
+ firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
+ if (firstSafepoint >= graph.numSafepoints()) {
+ break;
+ }
+
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
+ LInstruction* ins = graph.getSafepoint(j);
+
+ if (!range->covers(inputOf(ins))) {
+ if (inputOf(ins) >= range->to()) {
+ break;
+ }
+ continue;
+ }
+
+ // Include temps but not instruction outputs. Also make sure
+ // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
+ // we would have to add the input reg to this safepoint.
+ if (ins == reg.ins() && !reg.isTemp()) {
+ DebugOnly<LDefinition*> def = reg.def();
+ MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
+ def->type() == LDefinition::GENERAL ||
+ def->type() == LDefinition::INT32 ||
+ def->type() == LDefinition::FLOAT32 ||
+ def->type() == LDefinition::DOUBLE ||
+ def->type() == LDefinition::SIMD128);
+ continue;
+ }
+
+ LSafepoint* safepoint = ins->safepoint();
+
+ LAllocation a = range->bundle()->allocation();
+ if (a.isGeneralReg() && ins->isCall()) {
+ continue;
+ }
+
+ switch (reg.type()) {
+ case LDefinition::OBJECT:
+ if (!safepoint->addGcPointer(a)) {
+ return false;
+ }
+ break;
+ case LDefinition::SLOTS:
+ if (!safepoint->addSlotsOrElementsPointer(a)) {
+ return false;
+ }
+ break;
+ case LDefinition::STACKRESULTS: {
+ MOZ_ASSERT(a.isStackArea());
+ for (auto iter = a.toStackArea()->results(); iter; iter.next()) {
+ if (iter.isGcPointer()) {
+ if (!safepoint->addGcPointer(iter.alloc())) {
+ return false;
+ }
+ }
+ }
+ break;
+ }
+#ifdef JS_NUNBOX32
+ case LDefinition::TYPE:
+ if (!safepoint->addNunboxType(i, a)) {
+ return false;
+ }
+ break;
+ case LDefinition::PAYLOAD:
+ if (!safepoint->addNunboxPayload(i, a)) {
+ return false;
+ }
+ break;
+#else
+ case LDefinition::BOX:
+ if (!safepoint->addBoxedValue(a)) {
+ return false;
+ }
+ break;
+#endif
+ default:
+ MOZ_CRASH("Bad register type");
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+bool BacktrackingAllocator::annotateMoveGroups() {
+ // Annotate move groups in the LIR graph with any register that is not
+ // allocated at that point and can be used as a scratch register. This is
+ // only required for x86, as other platforms always have scratch registers
+ // available for use.
+#ifdef JS_CODEGEN_X86
+ LiveRange* range = LiveRange::FallibleNew(alloc(), nullptr, CodePosition(),
+ CodePosition().next());
+ if (!range) {
+ return false;
+ }
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ if (mir->shouldCancel("Backtracking Annotate Move Groups")) {
+ return false;
+ }
+
+ LBlock* block = graph.getBlock(i);
+ LInstruction* last = nullptr;
+ for (LInstructionIterator iter = block->begin(); iter != block->end();
+ ++iter) {
+ if (iter->isMoveGroup()) {
+ CodePosition from = last ? outputOf(last) : entryOf(block);
+ range->setTo(from.next());
+ range->setFrom(from);
+
+ for (size_t i = 0; i < AnyRegister::Total; i++) {
+ PhysicalRegister& reg = registers[i];
+ if (reg.reg.isFloat() || !reg.allocatable) {
+ continue;
+ }
+
+ // This register is unavailable for use if (a) it is in use
+ // by some live range immediately before the move group,
+ // or (b) it is an operand in one of the group's moves. The
+ // latter case handles live ranges which end immediately
+ // before the move group or start immediately after.
+ // For (b) we need to consider move groups immediately
+ // preceding or following this one.
+
+ if (iter->toMoveGroup()->uses(reg.reg.gpr())) {
+ continue;
+ }
+ bool found = false;
+ LInstructionIterator niter(iter);
+ for (niter++; niter != block->end(); niter++) {
+ if (niter->isMoveGroup()) {
+ if (niter->toMoveGroup()->uses(reg.reg.gpr())) {
+ found = true;
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ if (iter != block->begin()) {
+ LInstructionIterator riter(iter);
+ do {
+ riter--;
+ if (riter->isMoveGroup()) {
+ if (riter->toMoveGroup()->uses(reg.reg.gpr())) {
+ found = true;
+ break;
+ }
+ } else {
+ break;
+ }
+ } while (riter != block->begin());
+ }
+
+ if (found) {
+ continue;
+ }
+ LiveRangePlus existingPlus;
+ LiveRangePlus rangePlus(range);
+ if (reg.allocations.contains(rangePlus, &existingPlus)) {
+ continue;
+ }
+
+ iter->toMoveGroup()->setScratchRegister(reg.reg.gpr());
+ break;
+ }
+ } else {
+ last = *iter;
+ }
+ }
+ }
+#endif
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Debug-printing support //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef JS_JITSPEW
+
+UniqueChars LiveRange::toString() const {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ UniqueChars buf = JS_smprintf("v%u %u-%u", hasVreg() ? vreg().vreg() : 0,
+ from().bits(), to().bits() - 1);
+
+ if (buf && bundle() && !bundle()->allocation().isBogus()) {
+ buf = JS_sprintf_append(std::move(buf), " %s",
+ bundle()->allocation().toString().get());
+ }
+
+ buf = JS_sprintf_append(std::move(buf), " {");
+
+ if (buf && hasDefinition()) {
+ buf = JS_sprintf_append(std::move(buf), " %u_def", from().bits());
+ if (hasVreg()) {
+ // If the definition has a fixed requirement, print it too.
+ const LDefinition* def = vreg().def();
+ LDefinition::Policy policy = def->policy();
+ if (policy == LDefinition::FIXED || policy == LDefinition::STACK) {
+ if (buf) {
+ buf = JS_sprintf_append(std::move(buf), ":F:%s",
+ def->output()->toString().get());
+ }
+ }
+ }
+ }
+
+ for (UsePositionIterator iter = usesBegin(); buf && iter; iter++) {
+ buf = JS_sprintf_append(std::move(buf), " %u_%s", iter->pos.bits(),
+ iter->use()->toString().get());
+ }
+
+ buf = JS_sprintf_append(std::move(buf), " }");
+
+ if (!buf) {
+ oomUnsafe.crash("LiveRange::toString()");
+ }
+
+ return buf;
+}
+
+UniqueChars LiveBundle::toString() const {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ UniqueChars buf = JS_smprintf("LB%u(", debugId());
+
+ if (buf) {
+ if (spillParent()) {
+ buf = JS_sprintf_append(std::move(buf), "parent=LB%u",
+ spillParent()->debugId());
+ } else {
+ buf = JS_sprintf_append(std::move(buf), "parent=none");
+ }
+ }
+
+ for (LiveRange::BundleLinkIterator iter = rangesBegin(); buf && iter;
+ iter++) {
+ if (buf) {
+ buf = JS_sprintf_append(std::move(buf), "%s %s",
+ (iter == rangesBegin()) ? "" : " ##",
+ LiveRange::get(*iter)->toString().get());
+ }
+ }
+
+ if (buf) {
+ buf = JS_sprintf_append(std::move(buf), ")");
+ }
+
+ if (!buf) {
+ oomUnsafe.crash("LiveBundle::toString()");
+ }
+
+ return buf;
+}
+
+void BacktrackingAllocator::dumpLiveRangesByVReg(const char* who) {
+ MOZ_ASSERT(!vregs[0u].hasRanges());
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ JitSpew(JitSpew_RegAlloc, "Live ranges by virtual register (%s):", who);
+
+ for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ JitSpewHeader(JitSpew_RegAlloc);
+ JitSpewCont(JitSpew_RegAlloc, " ");
+ VirtualRegister& reg = vregs[i];
+ for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
+ iter++) {
+ if (iter != reg.rangesBegin()) {
+ JitSpewCont(JitSpew_RegAlloc, " ## ");
+ }
+ JitSpewCont(JitSpew_RegAlloc, "%s",
+ LiveRange::get(*iter)->toString().get());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ }
+}
+
+void BacktrackingAllocator::dumpLiveRangesByBundle(const char* who) {
+ MOZ_ASSERT(!vregs[0u].hasRanges());
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ JitSpew(JitSpew_RegAlloc, "Live ranges by bundle (%s):", who);
+
+ for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+ VirtualRegister& reg = vregs[i];
+ for (LiveRange::RegisterLinkIterator baseIter = reg.rangesBegin(); baseIter;
+ baseIter++) {
+ LiveRange* range = LiveRange::get(*baseIter);
+ LiveBundle* bundle = range->bundle();
+ if (range == bundle->firstRange()) {
+ JitSpew(JitSpew_RegAlloc, " %s", bundle->toString().get());
+ }
+ }
+ }
+}
+
+void BacktrackingAllocator::dumpAllocations() {
+ JitSpew(JitSpew_RegAlloc, "Allocations:");
+
+ dumpLiveRangesByBundle("in dumpAllocations()");
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ JitSpew(JitSpew_RegAlloc, "Allocations by physical register:");
+
+ for (size_t i = 0; i < AnyRegister::Total; i++) {
+ if (registers[i].allocatable && !registers[i].allocations.empty()) {
+ JitSpewHeader(JitSpew_RegAlloc);
+ JitSpewCont(JitSpew_RegAlloc, " %s:", AnyRegister::FromCode(i).name());
+ bool first = true;
+ LiveRangePlusSet::Iter lrpIter(&registers[i].allocations);
+ while (lrpIter.hasMore()) {
+ LiveRange* range = lrpIter.next().liveRange();
+ if (first) {
+ first = false;
+ } else {
+ fprintf(stderr, " /");
+ }
+ fprintf(stderr, " %s", range->toString().get());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ }
+ }
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+}
+
+#endif // JS_JITSPEW
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+// Top level of the register allocation machinery //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+bool BacktrackingAllocator::go() {
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ JitSpew(JitSpew_RegAlloc, "Beginning register allocation");
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ dumpInstructions("(Pre-allocation LIR)");
+ }
+
+ if (!init()) {
+ return false;
+ }
+
+ if (!buildLivenessInfo()) {
+ return false;
+ }
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ dumpLiveRangesByVReg("after liveness analysis");
+ }
+#endif
+
+ if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2)) {
+ return false;
+ }
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ JitSpew(JitSpew_RegAlloc, "Beginning grouping and queueing registers");
+ if (!mergeAndQueueRegisters()) {
+ return false;
+ }
+ JitSpew(JitSpew_RegAlloc, "Completed grouping and queueing registers");
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ dumpLiveRangesByBundle("after grouping/queueing regs");
+ }
+#endif
+
+ // There now follow two allocation loops, which are really the heart of the
+ // allocator. First, the "main" allocation loop. This does almost all of
+ // the allocation work, by repeatedly pulling bundles out of
+  // ::allocationQueue and calling ::processBundle on each, until there are no
+ // bundles left in the queue. Note that ::processBundle can add new smaller
+ // bundles to the queue if it needs to split or spill a bundle.
+ //
+ // For each bundle in turn pulled out of ::allocationQueue, ::processBundle:
+ //
+ // * calls ::computeRequirement to discover the overall constraint for the
+ // bundle.
+ //
+ // * tries to find a register for it, by calling either ::tryAllocateFixed or
+ // ::tryAllocateNonFixed.
+ //
+ // * if that fails, but ::tryAllocateFixed / ::tryAllocateNonFixed indicate
+ // that there is some other bundle with lower spill weight that can be
+ // evicted, then that bundle is evicted (hence, put back into
+ // ::allocationQueue), and we try again.
+ //
+  // * at most MAX_ATTEMPTS such evict-and-retry attempts may be made per
+  //   bundle (minimal bundles are allowed to keep retrying).
+ //
+ // * If that still fails to find a register, then the bundle is handed off to
+ // ::chooseBundleSplit. That will choose to either split the bundle,
+ // yielding multiple pieces which are put back into ::allocationQueue, or
+ // it will spill the bundle. Note that the same mechanism applies to both;
+ // there's no clear boundary between splitting and spilling, because
+ // spilling can be interpreted as an extreme form of splitting.
+ //
+  // ::processBundle and its callees contain much gnarly logic which isn't
+ // easy to understand, particularly in the area of how eviction candidates
+ // are chosen. But it works well enough, and tinkering doesn't seem to
+ // improve the resulting allocations. More important is the splitting logic,
+ // because that controls where spill/reload instructions are placed.
+ //
+ // Eventually ::allocationQueue becomes empty, and each LiveBundle has either
+ // been allocated a register or is marked for spilling. In the latter case
+ // it will have been added to ::spilledBundles.
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop");
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+
+ // Allocate, spill and split bundles until finished.
+ while (!allocationQueue.empty()) {
+ if (mir->shouldCancel("Backtracking Allocation")) {
+ return false;
+ }
+
+ QueueItem item = allocationQueue.removeHighest();
+ if (!processBundle(mir, item.bundle)) {
+ return false;
+ }
+ }
+
+ // And here's the second allocation loop (hidden inside
+ // ::tryAllocatingRegistersForSpillBundles). It makes one last attempt to
+ // find a register for each spill bundle. There's no attempt to free up
+ // registers by eviction. In at least 99% of cases this attempt fails, in
+ // which case the bundle is handed off to ::spill. The lucky remaining 1%
+ // get a register. Unfortunately this scheme interacts badly with the
+ // splitting strategy, leading to excessive register-to-register copying in
+ // some very simple cases. See bug 1752520.
+ //
+ // A modest but probably worthwhile amount of allocation time can be saved by
+ // making ::tryAllocatingRegistersForSpillBundles use specialised versions of
+ // ::tryAllocateAnyRegister and its callees, that don't bother to create sets
+ // of conflicting bundles. Creating those sets is expensive and, here,
+ // pointless, since we're not going to do any eviction based on them. This
+ // refinement is implemented in the un-landed patch at bug 1758274 comment
+ // 15.
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ JitSpew(JitSpew_RegAlloc,
+ "Main allocation loop complete; "
+ "beginning spill-bundle allocation loop");
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+
+ if (!tryAllocatingRegistersForSpillBundles()) {
+ return false;
+ }
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ JitSpew(JitSpew_RegAlloc, "Spill-bundle allocation loop complete");
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+
+ if (!pickStackSlots()) {
+ return false;
+ }
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ dumpAllocations();
+ }
+#endif
+
+ if (!createMoveGroupsFromLiveRangeTransitions()) {
+ return false;
+ }
+
+ if (!installAllocationsInLIR()) {
+ return false;
+ }
+
+ if (!populateSafepoints()) {
+ return false;
+ }
+
+ if (!annotateMoveGroups()) {
+ return false;
+ }
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ dumpInstructions("(Post-allocation LIR)");
+ }
+
+ JitSpew(JitSpew_RegAlloc, "Finished register allocation");
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// //
+///////////////////////////////////////////////////////////////////////////////
diff --git a/js/src/jit/BacktrackingAllocator.h b/js/src/jit/BacktrackingAllocator.h
new file mode 100644
index 0000000000..366aa0d16c
--- /dev/null
+++ b/js/src/jit/BacktrackingAllocator.h
@@ -0,0 +1,844 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BacktrackingAllocator_h
+#define jit_BacktrackingAllocator_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+
+#include "ds/AvlTree.h"
+#include "ds/PriorityQueue.h"
+#include "jit/RegisterAllocator.h"
+#include "jit/StackSlotAllocator.h"
+
+// Gives better traces in Nightly/debug builds (could be EARLY_BETA_OR_EARLIER)
+#if defined(NIGHTLY_BUILD) || defined(DEBUG)
+# define AVOID_INLINE_FOR_DEBUGGING MOZ_NEVER_INLINE
+#else
+# define AVOID_INLINE_FOR_DEBUGGING
+#endif
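+
+// For example, several chunky methods below (::pickStackSlots,
+// ::installAllocationsInLIR, and friends) are declared as
+//
+//   [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool pickStackSlots();
+//
+// so that in Nightly/debug builds they appear as distinct frames in stack
+// traces and profiles instead of being inlined into their caller.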
+
+// A backtracking, priority-queue-based register allocator, based on the one
+// described in the following blog post:
+//
+// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
+
+namespace js {
+namespace jit {
+
+class Requirement {
+ public:
+ enum Kind { NONE, REGISTER, FIXED };
+
+ Requirement() : kind_(NONE) {}
+
+ explicit Requirement(Kind kind) : kind_(kind) {
+ // FIXED has a dedicated constructor.
+ MOZ_ASSERT(kind != FIXED);
+ }
+
+ explicit Requirement(LAllocation fixed) : kind_(FIXED), allocation_(fixed) {
+ MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
+ }
+
+ Kind kind() const { return kind_; }
+
+ LAllocation allocation() const {
+ MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
+ return allocation_;
+ }
+
+ [[nodiscard]] bool merge(const Requirement& newRequirement) {
+ // Merge newRequirement with any existing requirement, returning false
+ // if the new and old requirements conflict.
+
+ if (newRequirement.kind() == Requirement::FIXED) {
+ if (kind() == Requirement::FIXED) {
+ return newRequirement.allocation() == allocation();
+ }
+ *this = newRequirement;
+ return true;
+ }
+
+ MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
+ if (kind() == Requirement::FIXED) {
+ return allocation().isRegister();
+ }
+
+ *this = newRequirement;
+ return true;
+ }
+
+ private:
+ Kind kind_;
+ LAllocation allocation_;
+};
+
+struct UsePosition : public TempObject,
+ public InlineForwardListNode<UsePosition> {
+ private:
+ // A UsePosition is an LUse* with a CodePosition. UsePosition also has an
+ // optimization that allows access to the associated LUse::Policy without
+ // dereferencing memory: the policy is encoded in the low bits of the LUse*.
+ //
+ // Note however that because LUse* is uintptr_t-aligned, on 32-bit systems
+ // there are only 4 encodable tag values but more than 4 use policies; in
+ // that case we give the common LUse::ANY, LUse::REGISTER, and LUse::FIXED
+ // policies their own tags, and use tag 0x3 to indicate that dereferencing
+ // the LUse is necessary to get the policy (KEEPALIVE or STACK, in that
+ // case).
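+ //
+ // Worked example (illustrative numbers only): on a 64-bit build, if an LUse
+ // lives at address 0x7ffd3330 (so its low three bits are zero) and its
+ // policy enum value happens to be 1, then use_ holds 0x7ffd3331; use()
+ // masks the low bits back off and usePolicy() returns them. On 32-bit
+ // builds a stored tag of 0x3 instead means "re-read via use()->policy()".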
+ uintptr_t use_;
+ static_assert(LUse::ANY < 0x3,
+ "LUse::ANY can be represented in low tag on 32-bit systems");
+ static_assert(LUse::REGISTER < 0x3,
+ "LUse::REGISTER can be represented in tag on 32-bit systems");
+ static_assert(LUse::FIXED < 0x3,
+ "LUse::FIXED can be represented in tag on 32-bit systems");
+
+ static constexpr uintptr_t PolicyMask = sizeof(uintptr_t) - 1;
+ static constexpr uintptr_t UseMask = ~PolicyMask;
+
+ void setUse(LUse* use) {
+ // RECOVERED_INPUT is used by snapshots and ignored when building the
+ // liveness information. Thus we can safely assume that no such value
+ // would be seen.
+ MOZ_ASSERT(use->policy() != LUse::RECOVERED_INPUT);
+
+ uintptr_t policyBits = use->policy();
+#ifndef JS_64BIT
+ // On a 32-bit machine, LUse::KEEPALIVE and LUse::STACK are accessed by
+ // dereferencing the use pointer.
+ if (policyBits >= PolicyMask) {
+ policyBits = PolicyMask;
+ }
+#endif
+ use_ = uintptr_t(use) | policyBits;
+ MOZ_ASSERT(use->policy() == usePolicy());
+ }
+
+ public:
+ CodePosition pos;
+
+ LUse* use() const { return reinterpret_cast<LUse*>(use_ & UseMask); }
+
+ LUse::Policy usePolicy() const {
+ uintptr_t bits = use_ & PolicyMask;
+#ifndef JS_64BIT
+ // On 32-bit machines, reach out to memory if it's LUse::KEEPALIVE or
+ // LUse::STACK.
+ if (bits == PolicyMask) {
+ return use()->policy();
+ }
+#endif
+ LUse::Policy policy = LUse::Policy(bits);
+ MOZ_ASSERT(use()->policy() == policy);
+ return policy;
+ }
+
+ UsePosition(LUse* use, CodePosition pos) : pos(pos) {
+ // Verify that the usedAtStart() flag is consistent with the
+ // subposition. For now ignore fixed registers, because they
+ // are handled specially around calls.
+ MOZ_ASSERT_IF(!use->isFixedRegister(),
+ pos.subpos() == (use->usedAtStart() ? CodePosition::INPUT
+ : CodePosition::OUTPUT));
+ setUse(use);
+ }
+};
+
+using UsePositionIterator = InlineForwardListIterator<UsePosition>;
+
+// Backtracking allocator data structures overview.
+//
+// LiveRange: A continuous range of positions where a virtual register is live.
+// LiveBundle: A set of LiveRanges which do not overlap.
+// VirtualRegister: A set of all LiveRanges used for some LDefinition.
+//
+// The allocator first performs a liveness analysis on the LIR graph which
+// constructs LiveRanges for each VirtualRegister, determining where the
+// registers are live.
+//
+// The ranges are then bundled together according to heuristics, and placed on
+// the allocation queue.
+//
+// As bundles are removed from the allocation queue, we attempt to find a
+// physical register or stack slot allocation for all ranges in the removed
+// bundle, possibly evicting already-allocated bundles. See processBundle()
+// for details.
+//
+// If we are not able to allocate a bundle, it is split according to heuristics
+// into two or more smaller bundles which cover all the ranges of the original.
+// These smaller bundles are then allocated independently.
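+//
+// For example (schematic only): a virtual register v1 that is live over
+// positions [4,10) and [20,30) has two LiveRanges. Both initially sit in a
+// single LiveBundle; if that bundle cannot be allocated, splitting may leave
+// [4,10) in a bundle that receives a register while [20,30) ends up in a
+// bundle that is spilled to a stack slot.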
+
+class LiveBundle;
+class VirtualRegister;
+
+class LiveRange : public TempObject {
+ public:
+ // Linked lists are used to keep track of the ranges in each LiveBundle and
+ // VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
+ // these auxiliary classes to keep things straight.
+ class BundleLink : public InlineForwardListNode<BundleLink> {};
+ class RegisterLink : public InlineForwardListNode<RegisterLink> {};
+
+ using BundleLinkIterator = InlineForwardListIterator<BundleLink>;
+ using RegisterLinkIterator = InlineForwardListIterator<RegisterLink>;
+
+ // Links in the lists in LiveBundle and VirtualRegister.
+ BundleLink bundleLink;
+ RegisterLink registerLink;
+
+ static LiveRange* get(BundleLink* link) {
+ return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+ offsetof(LiveRange, bundleLink));
+ }
+ static LiveRange* get(RegisterLink* link) {
+ return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+ offsetof(LiveRange, registerLink));
+ }
+
+ struct Range {
+ // The beginning of this range, inclusive.
+ CodePosition from;
+
+ // The end of this range, exclusive.
+ CodePosition to;
+
+ Range() = default;
+
+ Range(CodePosition from, CodePosition to) : from(from), to(to) {
+ MOZ_ASSERT(!empty());
+ }
+
+ bool empty() {
+ MOZ_ASSERT(from <= to);
+ return from == to;
+ }
+ };
+
+ private:
+ // The virtual register this range is for, or nullptr if this does not have a
+ // virtual register (for example, it is in the callRanges bundle).
+ VirtualRegister* vreg_;
+
+ // The bundle containing this range, null if liveness information is being
+ // constructed and we haven't started allocating bundles yet.
+ LiveBundle* bundle_;
+
+ // The code positions in this range.
+ Range range_;
+
+ // All uses of the virtual register in this range, ordered by location.
+ InlineForwardList<UsePosition> uses_;
+
+ // Total spill weight, computed from the policies of all the uses in this
+ // range. Because a use's policy can't be changed after initialization, we
+ // can update the weight whenever a use is added to or removed from this
+ // range. This way, we don't need to iterate over all the uses every time
+ // computeSpillWeight() is called.
+ size_t usesSpillWeight_;
+
+ // Number of uses that have policy LUse::FIXED.
+ uint32_t numFixedUses_;
+
+ // Whether this range contains the virtual register's definition.
+ bool hasDefinition_;
+
+ LiveRange(VirtualRegister* vreg, Range range)
+ : vreg_(vreg),
+ bundle_(nullptr),
+ range_(range),
+ usesSpillWeight_(0),
+ numFixedUses_(0),
+ hasDefinition_(false)
+
+ {
+ MOZ_ASSERT(!range.empty());
+ }
+
+ void noteAddedUse(UsePosition* use);
+ void noteRemovedUse(UsePosition* use);
+
+ public:
+ static LiveRange* FallibleNew(TempAllocator& alloc, VirtualRegister* vreg,
+ CodePosition from, CodePosition to) {
+ return new (alloc.fallible()) LiveRange(vreg, Range(from, to));
+ }
+
+ VirtualRegister& vreg() const {
+ MOZ_ASSERT(hasVreg());
+ return *vreg_;
+ }
+ bool hasVreg() const { return vreg_ != nullptr; }
+
+ LiveBundle* bundle() const { return bundle_; }
+
+ CodePosition from() const { return range_.from; }
+ CodePosition to() const { return range_.to; }
+ bool covers(CodePosition pos) const { return pos >= from() && pos < to(); }
+
+ // Whether this range wholly contains other.
+ bool contains(LiveRange* other) const;
+
+ // Intersect this range with other, returning the subranges of this
+ // that are before, inside, or after other.
+ void intersect(LiveRange* other, Range* pre, Range* inside,
+ Range* post) const;
+
+ // Whether this range has any intersection with other.
+ bool intersects(LiveRange* other) const;
+
+ UsePositionIterator usesBegin() const { return uses_.begin(); }
+ UsePosition* lastUse() const { return uses_.back(); }
+ bool hasUses() const { return !!usesBegin(); }
+ UsePosition* popUse();
+
+ bool hasDefinition() const { return hasDefinition_; }
+
+ void setFrom(CodePosition from) {
+ range_.from = from;
+ MOZ_ASSERT(!range_.empty());
+ }
+ void setTo(CodePosition to) {
+ range_.to = to;
+ MOZ_ASSERT(!range_.empty());
+ }
+
+ void setBundle(LiveBundle* bundle) { bundle_ = bundle; }
+
+ void addUse(UsePosition* use);
+ void tryToMoveDefAndUsesInto(LiveRange* other);
+
+ void setHasDefinition() {
+ MOZ_ASSERT(!hasDefinition_);
+ hasDefinition_ = true;
+ }
+
+ size_t usesSpillWeight() { return usesSpillWeight_; }
+ uint32_t numFixedUses() { return numFixedUses_; }
+
+#ifdef JS_JITSPEW
+ // Return a string describing this range.
+ UniqueChars toString() const;
+#endif
+
+ // Comparator for use in AVL trees.
+ static int compare(LiveRange* v0, LiveRange* v1) {
+ // The denoted range includes 'from' but excludes 'to'.
+ if (v0->to() <= v1->from()) {
+ return -1;
+ }
+ if (v0->from() >= v1->to()) {
+ return 1;
+ }
+ return 0;
+ }
+};
+
+// LiveRangePlus is a simple wrapper around a LiveRange*. It caches the
+// LiveRange*'s `.range_.from` and `.range_.to` CodePositions. The only
+// purpose of this is to avoid some cache misses that would otherwise occur
+// when comparing those fields in an AvlTree<LiveRange*, ..>. This measurably
+// speeds up the allocator in some cases. See bug 1814204.
+
+class LiveRangePlus {
+ // The LiveRange we're wrapping.
+ LiveRange* liveRange_;
+ // Cached copies of liveRange_->from() and liveRange_->to().
+ CodePosition from_;
+ CodePosition to_;
+
+ public:
+ explicit LiveRangePlus(LiveRange* lr)
+ : liveRange_(lr), from_(lr->from()), to_(lr->to()) {}
+ LiveRangePlus() : liveRange_(nullptr) {}
+ ~LiveRangePlus() {
+ MOZ_ASSERT(liveRange_ ? from_ == liveRange_->from()
+ : from_ == CodePosition());
+ MOZ_ASSERT(liveRange_ ? to_ == liveRange_->to() : to_ == CodePosition());
+ }
+
+ LiveRange* liveRange() const { return liveRange_; }
+
+ // Comparator for use in AVL trees.
+ static int compare(const LiveRangePlus& lrp0, const LiveRangePlus& lrp1) {
+ // The denoted range includes 'from' but excludes 'to'.
+ if (lrp0.to_ <= lrp1.from_) {
+ return -1;
+ }
+ if (lrp0.from_ >= lrp1.to_) {
+ return 1;
+ }
+ return 0;
+ }
+};
+
+// Make sure there are no alignment holes or vtable present. Per bug 1814204,
+// it's important that this structure is as small as possible.
+static_assert(sizeof(LiveRangePlus) ==
+ sizeof(LiveRange*) + 2 * sizeof(CodePosition));
+
+// Tracks information about bundles that should all be spilled to the same
+// physical location. At the beginning of allocation, each bundle has its own
+// spill set. As bundles are split, the new smaller bundles continue to use the
+// same spill set.
+class SpillSet : public TempObject {
+ // All bundles with this spill set which have been spilled. All bundles in
+ // this list will be given the same physical slot.
+ Vector<LiveBundle*, 1, JitAllocPolicy> list_;
+
+ explicit SpillSet(TempAllocator& alloc) : list_(alloc) {}
+
+ public:
+ static SpillSet* New(TempAllocator& alloc) {
+ return new (alloc) SpillSet(alloc);
+ }
+
+ [[nodiscard]] bool addSpilledBundle(LiveBundle* bundle) {
+ return list_.append(bundle);
+ }
+ size_t numSpilledBundles() const { return list_.length(); }
+ LiveBundle* spilledBundle(size_t i) const { return list_[i]; }
+
+ void setAllocation(LAllocation alloc);
+};
+
+#ifdef JS_JITSPEW
+// See comment on LiveBundle::debugId_ just below. This needs to be atomic
+// because otherwise TSan runs on debug builds in automation will (correctly)
+// report a race.
+static mozilla::Atomic<uint32_t> LiveBundle_debugIdCounter =
+ mozilla::Atomic<uint32_t>{0};
+#endif
+
+// A set of live ranges which are all pairwise disjoint. The register allocator
+// attempts to find allocations for an entire bundle, and if it fails the
+// bundle will be broken into smaller ones which are allocated independently.
+class LiveBundle : public TempObject {
+ // Set to use if this bundle or one it is split into is spilled.
+ SpillSet* spill_;
+
+ // All the ranges in this set, ordered by location.
+ InlineForwardList<LiveRange::BundleLink> ranges_;
+
+ // Allocation to use for ranges in this set, bogus if unallocated or spilled
+ // and not yet given a physical stack slot.
+ LAllocation alloc_;
+
+ // Bundle which entirely contains this one and has no register uses. This
+ // may or may not be spilled by the allocator, but it can be spilled and
+ // will not be split.
+ LiveBundle* spillParent_;
+
+#ifdef JS_JITSPEW
+ // This is used only for debug-printing bundles. It gives each bundle a
+ // stable identity in the debug output, which it otherwise wouldn't have.
+ uint32_t debugId_;
+#endif
+
+ LiveBundle(SpillSet* spill, LiveBundle* spillParent)
+ : spill_(spill), spillParent_(spillParent) {
+#ifdef JS_JITSPEW
+ debugId_ = LiveBundle_debugIdCounter++;
+#endif
+ }
+
+ public:
+ static LiveBundle* FallibleNew(TempAllocator& alloc, SpillSet* spill,
+ LiveBundle* spillParent) {
+ return new (alloc.fallible()) LiveBundle(spill, spillParent);
+ }
+
+ SpillSet* spillSet() const { return spill_; }
+ void setSpillSet(SpillSet* spill) { spill_ = spill; }
+
+ LiveRange::BundleLinkIterator rangesBegin() const { return ranges_.begin(); }
+ bool hasRanges() const { return !!rangesBegin(); }
+ LiveRange* firstRange() const { return LiveRange::get(*rangesBegin()); }
+ LiveRange* lastRange() const { return LiveRange::get(ranges_.back()); }
+ LiveRange* rangeFor(CodePosition pos) const;
+ void removeRange(LiveRange* range);
+ void removeRangeAndIncrementIterator(LiveRange::BundleLinkIterator& iter) {
+ ranges_.removeAndIncrement(iter);
+ }
+ void addRange(LiveRange* range);
+ [[nodiscard]] bool addRange(TempAllocator& alloc, VirtualRegister* vreg,
+ CodePosition from, CodePosition to);
+ [[nodiscard]] bool addRangeAndDistributeUses(TempAllocator& alloc,
+ LiveRange* oldRange,
+ CodePosition from,
+ CodePosition to);
+ LiveRange* popFirstRange();
+#ifdef DEBUG
+ size_t numRanges() const;
+#endif
+
+ LAllocation allocation() const { return alloc_; }
+ void setAllocation(LAllocation alloc) { alloc_ = alloc; }
+
+ LiveBundle* spillParent() const { return spillParent_; }
+
+#ifdef JS_JITSPEW
+ uint32_t debugId() const { return debugId_; }
+
+ // Return a string describing this bundle.
+ UniqueChars toString() const;
+#endif
+};
+
+// Information about the allocation for a virtual register.
+class VirtualRegister {
+ // Instruction which defines this register.
+ LNode* ins_ = nullptr;
+
+ // Definition in the instruction for this register.
+ LDefinition* def_ = nullptr;
+
+ // All live ranges for this register. These may overlap each other, and are
+ // ordered by their start position.
+ InlineForwardList<LiveRange::RegisterLink> ranges_;
+
+ // Whether def_ is a temp or an output.
+ bool isTemp_ = false;
+
+ // Whether this vreg is an input for some phi. This use is not reflected in
+ // any range on the vreg.
+ bool usedByPhi_ = false;
+
+ // If this register's definition is MUST_REUSE_INPUT, whether a copy must
+ // be introduced before the definition that relaxes the policy.
+ bool mustCopyInput_ = false;
+
+ void operator=(const VirtualRegister&) = delete;
+ VirtualRegister(const VirtualRegister&) = delete;
+
+ public:
+ VirtualRegister() = default;
+
+ void init(LNode* ins, LDefinition* def, bool isTemp) {
+ MOZ_ASSERT(!ins_);
+ ins_ = ins;
+ def_ = def;
+ isTemp_ = isTemp;
+ }
+
+ LNode* ins() const { return ins_; }
+ LDefinition* def() const { return def_; }
+ LDefinition::Type type() const { return def()->type(); }
+ uint32_t vreg() const { return def()->virtualRegister(); }
+ bool isCompatible(const AnyRegister& r) const {
+ return def_->isCompatibleReg(r);
+ }
+ bool isCompatible(const VirtualRegister& vr) const {
+ return def_->isCompatibleDef(*vr.def_);
+ }
+ bool isTemp() const { return isTemp_; }
+
+ void setUsedByPhi() { usedByPhi_ = true; }
+ bool usedByPhi() { return usedByPhi_; }
+
+ void setMustCopyInput() { mustCopyInput_ = true; }
+ bool mustCopyInput() { return mustCopyInput_; }
+
+ LiveRange::RegisterLinkIterator rangesBegin() const {
+ return ranges_.begin();
+ }
+ LiveRange::RegisterLinkIterator rangesBegin(LiveRange* range) const {
+ return ranges_.begin(&range->registerLink);
+ }
+ bool hasRanges() const { return !!rangesBegin(); }
+ LiveRange* firstRange() const { return LiveRange::get(*rangesBegin()); }
+ LiveRange* lastRange() const { return LiveRange::get(ranges_.back()); }
+ LiveRange* rangeFor(CodePosition pos, bool preferRegister = false) const;
+ void removeRange(LiveRange* range);
+ void addRange(LiveRange* range);
+
+ void removeRangeAndIncrement(LiveRange::RegisterLinkIterator& iter) {
+ ranges_.removeAndIncrement(iter);
+ }
+
+ LiveBundle* firstBundle() const { return firstRange()->bundle(); }
+
+ [[nodiscard]] bool addInitialRange(TempAllocator& alloc, CodePosition from,
+ CodePosition to, size_t* numRanges);
+ void addInitialUse(UsePosition* use);
+ void setInitialDefinition(CodePosition from);
+};
+
+// A sequence of code positions, for telling BacktrackingAllocator::splitAt
+// where to split.
+using SplitPositionVector = js::Vector<CodePosition, 4, SystemAllocPolicy>;
+
+class BacktrackingAllocator : protected RegisterAllocator {
+ friend class JSONSpewer;
+
+ // This flag is set when testing new allocator modifications.
+ bool testbed;
+
+ BitSet* liveIn;
+ FixedList<VirtualRegister> vregs;
+
+ // Allocation state.
+ StackSlotAllocator stackSlotAllocator;
+
+ // Priority queue element: a bundle and the associated priority.
+ struct QueueItem {
+ LiveBundle* bundle;
+
+ QueueItem(LiveBundle* bundle, size_t priority)
+ : bundle(bundle), priority_(priority) {}
+
+ static size_t priority(const QueueItem& v) { return v.priority_; }
+
+ private:
+ size_t priority_;
+ };
+
+ PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;
+
+ // This is a set of LiveRanges. They must be non-overlapping. Attempts
+ // to add an overlapping range will cause AvlTree::insert to MOZ_CRASH().
+ using LiveRangeSet = AvlTree<LiveRange*, LiveRange>;
+
+ // The same, but for LiveRangePlus. See comments on LiveRangePlus.
+ using LiveRangePlusSet = AvlTree<LiveRangePlus, LiveRangePlus>;
+
+ // Each physical register is associated with the set of ranges over which
+ // that register is currently allocated.
+ struct PhysicalRegister {
+ bool allocatable;
+ AnyRegister reg;
+ LiveRangePlusSet allocations;
+
+ PhysicalRegister() : allocatable(false) {}
+ };
+ mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;
+
+ // Ranges of code which are considered to be hot, for which good allocation
+ // should be prioritized.
+ LiveRangeSet hotcode;
+
+ struct CallRange : public TempObject, public InlineListNode<CallRange> {
+ LiveRange::Range range;
+
+ CallRange(CodePosition from, CodePosition to) : range(from, to) {}
+
+ // Comparator for use in AVL trees.
+ static int compare(CallRange* v0, CallRange* v1) {
+ if (v0->range.to <= v1->range.from) {
+ return -1;
+ }
+ if (v0->range.from >= v1->range.to) {
+ return 1;
+ }
+ return 0;
+ }
+ };
+
+ // Ranges where all registers must be spilled due to call instructions.
+ using CallRangeList = InlineList<CallRange>;
+ CallRangeList callRangesList;
+ AvlTree<CallRange*, CallRange> callRanges;
+
+ // Information about an allocated stack slot.
+ struct SpillSlot : public TempObject,
+ public InlineForwardListNode<SpillSlot> {
+ LStackSlot alloc;
+ LiveRangePlusSet allocated;
+
+ SpillSlot(uint32_t slot, LifoAlloc* alloc)
+ : alloc(slot), allocated(alloc) {}
+ };
+ using SpillSlotList = InlineForwardList<SpillSlot>;
+
+ // All allocated slots of each width.
+ SpillSlotList normalSlots, doubleSlots, quadSlots;
+
+ Vector<LiveBundle*, 4, SystemAllocPolicy> spilledBundles;
+
+ using LiveBundleVector = Vector<LiveBundle*, 4, SystemAllocPolicy>;
+
+ // Misc accessors
+ bool compilingWasm() { return mir->outerInfo().compilingWasm(); }
+ VirtualRegister& vreg(const LDefinition* def) {
+ return vregs[def->virtualRegister()];
+ }
+ VirtualRegister& vreg(const LAllocation* alloc) {
+ MOZ_ASSERT(alloc->isUse());
+ return vregs[alloc->toUse()->virtualRegister()];
+ }
+
+ // Helpers for creating and adding MoveGroups
+ [[nodiscard]] bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to,
+ LDefinition::Type type) {
+ LAllocation fromAlloc = from->bundle()->allocation();
+ LAllocation toAlloc = to->bundle()->allocation();
+ MOZ_ASSERT(fromAlloc != toAlloc);
+ return moves->add(fromAlloc, toAlloc, type);
+ }
+
+ [[nodiscard]] bool moveInput(LInstruction* ins, LiveRange* from,
+ LiveRange* to, LDefinition::Type type) {
+ if (from->bundle()->allocation() == to->bundle()->allocation()) {
+ return true;
+ }
+ LMoveGroup* moves = getInputMoveGroup(ins);
+ return addMove(moves, from, to, type);
+ }
+
+ [[nodiscard]] bool moveAfter(LInstruction* ins, LiveRange* from,
+ LiveRange* to, LDefinition::Type type) {
+ if (from->bundle()->allocation() == to->bundle()->allocation()) {
+ return true;
+ }
+ LMoveGroup* moves = getMoveGroupAfter(ins);
+ return addMove(moves, from, to, type);
+ }
+
+ [[nodiscard]] bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to,
+ LDefinition::Type type) {
+ if (from->bundle()->allocation() == to->bundle()->allocation()) {
+ return true;
+ }
+ LMoveGroup* moves = block->getExitMoveGroup(alloc());
+ return addMove(moves, from, to, type);
+ }
+
+ [[nodiscard]] bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to,
+ LDefinition::Type type) {
+ if (from->bundle()->allocation() == to->bundle()->allocation()) {
+ return true;
+ }
+ LMoveGroup* moves = block->getEntryMoveGroup(alloc());
+ return addMove(moves, from, to, type);
+ }
+
+ // Out-of-line methods, in the same sequence as in BacktrackingAllocator.cpp.
+
+ // Misc helpers: queries about uses
+ bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
+ bool isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy = false);
+ bool isRegisterDefinition(LiveRange* range);
+
+ // Misc helpers: atomic LIR groups
+ // (these are all in the parent class, RegisterAllocator)
+
+ // Misc helpers: computation of bundle priorities and spill weights
+ size_t computePriority(LiveBundle* bundle);
+ bool minimalDef(LiveRange* range, LNode* ins);
+ bool minimalUse(LiveRange* range, UsePosition* use);
+ bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr);
+ size_t computeSpillWeight(LiveBundle* bundle);
+ size_t maximumSpillWeight(const LiveBundleVector& bundles);
+
+ // Initialization of the allocator
+ [[nodiscard]] bool init();
+
+ // Liveness analysis
+ [[nodiscard]] bool addInitialFixedRange(AnyRegister reg, CodePosition from,
+ CodePosition to);
+ [[nodiscard]] bool buildLivenessInfo();
+
+ // Merging and queueing of LiveRange groups
+ [[nodiscard]] bool tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1);
+ void allocateStackDefinition(VirtualRegister& reg);
+ [[nodiscard]] bool tryMergeReusedRegister(VirtualRegister& def,
+ VirtualRegister& input);
+ [[nodiscard]] bool mergeAndQueueRegisters();
+
+ // Implementation of splitting decisions, but not the making of those
+ // decisions
+ [[nodiscard]] bool updateVirtualRegisterListsThenRequeueBundles(
+ LiveBundle* bundle, const LiveBundleVector& newBundles);
+
+ // Implementation of splitting decisions, but not the making of those
+ // decisions
+ [[nodiscard]] bool splitAt(LiveBundle* bundle,
+ const SplitPositionVector& splitPositions);
+
+ // Creation of splitting decisions, but not their implementation
+ [[nodiscard]] bool splitAcrossCalls(LiveBundle* bundle);
+ [[nodiscard]] bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
+ [[nodiscard]] bool trySplitAfterLastRegisterUse(LiveBundle* bundle,
+ LiveBundle* conflict,
+ bool* success);
+ [[nodiscard]] bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle,
+ LiveBundle* conflict,
+ bool* success);
+
+ // The top level driver for the splitting machinery
+ [[nodiscard]] bool chooseBundleSplit(LiveBundle* bundle, bool fixed,
+ LiveBundle* conflict);
+
+ // Bundle allocation
+ [[nodiscard]] bool computeRequirement(LiveBundle* bundle,
+ Requirement* prequirement,
+ Requirement* phint);
+ [[nodiscard]] bool tryAllocateRegister(PhysicalRegister& r,
+ LiveBundle* bundle, bool* success,
+ bool* pfixed,
+ LiveBundleVector& conflicting);
+ [[nodiscard]] bool tryAllocateAnyRegister(LiveBundle* bundle, bool* success,
+ bool* pfixed,
+ LiveBundleVector& conflicting);
+ [[nodiscard]] bool evictBundle(LiveBundle* bundle);
+ [[nodiscard]] bool tryAllocateFixed(LiveBundle* bundle,
+ Requirement requirement, bool* success,
+ bool* pfixed,
+ LiveBundleVector& conflicting);
+ [[nodiscard]] bool tryAllocateNonFixed(LiveBundle* bundle,
+ Requirement requirement,
+ Requirement hint, bool* success,
+ bool* pfixed,
+ LiveBundleVector& conflicting);
+ [[nodiscard]] bool processBundle(MIRGenerator* mir, LiveBundle* bundle);
+ [[nodiscard]] bool spill(LiveBundle* bundle);
+ [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool
+ tryAllocatingRegistersForSpillBundles();
+
+ // Rewriting of the LIR after bundle processing is done
+ [[nodiscard]] bool insertAllRanges(LiveRangePlusSet& set, LiveBundle* bundle);
+ [[nodiscard]] bool pickStackSlot(SpillSet* spill);
+ [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool pickStackSlots();
+ [[nodiscard]] bool moveAtEdge(LBlock* predecessor, LBlock* successor,
+ LiveRange* from, LiveRange* to,
+ LDefinition::Type type);
+ [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool deadRange(LiveRange* range);
+ [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool
+ createMoveGroupsFromLiveRangeTransitions();
+ size_t findFirstNonCallSafepoint(CodePosition from);
+ void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range);
+ [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool installAllocationsInLIR();
+ size_t findFirstSafepoint(CodePosition pos, size_t startFrom);
+ [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool populateSafepoints();
+ [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool annotateMoveGroups();
+
+ // Debug-printing support
+#ifdef JS_JITSPEW
+ void dumpLiveRangesByVReg(const char* who);
+ void dumpLiveRangesByBundle(const char* who);
+ void dumpAllocations();
+#endif
+
+ // Top level of the register allocation machinery, and the only externally
+ // visible bit.
+ public:
+ BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph,
+ bool testbed)
+ : RegisterAllocator(mir, lir, graph),
+ testbed(testbed),
+ liveIn(nullptr),
+ callRanges(nullptr) {}
+
+ [[nodiscard]] bool go();
+};
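+
+// Expected usage (a sketch; the real call site is in the Ion compilation
+// pipeline, and the argument names here are illustrative):
+//
+//   BacktrackingAllocator regalloc(mir, lirGenerator, lirGraph, testbedFlag);
+//   if (!regalloc.go()) {
+//     return false;  // OOM or compilation cancelled
+//   }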
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BacktrackingAllocator_h */
diff --git a/js/src/jit/Bailouts.cpp b/js/src/jit/Bailouts.cpp
new file mode 100644
index 0000000000..3730d8997a
--- /dev/null
+++ b/js/src/jit/Bailouts.cpp
@@ -0,0 +1,352 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/ScopeExit.h"
+
+#include "gc/GC.h"
+#include "jit/Assembler.h" // jit::FramePointer
+#include "jit/BaselineJIT.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/JSJitFrameIter.h"
+#include "jit/SafepointIndex.h"
+#include "jit/ScriptFromCalleeToken.h"
+#include "vm/Interpreter.h"
+#include "vm/JSContext.h"
+#include "vm/Stack.h"
+
+#include "vm/JSScript-inl.h"
+#include "vm/Probes-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::IsInRange;
+
+#if defined(_WIN32)
+# pragma pack(push, 1)
+#endif
+class js::jit::BailoutStack {
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+ uintptr_t frameSize_;
+ uintptr_t snapshotOffset_;
+
+ public:
+ MachineState machineState() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ uint32_t snapshotOffset() const { return snapshotOffset_; }
+ uint32_t frameSize() const { return frameSize_; }
+ uint8_t* parentStackPointer() {
+ return (uint8_t*)this + sizeof(BailoutStack);
+ }
+};
+#if defined(_WIN32)
+# pragma pack(pop)
+#endif
+
+// Make sure the compiler doesn't add extra padding on 32-bit platforms.
+static_assert((sizeof(BailoutStack) % 8) == 0,
+ "BailoutStack should be 8-byte aligned.");
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machineState()), activation_(nullptr) {
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ MOZ_RELEASE_ASSERT(uintptr_t(framePointer_) == machine_.read(FramePointer));
+
+ JSScript* script =
+ ScriptFromCalleeToken(((JitFrameLayout*)framePointer_)->calleeToken());
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+ snapshotOffset_ = bailout->snapshotOffset();
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* bailout)
+ : machine_(bailout->machine()), activation_(nullptr) {
+ framePointer_ = (uint8_t*)bailout->fp();
+ MOZ_RELEASE_ASSERT(uintptr_t(framePointer_) == machine_.read(FramePointer));
+
+ topIonScript_ = bailout->ionScript();
+ attachOnJitActivation(activations);
+
+ uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
+ const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ const JSJitFrameIter& frame)
+ : machine_(frame.machineState()) {
+ framePointer_ = (uint8_t*)frame.fp();
+ topIonScript_ = frame.ionScript();
+ attachOnJitActivation(activations);
+
+ const OsiIndex* osiIndex = frame.osiIndex();
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
+
+// This address is a magic number made to cause crashes while indicating that we
+// are making an attempt to mark the stack during a bailout.
+static constexpr uint32_t FAKE_EXITFP_FOR_BAILOUT_ADDR = 0xba2;
+static uint8_t* const FAKE_EXITFP_FOR_BAILOUT =
+ reinterpret_cast<uint8_t*>(FAKE_EXITFP_FOR_BAILOUT_ADDR);
+
+static_assert(!(FAKE_EXITFP_FOR_BAILOUT_ADDR & wasm::ExitFPTag),
+ "FAKE_EXITFP_FOR_BAILOUT could be mistaken as a low-bit tagged "
+ "wasm exit fp");
+
+bool jit::Bailout(BailoutStack* sp, BaselineBailoutInfo** bailoutInfo) {
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(bailoutInfo);
+
+ // We don't have an exit frame.
+ MOZ_ASSERT(IsInRange(FAKE_EXITFP_FOR_BAILOUT, 0, 0x1000) &&
+ IsInRange(FAKE_EXITFP_FOR_BAILOUT + sizeof(CommonFrameLayout),
+ 0, 0x1000),
+ "Fake exitfp pointer should be within the first page.");
+
+#ifdef DEBUG
+ // Reset the counter when we bailed after MDebugEnterGCUnsafeRegion, but
+ // before the matching MDebugLeaveGCUnsafeRegion.
+ //
+ // NOTE: EnterJit ensures the counter is zero when we enter JIT code.
+ cx->resetInUnsafeRegion();
+#endif
+
+ cx->activation()->asJit()->setJSExitFP(FAKE_EXITFP_FOR_BAILOUT);
+
+ JitActivationIterator jitActivations(cx);
+ BailoutFrameInfo bailoutData(jitActivations, sp);
+ JSJitFrameIter frame(jitActivations->asJit());
+ MOZ_ASSERT(!frame.ionScript()->invalidated());
+ JitFrameLayout* currentFramePtr = frame.jsFrame();
+
+ JitSpew(JitSpew_IonBailouts, "Took bailout! Snapshot offset: %u",
+ frame.snapshotOffset());
+
+ MOZ_ASSERT(IsBaselineJitEnabled(cx));
+
+ *bailoutInfo = nullptr;
+ bool success =
+ BailoutIonToBaseline(cx, bailoutData.activation(), frame, bailoutInfo,
+ /*exceptionInfo=*/nullptr, BailoutReason::Normal);
+ MOZ_ASSERT_IF(success, *bailoutInfo != nullptr);
+
+ if (!success) {
+ MOZ_ASSERT(cx->isExceptionPending());
+ JSScript* script = frame.script();
+ probes::ExitScript(cx, script, script->function(),
+ /* popProfilerFrame = */ false);
+ }
+
+ // This condition was wrong when we entered this bailout function, but it
+ // might be true now. A GC might have reclaimed all the Jit code and
+ // invalidated all frames which are currently on the stack. As we are
+ // already in a bailout, we could not switch to an invalidation
+ // bailout. When the code of an IonScript which is on the stack is
+ // invalidated (see InvalidateActivation), we remove references to it and
+ // increment the reference counter for each activation that appears on the
+ // stack. As the bailed frame is one of them, we have to decrement it now.
+ if (frame.ionScript()->invalidated()) {
+ frame.ionScript()->decrementInvalidationCount(cx->gcContext());
+ }
+
+ // NB: Commentary on how |lastProfilingFrame| is set from bailouts.
+ //
+ // Once we return to jitcode, any following frames might get clobbered,
+ // but the current frame will not (as it will be clobbered "in-place"
+ // with a baseline frame that will share the same frame prefix).
+ // However, there may be multiple baseline frames unpacked from this
+ // single Ion frame, which means we will need to once again reset
+ // |lastProfilingFrame| to point to the correct unpacked last frame
+ // in |FinishBailoutToBaseline|.
+ //
+ // In the case of error, the jitcode will jump immediately to an
+ // exception handler, which will unwind the frames and properly set
+ // the |lastProfilingFrame| to point to the frame being resumed into
+ // (see |AutoResetLastProfilerFrameOnReturnFromException|).
+ //
+ // In both cases, we want to temporarily set the |lastProfilingFrame|
+ // to the current frame being bailed out, and then fix it up later.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(
+ cx->runtime())) {
+ cx->jitActivation->setLastProfilingFrame(currentFramePtr);
+ }
+
+ return success;
+}
+
+bool jit::InvalidationBailout(InvalidationBailoutStack* sp,
+ BaselineBailoutInfo** bailoutInfo) {
+ sp->checkInvariants();
+
+ JSContext* cx = TlsContext.get();
+
+#ifdef DEBUG
+ // Reset the counter when we bailed after MDebugEnterGCUnsafeRegion, but
+ // before the matching MDebugLeaveGCUnsafeRegion.
+ //
+ // NOTE: EnterJit ensures the counter is zero when we enter JIT code.
+ cx->resetInUnsafeRegion();
+#endif
+
+ // We don't have an exit frame.
+ cx->activation()->asJit()->setJSExitFP(FAKE_EXITFP_FOR_BAILOUT);
+
+ JitActivationIterator jitActivations(cx);
+ BailoutFrameInfo bailoutData(jitActivations, sp);
+ JSJitFrameIter frame(jitActivations->asJit());
+ JitFrameLayout* currentFramePtr = frame.jsFrame();
+
+ JitSpew(JitSpew_IonBailouts, "Took invalidation bailout! Snapshot offset: %u",
+ frame.snapshotOffset());
+
+ MOZ_ASSERT(IsBaselineJitEnabled(cx));
+
+ *bailoutInfo = nullptr;
+ bool success = BailoutIonToBaseline(cx, bailoutData.activation(), frame,
+ bailoutInfo, /*exceptionInfo=*/nullptr,
+ BailoutReason::Invalidate);
+ MOZ_ASSERT_IF(success, *bailoutInfo != nullptr);
+
+ if (!success) {
+ MOZ_ASSERT(cx->isExceptionPending());
+
+ // If the bailout failed, then the bailout trampoline will pop the
+ // current frame and jump straight to exception handling code when
+ // this function returns. Any Gecko Profiler entry pushed for this
+ // frame will be silently forgotten.
+ //
+ // We call ExitScript here to ensure that if the ionScript had Gecko
+ // Profiler instrumentation, then the entry for it is popped.
+ //
+ // However, if the bailout was during argument check, then a
+ // pseudostack frame would not have been pushed in the first
+ // place, so don't pop anything in that case.
+ JSScript* script = frame.script();
+ probes::ExitScript(cx, script, script->function(),
+ /* popProfilerFrame = */ false);
+
+#ifdef JS_JITSPEW
+ JitFrameLayout* layout = frame.jsFrame();
+ JitSpew(JitSpew_IonInvalidate, "Bailout failed (Fatal Error)");
+ JitSpew(JitSpew_IonInvalidate, " calleeToken %p",
+ (void*)layout->calleeToken());
+ JitSpew(JitSpew_IonInvalidate, " callerFramePtr %p",
+ layout->callerFramePtr());
+ JitSpew(JitSpew_IonInvalidate, " ra %p", (void*)layout->returnAddress());
+#endif
+ }
+
+ frame.ionScript()->decrementInvalidationCount(cx->gcContext());
+
+ // Make the frame being bailed out the top profiled frame.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(
+ cx->runtime())) {
+ cx->jitActivation->setLastProfilingFrame(currentFramePtr);
+ }
+
+ return success;
+}
+
+bool jit::ExceptionHandlerBailout(JSContext* cx,
+ const InlineFrameIterator& frame,
+ ResumeFromException* rfe,
+ const ExceptionBailoutInfo& excInfo) {
+ // If we are resuming in a finally block, the exception has already
+ // been captured.
+ // We can also be propagating debug mode exceptions without there being an
+ // actual exception pending. For instance, when we return false from an
+ // operation callback like a timeout handler.
+ MOZ_ASSERT_IF(
+ !cx->isExceptionPending(),
+ excInfo.isFinally() || excInfo.propagatingIonExceptionForDebugMode());
+
+ JS::AutoSaveExceptionState savedExc(cx);
+
+ JitActivation* act = cx->activation()->asJit();
+ uint8_t* prevExitFP = act->jsExitFP();
+ auto restoreExitFP =
+ mozilla::MakeScopeExit([&]() { act->setJSExitFP(prevExitFP); });
+ act->setJSExitFP(FAKE_EXITFP_FOR_BAILOUT);
+
+ gc::AutoSuppressGC suppress(cx);
+
+ JitActivationIterator jitActivations(cx);
+ BailoutFrameInfo bailoutData(jitActivations, frame.frame());
+ JSJitFrameIter frameView(jitActivations->asJit());
+ JitFrameLayout* currentFramePtr = frameView.jsFrame();
+
+ BaselineBailoutInfo* bailoutInfo = nullptr;
+ bool success = BailoutIonToBaseline(cx, bailoutData.activation(), frameView,
+ &bailoutInfo, &excInfo,
+ BailoutReason::ExceptionHandler);
+ if (success) {
+ MOZ_ASSERT(bailoutInfo);
+
+ // Overwrite the kind so HandleException after the bailout returns
+ // false, jumping directly to the exception tail.
+ if (excInfo.propagatingIonExceptionForDebugMode()) {
+ bailoutInfo->bailoutKind =
+ mozilla::Some(BailoutKind::IonExceptionDebugMode);
+ } else if (excInfo.isFinally()) {
+ bailoutInfo->bailoutKind = mozilla::Some(BailoutKind::Finally);
+ }
+
+ rfe->kind = ExceptionResumeKind::Bailout;
+ rfe->stackPointer = bailoutInfo->incomingStack;
+ rfe->bailoutInfo = bailoutInfo;
+ } else {
+ // Drop the exception that triggered the bailout and instead propagate the
+ // failure caused by processing the bailout (eg. OOM).
+ savedExc.drop();
+ MOZ_ASSERT(!bailoutInfo);
+ MOZ_ASSERT(cx->isExceptionPending());
+ }
+
+ // Make the frame being bailed out the top profiled frame.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(
+ cx->runtime())) {
+ cx->jitActivation->setLastProfilingFrame(currentFramePtr);
+ }
+
+ return success;
+}
+
+// Initialize the NamedLambdaObject and CallObject of the current frame if
+// needed.
+bool jit::EnsureHasEnvironmentObjects(JSContext* cx, AbstractFramePtr fp) {
+ // Ion does not compile eval scripts.
+ MOZ_ASSERT(!fp.isEvalFrame());
+
+ if (fp.isFunctionFrame() && !fp.hasInitialEnvironment() &&
+ fp.callee()->needsFunctionEnvironmentObjects()) {
+ if (!fp.initFunctionEnvironmentObjects(cx)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void BailoutFrameInfo::attachOnJitActivation(
+ const JitActivationIterator& jitActivations) {
+ MOZ_ASSERT(jitActivations->asJit()->jsExitFP() == FAKE_EXITFP_FOR_BAILOUT);
+ activation_ = jitActivations->asJit();
+ activation_->setBailoutData(this);
+}
+
+BailoutFrameInfo::~BailoutFrameInfo() { activation_->cleanBailoutData(); }
diff --git a/js/src/jit/Bailouts.h b/js/src/jit/Bailouts.h
new file mode 100644
index 0000000000..759f384019
--- /dev/null
+++ b/js/src/jit/Bailouts.h
@@ -0,0 +1,227 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Bailouts_h
+#define jit_Bailouts_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t
+
+#include "jstypes.h"
+
+#include "jit/IonTypes.h" // js::jit::Bailout{Id,Kind}, js::jit::SnapshotOffset
+#include "jit/MachineState.h" // js::jit::MachineState
+#include "js/TypeDecls.h" // jsbytecode
+#include "vm/JSContext.h" // JSContext
+
+namespace js {
+
+class AbstractFramePtr;
+
+namespace jit {
+
+// [SMDOC] IonMonkey Bailouts
+//
+// A "bailout" is the process of recovering a baseline interpreter frame from an
+// IonFrame. Bailouts are implemented in js::jit::BailoutIonToBaseline, which
+// has the following callers:
+//
+// * js::jit::Bailout - This is used when a guard fails in the Ion code
+// itself; for example, an LGuardShape fails or an LAddI overflows. See
+// callers of CodeGenerator::bailoutFrom() for more examples.
+//
+// * js::jit::ExceptionHandlerBailout - Ion doesn't implement `catch` or
+// `finally`. If an exception is thrown and would be caught by an Ion frame,
+// we bail out instead.
+//
+// * js::jit::InvalidationBailout - We returned to Ion code that was
+// invalidated while it was on the stack. See "OSI" below. Ion code can be
+// invalidated for several reasons: when GC evicts Ion code to save memory,
+// for example, or when assumptions baked into the jitted code are
+// invalidated by the VM.
+//
+// (Some stack inspection can be done without bailing out, including GC stack
+// marking, Error object construction, and Gecko profiler sampling.)
+//
+// Consider the first case. When an Ion guard fails, we can't continue in
+// Ion. There's no IC fallback case coming to save us; we've got a broken
+// assumption baked into the code we're running. So we jump to an out-of-line
+// code path that's responsible for abandoning Ion execution and resuming in
+// the baseline interpreter: the bailout path.
+//
+// We were in the midst of optimized Ion code, so bits of program state may be
+// in registers or spilled to the native stack; values may be unboxed; some
+// objects may have been optimized away; thanks to inlining, whole call frames
+// may be missing. The bailout path must put all these pieces back together
+// into the structure the baseline interpreter expects.
+//
+// The data structure that makes this possible is called a *snapshot*.
+// Snapshots are created during Ion codegen and associated with the IonScript;
+// they tell how to recover each value in a BaselineFrame from the current
+// machine state at a given point in the Ion JIT code. This is potentially
+// different at every place in an Ion script where we might bail out. (See
+// Snapshots.h.)
+//
+// The bailout path performs roughly the following steps:
+//
+// 1. Push a snapshot index and the frame size to the native stack.
+// 2. Spill all registers.
+// 3. Call js::jit::Bailout to reconstruct the baseline frame(s).
+// 4. memmove() those to the right place on the native stack.
+// 5. Jump into the baseline interpreter.
+//
+// When C++ code invalidates Ion code, we do on-stack invalidation, or OSI, to
+// arrange for every affected Ion frame on the stack to bail out as soon as
+// control returns to it. OSI patches every instruction in the JIT code that's
+// at a return address currently on the stack. See InvalidateActivation.
+//
+//
+// ## Bailout path implementation details
+//
+// Ion code has a lot of guards, so each bailout path must be small. Steps 2
+// and 3 above are therefore implemented by a shared per-Runtime trampoline,
+// rt->jitRuntime()->getGenericBailoutHandler().
+//
+// We implement step 1 like this:
+//
+// _bailout_ID_1:
+// push 1
+// jmp _deopt
+// _bailout_ID_2:
+// push 2
+// jmp _deopt
+// ...
+// _deopt:
+// push imm(FrameSize)
+// call _global_bailout_handler
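+//
+// Conceptually (a schematic only, not the real encoding), the snapshot for a
+// given bail point is a recipe such as:
+//
+//   frame 0 (outermost script): local0 = reg r11, local1 = stack slot 3,
+//                               arg0   = constant 7
+//   frame 1 (inlined callee):   this   = reg r9 (an unboxed object),
+//                               local0 = recover-instruction #2
+//
+// BailoutIonToBaseline walks such a recipe and writes out one baseline frame
+// per (possibly inlined) script.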
+
+// BailoutStack is an architecture-specific pointer to the stack, given by the
+// bailout handler.
+class BailoutStack;
+class InvalidationBailoutStack;
+
+class IonScript;
+class InlineFrameIterator;
+class JitActivation;
+class JitActivationIterator;
+class JSJitFrameIter;
+struct ResumeFromException;
+
+// Must be implemented by each architecture.
+
+// This structure is constructed before recovering the baseline frames for a
+// bailout. It records all the information extracted from the stack that is
+// needed by the JSJitFrameIter.
+class BailoutFrameInfo {
+ MachineState machine_;
+ uint8_t* framePointer_;
+ IonScript* topIonScript_;
+ uint32_t snapshotOffset_;
+ JitActivation* activation_;
+
+ void attachOnJitActivation(const JitActivationIterator& activations);
+
+ public:
+ BailoutFrameInfo(const JitActivationIterator& activations, BailoutStack* sp);
+ BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* sp);
+ BailoutFrameInfo(const JitActivationIterator& activations,
+ const JSJitFrameIter& frame);
+ ~BailoutFrameInfo();
+
+ uint8_t* fp() const { return framePointer_; }
+ SnapshotOffset snapshotOffset() const { return snapshotOffset_; }
+ const MachineState* machineState() const { return &machine_; }
+ IonScript* ionScript() const { return topIonScript_; }
+ JitActivation* activation() const { return activation_; }
+};
+
+[[nodiscard]] bool EnsureHasEnvironmentObjects(JSContext* cx,
+ AbstractFramePtr fp);
+
+struct BaselineBailoutInfo;
+
+// Called from a bailout thunk.
+[[nodiscard]] bool Bailout(BailoutStack* sp, BaselineBailoutInfo** info);
+
+// Called from the invalidation thunk.
+[[nodiscard]] bool InvalidationBailout(InvalidationBailoutStack* sp,
+ BaselineBailoutInfo** info);
+
+class ExceptionBailoutInfo {
+ size_t frameNo_;
+ jsbytecode* resumePC_;
+ size_t numExprSlots_;
+ bool isFinally_ = false;
+ RootedValue finallyException_;
+ bool forcedReturn_;
+
+ public:
+ ExceptionBailoutInfo(JSContext* cx, size_t frameNo, jsbytecode* resumePC,
+ size_t numExprSlots)
+ : frameNo_(frameNo),
+ resumePC_(resumePC),
+ numExprSlots_(numExprSlots),
+ finallyException_(cx),
+ forcedReturn_(cx->isPropagatingForcedReturn()) {}
+
+ explicit ExceptionBailoutInfo(JSContext* cx)
+ : frameNo_(0),
+ resumePC_(nullptr),
+ numExprSlots_(0),
+ finallyException_(cx),
+ forcedReturn_(cx->isPropagatingForcedReturn()) {}
+
+ bool catchingException() const { return !!resumePC_; }
+ bool propagatingIonExceptionForDebugMode() const { return !resumePC_; }
+
+ size_t frameNo() const {
+ MOZ_ASSERT(catchingException());
+ return frameNo_;
+ }
+ jsbytecode* resumePC() const {
+ MOZ_ASSERT(catchingException());
+ return resumePC_;
+ }
+ size_t numExprSlots() const {
+ MOZ_ASSERT(catchingException());
+ return numExprSlots_;
+ }
+
+ bool isFinally() const { return isFinally_; }
+ void setFinallyException(JS::Value& exception) {
+ MOZ_ASSERT(!isFinally());
+ isFinally_ = true;
+ finallyException_ = exception;
+ }
+ HandleValue finallyException() const {
+ MOZ_ASSERT(isFinally());
+ return finallyException_;
+ }
+
+ bool forcedReturn() const { return forcedReturn_; }
+};
+
+// Called from the exception handler to enter a catch or finally block.
+[[nodiscard]] bool ExceptionHandlerBailout(JSContext* cx,
+ const InlineFrameIterator& frame,
+ ResumeFromException* rfe,
+ const ExceptionBailoutInfo& excInfo);
+
+[[nodiscard]] bool FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfoArg);
+
+#ifdef DEBUG
+[[nodiscard]] bool AssertBailoutStackDepth(JSContext* cx, JSScript* script,
+ jsbytecode* pc, ResumeMode mode,
+ uint32_t exprStackSlots);
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Bailouts_h */
diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp
new file mode 100644
index 0000000000..c82a05d0ea
--- /dev/null
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -0,0 +1,2092 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/ScopeExit.h"
+
+#include "builtin/ModuleObject.h"
+#include "debugger/DebugAPI.h"
+#include "gc/GC.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/CalleeToken.h"
+#include "jit/Invalidation.h"
+#include "jit/Ion.h"
+#include "jit/IonScript.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/JitZone.h"
+#include "jit/RematerializedFrame.h"
+#include "jit/SharedICRegisters.h"
+#include "jit/Simulator.h"
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit, js::ReportOverRecursed
+#include "js/Utility.h"
+#include "util/Memory.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/JitActivation.h"
+
+#include "jit/JitFrames-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+
+// BaselineStackBuilder may reallocate its buffer if the current one is too
+// small. To avoid dangling pointers, BufferPointer represents a pointer into
+// this buffer as a pointer to the header and a fixed offset.
+template <typename T>
+class BufferPointer {
+ const UniquePtr<BaselineBailoutInfo>& header_;
+ size_t offset_;
+ bool heap_;
+
+ public:
+ BufferPointer(const UniquePtr<BaselineBailoutInfo>& header, size_t offset,
+ bool heap)
+ : header_(header), offset_(offset), heap_(heap) {}
+
+ T* get() const {
+ BaselineBailoutInfo* header = header_.get();
+ if (!heap_) {
+ return (T*)(header->incomingStack + offset_);
+ }
+
+ uint8_t* p = header->copyStackTop - offset_;
+ MOZ_ASSERT(p >= header->copyStackBottom && p < header->copyStackTop);
+ return (T*)p;
+ }
+
+ void set(const T& value) { *get() = value; }
+
+ // Note: we return a copy instead of a reference, to avoid potential memory
+ // safety hazards when the underlying buffer gets resized.
+ const T operator*() const { return *get(); }
+ T* operator->() const { return get(); }
+};
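+
+// Usage sketch (illustrative only): rather than caching a raw BaselineFrame*
+// that points into the growable buffer, callers hold a
+// BufferPointer<BaselineFrame>(header_, offsetFromCopyStackTop, /*heap=*/true)
+// and call get() on each access, so a reallocation in enlarge() below cannot
+// leave them with a dangling pointer.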
+
+/**
+ * BaselineStackBuilder helps abstract the process of rebuilding the C stack on
+ * the heap. It takes a bailout iterator and keeps track of the point on the C
+ * stack from which the reconstructed frames will be written.
+ *
+ * It exposes methods to write data into the heap memory storing the
+ * reconstructed stack. It also exposes methods to easily calculate addresses.
+ * This includes both the virtual address that a particular value will be at
+ * when it's eventually copied onto the stack, and the current actual address
+ * of that value (whether in the heap-allocated portion being constructed or
+ * on the existing stack).
+ *
+ * The abstraction handles transparent re-allocation of the heap memory when it
+ * needs to be enlarged to accommodate new data. Similarly to the C stack, the
+ * data that's written to the reconstructed stack grows from high to low in
+ * memory.
+ *
+ * The lowest region of the allocated memory contains a BaselineBailoutInfo
+ * structure that points to the start and end of the written data.
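+ *
+ * Schematically (low addresses on the left):
+ *
+ *   [ BaselineBailoutInfo | ...free... | frame N | ... | frame 1 | frame 0 ]
+ *   ^ buffer start         ^ copyStackBottom (moves left)    copyStackTop ^
+ *
+ * Frame 0 (the outermost frame) is built first, nearest copyStackTop, and
+ * inner frames are written below it as copyStackBottom moves downwards.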
+ */
+class MOZ_STACK_CLASS BaselineStackBuilder {
+ JSContext* cx_;
+ JitFrameLayout* frame_ = nullptr;
+ SnapshotIterator& iter_;
+ RootedValueVector outermostFrameFormals_;
+
+ size_t bufferTotal_ = 1024;
+ size_t bufferAvail_ = 0;
+ size_t bufferUsed_ = 0;
+ size_t framePushed_ = 0;
+
+ UniquePtr<BaselineBailoutInfo> header_;
+
+ JSScript* script_;
+ JSFunction* fun_;
+ const ExceptionBailoutInfo* excInfo_;
+ ICScript* icScript_;
+
+ jsbytecode* pc_ = nullptr;
+ JSOp op_ = JSOp::Nop;
+ mozilla::Maybe<ResumeMode> resumeMode_;
+ uint32_t exprStackSlots_ = 0;
+ void* prevFramePtr_ = nullptr;
+ Maybe<BufferPointer<BaselineFrame>> blFrame_;
+
+ size_t frameNo_ = 0;
+ JSFunction* nextCallee_ = nullptr;
+
+ BailoutKind bailoutKind_;
+
+ // The baseline frames we will reconstruct on the heap are not
+ // rooted, so GC must be suppressed.
+ gc::AutoSuppressGC suppress_;
+
+ public:
+ BaselineStackBuilder(JSContext* cx, const JSJitFrameIter& frameIter,
+ SnapshotIterator& iter,
+ const ExceptionBailoutInfo* excInfo,
+ BailoutReason reason);
+
+ [[nodiscard]] bool init() {
+ MOZ_ASSERT(!header_);
+ MOZ_ASSERT(bufferUsed_ == 0);
+
+ uint8_t* bufferRaw = cx_->pod_calloc<uint8_t>(bufferTotal_);
+ if (!bufferRaw) {
+ return false;
+ }
+ bufferAvail_ = bufferTotal_ - sizeof(BaselineBailoutInfo);
+
+ header_.reset(new (bufferRaw) BaselineBailoutInfo());
+ header_->incomingStack = reinterpret_cast<uint8_t*>(frame_);
+ header_->copyStackTop = bufferRaw + bufferTotal_;
+ header_->copyStackBottom = header_->copyStackTop;
+ return true;
+ }
+
+ [[nodiscard]] bool buildOneFrame();
+ bool done();
+ void nextFrame();
+
+ JSScript* script() const { return script_; }
+ size_t frameNo() const { return frameNo_; }
+ bool isOutermostFrame() const { return frameNo_ == 0; }
+ MutableHandleValueVector outermostFrameFormals() {
+ return &outermostFrameFormals_;
+ }
+ BailoutKind bailoutKind() const { return bailoutKind_; }
+
+ inline JitFrameLayout* startFrame() { return frame_; }
+
+ BaselineBailoutInfo* info() {
+ MOZ_ASSERT(header_);
+ return header_.get();
+ }
+
+ BaselineBailoutInfo* takeBuffer() {
+ MOZ_ASSERT(header_);
+ return header_.release();
+ }
+
+ private:
+ [[nodiscard]] bool initFrame();
+ [[nodiscard]] bool buildBaselineFrame();
+ [[nodiscard]] bool buildArguments();
+ [[nodiscard]] bool buildFixedSlots();
+ [[nodiscard]] bool fixUpCallerArgs(MutableHandleValueVector savedCallerArgs,
+ bool* fixedUp);
+ [[nodiscard]] bool buildFinallyException();
+ [[nodiscard]] bool buildExpressionStack();
+ [[nodiscard]] bool finishLastFrame();
+
+ [[nodiscard]] bool prepareForNextFrame(HandleValueVector savedCallerArgs);
+ [[nodiscard]] bool finishOuterFrame();
+ [[nodiscard]] bool buildStubFrame(uint32_t frameSize,
+ HandleValueVector savedCallerArgs);
+ [[nodiscard]] bool buildRectifierFrame(uint32_t actualArgc,
+ size_t endOfBaselineStubArgs);
+
+#ifdef DEBUG
+ [[nodiscard]] bool validateFrame();
+ bool envChainSlotCanBeOptimized();
+#endif
+
+ bool isPrologueBailout();
+ jsbytecode* getResumePC();
+ void* getStubReturnAddress();
+
+ uint32_t exprStackSlots() const { return exprStackSlots_; }
+
+ // Returns true if we're bailing out to a catch or finally block in this frame.
+ bool catchingException() const {
+ return excInfo_ && excInfo_->catchingException() &&
+ excInfo_->frameNo() == frameNo_;
+ }
+
+ // Returns true if we're bailing out to a finally block in this frame.
+ bool resumingInFinallyBlock() const {
+ return catchingException() && excInfo_->isFinally();
+ }
+
+ bool forcedReturn() const { return excInfo_ && excInfo_->forcedReturn(); }
+
+ // Returns true if we're bailing out in place for debug mode.
+ bool propagatingIonExceptionForDebugMode() const {
+ return excInfo_ && excInfo_->propagatingIonExceptionForDebugMode();
+ }
+
+ void* prevFramePtr() const {
+ MOZ_ASSERT(prevFramePtr_);
+ return prevFramePtr_;
+ }
+ BufferPointer<BaselineFrame>& blFrame() { return blFrame_.ref(); }
+
+ void setNextCallee(JSFunction* nextCallee,
+ TrialInliningState trialInliningState);
+ JSFunction* nextCallee() const { return nextCallee_; }
+
+ jsbytecode* pc() const { return pc_; }
+ bool resumeAfter() const {
+ return !catchingException() && iter_.resumeAfter();
+ }
+
+ ResumeMode resumeMode() const { return *resumeMode_; }
+
+ bool needToSaveCallerArgs() const {
+ return resumeMode() == ResumeMode::InlinedAccessor;
+ }
+
+ [[nodiscard]] bool enlarge() {
+ MOZ_ASSERT(header_ != nullptr);
+ if (bufferTotal_ & mozilla::tl::MulOverflowMask<2>::value) {
+ ReportOutOfMemory(cx_);
+ return false;
+ }
+
+ size_t newSize = bufferTotal_ * 2;
+ uint8_t* newBufferRaw = cx_->pod_calloc<uint8_t>(newSize);
+ if (!newBufferRaw) {
+ return false;
+ }
+
+ // Initialize the new buffer.
+ //
+ // Before:
+ //
+ // [ Header | .. | Payload ]
+ //
+ // After:
+ //
+ // [ Header | ............... | Payload ]
+ //
+ // Size of Payload is |bufferUsed_|.
+ //
+ // We need to copy from the old buffer and header to the new buffer before
+ // we set header_ (this deletes the old buffer).
+ //
+ // We also need to update |copyStackBottom| and |copyStackTop| because these
+ // fields point to the Payload's start and end, respectively.
+ using BailoutInfoPtr = UniquePtr<BaselineBailoutInfo>;
+ BailoutInfoPtr newHeader(new (newBufferRaw) BaselineBailoutInfo(*header_));
+ newHeader->copyStackTop = newBufferRaw + newSize;
+ newHeader->copyStackBottom = newHeader->copyStackTop - bufferUsed_;
+ memcpy(newHeader->copyStackBottom, header_->copyStackBottom, bufferUsed_);
+ bufferTotal_ = newSize;
+ bufferAvail_ = newSize - (sizeof(BaselineBailoutInfo) + bufferUsed_);
+ header_ = std::move(newHeader);
+ return true;
+ }
+
+ void resetFramePushed() { framePushed_ = 0; }
+
+ size_t framePushed() const { return framePushed_; }
+
+ [[nodiscard]] bool subtract(size_t size, const char* info = nullptr) {
+ // Enlarge the buffer if needed.
+ while (size > bufferAvail_) {
+ if (!enlarge()) {
+ return false;
+ }
+ }
+
+ // Reserve space for the element.
+ header_->copyStackBottom -= size;
+ bufferAvail_ -= size;
+ bufferUsed_ += size;
+ framePushed_ += size;
+ if (info) {
+ JitSpew(JitSpew_BaselineBailouts, " SUB_%03d %p/%p %-15s",
+ (int)size, header_->copyStackBottom,
+ virtualPointerAtStackOffset(0), info);
+ }
+ return true;
+ }
+
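+ // Note: subtract() below may call enlarge(), which reallocates the heap
+ // buffer, so |t| must not point into the buffer itself; the assertion
+ // checks for this.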
+ template <typename T>
+ [[nodiscard]] bool write(const T& t) {
+ MOZ_ASSERT(!(uintptr_t(&t) >= uintptr_t(header_->copyStackBottom) &&
+ uintptr_t(&t) < uintptr_t(header_->copyStackTop)),
+ "Should not reference memory that can be freed");
+ if (!subtract(sizeof(T))) {
+ return false;
+ }
+ memcpy(header_->copyStackBottom, &t, sizeof(T));
+ return true;
+ }
+
+ template <typename T>
+ [[nodiscard]] bool writePtr(T* t, const char* info) {
+ if (!write<T*>(t)) {
+ return false;
+ }
+ if (info) {
+ JitSpew(JitSpew_BaselineBailouts, " WRITE_PTR %p/%p %-15s %p",
+ header_->copyStackBottom, virtualPointerAtStackOffset(0), info,
+ t);
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool writeWord(size_t w, const char* info) {
+ if (!write<size_t>(w)) {
+ return false;
+ }
+ if (info) {
+ if (sizeof(size_t) == 4) {
+ JitSpew(JitSpew_BaselineBailouts, " WRITE_WRD %p/%p %-15s %08zx",
+ header_->copyStackBottom, virtualPointerAtStackOffset(0), info,
+ w);
+ } else {
+ JitSpew(JitSpew_BaselineBailouts, " WRITE_WRD %p/%p %-15s %016zx",
+ header_->copyStackBottom, virtualPointerAtStackOffset(0), info,
+ w);
+ }
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool writeValue(const Value& val, const char* info) {
+ if (!write<Value>(val)) {
+ return false;
+ }
+ if (info) {
+ JitSpew(JitSpew_BaselineBailouts,
+ " WRITE_VAL %p/%p %-15s %016" PRIx64,
+ header_->copyStackBottom, virtualPointerAtStackOffset(0), info,
+ *((uint64_t*)&val));
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool maybeWritePadding(size_t alignment, size_t after,
+ const char* info) {
+ MOZ_ASSERT(framePushed_ % sizeof(Value) == 0);
+ MOZ_ASSERT(after % sizeof(Value) == 0);
+ size_t offset = ComputeByteAlignment(after, alignment);
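+ // For example (assuming ComputeByteAlignment returns
+ // (alignment - after % alignment) % alignment): with alignment == 16 and
+ // after == 24, offset is 8, so we pad until framePushed_ % 16 == 8 and the
+ // frame is 16-byte aligned once the next |after| bytes have been pushed.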
+ while (framePushed_ % alignment != offset) {
+ if (!writeValue(MagicValue(JS_ARG_POISON), info)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void setResumeFramePtr(void* resumeFramePtr) {
+ header_->resumeFramePtr = resumeFramePtr;
+ }
+
+ void setResumeAddr(void* resumeAddr) { header_->resumeAddr = resumeAddr; }
+
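+ // The accessors below translate an offset within the frame being built.
+ // Offsets below bufferUsed_ refer to data already written into the heap
+ // copy; larger offsets refer to data still on the incoming C stack.
+ // pointerAtStackOffset() returns the current location of the data, while
+ // virtualPointerAtStackOffset() returns the address the data will have once
+ // the reconstructed frames are copied onto the stack.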
+ template <typename T>
+ BufferPointer<T> pointerAtStackOffset(size_t offset) {
+ if (offset < bufferUsed_) {
+ // Calculate offset from copyStackTop.
+ offset = header_->copyStackTop - (header_->copyStackBottom + offset);
+ return BufferPointer<T>(header_, offset, /* heap = */ true);
+ }
+
+ return BufferPointer<T>(header_, offset - bufferUsed_, /* heap = */ false);
+ }
+
+ BufferPointer<Value> valuePointerAtStackOffset(size_t offset) {
+ return pointerAtStackOffset<Value>(offset);
+ }
+
+ inline uint8_t* virtualPointerAtStackOffset(size_t offset) {
+ if (offset < bufferUsed_) {
+ return reinterpret_cast<uint8_t*>(frame_) - (bufferUsed_ - offset);
+ }
+ return reinterpret_cast<uint8_t*>(frame_) + (offset - bufferUsed_);
+ }
+};
+
+BaselineStackBuilder::BaselineStackBuilder(JSContext* cx,
+ const JSJitFrameIter& frameIter,
+ SnapshotIterator& iter,
+ const ExceptionBailoutInfo* excInfo,
+ BailoutReason reason)
+ : cx_(cx),
+ frame_(static_cast<JitFrameLayout*>(frameIter.current())),
+ iter_(iter),
+ outermostFrameFormals_(cx),
+ script_(frameIter.script()),
+ fun_(frameIter.maybeCallee()),
+ excInfo_(excInfo),
+ icScript_(script_->jitScript()->icScript()),
+ bailoutKind_(iter.bailoutKind()),
+ suppress_(cx) {
+ MOZ_ASSERT(bufferTotal_ >= sizeof(BaselineBailoutInfo));
+ if (reason == BailoutReason::Invalidate) {
+ bailoutKind_ = BailoutKind::OnStackInvalidation;
+ }
+}
+
+bool BaselineStackBuilder::initFrame() {
+ // Get the pc and ResumeMode. If we are handling an exception, resume at the
+ // pc of the catch or finally block.
+ if (catchingException()) {
+ pc_ = excInfo_->resumePC();
+ resumeMode_ = mozilla::Some(ResumeMode::ResumeAt);
+ } else {
+ pc_ = script_->offsetToPC(iter_.pcOffset());
+ resumeMode_ = mozilla::Some(iter_.resumeMode());
+ }
+ op_ = JSOp(*pc_);
+
+ // If we are catching an exception, we are bailing out to a catch or
+ // finally block and this is the frame where we will resume. Usually the
+ // expression stack should be empty in this case but there can be
+ // iterators on the stack.
+ if (catchingException()) {
+ exprStackSlots_ = excInfo_->numExprSlots();
+ } else {
+ uint32_t totalFrameSlots = iter_.numAllocations();
+ uint32_t fixedSlots = script_->nfixed();
+ uint32_t argSlots = CountArgSlots(script_, fun_);
+ uint32_t intermediates = NumIntermediateValues(resumeMode());
+ exprStackSlots_ = totalFrameSlots - fixedSlots - argSlots - intermediates;
+
+ // Verify that there was no underflow.
+ MOZ_ASSERT(exprStackSlots_ <= totalFrameSlots);
+ }
+
+ JitSpew(JitSpew_BaselineBailouts, " Unpacking %s:%u:%u",
+ script_->filename(), script_->lineno(), script_->column());
+ JitSpew(JitSpew_BaselineBailouts, " [BASELINE-JS FRAME]");
+
+ // Write the previous frame pointer value. For the outermost frame we reuse
+ // the value in the JitFrameLayout already on the stack. Record the virtual
+ // stack offset at this location. Later on, if we end up writing out a
+ // BaselineStub frame for the next callee, we'll need to save the address.
+ if (!isOutermostFrame()) {
+ if (!writePtr(prevFramePtr(), "PrevFramePtr")) {
+ return false;
+ }
+ }
+ prevFramePtr_ = virtualPointerAtStackOffset(0);
+
+ resetFramePushed();
+
+ return true;
+}
+
+void BaselineStackBuilder::setNextCallee(
+ JSFunction* nextCallee, TrialInliningState trialInliningState) {
+ nextCallee_ = nextCallee;
+
+ if (trialInliningState == TrialInliningState::Inlined) {
+ // Update icScript_ to point to the icScript of nextCallee
+ const uint32_t pcOff = script_->pcToOffset(pc_);
+ icScript_ = icScript_->findInlinedChild(pcOff);
+ } else {
+ // If we don't know for certain that it's TrialInliningState::Inlined,
+ // just use the callee's own ICScript. We could still have the trial
+ // inlined ICScript available, but we also could not if we transitioned
+ // to TrialInliningState::Failure after being monomorphic inlined.
+ icScript_ = nextCallee->nonLazyScript()->jitScript()->icScript();
+ }
+}
+
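+// Frame building is done when the snapshot has no more frames, or when we
+// have reached the frame that catches the exception being bailed out for;
+// any deeper inlined frames are unwound rather than rebuilt.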
+bool BaselineStackBuilder::done() {
+ if (!iter_.moreFrames()) {
+ MOZ_ASSERT(!nextCallee_);
+ return true;
+ }
+ return catchingException();
+}
+
+void BaselineStackBuilder::nextFrame() {
+ MOZ_ASSERT(nextCallee_);
+ fun_ = nextCallee_;
+ script_ = fun_->nonLazyScript();
+ nextCallee_ = nullptr;
+
+ // Scripts with an IonScript must also have a BaselineScript.
+ MOZ_ASSERT(script_->hasBaselineScript());
+
+ frameNo_++;
+ iter_.nextInstruction();
+}
+
+// Build the BaselineFrame struct
+bool BaselineStackBuilder::buildBaselineFrame() {
+ if (!subtract(BaselineFrame::Size(), "BaselineFrame")) {
+ return false;
+ }
+ blFrame_.reset();
+ blFrame_.emplace(pointerAtStackOffset<BaselineFrame>(0));
+
+ uint32_t flags = BaselineFrame::RUNNING_IN_INTERPRETER;
+
+ // If we are bailing to a script whose execution is observed, mark the
+ // baseline frame as a debuggee frame. This is to cover the case where we
+ // don't rematerialize the Ion frame via the Debugger.
+ if (script_->isDebuggee()) {
+ flags |= BaselineFrame::DEBUGGEE;
+ }
+
+ // Get |envChain|.
+ JSObject* envChain = nullptr;
+ Value envChainSlot = iter_.read();
+ if (envChainSlot.isObject()) {
+ // The env slot has been updated from UndefinedValue. It must be the
+ // complete initial environment.
+ envChain = &envChainSlot.toObject();
+
+ // Set the HAS_INITIAL_ENV flag if needed. See IsFrameInitialEnvironment.
+ MOZ_ASSERT(!script_->isForEval());
+ if (fun_ && fun_->needsFunctionEnvironmentObjects()) {
+ MOZ_ASSERT(fun_->nonLazyScript()->initialEnvironmentShape());
+ flags |= BaselineFrame::HAS_INITIAL_ENV;
+ }
+ } else {
+ MOZ_ASSERT(envChainSlot.isUndefined() ||
+ envChainSlot.isMagic(JS_OPTIMIZED_OUT));
+ MOZ_ASSERT(envChainSlotCanBeOptimized());
+
+ // The env slot has been optimized out.
+ // Get it from the function or script.
+ if (fun_) {
+ envChain = fun_->environment();
+ } else if (script_->isModule()) {
+ envChain = script_->module()->environment();
+ } else {
+ // For global scripts without a non-syntactic env the env
+ // chain is the script's global lexical environment. (We do
+ // not compile scripts with a non-syntactic global scope).
+ // Also note that it's invalid to resume into the prologue in
+ // this case because the prologue expects the env chain in R1
+ // for eval and global scripts.
+ MOZ_ASSERT(!script_->isForEval());
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+ envChain = &(script_->global().lexicalEnvironment());
+ }
+ }
+
+ // Write |envChain|.
+ MOZ_ASSERT(envChain);
+ JitSpew(JitSpew_BaselineBailouts, " EnvChain=%p", envChain);
+ blFrame()->setEnvironmentChain(envChain);
+
+ // Get |returnValue| if present.
+ Value returnValue = UndefinedValue();
+ if (script_->noScriptRval()) {
+ // Don't use the return value (likely a JS_OPTIMIZED_OUT MagicValue) to
+ // not confuse Baseline.
+ iter_.skip();
+ } else {
+ returnValue = iter_.read();
+ flags |= BaselineFrame::HAS_RVAL;
+ }
+
+ // Write |returnValue|.
+ JitSpew(JitSpew_BaselineBailouts, " ReturnValue=%016" PRIx64,
+ *((uint64_t*)&returnValue));
+ blFrame()->setReturnValue(returnValue);
+
+ // Get |argsObj| if present.
+ ArgumentsObject* argsObj = nullptr;
+ if (script_->needsArgsObj()) {
+ Value maybeArgsObj = iter_.read();
+ MOZ_ASSERT(maybeArgsObj.isObject() || maybeArgsObj.isUndefined() ||
+ maybeArgsObj.isMagic(JS_OPTIMIZED_OUT));
+ if (maybeArgsObj.isObject()) {
+ argsObj = &maybeArgsObj.toObject().as<ArgumentsObject>();
+ }
+ }
+
+ // Note: we do not need to initialize the scratchValue field in BaselineFrame.
+
+ // Write |flags|.
+ blFrame()->setFlags(flags);
+
+ // Write |icScript|.
+ JitSpew(JitSpew_BaselineBailouts, " ICScript=%p", icScript_);
+ blFrame()->setICScript(icScript_);
+
+ // initArgsObjUnchecked modifies the frame's flags, so call it after setFlags.
+ if (argsObj) {
+ blFrame()->initArgsObjUnchecked(*argsObj);
+ }
+ return true;
+}
+
+// Overwrite the pushed args present in the calling frame with
+// the unpacked |thisv| and argument values.
+bool BaselineStackBuilder::buildArguments() {
+ Value thisv = iter_.read();
+ JitSpew(JitSpew_BaselineBailouts, " Is function!");
+ JitSpew(JitSpew_BaselineBailouts, " thisv=%016" PRIx64,
+ *((uint64_t*)&thisv));
+
+ size_t thisvOffset = framePushed() + JitFrameLayout::offsetOfThis();
+ valuePointerAtStackOffset(thisvOffset).set(thisv);
+
+ MOZ_ASSERT(iter_.numAllocations() >= CountArgSlots(script_, fun_));
+ JitSpew(JitSpew_BaselineBailouts,
+ " frame slots %u, nargs %zu, nfixed %zu", iter_.numAllocations(),
+ fun_->nargs(), script_->nfixed());
+
+ bool shouldStoreOutermostFormals =
+ isOutermostFrame() && !script_->argsObjAliasesFormals();
+ if (shouldStoreOutermostFormals) {
+ // This is the first (outermost) frame and we don't have an
+ // arguments object aliasing the formals. Due to UCE and phi
+ // elimination, we could store an UndefinedValue() here for
+ // formals we think are unused, but locals may still reference the
+ // original argument slot (MParameter/LArgument) and expect the
+ // original Value. To avoid this problem, store the formals in a
+ // Vector until we are done.
+ MOZ_ASSERT(outermostFrameFormals().empty());
+ if (!outermostFrameFormals().resize(fun_->nargs())) {
+ return false;
+ }
+ }
+
+ for (uint32_t i = 0; i < fun_->nargs(); i++) {
+ Value arg = iter_.read();
+ JitSpew(JitSpew_BaselineBailouts, " arg %d = %016" PRIx64, (int)i,
+ *((uint64_t*)&arg));
+ if (!isOutermostFrame()) {
+ size_t argOffset = framePushed() + JitFrameLayout::offsetOfActualArg(i);
+ valuePointerAtStackOffset(argOffset).set(arg);
+ } else if (shouldStoreOutermostFormals) {
+ outermostFrameFormals()[i].set(arg);
+ } else {
+ // When the arguments object aliases the formal arguments, JSOp::SetArg
+ // mutates the arguments object. In such cases, the list of arguments
+ // reported by the snapshot contains only aliases of arguments-object
+ // slots, which are optimized to store only differences compared to the
+ // arguments that are on the stack.
+ }
+ }
+ return true;
+}
+
+bool BaselineStackBuilder::buildFixedSlots() {
+ for (uint32_t i = 0; i < script_->nfixed(); i++) {
+ Value slot = iter_.read();
+ if (!writeValue(slot, "FixedValue")) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// The caller side of inlined js::fun_call and accessors must look
+// like the function wasn't inlined.
+bool BaselineStackBuilder::fixUpCallerArgs(
+ MutableHandleValueVector savedCallerArgs, bool* fixedUp) {
+ MOZ_ASSERT(!*fixedUp);
+
+ // Inlining of SpreadCall-like frames not currently supported.
+ MOZ_ASSERT(!IsSpreadOp(op_));
+
+ if (resumeMode() != ResumeMode::InlinedFunCall && !needToSaveCallerArgs()) {
+ return true;
+ }
+
+ // Calculate how many arguments are consumed by the inlined call.
+ // All calls pass |callee| and |this|.
+ uint32_t inlinedArgs = 2;
+ if (resumeMode() == ResumeMode::InlinedFunCall) {
+ // The first argument to an inlined FunCall becomes |this|,
+ // if it exists. The rest are passed normally.
+ MOZ_ASSERT(IsInvokeOp(op_));
+ inlinedArgs += GET_ARGC(pc_) > 0 ? GET_ARGC(pc_) - 1 : 0;
+ } else {
+ MOZ_ASSERT(resumeMode() == ResumeMode::InlinedAccessor);
+ MOZ_ASSERT(IsIonInlinableGetterOrSetterOp(op_));
+ // Setters are passed one argument. Getters are passed none.
+ if (IsSetPropOp(op_)) {
+ inlinedArgs++;
+ }
+ }
+
+ // Calculate how many values are live on the stack across the call,
+ // and push them.
+ MOZ_ASSERT(inlinedArgs <= exprStackSlots());
+ uint32_t liveStackSlots = exprStackSlots() - inlinedArgs;
+
+ JitSpew(JitSpew_BaselineBailouts,
+ " pushing %u expression stack slots before fixup",
+ liveStackSlots);
+ for (uint32_t i = 0; i < liveStackSlots; i++) {
+ Value v = iter_.read();
+ if (!writeValue(v, "StackValue")) {
+ return false;
+ }
+ }
+
+ // When we inline js::fun_call, we bypass the native and inline the
+ // target directly. When rebuilding the stack, we need to fill in
+ // the right number of slots to make it look like the native was
+ // actually called.
+ if (resumeMode() == ResumeMode::InlinedFunCall) {
+ // We must transform the stack from |target, this, args| to
+ // |js_fun_call, target, this, args|. The value of |js_fun_call|
+ // will never be observed, so we push |undefined| for it, followed
+ // by the remaining arguments.
+ JitSpew(JitSpew_BaselineBailouts,
+ " pushing undefined to fixup funcall");
+ if (!writeValue(UndefinedValue(), "StackValue")) {
+ return false;
+ }
+ if (GET_ARGC(pc_) > 0) {
+ JitSpew(JitSpew_BaselineBailouts,
+ " pushing %u expression stack slots", inlinedArgs);
+ for (uint32_t i = 0; i < inlinedArgs; i++) {
+ Value arg = iter_.read();
+ if (!writeValue(arg, "StackValue")) {
+ return false;
+ }
+ }
+ } else {
+ // When we inline FunCall with no arguments, we push an extra
+ // |undefined| value for |this|. That value should not appear
+ // in the rebuilt baseline frame.
+ JitSpew(JitSpew_BaselineBailouts, " pushing target of funcall");
+ Value target = iter_.read();
+ if (!writeValue(target, "StackValue")) {
+ return false;
+ }
+ // Skip |this|.
+ iter_.skip();
+ }
+ }
+
+ if (needToSaveCallerArgs()) {
+ // Save the actual arguments. They are needed to rebuild the callee frame.
+ if (!savedCallerArgs.resize(inlinedArgs)) {
+ return false;
+ }
+ for (uint32_t i = 0; i < inlinedArgs; i++) {
+ savedCallerArgs[i].set(iter_.read());
+ }
+
+ if (IsSetPropOp(op_)) {
+ // The RHS argument to SetProp remains on the stack after the
+ // operation and is observable, so we have to fill it in.
+ Value initialArg = savedCallerArgs[inlinedArgs - 1];
+ JitSpew(JitSpew_BaselineBailouts,
+ " pushing setter's initial argument");
+ if (!writeValue(initialArg, "StackValue")) {
+ return false;
+ }
+ }
+ }
+
+ *fixedUp = true;
+ return true;
+}
+
+bool BaselineStackBuilder::buildExpressionStack() {
+ JitSpew(JitSpew_BaselineBailouts, " pushing %u expression stack slots",
+ exprStackSlots());
+ for (uint32_t i = 0; i < exprStackSlots(); i++) {
+ Value v;
+ // If we are in the middle of propagating an exception from Ion by
+ // bailing to baseline due to debug mode, we might not have all
+ // the stack if we are at the newest frame.
+ //
+ // For instance, if calling |f()| pushed an Ion frame which threw,
+ // the snapshot expects the return value to be pushed, but it's
+ // possible nothing was pushed before we threw.
+ //
+ // We therefore use a fallible read here.
+ if (!iter_.tryRead(&v)) {
+ MOZ_ASSERT(propagatingIonExceptionForDebugMode() && !iter_.moreFrames());
+ v = MagicValue(JS_OPTIMIZED_OUT);
+ }
+ if (!writeValue(v, "StackValue")) {
+ return false;
+ }
+ }
+
+ if (resumeMode() == ResumeMode::ResumeAfterCheckIsObject) {
+ JitSpew(JitSpew_BaselineBailouts,
+ " Checking that intermediate value is an object");
+ Value returnVal;
+ if (iter_.tryRead(&returnVal) && !returnVal.isObject()) {
+ MOZ_ASSERT(!returnVal.isMagic());
+ JitSpew(JitSpew_BaselineBailouts,
+ " Not an object! Overwriting bailout kind");
+ bailoutKind_ = BailoutKind::ThrowCheckIsObject;
+ }
+ }
+
+ return true;
+}
+
+bool BaselineStackBuilder::buildFinallyException() {
+ MOZ_ASSERT(resumingInFinallyBlock());
+
+ if (!writeValue(excInfo_->finallyException(), "Exception")) {
+ return false;
+ }
+ if (!writeValue(BooleanValue(true), "throwing")) {
+ return false;
+ }
+
+ return true;
+}
+
+bool BaselineStackBuilder::prepareForNextFrame(
+ HandleValueVector savedCallerArgs) {
+ const uint32_t frameSize = framePushed();
+
+ // Write out descriptor and return address for the baseline frame.
+ // The icEntry in question MUST have an inlinable fallback stub.
+ if (!finishOuterFrame()) {
+ return false;
+ }
+
+ return buildStubFrame(frameSize, savedCallerArgs);
+}
+
+bool BaselineStackBuilder::finishOuterFrame() {
+ // . .
+ // | Descr(BLJS) |
+ // +---------------+
+ // | ReturnAddr |
+ // +===============+
+
+ const BaselineInterpreter& baselineInterp =
+ cx_->runtime()->jitRuntime()->baselineInterpreter();
+
+ blFrame()->setInterpreterFields(script_, pc_);
+
+ // Write out descriptor of BaselineJS frame.
+ size_t baselineFrameDescr = MakeFrameDescriptor(FrameType::BaselineJS);
+ if (!writeWord(baselineFrameDescr, "Descriptor")) {
+ return false;
+ }
+
+ uint8_t* retAddr = baselineInterp.retAddrForIC(op_);
+ return writePtr(retAddr, "ReturnAddr");
+}
+
+bool BaselineStackBuilder::buildStubFrame(uint32_t frameSize,
+ HandleValueVector savedCallerArgs) {
+ // Build baseline stub frame:
+ // +===============+
+ // | FramePtr |
+ // +---------------+
+ // | StubPtr |
+ // +---------------+
+ // | Padding? |
+ // +---------------+
+ // | ArgA |
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | Arg0 |
+ // +---------------+
+ // | ThisV |
+ // +---------------+
+ // | CalleeToken |
+ // +---------------+
+ // | Descr(BLStub) |
+ // +---------------+
+ // | ReturnAddr |
+ // +===============+
+
+ JitSpew(JitSpew_BaselineBailouts, " [BASELINE-STUB FRAME]");
+
+ // Write previous frame pointer (saved earlier).
+ if (!writePtr(prevFramePtr(), "PrevFramePtr")) {
+ return false;
+ }
+ prevFramePtr_ = virtualPointerAtStackOffset(0);
+
+ // Write stub pointer.
+ uint32_t pcOff = script_->pcToOffset(pc_);
+ JitScript* jitScript = script_->jitScript();
+ const ICEntry& icEntry = jitScript->icEntryFromPCOffset(pcOff);
+ ICFallbackStub* fallback = jitScript->fallbackStubForICEntry(&icEntry);
+ if (!writePtr(fallback, "StubPtr")) {
+ return false;
+ }
+
+ // Write out the arguments, copied from the baseline frame. The order
+ // of the arguments is reversed relative to the baseline frame's stack
+ // values.
+ MOZ_ASSERT(IsIonInlinableOp(op_));
+ bool pushedNewTarget = IsConstructPC(pc_);
+ unsigned actualArgc;
+ Value callee;
+ if (needToSaveCallerArgs()) {
+ // For accessors, the arguments are not on the stack anymore,
+ // but they are copied in a vector and are written here.
+ callee = savedCallerArgs[0];
+ actualArgc = IsSetPropOp(op_) ? 1 : 0;
+
+ // Align the stack based on the number of arguments.
+ size_t afterFrameSize =
+ (actualArgc + 1) * sizeof(Value) + JitFrameLayout::Size();
+ if (!maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding")) {
+ return false;
+ }
+
+ // Push arguments.
+ MOZ_ASSERT(actualArgc + 2 <= exprStackSlots());
+ MOZ_ASSERT(savedCallerArgs.length() == actualArgc + 2);
+ for (unsigned i = 0; i < actualArgc + 1; i++) {
+ size_t arg = savedCallerArgs.length() - (i + 1);
+ if (!writeValue(savedCallerArgs[arg], "ArgVal")) {
+ return false;
+ }
+ }
+ } else if (resumeMode() == ResumeMode::InlinedFunCall && GET_ARGC(pc_) == 0) {
+ // When calling FunCall with 0 arguments, we push |undefined|
+ // for |this|. See BaselineCacheIRCompiler::pushFunCallArguments.
+ MOZ_ASSERT(!pushedNewTarget);
+ actualArgc = 0;
+ // Align the stack based on pushing |this| and 0 arguments.
+ size_t afterFrameSize = sizeof(Value) + JitFrameLayout::Size();
+ if (!maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding")) {
+ return false;
+ }
+ // Push an undefined value for |this|.
+ if (!writeValue(UndefinedValue(), "ThisValue")) {
+ return false;
+ }
+ size_t calleeSlot = blFrame()->numValueSlots(frameSize) - 1;
+ callee = *blFrame()->valueSlot(calleeSlot);
+
+ } else {
+ MOZ_ASSERT(resumeMode() == ResumeMode::InlinedStandardCall ||
+ resumeMode() == ResumeMode::InlinedFunCall);
+ actualArgc = GET_ARGC(pc_);
+ if (resumeMode() == ResumeMode::InlinedFunCall) {
+ // See BaselineCacheIRCompiler::pushFunCallArguments.
+ MOZ_ASSERT(actualArgc > 0);
+ actualArgc--;
+ }
+
+ // In addition to the formal arguments, we must also push |this|.
+ // When calling a constructor, we must also push |newTarget|.
+ uint32_t numArguments = actualArgc + 1 + pushedNewTarget;
+
+ // Align the stack based on the number of arguments.
+ size_t afterFrameSize =
+ numArguments * sizeof(Value) + JitFrameLayout::Size();
+ if (!maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding")) {
+ return false;
+ }
+
+ // Copy the arguments and |this| from the BaselineFrame, in reverse order.
+ size_t valueSlot = blFrame()->numValueSlots(frameSize) - 1;
+ size_t calleeSlot = valueSlot - numArguments;
+
+ for (size_t i = valueSlot; i > calleeSlot; i--) {
+ Value v = *blFrame()->valueSlot(i);
+ if (!writeValue(v, "ArgVal")) {
+ return false;
+ }
+ }
+
+ callee = *blFrame()->valueSlot(calleeSlot);
+ }
+
+ // In case these arguments need to be copied on the stack again for a
+ // rectifier frame, save the framePushed value here for later use.
+ size_t endOfBaselineStubArgs = framePushed();
+
+ // Push callee token (must be a JS Function)
+ JitSpew(JitSpew_BaselineBailouts, " Callee = %016" PRIx64,
+ callee.asRawBits());
+
+ JSFunction* calleeFun = &callee.toObject().as<JSFunction>();
+ if (!writePtr(CalleeToToken(calleeFun, pushedNewTarget), "CalleeToken")) {
+ return false;
+ }
+ const ICEntry& icScriptEntry = icScript_->icEntryFromPCOffset(pcOff);
+ ICFallbackStub* icScriptFallback =
+ icScript_->fallbackStubForICEntry(&icScriptEntry);
+ setNextCallee(calleeFun, icScriptFallback->trialInliningState());
+
+ // Push BaselineStub frame descriptor
+ size_t baselineStubFrameDescr =
+ MakeFrameDescriptorForJitCall(FrameType::BaselineStub, actualArgc);
+ if (!writeWord(baselineStubFrameDescr, "Descriptor")) {
+ return false;
+ }
+
+ // Push return address into ICCall_Scripted stub, immediately after the call.
+ void* baselineCallReturnAddr = getStubReturnAddress();
+ MOZ_ASSERT(baselineCallReturnAddr);
+ if (!writePtr(baselineCallReturnAddr, "ReturnAddr")) {
+ return false;
+ }
+
+ // The stack must be aligned after the callee pushes the frame pointer.
+ MOZ_ASSERT((framePushed() + sizeof(void*)) % JitStackAlignment == 0);
+
+ // Build a rectifier frame if necessary
+ if (actualArgc < calleeFun->nargs() &&
+ !buildRectifierFrame(actualArgc, endOfBaselineStubArgs)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool BaselineStackBuilder::buildRectifierFrame(uint32_t actualArgc,
+ size_t endOfBaselineStubArgs) {
+ // Push a reconstructed rectifier frame.
+ // +===============+
+ // | Padding? |
+ // +---------------+
+ // | UndefinedU |
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | Undefined0 |
+ // +---------------+
+ // | ArgA |
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | Arg0 |
+ // +---------------+
+ // | ThisV |
+ // +---------------+
+ // | CalleeToken |
+ // +---------------+
+ // | Descr(Rect) |
+ // +---------------+
+ // | ReturnAddr |
+ // +===============+
+
+ JitSpew(JitSpew_BaselineBailouts, " [RECTIFIER FRAME]");
+ bool pushedNewTarget = IsConstructPC(pc_);
+
+ if (!writePtr(prevFramePtr(), "PrevFramePtr")) {
+ return false;
+ }
+ prevFramePtr_ = virtualPointerAtStackOffset(0);
+
+ // Align the stack based on the number of arguments.
+ size_t afterFrameSize =
+ (nextCallee()->nargs() + 1 + pushedNewTarget) * sizeof(Value) +
+ RectifierFrameLayout::Size();
+ if (!maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding")) {
+ return false;
+ }
+
+ // Copy new.target, if necessary.
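+ // Note: endOfBaselineStubArgs was recorded right after |this| was written
+ // into the BaselineStub frame, so the offset below skips everything pushed
+ // since then plus |this| and the actual args to reach the new.target value.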
+ if (pushedNewTarget) {
+ size_t newTargetOffset = (framePushed() - endOfBaselineStubArgs) +
+ (actualArgc + 1) * sizeof(Value);
+ Value newTargetValue = *valuePointerAtStackOffset(newTargetOffset);
+ if (!writeValue(newTargetValue, "CopiedNewTarget")) {
+ return false;
+ }
+ }
+
+ // Push undefined for missing arguments.
+ for (unsigned i = 0; i < (nextCallee()->nargs() - actualArgc); i++) {
+ if (!writeValue(UndefinedValue(), "FillerVal")) {
+ return false;
+ }
+ }
+
+ // Copy arguments + thisv from BaselineStub frame.
+ if (!subtract((actualArgc + 1) * sizeof(Value), "CopiedArgs")) {
+ return false;
+ }
+ BufferPointer<uint8_t> stubArgsEnd =
+ pointerAtStackOffset<uint8_t>(framePushed() - endOfBaselineStubArgs);
+ JitSpew(JitSpew_BaselineBailouts, " MemCpy from %p", stubArgsEnd.get());
+ memcpy(pointerAtStackOffset<uint8_t>(0).get(), stubArgsEnd.get(),
+ (actualArgc + 1) * sizeof(Value));
+
+ // Push calleeToken again.
+ if (!writePtr(CalleeToToken(nextCallee(), pushedNewTarget), "CalleeToken")) {
+ return false;
+ }
+
+ // Push rectifier frame descriptor
+ size_t rectifierFrameDescr =
+ MakeFrameDescriptorForJitCall(FrameType::Rectifier, actualArgc);
+ if (!writeWord(rectifierFrameDescr, "Descriptor")) {
+ return false;
+ }
+
+ // Push return address into the ArgumentsRectifier code, immediately after the
+ // ioncode call.
+ void* rectReturnAddr =
+ cx_->runtime()->jitRuntime()->getArgumentsRectifierReturnAddr().value;
+ MOZ_ASSERT(rectReturnAddr);
+ if (!writePtr(rectReturnAddr, "ReturnAddr")) {
+ return false;
+ }
+
+ // The stack must be aligned after the callee pushes the frame pointer.
+ MOZ_ASSERT((framePushed() + sizeof(void*)) % JitStackAlignment == 0);
+
+ return true;
+}
+
+bool BaselineStackBuilder::finishLastFrame() {
+ const BaselineInterpreter& baselineInterp =
+ cx_->runtime()->jitRuntime()->baselineInterpreter();
+
+ setResumeFramePtr(prevFramePtr());
+
+ // Compute the native address (within the Baseline Interpreter) that we will
+ // resume at and initialize the frame's interpreter fields.
+ uint8_t* resumeAddr;
+ if (isPrologueBailout()) {
+ JitSpew(JitSpew_BaselineBailouts, " Resuming into prologue.");
+ MOZ_ASSERT(pc_ == script_->code());
+ blFrame()->setInterpreterFieldsForPrologue(script_);
+ resumeAddr = baselineInterp.bailoutPrologueEntryAddr();
+ } else if (propagatingIonExceptionForDebugMode()) {
+ // When propagating an exception for debug mode, set the
+ // resume pc to the throwing pc, so that Debugger hooks report
+ // the correct pc offset of the throwing op instead of its
+ // successor.
+ jsbytecode* throwPC = script_->offsetToPC(iter_.pcOffset());
+ blFrame()->setInterpreterFields(script_, throwPC);
+ resumeAddr = baselineInterp.interpretOpAddr().value;
+ } else {
+ jsbytecode* resumePC = getResumePC();
+ blFrame()->setInterpreterFields(script_, resumePC);
+ resumeAddr = baselineInterp.interpretOpAddr().value;
+ }
+ setResumeAddr(resumeAddr);
+ JitSpew(JitSpew_BaselineBailouts, " Set resumeAddr=%p", resumeAddr);
+
+ if (cx_->runtime()->geckoProfiler().enabled()) {
+ // Register bailout with profiler.
+ const char* filename = script_->filename();
+ if (filename == nullptr) {
+ filename = "<unknown>";
+ }
+ unsigned len = strlen(filename) + 200;
+ UniqueChars buf(js_pod_malloc<char>(len));
+ if (buf == nullptr) {
+ ReportOutOfMemory(cx_);
+ return false;
+ }
+ snprintf(buf.get(), len, "%s %s %s on line %u of %s:%u",
+ BailoutKindString(bailoutKind()), resumeAfter() ? "after" : "at",
+ CodeName(op_), PCToLineNumber(script_, pc_), filename,
+ script_->lineno());
+ cx_->runtime()->geckoProfiler().markEvent("Bailout", buf.get());
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
+// The |envChain| slot must not be optimized out if the currently
+// active scope requires any EnvironmentObjects beyond what is
+// available at body scope. This checks that the scope chain does not
+// require any such EnvironmentObjects.
+// See also: |CompileInfo::isObservableFrameSlot|
+bool BaselineStackBuilder::envChainSlotCanBeOptimized() {
+ jsbytecode* pc = script_->offsetToPC(iter_.pcOffset());
+ Scope* scopeIter = script_->innermostScope(pc);
+ while (scopeIter != script_->bodyScope()) {
+ if (!scopeIter || scopeIter->hasEnvironment()) {
+ return false;
+ }
+ scopeIter = scopeIter->enclosing();
+ }
+ return true;
+}
+
+bool jit::AssertBailoutStackDepth(JSContext* cx, JSScript* script,
+ jsbytecode* pc, ResumeMode mode,
+ uint32_t exprStackSlots) {
+ if (IsResumeAfter(mode)) {
+ pc = GetNextPc(pc);
+ }
+
+ uint32_t expectedDepth;
+ bool reachablePC;
+ if (!ReconstructStackDepth(cx, script, pc, &expectedDepth, &reachablePC)) {
+ return false;
+ }
+ if (!reachablePC) {
+ return true;
+ }
+
+ JSOp op = JSOp(*pc);
+
+ if (mode == ResumeMode::InlinedFunCall) {
+ // For an inlined fun.call(this, ...), the reconstructed stack depth will
+ // include the |this|, but the exprStackSlots won't.
+ // Exception: if there are no arguments, the depths do match.
+ MOZ_ASSERT(IsInvokeOp(op));
+ if (GET_ARGC(pc) > 0) {
+ MOZ_ASSERT(expectedDepth == exprStackSlots + 1);
+ } else {
+ MOZ_ASSERT(expectedDepth == exprStackSlots);
+ }
+ return true;
+ }
+
+ if (mode == ResumeMode::InlinedAccessor) {
+ // Accessors coming out of Ion are inlined via a complete lie perpetrated by
+ // the compiler internally. Ion just rearranges the stack, and pretends that
+ // it looked like a call all along.
+ // This means that the depth is actually one *more* than expected by the
+ // interpreter, as there is now a JSFunction, |this| and [arg], rather than
+ // the expected |this| and [arg].
+ // If the inlined accessor is a GetElem operation, the numbers do match, but
+ // that's just because GetElem expects one more item on the stack. Note that
+ // none of that was pushed, but it's still reflected in exprStackSlots.
+ MOZ_ASSERT(IsIonInlinableGetterOrSetterOp(op));
+ if (IsGetElemOp(op)) {
+ MOZ_ASSERT(exprStackSlots == expectedDepth);
+ } else {
+ MOZ_ASSERT(exprStackSlots == expectedDepth + 1);
+ }
+ return true;
+ }
+
+ // In all other cases, the depth must match.
+ MOZ_ASSERT(exprStackSlots == expectedDepth);
+ return true;
+}
+
+bool BaselineStackBuilder::validateFrame() {
+ const uint32_t frameSize = framePushed();
+ blFrame()->setDebugFrameSize(frameSize);
+ JitSpew(JitSpew_BaselineBailouts, " FrameSize=%u", frameSize);
+
+ // debugNumValueSlots() is based on the frame size, so do some sanity checks.
+ MOZ_ASSERT(blFrame()->debugNumValueSlots() >= script_->nfixed());
+ MOZ_ASSERT(blFrame()->debugNumValueSlots() <= script_->nslots());
+
+ uint32_t expectedSlots = exprStackSlots();
+ if (resumingInFinallyBlock()) {
+ // If we are resuming in a finally block, we push two extra values on the
+ // stack (the exception, and |throwing|), so the depth at the resume PC
+ // should be the depth at the fault PC plus two.
+ expectedSlots += 2;
+ }
+ return AssertBailoutStackDepth(cx_, script_, pc_, resumeMode(),
+ expectedSlots);
+}
+#endif
+
+void* BaselineStackBuilder::getStubReturnAddress() {
+ const BaselineICFallbackCode& code =
+ cx_->runtime()->jitRuntime()->baselineICFallbackCode();
+
+ if (IsGetPropOp(op_)) {
+ return code.bailoutReturnAddr(BailoutReturnKind::GetProp);
+ }
+ if (IsSetPropOp(op_)) {
+ return code.bailoutReturnAddr(BailoutReturnKind::SetProp);
+ }
+ if (IsGetElemOp(op_)) {
+ return code.bailoutReturnAddr(BailoutReturnKind::GetElem);
+ }
+
+ // This should be a call op of some kind, now.
+ MOZ_ASSERT(IsInvokeOp(op_) && !IsSpreadOp(op_));
+ if (IsConstructOp(op_)) {
+ return code.bailoutReturnAddr(BailoutReturnKind::New);
+ }
+ return code.bailoutReturnAddr(BailoutReturnKind::Call);
+}
+
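+// Follow a single Goto, or skip a single LoopHead or Nop, returning |pc|
+// unchanged for any other op. getResumePC() below applies this repeatedly,
+// with cycle detection, to skip loop heads when choosing the resume pc.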
+static inline jsbytecode* GetNextNonLoopHeadPc(jsbytecode* pc) {
+ JSOp op = JSOp(*pc);
+ switch (op) {
+ case JSOp::Goto:
+ return pc + GET_JUMP_OFFSET(pc);
+
+ case JSOp::LoopHead:
+ case JSOp::Nop:
+ return GetNextPc(pc);
+
+ default:
+ return pc;
+ }
+}
+
+// Returns the pc to resume execution at in Baseline after a bailout.
+jsbytecode* BaselineStackBuilder::getResumePC() {
+ if (resumeAfter()) {
+ return GetNextPc(pc_);
+ }
+
+ // If we are resuming at a LoopHead op, resume at the next op to avoid
+ // a bailout -> enter Ion -> bailout loop with --ion-eager.
+ //
+ // Cycles can cause the loop below to not terminate. Empty loops are one
+ // such example:
+ //
+ // L: loophead
+ // goto L
+ //
+ // We do cycle detection below with the "tortoise and the hare" algorithm.
+ jsbytecode* slowerPc = pc_;
+ jsbytecode* fasterPc = pc_;
+ while (true) {
+ // Advance fasterPc twice as fast as slowerPc.
+ slowerPc = GetNextNonLoopHeadPc(slowerPc);
+ fasterPc = GetNextNonLoopHeadPc(fasterPc);
+ fasterPc = GetNextNonLoopHeadPc(fasterPc);
+
+ // Break on cycles or at the end of goto sequences.
+ if (fasterPc == slowerPc) {
+ break;
+ }
+ }
+
+ return slowerPc;
+}
+
+bool BaselineStackBuilder::isPrologueBailout() {
+ // If we are propagating an exception for debug mode, we will not resume
+ // into baseline code, but instead into HandleExceptionBaseline (i.e.,
+ // never before the prologue).
+ return iter_.pcOffset() == 0 && !iter_.resumeAfter() &&
+ !propagatingIonExceptionForDebugMode();
+}
+
+// Build a baseline stack frame.
+bool BaselineStackBuilder::buildOneFrame() {
+ // Build a baseline frame:
+ // +===============+
+ // | PrevFramePtr | <-- initFrame()
+ // +---------------+
+ // | Baseline | <-- buildBaselineFrame()
+ // | Frame |
+ // +---------------+
+ // | Fixed0 | <-- buildFixedSlots()
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | FixedF |
+ // +---------------+
+ // | Stack0 | <-- buildExpressionStack() -or- fixupCallerArgs()
+ // +---------------+
+ // | ... |
+ // +---------------+ If we are building the frame in which we will
+ // | StackS | <-- resume, we stop here.
+ // +---------------+ finishLastFrame() sets up the interpreter fields.
+ // . .
+ // . .
+ // . . <-- If there are additional frames inlined into this
+ // | Descr(BLJS) | one, we finish this frame. We generate a stub
+ // +---------------+ frame (and maybe also a rectifier frame) between
+ // | ReturnAddr | this frame and the inlined frame.
+ // +===============+ See: prepareForNextFrame()
+
+ if (!initFrame()) {
+ return false;
+ }
+
+ if (!buildBaselineFrame()) {
+ return false;
+ }
+
+ if (fun_ && !buildArguments()) {
+ return false;
+ }
+
+ if (!buildFixedSlots()) {
+ return false;
+ }
+
+ bool fixedUp = false;
+ RootedValueVector savedCallerArgs(cx_);
+ if (iter_.moreFrames() && !fixUpCallerArgs(&savedCallerArgs, &fixedUp)) {
+ return false;
+ }
+
+ if (!fixedUp) {
+ if (!buildExpressionStack()) {
+ return false;
+ }
+ if (resumingInFinallyBlock() && !buildFinallyException()) {
+ return false;
+ }
+ }
+
+#ifdef DEBUG
+ if (!validateFrame()) {
+ return false;
+ }
+#endif
+
+#ifdef JS_JITSPEW
+ const uint32_t pcOff = script_->pcToOffset(pc());
+ JitSpew(JitSpew_BaselineBailouts,
+ " Resuming %s pc offset %d (op %s) (line %u) of %s:%u:%u",
+ resumeAfter() ? "after" : "at", (int)pcOff, CodeName(op_),
+ PCToLineNumber(script_, pc()), script_->filename(), script_->lineno(),
+ script_->column());
+ JitSpew(JitSpew_BaselineBailouts, " Bailout kind: %s",
+ BailoutKindString(bailoutKind()));
+#endif
+
+ // If this was the last inline frame, or we are bailing out to a catch or
+ // finally block in this frame, then unpacking is almost done.
+ if (done()) {
+ return finishLastFrame();
+ }
+
+ // Otherwise, this is an outer frame for an inlined call or
+ // accessor. We will be building an inner frame. Before that,
+ // we must create a stub frame, and potentially a rectifier frame.
+ return prepareForNextFrame(savedCallerArgs);
+}
+
+bool jit::BailoutIonToBaseline(JSContext* cx, JitActivation* activation,
+ const JSJitFrameIter& iter,
+ BaselineBailoutInfo** bailoutInfo,
+ const ExceptionBailoutInfo* excInfo,
+ BailoutReason reason) {
+ MOZ_ASSERT(bailoutInfo != nullptr);
+ MOZ_ASSERT(*bailoutInfo == nullptr);
+ MOZ_ASSERT(iter.isBailoutJS());
+
+ // Caller should have saved the exception while we perform the bailout.
+ MOZ_ASSERT(!cx->isExceptionPending());
+
+ // Ion bailout can fail due to overrecursion and OOM. In such cases we
+ // cannot honor any further Debugger hooks on the frame, and need to
+ // ensure that its Debugger.Frame entry is cleaned up.
+ auto guardRemoveRematerializedFramesFromDebugger =
+ mozilla::MakeScopeExit([&] {
+ activation->removeRematerializedFramesFromDebugger(cx, iter.fp());
+ });
+
+ // Always remove the RInstructionResults from the JitActivation, even in
+ // case of failures as the stack frame is going away after the bailout.
+ auto removeIonFrameRecovery = mozilla::MakeScopeExit(
+ [&] { activation->removeIonFrameRecovery(iter.jsFrame()); });
+
+ // The caller of the top frame must be one of the following:
+ // IonJS - Ion calling into Ion.
+ // BaselineStub - Baseline calling into Ion.
+ // Entry / WasmToJSJit - Interpreter or other (wasm) calling into Ion.
+ // Rectifier - Arguments rectifier calling into Ion.
+ // BaselineJS - Resume'd Baseline, then likely OSR'd into Ion.
+ MOZ_ASSERT(iter.isBailoutJS());
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ FrameType prevFrameType = iter.prevType();
+ MOZ_ASSERT(JSJitFrameIter::isEntry(prevFrameType) ||
+ prevFrameType == FrameType::IonJS ||
+ prevFrameType == FrameType::BaselineStub ||
+ prevFrameType == FrameType::Rectifier ||
+ prevFrameType == FrameType::IonICCall ||
+ prevFrameType == FrameType::BaselineJS ||
+ prevFrameType == FrameType::BaselineInterpreterEntry);
+#endif
+
+ // All incoming frames are going to look like this:
+ //
+ // +---------------+
+ // | ... |
+ // +---------------+
+ // | Args |
+ // | ... |
+ // +---------------+
+ // | ThisV |
+ // +---------------+
+ // | ActualArgC |
+ // +---------------+
+ // | CalleeToken |
+ // +---------------+
+ // | Descriptor |
+ // +---------------+
+ // | ReturnAddr |
+ // +---------------+
+ // | ||||| | <---- Overwrite starting here.
+ // | ||||| |
+ // | ||||| |
+ // +---------------+
+
+ JitSpew(JitSpew_BaselineBailouts,
+ "Bailing to baseline %s:%u:%u (IonScript=%p) (FrameType=%d)",
+ iter.script()->filename(), iter.script()->lineno(),
+ iter.script()->column(), (void*)iter.ionScript(), (int)prevFrameType);
+
+ if (excInfo) {
+ if (excInfo->catchingException()) {
+ JitSpew(JitSpew_BaselineBailouts, "Resuming in catch or finally block");
+ }
+ if (excInfo->propagatingIonExceptionForDebugMode()) {
+ JitSpew(JitSpew_BaselineBailouts, "Resuming in-place for debug mode");
+ }
+ }
+
+ JitSpew(JitSpew_BaselineBailouts,
+ " Reading from snapshot offset %u size %zu", iter.snapshotOffset(),
+ iter.ionScript()->snapshotsListSize());
+
+ iter.script()->updateJitCodeRaw(cx->runtime());
+
+ // Under a bailout, there is no need to invalidate the frame after
+ // evaluating the recover instruction, as the invalidation is only needed in
+ // cases where the frame is introspected ahead of the bailout.
+ MaybeReadFallback recoverBailout(cx, activation, &iter,
+ MaybeReadFallback::Fallback_DoNothing);
+
+ // Ensure that all value locations are readable from the SnapshotIterator.
+ // Get the RInstructionResults from the JitActivation if the frame got
+ // recovered ahead of the bailout.
+ SnapshotIterator snapIter(iter, activation->bailoutData()->machineState());
+ if (!snapIter.initInstructionResults(recoverBailout)) {
+ return false;
+ }
+
+#ifdef TRACK_SNAPSHOTS
+ snapIter.spewBailingFrom();
+#endif
+
+ BaselineStackBuilder builder(cx, iter, snapIter, excInfo, reason);
+ if (!builder.init()) {
+ return false;
+ }
+
+ JitSpew(JitSpew_BaselineBailouts, " Incoming frame ptr = %p",
+ builder.startFrame());
+ if (iter.maybeCallee()) {
+ JitSpew(JitSpew_BaselineBailouts, " Callee function (%s:%u:%u)",
+ iter.script()->filename(), iter.script()->lineno(),
+ iter.script()->column());
+ } else {
+ JitSpew(JitSpew_BaselineBailouts, " No callee!");
+ }
+
+ if (iter.isConstructing()) {
+ JitSpew(JitSpew_BaselineBailouts, " Constructing!");
+ } else {
+ JitSpew(JitSpew_BaselineBailouts, " Not constructing!");
+ }
+
+ JitSpew(JitSpew_BaselineBailouts, " Restoring frames:");
+
+ while (true) {
+ // Skip recover instructions as they are already recovered by
+ // |initInstructionResults|.
+ snapIter.settleOnFrame();
+
+ JitSpew(JitSpew_BaselineBailouts, " FrameNo %zu", builder.frameNo());
+
+ if (!builder.buildOneFrame()) {
+ MOZ_ASSERT(cx->isExceptionPending());
+ return false;
+ }
+
+ if (builder.done()) {
+ break;
+ }
+
+ builder.nextFrame();
+ }
+ JitSpew(JitSpew_BaselineBailouts, " Done restoring frames");
+
+ BailoutKind bailoutKind = builder.bailoutKind();
+
+ if (!builder.outermostFrameFormals().empty()) {
+ // Set the first frame's formals, see the comment in InitFromBailout.
+ Value* argv = builder.startFrame()->actualArgs();
+ mozilla::PodCopy(argv, builder.outermostFrameFormals().begin(),
+ builder.outermostFrameFormals().length());
+ }
+
+ // Do stack check.
+ bool overRecursed = false;
+ BaselineBailoutInfo* info = builder.info();
+ size_t numBytesToPush = info->copyStackTop - info->copyStackBottom;
+ MOZ_ASSERT((numBytesToPush % sizeof(uintptr_t)) == 0);
+ uint8_t* newsp = info->incomingStack - numBytesToPush;
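+ // |newsp| is where the stack pointer will end up once the reconstructed
+ // frames have been copied onto the C stack below the incoming frame.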
+#ifdef JS_SIMULATOR
+ if (Simulator::Current()->overRecursed(uintptr_t(newsp))) {
+ overRecursed = true;
+ }
+#else
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.checkWithStackPointerDontReport(cx, newsp)) {
+ overRecursed = true;
+ }
+#endif
+ if (overRecursed) {
+ JitSpew(JitSpew_BaselineBailouts, " Overrecursion check failed!");
+ ReportOverRecursed(cx);
+ return false;
+ }
+
+ // Take the reconstructed baseline stack so it doesn't get freed when builder
+ // destructs.
+ info = builder.takeBuffer();
+ info->numFrames = builder.frameNo() + 1;
+ info->bailoutKind.emplace(bailoutKind);
+ *bailoutInfo = info;
+ guardRemoveRematerializedFramesFromDebugger.release();
+ return true;
+}
+
+static void InvalidateAfterBailout(JSContext* cx, HandleScript outerScript,
+ const char* reason) {
+ // In some cases, the computation of recover instructions can invalidate the
+ // Ion script before we reach the end of the bailout. Thus, if the outer
+ // script no longer has any Ion script attached, we just skip the
+ // invalidation.
+ //
+ // For example, such a case can happen if the template object for an unboxed
+ // object no longer matches the content of its properties (see Bug 1174547).
+ if (!outerScript->hasIonScript()) {
+ JitSpew(JitSpew_BaselineBailouts, "Ion script is already invalidated");
+ return;
+ }
+
+ MOZ_ASSERT(!outerScript->ionScript()->invalidated());
+
+ JitSpew(JitSpew_BaselineBailouts, "Invalidating due to %s", reason);
+ Invalidate(cx, outerScript);
+}
+
+static void HandleLexicalCheckFailure(JSContext* cx, HandleScript outerScript,
+ HandleScript innerScript) {
+ JitSpew(JitSpew_IonBailouts,
+ "Lexical check failure %s:%u:%u, inlined into %s:%u:%u",
+ innerScript->filename(), innerScript->lineno(), innerScript->column(),
+ outerScript->filename(), outerScript->lineno(),
+ outerScript->column());
+
+ if (!innerScript->failedLexicalCheck()) {
+ innerScript->setFailedLexicalCheck();
+ }
+
+ InvalidateAfterBailout(cx, outerScript, "lexical check failure");
+ if (innerScript->hasIonScript()) {
+ Invalidate(cx, innerScript);
+ }
+}
+
+static bool CopyFromRematerializedFrame(JSContext* cx, JitActivation* act,
+ uint8_t* fp, size_t inlineDepth,
+ BaselineFrame* frame) {
+ RematerializedFrame* rematFrame =
+ act->lookupRematerializedFrame(fp, inlineDepth);
+
+ // We might not have rematerialized a frame if the user never requested a
+ // Debugger.Frame for it.
+ if (!rematFrame) {
+ return true;
+ }
+
+ MOZ_ASSERT(rematFrame->script() == frame->script());
+ MOZ_ASSERT(rematFrame->numActualArgs() == frame->numActualArgs());
+
+ frame->setEnvironmentChain(rematFrame->environmentChain());
+
+ if (frame->isFunctionFrame()) {
+ frame->thisArgument() = rematFrame->thisArgument();
+ }
+
+ for (unsigned i = 0; i < frame->numActualArgs(); i++) {
+ frame->argv()[i] = rematFrame->argv()[i];
+ }
+
+ for (size_t i = 0; i < frame->script()->nfixed(); i++) {
+ *frame->valueSlot(i) = rematFrame->locals()[i];
+ }
+
+ if (frame->script()->noScriptRval()) {
+ frame->setReturnValue(UndefinedValue());
+ } else {
+ frame->setReturnValue(rematFrame->returnValue());
+ }
+
+ // Don't copy over the hasCachedSavedFrame bit. The new BaselineFrame we're
+ // building has a different AbstractFramePtr, so it won't be found in the
+ // LiveSavedFrameCache if we look there.
+
+ JitSpew(JitSpew_BaselineBailouts,
+ " Copied from rematerialized frame at (%p,%zu)", fp, inlineDepth);
+
+ // Propagate the debuggee frame flag. For the case where the Debugger did
+ // not rematerialize an Ion frame, the baseline frame has its debuggee
+ // flag set iff its script is considered a debuggee. See the debuggee case
+ // in InitFromBailout.
+ if (rematFrame->isDebuggee()) {
+ frame->setIsDebuggee();
+ return DebugAPI::handleIonBailout(cx, rematFrame, frame);
+ }
+
+ return true;
+}
+
+enum class BailoutAction {
+ InvalidateImmediately,
+ InvalidateIfFrequent,
+ DisableIfFrequent,
+ NoAction
+};
+
+bool jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfoArg) {
+ JitSpew(JitSpew_BaselineBailouts, " Done restoring frames");
+
+ // Use UniquePtr to free the bailoutInfo before we return.
+ UniquePtr<BaselineBailoutInfo> bailoutInfo(bailoutInfoArg);
+ bailoutInfoArg = nullptr;
+
+ MOZ_DIAGNOSTIC_ASSERT(*bailoutInfo->bailoutKind != BailoutKind::Unreachable);
+
+ JSContext* cx = TlsContext.get();
+
+ // jit::Bailout(), jit::InvalidationBailout(), and jit::HandleException()
+ // should have reset the counter to zero.
+ MOZ_ASSERT(!cx->isInUnsafeRegion());
+
+ BaselineFrame* topFrame = GetTopBaselineFrame(cx);
+
+ // We have to get rid of the rematerialized frame, whether it is
+ // restored or unwound.
+ uint8_t* incomingStack = bailoutInfo->incomingStack;
+ auto guardRemoveRematerializedFramesFromDebugger =
+ mozilla::MakeScopeExit([&] {
+ JitActivation* act = cx->activation()->asJit();
+ act->removeRematerializedFramesFromDebugger(cx, incomingStack);
+ });
+
+ // Ensure the frame has a call object if it needs one.
+ if (!EnsureHasEnvironmentObjects(cx, topFrame)) {
+ return false;
+ }
+
+ // Create arguments objects for bailed out frames, to maintain the invariant
+ // that script->needsArgsObj() implies frame->hasArgsObj().
+ RootedScript innerScript(cx, nullptr);
+ RootedScript outerScript(cx, nullptr);
+
+ MOZ_ASSERT(cx->currentlyRunningInJit());
+ JSJitFrameIter iter(cx->activation()->asJit());
+ uint8_t* outerFp = nullptr;
+
+ // Iter currently points at the exit frame. Get the previous frame
+ // (which must be a baseline frame), and set it as the last profiling
+ // frame.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(
+ cx->runtime())) {
+ MOZ_ASSERT(iter.prevType() == FrameType::BaselineJS);
+ JitFrameLayout* fp = reinterpret_cast<JitFrameLayout*>(iter.prevFp());
+ cx->jitActivation->setLastProfilingFrame(fp);
+ }
+
+ uint32_t numFrames = bailoutInfo->numFrames;
+ MOZ_ASSERT(numFrames > 0);
+
+ uint32_t frameno = 0;
+ while (frameno < numFrames) {
+ MOZ_ASSERT(!iter.isIonJS());
+
+ if (iter.isBaselineJS()) {
+ BaselineFrame* frame = iter.baselineFrame();
+ MOZ_ASSERT(frame->script()->hasBaselineScript());
+
+ // If the frame doesn't even have an env chain set yet, then it's resuming
+ // into the prologue before the env chain is initialized. Any
+ // necessary args object will also be initialized there.
+ if (frame->environmentChain() && frame->script()->needsArgsObj()) {
+ ArgumentsObject* argsObj;
+ if (frame->hasArgsObj()) {
+ argsObj = &frame->argsObj();
+ } else {
+ argsObj = ArgumentsObject::createExpected(cx, frame);
+ if (!argsObj) {
+ return false;
+ }
+ }
+
+ // The |arguments| value is a local binding, and needsArgsObj does not
+ // check if it is clobbered. Ensure that the local binding is restored
+ // during bailout before storing the arguments object to the slot.
+ RootedScript script(cx, frame->script());
+ SetFrameArgumentsObject(cx, frame, script, argsObj);
+ }
+
+ if (frameno == 0) {
+ innerScript = frame->script();
+ }
+
+ if (frameno == numFrames - 1) {
+ outerScript = frame->script();
+ outerFp = iter.fp();
+ MOZ_ASSERT(outerFp == incomingStack);
+ }
+
+ frameno++;
+ }
+
+ ++iter;
+ }
+
+ MOZ_ASSERT(innerScript);
+ MOZ_ASSERT(outerScript);
+ MOZ_ASSERT(outerFp);
+
+ // If we rematerialized Ion frames due to debug mode toggling, copy their
+ // values into the baseline frame. We need to do this even when debug mode
+ // is off, as we should respect the mutations made while debug mode was
+ // on.
+ JitActivation* act = cx->activation()->asJit();
+ if (act->hasRematerializedFrame(outerFp)) {
+ JSJitFrameIter iter(act);
+ size_t inlineDepth = numFrames;
+ bool ok = true;
+ while (inlineDepth > 0) {
+ if (iter.isBaselineJS()) {
+ // We must attempt to copy all rematerialized frames over,
+ // even if earlier ones failed, to invoke the proper frame
+ // cleanup in the Debugger.
+ if (!CopyFromRematerializedFrame(cx, act, outerFp, --inlineDepth,
+ iter.baselineFrame())) {
+ ok = false;
+ }
+ }
+ ++iter;
+ }
+
+ if (!ok) {
+ return false;
+ }
+
+ // After copying from all the rematerialized frames, remove them from
+ // the table to keep the table up to date.
+ guardRemoveRematerializedFramesFromDebugger.release();
+ act->removeRematerializedFrame(outerFp);
+ }
+
+ // If we are unwinding for an exception, we need to unwind scopes.
+ // See |SettleOnTryNote|.
+ if (bailoutInfo->faultPC) {
+ EnvironmentIter ei(cx, topFrame, bailoutInfo->faultPC);
+ UnwindEnvironment(cx, ei, bailoutInfo->tryPC);
+ }
+
+ // Check for interrupts now because we might miss an interrupt check in JIT
+ // code when resuming in the prologue, after the stack/interrupt check.
+ if (!cx->isExceptionPending()) {
+ if (!CheckForInterrupt(cx)) {
+ return false;
+ }
+ }
+
+ BailoutKind bailoutKind = *bailoutInfo->bailoutKind;
+ JitSpew(JitSpew_BaselineBailouts,
+ " Restored outerScript=(%s:%u:%u,%u) innerScript=(%s:%u:%u,%u) "
+ "(bailoutKind=%u)",
+ outerScript->filename(), outerScript->lineno(), outerScript->column(),
+ outerScript->getWarmUpCount(), innerScript->filename(),
+ innerScript->lineno(), innerScript->column(),
+ innerScript->getWarmUpCount(), (unsigned)bailoutKind);
+
+ BailoutAction action = BailoutAction::InvalidateImmediately;
+ DebugOnly<bool> saveFailedICHash = false;
+ switch (bailoutKind) {
+ case BailoutKind::TranspiledCacheIR:
+ // A transpiled guard failed. If this happens often enough, we will
+ // invalidate and recompile.
+ action = BailoutAction::InvalidateIfFrequent;
+ saveFailedICHash = true;
+ break;
+
+ case BailoutKind::MonomorphicInlinedStubFolding:
+ action = BailoutAction::InvalidateIfFrequent;
+ saveFailedICHash = true;
+ if (innerScript != outerScript) {
+ // In the case where this instruction comes from a monomorphic-inlined
+ // ICScript, we need to ensure that we note the connection between the
+ // inner script and the outer script, so that we can properly track if
+ // we add a new case to the folded stub and avoid invalidating the
+ // outer script.
+ cx->zone()->jitZone()->noteStubFoldingBailout(innerScript, outerScript);
+ }
+ break;
+
+ case BailoutKind::SpeculativePhi:
+ // A value of an unexpected type flowed into a phi.
+ MOZ_ASSERT(!outerScript->hadSpeculativePhiBailout());
+ outerScript->setHadSpeculativePhiBailout();
+ InvalidateAfterBailout(cx, outerScript, "phi specialization failure");
+ break;
+
+ case BailoutKind::TypePolicy:
+ // A conversion inserted by a type policy failed.
+ // We will invalidate and disable recompilation if this happens too often.
+ action = BailoutAction::DisableIfFrequent;
+ break;
+
+ case BailoutKind::LICM:
+ // LICM may cause spurious bailouts by hoisting unreachable
+ // guards past branches. To prevent bailout loops, when an
+ // instruction hoisted by LICM bails out, we update the
+ // IonScript and resume in baseline. If the guard would have
+ // been executed anyway, then we will hit the baseline fallback,
+ // and call noteBaselineFallback. If that does not happen,
+ // then the next time we reach this point, we will disable LICM
+ // for this script.
+ MOZ_ASSERT(!outerScript->hadLICMInvalidation());
+ if (outerScript->hasIonScript()) {
+ switch (outerScript->ionScript()->licmState()) {
+ case IonScript::LICMState::NeverBailed:
+ outerScript->ionScript()->setHadLICMBailout();
+ action = BailoutAction::NoAction;
+ break;
+ case IonScript::LICMState::Bailed:
+ outerScript->setHadLICMInvalidation();
+ InvalidateAfterBailout(cx, outerScript, "LICM failure");
+ break;
+ case IonScript::LICMState::BailedAndHitFallback:
+ // This bailout is not due to LICM. Treat it like a
+ // regular TranspiledCacheIR bailout.
+ action = BailoutAction::InvalidateIfFrequent;
+ break;
+ }
+ }
+ break;
+
+ case BailoutKind::InstructionReordering:
+ // An instruction moved up by instruction reordering bailed out.
+ outerScript->setHadReorderingBailout();
+ action = BailoutAction::InvalidateIfFrequent;
+ break;
+
+ case BailoutKind::HoistBoundsCheck:
+ // An instruction hoisted or generated by tryHoistBoundsCheck bailed out.
+ MOZ_ASSERT(!outerScript->failedBoundsCheck());
+ outerScript->setFailedBoundsCheck();
+ InvalidateAfterBailout(cx, outerScript, "bounds check failure");
+ break;
+
+ case BailoutKind::EagerTruncation:
+ // An eager truncation generated by range analysis bailed out.
+ // To avoid bailout loops, we set a flag to avoid generating
+ // eager truncations next time we recompile.
+ MOZ_ASSERT(!outerScript->hadEagerTruncationBailout());
+ outerScript->setHadEagerTruncationBailout();
+ InvalidateAfterBailout(cx, outerScript, "eager range analysis failure");
+ break;
+
+ case BailoutKind::UnboxFolding:
+ // An unbox that was hoisted to fold with a load bailed out.
+ // To avoid bailout loops, we set a flag to avoid folding
+ // loads with unboxes next time we recompile.
+ MOZ_ASSERT(!outerScript->hadUnboxFoldingBailout());
+ outerScript->setHadUnboxFoldingBailout();
+ InvalidateAfterBailout(cx, outerScript, "unbox folding failure");
+ break;
+
+ case BailoutKind::TooManyArguments:
+ // A funapply or spread call had more than JIT_ARGS_LENGTH_MAX arguments.
+ // We will invalidate and disable recompilation if this happens too often.
+ action = BailoutAction::DisableIfFrequent;
+ break;
+
+ case BailoutKind::DuringVMCall:
+ if (cx->isExceptionPending()) {
+ // We are bailing out to catch an exception. We will invalidate
+ // and disable recompilation if this happens too often.
+ action = BailoutAction::DisableIfFrequent;
+ }
+ break;
+
+ case BailoutKind::Finally:
+ // We are bailing out for a finally block. We will invalidate
+ // and disable recompilation if this happens too often.
+ action = BailoutAction::DisableIfFrequent;
+ break;
+
+ case BailoutKind::Inevitable:
+ case BailoutKind::Debugger:
+ // Do nothing.
+ action = BailoutAction::NoAction;
+ break;
+
+ case BailoutKind::FirstExecution:
+ // We reached an instruction that had not been executed yet at
+ // the time we compiled. If this happens often enough, we will
+ // invalidate and recompile.
+ action = BailoutAction::InvalidateIfFrequent;
+ saveFailedICHash = true;
+ break;
+
+ case BailoutKind::UninitializedLexical:
+ HandleLexicalCheckFailure(cx, outerScript, innerScript);
+ break;
+
+ case BailoutKind::ThrowCheckIsObject:
+ MOZ_ASSERT(!cx->isExceptionPending());
+ return ThrowCheckIsObject(cx, CheckIsObjectKind::IteratorReturn);
+
+ case BailoutKind::IonExceptionDebugMode:
+ // Return false to resume in HandleException with reconstructed
+ // baseline frame.
+ return false;
+
+ case BailoutKind::OnStackInvalidation:
+ // The script has already been invalidated. There is nothing left to do.
+ action = BailoutAction::NoAction;
+ break;
+
+ default:
+ MOZ_CRASH("Unknown bailout kind!");
+ }
+
+#ifdef DEBUG
+ if (MOZ_UNLIKELY(cx->runtime()->jitRuntime()->ionBailAfterEnabled())) {
+ action = BailoutAction::NoAction;
+ }
+#endif
+
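+  // Apply the chosen action to the IonScript, if it still exists: count this
+  // bailout and invalidate (and possibly disable Ion compilation for the
+  // script) once the corresponding threshold is exceeded.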
+ if (outerScript->hasIonScript()) {
+ IonScript* ionScript = outerScript->ionScript();
+ switch (action) {
+ case BailoutAction::InvalidateImmediately:
+ // The IonScript should already have been invalidated.
+ MOZ_ASSERT(false);
+ break;
+ case BailoutAction::InvalidateIfFrequent:
+ ionScript->incNumFixableBailouts();
+ if (ionScript->shouldInvalidate()) {
+#ifdef DEBUG
+ if (saveFailedICHash && !JitOptions.disableBailoutLoopCheck) {
+ outerScript->jitScript()->setFailedICHash(ionScript->icHash());
+ }
+#endif
+ InvalidateAfterBailout(cx, outerScript, "fixable bailouts");
+ }
+ break;
+ case BailoutAction::DisableIfFrequent:
+ ionScript->incNumUnfixableBailouts();
+ if (ionScript->shouldInvalidateAndDisable()) {
+ InvalidateAfterBailout(cx, outerScript, "unfixable bailouts");
+ outerScript->disableIon();
+ }
+ break;
+ case BailoutAction::NoAction:
+ break;
+ }
+ }
+
+ return true;
+}
diff --git a/js/src/jit/BaselineCacheIRCompiler.cpp b/js/src/jit/BaselineCacheIRCompiler.cpp
new file mode 100644
index 0000000000..083d0d7c34
--- /dev/null
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -0,0 +1,4083 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCacheIRCompiler.h"
+
+#include "gc/GC.h"
+#include "jit/CacheIR.h"
+#include "jit/CacheIRCloner.h"
+#include "jit/CacheIRWriter.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRealm.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitZone.h"
+#include "jit/Linker.h"
+#include "jit/MoveEmitter.h"
+#include "jit/RegExpStubConstants.h"
+#include "jit/SharedICHelpers.h"
+#include "jit/VMFunctions.h"
+#include "js/experimental/JitInfo.h" // JSJitInfo
+#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
+#include "proxy/DeadObjectProxy.h"
+#include "proxy/Proxy.h"
+#include "util/Unicode.h"
+#include "vm/JSAtom.h"
+#include "vm/StaticStrings.h"
+
+#include "jit/JitScript-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "jit/SharedICHelpers-inl.h"
+#include "jit/VMFunctionList-inl.h"
+#include "vm/List-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Maybe;
+
+using JS::ExpandoAndGeneration;
+
+namespace js {
+namespace jit {
+
+Address CacheRegisterAllocator::addressOf(MacroAssembler& masm,
+ BaselineFrameSlot slot) const {
+ uint32_t offset =
+ stackPushed_ + ICStackValueOffset + slot.slot() * sizeof(JS::Value);
+ if (JitOptions.enableICFramePointers) {
+ // The frame pointer is also on the stack.
+ offset += sizeof(uintptr_t);
+ }
+ return Address(masm.getStackPointer(), offset);
+}
+BaseValueIndex CacheRegisterAllocator::addressOf(MacroAssembler& masm,
+ Register argcReg,
+ BaselineFrameSlot slot) const {
+ uint32_t offset =
+ stackPushed_ + ICStackValueOffset + slot.slot() * sizeof(JS::Value);
+ if (JitOptions.enableICFramePointers) {
+ // The frame pointer is also on the stack.
+ offset += sizeof(uintptr_t);
+ }
+ return BaseValueIndex(masm.getStackPointer(), argcReg, offset);
+}
+
+// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
+BaselineCacheIRCompiler::BaselineCacheIRCompiler(JSContext* cx,
+ TempAllocator& alloc,
+ const CacheIRWriter& writer,
+ uint32_t stubDataOffset)
+ : CacheIRCompiler(cx, alloc, writer, stubDataOffset, Mode::Baseline,
+ StubFieldPolicy::Address),
+ makesGCCalls_(false) {}
+
+// AutoStubFrame methods
+AutoStubFrame::AutoStubFrame(BaselineCacheIRCompiler& compiler)
+ : compiler(compiler)
+#ifdef DEBUG
+ ,
+ framePushedAtEnterStubFrame_(0)
+#endif
+{
+}
+void AutoStubFrame::enter(MacroAssembler& masm, Register scratch,
+ CallCanGC canGC) {
+ MOZ_ASSERT(compiler.allocator.stackPushed() == 0);
+
+ if (JitOptions.enableICFramePointers) {
+ // If we have already pushed the frame pointer, pop it
+ // before creating the stub frame.
+ masm.pop(FramePointer);
+ }
+ EmitBaselineEnterStubFrame(masm, scratch);
+
+#ifdef DEBUG
+ framePushedAtEnterStubFrame_ = masm.framePushed();
+#endif
+
+ MOZ_ASSERT(!compiler.enteredStubFrame_);
+ compiler.enteredStubFrame_ = true;
+ if (canGC == CallCanGC::CanGC) {
+ compiler.makesGCCalls_ = true;
+ }
+}
+void AutoStubFrame::leave(MacroAssembler& masm) {
+ MOZ_ASSERT(compiler.enteredStubFrame_);
+ compiler.enteredStubFrame_ = false;
+
+#ifdef DEBUG
+ masm.setFramePushed(framePushedAtEnterStubFrame_);
+#endif
+
+ EmitBaselineLeaveStubFrame(masm);
+ if (JitOptions.enableICFramePointers) {
+ // We will pop the frame pointer when we return,
+ // so we have to push it again now.
+ masm.push(FramePointer);
+ }
+}
+
+#ifdef DEBUG
+AutoStubFrame::~AutoStubFrame() { MOZ_ASSERT(!compiler.enteredStubFrame_); }
+#endif
+
+} // namespace jit
+} // namespace js
+
+bool BaselineCacheIRCompiler::makesGCCalls() const { return makesGCCalls_; }
+
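+// Return the address of a stub data field: |offset| is relative to the start
+// of the stub data, which lives |stubDataOffset_| bytes into the stub.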
+Address BaselineCacheIRCompiler::stubAddress(uint32_t offset) const {
+ return Address(ICStubReg, stubDataOffset_ + offset);
+}
+
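+// Resolve |fn| to its VMFunctionId at compile time and emit the call through
+// the shared callVMInternal helper.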
+template <typename Fn, Fn fn>
+void BaselineCacheIRCompiler::callVM(MacroAssembler& masm) {
+ VMFunctionId id = VMFunctionToId<Fn, fn>::id;
+ callVMInternal(masm, id);
+}
+
+JitCode* BaselineCacheIRCompiler::compile() {
+ AutoCreatedBy acb(masm, "BaselineCacheIRCompiler::compile");
+
+#ifndef JS_USE_LINK_REGISTER
+ masm.adjustFrame(sizeof(intptr_t));
+#endif
+#ifdef JS_CODEGEN_ARM
+ masm.setSecondScratchReg(BaselineSecondScratchReg);
+#endif
+ if (JitOptions.enableICFramePointers) {
+ /* [SMDOC] Baseline IC Frame Pointers
+ *
+ * In general, ICs don't have frame pointers until just before
+ * doing a VM call, at which point we retroactively create a stub
+ * frame. However, for the sake of external profilers, we
+ * optionally support full-IC frame pointers in baseline ICs, with
+ * the following approach:
+ * 1. We push a frame pointer when we enter an IC.
+ * 2. We pop the frame pointer when we return from an IC, or
+ * when we jump to the next IC.
+ * 3. Entering a stub frame for a VM call already pushes a
+ * frame pointer, so we pop our existing frame pointer
+ * just before entering a stub frame and push it again
+ * just after leaving a stub frame.
+ * Some ops take advantage of the fact that the frame pointer is
+ * not updated until we enter a stub frame to read values from
+ * the caller's frame. To support this, we allocate a separate
+ * baselineFrame register when IC frame pointers are enabled.
+ */
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ MOZ_ASSERT(baselineFrameReg() != FramePointer);
+ masm.loadPtr(Address(FramePointer, 0), baselineFrameReg());
+ }
+
+  // Count stub entries: we count entries rather than successes because it is
+  // much easier to ensure ICStubReg is valid at entry than at exit.
+ Address enteredCount(ICStubReg, ICCacheIRStub::offsetOfEnteredCount());
+ masm.add32(Imm32(1), enteredCount);
+
+ CacheIRReader reader(writer_);
+ do {
+ CacheOp op = reader.readOp();
+ perfSpewer_.recordInstruction(masm, op);
+ switch (op) {
+#define DEFINE_OP(op, ...) \
+ case CacheOp::op: \
+ if (!emit##op(reader)) return nullptr; \
+ break;
+ CACHE_IR_OPS(DEFINE_OP)
+#undef DEFINE_OP
+
+ default:
+ MOZ_CRASH("Invalid op");
+ }
+ allocator.nextOp();
+ } while (reader.more());
+
+ MOZ_ASSERT(!enteredStubFrame_);
+ masm.assumeUnreachable("Should have returned from IC");
+
+ // Done emitting the main IC code. Now emit the failure paths.
+ for (size_t i = 0; i < failurePaths.length(); i++) {
+ if (!emitFailurePath(i)) {
+ return nullptr;
+ }
+ if (JitOptions.enableICFramePointers) {
+ masm.pop(FramePointer);
+ }
+ EmitStubGuardFailure(masm);
+ }
+
+ Linker linker(masm);
+ Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Baseline));
+ if (!newStubCode) {
+ cx_->recoverFromOutOfMemory();
+ return nullptr;
+ }
+
+ return newStubCode;
+}
+
+bool BaselineCacheIRCompiler::emitGuardShape(ObjOperandId objId,
+ uint32_t shapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch1(allocator, masm);
+
+ bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
+
+ Maybe<AutoScratchRegister> maybeScratch2;
+ if (needSpectreMitigations) {
+ maybeScratch2.emplace(allocator, masm);
+ }
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address addr(stubAddress(shapeOffset));
+ masm.loadPtr(addr, scratch1);
+ if (needSpectreMitigations) {
+ masm.branchTestObjShape(Assembler::NotEqual, obj, scratch1, *maybeScratch2,
+ obj, failure->label());
+ } else {
+ masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj,
+ scratch1, failure->label());
+ }
+
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardProto(ObjOperandId objId,
+ uint32_t protoOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address addr(stubAddress(protoOffset));
+ masm.loadObjProto(obj, scratch);
+ masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardCompartment(ObjOperandId objId,
+ uint32_t globalOffset,
+ uint32_t compartmentOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+  // Verify that the global wrapper is still valid, as it is a prerequisite
+  // for doing the compartment check.
+ Address globalWrapper(stubAddress(globalOffset));
+ masm.loadPtr(globalWrapper, scratch);
+ Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
+ masm.branchPtr(Assembler::Equal, handlerAddr,
+ ImmPtr(&DeadObjectProxy::singleton), failure->label());
+
+ Address addr(stubAddress(compartmentOffset));
+ masm.branchTestObjCompartment(Assembler::NotEqual, obj, addr, scratch,
+ failure->label());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardAnyClass(ObjOperandId objId,
+ uint32_t claspOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address testAddr(stubAddress(claspOffset));
+ if (objectGuardNeedsSpectreMitigations(objId)) {
+ masm.branchTestObjClass(Assembler::NotEqual, obj, testAddr, scratch, obj,
+ failure->label());
+ } else {
+ masm.branchTestObjClassNoSpectreMitigations(
+ Assembler::NotEqual, obj, testAddr, scratch, failure->label());
+ }
+
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardHasProxyHandler(ObjOperandId objId,
+ uint32_t handlerOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address testAddr(stubAddress(handlerOffset));
+ masm.loadPtr(testAddr, scratch);
+
+ Address handlerAddr(obj, ProxyObject::offsetOfHandler());
+ masm.branchPtr(Assembler::NotEqual, handlerAddr, scratch, failure->label());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardSpecificObject(ObjOperandId objId,
+ uint32_t expectedOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address addr(stubAddress(expectedOffset));
+ masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardSpecificFunction(
+ ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
+ return emitGuardSpecificObject(objId, expectedOffset);
+}
+
+bool BaselineCacheIRCompiler::emitGuardFunctionScript(
+ ObjOperandId funId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register fun = allocator.useRegister(masm, funId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address addr(stubAddress(expectedOffset));
+ masm.loadPrivate(Address(fun, JSFunction::offsetOfJitInfoOrScript()),
+ scratch);
+ masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
+ uint32_t expectedOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register str = allocator.useRegister(masm, strId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address atomAddr(stubAddress(expectedOffset));
+
+ Label done;
+ masm.branchPtr(Assembler::Equal, atomAddr, str, &done);
+
+ // The pointers are not equal, so if the input string is also an atom it
+ // must be a different string.
+ masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT), failure->label());
+
+ // Check the length.
+ masm.loadPtr(atomAddr, scratch);
+ masm.loadStringLength(scratch, scratch);
+ masm.branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
+ scratch, failure->label());
+
+ // We have a non-atomized string with the same length. Call a helper
+ // function to do the comparison.
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSString* str1, JSString* str2);
+ masm.setupUnalignedABICall(scratch);
+ masm.loadPtr(atomAddr, scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(str);
+ masm.callWithABI<Fn, EqualStringsHelperPure>();
+ masm.storeCallPointerResult(scratch);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch);
+ masm.PopRegsInMaskIgnore(volatileRegs, ignore);
+ masm.branchIfFalseBool(scratch, failure->label());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardSpecificSymbol(SymbolOperandId symId,
+ uint32_t expectedOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register sym = allocator.useRegister(masm, symId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address addr(stubAddress(expectedOffset));
+ masm.branchPtr(Assembler::NotEqual, addr, sym, failure->label());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitLoadValueResult(uint32_t valOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ masm.loadValue(stubAddress(valOffset), output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitLoadFixedSlotResult(ObjOperandId objId,
+ uint32_t offsetOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ masm.load32(stubAddress(offsetOffset), scratch);
+ masm.loadValue(BaseIndex(obj, scratch, TimesOne), output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitLoadFixedSlotTypedResult(
+ ObjOperandId objId, uint32_t offsetOffset, ValueType) {
+ // The type is only used by Warp.
+ return emitLoadFixedSlotResult(objId, offsetOffset);
+}
+
+bool BaselineCacheIRCompiler::emitLoadDynamicSlotResult(ObjOperandId objId,
+ uint32_t offsetOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ masm.load32(stubAddress(offsetOffset), scratch);
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
+ masm.loadValue(BaseIndex(scratch2, scratch, TimesOne), output.valueReg());
+ return true;
+}
+
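+// Shared code for scripted and trial-inlined getter calls: load the getter's
+// JIT code, enter a stub frame, push |receiver| as thisv, and call the getter
+// (through the arguments rectifier if it expects any arguments).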
+bool BaselineCacheIRCompiler::emitCallScriptedGetterShared(
+ ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
+ uint32_t nargsAndFlagsOffset, Maybe<uint32_t> icScriptOffset) {
+ ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
+ Address getterAddr(stubAddress(getterOffset));
+
+ AutoScratchRegister code(allocator, masm);
+ AutoScratchRegister callee(allocator, masm);
+ AutoScratchRegister scratch(allocator, masm);
+
+ bool isInlined = icScriptOffset.isSome();
+
+ // First, retrieve raw jitcode for getter.
+ masm.loadPtr(getterAddr, callee);
+ if (isInlined) {
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ masm.loadBaselineJitCodeRaw(callee, code, failure->label());
+ } else {
+ masm.loadJitCodeRaw(callee, code);
+ }
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ if (!sameRealm) {
+ masm.switchToObjectRealm(callee, scratch);
+ }
+
+ // Align the stack such that the JitFrameLayout is aligned on
+ // JitStackAlignment.
+ masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis = */ false);
+
+ // Getter is called with 0 arguments, just |receiver| as thisv.
+ // Note that we use Push, not push, so that callJit will align the stack
+ // properly on ARM.
+ masm.Push(receiver);
+
+ if (isInlined) {
+ // Store icScript in the context.
+ Address icScriptAddr(stubAddress(*icScriptOffset));
+ masm.loadPtr(icScriptAddr, scratch);
+ masm.storeICScriptInJSContext(scratch);
+ }
+
+ masm.Push(callee);
+ masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, /* argc = */ 0);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.loadFunctionArgCount(callee, callee);
+ masm.branch32(Assembler::Equal, callee, Imm32(0), &noUnderflow);
+
+ // Call the arguments rectifier.
+ ArgumentsRectifierKind kind = isInlined
+ ? ArgumentsRectifierKind::TrialInlining
+ : ArgumentsRectifierKind::Normal;
+ TrampolinePtr argumentsRectifier =
+ cx_->runtime()->jitRuntime()->getArgumentsRectifier(kind);
+ masm.movePtr(argumentsRectifier, code);
+
+ masm.bind(&noUnderflow);
+ masm.callJit(code);
+
+ stubFrame.leave(masm);
+
+ if (!sameRealm) {
+ masm.switchToBaselineFrameRealm(R1.scratchReg());
+ }
+
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCallScriptedGetterResult(
+ ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<uint32_t> icScriptOffset = mozilla::Nothing();
+ return emitCallScriptedGetterShared(receiverId, getterOffset, sameRealm,
+ nargsAndFlagsOffset, icScriptOffset);
+}
+
+bool BaselineCacheIRCompiler::emitCallInlinedGetterResult(
+ ValOperandId receiverId, uint32_t getterOffset, uint32_t icScriptOffset,
+ bool sameRealm, uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitCallScriptedGetterShared(receiverId, getterOffset, sameRealm,
+ nargsAndFlagsOffset,
+ mozilla::Some(icScriptOffset));
+}
+
+bool BaselineCacheIRCompiler::emitCallNativeGetterResult(
+ ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
+ Address getterAddr(stubAddress(getterOffset));
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ // Load the callee in the scratch register.
+ masm.loadPtr(getterAddr, scratch);
+
+ masm.Push(receiver);
+ masm.Push(scratch);
+
+ using Fn =
+ bool (*)(JSContext*, HandleFunction, HandleValue, MutableHandleValue);
+ callVM<Fn, CallNativeGetter>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCallDOMGetterResult(ObjOperandId objId,
+ uint32_t jitInfoOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Address jitInfoAddr(stubAddress(jitInfoOffset));
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ // Load the JSJitInfo in the scratch register.
+ masm.loadPtr(jitInfoAddr, scratch);
+
+ masm.Push(obj);
+ masm.Push(scratch);
+
+ using Fn =
+ bool (*)(JSContext*, const JSJitInfo*, HandleObject, MutableHandleValue);
+ callVM<Fn, CallDOMGetter>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitProxyGetResult(ObjOperandId objId,
+ uint32_t idOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Address idAddr(stubAddress(idOffset));
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ // Load the jsid in the scratch register.
+ masm.loadPtr(idAddr, scratch);
+
+ masm.Push(scratch);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
+ callVM<Fn, ProxyGetProperty>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitFrameIsConstructingResult() {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register outputScratch = output.valueReg().scratchReg();
+
+ // Load the CalleeToken.
+ Address tokenAddr(baselineFrameReg(), JitFrameLayout::offsetOfCalleeToken());
+ masm.loadPtr(tokenAddr, outputScratch);
+
+  // The low bit indicates whether this call is constructing; just clear the
+  // other bits.
+ static_assert(CalleeToken_Function == 0x0);
+ static_assert(CalleeToken_FunctionConstructing == 0x1);
+ masm.andPtr(Imm32(0x1), outputScratch);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitLoadConstantStringResult(uint32_t strOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ masm.loadPtr(stubAddress(strOffset), scratch);
+ masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCompareStringResult(JSOp op,
+ StringOperandId lhsId,
+ StringOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ Register left = allocator.useRegister(masm, lhsId);
+ Register right = allocator.useRegister(masm, rhsId);
+
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ allocator.discardStack(masm);
+
+ Label slow, done;
+ masm.compareStrings(op, left, right, scratch, &slow);
+ masm.jump(&done);
+ masm.bind(&slow);
+ {
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
+ // - |left <= right| is implemented as |right >= left|.
+ // - |left > right| is implemented as |right < left|.
+ if (op == JSOp::Le || op == JSOp::Gt) {
+ masm.Push(left);
+ masm.Push(right);
+ } else {
+ masm.Push(right);
+ masm.Push(left);
+ }
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ if (op == JSOp::Eq || op == JSOp::StrictEq) {
+ callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
+ } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
+ callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
+ } else if (op == JSOp::Lt || op == JSOp::Gt) {
+ callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
+ } else {
+ MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
+ callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
+ }
+
+ stubFrame.leave(masm);
+ masm.storeCallPointerResult(scratch);
+ }
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitSameValueResult(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegister scratch(allocator, masm);
+ ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
+#ifdef JS_CODEGEN_X86
+ // Use the output to avoid running out of registers.
+ allocator.copyToScratchValueRegister(masm, rhsId, output.valueReg());
+ ValueOperand rhs = output.valueReg();
+#else
+ ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
+#endif
+
+ allocator.discardStack(masm);
+
+ Label done;
+ Label call;
+
+ // Check to see if the values have identical bits.
+ // This is correct for SameValue because SameValue(NaN,NaN) is true,
+ // and SameValue(0,-0) is false.
+ masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
+ &call);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ {
+ masm.bind(&call);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.pushValue(lhs);
+ masm.pushValue(rhs);
+
+ using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
+ callVM<Fn, SameValue>(masm);
+
+ stubFrame.leave(masm);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
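+// Shared code for StoreFixedSlot and StoreDynamicSlot: load the slot offset
+// from the stub data, pre-barrier the old slot value, store the new value,
+// and emit a post-barrier for the object.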
+bool BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed,
+ ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId) {
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ Maybe<AutoScratchRegister> scratch2;
+ if (!isFixed) {
+ scratch2.emplace(allocator, masm);
+ }
+
+ Address offsetAddr = stubAddress(offsetOffset);
+ masm.load32(offsetAddr, scratch1);
+
+ if (isFixed) {
+ BaseIndex slot(obj, scratch1, TimesOne);
+ EmitPreBarrier(masm, slot, MIRType::Value);
+ masm.storeValue(val, slot);
+ } else {
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2.ref());
+ BaseIndex slot(scratch2.ref(), scratch1, TimesOne);
+ EmitPreBarrier(masm, slot, MIRType::Value);
+ masm.storeValue(val, slot);
+ }
+
+ emitPostBarrierSlot(obj, val, scratch1);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitStoreFixedSlot(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitStoreSlotShared(true, objId, offsetOffset, rhsId);
+}
+
+bool BaselineCacheIRCompiler::emitStoreDynamicSlot(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitStoreSlotShared(false, objId, offsetOffset, rhsId);
+}
+
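+// Shared code for AddAndStoreFixedSlot, AddAndStoreDynamicSlot and
+// AllocateAndStoreDynamicSlot: grow the dynamic slots if needed, update the
+// object's shape, and store the value into the newly added slot.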
+bool BaselineCacheIRCompiler::emitAddAndStoreSlotShared(
+ CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset, Maybe<uint32_t> numNewSlotsOffset) {
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ Address newShapeAddr = stubAddress(newShapeOffset);
+ Address offsetAddr = stubAddress(offsetOffset);
+
+ if (op == CacheOp::AllocateAndStoreDynamicSlot) {
+ // We have to (re)allocate dynamic slots. Do this first, as it's the
+ // only fallible operation here. Note that growSlotsPure is fallible but
+ // does not GC.
+ Address numNewSlotsAddr = stubAddress(*numNewSlotsOffset);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
+ masm.setupUnalignedABICall(scratch1);
+ masm.loadJSContext(scratch1);
+ masm.passABIArg(scratch1);
+ masm.passABIArg(obj);
+ masm.load32(numNewSlotsAddr, scratch2);
+ masm.passABIArg(scratch2);
+ masm.callWithABI<Fn, NativeObject::growSlotsPure>();
+ masm.storeCallPointerResult(scratch1);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch1);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.branchIfFalseBool(scratch1, failure->label());
+ }
+
+ // Update the object's shape.
+ masm.loadPtr(newShapeAddr, scratch1);
+ masm.storeObjShape(scratch1, obj,
+ [](MacroAssembler& masm, const Address& addr) {
+ EmitPreBarrier(masm, addr, MIRType::Shape);
+ });
+
+ // Perform the store. No pre-barrier required since this is a new
+ // initialization.
+ masm.load32(offsetAddr, scratch1);
+ if (op == CacheOp::AddAndStoreFixedSlot) {
+ BaseIndex slot(obj, scratch1, TimesOne);
+ masm.storeValue(val, slot);
+ } else {
+ MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
+ op == CacheOp::AllocateAndStoreDynamicSlot);
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
+ BaseIndex slot(scratch2, scratch1, TimesOne);
+ masm.storeValue(val, slot);
+ }
+
+ emitPostBarrierSlot(obj, val, scratch1);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitAddAndStoreFixedSlot(
+ ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
+ return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot, objId,
+ offsetOffset, rhsId, newShapeOffset,
+ numNewSlotsOffset);
+}
+
+bool BaselineCacheIRCompiler::emitAddAndStoreDynamicSlot(
+ ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
+ return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot, objId,
+ offsetOffset, rhsId, newShapeOffset,
+ numNewSlotsOffset);
+}
+
+bool BaselineCacheIRCompiler::emitAllocateAndStoreDynamicSlot(
+ ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset, uint32_t numNewSlotsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot, objId,
+ offsetOffset, rhsId, newShapeOffset,
+ mozilla::Some(numNewSlotsOffset));
+}
+
+bool BaselineCacheIRCompiler::emitArrayJoinResult(ObjOperandId objId,
+ StringOperandId sepId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register sep = allocator.useRegister(masm, sepId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ allocator.discardStack(masm);
+
+ // Load obj->elements in scratch.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+ Address lengthAddr(scratch, ObjectElements::offsetOfLength());
+
+ // If array length is 0, return empty string.
+ Label finished;
+
+ {
+ Label arrayNotEmpty;
+ masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(0), &arrayNotEmpty);
+ masm.movePtr(ImmGCPtr(cx_->names().empty), scratch);
+ masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
+ masm.jump(&finished);
+ masm.bind(&arrayNotEmpty);
+ }
+
+ Label vmCall;
+
+ // Otherwise, handle array length 1 case.
+ masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(1), &vmCall);
+
+ // But only if initializedLength is also 1.
+ Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::NotEqual, initLength, Imm32(1), &vmCall);
+
+ // And only if elem0 is a string.
+ Address elementAddr(scratch, 0);
+ masm.branchTestString(Assembler::NotEqual, elementAddr, &vmCall);
+
+ // Store the value.
+ masm.loadValue(elementAddr, output.valueReg());
+ masm.jump(&finished);
+
+ // Otherwise call into the VM.
+ {
+ masm.bind(&vmCall);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.Push(sep);
+ masm.Push(obj);
+
+ using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
+ callVM<Fn, jit::ArrayJoin>(masm);
+
+ stubFrame.leave(masm);
+
+ masm.tagValue(JSVAL_TYPE_STRING, ReturnReg, output.valueReg());
+ }
+
+ masm.bind(&finished);
+
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitPackedArraySliceResult(
+ uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
+ Int32OperandId endId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
+
+ Register array = allocator.useRegister(masm, arrayId);
+ Register begin = allocator.useRegister(masm, beginId);
+ Register end = allocator.useRegister(masm, endId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchArrayIsNotPacked(array, scratch1, scratch2, failure->label());
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch1);
+
+  // Don't attempt to pre-allocate the object; instead, always use the slow
+  // path.
+ ImmPtr result(nullptr);
+
+ masm.Push(result);
+ masm.Push(end);
+ masm.Push(begin);
+ masm.Push(array);
+
+ using Fn =
+ JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
+ callVM<Fn, ArraySliceDense>(masm);
+
+ stubFrame.leave(masm);
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitArgumentsSliceResult(
+ uint32_t templateObjectOffset, ObjOperandId argsId, Int32OperandId beginId,
+ Int32OperandId endId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Register args = allocator.useRegister(masm, argsId);
+ Register begin = allocator.useRegister(masm, beginId);
+ Register end = allocator.useRegister(masm, endId);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+  // Don't attempt to pre-allocate the object; instead, always use the slow
+  // path.
+ ImmPtr result(nullptr);
+
+ masm.Push(result);
+ masm.Push(end);
+ masm.Push(begin);
+ masm.Push(args);
+
+ using Fn =
+ JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
+ callVM<Fn, ArgumentsSliceDense>(masm);
+
+ stubFrame.leave(masm);
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
+
+ ValueOperand val = allocator.useValueRegister(masm, inputId);
+
+ allocator.discardStack(masm);
+
+ Label isNotArray;
+ // Primitives are never Arrays.
+ masm.fallibleUnboxObject(val, scratch1, &isNotArray);
+
+ Label isArray;
+ masm.branchTestObjClass(Assembler::Equal, scratch1, &ArrayObject::class_,
+ scratch2, scratch1, &isArray);
+
+ // isArray can also return true for Proxy wrapped Arrays.
+ masm.branchTestObjectIsProxy(false, scratch1, scratch2, &isNotArray);
+ Label done;
+ {
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch2);
+
+ masm.Push(scratch1);
+
+ using Fn = bool (*)(JSContext*, HandleObject, bool*);
+ callVM<Fn, js::IsArrayFromJit>(masm);
+
+ stubFrame.leave(masm);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
+ masm.jump(&done);
+ }
+
+ masm.bind(&isNotArray);
+ masm.moveValue(BooleanValue(false), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&isArray);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
+ bool isPossiblyWrapped) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+
+ allocator.discardStack(masm);
+
+ Label notTypedArray, isProxy, done;
+ masm.loadObjClassUnsafe(obj, scratch);
+ masm.branchIfClassIsNotTypedArray(scratch, &notTypedArray);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&notTypedArray);
+ if (isPossiblyWrapped) {
+ masm.branchTestClassIsProxy(true, scratch, &isProxy);
+ }
+ masm.moveValue(BooleanValue(false), output.valueReg());
+
+ if (isPossiblyWrapped) {
+ masm.jump(&done);
+
+ masm.bind(&isProxy);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, JSObject*, bool*);
+ callVM<Fn, jit::IsPossiblyWrappedTypedArray>(masm);
+
+ stubFrame.leave(masm);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
+ Int32OperandId indexId,
+ bool handleOOB) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register str = allocator.useRegister(masm, strId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
+ AutoScratchRegister scratch3(allocator, masm);
+
+ // Bounds check, load string char.
+ Label done;
+ Label loadFailed;
+ if (!handleOOB) {
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
+ scratch1, failure->label());
+ masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
+ failure->label());
+
+ allocator.discardStack(masm);
+ } else {
+ // Discard the stack before jumping to |done|.
+ allocator.discardStack(masm);
+
+ // Return the empty string for out-of-bounds access.
+ masm.movePtr(ImmGCPtr(cx_->names().empty), scratch2);
+
+ // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
+ // guaranteed to see no nested ropes.
+ masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
+ scratch1, &done);
+ masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
+ }
+
+  // Load the static string for this char. For larger code units, perform a
+  // VM call.
+ Label vmCall;
+ masm.boundsCheck32PowerOfTwo(scratch1, StaticStrings::UNIT_STATIC_LIMIT,
+ &vmCall);
+ masm.movePtr(ImmPtr(&cx_->staticStrings().unitStaticTable), scratch2);
+ masm.loadPtr(BaseIndex(scratch2, scratch1, ScalePointer), scratch2);
+
+ masm.jump(&done);
+
+ if (handleOOB) {
+ masm.bind(&loadFailed);
+ masm.assumeUnreachable("loadStringChar can't fail for linear strings");
+ }
+
+ {
+ masm.bind(&vmCall);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch2);
+
+ masm.Push(scratch1);
+
+ using Fn = JSLinearString* (*)(JSContext*, int32_t);
+ callVM<Fn, jit::StringFromCharCode>(masm);
+
+ stubFrame.leave(masm);
+
+ masm.storeCallPointerResult(scratch2);
+ }
+
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_STRING, scratch2, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitStringFromCodeResult(Int32OperandId codeId,
+ StringCode stringCode) {
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Register code = allocator.useRegister(masm, codeId);
+
+ FailurePath* failure = nullptr;
+ if (stringCode == StringCode::CodePoint) {
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ }
+
+ if (stringCode == StringCode::CodePoint) {
+ // Note: This condition must match tryAttachStringFromCodePoint to prevent
+ // failure loops.
+ masm.branch32(Assembler::Above, code, Imm32(unicode::NonBMPMax),
+ failure->label());
+ }
+
+ allocator.discardStack(masm);
+
+ // We pre-allocate atoms for the first UNIT_STATIC_LIMIT characters.
+ // For code units larger than that, we must do a VM call.
+ Label vmCall;
+ masm.boundsCheck32PowerOfTwo(code, StaticStrings::UNIT_STATIC_LIMIT, &vmCall);
+
+ masm.movePtr(ImmPtr(cx_->runtime()->staticStrings->unitStaticTable), scratch);
+ masm.loadPtr(BaseIndex(scratch, code, ScalePointer), scratch);
+ Label done;
+ masm.jump(&done);
+
+ {
+ masm.bind(&vmCall);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.Push(code);
+
+ if (stringCode == StringCode::CodeUnit) {
+ using Fn = JSLinearString* (*)(JSContext*, int32_t);
+ callVM<Fn, jit::StringFromCharCode>(masm);
+ } else {
+ using Fn = JSString* (*)(JSContext*, int32_t);
+ callVM<Fn, jit::StringFromCodePoint>(masm);
+ }
+
+ stubFrame.leave(masm);
+ masm.storeCallPointerResult(scratch);
+ }
+
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitStringFromCharCodeResult(
+ Int32OperandId codeId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ return emitStringFromCodeResult(codeId, StringCode::CodeUnit);
+}
+
+bool BaselineCacheIRCompiler::emitStringFromCodePointResult(
+ Int32OperandId codeId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ return emitStringFromCodeResult(codeId, StringCode::CodePoint);
+}
+
+bool BaselineCacheIRCompiler::emitMathRandomResult(uint32_t rngOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister64 scratch2(allocator, masm);
+ AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
+
+ Address rngAddr(stubAddress(rngOffset));
+ masm.loadPtr(rngAddr, scratch1);
+
+ masm.randomDouble(scratch1, scratchFloat, scratch2,
+ output.valueReg().toRegister64());
+
+ if (js::SupportDifferentialTesting()) {
+ masm.loadConstantDouble(0.0, scratchFloat);
+ }
+
+ masm.boxDouble(scratchFloat, output.valueReg(), scratchFloat);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitReflectGetPrototypeOfResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Register obj = allocator.useRegister(masm, objId);
+
+ allocator.discardStack(masm);
+
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ masm.loadObjProto(obj, scratch);
+
+ Label hasProto;
+ masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);
+
+ // Call into the VM for lazy prototypes.
+ Label slow, done;
+ masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), &slow);
+
+ masm.moveValue(NullValue(), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&hasProto);
+ masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
+ masm.jump(&done);
+
+ {
+ masm.bind(&slow);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
+ callVM<Fn, jit::GetPrototypeOf>(masm);
+
+ stubFrame.leave(masm);
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
+ uint32_t claspOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Address claspAddr(stubAddress(claspOffset));
+ masm.loadObjClassUnsafe(obj, scratch);
+ masm.cmpPtrSet(Assembler::Equal, claspAddr, scratch.get(), scratch);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ return true;
+}
+
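+// Atomize |str| in place: if it is not already an atom, call
+// AtomizeStringNoGC through an ABI call and replace |str| with the resulting
+// atom, jumping to |failure| if atomization fails.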
+void BaselineCacheIRCompiler::emitAtomizeString(Register str, Register temp,
+ Label* failure) {
+ Label isAtom;
+ masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT), &isAtom);
+ {
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = JSAtom* (*)(JSContext* cx, JSString* str);
+ masm.setupUnalignedABICall(temp);
+ masm.loadJSContext(temp);
+ masm.passABIArg(temp);
+ masm.passABIArg(str);
+ masm.callWithABI<Fn, jit::AtomizeStringNoGC>();
+ masm.storeCallPointerResult(temp);
+
+ LiveRegisterSet ignore;
+ ignore.add(temp);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.branchPtr(Assembler::Equal, temp, ImmWord(0), failure);
+ masm.mov(temp, str);
+ }
+ masm.bind(&isAtom);
+}
+
+bool BaselineCacheIRCompiler::emitSetHasStringResult(ObjOperandId setId,
+ StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register set = allocator.useRegister(masm, setId);
+ Register str = allocator.useRegister(masm, strId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ emitAtomizeString(str, scratch1, failure->label());
+ masm.prepareHashString(str, scratch1, scratch2);
+
+ masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
+ masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
+ scratch3, scratch4);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitMapHasStringResult(ObjOperandId mapId,
+ StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ Register str = allocator.useRegister(masm, strId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ emitAtomizeString(str, scratch1, failure->label());
+ masm.prepareHashString(str, scratch1, scratch2);
+
+ masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
+ masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
+ scratch3, scratch4);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitMapGetStringResult(ObjOperandId mapId,
+ StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ Register str = allocator.useRegister(masm, strId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ emitAtomizeString(str, scratch1, failure->label());
+ masm.prepareHashString(str, scratch1, scratch2);
+
+ masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
+ masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
+ output.valueReg(), scratch2, scratch3, scratch4);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCallNativeSetter(
+ ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
+ bool sameRealm, uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register receiver = allocator.useRegister(masm, receiverId);
+ Address setterAddr(stubAddress(setterOffset));
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ // Load the callee in the scratch register.
+ masm.loadPtr(setterAddr, scratch);
+
+ masm.Push(val);
+ masm.Push(receiver);
+ masm.Push(scratch);
+
+ using Fn = bool (*)(JSContext*, HandleFunction, HandleObject, HandleValue);
+ callVM<Fn, CallNativeSetter>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
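+// Shared code for scripted and trial-inlined setter calls: load the setter's
+// JIT code, enter a stub frame, push |val| and |receiver|, and call the
+// setter (through the arguments rectifier on argument underflow).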
+bool BaselineCacheIRCompiler::emitCallScriptedSetterShared(
+ ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
+ bool sameRealm, uint32_t nargsAndFlagsOffset,
+ Maybe<uint32_t> icScriptOffset) {
+ AutoScratchRegister callee(allocator, masm);
+ AutoScratchRegister scratch(allocator, masm);
+#if defined(JS_CODEGEN_X86)
+ Register code = scratch;
+#else
+ AutoScratchRegister code(allocator, masm);
+#endif
+
+ Register receiver = allocator.useRegister(masm, receiverId);
+ Address setterAddr(stubAddress(setterOffset));
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+ bool isInlined = icScriptOffset.isSome();
+
+ // First, load the callee.
+ masm.loadPtr(setterAddr, callee);
+
+ if (isInlined) {
+ // If we are calling a trial-inlined setter, guard that the
+ // target has a BaselineScript.
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ masm.loadBaselineJitCodeRaw(callee, code, failure->label());
+ }
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ if (!sameRealm) {
+ masm.switchToObjectRealm(callee, scratch);
+ }
+
+ // Align the stack such that the JitFrameLayout is aligned on
+ // JitStackAlignment.
+ masm.alignJitStackBasedOnNArgs(1, /*countIncludesThis = */ false);
+
+ // Setter is called with 1 argument, and |receiver| as thisv. Note that we use
+ // Push, not push, so that callJit will align the stack properly on ARM.
+ masm.Push(val);
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));
+
+ // Push callee.
+ masm.Push(callee);
+
+ // Push frame descriptor.
+ masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, /* argc = */ 1);
+
+ if (isInlined) {
+ // Store icScript in the context.
+ Address icScriptAddr(stubAddress(*icScriptOffset));
+ masm.loadPtr(icScriptAddr, scratch);
+ masm.storeICScriptInJSContext(scratch);
+ }
+
+ // Load the jitcode pointer.
+ if (isInlined) {
+ // On non-x86 platforms, this pointer is still in a register
+ // after guarding on it above. On x86, we don't have enough
+ // registers and have to reload it here.
+#ifdef JS_CODEGEN_X86
+ masm.loadBaselineJitCodeRaw(callee, code);
+#endif
+ } else {
+ masm.loadJitCodeRaw(callee, code);
+ }
+
+ // Handle arguments underflow. The rhs value is no longer needed and
+ // can be used as scratch.
+ Label noUnderflow;
+ Register scratch2 = val.scratchReg();
+ masm.loadFunctionArgCount(callee, scratch2);
+ masm.branch32(Assembler::BelowOrEqual, scratch2, Imm32(1), &noUnderflow);
+
+ // Call the arguments rectifier.
+ ArgumentsRectifierKind kind = isInlined
+ ? ArgumentsRectifierKind::TrialInlining
+ : ArgumentsRectifierKind::Normal;
+ TrampolinePtr argumentsRectifier =
+ cx_->runtime()->jitRuntime()->getArgumentsRectifier(kind);
+ masm.movePtr(argumentsRectifier, code);
+
+ masm.bind(&noUnderflow);
+ masm.callJit(code);
+
+ stubFrame.leave(masm);
+
+ if (!sameRealm) {
+ masm.switchToBaselineFrameRealm(R1.scratchReg());
+ }
+
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCallScriptedSetter(
+ ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
+ bool sameRealm, uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<uint32_t> icScriptOffset = mozilla::Nothing();
+ return emitCallScriptedSetterShared(receiverId, setterOffset, rhsId,
+ sameRealm, nargsAndFlagsOffset,
+ icScriptOffset);
+}
+
+bool BaselineCacheIRCompiler::emitCallInlinedSetter(
+ ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
+ uint32_t icScriptOffset, bool sameRealm, uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitCallScriptedSetterShared(receiverId, setterOffset, rhsId,
+ sameRealm, nargsAndFlagsOffset,
+ mozilla::Some(icScriptOffset));
+}
+
+bool BaselineCacheIRCompiler::emitCallDOMSetter(ObjOperandId objId,
+ uint32_t jitInfoOffset,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+ Address jitInfoAddr(stubAddress(jitInfoOffset));
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ // Load the JSJitInfo in the scratch register.
+ masm.loadPtr(jitInfoAddr, scratch);
+
+ masm.Push(val);
+ masm.Push(obj);
+ masm.Push(scratch);
+
+ using Fn = bool (*)(JSContext*, const JSJitInfo*, HandleObject, HandleValue);
+ callVM<Fn, CallDOMSetter>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCallSetArrayLength(ObjOperandId objId,
+ bool strict,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
+ callVM<Fn, jit::SetArrayLength>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitProxySet(ObjOperandId objId,
+ uint32_t idOffset,
+ ValOperandId rhsId, bool strict) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+ Address idAddr(stubAddress(idOffset));
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ // Load the jsid in the scratch register.
+ masm.loadPtr(idAddr, scratch);
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(scratch);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
+ callVM<Fn, ProxySetProperty>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitProxySetByValue(ObjOperandId objId,
+ ValOperandId idId,
+ ValOperandId rhsId,
+ bool strict) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand idVal = allocator.useValueRegister(masm, idId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+ allocator.discardStack(masm);
+
+ // We need a scratch register but we don't have any registers available on
+ // x86, so temporarily store |obj| in the frame's scratch slot.
+ int scratchOffset = BaselineFrame::reverseOffsetOfScratchValue();
+ masm.storePtr(obj, Address(baselineFrameReg(), scratchOffset));
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, obj);
+
+ // Restore |obj|. Because we entered a stub frame we first have to load
+ // the original frame pointer.
+ masm.loadPtr(Address(FramePointer, 0), obj);
+ masm.loadPtr(Address(obj, scratchOffset), obj);
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(idVal);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
+ callVM<Fn, ProxySetPropertyByValue>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper(
+ ObjOperandId objId, Int32OperandId idId, ValOperandId rhsId, bool strict) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register id = allocator.useRegister(masm, idId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(id);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
+ HandleValue v, bool strict);
+ callVM<Fn, AddOrUpdateSparseElementHelper>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitMegamorphicSetElement(ObjOperandId objId,
+ ValOperandId idId,
+ ValOperandId rhsId,
+ bool strict) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand idVal = allocator.useValueRegister(masm, idId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+#ifdef JS_CODEGEN_X86
+ allocator.discardStack(masm);
+ // We need a scratch register but we don't have any registers available on
+ // x86, so temporarily store |obj| in the frame's scratch slot.
+ int scratchOffset = BaselineFrame::reverseOffsetOfScratchValue();
+ masm.storePtr(obj, Address(baselineFrameReg_, scratchOffset));
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, obj);
+
+ // Restore |obj|. Because we entered a stub frame we first have to load
+ // the original frame pointer.
+ masm.loadPtr(Address(FramePointer, 0), obj);
+ masm.loadPtr(Address(obj, scratchOffset), obj);
+#else
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+#endif
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(idVal);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
+ callVM<Fn, SetElementMegamorphic<false>>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitReturnFromIC() {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ allocator.discardStack(masm);
+ if (JitOptions.enableICFramePointers) {
+ masm.pop(FramePointer);
+ }
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitLoadArgumentFixedSlot(ValOperandId resultId,
+ uint8_t slotIndex) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ ValueOperand resultReg = allocator.defineValueRegister(masm, resultId);
+ Address addr = allocator.addressOf(masm, BaselineFrameSlot(slotIndex));
+ masm.loadValue(addr, resultReg);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitLoadArgumentDynamicSlot(ValOperandId resultId,
+ Int32OperandId argcId,
+ uint8_t slotIndex) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ ValueOperand resultReg = allocator.defineValueRegister(masm, resultId);
+ Register argcReg = allocator.useRegister(masm, argcId);
+ BaseValueIndex addr =
+ allocator.addressOf(masm, argcReg, BaselineFrameSlot(slotIndex));
+ masm.loadValue(addr, resultReg);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape(
+ ValOperandId expandoId, uint32_t shapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ ValueOperand val = allocator.useValueRegister(masm, expandoId);
+ AutoScratchRegister shapeScratch(allocator, masm);
+ AutoScratchRegister objScratch(allocator, masm);
+ Address shapeAddr(stubAddress(shapeOffset));
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
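+  // If the expando value is |undefined|, there is no expando object to guard
+  // and the check succeeds.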
+ Label done;
+ masm.branchTestUndefined(Assembler::Equal, val, &done);
+
+ masm.debugAssertIsObject(val);
+ masm.loadPtr(shapeAddr, shapeScratch);
+ masm.unboxObject(val, objScratch);
+ // The expando object is not used in this case, so we don't need Spectre
+ // mitigations.
+ masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
+ shapeScratch, failure->label());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration(
+ ObjOperandId objId, uint32_t expandoAndGenerationOffset,
+ uint32_t generationOffset, ValOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Address expandoAndGenerationAddr(stubAddress(expandoAndGenerationOffset));
+ Address generationAddr(stubAddress(generationOffset));
+
+ AutoScratchRegister scratch(allocator, masm);
+ ValueOperand output = allocator.defineValueRegister(masm, resultId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
+ Address expandoAddr(scratch,
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
+
+  // Load the ExpandoAndGeneration* in the output scratch register and guard
+  // that it matches the proxy's ExpandoAndGeneration.
+ masm.loadPtr(expandoAndGenerationAddr, output.scratchReg());
+ masm.branchPrivatePtr(Assembler::NotEqual, expandoAddr, output.scratchReg(),
+ failure->label());
+
+ // Guard expandoAndGeneration->generation matches the expected generation.
+ masm.branch64(
+ Assembler::NotEqual,
+ Address(output.scratchReg(), ExpandoAndGeneration::offsetOfGeneration()),
+ generationAddr, scratch, failure->label());
+
+ // Load expandoAndGeneration->expando into the output Value register.
+ masm.loadValue(
+ Address(output.scratchReg(), ExpandoAndGeneration::offsetOfExpando()),
+ output);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::init(CacheKind kind) {
+ if (!allocator.init()) {
+ return false;
+ }
+
+ size_t numInputs = writer_.numInputOperands();
+ MOZ_ASSERT(numInputs == NumInputsForCacheKind(kind));
+
+  // Baseline passes the first 2 inputs in R0/R1; the remaining Values are
+  // stored on the stack.
+ size_t numInputsInRegs = std::min(numInputs, size_t(2));
+ AllocatableGeneralRegisterSet available =
+ BaselineICAvailableGeneralRegs(numInputsInRegs);
+
+ switch (kind) {
+ case CacheKind::NewArray:
+ case CacheKind::NewObject:
+ case CacheKind::GetIntrinsic:
+ MOZ_ASSERT(numInputs == 0);
+ outputUnchecked_.emplace(R0);
+ break;
+ case CacheKind::GetProp:
+ case CacheKind::TypeOf:
+ case CacheKind::ToPropertyKey:
+ case CacheKind::GetIterator:
+ case CacheKind::OptimizeSpreadCall:
+ case CacheKind::ToBool:
+ case CacheKind::UnaryArith:
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, R0);
+ outputUnchecked_.emplace(R0);
+ break;
+ case CacheKind::Compare:
+ case CacheKind::GetElem:
+ case CacheKind::GetPropSuper:
+ case CacheKind::In:
+ case CacheKind::HasOwn:
+ case CacheKind::CheckPrivateField:
+ case CacheKind::InstanceOf:
+ case CacheKind::BinaryArith:
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(0, R0);
+ allocator.initInputLocation(1, R1);
+ outputUnchecked_.emplace(R0);
+ break;
+ case CacheKind::SetProp:
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(0, R0);
+ allocator.initInputLocation(1, R1);
+ break;
+ case CacheKind::GetElemSuper:
+ MOZ_ASSERT(numInputs == 3);
+ allocator.initInputLocation(0, BaselineFrameSlot(0));
+ allocator.initInputLocation(1, R1);
+ allocator.initInputLocation(2, R0);
+ outputUnchecked_.emplace(R0);
+ break;
+ case CacheKind::SetElem:
+ MOZ_ASSERT(numInputs == 3);
+ allocator.initInputLocation(0, R0);
+ allocator.initInputLocation(1, R1);
+ allocator.initInputLocation(2, BaselineFrameSlot(0));
+ break;
+ case CacheKind::GetName:
+ case CacheKind::BindName:
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_OBJECT);
+#if defined(JS_NUNBOX32)
+ // availableGeneralRegs can't know that GetName/BindName is only using
+ // the payloadReg and not typeReg on x86.
+ available.add(R0.typeReg());
+#endif
+ outputUnchecked_.emplace(R0);
+ break;
+ case CacheKind::Call:
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_INT32);
+#if defined(JS_NUNBOX32)
+ // availableGeneralRegs can't know that Call is only using
+ // the payloadReg and not typeReg on x86.
+ available.add(R0.typeReg());
+#endif
+ outputUnchecked_.emplace(R0);
+ break;
+ case CacheKind::CloseIter:
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_OBJECT);
+#if defined(JS_NUNBOX32)
+ // availableGeneralRegs can't know that CloseIter is only using
+ // the payloadReg and not typeReg on x86.
+ available.add(R0.typeReg());
+#endif
+ break;
+ }
+
+ // Baseline doesn't allocate float registers so none of them are live.
+ liveFloatRegs_ = LiveFloatRegisterSet(FloatRegisterSet());
+
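+  // When IC frame pointers are enabled, reserve a general register to hold
+  // the baseline frame pointer.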
+ if (JitOptions.enableICFramePointers) {
+ baselineFrameReg_ = available.takeAny();
+ }
+
+ allocator.initAvailableRegs(available);
+ return true;
+}
+
+static void ResetEnteredCounts(const ICEntry* icEntry) {
+ ICStub* stub = icEntry->firstStub();
+ while (true) {
+ stub->resetEnteredCount();
+ if (stub->isFallback()) {
+ return;
+ }
+ stub = stub->toCacheIRStub()->next();
+ }
+}
+
+static ICStubSpace* StubSpaceForStub(bool makesGCCalls, JSScript* script,
+ ICScript* icScript) {
+ if (makesGCCalls) {
+ return icScript->jitScriptStubSpace();
+ }
+ return script->zone()->jitZone()->optimizedStubSpace();
+}
+
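+// Cap on the number of shapes a folded stub will accumulate; AddToFoldedStub
+// stops adding new cases once this limit is reached.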
+static const uint32_t MaxFoldedShapes = 16;
+
+bool js::jit::TryFoldingStubs(JSContext* cx, ICFallbackStub* fallback,
+ JSScript* script, ICScript* icScript) {
+ ICEntry* icEntry = icScript->icEntryForStub(fallback);
+ ICStub* entryStub = icEntry->firstStub();
+
+ // Don't fold unless there are at least two stubs.
+ if (entryStub == fallback) {
+ return true;
+ }
+ ICCacheIRStub* firstStub = entryStub->toCacheIRStub();
+ if (firstStub->next()->isFallback()) {
+ return true;
+ }
+
+ const uint8_t* firstStubData = firstStub->stubDataStart();
+ const CacheIRStubInfo* stubInfo = firstStub->stubInfo();
+
+ // Check to see if:
+ // a) all of the stubs in this chain have the exact same code.
+ // b) all of the stubs have the same stub field data, except
+ // for a single GuardShape where they differ.
+ // c) at least one stub after the first has a non-zero entry count.
+ //
+ // If all of these conditions hold, then we generate a single stub
+ // that covers all the existing cases by replacing GuardShape with
+ // GuardMultipleShapes.
+
+ uint32_t numActive = 0;
+ Maybe<uint32_t> foldableFieldOffset;
+ RootedValue shape(cx);
+ RootedValueVector shapeList(cx);
+
+ auto addShape = [&shapeList, cx](uintptr_t rawShape) -> bool {
+ Shape* shape = reinterpret_cast<Shape*>(rawShape);
+ if (cx->compartment() != shape->compartment()) {
+ return false;
+ }
+ if (!shapeList.append(PrivateGCThingValue(shape))) {
+ cx->recoverFromOutOfMemory();
+ return false;
+ }
+ return true;
+ };
+
+ for (ICCacheIRStub* other = firstStub->nextCacheIR(); other;
+ other = other->nextCacheIR()) {
+ // Verify that the stubs share the same code.
+ if (other->stubInfo() != stubInfo) {
+ return true;
+ }
+ const uint8_t* otherStubData = other->stubDataStart();
+
+ if (other->enteredCount() > 0) {
+ numActive++;
+ }
+
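+    // Compare the stub data of the two stubs field by field.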
+ uint32_t fieldIndex = 0;
+ size_t offset = 0;
+ while (stubInfo->fieldType(fieldIndex) != StubField::Type::Limit) {
+ StubField::Type fieldType = stubInfo->fieldType(fieldIndex);
+
+ if (StubField::sizeIsWord(fieldType)) {
+ uintptr_t firstRaw = stubInfo->getStubRawWord(firstStubData, offset);
+ uintptr_t otherRaw = stubInfo->getStubRawWord(otherStubData, offset);
+
+ if (firstRaw != otherRaw) {
+ if (fieldType != StubField::Type::Shape) {
+ // Case 1: a field differs that is not a Shape. We only support
+ // folding GuardShape to GuardMultipleShapes.
+ return true;
+ }
+ if (foldableFieldOffset.isNothing()) {
+ // Case 2: this is the first field where the stub data differs.
+ foldableFieldOffset.emplace(offset);
+ if (!addShape(firstRaw) || !addShape(otherRaw)) {
+ return true;
+ }
+ } else if (*foldableFieldOffset == offset) {
+ // Case 3: this is the corresponding offset in a different stub.
+ if (!addShape(otherRaw)) {
+ return true;
+ }
+ } else {
+ // Case 4: we have found more than one field that differs.
+ return true;
+ }
+ }
+ } else {
+ MOZ_ASSERT(StubField::sizeIsInt64(fieldType));
+
+ // We do not support folding any ops with int64-sized fields.
+ if (stubInfo->getStubRawInt64(firstStubData, offset) !=
+ stubInfo->getStubRawInt64(otherStubData, offset)) {
+ return true;
+ }
+ }
+
+ offset += StubField::sizeInBytes(fieldType);
+ fieldIndex++;
+ }
+
+ // We should never attach two completely identical stubs.
+ MOZ_ASSERT(foldableFieldOffset.isSome());
+ }
+
+ if (numActive == 0) {
+ return true;
+ }
+
+ // Clone the CacheIR, replacing GuardShape with GuardMultipleShapes.
+ CacheIRWriter writer(cx);
+ CacheIRReader reader(stubInfo);
+ CacheIRCloner cloner(firstStub);
+
+ // Initialize the operands.
+ CacheKind cacheKind = stubInfo->kind();
+ for (uint32_t i = 0; i < NumInputsForCacheKind(cacheKind); i++) {
+ writer.setInputOperandId(i);
+ }
+
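+  // Replay the IR of the first stub, replacing the GuardShape at the foldable
+  // offset with a GuardMultipleShapes over the collected shape list.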
+ bool success = false;
+ while (reader.more()) {
+ CacheOp op = reader.readOp();
+ switch (op) {
+ case CacheOp::GuardShape: {
+ ObjOperandId objId = reader.objOperandId();
+ uint32_t shapeOffset = reader.stubOffset();
+ if (shapeOffset == *foldableFieldOffset) {
+ // Ensure that the allocation of the ListObject doesn't trigger a GC
+ // and free the stubInfo we're currently reading. Note that
+ // AutoKeepJitScripts isn't sufficient, because optimized stubs can be
+ // discarded even if the JitScript is preserved.
+ gc::AutoSuppressGC suppressGC(cx);
+
+ Rooted<ListObject*> shapeObj(cx, ListObject::create(cx));
+ if (!shapeObj) {
+ return false;
+ }
+ for (uint32_t i = 0; i < shapeList.length(); i++) {
+ if (!shapeObj->append(cx, shapeList[i])) {
+ cx->recoverFromOutOfMemory();
+ return false;
+ }
+ }
+
+ writer.guardMultipleShapes(objId, shapeObj);
+ success = true;
+ } else {
+ Shape* shape = stubInfo->getStubField<Shape*>(firstStub, shapeOffset);
+ writer.guardShape(objId, shape);
+ }
+ break;
+ }
+ default:
+ cloner.cloneOp(op, reader, writer);
+ break;
+ }
+ }
+ if (!success) {
+ // If the shape field that differed was not part of a GuardShape,
+ // we can't fold these stubs together.
+ return true;
+ }
+
+ // Replace the existing stubs with the new folded stub.
+ fallback->discardStubs(cx, icEntry);
+
+ ICAttachResult result = AttachBaselineCacheIRStub(
+ cx, writer, cacheKind, script, icScript, fallback, "StubFold");
+ if (result == ICAttachResult::OOM) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ MOZ_ASSERT(result == ICAttachResult::Attached);
+
+ fallback->setHasFoldedStub();
+ return true;
+}
+
+static bool AddToFoldedStub(JSContext* cx, const CacheIRWriter& writer,
+ ICScript* icScript, ICFallbackStub* fallback) {
+ ICEntry* icEntry = icScript->icEntryForStub(fallback);
+ ICStub* entryStub = icEntry->firstStub();
+
+ // We only update folded stubs if they're the only stub in the IC.
+ if (entryStub == fallback) {
+ return false;
+ }
+ ICCacheIRStub* stub = entryStub->toCacheIRStub();
+ if (!stub->next()->isFallback()) {
+ return false;
+ }
+
+ const CacheIRStubInfo* stubInfo = stub->stubInfo();
+ const uint8_t* stubData = stub->stubDataStart();
+
+ Maybe<uint32_t> shapeFieldOffset;
+ RootedValue newShape(cx);
+ Rooted<ListObject*> foldedShapes(cx);
+
+ CacheIRReader stubReader(stubInfo);
+ CacheIRReader newReader(writer);
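+  // Walk the two IR streams in lockstep. They must be identical, except that
+  // the existing stub has GuardMultipleShapes where the new stub has
+  // GuardShape.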
+ while (newReader.more() && stubReader.more()) {
+ CacheOp newOp = newReader.readOp();
+ CacheOp stubOp = stubReader.readOp();
+ switch (stubOp) {
+ case CacheOp::GuardMultipleShapes: {
+ // Check that the new stub has a corresponding GuardShape.
+ if (newOp != CacheOp::GuardShape) {
+ return false;
+ }
+
+ // Check that the object being guarded is the same.
+ if (newReader.objOperandId() != stubReader.objOperandId()) {
+ return false;
+ }
+
+ // Check that the field offset is the same.
+ uint32_t newShapeOffset = newReader.stubOffset();
+ uint32_t stubShapesOffset = stubReader.stubOffset();
+ if (newShapeOffset != stubShapesOffset) {
+ return false;
+ }
+ MOZ_ASSERT(shapeFieldOffset.isNothing());
+ shapeFieldOffset.emplace(newShapeOffset);
+
+        // Get the shape from the new stub.
+ StubField shapeField =
+ writer.readStubField(newShapeOffset, StubField::Type::Shape);
+ Shape* shape = reinterpret_cast<Shape*>(shapeField.asWord());
+ newShape = PrivateGCThingValue(shape);
+
+ // Get the shape array from the old stub.
+ JSObject* shapeList =
+ stubInfo->getStubField<JSObject*>(stub, stubShapesOffset);
+ foldedShapes = &shapeList->as<ListObject>();
+ MOZ_ASSERT(foldedShapes->compartment() == shape->compartment());
+ break;
+ }
+ default: {
+ // Check that the op is the same.
+ if (newOp != stubOp) {
+ return false;
+ }
+
+ // Check that the arguments are the same.
+ uint32_t argLength = CacheIROpInfos[size_t(newOp)].argLength;
+ for (uint32_t i = 0; i < argLength; i++) {
+ if (newReader.readByte() != stubReader.readByte()) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ MOZ_ASSERT(shapeFieldOffset.isSome());
+
+  // Verify that all the other stub fields are the same.
+ if (!writer.stubDataEqualsIgnoring(stubData, *shapeFieldOffset)) {
+ return false;
+ }
+
+ // Limit the maximum number of shapes we will add before giving up.
+ if (foldedShapes->length() == MaxFoldedShapes) {
+ return false;
+ }
+
+ if (!foldedShapes->append(cx, newShape)) {
+ cx->recoverFromOutOfMemory();
+ return false;
+ }
+
+ return true;
+}
+
+ICAttachResult js::jit::AttachBaselineCacheIRStub(
+ JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
+ JSScript* outerScript, ICScript* icScript, ICFallbackStub* stub,
+ const char* name) {
+ // We shouldn't GC or report OOM (or any other exception) here.
+ AutoAssertNoPendingException aanpe(cx);
+ JS::AutoCheckCannotGC nogc;
+
+ if (writer.tooLarge()) {
+ return ICAttachResult::TooLarge;
+ }
+ if (writer.oom()) {
+ return ICAttachResult::OOM;
+ }
+ MOZ_ASSERT(!writer.failed());
+
+ // Just a sanity check: the caller should ensure we don't attach an
+ // unlimited number of stubs.
+#ifdef DEBUG
+ static const size_t MaxOptimizedCacheIRStubs = 16;
+ MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);
+#endif
+
+ constexpr uint32_t stubDataOffset = sizeof(ICCacheIRStub);
+ static_assert(stubDataOffset % sizeof(uint64_t) == 0,
+ "Stub fields must be aligned");
+
+ JitZone* jitZone = cx->zone()->jitZone();
+
+ // Check if we already have JitCode for this stub.
+ CacheIRStubInfo* stubInfo;
+ CacheIRStubKey::Lookup lookup(kind, ICStubEngine::Baseline,
+ writer.codeStart(), writer.codeLength());
+ JitCode* code = jitZone->getBaselineCacheIRStubCode(lookup, &stubInfo);
+ if (!code) {
+ // We have to generate stub code.
+ TempAllocator temp(&cx->tempLifoAlloc());
+ JitContext jctx(cx);
+ BaselineCacheIRCompiler comp(cx, temp, writer, stubDataOffset);
+ if (!comp.init(kind)) {
+ return ICAttachResult::OOM;
+ }
+
+ code = comp.compile();
+ if (!code) {
+ return ICAttachResult::OOM;
+ }
+
+ comp.perfSpewer().saveProfile(code, name);
+
+    // Allocate the shared CacheIRStubInfo. The putBaselineCacheIRStubCode
+    // call below transfers ownership to the stub code HashMap, so we don't
+    // have to free it ourselves.
+ MOZ_ASSERT(!stubInfo);
+ stubInfo =
+ CacheIRStubInfo::New(kind, ICStubEngine::Baseline, comp.makesGCCalls(),
+ stubDataOffset, writer);
+ if (!stubInfo) {
+ return ICAttachResult::OOM;
+ }
+
+ CacheIRStubKey key(stubInfo);
+ if (!jitZone->putBaselineCacheIRStubCode(lookup, key, code)) {
+ return ICAttachResult::OOM;
+ }
+ }
+
+ MOZ_ASSERT(code);
+ MOZ_ASSERT(stubInfo);
+ MOZ_ASSERT(stubInfo->stubDataSize() == writer.stubDataSize());
+
+ ICEntry* icEntry = icScript->icEntryForStub(stub);
+
+ // Ensure we don't attach duplicate stubs. This can happen if a stub failed
+ // for some reason and the IR generator doesn't check for exactly the same
+ // conditions.
+ for (ICStub* iter = icEntry->firstStub(); iter != stub;
+ iter = iter->toCacheIRStub()->next()) {
+ auto otherStub = iter->toCacheIRStub();
+ if (otherStub->stubInfo() != stubInfo) {
+ continue;
+ }
+ if (!writer.stubDataEquals(otherStub->stubDataStart())) {
+ continue;
+ }
+
+    // We found a stub that's exactly the same as the stub we're about to
+    // attach. Return DuplicateStub; the caller should do nothing in this
+    // case.
+ JitSpew(JitSpew_BaselineICFallback,
+ "Tried attaching identical stub for (%s:%u:%u)",
+ outerScript->filename(), outerScript->lineno(),
+ outerScript->column());
+ return ICAttachResult::DuplicateStub;
+ }
+
+ // Try including this case in an existing folded stub.
+ if (stub->hasFoldedStub() && AddToFoldedStub(cx, writer, icScript, stub)) {
+ // Instead of adding a new stub, we have added a new case to an
+ // existing folded stub. We do not have to invalidate Warp,
+ // because the ListObject that stores the cases is shared between
+ // baseline and Warp. Reset the entered count for the fallback
+ // stub so that we can still transpile, and reset the bailout
+ // counter if we have already been transpiled.
+ stub->resetEnteredCount();
+ JSScript* owningScript = nullptr;
+ if (cx->zone()->jitZone()->hasStubFoldingBailoutData(outerScript)) {
+ owningScript = cx->zone()->jitZone()->stubFoldingBailoutParent();
+ } else {
+ owningScript = icScript->isInlined()
+ ? icScript->inliningRoot()->owningScript()
+ : outerScript;
+ }
+ cx->zone()->jitZone()->clearStubFoldingBailoutData();
+ if (stub->usedByTranspiler() && owningScript->hasIonScript()) {
+ owningScript->ionScript()->resetNumFixableBailouts();
+ }
+ return ICAttachResult::Attached;
+ }
+
+ // Time to allocate and attach a new stub.
+
+ size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
+
+ ICStubSpace* stubSpace =
+ StubSpaceForStub(stubInfo->makesGCCalls(), outerScript, icScript);
+ void* newStubMem = stubSpace->alloc(bytesNeeded);
+ if (!newStubMem) {
+ return ICAttachResult::OOM;
+ }
+
+ // Resetting the entered counts on the IC chain makes subsequent reasoning
+ // about the chain much easier.
+ ResetEnteredCounts(icEntry);
+
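+  // If we already inlined, attaching another stub marks trial inlining as
+  // failed; otherwise adopt the state requested by the IR writer.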
+ switch (stub->trialInliningState()) {
+ case TrialInliningState::Initial:
+ case TrialInliningState::Candidate:
+ stub->setTrialInliningState(writer.trialInliningState());
+ break;
+ case TrialInliningState::MonomorphicInlined:
+ case TrialInliningState::Inlined:
+ stub->setTrialInliningState(TrialInliningState::Failure);
+ break;
+ case TrialInliningState::Failure:
+ break;
+ }
+
+ auto newStub = new (newStubMem) ICCacheIRStub(code, stubInfo);
+ writer.copyStubData(newStub->stubDataStart());
+ newStub->setTypeData(writer.typeData());
+ stub->addNewStub(icEntry, newStub);
+ return ICAttachResult::Attached;
+}
+
+uint8_t* ICCacheIRStub::stubDataStart() {
+ return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
+}
+
+bool BaselineCacheIRCompiler::emitCallStringObjectConcatResult(
+ ValOperandId lhsId, ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
+ ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.pushValue(rhs);
+ masm.pushValue(lhs);
+
+ using Fn = bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
+ callVM<Fn, DoConcatStringObject>(masm);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+// The value of argc entering the call IC is not always the value of
+// argc entering the callee. (For example, argc for a spread call IC
+// is always 1, but argc for the callee is the length of the array.)
+// In these cases, we update argc as part of the call op itself, to
+// avoid modifying input operands while it is still possible to fail a
+// guard. We also limit callee argc to a reasonable value to avoid
+// blowing the stack limit.
+bool BaselineCacheIRCompiler::updateArgc(CallFlags flags, Register argcReg,
+ Register scratch) {
+ CallFlags::ArgFormat format = flags.getArgFormat();
+ switch (format) {
+ case CallFlags::Standard:
+ // Standard calls have no extra guards, and argc is already correct.
+ return true;
+ case CallFlags::FunCall:
+ // fun_call has no extra guards, and argc will be corrected in
+ // pushFunCallArguments.
+ return true;
+ case CallFlags::FunApplyNullUndefined:
+ // argc must be 0 if null or undefined is passed as second argument to
+ // |apply|.
+ masm.move32(Imm32(0), argcReg);
+ return true;
+ default:
+ break;
+ }
+
+ // We need to guard the length of the arguments.
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Load callee argc into scratch.
+ switch (flags.getArgFormat()) {
+ case CallFlags::Spread:
+ case CallFlags::FunApplyArray: {
+ // Load the length of the elements.
+ BaselineFrameSlot slot(flags.isConstructing());
+ masm.unboxObject(allocator.addressOf(masm, slot), scratch);
+ masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
+ masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
+ break;
+ }
+ case CallFlags::FunApplyArgsObj: {
+ // Load the arguments object length.
+ BaselineFrameSlot slot(0);
+ masm.unboxObject(allocator.addressOf(masm, slot), scratch);
+ masm.loadArgumentsObjectLength(scratch, scratch, failure->label());
+ break;
+ }
+ default:
+ MOZ_CRASH("Unknown arg format");
+ }
+
+ // Ensure that callee argc does not exceed the limit.
+ masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX),
+ failure->label());
+
+ // We're past the final guard. Update argc with the new value.
+ masm.move32(scratch, argcReg);
+
+ return true;
+}
+
+void BaselineCacheIRCompiler::pushArguments(Register argcReg,
+ Register calleeReg,
+ Register scratch, Register scratch2,
+ CallFlags flags, uint32_t argcFixed,
+ bool isJitCall) {
+ switch (flags.getArgFormat()) {
+ case CallFlags::Standard:
+ pushStandardArguments(argcReg, scratch, scratch2, argcFixed, isJitCall,
+ flags.isConstructing());
+ break;
+ case CallFlags::Spread:
+ pushArrayArguments(argcReg, scratch, scratch2, isJitCall,
+ flags.isConstructing());
+ break;
+ case CallFlags::FunCall:
+ pushFunCallArguments(argcReg, calleeReg, scratch, scratch2, argcFixed,
+ isJitCall);
+ break;
+ case CallFlags::FunApplyArgsObj:
+ pushFunApplyArgsObj(argcReg, calleeReg, scratch, scratch2, isJitCall);
+ break;
+ case CallFlags::FunApplyArray:
+ pushArrayArguments(argcReg, scratch, scratch2, isJitCall,
+ /*isConstructing =*/false);
+ break;
+ case CallFlags::FunApplyNullUndefined:
+ pushFunApplyNullUndefinedArguments(calleeReg, isJitCall);
+ break;
+ default:
+ MOZ_CRASH("Invalid arg format");
+ }
+}
+
+void BaselineCacheIRCompiler::pushStandardArguments(
+ Register argcReg, Register scratch, Register scratch2, uint32_t argcFixed,
+ bool isJitCall, bool isConstructing) {
+ MOZ_ASSERT(enteredStubFrame_);
+
+ // The arguments to the call IC are pushed on the stack left-to-right.
+ // Our calling conventions want them right-to-left in the callee, so
+ // we duplicate them on the stack in reverse order.
+
+ int additionalArgc = 1 + !isJitCall + isConstructing;
+ if (argcFixed < MaxUnrolledArgCopy) {
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::Equal, argcReg, Imm32(argcFixed), &ok);
+ masm.assumeUnreachable("Invalid argcFixed value");
+ masm.bind(&ok);
+#endif
+
+ size_t realArgc = argcFixed + additionalArgc;
+
+ if (isJitCall) {
+ masm.alignJitStackBasedOnNArgs(realArgc, /*countIncludesThis = */ true);
+ }
+
+ for (size_t i = 0; i < realArgc; ++i) {
+ masm.pushValue(Address(
+ FramePointer, BaselineStubFrameLayout::Size() + i * sizeof(Value)));
+ }
+ } else {
+ MOZ_ASSERT(argcFixed == MaxUnrolledArgCopy);
+
+ // argPtr initially points to the last argument. Skip the stub frame.
+ Register argPtr = scratch2;
+ Address argAddress(FramePointer, BaselineStubFrameLayout::Size());
+ masm.computeEffectiveAddress(argAddress, argPtr);
+
+ // countReg contains the total number of arguments to copy.
+ // In addition to the actual arguments, we have to copy hidden arguments.
+ // We always have to copy |this|.
+ // If we are constructing, we have to copy |newTarget|.
+ // If we are not a jit call, we have to copy |callee|.
+ // We use a scratch register to avoid clobbering argc, which is an input
+ // reg.
+ Register countReg = scratch;
+ masm.move32(argcReg, countReg);
+ masm.add32(Imm32(additionalArgc), countReg);
+
+ // Align the stack such that the JitFrameLayout is aligned on the
+ // JitStackAlignment.
+ if (isJitCall) {
+ masm.alignJitStackBasedOnNArgs(countReg, /*countIncludesThis = */ true);
+ }
+
+ // Push all values, starting at the last one.
+ Label loop, done;
+ masm.branchTest32(Assembler::Zero, countReg, countReg, &done);
+ masm.bind(&loop);
+ {
+ masm.pushValue(Address(argPtr, 0));
+ masm.addPtr(Imm32(sizeof(Value)), argPtr);
+
+ masm.branchSub32(Assembler::NonZero, Imm32(1), countReg, &loop);
+ }
+ masm.bind(&done);
+ }
+}
+
+void BaselineCacheIRCompiler::pushArrayArguments(Register argcReg,
+ Register scratch,
+ Register scratch2,
+ bool isJitCall,
+ bool isConstructing) {
+ MOZ_ASSERT(enteredStubFrame_);
+
+ // Pull the array off the stack before aligning.
+ Register startReg = scratch;
+ size_t arrayOffset =
+ (isConstructing * sizeof(Value)) + BaselineStubFrameLayout::Size();
+ masm.unboxObject(Address(FramePointer, arrayOffset), startReg);
+ masm.loadPtr(Address(startReg, NativeObject::offsetOfElements()), startReg);
+
+ // Align the stack such that the JitFrameLayout is aligned on the
+ // JitStackAlignment.
+ if (isJitCall) {
+ Register alignReg = argcReg;
+ if (isConstructing) {
+ // If we are constructing, we must take newTarget into account.
+ alignReg = scratch2;
+ masm.computeEffectiveAddress(Address(argcReg, 1), alignReg);
+ }
+ masm.alignJitStackBasedOnNArgs(alignReg, /*countIncludesThis =*/false);
+ }
+
+  // Push newTarget, if necessary.
+ if (isConstructing) {
+ masm.pushValue(Address(FramePointer, BaselineStubFrameLayout::Size()));
+ }
+
+  // Push arguments: set up endReg to point to &array[argc].
+ Register endReg = scratch2;
+ BaseValueIndex endAddr(startReg, argcReg);
+ masm.computeEffectiveAddress(endAddr, endReg);
+
+  // Copying pre-decrements endReg by sizeof(Value) until startReg is reached.
+ Label copyDone;
+ Label copyStart;
+ masm.bind(&copyStart);
+ masm.branchPtr(Assembler::Equal, endReg, startReg, &copyDone);
+ masm.subPtr(Imm32(sizeof(Value)), endReg);
+ masm.pushValue(Address(endReg, 0));
+ masm.jump(&copyStart);
+ masm.bind(&copyDone);
+
+ // Push |this|.
+ size_t thisvOffset =
+ BaselineStubFrameLayout::Size() + (1 + isConstructing) * sizeof(Value);
+ masm.pushValue(Address(FramePointer, thisvOffset));
+
+ // Push |callee| if needed.
+ if (!isJitCall) {
+ size_t calleeOffset =
+ BaselineStubFrameLayout::Size() + (2 + isConstructing) * sizeof(Value);
+ masm.pushValue(Address(FramePointer, calleeOffset));
+ }
+}
+
+void BaselineCacheIRCompiler::pushFunApplyNullUndefinedArguments(
+ Register calleeReg, bool isJitCall) {
+ // argc is already set to 0, so we just have to push |this| and (for native
+ // calls) the callee.
+
+ MOZ_ASSERT(enteredStubFrame_);
+
+ // Align the stack such that the JitFrameLayout is aligned on the
+ // JitStackAlignment.
+ if (isJitCall) {
+ masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis =*/false);
+ }
+
+ // Push |this|.
+ size_t thisvOffset = BaselineStubFrameLayout::Size() + 1 * sizeof(Value);
+ masm.pushValue(Address(FramePointer, thisvOffset));
+
+ // Push |callee| if needed.
+ if (!isJitCall) {
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
+ }
+}
+
+void BaselineCacheIRCompiler::pushFunCallArguments(
+ Register argcReg, Register calleeReg, Register scratch, Register scratch2,
+ uint32_t argcFixed, bool isJitCall) {
+ if (argcFixed == 0) {
+ if (isJitCall) {
+ // Align the stack to 0 args.
+ masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis = */ false);
+ }
+
+ // Store the new |this|.
+ masm.pushValue(UndefinedValue());
+
+ // Store |callee| if needed.
+ if (!isJitCall) {
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
+ }
+ } else if (argcFixed < MaxUnrolledArgCopy) {
+ // See below for why we subtract 1 from argcFixed.
+ argcFixed -= 1;
+ masm.sub32(Imm32(1), argcReg);
+ pushStandardArguments(argcReg, scratch, scratch2, argcFixed, isJitCall,
+ /*isConstructing =*/false);
+ } else {
+ Label zeroArgs, done;
+ masm.branchTest32(Assembler::Zero, argcReg, argcReg, &zeroArgs);
+
+ // When we call fun_call, the stack looks like the left column (note
+ // that newTarget will not be present, because fun_call cannot be a
+ // constructor call):
+ //
+ // ***Arguments to fun_call***
+ // callee (fun_call) ***Arguments to target***
+ // this (target function) -----> callee
+ // arg0 (this of target) -----> this
+ // arg1 (arg0 of target) -----> arg0
+    // argN (argN-1 of target) -----> argN-1
+ //
+ // As demonstrated in the right column, this is exactly what we need
+ // the stack to look like when calling pushStandardArguments for target,
+ // except with one more argument. If we subtract 1 from argc,
+ // everything works out correctly.
+ masm.sub32(Imm32(1), argcReg);
+
+ pushStandardArguments(argcReg, scratch, scratch2, argcFixed, isJitCall,
+ /*isConstructing =*/false);
+
+ masm.jump(&done);
+ masm.bind(&zeroArgs);
+
+ // The exception is the case where argc == 0:
+ //
+ // ***Arguments to fun_call***
+ // callee (fun_call) ***Arguments to target***
+ // this (target function) -----> callee
+ // <nothing> -----> this
+ //
+ // In this case, we push |undefined| for |this|.
+
+ if (isJitCall) {
+ // Align the stack to 0 args.
+ masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis = */ false);
+ }
+
+ // Store the new |this|.
+ masm.pushValue(UndefinedValue());
+
+ // Store |callee| if needed.
+ if (!isJitCall) {
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
+ }
+
+ masm.bind(&done);
+ }
+}
+
+void BaselineCacheIRCompiler::pushFunApplyArgsObj(Register argcReg,
+ Register calleeReg,
+ Register scratch,
+ Register scratch2,
+ bool isJitCall) {
+ MOZ_ASSERT(enteredStubFrame_);
+
+ // Load the arguments object off the stack before aligning.
+ Register argsReg = scratch;
+ masm.unboxObject(Address(FramePointer, BaselineStubFrameLayout::Size()),
+ argsReg);
+
+ // Align the stack such that the JitFrameLayout is aligned on the
+ // JitStackAlignment.
+ if (isJitCall) {
+ masm.alignJitStackBasedOnNArgs(argcReg, /*countIncludesThis =*/false);
+ }
+
+ // Load ArgumentsData.
+ masm.loadPrivate(Address(argsReg, ArgumentsObject::getDataSlotOffset()),
+ argsReg);
+
+ // We push the arguments onto the stack last-to-first.
+ // Compute the bounds of the arguments array.
+ Register currReg = scratch2;
+ Address argsStartAddr(argsReg, ArgumentsData::offsetOfArgs());
+ masm.computeEffectiveAddress(argsStartAddr, argsReg);
+ BaseValueIndex argsEndAddr(argsReg, argcReg);
+ masm.computeEffectiveAddress(argsEndAddr, currReg);
+
+ // Loop until all arguments have been pushed.
+ Label done, loop;
+ masm.bind(&loop);
+ masm.branchPtr(Assembler::Equal, currReg, argsReg, &done);
+ masm.subPtr(Imm32(sizeof(Value)), currReg);
+
+ Address currArgAddr(currReg, 0);
+#ifdef DEBUG
+ // Arguments are forwarded to the call object if they are closed over.
+ // In this case, OVERRIDDEN_ELEMENTS_BIT should be set.
+ Label notForwarded;
+ masm.branchTestMagic(Assembler::NotEqual, currArgAddr, &notForwarded);
+ masm.assumeUnreachable("Should have checked for overridden elements");
+ masm.bind(&notForwarded);
+#endif
+ masm.pushValue(currArgAddr);
+
+ masm.jump(&loop);
+ masm.bind(&done);
+
+  // Push arg0 as |this| for the call.
+ masm.pushValue(
+ Address(FramePointer, BaselineStubFrameLayout::Size() + sizeof(Value)));
+
+ // Push |callee| if needed.
+ if (!isJitCall) {
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
+ }
+}
+
+void BaselineCacheIRCompiler::pushBoundFunctionArguments(
+ Register argcReg, Register calleeReg, Register scratch, Register scratch2,
+ CallFlags flags, uint32_t numBoundArgs, bool isJitCall) {
+ bool isConstructing = flags.isConstructing();
+ uint32_t additionalArgc = 1 + isConstructing; // |this| and newTarget
+
+ // Calculate total number of Values to push.
+ Register countReg = scratch;
+ masm.computeEffectiveAddress(Address(argcReg, numBoundArgs + additionalArgc),
+ countReg);
+
+ // Align the stack such that the JitFrameLayout is aligned on the
+ // JitStackAlignment.
+ if (isJitCall) {
+ masm.alignJitStackBasedOnNArgs(countReg, /*countIncludesThis = */ true);
+ }
+
+ if (isConstructing) {
+ // Push the bound function's target as newTarget.
+ Address boundTarget(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
+ masm.pushValue(boundTarget);
+ }
+
+ // Ensure argPtr initially points to the last argument. Skip the stub frame.
+ Register argPtr = scratch2;
+ Address argAddress(FramePointer, BaselineStubFrameLayout::Size());
+ if (isConstructing) {
+ // Skip newTarget.
+ argAddress.offset += sizeof(Value);
+ }
+ masm.computeEffectiveAddress(argAddress, argPtr);
+
+ // Push all supplied arguments, starting at the last one.
+ Label loop, done;
+ masm.branchTest32(Assembler::Zero, argcReg, argcReg, &done);
+ masm.move32(argcReg, countReg);
+ masm.bind(&loop);
+ {
+ masm.pushValue(Address(argPtr, 0));
+ masm.addPtr(Imm32(sizeof(Value)), argPtr);
+
+ masm.branchSub32(Assembler::NonZero, Imm32(1), countReg, &loop);
+ }
+ masm.bind(&done);
+
+ // Push the bound arguments, starting at the last one.
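+  // Up to MaxInlineBoundArgs bound arguments are stored inline in the
+  // BoundFunctionObject; larger argument lists are stored in the elements of
+  // a separate array object.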
+ constexpr size_t inlineArgsOffset =
+ BoundFunctionObject::offsetOfFirstInlineBoundArg();
+ if (numBoundArgs <= BoundFunctionObject::MaxInlineBoundArgs) {
+ for (size_t i = 0; i < numBoundArgs; i++) {
+ size_t argIndex = numBoundArgs - i - 1;
+ Address argAddr(calleeReg, inlineArgsOffset + argIndex * sizeof(Value));
+ masm.pushValue(argAddr);
+ }
+ } else {
+ masm.unboxObject(Address(calleeReg, inlineArgsOffset), scratch);
+ masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
+ for (size_t i = 0; i < numBoundArgs; i++) {
+ size_t argIndex = numBoundArgs - i - 1;
+ Address argAddr(scratch, argIndex * sizeof(Value));
+ masm.pushValue(argAddr);
+ }
+ }
+
+ if (isConstructing) {
+ // Push the |this| Value. This is either the object we allocated or the
+ // JS_UNINITIALIZED_LEXICAL magic value. It's stored in the BaselineFrame,
+ // so skip past the stub frame, (unbound) arguments and newTarget.
+ BaseValueIndex thisAddress(FramePointer, argcReg,
+ BaselineStubFrameLayout::Size() + sizeof(Value));
+ masm.pushValue(thisAddress, scratch);
+ } else {
+ // Push the bound |this|.
+ Address boundThis(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
+ masm.pushValue(boundThis);
+ }
+}
+
+bool BaselineCacheIRCompiler::emitCallNativeShared(
+ NativeCallType callType, ObjOperandId calleeId, Int32OperandId argcId,
+ CallFlags flags, uint32_t argcFixed, Maybe<bool> ignoresReturnValue,
+ Maybe<uint32_t> targetOffset) {
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ Register calleeReg = allocator.useRegister(masm, calleeId);
+ Register argcReg = allocator.useRegister(masm, argcId);
+
+ bool isConstructing = flags.isConstructing();
+ bool isSameRealm = flags.isSameRealm();
+
+ if (!updateArgc(flags, argcReg, scratch)) {
+ return false;
+ }
+
+ allocator.discardStack(masm);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ // Note that this leaves the return address in TailCallReg.
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ if (!isSameRealm) {
+ masm.switchToObjectRealm(calleeReg, scratch);
+ }
+
+ pushArguments(argcReg, calleeReg, scratch, scratch2, flags, argcFixed,
+ /*isJitCall =*/false);
+
+ // Native functions have the signature:
+ //
+ // bool (*)(JSContext*, unsigned, Value* vp)
+ //
+ // Where vp[0] is space for callee/return value, vp[1] is |this|, and vp[2]
+ // onward are the function arguments.
+
+ // Initialize vp.
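+  // The arguments were just pushed with |callee| last, so the stack pointer
+  // now points at vp[0].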
+ masm.moveStackPtrTo(scratch2.get());
+
+ // Construct a native exit frame.
+ masm.push(argcReg);
+
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.push(ICTailCallReg);
+ masm.push(FramePointer);
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrameForNative(scratch, scratch, isConstructing);
+
+ // Execute call.
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(argcReg);
+ masm.passABIArg(scratch2);
+
+ switch (callType) {
+ case NativeCallType::Native: {
+#ifdef JS_SIMULATOR
+ // The simulator requires VM calls to be redirected to a special
+ // swi instruction to handle them, so we store the redirected
+ // pointer in the stub and use that instead of the original one.
+ // (See CacheIRWriter::callNativeFunction.)
+ Address redirectedAddr(stubAddress(*targetOffset));
+ masm.callWithABI(redirectedAddr);
+#else
+ if (*ignoresReturnValue) {
+ masm.loadPrivate(
+ Address(calleeReg, JSFunction::offsetOfJitInfoOrScript()),
+ calleeReg);
+ masm.callWithABI(
+ Address(calleeReg, JSJitInfo::offsetOfIgnoresReturnValueNative()));
+ } else {
+ // This depends on the native function pointer being stored unchanged as
+ // a PrivateValue.
+ masm.callWithABI(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()));
+ }
+#endif
+ } break;
+ case NativeCallType::ClassHook: {
+ Address nativeAddr(stubAddress(*targetOffset));
+ masm.callWithABI(nativeAddr);
+ } break;
+ }
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Load the return value.
+ masm.loadValue(
+ Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
+ output.valueReg());
+
+ stubFrame.leave(masm);
+
+ if (!isSameRealm) {
+ masm.switchToBaselineFrameRealm(scratch2);
+ }
+
+ return true;
+}
+
+#ifdef JS_SIMULATOR
+bool BaselineCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed,
+ uint32_t targetOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<bool> ignoresReturnValue;
+ Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
+ return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
+ argcFixed, ignoresReturnValue, targetOffset_);
+}
+
+bool BaselineCacheIRCompiler::emitCallDOMFunction(
+ ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
+ CallFlags flags, uint32_t argcFixed, uint32_t targetOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<bool> ignoresReturnValue;
+ Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
+ return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
+ argcFixed, ignoresReturnValue, targetOffset_);
+}
+#else
+bool BaselineCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed,
+ bool ignoresReturnValue) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<bool> ignoresReturnValue_ = mozilla::Some(ignoresReturnValue);
+ Maybe<uint32_t> targetOffset;
+ return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
+ argcFixed, ignoresReturnValue_, targetOffset);
+}
+
+bool BaselineCacheIRCompiler::emitCallDOMFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ ObjOperandId thisObjId,
+ CallFlags flags,
+ uint32_t argcFixed) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<bool> ignoresReturnValue = mozilla::Some(false);
+ Maybe<uint32_t> targetOffset;
+ return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
+ argcFixed, ignoresReturnValue, targetOffset);
+}
+#endif
+
+bool BaselineCacheIRCompiler::emitCallClassHook(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed,
+ uint32_t targetOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<bool> ignoresReturnValue;
+ Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
+ return emitCallNativeShared(NativeCallType::ClassHook, calleeId, argcId,
+ flags, argcFixed, ignoresReturnValue,
+ targetOffset_);
+}
+
+// Helper function for loading call arguments from the stack. Loads
+// and unboxes an object from a specific slot.
+void BaselineCacheIRCompiler::loadStackObject(ArgumentKind kind,
+ CallFlags flags, Register argcReg,
+ Register dest) {
+ MOZ_ASSERT(enteredStubFrame_);
+
+ bool addArgc = false;
+ int32_t slotIndex = GetIndexOfArgument(kind, flags, &addArgc);
+
+ if (addArgc) {
+ int32_t slotOffset =
+ slotIndex * sizeof(JS::Value) + BaselineStubFrameLayout::Size();
+ BaseValueIndex slotAddr(FramePointer, argcReg, slotOffset);
+ masm.unboxObject(slotAddr, dest);
+ } else {
+ int32_t slotOffset =
+ slotIndex * sizeof(JS::Value) + BaselineStubFrameLayout::Size();
+ Address slotAddr(FramePointer, slotOffset);
+ masm.unboxObject(slotAddr, dest);
+ }
+}
+
+template <typename T>
+void BaselineCacheIRCompiler::storeThis(const T& newThis, Register argcReg,
+ CallFlags flags) {
+ switch (flags.getArgFormat()) {
+ case CallFlags::Standard: {
+ BaseValueIndex thisAddress(
+ FramePointer,
+ argcReg, // Arguments
+ 1 * sizeof(Value) + // NewTarget
+ BaselineStubFrameLayout::Size()); // Stub frame
+ masm.storeValue(newThis, thisAddress);
+ } break;
+ case CallFlags::Spread: {
+ Address thisAddress(FramePointer,
+ 2 * sizeof(Value) + // Arg array, NewTarget
+ BaselineStubFrameLayout::Size()); // Stub frame
+ masm.storeValue(newThis, thisAddress);
+ } break;
+ default:
+ MOZ_CRASH("Invalid arg format for scripted constructor");
+ }
+}
+
+/*
+ * Scripted constructors require a |this| object to be created prior to the
+ * call. When this function is called, the stack looks like (bottom->top):
+ *
+ * [..., Callee, ThisV, Arg0V, ..., ArgNV, NewTarget, StubFrameHeader]
+ *
+ * At this point, |ThisV| is JSWhyMagic::JS_IS_CONSTRUCTING.
+ *
+ * This function calls CreateThis to generate a new |this| object, then
+ * overwrites the magic ThisV on the stack.
+ */
+void BaselineCacheIRCompiler::createThis(Register argcReg, Register calleeReg,
+ Register scratch, CallFlags flags,
+ bool isBoundFunction) {
+ MOZ_ASSERT(flags.isConstructing());
+
+ if (flags.needsUninitializedThis()) {
+ storeThis(MagicValue(JS_UNINITIALIZED_LEXICAL), argcReg, flags);
+ return;
+ }
+
+ // Save live registers that don't have to be traced.
+ LiveGeneralRegisterSet liveNonGCRegs;
+ liveNonGCRegs.add(argcReg);
+ liveNonGCRegs.add(ICStubReg);
+ masm.PushRegsInMask(liveNonGCRegs);
+
+  // CreateThis takes two arguments: callee and newTarget.
+
+ if (isBoundFunction) {
+ // Push the bound function's target as callee and newTarget.
+ Address boundTarget(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
+ masm.unboxObject(boundTarget, scratch);
+ masm.push(scratch);
+ masm.push(scratch);
+ } else {
+ // Push newTarget:
+ loadStackObject(ArgumentKind::NewTarget, flags, argcReg, scratch);
+ masm.push(scratch);
+
+ // Push callee:
+ loadStackObject(ArgumentKind::Callee, flags, argcReg, scratch);
+ masm.push(scratch);
+ }
+
+ // Call CreateThisFromIC.
+ using Fn =
+ bool (*)(JSContext*, HandleObject, HandleObject, MutableHandleValue);
+ callVM<Fn, CreateThisFromIC>(masm);
+
+#ifdef DEBUG
+ Label createdThisOK;
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &createdThisOK);
+ masm.branchTestMagic(Assembler::Equal, JSReturnOperand, &createdThisOK);
+ masm.assumeUnreachable(
+ "The return of CreateThis must be an object or uninitialized.");
+ masm.bind(&createdThisOK);
+#endif
+
+ // Restore saved registers.
+ masm.PopRegsInMask(liveNonGCRegs);
+
+  // Store the |this| value back into the pushed arguments on the stack.
+ MOZ_ASSERT(!liveNonGCRegs.aliases(JSReturnOperand));
+ storeThis(JSReturnOperand, argcReg, flags);
+
+ // Restore calleeReg. CreateThisFromIC may trigger a GC, so we reload the
+ // callee from the stub frame (which is traced) instead of spilling it to
+ // the stack.
+ loadStackObject(ArgumentKind::Callee, flags, argcReg, calleeReg);
+}
+
+void BaselineCacheIRCompiler::updateReturnValue() {
+ Label skipThisReplace;
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
+
+ // If a constructor does not explicitly return an object, the return value
+ // of the constructor is |this|. We load it out of the baseline stub frame.
+
+ // At this point, the stack looks like this:
+ // newTarget
+ // ArgN
+ // ...
+ // Arg0
+ // ThisVal <---- We want this value.
+ // Callee token | Skip two stack slots.
+ // Frame descriptor v
+ // [Top of stack]
+ size_t thisvOffset =
+ JitFrameLayout::offsetOfThis() - JitFrameLayout::bytesPoppedAfterCall();
+ Address thisAddress(masm.getStackPointer(), thisvOffset);
+ masm.loadValue(thisAddress, JSReturnOperand);
+
+#ifdef DEBUG
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
+ masm.assumeUnreachable("Return of constructing call should be an object.");
+#endif
+ masm.bind(&skipThisReplace);
+}
+
+bool BaselineCacheIRCompiler::emitCallScriptedFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ Register calleeReg = allocator.useRegister(masm, calleeId);
+ Register argcReg = allocator.useRegister(masm, argcId);
+
+ bool isConstructing = flags.isConstructing();
+ bool isSameRealm = flags.isSameRealm();
+
+ if (!updateArgc(flags, argcReg, scratch)) {
+ return false;
+ }
+
+ allocator.discardStack(masm);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ if (!isSameRealm) {
+ masm.switchToObjectRealm(calleeReg, scratch);
+ }
+
+ if (isConstructing) {
+ createThis(argcReg, calleeReg, scratch, flags,
+ /* isBoundFunction = */ false);
+ }
+
+ pushArguments(argcReg, calleeReg, scratch, scratch2, flags, argcFixed,
+ /*isJitCall =*/true);
+
+ // Load the start of the target JitCode.
+ Register code = scratch2;
+ masm.loadJitCodeRaw(calleeReg, code);
+
+ // Note that we use Push, not push, so that callJit will align the stack
+ // properly on ARM.
+ masm.PushCalleeToken(calleeReg, isConstructing);
+ masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, argcReg, scratch);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.loadFunctionArgCount(calleeReg, calleeReg);
+ masm.branch32(Assembler::AboveOrEqual, argcReg, calleeReg, &noUnderflow);
+ {
+ // Call the arguments rectifier.
+ TrampolinePtr argumentsRectifier =
+ cx_->runtime()->jitRuntime()->getArgumentsRectifier();
+ masm.movePtr(argumentsRectifier, code);
+ }
+
+ masm.bind(&noUnderflow);
+ masm.callJit(code);
+
+ // If this is a constructing call, and the callee returns a non-object,
+ // replace it with the |this| object passed in.
+ if (isConstructing) {
+ updateReturnValue();
+ }
+
+ stubFrame.leave(masm);
+
+ if (!isSameRealm) {
+ masm.switchToBaselineFrameRealm(scratch2);
+ }
+
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCallWasmFunction(
+ ObjOperandId calleeId, Int32OperandId argcId, CallFlags flags,
+ uint32_t argcFixed, uint32_t funcExportOffset, uint32_t instanceOffset) {
+ return emitCallScriptedFunction(calleeId, argcId, flags, argcFixed);
+}
+
+bool BaselineCacheIRCompiler::emitCallInlinedFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ uint32_t icScriptOffset,
+ CallFlags flags,
+ uint32_t argcFixed) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
+ AutoScratchRegister codeReg(allocator, masm);
+
+ Register calleeReg = allocator.useRegister(masm, calleeId);
+ Register argcReg = allocator.useRegister(masm, argcId);
+
+ bool isConstructing = flags.isConstructing();
+ bool isSameRealm = flags.isSameRealm();
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
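+  // Load the callee's baseline code pointer; fail if its BaselineScript has
+  // been discarded.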
+ masm.loadBaselineJitCodeRaw(calleeReg, codeReg, failure->label());
+
+ if (!updateArgc(flags, argcReg, scratch)) {
+ return false;
+ }
+
+ allocator.discardStack(masm);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ if (!isSameRealm) {
+ masm.switchToObjectRealm(calleeReg, scratch);
+ }
+
+ Label baselineScriptDiscarded;
+ if (isConstructing) {
+ createThis(argcReg, calleeReg, scratch, flags,
+ /* isBoundFunction = */ false);
+
+ // CreateThisFromIC may trigger a GC and discard the BaselineScript.
+ // We have already called discardStack, so we can't use a FailurePath.
+ // Instead, we skip storing the ICScript in the JSContext and use a
+ // normal non-inlined call.
+ masm.loadBaselineJitCodeRaw(calleeReg, codeReg, &baselineScriptDiscarded);
+ }
+
+ // Store icScript in the context.
+ Address icScriptAddr(stubAddress(icScriptOffset));
+ masm.loadPtr(icScriptAddr, scratch);
+ masm.storeICScriptInJSContext(scratch);
+
+ if (isConstructing) {
+ Label skip;
+ masm.jump(&skip);
+ masm.bind(&baselineScriptDiscarded);
+ masm.loadJitCodeRaw(calleeReg, codeReg);
+ masm.bind(&skip);
+ }
+
+ pushArguments(argcReg, calleeReg, scratch, scratch2, flags, argcFixed,
+ /*isJitCall =*/true);
+
+ // Note that we use Push, not push, so that callJit will align the stack
+ // properly on ARM.
+ masm.PushCalleeToken(calleeReg, isConstructing);
+ masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, argcReg, scratch);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.loadFunctionArgCount(calleeReg, calleeReg);
+ masm.branch32(Assembler::AboveOrEqual, argcReg, calleeReg, &noUnderflow);
+
+ // Call the trial-inlining arguments rectifier.
+ ArgumentsRectifierKind kind = ArgumentsRectifierKind::TrialInlining;
+ TrampolinePtr argumentsRectifier =
+ cx_->runtime()->jitRuntime()->getArgumentsRectifier(kind);
+ masm.movePtr(argumentsRectifier, codeReg);
+
+ masm.bind(&noUnderflow);
+ masm.callJit(codeReg);
+
+ // If this is a constructing call, and the callee returns a non-object,
+ // replace it with the |this| object passed in.
+ if (isConstructing) {
+ updateReturnValue();
+ }
+
+ stubFrame.leave(masm);
+
+ if (!isSameRealm) {
+ masm.switchToBaselineFrameRealm(codeReg);
+ }
+
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCallBoundScriptedFunction(
+ ObjOperandId calleeId, ObjOperandId targetId, Int32OperandId argcId,
+ CallFlags flags, uint32_t numBoundArgs) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ Register calleeReg = allocator.useRegister(masm, calleeId);
+ Register argcReg = allocator.useRegister(masm, argcId);
+
+ bool isConstructing = flags.isConstructing();
+ bool isSameRealm = flags.isSameRealm();
+
+ allocator.discardStack(masm);
+
+ // Push a stub frame so that we can perform a non-tail call.
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ Address boundTarget(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
+
+ // If we're constructing, switch to the target's realm and create |this|. If
+ // we're not constructing, we switch to the target's realm after pushing the
+ // arguments and loading the target.
+ if (isConstructing) {
+ if (!isSameRealm) {
+ masm.unboxObject(boundTarget, scratch);
+ masm.switchToObjectRealm(scratch, scratch);
+ }
+ createThis(argcReg, calleeReg, scratch, flags,
+ /* isBoundFunction = */ true);
+ }
+
+ // Push all arguments, including |this|.
+ pushBoundFunctionArguments(argcReg, calleeReg, scratch, scratch2, flags,
+ numBoundArgs, /* isJitCall = */ true);
+
+ // Load the target JSFunction.
+ masm.unboxObject(boundTarget, calleeReg);
+
+ if (!isConstructing && !isSameRealm) {
+ masm.switchToObjectRealm(calleeReg, scratch);
+ }
+
+ // Update argc.
+ masm.add32(Imm32(numBoundArgs), argcReg);
+
+ // Load the start of the target JitCode.
+ Register code = scratch2;
+ masm.loadJitCodeRaw(calleeReg, code);
+
+ // Note that we use Push, not push, so that callJit will align the stack
+ // properly on ARM.
+ masm.PushCalleeToken(calleeReg, isConstructing);
+ masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, argcReg, scratch);
+
+ // Handle arguments underflow.
+ Label noUnderflow;
+ masm.loadFunctionArgCount(calleeReg, calleeReg);
+ masm.branch32(Assembler::AboveOrEqual, argcReg, calleeReg, &noUnderflow);
+ {
+ // Call the arguments rectifier.
+ TrampolinePtr argumentsRectifier =
+ cx_->runtime()->jitRuntime()->getArgumentsRectifier();
+ masm.movePtr(argumentsRectifier, code);
+ }
+
+ masm.bind(&noUnderflow);
+ masm.callJit(code);
+
+ if (isConstructing) {
+ updateReturnValue();
+ }
+
+ stubFrame.leave(masm);
+
+ if (!isSameRealm) {
+ masm.switchToBaselineFrameRealm(scratch2);
+ }
+
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitNewArrayObjectResult(uint32_t arrayLength,
+ uint32_t shapeOffset,
+ uint32_t siteOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
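+  // Pick the allocation kind for an array of this length and compute how many
+  // elements fit inline.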
+ gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
+ MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
+ allocKind = ForegroundToBackgroundAllocKind(allocKind);
+
+ uint32_t slotCount = GetGCKindSlots(allocKind);
+ MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
+ uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegister result(allocator, masm);
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegister site(allocator, masm);
+ AutoScratchRegisterMaybeOutput shape(allocator, masm, output);
+
+ Address shapeAddr(stubAddress(shapeOffset));
+ masm.loadPtr(shapeAddr, shape);
+
+ Address siteAddr(stubAddress(siteOffset));
+ masm.loadPtr(siteAddr, site);
+
+ allocator.discardStack(masm);
+
+ Label done;
+ Label fail;
+
+ masm.createArrayWithFixedElements(result, shape, scratch, arrayLength,
+ arrayCapacity, allocKind, gc::Heap::Default,
+ &fail, AllocSiteInput(site));
+ masm.jump(&done);
+
+ {
+ masm.bind(&fail);
+
+ // We get here if the nursery is full (unlikely) but also for tenured
+ // allocations if the current arena is full and we need to allocate a new
+ // one (fairly common).
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.Push(site);
+ masm.Push(Imm32(int32_t(allocKind)));
+ masm.Push(Imm32(arrayLength));
+
+ using Fn =
+ ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, gc::AllocSite*);
+ callVM<Fn, NewArrayObjectBaselineFallback>(masm);
+
+ stubFrame.leave(masm);
+ masm.storeCallPointerResult(result);
+ }
+
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_OBJECT, result, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitNewPlainObjectResult(uint32_t numFixedSlots,
+ uint32_t numDynamicSlots,
+ gc::AllocKind allocKind,
+ uint32_t shapeOffset,
+ uint32_t siteOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegister obj(allocator, masm);
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegister site(allocator, masm);
+ AutoScratchRegisterMaybeOutput shape(allocator, masm, output);
+
+ Address shapeAddr(stubAddress(shapeOffset));
+ masm.loadPtr(shapeAddr, shape);
+
+ Address siteAddr(stubAddress(siteOffset));
+ masm.loadPtr(siteAddr, site);
+
+ allocator.discardStack(masm);
+
+ Label done;
+ Label fail;
+
+ masm.createPlainGCObject(obj, shape, scratch, shape, numFixedSlots,
+ numDynamicSlots, allocKind, gc::Heap::Default, &fail,
+ AllocSiteInput(site));
+ masm.jump(&done);
+
+ {
+ masm.bind(&fail);
+
+ // We get here if the nursery is full (unlikely) but also for tenured
+ // allocations if the current arena is full and we need to allocate a new
+ // one (fairly common).
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ masm.Push(site);
+ masm.Push(Imm32(int32_t(allocKind)));
+ masm.loadPtr(shapeAddr, shape); // This might have been overwritten.
+ masm.Push(shape);
+
+ using Fn = JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind,
+ gc::AllocSite*);
+ callVM<Fn, NewPlainObjectBaselineFallback>(masm);
+
+ stubFrame.leave(masm);
+ masm.storeCallPointerResult(obj);
+ }
+
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitBindFunctionResult(
+ ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegister scratch(allocator, masm);
+
+ Register target = allocator.useRegister(masm, targetId);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ // Push the arguments in reverse order.
+ for (uint32_t i = 0; i < argc; i++) {
+ Address argAddress(FramePointer,
+ BaselineStubFrameLayout::Size() + i * sizeof(Value));
+ masm.pushValue(argAddress);
+ }
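+  // The pushed Values form the arguments array passed to functionBindImpl.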
+ masm.moveStackPtrTo(scratch.get());
+
+ masm.Push(ImmWord(0)); // nullptr for maybeBound
+ masm.Push(Imm32(argc));
+ masm.Push(scratch);
+ masm.Push(target);
+
+ using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
+ uint32_t, Handle<BoundFunctionObject*>);
+ callVM<Fn, BoundFunctionObject::functionBindImpl>(masm);
+
+ stubFrame.leave(masm);
+ masm.storeCallPointerResult(scratch);
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitSpecializedBindFunctionResult(
+ ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ Register target = allocator.useRegister(masm, targetId);
+
+ StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
+ emitLoadStubField(objectField, scratch2);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch1);
+
+ // Push the arguments in reverse order.
+ for (uint32_t i = 0; i < argc; i++) {
+ Address argAddress(FramePointer,
+ BaselineStubFrameLayout::Size() + i * sizeof(Value));
+ masm.pushValue(argAddress);
+ }
+ masm.moveStackPtrTo(scratch1.get());
+
+ masm.Push(scratch2);
+ masm.Push(Imm32(argc));
+ masm.Push(scratch1);
+ masm.Push(target);
+
+ using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
+ uint32_t, Handle<BoundFunctionObject*>);
+ callVM<Fn, BoundFunctionObject::functionBindSpecializedBaseline>(masm);
+
+ stubFrame.leave(masm);
+ masm.storeCallPointerResult(scratch1);
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, scratch1, output.valueReg());
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCloseIterScriptedResult(
+ ObjOperandId iterId, ObjOperandId calleeId, CompletionKind kind,
+ uint32_t calleeNargs) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register iter = allocator.useRegister(masm, iterId);
+ Register callee = allocator.useRegister(masm, calleeId);
+
+ AutoScratchRegister code(allocator, masm);
+ AutoScratchRegister scratch(allocator, masm);
+
+ masm.loadJitCodeRaw(callee, code);
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ // Call the return method.
+ masm.alignJitStackBasedOnNArgs(calleeNargs, /*countIncludesThis = */ false);
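+  // The method is called with no actual arguments, so pad its formals with
+  // |undefined|.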
+ for (uint32_t i = 0; i < calleeNargs; i++) {
+ masm.pushValue(UndefinedValue());
+ }
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(iter)));
+ masm.Push(callee);
+ masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, /* argc = */ 0);
+
+ masm.callJit(code);
+
+ if (kind != CompletionKind::Throw) {
+ // Verify that the return value is an object.
+ Label success;
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &success);
+
+ masm.Push(Imm32(int32_t(CheckIsObjectKind::IteratorReturn)));
+ using Fn = bool (*)(JSContext*, CheckIsObjectKind);
+ callVM<Fn, ThrowCheckIsObject>(masm);
+
+ masm.bind(&success);
+ }
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+static void CallRegExpStub(MacroAssembler& masm, size_t jitRealmStubOffset,
+ Register temp, Label* vmCall) {
+ // Call cx->realm()->jitRealm()->regExpStub. We store a pointer to the RegExp
+ // stub in the IC stub to keep it alive, but we shouldn't use it if the stub
+ // has been discarded in the meantime (because we might have changed GC string
+ // pretenuring heuristics that affect behavior of the stub). This is uncommon
+ // but can happen if we discarded all JIT code but had some active (Baseline)
+ // scripts on the stack.
+ masm.loadJSContext(temp);
+ masm.loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
+ masm.loadPtr(Address(temp, Realm::offsetOfJitRealm()), temp);
+ masm.loadPtr(Address(temp, jitRealmStubOffset), temp);
+ masm.branchTestPtr(Assembler::Zero, temp, temp, vmCall);
+ masm.call(Address(temp, JitCode::offsetOfCode()));
+}
+
+// Used to move inputs to the registers expected by the RegExp stub.
+static void SetRegExpStubInputRegisters(MacroAssembler& masm,
+ Register* regexpSrc,
+ Register regexpDest, Register* inputSrc,
+ Register inputDest,
+ Register* lastIndexSrc,
+ Register lastIndexDest) {
+ MoveResolver& moves = masm.moveResolver();
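+  // Route the moves through the MoveResolver so that overlapping
+  // source/destination registers (including cycles) are handled correctly.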
+ if (*regexpSrc != regexpDest) {
+ masm.propagateOOM(moves.addMove(MoveOperand(*regexpSrc),
+ MoveOperand(regexpDest), MoveOp::GENERAL));
+ *regexpSrc = regexpDest;
+ }
+ if (*inputSrc != inputDest) {
+ masm.propagateOOM(moves.addMove(MoveOperand(*inputSrc),
+ MoveOperand(inputDest), MoveOp::GENERAL));
+ *inputSrc = inputDest;
+ }
+ if (lastIndexSrc && *lastIndexSrc != lastIndexDest) {
+ masm.propagateOOM(moves.addMove(MoveOperand(*lastIndexSrc),
+ MoveOperand(lastIndexDest), MoveOp::INT32));
+ *lastIndexSrc = lastIndexDest;
+ }
+
+ masm.propagateOOM(moves.resolve());
+
+ MoveEmitter emitter(masm);
+ emitter.emit(moves);
+ emitter.finish();
+}
+
+bool BaselineCacheIRCompiler::emitCallRegExpMatcherResult(
+ ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
+ uint32_t stubOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register regexp = allocator.useRegister(masm, regexpId);
+ Register input = allocator.useRegister(masm, inputId);
+ Register lastIndex = allocator.useRegister(masm, lastIndexId);
+ Register scratch = output.valueReg().scratchReg();
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ SetRegExpStubInputRegisters(masm, &regexp, RegExpMatcherRegExpReg, &input,
+ RegExpMatcherStringReg, &lastIndex,
+ RegExpMatcherLastIndexReg);
+
+ masm.reserveStack(RegExpReservedStack);
+
+ Label done, vmCall, vmCallNoMatches;
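+  // The stub returns |undefined| when it can't handle this case; we then fall
+  // back to the VM call below.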
+ CallRegExpStub(masm, JitRealm::offsetOfRegExpMatcherStub(), scratch,
+ &vmCallNoMatches);
+ masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, &vmCall);
+
+ masm.jump(&done);
+
+ {
+ Label pushedMatches;
+ masm.bind(&vmCallNoMatches);
+ masm.push(ImmWord(0));
+ masm.jump(&pushedMatches);
+
+ masm.bind(&vmCall);
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), InputOutputDataSize), scratch);
+ masm.Push(scratch);
+
+ masm.bind(&pushedMatches);
+ masm.Push(lastIndex);
+ masm.Push(input);
+ masm.Push(regexp);
+
+ using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
+ int32_t lastIndex, MatchPairs* pairs,
+ MutableHandleValue output);
+ callVM<Fn, RegExpMatcherRaw>(masm);
+ }
+
+ masm.bind(&done);
+
+ static_assert(R0 == JSReturnOperand);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitCallRegExpSearcherResult(
+ ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
+ uint32_t stubOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register regexp = allocator.useRegister(masm, regexpId);
+ Register input = allocator.useRegister(masm, inputId);
+ Register lastIndex = allocator.useRegister(masm, lastIndexId);
+ Register scratch = output.valueReg().scratchReg();
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ SetRegExpStubInputRegisters(masm, &regexp, RegExpSearcherRegExpReg, &input,
+ RegExpSearcherStringReg, &lastIndex,
+ RegExpSearcherLastIndexReg);
+ // Ensure `scratch` doesn't conflict with the stub's input registers.
+ scratch = ReturnReg;
+
+ masm.reserveStack(RegExpReservedStack);
+
+ Label done, vmCall, vmCallNoMatches;
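+  // A RegExpSearcherResultFailed result means the stub couldn't handle this
+  // case; we then fall back to the VM call below.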
+ CallRegExpStub(masm, JitRealm::offsetOfRegExpSearcherStub(), scratch,
+ &vmCallNoMatches);
+ masm.branch32(Assembler::Equal, scratch, Imm32(RegExpSearcherResultFailed),
+ &vmCall);
+
+ masm.jump(&done);
+
+ {
+ Label pushedMatches;
+ masm.bind(&vmCallNoMatches);
+ masm.push(ImmWord(0));
+ masm.jump(&pushedMatches);
+
+ masm.bind(&vmCall);
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), InputOutputDataSize), scratch);
+ masm.Push(scratch);
+
+ masm.bind(&pushedMatches);
+ masm.Push(lastIndex);
+ masm.Push(input);
+ masm.Push(regexp);
+
+ using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
+ int32_t lastIndex, MatchPairs* pairs, int32_t* result);
+ callVM<Fn, RegExpSearcherRaw>(masm);
+ }
+
+ masm.bind(&done);
+
+ masm.tagValue(JSVAL_TYPE_INT32, ReturnReg, output.valueReg());
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitRegExpBuiltinExecMatchResult(
+ ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register regexp = allocator.useRegister(masm, regexpId);
+ Register input = allocator.useRegister(masm, inputId);
+ Register scratch = output.valueReg().scratchReg();
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ SetRegExpStubInputRegisters(masm, &regexp, RegExpMatcherRegExpReg, &input,
+ RegExpMatcherStringReg, nullptr, InvalidReg);
+
+ masm.reserveStack(RegExpReservedStack);
+
+ Label done, vmCall, vmCallNoMatches;
+ CallRegExpStub(masm, JitRealm::offsetOfRegExpExecMatchStub(), scratch,
+ &vmCallNoMatches);
+ masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, &vmCall);
+
+ masm.jump(&done);
+
+ {
+ Label pushedMatches;
+ masm.bind(&vmCallNoMatches);
+ masm.push(ImmWord(0));
+ masm.jump(&pushedMatches);
+
+ masm.bind(&vmCall);
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), InputOutputDataSize), scratch);
+ masm.Push(scratch);
+
+ masm.bind(&pushedMatches);
+ masm.Push(input);
+ masm.Push(regexp);
+
+ using Fn =
+ bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
+ MatchPairs* pairs, MutableHandleValue output);
+ callVM<Fn, RegExpBuiltinExecMatchFromJit>(masm);
+ }
+
+ masm.bind(&done);
+
+ static_assert(R0 == JSReturnOperand);
+
+ stubFrame.leave(masm);
+ return true;
+}
+
+bool BaselineCacheIRCompiler::emitRegExpBuiltinExecTestResult(
+ ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register regexp = allocator.useRegister(masm, regexpId);
+ Register input = allocator.useRegister(masm, inputId);
+ Register scratch = output.valueReg().scratchReg();
+
+ allocator.discardStack(masm);
+
+ AutoStubFrame stubFrame(*this);
+ stubFrame.enter(masm, scratch);
+
+ SetRegExpStubInputRegisters(masm, &regexp, RegExpExecTestRegExpReg, &input,
+ RegExpExecTestStringReg, nullptr, InvalidReg);
+ // Ensure `scratch` doesn't conflict with the stub's input registers.
+ scratch = ReturnReg;
+
+ Label done, vmCall;
+ CallRegExpStub(masm, JitRealm::offsetOfRegExpExecTestStub(), scratch,
+ &vmCall);
+ masm.branch32(Assembler::Equal, scratch, Imm32(RegExpExecTestResultFailed),
+ &vmCall);
+
+ masm.jump(&done);
+
+ {
+ masm.bind(&vmCall);
+
+ masm.Push(input);
+ masm.Push(regexp);
+
+ using Fn = bool (*)(JSContext*, Handle<RegExpObject*> regexp,
+ HandleString input, bool* result);
+ callVM<Fn, RegExpBuiltinExecTestFromJit>(masm);
+ }
+
+ masm.bind(&done);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
+
+ stubFrame.leave(masm);
+ return true;
+}
diff --git a/js/src/jit/BaselineCacheIRCompiler.h b/js/src/jit/BaselineCacheIRCompiler.h
new file mode 100644
index 0000000000..91d1aa55f0
--- /dev/null
+++ b/js/src/jit/BaselineCacheIRCompiler.h
@@ -0,0 +1,150 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineCacheIRCompiler_h
+#define jit_BaselineCacheIRCompiler_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h"
+
+#include "jit/CacheIR.h"
+#include "jit/CacheIRCompiler.h"
+#include "jit/CacheIROpsGenerated.h"
+#include "jit/CacheIRReader.h"
+
+struct JS_PUBLIC_API JSContext;
+
+class JSScript;
+
+namespace js {
+namespace jit {
+
+class CacheIRWriter;
+class ICFallbackStub;
+class ICScript;
+class JitCode;
+class Label;
+class MacroAssembler;
+
+struct Address;
+struct Register;
+
+enum class ICAttachResult { Attached, DuplicateStub, TooLarge, OOM };
+
+bool TryFoldingStubs(JSContext* cx, ICFallbackStub* fallback, JSScript* script,
+ ICScript* icScript);
+
+ICAttachResult AttachBaselineCacheIRStub(JSContext* cx,
+ const CacheIRWriter& writer,
+ CacheKind kind, JSScript* outerScript,
+ ICScript* icScript,
+ ICFallbackStub* stub,
+ const char* name);
+
+// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
+class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler {
+ bool makesGCCalls_;
+ Register baselineFrameReg_ = FramePointer;
+
+ // This register points to the baseline frame of the caller. It should only
+ // be used before we enter a stub frame. This is normally the frame pointer
+ // register, but with --enable-ic-frame-pointers we have to allocate a
+ // separate register.
+ inline Register baselineFrameReg() {
+ MOZ_ASSERT(!enteredStubFrame_);
+ return baselineFrameReg_;
+ }
+
+ [[nodiscard]] bool emitStoreSlotShared(bool isFixed, ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId);
+ [[nodiscard]] bool emitAddAndStoreSlotShared(
+ CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset, mozilla::Maybe<uint32_t> numNewSlotsOffset);
+
+ bool updateArgc(CallFlags flags, Register argcReg, Register scratch);
+ void loadStackObject(ArgumentKind kind, CallFlags flags, Register argcReg,
+ Register dest);
+ void pushArguments(Register argcReg, Register calleeReg, Register scratch,
+ Register scratch2, CallFlags flags, uint32_t argcFixed,
+ bool isJitCall);
+ void pushStandardArguments(Register argcReg, Register scratch,
+ Register scratch2, uint32_t argcFixed,
+ bool isJitCall, bool isConstructing);
+ void pushArrayArguments(Register argcReg, Register scratch, Register scratch2,
+ bool isJitCall, bool isConstructing);
+ void pushFunCallArguments(Register argcReg, Register calleeReg,
+ Register scratch, Register scratch2,
+ uint32_t argcFixed, bool isJitCall);
+ void pushFunApplyArgsObj(Register argcReg, Register calleeReg,
+ Register scratch, Register scratch2, bool isJitCall);
+ void pushFunApplyNullUndefinedArguments(Register calleeReg, bool isJitCall);
+ void pushBoundFunctionArguments(Register argcReg, Register calleeReg,
+ Register scratch, Register scratch2,
+ CallFlags flags, uint32_t numBoundArgs,
+ bool isJitCall);
+ void createThis(Register argcReg, Register calleeReg, Register scratch,
+ CallFlags flags, bool isBoundFunction);
+ template <typename T>
+ void storeThis(const T& newThis, Register argcReg, CallFlags flags);
+ void updateReturnValue();
+
+ enum class NativeCallType { Native, ClassHook };
+ bool emitCallNativeShared(NativeCallType callType, ObjOperandId calleeId,
+ Int32OperandId argcId, CallFlags flags,
+ uint32_t argcFixed,
+ mozilla::Maybe<bool> ignoresReturnValue,
+ mozilla::Maybe<uint32_t> targetOffset);
+
+ enum class StringCode { CodeUnit, CodePoint };
+ bool emitStringFromCodeResult(Int32OperandId codeId, StringCode stringCode);
+
+ void emitAtomizeString(Register str, Register temp, Label* failure);
+
+ bool emitCallScriptedGetterShared(ValOperandId receiverId,
+ uint32_t getterOffset, bool sameRealm,
+ uint32_t nargsAndFlagsOffset,
+ mozilla::Maybe<uint32_t> icScriptOffset);
+ bool emitCallScriptedSetterShared(ObjOperandId receiverId,
+ uint32_t setterOffset, ValOperandId rhsId,
+ bool sameRealm,
+ uint32_t nargsAndFlagsOffset,
+ mozilla::Maybe<uint32_t> icScriptOffset);
+
+ BaselineICPerfSpewer perfSpewer_;
+
+ public:
+ BaselineICPerfSpewer& perfSpewer() { return perfSpewer_; }
+
+ friend class AutoStubFrame;
+
+ BaselineCacheIRCompiler(JSContext* cx, TempAllocator& alloc,
+ const CacheIRWriter& writer, uint32_t stubDataOffset);
+
+ [[nodiscard]] bool init(CacheKind kind);
+
+ template <typename Fn, Fn fn>
+ void callVM(MacroAssembler& masm);
+
+ JitCode* compile();
+
+ bool makesGCCalls() const;
+
+ Address stubAddress(uint32_t offset) const;
+
+ private:
+ CACHE_IR_COMPILER_UNSHARED_GENERATED
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineCacheIRCompiler_h */
diff --git a/js/src/jit/BaselineCodeGen.cpp b/js/src/jit/BaselineCodeGen.cpp
new file mode 100644
index 0000000000..f88a026074
--- /dev/null
+++ b/js/src/jit/BaselineCodeGen.cpp
@@ -0,0 +1,6897 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCodeGen.h"
+
+#include "mozilla/Casting.h"
+
+#include "gc/GC.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/CacheIRCompiler.h"
+#include "jit/CacheIRGenerator.h"
+#include "jit/CalleeToken.h"
+#include "jit/FixedList.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#include "jit/PerfSpewer.h"
+#include "jit/SharedICHelpers.h"
+#include "jit/TemplateObject.h"
+#include "jit/TrialInlining.h"
+#include "jit/VMFunctions.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/UniquePtr.h"
+#include "vm/AsyncFunction.h"
+#include "vm/AsyncIteration.h"
+#include "vm/BuiltinObjectKind.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/Interpreter.h"
+#include "vm/JSFunction.h"
+#include "vm/Time.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+
+#include "debugger/DebugAPI-inl.h"
+#include "jit/BaselineFrameInfo-inl.h"
+#include "jit/JitHints-inl.h"
+#include "jit/JitScript-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "jit/SharedICHelpers-inl.h"
+#include "jit/TemplateObject-inl.h"
+#include "jit/VMFunctionList-inl.h"
+#include "vm/Interpreter-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::TraceKind;
+
+using mozilla::AssertedCast;
+using mozilla::Maybe;
+
+namespace js {
+
+class PlainObject;
+
+namespace jit {
+
+BaselineCompilerHandler::BaselineCompilerHandler(JSContext* cx,
+ MacroAssembler& masm,
+ TempAllocator& alloc,
+ JSScript* script)
+ : frame_(script, masm),
+ alloc_(alloc),
+ analysis_(alloc, script),
+#ifdef DEBUG
+ masm_(masm),
+#endif
+ script_(script),
+ pc_(script->code()),
+ icEntryIndex_(0),
+ compileDebugInstrumentation_(script->isDebuggee()),
+ ionCompileable_(IsIonEnabled(cx) && CanIonCompileScript(cx, script)) {
+}
+
+BaselineInterpreterHandler::BaselineInterpreterHandler(JSContext* cx,
+ MacroAssembler& masm)
+ : frame_(masm) {}
+
+template <typename Handler>
+template <typename... HandlerArgs>
+BaselineCodeGen<Handler>::BaselineCodeGen(JSContext* cx, TempAllocator& alloc,
+ HandlerArgs&&... args)
+ : handler(cx, masm, std::forward<HandlerArgs>(args)...),
+ cx(cx),
+ masm(cx, alloc),
+ frame(handler.frame()) {}
+
+BaselineCompiler::BaselineCompiler(JSContext* cx, TempAllocator& alloc,
+ JSScript* script)
+ : BaselineCodeGen(cx, alloc, /* HandlerArgs = */ alloc, script),
+ profilerPushToggleOffset_() {
+#ifdef JS_CODEGEN_NONE
+ MOZ_CRASH();
+#endif
+}
+
+BaselineInterpreterGenerator::BaselineInterpreterGenerator(JSContext* cx,
+ TempAllocator& alloc)
+ : BaselineCodeGen(cx, alloc /* no handlerArgs */) {}
+
+bool BaselineCompilerHandler::init(JSContext* cx) {
+ if (!analysis_.init(alloc_)) {
+ return false;
+ }
+
+ uint32_t len = script_->length();
+
+ if (!labels_.init(alloc_, len)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < len; i++) {
+ new (&labels_[i]) Label();
+ }
+
+ if (!frame_.init(alloc_)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool BaselineCompiler::init() {
+ if (!handler.init(cx)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool BaselineCompilerHandler::recordCallRetAddr(JSContext* cx,
+ RetAddrEntry::Kind kind,
+ uint32_t retOffset) {
+ uint32_t pcOffset = script_->pcToOffset(pc_);
+
+ // Entries must be sorted by pcOffset for binary search to work.
+ // See BaselineScript::retAddrEntryFromPCOffset.
+ MOZ_ASSERT_IF(!retAddrEntries_.empty(),
+ retAddrEntries_.back().pcOffset() <= pcOffset);
+
+ // Similarly, entries must be sorted by return offset and this offset must be
+ // unique. See BaselineScript::retAddrEntryFromReturnOffset.
+ MOZ_ASSERT_IF(!retAddrEntries_.empty() && !masm_.oom(),
+ retAddrEntries_.back().returnOffset().offset() < retOffset);
+
+ if (!retAddrEntries_.emplaceBack(pcOffset, kind, CodeOffset(retOffset))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool BaselineInterpreterHandler::recordCallRetAddr(JSContext* cx,
+ RetAddrEntry::Kind kind,
+ uint32_t retOffset) {
+ switch (kind) {
+ case RetAddrEntry::Kind::DebugPrologue:
+ MOZ_ASSERT(callVMOffsets_.debugPrologueOffset == 0,
+ "expected single DebugPrologue call");
+ callVMOffsets_.debugPrologueOffset = retOffset;
+ break;
+ case RetAddrEntry::Kind::DebugEpilogue:
+ MOZ_ASSERT(callVMOffsets_.debugEpilogueOffset == 0,
+ "expected single DebugEpilogue call");
+ callVMOffsets_.debugEpilogueOffset = retOffset;
+ break;
+ case RetAddrEntry::Kind::DebugAfterYield:
+ MOZ_ASSERT(callVMOffsets_.debugAfterYieldOffset == 0,
+ "expected single DebugAfterYield call");
+ callVMOffsets_.debugAfterYieldOffset = retOffset;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool BaselineInterpreterHandler::addDebugInstrumentationOffset(
+ JSContext* cx, CodeOffset offset) {
+ if (!debugInstrumentationOffsets_.append(offset.offset())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+}
+
+MethodStatus BaselineCompiler::compile() {
+ AutoCreatedBy acb(masm, "BaselineCompiler::compile");
+
+ Rooted<JSScript*> script(cx, handler.script());
+ JitSpew(JitSpew_BaselineScripts, "Baseline compiling script %s:%u:%u (%p)",
+ script->filename(), script->lineno(), script->column(), script.get());
+
+ JitSpew(JitSpew_Codegen, "# Emitting baseline code for script %s:%u:%u",
+ script->filename(), script->lineno(), script->column());
+
+ AutoIncrementalTimer timer(cx->realm()->timers.baselineCompileTime);
+
+ AutoKeepJitScripts keepJitScript(cx);
+ if (!script->ensureHasJitScript(cx, keepJitScript)) {
+ return Method_Error;
+ }
+
+ // When code coverage is enabled, we have to create the ScriptCounts if they
+ // do not exist.
+ if (!script->hasScriptCounts() && cx->realm()->collectCoverageForDebug()) {
+ if (!script->initScriptCounts(cx)) {
+ return Method_Error;
+ }
+ }
+
+ if (!JitOptions.disableJitHints &&
+ cx->runtime()->jitRuntime()->hasJitHintsMap()) {
+ JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
+ jitHints->setEagerBaselineHint(script);
+ }
+
+ // Suppress GC during compilation.
+ gc::AutoSuppressGC suppressGC(cx);
+
+ if (!script->jitScript()->ensureHasCachedBaselineJitData(cx, script)) {
+ return Method_Error;
+ }
+
+ MOZ_ASSERT(!script->hasBaselineScript());
+
+ perfSpewer_.recordOffset(masm, "Prologue");
+ if (!emitPrologue()) {
+ return Method_Error;
+ }
+
+ MethodStatus status = emitBody();
+ if (status != Method_Compiled) {
+ return status;
+ }
+
+ perfSpewer_.recordOffset(masm, "Epilogue");
+ if (!emitEpilogue()) {
+ return Method_Error;
+ }
+
+ perfSpewer_.recordOffset(masm, "OOLPostBarrierSlot");
+ if (!emitOutOfLinePostBarrierSlot()) {
+ return Method_Error;
+ }
+
+ AutoCreatedBy acb2(masm, "exception_tail");
+ Linker linker(masm);
+ if (masm.oom()) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ JitCode* code = linker.newCode(cx, CodeKind::Baseline);
+ if (!code) {
+ return Method_Error;
+ }
+
+ UniquePtr<BaselineScript> baselineScript(
+ BaselineScript::New(
+ cx, warmUpCheckPrologueOffset_.offset(),
+ profilerEnterFrameToggleOffset_.offset(),
+ profilerExitFrameToggleOffset_.offset(),
+ handler.retAddrEntries().length(), handler.osrEntries().length(),
+ debugTrapEntries_.length(), script->resumeOffsets().size()),
+ JS::DeletePolicy<BaselineScript>(cx->runtime()));
+ if (!baselineScript) {
+ return Method_Error;
+ }
+
+ baselineScript->setMethod(code);
+
+ JitSpew(JitSpew_BaselineScripts,
+ "Created BaselineScript %p (raw %p) for %s:%u:%u",
+ (void*)baselineScript.get(), (void*)code->raw(), script->filename(),
+ script->lineno(), script->column());
+
+ baselineScript->copyRetAddrEntries(handler.retAddrEntries().begin());
+ baselineScript->copyOSREntries(handler.osrEntries().begin());
+ baselineScript->copyDebugTrapEntries(debugTrapEntries_.begin());
+
+ // If profiler instrumentation is enabled, toggle instrumentation on.
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(
+ cx->runtime())) {
+ baselineScript->toggleProfilerInstrumentation(true);
+ }
+
+ // Compute native resume addresses for the script's resume offsets.
+ baselineScript->computeResumeNativeOffsets(script, resumeOffsetEntries_);
+
+ if (compileDebugInstrumentation()) {
+ baselineScript->setHasDebugInstrumentation();
+ }
+
+  // Always register a native => bytecode mapping entry, since the profiler
+  // can be turned on with baseline jitcode on the stack, and baseline jitcode
+  // cannot be invalidated.
+ {
+ JitSpew(JitSpew_Profiling,
+ "Added JitcodeGlobalEntry for baseline script %s:%u:%u (%p)",
+ script->filename(), script->lineno(), script->column(),
+ baselineScript.get());
+
+ // Generate profiling string.
+ UniqueChars str = GeckoProfilerRuntime::allocProfileString(cx, script);
+ if (!str) {
+ return Method_Error;
+ }
+
+ auto entry = MakeJitcodeGlobalEntry<BaselineEntry>(
+ cx, code, code->raw(), code->rawEnd(), script, std::move(str));
+ if (!entry) {
+ return Method_Error;
+ }
+
+ JitcodeGlobalTable* globalTable =
+ cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(std::move(entry))) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ // Mark the jitcode as having a bytecode map.
+ code->setHasBytecodeMap();
+ }
+
+ script->jitScript()->setBaselineScript(script, baselineScript.release());
+
+ perfSpewer_.saveProfile(cx, script, code);
+
+#ifdef MOZ_VTUNE
+ vtune::MarkScript(code, script, "baseline");
+#endif
+
+ return Method_Compiled;
+}
+
+// On most platforms we use a dedicated bytecode PC register to avoid many
+// dependent loads and stores for sequences of simple bytecode ops. This
+// register must be saved/restored around VM and IC calls.
+//
+// On 32-bit x86 we don't have enough registers for this (because R0-R2 require
+// 6 registers) so there we always store the pc on the frame.
+static constexpr bool HasInterpreterPCReg() {
+ return InterpreterPCReg != InvalidReg;
+}
+
+static Register LoadBytecodePC(MacroAssembler& masm, Register scratch) {
+ if (HasInterpreterPCReg()) {
+ return InterpreterPCReg;
+ }
+
+ Address pcAddr(FramePointer, BaselineFrame::reverseOffsetOfInterpreterPC());
+ masm.loadPtr(pcAddr, scratch);
+ return scratch;
+}
+
+static void LoadInt8Operand(MacroAssembler& masm, Register dest) {
+ Register pc = LoadBytecodePC(masm, dest);
+ masm.load8SignExtend(Address(pc, sizeof(jsbytecode)), dest);
+}
+
+static void LoadUint8Operand(MacroAssembler& masm, Register dest) {
+ Register pc = LoadBytecodePC(masm, dest);
+ masm.load8ZeroExtend(Address(pc, sizeof(jsbytecode)), dest);
+}
+
+static void LoadUint16Operand(MacroAssembler& masm, Register dest) {
+ Register pc = LoadBytecodePC(masm, dest);
+ masm.load16ZeroExtend(Address(pc, sizeof(jsbytecode)), dest);
+}
+
+static void LoadInt32Operand(MacroAssembler& masm, Register dest) {
+ Register pc = LoadBytecodePC(masm, dest);
+ masm.load32(Address(pc, sizeof(jsbytecode)), dest);
+}
+
+static void LoadInt32OperandSignExtendToPtr(MacroAssembler& masm, Register pc,
+ Register dest) {
+ masm.load32SignExtendToPtr(Address(pc, sizeof(jsbytecode)), dest);
+}
+
+static void LoadUint24Operand(MacroAssembler& masm, size_t offset,
+ Register dest) {
+  // Load the opcode and operand, then shift right to discard the opcode byte.
+ Register pc = LoadBytecodePC(masm, dest);
+ masm.load32(Address(pc, offset), dest);
+ masm.rshift32(Imm32(8), dest);
+}
+
+static void LoadInlineValueOperand(MacroAssembler& masm, ValueOperand dest) {
+ // Note: the Value might be unaligned but as above we rely on all our
+ // platforms having appropriate support for unaligned accesses (except for
+ // floating point instructions on ARM).
+ Register pc = LoadBytecodePC(masm, dest.scratchReg());
+ masm.loadUnalignedValue(Address(pc, sizeof(jsbytecode)), dest);
+}
+
+template <>
+void BaselineCompilerCodeGen::loadScript(Register dest) {
+ masm.movePtr(ImmGCPtr(handler.script()), dest);
+}
+
+template <>
+void BaselineInterpreterCodeGen::loadScript(Register dest) {
+ masm.loadPtr(frame.addressOfInterpreterScript(), dest);
+}
+
+template <>
+void BaselineCompilerCodeGen::saveInterpreterPCReg() {}
+
+template <>
+void BaselineInterpreterCodeGen::saveInterpreterPCReg() {
+ if (HasInterpreterPCReg()) {
+ masm.storePtr(InterpreterPCReg, frame.addressOfInterpreterPC());
+ }
+}
+
+template <>
+void BaselineCompilerCodeGen::restoreInterpreterPCReg() {}
+
+template <>
+void BaselineInterpreterCodeGen::restoreInterpreterPCReg() {
+ if (HasInterpreterPCReg()) {
+ masm.loadPtr(frame.addressOfInterpreterPC(), InterpreterPCReg);
+ }
+}
+
+template <>
+void BaselineCompilerCodeGen::emitInitializeLocals() {
+ // Initialize all locals to |undefined|. Lexical bindings are temporal
+ // dead zoned in bytecode.
+
+ size_t n = frame.nlocals();
+ if (n == 0) {
+ return;
+ }
+
+ // Use R0 to minimize code size. If the number of locals to push is <
+ // LOOP_UNROLL_FACTOR, then the initialization pushes are emitted directly
+ // and inline. Otherwise, they're emitted in a partially unrolled loop.
+ static const size_t LOOP_UNROLL_FACTOR = 4;
+ size_t toPushExtra = n % LOOP_UNROLL_FACTOR;
+
+ masm.moveValue(UndefinedValue(), R0);
+
+ // Handle any extra pushes left over by the optional unrolled loop below.
+ for (size_t i = 0; i < toPushExtra; i++) {
+ masm.pushValue(R0);
+ }
+
+ // Partially unrolled loop of pushes.
+ if (n >= LOOP_UNROLL_FACTOR) {
+ size_t toPush = n - toPushExtra;
+ MOZ_ASSERT(toPush % LOOP_UNROLL_FACTOR == 0);
+ MOZ_ASSERT(toPush >= LOOP_UNROLL_FACTOR);
+ masm.move32(Imm32(toPush), R1.scratchReg());
+ // Emit unrolled loop with 4 pushes per iteration.
+ Label pushLoop;
+ masm.bind(&pushLoop);
+ for (size_t i = 0; i < LOOP_UNROLL_FACTOR; i++) {
+ masm.pushValue(R0);
+ }
+ masm.branchSub32(Assembler::NonZero, Imm32(LOOP_UNROLL_FACTOR),
+ R1.scratchReg(), &pushLoop);
+ }
+}
+
+template <>
+void BaselineInterpreterCodeGen::emitInitializeLocals() {
+ // Push |undefined| for all locals.
+
+ Register scratch = R0.scratchReg();
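+  // Load script->nfixed() (the number of local slots) into |scratch|.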
+ loadScript(scratch);
+ masm.loadPtr(Address(scratch, JSScript::offsetOfSharedData()), scratch);
+ masm.loadPtr(Address(scratch, SharedImmutableScriptData::offsetOfISD()),
+ scratch);
+ masm.load32(Address(scratch, ImmutableScriptData::offsetOfNfixed()), scratch);
+
+ Label top, done;
+ masm.branchTest32(Assembler::Zero, scratch, scratch, &done);
+ masm.bind(&top);
+ {
+ masm.pushValue(UndefinedValue());
+ masm.branchSub32(Assembler::NonZero, Imm32(1), scratch, &top);
+ }
+ masm.bind(&done);
+}
+
+// On input:
+// R2.scratchReg() contains object being written to.
+// Called with the baseline stack synced, except for R0 which is preserved.
+// All other registers are usable as scratch.
+// This calls:
+// void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitOutOfLinePostBarrierSlot() {
+ AutoCreatedBy acb(masm,
+ "BaselineCodeGen<Handler>::emitOutOfLinePostBarrierSlot");
+
+ if (!postBarrierSlot_.used()) {
+ return true;
+ }
+
+ masm.bind(&postBarrierSlot_);
+
+ saveInterpreterPCReg();
+
+ Register objReg = R2.scratchReg();
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(R0);
+ regs.take(objReg);
+ Register scratch = regs.takeAny();
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+ // On ARM, save the link register before calling. It contains the return
+ // address. The |masm.ret()| later will pop this into |pc| to return.
+ masm.push(lr);
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ masm.push(ra);
+#elif defined(JS_CODEGEN_LOONG64)
+ masm.push(ra);
+#elif defined(JS_CODEGEN_RISCV64)
+ masm.push(ra);
+#endif
+ masm.pushValue(R0);
+
+ using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
+ masm.setupUnalignedABICall(scratch);
+ masm.movePtr(ImmPtr(cx->runtime()), scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(objReg);
+ masm.callWithABI<Fn, PostWriteBarrier>();
+
+ restoreInterpreterPCReg();
+
+ masm.popValue(R0);
+ masm.ret();
+ return true;
+}
+
+// Scan a cache IR stub's fields and create an allocation site for any that
+// refer to the catch-all unknown allocation site. This will be the case for
+// stubs created when running in the interpreter. This happens on transition to
+// baseline.
+static bool CreateAllocSitesForCacheIRStub(JSScript* script,
+ ICCacheIRStub* stub) {
+ const CacheIRStubInfo* stubInfo = stub->stubInfo();
+ uint8_t* stubData = stub->stubDataStart();
+
+ uint32_t field = 0;
+ size_t offset = 0;
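+  // Walk the stub's fields until we reach the StubField::Type::Limit sentinel.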
+ while (true) {
+ StubField::Type fieldType = stubInfo->fieldType(field);
+ if (fieldType == StubField::Type::Limit) {
+ break;
+ }
+
+ if (fieldType == StubField::Type::AllocSite) {
+ gc::AllocSite* site =
+ stubInfo->getPtrStubField<ICCacheIRStub, gc::AllocSite>(stub, offset);
+ if (site->kind() == gc::AllocSite::Kind::Unknown) {
+ gc::AllocSite* newSite = script->createAllocSite();
+ if (!newSite) {
+ return false;
+ }
+
+ stubInfo->replaceStubRawWord(stubData, offset, uintptr_t(site),
+ uintptr_t(newSite));
+ }
+ }
+
+ field++;
+ offset += StubField::sizeInBytes(fieldType);
+ }
+
+ return true;
+}
+
+static void CreateAllocSitesForICChain(JSScript* script, uint32_t entryIndex) {
+ JitScript* jitScript = script->jitScript();
+ ICStub* stub = jitScript->icEntry(entryIndex).firstStub();
+
+ while (!stub->isFallback()) {
+ if (!CreateAllocSitesForCacheIRStub(script, stub->toCacheIRStub())) {
+ // This is an optimization and safe to skip if we hit OOM or per-zone
+ // limit.
+ return;
+ }
+ stub = stub->toCacheIRStub()->next();
+ }
+}
+
+template <>
+bool BaselineCompilerCodeGen::emitNextIC() {
+ AutoCreatedBy acb(masm, "emitNextIC");
+
+ // Emit a call to an IC stored in JitScript. Calls to this must match the
+ // ICEntry order in JitScript: first the non-op IC entries for |this| and
+ // formal arguments, then the for-op IC entries for JOF_IC ops.
+
+ JSScript* script = handler.script();
+ uint32_t pcOffset = script->pcToOffset(handler.pc());
+
+ // We don't use every ICEntry and we can skip unreachable ops, so we have
+ // to loop until we find an ICEntry for the current pc.
+ const ICFallbackStub* stub;
+ uint32_t entryIndex;
+ do {
+ stub = script->jitScript()->fallbackStub(handler.icEntryIndex());
+ entryIndex = handler.icEntryIndex();
+ handler.moveToNextICEntry();
+ } while (stub->pcOffset() < pcOffset);
+
+ MOZ_ASSERT(stub->pcOffset() == pcOffset);
+ MOZ_ASSERT(BytecodeOpHasIC(JSOp(*handler.pc())));
+
+ if (BytecodeOpCanHaveAllocSite(JSOp(*handler.pc()))) {
+ CreateAllocSitesForICChain(script, entryIndex);
+ }
+
+ // Load stub pointer into ICStubReg.
+ masm.loadPtr(frame.addressOfICScript(), ICStubReg);
+ size_t firstStubOffset = ICScript::offsetOfFirstStub(entryIndex);
+ masm.loadPtr(Address(ICStubReg, firstStubOffset), ICStubReg);
+
+ CodeOffset returnOffset;
+ EmitCallIC(masm, &returnOffset);
+
+ RetAddrEntry::Kind kind = RetAddrEntry::Kind::IC;
+ if (!handler.retAddrEntries().emplaceBack(pcOffset, kind, returnOffset)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emitNextIC() {
+ saveInterpreterPCReg();
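+  // Load the current ICEntry's first stub and call its stub code.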
+ masm.loadPtr(frame.addressOfInterpreterICEntry(), ICStubReg);
+ masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+ uint32_t returnOffset = masm.currentOffset();
+ restoreInterpreterPCReg();
+
+ // If this is an IC for a bytecode op where Ion may inline scripts, we need to
+ // record the return offset for Ion bailouts.
+ if (handler.currentOp()) {
+ JSOp op = *handler.currentOp();
+ MOZ_ASSERT(BytecodeOpHasIC(op));
+ if (IsIonInlinableOp(op)) {
+ if (!handler.icReturnOffsets().emplaceBack(returnOffset, op)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+template <>
+void BaselineCompilerCodeGen::computeFrameSize(Register dest) {
+ MOZ_ASSERT(!inCall_, "must not be called in the middle of a VM call");
+ masm.move32(Imm32(frame.frameSize()), dest);
+}
+
+template <>
+void BaselineInterpreterCodeGen::computeFrameSize(Register dest) {
+ // dest := FramePointer - StackPointer.
+ MOZ_ASSERT(!inCall_, "must not be called in the middle of a VM call");
+ masm.mov(FramePointer, dest);
+ masm.subStackPtrFrom(dest);
+}
+
+template <typename Handler>
+void BaselineCodeGen<Handler>::prepareVMCall() {
+ pushedBeforeCall_ = masm.framePushed();
+#ifdef DEBUG
+ inCall_ = true;
+#endif
+
+ // Ensure everything is synced.
+ frame.syncStack(0);
+}
+
+template <>
+void BaselineCompilerCodeGen::storeFrameSizeAndPushDescriptor(
+ uint32_t argSize, Register scratch) {
+#ifdef DEBUG
+ masm.store32(Imm32(frame.frameSize()), frame.addressOfDebugFrameSize());
+#endif
+
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+}
+
+template <>
+void BaselineInterpreterCodeGen::storeFrameSizeAndPushDescriptor(
+ uint32_t argSize, Register scratch) {
+#ifdef DEBUG
+ // Store the frame size without VMFunction arguments in debug builds.
+ // scratch := FramePointer - StackPointer - argSize.
+ masm.mov(FramePointer, scratch);
+ masm.subStackPtrFrom(scratch);
+ masm.sub32(Imm32(argSize), scratch);
+ masm.store32(scratch, frame.addressOfDebugFrameSize());
+#endif
+
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+}
+
+static uint32_t GetVMFunctionArgSize(const VMFunctionData& fun) {
+ return fun.explicitStackSlots() * sizeof(void*);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::callVMInternal(VMFunctionId id,
+ RetAddrEntry::Kind kind,
+ CallVMPhase phase) {
+#ifdef DEBUG
+ // Assert prepareVMCall() has been called.
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+
+ TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(id);
+ const VMFunctionData& fun = GetVMFunction(id);
+
+ uint32_t argSize = GetVMFunctionArgSize(fun);
+
+ // Assert all arguments were pushed.
+ MOZ_ASSERT(masm.framePushed() - pushedBeforeCall_ == argSize);
+
+ saveInterpreterPCReg();
+
+ if (phase == CallVMPhase::AfterPushingLocals) {
+ storeFrameSizeAndPushDescriptor(argSize, R0.scratchReg());
+ } else {
+ MOZ_ASSERT(phase == CallVMPhase::BeforePushingLocals);
+#ifdef DEBUG
+ uint32_t frameBaseSize = BaselineFrame::frameSizeForNumValueSlots(0);
+ masm.store32(Imm32(frameBaseSize), frame.addressOfDebugFrameSize());
+#endif
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ }
+ MOZ_ASSERT(fun.expectTailCall == NonTailCall);
+ // Perform the call.
+ masm.call(code);
+ uint32_t callOffset = masm.currentOffset();
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(argSize);
+
+ restoreInterpreterPCReg();
+
+ return handler.recordCallRetAddr(cx, kind, callOffset);
+}
+
+template <typename Handler>
+template <typename Fn, Fn fn>
+bool BaselineCodeGen<Handler>::callVM(RetAddrEntry::Kind kind,
+ CallVMPhase phase) {
+ VMFunctionId fnId = VMFunctionToId<Fn, fn>::id;
+ return callVMInternal(fnId, kind, phase);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitStackCheck() {
+ Label skipCall;
+ if (handler.mustIncludeSlotsInStackCheck()) {
+    // Subtract the space for script->nslots() slots first.
+ Register scratch = R1.scratchReg();
+ masm.moveStackPtrTo(scratch);
+ subtractScriptSlotsSize(scratch, R2.scratchReg());
+ masm.branchPtr(Assembler::BelowOrEqual,
+ AbsoluteAddress(cx->addressOfJitStackLimit()), scratch,
+ &skipCall);
+ } else {
+ masm.branchStackPtrRhs(Assembler::BelowOrEqual,
+ AbsoluteAddress(cx->addressOfJitStackLimit()),
+ &skipCall);
+ }
+
+ prepareVMCall();
+ masm.loadBaselineFramePtr(FramePointer, R1.scratchReg());
+ pushArg(R1.scratchReg());
+
+ const CallVMPhase phase = CallVMPhase::BeforePushingLocals;
+ const RetAddrEntry::Kind kind = RetAddrEntry::Kind::StackCheck;
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ if (!callVM<Fn, CheckOverRecursedBaseline>(kind, phase)) {
+ return false;
+ }
+
+ masm.bind(&skipCall);
+ return true;
+}
+
+static void EmitCallFrameIsDebuggeeCheck(MacroAssembler& masm) {
+ using Fn = void (*)(BaselineFrame* frame);
+ masm.setupUnalignedABICall(R0.scratchReg());
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ masm.passABIArg(R0.scratchReg());
+ masm.callWithABI<Fn, FrameIsDebuggeeCheck>();
+}
+
+template <>
+bool BaselineCompilerCodeGen::emitIsDebuggeeCheck() {
+ if (handler.compileDebugInstrumentation()) {
+ EmitCallFrameIsDebuggeeCheck(masm);
+ }
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emitIsDebuggeeCheck() {
+ // Use a toggled jump to call FrameIsDebuggeeCheck only if the debugger is
+ // enabled.
+ //
+ // TODO(bug 1522394): consider having a cx->realm->isDebuggee guard before the
+ // call. Consider moving the callWithABI out-of-line.
+
+ Label skipCheck;
+ CodeOffset toggleOffset = masm.toggledJump(&skipCheck);
+ {
+ saveInterpreterPCReg();
+ EmitCallFrameIsDebuggeeCheck(masm);
+ restoreInterpreterPCReg();
+ }
+ masm.bind(&skipCheck);
+ return handler.addDebugInstrumentationOffset(cx, toggleOffset);
+}
+
+static void MaybeIncrementCodeCoverageCounter(MacroAssembler& masm,
+ JSScript* script,
+ jsbytecode* pc) {
+ if (!script->hasScriptCounts()) {
+ return;
+ }
+ PCCounts* counts = script->maybeGetPCCounts(pc);
+ uint64_t* counterAddr = &counts->numExec();
+ masm.inc64(AbsoluteAddress(counterAddr));
+}
+
+template <>
+bool BaselineCompilerCodeGen::emitHandleCodeCoverageAtPrologue() {
+ // If the main instruction is not a jump target, then we emit the
+ // corresponding code coverage counter.
+ JSScript* script = handler.script();
+ jsbytecode* main = script->main();
+ if (!BytecodeIsJumpTarget(JSOp(*main))) {
+ MaybeIncrementCodeCoverageCounter(masm, script, main);
+ }
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emitHandleCodeCoverageAtPrologue() {
+ Label skipCoverage;
+ CodeOffset toggleOffset = masm.toggledJump(&skipCoverage);
+ masm.call(handler.codeCoverageAtPrologueLabel());
+ masm.bind(&skipCoverage);
+ return handler.codeCoverageOffsets().append(toggleOffset.offset());
+}
+
+template <>
+void BaselineCompilerCodeGen::subtractScriptSlotsSize(Register reg,
+ Register scratch) {
+ uint32_t slotsSize = handler.script()->nslots() * sizeof(Value);
+ masm.subPtr(Imm32(slotsSize), reg);
+}
+
+template <>
+void BaselineInterpreterCodeGen::subtractScriptSlotsSize(Register reg,
+ Register scratch) {
+ // reg = reg - script->nslots() * sizeof(Value)
+ MOZ_ASSERT(reg != scratch);
+ loadScript(scratch);
+ masm.loadPtr(Address(scratch, JSScript::offsetOfSharedData()), scratch);
+ masm.loadPtr(Address(scratch, SharedImmutableScriptData::offsetOfISD()),
+ scratch);
+ masm.load32(Address(scratch, ImmutableScriptData::offsetOfNslots()), scratch);
+ static_assert(sizeof(Value) == 8,
+ "shift by 3 below assumes Value is 8 bytes");
+ masm.lshiftPtr(Imm32(3), scratch);
+ masm.subPtr(scratch, reg);
+}
+
+template <>
+void BaselineCompilerCodeGen::loadGlobalLexicalEnvironment(Register dest) {
+ MOZ_ASSERT(!handler.script()->hasNonSyntacticScope());
+ masm.movePtr(ImmGCPtr(&cx->global()->lexicalEnvironment()), dest);
+}
+
+template <>
+void BaselineInterpreterCodeGen::loadGlobalLexicalEnvironment(Register dest) {
+ masm.loadPtr(AbsoluteAddress(cx->addressOfRealm()), dest);
+ masm.loadPtr(Address(dest, Realm::offsetOfActiveGlobal()), dest);
+ masm.loadPrivate(Address(dest, GlobalObject::offsetOfGlobalDataSlot()), dest);
+ masm.loadPtr(Address(dest, GlobalObjectData::offsetOfLexicalEnvironment()),
+ dest);
+}
+
+template <>
+void BaselineCompilerCodeGen::pushGlobalLexicalEnvironmentValue(
+ ValueOperand scratch) {
+ frame.push(ObjectValue(cx->global()->lexicalEnvironment()));
+}
+
+template <>
+void BaselineInterpreterCodeGen::pushGlobalLexicalEnvironmentValue(
+ ValueOperand scratch) {
+ loadGlobalLexicalEnvironment(scratch.scratchReg());
+ masm.tagValue(JSVAL_TYPE_OBJECT, scratch.scratchReg(), scratch);
+ frame.push(scratch);
+}
+
+template <>
+void BaselineCompilerCodeGen::loadGlobalThisValue(ValueOperand dest) {
+ JSObject* thisObj = cx->global()->lexicalEnvironment().thisObject();
+ masm.moveValue(ObjectValue(*thisObj), dest);
+}
+
+template <>
+void BaselineInterpreterCodeGen::loadGlobalThisValue(ValueOperand dest) {
+ Register scratch = dest.scratchReg();
+ loadGlobalLexicalEnvironment(scratch);
+ static constexpr size_t SlotOffset =
+ GlobalLexicalEnvironmentObject::offsetOfThisValueSlot();
+ masm.loadValue(Address(scratch, SlotOffset), dest);
+}
+
+template <>
+void BaselineCompilerCodeGen::pushScriptArg() {
+ pushArg(ImmGCPtr(handler.script()));
+}
+
+template <>
+void BaselineInterpreterCodeGen::pushScriptArg() {
+ pushArg(frame.addressOfInterpreterScript());
+}
+
+template <>
+void BaselineCompilerCodeGen::pushBytecodePCArg() {
+ pushArg(ImmPtr(handler.pc()));
+}
+
+template <>
+void BaselineInterpreterCodeGen::pushBytecodePCArg() {
+ if (HasInterpreterPCReg()) {
+ pushArg(InterpreterPCReg);
+ } else {
+ pushArg(frame.addressOfInterpreterPC());
+ }
+}
+
+static gc::Cell* GetScriptGCThing(JSScript* script, jsbytecode* pc,
+ ScriptGCThingType type) {
+ switch (type) {
+ case ScriptGCThingType::Atom:
+ return script->getAtom(pc);
+ case ScriptGCThingType::String:
+ return script->getString(pc);
+ case ScriptGCThingType::RegExp:
+ return script->getRegExp(pc);
+ case ScriptGCThingType::Object:
+ return script->getObject(pc);
+ case ScriptGCThingType::Function:
+ return script->getFunction(pc);
+ case ScriptGCThingType::Scope:
+ return script->getScope(pc);
+ case ScriptGCThingType::BigInt:
+ return script->getBigInt(pc);
+ }
+ MOZ_CRASH("Unexpected GCThing type");
+}
+
+template <>
+void BaselineCompilerCodeGen::loadScriptGCThing(ScriptGCThingType type,
+ Register dest,
+ Register scratch) {
+ gc::Cell* thing = GetScriptGCThing(handler.script(), handler.pc(), type);
+ masm.movePtr(ImmGCPtr(thing), dest);
+}
+
+template <>
+void BaselineInterpreterCodeGen::loadScriptGCThing(ScriptGCThingType type,
+ Register dest,
+ Register scratch) {
+ MOZ_ASSERT(dest != scratch);
+
+ // Load the index in |scratch|.
+ LoadInt32Operand(masm, scratch);
+
+ // Load the GCCellPtr.
+ loadScript(dest);
+ masm.loadPtr(Address(dest, JSScript::offsetOfPrivateData()), dest);
+ masm.loadPtr(BaseIndex(dest, scratch, ScalePointer,
+ PrivateScriptData::offsetOfGCThings()),
+ dest);
+
+ // Clear the tag bits.
+ switch (type) {
+ case ScriptGCThingType::Atom:
+ case ScriptGCThingType::String:
+ // Use xorPtr with a 32-bit immediate because it's more efficient than
+ // andPtr on 64-bit.
+ static_assert(uintptr_t(TraceKind::String) == 2,
+ "Unexpected tag bits for string GCCellPtr");
+ masm.xorPtr(Imm32(2), dest);
+ break;
+ case ScriptGCThingType::RegExp:
+ case ScriptGCThingType::Object:
+ case ScriptGCThingType::Function:
+ // No-op because GCCellPtr tag bits are zero for objects.
+ static_assert(uintptr_t(TraceKind::Object) == 0,
+ "Unexpected tag bits for object GCCellPtr");
+ break;
+ case ScriptGCThingType::BigInt:
+ // Use xorPtr with a 32-bit immediate because it's more efficient than
+ // andPtr on 64-bit.
+ static_assert(uintptr_t(TraceKind::BigInt) == 1,
+ "Unexpected tag bits for BigInt GCCellPtr");
+ masm.xorPtr(Imm32(1), dest);
+ break;
+ case ScriptGCThingType::Scope:
+ // Use xorPtr with a 32-bit immediate because it's more efficient than
+ // andPtr on 64-bit.
+ static_assert(uintptr_t(TraceKind::Scope) >= JS::OutOfLineTraceKindMask,
+ "Expected Scopes to have OutOfLineTraceKindMask tag");
+ masm.xorPtr(Imm32(JS::OutOfLineTraceKindMask), dest);
+ break;
+ }
+
+#ifdef DEBUG
+ // Assert low bits are not set.
+ Label ok;
+ masm.branchTestPtr(Assembler::Zero, dest, Imm32(0b111), &ok);
+ masm.assumeUnreachable("GC pointer with tag bits set");
+ masm.bind(&ok);
+#endif
+}
+
+template <>
+void BaselineCompilerCodeGen::pushScriptGCThingArg(ScriptGCThingType type,
+ Register scratch1,
+ Register scratch2) {
+ gc::Cell* thing = GetScriptGCThing(handler.script(), handler.pc(), type);
+ pushArg(ImmGCPtr(thing));
+}
+
+template <>
+void BaselineInterpreterCodeGen::pushScriptGCThingArg(ScriptGCThingType type,
+ Register scratch1,
+ Register scratch2) {
+ loadScriptGCThing(type, scratch1, scratch2);
+ pushArg(scratch1);
+}
+
+template <typename Handler>
+void BaselineCodeGen<Handler>::pushScriptNameArg(Register scratch1,
+ Register scratch2) {
+ pushScriptGCThingArg(ScriptGCThingType::Atom, scratch1, scratch2);
+}
+
+template <>
+void BaselineCompilerCodeGen::pushUint8BytecodeOperandArg(Register) {
+ MOZ_ASSERT(JOF_OPTYPE(JSOp(*handler.pc())) == JOF_UINT8);
+ pushArg(Imm32(GET_UINT8(handler.pc())));
+}
+
+template <>
+void BaselineInterpreterCodeGen::pushUint8BytecodeOperandArg(Register scratch) {
+ LoadUint8Operand(masm, scratch);
+ pushArg(scratch);
+}
+
+template <>
+void BaselineCompilerCodeGen::pushUint16BytecodeOperandArg(Register) {
+ MOZ_ASSERT(JOF_OPTYPE(JSOp(*handler.pc())) == JOF_UINT16);
+ pushArg(Imm32(GET_UINT16(handler.pc())));
+}
+
+template <>
+void BaselineInterpreterCodeGen::pushUint16BytecodeOperandArg(
+ Register scratch) {
+ LoadUint16Operand(masm, scratch);
+ pushArg(scratch);
+}
+
+template <>
+void BaselineCompilerCodeGen::loadInt32LengthBytecodeOperand(Register dest) {
+ uint32_t length = GET_UINT32(handler.pc());
+ MOZ_ASSERT(length <= INT32_MAX,
+ "the bytecode emitter must fail to compile code that would "
+ "produce a length exceeding int32_t range");
+ masm.move32(Imm32(AssertedCast<int32_t>(length)), dest);
+}
+
+template <>
+void BaselineInterpreterCodeGen::loadInt32LengthBytecodeOperand(Register dest) {
+ LoadInt32Operand(masm, dest);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitDebugPrologue() {
+ auto ifDebuggee = [this]() {
+ // Load pointer to BaselineFrame in R0.
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+
+ const RetAddrEntry::Kind kind = RetAddrEntry::Kind::DebugPrologue;
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ if (!callVM<Fn, jit::DebugPrologue>(kind)) {
+ return false;
+ }
+
+ return true;
+ };
+ return emitDebugInstrumentation(ifDebuggee);
+}
+
+template <>
+void BaselineCompilerCodeGen::emitInitFrameFields(Register nonFunctionEnv) {
+ Register scratch = R0.scratchReg();
+ Register scratch2 = R2.scratchReg();
+ MOZ_ASSERT(nonFunctionEnv != scratch && nonFunctionEnv != scratch2);
+
+ masm.store32(Imm32(0), frame.addressOfFlags());
+ if (handler.function()) {
+ masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), scratch);
+ masm.unboxObject(Address(scratch, JSFunction::offsetOfEnvironment()),
+ scratch);
+ masm.storePtr(scratch, frame.addressOfEnvironmentChain());
+ } else {
+ masm.storePtr(nonFunctionEnv, frame.addressOfEnvironmentChain());
+ }
+
+ // If cx->inlinedICScript contains an inlined ICScript (passed from
+ // the caller), take that ICScript and store it in the frame, then
+ // overwrite cx->inlinedICScript with nullptr.
+ Label notInlined, done;
+ masm.movePtr(ImmPtr(cx->addressOfInlinedICScript()), scratch);
+ Address inlinedAddr(scratch, 0);
+ masm.branchPtr(Assembler::Equal, inlinedAddr, ImmWord(0), &notInlined);
+ masm.loadPtr(inlinedAddr, scratch2);
+ masm.storePtr(scratch2, frame.addressOfICScript());
+ masm.storePtr(ImmPtr(nullptr), inlinedAddr);
+ masm.jump(&done);
+
+ // Otherwise, store this script's default ICScript in the frame.
+ masm.bind(&notInlined);
+ masm.storePtr(ImmPtr(handler.script()->jitScript()->icScript()),
+ frame.addressOfICScript());
+ masm.bind(&done);
+}
+
+template <>
+void BaselineInterpreterCodeGen::emitInitFrameFields(Register nonFunctionEnv) {
+ MOZ_ASSERT(nonFunctionEnv == R1.scratchReg(),
+ "Don't clobber nonFunctionEnv below");
+
+ // If we have a dedicated PC register we use it as scratch1 to avoid a
+ // register move below.
+ Register scratch1 =
+ HasInterpreterPCReg() ? InterpreterPCReg : R0.scratchReg();
+ Register scratch2 = R2.scratchReg();
+
+ masm.store32(Imm32(BaselineFrame::RUNNING_IN_INTERPRETER),
+ frame.addressOfFlags());
+
+ // Initialize interpreterScript.
+ Label notFunction, done;
+ masm.loadPtr(frame.addressOfCalleeToken(), scratch1);
+ masm.branchTestPtr(Assembler::NonZero, scratch1, Imm32(CalleeTokenScriptBit),
+ &notFunction);
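+ // A set script bit means the token is a bare JSScript*; function tokens
+ // (constructing or not) fall through to the block below.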
+ {
+ // CalleeToken_Function or CalleeToken_FunctionConstructing.
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), scratch1);
+ masm.unboxObject(Address(scratch1, JSFunction::offsetOfEnvironment()),
+ scratch2);
+ masm.storePtr(scratch2, frame.addressOfEnvironmentChain());
+ masm.loadPrivate(Address(scratch1, JSFunction::offsetOfJitInfoOrScript()),
+ scratch1);
+ masm.jump(&done);
+ }
+ masm.bind(&notFunction);
+ {
+ // CalleeToken_Script.
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), scratch1);
+ masm.storePtr(nonFunctionEnv, frame.addressOfEnvironmentChain());
+ }
+ masm.bind(&done);
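+ // At this point scratch1 holds the script for both callee token kinds.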
+ masm.storePtr(scratch1, frame.addressOfInterpreterScript());
+
+ // Initialize icScript and interpreterICEntry
+ masm.loadJitScript(scratch1, scratch2);
+ masm.computeEffectiveAddress(Address(scratch2, JitScript::offsetOfICScript()),
+ scratch2);
+ masm.storePtr(scratch2, frame.addressOfICScript());
+ masm.computeEffectiveAddress(Address(scratch2, ICScript::offsetOfICEntries()),
+ scratch2);
+ masm.storePtr(scratch2, frame.addressOfInterpreterICEntry());
+
+ // Initialize interpreter pc.
+ masm.loadPtr(Address(scratch1, JSScript::offsetOfSharedData()), scratch1);
+ masm.loadPtr(Address(scratch1, SharedImmutableScriptData::offsetOfISD()),
+ scratch1);
+ masm.addPtr(Imm32(ImmutableScriptData::offsetOfCode()), scratch1);
+
+ if (HasInterpreterPCReg()) {
+ MOZ_ASSERT(scratch1 == InterpreterPCReg,
+ "pc must be stored in the pc register");
+ } else {
+ masm.storePtr(scratch1, frame.addressOfInterpreterPC());
+ }
+}
+
+// Assert we don't need a post write barrier to write sourceObj to a slot of
+// destObj. See comments in WarpBuilder::buildNamedLambdaEnv.
+static void AssertCanElidePostWriteBarrier(MacroAssembler& masm,
+ Register destObj, Register sourceObj,
+ Register temp) {
+#ifdef DEBUG
+ Label ok;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, destObj, temp, &ok);
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, sourceObj, temp, &ok);
+ masm.assumeUnreachable("Unexpected missing post write barrier in Baseline");
+ masm.bind(&ok);
+#endif
+}
+
+template <>
+bool BaselineCompilerCodeGen::initEnvironmentChain() {
+ if (!handler.function()) {
+ return true;
+ }
+ if (!handler.script()->needsFunctionEnvironmentObjects()) {
+ return true;
+ }
+
+ // Allocate a NamedLambdaObject and/or a CallObject. If the function needs
+ // both, the NamedLambdaObject must enclose the CallObject. If one of the
+ // allocations fails, we perform the whole operation in C++.
+
+ JSObject* templateEnv = handler.script()->jitScript()->templateEnvironment();
+ MOZ_ASSERT(templateEnv);
+
+ CallObject* callObjectTemplate = nullptr;
+ if (handler.function()->needsCallObject()) {
+ callObjectTemplate = &templateEnv->as<CallObject>();
+ }
+
+ NamedLambdaObject* namedLambdaTemplate = nullptr;
+ if (handler.function()->needsNamedLambdaEnvironment()) {
+ if (callObjectTemplate) {
+ templateEnv = templateEnv->enclosingEnvironment();
+ }
+ namedLambdaTemplate = &templateEnv->as<NamedLambdaObject>();
+ }
+
+ MOZ_ASSERT(namedLambdaTemplate || callObjectTemplate);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ Register newEnv = regs.takeAny();
+ Register enclosingEnv = regs.takeAny();
+ Register callee = regs.takeAny();
+ Register temp = regs.takeAny();
+
+ Label fail;
+ masm.loadPtr(frame.addressOfEnvironmentChain(), enclosingEnv);
+ masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), callee);
+
+ // Allocate a NamedLambdaObject if needed.
+ if (namedLambdaTemplate) {
+ TemplateObject templateObject(namedLambdaTemplate);
+ masm.createGCObject(newEnv, temp, templateObject, gc::Heap::Default, &fail);
+
+ // Store enclosing environment.
+ Address enclosingSlot(newEnv,
+ NamedLambdaObject::offsetOfEnclosingEnvironment());
+ masm.storeValue(JSVAL_TYPE_OBJECT, enclosingEnv, enclosingSlot);
+ AssertCanElidePostWriteBarrier(masm, newEnv, enclosingEnv, temp);
+
+ // Store callee.
+ Address lambdaSlot(newEnv, NamedLambdaObject::offsetOfLambdaSlot());
+ masm.storeValue(JSVAL_TYPE_OBJECT, callee, lambdaSlot);
+ AssertCanElidePostWriteBarrier(masm, newEnv, callee, temp);
+
+ if (callObjectTemplate) {
+ masm.movePtr(newEnv, enclosingEnv);
+ }
+ }
+
+ // Allocate a CallObject if needed.
+ if (callObjectTemplate) {
+ TemplateObject templateObject(callObjectTemplate);
+ masm.createGCObject(newEnv, temp, templateObject, gc::Heap::Default, &fail);
+
+ // Store enclosing environment.
+ Address enclosingSlot(newEnv, CallObject::offsetOfEnclosingEnvironment());
+ masm.storeValue(JSVAL_TYPE_OBJECT, enclosingEnv, enclosingSlot);
+ AssertCanElidePostWriteBarrier(masm, newEnv, enclosingEnv, temp);
+
+ // Store callee.
+ Address calleeSlot(newEnv, CallObject::offsetOfCallee());
+ masm.storeValue(JSVAL_TYPE_OBJECT, callee, calleeSlot);
+ AssertCanElidePostWriteBarrier(masm, newEnv, callee, temp);
+ }
+
+ // Update the frame's environment chain and mark it initialized.
+ Label done;
+ masm.storePtr(newEnv, frame.addressOfEnvironmentChain());
+ masm.or32(Imm32(BaselineFrame::HAS_INITIAL_ENV), frame.addressOfFlags());
+ masm.jump(&done);
+
+ masm.bind(&fail);
+
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(FramePointer, temp);
+ pushArg(temp);
+
+ const CallVMPhase phase = CallVMPhase::BeforePushingLocals;
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ if (!callVMNonOp<Fn, jit::InitFunctionEnvironmentObjects>(phase)) {
+ return false;
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::initEnvironmentChain() {
+ // For function scripts, call InitFunctionEnvironmentObjects if needed. For
+ // non-function scripts this is a no-op.
+
+ Label done;
+ masm.branchTestPtr(Assembler::NonZero, frame.addressOfCalleeToken(),
+ Imm32(CalleeTokenScriptBit), &done);
+ {
+ auto initEnv = [this]() {
+ // Call into the VM to create the proper environment objects.
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ const CallVMPhase phase = CallVMPhase::BeforePushingLocals;
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ return callVMNonOp<Fn, jit::InitFunctionEnvironmentObjects>(phase);
+ };
+ if (!emitTestScriptFlag(
+ JSScript::ImmutableFlags::NeedsFunctionEnvironmentObjects, true,
+ initEnv, R2.scratchReg())) {
+ return false;
+ }
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitInterruptCheck() {
+ frame.syncStack(0);
+
+ Label done;
+ masm.branch32(Assembler::Equal, AbsoluteAddress(cx->addressOfInterruptBits()),
+ Imm32(0), &done);
+
+ prepareVMCall();
+
+ // Use a custom RetAddrEntry::Kind so DebugModeOSR can distinguish this call
+ // from other callVMs that might happen at this pc.
+ const RetAddrEntry::Kind kind = RetAddrEntry::Kind::InterruptCheck;
+
+ using Fn = bool (*)(JSContext*);
+ if (!callVM<Fn, InterruptCheck>(kind)) {
+ return false;
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emitWarmUpCounterIncrement() {
+ frame.assertSyncedStack();
+
+ // Record native code offset for OSR from Baseline Interpreter into Baseline
+ // JIT code. This is right before the warm-up check in the Baseline JIT code,
+ // to make sure we can immediately enter Ion if the script is warm enough or
+ // if --ion-eager is used.
+ JSScript* script = handler.script();
+ jsbytecode* pc = handler.pc();
+ if (JSOp(*pc) == JSOp::LoopHead) {
+ uint32_t pcOffset = script->pcToOffset(pc);
+ uint32_t nativeOffset = masm.currentOffset();
+ if (!handler.osrEntries().emplaceBack(pcOffset, nativeOffset)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ // Emit no warm-up counter increments if Ion is not enabled or if the script
+ // will never be Ion-compileable.
+ if (!handler.maybeIonCompileable()) {
+ return true;
+ }
+
+ Register scriptReg = R2.scratchReg();
+ Register countReg = R0.scratchReg();
+
+ // Load the ICScript* in scriptReg.
+ masm.loadPtr(frame.addressOfICScript(), scriptReg);
+
+ // Bump warm-up counter.
+ Address warmUpCounterAddr(scriptReg, ICScript::offsetOfWarmUpCount());
+ masm.load32(warmUpCounterAddr, countReg);
+ masm.add32(Imm32(1), countReg);
+ masm.store32(countReg, warmUpCounterAddr);
+
+ if (!JitOptions.disableInlining) {
+ // Consider trial inlining.
+ // Note: unlike other warmup thresholds, where we try to enter a
+ // higher tier whenever we are higher than a given warmup count,
+ // trial inlining triggers once when reaching the threshold.
+ Label noTrialInlining;
+ masm.branch32(Assembler::NotEqual, countReg,
+ Imm32(JitOptions.trialInliningWarmUpThreshold),
+ &noTrialInlining);
+ prepareVMCall();
+
+ masm.PushBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ if (!callVMNonOp<Fn, DoTrialInlining>()) {
+ return false;
+ }
+ // Reload registers potentially clobbered by the call.
+ masm.loadPtr(frame.addressOfICScript(), scriptReg);
+ masm.load32(warmUpCounterAddr, countReg);
+ masm.bind(&noTrialInlining);
+ }
+
+ if (JSOp(*pc) == JSOp::LoopHead) {
+ // If this is a loop where we can't OSR (for example because it's inside a
+ // catch or finally block), increment the warmup counter but don't attempt
+ // OSR (Ion/Warp only compiles the try block).
+ if (!handler.analysis().info(pc).loopHeadCanOsr) {
+ return true;
+ }
+ }
+
+ Label done;
+
+ const OptimizationInfo* info =
+ IonOptimizations.get(OptimizationLevel::Normal);
+ uint32_t warmUpThreshold = info->compilerWarmUpThreshold(script, pc);
+ masm.branch32(Assembler::LessThan, countReg, Imm32(warmUpThreshold), &done);
+
+ // Don't trigger Warp compilations from trial-inlined scripts.
+ Address depthAddr(scriptReg, ICScript::offsetOfDepth());
+ masm.branch32(Assembler::NotEqual, depthAddr, Imm32(0), &done);
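+ // (An ICScript created for trial inlining has a non-zero depth; only the
+ // outer ICScript embedded in the JitScript has depth 0.)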
+
+ // Load the IonScript* in scriptReg. We can load this from the ICScript*
+ // because it must be an outer ICScript embedded in the JitScript.
+ constexpr int32_t offset = -int32_t(JitScript::offsetOfICScript()) +
+ int32_t(JitScript::offsetOfIonScript());
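+ // scriptReg points at the ICScript embedded in the JitScript, so adding
+ // |offset| gives the address of the JitScript's IonScript field.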
+ masm.loadPtr(Address(scriptReg, offset), scriptReg);
+
+ // Do nothing if Ion is already compiling this script off-thread or if Ion has
+ // been disabled for this script.
+ masm.branchPtr(Assembler::Equal, scriptReg, ImmPtr(IonCompilingScriptPtr),
+ &done);
+ masm.branchPtr(Assembler::Equal, scriptReg, ImmPtr(IonDisabledScriptPtr),
+ &done);
+
+ // Try to compile and/or finish a compilation.
+ if (JSOp(*pc) == JSOp::LoopHead) {
+ // Try to OSR into Ion.
+ computeFrameSize(R0.scratchReg());
+
+ prepareVMCall();
+
+ pushBytecodePCArg();
+ pushArg(R0.scratchReg());
+ masm.PushBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, uint32_t, jsbytecode*,
+ IonOsrTempData**);
+ if (!callVM<Fn, IonCompileScriptForBaselineOSR>()) {
+ return false;
+ }
+
+ // The return register holds the IonOsrTempData*. Perform OSR if it's not
+ // nullptr.
+ static_assert(ReturnReg != OsrFrameReg,
+ "Code below depends on osrDataReg != OsrFrameReg");
+ Register osrDataReg = ReturnReg;
+ masm.branchTestPtr(Assembler::Zero, osrDataReg, osrDataReg, &done);
+
+ // Success! Switch from Baseline JIT code to Ion JIT code.
+
+ // At this point, stack looks like:
+ //
+ // +-> [...Calling-Frame...]
+ // | [...Actual-Args/ThisV/ArgCount/Callee...]
+ // | [Descriptor]
+ // | [Return-Addr]
+ // +---[Saved-FramePtr]
+ // [...Baseline-Frame...]
+
+#ifdef DEBUG
+ // Get a scratch register that's not osrDataReg or OsrFrameReg.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(osrDataReg);
+ regs.take(OsrFrameReg);
+
+ Register scratchReg = regs.takeAny();
+
+ // If profiler instrumentation is on, ensure that lastProfilingFrame is
+ // the frame currently being OSR-ed
+ {
+ Label checkOk;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &checkOk);
+ masm.loadPtr(AbsoluteAddress((void*)&cx->jitActivation), scratchReg);
+ masm.loadPtr(
+ Address(scratchReg, JitActivation::offsetOfLastProfilingFrame()),
+ scratchReg);
+
+ // It may be the case that we entered the baseline frame with
+ // profiling turned off, that profiling was then turned on in a call
+ // within a loop (i.e. a callee frame), and that we then returned to
+ // this frame and are now OSR-ing with profiling turned on. In this
+ // case, allow lastProfilingFrame to be null.
+ masm.branchPtr(Assembler::Equal, scratchReg, ImmWord(0), &checkOk);
+
+ masm.branchPtr(Assembler::Equal, FramePointer, scratchReg, &checkOk);
+ masm.assumeUnreachable("Baseline OSR lastProfilingFrame mismatch.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Restore the stack pointer so that the saved frame pointer is on top of
+ // the stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump into Ion.
+ masm.loadPtr(Address(osrDataReg, IonOsrTempData::offsetOfBaselineFrame()),
+ OsrFrameReg);
+ masm.jump(Address(osrDataReg, IonOsrTempData::offsetOfJitCode()));
+ } else {
+ prepareVMCall();
+
+ masm.PushBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ if (!callVMNonOp<Fn, IonCompileScriptForBaselineAtEntry>()) {
+ return false;
+ }
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emitWarmUpCounterIncrement() {
+ Register scriptReg = R2.scratchReg();
+ Register countReg = R0.scratchReg();
+
+ // Load the JitScript* in scriptReg.
+ loadScript(scriptReg);
+ masm.loadJitScript(scriptReg, scriptReg);
+
+ // Bump warm-up counter.
+ Address warmUpCounterAddr(scriptReg, JitScript::offsetOfWarmUpCount());
+ masm.load32(warmUpCounterAddr, countReg);
+ masm.add32(Imm32(1), countReg);
+ masm.store32(countReg, warmUpCounterAddr);
+
+ // If the script is warm enough for Baseline compilation, call into the VM to
+ // compile it.
+ Label done;
+ masm.branch32(Assembler::BelowOrEqual, countReg,
+ Imm32(JitOptions.baselineJitWarmUpThreshold), &done);
+ masm.branchPtr(Assembler::Equal,
+ Address(scriptReg, JitScript::offsetOfBaselineScript()),
+ ImmPtr(BaselineDisabledScriptPtr), &done);
+ {
+ prepareVMCall();
+
+ masm.PushBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, uint8_t**);
+ if (!callVM<Fn, BaselineCompileFromBaselineInterpreter>()) {
+ return false;
+ }
+
+ // If the function returned nullptr we either skipped compilation or were
+ // unable to compile the script. Continue running in the interpreter.
+ masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &done);
+
+ // Success! Switch from interpreter to JIT code by jumping to the
+ // corresponding code in the BaselineScript.
+ //
+ // This works because BaselineCompiler uses the same frame layout (stack is
+ // synced at OSR points) and BaselineCompileFromBaselineInterpreter has
+ // already cleared the RUNNING_IN_INTERPRETER flag for us.
+ // See BaselineFrame::prepareForBaselineInterpreterToJitOSR.
+ masm.jump(ReturnReg);
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool BaselineCompiler::emitDebugTrap() {
+ MOZ_ASSERT(compileDebugInstrumentation());
+ MOZ_ASSERT(frame.numUnsyncedSlots() == 0);
+
+ JSScript* script = handler.script();
+ bool enabled = DebugAPI::stepModeEnabled(script) ||
+ DebugAPI::hasBreakpointsAt(script, handler.pc());
+
+ // Emit patchable call to debug trap handler.
+ JitCode* handlerCode = cx->runtime()->jitRuntime()->debugTrapHandler(
+ cx, DebugTrapHandlerKind::Compiler);
+ if (!handlerCode) {
+ return false;
+ }
+
+ CodeOffset nativeOffset = masm.toggledCall(handlerCode, enabled);
+
+ uint32_t pcOffset = script->pcToOffset(handler.pc());
+ if (!debugTrapEntries_.emplaceBack(pcOffset, nativeOffset.offset())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Add a RetAddrEntry for the return offset -> pc mapping.
+ return handler.recordCallRetAddr(cx, RetAddrEntry::Kind::DebugTrap,
+ masm.currentOffset());
+}
+
+template <typename Handler>
+void BaselineCodeGen<Handler>::emitProfilerEnterFrame() {
+ // Store stack position to lastProfilingFrame variable, guarded by a toggled
+ // jump. Starts off initially disabled.
+ Label noInstrument;
+ CodeOffset toggleOffset = masm.toggledJump(&noInstrument);
+ masm.profilerEnterFrame(FramePointer, R0.scratchReg());
+ masm.bind(&noInstrument);
+
+ // Store the start offset in the appropriate location.
+ MOZ_ASSERT(!profilerEnterFrameToggleOffset_.bound());
+ profilerEnterFrameToggleOffset_ = toggleOffset;
+}
+
+template <typename Handler>
+void BaselineCodeGen<Handler>::emitProfilerExitFrame() {
+ // Store previous frame to lastProfilingFrame variable, guarded by a toggled
+ // jump. Starts off initially disabled.
+ Label noInstrument;
+ CodeOffset toggleOffset = masm.toggledJump(&noInstrument);
+ masm.profilerExitFrame();
+ masm.bind(&noInstrument);
+
+ // Store the start offset in the appropriate location.
+ MOZ_ASSERT(!profilerExitFrameToggleOffset_.bound());
+ profilerExitFrameToggleOffset_ = toggleOffset;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Nop() {
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_NopDestructuring() {
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_TryDestructuring() {
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Pop() {
+ frame.pop();
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_PopN() {
+ frame.popn(GET_UINT16(handler.pc()));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_PopN() {
+ LoadUint16Operand(masm, R0.scratchReg());
+ frame.popn(R0.scratchReg());
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_DupAt() {
+ frame.syncStack(0);
+
+ // DupAt takes a value on the stack and re-pushes it on top. It's like
+ // GetLocal but it addresses from the top of the stack instead of from the
+ // stack frame.
+
+ int depth = -(GET_UINT24(handler.pc()) + 1);
+ masm.loadValue(frame.addressOfStackValue(depth), R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_DupAt() {
+ LoadUint24Operand(masm, 0, R0.scratchReg());
+ masm.loadValue(frame.addressOfStackValue(R0.scratchReg()), R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Dup() {
+ // Keep top stack value in R0, sync the rest so that we can use R1. We use
+ // separate registers because every register can be used by at most one
+ // StackValue.
+ frame.popRegsAndSync(1);
+ masm.moveValue(R0, R1);
+
+ // inc/dec ops use Dup followed by Inc/Dec. Push R0 last to avoid a move.
+ frame.push(R1);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Dup2() {
+ frame.syncStack(0);
+
+ masm.loadValue(frame.addressOfStackValue(-2), R0);
+ masm.loadValue(frame.addressOfStackValue(-1), R1);
+
+ frame.push(R0);
+ frame.push(R1);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Swap() {
+ // Keep top stack values in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ frame.push(R1);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_Pick() {
+ frame.syncStack(0);
+
+ // Pick takes a value on the stack and moves it to the top.
+ // For instance, pick 2:
+ // before: A B C D E
+ // after : A B D E C
+
+ // First, move value at -(amount + 1) into R0.
+ int32_t depth = -(GET_INT8(handler.pc()) + 1);
+ masm.loadValue(frame.addressOfStackValue(depth), R0);
+
+ // Move the other values down.
+ depth++;
+ for (; depth < 0; depth++) {
+ Address source = frame.addressOfStackValue(depth);
+ Address dest = frame.addressOfStackValue(depth - 1);
+ masm.loadValue(source, R1);
+ masm.storeValue(R1, dest);
+ }
+
+ // Push R0.
+ frame.pop();
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_Pick() {
+ // First, move the value to move up into R0.
+ Register scratch = R2.scratchReg();
+ LoadUint8Operand(masm, scratch);
+ masm.loadValue(frame.addressOfStackValue(scratch), R0);
+
+ // Move the other values down.
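+ // |scratch| is decremented from the operand down to zero; each iteration
+ // copies the value at slot |scratch| one slot further from the top, so the
+ // top slot can then be overwritten with R0 below.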
+ Label top, done;
+ masm.bind(&top);
+ masm.branchSub32(Assembler::Signed, Imm32(1), scratch, &done);
+ {
+ masm.loadValue(frame.addressOfStackValue(scratch), R1);
+ masm.storeValue(R1, frame.addressOfStackValue(scratch, sizeof(Value)));
+ masm.jump(&top);
+ }
+
+ masm.bind(&done);
+
+ // Replace value on top of the stack with R0.
+ masm.storeValue(R0, frame.addressOfStackValue(-1));
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_Unpick() {
+ frame.syncStack(0);
+
+ // Unpick takes the value on top of the stack and moves it under the nth
+ // value.
+ // For instance, unpick 2:
+ // before: A B C D E
+ // after : A B E C D
+
+ // First, move value at -1 into R0.
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ MOZ_ASSERT(GET_INT8(handler.pc()) > 0,
+ "Interpreter code assumes JSOp::Unpick operand > 0");
+
+ // Move the other values up.
+ int32_t depth = -(GET_INT8(handler.pc()) + 1);
+ for (int32_t i = -1; i > depth; i--) {
+ Address source = frame.addressOfStackValue(i - 1);
+ Address dest = frame.addressOfStackValue(i);
+ masm.loadValue(source, R1);
+ masm.storeValue(R1, dest);
+ }
+
+ // Store R0 under the nth value.
+ Address dest = frame.addressOfStackValue(depth);
+ masm.storeValue(R0, dest);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_Unpick() {
+ Register scratch = R2.scratchReg();
+ LoadUint8Operand(masm, scratch);
+
+ // Move the top value into R0.
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ // Overwrite the nth stack value with R0 but first save the old value in R1.
+ masm.loadValue(frame.addressOfStackValue(scratch), R1);
+ masm.storeValue(R0, frame.addressOfStackValue(scratch));
+
+ // Now for each slot x in [n-1, 1] do the following:
+ //
+ // * Store the value in slot x in R0.
+ // * Store the value in the previous slot (now in R1) in slot x.
+ // * Move R0 to R1.
+
+#ifdef DEBUG
+ // Assert the operand > 0 so the branchSub32 below doesn't "underflow" to
+ // negative values.
+ {
+ Label ok;
+ masm.branch32(Assembler::GreaterThan, scratch, Imm32(0), &ok);
+ masm.assumeUnreachable("JSOp::Unpick with operand <= 0?");
+ masm.bind(&ok);
+ }
+#endif
+
+ Label top, done;
+ masm.bind(&top);
+ masm.branchSub32(Assembler::Zero, Imm32(1), scratch, &done);
+ {
+ // Overwrite stack slot x with slot x + 1, saving the old value in R1.
+ masm.loadValue(frame.addressOfStackValue(scratch), R0);
+ masm.storeValue(R1, frame.addressOfStackValue(scratch));
+ masm.moveValue(R0, R1);
+ masm.jump(&top);
+ }
+
+ // Finally, replace the value on top of the stack (slot 0) with R1. This is
+ // the value that used to be in slot 1.
+ masm.bind(&done);
+ masm.storeValue(R1, frame.addressOfStackValue(-1));
+ return true;
+}
+
+template <>
+void BaselineCompilerCodeGen::emitJump() {
+ jsbytecode* pc = handler.pc();
+ MOZ_ASSERT(IsJumpOpcode(JSOp(*pc)));
+ frame.assertSyncedStack();
+
+ jsbytecode* target = pc + GET_JUMP_OFFSET(pc);
+ masm.jump(handler.labelOf(target));
+}
+
+template <>
+void BaselineInterpreterCodeGen::emitJump() {
+ // We have to add the current pc's jump offset to the current pc. We can use
+ // R0 and R1 as scratch because we jump to the "next op" label so these
+ // registers aren't in use at this point.
+ Register scratch1 = R0.scratchReg();
+ Register scratch2 = R1.scratchReg();
+ Register pc = LoadBytecodePC(masm, scratch1);
+ LoadInt32OperandSignExtendToPtr(masm, pc, scratch2);
+ if (HasInterpreterPCReg()) {
+ masm.addPtr(scratch2, InterpreterPCReg);
+ } else {
+ masm.addPtr(pc, scratch2);
+ masm.storePtr(scratch2, frame.addressOfInterpreterPC());
+ }
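+ // Re-enter the interpreter's dispatch code for the op at the updated pc.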
+ masm.jump(handler.interpretOpWithPCRegLabel());
+}
+
+template <>
+void BaselineCompilerCodeGen::emitTestBooleanTruthy(bool branchIfTrue,
+ ValueOperand val) {
+ jsbytecode* pc = handler.pc();
+ MOZ_ASSERT(IsJumpOpcode(JSOp(*pc)));
+ frame.assertSyncedStack();
+
+ jsbytecode* target = pc + GET_JUMP_OFFSET(pc);
+ masm.branchTestBooleanTruthy(branchIfTrue, val, handler.labelOf(target));
+}
+
+template <>
+void BaselineInterpreterCodeGen::emitTestBooleanTruthy(bool branchIfTrue,
+ ValueOperand val) {
+ Label done;
+ masm.branchTestBooleanTruthy(!branchIfTrue, val, &done);
+ emitJump();
+ masm.bind(&done);
+}
+
+template <>
+template <typename F1, typename F2>
+[[nodiscard]] bool BaselineCompilerCodeGen::emitTestScriptFlag(
+ JSScript::ImmutableFlags flag, const F1& ifSet, const F2& ifNotSet,
+ Register scratch) {
+ if (handler.script()->hasFlag(flag)) {
+ return ifSet();
+ }
+ return ifNotSet();
+}
+
+template <>
+template <typename F1, typename F2>
+[[nodiscard]] bool BaselineInterpreterCodeGen::emitTestScriptFlag(
+ JSScript::ImmutableFlags flag, const F1& ifSet, const F2& ifNotSet,
+ Register scratch) {
+ Label flagNotSet, done;
+ loadScript(scratch);
+ masm.branchTest32(Assembler::Zero,
+ Address(scratch, JSScript::offsetOfImmutableFlags()),
+ Imm32(uint32_t(flag)), &flagNotSet);
+ {
+ if (!ifSet()) {
+ return false;
+ }
+ masm.jump(&done);
+ }
+ masm.bind(&flagNotSet);
+ {
+ if (!ifNotSet()) {
+ return false;
+ }
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <>
+template <typename F>
+[[nodiscard]] bool BaselineCompilerCodeGen::emitTestScriptFlag(
+ JSScript::ImmutableFlags flag, bool value, const F& emit,
+ Register scratch) {
+ if (handler.script()->hasFlag(flag) == value) {
+ return emit();
+ }
+ return true;
+}
+
+template <>
+template <typename F>
+[[nodiscard]] bool BaselineCompilerCodeGen::emitTestScriptFlag(
+ JSScript::MutableFlags flag, bool value, const F& emit, Register scratch) {
+ if (handler.script()->hasFlag(flag) == value) {
+ return emit();
+ }
+ return true;
+}
+
+template <>
+template <typename F>
+[[nodiscard]] bool BaselineInterpreterCodeGen::emitTestScriptFlag(
+ JSScript::ImmutableFlags flag, bool value, const F& emit,
+ Register scratch) {
+ Label done;
+ loadScript(scratch);
+ masm.branchTest32(value ? Assembler::Zero : Assembler::NonZero,
+ Address(scratch, JSScript::offsetOfImmutableFlags()),
+ Imm32(uint32_t(flag)), &done);
+ {
+ if (!emit()) {
+ return false;
+ }
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <>
+template <typename F>
+[[nodiscard]] bool BaselineInterpreterCodeGen::emitTestScriptFlag(
+ JSScript::MutableFlags flag, bool value, const F& emit, Register scratch) {
+ Label done;
+ loadScript(scratch);
+ masm.branchTest32(value ? Assembler::Zero : Assembler::NonZero,
+ Address(scratch, JSScript::offsetOfMutableFlags()),
+ Imm32(uint32_t(flag)), &done);
+ {
+ if (!emit()) {
+ return false;
+ }
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Goto() {
+ frame.syncStack(0);
+ emitJump();
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitTest(bool branchIfTrue) {
+ bool knownBoolean = frame.stackValueHasKnownType(-1, JSVAL_TYPE_BOOLEAN);
+
+ // Keep top stack value in R0.
+ frame.popRegsAndSync(1);
+
+ if (!knownBoolean && !emitNextIC()) {
+ return false;
+ }
+
+ // IC will leave a BooleanValue in R0, just need to branch on it.
+ emitTestBooleanTruthy(branchIfTrue, R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_JumpIfFalse() {
+ return emitTest(false);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_JumpIfTrue() {
+ return emitTest(true);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitAndOr(bool branchIfTrue) {
+ bool knownBoolean = frame.stackValueHasKnownType(-1, JSVAL_TYPE_BOOLEAN);
+
+ // And and Or leave the original value on the stack.
+ frame.syncStack(0);
+
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+ if (!knownBoolean && !emitNextIC()) {
+ return false;
+ }
+
+ emitTestBooleanTruthy(branchIfTrue, R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_And() {
+ return emitAndOr(false);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Or() {
+ return emitAndOr(true);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Coalesce() {
+ // Coalesce leaves the original value on the stack.
+ frame.syncStack(0);
+
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ Label undefinedOrNull;
+
+ masm.branchTestUndefined(Assembler::Equal, R0, &undefinedOrNull);
+ masm.branchTestNull(Assembler::Equal, R0, &undefinedOrNull);
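+ // Neither undefined nor null: jump to the target, skipping the right-hand
+ // side of the ?? expression.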
+ emitJump();
+
+ masm.bind(&undefinedOrNull);
+ // fall through
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Not() {
+ bool knownBoolean = frame.stackValueHasKnownType(-1, JSVAL_TYPE_BOOLEAN);
+
+ // Keep top stack value in R0.
+ frame.popRegsAndSync(1);
+
+ if (!knownBoolean && !emitNextIC()) {
+ return false;
+ }
+
+ masm.notBoolean(R0);
+
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Pos() {
+ return emitUnaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_ToNumeric() {
+ return emitUnaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_LoopHead() {
+ if (!emit_JumpTarget()) {
+ return false;
+ }
+ if (!emitInterruptCheck()) {
+ return false;
+ }
+ if (!emitWarmUpCounterIncrement()) {
+ return false;
+ }
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Void() {
+ frame.pop();
+ frame.push(UndefinedValue());
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Undefined() {
+ frame.push(UndefinedValue());
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Hole() {
+ frame.push(MagicValue(JS_ELEMENTS_HOLE));
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Null() {
+ frame.push(NullValue());
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckIsObj() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ Label ok;
+ masm.branchTestObject(Assembler::Equal, R0, &ok);
+
+ prepareVMCall();
+
+ pushUint8BytecodeOperandArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, CheckIsObjectKind);
+ if (!callVM<Fn, ThrowCheckIsObject>()) {
+ return false;
+ }
+
+ masm.bind(&ok);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckThis() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ return emitCheckThis(R0);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckThisReinit() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ return emitCheckThis(R0, /* reinit = */ true);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitCheckThis(ValueOperand val, bool reinit) {
+ Label thisOK;
+ if (reinit) {
+ masm.branchTestMagic(Assembler::Equal, val, &thisOK);
+ } else {
+ masm.branchTestMagic(Assembler::NotEqual, val, &thisOK);
+ }
+
+ prepareVMCall();
+
+ if (reinit) {
+ using Fn = bool (*)(JSContext*);
+ if (!callVM<Fn, ThrowInitializedThis>()) {
+ return false;
+ }
+ } else {
+ using Fn = bool (*)(JSContext*);
+ if (!callVM<Fn, ThrowUninitializedThis>()) {
+ return false;
+ }
+ }
+
+ masm.bind(&thisOK);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckReturn() {
+ MOZ_ASSERT_IF(handler.maybeScript(),
+ handler.maybeScript()->isDerivedClassConstructor());
+
+ // Load |this| in R0, return value in R1.
+ frame.popRegsAndSync(1);
+ emitLoadReturnValue(R1);
+
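+ // A derived class constructor must return an object or undefined, and when
+ // it returns undefined |this| must already be initialized (not the magic
+ // uninitialized value); anything else throws below.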
+ Label done, returnBad, checkThis;
+ masm.branchTestObject(Assembler::NotEqual, R1, &checkThis);
+ {
+ masm.moveValue(R1, R0);
+ masm.jump(&done);
+ }
+ masm.bind(&checkThis);
+ masm.branchTestUndefined(Assembler::NotEqual, R1, &returnBad);
+ masm.branchTestMagic(Assembler::NotEqual, R0, &done);
+ masm.bind(&returnBad);
+
+ prepareVMCall();
+ pushArg(R1);
+
+ using Fn = bool (*)(JSContext*, HandleValue);
+ if (!callVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>()) {
+ return false;
+ }
+ masm.assumeUnreachable("Should throw on bad derived constructor return");
+
+ masm.bind(&done);
+
+ // Push |rval| or |this| onto the stack.
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_FunctionThis() {
+ MOZ_ASSERT_IF(handler.maybeFunction(), !handler.maybeFunction()->isArrow());
+
+ frame.pushThis();
+
+ auto boxThis = [this]() {
+ // Load |thisv| in R0. Skip the call if it's already an object.
+ Label skipCall;
+ frame.popRegsAndSync(1);
+ masm.branchTestObject(Assembler::Equal, R0, &skipCall);
+
+ prepareVMCall();
+ masm.loadBaselineFramePtr(FramePointer, R1.scratchReg());
+
+ pushArg(R1.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, MutableHandleValue);
+ if (!callVM<Fn, BaselineGetFunctionThis>()) {
+ return false;
+ }
+
+ masm.bind(&skipCall);
+ frame.push(R0);
+ return true;
+ };
+
+ // In strict mode code, |this| is left alone.
+ return emitTestScriptFlag(JSScript::ImmutableFlags::Strict, false, boxThis,
+ R2.scratchReg());
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GlobalThis() {
+ frame.syncStack(0);
+
+ loadGlobalThisValue(R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_NonSyntacticGlobalThis() {
+ frame.syncStack(0);
+
+ prepareVMCall();
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ using Fn = void (*)(JSContext*, HandleObject, MutableHandleValue);
+ if (!callVM<Fn, GetNonSyntacticGlobalThis>()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_True() {
+ frame.push(BooleanValue(true));
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_False() {
+ frame.push(BooleanValue(false));
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Zero() {
+ frame.push(Int32Value(0));
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_One() {
+ frame.push(Int32Value(1));
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_Int8() {
+ frame.push(Int32Value(GET_INT8(handler.pc())));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_Int8() {
+ LoadInt8Operand(masm, R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_Int32() {
+ frame.push(Int32Value(GET_INT32(handler.pc())));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_Int32() {
+ LoadInt32Operand(masm, R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_Uint16() {
+ frame.push(Int32Value(GET_UINT16(handler.pc())));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_Uint16() {
+ LoadUint16Operand(masm, R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_Uint24() {
+ frame.push(Int32Value(GET_UINT24(handler.pc())));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_Uint24() {
+ LoadUint24Operand(masm, 0, R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_Double() {
+ frame.push(GET_INLINE_VALUE(handler.pc()));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_Double() {
+ LoadInlineValueOperand(masm, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_BigInt() {
+ BigInt* bi = handler.script()->getBigInt(handler.pc());
+ frame.push(BigIntValue(bi));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_BigInt() {
+ Register scratch1 = R0.scratchReg();
+ Register scratch2 = R1.scratchReg();
+ loadScriptGCThing(ScriptGCThingType::BigInt, scratch1, scratch2);
+ masm.tagValue(JSVAL_TYPE_BIGINT, scratch1, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_String() {
+ frame.push(StringValue(handler.script()->getString(handler.pc())));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_String() {
+ Register scratch1 = R0.scratchReg();
+ Register scratch2 = R1.scratchReg();
+ loadScriptGCThing(ScriptGCThingType::String, scratch1, scratch2);
+ masm.tagValue(JSVAL_TYPE_STRING, scratch1, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_Symbol() {
+ unsigned which = GET_UINT8(handler.pc());
+ JS::Symbol* sym = cx->runtime()->wellKnownSymbols->get(which);
+ frame.push(SymbolValue(sym));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_Symbol() {
+ Register scratch1 = R0.scratchReg();
+ Register scratch2 = R1.scratchReg();
+ LoadUint8Operand(masm, scratch1);
+
+ masm.movePtr(ImmPtr(cx->runtime()->wellKnownSymbols), scratch2);
+ masm.loadPtr(BaseIndex(scratch2, scratch1, ScalePointer), scratch1);
+
+ masm.tagValue(JSVAL_TYPE_SYMBOL, scratch1, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_Object() {
+ frame.push(ObjectValue(*handler.script()->getObject(handler.pc())));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_Object() {
+ Register scratch1 = R0.scratchReg();
+ Register scratch2 = R1.scratchReg();
+ loadScriptGCThing(ScriptGCThingType::Object, scratch1, scratch2);
+ masm.tagValue(JSVAL_TYPE_OBJECT, scratch1, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CallSiteObj() {
+ return emit_Object();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_RegExp() {
+ prepareVMCall();
+ pushScriptGCThingArg(ScriptGCThingType::RegExp, R0.scratchReg(),
+ R1.scratchReg());
+
+ using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
+ if (!callVM<Fn, CloneRegExpObject>()) {
+ return false;
+ }
+
+ // Box and push return value.
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+#ifdef ENABLE_RECORD_TUPLE
+# define UNSUPPORTED_OPCODE(OP) \
+ template <typename Handler> \
+ bool BaselineCodeGen<Handler>::emit_##OP() { \
+ MOZ_CRASH("Record and Tuple are not supported by jit"); \
+ return false; \
+ }
+
+UNSUPPORTED_OPCODE(InitRecord)
+UNSUPPORTED_OPCODE(AddRecordProperty)
+UNSUPPORTED_OPCODE(AddRecordSpread)
+UNSUPPORTED_OPCODE(FinishRecord)
+UNSUPPORTED_OPCODE(InitTuple)
+UNSUPPORTED_OPCODE(AddTupleElement)
+UNSUPPORTED_OPCODE(FinishTuple)
+
+# undef UNSUPPORTED_OPCODE
+#endif
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Lambda() {
+ prepareVMCall();
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ pushArg(R0.scratchReg());
+ pushScriptGCThingArg(ScriptGCThingType::Function, R0.scratchReg(),
+ R1.scratchReg());
+
+ using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
+ if (!callVM<Fn, js::Lambda>()) {
+ return false;
+ }
+
+ // Box and push return value.
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetFunName() {
+ frame.popRegsAndSync(2);
+
+ frame.push(R0);
+ frame.syncStack(0);
+
+ masm.unboxObject(R0, R0.scratchReg());
+
+ prepareVMCall();
+
+ pushUint8BytecodeOperandArg(R2.scratchReg());
+ pushArg(R1);
+ pushArg(R0.scratchReg());
+
+ using Fn =
+ bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
+ return callVM<Fn, SetFunctionName>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_BitOr() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_BitXor() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_BitAnd() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Lsh() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Rsh() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Ursh() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Add() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Sub() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Mul() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Div() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Mod() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Pow() {
+ return emitBinaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitBinaryArith() {
+ // Keep top JSStack value in R0 and R2
+ frame.popRegsAndSync(2);
+
+ // Call IC
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitUnaryArith() {
+ // Keep top stack value in R0.
+ frame.popRegsAndSync(1);
+
+ // Call IC
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_BitNot() {
+ return emitUnaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Neg() {
+ return emitUnaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Inc() {
+ return emitUnaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Dec() {
+ return emitUnaryArith();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Lt() {
+ return emitCompare();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Le() {
+ return emitCompare();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Gt() {
+ return emitCompare();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Ge() {
+ return emitCompare();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Eq() {
+ return emitCompare();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Ne() {
+ return emitCompare();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitCompare() {
+ // Keep top JSStack value in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictEq() {
+ return emitCompare();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictNe() {
+ return emitCompare();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Case() {
+ frame.popRegsAndSync(1);
+
+ Label done;
+ masm.branchTestBooleanTruthy(/* branchIfTrue */ false, R0, &done);
+ {
+ // Pop the switch value if the case matches.
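+ // The remaining stack values were synced by popRegsAndSync, so the switch
+ // value lives on the machine stack and can be popped by adjusting the stack
+ // pointer directly before the jump.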
+ masm.addToStackPtr(Imm32(sizeof(Value)));
+ emitJump();
+ }
+ masm.bind(&done);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Default() {
+ frame.pop();
+ return emit_Goto();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Lineno() {
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_NewArray() {
+ frame.syncStack(0);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+static void MarkElementsNonPackedIfHoleValue(MacroAssembler& masm,
+ Register elements,
+ ValueOperand val) {
+ Label notHole;
+ masm.branchTestMagic(Assembler::NotEqual, val, &notHole);
+ {
+ Address elementsFlags(elements, ObjectElements::offsetOfFlags());
+ masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
+ }
+ masm.bind(&notHole);
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_InitElemArray() {
+ // Pop value into R0, keep the object on the stack.
+ frame.popRegsAndSync(1);
+
+ // Load object in R2.
+ Register obj = R2.scratchReg();
+ masm.unboxObject(frame.addressOfStackValue(-1), obj);
+
+ // Load index in R1.
+ Register index = R1.scratchReg();
+ LoadInt32Operand(masm, index);
+
+ // Store the Value. No pre-barrier because this is an initialization.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), obj);
+ masm.storeValue(R0, BaseObjectElementIndex(obj, index));
+
+ // Bump initialized length.
+ Address initLength(obj, ObjectElements::offsetOfInitializedLength());
+ masm.add32(Imm32(1), index);
+ masm.store32(index, initLength);
+
+ // Mark elements as NON_PACKED if we stored the hole value.
+ MarkElementsNonPackedIfHoleValue(masm, obj, R0);
+
+ // Post-barrier.
+ Label skipBarrier;
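+ // The index is no longer needed, so its register can double as the barrier
+ // scratch register.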
+ Register scratch = index;
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, R0, scratch, &skipBarrier);
+ {
+ masm.unboxObject(frame.addressOfStackValue(-1), obj);
+ masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);
+ MOZ_ASSERT(obj == R2.scratchReg(), "post barrier expects object in R2");
+ masm.call(&postBarrierSlot_);
+ }
+ masm.bind(&skipBarrier);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_InitElemArray() {
+ // Pop value into R0, keep the object on the stack.
+ Maybe<Value> knownValue = frame.knownStackValue(-1);
+ frame.popRegsAndSync(1);
+
+ // Load object in R2.
+ Register obj = R2.scratchReg();
+ masm.unboxObject(frame.addressOfStackValue(-1), obj);
+
+ uint32_t index = GET_UINT32(handler.pc());
+ MOZ_ASSERT(index <= INT32_MAX,
+ "the bytecode emitter must fail to compile code that would "
+ "produce an index exceeding int32_t range");
+
+ // Store the Value. No pre-barrier because this is an initialization.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), obj);
+ masm.storeValue(R0, Address(obj, index * sizeof(Value)));
+
+ // Bump initialized length.
+ Address initLength(obj, ObjectElements::offsetOfInitializedLength());
+ masm.store32(Imm32(index + 1), initLength);
+
+ // Mark elements as NON_PACKED if we stored the hole value. We know this
+ // statically except when debugger instrumentation is enabled because that
+ // forces a stack-sync (which discards constants and known types) for each op.
+ if (knownValue && knownValue->isMagic(JS_ELEMENTS_HOLE)) {
+ Address elementsFlags(obj, ObjectElements::offsetOfFlags());
+ masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
+ } else if (handler.compileDebugInstrumentation()) {
+ MarkElementsNonPackedIfHoleValue(masm, obj, R0);
+ } else {
+#ifdef DEBUG
+ Label notHole;
+ masm.branchTestMagic(Assembler::NotEqual, R0, &notHole);
+ masm.assumeUnreachable("Unexpected hole value");
+ masm.bind(&notHole);
+#endif
+ }
+
+ // Post-barrier.
+ if (knownValue) {
+ MOZ_ASSERT(JS::GCPolicy<Value>::isTenured(*knownValue));
+ } else {
+ Label skipBarrier;
+ Register scratch = R1.scratchReg();
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, R0, scratch,
+ &skipBarrier);
+ {
+ masm.unboxObject(frame.addressOfStackValue(-1), obj);
+ masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch,
+ &skipBarrier);
+ MOZ_ASSERT(obj == R2.scratchReg(), "post barrier expects object in R2");
+ masm.call(&postBarrierSlot_);
+ }
+ masm.bind(&skipBarrier);
+ }
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_NewObject() {
+ return emitNewObject();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_NewInit() {
+ return emitNewObject();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitNewObject() {
+ frame.syncStack(0);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitElem() {
+ // Store RHS in the scratch slot.
+ frame.storeStackValue(-1, frame.addressOfScratchValue(), R2);
+ frame.pop();
+
+ // Keep object and index in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Push the object to store the result of the IC.
+ frame.push(R0);
+ frame.syncStack(0);
+
+ // Keep RHS on the stack.
+ frame.pushScratchValue();
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Pop the rhs, so that the object is on the top of the stack.
+ frame.pop();
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitHiddenElem() {
+ return emit_InitElem();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitLockedElem() {
+ return emit_InitElem();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_MutateProto() {
+ // Keep values on the stack for the decompiler.
+ frame.syncStack(0);
+
+ masm.unboxObject(frame.addressOfStackValue(-2), R0.scratchReg());
+ masm.loadValue(frame.addressOfStackValue(-1), R1);
+
+ prepareVMCall();
+
+ pushArg(R1);
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, Handle<PlainObject*>, HandleValue);
+ if (!callVM<Fn, MutatePrototype>()) {
+ return false;
+ }
+
+ frame.pop();
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitProp() {
+ // Load lhs in R0, rhs in R1.
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-2), R0);
+ masm.loadValue(frame.addressOfStackValue(-1), R1);
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Leave the object on the stack.
+ frame.pop();
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitLockedProp() {
+ return emit_InitProp();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitHiddenProp() {
+ return emit_InitProp();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetElem() {
+ // Keep top two stack values in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetElemSuper() {
+ // Store obj in the scratch slot.
+ frame.storeStackValue(-1, frame.addressOfScratchValue(), R2);
+ frame.pop();
+
+ // Keep receiver and index in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Keep obj on the stack.
+ frame.pushScratchValue();
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.pop();
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetElem() {
+ // Store RHS in the scratch slot.
+ frame.storeStackValue(-1, frame.addressOfScratchValue(), R2);
+ frame.pop();
+
+ // Keep object and index in R0 and R1.
+ frame.popRegsAndSync(2);
+
+ // Keep RHS on the stack.
+ frame.pushScratchValue();
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictSetElem() {
+ return emit_SetElem();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitSetElemSuper(bool strict) {
+ // Incoming stack is |receiver, propval, obj, rval|. We need to shuffle the
+ // stack so that rval is left on top when the operation is complete.
+
+ // Pop rval into R0, then load receiver into R1 and replace with rval.
+ frame.popRegsAndSync(1);
+ masm.loadValue(frame.addressOfStackValue(-3), R1);
+ masm.storeValue(R0, frame.addressOfStackValue(-3));
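+ // The stack is now |rval, propval, obj|; the popn(2) below leaves rval on
+ // top.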
+
+ prepareVMCall();
+
+ pushArg(Imm32(strict));
+ pushArg(R0); // rval
+ masm.loadValue(frame.addressOfStackValue(-2), R0);
+ pushArg(R0); // propval
+ pushArg(R1); // receiver
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+ pushArg(R0); // obj
+
+ using Fn = bool (*)(JSContext*, HandleValue, HandleValue, HandleValue,
+ HandleValue, bool);
+ if (!callVM<Fn, js::SetElementSuper>()) {
+ return false;
+ }
+
+ frame.popn(2);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetElemSuper() {
+ return emitSetElemSuper(/* strict = */ false);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictSetElemSuper() {
+ return emitSetElemSuper(/* strict = */ true);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitDelElem(bool strict) {
+ // Keep values on the stack for the decompiler.
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-2), R0);
+ masm.loadValue(frame.addressOfStackValue(-1), R1);
+
+ prepareVMCall();
+
+ pushArg(R1);
+ pushArg(R0);
+
+ using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
+ if (strict) {
+ if (!callVM<Fn, DelElemOperation<true>>()) {
+ return false;
+ }
+ } else {
+ if (!callVM<Fn, DelElemOperation<false>>()) {
+ return false;
+ }
+ }
+
+ masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R1);
+ frame.popn(2);
+ frame.push(R1, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_DelElem() {
+ return emitDelElem(/* strict = */ false);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictDelElem() {
+ return emitDelElem(/* strict = */ true);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_In() {
+ frame.popRegsAndSync(2);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_HasOwn() {
+ frame.popRegsAndSync(2);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckPrivateField() {
+ // Keep key and val on the stack.
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-2), R0);
+ masm.loadValue(frame.addressOfStackValue(-1), R1);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_NewPrivateName() {
+ prepareVMCall();
+
+ pushScriptNameArg(R0.scratchReg(), R1.scratchReg());
+
+ using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
+ if (!callVM<Fn, NewPrivateName>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_SYMBOL, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetGName() {
+ frame.syncStack(0);
+
+ loadGlobalLexicalEnvironment(R0.scratchReg());
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::tryOptimizeBindGlobalName() {
+ JSScript* script = handler.script();
+ MOZ_ASSERT(!script->hasNonSyntacticScope());
+
+ Rooted<GlobalObject*> global(cx, &script->global());
+ Rooted<PropertyName*> name(cx, script->getName(handler.pc()));
+ if (JSObject* binding = MaybeOptimizeBindGlobalName(cx, global, name)) {
+ frame.push(ObjectValue(*binding));
+ return true;
+ }
+ return false;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::tryOptimizeBindGlobalName() {
+ // Interpreter doesn't optimize simple BindGNames.
+ return false;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_BindGName() {
+ if (tryOptimizeBindGlobalName()) {
+ return true;
+ }
+
+ frame.syncStack(0);
+ loadGlobalLexicalEnvironment(R0.scratchReg());
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_BindVar() {
+ frame.syncStack(0);
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+
+ using Fn = JSObject* (*)(JSContext*, JSObject*);
+ if (!callVM<Fn, BindVarOperation>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetProp() {
+ // Keep lhs in R0, rhs in R1.
+ frame.popRegsAndSync(2);
+
+ // Keep RHS on the stack.
+ frame.push(R1);
+ frame.syncStack(0);
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictSetProp() {
+ return emit_SetProp();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetName() {
+ return emit_SetProp();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictSetName() {
+ return emit_SetProp();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetGName() {
+ return emit_SetProp();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictSetGName() {
+ return emit_SetProp();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitSetPropSuper(bool strict) {
+  // Incoming stack is |receiver, obj, rval|. We need to shuffle the stack so
+  // that only rval is left when the operation is complete.
+
+ // Pop rval into R0, then load receiver into R1 and replace with rval.
+ frame.popRegsAndSync(1);
+ masm.loadValue(frame.addressOfStackValue(-2), R1);
+ masm.storeValue(R0, frame.addressOfStackValue(-2));
+
+ prepareVMCall();
+
+ pushArg(Imm32(strict));
+ pushArg(R0); // rval
+ pushScriptNameArg(R0.scratchReg(), R2.scratchReg());
+ pushArg(R1); // receiver
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+ pushArg(R0); // obj
+
+ using Fn = bool (*)(JSContext*, HandleValue, HandleValue,
+ Handle<PropertyName*>, HandleValue, bool);
+ if (!callVM<Fn, js::SetPropertySuper>()) {
+ return false;
+ }
+
+ frame.pop();
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetPropSuper() {
+ return emitSetPropSuper(/* strict = */ false);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictSetPropSuper() {
+ return emitSetPropSuper(/* strict = */ true);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetProp() {
+ // Keep object in R0.
+ frame.popRegsAndSync(1);
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetBoundName() {
+ return emit_GetProp();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetPropSuper() {
+ // Receiver -> R1, ObjectOrNull -> R0
+ frame.popRegsAndSync(1);
+ masm.loadValue(frame.addressOfStackValue(-1), R1);
+ frame.pop();
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitDelProp(bool strict) {
+ // Keep value on the stack for the decompiler.
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ prepareVMCall();
+
+ pushScriptNameArg(R1.scratchReg(), R2.scratchReg());
+ pushArg(R0);
+
+ using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
+ if (strict) {
+ if (!callVM<Fn, DelPropOperation<true>>()) {
+ return false;
+ }
+ } else {
+ if (!callVM<Fn, DelPropOperation<false>>()) {
+ return false;
+ }
+ }
+
+ masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R1);
+ frame.pop();
+ frame.push(R1, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_DelProp() {
+ return emitDelProp(/* strict = */ false);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictDelProp() {
+ return emitDelProp(/* strict = */ true);
+}
+
+template <>
+void BaselineCompilerCodeGen::getEnvironmentCoordinateObject(Register reg) {
+ EnvironmentCoordinate ec(handler.pc());
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), reg);
+ for (unsigned i = ec.hops(); i; i--) {
+ masm.unboxObject(
+ Address(reg, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
+ }
+}
+
+template <>
+void BaselineInterpreterCodeGen::getEnvironmentCoordinateObject(Register reg) {
+ MOZ_CRASH("Shouldn't call this for interpreter");
+}
+
+template <>
+Address BaselineCompilerCodeGen::getEnvironmentCoordinateAddressFromObject(
+ Register objReg, Register reg) {
+ EnvironmentCoordinate ec(handler.pc());
+
+ if (EnvironmentObject::nonExtensibleIsFixedSlot(ec)) {
+ return Address(objReg, NativeObject::getFixedSlotOffset(ec.slot()));
+ }
+
+ uint32_t slot = EnvironmentObject::nonExtensibleDynamicSlotIndex(ec);
+ masm.loadPtr(Address(objReg, NativeObject::offsetOfSlots()), reg);
+ return Address(reg, slot * sizeof(Value));
+}
+
+template <>
+Address BaselineInterpreterCodeGen::getEnvironmentCoordinateAddressFromObject(
+ Register objReg, Register reg) {
+ MOZ_CRASH("Shouldn't call this for interpreter");
+}
+
+template <typename Handler>
+Address BaselineCodeGen<Handler>::getEnvironmentCoordinateAddress(
+ Register reg) {
+ getEnvironmentCoordinateObject(reg);
+ return getEnvironmentCoordinateAddressFromObject(reg, reg);
+}
+
+// For a JOF_ENVCOORD op, load the number of hops from the bytecode and skip
+// that many environment objects on the environment chain.
+static void LoadAliasedVarEnv(MacroAssembler& masm, Register env,
+ Register scratch) {
+ static_assert(ENVCOORD_HOPS_LEN == 1,
+ "Code assumes number of hops is stored in uint8 operand");
+ LoadUint8Operand(masm, scratch);
+
+ Label top, done;
+ masm.branchTest32(Assembler::Zero, scratch, scratch, &done);
+ masm.bind(&top);
+ {
+ Address nextEnv(env, EnvironmentObject::offsetOfEnclosingEnvironment());
+ masm.unboxObject(nextEnv, env);
+ masm.branchSub32(Assembler::NonZero, Imm32(1), scratch, &top);
+ }
+ masm.bind(&done);
+}
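+
+// In effect, LoadAliasedVarEnv is the runtime analogue of the unrolled loop in
+// getEnvironmentCoordinateObject above. A minimal sketch, with illustrative
+// names:
+//
+//   uint32_t hops = <uint8 hops operand of the JOF_ENVCOORD op>;
+//   while (hops--) {
+//     env = <enclosing environment of env>;
+//   }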
+
+template <>
+void BaselineCompilerCodeGen::emitGetAliasedVar(ValueOperand dest) {
+ frame.syncStack(0);
+
+ Address address = getEnvironmentCoordinateAddress(R0.scratchReg());
+ masm.loadValue(address, dest);
+}
+
+template <>
+void BaselineInterpreterCodeGen::emitGetAliasedVar(ValueOperand dest) {
+ Register env = R0.scratchReg();
+ Register scratch = R1.scratchReg();
+
+ // Load the right environment object.
+ masm.loadPtr(frame.addressOfEnvironmentChain(), env);
+ LoadAliasedVarEnv(masm, env, scratch);
+
+ // Load the slot index.
+ static_assert(ENVCOORD_SLOT_LEN == 3,
+ "Code assumes slot is stored in uint24 operand");
+ LoadUint24Operand(masm, ENVCOORD_HOPS_LEN, scratch);
+
+ // Load the Value from a fixed or dynamic slot.
+ // See EnvironmentObject::nonExtensibleIsFixedSlot.
+ Label isDynamic, done;
+ masm.branch32(Assembler::AboveOrEqual, scratch,
+ Imm32(NativeObject::MAX_FIXED_SLOTS), &isDynamic);
+ {
+ uint32_t offset = NativeObject::getFixedSlotOffset(0);
+ masm.loadValue(BaseValueIndex(env, scratch, offset), dest);
+ masm.jump(&done);
+ }
+ masm.bind(&isDynamic);
+ {
+ masm.loadPtr(Address(env, NativeObject::offsetOfSlots()), env);
+
+ // Use an offset to subtract the number of fixed slots.
+ int32_t offset = -int32_t(NativeObject::MAX_FIXED_SLOTS * sizeof(Value));
+ masm.loadValue(BaseValueIndex(env, scratch, offset), dest);
+ }
+ masm.bind(&done);
+}
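+
+// The fixed/dynamic split above mirrors how NativeObject stores slots: indices
+// below NativeObject::MAX_FIXED_SLOTS are read from the object's inline
+// (fixed) slots, while larger indices live in the out-of-line slots array. The
+// negative offset of MAX_FIXED_SLOTS * sizeof(Value) rebases the slot index so
+// that the first dynamic slot maps to element 0 of that array (see
+// EnvironmentObject::nonExtensibleIsFixedSlot).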
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitGetAliasedDebugVar(ValueOperand dest) {
+ frame.syncStack(0);
+ Register env = R0.scratchReg();
+ // Load the right environment object.
+ masm.loadPtr(frame.addressOfEnvironmentChain(), env);
+
+ prepareVMCall();
+ pushBytecodePCArg();
+ pushArg(env);
+
+ using Fn =
+ bool (*)(JSContext*, JSObject* env, jsbytecode*, MutableHandleValue);
+ return callVM<Fn, LoadAliasedDebugVar>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetAliasedDebugVar() {
+ if (!emitGetAliasedDebugVar(R0)) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetAliasedVar() {
+ emitGetAliasedVar(R0);
+
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_SetAliasedVar() {
+ // Keep rvalue in R0.
+ frame.popRegsAndSync(1);
+ Register objReg = R2.scratchReg();
+
+ getEnvironmentCoordinateObject(objReg);
+ Address address =
+ getEnvironmentCoordinateAddressFromObject(objReg, R1.scratchReg());
+ masm.guardedCallPreBarrier(address, MIRType::Value);
+ masm.storeValue(R0, address);
+ frame.push(R0);
+
+ // Only R0 is live at this point.
+  // The environment coordinate object is already in R2.scratchReg().
+ Register temp = R1.scratchReg();
+
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, objReg, temp, &skipBarrier);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, R0, temp, &skipBarrier);
+
+ masm.call(&postBarrierSlot_); // Won't clobber R0
+
+ masm.bind(&skipBarrier);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_SetAliasedVar() {
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(R2);
+ if (HasInterpreterPCReg()) {
+ regs.take(InterpreterPCReg);
+ }
+
+ Register env = regs.takeAny();
+ Register scratch1 = regs.takeAny();
+ Register scratch2 = regs.takeAny();
+ Register scratch3 = regs.takeAny();
+
+ // Load the right environment object.
+ masm.loadPtr(frame.addressOfEnvironmentChain(), env);
+ LoadAliasedVarEnv(masm, env, scratch1);
+
+ // Load the slot index.
+ static_assert(ENVCOORD_SLOT_LEN == 3,
+ "Code assumes slot is stored in uint24 operand");
+ LoadUint24Operand(masm, ENVCOORD_HOPS_LEN, scratch1);
+
+ // Store the RHS Value in R2.
+ masm.loadValue(frame.addressOfStackValue(-1), R2);
+
+ // Load a pointer to the fixed or dynamic slot into scratch2. We want to call
+ // guardedCallPreBarrierAnyZone once to avoid code bloat.
+
+ // See EnvironmentObject::nonExtensibleIsFixedSlot.
+ Label isDynamic, done;
+ masm.branch32(Assembler::AboveOrEqual, scratch1,
+ Imm32(NativeObject::MAX_FIXED_SLOTS), &isDynamic);
+ {
+ uint32_t offset = NativeObject::getFixedSlotOffset(0);
+ BaseValueIndex slotAddr(env, scratch1, offset);
+ masm.computeEffectiveAddress(slotAddr, scratch2);
+ masm.jump(&done);
+ }
+ masm.bind(&isDynamic);
+ {
+ masm.loadPtr(Address(env, NativeObject::offsetOfSlots()), scratch2);
+
+ // Use an offset to subtract the number of fixed slots.
+ int32_t offset = -int32_t(NativeObject::MAX_FIXED_SLOTS * sizeof(Value));
+ BaseValueIndex slotAddr(scratch2, scratch1, offset);
+ masm.computeEffectiveAddress(slotAddr, scratch2);
+ }
+ masm.bind(&done);
+
+ // Pre-barrier and store.
+ Address slotAddr(scratch2, 0);
+ masm.guardedCallPreBarrierAnyZone(slotAddr, MIRType::Value, scratch3);
+ masm.storeValue(R2, slotAddr);
+
+ // Post barrier.
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, env, scratch1, &skipBarrier);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, R2, scratch1,
+ &skipBarrier);
+ {
+ // Post barrier code expects the object in R2.
+ masm.movePtr(env, R2.scratchReg());
+ masm.call(&postBarrierSlot_);
+ }
+ masm.bind(&skipBarrier);
+ return true;
+}
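+
+// Broadly speaking, the two barriers above follow the usual protocol for
+// overwriting a Value slot: the guarded pre-barrier records the old value for
+// incremental marking before the store, and the post-barrier call records a
+// possible tenured-object -> nursery-value edge afterwards. Both are skipped
+// when they cannot matter, i.e. when the environment itself is in the nursery
+// or the stored value is not a nursery cell.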
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetName() {
+ frame.syncStack(0);
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_BindName() {
+ frame.syncStack(0);
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_DelName() {
+ frame.syncStack(0);
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ prepareVMCall();
+
+ pushArg(R0.scratchReg());
+ pushScriptNameArg(R1.scratchReg(), R2.scratchReg());
+
+ using Fn = bool (*)(JSContext*, Handle<PropertyName*>, HandleObject,
+ MutableHandleValue);
+ if (!callVM<Fn, js::DeleteNameOperation>()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_GetImport() {
+ JSScript* script = handler.script();
+ ModuleEnvironmentObject* env = GetModuleEnvironmentForScript(script);
+ MOZ_ASSERT(env);
+
+ jsid id = NameToId(script->getName(handler.pc()));
+ ModuleEnvironmentObject* targetEnv;
+ Maybe<PropertyInfo> prop;
+ MOZ_ALWAYS_TRUE(env->lookupImport(id, &targetEnv, &prop));
+
+ frame.syncStack(0);
+
+ uint32_t slot = prop->slot();
+ Register scratch = R0.scratchReg();
+ masm.movePtr(ImmGCPtr(targetEnv), scratch);
+ if (slot < targetEnv->numFixedSlots()) {
+ masm.loadValue(Address(scratch, NativeObject::getFixedSlotOffset(slot)),
+ R0);
+ } else {
+ masm.loadPtr(Address(scratch, NativeObject::offsetOfSlots()), scratch);
+ masm.loadValue(
+ Address(scratch, (slot - targetEnv->numFixedSlots()) * sizeof(Value)),
+ R0);
+ }
+
+ // Imports are initialized by this point except in rare circumstances, so
+ // don't emit a check unless we have to.
+ if (targetEnv->getSlot(slot).isMagic(JS_UNINITIALIZED_LEXICAL)) {
+ if (!emitUninitializedLexicalCheck(R0)) {
+ return false;
+ }
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_GetImport() {
+ frame.syncStack(0);
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ prepareVMCall();
+
+ pushBytecodePCArg();
+ pushScriptArg();
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleScript, jsbytecode*,
+ MutableHandleValue);
+ if (!callVM<Fn, GetImportOperation>()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetIntrinsic() {
+ frame.syncStack(0);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetIntrinsic() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ prepareVMCall();
+
+ pushArg(R0);
+ pushBytecodePCArg();
+ pushScriptArg();
+
+ using Fn = bool (*)(JSContext*, JSScript*, jsbytecode*, HandleValue);
+ return callVM<Fn, SetIntrinsicOperation>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GlobalOrEvalDeclInstantiation() {
+ frame.syncStack(0);
+
+ prepareVMCall();
+
+ loadInt32LengthBytecodeOperand(R0.scratchReg());
+ pushArg(R0.scratchReg());
+ pushScriptArg();
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleScript, GCThingIndex);
+ return callVM<Fn, js::GlobalOrEvalDeclInstantiation>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitInitPropGetterSetter() {
+ // Keep values on the stack for the decompiler.
+ frame.syncStack(0);
+
+ prepareVMCall();
+
+ masm.unboxObject(frame.addressOfStackValue(-1), R0.scratchReg());
+ masm.unboxObject(frame.addressOfStackValue(-2), R1.scratchReg());
+
+ pushArg(R0.scratchReg());
+ pushScriptNameArg(R0.scratchReg(), R2.scratchReg());
+ pushArg(R1.scratchReg());
+ pushBytecodePCArg();
+
+ using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
+ Handle<PropertyName*>, HandleObject);
+ if (!callVM<Fn, InitPropGetterSetterOperation>()) {
+ return false;
+ }
+
+ frame.pop();
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitPropGetter() {
+ return emitInitPropGetterSetter();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitHiddenPropGetter() {
+ return emitInitPropGetterSetter();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitPropSetter() {
+ return emitInitPropGetterSetter();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitHiddenPropSetter() {
+ return emitInitPropGetterSetter();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitInitElemGetterSetter() {
+ // Load index and value in R0 and R1, but keep values on the stack for the
+ // decompiler.
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-2), R0);
+ masm.unboxObject(frame.addressOfStackValue(-1), R1.scratchReg());
+
+ prepareVMCall();
+
+ pushArg(R1.scratchReg());
+ pushArg(R0);
+ masm.unboxObject(frame.addressOfStackValue(-3), R0.scratchReg());
+ pushArg(R0.scratchReg());
+ pushBytecodePCArg();
+
+ using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
+ HandleObject);
+ if (!callVM<Fn, InitElemGetterSetterOperation>()) {
+ return false;
+ }
+
+ frame.popn(2);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitElemGetter() {
+ return emitInitElemGetterSetter();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitHiddenElemGetter() {
+ return emitInitElemGetterSetter();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitElemSetter() {
+ return emitInitElemGetterSetter();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitHiddenElemSetter() {
+ return emitInitElemGetterSetter();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitElemInc() {
+ // Keep the object and rhs on the stack.
+ frame.syncStack(0);
+
+ // Load object in R0, index in R1.
+ masm.loadValue(frame.addressOfStackValue(-3), R0);
+ masm.loadValue(frame.addressOfStackValue(-2), R1);
+
+ // Call IC.
+ if (!emitNextIC()) {
+ return false;
+ }
+
+  // Pop the rhs.
+ frame.pop();
+
+  // Increment the index.
+ Address indexAddr = frame.addressOfStackValue(-1);
+#ifdef DEBUG
+ Label isInt32;
+ masm.branchTestInt32(Assembler::Equal, indexAddr, &isInt32);
+ masm.assumeUnreachable("INITELEM_INC index must be Int32");
+ masm.bind(&isInt32);
+#endif
+ masm.incrementInt32Value(indexAddr);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_GetLocal() {
+ frame.pushLocal(GET_LOCALNO(handler.pc()));
+ return true;
+}
+
+static BaseValueIndex ComputeAddressOfLocal(MacroAssembler& masm,
+ Register indexScratch) {
+ // Locals are stored in memory at a negative offset from the frame pointer. We
+ // negate the index first to effectively subtract it.
+ masm.negPtr(indexScratch);
+ return BaseValueIndex(FramePointer, indexScratch,
+ BaselineFrame::reverseOffsetOfLocal(0));
+}
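+
+// The address computed above is, in effect:
+//
+//   FramePointer + BaselineFrame::reverseOffsetOfLocal(0) - index * sizeof(Value)
+//
+// i.e. local slots grow downward from the frame pointer, with local N sitting
+// N Values below local 0.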
+
+template <>
+bool BaselineInterpreterCodeGen::emit_GetLocal() {
+ Register scratch = R0.scratchReg();
+ LoadUint24Operand(masm, 0, scratch);
+ BaseValueIndex addr = ComputeAddressOfLocal(masm, scratch);
+ masm.loadValue(addr, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_SetLocal() {
+ // Ensure no other StackValue refers to the old value, for instance i + (i =
+ // 3). This also allows us to use R0 as scratch below.
+ frame.syncStack(1);
+
+ uint32_t local = GET_LOCALNO(handler.pc());
+ frame.storeStackValue(-1, frame.addressOfLocal(local), R0);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_SetLocal() {
+ Register scratch = R0.scratchReg();
+ LoadUint24Operand(masm, 0, scratch);
+ BaseValueIndex addr = ComputeAddressOfLocal(masm, scratch);
+ masm.loadValue(frame.addressOfStackValue(-1), R1);
+ masm.storeValue(R1, addr);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emitFormalArgAccess(JSOp op) {
+ MOZ_ASSERT(op == JSOp::GetArg || op == JSOp::SetArg);
+
+ uint32_t arg = GET_ARGNO(handler.pc());
+
+ // Fast path: the script does not use |arguments| or formals don't
+ // alias the arguments object.
+ if (!handler.script()->argsObjAliasesFormals()) {
+ if (op == JSOp::GetArg) {
+ frame.pushArg(arg);
+ } else {
+ // See the comment in emit_SetLocal.
+ frame.syncStack(1);
+ frame.storeStackValue(-1, frame.addressOfArg(arg), R0);
+ }
+
+ return true;
+ }
+
+ // Sync so that we can use R0.
+ frame.syncStack(0);
+
+ // Load the arguments object data vector.
+ Register reg = R2.scratchReg();
+ masm.loadPtr(frame.addressOfArgsObj(), reg);
+ masm.loadPrivate(Address(reg, ArgumentsObject::getDataSlotOffset()), reg);
+
+ // Load/store the argument.
+ Address argAddr(reg, ArgumentsData::offsetOfArgs() + arg * sizeof(Value));
+ if (op == JSOp::GetArg) {
+ masm.loadValue(argAddr, R0);
+ frame.push(R0);
+ } else {
+ Register temp = R1.scratchReg();
+ masm.guardedCallPreBarrierAnyZone(argAddr, MIRType::Value, temp);
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+ masm.storeValue(R0, argAddr);
+
+ MOZ_ASSERT(frame.numUnsyncedSlots() == 0);
+
+ // Reload the arguments object.
+ Register reg = R2.scratchReg();
+ masm.loadPtr(frame.addressOfArgsObj(), reg);
+
+ Label skipBarrier;
+
+ masm.branchPtrInNurseryChunk(Assembler::Equal, reg, temp, &skipBarrier);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, R0, temp, &skipBarrier);
+
+ masm.call(&postBarrierSlot_);
+
+ masm.bind(&skipBarrier);
+ }
+
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emitFormalArgAccess(JSOp op) {
+ MOZ_ASSERT(op == JSOp::GetArg || op == JSOp::SetArg);
+
+ // Load the index.
+ Register argReg = R1.scratchReg();
+ LoadUint16Operand(masm, argReg);
+
+ // If the frame has no arguments object, this must be an unaliased access.
+ Label isUnaliased, done;
+ masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
+ Imm32(BaselineFrame::HAS_ARGS_OBJ), &isUnaliased);
+ {
+ Register reg = R2.scratchReg();
+
+ // If it's an unmapped arguments object, this is an unaliased access.
+ loadScript(reg);
+ masm.branchTest32(
+ Assembler::Zero, Address(reg, JSScript::offsetOfImmutableFlags()),
+ Imm32(uint32_t(JSScript::ImmutableFlags::HasMappedArgsObj)),
+ &isUnaliased);
+
+ // Load the arguments object data vector.
+ masm.loadPtr(frame.addressOfArgsObj(), reg);
+ masm.loadPrivate(Address(reg, ArgumentsObject::getDataSlotOffset()), reg);
+
+ // Load/store the argument.
+ BaseValueIndex argAddr(reg, argReg, ArgumentsData::offsetOfArgs());
+ if (op == JSOp::GetArg) {
+ masm.loadValue(argAddr, R0);
+ frame.push(R0);
+ } else {
+ masm.guardedCallPreBarrierAnyZone(argAddr, MIRType::Value,
+ R0.scratchReg());
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+ masm.storeValue(R0, argAddr);
+
+ // Reload the arguments object.
+ masm.loadPtr(frame.addressOfArgsObj(), reg);
+
+ Register temp = R1.scratchReg();
+ masm.branchPtrInNurseryChunk(Assembler::Equal, reg, temp, &done);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, R0, temp, &done);
+
+ masm.call(&postBarrierSlot_);
+ }
+ masm.jump(&done);
+ }
+ masm.bind(&isUnaliased);
+ {
+ BaseValueIndex addr(FramePointer, argReg,
+ JitFrameLayout::offsetOfActualArgs());
+ if (op == JSOp::GetArg) {
+ masm.loadValue(addr, R0);
+ frame.push(R0);
+ } else {
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+ masm.storeValue(R0, addr);
+ }
+ }
+
+ masm.bind(&done);
+ return true;
+}
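+
+// Summary of the interpreter path above: if the frame has a mapped arguments
+// object and formals alias it, the argument is accessed through the
+// ArgumentsObject's data vector (with pre/post barriers on stores); otherwise
+// it is read from or written to the frame's actual-arguments area directly.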
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetArg() {
+ return emitFormalArgAccess(JSOp::GetArg);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetArg() {
+ return emitFormalArgAccess(JSOp::SetArg);
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_GetFrameArg() {
+ frame.syncStack(0);
+
+ Register argReg = R1.scratchReg();
+ LoadUint16Operand(masm, argReg);
+
+ BaseValueIndex addr(FramePointer, argReg,
+ JitFrameLayout::offsetOfActualArgs());
+ masm.loadValue(addr, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_GetFrameArg() {
+ uint32_t arg = GET_ARGNO(handler.pc());
+ frame.pushArg(arg);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_ArgumentsLength() {
+ frame.syncStack(0);
+
+ masm.loadNumActualArgs(FramePointer, R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0);
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetActualArg() {
+ frame.popRegsAndSync(1);
+
+#ifdef DEBUG
+ {
+ Label ok;
+ masm.branchTestInt32(Assembler::Equal, R0, &ok);
+ masm.assumeUnreachable("GetActualArg unexpected type");
+ masm.bind(&ok);
+ }
+#endif
+
+ Register index = R0.scratchReg();
+ masm.unboxInt32(R0, index);
+
+#ifdef DEBUG
+ {
+ Label ok;
+ masm.loadNumActualArgs(FramePointer, R1.scratchReg());
+ masm.branch32(Assembler::Above, R1.scratchReg(), index, &ok);
+ masm.assumeUnreachable("GetActualArg invalid index");
+ masm.bind(&ok);
+ }
+#endif
+
+ BaseValueIndex addr(FramePointer, index,
+ JitFrameLayout::offsetOfActualArgs());
+ masm.loadValue(addr, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+void BaselineCompilerCodeGen::loadNumFormalArguments(Register dest) {
+ masm.move32(Imm32(handler.function()->nargs()), dest);
+}
+
+template <>
+void BaselineInterpreterCodeGen::loadNumFormalArguments(Register dest) {
+ masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), dest);
+ masm.loadFunctionArgCount(dest, dest);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_NewTarget() {
+ MOZ_ASSERT_IF(handler.maybeFunction(), !handler.maybeFunction()->isArrow());
+
+ frame.syncStack(0);
+
+#ifdef DEBUG
+ Register scratch1 = R0.scratchReg();
+ Register scratch2 = R1.scratchReg();
+
+ Label isFunction;
+ masm.loadPtr(frame.addressOfCalleeToken(), scratch1);
+ masm.branchTestPtr(Assembler::Zero, scratch1, Imm32(CalleeTokenScriptBit),
+ &isFunction);
+ masm.assumeUnreachable("Unexpected non-function script");
+ masm.bind(&isFunction);
+
+ Label notArrow;
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), scratch1);
+ masm.branchFunctionKind(Assembler::NotEqual,
+ FunctionFlags::FunctionKind::Arrow, scratch1,
+ scratch2, &notArrow);
+ masm.assumeUnreachable("Unexpected arrow function");
+ masm.bind(&notArrow);
+#endif
+
+ // if (isConstructing()) push(argv[Max(numActualArgs, numFormalArgs)])
+ Label notConstructing, done;
+ masm.branchTestPtr(Assembler::Zero, frame.addressOfCalleeToken(),
+ Imm32(CalleeToken_FunctionConstructing), &notConstructing);
+ {
+ Register argvLen = R0.scratchReg();
+ Register nformals = R1.scratchReg();
+ masm.loadNumActualArgs(FramePointer, argvLen);
+
+    // If argvLen < nformals, set argvLen := nformals.
+ loadNumFormalArguments(nformals);
+ masm.cmp32Move32(Assembler::Below, argvLen, nformals, nformals, argvLen);
+
+ BaseValueIndex newTarget(FramePointer, argvLen,
+ JitFrameLayout::offsetOfActualArgs());
+ masm.loadValue(newTarget, R0);
+ masm.jump(&done);
+ }
+ // else push(undefined)
+ masm.bind(&notConstructing);
+ masm.moveValue(UndefinedValue(), R0);
+
+ masm.bind(&done);
+ frame.push(R0);
+ return true;
+}
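+
+// Informal note on the index used above: in a constructing call, new.target is
+// passed as one extra stack value after the arguments, and argument
+// rectification may have padded the actuals up to the formal count, which is
+// why it is found at argv[Max(numActualArgs, numFormalArgs)]. Non-constructing
+// frames simply see |undefined| for new.target.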
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_ThrowSetConst() {
+ prepareVMCall();
+ pushArg(Imm32(JSMSG_BAD_CONST_ASSIGN));
+
+ using Fn = bool (*)(JSContext*, unsigned);
+ return callVM<Fn, jit::ThrowRuntimeLexicalError>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitUninitializedLexicalCheck(
+ const ValueOperand& val) {
+ Label done;
+ masm.branchTestMagicValue(Assembler::NotEqual, val, JS_UNINITIALIZED_LEXICAL,
+ &done);
+
+ prepareVMCall();
+ pushArg(Imm32(JSMSG_UNINITIALIZED_LEXICAL));
+
+ using Fn = bool (*)(JSContext*, unsigned);
+ if (!callVM<Fn, jit::ThrowRuntimeLexicalError>()) {
+ return false;
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckLexical() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+ return emitUninitializedLexicalCheck(R0);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckAliasedLexical() {
+ return emit_CheckLexical();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitLexical() {
+ return emit_SetLocal();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitGLexical() {
+ frame.popRegsAndSync(1);
+ pushGlobalLexicalEnvironmentValue(R1);
+ frame.push(R0);
+ return emit_SetProp();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitAliasedLexical() {
+ return emit_SetAliasedVar();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Uninitialized() {
+ frame.push(MagicValue(JS_UNINITIALIZED_LEXICAL));
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emitCall(JSOp op) {
+ MOZ_ASSERT(IsInvokeOp(op));
+
+ frame.syncStack(0);
+
+ uint32_t argc = GET_ARGC(handler.pc());
+ masm.move32(Imm32(argc), R0.scratchReg());
+
+ // Call IC
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Update FrameInfo.
+ bool construct = IsConstructOp(op);
+ frame.popn(2 + argc + construct);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emitCall(JSOp op) {
+ MOZ_ASSERT(IsInvokeOp(op));
+
+ // The IC expects argc in R0.
+ LoadUint16Operand(masm, R0.scratchReg());
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Pop the arguments. We have to reload pc/argc because the IC clobbers them.
+ // The return value is in R0 so we can't use that.
+ Register scratch = R1.scratchReg();
+ uint32_t extraValuesToPop = IsConstructOp(op) ? 3 : 2;
+ Register spReg = AsRegister(masm.getStackPointer());
+ LoadUint16Operand(masm, scratch);
+ masm.computeEffectiveAddress(
+ BaseValueIndex(spReg, scratch, extraValuesToPop * sizeof(Value)), spReg);
+ frame.push(R0);
+ return true;
+}
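+
+// For both emitCall variants above, the values popped after the IC call are
+// the op's stack inputs: |callee, this, arg0, ..., arg[argc-1]|, plus a
+// trailing new.target for constructing ops. That gives 2 + argc (+1 when
+// constructing) in the compiler, and the interpreter computes the same amount
+// dynamically from the argc operand.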
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitSpreadCall(JSOp op) {
+ MOZ_ASSERT(IsInvokeOp(op));
+
+ frame.syncStack(0);
+ masm.move32(Imm32(1), R0.scratchReg());
+
+ // Call IC
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Update FrameInfo.
+ bool construct = op == JSOp::SpreadNew || op == JSOp::SpreadSuperCall;
+ frame.popn(3 + construct);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Call() {
+ return emitCall(JSOp::Call);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CallContent() {
+ return emitCall(JSOp::CallContent);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CallIgnoresRv() {
+ return emitCall(JSOp::CallIgnoresRv);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CallIter() {
+ return emitCall(JSOp::CallIter);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CallContentIter() {
+ return emitCall(JSOp::CallContentIter);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_New() {
+ return emitCall(JSOp::New);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_NewContent() {
+ return emitCall(JSOp::NewContent);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SuperCall() {
+ return emitCall(JSOp::SuperCall);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Eval() {
+ return emitCall(JSOp::Eval);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictEval() {
+ return emitCall(JSOp::StrictEval);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SpreadCall() {
+ return emitSpreadCall(JSOp::SpreadCall);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SpreadNew() {
+ return emitSpreadCall(JSOp::SpreadNew);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SpreadSuperCall() {
+ return emitSpreadCall(JSOp::SpreadSuperCall);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SpreadEval() {
+ return emitSpreadCall(JSOp::SpreadEval);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_StrictSpreadEval() {
+ return emitSpreadCall(JSOp::StrictSpreadEval);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_OptimizeSpreadCall() {
+ frame.popRegsAndSync(1);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_ImplicitThis() {
+ frame.syncStack(0);
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg());
+
+ prepareVMCall();
+
+ pushScriptNameArg(R1.scratchReg(), R2.scratchReg());
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
+ MutableHandleValue);
+ if (!callVM<Fn, ImplicitThisOperation>()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Instanceof() {
+ frame.popRegsAndSync(2);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Typeof() {
+ frame.popRegsAndSync(1);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_TypeofExpr() {
+ return emit_Typeof();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_ThrowMsg() {
+ prepareVMCall();
+ pushUint8BytecodeOperandArg(R2.scratchReg());
+
+ using Fn = bool (*)(JSContext*, const unsigned);
+ return callVM<Fn, js::ThrowMsgOperation>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Throw() {
+ // Keep value to throw in R0.
+ frame.popRegsAndSync(1);
+
+ prepareVMCall();
+ pushArg(R0);
+
+ using Fn = bool (*)(JSContext*, HandleValue);
+ return callVM<Fn, js::ThrowOperation>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Try() {
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Finally() {
+ // To match the interpreter, emit an interrupt check at the start of the
+ // finally block.
+ return emitInterruptCheck();
+}
+
+static void LoadBaselineScriptResumeEntries(MacroAssembler& masm,
+ JSScript* script, Register dest,
+ Register scratch) {
+ MOZ_ASSERT(dest != scratch);
+
+ masm.movePtr(ImmPtr(script->jitScript()), dest);
+ masm.loadPtr(Address(dest, JitScript::offsetOfBaselineScript()), dest);
+ masm.load32(Address(dest, BaselineScript::offsetOfResumeEntriesOffset()),
+ scratch);
+ masm.addPtr(scratch, dest);
+}
+
+template <typename Handler>
+void BaselineCodeGen<Handler>::emitInterpJumpToResumeEntry(Register script,
+ Register resumeIndex,
+ Register scratch) {
+ // Load JSScript::immutableScriptData() into |script|.
+ masm.loadPtr(Address(script, JSScript::offsetOfSharedData()), script);
+ masm.loadPtr(Address(script, SharedImmutableScriptData::offsetOfISD()),
+ script);
+
+ // Load the resume pcOffset in |resumeIndex|.
+ masm.load32(
+ Address(script, ImmutableScriptData::offsetOfResumeOffsetsOffset()),
+ scratch);
+ masm.computeEffectiveAddress(BaseIndex(scratch, resumeIndex, TimesFour),
+ scratch);
+ masm.load32(BaseIndex(script, scratch, TimesOne), resumeIndex);
+
+ // Add resume offset to PC, jump to it.
+ masm.computeEffectiveAddress(BaseIndex(script, resumeIndex, TimesOne,
+ ImmutableScriptData::offsetOfCode()),
+ script);
+ Address pcAddr(FramePointer, BaselineFrame::reverseOffsetOfInterpreterPC());
+ masm.storePtr(script, pcAddr);
+ emitJumpToInterpretOpLabel();
+}
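+
+// Net effect of the sequence above, as pseudocode with illustrative names:
+//
+//   pc = immutableScriptData->code() + resumeOffsets[resumeIndex];
+//   frame->interpreterPC = pc;
+//   goto interpretOp;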
+
+template <>
+void BaselineCompilerCodeGen::jumpToResumeEntry(Register resumeIndex,
+ Register scratch1,
+ Register scratch2) {
+ LoadBaselineScriptResumeEntries(masm, handler.script(), scratch1, scratch2);
+ masm.loadPtr(
+ BaseIndex(scratch1, resumeIndex, ScaleFromElemWidth(sizeof(uintptr_t))),
+ scratch1);
+ masm.jump(scratch1);
+}
+
+template <>
+void BaselineInterpreterCodeGen::jumpToResumeEntry(Register resumeIndex,
+ Register scratch1,
+ Register scratch2) {
+ loadScript(scratch1);
+ emitInterpJumpToResumeEntry(scratch1, resumeIndex, scratch2);
+}
+
+template <>
+template <typename F1, typename F2>
+[[nodiscard]] bool BaselineCompilerCodeGen::emitDebugInstrumentation(
+ const F1& ifDebuggee, const Maybe<F2>& ifNotDebuggee) {
+ // The JIT calls either ifDebuggee or (if present) ifNotDebuggee, because it
+ // knows statically whether we're compiling with debug instrumentation.
+
+ if (handler.compileDebugInstrumentation()) {
+ return ifDebuggee();
+ }
+
+ if (ifNotDebuggee) {
+ return (*ifNotDebuggee)();
+ }
+
+ return true;
+}
+
+template <>
+template <typename F1, typename F2>
+[[nodiscard]] bool BaselineInterpreterCodeGen::emitDebugInstrumentation(
+ const F1& ifDebuggee, const Maybe<F2>& ifNotDebuggee) {
+ // The interpreter emits both ifDebuggee and (if present) ifNotDebuggee
+ // paths, with a toggled jump followed by a branch on the frame's DEBUGGEE
+ // flag.
+
+ Label isNotDebuggee, done;
+
+ CodeOffset toggleOffset = masm.toggledJump(&isNotDebuggee);
+ if (!handler.addDebugInstrumentationOffset(cx, toggleOffset)) {
+ return false;
+ }
+
+ masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
+ Imm32(BaselineFrame::DEBUGGEE), &isNotDebuggee);
+
+ if (!ifDebuggee()) {
+ return false;
+ }
+
+ if (ifNotDebuggee) {
+ masm.jump(&done);
+ }
+
+ masm.bind(&isNotDebuggee);
+
+ if (ifNotDebuggee && !(*ifNotDebuggee)()) {
+ return false;
+ }
+
+ masm.bind(&done);
+ return true;
+}
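+
+// Rough shape of the code emitted above:
+//
+//   toggledJump -> isNotDebuggee   (flipped when debug instrumentation is
+//                                   enabled or disabled)
+//   if (!(frame->flags & DEBUGGEE)) goto isNotDebuggee
+//   <ifDebuggee>
+//   goto done                      (only when an ifNotDebuggee path exists)
+// isNotDebuggee:
+//   <ifNotDebuggee, if any>
+// done: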
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_PushLexicalEnv() {
+ // Call a stub to push the block on the block chain.
+ prepareVMCall();
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ pushScriptGCThingArg(ScriptGCThingType::Scope, R1.scratchReg(),
+ R2.scratchReg());
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, Handle<LexicalScope*>);
+ return callVM<Fn, jit::PushLexicalEnv>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_PushClassBodyEnv() {
+ prepareVMCall();
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ pushScriptGCThingArg(ScriptGCThingType::Scope, R1.scratchReg(),
+ R2.scratchReg());
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, Handle<ClassBodyScope*>);
+ return callVM<Fn, jit::PushClassBodyEnv>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_PopLexicalEnv() {
+ frame.syncStack(0);
+
+ Register scratch1 = R0.scratchReg();
+
+ auto ifDebuggee = [this, scratch1]() {
+ masm.loadBaselineFramePtr(FramePointer, scratch1);
+
+ prepareVMCall();
+ pushBytecodePCArg();
+ pushArg(scratch1);
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, const jsbytecode*);
+ return callVM<Fn, jit::DebugLeaveThenPopLexicalEnv>();
+ };
+ auto ifNotDebuggee = [this, scratch1]() {
+ Register scratch2 = R1.scratchReg();
+ masm.loadPtr(frame.addressOfEnvironmentChain(), scratch1);
+ masm.debugAssertObjectHasClass(scratch1, scratch2,
+ &LexicalEnvironmentObject::class_);
+ Address enclosingAddr(scratch1,
+ EnvironmentObject::offsetOfEnclosingEnvironment());
+ masm.unboxObject(enclosingAddr, scratch1);
+ masm.storePtr(scratch1, frame.addressOfEnvironmentChain());
+ return true;
+ };
+ return emitDebugInstrumentation(ifDebuggee, mozilla::Some(ifNotDebuggee));
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_FreshenLexicalEnv() {
+ frame.syncStack(0);
+
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ auto ifDebuggee = [this]() {
+ prepareVMCall();
+ pushBytecodePCArg();
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, const jsbytecode*);
+ return callVM<Fn, jit::DebugLeaveThenFreshenLexicalEnv>();
+ };
+ auto ifNotDebuggee = [this]() {
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ return callVM<Fn, jit::FreshenLexicalEnv>();
+ };
+ return emitDebugInstrumentation(ifDebuggee, mozilla::Some(ifNotDebuggee));
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_RecreateLexicalEnv() {
+ frame.syncStack(0);
+
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ auto ifDebuggee = [this]() {
+ prepareVMCall();
+ pushBytecodePCArg();
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, const jsbytecode*);
+ return callVM<Fn, jit::DebugLeaveThenRecreateLexicalEnv>();
+ };
+ auto ifNotDebuggee = [this]() {
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ return callVM<Fn, jit::RecreateLexicalEnv>();
+ };
+ return emitDebugInstrumentation(ifDebuggee, mozilla::Some(ifNotDebuggee));
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_DebugLeaveLexicalEnv() {
+ auto ifDebuggee = [this]() {
+ prepareVMCall();
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ pushBytecodePCArg();
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, const jsbytecode*);
+ return callVM<Fn, jit::DebugLeaveLexicalEnv>();
+ };
+ return emitDebugInstrumentation(ifDebuggee);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_PushVarEnv() {
+ prepareVMCall();
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ pushScriptGCThingArg(ScriptGCThingType::Scope, R1.scratchReg(),
+ R2.scratchReg());
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, Handle<Scope*>);
+ return callVM<Fn, jit::PushVarEnv>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_EnterWith() {
+ // Pop "with" object to R0.
+ frame.popRegsAndSync(1);
+
+ // Call a stub to push the object onto the environment chain.
+ prepareVMCall();
+
+ pushScriptGCThingArg(ScriptGCThingType::Scope, R1.scratchReg(),
+ R2.scratchReg());
+ pushArg(R0);
+ masm.loadBaselineFramePtr(FramePointer, R1.scratchReg());
+ pushArg(R1.scratchReg());
+
+ using Fn =
+ bool (*)(JSContext*, BaselineFrame*, HandleValue, Handle<WithScope*>);
+ return callVM<Fn, jit::EnterWith>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_LeaveWith() {
+ // Call a stub to pop the with object from the environment chain.
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ return callVM<Fn, jit::LeaveWith>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Exception() {
+ prepareVMCall();
+
+ using Fn = bool (*)(JSContext*, MutableHandleValue);
+ if (!callVM<Fn, GetAndClearException>()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Debugger() {
+ prepareVMCall();
+
+ frame.assertSyncedStack();
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ if (!callVM<Fn, jit::OnDebuggerStatement>()) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitDebugEpilogue() {
+ auto ifDebuggee = [this]() {
+ // Move return value into the frame's rval slot.
+ masm.storeValue(JSReturnOperand, frame.addressOfReturnValue());
+ masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags());
+
+ // Load BaselineFrame pointer in R0.
+ frame.syncStack(0);
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ prepareVMCall();
+ pushBytecodePCArg();
+ pushArg(R0.scratchReg());
+
+ const RetAddrEntry::Kind kind = RetAddrEntry::Kind::DebugEpilogue;
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, const jsbytecode*);
+ if (!callVM<Fn, jit::DebugEpilogueOnBaselineReturn>(kind)) {
+ return false;
+ }
+
+ masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+ return true;
+ };
+ return emitDebugInstrumentation(ifDebuggee);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitReturn() {
+ if (handler.shouldEmitDebugEpilogueAtReturnOp()) {
+ if (!emitDebugEpilogue()) {
+ return false;
+ }
+ }
+
+  // Only emit the jump if this return op is not the last instruction: the
+  // last instruction falls through into the return label, so no jump is
+  // needed there.
+ if (!handler.isDefinitelyLastOp()) {
+ masm.jump(&return_);
+ }
+
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Return() {
+ frame.assertStackDepth(1);
+
+ frame.popValue(JSReturnOperand);
+ return emitReturn();
+}
+
+template <typename Handler>
+void BaselineCodeGen<Handler>::emitLoadReturnValue(ValueOperand val) {
+ Label done, noRval;
+ masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
+ Imm32(BaselineFrame::HAS_RVAL), &noRval);
+ masm.loadValue(frame.addressOfReturnValue(), val);
+ masm.jump(&done);
+
+ masm.bind(&noRval);
+ masm.moveValue(UndefinedValue(), val);
+
+ masm.bind(&done);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_RetRval() {
+ frame.assertStackDepth(0);
+
+ masm.moveValue(UndefinedValue(), JSReturnOperand);
+
+ if (!handler.maybeScript() || !handler.maybeScript()->noScriptRval()) {
+ // Return the value in the return value slot, if any.
+ Label done;
+ Address flags = frame.addressOfFlags();
+ masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
+ &done);
+ masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+ masm.bind(&done);
+ }
+
+ return emitReturn();
+}
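+
+// Both emitLoadReturnValue and emit_RetRval above rely on the same frame
+// convention: the frame's return-value slot is only meaningful when the
+// HAS_RVAL flag is set, and the return value defaults to |undefined|
+// otherwise (and unconditionally for noScriptRval scripts).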
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_ToPropertyKey() {
+ frame.popRegsAndSync(1);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_ToAsyncIter() {
+ frame.syncStack(0);
+ masm.unboxObject(frame.addressOfStackValue(-2), R0.scratchReg());
+ masm.loadValue(frame.addressOfStackValue(-1), R1);
+
+ prepareVMCall();
+ pushArg(R1);
+ pushArg(R0.scratchReg());
+
+ using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
+ if (!callVM<Fn, js::CreateAsyncFromSyncIterator>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.popn(2);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CanSkipAwait() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ prepareVMCall();
+ pushArg(R0);
+
+ using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
+ if (!callVM<Fn, js::CanSkipAwait>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, R0);
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_MaybeExtractAwaitValue() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-2), R0);
+
+ masm.unboxBoolean(frame.addressOfStackValue(-1), R1.scratchReg());
+
+ Label cantExtract;
+ masm.branchIfFalseBool(R1.scratchReg(), &cantExtract);
+
+ prepareVMCall();
+ pushArg(R0);
+
+ using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
+ if (!callVM<Fn, js::ExtractAwaitValue>()) {
+ return false;
+ }
+
+ masm.storeValue(R0, frame.addressOfStackValue(-2));
+ masm.bind(&cantExtract);
+
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_AsyncAwait() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-2), R1);
+ masm.unboxObject(frame.addressOfStackValue(-1), R0.scratchReg());
+
+ prepareVMCall();
+ pushArg(R1);
+ pushArg(R0.scratchReg());
+
+ using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
+ HandleValue);
+ if (!callVM<Fn, js::AsyncFunctionAwait>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.popn(2);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_AsyncResolve() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-2), R1);
+ masm.unboxObject(frame.addressOfStackValue(-1), R0.scratchReg());
+
+ prepareVMCall();
+ pushUint8BytecodeOperandArg(R2.scratchReg());
+ pushArg(R1);
+ pushArg(R0.scratchReg());
+
+ using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
+ HandleValue, AsyncFunctionResolveKind);
+ if (!callVM<Fn, js::AsyncFunctionResolve>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.popn(2);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckObjCoercible() {
+ frame.syncStack(0);
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ Label fail, done;
+
+ masm.branchTestUndefined(Assembler::Equal, R0, &fail);
+ masm.branchTestNull(Assembler::NotEqual, R0, &done);
+
+ masm.bind(&fail);
+ prepareVMCall();
+
+ pushArg(R0);
+
+ using Fn = bool (*)(JSContext*, HandleValue);
+ if (!callVM<Fn, ThrowObjectCoercible>()) {
+ return false;
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_ToString() {
+ // Keep top stack value in R0.
+ frame.popRegsAndSync(1);
+
+ // Inline path for string.
+ Label done;
+ masm.branchTestString(Assembler::Equal, R0, &done);
+
+ prepareVMCall();
+
+ pushArg(R0);
+
+ // Call ToStringSlow which doesn't handle string inputs.
+ using Fn = JSString* (*)(JSContext*, HandleValue);
+ if (!callVM<Fn, ToStringSlow<CanGC>>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_STRING, ReturnReg, R0);
+
+ masm.bind(&done);
+ frame.push(R0);
+ return true;
+}
+
+static constexpr uint32_t TableSwitchOpLowOffset = 1 * JUMP_OFFSET_LEN;
+static constexpr uint32_t TableSwitchOpHighOffset = 2 * JUMP_OFFSET_LEN;
+static constexpr uint32_t TableSwitchOpFirstResumeIndexOffset =
+ 3 * JUMP_OFFSET_LEN;
+
+template <>
+void BaselineCompilerCodeGen::emitGetTableSwitchIndex(ValueOperand val,
+ Register dest,
+ Register scratch1,
+ Register scratch2) {
+ jsbytecode* pc = handler.pc();
+ jsbytecode* defaultpc = pc + GET_JUMP_OFFSET(pc);
+ Label* defaultLabel = handler.labelOf(defaultpc);
+
+ int32_t low = GET_JUMP_OFFSET(pc + TableSwitchOpLowOffset);
+ int32_t high = GET_JUMP_OFFSET(pc + TableSwitchOpHighOffset);
+ int32_t length = high - low + 1;
+
+ // Jump to the 'default' pc if not int32 (tableswitch is only used when
+ // all cases are int32).
+ masm.branchTestInt32(Assembler::NotEqual, val, defaultLabel);
+ masm.unboxInt32(val, dest);
+
+ // Subtract 'low'. Bounds check.
+ if (low != 0) {
+ masm.sub32(Imm32(low), dest);
+ }
+ masm.branch32(Assembler::AboveOrEqual, dest, Imm32(length), defaultLabel);
+}
+
+template <>
+void BaselineInterpreterCodeGen::emitGetTableSwitchIndex(ValueOperand val,
+ Register dest,
+ Register scratch1,
+ Register scratch2) {
+ // Jump to the 'default' pc if not int32 (tableswitch is only used when
+ // all cases are int32).
+ Label done, jumpToDefault;
+ masm.branchTestInt32(Assembler::NotEqual, val, &jumpToDefault);
+ masm.unboxInt32(val, dest);
+
+ Register pcReg = LoadBytecodePC(masm, scratch1);
+ Address lowAddr(pcReg, sizeof(jsbytecode) + TableSwitchOpLowOffset);
+ Address highAddr(pcReg, sizeof(jsbytecode) + TableSwitchOpHighOffset);
+
+ // Jump to default if val > high.
+ masm.branch32(Assembler::LessThan, highAddr, dest, &jumpToDefault);
+
+ // Jump to default if val < low.
+ masm.load32(lowAddr, scratch2);
+ masm.branch32(Assembler::GreaterThan, scratch2, dest, &jumpToDefault);
+
+ // index := val - low.
+ masm.sub32(scratch2, dest);
+ masm.jump(&done);
+
+ masm.bind(&jumpToDefault);
+ emitJump();
+
+ masm.bind(&done);
+}
+
+template <>
+void BaselineCompilerCodeGen::emitTableSwitchJump(Register key,
+ Register scratch1,
+ Register scratch2) {
+ // Jump to resumeEntries[firstResumeIndex + key].
+
+ // Note: BytecodeEmitter::allocateResumeIndex static_asserts
+ // |firstResumeIndex * sizeof(uintptr_t)| fits in int32_t.
+ uint32_t firstResumeIndex =
+ GET_RESUMEINDEX(handler.pc() + TableSwitchOpFirstResumeIndexOffset);
+ LoadBaselineScriptResumeEntries(masm, handler.script(), scratch1, scratch2);
+ masm.loadPtr(BaseIndex(scratch1, key, ScaleFromElemWidth(sizeof(uintptr_t)),
+ firstResumeIndex * sizeof(uintptr_t)),
+ scratch1);
+ masm.jump(scratch1);
+}
+
+template <>
+void BaselineInterpreterCodeGen::emitTableSwitchJump(Register key,
+ Register scratch1,
+ Register scratch2) {
+ // Load the op's firstResumeIndex in scratch1.
+ LoadUint24Operand(masm, TableSwitchOpFirstResumeIndexOffset, scratch1);
+
+ masm.add32(key, scratch1);
+ jumpToResumeEntry(scratch1, key, scratch2);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_TableSwitch() {
+ frame.popRegsAndSync(1);
+
+ Register key = R0.scratchReg();
+ Register scratch1 = R1.scratchReg();
+ Register scratch2 = R2.scratchReg();
+
+ // Call a stub to convert R0 from double to int32 if needed.
+ // Note: this stub may clobber scratch1.
+ masm.call(cx->runtime()->jitRuntime()->getDoubleToInt32ValueStub());
+
+ // Load the index in the jump table in |key|, or branch to default pc if not
+ // int32 or out-of-range.
+ emitGetTableSwitchIndex(R0, key, scratch1, scratch2);
+
+ // Jump to the target pc.
+ emitTableSwitchJump(key, scratch1, scratch2);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Iter() {
+ frame.popRegsAndSync(1);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_MoreIter() {
+ frame.syncStack(0);
+
+ masm.unboxObject(frame.addressOfStackValue(-1), R1.scratchReg());
+
+ masm.iteratorMore(R1.scratchReg(), R0, R2.scratchReg());
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitIsMagicValue() {
+ frame.syncStack(0);
+
+ Label isMagic, done;
+ masm.branchTestMagic(Assembler::Equal, frame.addressOfStackValue(-1),
+ &isMagic);
+ masm.moveValue(BooleanValue(false), R0);
+ masm.jump(&done);
+
+ masm.bind(&isMagic);
+ masm.moveValue(BooleanValue(true), R0);
+
+ masm.bind(&done);
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_IsNoIter() {
+ return emitIsMagicValue();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_EndIter() {
+ // Pop iterator value.
+ frame.pop();
+
+ // Pop the iterator object to close in R0.
+ frame.popRegsAndSync(1);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ if (HasInterpreterPCReg()) {
+ regs.take(InterpreterPCReg);
+ }
+
+ Register obj = R0.scratchReg();
+ regs.take(obj);
+ masm.unboxObject(R0, obj);
+
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+ Register temp3 = regs.takeAny();
+ masm.iteratorClose(obj, temp1, temp2, temp3);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CloseIter() {
+ frame.popRegsAndSync(1);
+
+ Register iter = R0.scratchReg();
+ masm.unboxObject(R0, iter);
+
+ return emitNextIC();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_IsGenClosing() {
+ return emitIsMagicValue();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_IsNullOrUndefined() {
+ frame.syncStack(0);
+
+ Label isNullOrUndefined, done;
+ masm.branchTestNull(Assembler::Equal, frame.addressOfStackValue(-1),
+ &isNullOrUndefined);
+ masm.branchTestUndefined(Assembler::Equal, frame.addressOfStackValue(-1),
+ &isNullOrUndefined);
+ masm.moveValue(BooleanValue(false), R0);
+ masm.jump(&done);
+
+ masm.bind(&isNullOrUndefined);
+ masm.moveValue(BooleanValue(true), R0);
+
+ masm.bind(&done);
+ frame.push(R0, JSVAL_TYPE_BOOLEAN);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_GetRval() {
+ frame.syncStack(0);
+
+ emitLoadReturnValue(R0);
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SetRval() {
+ // Store to the frame's return value slot.
+ frame.storeStackValue(-1, frame.addressOfReturnValue(), R2);
+ masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags());
+ frame.pop();
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Callee() {
+ MOZ_ASSERT_IF(handler.maybeScript(), handler.maybeScript()->function());
+ frame.syncStack(0);
+ masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(),
+ R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_OBJECT, R0.scratchReg(), R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_EnvCallee() {
+ frame.syncStack(0);
+ uint8_t numHops = GET_UINT8(handler.pc());
+ Register scratch = R0.scratchReg();
+
+ masm.loadPtr(frame.addressOfEnvironmentChain(), scratch);
+ for (unsigned i = 0; i < numHops; i++) {
+ Address nextAddr(scratch,
+ EnvironmentObject::offsetOfEnclosingEnvironment());
+ masm.unboxObject(nextAddr, scratch);
+ }
+
+ masm.loadValue(Address(scratch, CallObject::offsetOfCallee()), R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_EnvCallee() {
+ Register scratch = R0.scratchReg();
+ Register env = R1.scratchReg();
+
+ static_assert(JSOpLength_EnvCallee - sizeof(jsbytecode) == ENVCOORD_HOPS_LEN,
+ "op must have uint8 operand for LoadAliasedVarEnv");
+
+ // Load the right environment object.
+ masm.loadPtr(frame.addressOfEnvironmentChain(), env);
+ LoadAliasedVarEnv(masm, env, scratch);
+
+ masm.pushValue(Address(env, CallObject::offsetOfCallee()));
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SuperBase() {
+ frame.popRegsAndSync(1);
+
+ Register scratch = R0.scratchReg();
+ Register proto = R1.scratchReg();
+
+ // Unbox callee.
+ masm.unboxObject(R0, scratch);
+
+ // Load [[HomeObject]]
+ Address homeObjAddr(scratch,
+ FunctionExtended::offsetOfMethodHomeObjectSlot());
+
+ masm.assertFunctionIsExtended(scratch);
+#ifdef DEBUG
+ Label isObject;
+ masm.branchTestObject(Assembler::Equal, homeObjAddr, &isObject);
+ masm.assumeUnreachable("[[HomeObject]] must be Object");
+ masm.bind(&isObject);
+#endif
+ masm.unboxObject(homeObjAddr, scratch);
+
+ // Load prototype from [[HomeObject]]
+ masm.loadObjProto(scratch, proto);
+
+#ifdef DEBUG
+ // We won't encounter a lazy proto, because the prototype is guaranteed to
+ // either be a JSFunction or a PlainObject, and only proxy objects can have a
+ // lazy proto.
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ Label proxyCheckDone;
+ masm.branchPtr(Assembler::NotEqual, proto, ImmWord(1), &proxyCheckDone);
+ masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
+ masm.bind(&proxyCheckDone);
+#endif
+
+ Label nullProto, done;
+ masm.branchPtr(Assembler::Equal, proto, ImmWord(0), &nullProto);
+
+ // Box prototype and return
+ masm.tagValue(JSVAL_TYPE_OBJECT, proto, R1);
+ masm.jump(&done);
+
+ masm.bind(&nullProto);
+ masm.moveValue(NullValue(), R1);
+
+ masm.bind(&done);
+ frame.push(R1);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_SuperFun() {
+ frame.popRegsAndSync(1);
+
+ Register callee = R0.scratchReg();
+ Register proto = R1.scratchReg();
+#ifdef DEBUG
+ Register scratch = R2.scratchReg();
+#endif
+
+ // Unbox callee.
+ masm.unboxObject(R0, callee);
+
+#ifdef DEBUG
+ Label classCheckDone;
+ masm.branchTestObjIsFunction(Assembler::Equal, callee, scratch, callee,
+ &classCheckDone);
+ masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
+ masm.bind(&classCheckDone);
+#endif
+
+ // Load prototype of callee
+ masm.loadObjProto(callee, proto);
+
+#ifdef DEBUG
+ // We won't encounter a lazy proto, because |callee| is guaranteed to be a
+ // JSFunction and only proxy objects can have a lazy proto.
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ Label proxyCheckDone;
+ masm.branchPtr(Assembler::NotEqual, proto, ImmWord(1), &proxyCheckDone);
+ masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
+ masm.bind(&proxyCheckDone);
+#endif
+
+ Label nullProto, done;
+ masm.branchPtr(Assembler::Equal, proto, ImmWord(0), &nullProto);
+
+ // Box prototype and return
+ masm.tagValue(JSVAL_TYPE_OBJECT, proto, R1);
+ masm.jump(&done);
+
+ masm.bind(&nullProto);
+ masm.moveValue(NullValue(), R1);
+
+ masm.bind(&done);
+ frame.push(R1);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Arguments() {
+ frame.syncStack(0);
+
+ MOZ_ASSERT_IF(handler.maybeScript(), handler.maybeScript()->needsArgsObj());
+
+ prepareVMCall();
+
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, MutableHandleValue);
+ if (!callVM<Fn, jit::NewArgumentsObject>()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Rest() {
+ frame.syncStack(0);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ // Mark R0 as pushed stack value.
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Generator() {
+ frame.assertStackDepth(0);
+
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+
+ using Fn = JSObject* (*)(JSContext*, BaselineFrame*);
+ if (!callVM<Fn, jit::CreateGeneratorFromFrame>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitSuspend(JSOp op) {
+ MOZ_ASSERT(op == JSOp::InitialYield || op == JSOp::Yield ||
+ op == JSOp::Await);
+
+ // Load the generator object in R2, but leave the return value on the
+ // expression stack.
+ Register genObj = R2.scratchReg();
+ if (op == JSOp::InitialYield) {
+ // Generator and return value are one and the same.
+ frame.syncStack(0);
+ frame.assertStackDepth(1);
+ masm.unboxObject(frame.addressOfStackValue(-1), genObj);
+ } else {
+ frame.popRegsAndSync(1);
+ masm.unboxObject(R0, genObj);
+ }
+
+ if (frame.hasKnownStackDepth(1) && !handler.canHaveFixedSlots()) {
+    // If the expression stack is empty and the script has no fixed slots,
+    // there is no frame state to save, so we can inline the Yield. Note that
+    // this branch is never taken for the interpreter because it doesn't know
+    // static stack depths.
+ MOZ_ASSERT_IF(op == JSOp::InitialYield && handler.maybePC(),
+ GET_RESUMEINDEX(handler.maybePC()) == 0);
+ Address resumeIndexSlot(genObj,
+ AbstractGeneratorObject::offsetOfResumeIndexSlot());
+ Register temp = R1.scratchReg();
+ if (op == JSOp::InitialYield) {
+ masm.storeValue(Int32Value(0), resumeIndexSlot);
+ } else {
+ jsbytecode* pc = handler.maybePC();
+ MOZ_ASSERT(pc, "compiler-only code never has a null pc");
+ masm.move32(Imm32(GET_RESUMEINDEX(pc)), temp);
+ masm.storeValue(JSVAL_TYPE_INT32, temp, resumeIndexSlot);
+ }
+
+ Register envObj = R0.scratchReg();
+ Address envChainSlot(
+ genObj, AbstractGeneratorObject::offsetOfEnvironmentChainSlot());
+ masm.loadPtr(frame.addressOfEnvironmentChain(), envObj);
+ masm.guardedCallPreBarrierAnyZone(envChainSlot, MIRType::Value, temp);
+ masm.storeValue(JSVAL_TYPE_OBJECT, envObj, envChainSlot);
+
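+    // A post-barrier is only needed when storing a nursery pointer into a
+    // tenured object, so skip it if |genObj| is in the nursery or |envObj| is
+    // not.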
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, genObj, temp, &skipBarrier);
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, envObj, temp,
+ &skipBarrier);
+ MOZ_ASSERT(genObj == R2.scratchReg());
+ masm.call(&postBarrierSlot_);
+ masm.bind(&skipBarrier);
+ } else {
+ masm.loadBaselineFramePtr(FramePointer, R1.scratchReg());
+ computeFrameSize(R0.scratchReg());
+
+ prepareVMCall();
+ pushBytecodePCArg();
+ pushArg(R0.scratchReg());
+ pushArg(R1.scratchReg());
+ pushArg(genObj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, BaselineFrame*, uint32_t,
+ const jsbytecode*);
+ if (!callVM<Fn, jit::NormalSuspend>()) {
+ return false;
+ }
+ }
+
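+  // Return the value on top of the expression stack.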
+ masm.loadValue(frame.addressOfStackValue(-1), JSReturnOperand);
+ if (!emitReturn()) {
+ return false;
+ }
+
+ // Three values are pushed onto the stack when resuming the generator,
+ // replacing the one slot that holds the return value.
+ frame.incStackDepth(2);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitialYield() {
+ return emitSuspend(JSOp::InitialYield);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Yield() {
+ return emitSuspend(JSOp::Yield);
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Await() {
+ return emitSuspend(JSOp::Await);
+}
+
+template <>
+template <typename F>
+bool BaselineCompilerCodeGen::emitAfterYieldDebugInstrumentation(
+ const F& ifDebuggee, Register) {
+ if (handler.compileDebugInstrumentation()) {
+ return ifDebuggee();
+ }
+ return true;
+}
+
+template <>
+template <typename F>
+bool BaselineInterpreterCodeGen::emitAfterYieldDebugInstrumentation(
+ const F& ifDebuggee, Register scratch) {
+ // Note that we can't use emitDebugInstrumentation here because the frame's
+ // DEBUGGEE flag hasn't been initialized yet.
+
+ // If the current Realm is not a debuggee we're done.
+ Label done;
+ CodeOffset toggleOffset = masm.toggledJump(&done);
+ if (!handler.addDebugInstrumentationOffset(cx, toggleOffset)) {
+ return false;
+ }
+ masm.loadPtr(AbsoluteAddress(cx->addressOfRealm()), scratch);
+ masm.branchTest32(Assembler::Zero,
+ Address(scratch, Realm::offsetOfDebugModeBits()),
+ Imm32(Realm::debugModeIsDebuggeeBit()), &done);
+
+ if (!ifDebuggee()) {
+ return false;
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_AfterYield() {
+ if (!emit_JumpTarget()) {
+ return false;
+ }
+
+ auto ifDebuggee = [this]() {
+ frame.assertSyncedStack();
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+
+ const RetAddrEntry::Kind kind = RetAddrEntry::Kind::DebugAfterYield;
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*);
+ if (!callVM<Fn, jit::DebugAfterYield>(kind)) {
+ return false;
+ }
+
+ return true;
+ };
+ return emitAfterYieldDebugInstrumentation(ifDebuggee, R0.scratchReg());
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_FinalYieldRval() {
+ // Store generator in R0.
+ frame.popRegsAndSync(1);
+ masm.unboxObject(R0, R0.scratchReg());
+
+ prepareVMCall();
+ pushBytecodePCArg();
+ pushArg(R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, HandleObject, const jsbytecode*);
+ if (!callVM<Fn, jit::FinalSuspend>()) {
+ return false;
+ }
+
+ masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+ return emitReturn();
+}
+
+template <>
+void BaselineCompilerCodeGen::emitJumpToInterpretOpLabel() {
+ TrampolinePtr code =
+ cx->runtime()->jitRuntime()->baselineInterpreter().interpretOpAddr();
+ masm.jump(code);
+}
+
+template <>
+void BaselineInterpreterCodeGen::emitJumpToInterpretOpLabel() {
+ masm.jump(handler.interpretOpLabel());
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitEnterGeneratorCode(Register script,
+ Register resumeIndex,
+ Register scratch) {
+ // Resume in either the BaselineScript (if present) or Baseline Interpreter.
+
+ static_assert(BaselineDisabledScript == 0x1,
+ "Comparison below requires specific sentinel encoding");
+
+ // Initialize the icScript slot in the baseline frame.
+ masm.loadJitScript(script, scratch);
+ masm.computeEffectiveAddress(Address(scratch, JitScript::offsetOfICScript()),
+ scratch);
+ Address icScriptAddr(FramePointer, BaselineFrame::reverseOffsetOfICScript());
+ masm.storePtr(scratch, icScriptAddr);
+
+ Label noBaselineScript;
+ masm.loadJitScript(script, scratch);
+ masm.loadPtr(Address(scratch, JitScript::offsetOfBaselineScript()), scratch);
+ masm.branchPtr(Assembler::BelowOrEqual, scratch,
+ ImmPtr(BaselineDisabledScriptPtr), &noBaselineScript);
+
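+  // Jump to the BaselineScript's resume entry for |resumeIndex|.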
+ masm.load32(Address(scratch, BaselineScript::offsetOfResumeEntriesOffset()),
+ script);
+ masm.addPtr(scratch, script);
+ masm.loadPtr(
+ BaseIndex(script, resumeIndex, ScaleFromElemWidth(sizeof(uintptr_t))),
+ scratch);
+ masm.jump(scratch);
+
+ masm.bind(&noBaselineScript);
+
+ // Initialize interpreter frame fields.
+ Address flagsAddr(FramePointer, BaselineFrame::reverseOffsetOfFlags());
+ Address scriptAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfInterpreterScript());
+ masm.or32(Imm32(BaselineFrame::RUNNING_IN_INTERPRETER), flagsAddr);
+ masm.storePtr(script, scriptAddr);
+
+ // Initialize pc and jump to it.
+ emitInterpJumpToResumeEntry(script, resumeIndex, scratch);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_Resume() {
+ frame.syncStack(0);
+ masm.assertStackAlignment(sizeof(Value), 0);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ if (HasInterpreterPCReg()) {
+ regs.take(InterpreterPCReg);
+ }
+
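+  // Save the interpreter PC to the frame; it may be clobbered below. (This is
+  // a no-op when compiling.)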
+ saveInterpreterPCReg();
+
+ // Load generator object.
+ Register genObj = regs.takeAny();
+ masm.unboxObject(frame.addressOfStackValue(-3), genObj);
+
+ // Load callee.
+ Register callee = regs.takeAny();
+ masm.unboxObject(
+ Address(genObj, AbstractGeneratorObject::offsetOfCalleeSlot()), callee);
+
+ // Save a pointer to the JSOp::Resume operand stack Values.
+ Register callerStackPtr = regs.takeAny();
+ masm.computeEffectiveAddress(frame.addressOfStackValue(-1), callerStackPtr);
+
+ // Branch to |interpret| to resume the generator in the C++ interpreter if the
+ // script does not have a JitScript.
+ Label interpret;
+ Register scratch1 = regs.takeAny();
+ masm.loadPrivate(Address(callee, JSFunction::offsetOfJitInfoOrScript()),
+ scratch1);
+ masm.branchIfScriptHasNoJitScript(scratch1, &interpret);
+
+ // Push |undefined| for all formals.
+ Register scratch2 = regs.takeAny();
+ Label loop, loopDone;
+ masm.loadFunctionArgCount(callee, scratch2);
+
+ static_assert(sizeof(Value) == 8);
+ static_assert(JitStackAlignment == 16 || JitStackAlignment == 8);
+ // If JitStackValueAlignment == 1, then we were already correctly aligned on
+ // entry, as guaranteed by the assertStackAlignment at the entry to this
+ // function.
+ if (JitStackValueAlignment > 1) {
+ Register alignment = regs.takeAny();
+ masm.moveStackPtrTo(alignment);
+ masm.alignJitStackBasedOnNArgs(scratch2, false);
+
+ // Compute alignment adjustment.
+ masm.subStackPtrFrom(alignment);
+
+ // Some code, like BaselineFrame::trace, will inspect the whole range of
+ // the stack frame. In order to ensure that garbage data left behind from
+ // previous activations doesn't confuse other machinery, we zero out the
+ // alignment bytes.
+ Label alignmentZero;
+ masm.branchPtr(Assembler::Equal, alignment, ImmWord(0), &alignmentZero);
+
+    // The stack was 8-byte aligned before this adjustment and
+    // JitStackAlignment is 8 or 16 bytes, so any alignment must have
+    // subtracted exactly 8 bytes from the stack pointer.
+    //
+    // So we can freely store a valid double here.
+ masm.storeValue(DoubleValue(0), Address(masm.getStackPointer(), 0));
+ masm.bind(&alignmentZero);
+ }
+
+ masm.branchTest32(Assembler::Zero, scratch2, scratch2, &loopDone);
+ masm.bind(&loop);
+ {
+ masm.pushValue(UndefinedValue());
+ masm.branchSub32(Assembler::NonZero, Imm32(1), scratch2, &loop);
+ }
+ masm.bind(&loopDone);
+
+ // Push |undefined| for |this|.
+ masm.pushValue(UndefinedValue());
+
+#ifdef DEBUG
+ // Update BaselineFrame debugFrameSize field.
+ masm.mov(FramePointer, scratch2);
+ masm.subStackPtrFrom(scratch2);
+ masm.store32(scratch2, frame.addressOfDebugFrameSize());
+#endif
+
+ masm.PushCalleeToken(callee, /* constructing = */ false);
+ masm.pushFrameDescriptorForJitCall(FrameType::BaselineJS, /* argc = */ 0);
+
+ // PushCalleeToken bumped framePushed. Reset it.
+ MOZ_ASSERT(masm.framePushed() == sizeof(uintptr_t));
+ masm.setFramePushed(0);
+
+ regs.add(callee);
+
+ // Push a fake return address on the stack. We will resume here when the
+ // generator returns.
+ Label genStart, returnTarget;
+#ifdef JS_USE_LINK_REGISTER
+ masm.call(&genStart);
+#else
+ masm.callAndPushReturnAddress(&genStart);
+#endif
+
+ // Record the return address so the return offset -> pc mapping works.
+ if (!handler.recordCallRetAddr(cx, RetAddrEntry::Kind::IC,
+ masm.currentOffset())) {
+ return false;
+ }
+
+ masm.jump(&returnTarget);
+ masm.bind(&genStart);
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ // Construct BaselineFrame.
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+  // If profiler instrumentation is on, update lastProfilingFrame on the
+  // current JitActivation.
+ {
+ Register scratchReg = scratch2;
+ Label skip;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skip);
+ masm.loadJSContext(scratchReg);
+ masm.loadPtr(Address(scratchReg, JSContext::offsetOfProfilingActivation()),
+ scratchReg);
+ masm.storePtr(
+ FramePointer,
+ Address(scratchReg, JitActivation::offsetOfLastProfilingFrame()));
+ masm.bind(&skip);
+ }
+
+ masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
+ masm.assertStackAlignment(sizeof(Value), 0);
+
+ // Store flags and env chain.
+ masm.store32(Imm32(BaselineFrame::HAS_INITIAL_ENV), frame.addressOfFlags());
+ masm.unboxObject(
+ Address(genObj, AbstractGeneratorObject::offsetOfEnvironmentChainSlot()),
+ scratch2);
+ masm.storePtr(scratch2, frame.addressOfEnvironmentChain());
+
+ // Store the arguments object if there is one.
+ Label noArgsObj;
+ Address argsObjSlot(genObj, AbstractGeneratorObject::offsetOfArgsObjSlot());
+ masm.fallibleUnboxObject(argsObjSlot, scratch2, &noArgsObj);
+ {
+ masm.storePtr(scratch2, frame.addressOfArgsObj());
+ masm.or32(Imm32(BaselineFrame::HAS_ARGS_OBJ), frame.addressOfFlags());
+ }
+ masm.bind(&noArgsObj);
+
+ // Push locals and expression slots if needed.
+ Label noStackStorage;
+ Address stackStorageSlot(genObj,
+ AbstractGeneratorObject::offsetOfStackStorageSlot());
+ masm.fallibleUnboxObject(stackStorageSlot, scratch2, &noStackStorage);
+ {
+ Register initLength = regs.takeAny();
+ masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
+ masm.load32(Address(scratch2, ObjectElements::offsetOfInitializedLength()),
+ initLength);
+ masm.store32(
+ Imm32(0),
+ Address(scratch2, ObjectElements::offsetOfInitializedLength()));
+
+ Label loop, loopDone;
+ masm.branchTest32(Assembler::Zero, initLength, initLength, &loopDone);
+ masm.bind(&loop);
+ {
+ masm.pushValue(Address(scratch2, 0));
+ masm.guardedCallPreBarrierAnyZone(Address(scratch2, 0), MIRType::Value,
+ scratch1);
+ masm.addPtr(Imm32(sizeof(Value)), scratch2);
+ masm.branchSub32(Assembler::NonZero, Imm32(1), initLength, &loop);
+ }
+ masm.bind(&loopDone);
+ regs.add(initLength);
+ }
+
+ masm.bind(&noStackStorage);
+
+ // Push arg, generator, resumeKind stack Values, in that order.
+ masm.pushValue(Address(callerStackPtr, sizeof(Value)));
+ masm.pushValue(JSVAL_TYPE_OBJECT, genObj);
+ masm.pushValue(Address(callerStackPtr, 0));
+
+ masm.switchToObjectRealm(genObj, scratch2);
+
+ // Load script in scratch1.
+ masm.unboxObject(
+ Address(genObj, AbstractGeneratorObject::offsetOfCalleeSlot()), scratch1);
+ masm.loadPrivate(Address(scratch1, JSFunction::offsetOfJitInfoOrScript()),
+ scratch1);
+
+ // Load resume index in scratch2 and mark generator as running.
+ Address resumeIndexSlot(genObj,
+ AbstractGeneratorObject::offsetOfResumeIndexSlot());
+ masm.unboxInt32(resumeIndexSlot, scratch2);
+ masm.storeValue(Int32Value(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
+ resumeIndexSlot);
+
+ if (!emitEnterGeneratorCode(scratch1, scratch2, regs.getAny())) {
+ return false;
+ }
+
+ // Call into the VM to resume the generator in the C++ interpreter if there's
+ // no JitScript.
+ masm.bind(&interpret);
+
+ prepareVMCall();
+
+ pushArg(callerStackPtr);
+ pushArg(genObj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, Value*, MutableHandleValue);
+ if (!callVM<Fn, jit::InterpretResume>()) {
+ return false;
+ }
+
+ masm.bind(&returnTarget);
+
+  // Restore the stack pointer.
+ masm.computeEffectiveAddress(frame.addressOfStackValue(-1),
+ masm.getStackPointer());
+
+ // After the generator returns, we restore the stack pointer, switch back to
+ // the current realm, push the return value, and we're done.
+ if (JSScript* script = handler.maybeScript()) {
+ masm.switchToRealm(script->realm(), R2.scratchReg());
+ } else {
+ masm.switchToBaselineFrameRealm(R2.scratchReg());
+ }
+ restoreInterpreterPCReg();
+ frame.popn(3);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckResumeKind() {
+ // Load resumeKind in R1, generator in R0.
+ frame.popRegsAndSync(2);
+
+#ifdef DEBUG
+ Label ok;
+ masm.branchTestInt32(Assembler::Equal, R1, &ok);
+ masm.assumeUnreachable("Expected int32 resumeKind");
+ masm.bind(&ok);
+#endif
+
+ // If resumeKind is 'next' we don't have to do anything.
+ Label done;
+ masm.unboxInt32(R1, R1.scratchReg());
+ masm.branch32(Assembler::Equal, R1.scratchReg(),
+ Imm32(int32_t(GeneratorResumeKind::Next)), &done);
+
+ prepareVMCall();
+
+ pushArg(R1.scratchReg()); // resumeKind
+
+ masm.loadValue(frame.addressOfStackValue(-1), R2);
+ pushArg(R2); // arg
+
+ masm.unboxObject(R0, R0.scratchReg());
+ pushArg(R0.scratchReg()); // genObj
+
+ masm.loadBaselineFramePtr(FramePointer, R2.scratchReg());
+ pushArg(R2.scratchReg()); // frame
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*,
+ Handle<AbstractGeneratorObject*>, HandleValue, int32_t);
+ if (!callVM<Fn, jit::GeneratorThrowOrReturn>()) {
+ return false;
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_ResumeKind() {
+ GeneratorResumeKind resumeKind = ResumeKindFromPC(handler.pc());
+ frame.push(Int32Value(int32_t(resumeKind)));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_ResumeKind() {
+ LoadUint8Operand(masm, R0.scratchReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_DebugCheckSelfHosted() {
+#ifdef DEBUG
+ frame.syncStack(0);
+
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ prepareVMCall();
+ pushArg(R0);
+
+ using Fn = bool (*)(JSContext*, HandleValue);
+ if (!callVM<Fn, js::Debug_CheckSelfHosted>()) {
+ return false;
+ }
+#endif
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_IsConstructing() {
+ frame.push(MagicValue(JS_IS_CONSTRUCTING));
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_JumpTarget() {
+ MaybeIncrementCodeCoverageCounter(masm, handler.script(), handler.pc());
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_JumpTarget() {
+ Register scratch1 = R0.scratchReg();
+ Register scratch2 = R1.scratchReg();
+
+ Label skipCoverage;
+ CodeOffset toggleOffset = masm.toggledJump(&skipCoverage);
+ masm.call(handler.codeCoverageAtPCLabel());
+ masm.bind(&skipCoverage);
+ if (!handler.codeCoverageOffsets().append(toggleOffset.offset())) {
+ return false;
+ }
+
+ // Load icIndex in scratch1.
+ LoadInt32Operand(masm, scratch1);
+
+ // Compute ICEntry* and store to frame->interpreterICEntry.
+ masm.loadPtr(frame.addressOfICScript(), scratch2);
+ static_assert(sizeof(ICEntry) == sizeof(uintptr_t));
+ masm.computeEffectiveAddress(BaseIndex(scratch2, scratch1, ScalePointer,
+ ICScript::offsetOfICEntries()),
+ scratch2);
+ masm.storePtr(scratch2, frame.addressOfInterpreterICEntry());
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_CheckClassHeritage() {
+ frame.syncStack(0);
+
+ // Leave the heritage value on the stack.
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ prepareVMCall();
+ pushArg(R0);
+
+ using Fn = bool (*)(JSContext*, HandleValue);
+ return callVM<Fn, js::CheckClassHeritageOperation>();
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_InitHomeObject() {
+ // Load HomeObject in R0.
+ frame.popRegsAndSync(1);
+
+  // Load the function off the stack.
+ Register func = R2.scratchReg();
+ masm.unboxObject(frame.addressOfStackValue(-1), func);
+
+ masm.assertFunctionIsExtended(func);
+
+ // Set HOMEOBJECT_SLOT
+ Register temp = R1.scratchReg();
+ Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
+ masm.guardedCallPreBarrierAnyZone(addr, MIRType::Value, temp);
+ masm.storeValue(R0, addr);
+
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, func, temp, &skipBarrier);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, R0, temp, &skipBarrier);
+ masm.call(&postBarrierSlot_);
+ masm.bind(&skipBarrier);
+
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_BuiltinObject() {
+ // Built-in objects are constants for a given global.
+ auto kind = BuiltinObjectKind(GET_UINT8(handler.pc()));
+ JSObject* builtin = BuiltinObjectOperation(cx, kind);
+ if (!builtin) {
+ return false;
+ }
+ frame.push(ObjectValue(*builtin));
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_BuiltinObject() {
+ prepareVMCall();
+
+ pushUint8BytecodeOperandArg(R0.scratchReg());
+
+ using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
+ if (!callVM<Fn, BuiltinObjectOperation>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_ObjWithProto() {
+ frame.syncStack(0);
+
+ // Leave the proto value on the stack for the decompiler
+ masm.loadValue(frame.addressOfStackValue(-1), R0);
+
+ prepareVMCall();
+ pushArg(R0);
+
+ using Fn = PlainObject* (*)(JSContext*, HandleValue);
+ if (!callVM<Fn, js::ObjectWithProtoOperation>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.pop();
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_FunWithProto() {
+ frame.popRegsAndSync(1);
+
+ masm.unboxObject(R0, R0.scratchReg());
+ masm.loadPtr(frame.addressOfEnvironmentChain(), R1.scratchReg());
+
+ prepareVMCall();
+ pushArg(R0.scratchReg());
+ pushArg(R1.scratchReg());
+ pushScriptGCThingArg(ScriptGCThingType::Function, R0.scratchReg(),
+ R1.scratchReg());
+
+ using Fn =
+ JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
+ if (!callVM<Fn, js::FunWithProtoOperation>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_ImportMeta() {
+ // Note: this is like the interpreter implementation, but optimized a bit by
+ // calling GetModuleObjectForScript at compile-time.
+
+ Rooted<ModuleObject*> module(cx, GetModuleObjectForScript(handler.script()));
+ MOZ_ASSERT(module);
+
+ frame.syncStack(0);
+
+ prepareVMCall();
+ pushArg(ImmGCPtr(module));
+
+ using Fn = JSObject* (*)(JSContext*, HandleObject);
+ if (!callVM<Fn, js::GetOrCreateModuleMetaObject>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_ImportMeta() {
+ prepareVMCall();
+
+ pushScriptArg();
+
+ using Fn = JSObject* (*)(JSContext*, HandleScript);
+ if (!callVM<Fn, ImportMetaOperation>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_DynamicImport() {
+ // Put specifier into R0 and object value into R1
+ frame.popRegsAndSync(2);
+
+ prepareVMCall();
+ pushArg(R1);
+ pushArg(R0);
+ pushScriptArg();
+
+ using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
+ if (!callVM<Fn, js::StartDynamicModuleImport>()) {
+ return false;
+ }
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
+ frame.push(R0);
+ return true;
+}
+
+template <>
+bool BaselineCompilerCodeGen::emit_ForceInterpreter() {
+ // Caller is responsible for checking script->hasForceInterpreterOp().
+ MOZ_CRASH("JSOp::ForceInterpreter in baseline");
+}
+
+template <>
+bool BaselineInterpreterCodeGen::emit_ForceInterpreter() {
+ masm.assumeUnreachable("JSOp::ForceInterpreter");
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitPrologue() {
+ AutoCreatedBy acb(masm, "BaselineCodeGen<Handler>::emitPrologue");
+
+#ifdef JS_USE_LINK_REGISTER
+ // Push link register from generateEnterJIT()'s BLR.
+ masm.pushReturnAddress();
+#endif
+
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ masm.checkStackAlignment();
+
+ emitProfilerEnterFrame();
+
+ masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
+
+  // Initialize BaselineFrame. Also handles env chain pre-initialization (in
+  // case GC runs during the stack check). For global and eval scripts, the env
+  // chain is in R1. For function scripts, the env chain is in the callee.
+ emitInitFrameFields(R1.scratchReg());
+
+ // When compiling with Debugger instrumentation, set the debuggeeness of
+ // the frame before any operation that can call into the VM.
+ if (!emitIsDebuggeeCheck()) {
+ return false;
+ }
+
+ // Initialize the env chain before any operation that may call into the VM and
+ // trigger a GC.
+ if (!initEnvironmentChain()) {
+ return false;
+ }
+
+ // Check for overrecursion before initializing locals.
+ if (!emitStackCheck()) {
+ return false;
+ }
+
+ emitInitializeLocals();
+
+ // Ion prologue bailouts will enter here in the Baseline Interpreter.
+ masm.bind(&bailoutPrologue_);
+
+ frame.assertSyncedStack();
+
+ if (JSScript* script = handler.maybeScript()) {
+ masm.debugAssertContextRealm(script->realm(), R1.scratchReg());
+ }
+
+ if (!emitDebugPrologue()) {
+ return false;
+ }
+
+ if (!emitHandleCodeCoverageAtPrologue()) {
+ return false;
+ }
+
+ if (!emitWarmUpCounterIncrement()) {
+ return false;
+ }
+
+ warmUpCheckPrologueOffset_ = CodeOffset(masm.currentOffset());
+
+ return true;
+}
+
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emitEpilogue() {
+ AutoCreatedBy acb(masm, "BaselineCodeGen<Handler>::emitEpilogue");
+
+ masm.bind(&return_);
+
+ if (!handler.shouldEmitDebugEpilogueAtReturnOp()) {
+ if (!emitDebugEpilogue()) {
+ return false;
+ }
+ }
+
+ emitProfilerExitFrame();
+
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+
+ masm.ret();
+ return true;
+}
+
+MethodStatus BaselineCompiler::emitBody() {
+ AutoCreatedBy acb(masm, "BaselineCompiler::emitBody");
+
+ JSScript* script = handler.script();
+ MOZ_ASSERT(handler.pc() == script->code());
+
+ mozilla::DebugOnly<jsbytecode*> prevpc = handler.pc();
+
+ while (true) {
+ JSOp op = JSOp(*handler.pc());
+ JitSpew(JitSpew_BaselineOp, "Compiling op @ %d: %s",
+ int(script->pcToOffset(handler.pc())), CodeName(op));
+
+ BytecodeInfo* info = handler.analysis().maybeInfo(handler.pc());
+
+ // Skip unreachable ops.
+ if (!info) {
+      // Stop emitting code if this was the last instruction.
+ handler.moveToNextPC();
+ if (handler.pc() >= script->codeEnd()) {
+ break;
+ }
+
+ prevpc = handler.pc();
+ continue;
+ }
+
+ if (info->jumpTarget) {
+ // Fully sync the stack if there are incoming jumps.
+ frame.syncStack(0);
+ frame.setStackDepth(info->stackDepth);
+ masm.bind(handler.labelOf(handler.pc()));
+ } else if (MOZ_UNLIKELY(compileDebugInstrumentation())) {
+ // Also fully sync the stack if the debugger is enabled.
+ frame.syncStack(0);
+ } else {
+ // At the beginning of any op, at most the top 2 stack-values are
+ // unsynced.
+ if (frame.stackDepth() > 2) {
+ frame.syncStack(2);
+ }
+ }
+
+ frame.assertValidState(*info);
+
+ // If the script has a resume offset for this pc we need to keep track of
+ // the native code offset.
+ if (info->hasResumeOffset) {
+ frame.assertSyncedStack();
+ uint32_t pcOffset = script->pcToOffset(handler.pc());
+ uint32_t nativeOffset = masm.currentOffset();
+ if (!resumeOffsetEntries_.emplaceBack(pcOffset, nativeOffset)) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+ }
+
+ // Emit traps for breakpoints and step mode.
+ if (MOZ_UNLIKELY(compileDebugInstrumentation()) && !emitDebugTrap()) {
+ return Method_Error;
+ }
+
+ perfSpewer_.recordInstruction(cx, masm, handler.pc(), frame);
+
+#define EMIT_OP(OP, ...) \
+ case JSOp::OP: { \
+ AutoCreatedBy acb(masm, "op=" #OP); \
+ if (MOZ_UNLIKELY(!this->emit_##OP())) return Method_Error; \
+ } break;
+
+ switch (op) {
+ FOR_EACH_OPCODE(EMIT_OP)
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+#undef EMIT_OP
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+    // Stop emitting code if this was the last instruction.
+ handler.moveToNextPC();
+ if (handler.pc() >= script->codeEnd()) {
+ break;
+ }
+
+#ifdef DEBUG
+ prevpc = handler.pc();
+#endif
+ }
+
+ MOZ_ASSERT(JSOp(*prevpc) == JSOp::RetRval || JSOp(*prevpc) == JSOp::Return);
+ return Method_Compiled;
+}
+
+bool BaselineInterpreterGenerator::emitDebugTrap() {
+ CodeOffset offset = masm.nopPatchableToCall();
+ if (!debugTrapOffsets_.append(offset.offset())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+// Register holding the bytecode pc during dispatch. This exists so the debug
+// trap handler can reload the pc into this register when it's done.
+static constexpr Register InterpreterPCRegAtDispatch =
+ HasInterpreterPCReg() ? InterpreterPCReg : R0.scratchReg();
+
+bool BaselineInterpreterGenerator::emitInterpreterLoop() {
+ AutoCreatedBy acb(masm, "BaselineInterpreterGenerator::emitInterpreterLoop");
+
+ Register scratch1 = R0.scratchReg();
+ Register scratch2 = R1.scratchReg();
+
+ // Entry point for interpreting a bytecode op. No registers are live except
+ // for InterpreterPCReg.
+ masm.bind(handler.interpretOpWithPCRegLabel());
+
+ // Emit a patchable call for debugger breakpoints/stepping.
+ if (!emitDebugTrap()) {
+ return false;
+ }
+ Label interpretOpAfterDebugTrap;
+ masm.bind(&interpretOpAfterDebugTrap);
+
+ // Load pc, bytecode op.
+ Register pcReg = LoadBytecodePC(masm, scratch1);
+ masm.load8ZeroExtend(Address(pcReg, 0), scratch1);
+
+ // Jump to table[op].
+ {
+ CodeOffset label = masm.moveNearAddressWithPatch(scratch2);
+ if (!tableLabels_.append(label)) {
+ return false;
+ }
+ BaseIndex pointer(scratch2, scratch1, ScalePointer);
+ masm.branchToComputedAddress(pointer);
+ }
+
+ // At the end of each op, emit code to bump the pc and jump to the
+ // next op (this is also known as a threaded interpreter).
+ auto opEpilogue = [&](JSOp op, size_t opLength) -> bool {
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ if (!BytecodeFallsThrough(op)) {
+ // Nothing to do.
+ masm.assumeUnreachable("unexpected fall through");
+ return true;
+ }
+
+ // Bump frame->interpreterICEntry if needed.
+ if (BytecodeOpHasIC(op)) {
+ frame.bumpInterpreterICEntry();
+ }
+
+ // Bump bytecode PC.
+ if (HasInterpreterPCReg()) {
+ MOZ_ASSERT(InterpreterPCRegAtDispatch == InterpreterPCReg);
+ masm.addPtr(Imm32(opLength), InterpreterPCReg);
+ } else {
+ MOZ_ASSERT(InterpreterPCRegAtDispatch == scratch1);
+ masm.loadPtr(frame.addressOfInterpreterPC(), InterpreterPCRegAtDispatch);
+ masm.addPtr(Imm32(opLength), InterpreterPCRegAtDispatch);
+ masm.storePtr(InterpreterPCRegAtDispatch, frame.addressOfInterpreterPC());
+ }
+
+ if (!emitDebugTrap()) {
+ return false;
+ }
+
+ // Load the opcode, jump to table[op].
+ masm.load8ZeroExtend(Address(InterpreterPCRegAtDispatch, 0), scratch1);
+ CodeOffset label = masm.moveNearAddressWithPatch(scratch2);
+ if (!tableLabels_.append(label)) {
+ return false;
+ }
+ BaseIndex pointer(scratch2, scratch1, ScalePointer);
+ masm.branchToComputedAddress(pointer);
+ return true;
+ };
+
+ // Emit code for each bytecode op.
+ Label opLabels[JSOP_LIMIT];
+#define EMIT_OP(OP, ...) \
+ { \
+ AutoCreatedBy acb(masm, "op=" #OP); \
+ perfSpewer_.recordOffset(masm, JSOp::OP); \
+ masm.bind(&opLabels[uint8_t(JSOp::OP)]); \
+ handler.setCurrentOp(JSOp::OP); \
+ if (!this->emit_##OP()) { \
+ return false; \
+ } \
+ if (!opEpilogue(JSOp::OP, JSOpLength_##OP)) { \
+ return false; \
+ } \
+ handler.resetCurrentOp(); \
+ }
+ FOR_EACH_OPCODE(EMIT_OP)
+#undef EMIT_OP
+
+ // External entry point to start interpreting bytecode ops. This is used for
+ // things like exception handling and OSR. DebugModeOSR patches JIT frames to
+ // return here from the DebugTrapHandler.
+ masm.bind(handler.interpretOpLabel());
+ interpretOpOffset_ = masm.currentOffset();
+ restoreInterpreterPCReg();
+ masm.jump(handler.interpretOpWithPCRegLabel());
+
+ // Second external entry point: this skips the debug trap for the first op
+ // and is used by OSR.
+ interpretOpNoDebugTrapOffset_ = masm.currentOffset();
+ restoreInterpreterPCReg();
+ masm.jump(&interpretOpAfterDebugTrap);
+
+ // External entry point for Ion prologue bailouts.
+ bailoutPrologueOffset_ = CodeOffset(masm.currentOffset());
+ restoreInterpreterPCReg();
+ masm.jump(&bailoutPrologue_);
+
+ // Emit debug trap handler code (target of patchable call instructions). This
+ // is just a tail call to the debug trap handler trampoline code.
+ {
+ JitRuntime* jrt = cx->runtime()->jitRuntime();
+ JitCode* handlerCode =
+ jrt->debugTrapHandler(cx, DebugTrapHandlerKind::Interpreter);
+ if (!handlerCode) {
+ return false;
+ }
+
+ debugTrapHandlerOffset_ = masm.currentOffset();
+ masm.jump(handlerCode);
+ }
+
+ // Emit the table.
+ masm.haltingAlign(sizeof(void*));
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
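+  // Keep the assembler from inserting constant pools or nops in the middle of
+  // the code-pointer table.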
+ size_t numInstructions = JSOP_LIMIT * (sizeof(uintptr_t) / sizeof(uint32_t));
+ AutoForbidPoolsAndNops afp(&masm, numInstructions);
+#endif
+
+ tableOffset_ = masm.currentOffset();
+
+ for (size_t i = 0; i < JSOP_LIMIT; i++) {
+ const Label& opLabel = opLabels[i];
+ MOZ_ASSERT(opLabel.bound());
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ cl.target()->bind(opLabel.offset());
+ masm.addCodeLabel(cl);
+ }
+
+ return true;
+}
+
+void BaselineInterpreterGenerator::emitOutOfLineCodeCoverageInstrumentation() {
+ AutoCreatedBy acb(masm,
+ "BaselineInterpreterGenerator::"
+ "emitOutOfLineCodeCoverageInstrumentation");
+
+ masm.bind(handler.codeCoverageAtPrologueLabel());
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ saveInterpreterPCReg();
+
+ using Fn1 = void (*)(BaselineFrame* frame);
+ masm.setupUnalignedABICall(R0.scratchReg());
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ masm.passABIArg(R0.scratchReg());
+ masm.callWithABI<Fn1, HandleCodeCoverageAtPrologue>();
+
+ restoreInterpreterPCReg();
+ masm.ret();
+
+ masm.bind(handler.codeCoverageAtPCLabel());
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ saveInterpreterPCReg();
+
+ using Fn2 = void (*)(BaselineFrame* frame, jsbytecode* pc);
+ masm.setupUnalignedABICall(R0.scratchReg());
+ masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
+ masm.passABIArg(R0.scratchReg());
+ Register pcReg = LoadBytecodePC(masm, R2.scratchReg());
+ masm.passABIArg(pcReg);
+ masm.callWithABI<Fn2, HandleCodeCoverageAtPC>();
+
+ restoreInterpreterPCReg();
+ masm.ret();
+}
+
+bool BaselineInterpreterGenerator::generate(BaselineInterpreter& interpreter) {
+ AutoCreatedBy acb(masm, "BaselineInterpreterGenerator::generate");
+
+ perfSpewer_.recordOffset(masm, "Prologue");
+ if (!emitPrologue()) {
+ return false;
+ }
+
+ perfSpewer_.recordOffset(masm, "InterpreterLoop");
+ if (!emitInterpreterLoop()) {
+ return false;
+ }
+
+ perfSpewer_.recordOffset(masm, "Epilogue");
+ if (!emitEpilogue()) {
+ return false;
+ }
+
+ perfSpewer_.recordOffset(masm, "OOLPostBarrierSlot");
+ if (!emitOutOfLinePostBarrierSlot()) {
+ return false;
+ }
+
+ perfSpewer_.recordOffset(masm, "OOLCodeCoverageInstrumentation");
+ emitOutOfLineCodeCoverageInstrumentation();
+
+ {
+ AutoCreatedBy acb(masm, "everything_else");
+ Linker linker(masm);
+ if (masm.oom()) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ JitCode* code = linker.newCode(cx, CodeKind::Other);
+ if (!code) {
+ return false;
+ }
+
+ // Register BaselineInterpreter code with the profiler's JitCode table.
+ {
+ auto entry = MakeJitcodeGlobalEntry<BaselineInterpreterEntry>(
+ cx, code, code->raw(), code->rawEnd());
+ if (!entry) {
+ return false;
+ }
+
+ JitcodeGlobalTable* globalTable =
+ cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(std::move(entry))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ code->setHasBytecodeMap();
+ }
+
+    // Patch the near-address loads now that we know the address of the op
+    // dispatch table.
+ CodeLocationLabel tableLoc(code, CodeOffset(tableOffset_));
+ for (CodeOffset off : tableLabels_) {
+ MacroAssembler::patchNearAddressMove(CodeLocationLabel(code, off),
+ tableLoc);
+ }
+
+ perfSpewer_.saveProfile(code);
+
+#ifdef MOZ_VTUNE
+ vtune::MarkStub(code, "BaselineInterpreter");
+#endif
+
+ interpreter.init(
+ code, interpretOpOffset_, interpretOpNoDebugTrapOffset_,
+ bailoutPrologueOffset_.offset(),
+ profilerEnterFrameToggleOffset_.offset(),
+ profilerExitFrameToggleOffset_.offset(), debugTrapHandlerOffset_,
+ std::move(handler.debugInstrumentationOffsets()),
+ std::move(debugTrapOffsets_), std::move(handler.codeCoverageOffsets()),
+ std::move(handler.icReturnOffsets()), handler.callVMOffsets());
+ }
+
+ if (cx->runtime()->geckoProfiler().enabled()) {
+ interpreter.toggleProfilerInstrumentation(true);
+ }
+
+ if (coverage::IsLCovEnabled()) {
+ interpreter.toggleCodeCoverageInstrumentationUnchecked(true);
+ }
+
+ return true;
+}
+
+JitCode* JitRuntime::generateDebugTrapHandler(JSContext* cx,
+ DebugTrapHandlerKind kind) {
+ TempAllocator temp(&cx->tempLifoAlloc());
+ StackMacroAssembler masm(cx, temp);
+ AutoCreatedBy acb(masm, "JitRuntime::generateDebugTrapHandler");
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.takeUnchecked(ICStubReg);
+ if (HasInterpreterPCReg()) {
+ regs.takeUnchecked(InterpreterPCReg);
+ }
+#ifdef JS_CODEGEN_ARM
+ regs.takeUnchecked(BaselineSecondScratchReg);
+ masm.setSecondScratchReg(BaselineSecondScratchReg);
+#endif
+ Register scratch1 = regs.takeAny();
+ Register scratch2 = regs.takeAny();
+ Register scratch3 = regs.takeAny();
+
+ if (kind == DebugTrapHandlerKind::Interpreter) {
+ // The interpreter calls this for every script when debugging, so check if
+ // the script has any breakpoints or is in step mode before calling into
+ // C++.
+ Label hasDebugScript;
+ Address scriptAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfInterpreterScript());
+ masm.loadPtr(scriptAddr, scratch1);
+ masm.branchTest32(Assembler::NonZero,
+ Address(scratch1, JSScript::offsetOfMutableFlags()),
+ Imm32(int32_t(JSScript::MutableFlags::HasDebugScript)),
+ &hasDebugScript);
+ masm.abiret();
+ masm.bind(&hasDebugScript);
+
+ if (HasInterpreterPCReg()) {
+ // Update frame's bytecode pc because the debugger depends on it.
+ Address pcAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfInterpreterPC());
+ masm.storePtr(InterpreterPCReg, pcAddr);
+ }
+ }
+
+ // Load the return address in scratch1.
+ masm.loadAbiReturnAddress(scratch1);
+
+ // Load BaselineFrame pointer in scratch2.
+ masm.loadBaselineFramePtr(FramePointer, scratch2);
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
+ // the stub frame has a nullptr ICStub pointer, since this pointer is marked
+ // during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch3);
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, const uint8_t*);
+ VMFunctionId id = VMFunctionToId<Fn, jit::HandleDebugTrap>::id;
+ TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(id);
+
+ masm.push(scratch1);
+ masm.push(scratch2);
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ if (kind == DebugTrapHandlerKind::Interpreter) {
+ // We have to reload the bytecode pc register.
+ Address pcAddr(FramePointer, BaselineFrame::reverseOffsetOfInterpreterPC());
+ masm.loadPtr(pcAddr, InterpreterPCRegAtDispatch);
+ }
+ masm.abiret();
+
+ Linker linker(masm);
+ JitCode* handlerCode = linker.newCode(cx, CodeKind::Other);
+ if (!handlerCode) {
+ return nullptr;
+ }
+
+ CollectPerfSpewerJitCodeProfile(handlerCode, "DebugTrapHandler");
+
+#ifdef MOZ_VTUNE
+ vtune::MarkStub(handlerCode, "DebugTrapHandler");
+#endif
+
+ return handlerCode;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/BaselineCodeGen.h b/js/src/jit/BaselineCodeGen.h
new file mode 100644
index 0000000000..16df7d3957
--- /dev/null
+++ b/js/src/jit/BaselineCodeGen.h
@@ -0,0 +1,521 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineCodeGen_h
+#define jit_BaselineCodeGen_h
+
+#include "jit/BaselineFrameInfo.h"
+#include "jit/BytecodeAnalysis.h"
+#include "jit/FixedList.h"
+#include "jit/MacroAssembler.h"
+#include "jit/PerfSpewer.h"
+
+namespace js {
+
+namespace jit {
+
+enum class ScriptGCThingType {
+ Atom,
+ String,
+ RegExp,
+ Object,
+ Function,
+ Scope,
+ BigInt
+};
+
+// Base class for BaselineCompiler and BaselineInterpreterGenerator. The
+// Handler template parameter is a class storing fields/methods that are
+// interpreter- or compiler-specific. This can be combined with template
+// specialization of methods in this class to specialize behavior.
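+//
+// For example, BaselineCodeGen.cpp provides per-handler specializations of
+// individual ops, such as:
+//
+//   template <>
+//   bool BaselineCompilerCodeGen::emit_EnvCallee() { ... }
+//
+//   template <>
+//   bool BaselineInterpreterCodeGen::emit_EnvCallee() { ... }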
+template <typename Handler>
+class BaselineCodeGen {
+ protected:
+ Handler handler;
+
+ JSContext* cx;
+ StackMacroAssembler masm;
+
+ typename Handler::FrameInfoT& frame;
+
+ // Shared epilogue code to return to the caller.
+ NonAssertingLabel return_;
+
+ NonAssertingLabel postBarrierSlot_;
+
+ // Prologue code where we resume for Ion prologue bailouts.
+ NonAssertingLabel bailoutPrologue_;
+
+ CodeOffset profilerEnterFrameToggleOffset_;
+ CodeOffset profilerExitFrameToggleOffset_;
+
+  // Early Ion bailouts will enter at this address. This is after frame
+  // construction and before the environment chain is initialized.
+ CodeOffset bailoutPrologueOffset_;
+
+  // The Baseline Interpreter can enter Baseline Compiler code at this address,
+  // right after the warm-up counter check in the prologue.
+ CodeOffset warmUpCheckPrologueOffset_;
+
+ uint32_t pushedBeforeCall_ = 0;
+#ifdef DEBUG
+ bool inCall_ = false;
+#endif
+
+ template <typename... HandlerArgs>
+ explicit BaselineCodeGen(JSContext* cx, TempAllocator& alloc,
+ HandlerArgs&&... args);
+
+ template <typename T>
+ void pushArg(const T& t) {
+ masm.Push(t);
+ }
+
+ // Pushes the current script as argument for a VM function.
+ void pushScriptArg();
+
+ // Pushes the bytecode pc as argument for a VM function.
+ void pushBytecodePCArg();
+
+ // Pushes a name/object/scope associated with the current bytecode op (and
+ // stored in the script) as argument for a VM function.
+ void loadScriptGCThing(ScriptGCThingType type, Register dest,
+ Register scratch);
+ void pushScriptGCThingArg(ScriptGCThingType type, Register scratch1,
+ Register scratch2);
+ void pushScriptNameArg(Register scratch1, Register scratch2);
+
+ // Pushes a bytecode operand as argument for a VM function.
+ void pushUint8BytecodeOperandArg(Register scratch);
+ void pushUint16BytecodeOperandArg(Register scratch);
+
+ void loadInt32LengthBytecodeOperand(Register dest);
+ void loadNumFormalArguments(Register dest);
+
+ // Loads the current JSScript* in dest.
+ void loadScript(Register dest);
+
+ void saveInterpreterPCReg();
+ void restoreInterpreterPCReg();
+
+ // Subtracts |script->nslots() * sizeof(Value)| from reg.
+ void subtractScriptSlotsSize(Register reg, Register scratch);
+
+ // Jump to the script's resume entry indicated by resumeIndex.
+ void jumpToResumeEntry(Register resumeIndex, Register scratch1,
+ Register scratch2);
+
+ // Load the global's lexical environment.
+ void loadGlobalLexicalEnvironment(Register dest);
+ void pushGlobalLexicalEnvironmentValue(ValueOperand scratch);
+
+ // Load the |this|-value from the global's lexical environment.
+ void loadGlobalThisValue(ValueOperand dest);
+
+ // Computes the frame size. See BaselineFrame::debugFrameSize_.
+ void computeFrameSize(Register dest);
+
+ void prepareVMCall();
+
+ void storeFrameSizeAndPushDescriptor(uint32_t argSize, Register scratch);
+
+ enum class CallVMPhase { BeforePushingLocals, AfterPushingLocals };
+ bool callVMInternal(VMFunctionId id, RetAddrEntry::Kind kind,
+ CallVMPhase phase);
+
+ template <typename Fn, Fn fn>
+ bool callVM(RetAddrEntry::Kind kind = RetAddrEntry::Kind::CallVM,
+ CallVMPhase phase = CallVMPhase::AfterPushingLocals);
+
+ template <typename Fn, Fn fn>
+ bool callVMNonOp(CallVMPhase phase = CallVMPhase::AfterPushingLocals) {
+ return callVM<Fn, fn>(RetAddrEntry::Kind::NonOpCallVM, phase);
+ }
+
+ // ifDebuggee should be a function emitting code for when the script is a
+ // debuggee script. ifNotDebuggee (if present) is called to emit code for
+ // non-debuggee scripts.
+ template <typename F1, typename F2>
+ [[nodiscard]] bool emitDebugInstrumentation(
+ const F1& ifDebuggee, const mozilla::Maybe<F2>& ifNotDebuggee);
+ template <typename F>
+ [[nodiscard]] bool emitDebugInstrumentation(const F& ifDebuggee) {
+ return emitDebugInstrumentation(ifDebuggee, mozilla::Maybe<F>());
+ }
+
+ bool emitSuspend(JSOp op);
+
+ template <typename F>
+ [[nodiscard]] bool emitAfterYieldDebugInstrumentation(const F& ifDebuggee,
+ Register scratch);
+
+ // ifSet should be a function emitting code for when the script has |flag|
+ // set. ifNotSet emits code for when the flag isn't set.
+ template <typename F1, typename F2>
+ [[nodiscard]] bool emitTestScriptFlag(JSScript::ImmutableFlags flag,
+ const F1& ifSet, const F2& ifNotSet,
+ Register scratch);
+
+ // If |script->hasFlag(flag) == value|, execute the code emitted by |emit|.
+ template <typename F>
+ [[nodiscard]] bool emitTestScriptFlag(JSScript::ImmutableFlags flag,
+ bool value, const F& emit,
+ Register scratch);
+ template <typename F>
+ [[nodiscard]] bool emitTestScriptFlag(JSScript::MutableFlags flag, bool value,
+ const F& emit, Register scratch);
+
+ [[nodiscard]] bool emitEnterGeneratorCode(Register script,
+ Register resumeIndex,
+ Register scratch);
+
+ void emitInterpJumpToResumeEntry(Register script, Register resumeIndex,
+ Register scratch);
+ void emitJumpToInterpretOpLabel();
+
+ [[nodiscard]] bool emitCheckThis(ValueOperand val, bool reinit = false);
+ void emitLoadReturnValue(ValueOperand val);
+ void emitGetAliasedVar(ValueOperand dest);
+ [[nodiscard]] bool emitGetAliasedDebugVar(ValueOperand dest);
+
+ [[nodiscard]] bool emitNextIC();
+ [[nodiscard]] bool emitInterruptCheck();
+ [[nodiscard]] bool emitWarmUpCounterIncrement();
+
+#define EMIT_OP(op, ...) bool emit_##op();
+ FOR_EACH_OPCODE(EMIT_OP)
+#undef EMIT_OP
+
+ // JSOp::Pos, JSOp::Neg, JSOp::BitNot, JSOp::Inc, JSOp::Dec, JSOp::ToNumeric.
+ [[nodiscard]] bool emitUnaryArith();
+
+ // JSOp::BitXor, JSOp::Lsh, JSOp::Add etc.
+ [[nodiscard]] bool emitBinaryArith();
+
+ // Handles JSOp::Lt, JSOp::Gt, and friends
+ [[nodiscard]] bool emitCompare();
+
+ // Handles JSOp::NewObject and JSOp::NewInit.
+ [[nodiscard]] bool emitNewObject();
+
+ // For a JOF_JUMP op, jumps to the op's jump target.
+ void emitJump();
+
+ // For a JOF_JUMP op, jumps to the op's jump target depending on the Value
+ // in |val|.
+ void emitTestBooleanTruthy(bool branchIfTrue, ValueOperand val);
+
+ // Converts |val| to an index in the jump table and stores this in |dest|
+ // or branches to the default pc if not int32 or out-of-range.
+ void emitGetTableSwitchIndex(ValueOperand val, Register dest,
+ Register scratch1, Register scratch2);
+
+ // Jumps to the target of a table switch based on |key| and the
+ // firstResumeIndex stored in JSOp::TableSwitch.
+ void emitTableSwitchJump(Register key, Register scratch1, Register scratch2);
+
+ [[nodiscard]] bool emitReturn();
+
+ [[nodiscard]] bool emitTest(bool branchIfTrue);
+ [[nodiscard]] bool emitAndOr(bool branchIfTrue);
+ [[nodiscard]] bool emitCoalesce();
+
+ [[nodiscard]] bool emitCall(JSOp op);
+ [[nodiscard]] bool emitSpreadCall(JSOp op);
+
+ [[nodiscard]] bool emitDelElem(bool strict);
+ [[nodiscard]] bool emitDelProp(bool strict);
+ [[nodiscard]] bool emitSetElemSuper(bool strict);
+ [[nodiscard]] bool emitSetPropSuper(bool strict);
+
+ // Try to bake in the result of BindGName instead of using an IC.
+ // Return true if we managed to optimize the op.
+ bool tryOptimizeBindGlobalName();
+
+ [[nodiscard]] bool emitInitPropGetterSetter();
+ [[nodiscard]] bool emitInitElemGetterSetter();
+
+ [[nodiscard]] bool emitFormalArgAccess(JSOp op);
+
+ [[nodiscard]] bool emitUninitializedLexicalCheck(const ValueOperand& val);
+
+ [[nodiscard]] bool emitIsMagicValue();
+
+ void getEnvironmentCoordinateObject(Register reg);
+ Address getEnvironmentCoordinateAddressFromObject(Register objReg,
+ Register reg);
+ Address getEnvironmentCoordinateAddress(Register reg);
+
+ [[nodiscard]] bool emitPrologue();
+ [[nodiscard]] bool emitEpilogue();
+ [[nodiscard]] bool emitOutOfLinePostBarrierSlot();
+ [[nodiscard]] bool emitStackCheck();
+ [[nodiscard]] bool emitDebugPrologue();
+ [[nodiscard]] bool emitDebugEpilogue();
+
+ [[nodiscard]] bool initEnvironmentChain();
+
+ [[nodiscard]] bool emitHandleCodeCoverageAtPrologue();
+
+ void emitInitFrameFields(Register nonFunctionEnv);
+ [[nodiscard]] bool emitIsDebuggeeCheck();
+ void emitInitializeLocals();
+
+ void emitProfilerEnterFrame();
+ void emitProfilerExitFrame();
+};
+
+using RetAddrEntryVector = js::Vector<RetAddrEntry, 16, SystemAllocPolicy>;
+
+// Interface used by BaselineCodeGen for BaselineCompiler.
+class BaselineCompilerHandler {
+ CompilerFrameInfo frame_;
+ TempAllocator& alloc_;
+ BytecodeAnalysis analysis_;
+#ifdef DEBUG
+ const MacroAssembler& masm_;
+#endif
+ FixedList<Label> labels_;
+ RetAddrEntryVector retAddrEntries_;
+
+ // Native code offsets for OSR at JSOp::LoopHead ops.
+ using OSREntryVector =
+ Vector<BaselineScript::OSREntry, 16, SystemAllocPolicy>;
+ OSREntryVector osrEntries_;
+
+ JSScript* script_;
+ jsbytecode* pc_;
+
+ // Index of the current ICEntry in the script's JitScript.
+ uint32_t icEntryIndex_;
+
+ bool compileDebugInstrumentation_;
+ bool ionCompileable_;
+
+ public:
+ using FrameInfoT = CompilerFrameInfo;
+
+ BaselineCompilerHandler(JSContext* cx, MacroAssembler& masm,
+ TempAllocator& alloc, JSScript* script);
+
+ [[nodiscard]] bool init(JSContext* cx);
+
+ CompilerFrameInfo& frame() { return frame_; }
+
+ jsbytecode* pc() const { return pc_; }
+ jsbytecode* maybePC() const { return pc_; }
+
+ void moveToNextPC() { pc_ += GetBytecodeLength(pc_); }
+ Label* labelOf(jsbytecode* pc) { return &labels_[script_->pcToOffset(pc)]; }
+
+ bool isDefinitelyLastOp() const { return pc_ == script_->lastPC(); }
+
+ bool shouldEmitDebugEpilogueAtReturnOp() const {
+ // The JIT uses the return address -> pc mapping and bakes in the pc
+ // argument so the DebugEpilogue call needs to be part of the returning
+ // bytecode op for this to work.
+ return true;
+ }
+
+ JSScript* script() const { return script_; }
+ JSScript* maybeScript() const { return script_; }
+
+ JSFunction* function() const { return script_->function(); }
+ JSFunction* maybeFunction() const { return function(); }
+
+ ModuleObject* module() const { return script_->module(); }
+
+ void setCompileDebugInstrumentation() { compileDebugInstrumentation_ = true; }
+ bool compileDebugInstrumentation() const {
+ return compileDebugInstrumentation_;
+ }
+
+ bool maybeIonCompileable() const { return ionCompileable_; }
+
+ uint32_t icEntryIndex() const { return icEntryIndex_; }
+ void moveToNextICEntry() { icEntryIndex_++; }
+
+ BytecodeAnalysis& analysis() { return analysis_; }
+
+ RetAddrEntryVector& retAddrEntries() { return retAddrEntries_; }
+ OSREntryVector& osrEntries() { return osrEntries_; }
+
+ [[nodiscard]] bool recordCallRetAddr(JSContext* cx, RetAddrEntry::Kind kind,
+ uint32_t retOffset);
+
+ // If a script has more |nslots| than this the stack check must account
+ // for these slots explicitly.
+ bool mustIncludeSlotsInStackCheck() const {
+ static constexpr size_t NumSlotsLimit = 128;
+ return script()->nslots() > NumSlotsLimit;
+ }
+
+ bool canHaveFixedSlots() const { return script()->nfixed() != 0; }
+};
+
+using BaselineCompilerCodeGen = BaselineCodeGen<BaselineCompilerHandler>;
+
+class BaselineCompiler final : private BaselineCompilerCodeGen {
+ // Native code offsets for bytecode ops in the script's resume offsets list.
+ ResumeOffsetEntryVector resumeOffsetEntries_;
+
+ // Native code offsets for debug traps if the script is compiled with debug
+ // instrumentation.
+ using DebugTrapEntryVector =
+ Vector<BaselineScript::DebugTrapEntry, 0, SystemAllocPolicy>;
+ DebugTrapEntryVector debugTrapEntries_;
+
+ CodeOffset profilerPushToggleOffset_;
+
+ BaselinePerfSpewer perfSpewer_;
+
+ public:
+ BaselineCompiler(JSContext* cx, TempAllocator& alloc, JSScript* script);
+ [[nodiscard]] bool init();
+
+ MethodStatus compile();
+
+ bool compileDebugInstrumentation() const {
+ return handler.compileDebugInstrumentation();
+ }
+ void setCompileDebugInstrumentation() {
+ handler.setCompileDebugInstrumentation();
+ }
+
+ private:
+ MethodStatus emitBody();
+
+ [[nodiscard]] bool emitDebugTrap();
+};
+
+// Interface used by BaselineCodeGen for BaselineInterpreterGenerator.
+class BaselineInterpreterHandler {
+ InterpreterFrameInfo frame_;
+
+ // Entry point to start interpreting a bytecode op. No registers are live. PC
+ // is loaded from the frame.
+ NonAssertingLabel interpretOp_;
+
+ // Like interpretOp_ but at this point the PC is expected to be in
+ // InterpreterPCReg.
+ NonAssertingLabel interpretOpWithPCReg_;
+
+ // Offsets of toggled jumps for debugger instrumentation.
+ using CodeOffsetVector = Vector<uint32_t, 0, SystemAllocPolicy>;
+ CodeOffsetVector debugInstrumentationOffsets_;
+
+ // Offsets of toggled jumps for code coverage instrumentation.
+ CodeOffsetVector codeCoverageOffsets_;
+ NonAssertingLabel codeCoverageAtPrologueLabel_;
+ NonAssertingLabel codeCoverageAtPCLabel_;
+
+ // Offsets of IC calls for IsIonInlinableOp ops, for Ion bailouts.
+ BaselineInterpreter::ICReturnOffsetVector icReturnOffsets_;
+
+ // Offsets of some callVMs for BaselineDebugModeOSR.
+ BaselineInterpreter::CallVMOffsets callVMOffsets_;
+
+ // The current JSOp we are emitting interpreter code for.
+ mozilla::Maybe<JSOp> currentOp_;
+
+ public:
+ using FrameInfoT = InterpreterFrameInfo;
+
+ explicit BaselineInterpreterHandler(JSContext* cx, MacroAssembler& masm);
+
+ InterpreterFrameInfo& frame() { return frame_; }
+
+ Label* interpretOpLabel() { return &interpretOp_; }
+ Label* interpretOpWithPCRegLabel() { return &interpretOpWithPCReg_; }
+
+ Label* codeCoverageAtPrologueLabel() { return &codeCoverageAtPrologueLabel_; }
+ Label* codeCoverageAtPCLabel() { return &codeCoverageAtPCLabel_; }
+
+ CodeOffsetVector& debugInstrumentationOffsets() {
+ return debugInstrumentationOffsets_;
+ }
+ CodeOffsetVector& codeCoverageOffsets() { return codeCoverageOffsets_; }
+
+ BaselineInterpreter::ICReturnOffsetVector& icReturnOffsets() {
+ return icReturnOffsets_;
+ }
+
+ void setCurrentOp(JSOp op) { currentOp_.emplace(op); }
+ void resetCurrentOp() { currentOp_.reset(); }
+ mozilla::Maybe<JSOp> currentOp() const { return currentOp_; }
+
+  // The interpreter doesn't know the script and pc statically.
+ jsbytecode* maybePC() const { return nullptr; }
+ bool isDefinitelyLastOp() const { return false; }
+ JSScript* maybeScript() const { return nullptr; }
+ JSFunction* maybeFunction() const { return nullptr; }
+
+ bool shouldEmitDebugEpilogueAtReturnOp() const {
+    // The interpreter doesn't use the return address -> pc mapping and doesn't
+    // bake in bytecode PCs, so it can emit a shared DebugEpilogue call instead
+    // of duplicating it for every return op.
+ return false;
+ }
+
+ [[nodiscard]] bool addDebugInstrumentationOffset(JSContext* cx,
+ CodeOffset offset);
+
+ const BaselineInterpreter::CallVMOffsets& callVMOffsets() const {
+ return callVMOffsets_;
+ }
+
+ [[nodiscard]] bool recordCallRetAddr(JSContext* cx, RetAddrEntry::Kind kind,
+ uint32_t retOffset);
+
+ bool maybeIonCompileable() const { return true; }
+
+  // The interpreter doesn't know the number of slots statically, so we always
+  // include them.
+ bool mustIncludeSlotsInStackCheck() const { return true; }
+
+ bool canHaveFixedSlots() const { return true; }
+};
+
+using BaselineInterpreterCodeGen = BaselineCodeGen<BaselineInterpreterHandler>;
+
+class BaselineInterpreterGenerator final : private BaselineInterpreterCodeGen {
+ // Offsets of patchable call instructions for debugger breakpoints/stepping.
+ Vector<uint32_t, 0, SystemAllocPolicy> debugTrapOffsets_;
+
+ // Offsets of move instructions for tableswitch base address.
+ Vector<CodeOffset, 0, SystemAllocPolicy> tableLabels_;
+
+ // Offset of the first tableswitch entry.
+ uint32_t tableOffset_ = 0;
+
+ // Offset of the code to start interpreting a bytecode op.
+ uint32_t interpretOpOffset_ = 0;
+
+ // Like interpretOpOffset_ but skips the debug trap for the current op.
+ uint32_t interpretOpNoDebugTrapOffset_ = 0;
+
+ // Offset of the jump (tail call) to the debug trap handler trampoline code.
+ // When the debugger is enabled, NOPs are patched to calls to this location.
+ uint32_t debugTrapHandlerOffset_ = 0;
+
+ BaselineInterpreterPerfSpewer perfSpewer_;
+
+ public:
+ explicit BaselineInterpreterGenerator(JSContext* cx, TempAllocator& alloc);
+
+ [[nodiscard]] bool generate(BaselineInterpreter& interpreter);
+
+ private:
+ [[nodiscard]] bool emitInterpreterLoop();
+ [[nodiscard]] bool emitDebugTrap();
+
+ void emitOutOfLineCodeCoverageInstrumentation();
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineCodeGen_h */
diff --git a/js/src/jit/BaselineDebugModeOSR.cpp b/js/src/jit/BaselineDebugModeOSR.cpp
new file mode 100644
index 0000000000..bbdcccfebc
--- /dev/null
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -0,0 +1,561 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineDebugModeOSR.h"
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Invalidation.h"
+#include "jit/IonScript.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JSJitFrameIter.h"
+
+#include "jit/JitScript-inl.h"
+#include "jit/JSJitFrameIter-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
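+// Per-script bookkeeping for debug mode OSR: the script to recompile, the
+// BaselineScript it had when it was collected, and (for Baseline JIT frames)
+// the pc offset and RetAddrEntry kind describing where the frame will resume,
+// so its return address can be patched afterwards.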
+struct DebugModeOSREntry {
+ JSScript* script;
+ BaselineScript* oldBaselineScript;
+ uint32_t pcOffset;
+ RetAddrEntry::Kind frameKind;
+
+ explicit DebugModeOSREntry(JSScript* script)
+ : script(script),
+ oldBaselineScript(script->baselineScript()),
+ pcOffset(uint32_t(-1)),
+ frameKind(RetAddrEntry::Kind::Invalid) {}
+
+ DebugModeOSREntry(JSScript* script, const RetAddrEntry& retAddrEntry)
+ : script(script),
+ oldBaselineScript(script->baselineScript()),
+ pcOffset(retAddrEntry.pcOffset()),
+ frameKind(retAddrEntry.kind()) {
+#ifdef DEBUG
+ MOZ_ASSERT(pcOffset == retAddrEntry.pcOffset());
+ MOZ_ASSERT(frameKind == retAddrEntry.kind());
+#endif
+ }
+
+ DebugModeOSREntry(DebugModeOSREntry&& other)
+ : script(other.script),
+ oldBaselineScript(other.oldBaselineScript),
+ pcOffset(other.pcOffset),
+ frameKind(other.frameKind) {}
+
+ bool recompiled() const {
+ return oldBaselineScript != script->baselineScript();
+ }
+};
+
+using DebugModeOSREntryVector = Vector<DebugModeOSREntry>;
+
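+// Iterates a DebugModeOSREntryVector but visits each script at most once:
+// entries whose script was already seen at a lower index are skipped, since
+// the same script can appear multiple times when several of its frames are on
+// the stack. Typical use, matching the loops further down in this file:
+//
+//   for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
+//     const DebugModeOSREntry& entry = iter.entry();
+//     // ... inspect entry.script, entry.recompiled(), ...
+//   }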
+class UniqueScriptOSREntryIter {
+ const DebugModeOSREntryVector& entries_;
+ size_t index_;
+
+ public:
+ explicit UniqueScriptOSREntryIter(const DebugModeOSREntryVector& entries)
+ : entries_(entries), index_(0) {}
+
+ bool done() { return index_ == entries_.length(); }
+
+ const DebugModeOSREntry& entry() {
+ MOZ_ASSERT(!done());
+ return entries_[index_];
+ }
+
+ UniqueScriptOSREntryIter& operator++() {
+ MOZ_ASSERT(!done());
+ while (++index_ < entries_.length()) {
+ bool unique = true;
+ for (size_t i = 0; i < index_; i++) {
+ if (entries_[i].script == entries_[index_].script) {
+ unique = false;
+ break;
+ }
+ }
+ if (unique) {
+ break;
+ }
+ }
+ return *this;
+ }
+};
+
+static bool CollectJitStackScripts(JSContext* cx,
+ const DebugAPI::ExecutionObservableSet& obs,
+ const ActivationIterator& activation,
+ DebugModeOSREntryVector& entries) {
+ for (OnlyJSJitFrameIter iter(activation); !iter.done(); ++iter) {
+ const JSJitFrameIter& frame = iter.frame();
+ switch (frame.type()) {
+ case FrameType::BaselineJS: {
+ JSScript* script = frame.script();
+
+ if (!obs.shouldRecompileOrInvalidate(script)) {
+ break;
+ }
+
+ BaselineFrame* baselineFrame = frame.baselineFrame();
+
+ if (baselineFrame->runningInInterpreter()) {
+          // Baseline Interpreter frames for scripts that have a BaselineScript
+          // or IonScript don't need to be patched, but they do need to be
+          // invalidated and recompiled. See also CollectInterpreterStackScripts
+          // for C++ interpreter frames.
+ if (!entries.append(DebugModeOSREntry(script))) {
+ return false;
+ }
+ } else {
+ // The frame must be settled on a pc with a RetAddrEntry.
+ uint8_t* retAddr = frame.resumePCinCurrentFrame();
+ const RetAddrEntry& retAddrEntry =
+ script->baselineScript()->retAddrEntryFromReturnAddress(retAddr);
+ if (!entries.append(DebugModeOSREntry(script, retAddrEntry))) {
+ return false;
+ }
+ }
+
+ break;
+ }
+
+ case FrameType::BaselineStub:
+ break;
+
+ case FrameType::IonJS: {
+ InlineFrameIterator inlineIter(cx, &frame);
+ while (true) {
+ if (obs.shouldRecompileOrInvalidate(inlineIter.script())) {
+ if (!entries.append(DebugModeOSREntry(inlineIter.script()))) {
+ return false;
+ }
+ }
+ if (!inlineIter.more()) {
+ break;
+ }
+ ++inlineIter;
+ }
+ break;
+ }
+
+ default:;
+ }
+ }
+
+ return true;
+}
+
+static bool CollectInterpreterStackScripts(
+ JSContext* cx, const DebugAPI::ExecutionObservableSet& obs,
+ const ActivationIterator& activation, DebugModeOSREntryVector& entries) {
+  // Also collect scripts from C++ interpreter frames that have an IonScript
+  // or BaselineScript. These do not need to be patched, but they do need to
+  // be invalidated and recompiled.
+ InterpreterActivation* act = activation.activation()->asInterpreter();
+ for (InterpreterFrameIterator iter(act); !iter.done(); ++iter) {
+ JSScript* script = iter.frame()->script();
+ if (obs.shouldRecompileOrInvalidate(script)) {
+ if (!entries.append(DebugModeOSREntry(iter.frame()->script()))) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+#ifdef JS_JITSPEW
+static const char* RetAddrEntryKindToString(RetAddrEntry::Kind kind) {
+ switch (kind) {
+ case RetAddrEntry::Kind::IC:
+ return "IC";
+ case RetAddrEntry::Kind::CallVM:
+ return "callVM";
+ case RetAddrEntry::Kind::StackCheck:
+ return "stack check";
+ case RetAddrEntry::Kind::InterruptCheck:
+ return "interrupt check";
+ case RetAddrEntry::Kind::DebugTrap:
+ return "debug trap";
+ case RetAddrEntry::Kind::DebugPrologue:
+ return "debug prologue";
+ case RetAddrEntry::Kind::DebugAfterYield:
+ return "debug after yield";
+ case RetAddrEntry::Kind::DebugEpilogue:
+ return "debug epilogue";
+ default:
+ MOZ_CRASH("bad RetAddrEntry kind");
+ }
+}
+#endif // JS_JITSPEW
+
+static void SpewPatchBaselineFrame(const uint8_t* oldReturnAddress,
+ const uint8_t* newReturnAddress,
+ JSScript* script,
+ RetAddrEntry::Kind frameKind,
+ const jsbytecode* pc) {
+ JitSpew(JitSpew_BaselineDebugModeOSR,
+ "Patch return %p -> %p on BaselineJS frame (%s:%u:%u) from %s at %s",
+ oldReturnAddress, newReturnAddress, script->filename(),
+ script->lineno(), script->column(),
+ RetAddrEntryKindToString(frameKind), CodeName(JSOp(*pc)));
+}
+
+static void PatchBaselineFramesForDebugMode(
+ JSContext* cx, const DebugAPI::ExecutionObservableSet& obs,
+ const ActivationIterator& activation, DebugModeOSREntryVector& entries,
+ size_t* start) {
+ //
+ // Recompile Patching Overview
+ //
+ // When toggling debug mode with live baseline scripts on the stack, we
+ // could have entered the VM via the following ways from the baseline
+ // script.
+ //
+ // Off to On:
+ // A. From a non-prologue IC (fallback stub or "can call" stub).
+ // B. From a VM call.
+ // C. From inside the interrupt handler via the prologue stack check.
+ //
+ // On to Off:
+ // - All the ways above.
+ // D. From the debug trap handler.
+ // E. From the debug prologue.
+ // F. From the debug epilogue.
+ // G. From a JSOp::AfterYield instruction.
+ //
+ // In general, we patch the return address from VM calls and ICs to the
+ // corresponding entry in the recompiled BaselineScript. For entries that are
+ // not present in the recompiled script (cases D to G above) we switch the
+ // frame to interpreter mode and resume in the Baseline Interpreter.
+ //
+ // Specifics on what needs to be done are documented below.
+ //
+
+ const BaselineInterpreter& baselineInterp =
+ cx->runtime()->jitRuntime()->baselineInterpreter();
+
+ CommonFrameLayout* prev = nullptr;
+ size_t entryIndex = *start;
+
+ for (OnlyJSJitFrameIter iter(activation); !iter.done(); ++iter) {
+ const JSJitFrameIter& frame = iter.frame();
+ switch (frame.type()) {
+ case FrameType::BaselineJS: {
+ // If the script wasn't recompiled or is not observed, there's
+ // nothing to patch.
+ if (!obs.shouldRecompileOrInvalidate(frame.script())) {
+ break;
+ }
+
+ DebugModeOSREntry& entry = entries[entryIndex];
+
+ if (!entry.recompiled()) {
+ entryIndex++;
+ break;
+ }
+
+ BaselineFrame* baselineFrame = frame.baselineFrame();
+ if (baselineFrame->runningInInterpreter()) {
+ // We recompiled the script's BaselineScript but Baseline Interpreter
+ // frames don't need to be patched.
+ entryIndex++;
+ break;
+ }
+
+ JSScript* script = entry.script;
+ uint32_t pcOffset = entry.pcOffset;
+ jsbytecode* pc = script->offsetToPC(pcOffset);
+
+ MOZ_ASSERT(script == frame.script());
+ MOZ_ASSERT(pcOffset < script->length());
+
+ BaselineScript* bl = script->baselineScript();
+ RetAddrEntry::Kind kind = entry.frameKind;
+ uint8_t* retAddr = nullptr;
+ switch (kind) {
+ case RetAddrEntry::Kind::IC:
+ case RetAddrEntry::Kind::CallVM:
+ case RetAddrEntry::Kind::InterruptCheck:
+ case RetAddrEntry::Kind::StackCheck: {
+ // Cases A, B, C above.
+ //
+ // For the baseline frame here, we resume right after the CallVM or
+ // IC returns.
+ //
+ // For CallVM (case B) the assumption is that all callVMs which can
+ // trigger debug mode OSR are the *only* callVMs generated for their
+ // respective pc locations in the Baseline JIT code.
+ const RetAddrEntry* retAddrEntry = nullptr;
+ switch (kind) {
+ case RetAddrEntry::Kind::IC:
+ case RetAddrEntry::Kind::CallVM:
+ case RetAddrEntry::Kind::InterruptCheck:
+ retAddrEntry = &bl->retAddrEntryFromPCOffset(pcOffset, kind);
+ break;
+ case RetAddrEntry::Kind::StackCheck:
+ retAddrEntry = &bl->prologueRetAddrEntry(kind);
+ break;
+ default:
+ MOZ_CRASH("Unexpected kind");
+ }
+ retAddr = bl->returnAddressForEntry(*retAddrEntry);
+ SpewPatchBaselineFrame(prev->returnAddress(), retAddr, script, kind,
+ pc);
+ break;
+ }
+ case RetAddrEntry::Kind::DebugPrologue:
+ case RetAddrEntry::Kind::DebugEpilogue:
+ case RetAddrEntry::Kind::DebugTrap:
+ case RetAddrEntry::Kind::DebugAfterYield: {
+ // Cases D, E, F, G above.
+ //
+ // Resume in the Baseline Interpreter because these callVMs are not
+ // present in the new BaselineScript if we recompiled without debug
+ // instrumentation.
+ if (kind == RetAddrEntry::Kind::DebugPrologue) {
+ frame.baselineFrame()->switchFromJitToInterpreterAtPrologue(cx);
+ } else {
+ frame.baselineFrame()->switchFromJitToInterpreter(cx, pc);
+ }
+ switch (kind) {
+ case RetAddrEntry::Kind::DebugTrap:
+ // DebugTrap handling is different from the ones below because
+ // it's not a callVM but a trampoline call at the start of the
+ // bytecode op. When we return to the frame we can resume at the
+ // interpretOp label.
+ retAddr = baselineInterp.interpretOpAddr().value;
+ break;
+ case RetAddrEntry::Kind::DebugPrologue:
+ retAddr = baselineInterp.retAddrForDebugPrologueCallVM();
+ break;
+ case RetAddrEntry::Kind::DebugEpilogue:
+ retAddr = baselineInterp.retAddrForDebugEpilogueCallVM();
+ break;
+ case RetAddrEntry::Kind::DebugAfterYield:
+ retAddr = baselineInterp.retAddrForDebugAfterYieldCallVM();
+ break;
+ default:
+ MOZ_CRASH("Unexpected kind");
+ }
+ SpewPatchBaselineFrame(prev->returnAddress(), retAddr, script, kind,
+ pc);
+ break;
+ }
+ case RetAddrEntry::Kind::NonOpCallVM:
+ case RetAddrEntry::Kind::Invalid:
+ // These cannot trigger BaselineDebugModeOSR.
+ MOZ_CRASH("Unexpected RetAddrEntry Kind");
+ }
+
+ prev->setReturnAddress(retAddr);
+ entryIndex++;
+ break;
+ }
+
+ case FrameType::IonJS: {
+ // Nothing to patch.
+ InlineFrameIterator inlineIter(cx, &frame);
+ while (true) {
+ if (obs.shouldRecompileOrInvalidate(inlineIter.script())) {
+ entryIndex++;
+ }
+ if (!inlineIter.more()) {
+ break;
+ }
+ ++inlineIter;
+ }
+ break;
+ }
+
+ default:;
+ }
+
+ prev = frame.current();
+ }
+
+ *start = entryIndex;
+}
+
+static void SkipInterpreterFrameEntries(
+ const DebugAPI::ExecutionObservableSet& obs,
+ const ActivationIterator& activation, size_t* start) {
+ size_t entryIndex = *start;
+
+ // Skip interpreter frames, which do not need patching.
+ InterpreterActivation* act = activation.activation()->asInterpreter();
+ for (InterpreterFrameIterator iter(act); !iter.done(); ++iter) {
+ if (obs.shouldRecompileOrInvalidate(iter.frame()->script())) {
+ entryIndex++;
+ }
+ }
+
+ *start = entryIndex;
+}
+
+static bool RecompileBaselineScriptForDebugMode(
+ JSContext* cx, JSScript* script, DebugAPI::IsObserving observing) {
+ // If a script is on the stack multiple times, it may have already
+ // been recompiled.
+ if (script->baselineScript()->hasDebugInstrumentation() == observing) {
+ return true;
+ }
+
+ JitSpew(JitSpew_BaselineDebugModeOSR, "Recompiling (%s:%u:%u) for %s",
+ script->filename(), script->lineno(), script->column(),
+ observing ? "DEBUGGING" : "NORMAL EXECUTION");
+
+ AutoKeepJitScripts keepJitScripts(cx);
+ BaselineScript* oldBaselineScript =
+ script->jitScript()->clearBaselineScript(cx->gcContext(), script);
+
+ MethodStatus status =
+ BaselineCompile(cx, script, /* forceDebugMode = */ observing);
+ if (status != Method_Compiled) {
+ // We will only fail to recompile for debug mode due to OOM. Restore
+ // the old baseline script in case something doesn't properly
+ // propagate OOM.
+ MOZ_ASSERT(status == Method_Error);
+ script->jitScript()->setBaselineScript(script, oldBaselineScript);
+ return false;
+ }
+
+  // Don't destroy the old baseline script yet: if any of the recompiles
+  // fails, we need to roll back all the old baseline scripts.
+ MOZ_ASSERT(script->baselineScript()->hasDebugInstrumentation() == observing);
+ return true;
+}
+
+static bool InvalidateScriptsInZone(JSContext* cx, Zone* zone,
+ const Vector<DebugModeOSREntry>& entries) {
+ RecompileInfoVector invalid;
+ for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
+ JSScript* script = iter.entry().script;
+ if (script->zone() != zone) {
+ continue;
+ }
+
+ if (script->hasIonScript()) {
+ if (!invalid.emplaceBack(script, script->ionScript()->compilationId())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ // Cancel off-thread Ion compile for anything that has a
+ // BaselineScript. If we relied on the call to Invalidate below to
+ // cancel off-thread Ion compiles, only those with existing IonScripts
+ // would be cancelled.
+ if (script->hasBaselineScript()) {
+ CancelOffThreadIonCompile(script);
+ }
+ }
+
+ // No need to cancel off-thread Ion compiles again, we already did it
+ // above.
+ Invalidate(cx, invalid,
+ /* resetUses = */ true, /* cancelOffThread = */ false);
+ return true;
+}
+
+static void UndoRecompileBaselineScriptsForDebugMode(
+ JSContext* cx, const DebugModeOSREntryVector& entries) {
+ // In case of failure, roll back the entire set of active scripts so that
+ // we don't have to patch return addresses on the stack.
+ for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
+ const DebugModeOSREntry& entry = iter.entry();
+ JSScript* script = entry.script;
+ BaselineScript* baselineScript = script->baselineScript();
+ if (entry.recompiled()) {
+ script->jitScript()->setBaselineScript(script, entry.oldBaselineScript);
+ BaselineScript::Destroy(cx->gcContext(), baselineScript);
+ }
+ }
+}
+
+bool jit::RecompileOnStackBaselineScriptsForDebugMode(
+ JSContext* cx, const DebugAPI::ExecutionObservableSet& obs,
+ DebugAPI::IsObserving observing) {
+ // First recompile the active scripts on the stack and patch the live
+ // frames.
+ Vector<DebugModeOSREntry> entries(cx);
+
+ for (ActivationIterator iter(cx); !iter.done(); ++iter) {
+ if (iter->isJit()) {
+ if (!CollectJitStackScripts(cx, obs, iter, entries)) {
+ return false;
+ }
+ } else if (iter->isInterpreter()) {
+ if (!CollectInterpreterStackScripts(cx, obs, iter, entries)) {
+ return false;
+ }
+ }
+ }
+
+ if (entries.empty()) {
+ return true;
+ }
+
+  // When the profiler is enabled, sampling must already be suppressed,
+  // since the baseline JIT scripts are in a state of flux.
+ MOZ_ASSERT(!cx->isProfilerSamplingEnabled());
+
+ // Invalidate all scripts we are recompiling.
+ if (Zone* zone = obs.singleZone()) {
+ if (!InvalidateScriptsInZone(cx, zone, entries)) {
+ return false;
+ }
+ } else {
+ using ZoneRange = DebugAPI::ExecutionObservableSet::ZoneRange;
+ for (ZoneRange r = obs.zones()->all(); !r.empty(); r.popFront()) {
+ if (!InvalidateScriptsInZone(cx, r.front(), entries)) {
+ return false;
+ }
+ }
+ }
+
+ // Try to recompile all the scripts. If we encounter an error, we need to
+ // roll back as if none of the compilations happened, so that we don't
+ // crash.
+ for (size_t i = 0; i < entries.length(); i++) {
+ JSScript* script = entries[i].script;
+ AutoRealm ar(cx, script);
+ if (!RecompileBaselineScriptForDebugMode(cx, script, observing)) {
+ UndoRecompileBaselineScriptsForDebugMode(cx, entries);
+ return false;
+ }
+ }
+
+ // If all recompiles succeeded, destroy the old baseline scripts and patch
+ // the live frames.
+ //
+ // After this point the function must be infallible.
+
+ for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
+ const DebugModeOSREntry& entry = iter.entry();
+ if (entry.recompiled()) {
+ BaselineScript::Destroy(cx->gcContext(), entry.oldBaselineScript);
+ }
+ }
+
+ size_t processed = 0;
+ for (ActivationIterator iter(cx); !iter.done(); ++iter) {
+ if (iter->isJit()) {
+ PatchBaselineFramesForDebugMode(cx, obs, iter, entries, &processed);
+ } else if (iter->isInterpreter()) {
+ SkipInterpreterFrameEntries(obs, iter, &processed);
+ }
+ }
+ MOZ_ASSERT(processed == entries.length());
+
+ return true;
+}
diff --git a/js/src/jit/BaselineDebugModeOSR.h b/js/src/jit/BaselineDebugModeOSR.h
new file mode 100644
index 0000000000..dc7df161e6
--- /dev/null
+++ b/js/src/jit/BaselineDebugModeOSR.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineDebugModeOSR_h
+#define jit_BaselineDebugModeOSR_h
+
+#include "jstypes.h"
+
+#include "debugger/DebugAPI.h"
+
+struct JS_PUBLIC_API JSContext;
+
+namespace js {
+namespace jit {
+
+// Note that this file and the corresponding .cpp implement debug mode
+// on-stack recompilation. This is to be distinguished from ordinary
+// Baseline->Ion OSR, which is used to jump into compiled loops.
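+//
+// RecompileOnStackBaselineScriptsForDebugMode recompiles every on-stack
+// Baseline script whose debug instrumentation doesn't match |observing| and
+// patches live Baseline JIT frames so they resume in the new code (or in the
+// Baseline Interpreter for debug-only entry points).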
+
+[[nodiscard]] bool RecompileOnStackBaselineScriptsForDebugMode(
+ JSContext* cx, const DebugAPI::ExecutionObservableSet& obs,
+ DebugAPI::IsObserving observing);
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_BaselineDebugModeOSR_h
diff --git a/js/src/jit/BaselineFrame-inl.h b/js/src/jit/BaselineFrame-inl.h
new file mode 100644
index 0000000000..73eb9026ac
--- /dev/null
+++ b/js/src/jit/BaselineFrame-inl.h
@@ -0,0 +1,131 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineFrame_inl_h
+#define jit_BaselineFrame_inl_h
+
+#include "jit/BaselineFrame.h"
+
+#include "jit/TrialInlining.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/NativeObject-inl.h" // js::NativeObject::initDenseElementsFromRange
+
+namespace js {
+namespace jit {
+
+template <typename SpecificEnvironment>
+inline void BaselineFrame::pushOnEnvironmentChain(SpecificEnvironment& env) {
+ MOZ_ASSERT(*environmentChain() == env.enclosingEnvironment());
+ envChain_ = &env;
+ if (IsFrameInitialEnvironment(this, env)) {
+ flags_ |= HAS_INITIAL_ENV;
+ }
+}
+
+template <typename SpecificEnvironment>
+inline void BaselineFrame::popOffEnvironmentChain() {
+ MOZ_ASSERT(envChain_->is<SpecificEnvironment>());
+ envChain_ = &envChain_->as<SpecificEnvironment>().enclosingEnvironment();
+}
+
+inline void BaselineFrame::replaceInnermostEnvironment(EnvironmentObject& env) {
+ MOZ_ASSERT(env.enclosingEnvironment() ==
+ envChain_->as<EnvironmentObject>().enclosingEnvironment());
+ envChain_ = &env;
+}
+
+inline bool BaselineFrame::saveGeneratorSlots(JSContext* cx, unsigned nslots,
+ ArrayObject* dest) const {
+ // By convention, generator slots are stored in interpreter order,
+ // which is the reverse of BaselineFrame order.
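+  //
+  // For example (a rough sketch): with nslots == 2, the slots sit in memory as
+  // [slot 1, slot 0] just below |this| because the stack grows down, and the
+  // reversed range below stores them into |dest| as [slot 0, slot 1].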
+
+ MOZ_ASSERT(nslots == numValueSlots(debugFrameSize()) - 1);
+ const Value* end = reinterpret_cast<const Value*>(this);
+ mozilla::Span<const Value> span{end - nslots, end};
+ return dest->initDenseElementsFromRange(cx, span.rbegin(), span.rend());
+}
+
+inline bool BaselineFrame::pushLexicalEnvironment(JSContext* cx,
+ Handle<LexicalScope*> scope) {
+ BlockLexicalEnvironmentObject* env =
+ BlockLexicalEnvironmentObject::createForFrame(cx, scope, this);
+ if (!env) {
+ return false;
+ }
+ pushOnEnvironmentChain(*env);
+
+ return true;
+}
+
+inline bool BaselineFrame::pushClassBodyEnvironment(
+ JSContext* cx, Handle<ClassBodyScope*> scope) {
+ ClassBodyLexicalEnvironmentObject* env =
+ ClassBodyLexicalEnvironmentObject::createForFrame(cx, scope, this);
+ if (!env) {
+ return false;
+ }
+ pushOnEnvironmentChain(*env);
+
+ return true;
+}
+
+inline bool BaselineFrame::freshenLexicalEnvironment(JSContext* cx) {
+ Rooted<BlockLexicalEnvironmentObject*> current(
+ cx, &envChain_->as<BlockLexicalEnvironmentObject>());
+ BlockLexicalEnvironmentObject* clone =
+ BlockLexicalEnvironmentObject::clone(cx, current);
+ if (!clone) {
+ return false;
+ }
+
+ replaceInnermostEnvironment(*clone);
+ return true;
+}
+
+inline bool BaselineFrame::recreateLexicalEnvironment(JSContext* cx) {
+ Rooted<BlockLexicalEnvironmentObject*> current(
+ cx, &envChain_->as<BlockLexicalEnvironmentObject>());
+ BlockLexicalEnvironmentObject* clone =
+ BlockLexicalEnvironmentObject::recreate(cx, current);
+ if (!clone) {
+ return false;
+ }
+
+ replaceInnermostEnvironment(*clone);
+ return true;
+}
+
+inline CallObject& BaselineFrame::callObj() const {
+ MOZ_ASSERT(hasInitialEnvironment());
+ MOZ_ASSERT(callee()->needsCallObject());
+
+ JSObject* obj = environmentChain();
+ while (!obj->is<CallObject>()) {
+ obj = obj->enclosingEnvironment();
+ }
+ return obj->as<CallObject>();
+}
+
+inline JSScript* BaselineFrame::outerScript() const {
+ if (!icScript()->isInlined()) {
+ return script();
+ }
+ return icScript()->inliningRoot()->owningScript();
+}
+
+inline void BaselineFrame::unsetIsDebuggee() {
+ MOZ_ASSERT(!script()->isDebuggee());
+ flags_ &= ~DEBUGGEE;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineFrame_inl_h */
diff --git a/js/src/jit/BaselineFrame.cpp b/js/src/jit/BaselineFrame.cpp
new file mode 100644
index 0000000000..49c70c3735
--- /dev/null
+++ b/js/src/jit/BaselineFrame.cpp
@@ -0,0 +1,180 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineFrame-inl.h"
+
+#include <algorithm>
+
+#include "debugger/DebugAPI.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/JSContext.h"
+
+#include "jit/JSJitFrameIter-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+static void TraceLocals(BaselineFrame* frame, JSTracer* trc, unsigned start,
+ unsigned end) {
+ if (start < end) {
+ // Stack grows down.
+ Value* last = frame->valueSlot(end - 1);
+ TraceRootRange(trc, end - start, last, "baseline-stack");
+ }
+}
+
+void BaselineFrame::trace(JSTracer* trc, const JSJitFrameIter& frameIterator) {
+ replaceCalleeToken(TraceCalleeToken(trc, calleeToken()));
+
+ // Trace |this|, actual and formal args.
+ if (isFunctionFrame()) {
+ TraceRoot(trc, &thisArgument(), "baseline-this");
+
+ unsigned numArgs = std::max(numActualArgs(), numFormalArgs());
+ TraceRootRange(trc, numArgs + isConstructing(), argv(), "baseline-args");
+ }
+
+ // Trace environment chain, if it exists.
+ if (envChain_) {
+ TraceRoot(trc, &envChain_, "baseline-envchain");
+ }
+
+ // Trace return value.
+ if (hasReturnValue()) {
+ TraceRoot(trc, returnValue().address(), "baseline-rval");
+ }
+
+ if (hasArgsObj()) {
+ TraceRoot(trc, &argsObj_, "baseline-args-obj");
+ }
+
+ if (runningInInterpreter()) {
+ TraceRoot(trc, &interpreterScript_, "baseline-interpreterScript");
+ }
+
+ // Trace locals and stack values.
+ JSScript* script = this->script();
+ size_t nfixed = script->nfixed();
+ jsbytecode* pc;
+ frameIterator.baselineScriptAndPc(nullptr, &pc);
+ size_t nlivefixed = script->calculateLiveFixed(pc);
+
+ uint32_t numValueSlots = frameIterator.baselineFrameNumValueSlots();
+
+  // NB: It is possible that numValueSlots could be zero, even if nfixed is
+  // nonzero. This is the case when we're initializing the environment chain
+  // or when the prologue stack check failed.
+ if (numValueSlots > 0) {
+ MOZ_ASSERT(nfixed <= numValueSlots);
+
+ if (nfixed == nlivefixed) {
+ // All locals are live.
+ TraceLocals(this, trc, 0, numValueSlots);
+ } else {
+ // Trace operand stack.
+ TraceLocals(this, trc, nfixed, numValueSlots);
+
+ // Clear dead block-scoped locals.
+ while (nfixed > nlivefixed) {
+ unaliasedLocal(--nfixed).setUndefined();
+ }
+
+ // Trace live locals.
+ TraceLocals(this, trc, 0, nlivefixed);
+ }
+ }
+
+ if (auto* debugEnvs = script->realm()->debugEnvs()) {
+ debugEnvs->traceLiveFrame(trc, this);
+ }
+}
+
+bool BaselineFrame::uninlineIsProfilerSamplingEnabled(JSContext* cx) {
+ return cx->isProfilerSamplingEnabled();
+}
+
+bool BaselineFrame::initFunctionEnvironmentObjects(JSContext* cx) {
+ return js::InitFunctionEnvironmentObjects(cx, this);
+}
+
+bool BaselineFrame::pushVarEnvironment(JSContext* cx, Handle<Scope*> scope) {
+ return js::PushVarEnvironmentObject(cx, scope, this);
+}
+
+void BaselineFrame::setInterpreterFields(JSScript* script, jsbytecode* pc) {
+ uint32_t pcOffset = script->pcToOffset(pc);
+ interpreterScript_ = script;
+ interpreterPC_ = pc;
+ MOZ_ASSERT(icScript_);
+ interpreterICEntry_ = icScript_->interpreterICEntryFromPCOffset(pcOffset);
+}
+
+void BaselineFrame::setInterpreterFieldsForPrologue(JSScript* script) {
+ interpreterScript_ = script;
+ interpreterPC_ = script->code();
+ if (icScript_->numICEntries() > 0) {
+ interpreterICEntry_ = &icScript_->icEntry(0);
+ } else {
+ // If the script does not have any ICEntries (possible for non-function
+ // scripts) the interpreterICEntry_ field won't be used. Just set it to
+ // nullptr.
+ interpreterICEntry_ = nullptr;
+ }
+}
+
+bool BaselineFrame::initForOsr(InterpreterFrame* fp, uint32_t numStackValues) {
+ mozilla::PodZero(this);
+
+ envChain_ = fp->environmentChain();
+
+ if (fp->hasInitialEnvironmentUnchecked()) {
+ flags_ |= BaselineFrame::HAS_INITIAL_ENV;
+ }
+
+ if (fp->script()->needsArgsObj() && fp->hasArgsObj()) {
+ flags_ |= BaselineFrame::HAS_ARGS_OBJ;
+ argsObj_ = &fp->argsObj();
+ }
+
+ if (fp->hasReturnValue()) {
+ setReturnValue(fp->returnValue());
+ }
+
+ icScript_ = fp->script()->jitScript()->icScript();
+
+ JSContext* cx =
+ fp->script()->runtimeFromMainThread()->mainContextFromOwnThread();
+
+ Activation* interpActivation = cx->activation()->prev();
+ jsbytecode* pc = interpActivation->asInterpreter()->regs().pc;
+ MOZ_ASSERT(fp->script()->containsPC(pc));
+
+  // We are doing OSR into the Baseline Interpreter. We can get the pc from the
+  // C++ interpreter's activation; we just have to skip the JitActivation.
+ flags_ |= BaselineFrame::RUNNING_IN_INTERPRETER;
+ setInterpreterFields(pc);
+
+#ifdef DEBUG
+ debugFrameSize_ = frameSizeForNumValueSlots(numStackValues);
+ MOZ_ASSERT(debugNumValueSlots() == numStackValues);
+#endif
+
+ for (uint32_t i = 0; i < numStackValues; i++) {
+ *valueSlot(i) = fp->slots()[i];
+ }
+
+ if (fp->isDebuggee()) {
+ // For debuggee frames, update any Debugger.Frame objects for the
+ // InterpreterFrame to point to the BaselineFrame.
+ if (!DebugAPI::handleBaselineOsr(cx, fp, this)) {
+ return false;
+ }
+ setIsDebuggee();
+ }
+
+ return true;
+}
diff --git a/js/src/jit/BaselineFrame.h b/js/src/jit/BaselineFrame.h
new file mode 100644
index 0000000000..6614d23e15
--- /dev/null
+++ b/js/src/jit/BaselineFrame.h
@@ -0,0 +1,373 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineFrame_h
+#define jit_BaselineFrame_h
+
+#include <algorithm>
+
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/ScriptFromCalleeToken.h"
+#include "vm/Stack.h"
+
+namespace js {
+namespace jit {
+
+class ICEntry;
+class ICScript;
+class JSJitFrameIter;
+
+// The stack looks like this, where fp is the frame pointer:
+//
+// fp+y arguments
+// fp => JitFrameLayout (frame header)
+// fp-x BaselineFrame
+// locals
+// stack values
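+//
+// Here x is BaselineFrame::Size(); the locals and stack values are Value slots
+// stored directly below the BaselineFrame (see reverseOffsetOfLocal below),
+// and the arguments are accessed through the JitFrameLayout header.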
+
+class BaselineFrame {
+ public:
+ enum Flags : uint32_t {
+ // The frame has a valid return value. See also InterpreterFrame::HAS_RVAL.
+ HAS_RVAL = 1 << 0,
+
+ // The frame is running in the Baseline interpreter instead of JIT.
+ RUNNING_IN_INTERPRETER = 1 << 1,
+
+ // An initial environment has been pushed on the environment chain for
+ // function frames that need a CallObject or eval frames that need a
+ // VarEnvironmentObject.
+ HAS_INITIAL_ENV = 1 << 2,
+
+ // Frame has an arguments object, argsObj_.
+ HAS_ARGS_OBJ = 1 << 4,
+
+ // See InterpreterFrame::PREV_UP_TO_DATE.
+ PREV_UP_TO_DATE = 1 << 5,
+
+ // Frame has execution observed by a Debugger.
+ //
+ // See comment above 'isDebuggee' in vm/Realm.h for explanation
+ // of invariants of debuggee compartments, scripts, and frames.
+ DEBUGGEE = 1 << 6,
+ };
+
+ protected: // Silence Clang warning about unused private fields.
+ // The fields below are only valid if RUNNING_IN_INTERPRETER.
+ JSScript* interpreterScript_;
+ jsbytecode* interpreterPC_;
+ ICEntry* interpreterICEntry_;
+
+ JSObject* envChain_; // Environment chain (always initialized).
+ ICScript* icScript_; // IC script (initialized if Warp is enabled).
+ ArgumentsObject* argsObj_; // If HAS_ARGS_OBJ, the arguments object.
+
+  // We need to split the Value into two 32-bit fields; otherwise the C++
+  // compiler may add padding between the fields.
+ uint32_t loScratchValue_;
+ uint32_t hiScratchValue_;
+ uint32_t flags_;
+#ifdef DEBUG
+ // Size of the frame. Stored in DEBUG builds when calling into C++. This is
+ // BaselineFrame::Size() + the size of the local and expression stack Values.
+ //
+ // We don't store this in release builds because it's redundant with the frame
+ // size computed from the frame pointers. In debug builds it's still useful
+ // for assertions.
+ uint32_t debugFrameSize_;
+#else
+ uint32_t unused_;
+#endif
+ uint32_t loReturnValue_; // If HAS_RVAL, the frame's return value.
+ uint32_t hiReturnValue_;
+
+ public:
+ [[nodiscard]] bool initForOsr(InterpreterFrame* fp, uint32_t numStackValues);
+
+#ifdef DEBUG
+ uint32_t debugFrameSize() const { return debugFrameSize_; }
+ void setDebugFrameSize(uint32_t frameSize) { debugFrameSize_ = frameSize; }
+#endif
+
+ JSObject* environmentChain() const { return envChain_; }
+ void setEnvironmentChain(JSObject* envChain) { envChain_ = envChain; }
+
+ template <typename SpecificEnvironment>
+ inline void pushOnEnvironmentChain(SpecificEnvironment& env);
+ template <typename SpecificEnvironment>
+ inline void popOffEnvironmentChain();
+ inline void replaceInnermostEnvironment(EnvironmentObject& env);
+
+ CalleeToken calleeToken() const { return framePrefix()->calleeToken(); }
+ void replaceCalleeToken(CalleeToken token) {
+ framePrefix()->replaceCalleeToken(token);
+ }
+ bool isConstructing() const {
+ return CalleeTokenIsConstructing(calleeToken());
+ }
+ JSScript* script() const { return ScriptFromCalleeToken(calleeToken()); }
+ JSFunction* callee() const { return CalleeTokenToFunction(calleeToken()); }
+ Value calleev() const { return ObjectValue(*callee()); }
+
+ size_t numValueSlots(size_t frameSize) const {
+ MOZ_ASSERT(frameSize == debugFrameSize());
+
+ MOZ_ASSERT(frameSize >= BaselineFrame::Size());
+ frameSize -= BaselineFrame::Size();
+
+ MOZ_ASSERT((frameSize % sizeof(Value)) == 0);
+ return frameSize / sizeof(Value);
+ }
+
+#ifdef DEBUG
+ size_t debugNumValueSlots() const { return numValueSlots(debugFrameSize()); }
+#endif
+
+ Value* valueSlot(size_t slot) const {
+ MOZ_ASSERT(slot < debugNumValueSlots());
+ return (Value*)this - (slot + 1);
+ }
+
+ static size_t frameSizeForNumValueSlots(size_t numValueSlots) {
+ return BaselineFrame::Size() + numValueSlots * sizeof(Value);
+ }
+
+ Value& unaliasedFormal(
+ unsigned i, MaybeCheckAliasing checkAliasing = CHECK_ALIASING) const {
+ MOZ_ASSERT(i < numFormalArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals() &&
+ !script()->formalIsAliased(i));
+ return argv()[i];
+ }
+
+ Value& unaliasedActual(
+ unsigned i, MaybeCheckAliasing checkAliasing = CHECK_ALIASING) const {
+ MOZ_ASSERT(i < numActualArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals());
+ MOZ_ASSERT_IF(checkAliasing && i < numFormalArgs(),
+ !script()->formalIsAliased(i));
+ return argv()[i];
+ }
+
+ Value& unaliasedLocal(uint32_t i) const {
+ MOZ_ASSERT(i < script()->nfixed());
+ return *valueSlot(i);
+ }
+
+ unsigned numActualArgs() const { return framePrefix()->numActualArgs(); }
+ unsigned numFormalArgs() const { return script()->function()->nargs(); }
+ Value& thisArgument() const {
+ MOZ_ASSERT(isFunctionFrame());
+ return framePrefix()->thisv();
+ }
+ Value* argv() const { return framePrefix()->actualArgs(); }
+
+ [[nodiscard]] bool saveGeneratorSlots(JSContext* cx, unsigned nslots,
+ ArrayObject* dest) const;
+
+ public:
+ void prepareForBaselineInterpreterToJitOSR() {
+ // Clearing the RUNNING_IN_INTERPRETER flag is sufficient, but we also null
+ // out the interpreter fields to ensure we don't use stale values.
+ flags_ &= ~RUNNING_IN_INTERPRETER;
+ interpreterScript_ = nullptr;
+ interpreterPC_ = nullptr;
+ }
+
+ private:
+ bool uninlineIsProfilerSamplingEnabled(JSContext* cx);
+
+ public:
+ // Switch a JIT frame on the stack to Interpreter mode. The caller is
+ // responsible for patching the return address into this frame to a location
+ // in the interpreter code. Also assert profiler sampling has been suppressed
+ // so the sampler thread doesn't see an inconsistent state while we are
+ // patching frames.
+ void switchFromJitToInterpreter(JSContext* cx, jsbytecode* pc) {
+ MOZ_ASSERT(!uninlineIsProfilerSamplingEnabled(cx));
+ MOZ_ASSERT(!runningInInterpreter());
+ flags_ |= RUNNING_IN_INTERPRETER;
+ setInterpreterFields(pc);
+ }
+ void switchFromJitToInterpreterAtPrologue(JSContext* cx) {
+ MOZ_ASSERT(!uninlineIsProfilerSamplingEnabled(cx));
+ MOZ_ASSERT(!runningInInterpreter());
+ flags_ |= RUNNING_IN_INTERPRETER;
+ setInterpreterFieldsForPrologue(script());
+ }
+
+  // Like switchFromJitToInterpreter, but set the interpreterICEntry_ field to
+  // nullptr. Initializing this field requires a binary search on the
+  // JitScript's ICEntry list, but the exception handler never returns to this
+  // pc anyway, so we can avoid the overhead.
+ void switchFromJitToInterpreterForExceptionHandler(JSContext* cx,
+ jsbytecode* pc) {
+ MOZ_ASSERT(!uninlineIsProfilerSamplingEnabled(cx));
+ MOZ_ASSERT(!runningInInterpreter());
+ flags_ |= RUNNING_IN_INTERPRETER;
+ interpreterScript_ = script();
+ interpreterPC_ = pc;
+ interpreterICEntry_ = nullptr;
+ }
+
+ bool runningInInterpreter() const { return flags_ & RUNNING_IN_INTERPRETER; }
+
+ JSScript* interpreterScript() const {
+ MOZ_ASSERT(runningInInterpreter());
+ return interpreterScript_;
+ }
+
+ jsbytecode* interpreterPC() const {
+ MOZ_ASSERT(runningInInterpreter());
+ return interpreterPC_;
+ }
+
+ void setInterpreterFields(JSScript* script, jsbytecode* pc);
+
+ void setInterpreterFields(jsbytecode* pc) {
+ setInterpreterFields(script(), pc);
+ }
+
+ // Initialize interpreter fields for resuming in the prologue (before the
+ // argument type check ICs).
+ void setInterpreterFieldsForPrologue(JSScript* script);
+
+ ICScript* icScript() const { return icScript_; }
+ void setICScript(ICScript* icScript) { icScript_ = icScript; }
+
+ // The script that owns the current ICScript.
+ JSScript* outerScript() const;
+
+ bool hasReturnValue() const { return flags_ & HAS_RVAL; }
+ MutableHandleValue returnValue() {
+ if (!hasReturnValue()) {
+ addressOfReturnValue()->setUndefined();
+ }
+ return MutableHandleValue::fromMarkedLocation(addressOfReturnValue());
+ }
+ void setReturnValue(const Value& v) {
+ returnValue().set(v);
+ flags_ |= HAS_RVAL;
+ }
+ inline Value* addressOfReturnValue() {
+ return reinterpret_cast<Value*>(&loReturnValue_);
+ }
+
+ bool hasInitialEnvironment() const { return flags_ & HAS_INITIAL_ENV; }
+
+ inline CallObject& callObj() const;
+
+ void setFlags(uint32_t flags) { flags_ = flags; }
+
+ [[nodiscard]] inline bool pushLexicalEnvironment(JSContext* cx,
+ Handle<LexicalScope*> scope);
+ [[nodiscard]] inline bool freshenLexicalEnvironment(JSContext* cx);
+ [[nodiscard]] inline bool recreateLexicalEnvironment(JSContext* cx);
+
+ [[nodiscard]] bool initFunctionEnvironmentObjects(JSContext* cx);
+ [[nodiscard]] bool pushClassBodyEnvironment(JSContext* cx,
+ Handle<ClassBodyScope*> scope);
+ [[nodiscard]] bool pushVarEnvironment(JSContext* cx, Handle<Scope*> scope);
+
+ void initArgsObjUnchecked(ArgumentsObject& argsobj) {
+ flags_ |= HAS_ARGS_OBJ;
+ argsObj_ = &argsobj;
+ }
+ void initArgsObj(ArgumentsObject& argsobj) {
+ MOZ_ASSERT(script()->needsArgsObj());
+ initArgsObjUnchecked(argsobj);
+ }
+ bool hasArgsObj() const { return flags_ & HAS_ARGS_OBJ; }
+ ArgumentsObject& argsObj() const {
+ MOZ_ASSERT(hasArgsObj());
+ MOZ_ASSERT(script()->needsArgsObj());
+ return *argsObj_;
+ }
+
+ bool prevUpToDate() const { return flags_ & PREV_UP_TO_DATE; }
+ void setPrevUpToDate() { flags_ |= PREV_UP_TO_DATE; }
+ void unsetPrevUpToDate() { flags_ &= ~PREV_UP_TO_DATE; }
+
+ bool isDebuggee() const { return flags_ & DEBUGGEE; }
+ void setIsDebuggee() { flags_ |= DEBUGGEE; }
+ inline void unsetIsDebuggee();
+
+ void trace(JSTracer* trc, const JSJitFrameIter& frame);
+
+ bool isGlobalFrame() const { return script()->isGlobalCode(); }
+ bool isModuleFrame() const { return script()->isModule(); }
+ bool isEvalFrame() const { return script()->isForEval(); }
+ bool isFunctionFrame() const {
+ return CalleeTokenIsFunction(calleeToken()) && !isModuleFrame();
+ }
+ bool isDebuggerEvalFrame() const { return false; }
+
+ JitFrameLayout* framePrefix() const {
+ uint8_t* fp = (uint8_t*)this + Size();
+ return (JitFrameLayout*)fp;
+ }
+
+ static size_t Size() { return sizeof(BaselineFrame); }
+
+ // The reverseOffsetOf methods below compute the offset relative to the
+ // frame's base pointer. Since the stack grows down, these offsets are
+ // negative.
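+  //
+  // For example, the flags word lives at fp - Size() + offsetof(BaselineFrame,
+  // flags_), and local |i| lives at fp - Size() - (i + 1) * sizeof(Value);
+  // callers form Address(FramePointer, offset) from these offsets (see
+  // FrameInfo in BaselineFrameInfo.h).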
+
+#ifdef DEBUG
+ static int reverseOffsetOfDebugFrameSize() {
+ return -int(Size()) + offsetof(BaselineFrame, debugFrameSize_);
+ }
+#endif
+
+ // The scratch value slot can either be used as a Value slot or as two
+ // separate 32-bit integer slots.
+ static int reverseOffsetOfScratchValueLow32() {
+ return -int(Size()) + offsetof(BaselineFrame, loScratchValue_);
+ }
+ static int reverseOffsetOfScratchValueHigh32() {
+ return -int(Size()) + offsetof(BaselineFrame, hiScratchValue_);
+ }
+ static int reverseOffsetOfScratchValue() {
+ return reverseOffsetOfScratchValueLow32();
+ }
+
+ static int reverseOffsetOfEnvironmentChain() {
+ return -int(Size()) + offsetof(BaselineFrame, envChain_);
+ }
+ static int reverseOffsetOfArgsObj() {
+ return -int(Size()) + offsetof(BaselineFrame, argsObj_);
+ }
+ static int reverseOffsetOfFlags() {
+ return -int(Size()) + offsetof(BaselineFrame, flags_);
+ }
+ static int reverseOffsetOfReturnValue() {
+ return -int(Size()) + offsetof(BaselineFrame, loReturnValue_);
+ }
+ static int reverseOffsetOfInterpreterScript() {
+ return -int(Size()) + offsetof(BaselineFrame, interpreterScript_);
+ }
+ static int reverseOffsetOfInterpreterPC() {
+ return -int(Size()) + offsetof(BaselineFrame, interpreterPC_);
+ }
+ static int reverseOffsetOfInterpreterICEntry() {
+ return -int(Size()) + offsetof(BaselineFrame, interpreterICEntry_);
+ }
+ static int reverseOffsetOfICScript() {
+ return -int(Size()) + offsetof(BaselineFrame, icScript_);
+ }
+ static int reverseOffsetOfLocal(size_t index) {
+ return -int(Size()) - (index + 1) * sizeof(Value);
+ }
+};
+
+// Ensure the frame is 8-byte aligned (required on ARM).
+static_assert((sizeof(BaselineFrame) % 8) == 0, "frame must be 8-byte aligned");
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineFrame_h */
diff --git a/js/src/jit/BaselineFrameInfo-inl.h b/js/src/jit/BaselineFrameInfo-inl.h
new file mode 100644
index 0000000000..cd79a8fb64
--- /dev/null
+++ b/js/src/jit/BaselineFrameInfo-inl.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineFrameInfo_inl_h
+#define jit_BaselineFrameInfo_inl_h
+
+#include "jit/BaselineFrameInfo.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+void CompilerFrameInfo::pop(StackAdjustment adjust) {
+ spIndex--;
+ StackValue* popped = &stack[spIndex];
+
+ if (adjust == AdjustStack && popped->kind() == StackValue::Stack) {
+ masm.addToStackPtr(Imm32(sizeof(Value)));
+ }
+ // Assert when anything uses this value.
+ popped->reset();
+}
+
+void CompilerFrameInfo::popn(uint32_t n, StackAdjustment adjust) {
+ uint32_t poppedStack = 0;
+ for (uint32_t i = 0; i < n; i++) {
+ if (peek(-1)->kind() == StackValue::Stack) {
+ poppedStack++;
+ }
+ pop(DontAdjustStack);
+ }
+ if (adjust == AdjustStack && poppedStack > 0) {
+ masm.addToStackPtr(Imm32(sizeof(Value) * poppedStack));
+ }
+}
+
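+// For the interpreter-generated code the stack is always synced, so popping
+// values only needs to adjust the stack pointer.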
+void InterpreterFrameInfo::pop() { popn(1); }
+
+void InterpreterFrameInfo::popn(uint32_t n) {
+ masm.addToStackPtr(Imm32(n * sizeof(Value)));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineFrameInfo_inl_h */
diff --git a/js/src/jit/BaselineFrameInfo.cpp b/js/src/jit/BaselineFrameInfo.cpp
new file mode 100644
index 0000000000..d641ace2ab
--- /dev/null
+++ b/js/src/jit/BaselineFrameInfo.cpp
@@ -0,0 +1,239 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineFrameInfo.h"
+
+#include <algorithm>
+
+#include "jit/BaselineIC.h"
+#ifdef DEBUG
+# include "jit/BytecodeAnalysis.h"
+#endif
+
+#include "jit/BaselineFrameInfo-inl.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+bool CompilerFrameInfo::init(TempAllocator& alloc) {
+ // An extra slot is needed for global scopes because INITGLEXICAL (stack
+ // depth 1) is compiled as a SETPROP (stack depth 2) on the global lexical
+ // scope.
+ size_t extra = script->isGlobalCode() ? 1 : 0;
+ size_t nstack =
+ std::max(script->nslots() - script->nfixed(), size_t(MinJITStackSize)) +
+ extra;
+ if (!stack.init(alloc, nstack)) {
+ return false;
+ }
+
+ return true;
+}
+
+void CompilerFrameInfo::sync(StackValue* val) {
+ switch (val->kind()) {
+ case StackValue::Stack:
+ break;
+ case StackValue::LocalSlot:
+ masm.pushValue(addressOfLocal(val->localSlot()));
+ break;
+ case StackValue::ArgSlot:
+ masm.pushValue(addressOfArg(val->argSlot()));
+ break;
+ case StackValue::ThisSlot:
+ masm.pushValue(addressOfThis());
+ break;
+ case StackValue::Register:
+ masm.pushValue(val->reg());
+ break;
+ case StackValue::Constant:
+ masm.pushValue(val->constant());
+ break;
+ default:
+ MOZ_CRASH("Invalid kind");
+ }
+
+ val->setStack();
+}
+
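+// Sync every stack value except the top |uses| values, which the caller is
+// typically about to pop into registers (see popRegsAndSync).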
+void CompilerFrameInfo::syncStack(uint32_t uses) {
+ MOZ_ASSERT(uses <= stackDepth());
+
+ uint32_t depth = stackDepth() - uses;
+
+ for (uint32_t i = 0; i < depth; i++) {
+ StackValue* current = &stack[i];
+ sync(current);
+ }
+}
+
+uint32_t CompilerFrameInfo::numUnsyncedSlots() {
+  // Start at the top of the abstract stack and count down until we find the
+  // first synced (Stack) value; everything above it is unsynced.
+ uint32_t i = 0;
+ for (; i < stackDepth(); i++) {
+ if (peek(-int32_t(i + 1))->kind() == StackValue::Stack) {
+ break;
+ }
+ }
+ return i;
+}
+
+void CompilerFrameInfo::popValue(ValueOperand dest) {
+ StackValue* val = peek(-1);
+
+ switch (val->kind()) {
+ case StackValue::Constant:
+ masm.moveValue(val->constant(), dest);
+ break;
+ case StackValue::LocalSlot:
+ masm.loadValue(addressOfLocal(val->localSlot()), dest);
+ break;
+ case StackValue::ArgSlot:
+ masm.loadValue(addressOfArg(val->argSlot()), dest);
+ break;
+ case StackValue::ThisSlot:
+ masm.loadValue(addressOfThis(), dest);
+ break;
+ case StackValue::Stack:
+ masm.popValue(dest);
+ break;
+ case StackValue::Register:
+ masm.moveValue(val->reg(), dest);
+ break;
+ default:
+ MOZ_CRASH("Invalid kind");
+ }
+
+ // masm.popValue already adjusted the stack pointer, don't do it twice.
+ pop(DontAdjustStack);
+}
+
+void CompilerFrameInfo::popRegsAndSync(uint32_t uses) {
+ // x86 has only 3 Value registers. Only support 2 regs here for now,
+ // so that there's always a scratch Value register for reg -> reg
+ // moves.
+ MOZ_ASSERT(uses > 0);
+ MOZ_ASSERT(uses <= 2);
+ MOZ_ASSERT(uses <= stackDepth());
+
+ syncStack(uses);
+
+ switch (uses) {
+ case 1:
+ popValue(R0);
+ break;
+ case 2: {
+ // If the second value is in R1, move it to R2 so that it's not
+ // clobbered by the first popValue.
+ StackValue* val = peek(-2);
+ if (val->kind() == StackValue::Register && val->reg() == R1) {
+ masm.moveValue(R1, ValueOperand(R2));
+ val->setRegister(R2);
+ }
+ popValue(R1);
+ popValue(R0);
+ break;
+ }
+ default:
+ MOZ_CRASH("Invalid uses");
+ }
+ // On arm64, SP may be < PSP now (that's OK).
+ // eg testcase: tests/bug1580246.js
+}
+
+void InterpreterFrameInfo::popRegsAndSync(uint32_t uses) {
+ switch (uses) {
+ case 1:
+ popValue(R0);
+ break;
+ case 2: {
+ popValue(R1);
+ popValue(R0);
+ break;
+ }
+ default:
+ MOZ_CRASH("Invalid uses");
+ }
+ // On arm64, SP may be < PSP now (that's OK).
+ // eg testcase: tests/backup-point-bug1315634.js
+}
+
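+// Advance the frame's interpreterICEntry_ pointer to the next ICEntry.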
+void InterpreterFrameInfo::bumpInterpreterICEntry() {
+ masm.addPtr(Imm32(sizeof(ICEntry)), addressOfInterpreterICEntry());
+}
+
+void CompilerFrameInfo::storeStackValue(int32_t depth, const Address& dest,
+ const ValueOperand& scratch) {
+ const StackValue* source = peek(depth);
+ switch (source->kind()) {
+ case StackValue::Constant:
+ masm.storeValue(source->constant(), dest);
+ break;
+ case StackValue::Register:
+ masm.storeValue(source->reg(), dest);
+ break;
+ case StackValue::LocalSlot:
+ masm.loadValue(addressOfLocal(source->localSlot()), scratch);
+ masm.storeValue(scratch, dest);
+ break;
+ case StackValue::ArgSlot:
+ masm.loadValue(addressOfArg(source->argSlot()), scratch);
+ masm.storeValue(scratch, dest);
+ break;
+ case StackValue::ThisSlot:
+ masm.loadValue(addressOfThis(), scratch);
+ masm.storeValue(scratch, dest);
+ break;
+ case StackValue::Stack:
+ masm.loadValue(addressOfStackValue(depth), scratch);
+ masm.storeValue(scratch, dest);
+ break;
+ default:
+ MOZ_CRASH("Invalid kind");
+ }
+}
+
+#ifdef DEBUG
+void CompilerFrameInfo::assertValidState(const BytecodeInfo& info) {
+ // Check stack depth.
+ MOZ_ASSERT(stackDepth() == info.stackDepth);
+
+ // Start at the bottom, find the first value that's not synced.
+ uint32_t i = 0;
+ for (; i < stackDepth(); i++) {
+ if (stack[i].kind() != StackValue::Stack) {
+ break;
+ }
+ }
+
+ // Assert all values on top of it are also not synced.
+ for (; i < stackDepth(); i++) {
+ MOZ_ASSERT(stack[i].kind() != StackValue::Stack);
+ }
+
+ // Assert every Value register is used by at most one StackValue.
+  // R2 is used as a scratch register by the compiler and FrameInfo,
+ // so it shouldn't be used for StackValues.
+ bool usedR0 = false, usedR1 = false;
+
+ for (i = 0; i < stackDepth(); i++) {
+ if (stack[i].kind() == StackValue::Register) {
+ ValueOperand reg = stack[i].reg();
+ if (reg == R0) {
+ MOZ_ASSERT(!usedR0);
+ usedR0 = true;
+ } else if (reg == R1) {
+ MOZ_ASSERT(!usedR1);
+ usedR1 = true;
+ } else {
+ MOZ_CRASH("Invalid register");
+ }
+ }
+ }
+}
+#endif
diff --git a/js/src/jit/BaselineFrameInfo.h b/js/src/jit/BaselineFrameInfo.h
new file mode 100644
index 0000000000..2e4c994d99
--- /dev/null
+++ b/js/src/jit/BaselineFrameInfo.h
@@ -0,0 +1,435 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineFrameInfo_h
+#define jit_BaselineFrameInfo_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+
+#include <new>
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineJIT.h"
+#include "jit/FixedList.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+struct BytecodeInfo;
+class MacroAssembler;
+
+// [SMDOC] Baseline FrameInfo overview.
+//
+// FrameInfo is used by BaselineCodeGen to track values stored in the frame.
+// There are two implementations:
+//
+// InterpreterFrameInfo
+// --------------------
+// The InterpreterFrameInfo class is used by the interpreter generator and is
+// a very simple interface on top of the MacroAssembler, because the stack is
+// always synced.
+//
+// CompilerFrameInfo
+// -----------------
+// The CompilerFrameInfo class is more complicated because it maintains a
+// virtual stack to optimize some common stack operations. Locals and arguments
+// are always fully synced. Stack values can either be synced, stored as a
+// constant, stored in a Value register, or refer to a local, argument, or
+// |this| slot. Syncing a StackValue ensures it's stored on the stack, i.e.
+// kind == Stack.
+//
+// To see how this works, consider the following statement:
+//
+// var y = x + 9;
+//
+// Here two values are pushed: StackValue(LocalSlot(0)) and
+// StackValue(Int32Value(9)). Only when we reach the ADD op, code is generated
+// to load the operands directly into the right operand registers and sync all
+// other stack values.
+//
+// For stack values, the following invariants hold (and are checked between
+// ops):
+//
+// (1) If a value is synced (kind == Stack), all values below it must also be
+// synced. In other words, values with kind other than Stack can only appear
+// on top of the abstract stack.
+//
+// (2) When we call a stub or IC, all values still on the stack must be synced.
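+//
+// A rough sketch of the abstract stack for the example above:
+//
+//   push local 0     -> [ LocalSlot(0) ]               (no code emitted)
+//   push constant 9  -> [ LocalSlot(0), Constant(9) ]  (no code emitted)
+//   Add              -> popRegsAndSync(2) loads both operands into registers,
+//                       the Add IC is called, and its result is pushed as a
+//                       Register value.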
+
+// Represents a value pushed on the stack. Note that StackValue is not used for
+// locals or arguments since these are always fully synced.
+class StackValue {
+ public:
+ enum Kind {
+ Constant,
+ Register,
+ Stack,
+ LocalSlot,
+ ArgSlot,
+ ThisSlot,
+#ifdef DEBUG
+ // In debug builds, assert Kind is initialized.
+ Uninitialized,
+#endif
+ };
+
+ private:
+ MOZ_INIT_OUTSIDE_CTOR Kind kind_;
+
+ MOZ_INIT_OUTSIDE_CTOR union Data {
+ JS::Value constant;
+ ValueOperand reg;
+ uint32_t localSlot;
+ uint32_t argSlot;
+
+ // |constant| has a non-trivial constructor and therefore MUST be
+ // placement-new'd into existence.
+ MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ Data() {}
+ MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ } data;
+
+ MOZ_INIT_OUTSIDE_CTOR JSValueType knownType_;
+
+ public:
+ StackValue() { reset(); }
+
+ Kind kind() const { return kind_; }
+ bool hasKnownType() const { return knownType_ != JSVAL_TYPE_UNKNOWN; }
+ bool hasKnownType(JSValueType type) const {
+ MOZ_ASSERT(type != JSVAL_TYPE_UNKNOWN);
+ return knownType_ == type;
+ }
+ JSValueType knownType() const {
+ MOZ_ASSERT(hasKnownType());
+ return knownType_;
+ }
+ void reset() {
+#ifdef DEBUG
+ kind_ = Uninitialized;
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+#endif
+ }
+ Value constant() const {
+ MOZ_ASSERT(kind_ == Constant);
+ return data.constant;
+ }
+ ValueOperand reg() const {
+ MOZ_ASSERT(kind_ == Register);
+ return data.reg;
+ }
+ uint32_t localSlot() const {
+ MOZ_ASSERT(kind_ == LocalSlot);
+ return data.localSlot;
+ }
+ uint32_t argSlot() const {
+ MOZ_ASSERT(kind_ == ArgSlot);
+ return data.argSlot;
+ }
+
+ void setConstant(const Value& v) {
+ kind_ = Constant;
+ new (&data.constant) Value(v);
+ knownType_ = v.isDouble() ? JSVAL_TYPE_DOUBLE : v.extractNonDoubleType();
+ }
+ void setRegister(const ValueOperand& val,
+ JSValueType knownType = JSVAL_TYPE_UNKNOWN) {
+ kind_ = Register;
+ new (&data.reg) ValueOperand(val);
+ knownType_ = knownType;
+ }
+ void setLocalSlot(uint32_t slot) {
+ kind_ = LocalSlot;
+ new (&data.localSlot) uint32_t(slot);
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+ }
+ void setArgSlot(uint32_t slot) {
+ kind_ = ArgSlot;
+ new (&data.argSlot) uint32_t(slot);
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+ }
+ void setThis() {
+ kind_ = ThisSlot;
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+ }
+ void setStack() {
+ kind_ = Stack;
+ knownType_ = JSVAL_TYPE_UNKNOWN;
+ }
+};
+
+enum StackAdjustment { AdjustStack, DontAdjustStack };
+
+class FrameInfo {
+ protected:
+ MacroAssembler& masm;
+
+ public:
+ explicit FrameInfo(MacroAssembler& masm) : masm(masm) {}
+
+ Address addressOfLocal(size_t local) const {
+ return Address(FramePointer, BaselineFrame::reverseOffsetOfLocal(local));
+ }
+ Address addressOfArg(size_t arg) const {
+ return Address(FramePointer, JitFrameLayout::offsetOfActualArg(arg));
+ }
+ Address addressOfThis() const {
+ return Address(FramePointer, JitFrameLayout::offsetOfThis());
+ }
+ Address addressOfCalleeToken() const {
+ return Address(FramePointer, JitFrameLayout::offsetOfCalleeToken());
+ }
+ Address addressOfEnvironmentChain() const {
+ return Address(FramePointer,
+ BaselineFrame::reverseOffsetOfEnvironmentChain());
+ }
+ Address addressOfICScript() const {
+ return Address(FramePointer, BaselineFrame::reverseOffsetOfICScript());
+ }
+ Address addressOfFlags() const {
+ return Address(FramePointer, BaselineFrame::reverseOffsetOfFlags());
+ }
+ Address addressOfReturnValue() const {
+ return Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue());
+ }
+ Address addressOfArgsObj() const {
+ return Address(FramePointer, BaselineFrame::reverseOffsetOfArgsObj());
+ }
+ Address addressOfScratchValue() const {
+ return Address(FramePointer, BaselineFrame::reverseOffsetOfScratchValue());
+ }
+ Address addressOfScratchValueLow32() const {
+ return Address(FramePointer,
+ BaselineFrame::reverseOffsetOfScratchValueLow32());
+ }
+ Address addressOfScratchValueHigh32() const {
+ return Address(FramePointer,
+ BaselineFrame::reverseOffsetOfScratchValueHigh32());
+ }
+#ifdef DEBUG
+ Address addressOfDebugFrameSize() const {
+ return Address(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ }
+#endif
+};
+
+class CompilerFrameInfo : public FrameInfo {
+ friend class BaselinePerfSpewer;
+ JSScript* script;
+ FixedList<StackValue> stack;
+ size_t spIndex;
+
+ public:
+ CompilerFrameInfo(JSScript* script, MacroAssembler& masm)
+ : FrameInfo(masm), script(script), stack(), spIndex(0) {}
+ [[nodiscard]] bool init(TempAllocator& alloc);
+
+ size_t nlocals() const { return script->nfixed(); }
+ size_t nargs() const { return script->function()->nargs(); }
+
+ private:
+ inline StackValue* rawPush() {
+ StackValue* val = &stack[spIndex++];
+ val->reset();
+ return val;
+ }
+
+ inline StackValue* peek(int32_t index) const {
+ MOZ_ASSERT(index < 0);
+ return const_cast<StackValue*>(&stack[spIndex + index]);
+ }
+
+ public:
+ inline size_t stackDepth() const { return spIndex; }
+ inline void setStackDepth(uint32_t newDepth) {
+ if (newDepth <= stackDepth()) {
+ spIndex = newDepth;
+ } else {
+ uint32_t diff = newDepth - stackDepth();
+ for (uint32_t i = 0; i < diff; i++) {
+ StackValue* val = rawPush();
+ val->setStack();
+ }
+
+ MOZ_ASSERT(spIndex == newDepth);
+ }
+ }
+
+ void assertStackDepth(uint32_t depth) { MOZ_ASSERT(stackDepth() == depth); }
+ void incStackDepth(int32_t diff) { setStackDepth(stackDepth() + diff); }
+ bool hasKnownStackDepth(uint32_t depth) { return stackDepth() == depth; }
+
+ inline void pop(StackAdjustment adjust = AdjustStack);
+ inline void popn(uint32_t n, StackAdjustment adjust = AdjustStack);
+ inline void push(const Value& val) {
+ StackValue* sv = rawPush();
+ sv->setConstant(val);
+ }
+ inline void push(const ValueOperand& val,
+ JSValueType knownType = JSVAL_TYPE_UNKNOWN) {
+ StackValue* sv = rawPush();
+ sv->setRegister(val, knownType);
+ }
+ inline void pushLocal(uint32_t local) {
+ MOZ_ASSERT(local < nlocals());
+ StackValue* sv = rawPush();
+ sv->setLocalSlot(local);
+ }
+ inline void pushArg(uint32_t arg) {
+ StackValue* sv = rawPush();
+ sv->setArgSlot(arg);
+ }
+ inline void pushThis() {
+ StackValue* sv = rawPush();
+ sv->setThis();
+ }
+
+ inline void pushScratchValue() {
+ masm.pushValue(addressOfScratchValue());
+ StackValue* sv = rawPush();
+ sv->setStack();
+ }
+
+ Address addressOfLocal(size_t local) const {
+ MOZ_ASSERT(local < nlocals());
+ return FrameInfo::addressOfLocal(local);
+ }
+ Address addressOfArg(size_t arg) const {
+ MOZ_ASSERT(arg < nargs());
+ return FrameInfo::addressOfArg(arg);
+ }
+
+ Address addressOfStackValue(int32_t depth) const {
+ const StackValue* value = peek(depth);
+ MOZ_ASSERT(value->kind() == StackValue::Stack);
+ size_t slot = value - &stack[0];
+ MOZ_ASSERT(slot < stackDepth());
+ return Address(FramePointer,
+ BaselineFrame::reverseOffsetOfLocal(nlocals() + slot));
+ }
+
+ void popValue(ValueOperand dest);
+
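+  // Syncing: sync() stores a single StackValue to its stack slot so its kind
+  // becomes Stack; syncStack(uses) syncs every value except (at most) the top
+  // |uses| values; popRegsAndSync(uses) syncs the rest of the stack and pops
+  // the top |uses| values into the Value registers (R0 and, if needed, R1).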
+ void sync(StackValue* val);
+ void syncStack(uint32_t uses);
+ uint32_t numUnsyncedSlots();
+ void popRegsAndSync(uint32_t uses);
+
+ void assertSyncedStack() const {
+ MOZ_ASSERT_IF(stackDepth() > 0, peek(-1)->kind() == StackValue::Stack);
+ }
+
+ bool stackValueHasKnownType(int32_t depth, JSValueType type) const {
+ return peek(depth)->hasKnownType(type);
+ }
+
+ mozilla::Maybe<Value> knownStackValue(int32_t depth) const {
+ StackValue* val = peek(depth);
+ if (val->kind() == StackValue::Constant) {
+ return mozilla::Some(val->constant());
+ }
+ return mozilla::Nothing();
+ }
+
+ void storeStackValue(int32_t depth, const Address& dest,
+ const ValueOperand& scratch);
+
+ uint32_t frameSize() const {
+ return BaselineFrame::frameSizeForNumValueSlots(nlocals() + stackDepth());
+ }
+
+#ifdef DEBUG
+  // Assert the state is valid before executing "pc".
+ void assertValidState(const BytecodeInfo& info);
+#else
+ inline void assertValidState(const BytecodeInfo& info) {}
+#endif
+};
+
+class InterpreterFrameInfo : public FrameInfo {
+ public:
+ explicit InterpreterFrameInfo(MacroAssembler& masm) : FrameInfo(masm) {}
+
+ // These methods are no-ops in the interpreter, because we don't have a
+ // virtual stack there.
+ void syncStack(uint32_t uses) {}
+ void assertSyncedStack() const {}
+ void assertStackDepth(uint32_t depth) {}
+ void incStackDepth(int32_t diff) {}
+ bool hasKnownStackDepth(uint32_t depth) { return false; }
+ uint32_t numUnsyncedSlots() { return 0; }
+
+ bool stackValueHasKnownType(int32_t depth, JSValueType type) const {
+ return false;
+ }
+
+ mozilla::Maybe<Value> knownStackValue(int32_t depth) const {
+ return mozilla::Nothing();
+ }
+
+ Address addressOfStackValue(int depth) const {
+ MOZ_ASSERT(depth < 0);
+ return Address(masm.getStackPointer(),
+ masm.framePushed() + size_t(-(depth + 1)) * sizeof(Value));
+ }
+
+ BaseIndex addressOfStackValue(Register index, int32_t offset = 0) const {
+ return BaseIndex(masm.getStackPointer(), index, ValueScale, offset);
+ }
+
+ void popRegsAndSync(uint32_t uses);
+
+ inline void pop();
+
+ inline void popn(uint32_t n);
+
+ void popn(Register reg) {
+ // sp := sp + reg * sizeof(Value)
+ Register spReg = AsRegister(masm.getStackPointer());
+ masm.computeEffectiveAddress(BaseValueIndex(spReg, reg), spReg);
+ // On arm64, SP may be < PSP now (that's OK).
+    // e.g. testcase: tests/arguments/strict-args-generator-flushstack.js
+ }
+
+ void popValue(ValueOperand dest) { masm.popValue(dest); }
+
+ void push(const ValueOperand& val,
+ JSValueType knownType = JSVAL_TYPE_UNKNOWN) {
+ masm.pushValue(val);
+ }
+ void push(const Value& val) { masm.pushValue(val); }
+
+ void pushThis() { masm.pushValue(addressOfThis()); }
+ void pushScratchValue() { masm.pushValue(addressOfScratchValue()); }
+
+ void storeStackValue(int32_t depth, const Address& dest,
+ const ValueOperand& scratch) {
+ masm.loadValue(addressOfStackValue(depth), scratch);
+ masm.storeValue(scratch, dest);
+ }
+
+ void bumpInterpreterICEntry();
+
+ Address addressOfInterpreterScript() const {
+ return Address(FramePointer,
+ BaselineFrame::reverseOffsetOfInterpreterScript());
+ }
+ Address addressOfInterpreterPC() const {
+ return Address(FramePointer, BaselineFrame::reverseOffsetOfInterpreterPC());
+ }
+ Address addressOfInterpreterICEntry() const {
+ return Address(FramePointer,
+ BaselineFrame::reverseOffsetOfInterpreterICEntry());
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineFrameInfo_h */
diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp
new file mode 100644
index 0000000000..7b940f33ec
--- /dev/null
+++ b/js/src/jit/BaselineIC.cpp
@@ -0,0 +1,2497 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Sprintf.h"
+
+#include "jstypes.h"
+
+#include "builtin/Eval.h"
+#include "jit/BaselineCacheIRCompiler.h"
+#include "jit/CacheIRGenerator.h"
+#include "jit/CacheIRHealth.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#include "jit/PerfSpewer.h"
+#include "jit/SharedICHelpers.h"
+#include "jit/SharedICRegisters.h"
+#include "jit/VMFunctions.h"
+#include "js/Conversions.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "vm/BytecodeIterator.h"
+#include "vm/BytecodeLocation.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/EqualityOperations.h"
+#include "vm/JSFunction.h"
+#include "vm/JSScript.h"
+#include "vm/Opcodes.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/SharedICHelpers-inl.h"
+#include "jit/VMFunctionList-inl.h"
+#include "vm/BytecodeIterator-inl.h"
+#include "vm/BytecodeLocation-inl.h"
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/Interpreter-inl.h"
+#include "vm/JSScript-inl.h"
+
+using mozilla::DebugOnly;
+
+namespace js {
+namespace jit {
+
+// Class used to emit all Baseline IC fallback code when initializing the
+// JitRuntime.
+class MOZ_RAII FallbackICCodeCompiler final {
+ BaselineICFallbackCode& code;
+ MacroAssembler& masm;
+
+ JSContext* cx;
+ bool inStubFrame_ = false;
+
+#ifdef DEBUG
+ bool entersStubFrame_ = false;
+ uint32_t framePushedAtEnterStubFrame_ = 0;
+#endif
+
+ [[nodiscard]] bool emitCall(bool isSpread, bool isConstructing);
+ [[nodiscard]] bool emitGetElem(bool hasReceiver);
+ [[nodiscard]] bool emitGetProp(bool hasReceiver);
+
+ public:
+ FallbackICCodeCompiler(JSContext* cx, BaselineICFallbackCode& code,
+ MacroAssembler& masm)
+ : code(code), masm(masm), cx(cx) {}
+
+#define DEF_METHOD(kind) [[nodiscard]] bool emit_##kind();
+ IC_BASELINE_FALLBACK_CODE_KIND_LIST(DEF_METHOD)
+#undef DEF_METHOD
+
+ void pushCallArguments(MacroAssembler& masm,
+ AllocatableGeneralRegisterSet regs, Register argcReg,
+ bool isConstructing);
+
+  // Push a payload, specialized per compiler, needed to execute stubs.
+ void PushStubPayload(MacroAssembler& masm, Register scratch);
+ void pushStubPayload(MacroAssembler& masm, Register scratch);
+
+ // Emits a tail call to a VMFunction wrapper.
+ [[nodiscard]] bool tailCallVMInternal(MacroAssembler& masm,
+ TailCallVMFunctionId id);
+
+ template <typename Fn, Fn fn>
+ [[nodiscard]] bool tailCallVM(MacroAssembler& masm);
+
+ // Emits a normal (non-tail) call to a VMFunction wrapper.
+ [[nodiscard]] bool callVMInternal(MacroAssembler& masm, VMFunctionId id);
+
+ template <typename Fn, Fn fn>
+ [[nodiscard]] bool callVM(MacroAssembler& masm);
+
+ // A stub frame is used when a stub wants to call into the VM without
+ // performing a tail call. This is required for the return address
+ // to pc mapping to work.
+ void enterStubFrame(MacroAssembler& masm, Register scratch);
+ void assumeStubFrame();
+ void leaveStubFrame(MacroAssembler& masm);
+};
+
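+// Returns the general purpose registers available to an IC stub: everything
+// except the frame pointer, ICStubReg, any platform-reserved registers, and
+// the Value registers (R0/R1) holding the stub's |numInputs| inputs.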
+AllocatableGeneralRegisterSet BaselineICAvailableGeneralRegs(size_t numInputs) {
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+#if defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(!regs.has(ICTailCallReg));
+ regs.take(BaselineSecondScratchReg);
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(!regs.has(ICTailCallReg));
+ MOZ_ASSERT(!regs.has(BaselineSecondScratchReg));
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_ASSERT(!regs.has(PseudoStackPointer));
+ MOZ_ASSERT(!regs.has(RealStackPointer));
+ MOZ_ASSERT(!regs.has(ICTailCallReg));
+#endif
+ regs.take(ICStubReg);
+
+ switch (numInputs) {
+ case 0:
+ break;
+ case 1:
+ regs.take(R0);
+ break;
+ case 2:
+ regs.take(R0);
+ regs.take(R1);
+ break;
+ default:
+ MOZ_CRASH("Invalid numInputs");
+ }
+
+ return regs;
+}
+
+static jsbytecode* StubOffsetToPc(const ICFallbackStub* stub,
+ const JSScript* script) {
+ return script->offsetToPC(stub->pcOffset());
+}
+
+#ifdef JS_JITSPEW
+void FallbackICSpew(JSContext* cx, ICFallbackStub* stub, const char* fmt, ...) {
+ if (JitSpewEnabled(JitSpew_BaselineICFallback)) {
+ RootedScript script(cx, GetTopJitJSScript(cx));
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+
+ char fmtbuf[100];
+ va_list args;
+ va_start(args, fmt);
+ (void)VsprintfLiteral(fmtbuf, fmt, args);
+ va_end(args);
+
+ JitSpew(
+ JitSpew_BaselineICFallback,
+ "Fallback hit for (%s:%u:%u) (pc=%zu,line=%u,uses=%u,stubs=%zu): %s",
+ script->filename(), script->lineno(), script->column(),
+ script->pcToOffset(pc), PCToLineNumber(script, pc),
+ script->getWarmUpCount(), stub->numOptimizedStubs(), fmtbuf);
+ }
+}
+#endif // JS_JITSPEW
+
+void ICEntry::trace(JSTracer* trc) {
+ ICStub* stub = firstStub();
+
+ // Trace CacheIR stubs.
+ while (!stub->isFallback()) {
+ stub->toCacheIRStub()->trace(trc);
+ stub = stub->toCacheIRStub()->next();
+ }
+
+ // Fallback stubs use runtime-wide trampoline code we don't need to trace.
+ MOZ_ASSERT(stub->usesTrampolineCode());
+}
+
+// constexpr table mapping JSOp to BaselineICFallbackKind. Each value in the
+// table is either a fallback kind or a sentinel value (NoICValue) indicating
+// the JSOp is not a JOF_IC op.
+class MOZ_STATIC_CLASS OpToFallbackKindTable {
+ static_assert(sizeof(BaselineICFallbackKind) == sizeof(uint8_t));
+ uint8_t table_[JSOP_LIMIT] = {};
+
+ constexpr void setKind(JSOp op, BaselineICFallbackKind kind) {
+ MOZ_ASSERT(uint8_t(kind) != NoICValue);
+ table_[size_t(op)] = uint8_t(kind);
+ }
+
+ public:
+ static constexpr uint8_t NoICValue = uint8_t(BaselineICFallbackKind::Count);
+
+ uint8_t lookup(JSOp op) const { return table_[size_t(op)]; }
+
+ constexpr OpToFallbackKindTable() {
+ for (size_t i = 0; i < JSOP_LIMIT; i++) {
+ table_[i] = NoICValue;
+ }
+
+ setKind(JSOp::Not, BaselineICFallbackKind::ToBool);
+ setKind(JSOp::And, BaselineICFallbackKind::ToBool);
+ setKind(JSOp::Or, BaselineICFallbackKind::ToBool);
+ setKind(JSOp::JumpIfTrue, BaselineICFallbackKind::ToBool);
+ setKind(JSOp::JumpIfFalse, BaselineICFallbackKind::ToBool);
+
+ setKind(JSOp::BitNot, BaselineICFallbackKind::UnaryArith);
+ setKind(JSOp::Pos, BaselineICFallbackKind::UnaryArith);
+ setKind(JSOp::Neg, BaselineICFallbackKind::UnaryArith);
+ setKind(JSOp::Inc, BaselineICFallbackKind::UnaryArith);
+ setKind(JSOp::Dec, BaselineICFallbackKind::UnaryArith);
+ setKind(JSOp::ToNumeric, BaselineICFallbackKind::UnaryArith);
+
+ setKind(JSOp::BitOr, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::BitXor, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::BitAnd, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::Lsh, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::Rsh, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::Ursh, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::Add, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::Sub, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::Mul, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::Div, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::Mod, BaselineICFallbackKind::BinaryArith);
+ setKind(JSOp::Pow, BaselineICFallbackKind::BinaryArith);
+
+ setKind(JSOp::Eq, BaselineICFallbackKind::Compare);
+ setKind(JSOp::Ne, BaselineICFallbackKind::Compare);
+ setKind(JSOp::Lt, BaselineICFallbackKind::Compare);
+ setKind(JSOp::Le, BaselineICFallbackKind::Compare);
+ setKind(JSOp::Gt, BaselineICFallbackKind::Compare);
+ setKind(JSOp::Ge, BaselineICFallbackKind::Compare);
+ setKind(JSOp::StrictEq, BaselineICFallbackKind::Compare);
+ setKind(JSOp::StrictNe, BaselineICFallbackKind::Compare);
+
+ setKind(JSOp::NewArray, BaselineICFallbackKind::NewArray);
+
+ setKind(JSOp::NewObject, BaselineICFallbackKind::NewObject);
+ setKind(JSOp::NewInit, BaselineICFallbackKind::NewObject);
+
+ setKind(JSOp::InitElem, BaselineICFallbackKind::SetElem);
+ setKind(JSOp::InitHiddenElem, BaselineICFallbackKind::SetElem);
+ setKind(JSOp::InitLockedElem, BaselineICFallbackKind::SetElem);
+ setKind(JSOp::InitElemInc, BaselineICFallbackKind::SetElem);
+ setKind(JSOp::SetElem, BaselineICFallbackKind::SetElem);
+ setKind(JSOp::StrictSetElem, BaselineICFallbackKind::SetElem);
+
+ setKind(JSOp::InitProp, BaselineICFallbackKind::SetProp);
+ setKind(JSOp::InitLockedProp, BaselineICFallbackKind::SetProp);
+ setKind(JSOp::InitHiddenProp, BaselineICFallbackKind::SetProp);
+ setKind(JSOp::InitGLexical, BaselineICFallbackKind::SetProp);
+ setKind(JSOp::SetProp, BaselineICFallbackKind::SetProp);
+ setKind(JSOp::StrictSetProp, BaselineICFallbackKind::SetProp);
+ setKind(JSOp::SetName, BaselineICFallbackKind::SetProp);
+ setKind(JSOp::StrictSetName, BaselineICFallbackKind::SetProp);
+ setKind(JSOp::SetGName, BaselineICFallbackKind::SetProp);
+ setKind(JSOp::StrictSetGName, BaselineICFallbackKind::SetProp);
+
+ setKind(JSOp::GetProp, BaselineICFallbackKind::GetProp);
+ setKind(JSOp::GetBoundName, BaselineICFallbackKind::GetProp);
+
+ setKind(JSOp::GetPropSuper, BaselineICFallbackKind::GetPropSuper);
+
+ setKind(JSOp::GetElem, BaselineICFallbackKind::GetElem);
+
+ setKind(JSOp::GetElemSuper, BaselineICFallbackKind::GetElemSuper);
+
+ setKind(JSOp::In, BaselineICFallbackKind::In);
+
+ setKind(JSOp::HasOwn, BaselineICFallbackKind::HasOwn);
+
+ setKind(JSOp::CheckPrivateField, BaselineICFallbackKind::CheckPrivateField);
+
+ setKind(JSOp::GetName, BaselineICFallbackKind::GetName);
+ setKind(JSOp::GetGName, BaselineICFallbackKind::GetName);
+
+ setKind(JSOp::BindName, BaselineICFallbackKind::BindName);
+ setKind(JSOp::BindGName, BaselineICFallbackKind::BindName);
+
+ setKind(JSOp::GetIntrinsic, BaselineICFallbackKind::GetIntrinsic);
+
+ setKind(JSOp::Call, BaselineICFallbackKind::Call);
+ setKind(JSOp::CallContent, BaselineICFallbackKind::Call);
+ setKind(JSOp::CallIgnoresRv, BaselineICFallbackKind::Call);
+ setKind(JSOp::CallIter, BaselineICFallbackKind::Call);
+ setKind(JSOp::CallContentIter, BaselineICFallbackKind::Call);
+ setKind(JSOp::Eval, BaselineICFallbackKind::Call);
+ setKind(JSOp::StrictEval, BaselineICFallbackKind::Call);
+
+ setKind(JSOp::SuperCall, BaselineICFallbackKind::CallConstructing);
+ setKind(JSOp::New, BaselineICFallbackKind::CallConstructing);
+ setKind(JSOp::NewContent, BaselineICFallbackKind::CallConstructing);
+
+ setKind(JSOp::SpreadCall, BaselineICFallbackKind::SpreadCall);
+ setKind(JSOp::SpreadEval, BaselineICFallbackKind::SpreadCall);
+ setKind(JSOp::StrictSpreadEval, BaselineICFallbackKind::SpreadCall);
+
+ setKind(JSOp::SpreadSuperCall,
+ BaselineICFallbackKind::SpreadCallConstructing);
+ setKind(JSOp::SpreadNew, BaselineICFallbackKind::SpreadCallConstructing);
+
+ setKind(JSOp::Instanceof, BaselineICFallbackKind::InstanceOf);
+
+ setKind(JSOp::Typeof, BaselineICFallbackKind::TypeOf);
+ setKind(JSOp::TypeofExpr, BaselineICFallbackKind::TypeOf);
+
+ setKind(JSOp::ToPropertyKey, BaselineICFallbackKind::ToPropertyKey);
+
+ setKind(JSOp::Iter, BaselineICFallbackKind::GetIterator);
+
+ setKind(JSOp::OptimizeSpreadCall,
+ BaselineICFallbackKind::OptimizeSpreadCall);
+
+ setKind(JSOp::Rest, BaselineICFallbackKind::Rest);
+
+ setKind(JSOp::CloseIter, BaselineICFallbackKind::CloseIter);
+ }
+};
+
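+// For example, FallbackKindTable.lookup(JSOp::Add) yields
+// uint8_t(BaselineICFallbackKind::BinaryArith), while ops without an IC map to
+// OpToFallbackKindTable::NoICValue.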
+static constexpr OpToFallbackKindTable FallbackKindTable;
+
+void ICScript::initICEntries(JSContext* cx, JSScript* script) {
+ MOZ_ASSERT(cx->realm()->jitRealm());
+ MOZ_ASSERT(jit::IsBaselineInterpreterEnabled());
+
+ MOZ_ASSERT(numICEntries() == script->numICEntries());
+
+ // Index of the next ICEntry to initialize.
+ uint32_t icEntryIndex = 0;
+
+ const BaselineICFallbackCode& fallbackCode =
+ cx->runtime()->jitRuntime()->baselineICFallbackCode();
+
+ // For JOF_IC ops: initialize ICEntries and fallback stubs.
+ for (BytecodeLocation loc : js::AllBytecodesIterable(script)) {
+ JSOp op = loc.getOp();
+
+ // Assert the frontend stored the correct IC index in jump target ops.
+ MOZ_ASSERT_IF(BytecodeIsJumpTarget(op), loc.icIndex() == icEntryIndex);
+
+ uint8_t tableValue = FallbackKindTable.lookup(op);
+
+ if (tableValue == OpToFallbackKindTable::NoICValue) {
+ MOZ_ASSERT(!BytecodeOpHasIC(op),
+ "Missing entry in OpToFallbackKindTable for JOF_IC op");
+ continue;
+ }
+
+ MOZ_ASSERT(BytecodeOpHasIC(op),
+ "Unexpected fallback kind for non-JOF_IC op");
+
+ BaselineICFallbackKind kind = BaselineICFallbackKind(tableValue);
+ TrampolinePtr stubCode = fallbackCode.addr(kind);
+
+ // Initialize the ICEntry and ICFallbackStub.
+ uint32_t offset = loc.bytecodeToOffset(script);
+ ICEntry& entryRef = this->icEntry(icEntryIndex);
+ ICFallbackStub* stub = fallbackStub(icEntryIndex);
+ icEntryIndex++;
+ new (&entryRef) ICEntry(stub);
+ new (stub) ICFallbackStub(offset, stubCode);
+ }
+
+ // Assert all ICEntries have been initialized.
+ MOZ_ASSERT(icEntryIndex == numICEntries());
+}
+
+bool ICSupportsPolymorphicTypeData(JSOp op) {
+ MOZ_ASSERT(BytecodeOpHasIC(op));
+ BaselineICFallbackKind kind =
+ BaselineICFallbackKind(FallbackKindTable.lookup(op));
+ switch (kind) {
+ case BaselineICFallbackKind::ToBool:
+ case BaselineICFallbackKind::TypeOf:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool ICCacheIRStub::makesGCCalls() const { return stubInfo()->makesGCCalls(); }
+
+void ICFallbackStub::trackNotAttached() { state().trackNotAttached(); }
+
+// When we enter a baseline fallback stub, if a Warp compilation
+// exists that transpiled that IC, we notify that compilation. This
+// helps the bailout code tell whether a bailing instruction hoisted
+// by LICM would have been executed anyway.
+static void MaybeNotifyWarp(JSScript* script, ICFallbackStub* stub) {
+ if (stub->state().usedByTranspiler() && script->hasIonScript()) {
+ script->ionScript()->noteBaselineFallback();
+ }
+}
+
+void ICCacheIRStub::trace(JSTracer* trc) {
+ JitCode* stubJitCode = jitCode();
+ TraceManuallyBarrieredEdge(trc, &stubJitCode, "baseline-ic-stub-code");
+
+ TraceCacheIRStub(trc, this, stubInfo());
+}
+
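+// If the stub's state wants to transition (e.g. to megamorphic), try folding
+// similar stubs first; if the state does transition, discard the stubs
+// attached to this IC entry (emitting a CacheIR health report if enabled).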
+static void MaybeTransition(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub) {
+ if (stub->state().shouldTransition()) {
+ if (!TryFoldingStubs(cx, stub, frame->script(), frame->icScript())) {
+ cx->recoverFromOutOfMemory();
+ }
+ if (stub->state().maybeTransition()) {
+ ICEntry* icEntry = frame->icScript()->icEntryForStub(stub);
+#ifdef JS_CACHEIR_SPEW
+ if (cx->spewer().enabled(cx, frame->script(),
+ SpewChannel::CacheIRHealthReport)) {
+ CacheIRHealth cih;
+ RootedScript script(cx, frame->script());
+ cih.healthReportForIC(cx, icEntry, stub, script,
+ SpewContext::Transition);
+ }
+#endif
+ stub->discardStubs(cx, icEntry);
+ }
+ }
+}
+
+// This helper handles ICState updates/transitions while attaching CacheIR
+// stubs.
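+// Typical use (see e.g. DoToBoolFallback below):
+//
+//   TryAttachStub<ToBoolIRGenerator>("ToBool", cx, frame, stub, arg);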
+template <typename IRGenerator, typename... Args>
+static void TryAttachStub(const char* name, JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, Args&&... args) {
+ MaybeTransition(cx, frame, stub);
+
+ if (stub->state().canAttachStub()) {
+ RootedScript script(cx, frame->script());
+ ICScript* icScript = frame->icScript();
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+ bool attached = false;
+ IRGenerator gen(cx, script, pc, stub->state(), std::forward<Args>(args)...);
+ switch (gen.tryAttachStub()) {
+ case AttachDecision::Attach: {
+ ICAttachResult result =
+ AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+ script, icScript, stub, gen.stubName());
+ if (result == ICAttachResult::Attached) {
+ attached = true;
+ JitSpew(JitSpew_BaselineIC, " Attached %s CacheIR stub", name);
+ }
+ } break;
+ case AttachDecision::NoAction:
+ break;
+ case AttachDecision::TemporarilyUnoptimizable:
+ case AttachDecision::Deferred:
+ MOZ_ASSERT_UNREACHABLE("Not expected in generic TryAttachStub");
+ break;
+ }
+ if (!attached) {
+ stub->trackNotAttached();
+ }
+ }
+}
+
+void ICFallbackStub::unlinkStub(Zone* zone, ICEntry* icEntry,
+ ICCacheIRStub* prev, ICCacheIRStub* stub) {
+ if (prev) {
+ MOZ_ASSERT(prev->next() == stub);
+ prev->setNext(stub->next());
+ } else {
+ MOZ_ASSERT(icEntry->firstStub() == stub);
+ icEntry->setFirstStub(stub->next());
+ }
+
+ state_.trackUnlinkedStub();
+
+ // We are removing edges from ICStub to gcthings. Perform a barrier to let the
+ // GC know about those edges.
+ PreWriteBarrier(zone, stub);
+
+#ifdef DEBUG
+ // Poison stub code to ensure we don't call this stub again. However, if
+ // this stub can make calls, a pointer to it may be stored in a stub frame
+ // on the stack, so we can't touch the stubCode_ or GC will crash when
+ // tracing this pointer.
+ if (!stub->makesGCCalls()) {
+ stub->stubCode_ = (uint8_t*)0xbad;
+ }
+#endif
+}
+
+void ICFallbackStub::discardStubs(JSContext* cx, ICEntry* icEntry) {
+ ICStub* stub = icEntry->firstStub();
+ while (stub != this) {
+ unlinkStub(cx->zone(), icEntry, /* prev = */ nullptr,
+ stub->toCacheIRStub());
+ stub = stub->toCacheIRStub()->next();
+ }
+ clearHasFoldedStub();
+}
+
+static void InitMacroAssemblerForICStub(StackMacroAssembler& masm) {
+#ifndef JS_USE_LINK_REGISTER
+  // The first value contains the return address,
+ // which we pull into ICTailCallReg for tail calls.
+ masm.adjustFrame(sizeof(intptr_t));
+#endif
+#ifdef JS_CODEGEN_ARM
+ masm.setSecondScratchReg(BaselineSecondScratchReg);
+#endif
+}
+
+bool FallbackICCodeCompiler::tailCallVMInternal(MacroAssembler& masm,
+ TailCallVMFunctionId id) {
+ TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(id);
+ const VMFunctionData& fun = GetVMFunction(id);
+ MOZ_ASSERT(fun.expectTailCall == TailCall);
+ uint32_t argSize = fun.explicitStackSlots() * sizeof(void*);
+ EmitBaselineTailCallVM(code, masm, argSize);
+ return true;
+}
+
+bool FallbackICCodeCompiler::callVMInternal(MacroAssembler& masm,
+ VMFunctionId id) {
+ MOZ_ASSERT(inStubFrame_);
+
+ TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(id);
+ MOZ_ASSERT(GetVMFunction(id).expectTailCall == NonTailCall);
+
+ EmitBaselineCallVM(code, masm);
+ return true;
+}
+
+template <typename Fn, Fn fn>
+bool FallbackICCodeCompiler::callVM(MacroAssembler& masm) {
+ VMFunctionId id = VMFunctionToId<Fn, fn>::id;
+ return callVMInternal(masm, id);
+}
+
+template <typename Fn, Fn fn>
+bool FallbackICCodeCompiler::tailCallVM(MacroAssembler& masm) {
+ TailCallVMFunctionId id = TailCallVMFunctionToId<Fn, fn>::id;
+ return tailCallVMInternal(masm, id);
+}
+
+void FallbackICCodeCompiler::enterStubFrame(MacroAssembler& masm,
+ Register scratch) {
+ EmitBaselineEnterStubFrame(masm, scratch);
+#ifdef DEBUG
+ framePushedAtEnterStubFrame_ = masm.framePushed();
+#endif
+
+ MOZ_ASSERT(!inStubFrame_);
+ inStubFrame_ = true;
+
+#ifdef DEBUG
+ entersStubFrame_ = true;
+#endif
+}
+
+void FallbackICCodeCompiler::assumeStubFrame() {
+ MOZ_ASSERT(!inStubFrame_);
+ inStubFrame_ = true;
+
+#ifdef DEBUG
+ entersStubFrame_ = true;
+
+  // |framePushed| isn't tracked precisely in ICStubs, so simply assume it to
+  // be the size of the stub frame layout plus the pushed ICStub*, so that
+  // assertions don't fail in leaveStubFrame.
+ framePushedAtEnterStubFrame_ =
+ BaselineStubFrameLayout::Size() + sizeof(ICStub*);
+#endif
+}
+
+void FallbackICCodeCompiler::leaveStubFrame(MacroAssembler& masm) {
+ MOZ_ASSERT(entersStubFrame_ && inStubFrame_);
+ inStubFrame_ = false;
+
+#ifdef DEBUG
+ masm.setFramePushed(framePushedAtEnterStubFrame_);
+#endif
+ EmitBaselineLeaveStubFrame(masm);
+}
+
+void FallbackICCodeCompiler::pushStubPayload(MacroAssembler& masm,
+ Register scratch) {
+ if (inStubFrame_) {
+ masm.loadPtr(Address(FramePointer, 0), scratch);
+ masm.pushBaselineFramePtr(scratch, scratch);
+ } else {
+ masm.pushBaselineFramePtr(FramePointer, scratch);
+ }
+}
+
+void FallbackICCodeCompiler::PushStubPayload(MacroAssembler& masm,
+ Register scratch) {
+ pushStubPayload(masm, scratch);
+ masm.adjustFrame(sizeof(intptr_t));
+}
+
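+// The fallback paths below generally follow the same pattern: the Do*Fallback
+// VM function bumps the stub's entered count, notifies an existing Warp
+// compilation, spews, tries to attach a CacheIR stub, and then performs the
+// generic operation; the corresponding emit_* method pushes the IC arguments
+// and (tail-)calls that VM function.
+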
+//
+// ToBool_Fallback
+//
+
+bool DoToBoolFallback(JSContext* cx, BaselineFrame* frame, ICFallbackStub* stub,
+ HandleValue arg, MutableHandleValue ret) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "ToBool");
+
+ TryAttachStub<ToBoolIRGenerator>("ToBool", cx, frame, stub, arg);
+
+ bool cond = ToBoolean(arg);
+ ret.setBoolean(cond);
+
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_ToBool() {
+ static_assert(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Push arguments.
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ MutableHandleValue);
+ return tailCallVM<Fn, DoToBoolFallback>(masm);
+}
+
+//
+// GetElem_Fallback
+//
+
+bool DoGetElemFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "GetElem");
+
+#ifdef DEBUG
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+ MOZ_ASSERT(JSOp(*pc) == JSOp::GetElem);
+#endif
+
+ TryAttachStub<GetPropIRGenerator>("GetElem", cx, frame, stub,
+ CacheKind::GetElem, lhs, rhs);
+
+ if (!GetElementOperation(cx, lhs, rhs, res)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool DoGetElemSuperFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs,
+ HandleValue rhs, HandleValue receiver,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "GetElemSuper(%s)", CodeName(op));
+
+ MOZ_ASSERT(op == JSOp::GetElemSuper);
+
+ // |lhs| is [[HomeObject]].[[Prototype]] which must be an Object or null.
+ MOZ_ASSERT(lhs.isObjectOrNull());
+
+ int lhsIndex = -1;
+ RootedObject lhsObj(
+ cx, ToObjectFromStackForPropertyAccess(cx, lhs, lhsIndex, rhs));
+ if (!lhsObj) {
+ return false;
+ }
+
+ TryAttachStub<GetPropIRGenerator>("GetElemSuper", cx, frame, stub,
+ CacheKind::GetElemSuper, lhs, rhs);
+
+ return GetObjectElementOperation(cx, op, lhsObj, receiver, rhs, res);
+}
+
+bool FallbackICCodeCompiler::emitGetElem(bool hasReceiver) {
+ static_assert(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+  // Super property getters use a |this| that differs from the base object.
+ if (hasReceiver) {
+ // State: receiver in R0, index in R1, obj on the stack
+
+ // Ensure stack is fully synced for the expression decompiler.
+ // We need: receiver, index, obj
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+ masm.pushValue(Address(masm.getStackPointer(), sizeof(Value) * 2));
+
+ // Push arguments.
+ masm.pushValue(R0); // Receiver
+ masm.pushValue(R1); // Index
+ masm.pushValue(Address(masm.getStackPointer(), sizeof(Value) * 5)); // Obj
+ masm.push(ICStubReg);
+ masm.pushBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ using Fn =
+ bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ HandleValue, HandleValue, MutableHandleValue);
+ if (!tailCallVM<Fn, DoGetElemSuperFallback>(masm)) {
+ return false;
+ }
+ } else {
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ masm.pushBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*,
+ HandleValue, HandleValue, MutableHandleValue);
+ if (!tailCallVM<Fn, DoGetElemFallback>(masm)) {
+ return false;
+ }
+ }
+
+  // This is the resume point used when a bailout rewrites the call stack to
+  // undo Ion inlined frames. The return address pushed onto the reconstructed
+  // stack will point here.
+ assumeStubFrame();
+ if (hasReceiver) {
+ code.initBailoutReturnOffset(BailoutReturnKind::GetElemSuper,
+ masm.currentOffset());
+ } else {
+ code.initBailoutReturnOffset(BailoutReturnKind::GetElem,
+ masm.currentOffset());
+ }
+
+ leaveStubFrame(masm);
+
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_GetElem() {
+ return emitGetElem(/* hasReceiver = */ false);
+}
+
+bool FallbackICCodeCompiler::emit_GetElemSuper() {
+ return emitGetElem(/* hasReceiver = */ true);
+}
+
+bool DoSetElemFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, Value* stack, HandleValue objv,
+ HandleValue index, HandleValue rhs) {
+ using DeferType = SetPropIRGenerator::DeferType;
+
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ RootedScript script(cx, frame->script());
+ RootedScript outerScript(cx, script);
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "SetElem(%s)", CodeName(JSOp(*pc)));
+
+ MOZ_ASSERT(op == JSOp::SetElem || op == JSOp::StrictSetElem ||
+ op == JSOp::InitElem || op == JSOp::InitHiddenElem ||
+ op == JSOp::InitLockedElem || op == JSOp::InitElemInc);
+
+ int objvIndex = -3;
+ RootedObject obj(
+ cx, ToObjectFromStackForPropertyAccess(cx, objv, objvIndex, index));
+ if (!obj) {
+ return false;
+ }
+
+ Rooted<Shape*> oldShape(cx, obj->shape());
+
+ DeferType deferType = DeferType::None;
+ bool attached = false;
+
+ MaybeTransition(cx, frame, stub);
+
+ if (stub->state().canAttachStub()) {
+ ICScript* icScript = frame->icScript();
+ SetPropIRGenerator gen(cx, script, pc, CacheKind::SetElem, stub->state(),
+ objv, index, rhs);
+ switch (gen.tryAttachStub()) {
+ case AttachDecision::Attach: {
+ ICAttachResult result = AttachBaselineCacheIRStub(
+ cx, gen.writerRef(), gen.cacheKind(), frame->script(), icScript,
+ stub, gen.stubName());
+ if (result == ICAttachResult::Attached) {
+ attached = true;
+ JitSpew(JitSpew_BaselineIC, " Attached SetElem CacheIR stub");
+ }
+ } break;
+ case AttachDecision::NoAction:
+ break;
+ case AttachDecision::TemporarilyUnoptimizable:
+ attached = true;
+ break;
+ case AttachDecision::Deferred:
+ deferType = gen.deferType();
+ MOZ_ASSERT(deferType != DeferType::None);
+ break;
+ }
+ }
+
+ if (op == JSOp::InitElem || op == JSOp::InitHiddenElem ||
+ op == JSOp::InitLockedElem) {
+ if (!InitElemOperation(cx, pc, obj, index, rhs)) {
+ return false;
+ }
+ } else if (op == JSOp::InitElemInc) {
+ if (!InitElemIncOperation(cx, obj.as<ArrayObject>(), index.toInt32(),
+ rhs)) {
+ return false;
+ }
+ } else {
+ if (!SetObjectElementWithReceiver(cx, obj, index, rhs, objv,
+ JSOp(*pc) == JSOp::StrictSetElem)) {
+ return false;
+ }
+ }
+
+ // Overwrite the object on the stack (pushed for the decompiler) with the rhs.
+ MOZ_ASSERT(stack[2] == objv);
+ stack[2] = rhs;
+
+ if (attached) {
+ return true;
+ }
+
+ // The SetObjectElement call might have entered this IC recursively, so try
+ // to transition.
+ MaybeTransition(cx, frame, stub);
+
+ bool canAttachStub = stub->state().canAttachStub();
+
+ if (deferType != DeferType::None && canAttachStub) {
+ SetPropIRGenerator gen(cx, script, pc, CacheKind::SetElem, stub->state(),
+ objv, index, rhs);
+
+ MOZ_ASSERT(deferType == DeferType::AddSlot);
+ AttachDecision decision = gen.tryAttachAddSlotStub(oldShape);
+
+ switch (decision) {
+ case AttachDecision::Attach: {
+ ICScript* icScript = frame->icScript();
+ ICAttachResult result = AttachBaselineCacheIRStub(
+ cx, gen.writerRef(), gen.cacheKind(), frame->script(), icScript,
+ stub, gen.stubName());
+ if (result == ICAttachResult::Attached) {
+ attached = true;
+ JitSpew(JitSpew_BaselineIC, " Attached SetElem CacheIR stub");
+ }
+ } break;
+ case AttachDecision::NoAction:
+ gen.trackAttached(IRGenerator::NotAttached);
+ break;
+ case AttachDecision::TemporarilyUnoptimizable:
+ case AttachDecision::Deferred:
+ MOZ_ASSERT_UNREACHABLE("Invalid attach result");
+ break;
+ }
+ }
+ if (!attached && canAttachStub) {
+ stub->trackNotAttached();
+ }
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_SetElem() {
+ static_assert(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+ // State: R0: object, R1: index, stack: rhs.
+ // For the decompiler, the stack has to be: object, index, rhs,
+ // so we push the index, then overwrite the rhs Value with R0
+ // and push the rhs value.
+ masm.pushValue(R1);
+ masm.loadValue(Address(masm.getStackPointer(), sizeof(Value)), R1);
+ masm.storeValue(R0, Address(masm.getStackPointer(), sizeof(Value)));
+ masm.pushValue(R1);
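+  // The stack now contains, from the bottom: object, index, rhs.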
+
+ // Push arguments.
+ masm.pushValue(R1); // RHS
+
+ // Push index. On x86 and ARM two push instructions are emitted so use a
+ // separate register to store the old stack pointer.
+ masm.moveStackPtrTo(R1.scratchReg());
+ masm.pushValue(Address(R1.scratchReg(), 2 * sizeof(Value)));
+ masm.pushValue(R0); // Object.
+
+ // Push pointer to stack values, so that the stub can overwrite the object
+ // (pushed for the decompiler) with the rhs.
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), 3 * sizeof(Value)), R0.scratchReg());
+ masm.push(R0.scratchReg());
+
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, Value*,
+ HandleValue, HandleValue, HandleValue);
+ return tailCallVM<Fn, DoSetElemFallback>(masm);
+}
+
+//
+// In_Fallback
+//
+
+bool DoInFallback(JSContext* cx, BaselineFrame* frame, ICFallbackStub* stub,
+ HandleValue key, HandleValue objValue,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "In");
+
+ if (!objValue.isObject()) {
+ ReportInNotObjectError(cx, key, objValue);
+ return false;
+ }
+
+ TryAttachStub<HasPropIRGenerator>("In", cx, frame, stub, CacheKind::In, key,
+ objValue);
+
+ RootedObject obj(cx, &objValue.toObject());
+ bool cond = false;
+ if (!OperatorIn(cx, key, obj, &cond)) {
+ return false;
+ }
+ res.setBoolean(cond);
+
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_In() {
+ EmitRestoreTailCallReg(masm);
+
+ // Sync for the decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ HandleValue, MutableHandleValue);
+ return tailCallVM<Fn, DoInFallback>(masm);
+}
+
+//
+// HasOwn_Fallback
+//
+
+bool DoHasOwnFallback(JSContext* cx, BaselineFrame* frame, ICFallbackStub* stub,
+ HandleValue keyValue, HandleValue objValue,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "HasOwn");
+
+ TryAttachStub<HasPropIRGenerator>("HasOwn", cx, frame, stub,
+ CacheKind::HasOwn, keyValue, objValue);
+
+ bool found;
+ if (!HasOwnProperty(cx, objValue, keyValue, &found)) {
+ return false;
+ }
+
+ res.setBoolean(found);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_HasOwn() {
+ EmitRestoreTailCallReg(masm);
+
+ // Sync for the decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ HandleValue, MutableHandleValue);
+ return tailCallVM<Fn, DoHasOwnFallback>(masm);
+}
+
+//
+// CheckPrivate_Fallback
+//
+
+bool DoCheckPrivateFieldFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue objValue,
+ HandleValue keyValue, MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+
+ FallbackICSpew(cx, stub, "CheckPrivateField");
+
+ MOZ_ASSERT(keyValue.isSymbol() && keyValue.toSymbol()->isPrivateName());
+
+ TryAttachStub<CheckPrivateFieldIRGenerator>("CheckPrivate", cx, frame, stub,
+ CacheKind::CheckPrivateField,
+ keyValue, objValue);
+
+ bool result;
+ if (!CheckPrivateFieldOperation(cx, pc, objValue, keyValue, &result)) {
+ return false;
+ }
+
+ res.setBoolean(result);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_CheckPrivateField() {
+ EmitRestoreTailCallReg(masm);
+
+ // Sync for the decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ HandleValue, MutableHandleValue);
+ return tailCallVM<Fn, DoCheckPrivateFieldFallback>(masm);
+}
+
+//
+// GetName_Fallback
+//
+
+bool DoGetNameFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleObject envChain,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+ mozilla::DebugOnly<JSOp> op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "GetName(%s)", CodeName(JSOp(*pc)));
+
+ MOZ_ASSERT(op == JSOp::GetName || op == JSOp::GetGName);
+
+ Rooted<PropertyName*> name(cx, script->getName(pc));
+
+ TryAttachStub<GetNameIRGenerator>("GetName", cx, frame, stub, envChain, name);
+
+ static_assert(JSOpLength_GetGName == JSOpLength_GetName,
+ "Otherwise our check for JSOp::Typeof isn't ok");
+ if (JSOp(pc[JSOpLength_GetGName]) == JSOp::Typeof) {
+ if (!GetEnvironmentName<GetNameMode::TypeOf>(cx, envChain, name, res)) {
+ return false;
+ }
+ } else {
+ if (!GetEnvironmentName<GetNameMode::Normal>(cx, envChain, name, res)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_GetName() {
+ static_assert(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(R0.scratchReg());
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleObject,
+ MutableHandleValue);
+ return tailCallVM<Fn, DoGetNameFallback>(masm);
+}
+
+//
+// BindName_Fallback
+//
+
+bool DoBindNameFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleObject envChain,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+ mozilla::DebugOnly<JSOp> op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "BindName(%s)", CodeName(JSOp(*pc)));
+
+ MOZ_ASSERT(op == JSOp::BindName || op == JSOp::BindGName);
+
+ Rooted<PropertyName*> name(cx, frame->script()->getName(pc));
+
+ TryAttachStub<BindNameIRGenerator>("BindName", cx, frame, stub, envChain,
+ name);
+
+ RootedObject scope(cx);
+ if (!LookupNameUnqualified(cx, name, envChain, &scope)) {
+ return false;
+ }
+
+ res.setObject(*scope);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_BindName() {
+ static_assert(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(R0.scratchReg());
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleObject,
+ MutableHandleValue);
+ return tailCallVM<Fn, DoBindNameFallback>(masm);
+}
+
+//
+// GetIntrinsic_Fallback
+//
+
+bool DoGetIntrinsicFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+ mozilla::DebugOnly<JSOp> op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "GetIntrinsic(%s)", CodeName(JSOp(*pc)));
+
+ MOZ_ASSERT(op == JSOp::GetIntrinsic);
+
+ if (!GetIntrinsicOperation(cx, script, pc, res)) {
+ return false;
+ }
+
+ TryAttachStub<GetIntrinsicIRGenerator>("GetIntrinsic", cx, frame, stub, res);
+
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_GetIntrinsic() {
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn =
+ bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, MutableHandleValue);
+ return tailCallVM<Fn, DoGetIntrinsicFallback>(masm);
+}
+
+//
+// GetProp_Fallback
+//
+
+bool DoGetPropFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, MutableHandleValue val,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "GetProp(%s)", CodeName(op));
+
+ MOZ_ASSERT(op == JSOp::GetProp || op == JSOp::GetBoundName);
+
+ Rooted<PropertyName*> name(cx, script->getName(pc));
+ RootedValue idVal(cx, StringValue(name));
+
+ TryAttachStub<GetPropIRGenerator>("GetProp", cx, frame, stub,
+ CacheKind::GetProp, val, idVal);
+
+ if (op == JSOp::GetBoundName) {
+ RootedObject env(cx, &val.toObject());
+ RootedId id(cx, NameToId(name));
+ return GetNameBoundInEnvironment(cx, env, id, res);
+ }
+
+ MOZ_ASSERT(op == JSOp::GetProp);
+ if (!GetProperty(cx, val, name, res)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool DoGetPropSuperFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue receiver,
+ MutableHandleValue val, MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+ FallbackICSpew(cx, stub, "GetPropSuper(%s)", CodeName(JSOp(*pc)));
+
+ MOZ_ASSERT(JSOp(*pc) == JSOp::GetPropSuper);
+
+ Rooted<PropertyName*> name(cx, script->getName(pc));
+ RootedValue idVal(cx, StringValue(name));
+
+ // |val| is [[HomeObject]].[[Prototype]] which must be an Object or null.
+ MOZ_ASSERT(val.isObjectOrNull());
+
+ int valIndex = -1;
+ RootedObject valObj(
+ cx, ToObjectFromStackForPropertyAccess(cx, val, valIndex, name));
+ if (!valObj) {
+ return false;
+ }
+
+ TryAttachStub<GetPropIRGenerator>("GetPropSuper", cx, frame, stub,
+ CacheKind::GetPropSuper, val, idVal);
+
+ if (!GetProperty(cx, valObj, receiver, name, res)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool FallbackICCodeCompiler::emitGetProp(bool hasReceiver) {
+ static_assert(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+  // Super property getters use a |this| that differs from the base object.
+ if (hasReceiver) {
+ // Push arguments.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+ masm.push(ICStubReg);
+ masm.pushBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*,
+ HandleValue, MutableHandleValue, MutableHandleValue);
+ if (!tailCallVM<Fn, DoGetPropSuperFallback>(masm)) {
+ return false;
+ }
+ } else {
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+
+ // Push arguments.
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ masm.pushBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*,
+ MutableHandleValue, MutableHandleValue);
+ if (!tailCallVM<Fn, DoGetPropFallback>(masm)) {
+ return false;
+ }
+ }
+
+ // This is the resume point used when bailout rewrites call stack to undo
+ // Ion inlined frames. The return address pushed onto reconstructed stack
+ // will point here.
+ assumeStubFrame();
+ if (hasReceiver) {
+ code.initBailoutReturnOffset(BailoutReturnKind::GetPropSuper,
+ masm.currentOffset());
+ } else {
+ code.initBailoutReturnOffset(BailoutReturnKind::GetProp,
+ masm.currentOffset());
+ }
+
+ leaveStubFrame(masm);
+
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_GetProp() {
+ return emitGetProp(/* hasReceiver = */ false);
+}
+
+bool FallbackICCodeCompiler::emit_GetPropSuper() {
+ return emitGetProp(/* hasReceiver = */ true);
+}
+
+//
+// SetProp_Fallback
+//
+
+bool DoSetPropFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, Value* stack, HandleValue lhs,
+ HandleValue rhs) {
+ using DeferType = SetPropIRGenerator::DeferType;
+
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "SetProp(%s)", CodeName(op));
+
+ MOZ_ASSERT(op == JSOp::SetProp || op == JSOp::StrictSetProp ||
+ op == JSOp::SetName || op == JSOp::StrictSetName ||
+ op == JSOp::SetGName || op == JSOp::StrictSetGName ||
+ op == JSOp::InitProp || op == JSOp::InitLockedProp ||
+ op == JSOp::InitHiddenProp || op == JSOp::InitGLexical);
+
+ Rooted<PropertyName*> name(cx, script->getName(pc));
+ RootedId id(cx, NameToId(name));
+
+ int lhsIndex = -2;
+ RootedObject obj(cx,
+ ToObjectFromStackForPropertyAccess(cx, lhs, lhsIndex, id));
+ if (!obj) {
+ return false;
+ }
+ Rooted<Shape*> oldShape(cx, obj->shape());
+
+ DeferType deferType = DeferType::None;
+ bool attached = false;
+ MaybeTransition(cx, frame, stub);
+
+ if (stub->state().canAttachStub()) {
+ RootedValue idVal(cx, StringValue(name));
+ SetPropIRGenerator gen(cx, script, pc, CacheKind::SetProp, stub->state(),
+ lhs, idVal, rhs);
+ switch (gen.tryAttachStub()) {
+ case AttachDecision::Attach: {
+ ICScript* icScript = frame->icScript();
+ ICAttachResult result = AttachBaselineCacheIRStub(
+ cx, gen.writerRef(), gen.cacheKind(), frame->script(), icScript,
+ stub, gen.stubName());
+ if (result == ICAttachResult::Attached) {
+ attached = true;
+ JitSpew(JitSpew_BaselineIC, " Attached SetProp CacheIR stub");
+ }
+ } break;
+ case AttachDecision::NoAction:
+ break;
+ case AttachDecision::TemporarilyUnoptimizable:
+ attached = true;
+ break;
+ case AttachDecision::Deferred:
+ deferType = gen.deferType();
+ MOZ_ASSERT(deferType != DeferType::None);
+ break;
+ }
+ }
+
+ if (op == JSOp::InitProp || op == JSOp::InitLockedProp ||
+ op == JSOp::InitHiddenProp) {
+ if (!InitPropertyOperation(cx, pc, obj, name, rhs)) {
+ return false;
+ }
+ } else if (op == JSOp::SetName || op == JSOp::StrictSetName ||
+ op == JSOp::SetGName || op == JSOp::StrictSetGName) {
+ if (!SetNameOperation(cx, script, pc, obj, rhs)) {
+ return false;
+ }
+ } else if (op == JSOp::InitGLexical) {
+ ExtensibleLexicalEnvironmentObject* lexicalEnv;
+ if (script->hasNonSyntacticScope()) {
+ lexicalEnv = &NearestEnclosingExtensibleLexicalEnvironment(
+ frame->environmentChain());
+ } else {
+ lexicalEnv = &cx->global()->lexicalEnvironment();
+ }
+ InitGlobalLexicalOperation(cx, lexicalEnv, script, pc, rhs);
+ } else {
+ MOZ_ASSERT(op == JSOp::SetProp || op == JSOp::StrictSetProp);
+
+ ObjectOpResult result;
+ if (!SetProperty(cx, obj, id, rhs, lhs, result) ||
+ !result.checkStrictModeError(cx, obj, id, op == JSOp::StrictSetProp)) {
+ return false;
+ }
+ }
+
+ // Overwrite the LHS on the stack (pushed for the decompiler) with the RHS.
+ MOZ_ASSERT(stack[1] == lhs);
+ stack[1] = rhs;
+
+ if (attached) {
+ return true;
+ }
+
+ // The SetProperty call might have entered this IC recursively, so try
+ // to transition.
+ MaybeTransition(cx, frame, stub);
+
+ bool canAttachStub = stub->state().canAttachStub();
+
+ if (deferType != DeferType::None && canAttachStub) {
+ RootedValue idVal(cx, StringValue(name));
+ SetPropIRGenerator gen(cx, script, pc, CacheKind::SetProp, stub->state(),
+ lhs, idVal, rhs);
+
+ MOZ_ASSERT(deferType == DeferType::AddSlot);
+ AttachDecision decision = gen.tryAttachAddSlotStub(oldShape);
+
+ switch (decision) {
+ case AttachDecision::Attach: {
+ ICScript* icScript = frame->icScript();
+ ICAttachResult result = AttachBaselineCacheIRStub(
+ cx, gen.writerRef(), gen.cacheKind(), frame->script(), icScript,
+ stub, gen.stubName());
+ if (result == ICAttachResult::Attached) {
+ attached = true;
+ JitSpew(JitSpew_BaselineIC, " Attached SetElem CacheIR stub");
+ }
+ } break;
+ case AttachDecision::NoAction:
+ gen.trackAttached(IRGenerator::NotAttached);
+ break;
+ case AttachDecision::TemporarilyUnoptimizable:
+ case AttachDecision::Deferred:
+ MOZ_ASSERT_UNREACHABLE("Invalid attach result");
+ break;
+ }
+ }
+ if (!attached && canAttachStub) {
+ stub->trackNotAttached();
+ }
+
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_SetProp() {
+ static_assert(R0 == JSReturnOperand);
+
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ // Overwrite the RHS value on top of the stack with the object, then push
+ // the RHS in R1 on top of that.
+ masm.storeValue(R0, Address(masm.getStackPointer(), 0));
+ masm.pushValue(R1);
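+  // The stack now contains, from the bottom: object (lhs), rhs.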
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+
+ // Push pointer to stack values, so that the stub can overwrite the object
+ // (pushed for the decompiler) with the RHS.
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), 2 * sizeof(Value)), R0.scratchReg());
+ masm.push(R0.scratchReg());
+
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, Value*,
+ HandleValue, HandleValue);
+ if (!tailCallVM<Fn, DoSetPropFallback>(masm)) {
+ return false;
+ }
+
+  // This is the resume point used when a bailout rewrites the call stack to
+  // undo Ion inlined frames. The return address pushed onto the reconstructed
+  // stack will point here.
+ assumeStubFrame();
+ code.initBailoutReturnOffset(BailoutReturnKind::SetProp,
+ masm.currentOffset());
+
+ leaveStubFrame(masm);
+ EmitReturnFromIC(masm);
+
+ return true;
+}
+
+//
+// Call_Fallback
+//
+
+bool DoCallFallback(JSContext* cx, BaselineFrame* frame, ICFallbackStub* stub,
+ uint32_t argc, Value* vp, MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "Call(%s)", CodeName(op));
+
+ MOZ_ASSERT(argc == GET_ARGC(pc));
+ bool constructing =
+ (op == JSOp::New || op == JSOp::NewContent || op == JSOp::SuperCall);
+ bool ignoresReturnValue = (op == JSOp::CallIgnoresRv);
+
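+  // On the stack, vp[0] is the callee, vp[1] is |this|, followed by |argc|
+  // arguments and, if constructing, the new.target value.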
+ // Ensure vp array is rooted - we may GC in here.
+ size_t numValues = argc + 2 + constructing;
+ RootedExternalValueArray vpRoot(cx, numValues, vp);
+
+ CallArgs callArgs = CallArgsFromSp(argc + constructing, vp + numValues,
+ constructing, ignoresReturnValue);
+ RootedValue callee(cx, vp[0]);
+ RootedValue newTarget(cx, constructing ? callArgs.newTarget() : NullValue());
+
+ // Transition stub state to megamorphic or generic if warranted.
+ MaybeTransition(cx, frame, stub);
+
+ bool canAttachStub = stub->state().canAttachStub();
+ bool handled = false;
+
+ // Only bother to try optimizing JSOp::Call with CacheIR if the chain is still
+ // allowed to attach stubs.
+ if (canAttachStub) {
+ HandleValueArray args = HandleValueArray::fromMarkedLocation(argc, vp + 2);
+ CallIRGenerator gen(cx, script, pc, op, stub->state(), argc, callee,
+ callArgs.thisv(), newTarget, args);
+ switch (gen.tryAttachStub()) {
+ case AttachDecision::NoAction:
+ break;
+ case AttachDecision::Attach: {
+ ICScript* icScript = frame->icScript();
+ ICAttachResult result =
+ AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+ script, icScript, stub, gen.stubName());
+ if (result == ICAttachResult::Attached) {
+ handled = true;
+ JitSpew(JitSpew_BaselineIC, " Attached Call CacheIR stub");
+ }
+ } break;
+ case AttachDecision::TemporarilyUnoptimizable:
+ handled = true;
+ break;
+ case AttachDecision::Deferred:
+ MOZ_CRASH("No deferred Call stubs");
+ }
+ if (!handled) {
+ stub->trackNotAttached();
+ }
+ }
+
+ if (constructing) {
+ if (!ConstructFromStack(cx, callArgs)) {
+ return false;
+ }
+ res.set(callArgs.rval());
+ } else if ((op == JSOp::Eval || op == JSOp::StrictEval) &&
+ cx->global()->valueIsEval(callee)) {
+ if (!DirectEval(cx, callArgs.get(0), res)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(op == JSOp::Call || op == JSOp::CallContent ||
+ op == JSOp::CallIgnoresRv || op == JSOp::CallIter ||
+ op == JSOp::CallContentIter || op == JSOp::Eval ||
+ op == JSOp::StrictEval);
+ if ((op == JSOp::CallIter || op == JSOp::CallContentIter) &&
+ callee.isPrimitive()) {
+ MOZ_ASSERT(argc == 0, "thisv must be on top of the stack");
+ ReportValueError(cx, JSMSG_NOT_ITERABLE, -1, callArgs.thisv(), nullptr);
+ return false;
+ }
+
+ if (!CallFromStack(cx, callArgs)) {
+ return false;
+ }
+
+ res.set(callArgs.rval());
+ }
+
+ return true;
+}
+
+bool DoSpreadCallFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, Value* vp,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+ JSOp op = JSOp(*pc);
+ bool constructing = (op == JSOp::SpreadNew || op == JSOp::SpreadSuperCall);
+ FallbackICSpew(cx, stub, "SpreadCall(%s)", CodeName(op));
+
+ // Ensure vp array is rooted - we may GC in here.
+ RootedExternalValueArray vpRoot(cx, 3 + constructing, vp);
+
+ RootedValue callee(cx, vp[0]);
+ RootedValue thisv(cx, vp[1]);
+ RootedValue arr(cx, vp[2]);
+ RootedValue newTarget(cx, constructing ? vp[3] : NullValue());
+
+ // Transition stub state to megamorphic or generic if warranted.
+ MaybeTransition(cx, frame, stub);
+
+ // Try attaching a call stub.
+ bool handled = false;
+ if (op != JSOp::SpreadEval && op != JSOp::StrictSpreadEval &&
+ stub->state().canAttachStub()) {
+ // Try CacheIR first:
+ Rooted<ArrayObject*> aobj(cx, &arr.toObject().as<ArrayObject>());
+ MOZ_ASSERT(IsPackedArray(aobj));
+
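+    // The spread operand is a packed array, so its dense elements are exactly
+    // the call arguments.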
+ HandleValueArray args = HandleValueArray::fromMarkedLocation(
+ aobj->length(), aobj->getDenseElements());
+ CallIRGenerator gen(cx, script, pc, op, stub->state(), 1, callee, thisv,
+ newTarget, args);
+ switch (gen.tryAttachStub()) {
+ case AttachDecision::NoAction:
+ break;
+ case AttachDecision::Attach: {
+ ICScript* icScript = frame->icScript();
+ ICAttachResult result =
+ AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+ script, icScript, stub, gen.stubName());
+
+ if (result == ICAttachResult::Attached) {
+ handled = true;
+ JitSpew(JitSpew_BaselineIC, " Attached Spread Call CacheIR stub");
+ }
+ } break;
+ case AttachDecision::TemporarilyUnoptimizable:
+ handled = true;
+ break;
+ case AttachDecision::Deferred:
+ MOZ_ASSERT_UNREACHABLE("No deferred optimizations for spread calls");
+ break;
+ }
+ if (!handled) {
+ stub->trackNotAttached();
+ }
+ }
+
+ return SpreadCallOperation(cx, script, pc, thisv, callee, arr, newTarget,
+ res);
+}
+
+void FallbackICCodeCompiler::pushCallArguments(
+ MacroAssembler& masm, AllocatableGeneralRegisterSet regs, Register argcReg,
+ bool isConstructing) {
+ MOZ_ASSERT(!regs.has(argcReg));
+
+ // argPtr initially points to the last argument.
+ Register argPtr = regs.takeAny();
+ masm.mov(FramePointer, argPtr);
+
+ // Skip 3 pointers pushed on top of the arguments: the frame descriptor,
+ // return address, and old frame pointer.
+ size_t valueOffset = BaselineStubFrameLayout::Size();
+
+ // We have to push |this|, callee, new.target (if constructing) and argc
+ // arguments. Handle the number of Values we know statically first.
+
+ size_t numNonArgValues = 2 + isConstructing;
+ for (size_t i = 0; i < numNonArgValues; i++) {
+ masm.pushValue(Address(argPtr, valueOffset));
+ valueOffset += sizeof(Value);
+ }
+
+ // If there are no arguments we're done.
+ Label done;
+ masm.branchTest32(Assembler::Zero, argcReg, argcReg, &done);
+
+ // Push argc Values.
+ Label loop;
+ Register count = regs.takeAny();
+ masm.addPtr(Imm32(valueOffset), argPtr);
+ masm.move32(argcReg, count);
+ masm.bind(&loop);
+ {
+ masm.pushValue(Address(argPtr, 0));
+ masm.addPtr(Imm32(sizeof(Value)), argPtr);
+
+ masm.branchSub32(Assembler::NonZero, Imm32(1), count, &loop);
+ }
+ masm.bind(&done);
+}
+
+bool FallbackICCodeCompiler::emitCall(bool isSpread, bool isConstructing) {
+ static_assert(R0 == JSReturnOperand);
+
+ // Values are on the stack left-to-right. Calling convention wants them
+ // right-to-left so duplicate them on the stack in reverse order.
+ // |this| and callee are pushed last.
+
+ AllocatableGeneralRegisterSet regs = BaselineICAvailableGeneralRegs(0);
+
+ if (MOZ_UNLIKELY(isSpread)) {
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, R1.scratchReg());
+
+ // Use FramePointer instead of StackPointer because it's not affected by
+ // the stack pushes below.
+
+ // newTarget
+ uint32_t valueOffset = BaselineStubFrameLayout::Size();
+ if (isConstructing) {
+ masm.pushValue(Address(FramePointer, valueOffset));
+ valueOffset += sizeof(Value);
+ }
+
+ // array
+ masm.pushValue(Address(FramePointer, valueOffset));
+ valueOffset += sizeof(Value);
+
+ // this
+ masm.pushValue(Address(FramePointer, valueOffset));
+ valueOffset += sizeof(Value);
+
+ // callee
+ masm.pushValue(Address(FramePointer, valueOffset));
+ valueOffset += sizeof(Value);
+
+ masm.push(masm.getStackPointer());
+ masm.push(ICStubReg);
+
+ PushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, Value*,
+ MutableHandleValue);
+ if (!callVM<Fn, DoSpreadCallFallback>(masm)) {
+ return false;
+ }
+
+ leaveStubFrame(masm);
+ EmitReturnFromIC(masm);
+
+    // SpreadCall is not yet supported in Ion, so do not generate asm code
+    // for bailout.
+ return true;
+ }
+
+ // Push a stub frame so that we can perform a non-tail call.
+ enterStubFrame(masm, R1.scratchReg());
+
+ regs.take(R0.scratchReg()); // argc.
+
+ pushCallArguments(masm, regs, R0.scratchReg(), isConstructing);
+
+ masm.push(masm.getStackPointer());
+ masm.push(R0.scratchReg());
+ masm.push(ICStubReg);
+
+ PushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, uint32_t,
+ Value*, MutableHandleValue);
+ if (!callVM<Fn, DoCallFallback>(masm)) {
+ return false;
+ }
+
+ leaveStubFrame(masm);
+ EmitReturnFromIC(masm);
+
+  // This is the resume point used when bailout rewrites the call stack to
+  // undo Ion inlined frames. The return address pushed onto the reconstructed
+  // stack will point here.
+ assumeStubFrame();
+
+ MOZ_ASSERT(!isSpread);
+
+ if (isConstructing) {
+ code.initBailoutReturnOffset(BailoutReturnKind::New, masm.currentOffset());
+ } else {
+ code.initBailoutReturnOffset(BailoutReturnKind::Call, masm.currentOffset());
+ }
+
+ // Load passed-in ThisV into R1 just in case it's needed. Need to do this
+ // before we leave the stub frame since that info will be lost.
+ // Current stack: [...., ThisV, CalleeToken, Descriptor ]
+ size_t thisvOffset =
+ JitFrameLayout::offsetOfThis() - JitFrameLayout::bytesPoppedAfterCall();
+ masm.loadValue(Address(masm.getStackPointer(), thisvOffset), R1);
+
+ leaveStubFrame(masm);
+
+  // If this is a |constructing| call and the callee returns a non-object, we
+  // replace it with the |this| object passed in.
+ if (isConstructing) {
+ static_assert(JSReturnOperand == R0);
+ Label skipThisReplace;
+
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
+ masm.moveValue(R1, R0);
+#ifdef DEBUG
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
+ masm.assumeUnreachable("Failed to return object in constructing call.");
+#endif
+ masm.bind(&skipThisReplace);
+ }
+
+ EmitReturnFromIC(masm);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_Call() {
+ return emitCall(/* isSpread = */ false, /* isConstructing = */ false);
+}
+
+bool FallbackICCodeCompiler::emit_CallConstructing() {
+ return emitCall(/* isSpread = */ false, /* isConstructing = */ true);
+}
+
+bool FallbackICCodeCompiler::emit_SpreadCall() {
+ return emitCall(/* isSpread = */ true, /* isConstructing = */ false);
+}
+
+bool FallbackICCodeCompiler::emit_SpreadCallConstructing() {
+ return emitCall(/* isSpread = */ true, /* isConstructing = */ true);
+}
+
+//
+// GetIterator_Fallback
+//
+
+bool DoGetIteratorFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue value,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "GetIterator");
+
+ TryAttachStub<GetIteratorIRGenerator>("GetIterator", cx, frame, stub, value);
+
+ PropertyIteratorObject* iterObj = ValueToIterator(cx, value);
+ if (!iterObj) {
+ return false;
+ }
+
+ res.setObject(*iterObj);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_GetIterator() {
+ EmitRestoreTailCallReg(masm);
+
+ // Sync stack for the decompiler.
+ masm.pushValue(R0);
+
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ MutableHandleValue);
+ return tailCallVM<Fn, DoGetIteratorFallback>(masm);
+}
+
+//
+// OptimizeSpreadCall_Fallback
+//
+
+bool DoOptimizeSpreadCallFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue value,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "OptimizeSpreadCall");
+
+ TryAttachStub<OptimizeSpreadCallIRGenerator>("OptimizeSpreadCall", cx, frame,
+ stub, value);
+
+ return OptimizeSpreadCall(cx, value, res);
+}
+
+bool FallbackICCodeCompiler::emit_OptimizeSpreadCall() {
+ EmitRestoreTailCallReg(masm);
+
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ MutableHandleValue);
+ return tailCallVM<Fn, DoOptimizeSpreadCallFallback>(masm);
+}
+
+//
+// InstanceOf_Fallback
+//
+
+bool DoInstanceOfFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "InstanceOf");
+
+ if (!rhs.isObject()) {
+ ReportValueError(cx, JSMSG_BAD_INSTANCEOF_RHS, -1, rhs, nullptr);
+ return false;
+ }
+
+ RootedObject obj(cx, &rhs.toObject());
+ bool cond = false;
+ if (!InstanceofOperator(cx, obj, lhs, &cond)) {
+ return false;
+ }
+
+ res.setBoolean(cond);
+
+ if (!obj->is<JSFunction>()) {
+    // Ensure we've recorded at least one failure, so we can detect that there
+    // was a non-optimizable case.
+ if (!stub->state().hasFailures()) {
+ stub->trackNotAttached();
+ }
+ return true;
+ }
+
+ TryAttachStub<InstanceOfIRGenerator>("InstanceOf", cx, frame, stub, lhs, obj);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_InstanceOf() {
+ EmitRestoreTailCallReg(masm);
+
+ // Sync stack for the decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ HandleValue, MutableHandleValue);
+ return tailCallVM<Fn, DoInstanceOfFallback>(masm);
+}
+
+//
+// TypeOf_Fallback
+//
+
+bool DoTypeOfFallback(JSContext* cx, BaselineFrame* frame, ICFallbackStub* stub,
+ HandleValue val, MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "TypeOf");
+
+ TryAttachStub<TypeOfIRGenerator>("TypeOf", cx, frame, stub, val);
+
+ JSType type = js::TypeOfValue(val);
+ RootedString string(cx, TypeName(type, cx->names()));
+ res.setString(string);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_TypeOf() {
+ EmitRestoreTailCallReg(masm);
+
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ MutableHandleValue);
+ return tailCallVM<Fn, DoTypeOfFallback>(masm);
+}
+
+//
+// ToPropertyKey_Fallback
+//
+
+bool DoToPropertyKeyFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue val,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "ToPropertyKey");
+
+ TryAttachStub<ToPropertyKeyIRGenerator>("ToPropertyKey", cx, frame, stub,
+ val);
+
+ return ToPropertyKeyOperation(cx, val, res);
+}
+
+bool FallbackICCodeCompiler::emit_ToPropertyKey() {
+ EmitRestoreTailCallReg(masm);
+
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ MutableHandleValue);
+ return tailCallVM<Fn, DoToPropertyKeyFallback>(masm);
+}
+
+//
+// Rest_Fallback
+//
+
+bool DoRestFallback(JSContext* cx, BaselineFrame* frame, ICFallbackStub* stub,
+ MutableHandleValue res) {
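+  // numFormalArgs() counts the rest parameter itself, so the formals that
+  // precede it number numFormalArgs() - 1.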
+ unsigned numFormals = frame->numFormalArgs() - 1;
+ unsigned numActuals = frame->numActualArgs();
+ unsigned numRest = numActuals > numFormals ? numActuals - numFormals : 0;
+ Value* rest = frame->argv() + numFormals;
+
+ ArrayObject* obj = NewDenseCopiedArray(cx, numRest, rest);
+ if (!obj) {
+ return false;
+ }
+ res.setObject(*obj);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_Rest() {
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn =
+ bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, MutableHandleValue);
+ return tailCallVM<Fn, DoRestFallback>(masm);
+}
+
+//
+// UnaryArith_Fallback
+//
+
+bool DoUnaryArithFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue val,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(cx, stub, "UnaryArith(%s)", CodeName(op));
+
+ switch (op) {
+ case JSOp::BitNot: {
+ res.set(val);
+ if (!BitNot(cx, res, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Pos: {
+ res.set(val);
+ if (!ToNumber(cx, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Neg: {
+ res.set(val);
+ if (!NegOperation(cx, res, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Inc: {
+ if (!IncOperation(cx, val, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Dec: {
+ if (!DecOperation(cx, val, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::ToNumeric: {
+ res.set(val);
+ if (!ToNumeric(cx, res)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+ MOZ_ASSERT(res.isNumeric());
+
+ TryAttachStub<UnaryArithIRGenerator>("UnaryArith", cx, frame, stub, op, val,
+ res);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_UnaryArith() {
+ static_assert(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+
+ // Push arguments.
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ MutableHandleValue);
+ return tailCallVM<Fn, DoUnaryArithFallback>(masm);
+}
+
+//
+// BinaryArith_Fallback
+//
+
+bool DoBinaryArithFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue ret) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+ JSOp op = JSOp(*pc);
+ FallbackICSpew(
+ cx, stub, "CacheIRBinaryArith(%s,%d,%d)", CodeName(op),
+ int(lhs.isDouble() ? JSVAL_TYPE_DOUBLE : lhs.extractNonDoubleType()),
+ int(rhs.isDouble() ? JSVAL_TYPE_DOUBLE : rhs.extractNonDoubleType()));
+
+  // Don't pass lhs/rhs directly; we need the original values when
+  // generating stubs.
+ RootedValue lhsCopy(cx, lhs);
+ RootedValue rhsCopy(cx, rhs);
+
+ // Perform the arith operation.
+ switch (op) {
+ case JSOp::Add:
+ // Do an add.
+ if (!AddValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Sub:
+ if (!SubValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Mul:
+ if (!MulValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Div:
+ if (!DivValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Mod:
+ if (!ModValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Pow:
+ if (!PowValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::BitOr: {
+ if (!BitOr(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::BitXor: {
+ if (!BitXor(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::BitAnd: {
+ if (!BitAnd(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Lsh: {
+ if (!BitLsh(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Rsh: {
+ if (!BitRsh(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Ursh: {
+ if (!UrshValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Unhandled baseline arith op");
+ }
+
+ TryAttachStub<BinaryArithIRGenerator>("BinaryArith", cx, frame, stub, op, lhs,
+ rhs, ret);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_BinaryArith() {
+ static_assert(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ HandleValue, MutableHandleValue);
+ return tailCallVM<Fn, DoBinaryArithFallback>(masm);
+}
+
+//
+// Compare_Fallback
+//
+bool DoCompareFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue ret) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+ JSOp op = JSOp(*pc);
+
+ FallbackICSpew(cx, stub, "Compare(%s)", CodeName(op));
+
+  // Don't pass lhs/rhs directly; we need the original values when
+  // generating stubs.
+ RootedValue lhsCopy(cx, lhs);
+ RootedValue rhsCopy(cx, rhs);
+
+ // Perform the compare operation.
+ bool out;
+ switch (op) {
+ case JSOp::Lt:
+ if (!LessThan(cx, &lhsCopy, &rhsCopy, &out)) {
+ return false;
+ }
+ break;
+ case JSOp::Le:
+ if (!LessThanOrEqual(cx, &lhsCopy, &rhsCopy, &out)) {
+ return false;
+ }
+ break;
+ case JSOp::Gt:
+ if (!GreaterThan(cx, &lhsCopy, &rhsCopy, &out)) {
+ return false;
+ }
+ break;
+ case JSOp::Ge:
+ if (!GreaterThanOrEqual(cx, &lhsCopy, &rhsCopy, &out)) {
+ return false;
+ }
+ break;
+ case JSOp::Eq:
+ if (!js::LooselyEqual(cx, lhsCopy, rhsCopy, &out)) {
+ return false;
+ }
+ break;
+ case JSOp::Ne:
+ if (!js::LooselyEqual(cx, lhsCopy, rhsCopy, &out)) {
+ return false;
+ }
+ out = !out;
+ break;
+ case JSOp::StrictEq:
+ if (!js::StrictlyEqual(cx, lhsCopy, rhsCopy, &out)) {
+ return false;
+ }
+ break;
+ case JSOp::StrictNe:
+ if (!js::StrictlyEqual(cx, lhsCopy, rhsCopy, &out)) {
+ return false;
+ }
+ out = !out;
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Unhandled baseline compare op");
+ return false;
+ }
+
+ ret.setBoolean(out);
+
+ TryAttachStub<CompareIRGenerator>("Compare", cx, frame, stub, op, lhs, rhs);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_Compare() {
+ static_assert(R0 == JSReturnOperand);
+
+ // Restore the tail call register.
+ EmitRestoreTailCallReg(masm);
+
+ // Ensure stack is fully synced for the expression decompiler.
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+
+ // Push arguments.
+ masm.pushValue(R1);
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ HandleValue, MutableHandleValue);
+ return tailCallVM<Fn, DoCompareFallback>(masm);
+}
+
+//
+// NewArray_Fallback
+//
+
+bool DoNewArrayFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "NewArray");
+
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+
+ uint32_t length = GET_UINT32(pc);
+ MOZ_ASSERT(length <= INT32_MAX,
+ "the bytecode emitter must fail to compile code that would "
+ "produce a length exceeding int32_t range");
+
+ Rooted<ArrayObject*> array(cx, NewArrayOperation(cx, length));
+ if (!array) {
+ return false;
+ }
+
+ TryAttachStub<NewArrayIRGenerator>("NewArray", cx, frame, stub, JSOp(*pc),
+ array, frame);
+
+ res.setObject(*array);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_NewArray() {
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(ICStubReg); // stub.
+ masm.pushBaselineFramePtr(FramePointer, R0.scratchReg());
+
+ using Fn =
+ bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, MutableHandleValue);
+ return tailCallVM<Fn, DoNewArrayFallback>(masm);
+}
+
+//
+// NewObject_Fallback
+//
+bool DoNewObjectFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "NewObject");
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = StubOffsetToPc(stub, script);
+
+ RootedObject obj(cx, NewObjectOperation(cx, script, pc));
+ if (!obj) {
+ return false;
+ }
+
+ TryAttachStub<NewObjectIRGenerator>("NewObject", cx, frame, stub, JSOp(*pc),
+ obj, frame);
+
+ res.setObject(*obj);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_NewObject() {
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(ICStubReg); // stub.
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn =
+ bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, MutableHandleValue);
+ return tailCallVM<Fn, DoNewObjectFallback>(masm);
+}
+
+//
+// CloseIter_Fallback
+//
+
+bool DoCloseIterFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleObject iter) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "CloseIter");
+
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+ CompletionKind kind = CompletionKind(GET_UINT8(pc));
+
+ TryAttachStub<CloseIterIRGenerator>("CloseIter", cx, frame, stub, iter, kind);
+
+ return CloseIterOperation(cx, iter, kind);
+}
+
+bool FallbackICCodeCompiler::emit_CloseIter() {
+ EmitRestoreTailCallReg(masm);
+
+ masm.push(R0.scratchReg());
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn =
+ bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleObject);
+ return tailCallVM<Fn, DoCloseIterFallback>(masm);
+}
+
+bool JitRuntime::generateBaselineICFallbackCode(JSContext* cx) {
+ TempAllocator temp(&cx->tempLifoAlloc());
+ StackMacroAssembler masm(cx, temp);
+ PerfSpewerRangeRecorder rangeRecorder(masm);
+ AutoCreatedBy acb(masm, "JitRuntime::generateBaselineICFallbackCode");
+
+ BaselineICFallbackCode& fallbackCode = baselineICFallbackCode_.ref();
+ FallbackICCodeCompiler compiler(cx, fallbackCode, masm);
+
+ JitSpew(JitSpew_Codegen, "# Emitting Baseline IC fallback code");
+
+#define EMIT_CODE(kind) \
+ { \
+ AutoCreatedBy acb(masm, "kind=" #kind); \
+ uint32_t offset = startTrampolineCode(masm); \
+ InitMacroAssemblerForICStub(masm); \
+ if (!compiler.emit_##kind()) { \
+ return false; \
+ } \
+ fallbackCode.initOffset(BaselineICFallbackKind::kind, offset); \
+ rangeRecorder.recordOffset("BaselineICFallback: " #kind); \
+ }
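+  // Emit one fallback trampoline for each kind in
+  // IC_BASELINE_FALLBACK_CODE_KIND_LIST (see BaselineICList.h).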
+ IC_BASELINE_FALLBACK_CODE_KIND_LIST(EMIT_CODE)
+#undef EMIT_CODE
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode(cx, CodeKind::Other);
+ if (!code) {
+ return false;
+ }
+
+ rangeRecorder.collectRangesForJitCode(code);
+
+#ifdef MOZ_VTUNE
+ vtune::MarkStub(code, "BaselineICFallback");
+#endif
+
+ fallbackCode.initCode(code);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/BaselineIC.h b/js/src/jit/BaselineIC.h
new file mode 100644
index 0000000000..9098af4825
--- /dev/null
+++ b/js/src/jit/BaselineIC.h
@@ -0,0 +1,439 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineIC_h
+#define jit_BaselineIC_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/ICState.h"
+#include "jit/JitCode.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/TypeData.h"
+#include "js/TypeDecls.h"
+
+class JS_PUBLIC_API JSTracer;
+
+enum class JSOp : uint8_t;
+
+namespace js {
+
+MOZ_COLD void ReportOutOfMemory(JSContext* cx);
+
+namespace jit {
+
+class BaselineFrame;
+class CacheIRStubInfo;
+class ICScript;
+
+enum class TailCallVMFunctionId;
+enum class VMFunctionId;
+
+// [SMDOC] JIT Inline Caches (ICs)
+//
+// Baseline Inline Caches are polymorphic caches that aggressively
+// share their stub code.
+//
+// Every polymorphic site contains a linked list of stubs which are
+// specific to that site. These stubs are composed of a |StubData|
+// structure that stores parametrization information (e.g.
+// the shape pointer for a shape-check-and-property-get stub), any
+// dynamic information (e.g. warm-up counters), a pointer to the stub code,
+// and a pointer to the next stub state in the linked list.
+//
+// Every BaselineScript keeps a table of |CacheDescriptor| data
+// structures, which store the following:
+// A pointer to the first StubData in the cache.
+// The bytecode PC of the relevant IC.
+// The machine-code PC where the call to the stubcode returns.
+//
+// A diagram:
+//
+// Control flow Pointers
+// =======# ----. .---->
+// # | |
+// #======> \-----/
+//
+//
+// .---------------------------------------.
+// | .-------------------------. |
+// | | .----. | |
+// Baseline | | | | | |
+// JIT Code 0 ^ 1 ^ 2 ^ | | |
+// +--------------+ .-->+-----+ +-----+ +-----+ | | |
+// | | #=|==>| |==>| |==>| FB | | | |
+// | | # | +-----+ +-----+ +-----+ | | |
+// | | # | # # # | | |
+// |==============|==# | # # # | | |
+// |=== IC =======| | # # # | | |
+// .->|==============|<===|======#=========#=========# | | |
+// | | | | | | |
+// | | | | | | |
+// | | | | | | |
+// | | | | v | |
+// | | | | +---------+ | |
+// | | | | | Fallback| | |
+// | | | | | Stub | | |
+// | | | | | Code | | |
+// | | | | +---------+ | |
+// | +--------------+ | | |
+// | |_______ | +---------+ | |
+// | | | | Stub |<---/ |
+// | IC | \--. | Code | |
+// | Descriptor | | +---------+ |
+// | Table v | |
+// | +-----------------+ | +---------+ |
+// \--| Ins | PC | Stub |----/ | Stub |<-------/
+// +-----------------+ | Code |
+// | ... | +---------+
+// +-----------------+
+// Shared
+// Stub Code
+//
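+// For example, the stubs for a single IC can be walked from its ICEntry with
+// the accessors declared below:
+//
+//   ICStub* stub = icEntry->firstStub();
+//   while (!stub->isFallback()) {
+//     stub = stub->toCacheIRStub()->next();
+//   }
+//   ICFallbackStub* fallback = stub->toFallbackStub();
+//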
+
+class ICStub;
+class ICCacheIRStub;
+class ICFallbackStub;
+
+#ifdef JS_JITSPEW
+void FallbackICSpew(JSContext* cx, ICFallbackStub* stub, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(3, 4);
+#else
+# define FallbackICSpew(...)
+#endif
+
+// An entry in the ICScript IC table. There's one ICEntry per IC.
+class ICEntry {
+ // A pointer to the first IC stub for this instruction.
+ ICStub* firstStub_;
+
+ public:
+ explicit ICEntry(ICStub* firstStub) : firstStub_(firstStub) {}
+
+ ICStub* firstStub() const {
+ MOZ_ASSERT(firstStub_);
+ return firstStub_;
+ }
+
+ void setFirstStub(ICStub* stub) { firstStub_ = stub; }
+
+ static constexpr size_t offsetOfFirstStub() {
+ return offsetof(ICEntry, firstStub_);
+ }
+
+ void trace(JSTracer* trc);
+};
+
+//
+// Base class for all IC stubs.
+//
+class ICStub {
+ friend class ICFallbackStub;
+
+ protected:
+ // The raw jitcode to call for this stub.
+ uint8_t* stubCode_;
+
+ // Counts the number of times the stub was entered
+ //
+ // See Bug 1494473 comment 6 for a mechanism to handle overflow if overflow
+ // becomes a concern.
+ uint32_t enteredCount_ = 0;
+
+ // Tracks input types for some CacheIR stubs, to help optimize
+ // polymorphic cases. Stored in the base class to make use of
+ // padding bytes.
+ TypeData typeData_;
+
+ // Whether this is an ICFallbackStub or an ICCacheIRStub.
+ bool isFallback_;
+
+ ICStub(uint8_t* stubCode, bool isFallback)
+ : stubCode_(stubCode), isFallback_(isFallback) {
+ MOZ_ASSERT(stubCode != nullptr);
+ }
+
+ public:
+ inline bool isFallback() const { return isFallback_; }
+
+ inline ICStub* maybeNext() const;
+
+ inline const ICFallbackStub* toFallbackStub() const {
+ MOZ_ASSERT(isFallback());
+ return reinterpret_cast<const ICFallbackStub*>(this);
+ }
+
+ inline ICFallbackStub* toFallbackStub() {
+ MOZ_ASSERT(isFallback());
+ return reinterpret_cast<ICFallbackStub*>(this);
+ }
+
+ ICCacheIRStub* toCacheIRStub() {
+ MOZ_ASSERT(!isFallback());
+ return reinterpret_cast<ICCacheIRStub*>(this);
+ }
+ const ICCacheIRStub* toCacheIRStub() const {
+ MOZ_ASSERT(!isFallback());
+ return reinterpret_cast<const ICCacheIRStub*>(this);
+ }
+
+ bool usesTrampolineCode() const {
+ // All fallback code is stored in a single JitCode instance, so we can't
+ // call JitCode::FromExecutable on the raw pointer.
+ return isFallback();
+ }
+ JitCode* jitCode() {
+ MOZ_ASSERT(!usesTrampolineCode());
+ return JitCode::FromExecutable(stubCode_);
+ }
+
+ uint32_t enteredCount() const { return enteredCount_; }
+ inline void incrementEnteredCount() { enteredCount_++; }
+ void resetEnteredCount() { enteredCount_ = 0; }
+
+ static constexpr size_t offsetOfStubCode() {
+ return offsetof(ICStub, stubCode_);
+ }
+ static constexpr size_t offsetOfEnteredCount() {
+ return offsetof(ICStub, enteredCount_);
+ }
+};
+
+class ICFallbackStub final : public ICStub {
+ friend class ICStubConstIterator;
+
+ protected:
+ // The PC offset of this IC's bytecode op within the JSScript.
+ uint32_t pcOffset_;
+
+ // The state of this IC.
+ ICState state_{};
+
+ public:
+ explicit ICFallbackStub(uint32_t pcOffset, TrampolinePtr stubCode)
+ : ICStub(stubCode.value, /* isFallback = */ true), pcOffset_(pcOffset) {}
+
+ inline size_t numOptimizedStubs() const { return state_.numOptimizedStubs(); }
+
+ bool newStubIsFirstStub() const { return state_.newStubIsFirstStub(); }
+
+ ICState& state() { return state_; }
+
+ uint32_t pcOffset() const { return pcOffset_; }
+
+ // Add a new stub to the IC chain terminated by this fallback stub.
+ inline void addNewStub(ICEntry* icEntry, ICCacheIRStub* stub);
+
+ void discardStubs(JSContext* cx, ICEntry* icEntry);
+
+ void clearUsedByTranspiler() { state_.clearUsedByTranspiler(); }
+ void setUsedByTranspiler() { state_.setUsedByTranspiler(); }
+ bool usedByTranspiler() const { return state_.usedByTranspiler(); }
+
+ void clearHasFoldedStub() { state_.clearHasFoldedStub(); }
+ void setHasFoldedStub() { state_.setHasFoldedStub(); }
+ bool hasFoldedStub() const { return state_.hasFoldedStub(); }
+
+ TrialInliningState trialInliningState() const {
+ return state_.trialInliningState();
+ }
+ void setTrialInliningState(TrialInliningState state) {
+ state_.setTrialInliningState(state);
+ }
+
+ void trackNotAttached();
+
+ void unlinkStub(Zone* zone, ICEntry* icEntry, ICCacheIRStub* prev,
+ ICCacheIRStub* stub);
+};
+
+class ICCacheIRStub final : public ICStub {
+ // Pointer to next IC stub.
+ ICStub* next_ = nullptr;
+
+ const CacheIRStubInfo* stubInfo_;
+
+#ifndef JS_64BIT
+ // Ensure stub data is 8-byte aligned on 32-bit.
+ uintptr_t padding_ = 0;
+#endif
+
+ public:
+ ICCacheIRStub(JitCode* stubCode, const CacheIRStubInfo* stubInfo)
+ : ICStub(stubCode->raw(), /* isFallback = */ false),
+ stubInfo_(stubInfo) {}
+
+ ICStub* next() const { return next_; }
+ void setNext(ICStub* stub) { next_ = stub; }
+
+ ICCacheIRStub* nextCacheIR() const {
+ return next_->isFallback() ? nullptr : next_->toCacheIRStub();
+ }
+
+ const CacheIRStubInfo* stubInfo() const { return stubInfo_; }
+ uint8_t* stubDataStart();
+
+ void trace(JSTracer* trc);
+
+ // Optimized stubs get purged on GC. But some stubs can be active on the
+ // stack during GC - specifically the ones that can make calls. To ensure
+ // that these do not get purged, all stubs that can make calls are allocated
+ // in the fallback stub space.
+ bool makesGCCalls() const;
+ bool allocatedInFallbackSpace() const { return makesGCCalls(); }
+
+ static constexpr size_t offsetOfNext() {
+ return offsetof(ICCacheIRStub, next_);
+ }
+
+ void setTypeData(TypeData data) { typeData_ = data; }
+ TypeData typeData() const { return typeData_; }
+};
+
+// Assert stub size is what we expect to catch regressions.
+#ifdef JS_64BIT
+static_assert(sizeof(ICFallbackStub) == 3 * sizeof(uintptr_t));
+static_assert(sizeof(ICCacheIRStub) == 4 * sizeof(uintptr_t));
+#else
+static_assert(sizeof(ICFallbackStub) == 5 * sizeof(uintptr_t));
+static_assert(sizeof(ICCacheIRStub) == 6 * sizeof(uintptr_t));
+#endif
+
+inline ICStub* ICStub::maybeNext() const {
+ return isFallback() ? nullptr : toCacheIRStub()->next();
+}
+
+inline void ICFallbackStub::addNewStub(ICEntry* icEntry, ICCacheIRStub* stub) {
+ MOZ_ASSERT(stub->next() == nullptr);
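+  // Prepend the new stub to the chain, so the chain always ends with this
+  // fallback stub.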
+ stub->setNext(icEntry->firstStub());
+ icEntry->setFirstStub(stub);
+ state_.trackAttached();
+}
+
+AllocatableGeneralRegisterSet BaselineICAvailableGeneralRegs(size_t numInputs);
+
+bool ICSupportsPolymorphicTypeData(JSOp op);
+
+struct IonOsrTempData;
+
+extern bool DoCallFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, uint32_t argc, Value* vp,
+ MutableHandleValue res);
+
+extern bool DoSpreadCallFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, Value* vp,
+ MutableHandleValue res);
+
+extern bool DoToBoolFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue arg,
+ MutableHandleValue ret);
+
+extern bool DoGetElemSuperFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs,
+ HandleValue rhs, HandleValue receiver,
+ MutableHandleValue res);
+
+extern bool DoGetElemFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue res);
+
+extern bool DoSetElemFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, Value* stack,
+ HandleValue objv, HandleValue index,
+ HandleValue rhs);
+
+extern bool DoInFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue key,
+ HandleValue objValue, MutableHandleValue res);
+
+extern bool DoHasOwnFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue keyValue,
+ HandleValue objValue, MutableHandleValue res);
+
+extern bool DoCheckPrivateFieldFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub,
+ HandleValue objValue,
+ HandleValue keyValue,
+ MutableHandleValue res);
+
+extern bool DoGetNameFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleObject envChain,
+ MutableHandleValue res);
+
+extern bool DoBindNameFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleObject envChain,
+ MutableHandleValue res);
+
+extern bool DoGetIntrinsicFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub,
+ MutableHandleValue res);
+
+extern bool DoGetPropFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, MutableHandleValue val,
+ MutableHandleValue res);
+
+extern bool DoGetPropSuperFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue receiver,
+ MutableHandleValue val,
+ MutableHandleValue res);
+
+extern bool DoSetPropFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, Value* stack,
+ HandleValue lhs, HandleValue rhs);
+
+extern bool DoGetIteratorFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue value,
+ MutableHandleValue res);
+
+extern bool DoOptimizeSpreadCallFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub,
+ HandleValue value,
+ MutableHandleValue res);
+
+extern bool DoInstanceOfFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue res);
+
+extern bool DoTypeOfFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue val,
+ MutableHandleValue res);
+
+extern bool DoToPropertyKeyFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue val,
+ MutableHandleValue res);
+
+extern bool DoRestFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, MutableHandleValue res);
+
+extern bool DoUnaryArithFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue val,
+ MutableHandleValue res);
+
+extern bool DoBinaryArithFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue ret);
+
+extern bool DoNewArrayFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, MutableHandleValue res);
+
+extern bool DoNewObjectFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, MutableHandleValue res);
+
+extern bool DoCompareFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue ret);
+
+extern bool DoCloseIterFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleObject iter);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineIC_h */
diff --git a/js/src/jit/BaselineICList.h b/js/src/jit/BaselineICList.h
new file mode 100644
index 0000000000..f277b6acdb
--- /dev/null
+++ b/js/src/jit/BaselineICList.h
@@ -0,0 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineICList_h
+#define jit_BaselineICList_h
+
+namespace js {
+namespace jit {
+
+// List of trampolines for Baseline IC fallback stubs. Trampoline code is
+// allocated as part of the JitRuntime.
+#define IC_BASELINE_FALLBACK_CODE_KIND_LIST(_) \
+ _(NewArray) \
+ _(NewObject) \
+ _(ToBool) \
+ _(UnaryArith) \
+ _(Call) \
+ _(CallConstructing) \
+ _(SpreadCall) \
+ _(SpreadCallConstructing) \
+ _(GetElem) \
+ _(GetElemSuper) \
+ _(SetElem) \
+ _(In) \
+ _(HasOwn) \
+ _(CheckPrivateField) \
+ _(GetName) \
+ _(BindName) \
+ _(GetIntrinsic) \
+ _(SetProp) \
+ _(GetIterator) \
+ _(OptimizeSpreadCall) \
+ _(InstanceOf) \
+ _(TypeOf) \
+ _(ToPropertyKey) \
+ _(Rest) \
+ _(BinaryArith) \
+ _(Compare) \
+ _(GetProp) \
+ _(GetPropSuper) \
+ _(CloseIter)
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BaselineICList_h */
diff --git a/js/src/jit/BaselineJIT.cpp b/js/src/jit/BaselineJIT.cpp
new file mode 100644
index 0000000000..f4f223040a
--- /dev/null
+++ b/js/src/jit/BaselineJIT.cpp
@@ -0,0 +1,1008 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineJIT.h"
+
+#include "mozilla/BinarySearch.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MemoryReporting.h"
+
+#include <algorithm>
+
+#include "debugger/DebugAPI.h"
+#include "gc/GCContext.h"
+#include "gc/PublicIterators.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/BaselineCodeGen.h"
+#include "jit/BaselineIC.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitCommon.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/MacroAssembler.h"
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "vm/Interpreter.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "gc/GC-inl.h"
+#include "jit/JitHints-inl.h"
+#include "jit/JitScript-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/Stack-inl.h"
+
+using mozilla::BinarySearchIf;
+using mozilla::CheckedInt;
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+void ICStubSpace::freeAllAfterMinorGC(Zone* zone) {
+ if (zone->isAtomsZone()) {
+ MOZ_ASSERT(allocator_.isEmpty());
+ } else {
+ JSRuntime* rt = zone->runtimeFromMainThread();
+ rt->gc.queueAllLifoBlocksForFreeAfterMinorGC(&allocator_);
+ }
+}
+
+static bool CheckFrame(InterpreterFrame* fp) {
+ if (fp->isDebuggerEvalFrame()) {
+ // Debugger eval-in-frame. These are likely short-running scripts so
+ // don't bother compiling them for now.
+ JitSpew(JitSpew_BaselineAbort, "debugger frame");
+ return false;
+ }
+
+ if (fp->isFunctionFrame() && TooManyActualArguments(fp->numActualArgs())) {
+ // Fall back to the interpreter to avoid running out of stack space.
+ JitSpew(JitSpew_BaselineAbort, "Too many arguments (%u)",
+ fp->numActualArgs());
+ return false;
+ }
+
+ return true;
+}
+
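+// Bundles the values handed to the enter-JIT trampoline. In this file it is
+// only used when OSR-ing from the C++ interpreter into the Baseline
+// Interpreter (EnterBaseline requires osrFrame to be set).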
+struct EnterJitData {
+ explicit EnterJitData(JSContext* cx)
+ : jitcode(nullptr),
+ osrFrame(nullptr),
+ calleeToken(nullptr),
+ maxArgv(nullptr),
+ maxArgc(0),
+ numActualArgs(0),
+ osrNumStackValues(0),
+ envChain(cx),
+ result(cx),
+ constructing(false) {}
+
+ uint8_t* jitcode;
+ InterpreterFrame* osrFrame;
+
+ void* calleeToken;
+
+ Value* maxArgv;
+ unsigned maxArgc;
+ unsigned numActualArgs;
+ unsigned osrNumStackValues;
+
+ RootedObject envChain;
+ RootedValue result;
+
+ bool constructing;
+};
+
+static JitExecStatus EnterBaseline(JSContext* cx, EnterJitData& data) {
+ MOZ_ASSERT(data.osrFrame);
+
+ // Check for potential stack overflow before OSR-ing.
+ uint32_t extra =
+ BaselineFrame::Size() + (data.osrNumStackValues * sizeof(Value));
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.checkWithExtra(cx, extra)) {
+ return JitExec_Aborted;
+ }
+
+#ifdef DEBUG
+ // Assert we don't GC before entering JIT code. A GC could discard JIT code
+ // or move the function stored in the CalleeToken (it won't be traced at
+ // this point). We use Maybe<> here so we can call reset() to call the
+ // AutoAssertNoGC destructor before we enter JIT code.
+ mozilla::Maybe<JS::AutoAssertNoGC> nogc;
+ nogc.emplace(cx);
+#endif
+
+ MOZ_ASSERT(IsBaselineInterpreterEnabled());
+ MOZ_ASSERT(CheckFrame(data.osrFrame));
+
+ EnterJitCode enter = cx->runtime()->jitRuntime()->enterJit();
+
+ // Caller must construct |this| before invoking the function.
+ MOZ_ASSERT_IF(data.constructing,
+ data.maxArgv[0].isObject() ||
+ data.maxArgv[0].isMagic(JS_UNINITIALIZED_LEXICAL));
+
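+  // The result slot doubles as the way to pass the actual argument count to
+  // the enter-JIT trampoline; it is overwritten with the return value when
+  // the JIT code returns.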
+ data.result.setInt32(data.numActualArgs);
+ {
+ AssertRealmUnchanged aru(cx);
+ ActivationEntryMonitor entryMonitor(cx, data.calleeToken);
+ JitActivation activation(cx);
+
+ data.osrFrame->setRunningInJit();
+
+#ifdef DEBUG
+ nogc.reset();
+#endif
+ // Single transition point from Interpreter to Baseline.
+ CALL_GENERATED_CODE(enter, data.jitcode, data.maxArgc, data.maxArgv,
+ data.osrFrame, data.calleeToken, data.envChain.get(),
+ data.osrNumStackValues, data.result.address());
+
+ data.osrFrame->clearRunningInJit();
+ }
+
+ // Jit callers wrap primitive constructor return, except for derived
+ // class constructors, which are forced to do it themselves.
+ if (!data.result.isMagic() && data.constructing &&
+ data.result.isPrimitive()) {
+ MOZ_ASSERT(data.maxArgv[0].isObject());
+ data.result = data.maxArgv[0];
+ }
+
+ // Release temporary buffer used for OSR into Ion.
+ cx->runtime()->jitRuntime()->freeIonOsrTempData();
+
+ MOZ_ASSERT_IF(data.result.isMagic(), data.result.isMagic(JS_ION_ERROR));
+ return data.result.isMagic() ? JitExec_Error : JitExec_Ok;
+}
+
+JitExecStatus jit::EnterBaselineInterpreterAtBranch(JSContext* cx,
+ InterpreterFrame* fp,
+ jsbytecode* pc) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead);
+
+ EnterJitData data(cx);
+
+ // Use the entry point that skips the debug trap because the C++ interpreter
+ // already handled this for the current op.
+ const BaselineInterpreter& interp =
+ cx->runtime()->jitRuntime()->baselineInterpreter();
+ data.jitcode = interp.interpretOpNoDebugTrapAddr().value;
+
+ data.osrFrame = fp;
+ data.osrNumStackValues =
+ fp->script()->nfixed() + cx->interpreterRegs().stackDepth();
+
+ if (fp->isFunctionFrame()) {
+ data.constructing = fp->isConstructing();
+ data.numActualArgs = fp->numActualArgs();
+ data.maxArgc = std::max(fp->numActualArgs(), fp->numFormalArgs()) +
+ 1; // +1 = include |this|
+ data.maxArgv = fp->argv() - 1; // -1 = include |this|
+ data.envChain = nullptr;
+ data.calleeToken = CalleeToToken(&fp->callee(), data.constructing);
+ } else {
+ data.constructing = false;
+ data.numActualArgs = 0;
+ data.maxArgc = 0;
+ data.maxArgv = nullptr;
+ data.envChain = fp->environmentChain();
+ data.calleeToken = CalleeToToken(fp->script());
+ }
+
+ JitExecStatus status = EnterBaseline(cx, data);
+ if (status != JitExec_Ok) {
+ return status;
+ }
+
+ fp->setReturnValue(data.result);
+ return JitExec_Ok;
+}
+
+MethodStatus jit::BaselineCompile(JSContext* cx, JSScript* script,
+ bool forceDebugInstrumentation) {
+ cx->check(script);
+ MOZ_ASSERT(!script->hasBaselineScript());
+ MOZ_ASSERT(script->canBaselineCompile());
+ MOZ_ASSERT(IsBaselineJitEnabled(cx));
+ AutoGeckoProfilerEntry pseudoFrame(
+ cx, "Baseline script compilation",
+ JS::ProfilingCategoryPair::JS_BaselineCompilation);
+
+ TempAllocator temp(&cx->tempLifoAlloc());
+ JitContext jctx(cx);
+
+ BaselineCompiler compiler(cx, temp, script);
+ if (!compiler.init()) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ if (forceDebugInstrumentation) {
+ compiler.setCompileDebugInstrumentation();
+ }
+
+ MethodStatus status = compiler.compile();
+
+ MOZ_ASSERT_IF(status == Method_Compiled, script->hasBaselineScript());
+ MOZ_ASSERT_IF(status != Method_Compiled, !script->hasBaselineScript());
+
+ if (status == Method_CantCompile) {
+ script->disableBaselineCompile();
+ }
+
+ return status;
+}
+
+static MethodStatus CanEnterBaselineJIT(JSContext* cx, HandleScript script,
+ AbstractFramePtr osrSourceFrame) {
+ // Skip if the script has been disabled.
+ if (!script->canBaselineCompile()) {
+ return Method_Skipped;
+ }
+
+ if (!IsBaselineJitEnabled(cx)) {
+ script->disableBaselineCompile();
+ return Method_CantCompile;
+ }
+
+ // This check is needed in the following corner case. Consider a function h,
+ //
+ // function h(x) {
+ // if (!x)
+ // return;
+ // h(false);
+ // for (var i = 0; i < N; i++)
+ // /* do stuff */
+ // }
+ //
+ // Suppose h is not yet compiled in baseline and is executing in the
+ // interpreter. Let this interpreter frame be f_older. The debugger marks
+ // f_older as isDebuggee. At the point of the recursive call h(false), h is
+ // compiled in baseline without debug instrumentation, pushing a baseline
+ // frame f_newer. The debugger never flags f_newer as isDebuggee, and never
+ // recompiles h. When the recursive call returns and execution proceeds to
+ // the loop, the interpreter attempts to OSR into baseline. Since h is
+ // already compiled in baseline, execution jumps directly into baseline
+ // code. This is incorrect as h's baseline script does not have debug
+ // instrumentation.
+ if (osrSourceFrame && osrSourceFrame.isDebuggee() &&
+ !DebugAPI::ensureExecutionObservabilityOfOsrFrame(cx, osrSourceFrame)) {
+ return Method_Error;
+ }
+
+ if (script->length() > BaselineMaxScriptLength) {
+ script->disableBaselineCompile();
+ return Method_CantCompile;
+ }
+
+ if (script->nslots() > BaselineMaxScriptSlots) {
+ script->disableBaselineCompile();
+ return Method_CantCompile;
+ }
+
+ if (script->hasBaselineScript()) {
+ return Method_Compiled;
+ }
+
+ // If a hint is available, skip the warmup count threshold.
+ bool mightHaveEagerBaselineHint = false;
+ if (!JitOptions.disableJitHints && !script->noEagerBaselineHint() &&
+ cx->runtime()->jitRuntime()->hasJitHintsMap()) {
+ JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
+ // If this lookup fails, the NoEagerBaselineHint script flag is set
+ // to true to prevent any further lookups for this script.
+ if (jitHints->mightHaveEagerBaselineHint(script)) {
+ mightHaveEagerBaselineHint = true;
+ }
+ }
+ // Check script warm-up counter if no hint.
+ if (!mightHaveEagerBaselineHint) {
+ if (script->getWarmUpCount() <= JitOptions.baselineJitWarmUpThreshold) {
+ return Method_Skipped;
+ }
+ }
+
+ // Check this before calling ensureJitRealmExists, so we're less
+ // likely to report OOM in JSRuntime::createJitRuntime.
+ if (!CanLikelyAllocateMoreExecutableMemory()) {
+ return Method_Skipped;
+ }
+
+ if (!cx->realm()->ensureJitRealmExists(cx)) {
+ return Method_Error;
+ }
+
+ if (script->hasForceInterpreterOp()) {
+ script->disableBaselineCompile();
+ return Method_CantCompile;
+ }
+
+  // Frames can be marked as debuggee frames independently of their underlying
+ // script being a debuggee script, e.g., when performing
+ // Debugger.Frame.prototype.eval.
+ bool forceDebugInstrumentation =
+ osrSourceFrame && osrSourceFrame.isDebuggee();
+ return BaselineCompile(cx, script, forceDebugInstrumentation);
+}
+
+bool jit::CanBaselineInterpretScript(JSScript* script) {
+ MOZ_ASSERT(IsBaselineInterpreterEnabled());
+
+ if (script->hasForceInterpreterOp()) {
+ return false;
+ }
+
+ if (script->nslots() > BaselineMaxScriptSlots) {
+ // Avoid overrecursion exceptions when the script has a ton of stack slots
+ // by forcing such scripts to run in the C++ interpreter with heap-allocated
+ // stack frames.
+ return false;
+ }
+
+ return true;
+}
+
+static bool MaybeCreateBaselineInterpreterEntryScript(JSContext* cx,
+ JSScript* script) {
+ MOZ_ASSERT(script->hasJitScript());
+
+ JitRuntime* jitRuntime = cx->runtime()->jitRuntime();
+ if (script->jitCodeRaw() != jitRuntime->baselineInterpreter().codeRaw()) {
+    // The script already has an updated interpreter trampoline.
+#ifdef DEBUG
+ auto p = jitRuntime->getInterpreterEntryMap()->lookup(script);
+ MOZ_ASSERT(p);
+ MOZ_ASSERT(p->value().raw() == script->jitCodeRaw());
+#endif
+ return true;
+ }
+
+ auto p = jitRuntime->getInterpreterEntryMap()->lookupForAdd(script);
+ if (!p) {
+ Rooted<JitCode*> code(
+ cx, jitRuntime->generateEntryTrampolineForScript(cx, script));
+ if (!code) {
+ return false;
+ }
+
+ EntryTrampoline entry(cx, code);
+ if (!jitRuntime->getInterpreterEntryMap()->add(p, script, entry)) {
+ return false;
+ }
+ }
+
+ script->updateJitCodeRaw(cx->runtime());
+ return true;
+}
+
+static MethodStatus CanEnterBaselineInterpreter(JSContext* cx,
+ JSScript* script) {
+ MOZ_ASSERT(IsBaselineInterpreterEnabled());
+
+ if (script->hasJitScript()) {
+ return Method_Compiled;
+ }
+
+ if (!CanBaselineInterpretScript(script)) {
+ return Method_CantCompile;
+ }
+
+ // Check script warm-up counter.
+ if (script->getWarmUpCount() <=
+ JitOptions.baselineInterpreterWarmUpThreshold) {
+ return Method_Skipped;
+ }
+
+ if (!cx->realm()->ensureJitRealmExists(cx)) {
+ return Method_Error;
+ }
+
+ AutoKeepJitScripts keepJitScript(cx);
+ if (!script->ensureHasJitScript(cx, keepJitScript)) {
+ return Method_Error;
+ }
+
+ if (JitOptions.emitInterpreterEntryTrampoline) {
+ if (!MaybeCreateBaselineInterpreterEntryScript(cx, script)) {
+ return Method_Error;
+ }
+ }
+ return Method_Compiled;
+}
+
+MethodStatus jit::CanEnterBaselineInterpreterAtBranch(JSContext* cx,
+ InterpreterFrame* fp) {
+ if (!CheckFrame(fp)) {
+ return Method_CantCompile;
+ }
+
+ // JITs do not respect the debugger's OnNativeCall hook, so JIT execution is
+ // disabled if this hook might need to be called.
+ if (cx->insideDebuggerEvaluationWithOnNativeCallHook) {
+ return Method_CantCompile;
+ }
+
+ return CanEnterBaselineInterpreter(cx, fp->script());
+}
+
+template <BaselineTier Tier>
+MethodStatus jit::CanEnterBaselineMethod(JSContext* cx, RunState& state) {
+ if (state.isInvoke()) {
+ InvokeState& invoke = *state.asInvoke();
+ if (TooManyActualArguments(invoke.args().length())) {
+ JitSpew(JitSpew_BaselineAbort, "Too many arguments (%u)",
+ invoke.args().length());
+ return Method_CantCompile;
+ }
+ } else {
+ if (state.asExecute()->isDebuggerEval()) {
+ JitSpew(JitSpew_BaselineAbort, "debugger frame");
+ return Method_CantCompile;
+ }
+ }
+
+ RootedScript script(cx, state.script());
+ switch (Tier) {
+ case BaselineTier::Interpreter:
+ return CanEnterBaselineInterpreter(cx, script);
+
+ case BaselineTier::Compiler:
+ return CanEnterBaselineJIT(cx, script,
+ /* osrSourceFrame = */ NullFramePtr());
+ }
+
+ MOZ_CRASH("Unexpected tier");
+}
+
+template MethodStatus jit::CanEnterBaselineMethod<BaselineTier::Interpreter>(
+ JSContext* cx, RunState& state);
+template MethodStatus jit::CanEnterBaselineMethod<BaselineTier::Compiler>(
+ JSContext* cx, RunState& state);
+
+bool jit::BaselineCompileFromBaselineInterpreter(JSContext* cx,
+ BaselineFrame* frame,
+ uint8_t** res) {
+ MOZ_ASSERT(frame->runningInInterpreter());
+
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc = frame->interpreterPC();
+ MOZ_ASSERT(pc == script->code() || JSOp(*pc) == JSOp::LoopHead);
+
+ MethodStatus status = CanEnterBaselineJIT(cx, script,
+ /* osrSourceFrame = */ frame);
+ switch (status) {
+ case Method_Error:
+ return false;
+
+ case Method_CantCompile:
+ case Method_Skipped:
+ *res = nullptr;
+ return true;
+
+ case Method_Compiled: {
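+      // Resume either at the OSR entry for this loop head, or at the warm-up
+      // check in the prologue when we are at the start of the script.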
+ if (JSOp(*pc) == JSOp::LoopHead) {
+ MOZ_ASSERT(pc > script->code(),
+ "Prologue vs OSR cases must not be ambiguous");
+ BaselineScript* baselineScript = script->baselineScript();
+ uint32_t pcOffset = script->pcToOffset(pc);
+ *res = baselineScript->nativeCodeForOSREntry(pcOffset);
+ } else {
+ *res = script->baselineScript()->warmUpCheckPrologueAddr();
+ }
+ frame->prepareForBaselineInterpreterToJitOSR();
+ return true;
+ }
+ }
+
+ MOZ_CRASH("Unexpected status");
+}
+
+BaselineScript* BaselineScript::New(JSContext* cx,
+ uint32_t warmUpCheckPrologueOffset,
+ uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset,
+ size_t retAddrEntries, size_t osrEntries,
+ size_t debugTrapEntries,
+ size_t resumeEntries) {
+ // Compute size including trailing arrays.
+ CheckedInt<Offset> size = sizeof(BaselineScript);
+ size += CheckedInt<Offset>(resumeEntries) * sizeof(uintptr_t);
+ size += CheckedInt<Offset>(retAddrEntries) * sizeof(RetAddrEntry);
+ size += CheckedInt<Offset>(osrEntries) * sizeof(OSREntry);
+ size += CheckedInt<Offset>(debugTrapEntries) * sizeof(DebugTrapEntry);
+
+ if (!size.isValid()) {
+ ReportAllocationOverflow(cx);
+ return nullptr;
+ }
+
+ // Allocate contiguous raw buffer.
+ void* raw = cx->pod_malloc<uint8_t>(size.value());
+ MOZ_ASSERT(uintptr_t(raw) % alignof(BaselineScript) == 0);
+ if (!raw) {
+ return nullptr;
+ }
+ BaselineScript* script = new (raw)
+ BaselineScript(warmUpCheckPrologueOffset, profilerEnterToggleOffset,
+ profilerExitToggleOffset);
+
+ Offset cursor = sizeof(BaselineScript);
+
+ MOZ_ASSERT(isAlignedOffset<uintptr_t>(cursor));
+ script->resumeEntriesOffset_ = cursor;
+ cursor += resumeEntries * sizeof(uintptr_t);
+
+ MOZ_ASSERT(isAlignedOffset<RetAddrEntry>(cursor));
+ script->retAddrEntriesOffset_ = cursor;
+ cursor += retAddrEntries * sizeof(RetAddrEntry);
+
+ MOZ_ASSERT(isAlignedOffset<OSREntry>(cursor));
+ script->osrEntriesOffset_ = cursor;
+ cursor += osrEntries * sizeof(OSREntry);
+
+ MOZ_ASSERT(isAlignedOffset<DebugTrapEntry>(cursor));
+ script->debugTrapEntriesOffset_ = cursor;
+ cursor += debugTrapEntries * sizeof(DebugTrapEntry);
+
+ MOZ_ASSERT(isAlignedOffset<uint32_t>(cursor));
+
+ script->allocBytes_ = cursor;
+
+ MOZ_ASSERT(script->endOffset() == size.value());
+
+ return script;
+}
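+
+// Illustrative sketch (an editor's addition, not in the original source): the
+// "one allocation with trailing arrays" pattern that New() uses above, with a
+// plain overflow check standing in for CheckedInt<Offset>. Only the standard
+// library is used; Header, NewWithTrailing, and TrailingElements are invented
+// names. Fenced with #if 0 so it is never compiled.
+#if 0
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <new>
+
+// A header object followed by `count` trailing elements of type T, allocated
+// in one contiguous buffer and addressed via a byte offset.
+struct Header {
+  uint32_t trailingOffset = 0;  // byte offset from `this` to the trailing array
+  uint32_t count = 0;
+};
+
+template <typename T>
+Header* NewWithTrailing(uint32_t count) {
+  // Overflow-checked size computation.
+  size_t size = sizeof(Header);
+  if (count > (std::numeric_limits<size_t>::max() - size) / sizeof(T)) {
+    return nullptr;  // allocation overflow
+  }
+  size += size_t(count) * sizeof(T);
+
+  // Assumes alignof(T) <= alignof(std::max_align_t), which malloc guarantees.
+  void* raw = std::malloc(size);
+  if (!raw) {
+    return nullptr;
+  }
+  Header* header = new (raw) Header();
+  header->trailingOffset = uint32_t(sizeof(Header));
+  header->count = count;
+  return header;
+}
+
+template <typename T>
+T* TrailingElements(Header* header) {
+  return reinterpret_cast<T*>(reinterpret_cast<uint8_t*>(header) +
+                              header->trailingOffset);
+}
+#endif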
+
+void BaselineScript::trace(JSTracer* trc) {
+ TraceEdge(trc, &method_, "baseline-method");
+}
+
+void BaselineScript::Destroy(JS::GCContext* gcx, BaselineScript* script) {
+ MOZ_ASSERT(!script->hasPendingIonCompileTask());
+
+ // This allocation is tracked by JSScript::setBaselineScriptImpl.
+ gcx->deleteUntracked(script);
+}
+
+void JS::DeletePolicy<js::jit::BaselineScript>::operator()(
+ const js::jit::BaselineScript* script) {
+ BaselineScript::Destroy(rt_->gcContext(),
+ const_cast<BaselineScript*>(script));
+}
+
+const RetAddrEntry& BaselineScript::retAddrEntryFromReturnOffset(
+ CodeOffset returnOffset) {
+ mozilla::Span<RetAddrEntry> entries = retAddrEntries();
+ size_t loc;
+#ifdef DEBUG
+ bool found =
+#endif
+ BinarySearchIf(
+ entries.data(), 0, entries.size(),
+ [&returnOffset](const RetAddrEntry& entry) {
+ size_t roffset = returnOffset.offset();
+ size_t entryRoffset = entry.returnOffset().offset();
+ if (roffset < entryRoffset) {
+ return -1;
+ }
+ if (entryRoffset < roffset) {
+ return 1;
+ }
+ return 0;
+ },
+ &loc);
+
+ MOZ_ASSERT(found);
+ MOZ_ASSERT(entries[loc].returnOffset().offset() == returnOffset.offset());
+ return entries[loc];
+}
+
+template <typename Entry>
+static bool ComputeBinarySearchMid(mozilla::Span<Entry> entries,
+ uint32_t pcOffset, size_t* loc) {
+ return BinarySearchIf(
+ entries.data(), 0, entries.size(),
+ [pcOffset](const Entry& entry) {
+ uint32_t entryOffset = entry.pcOffset();
+ if (pcOffset < entryOffset) {
+ return -1;
+ }
+ if (entryOffset < pcOffset) {
+ return 1;
+ }
+ return 0;
+ },
+ loc);
+}
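+
+// Illustrative sketch (an editor's addition, not in the original source): the
+// same "binary search over entries sorted by pcOffset" idea, expressed with
+// std::lower_bound. Entry and FindEntryByPcOffset are invented names. Fenced
+// with #if 0 so it is never compiled.
+#if 0
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+struct Entry {
+  uint32_t pcOffset;
+  uint32_t nativeOffset;
+};
+
+// Returns true and sets *loc if some entry has the requested pcOffset.
+static bool FindEntryByPcOffset(const std::vector<Entry>& entries,
+                                uint32_t pcOffset, size_t* loc) {
+  auto it = std::lower_bound(
+      entries.begin(), entries.end(), pcOffset,
+      [](const Entry& entry, uint32_t key) { return entry.pcOffset < key; });
+  if (it == entries.end() || it->pcOffset != pcOffset) {
+    return false;
+  }
+  *loc = size_t(it - entries.begin());
+  return true;
+}
+#endif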
+
+uint8_t* BaselineScript::returnAddressForEntry(const RetAddrEntry& ent) {
+ return method()->raw() + ent.returnOffset().offset();
+}
+
+const RetAddrEntry& BaselineScript::retAddrEntryFromPCOffset(
+ uint32_t pcOffset, RetAddrEntry::Kind kind) {
+ mozilla::Span<RetAddrEntry> entries = retAddrEntries();
+ size_t mid;
+ MOZ_ALWAYS_TRUE(ComputeBinarySearchMid(entries, pcOffset, &mid));
+ MOZ_ASSERT(mid < entries.size());
+
+ // Search for the first entry for this pc.
+ size_t first = mid;
+ while (first > 0 && entries[first - 1].pcOffset() == pcOffset) {
+ first--;
+ }
+
+ // Search for the last entry for this pc.
+ size_t last = mid;
+ while (last + 1 < entries.size() &&
+ entries[last + 1].pcOffset() == pcOffset) {
+ last++;
+ }
+
+ MOZ_ASSERT(first <= last);
+ MOZ_ASSERT(entries[first].pcOffset() == pcOffset);
+ MOZ_ASSERT(entries[last].pcOffset() == pcOffset);
+
+ for (size_t i = first; i <= last; i++) {
+ const RetAddrEntry& entry = entries[i];
+ if (entry.kind() != kind) {
+ continue;
+ }
+
+#ifdef DEBUG
+ // There must be a unique entry for this pcOffset and Kind to ensure our
+ // return value is well-defined.
+ for (size_t j = i + 1; j <= last; j++) {
+ MOZ_ASSERT(entries[j].kind() != kind);
+ }
+#endif
+
+ return entry;
+ }
+
+ MOZ_CRASH("Didn't find RetAddrEntry.");
+}
+
+const RetAddrEntry& BaselineScript::prologueRetAddrEntry(
+ RetAddrEntry::Kind kind) {
+ MOZ_ASSERT(kind == RetAddrEntry::Kind::StackCheck);
+
+ // The prologue entries will always be at a very low offset, so just do a
+ // linear search from the beginning.
+ for (const RetAddrEntry& entry : retAddrEntries()) {
+ if (entry.pcOffset() != 0) {
+ break;
+ }
+ if (entry.kind() == kind) {
+ return entry;
+ }
+ }
+ MOZ_CRASH("Didn't find prologue RetAddrEntry.");
+}
+
+const RetAddrEntry& BaselineScript::retAddrEntryFromReturnAddress(
+ const uint8_t* returnAddr) {
+ MOZ_ASSERT(returnAddr > method_->raw());
+ MOZ_ASSERT(returnAddr < method_->raw() + method_->instructionsSize());
+ CodeOffset offset(returnAddr - method_->raw());
+ return retAddrEntryFromReturnOffset(offset);
+}
+
+uint8_t* BaselineScript::nativeCodeForOSREntry(uint32_t pcOffset) {
+ mozilla::Span<OSREntry> entries = osrEntries();
+ size_t mid;
+ if (!ComputeBinarySearchMid(entries, pcOffset, &mid)) {
+ return nullptr;
+ }
+
+ uint32_t nativeOffset = entries[mid].nativeOffset();
+ return method_->raw() + nativeOffset;
+}
+
+void BaselineScript::computeResumeNativeOffsets(
+ JSScript* script, const ResumeOffsetEntryVector& entries) {
+ // Translate a pcOffset to a BaselineScript native address. This may return
+ // nullptr if the compiler decided the code was unreachable.
+ auto computeNative = [this, &entries](uint32_t pcOffset) -> uint8_t* {
+ mozilla::Span<const ResumeOffsetEntry> entriesSpan =
+ mozilla::Span(entries.begin(), entries.length());
+ size_t mid;
+ if (!ComputeBinarySearchMid(entriesSpan, pcOffset, &mid)) {
+ return nullptr;
+ }
+
+ uint32_t nativeOffset = entries[mid].nativeOffset();
+ return method_->raw() + nativeOffset;
+ };
+
+ mozilla::Span<const uint32_t> pcOffsets = script->resumeOffsets();
+ mozilla::Span<uint8_t*> nativeOffsets = resumeEntryList();
+ std::transform(pcOffsets.begin(), pcOffsets.end(), nativeOffsets.begin(),
+ computeNative);
+}
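+
+// Illustrative sketch (an editor's addition, not in the original source): the
+// same std::transform-based mapping of pc offsets to native offsets, with a
+// hash map standing in for the sorted entry list and UINT32_MAX standing in
+// for the nullptr "no code emitted" case. MapResumeOffsets is an invented
+// name. Fenced with #if 0 so it is never compiled.
+#if 0
+#include <algorithm>
+#include <cstdint>
+#include <unordered_map>
+#include <vector>
+
+static std::vector<uint32_t> MapResumeOffsets(
+    const std::vector<uint32_t>& pcOffsets,
+    const std::unordered_map<uint32_t, uint32_t>& pcToNative) {
+  std::vector<uint32_t> out(pcOffsets.size());
+  std::transform(pcOffsets.begin(), pcOffsets.end(), out.begin(),
+                 [&pcToNative](uint32_t pc) -> uint32_t {
+                   auto it = pcToNative.find(pc);
+                   return it == pcToNative.end() ? UINT32_MAX : it->second;
+                 });
+  return out;
+}
+#endif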
+
+void BaselineScript::copyRetAddrEntries(const RetAddrEntry* entries) {
+ std::copy_n(entries, retAddrEntries().size(), retAddrEntries().data());
+}
+
+void BaselineScript::copyOSREntries(const OSREntry* entries) {
+ std::copy_n(entries, osrEntries().size(), osrEntries().data());
+}
+
+void BaselineScript::copyDebugTrapEntries(const DebugTrapEntry* entries) {
+ std::copy_n(entries, debugTrapEntries().size(), debugTrapEntries().data());
+}
+
+jsbytecode* BaselineScript::approximatePcForNativeAddress(
+ JSScript* script, uint8_t* nativeAddress) {
+ MOZ_ASSERT(script->baselineScript() == this);
+ MOZ_ASSERT(containsCodeAddress(nativeAddress));
+
+ uint32_t nativeOffset = nativeAddress - method_->raw();
+
+ // Use the RetAddrEntry list (sorted on pc and return address) to look for the
+ // first pc that has a return address >= nativeOffset. This isn't perfect but
+ // it's a reasonable approximation for the profiler because most non-trivial
+ // bytecode ops have a RetAddrEntry.
+
+ for (const RetAddrEntry& entry : retAddrEntries()) {
+ uint32_t retOffset = entry.returnOffset().offset();
+ if (retOffset >= nativeOffset) {
+ return script->offsetToPC(entry.pcOffset());
+ }
+ }
+
+ // Return the last entry's pc. Every BaselineScript has at least one
+ // RetAddrEntry for the prologue stack overflow check.
+ MOZ_ASSERT(!retAddrEntries().empty());
+ const RetAddrEntry& lastEntry = retAddrEntries()[retAddrEntries().size() - 1];
+ return script->offsetToPC(lastEntry.pcOffset());
+}
+
+void BaselineScript::toggleDebugTraps(JSScript* script, jsbytecode* pc) {
+ MOZ_ASSERT(script->baselineScript() == this);
+
+ // Only scripts compiled for debug mode have toggled calls.
+ if (!hasDebugInstrumentation()) {
+ return;
+ }
+
+ AutoWritableJitCode awjc(method());
+
+ for (const DebugTrapEntry& entry : debugTrapEntries()) {
+ jsbytecode* entryPC = script->offsetToPC(entry.pcOffset());
+
+ // If the |pc| argument is non-null we can skip all other bytecode ops.
+ if (pc && pc != entryPC) {
+ continue;
+ }
+
+ bool enabled = DebugAPI::stepModeEnabled(script) ||
+ DebugAPI::hasBreakpointsAt(script, entryPC);
+
+ // Patch the trap.
+ CodeLocationLabel label(method(), CodeOffset(entry.nativeOffset()));
+ Assembler::ToggleCall(label, enabled);
+ }
+}
+
+void BaselineScript::setPendingIonCompileTask(JSRuntime* rt, JSScript* script,
+ IonCompileTask* task) {
+ MOZ_ASSERT(script->baselineScript() == this);
+ MOZ_ASSERT(task);
+ MOZ_ASSERT(!hasPendingIonCompileTask());
+
+ if (script->isIonCompilingOffThread()) {
+ script->jitScript()->clearIsIonCompilingOffThread(script);
+ }
+
+ pendingIonCompileTask_ = task;
+ script->updateJitCodeRaw(rt);
+}
+
+void BaselineScript::removePendingIonCompileTask(JSRuntime* rt,
+ JSScript* script) {
+ MOZ_ASSERT(script->baselineScript() == this);
+ MOZ_ASSERT(hasPendingIonCompileTask());
+
+ pendingIonCompileTask_ = nullptr;
+ script->updateJitCodeRaw(rt);
+}
+
+static void ToggleProfilerInstrumentation(JitCode* code,
+ uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset,
+ bool enable) {
+ CodeLocationLabel enterToggleLocation(code,
+ CodeOffset(profilerEnterToggleOffset));
+ CodeLocationLabel exitToggleLocation(code,
+ CodeOffset(profilerExitToggleOffset));
+ if (enable) {
+ Assembler::ToggleToCmp(enterToggleLocation);
+ Assembler::ToggleToCmp(exitToggleLocation);
+ } else {
+ Assembler::ToggleToJmp(enterToggleLocation);
+ Assembler::ToggleToJmp(exitToggleLocation);
+ }
+}
+
+void BaselineScript::toggleProfilerInstrumentation(bool enable) {
+ if (enable == isProfilerInstrumentationOn()) {
+ return;
+ }
+
+ JitSpew(JitSpew_BaselineIC, " toggling profiling %s for BaselineScript %p",
+ enable ? "on" : "off", this);
+
+ ToggleProfilerInstrumentation(method_, profilerEnterToggleOffset_,
+ profilerExitToggleOffset_, enable);
+
+ if (enable) {
+ flags_ |= uint32_t(PROFILER_INSTRUMENTATION_ON);
+ } else {
+ flags_ &= ~uint32_t(PROFILER_INSTRUMENTATION_ON);
+ }
+}
+
+void BaselineInterpreter::toggleProfilerInstrumentation(bool enable) {
+ if (!IsBaselineInterpreterEnabled()) {
+ return;
+ }
+
+ AutoWritableJitCode awjc(code_);
+ ToggleProfilerInstrumentation(code_, profilerEnterToggleOffset_,
+ profilerExitToggleOffset_, enable);
+}
+
+void BaselineInterpreter::toggleDebuggerInstrumentation(bool enable) {
+ if (!IsBaselineInterpreterEnabled()) {
+ return;
+ }
+
+ AutoWritableJitCode awjc(code_);
+
+ // Toggle jumps for debugger instrumentation.
+ for (uint32_t offset : debugInstrumentationOffsets_) {
+ CodeLocationLabel label(code_, CodeOffset(offset));
+ if (enable) {
+ Assembler::ToggleToCmp(label);
+ } else {
+ Assembler::ToggleToJmp(label);
+ }
+ }
+
+ // Toggle DebugTrapHandler calls.
+
+ uint8_t* debugTrapHandler = codeAtOffset(debugTrapHandlerOffset_);
+
+ for (uint32_t offset : debugTrapOffsets_) {
+ uint8_t* trap = codeAtOffset(offset);
+ if (enable) {
+ MacroAssembler::patchNopToCall(trap, debugTrapHandler);
+ } else {
+ MacroAssembler::patchCallToNop(trap);
+ }
+ }
+}
+
+void BaselineInterpreter::toggleCodeCoverageInstrumentationUnchecked(
+ bool enable) {
+ if (!IsBaselineInterpreterEnabled()) {
+ return;
+ }
+
+ AutoWritableJitCode awjc(code_);
+
+ for (uint32_t offset : codeCoverageOffsets_) {
+ CodeLocationLabel label(code_, CodeOffset(offset));
+ if (enable) {
+ Assembler::ToggleToCmp(label);
+ } else {
+ Assembler::ToggleToJmp(label);
+ }
+ }
+}
+
+void BaselineInterpreter::toggleCodeCoverageInstrumentation(bool enable) {
+ if (coverage::IsLCovEnabled()) {
+ // Instrumentation is enabled no matter what.
+ return;
+ }
+
+ toggleCodeCoverageInstrumentationUnchecked(enable);
+}
+
+void jit::FinishDiscardBaselineScript(JS::GCContext* gcx, JSScript* script) {
+ MOZ_ASSERT(script->hasBaselineScript());
+ MOZ_ASSERT(!script->jitScript()->active());
+
+ BaselineScript* baseline =
+ script->jitScript()->clearBaselineScript(gcx, script);
+ BaselineScript::Destroy(gcx, baseline);
+}
+
+void jit::AddSizeOfBaselineData(JSScript* script,
+ mozilla::MallocSizeOf mallocSizeOf,
+ size_t* data) {
+ if (script->hasBaselineScript()) {
+ script->baselineScript()->addSizeOfIncludingThis(mallocSizeOf, data);
+ }
+}
+
+void jit::ToggleBaselineProfiling(JSContext* cx, bool enable) {
+ JitRuntime* jrt = cx->runtime()->jitRuntime();
+ if (!jrt) {
+ return;
+ }
+
+ jrt->baselineInterpreter().toggleProfilerInstrumentation(enable);
+
+ for (ZonesIter zone(cx->runtime(), SkipAtoms); !zone.done(); zone.next()) {
+ for (auto base = zone->cellIter<BaseScript>(); !base.done(); base.next()) {
+ if (!base->hasJitScript()) {
+ continue;
+ }
+ JSScript* script = base->asJSScript();
+ if (enable) {
+ script->jitScript()->ensureProfileString(cx, script);
+ }
+ if (!script->hasBaselineScript()) {
+ continue;
+ }
+ AutoWritableJitCode awjc(script->baselineScript()->method());
+ script->baselineScript()->toggleProfilerInstrumentation(enable);
+ }
+ }
+}
+
+void BaselineInterpreter::init(JitCode* code, uint32_t interpretOpOffset,
+ uint32_t interpretOpNoDebugTrapOffset,
+ uint32_t bailoutPrologueOffset,
+ uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset,
+ uint32_t debugTrapHandlerOffset,
+ CodeOffsetVector&& debugInstrumentationOffsets,
+ CodeOffsetVector&& debugTrapOffsets,
+ CodeOffsetVector&& codeCoverageOffsets,
+ ICReturnOffsetVector&& icReturnOffsets,
+ const CallVMOffsets& callVMOffsets) {
+ code_ = code;
+ interpretOpOffset_ = interpretOpOffset;
+ interpretOpNoDebugTrapOffset_ = interpretOpNoDebugTrapOffset;
+ bailoutPrologueOffset_ = bailoutPrologueOffset;
+ profilerEnterToggleOffset_ = profilerEnterToggleOffset;
+ profilerExitToggleOffset_ = profilerExitToggleOffset;
+ debugTrapHandlerOffset_ = debugTrapHandlerOffset;
+ debugInstrumentationOffsets_ = std::move(debugInstrumentationOffsets);
+ debugTrapOffsets_ = std::move(debugTrapOffsets);
+ codeCoverageOffsets_ = std::move(codeCoverageOffsets);
+ icReturnOffsets_ = std::move(icReturnOffsets);
+ callVMOffsets_ = callVMOffsets;
+}
+
+uint8_t* BaselineInterpreter::retAddrForIC(JSOp op) const {
+ for (const ICReturnOffset& entry : icReturnOffsets_) {
+ if (entry.op == op) {
+ return codeAtOffset(entry.offset);
+ }
+ }
+ MOZ_CRASH("Unexpected op");
+}
+
+bool jit::GenerateBaselineInterpreter(JSContext* cx,
+ BaselineInterpreter& interpreter) {
+ if (IsBaselineInterpreterEnabled()) {
+ TempAllocator temp(&cx->tempLifoAlloc());
+ BaselineInterpreterGenerator generator(cx, temp);
+ return generator.generate(interpreter);
+ }
+
+ return true;
+}
diff --git a/js/src/jit/BaselineJIT.h b/js/src/jit/BaselineJIT.h
new file mode 100644
index 0000000000..b1e48dbb2b
--- /dev/null
+++ b/js/src/jit/BaselineJIT.h
@@ -0,0 +1,593 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineJIT_h
+#define jit_BaselineJIT_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Likely.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Span.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jsfriendapi.h"
+
+#include "jit/IonTypes.h"
+#include "jit/JitCode.h"
+#include "jit/JitContext.h"
+#include "jit/JitOptions.h"
+#include "jit/shared/Assembler-shared.h"
+#include "js/Principals.h"
+#include "js/TypeDecls.h"
+#include "js/Vector.h"
+#include "threading/ProtectedData.h"
+#include "util/TrailingArray.h"
+#include "vm/JSScript.h"
+
+namespace js {
+
+class InterpreterFrame;
+class RunState;
+
+namespace jit {
+
+class BaselineFrame;
+class ExceptionBailoutInfo;
+class IonCompileTask;
+class JitActivation;
+class JSJitFrameIter;
+
+// Base class for entries mapping a pc offset to a native code offset.
+class BasePCToNativeEntry {
+ uint32_t pcOffset_;
+ uint32_t nativeOffset_;
+
+ public:
+ BasePCToNativeEntry(uint32_t pcOffset, uint32_t nativeOffset)
+ : pcOffset_(pcOffset), nativeOffset_(nativeOffset) {}
+ uint32_t pcOffset() const { return pcOffset_; }
+ uint32_t nativeOffset() const { return nativeOffset_; }
+};
+
+// Class used during Baseline compilation to store the native code offset for
+// resume offset ops.
+class ResumeOffsetEntry : public BasePCToNativeEntry {
+ public:
+ using BasePCToNativeEntry::BasePCToNativeEntry;
+};
+
+using ResumeOffsetEntryVector =
+ Vector<ResumeOffsetEntry, 16, SystemAllocPolicy>;
+
+// Largest script that the baseline compiler will attempt to compile.
+#if defined(JS_CODEGEN_ARM)
+// ARM branches can only reach 32MB, and the macroassembler doesn't mitigate
+// that limitation. Use a stricter limit on the acceptable script size to
+// avoid crashing when branches go out of range.
+static constexpr uint32_t BaselineMaxScriptLength = 1000000u;
+#else
+static constexpr uint32_t BaselineMaxScriptLength = 0x0fffffffu;
+#endif
+
+// Limit the number of locals in a script so that the stack check on baseline
+// frames doesn't overflow a uint32_t value.
+// (BaselineMaxScriptSlots * sizeof(Value)) must fit within a uint32_t.
+//
+// This also applies to the Baseline Interpreter: it ensures we don't run out
+// of stack space (and throw over-recursion exceptions) for scripts with a huge
+// number of locals. The C++ interpreter avoids this by having heap-allocated
+// stack frames.
+static constexpr uint32_t BaselineMaxScriptSlots = 0xffffu;
+
+// An entry in the BaselineScript return address table. These entries are used
+// to determine the bytecode pc for a return address into Baseline code.
+//
+// There must be an entry for each location where we can end up calling into
+// C++ (directly or via script/trampolines) and C++ can request the current
+// bytecode pc (this includes anything that may throw an exception, GC, or walk
+// the stack). We currently add entries for each:
+//
+// * callVM
+// * IC
+// * DebugTrap (trampoline call)
+// * JSOp::Resume (because this is like a scripted call)
+//
+// Note: see also BaselineFrame::HAS_OVERRIDE_PC.
+class RetAddrEntry {
+ // Offset from the start of the JIT code where the call instruction is.
+ uint32_t returnOffset_;
+
+ // The offset of this bytecode op within the JSScript.
+ uint32_t pcOffset_ : 28;
+
+ public:
+ enum class Kind : uint32_t {
+ // An IC for a JOF_IC op.
+ IC,
+
+ // A callVM for an op.
+ CallVM,
+
+ // A callVM not for an op (e.g., in the prologue) that can't
+ // trigger debug mode.
+ NonOpCallVM,
+
+ // A callVM for the over-recursion check on function entry.
+ StackCheck,
+
+ // A callVM for an interrupt check.
+ InterruptCheck,
+
+ // DebugTrapHandler (for debugger breakpoints/stepping).
+ DebugTrap,
+
+ // A callVM for Debug{Prologue,AfterYield,Epilogue}.
+ DebugPrologue,
+ DebugAfterYield,
+ DebugEpilogue,
+
+ Invalid
+ };
+
+ private:
+ // What this entry is for.
+ uint32_t kind_ : 4;
+
+ public:
+ RetAddrEntry(uint32_t pcOffset, Kind kind, CodeOffset retOffset)
+ : returnOffset_(uint32_t(retOffset.offset())),
+ pcOffset_(pcOffset),
+ kind_(uint32_t(kind)) {
+ MOZ_ASSERT(returnOffset_ == retOffset.offset(),
+ "retOffset must fit in returnOffset_");
+
+ // The pc offset must fit in at least 28 bits, since we shave off 4 for
+ // the Kind enum.
+ MOZ_ASSERT(pcOffset_ == pcOffset);
+ static_assert(BaselineMaxScriptLength <= (1u << 28) - 1);
+ MOZ_ASSERT(pcOffset <= BaselineMaxScriptLength);
+
+ MOZ_ASSERT(kind < Kind::Invalid);
+ MOZ_ASSERT(this->kind() == kind, "kind must fit in kind_ bit field");
+ }
+
+ CodeOffset returnOffset() const { return CodeOffset(returnOffset_); }
+
+ uint32_t pcOffset() const { return pcOffset_; }
+
+ jsbytecode* pc(JSScript* script) const {
+ return script->offsetToPC(pcOffset_);
+ }
+
+ Kind kind() const {
+ MOZ_ASSERT(kind_ < uint32_t(Kind::Invalid));
+ return Kind(kind_);
+ }
+};
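+
+// Illustrative sketch (an editor's addition, not in the original source):
+// packing a 28-bit pc offset and a 4-bit kind into one 32-bit word by hand,
+// which is what the pcOffset_:28 / kind_:4 bit-fields above express.
+// PackedRetAddr is an invented name. Fenced with #if 0 so it is never
+// compiled.
+#if 0
+#include <cassert>
+#include <cstdint>
+
+struct PackedRetAddr {
+  uint32_t bits;
+
+  PackedRetAddr(uint32_t pcOffset, uint32_t kind) {
+    assert(pcOffset <= (uint32_t(1) << 28) - 1);  // must fit in 28 bits
+    assert(kind <= 0xf);                          // must fit in 4 bits
+    bits = (pcOffset << 4) | kind;
+  }
+
+  uint32_t pcOffset() const { return bits >> 4; }
+  uint32_t kind() const { return bits & 0xf; }
+};
+#endif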
+
+// [SMDOC] BaselineScript
+//
+// This holds the metadata generated by the BaselineCompiler. The machine code
+// associated with this is owned by a JitCode instance. This class instance is
+// followed by several arrays:
+//
+// <BaselineScript itself>
+// --
+// uint8_t*[] resumeEntryList()
+// RetAddrEntry[] retAddrEntries()
+// OSREntry[] osrEntries()
+// DebugTrapEntry[] debugTrapEntries()
+//
+// Note: The arrays are arranged in order of descending alignment requirements
+// so that no padding is required.
+class alignas(uintptr_t) BaselineScript final : public TrailingArray {
+ private:
+ // Code pointer containing the actual method.
+ HeapPtr<JitCode*> method_ = nullptr;
+
+ // An ion compilation that is ready, but isn't linked yet.
+ MainThreadData<IonCompileTask*> pendingIonCompileTask_{nullptr};
+
+ // The Baseline Interpreter can enter Baseline Compiler code at this address.
+ // This is right after the warm-up counter check in the prologue.
+ uint32_t warmUpCheckPrologueOffset_ = 0;
+
+ // The offsets for the toggledJump instructions for profiler instrumentation.
+ uint32_t profilerEnterToggleOffset_ = 0;
+ uint32_t profilerExitToggleOffset_ = 0;
+
+ private:
+ // Offset (in bytes) from `this` to the start of each trailing array. Each
+ // array ends where the following one begins. There is no implicit padding
+ // (except possibly at the very end).
+ Offset resumeEntriesOffset_ = 0;
+ Offset retAddrEntriesOffset_ = 0;
+ Offset osrEntriesOffset_ = 0;
+ Offset debugTrapEntriesOffset_ = 0;
+ Offset allocBytes_ = 0;
+
+ // See `Flag` type below.
+ uint8_t flags_ = 0;
+
+ // End of fields.
+
+ public:
+ enum Flag {
+ // Flag set when compiled for use with Debugger. Handles various
+ // Debugger hooks and compiles toggled calls for traps.
+ HAS_DEBUG_INSTRUMENTATION = 1 << 0,
+
+ // Flag is set if this script has profiling instrumentation turned on.
+ PROFILER_INSTRUMENTATION_ON = 1 << 1,
+ };
+
+ // Native code offset for OSR from Baseline Interpreter into Baseline JIT at
+ // JSOp::LoopHead ops.
+ class OSREntry : public BasePCToNativeEntry {
+ public:
+ using BasePCToNativeEntry::BasePCToNativeEntry;
+ };
+
+ // Native code offset for a debug trap when the script is compiled with debug
+ // instrumentation.
+ class DebugTrapEntry : public BasePCToNativeEntry {
+ public:
+ using BasePCToNativeEntry::BasePCToNativeEntry;
+ };
+
+ private:
+ // Layout helpers
+ Offset resumeEntriesOffset() const { return resumeEntriesOffset_; }
+ Offset retAddrEntriesOffset() const { return retAddrEntriesOffset_; }
+ Offset osrEntriesOffset() const { return osrEntriesOffset_; }
+ Offset debugTrapEntriesOffset() const { return debugTrapEntriesOffset_; }
+ Offset endOffset() const { return allocBytes_; }
+
+ // Use BaselineScript::New to create new instances. It will properly
+ // allocate trailing objects.
+ BaselineScript(uint32_t warmUpCheckPrologueOffset,
+ uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset)
+ : warmUpCheckPrologueOffset_(warmUpCheckPrologueOffset),
+ profilerEnterToggleOffset_(profilerEnterToggleOffset),
+ profilerExitToggleOffset_(profilerExitToggleOffset) {}
+
+ template <typename T>
+ mozilla::Span<T> makeSpan(Offset start, Offset end) {
+ return mozilla::Span{offsetToPointer<T>(start), numElements<T>(start, end)};
+ }
+
+ // We store the native code address corresponding to each bytecode offset in
+ // the script's resumeOffsets list.
+ mozilla::Span<uint8_t*> resumeEntryList() {
+ return makeSpan<uint8_t*>(resumeEntriesOffset(), retAddrEntriesOffset());
+ }
+
+ // See each type for documentation of these arrays.
+ mozilla::Span<RetAddrEntry> retAddrEntries() {
+ return makeSpan<RetAddrEntry>(retAddrEntriesOffset(), osrEntriesOffset());
+ }
+ mozilla::Span<OSREntry> osrEntries() {
+ return makeSpan<OSREntry>(osrEntriesOffset(), debugTrapEntriesOffset());
+ }
+ mozilla::Span<DebugTrapEntry> debugTrapEntries() {
+ return makeSpan<DebugTrapEntry>(debugTrapEntriesOffset(), endOffset());
+ }
+
+ public:
+ static BaselineScript* New(JSContext* cx, uint32_t warmUpCheckPrologueOffset,
+ uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset,
+ size_t retAddrEntries, size_t osrEntries,
+ size_t debugTrapEntries, size_t resumeEntries);
+
+ static void Destroy(JS::GCContext* gcx, BaselineScript* script);
+
+ void trace(JSTracer* trc);
+
+ static inline size_t offsetOfMethod() {
+ return offsetof(BaselineScript, method_);
+ }
+
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* data) const {
+ *data += mallocSizeOf(this);
+ }
+
+ void setHasDebugInstrumentation() { flags_ |= HAS_DEBUG_INSTRUMENTATION; }
+ bool hasDebugInstrumentation() const {
+ return flags_ & HAS_DEBUG_INSTRUMENTATION;
+ }
+
+ uint8_t* warmUpCheckPrologueAddr() const {
+ return method_->raw() + warmUpCheckPrologueOffset_;
+ }
+
+ JitCode* method() const { return method_; }
+ void setMethod(JitCode* code) {
+ MOZ_ASSERT(!method_);
+ method_ = code;
+ }
+
+ bool containsCodeAddress(uint8_t* addr) const {
+ return method()->raw() <= addr &&
+ addr <= method()->raw() + method()->instructionsSize();
+ }
+
+ uint8_t* returnAddressForEntry(const RetAddrEntry& ent);
+
+ const RetAddrEntry& retAddrEntryFromPCOffset(uint32_t pcOffset,
+ RetAddrEntry::Kind kind);
+ const RetAddrEntry& prologueRetAddrEntry(RetAddrEntry::Kind kind);
+ const RetAddrEntry& retAddrEntryFromReturnOffset(CodeOffset returnOffset);
+ const RetAddrEntry& retAddrEntryFromReturnAddress(const uint8_t* returnAddr);
+
+ uint8_t* nativeCodeForOSREntry(uint32_t pcOffset);
+
+ void copyRetAddrEntries(const RetAddrEntry* entries);
+ void copyOSREntries(const OSREntry* entries);
+ void copyDebugTrapEntries(const DebugTrapEntry* entries);
+
+ // Copy resumeOffsets list from |script| and convert the pcOffsets
+ // to native addresses in the Baseline code based on |entries|.
+ void computeResumeNativeOffsets(JSScript* script,
+ const ResumeOffsetEntryVector& entries);
+
+ // Return the bytecode pc for a given native code address. Be careful when
+ // using this method: it's an approximation and not guaranteed to be the
+ // correct pc.
+ jsbytecode* approximatePcForNativeAddress(JSScript* script,
+ uint8_t* nativeAddress);
+
+ // Toggle debug traps (used for breakpoints and step mode) in the script.
+ // If |pc| is nullptr, toggle traps for all ops in the script. Else, only
+ // toggle traps at |pc|.
+ void toggleDebugTraps(JSScript* script, jsbytecode* pc);
+
+ void toggleProfilerInstrumentation(bool enable);
+ bool isProfilerInstrumentationOn() const {
+ return flags_ & PROFILER_INSTRUMENTATION_ON;
+ }
+
+ static size_t offsetOfResumeEntriesOffset() {
+ static_assert(sizeof(Offset) == sizeof(uint32_t),
+ "JIT expect Offset to be uint32_t");
+ return offsetof(BaselineScript, resumeEntriesOffset_);
+ }
+
+ bool hasPendingIonCompileTask() const { return !!pendingIonCompileTask_; }
+
+ js::jit::IonCompileTask* pendingIonCompileTask() {
+ MOZ_ASSERT(hasPendingIonCompileTask());
+ return pendingIonCompileTask_;
+ }
+ void setPendingIonCompileTask(JSRuntime* rt, JSScript* script,
+ js::jit::IonCompileTask* task);
+ void removePendingIonCompileTask(JSRuntime* rt, JSScript* script);
+
+ size_t allocBytes() const { return allocBytes_; }
+};
+static_assert(
+ sizeof(BaselineScript) % sizeof(uintptr_t) == 0,
+ "The data attached to the script must be aligned for fast JIT access.");
+
+enum class BaselineTier { Interpreter, Compiler };
+
+template <BaselineTier Tier>
+MethodStatus CanEnterBaselineMethod(JSContext* cx, RunState& state);
+
+MethodStatus CanEnterBaselineInterpreterAtBranch(JSContext* cx,
+ InterpreterFrame* fp);
+
+JitExecStatus EnterBaselineInterpreterAtBranch(JSContext* cx,
+ InterpreterFrame* fp,
+ jsbytecode* pc);
+
+bool CanBaselineInterpretScript(JSScript* script);
+
+// Called by the Baseline Interpreter to compile a script for the Baseline JIT.
+// |res| is set to the native code address in the BaselineScript to jump to, or
+// nullptr if we were unable to compile this script.
+bool BaselineCompileFromBaselineInterpreter(JSContext* cx, BaselineFrame* frame,
+ uint8_t** res);
+
+void FinishDiscardBaselineScript(JS::GCContext* gcx, JSScript* script);
+
+void AddSizeOfBaselineData(JSScript* script, mozilla::MallocSizeOf mallocSizeOf,
+ size_t* data);
+
+void ToggleBaselineProfiling(JSContext* cx, bool enable);
+
+struct alignas(uintptr_t) BaselineBailoutInfo {
+ // Pointer into the current C stack, where overwriting will start.
+ uint8_t* incomingStack = nullptr;
+
+ // The top and bottom heap addresses of the reconstructed stack, which will
+ // be copied onto the C stack.
+ uint8_t* copyStackTop = nullptr;
+ uint8_t* copyStackBottom = nullptr;
+
+ // The value of the frame pointer register on resume.
+ void* resumeFramePtr = nullptr;
+
+ // The native code address to resume into.
+ void* resumeAddr = nullptr;
+
+ // The bytecode pc of the try block and the fault block.
+ jsbytecode* tryPC = nullptr;
+ jsbytecode* faultPC = nullptr;
+
+ // Number of baseline frames to push on the stack.
+ uint32_t numFrames = 0;
+
+ // The bailout kind.
+ mozilla::Maybe<BailoutKind> bailoutKind = {};
+
+ BaselineBailoutInfo() = default;
+ BaselineBailoutInfo(const BaselineBailoutInfo&) = default;
+
+ void operator=(const BaselineBailoutInfo&) = delete;
+};
+
+enum class BailoutReason {
+ Normal,
+ ExceptionHandler,
+ Invalidate,
+};
+
+[[nodiscard]] bool BailoutIonToBaseline(
+ JSContext* cx, JitActivation* activation, const JSJitFrameIter& iter,
+ BaselineBailoutInfo** bailoutInfo,
+ const ExceptionBailoutInfo* exceptionInfo, BailoutReason reason);
+
+MethodStatus BaselineCompile(JSContext* cx, JSScript* script,
+ bool forceDebugInstrumentation = false);
+
+// Class storing the generated Baseline Interpreter code for the runtime.
+class BaselineInterpreter {
+ public:
+ struct CallVMOffsets {
+ uint32_t debugPrologueOffset = 0;
+ uint32_t debugEpilogueOffset = 0;
+ uint32_t debugAfterYieldOffset = 0;
+ };
+ struct ICReturnOffset {
+ uint32_t offset;
+ JSOp op;
+ ICReturnOffset(uint32_t offset, JSOp op) : offset(offset), op(op) {}
+ };
+ using ICReturnOffsetVector = Vector<ICReturnOffset, 0, SystemAllocPolicy>;
+
+ private:
+ // The interpreter code.
+ JitCode* code_ = nullptr;
+
+ // Offset of the code to start interpreting a bytecode op.
+ uint32_t interpretOpOffset_ = 0;
+
+ // Like interpretOpOffset_ but skips the debug trap for the current op.
+ uint32_t interpretOpNoDebugTrapOffset_ = 0;
+
+ // Early Ion bailouts will enter at this address. This is after frame
+ // construction and environment initialization.
+ uint32_t bailoutPrologueOffset_ = 0;
+
+ // The offsets for the toggledJump instructions for profiler instrumentation.
+ uint32_t profilerEnterToggleOffset_ = 0;
+ uint32_t profilerExitToggleOffset_ = 0;
+
+ // Offset of the jump (tail call) to the debug trap handler trampoline code.
+ // When the debugger is enabled, NOPs are patched to calls to this location.
+ uint32_t debugTrapHandlerOffset_ = 0;
+
+ // The offsets of toggled jumps for debugger instrumentation.
+ using CodeOffsetVector = Vector<uint32_t, 0, SystemAllocPolicy>;
+ CodeOffsetVector debugInstrumentationOffsets_;
+
+ // Offsets of toggled calls to the DebugTrapHandler trampoline (for
+ // breakpoints and stepping).
+ CodeOffsetVector debugTrapOffsets_;
+
+ // Offsets of toggled jumps for code coverage.
+ CodeOffsetVector codeCoverageOffsets_;
+
+ // Offsets of IC calls for IsIonInlinableOp ops, for Ion bailouts.
+ ICReturnOffsetVector icReturnOffsets_;
+
+ // Offsets of some callVMs for BaselineDebugModeOSR.
+ CallVMOffsets callVMOffsets_;
+
+ uint8_t* codeAtOffset(uint32_t offset) const {
+ MOZ_ASSERT(offset > 0);
+ MOZ_ASSERT(offset < code_->instructionsSize());
+ return codeRaw() + offset;
+ }
+
+ public:
+ BaselineInterpreter() = default;
+
+ BaselineInterpreter(const BaselineInterpreter&) = delete;
+ void operator=(const BaselineInterpreter&) = delete;
+
+ void init(JitCode* code, uint32_t interpretOpOffset,
+ uint32_t interpretOpNoDebugTrapOffset,
+ uint32_t bailoutPrologueOffset, uint32_t profilerEnterToggleOffset,
+ uint32_t profilerExitToggleOffset, uint32_t debugTrapHandlerOffset,
+ CodeOffsetVector&& debugInstrumentationOffsets,
+ CodeOffsetVector&& debugTrapOffsets,
+ CodeOffsetVector&& codeCoverageOffsets,
+ ICReturnOffsetVector&& icReturnOffsets,
+ const CallVMOffsets& callVMOffsets);
+
+ uint8_t* codeRaw() const { return code_->raw(); }
+
+ uint8_t* retAddrForDebugPrologueCallVM() const {
+ return codeAtOffset(callVMOffsets_.debugPrologueOffset);
+ }
+ uint8_t* retAddrForDebugEpilogueCallVM() const {
+ return codeAtOffset(callVMOffsets_.debugEpilogueOffset);
+ }
+ uint8_t* retAddrForDebugAfterYieldCallVM() const {
+ return codeAtOffset(callVMOffsets_.debugAfterYieldOffset);
+ }
+ uint8_t* bailoutPrologueEntryAddr() const {
+ return codeAtOffset(bailoutPrologueOffset_);
+ }
+
+ uint8_t* retAddrForIC(JSOp op) const;
+
+ TrampolinePtr interpretOpAddr() const {
+ return TrampolinePtr(codeAtOffset(interpretOpOffset_));
+ }
+ TrampolinePtr interpretOpNoDebugTrapAddr() const {
+ return TrampolinePtr(codeAtOffset(interpretOpNoDebugTrapOffset_));
+ }
+
+ void toggleProfilerInstrumentation(bool enable);
+ void toggleDebuggerInstrumentation(bool enable);
+
+ void toggleCodeCoverageInstrumentationUnchecked(bool enable);
+ void toggleCodeCoverageInstrumentation(bool enable);
+};
+
+[[nodiscard]] bool GenerateBaselineInterpreter(
+ JSContext* cx, BaselineInterpreter& interpreter);
+
+inline bool IsBaselineJitEnabled(JSContext* cx) {
+ if (MOZ_UNLIKELY(!IsBaselineInterpreterEnabled())) {
+ return false;
+ }
+ if (MOZ_LIKELY(JitOptions.baselineJit)) {
+ return true;
+ }
+ if (JitOptions.jitForTrustedPrincipals) {
+ JS::Realm* realm = js::GetContextRealm(cx);
+ return realm && JS::GetRealmPrincipals(realm) &&
+ JS::GetRealmPrincipals(realm)->isSystemOrAddonPrincipal();
+ }
+ return false;
+}
+
+} // namespace jit
+} // namespace js
+
+namespace JS {
+
+template <>
+struct DeletePolicy<js::jit::BaselineScript> {
+ explicit DeletePolicy(JSRuntime* rt) : rt_(rt) {}
+ void operator()(const js::jit::BaselineScript* script);
+
+ private:
+ JSRuntime* rt_;
+};
+
+} // namespace JS
+
+#endif /* jit_BaselineJIT_h */
diff --git a/js/src/jit/BitSet.cpp b/js/src/jit/BitSet.cpp
new file mode 100644
index 0000000000..b791ea387c
--- /dev/null
+++ b/js/src/jit/BitSet.cpp
@@ -0,0 +1,111 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BitSet.h"
+
+#include <string.h>
+
+#include "jit/JitAllocPolicy.h"
+
+using namespace js;
+using namespace js::jit;
+
+bool BitSet::init(TempAllocator& alloc) {
+ size_t sizeRequired = numWords() * sizeof(*bits_);
+
+ bits_ = (uint32_t*)alloc.allocate(sizeRequired);
+ if (!bits_) {
+ return false;
+ }
+
+ memset(bits_, 0, sizeRequired);
+
+ return true;
+}
+
+bool BitSet::empty() const {
+ MOZ_ASSERT(bits_);
+ const uint32_t* bits = bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++) {
+ if (bits[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void BitSet::insertAll(const BitSet& other) {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(other.numBits_ == numBits_);
+ MOZ_ASSERT(other.bits_);
+
+ uint32_t* bits = bits_;
+ const uint32_t* otherBits = other.bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++) {
+ bits[i] |= otherBits[i];
+ }
+}
+
+void BitSet::removeAll(const BitSet& other) {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(other.numBits_ == numBits_);
+ MOZ_ASSERT(other.bits_);
+
+ uint32_t* bits = bits_;
+ const uint32_t* otherBits = other.bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++) {
+ bits[i] &= ~otherBits[i];
+ }
+}
+
+void BitSet::intersect(const BitSet& other) {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(other.numBits_ == numBits_);
+ MOZ_ASSERT(other.bits_);
+
+ uint32_t* bits = bits_;
+ const uint32_t* otherBits = other.bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++) {
+ bits[i] &= otherBits[i];
+ }
+}
+
+// returns true if the intersection caused the contents of the set to change.
+bool BitSet::fixedPointIntersect(const BitSet& other) {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(other.numBits_ == numBits_);
+ MOZ_ASSERT(other.bits_);
+
+ bool changed = false;
+
+ uint32_t* bits = bits_;
+ const uint32_t* otherBits = other.bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++) {
+ uint32_t old = bits[i];
+ bits[i] &= otherBits[i];
+
+ if (!changed && old != bits[i]) {
+ changed = true;
+ }
+ }
+ return changed;
+}
+
+void BitSet::complement() {
+ MOZ_ASSERT(bits_);
+ uint32_t* bits = bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++) {
+ bits[i] = ~bits[i];
+ }
+}
+
+void BitSet::clear() {
+ MOZ_ASSERT(bits_);
+ uint32_t* bits = bits_;
+ for (unsigned int i = 0, e = numWords(); i < e; i++) {
+ bits[i] = 0;
+ }
+}
diff --git a/js/src/jit/BitSet.h b/js/src/jit/BitSet.h
new file mode 100644
index 0000000000..4e34b1ecb1
--- /dev/null
+++ b/js/src/jit/BitSet.h
@@ -0,0 +1,167 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BitSet_h
+#define jit_BitSet_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace js {
+namespace jit {
+
+class TempAllocator;
+
+// Provides constant time set insertion and removal, and fast linear
+// set operations such as intersection, difference, and union.
+// N.B. All set operations must be performed on sets with the same number
+// of bits.
+class BitSet {
+ public:
+ static const size_t BitsPerWord = 8 * sizeof(uint32_t);
+
+ static size_t RawLengthForBits(size_t bits) {
+ return (bits + BitsPerWord - 1) / BitsPerWord;
+ }
+
+ private:
+ uint32_t* bits_;
+ const unsigned int numBits_;
+
+ static inline uint32_t bitForValue(unsigned int value) {
+ return 1l << uint32_t(value % BitsPerWord);
+ }
+
+ static inline unsigned int wordForValue(unsigned int value) {
+ return value / BitsPerWord;
+ }
+
+ inline unsigned int numWords() const { return RawLengthForBits(numBits_); }
+
+ BitSet(const BitSet&) = delete;
+ void operator=(const BitSet&) = delete;
+
+ public:
+ class Iterator;
+
+ explicit BitSet(unsigned int numBits) : bits_(nullptr), numBits_(numBits) {}
+
+ [[nodiscard]] bool init(TempAllocator& alloc);
+
+ unsigned int getNumBits() const { return numBits_; }
+
+ // O(1): Check if this set contains the given value.
+ bool contains(unsigned int value) const {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(value < numBits_);
+
+ return !!(bits_[wordForValue(value)] & bitForValue(value));
+ }
+
+ // O(numBits): Check if this set contains any value.
+ bool empty() const;
+
+ // O(1): Insert the given value into this set.
+ void insert(unsigned int value) {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(value < numBits_);
+
+ bits_[wordForValue(value)] |= bitForValue(value);
+ }
+
+ // O(numBits): Insert every element of the given set into this set.
+ void insertAll(const BitSet& other);
+
+ // O(1): Remove the given value from this set.
+ void remove(unsigned int value) {
+ MOZ_ASSERT(bits_);
+ MOZ_ASSERT(value < numBits_);
+
+ bits_[wordForValue(value)] &= ~bitForValue(value);
+ }
+
+ // O(numBits): Remove every element of the given set from this set.
+ void removeAll(const BitSet& other);
+
+ // O(numBits): Intersect this set with the given set.
+ void intersect(const BitSet& other);
+
+ // O(numBits): Intersect this set with the given set; return whether the
+ // intersection caused the set to change.
+ bool fixedPointIntersect(const BitSet& other);
+
+ // O(numBits): Complement this set in place.
+ void complement();
+
+ // O(numBits): Clear this set.
+ void clear();
+
+ uint32_t* raw() const { return bits_; }
+ size_t rawLength() const { return numWords(); }
+};
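+
+// Illustrative sketch (an editor's addition, not in the original source): a
+// minimal word-based bit set with O(1) insert/contains and O(numWords)
+// intersection, mirroring the uint32_t-word layout used by BitSet.
+// WordBitSet is an invented name. Fenced with #if 0 so it is never compiled.
+#if 0
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+struct WordBitSet {
+  std::vector<uint32_t> words;
+
+  explicit WordBitSet(size_t numBits) : words((numBits + 31) / 32, 0) {}
+
+  void insert(size_t value) {
+    words[value / 32] |= uint32_t(1) << (value % 32);
+  }
+  bool contains(size_t value) const {
+    return (words[value / 32] >> (value % 32)) & 1u;
+  }
+  void intersect(const WordBitSet& other) {
+    for (size_t i = 0; i < words.size(); i++) {
+      words[i] &= other.words[i];
+    }
+  }
+};
+#endif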
+
+class BitSet::Iterator {
+ private:
+ BitSet& set_;
+ unsigned index_;
+ unsigned word_;
+ uint32_t value_;
+
+ void skipEmpty() {
+ // Skip words containing only zeros.
+ unsigned numWords = set_.numWords();
+ const uint32_t* bits = set_.bits_;
+ while (value_ == 0) {
+ word_++;
+ if (word_ == numWords) {
+ return;
+ }
+
+ index_ = word_ * BitSet::BitsPerWord;
+ value_ = bits[word_];
+ }
+
+ // Be careful: the result of CountTrailingZeroes32 is undefined if the
+ // input is 0.
+ int numZeros = mozilla::CountTrailingZeroes32(value_);
+ index_ += numZeros;
+ value_ >>= numZeros;
+
+ MOZ_ASSERT_IF(index_ < set_.numBits_, set_.contains(index_));
+ }
+
+ public:
+ explicit Iterator(BitSet& set)
+ : set_(set), index_(0), word_(0), value_(set.bits_[0]) {
+ skipEmpty();
+ }
+
+ inline bool more() const { return word_ < set_.numWords(); }
+ explicit operator bool() const { return more(); }
+
+ inline void operator++() {
+ MOZ_ASSERT(more());
+ MOZ_ASSERT(index_ < set_.numBits_);
+
+ index_++;
+ value_ >>= 1;
+
+ skipEmpty();
+ }
+
+ unsigned int operator*() {
+ MOZ_ASSERT(index_ < set_.numBits_);
+ return index_;
+ }
+};
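+
+// Illustrative sketch (an editor's addition, not in the original source):
+// visiting every set bit by skipping zero words and counting trailing zeros,
+// the same idea as Iterator::skipEmpty above, but using C++20's
+// std::countr_zero. ForEachSetBit is an invented name. Fenced with #if 0 so
+// it is never compiled.
+#if 0
+#include <bit>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+template <typename F>
+static void ForEachSetBit(const std::vector<uint32_t>& words, F&& visit) {
+  for (size_t w = 0; w < words.size(); w++) {
+    uint32_t value = words[w];
+    while (value != 0) {
+      int tz = std::countr_zero(value);  // well-defined: value is nonzero here
+      visit(w * 32 + size_t(tz));
+      value &= value - 1;  // clear the lowest set bit
+    }
+  }
+}
+#endif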
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BitSet_h */
diff --git a/js/src/jit/BytecodeAnalysis.cpp b/js/src/jit/BytecodeAnalysis.cpp
new file mode 100644
index 0000000000..ed60296fec
--- /dev/null
+++ b/js/src/jit/BytecodeAnalysis.cpp
@@ -0,0 +1,257 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BytecodeAnalysis.h"
+
+#include "jit/JitSpewer.h"
+#include "jit/WarpBuilder.h"
+#include "vm/BytecodeIterator.h"
+#include "vm/BytecodeLocation.h"
+#include "vm/BytecodeUtil.h"
+
+#include "vm/BytecodeIterator-inl.h"
+#include "vm/BytecodeLocation-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+BytecodeAnalysis::BytecodeAnalysis(TempAllocator& alloc, JSScript* script)
+ : script_(script), infos_(alloc) {}
+
+bool BytecodeAnalysis::init(TempAllocator& alloc) {
+ if (!infos_.growByUninitialized(script_->length())) {
+ return false;
+ }
+
+ // Clear all BytecodeInfo.
+ mozilla::PodZero(infos_.begin(), infos_.length());
+ infos_[0].init(/*stackDepth=*/0);
+
+ // WarpBuilder can compile try blocks, but doesn't support handling
+ // exceptions. If exception unwinding would resume in a catch or finally
+ // block, we instead bail out to the baseline interpreter. Finally blocks can
+ // still be reached by normal means, but the catch block is unreachable and is
+ // not compiled. We therefore need some special machinery to prevent OSR into
+ // Warp code in the following cases:
+ //
+ // (1) Loops in catch blocks:
+ //
+ // try {
+ // ..
+ // } catch (e) {
+ // while (..) {} // Can't OSR here.
+ // }
+ //
+ // (2) Loops only reachable via a catch block:
+ //
+ // for (;;) {
+ // try {
+ // throw 3;
+ // } catch (e) {
+ // break;
+ // }
+ // }
+ // while (..) {} // Loop is only reachable via the catch-block.
+ //
+ // To deal with both of these cases, we track whether the current op is
+ // 'normally reachable' (reachable without exception handling).
+ // Forward jumps propagate this flag to their jump targets (see
+ // BytecodeInfo::jumpTargetNormallyReachable) and when the analysis reaches a
+ // jump target it updates its normallyReachable flag based on the target's
+ // flag.
+ //
+ // Inlining a function without a normally reachable return can cause similar
+ // problems. To avoid this, we mark such functions as uninlineable.
+ bool normallyReachable = true;
+ bool normallyReachableReturn = false;
+
+ for (const BytecodeLocation& it : AllBytecodesIterable(script_)) {
+ JSOp op = it.getOp();
+ uint32_t offset = it.bytecodeToOffset(script_);
+
+ JitSpew(JitSpew_BaselineOp, "Analyzing op @ %u (end=%u): %s",
+ unsigned(offset), unsigned(script_->length()), CodeName(op));
+
+ checkWarpSupport(op);
+
+ // If this bytecode info has not yet been initialized, it's not reachable.
+ if (!infos_[offset].initialized) {
+ continue;
+ }
+
+ uint32_t stackDepth = infos_[offset].stackDepth;
+
+ if (infos_[offset].jumpTarget) {
+ normallyReachable = infos_[offset].jumpTargetNormallyReachable;
+ }
+
+#ifdef DEBUG
+ size_t endOffset = offset + it.length();
+ for (size_t checkOffset = offset + 1; checkOffset < endOffset;
+ checkOffset++) {
+ MOZ_ASSERT(!infos_[checkOffset].initialized);
+ }
+#endif
+ uint32_t nuses = it.useCount();
+ uint32_t ndefs = it.defCount();
+
+ MOZ_ASSERT(stackDepth >= nuses);
+ stackDepth -= nuses;
+ stackDepth += ndefs;
+
+ // The stack depth must not exceed the maximum the analysis can represent.
+ MOZ_ASSERT(stackDepth <= BytecodeInfo::MAX_STACK_DEPTH);
+
+ switch (op) {
+ case JSOp::TableSwitch: {
+ uint32_t defaultOffset = it.getTableSwitchDefaultOffset(script_);
+ int32_t low = it.getTableSwitchLow();
+ int32_t high = it.getTableSwitchHigh();
+
+ infos_[defaultOffset].init(stackDepth);
+ infos_[defaultOffset].setJumpTarget(normallyReachable);
+
+ uint32_t ncases = high - low + 1;
+
+ for (uint32_t i = 0; i < ncases; i++) {
+ uint32_t targetOffset = it.tableSwitchCaseOffset(script_, i);
+ if (targetOffset != defaultOffset) {
+ infos_[targetOffset].init(stackDepth);
+ infos_[targetOffset].setJumpTarget(normallyReachable);
+ }
+ }
+ break;
+ }
+
+ case JSOp::Try: {
+ for (const TryNote& tn : script_->trynotes()) {
+ if (tn.start == offset + JSOpLength_Try &&
+ (tn.kind() == TryNoteKind::Catch ||
+ tn.kind() == TryNoteKind::Finally)) {
+ uint32_t catchOrFinallyOffset = tn.start + tn.length;
+ uint32_t targetDepth =
+ tn.kind() == TryNoteKind::Finally ? stackDepth + 2 : stackDepth;
+ BytecodeInfo& targetInfo = infos_[catchOrFinallyOffset];
+ targetInfo.init(targetDepth);
+ targetInfo.setJumpTarget(/* normallyReachable = */ false);
+ }
+ }
+ break;
+ }
+
+ case JSOp::LoopHead:
+ infos_[offset].loopHeadCanOsr = normallyReachable;
+ break;
+
+#ifdef DEBUG
+ case JSOp::Exception:
+ // Sanity check: ops only emitted in catch blocks are never
+ // normally reachable.
+ MOZ_ASSERT(!normallyReachable);
+ break;
+#endif
+
+ case JSOp::Return:
+ case JSOp::RetRval:
+ if (normallyReachable) {
+ normallyReachableReturn = true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ bool jump = it.isJump();
+ if (jump) {
+ // Case instructions do not push the lvalue back when branching.
+ uint32_t newStackDepth = stackDepth;
+ if (it.is(JSOp::Case)) {
+ newStackDepth--;
+ }
+
+ uint32_t targetOffset = it.getJumpTargetOffset(script_);
+
+#ifdef DEBUG
+ // If this is a backedge, the target JSOp::LoopHead must have been
+ // analyzed already. Furthermore, if the backedge is normally reachable,
+ // the loop head must be normally reachable too (loopHeadCanOsr can be
+ // used to check this since it's equivalent).
+ if (targetOffset < offset) {
+ MOZ_ASSERT(infos_[targetOffset].initialized);
+ MOZ_ASSERT_IF(normallyReachable, infos_[targetOffset].loopHeadCanOsr);
+ }
+#endif
+
+ infos_[targetOffset].init(newStackDepth);
+ infos_[targetOffset].setJumpTarget(normallyReachable);
+ }
+
+ // Handle any fallthrough from this opcode.
+ if (it.fallsThrough()) {
+ BytecodeLocation fallthroughLoc = it.next();
+ MOZ_ASSERT(fallthroughLoc.isInBounds(script_));
+ uint32_t fallthroughOffset = fallthroughLoc.bytecodeToOffset(script_);
+
+ infos_[fallthroughOffset].init(stackDepth);
+
+ // Treat the fallthrough of a branch instruction as a jump target.
+ if (jump) {
+ infos_[fallthroughOffset].setJumpTarget(normallyReachable);
+ }
+ }
+ }
+
+ // Flag (reachable) resume offset instructions.
+ for (uint32_t offset : script_->resumeOffsets()) {
+ BytecodeInfo& info = infos_[offset];
+ if (info.initialized) {
+ info.hasResumeOffset = true;
+ }
+ }
+
+ if (!normallyReachableReturn) {
+ script_->setUninlineable();
+ }
+
+ return true;
+}
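+
+// Illustrative sketch (an editor's addition, not in the original source): the
+// straight-line core of the stack-depth bookkeeping above, where each op pops
+// its uses and pushes its defs; jump handling and reachability tracking are
+// omitted. ToyOp and TrackStackDepth are invented names. Fenced with #if 0 so
+// it is never compiled.
+#if 0
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+struct ToyOp {
+  uint32_t nuses;
+  uint32_t ndefs;
+};
+
+static uint32_t TrackStackDepth(const std::vector<ToyOp>& ops) {
+  uint32_t depth = 0;
+  for (const ToyOp& op : ops) {
+    assert(depth >= op.nuses);  // an op never pops more than is on the stack
+    depth -= op.nuses;
+    depth += op.ndefs;
+  }
+  return depth;
+}
+#endif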
+
+void BytecodeAnalysis::checkWarpSupport(JSOp op) {
+ switch (op) {
+#define DEF_CASE(OP) case JSOp::OP:
+ WARP_UNSUPPORTED_OPCODE_LIST(DEF_CASE)
+#undef DEF_CASE
+ if (script_->canIonCompile()) {
+ JitSpew(JitSpew_IonAbort, "Disabling Warp support for %s:%d:%d due to %s",
+ script_->filename(), script_->lineno(), script_->column(),
+ CodeName(op));
+ script_->disableIon();
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+bool js::jit::ScriptUsesEnvironmentChain(JSScript* script) {
+ if (script->isModule() || script->initialEnvironmentShape() ||
+ (script->function() &&
+ script->function()->needsSomeEnvironmentObject())) {
+ return true;
+ }
+
+ AllBytecodesIterable iterator(script);
+
+ for (const BytecodeLocation& location : iterator) {
+ if (OpUsesEnvironmentChain(location.getOp())) {
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/js/src/jit/BytecodeAnalysis.h b/js/src/jit/BytecodeAnalysis.h
new file mode 100644
index 0000000000..a3a9f65ddf
--- /dev/null
+++ b/js/src/jit/BytecodeAnalysis.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BytecodeAnalysis_h
+#define jit_BytecodeAnalysis_h
+
+#include "jit/JitAllocPolicy.h"
+#include "js/Vector.h"
+#include "vm/JSScript.h"
+
+namespace js {
+namespace jit {
+
+// Basic information about bytecodes in the script. Used to help baseline
+// compilation.
+struct BytecodeInfo {
+ static const uint16_t MAX_STACK_DEPTH = 0xffffU;
+ uint16_t stackDepth;
+ bool initialized : 1;
+ bool jumpTarget : 1;
+
+ // If true, this is a JSOp::LoopHead where we can OSR into Ion/Warp code.
+ bool loopHeadCanOsr : 1;
+
+ // See the comment above normallyReachable in BytecodeAnalysis.cpp for how
+ // this works.
+ bool jumpTargetNormallyReachable : 1;
+
+ // True if the script has a resume offset for this bytecode op.
+ bool hasResumeOffset : 1;
+
+ void init(unsigned depth) {
+ MOZ_ASSERT(depth <= MAX_STACK_DEPTH);
+ MOZ_ASSERT_IF(initialized, stackDepth == depth);
+ initialized = true;
+ stackDepth = depth;
+ }
+
+ void setJumpTarget(bool normallyReachable) {
+ jumpTarget = true;
+ if (normallyReachable) {
+ jumpTargetNormallyReachable = true;
+ }
+ }
+};
+
+class BytecodeAnalysis {
+ JSScript* script_;
+ Vector<BytecodeInfo, 0, JitAllocPolicy> infos_;
+
+ public:
+ explicit BytecodeAnalysis(TempAllocator& alloc, JSScript* script);
+
+ [[nodiscard]] bool init(TempAllocator& alloc);
+
+ BytecodeInfo& info(jsbytecode* pc) {
+ uint32_t pcOffset = script_->pcToOffset(pc);
+ MOZ_ASSERT(infos_[pcOffset].initialized);
+ return infos_[pcOffset];
+ }
+
+ BytecodeInfo* maybeInfo(jsbytecode* pc) {
+ uint32_t pcOffset = script_->pcToOffset(pc);
+ if (infos_[pcOffset].initialized) {
+ return &infos_[pcOffset];
+ }
+ return nullptr;
+ }
+
+ void checkWarpSupport(JSOp op);
+};
+
+// Whether this script uses the frame's environment chain. The result is cached
+// in JitScript and used by WarpBuilder.
+bool ScriptUsesEnvironmentChain(JSScript* script);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BytecodeAnalysis_h */
diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp
new file mode 100644
index 0000000000..68163e7d6c
--- /dev/null
+++ b/js/src/jit/CacheIR.cpp
@@ -0,0 +1,13193 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/CacheIR.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/FloatingPoint.h"
+
+#include "jsapi.h"
+#include "jsmath.h"
+
+#include "builtin/DataViewObject.h"
+#include "builtin/MapObject.h"
+#include "builtin/ModuleObject.h"
+#include "builtin/Object.h"
+#include "jit/BaselineIC.h"
+#include "jit/CacheIRCloner.h"
+#include "jit/CacheIRCompiler.h"
+#include "jit/CacheIRGenerator.h"
+#include "jit/CacheIRSpewer.h"
+#include "jit/CacheIRWriter.h"
+#include "jit/InlinableNatives.h"
+#include "jit/JitContext.h"
+#include "jit/JitRealm.h"
+#include "js/experimental/JitInfo.h" // JSJitInfo
+#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
+#include "js/friend/WindowProxy.h" // js::IsWindow, js::IsWindowProxy, js::ToWindowIfWindowProxy
+#include "js/friend/XrayJitInfo.h" // js::jit::GetXrayJitInfo, JS::XrayJitInfo
+#include "js/GCAPI.h" // JS::AutoSuppressGCAnalysis
+#include "js/RegExpFlags.h" // JS::RegExpFlags
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/Wrapper.h"
+#include "proxy/DOMProxy.h" // js::GetDOMProxyHandlerFamily
+#include "util/DifferentialTesting.h"
+#include "util/Unicode.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/BoundFunctionObject.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/Compartment.h"
+#include "vm/Iteration.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/ProxyObject.h"
+#include "vm/RegExpObject.h"
+#include "vm/SelfHosting.h"
+#include "vm/ThrowMsgKind.h" // ThrowCondition
+#include "vm/Watchtower.h"
+#include "wasm/WasmInstance.h"
+
+#include "jit/BaselineFrame-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/BytecodeUtil-inl.h"
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/JSFunction-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/PlainObject-inl.h"
+#include "vm/StringObject-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+
+using JS::DOMProxyShadowsResult;
+using JS::ExpandoAndGeneration;
+
+const char* const js::jit::CacheKindNames[] = {
+#define DEFINE_KIND(kind) #kind,
+ CACHE_IR_KINDS(DEFINE_KIND)
+#undef DEFINE_KIND
+};
+
+const char* const js::jit::CacheIROpNames[] = {
+#define OPNAME(op, ...) #op,
+ CACHE_IR_OPS(OPNAME)
+#undef OPNAME
+};
+
+const CacheIROpInfo js::jit::CacheIROpInfos[] = {
+#define OPINFO(op, len, transpile, ...) {len, transpile},
+ CACHE_IR_OPS(OPINFO)
+#undef OPINFO
+};
+
+const uint32_t js::jit::CacheIROpHealth[] = {
+#define OPHEALTH(op, len, transpile, health) health,
+ CACHE_IR_OPS(OPHEALTH)
+#undef OPHEALTH
+};
+
+size_t js::jit::NumInputsForCacheKind(CacheKind kind) {
+ switch (kind) {
+ case CacheKind::NewArray:
+ case CacheKind::NewObject:
+ case CacheKind::GetIntrinsic:
+ return 0;
+ case CacheKind::GetProp:
+ case CacheKind::TypeOf:
+ case CacheKind::ToPropertyKey:
+ case CacheKind::GetIterator:
+ case CacheKind::ToBool:
+ case CacheKind::UnaryArith:
+ case CacheKind::GetName:
+ case CacheKind::BindName:
+ case CacheKind::Call:
+ case CacheKind::OptimizeSpreadCall:
+ case CacheKind::CloseIter:
+ return 1;
+ case CacheKind::Compare:
+ case CacheKind::GetElem:
+ case CacheKind::GetPropSuper:
+ case CacheKind::SetProp:
+ case CacheKind::In:
+ case CacheKind::HasOwn:
+ case CacheKind::CheckPrivateField:
+ case CacheKind::InstanceOf:
+ case CacheKind::BinaryArith:
+ return 2;
+ case CacheKind::GetElemSuper:
+ case CacheKind::SetElem:
+ return 3;
+ }
+ MOZ_CRASH("Invalid kind");
+}
+
+#ifdef DEBUG
+void CacheIRWriter::assertSameCompartment(JSObject* obj) {
+ cx_->debugOnlyCheck(obj);
+}
+void CacheIRWriter::assertSameZone(Shape* shape) {
+ MOZ_ASSERT(cx_->zone() == shape->zone());
+}
+#endif
+
+StubField CacheIRWriter::readStubField(uint32_t offset,
+ StubField::Type type) const {
+ size_t index = 0;
+ size_t currentOffset = 0;
+
+ // If the last offset we resolved comes before this one, we can resume the
+ // scan from there; otherwise we start from the beginning.
+ if (lastOffset_ < offset) {
+ currentOffset = lastOffset_;
+ index = lastIndex_;
+ }
+
+ while (currentOffset != offset) {
+ currentOffset += StubField::sizeInBytes(stubFields_[index].type());
+ index++;
+ MOZ_ASSERT(index < stubFields_.length());
+ }
+
+ MOZ_ASSERT(stubFields_[index].type() == type);
+
+ lastOffset_ = currentOffset;
+ lastIndex_ = index;
+
+ return stubFields_[index];
+}
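+
+// Illustrative sketch (an editor's addition, not in the original source):
+// translating a byte offset into an index over variable-sized, back-to-back
+// fields with a cached (offset, index) pair, as readStubField does above.
+// FieldOffsetScanner is an invented name. Fenced with #if 0 so it is never
+// compiled.
+#if 0
+#include <cstddef>
+#include <vector>
+
+struct FieldOffsetScanner {
+  std::vector<size_t> fieldSizes;  // byte size of each field, in order
+  size_t lastOffset = 0;
+  size_t lastIndex = 0;
+
+  size_t indexForOffset(size_t offset) {
+    size_t index = 0;
+    size_t current = 0;
+    if (lastOffset <= offset) {  // resume the forward scan from the cache
+      current = lastOffset;
+      index = lastIndex;
+    }
+    while (current != offset) {
+      current += fieldSizes[index];
+      index++;
+    }
+    lastOffset = current;
+    lastIndex = index;
+    return index;
+  }
+};
+#endif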
+
+CacheIRCloner::CacheIRCloner(ICCacheIRStub* stub)
+ : stubInfo_(stub->stubInfo()), stubData_(stub->stubDataStart()) {}
+
+void CacheIRCloner::cloneOp(CacheOp op, CacheIRReader& reader,
+ CacheIRWriter& writer) {
+ switch (op) {
+#define DEFINE_OP(op, ...) \
+ case CacheOp::op: \
+ clone##op(reader, writer); \
+ break;
+ CACHE_IR_OPS(DEFINE_OP)
+#undef DEFINE_OP
+ default:
+ MOZ_CRASH("Invalid op");
+ }
+}
+
+uintptr_t CacheIRCloner::readStubWord(uint32_t offset) {
+ return stubInfo_->getStubRawWord(stubData_, offset);
+}
+int64_t CacheIRCloner::readStubInt64(uint32_t offset) {
+ return stubInfo_->getStubRawInt64(stubData_, offset);
+}
+
+Shape* CacheIRCloner::getShapeField(uint32_t stubOffset) {
+ return reinterpret_cast<Shape*>(readStubWord(stubOffset));
+}
+GetterSetter* CacheIRCloner::getGetterSetterField(uint32_t stubOffset) {
+ return reinterpret_cast<GetterSetter*>(readStubWord(stubOffset));
+}
+JSObject* CacheIRCloner::getObjectField(uint32_t stubOffset) {
+ return reinterpret_cast<JSObject*>(readStubWord(stubOffset));
+}
+JSString* CacheIRCloner::getStringField(uint32_t stubOffset) {
+ return reinterpret_cast<JSString*>(readStubWord(stubOffset));
+}
+JSAtom* CacheIRCloner::getAtomField(uint32_t stubOffset) {
+ return reinterpret_cast<JSAtom*>(readStubWord(stubOffset));
+}
+JS::Symbol* CacheIRCloner::getSymbolField(uint32_t stubOffset) {
+ return reinterpret_cast<JS::Symbol*>(readStubWord(stubOffset));
+}
+BaseScript* CacheIRCloner::getBaseScriptField(uint32_t stubOffset) {
+ return reinterpret_cast<BaseScript*>(readStubWord(stubOffset));
+}
+JitCode* CacheIRCloner::getJitCodeField(uint32_t stubOffset) {
+ return reinterpret_cast<JitCode*>(readStubWord(stubOffset));
+}
+uint32_t CacheIRCloner::getRawInt32Field(uint32_t stubOffset) {
+ return uint32_t(reinterpret_cast<uintptr_t>(readStubWord(stubOffset)));
+}
+const void* CacheIRCloner::getRawPointerField(uint32_t stubOffset) {
+ return reinterpret_cast<const void*>(readStubWord(stubOffset));
+}
+uint64_t CacheIRCloner::getRawInt64Field(uint32_t stubOffset) {
+ return static_cast<uint64_t>(readStubInt64(stubOffset));
+}
+gc::AllocSite* CacheIRCloner::getAllocSiteField(uint32_t stubOffset) {
+ return reinterpret_cast<gc::AllocSite*>(readStubWord(stubOffset));
+}
+
+jsid CacheIRCloner::getIdField(uint32_t stubOffset) {
+ return jsid::fromRawBits(readStubWord(stubOffset));
+}
+const Value CacheIRCloner::getValueField(uint32_t stubOffset) {
+ return Value::fromRawBits(uint64_t(readStubInt64(stubOffset)));
+}
+double CacheIRCloner::getDoubleField(uint32_t stubOffset) {
+ uint64_t bits = uint64_t(readStubInt64(stubOffset));
+ return mozilla::BitwiseCast<double>(bits);
+}
+
+IRGenerator::IRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
+ CacheKind cacheKind, ICState state)
+ : writer(cx),
+ cx_(cx),
+ script_(script),
+ pc_(pc),
+ cacheKind_(cacheKind),
+ mode_(state.mode()),
+ isFirstStub_(state.newStubIsFirstStub()) {}
+
+GetPropIRGenerator::GetPropIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ CacheKind cacheKind, HandleValue val,
+ HandleValue idVal)
+ : IRGenerator(cx, script, pc, cacheKind, state), val_(val), idVal_(idVal) {}
+
+static void EmitLoadSlotResult(CacheIRWriter& writer, ObjOperandId holderId,
+ NativeObject* holder, PropertyInfo prop) {
+ if (holder->isFixedSlot(prop.slot())) {
+ writer.loadFixedSlotResult(holderId,
+ NativeObject::getFixedSlotOffset(prop.slot()));
+ } else {
+ size_t dynamicSlotOffset =
+ holder->dynamicSlotIndex(prop.slot()) * sizeof(Value);
+ writer.loadDynamicSlotResult(holderId, dynamicSlotOffset);
+ }
+}
+
+// DOM proxies
+// -----------
+//
+// DOM proxies are proxies that are used to implement various DOM objects like
+// HTMLDocument and NodeList. DOM proxies may have an expando object - a native
+// object that stores extra properties added to the object. The following
+// CacheIR instructions are only used with DOM proxies:
+//
+// * LoadDOMExpandoValue: returns the Value in the proxy's expando slot. This
+// returns either an UndefinedValue (no expando), ObjectValue (the expando
+// object), or PrivateValue(ExpandoAndGeneration*).
+//
+// * LoadDOMExpandoValueGuardGeneration: guards the Value in the proxy's expando
+// slot is the same PrivateValue(ExpandoAndGeneration*), then guards on its
+// generation, then returns expandoAndGeneration->expando. This Value is
+// either an UndefinedValue or ObjectValue.
+//
+// * LoadDOMExpandoValueIgnoreGeneration: assumes the Value in the proxy's
+// expando slot is a PrivateValue(ExpandoAndGeneration*), unboxes it, and
+// returns the expandoAndGeneration->expando Value.
+//
+// * GuardDOMExpandoMissingOrGuardShape: takes an expando Value as input, then
+// guards it's either UndefinedValue or an object with the expected shape.
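+//
+// As a rough JS-level illustration (the property name is made up), an expando
+// typically comes into existence when script adds its own property to a DOM
+// object:
+//
+//   document.myFlag = true;  // stored on the expando object
+//   document.title;          // implemented by the proxy itself
+//
+// The instructions above let ICs read expando-backed properties, or prove
+// their absence, without falling back to a generic proxy get.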
+
+enum class ProxyStubType {
+ None,
+ DOMExpando,
+ DOMShadowed,
+ DOMUnshadowed,
+ Generic
+};
+
+static bool IsCacheableDOMProxy(ProxyObject* obj) {
+ const BaseProxyHandler* handler = obj->handler();
+ if (handler->family() != GetDOMProxyHandlerFamily()) {
+ return false;
+ }
+
+ // Some DOM proxies have dynamic prototypes. We can't really cache those very
+ // well.
+ return obj->hasStaticPrototype();
+}
+
+static ProxyStubType GetProxyStubType(JSContext* cx, HandleObject obj,
+ HandleId id) {
+ if (!obj->is<ProxyObject>()) {
+ return ProxyStubType::None;
+ }
+ auto proxy = obj.as<ProxyObject>();
+
+ if (!IsCacheableDOMProxy(proxy)) {
+ return ProxyStubType::Generic;
+ }
+
+ DOMProxyShadowsResult shadows = GetDOMProxyShadowsCheck()(cx, proxy, id);
+ if (shadows == DOMProxyShadowsResult::ShadowCheckFailed) {
+ cx->clearPendingException();
+ return ProxyStubType::None;
+ }
+
+ if (DOMProxyIsShadowing(shadows)) {
+ if (shadows == DOMProxyShadowsResult::ShadowsViaDirectExpando ||
+ shadows == DOMProxyShadowsResult::ShadowsViaIndirectExpando) {
+ return ProxyStubType::DOMExpando;
+ }
+ return ProxyStubType::DOMShadowed;
+ }
+
+ MOZ_ASSERT(shadows == DOMProxyShadowsResult::DoesntShadow ||
+ shadows == DOMProxyShadowsResult::DoesntShadowUnique);
+ return ProxyStubType::DOMUnshadowed;
+}
+
+static bool ValueToNameOrSymbolId(JSContext* cx, HandleValue idVal,
+ MutableHandleId id, bool* nameOrSymbol) {
+ *nameOrSymbol = false;
+
+ if (!idVal.isString() && !idVal.isSymbol() && !idVal.isUndefined() &&
+ !idVal.isNull()) {
+ return true;
+ }
+
+ if (!PrimitiveValueToId<CanGC>(cx, idVal, id)) {
+ return false;
+ }
+
+ if (!id.isAtom() && !id.isSymbol()) {
+ id.set(JS::PropertyKey::Void());
+ return true;
+ }
+
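+  // Don't treat atoms that are integer indexes (e.g. "3") as property names.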
+ if (id.isAtom() && id.toAtom()->isIndex()) {
+ id.set(JS::PropertyKey::Void());
+ return true;
+ }
+
+ *nameOrSymbol = true;
+ return true;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ if (cacheKind_ != CacheKind::GetProp) {
+ MOZ_ASSERT_IF(cacheKind_ == CacheKind::GetPropSuper,
+ getSuperReceiverValueId().id() == 1);
+ MOZ_ASSERT_IF(cacheKind_ != CacheKind::GetPropSuper,
+ getElemKeyValueId().id() == 1);
+ writer.setInputOperandId(1);
+ }
+ if (cacheKind_ == CacheKind::GetElemSuper) {
+ MOZ_ASSERT(getSuperReceiverValueId().id() == 2);
+ writer.setInputOperandId(2);
+ }
+
+ RootedId id(cx_);
+ bool nameOrSymbol;
+ if (!ValueToNameOrSymbolId(cx_, idVal_, &id, &nameOrSymbol)) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+
+  // |super.prop| getter calls use a |this| value that differs from the lookup
+  // object.
+ ValOperandId receiverId = isSuper() ? getSuperReceiverValueId() : valId;
+
+ if (val_.isObject()) {
+ RootedObject obj(cx_, &val_.toObject());
+ ObjOperandId objId = writer.guardToObject(valId);
+ if (nameOrSymbol) {
+ TRY_ATTACH(tryAttachObjectLength(obj, objId, id));
+ TRY_ATTACH(tryAttachTypedArray(obj, objId, id));
+ TRY_ATTACH(tryAttachDataView(obj, objId, id));
+ TRY_ATTACH(tryAttachArrayBufferMaybeShared(obj, objId, id));
+ TRY_ATTACH(tryAttachRegExp(obj, objId, id));
+ TRY_ATTACH(tryAttachMap(obj, objId, id));
+ TRY_ATTACH(tryAttachSet(obj, objId, id));
+ TRY_ATTACH(tryAttachNative(obj, objId, id, receiverId));
+ TRY_ATTACH(tryAttachModuleNamespace(obj, objId, id));
+ TRY_ATTACH(tryAttachWindowProxy(obj, objId, id));
+ TRY_ATTACH(tryAttachCrossCompartmentWrapper(obj, objId, id));
+      TRY_ATTACH(
+          tryAttachXrayCrossCompartmentWrapper(obj, objId, id, receiverId));
+ TRY_ATTACH(tryAttachFunction(obj, objId, id));
+ TRY_ATTACH(tryAttachArgumentsObjectIterator(obj, objId, id));
+ TRY_ATTACH(tryAttachArgumentsObjectCallee(obj, objId, id));
+ TRY_ATTACH(tryAttachProxy(obj, objId, id, receiverId));
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(cacheKind_ == CacheKind::GetElem ||
+ cacheKind_ == CacheKind::GetElemSuper);
+
+ TRY_ATTACH(tryAttachProxyElement(obj, objId));
+ TRY_ATTACH(tryAttachTypedArrayElement(obj, objId));
+
+ uint32_t index;
+ Int32OperandId indexId;
+ if (maybeGuardInt32Index(idVal_, getElemKeyValueId(), &index, &indexId)) {
+ TRY_ATTACH(tryAttachDenseElement(obj, objId, index, indexId));
+ TRY_ATTACH(tryAttachDenseElementHole(obj, objId, index, indexId));
+ TRY_ATTACH(tryAttachSparseElement(obj, objId, index, indexId));
+ TRY_ATTACH(tryAttachArgumentsObjectArg(obj, objId, index, indexId));
+ TRY_ATTACH(tryAttachArgumentsObjectArgHole(obj, objId, index, indexId));
+      TRY_ATTACH(
+          tryAttachGenericElement(obj, objId, index, indexId, receiverId));
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ if (nameOrSymbol) {
+ TRY_ATTACH(tryAttachPrimitive(valId, id));
+ TRY_ATTACH(tryAttachStringLength(valId, id));
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ if (idVal_.isInt32()) {
+ ValOperandId indexId = getElemKeyValueId();
+ TRY_ATTACH(tryAttachStringChar(valId, indexId));
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+#ifdef DEBUG
+// Any property lookups performed when trying to attach ICs must be pure, i.e.
+// must use LookupPropertyPure() or similar functions. Pure lookups are
+// guaranteed to never modify the prototype chain. This ensures that the holder
+// object can always be found on the prototype chain.
+static bool IsCacheableProtoChain(NativeObject* obj, NativeObject* holder) {
+ while (obj != holder) {
+ JSObject* proto = obj->staticPrototype();
+ if (!proto || !proto->is<NativeObject>()) {
+ return false;
+ }
+ obj = &proto->as<NativeObject>();
+ }
+ return true;
+}
+#endif
+
+static bool IsCacheableGetPropSlot(NativeObject* obj, NativeObject* holder,
+ PropertyInfo prop) {
+ MOZ_ASSERT(IsCacheableProtoChain(obj, holder));
+
+ return prop.isDataProperty();
+}
+
+enum class NativeGetPropKind {
+ None,
+ Missing,
+ Slot,
+ NativeGetter,
+ ScriptedGetter,
+};
+
+static NativeGetPropKind IsCacheableGetPropCall(NativeObject* obj,
+ NativeObject* holder,
+ PropertyInfo prop) {
+ MOZ_ASSERT(IsCacheableProtoChain(obj, holder));
+
+ if (!prop.isAccessorProperty()) {
+ return NativeGetPropKind::None;
+ }
+
+ JSObject* getterObject = holder->getGetter(prop);
+ if (!getterObject || !getterObject->is<JSFunction>()) {
+ return NativeGetPropKind::None;
+ }
+
+ JSFunction& getter = getterObject->as<JSFunction>();
+
+ if (getter.isClassConstructor()) {
+ return NativeGetPropKind::None;
+ }
+
+ // Scripted functions and natives with JIT entry can use the scripted path.
+ if (getter.hasJitEntry()) {
+ return NativeGetPropKind::ScriptedGetter;
+ }
+
+ MOZ_ASSERT(getter.isNativeWithoutJitEntry());
+ return NativeGetPropKind::NativeGetter;
+}
+
+static bool CheckHasNoSuchOwnProperty(JSContext* cx, JSObject* obj, jsid id) {
+ if (!obj->is<NativeObject>()) {
+ return false;
+ }
+ // Don't handle objects with resolve hooks.
+ if (ClassMayResolveId(cx->names(), obj->getClass(), id, obj)) {
+ return false;
+ }
+ if (obj->as<NativeObject>().contains(cx, id)) {
+ return false;
+ }
+ return true;
+}
+
+static bool CheckHasNoSuchProperty(JSContext* cx, JSObject* obj, jsid id) {
+ JSObject* curObj = obj;
+ do {
+ if (!CheckHasNoSuchOwnProperty(cx, curObj, id)) {
+ return false;
+ }
+
+ curObj = curObj->staticPrototype();
+ } while (curObj);
+
+ return true;
+}
+
+static bool IsCacheableNoProperty(JSContext* cx, NativeObject* obj,
+ NativeObject* holder, jsid id,
+ jsbytecode* pc) {
+ MOZ_ASSERT(!holder);
+
+ // If we're doing a name lookup, we have to throw a ReferenceError.
+ if (JSOp(*pc) == JSOp::GetBoundName) {
+ return false;
+ }
+
+ return CheckHasNoSuchProperty(cx, obj, id);
+}
+
+static NativeGetPropKind CanAttachNativeGetProp(JSContext* cx, JSObject* obj,
+ PropertyKey id,
+ NativeObject** holder,
+ Maybe<PropertyInfo>* propInfo,
+ jsbytecode* pc) {
+ MOZ_ASSERT(id.isString() || id.isSymbol());
+ MOZ_ASSERT(!*holder);
+
+  // The lookup needs to be universally pure, otherwise we risk calling hooks
+  // out of turn. We don't mind doing this even when purity isn't required,
+  // because we only miss out on shape hashification, which is just a temporary
+  // perf cost. The limits were arbitrarily set, anyway.
+ NativeObject* baseHolder = nullptr;
+ PropertyResult prop;
+ if (!LookupPropertyPure(cx, obj, id, &baseHolder, &prop)) {
+ return NativeGetPropKind::None;
+ }
+ auto* nobj = &obj->as<NativeObject>();
+
+ if (prop.isNativeProperty()) {
+ MOZ_ASSERT(baseHolder);
+ *holder = baseHolder;
+ *propInfo = mozilla::Some(prop.propertyInfo());
+
+ if (IsCacheableGetPropSlot(nobj, *holder, propInfo->ref())) {
+ return NativeGetPropKind::Slot;
+ }
+
+ return IsCacheableGetPropCall(nobj, *holder, propInfo->ref());
+ }
+
+ if (!prop.isFound()) {
+ if (IsCacheableNoProperty(cx, nobj, *holder, id, pc)) {
+ return NativeGetPropKind::Missing;
+ }
+ }
+
+ return NativeGetPropKind::None;
+}
+
+static void GuardReceiverProto(CacheIRWriter& writer, NativeObject* obj,
+ ObjOperandId objId) {
+ // Note: we guard on the actual prototype and not on the shape because this is
+ // used for sparse elements where we expect shape changes.
+
+ if (JSObject* proto = obj->staticPrototype()) {
+ writer.guardProto(objId, proto);
+ } else {
+ writer.guardNullProto(objId);
+ }
+}
+
+// Guard that a given object has the same class and the same own properties
+// (excluding dense elements and dynamic properties).
+static void TestMatchingNativeReceiver(CacheIRWriter& writer, NativeObject* obj,
+ ObjOperandId objId) {
+ writer.guardShapeForOwnProperties(objId, obj->shape());
+}
+
+// Similar to |TestMatchingNativeReceiver|, but specialized for ProxyObject.
+static void TestMatchingProxyReceiver(CacheIRWriter& writer, ProxyObject* obj,
+ ObjOperandId objId) {
+ writer.guardShapeForClass(objId, obj->shape());
+}
+
+static void GeneratePrototypeGuards(CacheIRWriter& writer, JSObject* obj,
+ NativeObject* holder, ObjOperandId objId) {
+ // Assuming target property is on |holder|, generate appropriate guards to
+ // ensure |holder| is still on the prototype chain of |obj| and we haven't
+ // introduced any shadowing definitions.
+ //
+ // For each item in the proto chain before holder, we must ensure that
+ // [[GetPrototypeOf]] still has the expected result, and that
+ // [[GetOwnProperty]] has no definition of the target property.
+ //
+ //
+ // [SMDOC] Shape Teleporting Optimization
+ // --------------------------------------
+ //
+ // Starting with the assumption (and guideline to developers) that mutating
+ // prototypes is an uncommon and fair-to-penalize operation we move cost
+ // from the access side to the mutation side.
+ //
+ // Consider the following proto chain, with B defining a property 'x':
+ //
+ // D -> C -> B{x: 3} -> A -> null
+ //
+ // When accessing |D.x| we refer to D as the "receiver", and B as the
+ // "holder". To optimize this access we need to ensure that neither D nor C
+ // has since defined a shadowing property 'x'. Since C is a prototype that
+ // we assume is rarely mutated we would like to avoid checking each time if
+ // new properties are added. To do this we require that whenever C starts
+ // shadowing a property on its proto chain, we invalidate (and opt out of) the
+ // teleporting optimization by setting the InvalidatedTeleporting flag on the
+ // object we're shadowing, triggering a shape change of that object. As a
+ // result, checking the shape of D and B is sufficient. Note that we do not
+ // care if the shape or properties of A change since the lookup of 'x' will
+ // stop at B.
+ //
+ // The second condition we must verify is that the prototype chain was not
+ // mutated. The same mechanism as above is used. When the prototype link is
+ // changed, we generate a new shape for the object. If the object whose
+ // link we are mutating is itself a prototype, we regenerate shapes down
+ // the chain by setting the InvalidatedTeleporting flag on them. This means
+ // the same two shape checks as above are sufficient.
+ //
+ // Once the InvalidatedTeleporting flag is set, it means the shape will no
+ // longer be changed by ReshapeForProtoMutation and ReshapeForShadowedProp.
+ // In this case we can no longer apply the optimization.
+ //
+ // See:
+ // - ReshapeForProtoMutation
+ // - ReshapeForShadowedProp
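+  //
+  // Illustrative JS-level sketch (simplified): once a stub for |D.x| relies
+  // on this optimization, a mutation such as
+  //
+  //   C.x = 1;                       // C starts shadowing 'x'
+  //   Object.setPrototypeOf(C, {});  // or: a proto link on the chain changes
+  //
+  // triggers the reshaping described above, so at least one of the shape
+  // guards on D or B no longer passes and the stub stops matching.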
+
+ MOZ_ASSERT(holder);
+ MOZ_ASSERT(obj != holder);
+
+  // Receiver guards (see TestMatchingReceiver) ensure the receiver's proto is
+  // unchanged, so peel off the receiver.
+ JSObject* pobj = obj->staticPrototype();
+ MOZ_ASSERT(pobj->isUsedAsPrototype());
+
+ // If teleporting is supported for this holder, we are done.
+ if (!holder->hasInvalidatedTeleporting()) {
+ return;
+ }
+
+ // If already at the holder, no further proto checks are needed.
+ if (pobj == holder) {
+ return;
+ }
+
+ // Synchronize pobj and protoId.
+ MOZ_ASSERT(pobj == obj->staticPrototype());
+ ObjOperandId protoId = writer.loadProto(objId);
+
+ // Shape guard each prototype object between receiver and holder. This guards
+ // against both proto changes and shadowing properties.
+ while (pobj != holder) {
+ writer.guardShape(protoId, pobj->shape());
+
+ pobj = pobj->staticPrototype();
+ protoId = writer.loadProto(protoId);
+ }
+}
+
+static void GeneratePrototypeHoleGuards(CacheIRWriter& writer,
+ NativeObject* obj, ObjOperandId objId,
+ bool alwaysGuardFirstProto) {
+ if (alwaysGuardFirstProto) {
+ GuardReceiverProto(writer, obj, objId);
+ }
+
+ JSObject* pobj = obj->staticPrototype();
+ while (pobj) {
+ ObjOperandId protoId = writer.loadObject(pobj);
+
+ // Make sure the shape matches, to ensure the proto is unchanged and to
+ // avoid non-dense elements or anything else that is being checked by
+ // CanAttachDenseElementHole.
+ MOZ_ASSERT(pobj->is<NativeObject>());
+ writer.guardShape(protoId, pobj->shape());
+
+ // Also make sure there are no dense elements.
+ writer.guardNoDenseElements(protoId);
+
+ pobj = pobj->staticPrototype();
+ }
+}
+
+// Similar to |TestMatchingReceiver|, but for the holder object (when it
+// differs from the receiver). The holder may also be the expando of the
+// receiver if it exists.
+static void TestMatchingHolder(CacheIRWriter& writer, NativeObject* obj,
+ ObjOperandId objId) {
+ // The GeneratePrototypeGuards + TestMatchingHolder checks only support
+ // prototype chains composed of NativeObject (excluding the receiver
+ // itself).
+ writer.guardShapeForOwnProperties(objId, obj->shape());
+}
+
+enum class IsCrossCompartment { No, Yes };
+
+// Emit a shape guard for all objects on the proto chain. This does NOT include
+// the receiver; callers must ensure the receiver's proto is the first proto by
+// either emitting a shape guard or a prototype guard for |objId|.
+//
+// Note: this relies on shape implying proto.
+template <IsCrossCompartment MaybeCrossCompartment = IsCrossCompartment::No>
+static void ShapeGuardProtoChain(CacheIRWriter& writer, NativeObject* obj,
+ ObjOperandId objId) {
+ uint32_t depth = 0;
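+  // Cap on how many proto objects we bake directly into the stub data (see
+  // the comment below).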
+ static const uint32_t MAX_CACHED_LOADS = 4;
+ ObjOperandId receiverObjId = objId;
+
+ while (true) {
+ JSObject* proto = obj->staticPrototype();
+ if (!proto) {
+ return;
+ }
+
+ obj = &proto->as<NativeObject>();
+
+ // After guarding the shape of an object, we can safely bake that
+ // object's proto into the stub data. Compared to LoadProto, this
+ // takes one load instead of three (object -> shape -> baseshape
+ // -> proto). We cap the depth to avoid bloating the size of the
+ // stub data. To avoid compartment mismatch, we skip this optimization
+ // in the cross-compartment case.
+ if (depth < MAX_CACHED_LOADS &&
+ MaybeCrossCompartment == IsCrossCompartment::No) {
+ objId = writer.loadProtoObject(obj, receiverObjId);
+ } else {
+ objId = writer.loadProto(objId);
+ }
+ depth++;
+
+ writer.guardShape(objId, obj->shape());
+ }
+}
+
+// For cross compartment guards we shape-guard the prototype chain to avoid
+// referencing the holder object.
+//
+// This peels off the first layer because the obj == holder case has already
+// been excluded by the caller.
+//
+// Returns the holder's OperandId.
+static ObjOperandId ShapeGuardProtoChainForCrossCompartmentHolder(
+ CacheIRWriter& writer, NativeObject* obj, ObjOperandId objId,
+ NativeObject* holder) {
+ MOZ_ASSERT(obj != holder);
+ MOZ_ASSERT(holder);
+ while (true) {
+ MOZ_ASSERT(obj->staticPrototype());
+ obj = &obj->staticPrototype()->as<NativeObject>();
+
+ objId = writer.loadProto(objId);
+ if (obj == holder) {
+ TestMatchingHolder(writer, obj, objId);
+ return objId;
+ }
+ writer.guardShapeForOwnProperties(objId, obj->shape());
+ }
+}
+
+// Emit guards for reading a data property on |holder|. Returns the holder's
+// OperandId.
+template <IsCrossCompartment MaybeCrossCompartment = IsCrossCompartment::No>
+static ObjOperandId EmitReadSlotGuard(CacheIRWriter& writer, NativeObject* obj,
+ NativeObject* holder,
+ ObjOperandId objId) {
+ MOZ_ASSERT(holder);
+ TestMatchingNativeReceiver(writer, obj, objId);
+
+ if (obj == holder) {
+ return objId;
+ }
+
+ if (MaybeCrossCompartment == IsCrossCompartment::Yes) {
+ // Guard proto chain integrity.
+ // We use a variant of guards that avoid baking in any cross-compartment
+ // object pointers.
+ return ShapeGuardProtoChainForCrossCompartmentHolder(writer, obj, objId,
+ holder);
+ }
+
+ // Guard proto chain integrity.
+ GeneratePrototypeGuards(writer, obj, holder, objId);
+
+ // Guard on the holder's shape.
+ ObjOperandId holderId = writer.loadObject(holder);
+ TestMatchingHolder(writer, holder, holderId);
+ return holderId;
+}
+
+template <IsCrossCompartment MaybeCrossCompartment = IsCrossCompartment::No>
+static void EmitMissingPropGuard(CacheIRWriter& writer, NativeObject* obj,
+ ObjOperandId objId) {
+ TestMatchingNativeReceiver(writer, obj, objId);
+
+ // The property does not exist. Guard on everything in the prototype
+ // chain. This is guaranteed to see only Native objects because of
+ // CanAttachNativeGetProp().
+ ShapeGuardProtoChain<MaybeCrossCompartment>(writer, obj, objId);
+}
+
+template <IsCrossCompartment MaybeCrossCompartment = IsCrossCompartment::No>
+static void EmitReadSlotResult(CacheIRWriter& writer, NativeObject* obj,
+ NativeObject* holder, PropertyInfo prop,
+ ObjOperandId objId) {
+ MOZ_ASSERT(holder);
+
+ ObjOperandId holderId =
+ EmitReadSlotGuard<MaybeCrossCompartment>(writer, obj, holder, objId);
+
+ MOZ_ASSERT(holderId.valid());
+ EmitLoadSlotResult(writer, holderId, holder, prop);
+}
+
+template <IsCrossCompartment MaybeCrossCompartment = IsCrossCompartment::No>
+static void EmitMissingPropResult(CacheIRWriter& writer, NativeObject* obj,
+ ObjOperandId objId) {
+ EmitMissingPropGuard<MaybeCrossCompartment>(writer, obj, objId);
+ writer.loadUndefinedResult();
+}
+
+static void EmitCallGetterResultNoGuards(JSContext* cx, CacheIRWriter& writer,
+ NativeGetPropKind kind,
+ NativeObject* obj,
+ NativeObject* holder,
+ PropertyInfo prop,
+ ValOperandId receiverId) {
+ MOZ_ASSERT(IsCacheableGetPropCall(obj, holder, prop) == kind);
+
+ JSFunction* target = &holder->getGetter(prop)->as<JSFunction>();
+ bool sameRealm = cx->realm() == target->realm();
+
+ switch (kind) {
+ case NativeGetPropKind::NativeGetter: {
+ writer.callNativeGetterResult(receiverId, target, sameRealm);
+ writer.returnFromIC();
+ break;
+ }
+ case NativeGetPropKind::ScriptedGetter: {
+ writer.callScriptedGetterResult(receiverId, target, sameRealm);
+ writer.returnFromIC();
+ break;
+ }
+ default:
+ // CanAttachNativeGetProp guarantees that the getter is either a native or
+ // a scripted function.
+ MOZ_ASSERT_UNREACHABLE("Can't attach getter");
+ break;
+ }
+}
+
+// See the SMDOC comment in vm/GetterSetter.h for more info on Getter/Setter
+// properties
+static void EmitGuardGetterSetterSlot(CacheIRWriter& writer,
+ NativeObject* holder, PropertyInfo prop,
+ ObjOperandId holderId,
+ bool holderIsConstant = false) {
+  // If the holder is guaranteed to be the same object, and none of its slots
+  // holding a GetterSetter has ever been mutated or deleted, its Shape will
+  // change when that first happens, so we don't need to guard on the
+  // GetterSetter itself.
+ if (holderIsConstant && !holder->hadGetterSetterChange()) {
+ return;
+ }
+
+ size_t slot = prop.slot();
+ Value slotVal = holder->getSlot(slot);
+ MOZ_ASSERT(slotVal.isPrivateGCThing());
+
+ if (holder->isFixedSlot(slot)) {
+ size_t offset = NativeObject::getFixedSlotOffset(slot);
+ writer.guardFixedSlotValue(holderId, offset, slotVal);
+ } else {
+ size_t offset = holder->dynamicSlotIndex(slot) * sizeof(Value);
+ writer.guardDynamicSlotValue(holderId, offset, slotVal);
+ }
+}
+
+static void EmitCallGetterResultGuards(CacheIRWriter& writer, NativeObject* obj,
+ NativeObject* holder, HandleId id,
+ PropertyInfo prop, ObjOperandId objId,
+ ICState::Mode mode) {
+ // Use the megamorphic guard if we're in megamorphic mode, except if |obj|
+ // is a Window as GuardHasGetterSetter doesn't support this yet (Window may
+ // require outerizing).
+
+ MOZ_ASSERT(holder->containsPure(id, prop));
+
+ if (mode == ICState::Mode::Specialized || IsWindow(obj)) {
+ TestMatchingNativeReceiver(writer, obj, objId);
+
+ if (obj != holder) {
+ GeneratePrototypeGuards(writer, obj, holder, objId);
+
+ // Guard on the holder's shape.
+ ObjOperandId holderId = writer.loadObject(holder);
+ TestMatchingHolder(writer, holder, holderId);
+
+ EmitGuardGetterSetterSlot(writer, holder, prop, holderId,
+ /* holderIsConstant = */ true);
+ } else {
+ EmitGuardGetterSetterSlot(writer, holder, prop, objId);
+ }
+ } else {
+ GetterSetter* gs = holder->getGetterSetter(prop);
+ writer.guardHasGetterSetter(objId, id, gs);
+ }
+}
+
+static void EmitCallGetterResult(JSContext* cx, CacheIRWriter& writer,
+ NativeGetPropKind kind, NativeObject* obj,
+ NativeObject* holder, HandleId id,
+ PropertyInfo prop, ObjOperandId objId,
+ ValOperandId receiverId, ICState::Mode mode) {
+ EmitCallGetterResultGuards(writer, obj, holder, id, prop, objId, mode);
+ EmitCallGetterResultNoGuards(cx, writer, kind, obj, holder, prop, receiverId);
+}
+
+static bool CanAttachDOMCall(JSContext* cx, JSJitInfo::OpType type,
+ JSObject* obj, JSFunction* fun,
+ ICState::Mode mode) {
+ MOZ_ASSERT(type == JSJitInfo::Getter || type == JSJitInfo::Setter ||
+ type == JSJitInfo::Method);
+
+ if (mode != ICState::Mode::Specialized) {
+ return false;
+ }
+
+ if (!fun->hasJitInfo()) {
+ return false;
+ }
+
+ if (cx->realm() != fun->realm()) {
+ return false;
+ }
+
+ const JSJitInfo* jitInfo = fun->jitInfo();
+ if (jitInfo->type() != type) {
+ return false;
+ }
+
+ MOZ_ASSERT_IF(IsWindow(obj), !jitInfo->needsOuterizedThisObject());
+
+ const JSClass* clasp = obj->getClass();
+ if (!clasp->isDOMClass()) {
+ return false;
+ }
+
+ if (type != JSJitInfo::Method && clasp->isProxyObject()) {
+ return false;
+ }
+
+ // Ion codegen expects DOM_OBJECT_SLOT to be a fixed slot in LoadDOMPrivate.
+ // It can be a dynamic slot if we transplanted this reflector object with a
+ // proxy.
+ if (obj->is<NativeObject>() && obj->as<NativeObject>().numFixedSlots() == 0) {
+ return false;
+ }
+
+ // Tell the analysis the |DOMInstanceClassHasProtoAtDepth| hook can't GC.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ DOMInstanceClassHasProtoAtDepth instanceChecker =
+ cx->runtime()->DOMcallbacks->instanceClassMatchesProto;
+ return instanceChecker(clasp, jitInfo->protoID, jitInfo->depth);
+}
+
+static bool CanAttachDOMGetterSetter(JSContext* cx, JSJitInfo::OpType type,
+ NativeObject* obj, NativeObject* holder,
+ PropertyInfo prop, ICState::Mode mode) {
+ MOZ_ASSERT(type == JSJitInfo::Getter || type == JSJitInfo::Setter);
+
+ JSObject* accessor = type == JSJitInfo::Getter ? holder->getGetter(prop)
+ : holder->getSetter(prop);
+ JSFunction* fun = &accessor->as<JSFunction>();
+
+ return CanAttachDOMCall(cx, type, obj, fun, mode);
+}
+
+static void EmitCallDOMGetterResultNoGuards(CacheIRWriter& writer,
+ NativeObject* holder,
+ PropertyInfo prop,
+ ObjOperandId objId) {
+ JSFunction* getter = &holder->getGetter(prop)->as<JSFunction>();
+ writer.callDOMGetterResult(objId, getter->jitInfo());
+ writer.returnFromIC();
+}
+
+static void EmitCallDOMGetterResult(JSContext* cx, CacheIRWriter& writer,
+ NativeObject* obj, NativeObject* holder,
+ HandleId id, PropertyInfo prop,
+ ObjOperandId objId) {
+ // Note: this relies on EmitCallGetterResultGuards emitting a shape guard
+ // for specialized stubs.
+ // The shape guard ensures the receiver's Class is valid for this DOM getter.
+ EmitCallGetterResultGuards(writer, obj, holder, id, prop, objId,
+ ICState::Mode::Specialized);
+ EmitCallDOMGetterResultNoGuards(writer, holder, prop, objId);
+}
+
+void GetPropIRGenerator::attachMegamorphicNativeSlot(ObjOperandId objId,
+ jsid id) {
+ MOZ_ASSERT(mode_ == ICState::Mode::Megamorphic);
+
+ // We don't support GetBoundName because environment objects have
+ // lookupProperty hooks and GetBoundName is usually not megamorphic.
+ MOZ_ASSERT(JSOp(*pc_) != JSOp::GetBoundName);
+
+ if (cacheKind_ == CacheKind::GetProp ||
+ cacheKind_ == CacheKind::GetPropSuper) {
+ writer.megamorphicLoadSlotResult(objId, id);
+ } else {
+ MOZ_ASSERT(cacheKind_ == CacheKind::GetElem ||
+ cacheKind_ == CacheKind::GetElemSuper);
+ writer.megamorphicLoadSlotByValueResult(objId, getElemKeyValueId());
+ }
+ writer.returnFromIC();
+
+ trackAttached("GetProp.MegamorphicNativeSlot");
+}
+
+AttachDecision GetPropIRGenerator::tryAttachNative(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id,
+ ValOperandId receiverId) {
+ Maybe<PropertyInfo> prop;
+ NativeObject* holder = nullptr;
+
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, obj, id, &holder, &prop, pc_);
+ switch (kind) {
+ case NativeGetPropKind::None:
+ return AttachDecision::NoAction;
+ case NativeGetPropKind::Missing:
+ case NativeGetPropKind::Slot: {
+ auto* nobj = &obj->as<NativeObject>();
+
+ if (mode_ == ICState::Mode::Megamorphic &&
+ JSOp(*pc_) != JSOp::GetBoundName) {
+ attachMegamorphicNativeSlot(objId, id);
+ return AttachDecision::Attach;
+ }
+
+ maybeEmitIdGuard(id);
+ if (kind == NativeGetPropKind::Slot) {
+ EmitReadSlotResult(writer, nobj, holder, *prop, objId);
+ writer.returnFromIC();
+ trackAttached("GetProp.NativeSlot");
+ } else {
+ EmitMissingPropResult(writer, nobj, objId);
+ writer.returnFromIC();
+ trackAttached("GetProp.Missing");
+ }
+ return AttachDecision::Attach;
+ }
+ case NativeGetPropKind::ScriptedGetter:
+ case NativeGetPropKind::NativeGetter: {
+ auto* nobj = &obj->as<NativeObject>();
+
+ maybeEmitIdGuard(id);
+
+ if (!isSuper() && CanAttachDOMGetterSetter(cx_, JSJitInfo::Getter, nobj,
+ holder, *prop, mode_)) {
+ EmitCallDOMGetterResult(cx_, writer, nobj, holder, id, *prop, objId);
+
+ trackAttached("GetProp.DOMGetter");
+ return AttachDecision::Attach;
+ }
+
+ EmitCallGetterResult(cx_, writer, kind, nobj, holder, id, *prop, objId,
+ receiverId, mode_);
+
+ trackAttached("GetProp.NativeGetter");
+ return AttachDecision::Attach;
+ }
+ }
+
+ MOZ_CRASH("Bad NativeGetPropKind");
+}
+
+// Returns whether obj is a WindowProxy wrapping the script's global.
+static bool IsWindowProxyForScriptGlobal(JSScript* script, JSObject* obj) {
+ if (!IsWindowProxy(obj)) {
+ return false;
+ }
+
+ MOZ_ASSERT(obj->getClass() ==
+ script->runtimeFromMainThread()->maybeWindowProxyClass());
+
+ JSObject* window = ToWindowIfWindowProxy(obj);
+
+ // Ion relies on the WindowProxy's group changing (and the group getting
+ // marked as having unknown properties) on navigation. If we ever stop
+ // transplanting same-compartment WindowProxies, this assert will fail and we
+ // need to fix that code.
+ MOZ_ASSERT(window == &obj->nonCCWGlobal());
+
+ // This must be a WindowProxy for a global in this compartment. Else it would
+ // be a cross-compartment wrapper and IsWindowProxy returns false for
+ // those.
+ MOZ_ASSERT(script->compartment() == obj->compartment());
+
+ // Only optimize lookups on the WindowProxy for the current global. Other
+ // WindowProxies in the compartment may require security checks (based on
+ // mutable document.domain). See bug 1516775.
+ return window == &script->global();
+}
+
+// Guards objId is a WindowProxy for windowObj. Returns the window's operand id.
+static ObjOperandId GuardAndLoadWindowProxyWindow(CacheIRWriter& writer,
+ ObjOperandId objId,
+ GlobalObject* windowObj) {
+ writer.guardClass(objId, GuardClassKind::WindowProxy);
+ ObjOperandId windowObjId = writer.loadWrapperTarget(objId);
+ writer.guardSpecificObject(windowObjId, windowObj);
+ return windowObjId;
+}
+
+// Whether a getter/setter on the global should have the WindowProxy as |this|
+// value instead of the Window (the global object). This always returns true for
+// scripted functions.
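+// (Scripted functions never have a JSJitInfo, so the !hasJitInfo() check below
+// is true for them.)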
+static bool GetterNeedsWindowProxyThis(NativeObject* holder,
+ PropertyInfo prop) {
+ JSFunction* callee = &holder->getGetter(prop)->as<JSFunction>();
+ return !callee->hasJitInfo() || callee->jitInfo()->needsOuterizedThisObject();
+}
+static bool SetterNeedsWindowProxyThis(NativeObject* holder,
+ PropertyInfo prop) {
+ JSFunction* callee = &holder->getSetter(prop)->as<JSFunction>();
+ return !callee->hasJitInfo() || callee->jitInfo()->needsOuterizedThisObject();
+}
+
+AttachDecision GetPropIRGenerator::tryAttachWindowProxy(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id) {
+ // Attach a stub when the receiver is a WindowProxy and we can do the lookup
+ // on the Window (the global object).
+
+ if (!IsWindowProxyForScriptGlobal(script_, obj)) {
+ return AttachDecision::NoAction;
+ }
+
+  // If we're megamorphic, prefer a generic proxy stub that handles a lot more
+  // cases.
+ if (mode_ == ICState::Mode::Megamorphic) {
+ return AttachDecision::NoAction;
+ }
+
+ // Now try to do the lookup on the Window (the current global).
+ GlobalObject* windowObj = cx_->global();
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, windowObj, id, &holder, &prop, pc_);
+ switch (kind) {
+ case NativeGetPropKind::None:
+ return AttachDecision::NoAction;
+
+ case NativeGetPropKind::Slot: {
+ maybeEmitIdGuard(id);
+ ObjOperandId windowObjId =
+ GuardAndLoadWindowProxyWindow(writer, objId, windowObj);
+ EmitReadSlotResult(writer, windowObj, holder, *prop, windowObjId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.WindowProxySlot");
+ return AttachDecision::Attach;
+ }
+
+ case NativeGetPropKind::Missing: {
+ maybeEmitIdGuard(id);
+ ObjOperandId windowObjId =
+ GuardAndLoadWindowProxyWindow(writer, objId, windowObj);
+ EmitMissingPropResult(writer, windowObj, windowObjId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.WindowProxyMissing");
+ return AttachDecision::Attach;
+ }
+
+ case NativeGetPropKind::NativeGetter:
+ case NativeGetPropKind::ScriptedGetter: {
+      // For a |super| access, it is not worth the complexity to attach an IC.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ bool needsWindowProxy = GetterNeedsWindowProxyThis(holder, *prop);
+
+ // Guard the incoming object is a WindowProxy and inline a getter call
+ // based on the Window object.
+ maybeEmitIdGuard(id);
+ ObjOperandId windowObjId =
+ GuardAndLoadWindowProxyWindow(writer, objId, windowObj);
+
+ if (CanAttachDOMGetterSetter(cx_, JSJitInfo::Getter, windowObj, holder,
+ *prop, mode_)) {
+ MOZ_ASSERT(!needsWindowProxy);
+ EmitCallDOMGetterResult(cx_, writer, windowObj, holder, id, *prop,
+ windowObjId);
+ trackAttached("GetProp.WindowProxyDOMGetter");
+ } else {
+ ValOperandId receiverId =
+ writer.boxObject(needsWindowProxy ? objId : windowObjId);
+ EmitCallGetterResult(cx_, writer, kind, windowObj, holder, id, *prop,
+ windowObjId, receiverId, mode_);
+ trackAttached("GetProp.WindowProxyGetter");
+ }
+
+ return AttachDecision::Attach;
+ }
+ }
+
+ MOZ_CRASH("Unreachable");
+}
+
+AttachDecision GetPropIRGenerator::tryAttachCrossCompartmentWrapper(
+ HandleObject obj, ObjOperandId objId, HandleId id) {
+  // We can only optimize this particular wrapper handler, because others might
+  // have a security policy.
+ if (!IsWrapper(obj) ||
+ Wrapper::wrapperHandler(obj) != &CrossCompartmentWrapper::singleton) {
+ return AttachDecision::NoAction;
+ }
+
+  // If we're megamorphic, prefer a generic proxy stub that handles a lot more
+  // cases.
+ if (mode_ == ICState::Mode::Megamorphic) {
+ return AttachDecision::NoAction;
+ }
+
+ RootedObject unwrapped(cx_, Wrapper::wrappedObject(obj));
+ MOZ_ASSERT(unwrapped == UnwrapOneCheckedStatic(obj));
+ MOZ_ASSERT(!IsCrossCompartmentWrapper(unwrapped),
+ "CCWs must not wrap other CCWs");
+
+ // If we allowed different zones we would have to wrap strings.
+ if (unwrapped->compartment()->zone() != cx_->compartment()->zone()) {
+ return AttachDecision::NoAction;
+ }
+
+  // Take the unwrapped object's global, and wrap it in a this-compartment
+  // wrapper. This is what will be stored in the IC to keep the compartment
+  // alive.
+ RootedObject wrappedTargetGlobal(cx_, &unwrapped->nonCCWGlobal());
+ if (!cx_->compartment()->wrap(cx_, &wrappedTargetGlobal)) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+
+ // Enter realm of target to prevent failing compartment assertions when doing
+ // the lookup.
+ {
+ AutoRealm ar(cx_, unwrapped);
+
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, unwrapped, id, &holder, &prop, pc_);
+ if (kind != NativeGetPropKind::Slot && kind != NativeGetPropKind::Missing) {
+ return AttachDecision::NoAction;
+ }
+ }
+ auto* unwrappedNative = &unwrapped->as<NativeObject>();
+
+ maybeEmitIdGuard(id);
+ writer.guardIsProxy(objId);
+ writer.guardHasProxyHandler(objId, Wrapper::wrapperHandler(obj));
+
+ // Load the object wrapped by the CCW
+ ObjOperandId wrapperTargetId = writer.loadWrapperTarget(objId);
+
+  // If the compartment of the wrapped object is different, we should fail.
+ writer.guardCompartment(wrapperTargetId, wrappedTargetGlobal,
+ unwrappedNative->compartment());
+
+ ObjOperandId unwrappedId = wrapperTargetId;
+ if (holder) {
+ EmitReadSlotResult<IsCrossCompartment::Yes>(writer, unwrappedNative, holder,
+ *prop, unwrappedId);
+ writer.wrapResult();
+ writer.returnFromIC();
+ trackAttached("GetProp.CCWSlot");
+ } else {
+ EmitMissingPropResult<IsCrossCompartment::Yes>(writer, unwrappedNative,
+ unwrappedId);
+ writer.returnFromIC();
+ trackAttached("GetProp.CCWMissing");
+ }
+ return AttachDecision::Attach;
+}
+
+static JSObject* NewWrapperWithObjectShape(JSContext* cx,
+ Handle<NativeObject*> obj);
+
+static bool GetXrayExpandoShapeWrapper(JSContext* cx, HandleObject xray,
+ MutableHandleObject wrapper) {
+ Value v = GetProxyReservedSlot(xray, GetXrayJitInfo()->xrayHolderSlot);
+ if (v.isObject()) {
+ NativeObject* holder = &v.toObject().as<NativeObject>();
+ v = holder->getFixedSlot(GetXrayJitInfo()->holderExpandoSlot);
+ if (v.isObject()) {
+ Rooted<NativeObject*> expando(
+ cx, &UncheckedUnwrap(&v.toObject())->as<NativeObject>());
+ wrapper.set(NewWrapperWithObjectShape(cx, expando));
+ return wrapper != nullptr;
+ }
+ }
+ wrapper.set(nullptr);
+ return true;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachXrayCrossCompartmentWrapper(
+ HandleObject obj, ObjOperandId objId, HandleId id,
+ ValOperandId receiverId) {
+ if (!obj->is<ProxyObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ JS::XrayJitInfo* info = GetXrayJitInfo();
+ if (!info || !info->isCrossCompartmentXray(GetProxyHandler(obj))) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!info->compartmentHasExclusiveExpandos(obj)) {
+ return AttachDecision::NoAction;
+ }
+
+ RootedObject target(cx_, UncheckedUnwrap(obj));
+
+ RootedObject expandoShapeWrapper(cx_);
+ if (!GetXrayExpandoShapeWrapper(cx_, obj, &expandoShapeWrapper)) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ // Look for a getter we can call on the xray or its prototype chain.
+ Rooted<Maybe<PropertyDescriptor>> desc(cx_);
+ RootedObject holder(cx_, obj);
+ RootedObjectVector prototypes(cx_);
+ RootedObjectVector prototypeExpandoShapeWrappers(cx_);
+ while (true) {
+ if (!GetOwnPropertyDescriptor(cx_, holder, id, &desc)) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+ if (desc.isSome()) {
+ break;
+ }
+ if (!GetPrototype(cx_, holder, &holder)) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+ if (!holder || !holder->is<ProxyObject>() ||
+ !info->isCrossCompartmentXray(GetProxyHandler(holder))) {
+ return AttachDecision::NoAction;
+ }
+ RootedObject prototypeExpandoShapeWrapper(cx_);
+ if (!GetXrayExpandoShapeWrapper(cx_, holder,
+ &prototypeExpandoShapeWrapper) ||
+ !prototypes.append(holder) ||
+ !prototypeExpandoShapeWrappers.append(prototypeExpandoShapeWrapper)) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+ }
+ if (!desc->isAccessorDescriptor()) {
+ return AttachDecision::NoAction;
+ }
+
+ RootedObject getter(cx_, desc->getter());
+ if (!getter || !getter->is<JSFunction>() ||
+ !getter->as<JSFunction>().isNativeWithoutJitEntry()) {
+ return AttachDecision::NoAction;
+ }
+
+ maybeEmitIdGuard(id);
+ writer.guardIsProxy(objId);
+ writer.guardHasProxyHandler(objId, GetProxyHandler(obj));
+
+ // Load the object wrapped by the CCW
+ ObjOperandId wrapperTargetId = writer.loadWrapperTarget(objId);
+
+ // Test the wrapped object's class. The properties held by xrays or their
+ // prototypes will be invariant for objects of a given class, except for
+ // changes due to xray expandos or xray prototype mutations.
+ writer.guardAnyClass(wrapperTargetId, target->getClass());
+
+ // Make sure the expandos on the xray and its prototype chain match up with
+ // what we expect. The expando shape needs to be consistent, to ensure it
+ // has not had any shadowing properties added, and the expando cannot have
+ // any custom prototype (xray prototypes are stable otherwise).
+ //
+ // We can only do this for xrays with exclusive access to their expandos
+ // (as we checked earlier), which store a pointer to their expando
+ // directly. Xrays in other compartments may share their expandos with each
+ // other and a VM call is needed just to find the expando.
+ if (expandoShapeWrapper) {
+ writer.guardXrayExpandoShapeAndDefaultProto(objId, expandoShapeWrapper);
+ } else {
+ writer.guardXrayNoExpando(objId);
+ }
+ for (size_t i = 0; i < prototypes.length(); i++) {
+ JSObject* proto = prototypes[i];
+ ObjOperandId protoId = writer.loadObject(proto);
+ if (JSObject* protoShapeWrapper = prototypeExpandoShapeWrappers[i]) {
+ writer.guardXrayExpandoShapeAndDefaultProto(protoId, protoShapeWrapper);
+ } else {
+ writer.guardXrayNoExpando(protoId);
+ }
+ }
+
+ bool sameRealm = cx_->realm() == getter->as<JSFunction>().realm();
+ writer.callNativeGetterResult(receiverId, &getter->as<JSFunction>(),
+ sameRealm);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.XrayCCW");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachGenericProxy(
+ Handle<ProxyObject*> obj, ObjOperandId objId, HandleId id,
+ bool handleDOMProxies) {
+ writer.guardIsProxy(objId);
+
+ if (!handleDOMProxies) {
+    // Ensure that the incoming object is not a DOM proxy, so that we can get
+    // to the specialized stubs.
+ writer.guardIsNotDOMProxy(objId);
+ }
+
+ if (cacheKind_ == CacheKind::GetProp || mode_ == ICState::Mode::Specialized) {
+ MOZ_ASSERT(!isSuper());
+ maybeEmitIdGuard(id);
+ writer.proxyGetResult(objId, id);
+ } else {
+ // Attach a stub that handles every id.
+ MOZ_ASSERT(cacheKind_ == CacheKind::GetElem);
+ MOZ_ASSERT(mode_ == ICState::Mode::Megamorphic);
+ MOZ_ASSERT(!isSuper());
+ writer.proxyGetByValueResult(objId, getElemKeyValueId());
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("GetProp.GenericProxy");
+ return AttachDecision::Attach;
+}
+
+static bool ValueIsInt64Index(const Value& val, int64_t* index) {
+ // Try to convert the Value to a TypedArray index or DataView offset.
+
+ if (val.isInt32()) {
+ *index = val.toInt32();
+ return true;
+ }
+
+ if (val.isDouble()) {
+ // Use NumberEqualsInt64 because ToPropertyKey(-0) is 0.
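+    // For example, -0.0 yields index 0, while 1.5, NaN, and Infinity are
+    // rejected.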
+ return mozilla::NumberEqualsInt64(val.toDouble(), index);
+ }
+
+ return false;
+}
+
+IntPtrOperandId IRGenerator::guardToIntPtrIndex(const Value& index,
+ ValOperandId indexId,
+ bool supportOOB) {
+#ifdef DEBUG
+ int64_t indexInt64;
+ MOZ_ASSERT_IF(!supportOOB, ValueIsInt64Index(index, &indexInt64));
+#endif
+
+ if (index.isInt32()) {
+ Int32OperandId int32IndexId = writer.guardToInt32(indexId);
+ return writer.int32ToIntPtr(int32IndexId);
+ }
+
+ MOZ_ASSERT(index.isNumber());
+ NumberOperandId numberIndexId = writer.guardIsNumber(indexId);
+ return writer.guardNumberToIntPtrIndex(numberIndexId, supportOOB);
+}
+
+ObjOperandId IRGenerator::guardDOMProxyExpandoObjectAndShape(
+ ProxyObject* obj, ObjOperandId objId, const Value& expandoVal,
+ NativeObject* expandoObj) {
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ TestMatchingProxyReceiver(writer, obj, objId);
+
+ // Shape determines Class, so now it must be a DOM proxy.
+ ValOperandId expandoValId;
+ if (expandoVal.isObject()) {
+ expandoValId = writer.loadDOMExpandoValue(objId);
+ } else {
+ expandoValId = writer.loadDOMExpandoValueIgnoreGeneration(objId);
+ }
+
+  // Guard that the expando is an object, then guard on its shape.
+ ObjOperandId expandoObjId = writer.guardToObject(expandoValId);
+ TestMatchingHolder(writer, expandoObj, expandoObjId);
+ return expandoObjId;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachDOMProxyExpando(
+ Handle<ProxyObject*> obj, ObjOperandId objId, HandleId id,
+ ValOperandId receiverId) {
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ Value expandoVal = GetProxyPrivate(obj);
+ JSObject* expandoObj;
+ if (expandoVal.isObject()) {
+ expandoObj = &expandoVal.toObject();
+ } else {
+ MOZ_ASSERT(!expandoVal.isUndefined(),
+ "How did a missing expando manage to shadow things?");
+ auto expandoAndGeneration =
+ static_cast<ExpandoAndGeneration*>(expandoVal.toPrivate());
+ MOZ_ASSERT(expandoAndGeneration);
+ expandoObj = &expandoAndGeneration->expando.toObject();
+ }
+
+ // Try to do the lookup on the expando object.
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, expandoObj, id, &holder, &prop, pc_);
+ if (kind == NativeGetPropKind::None) {
+ return AttachDecision::NoAction;
+ }
+ if (!holder) {
+ return AttachDecision::NoAction;
+ }
+ auto* nativeExpandoObj = &expandoObj->as<NativeObject>();
+
+ MOZ_ASSERT(holder == nativeExpandoObj);
+
+ maybeEmitIdGuard(id);
+ ObjOperandId expandoObjId = guardDOMProxyExpandoObjectAndShape(
+ obj, objId, expandoVal, nativeExpandoObj);
+
+ if (kind == NativeGetPropKind::Slot) {
+ // Load from the expando's slots.
+ EmitLoadSlotResult(writer, expandoObjId, nativeExpandoObj, *prop);
+ writer.returnFromIC();
+ } else {
+ // Call the getter. Note that we pass objId, the DOM proxy, as |this|
+ // and not the expando object.
+ MOZ_ASSERT(kind == NativeGetPropKind::NativeGetter ||
+ kind == NativeGetPropKind::ScriptedGetter);
+ EmitGuardGetterSetterSlot(writer, nativeExpandoObj, *prop, expandoObjId);
+ EmitCallGetterResultNoGuards(cx_, writer, kind, nativeExpandoObj,
+ nativeExpandoObj, *prop, receiverId);
+ }
+
+ trackAttached("GetProp.DOMProxyExpando");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachDOMProxyShadowed(
+ Handle<ProxyObject*> obj, ObjOperandId objId, HandleId id) {
+ MOZ_ASSERT(!isSuper());
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ maybeEmitIdGuard(id);
+ TestMatchingProxyReceiver(writer, obj, objId);
+ writer.proxyGetResult(objId, id);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.DOMProxyShadowed");
+ return AttachDecision::Attach;
+}
+
+// Callers are expected to have already guarded on the shape of the
+// object, which guarantees the object is a DOM proxy.
+static void CheckDOMProxyExpandoDoesNotShadow(CacheIRWriter& writer,
+ ProxyObject* obj, jsid id,
+ ObjOperandId objId) {
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ Value expandoVal = GetProxyPrivate(obj);
+
+ ValOperandId expandoId;
+ if (!expandoVal.isObject() && !expandoVal.isUndefined()) {
+ auto expandoAndGeneration =
+ static_cast<ExpandoAndGeneration*>(expandoVal.toPrivate());
+ uint64_t generation = expandoAndGeneration->generation;
+ expandoId = writer.loadDOMExpandoValueGuardGeneration(
+ objId, expandoAndGeneration, generation);
+ expandoVal = expandoAndGeneration->expando;
+ } else {
+ expandoId = writer.loadDOMExpandoValue(objId);
+ }
+
+ if (expandoVal.isUndefined()) {
+ // Guard there's no expando object.
+ writer.guardNonDoubleType(expandoId, ValueType::Undefined);
+ } else if (expandoVal.isObject()) {
+ // Guard the proxy either has no expando object or, if it has one, that
+ // the shape matches the current expando object.
+ NativeObject& expandoObj = expandoVal.toObject().as<NativeObject>();
+ MOZ_ASSERT(!expandoObj.containsPure(id));
+ writer.guardDOMExpandoMissingOrGuardShape(expandoId, expandoObj.shape());
+ } else {
+ MOZ_CRASH("Invalid expando value");
+ }
+}
+
+AttachDecision GetPropIRGenerator::tryAttachDOMProxyUnshadowed(
+ Handle<ProxyObject*> obj, ObjOperandId objId, HandleId id,
+ ValOperandId receiverId) {
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ JSObject* checkObj = obj->staticPrototype();
+ if (!checkObj) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, checkObj, id, &holder, &prop, pc_);
+ if (kind == NativeGetPropKind::None) {
+ return AttachDecision::NoAction;
+ }
+ auto* nativeCheckObj = &checkObj->as<NativeObject>();
+
+ maybeEmitIdGuard(id);
+
+ // Guard that our expando object hasn't started shadowing this property.
+ TestMatchingProxyReceiver(writer, obj, objId);
+ CheckDOMProxyExpandoDoesNotShadow(writer, obj, id, objId);
+
+ if (holder) {
+ // Found the property on the prototype chain. Treat it like a native
+ // getprop.
+ GeneratePrototypeGuards(writer, obj, holder, objId);
+
+ // Guard on the holder of the property.
+ ObjOperandId holderId = writer.loadObject(holder);
+ TestMatchingHolder(writer, holder, holderId);
+
+ if (kind == NativeGetPropKind::Slot) {
+ EmitLoadSlotResult(writer, holderId, holder, *prop);
+ writer.returnFromIC();
+ } else {
+      // EmitCallGetterResultNoGuards expects |obj| to be the object the
+      // property lives on so it can do some checks. Since we actually looked
+      // at checkObj, and no extra guards will be generated, we can just pass
+      // that instead.
+ MOZ_ASSERT(kind == NativeGetPropKind::NativeGetter ||
+ kind == NativeGetPropKind::ScriptedGetter);
+ MOZ_ASSERT(!isSuper());
+ EmitGuardGetterSetterSlot(writer, holder, *prop, holderId,
+ /* holderIsConstant = */ true);
+ EmitCallGetterResultNoGuards(cx_, writer, kind, nativeCheckObj, holder,
+ *prop, receiverId);
+ }
+ } else {
+ // Property was not found on the prototype chain. Deoptimize down to
+ // proxy get call.
+ MOZ_ASSERT(kind == NativeGetPropKind::Missing);
+ MOZ_ASSERT(!isSuper());
+ writer.proxyGetResult(objId, id);
+ writer.returnFromIC();
+ }
+
+ trackAttached("GetProp.DOMProxyUnshadowed");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachProxy(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id,
+ ValOperandId receiverId) {
+ ProxyStubType type = GetProxyStubType(cx_, obj, id);
+ if (type == ProxyStubType::None) {
+ return AttachDecision::NoAction;
+ }
+ auto proxy = obj.as<ProxyObject>();
+
+ // The proxy stubs don't currently support |super| access.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (mode_ == ICState::Mode::Megamorphic) {
+ return tryAttachGenericProxy(proxy, objId, id,
+ /* handleDOMProxies = */ true);
+ }
+
+ switch (type) {
+ case ProxyStubType::None:
+ break;
+ case ProxyStubType::DOMExpando:
+ TRY_ATTACH(tryAttachDOMProxyExpando(proxy, objId, id, receiverId));
+ [[fallthrough]]; // Fall through to the generic shadowed case.
+ case ProxyStubType::DOMShadowed:
+ return tryAttachDOMProxyShadowed(proxy, objId, id);
+ case ProxyStubType::DOMUnshadowed:
+ TRY_ATTACH(tryAttachDOMProxyUnshadowed(proxy, objId, id, receiverId));
+ return tryAttachGenericProxy(proxy, objId, id,
+ /* handleDOMProxies = */ true);
+ case ProxyStubType::Generic:
+ return tryAttachGenericProxy(proxy, objId, id,
+ /* handleDOMProxies = */ false);
+ }
+
+ MOZ_CRASH("Unexpected ProxyStubType");
+}
+
+// Guards the class of an object. Because shape implies class, and a shape guard
+// is faster than a class guard, if this is our first time attaching a stub, we
+// instead generate a shape guard.
+void IRGenerator::emitOptimisticClassGuard(ObjOperandId objId, JSObject* obj,
+ GuardClassKind kind) {
+#ifdef DEBUG
+ switch (kind) {
+ case GuardClassKind::Array:
+ MOZ_ASSERT(obj->is<ArrayObject>());
+ break;
+ case GuardClassKind::PlainObject:
+ MOZ_ASSERT(obj->is<PlainObject>());
+ break;
+ case GuardClassKind::ArrayBuffer:
+ MOZ_ASSERT(obj->is<ArrayBufferObject>());
+ break;
+ case GuardClassKind::SharedArrayBuffer:
+ MOZ_ASSERT(obj->is<SharedArrayBufferObject>());
+ break;
+ case GuardClassKind::DataView:
+ MOZ_ASSERT(obj->is<DataViewObject>());
+ break;
+ case GuardClassKind::Set:
+ MOZ_ASSERT(obj->is<SetObject>());
+ break;
+ case GuardClassKind::Map:
+ MOZ_ASSERT(obj->is<MapObject>());
+ break;
+
+ case GuardClassKind::MappedArguments:
+ case GuardClassKind::UnmappedArguments:
+ case GuardClassKind::JSFunction:
+ case GuardClassKind::BoundFunction:
+ case GuardClassKind::WindowProxy:
+ // Arguments, functions, and the global object have
+ // less consistent shapes.
+ MOZ_CRASH("GuardClassKind not supported");
+ }
+#endif
+
+ if (isFirstStub_) {
+ writer.guardShapeForClass(objId, obj->shape());
+ } else {
+ writer.guardClass(objId, kind);
+ }
+}
+
+static void AssertArgumentsCustomDataProp(ArgumentsObject* obj,
+ PropertyKey key) {
+#ifdef DEBUG
+ // The property must still be a custom data property if it has been resolved.
+ // If this assertion fails, we're probably missing a call to mark this
+ // property overridden.
+ Maybe<PropertyInfo> prop = obj->lookupPure(key);
+ MOZ_ASSERT_IF(prop, prop->isCustomDataProperty());
+#endif
+}
+
+AttachDecision GetPropIRGenerator::tryAttachObjectLength(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id) {
+ if (!id.isAtom(cx_->names().length)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (obj->is<ArrayObject>()) {
+ if (obj->as<ArrayObject>().length() > INT32_MAX) {
+ return AttachDecision::NoAction;
+ }
+
+ maybeEmitIdGuard(id);
+ emitOptimisticClassGuard(objId, obj, GuardClassKind::Array);
+ writer.loadInt32ArrayLengthResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.ArrayLength");
+ return AttachDecision::Attach;
+ }
+
+ if (obj->is<ArgumentsObject>() &&
+ !obj->as<ArgumentsObject>().hasOverriddenLength()) {
+ AssertArgumentsCustomDataProp(&obj->as<ArgumentsObject>(), id);
+ maybeEmitIdGuard(id);
+ if (obj->is<MappedArgumentsObject>()) {
+ writer.guardClass(objId, GuardClassKind::MappedArguments);
+ } else {
+ MOZ_ASSERT(obj->is<UnmappedArgumentsObject>());
+ writer.guardClass(objId, GuardClassKind::UnmappedArguments);
+ }
+ writer.loadArgumentsObjectLengthResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.ArgumentsObjectLength");
+ return AttachDecision::Attach;
+ }
+
+ return AttachDecision::NoAction;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachTypedArray(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id) {
+ if (!obj->is<TypedArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (mode_ != ICState::Mode::Specialized) {
+ return AttachDecision::NoAction;
+ }
+
+ // Receiver should be the object.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ bool isLength = id.isAtom(cx_->names().length);
+ bool isByteOffset = id.isAtom(cx_->names().byteOffset);
+ if (!isLength && !isByteOffset && !id.isAtom(cx_->names().byteLength)) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, obj, id, &holder, &prop, pc_);
+ if (kind != NativeGetPropKind::NativeGetter) {
+ return AttachDecision::NoAction;
+ }
+
+ JSFunction& fun = holder->getGetter(*prop)->as<JSFunction>();
+ if (isLength) {
+ if (!TypedArrayObject::isOriginalLengthGetter(fun.native())) {
+ return AttachDecision::NoAction;
+ }
+ } else if (isByteOffset) {
+ if (!TypedArrayObject::isOriginalByteOffsetGetter(fun.native())) {
+ return AttachDecision::NoAction;
+ }
+ } else {
+ if (!TypedArrayObject::isOriginalByteLengthGetter(fun.native())) {
+ return AttachDecision::NoAction;
+ }
+ }
+
+ auto* tarr = &obj->as<TypedArrayObject>();
+
+ maybeEmitIdGuard(id);
+ // Emit all the normal guards for calling this native, but specialize
+ // callNativeGetterResult.
+ EmitCallGetterResultGuards(writer, tarr, holder, id, *prop, objId, mode_);
+ if (isLength) {
+ if (tarr->length() <= INT32_MAX) {
+ writer.loadArrayBufferViewLengthInt32Result(objId);
+ } else {
+ writer.loadArrayBufferViewLengthDoubleResult(objId);
+ }
+ trackAttached("GetProp.TypedArrayLength");
+ } else if (isByteOffset) {
+ if (tarr->byteOffset() <= INT32_MAX) {
+ writer.arrayBufferViewByteOffsetInt32Result(objId);
+ } else {
+ writer.arrayBufferViewByteOffsetDoubleResult(objId);
+ }
+ trackAttached("GetProp.TypedArrayByteOffset");
+ } else {
+ if (tarr->byteLength() <= INT32_MAX) {
+ writer.typedArrayByteLengthInt32Result(objId);
+ } else {
+ writer.typedArrayByteLengthDoubleResult(objId);
+ }
+ trackAttached("GetProp.TypedArrayByteLength");
+ }
+ writer.returnFromIC();
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachDataView(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id) {
+ if (!obj->is<DataViewObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* dv = &obj->as<DataViewObject>();
+
+ if (mode_ != ICState::Mode::Specialized) {
+ return AttachDecision::NoAction;
+ }
+
+ // Receiver should be the object.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ bool isByteOffset = id.isAtom(cx_->names().byteOffset);
+ if (!isByteOffset && !id.isAtom(cx_->names().byteLength)) {
+ return AttachDecision::NoAction;
+ }
+
+ // byteOffset and byteLength both throw when the ArrayBuffer is detached.
+ if (dv->hasDetachedBuffer()) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, obj, id, &holder, &prop, pc_);
+ if (kind != NativeGetPropKind::NativeGetter) {
+ return AttachDecision::NoAction;
+ }
+
+ auto& fun = holder->getGetter(*prop)->as<JSFunction>();
+ if (isByteOffset) {
+ if (!DataViewObject::isOriginalByteOffsetGetter(fun.native())) {
+ return AttachDecision::NoAction;
+ }
+ } else {
+ if (!DataViewObject::isOriginalByteLengthGetter(fun.native())) {
+ return AttachDecision::NoAction;
+ }
+ }
+
+ maybeEmitIdGuard(id);
+ // Emit all the normal guards for calling this native, but specialize
+ // callNativeGetterResult.
+ EmitCallGetterResultGuards(writer, dv, holder, id, *prop, objId, mode_);
+ writer.guardHasAttachedArrayBuffer(objId);
+ if (isByteOffset) {
+ if (dv->byteOffset() <= INT32_MAX) {
+ writer.arrayBufferViewByteOffsetInt32Result(objId);
+ } else {
+ writer.arrayBufferViewByteOffsetDoubleResult(objId);
+ }
+ trackAttached("GetProp.DataViewByteOffset");
+ } else {
+ if (dv->byteLength() <= INT32_MAX) {
+ writer.loadArrayBufferViewLengthInt32Result(objId);
+ } else {
+ writer.loadArrayBufferViewLengthDoubleResult(objId);
+ }
+ trackAttached("GetProp.DataViewByteLength");
+ }
+ writer.returnFromIC();
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachArrayBufferMaybeShared(
+ HandleObject obj, ObjOperandId objId, HandleId id) {
+ if (!obj->is<ArrayBufferObjectMaybeShared>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* buf = &obj->as<ArrayBufferObjectMaybeShared>();
+
+ if (mode_ != ICState::Mode::Specialized) {
+ return AttachDecision::NoAction;
+ }
+
+ // Receiver should be the object.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!id.isAtom(cx_->names().byteLength)) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, obj, id, &holder, &prop, pc_);
+ if (kind != NativeGetPropKind::NativeGetter) {
+ return AttachDecision::NoAction;
+ }
+
+ auto& fun = holder->getGetter(*prop)->as<JSFunction>();
+ if (buf->is<ArrayBufferObject>()) {
+ if (!ArrayBufferObject::isOriginalByteLengthGetter(fun.native())) {
+ return AttachDecision::NoAction;
+ }
+ } else {
+ if (!SharedArrayBufferObject::isOriginalByteLengthGetter(fun.native())) {
+ return AttachDecision::NoAction;
+ }
+ }
+
+ maybeEmitIdGuard(id);
+ // Emit all the normal guards for calling this native, but specialize
+ // callNativeGetterResult.
+ EmitCallGetterResultGuards(writer, buf, holder, id, *prop, objId, mode_);
+ if (buf->byteLength() <= INT32_MAX) {
+ writer.loadArrayBufferByteLengthInt32Result(objId);
+ } else {
+ writer.loadArrayBufferByteLengthDoubleResult(objId);
+ }
+ writer.returnFromIC();
+
+ trackAttached("GetProp.ArrayBufferMaybeSharedByteLength");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachRegExp(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id) {
+ if (!obj->is<RegExpObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* regExp = &obj->as<RegExpObject>();
+
+ if (mode_ != ICState::Mode::Specialized) {
+ return AttachDecision::NoAction;
+ }
+
+ // Receiver should be the object.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, obj, id, &holder, &prop, pc_);
+ if (kind != NativeGetPropKind::NativeGetter) {
+ return AttachDecision::NoAction;
+ }
+
+ auto& fun = holder->getGetter(*prop)->as<JSFunction>();
+ JS::RegExpFlags flags = JS::RegExpFlag::NoFlags;
+ if (!RegExpObject::isOriginalFlagGetter(fun.native(), &flags)) {
+ return AttachDecision::NoAction;
+ }
+
+ maybeEmitIdGuard(id);
+ // Emit all the normal guards for calling this native, but specialize
+ // callNativeGetterResult.
+ EmitCallGetterResultGuards(writer, regExp, holder, id, *prop, objId, mode_);
+
+ writer.regExpFlagResult(objId, flags.value());
+ writer.returnFromIC();
+
+ trackAttached("GetProp.RegExpFlag");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachMap(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id) {
+ if (!obj->is<MapObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* mapObj = &obj->as<MapObject>();
+
+ if (mode_ != ICState::Mode::Specialized) {
+ return AttachDecision::NoAction;
+ }
+
+ // Receiver should be the object.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!id.isAtom(cx_->names().size)) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, obj, id, &holder, &prop, pc_);
+ if (kind != NativeGetPropKind::NativeGetter) {
+ return AttachDecision::NoAction;
+ }
+
+ auto& fun = holder->getGetter(*prop)->as<JSFunction>();
+ if (!MapObject::isOriginalSizeGetter(fun.native())) {
+ return AttachDecision::NoAction;
+ }
+
+ maybeEmitIdGuard(id);
+
+ // Emit all the normal guards for calling this native, but specialize
+ // callNativeGetterResult.
+ EmitCallGetterResultGuards(writer, mapObj, holder, id, *prop, objId, mode_);
+
+ writer.mapSizeResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.MapSize");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachSet(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id) {
+ if (!obj->is<SetObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* setObj = &obj->as<SetObject>();
+
+ if (mode_ != ICState::Mode::Specialized) {
+ return AttachDecision::NoAction;
+ }
+
+ // Receiver should be the object.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!id.isAtom(cx_->names().size)) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, obj, id, &holder, &prop, pc_);
+ if (kind != NativeGetPropKind::NativeGetter) {
+ return AttachDecision::NoAction;
+ }
+
+ auto& fun = holder->getGetter(*prop)->as<JSFunction>();
+ if (!SetObject::isOriginalSizeGetter(fun.native())) {
+ return AttachDecision::NoAction;
+ }
+
+ maybeEmitIdGuard(id);
+
+ // Emit all the normal guards for calling this native, but specialize
+ // callNativeGetterResult.
+ EmitCallGetterResultGuards(writer, setObj, holder, id, *prop, objId, mode_);
+
+ writer.setSizeResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.SetSize");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachFunction(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id) {
+  // Function properties are lazily resolved, so they might not be defined yet.
+  // We might also end up in a situation where we always see a fresh function
+  // object during IC generation.
+ if (!obj->is<JSFunction>()) {
+ return AttachDecision::NoAction;
+ }
+
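+  // Covers e.g. |f.length| and |f.name| on a plain function whose lazily
+  // resolved length/name properties haven't been materialized yet.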
+ bool isLength = id.isAtom(cx_->names().length);
+ if (!isLength && !id.isAtom(cx_->names().name)) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ PropertyResult prop;
+ // If this property exists already, don't attach the stub.
+ if (LookupPropertyPure(cx_, obj, id, &holder, &prop)) {
+ return AttachDecision::NoAction;
+ }
+
+ JSFunction* fun = &obj->as<JSFunction>();
+
+ if (isLength) {
+ // length was probably deleted from the function.
+ if (fun->hasResolvedLength()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Lazy functions don't store the length.
+ if (!fun->hasBytecode()) {
+ return AttachDecision::NoAction;
+ }
+ } else {
+ // name was probably deleted from the function.
+ if (fun->hasResolvedName()) {
+ return AttachDecision::NoAction;
+ }
+ }
+
+ maybeEmitIdGuard(id);
+ writer.guardClass(objId, GuardClassKind::JSFunction);
+ if (isLength) {
+ writer.loadFunctionLengthResult(objId);
+ writer.returnFromIC();
+ trackAttached("GetProp.FunctionLength");
+ } else {
+ writer.loadFunctionNameResult(objId);
+ writer.returnFromIC();
+ trackAttached("GetProp.FunctionName");
+ }
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachArgumentsObjectIterator(
+ HandleObject obj, ObjOperandId objId, HandleId id) {
+ if (!obj->is<ArgumentsObject>()) {
+ return AttachDecision::NoAction;
+ }
+
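+  // e.g. |arguments[Symbol.iterator]| when the iterator property hasn't been
+  // overridden.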
+ if (!id.isWellKnownSymbol(JS::SymbolCode::iterator)) {
+ return AttachDecision::NoAction;
+ }
+
+ Handle<ArgumentsObject*> args = obj.as<ArgumentsObject>();
+ if (args->hasOverriddenIterator()) {
+ return AttachDecision::NoAction;
+ }
+
+ AssertArgumentsCustomDataProp(args, id);
+
+ RootedValue iterator(cx_);
+ if (!ArgumentsObject::getArgumentsIterator(cx_, &iterator)) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(iterator.isObject());
+
+ maybeEmitIdGuard(id);
+ if (args->is<MappedArgumentsObject>()) {
+ writer.guardClass(objId, GuardClassKind::MappedArguments);
+ } else {
+ MOZ_ASSERT(args->is<UnmappedArgumentsObject>());
+ writer.guardClass(objId, GuardClassKind::UnmappedArguments);
+ }
+ uint32_t flags = ArgumentsObject::ITERATOR_OVERRIDDEN_BIT;
+ writer.guardArgumentsObjectFlags(objId, flags);
+
+ ObjOperandId iterId = writer.loadObject(&iterator.toObject());
+ writer.loadObjectResult(iterId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.ArgumentsObjectIterator");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachModuleNamespace(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id) {
+ if (!obj->is<ModuleNamespaceObject>()) {
+ return AttachDecision::NoAction;
+ }
+
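+  // e.g. |ns.foo| where |ns| comes from |import * as ns from "..."|; the
+  // value is loaded straight from the module environment slot.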
+ auto* ns = &obj->as<ModuleNamespaceObject>();
+ ModuleEnvironmentObject* env = nullptr;
+ Maybe<PropertyInfo> prop;
+ if (!ns->bindings().lookup(id, &env, &prop)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Don't emit a stub until the target binding has been initialized.
+ if (env->getSlot(prop->slot()).isMagic(JS_UNINITIALIZED_LEXICAL)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check for the specific namespace object.
+ maybeEmitIdGuard(id);
+ writer.guardSpecificObject(objId, ns);
+
+ ObjOperandId envId = writer.loadObject(env);
+ EmitLoadSlotResult(writer, envId, env, *prop);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.ModuleNamespace");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachPrimitive(ValOperandId valId,
+ HandleId id) {
+ MOZ_ASSERT(!isSuper(), "SuperBase is guaranteed to be an object");
+
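+  // Covers property gets on primitive values, e.g. |(1.5).toFixed| or
+  // |sym.description|, which are looked up on the primitive's prototype.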
+ JSProtoKey protoKey;
+ switch (val_.type()) {
+ case ValueType::String:
+ if (id.isAtom(cx_->names().length)) {
+ // String length is special-cased, see js::GetProperty.
+ return AttachDecision::NoAction;
+ }
+ protoKey = JSProto_String;
+ break;
+ case ValueType::Int32:
+ case ValueType::Double:
+ protoKey = JSProto_Number;
+ break;
+ case ValueType::Boolean:
+ protoKey = JSProto_Boolean;
+ break;
+ case ValueType::Symbol:
+ protoKey = JSProto_Symbol;
+ break;
+ case ValueType::BigInt:
+ protoKey = JSProto_BigInt;
+ break;
+ case ValueType::Null:
+ case ValueType::Undefined:
+ case ValueType::Magic:
+ return AttachDecision::NoAction;
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+#endif
+ case ValueType::Object:
+ case ValueType::PrivateGCThing:
+ MOZ_CRASH("unexpected type");
+ }
+
+ JSObject* proto = cx_->global()->maybeGetPrototype(protoKey);
+ if (!proto) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ NativeGetPropKind kind =
+ CanAttachNativeGetProp(cx_, proto, id, &holder, &prop, pc_);
+ switch (kind) {
+ case NativeGetPropKind::None:
+ return AttachDecision::NoAction;
+ case NativeGetPropKind::Missing:
+ case NativeGetPropKind::Slot: {
+ auto* nproto = &proto->as<NativeObject>();
+
+ if (val_.isNumber()) {
+ writer.guardIsNumber(valId);
+ } else {
+ writer.guardNonDoubleType(valId, val_.type());
+ }
+ maybeEmitIdGuard(id);
+
+ ObjOperandId protoId = writer.loadObject(nproto);
+ if (kind == NativeGetPropKind::Slot) {
+ EmitReadSlotResult(writer, nproto, holder, *prop, protoId);
+ writer.returnFromIC();
+ trackAttached("GetProp.PrimitiveSlot");
+ } else {
+ EmitMissingPropResult(writer, nproto, protoId);
+ writer.returnFromIC();
+ trackAttached("GetProp.PrimitiveMissing");
+ }
+ return AttachDecision::Attach;
+ }
+ case NativeGetPropKind::ScriptedGetter:
+ case NativeGetPropKind::NativeGetter: {
+ auto* nproto = &proto->as<NativeObject>();
+
+ if (val_.isNumber()) {
+ writer.guardIsNumber(valId);
+ } else {
+ writer.guardNonDoubleType(valId, val_.type());
+ }
+ maybeEmitIdGuard(id);
+
+ ObjOperandId protoId = writer.loadObject(nproto);
+ EmitCallGetterResult(cx_, writer, kind, nproto, holder, id, *prop,
+ protoId, valId, mode_);
+
+ trackAttached("GetProp.PrimitiveGetter");
+ return AttachDecision::Attach;
+ }
+ }
+
+ MOZ_CRASH("Bad NativeGetPropKind");
+}
+
+AttachDecision GetPropIRGenerator::tryAttachStringLength(ValOperandId valId,
+ HandleId id) {
+ if (!val_.isString() || !id.isAtom(cx_->names().length)) {
+ return AttachDecision::NoAction;
+ }
+
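+  // e.g. |"foo".length|: the length is read directly from the string, so no
+  // further guards are needed.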
+ StringOperandId strId = writer.guardToString(valId);
+ maybeEmitIdGuard(id);
+ writer.loadStringLengthResult(strId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.StringLength");
+ return AttachDecision::Attach;
+}
+
+enum class AttachStringChar { No, Yes, Linearize, OutOfBounds };
+
+static AttachStringChar CanAttachStringChar(const Value& val,
+ const Value& idVal) {
+ if (!val.isString() || !idVal.isInt32()) {
+ return AttachStringChar::No;
+ }
+
+ int32_t index = idVal.toInt32();
+ if (index < 0) {
+ return AttachStringChar::OutOfBounds;
+ }
+
+ JSString* str = val.toString();
+ if (size_t(index) >= str->length()) {
+ return AttachStringChar::OutOfBounds;
+ }
+
+ // This follows JSString::getChar and MacroAssembler::loadStringChar.
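+  // For a rope we only inspect the child that covers |index|; if that child
+  // is linear we can read the character without flattening the whole rope.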
+ if (str->isRope()) {
+ JSRope* rope = &str->asRope();
+ if (size_t(index) < rope->leftChild()->length()) {
+ str = rope->leftChild();
+ } else {
+ str = rope->rightChild();
+ }
+ }
+
+ if (!str->isLinear()) {
+ return AttachStringChar::Linearize;
+ }
+
+ return AttachStringChar::Yes;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachStringChar(ValOperandId valId,
+ ValOperandId indexId) {
+ MOZ_ASSERT(idVal_.isInt32());
+
+ auto attach = CanAttachStringChar(val_, idVal_);
+ if (attach == AttachStringChar::No) {
+ return AttachDecision::NoAction;
+ }
+
+ // Can't attach for out-of-bounds access without guarding that indexed
+ // properties aren't present along the prototype chain of |String.prototype|.
+ if (attach == AttachStringChar::OutOfBounds) {
+ return AttachDecision::NoAction;
+ }
+
+ StringOperandId strId = writer.guardToString(valId);
+ Int32OperandId int32IndexId = writer.guardToInt32Index(indexId);
+ if (attach == AttachStringChar::Linearize) {
+ strId = writer.linearizeForCharAccess(strId, int32IndexId);
+ }
+ writer.loadStringCharResult(strId, int32IndexId, /* handleOOB = */ false);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.StringChar");
+ return AttachDecision::Attach;
+}
+
+static bool ClassCanHaveExtraProperties(const JSClass* clasp) {
+ return clasp->getResolve() || clasp->getOpsLookupProperty() ||
+ clasp->getOpsGetProperty() || IsTypedArrayClass(clasp);
+}
+
+enum class OwnProperty : bool { No, Yes };
+enum class AllowIndexedReceiver : bool { No, Yes };
+enum class AllowExtraReceiverProperties : bool { No, Yes };
+
+static bool CanAttachDenseElementHole(
+ NativeObject* obj, OwnProperty ownProp,
+ AllowIndexedReceiver allowIndexedReceiver = AllowIndexedReceiver::No,
+ AllowExtraReceiverProperties allowExtraReceiverProperties =
+ AllowExtraReceiverProperties::No) {
+  // Make sure the objects on the prototype chain don't have any indexed
+  // properties, or that such properties can't appear without a shape change.
+  // Otherwise returning undefined for holes would be incorrect, because we
+  // would have to look up the property on the prototype instead.
+ do {
+ // The first two checks are also relevant to the receiver object.
+ if (allowIndexedReceiver == AllowIndexedReceiver::No && obj->isIndexed()) {
+ return false;
+ }
+ allowIndexedReceiver = AllowIndexedReceiver::No;
+
+ if (allowExtraReceiverProperties == AllowExtraReceiverProperties::No &&
+ ClassCanHaveExtraProperties(obj->getClass())) {
+ return false;
+ }
+ allowExtraReceiverProperties = AllowExtraReceiverProperties::No;
+
+    // No need to check the prototype for OwnProperty checks.
+ if (ownProp == OwnProperty::Yes) {
+ return true;
+ }
+
+ JSObject* proto = obj->staticPrototype();
+ if (!proto) {
+ break;
+ }
+
+ if (!proto->is<NativeObject>()) {
+ return false;
+ }
+
+ // Make sure objects on the prototype don't have dense elements.
+ if (proto->as<NativeObject>().getDenseInitializedLength() != 0) {
+ return false;
+ }
+
+ obj = &proto->as<NativeObject>();
+ } while (true);
+
+ return true;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachArgumentsObjectArg(
+ HandleObject obj, ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId) {
+ if (!obj->is<ArgumentsObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* args = &obj->as<ArgumentsObject>();
+
+  // Elements must not have been overridden or deleted.
+ if (args->hasOverriddenElement()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check bounds.
+ if (index >= args->initialLength()) {
+ return AttachDecision::NoAction;
+ }
+
+ AssertArgumentsCustomDataProp(args, PropertyKey::Int(index));
+
+  // Finally, check that the argument isn't forwarded.
+ if (args->argIsForwarded(index)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (args->is<MappedArgumentsObject>()) {
+ writer.guardClass(objId, GuardClassKind::MappedArguments);
+ } else {
+ MOZ_ASSERT(args->is<UnmappedArgumentsObject>());
+ writer.guardClass(objId, GuardClassKind::UnmappedArguments);
+ }
+
+ writer.loadArgumentsObjectArgResult(objId, indexId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.ArgumentsObjectArg");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachArgumentsObjectArgHole(
+ HandleObject obj, ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId) {
+ if (!obj->is<ArgumentsObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* args = &obj->as<ArgumentsObject>();
+
+  // Elements must not have been overridden or deleted.
+ if (args->hasOverriddenElement()) {
+ return AttachDecision::NoAction;
+ }
+
+  // Also check that the argument isn't forwarded.
+ if (index < args->initialLength() && args->argIsForwarded(index)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!CanAttachDenseElementHole(args, OwnProperty::No,
+ AllowIndexedReceiver::Yes,
+ AllowExtraReceiverProperties::Yes)) {
+ return AttachDecision::NoAction;
+ }
+
+ // We don't need to guard on the shape, because we check if any element is
+ // overridden. Elements are marked as overridden iff any element is defined,
+ // irrespective of whether the element is in-bounds or out-of-bounds. So when
+ // that flag isn't set, we can guarantee that the arguments object doesn't
+ // have any additional own elements.
+
+ if (args->is<MappedArgumentsObject>()) {
+ writer.guardClass(objId, GuardClassKind::MappedArguments);
+ } else {
+ MOZ_ASSERT(args->is<UnmappedArgumentsObject>());
+ writer.guardClass(objId, GuardClassKind::UnmappedArguments);
+ }
+
+ GeneratePrototypeHoleGuards(writer, args, objId,
+ /* alwaysGuardFirstProto = */ true);
+
+ writer.loadArgumentsObjectArgHoleResult(objId, indexId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.ArgumentsObjectArgHole");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachArgumentsObjectCallee(
+ HandleObject obj, ObjOperandId objId, HandleId id) {
+ // Only mapped arguments objects have a `callee` property.
+ if (!obj->is<MappedArgumentsObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!id.isAtom(cx_->names().callee)) {
+ return AttachDecision::NoAction;
+ }
+
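+  // e.g. |arguments.callee| inside a non-strict (sloppy) function.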
+ // The callee must not have been overridden or deleted.
+ MappedArgumentsObject* args = &obj->as<MappedArgumentsObject>();
+ if (args->hasOverriddenCallee()) {
+ return AttachDecision::NoAction;
+ }
+
+ AssertArgumentsCustomDataProp(args, id);
+
+ maybeEmitIdGuard(id);
+ writer.guardClass(objId, GuardClassKind::MappedArguments);
+
+ uint32_t flags = ArgumentsObject::CALLEE_OVERRIDDEN_BIT;
+ writer.guardArgumentsObjectFlags(objId, flags);
+
+ writer.loadFixedSlotResult(objId,
+ MappedArgumentsObject::getCalleeSlotOffset());
+ writer.returnFromIC();
+
+ trackAttached("GetProp.ArgumentsObjectCallee");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachDenseElement(
+ HandleObject obj, ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId) {
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (!nobj->containsDenseElement(index)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (mode_ == ICState::Mode::Megamorphic) {
+ writer.guardIsNativeObject(objId);
+ } else {
+ TestMatchingNativeReceiver(writer, nobj, objId);
+ }
+ writer.loadDenseElementResult(objId, indexId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.DenseElement");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachDenseElementHole(
+ HandleObject obj, ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId) {
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (nobj->containsDenseElement(index)) {
+ return AttachDecision::NoAction;
+ }
+ if (!CanAttachDenseElementHole(nobj, OwnProperty::No)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Guard on the shape, to prevent non-dense elements from appearing.
+ TestMatchingNativeReceiver(writer, nobj, objId);
+ GeneratePrototypeHoleGuards(writer, nobj, objId,
+ /* alwaysGuardFirstProto = */ false);
+ writer.loadDenseElementHoleResult(objId, indexId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.DenseElementHole");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachSparseElement(
+ HandleObject obj, ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId) {
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+ NativeObject* nobj = &obj->as<NativeObject>();
+
+ // Stub doesn't handle negative indices.
+ if (index > INT32_MAX) {
+ return AttachDecision::NoAction;
+ }
+
+ // The object must have sparse elements.
+ if (!nobj->isIndexed()) {
+ return AttachDecision::NoAction;
+ }
+
+ // The index must not be for a dense element.
+ if (nobj->containsDenseElement(index)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Only handle ArrayObject and PlainObject in this stub.
+ if (!nobj->is<ArrayObject>() && !nobj->is<PlainObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // GetSparseElementHelper assumes that the target and the receiver
+ // are the same.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Here, we ensure that the prototype chain does not define any sparse
+ // indexed properties on the shape lineage. This allows us to guard on
+ // the shapes up the prototype chain to ensure that no indexed properties
+ // exist outside of the dense elements.
+ //
+ // The `GeneratePrototypeHoleGuards` call below will guard on the shapes,
+ // as well as ensure that no prototypes contain dense elements, allowing
+ // us to perform a pure shape-search for out-of-bounds integer-indexed
+ // properties on the receiver object.
+ if (PrototypeMayHaveIndexedProperties(nobj)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure that obj is an ArrayObject or PlainObject.
+ if (nobj->is<ArrayObject>()) {
+ writer.guardClass(objId, GuardClassKind::Array);
+ } else {
+ MOZ_ASSERT(nobj->is<PlainObject>());
+ writer.guardClass(objId, GuardClassKind::PlainObject);
+ }
+
+ // The helper we are going to call only applies to non-dense elements.
+ writer.guardIndexIsNotDenseElement(objId, indexId);
+
+  // Ensure we can efficiently map the index to an integral jsid.
+ writer.guardInt32IsNonNegative(indexId);
+
+ // Shape guard the prototype chain to avoid shadowing indexes from appearing.
+ // The helper function also ensures that the index does not appear within the
+ // dense element set of the prototypes.
+ GeneratePrototypeHoleGuards(writer, nobj, objId,
+ /* alwaysGuardFirstProto = */ true);
+
+  // At this point, we are guaranteed that the indexed property will not be
+  // found on one of the prototypes, so we only have to check whether the
+  // receiver object itself has the property.
+
+ writer.callGetSparseElementResult(objId, indexId);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.SparseElement");
+ return AttachDecision::Attach;
+}
+
+// For Uint32Array we let the stub return an Int32 if we have not seen a
+// double, to allow better codegen in Warp while avoiding bailout loops.
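+// e.g. a Uint32Array element holding 2**31 or more can't be represented as
+// an Int32 Value, so the stub must be allowed to return a double.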
+static bool ForceDoubleForUint32Array(TypedArrayObject* tarr, uint64_t index) {
+ MOZ_ASSERT(index < tarr->length());
+
+ if (tarr->type() != Scalar::Type::Uint32) {
+ // Return value is only relevant for Uint32Array.
+ return false;
+ }
+
+ Value res;
+ MOZ_ALWAYS_TRUE(tarr->getElementPure(index, &res));
+ MOZ_ASSERT(res.isNumber());
+ return res.isDouble();
+}
+
+AttachDecision GetPropIRGenerator::tryAttachTypedArrayElement(
+ HandleObject obj, ObjOperandId objId) {
+ if (!obj->is<TypedArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!idVal_.isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ TypedArrayObject* tarr = &obj->as<TypedArrayObject>();
+
+ bool handleOOB = false;
+ int64_t indexInt64;
+ if (!ValueIsInt64Index(idVal_, &indexInt64) || indexInt64 < 0 ||
+ uint64_t(indexInt64) >= tarr->length()) {
+ handleOOB = true;
+ }
+
+  // If the number is not representable as an integer, the result will be
+  // |undefined|, so we leave |forceDoubleForUint32| as false.
+ bool forceDoubleForUint32 = false;
+ if (!handleOOB) {
+ uint64_t index = uint64_t(indexInt64);
+ forceDoubleForUint32 = ForceDoubleForUint32Array(tarr, index);
+ }
+
+ writer.guardShapeForClass(objId, tarr->shape());
+
+ ValOperandId keyId = getElemKeyValueId();
+ IntPtrOperandId intPtrIndexId = guardToIntPtrIndex(idVal_, keyId, handleOOB);
+
+ writer.loadTypedArrayElementResult(objId, intPtrIndexId, tarr->type(),
+ handleOOB, forceDoubleForUint32);
+ writer.returnFromIC();
+
+ trackAttached("GetProp.TypedElement");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachGenericElement(
+ HandleObject obj, ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId, ValOperandId receiverId) {
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+#ifdef JS_CODEGEN_X86
+ if (isSuper()) {
+ // There aren't enough registers available on x86.
+ return AttachDecision::NoAction;
+ }
+#endif
+
+  // To allow other types to attach in the non-megamorphic case, we test for
+  // the specific matching native receiver; once megamorphic, we can attach
+  // for any native object.
+ if (mode_ == ICState::Mode::Megamorphic) {
+ writer.guardIsNativeObject(objId);
+ } else {
+ NativeObject* nobj = &obj->as<NativeObject>();
+ TestMatchingNativeReceiver(writer, nobj, objId);
+ }
+ writer.guardIndexIsNotDenseElement(objId, indexId);
+ if (isSuper()) {
+ writer.callNativeGetElementSuperResult(objId, indexId, receiverId);
+ } else {
+ writer.callNativeGetElementResult(objId, indexId);
+ }
+ writer.returnFromIC();
+
+ trackAttached(mode_ == ICState::Mode::Megamorphic
+ ? "GenericElementMegamorphic"
+ : "GenericElement");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetPropIRGenerator::tryAttachProxyElement(HandleObject obj,
+ ObjOperandId objId) {
+ if (!obj->is<ProxyObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // The proxy stubs don't currently support |super| access.
+ if (isSuper()) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.guardIsProxy(objId);
+
+ // We are not guarding against DOM proxies here, because there is no other
+ // specialized DOM IC we could attach.
+ // We could call maybeEmitIdGuard here and then emit ProxyGetResult,
+ // but for GetElem we prefer to attach a stub that can handle any Value
+ // so we don't attach a new stub for every id.
+ MOZ_ASSERT(cacheKind_ == CacheKind::GetElem);
+ MOZ_ASSERT(!isSuper());
+ writer.proxyGetByValueResult(objId, getElemKeyValueId());
+ writer.returnFromIC();
+
+ trackAttached("GetProp.ProxyElement");
+ return AttachDecision::Attach;
+}
+
+void GetPropIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("base", val_);
+ sp.valueProperty("property", idVal_);
+ }
+#endif
+}
+
+void IRGenerator::emitIdGuard(ValOperandId valId, const Value& idVal, jsid id) {
+ if (id.isSymbol()) {
+ MOZ_ASSERT(idVal.toSymbol() == id.toSymbol());
+ SymbolOperandId symId = writer.guardToSymbol(valId);
+ writer.guardSpecificSymbol(symId, id.toSymbol());
+ } else {
+ MOZ_ASSERT(id.isAtom());
+ if (idVal.isUndefined()) {
+ MOZ_ASSERT(id.isAtom(cx_->names().undefined));
+ writer.guardIsUndefined(valId);
+ } else if (idVal.isNull()) {
+ MOZ_ASSERT(id.isAtom(cx_->names().null));
+ writer.guardIsNull(valId);
+ } else {
+ MOZ_ASSERT(idVal.isString());
+ StringOperandId strId = writer.guardToString(valId);
+ writer.guardSpecificAtom(strId, id.toAtom());
+ }
+ }
+}
+
+void GetPropIRGenerator::maybeEmitIdGuard(jsid id) {
+ if (cacheKind_ == CacheKind::GetProp ||
+ cacheKind_ == CacheKind::GetPropSuper) {
+ // Constant PropertyName, no guards necessary.
+ MOZ_ASSERT(&idVal_.toString()->asAtom() == id.toAtom());
+ return;
+ }
+
+ MOZ_ASSERT(cacheKind_ == CacheKind::GetElem ||
+ cacheKind_ == CacheKind::GetElemSuper);
+ emitIdGuard(getElemKeyValueId(), idVal_, id);
+}
+
+void SetPropIRGenerator::maybeEmitIdGuard(jsid id) {
+ if (cacheKind_ == CacheKind::SetProp) {
+ // Constant PropertyName, no guards necessary.
+ MOZ_ASSERT(&idVal_.toString()->asAtom() == id.toAtom());
+ return;
+ }
+
+ MOZ_ASSERT(cacheKind_ == CacheKind::SetElem);
+ emitIdGuard(setElemKeyValueId(), idVal_, id);
+}
+
+GetNameIRGenerator::GetNameIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleObject env,
+ Handle<PropertyName*> name)
+ : IRGenerator(cx, script, pc, CacheKind::GetName, state),
+ env_(env),
+ name_(name) {}
+
+AttachDecision GetNameIRGenerator::tryAttachStub() {
+ MOZ_ASSERT(cacheKind_ == CacheKind::GetName);
+
+ AutoAssertNoPendingException aanpe(cx_);
+
+ ObjOperandId envId(writer.setInputOperandId(0));
+ RootedId id(cx_, NameToId(name_));
+
+ TRY_ATTACH(tryAttachGlobalNameValue(envId, id));
+ TRY_ATTACH(tryAttachGlobalNameGetter(envId, id));
+ TRY_ATTACH(tryAttachEnvironmentName(envId, id));
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+static bool CanAttachGlobalName(JSContext* cx,
+ GlobalLexicalEnvironmentObject* globalLexical,
+ PropertyKey id, NativeObject** holder,
+ Maybe<PropertyInfo>* prop) {
+ // The property must be found, and it must be found as a normal data property.
+ NativeObject* current = globalLexical;
+ while (true) {
+ *prop = current->lookup(cx, id);
+ if (prop->isSome()) {
+ break;
+ }
+
+ if (current == globalLexical) {
+ current = &globalLexical->global();
+ } else {
+ // In the browser the global prototype chain should be immutable.
+ if (!current->staticPrototypeIsImmutable()) {
+ return false;
+ }
+
+ JSObject* proto = current->staticPrototype();
+ if (!proto || !proto->is<NativeObject>()) {
+ return false;
+ }
+
+ current = &proto->as<NativeObject>();
+ }
+ }
+
+ *holder = current;
+ return true;
+}
+
+AttachDecision GetNameIRGenerator::tryAttachGlobalNameValue(ObjOperandId objId,
+ HandleId id) {
+ if (!IsGlobalOp(JSOp(*pc_))) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+
+ auto* globalLexical = &env_->as<GlobalLexicalEnvironmentObject>();
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ if (!CanAttachGlobalName(cx_, globalLexical, id, &holder, &prop)) {
+ return AttachDecision::NoAction;
+ }
+
+ // The property must be found, and it must be found as a normal data property.
+ if (!prop->isDataProperty()) {
+ return AttachDecision::NoAction;
+ }
+
+ // This might still be an uninitialized lexical.
+ if (holder->getSlot(prop->slot()).isMagic()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (holder == globalLexical) {
+ // There is no need to guard on the shape. Lexical bindings are
+ // non-configurable, and this stub cannot be shared across globals.
+ size_t dynamicSlotOffset =
+ holder->dynamicSlotIndex(prop->slot()) * sizeof(Value);
+ writer.loadDynamicSlotResult(objId, dynamicSlotOffset);
+ } else if (holder == &globalLexical->global()) {
+ MOZ_ASSERT(globalLexical->global().isGenerationCountedGlobal());
+ writer.guardGlobalGeneration(
+ globalLexical->global().generationCount(),
+ globalLexical->global().addressOfGenerationCount());
+ ObjOperandId holderId = writer.loadObject(holder);
+#ifdef DEBUG
+ writer.assertPropertyLookup(holderId, id, prop->slot());
+#endif
+ EmitLoadSlotResult(writer, holderId, holder, *prop);
+ } else {
+ // Check the prototype chain from the global to the holder
+ // prototype. Ignore the global lexical scope as it doesn't figure
+ // into the prototype chain. We guard on the global lexical
+ // scope's shape independently.
+ if (!IsCacheableGetPropSlot(&globalLexical->global(), holder, *prop)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Shape guard for global lexical.
+ writer.guardShape(objId, globalLexical->shape());
+
+ // Guard on the shape of the GlobalObject.
+ ObjOperandId globalId = writer.loadObject(&globalLexical->global());
+ writer.guardShape(globalId, globalLexical->global().shape());
+
+ // Shape guard holder.
+ ObjOperandId holderId = writer.loadObject(holder);
+ writer.guardShape(holderId, holder->shape());
+
+ EmitLoadSlotResult(writer, holderId, holder, *prop);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("GetName.GlobalNameValue");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetNameIRGenerator::tryAttachGlobalNameGetter(ObjOperandId objId,
+ HandleId id) {
+ if (!IsGlobalOp(JSOp(*pc_))) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+
+ Handle<GlobalLexicalEnvironmentObject*> globalLexical =
+ env_.as<GlobalLexicalEnvironmentObject>();
+ MOZ_ASSERT(globalLexical->isGlobal());
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ if (!CanAttachGlobalName(cx_, globalLexical, id, &holder, &prop)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (holder == globalLexical) {
+ return AttachDecision::NoAction;
+ }
+
+ GlobalObject* global = &globalLexical->global();
+
+ NativeGetPropKind kind = IsCacheableGetPropCall(global, holder, *prop);
+ if (kind != NativeGetPropKind::NativeGetter &&
+ kind != NativeGetPropKind::ScriptedGetter) {
+ return AttachDecision::NoAction;
+ }
+
+ bool needsWindowProxy =
+ IsWindow(global) && GetterNeedsWindowProxyThis(holder, *prop);
+
+ // Shape guard for global lexical.
+ writer.guardShape(objId, globalLexical->shape());
+
+ // Guard on the shape of the GlobalObject.
+ ObjOperandId globalId = writer.loadEnclosingEnvironment(objId);
+ writer.guardShape(globalId, global->shape());
+
+ if (holder != global) {
+ // Shape guard holder.
+ ObjOperandId holderId = writer.loadObject(holder);
+ writer.guardShape(holderId, holder->shape());
+ EmitGuardGetterSetterSlot(writer, holder, *prop, holderId,
+ /* holderIsConstant = */ true);
+ } else {
+ // Note: pass true for |holderIsConstant| because the holder must be the
+ // current global object.
+ EmitGuardGetterSetterSlot(writer, holder, *prop, globalId,
+ /* holderIsConstant = */ true);
+ }
+
+ if (CanAttachDOMGetterSetter(cx_, JSJitInfo::Getter, global, holder, *prop,
+ mode_)) {
+ // The global shape guard above ensures the instance JSClass is correct.
+ MOZ_ASSERT(!needsWindowProxy);
+ EmitCallDOMGetterResultNoGuards(writer, holder, *prop, globalId);
+ trackAttached("GetName.GlobalNameDOMGetter");
+ } else {
+ ObjOperandId receiverObjId;
+ if (needsWindowProxy) {
+ MOZ_ASSERT(cx_->global()->maybeWindowProxy());
+ receiverObjId = writer.loadObject(cx_->global()->maybeWindowProxy());
+ } else {
+ receiverObjId = globalId;
+ }
+ ValOperandId receiverId = writer.boxObject(receiverObjId);
+ EmitCallGetterResultNoGuards(cx_, writer, kind, global, holder, *prop,
+ receiverId);
+ trackAttached("GetName.GlobalNameGetter");
+ }
+
+ return AttachDecision::Attach;
+}
+
+static bool NeedEnvironmentShapeGuard(JSContext* cx, JSObject* envObj) {
+ if (!envObj->is<CallObject>()) {
+ return true;
+ }
+
+ // We can skip a guard on the call object if the script's bindings are
+ // guaranteed to be immutable (and thus cannot introduce shadowing variables).
+ // If the function is a relazified self-hosted function it has no BaseScript
+ // and we pessimistically create the guard.
+ CallObject* callObj = &envObj->as<CallObject>();
+ JSFunction* fun = &callObj->callee();
+ if (!fun->hasBaseScript() || fun->baseScript()->funHasExtensibleScope() ||
+ DebugEnvironments::hasDebugEnvironment(cx, *callObj)) {
+ return true;
+ }
+
+ return false;
+}
+
+static ValOperandId EmitLoadEnvironmentSlot(CacheIRWriter& writer,
+ NativeObject* holder,
+ ObjOperandId holderId,
+ uint32_t slot) {
+ if (holder->isFixedSlot(slot)) {
+ return writer.loadFixedSlot(holderId,
+ NativeObject::getFixedSlotOffset(slot));
+ }
+ size_t dynamicSlotIndex = holder->dynamicSlotIndex(slot);
+ return writer.loadDynamicSlot(holderId, dynamicSlotIndex);
+}
+
+AttachDecision GetNameIRGenerator::tryAttachEnvironmentName(ObjOperandId objId,
+ HandleId id) {
+ if (IsGlobalOp(JSOp(*pc_)) || script_->hasNonSyntacticScope()) {
+ return AttachDecision::NoAction;
+ }
+
+ JSObject* env = env_;
+ Maybe<PropertyInfo> prop;
+ NativeObject* holder = nullptr;
+
+ while (env) {
+ if (env->is<GlobalObject>()) {
+ prop = env->as<GlobalObject>().lookup(cx_, id);
+ if (prop.isSome()) {
+ break;
+ }
+ return AttachDecision::NoAction;
+ }
+
+ if (!env->is<EnvironmentObject>() || env->is<WithEnvironmentObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check for an 'own' property on the env. There is no need to
+ // check the prototype as non-with scopes do not inherit properties
+ // from any prototype.
+ prop = env->as<NativeObject>().lookup(cx_, id);
+ if (prop.isSome()) {
+ break;
+ }
+
+ env = env->enclosingEnvironment();
+ }
+
+ holder = &env->as<NativeObject>();
+ if (!IsCacheableGetPropSlot(holder, holder, *prop)) {
+ return AttachDecision::NoAction;
+ }
+ if (holder->getSlot(prop->slot()).isMagic()) {
+ MOZ_ASSERT(holder->is<EnvironmentObject>());
+ return AttachDecision::NoAction;
+ }
+
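+  // Walk the environment chain again, this time emitting the shape guards and
+  // enclosing-environment loads that mirror the lookup above.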
+ ObjOperandId lastObjId = objId;
+ env = env_;
+ while (env) {
+ if (NeedEnvironmentShapeGuard(cx_, env)) {
+ writer.guardShape(lastObjId, env->shape());
+ }
+
+ if (env == holder) {
+ break;
+ }
+
+ lastObjId = writer.loadEnclosingEnvironment(lastObjId);
+ env = env->enclosingEnvironment();
+ }
+
+ ValOperandId resId =
+ EmitLoadEnvironmentSlot(writer, holder, lastObjId, prop->slot());
+ if (holder->is<EnvironmentObject>()) {
+ writer.guardIsNotUninitializedLexical(resId);
+ }
+ writer.loadOperandResult(resId);
+ writer.returnFromIC();
+
+ trackAttached("GetName.EnvironmentName");
+ return AttachDecision::Attach;
+}
+
+void GetNameIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("base", ObjectValue(*env_));
+ sp.valueProperty("property", StringValue(name_));
+ }
+#endif
+}
+
+BindNameIRGenerator::BindNameIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleObject env,
+ Handle<PropertyName*> name)
+ : IRGenerator(cx, script, pc, CacheKind::BindName, state),
+ env_(env),
+ name_(name) {}
+
+AttachDecision BindNameIRGenerator::tryAttachStub() {
+ MOZ_ASSERT(cacheKind_ == CacheKind::BindName);
+
+ AutoAssertNoPendingException aanpe(cx_);
+
+ ObjOperandId envId(writer.setInputOperandId(0));
+ RootedId id(cx_, NameToId(name_));
+
+ TRY_ATTACH(tryAttachGlobalName(envId, id));
+ TRY_ATTACH(tryAttachEnvironmentName(envId, id));
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+AttachDecision BindNameIRGenerator::tryAttachGlobalName(ObjOperandId objId,
+ HandleId id) {
+ if (!IsGlobalOp(JSOp(*pc_))) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+
+ Handle<GlobalLexicalEnvironmentObject*> globalLexical =
+ env_.as<GlobalLexicalEnvironmentObject>();
+ MOZ_ASSERT(globalLexical->isGlobal());
+
+ JSObject* result = nullptr;
+ if (Maybe<PropertyInfo> prop = globalLexical->lookup(cx_, id)) {
+ // If this is an uninitialized lexical or a const, we need to return a
+ // RuntimeLexicalErrorObject.
+ if (globalLexical->getSlot(prop->slot()).isMagic() || !prop->writable()) {
+ return AttachDecision::NoAction;
+ }
+ result = globalLexical;
+ } else {
+ result = &globalLexical->global();
+ }
+
+ if (result == globalLexical) {
+ // Lexical bindings are non-configurable so we can just return the
+ // global lexical.
+ writer.loadObjectResult(objId);
+ } else {
+ // If the property exists on the global and is non-configurable, it cannot
+ // be shadowed by the lexical scope so we can just return the global without
+ // a shape guard.
+ Maybe<PropertyInfo> prop = result->as<GlobalObject>().lookup(cx_, id);
+ if (prop.isNothing() || prop->configurable()) {
+ writer.guardShape(objId, globalLexical->shape());
+ }
+ ObjOperandId globalId = writer.loadEnclosingEnvironment(objId);
+ writer.loadObjectResult(globalId);
+ }
+ writer.returnFromIC();
+
+ trackAttached("BindName.GlobalName");
+ return AttachDecision::Attach;
+}
+
+AttachDecision BindNameIRGenerator::tryAttachEnvironmentName(ObjOperandId objId,
+ HandleId id) {
+ if (IsGlobalOp(JSOp(*pc_)) || script_->hasNonSyntacticScope()) {
+ return AttachDecision::NoAction;
+ }
+
+ JSObject* env = env_;
+ Maybe<PropertyInfo> prop;
+ while (true) {
+ if (!env->is<GlobalObject>() && !env->is<EnvironmentObject>()) {
+ return AttachDecision::NoAction;
+ }
+ if (env->is<WithEnvironmentObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // When we reach an unqualified variables object (like the global) we
+ // have to stop looking and return that object.
+ if (env->isUnqualifiedVarObj()) {
+ break;
+ }
+
+ // Check for an 'own' property on the env. There is no need to
+ // check the prototype as non-with scopes do not inherit properties
+ // from any prototype.
+ prop = env->as<NativeObject>().lookup(cx_, id);
+ if (prop.isSome()) {
+ break;
+ }
+
+ env = env->enclosingEnvironment();
+ }
+
+ // If this is an uninitialized lexical or a const, we need to return a
+ // RuntimeLexicalErrorObject.
+ auto* holder = &env->as<NativeObject>();
+ if (prop.isSome() && holder->is<EnvironmentObject>() &&
+ (holder->getSlot(prop->slot()).isMagic() || !prop->writable())) {
+ return AttachDecision::NoAction;
+ }
+
+ ObjOperandId lastObjId = objId;
+ env = env_;
+ while (env) {
+ if (NeedEnvironmentShapeGuard(cx_, env) && !env->is<GlobalObject>()) {
+ writer.guardShape(lastObjId, env->shape());
+ }
+
+ if (env == holder) {
+ break;
+ }
+
+ lastObjId = writer.loadEnclosingEnvironment(lastObjId);
+ env = env->enclosingEnvironment();
+ }
+
+ if (prop.isSome() && holder->is<EnvironmentObject>()) {
+ ValOperandId valId =
+ EmitLoadEnvironmentSlot(writer, holder, lastObjId, prop->slot());
+ writer.guardIsNotUninitializedLexical(valId);
+ }
+
+ writer.loadObjectResult(lastObjId);
+ writer.returnFromIC();
+
+ trackAttached("BindName.EnvironmentName");
+ return AttachDecision::Attach;
+}
+
+void BindNameIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("base", ObjectValue(*env_));
+ sp.valueProperty("property", StringValue(name_));
+ }
+#endif
+}
+
+HasPropIRGenerator::HasPropIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ CacheKind cacheKind, HandleValue idVal,
+ HandleValue val)
+ : IRGenerator(cx, script, pc, cacheKind, state), val_(val), idVal_(idVal) {}
+
+AttachDecision HasPropIRGenerator::tryAttachDense(HandleObject obj,
+ ObjOperandId objId,
+ uint32_t index,
+ Int32OperandId indexId) {
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+
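+  // e.g. |3 in arr| where |arr| has a dense element at index 3.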
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (!nobj->containsDenseElement(index)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (mode_ == ICState::Mode::Megamorphic) {
+ writer.guardIsNativeObject(objId);
+ } else {
+ // Guard shape to ensure object class is NativeObject.
+ TestMatchingNativeReceiver(writer, nobj, objId);
+ }
+ writer.loadDenseElementExistsResult(objId, indexId);
+ writer.returnFromIC();
+
+ trackAttached("HasProp.Dense");
+ return AttachDecision::Attach;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachDenseHole(HandleObject obj,
+ ObjOperandId objId,
+ uint32_t index,
+ Int32OperandId indexId) {
+ bool hasOwn = (cacheKind_ == CacheKind::HasOwn);
+ OwnProperty ownProp = hasOwn ? OwnProperty::Yes : OwnProperty::No;
+
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (nobj->containsDenseElement(index)) {
+ return AttachDecision::NoAction;
+ }
+ if (!CanAttachDenseElementHole(nobj, ownProp)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Guard shape to ensure class is NativeObject and to prevent non-dense
+ // elements being added. Also ensures prototype doesn't change if dynamic
+ // checks aren't emitted.
+ TestMatchingNativeReceiver(writer, nobj, objId);
+
+  // Generate prototype guards if needed. This includes guarding that
+  // properties were not added anywhere in the chain.
+ if (!hasOwn) {
+ GeneratePrototypeHoleGuards(writer, nobj, objId,
+ /* alwaysGuardFirstProto = */ false);
+ }
+
+ writer.loadDenseElementHoleExistsResult(objId, indexId);
+ writer.returnFromIC();
+
+ trackAttached("HasProp.DenseHole");
+ return AttachDecision::Attach;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachSparse(HandleObject obj,
+ ObjOperandId objId,
+ Int32OperandId indexId) {
+ bool hasOwn = (cacheKind_ == CacheKind::HasOwn);
+ OwnProperty ownProp = hasOwn ? OwnProperty::Yes : OwnProperty::No;
+
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* nobj = &obj->as<NativeObject>();
+
+ if (!nobj->isIndexed()) {
+ return AttachDecision::NoAction;
+ }
+ if (!CanAttachDenseElementHole(nobj, ownProp, AllowIndexedReceiver::Yes)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Guard that this is a native object.
+ writer.guardIsNativeObject(objId);
+
+  // Generate prototype guards if needed. This includes guarding that
+  // properties were not added anywhere in the chain.
+ if (!hasOwn) {
+ GeneratePrototypeHoleGuards(writer, nobj, objId,
+ /* alwaysGuardFirstProto = */ true);
+ }
+
+  // Because of the prototype guard we know that the prototype chain
+  // does not include any dense or sparse (i.e. indexed) properties.
+ writer.callObjectHasSparseElementResult(objId, indexId);
+ writer.returnFromIC();
+
+ trackAttached("HasProp.Sparse");
+ return AttachDecision::Attach;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachArgumentsObjectArg(
+ HandleObject obj, ObjOperandId objId, Int32OperandId indexId) {
+ bool hasOwn = (cacheKind_ == CacheKind::HasOwn);
+ OwnProperty ownProp = hasOwn ? OwnProperty::Yes : OwnProperty::No;
+
+ if (!obj->is<ArgumentsObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* args = &obj->as<ArgumentsObject>();
+
+  // Elements must not have been overridden or deleted.
+ if (args->hasOverriddenElement()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!CanAttachDenseElementHole(args, ownProp, AllowIndexedReceiver::Yes,
+ AllowExtraReceiverProperties::Yes)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (args->is<MappedArgumentsObject>()) {
+ writer.guardClass(objId, GuardClassKind::MappedArguments);
+ } else {
+ MOZ_ASSERT(args->is<UnmappedArgumentsObject>());
+ writer.guardClass(objId, GuardClassKind::UnmappedArguments);
+ }
+
+ if (!hasOwn) {
+ GeneratePrototypeHoleGuards(writer, args, objId,
+ /* alwaysGuardFirstProto = */ true);
+ }
+
+ writer.loadArgumentsObjectArgExistsResult(objId, indexId);
+ writer.returnFromIC();
+
+ trackAttached("HasProp.ArgumentsObjectArg");
+ return AttachDecision::Attach;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachNamedProp(HandleObject obj,
+ ObjOperandId objId,
+ HandleId key,
+ ValOperandId keyId) {
+ bool hasOwn = (cacheKind_ == CacheKind::HasOwn);
+
+ NativeObject* holder = nullptr;
+ PropertyResult prop;
+
+ if (hasOwn) {
+ if (!LookupOwnPropertyPure(cx_, obj, key, &prop)) {
+ return AttachDecision::NoAction;
+ }
+
+ holder = &obj->as<NativeObject>();
+ } else {
+ if (!LookupPropertyPure(cx_, obj, key, &holder, &prop)) {
+ return AttachDecision::NoAction;
+ }
+ }
+ if (prop.isNotFound()) {
+ return AttachDecision::NoAction;
+ }
+ auto* nobj = &obj->as<NativeObject>();
+
+ TRY_ATTACH(tryAttachMegamorphic(objId, keyId));
+ TRY_ATTACH(tryAttachNative(nobj, objId, key, keyId, prop, holder));
+
+ return AttachDecision::NoAction;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachMegamorphic(ObjOperandId objId,
+ ValOperandId keyId) {
+ bool hasOwn = (cacheKind_ == CacheKind::HasOwn);
+
+ if (mode_ != ICState::Mode::Megamorphic) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.megamorphicHasPropResult(objId, keyId, hasOwn);
+ writer.returnFromIC();
+ trackAttached("HasProp.Megamorphic");
+ return AttachDecision::Attach;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachNative(NativeObject* obj,
+ ObjOperandId objId, jsid key,
+ ValOperandId keyId,
+ PropertyResult prop,
+ NativeObject* holder) {
+ MOZ_ASSERT(IsCacheableProtoChain(obj, holder));
+
+ if (!prop.isNativeProperty()) {
+ return AttachDecision::NoAction;
+ }
+
+ emitIdGuard(keyId, idVal_, key);
+ EmitReadSlotGuard(writer, obj, holder, objId);
+ writer.loadBooleanResult(true);
+ writer.returnFromIC();
+
+ trackAttached("HasProp.Native");
+ return AttachDecision::Attach;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachTypedArray(HandleObject obj,
+ ObjOperandId objId,
+ ValOperandId keyId) {
+ if (!obj->is<TypedArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ int64_t index;
+ if (!ValueIsInt64Index(idVal_, &index)) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.guardIsTypedArray(objId);
+ IntPtrOperandId intPtrIndexId =
+ guardToIntPtrIndex(idVal_, keyId, /* supportOOB = */ true);
+ writer.loadTypedArrayElementExistsResult(objId, intPtrIndexId);
+ writer.returnFromIC();
+
+ trackAttached("HasProp.TypedArrayObject");
+ return AttachDecision::Attach;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachSlotDoesNotExist(
+ NativeObject* obj, ObjOperandId objId, jsid key, ValOperandId keyId) {
+ bool hasOwn = (cacheKind_ == CacheKind::HasOwn);
+
+ emitIdGuard(keyId, idVal_, key);
+ if (hasOwn) {
+ TestMatchingNativeReceiver(writer, obj, objId);
+ } else {
+ EmitMissingPropGuard(writer, obj, objId);
+ }
+ writer.loadBooleanResult(false);
+ writer.returnFromIC();
+
+ trackAttached("HasProp.DoesNotExist");
+ return AttachDecision::Attach;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachDoesNotExist(HandleObject obj,
+ ObjOperandId objId,
+ HandleId key,
+ ValOperandId keyId) {
+ bool hasOwn = (cacheKind_ == CacheKind::HasOwn);
+
+  // Check that the property doesn't exist on |obj| or its prototype chain.
+  // These checks allow NativeObjects with a NativeObject prototype chain. They
+  // return NoAction when the answer is unknown, e.g. because of resolve hooks
+  // or proxies.
+ if (hasOwn) {
+ if (!CheckHasNoSuchOwnProperty(cx_, obj, key)) {
+ return AttachDecision::NoAction;
+ }
+ } else {
+ if (!CheckHasNoSuchProperty(cx_, obj, key)) {
+ return AttachDecision::NoAction;
+ }
+ }
+ auto* nobj = &obj->as<NativeObject>();
+
+ TRY_ATTACH(tryAttachMegamorphic(objId, keyId));
+ TRY_ATTACH(tryAttachSlotDoesNotExist(nobj, objId, key, keyId));
+
+ return AttachDecision::NoAction;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachProxyElement(HandleObject obj,
+ ObjOperandId objId,
+ ValOperandId keyId) {
+ bool hasOwn = (cacheKind_ == CacheKind::HasOwn);
+
+ if (!obj->is<ProxyObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.guardIsProxy(objId);
+ writer.proxyHasPropResult(objId, keyId, hasOwn);
+ writer.returnFromIC();
+
+ trackAttached("HasProp.ProxyElement");
+ return AttachDecision::Attach;
+}
+
+AttachDecision HasPropIRGenerator::tryAttachStub() {
+ MOZ_ASSERT(cacheKind_ == CacheKind::In || cacheKind_ == CacheKind::HasOwn);
+
+ AutoAssertNoPendingException aanpe(cx_);
+
+ // NOTE: Argument order is PROPERTY, OBJECT
+ ValOperandId keyId(writer.setInputOperandId(0));
+ ValOperandId valId(writer.setInputOperandId(1));
+
+ if (!val_.isObject()) {
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+ RootedObject obj(cx_, &val_.toObject());
+ ObjOperandId objId = writer.guardToObject(valId);
+
+ // Optimize Proxies
+ TRY_ATTACH(tryAttachProxyElement(obj, objId, keyId));
+
+ RootedId id(cx_);
+ bool nameOrSymbol;
+ if (!ValueToNameOrSymbolId(cx_, idVal_, &id, &nameOrSymbol)) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+
+ if (nameOrSymbol) {
+ TRY_ATTACH(tryAttachNamedProp(obj, objId, id, keyId));
+ TRY_ATTACH(tryAttachDoesNotExist(obj, objId, id, keyId));
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ TRY_ATTACH(tryAttachTypedArray(obj, objId, keyId));
+
+ uint32_t index;
+ Int32OperandId indexId;
+ if (maybeGuardInt32Index(idVal_, keyId, &index, &indexId)) {
+ TRY_ATTACH(tryAttachDense(obj, objId, index, indexId));
+ TRY_ATTACH(tryAttachDenseHole(obj, objId, index, indexId));
+ TRY_ATTACH(tryAttachSparse(obj, objId, indexId));
+ TRY_ATTACH(tryAttachArgumentsObjectArg(obj, objId, indexId));
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+void HasPropIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("base", val_);
+ sp.valueProperty("property", idVal_);
+ }
+#endif
+}
+
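+// CheckPrivateField is used e.g. for |#field in obj| checks; the key is
+// always a private name symbol (see the assertion below).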
+CheckPrivateFieldIRGenerator::CheckPrivateFieldIRGenerator(
+ JSContext* cx, HandleScript script, jsbytecode* pc, ICState state,
+ CacheKind cacheKind, HandleValue idVal, HandleValue val)
+ : IRGenerator(cx, script, pc, cacheKind, state), val_(val), idVal_(idVal) {
+ MOZ_ASSERT(idVal.isSymbol() && idVal.toSymbol()->isPrivateName());
+}
+
+AttachDecision CheckPrivateFieldIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ ValOperandId keyId(writer.setInputOperandId(1));
+
+ if (!val_.isObject()) {
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+ JSObject* obj = &val_.toObject();
+ ObjOperandId objId = writer.guardToObject(valId);
+ PropertyKey key = PropertyKey::Symbol(idVal_.toSymbol());
+
+ ThrowCondition condition;
+ ThrowMsgKind msgKind;
+ GetCheckPrivateFieldOperands(pc_, &condition, &msgKind);
+
+ PropertyResult prop;
+ if (!LookupOwnPropertyPure(cx_, obj, key, &prop)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (CheckPrivateFieldWillThrow(condition, prop.isFound())) {
+ // Don't attach a stub if the operation will throw.
+ return AttachDecision::NoAction;
+ }
+
+ auto* nobj = &obj->as<NativeObject>();
+
+ TRY_ATTACH(tryAttachNative(nobj, objId, key, keyId, prop));
+
+ return AttachDecision::NoAction;
+}
+
+AttachDecision CheckPrivateFieldIRGenerator::tryAttachNative(
+ NativeObject* obj, ObjOperandId objId, jsid key, ValOperandId keyId,
+ PropertyResult prop) {
+ MOZ_ASSERT(prop.isNativeProperty() || prop.isNotFound());
+
+ emitIdGuard(keyId, idVal_, key);
+ TestMatchingNativeReceiver(writer, obj, objId);
+ writer.loadBooleanResult(prop.isFound());
+ writer.returnFromIC();
+
+ trackAttached("CheckPrivateField.Native");
+ return AttachDecision::Attach;
+}
+
+void CheckPrivateFieldIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("base", val_);
+ sp.valueProperty("property", idVal_);
+ }
+#endif
+}
+
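+// Try to turn |index| into a non-negative int32 index the element stubs can
+// use. Int32 values and doubles that are exactly representable as an int32
+// (including negative zero) are handled directly; strings are accepted when
+// they parse as an index. Emits the matching guards and returns false
+// otherwise.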
+bool IRGenerator::maybeGuardInt32Index(const Value& index, ValOperandId indexId,
+ uint32_t* int32Index,
+ Int32OperandId* int32IndexId) {
+ if (index.isNumber()) {
+ int32_t indexSigned;
+ if (index.isInt32()) {
+ indexSigned = index.toInt32();
+ } else {
+ // We allow negative zero here.
+ if (!mozilla::NumberEqualsInt32(index.toDouble(), &indexSigned)) {
+ return false;
+ }
+ }
+
+ if (indexSigned < 0) {
+ return false;
+ }
+
+ *int32Index = uint32_t(indexSigned);
+ *int32IndexId = writer.guardToInt32Index(indexId);
+ return true;
+ }
+
+ if (index.isString()) {
+ int32_t indexSigned = GetIndexFromString(index.toString());
+ if (indexSigned < 0) {
+ return false;
+ }
+
+ StringOperandId strId = writer.guardToString(indexId);
+ *int32Index = uint32_t(indexSigned);
+ *int32IndexId = writer.guardStringToIndex(strId);
+ return true;
+ }
+
+ return false;
+}
+
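+// SetPropIRGenerator drives the SetProp/SetElem caches, covering plain
+// assignments like |obj.x = v| and |obj[i] = v| as well as the InitElem-style
+// property initializers; the LHS, key, and RHS values are captured so the
+// individual tryAttach helpers can inspect them.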
+SetPropIRGenerator::SetPropIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, CacheKind cacheKind,
+ ICState state, HandleValue lhsVal,
+ HandleValue idVal, HandleValue rhsVal)
+ : IRGenerator(cx, script, pc, cacheKind, state),
+ lhsVal_(lhsVal),
+ idVal_(idVal),
+ rhsVal_(rhsVal) {}
+
+AttachDecision SetPropIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+
+ ValOperandId objValId(writer.setInputOperandId(0));
+ ValOperandId rhsValId;
+ if (cacheKind_ == CacheKind::SetProp) {
+ rhsValId = ValOperandId(writer.setInputOperandId(1));
+ } else {
+ MOZ_ASSERT(cacheKind_ == CacheKind::SetElem);
+ MOZ_ASSERT(setElemKeyValueId().id() == 1);
+ writer.setInputOperandId(1);
+ rhsValId = ValOperandId(writer.setInputOperandId(2));
+ }
+
+ RootedId id(cx_);
+ bool nameOrSymbol;
+ if (!ValueToNameOrSymbolId(cx_, idVal_, &id, &nameOrSymbol)) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+
+ if (lhsVal_.isObject()) {
+ RootedObject obj(cx_, &lhsVal_.toObject());
+
+ ObjOperandId objId = writer.guardToObject(objValId);
+ if (IsPropertySetOp(JSOp(*pc_))) {
+ TRY_ATTACH(tryAttachMegamorphicSetElement(obj, objId, rhsValId));
+ }
+ if (nameOrSymbol) {
+ TRY_ATTACH(tryAttachNativeSetSlot(obj, objId, id, rhsValId));
+ if (IsPropertySetOp(JSOp(*pc_))) {
+ TRY_ATTACH(tryAttachSetArrayLength(obj, objId, id, rhsValId));
+ TRY_ATTACH(tryAttachSetter(obj, objId, id, rhsValId));
+ TRY_ATTACH(tryAttachWindowProxy(obj, objId, id, rhsValId));
+ TRY_ATTACH(tryAttachProxy(obj, objId, id, rhsValId));
+ TRY_ATTACH(tryAttachMegamorphicSetSlot(obj, objId, id, rhsValId));
+ }
+ if (canAttachAddSlotStub(obj, id)) {
+ deferType_ = DeferType::AddSlot;
+ return AttachDecision::Deferred;
+ }
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(cacheKind_ == CacheKind::SetElem);
+
+ if (IsPropertySetOp(JSOp(*pc_))) {
+ TRY_ATTACH(tryAttachProxyElement(obj, objId, rhsValId));
+ }
+
+ TRY_ATTACH(tryAttachSetTypedArrayElement(obj, objId, rhsValId));
+
+ uint32_t index;
+ Int32OperandId indexId;
+ if (maybeGuardInt32Index(idVal_, setElemKeyValueId(), &index, &indexId)) {
+ TRY_ATTACH(
+ tryAttachSetDenseElement(obj, objId, index, indexId, rhsValId));
+ TRY_ATTACH(
+ tryAttachSetDenseElementHole(obj, objId, index, indexId, rhsValId));
+ TRY_ATTACH(tryAttachAddOrUpdateSparseElement(obj, objId, index, indexId,
+ rhsValId));
+ return AttachDecision::NoAction;
+ }
+ }
+ return AttachDecision::NoAction;
+}
+
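+// Emit the store for a plain data slot, choosing the fixed- or dynamic-slot
+// variant depending on where the property's slot lives on |nobj|.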
+static void EmitStoreSlotAndReturn(CacheIRWriter& writer, ObjOperandId objId,
+ NativeObject* nobj, PropertyInfo prop,
+ ValOperandId rhsId) {
+ if (nobj->isFixedSlot(prop.slot())) {
+ size_t offset = NativeObject::getFixedSlotOffset(prop.slot());
+ writer.storeFixedSlot(objId, offset, rhsId);
+ } else {
+ size_t offset = nobj->dynamicSlotIndex(prop.slot()) * sizeof(Value);
+ writer.storeDynamicSlot(objId, offset, rhsId);
+ }
+ writer.returnFromIC();
+}
+
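+// Look up |id| on |obj| and return its PropertyInfo only if it is a writable
+// data property a set-slot stub may store to directly. For init ops the
+// property's existing flags must also match what the init would produce, so
+// no attribute change is needed.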
+static Maybe<PropertyInfo> LookupShapeForSetSlot(JSOp op, NativeObject* obj,
+ jsid id) {
+ Maybe<PropertyInfo> prop = obj->lookupPure(id);
+ if (prop.isNothing() || !prop->isDataProperty() || !prop->writable()) {
+ return mozilla::Nothing();
+ }
+
+ // If this is a property init operation, the property's attributes may have to
+ // be changed too, so make sure the current flags match.
+ if (IsPropertyInitOp(op)) {
+ // Don't support locked init operations.
+ if (IsLockedInitOp(op)) {
+ return mozilla::Nothing();
+ }
+
+ // Can't redefine a non-configurable property.
+ if (!prop->configurable()) {
+ return mozilla::Nothing();
+ }
+
+ // Make sure the enumerable flag matches the init operation.
+ if (IsHiddenInitOp(op) == prop->enumerable()) {
+ return mozilla::Nothing();
+ }
+ }
+
+ return prop;
+}
+
+static bool CanAttachNativeSetSlot(JSOp op, JSObject* obj, PropertyKey id,
+ Maybe<PropertyInfo>* prop) {
+ if (!obj->is<NativeObject>()) {
+ return false;
+ }
+
+ *prop = LookupShapeForSetSlot(op, &obj->as<NativeObject>(), id);
+ return prop->isSome();
+}
+
+// There is no need to guard on the shape. Global lexical bindings are
+// non-configurable and cannot be shadowed.
+static bool IsGlobalLexicalSetGName(JSOp op, NativeObject* obj,
+ PropertyInfo prop) {
+ // Ensure that the env can't change.
+ if (op != JSOp::SetGName && op != JSOp::StrictSetGName) {
+ return false;
+ }
+
+ if (!obj->is<GlobalLexicalEnvironmentObject>()) {
+ return false;
+ }
+
+ // Uninitialized let bindings use a RuntimeLexicalErrorObject.
+ MOZ_ASSERT(!obj->getSlot(prop.slot()).isMagic());
+ MOZ_ASSERT(prop.writable());
+ MOZ_ASSERT(!prop.configurable());
+ return true;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachNativeSetSlot(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id,
+ ValOperandId rhsId) {
+ Maybe<PropertyInfo> prop;
+ if (!CanAttachNativeSetSlot(JSOp(*pc_), obj, id, &prop)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (mode_ == ICState::Mode::Megamorphic && cacheKind_ == CacheKind::SetProp &&
+ IsPropertySetOp(JSOp(*pc_))) {
+ return AttachDecision::NoAction;
+ }
+
+ maybeEmitIdGuard(id);
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (!IsGlobalLexicalSetGName(JSOp(*pc_), nobj, *prop)) {
+ TestMatchingNativeReceiver(writer, nobj, objId);
+ }
+ EmitStoreSlotAndReturn(writer, objId, nobj, *prop, rhsId);
+
+ trackAttached("SetProp.NativeSlot");
+ return AttachDecision::Attach;
+}
+
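+// Emit the RHS guard matching a typed array element type: the integer types
+// use guardToInt32ModUint32, the floating point types guardIsNumber,
+// Uint8Clamped its own clamping guard, and the 64-bit integer types
+// guardToBigInt.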
+OperandId IRGenerator::emitNumericGuard(ValOperandId valId, Scalar::Type type) {
+ switch (type) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ return writer.guardToInt32ModUint32(valId);
+
+ case Scalar::Float32:
+ case Scalar::Float64:
+ return writer.guardIsNumber(valId);
+
+ case Scalar::Uint8Clamped:
+ return writer.guardToUint8Clamped(valId);
+
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ return writer.guardToBigInt(valId);
+
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ break;
+ }
+ MOZ_CRASH("Unsupported TypedArray type");
+}
+
+static bool ValueIsNumeric(Scalar::Type type, const Value& val) {
+ if (Scalar::isBigIntType(type)) {
+ return val.isBigInt();
+ }
+ return val.isNumber();
+}
+
+void SetPropIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.opcodeProperty("op", JSOp(*pc_));
+ sp.valueProperty("base", lhsVal_);
+ sp.valueProperty("property", idVal_);
+ sp.valueProperty("value", rhsVal_);
+ }
+#endif
+}
+
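+// The two predicates below decide whether a property's setter can be called
+// directly from IC code: the property must be an accessor whose setter is a
+// JSFunction and not a class constructor. Natives without a JIT entry go
+// through callNativeSetter; scripted setters (and natives with a JIT entry)
+// go through callScriptedSetter.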
+static bool IsCacheableSetPropCallNative(NativeObject* obj,
+ NativeObject* holder,
+ PropertyInfo prop) {
+ MOZ_ASSERT(IsCacheableProtoChain(obj, holder));
+
+ if (!prop.isAccessorProperty()) {
+ return false;
+ }
+
+ JSObject* setterObject = holder->getSetter(prop);
+ if (!setterObject || !setterObject->is<JSFunction>()) {
+ return false;
+ }
+
+ JSFunction& setter = setterObject->as<JSFunction>();
+ if (!setter.isNativeWithoutJitEntry()) {
+ return false;
+ }
+
+ if (setter.isClassConstructor()) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool IsCacheableSetPropCallScripted(NativeObject* obj,
+ NativeObject* holder,
+ PropertyInfo prop) {
+ MOZ_ASSERT(IsCacheableProtoChain(obj, holder));
+
+ if (!prop.isAccessorProperty()) {
+ return false;
+ }
+
+ JSObject* setterObject = holder->getSetter(prop);
+ if (!setterObject || !setterObject->is<JSFunction>()) {
+ return false;
+ }
+
+ JSFunction& setter = setterObject->as<JSFunction>();
+ if (setter.isClassConstructor()) {
+ return false;
+ }
+
+ // Scripted functions and natives with JIT entry can use the scripted path.
+ return setter.hasJitEntry();
+}
+
+static bool CanAttachSetter(JSContext* cx, jsbytecode* pc, JSObject* obj,
+ PropertyKey id, NativeObject** holder,
+ Maybe<PropertyInfo>* propInfo) {
+ // Don't attach a setter stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc)));
+
+ PropertyResult prop;
+ if (!LookupPropertyPure(cx, obj, id, holder, &prop)) {
+ return false;
+ }
+ auto* nobj = &obj->as<NativeObject>();
+
+ if (!prop.isNativeProperty()) {
+ return false;
+ }
+
+ if (!IsCacheableSetPropCallScripted(nobj, *holder, prop.propertyInfo()) &&
+ !IsCacheableSetPropCallNative(nobj, *holder, prop.propertyInfo())) {
+ return false;
+ }
+
+ *propInfo = mozilla::Some(prop.propertyInfo());
+ return true;
+}
+
+static void EmitCallSetterNoGuards(JSContext* cx, CacheIRWriter& writer,
+ NativeObject* obj, NativeObject* holder,
+ PropertyInfo prop, ObjOperandId receiverId,
+ ValOperandId rhsId) {
+ JSFunction* target = &holder->getSetter(prop)->as<JSFunction>();
+ bool sameRealm = cx->realm() == target->realm();
+
+ if (target->isNativeWithoutJitEntry()) {
+ MOZ_ASSERT(IsCacheableSetPropCallNative(obj, holder, prop));
+ writer.callNativeSetter(receiverId, target, rhsId, sameRealm);
+ writer.returnFromIC();
+ return;
+ }
+
+ MOZ_ASSERT(IsCacheableSetPropCallScripted(obj, holder, prop));
+ writer.callScriptedSetter(receiverId, target, rhsId, sameRealm);
+ writer.returnFromIC();
+}
+
+static void EmitCallDOMSetterNoGuards(JSContext* cx, CacheIRWriter& writer,
+ NativeObject* holder, PropertyInfo prop,
+ ObjOperandId objId, ValOperandId rhsId) {
+ JSFunction* setter = &holder->getSetter(prop)->as<JSFunction>();
+ MOZ_ASSERT(cx->realm() == setter->realm());
+
+ writer.callDOMSetter(objId, setter->jitInfo(), rhsId);
+ writer.returnFromIC();
+}
+
+AttachDecision SetPropIRGenerator::tryAttachSetter(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id,
+ ValOperandId rhsId) {
+ // Don't attach a setter stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ if (!CanAttachSetter(cx_, pc_, obj, id, &holder, &prop)) {
+ return AttachDecision::NoAction;
+ }
+ auto* nobj = &obj->as<NativeObject>();
+
+ bool needsWindowProxy =
+ IsWindow(nobj) && SetterNeedsWindowProxyThis(holder, *prop);
+
+ maybeEmitIdGuard(id);
+
+ // Use the megamorphic guard if we're in megamorphic mode, except if |obj|
+ // is a Window as GuardHasGetterSetter doesn't support this yet (Window may
+ // require outerizing).
+ if (mode_ == ICState::Mode::Specialized || IsWindow(nobj)) {
+ TestMatchingNativeReceiver(writer, nobj, objId);
+
+ if (nobj != holder) {
+ GeneratePrototypeGuards(writer, nobj, holder, objId);
+
+ // Guard on the holder's shape.
+ ObjOperandId holderId = writer.loadObject(holder);
+ TestMatchingHolder(writer, holder, holderId);
+
+ EmitGuardGetterSetterSlot(writer, holder, *prop, holderId,
+ /* holderIsConstant = */ true);
+ } else {
+ EmitGuardGetterSetterSlot(writer, holder, *prop, objId);
+ }
+ } else {
+ GetterSetter* gs = holder->getGetterSetter(*prop);
+ writer.guardHasGetterSetter(objId, id, gs);
+ }
+
+ if (CanAttachDOMGetterSetter(cx_, JSJitInfo::Setter, nobj, holder, *prop,
+ mode_)) {
+ MOZ_ASSERT(!needsWindowProxy);
+ EmitCallDOMSetterNoGuards(cx_, writer, holder, *prop, objId, rhsId);
+
+ trackAttached("SetProp.DOMSetter");
+ return AttachDecision::Attach;
+ }
+
+ ObjOperandId receiverId;
+ if (needsWindowProxy) {
+ MOZ_ASSERT(cx_->global()->maybeWindowProxy());
+ receiverId = writer.loadObject(cx_->global()->maybeWindowProxy());
+ } else {
+ receiverId = objId;
+ }
+ EmitCallSetterNoGuards(cx_, writer, nobj, holder, *prop, receiverId, rhsId);
+
+ trackAttached("SetProp.Setter");
+ return AttachDecision::Attach;
+}
+
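+// Handle assignments to the |length| property of an array with writable
+// length, e.g. |arr.length = 0|. The length update itself is delegated to a
+// VM call (callSetArrayLength) so the full Array length semantics apply.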
+AttachDecision SetPropIRGenerator::tryAttachSetArrayLength(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id,
+ ValOperandId rhsId) {
+ // Don't attach an array length stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ if (!obj->is<ArrayObject>() || !id.isAtom(cx_->names().length) ||
+ !obj->as<ArrayObject>().lengthIsWritable()) {
+ return AttachDecision::NoAction;
+ }
+
+ maybeEmitIdGuard(id);
+ emitOptimisticClassGuard(objId, obj, GuardClassKind::Array);
+ writer.callSetArrayLength(objId, IsStrictSetPC(pc_), rhsId);
+ writer.returnFromIC();
+
+ trackAttached("SetProp.ArrayLength");
+ return AttachDecision::Attach;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachSetDenseElement(
+ HandleObject obj, ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId, ValOperandId rhsId) {
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (!nobj->containsDenseElement(index) || nobj->denseElementsAreFrozen()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Setting holes requires extra code for marking the elements non-packed.
+ MOZ_ASSERT(!rhsVal_.isMagic(JS_ELEMENTS_HOLE));
+
+ JSOp op = JSOp(*pc_);
+
+ // We don't currently emit locked init for any indexed properties.
+ MOZ_ASSERT(!IsLockedInitOp(op));
+
+ // We don't currently emit hidden init for any existing indexed properties.
+ MOZ_ASSERT(!IsHiddenInitOp(op));
+
+ // Don't optimize InitElem (DefineProperty) on non-extensible objects: when
+ // the elements are sealed, we have to throw an exception. Note that we have
+ // to check !isExtensible instead of denseElementsAreSealed because sealing
+ // a (non-extensible) object does not necessarily trigger a Shape change.
+ if (IsPropertyInitOp(op) && !nobj->isExtensible()) {
+ return AttachDecision::NoAction;
+ }
+
+ TestMatchingNativeReceiver(writer, nobj, objId);
+
+ writer.storeDenseElement(objId, indexId, rhsId);
+ writer.returnFromIC();
+
+ trackAttached("SetProp.DenseElement");
+ return AttachDecision::Attach;
+}
+
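+// Check whether an element can be added to |obj| without calling into the VM.
+// The receiver (and, for plain sets, every prototype) must not have class
+// hooks that could observe the add (addProperty, resolve, or lookup/set ops;
+// Array's own hooks are exempt), prototypes must be native and free of
+// indexed properties, and no prototype may have frozen dense elements that
+// the new element would shadow.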
+static bool CanAttachAddElement(NativeObject* obj, bool isInit,
+ AllowIndexedReceiver allowIndexedReceiver) {
+ // Make sure the receiver doesn't have any indexed properties and that such
+ // properties can't appear without a shape change.
+ if (allowIndexedReceiver == AllowIndexedReceiver::No && obj->isIndexed()) {
+ return false;
+ }
+
+ do {
+ // This check is also relevant for the receiver object.
+ const JSClass* clasp = obj->getClass();
+ if (clasp != &ArrayObject::class_ &&
+ (clasp->getAddProperty() || clasp->getResolve() ||
+ clasp->getOpsLookupProperty() || clasp->getOpsSetProperty())) {
+ return false;
+ }
+
+ // If we're initializing a property instead of setting one, the objects
+ // on the prototype are not relevant.
+ if (isInit) {
+ break;
+ }
+
+ JSObject* proto = obj->staticPrototype();
+ if (!proto) {
+ break;
+ }
+
+ if (!proto->is<NativeObject>()) {
+ return false;
+ }
+
+ NativeObject* nproto = &proto->as<NativeObject>();
+ if (nproto->isIndexed()) {
+ return false;
+ }
+
+ // We have to make sure the proto has no non-writable (frozen) elements
+ // because we're not allowed to shadow them.
+ if (nproto->denseElementsAreFrozen() &&
+ nproto->getDenseInitializedLength() > 0) {
+ return false;
+ }
+
+ obj = nproto;
+ } while (true);
+
+ return true;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachSetDenseElementHole(
+ HandleObject obj, ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId, ValOperandId rhsId) {
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Setting holes requires extra code for marking the elements non-packed.
+ if (rhsVal_.isMagic(JS_ELEMENTS_HOLE)) {
+ return AttachDecision::NoAction;
+ }
+
+ JSOp op = JSOp(*pc_);
+ MOZ_ASSERT(IsPropertySetOp(op) || IsPropertyInitOp(op));
+
+ // We don't currently emit locked init for any indexed properties.
+ MOZ_ASSERT(!IsLockedInitOp(op));
+
+  // Hidden init can be emitted for absent indexed properties, but we don't
+  // optimize that case here.
+ if (IsHiddenInitOp(op)) {
+ MOZ_ASSERT(op == JSOp::InitHiddenElem);
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (!nobj->isExtensible()) {
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(!nobj->denseElementsAreFrozen(),
+ "Extensible objects should not have frozen elements");
+
+ uint32_t initLength = nobj->getDenseInitializedLength();
+
+ // Optimize if we're adding an element at initLength or writing to a hole.
+ //
+ // In the case where index > initLength, we need noteHasDenseAdd to be called
+  // to ensure Ion is aware that writes have occurred to out-of-bounds indexes
+ // before.
+ //
+ // TODO(post-Warp): noteHasDenseAdd (nee: noteArrayWriteHole) no longer exists
+ bool isAdd = index == initLength;
+ bool isHoleInBounds =
+ index < initLength && !nobj->containsDenseElement(index);
+ if (!isAdd && !isHoleInBounds) {
+ return AttachDecision::NoAction;
+ }
+
+ // Can't add new elements to arrays with non-writable length.
+ if (isAdd && nobj->is<ArrayObject>() &&
+ !nobj->as<ArrayObject>().lengthIsWritable()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Typed arrays don't have dense elements.
+ if (nobj->is<TypedArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check for other indexed properties or class hooks.
+ if (!CanAttachAddElement(nobj, IsPropertyInitOp(op),
+ AllowIndexedReceiver::No)) {
+ return AttachDecision::NoAction;
+ }
+
+ TestMatchingNativeReceiver(writer, nobj, objId);
+
+ // Also shape guard the proto chain, unless this is an InitElem.
+ if (IsPropertySetOp(op)) {
+ ShapeGuardProtoChain(writer, nobj, objId);
+ }
+
+ writer.storeDenseElementHole(objId, indexId, rhsId, isAdd);
+ writer.returnFromIC();
+
+ trackAttached(isAdd ? "AddDenseElement" : "StoreDenseElementHole");
+ return AttachDecision::Attach;
+}
+
+// Add an IC for adding or updating a sparse element.
+AttachDecision SetPropIRGenerator::tryAttachAddOrUpdateSparseElement(
+ HandleObject obj, ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId, ValOperandId rhsId) {
+ JSOp op = JSOp(*pc_);
+ MOZ_ASSERT(IsPropertySetOp(op) || IsPropertyInitOp(op));
+
+ if (op != JSOp::SetElem && op != JSOp::StrictSetElem) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+ NativeObject* nobj = &obj->as<NativeObject>();
+
+  // We cannot attach a stub to a non-extensible object.
+ if (!nobj->isExtensible()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Stub doesn't handle negative indices.
+ if (index > INT32_MAX) {
+ return AttachDecision::NoAction;
+ }
+
+ // The index must not be for a dense element.
+ if (nobj->containsDenseElement(index)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Only handle ArrayObject and PlainObject in this stub.
+ if (!nobj->is<ArrayObject>() && !nobj->is<PlainObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Don't attach if we're adding to an array with non-writable length.
+ if (nobj->is<ArrayObject>()) {
+ ArrayObject* aobj = &nobj->as<ArrayObject>();
+ bool isAdd = (index >= aobj->length());
+ if (isAdd && !aobj->lengthIsWritable()) {
+ return AttachDecision::NoAction;
+ }
+ }
+
+ // Check for class hooks or indexed properties on the prototype chain that
+ // we're not allowed to shadow.
+ if (!CanAttachAddElement(nobj, /* isInit = */ false,
+ AllowIndexedReceiver::Yes)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure that obj is an ArrayObject or PlainObject.
+ if (nobj->is<ArrayObject>()) {
+ writer.guardClass(objId, GuardClassKind::Array);
+ } else {
+ MOZ_ASSERT(nobj->is<PlainObject>());
+ writer.guardClass(objId, GuardClassKind::PlainObject);
+ }
+
+ // The helper we are going to call only applies to non-dense elements.
+ writer.guardIndexIsNotDenseElement(objId, indexId);
+
+ // Guard extensible: We may be trying to add a new element, and so we'd best
+ // be able to do so safely.
+ writer.guardIsExtensible(objId);
+
+  // Ensure we are able to efficiently map to an integral jsid.
+ writer.guardInt32IsNonNegative(indexId);
+
+  // Shape guard the prototype chain to prevent shadowing indexes from
+  // appearing. Guard the prototype of the receiver explicitly, because the
+  // receiver's shape is not being guarded as a proxy for that.
+ GuardReceiverProto(writer, nobj, objId);
+
+ // Dense elements may appear on the prototype chain (and prototypes may
+ // have a different notion of which elements are dense), but they can
+ // only be data properties, so our specialized Set handler is ok to bind
+ // to them.
+ if (IsPropertySetOp(op)) {
+ ShapeGuardProtoChain(writer, nobj, objId);
+ }
+
+ // Ensure that if we're adding an element to the object, the object's
+ // length is writable.
+ if (nobj->is<ArrayObject>()) {
+ writer.guardIndexIsValidUpdateOrAdd(objId, indexId);
+ }
+
+ writer.callAddOrUpdateSparseElementHelper(
+ objId, indexId, rhsId,
+ /* strict = */ op == JSOp::StrictSetElem);
+ writer.returnFromIC();
+
+ trackAttached("SetProp.AddOrUpdateSparseElement");
+ return AttachDecision::Attach;
+}
+
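+// Handle |ta[i] = v| on typed arrays. The RHS must already be numeric (or a
+// BigInt for the 64-bit integer element types) so the numeric guard below is
+// exact; out-of-bounds indices are tolerated for ordinary sets but not for
+// InitElem, which has to throw.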
+AttachDecision SetPropIRGenerator::tryAttachSetTypedArrayElement(
+ HandleObject obj, ObjOperandId objId, ValOperandId rhsId) {
+ if (!obj->is<TypedArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+ if (!idVal_.isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ TypedArrayObject* tarr = &obj->as<TypedArrayObject>();
+ Scalar::Type elementType = tarr->type();
+
+ // Don't attach if the input type doesn't match the guard added below.
+ if (!ValueIsNumeric(elementType, rhsVal_)) {
+ return AttachDecision::NoAction;
+ }
+
+ bool handleOOB = false;
+ int64_t indexInt64;
+ if (!ValueIsInt64Index(idVal_, &indexInt64) || indexInt64 < 0 ||
+ uint64_t(indexInt64) >= tarr->length()) {
+ handleOOB = true;
+ }
+
+ JSOp op = JSOp(*pc_);
+
+ // The only expected property init operation is InitElem.
+ MOZ_ASSERT_IF(IsPropertyInitOp(op), op == JSOp::InitElem);
+
+ // InitElem (DefineProperty) has to throw an exception on out-of-bounds.
+ if (handleOOB && IsPropertyInitOp(op)) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.guardShapeForClass(objId, tarr->shape());
+
+ OperandId rhsValId = emitNumericGuard(rhsId, elementType);
+
+ ValOperandId keyId = setElemKeyValueId();
+ IntPtrOperandId indexId = guardToIntPtrIndex(idVal_, keyId, handleOOB);
+
+ writer.storeTypedArrayElement(objId, elementType, indexId, rhsValId,
+ handleOOB);
+ writer.returnFromIC();
+
+ trackAttached(handleOOB ? "SetTypedElementOOB" : "SetTypedElement");
+ return AttachDecision::Attach;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachGenericProxy(
+ Handle<ProxyObject*> obj, ObjOperandId objId, HandleId id,
+ ValOperandId rhsId, bool handleDOMProxies) {
+ // Don't attach a proxy stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ writer.guardIsProxy(objId);
+
+ if (!handleDOMProxies) {
+ // Ensure that the incoming object is not a DOM proxy, so that we can
+ // get to the specialized stubs. If handleDOMProxies is true, we were
+ // unable to attach a specialized DOM stub, so we just handle all
+ // proxies here.
+ writer.guardIsNotDOMProxy(objId);
+ }
+
+ if (cacheKind_ == CacheKind::SetProp || mode_ == ICState::Mode::Specialized) {
+ maybeEmitIdGuard(id);
+ writer.proxySet(objId, id, rhsId, IsStrictSetPC(pc_));
+ } else {
+ // Attach a stub that handles every id.
+ MOZ_ASSERT(cacheKind_ == CacheKind::SetElem);
+ MOZ_ASSERT(mode_ == ICState::Mode::Megamorphic);
+ writer.proxySetByValue(objId, setElemKeyValueId(), rhsId,
+ IsStrictSetPC(pc_));
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("SetProp.GenericProxy");
+ return AttachDecision::Attach;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachDOMProxyShadowed(
+ Handle<ProxyObject*> obj, ObjOperandId objId, HandleId id,
+ ValOperandId rhsId) {
+ // Don't attach a proxy stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ maybeEmitIdGuard(id);
+ TestMatchingProxyReceiver(writer, obj, objId);
+ writer.proxySet(objId, id, rhsId, IsStrictSetPC(pc_));
+ writer.returnFromIC();
+
+ trackAttached("SetProp.DOMProxyShadowed");
+ return AttachDecision::Attach;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachDOMProxyUnshadowed(
+ Handle<ProxyObject*> obj, ObjOperandId objId, HandleId id,
+ ValOperandId rhsId) {
+ // Don't attach a proxy stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ JSObject* proto = obj->staticPrototype();
+ if (!proto) {
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* holder = nullptr;
+ Maybe<PropertyInfo> prop;
+ if (!CanAttachSetter(cx_, pc_, proto, id, &holder, &prop)) {
+ return AttachDecision::NoAction;
+ }
+ auto* nproto = &proto->as<NativeObject>();
+
+ maybeEmitIdGuard(id);
+
+ // Guard that our expando object hasn't started shadowing this property.
+ TestMatchingProxyReceiver(writer, obj, objId);
+ CheckDOMProxyExpandoDoesNotShadow(writer, obj, id, objId);
+
+ GeneratePrototypeGuards(writer, obj, holder, objId);
+
+ // Guard on the holder of the property.
+ ObjOperandId holderId = writer.loadObject(holder);
+ TestMatchingHolder(writer, holder, holderId);
+
+ EmitGuardGetterSetterSlot(writer, holder, *prop, holderId,
+ /* holderIsConstant = */ true);
+
+ // EmitCallSetterNoGuards expects |obj| to be the object the property is
+ // on to do some checks. Since we actually looked at proto, and no extra
+ // guards will be generated, we can just pass that instead.
+ EmitCallSetterNoGuards(cx_, writer, nproto, holder, *prop, objId, rhsId);
+
+ trackAttached("SetProp.DOMProxyUnshadowed");
+ return AttachDecision::Attach;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachDOMProxyExpando(
+ Handle<ProxyObject*> obj, ObjOperandId objId, HandleId id,
+ ValOperandId rhsId) {
+ // Don't attach a proxy stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ MOZ_ASSERT(IsCacheableDOMProxy(obj));
+
+ Value expandoVal = GetProxyPrivate(obj);
+ JSObject* expandoObj;
+ if (expandoVal.isObject()) {
+ expandoObj = &expandoVal.toObject();
+ } else {
+ MOZ_ASSERT(!expandoVal.isUndefined(),
+ "How did a missing expando manage to shadow things?");
+ auto expandoAndGeneration =
+ static_cast<ExpandoAndGeneration*>(expandoVal.toPrivate());
+ MOZ_ASSERT(expandoAndGeneration);
+ expandoObj = &expandoAndGeneration->expando.toObject();
+ }
+
+ Maybe<PropertyInfo> prop;
+ if (CanAttachNativeSetSlot(JSOp(*pc_), expandoObj, id, &prop)) {
+ auto* nativeExpandoObj = &expandoObj->as<NativeObject>();
+
+ maybeEmitIdGuard(id);
+ ObjOperandId expandoObjId = guardDOMProxyExpandoObjectAndShape(
+ obj, objId, expandoVal, nativeExpandoObj);
+
+ EmitStoreSlotAndReturn(writer, expandoObjId, nativeExpandoObj, *prop,
+ rhsId);
+ trackAttached("SetProp.DOMProxyExpandoSlot");
+ return AttachDecision::Attach;
+ }
+
+ NativeObject* holder = nullptr;
+ if (CanAttachSetter(cx_, pc_, expandoObj, id, &holder, &prop)) {
+ auto* nativeExpandoObj = &expandoObj->as<NativeObject>();
+
+ // Call the setter. Note that we pass objId, the DOM proxy, as |this|
+ // and not the expando object.
+ maybeEmitIdGuard(id);
+ ObjOperandId expandoObjId = guardDOMProxyExpandoObjectAndShape(
+ obj, objId, expandoVal, nativeExpandoObj);
+
+ MOZ_ASSERT(holder == nativeExpandoObj);
+ EmitGuardGetterSetterSlot(writer, nativeExpandoObj, *prop, expandoObjId);
+ EmitCallSetterNoGuards(cx_, writer, nativeExpandoObj, nativeExpandoObj,
+ *prop, objId, rhsId);
+ trackAttached("SetProp.DOMProxyExpandoSetter");
+ return AttachDecision::Attach;
+ }
+
+ return AttachDecision::NoAction;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachProxy(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id,
+ ValOperandId rhsId) {
+ // Don't attach a proxy stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ ProxyStubType type = GetProxyStubType(cx_, obj, id);
+ if (type == ProxyStubType::None) {
+ return AttachDecision::NoAction;
+ }
+ auto proxy = obj.as<ProxyObject>();
+
+ if (mode_ == ICState::Mode::Megamorphic) {
+ return tryAttachGenericProxy(proxy, objId, id, rhsId,
+ /* handleDOMProxies = */ true);
+ }
+
+ switch (type) {
+ case ProxyStubType::None:
+ break;
+ case ProxyStubType::DOMExpando:
+ TRY_ATTACH(tryAttachDOMProxyExpando(proxy, objId, id, rhsId));
+ [[fallthrough]]; // Fall through to the generic shadowed case.
+ case ProxyStubType::DOMShadowed:
+ return tryAttachDOMProxyShadowed(proxy, objId, id, rhsId);
+ case ProxyStubType::DOMUnshadowed:
+ TRY_ATTACH(tryAttachDOMProxyUnshadowed(proxy, objId, id, rhsId));
+ return tryAttachGenericProxy(proxy, objId, id, rhsId,
+ /* handleDOMProxies = */ true);
+ case ProxyStubType::Generic:
+ return tryAttachGenericProxy(proxy, objId, id, rhsId,
+ /* handleDOMProxies = */ false);
+ }
+
+ MOZ_CRASH("Unexpected ProxyStubType");
+}
+
+AttachDecision SetPropIRGenerator::tryAttachProxyElement(HandleObject obj,
+ ObjOperandId objId,
+ ValOperandId rhsId) {
+ // Don't attach a proxy stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ if (!obj->is<ProxyObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.guardIsProxy(objId);
+
+ // Like GetPropIRGenerator::tryAttachProxyElement, don't check for DOM
+ // proxies here as we don't have specialized DOM stubs for this.
+ MOZ_ASSERT(cacheKind_ == CacheKind::SetElem);
+ writer.proxySetByValue(objId, setElemKeyValueId(), rhsId, IsStrictSetPC(pc_));
+ writer.returnFromIC();
+
+ trackAttached("SetProp.ProxyElement");
+ return AttachDecision::Attach;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachMegamorphicSetElement(
+ HandleObject obj, ObjOperandId objId, ValOperandId rhsId) {
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ if (mode_ != ICState::Mode::Megamorphic || cacheKind_ != CacheKind::SetElem) {
+ return AttachDecision::NoAction;
+ }
+
+ // The generic proxy stubs are faster.
+ if (obj->is<ProxyObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.megamorphicSetElement(objId, setElemKeyValueId(), rhsId,
+ IsStrictSetPC(pc_));
+ writer.returnFromIC();
+
+ trackAttached("SetProp.MegamorphicSetElement");
+ return AttachDecision::Attach;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachMegamorphicSetSlot(
+ HandleObject obj, ObjOperandId objId, HandleId id, ValOperandId rhsId) {
+ if (mode_ != ICState::Mode::Megamorphic || cacheKind_ != CacheKind::SetProp) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.megamorphicStoreSlot(objId, id, rhsId, IsStrictSetPC(pc_));
+ writer.returnFromIC();
+ trackAttached("SetProp.MegamorphicNativeSlot");
+ return AttachDecision::Attach;
+}
+
+AttachDecision SetPropIRGenerator::tryAttachWindowProxy(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id,
+ ValOperandId rhsId) {
+ // Don't attach a window proxy stub for ops like JSOp::InitElem.
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc_)));
+
+ // Attach a stub when the receiver is a WindowProxy and we can do the set
+ // on the Window (the global object).
+
+ if (!IsWindowProxyForScriptGlobal(script_, obj)) {
+ return AttachDecision::NoAction;
+ }
+
+  // If we're megamorphic, prefer a generic proxy stub that handles many more
+  // cases.
+ if (mode_ == ICState::Mode::Megamorphic) {
+ return AttachDecision::NoAction;
+ }
+
+ // Now try to do the set on the Window (the current global).
+ GlobalObject* windowObj = cx_->global();
+
+ Maybe<PropertyInfo> prop;
+ if (!CanAttachNativeSetSlot(JSOp(*pc_), windowObj, id, &prop)) {
+ return AttachDecision::NoAction;
+ }
+
+ maybeEmitIdGuard(id);
+
+ ObjOperandId windowObjId =
+ GuardAndLoadWindowProxyWindow(writer, objId, windowObj);
+ writer.guardShape(windowObjId, windowObj->shape());
+
+ EmitStoreSlotAndReturn(writer, windowObjId, windowObj, *prop, rhsId);
+
+ trackAttached("SetProp.WindowProxySlot");
+ return AttachDecision::Attach;
+}
+
+// Detect if |id| refers to the 'prototype' property of a function object. This
+// property is special-cased in canAttachAddSlotStub().
+static bool IsFunctionPrototype(const JSAtomState& names, JSObject* obj,
+ PropertyKey id) {
+ return obj->is<JSFunction>() && id.isAtom(names.prototype);
+}
+
+bool SetPropIRGenerator::canAttachAddSlotStub(HandleObject obj, HandleId id) {
+ if (!obj->is<NativeObject>()) {
+ return false;
+ }
+ auto* nobj = &obj->as<NativeObject>();
+
+ // Special-case JSFunction resolve hook to allow redefining the 'prototype'
+ // property without triggering lazy expansion of property and object
+ // allocation.
+ if (IsFunctionPrototype(cx_->names(), nobj, id)) {
+ MOZ_ASSERT(ClassMayResolveId(cx_->names(), nobj->getClass(), id, nobj));
+
+ // We're only interested in functions that have a builtin .prototype
+ // property (needsPrototypeProperty). The stub will guard on this because
+ // the builtin .prototype property is non-configurable/non-enumerable and it
+ // would be wrong to add a property with those attributes to a function that
+ // doesn't have a builtin .prototype.
+ //
+ // Inlining needsPrototypeProperty in JIT code is complicated so we use
+ // isNonBuiltinConstructor as a stronger condition that's easier to check
+ // from JIT code.
+ JSFunction* fun = &nobj->as<JSFunction>();
+ if (!fun->isNonBuiltinConstructor()) {
+ return false;
+ }
+ MOZ_ASSERT(fun->needsPrototypeProperty());
+
+    // If the property already exists, this isn't an "add".
+ if (fun->lookupPure(id)) {
+ return false;
+ }
+ } else {
+    // Normal case: if the property already exists, this isn't an "add".
+ PropertyResult prop;
+ if (!LookupOwnPropertyPure(cx_, nobj, id, &prop)) {
+ return false;
+ }
+ if (prop.isFound()) {
+ return false;
+ }
+ }
+
+ // For now we don't optimize Watchtower-monitored objects.
+ if (Watchtower::watchesPropertyAdd(nobj)) {
+ return false;
+ }
+
+ // Object must be extensible, or we must be initializing a private
+ // elem.
+ bool canAddNewProperty = nobj->isExtensible() || id.isPrivateName();
+ if (!canAddNewProperty) {
+ return false;
+ }
+
+ JSOp op = JSOp(*pc_);
+ if (IsPropertyInitOp(op)) {
+ return true;
+ }
+
+ MOZ_ASSERT(IsPropertySetOp(op));
+
+ // Walk up the object prototype chain and ensure that all prototypes are
+ // native, and that all prototypes have no setter defined on the property.
+ for (JSObject* proto = nobj->staticPrototype(); proto;
+ proto = proto->staticPrototype()) {
+ if (!proto->is<NativeObject>()) {
+ return false;
+ }
+
+ // If prototype defines this property in a non-plain way, don't optimize.
+ Maybe<PropertyInfo> protoProp = proto->as<NativeObject>().lookup(cx_, id);
+ if (protoProp.isSome() && !protoProp->isDataProperty()) {
+ return false;
+ }
+
+ // Otherwise, if there's no such property, watch out for a resolve hook
+ // that would need to be invoked and thus prevent inlining of property
+ // addition. Allow the JSFunction resolve hook as it only defines plain
+ // data properties and we don't need to invoke it for objects on the
+ // proto chain.
+ if (ClassMayResolveId(cx_->names(), proto->getClass(), id, proto) &&
+ !proto->is<JSFunction>()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
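+// Compute the PropertyFlags the freshly added property is expected to have,
+// depending on whether this is a locked/hidden init, the special
+// function.prototype case, or a plain add. tryAttachAddSlotStub compares the
+// actual flags against this and bails on a mismatch.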
+static PropertyFlags SetPropertyFlags(JSOp op, bool isFunctionPrototype) {
+ // Locked properties are non-writable, non-enumerable, and non-configurable.
+ if (IsLockedInitOp(op)) {
+ return {};
+ }
+
+ // Hidden properties are writable, non-enumerable, and configurable.
+ if (IsHiddenInitOp(op)) {
+ return {
+ PropertyFlag::Writable,
+ PropertyFlag::Configurable,
+ };
+ }
+
+ // This is a special case to overwrite an unresolved function.prototype
+ // property. The initial property flags of this property are writable,
+ // non-enumerable, and non-configurable. See canAttachAddSlotStub.
+ if (isFunctionPrototype) {
+ return {
+ PropertyFlag::Writable,
+ };
+ }
+
+ // Other properties are writable, enumerable, and configurable.
+ return PropertyFlags::defaultDataPropFlags;
+}
+
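+// Attach a stub for adding a brand-new data property, e.g. |obj.x = v| where
+// |x| did not exist yet. The VM has already performed the add (this attach
+// was deferred), so the stub guards on the old shape and stores the value
+// while transitioning the object to the new shape.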
+AttachDecision SetPropIRGenerator::tryAttachAddSlotStub(
+ Handle<Shape*> oldShape) {
+ ValOperandId objValId(writer.setInputOperandId(0));
+ ValOperandId rhsValId;
+ if (cacheKind_ == CacheKind::SetProp) {
+ rhsValId = ValOperandId(writer.setInputOperandId(1));
+ } else {
+ MOZ_ASSERT(cacheKind_ == CacheKind::SetElem);
+ MOZ_ASSERT(setElemKeyValueId().id() == 1);
+ writer.setInputOperandId(1);
+ rhsValId = ValOperandId(writer.setInputOperandId(2));
+ }
+
+ RootedId id(cx_);
+ bool nameOrSymbol;
+ if (!ValueToNameOrSymbolId(cx_, idVal_, &id, &nameOrSymbol)) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+
+ if (!lhsVal_.isObject() || !nameOrSymbol) {
+ return AttachDecision::NoAction;
+ }
+
+ JSObject* obj = &lhsVal_.toObject();
+
+ PropertyResult prop;
+ if (!LookupOwnPropertyPure(cx_, obj, id, &prop)) {
+ return AttachDecision::NoAction;
+ }
+ if (prop.isNotFound()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!obj->is<NativeObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* nobj = &obj->as<NativeObject>();
+
+ PropertyInfo propInfo = prop.propertyInfo();
+ NativeObject* holder = nobj;
+
+ if (holder->inDictionaryMode()) {
+ return AttachDecision::NoAction;
+ }
+
+ SharedShape* oldSharedShape = &oldShape->asShared();
+
+ // The property must be the last added property of the object.
+ SharedShape* newShape = holder->sharedShape();
+ MOZ_RELEASE_ASSERT(newShape->lastProperty() == propInfo);
+
+#ifdef DEBUG
+ // Verify exactly one property was added by comparing the property map
+ // lengths.
+ if (oldSharedShape->propMapLength() == PropMap::Capacity) {
+ MOZ_ASSERT(newShape->propMapLength() == 1);
+ } else {
+ MOZ_ASSERT(newShape->propMapLength() ==
+ oldSharedShape->propMapLength() + 1);
+ }
+#endif
+
+ bool isFunctionPrototype = IsFunctionPrototype(cx_->names(), nobj, id);
+
+ JSOp op = JSOp(*pc_);
+ PropertyFlags flags = SetPropertyFlags(op, isFunctionPrototype);
+
+ // Basic property checks.
+ if (!propInfo.isDataProperty() || propInfo.flags() != flags) {
+ return AttachDecision::NoAction;
+ }
+
+ ObjOperandId objId = writer.guardToObject(objValId);
+ maybeEmitIdGuard(id);
+
+ // Shape guard the object.
+ writer.guardShape(objId, oldShape);
+
+  // If this is the special function.prototype case, we need to guard that the
+  // function is a non-builtin constructor. See canAttachAddSlotStub.
+ if (isFunctionPrototype) {
+ MOZ_ASSERT(nobj->as<JSFunction>().isNonBuiltinConstructor());
+ writer.guardFunctionIsNonBuiltinCtor(objId);
+ }
+
+ // Also shape guard the proto chain, unless this is an InitElem.
+ if (IsPropertySetOp(op)) {
+ ShapeGuardProtoChain(writer, nobj, objId);
+ }
+
+ // If the JSClass has an addProperty hook, we need to call a VM function to
+ // invoke this hook. Ignore the Array addProperty hook, because it doesn't do
+ // anything for non-index properties.
+ DebugOnly<uint32_t> index;
+ MOZ_ASSERT_IF(obj->is<ArrayObject>(), !IdIsIndex(id, &index));
+ bool mustCallAddPropertyHook =
+ obj->getClass()->getAddProperty() && !obj->is<ArrayObject>();
+
+ if (mustCallAddPropertyHook) {
+ writer.addSlotAndCallAddPropHook(objId, rhsValId, newShape);
+ trackAttached("SetProp.AddSlotWithAddPropertyHook");
+ } else if (holder->isFixedSlot(propInfo.slot())) {
+ size_t offset = NativeObject::getFixedSlotOffset(propInfo.slot());
+ writer.addAndStoreFixedSlot(objId, offset, rhsValId, newShape);
+ trackAttached("SetProp.AddSlotFixed");
+ } else {
+ size_t offset = holder->dynamicSlotIndex(propInfo.slot()) * sizeof(Value);
+ uint32_t numOldSlots = NativeObject::calculateDynamicSlots(oldSharedShape);
+ uint32_t numNewSlots = holder->numDynamicSlots();
+ if (numOldSlots == numNewSlots) {
+ writer.addAndStoreDynamicSlot(objId, offset, rhsValId, newShape);
+ trackAttached("SetProp.AddSlotDynamic");
+ } else {
+ MOZ_ASSERT(numNewSlots > numOldSlots);
+ writer.allocateAndStoreDynamicSlot(objId, offset, rhsValId, newShape,
+ numNewSlots);
+ trackAttached("SetProp.AllocateSlot");
+ }
+ }
+ writer.returnFromIC();
+
+ return AttachDecision::Attach;
+}
+
+InstanceOfIRGenerator::InstanceOfIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleValue lhs, HandleObject rhs)
+ : IRGenerator(cx, script, pc, CacheKind::InstanceOf, state),
+ lhsVal_(lhs),
+ rhsObj_(rhs) {}
+
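+// |lhs instanceof rhs| can be reduced to a prototype-chain walk when |rhs| is
+// a plain function whose Symbol.hasInstance is the unshadowed one from
+// Function.prototype and whose own .prototype slot holds an object; the
+// guards below pin down exactly those conditions.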
+AttachDecision InstanceOfIRGenerator::tryAttachStub() {
+ MOZ_ASSERT(cacheKind_ == CacheKind::InstanceOf);
+ AutoAssertNoPendingException aanpe(cx_);
+
+ // Ensure RHS is a function -- could be a Proxy, which the IC isn't prepared
+ // to handle.
+ if (!rhsObj_->is<JSFunction>()) {
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ HandleFunction fun = rhsObj_.as<JSFunction>();
+
+ // Look up the @@hasInstance property, and check that Function.__proto__ is
+ // the property holder, and that no object further down the prototype chain
+ // (including this function) has shadowed it; together with the fact that
+ // Function.__proto__[@@hasInstance] is immutable, this ensures that the
+ // hasInstance hook will not change without the need to guard on the actual
+ // property value.
+ PropertyResult hasInstanceProp;
+ NativeObject* hasInstanceHolder = nullptr;
+ jsid hasInstanceID = PropertyKey::Symbol(cx_->wellKnownSymbols().hasInstance);
+ if (!LookupPropertyPure(cx_, fun, hasInstanceID, &hasInstanceHolder,
+ &hasInstanceProp) ||
+ !hasInstanceProp.isNativeProperty()) {
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ JSObject& funProto = cx_->global()->getPrototype(JSProto_Function);
+ if (hasInstanceHolder != &funProto) {
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ // If the above succeeded, then these should be true about @@hasInstance,
+ // because the property on Function.__proto__ is an immutable data property:
+ MOZ_ASSERT(hasInstanceProp.propertyInfo().isDataProperty());
+ MOZ_ASSERT(!hasInstanceProp.propertyInfo().configurable());
+ MOZ_ASSERT(!hasInstanceProp.propertyInfo().writable());
+
+ MOZ_ASSERT(IsCacheableProtoChain(fun, hasInstanceHolder));
+
+  // Ensure the function's .prototype is a data property stored in a slot we
+  // can load and guard on.
+ Maybe<PropertyInfo> prop = fun->lookupPure(cx_->names().prototype);
+ if (prop.isNothing() || !prop->isDataProperty()) {
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+ uint32_t slot = prop->slot();
+ MOZ_ASSERT(slot >= fun->numFixedSlots(), "Stub code relies on this");
+ if (!fun->getSlot(slot).isObject()) {
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+ }
+
+  // Set up the input operands for the instanceof LHS and RHS values.
+ ValOperandId lhs(writer.setInputOperandId(0));
+ ValOperandId rhs(writer.setInputOperandId(1));
+
+ ObjOperandId rhsId = writer.guardToObject(rhs);
+ writer.guardShape(rhsId, fun->shape());
+
+ // Ensure that the shapes up the prototype chain for the RHS remain the same
+ // so that @@hasInstance is not shadowed by some intermediate prototype
+ // object.
+ if (hasInstanceHolder != fun) {
+ GeneratePrototypeGuards(writer, fun, hasInstanceHolder, rhsId);
+ ObjOperandId holderId = writer.loadObject(hasInstanceHolder);
+ TestMatchingHolder(writer, hasInstanceHolder, holderId);
+ }
+
+ // Load the .prototype value and ensure it's an object.
+ ValOperandId protoValId =
+ writer.loadDynamicSlot(rhsId, slot - fun->numFixedSlots());
+ ObjOperandId protoId = writer.guardToObject(protoValId);
+
+  // We needn't guard that the LHS is an object: the stub handles non-object
+  // values itself and correctly returns false for them.
+ writer.loadInstanceOfObjectResult(lhs, protoId);
+ writer.returnFromIC();
+ trackAttached("InstanceOf");
+ return AttachDecision::Attach;
+}
+
+void InstanceOfIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("lhs", lhsVal_);
+ sp.valueProperty("rhs", ObjectValue(*rhsObj_));
+ }
+#else
+ // Silence Clang -Wunused-private-field warning.
+ (void)lhsVal_;
+#endif
+}
+
+TypeOfIRGenerator::TypeOfIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleValue value)
+ : IRGenerator(cx, script, pc, CacheKind::TypeOf, state), val_(value) {}
+
+void TypeOfIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("val", val_);
+ }
+#endif
+}
+
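+// typeof can always be attached: every value is either a primitive or an
+// object, so one of the two helpers must succeed (hence the unreachable
+// assertion below).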
+AttachDecision TypeOfIRGenerator::tryAttachStub() {
+ MOZ_ASSERT(cacheKind_ == CacheKind::TypeOf);
+
+ AutoAssertNoPendingException aanpe(cx_);
+
+ ValOperandId valId(writer.setInputOperandId(0));
+
+ TRY_ATTACH(tryAttachPrimitive(valId));
+ TRY_ATTACH(tryAttachObject(valId));
+
+ MOZ_ASSERT_UNREACHABLE("Failed to attach TypeOf");
+ return AttachDecision::NoAction;
+}
+
+AttachDecision TypeOfIRGenerator::tryAttachPrimitive(ValOperandId valId) {
+ if (!val_.isPrimitive()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Note: we don't use GuardIsNumber for int32 values because it's less
+ // efficient in Warp (unboxing to double instead of int32).
+ if (val_.isDouble()) {
+ writer.guardIsNumber(valId);
+ } else {
+ writer.guardNonDoubleType(valId, val_.type());
+ }
+
+ writer.loadConstantStringResult(
+ TypeName(js::TypeOfValue(val_), cx_->names()));
+ writer.returnFromIC();
+ writer.setTypeData(TypeData(JSValueType(val_.type())));
+ trackAttached("TypeOf.Primitive");
+ return AttachDecision::Attach;
+}
+
+AttachDecision TypeOfIRGenerator::tryAttachObject(ValOperandId valId) {
+ if (!val_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ ObjOperandId objId = writer.guardToObject(valId);
+ writer.loadTypeOfObjectResult(objId);
+ writer.returnFromIC();
+ writer.setTypeData(TypeData(JSValueType(val_.type())));
+ trackAttached("TypeOf.Object");
+ return AttachDecision::Attach;
+}
+
+GetIteratorIRGenerator::GetIteratorIRGenerator(JSContext* cx,
+ HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleValue value)
+ : IRGenerator(cx, script, pc, CacheKind::GetIterator, state), val_(value) {}
+
+AttachDecision GetIteratorIRGenerator::tryAttachStub() {
+ MOZ_ASSERT(cacheKind_ == CacheKind::GetIterator);
+
+ AutoAssertNoPendingException aanpe(cx_);
+
+ ValOperandId valId(writer.setInputOperandId(0));
+
+ TRY_ATTACH(tryAttachObject(valId));
+ TRY_ATTACH(tryAttachNullOrUndefined(valId));
+ TRY_ATTACH(tryAttachGeneric(valId));
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+AttachDecision GetIteratorIRGenerator::tryAttachObject(ValOperandId valId) {
+ if (!val_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(val_.toObject().compartment() == cx_->compartment());
+
+ ObjOperandId objId = writer.guardToObject(valId);
+ writer.objectToIteratorResult(objId, cx_->compartment()->enumeratorsAddr());
+ writer.returnFromIC();
+
+ trackAttached("GetIterator.Object");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetIteratorIRGenerator::tryAttachNullOrUndefined(
+ ValOperandId valId) {
+ MOZ_ASSERT(JSOp(*pc_) == JSOp::Iter);
+
+ // For null/undefined we can simply return the empty iterator singleton. This
+ // works because this iterator is unlinked and immutable.
+
+ if (!val_.isNullOrUndefined()) {
+ return AttachDecision::NoAction;
+ }
+
+ PropertyIteratorObject* emptyIter = cx_->global()->maybeEmptyIterator();
+ if (!emptyIter) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.guardIsNullOrUndefined(valId);
+
+ ObjOperandId iterId = writer.loadObject(emptyIter);
+ writer.loadObjectResult(iterId);
+ writer.returnFromIC();
+
+ trackAttached("GetIterator.NullOrUndefined");
+ return AttachDecision::Attach;
+}
+
+AttachDecision GetIteratorIRGenerator::tryAttachGeneric(ValOperandId valId) {
+ writer.valueToIteratorResult(valId);
+ writer.returnFromIC();
+
+ trackAttached("GetIterator.Generic");
+ return AttachDecision::Attach;
+}
+
+void GetIteratorIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("val", val_);
+ }
+#endif
+}
+
+OptimizeSpreadCallIRGenerator::OptimizeSpreadCallIRGenerator(
+ JSContext* cx, HandleScript script, jsbytecode* pc, ICState state,
+ HandleValue value)
+ : IRGenerator(cx, script, pc, CacheKind::OptimizeSpreadCall, state),
+ val_(value) {}
+
+AttachDecision OptimizeSpreadCallIRGenerator::tryAttachStub() {
+ MOZ_ASSERT(cacheKind_ == CacheKind::OptimizeSpreadCall);
+
+ AutoAssertNoPendingException aanpe(cx_);
+
+ TRY_ATTACH(tryAttachArray());
+ TRY_ATTACH(tryAttachArguments());
+ TRY_ATTACH(tryAttachNotOptimizable());
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
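+// A spread of a packed array, e.g. |f(...arr)|, can use the array's elements
+// directly instead of running the iterator protocol, provided the default
+// iteration machinery is untouched. The two helpers below verify that
+// Array.prototype[@@iterator] and %ArrayIteratorPrototype%.next still hold
+// the original self-hosted functions and report which slots to guard.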
+static bool IsArrayPrototypeOptimizable(JSContext* cx, ArrayObject* arr,
+ NativeObject** arrProto, uint32_t* slot,
+ JSFunction** iterFun) {
+ // Prototype must be Array.prototype.
+ auto* proto = cx->global()->maybeGetArrayPrototype();
+ if (!proto || arr->staticPrototype() != proto) {
+ return false;
+ }
+ *arrProto = proto;
+
+ // The object must not have an own @@iterator property.
+ PropertyKey iteratorKey =
+ PropertyKey::Symbol(cx->wellKnownSymbols().iterator);
+ if (arr->lookupPure(iteratorKey)) {
+ return false;
+ }
+
+ // Ensure that Array.prototype's @@iterator slot is unchanged.
+ Maybe<PropertyInfo> prop = proto->lookupPure(iteratorKey);
+ if (prop.isNothing() || !prop->isDataProperty()) {
+ return false;
+ }
+
+ *slot = prop->slot();
+ MOZ_ASSERT(proto->numFixedSlots() == 0, "Stub code relies on this");
+
+ const Value& iterVal = proto->getSlot(*slot);
+ if (!iterVal.isObject() || !iterVal.toObject().is<JSFunction>()) {
+ return false;
+ }
+
+ *iterFun = &iterVal.toObject().as<JSFunction>();
+ return IsSelfHostedFunctionWithName(*iterFun, cx->names().ArrayValues);
+}
+
+static bool IsArrayIteratorPrototypeOptimizable(JSContext* cx,
+ NativeObject** arrIterProto,
+ uint32_t* slot,
+ JSFunction** nextFun) {
+ auto* proto = cx->global()->maybeGetArrayIteratorPrototype();
+ if (!proto) {
+ return false;
+ }
+ *arrIterProto = proto;
+
+ // Ensure that %ArrayIteratorPrototype%'s "next" slot is unchanged.
+ Maybe<PropertyInfo> prop = proto->lookupPure(cx->names().next);
+ if (prop.isNothing() || !prop->isDataProperty()) {
+ return false;
+ }
+
+ *slot = prop->slot();
+ MOZ_ASSERT(proto->numFixedSlots() == 0, "Stub code relies on this");
+
+ const Value& nextVal = proto->getSlot(*slot);
+ if (!nextVal.isObject() || !nextVal.toObject().is<JSFunction>()) {
+ return false;
+ }
+
+ *nextFun = &nextVal.toObject().as<JSFunction>();
+ return IsSelfHostedFunctionWithName(*nextFun, cx->names().ArrayIteratorNext);
+}
+
+AttachDecision OptimizeSpreadCallIRGenerator::tryAttachArray() {
+ if (!isFirstStub_) {
+ return AttachDecision::NoAction;
+ }
+
+ // The value must be a packed array.
+ if (!val_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+ JSObject* obj = &val_.toObject();
+ if (!IsPackedArray(obj)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Prototype must be Array.prototype and Array.prototype[@@iterator] must not
+ // be modified.
+ NativeObject* arrProto;
+ uint32_t arrProtoIterSlot;
+ JSFunction* iterFun;
+ if (!IsArrayPrototypeOptimizable(cx_, &obj->as<ArrayObject>(), &arrProto,
+ &arrProtoIterSlot, &iterFun)) {
+ return AttachDecision::NoAction;
+ }
+
+ // %ArrayIteratorPrototype%.next must not be modified.
+ NativeObject* arrayIteratorProto;
+ uint32_t iterNextSlot;
+ JSFunction* nextFun;
+ if (!IsArrayIteratorPrototypeOptimizable(cx_, &arrayIteratorProto,
+ &iterNextSlot, &nextFun)) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ ObjOperandId objId = writer.guardToObject(valId);
+
+ // Guard the object is a packed array with Array.prototype as proto.
+ MOZ_ASSERT(obj->is<ArrayObject>());
+ writer.guardShape(objId, obj->shape());
+ writer.guardArrayIsPacked(objId);
+
+ // Guard on Array.prototype[@@iterator].
+ ObjOperandId arrProtoId = writer.loadObject(arrProto);
+ ObjOperandId iterId = writer.loadObject(iterFun);
+ writer.guardShape(arrProtoId, arrProto->shape());
+ writer.guardDynamicSlotIsSpecificObject(arrProtoId, iterId, arrProtoIterSlot);
+
+ // Guard on %ArrayIteratorPrototype%.next.
+ ObjOperandId iterProtoId = writer.loadObject(arrayIteratorProto);
+ ObjOperandId nextId = writer.loadObject(nextFun);
+ writer.guardShape(iterProtoId, arrayIteratorProto->shape());
+ writer.guardDynamicSlotIsSpecificObject(iterProtoId, nextId, iterNextSlot);
+
+ writer.loadObjectResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("OptimizeSpreadCall.Array");
+ return AttachDecision::Attach;
+}
+
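+// Spreading an unmodified arguments object, e.g. |f(...arguments)|, can be
+// optimized into building an array straight from the frame's arguments, as
+// long as no element, the length, or @@iterator has been overridden and no
+// argument is forwarded.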
+AttachDecision OptimizeSpreadCallIRGenerator::tryAttachArguments() {
+ // The value must be an arguments object.
+ if (!val_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+ RootedObject obj(cx_, &val_.toObject());
+ if (!obj->is<ArgumentsObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto args = obj.as<ArgumentsObject>();
+
+ // Ensure neither elements, nor the length, nor the iterator has been
+ // overridden. Also ensure no args are forwarded to allow reading them
+ // directly from the frame.
+ if (args->hasOverriddenElement() || args->hasOverriddenLength() ||
+ args->hasOverriddenIterator() || args->anyArgIsForwarded()) {
+ return AttachDecision::NoAction;
+ }
+
+ Rooted<Shape*> shape(cx_, GlobalObject::getArrayShapeWithDefaultProto(cx_));
+ if (!shape) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* arrayIteratorProto;
+ uint32_t slot;
+ JSFunction* nextFun;
+ if (!IsArrayIteratorPrototypeOptimizable(cx_, &arrayIteratorProto, &slot,
+ &nextFun)) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ ObjOperandId objId = writer.guardToObject(valId);
+
+ if (args->is<MappedArgumentsObject>()) {
+ writer.guardClass(objId, GuardClassKind::MappedArguments);
+ } else {
+ MOZ_ASSERT(args->is<UnmappedArgumentsObject>());
+ writer.guardClass(objId, GuardClassKind::UnmappedArguments);
+ }
+ uint8_t flags = ArgumentsObject::ELEMENT_OVERRIDDEN_BIT |
+ ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
+ ArgumentsObject::ITERATOR_OVERRIDDEN_BIT |
+ ArgumentsObject::FORWARDED_ARGUMENTS_BIT;
+ writer.guardArgumentsObjectFlags(objId, flags);
+
+ ObjOperandId protoId = writer.loadObject(arrayIteratorProto);
+ ObjOperandId nextId = writer.loadObject(nextFun);
+
+ writer.guardShape(protoId, arrayIteratorProto->shape());
+
+ // Ensure that proto[slot] == nextFun.
+ writer.guardDynamicSlotIsSpecificObject(protoId, nextId, slot);
+
+ writer.arrayFromArgumentsObjectResult(objId, shape);
+ writer.returnFromIC();
+
+ trackAttached("OptimizeSpreadCall.Arguments");
+ return AttachDecision::Attach;
+}
+
+AttachDecision OptimizeSpreadCallIRGenerator::tryAttachNotOptimizable() {
+ ValOperandId valId(writer.setInputOperandId(0));
+
+ writer.loadUndefinedResult();
+ writer.returnFromIC();
+
+ trackAttached("OptimizeSpreadCall.NotOptimizable");
+ return AttachDecision::Attach;
+}
+
+void OptimizeSpreadCallIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("val", val_);
+ }
+#endif
+}
+
+CallIRGenerator::CallIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, JSOp op, ICState state,
+ uint32_t argc, HandleValue callee,
+ HandleValue thisval, HandleValue newTarget,
+ HandleValueArray args)
+ : IRGenerator(cx, script, pc, CacheKind::Call, state),
+ op_(op),
+ argc_(argc),
+ callee_(callee),
+ thisval_(thisval),
+ newTarget_(newTarget),
+ args_(args) {}
+
+void InlinableNativeIRGenerator::emitNativeCalleeGuard() {
+ // Note: we rely on GuardSpecificFunction to also guard against the same
+ // native from a different realm.
+ MOZ_ASSERT(callee_->isNativeWithoutJitEntry());
+
+ ObjOperandId calleeObjId;
+ if (flags_.getArgFormat() == CallFlags::Standard) {
+ ValOperandId calleeValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Callee, argc_, flags_);
+ calleeObjId = writer.guardToObject(calleeValId);
+ } else if (flags_.getArgFormat() == CallFlags::Spread) {
+ ValOperandId calleeValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Callee, argc_, flags_);
+ calleeObjId = writer.guardToObject(calleeValId);
+ } else if (flags_.getArgFormat() == CallFlags::FunCall) {
+ MOZ_ASSERT(generator_.writer.numOperandIds() > 0, "argcId is initialized");
+
+ Int32OperandId argcId(0);
+ calleeObjId = generator_.emitFunCallOrApplyGuard(argcId);
+ } else {
+ MOZ_ASSERT(flags_.getArgFormat() == CallFlags::FunApplyArray);
+ MOZ_ASSERT(generator_.writer.numOperandIds() > 0, "argcId is initialized");
+
+ Int32OperandId argcId(0);
+ calleeObjId = generator_.emitFunApplyGuard(argcId);
+ }
+
+ writer.guardSpecificFunction(calleeObjId, callee_);
+
+ // If we're constructing we also need to guard newTarget == callee.
+ if (flags_.isConstructing()) {
+ MOZ_ASSERT(flags_.getArgFormat() == CallFlags::Standard);
+ MOZ_ASSERT(&newTarget_.toObject() == callee_);
+
+ ValOperandId newTargetValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::NewTarget, argc_, flags_);
+ ObjOperandId newTargetObjId = writer.guardToObject(newTargetValId);
+ writer.guardSpecificFunction(newTargetObjId, callee_);
+ }
+}
+
+ObjOperandId InlinableNativeIRGenerator::emitLoadArgsArray() {
+ if (flags_.getArgFormat() == CallFlags::Spread) {
+ return writer.loadSpreadArgs();
+ }
+
+ MOZ_ASSERT(flags_.getArgFormat() == CallFlags::FunApplyArray);
+ return generator_.emitFunApplyArgsGuard(flags_.getArgFormat()).ref();
+}
+
+void IRGenerator::emitCalleeGuard(ObjOperandId calleeId, JSFunction* callee) {
+ // Guarding on the callee JSFunction* is most efficient, but doesn't work well
+ // for lambda clones (multiple functions with the same BaseScript). We guard
+ // on the function's BaseScript if the callee is scripted and this isn't the
+ // first IC stub.
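+ // For example, a function expression evaluated repeatedly creates a fresh
+ // lambda clone (a new JSFunction) each time, but every clone shares the same
+ // BaseScript.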
+ if (isFirstStub_ || !callee->hasBaseScript() ||
+ callee->isSelfHostedBuiltin()) {
+ writer.guardSpecificFunction(calleeId, callee);
+ } else {
+ writer.guardClass(calleeId, GuardClassKind::JSFunction);
+ writer.guardFunctionScript(calleeId, callee->baseScript());
+ }
+}
+
+ObjOperandId CallIRGenerator::emitFunCallOrApplyGuard(Int32OperandId argcId) {
+ JSFunction* callee = &callee_.toObject().as<JSFunction>();
+ MOZ_ASSERT(callee->native() == fun_call || callee->native() == fun_apply);
+
+ // Guard that callee is the |fun_call| or |fun_apply| native function.
+ ValOperandId calleeValId =
+ writer.loadArgumentDynamicSlot(ArgumentKind::Callee, argcId);
+ ObjOperandId calleeObjId = writer.guardToObject(calleeValId);
+ writer.guardSpecificFunction(calleeObjId, callee);
+
+ // Guard that |this| is an object.
+ ValOperandId thisValId =
+ writer.loadArgumentDynamicSlot(ArgumentKind::This, argcId);
+ return writer.guardToObject(thisValId);
+}
+
+ObjOperandId CallIRGenerator::emitFunCallGuard(Int32OperandId argcId) {
+ MOZ_ASSERT(callee_.toObject().as<JSFunction>().native() == fun_call);
+
+ return emitFunCallOrApplyGuard(argcId);
+}
+
+ObjOperandId CallIRGenerator::emitFunApplyGuard(Int32OperandId argcId) {
+ MOZ_ASSERT(callee_.toObject().as<JSFunction>().native() == fun_apply);
+
+ return emitFunCallOrApplyGuard(argcId);
+}
+
+Maybe<ObjOperandId> CallIRGenerator::emitFunApplyArgsGuard(
+ CallFlags::ArgFormat format) {
+ MOZ_ASSERT(argc_ == 2);
+
+ ValOperandId argValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+
+ if (format == CallFlags::FunApplyArgsObj) {
+ ObjOperandId argObjId = writer.guardToObject(argValId);
+ if (args_[1].toObject().is<MappedArgumentsObject>()) {
+ writer.guardClass(argObjId, GuardClassKind::MappedArguments);
+ } else {
+ MOZ_ASSERT(args_[1].toObject().is<UnmappedArgumentsObject>());
+ writer.guardClass(argObjId, GuardClassKind::UnmappedArguments);
+ }
+ uint8_t flags = ArgumentsObject::ELEMENT_OVERRIDDEN_BIT |
+ ArgumentsObject::FORWARDED_ARGUMENTS_BIT;
+ writer.guardArgumentsObjectFlags(argObjId, flags);
+ return mozilla::Some(argObjId);
+ }
+
+ if (format == CallFlags::FunApplyArray) {
+ ObjOperandId argObjId = writer.guardToObject(argValId);
+ emitOptimisticClassGuard(argObjId, &args_[1].toObject(),
+ GuardClassKind::Array);
+ writer.guardArrayIsPacked(argObjId);
+ return mozilla::Some(argObjId);
+ }
+
+ MOZ_ASSERT(format == CallFlags::FunApplyNullUndefined);
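+ // |f.apply(thisArg, null)| and |f.apply(thisArg, undefined)| call |f| with no
+ // arguments, so a null-or-undefined guard on the second argument suffices.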
+ writer.guardIsNullOrUndefined(argValId);
+ return mozilla::Nothing();
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachArrayPush() {
+ // Only optimize on obj.push(val);
+ if (argc_ != 1 || !thisval_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Where |obj| is a native array.
+ JSObject* thisobj = &thisval_.toObject();
+ if (!thisobj->is<ArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto* thisarray = &thisobj->as<ArrayObject>();
+
+ // Check for other indexed properties or class hooks.
+ if (!CanAttachAddElement(thisarray, /* isInit = */ false,
+ AllowIndexedReceiver::No)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Can't add new elements to arrays with non-writable length.
+ if (!thisarray->lengthIsWritable()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check that array is extensible.
+ if (!thisarray->isExtensible()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check that the array is completely initialized (no holes).
+ if (thisarray->getDenseInitializedLength() != thisarray->length()) {
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(!thisarray->denseElementsAreFrozen(),
+ "Extensible arrays should not have frozen elements");
+
+ // After this point, we can generate code fine.
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'push' native function.
+ emitNativeCalleeGuard();
+
+ // Guard this is an array object.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId thisObjId = writer.guardToObject(thisValId);
+
+ // Guard that the shape matches.
+ TestMatchingNativeReceiver(writer, thisarray, thisObjId);
+
+ // Guard proto chain shapes.
+ ShapeGuardProtoChain(writer, thisarray, thisObjId);
+
+ // arr.push(x) is equivalent to arr[arr.length] = x for regular arrays.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ writer.arrayPush(thisObjId, argId);
+
+ writer.returnFromIC();
+
+ trackAttached("ArrayPush");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachArrayPopShift(
+ InlinableNative native) {
+ // Expecting no arguments.
+ if (argc_ != 0) {
+ return AttachDecision::NoAction;
+ }
+
+ // Only optimize if |this| is a packed array.
+ if (!thisval_.isObject() || !IsPackedArray(&thisval_.toObject())) {
+ return AttachDecision::NoAction;
+ }
+
+ // Other conditions:
+ //
+ // * The array length needs to be writable because we're changing it.
+ // * The array must be extensible. Non-extensible arrays require preserving
+ // the |initializedLength == capacity| invariant on ObjectElements.
+ // See NativeObject::shrinkCapacityToInitializedLength.
+ // This also ensures the elements aren't sealed/frozen.
+ // * There must not be a for-in iterator for the elements because the IC stub
+ // does not suppress deleted properties.
+ ArrayObject* arr = &thisval_.toObject().as<ArrayObject>();
+ if (!arr->lengthIsWritable() || !arr->isExtensible() ||
+ arr->denseElementsHaveMaybeInIterationFlag()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'pop' or 'shift' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId objId = writer.guardToObject(thisValId);
+ emitOptimisticClassGuard(objId, arr, GuardClassKind::Array);
+
+ if (native == InlinableNative::ArrayPop) {
+ writer.packedArrayPopResult(objId);
+ } else {
+ MOZ_ASSERT(native == InlinableNative::ArrayShift);
+ writer.packedArrayShiftResult(objId);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("ArrayPopShift");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachArrayJoin() {
+ // Only handle argc <= 1.
+ if (argc_ > 1) {
+ return AttachDecision::NoAction;
+ }
+
+ // Only optimize if |this| is an array.
+ if (!thisval_.isObject() || !thisval_.toObject().is<ArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // The separator argument must be a string, if present.
+ if (argc_ > 0 && !args_[0].isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // IC stub code can handle non-packed arrays.
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'join' native function.
+ emitNativeCalleeGuard();
+
+ // Guard this is an array object.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId thisObjId = writer.guardToObject(thisValId);
+ emitOptimisticClassGuard(thisObjId, &thisval_.toObject(),
+ GuardClassKind::Array);
+
+ StringOperandId sepId;
+ if (argc_ == 1) {
+ // If argcount is 1, guard that the argument is a string.
+ ValOperandId argValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ sepId = writer.guardToString(argValId);
+ } else {
+ sepId = writer.loadConstantString(cx_->names().comma);
+ }
+
+ // Do the join.
+ writer.arrayJoinResult(thisObjId, sepId);
+
+ writer.returnFromIC();
+
+ trackAttached("ArrayJoin");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachArraySlice() {
+ // Only handle argc <= 2.
+ if (argc_ > 2) {
+ return AttachDecision::NoAction;
+ }
+
+ // Only optimize if |this| is a packed array or an arguments object.
+ if (!thisval_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ bool isPackedArray = IsPackedArray(&thisval_.toObject());
+ if (!isPackedArray) {
+ if (!thisval_.toObject().is<ArgumentsObject>()) {
+ return AttachDecision::NoAction;
+ }
+ auto* args = &thisval_.toObject().as<ArgumentsObject>();
+
+ // No elements must have been overridden or deleted.
+ if (args->hasOverriddenElement()) {
+ return AttachDecision::NoAction;
+ }
+
+ // The length property mustn't be overridden.
+ if (args->hasOverriddenLength()) {
+ return AttachDecision::NoAction;
+ }
+
+ // And finally also check that no argument is forwarded.
+ if (args->anyArgIsForwarded()) {
+ return AttachDecision::NoAction;
+ }
+ }
+
+ // Arguments for the sliced region must be integers.
+ if (argc_ > 0 && !args_[0].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+ if (argc_ > 1 && !args_[1].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ JSObject* templateObj = NewDenseFullyAllocatedArray(cx_, 0, TenuredObject);
+ if (!templateObj) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'slice' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId objId = writer.guardToObject(thisValId);
+
+ if (isPackedArray) {
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::Array);
+ } else {
+ auto* args = &thisval_.toObject().as<ArgumentsObject>();
+
+ if (args->is<MappedArgumentsObject>()) {
+ writer.guardClass(objId, GuardClassKind::MappedArguments);
+ } else {
+ MOZ_ASSERT(args->is<UnmappedArgumentsObject>());
+ writer.guardClass(objId, GuardClassKind::UnmappedArguments);
+ }
+
+ uint8_t flags = ArgumentsObject::ELEMENT_OVERRIDDEN_BIT |
+ ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
+ ArgumentsObject::FORWARDED_ARGUMENTS_BIT;
+ writer.guardArgumentsObjectFlags(objId, flags);
+ }
+
+ Int32OperandId int32BeginId;
+ if (argc_ > 0) {
+ ValOperandId beginId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ int32BeginId = writer.guardToInt32(beginId);
+ } else {
+ int32BeginId = writer.loadInt32Constant(0);
+ }
+
+ Int32OperandId int32EndId;
+ if (argc_ > 1) {
+ ValOperandId endId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ int32EndId = writer.guardToInt32(endId);
+ } else if (isPackedArray) {
+ int32EndId = writer.loadInt32ArrayLength(objId);
+ } else {
+ int32EndId = writer.loadArgumentsObjectLength(objId);
+ }
+
+ if (isPackedArray) {
+ writer.packedArraySliceResult(templateObj, objId, int32BeginId, int32EndId);
+ } else {
+ writer.argumentsSliceResult(templateObj, objId, int32BeginId, int32EndId);
+ }
+ writer.returnFromIC();
+
+ trackAttached(isPackedArray ? "ArraySlice" : "ArgumentsSlice");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachArrayIsArray() {
+ // Need a single argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'isArray' native function.
+ emitNativeCalleeGuard();
+
+ // Check if the argument is an Array and return result.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ writer.isArrayResult(argId);
+ writer.returnFromIC();
+
+ trackAttached("ArrayIsArray");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
+ Scalar::Type type) {
+ // Ensure |this| is a DataViewObject.
+ if (!thisval_.isObject() || !thisval_.toObject().is<DataViewObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Expected arguments: offset (number), optional littleEndian (boolean).
+ if (argc_ < 1 || argc_ > 2) {
+ return AttachDecision::NoAction;
+ }
+ int64_t offsetInt64;
+ if (!ValueIsInt64Index(args_[0], &offsetInt64)) {
+ return AttachDecision::NoAction;
+ }
+ if (argc_ > 1 && !args_[1].isBoolean()) {
+ return AttachDecision::NoAction;
+ }
+
+ DataViewObject* dv = &thisval_.toObject().as<DataViewObject>();
+
+ // Bounds check the offset.
+ if (offsetInt64 < 0 ||
+ !dv->offsetIsInBounds(Scalar::byteSize(type), offsetInt64)) {
+ return AttachDecision::NoAction;
+ }
+
+ // For getUint32 we let the stub return an Int32 if we have not seen a
+ // double, to allow better codegen in Warp while avoiding bailout loops.
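+ // Uint32 values outside the int32 range must be returned as doubles; the
+ // check below is conservative and also forces a double for INT32_MAX itself.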
+ bool forceDoubleForUint32 = false;
+ if (type == Scalar::Uint32) {
+ bool isLittleEndian = argc_ > 1 && args_[1].toBoolean();
+ uint32_t res = dv->read<uint32_t>(offsetInt64, isLittleEndian);
+ forceDoubleForUint32 = res >= INT32_MAX;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is this DataView native function.
+ emitNativeCalleeGuard();
+
+ // Guard |this| is a DataViewObject.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId objId = writer.guardToObject(thisValId);
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::DataView);
+
+ // Convert offset to intPtr.
+ ValOperandId offsetId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ IntPtrOperandId intPtrOffsetId =
+ guardToIntPtrIndex(args_[0], offsetId, /* supportOOB = */ false);
+
+ BooleanOperandId boolLittleEndianId;
+ if (argc_ > 1) {
+ ValOperandId littleEndianId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ boolLittleEndianId = writer.guardToBoolean(littleEndianId);
+ } else {
+ boolLittleEndianId = writer.loadBooleanConstant(false);
+ }
+
+ writer.loadDataViewValueResult(objId, intPtrOffsetId, boolLittleEndianId,
+ type, forceDoubleForUint32);
+ writer.returnFromIC();
+
+ trackAttached("DataViewGet");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachDataViewSet(
+ Scalar::Type type) {
+ // Ensure |this| is a DataViewObject.
+ if (!thisval_.isObject() || !thisval_.toObject().is<DataViewObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Expected arguments: offset (number), value, optional littleEndian (boolean)
+ if (argc_ < 2 || argc_ > 3) {
+ return AttachDecision::NoAction;
+ }
+ int64_t offsetInt64;
+ if (!ValueIsInt64Index(args_[0], &offsetInt64)) {
+ return AttachDecision::NoAction;
+ }
+ if (!ValueIsNumeric(type, args_[1])) {
+ return AttachDecision::NoAction;
+ }
+ if (argc_ > 2 && !args_[2].isBoolean()) {
+ return AttachDecision::NoAction;
+ }
+
+ DataViewObject* dv = &thisval_.toObject().as<DataViewObject>();
+
+ // Bounds check the offset.
+ if (offsetInt64 < 0 ||
+ !dv->offsetIsInBounds(Scalar::byteSize(type), offsetInt64)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is this DataView native function.
+ emitNativeCalleeGuard();
+
+ // Guard |this| is a DataViewObject.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId objId = writer.guardToObject(thisValId);
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::DataView);
+
+ // Convert offset to intPtr.
+ ValOperandId offsetId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ IntPtrOperandId intPtrOffsetId =
+ guardToIntPtrIndex(args_[0], offsetId, /* supportOOB = */ false);
+
+ // Convert value to number or BigInt.
+ ValOperandId valueId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ OperandId numericValueId = emitNumericGuard(valueId, type);
+
+ BooleanOperandId boolLittleEndianId;
+ if (argc_ > 2) {
+ ValOperandId littleEndianId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+ boolLittleEndianId = writer.guardToBoolean(littleEndianId);
+ } else {
+ boolLittleEndianId = writer.loadBooleanConstant(false);
+ }
+
+ writer.storeDataViewValueResult(objId, intPtrOffsetId, numericValueId,
+ boolLittleEndianId, type);
+ writer.returnFromIC();
+
+ trackAttached("DataViewSet");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachUnsafeGetReservedSlot(
+ InlinableNative native) {
+ // Self-hosted code calls this with (object, int32) arguments.
+ MOZ_ASSERT(argc_ == 2);
+ MOZ_ASSERT(args_[0].isObject());
+ MOZ_ASSERT(args_[1].isInt32());
+ MOZ_ASSERT(args_[1].toInt32() >= 0);
+
+ uint32_t slot = uint32_t(args_[1].toInt32());
+ if (slot >= NativeObject::MAX_FIXED_SLOTS) {
+ return AttachDecision::NoAction;
+ }
+ size_t offset = NativeObject::getFixedSlotOffset(slot);
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the first argument is an object.
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(arg0Id);
+
+ // BytecodeEmitter::assertSelfHostedUnsafeGetReservedSlot ensures that the
+ // slot argument is constant. (At least for direct calls)
+
+ switch (native) {
+ case InlinableNative::IntrinsicUnsafeGetReservedSlot:
+ writer.loadFixedSlotResult(objId, offset);
+ break;
+ case InlinableNative::IntrinsicUnsafeGetObjectFromReservedSlot:
+ writer.loadFixedSlotTypedResult(objId, offset, ValueType::Object);
+ break;
+ case InlinableNative::IntrinsicUnsafeGetInt32FromReservedSlot:
+ writer.loadFixedSlotTypedResult(objId, offset, ValueType::Int32);
+ break;
+ case InlinableNative::IntrinsicUnsafeGetStringFromReservedSlot:
+ writer.loadFixedSlotTypedResult(objId, offset, ValueType::String);
+ break;
+ default:
+ MOZ_CRASH("unexpected native");
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("UnsafeGetReservedSlot");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachUnsafeSetReservedSlot() {
+ // Self-hosted code calls this with (object, int32, value) arguments.
+ MOZ_ASSERT(argc_ == 3);
+ MOZ_ASSERT(args_[0].isObject());
+ MOZ_ASSERT(args_[1].isInt32());
+ MOZ_ASSERT(args_[1].toInt32() >= 0);
+
+ uint32_t slot = uint32_t(args_[1].toInt32());
+ if (slot >= NativeObject::MAX_FIXED_SLOTS) {
+ return AttachDecision::NoAction;
+ }
+ size_t offset = NativeObject::getFixedSlotOffset(slot);
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the first argument is an object.
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(arg0Id);
+
+ // BytecodeEmitter::assertSelfHostedUnsafeSetReservedSlot ensures that the
+ // slot argument is constant. (At least for direct calls)
+
+ // Get the value to set.
+ ValOperandId valId = writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+
+ // Set the fixed slot and return undefined.
+ writer.storeFixedSlotUndefinedResult(objId, offset, valId);
+
+ // This stub always returns undefined.
+ writer.returnFromIC();
+
+ trackAttached("UnsafeSetReservedSlot");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIsSuspendedGenerator() {
+ // The IsSuspendedGenerator intrinsic is only called in
+ // self-hosted code, so it's safe to assume we have a single
+ // argument and the callee is our intrinsic.
+
+ MOZ_ASSERT(argc_ == 1);
+
+ initializeInputOperand();
+
+ // Stack layout here is (bottom to top):
+ // 2: Callee
+ // 1: ThisValue
+ // 0: Arg <-- Top of stack.
+ // We only care about the argument.
+ ValOperandId valId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ // Check whether the argument is a suspended generator.
+ // We don't need guards, because IsSuspendedGenerator returns
+ // false for values that are not generator objects.
+ writer.callIsSuspendedGeneratorResult(valId);
+ writer.returnFromIC();
+
+ trackAttached("IsSuspendedGenerator");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachToObject() {
+ // Self-hosted code calls this with a single argument.
+ MOZ_ASSERT(argc_ == 1);
+
+ // Need a single object argument.
+ // TODO(Warp): Support all or more conversions to object.
+ if (!args_[0].isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the argument is an object.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argId);
+
+ // Return the object.
+ writer.loadObjectResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("ToObject");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachToInteger() {
+ // Self-hosted code calls this with a single argument.
+ MOZ_ASSERT(argc_ == 1);
+
+ // Need a single int32 argument.
+ // TODO(Warp): Support all or more conversions to integer.
+ // Make sure to update this code correctly if we ever start
+ // returning non-int32 integers.
+ if (!args_[0].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the argument is an int32.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId int32Id = writer.guardToInt32(argId);
+
+ // Return the int32.
+ writer.loadInt32Result(int32Id);
+ writer.returnFromIC();
+
+ trackAttached("ToInteger");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachToLength() {
+ // Self-hosted code calls this with a single argument.
+ MOZ_ASSERT(argc_ == 1);
+
+ // Need a single int32 argument.
+ if (!args_[0].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // ToLength(int32) is equivalent to max(int32, 0).
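+ // e.g. ToLength(-5) == 0 and ToLength(7) == 7; the 2^53 - 1 upper clamp can
+ // never apply to an int32 input.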
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId int32ArgId = writer.guardToInt32(argId);
+ Int32OperandId zeroId = writer.loadInt32Constant(0);
+ bool isMax = true;
+ Int32OperandId maxId = writer.int32MinMax(isMax, int32ArgId, zeroId);
+ writer.loadInt32Result(maxId);
+ writer.returnFromIC();
+
+ trackAttached("ToLength");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIsObject() {
+ // Self-hosted code calls this with a single argument.
+ MOZ_ASSERT(argc_ == 1);
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Type check the argument and return result.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ writer.isObjectResult(argId);
+ writer.returnFromIC();
+
+ trackAttached("IsObject");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIsPackedArray() {
+ // Self-hosted code calls this with a single object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Check if the argument is packed and return result.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objArgId = writer.guardToObject(argId);
+ writer.isPackedArrayResult(objArgId);
+ writer.returnFromIC();
+
+ trackAttached("IsPackedArray");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIsCallable() {
+ // Self-hosted code calls this with a single argument.
+ MOZ_ASSERT(argc_ == 1);
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Check if the argument is callable and return result.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ writer.isCallableResult(argId);
+ writer.returnFromIC();
+
+ trackAttached("IsCallable");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIsConstructor() {
+ // Self-hosted code calls this with a single argument.
+ MOZ_ASSERT(argc_ == 1);
+
+ // Need a single object argument.
+ if (!args_[0].isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the argument is an object.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argId);
+
+ // Check if the argument is a constructor and return result.
+ writer.isConstructorResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("IsConstructor");
+ return AttachDecision::Attach;
+}
+
+AttachDecision
+InlinableNativeIRGenerator::tryAttachIsCrossRealmArrayConstructor() {
+ // Self-hosted code calls this with an object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ if (args_[0].toObject().is<ProxyObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argId);
+ writer.guardIsNotProxy(objId);
+ writer.isCrossRealmArrayConstructorResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("IsCrossRealmArrayConstructor");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachGuardToClass(
+ InlinableNative native) {
+ // Self-hosted code calls this with an object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Class must match.
+ const JSClass* clasp = InlinableNativeGuardToClass(native);
+ if (args_[0].toObject().getClass() != clasp) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the argument is an object.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argId);
+
+ // Guard that the object has the correct class.
+ writer.guardAnyClass(objId, clasp);
+
+ // Return the object.
+ writer.loadObjectResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("GuardToClass");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachHasClass(
+ const JSClass* clasp, bool isPossiblyWrapped) {
+ // Self-hosted code calls this with an object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Only optimize when the object isn't a proxy.
+ if (isPossiblyWrapped && args_[0].toObject().is<ProxyObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Perform the Class check.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argId);
+
+ if (isPossiblyWrapped) {
+ writer.guardIsNotProxy(objId);
+ }
+
+ writer.hasClassResult(objId, clasp);
+ writer.returnFromIC();
+
+ trackAttached("HasClass");
+ return AttachDecision::Attach;
+}
+
+// Returns whether the .lastIndex property is a non-negative int32 value and is
+// still writable.
+static bool HasOptimizableLastIndexSlot(RegExpObject* regexp, JSContext* cx) {
+ auto lastIndexProp = regexp->lookupPure(cx->names().lastIndex);
+ MOZ_ASSERT(lastIndexProp->isDataProperty());
+ if (!lastIndexProp->writable()) {
+ return false;
+ }
+ Value lastIndex = regexp->getLastIndex();
+ if (!lastIndex.isInt32() || lastIndex.toInt32() < 0) {
+ return false;
+ }
+ return true;
+}
+
+// Returns the RegExp stub used by the optimized code path for this intrinsic.
+// We store a pointer to this in the IC stub to ensure GC doesn't discard it.
+static JitCode* GetOrCreateRegExpStub(JSContext* cx, InlinableNative native) {
+ JitCode* code;
+ switch (native) {
+ case InlinableNative::IntrinsicRegExpBuiltinExecForTest:
+ case InlinableNative::IntrinsicRegExpExecForTest:
+ code = cx->realm()->jitRealm()->ensureRegExpExecTestStubExists(cx);
+ break;
+ case InlinableNative::IntrinsicRegExpBuiltinExec:
+ case InlinableNative::IntrinsicRegExpExec:
+ code = cx->realm()->jitRealm()->ensureRegExpExecMatchStubExists(cx);
+ break;
+ case InlinableNative::RegExpMatcher:
+ code = cx->realm()->jitRealm()->ensureRegExpMatcherStubExists(cx);
+ break;
+ case InlinableNative::RegExpSearcher:
+ code = cx->realm()->jitRealm()->ensureRegExpSearcherStubExists(cx);
+ break;
+ default:
+ MOZ_CRASH("Unexpected native");
+ }
+ if (!code) {
+ cx->recoverFromOutOfMemory();
+ return nullptr;
+ }
+ return code;
+}
+
+static void EmitGuardLastIndexIsNonNegativeInt32(CacheIRWriter& writer,
+ ObjOperandId regExpId) {
+ size_t offset =
+ NativeObject::getFixedSlotOffset(RegExpObject::lastIndexSlot());
+ ValOperandId lastIndexValId = writer.loadFixedSlot(regExpId, offset);
+ Int32OperandId lastIndexId = writer.guardToInt32(lastIndexValId);
+ writer.guardInt32IsNonNegative(lastIndexId);
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIntrinsicRegExpBuiltinExec(
+ InlinableNative native) {
+ // Self-hosted code calls this with (regexp, string) arguments.
+ MOZ_ASSERT(argc_ == 2);
+ MOZ_ASSERT(args_[0].isObject());
+ MOZ_ASSERT(args_[1].isString());
+
+ JitCode* stub = GetOrCreateRegExpStub(cx_, native);
+ if (!stub) {
+ return AttachDecision::NoAction;
+ }
+
+ RegExpObject* re = &args_[0].toObject().as<RegExpObject>();
+ if (!HasOptimizableLastIndexSlot(re, cx_)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId regExpId = writer.guardToObject(arg0Id);
+ writer.guardShape(regExpId, re->shape());
+ EmitGuardLastIndexIsNonNegativeInt32(writer, regExpId);
+
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ StringOperandId inputId = writer.guardToString(arg1Id);
+
+ if (native == InlinableNative::IntrinsicRegExpBuiltinExecForTest) {
+ writer.regExpBuiltinExecTestResult(regExpId, inputId, stub);
+ } else {
+ writer.regExpBuiltinExecMatchResult(regExpId, inputId, stub);
+ }
+ writer.returnFromIC();
+
+ trackAttached("IntrinsicRegExpBuiltinExec");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIntrinsicRegExpExec(
+ InlinableNative native) {
+ // Self-hosted code calls this with (object, string) arguments.
+ MOZ_ASSERT(argc_ == 2);
+ MOZ_ASSERT(args_[0].isObject());
+ MOZ_ASSERT(args_[1].isString());
+
+ if (!args_[0].toObject().is<RegExpObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ JitCode* stub = GetOrCreateRegExpStub(cx_, native);
+ if (!stub) {
+ return AttachDecision::NoAction;
+ }
+
+ RegExpObject* re = &args_[0].toObject().as<RegExpObject>();
+ if (!HasOptimizableLastIndexSlot(re, cx_)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure regexp.exec is the original RegExp.prototype.exec function on the
+ // prototype.
+ if (re->containsPure(cx_->names().exec)) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(cx_->global()->maybeGetRegExpPrototype());
+ auto* regExpProto =
+ &cx_->global()->maybeGetRegExpPrototype()->as<NativeObject>();
+ if (re->staticPrototype() != regExpProto) {
+ return AttachDecision::NoAction;
+ }
+ auto execProp = regExpProto->as<NativeObject>().lookupPure(cx_->names().exec);
+ if (!execProp || !execProp->isDataProperty()) {
+ return AttachDecision::NoAction;
+ }
+ // It should be stored in a dynamic slot. We assert this in
+ // FinishRegExpClassInit.
+ if (regExpProto->isFixedSlot(execProp->slot())) {
+ return AttachDecision::NoAction;
+ }
+ Value execVal = regExpProto->getSlot(execProp->slot());
+ PropertyName* execName = cx_->names().RegExp_prototype_Exec;
+ if (!IsSelfHostedFunctionWithName(execVal, execName)) {
+ return AttachDecision::NoAction;
+ }
+ JSFunction* execFunction = &execVal.toObject().as<JSFunction>();
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId regExpId = writer.guardToObject(arg0Id);
+ writer.guardShape(regExpId, re->shape());
+ EmitGuardLastIndexIsNonNegativeInt32(writer, regExpId);
+
+ // Emit guards for the RegExp.prototype.exec property.
+ ObjOperandId regExpProtoId = writer.loadObject(regExpProto);
+ writer.guardShape(regExpProtoId, regExpProto->shape());
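+ // Compute the byte offset of the exec property within the prototype's
+ // dynamic slots.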
+ size_t offset =
+ regExpProto->dynamicSlotIndex(execProp->slot()) * sizeof(Value);
+ writer.guardDynamicSlotValue(regExpProtoId, offset,
+ ObjectValue(*execFunction));
+
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ StringOperandId inputId = writer.guardToString(arg1Id);
+
+ if (native == InlinableNative::IntrinsicRegExpExecForTest) {
+ writer.regExpBuiltinExecTestResult(regExpId, inputId, stub);
+ } else {
+ writer.regExpBuiltinExecMatchResult(regExpId, inputId, stub);
+ }
+ writer.returnFromIC();
+
+ trackAttached("IntrinsicRegExpExec");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachRegExpMatcherSearcher(
+ InlinableNative native) {
+ // Self-hosted code calls this with (object, string, number) arguments.
+ MOZ_ASSERT(argc_ == 3);
+ MOZ_ASSERT(args_[0].isObject());
+ MOZ_ASSERT(args_[1].isString());
+ MOZ_ASSERT(args_[2].isNumber());
+
+ // It's not guaranteed that the JITs have typed |lastIndex| as an Int32.
+ if (!args_[2].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ JitCode* stub = GetOrCreateRegExpStub(cx_, native);
+ if (!stub) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard argument types.
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId reId = writer.guardToObject(arg0Id);
+
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ StringOperandId inputId = writer.guardToString(arg1Id);
+
+ ValOperandId arg2Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+ Int32OperandId lastIndexId = writer.guardToInt32(arg2Id);
+
+ switch (native) {
+ case InlinableNative::RegExpMatcher:
+ writer.callRegExpMatcherResult(reId, inputId, lastIndexId, stub);
+ writer.returnFromIC();
+ trackAttached("RegExpMatcher");
+ break;
+
+ case InlinableNative::RegExpSearcher:
+ writer.callRegExpSearcherResult(reId, inputId, lastIndexId, stub);
+ writer.returnFromIC();
+ trackAttached("RegExpSearcher");
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected native");
+ }
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision
+InlinableNativeIRGenerator::tryAttachRegExpPrototypeOptimizable() {
+ // Self-hosted code calls this with a single object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId protoId = writer.guardToObject(arg0Id);
+
+ writer.regExpPrototypeOptimizableResult(protoId);
+ writer.returnFromIC();
+
+ trackAttached("RegExpPrototypeOptimizable");
+ return AttachDecision::Attach;
+}
+
+AttachDecision
+InlinableNativeIRGenerator::tryAttachRegExpInstanceOptimizable() {
+ // Self-hosted code calls this with two object arguments.
+ MOZ_ASSERT(argc_ == 2);
+ MOZ_ASSERT(args_[0].isObject());
+ MOZ_ASSERT(args_[1].isObject());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId regexpId = writer.guardToObject(arg0Id);
+
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ ObjOperandId protoId = writer.guardToObject(arg1Id);
+
+ writer.regExpInstanceOptimizableResult(regexpId, protoId);
+ writer.returnFromIC();
+
+ trackAttached("RegExpInstanceOptimizable");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachGetFirstDollarIndex() {
+ // Self-hosted code calls this with a single string argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isString());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ StringOperandId strId = writer.guardToString(arg0Id);
+
+ writer.getFirstDollarIndexResult(strId);
+ writer.returnFromIC();
+
+ trackAttached("GetFirstDollarIndex");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachSubstringKernel() {
+ // Self-hosted code calls this with (string, int32, int32) arguments.
+ MOZ_ASSERT(argc_ == 3);
+ MOZ_ASSERT(args_[0].isString());
+ MOZ_ASSERT(args_[1].isInt32());
+ MOZ_ASSERT(args_[2].isInt32());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ StringOperandId strId = writer.guardToString(arg0Id);
+
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ Int32OperandId beginId = writer.guardToInt32(arg1Id);
+
+ ValOperandId arg2Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+ Int32OperandId lengthId = writer.guardToInt32(arg2Id);
+
+ writer.callSubstringKernelResult(strId, beginId, lengthId);
+ writer.returnFromIC();
+
+ trackAttached("SubstringKernel");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachObjectHasPrototype() {
+ // Self-hosted code calls this with (object, object) arguments.
+ MOZ_ASSERT(argc_ == 2);
+ MOZ_ASSERT(args_[0].isObject());
+ MOZ_ASSERT(args_[1].isObject());
+
+ auto* obj = &args_[0].toObject().as<NativeObject>();
+ auto* proto = &args_[1].toObject().as<NativeObject>();
+
+ // Only attach when obj.__proto__ is proto.
+ if (obj->staticPrototype() != proto) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(arg0Id);
+
+ writer.guardProto(objId, proto);
+ writer.loadBooleanResult(true);
+ writer.returnFromIC();
+
+ trackAttached("ObjectHasPrototype");
+ return AttachDecision::Attach;
+}
+
+static bool CanConvertToString(const Value& v) {
+ return v.isString() || v.isNumber() || v.isBoolean() || v.isNullOrUndefined();
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachString() {
+ // Need a single argument that is or can be converted to a string.
+ if (argc_ != 1 || !CanConvertToString(args_[0])) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'String' function.
+ emitNativeCalleeGuard();
+
+ // Guard that the argument is a string or can be converted to one.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ StringOperandId strId = emitToStringGuard(argId, args_[0]);
+
+ // Return the string.
+ writer.loadStringResult(strId);
+ writer.returnFromIC();
+
+ trackAttached("String");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringConstructor() {
+ // Need a single argument that is or can be converted to a string.
+ if (argc_ != 1 || !CanConvertToString(args_[0])) {
+ return AttachDecision::NoAction;
+ }
+
+ RootedString emptyString(cx_, cx_->runtime()->emptyString);
+ JSObject* templateObj = StringObject::create(
+ cx_, emptyString, /* proto = */ nullptr, TenuredObject);
+ if (!templateObj) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'String' function.
+ emitNativeCalleeGuard();
+
+ // Guard that the argument can be converted to a string and convert it.
+ ValOperandId argId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_, flags_);
+ StringOperandId strId = emitToStringGuard(argId, args_[0]);
+
+ writer.newStringObjectResult(templateObj, strId);
+ writer.returnFromIC();
+
+ trackAttached("StringConstructor");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringToStringValueOf() {
+ // Expecting no arguments.
+ if (argc_ != 0) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure |this| is a primitive string value.
+ if (!thisval_.isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'toString' OR 'valueOf' native function.
+ emitNativeCalleeGuard();
+
+ // Guard |this| is a string.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ StringOperandId strId = writer.guardToString(thisValId);
+
+ // Return the string.
+ writer.loadStringResult(strId);
+ writer.returnFromIC();
+
+ trackAttached("StringToStringValueOf");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringReplaceString() {
+ // Self-hosted code calls this with (string, string, string) arguments.
+ MOZ_ASSERT(argc_ == 3);
+ MOZ_ASSERT(args_[0].isString());
+ MOZ_ASSERT(args_[1].isString());
+ MOZ_ASSERT(args_[2].isString());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ StringOperandId strId = writer.guardToString(arg0Id);
+
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ StringOperandId patternId = writer.guardToString(arg1Id);
+
+ ValOperandId arg2Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+ StringOperandId replacementId = writer.guardToString(arg2Id);
+
+ writer.stringReplaceStringResult(strId, patternId, replacementId);
+ writer.returnFromIC();
+
+ trackAttached("StringReplaceString");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringSplitString() {
+ // Self-hosted code calls this with (string, string) arguments.
+ MOZ_ASSERT(argc_ == 2);
+ MOZ_ASSERT(args_[0].isString());
+ MOZ_ASSERT(args_[1].isString());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ StringOperandId strId = writer.guardToString(arg0Id);
+
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ StringOperandId separatorId = writer.guardToString(arg1Id);
+
+ writer.stringSplitStringResult(strId, separatorId);
+ writer.returnFromIC();
+
+ trackAttached("StringSplitString");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringChar(
+ StringChar kind) {
+ // Need one argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ auto attach = CanAttachStringChar(thisval_, args_[0]);
+ if (attach == AttachStringChar::No) {
+ return AttachDecision::NoAction;
+ }
+
+ bool handleOOB = attach == AttachStringChar::OutOfBounds;
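+ // For out-of-bounds indices, charAt returns the empty string and charCodeAt
+ // returns NaN, so the stub must handle that case explicitly.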
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'charCodeAt' or 'charAt' native function.
+ emitNativeCalleeGuard();
+
+ // Guard this is a string.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ StringOperandId strId = writer.guardToString(thisValId);
+
+ // Guard int32 index.
+ ValOperandId indexId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId int32IndexId = writer.guardToInt32Index(indexId);
+
+ // Linearize the string.
+ //
+ // AttachStringChar doesn't have a separate state when OOB access happens on
+ // a string which needs to be linearized, so just linearize unconditionally
+ // for out-of-bounds accesses.
+ if (attach == AttachStringChar::Linearize ||
+ attach == AttachStringChar::OutOfBounds) {
+ strId = writer.linearizeForCharAccess(strId, int32IndexId);
+ }
+
+ // Load string char or code.
+ if (kind == StringChar::CodeAt) {
+ writer.loadStringCharCodeResult(strId, int32IndexId, handleOOB);
+ } else {
+ writer.loadStringCharResult(strId, int32IndexId, handleOOB);
+ }
+
+ writer.returnFromIC();
+
+ if (kind == StringChar::CodeAt) {
+ trackAttached("StringCharCodeAt");
+ } else {
+ trackAttached("StringCharAt");
+ }
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringCharCodeAt() {
+ return tryAttachStringChar(StringChar::CodeAt);
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringCharAt() {
+ return tryAttachStringChar(StringChar::At);
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringFromCharCode() {
+ // Need one number argument.
+ if (argc_ != 1 || !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'fromCharCode' native function.
+ emitNativeCalleeGuard();
+
+ // Guard int32 argument.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId codeId;
+ if (args_[0].isInt32()) {
+ codeId = writer.guardToInt32(argId);
+ } else {
+ // 'fromCharCode' performs ToUint16 on its input. We can use Uint32
+ // semantics, because ToUint16(ToUint32(v)) == ToUint16(v).
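+ // e.g. String.fromCharCode(0x10041) and String.fromCharCode(0x41) both
+ // produce "A".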
+ codeId = writer.guardToInt32ModUint32(argId);
+ }
+
+ // Return string created from code.
+ writer.stringFromCharCodeResult(codeId);
+ writer.returnFromIC();
+
+ trackAttached("StringFromCharCode");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringFromCodePoint() {
+ // Need one int32 argument.
+ if (argc_ != 1 || !args_[0].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ // String.fromCodePoint throws for invalid code points.
+ int32_t codePoint = args_[0].toInt32();
+ if (codePoint < 0 || codePoint > int32_t(unicode::NonBMPMax)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'fromCodePoint' native function.
+ emitNativeCalleeGuard();
+
+ // Guard int32 argument.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId codeId = writer.guardToInt32(argId);
+
+ // Return string created from code point.
+ writer.stringFromCodePointResult(codeId);
+ writer.returnFromIC();
+
+ trackAttached("StringFromCodePoint");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringIndexOf() {
+ // Need one string argument.
+ if (argc_ != 1 || !args_[0].isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure |this| is a primitive string value.
+ if (!thisval_.isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'indexOf' native function.
+ emitNativeCalleeGuard();
+
+ // Guard this is a string.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ StringOperandId strId = writer.guardToString(thisValId);
+
+ // Guard string argument.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ StringOperandId searchStrId = writer.guardToString(argId);
+
+ writer.stringIndexOfResult(strId, searchStrId);
+ writer.returnFromIC();
+
+ trackAttached("StringIndexOf");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringStartsWith() {
+ // Need one string argument.
+ if (argc_ != 1 || !args_[0].isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure |this| is a primitive string value.
+ if (!thisval_.isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'startsWith' native function.
+ emitNativeCalleeGuard();
+
+ // Guard this is a string.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ StringOperandId strId = writer.guardToString(thisValId);
+
+ // Guard string argument.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ StringOperandId searchStrId = writer.guardToString(argId);
+
+ writer.stringStartsWithResult(strId, searchStrId);
+ writer.returnFromIC();
+
+ trackAttached("StringStartsWith");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringEndsWith() {
+ // Need one string argument.
+ if (argc_ != 1 || !args_[0].isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure |this| is a primitive string value.
+ if (!thisval_.isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'endsWith' native function.
+ emitNativeCalleeGuard();
+
+ // Guard this is a string.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ StringOperandId strId = writer.guardToString(thisValId);
+
+ // Guard string argument.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ StringOperandId searchStrId = writer.guardToString(argId);
+
+ writer.stringEndsWithResult(strId, searchStrId);
+ writer.returnFromIC();
+
+ trackAttached("StringEndsWith");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringToLowerCase() {
+ // Expecting no arguments.
+ if (argc_ != 0) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure |this| is a primitive string value.
+ if (!thisval_.isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'toLowerCase' native function.
+ emitNativeCalleeGuard();
+
+ // Guard this is a string.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ StringOperandId strId = writer.guardToString(thisValId);
+
+ // Return string converted to lower-case.
+ writer.stringToLowerCaseResult(strId);
+ writer.returnFromIC();
+
+ trackAttached("StringToLowerCase");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStringToUpperCase() {
+ // Expecting no arguments.
+ if (argc_ != 0) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure |this| is a primitive string value.
+ if (!thisval_.isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'toUpperCase' native function.
+ emitNativeCalleeGuard();
+
+ // Guard this is a string.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ StringOperandId strId = writer.guardToString(thisValId);
+
+ // Return string converted to upper-case.
+ writer.stringToUpperCaseResult(strId);
+ writer.returnFromIC();
+
+ trackAttached("StringToUpperCase");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathRandom() {
+ // Expecting no arguments.
+ if (argc_ != 0) {
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(cx_->realm() == callee_->realm(),
+ "Shouldn't inline cross-realm Math.random because per-realm RNG");
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'random' native function.
+ emitNativeCalleeGuard();
+
+ mozilla::non_crypto::XorShift128PlusRNG* rng =
+ &cx_->realm()->getOrCreateRandomNumberGenerator();
+ writer.mathRandomResult(rng);
+
+ writer.returnFromIC();
+
+ trackAttached("MathRandom");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathAbs() {
+ // Need one argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'abs' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argumentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ // abs(INT_MIN) is a double.
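+ // (Math.abs(-2147483648) is 2147483648, which is not representable as an
+ // int32.)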
+ if (args_[0].isInt32() && args_[0].toInt32() != INT_MIN) {
+ Int32OperandId int32Id = writer.guardToInt32(argumentId);
+ writer.mathAbsInt32Result(int32Id);
+ } else {
+ NumberOperandId numberId = writer.guardIsNumber(argumentId);
+ writer.mathAbsNumberResult(numberId);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("MathAbs");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathClz32() {
+ // Need one (number) argument.
+ if (argc_ != 1 || !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'clz32' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ Int32OperandId int32Id;
+ if (args_[0].isInt32()) {
+ int32Id = writer.guardToInt32(argId);
+ } else {
+ MOZ_ASSERT(args_[0].isDouble());
+ NumberOperandId numId = writer.guardIsNumber(argId);
+ int32Id = writer.truncateDoubleToUInt32(numId);
+ }
+ writer.mathClz32Result(int32Id);
+ writer.returnFromIC();
+
+ trackAttached("MathClz32");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathSign() {
+ // Need one (number) argument.
+ if (argc_ != 1 || !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'sign' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ if (args_[0].isInt32()) {
+ Int32OperandId int32Id = writer.guardToInt32(argId);
+ writer.mathSignInt32Result(int32Id);
+ } else {
+    // Math.sign returns a double only if the input is -0 or NaN, so try to
+    // optimize the common Number => Int32 case.
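+    // For example, Math.sign(2.5) is 1 and Math.sign(-1.5) is -1 (both
+    // int32), while Math.sign(-0) is -0 and Math.sign(NaN) is NaN.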
+ double d = math_sign_impl(args_[0].toDouble());
+ int32_t unused;
+ bool resultIsInt32 = mozilla::NumberIsInt32(d, &unused);
+
+ NumberOperandId numId = writer.guardIsNumber(argId);
+ if (resultIsInt32) {
+ writer.mathSignNumberToInt32Result(numId);
+ } else {
+ writer.mathSignNumberResult(numId);
+ }
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("MathSign");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathImul() {
+ // Need two (number) arguments.
+ if (argc_ != 2 || !args_[0].isNumber() || !args_[1].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'imul' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+
+ Int32OperandId int32Arg0Id, int32Arg1Id;
+ if (args_[0].isInt32() && args_[1].isInt32()) {
+ int32Arg0Id = writer.guardToInt32(arg0Id);
+ int32Arg1Id = writer.guardToInt32(arg1Id);
+ } else {
+ // Treat both arguments as numbers if at least one of them is non-int32.
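+    // Math.imul applies ToInt32 to each operand, so e.g. Math.imul(2.5, 3)
+    // is 6 and Math.imul(2**31, 1) is -2147483648.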
+ NumberOperandId numArg0Id = writer.guardIsNumber(arg0Id);
+ NumberOperandId numArg1Id = writer.guardIsNumber(arg1Id);
+ int32Arg0Id = writer.truncateDoubleToUInt32(numArg0Id);
+ int32Arg1Id = writer.truncateDoubleToUInt32(numArg1Id);
+ }
+ writer.mathImulResult(int32Arg0Id, int32Arg1Id);
+ writer.returnFromIC();
+
+ trackAttached("MathImul");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathFloor() {
+ // Need one (number) argument.
+ if (argc_ != 1 || !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check if the result fits in int32.
+ double res = math_floor_impl(args_[0].toNumber());
+ int32_t unused;
+ bool resultIsInt32 = mozilla::NumberIsInt32(res, &unused);
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'floor' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argumentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ if (args_[0].isInt32()) {
+ MOZ_ASSERT(resultIsInt32);
+
+ // Use an indirect truncation to inform the optimizer it needs to preserve
+ // a bailout when the input can't be represented as an int32, even if the
+ // final result is fully truncated.
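+    // For instance, if this stub later sees 1.5, the guardToInt32 below must
+    // still bail out, even though Math.floor(1.5) itself fits in an int32.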
+ Int32OperandId intId = writer.guardToInt32(argumentId);
+ writer.indirectTruncateInt32Result(intId);
+ } else {
+ NumberOperandId numberId = writer.guardIsNumber(argumentId);
+
+ if (resultIsInt32) {
+ writer.mathFloorToInt32Result(numberId);
+ } else {
+ writer.mathFloorNumberResult(numberId);
+ }
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("MathFloor");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathCeil() {
+ // Need one (number) argument.
+ if (argc_ != 1 || !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check if the result fits in int32.
+ double res = math_ceil_impl(args_[0].toNumber());
+ int32_t unused;
+ bool resultIsInt32 = mozilla::NumberIsInt32(res, &unused);
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'ceil' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argumentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ if (args_[0].isInt32()) {
+ MOZ_ASSERT(resultIsInt32);
+
+ // Use an indirect truncation to inform the optimizer it needs to preserve
+ // a bailout when the input can't be represented as an int32, even if the
+ // final result is fully truncated.
+ Int32OperandId intId = writer.guardToInt32(argumentId);
+ writer.indirectTruncateInt32Result(intId);
+ } else {
+ NumberOperandId numberId = writer.guardIsNumber(argumentId);
+
+ if (resultIsInt32) {
+ writer.mathCeilToInt32Result(numberId);
+ } else {
+ writer.mathCeilNumberResult(numberId);
+ }
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("MathCeil");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathTrunc() {
+ // Need one (number) argument.
+ if (argc_ != 1 || !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check if the result fits in int32.
+ double res = math_trunc_impl(args_[0].toNumber());
+ int32_t unused;
+ bool resultIsInt32 = mozilla::NumberIsInt32(res, &unused);
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'trunc' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argumentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ if (args_[0].isInt32()) {
+ MOZ_ASSERT(resultIsInt32);
+
+ // We don't need an indirect truncation barrier here, because Math.trunc
+ // always truncates, but never rounds its input away from zero.
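+    // For example, Math.trunc(-1.9) is -1, matching int32 truncation, whereas
+    // Math.floor(-1.9) would be -2.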
+ Int32OperandId intId = writer.guardToInt32(argumentId);
+ writer.loadInt32Result(intId);
+ } else {
+ NumberOperandId numberId = writer.guardIsNumber(argumentId);
+
+ if (resultIsInt32) {
+ writer.mathTruncToInt32Result(numberId);
+ } else {
+ writer.mathTruncNumberResult(numberId);
+ }
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("MathTrunc");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathRound() {
+ // Need one (number) argument.
+ if (argc_ != 1 || !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check if the result fits in int32.
+ double res = math_round_impl(args_[0].toNumber());
+ int32_t unused;
+ bool resultIsInt32 = mozilla::NumberIsInt32(res, &unused);
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'round' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argumentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ if (args_[0].isInt32()) {
+ MOZ_ASSERT(resultIsInt32);
+
+ // Use an indirect truncation to inform the optimizer it needs to preserve
+ // a bailout when the input can't be represented as an int32, even if the
+ // final result is fully truncated.
+ Int32OperandId intId = writer.guardToInt32(argumentId);
+ writer.indirectTruncateInt32Result(intId);
+ } else {
+ NumberOperandId numberId = writer.guardIsNumber(argumentId);
+
+ if (resultIsInt32) {
+ writer.mathRoundToInt32Result(numberId);
+ } else {
+ writer.mathFunctionNumberResult(numberId, UnaryMathFunction::Round);
+ }
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("MathRound");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathSqrt() {
+ // Need one (number) argument.
+ if (argc_ != 1 || !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'sqrt' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argumentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ NumberOperandId numberId = writer.guardIsNumber(argumentId);
+ writer.mathSqrtNumberResult(numberId);
+ writer.returnFromIC();
+
+ trackAttached("MathSqrt");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathFRound() {
+ // Need one (number) argument.
+ if (argc_ != 1 || !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'fround' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argumentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ NumberOperandId numberId = writer.guardIsNumber(argumentId);
+ writer.mathFRoundNumberResult(numberId);
+ writer.returnFromIC();
+
+ trackAttached("MathFRound");
+ return AttachDecision::Attach;
+}
+
+static bool CanAttachInt32Pow(const Value& baseVal, const Value& powerVal) {
+ auto valToInt32 = [](const Value& v) {
+ if (v.isInt32()) {
+ return v.toInt32();
+ }
+ if (v.isBoolean()) {
+ return int32_t(v.toBoolean());
+ }
+ MOZ_ASSERT(v.isNull());
+ return 0;
+ };
+ int32_t base = valToInt32(baseVal);
+ int32_t power = valToInt32(powerVal);
+
+ // x^y where y < 0 is most of the time not an int32, except when x is 1 or y
+ // gets large enough. It's hard to determine when exactly y is "large enough",
+ // so we don't use Int32PowResult when x != 1 and y < 0.
+ // Note: it's important for this condition to match the code generated by
+ // MacroAssembler::pow32 to prevent failure loops.
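+  // For example, 2**-1 is 0.5 (not an int32), while 1**-5 is 1 and 2**-2000
+  // underflows to +0.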
+ if (power < 0) {
+ return base == 1;
+ }
+
+ double res = powi(base, power);
+ int32_t unused;
+ return mozilla::NumberIsInt32(res, &unused);
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathPow() {
+ // Need two number arguments.
+ if (argc_ != 2 || !args_[0].isNumber() || !args_[1].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'pow' function.
+ emitNativeCalleeGuard();
+
+ ValOperandId baseId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ValOperandId exponentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+
+ if (args_[0].isInt32() && args_[1].isInt32() &&
+ CanAttachInt32Pow(args_[0], args_[1])) {
+ Int32OperandId baseInt32Id = writer.guardToInt32(baseId);
+ Int32OperandId exponentInt32Id = writer.guardToInt32(exponentId);
+ writer.int32PowResult(baseInt32Id, exponentInt32Id);
+ } else {
+ NumberOperandId baseNumberId = writer.guardIsNumber(baseId);
+ NumberOperandId exponentNumberId = writer.guardIsNumber(exponentId);
+ writer.doublePowResult(baseNumberId, exponentNumberId);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("MathPow");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathHypot() {
+ // Only optimize if there are 2-4 arguments.
+ if (argc_ < 2 || argc_ > 4) {
+ return AttachDecision::NoAction;
+ }
+
+ for (size_t i = 0; i < argc_; i++) {
+ if (!args_[i].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'hypot' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId firstId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ValOperandId secondId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+
+ NumberOperandId firstNumId = writer.guardIsNumber(firstId);
+ NumberOperandId secondNumId = writer.guardIsNumber(secondId);
+
+ ValOperandId thirdId;
+ ValOperandId fourthId;
+ NumberOperandId thirdNumId;
+ NumberOperandId fourthNumId;
+
+ switch (argc_) {
+ case 2:
+ writer.mathHypot2NumberResult(firstNumId, secondNumId);
+ break;
+ case 3:
+ thirdId = writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+ thirdNumId = writer.guardIsNumber(thirdId);
+ writer.mathHypot3NumberResult(firstNumId, secondNumId, thirdNumId);
+ break;
+ case 4:
+ thirdId = writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+ fourthId = writer.loadArgumentFixedSlot(ArgumentKind::Arg3, argc_);
+ thirdNumId = writer.guardIsNumber(thirdId);
+ fourthNumId = writer.guardIsNumber(fourthId);
+ writer.mathHypot4NumberResult(firstNumId, secondNumId, thirdNumId,
+ fourthNumId);
+ break;
+ default:
+ MOZ_CRASH("Unexpected number of arguments to hypot function.");
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("MathHypot");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathATan2() {
+ // Requires two numbers as arguments.
+ if (argc_ != 2 || !args_[0].isNumber() || !args_[1].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'atan2' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId yId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ValOperandId xId = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+
+ NumberOperandId yNumberId = writer.guardIsNumber(yId);
+ NumberOperandId xNumberId = writer.guardIsNumber(xId);
+
+ writer.mathAtan2NumberResult(yNumberId, xNumberId);
+ writer.returnFromIC();
+
+ trackAttached("MathAtan2");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathMinMax(bool isMax) {
+ // For now only optimize if there are 1-4 arguments.
+ if (argc_ < 1 || argc_ > 4) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure all arguments are numbers.
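+  // For example, Math.max(1, 2, 3) can use the chained int32 ops below, while
+  // Math.max(1, 2.5) has to guard every argument as a Number.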
+ bool allInt32 = true;
+ for (size_t i = 0; i < argc_; i++) {
+ if (!args_[i].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+ if (!args_[i].isInt32()) {
+ allInt32 = false;
+ }
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is this Math function.
+ emitNativeCalleeGuard();
+
+ if (allInt32) {
+ ValOperandId valId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId resId = writer.guardToInt32(valId);
+ for (size_t i = 1; i < argc_; i++) {
+ ValOperandId argId =
+ writer.loadArgumentFixedSlot(ArgumentKindForArgIndex(i), argc_);
+ Int32OperandId argInt32Id = writer.guardToInt32(argId);
+ resId = writer.int32MinMax(isMax, resId, argInt32Id);
+ }
+ writer.loadInt32Result(resId);
+ } else {
+ ValOperandId valId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ NumberOperandId resId = writer.guardIsNumber(valId);
+ for (size_t i = 1; i < argc_; i++) {
+ ValOperandId argId =
+ writer.loadArgumentFixedSlot(ArgumentKindForArgIndex(i), argc_);
+ NumberOperandId argNumId = writer.guardIsNumber(argId);
+ resId = writer.numberMinMax(isMax, resId, argNumId);
+ }
+ writer.loadDoubleResult(resId);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached(isMax ? "MathMax" : "MathMin");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachSpreadMathMinMax(
+ bool isMax) {
+ MOZ_ASSERT(flags_.getArgFormat() == CallFlags::Spread ||
+ flags_.getArgFormat() == CallFlags::FunApplyArray);
+
+ // The result will be an int32 if there is at least one argument,
+ // and all the arguments are int32.
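+  // With no arguments, Math.max(...[]) is -Infinity and Math.min(...[]) is
+  // Infinity, so an empty array can't take the int32 path.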
+ bool int32Result = args_.length() > 0;
+ for (size_t i = 0; i < args_.length(); i++) {
+ if (!args_[i].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+ if (!args_[i].isInt32()) {
+ int32Result = false;
+ }
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is this Math function.
+ emitNativeCalleeGuard();
+
+ // Load the argument array.
+ ObjOperandId argsId = emitLoadArgsArray();
+
+ if (int32Result) {
+ writer.int32MinMaxArrayResult(argsId, isMax);
+ } else {
+ writer.numberMinMaxArrayResult(argsId, isMax);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached(isMax ? "MathMaxArray" : "MathMinArray");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMathFunction(
+ UnaryMathFunction fun) {
+ // Need one argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
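+  // When fdlibm is forced or fingerprinting resistance is enabled, the native
+  // sin/cos/tan are replaced by their fdlibm variants so the inlined result
+  // matches what the actual call would compute.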
+ if (math_use_fdlibm_for_sin_cos_tan() ||
+ callee_->realm()->behaviors().shouldResistFingerprinting()) {
+ switch (fun) {
+ case UnaryMathFunction::SinNative:
+ fun = UnaryMathFunction::SinFdlibm;
+ break;
+ case UnaryMathFunction::CosNative:
+ fun = UnaryMathFunction::CosFdlibm;
+ break;
+ case UnaryMathFunction::TanNative:
+ fun = UnaryMathFunction::TanFdlibm;
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is this Math function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argumentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ NumberOperandId numberId = writer.guardIsNumber(argumentId);
+ writer.mathFunctionNumberResult(numberId, fun);
+ writer.returnFromIC();
+
+ trackAttached("MathFunction");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachNumber() {
+ // Expect a single string argument.
+ if (argc_ != 1 || !args_[0].isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ double num;
+ if (!StringToNumber(cx_, args_[0].toString(), &num)) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the `Number` function.
+ emitNativeCalleeGuard();
+
+ // Guard that the argument is a string.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ StringOperandId strId = writer.guardToString(argId);
+
+ // Return either an Int32 or Double result.
+ int32_t unused;
+ if (mozilla::NumberIsInt32(num, &unused)) {
+ Int32OperandId resultId = writer.guardStringToInt32(strId);
+ writer.loadInt32Result(resultId);
+ } else {
+ NumberOperandId resultId = writer.guardStringToNumber(strId);
+ writer.loadDoubleResult(resultId);
+ }
+ writer.returnFromIC();
+
+ trackAttached("Number");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachNumberParseInt() {
+ // Expected arguments: input (string or number), optional radix (int32).
+ if (argc_ < 1 || argc_ > 2) {
+ return AttachDecision::NoAction;
+ }
+ if (!args_[0].isString() && !args_[0].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+ if (args_[0].isDouble()) {
+ double d = args_[0].toDouble();
+
+ // See num_parseInt for why we have to reject numbers smaller than 1.0e-6.
+ // Negative numbers in the exclusive range (-1, -0) return -0.
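+    // For example, parseInt(1e-7) is 1, because the double stringifies to
+    // "1e-7", and parseInt(-0.5) is -0.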
+ bool canTruncateToInt32 =
+ (DOUBLE_DECIMAL_IN_SHORTEST_LOW <= d && d <= double(INT32_MAX)) ||
+ (double(INT32_MIN) <= d && d <= -1.0) || (d == 0.0);
+ if (!canTruncateToInt32) {
+ return AttachDecision::NoAction;
+ }
+ }
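+  // Only a radix of 10 (or no radix at all) is supported here; e.g.
+  // parseInt("ff", 16) falls back to the generic call path.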
+ if (argc_ > 1 && !args_[1].isInt32(10)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'parseInt' native function.
+ emitNativeCalleeGuard();
+
+ auto guardRadix = [&]() {
+ ValOperandId radixId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ Int32OperandId intRadixId = writer.guardToInt32(radixId);
+ writer.guardSpecificInt32(intRadixId, 10);
+ return intRadixId;
+ };
+
+ ValOperandId inputId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ if (args_[0].isString()) {
+ StringOperandId strId = writer.guardToString(inputId);
+
+ Int32OperandId intRadixId;
+ if (argc_ > 1) {
+ intRadixId = guardRadix();
+ } else {
+ intRadixId = writer.loadInt32Constant(0);
+ }
+
+ writer.numberParseIntResult(strId, intRadixId);
+ } else if (args_[0].isInt32()) {
+ Int32OperandId intId = writer.guardToInt32(inputId);
+ if (argc_ > 1) {
+ guardRadix();
+ }
+ writer.loadInt32Result(intId);
+ } else {
+ MOZ_ASSERT(args_[0].isDouble());
+
+ NumberOperandId numId = writer.guardIsNumber(inputId);
+ if (argc_ > 1) {
+ guardRadix();
+ }
+ writer.doubleParseIntResult(numId);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("NumberParseInt");
+ return AttachDecision::Attach;
+}
+
+StringOperandId IRGenerator::emitToStringGuard(ValOperandId id,
+ const Value& v) {
+ MOZ_ASSERT(CanConvertToString(v));
+ if (v.isString()) {
+ return writer.guardToString(id);
+ }
+ if (v.isBoolean()) {
+ BooleanOperandId boolId = writer.guardToBoolean(id);
+ return writer.booleanToString(boolId);
+ }
+ if (v.isNull()) {
+ writer.guardIsNull(id);
+ return writer.loadConstantString(cx_->names().null);
+ }
+ if (v.isUndefined()) {
+ writer.guardIsUndefined(id);
+ return writer.loadConstantString(cx_->names().undefined);
+ }
+ if (v.isInt32()) {
+ Int32OperandId intId = writer.guardToInt32(id);
+ return writer.callInt32ToString(intId);
+ }
+ // At this point we are creating an IC that will handle
+ // both Int32 and Double cases.
+ MOZ_ASSERT(v.isNumber());
+ NumberOperandId numId = writer.guardIsNumber(id);
+ return writer.callNumberToString(numId);
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachNumberToString() {
+ // Expecting no arguments or a single int32 argument.
+ if (argc_ > 1) {
+ return AttachDecision::NoAction;
+ }
+ if (argc_ == 1 && !args_[0].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure |this| is a primitive number value.
+ if (!thisval_.isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // No arguments means base 10.
+ int32_t base = 10;
+ if (argc_ > 0) {
+ base = args_[0].toInt32();
+ if (base < 2 || base > 36) {
+ return AttachDecision::NoAction;
+ }
+
+ // Non-decimal bases currently only support int32 inputs.
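+    // For example, (255).toString(16) === "ff" can be attached below, but
+    // (1.5).toString(2) cannot.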
+ if (base != 10 && !thisval_.isInt32()) {
+ return AttachDecision::NoAction;
+ }
+ }
+ MOZ_ASSERT(2 <= base && base <= 36);
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'toString' native function.
+ emitNativeCalleeGuard();
+
+ // Initialize the |this| operand.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+
+ // Guard on number and convert to string.
+ if (base == 10) {
+ // If an explicit base was passed, guard its value.
+ if (argc_ > 0) {
+ // Guard the `base` argument is an int32.
+ ValOperandId baseId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId intBaseId = writer.guardToInt32(baseId);
+
+ // Guard `base` is 10 for decimal toString representation.
+ writer.guardSpecificInt32(intBaseId, 10);
+ }
+
+ StringOperandId strId = emitToStringGuard(thisValId, thisval_);
+
+ // Return the string.
+ writer.loadStringResult(strId);
+ } else {
+ MOZ_ASSERT(argc_ > 0);
+
+ // Guard the |this| value is an int32.
+ Int32OperandId thisIntId = writer.guardToInt32(thisValId);
+
+ // Guard the `base` argument is an int32.
+ ValOperandId baseId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId intBaseId = writer.guardToInt32(baseId);
+
+ // Return the string.
+ writer.int32ToStringWithBaseResult(thisIntId, intBaseId);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("NumberToString");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachReflectGetPrototypeOf() {
+ // Need one argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!args_[0].isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'getPrototypeOf' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argumentId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argumentId);
+
+ writer.reflectGetPrototypeOfResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("ReflectGetPrototypeOf");
+ return AttachDecision::Attach;
+}
+
+static bool AtomicsMeetsPreconditions(TypedArrayObject* typedArray,
+ const Value& index) {
+ switch (typedArray->type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ break;
+
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Uint8Clamped:
+ // Exclude floating types and Uint8Clamped.
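+      // Atomics operations are only defined on integer element types; e.g.
+      // Atomics.add on a Float64Array throws a TypeError.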
+ return false;
+
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+
+ // Bounds check the index argument.
+ int64_t indexInt64;
+ if (!ValueIsInt64Index(index, &indexInt64)) {
+ return false;
+ }
+ if (indexInt64 < 0 || uint64_t(indexInt64) >= typedArray->length()) {
+ return false;
+ }
+
+ return true;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsCompareExchange() {
+ if (!JitSupportsAtomics()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Need four arguments.
+ if (argc_ != 4) {
+ return AttachDecision::NoAction;
+ }
+
+ // Arguments: typedArray, index (number), expected, replacement.
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+ if (!args_[1].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
+ return AttachDecision::NoAction;
+ }
+
+ Scalar::Type elementType = typedArray->type();
+ if (!ValueIsNumeric(elementType, args_[2])) {
+ return AttachDecision::NoAction;
+ }
+ if (!ValueIsNumeric(elementType, args_[3])) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the `compareExchange` native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(arg0Id);
+ writer.guardShapeForClass(objId, typedArray->shape());
+
+ // Convert index to intPtr.
+ ValOperandId indexId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ IntPtrOperandId intPtrIndexId =
+ guardToIntPtrIndex(args_[1], indexId, /* supportOOB = */ false);
+
+ // Convert expected value to int32/BigInt.
+ ValOperandId expectedId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+ OperandId numericExpectedId = emitNumericGuard(expectedId, elementType);
+
+ // Convert replacement value to int32/BigInt.
+ ValOperandId replacementId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg3, argc_);
+ OperandId numericReplacementId = emitNumericGuard(replacementId, elementType);
+
+ writer.atomicsCompareExchangeResult(objId, intPtrIndexId, numericExpectedId,
+ numericReplacementId, typedArray->type());
+ writer.returnFromIC();
+
+ trackAttached("AtomicsCompareExchange");
+ return AttachDecision::Attach;
+}
+
+bool InlinableNativeIRGenerator::canAttachAtomicsReadWriteModify() {
+ if (!JitSupportsAtomics()) {
+ return false;
+ }
+
+ // Need three arguments.
+ if (argc_ != 3) {
+ return false;
+ }
+
+ // Arguments: typedArray, index (number), value.
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
+ return false;
+ }
+ if (!args_[1].isNumber()) {
+ return false;
+ }
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
+ return false;
+ }
+ if (!ValueIsNumeric(typedArray->type(), args_[2])) {
+ return false;
+ }
+ return true;
+}
+
+InlinableNativeIRGenerator::AtomicsReadWriteModifyOperands
+InlinableNativeIRGenerator::emitAtomicsReadWriteModifyOperands() {
+ MOZ_ASSERT(canAttachAtomicsReadWriteModify());
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is this Atomics function.
+ emitNativeCalleeGuard();
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(arg0Id);
+ writer.guardShapeForClass(objId, typedArray->shape());
+
+ // Convert index to intPtr.
+ ValOperandId indexId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ IntPtrOperandId intPtrIndexId =
+ guardToIntPtrIndex(args_[1], indexId, /* supportOOB = */ false);
+
+ // Convert value to int32/BigInt.
+ ValOperandId valueId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+ OperandId numericValueId = emitNumericGuard(valueId, typedArray->type());
+
+ return {objId, intPtrIndexId, numericValueId};
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsExchange() {
+ if (!canAttachAtomicsReadWriteModify()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto [objId, intPtrIndexId, numericValueId] =
+ emitAtomicsReadWriteModifyOperands();
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+
+ writer.atomicsExchangeResult(objId, intPtrIndexId, numericValueId,
+ typedArray->type());
+ writer.returnFromIC();
+
+ trackAttached("AtomicsExchange");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsAdd() {
+ if (!canAttachAtomicsReadWriteModify()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto [objId, intPtrIndexId, numericValueId] =
+ emitAtomicsReadWriteModifyOperands();
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ bool forEffect = ignoresResult();
+
+ writer.atomicsAddResult(objId, intPtrIndexId, numericValueId,
+ typedArray->type(), forEffect);
+ writer.returnFromIC();
+
+ trackAttached("AtomicsAdd");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsSub() {
+ if (!canAttachAtomicsReadWriteModify()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto [objId, intPtrIndexId, numericValueId] =
+ emitAtomicsReadWriteModifyOperands();
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ bool forEffect = ignoresResult();
+
+ writer.atomicsSubResult(objId, intPtrIndexId, numericValueId,
+ typedArray->type(), forEffect);
+ writer.returnFromIC();
+
+ trackAttached("AtomicsSub");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsAnd() {
+ if (!canAttachAtomicsReadWriteModify()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto [objId, intPtrIndexId, numericValueId] =
+ emitAtomicsReadWriteModifyOperands();
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ bool forEffect = ignoresResult();
+
+ writer.atomicsAndResult(objId, intPtrIndexId, numericValueId,
+ typedArray->type(), forEffect);
+ writer.returnFromIC();
+
+ trackAttached("AtomicsAnd");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsOr() {
+ if (!canAttachAtomicsReadWriteModify()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto [objId, intPtrIndexId, numericValueId] =
+ emitAtomicsReadWriteModifyOperands();
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ bool forEffect = ignoresResult();
+
+ writer.atomicsOrResult(objId, intPtrIndexId, numericValueId,
+ typedArray->type(), forEffect);
+ writer.returnFromIC();
+
+ trackAttached("AtomicsOr");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsXor() {
+ if (!canAttachAtomicsReadWriteModify()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto [objId, intPtrIndexId, numericValueId] =
+ emitAtomicsReadWriteModifyOperands();
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ bool forEffect = ignoresResult();
+
+ writer.atomicsXorResult(objId, intPtrIndexId, numericValueId,
+ typedArray->type(), forEffect);
+ writer.returnFromIC();
+
+ trackAttached("AtomicsXor");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsLoad() {
+ if (!JitSupportsAtomics()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Need two arguments.
+ if (argc_ != 2) {
+ return AttachDecision::NoAction;
+ }
+
+ // Arguments: typedArray, index (number).
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+ if (!args_[1].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the `load` native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(arg0Id);
+ writer.guardShapeForClass(objId, typedArray->shape());
+
+ // Convert index to intPtr.
+ ValOperandId indexId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ IntPtrOperandId intPtrIndexId =
+ guardToIntPtrIndex(args_[1], indexId, /* supportOOB = */ false);
+
+ writer.atomicsLoadResult(objId, intPtrIndexId, typedArray->type());
+ writer.returnFromIC();
+
+ trackAttached("AtomicsLoad");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsStore() {
+ if (!JitSupportsAtomics()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Need three arguments.
+ if (argc_ != 3) {
+ return AttachDecision::NoAction;
+ }
+
+ // Atomics.store() is annoying because it returns the result of converting the
+ // value by ToInteger(), not the input value, nor the result of converting the
+ // value by ToInt32(). It is especially annoying because almost nobody uses
+ // the result value.
+ //
+ // As an expedient compromise, therefore, we inline only if the result is
+ // obviously unused or if the argument is already Int32 and thus requires no
+ // conversion.
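+  // For example, on an Int32Array, Atomics.store(ta, 0, 2**31) stores
+  // -2147483648 but the call itself evaluates to 2147483648, and
+  // Atomics.store(ta, 0, 3.5) evaluates to 3.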
+
+ // Arguments: typedArray, index (number), value.
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
+ return AttachDecision::NoAction;
+ }
+ if (!args_[1].isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
+ return AttachDecision::NoAction;
+ }
+
+ Scalar::Type elementType = typedArray->type();
+ if (!ValueIsNumeric(elementType, args_[2])) {
+ return AttachDecision::NoAction;
+ }
+
+ bool guardIsInt32 = !Scalar::isBigIntType(elementType) && !ignoresResult();
+
+ if (guardIsInt32 && !args_[2].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the `store` native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId arg0Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(arg0Id);
+ writer.guardShapeForClass(objId, typedArray->shape());
+
+ // Convert index to intPtr.
+ ValOperandId indexId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ IntPtrOperandId intPtrIndexId =
+ guardToIntPtrIndex(args_[1], indexId, /* supportOOB = */ false);
+
+ // Ensure value is int32 or BigInt.
+ ValOperandId valueId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_);
+ OperandId numericValueId;
+ if (guardIsInt32) {
+ numericValueId = writer.guardToInt32(valueId);
+ } else {
+ numericValueId = emitNumericGuard(valueId, elementType);
+ }
+
+ writer.atomicsStoreResult(objId, intPtrIndexId, numericValueId,
+ typedArray->type());
+ writer.returnFromIC();
+
+ trackAttached("AtomicsStore");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsIsLockFree() {
+ // Need one argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!args_[0].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the `isLockFree` native function.
+ emitNativeCalleeGuard();
+
+ // Ensure value is int32.
+ ValOperandId valueId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId int32ValueId = writer.guardToInt32(valueId);
+
+ writer.atomicsIsLockFreeResult(int32ValueId);
+ writer.returnFromIC();
+
+ trackAttached("AtomicsIsLockFree");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachBoolean() {
+ // Need zero or one argument.
+ if (argc_ > 1) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'Boolean' native function.
+ emitNativeCalleeGuard();
+
+ if (argc_ == 0) {
+ writer.loadBooleanResult(false);
+ } else {
+ ValOperandId valId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ writer.loadValueTruthyResult(valId);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("Boolean");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachBailout() {
+ // Expecting no arguments.
+ if (argc_ != 0) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'bailout' native function.
+ emitNativeCalleeGuard();
+
+ writer.bailout();
+ writer.loadUndefinedResult();
+ writer.returnFromIC();
+
+ trackAttached("Bailout");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAssertFloat32() {
+ // Expecting two arguments.
+ if (argc_ != 2) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'assertFloat32' native function.
+ emitNativeCalleeGuard();
+
+ // TODO: Warp doesn't yet optimize Float32 (bug 1655773).
+
+ // NOP when not in IonMonkey.
+ writer.loadUndefinedResult();
+ writer.returnFromIC();
+
+ trackAttached("AssertFloat32");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachAssertRecoveredOnBailout() {
+ // Expecting two arguments.
+ if (argc_ != 2) {
+ return AttachDecision::NoAction;
+ }
+
+ // (Fuzzing unsafe) testing function which must be called with a constant
+ // boolean as its second argument.
+ bool mustBeRecovered = args_[1].toBoolean();
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'assertRecoveredOnBailout' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId valId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ writer.assertRecoveredOnBailoutResult(valId, mustBeRecovered);
+ writer.returnFromIC();
+
+ trackAttached("AssertRecoveredOnBailout");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachObjectIs() {
+ // Need two arguments.
+ if (argc_ != 2) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the `is` native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId lhsId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ValOperandId rhsId = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+
+ HandleValue lhs = args_[0];
+ HandleValue rhs = args_[1];
+
+ if (!isFirstStub()) {
+ writer.sameValueResult(lhsId, rhsId);
+ } else if (lhs.isNumber() && rhs.isNumber() &&
+ !(lhs.isInt32() && rhs.isInt32())) {
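+    // SameValue differs from == on doubles: Object.is(0, -0) is false and
+    // Object.is(NaN, NaN) is true.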
+ NumberOperandId lhsNumId = writer.guardIsNumber(lhsId);
+ NumberOperandId rhsNumId = writer.guardIsNumber(rhsId);
+ writer.compareDoubleSameValueResult(lhsNumId, rhsNumId);
+ } else if (!SameType(lhs, rhs)) {
+ // Compare tags for strictly different types.
+ ValueTagOperandId lhsTypeId = writer.loadValueTag(lhsId);
+ ValueTagOperandId rhsTypeId = writer.loadValueTag(rhsId);
+ writer.guardTagNotEqual(lhsTypeId, rhsTypeId);
+ writer.loadBooleanResult(false);
+ } else {
+ MOZ_ASSERT(lhs.type() == rhs.type());
+ MOZ_ASSERT(lhs.type() != JS::ValueType::Double);
+
+ switch (lhs.type()) {
+ case JS::ValueType::Int32: {
+ Int32OperandId lhsIntId = writer.guardToInt32(lhsId);
+ Int32OperandId rhsIntId = writer.guardToInt32(rhsId);
+ writer.compareInt32Result(JSOp::StrictEq, lhsIntId, rhsIntId);
+ break;
+ }
+ case JS::ValueType::Boolean: {
+ Int32OperandId lhsIntId = writer.guardBooleanToInt32(lhsId);
+ Int32OperandId rhsIntId = writer.guardBooleanToInt32(rhsId);
+ writer.compareInt32Result(JSOp::StrictEq, lhsIntId, rhsIntId);
+ break;
+ }
+ case JS::ValueType::Undefined: {
+ writer.guardIsUndefined(lhsId);
+ writer.guardIsUndefined(rhsId);
+ writer.loadBooleanResult(true);
+ break;
+ }
+ case JS::ValueType::Null: {
+ writer.guardIsNull(lhsId);
+ writer.guardIsNull(rhsId);
+ writer.loadBooleanResult(true);
+ break;
+ }
+ case JS::ValueType::String: {
+ StringOperandId lhsStrId = writer.guardToString(lhsId);
+ StringOperandId rhsStrId = writer.guardToString(rhsId);
+ writer.compareStringResult(JSOp::StrictEq, lhsStrId, rhsStrId);
+ break;
+ }
+ case JS::ValueType::Symbol: {
+ SymbolOperandId lhsSymId = writer.guardToSymbol(lhsId);
+ SymbolOperandId rhsSymId = writer.guardToSymbol(rhsId);
+ writer.compareSymbolResult(JSOp::StrictEq, lhsSymId, rhsSymId);
+ break;
+ }
+ case JS::ValueType::BigInt: {
+ BigIntOperandId lhsBigIntId = writer.guardToBigInt(lhsId);
+ BigIntOperandId rhsBigIntId = writer.guardToBigInt(rhsId);
+ writer.compareBigIntResult(JSOp::StrictEq, lhsBigIntId, rhsBigIntId);
+ break;
+ }
+ case JS::ValueType::Object: {
+ ObjOperandId lhsObjId = writer.guardToObject(lhsId);
+ ObjOperandId rhsObjId = writer.guardToObject(rhsId);
+ writer.compareObjectResult(JSOp::StrictEq, lhsObjId, rhsObjId);
+ break;
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+#endif
+ case JS::ValueType::Double:
+ case JS::ValueType::Magic:
+ case JS::ValueType::PrivateGCThing:
+ MOZ_CRASH("Unexpected type");
+ }
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("ObjectIs");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachObjectIsPrototypeOf() {
+ // Ensure |this| is an object.
+ if (!thisval_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Need a single argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the `isPrototypeOf` native function.
+ emitNativeCalleeGuard();
+
+ // Guard that |this| is an object.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId thisObjId = writer.guardToObject(thisValId);
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ writer.loadInstanceOfObjectResult(argId, thisObjId);
+ writer.returnFromIC();
+
+ trackAttached("ObjectIsPrototypeOf");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachObjectToString() {
+ // Expecting no arguments.
+ if (argc_ != 0) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure |this| is an object.
+ if (!thisval_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Don't attach if the object has @@toStringTag or is a proxy.
+ if (!ObjectClassToString(cx_, &thisval_.toObject())) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'toString' native function.
+ emitNativeCalleeGuard();
+
+ // Guard that |this| is an object.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId thisObjId = writer.guardToObject(thisValId);
+
+ writer.objectToStringResult(thisObjId);
+ writer.returnFromIC();
+
+ trackAttached("ObjectToString");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachBigIntAsIntN() {
+ // Need two arguments (Int32, BigInt).
+ if (argc_ != 2 || !args_[0].isInt32() || !args_[1].isBigInt()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Negative bits throws an error.
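+  // For example, BigInt.asIntN(-1, 0n) throws a RangeError, so leave that to
+  // the generic path.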
+ if (args_[0].toInt32() < 0) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'BigInt.asIntN' native function.
+ emitNativeCalleeGuard();
+
+ // Convert bits to int32.
+ ValOperandId bitsId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId int32BitsId = writer.guardToInt32Index(bitsId);
+
+ // Number of bits mustn't be negative.
+ writer.guardInt32IsNonNegative(int32BitsId);
+
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ BigIntOperandId bigIntId = writer.guardToBigInt(arg1Id);
+
+ writer.bigIntAsIntNResult(int32BitsId, bigIntId);
+ writer.returnFromIC();
+
+ trackAttached("BigIntAsIntN");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachBigIntAsUintN() {
+ // Need two arguments (Int32, BigInt).
+ if (argc_ != 2 || !args_[0].isInt32() || !args_[1].isBigInt()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Negative bits throws an error.
+ if (args_[0].toInt32() < 0) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'BigInt.asUintN' native function.
+ emitNativeCalleeGuard();
+
+ // Convert bits to int32.
+ ValOperandId bitsId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ Int32OperandId int32BitsId = writer.guardToInt32Index(bitsId);
+
+ // Number of bits mustn't be negative.
+ writer.guardInt32IsNonNegative(int32BitsId);
+
+ ValOperandId arg1Id = writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ BigIntOperandId bigIntId = writer.guardToBigInt(arg1Id);
+
+ writer.bigIntAsUintNResult(int32BitsId, bigIntId);
+ writer.returnFromIC();
+
+ trackAttached("BigIntAsUintN");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachSetHas() {
+ // Ensure |this| is a SetObject.
+ if (!thisval_.isObject() || !thisval_.toObject().is<SetObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Need a single argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'has' native function.
+ emitNativeCalleeGuard();
+
+ // Guard |this| is a SetObject.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId objId = writer.guardToObject(thisValId);
+ emitOptimisticClassGuard(objId, &thisval_.toObject(), GuardClassKind::Set);
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+#ifndef JS_CODEGEN_X86
+ // Assume the hash key will likely always have the same type when attaching
+ // the first stub. If the call is polymorphic on the hash key, attach a stub
+ // which handles any value.
+ if (isFirstStub()) {
+ switch (args_[0].type()) {
+ case ValueType::Double:
+ case ValueType::Int32:
+ case ValueType::Boolean:
+ case ValueType::Undefined:
+ case ValueType::Null: {
+ writer.guardToNonGCThing(argId);
+ writer.setHasNonGCThingResult(objId, argId);
+ break;
+ }
+ case ValueType::String: {
+ StringOperandId strId = writer.guardToString(argId);
+ writer.setHasStringResult(objId, strId);
+ break;
+ }
+ case ValueType::Symbol: {
+ SymbolOperandId symId = writer.guardToSymbol(argId);
+ writer.setHasSymbolResult(objId, symId);
+ break;
+ }
+ case ValueType::BigInt: {
+ BigIntOperandId bigIntId = writer.guardToBigInt(argId);
+ writer.setHasBigIntResult(objId, bigIntId);
+ break;
+ }
+ case ValueType::Object: {
+ // Currently only supported on 64-bit platforms.
+# ifdef JS_PUNBOX64
+ ObjOperandId valId = writer.guardToObject(argId);
+ writer.setHasObjectResult(objId, valId);
+# else
+ writer.setHasResult(objId, argId);
+# endif
+ break;
+ }
+
+# ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+# endif
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ MOZ_CRASH("Unexpected type");
+ }
+ } else {
+ writer.setHasResult(objId, argId);
+ }
+#else
+ // The optimized versions require too many registers on x86.
+ writer.setHasResult(objId, argId);
+#endif
+
+ writer.returnFromIC();
+
+ trackAttached("SetHas");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMapHas() {
+ // Ensure |this| is a MapObject.
+ if (!thisval_.isObject() || !thisval_.toObject().is<MapObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Need a single argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'has' native function.
+ emitNativeCalleeGuard();
+
+ // Guard |this| is a MapObject.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId objId = writer.guardToObject(thisValId);
+ emitOptimisticClassGuard(objId, &thisval_.toObject(), GuardClassKind::Map);
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+#ifndef JS_CODEGEN_X86
+ // Assume the hash key will likely always have the same type when attaching
+ // the first stub. If the call is polymorphic on the hash key, attach a stub
+ // which handles any value.
+ if (isFirstStub()) {
+ switch (args_[0].type()) {
+ case ValueType::Double:
+ case ValueType::Int32:
+ case ValueType::Boolean:
+ case ValueType::Undefined:
+ case ValueType::Null: {
+ writer.guardToNonGCThing(argId);
+ writer.mapHasNonGCThingResult(objId, argId);
+ break;
+ }
+ case ValueType::String: {
+ StringOperandId strId = writer.guardToString(argId);
+ writer.mapHasStringResult(objId, strId);
+ break;
+ }
+ case ValueType::Symbol: {
+ SymbolOperandId symId = writer.guardToSymbol(argId);
+ writer.mapHasSymbolResult(objId, symId);
+ break;
+ }
+ case ValueType::BigInt: {
+ BigIntOperandId bigIntId = writer.guardToBigInt(argId);
+ writer.mapHasBigIntResult(objId, bigIntId);
+ break;
+ }
+ case ValueType::Object: {
+ // Currently only supported on 64-bit platforms.
+# ifdef JS_PUNBOX64
+ ObjOperandId valId = writer.guardToObject(argId);
+ writer.mapHasObjectResult(objId, valId);
+# else
+ writer.mapHasResult(objId, argId);
+# endif
+ break;
+ }
+
+# ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+# endif
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ MOZ_CRASH("Unexpected type");
+ }
+ } else {
+ writer.mapHasResult(objId, argId);
+ }
+#else
+ // The optimized versions require too many registers on x86.
+ writer.mapHasResult(objId, argId);
+#endif
+
+ writer.returnFromIC();
+
+ trackAttached("MapHas");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachMapGet() {
+ // Ensure |this| is a MapObject.
+ if (!thisval_.isObject() || !thisval_.toObject().is<MapObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Need a single argument.
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'get' native function.
+ emitNativeCalleeGuard();
+
+ // Guard |this| is a MapObject.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId objId = writer.guardToObject(thisValId);
+ emitOptimisticClassGuard(objId, &thisval_.toObject(), GuardClassKind::Map);
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+#ifndef JS_CODEGEN_X86
+ // Assume the hash key will likely always have the same type when attaching
+ // the first stub. If the call is polymorphic on the hash key, attach a stub
+ // which handles any value.
+ if (isFirstStub()) {
+ switch (args_[0].type()) {
+ case ValueType::Double:
+ case ValueType::Int32:
+ case ValueType::Boolean:
+ case ValueType::Undefined:
+ case ValueType::Null: {
+ writer.guardToNonGCThing(argId);
+ writer.mapGetNonGCThingResult(objId, argId);
+ break;
+ }
+ case ValueType::String: {
+ StringOperandId strId = writer.guardToString(argId);
+ writer.mapGetStringResult(objId, strId);
+ break;
+ }
+ case ValueType::Symbol: {
+ SymbolOperandId symId = writer.guardToSymbol(argId);
+ writer.mapGetSymbolResult(objId, symId);
+ break;
+ }
+ case ValueType::BigInt: {
+ BigIntOperandId bigIntId = writer.guardToBigInt(argId);
+ writer.mapGetBigIntResult(objId, bigIntId);
+ break;
+ }
+ case ValueType::Object: {
+ // Currently only supported on 64-bit platforms.
+# ifdef JS_PUNBOX64
+ ObjOperandId valId = writer.guardToObject(argId);
+ writer.mapGetObjectResult(objId, valId);
+# else
+ writer.mapGetResult(objId, argId);
+# endif
+ break;
+ }
+
+# ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+# endif
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ MOZ_CRASH("Unexpected type");
+ }
+ } else {
+ writer.mapGetResult(objId, argId);
+ }
+#else
+ // The optimized versions require too many registers on x86.
+ writer.mapGetResult(objId, argId);
+#endif
+
+ writer.returnFromIC();
+
+ trackAttached("MapGet");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CallIRGenerator::tryAttachFunCall(HandleFunction callee) {
+ MOZ_ASSERT(callee->isNativeWithoutJitEntry());
+
+ if (callee->native() != fun_call) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!thisval_.isObject() || !thisval_.toObject().is<JSFunction>()) {
+ return AttachDecision::NoAction;
+ }
+ RootedFunction target(cx_, &thisval_.toObject().as<JSFunction>());
+
+ bool isScripted = target->hasJitEntry();
+ MOZ_ASSERT_IF(!isScripted, target->isNativeWithoutJitEntry());
+
+ if (target->isClassConstructor()) {
+ return AttachDecision::NoAction;
+ }
+ Int32OperandId argcId(writer.setInputOperandId(0));
+
+ CallFlags targetFlags(CallFlags::FunCall);
+ if (mode_ == ICState::Mode::Specialized) {
+ if (cx_->realm() == target->realm()) {
+ targetFlags.setIsSameRealm();
+ }
+ }
+
+ if (mode_ == ICState::Mode::Specialized && !isScripted && argc_ > 0) {
+ // The stack layout is already in the correct form for calls with at least
+ // one argument.
+ //
+ // clang-format off
+ //
+ // *** STACK LAYOUT (bottom to top) *** *** INDEX ***
+ // Callee <-- argc+1
+ // ThisValue <-- argc
+ // Args: | Arg0 | <-- argc-1
+ // | Arg1 | <-- argc-2
+ // | ... | <-- ...
+ // | ArgN | <-- 0
+ //
+ // When passing |argc-1| as the number of arguments, we get:
+ //
+ // *** STACK LAYOUT (bottom to top) *** *** INDEX ***
+ // Callee <-- (argc-1)+1 = argc = ThisValue
+ // ThisValue <-- (argc-1) = argc-1 = Arg0
+ // Args: | Arg0 | <-- (argc-1)-1 = argc-2 = Arg1
+ // | Arg1 | <-- (argc-1)-2 = argc-3 = Arg2
+ // | ... | <-- ...
+ //
+ // clang-format on
+ //
+  // This allows us to call |loadArgumentFixedSlot(ArgumentKind::Arg0)| and
+  // still load the correct argument index from |ArgumentKind::Arg1|.
+ //
+ // When no arguments are passed, i.e. |argc==0|, we have to replace
+ // |ArgumentKind::Arg0| with the undefined value. But we don't yet support
+ // this case.
+ HandleValue newTarget = NullHandleValue;
+ HandleValue thisValue = args_[0];
+ HandleValueArray args =
+ HandleValueArray::subarray(args_, 1, args_.length() - 1);
+
+ // Check for specific native-function optimizations.
+ InlinableNativeIRGenerator nativeGen(*this, target, newTarget, thisValue,
+ args, targetFlags);
+ TRY_ATTACH(nativeGen.tryAttachStub());
+ }
+
+ ObjOperandId thisObjId = emitFunCallGuard(argcId);
+
+ if (mode_ == ICState::Mode::Specialized) {
+ // Ensure that |this| is the expected target function.
+ emitCalleeGuard(thisObjId, target);
+
+ if (isScripted) {
+ writer.callScriptedFunction(thisObjId, argcId, targetFlags,
+ ClampFixedArgc(argc_));
+ } else {
+ writer.callNativeFunction(thisObjId, argcId, op_, target, targetFlags,
+ ClampFixedArgc(argc_));
+ }
+ } else {
+ // Guard that |this| is a function.
+ writer.guardClass(thisObjId, GuardClassKind::JSFunction);
+
+ // Guard that function is not a class constructor.
+ writer.guardNotClassConstructor(thisObjId);
+
+ if (isScripted) {
+ writer.guardFunctionHasJitEntry(thisObjId, /*isConstructing =*/false);
+ writer.callScriptedFunction(thisObjId, argcId, targetFlags,
+ ClampFixedArgc(argc_));
+ } else {
+ writer.guardFunctionHasNoJitEntry(thisObjId);
+ writer.callAnyNativeFunction(thisObjId, argcId, targetFlags,
+ ClampFixedArgc(argc_));
+ }
+ }
+
+ writer.returnFromIC();
+
+ if (isScripted) {
+ trackAttached("Scripted fun_call");
+ } else {
+ trackAttached("Native fun_call");
+ }
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIsTypedArray(
+ bool isPossiblyWrapped) {
+ // Self-hosted code calls this with a single object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objArgId = writer.guardToObject(argId);
+ writer.isTypedArrayResult(objArgId, isPossiblyWrapped);
+ writer.returnFromIC();
+
+ trackAttached(isPossiblyWrapped ? "IsPossiblyWrappedTypedArray"
+ : "IsTypedArray");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIsTypedArrayConstructor() {
+ // Self-hosted code calls this with a single object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objArgId = writer.guardToObject(argId);
+ writer.isTypedArrayConstructorResult(objArgId);
+ writer.returnFromIC();
+
+ trackAttached("IsTypedArrayConstructor");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayByteOffset() {
+ // Self-hosted code calls this with a single TypedArrayObject argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+ MOZ_ASSERT(args_[0].toObject().is<TypedArrayObject>());
+
+ auto* tarr = &args_[0].toObject().as<TypedArrayObject>();
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objArgId = writer.guardToObject(argId);
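+  // Emit the int32 result when the current byte offset fits in an int32;
+  // otherwise fall back to the double variant.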
+ if (tarr->byteOffset() <= INT32_MAX) {
+ writer.arrayBufferViewByteOffsetInt32Result(objArgId);
+ } else {
+ writer.arrayBufferViewByteOffsetDoubleResult(objArgId);
+ }
+ writer.returnFromIC();
+
+ trackAttached("IntrinsicTypedArrayByteOffset");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayElementSize() {
+ // Self-hosted code calls this with a single TypedArrayObject argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+ MOZ_ASSERT(args_[0].toObject().is<TypedArrayObject>());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objArgId = writer.guardToObject(argId);
+ writer.typedArrayElementSizeResult(objArgId);
+ writer.returnFromIC();
+
+ trackAttached("TypedArrayElementSize");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayLength(
+ bool isPossiblyWrapped) {
+ // Self-hosted code calls this with a single, possibly wrapped,
+ // TypedArrayObject argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Only optimize when the object isn't a wrapper.
+ if (isPossiblyWrapped && IsWrapper(&args_[0].toObject())) {
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(args_[0].toObject().is<TypedArrayObject>());
+
+ auto* tarr = &args_[0].toObject().as<TypedArrayObject>();
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objArgId = writer.guardToObject(argId);
+
+ if (isPossiblyWrapped) {
+ writer.guardIsNotProxy(objArgId);
+ }
+
+ if (tarr->length() <= INT32_MAX) {
+ writer.loadArrayBufferViewLengthInt32Result(objArgId);
+ } else {
+ writer.loadArrayBufferViewLengthDoubleResult(objArgId);
+ }
+ writer.returnFromIC();
+
+ trackAttached("IntrinsicTypedArrayLength");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachArrayBufferByteLength(
+ bool isPossiblyWrapped) {
+ // Self-hosted code calls this with a single, possibly wrapped,
+ // ArrayBufferObject argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Only optimize when the object isn't a wrapper.
+ if (isPossiblyWrapped && IsWrapper(&args_[0].toObject())) {
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(args_[0].toObject().is<ArrayBufferObject>());
+
+ auto* buffer = &args_[0].toObject().as<ArrayBufferObject>();
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objArgId = writer.guardToObject(argId);
+
+ if (isPossiblyWrapped) {
+ writer.guardIsNotProxy(objArgId);
+ }
+
+ if (buffer->byteLength() <= INT32_MAX) {
+ writer.loadArrayBufferByteLengthInt32Result(objArgId);
+ } else {
+ writer.loadArrayBufferByteLengthDoubleResult(objArgId);
+ }
+ writer.returnFromIC();
+
+ trackAttached("ArrayBufferByteLength");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachIsConstructing() {
+ // Self-hosted code calls this with no arguments in function scripts.
+ MOZ_ASSERT(argc_ == 0);
+ MOZ_ASSERT(script()->isFunction());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ writer.frameIsConstructingResult();
+ writer.returnFromIC();
+
+ trackAttached("IsConstructing");
+ return AttachDecision::Attach;
+}
+
+AttachDecision
+InlinableNativeIRGenerator::tryAttachGetNextMapSetEntryForIterator(bool isMap) {
+ // Self-hosted code calls this with two objects.
+ MOZ_ASSERT(argc_ == 2);
+ if (isMap) {
+ MOZ_ASSERT(args_[0].toObject().is<MapIteratorObject>());
+ } else {
+ MOZ_ASSERT(args_[0].toObject().is<SetIteratorObject>());
+ }
+ MOZ_ASSERT(args_[1].toObject().is<ArrayObject>());
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ValOperandId iterId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objIterId = writer.guardToObject(iterId);
+
+ ValOperandId resultArrId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+ ObjOperandId objResultArrId = writer.guardToObject(resultArrId);
+
+ writer.getNextMapSetEntryForIteratorResult(objIterId, objResultArrId, isMap);
+ writer.returnFromIC();
+
+ trackAttached("GetNextMapSetEntryForIterator");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachNewArrayIterator() {
+  // Self-hosted code calls this without any arguments.
+ MOZ_ASSERT(argc_ == 0);
+
+ JSObject* templateObj = NewArrayIteratorTemplate(cx_);
+ if (!templateObj) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ writer.newArrayIteratorResult(templateObj);
+ writer.returnFromIC();
+
+ trackAttached("NewArrayIterator");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachNewStringIterator() {
+  // Self-hosted code calls this without any arguments.
+ MOZ_ASSERT(argc_ == 0);
+
+ JSObject* templateObj = NewStringIteratorTemplate(cx_);
+ if (!templateObj) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ writer.newStringIteratorResult(templateObj);
+ writer.returnFromIC();
+
+ trackAttached("NewStringIterator");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachNewRegExpStringIterator() {
+  // Self-hosted code calls this without any arguments.
+ MOZ_ASSERT(argc_ == 0);
+
+ JSObject* templateObj = NewRegExpStringIteratorTemplate(cx_);
+ if (!templateObj) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ writer.newRegExpStringIteratorResult(templateObj);
+ writer.returnFromIC();
+
+ trackAttached("NewRegExpStringIterator");
+ return AttachDecision::Attach;
+}
+
+AttachDecision
+InlinableNativeIRGenerator::tryAttachArrayIteratorPrototypeOptimizable() {
+  // Self-hosted code calls this without any arguments.
+ MOZ_ASSERT(argc_ == 0);
+
+ if (!isFirstStub()) {
+ // Attach only once to prevent slowdowns for polymorphic calls.
+ return AttachDecision::NoAction;
+ }
+
+ NativeObject* arrayIteratorProto;
+ uint32_t slot;
+ JSFunction* nextFun;
+ if (!IsArrayIteratorPrototypeOptimizable(cx_, &arrayIteratorProto, &slot,
+ &nextFun)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ ObjOperandId protoId = writer.loadObject(arrayIteratorProto);
+ ObjOperandId nextId = writer.loadObject(nextFun);
+
+ writer.guardShape(protoId, arrayIteratorProto->shape());
+
+ // Ensure that proto[slot] == nextFun.
+ writer.guardDynamicSlotIsSpecificObject(protoId, nextId, slot);
+ writer.loadBooleanResult(true);
+ writer.returnFromIC();
+
+ trackAttached("ArrayIteratorPrototypeOptimizable");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachObjectCreate() {
+ // Need a single object-or-null argument.
+ if (argc_ != 1 || !args_[0].isObjectOrNull()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!isFirstStub()) {
+ // Attach only once to prevent slowdowns for polymorphic calls.
+ return AttachDecision::NoAction;
+ }
+
+ RootedObject proto(cx_, args_[0].toObjectOrNull());
+ JSObject* templateObj = ObjectCreateImpl(cx_, proto, TenuredObject);
+ if (!templateObj) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'create' native function.
+ emitNativeCalleeGuard();
+
+ // Guard on the proto argument.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ if (proto) {
+ ObjOperandId protoId = writer.guardToObject(argId);
+ writer.guardSpecificObject(protoId, proto);
+ } else {
+ writer.guardIsNull(argId);
+ }
+
+ writer.objectCreateResult(templateObj);
+ writer.returnFromIC();
+
+ trackAttached("ObjectCreate");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachObjectConstructor() {
+ // Expecting no arguments or a single object argument.
+ // TODO(Warp): Support all or more conversions to object.
+ if (argc_ > 1) {
+ return AttachDecision::NoAction;
+ }
+ if (argc_ == 1 && !args_[0].isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ PlainObject* templateObj = nullptr;
+ if (argc_ == 0) {
+    // The stub doesn't support the allocation metadata builder.
+ if (cx_->realm()->hasAllocationMetadataBuilder()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Create a temporary object to act as the template object.
+ templateObj = NewPlainObjectWithAllocKind(cx_, NewObjectGCKind());
+ if (!templateObj) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee and newTarget (if constructing) are this Object constructor
+ // function.
+ emitNativeCalleeGuard();
+
+ if (argc_ == 0) {
+ // TODO: Support pre-tenuring.
+ gc::AllocSite* site =
+ script()->zone()->unknownAllocSite(JS::TraceKind::Object);
+ MOZ_ASSERT(site);
+
+ uint32_t numFixedSlots = templateObj->numUsedFixedSlots();
+ uint32_t numDynamicSlots = templateObj->numDynamicSlots();
+ gc::AllocKind allocKind = templateObj->allocKindForTenure();
+ Shape* shape = templateObj->shape();
+
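+    // Guard at runtime that the realm still has no allocation metadata
+    // builder, then allocate a plain object using the template's layout.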
+ writer.guardNoAllocationMetadataBuilder(
+ cx_->realm()->addressOfMetadataBuilder());
+ writer.newPlainObjectResult(numFixedSlots, numDynamicSlots, allocKind,
+ shape, site);
+ } else {
+ // Use standard call flags when this is an inline Function.prototype.call(),
+ // because GetIndexOfArgument() doesn't yet support |CallFlags::FunCall|.
+ CallFlags flags = flags_;
+ if (flags.getArgFormat() == CallFlags::FunCall) {
+ flags = CallFlags(CallFlags::Standard);
+ }
+
+ // Guard that the argument is an object.
+ ValOperandId argId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_, flags);
+ ObjOperandId objId = writer.guardToObject(argId);
+
+ // Return the object.
+ writer.loadObjectResult(objId);
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("ObjectConstructor");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachArrayConstructor() {
+ // Only optimize the |Array()| and |Array(n)| cases (with or without |new|)
+ // for now. Note that self-hosted code calls this without |new| via std_Array.
+ if (argc_ > 1) {
+ return AttachDecision::NoAction;
+ }
+ if (argc_ == 1 && !args_[0].isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ int32_t length = (argc_ == 1) ? args_[0].toInt32() : 0;
+ if (length < 0 || uint32_t(length) > ArrayObject::EagerAllocationMaxLength) {
+ return AttachDecision::NoAction;
+ }
+
+  // We allow inlining this function across realms, so make sure the template
+  // object is allocated in that realm. See CanInlineNativeCrossRealm.
+ JSObject* templateObj;
+ {
+ AutoRealm ar(cx_, callee_);
+ templateObj = NewDenseFullyAllocatedArray(cx_, length, TenuredObject);
+ if (!templateObj) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee and newTarget (if constructing) are this Array constructor
+ // function.
+ emitNativeCalleeGuard();
+
+ Int32OperandId lengthId;
+ if (argc_ == 1) {
+ // Use standard call flags when this is an inline Function.prototype.call(),
+ // because GetIndexOfArgument() doesn't yet support |CallFlags::FunCall|.
+ CallFlags flags = flags_;
+ if (flags.getArgFormat() == CallFlags::FunCall) {
+ flags = CallFlags(CallFlags::Standard);
+ }
+
+ ValOperandId arg0Id =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_, flags);
+ lengthId = writer.guardToInt32(arg0Id);
+ } else {
+ MOZ_ASSERT(argc_ == 0);
+ lengthId = writer.loadInt32Constant(0);
+ }
+
+ writer.newArrayFromLengthResult(templateObj, lengthId);
+ writer.returnFromIC();
+
+ trackAttached("ArrayConstructor");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayConstructor() {
+ MOZ_ASSERT(flags_.isConstructing());
+
+ if (argc_ == 0 || argc_ > 3) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!isFirstStub()) {
+ // Attach only once to prevent slowdowns for polymorphic calls.
+ return AttachDecision::NoAction;
+ }
+
+ // The first argument must be int32 or a non-proxy object.
+ if (!args_[0].isInt32() && !args_[0].isObject()) {
+ return AttachDecision::NoAction;
+ }
+ if (args_[0].isObject() && args_[0].toObject().is<ProxyObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+#ifdef JS_CODEGEN_X86
+ // Unfortunately NewTypedArrayFromArrayBufferResult needs more registers than
+ // we can easily support on 32-bit x86 for now.
+ if (args_[0].isObject() &&
+ args_[0].toObject().is<ArrayBufferObjectMaybeShared>()) {
+ return AttachDecision::NoAction;
+ }
+#endif
+
+ RootedObject templateObj(cx_);
+ if (!TypedArrayObject::GetTemplateObjectForNative(cx_, callee_->native(),
+ args_, &templateObj)) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ if (!templateObj) {
+ // This can happen for large length values.
+ MOZ_ASSERT(args_[0].isInt32());
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee and newTarget are this TypedArray constructor function.
+ emitNativeCalleeGuard();
+
+ ValOperandId arg0Id =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_, flags_);
+
+ if (args_[0].isInt32()) {
+ // From length.
+ Int32OperandId lengthId = writer.guardToInt32(arg0Id);
+ writer.newTypedArrayFromLengthResult(templateObj, lengthId);
+ } else {
+ JSObject* obj = &args_[0].toObject();
+ ObjOperandId objId = writer.guardToObject(arg0Id);
+
+ if (obj->is<ArrayBufferObjectMaybeShared>()) {
+ // From ArrayBuffer.
+ if (obj->is<ArrayBufferObject>()) {
+ writer.guardClass(objId, GuardClassKind::ArrayBuffer);
+ } else {
+ MOZ_ASSERT(obj->is<SharedArrayBufferObject>());
+ writer.guardClass(objId, GuardClassKind::SharedArrayBuffer);
+ }
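+      // Missing |byteOffset| and |length| arguments are loaded as undefined,
+      // matching how the constructor treats absent arguments.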
+ ValOperandId byteOffsetId;
+ if (argc_ > 1) {
+ byteOffsetId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_, flags_);
+ } else {
+ byteOffsetId = writer.loadUndefined();
+ }
+ ValOperandId lengthId;
+ if (argc_ > 2) {
+ lengthId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg2, argc_, flags_);
+ } else {
+ lengthId = writer.loadUndefined();
+ }
+ writer.newTypedArrayFromArrayBufferResult(templateObj, objId,
+ byteOffsetId, lengthId);
+ } else {
+ // From Array-like.
+ writer.guardIsNotArrayBufferMaybeShared(objId);
+ writer.guardIsNotProxy(objId);
+ writer.newTypedArrayFromArrayResult(templateObj, objId);
+ }
+ }
+
+ writer.returnFromIC();
+
+ trackAttached("TypedArrayConstructor");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachSpecializedFunctionBind(
+ Handle<JSObject*> target, Handle<BoundFunctionObject*> templateObj) {
+ // Try to attach a faster stub that's more specialized than what we emit in
+ // tryAttachFunctionBind. This lets us allocate and initialize a bound
+ // function object in Ion without calling into C++.
+ //
+ // We can do this if:
+ //
+ // * The target's prototype is Function.prototype, because that's the proto we
+ // use for the template object.
+ // * All bound arguments can be stored inline.
+ // * The `.name`, `.length`, and `IsConstructor` values match `target`.
+ //
+ // We initialize the template object with the bound function's name, length,
+ // and flags. At runtime we then only have to clone the template object and
+ // initialize the slots for the target, the bound `this` and the bound
+ // arguments.
+
+ if (!isFirstStub()) {
+ return AttachDecision::NoAction;
+ }
+ if (!target->is<JSFunction>() && !target->is<BoundFunctionObject>()) {
+ return AttachDecision::NoAction;
+ }
+ if (target->staticPrototype() != &cx_->global()->getFunctionPrototype()) {
+ return AttachDecision::NoAction;
+ }
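+  // The first argument to |bind| is the bound |this|, so the remaining
+  // arguments are the bound arguments.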
+ size_t numBoundArgs = argc_ > 0 ? argc_ - 1 : 0;
+ if (numBoundArgs > BoundFunctionObject::MaxInlineBoundArgs) {
+ return AttachDecision::NoAction;
+ }
+
+ const bool targetIsConstructor = target->isConstructor();
+ Rooted<JSAtom*> targetName(cx_);
+ uint32_t targetLength = 0;
+
+ if (target->is<JSFunction>()) {
+ Rooted<JSFunction*> fun(cx_, &target->as<JSFunction>());
+ if (fun->isNativeFun()) {
+ return AttachDecision::NoAction;
+ }
+ if (fun->hasResolvedLength() || fun->hasResolvedName()) {
+ return AttachDecision::NoAction;
+ }
+ uint16_t len;
+ if (!JSFunction::getUnresolvedLength(cx_, fun, &len)) {
+ cx_->clearPendingException();
+ return AttachDecision::NoAction;
+ }
+ targetName = fun->infallibleGetUnresolvedName(cx_);
+ targetLength = len;
+ } else {
+ BoundFunctionObject* bound = &target->as<BoundFunctionObject>();
+ if (!targetIsConstructor) {
+ // Only support constructors for now. This lets us use
+ // GuardBoundFunctionIsConstructor.
+ return AttachDecision::NoAction;
+ }
+ Shape* initialShape =
+ cx_->global()->maybeBoundFunctionShapeWithDefaultProto();
+ if (bound->shape() != initialShape) {
+ return AttachDecision::NoAction;
+ }
+ Value lenVal = bound->getLengthForInitialShape();
+ Value nameVal = bound->getNameForInitialShape();
+ if (!lenVal.isInt32() || lenVal.toInt32() < 0 || !nameVal.isString() ||
+ !nameVal.toString()->isAtom()) {
+ return AttachDecision::NoAction;
+ }
+ targetName = &nameVal.toString()->asAtom();
+ targetLength = uint32_t(lenVal.toInt32());
+ }
+
+ if (!templateObj->initTemplateSlotsForSpecializedBind(
+ cx_, numBoundArgs, targetIsConstructor, targetLength, targetName)) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ initializeInputOperand();
+ emitNativeCalleeGuard();
+
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId targetId = writer.guardToObject(thisValId);
+
+ // Ensure the JSClass and proto match, and that the `length` and `name`
+ // properties haven't been redefined.
+ writer.guardShape(targetId, target->shape());
+
+ // Emit guards for the `IsConstructor`, `.length`, and `.name` values.
+ if (target->is<JSFunction>()) {
+ // Guard on:
+ // * The BaseScript (because that's what JSFunction uses for the `length`).
+ // Because MGuardFunctionScript doesn't support self-hosted functions yet,
+ // we use GuardSpecificFunction instead in this case.
+ // See assertion in MGuardFunctionScript::getAliasSet.
+ // * The flags slot (for the CONSTRUCTOR, RESOLVED_NAME, RESOLVED_LENGTH,
+ // HAS_INFERRED_NAME, and HAS_GUESSED_ATOM flags).
+ // * The atom slot.
+ JSFunction* fun = &target->as<JSFunction>();
+ if (fun->isSelfHostedBuiltin()) {
+ writer.guardSpecificFunction(targetId, fun);
+ } else {
+ writer.guardFunctionScript(targetId, fun->baseScript());
+ }
+ writer.guardFixedSlotValue(
+ targetId, JSFunction::offsetOfFlagsAndArgCount(),
+ fun->getReservedSlot(JSFunction::FlagsAndArgCountSlot));
+ writer.guardFixedSlotValue(targetId, JSFunction::offsetOfAtom(),
+ fun->getReservedSlot(JSFunction::AtomSlot));
+ } else {
+ BoundFunctionObject* bound = &target->as<BoundFunctionObject>();
+ writer.guardBoundFunctionIsConstructor(targetId);
+ writer.guardFixedSlotValue(targetId,
+ BoundFunctionObject::offsetOfLengthSlot(),
+ bound->getLengthForInitialShape());
+ writer.guardFixedSlotValue(targetId,
+ BoundFunctionObject::offsetOfNameSlot(),
+ bound->getNameForInitialShape());
+ }
+
+ writer.specializedBindFunctionResult(targetId, argc_, templateObj);
+ writer.returnFromIC();
+
+ trackAttached("SpecializedFunctionBind");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachFunctionBind() {
+ // Ensure |this| (the target) is a function object or a bound function object.
+ // We could support other callables too, but note that we rely on the target
+ // having a static prototype in BoundFunctionObject::functionBindImpl.
+ if (!thisval_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+ Rooted<JSObject*> target(cx_, &thisval_.toObject());
+ if (!target->is<JSFunction>() && !target->is<BoundFunctionObject>()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Only support standard, non-spread calls.
+ if (flags_.getArgFormat() != CallFlags::Standard) {
+ return AttachDecision::NoAction;
+ }
+
+ // Only optimize if the number of arguments is small. This ensures we don't
+ // compile a lot of different stubs (because we bake in argc) and that we
+ // don't get anywhere near ARGS_LENGTH_MAX.
+ static constexpr size_t MaxArguments = 6;
+ if (argc_ > MaxArguments) {
+ return AttachDecision::NoAction;
+ }
+
+ Rooted<BoundFunctionObject*> templateObj(
+ cx_, BoundFunctionObject::createTemplateObject(cx_));
+ if (!templateObj) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
+ TRY_ATTACH(tryAttachSpecializedFunctionBind(target, templateObj));
+
+ initializeInputOperand();
+
+ emitNativeCalleeGuard();
+
+ // Guard |this| is a function object or a bound function object.
+ ValOperandId thisValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+ ObjOperandId targetId = writer.guardToObject(thisValId);
+ if (target->is<JSFunction>()) {
+ writer.guardClass(targetId, GuardClassKind::JSFunction);
+ } else {
+ MOZ_ASSERT(target->is<BoundFunctionObject>());
+ writer.guardClass(targetId, GuardClassKind::BoundFunction);
+ }
+
+ writer.bindFunctionResult(targetId, argc_, templateObj);
+ writer.returnFromIC();
+
+ trackAttached("FunctionBind");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CallIRGenerator::tryAttachFunApply(HandleFunction calleeFunc) {
+ MOZ_ASSERT(calleeFunc->isNativeWithoutJitEntry());
+
+ if (calleeFunc->native() != fun_apply) {
+ return AttachDecision::NoAction;
+ }
+
+ if (argc_ > 2) {
+ return AttachDecision::NoAction;
+ }
+
+ if (!thisval_.isObject() || !thisval_.toObject().is<JSFunction>()) {
+ return AttachDecision::NoAction;
+ }
+ Rooted<JSFunction*> target(cx_, &thisval_.toObject().as<JSFunction>());
+
+ bool isScripted = target->hasJitEntry();
+ MOZ_ASSERT_IF(!isScripted, target->isNativeWithoutJitEntry());
+
+ if (target->isClassConstructor()) {
+ return AttachDecision::NoAction;
+ }
+
+ CallFlags::ArgFormat format = CallFlags::Standard;
+ if (argc_ < 2) {
+    // |fun.apply()| and |fun.apply(thisValue)| are equivalent to |fun.call()|
+    // and |fun.call(thisValue)|, respectively.
+ format = CallFlags::FunCall;
+ } else if (args_[1].isNullOrUndefined()) {
+ // |fun.apply(thisValue, null)| and |fun.apply(thisValue, undefined)| are
+ // also equivalent to |fun.call(thisValue)|, but we can't use FunCall
+ // because we have to discard the second argument.
+ format = CallFlags::FunApplyNullUndefined;
+ } else if (args_[1].isObject() && args_[1].toObject().is<ArgumentsObject>()) {
+ auto* argsObj = &args_[1].toObject().as<ArgumentsObject>();
+ if (argsObj->hasOverriddenElement() || argsObj->anyArgIsForwarded() ||
+ argsObj->hasOverriddenLength() ||
+ argsObj->initialLength() > JIT_ARGS_LENGTH_MAX) {
+ return AttachDecision::NoAction;
+ }
+ format = CallFlags::FunApplyArgsObj;
+ } else if (args_[1].isObject() && args_[1].toObject().is<ArrayObject>() &&
+ args_[1].toObject().as<ArrayObject>().length() <=
+ JIT_ARGS_LENGTH_MAX &&
+ IsPackedArray(&args_[1].toObject())) {
+ format = CallFlags::FunApplyArray;
+ } else {
+ return AttachDecision::NoAction;
+ }
+
+ Int32OperandId argcId(writer.setInputOperandId(0));
+
+ CallFlags targetFlags(format);
+ if (mode_ == ICState::Mode::Specialized) {
+ if (cx_->realm() == target->realm()) {
+ targetFlags.setIsSameRealm();
+ }
+ }
+
+ if (mode_ == ICState::Mode::Specialized && !isScripted &&
+ format == CallFlags::FunApplyArray) {
+ HandleValue newTarget = NullHandleValue;
+ HandleValue thisValue = args_[0];
+ Rooted<ArrayObject*> aobj(cx_, &args_[1].toObject().as<ArrayObject>());
+ HandleValueArray args = HandleValueArray::fromMarkedLocation(
+ aobj->length(), aobj->getDenseElements());
+
+ // Check for specific native-function optimizations.
+ InlinableNativeIRGenerator nativeGen(*this, target, newTarget, thisValue,
+ args, targetFlags);
+ TRY_ATTACH(nativeGen.tryAttachStub());
+ }
+
+ // Don't inline when no arguments are passed, cf. |tryAttachFunCall()|.
+ if (mode_ == ICState::Mode::Specialized && !isScripted &&
+ format == CallFlags::FunCall && argc_ > 0) {
+ MOZ_ASSERT(argc_ == 1);
+
+ HandleValue newTarget = NullHandleValue;
+ HandleValue thisValue = args_[0];
+ HandleValueArray args = HandleValueArray::empty();
+
+ // Check for specific native-function optimizations.
+ InlinableNativeIRGenerator nativeGen(*this, target, newTarget, thisValue,
+ args, targetFlags);
+ TRY_ATTACH(nativeGen.tryAttachStub());
+ }
+
+ ObjOperandId thisObjId = emitFunApplyGuard(argcId);
+
+ uint32_t fixedArgc;
+ if (format == CallFlags::FunApplyArray ||
+ format == CallFlags::FunApplyArgsObj ||
+ format == CallFlags::FunApplyNullUndefined) {
+ emitFunApplyArgsGuard(format);
+
+ // We always use MaxUnrolledArgCopy here because the fixed argc is
+ // meaningless in a FunApply case.
+ fixedArgc = MaxUnrolledArgCopy;
+ } else {
+ MOZ_ASSERT(format == CallFlags::FunCall);
+
+    // For the FunCall case we need to use the actual fixed argc value.
+ fixedArgc = ClampFixedArgc(argc_);
+ }
+
+ if (mode_ == ICState::Mode::Specialized) {
+ // Ensure that |this| is the expected target function.
+ emitCalleeGuard(thisObjId, target);
+
+ if (isScripted) {
+ writer.callScriptedFunction(thisObjId, argcId, targetFlags, fixedArgc);
+ } else {
+ writer.callNativeFunction(thisObjId, argcId, op_, target, targetFlags,
+ fixedArgc);
+ }
+ } else {
+ // Guard that |this| is a function.
+ writer.guardClass(thisObjId, GuardClassKind::JSFunction);
+
+ // Guard that function is not a class constructor.
+ writer.guardNotClassConstructor(thisObjId);
+
+ if (isScripted) {
+ // Guard that function is scripted.
+ writer.guardFunctionHasJitEntry(thisObjId, /*constructing =*/false);
+ writer.callScriptedFunction(thisObjId, argcId, targetFlags, fixedArgc);
+ } else {
+ // Guard that function is native.
+ writer.guardFunctionHasNoJitEntry(thisObjId);
+ writer.callAnyNativeFunction(thisObjId, argcId, targetFlags, fixedArgc);
+ }
+ }
+
+ writer.returnFromIC();
+
+ if (isScripted) {
+ trackAttached("Call.ScriptedFunApply");
+ } else {
+ trackAttached("Call.NativeFunApply");
+ }
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision CallIRGenerator::tryAttachWasmCall(HandleFunction calleeFunc) {
+ // Try to optimize calls into Wasm code by emitting the CallWasmFunction
+ // CacheIR op. Baseline ICs currently treat this as a CallScriptedFunction op
+ // (calling Wasm's JitEntry stub) but Warp transpiles it to a more direct call
+ // into Wasm code.
+ //
+ // Note: some code refers to these optimized Wasm calls as "inlined" calls.
+
+ MOZ_ASSERT(calleeFunc->isWasmWithJitEntry());
+
+ if (!JitOptions.enableWasmIonFastCalls) {
+ return AttachDecision::NoAction;
+ }
+ if (!isFirstStub_) {
+ return AttachDecision::NoAction;
+ }
+ JSOp op = JSOp(*pc_);
+ if (op != JSOp::Call && op != JSOp::CallContent &&
+ op != JSOp::CallIgnoresRv) {
+ return AttachDecision::NoAction;
+ }
+ if (cx_->realm() != calleeFunc->realm()) {
+ return AttachDecision::NoAction;
+ }
+
+ wasm::Instance& inst = wasm::ExportedFunctionToInstance(calleeFunc);
+ uint32_t funcIndex = inst.code().getFuncIndex(calleeFunc);
+
+ auto bestTier = inst.code().bestTier();
+ const wasm::FuncExport& funcExport =
+ inst.metadata(bestTier).lookupFuncExport(funcIndex);
+ const wasm::FuncType& sig = inst.metadata().getFuncExportType(funcExport);
+
+ MOZ_ASSERT(!IsInsideNursery(inst.object()));
+ MOZ_ASSERT(sig.canHaveJitEntry(), "Function should allow a Wasm JitEntry");
+
+ // If there are too many arguments, don't optimize (we won't be able to store
+ // the arguments in the LIR node).
+ static_assert(wasm::MaxArgsForJitInlineCall <= ArgumentKindArgIndexLimit);
+ if (sig.args().length() > wasm::MaxArgsForJitInlineCall ||
+ argc_ > ArgumentKindArgIndexLimit) {
+ return AttachDecision::NoAction;
+ }
+
+ // If there are too many results, don't optimize as Warp currently doesn't
+ // have code to handle this.
+ if (sig.results().length() > wasm::MaxResultsForJitInlineCall) {
+ return AttachDecision::NoAction;
+ }
+
+  // Bug 1631656 - Don't try to optimize with I64 args on 32-bit platforms
+  // because it is more difficult (it requires multiple LIR arguments per
+  // I64).
+ //
+ // Bug 1631650 - On 64-bit platforms, we also give up optimizing for I64 args
+ // spilled to the stack because it causes problems with register allocation.
+#ifdef JS_64BIT
+ constexpr bool optimizeWithI64 = true;
+#else
+ constexpr bool optimizeWithI64 = false;
+#endif
+ ABIArgGenerator abi;
+ for (const auto& valType : sig.args()) {
+ MIRType mirType = valType.toMIRType();
+ ABIArg abiArg = abi.next(mirType);
+ if (mirType != MIRType::Int64) {
+ continue;
+ }
+ if (!optimizeWithI64 || abiArg.kind() == ABIArg::Stack) {
+ return AttachDecision::NoAction;
+ }
+ }
+
+ // Check that all arguments can be converted to the Wasm type in Warp code
+ // without bailing out.
+ for (size_t i = 0; i < sig.args().length(); i++) {
+ Value argVal = i < argc_ ? args_[i] : UndefinedValue();
+ switch (sig.args()[i].kind()) {
+ case wasm::ValType::I32:
+ case wasm::ValType::F32:
+ case wasm::ValType::F64:
+ if (!argVal.isNumber() && !argVal.isBoolean() &&
+ !argVal.isUndefined()) {
+ return AttachDecision::NoAction;
+ }
+ break;
+ case wasm::ValType::I64:
+ if (!argVal.isBigInt() && !argVal.isBoolean() && !argVal.isString()) {
+ return AttachDecision::NoAction;
+ }
+ break;
+ case wasm::ValType::V128:
+ MOZ_CRASH("Function should not have a Wasm JitEntry");
+ case wasm::ValType::Ref:
+ // canHaveJitEntry restricts args to externref, where all JS values are
+ // valid and can be boxed.
+ MOZ_ASSERT(sig.args()[i].refType().isExtern(),
+ "Unexpected type for Wasm JitEntry");
+ break;
+ }
+ }
+
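+  // We checked above that the callee's realm matches cx_->realm(), so the
+  // call can be flagged as same-realm.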
+ CallFlags flags(/* isConstructing = */ false, /* isSpread = */ false,
+ /* isSameRealm = */ true);
+
+ // Load argc.
+ Int32OperandId argcId(writer.setInputOperandId(0));
+
+ // Load the callee and ensure it is an object
+ ValOperandId calleeValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Callee, argc_, flags);
+ ObjOperandId calleeObjId = writer.guardToObject(calleeValId);
+
+ // Ensure the callee is this Wasm function.
+ emitCalleeGuard(calleeObjId, calleeFunc);
+
+ // Guard the argument types.
+ uint32_t guardedArgs = std::min<uint32_t>(sig.args().length(), argc_);
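+  // Arguments the call site doesn't pass are implicitly undefined and were
+  // already validated above, so only guard the arguments actually passed.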
+ for (uint32_t i = 0; i < guardedArgs; i++) {
+ ArgumentKind argKind = ArgumentKindForArgIndex(i);
+ ValOperandId argId = writer.loadArgumentFixedSlot(argKind, argc_, flags);
+ writer.guardWasmArg(argId, sig.args()[i].kind());
+ }
+
+ writer.callWasmFunction(calleeObjId, argcId, flags, ClampFixedArgc(argc_),
+ &funcExport, inst.object());
+ writer.returnFromIC();
+
+ trackAttached("Call.WasmCall");
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision CallIRGenerator::tryAttachInlinableNative(HandleFunction callee,
+ CallFlags flags) {
+ MOZ_ASSERT(mode_ == ICState::Mode::Specialized);
+ MOZ_ASSERT(callee->isNativeWithoutJitEntry());
+ MOZ_ASSERT(flags.getArgFormat() == CallFlags::Standard ||
+ flags.getArgFormat() == CallFlags::Spread);
+
+ // Special case functions are only optimized for normal calls.
+ if (op_ != JSOp::Call && op_ != JSOp::CallContent && op_ != JSOp::New &&
+ op_ != JSOp::NewContent && op_ != JSOp::CallIgnoresRv &&
+ op_ != JSOp::SpreadCall) {
+ return AttachDecision::NoAction;
+ }
+
+ InlinableNativeIRGenerator nativeGen(*this, callee, newTarget_, thisval_,
+ args_, flags);
+ return nativeGen.tryAttachStub();
+}
+
+#ifdef FUZZING_JS_FUZZILLI
+AttachDecision InlinableNativeIRGenerator::tryAttachFuzzilliHash() {
+ if (argc_ != 1) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Guard callee is the 'fuzzilli_hash' native function.
+ emitNativeCalleeGuard();
+
+ ValOperandId argValId =
+ writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+
+ writer.fuzzilliHashResult(argValId);
+ writer.returnFromIC();
+
+ trackAttached("FuzzilliHash");
+ return AttachDecision::Attach;
+}
+#endif
+
+AttachDecision InlinableNativeIRGenerator::tryAttachStub() {
+ if (!callee_->hasJitInfo() ||
+ callee_->jitInfo()->type() != JSJitInfo::InlinableNative) {
+ return AttachDecision::NoAction;
+ }
+
+ InlinableNative native = callee_->jitInfo()->inlinableNative;
+
+ // Not all natives can be inlined cross-realm.
+ if (cx_->realm() != callee_->realm() && !CanInlineNativeCrossRealm(native)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check for special-cased native constructors.
+ if (flags_.isConstructing()) {
+ MOZ_ASSERT(flags_.getArgFormat() == CallFlags::Standard);
+
+ // newTarget must match the callee. CacheIR for this is emitted in
+ // emitNativeCalleeGuard.
+ if (ObjectValue(*callee_) != newTarget_) {
+ return AttachDecision::NoAction;
+ }
+ switch (native) {
+ case InlinableNative::Array:
+ return tryAttachArrayConstructor();
+ case InlinableNative::TypedArrayConstructor:
+ return tryAttachTypedArrayConstructor();
+ case InlinableNative::String:
+ return tryAttachStringConstructor();
+ case InlinableNative::Object:
+ return tryAttachObjectConstructor();
+ default:
+ break;
+ }
+ return AttachDecision::NoAction;
+ }
+
+ // Check for special-cased native spread calls.
+ if (flags_.getArgFormat() == CallFlags::Spread ||
+ flags_.getArgFormat() == CallFlags::FunApplyArray) {
+ switch (native) {
+ case InlinableNative::MathMin:
+ return tryAttachSpreadMathMinMax(/*isMax = */ false);
+ case InlinableNative::MathMax:
+ return tryAttachSpreadMathMinMax(/*isMax = */ true);
+ default:
+ break;
+ }
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(flags_.getArgFormat() == CallFlags::Standard ||
+ flags_.getArgFormat() == CallFlags::FunCall);
+
+ // Check for special-cased native functions.
+ switch (native) {
+ // Array natives.
+ case InlinableNative::Array:
+ return tryAttachArrayConstructor();
+ case InlinableNative::ArrayPush:
+ return tryAttachArrayPush();
+ case InlinableNative::ArrayPop:
+ case InlinableNative::ArrayShift:
+ return tryAttachArrayPopShift(native);
+ case InlinableNative::ArrayJoin:
+ return tryAttachArrayJoin();
+ case InlinableNative::ArraySlice:
+ return tryAttachArraySlice();
+ case InlinableNative::ArrayIsArray:
+ return tryAttachArrayIsArray();
+
+ // DataView natives.
+ case InlinableNative::DataViewGetInt8:
+ return tryAttachDataViewGet(Scalar::Int8);
+ case InlinableNative::DataViewGetUint8:
+ return tryAttachDataViewGet(Scalar::Uint8);
+ case InlinableNative::DataViewGetInt16:
+ return tryAttachDataViewGet(Scalar::Int16);
+ case InlinableNative::DataViewGetUint16:
+ return tryAttachDataViewGet(Scalar::Uint16);
+ case InlinableNative::DataViewGetInt32:
+ return tryAttachDataViewGet(Scalar::Int32);
+ case InlinableNative::DataViewGetUint32:
+ return tryAttachDataViewGet(Scalar::Uint32);
+ case InlinableNative::DataViewGetFloat32:
+ return tryAttachDataViewGet(Scalar::Float32);
+ case InlinableNative::DataViewGetFloat64:
+ return tryAttachDataViewGet(Scalar::Float64);
+ case InlinableNative::DataViewGetBigInt64:
+ return tryAttachDataViewGet(Scalar::BigInt64);
+ case InlinableNative::DataViewGetBigUint64:
+ return tryAttachDataViewGet(Scalar::BigUint64);
+ case InlinableNative::DataViewSetInt8:
+ return tryAttachDataViewSet(Scalar::Int8);
+ case InlinableNative::DataViewSetUint8:
+ return tryAttachDataViewSet(Scalar::Uint8);
+ case InlinableNative::DataViewSetInt16:
+ return tryAttachDataViewSet(Scalar::Int16);
+ case InlinableNative::DataViewSetUint16:
+ return tryAttachDataViewSet(Scalar::Uint16);
+ case InlinableNative::DataViewSetInt32:
+ return tryAttachDataViewSet(Scalar::Int32);
+ case InlinableNative::DataViewSetUint32:
+ return tryAttachDataViewSet(Scalar::Uint32);
+ case InlinableNative::DataViewSetFloat32:
+ return tryAttachDataViewSet(Scalar::Float32);
+ case InlinableNative::DataViewSetFloat64:
+ return tryAttachDataViewSet(Scalar::Float64);
+ case InlinableNative::DataViewSetBigInt64:
+ return tryAttachDataViewSet(Scalar::BigInt64);
+ case InlinableNative::DataViewSetBigUint64:
+ return tryAttachDataViewSet(Scalar::BigUint64);
+
+ // Function natives.
+ case InlinableNative::FunctionBind:
+ return tryAttachFunctionBind();
+
+ // Intl natives.
+ case InlinableNative::IntlGuardToCollator:
+ case InlinableNative::IntlGuardToDateTimeFormat:
+ case InlinableNative::IntlGuardToDisplayNames:
+ case InlinableNative::IntlGuardToListFormat:
+ case InlinableNative::IntlGuardToNumberFormat:
+ case InlinableNative::IntlGuardToPluralRules:
+ case InlinableNative::IntlGuardToRelativeTimeFormat:
+ return tryAttachGuardToClass(native);
+
+ // Slot intrinsics.
+ case InlinableNative::IntrinsicUnsafeGetReservedSlot:
+ case InlinableNative::IntrinsicUnsafeGetObjectFromReservedSlot:
+ case InlinableNative::IntrinsicUnsafeGetInt32FromReservedSlot:
+ case InlinableNative::IntrinsicUnsafeGetStringFromReservedSlot:
+ return tryAttachUnsafeGetReservedSlot(native);
+ case InlinableNative::IntrinsicUnsafeSetReservedSlot:
+ return tryAttachUnsafeSetReservedSlot();
+
+ // Intrinsics.
+ case InlinableNative::IntrinsicIsSuspendedGenerator:
+ return tryAttachIsSuspendedGenerator();
+ case InlinableNative::IntrinsicToObject:
+ return tryAttachToObject();
+ case InlinableNative::IntrinsicToInteger:
+ return tryAttachToInteger();
+ case InlinableNative::IntrinsicToLength:
+ return tryAttachToLength();
+ case InlinableNative::IntrinsicIsObject:
+ return tryAttachIsObject();
+ case InlinableNative::IntrinsicIsPackedArray:
+ return tryAttachIsPackedArray();
+ case InlinableNative::IntrinsicIsCallable:
+ return tryAttachIsCallable();
+ case InlinableNative::IntrinsicIsConstructor:
+ return tryAttachIsConstructor();
+ case InlinableNative::IntrinsicIsCrossRealmArrayConstructor:
+ return tryAttachIsCrossRealmArrayConstructor();
+ case InlinableNative::IntrinsicGuardToArrayIterator:
+ case InlinableNative::IntrinsicGuardToMapIterator:
+ case InlinableNative::IntrinsicGuardToSetIterator:
+ case InlinableNative::IntrinsicGuardToStringIterator:
+ case InlinableNative::IntrinsicGuardToRegExpStringIterator:
+ case InlinableNative::IntrinsicGuardToWrapForValidIterator:
+ case InlinableNative::IntrinsicGuardToIteratorHelper:
+ case InlinableNative::IntrinsicGuardToAsyncIteratorHelper:
+ return tryAttachGuardToClass(native);
+ case InlinableNative::IntrinsicSubstringKernel:
+ return tryAttachSubstringKernel();
+ case InlinableNative::IntrinsicIsConstructing:
+ return tryAttachIsConstructing();
+ case InlinableNative::IntrinsicNewArrayIterator:
+ return tryAttachNewArrayIterator();
+ case InlinableNative::IntrinsicNewStringIterator:
+ return tryAttachNewStringIterator();
+ case InlinableNative::IntrinsicNewRegExpStringIterator:
+ return tryAttachNewRegExpStringIterator();
+ case InlinableNative::IntrinsicArrayIteratorPrototypeOptimizable:
+ return tryAttachArrayIteratorPrototypeOptimizable();
+ case InlinableNative::IntrinsicObjectHasPrototype:
+ return tryAttachObjectHasPrototype();
+
+ // RegExp natives.
+ case InlinableNative::IsRegExpObject:
+ return tryAttachHasClass(&RegExpObject::class_,
+ /* isPossiblyWrapped = */ false);
+ case InlinableNative::IsPossiblyWrappedRegExpObject:
+ return tryAttachHasClass(&RegExpObject::class_,
+ /* isPossiblyWrapped = */ true);
+ case InlinableNative::RegExpMatcher:
+ case InlinableNative::RegExpSearcher:
+ return tryAttachRegExpMatcherSearcher(native);
+ case InlinableNative::RegExpPrototypeOptimizable:
+ return tryAttachRegExpPrototypeOptimizable();
+ case InlinableNative::RegExpInstanceOptimizable:
+ return tryAttachRegExpInstanceOptimizable();
+ case InlinableNative::GetFirstDollarIndex:
+ return tryAttachGetFirstDollarIndex();
+ case InlinableNative::IntrinsicRegExpBuiltinExec:
+ case InlinableNative::IntrinsicRegExpBuiltinExecForTest:
+ return tryAttachIntrinsicRegExpBuiltinExec(native);
+ case InlinableNative::IntrinsicRegExpExec:
+ case InlinableNative::IntrinsicRegExpExecForTest:
+ return tryAttachIntrinsicRegExpExec(native);
+
+ // String natives.
+ case InlinableNative::String:
+ return tryAttachString();
+ case InlinableNative::StringToString:
+ case InlinableNative::StringValueOf:
+ return tryAttachStringToStringValueOf();
+ case InlinableNative::StringCharCodeAt:
+ return tryAttachStringCharCodeAt();
+ case InlinableNative::StringCharAt:
+ return tryAttachStringCharAt();
+ case InlinableNative::StringFromCharCode:
+ return tryAttachStringFromCharCode();
+ case InlinableNative::StringFromCodePoint:
+ return tryAttachStringFromCodePoint();
+ case InlinableNative::StringIndexOf:
+ return tryAttachStringIndexOf();
+ case InlinableNative::StringStartsWith:
+ return tryAttachStringStartsWith();
+ case InlinableNative::StringEndsWith:
+ return tryAttachStringEndsWith();
+ case InlinableNative::StringToLowerCase:
+ return tryAttachStringToLowerCase();
+ case InlinableNative::StringToUpperCase:
+ return tryAttachStringToUpperCase();
+ case InlinableNative::IntrinsicStringReplaceString:
+ return tryAttachStringReplaceString();
+ case InlinableNative::IntrinsicStringSplitString:
+ return tryAttachStringSplitString();
+
+ // Math natives.
+ case InlinableNative::MathRandom:
+ return tryAttachMathRandom();
+ case InlinableNative::MathAbs:
+ return tryAttachMathAbs();
+ case InlinableNative::MathClz32:
+ return tryAttachMathClz32();
+ case InlinableNative::MathSign:
+ return tryAttachMathSign();
+ case InlinableNative::MathImul:
+ return tryAttachMathImul();
+ case InlinableNative::MathFloor:
+ return tryAttachMathFloor();
+ case InlinableNative::MathCeil:
+ return tryAttachMathCeil();
+ case InlinableNative::MathTrunc:
+ return tryAttachMathTrunc();
+ case InlinableNative::MathRound:
+ return tryAttachMathRound();
+ case InlinableNative::MathSqrt:
+ return tryAttachMathSqrt();
+ case InlinableNative::MathFRound:
+ return tryAttachMathFRound();
+ case InlinableNative::MathHypot:
+ return tryAttachMathHypot();
+ case InlinableNative::MathATan2:
+ return tryAttachMathATan2();
+ case InlinableNative::MathSin:
+ return tryAttachMathFunction(UnaryMathFunction::SinNative);
+ case InlinableNative::MathTan:
+ return tryAttachMathFunction(UnaryMathFunction::TanNative);
+ case InlinableNative::MathCos:
+ return tryAttachMathFunction(UnaryMathFunction::CosNative);
+ case InlinableNative::MathExp:
+ return tryAttachMathFunction(UnaryMathFunction::Exp);
+ case InlinableNative::MathLog:
+ return tryAttachMathFunction(UnaryMathFunction::Log);
+ case InlinableNative::MathASin:
+ return tryAttachMathFunction(UnaryMathFunction::ASin);
+ case InlinableNative::MathATan:
+ return tryAttachMathFunction(UnaryMathFunction::ATan);
+ case InlinableNative::MathACos:
+ return tryAttachMathFunction(UnaryMathFunction::ACos);
+ case InlinableNative::MathLog10:
+ return tryAttachMathFunction(UnaryMathFunction::Log10);
+ case InlinableNative::MathLog2:
+ return tryAttachMathFunction(UnaryMathFunction::Log2);
+ case InlinableNative::MathLog1P:
+ return tryAttachMathFunction(UnaryMathFunction::Log1P);
+ case InlinableNative::MathExpM1:
+ return tryAttachMathFunction(UnaryMathFunction::ExpM1);
+ case InlinableNative::MathCosH:
+ return tryAttachMathFunction(UnaryMathFunction::CosH);
+ case InlinableNative::MathSinH:
+ return tryAttachMathFunction(UnaryMathFunction::SinH);
+ case InlinableNative::MathTanH:
+ return tryAttachMathFunction(UnaryMathFunction::TanH);
+ case InlinableNative::MathACosH:
+ return tryAttachMathFunction(UnaryMathFunction::ACosH);
+ case InlinableNative::MathASinH:
+ return tryAttachMathFunction(UnaryMathFunction::ASinH);
+ case InlinableNative::MathATanH:
+ return tryAttachMathFunction(UnaryMathFunction::ATanH);
+ case InlinableNative::MathCbrt:
+ return tryAttachMathFunction(UnaryMathFunction::Cbrt);
+ case InlinableNative::MathPow:
+ return tryAttachMathPow();
+ case InlinableNative::MathMin:
+ return tryAttachMathMinMax(/* isMax = */ false);
+ case InlinableNative::MathMax:
+ return tryAttachMathMinMax(/* isMax = */ true);
+
+ // Map intrinsics.
+ case InlinableNative::IntrinsicGuardToMapObject:
+ return tryAttachGuardToClass(native);
+ case InlinableNative::IntrinsicGetNextMapEntryForIterator:
+ return tryAttachGetNextMapSetEntryForIterator(/* isMap = */ true);
+
+ // Number natives.
+ case InlinableNative::Number:
+ return tryAttachNumber();
+ case InlinableNative::NumberParseInt:
+ return tryAttachNumberParseInt();
+ case InlinableNative::NumberToString:
+ return tryAttachNumberToString();
+
+ // Object natives.
+ case InlinableNative::Object:
+ return tryAttachObjectConstructor();
+ case InlinableNative::ObjectCreate:
+ return tryAttachObjectCreate();
+ case InlinableNative::ObjectIs:
+ return tryAttachObjectIs();
+ case InlinableNative::ObjectIsPrototypeOf:
+ return tryAttachObjectIsPrototypeOf();
+ case InlinableNative::ObjectToString:
+ return tryAttachObjectToString();
+
+ // Set intrinsics.
+ case InlinableNative::IntrinsicGuardToSetObject:
+ return tryAttachGuardToClass(native);
+ case InlinableNative::IntrinsicGetNextSetEntryForIterator:
+ return tryAttachGetNextMapSetEntryForIterator(/* isMap = */ false);
+
+ // ArrayBuffer intrinsics.
+ case InlinableNative::IntrinsicGuardToArrayBuffer:
+ return tryAttachGuardToClass(native);
+ case InlinableNative::IntrinsicArrayBufferByteLength:
+ return tryAttachArrayBufferByteLength(/* isPossiblyWrapped = */ false);
+ case InlinableNative::IntrinsicPossiblyWrappedArrayBufferByteLength:
+ return tryAttachArrayBufferByteLength(/* isPossiblyWrapped = */ true);
+
+ // SharedArrayBuffer intrinsics.
+ case InlinableNative::IntrinsicGuardToSharedArrayBuffer:
+ return tryAttachGuardToClass(native);
+
+ // TypedArray intrinsics.
+ case InlinableNative::TypedArrayConstructor:
+ return AttachDecision::NoAction; // Not callable.
+ case InlinableNative::IntrinsicIsTypedArray:
+ return tryAttachIsTypedArray(/* isPossiblyWrapped = */ false);
+ case InlinableNative::IntrinsicIsPossiblyWrappedTypedArray:
+ return tryAttachIsTypedArray(/* isPossiblyWrapped = */ true);
+ case InlinableNative::IntrinsicIsTypedArrayConstructor:
+ return tryAttachIsTypedArrayConstructor();
+ case InlinableNative::IntrinsicTypedArrayByteOffset:
+ return tryAttachTypedArrayByteOffset();
+ case InlinableNative::IntrinsicTypedArrayElementSize:
+ return tryAttachTypedArrayElementSize();
+ case InlinableNative::IntrinsicTypedArrayLength:
+ return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ false);
+ case InlinableNative::IntrinsicPossiblyWrappedTypedArrayLength:
+ return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ true);
+
+ // Reflect natives.
+ case InlinableNative::ReflectGetPrototypeOf:
+ return tryAttachReflectGetPrototypeOf();
+
+ // Atomics intrinsics:
+ case InlinableNative::AtomicsCompareExchange:
+ return tryAttachAtomicsCompareExchange();
+ case InlinableNative::AtomicsExchange:
+ return tryAttachAtomicsExchange();
+ case InlinableNative::AtomicsAdd:
+ return tryAttachAtomicsAdd();
+ case InlinableNative::AtomicsSub:
+ return tryAttachAtomicsSub();
+ case InlinableNative::AtomicsAnd:
+ return tryAttachAtomicsAnd();
+ case InlinableNative::AtomicsOr:
+ return tryAttachAtomicsOr();
+ case InlinableNative::AtomicsXor:
+ return tryAttachAtomicsXor();
+ case InlinableNative::AtomicsLoad:
+ return tryAttachAtomicsLoad();
+ case InlinableNative::AtomicsStore:
+ return tryAttachAtomicsStore();
+ case InlinableNative::AtomicsIsLockFree:
+ return tryAttachAtomicsIsLockFree();
+
+ // BigInt natives.
+ case InlinableNative::BigIntAsIntN:
+ return tryAttachBigIntAsIntN();
+ case InlinableNative::BigIntAsUintN:
+ return tryAttachBigIntAsUintN();
+
+ // Boolean natives.
+ case InlinableNative::Boolean:
+ return tryAttachBoolean();
+
+ // Set natives.
+ case InlinableNative::SetHas:
+ return tryAttachSetHas();
+
+ // Map natives.
+ case InlinableNative::MapHas:
+ return tryAttachMapHas();
+ case InlinableNative::MapGet:
+ return tryAttachMapGet();
+
+ // Testing functions.
+ case InlinableNative::TestBailout:
+ if (js::SupportDifferentialTesting()) {
+ return AttachDecision::NoAction;
+ }
+ return tryAttachBailout();
+ case InlinableNative::TestAssertFloat32:
+ return tryAttachAssertFloat32();
+ case InlinableNative::TestAssertRecoveredOnBailout:
+ if (js::SupportDifferentialTesting()) {
+ return AttachDecision::NoAction;
+ }
+ return tryAttachAssertRecoveredOnBailout();
+
+#ifdef FUZZING_JS_FUZZILLI
+ // Fuzzilli function
+ case InlinableNative::FuzzilliHash:
+ return tryAttachFuzzilliHash();
+#endif
+
+ case InlinableNative::Limit:
+ break;
+ }
+
+ MOZ_CRASH("Shouldn't get here");
+}
+
+// Remember the shape of the |this| object for any script being called as a
+// constructor, for later use during Ion compilation.
+ScriptedThisResult CallIRGenerator::getThisShapeForScripted(
+ HandleFunction calleeFunc, Handle<JSObject*> newTarget,
+ MutableHandle<Shape*> result) {
+ // Some constructors allocate their own |this| object.
+ if (calleeFunc->constructorNeedsUninitializedThis()) {
+ return ScriptedThisResult::UninitializedThis;
+ }
+
+  // Only attach a stub if the newTarget is a function with a
+  // non-configurable .prototype data property.
+ if (!newTarget->is<JSFunction>() ||
+ !newTarget->as<JSFunction>().hasNonConfigurablePrototypeDataProperty()) {
+ return ScriptedThisResult::NoAction;
+ }
+
+ AutoRealm ar(cx_, calleeFunc);
+ Shape* thisShape = ThisShapeForFunction(cx_, calleeFunc, newTarget);
+ if (!thisShape) {
+ cx_->clearPendingException();
+ return ScriptedThisResult::NoAction;
+ }
+
+ MOZ_ASSERT(thisShape->realm() == calleeFunc->realm());
+ result.set(thisShape);
+ return ScriptedThisResult::PlainObjectShape;
+}
+
+static bool CanOptimizeScriptedCall(JSFunction* callee, bool isConstructing) {
+ if (!callee->hasJitEntry()) {
+ return false;
+ }
+
+ // If callee is not an interpreted constructor, we have to throw.
+ if (isConstructing && !callee->isConstructor()) {
+ return false;
+ }
+
+ // Likewise, if the callee is a class constructor, we have to throw.
+ if (!isConstructing && callee->isClassConstructor()) {
+ return false;
+ }
+
+ return true;
+}
+
+void CallIRGenerator::emitCallScriptedGuards(ObjOperandId calleeObjId,
+ JSFunction* calleeFunc,
+ Int32OperandId argcId,
+ CallFlags flags, Shape* thisShape,
+ bool isBoundFunction) {
+ bool isConstructing = flags.isConstructing();
+
+ if (mode_ == ICState::Mode::Specialized) {
+ MOZ_ASSERT_IF(isConstructing, thisShape || flags.needsUninitializedThis());
+
+ // Ensure callee matches this stub's callee
+ emitCalleeGuard(calleeObjId, calleeFunc);
+ if (thisShape) {
+      // Emit guards to ensure the newTarget's .prototype property is what we
+      // expect. Note that getThisShapeForScripted checked that newTarget is a
+      // function with a non-configurable .prototype data property.
+
+ JSFunction* newTarget;
+ ObjOperandId newTargetObjId;
+ if (isBoundFunction) {
+ newTarget = calleeFunc;
+ newTargetObjId = calleeObjId;
+ } else {
+ newTarget = &newTarget_.toObject().as<JSFunction>();
+ ValOperandId newTargetValId = writer.loadArgumentDynamicSlot(
+ ArgumentKind::NewTarget, argcId, flags);
+ newTargetObjId = writer.guardToObject(newTargetValId);
+ }
+
+ Maybe<PropertyInfo> prop = newTarget->lookupPure(cx_->names().prototype);
+ MOZ_ASSERT(prop.isSome());
+ uint32_t slot = prop->slot();
+ MOZ_ASSERT(slot >= newTarget->numFixedSlots(),
+ "Stub code relies on this");
+
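+      // The shape guard ensures the .prototype property is still stored in
+      // the dynamic slot found above.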
+ writer.guardShape(newTargetObjId, newTarget->shape());
+
+ const Value& value = newTarget->getSlot(slot);
+ if (value.isObject()) {
+ JSObject* prototypeObject = &value.toObject();
+
+ ObjOperandId protoId = writer.loadObject(prototypeObject);
+ writer.guardDynamicSlotIsSpecificObject(
+ newTargetObjId, protoId, slot - newTarget->numFixedSlots());
+ } else {
+ writer.guardDynamicSlotIsNotObject(newTargetObjId,
+ slot - newTarget->numFixedSlots());
+ }
+
+ // Call metaScriptedThisShape before emitting the call, so that Warp can
+ // use the shape to create the |this| object before transpiling the call.
+ writer.metaScriptedThisShape(thisShape);
+ }
+ } else {
+ // Guard that object is a scripted function
+ writer.guardClass(calleeObjId, GuardClassKind::JSFunction);
+ writer.guardFunctionHasJitEntry(calleeObjId, isConstructing);
+
+ if (isConstructing) {
+ // If callee is not a constructor, we have to throw.
+ writer.guardFunctionIsConstructor(calleeObjId);
+ } else {
+ // If callee is a class constructor, we have to throw.
+ writer.guardNotClassConstructor(calleeObjId);
+ }
+ }
+}
+
+AttachDecision CallIRGenerator::tryAttachCallScripted(
+ HandleFunction calleeFunc) {
+ MOZ_ASSERT(calleeFunc->hasJitEntry());
+
+ if (calleeFunc->isWasmWithJitEntry()) {
+ TRY_ATTACH(tryAttachWasmCall(calleeFunc));
+ }
+
+ bool isSpecialized = mode_ == ICState::Mode::Specialized;
+
+ bool isConstructing = IsConstructPC(pc_);
+ bool isSpread = IsSpreadPC(pc_);
+ bool isSameRealm = isSpecialized && cx_->realm() == calleeFunc->realm();
+ CallFlags flags(isConstructing, isSpread, isSameRealm);
+
+ if (!CanOptimizeScriptedCall(calleeFunc, isConstructing)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (isConstructing && !calleeFunc->hasJitScript()) {
+ // If we're constructing, require the callee to have a JitScript. This isn't
+ // required for correctness but avoids allocating a template object below
+ // for constructors that aren't hot. See bug 1419758.
+ return AttachDecision::TemporarilyUnoptimizable;
+ }
+
+ // Verify that spread calls have a reasonable number of arguments.
+ if (isSpread && args_.length() > JIT_ARGS_LENGTH_MAX) {
+ return AttachDecision::NoAction;
+ }
+
+ Rooted<Shape*> thisShape(cx_);
+ if (isConstructing && isSpecialized) {
+ Rooted<JSObject*> newTarget(cx_, &newTarget_.toObject());
+ switch (getThisShapeForScripted(calleeFunc, newTarget, &thisShape)) {
+ case ScriptedThisResult::PlainObjectShape:
+ break;
+ case ScriptedThisResult::UninitializedThis:
+ flags.setNeedsUninitializedThis();
+ break;
+ case ScriptedThisResult::NoAction:
+ return AttachDecision::NoAction;
+ }
+ }
+
+ // Load argc.
+ Int32OperandId argcId(writer.setInputOperandId(0));
+
+ // Load the callee and ensure it is an object
+ ValOperandId calleeValId =
+ writer.loadArgumentDynamicSlot(ArgumentKind::Callee, argcId, flags);
+ ObjOperandId calleeObjId = writer.guardToObject(calleeValId);
+
+ emitCallScriptedGuards(calleeObjId, calleeFunc, argcId, flags, thisShape,
+ /* isBoundFunction = */ false);
+
+ writer.callScriptedFunction(calleeObjId, argcId, flags,
+ ClampFixedArgc(argc_));
+ writer.returnFromIC();
+
+ if (isSpecialized) {
+ trackAttached("Call.CallScripted");
+ } else {
+ trackAttached("Call.CallAnyScripted");
+ }
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision CallIRGenerator::tryAttachCallNative(HandleFunction calleeFunc) {
+ MOZ_ASSERT(calleeFunc->isNativeWithoutJitEntry());
+
+ bool isSpecialized = mode_ == ICState::Mode::Specialized;
+
+ bool isSpread = IsSpreadPC(pc_);
+ bool isSameRealm = isSpecialized && cx_->realm() == calleeFunc->realm();
+ bool isConstructing = IsConstructPC(pc_);
+ CallFlags flags(isConstructing, isSpread, isSameRealm);
+
+ if (isConstructing && !calleeFunc->isConstructor()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Verify that spread calls have a reasonable number of arguments.
+ if (isSpread && args_.length() > JIT_ARGS_LENGTH_MAX) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check for specific native-function optimizations.
+ if (isSpecialized) {
+ TRY_ATTACH(tryAttachInlinableNative(calleeFunc, flags));
+ }
+
+ // Load argc.
+ Int32OperandId argcId(writer.setInputOperandId(0));
+
+ // Load the callee and ensure it is an object
+ ValOperandId calleeValId =
+ writer.loadArgumentDynamicSlot(ArgumentKind::Callee, argcId, flags);
+ ObjOperandId calleeObjId = writer.guardToObject(calleeValId);
+
+ // DOM calls need an additional guard so only try optimizing the first stub.
+ // Can only optimize normal (non-spread) calls.
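+  // For example (illustrative): a method call on a DOM object such as
+  // |node.getAttribute("id")|, where the native is backed by JSJitInfo of
+  // type Method, can take this path.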
+ if (isFirstStub_ && !isSpread && thisval_.isObject() &&
+ CanAttachDOMCall(cx_, JSJitInfo::Method, &thisval_.toObject(), calleeFunc,
+ mode_)) {
+ MOZ_ASSERT(!isConstructing, "DOM functions are not constructors");
+
+ // Guard that |this| is an object.
+ ValOperandId thisValId =
+ writer.loadArgumentDynamicSlot(ArgumentKind::This, argcId, flags);
+ ObjOperandId thisObjId = writer.guardToObject(thisValId);
+
+ // Guard on the |this| shape to make sure it's the right instance. This also
+ // ensures DOM_OBJECT_SLOT is stored in a fixed slot. See CanAttachDOMCall.
+ writer.guardShape(thisObjId, thisval_.toObject().shape());
+
+ // Ensure callee matches this stub's callee
+ writer.guardSpecificFunction(calleeObjId, calleeFunc);
+ writer.callDOMFunction(calleeObjId, argcId, thisObjId, calleeFunc, flags,
+ ClampFixedArgc(argc_));
+
+ trackAttached("Call.CallDOM");
+ } else if (isSpecialized) {
+ // Ensure callee matches this stub's callee
+ writer.guardSpecificFunction(calleeObjId, calleeFunc);
+ writer.callNativeFunction(calleeObjId, argcId, op_, calleeFunc, flags,
+ ClampFixedArgc(argc_));
+
+ trackAttached("Call.CallNative");
+ } else {
+ // Guard that object is a native function
+ writer.guardClass(calleeObjId, GuardClassKind::JSFunction);
+ writer.guardFunctionHasNoJitEntry(calleeObjId);
+
+ if (isConstructing) {
+ // If callee is not a constructor, we have to throw.
+ writer.guardFunctionIsConstructor(calleeObjId);
+ } else {
+ // If callee is a class constructor, we have to throw.
+ writer.guardNotClassConstructor(calleeObjId);
+ }
+ writer.callAnyNativeFunction(calleeObjId, argcId, flags,
+ ClampFixedArgc(argc_));
+
+ trackAttached("Call.CallAnyNative");
+ }
+
+ writer.returnFromIC();
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision CallIRGenerator::tryAttachCallHook(HandleObject calleeObj) {
+ if (mode_ != ICState::Mode::Specialized) {
+ // We do not have megamorphic call hook stubs.
+ // TODO: Should we attach specialized call hook stubs in
+ // megamorphic mode to avoid going generic?
+ return AttachDecision::NoAction;
+ }
+
+ bool isSpread = IsSpreadPC(pc_);
+ bool isConstructing = IsConstructPC(pc_);
+ CallFlags flags(isConstructing, isSpread);
+ JSNative hook =
+ isConstructing ? calleeObj->constructHook() : calleeObj->callHook();
+ if (!hook) {
+ return AttachDecision::NoAction;
+ }
+
+ // Bound functions have a JSClass construct hook but are not always
+ // constructors.
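+  // For example (illustrative): |Math.max.bind(null)| has the class construct
+  // hook, but |new| on it throws because the bound target is not a
+  // constructor.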
+ if (isConstructing && !calleeObj->isConstructor()) {
+ return AttachDecision::NoAction;
+ }
+
+ // We don't support spread calls in the transpiler yet.
+ if (isSpread) {
+ return AttachDecision::NoAction;
+ }
+
+ // Load argc.
+ Int32OperandId argcId(writer.setInputOperandId(0));
+
+ // Load the callee and ensure it is an object
+ ValOperandId calleeValId =
+ writer.loadArgumentDynamicSlot(ArgumentKind::Callee, argcId, flags);
+ ObjOperandId calleeObjId = writer.guardToObject(calleeValId);
+
+ // Ensure the callee's class matches the one in this stub.
+ writer.guardAnyClass(calleeObjId, calleeObj->getClass());
+
+ if (isConstructing && calleeObj->is<BoundFunctionObject>()) {
+ writer.guardBoundFunctionIsConstructor(calleeObjId);
+ }
+
+ writer.callClassHook(calleeObjId, argcId, hook, flags, ClampFixedArgc(argc_));
+ writer.returnFromIC();
+
+ trackAttached("Call.CallHook");
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision CallIRGenerator::tryAttachBoundFunction(
+ Handle<BoundFunctionObject*> calleeObj) {
+ // The target must be a JSFunction with a JitEntry.
+ if (!calleeObj->getTarget()->is<JSFunction>()) {
+ return AttachDecision::NoAction;
+ }
+
+ bool isSpread = IsSpreadPC(pc_);
+ bool isConstructing = IsConstructPC(pc_);
+
+ // Spread calls are not supported yet.
+ if (isSpread) {
+ return AttachDecision::NoAction;
+ }
+
+ Rooted<JSFunction*> target(cx_, &calleeObj->getTarget()->as<JSFunction>());
+ if (!CanOptimizeScriptedCall(target, isConstructing)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Limit the number of bound arguments to prevent us from compiling many
+ // different stubs (we bake in numBoundArgs and it's usually very small).
+ static constexpr size_t MaxBoundArgs = 10;
+ size_t numBoundArgs = calleeObj->numBoundArgs();
+ if (numBoundArgs > MaxBoundArgs) {
+ return AttachDecision::NoAction;
+ }
+
+ // Ensure we don't exceed JIT_ARGS_LENGTH_MAX.
+ if (numBoundArgs + argc_ > JIT_ARGS_LENGTH_MAX) {
+ return AttachDecision::NoAction;
+ }
+
+ CallFlags flags(isConstructing, isSpread);
+
+ if (mode_ == ICState::Mode::Specialized) {
+ if (cx_->realm() == target->realm()) {
+ flags.setIsSameRealm();
+ }
+ }
+
+ Rooted<Shape*> thisShape(cx_);
+ if (isConstructing) {
+ // Only optimize if newTarget == callee. This is the common case and ensures
+ // we can always pass the bound function's target as newTarget.
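+    // newTarget can differ from the callee e.g. (illustrative) when the bound
+    // function is the super class of a derived constructor: during |super()|
+    // the callee is the bound function but newTarget is the derived class.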
+ if (newTarget_ != ObjectValue(*calleeObj)) {
+ return AttachDecision::NoAction;
+ }
+
+ if (mode_ == ICState::Mode::Specialized) {
+ Handle<JSFunction*> newTarget = target;
+ switch (getThisShapeForScripted(target, newTarget, &thisShape)) {
+ case ScriptedThisResult::PlainObjectShape:
+ break;
+ case ScriptedThisResult::UninitializedThis:
+ flags.setNeedsUninitializedThis();
+ break;
+ case ScriptedThisResult::NoAction:
+ return AttachDecision::NoAction;
+ }
+ }
+ }
+
+ // Load argc.
+ Int32OperandId argcId(writer.setInputOperandId(0));
+
+ // Load the callee and ensure it's a bound function.
+ ValOperandId calleeValId =
+ writer.loadArgumentDynamicSlot(ArgumentKind::Callee, argcId, flags);
+ ObjOperandId calleeObjId = writer.guardToObject(calleeValId);
+ writer.guardClass(calleeObjId, GuardClassKind::BoundFunction);
+
+ // Ensure numBoundArgs matches.
+ Int32OperandId numBoundArgsId = writer.loadBoundFunctionNumArgs(calleeObjId);
+ writer.guardSpecificInt32(numBoundArgsId, numBoundArgs);
+
+ if (isConstructing) {
+ // Guard newTarget == callee. We depend on this in CallBoundScriptedFunction
+ // and in emitCallScriptedGuards by using boundTarget as newTarget.
+ ValOperandId newTargetValId =
+ writer.loadArgumentDynamicSlot(ArgumentKind::NewTarget, argcId, flags);
+ ObjOperandId newTargetObjId = writer.guardToObject(newTargetValId);
+ writer.guardObjectIdentity(newTargetObjId, calleeObjId);
+ }
+
+ ObjOperandId targetId = writer.loadBoundFunctionTarget(calleeObjId);
+
+ emitCallScriptedGuards(targetId, target, argcId, flags, thisShape,
+ /* isBoundFunction = */ true);
+
+ writer.callBoundScriptedFunction(calleeObjId, targetId, argcId, flags,
+ numBoundArgs);
+ writer.returnFromIC();
+
+ trackAttached("Call.BoundFunction");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CallIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+
+ // Some opcodes are not yet supported.
+ switch (op_) {
+ case JSOp::Call:
+ case JSOp::CallContent:
+ case JSOp::CallIgnoresRv:
+ case JSOp::CallIter:
+ case JSOp::CallContentIter:
+ case JSOp::SpreadCall:
+ case JSOp::New:
+ case JSOp::NewContent:
+ case JSOp::SpreadNew:
+ case JSOp::SuperCall:
+ case JSOp::SpreadSuperCall:
+ break;
+ default:
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(mode_ != ICState::Mode::Generic);
+
+ // Ensure callee is a function.
+ if (!callee_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ RootedObject calleeObj(cx_, &callee_.toObject());
+ if (calleeObj->is<BoundFunctionObject>()) {
+ TRY_ATTACH(tryAttachBoundFunction(calleeObj.as<BoundFunctionObject>()));
+ }
+ if (!calleeObj->is<JSFunction>()) {
+ return tryAttachCallHook(calleeObj);
+ }
+
+ HandleFunction calleeFunc = calleeObj.as<JSFunction>();
+
+ // Check for scripted optimizations.
+ if (calleeFunc->hasJitEntry()) {
+ return tryAttachCallScripted(calleeFunc);
+ }
+
+ // Check for native-function optimizations.
+ MOZ_ASSERT(calleeFunc->isNativeWithoutJitEntry());
+
+ // Try inlining Function.prototype.{call,apply}. We don't use the
+ // InlinableNative mechanism for this because we want to optimize these more
+ // aggressively than other natives.
+ if (op_ == JSOp::Call || op_ == JSOp::CallContent ||
+ op_ == JSOp::CallIgnoresRv) {
+ TRY_ATTACH(tryAttachFunCall(calleeFunc));
+ TRY_ATTACH(tryAttachFunApply(calleeFunc));
+ }
+
+ return tryAttachCallNative(calleeFunc);
+}
+
+void CallIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("callee", callee_);
+ sp.valueProperty("thisval", thisval_);
+ sp.valueProperty("argc", Int32Value(argc_));
+
+ // Try to log the first two arguments.
+ if (args_.length() >= 1) {
+ sp.valueProperty("arg0", args_[0]);
+ }
+ if (args_.length() >= 2) {
+ sp.valueProperty("arg1", args_[1]);
+ }
+ }
+#endif
+}
+
+// Class which holds a shape pointer for use when caches might reference data in
+// other zones.
+static const JSClass shapeContainerClass = {"ShapeContainer",
+ JSCLASS_HAS_RESERVED_SLOTS(1)};
+
+static const size_t SHAPE_CONTAINER_SLOT = 0;
+
+static JSObject* NewWrapperWithObjectShape(JSContext* cx,
+ Handle<NativeObject*> obj) {
+ MOZ_ASSERT(cx->compartment() != obj->compartment());
+
+ RootedObject wrapper(cx);
+ {
+ AutoRealm ar(cx, obj);
+ wrapper = NewBuiltinClassInstance(cx, &shapeContainerClass);
+ if (!wrapper) {
+ return nullptr;
+ }
+ wrapper->as<NativeObject>().setReservedSlot(
+ SHAPE_CONTAINER_SLOT, PrivateGCThingValue(obj->shape()));
+ }
+ if (!JS_WrapObject(cx, &wrapper)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(IsWrapper(wrapper));
+ return wrapper;
+}
+
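+// Reads back the Shape stored by NewWrapperWithObjectShape: |obj| is the
+// cross-compartment wrapper, its private slot holds the shape-container
+// object, and reserved slot 0 of that container holds the Shape as a
+// PrivateGCThing value.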
+void jit::LoadShapeWrapperContents(MacroAssembler& masm, Register obj,
+ Register dst, Label* failure) {
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), dst);
+ Address privateAddr(dst,
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
+ masm.fallibleUnboxObject(privateAddr, dst, failure);
+ masm.unboxNonDouble(
+ Address(dst, NativeObject::getFixedSlotOffset(SHAPE_CONTAINER_SLOT)), dst,
+ JSVAL_TYPE_PRIVATE_GCTHING);
+}
+
+static bool CanConvertToInt32ForToNumber(const Value& v) {
+ return v.isInt32() || v.isBoolean() || v.isNull();
+}
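+// These are exactly the values whose ToNumber result is always an int32,
+// e.g. (illustrative) ToNumber(null) == 0 and ToNumber(true) == 1.
+// |undefined| is excluded because ToNumber(undefined) is NaN.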
+
+static Int32OperandId EmitGuardToInt32ForToNumber(CacheIRWriter& writer,
+ ValOperandId id,
+ const Value& v) {
+ if (v.isInt32()) {
+ return writer.guardToInt32(id);
+ }
+ if (v.isNull()) {
+ writer.guardIsNull(id);
+ return writer.loadInt32Constant(0);
+ }
+ MOZ_ASSERT(v.isBoolean());
+ return writer.guardBooleanToInt32(id);
+}
+
+static bool CanConvertToDoubleForToNumber(const Value& v) {
+ return v.isNumber() || v.isBoolean() || v.isNullOrUndefined();
+}
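+// Unlike the int32 case above, |undefined| is allowed here because
+// ToNumber(undefined) is NaN, which is representable as a double (see
+// EmitGuardToDoubleForToNumber below).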
+
+static NumberOperandId EmitGuardToDoubleForToNumber(CacheIRWriter& writer,
+ ValOperandId id,
+ const Value& v) {
+ if (v.isNumber()) {
+ return writer.guardIsNumber(id);
+ }
+ if (v.isBoolean()) {
+ BooleanOperandId boolId = writer.guardToBoolean(id);
+ return writer.booleanToNumber(boolId);
+ }
+ if (v.isNull()) {
+ writer.guardIsNull(id);
+ return writer.loadDoubleConstant(0.0);
+ }
+ MOZ_ASSERT(v.isUndefined());
+ writer.guardIsUndefined(id);
+ return writer.loadDoubleConstant(JS::GenericNaN());
+}
+
+CompareIRGenerator::CompareIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state, JSOp op,
+ HandleValue lhsVal, HandleValue rhsVal)
+ : IRGenerator(cx, script, pc, CacheKind::Compare, state),
+ op_(op),
+ lhsVal_(lhsVal),
+ rhsVal_(rhsVal) {}
+
+AttachDecision CompareIRGenerator::tryAttachString(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ if (!lhsVal_.isString() || !rhsVal_.isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ StringOperandId lhsStrId = writer.guardToString(lhsId);
+ StringOperandId rhsStrId = writer.guardToString(rhsId);
+ writer.compareStringResult(op_, lhsStrId, rhsStrId);
+ writer.returnFromIC();
+
+ trackAttached("Compare.String");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachObject(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ MOZ_ASSERT(IsEqualityOp(op_));
+
+ if (!lhsVal_.isObject() || !rhsVal_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ ObjOperandId lhsObjId = writer.guardToObject(lhsId);
+ ObjOperandId rhsObjId = writer.guardToObject(rhsId);
+ writer.compareObjectResult(op_, lhsObjId, rhsObjId);
+ writer.returnFromIC();
+
+ trackAttached("Compare.Object");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachSymbol(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ MOZ_ASSERT(IsEqualityOp(op_));
+
+ if (!lhsVal_.isSymbol() || !rhsVal_.isSymbol()) {
+ return AttachDecision::NoAction;
+ }
+
+ SymbolOperandId lhsSymId = writer.guardToSymbol(lhsId);
+ SymbolOperandId rhsSymId = writer.guardToSymbol(rhsId);
+ writer.compareSymbolResult(op_, lhsSymId, rhsSymId);
+ writer.returnFromIC();
+
+ trackAttached("Compare.Symbol");
+ return AttachDecision::Attach;
+}
+
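+// Attached for strict (in)equality when the operands' value tags differ and
+// they aren't both numbers, e.g. (illustrative) |1 === "1"| or |true !== 3n|;
+// the result then follows from the tags alone.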
+AttachDecision CompareIRGenerator::tryAttachStrictDifferentTypes(
+ ValOperandId lhsId, ValOperandId rhsId) {
+ MOZ_ASSERT(IsEqualityOp(op_));
+
+ if (op_ != JSOp::StrictEq && op_ != JSOp::StrictNe) {
+ return AttachDecision::NoAction;
+ }
+
+ // Probably can't hit some of these.
+ if (SameType(lhsVal_, rhsVal_) ||
+ (lhsVal_.isNumber() && rhsVal_.isNumber())) {
+ return AttachDecision::NoAction;
+ }
+
+ // Compare tags
+ ValueTagOperandId lhsTypeId = writer.loadValueTag(lhsId);
+ ValueTagOperandId rhsTypeId = writer.loadValueTag(rhsId);
+ writer.guardTagNotEqual(lhsTypeId, rhsTypeId);
+
+ // Now that we've passed the guard, we know differing types, so return the
+ // bool result.
+  writer.loadBooleanResult(op_ == JSOp::StrictNe);
+ writer.returnFromIC();
+
+ trackAttached("Compare.StrictDifferentTypes");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachInt32(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ if (!CanConvertToInt32ForToNumber(lhsVal_) ||
+ !CanConvertToInt32ForToNumber(rhsVal_)) {
+ return AttachDecision::NoAction;
+ }
+
+  // Strictly different types should have been handled by
+  // tryAttachStrictDifferentTypes.
+ MOZ_ASSERT_IF(op_ == JSOp::StrictEq || op_ == JSOp::StrictNe,
+ lhsVal_.type() == rhsVal_.type());
+
+ // Should have been handled by tryAttachAnyNullUndefined.
+ MOZ_ASSERT_IF(lhsVal_.isNull() || rhsVal_.isNull(), !IsEqualityOp(op_));
+
+ Int32OperandId lhsIntId = EmitGuardToInt32ForToNumber(writer, lhsId, lhsVal_);
+ Int32OperandId rhsIntId = EmitGuardToInt32ForToNumber(writer, rhsId, rhsVal_);
+
+ writer.compareInt32Result(op_, lhsIntId, rhsIntId);
+ writer.returnFromIC();
+
+ trackAttached("Compare.Int32");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachNumber(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ if (!CanConvertToDoubleForToNumber(lhsVal_) ||
+ !CanConvertToDoubleForToNumber(rhsVal_)) {
+ return AttachDecision::NoAction;
+ }
+
+  // Strictly different types should have been handled by
+  // tryAttachStrictDifferentTypes.
+ MOZ_ASSERT_IF(op_ == JSOp::StrictEq || op_ == JSOp::StrictNe,
+ lhsVal_.type() == rhsVal_.type() ||
+ (lhsVal_.isNumber() && rhsVal_.isNumber()));
+
+ // Should have been handled by tryAttachAnyNullUndefined.
+ MOZ_ASSERT_IF(lhsVal_.isNullOrUndefined() || rhsVal_.isNullOrUndefined(),
+ !IsEqualityOp(op_));
+
+ NumberOperandId lhs = EmitGuardToDoubleForToNumber(writer, lhsId, lhsVal_);
+ NumberOperandId rhs = EmitGuardToDoubleForToNumber(writer, rhsId, rhsVal_);
+ writer.compareDoubleResult(op_, lhs, rhs);
+ writer.returnFromIC();
+
+ trackAttached("Compare.Number");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachBigInt(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ if (!lhsVal_.isBigInt() || !rhsVal_.isBigInt()) {
+ return AttachDecision::NoAction;
+ }
+
+ BigIntOperandId lhs = writer.guardToBigInt(lhsId);
+ BigIntOperandId rhs = writer.guardToBigInt(rhsId);
+
+ writer.compareBigIntResult(op_, lhs, rhs);
+ writer.returnFromIC();
+
+ trackAttached("Compare.BigInt");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachAnyNullUndefined(
+ ValOperandId lhsId, ValOperandId rhsId) {
+ MOZ_ASSERT(IsEqualityOp(op_));
+
+ // Either RHS or LHS needs to be null/undefined.
+ if (!lhsVal_.isNullOrUndefined() && !rhsVal_.isNullOrUndefined()) {
+ return AttachDecision::NoAction;
+ }
+
+  // We assume that the null/undefined side is usually the constant operand,
+  // as in code like `if (x === undefined) { x = {}; }`.
+  // That is why we don't attach when both sides are null/undefined: we would
+  // have to guess which side is the likely constant.
+  // The generated code itself handles both null and undefined either way.
+ if (lhsVal_.isNullOrUndefined() && rhsVal_.isNullOrUndefined()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (rhsVal_.isNullOrUndefined()) {
+ if (rhsVal_.isNull()) {
+ writer.guardIsNull(rhsId);
+ writer.compareNullUndefinedResult(op_, /* isUndefined */ false, lhsId);
+ trackAttached("Compare.AnyNull");
+ } else {
+ writer.guardIsUndefined(rhsId);
+ writer.compareNullUndefinedResult(op_, /* isUndefined */ true, lhsId);
+ trackAttached("Compare.AnyUndefined");
+ }
+ } else {
+ if (lhsVal_.isNull()) {
+ writer.guardIsNull(lhsId);
+ writer.compareNullUndefinedResult(op_, /* isUndefined */ false, rhsId);
+ trackAttached("Compare.NullAny");
+ } else {
+ writer.guardIsUndefined(lhsId);
+ writer.compareNullUndefinedResult(op_, /* isUndefined */ true, rhsId);
+ trackAttached("Compare.UndefinedAny");
+ }
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+// Handle {null, undefined} x {null, undefined} equality comparisons.
+AttachDecision CompareIRGenerator::tryAttachNullUndefined(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ if (!lhsVal_.isNullOrUndefined() || !rhsVal_.isNullOrUndefined()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (op_ == JSOp::Eq || op_ == JSOp::Ne) {
+ writer.guardIsNullOrUndefined(lhsId);
+ writer.guardIsNullOrUndefined(rhsId);
+ // Sloppy equality means we actually only care about the op:
+ writer.loadBooleanResult(op_ == JSOp::Eq);
+ trackAttached("Compare.SloppyNullUndefined");
+ } else {
+ // Strict equality only hits this branch, and only in the
+ // undef {!,=}== undef and null {!,=}== null cases.
+ // The other cases should have hit tryAttachStrictDifferentTypes.
+ MOZ_ASSERT(lhsVal_.isNull() == rhsVal_.isNull());
+ lhsVal_.isNull() ? writer.guardIsNull(lhsId)
+ : writer.guardIsUndefined(lhsId);
+ rhsVal_.isNull() ? writer.guardIsNull(rhsId)
+ : writer.guardIsUndefined(rhsId);
+ writer.loadBooleanResult(op_ == JSOp::StrictEq);
+ trackAttached("Compare.StrictNullUndefinedEquality");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachStringNumber(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ // Ensure String x {Number, Boolean, Null, Undefined}
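+  // For example (illustrative): |"5" < 10| converts the string with ToNumber
+  // and compares the resulting doubles.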
+ if (!(lhsVal_.isString() && CanConvertToDoubleForToNumber(rhsVal_)) &&
+ !(rhsVal_.isString() && CanConvertToDoubleForToNumber(lhsVal_))) {
+ return AttachDecision::NoAction;
+ }
+
+ // Case should have been handled by tryAttachStrictDifferentTypes
+ MOZ_ASSERT(op_ != JSOp::StrictEq && op_ != JSOp::StrictNe);
+
+ auto createGuards = [&](const Value& v, ValOperandId vId) {
+ if (v.isString()) {
+ StringOperandId strId = writer.guardToString(vId);
+ return writer.guardStringToNumber(strId);
+ }
+ return EmitGuardToDoubleForToNumber(writer, vId, v);
+ };
+
+ NumberOperandId lhsGuardedId = createGuards(lhsVal_, lhsId);
+ NumberOperandId rhsGuardedId = createGuards(rhsVal_, rhsId);
+ writer.compareDoubleResult(op_, lhsGuardedId, rhsGuardedId);
+ writer.returnFromIC();
+
+ trackAttached("Compare.StringNumber");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachPrimitiveSymbol(
+ ValOperandId lhsId, ValOperandId rhsId) {
+ MOZ_ASSERT(IsEqualityOp(op_));
+
+ // The set of primitive cases we want to handle here (excluding null,
+ // undefined, and symbol)
+ auto isPrimitive = [](const Value& x) {
+ return x.isString() || x.isBoolean() || x.isNumber() || x.isBigInt();
+ };
+
+ // Ensure Symbol x {String, Bool, Number, BigInt}.
+ if (!(lhsVal_.isSymbol() && isPrimitive(rhsVal_)) &&
+ !(rhsVal_.isSymbol() && isPrimitive(lhsVal_))) {
+ return AttachDecision::NoAction;
+ }
+
+ auto guardPrimitive = [&](const Value& v, ValOperandId id) {
+ MOZ_ASSERT(isPrimitive(v));
+ if (v.isNumber()) {
+ writer.guardIsNumber(id);
+ return;
+ }
+ switch (v.extractNonDoubleType()) {
+ case JSVAL_TYPE_STRING:
+ writer.guardToString(id);
+ return;
+ case JSVAL_TYPE_BOOLEAN:
+ writer.guardToBoolean(id);
+ return;
+ case JSVAL_TYPE_BIGINT:
+ writer.guardToBigInt(id);
+ return;
+ default:
+ MOZ_CRASH("unexpected type");
+ return;
+ }
+ };
+
+ if (lhsVal_.isSymbol()) {
+ writer.guardToSymbol(lhsId);
+ guardPrimitive(rhsVal_, rhsId);
+ } else {
+ guardPrimitive(lhsVal_, lhsId);
+ writer.guardToSymbol(rhsId);
+ }
+
+ // Comparing a primitive with symbol will always be true for Ne/StrictNe, and
+ // always be false for other compare ops.
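+  // For example (illustrative): |Symbol() == "sym"| is false and
+  // |Symbol() != 1| is true; no conversion is attempted for either operand.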
+ writer.loadBooleanResult(op_ == JSOp::Ne || op_ == JSOp::StrictNe);
+ writer.returnFromIC();
+
+ trackAttached("Compare.PrimitiveSymbol");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachBigIntInt32(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ // Ensure BigInt x {Int32, Boolean, Null}.
+ if (!(lhsVal_.isBigInt() && CanConvertToInt32ForToNumber(rhsVal_)) &&
+ !(rhsVal_.isBigInt() && CanConvertToInt32ForToNumber(lhsVal_))) {
+ return AttachDecision::NoAction;
+ }
+
+ // Case should have been handled by tryAttachStrictDifferentTypes
+ MOZ_ASSERT(op_ != JSOp::StrictEq && op_ != JSOp::StrictNe);
+
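+  // compareBigIntInt32Result takes the BigInt operand first, so when the
+  // BigInt is on the rhs we swap the operands and reverse the op, e.g.
+  // (illustrative) |x < 1n| is evaluated as |1n > x|.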
+ if (lhsVal_.isBigInt()) {
+ BigIntOperandId bigIntId = writer.guardToBigInt(lhsId);
+ Int32OperandId intId = EmitGuardToInt32ForToNumber(writer, rhsId, rhsVal_);
+
+ writer.compareBigIntInt32Result(op_, bigIntId, intId);
+ } else {
+ Int32OperandId intId = EmitGuardToInt32ForToNumber(writer, lhsId, lhsVal_);
+ BigIntOperandId bigIntId = writer.guardToBigInt(rhsId);
+
+ writer.compareBigIntInt32Result(ReverseCompareOp(op_), bigIntId, intId);
+ }
+ writer.returnFromIC();
+
+ trackAttached("Compare.BigIntInt32");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachBigIntNumber(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ // Ensure BigInt x {Number, Undefined}.
+ if (!(lhsVal_.isBigInt() && CanConvertToDoubleForToNumber(rhsVal_)) &&
+ !(rhsVal_.isBigInt() && CanConvertToDoubleForToNumber(lhsVal_))) {
+ return AttachDecision::NoAction;
+ }
+
+ // Case should have been handled by tryAttachStrictDifferentTypes
+ MOZ_ASSERT(op_ != JSOp::StrictEq && op_ != JSOp::StrictNe);
+
+ // Case should have been handled by tryAttachBigIntInt32.
+ MOZ_ASSERT(!CanConvertToInt32ForToNumber(lhsVal_));
+ MOZ_ASSERT(!CanConvertToInt32ForToNumber(rhsVal_));
+
+ if (lhsVal_.isBigInt()) {
+ BigIntOperandId bigIntId = writer.guardToBigInt(lhsId);
+ NumberOperandId numId =
+ EmitGuardToDoubleForToNumber(writer, rhsId, rhsVal_);
+
+ writer.compareBigIntNumberResult(op_, bigIntId, numId);
+ } else {
+ NumberOperandId numId =
+ EmitGuardToDoubleForToNumber(writer, lhsId, lhsVal_);
+ BigIntOperandId bigIntId = writer.guardToBigInt(rhsId);
+
+ writer.compareBigIntNumberResult(ReverseCompareOp(op_), bigIntId, numId);
+ }
+ writer.returnFromIC();
+
+ trackAttached("Compare.BigIntNumber");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachBigIntString(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ // Ensure BigInt x String.
+ if (!(lhsVal_.isBigInt() && rhsVal_.isString()) &&
+ !(rhsVal_.isBigInt() && lhsVal_.isString())) {
+ return AttachDecision::NoAction;
+ }
+
+ // Case should have been handled by tryAttachStrictDifferentTypes
+ MOZ_ASSERT(op_ != JSOp::StrictEq && op_ != JSOp::StrictNe);
+
+ if (lhsVal_.isBigInt()) {
+ BigIntOperandId bigIntId = writer.guardToBigInt(lhsId);
+ StringOperandId strId = writer.guardToString(rhsId);
+
+ writer.compareBigIntStringResult(op_, bigIntId, strId);
+ } else {
+ StringOperandId strId = writer.guardToString(lhsId);
+ BigIntOperandId bigIntId = writer.guardToBigInt(rhsId);
+
+ writer.compareBigIntStringResult(ReverseCompareOp(op_), bigIntId, strId);
+ }
+ writer.returnFromIC();
+
+ trackAttached("Compare.BigIntString");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CompareIRGenerator::tryAttachStub() {
+ MOZ_ASSERT(cacheKind_ == CacheKind::Compare);
+ MOZ_ASSERT(IsEqualityOp(op_) || IsRelationalOp(op_));
+
+ AutoAssertNoPendingException aanpe(cx_);
+
+ constexpr uint8_t lhsIndex = 0;
+ constexpr uint8_t rhsIndex = 1;
+
+ ValOperandId lhsId(writer.setInputOperandId(lhsIndex));
+ ValOperandId rhsId(writer.setInputOperandId(rhsIndex));
+
+ // For sloppy equality ops, there are cases this IC does not handle:
+ // - {Object} x {String, Symbol, Bool, Number, BigInt}.
+ //
+ // For relational comparison ops, these cases aren't handled:
+ // - Object x {String, Bool, Number, BigInt, Object, Null, Undefined}.
+ // Note: |Symbol x any| always throws, so it doesn't need to be handled.
+ //
+  // (The above lists omit the equivalent case {B} x {A} when {A} x {B} is
+  // already present.)
+
+ if (IsEqualityOp(op_)) {
+ TRY_ATTACH(tryAttachObject(lhsId, rhsId));
+ TRY_ATTACH(tryAttachSymbol(lhsId, rhsId));
+
+ // Handles any (non null or undefined) comparison with null/undefined.
+ TRY_ATTACH(tryAttachAnyNullUndefined(lhsId, rhsId));
+
+    // This covers strict equality/inequality via a type tag check, so it
+    // catches all differing-type pairs except Numbers, which can't be
+    // decided by tag alone (an Int32 and a Double can be strictly equal).
+ TRY_ATTACH(tryAttachStrictDifferentTypes(lhsId, rhsId));
+
+ TRY_ATTACH(tryAttachNullUndefined(lhsId, rhsId));
+
+ TRY_ATTACH(tryAttachPrimitiveSymbol(lhsId, rhsId));
+ }
+
+  // We want these to come last, so that the strictly-different-types stub
+  // above takes precedence and the attachment code below doesn't need to
+  // consider those cases.
+ TRY_ATTACH(tryAttachInt32(lhsId, rhsId));
+ TRY_ATTACH(tryAttachNumber(lhsId, rhsId));
+ TRY_ATTACH(tryAttachBigInt(lhsId, rhsId));
+ TRY_ATTACH(tryAttachString(lhsId, rhsId));
+
+ TRY_ATTACH(tryAttachStringNumber(lhsId, rhsId));
+
+ TRY_ATTACH(tryAttachBigIntInt32(lhsId, rhsId));
+ TRY_ATTACH(tryAttachBigIntNumber(lhsId, rhsId));
+ TRY_ATTACH(tryAttachBigIntString(lhsId, rhsId));
+
+ // Strict equality is always supported.
+ MOZ_ASSERT(!IsStrictEqualityOp(op_));
+
+ // Other operations are unsupported iff at least one operand is an object.
+ MOZ_ASSERT(lhsVal_.isObject() || rhsVal_.isObject());
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+void CompareIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("lhs", lhsVal_);
+ sp.valueProperty("rhs", rhsVal_);
+ sp.opcodeProperty("op", op_);
+ }
+#endif
+}
+
+ToBoolIRGenerator::ToBoolIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleValue val)
+ : IRGenerator(cx, script, pc, CacheKind::ToBool, state), val_(val) {}
+
+void ToBoolIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("val", val_);
+ }
+#endif
+}
+
+AttachDecision ToBoolIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+ writer.setTypeData(TypeData(JSValueType(val_.type())));
+
+ TRY_ATTACH(tryAttachBool());
+ TRY_ATTACH(tryAttachInt32());
+ TRY_ATTACH(tryAttachNumber());
+ TRY_ATTACH(tryAttachString());
+ TRY_ATTACH(tryAttachNullOrUndefined());
+ TRY_ATTACH(tryAttachObject());
+ TRY_ATTACH(tryAttachSymbol());
+ TRY_ATTACH(tryAttachBigInt());
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+AttachDecision ToBoolIRGenerator::tryAttachBool() {
+ if (!val_.isBoolean()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ writer.guardNonDoubleType(valId, ValueType::Boolean);
+ writer.loadOperandResult(valId);
+ writer.returnFromIC();
+ trackAttached("ToBool.Bool");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToBoolIRGenerator::tryAttachInt32() {
+ if (!val_.isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ writer.guardNonDoubleType(valId, ValueType::Int32);
+ writer.loadInt32TruthyResult(valId);
+ writer.returnFromIC();
+ trackAttached("ToBool.Int32");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToBoolIRGenerator::tryAttachNumber() {
+ if (!val_.isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ NumberOperandId numId = writer.guardIsNumber(valId);
+ writer.loadDoubleTruthyResult(numId);
+ writer.returnFromIC();
+ trackAttached("ToBool.Number");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToBoolIRGenerator::tryAttachSymbol() {
+ if (!val_.isSymbol()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ writer.guardNonDoubleType(valId, ValueType::Symbol);
+ writer.loadBooleanResult(true);
+ writer.returnFromIC();
+ trackAttached("ToBool.Symbol");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToBoolIRGenerator::tryAttachString() {
+ if (!val_.isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ StringOperandId strId = writer.guardToString(valId);
+ writer.loadStringTruthyResult(strId);
+ writer.returnFromIC();
+ trackAttached("ToBool.String");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToBoolIRGenerator::tryAttachNullOrUndefined() {
+ if (!val_.isNullOrUndefined()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ writer.guardIsNullOrUndefined(valId);
+ writer.loadBooleanResult(false);
+ writer.returnFromIC();
+ trackAttached("ToBool.NullOrUndefined");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToBoolIRGenerator::tryAttachObject() {
+ if (!val_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ ObjOperandId objId = writer.guardToObject(valId);
+ writer.loadObjectTruthyResult(objId);
+ writer.returnFromIC();
+ trackAttached("ToBool.Object");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToBoolIRGenerator::tryAttachBigInt() {
+ if (!val_.isBigInt()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ BigIntOperandId bigIntId = writer.guardToBigInt(valId);
+ writer.loadBigIntTruthyResult(bigIntId);
+ writer.returnFromIC();
+ trackAttached("ToBool.BigInt");
+ return AttachDecision::Attach;
+}
+
+GetIntrinsicIRGenerator::GetIntrinsicIRGenerator(JSContext* cx,
+ HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleValue val)
+ : IRGenerator(cx, script, pc, CacheKind::GetIntrinsic, state), val_(val) {}
+
+void GetIntrinsicIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("val", val_);
+ }
+#endif
+}
+
+AttachDecision GetIntrinsicIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+ writer.loadValueResult(val_);
+ writer.returnFromIC();
+ trackAttached("GetIntrinsic");
+ return AttachDecision::Attach;
+}
+
+UnaryArithIRGenerator::UnaryArithIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ JSOp op, HandleValue val,
+ HandleValue res)
+ : IRGenerator(cx, script, pc, CacheKind::UnaryArith, state),
+ op_(op),
+ val_(val),
+ res_(res) {}
+
+void UnaryArithIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("val", val_);
+ sp.valueProperty("res", res_);
+ }
+#endif
+}
+
+AttachDecision UnaryArithIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+ TRY_ATTACH(tryAttachInt32());
+ TRY_ATTACH(tryAttachNumber());
+ TRY_ATTACH(tryAttachBitwise());
+ TRY_ATTACH(tryAttachBigInt());
+ TRY_ATTACH(tryAttachStringInt32());
+ TRY_ATTACH(tryAttachStringNumber());
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+AttachDecision UnaryArithIRGenerator::tryAttachInt32() {
+ if (op_ == JSOp::BitNot) {
+ return AttachDecision::NoAction;
+ }
+ if (!CanConvertToInt32ForToNumber(val_) || !res_.isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+
+ Int32OperandId intId = EmitGuardToInt32ForToNumber(writer, valId, val_);
+ switch (op_) {
+ case JSOp::Pos:
+ writer.loadInt32Result(intId);
+ trackAttached("UnaryArith.Int32Pos");
+ break;
+ case JSOp::Neg:
+ writer.int32NegationResult(intId);
+ trackAttached("UnaryArith.Int32Neg");
+ break;
+ case JSOp::Inc:
+ writer.int32IncResult(intId);
+ trackAttached("UnaryArith.Int32Inc");
+ break;
+ case JSOp::Dec:
+ writer.int32DecResult(intId);
+ trackAttached("UnaryArith.Int32Dec");
+ break;
+ case JSOp::ToNumeric:
+ writer.loadInt32Result(intId);
+ trackAttached("UnaryArith.Int32ToNumeric");
+ break;
+ default:
+ MOZ_CRASH("unexpected OP");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+AttachDecision UnaryArithIRGenerator::tryAttachNumber() {
+ if (op_ == JSOp::BitNot) {
+ return AttachDecision::NoAction;
+ }
+ if (!CanConvertToDoubleForToNumber(val_)) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(res_.isNumber());
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ NumberOperandId numId = EmitGuardToDoubleForToNumber(writer, valId, val_);
+
+ switch (op_) {
+ case JSOp::Pos:
+ writer.loadDoubleResult(numId);
+ trackAttached("UnaryArith.DoublePos");
+ break;
+ case JSOp::Neg:
+ writer.doubleNegationResult(numId);
+ trackAttached("UnaryArith.DoubleNeg");
+ break;
+ case JSOp::Inc:
+ writer.doubleIncResult(numId);
+ trackAttached("UnaryArith.DoubleInc");
+ break;
+ case JSOp::Dec:
+ writer.doubleDecResult(numId);
+ trackAttached("UnaryArith.DoubleDec");
+ break;
+ case JSOp::ToNumeric:
+ writer.loadDoubleResult(numId);
+ trackAttached("UnaryArith.DoubleToNumeric");
+ break;
+ default:
+ MOZ_CRASH("Unexpected OP");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+static bool CanTruncateToInt32(const Value& val) {
+ return val.isNumber() || val.isBoolean() || val.isNullOrUndefined() ||
+ val.isString();
+}
+
+// Convert the operand to an int32 for bitwise/shift operations.
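+// This follows ToInt32 semantics, e.g. (illustrative) |~4.5| computes
+// ~4 == -5 and |"8" | 0| yields 8.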
+static Int32OperandId EmitTruncateToInt32Guard(CacheIRWriter& writer,
+ ValOperandId id,
+ const Value& val) {
+ MOZ_ASSERT(CanTruncateToInt32(val));
+ if (val.isInt32()) {
+ return writer.guardToInt32(id);
+ }
+ if (val.isBoolean()) {
+ return writer.guardBooleanToInt32(id);
+ }
+ if (val.isNullOrUndefined()) {
+ writer.guardIsNullOrUndefined(id);
+ return writer.loadInt32Constant(0);
+ }
+ NumberOperandId numId;
+ if (val.isString()) {
+ StringOperandId strId = writer.guardToString(id);
+ numId = writer.guardStringToNumber(strId);
+ } else {
+ MOZ_ASSERT(val.isDouble());
+ numId = writer.guardIsNumber(id);
+ }
+ return writer.truncateDoubleToUInt32(numId);
+}
+
+AttachDecision UnaryArithIRGenerator::tryAttachBitwise() {
+ // Only bitwise operators.
+ if (op_ != JSOp::BitNot) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check guard conditions
+ if (!CanTruncateToInt32(val_)) {
+ return AttachDecision::NoAction;
+ }
+
+ // Bitwise operators always produce Int32 values.
+ MOZ_ASSERT(res_.isInt32());
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ Int32OperandId intId = EmitTruncateToInt32Guard(writer, valId, val_);
+ writer.int32NotResult(intId);
+ trackAttached("UnaryArith.BitwiseBitNot");
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+AttachDecision UnaryArithIRGenerator::tryAttachBigInt() {
+ if (!val_.isBigInt()) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(res_.isBigInt());
+
+ MOZ_ASSERT(op_ != JSOp::Pos,
+ "Applying the unary + operator on BigInt values throws an error");
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ BigIntOperandId bigIntId = writer.guardToBigInt(valId);
+ switch (op_) {
+ case JSOp::BitNot:
+ writer.bigIntNotResult(bigIntId);
+ trackAttached("UnaryArith.BigIntNot");
+ break;
+ case JSOp::Neg:
+ writer.bigIntNegationResult(bigIntId);
+ trackAttached("UnaryArith.BigIntNeg");
+ break;
+ case JSOp::Inc:
+ writer.bigIntIncResult(bigIntId);
+ trackAttached("UnaryArith.BigIntInc");
+ break;
+ case JSOp::Dec:
+ writer.bigIntDecResult(bigIntId);
+ trackAttached("UnaryArith.BigIntDec");
+ break;
+ case JSOp::ToNumeric:
+ writer.loadBigIntResult(bigIntId);
+ trackAttached("UnaryArith.BigIntToNumeric");
+ break;
+ default:
+ MOZ_CRASH("Unexpected OP");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+AttachDecision UnaryArithIRGenerator::tryAttachStringInt32() {
+ if (!val_.isString()) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(res_.isNumber());
+
+ // Case should have been handled by tryAttachBitwise.
+ MOZ_ASSERT(op_ != JSOp::BitNot);
+
+ if (!res_.isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ StringOperandId stringId = writer.guardToString(valId);
+ Int32OperandId intId = writer.guardStringToInt32(stringId);
+
+ switch (op_) {
+ case JSOp::Pos:
+ writer.loadInt32Result(intId);
+ trackAttached("UnaryArith.StringInt32Pos");
+ break;
+ case JSOp::Neg:
+ writer.int32NegationResult(intId);
+ trackAttached("UnaryArith.StringInt32Neg");
+ break;
+ case JSOp::Inc:
+ writer.int32IncResult(intId);
+ trackAttached("UnaryArith.StringInt32Inc");
+ break;
+ case JSOp::Dec:
+ writer.int32DecResult(intId);
+ trackAttached("UnaryArith.StringInt32Dec");
+ break;
+ case JSOp::ToNumeric:
+ writer.loadInt32Result(intId);
+ trackAttached("UnaryArith.StringInt32ToNumeric");
+ break;
+ default:
+ MOZ_CRASH("Unexpected OP");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+AttachDecision UnaryArithIRGenerator::tryAttachStringNumber() {
+ if (!val_.isString()) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(res_.isNumber());
+
+ // Case should have been handled by tryAttachBitwise.
+ MOZ_ASSERT(op_ != JSOp::BitNot);
+
+ ValOperandId valId(writer.setInputOperandId(0));
+ StringOperandId stringId = writer.guardToString(valId);
+ NumberOperandId numId = writer.guardStringToNumber(stringId);
+
+ switch (op_) {
+ case JSOp::Pos:
+ writer.loadDoubleResult(numId);
+ trackAttached("UnaryArith.StringNumberPos");
+ break;
+ case JSOp::Neg:
+ writer.doubleNegationResult(numId);
+ trackAttached("UnaryArith.StringNumberNeg");
+ break;
+ case JSOp::Inc:
+ writer.doubleIncResult(numId);
+ trackAttached("UnaryArith.StringNumberInc");
+ break;
+ case JSOp::Dec:
+ writer.doubleDecResult(numId);
+ trackAttached("UnaryArith.StringNumberDec");
+ break;
+ case JSOp::ToNumeric:
+ writer.loadDoubleResult(numId);
+ trackAttached("UnaryArith.StringNumberToNumeric");
+ break;
+ default:
+ MOZ_CRASH("Unexpected OP");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+ToPropertyKeyIRGenerator::ToPropertyKeyIRGenerator(JSContext* cx,
+ HandleScript script,
+ jsbytecode* pc,
+ ICState state,
+ HandleValue val)
+ : IRGenerator(cx, script, pc, CacheKind::ToPropertyKey, state), val_(val) {}
+
+void ToPropertyKeyIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("val", val_);
+ }
+#endif
+}
+
+AttachDecision ToPropertyKeyIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+ TRY_ATTACH(tryAttachInt32());
+ TRY_ATTACH(tryAttachNumber());
+ TRY_ATTACH(tryAttachString());
+ TRY_ATTACH(tryAttachSymbol());
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+AttachDecision ToPropertyKeyIRGenerator::tryAttachInt32() {
+ if (!val_.isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+
+ Int32OperandId intId = writer.guardToInt32(valId);
+ writer.loadInt32Result(intId);
+ writer.returnFromIC();
+
+ trackAttached("ToPropertyKey.Int32");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToPropertyKeyIRGenerator::tryAttachNumber() {
+ if (!val_.isNumber()) {
+ return AttachDecision::NoAction;
+ }
+
+ // We allow negative zero here because ToPropertyKey(-0.0) is 0.
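+  // Fractional values don't attach, e.g. (illustrative) ToPropertyKey(1.5)
+  // is the string "1.5", not an int32 index.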
+ int32_t unused;
+ if (!mozilla::NumberEqualsInt32(val_.toNumber(), &unused)) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+
+ Int32OperandId intId = writer.guardToInt32Index(valId);
+ writer.loadInt32Result(intId);
+ writer.returnFromIC();
+
+ trackAttached("ToPropertyKey.Number");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToPropertyKeyIRGenerator::tryAttachString() {
+ if (!val_.isString()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+
+ StringOperandId strId = writer.guardToString(valId);
+ writer.loadStringResult(strId);
+ writer.returnFromIC();
+
+ trackAttached("ToPropertyKey.String");
+ return AttachDecision::Attach;
+}
+
+AttachDecision ToPropertyKeyIRGenerator::tryAttachSymbol() {
+ if (!val_.isSymbol()) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId valId(writer.setInputOperandId(0));
+
+ SymbolOperandId strId = writer.guardToSymbol(valId);
+ writer.loadSymbolResult(strId);
+ writer.returnFromIC();
+
+ trackAttached("ToPropertyKey.Symbol");
+ return AttachDecision::Attach;
+}
+
+BinaryArithIRGenerator::BinaryArithIRGenerator(JSContext* cx,
+ HandleScript script,
+ jsbytecode* pc, ICState state,
+ JSOp op, HandleValue lhs,
+ HandleValue rhs, HandleValue res)
+ : IRGenerator(cx, script, pc, CacheKind::BinaryArith, state),
+ op_(op),
+ lhs_(lhs),
+ rhs_(rhs),
+ res_(res) {}
+
+void BinaryArithIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.opcodeProperty("op", op_);
+ sp.valueProperty("rhs", rhs_);
+ sp.valueProperty("lhs", lhs_);
+ }
+#endif
+}
+
+AttachDecision BinaryArithIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+ // Arithmetic operations with Int32 operands
+ TRY_ATTACH(tryAttachInt32());
+
+ // Bitwise operations with Int32/Double/Boolean/Null/Undefined/String
+ // operands.
+ TRY_ATTACH(tryAttachBitwise());
+
+ // Arithmetic operations with Double operands. This needs to come after
+ // tryAttachInt32, as the guards overlap, and we'd prefer to attach the
+ // more specialized Int32 IC if it is possible.
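+  // For example (illustrative): |1 + 2| attaches the Int32 IC above, whereas
+  // |1.5 + 2| only satisfies the Double guards here.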
+ TRY_ATTACH(tryAttachDouble());
+
+ // String x {String,Number,Boolean,Null,Undefined}
+ TRY_ATTACH(tryAttachStringConcat());
+
+ // String x Object
+ TRY_ATTACH(tryAttachStringObjectConcat());
+
+ // Arithmetic operations or bitwise operations with BigInt operands
+ TRY_ATTACH(tryAttachBigInt());
+
+ // Arithmetic operations (without addition) with String x Int32.
+ TRY_ATTACH(tryAttachStringInt32Arith());
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+AttachDecision BinaryArithIRGenerator::tryAttachBitwise() {
+ // Only bit-wise and shifts.
+ if (op_ != JSOp::BitOr && op_ != JSOp::BitXor && op_ != JSOp::BitAnd &&
+ op_ != JSOp::Lsh && op_ != JSOp::Rsh && op_ != JSOp::Ursh) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check guard conditions
+ if (!CanTruncateToInt32(lhs_) || !CanTruncateToInt32(rhs_)) {
+ return AttachDecision::NoAction;
+ }
+
+ // All ops, with the exception of Ursh, produce Int32 values.
+ MOZ_ASSERT_IF(op_ != JSOp::Ursh, res_.isInt32());
+
+ ValOperandId lhsId(writer.setInputOperandId(0));
+ ValOperandId rhsId(writer.setInputOperandId(1));
+
+ Int32OperandId lhsIntId = EmitTruncateToInt32Guard(writer, lhsId, lhs_);
+ Int32OperandId rhsIntId = EmitTruncateToInt32Guard(writer, rhsId, rhs_);
+
+ switch (op_) {
+ case JSOp::BitOr:
+ writer.int32BitOrResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.BitwiseBitOr");
+ break;
+ case JSOp::BitXor:
+ writer.int32BitXorResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.BitwiseBitXor");
+ break;
+ case JSOp::BitAnd:
+ writer.int32BitAndResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.BitwiseBitAnd");
+ break;
+ case JSOp::Lsh:
+ writer.int32LeftShiftResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.BitwiseLeftShift");
+ break;
+ case JSOp::Rsh:
+ writer.int32RightShiftResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.BitwiseRightShift");
+ break;
+ case JSOp::Ursh:
+ writer.int32URightShiftResult(lhsIntId, rhsIntId, res_.isDouble());
+ trackAttached("BinaryArith.BitwiseUnsignedRightShift");
+ break;
+ default:
+ MOZ_CRASH("Unhandled op in tryAttachBitwise");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+AttachDecision BinaryArithIRGenerator::tryAttachDouble() {
+ // Check valid opcodes
+ if (op_ != JSOp::Add && op_ != JSOp::Sub && op_ != JSOp::Mul &&
+ op_ != JSOp::Div && op_ != JSOp::Mod && op_ != JSOp::Pow) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check guard conditions.
+ if (!CanConvertToDoubleForToNumber(lhs_) ||
+ !CanConvertToDoubleForToNumber(rhs_)) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId lhsId(writer.setInputOperandId(0));
+ ValOperandId rhsId(writer.setInputOperandId(1));
+
+ NumberOperandId lhs = EmitGuardToDoubleForToNumber(writer, lhsId, lhs_);
+ NumberOperandId rhs = EmitGuardToDoubleForToNumber(writer, rhsId, rhs_);
+
+ switch (op_) {
+ case JSOp::Add:
+ writer.doubleAddResult(lhs, rhs);
+ trackAttached("BinaryArith.DoubleAdd");
+ break;
+ case JSOp::Sub:
+ writer.doubleSubResult(lhs, rhs);
+ trackAttached("BinaryArith.DoubleSub");
+ break;
+ case JSOp::Mul:
+ writer.doubleMulResult(lhs, rhs);
+ trackAttached("BinaryArith.DoubleMul");
+ break;
+ case JSOp::Div:
+ writer.doubleDivResult(lhs, rhs);
+ trackAttached("BinaryArith.DoubleDiv");
+ break;
+ case JSOp::Mod:
+ writer.doubleModResult(lhs, rhs);
+ trackAttached("BinaryArith.DoubleMod");
+ break;
+ case JSOp::Pow:
+ writer.doublePowResult(lhs, rhs);
+ trackAttached("BinaryArith.DoublePow");
+ break;
+ default:
+ MOZ_CRASH("Unhandled Op");
+ }
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+AttachDecision BinaryArithIRGenerator::tryAttachInt32() {
+ // Check guard conditions.
+ if (!CanConvertToInt32ForToNumber(lhs_) ||
+ !CanConvertToInt32ForToNumber(rhs_)) {
+ return AttachDecision::NoAction;
+ }
+
+  // These ICs jump to failure() if the result can't be encoded as an Int32,
+  // so if the sample result is not an Int32 we should avoid attaching the IC.
+ if (!res_.isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (op_ != JSOp::Add && op_ != JSOp::Sub && op_ != JSOp::Mul &&
+ op_ != JSOp::Div && op_ != JSOp::Mod && op_ != JSOp::Pow) {
+ return AttachDecision::NoAction;
+ }
+
+ if (op_ == JSOp::Pow && !CanAttachInt32Pow(lhs_, rhs_)) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId lhsId(writer.setInputOperandId(0));
+ ValOperandId rhsId(writer.setInputOperandId(1));
+
+ Int32OperandId lhsIntId = EmitGuardToInt32ForToNumber(writer, lhsId, lhs_);
+ Int32OperandId rhsIntId = EmitGuardToInt32ForToNumber(writer, rhsId, rhs_);
+
+ switch (op_) {
+ case JSOp::Add:
+ writer.int32AddResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.Int32Add");
+ break;
+ case JSOp::Sub:
+ writer.int32SubResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.Int32Sub");
+ break;
+ case JSOp::Mul:
+ writer.int32MulResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.Int32Mul");
+ break;
+ case JSOp::Div:
+ writer.int32DivResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.Int32Div");
+ break;
+ case JSOp::Mod:
+ writer.int32ModResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.Int32Mod");
+ break;
+ case JSOp::Pow:
+ writer.int32PowResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.Int32Pow");
+ break;
+ default:
+ MOZ_CRASH("Unhandled op in tryAttachInt32");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+AttachDecision BinaryArithIRGenerator::tryAttachStringConcat() {
+ // Only Addition
+ if (op_ != JSOp::Add) {
+ return AttachDecision::NoAction;
+ }
+
+ // One side must be a string, the other side a primitive value we can easily
+ // convert to a string.
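+  // For example (illustrative): |1 + "px"| and |"id" + 3| both attach here
+  // and produce a string.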
+ if (!(lhs_.isString() && CanConvertToString(rhs_)) &&
+ !(CanConvertToString(lhs_) && rhs_.isString())) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId lhsId(writer.setInputOperandId(0));
+ ValOperandId rhsId(writer.setInputOperandId(1));
+
+ StringOperandId lhsStrId = emitToStringGuard(lhsId, lhs_);
+ StringOperandId rhsStrId = emitToStringGuard(rhsId, rhs_);
+
+ writer.callStringConcatResult(lhsStrId, rhsStrId);
+
+ writer.returnFromIC();
+ trackAttached("BinaryArith.StringConcat");
+ return AttachDecision::Attach;
+}
+
+AttachDecision BinaryArithIRGenerator::tryAttachStringObjectConcat() {
+ // Only Addition
+ if (op_ != JSOp::Add) {
+ return AttachDecision::NoAction;
+ }
+
+ // Check Guards
+  if (!(lhs_.isObject() && rhs_.isString()) &&
+      !(lhs_.isString() && rhs_.isObject())) {
+    return AttachDecision::NoAction;
+  }
+
+ ValOperandId lhsId(writer.setInputOperandId(0));
+ ValOperandId rhsId(writer.setInputOperandId(1));
+
+ // This guard is actually overly tight, as the runtime
+ // helper can handle lhs or rhs being a string, so long
+ // as the other is an object.
+ if (lhs_.isString()) {
+ writer.guardToString(lhsId);
+ writer.guardToObject(rhsId);
+ } else {
+ writer.guardToObject(lhsId);
+ writer.guardToString(rhsId);
+ }
+
+ writer.callStringObjectConcatResult(lhsId, rhsId);
+
+ writer.returnFromIC();
+ trackAttached("BinaryArith.StringObjectConcat");
+ return AttachDecision::Attach;
+}
+
+AttachDecision BinaryArithIRGenerator::tryAttachBigInt() {
+ // Check Guards
+ if (!lhs_.isBigInt() || !rhs_.isBigInt()) {
+ return AttachDecision::NoAction;
+ }
+
+ switch (op_) {
+ case JSOp::Add:
+ case JSOp::Sub:
+ case JSOp::Mul:
+ case JSOp::Div:
+ case JSOp::Mod:
+ case JSOp::Pow:
+ // Arithmetic operations.
+ break;
+
+ case JSOp::BitOr:
+ case JSOp::BitXor:
+ case JSOp::BitAnd:
+ case JSOp::Lsh:
+ case JSOp::Rsh:
+ // Bitwise operations.
+ break;
+
+ default:
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId lhsId(writer.setInputOperandId(0));
+ ValOperandId rhsId(writer.setInputOperandId(1));
+
+ BigIntOperandId lhsBigIntId = writer.guardToBigInt(lhsId);
+ BigIntOperandId rhsBigIntId = writer.guardToBigInt(rhsId);
+
+ switch (op_) {
+ case JSOp::Add:
+ writer.bigIntAddResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntAdd");
+ break;
+ case JSOp::Sub:
+ writer.bigIntSubResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntSub");
+ break;
+ case JSOp::Mul:
+ writer.bigIntMulResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntMul");
+ break;
+ case JSOp::Div:
+ writer.bigIntDivResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntDiv");
+ break;
+ case JSOp::Mod:
+ writer.bigIntModResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntMod");
+ break;
+ case JSOp::Pow:
+ writer.bigIntPowResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntPow");
+ break;
+ case JSOp::BitOr:
+ writer.bigIntBitOrResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntBitOr");
+ break;
+ case JSOp::BitXor:
+ writer.bigIntBitXorResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntBitXor");
+ break;
+ case JSOp::BitAnd:
+ writer.bigIntBitAndResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntBitAnd");
+ break;
+ case JSOp::Lsh:
+ writer.bigIntLeftShiftResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntLeftShift");
+ break;
+ case JSOp::Rsh:
+ writer.bigIntRightShiftResult(lhsBigIntId, rhsBigIntId);
+ trackAttached("BinaryArith.BigIntRightShift");
+ break;
+ default:
+ MOZ_CRASH("Unhandled op in tryAttachBigInt");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+AttachDecision BinaryArithIRGenerator::tryAttachStringInt32Arith() {
+ // Check for either int32 x string or string x int32.
+ if (!(lhs_.isInt32() && rhs_.isString()) &&
+ !(lhs_.isString() && rhs_.isInt32())) {
+ return AttachDecision::NoAction;
+ }
+
+  // The created ICs will fail if the result can't be encoded as an int32, so
+  // skip this IC if the sample result is not an int32.
+ if (!res_.isInt32()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Must _not_ support Add, because it would be string concatenation instead.
+ // For Pow we can't easily determine the CanAttachInt32Pow conditions so we
+ // reject that as well.
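+  // For example (illustrative): |"10" * 2| attaches here (the result is the
+  // int32 20), while |"10" + 2| is string concatenation and is handled above.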
+ if (op_ != JSOp::Sub && op_ != JSOp::Mul && op_ != JSOp::Div &&
+ op_ != JSOp::Mod) {
+ return AttachDecision::NoAction;
+ }
+
+ ValOperandId lhsId(writer.setInputOperandId(0));
+ ValOperandId rhsId(writer.setInputOperandId(1));
+
+ auto guardToInt32 = [&](ValOperandId id, const Value& v) {
+ if (v.isInt32()) {
+ return writer.guardToInt32(id);
+ }
+
+ MOZ_ASSERT(v.isString());
+ StringOperandId strId = writer.guardToString(id);
+ return writer.guardStringToInt32(strId);
+ };
+
+ Int32OperandId lhsIntId = guardToInt32(lhsId, lhs_);
+ Int32OperandId rhsIntId = guardToInt32(rhsId, rhs_);
+
+ switch (op_) {
+ case JSOp::Sub:
+ writer.int32SubResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.StringInt32Sub");
+ break;
+ case JSOp::Mul:
+ writer.int32MulResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.StringInt32Mul");
+ break;
+ case JSOp::Div:
+ writer.int32DivResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.StringInt32Div");
+ break;
+ case JSOp::Mod:
+ writer.int32ModResult(lhsIntId, rhsIntId);
+ trackAttached("BinaryArith.StringInt32Mod");
+ break;
+ default:
+ MOZ_CRASH("Unhandled op in tryAttachStringInt32Arith");
+ }
+
+ writer.returnFromIC();
+ return AttachDecision::Attach;
+}
+
+NewArrayIRGenerator::NewArrayIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state, JSOp op,
+ HandleObject templateObj,
+ BaselineFrame* frame)
+ : IRGenerator(cx, script, pc, CacheKind::NewArray, state),
+#ifdef JS_CACHEIR_SPEW
+ op_(op),
+#endif
+ templateObject_(templateObj),
+ frame_(frame) {
+ MOZ_ASSERT(templateObject_);
+}
+
+void NewArrayIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.opcodeProperty("op", op_);
+ }
+#endif
+}
+
+// Allocation sites are usually created during baseline compilation, but we
+// also need to create them when an IC stub is added to a baseline-compiled
+// script and during trial inlining.
+static gc::AllocSite* MaybeCreateAllocSite(jsbytecode* pc,
+ BaselineFrame* frame) {
+ MOZ_ASSERT(BytecodeOpCanHaveAllocSite(JSOp(*pc)));
+
+ JSScript* outerScript = frame->outerScript();
+ bool inInterpreter = frame->runningInInterpreter();
+ bool isInlined = frame->icScript()->isInlined();
+
+ if (inInterpreter && !isInlined) {
+ return outerScript->zone()->unknownAllocSite(JS::TraceKind::Object);
+ }
+
+ return outerScript->createAllocSite();
+}
+
+AttachDecision NewArrayIRGenerator::tryAttachArrayObject() {
+ ArrayObject* arrayObj = &templateObject_->as<ArrayObject>();
+
+ MOZ_ASSERT(arrayObj->numUsedFixedSlots() == 0);
+ MOZ_ASSERT(arrayObj->numDynamicSlots() == 0);
+ MOZ_ASSERT(!arrayObj->isSharedMemory());
+
+ // The macro assembler only supports creating arrays with fixed elements.
+ if (arrayObj->hasDynamicElements()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Stub doesn't support metadata builder
+ if (cx_->realm()->hasAllocationMetadataBuilder()) {
+ return AttachDecision::NoAction;
+ }
+
+ writer.guardNoAllocationMetadataBuilder(
+ cx_->realm()->addressOfMetadataBuilder());
+
+ gc::AllocSite* site = MaybeCreateAllocSite(pc_, frame_);
+ if (!site) {
+ return AttachDecision::NoAction;
+ }
+
+ Shape* shape = arrayObj->shape();
+ uint32_t length = arrayObj->length();
+
+ writer.newArrayObjectResult(length, shape, site);
+
+ writer.returnFromIC();
+
+ trackAttached("NewArray.Object");
+ return AttachDecision::Attach;
+}
+
+AttachDecision NewArrayIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+
+ TRY_ATTACH(tryAttachArrayObject());
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+NewObjectIRGenerator::NewObjectIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ JSOp op, HandleObject templateObj,
+ BaselineFrame* frame)
+ : IRGenerator(cx, script, pc, CacheKind::NewObject, state),
+#ifdef JS_CACHEIR_SPEW
+ op_(op),
+#endif
+ templateObject_(templateObj),
+ frame_(frame) {
+ MOZ_ASSERT(templateObject_);
+}
+
+void NewObjectIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.opcodeProperty("op", op_);
+ }
+#endif
+}
+
+AttachDecision NewObjectIRGenerator::tryAttachPlainObject() {
+ // Don't optimize allocations with too many dynamic slots. We use an unrolled
+  // loop when initializing slots, which avoids generating too much code.
+ static const uint32_t MaxDynamicSlotsToOptimize = 64;
+
+ NativeObject* nativeObj = &templateObject_->as<NativeObject>();
+ MOZ_ASSERT(nativeObj->is<PlainObject>());
+
+  // The stub doesn't support the allocation metadata builder.
+ if (cx_->realm()->hasAllocationMetadataBuilder()) {
+ return AttachDecision::NoAction;
+ }
+
+ if (nativeObj->numDynamicSlots() > MaxDynamicSlotsToOptimize) {
+ return AttachDecision::NoAction;
+ }
+
+ MOZ_ASSERT(!nativeObj->hasDynamicElements());
+ MOZ_ASSERT(!nativeObj->isSharedMemory());
+
+ gc::AllocSite* site = MaybeCreateAllocSite(pc_, frame_);
+ if (!site) {
+ return AttachDecision::NoAction;
+ }
+
+ uint32_t numFixedSlots = nativeObj->numUsedFixedSlots();
+ uint32_t numDynamicSlots = nativeObj->numDynamicSlots();
+ gc::AllocKind allocKind = nativeObj->allocKindForTenure();
+ Shape* shape = nativeObj->shape();
+
+ writer.guardNoAllocationMetadataBuilder(
+ cx_->realm()->addressOfMetadataBuilder());
+ writer.newPlainObjectResult(numFixedSlots, numDynamicSlots, allocKind, shape,
+ site);
+
+ writer.returnFromIC();
+
+ trackAttached("NewObject.PlainObject");
+ return AttachDecision::Attach;
+}
+
+AttachDecision NewObjectIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+
+ TRY_ATTACH(tryAttachPlainObject());
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+CloseIterIRGenerator::CloseIterIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleObject iter,
+ CompletionKind kind)
+ : IRGenerator(cx, script, pc, CacheKind::CloseIter, state),
+ iter_(iter),
+ kind_(kind) {}
+
+void CloseIterIRGenerator::trackAttached(const char* name) {
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("iter", ObjectValue(*iter_));
+ }
+#endif
+}
+
+AttachDecision CloseIterIRGenerator::tryAttachNoReturnMethod() {
+ Maybe<PropertyInfo> prop;
+ NativeObject* holder = nullptr;
+
+ // If we can guard that the iterator does not have a |return| method,
+ // then this CloseIter is a no-op.
+ NativeGetPropKind kind = CanAttachNativeGetProp(
+ cx_, iter_, NameToId(cx_->names().return_), &holder, &prop, pc_);
+ if (kind != NativeGetPropKind::Missing) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(!holder);
+
+ ObjOperandId objId(writer.setInputOperandId(0));
+
+ EmitMissingPropGuard(writer, &iter_->as<NativeObject>(), objId);
+
+ // There is no return method, so we don't have to do anything.
+ writer.returnFromIC();
+
+ trackAttached("CloseIter.NoReturn");
+ return AttachDecision::Attach;
+}
+
+AttachDecision CloseIterIRGenerator::tryAttachScriptedReturn() {
+ Maybe<PropertyInfo> prop;
+ NativeObject* holder = nullptr;
+
+ NativeGetPropKind kind = CanAttachNativeGetProp(
+ cx_, iter_, NameToId(cx_->names().return_), &holder, &prop, pc_);
+ if (kind != NativeGetPropKind::Slot) {
+ return AttachDecision::NoAction;
+ }
+ MOZ_ASSERT(holder);
+ MOZ_ASSERT(prop->isDataProperty());
+
+ size_t slot = prop->slot();
+ Value calleeVal = holder->getSlot(slot);
+ if (!calleeVal.isObject() || !calleeVal.toObject().is<JSFunction>()) {
+ return AttachDecision::NoAction;
+ }
+
+ JSFunction* callee = &calleeVal.toObject().as<JSFunction>();
+ if (!callee->hasJitEntry()) {
+ return AttachDecision::NoAction;
+ }
+ if (callee->isClassConstructor()) {
+ return AttachDecision::NoAction;
+ }
+
+ // We don't support cross-realm |return|.
+ if (cx_->realm() != callee->realm()) {
+ return AttachDecision::NoAction;
+ }
+
+ ObjOperandId objId(writer.setInputOperandId(0));
+
+ ObjOperandId holderId =
+ EmitReadSlotGuard(writer, &iter_->as<NativeObject>(), holder, objId);
+
+ ValOperandId calleeValId;
+ if (holder->isFixedSlot(slot)) {
+ size_t offset = NativeObject::getFixedSlotOffset(slot);
+ calleeValId = writer.loadFixedSlot(holderId, offset);
+ } else {
+ size_t index = holder->dynamicSlotIndex(slot);
+ calleeValId = writer.loadDynamicSlot(holderId, index);
+ }
+ ObjOperandId calleeId = writer.guardToObject(calleeValId);
+ emitCalleeGuard(calleeId, callee);
+
+ writer.closeIterScriptedResult(objId, calleeId, kind_, callee->nargs());
+
+ writer.returnFromIC();
+ trackAttached("CloseIter.ScriptedReturn");
+
+ return AttachDecision::Attach;
+}
+
+AttachDecision CloseIterIRGenerator::tryAttachStub() {
+ AutoAssertNoPendingException aanpe(cx_);
+
+ TRY_ATTACH(tryAttachNoReturnMethod());
+ TRY_ATTACH(tryAttachScriptedReturn());
+
+ trackAttached(IRGenerator::NotAttached);
+ return AttachDecision::NoAction;
+}
+
+#ifdef JS_SIMULATOR
+bool js::jit::CallAnyNative(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ JSObject* calleeObj = &args.callee();
+
+ MOZ_ASSERT(calleeObj->is<JSFunction>());
+ auto* calleeFunc = &calleeObj->as<JSFunction>();
+ MOZ_ASSERT(calleeFunc->isNativeWithoutJitEntry());
+
+ JSNative native = calleeFunc->native();
+ return native(cx, args.length(), args.base());
+}
+#endif
diff --git a/js/src/jit/CacheIR.h b/js/src/jit/CacheIR.h
new file mode 100644
index 0000000000..2485929df0
--- /dev/null
+++ b/js/src/jit/CacheIR.h
@@ -0,0 +1,528 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CacheIR_h
+#define jit_CacheIR_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h"
+
+#include "jit/CacheIROpsGenerated.h"
+#include "js/GCAnnotations.h"
+#include "js/Value.h"
+
+struct JS_PUBLIC_API JSContext;
+
+namespace js {
+namespace jit {
+
+// [SMDOC] CacheIR
+//
+// CacheIR is an (extremely simple) linear IR language for inline caches.
+// From this IR, we can generate machine code for Baseline or Ion IC stubs.
+//
+// IRWriter
+// --------
+// CacheIR bytecode is written using IRWriter. This class also records some
+// metadata that's used by the Baseline and Ion code generators to generate
+// (efficient) machine code.
+//
+// Sharing Baseline stub code
+// --------------------------
+// Baseline stores data (like Shape* and fixed slot offsets) inside the ICStub
+// structure, instead of embedding them directly in the JitCode. This makes
+// Baseline IC code slightly slower, but allows us to share IC code between
+// caches. CacheIR makes it easy to share code between stubs: stubs that have
+// the same CacheIR (and CacheKind) will have the same Baseline stub code.
+//
+// Baseline stubs that share JitCode also share a CacheIRStubInfo structure.
+// This class stores the CacheIR and the location of GC things stored in the
+// stub, for the GC.
+//
+// JitZone has a CacheIRStubInfo* -> JitCode* weak map that's used to share both
+// the IR and JitCode between Baseline CacheIR stubs. This HashMap owns the
+// stubInfo (it uses UniquePtr), so once there are no references left to the
+// shared stub code, we can also free the CacheIRStubInfo.
+//
+// Ion stubs
+// ---------
+// Unlike Baseline stubs, Ion stubs do not share stub code, and data stored in
+// the IonICStub is baked into JIT code. This is one of the reasons Ion stubs
+// are faster than Baseline stubs. Also note that Ion ICs contain more state
+// (see IonGetPropertyIC for example) and use dynamic input/output registers,
+// so sharing stub code for Ion would be much more difficult.
+
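+// Example (illustrative): an IR generator emits guards on its inputs, then
+// the operation itself, then returnFromIC. The helper below is hypothetical;
+// it only mirrors the shape of generators such as the String/Int32
+// arithmetic path in CacheIR.cpp:
+//
+//   AttachDecision tryAttachStringSub(CacheIRWriter& writer,
+//                                     ValOperandId lhsId,
+//                                     ValOperandId rhsId) {
+//     // Guard both inputs to strings, then convert them to int32.
+//     StringOperandId lhsStrId = writer.guardToString(lhsId);
+//     StringOperandId rhsStrId = writer.guardToString(rhsId);
+//     Int32OperandId lhsIntId = writer.guardStringToInt32(lhsStrId);
+//     Int32OperandId rhsIntId = writer.guardStringToInt32(rhsStrId);
+//     writer.int32SubResult(lhsIntId, rhsIntId);
+//     writer.returnFromIC();
+//     return AttachDecision::Attach;
+//   }
+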
+// An OperandId represents either a cache input or a value returned by a
+// CacheIR instruction. Most code should use the ValOperandId and ObjOperandId
+// classes below. The ObjOperandId class represents an operand that's known to
+// be an object, just as StringOperandId represents a known string, etc.
+class OperandId {
+ protected:
+ static const uint16_t InvalidId = UINT16_MAX;
+ uint16_t id_;
+
+ explicit OperandId(uint16_t id) : id_(id) {}
+
+ public:
+ OperandId() : id_(InvalidId) {}
+ uint16_t id() const { return id_; }
+ bool valid() const { return id_ != InvalidId; }
+};
+
+class ValOperandId : public OperandId {
+ public:
+ ValOperandId() = default;
+ explicit ValOperandId(uint16_t id) : OperandId(id) {}
+};
+
+class ValueTagOperandId : public OperandId {
+ public:
+ ValueTagOperandId() = default;
+ explicit ValueTagOperandId(uint16_t id) : OperandId(id) {}
+};
+
+class IntPtrOperandId : public OperandId {
+ public:
+ IntPtrOperandId() = default;
+ explicit IntPtrOperandId(uint16_t id) : OperandId(id) {}
+};
+
+class ObjOperandId : public OperandId {
+ public:
+ ObjOperandId() = default;
+ explicit ObjOperandId(uint16_t id) : OperandId(id) {}
+
+ bool operator==(const ObjOperandId& other) const { return id_ == other.id_; }
+ bool operator!=(const ObjOperandId& other) const { return id_ != other.id_; }
+};
+
+class NumberOperandId : public ValOperandId {
+ public:
+ NumberOperandId() = default;
+ explicit NumberOperandId(uint16_t id) : ValOperandId(id) {}
+};
+
+class StringOperandId : public OperandId {
+ public:
+ StringOperandId() = default;
+ explicit StringOperandId(uint16_t id) : OperandId(id) {}
+};
+
+class SymbolOperandId : public OperandId {
+ public:
+ SymbolOperandId() = default;
+ explicit SymbolOperandId(uint16_t id) : OperandId(id) {}
+};
+
+class BigIntOperandId : public OperandId {
+ public:
+ BigIntOperandId() = default;
+ explicit BigIntOperandId(uint16_t id) : OperandId(id) {}
+};
+
+class BooleanOperandId : public OperandId {
+ public:
+ BooleanOperandId() = default;
+ explicit BooleanOperandId(uint16_t id) : OperandId(id) {}
+};
+
+class Int32OperandId : public OperandId {
+ public:
+ Int32OperandId() = default;
+ explicit Int32OperandId(uint16_t id) : OperandId(id) {}
+};
+
+class TypedOperandId : public OperandId {
+ JSValueType type_;
+
+ public:
+ MOZ_IMPLICIT TypedOperandId(ObjOperandId id)
+ : OperandId(id.id()), type_(JSVAL_TYPE_OBJECT) {}
+ MOZ_IMPLICIT TypedOperandId(StringOperandId id)
+ : OperandId(id.id()), type_(JSVAL_TYPE_STRING) {}
+ MOZ_IMPLICIT TypedOperandId(SymbolOperandId id)
+ : OperandId(id.id()), type_(JSVAL_TYPE_SYMBOL) {}
+ MOZ_IMPLICIT TypedOperandId(BigIntOperandId id)
+ : OperandId(id.id()), type_(JSVAL_TYPE_BIGINT) {}
+ MOZ_IMPLICIT TypedOperandId(BooleanOperandId id)
+ : OperandId(id.id()), type_(JSVAL_TYPE_BOOLEAN) {}
+ MOZ_IMPLICIT TypedOperandId(Int32OperandId id)
+ : OperandId(id.id()), type_(JSVAL_TYPE_INT32) {}
+
+ MOZ_IMPLICIT TypedOperandId(ValueTagOperandId val)
+ : OperandId(val.id()), type_(JSVAL_TYPE_UNKNOWN) {}
+ MOZ_IMPLICIT TypedOperandId(IntPtrOperandId id)
+ : OperandId(id.id()), type_(JSVAL_TYPE_UNKNOWN) {}
+
+ TypedOperandId(ValOperandId val, JSValueType type)
+ : OperandId(val.id()), type_(type) {}
+
+ JSValueType type() const { return type_; }
+};
+
+#define CACHE_IR_KINDS(_) \
+ _(GetProp) \
+ _(GetElem) \
+ _(GetName) \
+ _(GetPropSuper) \
+ _(GetElemSuper) \
+ _(GetIntrinsic) \
+ _(SetProp) \
+ _(SetElem) \
+ _(BindName) \
+ _(In) \
+ _(HasOwn) \
+ _(CheckPrivateField) \
+ _(TypeOf) \
+ _(ToPropertyKey) \
+ _(InstanceOf) \
+ _(GetIterator) \
+ _(CloseIter) \
+ _(OptimizeSpreadCall) \
+ _(Compare) \
+ _(ToBool) \
+ _(Call) \
+ _(UnaryArith) \
+ _(BinaryArith) \
+ _(NewObject) \
+ _(NewArray)
+
+enum class CacheKind : uint8_t {
+#define DEFINE_KIND(kind) kind,
+ CACHE_IR_KINDS(DEFINE_KIND)
+#undef DEFINE_KIND
+};
+
+extern const char* const CacheKindNames[];
+
+extern size_t NumInputsForCacheKind(CacheKind kind);
+
+enum class CacheOp {
+#define DEFINE_OP(op, ...) op,
+ CACHE_IR_OPS(DEFINE_OP)
+#undef DEFINE_OP
+};
+
+// CacheIR opcode info that's read in performance-sensitive code. Stored as a
+// single byte per op for better cache locality.
+struct CacheIROpInfo {
+ uint8_t argLength : 7;
+ bool transpile : 1;
+};
+static_assert(sizeof(CacheIROpInfo) == 1);
+extern const CacheIROpInfo CacheIROpInfos[];
+
+extern const char* const CacheIROpNames[];
+
+inline const char* CacheIRCodeName(CacheOp op) {
+ return CacheIROpNames[static_cast<size_t>(op)];
+}
+
+extern const uint32_t CacheIROpHealth[];
+
+class StubField {
+ public:
+ enum class Type : uint8_t {
+ // These fields take up a single word.
+ RawInt32,
+ RawPointer,
+ Shape,
+ GetterSetter,
+ JSObject,
+ Symbol,
+ String,
+ BaseScript,
+ JitCode,
+
+ Id,
+ AllocSite,
+
+ // These fields take up 64 bits on all platforms.
+ RawInt64,
+ First64BitType = RawInt64,
+ Value,
+ Double,
+
+ Limit
+ };
+
+ static bool sizeIsWord(Type type) {
+ MOZ_ASSERT(type != Type::Limit);
+ return type < Type::First64BitType;
+ }
+
+ static bool sizeIsInt64(Type type) {
+ MOZ_ASSERT(type != Type::Limit);
+ return type >= Type::First64BitType;
+ }
+
+ static size_t sizeInBytes(Type type) {
+ if (sizeIsWord(type)) {
+ return sizeof(uintptr_t);
+ }
+ MOZ_ASSERT(sizeIsInt64(type));
+ return sizeof(int64_t);
+ }
+
+ private:
+ uint64_t data_;
+ Type type_;
+
+ public:
+ StubField(uint64_t data, Type type) : data_(data), type_(type) {
+ MOZ_ASSERT_IF(sizeIsWord(), data <= UINTPTR_MAX);
+ }
+
+ Type type() const { return type_; }
+
+ bool sizeIsWord() const { return sizeIsWord(type_); }
+ bool sizeIsInt64() const { return sizeIsInt64(type_); }
+
+ size_t sizeInBytes() const { return sizeInBytes(type_); }
+
+ uintptr_t asWord() const {
+ MOZ_ASSERT(sizeIsWord());
+ return uintptr_t(data_);
+ }
+ uint64_t asInt64() const {
+ MOZ_ASSERT(sizeIsInt64());
+ return data_;
+ }
+} JS_HAZ_GC_POINTER;
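+
+// Example (illustrative): word-sized fields such as Shape occupy
+// sizeof(uintptr_t) bytes in the stub data, while fields at or after
+// First64BitType always occupy 8 bytes, even on 32-bit platforms:
+//
+//   MOZ_ASSERT(StubField::sizeInBytes(StubField::Type::Shape) ==
+//              sizeof(uintptr_t));
+//   MOZ_ASSERT(StubField::sizeInBytes(StubField::Type::Value) ==
+//              sizeof(int64_t));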
+
+// This class is used to wrap up information about a call to make it
+// easier to convey from one function to another. (In particular,
+// CacheIRWriter encodes the CallFlags in CacheIR, and CacheIRReader
+// decodes them and uses them for compilation.)
+class CallFlags {
+ public:
+ enum ArgFormat : uint8_t {
+ Unknown,
+ Standard,
+ Spread,
+ FunCall,
+ FunApplyArgsObj,
+ FunApplyArray,
+ FunApplyNullUndefined,
+ LastArgFormat = FunApplyNullUndefined
+ };
+
+ CallFlags() = default;
+ explicit CallFlags(ArgFormat format) : argFormat_(format) {}
+ CallFlags(bool isConstructing, bool isSpread, bool isSameRealm = false,
+ bool needsUninitializedThis = false)
+ : argFormat_(isSpread ? Spread : Standard),
+ isConstructing_(isConstructing),
+ isSameRealm_(isSameRealm),
+ needsUninitializedThis_(needsUninitializedThis) {}
+
+ ArgFormat getArgFormat() const { return argFormat_; }
+ bool isConstructing() const {
+ MOZ_ASSERT_IF(isConstructing_,
+ argFormat_ == Standard || argFormat_ == Spread);
+ return isConstructing_;
+ }
+ bool isSameRealm() const { return isSameRealm_; }
+ void setIsSameRealm() { isSameRealm_ = true; }
+
+ bool needsUninitializedThis() const { return needsUninitializedThis_; }
+ void setNeedsUninitializedThis() { needsUninitializedThis_ = true; }
+
+ uint8_t toByte() const {
+ // See CacheIRReader::callFlags()
+ MOZ_ASSERT(argFormat_ != ArgFormat::Unknown);
+ uint8_t value = getArgFormat();
+ if (isConstructing()) {
+ value |= CallFlags::IsConstructing;
+ }
+ if (isSameRealm()) {
+ value |= CallFlags::IsSameRealm;
+ }
+ if (needsUninitializedThis()) {
+ value |= CallFlags::NeedsUninitializedThis;
+ }
+ return value;
+ }
+
+ private:
+ ArgFormat argFormat_ = ArgFormat::Unknown;
+ bool isConstructing_ = false;
+ bool isSameRealm_ = false;
+ bool needsUninitializedThis_ = false;
+
+ // Used for encoding/decoding
+ static const uint8_t ArgFormatBits = 4;
+ static const uint8_t ArgFormatMask = (1 << ArgFormatBits) - 1;
+ static_assert(LastArgFormat <= ArgFormatMask, "Not enough arg format bits");
+ static const uint8_t IsConstructing = 1 << 5;
+ static const uint8_t IsSameRealm = 1 << 6;
+ static const uint8_t NeedsUninitializedThis = 1 << 7;
+
+ friend class CacheIRReader;
+ friend class CacheIRWriter;
+};
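+
+// Example (illustrative): toByte() packs the ArgFormat into the low bits and
+// ORs the boolean flags above it. A constructing, same-realm, non-spread
+// call therefore encodes as Standard | IsConstructing | IsSameRealm:
+//
+//   CallFlags flags(/* isConstructing = */ true, /* isSpread = */ false,
+//                   /* isSameRealm = */ true);
+//   uint8_t encoded = flags.toByte();  // == 1 | (1 << 5) | (1 << 6) == 97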
+
+// In baseline, we have to copy args onto the stack. Below this threshold, we
+// will unroll the arg copy loop. We need to clamp this before providing it as
+// an arg to a CacheIR op so that everything 5 or greater can share an IC.
+const uint32_t MaxUnrolledArgCopy = 5;
+inline uint32_t ClampFixedArgc(uint32_t argc) {
+ return std::min(argc, MaxUnrolledArgCopy);
+}
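+
+// Example (illustrative): ClampFixedArgc(3) == 3, while ClampFixedArgc(12)
+// is clamped to MaxUnrolledArgCopy, so all call sites passing five or more
+// fixed args can share the same IC stub:
+//
+//   uint32_t fixedArgc = ClampFixedArgc(12);  // == MaxUnrolledArgCopy == 5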
+
+enum class AttachDecision {
+ // We cannot attach a stub.
+ NoAction,
+
+ // We can attach a stub.
+ Attach,
+
+ // We cannot currently attach a stub, but we expect to be able to do so in the
+ // future. In this case, we do not call trackNotAttached().
+ TemporarilyUnoptimizable,
+
+ // We want to attach a stub, but the result of the operation is
+ // needed to generate that stub. For example, AddSlot needs to know
+ // the resulting shape. Note: the attached stub will inspect the
+ // inputs to the operation, so most input checks should be done
+ // before the actual operation, with only minimal checks remaining
+ // for the deferred portion. This prevents arbitrary scripted code
+ // run by the operation from interfering with the conditions being
+ // checked.
+ Deferred
+};
+
+// If the input expression evaluates to an AttachDecision other than NoAction,
+// return that AttachDecision. If it is NoAction, do nothing.
+#define TRY_ATTACH(expr) \
+ do { \
+ AttachDecision tryAttachTempResult_ = expr; \
+ if (tryAttachTempResult_ != AttachDecision::NoAction) { \
+ return tryAttachTempResult_; \
+ } \
+ } while (0)
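+
+// Example usage (taken from NewArrayIRGenerator::tryAttachStub in
+// CacheIR.cpp above):
+//
+//   AttachDecision NewArrayIRGenerator::tryAttachStub() {
+//     AutoAssertNoPendingException aanpe(cx_);
+//     TRY_ATTACH(tryAttachArrayObject());
+//     trackAttached(IRGenerator::NotAttached);
+//     return AttachDecision::NoAction;
+//   }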
+
+// Set of arguments supported by GetIndexOfArgument.
+// Support for higher argument indices can be added easily, but is currently
+// unneeded.
+enum class ArgumentKind : uint8_t {
+ Callee,
+ This,
+ NewTarget,
+ Arg0,
+ Arg1,
+ Arg2,
+ Arg3,
+ Arg4,
+ Arg5,
+ Arg6,
+ Arg7,
+ NumKinds
+};
+
+const uint8_t ArgumentKindArgIndexLimit =
+ uint8_t(ArgumentKind::NumKinds) - uint8_t(ArgumentKind::Arg0);
+
+inline ArgumentKind ArgumentKindForArgIndex(uint32_t idx) {
+ MOZ_ASSERT(idx < ArgumentKindArgIndexLimit);
+ return ArgumentKind(uint32_t(ArgumentKind::Arg0) + idx);
+}
+
+// This function calculates the index of an argument based on the call flags.
+// addArgc is an out-parameter, indicating whether the value of argc should
+// be added to the return value to find the actual index.
+inline int32_t GetIndexOfArgument(ArgumentKind kind, CallFlags flags,
+ bool* addArgc) {
+ // *** STACK LAYOUT (bottom to top) *** ******** INDEX ********
+ // Callee <-- argc+1 + isConstructing
+ // ThisValue <-- argc + isConstructing
+ // Args: | Arg0 | | ArgArray | <-- argc-1 + isConstructing
+ // | Arg1 | --or-- | | <-- argc-2 + isConstructing
+ // | ... | | (if spread | <-- ...
+ // | ArgN | | call) | <-- 0 + isConstructing
+ // NewTarget (only if constructing) <-- 0 (if it exists)
+ //
+ // If this is a spread call, then argc is always 1, and we can calculate the
+ // index directly. If this is not a spread call, then the index of any
+ // argument other than NewTarget depends on argc.
+
+ // First we determine whether the caller needs to add argc.
+ switch (flags.getArgFormat()) {
+ case CallFlags::Standard:
+ *addArgc = true;
+ break;
+ case CallFlags::Spread:
+ // Spread calls do not have Arg1 or higher.
+ MOZ_ASSERT(kind <= ArgumentKind::Arg0);
+ *addArgc = false;
+ break;
+ case CallFlags::Unknown:
+ case CallFlags::FunCall:
+ case CallFlags::FunApplyArgsObj:
+ case CallFlags::FunApplyArray:
+ case CallFlags::FunApplyNullUndefined:
+ MOZ_CRASH("Currently unreachable");
+ break;
+ }
+
+ // Second, we determine the offset relative to argc.
+ bool hasArgumentArray = !*addArgc;
+ switch (kind) {
+ case ArgumentKind::Callee:
+ return flags.isConstructing() + hasArgumentArray + 1;
+ case ArgumentKind::This:
+ return flags.isConstructing() + hasArgumentArray;
+ case ArgumentKind::Arg0:
+ return flags.isConstructing() + hasArgumentArray - 1;
+ case ArgumentKind::Arg1:
+ return flags.isConstructing() + hasArgumentArray - 2;
+ case ArgumentKind::Arg2:
+ return flags.isConstructing() + hasArgumentArray - 3;
+ case ArgumentKind::Arg3:
+ return flags.isConstructing() + hasArgumentArray - 4;
+ case ArgumentKind::Arg4:
+ return flags.isConstructing() + hasArgumentArray - 5;
+ case ArgumentKind::Arg5:
+ return flags.isConstructing() + hasArgumentArray - 6;
+ case ArgumentKind::Arg6:
+ return flags.isConstructing() + hasArgumentArray - 7;
+ case ArgumentKind::Arg7:
+ return flags.isConstructing() + hasArgumentArray - 8;
+ case ArgumentKind::NewTarget:
+ MOZ_ASSERT(flags.isConstructing());
+ *addArgc = false;
+ return 0;
+ default:
+ MOZ_CRASH("Invalid argument kind");
+ }
+}
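+
+// Example (illustrative): for a standard, non-constructing call with
+// argc == 2, requesting Arg0 returns -1 and sets *addArgc, so the caller
+// computes the final index as argc - 1 == 1, matching the layout above:
+//
+//   bool addArgc = false;
+//   int32_t idx = GetIndexOfArgument(
+//       ArgumentKind::Arg0,
+//       CallFlags(/* isConstructing = */ false, /* isSpread = */ false),
+//       &addArgc);
+//   // idx == -1, addArgc == true, final index == argc + idx == 1.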
+
+// We use this enum as GuardClass operand, instead of storing Class* pointers
+// in the IR, to keep the IR compact and the same size on all platforms.
+enum class GuardClassKind : uint8_t {
+ Array,
+ PlainObject,
+ ArrayBuffer,
+ SharedArrayBuffer,
+ DataView,
+ MappedArguments,
+ UnmappedArguments,
+ WindowProxy,
+ JSFunction,
+ BoundFunction,
+ Set,
+ Map,
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CacheIR_h */
diff --git a/js/src/jit/CacheIRCloner.h b/js/src/jit/CacheIRCloner.h
new file mode 100644
index 0000000000..5e5e1d73af
--- /dev/null
+++ b/js/src/jit/CacheIRCloner.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CacheIRCloner_h
+#define jit_CacheIRCloner_h
+
+#include "mozilla/Attributes.h"
+
+#include <stdint.h>
+
+#include "NamespaceImports.h"
+
+#include "jit/CacheIR.h"
+#include "jit/CacheIROpsGenerated.h"
+#include "jit/CacheIRReader.h"
+#include "jit/CacheIRWriter.h"
+#include "js/Id.h"
+#include "js/Value.h"
+
+class JSAtom;
+class JSObject;
+class JSString;
+
+namespace JS {
+class Symbol;
+}
+
+namespace js {
+
+class BaseScript;
+class GetterSetter;
+class Shape;
+
+namespace gc {
+class AllocSite;
+}
+
+namespace jit {
+
+class CacheIRStubInfo;
+class ICCacheIRStub;
+class JitCode;
+
+class MOZ_RAII CacheIRCloner {
+ public:
+ explicit CacheIRCloner(ICCacheIRStub* stubInfo);
+
+ void cloneOp(CacheOp op, CacheIRReader& reader, CacheIRWriter& writer);
+
+ CACHE_IR_CLONE_GENERATED
+
+ private:
+ const CacheIRStubInfo* stubInfo_;
+ const uint8_t* stubData_;
+
+ uintptr_t readStubWord(uint32_t offset);
+ int64_t readStubInt64(uint32_t offset);
+
+ Shape* getShapeField(uint32_t stubOffset);
+ GetterSetter* getGetterSetterField(uint32_t stubOffset);
+ JSObject* getObjectField(uint32_t stubOffset);
+ JSString* getStringField(uint32_t stubOffset);
+ JSAtom* getAtomField(uint32_t stubOffset);
+ JS::Symbol* getSymbolField(uint32_t stubOffset);
+ BaseScript* getBaseScriptField(uint32_t stubOffset);
+ JitCode* getJitCodeField(uint32_t stubOffset);
+ uint32_t getRawInt32Field(uint32_t stubOffset);
+ const void* getRawPointerField(uint32_t stubOffset);
+ jsid getIdField(uint32_t stubOffset);
+ const Value getValueField(uint32_t stubOffset);
+ uint64_t getRawInt64Field(uint32_t stubOffset);
+ double getDoubleField(uint32_t stubOffset);
+ gc::AllocSite* getAllocSiteField(uint32_t stubOffset);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CacheIRCloner_h */
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
new file mode 100644
index 0000000000..53b694eee6
--- /dev/null
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -0,0 +1,9638 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/CacheIRCompiler.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/FunctionTypeTraits.h"
+#include "mozilla/MaybeOneOf.h"
+#include "mozilla/ScopeExit.h"
+
+#include <type_traits>
+#include <utility>
+
+#include "jslibmath.h"
+#include "jsmath.h"
+
+#include "builtin/DataViewObject.h"
+#include "builtin/MapObject.h"
+#include "builtin/Object.h"
+#include "gc/Allocator.h"
+#include "jit/BaselineCacheIRCompiler.h"
+#include "jit/CacheIRGenerator.h"
+#include "jit/IonCacheIRCompiler.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitZone.h"
+#include "jit/SharedICHelpers.h"
+#include "jit/SharedICRegisters.h"
+#include "jit/VMFunctions.h"
+#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
+#include "js/friend/XrayJitInfo.h" // js::jit::GetXrayJitInfo
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "proxy/DOMProxy.h"
+#include "proxy/Proxy.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/ArrayBufferViewObject.h"
+#include "vm/BigIntType.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GeneratorObject.h"
+#include "vm/GetterSetter.h"
+#include "vm/Interpreter.h"
+#include "vm/Uint8Clamped.h"
+
+#include "builtin/Boolean-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "jit/SharedICHelpers-inl.h"
+#include "jit/VMFunctionList-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::BitwiseCast;
+using mozilla::Maybe;
+
+using JS::ExpandoAndGeneration;
+
+ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
+ ValOperandId op) {
+ OperandLocation& loc = operandLocations_[op.id()];
+
+ switch (loc.kind()) {
+ case OperandLocation::ValueReg:
+ currentOpRegs_.add(loc.valueReg());
+ return loc.valueReg();
+
+ case OperandLocation::ValueStack: {
+ ValueOperand reg = allocateValueRegister(masm);
+ popValue(masm, &loc, reg);
+ return reg;
+ }
+
+ case OperandLocation::BaselineFrame: {
+ ValueOperand reg = allocateValueRegister(masm);
+ Address addr = addressOf(masm, loc.baselineFrameSlot());
+ masm.loadValue(addr, reg);
+ loc.setValueReg(reg);
+ return reg;
+ }
+
+ case OperandLocation::Constant: {
+ ValueOperand reg = allocateValueRegister(masm);
+ masm.moveValue(loc.constant(), reg);
+ loc.setValueReg(reg);
+ return reg;
+ }
+
+ case OperandLocation::PayloadReg: {
+ // Temporarily add the payload register to currentOpRegs_ so
+ // allocateValueRegister will stay away from it.
+ currentOpRegs_.add(loc.payloadReg());
+ ValueOperand reg = allocateValueRegister(masm);
+ masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
+ currentOpRegs_.take(loc.payloadReg());
+ availableRegs_.add(loc.payloadReg());
+ loc.setValueReg(reg);
+ return reg;
+ }
+
+ case OperandLocation::PayloadStack: {
+ ValueOperand reg = allocateValueRegister(masm);
+ popPayload(masm, &loc, reg.scratchReg());
+ masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
+ loc.setValueReg(reg);
+ return reg;
+ }
+
+ case OperandLocation::DoubleReg: {
+ ValueOperand reg = allocateValueRegister(masm);
+ {
+ ScratchDoubleScope fpscratch(masm);
+ masm.boxDouble(loc.doubleReg(), reg, fpscratch);
+ }
+ loc.setValueReg(reg);
+ return reg;
+ }
+
+ case OperandLocation::Uninitialized:
+ break;
+ }
+
+ MOZ_CRASH();
+}
+
+// Load a value operand directly into a float register. Caller must have
+// guarded isNumber on the provided val.
+void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
+ NumberOperandId op,
+ FloatRegister dest) const {
+ // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
+ // any stack slot offsets below.
+ int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;
+
+ const OperandLocation& loc = operandLocations_[op.id()];
+
+ Label failure, done;
+ switch (loc.kind()) {
+ case OperandLocation::ValueReg: {
+ masm.ensureDouble(loc.valueReg(), dest, &failure);
+ break;
+ }
+
+ case OperandLocation::ValueStack: {
+ Address addr = valueAddress(masm, &loc);
+ addr.offset += stackOffset;
+ masm.ensureDouble(addr, dest, &failure);
+ break;
+ }
+
+ case OperandLocation::BaselineFrame: {
+ Address addr = addressOf(masm, loc.baselineFrameSlot());
+ addr.offset += stackOffset;
+ masm.ensureDouble(addr, dest, &failure);
+ break;
+ }
+
+ case OperandLocation::DoubleReg: {
+ masm.moveDouble(loc.doubleReg(), dest);
+ return;
+ }
+
+ case OperandLocation::Constant: {
+ MOZ_ASSERT(loc.constant().isNumber(),
+ "Caller must ensure the operand is a number value");
+ masm.loadConstantDouble(loc.constant().toNumber(), dest);
+ return;
+ }
+
+ case OperandLocation::PayloadReg: {
+ // Doubles can't be stored in payload registers, so this must be an int32.
+ MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
+ "Caller must ensure the operand is a number value");
+ masm.convertInt32ToDouble(loc.payloadReg(), dest);
+ return;
+ }
+
+ case OperandLocation::PayloadStack: {
+ // Doubles can't be stored in payload registers, so this must be an int32.
+ MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
+ "Caller must ensure the operand is a number value");
+ MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
+ Address addr = payloadAddress(masm, &loc);
+ addr.offset += stackOffset;
+ masm.convertInt32ToDouble(addr, dest);
+ return;
+ }
+
+ case OperandLocation::Uninitialized:
+ MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
+ return;
+ }
+ masm.jump(&done);
+ masm.bind(&failure);
+ masm.assumeUnreachable(
+ "Missing guard allowed non-number to hit ensureDoubleRegister");
+ masm.bind(&done);
+}
+
+void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
+ TypedOperandId typedId,
+ Register dest) const {
+ // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
+ // any stack slot offsets below.
+ int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;
+
+ const OperandLocation& loc = operandLocations_[typedId.id()];
+
+ switch (loc.kind()) {
+ case OperandLocation::ValueReg: {
+ masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
+ break;
+ }
+ case OperandLocation::ValueStack: {
+ Address addr = valueAddress(masm, &loc);
+ addr.offset += stackOffset;
+ masm.unboxNonDouble(addr, dest, typedId.type());
+ break;
+ }
+ case OperandLocation::BaselineFrame: {
+ Address addr = addressOf(masm, loc.baselineFrameSlot());
+ addr.offset += stackOffset;
+ masm.unboxNonDouble(addr, dest, typedId.type());
+ break;
+ }
+ case OperandLocation::PayloadReg: {
+ MOZ_ASSERT(loc.payloadType() == typedId.type());
+ masm.mov(loc.payloadReg(), dest);
+ return;
+ }
+ case OperandLocation::PayloadStack: {
+ MOZ_ASSERT(loc.payloadType() == typedId.type());
+ MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
+ Address addr = payloadAddress(masm, &loc);
+ addr.offset += stackOffset;
+ masm.loadPtr(addr, dest);
+ return;
+ }
+ case OperandLocation::DoubleReg:
+ case OperandLocation::Constant:
+ case OperandLocation::Uninitialized:
+ MOZ_CRASH("Unhandled operand location");
+ }
+}
+
+void CacheRegisterAllocator::copyToScratchValueRegister(
+ MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
+ MOZ_ASSERT(!addedFailurePath_);
+ MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
+
+ const OperandLocation& loc = operandLocations_[valId.id()];
+ switch (loc.kind()) {
+ case OperandLocation::ValueReg:
+ masm.moveValue(loc.valueReg(), dest);
+ break;
+ case OperandLocation::ValueStack: {
+ Address addr = valueAddress(masm, &loc);
+ masm.loadValue(addr, dest);
+ break;
+ }
+ case OperandLocation::BaselineFrame: {
+ Address addr = addressOf(masm, loc.baselineFrameSlot());
+ masm.loadValue(addr, dest);
+ break;
+ }
+ case OperandLocation::Constant:
+ masm.moveValue(loc.constant(), dest);
+ break;
+ case OperandLocation::PayloadReg:
+ masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
+ break;
+ case OperandLocation::PayloadStack: {
+ Address addr = payloadAddress(masm, &loc);
+ masm.loadPtr(addr, dest.scratchReg());
+ masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
+ break;
+ }
+ case OperandLocation::DoubleReg: {
+ ScratchDoubleScope fpscratch(masm);
+ masm.boxDouble(loc.doubleReg(), dest, fpscratch);
+ break;
+ }
+ case OperandLocation::Uninitialized:
+ MOZ_CRASH();
+ }
+}
+
+Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
+ TypedOperandId typedId) {
+ MOZ_ASSERT(!addedFailurePath_);
+ MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
+
+ OperandLocation& loc = operandLocations_[typedId.id()];
+ switch (loc.kind()) {
+ case OperandLocation::PayloadReg:
+ currentOpRegs_.add(loc.payloadReg());
+ return loc.payloadReg();
+
+ case OperandLocation::ValueReg: {
+ // It's possible the value is still boxed: as an optimization, we unbox
+ // the first time we use a value as object.
+ ValueOperand val = loc.valueReg();
+ availableRegs_.add(val);
+ Register reg = val.scratchReg();
+ availableRegs_.take(reg);
+ masm.unboxNonDouble(val, reg, typedId.type());
+ loc.setPayloadReg(reg, typedId.type());
+ currentOpRegs_.add(reg);
+ return reg;
+ }
+
+ case OperandLocation::PayloadStack: {
+ Register reg = allocateRegister(masm);
+ popPayload(masm, &loc, reg);
+ return reg;
+ }
+
+ case OperandLocation::ValueStack: {
+ // The value is on the stack, but boxed. If it's on top of the stack we
+ // unbox it and then remove it from the stack, else we just unbox.
+ Register reg = allocateRegister(masm);
+ if (loc.valueStack() == stackPushed_) {
+ masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
+ typedId.type());
+ masm.addToStackPtr(Imm32(sizeof(js::Value)));
+ MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
+ stackPushed_ -= sizeof(js::Value);
+ } else {
+ MOZ_ASSERT(loc.valueStack() < stackPushed_);
+ masm.unboxNonDouble(
+ Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
+ reg, typedId.type());
+ }
+ loc.setPayloadReg(reg, typedId.type());
+ return reg;
+ }
+
+ case OperandLocation::BaselineFrame: {
+ Register reg = allocateRegister(masm);
+ Address addr = addressOf(masm, loc.baselineFrameSlot());
+ masm.unboxNonDouble(addr, reg, typedId.type());
+ loc.setPayloadReg(reg, typedId.type());
+ return reg;
+ };
+
+ case OperandLocation::Constant: {
+ Value v = loc.constant();
+ Register reg = allocateRegister(masm);
+ if (v.isString()) {
+ masm.movePtr(ImmGCPtr(v.toString()), reg);
+ } else if (v.isSymbol()) {
+ masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
+ } else if (v.isBigInt()) {
+ masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
+ } else {
+ MOZ_CRASH("Unexpected Value");
+ }
+ loc.setPayloadReg(reg, v.extractNonDoubleType());
+ return reg;
+ }
+
+ case OperandLocation::DoubleReg:
+ case OperandLocation::Uninitialized:
+ break;
+ }
+
+ MOZ_CRASH();
+}
+
+ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
+ MacroAssembler& masm, ValOperandId val) {
+ MOZ_ASSERT(!addedFailurePath_);
+ MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
+
+ OperandLocation& loc = operandLocations_[val.id()];
+ switch (loc.kind()) {
+ case OperandLocation::Constant:
+ return loc.constant();
+
+ case OperandLocation::PayloadReg:
+ case OperandLocation::PayloadStack: {
+ JSValueType payloadType = loc.payloadType();
+ Register reg = useRegister(masm, TypedOperandId(val, payloadType));
+ return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
+ AnyRegister(reg));
+ }
+
+ case OperandLocation::ValueReg:
+ case OperandLocation::ValueStack:
+ case OperandLocation::BaselineFrame:
+ return TypedOrValueRegister(useValueRegister(masm, val));
+
+ case OperandLocation::DoubleReg:
+ return TypedOrValueRegister(MIRType::Double,
+ AnyRegister(loc.doubleReg()));
+
+ case OperandLocation::Uninitialized:
+ break;
+ }
+
+ MOZ_CRASH();
+}
+
+Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
+ TypedOperandId typedId) {
+ MOZ_ASSERT(!addedFailurePath_);
+ MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
+
+ OperandLocation& loc = operandLocations_[typedId.id()];
+ MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
+
+ Register reg = allocateRegister(masm);
+ loc.setPayloadReg(reg, typedId.type());
+ return reg;
+}
+
+ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
+ ValOperandId val) {
+ MOZ_ASSERT(!addedFailurePath_);
+ MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
+
+ OperandLocation& loc = operandLocations_[val.id()];
+ MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
+
+ ValueOperand reg = allocateValueRegister(masm);
+ loc.setValueReg(reg);
+ return reg;
+}
+
+void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
+ // See if any operands are dead so we can reuse their registers. Note that
+ // we skip the input operands, as those are also used by failure paths, and
+ // we currently don't track those uses.
+ for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
+ i++) {
+ if (!writer_.operandIsDead(i, currentInstruction_)) {
+ continue;
+ }
+
+ OperandLocation& loc = operandLocations_[i];
+ switch (loc.kind()) {
+ case OperandLocation::PayloadReg:
+ availableRegs_.add(loc.payloadReg());
+ break;
+ case OperandLocation::ValueReg:
+ availableRegs_.add(loc.valueReg());
+ break;
+ case OperandLocation::PayloadStack:
+ masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
+ break;
+ case OperandLocation::ValueStack:
+ masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
+ break;
+ case OperandLocation::Uninitialized:
+ case OperandLocation::BaselineFrame:
+ case OperandLocation::Constant:
+ case OperandLocation::DoubleReg:
+ break;
+ }
+ loc.setUninitialized();
+ }
+}
+
+void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
+ // This should only be called when we are no longer using the operands,
+ // as we're discarding everything from the native stack. Set all operand
+ // locations to Uninitialized to catch bugs.
+ for (size_t i = 0; i < operandLocations_.length(); i++) {
+ operandLocations_[i].setUninitialized();
+ }
+
+ if (stackPushed_ > 0) {
+ masm.addToStackPtr(Imm32(stackPushed_));
+ stackPushed_ = 0;
+ }
+ freePayloadSlots_.clear();
+ freeValueSlots_.clear();
+}
+
+Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
+ MOZ_ASSERT(!addedFailurePath_);
+ MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
+
+ if (availableRegs_.empty()) {
+ freeDeadOperandLocations(masm);
+ }
+
+ if (availableRegs_.empty()) {
+ // Still no registers available, try to spill unused operands to
+ // the stack.
+ for (size_t i = 0; i < operandLocations_.length(); i++) {
+ OperandLocation& loc = operandLocations_[i];
+ if (loc.kind() == OperandLocation::PayloadReg) {
+ Register reg = loc.payloadReg();
+ if (currentOpRegs_.has(reg)) {
+ continue;
+ }
+
+ spillOperandToStack(masm, &loc);
+ availableRegs_.add(reg);
+ break; // We got a register, so break out of the loop.
+ }
+ if (loc.kind() == OperandLocation::ValueReg) {
+ ValueOperand reg = loc.valueReg();
+ if (currentOpRegs_.aliases(reg)) {
+ continue;
+ }
+
+ spillOperandToStack(masm, &loc);
+ availableRegs_.add(reg);
+ break; // Break out of the loop.
+ }
+ }
+ }
+
+ if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
+ Register reg = availableRegsAfterSpill_.takeAny();
+ masm.push(reg);
+ stackPushed_ += sizeof(uintptr_t);
+
+ masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
+
+ availableRegs_.add(reg);
+ }
+
+ // At this point, there must be a free register.
+ MOZ_RELEASE_ASSERT(!availableRegs_.empty());
+
+ Register reg = availableRegs_.takeAny();
+ currentOpRegs_.add(reg);
+ return reg;
+}
+
+void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
+ Register reg) {
+ MOZ_ASSERT(!addedFailurePath_);
+ MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
+
+ // Fixed registers should be allocated first, to ensure they're
+ // still available.
+ MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");
+
+ freeDeadOperandLocations(masm);
+
+ if (availableRegs_.has(reg)) {
+ availableRegs_.take(reg);
+ currentOpRegs_.add(reg);
+ return;
+ }
+
+ // Register may be available only after spilling contents.
+ if (availableRegsAfterSpill_.has(reg)) {
+ availableRegsAfterSpill_.take(reg);
+ masm.push(reg);
+ stackPushed_ += sizeof(uintptr_t);
+
+ masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
+ currentOpRegs_.add(reg);
+ return;
+ }
+
+ // The register must be used by some operand. Spill it to the stack.
+ for (size_t i = 0; i < operandLocations_.length(); i++) {
+ OperandLocation& loc = operandLocations_[i];
+ if (loc.kind() == OperandLocation::PayloadReg) {
+ if (loc.payloadReg() != reg) {
+ continue;
+ }
+
+ spillOperandToStackOrRegister(masm, &loc);
+ currentOpRegs_.add(reg);
+ return;
+ }
+ if (loc.kind() == OperandLocation::ValueReg) {
+ if (!loc.valueReg().aliases(reg)) {
+ continue;
+ }
+
+ ValueOperand valueReg = loc.valueReg();
+ spillOperandToStackOrRegister(masm, &loc);
+
+ availableRegs_.add(valueReg);
+ availableRegs_.take(reg);
+ currentOpRegs_.add(reg);
+ return;
+ }
+ }
+
+ MOZ_CRASH("Invalid register");
+}
+
+void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
+ ValueOperand reg) {
+#ifdef JS_NUNBOX32
+ allocateFixedRegister(masm, reg.payloadReg());
+ allocateFixedRegister(masm, reg.typeReg());
+#else
+ allocateFixedRegister(masm, reg.valueReg());
+#endif
+}
+
+#ifdef JS_NUNBOX32
+// Possible miscompilation in clang-12 (bug 1689641)
+MOZ_NEVER_INLINE
+#endif
+ValueOperand CacheRegisterAllocator::allocateValueRegister(
+ MacroAssembler& masm) {
+#ifdef JS_NUNBOX32
+ Register reg1 = allocateRegister(masm);
+ Register reg2 = allocateRegister(masm);
+ return ValueOperand(reg1, reg2);
+#else
+ Register reg = allocateRegister(masm);
+ return ValueOperand(reg);
+#endif
+}
+
+bool CacheRegisterAllocator::init() {
+ if (!origInputLocations_.resize(writer_.numInputOperands())) {
+ return false;
+ }
+ if (!operandLocations_.resize(writer_.numOperandIds())) {
+ return false;
+ }
+ return true;
+}
+
+void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
+ // Registers not in availableRegs_ and not used by input operands are
+ // available after being spilled.
+ availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
+ GeneralRegisterSet::Not(availableRegs_.set()),
+ GeneralRegisterSet::Not(inputRegisterSet()));
+}
+
+void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
+ // If IC inputs alias each other, make sure they are stored in different
+ // locations so we don't have to deal with this complexity in the rest of
+ // the allocator.
+ //
+ // Note that this can happen in IonMonkey with something like |o.foo = o|
+ // or |o[i] = i|.
+
+ size_t numInputs = writer_.numInputOperands();
+ MOZ_ASSERT(origInputLocations_.length() == numInputs);
+
+ for (size_t i = 1; i < numInputs; i++) {
+ OperandLocation& loc1 = operandLocations_[i];
+ if (!loc1.isInRegister()) {
+ continue;
+ }
+
+ for (size_t j = 0; j < i; j++) {
+ OperandLocation& loc2 = operandLocations_[j];
+ if (!loc1.aliasesReg(loc2)) {
+ continue;
+ }
+
+ // loc1 and loc2 alias so we spill one of them. If one is a
+ // ValueReg and the other is a PayloadReg, we have to spill the
+ // PayloadReg: spilling the ValueReg instead would leave its type
+ // register unallocated on 32-bit platforms.
+ if (loc1.kind() == OperandLocation::ValueReg) {
+ spillOperandToStack(masm, &loc2);
+ } else {
+ MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
+ spillOperandToStack(masm, &loc1);
+ break; // Spilled loc1, so nothing else will alias it.
+ }
+ }
+ }
+
+#ifdef DEBUG
+ assertValidState();
+#endif
+}
+
+GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
+ MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());
+
+ AllocatableGeneralRegisterSet result;
+ for (size_t i = 0; i < writer_.numInputOperands(); i++) {
+ const OperandLocation& loc = operandLocations_[i];
+ MOZ_ASSERT(loc == origInputLocations_[i]);
+
+ switch (loc.kind()) {
+ case OperandLocation::PayloadReg:
+ result.addUnchecked(loc.payloadReg());
+ continue;
+ case OperandLocation::ValueReg:
+ result.addUnchecked(loc.valueReg());
+ continue;
+ case OperandLocation::PayloadStack:
+ case OperandLocation::ValueStack:
+ case OperandLocation::BaselineFrame:
+ case OperandLocation::Constant:
+ case OperandLocation::DoubleReg:
+ continue;
+ case OperandLocation::Uninitialized:
+ break;
+ }
+ MOZ_CRASH("Invalid kind");
+ }
+
+ return result.set();
+}
+
+JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
+ const OperandLocation& loc = operandLocations_[val.id()];
+
+ switch (loc.kind()) {
+ case OperandLocation::ValueReg:
+ case OperandLocation::ValueStack:
+ case OperandLocation::BaselineFrame:
+ return JSVAL_TYPE_UNKNOWN;
+
+ case OperandLocation::PayloadStack:
+ case OperandLocation::PayloadReg:
+ return loc.payloadType();
+
+ case OperandLocation::Constant:
+ return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
+ : loc.constant().extractNonDoubleType();
+
+ case OperandLocation::DoubleReg:
+ return JSVAL_TYPE_DOUBLE;
+
+ case OperandLocation::Uninitialized:
+ break;
+ }
+
+ MOZ_CRASH("Invalid kind");
+}
+
+void CacheRegisterAllocator::initInputLocation(
+ size_t i, const TypedOrValueRegister& reg) {
+ if (reg.hasValue()) {
+ initInputLocation(i, reg.valueReg());
+ } else if (reg.typedReg().isFloat()) {
+ MOZ_ASSERT(reg.type() == MIRType::Double);
+ initInputLocation(i, reg.typedReg().fpu());
+ } else {
+ initInputLocation(i, reg.typedReg().gpr(),
+ ValueTypeFromMIRType(reg.type()));
+ }
+}
+
+void CacheRegisterAllocator::initInputLocation(
+ size_t i, const ConstantOrRegister& value) {
+ if (value.constant()) {
+ initInputLocation(i, value.value());
+ } else {
+ initInputLocation(i, value.reg());
+ }
+}
+
+void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
+ OperandLocation* loc) {
+ MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
+
+ if (loc->kind() == OperandLocation::ValueReg) {
+ if (!freeValueSlots_.empty()) {
+ uint32_t stackPos = freeValueSlots_.popCopy();
+ MOZ_ASSERT(stackPos <= stackPushed_);
+ masm.storeValue(loc->valueReg(),
+ Address(masm.getStackPointer(), stackPushed_ - stackPos));
+ loc->setValueStack(stackPos);
+ return;
+ }
+ stackPushed_ += sizeof(js::Value);
+ masm.pushValue(loc->valueReg());
+ loc->setValueStack(stackPushed_);
+ return;
+ }
+
+ MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
+
+ if (!freePayloadSlots_.empty()) {
+ uint32_t stackPos = freePayloadSlots_.popCopy();
+ MOZ_ASSERT(stackPos <= stackPushed_);
+ masm.storePtr(loc->payloadReg(),
+ Address(masm.getStackPointer(), stackPushed_ - stackPos));
+ loc->setPayloadStack(stackPos, loc->payloadType());
+ return;
+ }
+ stackPushed_ += sizeof(uintptr_t);
+ masm.push(loc->payloadReg());
+ loc->setPayloadStack(stackPushed_, loc->payloadType());
+}
+
+void CacheRegisterAllocator::spillOperandToStackOrRegister(
+ MacroAssembler& masm, OperandLocation* loc) {
+ MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
+
+ // If enough registers are available, use them.
+ if (loc->kind() == OperandLocation::ValueReg) {
+ static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
+ if (availableRegs_.set().size() >= BoxPieces) {
+ ValueOperand reg = availableRegs_.takeAnyValue();
+ masm.moveValue(loc->valueReg(), reg);
+ loc->setValueReg(reg);
+ return;
+ }
+ } else {
+ MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
+ if (!availableRegs_.empty()) {
+ Register reg = availableRegs_.takeAny();
+ masm.movePtr(loc->payloadReg(), reg);
+ loc->setPayloadReg(reg, loc->payloadType());
+ return;
+ }
+ }
+
+ // Not enough registers available, spill to the stack.
+ spillOperandToStack(masm, loc);
+}
+
+void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
+ OperandLocation* loc, Register dest) {
+ MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
+ MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));
+
+ // The payload is on the stack. If it's on top of the stack we can just
+ // pop it, else we emit a load.
+ if (loc->payloadStack() == stackPushed_) {
+ masm.pop(dest);
+ stackPushed_ -= sizeof(uintptr_t);
+ } else {
+ MOZ_ASSERT(loc->payloadStack() < stackPushed_);
+ masm.loadPtr(payloadAddress(masm, loc), dest);
+ masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
+ }
+
+ loc->setPayloadReg(dest, loc->payloadType());
+}
+
+Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
+ const OperandLocation* loc) const {
+ MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
+ return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
+}
+
+Address CacheRegisterAllocator::payloadAddress(
+ MacroAssembler& masm, const OperandLocation* loc) const {
+ MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
+ return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
+}
+
+void CacheRegisterAllocator::popValue(MacroAssembler& masm,
+ OperandLocation* loc, ValueOperand dest) {
+ MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
+ MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
+
+ // The Value is on the stack. If it's on top of the stack we can just
+ // pop it, else we emit a load.
+ if (loc->valueStack() == stackPushed_) {
+ masm.popValue(dest);
+ stackPushed_ -= sizeof(js::Value);
+ } else {
+ MOZ_ASSERT(loc->valueStack() < stackPushed_);
+ masm.loadValue(
+ Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
+ dest);
+ masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
+ }
+
+ loc->setValueReg(dest);
+}
+
+#ifdef DEBUG
+void CacheRegisterAllocator::assertValidState() const {
+ // Assert different operands don't have aliasing storage. We depend on this
+ // when spilling registers, for instance.
+
+ if (!JitOptions.fullDebugChecks) {
+ return;
+ }
+
+ for (size_t i = 0; i < operandLocations_.length(); i++) {
+ const auto& loc1 = operandLocations_[i];
+ if (loc1.isUninitialized()) {
+ continue;
+ }
+
+ for (size_t j = 0; j < i; j++) {
+ const auto& loc2 = operandLocations_[j];
+ if (loc2.isUninitialized()) {
+ continue;
+ }
+ MOZ_ASSERT(!loc1.aliasesReg(loc2));
+ }
+ }
+}
+#endif
+
+bool OperandLocation::aliasesReg(const OperandLocation& other) const {
+ MOZ_ASSERT(&other != this);
+
+ switch (other.kind_) {
+ case PayloadReg:
+ return aliasesReg(other.payloadReg());
+ case ValueReg:
+ return aliasesReg(other.valueReg());
+ case PayloadStack:
+ case ValueStack:
+ case BaselineFrame:
+ case Constant:
+ case DoubleReg:
+ return false;
+ case Uninitialized:
+ break;
+ }
+
+ MOZ_CRASH("Invalid kind");
+}
+
+void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
+ bool shouldDiscardStack) {
+ size_t numInputOperands = origInputLocations_.length();
+ MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);
+
+ for (size_t j = 0; j < numInputOperands; j++) {
+ const OperandLocation& dest = origInputLocations_[j];
+ OperandLocation& cur = operandLocations_[j];
+ if (dest == cur) {
+ continue;
+ }
+
+ auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });
+
+ // We have a cycle if a destination register will be used later
+    // as a source register. If that happens, just push the current value
+ // on the stack and later get it from there.
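+    // (For example, if input 0 must be restored into the register that
+    // currently holds input 1, input 1 is spilled to the stack here and
+    // reloaded from there when its own turn comes.)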
+ for (size_t k = j + 1; k < numInputOperands; k++) {
+ OperandLocation& laterSource = operandLocations_[k];
+ if (dest.aliasesReg(laterSource)) {
+ spillOperandToStack(masm, &laterSource);
+ }
+ }
+
+ if (dest.kind() == OperandLocation::ValueReg) {
+ // We have to restore a Value register.
+ switch (cur.kind()) {
+ case OperandLocation::ValueReg:
+ masm.moveValue(cur.valueReg(), dest.valueReg());
+ continue;
+ case OperandLocation::PayloadReg:
+ masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
+ continue;
+ case OperandLocation::PayloadStack: {
+ Register scratch = dest.valueReg().scratchReg();
+ popPayload(masm, &cur, scratch);
+ masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
+ continue;
+ }
+ case OperandLocation::ValueStack:
+ popValue(masm, &cur, dest.valueReg());
+ continue;
+ case OperandLocation::DoubleReg:
+ masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
+ continue;
+ case OperandLocation::Constant:
+ case OperandLocation::BaselineFrame:
+ case OperandLocation::Uninitialized:
+ break;
+ }
+ } else if (dest.kind() == OperandLocation::PayloadReg) {
+ // We have to restore a payload register.
+ switch (cur.kind()) {
+ case OperandLocation::ValueReg:
+ MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
+ masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
+ dest.payloadType());
+ continue;
+ case OperandLocation::PayloadReg:
+ MOZ_ASSERT(cur.payloadType() == dest.payloadType());
+ masm.mov(cur.payloadReg(), dest.payloadReg());
+ continue;
+ case OperandLocation::PayloadStack: {
+ MOZ_ASSERT(cur.payloadType() == dest.payloadType());
+ popPayload(masm, &cur, dest.payloadReg());
+ continue;
+ }
+ case OperandLocation::ValueStack:
+ MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
+ MOZ_ASSERT(cur.valueStack() <= stackPushed_);
+ MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
+ masm.unboxNonDouble(
+ Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
+ dest.payloadReg(), dest.payloadType());
+ continue;
+ case OperandLocation::Constant:
+ case OperandLocation::BaselineFrame:
+ case OperandLocation::DoubleReg:
+ case OperandLocation::Uninitialized:
+ break;
+ }
+ } else if (dest.kind() == OperandLocation::Constant ||
+ dest.kind() == OperandLocation::BaselineFrame ||
+ dest.kind() == OperandLocation::DoubleReg) {
+ // Nothing to do.
+ continue;
+ }
+
+ MOZ_CRASH("Invalid kind");
+ }
+
+ for (const SpilledRegister& spill : spilledRegs_) {
+ MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));
+
+ if (spill.stackPushed == stackPushed_) {
+ masm.pop(spill.reg);
+ stackPushed_ -= sizeof(uintptr_t);
+ } else {
+ MOZ_ASSERT(spill.stackPushed < stackPushed_);
+ masm.loadPtr(
+ Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
+ spill.reg);
+ }
+ }
+
+ if (shouldDiscardStack) {
+ discardStack(masm);
+ }
+}
+
+size_t CacheIRStubInfo::stubDataSize() const {
+ size_t field = 0;
+ size_t size = 0;
+ while (true) {
+ StubField::Type type = fieldType(field++);
+ if (type == StubField::Type::Limit) {
+ return size;
+ }
+ size += StubField::sizeInBytes(type);
+ }
+}
+
+template <typename T>
+static GCPtr<T>* AsGCPtr(uintptr_t* ptr) {
+ return reinterpret_cast<GCPtr<T>*>(ptr);
+}
+
+uintptr_t CacheIRStubInfo::getStubRawWord(const uint8_t* stubData,
+ uint32_t offset) const {
+ MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
+ return *reinterpret_cast<const uintptr_t*>(stubData + offset);
+}
+
+uintptr_t CacheIRStubInfo::getStubRawWord(ICCacheIRStub* stub,
+ uint32_t offset) const {
+ uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
+ return getStubRawWord(stubData, offset);
+}
+
+int64_t CacheIRStubInfo::getStubRawInt64(const uint8_t* stubData,
+ uint32_t offset) const {
+ MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(int64_t) == 0);
+ return *reinterpret_cast<const int64_t*>(stubData + offset);
+}
+
+int64_t CacheIRStubInfo::getStubRawInt64(ICCacheIRStub* stub,
+ uint32_t offset) const {
+ uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
+ return getStubRawInt64(stubData, offset);
+}
+
+void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
+ uintptr_t oldWord,
+ uintptr_t newWord) const {
+ MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
+ uintptr_t* addr = reinterpret_cast<uintptr_t*>(stubData + offset);
+ MOZ_ASSERT(*addr == oldWord);
+ *addr = newWord;
+}
+
+template <class Stub, class T>
+GCPtr<T>& CacheIRStubInfo::getStubField(Stub* stub, uint32_t offset) const {
+ uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
+ MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
+
+ return *AsGCPtr<T>((uintptr_t*)(stubData + offset));
+}
+
+template GCPtr<Shape*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+template GCPtr<GetterSetter*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+template GCPtr<JSObject*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+template GCPtr<JSString*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+template GCPtr<JSFunction*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+template GCPtr<JS::Symbol*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+template GCPtr<JS::Value>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+template GCPtr<jsid>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+template GCPtr<JSClass*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+template GCPtr<ArrayObject*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
+ ICCacheIRStub* stub, uint32_t offset) const;
+
+template <class Stub, class T>
+T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset) const {
+ uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
+ MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
+
+ return *reinterpret_cast<T**>(stubData + offset);
+}
+
+template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
+ uint32_t offset) const;
+
+template <typename T, typename V>
+static void InitGCPtr(uintptr_t* ptr, V val) {
+ AsGCPtr<T>(ptr)->init(mozilla::BitwiseCast<T>(val));
+}
+
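+// Copy the writer's stub fields into the stub's data area. Raw words are
+// copied as-is; GC pointers are written through GCPtr::init so the stub data
+// can later be traced by TraceCacheIRStub below.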
+void CacheIRWriter::copyStubData(uint8_t* dest) const {
+ MOZ_ASSERT(!failed());
+
+ uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);
+
+ for (const StubField& field : stubFields_) {
+ MOZ_ASSERT((uintptr_t(destWords) % field.sizeInBytes()) == 0,
+ "Unaligned stub field");
+
+ switch (field.type()) {
+ case StubField::Type::RawInt32:
+ case StubField::Type::RawPointer:
+ case StubField::Type::AllocSite:
+ *destWords = field.asWord();
+ break;
+ case StubField::Type::Shape:
+ InitGCPtr<Shape*>(destWords, field.asWord());
+ break;
+ case StubField::Type::GetterSetter:
+ InitGCPtr<GetterSetter*>(destWords, field.asWord());
+ break;
+ case StubField::Type::JSObject:
+ InitGCPtr<JSObject*>(destWords, field.asWord());
+ break;
+ case StubField::Type::Symbol:
+ InitGCPtr<JS::Symbol*>(destWords, field.asWord());
+ break;
+ case StubField::Type::String:
+ InitGCPtr<JSString*>(destWords, field.asWord());
+ break;
+ case StubField::Type::BaseScript:
+ InitGCPtr<BaseScript*>(destWords, field.asWord());
+ break;
+ case StubField::Type::JitCode:
+ InitGCPtr<JitCode*>(destWords, field.asWord());
+ break;
+ case StubField::Type::Id:
+ AsGCPtr<jsid>(destWords)->init(jsid::fromRawBits(field.asWord()));
+ break;
+ case StubField::Type::RawInt64:
+ case StubField::Type::Double:
+ *reinterpret_cast<uint64_t*>(destWords) = field.asInt64();
+ break;
+ case StubField::Type::Value:
+ AsGCPtr<Value>(destWords)->init(
+ Value::fromRawBits(uint64_t(field.asInt64())));
+ break;
+ case StubField::Type::Limit:
+ MOZ_CRASH("Invalid type");
+ }
+ destWords += StubField::sizeInBytes(field.type()) / sizeof(uintptr_t);
+ }
+}
+
+template <typename T>
+void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
+ const CacheIRStubInfo* stubInfo) {
+ uint32_t field = 0;
+ size_t offset = 0;
+ while (true) {
+ StubField::Type fieldType = stubInfo->fieldType(field);
+ switch (fieldType) {
+ case StubField::Type::RawInt32:
+ case StubField::Type::RawPointer:
+ case StubField::Type::RawInt64:
+ case StubField::Type::Double:
+ break;
+ case StubField::Type::Shape: {
+ // For CCW IC stubs, we can store same-zone but cross-compartment
+ // shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
+ // GC. Note: CacheIRWriter::writeShapeField asserts we never store
+ // cross-zone shapes.
+ GCPtr<Shape*>& shapeField =
+ stubInfo->getStubField<T, Shape*>(stub, offset);
+ TraceSameZoneCrossCompartmentEdge(trc, &shapeField, "cacheir-shape");
+ break;
+ }
+ case StubField::Type::GetterSetter:
+ TraceEdge(trc, &stubInfo->getStubField<T, GetterSetter*>(stub, offset),
+ "cacheir-getter-setter");
+ break;
+ case StubField::Type::JSObject:
+ TraceEdge(trc, &stubInfo->getStubField<T, JSObject*>(stub, offset),
+ "cacheir-object");
+ break;
+ case StubField::Type::Symbol:
+ TraceEdge(trc, &stubInfo->getStubField<T, JS::Symbol*>(stub, offset),
+ "cacheir-symbol");
+ break;
+ case StubField::Type::String:
+ TraceEdge(trc, &stubInfo->getStubField<T, JSString*>(stub, offset),
+ "cacheir-string");
+ break;
+ case StubField::Type::BaseScript:
+ TraceEdge(trc, &stubInfo->getStubField<T, BaseScript*>(stub, offset),
+ "cacheir-script");
+ break;
+ case StubField::Type::JitCode:
+ TraceEdge(trc, &stubInfo->getStubField<T, JitCode*>(stub, offset),
+ "cacheir-jitcode");
+ break;
+ case StubField::Type::Id:
+ TraceEdge(trc, &stubInfo->getStubField<T, jsid>(stub, offset),
+ "cacheir-id");
+ break;
+ case StubField::Type::Value:
+ TraceEdge(trc, &stubInfo->getStubField<T, JS::Value>(stub, offset),
+ "cacheir-value");
+ break;
+ case StubField::Type::AllocSite: {
+ gc::AllocSite* site =
+ stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
+ site->trace(trc);
+ break;
+ }
+ case StubField::Type::Limit:
+ return; // Done.
+ }
+ field++;
+ offset += StubField::sizeInBytes(fieldType);
+ }
+}
+
+template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
+ const CacheIRStubInfo* stubInfo);
+
+template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
+ const CacheIRStubInfo* stubInfo);
+
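+// Compare the writer's stub fields against an existing stub's data, one word
+// (or one int64 for 64-bit fields) at a time.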
+bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
+ MOZ_ASSERT(!failed());
+
+ const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);
+
+ for (const StubField& field : stubFields_) {
+ if (field.sizeIsWord()) {
+ if (field.asWord() != *stubDataWords) {
+ return false;
+ }
+ stubDataWords++;
+ continue;
+ }
+
+ if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
+ return false;
+ }
+ stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
+ }
+
+ return true;
+}
+
+bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData,
+ uint32_t ignoreOffset) const {
+ MOZ_ASSERT(!failed());
+
+ uint32_t offset = 0;
+ for (const StubField& field : stubFields_) {
+ if (offset != ignoreOffset) {
+ if (field.sizeIsWord()) {
+ uintptr_t raw = *reinterpret_cast<const uintptr_t*>(stubData + offset);
+ if (field.asWord() != raw) {
+ return false;
+ }
+ } else {
+ uint64_t raw = *reinterpret_cast<const uint64_t*>(stubData + offset);
+ if (field.asInt64() != raw) {
+ return false;
+ }
+ }
+ }
+ offset += StubField::sizeInBytes(field.type());
+ }
+
+ return true;
+}
+
+HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
+ HashNumber hash = mozilla::HashBytes(l.code, l.length);
+ hash = mozilla::AddToHash(hash, uint32_t(l.kind));
+ hash = mozilla::AddToHash(hash, uint32_t(l.engine));
+ return hash;
+}
+
+bool CacheIRStubKey::match(const CacheIRStubKey& entry,
+ const CacheIRStubKey::Lookup& l) {
+ if (entry.stubInfo->kind() != l.kind) {
+ return false;
+ }
+
+ if (entry.stubInfo->engine() != l.engine) {
+ return false;
+ }
+
+ if (entry.stubInfo->codeLength() != l.length) {
+ return false;
+ }
+
+ if (!mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length)) {
+ return false;
+ }
+
+ return true;
+}
+
+CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
+ : CacheIRReader(stubInfo->code(),
+ stubInfo->code() + stubInfo->codeLength()) {}
+
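+// CacheIRStubInfo is allocated together with the CacheIR bytecode and the
+// field type array in a single buffer:
+// [CacheIRStubInfo | code | field types | Limit terminator].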
+CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
+ bool makesGCCalls,
+ uint32_t stubDataOffset,
+ const CacheIRWriter& writer) {
+ size_t numStubFields = writer.numStubFields();
+ size_t bytesNeeded =
+ sizeof(CacheIRStubInfo) + writer.codeLength() +
+ (numStubFields + 1); // +1 for the StubField::Type::Limit terminator.
+ uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
+ if (!p) {
+ return nullptr;
+ }
+
+ // Copy the CacheIR code.
+ uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
+ mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());
+
+ static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
+ "StubField::Type must fit in uint8_t");
+
+ // Copy the stub field types.
+ uint8_t* fieldTypes = codeStart + writer.codeLength();
+ for (size_t i = 0; i < numStubFields; i++) {
+ fieldTypes[i] = uint8_t(writer.stubFieldType(i));
+ }
+ fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);
+
+ return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
+ writer.codeLength());
+}
+
+bool OperandLocation::operator==(const OperandLocation& other) const {
+ if (kind_ != other.kind_) {
+ return false;
+ }
+
+ switch (kind()) {
+ case Uninitialized:
+ return true;
+ case PayloadReg:
+ return payloadReg() == other.payloadReg() &&
+ payloadType() == other.payloadType();
+ case ValueReg:
+ return valueReg() == other.valueReg();
+ case PayloadStack:
+ return payloadStack() == other.payloadStack() &&
+ payloadType() == other.payloadType();
+ case ValueStack:
+ return valueStack() == other.valueStack();
+ case BaselineFrame:
+ return baselineFrameSlot() == other.baselineFrameSlot();
+ case Constant:
+ return constant() == other.constant();
+ case DoubleReg:
+ return doubleReg() == other.doubleReg();
+ }
+
+ MOZ_CRASH("Invalid OperandLocation kind");
+}
+
+AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
+ : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
+ if (output_.hasValue()) {
+ alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
+ } else if (!output_.typedReg().isFloat()) {
+ alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
+ }
+}
+
+AutoOutputRegister::~AutoOutputRegister() {
+ if (output_.hasValue()) {
+ alloc_.releaseValueRegister(output_.valueReg());
+ } else if (!output_.typedReg().isFloat()) {
+ alloc_.releaseRegister(output_.typedReg().gpr());
+ }
+}
+
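+// Two failure paths can share code only if they have the same stack depth,
+// the same spilled registers, and identical input operand locations.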
+bool FailurePath::canShareFailurePath(const FailurePath& other) const {
+ if (stackPushed_ != other.stackPushed_) {
+ return false;
+ }
+
+ if (spilledRegs_.length() != other.spilledRegs_.length()) {
+ return false;
+ }
+
+ for (size_t i = 0; i < spilledRegs_.length(); i++) {
+ if (spilledRegs_[i] != other.spilledRegs_[i]) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(inputs_.length() == other.inputs_.length());
+
+ for (size_t i = 0; i < inputs_.length(); i++) {
+ if (inputs_[i] != other.inputs_[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
+#ifdef DEBUG
+ allocator.setAddedFailurePath();
+#endif
+ MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());
+
+ FailurePath newFailure;
+ for (size_t i = 0; i < writer_.numInputOperands(); i++) {
+ if (!newFailure.appendInput(allocator.operandLocation(i))) {
+ return false;
+ }
+ }
+ if (!newFailure.setSpilledRegs(allocator.spilledRegs())) {
+ return false;
+ }
+ newFailure.setStackPushed(allocator.stackPushed());
+
+ // Reuse the previous failure path if the current one is the same, to
+ // avoid emitting duplicate code.
+ if (failurePaths.length() > 0 &&
+ failurePaths.back().canShareFailurePath(newFailure)) {
+ *failure = &failurePaths.back();
+ return true;
+ }
+
+ if (!failurePaths.append(std::move(newFailure))) {
+ return false;
+ }
+
+ *failure = &failurePaths.back();
+ return true;
+}
+
+bool CacheIRCompiler::emitFailurePath(size_t index) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ FailurePath& failure = failurePaths[index];
+
+ allocator.setStackPushed(failure.stackPushed());
+
+ for (size_t i = 0; i < writer_.numInputOperands(); i++) {
+ allocator.setOperandLocation(i, failure.input(i));
+ }
+
+ if (!allocator.setSpilledRegs(failure.spilledRegs())) {
+ return false;
+ }
+
+ masm.bind(failure.label());
+ allocator.restoreInputState(masm);
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ JSValueType knownType = allocator.knownType(inputId);
+
+ // Doubles and ints are numbers!
+ if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ masm.branchTestObject(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ JSValueType knownType = allocator.knownType(inputId);
+ if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label success;
+ masm.branchTestNull(Assembler::Equal, input, &success);
+ masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
+
+ masm.bind(&success);
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ JSValueType knownType = allocator.knownType(inputId);
+ if (knownType == JSVAL_TYPE_NULL) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestNull(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ JSValueType knownType = allocator.knownType(inputId);
+ if (knownType == JSVAL_TYPE_UNDEFINED) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestMagicValue(Assembler::Equal, val, JS_UNINITIALIZED_LEXICAL,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
+ Register input =
+ allocator.useRegister(masm, BooleanOperandId(inputId.id()));
+ masm.move32(input, output);
+ return true;
+ }
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.fallibleUnboxBoolean(input, output, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ masm.branchTestString(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestGCThing(Assembler::Equal, input, failure->label());
+ return true;
+}
+
+// Infallible |emitDouble| emitters can use this implementation to avoid
+// generating extra clean-up instructions to restore the scratch float
+// register. To select this overload, simply omit the |Label* fail| parameter
+// from the emitter lambda.
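+// For concrete examples of both forms, see emitGuardToUint8Clamped (a single
+// FloatRegister parameter) and emitGuardToInt32Index (FloatRegister plus
+// Label* fail) further down in this file.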
+template <typename EmitDouble>
+static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
+ void>
+EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
+ ValueOperand input, FailurePath* failure,
+ EmitDouble emitDouble) {
+ AutoScratchFloatRegister floatReg(compiler);
+
+ masm.unboxDouble(input, floatReg);
+ emitDouble(floatReg.get());
+}
+
+template <typename EmitDouble>
+static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
+ void>
+EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
+ ValueOperand input, FailurePath* failure,
+ EmitDouble emitDouble) {
+ AutoScratchFloatRegister floatReg(compiler, failure);
+
+ masm.unboxDouble(input, floatReg);
+ emitDouble(floatReg.get(), floatReg.failure());
+}
+
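+// Unbox an int32 or double value: int32 values are unboxed into |output| and
+// passed to |emitInt32|; doubles go through EmitGuardDouble above; any other
+// tag jumps to |failure|.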
+template <typename EmitInt32, typename EmitDouble>
+static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
+ MacroAssembler& masm, ValueOperand input,
+ Register output, FailurePath* failure,
+ EmitInt32 emitInt32, EmitDouble emitDouble) {
+ Label done;
+
+ {
+ ScratchTagScope tag(masm, input);
+ masm.splitTagForTest(input, tag);
+
+ Label notInt32;
+ masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ masm.unboxInt32(input, output);
+ emitInt32();
+
+ masm.jump(&done);
+ }
+ masm.bind(&notInt32);
+
+ masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ EmitGuardDouble(compiler, masm, input, failure, emitDouble);
+ }
+ }
+
+ masm.bind(&done);
+}
+
+bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
+ Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
+ masm.move32(input, output);
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ EmitGuardInt32OrDouble(
+ this, masm, input, output, failure,
+ []() {
+ // No-op if the value is already an int32.
+ },
+ [&](FloatRegister floatReg, Label* fail) {
+ // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
+ masm.convertDoubleToInt32(floatReg, output, fail, false);
+ });
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
+ IntPtrOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register input = allocator.useRegister(masm, inputId);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ masm.move32SignExtendToPtr(input, output);
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
+ bool supportOOB,
+ IntPtrOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register output = allocator.defineRegister(masm, resultId);
+
+ FailurePath* failure = nullptr;
+ if (!supportOOB) {
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ }
+
+ AutoScratchFloatRegister floatReg(this, failure);
+ allocator.ensureDoubleRegister(masm, inputId, floatReg);
+
+ // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
+ if (supportOOB) {
+ Label done, fail;
+ masm.convertDoubleToPtr(floatReg, output, &fail, false);
+ masm.jump(&done);
+
+ // Substitute the invalid index with an arbitrary out-of-bounds index.
+ masm.bind(&fail);
+ masm.movePtr(ImmWord(-1), output);
+
+ masm.bind(&done);
+ } else {
+ masm.convertDoubleToPtr(floatReg, output, floatReg.failure(), false);
+ }
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
+ ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
+ if (input.constant()) {
+ masm.move32(Imm32(input.value().toInt32()), output);
+ } else {
+ MOZ_ASSERT(input.reg().type() == MIRType::Int32);
+ masm.move32(input.reg().typedReg().gpr(), output);
+ }
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ EmitGuardInt32OrDouble(
+ this, masm, input, output, failure,
+ []() {
+ // No-op if the value is already an int32.
+ },
+ [&](FloatRegister floatReg, Label* fail) {
+ masm.branchTruncateDoubleMaybeModUint32(floatReg, output, fail);
+ });
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
+ ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
+ if (input.constant()) {
+ masm.move32(Imm32(ClampDoubleToUint8(input.value().toInt32())), output);
+ } else {
+ MOZ_ASSERT(input.reg().type() == MIRType::Int32);
+ masm.move32(input.reg().typedReg().gpr(), output);
+ masm.clampIntToUint8(output);
+ }
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ EmitGuardInt32OrDouble(
+ this, masm, input, output, failure,
+ [&]() {
+ // |output| holds the unboxed int32 value.
+ masm.clampIntToUint8(output);
+ },
+ [&](FloatRegister floatReg) {
+ masm.clampDoubleToUint8(floatReg, output);
+ });
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
+ ValueType type) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (allocator.knownType(inputId) == JSValueType(type)) {
+ return true;
+ }
+
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ switch (type) {
+ case ValueType::String:
+ masm.branchTestString(Assembler::NotEqual, input, failure->label());
+ break;
+ case ValueType::Symbol:
+ masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
+ break;
+ case ValueType::BigInt:
+ masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
+ break;
+ case ValueType::Int32:
+ masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
+ break;
+ case ValueType::Boolean:
+ masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
+ break;
+ case ValueType::Undefined:
+ masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
+ break;
+ case ValueType::Null:
+ masm.branchTestNull(Assembler::NotEqual, input, failure->label());
+ break;
+ case ValueType::Double:
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ case ValueType::Object:
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+#endif
+ MOZ_CRASH("unexpected type");
+ }
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ if (kind == GuardClassKind::JSFunction) {
+ if (objectGuardNeedsSpectreMitigations(objId)) {
+ masm.branchTestObjIsFunction(Assembler::NotEqual, obj, scratch, obj,
+ failure->label());
+ } else {
+ masm.branchTestObjIsFunctionNoSpectreMitigations(
+ Assembler::NotEqual, obj, scratch, failure->label());
+ }
+ return true;
+ }
+
+ const JSClass* clasp = nullptr;
+ switch (kind) {
+ case GuardClassKind::Array:
+ clasp = &ArrayObject::class_;
+ break;
+ case GuardClassKind::PlainObject:
+ clasp = &PlainObject::class_;
+ break;
+ case GuardClassKind::ArrayBuffer:
+ clasp = &ArrayBufferObject::class_;
+ break;
+ case GuardClassKind::SharedArrayBuffer:
+ clasp = &SharedArrayBufferObject::class_;
+ break;
+ case GuardClassKind::DataView:
+ clasp = &DataViewObject::class_;
+ break;
+ case GuardClassKind::MappedArguments:
+ clasp = &MappedArgumentsObject::class_;
+ break;
+ case GuardClassKind::UnmappedArguments:
+ clasp = &UnmappedArgumentsObject::class_;
+ break;
+ case GuardClassKind::WindowProxy:
+ clasp = cx_->runtime()->maybeWindowProxyClass();
+ break;
+ case GuardClassKind::Set:
+ clasp = &SetObject::class_;
+ break;
+ case GuardClassKind::Map:
+ clasp = &MapObject::class_;
+ break;
+ case GuardClassKind::BoundFunction:
+ clasp = &BoundFunctionObject::class_;
+ break;
+ case GuardClassKind::JSFunction:
+ MOZ_CRASH("JSFunction handled before switch");
+ }
+ MOZ_ASSERT(clasp);
+
+ if (objectGuardNeedsSpectreMitigations(objId)) {
+ masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
+ failure->label());
+ } else {
+ masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
+ scratch, failure->label());
+ }
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadObjProto(obj, scratch);
+ masm.branchTestPtr(Assembler::NonZero, scratch, scratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchIfObjectNotExtensible(obj, scratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
+ ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register expectedObject = allocator.useRegister(masm, expectedId);
+
+ // Allocate registers before the failure path to make sure they're registered
+ // by addFailurePath.
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Guard on the expected object.
+ StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
+ emitLoadStubField(slot, scratch2);
+ BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
+ masm.fallibleUnboxObject(expectedSlot, scratch1, failure->label());
+ masm.branchPtr(Assembler::NotEqual, expectedObject, scratch1,
+ failure->label());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardDynamicSlotIsNotObject(ObjOperandId objId,
+ uint32_t slotOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Guard that the slot isn't an object.
+ StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
+ emitLoadStubField(slot, scratch2);
+ BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
+ masm.branchTestObject(Assembler::Equal, expectedSlot, failure->label());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId,
+ uint32_t offsetOffset,
+ uint32_t valOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchValueRegister scratchVal(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
+ emitLoadStubField(offset, scratch);
+
+ StubFieldOffset val(valOffset, StubField::Type::Value);
+ emitLoadValueStubField(val, scratchVal);
+
+ BaseIndex slotVal(obj, scratch, TimesOne);
+ masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
+ uint32_t offsetOffset,
+ uint32_t valOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchValueRegister scratchVal(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
+
+ StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
+ emitLoadStubField(offset, scratch2);
+
+ StubFieldOffset val(valOffset, StubField::Type::Value);
+ emitLoadValueStubField(val, scratchVal);
+
+ BaseIndex slotVal(scratch1, scratch2, TimesOne);
+ masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadFixedSlot(ValOperandId resultId,
+ ObjOperandId objId,
+ uint32_t offsetOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ ValueOperand output = allocator.defineValueRegister(masm, resultId);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ StubFieldOffset slotIndex(offsetOffset, StubField::Type::RawInt32);
+ emitLoadStubField(slotIndex, scratch);
+
+ masm.loadValue(BaseIndex(obj, scratch, TimesOne), output);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDynamicSlot(ValOperandId resultId,
+ ObjOperandId objId,
+ uint32_t slotOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ ValueOperand output = allocator.defineValueRegister(masm, resultId);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch1(allocator, masm);
+ Register scratch2 = output.scratchReg();
+
+ StubFieldOffset slotIndex(slotOffset, StubField::Type::RawInt32);
+ emitLoadStubField(slotIndex, scratch2);
+
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
+ masm.loadValue(BaseObjectSlotIndex(scratch1, scratch2), output);
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchIfNonNativeObj(obj, scratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestObjectIsProxy(true, obj, scratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadObjClassUnsafe(obj, scratch);
+ masm.branchPtr(Assembler::Equal, scratch, ImmPtr(&ArrayBufferObject::class_),
+ failure->label());
+ masm.branchPtr(Assembler::Equal, scratch,
+ ImmPtr(&SharedArrayBufferObject::class_), failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadObjClassUnsafe(obj, scratch);
+ masm.branchIfClassIsNotTypedArray(scratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
+ GetDOMProxyHandlerFamily(),
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ // Make sure there are no dense elements.
+ Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardSpecificInt32(Int32OperandId numId,
+ int32_t expected) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register num = allocator.useRegister(masm, numId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branch32(Assembler::NotEqual, num, Imm32(expected), failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register str = allocator.useRegister(masm, strId);
+ Register output = allocator.defineRegister(masm, resultId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.guardStringToInt32(str, output, scratch, volatileRegs, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId,
+ NumberOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register str = allocator.useRegister(masm, strId);
+ ValueOperand output = allocator.defineValueRegister(masm, resultId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label vmCall, done;
+ // Use indexed value as fast path if possible.
+ masm.loadStringIndexValue(str, scratch, &vmCall);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output);
+ masm.jump(&done);
+ {
+ masm.bind(&vmCall);
+
+ // Reserve stack for holding the result value of the call.
+ masm.reserveStack(sizeof(double));
+ masm.moveStackPtrTo(output.payloadOrValueReg());
+
+ // We cannot use callVM, because callVM expects to be able to clobber all
+ // operands. However, since this op is not the last one in the generated IC,
+ // we still want to be able to reference other live values.
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(str);
+ masm.passABIArg(output.payloadOrValueReg());
+ masm.callWithABI<Fn, js::StringToNumberPure>();
+ masm.storeCallPointerResult(scratch);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch);
+ masm.PopRegsInMaskIgnore(volatileRegs, ignore);
+
+ Label ok;
+ masm.branchIfTrueBool(scratch, &ok);
+ {
+ // OOM path, recovered by StringToNumberPure.
+ //
+ // Use addToStackPtr instead of freeStack as freeStack tracks stack height
+ // flow-insensitively, and using it twice would confuse the stack height
+ // tracking.
+ masm.addToStackPtr(Imm32(sizeof(double)));
+ masm.jump(failure->label());
+ }
+ masm.bind(&ok);
+
+ {
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadDouble(Address(output.payloadOrValueReg(), 0), fpscratch);
+ masm.boxDouble(fpscratch, output, fpscratch);
+ }
+ masm.freeStack(sizeof(double));
+ }
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitNumberParseIntResult(StringOperandId strId,
+ Int32OperandId radixId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+ Register radix = allocator.useRegister(masm, radixId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
+
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
+ masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
+ masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
+ masm.bind(&ok);
+#endif
+
+ // Discard the stack to ensure it's balanced when we skip the vm-call.
+ allocator.discardStack(masm);
+
+ // Use indexed value as fast path if possible.
+ Label vmCall, done;
+ masm.loadStringIndexValue(str, scratch, &vmCall);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, callvm.outputValueReg());
+ masm.jump(&done);
+ {
+ masm.bind(&vmCall);
+
+ callvm.prepare();
+ masm.Push(radix);
+ masm.Push(str);
+
+ using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
+ callvm.call<Fn, js::NumberParseInt>();
+ }
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitDoubleParseIntResult(NumberOperandId numId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ allocator.ensureDoubleRegister(masm, numId, floatScratch1);
+
+ masm.branchDouble(Assembler::DoubleUnordered, floatScratch1, floatScratch1,
+ failure->label());
+ masm.branchTruncateDoubleToInt32(floatScratch1, scratch, failure->label());
+
+ Label ok;
+ masm.branch32(Assembler::NotEqual, scratch, Imm32(0), &ok);
+ {
+ // Accept both +0 and -0 and return 0.
+ masm.loadConstantDouble(0.0, floatScratch2);
+ masm.branchDouble(Assembler::DoubleEqual, floatScratch1, floatScratch2,
+ &ok);
+
+ // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
+ masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, floatScratch2);
+ masm.branchDouble(Assembler::DoubleLessThan, floatScratch1, floatScratch2,
+ failure->label());
+ }
+ masm.bind(&ok);
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId,
+ NumberOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register boolean = allocator.useRegister(masm, booleanId);
+ ValueOperand output = allocator.defineValueRegister(masm, resultId);
+ masm.tagValue(JSVAL_TYPE_INT32, boolean, output);
+ return true;
+}
+
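+// Convert a string to an index. Strings that cache an index value take the
+// fast path; otherwise we call GetIndexFromString, which returns a negative
+// value on failure.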
+bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register str = allocator.useRegister(masm, strId);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label vmCall, done;
+ masm.loadStringIndexValue(str, output, &vmCall);
+ masm.jump(&done);
+
+ {
+ masm.bind(&vmCall);
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = int32_t (*)(JSString* str);
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(str);
+ masm.callWithABI<Fn, GetIndexFromString>();
+ masm.storeCallInt32Result(output);
+
+ LiveRegisterSet ignore;
+ ignore.add(output);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ // GetIndexFromString returns a negative value on failure.
+ masm.branchTest32(Assembler::Signed, output, output, failure->label());
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadProto(ObjOperandId objId, ObjOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register reg = allocator.defineRegister(masm, resultId);
+ masm.loadObjProto(obj, reg);
+
+#ifdef DEBUG
+ // We shouldn't encounter a null or lazy proto.
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ Label done;
+ masm.branchPtr(Assembler::Above, reg, ImmWord(1), &done);
+ masm.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
+ masm.bind(&done);
+#endif
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
+ ObjOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register reg = allocator.defineRegister(masm, resultId);
+ masm.unboxObject(
+ Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
+ ObjOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register reg = allocator.defineRegister(masm, resultId);
+
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
+ masm.unboxObject(
+ Address(reg, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId,
+ ValueTagOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+ Register res = allocator.defineRegister(masm, resultId);
+
+ Register tag = masm.extractTag(val, res);
+ if (tag != res) {
+ masm.mov(tag, res);
+ }
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId,
+ ValOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand val = allocator.defineValueRegister(masm, resultId);
+
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
+ val.scratchReg());
+ masm.loadValue(Address(val.scratchReg(),
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
+ val);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
+ ObjOperandId objId, ValOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand output = allocator.defineValueRegister(masm, resultId);
+
+ // Determine the expando's Address.
+ Register scratch = output.scratchReg();
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
+ Address expandoAddr(scratch,
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
+
+#ifdef DEBUG
+ // Private values are stored as doubles, so assert we have a double.
+ Label ok;
+ masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
+ masm.assumeUnreachable("DOM expando is not a PrivateValue!");
+ masm.bind(&ok);
+#endif
+
+ // Load the ExpandoAndGeneration* from the PrivateValue.
+ masm.loadPrivate(expandoAddr, scratch);
+
+ // Load expandoAndGeneration->expando into the output Value register.
+ masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
+ output);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadUndefinedResult() {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ masm.moveValue(UndefinedValue(), output.valueReg());
+ return true;
+}
+
+static void EmitStoreBoolean(MacroAssembler& masm, bool b,
+ const AutoOutputRegister& output) {
+ if (output.hasValue()) {
+ Value val = BooleanValue(b);
+ masm.moveValue(val, output.valueReg());
+ } else {
+ MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
+ masm.movePtr(ImmWord(b), output.typedReg().gpr());
+ }
+}
+
+bool CacheIRCompiler::emitLoadBooleanResult(bool val) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ EmitStoreBoolean(masm, val, output);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ masm.moveValue(input, output.valueReg());
+ return true;
+}
+
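+// Store a typed result into the IC output: box it if the output is a Value,
+// convert int32 to double if the output is a float register, or move it
+// directly when the types already match.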
+static void EmitStoreResult(MacroAssembler& masm, Register reg,
+ JSValueType type,
+ const AutoOutputRegister& output) {
+ if (output.hasValue()) {
+ masm.tagValue(type, reg, output.valueReg());
+ return;
+ }
+ if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
+ masm.convertInt32ToDouble(reg, output.typedReg().fpu());
+ return;
+ }
+ if (type == output.type()) {
+ masm.mov(reg, output.typedReg().gpr());
+ return;
+ }
+ masm.assumeUnreachable("Should have monitored result");
+}
+
+bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+ masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
+
+ // Guard length fits in an int32.
+ masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register res = allocator.defineRegister(masm, resultId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
+ masm.load32(Address(res, ObjectElements::offsetOfLength()), res);
+
+ // Guard length fits in an int32.
+ masm.branchTest32(Assembler::Signed, res, res, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
+ allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
+
+ masm.addDouble(floatScratch1, floatScratch0);
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+
+ return true;
+}
+bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
+ allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
+
+ masm.subDouble(floatScratch1, floatScratch0);
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+
+ return true;
+}
+bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
+ allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
+
+ masm.mulDouble(floatScratch1, floatScratch0);
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+
+ return true;
+}
+bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
+ allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
+
+ masm.divDouble(floatScratch1, floatScratch0);
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+
+ return true;
+}
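+// Unlike the double ops above, modulo and pow are not inlined: they call into
+// C++ (js::NumberMod / js::ecmaPow) with the volatile registers saved around
+// the ABI call.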
+bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
+ allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = double (*)(double a, double b);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
+ masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
+ masm.callWithABI<Fn, js::NumberMod>(MoveOp::DOUBLE);
+ masm.storeCallFloatResult(floatScratch0);
+
+ LiveRegisterSet ignore;
+ ignore.add(floatScratch0);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+
+ return true;
+}
+bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
+ allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = double (*)(double x, double y);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
+ masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
+ masm.callWithABI<Fn, js::ecmaPow>(MoveOp::DOUBLE);
+ masm.storeCallFloatResult(floatScratch0);
+
+ LiveRegisterSet ignore;
+ ignore.add(floatScratch0);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.mov(rhs, scratch);
+ masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
+bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.mov(lhs, scratch);
+ masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label maybeNegZero, done;
+ masm.mov(lhs, scratch);
+ masm.branchMul32(Assembler::Overflow, rhs, scratch, failure->label());
+ masm.branchTest32(Assembler::Zero, scratch, scratch, &maybeNegZero);
+ masm.jump(&done);
+
+ masm.bind(&maybeNegZero);
+ masm.mov(lhs, scratch2);
+ // Result is -0 if exactly one of lhs or rhs is negative.
+ masm.or32(rhs, scratch2);
+ masm.branchTest32(Assembler::Signed, scratch2, scratch2, failure->label());
+
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+ AutoScratchRegister rem(allocator, masm);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Prevent division by 0.
+ masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());
+
+ // Prevent -2147483648 / -1.
+ Label notOverflow;
+ masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
+ masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
+ masm.bind(&notOverflow);
+
+ // Prevent negative 0.
+ Label notZero;
+ masm.branchTest32(Assembler::NonZero, lhs, lhs, &notZero);
+ masm.branchTest32(Assembler::Signed, rhs, rhs, failure->label());
+ masm.bind(&notZero);
+
+ masm.mov(lhs, scratch);
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.flexibleDivMod32(rhs, scratch, rem, false, volatileRegs);
+
+ // A remainder implies a double result.
+ masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+  // x % 0 results in NaN.
+ masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());
+
+ // Prevent -2147483648 % -1.
+ //
+ // Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
+ // called).
+ Label notOverflow;
+ masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
+ masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
+ masm.bind(&notOverflow);
+
+ masm.mov(lhs, scratch);
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.flexibleRemainder32(rhs, scratch, false, volatileRegs);
+
+ // Modulo takes the sign of the dividend; we can't return negative zero here.
+ Label notZero;
+ masm.branchTest32(Assembler::NonZero, scratch, scratch, &notZero);
+ masm.branchTest32(Assembler::Signed, lhs, lhs, failure->label());
+ masm.bind(&notZero);
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
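
Likewise for modulo: the guards cover x % 0 (NaN), INT32_MIN % -1 (a hardware
corner case), and the negative-zero case that arises because JS modulo takes
the sign of the dividend. A sketch of the condition, again with an invented
helper name:

    #include <cstdint>

    // True when lhs % rhs cannot be returned as an int32 by the fast path.
    bool int32ModNeedsBailout(int32_t lhs, int32_t rhs) {
      if (rhs == 0) {
        return true;  // x % 0 is NaN
      }
      if (lhs == INT32_MIN && rhs == -1) {
        return true;  // traps on x86; handled outside the fast path
      }
      // A zero remainder with a negative dividend is -0 in JS, e.g. -4 % 2.
      return lhs % rhs == 0 && lhs < 0;
    }
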
+
+bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register base = allocator.useRegister(masm, lhsId);
+ Register power = allocator.useRegister(masm, rhsId);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
+ AutoScratchRegister scratch3(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.pow32(base, power, scratch1, scratch2, scratch3, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ masm.mov(rhs, scratch);
+ masm.or32(lhs, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ masm.mov(rhs, scratch);
+ masm.xor32(lhs, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ masm.mov(rhs, scratch);
+ masm.and32(lhs, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ masm.mov(lhs, scratch);
+ masm.flexibleLshift32(rhs, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ masm.mov(lhs, scratch);
+ masm.flexibleRshift32Arithmetic(rhs, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
+ Int32OperandId rhsId,
+ bool forceDouble) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.mov(lhs, scratch);
+ masm.flexibleRshift32(rhs, scratch);
+ if (forceDouble) {
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertUInt32ToDouble(scratch, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ } else {
+ masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ }
+ return true;
+}
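
`>>>` produces an unsigned 32-bit result, so without forceDouble the stub can
only tag the value as an int32 when the sign bit is clear; with forceDouble it
boxes a double unconditionally and never bails. A sketch of the
non-forceDouble condition (helper name is illustration only):

    #include <cstdint>

    // True when (lhs >>> rhs) fits in int32 and can be tagged directly;
    // e.g. -1 >>> 0 is 4294967295 and must take the double path.
    bool urshFitsInt32(int32_t lhs, int32_t rhs) {
      uint32_t result = uint32_t(lhs) >> (uint32_t(rhs) & 31);
      return result <= uint32_t(INT32_MAX);
    }
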
+
+bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register val = allocator.useRegister(masm, inputId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+  // Guard against 0 and INT32_MIN by checking if the low 31 bits are all
+  // zero. Negating either of these would produce a double (-0 or 2147483648).
+ masm.branchTest32(Assembler::Zero, val, Imm32(0x7fffffff), failure->label());
+ masm.mov(val, scratch);
+ masm.neg32(scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
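
The single masked test above folds two checks into one: 0 and INT32_MIN are
the only int32 values whose low 31 bits are all zero, and they are exactly the
inputs whose negation (-0 and 2147483648) is not an int32. Expressed as a
sketch with an invented helper name:

    #include <cstdint>

    // (val & 0x7fffffff) == 0 holds exactly for val == 0 and val == INT32_MIN.
    bool int32NegNeedsBailout(int32_t val) {
      return (val & 0x7fffffff) == 0;
    }
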
+
+bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register input = allocator.useRegister(masm, inputId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.mov(input, scratch);
+ masm.branchAdd32(Assembler::Overflow, Imm32(1), scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register input = allocator.useRegister(masm, inputId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.mov(input, scratch);
+ masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register val = allocator.useRegister(masm, inputId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ masm.mov(val, scratch);
+ masm.not32(scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ AutoScratchFloatRegister floatReg(this);
+
+ allocator.ensureDoubleRegister(masm, inputId, floatReg);
+
+ masm.negateDouble(floatReg);
+ masm.boxDouble(floatReg, output.valueReg(), floatReg);
+
+ return true;
+}
+
+bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc,
+ NumberOperandId inputId) {
+ AutoOutputRegister output(*this);
+
+ AutoScratchFloatRegister floatReg(this);
+
+ allocator.ensureDoubleRegister(masm, inputId, floatReg);
+
+ {
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadConstantDouble(1.0, fpscratch);
+ if (isInc) {
+ masm.addDouble(fpscratch, floatReg);
+ } else {
+ masm.subDouble(fpscratch, floatReg);
+ }
+ }
+ masm.boxDouble(floatReg, output.valueReg(), floatReg);
+
+ return true;
+}
+
+bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitDoubleIncDecResult(true, inputId);
+}
+
+bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitDoubleIncDecResult(false, inputId);
+}
+
+template <typename Fn, Fn fn>
+bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ AutoCallVM callvm(masm, this, allocator);
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ callvm.prepare();
+
+ masm.Push(rhs);
+ masm.Push(lhs);
+
+ callvm.call<Fn, fn>();
+ return true;
+}
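
The helper above takes the VM function as a non-type template parameter, so
each BigInt op below is a one-line instantiation with a different callee baked
in at compile time. The same pattern in miniature, with ordinary int64_t
functions standing in for the BigInt VM entry points (all names here are
invented for illustration):

    #include <cstdint>

    int64_t addImpl(int64_t a, int64_t b) { return a + b; }
    int64_t subImpl(int64_t a, int64_t b) { return a - b; }

    template <typename Fn, Fn fn>
    int64_t emitShared(int64_t a, int64_t b) {
      return fn(a, b);  // fn is a compile-time constant, not a runtime pointer
    }

    int64_t emitAdd(int64_t a, int64_t b) {
      using Fn = int64_t (*)(int64_t, int64_t);
      return emitShared<Fn, addImpl>(a, b);
    }
    int64_t emitSub(int64_t a, int64_t b) {
      using Fn = int64_t (*)(int64_t, int64_t);
      return emitShared<Fn, subImpl>(a, b);
    }
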
+
+bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::add>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::sub>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::mul>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::div>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::mod>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::pow>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::bitAnd>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::bitOr>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::bitXor>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::lsh>(lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ return emitBigIntBinaryOperationShared<Fn, BigInt::rsh>(lhsId, rhsId);
+}
+
+template <typename Fn, Fn fn>
+bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId) {
+ AutoCallVM callvm(masm, this, allocator);
+ Register val = allocator.useRegister(masm, inputId);
+
+ callvm.prepare();
+
+ masm.Push(val);
+
+ callvm.call<Fn, fn>();
+ return true;
+}
+
+bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt);
+ return emitBigIntUnaryOperationShared<Fn, BigInt::bitNot>(inputId);
+}
+
+bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt);
+ return emitBigIntUnaryOperationShared<Fn, BigInt::neg>(inputId);
+}
+
+bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt);
+ return emitBigIntUnaryOperationShared<Fn, BigInt::inc>(inputId);
+}
+
+bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt);
+ return emitBigIntUnaryOperationShared<Fn, BigInt::dec>(inputId);
+}
+
+bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register res = allocator.defineRegister(masm, resultId);
+
+ AutoScratchFloatRegister floatReg(this);
+
+ allocator.ensureDoubleRegister(masm, inputId, floatReg);
+
+ Label done, truncateABICall;
+
+ masm.branchTruncateDoubleMaybeModUint32(floatReg, res, &truncateABICall);
+ masm.jump(&done);
+
+ masm.bind(&truncateABICall);
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ save.takeUnchecked(floatReg);
+ // Bug 1451976
+ save.takeUnchecked(floatReg.get().asSingle());
+ masm.PushRegsInMask(save);
+
+ using Fn = int32_t (*)(double);
+ masm.setupUnalignedABICall(res);
+ masm.passABIArg(floatReg, MoveOp::DOUBLE);
+ masm.callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ masm.storeCallInt32Result(res);
+
+ LiveRegisterSet ignore;
+ ignore.add(res);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.bind(&done);
+ return true;
+}
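
When the inline truncation fails, the fallback ABI call to JS::ToInt32 applies
the ECMAScript truncate-modulo-2^32 conversion. Roughly, in scalar terms (a
sketch of the semantics only, not the actual VM helper):

    #include <cmath>
    #include <cstdint>

    // NaN and infinities map to 0; everything else is truncated toward zero
    // and reduced modulo 2^32 into [0, 2^32).
    uint32_t truncateDoubleToUInt32(double d) {
      if (!std::isfinite(d)) {
        return 0;
      }
      double m = std::fmod(std::trunc(d), 4294967296.0);
      if (m < 0) {
        m += 4294967296.0;  // fmod keeps the sign of the operand
      }
      return uint32_t(m);
    }
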
+
+bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadArgumentsObjectLength(obj, scratch, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadArgumentsObjectLength(ObjOperandId objId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register res = allocator.defineRegister(masm, resultId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadArgumentsObjectLength(obj, res, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
+ masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
+ masm.convertIntPtrToDouble(scratch, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.convertIntPtrToDouble(scratch, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadBoundFunctionNumArgs(ObjOperandId objId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
+ output);
+ masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadBoundFunctionTarget(ObjOperandId objId,
+ ObjOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ masm.unboxObject(Address(obj, BoundFunctionObject::offsetOfTargetSlot()),
+ output);
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardBoundFunctionIsConstructor(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
+ masm.branchTest32(Assembler::Zero, flagsSlot,
+ Imm32(BoundFunctionObject::IsConstructorFlag),
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardObjectIdentity(ObjOperandId obj1Id,
+ ObjOperandId obj2Id) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj1 = allocator.useRegister(masm, obj1Id);
+ Register obj2 = allocator.useRegister(masm, obj2Id);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchPtr(Assembler::NotEqual, obj1, obj2, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Get the JSFunction flags and arg count.
+ masm.load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), scratch);
+
+ // Functions with a SelfHostedLazyScript must be compiled with the slow-path
+ // before the function length is known. If the length was previously resolved,
+ // the length property may be shadowed.
+ masm.branchTest32(
+ Assembler::NonZero, scratch,
+ Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
+ failure->label());
+
+ masm.loadFunctionLength(obj, scratch, scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadFunctionName(obj, scratch, ImmGCPtr(cx_->names().empty),
+ failure->label());
+
+ masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitLinearizeForCharAccess(StringOperandId strId,
+ Int32OperandId indexId,
+ StringOperandId resultId) {
+ Register str = allocator.useRegister(masm, strId);
+ Register index = allocator.useRegister(masm, indexId);
+ Register result = allocator.defineRegister(masm, resultId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label done;
+ masm.movePtr(str, result);
+
+ // We can omit the bounds check, because we only compare the index against the
+ // string length. In the worst case we unnecessarily linearize the string
+ // when the index is out-of-bounds.
+
+ masm.branchIfCanLoadStringChar(str, index, scratch, &done);
+ {
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = JSLinearString* (*)(JSString*);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(str);
+ masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
+ masm.storeCallPointerResult(result);
+
+ LiveRegisterSet ignore;
+ ignore.add(result);
+ masm.PopRegsInMaskIgnore(volatileRegs, ignore);
+
+ masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register str = allocator.useRegister(masm, strId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ masm.loadStringLength(str, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId,
+ Int32OperandId indexId,
+ bool handleOOB) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register str = allocator.useRegister(masm, strId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
+ AutoScratchRegister scratch3(allocator, masm);
+
+ // Bounds check, load string char.
+ Label done;
+ if (!handleOOB) {
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
+ scratch1, failure->label());
+ masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
+ failure->label());
+ } else {
+ // Return NaN for out-of-bounds access.
+ masm.moveValue(JS::NaNValue(), output.valueReg());
+
+ // The bounds check mustn't use a scratch register which aliases the output.
+ MOZ_ASSERT(!output.valueReg().aliases(scratch3));
+
+ // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
+ // guaranteed to see no nested ropes.
+ Label loadFailed;
+ masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
+ scratch3, &done);
+ masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
+
+ Label loadedChar;
+ masm.jump(&loadedChar);
+ masm.bind(&loadFailed);
+ masm.assumeUnreachable("loadStringChar can't fail for linear strings");
+ masm.bind(&loadedChar);
+ }
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ masm.bind(&done);
+ return true;
+}
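
With handleOOB set, the op mirrors String.prototype.charCodeAt: the output is
pre-loaded with NaN, and an out-of-range index simply skips the character load
instead of bailing. A behavioural sketch (std::u16string and the helper name
are illustration-only stand-ins for the engine's string types):

    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <string>

    // In-range indices yield the 16-bit code unit; out-of-range yields NaN.
    double loadStringCharCode(const std::u16string& str, int32_t index) {
      if (index < 0 || std::size_t(index) >= str.size()) {
        return std::nan("");
      }
      return double(uint16_t(str[std::size_t(index)]));
    }
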
+
+bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset,
+ StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+
+ callvm.prepare();
+ masm.Push(str);
+
+ using Fn = JSObject* (*)(JSContext*, HandleString);
+ callvm.call<Fn, NewStringObject>();
+ return true;
+}
+
+bool CacheIRCompiler::emitStringIndexOfResult(StringOperandId strId,
+ StringOperandId searchStrId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+ Register searchStr = allocator.useRegister(masm, searchStrId);
+
+ callvm.prepare();
+ masm.Push(searchStr);
+ masm.Push(str);
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
+ callvm.call<Fn, js::StringIndexOf>();
+ return true;
+}
+
+bool CacheIRCompiler::emitStringStartsWithResult(StringOperandId strId,
+ StringOperandId searchStrId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+ Register searchStr = allocator.useRegister(masm, searchStrId);
+
+ callvm.prepare();
+ masm.Push(searchStr);
+ masm.Push(str);
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ callvm.call<Fn, js::StringStartsWith>();
+ return true;
+}
+
+bool CacheIRCompiler::emitStringEndsWithResult(StringOperandId strId,
+ StringOperandId searchStrId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+ Register searchStr = allocator.useRegister(masm, searchStrId);
+
+ callvm.prepare();
+ masm.Push(searchStr);
+ masm.Push(str);
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ callvm.call<Fn, js::StringEndsWith>();
+ return true;
+}
+
+bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+
+ callvm.prepare();
+ masm.Push(str);
+
+ using Fn = JSString* (*)(JSContext*, HandleString);
+ callvm.call<Fn, js::StringToLowerCase>();
+ return true;
+}
+
+bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+
+ callvm.prepare();
+ masm.Push(str);
+
+ using Fn = JSString* (*)(JSContext*, HandleString);
+ callvm.call<Fn, js::StringToUpperCase>();
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId,
+ Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadArgumentsObjectElement(obj, index, output.valueReg(), scratch,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadArgumentsObjectArgHoleResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadArgumentsObjectElementHole(obj, index, output.valueReg(), scratch,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadArgumentsObjectArgExistsResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadArgumentsObjectElementExists(obj, index, scratch2, scratch1,
+ failure->label());
+ EmitStoreResult(masm, scratch2, JSVAL_TYPE_BOOLEAN, output);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId,
+ Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
+
+ // Bounds check.
+ Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
+ masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());
+
+ // Hole check.
+ BaseObjectElementIndex element(scratch1, index);
+ masm.branchTestMagic(Assembler::Equal, element, failure->label());
+ masm.loadTypedOrValue(element, output);
+ return true;
+}
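
A dense-element load only succeeds when the index is below the initialized
length and the slot does not hold the hole magic value; otherwise the IC bails
so the generic path can consult the prototype chain. A conceptual model with
hypothetical types (std::optional stands in for the hole sentinel, double for
the stored Value):

    #include <cstdint>
    #include <optional>
    #include <vector>

    using DenseElements = std::vector<std::optional<double>>;  // nullopt ~ hole

    bool loadDenseElement(const DenseElements& elems, uint32_t index,
                          double* out) {
      if (index >= elems.size()) {
        return false;  // bounds check against the initialized length fails
      }
      if (!elems[index]) {
        return false;  // hole (magic value) check fails
      }
      *out = *elems[index];
      return true;
    }
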
+
+bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register index = allocator.useRegister(masm, indexId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIndexIsNotDenseElement(ObjOperandId objId,
+ Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegister scratch(allocator, masm);
+ AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ // Ensure index >= initLength or the element is a hole.
+ Label notDense;
+ Address capacity(scratch, ObjectElements::offsetOfInitializedLength());
+ masm.spectreBoundsCheck32(index, capacity, spectreScratch, &notDense);
+
+ BaseValueIndex element(scratch, index);
+ masm.branchTestMagic(Assembler::Equal, element, &notDense);
+
+ masm.jump(failure->label());
+
+ masm.bind(&notDense);
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId,
+ Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegister scratch(allocator, masm);
+ AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ Label success;
+
+  // If the length is writable, any index is valid for an add or update, so
+  // branch to &success.
+ Address flags(scratch, ObjectElements::offsetOfFlags());
+ masm.branchTest32(Assembler::Zero, flags,
+ Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
+ &success);
+
+ // Otherwise, ensure index is in bounds.
+ Address length(scratch, ObjectElements::offsetOfLength());
+ masm.spectreBoundsCheck32(index, length, spectreScratch,
+ /* failure = */ failure->label());
+ masm.bind(&success);
+ return true;
+}
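
The guard encodes the rule that an add or update through this IC is allowed at
any index when the array's .length is writable (the array may grow), but only
at in-bounds indices when the length is non-writable. As a one-line sketch
(helper name invented for illustration):

    #include <cstdint>

    bool indexIsValidUpdateOrAdd(uint32_t index, uint32_t length,
                                 bool lengthWritable) {
      return lengthWritable || index < length;
    }
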
+
+bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
+ ValueTagOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label done;
+ masm.branch32(Assembler::Equal, lhs, rhs, failure->label());
+
+  // If both lhs and rhs are numbers, unequal tags don't prove inequality:
+  // an int32 and a double can hold the same numeric value.
+ masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
+ masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
+ masm.jump(failure->label());
+
+ masm.bind(&done);
+ return true;
+}
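
The extra number checks exist because two equal JS numbers can carry different
tags (int32 vs. double), so unequal tags alone don't prove the values differ.
A schematic illustration with a hypothetical tag enum standing in for the real
JSValueTag values:

    enum class Tag { Int32, Double, String, Object };

    // Unequal tags prove two values differ unless both are numbers: the int32
    // 1 and the double 1.0 carry different tags yet compare equal, which is
    // why the emitted guard bails in that case.
    bool tagsProveNotEqual(Tag lhs, Tag rhs) {
      if (lhs == rhs) {
        return false;
      }
      bool bothNumbers = (lhs == Tag::Int32 || lhs == Tag::Double) &&
                         (rhs == Tag::Int32 || rhs == Tag::Double);
      return !bothNumbers;
    }
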
+
+bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
+ ObjOperandId objId, uint32_t shapeWrapperOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ StubFieldOffset shapeWrapper(shapeWrapperOffset, StubField::Type::JSObject);
+
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
+ Address holderAddress(scratch,
+ sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
+ Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
+ GetXrayJitInfo()->holderExpandoSlot));
+
+ masm.fallibleUnboxObject(holderAddress, scratch, failure->label());
+ masm.fallibleUnboxObject(expandoAddress, scratch, failure->label());
+
+ // Unwrap the expando before checking its shape.
+ masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
+ masm.unboxObject(
+ Address(scratch, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
+ scratch);
+
+ emitLoadStubField(shapeWrapper, scratch2);
+ LoadShapeWrapperContents(masm, scratch2, scratch2, failure->label());
+ masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2, scratch3,
+ scratch, failure->label());
+
+ // The reserved slots on the expando should all be in fixed slots.
+ Address protoAddress(scratch, NativeObject::getFixedSlotOffset(
+ GetXrayJitInfo()->expandoProtoSlot));
+ masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
+ Address holderAddress(scratch,
+ sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
+ Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
+ GetXrayJitInfo()->holderExpandoSlot));
+
+ Label done;
+ masm.fallibleUnboxObject(holderAddress, scratch, &done);
+ masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
+ masm.bind(&done);
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
+ uint32_t builderAddrOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ StubFieldOffset builderField(builderAddrOffset, StubField::Type::RawPointer);
+ emitLoadStubField(builderField, scratch);
+ masm.branchPtr(Assembler::NotEqual, Address(scratch, 0), ImmWord(0),
+ failure->label());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
+ bool constructing) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register fun = allocator.useRegister(masm, funId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchIfFunctionHasNoJitEntry(fun, constructing, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, funId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchIfFunctionHasJitEntry(obj, /*isConstructing =*/false,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register fun = allocator.useRegister(masm, funId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchIfNotFunctionIsNonBuiltinCtor(fun, scratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register funcReg = allocator.useRegister(masm, funId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+  // Ensure the function is a constructor.
+ masm.branchTestFunctionFlags(funcReg, FunctionFlags::CONSTRUCTOR,
+ Assembler::Zero, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register fun = allocator.useRegister(masm, funId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
+ fun, scratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register array = allocator.useRegister(masm, arrayId);
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchArrayIsNotPacked(array, scratch, scratch2, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
+ uint8_t flags) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchTestArgumentsObjectFlags(obj, scratch, flags, Assembler::NonZero,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
+ Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Make sure the index is nonnegative.
+ masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
+
+ // Guard on the initialized length.
+ Label hole;
+ Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
+ masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);
+
+ // Load the value.
+ Label done;
+ masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
+ masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);
+
+ // Load undefined for the hole.
+ masm.bind(&hole);
+ masm.moveValue(UndefinedValue(), output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
+ ObjOperandId objId, IntPtrOperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Label outOfBounds, done;
+
+ // Bounds check.
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
+ EmitStoreBoolean(masm, true, output);
+ masm.jump(&done);
+
+ masm.bind(&outOfBounds);
+ EmitStoreBoolean(masm, false, output);
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId,
+ Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ // Bounds check. Unsigned compare sends negative indices to next IC.
+ Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());
+
+ // Hole check.
+ BaseObjectElementIndex element(scratch, index);
+ masm.branchTestMagic(Assembler::Equal, element, failure->label());
+
+ EmitStoreBoolean(masm, true, output);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Make sure the index is nonnegative.
+ masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ // Guard on the initialized length.
+ Label hole;
+ Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);
+
+ // Load value and replace with true.
+ Label done;
+ BaseObjectElementIndex element(scratch, index);
+ masm.branchTestMagic(Assembler::Equal, element, &hole);
+ EmitStoreBoolean(masm, true, output);
+ masm.jump(&done);
+
+ // Load false for the hole.
+ masm.bind(&hole);
+ EmitStoreBoolean(masm, false, output);
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register array = allocator.useRegister(masm, arrayId);
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.packedArrayPop(array, output.valueReg(), scratch1, scratch2,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register array = allocator.useRegister(masm, arrayId);
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.packedArrayShift(array, output.valueReg(), scratch1, scratch2,
+ volatileRegs, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ ValueOperand val = allocator.useValueRegister(masm, inputId);
+
+ masm.testObjectSet(Assembler::Equal, val, scratch);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ Register outputScratch = output.valueReg().scratchReg();
+ masm.setIsPackedArray(obj, outputScratch, scratch);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
+
+ ValueOperand val = allocator.useValueRegister(masm, inputId);
+
+ Label isObject, done;
+ masm.branchTestObject(Assembler::Equal, val, &isObject);
+ // Primitives are never callable.
+ masm.move32(Imm32(0), scratch2);
+ masm.jump(&done);
+
+ masm.bind(&isObject);
+ masm.unboxObject(val, scratch1);
+
+ Label isProxy;
+ masm.isCallable(scratch1, scratch2, &isProxy);
+ masm.jump(&done);
+
+ masm.bind(&isProxy);
+ {
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSObject* obj);
+ masm.setupUnalignedABICall(scratch2);
+ masm.passABIArg(scratch1);
+ masm.callWithABI<Fn, ObjectIsCallable>();
+ masm.storeCallBoolResult(scratch2);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch2);
+ masm.PopRegsInMaskIgnore(volatileRegs, ignore);
+ }
+
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Register obj = allocator.useRegister(masm, objId);
+
+ Label isProxy, done;
+ masm.isConstructor(obj, scratch, &isProxy);
+ masm.jump(&done);
+
+ masm.bind(&isProxy);
+ {
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSObject* obj);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, ObjectIsConstructor>();
+ masm.storeCallBoolResult(scratch);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch);
+ masm.PopRegsInMaskIgnore(volatileRegs, ignore);
+ }
+
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+
+ masm.setIsCrossRealmArrayConstructor(obj, scratch);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
+ masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
+ masm.convertIntPtrToDouble(scratch, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
+ masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
+ masm.typedArrayElementSize(obj, scratch2);
+
+ masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
+ failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
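
byteLength is length * elementSize, and the product can overflow int32 even
when the length itself fits, which is why the multiply's overflow flag feeds
the failure path so the byte length can be returned as a double instead. A
sketch of the int32 fast path (helper name invented for illustration):

    #include <cstdint>

    bool byteLengthFitsInt32(int32_t length, int32_t elementSize,
                             int32_t* byteLength) {
      int64_t product = int64_t(length) * int64_t(elementSize);
      if (product < INT32_MIN || product > INT32_MAX) {
        return false;  // overflow: fall back to the double result
      }
      *byteLength = int32_t(product);
      return true;
    }
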
+
+bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
+ masm.typedArrayElementSize(obj, scratch2);
+ masm.mulPtr(scratch2, scratch1);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertIntPtrToDouble(scratch1, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+
+ masm.typedArrayElementSize(obj, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoScratchRegister scratch(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchIfHasDetachedArrayBuffer(obj, scratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+
+ masm.setIsDefinitelyTypedArrayConstructor(obj, scratch);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitGetNextMapSetEntryForIteratorResult(
+ ObjOperandId iterId, ObjOperandId resultArrId, bool isMap) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register iter = allocator.useRegister(masm, iterId);
+ Register resultArr = allocator.useRegister(masm, resultArrId);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ save.takeUnchecked(output.valueReg());
+ save.takeUnchecked(scratch);
+ masm.PushRegsInMask(save);
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(iter);
+ masm.passABIArg(resultArr);
+ if (isMap) {
+ using Fn = bool (*)(MapIteratorObject* iter, ArrayObject* resultPairObj);
+ masm.callWithABI<Fn, MapIteratorObject::next>();
+ } else {
+ using Fn = bool (*)(SetIteratorObject* iter, ArrayObject* resultObj);
+ masm.callWithABI<Fn, SetIteratorObject::next>();
+ }
+ masm.storeCallBoolResult(scratch);
+
+ masm.PopRegsInMask(save);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ return true;
+}
+
+void CacheIRCompiler::emitActivateIterator(Register objBeingIterated,
+ Register iterObject,
+ Register nativeIter,
+ Register scratch, Register scratch2,
+ uint32_t enumeratorsAddrOffset) {
+ // 'objectBeingIterated_' must be nullptr, so we don't need a pre-barrier.
+ Address iterObjAddr(nativeIter,
+ NativeIterator::offsetOfObjectBeingIterated());
+#ifdef DEBUG
+ Label ok;
+ masm.branchPtr(Assembler::Equal, iterObjAddr, ImmPtr(nullptr), &ok);
+ masm.assumeUnreachable("iterator with non-null object");
+ masm.bind(&ok);
+#endif
+
+ // Mark iterator as active.
+ Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
+ masm.storePtr(objBeingIterated, iterObjAddr);
+ masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
+
+ // Post-write barrier for stores to 'objectBeingIterated_'.
+ emitPostBarrierSlot(
+ iterObject,
+ TypedOrValueRegister(MIRType::Object, AnyRegister(objBeingIterated)),
+ scratch);
+
+ // Chain onto the active iterator stack.
+ StubFieldOffset enumeratorsAddr(enumeratorsAddrOffset,
+ StubField::Type::RawPointer);
+ emitLoadStubField(enumeratorsAddr, scratch);
+ masm.registerIterator(scratch, nativeIter, scratch2);
+}
+
+bool CacheIRCompiler::emitObjectToIteratorResult(
+ ObjOperandId objId, uint32_t enumeratorsAddrOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+ Register obj = allocator.useRegister(masm, objId);
+
+ AutoScratchRegister iterObj(allocator, masm);
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegisterMaybeOutput scratch2(allocator, masm, callvm.output());
+ AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, callvm.output());
+
+ Label callVM, done;
+ masm.maybeLoadIteratorFromShape(obj, iterObj, scratch, scratch2, scratch3,
+ &callVM);
+
+ masm.loadPrivate(
+ Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
+ scratch);
+
+ emitActivateIterator(obj, iterObj, scratch, scratch2, scratch3,
+ enumeratorsAddrOffset);
+ masm.jump(&done);
+
+ masm.bind(&callVM);
+ callvm.prepare();
+ masm.Push(obj);
+ using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
+ callvm.call<Fn, GetIterator>();
+ masm.storeCallPointerResult(iterObj);
+
+ masm.bind(&done);
+ EmitStoreResult(masm, iterObj, JSVAL_TYPE_OBJECT, callvm.output());
+ return true;
+}
+
+bool CacheIRCompiler::emitValueToIteratorResult(ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+
+ callvm.prepare();
+
+ masm.Push(val);
+
+ using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
+ callvm.call<Fn, ValueToIterator>();
+ return true;
+}
+
+bool CacheIRCompiler::emitNewArrayIteratorResult(
+ uint32_t templateObjectOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ callvm.prepare();
+
+ using Fn = ArrayIteratorObject* (*)(JSContext*);
+ callvm.call<Fn, NewArrayIterator>();
+ return true;
+}
+
+bool CacheIRCompiler::emitNewStringIteratorResult(
+ uint32_t templateObjectOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ callvm.prepare();
+
+ using Fn = StringIteratorObject* (*)(JSContext*);
+ callvm.call<Fn, NewStringIterator>();
+ return true;
+}
+
+bool CacheIRCompiler::emitNewRegExpStringIteratorResult(
+ uint32_t templateObjectOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ callvm.prepare();
+
+ using Fn = RegExpStringIteratorObject* (*)(JSContext*);
+ callvm.call<Fn, NewRegExpStringIterator>();
+ return true;
+}
+
+bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+ AutoScratchRegister scratch(allocator, masm);
+
+ StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
+ emitLoadStubField(objectField, scratch);
+
+ callvm.prepare();
+ masm.Push(scratch);
+
+ using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
+ callvm.call<Fn, ObjectCreateWithTemplate>();
+ return true;
+}
+
+bool CacheIRCompiler::emitNewArrayFromLengthResult(
+ uint32_t templateObjectOffset, Int32OperandId lengthId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+ AutoScratchRegister scratch(allocator, masm);
+ Register length = allocator.useRegister(masm, lengthId);
+
+ StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
+ emitLoadStubField(objectField, scratch);
+
+ callvm.prepare();
+ masm.Push(length);
+ masm.Push(scratch);
+
+ using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
+ callvm.call<Fn, ArrayConstructorOneArg>();
+ return true;
+}
+
+bool CacheIRCompiler::emitNewTypedArrayFromLengthResult(
+ uint32_t templateObjectOffset, Int32OperandId lengthId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+ AutoScratchRegister scratch(allocator, masm);
+ Register length = allocator.useRegister(masm, lengthId);
+
+ StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
+ emitLoadStubField(objectField, scratch);
+
+ callvm.prepare();
+ masm.Push(length);
+ masm.Push(scratch);
+
+ using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
+ callvm.call<Fn, NewTypedArrayWithTemplateAndLength>();
+ return true;
+}
+
+bool CacheIRCompiler::emitNewTypedArrayFromArrayBufferResult(
+ uint32_t templateObjectOffset, ObjOperandId bufferId,
+ ValOperandId byteOffsetId, ValOperandId lengthId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+#ifdef JS_CODEGEN_X86
+ MOZ_CRASH("Instruction not supported on 32-bit x86, not enough registers");
+#endif
+
+ AutoCallVM callvm(masm, this, allocator);
+ AutoScratchRegister scratch(allocator, masm);
+ Register buffer = allocator.useRegister(masm, bufferId);
+ ValueOperand byteOffset = allocator.useValueRegister(masm, byteOffsetId);
+ ValueOperand length = allocator.useValueRegister(masm, lengthId);
+
+ StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
+ emitLoadStubField(objectField, scratch);
+
+ callvm.prepare();
+ masm.Push(length);
+ masm.Push(byteOffset);
+ masm.Push(buffer);
+ masm.Push(scratch);
+
+ using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
+ HandleValue, HandleValue);
+ callvm.call<Fn, NewTypedArrayWithTemplateAndBuffer>();
+ return true;
+}
+
+bool CacheIRCompiler::emitNewTypedArrayFromArrayResult(
+ uint32_t templateObjectOffset, ObjOperandId arrayId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+ AutoScratchRegister scratch(allocator, masm);
+ Register array = allocator.useRegister(masm, arrayId);
+
+ StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
+ emitLoadStubField(objectField, scratch);
+
+ callvm.prepare();
+ masm.Push(array);
+ masm.Push(scratch);
+
+ using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
+ callvm.call<Fn, NewTypedArrayWithTemplateAndArray>();
+ return true;
+}
+
+bool CacheIRCompiler::emitAddSlotAndCallAddPropHook(ObjOperandId objId,
+ ValOperandId rhsId,
+ uint32_t newShapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ AutoScratchRegister scratch(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
+
+ StubFieldOffset shapeField(newShapeOffset, StubField::Type::Shape);
+ emitLoadStubField(shapeField, scratch);
+
+ callvm.prepare();
+
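+  // Arguments are pushed in reverse order of the Fn signature; the JSContext
+  // argument is supplied by the VM call machinery and is not pushed here.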
+ masm.Push(scratch);
+ masm.Push(rhs);
+ masm.Push(obj);
+
+ using Fn =
+ bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
+ callvm.callNoResult<Fn, AddSlotAndCallAddPropHook>();
+ return true;
+}
+
+bool CacheIRCompiler::emitMathAbsInt32Result(Int32OperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Register input = allocator.useRegister(masm, inputId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.mov(input, scratch);
+ // Don't negate already positive values.
+ Label positive;
+ masm.branchTest32(Assembler::NotSigned, scratch, scratch, &positive);
+ // neg32 might overflow for INT_MIN.
+ masm.branchNeg32(Assembler::Overflow, scratch, failure->label());
+ masm.bind(&positive);
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMathAbsNumberResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoAvailableFloatRegister scratch(*this, FloatReg0);
+
+ allocator.ensureDoubleRegister(masm, inputId, scratch);
+
+ masm.absDouble(scratch, scratch);
+ masm.boxDouble(scratch, output.valueReg(), scratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitMathClz32Result(Int32OperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register input = allocator.useRegister(masm, inputId);
+
+ masm.clz32(input, scratch, /* knownNotZero = */ false);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMathSignInt32Result(Int32OperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register input = allocator.useRegister(masm, inputId);
+
+ masm.signInt32(input, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMathSignNumberResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
+
+ masm.signDouble(floatScratch1, floatScratch2);
+ masm.boxDouble(floatScratch2, output.valueReg(), floatScratch2);
+ return true;
+}
+
+bool CacheIRCompiler::emitMathSignNumberToInt32Result(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
+
+ masm.signDoubleToInt32(floatScratch1, scratch, floatScratch2,
+ failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMathImulResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ masm.mov(lhs, scratch);
+ masm.mul32(rhs, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMathSqrtNumberResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoAvailableFloatRegister scratch(*this, FloatReg0);
+
+ allocator.ensureDoubleRegister(masm, inputId, scratch);
+
+ masm.sqrtDouble(scratch, scratch);
+ masm.boxDouble(scratch, output.valueReg(), scratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitMathFloorNumberResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoAvailableFloatRegister scratch(*this, FloatReg0);
+
+ allocator.ensureDoubleRegister(masm, inputId, scratch);
+
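+  // Use the native rounding instruction when available; otherwise call the
+  // C++ implementation through the shared helper.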
+ if (Assembler::HasRoundInstruction(RoundingMode::Down)) {
+ masm.nearbyIntDouble(RoundingMode::Down, scratch, scratch);
+ masm.boxDouble(scratch, output.valueReg(), scratch);
+ return true;
+ }
+
+ return emitMathFunctionNumberResultShared(UnaryMathFunction::Floor, scratch,
+ output.valueReg());
+}
+
+bool CacheIRCompiler::emitMathCeilNumberResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoAvailableFloatRegister scratch(*this, FloatReg0);
+
+ allocator.ensureDoubleRegister(masm, inputId, scratch);
+
+ if (Assembler::HasRoundInstruction(RoundingMode::Up)) {
+ masm.nearbyIntDouble(RoundingMode::Up, scratch, scratch);
+ masm.boxDouble(scratch, output.valueReg(), scratch);
+ return true;
+ }
+
+ return emitMathFunctionNumberResultShared(UnaryMathFunction::Ceil, scratch,
+ output.valueReg());
+}
+
+bool CacheIRCompiler::emitMathTruncNumberResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoAvailableFloatRegister scratch(*this, FloatReg0);
+
+ allocator.ensureDoubleRegister(masm, inputId, scratch);
+
+ if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero)) {
+ masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
+ masm.boxDouble(scratch, output.valueReg(), scratch);
+ return true;
+ }
+
+ return emitMathFunctionNumberResultShared(UnaryMathFunction::Trunc, scratch,
+ output.valueReg());
+}
+
+bool CacheIRCompiler::emitMathFRoundNumberResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoAvailableFloatRegister scratch(*this, FloatReg0);
+ FloatRegister scratchFloat32 = scratch.get().asSingle();
+
+ allocator.ensureDoubleRegister(masm, inputId, scratch);
+
+ masm.convertDoubleToFloat32(scratch, scratchFloat32);
+ masm.convertFloat32ToDouble(scratchFloat32, scratch);
+
+ masm.boxDouble(scratch, output.valueReg(), scratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitMathHypot2NumberResult(NumberOperandId first,
+ NumberOperandId second) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, first, floatScratch0);
+ allocator.ensureDoubleRegister(masm, second, floatScratch1);
+
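+  // The ABI call clobbers volatile registers, so save them. The result
+  // register is excluded from the restore via the ignore set below.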
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = double (*)(double x, double y);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
+ masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
+
+ masm.callWithABI<Fn, ecmaHypot>(MoveOp::DOUBLE);
+ masm.storeCallFloatResult(floatScratch0);
+
+ LiveRegisterSet ignore;
+ ignore.add(floatScratch0);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+ return true;
+}
+
+bool CacheIRCompiler::emitMathHypot3NumberResult(NumberOperandId first,
+ NumberOperandId second,
+ NumberOperandId third) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+ AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
+
+ allocator.ensureDoubleRegister(masm, first, floatScratch0);
+ allocator.ensureDoubleRegister(masm, second, floatScratch1);
+ allocator.ensureDoubleRegister(masm, third, floatScratch2);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = double (*)(double x, double y, double z);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
+ masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
+ masm.passABIArg(floatScratch2, MoveOp::DOUBLE);
+
+ masm.callWithABI<Fn, hypot3>(MoveOp::DOUBLE);
+ masm.storeCallFloatResult(floatScratch0);
+
+ LiveRegisterSet ignore;
+ ignore.add(floatScratch0);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+ return true;
+}
+
+bool CacheIRCompiler::emitMathHypot4NumberResult(NumberOperandId first,
+ NumberOperandId second,
+ NumberOperandId third,
+ NumberOperandId fourth) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+ AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
+ AutoAvailableFloatRegister floatScratch3(*this, FloatReg3);
+
+ allocator.ensureDoubleRegister(masm, first, floatScratch0);
+ allocator.ensureDoubleRegister(masm, second, floatScratch1);
+ allocator.ensureDoubleRegister(masm, third, floatScratch2);
+ allocator.ensureDoubleRegister(masm, fourth, floatScratch3);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = double (*)(double x, double y, double z, double w);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
+ masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
+ masm.passABIArg(floatScratch2, MoveOp::DOUBLE);
+ masm.passABIArg(floatScratch3, MoveOp::DOUBLE);
+
+ masm.callWithABI<Fn, hypot4>(MoveOp::DOUBLE);
+ masm.storeCallFloatResult(floatScratch0);
+
+ LiveRegisterSet ignore;
+ ignore.add(floatScratch0);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+ return true;
+}
+
+bool CacheIRCompiler::emitMathAtan2NumberResult(NumberOperandId yId,
+ NumberOperandId xId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, yId, floatScratch0);
+ allocator.ensureDoubleRegister(masm, xId, floatScratch1);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = double (*)(double x, double y);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
+ masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
+ masm.callWithABI<Fn, js::ecmaAtan2>(MoveOp::DOUBLE);
+ masm.storeCallFloatResult(floatScratch0);
+
+ LiveRegisterSet ignore;
+ ignore.add(floatScratch0);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+
+ return true;
+}
+
+bool CacheIRCompiler::emitMathFloorToInt32Result(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ allocator.ensureDoubleRegister(masm, inputId, scratchFloat);
+
+ masm.floorDoubleToInt32(scratchFloat, scratch, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMathCeilToInt32Result(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ allocator.ensureDoubleRegister(masm, inputId, scratchFloat);
+
+ masm.ceilDoubleToInt32(scratchFloat, scratch, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMathTruncToInt32Result(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ allocator.ensureDoubleRegister(masm, inputId, scratchFloat);
+
+ masm.truncDoubleToInt32(scratchFloat, scratch, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMathRoundToInt32Result(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ AutoAvailableFloatRegister scratchFloat0(*this, FloatReg0);
+ AutoAvailableFloatRegister scratchFloat1(*this, FloatReg1);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ allocator.ensureDoubleRegister(masm, inputId, scratchFloat0);
+
+ masm.roundDoubleToInt32(scratchFloat0, scratch, scratchFloat1,
+ failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32MinMax(bool isMax, Int32OperandId firstId,
+ Int32OperandId secondId,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register first = allocator.useRegister(masm, firstId);
+ Register second = allocator.useRegister(masm, secondId);
+ Register result = allocator.defineRegister(masm, resultId);
+
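+  // |result| starts out as |first| and is overwritten with |second| when
+  // |second| compares greater (max) or less (min).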
+ Assembler::Condition cond =
+ isMax ? Assembler::GreaterThan : Assembler::LessThan;
+ masm.move32(first, result);
+ masm.cmp32Move32(cond, second, first, second, result);
+ return true;
+}
+
+bool CacheIRCompiler::emitNumberMinMax(bool isMax, NumberOperandId firstId,
+ NumberOperandId secondId,
+ NumberOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ ValueOperand output = allocator.defineValueRegister(masm, resultId);
+
+ AutoAvailableFloatRegister scratch1(*this, FloatReg0);
+ AutoAvailableFloatRegister scratch2(*this, FloatReg1);
+
+ allocator.ensureDoubleRegister(masm, firstId, scratch1);
+ allocator.ensureDoubleRegister(masm, secondId, scratch2);
+
+ if (isMax) {
+ masm.maxDouble(scratch2, scratch1, /* handleNaN = */ true);
+ } else {
+ masm.minDouble(scratch2, scratch1, /* handleNaN = */ true);
+ }
+
+ masm.boxDouble(scratch1, output, scratch1);
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId,
+ bool isMax) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register array = allocator.useRegister(masm, arrayId);
+
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
+ AutoScratchRegisterMaybeOutput result(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.minMaxArrayInt32(array, result, scratch, scratch2, scratch3, isMax,
+ failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, result, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId,
+ bool isMax) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register array = allocator.useRegister(masm, arrayId);
+
+ AutoAvailableFloatRegister result(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch(*this, FloatReg1);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.minMaxArrayNumber(array, result, floatScratch, scratch1, scratch2, isMax,
+ failure->label());
+ masm.boxDouble(result, output.valueReg(), result);
+ return true;
+}
+
+bool CacheIRCompiler::emitMathFunctionNumberResultShared(
+ UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output) {
+ UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
+
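+  // Exclude |inputScratch| from the save set so the call result written into
+  // it is not clobbered when the registers are restored.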
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ save.takeUnchecked(inputScratch);
+ masm.PushRegsInMask(save);
+
+ masm.setupUnalignedABICall(output.scratchReg());
+ masm.passABIArg(inputScratch, MoveOp::DOUBLE);
+ masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
+ MoveOp::DOUBLE);
+ masm.storeCallFloatResult(inputScratch);
+
+ masm.PopRegsInMask(save);
+
+ masm.boxDouble(inputScratch, output, inputScratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitMathFunctionNumberResult(NumberOperandId inputId,
+ UnaryMathFunction fun) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoAvailableFloatRegister scratch(*this, FloatReg0);
+
+ allocator.ensureDoubleRegister(masm, inputId, scratch);
+
+ return emitMathFunctionNumberResultShared(fun, scratch, output.valueReg());
+}
+
+static void EmitStoreDenseElement(MacroAssembler& masm,
+ const ConstantOrRegister& value,
+ BaseObjectElementIndex target) {
+ if (value.constant()) {
+ Value v = value.value();
+ masm.storeValue(v, target);
+ return;
+ }
+
+ TypedOrValueRegister reg = value.reg();
+ masm.storeTypedOrValue(reg, target);
+}
+
+bool CacheIRCompiler::emitStoreDenseElement(ObjOperandId objId,
+ Int32OperandId indexId,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Load obj->elements in scratch.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+  // Bounds check. We don't have a spare register available on x86, so use
+  // InvalidReg and accept slightly slower code there.
+ Register spectreTemp = InvalidReg;
+ Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
+ masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
+
+ // Hole check.
+ BaseObjectElementIndex element(scratch, index);
+ masm.branchTestMagic(Assembler::Equal, element, failure->label());
+
+ // Perform the store.
+ EmitPreBarrier(masm, element, MIRType::Value);
+ EmitStoreDenseElement(masm, val, element);
+
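+  // Post-barrier: the stored value may be a nursery object reachable from
+  // tenured elements.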
+ emitPostBarrierElement(obj, val, scratch, index);
+ return true;
+}
+
+static void EmitAssertExtensibleElements(MacroAssembler& masm,
+ Register elementsReg) {
+#ifdef DEBUG
+ // Preceding shape guards ensure the object elements are extensible.
+ Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
+ Label ok;
+ masm.branchTest32(Assembler::Zero, elementsFlags,
+ Imm32(ObjectElements::Flags::NOT_EXTENSIBLE), &ok);
+ masm.assumeUnreachable("Unexpected non-extensible elements");
+ masm.bind(&ok);
+#endif
+}
+
+static void EmitAssertWritableArrayLengthElements(MacroAssembler& masm,
+ Register elementsReg) {
+#ifdef DEBUG
+ // Preceding shape guards ensure the array length is writable.
+ Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
+ Label ok;
+ masm.branchTest32(Assembler::Zero, elementsFlags,
+ Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
+ &ok);
+ masm.assumeUnreachable("Unexpected non-writable array length elements");
+ masm.bind(&ok);
+#endif
+}
+
+bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId,
+ Int32OperandId indexId,
+ ValOperandId rhsId,
+ bool handleAdd) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Load obj->elements in scratch.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ EmitAssertExtensibleElements(masm, scratch);
+ if (handleAdd) {
+ EmitAssertWritableArrayLengthElements(masm, scratch);
+ }
+
+ BaseObjectElementIndex element(scratch, index);
+ Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
+ Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
+
+  // We don't have enough registers on x86, so use InvalidReg; this emits
+  // slightly less efficient code there.
+ Register spectreTemp = InvalidReg;
+
+ Label storeSkipPreBarrier;
+ if (handleAdd) {
+ // Bounds check.
+ Label inBounds, outOfBounds;
+ masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
+ masm.jump(&inBounds);
+
+ // If we're out-of-bounds, only handle the index == initLength case.
+ masm.bind(&outOfBounds);
+ masm.branch32(Assembler::NotEqual, initLength, index, failure->label());
+
+ // If index < capacity, we can add a dense element inline. If not we
+ // need to allocate more elements.
+ Label allocElement, addNewElement;
+ Address capacity(scratch, ObjectElements::offsetOfCapacity());
+ masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
+ masm.jump(&addNewElement);
+
+ masm.bind(&allocElement);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ save.takeUnchecked(scratch);
+ masm.PushRegsInMask(save);
+
+ using Fn = bool (*)(JSContext* cx, NativeObject* obj);
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
+ masm.storeCallPointerResult(scratch);
+
+ masm.PopRegsInMask(save);
+ masm.branchIfFalseBool(scratch, failure->label());
+
+ // Load the reallocated elements pointer.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ masm.bind(&addNewElement);
+
+ // Increment initLength.
+ masm.add32(Imm32(1), initLength);
+
+ // If length is now <= index, increment length too.
+ Label skipIncrementLength;
+ Address length(scratch, ObjectElements::offsetOfLength());
+ masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
+ masm.add32(Imm32(1), length);
+ masm.bind(&skipIncrementLength);
+
+ // Skip EmitPreBarrier as the memory is uninitialized.
+ masm.jump(&storeSkipPreBarrier);
+
+ masm.bind(&inBounds);
+ } else {
+ // Fail if index >= initLength.
+ masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
+ }
+
+ EmitPreBarrier(masm, element, MIRType::Value);
+
+ masm.bind(&storeSkipPreBarrier);
+ EmitStoreDenseElement(masm, val, element);
+
+ emitPostBarrierElement(obj, val, scratch, index);
+ return true;
+}
+
+bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+ AutoScratchRegisterMaybeOutput scratchLength(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Load obj->elements in scratch.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ EmitAssertExtensibleElements(masm, scratch);
+ EmitAssertWritableArrayLengthElements(masm, scratch);
+
+ Address elementsInitLength(scratch,
+ ObjectElements::offsetOfInitializedLength());
+ Address elementsLength(scratch, ObjectElements::offsetOfLength());
+ Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
+
+ // Fail if length != initLength.
+ masm.load32(elementsInitLength, scratchLength);
+ masm.branch32(Assembler::NotEqual, elementsLength, scratchLength,
+ failure->label());
+
+ // If scratchLength < capacity, we can add a dense element inline. If not we
+ // need to allocate more elements.
+ Label allocElement, addNewElement;
+ Address capacity(scratch, ObjectElements::offsetOfCapacity());
+ masm.spectreBoundsCheck32(scratchLength, capacity, InvalidReg, &allocElement);
+ masm.jump(&addNewElement);
+
+ masm.bind(&allocElement);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ save.takeUnchecked(scratch);
+ masm.PushRegsInMask(save);
+
+ using Fn = bool (*)(JSContext* cx, NativeObject* obj);
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
+ masm.storeCallPointerResult(scratch);
+
+ masm.PopRegsInMask(save);
+ masm.branchIfFalseBool(scratch, failure->label());
+
+ // Load the reallocated elements pointer.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ masm.bind(&addNewElement);
+
+ // Increment initLength and length.
+ masm.add32(Imm32(1), elementsInitLength);
+ masm.add32(Imm32(1), elementsLength);
+
+ // Store the value.
+ BaseObjectElementIndex element(scratch, scratchLength);
+ masm.storeValue(val, element);
+ emitPostBarrierElement(obj, val, scratch, scratchLength);
+
+ // Return value is new length.
+ masm.add32(Imm32(1), scratchLength);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchLength, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
+ Scalar::Type elementType,
+ IntPtrOperandId indexId,
+ uint32_t rhsId,
+ bool handleOOB) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+
+ Maybe<Register> valInt32;
+ Maybe<Register> valBigInt;
+ switch (elementType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Uint8Clamped:
+ valInt32.emplace(allocator.useRegister(masm, Int32OperandId(rhsId)));
+ break;
+
+ case Scalar::Float32:
+ case Scalar::Float64:
+ allocator.ensureDoubleRegister(masm, NumberOperandId(rhsId),
+ floatScratch0);
+ break;
+
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
+ break;
+
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+
+ AutoScratchRegister scratch1(allocator, masm);
+ Maybe<AutoScratchRegister> scratch2;
+ Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
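+  // BigInt stores need a full second scratch register for the 64-bit value;
+  // other element types only need a temp for the Spectre bounds check.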
+ if (Scalar::isBigIntType(elementType)) {
+ scratch2.emplace(allocator, masm);
+ } else {
+ spectreScratch.emplace(allocator, masm);
+ }
+
+ FailurePath* failure = nullptr;
+ if (!handleOOB) {
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+ }
+
+  // Bounds check. With |handleOOB|, an out-of-bounds index skips the store
+  // instead of taking the failure path.
+ Label done;
+ Register spectreTemp = scratch2 ? scratch2->get() : spectreScratch->get();
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
+ masm.spectreBoundsCheckPtr(index, scratch1, spectreTemp,
+ handleOOB ? &done : failure->label());
+
+ // Load the elements vector.
+ masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);
+
+ BaseIndex dest(scratch1, index, ScaleFromScalarType(elementType));
+
+ if (Scalar::isBigIntType(elementType)) {
+#ifdef JS_PUNBOX64
+ Register64 temp(scratch2->get());
+#else
+ // We don't have more registers available on x86, so spill |obj|.
+ masm.push(obj);
+ Register64 temp(scratch2->get(), obj);
+#endif
+
+ masm.loadBigInt64(*valBigInt, temp);
+ masm.storeToTypedBigIntArray(elementType, temp, dest);
+
+#ifndef JS_PUNBOX64
+ masm.pop(obj);
+#endif
+ } else if (elementType == Scalar::Float32) {
+ ScratchFloat32Scope fpscratch(masm);
+ masm.convertDoubleToFloat32(floatScratch0, fpscratch);
+ masm.storeToTypedFloatArray(elementType, fpscratch, dest);
+ } else if (elementType == Scalar::Float64) {
+ masm.storeToTypedFloatArray(elementType, floatScratch0, dest);
+ } else {
+ masm.storeToTypedIntArray(elementType, *valInt32, dest);
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+static gc::Heap InitialBigIntHeap(JSContext* cx) {
+ JS::Zone* zone = cx->zone();
+ return zone->allocNurseryBigInts() ? gc::Heap::Default : gc::Heap::Tenured;
+}
+
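+// Allocate a BigInt inline in the requested heap. If inline allocation fails,
+// fall back to an ABI call to jit::AllocateBigIntNoGC, which allocates without
+// triggering a GC.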
+static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
+ Register temp, const LiveRegisterSet& liveSet,
+ gc::Heap initialHeap, Label* fail) {
+ Label fallback, done;
+ masm.newGCBigInt(result, temp, initialHeap, &fallback);
+ masm.jump(&done);
+ {
+ masm.bind(&fallback);
+
+ // Request a minor collection at a later time if nursery allocation failed.
+ bool requestMinorGC = initialHeap == gc::Heap::Default;
+
+ masm.PushRegsInMask(liveSet);
+ using Fn = void* (*)(JSContext* cx, bool requestMinorGC);
+ masm.setupUnalignedABICall(temp);
+ masm.loadJSContext(temp);
+ masm.passABIArg(temp);
+ masm.move32(Imm32(requestMinorGC), result);
+ masm.passABIArg(result);
+ masm.callWithABI<Fn, jit::AllocateBigIntNoGC>();
+ masm.storeCallPointerResult(result);
+
+ masm.PopRegsInMask(liveSet);
+ masm.branchPtr(Assembler::Equal, result, ImmWord(0), fail);
+ }
+ masm.bind(&done);
+}
+
+bool CacheIRCompiler::emitLoadTypedArrayElementResult(
+ ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
+ bool handleOOB, bool forceDoubleForUint32) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+#ifdef JS_PUNBOX64
+ AutoScratchRegister scratch2(allocator, masm);
+#else
+ // There are too few registers available on x86, so we may need to reuse the
+ // output's scratch register.
+ AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
+#endif
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Bounds check.
+ Label outOfBounds;
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
+ masm.spectreBoundsCheckPtr(index, scratch1, scratch2,
+ handleOOB ? &outOfBounds : failure->label());
+
+ // Allocate BigInt if needed. The code after this should be infallible.
+ Maybe<Register> bigInt;
+ if (Scalar::isBigIntType(elementType)) {
+ bigInt.emplace(output.valueReg().scratchReg());
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ save.takeUnchecked(scratch1);
+ save.takeUnchecked(scratch2);
+ save.takeUnchecked(output);
+
+ gc::Heap initialHeap = InitialBigIntHeap(cx_);
+ EmitAllocateBigInt(masm, *bigInt, scratch1, save, initialHeap,
+ failure->label());
+ }
+
+ // Load the elements vector.
+ masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);
+
+ // Load the value.
+ BaseIndex source(scratch1, index, ScaleFromScalarType(elementType));
+
+ if (Scalar::isBigIntType(elementType)) {
+#ifdef JS_PUNBOX64
+ Register64 temp(scratch2);
+#else
+ // We don't have more registers available on x86, so spill |obj| and
+ // additionally use the output's type register.
+ MOZ_ASSERT(output.valueReg().scratchReg() != output.valueReg().typeReg());
+ masm.push(obj);
+ Register64 temp(output.valueReg().typeReg(), obj);
+#endif
+
+ masm.loadFromTypedBigIntArray(elementType, source, *bigInt, temp);
+
+#ifndef JS_PUNBOX64
+ masm.pop(obj);
+#endif
+
+ masm.tagValue(JSVAL_TYPE_BIGINT, *bigInt, output.valueReg());
+ } else {
+ MacroAssembler::Uint32Mode uint32Mode =
+ forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
+ : MacroAssembler::Uint32Mode::FailOnDouble;
+ masm.loadFromTypedArray(elementType, source, output.valueReg(), uint32Mode,
+ scratch1, failure->label());
+ }
+
+ if (handleOOB) {
+ Label done;
+ masm.jump(&done);
+
+ masm.bind(&outOfBounds);
+ masm.moveValue(UndefinedValue(), output.valueReg());
+
+ masm.bind(&done);
+ }
+
+ return true;
+}
+
+static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
+ Register obj, Register offset,
+ Register scratch, Label* fail) {
+ // Ensure both offset < length and offset + (byteSize - 1) < length.
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ if (byteSize == 1) {
+ masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
+ } else {
+    // scratch := length - (byteSize - 1)
+    // if scratch < 0: fail
+    // if offset >= scratch: fail
+ masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), scratch, fail);
+ masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
+ }
+}
+
+bool CacheIRCompiler::emitLoadDataViewValueResult(
+ ObjOperandId objId, IntPtrOperandId offsetId,
+ BooleanOperandId littleEndianId, Scalar::Type elementType,
+ bool forceDoubleForUint32) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register offset = allocator.useRegister(masm, offsetId);
+ Register littleEndian = allocator.useRegister(masm, littleEndianId);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+
+ Register64 outputReg64 = output.valueReg().toRegister64();
+ Register outputScratch = outputReg64.scratchReg();
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ const size_t byteSize = Scalar::byteSize(elementType);
+
+ EmitDataViewBoundsCheck(masm, byteSize, obj, offset, outputScratch,
+ failure->label());
+
+ masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);
+
+ // Load the value.
+ BaseIndex source(outputScratch, offset, TimesOne);
+ switch (elementType) {
+ case Scalar::Int8:
+ masm.load8SignExtend(source, outputScratch);
+ break;
+ case Scalar::Uint8:
+ masm.load8ZeroExtend(source, outputScratch);
+ break;
+ case Scalar::Int16:
+ masm.load16UnalignedSignExtend(source, outputScratch);
+ break;
+ case Scalar::Uint16:
+ masm.load16UnalignedZeroExtend(source, outputScratch);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ masm.load32Unaligned(source, outputScratch);
+ break;
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.load64Unaligned(source, outputReg64);
+ break;
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+
+ // Swap the bytes in the loaded value.
+ if (byteSize > 1) {
+ Label skip;
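+    // No byte swap is needed when the requested endianness matches the
+    // native endianness.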
+ masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
+ littleEndian, Imm32(0), &skip);
+
+ switch (elementType) {
+ case Scalar::Int16:
+ masm.byteSwap16SignExtend(outputScratch);
+ break;
+ case Scalar::Uint16:
+ masm.byteSwap16ZeroExtend(outputScratch);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ masm.byteSwap32(outputScratch);
+ break;
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.byteSwap64(outputReg64);
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid type");
+ }
+
+ masm.bind(&skip);
+ }
+
+ // Move the value into the output register.
+ switch (elementType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ masm.tagValue(JSVAL_TYPE_INT32, outputScratch, output.valueReg());
+ break;
+ case Scalar::Uint32: {
+ MacroAssembler::Uint32Mode uint32Mode =
+ forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
+ : MacroAssembler::Uint32Mode::FailOnDouble;
+ masm.boxUint32(outputScratch, output.valueReg(), uint32Mode,
+ failure->label());
+ break;
+ }
+ case Scalar::Float32: {
+ FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
+ masm.moveGPRToFloat32(outputScratch, scratchFloat32);
+ masm.canonicalizeFloat(scratchFloat32);
+ masm.convertFloat32ToDouble(scratchFloat32, floatScratch0);
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+ break;
+ }
+ case Scalar::Float64:
+ masm.moveGPR64ToDouble(outputReg64, floatScratch0);
+ masm.canonicalizeDouble(floatScratch0);
+ masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
+ break;
+ case Scalar::BigInt64:
+ case Scalar::BigUint64: {
+ // We need two extra registers. Reuse the obj/littleEndian registers.
+ Register bigInt = obj;
+ Register bigIntScratch = littleEndian;
+ masm.push(bigInt);
+ masm.push(bigIntScratch);
+ Label fail, done;
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ save.takeUnchecked(bigInt);
+ save.takeUnchecked(bigIntScratch);
+ gc::Heap initialHeap = InitialBigIntHeap(cx_);
+ EmitAllocateBigInt(masm, bigInt, bigIntScratch, save, initialHeap, &fail);
+ masm.jump(&done);
+
+ masm.bind(&fail);
+ masm.pop(bigIntScratch);
+ masm.pop(bigInt);
+ masm.jump(failure->label());
+
+ masm.bind(&done);
+ masm.initializeBigInt64(elementType, bigInt, outputReg64);
+ masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
+ masm.pop(bigIntScratch);
+ masm.pop(bigInt);
+ break;
+ }
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+
+ return true;
+}
+
+bool CacheIRCompiler::emitStoreDataViewValueResult(
+ ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
+ BooleanOperandId littleEndianId, Scalar::Type elementType) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+#ifdef JS_CODEGEN_X86
+  // Use a scratch register to avoid running out of registers.
+ Register obj = output.valueReg().typeReg();
+ allocator.copyToScratchRegister(masm, objId, obj);
+#else
+ Register obj = allocator.useRegister(masm, objId);
+#endif
+ Register offset = allocator.useRegister(masm, offsetId);
+ Register littleEndian = allocator.useRegister(masm, littleEndianId);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ Maybe<Register> valInt32;
+ Maybe<Register> valBigInt;
+ switch (elementType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Uint8Clamped:
+ valInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
+ break;
+
+ case Scalar::Float32:
+ case Scalar::Float64:
+ allocator.ensureDoubleRegister(masm, NumberOperandId(valueId),
+ floatScratch0);
+ break;
+
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
+ break;
+
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ MOZ_CRASH("Unsupported type");
+ }
+
+ Register scratch1 = output.valueReg().scratchReg();
+ MOZ_ASSERT(scratch1 != obj, "scratchReg must not be typeReg");
+
+  // On platforms with enough registers, |scratch2| is an extra scratch
+  // register (or register pair) used for byte-swapping the value.
+#ifndef JS_CODEGEN_X86
+ mozilla::MaybeOneOf<AutoScratchRegister, AutoScratchRegister64> scratch2;
+ switch (elementType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ scratch2.construct<AutoScratchRegister>(allocator, masm);
+ break;
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ scratch2.construct<AutoScratchRegister64>(allocator, masm);
+ break;
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid type");
+ }
+#endif
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ const size_t byteSize = Scalar::byteSize(elementType);
+
+ EmitDataViewBoundsCheck(masm, byteSize, obj, offset, scratch1,
+ failure->label());
+
+ masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
+ BaseIndex dest(scratch1, offset, TimesOne);
+
+ if (byteSize == 1) {
+ // Byte swapping has no effect, so just do the byte store.
+ masm.store8(*valInt32, dest);
+ masm.moveValue(UndefinedValue(), output.valueReg());
+ return true;
+ }
+
+  // On 32-bit x86, |obj| is already a scratch register, so reuse it. If a
+  // Register64 is needed, also take the littleEndian register and use its
+  // stack slot for the endianness check below.
+ bool pushedLittleEndian = false;
+#ifdef JS_CODEGEN_X86
+ if (byteSize == 8) {
+ masm.push(littleEndian);
+ pushedLittleEndian = true;
+ }
+ auto valScratch32 = [&]() -> Register { return obj; };
+ auto valScratch64 = [&]() -> Register64 {
+ return Register64(obj, littleEndian);
+ };
+#else
+ auto valScratch32 = [&]() -> Register {
+ return scratch2.ref<AutoScratchRegister>();
+ };
+ auto valScratch64 = [&]() -> Register64 {
+ return scratch2.ref<AutoScratchRegister64>();
+ };
+#endif
+
+ // Load the value into a gpr register.
+ switch (elementType) {
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.move32(*valInt32, valScratch32());
+ break;
+ case Scalar::Float32: {
+ FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
+ masm.convertDoubleToFloat32(floatScratch0, scratchFloat32);
+ masm.canonicalizeFloatIfDeterministic(scratchFloat32);
+ masm.moveFloat32ToGPR(scratchFloat32, valScratch32());
+ break;
+ }
+ case Scalar::Float64: {
+ masm.canonicalizeDoubleIfDeterministic(floatScratch0);
+ masm.moveDoubleToGPR64(floatScratch0, valScratch64());
+ break;
+ }
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.loadBigInt64(*valBigInt, valScratch64());
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid type");
+ }
+
+ // Swap the bytes in the loaded value.
+ Label skip;
+ if (pushedLittleEndian) {
+ masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
+ Address(masm.getStackPointer(), 0), Imm32(0), &skip);
+ } else {
+ masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
+ littleEndian, Imm32(0), &skip);
+ }
+ switch (elementType) {
+ case Scalar::Int16:
+ masm.byteSwap16SignExtend(valScratch32());
+ break;
+ case Scalar::Uint16:
+ masm.byteSwap16ZeroExtend(valScratch32());
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ masm.byteSwap32(valScratch32());
+ break;
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.byteSwap64(valScratch64());
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid type");
+ }
+ masm.bind(&skip);
+
+ // Store the value.
+ switch (elementType) {
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.store16Unaligned(valScratch32(), dest);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ masm.store32Unaligned(valScratch32(), dest);
+ break;
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.store64Unaligned(valScratch64(), dest);
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+
+#ifdef JS_CODEGEN_X86
+ // Restore registers.
+ if (pushedLittleEndian) {
+ masm.pop(littleEndian);
+ }
+#endif
+
+ masm.moveValue(UndefinedValue(), output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitStoreFixedSlotUndefinedResult(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
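+  // The fixed slot's byte offset is stored in the stub data; |obj| plus this
+  // offset addresses the slot directly.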
+ StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
+ emitLoadStubField(offset, scratch);
+
+ BaseIndex slot(obj, scratch, TimesOne);
+ EmitPreBarrier(masm, slot, MIRType::Value);
+ masm.storeValue(val, slot);
+ emitPostBarrierSlot(obj, val, scratch);
+
+ masm.moveValue(UndefinedValue(), output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadObjectResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+
+ EmitStoreResult(masm, obj, JSVAL_TYPE_OBJECT, output);
+
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadStringResult(StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register str = allocator.useRegister(masm, strId);
+
+ masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadSymbolResult(SymbolOperandId symId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register sym = allocator.useRegister(masm, symId);
+
+ masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadInt32Result(Int32OperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register val = allocator.useRegister(masm, valId);
+
+ masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadBigIntResult(BigIntOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register val = allocator.useRegister(masm, valId);
+
+ masm.tagValue(JSVAL_TYPE_BIGINT, val, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDoubleResult(NumberOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+
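+  // The input is either an int32 or a double Value (asserted below), so
+  // normalize int32 inputs to ensure the result is always a double.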
+#ifdef DEBUG
+ Label ok;
+ masm.branchTestDouble(Assembler::Equal, val, &ok);
+ masm.branchTestInt32(Assembler::Equal, val, &ok);
+ masm.assumeUnreachable("input must be double or int32");
+ masm.bind(&ok);
+#endif
+
+ masm.moveValue(val, output.valueReg());
+ masm.convertInt32ValueToDouble(output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
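+  // typeOfObject classifies the common cases inline; anything it cannot
+  // decide takes the slow path and calls TypeOfNameObject in C++.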
+ Label slowCheck, isObject, isCallable, isUndefined, done;
+ masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
+ &isUndefined);
+
+ masm.bind(&isCallable);
+ masm.moveValue(StringValue(cx_->names().function), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&isUndefined);
+ masm.moveValue(StringValue(cx_->names().undefined), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&isObject);
+ masm.moveValue(StringValue(cx_->names().object), output.valueReg());
+ masm.jump(&done);
+
+ {
+ masm.bind(&slowCheck);
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = JSString* (*)(JSObject* obj, JSRuntime* rt);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(obj);
+ masm.movePtr(ImmPtr(cx_->runtime()), scratch);
+ masm.passABIArg(scratch);
+ masm.callWithABI<Fn, TypeOfNameObject>();
+ masm.storeCallPointerResult(scratch);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ ValueOperand val = allocator.useValueRegister(masm, inputId);
+
+ Label ifFalse, done;
+ masm.branchTestInt32Truthy(false, val, &ifFalse);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadStringTruthyResult(StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register str = allocator.useRegister(masm, strId);
+
+ Label ifFalse, done;
+ masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
+ Imm32(0), &ifFalse);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDoubleTruthyResult(NumberOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ AutoScratchFloatRegister floatReg(this);
+
+ allocator.ensureDoubleRegister(masm, inputId, floatReg);
+
+ Label ifFalse, done;
+
+ masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadObjectTruthyResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
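+  // Objects are truthy unless their class emulates undefined. Cases that
+  // cannot be decided inline call js::EmulatesUndefined and invert the
+  // result below.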
+ Label emulatesUndefined, slowPath, done;
+ masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
+ &emulatesUndefined);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&emulatesUndefined);
+ masm.moveValue(BooleanValue(false), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&slowPath);
+ {
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch);
+ volatileRegs.takeUnchecked(output);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSObject* obj);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, js::EmulatesUndefined>();
+ masm.storeCallBoolResult(scratch);
+ masm.xor32(Imm32(1), scratch);
+
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadBigIntTruthyResult(BigIntOperandId bigIntId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register bigInt = allocator.useRegister(masm, bigIntId);
+
+ Label ifFalse, done;
+ masm.branch32(Assembler::Equal,
+ Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(0),
+ &ifFalse);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadValueTruthyResult(ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ ValueOperand value = allocator.useValueRegister(masm, inputId);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchFloatRegister floatReg(this);
+
+ Label ifFalse, ifTrue, done;
+
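+  // Dispatch on the value's type tag and apply ToBoolean semantics for each
+  // case; doubles fall through to the truthiness check at the end.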
+ {
+ ScratchTagScope tag(masm, value);
+ masm.splitTagForTest(value, tag);
+
+ masm.branchTestUndefined(Assembler::Equal, tag, &ifFalse);
+ masm.branchTestNull(Assembler::Equal, tag, &ifFalse);
+
+ Label notBoolean;
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.branchTestBooleanTruthy(false, value, &ifFalse);
+ masm.jump(&ifTrue);
+ }
+ masm.bind(&notBoolean);
+
+ Label notInt32;
+ masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.branchTestInt32Truthy(false, value, &ifFalse);
+ masm.jump(&ifTrue);
+ }
+ masm.bind(&notInt32);
+
+ Label notObject;
+ masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ Register obj = masm.extractObject(value, scratch1);
+
+ Label slowPath;
+ masm.branchIfObjectEmulatesUndefined(obj, scratch2, &slowPath, &ifFalse);
+ masm.jump(&ifTrue);
+
+ masm.bind(&slowPath);
+ {
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch1);
+ volatileRegs.takeUnchecked(scratch2);
+ volatileRegs.takeUnchecked(output);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSObject* obj);
+ masm.setupUnalignedABICall(scratch2);
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, js::EmulatesUndefined>();
+ masm.storeCallPointerResult(scratch2);
+
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.branchIfTrueBool(scratch2, &ifFalse);
+ masm.jump(&ifTrue);
+ }
+ }
+ masm.bind(&notObject);
+
+ Label notString;
+ masm.branchTestString(Assembler::NotEqual, tag, &notString);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.branchTestStringTruthy(false, value, &ifFalse);
+ masm.jump(&ifTrue);
+ }
+ masm.bind(&notString);
+
+ Label notBigInt;
+ masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.branchTestBigIntTruthy(false, value, &ifFalse);
+ masm.jump(&ifTrue);
+ }
+ masm.bind(&notBigInt);
+
+ masm.branchTestSymbol(Assembler::Equal, tag, &ifTrue);
+
+#ifdef DEBUG
+ Label isDouble;
+ masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
+ masm.assumeUnreachable("Unexpected value type");
+ masm.bind(&isDouble);
+#endif
+
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.unboxDouble(value, floatReg);
+ masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
+ }
+
+ // Fall through to true case.
+ }
+
+ masm.bind(&ifTrue);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
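+
+// Editor's note: the block below is an illustrative sketch added in review,
+// not part of the original patch. emitLoadValueTruthyResult dispatches on the
+// value's type tag to compute the JS ToBoolean result; the same decision tree
+// over a simplified tagged value looks roughly like this (SimpleValue and
+// Truthy are hypothetical names; objects, symbols and BigInts are omitted for
+// brevity).
+#include <cstdint>
+#include <string>
+#include <variant>
+
+namespace truthy_sketch {
+
+struct Undefined {};
+struct Null {};
+using SimpleValue =
+    std::variant<Undefined, Null, bool, int32_t, double, std::string>;
+
+static bool Truthy(const SimpleValue& v) {
+  if (std::holds_alternative<Undefined>(v)) return false;
+  if (std::holds_alternative<Null>(v)) return false;
+  if (auto* b = std::get_if<bool>(&v)) return *b;
+  if (auto* i = std::get_if<int32_t>(&v)) return *i != 0;
+  // Doubles: +0, -0 and NaN are falsy; NaN is caught via self-inequality.
+  if (auto* d = std::get_if<double>(&v)) return *d != 0.0 && *d == *d;
+  return !std::get<std::string>(v).empty();  // Non-empty strings are truthy.
+}
+
+}  // namespace truthy_sketch
+
+// Objects are the interesting case handled above: they are truthy unless
+// their class emulates undefined, which the stub checks inline and otherwise
+// resolves via the js::EmulatesUndefined ABI call.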
+
+bool CacheIRCompiler::emitComparePointerResultShared(JSOp op,
+ TypedOperandId lhsId,
+ TypedOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ Register left = allocator.useRegister(masm, lhsId);
+ Register right = allocator.useRegister(masm, rhsId);
+
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Label ifTrue, done;
+ masm.branchPtr(JSOpToCondition(op, /* signed = */ true), left, right,
+ &ifTrue);
+
+ EmitStoreBoolean(masm, false, output);
+ masm.jump(&done);
+
+ masm.bind(&ifTrue);
+ EmitStoreBoolean(masm, true, output);
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitCompareObjectResult(JSOp op, ObjOperandId lhsId,
+ ObjOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitComparePointerResultShared(op, lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitCompareSymbolResult(JSOp op, SymbolOperandId lhsId,
+ SymbolOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitComparePointerResultShared(op, lhsId, rhsId);
+}
+
+bool CacheIRCompiler::emitCompareInt32Result(JSOp op, Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register left = allocator.useRegister(masm, lhsId);
+ Register right = allocator.useRegister(masm, rhsId);
+
+ Label ifTrue, done;
+ masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);
+
+ EmitStoreBoolean(masm, false, output);
+ masm.jump(&done);
+
+ masm.bind(&ifTrue);
+ EmitStoreBoolean(masm, true, output);
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitCompareDoubleResult(JSOp op, NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
+ allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
+
+ Label done, ifTrue;
+ masm.branchDouble(JSOpToDoubleCondition(op), floatScratch0, floatScratch1,
+ &ifTrue);
+ EmitStoreBoolean(masm, false, output);
+ masm.jump(&done);
+
+ masm.bind(&ifTrue);
+ EmitStoreBoolean(masm, true, output);
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitCompareBigIntResult(JSOp op, BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ masm.setupUnalignedABICall(scratch);
+
+  // Pass the ABI arguments in reverse order for JSOp::Le and JSOp::Gt:
+ // - |left <= right| is implemented as |right >= left|.
+ // - |left > right| is implemented as |right < left|.
+ if (op == JSOp::Le || op == JSOp::Gt) {
+ masm.passABIArg(rhs);
+ masm.passABIArg(lhs);
+ } else {
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ }
+
+ using Fn = bool (*)(BigInt*, BigInt*);
+ Fn fn;
+ if (op == JSOp::Eq || op == JSOp::StrictEq) {
+ fn = jit::BigIntEqual<EqualityKind::Equal>;
+ } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
+ fn = jit::BigIntEqual<EqualityKind::NotEqual>;
+ } else if (op == JSOp::Lt || op == JSOp::Gt) {
+ fn = jit::BigIntCompare<ComparisonKind::LessThan>;
+ } else {
+ MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
+ fn = jit::BigIntCompare<ComparisonKind::GreaterThanOrEqual>;
+ }
+
+ masm.callWithABI(DynamicFunction<Fn>(fn));
+ masm.storeCallBoolResult(scratch);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
+ return true;
+}
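+
+// Editor's note: an illustrative sketch added in review, not part of the
+// original patch. The operand swap in emitCompareBigIntResult relies on two
+// ordering identities, so a single "less than" and a single "greater than or
+// equal" helper cover all four relational ops. A minimal self-check (the
+// helpers are hypothetical stand-ins for jit::BigIntCompare):
+#include <cassert>
+
+namespace compare_swap_sketch {
+
+static bool LessThan(int lhs, int rhs) { return lhs < rhs; }
+static bool GreaterThanOrEqual(int lhs, int rhs) { return lhs >= rhs; }
+
+static void CheckSwapIdentities(int a, int b) {
+  assert((a <= b) == GreaterThanOrEqual(b, a));  // JSOp::Le via swapped Ge.
+  assert((a > b) == LessThan(b, a));             // JSOp::Gt via swapped Lt.
+  assert((a < b) == LessThan(a, b));             // JSOp::Lt, operands as-is.
+  assert((a >= b) == GreaterThanOrEqual(a, b));  // JSOp::Ge, operands as-is.
+}
+
+}  // namespace compare_swap_sketch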
+
+bool CacheIRCompiler::emitCompareBigIntInt32Result(JSOp op,
+ BigIntOperandId lhsId,
+ Int32OperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register bigInt = allocator.useRegister(masm, lhsId);
+ Register int32 = allocator.useRegister(masm, rhsId);
+
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ Label ifTrue, ifFalse;
+ masm.compareBigIntAndInt32(op, bigInt, int32, scratch1, scratch2, &ifTrue,
+ &ifFalse);
+
+ Label done;
+ masm.bind(&ifFalse);
+ EmitStoreBoolean(masm, false, output);
+ masm.jump(&done);
+
+ masm.bind(&ifTrue);
+ EmitStoreBoolean(masm, true, output);
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitCompareBigIntNumberResult(JSOp op,
+ BigIntOperandId lhsId,
+ NumberOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+
+ Register lhs = allocator.useRegister(masm, lhsId);
+ allocator.ensureDoubleRegister(masm, rhsId, floatScratch0);
+
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ masm.setupUnalignedABICall(scratch);
+
+  // Pass the ABI arguments in reverse order for JSOp::Le and JSOp::Gt:
+ // - |left <= right| is implemented as |right >= left|.
+ // - |left > right| is implemented as |right < left|.
+ if (op == JSOp::Le || op == JSOp::Gt) {
+ masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
+ masm.passABIArg(lhs);
+ } else {
+ masm.passABIArg(lhs);
+ masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
+ }
+
+ using FnBigIntNumber = bool (*)(BigInt*, double);
+ using FnNumberBigInt = bool (*)(double, BigInt*);
+ switch (op) {
+ case JSOp::Eq: {
+ masm.callWithABI<FnBigIntNumber,
+ jit::BigIntNumberEqual<EqualityKind::Equal>>();
+ break;
+ }
+ case JSOp::Ne: {
+ masm.callWithABI<FnBigIntNumber,
+ jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
+ break;
+ }
+ case JSOp::Lt: {
+ masm.callWithABI<FnBigIntNumber,
+ jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
+ break;
+ }
+ case JSOp::Gt: {
+ masm.callWithABI<FnNumberBigInt,
+ jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
+ break;
+ }
+ case JSOp::Le: {
+ masm.callWithABI<
+ FnNumberBigInt,
+ jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
+ break;
+ }
+ case JSOp::Ge: {
+ masm.callWithABI<
+ FnBigIntNumber,
+ jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
+ break;
+ }
+ default:
+ MOZ_CRASH("unhandled op");
+ }
+
+ masm.storeCallBoolResult(scratch);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
+ return true;
+}
+
+bool CacheIRCompiler::emitCompareBigIntStringResult(JSOp op,
+ BigIntOperandId lhsId,
+ StringOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ callvm.prepare();
+
+ // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
+ // - |left <= right| is implemented as |right >= left|.
+ // - |left > right| is implemented as |right < left|.
+ if (op == JSOp::Le || op == JSOp::Gt) {
+ masm.Push(lhs);
+ masm.Push(rhs);
+ } else {
+ masm.Push(rhs);
+ masm.Push(lhs);
+ }
+
+ using FnBigIntString =
+ bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
+ using FnStringBigInt =
+ bool (*)(JSContext*, HandleString, HandleBigInt, bool*);
+
+ switch (op) {
+ case JSOp::Eq: {
+ constexpr auto Equal = EqualityKind::Equal;
+ callvm.call<FnBigIntString, BigIntStringEqual<Equal>>();
+ break;
+ }
+ case JSOp::Ne: {
+ constexpr auto NotEqual = EqualityKind::NotEqual;
+ callvm.call<FnBigIntString, BigIntStringEqual<NotEqual>>();
+ break;
+ }
+ case JSOp::Lt: {
+ constexpr auto LessThan = ComparisonKind::LessThan;
+ callvm.call<FnBigIntString, BigIntStringCompare<LessThan>>();
+ break;
+ }
+ case JSOp::Gt: {
+ constexpr auto LessThan = ComparisonKind::LessThan;
+ callvm.call<FnStringBigInt, StringBigIntCompare<LessThan>>();
+ break;
+ }
+ case JSOp::Le: {
+ constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
+ callvm.call<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>();
+ break;
+ }
+ case JSOp::Ge: {
+ constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
+ callvm.call<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>();
+ break;
+ }
+ default:
+ MOZ_CRASH("unhandled op");
+ }
+ return true;
+}
+
+bool CacheIRCompiler::emitCompareNullUndefinedResult(JSOp op, bool isUndefined,
+ ValOperandId inputId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ ValueOperand input = allocator.useValueRegister(masm, inputId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ if (IsStrictEqualityOp(op)) {
+ if (isUndefined) {
+ masm.testUndefinedSet(JSOpToCondition(op, false), input, scratch);
+ } else {
+ masm.testNullSet(JSOpToCondition(op, false), input, scratch);
+ }
+ EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
+ return true;
+ }
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ MOZ_ASSERT(IsLooseEqualityOp(op));
+
+ Label nullOrLikeUndefined, notNullOrLikeUndefined, done;
+ {
+ ScratchTagScope tag(masm, input);
+ masm.splitTagForTest(input, tag);
+
+ if (isUndefined) {
+ masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
+ masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
+ } else {
+ masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
+ masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
+ }
+ masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);
+
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ masm.unboxObject(input, scratch);
+ masm.branchIfObjectEmulatesUndefined(scratch, scratch, failure->label(),
+ &nullOrLikeUndefined);
+ masm.jump(&notNullOrLikeUndefined);
+ }
+ }
+
+ masm.bind(&nullOrLikeUndefined);
+ EmitStoreBoolean(masm, op == JSOp::Eq, output);
+ masm.jump(&done);
+
+ masm.bind(&notNullOrLikeUndefined);
+ EmitStoreBoolean(masm, op == JSOp::Ne, output);
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitCompareDoubleSameValueResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+ AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
+ AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
+
+ allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
+ allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
+
+ masm.sameValueDouble(floatScratch0, floatScratch1, floatScratch2, scratch);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitIndirectTruncateInt32Result(Int32OperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register val = allocator.useRegister(masm, valId);
+
+ if (output.hasValue()) {
+ masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
+ } else {
+ masm.mov(val, output.typedReg().gpr());
+ }
+ return true;
+}
+
+bool CacheIRCompiler::emitCallPrintString(const char* str) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ masm.printf(str);
+ return true;
+}
+
+bool CacheIRCompiler::emitBreakpoint() {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ masm.breakpoint();
+ return true;
+}
+
+void CacheIRCompiler::emitPostBarrierShared(Register obj,
+ const ConstantOrRegister& val,
+ Register scratch,
+ Register maybeIndex) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (val.constant()) {
+ MOZ_ASSERT_IF(val.value().isGCThing(),
+ !IsInsideNursery(val.value().toGCThing()));
+ return;
+ }
+
+ TypedOrValueRegister reg = val.reg();
+ if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
+ return;
+ }
+
+ Label skipBarrier;
+ if (reg.hasValue()) {
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, reg.valueReg(), scratch,
+ &skipBarrier);
+ } else {
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, reg.typedReg().gpr(),
+ scratch, &skipBarrier);
+ }
+ masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);
+
+ // Call one of these, depending on maybeIndex:
+ //
+ // void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
+ // void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
+ // int32_t index);
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+ masm.setupUnalignedABICall(scratch);
+ masm.movePtr(ImmPtr(cx_->runtime()), scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(obj);
+ if (maybeIndex != InvalidReg) {
+ masm.passABIArg(maybeIndex);
+ using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
+ masm.callWithABI<Fn, PostWriteElementBarrier<IndexInBounds::Yes>>();
+ } else {
+ using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
+ masm.callWithABI<Fn, PostWriteBarrier>();
+ }
+ masm.PopRegsInMask(save);
+
+ masm.bind(&skipBarrier);
+}
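+
+// Editor's note: an illustrative sketch added in review, not part of the
+// original patch. The two nursery-chunk branches in emitPostBarrierShared
+// implement the usual generational-GC filter: a post write barrier is needed
+// only when a nursery thing is stored into a tenured holder. As a predicate
+// (the name and parameters are hypothetical):
+namespace post_barrier_sketch {
+
+static bool NeedsPostWriteBarrier(bool valueInNursery, bool holderInNursery) {
+  // Skip the barrier if the stored value is tenured (or not a GC thing at
+  // all), or if the holder itself is still in the nursery and will be traced
+  // by the next minor GC anyway.
+  return valueInNursery && !holderInNursery;
+}
+
+}  // namespace post_barrier_sketch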
+
+bool CacheIRCompiler::emitWrapResult() {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label done;
+ // We only have to wrap objects, because we are in the same zone.
+ masm.branchTestObject(Assembler::NotEqual, output.valueReg(), &done);
+
+ Register obj = output.valueReg().scratchReg();
+ masm.unboxObject(output.valueReg(), obj);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = JSObject* (*)(JSContext* cx, JSObject* obj);
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, WrapObjectPure>();
+ masm.storeCallPointerResult(obj);
+
+ LiveRegisterSet ignore;
+ ignore.add(obj);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+  // A nullptr result means we could not get a wrapper for this object.
+ masm.branchTestPtr(Assembler::Zero, obj, obj, failure->label());
+
+ // We clobbered the output register, so we have to retag.
+ masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult(ObjOperandId objId,
+ ValOperandId idId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand idVal = allocator.useValueRegister(masm, idId);
+
+#ifdef JS_CODEGEN_X86
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
+#else
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+#endif
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+#ifdef JS_CODEGEN_X86
+ masm.xorPtr(scratch2, scratch2);
+#else
+ Label cacheHit;
+ if (JitOptions.enableWatchtowerMegamorphic) {
+ masm.emitMegamorphicCacheLookupByValue(
+ idVal, obj, scratch1, scratch3, scratch2, output.valueReg(), &cacheHit);
+ } else {
+ masm.xorPtr(scratch2, scratch2);
+ }
+#endif
+
+ masm.branchIfNonNativeObj(obj, scratch1, failure->label());
+
+ // idVal will be in vp[0], result will be stored in vp[1].
+ masm.reserveStack(sizeof(Value));
+ masm.Push(idVal);
+ masm.moveStackPtrTo(idVal.scratchReg());
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch1);
+ volatileRegs.takeUnchecked(idVal);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj,
+ MegamorphicCache::Entry* cacheEntry, Value* vp);
+ masm.setupUnalignedABICall(scratch1);
+ masm.loadJSContext(scratch1);
+ masm.passABIArg(scratch1);
+ masm.passABIArg(obj);
+ masm.passABIArg(scratch2);
+ masm.passABIArg(idVal.scratchReg());
+ masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
+
+ masm.storeCallPointerResult(scratch1);
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.Pop(idVal);
+
+ Label ok;
+ uint32_t framePushed = masm.framePushed();
+ masm.branchIfTrueBool(scratch1, &ok);
+ masm.adjustStack(sizeof(Value));
+ masm.jump(failure->label());
+
+ masm.bind(&ok);
+ masm.setFramePushed(framePushed);
+ masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
+ masm.adjustStack(sizeof(Value));
+
+#ifndef JS_CODEGEN_X86
+ masm.bind(&cacheHit);
+#endif
+ return true;
+}
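+
+// Editor's note: an illustrative sketch added in review, not part of the
+// original patch. The stack shuffling above passes |vp| as a two-slot in/out
+// buffer: the id goes in through vp[0] and the property value comes back in
+// vp[1], while the boolean return value selects the fast path or the failure
+// path. The callee-side contract, reduced to plain C++ (names hypothetical):
+namespace vp_convention_sketch {
+
+static bool LookupSketch(int* vp) {
+  int id = vp[0];   // The caller pushed the id into the first slot.
+  vp[1] = id * 2;   // The result is written into the reserved slot above it.
+  return true;      // Returning false sends the caller to the failure path.
+}
+
+}  // namespace vp_convention_sketch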
+
+bool CacheIRCompiler::emitMegamorphicHasPropResult(ObjOperandId objId,
+ ValOperandId idId,
+ bool hasOwn) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand idVal = allocator.useValueRegister(masm, idId);
+
+#ifdef JS_CODEGEN_X86
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
+#else
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+#endif
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+#ifndef JS_CODEGEN_X86
+ Label cacheHit, done;
+ if (JitOptions.enableWatchtowerMegamorphic) {
+ masm.emitMegamorphicCacheLookupExists(idVal, obj, scratch1, scratch3,
+ scratch2, output.maybeReg(),
+ &cacheHit, hasOwn);
+ } else {
+ masm.xorPtr(scratch2, scratch2);
+ }
+#else
+ masm.xorPtr(scratch2, scratch2);
+#endif
+
+ masm.branchIfNonNativeObj(obj, scratch1, failure->label());
+
+ // idVal will be in vp[0], result will be stored in vp[1].
+ masm.reserveStack(sizeof(Value));
+ masm.Push(idVal);
+ masm.moveStackPtrTo(idVal.scratchReg());
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch1);
+ volatileRegs.takeUnchecked(idVal);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj,
+ MegamorphicCache::Entry* cacheEntry, Value* vp);
+ masm.setupUnalignedABICall(scratch1);
+ masm.loadJSContext(scratch1);
+ masm.passABIArg(scratch1);
+ masm.passABIArg(obj);
+ masm.passABIArg(scratch2);
+ masm.passABIArg(idVal.scratchReg());
+ if (hasOwn) {
+ masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
+ } else {
+ masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
+ }
+ masm.storeCallPointerResult(scratch1);
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.Pop(idVal);
+
+ Label ok;
+ uint32_t framePushed = masm.framePushed();
+ masm.branchIfTrueBool(scratch1, &ok);
+ masm.adjustStack(sizeof(Value));
+ masm.jump(failure->label());
+
+ masm.bind(&ok);
+ masm.setFramePushed(framePushed);
+ masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
+ masm.adjustStack(sizeof(Value));
+
+#ifndef JS_CODEGEN_X86
+ masm.jump(&done);
+ masm.bind(&cacheHit);
+ if (output.hasValue()) {
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, output.valueReg().scratchReg(),
+ output.valueReg());
+ }
+ masm.bind(&done);
+#endif
+ return true;
+}
+
+bool CacheIRCompiler::emitCallObjectHasSparseElementResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.reserveStack(sizeof(Value));
+ masm.moveStackPtrTo(scratch2.get());
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch1);
+ volatileRegs.takeUnchecked(index);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn =
+ bool (*)(JSContext* cx, NativeObject* obj, int32_t index, Value* vp);
+ masm.setupUnalignedABICall(scratch1);
+ masm.loadJSContext(scratch1);
+ masm.passABIArg(scratch1);
+ masm.passABIArg(obj);
+ masm.passABIArg(index);
+ masm.passABIArg(scratch2);
+ masm.callWithABI<Fn, HasNativeElementPure>();
+ masm.storeCallPointerResult(scratch1);
+ masm.PopRegsInMask(volatileRegs);
+
+ Label ok;
+ uint32_t framePushed = masm.framePushed();
+ masm.branchIfTrueBool(scratch1, &ok);
+ masm.adjustStack(sizeof(Value));
+ masm.jump(failure->label());
+
+ masm.bind(&ok);
+ masm.setFramePushed(framePushed);
+ masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
+ masm.adjustStack(sizeof(Value));
+ return true;
+}
+
+/*
+ * Move a constant value into register dest.
+ */
+void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val,
+ Register dest) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ MOZ_ASSERT(mode_ == Mode::Ion);
+ switch (val.getStubFieldType()) {
+ case StubField::Type::Shape:
+ masm.movePtr(ImmGCPtr(shapeStubField(val.getOffset())), dest);
+ break;
+ case StubField::Type::GetterSetter:
+ masm.movePtr(ImmGCPtr(getterSetterStubField(val.getOffset())), dest);
+ break;
+ case StubField::Type::String:
+ masm.movePtr(ImmGCPtr(stringStubField(val.getOffset())), dest);
+ break;
+ case StubField::Type::JSObject:
+ masm.movePtr(ImmGCPtr(objectStubField(val.getOffset())), dest);
+ break;
+ case StubField::Type::RawPointer:
+ masm.movePtr(ImmPtr(pointerStubField(val.getOffset())), dest);
+ break;
+ case StubField::Type::RawInt32:
+ masm.move32(Imm32(int32StubField(val.getOffset())), dest);
+ break;
+ case StubField::Type::Id:
+ masm.movePropertyKey(idStubField(val.getOffset()), dest);
+ break;
+ default:
+ MOZ_CRASH("Unhandled stub field constant type");
+ }
+}
+
+/*
+ * After this executes, dest contains the value, either via a constant load
+ * or via a load from the stub data.
+ *
+ * The current policy is that Baseline will use loads from the stub data (to
+ * allow IC sharing), whereas Ion doesn't share ICs, and so we can safely use
+ * constants in the IC.
+ */
+void CacheIRCompiler::emitLoadStubField(StubFieldOffset val, Register dest) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
+ emitLoadStubFieldConstant(val, dest);
+ } else {
+ Address load(ICStubReg, stubDataOffset_ + val.getOffset());
+
+ switch (val.getStubFieldType()) {
+ case StubField::Type::RawPointer:
+ case StubField::Type::Shape:
+ case StubField::Type::GetterSetter:
+ case StubField::Type::JSObject:
+ case StubField::Type::Symbol:
+ case StubField::Type::String:
+ case StubField::Type::Id:
+ masm.loadPtr(load, dest);
+ break;
+ case StubField::Type::RawInt32:
+ masm.load32(load, dest);
+ break;
+ default:
+ MOZ_CRASH("Unhandled stub field constant type");
+ }
+ }
+}
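+
+// Editor's note: an illustrative sketch added in review, not part of the
+// original patch. The policy described above, reduced to plain C++ (all names
+// are hypothetical, and the offset is a slot index rather than a byte offset
+// here): Ion bakes the field into the code as an immediate, while Baseline
+// indirects through the stub data so one piece of jitcode can be shared by
+// many ICs.
+#include <cstddef>
+#include <cstdint>
+
+namespace stub_field_sketch {
+
+enum class Policy { Constant, Address };
+
+static intptr_t LoadStubField(Policy policy, intptr_t constant,
+                              const intptr_t* stubData, size_t index) {
+  if (policy == Policy::Constant) {
+    return constant;        // Ion: immediate baked into the jitcode.
+  }
+  return stubData[index];   // Baseline: load from ICStubReg + offset.
+}
+
+}  // namespace stub_field_sketch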
+
+void CacheIRCompiler::emitLoadValueStubField(StubFieldOffset val,
+ ValueOperand dest) {
+ MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Value);
+
+ if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
+ MOZ_ASSERT(mode_ == Mode::Ion);
+ masm.moveValue(valueStubField(val.getOffset()), dest);
+ } else {
+ Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
+ masm.loadValue(addr, dest);
+ }
+}
+
+void CacheIRCompiler::emitLoadDoubleValueStubField(StubFieldOffset val,
+ ValueOperand dest,
+ FloatRegister scratch) {
+ MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Double);
+
+ if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
+ MOZ_ASSERT(mode_ == Mode::Ion);
+ double d = doubleStubField(val.getOffset());
+ masm.moveValue(DoubleValue(d), dest);
+ } else {
+ Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
+ masm.loadDouble(addr, scratch);
+ masm.boxDouble(scratch, dest, scratch);
+ }
+}
+
+bool CacheIRCompiler::emitLoadInstanceOfObjectResult(ValOperandId lhsId,
+ ObjOperandId protoId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
+ Register proto = allocator.useRegister(masm, protoId);
+
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label returnFalse, returnTrue, done;
+ masm.fallibleUnboxObject(lhs, scratch, &returnFalse);
+
+ // LHS is an object. Load its proto.
+ masm.loadObjProto(scratch, scratch);
+ {
+ // Walk the proto chain until we either reach the target object,
+ // nullptr or LazyProto.
+ Label loop;
+ masm.bind(&loop);
+
+ masm.branchPtr(Assembler::Equal, scratch, proto, &returnTrue);
+ masm.branchTestPtr(Assembler::Zero, scratch, scratch, &returnFalse);
+
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+ masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), failure->label());
+
+ masm.loadObjProto(scratch, scratch);
+ masm.jump(&loop);
+ }
+
+ masm.bind(&returnFalse);
+ EmitStoreBoolean(masm, false, output);
+ masm.jump(&done);
+
+ masm.bind(&returnTrue);
+ EmitStoreBoolean(masm, true, output);
+ // fallthrough
+ masm.bind(&done);
+ return true;
+}
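+
+// Editor's note: an illustrative sketch added in review, not part of the
+// original patch. The loop in emitLoadInstanceOfObjectResult is an ordinary
+// walk over singly-linked proto pointers; the lazy-proto sentinel (pointer
+// value 1) instead jumps to the failure path. The shape of the walk, over a
+// hypothetical node type:
+namespace proto_walk_sketch {
+
+struct Node {
+  const Node* proto = nullptr;  // nullptr terminates the chain.
+};
+
+static bool HasOnProtoChain(const Node* obj, const Node* target) {
+  for (const Node* p = obj->proto; p; p = p->proto) {
+    if (p == target) {
+      return true;  // Reached the prototype: |obj instanceof C| is true.
+    }
+  }
+  return false;  // Fell off the end of the chain (null proto).
+}
+
+}  // namespace proto_walk_sketch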
+
+bool CacheIRCompiler::emitMegamorphicLoadSlotResult(ObjOperandId objId,
+ uint32_t idOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ StubFieldOffset id(idOffset, StubField::Type::Id);
+
+ AutoScratchRegisterMaybeOutput idReg(allocator, masm, output);
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+#ifdef JS_CODEGEN_X86
+ masm.xorPtr(scratch3, scratch3);
+#else
+ Label cacheHit;
+ if (JitOptions.enableWatchtowerMegamorphic) {
+ emitLoadStubField(id, idReg);
+ masm.emitMegamorphicCacheLookupByValue(idReg.get(), obj, scratch1, scratch2,
+ scratch3, output.valueReg(),
+ &cacheHit);
+ } else {
+ masm.xorPtr(scratch3, scratch3);
+ }
+#endif
+
+ masm.branchIfNonNativeObj(obj, scratch1, failure->label());
+
+ masm.Push(UndefinedValue());
+ masm.moveStackPtrTo(idReg.get());
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch1);
+ volatileRegs.takeUnchecked(scratch2);
+ volatileRegs.takeUnchecked(scratch3);
+ volatileRegs.takeUnchecked(idReg);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
+ MegamorphicCache::Entry* cacheEntry, Value* vp);
+ masm.setupUnalignedABICall(scratch1);
+ masm.loadJSContext(scratch1);
+ masm.passABIArg(scratch1);
+ masm.passABIArg(obj);
+ emitLoadStubField(id, scratch2);
+ masm.passABIArg(scratch2);
+ masm.passABIArg(scratch3);
+ masm.passABIArg(idReg);
+
+#ifdef JS_CODEGEN_X86
+ masm.callWithABI<Fn, GetNativeDataPropertyPureWithCacheLookup>();
+#else
+ masm.callWithABI<Fn, GetNativeDataPropertyPure>();
+#endif
+
+ masm.storeCallPointerResult(scratch2);
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
+ masm.adjustStack(sizeof(Value));
+
+ masm.branchIfFalseBool(scratch2, failure->label());
+#ifndef JS_CODEGEN_X86
+ masm.bind(&cacheHit);
+#endif
+
+ return true;
+}
+
+bool CacheIRCompiler::emitMegamorphicStoreSlot(ObjOperandId objId,
+ uint32_t idOffset,
+ ValOperandId rhsId,
+ bool strict) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+ StubFieldOffset id(idOffset, StubField::Type::Id);
+ AutoScratchRegister scratch(allocator, masm);
+
+ callvm.prepare();
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ emitLoadStubField(id, scratch);
+ masm.Push(scratch);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
+ callvm.callNoResult<Fn, SetPropertyMegamorphic<false>>();
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardHasGetterSetter(ObjOperandId objId,
+ uint32_t idOffset,
+ uint32_t getterSetterOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+
+ StubFieldOffset id(idOffset, StubField::Type::Id);
+ StubFieldOffset getterSetter(getterSetterOffset,
+ StubField::Type::GetterSetter);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch1);
+ volatileRegs.takeUnchecked(scratch2);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
+ GetterSetter* getterSetter);
+ masm.setupUnalignedABICall(scratch1);
+ masm.loadJSContext(scratch1);
+ masm.passABIArg(scratch1);
+ masm.passABIArg(obj);
+ emitLoadStubField(id, scratch2);
+ masm.passABIArg(scratch2);
+ emitLoadStubField(getterSetter, scratch3);
+ masm.passABIArg(scratch3);
+ masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
+ masm.storeCallPointerResult(scratch1);
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.branchIfFalseBool(scratch1, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardWasmArg(ValOperandId argId,
+ wasm::ValType::Kind kind) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ // All values can be boxed as AnyRef.
+ if (kind == wasm::ValType::Ref) {
+ return true;
+ }
+ MOZ_ASSERT(kind != wasm::ValType::V128);
+
+ ValueOperand arg = allocator.useValueRegister(masm, argId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Check that the argument can be converted to the Wasm type in Warp code
+ // without bailing out.
+ Label done;
+ switch (kind) {
+ case wasm::ValType::I32:
+ case wasm::ValType::F32:
+ case wasm::ValType::F64: {
+ // Argument must be number, bool, or undefined.
+ masm.branchTestNumber(Assembler::Equal, arg, &done);
+ masm.branchTestBoolean(Assembler::Equal, arg, &done);
+ masm.branchTestUndefined(Assembler::NotEqual, arg, failure->label());
+ break;
+ }
+ case wasm::ValType::I64: {
+ // Argument must be bigint, bool, or string.
+ masm.branchTestBigInt(Assembler::Equal, arg, &done);
+ masm.branchTestBoolean(Assembler::Equal, arg, &done);
+ masm.branchTestString(Assembler::NotEqual, arg, failure->label());
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected kind");
+ }
+ masm.bind(&done);
+
+ return true;
+}
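+
+// Editor's note: an illustrative sketch added in review, not part of the
+// original patch. The guard above only admits argument types that Warp can
+// convert without bailing out; as a table (AcceptsTag and the use of
+// JSValueType here are an editorial simplification, not the real guard API):
+namespace wasm_arg_sketch {
+
+static bool AcceptsTag(wasm::ValType::Kind kind, JSValueType tag) {
+  switch (kind) {
+    case wasm::ValType::I32:
+    case wasm::ValType::F32:
+    case wasm::ValType::F64:
+      // Numbers, booleans and undefined coerce to the numeric types.
+      return tag == JSVAL_TYPE_INT32 || tag == JSVAL_TYPE_DOUBLE ||
+             tag == JSVAL_TYPE_BOOLEAN || tag == JSVAL_TYPE_UNDEFINED;
+    case wasm::ValType::I64:
+      // BigInts, booleans and strings coerce to i64.
+      return tag == JSVAL_TYPE_BIGINT || tag == JSVAL_TYPE_BOOLEAN ||
+             tag == JSVAL_TYPE_STRING;
+    case wasm::ValType::Ref:
+      return true;  // Any value can be boxed as AnyRef.
+    default:
+      return false;  // V128 never reaches this guard.
+  }
+}
+
+}  // namespace wasm_arg_sketch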
+
+bool CacheIRCompiler::emitGuardMultipleShapes(ObjOperandId objId,
+ uint32_t shapesOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister shapes(allocator, masm);
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
+
+ Register spectreScratch = InvalidReg;
+ Maybe<AutoScratchRegister> maybeSpectreScratch;
+ if (needSpectreMitigations) {
+ maybeSpectreScratch.emplace(allocator, masm);
+ spectreScratch = *maybeSpectreScratch;
+ }
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // The stub field contains a ListObject. Load its elements.
+ StubFieldOffset shapeArray(shapesOffset, StubField::Type::JSObject);
+ emitLoadStubField(shapeArray, shapes);
+ masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);
+
+ masm.branchTestObjShapeList(Assembler::NotEqual, obj, shapes, scratch,
+ scratch2, spectreScratch, failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadObject(ObjOperandId resultId,
+ uint32_t objOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register reg = allocator.defineRegister(masm, resultId);
+ StubFieldOffset obj(objOffset, StubField::Type::JSObject);
+ emitLoadStubField(obj, reg);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadProtoObject(ObjOperandId resultId,
+ uint32_t objOffset,
+ ObjOperandId receiverObjId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register reg = allocator.defineRegister(masm, resultId);
+ StubFieldOffset obj(objOffset, StubField::Type::JSObject);
+ emitLoadStubField(obj, reg);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadInt32Constant(uint32_t valOffset,
+ Int32OperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register reg = allocator.defineRegister(masm, resultId);
+ StubFieldOffset val(valOffset, StubField::Type::RawInt32);
+ emitLoadStubField(val, reg);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadBooleanConstant(bool val,
+ BooleanOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register reg = allocator.defineRegister(masm, resultId);
+ masm.move32(Imm32(val), reg);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadDoubleConstant(uint32_t valOffset,
+ NumberOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ ValueOperand output = allocator.defineValueRegister(masm, resultId);
+ StubFieldOffset val(valOffset, StubField::Type::Double);
+
+ AutoScratchFloatRegister floatReg(this);
+
+ emitLoadDoubleValueStubField(val, output, floatReg);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadUndefined(ValOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ ValueOperand reg = allocator.defineValueRegister(masm, resultId);
+ masm.moveValue(UndefinedValue(), reg);
+ return true;
+}
+
+bool CacheIRCompiler::emitLoadConstantString(uint32_t strOffset,
+ StringOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register reg = allocator.defineRegister(masm, resultId);
+ StubFieldOffset str(strOffset, StubField::Type::String);
+ emitLoadStubField(str, reg);
+ return true;
+}
+
+bool CacheIRCompiler::emitCallInt32ToString(Int32OperandId inputId,
+ StringOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register input = allocator.useRegister(masm, inputId);
+ Register result = allocator.defineRegister(masm, resultId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(result);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
+ masm.setupUnalignedABICall(result);
+ masm.loadJSContext(result);
+ masm.passABIArg(result);
+ masm.passABIArg(input);
+ masm.callWithABI<Fn, js::Int32ToStringPure>();
+
+ masm.storeCallPointerResult(result);
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitCallNumberToString(NumberOperandId inputId,
+ StringOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
+
+ allocator.ensureDoubleRegister(masm, inputId, floatScratch0);
+ Register result = allocator.defineRegister(masm, resultId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(result);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = JSString* (*)(JSContext* cx, double d);
+ masm.setupUnalignedABICall(result);
+ masm.loadJSContext(result);
+ masm.passABIArg(result);
+ masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
+ masm.callWithABI<Fn, js::NumberToStringPure>();
+
+ masm.storeCallPointerResult(result);
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitInt32ToStringWithBaseResult(Int32OperandId inputId,
+ Int32OperandId baseId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+ Register input = allocator.useRegister(masm, inputId);
+ Register base = allocator.useRegister(masm, baseId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
+ // we can't use both at the same time. This isn't an issue here, because Ion
+ // doesn't support CallICs. If that ever changes, this code must be updated.
+ MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
+
+ masm.branch32(Assembler::LessThan, base, Imm32(2), failure->label());
+ masm.branch32(Assembler::GreaterThan, base, Imm32(36), failure->label());
+
+ callvm.prepare();
+
+ masm.Push(base);
+ masm.Push(input);
+
+ using Fn = JSString* (*)(JSContext*, int32_t, int32_t);
+ callvm.call<Fn, js::Int32ToStringWithBase>();
+ return true;
+}
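+
+// Editor's note: an illustrative sketch added in review, not part of the
+// original patch. The two guards above enforce the 2..36 radix range of
+// Number.prototype.toString before calling into the VM; the VM helper then
+// runs the usual digit loop, roughly like this (the function here is a
+// hypothetical stand-in for the real js::Int32ToStringWithBase entry point):
+#include <cstdint>
+#include <string>
+
+namespace int32_to_string_sketch {
+
+static std::string Int32ToStringWithBase(int32_t value, int32_t base) {
+  // Precondition established by the guards above: 2 <= base <= 36.
+  static const char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+  bool negative = value < 0;
+  // Widen before negating so INT32_MIN is handled correctly.
+  uint64_t magnitude = negative ? uint64_t(-int64_t(value)) : uint64_t(value);
+  std::string out;
+  do {
+    out.insert(out.begin(), digits[magnitude % uint64_t(base)]);
+    magnitude /= uint64_t(base);
+  } while (magnitude != 0);
+  if (negative) {
+    out.insert(out.begin(), '-');
+  }
+  return out;
+}
+
+}  // namespace int32_to_string_sketch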
+
+bool CacheIRCompiler::emitBooleanToString(BooleanOperandId inputId,
+ StringOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register boolean = allocator.useRegister(masm, inputId);
+ Register result = allocator.defineRegister(masm, resultId);
+ const JSAtomState& names = cx_->names();
+ Label true_, done;
+
+ masm.branchTest32(Assembler::NonZero, boolean, boolean, &true_);
+
+ // False case
+ masm.movePtr(ImmGCPtr(names.false_), result);
+ masm.jump(&done);
+
+ // True case
+ masm.bind(&true_);
+ masm.movePtr(ImmGCPtr(names.true_), result);
+ masm.bind(&done);
+
+ return true;
+}
+
+bool CacheIRCompiler::emitObjectToStringResult(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(output.valueReg());
+ volatileRegs.takeUnchecked(scratch);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = JSString* (*)(JSContext*, JSObject*);
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, js::ObjectClassToString>();
+ masm.storeCallPointerResult(scratch);
+
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.branchPtr(Assembler::Equal, scratch, ImmPtr(nullptr), failure->label());
+ masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitCallStringConcatResult(StringOperandId lhsId,
+ StringOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register lhs = allocator.useRegister(masm, lhsId);
+ Register rhs = allocator.useRegister(masm, rhsId);
+
+ callvm.prepare();
+
+ masm.Push(static_cast<js::jit::Imm32>(int32_t(js::gc::Heap::Default)));
+ masm.Push(rhs);
+ masm.Push(lhs);
+
+ using Fn =
+ JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
+ callvm.call<Fn, ConcatStrings<CanGC>>();
+
+ return true;
+}
+
+bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult(ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ ValueOperand input = allocator.useValueRegister(masm, valId);
+
+ // Test if it's an object.
+ Label returnFalse, done;
+ masm.fallibleUnboxObject(input, scratch, &returnFalse);
+
+ // Test if it's a GeneratorObject.
+ masm.branchTestObjClass(Assembler::NotEqual, scratch,
+ &GeneratorObject::class_, scratch2, scratch,
+ &returnFalse);
+
+ // If the resumeIndex slot holds an int32 value < RESUME_INDEX_RUNNING,
+ // the generator is suspended.
+ Address addr(scratch, AbstractGeneratorObject::offsetOfResumeIndexSlot());
+ masm.fallibleUnboxInt32(addr, scratch, &returnFalse);
+ masm.branch32(Assembler::AboveOrEqual, scratch,
+ Imm32(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
+ &returnFalse);
+
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&returnFalse);
+ masm.moveValue(BooleanValue(false), output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
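+
+// Editor's note: an illustrative sketch added in review, not part of the
+// original patch. Once the resumeIndex slot has been unboxed, the
+// "is suspended" test above is a single comparison (the helper name is
+// hypothetical):
+namespace generator_sketch {
+
+static bool IsSuspendedResumeIndex(int32_t resumeIndex) {
+  // Values below the RUNNING sentinel identify the yield/await site at which
+  // the generator was suspended; larger values are sentinel states such as
+  // RESUME_INDEX_RUNNING.
+  return resumeIndex <
+         int32_t(AbstractGeneratorObject::RESUME_INDEX_RUNNING);
+}
+
+}  // namespace generator_sketch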
+
+// This op generates no code. It is consumed by the transpiler.
+bool CacheIRCompiler::emitMetaScriptedThisShape(uint32_t) { return true; }
+
+bool CacheIRCompiler::emitCallNativeGetElementResult(ObjOperandId objId,
+ Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+
+ callvm.prepare();
+
+ masm.Push(index);
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
+ MutableHandleValue);
+ callvm.call<Fn, NativeGetElement>();
+
+ return true;
+}
+
+bool CacheIRCompiler::emitCallNativeGetElementSuperResult(
+ ObjOperandId objId, Int32OperandId indexId, ValOperandId receiverId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
+
+ callvm.prepare();
+
+ masm.Push(index);
+ masm.Push(receiver);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
+ MutableHandleValue);
+ callvm.call<Fn, NativeGetElement>();
+
+ return true;
+}
+
+bool CacheIRCompiler::emitProxyHasPropResult(ObjOperandId objId,
+ ValOperandId idId, bool hasOwn) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand idVal = allocator.useValueRegister(masm, idId);
+
+ callvm.prepare();
+
+ masm.Push(idVal);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
+ if (hasOwn) {
+ callvm.call<Fn, ProxyHasOwn>();
+ } else {
+ callvm.call<Fn, ProxyHas>();
+ }
+ return true;
+}
+
+bool CacheIRCompiler::emitProxyGetByValueResult(ObjOperandId objId,
+ ValOperandId idId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand idVal = allocator.useValueRegister(masm, idId);
+
+ callvm.prepare();
+ masm.Push(idVal);
+ masm.Push(obj);
+
+ using Fn =
+ bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
+ callvm.call<Fn, ProxyGetPropertyByValue>();
+ return true;
+}
+
+bool CacheIRCompiler::emitCallGetSparseElementResult(ObjOperandId objId,
+ Int32OperandId indexId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Register id = allocator.useRegister(masm, indexId);
+
+ callvm.prepare();
+ masm.Push(id);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
+ MutableHandleValue result);
+ callvm.call<Fn, GetSparseElementHelper>();
+ return true;
+}
+
+bool CacheIRCompiler::emitRegExpFlagResult(ObjOperandId regexpId,
+ int32_t flagsMask) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register regexp = allocator.useRegister(masm, regexpId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Address flagsAddr(
+ regexp, NativeObject::getFixedSlotOffset(RegExpObject::flagsSlot()));
+ masm.unboxInt32(flagsAddr, scratch);
+
+ Label ifFalse, done;
+ masm.branchTest32(Assembler::Zero, scratch, Imm32(flagsMask), &ifFalse);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&ifFalse);
+ masm.moveValue(BooleanValue(false), output.valueReg());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitCallSubstringKernelResult(StringOperandId strId,
+ Int32OperandId beginId,
+ Int32OperandId lengthId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+ Register begin = allocator.useRegister(masm, beginId);
+ Register length = allocator.useRegister(masm, lengthId);
+
+ callvm.prepare();
+ masm.Push(length);
+ masm.Push(begin);
+ masm.Push(str);
+
+ using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
+ int32_t len);
+ callvm.call<Fn, SubstringKernel>();
+ return true;
+}
+
+bool CacheIRCompiler::emitStringReplaceStringResult(
+ StringOperandId strId, StringOperandId patternId,
+ StringOperandId replacementId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+ Register pattern = allocator.useRegister(masm, patternId);
+ Register replacement = allocator.useRegister(masm, replacementId);
+
+ callvm.prepare();
+ masm.Push(replacement);
+ masm.Push(pattern);
+ masm.Push(str);
+
+ using Fn =
+ JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
+ callvm.call<Fn, jit::StringReplace>();
+ return true;
+}
+
+bool CacheIRCompiler::emitStringSplitStringResult(StringOperandId strId,
+ StringOperandId separatorId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+ Register separator = allocator.useRegister(masm, separatorId);
+
+ callvm.prepare();
+ masm.Push(Imm32(INT32_MAX));
+ masm.Push(separator);
+ masm.Push(str);
+
+ using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
+ callvm.call<Fn, js::StringSplitString>();
+ return true;
+}
+
+bool CacheIRCompiler::emitRegExpPrototypeOptimizableResult(
+ ObjOperandId protoId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register proto = allocator.useRegister(masm, protoId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Label slow, done;
+ masm.branchIfNotRegExpPrototypeOptimizable(proto, scratch, &slow);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ {
+ masm.bind(&slow);
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* proto);
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(proto);
+ masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
+ masm.storeCallBoolResult(scratch);
+
+ masm.PopRegsInMask(volatileRegs);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitRegExpInstanceOptimizableResult(
+ ObjOperandId regexpId, ObjOperandId protoId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register regexp = allocator.useRegister(masm, regexpId);
+ Register proto = allocator.useRegister(masm, protoId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ Label slow, done;
+ masm.branchIfNotRegExpInstanceOptimizable(regexp, scratch, &slow);
+ masm.moveValue(BooleanValue(true), output.valueReg());
+ masm.jump(&done);
+
+ {
+ masm.bind(&slow);
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
+ masm.setupUnalignedABICall(scratch);
+ masm.loadJSContext(scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(regexp);
+ masm.passABIArg(proto);
+ masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
+ masm.storeCallBoolResult(scratch);
+
+ masm.PopRegsInMask(volatileRegs);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register str = allocator.useRegister(masm, strId);
+
+ callvm.prepare();
+ masm.Push(str);
+
+ using Fn = bool (*)(JSContext*, JSString*, int32_t*);
+ callvm.call<Fn, GetFirstDollarIndexRaw>();
+ return true;
+}
+
+bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
+ uint32_t replacementId, Scalar::Type elementType) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Maybe<AutoOutputRegister> output;
+ Maybe<AutoCallVM> callvm;
+ if (!Scalar::isBigIntType(elementType)) {
+ output.emplace(*this);
+ } else {
+ callvm.emplace(masm, this, allocator);
+ }
+#ifdef JS_CODEGEN_X86
+ // Use a scratch register to avoid running out of registers.
+ Register obj = output ? output->valueReg().typeReg()
+ : callvm->outputValueReg().typeReg();
+ allocator.copyToScratchRegister(masm, objId, obj);
+#else
+ Register obj = allocator.useRegister(masm, objId);
+#endif
+ Register index = allocator.useRegister(masm, indexId);
+ Register expected;
+ Register replacement;
+ if (!Scalar::isBigIntType(elementType)) {
+ expected = allocator.useRegister(masm, Int32OperandId(expectedId));
+ replacement = allocator.useRegister(masm, Int32OperandId(replacementId));
+ } else {
+ expected = allocator.useRegister(masm, BigIntOperandId(expectedId));
+ replacement = allocator.useRegister(masm, BigIntOperandId(replacementId));
+ }
+
+ Register scratch = output ? output->valueReg().scratchReg()
+ : callvm->outputValueReg().scratchReg();
+ MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");
+
+ // Not enough registers on X86.
+ Register spectreTemp = Register::Invalid();
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
+ // we can't use both at the same time. This isn't an issue here, because Ion
+ // doesn't support CallICs. If that ever changes, this code must be updated.
+ MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
+
+ // Bounds check.
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+
+ // Atomic operations are highly platform-dependent, for example x86/x64 has
+ // specific requirements on which registers are used; MIPS needs multiple
+ // additional temporaries. Therefore we're using either an ABI or VM call here
+ // instead of handling each platform separately.
+
+ if (Scalar::isBigIntType(elementType)) {
+ callvm->prepare();
+
+ masm.Push(replacement);
+ masm.Push(expected);
+ masm.Push(index);
+ masm.Push(obj);
+
+ using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t, const BigInt*,
+ const BigInt*);
+ callvm->call<Fn, jit::AtomicsCompareExchange64>();
+ return true;
+ }
+
+ {
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(output->valueReg());
+ volatileRegs.takeUnchecked(scratch);
+ masm.PushRegsInMask(volatileRegs);
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(obj);
+ masm.passABIArg(index);
+ masm.passABIArg(expected);
+ masm.passABIArg(replacement);
+ masm.callWithABI(DynamicFunction<AtomicsCompareExchangeFn>(
+ AtomicsCompareExchange(elementType)));
+ masm.storeCallInt32Result(scratch);
+
+ masm.PopRegsInMask(volatileRegs);
+ }
+
+ if (elementType != Scalar::Uint32) {
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output->valueReg());
+ } else {
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertUInt32ToDouble(scratch, fpscratch);
+ masm.boxDouble(fpscratch, output->valueReg(), fpscratch);
+ }
+
+ return true;
+}
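+
+// Editor's note: an illustrative sketch added in review, not part of the
+// original patch. The ABI call above dispatches to a per-element-type helper;
+// for int32 elements it behaves roughly like this portable C++20 version
+// (the name is hypothetical, and the real helpers take the typed array and
+// index rather than a raw pointer):
+#include <atomic>
+#include <cstdint>
+
+namespace atomics_cas_sketch {
+
+static int32_t CompareExchangeInt32(int32_t* addr, int32_t expected,
+                                    int32_t replacement) {
+  std::atomic_ref<int32_t> cell(*addr);
+  // compare_exchange_strong writes the observed value back into |expected|
+  // on failure and leaves it untouched on success, so |expected| ends up
+  // holding the old element value either way, which is what
+  // Atomics.compareExchange returns.
+  cell.compare_exchange_strong(expected, replacement);
+  return expected;
+}
+
+}  // namespace atomics_cas_sketch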
+
+bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, AtomicsReadWriteModifyFn fn) {
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ Register value = allocator.useRegister(masm, Int32OperandId(valueId));
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ // Not enough registers on X86.
+ Register spectreTemp = Register::Invalid();
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Bounds check.
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+
+  // See emitAtomicsCompareExchangeResult for why we use an ABI call here.
+ {
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(output.valueReg());
+ volatileRegs.takeUnchecked(scratch);
+ masm.PushRegsInMask(volatileRegs);
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(obj);
+ masm.passABIArg(index);
+ masm.passABIArg(value);
+ masm.callWithABI(DynamicFunction<AtomicsReadWriteModifyFn>(fn));
+ masm.storeCallInt32Result(scratch);
+
+ masm.PopRegsInMask(volatileRegs);
+ }
+
+ if (elementType != Scalar::Uint32) {
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ } else {
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertUInt32ToDouble(scratch, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ }
+
+ return true;
+}
+
+template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
+bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId) {
+ AutoCallVM callvm(masm, this, allocator);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
+
+ // Not enough registers on X86.
+ Register spectreTemp = Register::Invalid();
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
+ // we can't use both at the same time. This isn't an issue here, because Ion
+ // doesn't support CallICs. If that ever changes, this code must be updated.
+ MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
+
+ // Bounds check.
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+
+  // See emitAtomicsCompareExchangeResult for why we use a VM call here.
+
+ callvm.prepare();
+
+ masm.Push(value);
+ masm.Push(index);
+ masm.Push(obj);
+
+ callvm.call<AtomicsReadWriteModify64Fn, fn>();
+ return true;
+}
+
+bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (Scalar::isBigIntType(elementType)) {
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
+ objId, indexId, valueId);
+ }
+ return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
+ AtomicsExchange(elementType));
+}
+
+bool CacheIRCompiler::emitAtomicsAddResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (Scalar::isBigIntType(elementType)) {
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(objId, indexId,
+ valueId);
+ }
+ return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
+ AtomicsAdd(elementType));
+}
+
+bool CacheIRCompiler::emitAtomicsSubResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (Scalar::isBigIntType(elementType)) {
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(objId, indexId,
+ valueId);
+ }
+ return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
+ AtomicsSub(elementType));
+}
+
+bool CacheIRCompiler::emitAtomicsAndResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (Scalar::isBigIntType(elementType)) {
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(objId, indexId,
+ valueId);
+ }
+ return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
+ AtomicsAnd(elementType));
+}
+
+bool CacheIRCompiler::emitAtomicsOrResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (Scalar::isBigIntType(elementType)) {
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(objId, indexId,
+ valueId);
+ }
+ return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
+ AtomicsOr(elementType));
+}
+
+bool CacheIRCompiler::emitAtomicsXorResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ if (Scalar::isBigIntType(elementType)) {
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(objId, indexId,
+ valueId);
+ }
+ return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
+ AtomicsXor(elementType));
+}
+
+bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ Scalar::Type elementType) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Maybe<AutoOutputRegister> output;
+ Maybe<AutoCallVM> callvm;
+ if (!Scalar::isBigIntType(elementType)) {
+ output.emplace(*this);
+ } else {
+ callvm.emplace(masm, this, allocator);
+ }
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm,
+ output ? *output : callvm->output());
+ AutoSpectreBoundsScratchRegister spectreTemp(allocator, masm);
+ AutoAvailableFloatRegister floatReg(*this, FloatReg0);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
+ // we can't use both at the same time. This isn't an issue here, because Ion
+ // doesn't support CallICs. If that ever changes, this code must be updated.
+ MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
+
+ // Bounds check.
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+
+  // Atomic operations are highly platform-dependent; for example, x86/arm32
+  // have specific requirements on which registers are used. Therefore we use
+  // a VM call here instead of handling each platform separately.
+ if (Scalar::isBigIntType(elementType)) {
+ callvm->prepare();
+
+ masm.Push(index);
+ masm.Push(obj);
+
+ using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t);
+ callvm->call<Fn, jit::AtomicsLoad64>();
+ return true;
+ }
+
+ // Load the elements vector.
+ masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);
+
+ // Load the value.
+ BaseIndex source(scratch, index, ScaleFromScalarType(elementType));
+
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ auto sync = Synchronization::Load();
+
+ masm.memoryBarrierBefore(sync);
+
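+  // Uint32Mode::ForceDouble boxes Uint32 values as doubles, so
+  // loadFromTypedArray does not need a failure label here.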
+ Label* failUint32 = nullptr;
+ MacroAssembler::Uint32Mode mode = MacroAssembler::Uint32Mode::ForceDouble;
+ masm.loadFromTypedArray(elementType, source, output->valueReg(), mode,
+ scratch, failUint32);
+ masm.memoryBarrierAfter(sync);
+
+ return true;
+}
+
+bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ Register index = allocator.useRegister(masm, indexId);
+ Maybe<Register> valueInt32;
+ Maybe<Register> valueBigInt;
+ if (!Scalar::isBigIntType(elementType)) {
+ valueInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
+ } else {
+ valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
+ }
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ // Not enough registers on X86.
+ Register spectreTemp = Register::Invalid();
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Bounds check.
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+
+ if (!Scalar::isBigIntType(elementType)) {
+ // Load the elements vector.
+ masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);
+
+ // Store the value.
+ BaseIndex dest(scratch, index, ScaleFromScalarType(elementType));
+
+ // NOTE: the generated code must match the assembly code in gen_store in
+ // GenerateAtomicOperations.py
+ auto sync = Synchronization::Store();
+
+ masm.memoryBarrierBefore(sync);
+ masm.storeToTypedIntArray(elementType, *valueInt32, dest);
+ masm.memoryBarrierAfter(sync);
+
+ masm.tagValue(JSVAL_TYPE_INT32, *valueInt32, output.valueReg());
+ } else {
+ // See comment in emitAtomicsCompareExchange for why we use an ABI call.
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(output.valueReg());
+ volatileRegs.takeUnchecked(scratch);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = void (*)(TypedArrayObject*, size_t, const BigInt*);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(obj);
+ masm.passABIArg(index);
+ masm.passABIArg(*valueBigInt);
+ masm.callWithABI<Fn, jit::AtomicsStore64>();
+
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.tagValue(JSVAL_TYPE_BIGINT, *valueBigInt, output.valueReg());
+ }
+
+ return true;
+}
+
+bool CacheIRCompiler::emitAtomicsIsLockFreeResult(Int32OperandId valueId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register value = allocator.useRegister(masm, valueId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ masm.atomicIsLockFreeJS(value, scratch);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitBigIntAsIntNResult(Int32OperandId bitsId,
+ BigIntOperandId bigIntId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register bits = allocator.useRegister(masm, bitsId);
+ Register bigInt = allocator.useRegister(masm, bigIntId);
+
+ callvm.prepare();
+ masm.Push(bits);
+ masm.Push(bigInt);
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
+ callvm.call<Fn, jit::BigIntAsIntN>();
+ return true;
+}
+
+bool CacheIRCompiler::emitBigIntAsUintNResult(Int32OperandId bitsId,
+ BigIntOperandId bigIntId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register bits = allocator.useRegister(masm, bitsId);
+ Register bigInt = allocator.useRegister(masm, bigIntId);
+
+ callvm.prepare();
+ masm.Push(bits);
+ masm.Push(bigInt);
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
+ callvm.call<Fn, jit::BigIntAsUintN>();
+ return true;
+}
+
+bool CacheIRCompiler::emitSetHasResult(ObjOperandId setId, ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register set = allocator.useRegister(masm, setId);
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+
+ callvm.prepare();
+ masm.Push(val);
+ masm.Push(set);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
+ callvm.call<Fn, jit::SetObjectHas>();
+ return true;
+}
+
+bool CacheIRCompiler::emitSetHasNonGCThingResult(ObjOperandId setId,
+ ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register set = allocator.useRegister(masm, setId);
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+ AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
+
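+  // Convert the value to its canonical hashable form, compute its hash, and
+  // look it up directly in the set's hash table.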
+ masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
+ masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);
+
+ masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
+ scratch3, scratch4);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitSetHasSymbolResult(ObjOperandId setId,
+ SymbolOperandId symId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register set = allocator.useRegister(masm, setId);
+ Register sym = allocator.useRegister(masm, symId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+
+ masm.prepareHashSymbol(sym, scratch1);
+
+ masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
+ masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
+ scratch3, scratch4);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitSetHasBigIntResult(ObjOperandId setId,
+ BigIntOperandId bigIntId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register set = allocator.useRegister(masm, setId);
+ Register bigInt = allocator.useRegister(masm, bigIntId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+ AutoScratchRegister scratch5(allocator, masm);
+#ifndef JS_CODEGEN_ARM
+ AutoScratchRegister scratch6(allocator, masm);
+#else
+ // We don't have more registers available on ARM32.
+ Register scratch6 = set;
+
+ masm.push(set);
+#endif
+
+ masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);
+
+ masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
+ masm.setObjectHasBigInt(set, output.valueReg(), scratch1, scratch2, scratch3,
+ scratch4, scratch5, scratch6);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+
+#ifdef JS_CODEGEN_ARM
+ masm.pop(set);
+#endif
+ return true;
+}
+
+bool CacheIRCompiler::emitSetHasObjectResult(ObjOperandId setId,
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register set = allocator.useRegister(masm, setId);
+ Register obj = allocator.useRegister(masm, objId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+ AutoScratchRegister scratch5(allocator, masm);
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
+ masm.prepareHashObject(set, output.valueReg(), scratch1, scratch2, scratch3,
+ scratch4, scratch5);
+
+ masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
+ scratch3, scratch4);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitSetSizeResult(ObjOperandId setId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register set = allocator.useRegister(masm, setId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ masm.loadSetObjectSize(set, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMapHasResult(ObjOperandId mapId, ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register map = allocator.useRegister(masm, mapId);
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+
+ callvm.prepare();
+ masm.Push(val);
+ masm.Push(map);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
+ callvm.call<Fn, jit::MapObjectHas>();
+ return true;
+}
+
+bool CacheIRCompiler::emitMapHasNonGCThingResult(ObjOperandId mapId,
+ ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+ AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
+
+ masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
+ masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);
+
+ masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
+ scratch3, scratch4);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMapHasSymbolResult(ObjOperandId mapId,
+ SymbolOperandId symId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ Register sym = allocator.useRegister(masm, symId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+
+ masm.prepareHashSymbol(sym, scratch1);
+
+ masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
+ masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
+ scratch3, scratch4);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMapHasBigIntResult(ObjOperandId mapId,
+ BigIntOperandId bigIntId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ Register bigInt = allocator.useRegister(masm, bigIntId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+ AutoScratchRegister scratch5(allocator, masm);
+#ifndef JS_CODEGEN_ARM
+ AutoScratchRegister scratch6(allocator, masm);
+#else
+ // We don't have more registers available on ARM32.
+ Register scratch6 = map;
+
+ masm.push(map);
+#endif
+
+ masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);
+
+ masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
+ masm.mapObjectHasBigInt(map, output.valueReg(), scratch1, scratch2, scratch3,
+ scratch4, scratch5, scratch6);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+
+#ifdef JS_CODEGEN_ARM
+ masm.pop(map);
+#endif
+ return true;
+}
+
+bool CacheIRCompiler::emitMapHasObjectResult(ObjOperandId mapId,
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ Register obj = allocator.useRegister(masm, objId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+ AutoScratchRegister scratch5(allocator, masm);
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
+ masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
+ scratch4, scratch5);
+
+ masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
+ scratch3, scratch4);
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitMapGetResult(ObjOperandId mapId, ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register map = allocator.useRegister(masm, mapId);
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+
+ callvm.prepare();
+ masm.Push(val);
+ masm.Push(map);
+
+ using Fn =
+ bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
+ callvm.call<Fn, jit::MapObjectGet>();
+ return true;
+}
+
+bool CacheIRCompiler::emitMapGetNonGCThingResult(ObjOperandId mapId,
+ ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ ValueOperand val = allocator.useValueRegister(masm, valId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+ AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
+
+ masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
+ masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);
+
+ masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
+ output.valueReg(), scratch2, scratch3, scratch4);
+ return true;
+}
+
+bool CacheIRCompiler::emitMapGetSymbolResult(ObjOperandId mapId,
+ SymbolOperandId symId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ Register sym = allocator.useRegister(masm, symId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+
+ masm.prepareHashSymbol(sym, scratch1);
+
+ masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
+ masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
+ output.valueReg(), scratch2, scratch3, scratch4);
+ return true;
+}
+
+bool CacheIRCompiler::emitMapGetBigIntResult(ObjOperandId mapId,
+ BigIntOperandId bigIntId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ Register bigInt = allocator.useRegister(masm, bigIntId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+ AutoScratchRegister scratch5(allocator, masm);
+#ifndef JS_CODEGEN_ARM
+ AutoScratchRegister scratch6(allocator, masm);
+#else
+ // We don't have more registers available on ARM32.
+ Register scratch6 = map;
+
+ masm.push(map);
+#endif
+
+ masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);
+
+ masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
+ masm.mapObjectGetBigInt(map, output.valueReg(), scratch1, output.valueReg(),
+ scratch2, scratch3, scratch4, scratch5, scratch6);
+
+#ifdef JS_CODEGEN_ARM
+ masm.pop(map);
+#endif
+ return true;
+}
+
+bool CacheIRCompiler::emitMapGetObjectResult(ObjOperandId mapId,
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ Register obj = allocator.useRegister(masm, objId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+ AutoScratchRegister scratch3(allocator, masm);
+ AutoScratchRegister scratch4(allocator, masm);
+ AutoScratchRegister scratch5(allocator, masm);
+
+ masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
+ masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
+ scratch4, scratch5);
+
+ masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
+ output.valueReg(), scratch2, scratch3, scratch4);
+ return true;
+}
+
+bool CacheIRCompiler::emitMapSizeResult(ObjOperandId mapId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register map = allocator.useRegister(masm, mapId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+ masm.loadMapObjectSize(map, scratch);
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitArrayFromArgumentsObjectResult(ObjOperandId objId,
+ uint32_t shapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoCallVM callvm(masm, this, allocator);
+
+ Register obj = allocator.useRegister(masm, objId);
+
+ callvm.prepare();
+ masm.Push(obj);
+
+ using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
+ callvm.call<Fn, js::ArrayFromArgumentsObject>();
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardGlobalGeneration(uint32_t expectedOffset,
+ uint32_t generationAddrOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ StubFieldOffset expected(expectedOffset, StubField::Type::RawInt32);
+ emitLoadStubField(expected, scratch);
+
+ StubFieldOffset generationAddr(generationAddrOffset,
+ StubField::Type::RawPointer);
+ emitLoadStubField(generationAddr, scratch2);
+
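+  // Fail the stub if the generation counter at *generationAddr differs from
+  // the expected value stored in the stub data.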
+ masm.branch32(Assembler::NotEqual, Address(scratch2, 0), scratch,
+ failure->label());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitBailout() {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ // Generates no code.
+
+ return true;
+}
+
+bool CacheIRCompiler::emitAssertRecoveredOnBailoutResult(ValOperandId valId,
+ bool mustBeRecovered) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+
+ // NOP when not in IonMonkey
+ masm.moveValue(UndefinedValue(), output.valueReg());
+
+ return true;
+}
+
+bool CacheIRCompiler::emitAssertPropertyLookup(ObjOperandId objId,
+ uint32_t idOffset,
+ uint32_t slotOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+
+ AutoScratchRegister id(allocator, masm);
+ AutoScratchRegister slot(allocator, masm);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ masm.setupUnalignedABICall(id);
+
+ StubFieldOffset idField(idOffset, StubField::Type::Id);
+ emitLoadStubField(idField, id);
+
+ StubFieldOffset slotField(slotOffset, StubField::Type::RawInt32);
+ emitLoadStubField(slotField, slot);
+
+ masm.passABIArg(obj);
+ masm.passABIArg(id);
+ masm.passABIArg(slot);
+ using Fn = void (*)(NativeObject*, PropertyKey, uint32_t);
+ masm.callWithABI<Fn, js::jit::AssertPropertyLookup>();
+ masm.PopRegsInMask(save);
+
+ return true;
+}
+
+#ifdef FUZZING_JS_FUZZILLI
+bool CacheIRCompiler::emitFuzzilliHashResult(ValOperandId valId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ ValueOperand input = allocator.useValueRegister(masm, valId);
+ AutoScratchRegister scratch(allocator, masm);
+ AutoScratchRegister scratchJSContext(allocator, masm);
+ AutoScratchFloatRegister floatReg(this);
+# ifdef JS_PUNBOX64
+ AutoScratchRegister64 scratch64(allocator, masm);
+# else
+ AutoScratchRegister scratch2(allocator, masm);
+# endif
+
+ Label addFloat, updateHash, done;
+
+ {
+ ScratchTagScope tag(masm, input);
+ masm.splitTagForTest(input, tag);
+
+ Label notInt32;
+ masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ masm.unboxInt32(input, scratch);
+ masm.convertInt32ToDouble(scratch, floatReg);
+ masm.jump(&addFloat);
+ }
+ masm.bind(&notInt32);
+
+ Label notDouble;
+ masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ masm.unboxDouble(input, floatReg);
+ masm.canonicalizeDouble(floatReg);
+ masm.jump(&addFloat);
+ }
+ masm.bind(&notDouble);
+
+ Label notNull;
+ masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ masm.move32(Imm32(1), scratch);
+ masm.convertInt32ToDouble(scratch, floatReg);
+ masm.jump(&addFloat);
+ }
+ masm.bind(&notNull);
+
+ Label notUndefined;
+ masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ masm.move32(Imm32(2), scratch);
+ masm.convertInt32ToDouble(scratch, floatReg);
+ masm.jump(&addFloat);
+ }
+ masm.bind(&notUndefined);
+
+ Label notBoolean;
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ masm.unboxBoolean(input, scratch);
+ masm.add32(Imm32(3), scratch);
+ masm.convertInt32ToDouble(scratch, floatReg);
+ masm.jump(&addFloat);
+ }
+ masm.bind(&notBoolean);
+
+ Label notBigInt;
+ masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ masm.unboxBigInt(input, scratch);
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(volatileRegs);
+ // TODO: remove floatReg, scratch, scratchJS?
+
+ using Fn = uint32_t (*)(BigInt* bigInt);
+ masm.setupUnalignedABICall(scratchJSContext);
+ masm.loadJSContext(scratchJSContext);
+ masm.passABIArg(scratch);
+ masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
+ masm.storeCallInt32Result(scratch);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch);
+ ignore.add(scratchJSContext);
+ masm.PopRegsInMaskIgnore(volatileRegs, ignore);
+ masm.jump(&updateHash);
+ }
+ masm.bind(&notBigInt);
+
+ Label notObject;
+ masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ AutoCallVM callvm(masm, this, allocator);
+ Register obj = allocator.allocateRegister(masm);
+ masm.unboxObject(input, obj);
+
+ callvm.prepare();
+ masm.Push(obj);
+
+ using Fn = void (*)(JSContext* cx, JSObject* o);
+ callvm.callNoResult<Fn, js::FuzzilliHashObject>();
+ allocator.releaseRegister(obj);
+
+ masm.jump(&done);
+ }
+ masm.bind(&notObject);
+ {
+ masm.move32(Imm32(0), scratch);
+ masm.jump(&updateHash);
+ }
+ }
+
+ {
+ masm.bind(&addFloat);
+
+ masm.loadJSContext(scratchJSContext);
+ Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
+
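+    // Fold the double's 64 bits into a 32-bit hash input by adding the low
+    // and high halves.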
+# ifdef JS_PUNBOX64
+ masm.moveDoubleToGPR64(floatReg, scratch64);
+ masm.move32(scratch64.get().reg, scratch);
+ masm.rshift64(Imm32(32), scratch64);
+ masm.add32(scratch64.get().reg, scratch);
+# else
+ Register64 scratch64(scratch, scratch2);
+ masm.moveDoubleToGPR64(floatReg, scratch64);
+ masm.add32(scratch2, scratch);
+# endif
+ }
+
+ {
+ masm.bind(&updateHash);
+
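+    // Mix the new 32-bit input into cx->executionHash with an add-and-rotate,
+    // then bump executionHashInputs below for statistics.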
+ masm.loadJSContext(scratchJSContext);
+ Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
+ masm.load32(addrExecHash, scratchJSContext);
+ masm.add32(scratchJSContext, scratch);
+ masm.rotateLeft(Imm32(1), scratch, scratch);
+ masm.loadJSContext(scratchJSContext);
+ masm.store32(scratch, addrExecHash);
+
+ // stats
+ Address addrExecHashInputs(scratchJSContext,
+ offsetof(JSContext, executionHashInputs));
+ masm.load32(addrExecHashInputs, scratch);
+ masm.add32(Imm32(1), scratch);
+ masm.store32(scratch, addrExecHashInputs);
+ }
+
+ masm.bind(&done);
+
+ AutoOutputRegister output(*this);
+ masm.moveValue(UndefinedValue(), output.valueReg());
+ return true;
+}
+#endif
+
+template <typename Fn, Fn fn>
+void CacheIRCompiler::callVM(MacroAssembler& masm) {
+ VMFunctionId id = VMFunctionToId<Fn, fn>::id;
+ callVMInternal(masm, id);
+}
+
+void CacheIRCompiler::callVMInternal(MacroAssembler& masm, VMFunctionId id) {
+ MOZ_ASSERT(enteredStubFrame_);
+ if (mode_ == Mode::Ion) {
+ TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
+ const VMFunctionData& fun = GetVMFunction(id);
+ uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
+ masm.PushFrameDescriptor(FrameType::IonICCall);
+ masm.callJit(code);
+
+ // Pop rest of the exit frame and the arguments left on the stack.
+ int framePop =
+ sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
+ masm.implicitPop(frameSize + framePop);
+
+ // Pop IonICCallFrameLayout.
+ masm.Pop(FramePointer);
+ masm.freeStack(IonICCallFrameLayout::Size() - sizeof(void*));
+ return;
+ }
+
+ MOZ_ASSERT(mode_ == Mode::Baseline);
+
+ TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
+ MOZ_ASSERT(GetVMFunction(id).expectTailCall == NonTailCall);
+
+ EmitBaselineCallVM(code, masm);
+}
+
+bool CacheIRCompiler::isBaseline() { return mode_ == Mode::Baseline; }
+
+bool CacheIRCompiler::isIon() { return mode_ == Mode::Ion; }
+
+BaselineCacheIRCompiler* CacheIRCompiler::asBaseline() {
+ MOZ_ASSERT(this->isBaseline());
+ return static_cast<BaselineCacheIRCompiler*>(this);
+}
+
+IonCacheIRCompiler* CacheIRCompiler::asIon() {
+ MOZ_ASSERT(this->isIon());
+ return static_cast<IonCacheIRCompiler*>(this);
+}
+
+#ifdef DEBUG
+void CacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
+ if (isBaseline()) {
+ // Baseline does not have any FloatRegisters live when calling an IC stub.
+ return;
+ }
+
+ asIon()->assertFloatRegisterAvailable(reg);
+}
+#endif
+
+AutoCallVM::AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
+ CacheRegisterAllocator& allocator)
+ : masm_(masm), compiler_(compiler), allocator_(allocator) {
+ // Ion needs to `enterStubFrame` before it can callVM and it also needs to
+ // initialize AutoSaveLiveRegisters.
+ if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
+ // Will need to use a downcast here as well, in order to pass the
+ // stub to AutoSaveLiveRegisters
+ save_.emplace(*compiler_->asIon());
+ }
+
+ if (compiler->outputUnchecked_.isSome()) {
+ output_.emplace(*compiler);
+ }
+
+ if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
+ stubFrame_.emplace(*compiler_->asBaseline());
+ if (output_.isSome()) {
+ scratch_.emplace(allocator_, masm_, output_.ref());
+ } else {
+ scratch_.emplace(allocator_, masm_);
+ }
+ }
+}
+
+void AutoCallVM::prepare() {
+ allocator_.discardStack(masm_);
+ MOZ_ASSERT(compiler_ != nullptr);
+ if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
+ compiler_->asIon()->enterStubFrame(masm_, *save_.ptr());
+ return;
+ }
+ MOZ_ASSERT(compiler_->mode_ == CacheIRCompiler::Mode::Baseline);
+ stubFrame_->enter(masm_, scratch_.ref());
+}
+
+void AutoCallVM::storeResult(JSValueType returnType) {
+ MOZ_ASSERT(returnType != JSVAL_TYPE_DOUBLE);
+
+ if (returnType == JSVAL_TYPE_UNKNOWN) {
+ masm_.storeCallResultValue(output_.ref());
+ } else {
+ if (output_->hasValue()) {
+ masm_.tagValue(returnType, ReturnReg, output_->valueReg());
+ } else {
+ masm_.storeCallPointerResult(output_->typedReg().gpr());
+ }
+ }
+}
+
+void AutoCallVM::leaveBaselineStubFrame() {
+ if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
+ stubFrame_->leave(masm_);
+ }
+}
+
+template <typename...>
+struct VMFunctionReturnType;
+
+template <class R, typename... Args>
+struct VMFunctionReturnType<R (*)(JSContext*, Args...)> {
+ using LastArgument = typename LastArg<Args...>::Type;
+
+ // By convention VMFunctions returning `bool` use an output parameter.
+ using ReturnType =
+ std::conditional_t<std::is_same_v<R, bool>, LastArgument, R>;
+};
+
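+// For example, with Fn = bool (*)(JSContext*, HandleObject, HandleValue,
+// bool*) (the signature used for jit::SetObjectHas in CacheIRCompiler.cpp),
+// the last argument is bool*, so ReturnType is bool* and the call result is
+// tagged as a Boolean via ReturnTypeToJSValueType below.
+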
+template <class>
+struct ReturnTypeToJSValueType;
+
+// Definitions for the currently used return types.
+template <>
+struct ReturnTypeToJSValueType<MutableHandleValue> {
+ static constexpr JSValueType result = JSVAL_TYPE_UNKNOWN;
+};
+template <>
+struct ReturnTypeToJSValueType<bool*> {
+ static constexpr JSValueType result = JSVAL_TYPE_BOOLEAN;
+};
+template <>
+struct ReturnTypeToJSValueType<int32_t*> {
+ static constexpr JSValueType result = JSVAL_TYPE_INT32;
+};
+template <>
+struct ReturnTypeToJSValueType<JSString*> {
+ static constexpr JSValueType result = JSVAL_TYPE_STRING;
+};
+template <>
+struct ReturnTypeToJSValueType<BigInt*> {
+ static constexpr JSValueType result = JSVAL_TYPE_BIGINT;
+};
+template <>
+struct ReturnTypeToJSValueType<JSObject*> {
+ static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
+};
+template <>
+struct ReturnTypeToJSValueType<PropertyIteratorObject*> {
+ static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
+};
+template <>
+struct ReturnTypeToJSValueType<ArrayIteratorObject*> {
+ static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
+};
+template <>
+struct ReturnTypeToJSValueType<StringIteratorObject*> {
+ static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
+};
+template <>
+struct ReturnTypeToJSValueType<RegExpStringIteratorObject*> {
+ static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
+};
+template <>
+struct ReturnTypeToJSValueType<PlainObject*> {
+ static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
+};
+template <>
+struct ReturnTypeToJSValueType<ArrayObject*> {
+ static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
+};
+template <>
+struct ReturnTypeToJSValueType<TypedArrayObject*> {
+ static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
+};
+
+template <typename Fn>
+void AutoCallVM::storeResult() {
+ using ReturnType = typename VMFunctionReturnType<Fn>::ReturnType;
+ storeResult(ReturnTypeToJSValueType<ReturnType>::result);
+}
+
+AutoScratchFloatRegister::AutoScratchFloatRegister(CacheIRCompiler* compiler,
+ FailurePath* failure)
+ : compiler_(compiler), failure_(failure) {
+ // If we're compiling a Baseline IC, FloatReg0 is always available.
+ if (!compiler_->isBaseline()) {
+ MacroAssembler& masm = compiler_->masm;
+ masm.push(FloatReg0);
+ compiler->allocator.setHasAutoScratchFloatRegisterSpill(true);
+ }
+
+ if (failure_) {
+ failure_->setHasAutoScratchFloatRegister();
+ }
+}
+
+AutoScratchFloatRegister::~AutoScratchFloatRegister() {
+ if (failure_) {
+ failure_->clearHasAutoScratchFloatRegister();
+ }
+
+ if (!compiler_->isBaseline()) {
+ MacroAssembler& masm = compiler_->masm;
+ masm.pop(FloatReg0);
+ compiler_->allocator.setHasAutoScratchFloatRegisterSpill(false);
+
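+    // If failure() was taken while the register was spilled, control arrives
+    // at failurePopReg_: restore FloatReg0 there before jumping to the real
+    // failure label.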
+ if (failure_) {
+ Label done;
+ masm.jump(&done);
+ masm.bind(&failurePopReg_);
+ masm.pop(FloatReg0);
+ masm.jump(failure_->label());
+ masm.bind(&done);
+ }
+ }
+}
+
+Label* AutoScratchFloatRegister::failure() {
+ MOZ_ASSERT(failure_);
+
+ if (!compiler_->isBaseline()) {
+ return &failurePopReg_;
+ }
+ return failure_->labelUnchecked();
+}
diff --git a/js/src/jit/CacheIRCompiler.h b/js/src/jit/CacheIRCompiler.h
new file mode 100644
index 0000000000..465db7a9b8
--- /dev/null
+++ b/js/src/jit/CacheIRCompiler.h
@@ -0,0 +1,1314 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CacheIRCompiler_h
+#define jit_CacheIRCompiler_h
+
+#include "mozilla/Casting.h"
+#include "mozilla/Maybe.h"
+
+#include "jit/CacheIR.h"
+#include "jit/CacheIRReader.h"
+#include "jit/CacheIRWriter.h"
+#include "jit/JitOptions.h"
+#include "jit/MacroAssembler.h"
+#include "jit/PerfSpewer.h"
+#include "jit/SharedICRegisters.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+
+namespace JS {
+class BigInt;
+}
+
+namespace js {
+
+class TypedArrayObject;
+enum class UnaryMathFunction : uint8_t;
+
+namespace jit {
+
+class BaselineCacheIRCompiler;
+class ICCacheIRStub;
+class IonCacheIRCompiler;
+class IonScript;
+
+enum class ICStubEngine : uint8_t;
+
+// [SMDOC] CacheIR Value Representation and Tracking
+//
+// While compiling an IC stub the CacheIR compiler needs to keep track of the
+// physical location for each logical piece of data we care about, as well as
+// ensure that in the case of a stub failing, we are able to restore the input
+// state so that a subsequent stub can attempt to provide a value.
+//
+// OperandIds are created in the CacheIR front-end to keep track of values that
+// are passed between CacheIR ops during the execution of a given CacheIR stub.
+// In the CacheRegisterAllocator these OperandIds are given OperandLocations,
+// which represent the physical location of the OperandId at a given point in
+// time during CacheRegister allocation.
+//
+// In the CacheRegisterAllocator, physical locations include the stack and
+// registers, as well as whether or not the value has been unboxed.
+// Constants are also represented separately to provide for on-demand
+// materialization.
+//
+// Intra-op Register allocation:
+//
+// During the emission of a CacheIR op, code can ask the CacheRegisterAllocator
+// for access to a particular OperandId, and the register allocator will
+// generate the required code to fill that request.
+//
+// Input OperandIds should be considered immutable, and should not be mutated
+// during the execution of a stub.
+//
+// There are also a number of RAII classes that interact with the register
+// allocator, in order to provide access to more registers than just those
+// provided for by the OperandIds.
+//
+// - AutoOutputRegister: The register which will hold the output value of the
+//   stub.
+// - AutoScratchRegister: By default, an arbitrary scratch register; however, a
+//   specific register can be requested.
+// - AutoScratchRegisterMaybeOutput: Any arbitrary scratch register, but the
+//   output register may be used as well.
+//
+// These RAII classes take ownership of a register for the duration of their
+// lifetime so they can be used for computation or output. The register
+// allocator can spill values with OperandLocations in order to try to ensure
+// that a register is made available for use.
+//
+// If a specific register is required (via AutoScratchRegister), it should be
+// the first register acquired, as the register allocator will be unable to
+// allocate the fixed register if the current op is using it for something else.
+//
+// If no register can be provided after attempting to spill, a
+// MOZ_RELEASE_ASSERT ensures the browser will crash. The register allocator is
+// not provided enough information in its current design to insert spills and
+// fills at arbitrary locations, and so it can fail to find an allocation
+// solution. However, this will only happen within the implementation of an
+// operand emitter, and because the cache register allocator is mostly
+// deterministic, so long as the operand emitter is tested, this won't
+// suddenly crop up on an arbitrary webpage. It's worth noting that the most
+// difficult platform to support is x86-32, because it has the fewest
+// registers available.
+//
+// FailurePaths checkpoint the state of the register allocator so that the input
+// state can be recomputed from the current state before jumping to the next
+// stub in the IC chain. An important invariant is that the FailurePath must be
+// allocated for each op after all the manipulation of OperandLocations has
+// happened, so that its recording is correct.
+//
+// Inter-op Register Allocation:
+//
+// The RAII register management classes are RAII because all register state
+// outside the OperandLocations is reset before the compilation of each
+// individual CacheIR op. This means that you cannot rely on a value surviving
+// between ops, even if you use the ability of AutoScratchRegister to name a
+// specific register. Values that need to be preserved between ops must be given
+// an OperandId.
+
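+// As a minimal sketch (modeled on the emitters in CacheIRCompiler.cpp, e.g.
+// emitSetSizeResult; names are illustrative), a typical *Result emitter uses
+// these pieces as follows:
+//
+//   AutoOutputRegister output(*this);
+//   Register obj = allocator.useRegister(masm, objId);
+//   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+//
+//   FailurePath* failure;
+//   if (!addFailurePath(&failure)) {
+//     return false;
+//   }
+//   // ... guards jump to failure->label() on mismatch ...
+//
+//   masm.loadSetObjectSize(obj, scratch);
+//   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+//   return true;
+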
+// Represents a Value on the Baseline frame's expression stack. Slot 0 is the
+// value on top of the stack (the most recently pushed value), slot 1 is the
+// value pushed before that, etc.
+class BaselineFrameSlot {
+ uint32_t slot_;
+
+ public:
+ explicit BaselineFrameSlot(uint32_t slot) : slot_(slot) {}
+ uint32_t slot() const { return slot_; }
+
+ bool operator==(const BaselineFrameSlot& other) const {
+ return slot_ == other.slot_;
+ }
+ bool operator!=(const BaselineFrameSlot& other) const {
+ return slot_ != other.slot_;
+ }
+};
+
+// OperandLocation represents the location of an OperandId. The operand is
+// either in a register or on the stack, and is either boxed or unboxed.
+class OperandLocation {
+ public:
+ enum Kind {
+ Uninitialized = 0,
+ PayloadReg,
+ DoubleReg,
+ ValueReg,
+ PayloadStack,
+ ValueStack,
+ BaselineFrame,
+ Constant,
+ };
+
+ private:
+ Kind kind_;
+
+ union Data {
+ struct {
+ Register reg;
+ JSValueType type;
+ } payloadReg;
+ FloatRegister doubleReg;
+ ValueOperand valueReg;
+ struct {
+ uint32_t stackPushed;
+ JSValueType type;
+ } payloadStack;
+ uint32_t valueStackPushed;
+ BaselineFrameSlot baselineFrameSlot;
+ Value constant;
+
+ Data() : valueStackPushed(0) {}
+ };
+ Data data_;
+
+ public:
+ OperandLocation() : kind_(Uninitialized) {}
+
+ Kind kind() const { return kind_; }
+
+ void setUninitialized() { kind_ = Uninitialized; }
+
+ ValueOperand valueReg() const {
+ MOZ_ASSERT(kind_ == ValueReg);
+ return data_.valueReg;
+ }
+ Register payloadReg() const {
+ MOZ_ASSERT(kind_ == PayloadReg);
+ return data_.payloadReg.reg;
+ }
+ FloatRegister doubleReg() const {
+ MOZ_ASSERT(kind_ == DoubleReg);
+ return data_.doubleReg;
+ }
+ uint32_t payloadStack() const {
+ MOZ_ASSERT(kind_ == PayloadStack);
+ return data_.payloadStack.stackPushed;
+ }
+ uint32_t valueStack() const {
+ MOZ_ASSERT(kind_ == ValueStack);
+ return data_.valueStackPushed;
+ }
+ JSValueType payloadType() const {
+ if (kind_ == PayloadReg) {
+ return data_.payloadReg.type;
+ }
+ MOZ_ASSERT(kind_ == PayloadStack);
+ return data_.payloadStack.type;
+ }
+ Value constant() const {
+ MOZ_ASSERT(kind_ == Constant);
+ return data_.constant;
+ }
+ BaselineFrameSlot baselineFrameSlot() const {
+ MOZ_ASSERT(kind_ == BaselineFrame);
+ return data_.baselineFrameSlot;
+ }
+
+ void setPayloadReg(Register reg, JSValueType type) {
+ kind_ = PayloadReg;
+ data_.payloadReg.reg = reg;
+ data_.payloadReg.type = type;
+ }
+ void setDoubleReg(FloatRegister reg) {
+ kind_ = DoubleReg;
+ data_.doubleReg = reg;
+ }
+ void setValueReg(ValueOperand reg) {
+ kind_ = ValueReg;
+ data_.valueReg = reg;
+ }
+ void setPayloadStack(uint32_t stackPushed, JSValueType type) {
+ kind_ = PayloadStack;
+ data_.payloadStack.stackPushed = stackPushed;
+ data_.payloadStack.type = type;
+ }
+ void setValueStack(uint32_t stackPushed) {
+ kind_ = ValueStack;
+ data_.valueStackPushed = stackPushed;
+ }
+ void setConstant(const Value& v) {
+ kind_ = Constant;
+ data_.constant = v;
+ }
+ void setBaselineFrame(BaselineFrameSlot slot) {
+ kind_ = BaselineFrame;
+ data_.baselineFrameSlot = slot;
+ }
+
+ bool isUninitialized() const { return kind_ == Uninitialized; }
+ bool isInRegister() const { return kind_ == PayloadReg || kind_ == ValueReg; }
+ bool isOnStack() const {
+ return kind_ == PayloadStack || kind_ == ValueStack;
+ }
+
+ size_t stackPushed() const {
+ if (kind_ == PayloadStack) {
+ return data_.payloadStack.stackPushed;
+ }
+ MOZ_ASSERT(kind_ == ValueStack);
+ return data_.valueStackPushed;
+ }
+ size_t stackSizeInBytes() const {
+ if (kind_ == PayloadStack) {
+ return sizeof(uintptr_t);
+ }
+ MOZ_ASSERT(kind_ == ValueStack);
+ return sizeof(js::Value);
+ }
+ void adjustStackPushed(int32_t diff) {
+ if (kind_ == PayloadStack) {
+ data_.payloadStack.stackPushed += diff;
+ return;
+ }
+ MOZ_ASSERT(kind_ == ValueStack);
+ data_.valueStackPushed += diff;
+ }
+
+ bool aliasesReg(Register reg) const {
+ if (kind_ == PayloadReg) {
+ return payloadReg() == reg;
+ }
+ if (kind_ == ValueReg) {
+ return valueReg().aliases(reg);
+ }
+ return false;
+ }
+ bool aliasesReg(ValueOperand reg) const {
+#if defined(JS_NUNBOX32)
+ return aliasesReg(reg.typeReg()) || aliasesReg(reg.payloadReg());
+#else
+ return aliasesReg(reg.valueReg());
+#endif
+ }
+
+ bool aliasesReg(const OperandLocation& other) const;
+
+ bool operator==(const OperandLocation& other) const;
+ bool operator!=(const OperandLocation& other) const {
+ return !operator==(other);
+ }
+};
+
+struct SpilledRegister {
+ Register reg;
+ uint32_t stackPushed;
+
+ SpilledRegister(Register reg, uint32_t stackPushed)
+ : reg(reg), stackPushed(stackPushed) {}
+ bool operator==(const SpilledRegister& other) const {
+ return reg == other.reg && stackPushed == other.stackPushed;
+ }
+ bool operator!=(const SpilledRegister& other) const {
+ return !(*this == other);
+ }
+};
+
+using SpilledRegisterVector = Vector<SpilledRegister, 2, SystemAllocPolicy>;
+
+// Class to track and allocate registers while emitting IC code.
+class MOZ_RAII CacheRegisterAllocator {
+ // The original location of the inputs to the cache.
+ Vector<OperandLocation, 4, SystemAllocPolicy> origInputLocations_;
+
+ // The current location of each operand.
+ Vector<OperandLocation, 8, SystemAllocPolicy> operandLocations_;
+
+  // Free lists for value- and payload-slots on the stack.
+ Vector<uint32_t, 2, SystemAllocPolicy> freeValueSlots_;
+ Vector<uint32_t, 2, SystemAllocPolicy> freePayloadSlots_;
+
+ // The registers allocated while emitting the current CacheIR op.
+ // This prevents us from allocating a register and then immediately
+ // clobbering it for something else, while we're still holding on to it.
+ LiveGeneralRegisterSet currentOpRegs_;
+
+ const AllocatableGeneralRegisterSet allocatableRegs_;
+
+ // Registers that are currently unused and available.
+ AllocatableGeneralRegisterSet availableRegs_;
+
+ // Registers that are available, but before use they must be saved and
+ // then restored when returning from the stub.
+ AllocatableGeneralRegisterSet availableRegsAfterSpill_;
+
+ // Registers we took from availableRegsAfterSpill_ and spilled to the stack.
+ SpilledRegisterVector spilledRegs_;
+
+ // The number of bytes pushed on the native stack.
+ uint32_t stackPushed_;
+
+#ifdef DEBUG
+ // Flag used to assert individual CacheIR instructions don't allocate
+ // registers after calling addFailurePath.
+ bool addedFailurePath_;
+#endif
+
+ // The index of the CacheIR instruction we're currently emitting.
+ uint32_t currentInstruction_;
+
+ // Whether the stack contains a double spilled by AutoScratchFloatRegister.
+ bool hasAutoScratchFloatRegisterSpill_ = false;
+
+ const CacheIRWriter& writer_;
+
+ CacheRegisterAllocator(const CacheRegisterAllocator&) = delete;
+ CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete;
+
+ void freeDeadOperandLocations(MacroAssembler& masm);
+
+ void spillOperandToStack(MacroAssembler& masm, OperandLocation* loc);
+ void spillOperandToStackOrRegister(MacroAssembler& masm,
+ OperandLocation* loc);
+
+ void popPayload(MacroAssembler& masm, OperandLocation* loc, Register dest);
+ void popValue(MacroAssembler& masm, OperandLocation* loc, ValueOperand dest);
+ Address payloadAddress(MacroAssembler& masm,
+ const OperandLocation* loc) const;
+ Address valueAddress(MacroAssembler& masm, const OperandLocation* loc) const;
+
+#ifdef DEBUG
+ void assertValidState() const;
+#endif
+
+ public:
+ friend class AutoScratchRegister;
+ friend class AutoScratchRegisterExcluding;
+
+ explicit CacheRegisterAllocator(const CacheIRWriter& writer)
+ : allocatableRegs_(GeneralRegisterSet::All()),
+ stackPushed_(0),
+#ifdef DEBUG
+ addedFailurePath_(false),
+#endif
+ currentInstruction_(0),
+ writer_(writer) {
+ }
+
+ [[nodiscard]] bool init();
+
+ void initAvailableRegs(const AllocatableGeneralRegisterSet& available) {
+ availableRegs_ = available;
+ }
+ void initAvailableRegsAfterSpill();
+
+ void fixupAliasedInputs(MacroAssembler& masm);
+
+ OperandLocation operandLocation(size_t i) const {
+ return operandLocations_[i];
+ }
+ void setOperandLocation(size_t i, const OperandLocation& loc) {
+ operandLocations_[i] = loc;
+ }
+
+ OperandLocation origInputLocation(size_t i) const {
+ return origInputLocations_[i];
+ }
+ void initInputLocation(size_t i, ValueOperand reg) {
+ origInputLocations_[i].setValueReg(reg);
+ operandLocations_[i].setValueReg(reg);
+ }
+ void initInputLocation(size_t i, Register reg, JSValueType type) {
+ origInputLocations_[i].setPayloadReg(reg, type);
+ operandLocations_[i].setPayloadReg(reg, type);
+ }
+ void initInputLocation(size_t i, FloatRegister reg) {
+ origInputLocations_[i].setDoubleReg(reg);
+ operandLocations_[i].setDoubleReg(reg);
+ }
+ void initInputLocation(size_t i, const Value& v) {
+ origInputLocations_[i].setConstant(v);
+ operandLocations_[i].setConstant(v);
+ }
+ void initInputLocation(size_t i, BaselineFrameSlot slot) {
+ origInputLocations_[i].setBaselineFrame(slot);
+ operandLocations_[i].setBaselineFrame(slot);
+ }
+
+ void initInputLocation(size_t i, const TypedOrValueRegister& reg);
+ void initInputLocation(size_t i, const ConstantOrRegister& value);
+
+ const SpilledRegisterVector& spilledRegs() const { return spilledRegs_; }
+
+ [[nodiscard]] bool setSpilledRegs(const SpilledRegisterVector& regs) {
+ spilledRegs_.clear();
+ return spilledRegs_.appendAll(regs);
+ }
+
+ bool hasAutoScratchFloatRegisterSpill() const {
+ return hasAutoScratchFloatRegisterSpill_;
+ }
+ void setHasAutoScratchFloatRegisterSpill(bool b) {
+ MOZ_ASSERT(hasAutoScratchFloatRegisterSpill_ != b);
+ hasAutoScratchFloatRegisterSpill_ = b;
+ }
+
+ void nextOp() {
+#ifdef DEBUG
+ assertValidState();
+ addedFailurePath_ = false;
+#endif
+ currentOpRegs_.clear();
+ currentInstruction_++;
+ }
+
+#ifdef DEBUG
+ void setAddedFailurePath() {
+ MOZ_ASSERT(!addedFailurePath_, "multiple failure paths for instruction");
+ addedFailurePath_ = true;
+ }
+#endif
+
+ bool isDeadAfterInstruction(OperandId opId) const {
+ return writer_.operandIsDead(opId.id(), currentInstruction_ + 1);
+ }
+
+ uint32_t stackPushed() const { return stackPushed_; }
+ void setStackPushed(uint32_t pushed) { stackPushed_ = pushed; }
+
+ bool isAllocatable(Register reg) const { return allocatableRegs_.has(reg); }
+
+ // Allocates a new register.
+ Register allocateRegister(MacroAssembler& masm);
+ ValueOperand allocateValueRegister(MacroAssembler& masm);
+
+ void allocateFixedRegister(MacroAssembler& masm, Register reg);
+ void allocateFixedValueRegister(MacroAssembler& masm, ValueOperand reg);
+
+ // Releases a register so it can be reused later.
+ void releaseRegister(Register reg) {
+ MOZ_ASSERT(currentOpRegs_.has(reg));
+ availableRegs_.add(reg);
+ currentOpRegs_.take(reg);
+ }
+ void releaseValueRegister(ValueOperand reg) {
+#ifdef JS_NUNBOX32
+ releaseRegister(reg.payloadReg());
+ releaseRegister(reg.typeReg());
+#else
+ releaseRegister(reg.valueReg());
+#endif
+ }
+
+ // Removes spilled values from the native stack. This should only be
+ // called after all registers have been allocated.
+ void discardStack(MacroAssembler& masm);
+
+ Address addressOf(MacroAssembler& masm, BaselineFrameSlot slot) const;
+ BaseValueIndex addressOf(MacroAssembler& masm, Register argcReg,
+ BaselineFrameSlot slot) const;
+
+ // Returns the register for the given operand. If the operand is currently
+ // not in a register, it will load it into one.
+ ValueOperand useValueRegister(MacroAssembler& masm, ValOperandId val);
+ Register useRegister(MacroAssembler& masm, TypedOperandId typedId);
+
+ ConstantOrRegister useConstantOrRegister(MacroAssembler& masm,
+ ValOperandId val);
+
+ // Allocates an output register for the given operand.
+ Register defineRegister(MacroAssembler& masm, TypedOperandId typedId);
+ ValueOperand defineValueRegister(MacroAssembler& masm, ValOperandId val);
+
+  // Loads (potentially coercing) and unboxes a value into a float register.
+  // This is infallible, as there should have been a previous guard to ensure
+  // the value is already a number.
+ // Does not change the allocator's state.
+ void ensureDoubleRegister(MacroAssembler& masm, NumberOperandId op,
+ FloatRegister dest) const;
+
+ // Loads an unboxed value into a scratch register. This can be useful
+ // especially on 32-bit x86 when there are not enough registers for
+ // useRegister.
+ // Does not change the allocator's state.
+ void copyToScratchRegister(MacroAssembler& masm, TypedOperandId typedId,
+ Register dest) const;
+ void copyToScratchValueRegister(MacroAssembler& masm, ValOperandId valId,
+ ValueOperand dest) const;
+
+ // Returns |val|'s JSValueType or JSVAL_TYPE_UNKNOWN.
+ JSValueType knownType(ValOperandId val) const;
+
+ // Emits code to restore registers and stack to the state at the start of
+ // the stub.
+ void restoreInputState(MacroAssembler& masm, bool discardStack = true);
+
+ // Returns the set of registers storing the IC input operands.
+ GeneralRegisterSet inputRegisterSet() const;
+
+ void saveIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs,
+ Register scratch, IonScript* ionScript);
+ void restoreIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs);
+};
+
+// RAII class to allocate a scratch register and release it when we're done
+// with it.
+class MOZ_RAII AutoScratchRegister {
+ CacheRegisterAllocator& alloc_;
+ Register reg_;
+
+ AutoScratchRegister(const AutoScratchRegister&) = delete;
+ void operator=(const AutoScratchRegister&) = delete;
+
+ public:
+ AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm,
+ Register reg = InvalidReg)
+ : alloc_(alloc) {
+ if (reg != InvalidReg) {
+ alloc.allocateFixedRegister(masm, reg);
+ reg_ = reg;
+ } else {
+ reg_ = alloc.allocateRegister(masm);
+ }
+ MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
+ }
+ ~AutoScratchRegister() { alloc_.releaseRegister(reg_); }
+
+ Register get() const { return reg_; }
+ operator Register() const { return reg_; }
+};
+
+// On x86, spectreBoundsCheck32 can emit better code if it has a scratch
+// register and index masking is enabled.
+class MOZ_RAII AutoSpectreBoundsScratchRegister {
+ mozilla::Maybe<AutoScratchRegister> scratch_;
+ Register reg_ = InvalidReg;
+
+ AutoSpectreBoundsScratchRegister(const AutoSpectreBoundsScratchRegister&) =
+ delete;
+ void operator=(const AutoSpectreBoundsScratchRegister&) = delete;
+
+ public:
+ AutoSpectreBoundsScratchRegister(CacheRegisterAllocator& alloc,
+ MacroAssembler& masm) {
+#ifdef JS_CODEGEN_X86
+ if (JitOptions.spectreIndexMasking) {
+ scratch_.emplace(alloc, masm);
+ reg_ = scratch_->get();
+ }
+#endif
+ }
+
+ Register get() const { return reg_; }
+ operator Register() const { return reg_; }
+};
+
+// Scratch Register64. Implemented with a single AutoScratchRegister on 64-bit
+// platforms and two AutoScratchRegisters on 32-bit platforms.
+class MOZ_RAII AutoScratchRegister64 {
+ AutoScratchRegister reg1_;
+#if JS_BITS_PER_WORD == 32
+ AutoScratchRegister reg2_;
+#endif
+
+ public:
+ AutoScratchRegister64(const AutoScratchRegister64&) = delete;
+ void operator=(const AutoScratchRegister64&) = delete;
+
+#if JS_BITS_PER_WORD == 32
+ AutoScratchRegister64(CacheRegisterAllocator& alloc, MacroAssembler& masm)
+ : reg1_(alloc, masm), reg2_(alloc, masm) {}
+
+ Register64 get() const { return Register64(reg1_, reg2_); }
+#else
+ AutoScratchRegister64(CacheRegisterAllocator& alloc, MacroAssembler& masm)
+ : reg1_(alloc, masm) {}
+
+ Register64 get() const { return Register64(reg1_); }
+#endif
+
+ operator Register64() const { return get(); }
+};
+
+// Scratch ValueOperand. Implemented with a single AutoScratchRegister on 64-bit
+// platforms and two AutoScratchRegisters on 32-bit platforms.
+class MOZ_RAII AutoScratchValueRegister {
+ AutoScratchRegister reg1_;
+#if JS_BITS_PER_WORD == 32
+ AutoScratchRegister reg2_;
+#endif
+
+ public:
+ AutoScratchValueRegister(const AutoScratchValueRegister&) = delete;
+ void operator=(const AutoScratchValueRegister&) = delete;
+
+#if JS_BITS_PER_WORD == 32
+ AutoScratchValueRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm)
+ : reg1_(alloc, masm), reg2_(alloc, masm) {}
+
+ ValueOperand get() const { return ValueOperand(reg1_, reg2_); }
+#else
+ AutoScratchValueRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm)
+ : reg1_(alloc, masm) {}
+
+ ValueOperand get() const { return ValueOperand(reg1_); }
+#endif
+
+ operator ValueOperand() const { return get(); }
+};
+
+// The FailurePath class stores everything we need to generate a failure path
+// at the end of the IC code. The failure path restores the input registers, if
+// needed, and jumps to the next stub.
+class FailurePath {
+ Vector<OperandLocation, 4, SystemAllocPolicy> inputs_;
+ SpilledRegisterVector spilledRegs_;
+ NonAssertingLabel label_;
+ uint32_t stackPushed_;
+#ifdef DEBUG
+ // Flag to ensure FailurePath::label() isn't taken while there's a scratch
+ // float register which still needs to be restored.
+ bool hasAutoScratchFloatRegister_ = false;
+#endif
+
+ public:
+ FailurePath() = default;
+
+ FailurePath(FailurePath&& other)
+ : inputs_(std::move(other.inputs_)),
+ spilledRegs_(std::move(other.spilledRegs_)),
+ label_(other.label_),
+ stackPushed_(other.stackPushed_) {}
+
+ Label* labelUnchecked() { return &label_; }
+ Label* label() {
+ MOZ_ASSERT(!hasAutoScratchFloatRegister_);
+ return labelUnchecked();
+ }
+
+ void setStackPushed(uint32_t i) { stackPushed_ = i; }
+ uint32_t stackPushed() const { return stackPushed_; }
+
+ [[nodiscard]] bool appendInput(const OperandLocation& loc) {
+ return inputs_.append(loc);
+ }
+ OperandLocation input(size_t i) const { return inputs_[i]; }
+
+ const SpilledRegisterVector& spilledRegs() const { return spilledRegs_; }
+
+ [[nodiscard]] bool setSpilledRegs(const SpilledRegisterVector& regs) {
+ MOZ_ASSERT(spilledRegs_.empty());
+ return spilledRegs_.appendAll(regs);
+ }
+
+ // If canShareFailurePath(other) returns true, the same machine code will
+ // be emitted for two failure paths, so we can share them.
+ bool canShareFailurePath(const FailurePath& other) const;
+
+ void setHasAutoScratchFloatRegister() {
+#ifdef DEBUG
+ MOZ_ASSERT(!hasAutoScratchFloatRegister_);
+ hasAutoScratchFloatRegister_ = true;
+#endif
+ }
+
+ void clearHasAutoScratchFloatRegister() {
+#ifdef DEBUG
+ MOZ_ASSERT(hasAutoScratchFloatRegister_);
+ hasAutoScratchFloatRegister_ = false;
+#endif
+ }
+};
+
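To show how a FailurePath is consumed, a hedged sketch of the common emitter pattern. addFailurePath is declared on CacheIRCompiler further down; the ValueOperand |inputVal| and the particular guard are illustrative.

// Sketch: request a failure path, then branch to it when a guard fails.
FailurePath* failure;
if (!addFailurePath(&failure)) {
  return false;
}
masm.branchTestObject(Assembler::NotEqual, inputVal, failure->label());
// On success we fall through; emitFailurePath() later emits the register
// restore and the jump to the next stub at failure->label().
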
+/**
+ * Wrap an offset so that a call can decide to embed a constant
+ * or load from the stub data.
+ */
+class StubFieldOffset {
+ private:
+ uint32_t offset_;
+ StubField::Type type_;
+
+ public:
+ StubFieldOffset(uint32_t offset, StubField::Type type)
+ : offset_(offset), type_(type) {}
+
+ uint32_t getOffset() { return offset_; }
+ StubField::Type getStubFieldType() { return type_; }
+};
+
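A brief sketch of how a StubFieldOffset is consumed. emitLoadStubField is declared on CacheIRCompiler below; |shapeOffset| and |scratchReg| are illustrative names.

// Sketch: wrap a stub-field offset and let the compiler decide whether to
// embed the value as a constant (Ion) or load it from the stub data
// (Baseline).
StubFieldOffset shapeField(shapeOffset, StubField::Type::Shape);
emitLoadStubField(shapeField, scratchReg);  // scratchReg now holds the Shape*
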
+class AutoOutputRegister;
+
+// Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
+class MOZ_RAII CacheIRCompiler {
+ protected:
+ friend class AutoOutputRegister;
+ friend class AutoStubFrame;
+ friend class AutoSaveLiveRegisters;
+ friend class AutoCallVM;
+ friend class AutoScratchFloatRegister;
+ friend class AutoAvailableFloatRegister;
+
+ enum class Mode { Baseline, Ion };
+
+ bool enteredStubFrame_;
+
+ bool isBaseline();
+ bool isIon();
+ BaselineCacheIRCompiler* asBaseline();
+ IonCacheIRCompiler* asIon();
+
+ JSContext* cx_;
+ const CacheIRWriter& writer_;
+ StackMacroAssembler masm;
+
+ CacheRegisterAllocator allocator;
+ Vector<FailurePath, 4, SystemAllocPolicy> failurePaths;
+
+ // Float registers that are live. Registers not in this set can be
+ // clobbered and don't need to be saved before performing a VM call.
+ // Doing this for non-float registers is a bit more complicated because
+ // the IC register allocator allocates GPRs.
+ LiveFloatRegisterSet liveFloatRegs_;
+
+ mozilla::Maybe<TypedOrValueRegister> outputUnchecked_;
+ Mode mode_;
+
+ // Distance from the IC to the stub data; mostly will be
+ // sizeof(stubType)
+ uint32_t stubDataOffset_;
+
+ enum class StubFieldPolicy { Address, Constant };
+
+ StubFieldPolicy stubFieldPolicy_;
+
+ CacheIRCompiler(JSContext* cx, TempAllocator& alloc,
+ const CacheIRWriter& writer, uint32_t stubDataOffset,
+ Mode mode, StubFieldPolicy policy)
+ : enteredStubFrame_(false),
+ cx_(cx),
+ writer_(writer),
+ masm(cx, alloc),
+ allocator(writer_),
+ liveFloatRegs_(FloatRegisterSet::All()),
+ mode_(mode),
+ stubDataOffset_(stubDataOffset),
+ stubFieldPolicy_(policy) {
+ MOZ_ASSERT(!writer.failed());
+ }
+
+ [[nodiscard]] bool addFailurePath(FailurePath** failure);
+ [[nodiscard]] bool emitFailurePath(size_t i);
+
+ // Returns the set of volatile float registers that are live. These
+ // registers need to be saved when making non-GC calls with callWithABI.
+ FloatRegisterSet liveVolatileFloatRegs() const {
+ return FloatRegisterSet::Intersect(liveFloatRegs_.set(),
+ FloatRegisterSet::Volatile());
+ }
+
+ bool objectGuardNeedsSpectreMitigations(ObjOperandId objId) const {
+ // Instructions like GuardShape need Spectre mitigations if
+ // (1) mitigations are enabled and (2) the object is used by other
+ // instructions (if the object is *not* used by other instructions,
+ // zeroing its register is pointless).
+ return JitOptions.spectreObjectMitigations &&
+ !allocator.isDeadAfterInstruction(objId);
+ }
+
+ private:
+ void emitPostBarrierShared(Register obj, const ConstantOrRegister& val,
+ Register scratch, Register maybeIndex);
+
+ void emitPostBarrierShared(Register obj, ValueOperand val, Register scratch,
+ Register maybeIndex) {
+ emitPostBarrierShared(obj, ConstantOrRegister(val), scratch, maybeIndex);
+ }
+
+ protected:
+ template <typename T>
+ void emitPostBarrierSlot(Register obj, const T& val, Register scratch) {
+ emitPostBarrierShared(obj, val, scratch, InvalidReg);
+ }
+
+ template <typename T>
+ void emitPostBarrierElement(Register obj, const T& val, Register scratch,
+ Register index) {
+ MOZ_ASSERT(index != InvalidReg);
+ emitPostBarrierShared(obj, val, scratch, index);
+ }
+
+ bool emitComparePointerResultShared(JSOp op, TypedOperandId lhsId,
+ TypedOperandId rhsId);
+
+ [[nodiscard]] bool emitMathFunctionNumberResultShared(
+ UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output);
+
+ template <typename Fn, Fn fn>
+ [[nodiscard]] bool emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
+ BigIntOperandId rhsId);
+
+ template <typename Fn, Fn fn>
+ [[nodiscard]] bool emitBigIntUnaryOperationShared(BigIntOperandId inputId);
+
+ bool emitDoubleIncDecResult(bool isInc, NumberOperandId inputId);
+
+ using AtomicsReadWriteModifyFn = int32_t (*)(TypedArrayObject*, size_t,
+ int32_t);
+
+ [[nodiscard]] bool emitAtomicsReadModifyWriteResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, AtomicsReadWriteModifyFn fn);
+
+ using AtomicsReadWriteModify64Fn = JS::BigInt* (*)(JSContext*,
+ TypedArrayObject*, size_t,
+ const JS::BigInt*);
+
+ template <AtomicsReadWriteModify64Fn fn>
+ [[nodiscard]] bool emitAtomicsReadModifyWriteResult64(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId);
+
+ void emitActivateIterator(Register objBeingIterated, Register iterObject,
+ Register nativeIter, Register scratch,
+ Register scratch2, uint32_t enumeratorsAddrOffset);
+
+ CACHE_IR_COMPILER_SHARED_GENERATED
+
+ void emitLoadStubField(StubFieldOffset val, Register dest);
+ void emitLoadStubFieldConstant(StubFieldOffset val, Register dest);
+
+ void emitLoadValueStubField(StubFieldOffset val, ValueOperand dest);
+ void emitLoadDoubleValueStubField(StubFieldOffset val, ValueOperand dest,
+ FloatRegister scratch);
+
+ uintptr_t readStubWord(uint32_t offset, StubField::Type type) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
+ return writer_.readStubField(offset, type).asWord();
+ }
+ uint64_t readStubInt64(uint32_t offset, StubField::Type type) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
+ return writer_.readStubField(offset, type).asInt64();
+ }
+ int32_t int32StubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return readStubWord(offset, StubField::Type::RawInt32);
+ }
+ uint32_t uint32StubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return readStubWord(offset, StubField::Type::RawInt32);
+ }
+ Shape* shapeStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (Shape*)readStubWord(offset, StubField::Type::Shape);
+ }
+ GetterSetter* getterSetterStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (GetterSetter*)readStubWord(offset, StubField::Type::GetterSetter);
+ }
+ JSObject* objectStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (JSObject*)readStubWord(offset, StubField::Type::JSObject);
+ }
+ Value valueStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ uint64_t raw = readStubInt64(offset, StubField::Type::Value);
+ return Value::fromRawBits(raw);
+ }
+ double doubleStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ uint64_t raw = readStubInt64(offset, StubField::Type::Double);
+ return mozilla::BitwiseCast<double>(raw);
+ }
+ JSString* stringStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (JSString*)readStubWord(offset, StubField::Type::String);
+ }
+ JS::Symbol* symbolStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (JS::Symbol*)readStubWord(offset, StubField::Type::Symbol);
+ }
+ JS::Compartment* compartmentStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (JS::Compartment*)readStubWord(offset, StubField::Type::RawPointer);
+ }
+ BaseScript* baseScriptStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (BaseScript*)readStubWord(offset, StubField::Type::BaseScript);
+ }
+ const JSClass* classStubField(uintptr_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (const JSClass*)readStubWord(offset, StubField::Type::RawPointer);
+ }
+ const void* proxyHandlerStubField(uintptr_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (const void*)readStubWord(offset, StubField::Type::RawPointer);
+ }
+ const void* pointerStubField(uintptr_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return (const void*)readStubWord(offset, StubField::Type::RawPointer);
+ }
+ jsid idStubField(uint32_t offset) {
+ MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
+ return jsid::fromRawBits(readStubWord(offset, StubField::Type::Id));
+ }
+
+#ifdef DEBUG
+ void assertFloatRegisterAvailable(FloatRegister reg);
+#endif
+
+ void callVMInternal(MacroAssembler& masm, VMFunctionId id);
+ template <typename Fn, Fn fn>
+ void callVM(MacroAssembler& masm);
+};
+
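As context for liveVolatileFloatRegs(), a hedged sketch of the usual save/restore pattern around a non-GC ABI call. The MacroAssembler helpers and HypotheticalHelper are assumptions based on common usage elsewhere in the JIT, not definitions from this patch.

// Sketch: preserve live volatile float registers across a C++ helper call.
LiveRegisterSet volatileRegs(GeneralRegisterSet::Empty(),
                             liveVolatileFloatRegs());
masm.PushRegsInMask(volatileRegs);

using Fn = int32_t (*)(JSString*);  // hypothetical callee signature
masm.setupUnalignedABICall(scratch);
masm.passABIArg(str);
masm.callWithABI<Fn, HypotheticalHelper>();

masm.PopRegsInMask(volatileRegs);
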
+// Ensures the IC's output register is available for writing.
+class MOZ_RAII AutoOutputRegister {
+ TypedOrValueRegister output_;
+ CacheRegisterAllocator& alloc_;
+
+ AutoOutputRegister(const AutoOutputRegister&) = delete;
+ void operator=(const AutoOutputRegister&) = delete;
+
+ public:
+ explicit AutoOutputRegister(CacheIRCompiler& compiler);
+ ~AutoOutputRegister();
+
+ Register maybeReg() const {
+ if (output_.hasValue()) {
+ return output_.valueReg().scratchReg();
+ }
+ if (!output_.typedReg().isFloat()) {
+ return output_.typedReg().gpr();
+ }
+ return InvalidReg;
+ }
+
+ bool hasValue() const { return output_.hasValue(); }
+ ValueOperand valueReg() const { return output_.valueReg(); }
+ AnyRegister typedReg() const { return output_.typedReg(); }
+
+ JSValueType type() const {
+ MOZ_ASSERT(!hasValue());
+ return ValueTypeFromMIRType(output_.type());
+ }
+
+ operator TypedOrValueRegister() const { return output_; }
+};
+
+enum class CallCanGC { CanGC, CanNotGC };
+
+// Instructions that have to perform a callVM require a stub frame. Call its
+// enter() and leave() methods to enter/leave the stub frame.
+// Hoisted from jit/BaselineCacheIRCompiler.cpp. See there for method
+// definitions.
+class MOZ_RAII AutoStubFrame {
+ BaselineCacheIRCompiler& compiler;
+#ifdef DEBUG
+ uint32_t framePushedAtEnterStubFrame_;
+#endif
+
+ AutoStubFrame(const AutoStubFrame&) = delete;
+ void operator=(const AutoStubFrame&) = delete;
+
+ public:
+ explicit AutoStubFrame(BaselineCacheIRCompiler& compiler);
+
+ void enter(MacroAssembler& masm, Register scratch,
+ CallCanGC canGC = CallCanGC::CanGC);
+ void leave(MacroAssembler& masm);
+
+#ifdef DEBUG
+ ~AutoStubFrame();
+#endif
+};
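
A hedged sketch of the enter/leave protocol described above; the VM call in the middle is only indicated by a comment.

// Sketch: bracket a VM call with a Baseline stub frame.
AutoStubFrame stubFrame(*this);  // inside a BaselineCacheIRCompiler method
stubFrame.enter(masm, scratch);  // defaults to CallCanGC::CanGC
// ... push arguments and perform the callVM here ...
stubFrame.leave(masm);
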
+// AutoSaveLiveRegisters must be used when we make a call that can GC. The
+// constructor ensures all live registers are stored on the stack (where the GC
+// expects them) and the destructor restores these registers.
+class MOZ_RAII AutoSaveLiveRegisters {
+ IonCacheIRCompiler& compiler_;
+
+ AutoSaveLiveRegisters(const AutoSaveLiveRegisters&) = delete;
+ void operator=(const AutoSaveLiveRegisters&) = delete;
+
+ public:
+ explicit AutoSaveLiveRegisters(IonCacheIRCompiler& compiler);
+
+ ~AutoSaveLiveRegisters();
+};
+
+// Like AutoScratchRegister, but reuse a register of |output| if possible.
+class MOZ_RAII AutoScratchRegisterMaybeOutput {
+ mozilla::Maybe<AutoScratchRegister> scratch_;
+ Register scratchReg_;
+
+ AutoScratchRegisterMaybeOutput(const AutoScratchRegisterMaybeOutput&) =
+ delete;
+ void operator=(const AutoScratchRegisterMaybeOutput&) = delete;
+
+ public:
+ AutoScratchRegisterMaybeOutput(CacheRegisterAllocator& alloc,
+ MacroAssembler& masm,
+ const AutoOutputRegister& output) {
+ scratchReg_ = output.maybeReg();
+ if (scratchReg_ == InvalidReg) {
+ scratch_.emplace(alloc, masm);
+ scratchReg_ = scratch_.ref();
+ }
+ }
+ AutoScratchRegisterMaybeOutput(CacheRegisterAllocator& alloc,
+ MacroAssembler& masm) {
+ scratch_.emplace(alloc, masm);
+ scratchReg_ = scratch_.ref();
+ }
+
+ Register get() const { return scratchReg_; }
+ operator Register() const { return scratchReg_; }
+};
+
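A small sketch tying this helper to AutoOutputRegister. The load and the boxed result are illustrative, and the sketch assumes the IC output holds a Value so that valueReg() is legal.

// Sketch: reuse the IC's output register as scratch when possible, otherwise
// allocate a fresh one.
AutoOutputRegister output(*this);  // in a CacheIRCompiler emit* method
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.loadPtr(Address(obj, 0), scratch);  // |scratch| converts to Register
masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
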
+// Like AutoScratchRegisterMaybeOutput, but tries to use the ValueOperand's
+// type register for the scratch register on 32-bit.
+//
+// Word of warning: Passing an instance of this class and AutoOutputRegister to
+// functions may not work correctly, because no guarantee is given that the type
+// register is used last when modifying the output's ValueOperand.
+class MOZ_RAII AutoScratchRegisterMaybeOutputType {
+ mozilla::Maybe<AutoScratchRegister> scratch_;
+ Register scratchReg_;
+
+ public:
+ AutoScratchRegisterMaybeOutputType(CacheRegisterAllocator& alloc,
+ MacroAssembler& masm,
+ const AutoOutputRegister& output) {
+#if defined(JS_NUNBOX32)
+ scratchReg_ = output.hasValue() ? output.valueReg().typeReg() : InvalidReg;
+#else
+ scratchReg_ = InvalidReg;
+#endif
+ if (scratchReg_ == InvalidReg) {
+ scratch_.emplace(alloc, masm);
+ scratchReg_ = scratch_.ref();
+ }
+ }
+
+ AutoScratchRegisterMaybeOutputType(
+ const AutoScratchRegisterMaybeOutputType&) = delete;
+
+ void operator=(const AutoScratchRegisterMaybeOutputType&) = delete;
+
+ Register get() const { return scratchReg_; }
+ operator Register() const { return scratchReg_; }
+};
+
+// AutoCallVM is a wrapper class that unifies methods shared by
+// IonCacheIRCompiler and BaselineCacheIRCompiler that perform a callVM, but
+// require stub specific functionality before performing the VM call.
+//
+// Expected Usage:
+//
+// OPs with implementations that may be unified by this class must:
+// - Be listed in the CACHEIR_OPS list but not in the CACHE_IR_SHARED_OPS
+// list
+// - Differ only in their use of `AutoSaveLiveRegisters`,
+// `AutoOutputRegister`, and `AutoScratchRegister`. The Ion
+// implementation will use `AutoSaveLiveRegisters` and
+// `AutoOutputRegister`, while the Baseline implementation will use
+// `AutoScratchRegister`.
+//     - Use the `callVM` method in both implementations.
+//
+// Using AutoCallVM:
+// - The constructor initializes `AutoOutputRegister` for both compiler
+// types. Additionally it initializes an `AutoSaveLiveRegisters` for
+// CacheIRCompilers with the mode Ion, and initializes
+// `AutoScratchRegisterMaybeOutput` and `AutoStubFrame` variables for
+// compilers with mode Baseline.
+// - The `prepare()` method calls the IonCacheIRCompiler method
+// `prepareVMCall` for IonCacheIRCompilers, calls the `enter()` method of
+// `AutoStubFrame` for BaselineCacheIRCompilers, and calls the
+//      `discardStack` method of the `CacheRegisterAllocator` class for both
+//      compiler types.
+// - The `call()` method invokes `callVM` on the CacheIRCompiler and stores
+// the call result according to its type. Finally it calls the `leave`
+// method of `AutoStubFrame` for BaselineCacheIRCompilers.
+//
+// Expected Usage Example:
+// See: `CacheIRCompiler::emitCallGetSparseElementResult()`
+//
+// Restrictions:
+//    - OPs that do not meet the criteria listed above cannot be unified with
+//      AutoCallVM.
+//
+
+class MOZ_RAII AutoCallVM {
+ MacroAssembler& masm_;
+ CacheIRCompiler* compiler_;
+ CacheRegisterAllocator& allocator_;
+ mozilla::Maybe<AutoOutputRegister> output_;
+
+ // Baseline specific stuff
+ mozilla::Maybe<AutoStubFrame> stubFrame_;
+ mozilla::Maybe<AutoScratchRegisterMaybeOutput> scratch_;
+
+ // Ion specific stuff
+ mozilla::Maybe<AutoSaveLiveRegisters> save_;
+
+ void storeResult(JSValueType returnType);
+
+ template <typename Fn>
+ void storeResult();
+
+ void leaveBaselineStubFrame();
+
+ public:
+ AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
+ CacheRegisterAllocator& allocator);
+
+ void prepare();
+
+ template <typename Fn, Fn fn>
+ void call() {
+ compiler_->callVM<Fn, fn>(masm_);
+ storeResult<Fn>();
+ leaveBaselineStubFrame();
+ }
+
+ template <typename Fn, Fn fn>
+ void callNoResult() {
+ compiler_->callVM<Fn, fn>(masm_);
+ leaveBaselineStubFrame();
+ }
+
+ const AutoOutputRegister& output() const { return *output_; }
+ ValueOperand outputValueReg() const { return output_->valueReg(); }
+};
+
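Condensing the usage notes above into a hedged sketch; the pushed argument, the Fn signature, and HypotheticalVMFunction are placeholders rather than real VM functions.

// Sketch: shared Baseline/Ion emitter body that performs a VM call.
AutoCallVM callvm(masm, this, allocator);
callvm.prepare();       // enters the stub frame / saves live registers
masm.Push(someArgReg);  // push the VM call's arguments (illustrative)

using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
callvm.call<Fn, HypotheticalVMFunction>();  // calls the VM, stores the result
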
+// RAII class to allocate FloatReg0 as a scratch register and release it when
+// we're done with it. The previous contents of FloatReg0 may be spilled on the
+// stack and, if necessary, are restored when the destructor runs.
+//
+// When FailurePath is passed to the constructor, FailurePath::label() must not
+// be used during the lifetime of the AutoScratchFloatRegister. Instead use
+// AutoScratchFloatRegister::failure().
+class MOZ_RAII AutoScratchFloatRegister {
+ Label failurePopReg_{};
+ CacheIRCompiler* compiler_;
+ FailurePath* failure_;
+
+ AutoScratchFloatRegister(const AutoScratchFloatRegister&) = delete;
+ void operator=(const AutoScratchFloatRegister&) = delete;
+
+ public:
+ explicit AutoScratchFloatRegister(CacheIRCompiler* compiler)
+ : AutoScratchFloatRegister(compiler, nullptr) {}
+
+ AutoScratchFloatRegister(CacheIRCompiler* compiler, FailurePath* failure);
+
+ ~AutoScratchFloatRegister();
+
+ Label* failure();
+
+ FloatRegister get() const { return FloatReg0; }
+ operator FloatRegister() const { return FloatReg0; }
+};
+
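A short sketch of the failure-label rule stated above; the NaN guard and the |inputVal| operand are illustrative.

// Sketch: with a FailurePath supplied, branch to floatScratch.failure()
// instead of failure->label(), so FloatReg0 is restored on the bail-out path.
AutoScratchFloatRegister floatScratch(this, failure);
masm.unboxDouble(inputVal, floatScratch);
masm.branchDouble(Assembler::DoubleUnordered, floatScratch, floatScratch,
                  floatScratch.failure());
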
+// This class can be used to assert a certain FloatRegister is available. In
+// Baseline mode, all float registers are available. In Ion mode, only the
+// registers added as fixed temps in LIRGenerator are available.
+class MOZ_RAII AutoAvailableFloatRegister {
+ FloatRegister reg_;
+
+ AutoAvailableFloatRegister(const AutoAvailableFloatRegister&) = delete;
+ void operator=(const AutoAvailableFloatRegister&) = delete;
+
+ public:
+ explicit AutoAvailableFloatRegister(CacheIRCompiler& compiler,
+ FloatRegister reg)
+ : reg_(reg) {
+#ifdef DEBUG
+ compiler.assertFloatRegisterAvailable(reg);
+#endif
+ }
+
+ FloatRegister get() const { return reg_; }
+ operator FloatRegister() const { return reg_; }
+};
+
+// See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
+// of this class.
+//
+// CacheIRStubInfo has a trailing variable-length array of bytes. The memory
+// layout is as follows:
+//
+// Item | Offset
+// -----------------+--------------------------------------
+// CacheIRStubInfo | 0
+// CacheIR bytecode | sizeof(CacheIRStubInfo)
+// Stub field types | sizeof(CacheIRStubInfo) + codeLength_
+//
+// The array of stub field types is terminated by StubField::Type::Limit.
+class CacheIRStubInfo {
+ uint32_t codeLength_;
+ CacheKind kind_;
+ ICStubEngine engine_;
+ uint8_t stubDataOffset_;
+ bool makesGCCalls_;
+
+ CacheIRStubInfo(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
+ uint32_t stubDataOffset, uint32_t codeLength)
+ : codeLength_(codeLength),
+ kind_(kind),
+ engine_(engine),
+ stubDataOffset_(stubDataOffset),
+ makesGCCalls_(makesGCCalls) {
+ MOZ_ASSERT(kind_ == kind, "Kind must fit in bitfield");
+ MOZ_ASSERT(engine_ == engine, "Engine must fit in bitfield");
+ MOZ_ASSERT(stubDataOffset_ == stubDataOffset,
+ "stubDataOffset must fit in uint8_t");
+ }
+
+ CacheIRStubInfo(const CacheIRStubInfo&) = delete;
+ CacheIRStubInfo& operator=(const CacheIRStubInfo&) = delete;
+
+ public:
+ CacheKind kind() const { return kind_; }
+ ICStubEngine engine() const { return engine_; }
+ bool makesGCCalls() const { return makesGCCalls_; }
+
+ const uint8_t* code() const {
+ return reinterpret_cast<const uint8_t*>(this) + sizeof(CacheIRStubInfo);
+ }
+ uint32_t codeLength() const { return codeLength_; }
+ uint32_t stubDataOffset() const { return stubDataOffset_; }
+
+ size_t stubDataSize() const;
+
+ StubField::Type fieldType(uint32_t i) const {
+ static_assert(sizeof(StubField::Type) == sizeof(uint8_t));
+ const uint8_t* fieldTypes = code() + codeLength_;
+ return static_cast<StubField::Type>(fieldTypes[i]);
+ }
+
+ static CacheIRStubInfo* New(CacheKind kind, ICStubEngine engine,
+ bool canMakeCalls, uint32_t stubDataOffset,
+ const CacheIRWriter& writer);
+
+ template <class Stub, class T>
+ js::GCPtr<T>& getStubField(Stub* stub, uint32_t offset) const;
+
+ template <class Stub, class T>
+ T* getPtrStubField(Stub* stub, uint32_t offset) const;
+
+ template <class T>
+ js::GCPtr<T>& getStubField(ICCacheIRStub* stub, uint32_t offset) const {
+ return getStubField<ICCacheIRStub, T>(stub, offset);
+ }
+
+ uintptr_t getStubRawWord(const uint8_t* stubData, uint32_t offset) const;
+ uintptr_t getStubRawWord(ICCacheIRStub* stub, uint32_t offset) const;
+
+ int64_t getStubRawInt64(const uint8_t* stubData, uint32_t offset) const;
+ int64_t getStubRawInt64(ICCacheIRStub* stub, uint32_t offset) const;
+
+ void replaceStubRawWord(uint8_t* stubData, uint32_t offset, uintptr_t oldWord,
+ uintptr_t newWord) const;
+};
+
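To make the trailing-data layout concrete, a hedged sketch that walks the stub field descriptors using only the accessors declared above.

// Sketch: iterate the field types stored after the CacheIR bytecode.
void DumpFieldTypes(const CacheIRStubInfo* info) {
  uint32_t i = 0;
  while (info->fieldType(i) != StubField::Type::Limit) {
    StubField::Type type = info->fieldType(i);
    // ... inspect |type|; the field data itself lives in the stub ...
    i++;
  }
}
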
+template <typename T>
+void TraceCacheIRStub(JSTracer* trc, T* stub, const CacheIRStubInfo* stubInfo);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CacheIRCompiler_h */
diff --git a/js/src/jit/CacheIRGenerator.h b/js/src/jit/CacheIRGenerator.h
new file mode 100644
index 0000000000..9d1439a434
--- /dev/null
+++ b/js/src/jit/CacheIRGenerator.h
@@ -0,0 +1,912 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CacheIRGenerator_h
+#define jit_CacheIRGenerator_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+
+#include <stdint.h>
+
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "jit/CacheIR.h"
+#include "jit/CacheIRWriter.h"
+#include "jit/ICState.h"
+#include "js/Id.h"
+#include "js/RootingAPI.h"
+#include "js/ScalarType.h"
+#include "js/TypeDecls.h"
+#include "js/Value.h"
+#include "js/ValueArray.h"
+#include "vm/Opcodes.h"
+
+class JSFunction;
+
+namespace JS {
+struct XrayJitInfo;
+}
+
+namespace js {
+
+class BoundFunctionObject;
+class NativeObject;
+class PropertyResult;
+class ProxyObject;
+enum class UnaryMathFunction : uint8_t;
+
+namespace jit {
+
+class BaselineFrame;
+class Label;
+class MacroAssembler;
+struct Register;
+enum class InlinableNative : uint16_t;
+
+// Some ops refer to shapes that might be in other zones. Instead of putting
+// cross-zone pointers in the caches themselves (which would complicate tracing
+// enormously), these ops contain wrappers for objects in the target
+// zone, which refer to the actual shape via a reserved slot.
+void LoadShapeWrapperContents(MacroAssembler& masm, Register obj, Register dst,
+ Label* failure);
+
+class MOZ_RAII IRGenerator {
+ protected:
+ CacheIRWriter writer;
+ JSContext* cx_;
+ HandleScript script_;
+ jsbytecode* pc_;
+ CacheKind cacheKind_;
+ ICState::Mode mode_;
+ bool isFirstStub_;
+
+ // Important: This pointer may be passed to the profiler. If this is non-null,
+ // it must point to a C string literal with static lifetime, not a heap- or
+  // stack-allocated string.
+ const char* stubName_ = nullptr;
+
+ IRGenerator(const IRGenerator&) = delete;
+ IRGenerator& operator=(const IRGenerator&) = delete;
+
+ bool maybeGuardInt32Index(const Value& index, ValOperandId indexId,
+ uint32_t* int32Index, Int32OperandId* int32IndexId);
+
+ IntPtrOperandId guardToIntPtrIndex(const Value& index, ValOperandId indexId,
+ bool supportOOB);
+
+ ObjOperandId guardDOMProxyExpandoObjectAndShape(ProxyObject* obj,
+ ObjOperandId objId,
+ const Value& expandoVal,
+ NativeObject* expandoObj);
+
+ void emitIdGuard(ValOperandId valId, const Value& idVal, jsid id);
+
+ OperandId emitNumericGuard(ValOperandId valId, Scalar::Type type);
+
+ StringOperandId emitToStringGuard(ValOperandId id, const Value& v);
+
+ void emitCalleeGuard(ObjOperandId calleeId, JSFunction* callee);
+
+ void emitOptimisticClassGuard(ObjOperandId objId, JSObject* obj,
+ GuardClassKind kind);
+
+ friend class CacheIRSpewer;
+
+ public:
+ explicit IRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
+ CacheKind cacheKind, ICState state);
+
+ const CacheIRWriter& writerRef() const { return writer; }
+ CacheKind cacheKind() const { return cacheKind_; }
+
+ // See comment on `stubName_` above.
+ const char* stubName() const { return stubName_; }
+
+ static constexpr char* NotAttached = nullptr;
+};
+
+// GetPropIRGenerator generates CacheIR for a GetProp IC.
+class MOZ_RAII GetPropIRGenerator : public IRGenerator {
+ HandleValue val_;
+ HandleValue idVal_;
+
+ AttachDecision tryAttachNative(HandleObject obj, ObjOperandId objId,
+ HandleId id, ValOperandId receiverId);
+ AttachDecision tryAttachObjectLength(HandleObject obj, ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachTypedArray(HandleObject obj, ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachDataView(HandleObject obj, ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachArrayBufferMaybeShared(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachRegExp(HandleObject obj, ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachMap(HandleObject obj, ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachSet(HandleObject obj, ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachModuleNamespace(HandleObject obj, ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachWindowProxy(HandleObject obj, ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachCrossCompartmentWrapper(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachXrayCrossCompartmentWrapper(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id,
+ ValOperandId receiverId);
+ AttachDecision tryAttachFunction(HandleObject obj, ObjOperandId objId,
+ HandleId id);
+ AttachDecision tryAttachArgumentsObjectIterator(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id);
+
+ AttachDecision tryAttachGenericProxy(Handle<ProxyObject*> obj,
+ ObjOperandId objId, HandleId id,
+ bool handleDOMProxies);
+ AttachDecision tryAttachDOMProxyExpando(Handle<ProxyObject*> obj,
+ ObjOperandId objId, HandleId id,
+ ValOperandId receiverId);
+ AttachDecision tryAttachDOMProxyShadowed(Handle<ProxyObject*> obj,
+ ObjOperandId objId, HandleId id);
+ AttachDecision tryAttachDOMProxyUnshadowed(Handle<ProxyObject*> obj,
+ ObjOperandId objId, HandleId id,
+ ValOperandId receiverId);
+ AttachDecision tryAttachProxy(HandleObject obj, ObjOperandId objId,
+ HandleId id, ValOperandId receiverId);
+
+ AttachDecision tryAttachPrimitive(ValOperandId valId, HandleId id);
+ AttachDecision tryAttachStringChar(ValOperandId valId, ValOperandId indexId);
+ AttachDecision tryAttachStringLength(ValOperandId valId, HandleId id);
+
+ AttachDecision tryAttachArgumentsObjectArg(HandleObject obj,
+ ObjOperandId objId, uint32_t index,
+ Int32OperandId indexId);
+ AttachDecision tryAttachArgumentsObjectArgHole(HandleObject obj,
+ ObjOperandId objId,
+ uint32_t index,
+ Int32OperandId indexId);
+ AttachDecision tryAttachArgumentsObjectCallee(HandleObject obj,
+ ObjOperandId objId,
+ HandleId id);
+
+ AttachDecision tryAttachDenseElement(HandleObject obj, ObjOperandId objId,
+ uint32_t index, Int32OperandId indexId);
+ AttachDecision tryAttachDenseElementHole(HandleObject obj, ObjOperandId objId,
+ uint32_t index,
+ Int32OperandId indexId);
+ AttachDecision tryAttachSparseElement(HandleObject obj, ObjOperandId objId,
+ uint32_t index, Int32OperandId indexId);
+ AttachDecision tryAttachTypedArrayElement(HandleObject obj,
+ ObjOperandId objId);
+
+ AttachDecision tryAttachGenericElement(HandleObject obj, ObjOperandId objId,
+ uint32_t index, Int32OperandId indexId,
+ ValOperandId receiverId);
+
+ AttachDecision tryAttachProxyElement(HandleObject obj, ObjOperandId objId);
+
+ void attachMegamorphicNativeSlot(ObjOperandId objId, jsid id);
+
+ ValOperandId getElemKeyValueId() const {
+ MOZ_ASSERT(cacheKind_ == CacheKind::GetElem ||
+ cacheKind_ == CacheKind::GetElemSuper);
+ return ValOperandId(1);
+ }
+
+ ValOperandId getSuperReceiverValueId() const {
+ if (cacheKind_ == CacheKind::GetPropSuper) {
+ return ValOperandId(1);
+ }
+
+ MOZ_ASSERT(cacheKind_ == CacheKind::GetElemSuper);
+ return ValOperandId(2);
+ }
+
+ bool isSuper() const {
+ return (cacheKind_ == CacheKind::GetPropSuper ||
+ cacheKind_ == CacheKind::GetElemSuper);
+ }
+
+ // If this is a GetElem cache, emit instructions to guard the incoming Value
+ // matches |id|.
+ void maybeEmitIdGuard(jsid id);
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ GetPropIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
+ ICState state, CacheKind cacheKind, HandleValue val,
+ HandleValue idVal);
+
+ AttachDecision tryAttachStub();
+};
+
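A hedged sketch of how an IC fallback typically drives this generator; the surrounding fallback machinery and the AttachDecision handling are assumptions based on the declarations above and on CacheIR.h.

// Sketch: build a GetProp generator and try to attach a stub for |val|/|idVal|.
GetPropIRGenerator gen(cx, script, pc, icState, CacheKind::GetProp, val, idVal);
switch (gen.tryAttachStub()) {
  case AttachDecision::Attach:
    // Hand gen.writerRef() to the stub-attachment code.
    break;
  default:
    break;  // NoAction, TemporarilyUnoptimizable, or Deferred
}
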
+// GetNameIRGenerator generates CacheIR for a GetName IC.
+class MOZ_RAII GetNameIRGenerator : public IRGenerator {
+ HandleObject env_;
+ Handle<PropertyName*> name_;
+
+ AttachDecision tryAttachGlobalNameValue(ObjOperandId objId, HandleId id);
+ AttachDecision tryAttachGlobalNameGetter(ObjOperandId objId, HandleId id);
+ AttachDecision tryAttachEnvironmentName(ObjOperandId objId, HandleId id);
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ GetNameIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
+ ICState state, HandleObject env,
+ Handle<PropertyName*> name);
+
+ AttachDecision tryAttachStub();
+};
+
+// BindNameIRGenerator generates CacheIR for a BindName IC.
+class MOZ_RAII BindNameIRGenerator : public IRGenerator {
+ HandleObject env_;
+ Handle<PropertyName*> name_;
+
+ AttachDecision tryAttachGlobalName(ObjOperandId objId, HandleId id);
+ AttachDecision tryAttachEnvironmentName(ObjOperandId objId, HandleId id);
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ BindNameIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
+ ICState state, HandleObject env,
+ Handle<PropertyName*> name);
+
+ AttachDecision tryAttachStub();
+};
+
+// SetPropIRGenerator generates CacheIR for a SetProp IC.
+class MOZ_RAII SetPropIRGenerator : public IRGenerator {
+ HandleValue lhsVal_;
+ HandleValue idVal_;
+ HandleValue rhsVal_;
+
+ public:
+ enum class DeferType { None, AddSlot };
+
+ private:
+ DeferType deferType_ = DeferType::None;
+
+ ValOperandId setElemKeyValueId() const {
+ MOZ_ASSERT(cacheKind_ == CacheKind::SetElem);
+ return ValOperandId(1);
+ }
+
+ ValOperandId rhsValueId() const {
+ if (cacheKind_ == CacheKind::SetProp) {
+ return ValOperandId(1);
+ }
+ MOZ_ASSERT(cacheKind_ == CacheKind::SetElem);
+ return ValOperandId(2);
+ }
+
+ // If this is a SetElem cache, emit instructions to guard the incoming Value
+ // matches |id|.
+ void maybeEmitIdGuard(jsid id);
+
+ AttachDecision tryAttachNativeSetSlot(HandleObject obj, ObjOperandId objId,
+ HandleId id, ValOperandId rhsId);
+ AttachDecision tryAttachMegamorphicSetSlot(HandleObject obj,
+ ObjOperandId objId, HandleId id,
+ ValOperandId rhsId);
+ AttachDecision tryAttachSetter(HandleObject obj, ObjOperandId objId,
+ HandleId id, ValOperandId rhsId);
+ AttachDecision tryAttachSetArrayLength(HandleObject obj, ObjOperandId objId,
+ HandleId id, ValOperandId rhsId);
+ AttachDecision tryAttachWindowProxy(HandleObject obj, ObjOperandId objId,
+ HandleId id, ValOperandId rhsId);
+
+ AttachDecision tryAttachSetDenseElement(HandleObject obj, ObjOperandId objId,
+ uint32_t index,
+ Int32OperandId indexId,
+ ValOperandId rhsId);
+ AttachDecision tryAttachSetTypedArrayElement(HandleObject obj,
+ ObjOperandId objId,
+ ValOperandId rhsId);
+
+ AttachDecision tryAttachSetDenseElementHole(HandleObject obj,
+ ObjOperandId objId,
+ uint32_t index,
+ Int32OperandId indexId,
+ ValOperandId rhsId);
+
+ AttachDecision tryAttachAddOrUpdateSparseElement(HandleObject obj,
+ ObjOperandId objId,
+ uint32_t index,
+ Int32OperandId indexId,
+ ValOperandId rhsId);
+
+ AttachDecision tryAttachGenericProxy(Handle<ProxyObject*> obj,
+ ObjOperandId objId, HandleId id,
+ ValOperandId rhsId,
+ bool handleDOMProxies);
+ AttachDecision tryAttachDOMProxyShadowed(Handle<ProxyObject*> obj,
+ ObjOperandId objId, HandleId id,
+ ValOperandId rhsId);
+ AttachDecision tryAttachDOMProxyUnshadowed(Handle<ProxyObject*> obj,
+ ObjOperandId objId, HandleId id,
+ ValOperandId rhsId);
+ AttachDecision tryAttachDOMProxyExpando(Handle<ProxyObject*> obj,
+ ObjOperandId objId, HandleId id,
+ ValOperandId rhsId);
+ AttachDecision tryAttachProxy(HandleObject obj, ObjOperandId objId,
+ HandleId id, ValOperandId rhsId);
+ AttachDecision tryAttachProxyElement(HandleObject obj, ObjOperandId objId,
+ ValOperandId rhsId);
+ AttachDecision tryAttachMegamorphicSetElement(HandleObject obj,
+ ObjOperandId objId,
+ ValOperandId rhsId);
+
+ bool canAttachAddSlotStub(HandleObject obj, HandleId id);
+
+ public:
+ SetPropIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
+ CacheKind cacheKind, ICState state, HandleValue lhsVal,
+ HandleValue idVal, HandleValue rhsVal);
+
+ AttachDecision tryAttachStub();
+ AttachDecision tryAttachAddSlotStub(Handle<Shape*> oldShape);
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ DeferType deferType() const { return deferType_; }
+};
+
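A hedged sketch of the DeferType::AddSlot flow; capturing the old Shape and performing the actual set happen outside this header and are shown only as comments.

// Sketch: attaching is deferred until the set proves it adds a new slot.
SetPropIRGenerator gen(cx, script, pc, CacheKind::SetProp, icState, lhsVal,
                       idVal, rhsVal);
AttachDecision decision = gen.tryAttachStub();
// ... capture the object's old Shape, then perform the property set ...
if (decision == AttachDecision::Deferred &&
    gen.deferType() == SetPropIRGenerator::DeferType::AddSlot) {
  decision = gen.tryAttachAddSlotStub(oldShape);  // oldShape: Handle<Shape*>
}
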
+// HasPropIRGenerator generates CacheIR for a HasProp IC. Used for
+// CacheKind::In / CacheKind::HasOwn.
+class MOZ_RAII HasPropIRGenerator : public IRGenerator {
+ HandleValue val_;
+ HandleValue idVal_;
+
+ AttachDecision tryAttachDense(HandleObject obj, ObjOperandId objId,
+ uint32_t index, Int32OperandId indexId);
+ AttachDecision tryAttachDenseHole(HandleObject obj, ObjOperandId objId,
+ uint32_t index, Int32OperandId indexId);
+ AttachDecision tryAttachTypedArray(HandleObject obj, ObjOperandId objId,
+ ValOperandId keyId);
+ AttachDecision tryAttachSparse(HandleObject obj, ObjOperandId objId,
+ Int32OperandId indexId);
+ AttachDecision tryAttachArgumentsObjectArg(HandleObject obj,
+ ObjOperandId objId,
+ Int32OperandId indexId);
+ AttachDecision tryAttachNamedProp(HandleObject obj, ObjOperandId objId,
+ HandleId key, ValOperandId keyId);
+ AttachDecision tryAttachMegamorphic(ObjOperandId objId, ValOperandId keyId);
+ AttachDecision tryAttachNative(NativeObject* obj, ObjOperandId objId,
+ jsid key, ValOperandId keyId,
+ PropertyResult prop, NativeObject* holder);
+ AttachDecision tryAttachSlotDoesNotExist(NativeObject* obj,
+ ObjOperandId objId, jsid key,
+ ValOperandId keyId);
+ AttachDecision tryAttachDoesNotExist(HandleObject obj, ObjOperandId objId,
+ HandleId key, ValOperandId keyId);
+ AttachDecision tryAttachProxyElement(HandleObject obj, ObjOperandId objId,
+ ValOperandId keyId);
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ // NOTE: Argument order is PROPERTY, OBJECT
+ HasPropIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
+ ICState state, CacheKind cacheKind, HandleValue idVal,
+ HandleValue val);
+
+ AttachDecision tryAttachStub();
+};
+
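Because the constructor takes the property before the object (see the NOTE above), a short sketch to make the call-site ordering explicit; |keyVal| and |objVal| are illustrative names.

// Sketch: idVal (the property key) precedes val (the object being tested).
HasPropIRGenerator gen(cx, script, pc, icState, CacheKind::HasOwn,
                       /* idVal = */ keyVal, /* val = */ objVal);
AttachDecision decision = gen.tryAttachStub();
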
+class MOZ_RAII CheckPrivateFieldIRGenerator : public IRGenerator {
+ HandleValue val_;
+ HandleValue idVal_;
+
+ AttachDecision tryAttachNative(NativeObject* obj, ObjOperandId objId,
+ jsid key, ValOperandId keyId,
+ PropertyResult prop);
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ CheckPrivateFieldIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ CacheKind cacheKind, HandleValue idVal,
+ HandleValue val);
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII InstanceOfIRGenerator : public IRGenerator {
+ HandleValue lhsVal_;
+ HandleObject rhsObj_;
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ InstanceOfIRGenerator(JSContext*, HandleScript, jsbytecode*, ICState,
+ HandleValue, HandleObject);
+
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII TypeOfIRGenerator : public IRGenerator {
+ HandleValue val_;
+
+ AttachDecision tryAttachPrimitive(ValOperandId valId);
+ AttachDecision tryAttachObject(ValOperandId valId);
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ TypeOfIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc, ICState state,
+ HandleValue value);
+
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII GetIteratorIRGenerator : public IRGenerator {
+ HandleValue val_;
+
+ AttachDecision tryAttachObject(ValOperandId valId);
+ AttachDecision tryAttachNullOrUndefined(ValOperandId valId);
+ AttachDecision tryAttachGeneric(ValOperandId valId);
+
+ public:
+ GetIteratorIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc,
+ ICState state, HandleValue value);
+
+ AttachDecision tryAttachStub();
+
+ void trackAttached(const char* name /* must be a C string literal */);
+};
+
+class MOZ_RAII OptimizeSpreadCallIRGenerator : public IRGenerator {
+ HandleValue val_;
+
+ AttachDecision tryAttachArray();
+ AttachDecision tryAttachArguments();
+ AttachDecision tryAttachNotOptimizable();
+
+ public:
+ OptimizeSpreadCallIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleValue value);
+
+ AttachDecision tryAttachStub();
+
+ void trackAttached(const char* name /* must be a C string literal */);
+};
+
+enum class StringChar { CodeAt, At };
+enum class ScriptedThisResult { NoAction, UninitializedThis, PlainObjectShape };
+
+class MOZ_RAII CallIRGenerator : public IRGenerator {
+ private:
+ JSOp op_;
+ uint32_t argc_;
+ HandleValue callee_;
+ HandleValue thisval_;
+ HandleValue newTarget_;
+ HandleValueArray args_;
+
+ friend class InlinableNativeIRGenerator;
+
+ ScriptedThisResult getThisShapeForScripted(HandleFunction calleeFunc,
+ Handle<JSObject*> newTarget,
+ MutableHandle<Shape*> result);
+
+ ObjOperandId emitFunCallOrApplyGuard(Int32OperandId argcId);
+ ObjOperandId emitFunCallGuard(Int32OperandId argcId);
+ ObjOperandId emitFunApplyGuard(Int32OperandId argcId);
+ mozilla::Maybe<ObjOperandId> emitFunApplyArgsGuard(
+ CallFlags::ArgFormat format);
+
+ void emitCallScriptedGuards(ObjOperandId calleeObjId, JSFunction* calleeFunc,
+ Int32OperandId argcId, CallFlags flags,
+ Shape* thisShape, bool isBoundFunction);
+
+ AttachDecision tryAttachFunCall(HandleFunction calleeFunc);
+ AttachDecision tryAttachFunApply(HandleFunction calleeFunc);
+ AttachDecision tryAttachCallScripted(HandleFunction calleeFunc);
+ AttachDecision tryAttachInlinableNative(HandleFunction calleeFunc,
+ CallFlags flags);
+ AttachDecision tryAttachWasmCall(HandleFunction calleeFunc);
+ AttachDecision tryAttachCallNative(HandleFunction calleeFunc);
+ AttachDecision tryAttachCallHook(HandleObject calleeObj);
+ AttachDecision tryAttachBoundFunction(Handle<BoundFunctionObject*> calleeObj);
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ CallIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc, JSOp op,
+ ICState state, uint32_t argc, HandleValue callee,
+ HandleValue thisval, HandleValue newTarget,
+ HandleValueArray args);
+
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII InlinableNativeIRGenerator {
+ CallIRGenerator& generator_;
+ CacheIRWriter& writer;
+ JSContext* cx_;
+
+ HandleFunction callee_;
+ HandleValue newTarget_;
+ HandleValue thisval_;
+ HandleValueArray args_;
+ uint32_t argc_;
+ CallFlags flags_;
+
+ HandleScript script() const { return generator_.script_; }
+ bool isFirstStub() const { return generator_.isFirstStub_; }
+ bool ignoresResult() const { return generator_.op_ == JSOp::CallIgnoresRv; }
+
+ void emitNativeCalleeGuard();
+ void emitOptimisticClassGuard(ObjOperandId objId, JSObject* obj,
+ GuardClassKind kind) {
+ generator_.emitOptimisticClassGuard(objId, obj, kind);
+ }
+
+ ObjOperandId emitLoadArgsArray();
+
+ void initializeInputOperand() {
+ // The input operand is already initialized for FunCall and FunApplyArray.
+ if (flags_.getArgFormat() == CallFlags::FunCall ||
+ flags_.getArgFormat() == CallFlags::FunApplyArray) {
+ return;
+ }
+ (void)writer.setInputOperandId(0);
+ }
+
+ auto emitToStringGuard(ValOperandId id, const Value& v) {
+ return generator_.emitToStringGuard(id, v);
+ }
+
+ auto emitNumericGuard(ValOperandId valId, Scalar::Type type) {
+ return generator_.emitNumericGuard(valId, type);
+ }
+
+ auto guardToIntPtrIndex(const Value& index, ValOperandId indexId,
+ bool supportOOB) {
+ return generator_.guardToIntPtrIndex(index, indexId, supportOOB);
+ }
+
+ bool canAttachAtomicsReadWriteModify();
+
+ struct AtomicsReadWriteModifyOperands {
+ ObjOperandId objId;
+ IntPtrOperandId intPtrIndexId;
+ OperandId numericValueId;
+ };
+
+ AtomicsReadWriteModifyOperands emitAtomicsReadWriteModifyOperands();
+
+ AttachDecision tryAttachArrayPush();
+ AttachDecision tryAttachArrayPopShift(InlinableNative native);
+ AttachDecision tryAttachArrayJoin();
+ AttachDecision tryAttachArraySlice();
+ AttachDecision tryAttachArrayIsArray();
+ AttachDecision tryAttachDataViewGet(Scalar::Type type);
+ AttachDecision tryAttachDataViewSet(Scalar::Type type);
+ AttachDecision tryAttachFunctionBind();
+ AttachDecision tryAttachSpecializedFunctionBind(
+ Handle<JSObject*> target, Handle<BoundFunctionObject*> templateObj);
+ AttachDecision tryAttachUnsafeGetReservedSlot(InlinableNative native);
+ AttachDecision tryAttachUnsafeSetReservedSlot();
+ AttachDecision tryAttachIsSuspendedGenerator();
+ AttachDecision tryAttachToObject();
+ AttachDecision tryAttachToInteger();
+ AttachDecision tryAttachToLength();
+ AttachDecision tryAttachIsObject();
+ AttachDecision tryAttachIsPackedArray();
+ AttachDecision tryAttachIsCallable();
+ AttachDecision tryAttachIsConstructor();
+ AttachDecision tryAttachIsCrossRealmArrayConstructor();
+ AttachDecision tryAttachGuardToClass(InlinableNative native);
+ AttachDecision tryAttachHasClass(const JSClass* clasp,
+ bool isPossiblyWrapped);
+ AttachDecision tryAttachRegExpMatcherSearcher(InlinableNative native);
+ AttachDecision tryAttachRegExpPrototypeOptimizable();
+ AttachDecision tryAttachRegExpInstanceOptimizable();
+ AttachDecision tryAttachIntrinsicRegExpBuiltinExec(InlinableNative native);
+ AttachDecision tryAttachIntrinsicRegExpExec(InlinableNative native);
+ AttachDecision tryAttachGetFirstDollarIndex();
+ AttachDecision tryAttachSubstringKernel();
+ AttachDecision tryAttachObjectHasPrototype();
+ AttachDecision tryAttachString();
+ AttachDecision tryAttachStringConstructor();
+ AttachDecision tryAttachStringToStringValueOf();
+ AttachDecision tryAttachStringChar(StringChar kind);
+ AttachDecision tryAttachStringCharCodeAt();
+ AttachDecision tryAttachStringCharAt();
+ AttachDecision tryAttachStringFromCharCode();
+ AttachDecision tryAttachStringFromCodePoint();
+ AttachDecision tryAttachStringIndexOf();
+ AttachDecision tryAttachStringStartsWith();
+ AttachDecision tryAttachStringEndsWith();
+ AttachDecision tryAttachStringToLowerCase();
+ AttachDecision tryAttachStringToUpperCase();
+ AttachDecision tryAttachStringReplaceString();
+ AttachDecision tryAttachStringSplitString();
+ AttachDecision tryAttachMathRandom();
+ AttachDecision tryAttachMathAbs();
+ AttachDecision tryAttachMathClz32();
+ AttachDecision tryAttachMathSign();
+ AttachDecision tryAttachMathImul();
+ AttachDecision tryAttachMathFloor();
+ AttachDecision tryAttachMathCeil();
+ AttachDecision tryAttachMathTrunc();
+ AttachDecision tryAttachMathRound();
+ AttachDecision tryAttachMathSqrt();
+ AttachDecision tryAttachMathFRound();
+ AttachDecision tryAttachMathHypot();
+ AttachDecision tryAttachMathATan2();
+ AttachDecision tryAttachMathFunction(UnaryMathFunction fun);
+ AttachDecision tryAttachMathPow();
+ AttachDecision tryAttachMathMinMax(bool isMax);
+ AttachDecision tryAttachSpreadMathMinMax(bool isMax);
+ AttachDecision tryAttachIsTypedArray(bool isPossiblyWrapped);
+ AttachDecision tryAttachIsTypedArrayConstructor();
+ AttachDecision tryAttachTypedArrayByteOffset();
+ AttachDecision tryAttachTypedArrayElementSize();
+ AttachDecision tryAttachTypedArrayLength(bool isPossiblyWrapped);
+ AttachDecision tryAttachArrayBufferByteLength(bool isPossiblyWrapped);
+ AttachDecision tryAttachIsConstructing();
+ AttachDecision tryAttachGetNextMapSetEntryForIterator(bool isMap);
+ AttachDecision tryAttachNewArrayIterator();
+ AttachDecision tryAttachNewStringIterator();
+ AttachDecision tryAttachNewRegExpStringIterator();
+ AttachDecision tryAttachArrayIteratorPrototypeOptimizable();
+ AttachDecision tryAttachObjectCreate();
+ AttachDecision tryAttachObjectConstructor();
+ AttachDecision tryAttachArrayConstructor();
+ AttachDecision tryAttachTypedArrayConstructor();
+ AttachDecision tryAttachNumber();
+ AttachDecision tryAttachNumberParseInt();
+ AttachDecision tryAttachNumberToString();
+ AttachDecision tryAttachReflectGetPrototypeOf();
+ AttachDecision tryAttachAtomicsCompareExchange();
+ AttachDecision tryAttachAtomicsExchange();
+ AttachDecision tryAttachAtomicsAdd();
+ AttachDecision tryAttachAtomicsSub();
+ AttachDecision tryAttachAtomicsAnd();
+ AttachDecision tryAttachAtomicsOr();
+ AttachDecision tryAttachAtomicsXor();
+ AttachDecision tryAttachAtomicsLoad();
+ AttachDecision tryAttachAtomicsStore();
+ AttachDecision tryAttachAtomicsIsLockFree();
+ AttachDecision tryAttachBoolean();
+ AttachDecision tryAttachBailout();
+ AttachDecision tryAttachAssertFloat32();
+ AttachDecision tryAttachAssertRecoveredOnBailout();
+ AttachDecision tryAttachObjectIs();
+ AttachDecision tryAttachObjectIsPrototypeOf();
+ AttachDecision tryAttachObjectToString();
+ AttachDecision tryAttachBigIntAsIntN();
+ AttachDecision tryAttachBigIntAsUintN();
+ AttachDecision tryAttachSetHas();
+ AttachDecision tryAttachMapHas();
+ AttachDecision tryAttachMapGet();
+#ifdef FUZZING_JS_FUZZILLI
+ AttachDecision tryAttachFuzzilliHash();
+#endif
+
+ void trackAttached(const char* name /* must be a C string literal */) {
+ return generator_.trackAttached(name);
+ }
+
+ public:
+ InlinableNativeIRGenerator(CallIRGenerator& generator, HandleFunction callee,
+ HandleValue newTarget, HandleValue thisValue,
+ HandleValueArray args, CallFlags flags)
+ : generator_(generator),
+ writer(generator.writer),
+ cx_(generator.cx_),
+ callee_(callee),
+ newTarget_(newTarget),
+ thisval_(thisValue),
+ args_(args),
+ argc_(args.length()),
+ flags_(flags) {}
+
+ AttachDecision tryAttachStub();
+};
+
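A hedged sketch of how CallIRGenerator::tryAttachInlinableNative is expected to hand off to this class; this is a simplified, hypothetical body, not the actual definition.

// Sketch (inside CallIRGenerator::tryAttachInlinableNative, simplified):
InlinableNativeIRGenerator nativeGen(*this, calleeFunc, newTarget_, thisval_,
                                     args_, flags);
return nativeGen.tryAttachStub();
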
+class MOZ_RAII CompareIRGenerator : public IRGenerator {
+ JSOp op_;
+ HandleValue lhsVal_;
+ HandleValue rhsVal_;
+
+ AttachDecision tryAttachString(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachObject(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachSymbol(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachStrictDifferentTypes(ValOperandId lhsId,
+ ValOperandId rhsId);
+ AttachDecision tryAttachInt32(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachNumber(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachBigInt(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachAnyNullUndefined(ValOperandId lhsId,
+ ValOperandId rhsId);
+ AttachDecision tryAttachNullUndefined(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachStringNumber(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachPrimitiveSymbol(ValOperandId lhsId,
+ ValOperandId rhsId);
+ AttachDecision tryAttachBigIntInt32(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachBigIntNumber(ValOperandId lhsId, ValOperandId rhsId);
+ AttachDecision tryAttachBigIntString(ValOperandId lhsId, ValOperandId rhsId);
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ CompareIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc, ICState state,
+ JSOp op, HandleValue lhsVal, HandleValue rhsVal);
+
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII ToBoolIRGenerator : public IRGenerator {
+ HandleValue val_;
+
+ AttachDecision tryAttachBool();
+ AttachDecision tryAttachInt32();
+ AttachDecision tryAttachNumber();
+ AttachDecision tryAttachString();
+ AttachDecision tryAttachSymbol();
+ AttachDecision tryAttachNullOrUndefined();
+ AttachDecision tryAttachObject();
+ AttachDecision tryAttachBigInt();
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ ToBoolIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc, ICState state,
+ HandleValue val);
+
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII GetIntrinsicIRGenerator : public IRGenerator {
+ HandleValue val_;
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ GetIntrinsicIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc,
+ ICState state, HandleValue val);
+
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII UnaryArithIRGenerator : public IRGenerator {
+ JSOp op_;
+ HandleValue val_;
+ HandleValue res_;
+
+ AttachDecision tryAttachInt32();
+ AttachDecision tryAttachNumber();
+ AttachDecision tryAttachBitwise();
+ AttachDecision tryAttachBigInt();
+ AttachDecision tryAttachStringInt32();
+ AttachDecision tryAttachStringNumber();
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ UnaryArithIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc,
+ ICState state, JSOp op, HandleValue val,
+ HandleValue res);
+
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII ToPropertyKeyIRGenerator : public IRGenerator {
+ HandleValue val_;
+
+ AttachDecision tryAttachInt32();
+ AttachDecision tryAttachNumber();
+ AttachDecision tryAttachString();
+ AttachDecision tryAttachSymbol();
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ ToPropertyKeyIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc,
+ ICState state, HandleValue val);
+
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII BinaryArithIRGenerator : public IRGenerator {
+ JSOp op_;
+ HandleValue lhs_;
+ HandleValue rhs_;
+ HandleValue res_;
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ AttachDecision tryAttachInt32();
+ AttachDecision tryAttachDouble();
+ AttachDecision tryAttachBitwise();
+ AttachDecision tryAttachStringConcat();
+ AttachDecision tryAttachStringObjectConcat();
+ AttachDecision tryAttachBigInt();
+ AttachDecision tryAttachStringInt32Arith();
+
+ public:
+ BinaryArithIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc,
+ ICState state, JSOp op, HandleValue lhs,
+ HandleValue rhs, HandleValue res);
+
+ AttachDecision tryAttachStub();
+};
+
+class MOZ_RAII NewArrayIRGenerator : public IRGenerator {
+#ifdef JS_CACHEIR_SPEW
+ JSOp op_;
+#endif
+ HandleObject templateObject_;
+ BaselineFrame* frame_;
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ NewArrayIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc,
+ ICState state, JSOp op, HandleObject templateObj,
+ BaselineFrame* frame);
+
+ AttachDecision tryAttachStub();
+ AttachDecision tryAttachArrayObject();
+};
+
+class MOZ_RAII NewObjectIRGenerator : public IRGenerator {
+#ifdef JS_CACHEIR_SPEW
+ JSOp op_;
+#endif
+ HandleObject templateObject_;
+ BaselineFrame* frame_;
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ NewObjectIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc,
+ ICState state, JSOp op, HandleObject templateObj,
+ BaselineFrame* frame);
+
+ AttachDecision tryAttachStub();
+ AttachDecision tryAttachPlainObject();
+};
+
+inline bool BytecodeOpCanHaveAllocSite(JSOp op) {
+ return op == JSOp::NewArray || op == JSOp::NewObject || op == JSOp::NewInit;
+}
+
+class MOZ_RAII CloseIterIRGenerator : public IRGenerator {
+ HandleObject iter_;
+ CompletionKind kind_;
+
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ CloseIterIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc,
+ ICState state, HandleObject iter, CompletionKind kind);
+
+ AttachDecision tryAttachStub();
+ AttachDecision tryAttachNoReturnMethod();
+ AttachDecision tryAttachScriptedReturn();
+};
+
+// Retrieve Xray JIT info set by the embedder.
+extern JS::XrayJitInfo* GetXrayJitInfo();
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CacheIRGenerator_h */
diff --git a/js/src/jit/CacheIRHealth.cpp b/js/src/jit/CacheIRHealth.cpp
new file mode 100644
index 0000000000..a363dba38d
--- /dev/null
+++ b/js/src/jit/CacheIRHealth.cpp
@@ -0,0 +1,416 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifdef JS_CACHEIR_SPEW
+
+# include "jit/CacheIRHealth.h"
+
+# include "mozilla/Maybe.h"
+
+# include "gc/Zone.h"
+# include "jit/BaselineIC.h"
+# include "jit/CacheIRCompiler.h"
+# include "jit/JitScript.h"
+# include "vm/JSScript.h"
+
+# include "vm/JSObject-inl.h"
+# include "vm/Realm-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// TODO: Refine how we assign happiness based on total health score.
+CacheIRHealth::Happiness CacheIRHealth::determineStubHappiness(
+ uint32_t stubHealthScore) {
+ if (stubHealthScore >= 30) {
+ return Sad;
+ }
+
+ if (stubHealthScore >= 20) {
+ return MediumSad;
+ }
+
+ if (stubHealthScore >= 10) {
+ return MediumHappy;
+ }
+
+ return Happy;
+}
+
+CacheIRHealth::Happiness CacheIRHealth::spewStubHealth(
+ AutoStructuredSpewer& spew, ICCacheIRStub* stub) {
+ const CacheIRStubInfo* stubInfo = stub->stubInfo();
+ CacheIRReader stubReader(stubInfo);
+ uint32_t totalStubHealth = 0;
+ spew->beginListProperty("cacheIROps");
+ while (stubReader.more()) {
+ CacheOp op = stubReader.readOp();
+ uint32_t opHealth = CacheIROpHealth[size_t(op)];
+ uint32_t argLength = CacheIROpInfos[size_t(op)].argLength;
+ const char* opName = CacheIROpNames[size_t(op)];
+
+ spew->beginObject();
+ if (opHealth == UINT32_MAX) {
+ spew->property("unscoredOp", opName);
+ } else {
+ spew->property("cacheIROp", opName);
+ spew->property("opHealth", opHealth);
+ totalStubHealth += opHealth;
+ }
+ spew->endObject();
+
+ stubReader.skip(argLength);
+ }
+ spew->endList(); // cacheIROps
+
+ spew->property("stubHealth", totalStubHealth);
+
+ Happiness stubHappiness = determineStubHappiness(totalStubHealth);
+ spew->property("stubHappiness", stubHappiness);
+
+ return stubHappiness;
+}
+
+BaseScript* CacheIRHealth::maybeExtractBaseScript(JSContext* cx, Shape* shape) {
+ TaggedProto taggedProto = shape->base()->proto();
+ if (!taggedProto.isObject()) {
+ return nullptr;
+ }
+ Value cval;
+ JSObject* proto = taggedProto.toObject();
+ AutoRealm ar(cx, proto);
+ if (!GetPropertyPure(cx, proto, NameToId(cx->names().constructor), &cval)) {
+ return nullptr;
+ }
+ if (!IsFunctionObject(cval)) {
+ return nullptr;
+ }
+ JSFunction& jsfun = cval.toObject().as<JSFunction>();
+ if (!jsfun.hasBaseScript()) {
+ return nullptr;
+ }
+ return jsfun.baseScript();
+}
+
+void CacheIRHealth::spewShapeInformation(AutoStructuredSpewer& spew,
+ JSContext* cx, ICStub* stub) {
+ bool shapesStarted = false;
+ const CacheIRStubInfo* stubInfo = stub->toCacheIRStub()->stubInfo();
+ size_t offset = 0;
+ uint32_t fieldIndex = 0;
+
+ while (stubInfo->fieldType(fieldIndex) != StubField::Type::Limit) {
+ if (stubInfo->fieldType(fieldIndex) == StubField::Type::Shape) {
+ Shape* shape = reinterpret_cast<Shape*>(
+ stubInfo->getStubRawWord(stub->toCacheIRStub(), offset));
+ if (!shapesStarted) {
+ shapesStarted = true;
+ spew->beginListProperty("shapes");
+ }
+
+ const PropMap* propMap =
+ shape->isNative() ? shape->asNative().propMap() : nullptr;
+ if (propMap) {
+ spew->beginObject();
+ {
+ if (!propMap->isDictionary()) {
+ uint32_t mapLength = shape->asNative().propMapLength();
+ if (mapLength) {
+ PropertyKey lastKey = shape->asNative().lastProperty().key();
+ if (lastKey.isInt()) {
+ spew->property("lastProperty", lastKey.toInt());
+ } else if (lastKey.isString()) {
+ JSString* str = lastKey.toString();
+ if (str && str->isLinear()) {
+ spew->property("lastProperty", &str->asLinear());
+ }
+ } else {
+ MOZ_ASSERT(lastKey.isSymbol());
+ JSString* str = lastKey.toSymbol()->description();
+ if (str && str->isLinear()) {
+ spew->property("lastProperty", &str->asLinear());
+ }
+ }
+ }
+ spew->property("totalKeys", propMap->approximateEntryCount());
+ BaseScript* baseScript = maybeExtractBaseScript(cx, shape);
+ if (baseScript) {
+ spew->beginObjectProperty("shapeAllocSite");
+ {
+ spew->property("filename", baseScript->filename());
+ spew->property("line", baseScript->lineno());
+ spew->property("column", baseScript->column());
+ }
+ spew->endObject();
+ }
+ }
+ }
+ spew->endObject();
+ }
+ }
+ offset += StubField::sizeInBytes(stubInfo->fieldType(fieldIndex));
+ fieldIndex++;
+ }
+
+ if (shapesStarted) {
+ spew->endList();
+ }
+}
+
+bool CacheIRHealth::spewNonFallbackICInformation(AutoStructuredSpewer& spew,
+ JSContext* cx,
+ ICStub* firstStub,
+ Happiness* entryHappiness) {
+ const CacheIRStubInfo* stubInfo = firstStub->toCacheIRStub()->stubInfo();
+ Vector<bool, 8, SystemAllocPolicy> sawDistinctValueAtFieldIndex;
+
+ bool sawNonZeroCount = false;
+ bool sawDifferentCacheIRStubs = false;
+ ICStub* stub = firstStub;
+
+ spew->beginListProperty("stubs");
+ while (stub && !stub->isFallback()) {
+ spew->beginObject();
+ {
+ Happiness stubHappiness = spewStubHealth(spew, stub->toCacheIRStub());
+ if (stubHappiness < *entryHappiness) {
+ *entryHappiness = stubHappiness;
+ }
+
+ spewShapeInformation(spew, cx, stub);
+
+ ICStub* nextStub = stub->toCacheIRStub()->next();
+ if (!nextStub->isFallback()) {
+ if (nextStub->enteredCount() > 0) {
+ // More than one stub has a hit count greater than zero.
+ // This is sad because we do not Warp transpile in this case.
+ *entryHappiness = Sad;
+ sawNonZeroCount = true;
+ }
+
+ if (nextStub->toCacheIRStub()->stubInfo() != stubInfo) {
+ sawDifferentCacheIRStubs = true;
+ }
+
+          // If there are multiple stubs with non-zero hit counts and all of
+          // the stubs have equivalent CacheIR, then track whether distinct
+          // stub field values are seen at each field index.
+ if (sawNonZeroCount && !sawDifferentCacheIRStubs) {
+ uint32_t fieldIndex = 0;
+ size_t offset = 0;
+
+ while (stubInfo->fieldType(fieldIndex) != StubField::Type::Limit) {
+ if (sawDistinctValueAtFieldIndex.length() <= fieldIndex) {
+ if (!sawDistinctValueAtFieldIndex.append(false)) {
+ return false;
+ }
+ }
+
+ if (StubField::sizeIsWord(stubInfo->fieldType(fieldIndex))) {
+ uintptr_t firstRaw =
+ stubInfo->getStubRawWord(firstStub->toCacheIRStub(), offset);
+ uintptr_t nextRaw =
+ stubInfo->getStubRawWord(nextStub->toCacheIRStub(), offset);
+ if (firstRaw != nextRaw) {
+ sawDistinctValueAtFieldIndex[fieldIndex] = true;
+ }
+ } else {
+ MOZ_ASSERT(
+ StubField::sizeIsInt64(stubInfo->fieldType(fieldIndex)));
+ int64_t firstRaw =
+ stubInfo->getStubRawInt64(firstStub->toCacheIRStub(), offset);
+ int64_t nextRaw =
+ stubInfo->getStubRawInt64(nextStub->toCacheIRStub(), offset);
+
+ if (firstRaw != nextRaw) {
+ sawDistinctValueAtFieldIndex[fieldIndex] = true;
+ }
+ }
+
+ offset += StubField::sizeInBytes(stubInfo->fieldType(fieldIndex));
+ fieldIndex++;
+ }
+ }
+ }
+
+ spew->property("hitCount", stub->enteredCount());
+ stub = nextStub;
+ }
+ spew->endObject();
+ }
+ spew->endList(); // stubs
+
+ // If more than one CacheIR stub has an entered count greater than
+ // zero and all the stubs have equivalent CacheIR, then spew
+ // the information collected about the stub fields across the IC.
+ if (sawNonZeroCount && !sawDifferentCacheIRStubs) {
+ spew->beginListProperty("stubFields");
+ for (size_t i = 0; i < sawDistinctValueAtFieldIndex.length(); i++) {
+ spew->beginObject();
+ {
+ spew->property("fieldType", uint8_t(stubInfo->fieldType(i)));
+ spew->property("sawDistinctFieldValues",
+ sawDistinctValueAtFieldIndex[i]);
+ }
+ spew->endObject();
+ }
+ spew->endList();
+ }
+
+ return true;
+}
+
+bool CacheIRHealth::spewICEntryHealth(AutoStructuredSpewer& spew, JSContext* cx,
+ HandleScript script, ICEntry* entry,
+ ICFallbackStub* fallback, jsbytecode* pc,
+ JSOp op, Happiness* entryHappiness) {
+ spew->property("op", CodeName(op));
+
+ // TODO: If a perf issue arises, look into improving the SrcNotes
+ // API call below.
+ unsigned column;
+ spew->property("lineno", PCToLineNumber(script, pc, &column));
+ spew->property("column", column);
+
+ ICStub* firstStub = entry->firstStub();
+ if (!firstStub->isFallback()) {
+ if (!spewNonFallbackICInformation(spew, cx, firstStub, entryHappiness)) {
+ return false;
+ }
+ }
+
+ if (fallback->state().mode() != ICState::Mode::Specialized) {
+ *entryHappiness = Sad;
+ }
+
+ spew->property("entryHappiness", uint8_t(*entryHappiness));
+
+ spew->property("mode", uint8_t(fallback->state().mode()));
+
+ spew->property("fallbackCount", fallback->enteredCount());
+
+ return true;
+}
+
+void CacheIRHealth::spewScriptFinalWarmUpCount(JSContext* cx,
+ const char* filename,
+ JSScript* script,
+ uint32_t warmUpCount) {
+ AutoStructuredSpewer spew(cx, SpewChannel::CacheIRHealthReport, nullptr);
+ if (!spew) {
+ return;
+ }
+
+ spew->property("filename", filename);
+ spew->property("line", script->lineno());
+ spew->property("column", script->column());
+ spew->property("finalWarmUpCount", warmUpCount);
+}
+
+static bool addScriptToFinalWarmUpCountMap(JSContext* cx, HandleScript script) {
+  // Create Zone::scriptFinalWarmUpCountMap if necessary.
+ JS::Zone* zone = script->zone();
+ if (!zone->scriptFinalWarmUpCountMap) {
+ auto map = MakeUnique<ScriptFinalWarmUpCountMap>();
+ if (!map) {
+ return false;
+ }
+
+ zone->scriptFinalWarmUpCountMap = std::move(map);
+ }
+
+ SharedImmutableString sfilename =
+ SharedImmutableStringsCache::getSingleton().getOrCreate(
+ script->filename(), strlen(script->filename()));
+ if (!sfilename) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!zone->scriptFinalWarmUpCountMap->put(
+ script, std::make_tuple(uint32_t(0), std::move(sfilename)))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ script->setNeedsFinalWarmUpCount();
+ return true;
+}
+
+void CacheIRHealth::healthReportForIC(JSContext* cx, ICEntry* entry,
+ ICFallbackStub* fallback,
+ HandleScript script,
+ SpewContext context) {
+ AutoStructuredSpewer spew(cx, SpewChannel::CacheIRHealthReport, script);
+ if (!spew) {
+ return;
+ }
+
+ if (!addScriptToFinalWarmUpCountMap(cx, script)) {
+ cx->recoverFromOutOfMemory();
+ return;
+ }
+ spew->property("spewContext", uint8_t(context));
+
+  jsbytecode* pc = script->offsetToPC(fallback->pcOffset());
+  JSOp op = JSOp(*pc);
+
+  Happiness entryHappiness = Happy;
+  if (!spewICEntryHealth(spew, cx, script, entry, fallback, pc, op,
+                         &entryHappiness)) {
+ cx->recoverFromOutOfMemory();
+ return;
+ }
+ MOZ_ASSERT(entryHappiness == Sad);
+}
+
+void CacheIRHealth::healthReportForScript(JSContext* cx, HandleScript script,
+ SpewContext context) {
+ jit::JitScript* jitScript = script->maybeJitScript();
+ if (!jitScript) {
+ return;
+ }
+
+ AutoStructuredSpewer spew(cx, SpewChannel::CacheIRHealthReport, script);
+ if (!spew) {
+ return;
+ }
+
+ if (!addScriptToFinalWarmUpCountMap(cx, script)) {
+ cx->recoverFromOutOfMemory();
+ return;
+ }
+
+ spew->property("spewContext", uint8_t(context));
+
+ spew->beginListProperty("entries");
+
+ Happiness scriptHappiness = Happy;
+
+ for (size_t i = 0; i < jitScript->numICEntries(); i++) {
+ ICEntry& entry = jitScript->icEntry(i);
+ ICFallbackStub* fallback = jitScript->fallbackStub(i);
+ jsbytecode* pc = script->offsetToPC(fallback->pcOffset());
+ JSOp op = JSOp(*pc);
+
+ spew->beginObject();
+ Happiness entryHappiness = Happy;
+ if (!spewICEntryHealth(spew, cx, script, &entry, fallback, pc, op,
+ &entryHappiness)) {
+ cx->recoverFromOutOfMemory();
+ return;
+ }
+ if (entryHappiness < scriptHappiness) {
+ scriptHappiness = entryHappiness;
+ }
+ spew->endObject();
+ }
+
+ spew->endList(); // entries
+
+ spew->property("scriptHappiness", uint8_t(scriptHappiness));
+}
+
+#endif /* JS_CACHEIR_SPEW */
diff --git a/js/src/jit/CacheIRHealth.h b/js/src/jit/CacheIRHealth.h
new file mode 100644
index 0000000000..56528aa8d6
--- /dev/null
+++ b/js/src/jit/CacheIRHealth.h
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CacheIRHealth_h
+#define jit_CacheIRHealth_h
+
+#ifdef JS_CACHEIR_SPEW
+
+# include "mozilla/Sprintf.h"
+
+# include "NamespaceImports.h"
+
+# include "jit/CacheIR.h"
+# include "js/TypeDecls.h"
+
+enum class JSOp : uint8_t;
+
+namespace js {
+
+class AutoStructuredSpewer;
+
+namespace jit {
+
+class ICEntry;
+class ICStub;
+class ICCacheIRStub;
+class ICFallbackStub;
+
+// [SMDOC] CacheIR Health Report
+//
+// The goal of the CacheIR health report is to make the costlier CacheIR stubs
+// more apparent and easier to diagnose. This is done by using the scores
+// assigned to different CacheIR ops in CacheIROps.yaml (see the description of
+// cost_estimate in that file for how the scores are determined), summing the
+// scores of the ops generated for a particular stub, and displaying this total
+// for each stub in an inline cache. The higher the total stub score, the more
+// expensive the stub is.
+//
+// There are a few ways to generate a health report for a script:
+//  1. Simply running a JS program with the environment variable
+// SPEW=CacheIRHealthReport. We generate a health report for a script
+// whenever we reach the trial inlining threshold.
+// ex) SPEW=CacheIRHealthReport dist/bin/js jsprogram.js
+//  2. In the shell you can call cacheIRHealthReport() with no arguments and a
+//     report will be generated for all scripts in the current zone.
+// ex) cacheIRHealthReport()
+//  3. You may also call cacheIRHealthReport() on a particular function to see
+//     the health report associated with that function's script.
+// ex) cacheIRHealthReport(foo)
+//
+// Once you have generated a health report, you may go to
+// https://mozilla-spidermonkey.github.io/cacheirhealthreport/ to visualize the
+// data and aid in understanding what may be going wrong with the CacheIR for a
+// particular stub. For more information about the tool and why a particular
+// script, inline cache entry, or stub is unhappy go to:
+// https://mozilla-spidermonkey.github.io/cacheirhealthreport/info.html
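+//
+// The report is emitted through the structured spewer as JSON. As a rough
+// sketch (property names taken from CacheIRHealth.cpp; the exact layout is
+// determined by the spewer), a script-level report looks like:
+//
+//   { "spewContext": <context>,
+//     "entries": [
+//       { "op": <op name>, "lineno": <line>, "column": <column>,
+//         "stubs": [ { "cacheIROps": [...], "stubHealth": <score>,
+//                      "stubHappiness": <happiness>, "hitCount": <count> } ],
+//         "entryHappiness": <happiness>, "mode": <mode>,
+//         "fallbackCount": <count> } ],
+//     "scriptHappiness": <happiness> }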
+//
+enum SpewContext : uint8_t { Shell, Transition, TrialInlining };
+
+class CacheIRHealth {
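+  // Happiness is ordered from least healthy (Sad) to most healthy (Happy) so
+  // that values can be compared directly; see determineStubHappiness for how
+  // health scores map onto these buckets.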
+ enum Happiness : uint8_t { Sad, MediumSad, MediumHappy, Happy };
+
+ // Get happiness from health score.
+ Happiness determineStubHappiness(uint32_t stubHealthScore);
+ // Health of an individual stub.
+ Happiness spewStubHealth(AutoStructuredSpewer& spew, ICCacheIRStub* stub);
+  // If there is more than just a fallback stub in an IC entry, spews
+  // additional information about the stubs in that entry.
+ bool spewNonFallbackICInformation(AutoStructuredSpewer& spew, JSContext* cx,
+ ICStub* firstStub,
+ Happiness* entryHappiness);
+ // Health of all the stubs in an individual CacheIR Entry.
+ bool spewICEntryHealth(AutoStructuredSpewer& spew, JSContext* cx,
+ HandleScript script, ICEntry* entry,
+ ICFallbackStub* fallback, jsbytecode* pc, JSOp op,
+ Happiness* entryHappiness);
+  // Spews the last property name, approximate key count, and (when available)
+  // the allocation site for each Shape field in the stub.
+ void spewShapeInformation(AutoStructuredSpewer& spew, JSContext* cx,
+ ICStub* stub);
+ // Returns the BaseScript of a Shape if available.
+ BaseScript* maybeExtractBaseScript(JSContext* cx, Shape* shape);
+
+ public:
+  // Spews the final warm-up count for scripts whose final warm-up count we
+  // care about.
+ void spewScriptFinalWarmUpCount(JSContext* cx, const char* filename,
+ JSScript* script, uint32_t warmUpCount);
+ // Spew the health of a particular ICEntry only.
+ void healthReportForIC(JSContext* cx, ICEntry* entry,
+ ICFallbackStub* fallback, HandleScript script,
+ SpewContext context);
+ // If a JitScript exists, spew the health of all ICEntries that exist
+ // for the specified script.
+ void healthReportForScript(JSContext* cx, HandleScript script,
+ SpewContext context);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_CACHEIR_SPEW */
+#endif /* jit_CacheIRHealth_h */
diff --git a/js/src/jit/CacheIROps.yaml b/js/src/jit/CacheIROps.yaml
new file mode 100644
index 0000000000..a528193ce7
--- /dev/null
+++ b/js/src/jit/CacheIROps.yaml
@@ -0,0 +1,3086 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# [SMDOC] CacheIR Opcodes
+# =======================
+# This file defines all CacheIR opcodes and their arguments.
+#
+# Each op has the following attributes:
+#
+# name
+# ====
+# Opcode name. Convention is to use a name ending in *Result for ops that store
+# to the IC's output register.
+#
+# shared
+# ======
+# If true, Baseline and Ion use the same CacheIRCompiler code for this op.
+# If false, the op must be implemented in both BaselineCacheIRCompiler and
+# IonCacheIRCompiler.
+#
+# transpile
+# =========
+# Whether this op can be transpiled to MIR by WarpCacheIRTranspiler.
+#
+# cost_estimate
+# =============
+# Score of an individual CacheIR opcode's contribution to the overall score of
+# each stub. This score is based on the cost of the masm calls made by the
+# op's implementation. The higher the score, the more costly the op is.
+#
+# How to decide the cost estimate for a CacheIROp:
+# 0 points - Generates no code
+# 1 point - 1-5 simple masm ops, no callVM or callWithABI
+# 2 points - 5-20 masm ops, no callVM or callWithABI
+# 3 points - 20+ masm ops, no callVM or callWithABI
+# 4 points - callWithABI
+# 5 points - callVM
+# 6 points - more than one callWithABI or callVM
+#
+# If the op is not shared, default to scoring the Baseline implementation.
+#
+# If the cost estimate differs depending on which branch of a conditional is
+# taken, assign the score of the most expensive branch.
+#
+# Note:
+# Currently, the scoring is tentative. It is in place to provide an
+# estimate for the cost of each op. The scoring will be refined.
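+#
+# For example, a hypothetical guard op whose implementation emits only a few
+# simple masm instructions and makes no callVM or callWithABI would be scored
+# like this (illustrative sketch only, not a real op):
+#
+#   - name: GuardExampleThing
+#     shared: true
+#     transpile: true
+#     cost_estimate: 1   # 1-5 simple masm ops, no callVM or callWithABI
+#     args:
+#       obj: ObjId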
+#
+# custom_writer (optional)
+# ========================
+# If true, the generated CacheIRWriter method will be private and have a
+# trailing '_'. This is useful for ops that need custom CacheIRWriter logic on
+# top of the generated code.
+#
+# args
+# ====
+# List of arguments encoded in the bytecode stream. There are three argument
+# kinds:
+#
+# - Id (ObjId, ValId, ...): refers to either an IC input or a value defined by
+#   a previous CacheIR instruction. This is encoded as an integer in the
+#   bytecode stream.
+#
+# - Field (ObjectField, StringField, ...): a specific value is stored in the
+#   stub data and the bytecode stream stores the offset of this field. This
+#   means the CacheIR is not specialized for particular values and code can be
+#   shared.
+#
+# - Immediate (BoolImm, Int32Imm, JSOpImm, ...): a value baked directly into
+# the bytecode stream. This is useful for bits of state that need to be
+# available to all CacheIR compilers/transpilers.
+#
+# If there's an argument named 'result', the generated CacheIRWriter method will
+# return a new OperandId of this type.
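+#
+# For example, an op declared with 'input: ValId' and 'result: Int32Id' roughly
+# corresponds to a generated CacheIRWriter method of the form (sketch; the
+# exact signature is produced by GenerateCacheIRFiles.py):
+#
+#   Int32OperandId someOpName(ValOperandId input);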
+
+- name: ReturnFromIC
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+
+- name: GuardToObject
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ input: ValId
+
+- name: GuardIsNullOrUndefined
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: ValId
+
+- name: GuardIsNull
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: ValId
+
+- name: GuardIsUndefined
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: ValId
+
+- name: GuardIsNotUninitializedLexical
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: ValId
+
+- name: GuardToBoolean
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ input: ValId
+
+- name: GuardToString
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ input: ValId
+
+- name: GuardToSymbol
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ input: ValId
+
+- name: GuardToBigInt
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ input: ValId
+
+- name: GuardIsNumber
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ input: ValId
+
+- name: GuardToInt32
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ input: ValId
+
+- name: GuardToNonGCThing
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: ValId
+
+# If the Value is a boolean, convert it to int32.
+- name: GuardBooleanToInt32
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: ValId
+ result: Int32Id
+
+- name: GuardToInt32Index
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: ValId
+ result: Int32Id
+
+- name: Int32ToIntPtr
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: Int32Id
+ result: IntPtrId
+
+- name: GuardNumberToIntPtrIndex
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ input: NumberId
+ supportOOB: BoolImm
+ result: IntPtrId
+
+- name: GuardToInt32ModUint32
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ input: ValId
+ result: Int32Id
+
+- name: GuardToUint8Clamped
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ input: ValId
+ result: Int32Id
+
+# Note: this doesn't accept doubles to avoid ambiguity about whether it includes
+# int32 values. Use GuardIsNumber instead.
+- name: GuardNonDoubleType
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: ValId
+ type: ValueTypeImm
+
+- name: GuardShape
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ shape: ShapeField
+
+- name: GuardMultipleShapes
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ custom_writer: true
+ args:
+ obj: ObjId
+ shapes: ObjectField
+
+- name: GuardProto
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ proto: ObjectField
+
+- name: GuardNullProto
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+# Guard per GuardClassKind.
+- name: GuardClass
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ kind: GuardClassKindImm
+
+# Guard on an arbitrary JSClass.
+- name: GuardAnyClass
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ clasp: RawPointerField
+
+- name: GuardGlobalGeneration
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ expected: RawInt32Field
+ generationAddr: RawPointerField
+
+- name: HasClassResult
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ clasp: RawPointerField
+
+- name: CallRegExpMatcherResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ regexp: ObjId
+ input: StringId
+ lastIndex: Int32Id
+ stub: JitCodeField
+
+- name: CallRegExpSearcherResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ regexp: ObjId
+ input: StringId
+ lastIndex: Int32Id
+ stub: JitCodeField
+
+- name: RegExpBuiltinExecMatchResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ regexp: ObjId
+ input: StringId
+ stub: JitCodeField
+
+- name: RegExpBuiltinExecTestResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ regexp: ObjId
+ input: StringId
+ stub: JitCodeField
+
+- name: RegExpFlagResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ regexp: ObjId
+ flagsMask: Int32Imm
+
+- name: CallSubstringKernelResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+ begin: Int32Id
+ length: Int32Id
+
+- name: StringReplaceStringResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+ pattern: StringId
+ replacement: StringId
+
+- name: StringSplitStringResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+ separator: StringId
+
+- name: RegExpPrototypeOptimizableResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ proto: ObjId
+
+- name: RegExpInstanceOptimizableResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ regexp: ObjId
+ proto: ObjId
+
+- name: GetFirstDollarIndexResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+
+# Add a reference to a global in the compartment to keep it alive.
+- name: GuardCompartment
+ shared: false
+ transpile: false
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ global: ObjectField
+ compartment: RawPointerField
+
+- name: GuardIsExtensible
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: GuardIsNativeObject
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: GuardIsProxy
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: GuardIsNotProxy
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: GuardIsNotArrayBufferMaybeShared
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: GuardIsTypedArray
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: GuardHasProxyHandler
+ shared: false
+ transpile: false
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ handler: RawPointerField
+
+- name: GuardIsNotDOMProxy
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: GuardSpecificObject
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ expected: ObjectField
+
+- name: GuardObjectIdentity
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj1: ObjId
+ obj2: ObjId
+
+- name: GuardSpecificFunction
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ fun: ObjId
+ expected: ObjectField
+ nargsAndFlags: RawInt32Field
+
+- name: GuardFunctionScript
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ obj: ObjId
+ expected: BaseScriptField
+ nargsAndFlags: RawInt32Field
+
+- name: GuardSpecificAtom
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ args:
+ str: StringId
+ expected: AtomField
+
+- name: GuardSpecificSymbol
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ sym: SymbolId
+ expected: SymbolField
+
+- name: GuardSpecificInt32
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ num: Int32Id
+ expected: Int32Imm
+
+- name: GuardNoDenseElements
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: GuardStringToIndex
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ str: StringId
+ result: Int32Id
+
+- name: GuardStringToInt32
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ str: StringId
+ result: Int32Id
+
+- name: GuardStringToNumber
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ str: StringId
+ result: NumberId
+
+- name: BooleanToNumber
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ boolean: BooleanId
+ result: NumberId
+
+- name: GuardHasGetterSetter
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ id: IdField
+ getterSetter: GetterSetterField
+
+- name: GuardInt32IsNonNegative
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ index: Int32Id
+
+- name: GuardIndexIsValidUpdateOrAdd
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: GuardIndexIsNotDenseElement
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: GuardTagNotEqual
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: ValueTagId
+ rhs: ValueTagId
+
+- name: GuardXrayExpandoShapeAndDefaultProto
+ shared: true
+ transpile: false
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ shapeWrapper: ObjectField
+
+- name: GuardXrayNoExpando
+ shared: true
+ transpile: false
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+# Guard obj[slot] == expected.
+- name: GuardDynamicSlotIsSpecificObject
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ expected: ObjId
+ slot: RawInt32Field
+
+# Guard obj[slot] is not an object.
+- name: GuardDynamicSlotIsNotObject
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ slot: RawInt32Field
+
+- name: GuardFixedSlotValue
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+ val: ValueField
+
+- name: GuardDynamicSlotValue
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+ val: ValueField
+
+- name: LoadFixedSlot
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ result: ValId
+ obj: ObjId
+ offset: RawInt32Field
+
+- name: LoadDynamicSlot
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ result: ValId
+ obj: ObjId
+ slot: RawInt32Field
+
+- name: GuardNoAllocationMetadataBuilder
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ builderAddr: RawPointerField
+
+- name: GuardFunctionHasJitEntry
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ fun: ObjId
+ constructing: BoolImm
+
+- name: GuardFunctionHasNoJitEntry
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ fun: ObjId
+
+- name: GuardFunctionIsNonBuiltinCtor
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ fun: ObjId
+
+- name: GuardFunctionIsConstructor
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ fun: ObjId
+
+- name: GuardNotClassConstructor
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ fun: ObjId
+
+- name: GuardArrayIsPacked
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ array: ObjId
+
+- name: GuardArgumentsObjectFlags
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ flags: ByteImm
+
+- name: LoadObject
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ result: ObjId
+ obj: ObjectField
+
+# This is just LoadObject with extra information for the purpose of optimizing
+# out shape guards if we're just storing to slots of the receiver object.
+- name: LoadProtoObject
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ result: ObjId
+ protoObj: ObjectField
+ receiverObj: ObjId
+
+- name: LoadProto
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ result: ObjId
+
+- name: LoadEnclosingEnvironment
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ result: ObjId
+
+- name: LoadWrapperTarget
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ result: ObjId
+
+- name: LoadValueTag
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: ValId
+ result: ValueTagId
+
+- name: LoadArgumentFixedSlot
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ result: ValId
+ slotIndex: ByteImm
+
+- name: LoadArgumentDynamicSlot
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ custom_writer: true
+ args:
+ result: ValId
+ argc: Int32Id
+ slotIndex: ByteImm
+
+- name: TruncateDoubleToUInt32
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ input: NumberId
+ result: Int32Id
+
+- name: MegamorphicLoadSlotResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ name: IdField
+
+- name: MegamorphicLoadSlotByValueResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ id: ValId
+
+- name: MegamorphicStoreSlot
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ name: IdField
+ rhs: ValId
+ strict: BoolImm
+
+- name: MegamorphicSetElement
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ id: ValId
+ rhs: ValId
+ strict: BoolImm
+
+- name: MegamorphicHasPropResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ id: ValId
+ hasOwn: BoolImm
+
+- name: ObjectToIteratorResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ enumeratorsAddr: RawPointerField
+
+- name: ValueToIteratorResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ val: ValId
+
+# See CacheIR.cpp 'DOM proxies' comment.
+- name: LoadDOMExpandoValue
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ result: ValId
+
+- name: LoadDOMExpandoValueGuardGeneration
+ shared: false
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ expandoAndGeneration: RawPointerField
+ generation: RawInt64Field
+ result: ValId
+
+- name: LoadDOMExpandoValueIgnoreGeneration
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ result: ValId
+
+- name: GuardDOMExpandoMissingOrGuardShape
+ shared: false
+ transpile: true
+ cost_estimate: 2
+ args:
+ expando: ValId
+ shape: ShapeField
+
+- name: StoreFixedSlot
+ shared: false
+ transpile: true
+ cost_estimate: 6
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+ rhs: ValId
+
+- name: StoreDynamicSlot
+ shared: false
+ transpile: true
+ cost_estimate: 6
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+ rhs: ValId
+
+- name: AddAndStoreFixedSlot
+ shared: false
+ transpile: true
+ cost_estimate: 6
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+ rhs: ValId
+ newShape: ShapeField
+
+- name: AddAndStoreDynamicSlot
+ shared: false
+ transpile: true
+ cost_estimate: 6
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+ rhs: ValId
+ newShape: ShapeField
+
+- name: AllocateAndStoreDynamicSlot
+ shared: false
+ transpile: true
+ cost_estimate: 6
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+ rhs: ValId
+ newShape: ShapeField
+ numNewSlots: RawInt32Field
+
+- name: AddSlotAndCallAddPropHook
+ shared: true
+ transpile: true
+ cost_estimate: 6
+ args:
+ obj: ObjId
+ rhs: ValId
+ newShape: ShapeField
+
+- name: StoreDenseElement
+ shared: true
+ transpile: true
+ cost_estimate: 6
+ args:
+ obj: ObjId
+ index: Int32Id
+ rhs: ValId
+
+- name: StoreDenseElementHole
+ shared: true
+ transpile: true
+ cost_estimate: 6
+ args:
+ obj: ObjId
+ index: Int32Id
+ rhs: ValId
+ handleAdd: BoolImm
+
+- name: ArrayPush
+ shared: true
+ transpile: true
+ cost_estimate: 6
+ args:
+ obj: ObjId
+ rhs: ValId
+
+- name: ArrayJoinResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ sep: StringId
+
+- name: PackedArrayPopResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ array: ObjId
+
+- name: PackedArrayShiftResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ array: ObjId
+
+- name: PackedArraySliceResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+ array: ObjId
+ begin: Int32Id
+ end: Int32Id
+
+- name: ArgumentsSliceResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+ args: ObjId
+ begin: Int32Id
+ end: Int32Id
+
+- name: IsArrayResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ input: ValId
+
+- name: StoreFixedSlotUndefinedResult
+ shared: true
+ transpile: true
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+ rhs: ValId
+
+- name: IsObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: ValId
+
+- name: IsPackedArrayResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: IsCallableResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ input: ValId
+
+- name: IsConstructorResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+
+- name: IsCrossRealmArrayConstructorResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: IsTypedArrayResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ isPossiblyWrapped: BoolImm
+
+- name: IsTypedArrayConstructorResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ArrayBufferViewByteOffsetInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: ArrayBufferViewByteOffsetDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: TypedArrayByteLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: TypedArrayByteLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: TypedArrayElementSizeResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: GuardHasAttachedArrayBuffer
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: NewArrayIteratorResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+
+- name: NewStringIteratorResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+
+- name: NewRegExpStringIteratorResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+
+- name: ObjectCreateResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+
+- name: NewArrayFromLengthResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+ length: Int32Id
+
+- name: NewTypedArrayFromLengthResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+ length: Int32Id
+
+- name: NewTypedArrayFromArrayBufferResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+ buffer: ObjId
+ byteOffset: ValId
+ length: ValId
+
+- name: NewTypedArrayFromArrayResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+ array: ObjId
+
+- name: NewStringObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ templateObject: ObjectField
+ str: StringId
+
+- name: StringFromCharCodeResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ code: Int32Id
+
+- name: StringFromCodePointResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ code: Int32Id
+
+- name: StringIndexOfResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+ searchStr: StringId
+
+- name: StringStartsWithResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+ searchStr: StringId
+
+- name: StringEndsWithResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+ searchStr: StringId
+
+- name: StringToLowerCaseResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+
+- name: StringToUpperCaseResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+
+- name: MathAbsInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ input: Int32Id
+
+- name: MathAbsNumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: NumberId
+
+- name: MathClz32Result
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: Int32Id
+
+- name: MathSignInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: Int32Id
+
+- name: MathSignNumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ input: NumberId
+
+- name: MathSignNumberToInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ input: NumberId
+
+- name: MathImulResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: MathSqrtNumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: NumberId
+
+- name: MathFRoundNumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: NumberId
+
+# Because Baseline stub code is shared by all realms in the Zone, this
+# instruction loads a pointer to the RNG from a stub field.
+- name: MathRandomResult
+ shared: false
+ transpile: true
+ cost_estimate: 3
+ args:
+ rng: RawPointerField
+
+- name: MathHypot2NumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ first: NumberId
+ second: NumberId
+
+- name: MathHypot3NumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ first: NumberId
+ second: NumberId
+ third: NumberId
+
+- name: MathHypot4NumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ first: NumberId
+ second: NumberId
+ third: NumberId
+ fourth: NumberId
+
+- name: MathAtan2NumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ lhs: NumberId
+ rhs: NumberId
+
+- name: MathFloorNumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ input: NumberId
+
+- name: MathCeilNumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ input: NumberId
+
+- name: MathTruncNumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ input: NumberId
+
+- name: MathFloorToInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ input: NumberId
+
+- name: MathCeilToInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: NumberId
+
+- name: MathTruncToInt32Result
+ shared: true
+ transpile: true
+ args:
+ input: NumberId
+
+- name: MathRoundToInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: NumberId
+
+- name: Int32MinMax
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ isMax: BoolImm
+ first: Int32Id
+ second: Int32Id
+ result: Int32Id
+
+- name: NumberMinMax
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ isMax: BoolImm
+ first: NumberId
+ second: NumberId
+ result: NumberId
+
+- name: Int32MinMaxArrayResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ array: ObjId
+ isMax: BoolImm
+
+- name: NumberMinMaxArrayResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ array: ObjId
+ isMax: BoolImm
+
+- name: MathFunctionNumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ input: NumberId
+ fun: UnaryMathFunctionImm
+
+- name: NumberParseIntResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+ radix: Int32Id
+
+- name: DoubleParseIntResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ num: NumberId
+
+- name: ObjectToStringResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+
+- name: ReflectGetPrototypeOfResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+
+- name: StoreTypedArrayElement
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ obj: ObjId
+ elementType: ScalarTypeImm
+ index: IntPtrId
+ rhs: RawId
+ handleOOB: BoolImm
+
+- name: AtomicsCompareExchangeResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ index: IntPtrId
+ expected: RawId
+ replacement: RawId
+ elementType: ScalarTypeImm
+
+- name: AtomicsExchangeResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ index: IntPtrId
+ value: RawId
+ elementType: ScalarTypeImm
+
+- name: AtomicsAddResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ index: IntPtrId
+ value: RawId
+ elementType: ScalarTypeImm
+ forEffect: BoolImm
+
+- name: AtomicsSubResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ index: IntPtrId
+ value: RawId
+ elementType: ScalarTypeImm
+ forEffect: BoolImm
+
+- name: AtomicsAndResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ index: IntPtrId
+ value: RawId
+ elementType: ScalarTypeImm
+ forEffect: BoolImm
+
+- name: AtomicsOrResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ index: IntPtrId
+ value: RawId
+ elementType: ScalarTypeImm
+ forEffect: BoolImm
+
+- name: AtomicsXorResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ index: IntPtrId
+ value: RawId
+ elementType: ScalarTypeImm
+ forEffect: BoolImm
+
+- name: AtomicsLoadResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ index: IntPtrId
+ elementType: ScalarTypeImm
+
+- name: AtomicsStoreResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ index: IntPtrId
+ value: RawId
+ elementType: ScalarTypeImm
+
+- name: AtomicsIsLockFreeResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ value: Int32Id
+
+- name: CallNativeSetter
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ custom_writer: true
+ args:
+ receiver: ObjId
+ setter: ObjectField
+ rhs: ValId
+ sameRealm: BoolImm
+ nargsAndFlags: RawInt32Field
+
+- name: CallScriptedSetter
+ shared: false
+ transpile: true
+ cost_estimate: 3
+ custom_writer: true
+ args:
+ receiver: ObjId
+ setter: ObjectField
+ rhs: ValId
+ sameRealm: BoolImm
+ nargsAndFlags: RawInt32Field
+
+- name: CallInlinedSetter
+ shared: false
+ transpile: true
+ cost_estimate: 3
+ custom_writer: true
+ args:
+ receiver: ObjId
+ setter: ObjectField
+ rhs: ValId
+ icScript: RawPointerField
+ sameRealm: BoolImm
+ nargsAndFlags: RawInt32Field
+
+- name: CallDOMSetter
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ jitInfo: RawPointerField
+ rhs: ValId
+
+- name: CallSetArrayLength
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ strict: BoolImm
+ rhs: ValId
+
+- name: ProxySet
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ id: IdField
+ rhs: ValId
+ strict: BoolImm
+
+- name: ProxySetByValue
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ id: ValId
+ rhs: ValId
+ strict: BoolImm
+
+- name: CallAddOrUpdateSparseElementHelper
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ id: Int32Id
+ rhs: ValId
+ strict: BoolImm
+
+- name: CallInt32ToString
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ input: Int32Id
+ result: StringId
+
+- name: CallNumberToString
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ input: NumberId
+ result: StringId
+
+- name: Int32ToStringWithBaseResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ input: Int32Id
+ base: Int32Id
+
+- name: BooleanToString
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ input: BooleanId
+ result: StringId
+
+- name: CallScriptedFunction
+ shared: false
+ transpile: true
+ cost_estimate: 3
+ custom_writer: true
+ args:
+ callee: ObjId
+ argc: Int32Id
+ flags: CallFlagsImm
+ argcFixed: UInt32Imm
+
+- name: CallBoundScriptedFunction
+ shared: false
+ transpile: true
+ cost_estimate: 3
+ args:
+ callee: ObjId
+ target: ObjId
+ argc: Int32Id
+ flags: CallFlagsImm
+ numBoundArgs: UInt32Imm
+
+- name: CallWasmFunction
+ shared: false
+ transpile: true
+ cost_estimate: 3
+ args:
+ callee: ObjId
+ argc: Int32Id
+ flags: CallFlagsImm
+ argcFixed: UInt32Imm
+ funcExport: RawPointerField
+ instance: ObjectField
+
+- name: GuardWasmArg
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ arg: ValId
+ type: WasmValTypeImm
+
+- name: CallNativeFunction
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ custom_writer: true
+ args:
+ callee: ObjId
+ argc: Int32Id
+ flags: CallFlagsImm
+ argcFixed: UInt32Imm
+#ifdef JS_SIMULATOR
+ target: RawPointerField
+#else
+ ignoresReturnValue: BoolImm
+#endif
+
+- name: CallDOMFunction
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ custom_writer: true
+ args:
+ callee: ObjId
+ argc: Int32Id
+ thisObj: ObjId
+ flags: CallFlagsImm
+ argcFixed: UInt32Imm
+#ifdef JS_SIMULATOR
+ target: RawPointerField
+#endif
+
+- name: CallClassHook
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ custom_writer: true
+ args:
+ callee: ObjId
+ argc: Int32Id
+ flags: CallFlagsImm
+ argcFixed: UInt32Imm
+ target: RawPointerField
+
+- name: CallInlinedFunction
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ custom_writer: true
+ args:
+ callee: ObjId
+ argc: Int32Id
+ icScript: RawPointerField
+ flags: CallFlagsImm
+ argcFixed: UInt32Imm
+
+
+# Meta ops generate no code, but contain data for the Warp Transpiler.
+- name: MetaScriptedThisShape
+ shared: true
+ transpile: true
+ cost_estimate: 0
+ custom_writer: true
+ args:
+ thisShape: ShapeField
+
+- name: BindFunctionResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ target: ObjId
+ argc: UInt32Imm
+ templateObject: ObjectField
+
+- name: SpecializedBindFunctionResult
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ args:
+ target: ObjId
+ argc: UInt32Imm
+ templateObject: ObjectField
+
+- name: LoadFixedSlotResult
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+
+- name: LoadFixedSlotTypedResult
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+ type: ValueTypeImm
+
+- name: LoadDynamicSlotResult
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ offset: RawInt32Field
+
+- name: LoadDenseElementResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: LoadDenseElementHoleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: CallGetSparseElementResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: LoadDenseElementExistsResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: LoadTypedArrayElementExistsResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ index: IntPtrId
+
+- name: LoadDenseElementHoleExistsResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: LoadTypedArrayElementResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ index: IntPtrId
+ elementType: ScalarTypeImm
+ handleOOB: BoolImm
+ forceDoubleForUint32: BoolImm
+
+- name: LoadDataViewValueResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ offset: IntPtrId
+ littleEndian: BooleanId
+ elementType: ScalarTypeImm
+ forceDoubleForUint32: BoolImm
+
+- name: StoreDataViewValueResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ offset: IntPtrId
+ value: RawId
+ littleEndian: BooleanId
+ elementType: ScalarTypeImm
+
+- name: LoadInt32ArrayLengthResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: LoadInt32ArrayLength
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ result: Int32Id
+
+- name: LoadArgumentsObjectArgResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: LoadArgumentsObjectArgHoleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: LoadArgumentsObjectArgExistsResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: LoadArgumentsObjectLengthResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: LoadArgumentsObjectLength
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ result: Int32Id
+
+- name: LoadFunctionLengthResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: LoadFunctionNameResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: LoadBoundFunctionNumArgs
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ result: Int32Id
+
+- name: LoadBoundFunctionTarget
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ result: ObjId
+
+- name: GuardBoundFunctionIsConstructor
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: LoadArrayBufferByteLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: LoadArrayBufferByteLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: LoadArrayBufferViewLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: LoadArrayBufferViewLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: LinearizeForCharAccess
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ str: StringId
+ index: Int32Id
+ result: StringId
+
+- name: LoadStringCharResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ str: StringId
+ index: Int32Id
+ handleOOB: BoolImm
+
+- name: LoadStringCharCodeResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ str: StringId
+ index: Int32Id
+ handleOOB: BoolImm
+
+- name: LoadStringLengthResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ str: StringId
+
+- name: FrameIsConstructingResult
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+
+- name: LoadObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
+- name: LoadStringResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ str: StringId
+
+- name: LoadSymbolResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ sym: SymbolId
+
+- name: LoadInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: Int32Id
+
+- name: LoadDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ val: NumberId
+
+- name: LoadBigIntResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: BigIntId
+
+- name: CallScriptedGetterResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ custom_writer: true
+ args:
+ receiver: ValId
+ getter: ObjectField
+ sameRealm: BoolImm
+ nargsAndFlags: RawInt32Field
+
+- name: CallInlinedGetterResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ custom_writer: true
+ args:
+ receiver: ValId
+ getter: ObjectField
+ icScript: RawPointerField
+ sameRealm: BoolImm
+ nargsAndFlags: RawInt32Field
+
+- name: CallNativeGetterResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ custom_writer: true
+ args:
+ receiver: ValId
+ getter: ObjectField
+ sameRealm: BoolImm
+ nargsAndFlags: RawInt32Field
+
+- name: CallDOMGetterResult
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ jitInfo: RawPointerField
+
+- name: ProxyGetResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ id: IdField
+
+- name: ProxyGetByValueResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ id: ValId
+
+- name: ProxyHasPropResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ id: ValId
+ hasOwn: BoolImm
+
+- name: CallObjectHasSparseElementResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: CallNativeGetElementResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ index: Int32Id
+
+- name: CallNativeGetElementSuperResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ index: Int32Id
+ receiver: ValId
+
+- name: GetNextMapSetEntryForIteratorResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ iter: ObjId
+ resultArr: ObjId
+ isMap: BoolImm
+
+- name: LoadUndefinedResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+
+- name: LoadBooleanResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: BoolImm
+
+- name: LoadInt32Constant
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: RawInt32Field
+ result: Int32Id
+
+- name: LoadDoubleConstant
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: DoubleField
+ result: NumberId
+
+- name: LoadBooleanConstant
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: BoolImm
+ result: BooleanId
+
+- name: LoadUndefined
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ result: ValId
+
+- name: LoadConstantString
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ str: StringField
+ result: StringId
+
+- name: LoadConstantStringResult
+ shared: false
+ transpile: true
+ cost_estimate: 1
+ args:
+ str: StringField
+
+- name: LoadInstanceOfObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ lhs: ValId
+ proto: ObjId
+
+- name: LoadTypeOfObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+
+- name: DoubleAddResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ lhs: NumberId
+ rhs: NumberId
+
+- name: DoubleSubResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ lhs: NumberId
+ rhs: NumberId
+
+- name: DoubleMulResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ lhs: NumberId
+ rhs: NumberId
+
+- name: DoubleDivResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ lhs: NumberId
+ rhs: NumberId
+
+- name: DoubleModResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ lhs: NumberId
+ rhs: NumberId
+
+- name: DoublePowResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ lhs: NumberId
+ rhs: NumberId
+
+- name: Int32AddResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32SubResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32MulResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32DivResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32ModResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32PowResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: BigIntAddResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntSubResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntMulResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntDivResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntModResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntPowResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: Int32BitOrResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32BitXorResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32BitAndResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32LeftShiftResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32RightShiftResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: Int32URightShiftResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ lhs: Int32Id
+ rhs: Int32Id
+ forceDouble: BoolImm
+
+- name: Int32NotResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: Int32Id
+
+- name: BigIntBitOrResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntBitXorResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntBitAndResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntLeftShiftResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntRightShiftResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: BigIntNotResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ input: BigIntId
+
+- name: Int32NegationResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: Int32Id
+
+- name: DoubleNegationResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: NumberId
+
+- name: BigIntNegationResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ input: BigIntId
+
+- name: Int32IncResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: Int32Id
+
+- name: Int32DecResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: Int32Id
+
+- name: DoubleIncResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: NumberId
+
+- name: DoubleDecResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: NumberId
+
+- name: BigIntIncResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ input: BigIntId
+
+- name: BigIntDecResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ input: BigIntId
+
+- name: LoadInt32TruthyResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ input: ValId
+
+- name: LoadDoubleTruthyResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ input: NumberId
+
+- name: LoadStringTruthyResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ str: StringId
+
+- name: LoadObjectTruthyResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+
+- name: LoadBigIntTruthyResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ bigInt: BigIntId
+
+- name: LoadValueTruthyResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ input: ValId
+
+- name: LoadValueResult
+ shared: false
+ transpile: false
+ cost_estimate: 1
+ args:
+ val: ValueField
+
+- name: LoadOperandResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ input: ValId
+
+- name: NewPlainObjectResult
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ args:
+ numFixedSlots: UInt32Imm
+ numDynamicSlots: UInt32Imm
+ allocKind: AllocKindImm
+ shape: ShapeField
+ site: AllocSiteField
+
+- name: NewArrayObjectResult
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ args:
+ arrayLength: UInt32Imm
+ shape: ShapeField
+ site: AllocSiteField
+
+- name: CallStringConcatResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ lhs: StringId
+ rhs: StringId
+
+- name: CallStringObjectConcatResult
+ shared: false
+ transpile: false
+ cost_estimate: 5
+ args:
+ lhs: ValId
+ rhs: ValId
+
+- name: CallIsSuspendedGeneratorResult
+ shared: true
+ transpile: false
+ cost_estimate: 2
+ args:
+ val: ValId
+
+- name: CompareStringResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ op: JSOpImm
+ lhs: StringId
+ rhs: StringId
+
+- name: CompareObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ op: JSOpImm
+ lhs: ObjId
+ rhs: ObjId
+
+- name: CompareSymbolResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ op: JSOpImm
+ lhs: SymbolId
+ rhs: SymbolId
+
+- name: CompareInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ op: JSOpImm
+ lhs: Int32Id
+ rhs: Int32Id
+
+- name: CompareDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ op: JSOpImm
+ lhs: NumberId
+ rhs: NumberId
+
+- name: CompareBigIntResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ op: JSOpImm
+ lhs: BigIntId
+ rhs: BigIntId
+
+- name: CompareBigIntInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ op: JSOpImm
+ lhs: BigIntId
+ rhs: Int32Id
+
+- name: CompareBigIntNumberResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ op: JSOpImm
+ lhs: BigIntId
+ rhs: NumberId
+
+- name: CompareBigIntStringResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ op: JSOpImm
+ lhs: BigIntId
+ rhs: StringId
+
+- name: CompareNullUndefinedResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ op: JSOpImm
+ isUndefined: BoolImm
+ input: ValId
+
+- name: CompareDoubleSameValueResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ lhs: NumberId
+ rhs: NumberId
+
+- name: SameValueResult
+ shared: false
+ transpile: true
+ cost_estimate: 4
+ args:
+ lhs: ValId
+ rhs: ValId
+
+- name: IndirectTruncateInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: Int32Id
+
+- name: BigIntAsIntNResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ bits: Int32Id
+ bigInt: BigIntId
+
+- name: BigIntAsUintNResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ bits: Int32Id
+ bigInt: BigIntId
+
+- name: SetHasResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ set: ObjId
+ val: ValId
+
+- name: SetHasNonGCThingResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ set: ObjId
+ val: ValId
+
+- name: SetHasStringResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ set: ObjId
+ str: StringId
+
+- name: SetHasSymbolResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ set: ObjId
+ sym: SymbolId
+
+- name: SetHasBigIntResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ set: ObjId
+ bigInt: BigIntId
+
+- name: SetHasObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ set: ObjId
+ obj: ObjId
+
+- name: SetSizeResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ set: ObjId
+
+- name: MapHasResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ map: ObjId
+ val: ValId
+
+- name: MapHasNonGCThingResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ map: ObjId
+ val: ValId
+
+- name: MapHasStringResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ map: ObjId
+ str: StringId
+
+- name: MapHasSymbolResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ map: ObjId
+ sym: SymbolId
+
+- name: MapHasBigIntResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ map: ObjId
+ bigInt: BigIntId
+
+- name: MapHasObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ map: ObjId
+ obj: ObjId
+
+- name: MapGetResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ map: ObjId
+ val: ValId
+
+- name: MapGetNonGCThingResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ map: ObjId
+ val: ValId
+
+- name: MapGetStringResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ map: ObjId
+ str: StringId
+
+- name: MapGetSymbolResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ map: ObjId
+ sym: SymbolId
+
+- name: MapGetBigIntResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ map: ObjId
+ bigInt: BigIntId
+
+- name: MapGetObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 3
+ args:
+ map: ObjId
+ obj: ObjId
+
+- name: MapSizeResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ map: ObjId
+
+- name: ArrayFromArgumentsObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 5
+ args:
+ obj: ObjId
+ shape: ShapeField
+
+- name: CloseIterScriptedResult
+ shared: false
+ transpile: true
+ cost_estimate: 5
+ args:
+ iter: ObjId
+ callee: ObjId
+ kind: CompletionKindImm
+ targetNargs: UInt32Imm
+
+- name: CallPrintString
+ shared: true
+ transpile: false
+ cost_estimate: 1
+ args:
+ str: StaticStringImm
+
+- name: Breakpoint
+ shared: true
+ transpile: false
+ cost_estimate: 1
+ args:
+
+- name: WrapResult
+ shared: true
+ transpile: false
+ cost_estimate: 4
+ args:
+
+- name: Bailout
+ shared: true
+ transpile: true
+ cost_estimate: 0
+ args:
+
+- name: AssertRecoveredOnBailoutResult
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ val: ValId
+ mustBeRecovered: BoolImm
+
+- name: AssertPropertyLookup
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ id: IdField
+ slot: RawInt32Field
+
+#ifdef FUZZING_JS_FUZZILLI
+- name: FuzzilliHashResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ val: ValId
+#endif
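
For orientation: each entry in this table pairs an op name with the serialized argument kinds the writer and reader agree on; shared ops appear to get a single compiler-shared code path, transpile marks ops that can be turned into MIR, and cost_estimate is a rough weight, a reading inferred from how these fields are used elsewhere in this patch rather than stated in the table itself. A minimal decoding sketch for one entry, using the CacheIRReader helpers added below (illustrative only):

    #include "jit/CacheIRReader.h"

    // Illustrative only: after readOp() has returned CacheOp::CompareInt32Result,
    // its arguments decode in the order listed above (op: JSOpImm, lhs/rhs: Int32Id).
    static void ReadCompareInt32ResultArgs(js::jit::CacheIRReader& reader) {
      JSOp op = reader.jsop();                                // JSOpImm
      js::jit::Int32OperandId lhs = reader.int32OperandId();  // Int32Id
      js::jit::Int32OperandId rhs = reader.int32OperandId();  // Int32Id
      (void)op; (void)lhs; (void)rhs;  // a real consumer would dispatch on these
    }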
diff --git a/js/src/jit/CacheIRReader.h b/js/src/jit/CacheIRReader.h
new file mode 100644
index 0000000000..8326c1d066
--- /dev/null
+++ b/js/src/jit/CacheIRReader.h
@@ -0,0 +1,155 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CacheIRReader_h
+#define jit_CacheIRReader_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+#include <stdint.h>
+#include "NamespaceImports.h"
+
+#include "jit/CacheIR.h"
+#include "jit/CacheIRWriter.h"
+#include "jit/CompactBuffer.h"
+#include "js/ScalarType.h"
+#include "js/Value.h"
+#include "wasm/WasmValType.h"
+
+enum class JSOp : uint8_t;
+
+namespace js {
+
+enum class UnaryMathFunction : uint8_t;
+
+namespace gc {
+enum class AllocKind : uint8_t;
+}
+
+namespace jit {
+
+class CacheIRStubInfo;
+
+// Helper class for reading CacheIR bytecode.
+class MOZ_RAII CacheIRReader {
+ CompactBufferReader buffer_;
+
+ CacheIRReader(const CacheIRReader&) = delete;
+ CacheIRReader& operator=(const CacheIRReader&) = delete;
+
+ public:
+ CacheIRReader(const uint8_t* start, const uint8_t* end)
+ : buffer_(start, end) {}
+ explicit CacheIRReader(const CacheIRWriter& writer)
+ : CacheIRReader(writer.codeStart(), writer.codeEnd()) {}
+ explicit CacheIRReader(const CacheIRStubInfo* stubInfo);
+
+ bool more() const { return buffer_.more(); }
+
+ CacheOp readOp() { return CacheOp(buffer_.readUnsigned15Bit()); }
+
+ // Skip data not currently used.
+ void skip() { buffer_.readByte(); }
+ void skip(uint32_t skipLength) {
+ if (skipLength > 0) {
+ buffer_.seek(buffer_.currentPosition(), skipLength);
+ }
+ }
+
+ ValOperandId valOperandId() { return ValOperandId(buffer_.readByte()); }
+ ValueTagOperandId valueTagOperandId() {
+ return ValueTagOperandId(buffer_.readByte());
+ }
+
+ IntPtrOperandId intPtrOperandId() {
+ return IntPtrOperandId(buffer_.readByte());
+ }
+
+ ObjOperandId objOperandId() { return ObjOperandId(buffer_.readByte()); }
+ NumberOperandId numberOperandId() {
+ return NumberOperandId(buffer_.readByte());
+ }
+ StringOperandId stringOperandId() {
+ return StringOperandId(buffer_.readByte());
+ }
+
+ SymbolOperandId symbolOperandId() {
+ return SymbolOperandId(buffer_.readByte());
+ }
+
+ BigIntOperandId bigIntOperandId() {
+ return BigIntOperandId(buffer_.readByte());
+ }
+
+ BooleanOperandId booleanOperandId() {
+ return BooleanOperandId(buffer_.readByte());
+ }
+
+ Int32OperandId int32OperandId() { return Int32OperandId(buffer_.readByte()); }
+
+ uint32_t rawOperandId() { return buffer_.readByte(); }
+
+ uint32_t stubOffset() { return buffer_.readByte() * sizeof(uintptr_t); }
+ GuardClassKind guardClassKind() { return GuardClassKind(buffer_.readByte()); }
+ ValueType valueType() { return ValueType(buffer_.readByte()); }
+ wasm::ValType::Kind wasmValType() {
+ return wasm::ValType::Kind(buffer_.readByte());
+ }
+ gc::AllocKind allocKind() { return gc::AllocKind(buffer_.readByte()); }
+ CompletionKind completionKind() { return CompletionKind(buffer_.readByte()); }
+
+ Scalar::Type scalarType() { return Scalar::Type(buffer_.readByte()); }
+ JSWhyMagic whyMagic() { return JSWhyMagic(buffer_.readByte()); }
+ JSOp jsop() { return JSOp(buffer_.readByte()); }
+ int32_t int32Immediate() { return int32_t(buffer_.readFixedUint32_t()); }
+ uint32_t uint32Immediate() { return buffer_.readFixedUint32_t(); }
+ void* pointer() { return buffer_.readRawPointer(); }
+
+ UnaryMathFunction unaryMathFunction() {
+ return UnaryMathFunction(buffer_.readByte());
+ }
+
+ CallFlags callFlags() {
+ // See CacheIRWriter::writeCallFlagsImm()
+ uint8_t encoded = buffer_.readByte();
+ CallFlags::ArgFormat format =
+ CallFlags::ArgFormat(encoded & CallFlags::ArgFormatMask);
+ bool isConstructing = encoded & CallFlags::IsConstructing;
+ bool isSameRealm = encoded & CallFlags::IsSameRealm;
+ bool needsUninitializedThis = encoded & CallFlags::NeedsUninitializedThis;
+ MOZ_ASSERT_IF(needsUninitializedThis, isConstructing);
+ switch (format) {
+ case CallFlags::Unknown:
+ MOZ_CRASH("Unexpected call flags");
+ case CallFlags::Standard:
+ return CallFlags(isConstructing, /*isSpread =*/false, isSameRealm,
+ needsUninitializedThis);
+ case CallFlags::Spread:
+ return CallFlags(isConstructing, /*isSpread =*/true, isSameRealm,
+ needsUninitializedThis);
+ default:
+ // The existing non-standard argument formats (FunCall and FunApply)
+ // can't be constructors.
+ MOZ_ASSERT(!isConstructing);
+ return CallFlags(format);
+ }
+ }
+
+ uint8_t readByte() { return buffer_.readByte(); }
+ bool readBool() {
+ uint8_t b = buffer_.readByte();
+ MOZ_ASSERT(b <= 1);
+ return bool(b);
+ }
+
+ const uint8_t* currentPosition() const { return buffer_.currentPosition(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CacheIRReader_h */
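
A minimal sketch of driving this reader from a writer, assuming CacheIROpInfos[].argLength is available as used by CacheIRWriter::assertLengthMatches() later in this patch:

    #include "jit/CacheIR.h"        // CacheIROpInfos (assumed to be reachable here)
    #include "jit/CacheIRReader.h"
    #include "jit/CacheIRWriter.h"

    // Illustrative only: walk the ops a successfully-built writer produced,
    // skipping each op's fixed-length argument block.
    static size_t CountCacheIROps(const js::jit::CacheIRWriter& writer) {
      MOZ_ASSERT(!writer.failed());
      js::jit::CacheIRReader reader(writer);  // reads [codeStart(), codeEnd())
      size_t numOps = 0;
      while (reader.more()) {
        js::jit::CacheOp op = reader.readOp();
        // Advance past the arguments so the next readOp() starts at an op boundary.
        reader.skip(js::jit::CacheIROpInfos[size_t(op)].argLength);
        numOps++;
      }
      return numOps;
    }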
diff --git a/js/src/jit/CacheIRSpewer.cpp b/js/src/jit/CacheIRSpewer.cpp
new file mode 100644
index 0000000000..ef6dbac35b
--- /dev/null
+++ b/js/src/jit/CacheIRSpewer.cpp
@@ -0,0 +1,441 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef JS_CACHEIR_SPEW
+
+# include "jit/CacheIRSpewer.h"
+
+# include "mozilla/Sprintf.h"
+
+# include <algorithm>
+# include <stdarg.h>
+
+# include "jsapi.h"
+# include "jsmath.h"
+
+# include "js/ScalarType.h" // js::Scalar::Type
+# include "util/GetPidProvider.h"
+# include "util/Text.h"
+# include "vm/JSFunction.h"
+# include "vm/JSObject.h"
+# include "vm/JSScript.h"
+
+# include "vm/JSObject-inl.h"
+# include "vm/Realm-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Text spewer for CacheIR ops that can be used with JitSpew.
+// Output looks like this:
+//
+// GuardToInt32 inputId 0, resultId 2
+// GuardToInt32 inputId 1, resultId 3
+// CompareInt32Result op JSOp::Lt, lhsId 2, rhsId 3
+// ReturnFromIC
+class MOZ_RAII CacheIROpsJitSpewer {
+ GenericPrinter& out_;
+
+ // String prepended to each line. Can be used for indentation.
+ const char* prefix_;
+
+ CACHE_IR_SPEWER_GENERATED
+
+ void spewOp(CacheOp op) {
+ const char* opName = CacheIROpNames[size_t(op)];
+ out_.printf("%s%-30s", prefix_, opName);
+ }
+ void spewOpEnd() { out_.printf("\n"); }
+
+ void spewArgSeparator() { out_.printf(", "); }
+
+ void spewOperandId(const char* name, OperandId id) {
+ spewRawOperandId(name, id.id());
+ }
+ void spewRawOperandId(const char* name, uint32_t id) {
+ out_.printf("%s %u", name, id);
+ }
+ void spewField(const char* name, uint32_t offset) {
+ out_.printf("%s %u", name, offset);
+ }
+ void spewBoolImm(const char* name, bool b) {
+ out_.printf("%s %s", name, b ? "true" : "false");
+ }
+ void spewByteImm(const char* name, uint8_t val) {
+ out_.printf("%s %u", name, val);
+ }
+ void spewJSOpImm(const char* name, JSOp op) {
+ out_.printf("%s JSOp::%s", name, CodeName(op));
+ }
+ void spewStaticStringImm(const char* name, const char* str) {
+ out_.printf("%s \"%s\"", name, str);
+ }
+ void spewInt32Imm(const char* name, int32_t val) {
+ out_.printf("%s %d", name, val);
+ }
+ void spewUInt32Imm(const char* name, uint32_t val) {
+ out_.printf("%s %u", name, val);
+ }
+ void spewCallFlagsImm(const char* name, CallFlags flags) {
+ out_.printf(
+ "%s (format %u%s%s%s)", name, flags.getArgFormat(),
+ flags.isConstructing() ? ", isConstructing" : "",
+ flags.isSameRealm() ? ", isSameRealm" : "",
+ flags.needsUninitializedThis() ? ", needsUninitializedThis" : "");
+ }
+ void spewJSWhyMagicImm(const char* name, JSWhyMagic magic) {
+ out_.printf("%s JSWhyMagic(%u)", name, unsigned(magic));
+ }
+ void spewScalarTypeImm(const char* name, Scalar::Type type) {
+ out_.printf("%s Scalar::Type(%u)", name, unsigned(type));
+ }
+ void spewUnaryMathFunctionImm(const char* name, UnaryMathFunction fun) {
+ const char* funName = GetUnaryMathFunctionName(fun);
+ out_.printf("%s UnaryMathFunction::%s", name, funName);
+ }
+ void spewValueTypeImm(const char* name, ValueType type) {
+ out_.printf("%s ValueType(%u)", name, unsigned(type));
+ }
+ void spewJSNativeImm(const char* name, JSNative native) {
+ out_.printf("%s %p", name, native);
+ }
+ void spewGuardClassKindImm(const char* name, GuardClassKind kind) {
+ out_.printf("%s GuardClassKind(%u)", name, unsigned(kind));
+ }
+ void spewWasmValTypeImm(const char* name, wasm::ValType::Kind kind) {
+ out_.printf("%s WasmValTypeKind(%u)", name, unsigned(kind));
+ }
+ void spewAllocKindImm(const char* name, gc::AllocKind kind) {
+ out_.printf("%s AllocKind(%u)", name, unsigned(kind));
+ }
+ void spewCompletionKindImm(const char* name, CompletionKind kind) {
+ out_.printf("%s CompletionKind(%u)", name, unsigned(kind));
+ }
+
+ public:
+ CacheIROpsJitSpewer(GenericPrinter& out, const char* prefix)
+ : out_(out), prefix_(prefix) {}
+
+ void spew(CacheIRReader& reader) {
+ do {
+ switch (reader.readOp()) {
+# define SPEW_OP(op, ...) \
+ case CacheOp::op: \
+ spew##op(reader); \
+ break;
+ CACHE_IR_OPS(SPEW_OP)
+# undef SPEW_OP
+
+ default:
+ MOZ_CRASH("Invalid op");
+ }
+ } while (reader.more());
+ }
+};
+
+void js::jit::SpewCacheIROps(GenericPrinter& out, const char* prefix,
+ const CacheIRStubInfo* info) {
+ CacheIRReader reader(info);
+ CacheIROpsJitSpewer spewer(out, prefix);
+ spewer.spew(reader);
+}
+
+// JSON spewer for CacheIR ops. Output looks like this:
+//
+// ...
+// {
+// "op":"GuardToInt32",
+// "args":[
+// {
+// "name":"inputId",
+// "type":"Id",
+// "value":0
+// },
+// {
+// "name":"resultId",
+// "type":"Id",
+// "value":1
+// }
+// ]
+// },
+// {
+// "op":"Int32IncResult",
+// "args":[
+// {
+// "name":"inputId",
+// "type":"Id",
+// "value":1
+// }
+// ]
+// }
+// ...
+class MOZ_RAII CacheIROpsJSONSpewer {
+ JSONPrinter& j_;
+
+ CACHE_IR_SPEWER_GENERATED
+
+ void spewOp(CacheOp op) {
+ const char* opName = CacheIROpNames[size_t(op)];
+ j_.beginObject();
+ j_.property("op", opName);
+ j_.beginListProperty("args");
+ }
+ void spewOpEnd() {
+ j_.endList();
+ j_.endObject();
+ }
+
+ void spewArgSeparator() {}
+
+ template <typename T>
+ void spewArgImpl(const char* name, const char* type, T value) {
+ j_.beginObject();
+ j_.property("name", name);
+ j_.property("type", type);
+ j_.property("value", value);
+ j_.endObject();
+ }
+
+ void spewOperandId(const char* name, OperandId id) {
+ spewRawOperandId(name, id.id());
+ }
+ void spewRawOperandId(const char* name, uint32_t id) {
+ spewArgImpl(name, "Id", id);
+ }
+ void spewField(const char* name, uint32_t offset) {
+ spewArgImpl(name, "Field", offset);
+ }
+ void spewBoolImm(const char* name, bool b) { spewArgImpl(name, "Imm", b); }
+ void spewByteImm(const char* name, uint8_t val) {
+ spewArgImpl(name, "Imm", val);
+ }
+ void spewJSOpImm(const char* name, JSOp op) {
+ spewArgImpl(name, "JSOp", CodeName(op));
+ }
+ void spewStaticStringImm(const char* name, const char* str) {
+ spewArgImpl(name, "String", str);
+ }
+ void spewInt32Imm(const char* name, int32_t val) {
+ spewArgImpl(name, "Imm", val);
+ }
+ void spewUInt32Imm(const char* name, uint32_t val) {
+ spewArgImpl(name, "Imm", val);
+ }
+ void spewCallFlagsImm(const char* name, CallFlags flags) {
+ spewArgImpl(name, "Imm", flags.toByte());
+ }
+ void spewJSWhyMagicImm(const char* name, JSWhyMagic magic) {
+ spewArgImpl(name, "Imm", unsigned(magic));
+ }
+ void spewScalarTypeImm(const char* name, Scalar::Type type) {
+ spewArgImpl(name, "Imm", unsigned(type));
+ }
+ void spewUnaryMathFunctionImm(const char* name, UnaryMathFunction fun) {
+ const char* funName = GetUnaryMathFunctionName(fun);
+ spewArgImpl(name, "MathFunction", funName);
+ }
+ void spewValueTypeImm(const char* name, ValueType type) {
+ spewArgImpl(name, "Imm", unsigned(type));
+ }
+ void spewJSNativeImm(const char* name, JSNative native) {
+ spewArgImpl(name, "Word", uintptr_t(native));
+ }
+ void spewGuardClassKindImm(const char* name, GuardClassKind kind) {
+ spewArgImpl(name, "Imm", unsigned(kind));
+ }
+ void spewWasmValTypeImm(const char* name, wasm::ValType::Kind kind) {
+ spewArgImpl(name, "Imm", unsigned(kind));
+ }
+ void spewAllocKindImm(const char* name, gc::AllocKind kind) {
+ spewArgImpl(name, "Imm", unsigned(kind));
+ }
+ void spewCompletionKindImm(const char* name, CompletionKind kind) {
+ spewArgImpl(name, "Imm", unsigned(kind));
+ }
+
+ public:
+ explicit CacheIROpsJSONSpewer(JSONPrinter& j) : j_(j) {}
+
+ void spew(CacheIRReader& reader) {
+ do {
+ switch (reader.readOp()) {
+# define SPEW_OP(op, ...) \
+ case CacheOp::op: \
+ spew##op(reader); \
+ break;
+ CACHE_IR_OPS(SPEW_OP)
+# undef SPEW_OP
+
+ default:
+ MOZ_CRASH("Invalid op");
+ }
+ } while (reader.more());
+ }
+};
+
+CacheIRSpewer CacheIRSpewer::cacheIRspewer;
+
+CacheIRSpewer::CacheIRSpewer()
+ : outputLock_(mutexid::CacheIRSpewer), guardCount_(0) {
+ spewInterval_ =
+ getenv("CACHEIR_LOG_FLUSH") ? atoi(getenv("CACHEIR_LOG_FLUSH")) : 10000;
+
+ if (spewInterval_ < 1) {
+ spewInterval_ = 1;
+ }
+}
+
+CacheIRSpewer::~CacheIRSpewer() {
+ if (!enabled()) {
+ return;
+ }
+
+ json_.ref().endList();
+ output_.flush();
+ output_.finish();
+}
+
+# ifndef JIT_SPEW_DIR
+# if defined(_WIN32)
+# define JIT_SPEW_DIR "."
+# elif defined(__ANDROID__)
+# define JIT_SPEW_DIR "/data/local/tmp"
+# else
+# define JIT_SPEW_DIR "/tmp"
+# endif
+# endif
+
+bool CacheIRSpewer::init(const char* filename) {
+ if (enabled()) {
+ return true;
+ }
+
+ char name[256];
+ uint32_t pid = getpid();
+  // A filename starting with '1' (the usual "enable" value) selects the
+  // default JIT_SPEW_DIR/cacheir${pid}.json; anything else is used as a
+  // filename prefix.
+ if (filename[0] == '1') {
+ SprintfLiteral(name, JIT_SPEW_DIR "/cacheir%" PRIu32 ".json", pid);
+ } else {
+ SprintfLiteral(name, "%s%" PRIu32 ".json", filename, pid);
+ }
+
+ if (!output_.init(name)) {
+ return false;
+ }
+
+ json_.emplace(output_);
+ json_->beginList();
+ return true;
+}
+
+void CacheIRSpewer::beginCache(const IRGenerator& gen) {
+ MOZ_ASSERT(enabled());
+ JSONPrinter& j = json_.ref();
+ const char* filename = gen.script_->filename();
+ j.beginObject();
+ j.property("name", CacheKindNames[uint8_t(gen.cacheKind_)]);
+ j.property("file", filename ? filename : "null");
+ j.property("mode", int(gen.mode_));
+ if (jsbytecode* pc = gen.pc_) {
+ unsigned column;
+ j.property("line", PCToLineNumber(gen.script_, pc, &column));
+ j.property("column", column);
+ j.formatProperty("pc", "%p", pc);
+ }
+}
+
+void CacheIRSpewer::valueProperty(const char* name, const Value& v) {
+ MOZ_ASSERT(enabled());
+ JSONPrinter& j = json_.ref();
+
+ j.beginObjectProperty(name);
+
+ const char* type = InformalValueTypeName(v);
+ if (v.isInt32()) {
+ type = "int32";
+ }
+ j.property("type", type);
+
+ if (v.isInt32()) {
+ j.property("value", v.toInt32());
+ } else if (v.isDouble()) {
+ j.floatProperty("value", v.toDouble(), 3);
+ } else if (v.isString() || v.isSymbol()) {
+ JSString* str = v.isString() ? v.toString() : v.toSymbol()->description();
+ if (str && str->isLinear()) {
+ j.property("value", &str->asLinear());
+ }
+ } else if (v.isObject()) {
+ JSObject& object = v.toObject();
+ j.formatProperty("value", "%p (shape: %p)", &object, object.shape());
+
+ if (object.is<JSFunction>()) {
+ if (JSAtom* name = object.as<JSFunction>().displayAtom()) {
+ j.property("funName", name);
+ }
+ }
+
+ if (NativeObject* nobj =
+ object.is<NativeObject>() ? &object.as<NativeObject>() : nullptr) {
+ j.beginListProperty("flags");
+ {
+ if (nobj->isIndexed()) {
+ j.value("indexed");
+ }
+ if (nobj->inDictionaryMode()) {
+ j.value("dictionaryMode");
+ }
+ }
+ j.endList();
+ if (nobj->isIndexed()) {
+ j.beginObjectProperty("indexed");
+ {
+ j.property("denseInitializedLength",
+ nobj->getDenseInitializedLength());
+ j.property("denseCapacity", nobj->getDenseCapacity());
+ j.property("denseElementsAreSealed", nobj->denseElementsAreSealed());
+ j.property("denseElementsAreFrozen", nobj->denseElementsAreFrozen());
+ }
+ j.endObject();
+ }
+ }
+ }
+
+ j.endObject();
+}
+
+void CacheIRSpewer::opcodeProperty(const char* name, const JSOp op) {
+ MOZ_ASSERT(enabled());
+ JSONPrinter& j = json_.ref();
+
+ j.beginStringProperty(name);
+ output_.put(CodeName(op));
+ j.endStringProperty();
+}
+
+void CacheIRSpewer::cacheIRSequence(CacheIRReader& reader) {
+ MOZ_ASSERT(enabled());
+ JSONPrinter& j = json_.ref();
+
+ j.beginListProperty("cacheIR");
+
+ CacheIROpsJSONSpewer spewer(j);
+ spewer.spew(reader);
+
+ j.endList();
+}
+
+void CacheIRSpewer::attached(const char* name) {
+ MOZ_ASSERT(enabled());
+ json_.ref().property("attached", name);
+}
+
+void CacheIRSpewer::endCache() {
+ MOZ_ASSERT(enabled());
+ json_.ref().endObject();
+}
+
+#endif /* JS_CACHEIR_SPEW */
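
A usage sketch for the text spewer entry point; the Fprinter FILE* constructor and the Printer header path are assumptions, not confirmed by this patch:

    #ifdef JS_CACHEIR_SPEW
    # include <stdio.h>
    # include "jit/CacheIRSpewer.h"
    # include "vm/Printer.h"  // Fprinter/GenericPrinter (header location assumed)

    // Illustrative only: dump a stub's CacheIR ops as text, two-space indented.
    static void DumpStubCacheIR(const js::jit::CacheIRStubInfo* stubInfo) {
      js::Fprinter out(stderr);  // assumes Fprinter's FILE* constructor
      js::jit::SpewCacheIROps(out, "  ", stubInfo);
      out.flush();
    }
    #endif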
diff --git a/js/src/jit/CacheIRSpewer.h b/js/src/jit/CacheIRSpewer.h
new file mode 100644
index 0000000000..fba33ba990
--- /dev/null
+++ b/js/src/jit/CacheIRSpewer.h
@@ -0,0 +1,116 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CacheIRSpewer_h
+#define jit_CacheIRSpewer_h
+
+#ifdef JS_CACHEIR_SPEW
+
+# include "mozilla/Maybe.h"
+
+# include "jit/CacheIR.h"
+# include "jit/CacheIRGenerator.h"
+# include "jit/CacheIRReader.h"
+# include "jit/CacheIRWriter.h"
+# include "js/TypeDecls.h"
+# include "threading/LockGuard.h"
+# include "vm/JSONPrinter.h"
+# include "vm/MutexIDs.h"
+
+namespace js {
+namespace jit {
+
+class CacheIRSpewer {
+ Mutex outputLock_ MOZ_UNANNOTATED;
+ Fprinter output_;
+ mozilla::Maybe<JSONPrinter> json_;
+ static CacheIRSpewer cacheIRspewer;
+
+  // Counter recording how many times the Guard class has been used. This is
+  // used to determine when to flush output based on the given interval value.
+ // For example, if |spewInterval_ = 2|, outputs will be flushed on
+ // guardCount_ values 0,2,4,6,...
+ uint32_t guardCount_;
+
+ // Interval at which to flush output files. This value can be set with the
+ // environment variable |CACHEIR_LOG_FLUSH|.
+ uint32_t spewInterval_;
+
+ CacheIRSpewer();
+ ~CacheIRSpewer();
+
+ bool enabled() { return json_.isSome(); }
+
+ // These methods can only be called when enabled() is true.
+ Mutex& lock() {
+ MOZ_ASSERT(enabled());
+ return outputLock_;
+ }
+
+ void beginCache(const IRGenerator& generator);
+ void valueProperty(const char* name, const Value& v);
+ void opcodeProperty(const char* name, const JSOp op);
+ void cacheIRSequence(CacheIRReader& reader);
+ void attached(const char* name);
+ void endCache();
+
+ public:
+ static CacheIRSpewer& singleton() { return cacheIRspewer; }
+ bool init(const char* name);
+
+ class MOZ_RAII Guard {
+ CacheIRSpewer& sp_;
+ const IRGenerator& gen_;
+ const char* name_;
+
+ public:
+ Guard(const IRGenerator& gen, const char* name)
+ : sp_(CacheIRSpewer::singleton()), gen_(gen), name_(name) {
+ if (sp_.enabled()) {
+ sp_.lock().lock();
+ sp_.beginCache(gen_);
+ }
+ }
+
+ ~Guard() {
+ if (sp_.enabled()) {
+ const CacheIRWriter& writer = gen_.writerRef();
+ if (!writer.failed() && writer.codeLength() > 0) {
+ CacheIRReader reader(writer);
+ sp_.cacheIRSequence(reader);
+ }
+ if (name_ != nullptr) {
+ sp_.attached(name_);
+ }
+ sp_.endCache();
+ if (sp_.guardCount_++ % sp_.spewInterval_ == 0) {
+ sp_.output_.flush();
+ }
+ sp_.lock().unlock();
+ }
+ }
+
+ void valueProperty(const char* name, const Value& v) const {
+ sp_.valueProperty(name, v);
+ }
+
+ void opcodeProperty(const char* name, const JSOp op) const {
+ sp_.opcodeProperty(name, op);
+ }
+
+ explicit operator bool() const { return sp_.enabled(); }
+ };
+};
+
+extern void SpewCacheIROps(GenericPrinter& out, const char* prefix,
+ const CacheIRStubInfo* info);
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_CACHEIR_SPEW */
+
+#endif /* jit_CacheIRSpewer_h */
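
A hedged sketch of how generator code might drive the Guard class; the function, stub name, and parameters below are hypothetical and only stand in for an IRGenerator's own state:

    #ifdef JS_CACHEIR_SPEW
    # include "jit/CacheIRSpewer.h"

    // Hypothetical sketch: `gen` is some IRGenerator, `v` a Value it inspected,
    // and "ExampleStub" a made-up stub name.
    static void SpewExampleCache(const js::jit::IRGenerator& gen,
                                 const JS::Value& v, JSOp op) {
      js::jit::CacheIRSpewer::Guard spew(gen, /* name = */ "ExampleStub");
      if (spew) {  // true only when the spewer was initialized
        spew.valueProperty("receiver", v);
        spew.opcodeProperty("op", op);
      }
      // On destruction the Guard emits the generator's CacheIR sequence (if any),
      // records the attached name, and flushes the output periodically.
    }
    #endif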
diff --git a/js/src/jit/CacheIRWriter.h b/js/src/jit/CacheIRWriter.h
new file mode 100644
index 0000000000..48e35c4667
--- /dev/null
+++ b/js/src/jit/CacheIRWriter.h
@@ -0,0 +1,642 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CacheIRWriter_h
+#define jit_CacheIRWriter_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/Maybe.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "gc/AllocKind.h"
+#include "jit/CacheIR.h"
+#include "jit/CacheIROpsGenerated.h"
+#include "jit/CompactBuffer.h"
+#include "jit/ICState.h"
+#include "jit/Simulator.h"
+#include "jit/TypeData.h"
+#include "js/AllocPolicy.h"
+#include "js/CallArgs.h"
+#include "js/Class.h"
+#include "js/experimental/JitInfo.h"
+#include "js/Id.h"
+#include "js/RootingAPI.h"
+#include "js/ScalarType.h"
+#include "js/Value.h"
+#include "js/Vector.h"
+#include "util/Memory.h"
+#include "vm/JSFunction.h"
+#include "vm/JSScript.h"
+#include "vm/List.h"
+#include "vm/Opcodes.h"
+#include "vm/Shape.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmValType.h"
+
+class JS_PUBLIC_API JSTracer;
+struct JS_PUBLIC_API JSContext;
+
+class JSObject;
+class JSString;
+
+namespace JS {
+class Symbol;
+}
+
+namespace js {
+
+class GetterSetter;
+enum class UnaryMathFunction : uint8_t;
+
+namespace gc {
+class AllocSite;
+}
+
+namespace jit {
+
+class ICScript;
+
+#ifdef JS_SIMULATOR
+bool CallAnyNative(JSContext* cx, unsigned argc, Value* vp);
+#endif
+
+// Class to record CacheIR + some additional metadata for code generation.
+class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter {
+#ifdef DEBUG
+ JSContext* cx_;
+#endif
+ CompactBufferWriter buffer_;
+
+ uint32_t nextOperandId_;
+ uint32_t nextInstructionId_;
+ uint32_t numInputOperands_;
+
+ TypeData typeData_;
+
+ // The data (shapes, slot offsets, etc.) that will be stored in the ICStub.
+ Vector<StubField, 8, SystemAllocPolicy> stubFields_;
+ size_t stubDataSize_;
+
+ // For each operand id, record which instruction accessed it last. This
+ // information greatly improves register allocation.
+ Vector<uint32_t, 8, SystemAllocPolicy> operandLastUsed_;
+
+ // OperandId and stub offsets are stored in a single byte, so make sure
+ // this doesn't overflow. We use a very conservative limit for now.
+ static const size_t MaxOperandIds = 20;
+ static const size_t MaxStubDataSizeInBytes = 20 * sizeof(uintptr_t);
+ bool tooLarge_;
+
+ // Assume this stub can't be trial inlined until we see a scripted call/inline
+ // instruction.
+ TrialInliningState trialInliningState_ = TrialInliningState::Failure;
+
+  // Basic caching to avoid quadratic lookup behaviour in readStubField.
+ mutable uint32_t lastOffset_;
+ mutable uint32_t lastIndex_;
+
+#ifdef DEBUG
+ // Information for assertLengthMatches.
+ mozilla::Maybe<CacheOp> currentOp_;
+ size_t currentOpArgsStart_ = 0;
+#endif
+
+#ifdef DEBUG
+ void assertSameCompartment(JSObject* obj);
+ void assertSameZone(Shape* shape);
+#else
+ void assertSameCompartment(JSObject* obj) {}
+ void assertSameZone(Shape* shape) {}
+#endif
+
+ void writeOp(CacheOp op) {
+ buffer_.writeUnsigned15Bit(uint32_t(op));
+ nextInstructionId_++;
+#ifdef DEBUG
+ MOZ_ASSERT(currentOp_.isNothing(), "Missing call to assertLengthMatches?");
+ currentOp_.emplace(op);
+ currentOpArgsStart_ = buffer_.length();
+#endif
+ }
+
+ void assertLengthMatches() {
+#ifdef DEBUG
+    // After writing arguments, assert the length matches the op's expected
+    // argument length (CacheIROpInfos[op].argLength).
+ size_t expectedLen = CacheIROpInfos[size_t(*currentOp_)].argLength;
+ MOZ_ASSERT_IF(!failed(),
+ buffer_.length() - currentOpArgsStart_ == expectedLen);
+ currentOp_.reset();
+#endif
+ }
+
+ void writeOperandId(OperandId opId) {
+ if (opId.id() < MaxOperandIds) {
+ static_assert(MaxOperandIds <= UINT8_MAX,
+ "operand id must fit in a single byte");
+ buffer_.writeByte(opId.id());
+ } else {
+ tooLarge_ = true;
+ return;
+ }
+ if (opId.id() >= operandLastUsed_.length()) {
+ buffer_.propagateOOM(operandLastUsed_.resize(opId.id() + 1));
+ if (buffer_.oom()) {
+ return;
+ }
+ }
+ MOZ_ASSERT(nextInstructionId_ > 0);
+ operandLastUsed_[opId.id()] = nextInstructionId_ - 1;
+ }
+
+ void writeCallFlagsImm(CallFlags flags) { buffer_.writeByte(flags.toByte()); }
+
+ void addStubField(uint64_t value, StubField::Type fieldType) {
+ size_t fieldOffset = stubDataSize_;
+#ifndef JS_64BIT
+ // On 32-bit platforms there are two stub field sizes (4 bytes and 8 bytes).
+ // Ensure 8-byte fields are properly aligned.
+ if (StubField::sizeIsInt64(fieldType)) {
+ fieldOffset = AlignBytes(fieldOffset, sizeof(uint64_t));
+ }
+#endif
+ MOZ_ASSERT((fieldOffset % StubField::sizeInBytes(fieldType)) == 0);
+
+ size_t newStubDataSize = fieldOffset + StubField::sizeInBytes(fieldType);
+ if (newStubDataSize < MaxStubDataSizeInBytes) {
+#ifndef JS_64BIT
+ // Add a RawInt32 stub field for padding if necessary, because when we
+ // iterate over the stub fields we assume there are no 'holes'.
+ if (fieldOffset != stubDataSize_) {
+ MOZ_ASSERT((stubDataSize_ + sizeof(uintptr_t)) == fieldOffset);
+ buffer_.propagateOOM(
+ stubFields_.append(StubField(0, StubField::Type::RawInt32)));
+ }
+#endif
+ buffer_.propagateOOM(stubFields_.append(StubField(value, fieldType)));
+ MOZ_ASSERT((fieldOffset % sizeof(uintptr_t)) == 0);
+ buffer_.writeByte(fieldOffset / sizeof(uintptr_t));
+ stubDataSize_ = newStubDataSize;
+ } else {
+ tooLarge_ = true;
+ }
+ }
+
+ void writeShapeField(Shape* shape) {
+ MOZ_ASSERT(shape);
+ assertSameZone(shape);
+ addStubField(uintptr_t(shape), StubField::Type::Shape);
+ }
+ void writeGetterSetterField(GetterSetter* gs) {
+ MOZ_ASSERT(gs);
+ addStubField(uintptr_t(gs), StubField::Type::GetterSetter);
+ }
+ void writeObjectField(JSObject* obj) {
+ MOZ_ASSERT(obj);
+ assertSameCompartment(obj);
+ addStubField(uintptr_t(obj), StubField::Type::JSObject);
+ }
+ void writeStringField(JSString* str) {
+ MOZ_ASSERT(str);
+ addStubField(uintptr_t(str), StubField::Type::String);
+ }
+ void writeSymbolField(JS::Symbol* sym) {
+ MOZ_ASSERT(sym);
+ addStubField(uintptr_t(sym), StubField::Type::Symbol);
+ }
+ void writeBaseScriptField(BaseScript* script) {
+ MOZ_ASSERT(script);
+ addStubField(uintptr_t(script), StubField::Type::BaseScript);
+ }
+ void writeJitCodeField(JitCode* code) {
+ MOZ_ASSERT(code);
+ addStubField(uintptr_t(code), StubField::Type::JitCode);
+ }
+ void writeRawInt32Field(uint32_t val) {
+ addStubField(val, StubField::Type::RawInt32);
+ }
+ void writeRawPointerField(const void* ptr) {
+ addStubField(uintptr_t(ptr), StubField::Type::RawPointer);
+ }
+ void writeIdField(jsid id) {
+ addStubField(id.asRawBits(), StubField::Type::Id);
+ }
+ void writeValueField(const Value& val) {
+ addStubField(val.asRawBits(), StubField::Type::Value);
+ }
+ void writeRawInt64Field(uint64_t val) {
+ addStubField(val, StubField::Type::RawInt64);
+ }
+ void writeDoubleField(double d) {
+ uint64_t bits = mozilla::BitwiseCast<uint64_t>(d);
+ addStubField(bits, StubField::Type::Double);
+ }
+ void writeAllocSiteField(gc::AllocSite* ptr) {
+ addStubField(uintptr_t(ptr), StubField::Type::AllocSite);
+ }
+
+ void writeJSOpImm(JSOp op) {
+ static_assert(sizeof(JSOp) == sizeof(uint8_t), "JSOp must fit in a byte");
+ buffer_.writeByte(uint8_t(op));
+ }
+ void writeGuardClassKindImm(GuardClassKind kind) {
+ static_assert(sizeof(GuardClassKind) == sizeof(uint8_t),
+ "GuardClassKind must fit in a byte");
+ buffer_.writeByte(uint8_t(kind));
+ }
+ void writeValueTypeImm(ValueType type) {
+ static_assert(sizeof(ValueType) == sizeof(uint8_t),
+ "ValueType must fit in uint8_t");
+ buffer_.writeByte(uint8_t(type));
+ }
+ void writeJSWhyMagicImm(JSWhyMagic whyMagic) {
+ static_assert(JS_WHY_MAGIC_COUNT <= UINT8_MAX,
+ "JSWhyMagic must fit in uint8_t");
+ buffer_.writeByte(uint8_t(whyMagic));
+ }
+ void writeScalarTypeImm(Scalar::Type type) {
+ MOZ_ASSERT(size_t(type) <= UINT8_MAX);
+ buffer_.writeByte(uint8_t(type));
+ }
+ void writeUnaryMathFunctionImm(UnaryMathFunction fun) {
+ static_assert(sizeof(UnaryMathFunction) == sizeof(uint8_t),
+ "UnaryMathFunction must fit in a byte");
+ buffer_.writeByte(uint8_t(fun));
+ }
+ void writeCompletionKindImm(CompletionKind kind) {
+ static_assert(sizeof(CompletionKind) == sizeof(uint8_t),
+ "CompletionKind must fit in a byte");
+ buffer_.writeByte(uint8_t(kind));
+ }
+ void writeBoolImm(bool b) { buffer_.writeByte(uint32_t(b)); }
+
+ void writeByteImm(uint32_t b) {
+ MOZ_ASSERT(b <= UINT8_MAX);
+ buffer_.writeByte(b);
+ }
+
+ void writeInt32Imm(int32_t i32) { buffer_.writeFixedUint32_t(i32); }
+ void writeUInt32Imm(uint32_t u32) { buffer_.writeFixedUint32_t(u32); }
+ void writePointer(const void* ptr) { buffer_.writeRawPointer(ptr); }
+
+ void writeJSNativeImm(JSNative native) {
+ writePointer(JS_FUNC_TO_DATA_PTR(void*, native));
+ }
+ void writeStaticStringImm(const char* str) { writePointer(str); }
+
+ void writeWasmValTypeImm(wasm::ValType::Kind kind) {
+ static_assert(unsigned(wasm::TypeCode::Limit) <= UINT8_MAX);
+ buffer_.writeByte(uint8_t(kind));
+ }
+
+ void writeAllocKindImm(gc::AllocKind kind) {
+ static_assert(unsigned(gc::AllocKind::LIMIT) <= UINT8_MAX);
+ buffer_.writeByte(uint8_t(kind));
+ }
+
+ uint32_t newOperandId() { return nextOperandId_++; }
+
+ CacheIRWriter(const CacheIRWriter&) = delete;
+ CacheIRWriter& operator=(const CacheIRWriter&) = delete;
+
+ public:
+ explicit CacheIRWriter(JSContext* cx)
+ : CustomAutoRooter(cx),
+#ifdef DEBUG
+ cx_(cx),
+#endif
+ nextOperandId_(0),
+ nextInstructionId_(0),
+ numInputOperands_(0),
+ stubDataSize_(0),
+ tooLarge_(false),
+ lastOffset_(0),
+ lastIndex_(0) {
+ }
+
+ bool tooLarge() const { return tooLarge_; }
+ bool oom() const { return buffer_.oom(); }
+ bool failed() const { return tooLarge() || oom(); }
+
+ TrialInliningState trialInliningState() const { return trialInliningState_; }
+
+ uint32_t numInputOperands() const { return numInputOperands_; }
+ uint32_t numOperandIds() const { return nextOperandId_; }
+ uint32_t numInstructions() const { return nextInstructionId_; }
+
+ size_t numStubFields() const { return stubFields_.length(); }
+ StubField::Type stubFieldType(uint32_t i) const {
+ return stubFields_[i].type();
+ }
+
+ uint32_t setInputOperandId(uint32_t op) {
+ MOZ_ASSERT(op == nextOperandId_);
+ nextOperandId_++;
+ numInputOperands_++;
+ return op;
+ }
+
+ TypeData typeData() const { return typeData_; }
+ void setTypeData(TypeData data) { typeData_ = data; }
+
+ void trace(JSTracer* trc) override {
+ // For now, assert we only GC before we append stub fields.
+ MOZ_RELEASE_ASSERT(stubFields_.empty());
+ }
+
+ size_t stubDataSize() const { return stubDataSize_; }
+ void copyStubData(uint8_t* dest) const;
+ bool stubDataEquals(const uint8_t* stubData) const;
+ bool stubDataEqualsIgnoring(const uint8_t* stubData,
+ uint32_t ignoreOffset) const;
+
+ bool operandIsDead(uint32_t operandId, uint32_t currentInstruction) const {
+ if (operandId >= operandLastUsed_.length()) {
+ return false;
+ }
+ return currentInstruction > operandLastUsed_[operandId];
+ }
+
+ const uint8_t* codeStart() const {
+ MOZ_ASSERT(!failed());
+ return buffer_.buffer();
+ }
+
+ const uint8_t* codeEnd() const {
+ MOZ_ASSERT(!failed());
+ return buffer_.buffer() + buffer_.length();
+ }
+
+ uint32_t codeLength() const {
+ MOZ_ASSERT(!failed());
+ return buffer_.length();
+ }
+
+ // This should not be used when compiling Baseline code, as Baseline code
+ // shouldn't bake in stub values.
+ StubField readStubField(uint32_t offset, StubField::Type type) const;
+
+ ObjOperandId guardToObject(ValOperandId input) {
+ guardToObject_(input);
+ return ObjOperandId(input.id());
+ }
+
+ StringOperandId guardToString(ValOperandId input) {
+ guardToString_(input);
+ return StringOperandId(input.id());
+ }
+
+ SymbolOperandId guardToSymbol(ValOperandId input) {
+ guardToSymbol_(input);
+ return SymbolOperandId(input.id());
+ }
+
+ BigIntOperandId guardToBigInt(ValOperandId input) {
+ guardToBigInt_(input);
+ return BigIntOperandId(input.id());
+ }
+
+ BooleanOperandId guardToBoolean(ValOperandId input) {
+ guardToBoolean_(input);
+ return BooleanOperandId(input.id());
+ }
+
+ Int32OperandId guardToInt32(ValOperandId input) {
+ guardToInt32_(input);
+ return Int32OperandId(input.id());
+ }
+
+ NumberOperandId guardIsNumber(ValOperandId input) {
+ guardIsNumber_(input);
+ return NumberOperandId(input.id());
+ }
+
+ ValOperandId boxObject(ObjOperandId input) {
+ return ValOperandId(input.id());
+ }
+
+ void guardShapeForClass(ObjOperandId obj, Shape* shape) {
+ // Guard shape to ensure that object class is unchanged. This is true
+ // for all shapes.
+ guardShape(obj, shape);
+ }
+
+ void guardShapeForOwnProperties(ObjOperandId obj, Shape* shape) {
+ // Guard shape to detect changes to (non-dense) own properties. This
+ // also implies |guardShapeForClass|.
+ MOZ_ASSERT(shape->getObjectClass()->isNativeObject());
+ guardShape(obj, shape);
+ }
+
+ public:
+ void guardSpecificFunction(ObjOperandId obj, JSFunction* expected) {
+ // Guard object is a specific function. This implies immutable fields on
+ // the JSFunction struct itself are unchanged.
+ // Bake in the nargs and FunctionFlags so Warp can use them off-main thread,
+ // instead of directly using the JSFunction fields.
+ uint32_t nargsAndFlags = expected->flagsAndArgCountRaw();
+ guardSpecificFunction_(obj, expected, nargsAndFlags);
+ }
+
+ void guardFunctionScript(ObjOperandId fun, BaseScript* expected) {
+ // Guard function has a specific BaseScript. This implies immutable fields
+ // on the JSFunction struct itself are unchanged and are equivalent for
+ // lambda clones.
+ // Bake in the nargs and FunctionFlags so Warp can use them off-main thread,
+ // instead of directly using the JSFunction fields.
+ uint32_t nargsAndFlags = expected->function()->flagsAndArgCountRaw();
+ guardFunctionScript_(fun, expected, nargsAndFlags);
+ }
+
+ ValOperandId loadArgumentFixedSlot(
+ ArgumentKind kind, uint32_t argc,
+ CallFlags flags = CallFlags(CallFlags::Standard)) {
+ bool addArgc;
+ int32_t slotIndex = GetIndexOfArgument(kind, flags, &addArgc);
+ if (addArgc) {
+ slotIndex += argc;
+ }
+ MOZ_ASSERT(slotIndex >= 0);
+ MOZ_ASSERT(slotIndex <= UINT8_MAX);
+ return loadArgumentFixedSlot_(slotIndex);
+ }
+
+ ValOperandId loadArgumentDynamicSlot(
+ ArgumentKind kind, Int32OperandId argcId,
+ CallFlags flags = CallFlags(CallFlags::Standard)) {
+ bool addArgc;
+ int32_t slotIndex = GetIndexOfArgument(kind, flags, &addArgc);
+ if (addArgc) {
+ return loadArgumentDynamicSlot_(argcId, slotIndex);
+ }
+ return loadArgumentFixedSlot_(slotIndex);
+ }
+
+ ObjOperandId loadSpreadArgs() {
+ ArgumentKind kind = ArgumentKind::Arg0;
+ uint32_t argc = 1;
+ CallFlags flags(CallFlags::Spread);
+ return ObjOperandId(loadArgumentFixedSlot(kind, argc, flags).id());
+ }
+
+ void callScriptedFunction(ObjOperandId callee, Int32OperandId argc,
+ CallFlags flags, uint32_t argcFixed) {
+ callScriptedFunction_(callee, argc, flags, argcFixed);
+ trialInliningState_ = TrialInliningState::Candidate;
+ }
+
+ void callInlinedFunction(ObjOperandId callee, Int32OperandId argc,
+ ICScript* icScript, CallFlags flags,
+ uint32_t argcFixed) {
+ callInlinedFunction_(callee, argc, icScript, flags, argcFixed);
+ trialInliningState_ = TrialInliningState::Inlined;
+ }
+
+ void callNativeFunction(ObjOperandId calleeId, Int32OperandId argc, JSOp op,
+ JSFunction* calleeFunc, CallFlags flags,
+ uint32_t argcFixed) {
+ // Some native functions can be implemented faster if we know that
+ // the return value is ignored.
+ bool ignoresReturnValue =
+ op == JSOp::CallIgnoresRv && calleeFunc->hasJitInfo() &&
+ calleeFunc->jitInfo()->type() == JSJitInfo::IgnoresReturnValueNative;
+
+#ifdef JS_SIMULATOR
+ // The simulator requires VM calls to be redirected to a special
+ // swi instruction to handle them, so we store the redirected
+ // pointer in the stub and use that instead of the original one.
+ // If we are calling the ignoresReturnValue version of a native
+ // function, we bake it into the redirected pointer.
+ // (See BaselineCacheIRCompiler::emitCallNativeFunction.)
+ JSNative target = ignoresReturnValue
+ ? calleeFunc->jitInfo()->ignoresReturnValueMethod
+ : calleeFunc->native();
+ void* rawPtr = JS_FUNC_TO_DATA_PTR(void*, target);
+ void* redirected = Simulator::RedirectNativeFunction(rawPtr, Args_General3);
+ callNativeFunction_(calleeId, argc, flags, argcFixed, redirected);
+#else
+ // If we are not running in the simulator, we generate different jitcode
+ // to find the ignoresReturnValue version of a native function.
+ callNativeFunction_(calleeId, argc, flags, argcFixed, ignoresReturnValue);
+#endif
+ }
+
+ void callDOMFunction(ObjOperandId calleeId, Int32OperandId argc,
+ ObjOperandId thisObjId, JSFunction* calleeFunc,
+ CallFlags flags, uint32_t argcFixed) {
+#ifdef JS_SIMULATOR
+ void* rawPtr = JS_FUNC_TO_DATA_PTR(void*, calleeFunc->native());
+ void* redirected = Simulator::RedirectNativeFunction(rawPtr, Args_General3);
+ callDOMFunction_(calleeId, argc, thisObjId, flags, argcFixed, redirected);
+#else
+ callDOMFunction_(calleeId, argc, thisObjId, flags, argcFixed);
+#endif
+ }
+
+ void callAnyNativeFunction(ObjOperandId calleeId, Int32OperandId argc,
+ CallFlags flags, uint32_t argcFixed) {
+ MOZ_ASSERT(!flags.isSameRealm());
+#ifdef JS_SIMULATOR
+ // The simulator requires native calls to be redirected to a
+ // special swi instruction. If we are calling an arbitrary native
+ // function, we can't wrap the real target ahead of time, so we
+ // call a wrapper function (CallAnyNative) that calls the target
+ // itself, and redirect that wrapper.
+ JSNative target = CallAnyNative;
+ void* rawPtr = JS_FUNC_TO_DATA_PTR(void*, target);
+ void* redirected = Simulator::RedirectNativeFunction(rawPtr, Args_General3);
+ callNativeFunction_(calleeId, argc, flags, argcFixed, redirected);
+#else
+ callNativeFunction_(calleeId, argc, flags, argcFixed,
+ /* ignoresReturnValue = */ false);
+#endif
+ }
+
+ void callClassHook(ObjOperandId calleeId, Int32OperandId argc, JSNative hook,
+ CallFlags flags, uint32_t argcFixed) {
+ MOZ_ASSERT(!flags.isSameRealm());
+ void* target = JS_FUNC_TO_DATA_PTR(void*, hook);
+#ifdef JS_SIMULATOR
+ // The simulator requires VM calls to be redirected to a special
+ // swi instruction to handle them, so we store the redirected
+ // pointer in the stub and use that instead of the original one.
+ target = Simulator::RedirectNativeFunction(target, Args_General3);
+#endif
+ callClassHook_(calleeId, argc, flags, argcFixed, target);
+ }
+
+ void callScriptedGetterResult(ValOperandId receiver, JSFunction* getter,
+ bool sameRealm) {
+ MOZ_ASSERT(getter->hasJitEntry());
+ uint32_t nargsAndFlags = getter->flagsAndArgCountRaw();
+ callScriptedGetterResult_(receiver, getter, sameRealm, nargsAndFlags);
+ trialInliningState_ = TrialInliningState::Candidate;
+ }
+
+ void callInlinedGetterResult(ValOperandId receiver, JSFunction* getter,
+ ICScript* icScript, bool sameRealm) {
+ MOZ_ASSERT(getter->hasJitEntry());
+ uint32_t nargsAndFlags = getter->flagsAndArgCountRaw();
+ callInlinedGetterResult_(receiver, getter, icScript, sameRealm,
+ nargsAndFlags);
+ trialInliningState_ = TrialInliningState::Inlined;
+ }
+
+ void callNativeGetterResult(ValOperandId receiver, JSFunction* getter,
+ bool sameRealm) {
+ MOZ_ASSERT(getter->isNativeWithoutJitEntry());
+ uint32_t nargsAndFlags = getter->flagsAndArgCountRaw();
+ callNativeGetterResult_(receiver, getter, sameRealm, nargsAndFlags);
+ }
+
+ void callScriptedSetter(ObjOperandId receiver, JSFunction* setter,
+ ValOperandId rhs, bool sameRealm) {
+ MOZ_ASSERT(setter->hasJitEntry());
+ uint32_t nargsAndFlags = setter->flagsAndArgCountRaw();
+ callScriptedSetter_(receiver, setter, rhs, sameRealm, nargsAndFlags);
+ trialInliningState_ = TrialInliningState::Candidate;
+ }
+
+ void callInlinedSetter(ObjOperandId receiver, JSFunction* setter,
+ ValOperandId rhs, ICScript* icScript, bool sameRealm) {
+ MOZ_ASSERT(setter->hasJitEntry());
+ uint32_t nargsAndFlags = setter->flagsAndArgCountRaw();
+ callInlinedSetter_(receiver, setter, rhs, icScript, sameRealm,
+ nargsAndFlags);
+ trialInliningState_ = TrialInliningState::Inlined;
+ }
+
+ void callNativeSetter(ObjOperandId receiver, JSFunction* setter,
+ ValOperandId rhs, bool sameRealm) {
+ MOZ_ASSERT(setter->isNativeWithoutJitEntry());
+ uint32_t nargsAndFlags = setter->flagsAndArgCountRaw();
+ callNativeSetter_(receiver, setter, rhs, sameRealm, nargsAndFlags);
+ }
+
+ void metaScriptedThisShape(Shape* thisShape) {
+ metaScriptedThisShape_(thisShape);
+ }
+
+ void guardMultipleShapes(ObjOperandId obj, ListObject* shapes) {
+ MOZ_ASSERT(shapes->length() > 0);
+ guardMultipleShapes_(obj, shapes);
+ }
+
+ friend class CacheIRCloner;
+
+ CACHE_IR_WRITER_GENERATED
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CacheIRWriter_h */
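
A hedged sketch tying the writer back to the op table earlier in this patch; int32AddResult and returnFromIC are assumed to be the CACHE_IR_WRITER_GENERATED method names for those ops, not hand-written members:

    #include "jit/CacheIRWriter.h"

    // Illustrative only: guard both inputs to Int32, then emit an add.
    // lhsVal/rhsVal must already be registered via setInputOperandId(); the
    // int32AddResult/returnFromIC calls are assumed generated writer methods.
    static void EmitInt32AddIC(js::jit::CacheIRWriter& writer,
                               js::jit::ValOperandId lhsVal,
                               js::jit::ValOperandId rhsVal) {
      js::jit::Int32OperandId lhs = writer.guardToInt32(lhsVal);
      js::jit::Int32OperandId rhs = writer.guardToInt32(rhsVal);
      writer.int32AddResult(lhs, rhs);  // Int32AddResult op from the table above
      writer.returnFromIC();            // ReturnFromIC op
    }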
diff --git a/js/src/jit/CalleeToken.h b/js/src/jit/CalleeToken.h
new file mode 100644
index 0000000000..b2944108a5
--- /dev/null
+++ b/js/src/jit/CalleeToken.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CalleeToken_h
+#define jit_CalleeToken_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "js/TypeDecls.h"
+
+class JS_PUBLIC_API JSTracer;
+
+namespace js::jit {
+
+using CalleeToken = void*;
+
+enum CalleeTokenTag {
+ CalleeToken_Function = 0x0, // untagged
+ CalleeToken_FunctionConstructing = 0x1,
+ CalleeToken_Script = 0x2
+};
+
+// Any CalleeToken with this bit set must be CalleeToken_Script.
+static const uintptr_t CalleeTokenScriptBit = CalleeToken_Script;
+
+static const uintptr_t CalleeTokenMask = ~uintptr_t(0x3);
+
+static inline CalleeTokenTag GetCalleeTokenTag(CalleeToken token) {
+ CalleeTokenTag tag = CalleeTokenTag(uintptr_t(token) & 0x3);
+ MOZ_ASSERT(tag <= CalleeToken_Script);
+ return tag;
+}
+static inline CalleeToken CalleeToToken(JSFunction* fun, bool constructing) {
+ CalleeTokenTag tag =
+ constructing ? CalleeToken_FunctionConstructing : CalleeToken_Function;
+ return CalleeToken(uintptr_t(fun) | uintptr_t(tag));
+}
+static inline CalleeToken CalleeToToken(JSScript* script) {
+ return CalleeToken(uintptr_t(script) | uintptr_t(CalleeToken_Script));
+}
+static inline bool CalleeTokenIsFunction(CalleeToken token) {
+ CalleeTokenTag tag = GetCalleeTokenTag(token);
+ return tag == CalleeToken_Function || tag == CalleeToken_FunctionConstructing;
+}
+static inline bool CalleeTokenIsConstructing(CalleeToken token) {
+ return GetCalleeTokenTag(token) == CalleeToken_FunctionConstructing;
+}
+static inline JSFunction* CalleeTokenToFunction(CalleeToken token) {
+ MOZ_ASSERT(CalleeTokenIsFunction(token));
+ return (JSFunction*)(uintptr_t(token) & CalleeTokenMask);
+}
+static inline JSScript* CalleeTokenToScript(CalleeToken token) {
+ MOZ_ASSERT(GetCalleeTokenTag(token) == CalleeToken_Script);
+ return (JSScript*)(uintptr_t(token) & CalleeTokenMask);
+}
+
+CalleeToken TraceCalleeToken(JSTracer* trc, CalleeToken token);
+
+} /* namespace js::jit */
+
+#endif /* jit_CalleeToken_h */
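
The tag scheme relies on the low two bits of suitably aligned JSFunction/JSScript pointers being free; a small round-trip sketch using only the helpers above, assuming a valid script pointer:

    #include "jit/CalleeToken.h"

    // Illustrative only: tag a script pointer and recover it again.
    static void CalleeTokenRoundTrip(JSScript* script) {
      js::jit::CalleeToken token = js::jit::CalleeToToken(script);
      MOZ_ASSERT(js::jit::GetCalleeTokenTag(token) == js::jit::CalleeToken_Script);
      MOZ_ASSERT(!js::jit::CalleeTokenIsFunction(token));
      // CalleeTokenToScript masks off the two low tag bits (CalleeTokenMask).
      JSScript* again = js::jit::CalleeTokenToScript(token);
      MOZ_ASSERT(again == script);
    }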
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
new file mode 100644
index 0000000000..f4b7ea7e13
--- /dev/null
+++ b/js/src/jit/CodeGenerator.cpp
@@ -0,0 +1,18631 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/CodeGenerator.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/EnumeratedRange.h"
+#include "mozilla/EnumSet.h"
+#include "mozilla/IntegerTypeTraits.h"
+#include "mozilla/Latin1.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/ScopeExit.h"
+
+#include <limits>
+#include <type_traits>
+#include <utility>
+
+#include "jslibmath.h"
+#include "jsmath.h"
+#include "jsnum.h"
+
+#include "builtin/MapObject.h"
+#include "builtin/RegExp.h"
+#include "builtin/String.h"
+#include "irregexp/RegExpTypes.h"
+#include "jit/ABIArgGenerator.h"
+#include "jit/CompileInfo.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/Invalidation.h"
+#include "jit/IonIC.h"
+#include "jit/IonScript.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRealm.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/JitZone.h"
+#include "jit/Linker.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MoveEmitter.h"
+#include "jit/RangeAnalysis.h"
+#include "jit/RegExpStubConstants.h"
+#include "jit/SafepointIndex.h"
+#include "jit/SharedICHelpers.h"
+#include "jit/SharedICRegisters.h"
+#include "jit/VMFunctions.h"
+#include "jit/WarpSnapshot.h"
+#include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
+#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
+#include "js/RegExpFlags.h" // JS::RegExpFlag
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "proxy/DOMProxy.h"
+#include "util/CheckedArithmetic.h"
+#include "util/Unicode.h"
+#include "vm/ArrayBufferViewObject.h"
+#include "vm/AsyncFunction.h"
+#include "vm/AsyncIteration.h"
+#include "vm/BuiltinObjectKind.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/Interpreter.h"
+#include "vm/JSAtom.h"
+#include "vm/MatchPairs.h"
+#include "vm/RegExpObject.h"
+#include "vm/RegExpStatics.h"
+#include "vm/StaticStrings.h"
+#include "vm/StringObject.h"
+#include "vm/StringType.h"
+#include "vm/TypedArrayObject.h"
+#include "wasm/WasmCodegenConstants.h"
+#include "wasm/WasmValType.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+#include "wasm/WasmBinary.h"
+#include "wasm/WasmGC.h"
+#include "wasm/WasmGcObject.h"
+#include "wasm/WasmStubs.h"
+
+#include "builtin/Boolean-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "jit/TemplateObject-inl.h"
+#include "jit/VMFunctionList-inl.h"
+#include "vm/JSScript-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using mozilla::AssertedCast;
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+using mozilla::Maybe;
+using mozilla::NegativeInfinity;
+using mozilla::PositiveInfinity;
+
+using JS::ExpandoAndGeneration;
+
+namespace js {
+namespace jit {
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+template <class Op>
+static void HandleRegisterDump(Op op, MacroAssembler& masm,
+ LiveRegisterSet liveRegs, Register activation,
+ Register scratch) {
+ const size_t baseOffset = JitActivation::offsetOfRegs();
+
+ // Handle live GPRs.
+ for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
+ Register reg = *iter;
+ Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
+
+ if (reg == activation) {
+ // To use the original value of the activation register (that's
+ // now on top of the stack), we need the scratch register.
+ masm.push(scratch);
+ masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
+ op(scratch, dump);
+ masm.pop(scratch);
+ } else {
+ op(reg, dump);
+ }
+ }
+
+ // Handle live FPRs.
+ for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
+ op(reg, dump);
+ }
+}
+
+class StoreOp {
+ MacroAssembler& masm;
+
+ public:
+ explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
+
+ void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
+ void operator()(FloatRegister reg, Address dump) {
+ if (reg.isDouble()) {
+ masm.storeDouble(reg, dump);
+ } else if (reg.isSingle()) {
+ masm.storeFloat32(reg, dump);
+ } else if (reg.isSimd128()) {
+ MOZ_CRASH("Unexpected case for SIMD");
+ } else {
+ MOZ_CRASH("Unexpected register type.");
+ }
+ }
+};
+
+class VerifyOp {
+ MacroAssembler& masm;
+ Label* failure_;
+
+ public:
+ VerifyOp(MacroAssembler& masm, Label* failure)
+ : masm(masm), failure_(failure) {}
+
+ void operator()(Register reg, Address dump) {
+ masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
+ }
+ void operator()(FloatRegister reg, Address dump) {
+ if (reg.isDouble()) {
+ ScratchDoubleScope scratch(masm);
+ masm.loadDouble(dump, scratch);
+ masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
+ } else if (reg.isSingle()) {
+ ScratchFloat32Scope scratch(masm);
+ masm.loadFloat32(dump, scratch);
+ masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
+ } else if (reg.isSimd128()) {
+ MOZ_CRASH("Unexpected case for SIMD");
+ } else {
+ MOZ_CRASH("Unexpected register type.");
+ }
+ }
+};
+
+void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
+ // Ensure the live registers stored by callVM did not change between
+ // the call and this OsiPoint. Try-catch relies on this invariant.
+
+ // Load pointer to the JitActivation in a scratch register.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ Register scratch = allRegs.takeAny();
+ masm.push(scratch);
+ masm.loadJitActivation(scratch);
+
+ // If we should not check registers (because the instruction did not call
+ // into the VM, or a GC happened), we're done.
+ Label failure, done;
+ Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
+ masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
+
+  // Having more than one VM function call made in one visit function at
+  // runtime is a security-critical error, because if we conservatively assume
+  // that one of the function calls can re-enter Ion, then the invalidation
+  // process will potentially add a call at a random location, by patching the
+  // code before the return address.
+ masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
+
+ // Set checkRegs to 0, so that we don't try to verify registers after we
+ // return from this script to the caller.
+ masm.store32(Imm32(0), checkRegs);
+
+  // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
+  // temps after calling into the VM. This is fine because no other
+  // instructions (including this OsiPoint) will depend on them. Also, the
+  // backtracking allocator can use the same register for an input and an
+  // output. These are marked as clobbered and shouldn't get checked.
+ LiveRegisterSet liveRegs;
+ liveRegs.set() = RegisterSet::Intersect(
+ safepoint->liveRegs().set(),
+ RegisterSet::Not(safepoint->clobberedRegs().set()));
+
+ VerifyOp op(masm, &failure);
+ HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
+
+ masm.jump(&done);
+
+ // Do not profile the callWithABI that occurs below. This is to avoid a
+ // rare corner case that occurs when profiling interacts with itself:
+ //
+ // When slow profiling assertions are turned on, FunctionBoundary ops
+ // (which update the profiler pseudo-stack) may emit a callVM, which
+ // forces them to have an osi point associated with them. The
+ // FunctionBoundary for inline function entry is added to the caller's
+ // graph with a PC from the caller's code, but during codegen it modifies
+ // Gecko Profiler instrumentation to add the callee as the current top-most
+ // script. When codegen gets to the OSIPoint, and the callWithABI below is
+ // emitted, the codegen thinks that the current frame is the callee, but
+ // the PC it's using from the OSIPoint refers to the caller. This causes
+ // the profiler instrumentation of the callWithABI below to ASSERT, since
+ // the script and pc are mismatched. To avoid this, we simply omit
+ // instrumentation for these callWithABIs.
+
+ // Any live register captured by a safepoint (other than temp registers)
+ // must remain unchanged between the call and the OsiPoint instruction.
+ masm.bind(&failure);
+ masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
+
+ masm.bind(&done);
+ masm.pop(scratch);
+}
+
+bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
+ if (!checkOsiPointRegisters) {
+ return false;
+ }
+
+ if (safepoint->liveRegs().emptyGeneral() &&
+ safepoint->liveRegs().emptyFloat()) {
+ return false; // No registers to check.
+ }
+
+ return true;
+}
+
+void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
+ if (!shouldVerifyOsiPointRegs(safepoint)) {
+ return;
+ }
+
+ // Set checkRegs to 0. If we perform a VM call, the instruction
+ // will set it to 1.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ Register scratch = allRegs.takeAny();
+ masm.push(scratch);
+ masm.loadJitActivation(scratch);
+ Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
+ masm.store32(Imm32(0), checkRegs);
+ masm.pop(scratch);
+}
+
+static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
+ // Store a copy of all live registers before performing the call.
+ // When we reach the OsiPoint, we can use this to check nothing
+ // modified them in the meantime.
+
+ // Load pointer to the JitActivation in a scratch register.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ Register scratch = allRegs.takeAny();
+ masm.push(scratch);
+ masm.loadJitActivation(scratch);
+
+ Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
+ masm.add32(Imm32(1), checkRegs);
+
+ StoreOp op(masm);
+ HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
+
+ masm.pop(scratch);
+}
+#endif // CHECK_OSIPOINT_REGISTERS
+
+// Before doing any call to C++, you should ensure that volatile
+// registers are evicted by the register allocator.
+void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
+ TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
+ const VMFunctionData& fun = GetVMFunction(id);
+
+ // Stack is:
+ // ... frame ...
+ // [args]
+#ifdef DEBUG
+ MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
+ pushedArgs_ = 0;
+#endif
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (shouldVerifyOsiPointRegs(ins->safepoint())) {
+ StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
+ }
+#endif
+
+#ifdef DEBUG
+ if (ins->mirRaw()) {
+ MOZ_ASSERT(ins->mirRaw()->isInstruction());
+ MInstruction* mir = ins->mirRaw()->toInstruction();
+ MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
+
+ // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
+ // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
+ // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
+ // interrupt callbacks can call JS (chrome JS or shell testing functions).
+ bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
+ if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
+ const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
+ masm.move32(Imm32(1), ReturnReg);
+ masm.store32(ReturnReg, AbsoluteAddress(addr));
+ }
+ }
+#endif
+
+ // Push an exit frame descriptor.
+ masm.PushFrameDescriptor(FrameType::IonJS);
+
+  // Call the wrapper function. The wrapper is in charge of unwinding the
+  // stack when returning from the call. Failures are handled with exceptions
+  // based on the return value of the C functions. To guard the outcome of the
+  // returned value, use another LIR instruction.
+ ensureOsiSpace();
+ uint32_t callOffset = masm.callJit(code);
+ markSafepointAt(callOffset, ins);
+
+#ifdef DEBUG
+ // Reset the disallowArbitraryCode flag after the call.
+ {
+ const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
+ masm.push(ReturnReg);
+ masm.move32(Imm32(0), ReturnReg);
+ masm.store32(ReturnReg, AbsoluteAddress(addr));
+ masm.pop(ReturnReg);
+ }
+#endif
+
+  // Pop the rest of the exit frame and the arguments left on the stack.
+ int framePop =
+ sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
+ masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
+
+ // Stack is:
+ // ... frame ...
+}
+
+template <typename Fn, Fn fn>
+void CodeGenerator::callVM(LInstruction* ins) {
+ VMFunctionId id = VMFunctionToId<Fn, fn>::id;
+ callVMInternal(id, ins);
+}
+
+// ArgSeq stores arguments for OutOfLineCallVM.
+//
+// OutOfLineCallVM instances are created with the "oolCallVM" function. The
+// second argument of this function is an instance of a class which provides a
+// "generate" method in charge of pushing the arguments, with "pushArg", for a
+// VMFunction.
+//
+// Such a list of arguments can be created by using the "ArgList" function,
+// which creates one instance of "ArgSeq" whose argument types are inferred
+// from the types of its arguments.
+//
+// The list of arguments must be written in the same order as if you were
+// calling the function in C++.
+//
+// Example:
+// ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
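+//
+// A fuller sketch, mirroring callers later in this file (illustrative only):
+//
+//   using Fn = BigInt* (*)(JSContext*, HandleValue);
+//   auto* ool = oolCallVM<Fn, ToBigInt>(lir, ArgList(operand),
+//                                       StoreRegisterTo(output));
+//   // ... emit the inline fast path, branching to ool->entry() for the
+//   // slow cases ...
+//   masm.bind(ool->rejoin());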
+
+template <typename... ArgTypes>
+class ArgSeq {
+ std::tuple<std::remove_reference_t<ArgTypes>...> args_;
+
+ template <std::size_t... ISeq>
+ inline void generate(CodeGenerator* codegen,
+ std::index_sequence<ISeq...>) const {
+ // Arguments are pushed in reverse order, from last argument to first
+ // argument.
+ (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
+ }
+
+ public:
+ explicit ArgSeq(ArgTypes&&... args)
+ : args_(std::forward<ArgTypes>(args)...) {}
+
+ inline void generate(CodeGenerator* codegen) const {
+ generate(codegen, std::index_sequence_for<ArgTypes...>{});
+ }
+
+#ifdef DEBUG
+ static constexpr size_t numArgs = sizeof...(ArgTypes);
+#endif
+};
+
+template <typename... ArgTypes>
+inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
+ return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
+}
+
+// Store wrappers, used to generate the right move of data after the VM call.
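+//
+// For illustration (based on uses later in this file): StoreNothing is used
+// when the VM call produces no result that needs moving, StoreRegisterTo and
+// StoreFloatRegisterTo when the result lands in a general-purpose or float
+// register, and StoreValueTo when the result is a Value.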
+
+struct StoreNothing {
+ inline void generate(CodeGenerator* codegen) const {}
+ inline LiveRegisterSet clobbered() const {
+ return LiveRegisterSet(); // No register gets clobbered
+ }
+};
+
+class StoreRegisterTo {
+ private:
+ Register out_;
+
+ public:
+ explicit StoreRegisterTo(Register out) : out_(out) {}
+
+ inline void generate(CodeGenerator* codegen) const {
+ // It's okay to use storePointerResultTo here - the VMFunction wrapper
+ // ensures the upper bytes are zero for bool/int32 return values.
+ codegen->storePointerResultTo(out_);
+ }
+ inline LiveRegisterSet clobbered() const {
+ LiveRegisterSet set;
+ set.add(out_);
+ return set;
+ }
+};
+
+class StoreFloatRegisterTo {
+ private:
+ FloatRegister out_;
+
+ public:
+ explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
+
+ inline void generate(CodeGenerator* codegen) const {
+ codegen->storeFloatResultTo(out_);
+ }
+ inline LiveRegisterSet clobbered() const {
+ LiveRegisterSet set;
+ set.add(out_);
+ return set;
+ }
+};
+
+template <typename Output>
+class StoreValueTo_ {
+ private:
+ Output out_;
+
+ public:
+ explicit StoreValueTo_(const Output& out) : out_(out) {}
+
+ inline void generate(CodeGenerator* codegen) const {
+ codegen->storeResultValueTo(out_);
+ }
+ inline LiveRegisterSet clobbered() const {
+ LiveRegisterSet set;
+ set.add(out_);
+ return set;
+ }
+};
+
+template <typename Output>
+StoreValueTo_<Output> StoreValueTo(const Output& out) {
+ return StoreValueTo_<Output>(out);
+}
+
+template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
+class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
+ private:
+ LInstruction* lir_;
+ ArgSeq args_;
+ StoreOutputTo out_;
+
+ public:
+ OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
+ const StoreOutputTo& out)
+ : lir_(lir), args_(args), out_(out) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineCallVM(this);
+ }
+
+ LInstruction* lir() const { return lir_; }
+ const ArgSeq& args() const { return args_; }
+ const StoreOutputTo& out() const { return out_; }
+};
+
+template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
+OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
+ const StoreOutputTo& out) {
+ MOZ_ASSERT(lir->mirRaw());
+ MOZ_ASSERT(lir->mirRaw()->isInstruction());
+
+#ifdef DEBUG
+ VMFunctionId id = VMFunctionToId<Fn, fn>::id;
+ const VMFunctionData& fun = GetVMFunction(id);
+ MOZ_ASSERT(fun.explicitArgs == args.numArgs);
+ MOZ_ASSERT(fun.returnsData() !=
+ (std::is_same_v<StoreOutputTo, StoreNothing>));
+#endif
+
+ OutOfLineCode* ool = new (alloc())
+ OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
+ addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
+ return ool;
+}
+
+template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
+void CodeGenerator::visitOutOfLineCallVM(
+ OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
+ LInstruction* lir = ool->lir();
+
+ saveLive(lir);
+ ool->args().generate(this);
+ callVM<Fn, fn>(lir);
+ ool->out().generate(this);
+ restoreLiveIgnore(lir, ool->out().clobbered());
+ masm.jump(ool->rejoin());
+}
+
+class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
+ private:
+ LInstruction* lir_;
+ size_t cacheIndex_;
+ size_t cacheInfoIndex_;
+
+ public:
+ OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
+ size_t cacheInfoIndex)
+ : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
+
+ void bind(MacroAssembler* masm) override {
+ // The binding of the initial jump is done in
+ // CodeGenerator::visitOutOfLineICFallback.
+ }
+
+ size_t cacheIndex() const { return cacheIndex_; }
+ size_t cacheInfoIndex() const { return cacheInfoIndex_; }
+ LInstruction* lir() const { return lir_; }
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineICFallback(this);
+ }
+};
+
+void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
+ if (cacheIndex == SIZE_MAX) {
+ masm.setOOM();
+ return;
+ }
+
+ DataPtr<IonIC> cache(this, cacheIndex);
+ MInstruction* mir = lir->mirRaw()->toInstruction();
+ cache->setScriptedLocation(mir->block()->info().script(),
+ mir->resumePoint()->pc());
+
+ Register temp = cache->scratchRegisterForEntryJump();
+ icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
+ masm.jump(Address(temp, 0));
+
+ MOZ_ASSERT(!icInfo_.empty());
+
+ OutOfLineICFallback* ool =
+ new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
+ addOutOfLineCode(ool, mir);
+
+ masm.bind(ool->rejoin());
+ cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
+}
+
+void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
+ LInstruction* lir = ool->lir();
+ size_t cacheIndex = ool->cacheIndex();
+ size_t cacheInfoIndex = ool->cacheInfoIndex();
+
+ DataPtr<IonIC> ic(this, cacheIndex);
+
+ // Register the location of the OOL path in the IC.
+ ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
+
+ switch (ic->kind()) {
+ case CacheKind::GetProp:
+ case CacheKind::GetElem: {
+ IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
+
+ saveLive(lir);
+
+ pushArg(getPropIC->id());
+ pushArg(getPropIC->value());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
+ HandleValue, HandleValue, MutableHandleValue);
+ callVM<Fn, IonGetPropertyIC::update>(lir);
+
+ StoreValueTo(getPropIC->output()).generate(this);
+ restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::GetPropSuper:
+ case CacheKind::GetElemSuper: {
+ IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
+
+ saveLive(lir);
+
+ pushArg(getPropSuperIC->id());
+ pushArg(getPropSuperIC->receiver());
+ pushArg(getPropSuperIC->object());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn =
+ bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
+ HandleValue, HandleValue, MutableHandleValue);
+ callVM<Fn, IonGetPropSuperIC::update>(lir);
+
+ StoreValueTo(getPropSuperIC->output()).generate(this);
+ restoreLiveIgnore(lir,
+ StoreValueTo(getPropSuperIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::SetProp:
+ case CacheKind::SetElem: {
+ IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
+
+ saveLive(lir);
+
+ pushArg(setPropIC->rhs());
+ pushArg(setPropIC->id());
+ pushArg(setPropIC->object());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
+ HandleObject, HandleValue, HandleValue);
+ callVM<Fn, IonSetPropertyIC::update>(lir);
+
+ restoreLive(lir);
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::GetName: {
+ IonGetNameIC* getNameIC = ic->asGetNameIC();
+
+ saveLive(lir);
+
+ pushArg(getNameIC->environment());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
+ MutableHandleValue);
+ callVM<Fn, IonGetNameIC::update>(lir);
+
+ StoreValueTo(getNameIC->output()).generate(this);
+ restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::BindName: {
+ IonBindNameIC* bindNameIC = ic->asBindNameIC();
+
+ saveLive(lir);
+
+ pushArg(bindNameIC->environment());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn =
+ JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
+ callVM<Fn, IonBindNameIC::update>(lir);
+
+ StoreRegisterTo(bindNameIC->output()).generate(this);
+ restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::GetIterator: {
+ IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
+
+ saveLive(lir);
+
+ pushArg(getIteratorIC->value());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
+ HandleValue);
+ callVM<Fn, IonGetIteratorIC::update>(lir);
+
+ StoreRegisterTo(getIteratorIC->output()).generate(this);
+ restoreLiveIgnore(lir,
+ StoreRegisterTo(getIteratorIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::OptimizeSpreadCall: {
+ auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
+
+ saveLive(lir);
+
+ pushArg(optimizeSpreadCallIC->value());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
+ HandleValue, MutableHandleValue);
+ callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
+
+ StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
+ restoreLiveIgnore(
+ lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::In: {
+ IonInIC* inIC = ic->asInIC();
+
+ saveLive(lir);
+
+ pushArg(inIC->object());
+ pushArg(inIC->key());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
+ HandleObject, bool*);
+ callVM<Fn, IonInIC::update>(lir);
+
+ StoreRegisterTo(inIC->output()).generate(this);
+ restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::HasOwn: {
+ IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
+
+ saveLive(lir);
+
+ pushArg(hasOwnIC->id());
+ pushArg(hasOwnIC->value());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
+ HandleValue, int32_t*);
+ callVM<Fn, IonHasOwnIC::update>(lir);
+
+ StoreRegisterTo(hasOwnIC->output()).generate(this);
+ restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::CheckPrivateField: {
+ IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
+
+ saveLive(lir);
+
+ pushArg(checkPrivateFieldIC->id());
+ pushArg(checkPrivateFieldIC->value());
+
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
+ HandleValue, HandleValue, bool*);
+ callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
+
+ StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
+ restoreLiveIgnore(
+ lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::InstanceOf: {
+ IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
+
+ saveLive(lir);
+
+ pushArg(hasInstanceOfIC->rhs());
+ pushArg(hasInstanceOfIC->lhs());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
+ HandleValue lhs, HandleObject rhs, bool* res);
+ callVM<Fn, IonInstanceOfIC::update>(lir);
+
+ StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
+ restoreLiveIgnore(lir,
+ StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::UnaryArith: {
+ IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
+
+ saveLive(lir);
+
+ pushArg(unaryArithIC->input());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
+ IonUnaryArithIC* stub, HandleValue val,
+ MutableHandleValue res);
+ callVM<Fn, IonUnaryArithIC::update>(lir);
+
+ StoreValueTo(unaryArithIC->output()).generate(this);
+ restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::ToPropertyKey: {
+ IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
+
+ saveLive(lir);
+
+ pushArg(toPropertyKeyIC->input());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
+ IonToPropertyKeyIC* ic, HandleValue val,
+ MutableHandleValue res);
+ callVM<Fn, IonToPropertyKeyIC::update>(lir);
+
+ StoreValueTo(toPropertyKeyIC->output()).generate(this);
+ restoreLiveIgnore(lir,
+ StoreValueTo(toPropertyKeyIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::BinaryArith: {
+ IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
+
+ saveLive(lir);
+
+ pushArg(binaryArithIC->rhs());
+ pushArg(binaryArithIC->lhs());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
+ IonBinaryArithIC* stub, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue res);
+ callVM<Fn, IonBinaryArithIC::update>(lir);
+
+ StoreValueTo(binaryArithIC->output()).generate(this);
+ restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::Compare: {
+ IonCompareIC* compareIC = ic->asCompareIC();
+
+ saveLive(lir);
+
+ pushArg(compareIC->rhs());
+ pushArg(compareIC->lhs());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn =
+ bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
+ HandleValue lhs, HandleValue rhs, bool* res);
+ callVM<Fn, IonCompareIC::update>(lir);
+
+ StoreRegisterTo(compareIC->output()).generate(this);
+ restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::CloseIter: {
+ IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
+
+ saveLive(lir);
+
+ pushArg(closeIterIC->iter());
+ icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+ pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+ using Fn =
+ bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
+ callVM<Fn, IonCloseIterIC::update>(lir);
+
+ restoreLive(lir);
+
+ masm.jump(ool->rejoin());
+ return;
+ }
+ case CacheKind::Call:
+ case CacheKind::TypeOf:
+ case CacheKind::ToBool:
+ case CacheKind::GetIntrinsic:
+ case CacheKind::NewArray:
+ case CacheKind::NewObject:
+ MOZ_CRASH("Unsupported IC");
+ }
+ MOZ_CRASH();
+}
+
+StringObject* MNewStringObject::templateObj() const {
+ return &templateObj_->as<StringObject>();
+}
+
+CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorSpecific(gen, graph, masm),
+ ionScriptLabels_(gen->alloc()),
+ ionNurseryObjectLabels_(gen->alloc()),
+ scriptCounts_(nullptr),
+ realmStubsToReadBarrier_(0) {}
+
+CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
+
+void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
+ ValueOperand operand = ToValue(lir, LValueToInt32::Input);
+ Register output = ToRegister(lir->output());
+ FloatRegister temp = ToFloatRegister(lir->tempFloat());
+
+ Label fails;
+ if (lir->mode() == LValueToInt32::TRUNCATE) {
+ OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());
+
+ // We can only handle strings in truncation contexts, like bitwise
+ // operations.
+ Register stringReg = ToRegister(lir->temp());
+ using Fn = bool (*)(JSContext*, JSString*, double*);
+ auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
+ StoreFloatRegisterTo(temp));
+ Label* stringEntry = oolString->entry();
+ Label* stringRejoin = oolString->rejoin();
+
+ masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
+ oolDouble->entry(), stringReg, temp, output,
+ &fails);
+ masm.bind(oolDouble->rejoin());
+ } else {
+ MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
+ masm.convertValueToInt32(operand, temp, output, &fails,
+ lir->mirNormal()->needsNegativeZeroCheck(),
+ lir->mirNormal()->conversion());
+ }
+
+ bailoutFrom(&fails, lir->snapshot());
+}
+
+void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
+ ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
+ FloatRegister output = ToFloatRegister(lir->output());
+
+  // Set if we can handle other primitives besides strings, as long as
+  // they're guaranteed to never throw. This rules out symbols and BigInts,
+  // but allows booleans, undefined, and null.
+ bool hasNonStringPrimitives =
+ lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
+
+ Label isDouble, isInt32, isBool, isNull, isUndefined, done;
+
+ {
+ ScratchTagScope tag(masm, operand);
+ masm.splitTagForTest(operand, tag);
+
+ masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
+ masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
+
+ if (hasNonStringPrimitives) {
+ masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
+ masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
+ masm.branchTestNull(Assembler::Equal, tag, &isNull);
+ }
+ }
+
+ bailout(lir->snapshot());
+
+ if (hasNonStringPrimitives) {
+ masm.bind(&isNull);
+ masm.loadConstantDouble(0.0, output);
+ masm.jump(&done);
+ }
+
+ if (hasNonStringPrimitives) {
+ masm.bind(&isUndefined);
+ masm.loadConstantDouble(GenericNaN(), output);
+ masm.jump(&done);
+ }
+
+ if (hasNonStringPrimitives) {
+ masm.bind(&isBool);
+ masm.boolValueToDouble(operand, output);
+ masm.jump(&done);
+ }
+
+ masm.bind(&isInt32);
+ masm.int32ValueToDouble(operand, output);
+ masm.jump(&done);
+
+ masm.bind(&isDouble);
+ masm.unboxDouble(operand, output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
+ ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
+ FloatRegister output = ToFloatRegister(lir->output());
+
+  // Set if we can handle other primitives besides strings, as long as
+  // they're guaranteed to never throw. This rules out symbols and BigInts,
+  // but allows booleans, undefined, and null.
+ bool hasNonStringPrimitives =
+ lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
+
+ Label isDouble, isInt32, isBool, isNull, isUndefined, done;
+
+ {
+ ScratchTagScope tag(masm, operand);
+ masm.splitTagForTest(operand, tag);
+
+ masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
+ masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
+
+ if (hasNonStringPrimitives) {
+ masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
+ masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
+ masm.branchTestNull(Assembler::Equal, tag, &isNull);
+ }
+ }
+
+ bailout(lir->snapshot());
+
+ if (hasNonStringPrimitives) {
+ masm.bind(&isNull);
+ masm.loadConstantFloat32(0.0f, output);
+ masm.jump(&done);
+ }
+
+ if (hasNonStringPrimitives) {
+ masm.bind(&isUndefined);
+ masm.loadConstantFloat32(float(GenericNaN()), output);
+ masm.jump(&done);
+ }
+
+ if (hasNonStringPrimitives) {
+ masm.bind(&isBool);
+ masm.boolValueToFloat32(operand, output);
+ masm.jump(&done);
+ }
+
+ masm.bind(&isInt32);
+ masm.int32ValueToFloat32(operand, output);
+ masm.jump(&done);
+
+ masm.bind(&isDouble);
+ // ARM and MIPS may not have a double register available if we've
+ // allocated output as a float32.
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
+ ScratchDoubleScope fpscratch(masm);
+ masm.unboxDouble(operand, fpscratch);
+ masm.convertDoubleToFloat32(fpscratch, output);
+#else
+ masm.unboxDouble(operand, output);
+ masm.convertDoubleToFloat32(output, output);
+#endif
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
+ ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
+ Register output = ToRegister(lir->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleValue);
+ auto* ool =
+ oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));
+
+ Register tag = masm.extractTag(operand, output);
+
+ Label notBigInt, done;
+ masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
+ masm.unboxBigInt(operand, output);
+ masm.jump(&done);
+ masm.bind(&notBigInt);
+
+ masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
+ masm.branchTestString(Assembler::Equal, tag, ool->entry());
+
+ // ToBigInt(object) can have side-effects; all other types throw a TypeError.
+ bailout(lir->snapshot());
+
+ masm.bind(ool->rejoin());
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
+ masm.convertInt32ToDouble(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
+ masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
+ masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
+ masm.convertInt32ToFloat32(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
+ Label fail;
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ masm.convertDoubleToInt32(input, output, &fail,
+ lir->mir()->needsNegativeZeroCheck());
+ bailoutFrom(&fail, lir->snapshot());
+}
+
+void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
+ Label fail;
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ masm.convertFloat32ToInt32(input, output, &fail,
+ lir->mir()->needsNegativeZeroCheck());
+ bailoutFrom(&fail, lir->snapshot());
+}
+
+void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
+#ifdef JS_64BIT
+ // This LIR instruction is only used if the input can be negative.
+ MOZ_ASSERT(lir->mir()->canBeNegative());
+
+ Register output = ToRegister(lir->output());
+ const LAllocation* input = lir->input();
+ if (input->isRegister()) {
+ masm.move32SignExtendToPtr(ToRegister(input), output);
+ } else {
+ masm.load32SignExtendToPtr(ToAddress(input), output);
+ }
+#else
+ MOZ_CRASH("Not used on 32-bit platforms");
+#endif
+}
+
+void CodeGenerator::visitNonNegativeIntPtrToInt32(
+ LNonNegativeIntPtrToInt32* lir) {
+#ifdef JS_64BIT
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(ToRegister(lir->input()) == output);
+
+ Label bail;
+ masm.guardNonNegativeIntPtrToInt32(output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+#else
+ MOZ_CRASH("Not used on 32-bit platforms");
+#endif
+}
+
+void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
+ Register input = ToRegister(lir->input());
+ FloatRegister output = ToFloatRegister(lir->output());
+ masm.convertIntPtrToDouble(input, output);
+}
+
+void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(ToRegister(lir->input()) == output);
+
+ uint32_t byteSize = lir->mir()->byteSize();
+
+#ifdef DEBUG
+ Label ok;
+ masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
+ masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
+ masm.bind(&ok);
+#endif
+
+ Label bail;
+ masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::emitOOLTestObject(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch) {
+ saveVolatile(scratch);
+ using Fn = bool (*)(JSObject* obj);
+ masm.setupAlignedABICall();
+ masm.passABIArg(objreg);
+ masm.callWithABI<Fn, js::EmulatesUndefined>();
+ masm.storeCallPointerResult(scratch);
+ restoreVolatile(scratch);
+
+ masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
+ masm.jump(ifDoesntEmulateUndefined);
+}
+
+// Base out-of-line code generator for all tests of the truthiness of an
+// object, where the object might not be truthy. (Recall that per spec all
+// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
+// flag to permit objects to look like |undefined| in certain contexts,
+// including in object truthiness testing.) We check truthiness inline except
+// when we're testing it on a proxy, in which case out-of-line code will call
+// EmulatesUndefined for a conclusive answer.
+class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
+ Register objreg_;
+ Register scratch_;
+
+ Label* ifEmulatesUndefined_;
+ Label* ifDoesntEmulateUndefined_;
+
+#ifdef DEBUG
+ bool initialized() { return ifEmulatesUndefined_ != nullptr; }
+#endif
+
+ public:
+ OutOfLineTestObject()
+ : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}
+
+ void accept(CodeGenerator* codegen) final {
+ MOZ_ASSERT(initialized());
+ codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
+ ifDoesntEmulateUndefined_, scratch_);
+ }
+
+ // Specify the register where the object to be tested is found, labels to
+ // jump to if the object is truthy or falsy, and a scratch register for
+ // use in the out-of-line path.
+ void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined, Register scratch) {
+ MOZ_ASSERT(!initialized());
+ MOZ_ASSERT(ifEmulatesUndefined);
+ objreg_ = objreg;
+ scratch_ = scratch;
+ ifEmulatesUndefined_ = ifEmulatesUndefined;
+ ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
+ }
+};
+
+// A subclass of OutOfLineTestObject containing two extra labels, for use when
+// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
+// code. The user should bind these labels in inline code, and specify them as
+// targets via setInputAndTargets, as appropriate.
+class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
+ Label label1_;
+ Label label2_;
+
+ public:
+ OutOfLineTestObjectWithLabels() = default;
+
+ Label* label1() { return &label1_; }
+ Label* label2() { return &label2_; }
+};
+
+void CodeGenerator::testObjectEmulatesUndefinedKernel(
+ Register objreg, Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined, Register scratch,
+ OutOfLineTestObject* ool) {
+ ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
+ scratch);
+
+ // Perform a fast-path check of the object's class flags if the object's
+ // not a proxy. Let out-of-line code handle the slow cases that require
+ // saving registers, making a function call, and restoring registers.
+ masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
+ ifEmulatesUndefined);
+}
+
+void CodeGenerator::branchTestObjectEmulatesUndefined(
+ Register objreg, Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined, Register scratch,
+ OutOfLineTestObject* ool) {
+ MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
+ "ifDoesntEmulateUndefined will be bound to the fallthrough path");
+
+ testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
+ ifDoesntEmulateUndefined, scratch, ool);
+ masm.bind(ifDoesntEmulateUndefined);
+}
+
+void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch,
+ OutOfLineTestObject* ool) {
+ testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
+ ifDoesntEmulateUndefined, scratch, ool);
+ masm.jump(ifDoesntEmulateUndefined);
+}
+
+void CodeGenerator::testValueTruthyForType(
+ JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
+ Register tempToUnbox, Register temp, FloatRegister floatTemp,
+ Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
+ bool skipTypeTest) {
+#ifdef DEBUG
+ if (skipTypeTest) {
+ Label expected;
+ masm.branchTestType(Assembler::Equal, tag, type, &expected);
+ masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
+ masm.bind(&expected);
+ }
+#endif
+
+ // Handle irregular types first.
+ switch (type) {
+ case JSVAL_TYPE_UNDEFINED:
+ case JSVAL_TYPE_NULL:
+ // Undefined and null are falsy.
+ if (!skipTypeTest) {
+ masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
+ } else {
+ masm.jump(ifFalsy);
+ }
+ return;
+ case JSVAL_TYPE_SYMBOL:
+ // Symbols are truthy.
+ if (!skipTypeTest) {
+ masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
+ } else {
+ masm.jump(ifTruthy);
+ }
+ return;
+ case JSVAL_TYPE_OBJECT: {
+ Label notObject;
+ if (!skipTypeTest) {
+ masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
+ }
+ ScratchTagScopeRelease _(&tag);
+ Register objreg = masm.extractObject(value, tempToUnbox);
+ testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
+ masm.bind(&notObject);
+ return;
+ }
+ default:
+ break;
+ }
+
+ // Check the type of the value (unless this is the last possible type).
+ Label differentType;
+ if (!skipTypeTest) {
+ masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
+ }
+
+ // Branch if the value is falsy.
+ ScratchTagScopeRelease _(&tag);
+ switch (type) {
+ case JSVAL_TYPE_BOOLEAN: {
+ masm.branchTestBooleanTruthy(false, value, ifFalsy);
+ break;
+ }
+ case JSVAL_TYPE_INT32: {
+ masm.branchTestInt32Truthy(false, value, ifFalsy);
+ break;
+ }
+ case JSVAL_TYPE_STRING: {
+ masm.branchTestStringTruthy(false, value, ifFalsy);
+ break;
+ }
+ case JSVAL_TYPE_BIGINT: {
+ masm.branchTestBigIntTruthy(false, value, ifFalsy);
+ break;
+ }
+ case JSVAL_TYPE_DOUBLE: {
+ masm.unboxDouble(value, floatTemp);
+ masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected value type");
+ }
+
+ // If we reach this point, the value is truthy. We fall through for
+ // truthy on the last test; otherwise, branch.
+ if (!skipTypeTest) {
+ masm.jump(ifTruthy);
+ }
+
+ masm.bind(&differentType);
+}
+
+void CodeGenerator::testValueTruthy(const ValueOperand& value,
+ Register tempToUnbox, Register temp,
+ FloatRegister floatTemp,
+ const TypeDataList& observedTypes,
+ Label* ifTruthy, Label* ifFalsy,
+ OutOfLineTestObject* ool) {
+ ScratchTagScope tag(masm, value);
+ masm.splitTagForTest(value, tag);
+
+ const std::initializer_list<JSValueType> defaultOrder = {
+ JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN,
+ JSVAL_TYPE_INT32, JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
+ JSVAL_TYPE_DOUBLE, JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
+
+ mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
+
+ // Generate tests for previously observed types first.
+ // The TypeDataList is sorted by descending frequency.
+ for (auto& observed : observedTypes) {
+ JSValueType type = observed.type();
+ remaining -= type;
+
+ testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
+ ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
+ }
+
+ // Generate tests for remaining types.
+ for (auto type : defaultOrder) {
+ if (!remaining.contains(type)) {
+ continue;
+ }
+ remaining -= type;
+
+ // We don't need a type test for the last possible type.
+ bool skipTypeTest = remaining.isEmpty();
+ testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
+ ifTruthy, ifFalsy, ool, skipTypeTest);
+ }
+ MOZ_ASSERT(remaining.isEmpty());
+
+ // We fall through if the final test is truthy.
+}
+
+void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
+ Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
+ Register input = ToRegister(lir->input());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ masm.branchIfBigIntIsZero(input, ifFalseLabel);
+ } else {
+ masm.branchIfBigIntIsZero(input, ifFalseLabel);
+ jumpToBlock(lir->ifTrue());
+ }
+}
+
+void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
+ Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
+ Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
+ Register input = ToRegister(lir->input());
+
+ auto* ool = new (alloc()) OutOfLineTestObject();
+ addOutOfLineCode(ool, lir->mir());
+
+ testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
+ ool);
+}
+
+void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
+ auto* ool = new (alloc()) OutOfLineTestObject();
+ addOutOfLineCode(ool, lir->mir());
+
+ Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
+ Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
+
+ ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
+ Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
+ Register temp = ToRegister(lir->temp2());
+ FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
+ const TypeDataList& observedTypes = lir->mir()->observedTypes();
+
+ testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
+ falsy, ool);
+ masm.jump(truthy);
+}
+
+void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ const JSAtomState& names = gen->runtime->names();
+ Label true_, done;
+
+ masm.branchTest32(Assembler::NonZero, input, input, &true_);
+ masm.movePtr(ImmGCPtr(names.false_), output);
+ masm.jump(&done);
+
+ masm.bind(&true_);
+ masm.movePtr(ImmGCPtr(names.true_), output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::emitIntToString(Register input, Register output,
+ Label* ool) {
+ masm.boundsCheck32PowerOfTwo(input, StaticStrings::INT_STATIC_LIMIT, ool);
+
+ // Fast path for small integers.
+ masm.movePtr(ImmPtr(&gen->runtime->staticStrings().intStaticTable), output);
+ masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
+}
+
+void CodeGenerator::visitIntToString(LIntToString* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ using Fn = JSLinearString* (*)(JSContext*, int);
+ OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
+ lir, ArgList(input), StoreRegisterTo(output));
+
+ emitIntToString(input, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register temp = ToRegister(lir->temp0());
+ Register output = ToRegister(lir->output());
+
+ using Fn = JSString* (*)(JSContext*, double);
+ OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
+ lir, ArgList(input), StoreRegisterTo(output));
+
+ // Try double to integer conversion and run integer to string code.
+ masm.convertDoubleToInt32(input, temp, ool->entry(), false);
+ emitIntToString(temp, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitValueToString(LValueToString* lir) {
+ ValueOperand input = ToValue(lir, LValueToString::InputIndex);
+ Register output = ToRegister(lir->output());
+
+ using Fn = JSString* (*)(JSContext*, HandleValue);
+ OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
+ lir, ArgList(input), StoreRegisterTo(output));
+
+ Label done;
+ Register tag = masm.extractTag(input, output);
+ const JSAtomState& names = gen->runtime->names();
+
+ // String
+ {
+ Label notString;
+ masm.branchTestString(Assembler::NotEqual, tag, &notString);
+ masm.unboxString(input, output);
+ masm.jump(&done);
+ masm.bind(&notString);
+ }
+
+ // Integer
+ {
+ Label notInteger;
+ masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
+ Register unboxed = ToTempUnboxRegister(lir->temp0());
+ unboxed = masm.extractInt32(input, unboxed);
+ emitIntToString(unboxed, output, ool->entry());
+ masm.jump(&done);
+ masm.bind(&notInteger);
+ }
+
+ // Double
+ {
+    // Note: no fastpath. Need two extra registers and can only convert
+    // doubles that fit in integers and are smaller than
+    // StaticStrings::INT_STATIC_LIMIT.
+ masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
+ }
+
+ // Undefined
+ {
+ Label notUndefined;
+ masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
+ masm.movePtr(ImmGCPtr(names.undefined), output);
+ masm.jump(&done);
+ masm.bind(&notUndefined);
+ }
+
+ // Null
+ {
+ Label notNull;
+ masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
+ masm.movePtr(ImmGCPtr(names.null), output);
+ masm.jump(&done);
+ masm.bind(&notNull);
+ }
+
+ // Boolean
+ {
+ Label notBoolean, true_;
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
+ masm.branchTestBooleanTruthy(true, input, &true_);
+ masm.movePtr(ImmGCPtr(names.false_), output);
+ masm.jump(&done);
+ masm.bind(&true_);
+ masm.movePtr(ImmGCPtr(names.true_), output);
+ masm.jump(&done);
+ masm.bind(&notBoolean);
+ }
+
+ // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
+ if (lir->mir()->mightHaveSideEffects()) {
+ // Object
+ if (lir->mir()->supportSideEffects()) {
+ masm.branchTestObject(Assembler::Equal, tag, ool->entry());
+ } else {
+ // Bail.
+ MOZ_ASSERT(lir->mir()->needsSnapshot());
+ Label bail;
+ masm.branchTestObject(Assembler::Equal, tag, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+ }
+
+ // Symbol
+ if (lir->mir()->supportSideEffects()) {
+ masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
+ } else {
+ // Bail.
+ MOZ_ASSERT(lir->mir()->needsSnapshot());
+ Label bail;
+ masm.branchTestSymbol(Assembler::Equal, tag, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+ }
+ }
+
+ // BigInt
+ {
+ // No fastpath currently implemented.
+ masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
+ }
+
+ masm.assumeUnreachable("Unexpected type for LValueToString.");
+
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
+
+using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);
+
+static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
+ size_t offset, Register buffer,
+ LiveGeneralRegisterSet& liveVolatiles,
+ StoreBufferMutationFn fun) {
+ Label callVM;
+ Label exit;
+
+ // Call into the VM to barrier the write. The only registers that need to
+ // be preserved are those in liveVolatiles, so once they are saved on the
+ // stack all volatile registers are available for use.
+ masm.bind(&callVM);
+ masm.PushRegsInMask(liveVolatiles);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ regs.takeUnchecked(buffer);
+ regs.takeUnchecked(holder);
+ Register addrReg = regs.takeAny();
+
+ masm.computeEffectiveAddress(Address(holder, offset), addrReg);
+
+ bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
+ if (needExtraReg) {
+ masm.push(holder);
+ masm.setupUnalignedABICall(holder);
+ } else {
+ masm.setupUnalignedABICall(regs.takeAny());
+ }
+ masm.passABIArg(buffer);
+ masm.passABIArg(addrReg);
+ masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun), MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ if (needExtraReg) {
+ masm.pop(holder);
+ }
+ masm.PopRegsInMask(liveVolatiles);
+ masm.bind(&exit);
+}
+
+// Warning: this function modifies prev and next.
+static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
+ size_t offset, Register prev, Register next,
+ LiveGeneralRegisterSet& liveVolatiles) {
+ Label exit;
+ Label checkRemove, putCell;
+
+ // if (next && (buffer = next->storeBuffer()))
+ // but we never pass in nullptr for next.
+ Register storebuffer = next;
+ masm.loadStoreBuffer(next, storebuffer);
+ masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);
+
+ // if (prev && prev->storeBuffer())
+ masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
+ masm.loadStoreBuffer(prev, prev);
+ masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);
+
+ // buffer->putCell(cellp)
+ masm.bind(&putCell);
+ EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
+ JSString::addCellAddressToStoreBuffer);
+ masm.jump(&exit);
+
+ // if (prev && (buffer = prev->storeBuffer()))
+ masm.bind(&checkRemove);
+ masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
+ masm.loadStoreBuffer(prev, storebuffer);
+ masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
+ EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
+ JSString::removeCellAddressFromStoreBuffer);
+
+ masm.bind(&exit);
+}
+
+void CodeGenerator::visitRegExp(LRegExp* lir) {
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+ JSObject* source = lir->mir()->source();
+
+ using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
+ OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
+ lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
+ if (lir->mir()->hasShared()) {
+ TemplateObject templateObject(source);
+ masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
+ ool->entry());
+ } else {
+ masm.jump(ool->entry());
+ }
+ masm.bind(ool->rejoin());
+}
+
+static constexpr int32_t RegExpPairsVectorStartOffset(
+ int32_t inputOutputDataStartOffset) {
+ return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
+ int32_t(sizeof(MatchPairs));
+}
+
+static Address RegExpPairCountAddress(MacroAssembler& masm,
+ int32_t inputOutputDataStartOffset) {
+ return Address(FramePointer, inputOutputDataStartOffset +
+ int32_t(InputOutputDataSize) +
+ MatchPairs::offsetOfPairCount());
+}
+
+// When the unicode flag is set, if lastIndex points to a trail
+// surrogate, we should step back to the corresponding lead surrogate.
+// See ExecuteRegExp in builtin/RegExp.cpp for more detail.
+static void StepBackToLeadSurrogate(MacroAssembler& masm, Register regexpShared,
+ Register input, Register lastIndex,
+ Register temp1, Register temp2) {
+ Label done;
+
+ // If the unicode flag is not set, there is nothing to do.
+ masm.branchTest32(Assembler::Zero,
+ Address(regexpShared, RegExpShared::offsetOfFlags()),
+ Imm32(int32_t(JS::RegExpFlag::Unicode)), &done);
+
+ // If the input is latin1, there can't be any surrogates.
+ masm.branchLatin1String(input, &done);
+
+  // Check if |lastIndex > 0 && lastIndex < input->length()|.
+  // lastIndex is already known to be non-negative here.
+ masm.branchTest32(Assembler::Zero, lastIndex, lastIndex, &done);
+ masm.loadStringLength(input, temp1);
+ masm.branch32(Assembler::AboveOrEqual, lastIndex, temp1, &done);
+
+ // For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
+ // LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following
+ // equations hold.
+ //
+ // SurrogateMin ≤ x ≤ SurrogateMax
+ // <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
+ // <> ((x - SurrogateMin) >>> 10) = 0 where >>> is an unsigned-shift
+ // See Hacker's Delight, section 4-1 for details.
+ //
+ // ((x - SurrogateMin) >>> 10) = 0
+ // <> floor((x - SurrogateMin) / 1024) = 0
+ // <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
+ // <> floor(x / 1024) = SurrogateMin / 1024
+ // <> floor(x / 1024) * 1024 = SurrogateMin
+ // <> (x >>> 10) << 10 = SurrogateMin
+ // <> x & ~(2^10 - 1) = SurrogateMin
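+  //
+  // Concretely (standard surrogate ranges, stated here only for
+  // illustration): a trail surrogate x satisfies 0xDC00 <= x <= 0xDFFF,
+  // which by the equivalence above is (x & 0xFC00) == 0xDC00, i.e.
+  // (x & SurrogateMask) == TrailSurrogateMin; the lead-surrogate check
+  // below is the same with LeadSurrogateMin (0xD800).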
+
+ constexpr char16_t SurrogateMask = 0xFC00;
+
+ Register charsReg = temp1;
+ masm.loadStringChars(input, charsReg, CharEncoding::TwoByte);
+
+ // Check if input[lastIndex] is trail surrogate.
+ masm.loadChar(charsReg, lastIndex, temp2, CharEncoding::TwoByte);
+ masm.and32(Imm32(SurrogateMask), temp2);
+ masm.branch32(Assembler::NotEqual, temp2, Imm32(unicode::TrailSurrogateMin),
+ &done);
+
+ // Check if input[lastIndex-1] is lead surrogate.
+ masm.loadChar(charsReg, lastIndex, temp2, CharEncoding::TwoByte,
+ -int32_t(sizeof(char16_t)));
+ masm.and32(Imm32(SurrogateMask), temp2);
+ masm.branch32(Assembler::NotEqual, temp2, Imm32(unicode::LeadSurrogateMin),
+ &done);
+
+ // Move lastIndex back to lead surrogate.
+ masm.sub32(Imm32(1), lastIndex);
+
+ masm.bind(&done);
+}
+
+static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
+ Register input, Register lastIndex,
+ Register staticsReg, Register temp1,
+ Register temp2, gc::Heap initialStringHeap,
+ LiveGeneralRegisterSet& volatileRegs) {
+ Address pendingInputAddress(staticsReg,
+ RegExpStatics::offsetOfPendingInput());
+ Address matchesInputAddress(staticsReg,
+ RegExpStatics::offsetOfMatchesInput());
+ Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
+ Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());
+
+ masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
+ masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
+ masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);
+
+ if (initialStringHeap == gc::Heap::Default) {
+ // Writing into RegExpStatics tenured memory; must post-barrier.
+ if (staticsReg.volatile_()) {
+ volatileRegs.add(staticsReg);
+ }
+
+ masm.loadPtr(pendingInputAddress, temp1);
+ masm.storePtr(input, pendingInputAddress);
+ masm.movePtr(input, temp2);
+ EmitPostWriteBarrierS(masm, staticsReg,
+ RegExpStatics::offsetOfPendingInput(),
+ temp1 /* prev */, temp2 /* next */, volatileRegs);
+
+ masm.loadPtr(matchesInputAddress, temp1);
+ masm.storePtr(input, matchesInputAddress);
+ masm.movePtr(input, temp2);
+ EmitPostWriteBarrierS(masm, staticsReg,
+ RegExpStatics::offsetOfMatchesInput(),
+ temp1 /* prev */, temp2 /* next */, volatileRegs);
+ } else {
+ masm.storePtr(input, pendingInputAddress);
+ masm.storePtr(input, matchesInputAddress);
+ }
+
+ masm.storePtr(lastIndex,
+ Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
+ masm.store32(
+ Imm32(1),
+ Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));
+
+ masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
+ RegExpObject::SHARED_SLOT)),
+ temp1, JSVAL_TYPE_PRIVATE_GCTHING);
+ masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
+ masm.storePtr(temp2, lazySourceAddress);
+ static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
+ masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
+ masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
+}
+
+// Prepare an InputOutputData and optional MatchPairs, for which space has
+// been allocated on the stack, and try to execute a RegExp on a string input.
+// If the RegExp was successfully executed and matched the input, fall
+// through. Otherwise, jump to notFound or failure.
+//
+// inputOutputDataStartOffset is the offset relative to the frame pointer
+// register. This offset is negative for the RegExpExecTest stub.
+static bool PrepareAndExecuteRegExp(JSContext* cx, MacroAssembler& masm,
+ Register regexp, Register input,
+ Register lastIndex, Register temp1,
+ Register temp2, Register temp3,
+ int32_t inputOutputDataStartOffset,
+ gc::Heap initialStringHeap, Label* notFound,
+ Label* failure) {
+ JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");
+
+ using irregexp::InputOutputData;
+
+ /*
+ * [SMDOC] Stack layout for PrepareAndExecuteRegExp
+ *
+ * Before this function is called, the caller is responsible for
+ * allocating enough stack space for the following data:
+ *
+ * inputOutputDataStartOffset +-----> +---------------+
+ * |InputOutputData|
+ * inputStartAddress +----------> inputStart|
+ * inputEndAddress +----------> inputEnd|
+ * startIndexAddress +----------> startIndex|
+ * matchesAddress +----------> matches|-----+
+ * +---------------+ |
+ * matchPairs(Address|Offset) +-----> +---------------+ <--+
+ * | MatchPairs |
+ * pairCountAddress +----------> count |
+ * pairsPointerAddress +----------> pairs |-----+
+ * +---------------+ |
+ * pairsArray(Address|Offset) +-----> +---------------+ <--+
+ * | MatchPair |
+ * firstMatchStartAddress +----------> start | <--+
+ * | limit | |
+ * +---------------+ |
+ * . |
+ * . Reserved space for
+ * . RegExpObject::MaxPairCount
+ * . MatchPair objects
+ * . |
+ * +---------------+ |
+ * | MatchPair | |
+ * | start | |
+ * | limit | <--+
+ * +---------------+
+ */
+
+ int32_t ioOffset = inputOutputDataStartOffset;
+ int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
+ int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));
+
+ Address inputStartAddress(FramePointer,
+ ioOffset + InputOutputData::offsetOfInputStart());
+ Address inputEndAddress(FramePointer,
+ ioOffset + InputOutputData::offsetOfInputEnd());
+ Address startIndexAddress(FramePointer,
+ ioOffset + InputOutputData::offsetOfStartIndex());
+ Address matchesAddress(FramePointer,
+ ioOffset + InputOutputData::offsetOfMatches());
+
+ Address matchPairsAddress(FramePointer, matchPairsOffset);
+ Address pairCountAddress(FramePointer,
+ matchPairsOffset + MatchPairs::offsetOfPairCount());
+ Address pairsPointerAddress(FramePointer,
+ matchPairsOffset + MatchPairs::offsetOfPairs());
+
+ Address pairsArrayAddress(FramePointer, pairsArrayOffset);
+ Address firstMatchStartAddress(FramePointer,
+ pairsArrayOffset + MatchPair::offsetOfStart());
+
+ // First, fill in a skeletal MatchPairs instance on the stack. This will be
+ // passed to the OOL stub in the caller if we aren't able to execute the
+ // RegExp inline, and that stub needs to be able to determine whether the
+ // execution finished successfully.
+
+ // Initialize MatchPairs::pairCount to 1. The correct value can only
+ // be determined after loading the RegExpShared. If the RegExpShared
+ // has Kind::Atom, this is the correct pairCount.
+ masm.store32(Imm32(1), pairCountAddress);
+
+ // Initialize MatchPairs::pairs pointer
+ masm.computeEffectiveAddress(pairsArrayAddress, temp1);
+ masm.storePtr(temp1, pairsPointerAddress);
+
+ // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
+ masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);
+
+ // Determine the set of volatile inputs to save when calling into C++ or
+ // regexp code.
+ LiveGeneralRegisterSet volatileRegs;
+ if (lastIndex.volatile_()) {
+ volatileRegs.add(lastIndex);
+ }
+ if (input.volatile_()) {
+ volatileRegs.add(input);
+ }
+ if (regexp.volatile_()) {
+ volatileRegs.add(regexp);
+ }
+
+ // Ensure the input string is not a rope.
+ Label isLinear;
+ masm.branchIfNotRope(input, &isLinear);
+ {
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = JSLinearString* (*)(JSString*);
+ masm.setupUnalignedABICall(temp1);
+ masm.passABIArg(input);
+ masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
+
+ MOZ_ASSERT(!volatileRegs.has(temp1));
+ masm.storeCallPointerResult(temp1);
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
+ }
+ masm.bind(&isLinear);
+
+ // Load the RegExpShared.
+ Register regexpReg = temp1;
+ Address sharedSlot = Address(
+ regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
+ masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
+ masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);
+
+ // Handle Atom matches
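+  // A non-null pattern atom means the regexp is a literal string; call the
+  // C++ atom matcher instead of running generated regexp code.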
+ Label notAtom, checkSuccess;
+ masm.branchPtr(Assembler::Equal,
+ Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
+ ImmWord(0), &notAtom);
+ {
+ masm.computeEffectiveAddress(matchPairsAddress, temp3);
+
+ masm.PushRegsInMask(volatileRegs);
+ using Fn = RegExpRunStatus (*)(RegExpShared* re, JSLinearString* input,
+ size_t start, MatchPairs* matchPairs);
+ masm.setupUnalignedABICall(temp2);
+ masm.passABIArg(regexpReg);
+ masm.passABIArg(input);
+ masm.passABIArg(lastIndex);
+ masm.passABIArg(temp3);
+ masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();
+
+ MOZ_ASSERT(!volatileRegs.has(temp1));
+ masm.storeCallInt32Result(temp1);
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.jump(&checkSuccess);
+ }
+ masm.bind(&notAtom);
+
+ // Don't handle regexps with too many capture pairs.
+ masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
+ masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
+ failure);
+
+ // Fill in the pair count in the MatchPairs on the stack.
+ masm.store32(temp2, pairCountAddress);
+
+ // Update lastIndex if necessary.
+ StepBackToLeadSurrogate(masm, regexpReg, input, lastIndex, temp2, temp3);
+
+ // Load code pointer and length of input (in bytes).
+ // Store the input start in the InputOutputData.
+ Register codePointer = temp1; // Note: temp1 was previously regexpReg.
+ Register byteLength = temp3;
+ {
+ Label isLatin1, done;
+ masm.loadStringLength(input, byteLength);
+
+ masm.branchLatin1String(input, &isLatin1);
+
+ // Two-byte input
+ masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
+ masm.storePtr(temp2, inputStartAddress);
+ masm.loadPtr(
+ Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
+ codePointer);
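+    // Convert the char count to a byte count (two bytes per char16_t).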
+ masm.lshiftPtr(Imm32(1), byteLength);
+ masm.jump(&done);
+
+ // Latin1 input
+ masm.bind(&isLatin1);
+ masm.loadStringChars(input, temp2, CharEncoding::Latin1);
+ masm.storePtr(temp2, inputStartAddress);
+ masm.loadPtr(
+ Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
+ codePointer);
+
+ masm.bind(&done);
+
+ // Store end pointer
+ masm.addPtr(byteLength, temp2);
+ masm.storePtr(temp2, inputEndAddress);
+ }
+
+ // Guard that the RegExpShared has been compiled for this type of input.
+ // If it has not been compiled, we fall back to the OOL case, which will
+ // do a VM call into the interpreter.
+ // TODO: add an interpreter trampoline?
+ masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
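+  // Unwrap the JitCode* to its raw code entry point.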
+ masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
+
+ // Finish filling in the InputOutputData instance on the stack
+ masm.computeEffectiveAddress(matchPairsAddress, temp2);
+ masm.storePtr(temp2, matchesAddress);
+ masm.storePtr(lastIndex, startIndexAddress);
+
+ // Execute the RegExp.
+ masm.computeEffectiveAddress(
+ Address(FramePointer, inputOutputDataStartOffset), temp2);
+ masm.PushRegsInMask(volatileRegs);
+ masm.setupUnalignedABICall(temp3);
+ masm.passABIArg(temp2);
+ masm.callWithABI(codePointer);
+ masm.storeCallInt32Result(temp1);
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.bind(&checkSuccess);
+ masm.branch32(Assembler::Equal, temp1,
+ Imm32(RegExpRunStatus_Success_NotFound), notFound);
+ masm.branch32(Assembler::Equal, temp1, Imm32(RegExpRunStatus_Error), failure);
+
+ // Lazily update the RegExpStatics.
+ RegExpStatics* res = GlobalObject::getRegExpStatics(cx, cx->global());
+ if (!res) {
+ return false;
+ }
+ masm.movePtr(ImmPtr(res), temp1);
+ UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
+ initialStringHeap, volatileRegs);
+
+ return true;
+}
+
+static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
+ Register len, Register byteOpScratch,
+ CharEncoding encoding);
+
+class CreateDependentString {
+ CharEncoding encoding_;
+ Register string_;
+ Register temp1_;
+ Register temp2_;
+ Label* failure_;
+
+ enum class FallbackKind : uint8_t {
+ InlineString,
+ FatInlineString,
+ NotInlineString,
+ Count
+ };
+ mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_,
+ joins_;
+
+ public:
+ CreateDependentString(CharEncoding encoding, Register string, Register temp1,
+ Register temp2, Label* failure)
+ : encoding_(encoding),
+ string_(string),
+ temp1_(temp1),
+ temp2_(temp2),
+ failure_(failure) {}
+
+ Register string() const { return string_; }
+ CharEncoding encoding() const { return encoding_; }
+
+  // Generate code that creates a DependentString.
+  // The caller should call generateFallback after masm.ret() to generate the
+  // fallback path.
+ void generate(MacroAssembler& masm, const JSAtomState& names,
+ CompileRuntime* runtime, Register base,
+ BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
+ gc::Heap initialStringHeap);
+
+  // Generate the fallback path for creating a DependentString.
+ void generateFallback(MacroAssembler& masm);
+};
+
+void CreateDependentString::generate(MacroAssembler& masm,
+ const JSAtomState& names,
+ CompileRuntime* runtime, Register base,
+ BaseIndex startIndexAddress,
+ BaseIndex limitIndexAddress,
+ gc::Heap initialStringHeap) {
+ JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
+ (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
+
+ auto newGCString = [&](FallbackKind kind) {
+ uint32_t flags = kind == FallbackKind::InlineString
+ ? JSString::INIT_THIN_INLINE_FLAGS
+ : kind == FallbackKind::FatInlineString
+ ? JSString::INIT_FAT_INLINE_FLAGS
+ : JSString::INIT_DEPENDENT_FLAGS;
+ if (encoding_ == CharEncoding::Latin1) {
+ flags |= JSString::LATIN1_CHARS_BIT;
+ }
+
+ if (kind != FallbackKind::FatInlineString) {
+ masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
+ } else {
+ masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
+ &fallbacks_[kind]);
+ }
+ masm.bind(&joins_[kind]);
+ masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
+ };
+
+ // Compute the string length.
+ masm.load32(startIndexAddress, temp2_);
+ masm.load32(limitIndexAddress, temp1_);
+ masm.sub32(temp2_, temp1_);
+
+ Label done, nonEmpty;
+
+  // Zero-length matches use the empty string.
+ masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
+ masm.movePtr(ImmGCPtr(names.empty), string_);
+ masm.jump(&done);
+
+ masm.bind(&nonEmpty);
+
+ // Complete matches use the base string.
+ Label nonBaseStringMatch;
+ masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
+ masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
+ temp1_, &nonBaseStringMatch);
+ masm.movePtr(base, string_);
+ masm.jump(&done);
+
+ masm.bind(&nonBaseStringMatch);
+
+ Label notInline;
+
+ int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
+ ? JSFatInlineString::MAX_LENGTH_LATIN1
+ : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
+ masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
+ {
+ // Make a thin or fat inline string.
+ Label stringAllocated, fatInline;
+
+ int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
+ ? JSThinInlineString::MAX_LENGTH_LATIN1
+ : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
+ masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
+ &fatInline);
+ if (encoding_ == CharEncoding::Latin1) {
+ // One character Latin-1 strings can be loaded directly from the
+ // static strings table.
+ Label thinInline;
+ masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
+ {
+ static_assert(
+ StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
+ "Latin-1 strings can be loaded from static strings");
+
+ masm.loadStringChars(base, temp1_, encoding_);
+ masm.loadChar(temp1_, temp2_, temp1_, encoding_);
+
+ masm.movePtr(ImmPtr(&runtime->staticStrings().unitStaticTable),
+ string_);
+ masm.loadPtr(BaseIndex(string_, temp1_, ScalePointer), string_);
+
+ masm.jump(&done);
+ }
+ masm.bind(&thinInline);
+ }
+ {
+ newGCString(FallbackKind::InlineString);
+ masm.jump(&stringAllocated);
+ }
+ masm.bind(&fatInline);
+ { newGCString(FallbackKind::FatInlineString); }
+ masm.bind(&stringAllocated);
+
+ masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
+
+ masm.push(string_);
+ masm.push(base);
+
+ MOZ_ASSERT(startIndexAddress.base == FramePointer,
+ "startIndexAddress is still valid after stack pushes");
+
+ // Load chars pointer for the new string.
+ masm.loadInlineStringCharsForStore(string_, string_);
+
+ // Load the source characters pointer.
+ masm.loadStringChars(base, temp2_, encoding_);
+ masm.load32(startIndexAddress, base);
+ masm.addToCharPtr(temp2_, base, encoding_);
+
+ CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);
+
+ masm.pop(base);
+ masm.pop(string_);
+
+ masm.jump(&done);
+ }
+
+ masm.bind(&notInline);
+
+ {
+ // Make a dependent string.
+ // Warning: string may be tenured (if the fallback case is hit), so
+ // stores into it must be post barriered.
+ newGCString(FallbackKind::NotInlineString);
+
+ masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
+
+ masm.loadNonInlineStringChars(base, temp1_, encoding_);
+ masm.load32(startIndexAddress, temp2_);
+ masm.addToCharPtr(temp1_, temp2_, encoding_);
+ masm.storeNonInlineStringChars(temp1_, string_);
+ masm.storeDependentStringBase(base, string_);
+ masm.movePtr(base, temp1_);
+
+ // Follow any base pointer if the input is itself a dependent string.
+ // Watch for undepended strings, which have a base pointer but don't
+ // actually share their characters with it.
+ Label noBase;
+ masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
+ masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
+ masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
+ &noBase);
+ masm.loadDependentStringBase(base, temp1_);
+ masm.storeDependentStringBase(temp1_, string_);
+ masm.bind(&noBase);
+
+ // Post-barrier the base store, whether it was the direct or indirect
+ // base (both will end up in temp1 here).
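+    // A barrier is only needed when the new string is tenured and the base is
+    // in the nursery.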
+ masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);
+
+ LiveRegisterSet regsToSave(RegisterSet::Volatile());
+ regsToSave.takeUnchecked(temp1_);
+ regsToSave.takeUnchecked(temp2_);
+
+ masm.PushRegsInMask(regsToSave);
+
+ masm.mov(ImmPtr(runtime), temp1_);
+
+ using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
+ masm.setupUnalignedABICall(temp2_);
+ masm.passABIArg(temp1_);
+ masm.passABIArg(string_);
+ masm.callWithABI<Fn, PostWriteBarrier>();
+
+ masm.PopRegsInMask(regsToSave);
+ }
+
+ masm.bind(&done);
+}
+
+void CreateDependentString::generateFallback(MacroAssembler& masm) {
+ JitSpew(JitSpew_Codegen,
+ "# Emitting CreateDependentString fallback (encoding=%s)",
+ (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
+
+ LiveRegisterSet regsToSave(RegisterSet::Volatile());
+ regsToSave.takeUnchecked(string_);
+ regsToSave.takeUnchecked(temp2_);
+
+ for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
+ masm.bind(&fallbacks_[kind]);
+
+ masm.PushRegsInMask(regsToSave);
+
+ using Fn = void* (*)(JSContext* cx);
+ masm.setupUnalignedABICall(string_);
+ masm.loadJSContext(string_);
+ masm.passABIArg(string_);
+ if (kind == FallbackKind::FatInlineString) {
+ masm.callWithABI<Fn, AllocateFatInlineString>();
+ } else {
+ masm.callWithABI<Fn, AllocateDependentString>();
+ }
+ masm.storeCallPointerResult(string_);
+
+ masm.PopRegsInMask(regsToSave);
+
+ masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);
+
+ masm.jump(&joins_[kind]);
+ }
+}
+
+static void CreateMatchResultFallback(MacroAssembler& masm, Register object,
+ Register temp1, Register temp2,
+ const TemplateObject& templateObject,
+ Label* fail) {
+ JitSpew(JitSpew_Codegen, "# Emitting CreateMatchResult fallback");
+
+ MOZ_ASSERT(templateObject.isArrayObject());
+
+ LiveRegisterSet regsToSave(RegisterSet::Volatile());
+ regsToSave.takeUnchecked(object);
+ regsToSave.takeUnchecked(temp1);
+ regsToSave.takeUnchecked(temp2);
+
+ masm.PushRegsInMask(regsToSave);
+
+ using Fn = void* (*)(JSContext* cx, gc::AllocKind kind, size_t nDynamicSlots);
+ masm.setupUnalignedABICall(object);
+
+ masm.loadJSContext(object);
+ masm.passABIArg(object);
+ masm.move32(Imm32(int32_t(templateObject.getAllocKind())), temp1);
+ masm.passABIArg(temp1);
+ masm.move32(
+ Imm32(int32_t(templateObject.asTemplateNativeObject().numDynamicSlots())),
+ temp2);
+ masm.passABIArg(temp2);
+ masm.callWithABI<Fn, CreateMatchResultFallbackFunc>();
+ masm.storeCallPointerResult(object);
+
+ masm.PopRegsInMask(regsToSave);
+
+ masm.branchPtr(Assembler::Equal, object, ImmWord(0), fail);
+
+ masm.initGCThing(object, temp1, templateObject);
+}
+
+// Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
+// but RegExpExecMatch also has to load and update .lastIndex for global/sticky
+// regular expressions.
+static JitCode* GenerateRegExpMatchStubShared(JSContext* cx, bool isExecMatch) {
+ if (isExecMatch) {
+ JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
+ } else {
+ JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
+ }
+
+ Register regexp = RegExpMatcherRegExpReg;
+ Register input = RegExpMatcherStringReg;
+ Register lastIndex = RegExpMatcherLastIndexReg;
+ ValueOperand result = JSReturnOperand;
+
+ // We are free to clobber all registers, as LRegExpMatcher is a call
+ // instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+ regs.take(regexp);
+ regs.take(lastIndex);
+
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+ Register temp3 = regs.takeAny();
+ Register maybeTemp4 = InvalidReg;
+ if (!regs.empty()) {
+    // Take a fourth temporary only if one is available (x86 may not have one).
+ maybeTemp4 = regs.takeAny();
+ }
+ Register maybeTemp5 = InvalidReg;
+ if (!regs.empty()) {
+    // Take a fifth temporary only if one is available (x86 may not have one).
+ maybeTemp5 = regs.takeAny();
+ }
+
+ Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
+ Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
+
+ ArrayObject* templateObject =
+ cx->realm()->regExps.getOrCreateMatchResultTemplateObject(cx);
+ if (!templateObject) {
+ return nullptr;
+ }
+ TemplateObject templateObj(templateObject);
+ const TemplateNativeObject& nativeTemplateObj =
+ templateObj.asTemplateNativeObject();
+
+ // The template object should have enough space for the maximum number of
+ // pairs this stub can handle.
+ MOZ_ASSERT(ObjectElements::VALUES_PER_HEADER + RegExpObject::MaxPairCount ==
+ gc::GetGCKindSlots(templateObj.getAllocKind()));
+
+ TempAllocator temp(&cx->tempLifoAlloc());
+ JitContext jcx(cx);
+ StackMacroAssembler masm(cx, temp);
+ AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ Label notFoundZeroLastIndex;
+ if (isExecMatch) {
+ masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
+ }
+
+ // The InputOutputData is placed above the frame pointer and return address on
+ // the stack.
+ int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
+
+ JS::AutoAssertNoGC nogc(cx);
+ gc::Heap initialStringHeap = cx->realm()->jitRealm()->getInitialStringHeap();
+
+ Label notFound, oolEntry;
+ if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2,
+ temp3, inputOutputDataStartOffset,
+ initialStringHeap, &notFound, &oolEntry)) {
+ return nullptr;
+ }
+
+ // If a regexp has named captures, fall back to the OOL stub, which
+ // will end up calling CreateRegExpMatchResults.
+ Register shared = temp2;
+ masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
+ RegExpObject::SHARED_SLOT)),
+ shared, JSVAL_TYPE_PRIVATE_GCTHING);
+ masm.branchPtr(Assembler::NotEqual,
+ Address(shared, RegExpShared::offsetOfGroupsTemplate()),
+ ImmWord(0), &oolEntry);
+
+ // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
+ masm.branchTest32(Assembler::NonZero,
+ Address(shared, RegExpShared::offsetOfFlags()),
+ Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);
+
+ // Construct the result.
+ Register object = temp1;
+ Label matchResultFallback, matchResultJoin;
+ masm.createGCObject(object, temp2, templateObj, gc::Heap::Default,
+ &matchResultFallback);
+ masm.bind(&matchResultJoin);
+
+ MOZ_ASSERT(nativeTemplateObj.numFixedSlots() == 0);
+ // Dynamic slot count is always two less than a power of 2.
+ MOZ_ASSERT(nativeTemplateObj.numDynamicSlots() == 6);
+ static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
+ "First slot holds the 'index' property");
+ static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
+ "Second slot holds the 'input' property");
+ static_assert(RegExpRealm::MatchResultObjectGroupsSlot == 2,
+ "Third slot holds the 'groups' property");
+
+ // Initialize the slots of the result object with the dummy values
+ // defined in createMatchResultTemplateObject.
+ masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
+ masm.storeValue(
+ nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectIndexSlot),
+ Address(temp2, RegExpRealm::offsetOfMatchResultObjectIndexSlot()));
+ masm.storeValue(
+ nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectInputSlot),
+ Address(temp2, RegExpRealm::offsetOfMatchResultObjectInputSlot()));
+ masm.storeValue(
+ nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectGroupsSlot),
+ Address(temp2, RegExpRealm::offsetOfMatchResultObjectGroupsSlot()));
+
+ // clang-format off
+ /*
+ * [SMDOC] Stack layout for the RegExpMatcher stub
+ *
+ * +---------------+
+ * FramePointer +-----> |Caller-FramePtr|
+ * +---------------+
+ * |Return-Address |
+ * +---------------+
+ * inputOutputDataStartOffset +-----> +---------------+
+ * |InputOutputData|
+ * +---------------+
+ * +---------------+
+ * | MatchPairs |
+ * pairsCountAddress +-----------> count |
+ * | pairs |
+ * | |
+ * +---------------+
+ * pairsVectorStartOffset +-----> +---------------+
+ * | MatchPair |
+ * matchPairStart +------------> start | <-------+
+ * matchPairLimit +------------> limit | | Reserved space for
+ * +---------------+ | `RegExpObject::MaxPairCount`
+ * . | MatchPair objects.
+ * . |
+ * . | `count` objects will be
+ * +---------------+ | initialized and can be
+ * | MatchPair | | accessed below.
+ * | start | <-------+
+ * | limit |
+ * +---------------+
+ */
+ // clang-format on
+
+ static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
+ "MatchPair consists of two int32 values representing the start"
+ "and the end offset of the match");
+
+ Address pairCountAddress =
+ RegExpPairCountAddress(masm, inputOutputDataStartOffset);
+
+ int32_t pairsVectorStartOffset =
+ RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
+
+ // Incremented by one below for each match pair.
+ Register matchIndex = temp2;
+ masm.move32(Imm32(0), matchIndex);
+
+ // The element in which to store the result of the current match.
+ size_t elementsOffset = NativeObject::offsetOfFixedElements();
+ BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);
+
+  // The current match pair's "start" and "limit" members.
+ BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
+ pairsVectorStartOffset + MatchPair::offsetOfStart());
+ BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
+ pairsVectorStartOffset + MatchPair::offsetOfLimit());
+
+ Label* depStrFailure = &oolEntry;
+ Label restoreRegExpAndLastIndex;
+
+ Register temp4;
+ if (maybeTemp4 == InvalidReg) {
+ depStrFailure = &restoreRegExpAndLastIndex;
+
+ // We don't have enough registers for a fourth temporary. Reuse |regexp|
+ // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
+ masm.push(regexp);
+ temp4 = regexp;
+ } else {
+ temp4 = maybeTemp4;
+ }
+
+ Register temp5;
+ if (maybeTemp5 == InvalidReg) {
+ depStrFailure = &restoreRegExpAndLastIndex;
+
+ // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
+ // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
+ masm.push(lastIndex);
+ temp5 = lastIndex;
+ } else {
+ temp5 = maybeTemp5;
+ }
+
+ auto maybeRestoreRegExpAndLastIndex = [&]() {
+ if (maybeTemp5 == InvalidReg) {
+ masm.pop(lastIndex);
+ }
+ if (maybeTemp4 == InvalidReg) {
+ masm.pop(regexp);
+ }
+ };
+
+ // Loop to construct the match strings. There are two different loops,
+ // depending on whether the input is a Two-Byte or a Latin-1 string.
+ CreateDependentString depStrs[]{
+ {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
+ {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
+ };
+
+ {
+ Label isLatin1, done;
+ masm.branchLatin1String(input, &isLatin1);
+
+ for (auto& depStr : depStrs) {
+ if (depStr.encoding() == CharEncoding::Latin1) {
+ masm.bind(&isLatin1);
+ }
+
+ Label matchLoop;
+ masm.bind(&matchLoop);
+
+ static_assert(MatchPair::NoMatch == -1,
+ "MatchPair::start is negative if no match was found");
+
+ Label isUndefined, storeDone;
+ masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
+ &isUndefined);
+ {
+ depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
+ input, matchPairStart, matchPairLimit,
+ initialStringHeap);
+
+        // Storing into the nursery-allocated result object's elements; no post
+        // barrier is needed.
+ masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
+ masm.jump(&storeDone);
+ }
+ masm.bind(&isUndefined);
+ { masm.storeValue(UndefinedValue(), objectMatchElement); }
+ masm.bind(&storeDone);
+
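+      // Advance to the next match pair; stop once |pairCount| pairs have been
+      // stored.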
+ masm.add32(Imm32(1), matchIndex);
+ masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
+ &done);
+ masm.jump(&matchLoop);
+ }
+
+#ifdef DEBUG
+ masm.assumeUnreachable("The match string loop doesn't fall through.");
+#endif
+
+ masm.bind(&done);
+ }
+
+ maybeRestoreRegExpAndLastIndex();
+
+ // Fill in the rest of the output object.
+ masm.store32(
+ matchIndex,
+ Address(object,
+ elementsOffset + ObjectElements::offsetOfInitializedLength()));
+ masm.store32(
+ matchIndex,
+ Address(object, elementsOffset + ObjectElements::offsetOfLength()));
+
+ Address firstMatchPairStartAddress(
+ FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
+ Address firstMatchPairLimitAddress(
+ FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());
+
+ masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
+
+ masm.load32(firstMatchPairStartAddress, temp3);
+ masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
+
+  // No post barrier needed (address is within a nursery object).
+ masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));
+
+ // For the ExecMatch stub, if the regular expression is global or sticky, we
+ // have to update its .lastIndex slot.
+ if (isExecMatch) {
+ MOZ_ASSERT(object != lastIndex);
+ Label notGlobalOrSticky;
+ masm.branchTest32(Assembler::Zero, flagsSlot,
+ Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
+ &notGlobalOrSticky);
+ masm.load32(firstMatchPairLimitAddress, lastIndex);
+ masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
+ masm.bind(&notGlobalOrSticky);
+ }
+
+ // All done!
+ masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ masm.bind(&notFound);
+ if (isExecMatch) {
+ Label notGlobalOrSticky;
+ masm.branchTest32(Assembler::Zero, flagsSlot,
+ Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
+ &notGlobalOrSticky);
+ masm.bind(&notFoundZeroLastIndex);
+ masm.storeValue(Int32Value(0), lastIndexSlot);
+ masm.bind(&notGlobalOrSticky);
+ }
+ masm.moveValue(NullValue(), result);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ // Fallback paths for CreateDependentString.
+ for (auto& depStr : depStrs) {
+ depStr.generateFallback(masm);
+ }
+
+ // Fallback path for createGCObject.
+ masm.bind(&matchResultFallback);
+ CreateMatchResultFallback(masm, object, temp2, temp3, templateObj, &oolEntry);
+ masm.jump(&matchResultJoin);
+
+  // Fall through to the OOL entry after restoring the registers.
+ masm.bind(&restoreRegExpAndLastIndex);
+ maybeRestoreRegExpAndLastIndex();
+
+ // Use an undefined value to signal to the caller that the OOL stub needs to
+ // be called.
+ masm.bind(&oolEntry);
+ masm.moveValue(UndefinedValue(), result);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode(cx, CodeKind::Other);
+ if (!code) {
+ return nullptr;
+ }
+
+ const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
+ CollectPerfSpewerJitCodeProfile(code, name);
+#ifdef MOZ_VTUNE
+ vtune::MarkStub(code, name);
+#endif
+
+ return code;
+}
+
+JitCode* JitRealm::generateRegExpMatcherStub(JSContext* cx) {
+ return GenerateRegExpMatchStubShared(cx, /* isExecMatch = */ false);
+}
+
+JitCode* JitRealm::generateRegExpExecMatchStub(JSContext* cx) {
+ return GenerateRegExpMatchStubShared(cx, /* isExecMatch = */ true);
+}
+
+class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
+ LRegExpMatcher* lir_;
+
+ public:
+ explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineRegExpMatcher(this);
+ }
+
+ LRegExpMatcher* lir() const { return lir_; }
+};
+
+void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
+ LRegExpMatcher* lir = ool->lir();
+ Register lastIndex = ToRegister(lir->lastIndex());
+ Register input = ToRegister(lir->string());
+ Register regexp = ToRegister(lir->regexp());
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lastIndex);
+ regs.take(input);
+ regs.take(regexp);
+ Register temp = regs.takeAny();
+
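+  // The MatchPairs are stored right after the InputOutputData in the stack
+  // space reserved by visitRegExpMatcher; pass their address as |pairs|.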
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), InputOutputDataSize), temp);
+
+ pushArg(temp);
+ pushArg(lastIndex);
+ pushArg(input);
+ pushArg(regexp);
+
+  // We are not using oolCallVM because we are in a Call and the live registers
+  // have already been saved by the register allocator.
+ using Fn =
+ bool (*)(JSContext*, HandleObject regexp, HandleString input,
+ int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
+ callVM<Fn, RegExpMatcherRaw>(lir);
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
+ MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
+ MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
+ MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
+ MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
+
+#if defined(JS_NUNBOX32)
+ static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
+ static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
+ static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
+ static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
+ static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
+ static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
+#elif defined(JS_PUNBOX64)
+ static_assert(RegExpMatcherRegExpReg != JSReturnReg);
+ static_assert(RegExpMatcherStringReg != JSReturnReg);
+ static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
+#endif
+
+ masm.reserveStack(RegExpReservedStack);
+
+ OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ const JitRealm* jitRealm = gen->realm->jitRealm();
+ JitCode* regExpMatcherStub =
+ jitRealm->regExpMatcherStubNoBarrier(&realmStubsToReadBarrier_);
+ masm.call(regExpMatcherStub);
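+  // The stub returns UndefinedValue to signal that the OOL VM call is needed.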
+ masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
+ masm.bind(ool->rejoin());
+
+ masm.freeStack(RegExpReservedStack);
+}
+
+class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
+ LRegExpExecMatch* lir_;
+
+ public:
+ explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineRegExpExecMatch(this);
+ }
+
+ LRegExpExecMatch* lir() const { return lir_; }
+};
+
+void CodeGenerator::visitOutOfLineRegExpExecMatch(
+ OutOfLineRegExpExecMatch* ool) {
+ LRegExpExecMatch* lir = ool->lir();
+ Register input = ToRegister(lir->string());
+ Register regexp = ToRegister(lir->regexp());
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+ regs.take(regexp);
+ Register temp = regs.takeAny();
+
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), InputOutputDataSize), temp);
+
+ pushArg(temp);
+ pushArg(input);
+ pushArg(regexp);
+
+ // We are not using oolCallVM because we are in a Call and live registers have
+ // already been saved by the register allocator.
+ using Fn =
+ bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
+ MatchPairs* pairs, MutableHandleValue output);
+ callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
+ MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
+ MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
+ MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
+
+#if defined(JS_NUNBOX32)
+ static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
+ static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
+ static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
+ static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
+#elif defined(JS_PUNBOX64)
+ static_assert(RegExpMatcherRegExpReg != JSReturnReg);
+ static_assert(RegExpMatcherStringReg != JSReturnReg);
+#endif
+
+ masm.reserveStack(RegExpReservedStack);
+
+ auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ const JitRealm* jitRealm = gen->realm->jitRealm();
+ JitCode* regExpExecMatchStub =
+ jitRealm->regExpExecMatchStubNoBarrier(&realmStubsToReadBarrier_);
+ masm.call(regExpExecMatchStub);
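+  // The stub returns UndefinedValue to signal that the OOL VM call is needed.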
+ masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
+
+ masm.bind(ool->rejoin());
+ masm.freeStack(RegExpReservedStack);
+}
+
+JitCode* JitRealm::generateRegExpSearcherStub(JSContext* cx) {
+ JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");
+
+ Register regexp = RegExpSearcherRegExpReg;
+ Register input = RegExpSearcherStringReg;
+ Register lastIndex = RegExpSearcherLastIndexReg;
+ Register result = ReturnReg;
+
+ // We are free to clobber all registers, as LRegExpSearcher is a call
+ // instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+ regs.take(regexp);
+ regs.take(lastIndex);
+
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+ Register temp3 = regs.takeAny();
+
+ TempAllocator temp(&cx->tempLifoAlloc());
+ JitContext jcx(cx);
+ StackMacroAssembler masm(cx, temp);
+ AutoCreatedBy acb(masm, "JitRealm::generateRegExpSearcherStub");
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ // The InputOutputData is placed above the frame pointer and return address on
+ // the stack.
+ int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
+
+ Label notFound, oolEntry;
+ if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2,
+ temp3, inputOutputDataStartOffset,
+ initialStringHeap, &notFound, &oolEntry)) {
+ return nullptr;
+ }
+
+ // clang-format off
+ /*
+ * [SMDOC] Stack layout for the RegExpSearcher stub
+ *
+ * +---------------+
+ * FramePointer +-----> |Caller-FramePtr|
+ * +---------------+
+ * |Return-Address |
+ * +---------------+
+ * inputOutputDataStartOffset +-----> +---------------+
+ * |InputOutputData|
+ * +---------------+
+ * +---------------+
+ * | MatchPairs |
+ * | count |
+ * | pairs |
+ * | |
+ * +---------------+
+ * pairsVectorStartOffset +-----> +---------------+
+ * | MatchPair |
+ * matchPairStart +------------> start | <-------+
+ * matchPairLimit +------------> limit | | Reserved space for
+ * +---------------+ | `RegExpObject::MaxPairCount`
+ * . | MatchPair objects.
+ * . |
+ * . | Only a single object will
+ * +---------------+ | be initialized and can be
+ * | MatchPair | | accessed below.
+ * | start | <-------+
+ * | limit |
+ * +---------------+
+ */
+ // clang-format on
+
+ int32_t pairsVectorStartOffset =
+ RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
+ Address matchPairStart(FramePointer,
+ pairsVectorStartOffset + MatchPair::offsetOfStart());
+ Address matchPairLimit(FramePointer,
+ pairsVectorStartOffset + MatchPair::offsetOfLimit());
+
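+  // Pack the result as (limit << 15) | start.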
+ masm.load32(matchPairStart, result);
+ masm.load32(matchPairLimit, input);
+ masm.lshiftPtr(Imm32(15), input);
+ masm.or32(input, result);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ masm.bind(&notFound);
+ masm.move32(Imm32(RegExpSearcherResultNotFound), result);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ masm.bind(&oolEntry);
+ masm.move32(Imm32(RegExpSearcherResultFailed), result);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode(cx, CodeKind::Other);
+ if (!code) {
+ return nullptr;
+ }
+
+ CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
+#ifdef MOZ_VTUNE
+ vtune::MarkStub(code, "RegExpSearcherStub");
+#endif
+
+ return code;
+}
+
+class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
+ LRegExpSearcher* lir_;
+
+ public:
+ explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineRegExpSearcher(this);
+ }
+
+ LRegExpSearcher* lir() const { return lir_; }
+};
+
+void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
+ LRegExpSearcher* lir = ool->lir();
+ Register lastIndex = ToRegister(lir->lastIndex());
+ Register input = ToRegister(lir->string());
+ Register regexp = ToRegister(lir->regexp());
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lastIndex);
+ regs.take(input);
+ regs.take(regexp);
+ Register temp = regs.takeAny();
+
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), InputOutputDataSize), temp);
+
+ pushArg(temp);
+ pushArg(lastIndex);
+ pushArg(input);
+ pushArg(regexp);
+
+  // We are not using oolCallVM because we are in a Call and the live registers
+  // have already been saved by the register allocator.
+ using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
+ int32_t lastIndex, MatchPairs* pairs, int32_t* result);
+ callVM<Fn, RegExpSearcherRaw>(lir);
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
+ MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
+ MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
+ MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
+ MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
+
+ static_assert(RegExpSearcherRegExpReg != ReturnReg);
+ static_assert(RegExpSearcherStringReg != ReturnReg);
+ static_assert(RegExpSearcherLastIndexReg != ReturnReg);
+
+ masm.reserveStack(RegExpReservedStack);
+
+ OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ const JitRealm* jitRealm = gen->realm->jitRealm();
+ JitCode* regExpSearcherStub =
+ jitRealm->regExpSearcherStubNoBarrier(&realmStubsToReadBarrier_);
+ masm.call(regExpSearcherStub);
+ masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
+ ool->entry());
+ masm.bind(ool->rejoin());
+
+ masm.freeStack(RegExpReservedStack);
+}
+
+JitCode* JitRealm::generateRegExpExecTestStub(JSContext* cx) {
+ JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");
+
+ Register regexp = RegExpExecTestRegExpReg;
+ Register input = RegExpExecTestStringReg;
+ Register result = ReturnReg;
+
+ TempAllocator temp(&cx->tempLifoAlloc());
+ JitContext jcx(cx);
+ StackMacroAssembler masm(cx, temp);
+ AutoCreatedBy acb(masm, "JitRealm::generateRegExpExecTestStub");
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ // We are free to clobber all registers, as LRegExpExecTest is a call
+ // instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+ regs.take(regexp);
+
+ // Ensure lastIndex != result.
+ regs.take(result);
+ Register lastIndex = regs.takeAny();
+ regs.add(result);
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+ Register temp3 = regs.takeAny();
+
+ Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
+ Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
+
+ masm.reserveStack(RegExpReservedStack);
+
+ // Load lastIndex and skip RegExp execution if needed.
+ Label notFoundZeroLastIndex;
+ masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
+
+ // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
+ // before calling the stub. For RegExpExecTest we call the stub before
+ // reserving stack space, so the offset of the InputOutputData relative to the
+ // frame pointer is negative.
+ constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);
+
+ // On ARM64, load/store instructions can encode an immediate offset in the
+ // range [-256, 4095]. If we ever fail this assertion, it would be more
+ // efficient to store the data above the frame pointer similar to
+ // RegExpMatcher and RegExpSearcher.
+ static_assert(inputOutputDataStartOffset >= -256);
+
+ Label notFound, oolEntry;
+ if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2,
+ temp3, inputOutputDataStartOffset,
+ initialStringHeap, &notFound, &oolEntry)) {
+ return nullptr;
+ }
+
+ // Set `result` to true/false to indicate found/not-found, or to
+ // RegExpExecTestResultFailed if we have to retry in C++. If the regular
+ // expression is global or sticky, we also have to update its .lastIndex slot.
+
+ Label done;
+ int32_t pairsVectorStartOffset =
+ RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
+ Address matchPairLimit(FramePointer,
+ pairsVectorStartOffset + MatchPair::offsetOfLimit());
+
+ masm.move32(Imm32(1), result);
+ masm.branchTest32(Assembler::Zero, flagsSlot,
+ Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
+ &done);
+ masm.load32(matchPairLimit, lastIndex);
+ masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
+ masm.jump(&done);
+
+ masm.bind(&notFound);
+ masm.move32(Imm32(0), result);
+ masm.branchTest32(Assembler::Zero, flagsSlot,
+ Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
+ &done);
+ masm.storeValue(Int32Value(0), lastIndexSlot);
+ masm.jump(&done);
+
+ masm.bind(&notFoundZeroLastIndex);
+ masm.move32(Imm32(0), result);
+ masm.storeValue(Int32Value(0), lastIndexSlot);
+ masm.jump(&done);
+
+ masm.bind(&oolEntry);
+ masm.move32(Imm32(RegExpExecTestResultFailed), result);
+
+ masm.bind(&done);
+ masm.freeStack(RegExpReservedStack);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode(cx, CodeKind::Other);
+ if (!code) {
+ return nullptr;
+ }
+
+ CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
+#ifdef MOZ_VTUNE
+ vtune::MarkStub(code, "RegExpExecTestStub");
+#endif
+
+ return code;
+}
+
+class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
+ LRegExpExecTest* lir_;
+
+ public:
+ explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineRegExpExecTest(this);
+ }
+
+ LRegExpExecTest* lir() const { return lir_; }
+};
+
+void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
+ LRegExpExecTest* lir = ool->lir();
+ Register input = ToRegister(lir->string());
+ Register regexp = ToRegister(lir->regexp());
+
+ pushArg(input);
+ pushArg(regexp);
+
+ // We are not using oolCallVM because we are in a Call and live registers have
+ // already been saved by the register allocator.
+ using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
+ HandleString input, bool* result);
+ callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
+ MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
+ MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
+ MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
+
+ static_assert(RegExpExecTestRegExpReg != ReturnReg);
+ static_assert(RegExpExecTestStringReg != ReturnReg);
+
+ auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ const JitRealm* jitRealm = gen->realm->jitRealm();
+ JitCode* regExpExecTestStub =
+ jitRealm->regExpExecTestStubNoBarrier(&realmStubsToReadBarrier_);
+ masm.call(regExpExecTestStub);
+
+ masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
+ ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+class OutOfLineRegExpPrototypeOptimizable
+ : public OutOfLineCodeBase<CodeGenerator> {
+ LRegExpPrototypeOptimizable* ins_;
+
+ public:
+ explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
+ : ins_(ins) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
+ }
+ LRegExpPrototypeOptimizable* ins() const { return ins_; }
+};
+
+void CodeGenerator::visitRegExpPrototypeOptimizable(
+ LRegExpPrototypeOptimizable* ins) {
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+ Register temp = ToRegister(ins->temp0());
+
+ OutOfLineRegExpPrototypeOptimizable* ool =
+ new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchIfNotRegExpPrototypeOptimizable(object, temp, ool->entry());
+ masm.move32(Imm32(0x1), output);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
+ OutOfLineRegExpPrototypeOptimizable* ool) {
+ LRegExpPrototypeOptimizable* ins = ool->ins();
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ saveVolatile(output);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* proto);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(output);
+ masm.passABIArg(output);
+ masm.passABIArg(object);
+ masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
+ masm.storeCallBoolResult(output);
+
+ restoreVolatile(output);
+
+ masm.jump(ool->rejoin());
+}
+
+class OutOfLineRegExpInstanceOptimizable
+ : public OutOfLineCodeBase<CodeGenerator> {
+ LRegExpInstanceOptimizable* ins_;
+
+ public:
+ explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
+ : ins_(ins) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineRegExpInstanceOptimizable(this);
+ }
+ LRegExpInstanceOptimizable* ins() const { return ins_; }
+};
+
+void CodeGenerator::visitRegExpInstanceOptimizable(
+ LRegExpInstanceOptimizable* ins) {
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+ Register temp = ToRegister(ins->temp0());
+
+ OutOfLineRegExpInstanceOptimizable* ool =
+ new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchIfNotRegExpInstanceOptimizable(object, temp, ool->entry());
+ masm.move32(Imm32(0x1), output);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
+ OutOfLineRegExpInstanceOptimizable* ool) {
+ LRegExpInstanceOptimizable* ins = ool->ins();
+ Register object = ToRegister(ins->object());
+ Register proto = ToRegister(ins->proto());
+ Register output = ToRegister(ins->output());
+
+ saveVolatile(output);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(output);
+ masm.passABIArg(output);
+ masm.passABIArg(object);
+ masm.passABIArg(proto);
+ masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
+ masm.storeCallBoolResult(output);
+
+ restoreVolatile(output);
+
+ masm.jump(ool->rejoin());
+}
+
+static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
+ Register len, Register temp0, Register temp1,
+ Register output, CharEncoding encoding) {
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
+ masm.assumeUnreachable("Length should be greater than 0.");
+ masm.bind(&ok);
+#endif
+
+ Register chars = temp0;
+ masm.loadStringChars(str, chars, encoding);
+
+ masm.move32(Imm32(0), output);
+
+ Label start, done;
+ masm.bind(&start);
+
+ Register currentChar = temp1;
+ masm.loadChar(chars, output, currentChar, encoding);
+ masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);
+
+ masm.add32(Imm32(1), output);
+ masm.branch32(Assembler::NotEqual, output, len, &start);
+
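+  // No '$' character found; return -1.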
+ masm.move32(Imm32(-1), output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
+ Register str = ToRegister(ins->str());
+ Register output = ToRegister(ins->output());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register len = ToRegister(ins->temp2());
+
+ using Fn = bool (*)(JSContext*, JSString*, int32_t*);
+ OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
+ ins, ArgList(str), StoreRegisterTo(output));
+
+ masm.branchIfRope(str, ool->entry());
+ masm.loadStringLength(str, len);
+
+ Label isLatin1, done;
+ masm.branchLatin1String(str, &isLatin1);
+ {
+ FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
+ CharEncoding::TwoByte);
+ masm.jump(&done);
+ }
+ masm.bind(&isLatin1);
+ {
+ FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
+ CharEncoding::Latin1);
+ }
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitStringReplace(LStringReplace* lir) {
+ if (lir->replacement()->isConstant()) {
+ pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
+ } else {
+ pushArg(ToRegister(lir->replacement()));
+ }
+
+ if (lir->pattern()->isConstant()) {
+ pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
+ } else {
+ pushArg(ToRegister(lir->pattern()));
+ }
+
+ if (lir->string()->isConstant()) {
+ pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
+ } else {
+ pushArg(ToRegister(lir->string()));
+ }
+
+ using Fn =
+ JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
+ if (lir->mir()->isFlatReplacement()) {
+ callVM<Fn, StringFlatReplaceString>(lir);
+ } else {
+ callVM<Fn, StringReplace>(lir);
+ }
+}
+
+void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
+ LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+ TypedOrValueRegister lhs =
+ TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
+ TypedOrValueRegister rhs =
+ TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
+ ValueOperand output = ToOutValue(lir);
+
+ JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
+
+ switch (jsop) {
+ case JSOp::Add:
+ case JSOp::Sub:
+ case JSOp::Mul:
+ case JSOp::Div:
+ case JSOp::Mod:
+ case JSOp::Pow:
+ case JSOp::BitAnd:
+ case JSOp::BitOr:
+ case JSOp::BitXor:
+ case JSOp::Lsh:
+ case JSOp::Rsh:
+ case JSOp::Ursh: {
+ IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
+ addIC(lir, allocateIC(ic));
+ return;
+ }
+ default:
+ MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
+ }
+}
+
+void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
+ LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+ TypedOrValueRegister lhs =
+ TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
+ TypedOrValueRegister rhs =
+ TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
+ Register output = ToRegister(lir->output());
+
+ JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
+
+ switch (jsop) {
+ case JSOp::Lt:
+ case JSOp::Le:
+ case JSOp::Gt:
+ case JSOp::Ge:
+ case JSOp::Eq:
+ case JSOp::Ne:
+ case JSOp::StrictEq:
+ case JSOp::StrictNe: {
+ IonCompareIC ic(liveRegs, lhs, rhs, output);
+ addIC(lir, allocateIC(ic));
+ return;
+ }
+ default:
+ MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
+ }
+}
+
+void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
+ LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+ TypedOrValueRegister input =
+ TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
+ ValueOperand output = ToOutValue(lir);
+
+ IonUnaryArithIC ic(liveRegs, input, output);
+ addIC(lir, allocateIC(ic));
+}
+
+void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
+ pushArg(ImmPtr(lir->mir()->module()));
+
+ using Fn = JSObject* (*)(JSContext*, HandleObject);
+ callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
+}
+
+void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
+ pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
+ pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
+ pushArg(ImmGCPtr(current->mir()->info().script()));
+
+ using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
+ callVM<Fn, js::StartDynamicModuleImport>(lir);
+}
+
+void CodeGenerator::visitLambda(LLambda* lir) {
+ Register envChain = ToRegister(lir->environmentChain());
+ Register output = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp0());
+
+ JSFunction* fun = lir->mir()->templateFunction();
+
+ using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
+ OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
+ lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));
+
+ TemplateObject templateObject(fun);
+ masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
+ ool->entry());
+
+ masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
+ Address(output, JSFunction::offsetOfEnvironment()));
+ // No post barrier needed because output is guaranteed to be allocated in
+ // the nursery.
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
+ Register envChain = ToRegister(lir->envChain());
+ Register prototype = ToRegister(lir->prototype());
+
+ pushArg(prototype);
+ pushArg(envChain);
+ pushArg(ImmGCPtr(lir->mir()->function()));
+
+ using Fn =
+ JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
+ callVM<Fn, js::FunWithProtoOperation>(lir);
+}
+
+void CodeGenerator::visitSetFunName(LSetFunName* lir) {
+ pushArg(Imm32(lir->mir()->prefixKind()));
+ pushArg(ToValue(lir, LSetFunName::NameIndex));
+ pushArg(ToRegister(lir->fun()));
+
+ using Fn =
+ bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
+ callVM<Fn, js::SetFunctionName>(lir);
+}
+
+void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
+ // Note: markOsiPoint ensures enough space exists between the last
+ // LOsiPoint and this one to patch adjacent call instructions.
+
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+
+ uint32_t osiCallPointOffset = markOsiPoint(lir);
+
+ LSafepoint* safepoint = lir->associatedSafepoint();
+ MOZ_ASSERT(!safepoint->osiCallPointOffset());
+ safepoint->setOsiCallPointOffset(osiCallPointOffset);
+
+#ifdef DEBUG
+ // There should be no movegroups or other instructions between
+ // an instruction and its OsiPoint. This is necessary because
+ // we use the OsiPoint's snapshot from within VM calls.
+ for (LInstructionReverseIterator iter(current->rbegin(lir));
+ iter != current->rend(); iter++) {
+ if (*iter == lir) {
+ continue;
+ }
+ MOZ_ASSERT(!iter->isMoveGroup());
+ MOZ_ASSERT(iter->safepoint() == safepoint);
+ break;
+ }
+#endif
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (shouldVerifyOsiPointRegs(safepoint)) {
+ verifyOsiPointRegs(safepoint);
+ }
+#endif
+}
+
+void CodeGenerator::visitPhi(LPhi* lir) {
+ MOZ_CRASH("Unexpected LPhi in CodeGenerator");
+}
+
+void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
+
+void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
+ MTableSwitch* mir = ins->mir();
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+ const LAllocation* temp;
+
+ if (mir->getOperand(0)->type() != MIRType::Int32) {
+ temp = ins->tempInt()->output();
+
+    // The input is a double, so try to convert it to an integer.
+ // If it does not fit in an integer, take the default case.
+ masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
+ defaultcase, false);
+ } else {
+ temp = ins->index();
+ }
+
+ emitTableSwitchDispatch(mir, ToRegister(temp),
+ ToRegisterOrInvalid(ins->tempPointer()));
+}
+
+void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
+ MTableSwitch* mir = ins->mir();
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+ Register index = ToRegister(ins->tempInt());
+ ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
+ Register tag = masm.extractTag(value, index);
+ masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);
+
+ Label unboxInt, isInt;
+ masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
+ {
+ FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
+ masm.unboxDouble(value, floatIndex);
+ masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
+ masm.jump(&isInt);
+ }
+
+ masm.bind(&unboxInt);
+ masm.unboxInt32(value, index);
+
+ masm.bind(&isInt);
+
+ emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
+}
+
+void CodeGenerator::visitParameter(LParameter* lir) {}
+
+void CodeGenerator::visitCallee(LCallee* lir) {
+ Register callee = ToRegister(lir->output());
+ Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());
+
+ masm.loadFunctionFromCalleeToken(ptr, callee);
+}
+
+void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
+ Register output = ToRegister(lir->output());
+ Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
+ masm.loadPtr(calleeToken, output);
+
+ // We must be inside a function.
+ MOZ_ASSERT(current->mir()->info().script()->function());
+
+ // The low bit indicates whether this call is constructing; just clear the
+ // other bits.
+ static_assert(CalleeToken_Function == 0x0,
+ "CalleeTokenTag value should match");
+ static_assert(CalleeToken_FunctionConstructing == 0x1,
+ "CalleeTokenTag value should match");
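+ // For example, a constructing call has the token |fun | 0x1|, so masking
+ // with 0x1 leaves 1 in the output register; a normal call leaves 0.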
+ masm.andPtr(Imm32(0x1), output);
+}
+
+void CodeGenerator::visitReturn(LReturn* lir) {
+#if defined(JS_NUNBOX32)
+ DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
+ DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
+ MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
+ MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
+#elif defined(JS_PUNBOX64)
+ DebugOnly<LAllocation*> result = lir->getOperand(0);
+ MOZ_ASSERT(ToRegister(result) == JSReturnReg);
+#endif
+ // Don't emit a jump to the return label if this is the last block, as
+ // it'll fall through to the epilogue.
+ //
+ // This is -not- true, however, for a Generator-return, which may appear in
+ // the middle of the last block, so we should always emit the jump there.
+ if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
+ masm.jump(&returnLabel_);
+ }
+}
+
+void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
+ Register temp = ToRegister(lir->temp());
+
+ // Remember the OSR entry offset into the code buffer.
+ masm.flushBuffer();
+ setOsrEntryOffset(masm.size());
+
+ // Allocate the full frame for this function.
+ // Note that we have a new entry here, so we reset
+ // MacroAssembler::framePushed() to 0 before reserving the stack.
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+ masm.setFramePushed(0);
+
+ // The Baseline code ensured both the frame pointer and stack pointer point to
+ // the JitFrameLayout on the stack.
+
+ // If profiling, save the current frame pointer to a per-thread global field.
+ if (isProfilerInstrumentationEnabled()) {
+ masm.profilerEnterFrame(FramePointer, temp);
+ }
+
+ masm.reserveStack(frameSize());
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+
+ // Ensure that the Ion frame is properly aligned.
+ masm.assertStackAlignment(JitStackAlignment, 0);
+}
+
+void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
+ const LAllocation* frame = lir->getOperand(0);
+ const LDefinition* object = lir->getDef(0);
+
+ const ptrdiff_t frameOffset =
+ BaselineFrame::reverseOffsetOfEnvironmentChain();
+
+ masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
+}
+
+void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
+ const LAllocation* frame = lir->getOperand(0);
+ const LDefinition* object = lir->getDef(0);
+
+ const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
+
+ masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
+}
+
+void CodeGenerator::visitOsrValue(LOsrValue* value) {
+ const LAllocation* frame = value->getOperand(0);
+ const ValueOperand out = ToOutValue(value);
+
+ const ptrdiff_t frameOffset = value->mir()->frameOffset();
+
+ masm.loadValue(Address(ToRegister(frame), frameOffset), out);
+}
+
+void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
+ const LAllocation* frame = lir->getOperand(0);
+ const ValueOperand out = ToOutValue(lir);
+
+ Address flags =
+ Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
+ Address retval =
+ Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());
+
+ masm.moveValue(UndefinedValue(), out);
+
+ Label done;
+ masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
+ &done);
+ masm.loadValue(retval, out);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitStackArgT(LStackArgT* lir) {
+ const LAllocation* arg = lir->arg();
+ MIRType argType = lir->type();
+ uint32_t argslot = lir->argslot();
+ MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
+
+ Address dest = AddressOfPassedArg(argslot);
+
+ if (arg->isFloatReg()) {
+ masm.boxDouble(ToFloatRegister(arg), dest);
+ } else if (arg->isRegister()) {
+ masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
+ } else {
+ masm.storeValue(arg->toConstant()->toJSValue(), dest);
+ }
+}
+
+void CodeGenerator::visitStackArgV(LStackArgV* lir) {
+ ValueOperand val = ToValue(lir, 0);
+ uint32_t argslot = lir->argslot();
+ MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
+
+ masm.storeValue(val, AddressOfPassedArg(argslot));
+}
+
+void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
+ if (!group->numMoves()) {
+ return;
+ }
+
+ MoveResolver& resolver = masm.moveResolver();
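+ // The resolver computes a clobber-free ordering for this group's parallel
+ // moves, breaking cycles where necessary; the MoveEmitter below then emits
+ // the scheduled moves.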
+
+ for (size_t i = 0; i < group->numMoves(); i++) {
+ const LMove& move = group->getMove(i);
+
+ LAllocation from = move.from();
+ LAllocation to = move.to();
+ LDefinition::Type type = move.type();
+
+ // No bogus moves.
+ MOZ_ASSERT(from != to);
+ MOZ_ASSERT(!from.isConstant());
+ MoveOp::Type moveType;
+ switch (type) {
+ case LDefinition::OBJECT:
+ case LDefinition::SLOTS:
+#ifdef JS_NUNBOX32
+ case LDefinition::TYPE:
+ case LDefinition::PAYLOAD:
+#else
+ case LDefinition::BOX:
+#endif
+ case LDefinition::GENERAL:
+ case LDefinition::STACKRESULTS:
+ moveType = MoveOp::GENERAL;
+ break;
+ case LDefinition::INT32:
+ moveType = MoveOp::INT32;
+ break;
+ case LDefinition::FLOAT32:
+ moveType = MoveOp::FLOAT32;
+ break;
+ case LDefinition::DOUBLE:
+ moveType = MoveOp::DOUBLE;
+ break;
+ case LDefinition::SIMD128:
+ moveType = MoveOp::SIMD128;
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+
+ masm.propagateOOM(
+ resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
+ }
+
+ masm.propagateOOM(resolver.resolve());
+ if (masm.oom()) {
+ return;
+ }
+
+ MoveEmitter emitter(masm);
+
+#ifdef JS_CODEGEN_X86
+ if (group->maybeScratchRegister().isGeneralReg()) {
+ emitter.setScratchRegister(
+ group->maybeScratchRegister().toGeneralReg()->reg());
+ } else {
+ resolver.sortMemoryToMemoryMoves();
+ }
+#endif
+
+ emitter.emit(resolver);
+ emitter.finish();
+}
+
+void CodeGenerator::visitInteger(LInteger* lir) {
+ masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitInteger64(LInteger64* lir) {
+ masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
+}
+
+void CodeGenerator::visitPointer(LPointer* lir) {
+ masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
+ Register output = ToRegister(lir->output());
+ uint32_t nurseryIndex = lir->mir()->nurseryIndex();
+
+ // Load a pointer to the entry in IonScript's nursery objects list.
+ CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
+ masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));
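+ // The placeholder written above is patched when the code is linked to point
+ // at the IonScript's entry for |nurseryIndex|.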
+
+ // Load the JSObject*.
+ masm.loadPtr(Address(output, 0), output);
+}
+
+void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
+ // No-op.
+}
+
+void CodeGenerator::visitDebugEnterGCUnsafeRegion(
+ LDebugEnterGCUnsafeRegion* lir) {
+ Register temp = ToRegister(lir->temp0());
+
+ masm.loadJSContext(temp);
+
+ Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
+ masm.add32(Imm32(1), inUnsafeRegion);
+
+ Label ok;
+ masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
+ masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
+ LDebugLeaveGCUnsafeRegion* lir) {
+ Register temp = ToRegister(lir->temp0());
+
+ masm.loadJSContext(temp);
+
+ Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
+ masm.add32(Imm32(-1), inUnsafeRegion);
+
+ Label ok;
+ masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
+ masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitSlots(LSlots* lir) {
+ Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
+ masm.loadPtr(slots, ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
+ ValueOperand dest = ToOutValue(lir);
+ Register base = ToRegister(lir->input());
+ int32_t offset = lir->mir()->slot() * sizeof(js::Value);
+
+ masm.loadValue(Address(base, offset), dest);
+}
+
+static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
+ MIRType valueType) {
+ if (value->isConstant()) {
+ return ConstantOrRegister(value->toConstant()->toJSValue());
+ }
+ return TypedOrValueRegister(valueType, ToAnyRegister(value));
+}
+
+void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
+ Register base = ToRegister(lir->slots());
+ int32_t offset = lir->mir()->slot() * sizeof(js::Value);
+ Address dest(base, offset);
+
+ if (lir->mir()->needsBarrier()) {
+ emitPreBarrier(dest);
+ }
+
+ MIRType valueType = lir->mir()->value()->type();
+ ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
+ masm.storeUnboxedValue(value, valueType, dest);
+}
+
+void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
+ Register base = ToRegister(lir->slots());
+ int32_t offset = lir->mir()->slot() * sizeof(Value);
+
+ const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);
+
+ if (lir->mir()->needsBarrier()) {
+ emitPreBarrier(Address(base, offset));
+ }
+
+ masm.storeValue(value, Address(base, offset));
+}
+
+void CodeGenerator::visitElements(LElements* lir) {
+ Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
+ masm.loadPtr(elements, ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
+ Address environment(ToRegister(lir->function()),
+ JSFunction::offsetOfEnvironment());
+ masm.unboxObject(environment, ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitHomeObject(LHomeObject* lir) {
+ Register func = ToRegister(lir->function());
+ Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
+
+ masm.assertFunctionIsExtended(func);
+#ifdef DEBUG
+ Label isObject;
+ masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
+ masm.assumeUnreachable("[[HomeObject]] must be Object");
+ masm.bind(&isObject);
+#endif
+
+ masm.unboxObject(homeObject, ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
+ Register homeObject = ToRegister(lir->homeObject());
+ ValueOperand output = ToOutValue(lir);
+ Register temp = output.scratchReg();
+
+ masm.loadObjProto(homeObject, temp);
+
+#ifdef DEBUG
+ // We won't encounter a lazy proto, because the prototype is guaranteed to
+ // either be a JSFunction or a PlainObject, and only proxy objects can have a
+ // lazy proto.
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ Label proxyCheckDone;
+ masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
+ masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
+ masm.bind(&proxyCheckDone);
+#endif
+
+ Label nullProto, done;
+ masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
+
+ // Box prototype and return
+ masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
+ masm.jump(&done);
+
+ masm.bind(&nullProto);
+ masm.moveValue(NullValue(), output);
+
+ masm.bind(&done);
+}
+
+template <class T>
+static T* ToConstantObject(MDefinition* def) {
+ MOZ_ASSERT(def->isConstant());
+ return &def->toConstant()->toObject().as<T>();
+}
+
+void CodeGenerator::visitNewLexicalEnvironmentObject(
+ LNewLexicalEnvironmentObject* lir) {
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
+ lir->mir()->templateObj());
+ auto* scope = &templateObj->scope();
+ gc::Heap initialHeap = gc::Heap::Default;
+
+ using Fn =
+ BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
+ auto* ool =
+ oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
+ lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
+
+ TemplateObject templateObject(templateObj);
+ masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewClassBodyEnvironmentObject(
+ LNewClassBodyEnvironmentObject* lir) {
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
+ lir->mir()->templateObj());
+ auto* scope = &templateObj->scope();
+ gc::Heap initialHeap = gc::Heap::Default;
+
+ using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
+ Handle<ClassBodyScope*>);
+ auto* ool =
+ oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
+ lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
+
+ TemplateObject templateObject(templateObj);
+ masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewVarEnvironmentObject(
+ LNewVarEnvironmentObject* lir) {
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ auto* templateObj =
+ ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
+ auto* scope = &templateObj->scope().as<VarScope>();
+ gc::Heap initialHeap = gc::Heap::Default;
+
+ using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
+ auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
+ lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
+
+ TemplateObject templateObject(templateObj);
+ masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitGuardShape(LGuardShape* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToTempRegisterOrInvalid(guard->temp0());
+ Label bail;
+ masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
+ obj, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
+ Register obj = ToRegister(guard->object());
+ Register shapeList = ToRegister(guard->shapeList());
+ Register temp = ToRegister(guard->temp0());
+ Register temp2 = ToRegister(guard->temp1());
+ Register temp3 = ToRegister(guard->temp2());
+ Register spectre = ToTempRegisterOrInvalid(guard->temp3());
+
+ Label bail;
+ masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
+ masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
+ spectre, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardProto(LGuardProto* guard) {
+ Register obj = ToRegister(guard->object());
+ Register expected = ToRegister(guard->expected());
+ Register temp = ToRegister(guard->temp0());
+
+ masm.loadObjProto(obj, temp);
+
+ Label bail;
+ masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToRegister(guard->temp0());
+
+ masm.loadObjProto(obj, temp);
+
+ Label bail;
+ masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToRegister(guard->temp0());
+
+ Label bail;
+ masm.branchIfNonNativeObj(obj, temp, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
+ Register temp = ToRegister(guard->temp0());
+ Label bail;
+
+ masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
+ masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
+ &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToRegister(guard->temp0());
+
+ Label bail;
+ masm.branchTestObjectIsProxy(false, obj, temp, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToRegister(guard->temp0());
+
+ Label bail;
+ masm.branchTestObjectIsProxy(true, obj, temp, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
+ Register proxy = ToRegister(guard->proxy());
+ Register temp = ToRegister(guard->temp0());
+
+ Label bail;
+ masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
+ GetDOMProxyHandlerFamily(), &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitProxyGet(LProxyGet* lir) {
+ Register proxy = ToRegister(lir->proxy());
+ Register temp = ToRegister(lir->temp0());
+
+ pushArg(lir->mir()->id(), temp);
+ pushArg(proxy);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
+ callVM<Fn, ProxyGetProperty>(lir);
+}
+
+void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
+ Register proxy = ToRegister(lir->proxy());
+ ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);
+
+ pushArg(idVal);
+ pushArg(proxy);
+
+ using Fn =
+ bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
+ callVM<Fn, ProxyGetPropertyByValue>(lir);
+}
+
+void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
+ Register proxy = ToRegister(lir->proxy());
+ ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);
+
+ pushArg(idVal);
+ pushArg(proxy);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
+ if (lir->mir()->hasOwn()) {
+ callVM<Fn, ProxyHasOwn>(lir);
+ } else {
+ callVM<Fn, ProxyHas>(lir);
+ }
+}
+
+void CodeGenerator::visitProxySet(LProxySet* lir) {
+ Register proxy = ToRegister(lir->proxy());
+ ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
+ Register temp = ToRegister(lir->temp0());
+
+ pushArg(Imm32(lir->mir()->strict()));
+ pushArg(rhs);
+ pushArg(lir->mir()->id(), temp);
+ pushArg(proxy);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
+ callVM<Fn, ProxySetProperty>(lir);
+}
+
+void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
+ Register proxy = ToRegister(lir->proxy());
+ ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
+ ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);
+
+ pushArg(Imm32(lir->mir()->strict()));
+ pushArg(rhs);
+ pushArg(idVal);
+ pushArg(proxy);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
+ callVM<Fn, ProxySetPropertyByValue>(lir);
+}
+
+void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
+ Register obj = ToRegister(lir->obj());
+ ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);
+
+ pushArg(Imm32(lir->mir()->strict()));
+ pushArg(rhs);
+ pushArg(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
+ callVM<Fn, jit::SetArrayLength>(lir);
+}
+
+void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
+ Register obj = ToRegister(lir->object());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+ Register temp3 = ToRegister(lir->temp3());
+ ValueOperand output = ToOutValue(lir);
+
+ Label bail, cacheHit;
+ if (JitOptions.enableWatchtowerMegamorphic) {
+ masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1,
+ temp2, output, &cacheHit);
+ } else {
+ masm.xorPtr(temp2, temp2);
+ }
+
+ masm.branchIfNonNativeObj(obj, temp0, &bail);
+
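+ // Reserve a stack Value to receive the result; temp3 points at it and is
+ // passed to the callee as the |vp| out-param.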
+ masm.Push(UndefinedValue());
+ masm.moveStackPtrTo(temp3);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
+ MegamorphicCache::Entry* cacheEntry, Value* vp);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp0);
+ masm.passABIArg(temp0);
+ masm.passABIArg(obj);
+ masm.movePropertyKey(lir->mir()->name(), temp1);
+ masm.passABIArg(temp1);
+ masm.passABIArg(temp2);
+ masm.passABIArg(temp3);
+
+ masm.callWithABI<Fn, GetNativeDataPropertyPure>();
+
+ MOZ_ASSERT(!output.aliases(ReturnReg));
+ masm.Pop(output);
+
+ masm.branchIfFalseBool(ReturnReg, &bail);
+
+ masm.bind(&cacheHit);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitMegamorphicLoadSlotByValue(
+ LMegamorphicLoadSlotByValue* lir) {
+ Register obj = ToRegister(lir->object());
+ ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+ ValueOperand output = ToOutValue(lir);
+
+ Label bail, cacheHit;
+ if (JitOptions.enableWatchtowerMegamorphic) {
+ masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
+ output, &cacheHit);
+ } else {
+ masm.xorPtr(temp2, temp2);
+ }
+
+ masm.branchIfNonNativeObj(obj, temp0, &bail);
+
+ // idVal will be in vp[0], result will be stored in vp[1].
+ masm.reserveStack(sizeof(Value));
+ masm.Push(idVal);
+ masm.moveStackPtrTo(temp0);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj,
+ MegamorphicCache::Entry* cacheEntry, Value* vp);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp1);
+ masm.passABIArg(temp1);
+ masm.passABIArg(obj);
+ masm.passABIArg(temp2);
+ masm.passABIArg(temp0);
+ masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
+
+ MOZ_ASSERT(!idVal.aliases(temp0));
+ masm.storeCallPointerResult(temp0);
+ masm.Pop(idVal);
+
+ uint32_t framePushed = masm.framePushed();
+ Label ok;
+ masm.branchIfTrueBool(temp0, &ok);
+ masm.freeStack(sizeof(Value)); // Discard result Value.
+ masm.jump(&bail);
+
+ masm.bind(&ok);
+ masm.setFramePushed(framePushed);
+ masm.Pop(output);
+
+ masm.bind(&cacheHit);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
+ Register obj = ToRegister(lir->object());
+ ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);
+
+ Register temp0 = ToRegister(lir->temp0());
+#ifndef JS_CODEGEN_X86
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+#endif
+
+ Label cacheHit, done;
+ if (JitOptions.enableWatchtowerMegamorphic) {
+#ifdef JS_CODEGEN_X86
+ masm.emitMegamorphicCachedSetSlot(
+ lir->mir()->name(), obj, temp0, value, &cacheHit,
+ [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
+ EmitPreBarrier(masm, addr, mirType);
+ });
+#else
+ masm.emitMegamorphicCachedSetSlot(
+ lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
+ [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
+ EmitPreBarrier(masm, addr, mirType);
+ });
+#endif
+ }
+
+ pushArg(Imm32(lir->mir()->strict()));
+ pushArg(value);
+ pushArg(lir->mir()->name(), temp0);
+ pushArg(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
+ callVM<Fn, SetPropertyMegamorphic<true>>(lir);
+
+ masm.jump(&done);
+ masm.bind(&cacheHit);
+
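+ // Even after a cache-hit store we may need a post barrier: emit one when a
+ // tenured |obj| now holds a nursery-allocated |value|.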
+ masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
+
+ saveVolatile(temp0);
+ emitPostWriteBarrier(obj);
+ restoreVolatile(temp0);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
+ Register obj = ToRegister(lir->object());
+ ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+ Register output = ToRegister(lir->output());
+
+ Label bail, cacheHit;
+ if (JitOptions.enableWatchtowerMegamorphic) {
+ masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2,
+ output, &cacheHit,
+ lir->mir()->hasOwn());
+ } else {
+ masm.xorPtr(temp2, temp2);
+ }
+
+ masm.branchIfNonNativeObj(obj, temp0, &bail);
+
+ // idVal will be in vp[0], result will be stored in vp[1].
+ masm.reserveStack(sizeof(Value));
+ masm.Push(idVal);
+ masm.moveStackPtrTo(temp0);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj,
+ MegamorphicCache::Entry* cacheEntry, Value* vp);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp1);
+ masm.passABIArg(temp1);
+ masm.passABIArg(obj);
+ masm.passABIArg(temp2);
+ masm.passABIArg(temp0);
+ if (lir->mir()->hasOwn()) {
+ masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
+ } else {
+ masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
+ }
+
+ MOZ_ASSERT(!idVal.aliases(temp0));
+ masm.storeCallPointerResult(temp0);
+ masm.Pop(idVal);
+
+ uint32_t framePushed = masm.framePushed();
+ Label ok;
+ masm.branchIfTrueBool(temp0, &ok);
+ masm.freeStack(sizeof(Value)); // Discard result Value.
+ masm.jump(&bail);
+
+ masm.bind(&ok);
+ masm.setFramePushed(framePushed);
+ masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
+ masm.freeStack(sizeof(Value));
+ masm.bind(&cacheHit);
+
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
+ LGuardIsNotArrayBufferMaybeShared* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToRegister(guard->temp0());
+
+ Label bail;
+ masm.loadObjClassUnsafe(obj, temp);
+ masm.branchPtr(Assembler::Equal, temp, ImmPtr(&ArrayBufferObject::class_),
+ &bail);
+ masm.branchPtr(Assembler::Equal, temp,
+ ImmPtr(&SharedArrayBufferObject::class_), &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToRegister(guard->temp0());
+
+ Label bail;
+ masm.loadObjClassUnsafe(obj, temp);
+ masm.branchIfClassIsNotTypedArray(temp, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
+ Register input = ToRegister(guard->input());
+ Register expected = ToRegister(guard->expected());
+
+ Assembler::Condition cond =
+ guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
+ bailoutCmpPtr(cond, input, expected, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
+ Register input = ToRegister(guard->input());
+ Register expected = ToRegister(guard->expected());
+
+ bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
+ Register str = ToRegister(guard->str());
+ Register scratch = ToRegister(guard->temp0());
+
+ LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
+ volatileRegs.takeUnchecked(scratch);
+
+ Label bail;
+ masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
+ &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
+void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
+ Register symbol = ToRegister(guard->symbol());
+
+ bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
+ guard->snapshot());
+}
+
+void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
+ Register num = ToRegister(guard->num());
+
+ bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
+ guard->snapshot());
+}
+
+void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
+ Register str = ToRegister(lir->string());
+ Register output = ToRegister(lir->output());
+
+ Label vmCall, done;
+ masm.loadStringIndexValue(str, output, &vmCall);
+ masm.jump(&done);
+
+ {
+ masm.bind(&vmCall);
+
+ LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
+ volatileRegs.takeUnchecked(output);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = int32_t (*)(JSString* str);
+ masm.setupAlignedABICall();
+ masm.passABIArg(str);
+ masm.callWithABI<Fn, GetIndexFromString>();
+ masm.storeCallInt32Result(output);
+
+ masm.PopRegsInMask(volatileRegs);
+
+ // GetIndexFromString returns a negative value on failure.
+ bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
+ Register str = ToRegister(lir->string());
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
+
+ Label bail;
+ masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
+ Register str = ToRegister(lir->string());
+ FloatRegister output = ToFloatRegister(lir->output());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ Label vmCall, done;
+ // Use the indexed value as a fast path if possible.
+ masm.loadStringIndexValue(str, temp0, &vmCall);
+ masm.convertInt32ToDouble(temp0, output);
+ masm.jump(&done);
+ {
+ masm.bind(&vmCall);
+
+ // Reserve stack for holding the result value of the call.
+ masm.reserveStack(sizeof(double));
+ masm.moveStackPtrTo(temp0);
+
+ LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
+ volatileRegs.takeUnchecked(temp0);
+ volatileRegs.takeUnchecked(temp1);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp1);
+ masm.passABIArg(temp1);
+ masm.passABIArg(str);
+ masm.passABIArg(temp0);
+ masm.callWithABI<Fn, StringToNumberPure>();
+ masm.storeCallPointerResult(temp0);
+
+ masm.PopRegsInMask(volatileRegs);
+
+ Label ok;
+ masm.branchIfTrueBool(temp0, &ok);
+ {
+ // OOM path, recovered by StringToNumberPure.
+ //
+ // Use addToStackPtr instead of freeStack as freeStack tracks stack height
+ // flow-insensitively, and using it here would confuse the stack height
+ // tracking.
+ masm.addToStackPtr(Imm32(sizeof(double)));
+ bailout(lir->snapshot());
+ }
+ masm.bind(&ok);
+ masm.Pop(output);
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToRegister(guard->temp0());
+
+ // Load obj->elements.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
+
+ // Make sure there are no dense elements.
+ Address initLength(temp, ObjectElements::offsetOfInitializedLength());
+ bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
+}
+
+void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
+ Register input = ToRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ masm.move32To64ZeroExtend(input, output);
+}
+
+void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
+ Register64 output) {
+ Register temp = output.scratchReg();
+
+ saveLive(lir);
+
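+ // Reserve a stack slot for the uint64_t out-param; its address is passed to
+ // the VM call and the result is loaded back from it below.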
+ masm.reserveStack(sizeof(uint64_t));
+ masm.moveStackPtrTo(temp);
+ pushArg(temp);
+ pushArg(input);
+
+ using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
+ callVM<Fn, DoStringToInt64>(lir);
+
+ masm.load64(Address(masm.getStackPointer(), 0), output);
+ masm.freeStack(sizeof(uint64_t));
+
+ restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
+}
+
+void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
+ Register input = ToRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ emitStringToInt64(lir, input, output);
+}
+
+void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
+ ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
+ Register temp = ToRegister(lir->temp0());
+ Register64 output = ToOutRegister64(lir);
+
+ int checks = 3;
+
+ Label fail, done;
+ // Jump to |fail| if this is the last check and it fails; otherwise jump to
+ // the next test.
+ auto emitTestAndUnbox = [&](auto testAndUnbox) {
+ MOZ_ASSERT(checks > 0);
+
+ checks--;
+ Label notType;
+ Label* target = checks ? &notType : &fail;
+
+ testAndUnbox(target);
+
+ if (checks) {
+ masm.jump(&done);
+ masm.bind(&notType);
+ }
+ };
+
+ Register tag = masm.extractTag(input, temp);
+
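+ // Try each supported input type in turn: BigInt values are loaded directly
+ // as an int64, booleans become 0 or 1, and strings are converted through a
+ // VM call; any other type bails out.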
+ // BigInt.
+ emitTestAndUnbox([&](Label* target) {
+ masm.branchTestBigInt(Assembler::NotEqual, tag, target);
+ masm.unboxBigInt(input, temp);
+ masm.loadBigInt64(temp, output);
+ });
+
+ // Boolean
+ emitTestAndUnbox([&](Label* target) {
+ masm.branchTestBoolean(Assembler::NotEqual, tag, target);
+ masm.unboxBoolean(input, temp);
+ masm.move32To64ZeroExtend(temp, output);
+ });
+
+ // String
+ emitTestAndUnbox([&](Label* target) {
+ masm.branchTestString(Assembler::NotEqual, tag, target);
+ masm.unboxString(input, temp);
+ emitStringToInt64(lir, temp, output);
+ });
+
+ MOZ_ASSERT(checks == 0);
+
+ bailoutFrom(&fail, lir->snapshot());
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
+ Register operand = ToRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ masm.loadBigInt64(operand, output);
+}
+
+OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
+ Scalar::Type type,
+ Register64 input,
+ Register output) {
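+ // On 32-bit targets the 64-bit input must be passed to the VM function as
+ // two 32-bit halves.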
+#if JS_BITS_PER_WORD == 32
+ using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
+ auto args = ArgList(input.low, input.high);
+#else
+ using Fn = BigInt* (*)(JSContext*, uint64_t);
+ auto args = ArgList(input);
+#endif
+
+ if (type == Scalar::BigInt64) {
+ return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
+ StoreRegisterTo(output));
+ }
+ MOZ_ASSERT(type == Scalar::BigUint64);
+ return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
+ StoreRegisterTo(output));
+}
+
+void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
+ Register64 input, Register output,
+ Register maybeTemp) {
+ OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);
+
+ if (maybeTemp != InvalidReg) {
+ masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
+ } else {
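+ // No temp register was provided, so borrow one that aliases neither the
+ // input nor the output. It may hold a live value, so preserve it around
+ // the nursery allocation.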
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+ regs.take(output);
+
+ Register temp = regs.takeAny();
+
+ masm.push(temp);
+
+ Label fail, ok;
+ masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
+ masm.pop(temp);
+ masm.jump(&ok);
+ masm.bind(&fail);
+ masm.pop(temp);
+ masm.jump(ool->entry());
+ masm.bind(&ok);
+ }
+ masm.initializeBigInt64(type, output, input);
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
+ Register64 input = ToRegister64(lir->input());
+ Register temp = ToRegister(lir->temp0());
+ Register output = ToRegister(lir->output());
+
+ emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
+}
+
+void CodeGenerator::visitGuardValue(LGuardValue* lir) {
+ ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
+ Value expected = lir->mir()->expected();
+ Label bail;
+ masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
+ ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);
+
+ ScratchTagScope tag(masm, input);
+ masm.splitTagForTest(input, tag);
+
+ Label done;
+ masm.branchTestNull(Assembler::Equal, tag, &done);
+
+ Label bail;
+ masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
+ ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);
+
+ Label bail;
+ masm.branchTestObject(Assembler::Equal, input, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
+ Register function = ToRegister(lir->function());
+
+ Label bail;
+ if (uint16_t flags = lir->mir()->expectedFlags()) {
+ masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
+ }
+ if (uint16_t flags = lir->mir()->unexpectedFlags()) {
+ masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
+ }
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
+ LGuardFunctionIsNonBuiltinCtor* lir) {
+ Register function = ToRegister(lir->function());
+ Register temp = ToRegister(lir->temp0());
+
+ Label bail;
+ masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
+ Register function = ToRegister(lir->function());
+ Register temp = ToRegister(lir->temp0());
+
+ Assembler::Condition cond =
+ lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
+
+ Label bail;
+ masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
+ Register function = ToRegister(lir->function());
+
+ Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
+ bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
+ ImmGCPtr(lir->mir()->expected()), lir->snapshot());
+}
+
+// Out-of-line path to update the store buffer.
+class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
+ LInstruction* lir_;
+ const LAllocation* object_;
+
+ public:
+ OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
+ : lir_(lir), object_(object) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineCallPostWriteBarrier(this);
+ }
+
+ LInstruction* lir() const { return lir_; }
+ const LAllocation* object() const { return object_; }
+};
+
+static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
+ const gc::TenuredCell* cell,
+ AllocatableGeneralRegisterSet& regs,
+ Label* exit, Label* callVM) {
+ Register temp = regs.takeAny();
+
+ gc::Arena* arena = cell->arena();
+
+ Register cells = temp;
+ masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);
+
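+ // Compute the word and bit for this cell in the arena's cell set. If the
+ // bit is already set, the cell has already been recorded and the barrier
+ // can be skipped.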
+ size_t index = gc::ArenaCellSet::getCellIndex(cell);
+ size_t word;
+ uint32_t mask;
+ gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
+ size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);
+
+ masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
+ exit);
+
+ // Check whether this is the sentinel set and if so call the VM to allocate
+ // one for this arena.
+ masm.branchPtr(Assembler::Equal,
+ Address(cells, gc::ArenaCellSet::offsetOfArena()),
+ ImmPtr(nullptr), callVM);
+
+ // Add the cell to the set.
+ masm.or32(Imm32(mask), Address(cells, offset));
+ masm.jump(exit);
+
+ regs.add(temp);
+}
+
+static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
+ Register objreg, JSObject* maybeConstant,
+ bool isGlobal,
+ AllocatableGeneralRegisterSet& regs) {
+ MOZ_ASSERT_IF(isGlobal, maybeConstant);
+
+ Label callVM;
+ Label exit;
+
+ Register temp = regs.takeAny();
+
+ // We already have a fast path to check whether a global is in the store
+ // buffer.
+ if (!isGlobal) {
+ if (maybeConstant) {
+ // Check store buffer bitmap directly for known object.
+ EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
+ &exit, &callVM);
+ } else {
+ // Check one element cache to avoid VM call.
+ masm.loadPtr(AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
+ temp);
+ masm.branchPtr(Assembler::Equal, temp, objreg, &exit);
+ }
+ }
+
+ // Call into the VM to barrier the write.
+ masm.bind(&callVM);
+
+ Register runtimereg = temp;
+ masm.mov(ImmPtr(runtime), runtimereg);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(runtimereg);
+ masm.passABIArg(objreg);
+ if (isGlobal) {
+ using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
+ masm.callWithABI<Fn, PostGlobalWriteBarrier>();
+ } else {
+ using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
+ masm.callWithABI<Fn, PostWriteBarrier>();
+ }
+
+ masm.bind(&exit);
+}
+
+void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+
+ Register objreg;
+ JSObject* object = nullptr;
+ bool isGlobal = false;
+ if (obj->isConstant()) {
+ object = &obj->toConstant()->toObject();
+ isGlobal = isGlobalObject(object);
+ objreg = regs.takeAny();
+ masm.movePtr(ImmGCPtr(object), objreg);
+ } else {
+ objreg = ToRegister(obj);
+ regs.takeUnchecked(objreg);
+ }
+
+ EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
+}
+
+// Returns true if `def` might be allocated in the nursery.
+static bool ValueNeedsPostBarrier(MDefinition* def) {
+ if (def->isBox()) {
+ def = def->toBox()->input();
+ }
+ if (def->type() == MIRType::Value) {
+ return true;
+ }
+ return NeedsPostBarrier(def->type());
+}
+
+class OutOfLineElementPostWriteBarrier
+ : public OutOfLineCodeBase<CodeGenerator> {
+ LiveRegisterSet liveVolatileRegs_;
+ const LAllocation* index_;
+ int32_t indexDiff_;
+ Register obj_;
+ Register scratch_;
+
+ public:
+ OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
+ Register obj, const LAllocation* index,
+ Register scratch, int32_t indexDiff)
+ : liveVolatileRegs_(liveVolatileRegs),
+ index_(index),
+ indexDiff_(indexDiff),
+ obj_(obj),
+ scratch_(scratch) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineElementPostWriteBarrier(this);
+ }
+
+ const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
+ const LAllocation* index() const { return index_; }
+ int32_t indexDiff() const { return indexDiff_; }
+
+ Register object() const { return obj_; }
+ Register scratch() const { return scratch_; }
+};
+
+void CodeGenerator::emitElementPostWriteBarrier(
+ MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
+ const LAllocation* index, Register scratch, const ConstantOrRegister& val,
+ int32_t indexDiff) {
+ if (val.constant()) {
+ MOZ_ASSERT_IF(val.value().isGCThing(),
+ !IsInsideNursery(val.value().toGCThing()));
+ return;
+ }
+
+ TypedOrValueRegister reg = val.reg();
+ if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
+ return;
+ }
+
+ auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
+ liveVolatileRegs, obj, index, scratch, indexDiff);
+ addOutOfLineCode(ool, mir);
+
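+ // No barrier is needed if the holder object is itself in the nursery.
+ // Otherwise, take the out-of-line path when the stored value is a nursery
+ // cell.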
+ masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());
+
+ if (reg.hasValue()) {
+ masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
+ ool->entry());
+ } else {
+ masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
+ scratch, ool->entry());
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
+ OutOfLineElementPostWriteBarrier* ool) {
+ Register obj = ool->object();
+ Register scratch = ool->scratch();
+ const LAllocation* index = ool->index();
+ int32_t indexDiff = ool->indexDiff();
+
+ masm.PushRegsInMask(ool->liveVolatileRegs());
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ regs.takeUnchecked(obj);
+ regs.takeUnchecked(scratch);
+
+ Register indexReg;
+ if (index->isConstant()) {
+ indexReg = regs.takeAny();
+ masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
+ } else {
+ indexReg = ToRegister(index);
+ regs.takeUnchecked(indexReg);
+ if (indexDiff != 0) {
+ masm.add32(Imm32(indexDiff), indexReg);
+ }
+ }
+
+ masm.setupUnalignedABICall(scratch);
+ masm.movePtr(ImmPtr(gen->runtime), scratch);
+ masm.passABIArg(scratch);
+ masm.passABIArg(obj);
+ masm.passABIArg(indexReg);
+ using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
+ masm.callWithABI<Fn, PostWriteElementBarrier<IndexInBounds::Yes>>();
+
+ // We don't need a sub32 here because indexReg must be in liveVolatileRegs
+ // if indexDiff is not zero, so it will be restored below.
+ MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));
+
+ masm.PopRegsInMask(ool->liveVolatileRegs());
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::emitPostWriteBarrier(Register objreg) {
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ regs.takeUnchecked(objreg);
+ EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
+}
+
+void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
+ OutOfLineCallPostWriteBarrier* ool) {
+ saveLiveVolatile(ool->lir());
+ const LAllocation* obj = ool->object();
+ emitPostWriteBarrier(obj);
+ restoreLiveVolatile(ool->lir());
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
+ OutOfLineCode* ool) {
+ // Check whether an object is a global that we have already barriered before
+ // calling into the VM.
+ //
+ // We only check for the script's global, not other globals within the same
+ // compartment, because we bake in a pointer to realm->globalWriteBarriered
+ // and doing that would be invalid for other realms because they could be
+ // collected before the Ion code is discarded.
+
+ if (!maybeGlobal->isConstant()) {
+ return;
+ }
+
+ JSObject* obj = &maybeGlobal->toConstant()->toObject();
+ if (gen->realm->maybeGlobal() != obj) {
+ return;
+ }
+
+ const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
+ masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
+ ool->rejoin());
+}
+
+template <class LPostBarrierType, MIRType nurseryType>
+void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
+ OutOfLineCode* ool) {
+ static_assert(NeedsPostBarrier(nurseryType));
+
+ addOutOfLineCode(ool, lir->mir());
+
+ Register temp = ToTempRegisterOrInvalid(lir->temp0());
+
+ if (lir->object()->isConstant()) {
+ // Constant nursery objects cannot appear here, see
+ // LIRGenerator::visitPostWriteElementBarrier.
+ MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
+ } else {
+ masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
+ temp, ool->rejoin());
+ }
+
+ maybeEmitGlobalBarrierCheck(lir->object(), ool);
+
+ Register value = ToRegister(lir->value());
+ if constexpr (nurseryType == MIRType::Object) {
+ MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
+ } else if constexpr (nurseryType == MIRType::String) {
+ MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
+ } else {
+ static_assert(nurseryType == MIRType::BigInt);
+ MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
+ }
+ masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+template <class LPostBarrierType>
+void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
+ OutOfLineCode* ool) {
+ addOutOfLineCode(ool, lir->mir());
+
+ Register temp = ToTempRegisterOrInvalid(lir->temp0());
+
+ if (lir->object()->isConstant()) {
+ // Constant nursery objects cannot appear here, see
+ // LIRGenerator::visitPostWriteElementBarrier.
+ MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
+ } else {
+ masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
+ temp, ool->rejoin());
+ }
+
+ maybeEmitGlobalBarrierCheck(lir->object(), ool);
+
+ ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
+ masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
+ auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
+ visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
+}
+
+void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
+ auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
+ visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
+}
+
+void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
+ auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
+ visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
+}
+
+void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
+ auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
+ visitPostWriteBarrierCommonV(lir, ool);
+}
+
+// Out-of-line path to update the store buffer.
+class OutOfLineCallPostWriteElementBarrier
+ : public OutOfLineCodeBase<CodeGenerator> {
+ LInstruction* lir_;
+ const LAllocation* object_;
+ const LAllocation* index_;
+
+ public:
+ OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
+ const LAllocation* object,
+ const LAllocation* index)
+ : lir_(lir), object_(object), index_(index) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineCallPostWriteElementBarrier(this);
+ }
+
+ LInstruction* lir() const { return lir_; }
+
+ const LAllocation* object() const { return object_; }
+
+ const LAllocation* index() const { return index_; }
+};
+
+void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
+ OutOfLineCallPostWriteElementBarrier* ool) {
+ saveLiveVolatile(ool->lir());
+
+ const LAllocation* obj = ool->object();
+ const LAllocation* index = ool->index();
+
+ Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
+ Register indexreg = ToRegister(index);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ regs.takeUnchecked(indexreg);
+
+ if (obj->isConstant()) {
+ objreg = regs.takeAny();
+ masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
+ } else {
+ regs.takeUnchecked(objreg);
+ }
+
+ Register runtimereg = regs.takeAny();
+ using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
+ masm.setupAlignedABICall();
+ masm.mov(ImmPtr(gen->runtime), runtimereg);
+ masm.passABIArg(runtimereg);
+ masm.passABIArg(objreg);
+ masm.passABIArg(indexreg);
+ masm.callWithABI<Fn, PostWriteElementBarrier<IndexInBounds::Maybe>>();
+
+ restoreLiveVolatile(ool->lir());
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitPostWriteElementBarrierO(
+ LPostWriteElementBarrierO* lir) {
+ auto ool = new (alloc())
+ OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
+ visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
+ ool);
+}
+
+void CodeGenerator::visitPostWriteElementBarrierS(
+ LPostWriteElementBarrierS* lir) {
+ auto ool = new (alloc())
+ OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
+ visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
+ ool);
+}
+
+void CodeGenerator::visitPostWriteElementBarrierBI(
+ LPostWriteElementBarrierBI* lir) {
+ auto ool = new (alloc())
+ OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
+ visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
+ ool);
+}
+
+void CodeGenerator::visitPostWriteElementBarrierV(
+ LPostWriteElementBarrierV* lir) {
+ auto ool = new (alloc())
+ OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
+ visitPostWriteBarrierCommonV(lir, ool);
+}
+
+void CodeGenerator::visitAssertCanElidePostWriteBarrier(
+ LAssertCanElidePostWriteBarrier* lir) {
+ Register object = ToRegister(lir->object());
+ ValueOperand value =
+ ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
+ Register temp = ToRegister(lir->temp0());
+
+ Label ok;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);
+
+ masm.assumeUnreachable("Unexpected missing post write barrier");
+
+ masm.bind(&ok);
+}
+
+template <typename LCallIns>
+void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
+ MCallBase* mir = call->mir();
+
+ uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());
+
+ // Registers used for callWithABI() argument-passing.
+ const Register argContextReg = ToRegister(call->getArgContextReg());
+ const Register argUintNReg = ToRegister(call->getArgUintNReg());
+ const Register argVpReg = ToRegister(call->getArgVpReg());
+
+ // Misc. temporary registers.
+ const Register tempReg = ToRegister(call->getTempReg());
+
+ DebugOnly<uint32_t> initialStack = masm.framePushed();
+
+ masm.checkStackAlignment();
+
+ // Native functions have the signature:
+ // bool (*)(JSContext*, unsigned, Value* vp)
+ // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
+ // are the function arguments.
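+ //
+ // After the pushes below the stack therefore looks like this (sketch,
+ // higher addresses first):
+ //   vp[2..]  - the call arguments (stored earlier by the LStackArg
+ //              instructions)
+ //   vp[1]    - |this|
+ //   vp[0]    - the callee Value, doubling as space for the return value
+ //   argc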
+
+ // Allocate space for the outparam, moving the StackPointer to what will be
+ // &vp[1].
+ masm.adjustStack(unusedStack);
+
+ // Push a Value containing the callee object: natives are allowed to access
+ // their callee before setting the return value. The StackPointer is moved
+ // to &vp[0].
+ if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
+ Register calleeReg = ToRegister(call->getCallee());
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
+
+ if (call->mir()->maybeCrossRealm()) {
+ masm.switchToObjectRealm(calleeReg, tempReg);
+ }
+ } else {
+ WrappedFunction* target = call->getSingleTarget();
+ masm.Push(ObjectValue(*target->rawNativeJSFunction()));
+
+ if (call->mir()->maybeCrossRealm()) {
+ masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
+ masm.switchToObjectRealm(tempReg, tempReg);
+ }
+ }
+
+ // Preload arguments into registers.
+ masm.loadJSContext(argContextReg);
+ masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
+ masm.moveStackPtrTo(argVpReg);
+
+ masm.Push(argUintNReg);
+
+ // Construct native exit frame.
+ uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
+ masm.enterFakeExitFrameForNative(argContextReg, tempReg,
+ call->mir()->isConstructing());
+
+ markSafepointAt(safepointOffset, call);
+
+ // Construct and execute call.
+ masm.setupAlignedABICall();
+ masm.passABIArg(argContextReg);
+ masm.passABIArg(argUintNReg);
+ masm.passABIArg(argVpReg);
+
+ ensureOsiSpace();
+ // If we're using a simulator build, `native` will already point to the
+ // simulator's call-redirection code for LCallClassHook. Load the address in
+ // a register first so that we don't try to redirect it a second time.
+ bool emittedCall = false;
+#ifdef JS_SIMULATOR
+ if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
+ masm.movePtr(ImmPtr(native), tempReg);
+ masm.callWithABI(tempReg);
+ emittedCall = true;
+ }
+#endif
+ if (!emittedCall) {
+ masm.callWithABI(DynamicFunction<JSNative>(native), MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+ }
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
+
+ if (call->mir()->maybeCrossRealm()) {
+ masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
+ }
+
+ // Load the outparam vp[0] into output register(s).
+ masm.loadValue(
+ Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
+ JSReturnOperand);
+
+ // Until C++ code is instrumented against Spectre, prevent speculative
+ // execution from returning any private data.
+ if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
+ mir->hasLiveDefUses()) {
+ masm.speculationBarrier();
+ }
+
+ // The next instruction is removing the footer of the exit frame, so there
+ // is no need for leaveFakeExitFrame.
+
+ // Move the StackPointer back to its original location, unwinding the native
+ // exit frame.
+ masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
+ MOZ_ASSERT(masm.framePushed() == initialStack);
+}
+
+void CodeGenerator::visitCallNative(LCallNative* call) {
+ WrappedFunction* target = call->getSingleTarget();
+ MOZ_ASSERT(target);
+ MOZ_ASSERT(target->isNativeWithoutJitEntry());
+
+ JSNative native = target->native();
+ if (call->ignoresReturnValue() && target->hasJitInfo()) {
+ const JSJitInfo* jitInfo = target->jitInfo();
+ if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
+ native = jitInfo->ignoresReturnValueMethod;
+ }
+ }
+ emitCallNative(call, native);
+}
+
+void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
+ emitCallNative(call, call->mir()->target());
+}
+
+static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
+ DOMObjectKind kind) {
+ // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
+ // will be in the first slot but may be fixed or non-fixed.
+ MOZ_ASSERT(obj != priv);
+
+ switch (kind) {
+ case DOMObjectKind::Native:
+ // If it's a native object, the value must be in a fixed slot.
+ // See CanAttachDOMCall in CacheIR.cpp.
+ masm.debugAssertObjHasFixedSlots(obj, priv);
+ masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
+ break;
+ case DOMObjectKind::Proxy: {
+#ifdef DEBUG
+ // Sanity check: it must be a DOM proxy.
+ Label isDOMProxy;
+ masm.branchTestProxyHandlerFamily(
+ Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
+ masm.assumeUnreachable("Expected a DOM proxy");
+ masm.bind(&isDOMProxy);
+#endif
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
+ masm.loadPrivate(
+ Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
+ break;
+ }
+ }
+}
+
+void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
+ WrappedFunction* target = call->getSingleTarget();
+ MOZ_ASSERT(target);
+ MOZ_ASSERT(target->isNativeWithoutJitEntry());
+ MOZ_ASSERT(target->hasJitInfo());
+ MOZ_ASSERT(call->mir()->isCallDOMNative());
+
+ int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
+
+ // Registers used for callWithABI() argument-passing.
+ const Register argJSContext = ToRegister(call->getArgJSContext());
+ const Register argObj = ToRegister(call->getArgObj());
+ const Register argPrivate = ToRegister(call->getArgPrivate());
+ const Register argArgs = ToRegister(call->getArgArgs());
+
+ DebugOnly<uint32_t> initialStack = masm.framePushed();
+
+ masm.checkStackAlignment();
+
+ // DOM methods have the signature:
+ // bool (*)(JSContext*, HandleObject, void* private, const
+ // JSJitMethodCallArgs& args)
+ // Where args is initialized from an argc and a vp, vp[0] is space for an
+ // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
+ // function arguments. Note that args stores the argv, not the vp, and
+ // argv == vp + 2.
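+  //
+  // Schematically:
+  //   vp[0]  outparam / callee Value
+  //   vp[1]  |this|
+  //   vp[2]  first argument    (argv == &vp[2])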
+
+  // Nestle the stack up against the pushed arguments, leaving StackPointer at
+  // &vp[1].
+ masm.adjustStack(unusedStack);
+ // argObj is filled with the extracted object, then returned.
+ Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
+ MOZ_ASSERT(obj == argObj);
+
+ // Push a Value containing the callee object: natives are allowed to access
+ // their callee before setting the return value. After this the StackPointer
+ // points to &vp[0].
+ masm.Push(ObjectValue(*target->rawNativeJSFunction()));
+
+ // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
+ // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
+ // StackPointer.
+ static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
+ static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
+ IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);
+
+ LoadDOMPrivate(masm, obj, argPrivate,
+ static_cast<MCallDOMNative*>(call->mir())->objectKind());
+
+  // Push argc from the call instruction into what will become the
+  // IonExitFrame.
+ masm.Push(Imm32(call->numActualArgs()));
+
+  // Push our argv onto the stack.
+ masm.Push(argArgs);
+ // And store our JSJitMethodCallArgs* in argArgs.
+ masm.moveStackPtrTo(argArgs);
+
+ // Push |this| object for passing HandleObject. We push after argc to
+ // maintain the same sp-relative location of the object pointer with other
+ // DOMExitFrames.
+ masm.Push(argObj);
+ masm.moveStackPtrTo(argObj);
+
+ if (call->mir()->maybeCrossRealm()) {
+ // We use argJSContext as scratch register here.
+ masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
+ masm.switchToObjectRealm(argJSContext, argJSContext);
+ }
+
+ // Construct native exit frame.
+ uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
+ masm.loadJSContext(argJSContext);
+ masm.enterFakeExitFrame(argJSContext, argJSContext,
+ ExitFrameType::IonDOMMethod);
+
+ markSafepointAt(safepointOffset, call);
+
+ // Construct and execute call.
+ masm.setupAlignedABICall();
+ masm.loadJSContext(argJSContext);
+ masm.passABIArg(argJSContext);
+ masm.passABIArg(argObj);
+ masm.passABIArg(argPrivate);
+ masm.passABIArg(argArgs);
+ ensureOsiSpace();
+ masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
+ MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ if (target->jitInfo()->isInfallible) {
+ masm.loadValue(Address(masm.getStackPointer(),
+ IonDOMMethodExitFrameLayout::offsetOfResult()),
+ JSReturnOperand);
+ } else {
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Load the outparam vp[0] into output register(s).
+ masm.loadValue(Address(masm.getStackPointer(),
+ IonDOMMethodExitFrameLayout::offsetOfResult()),
+ JSReturnOperand);
+ }
+
+ // Switch back to the current realm if needed. Note: if the DOM method threw
+ // an exception, the exception handler will do this.
+ if (call->mir()->maybeCrossRealm()) {
+ static_assert(!JSReturnOperand.aliases(ReturnReg),
+ "Clobbering ReturnReg should not affect the return value");
+ masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
+ }
+
+ // Until C++ code is instrumented against Spectre, prevent speculative
+ // execution from returning any private data.
+ if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
+ masm.speculationBarrier();
+ }
+
+  // The next instruction removes the exit frame footer, so there is no need
+  // for leaveFakeExitFrame.
+
+ // Move the StackPointer back to its original location, unwinding the native
+ // exit frame.
+ masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
+ MOZ_ASSERT(masm.framePushed() == initialStack);
+}
+
+void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
+ pushArg(ImmGCPtr(lir->mir()->name()));
+
+ using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
+ callVM<Fn, GetIntrinsicValue>(lir);
+}
+
+void CodeGenerator::emitCallInvokeFunction(
+ LInstruction* call, Register calleereg, bool constructing,
+ bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
+ // Nestle %esp up to the argument vector.
+ // Each path must account for framePushed_ separately, for callVM to be valid.
+ masm.freeStack(unusedStack);
+
+ pushArg(masm.getStackPointer()); // argv.
+ pushArg(Imm32(argc)); // argc.
+  pushArg(Imm32(ignoresReturnValue));  // ignoresReturnValue.
+ pushArg(Imm32(constructing)); // constructing.
+ pushArg(calleereg); // JSFunction*.
+
+ using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
+ MutableHandleValue);
+ callVM<Fn, jit::InvokeFunction>(call);
+
+ // Un-nestle %esp from the argument vector. No prefix was pushed.
+ masm.reserveStack(unusedStack);
+}
+
+void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
+ Register calleereg = ToRegister(call->getFunction());
+ Register objreg = ToRegister(call->getTempObject());
+ Register nargsreg = ToRegister(call->getNargsReg());
+ uint32_t unusedStack =
+ UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
+ Label invoke, thunk, makeCall, end;
+
+ // Known-target case is handled by LCallKnown.
+ MOZ_ASSERT(!call->hasSingleTarget());
+
+ masm.checkStackAlignment();
+
+ // Guard that calleereg is actually a function object.
+ if (call->mir()->needsClassCheck()) {
+ masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, nargsreg,
+ calleereg, &invoke);
+ }
+
+ // Guard that callee allows the [[Call]] or [[Construct]] operation required.
+ if (call->mir()->isConstructing()) {
+ masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
+ Assembler::Zero, &invoke);
+ } else {
+ masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
+ calleereg, objreg, &invoke);
+ }
+
+ // Use the slow path if CreateThis was unable to create the |this| object.
+ if (call->mir()->needsThisCheck()) {
+ MOZ_ASSERT(call->mir()->isConstructing());
+ Address thisAddr(masm.getStackPointer(), unusedStack);
+ masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
+ } else {
+#ifdef DEBUG
+ if (call->mir()->isConstructing()) {
+ Address thisAddr(masm.getStackPointer(), unusedStack);
+ Label ok;
+ masm.branchTestNull(Assembler::NotEqual, thisAddr, &ok);
+ masm.assumeUnreachable("Unexpected null this-value");
+ masm.bind(&ok);
+ }
+#endif
+ }
+
+ // Load jitCodeRaw for callee if it exists.
+ masm.branchIfFunctionHasNoJitEntry(calleereg, call->mir()->isConstructing(),
+ &invoke);
+ masm.loadJitCodeRaw(calleereg, objreg);
+
+ // Target may be a different realm even if same compartment.
+ if (call->mir()->maybeCrossRealm()) {
+ masm.switchToObjectRealm(calleereg, nargsreg);
+ }
+
+ // Nestle the StackPointer up to the argument vector.
+ masm.freeStack(unusedStack);
+
+ // Construct the JitFrameLayout.
+ masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
+ masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
+
+ // Check whether the provided arguments satisfy target argc.
+ // We cannot have lowered to LCallGeneric with a known target. Assert that we
+ // didn't add any undefineds in WarpBuilder. NB: MCall::numStackArgs includes
+ // |this|.
+ DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
+ MOZ_ASSERT(call->numActualArgs() ==
+ call->mir()->numStackArgs() - numNonArgsOnStack);
+ masm.loadFunctionArgCount(calleereg, nargsreg);
+ masm.branch32(Assembler::Above, nargsreg, Imm32(call->numActualArgs()),
+ &thunk);
+ masm.jump(&makeCall);
+
+ // Argument fixup needed. Load the ArgumentsRectifier.
+ masm.bind(&thunk);
+ {
+ TrampolinePtr argumentsRectifier =
+ gen->jitRuntime()->getArgumentsRectifier();
+ masm.movePtr(argumentsRectifier, objreg);
+ }
+
+ // Finally call the function in objreg.
+ masm.bind(&makeCall);
+ ensureOsiSpace();
+ uint32_t callOffset = masm.callJit(objreg);
+ markSafepointAt(callOffset, call);
+
+ if (call->mir()->maybeCrossRealm()) {
+ static_assert(!JSReturnOperand.aliases(ReturnReg),
+ "ReturnReg available as scratch after scripted calls");
+ masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
+ }
+
+ // Restore stack pointer: pop JitFrameLayout fields still left on the stack
+ // and undo the earlier |freeStack(unusedStack)|.
+ int prefixGarbage =
+ sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
+ masm.adjustStack(prefixGarbage - unusedStack);
+ masm.jump(&end);
+
+ // Handle uncompiled or native functions.
+ masm.bind(&invoke);
+ emitCallInvokeFunction(call, calleereg, call->isConstructing(),
+ call->ignoresReturnValue(), call->numActualArgs(),
+ unusedStack);
+
+ masm.bind(&end);
+
+ // If the return value of the constructing function is Primitive,
+ // replace the return value with the Object from CreateThis.
+ if (call->mir()->isConstructing()) {
+ Label notPrimitive;
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
+ &notPrimitive);
+ masm.loadValue(Address(masm.getStackPointer(), unusedStack),
+ JSReturnOperand);
+#ifdef DEBUG
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
+ &notPrimitive);
+ masm.assumeUnreachable("CreateThis creates an object");
+#endif
+ masm.bind(&notPrimitive);
+ }
+}
+
+void CodeGenerator::visitCallKnown(LCallKnown* call) {
+ Register calleereg = ToRegister(call->getFunction());
+ Register objreg = ToRegister(call->getTempObject());
+ uint32_t unusedStack =
+ UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
+ WrappedFunction* target = call->getSingleTarget();
+
+ // Native single targets (except wasm) are handled by LCallNative.
+ MOZ_ASSERT(target->hasJitEntry());
+
+ // Missing arguments must have been explicitly appended by WarpBuilder.
+ DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
+ MOZ_ASSERT(target->nargs() <=
+ call->mir()->numStackArgs() - numNonArgsOnStack);
+
+ MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
+
+ masm.checkStackAlignment();
+
+ if (target->isClassConstructor() && !call->isConstructing()) {
+ emitCallInvokeFunction(call, calleereg, call->isConstructing(),
+ call->ignoresReturnValue(), call->numActualArgs(),
+ unusedStack);
+ return;
+ }
+
+ MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
+
+ MOZ_ASSERT(!call->mir()->needsThisCheck());
+
+ if (call->mir()->maybeCrossRealm()) {
+ masm.switchToObjectRealm(calleereg, objreg);
+ }
+
+ masm.loadJitCodeRaw(calleereg, objreg);
+
+ // Nestle the StackPointer up to the argument vector.
+ masm.freeStack(unusedStack);
+
+ // Construct the JitFrameLayout.
+ masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
+ masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
+
+ // Finally call the function in objreg.
+ ensureOsiSpace();
+ uint32_t callOffset = masm.callJit(objreg);
+ markSafepointAt(callOffset, call);
+
+ if (call->mir()->maybeCrossRealm()) {
+ static_assert(!JSReturnOperand.aliases(ReturnReg),
+ "ReturnReg available as scratch after scripted calls");
+ masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
+ }
+
+ // Restore stack pointer: pop JitFrameLayout fields still left on the stack
+ // and undo the earlier |freeStack(unusedStack)|.
+ int prefixGarbage =
+ sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
+ masm.adjustStack(prefixGarbage - unusedStack);
+
+ // If the return value of the constructing function is Primitive,
+ // replace the return value with the Object from CreateThis.
+ if (call->mir()->isConstructing()) {
+ Label notPrimitive;
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
+ &notPrimitive);
+ masm.loadValue(Address(masm.getStackPointer(), unusedStack),
+ JSReturnOperand);
+#ifdef DEBUG
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
+ &notPrimitive);
+ masm.assumeUnreachable("CreateThis creates an object");
+#endif
+ masm.bind(&notPrimitive);
+ }
+}
+
+template <typename T>
+void CodeGenerator::emitCallInvokeFunction(T* apply) {
+ Register objreg = ToRegister(apply->getTempObject());
+
+ // Push the space used by the arguments.
+ masm.moveStackPtrTo(objreg);
+
+ pushArg(objreg); // argv.
+ pushArg(ToRegister(apply->getArgc())); // argc.
+ pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
+ pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
+ pushArg(ToRegister(apply->getFunction())); // JSFunction*.
+
+ using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
+ MutableHandleValue);
+ callVM<Fn, jit::InvokeFunction>(apply);
+}
+
+// Do not bail out after the execution of this function, since the stack no
+// longer corresponds to what is expected by the snapshots.
+void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
+ Register scratch) {
+ // Use scratch register to calculate stack space (including padding).
+ masm.movePtr(argcreg, scratch);
+
+ // Align the JitFrameLayout on the JitStackAlignment.
+ if (JitStackValueAlignment > 1) {
+ MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
+ "Stack padding assumes that the frameSize is correct");
+ MOZ_ASSERT(JitStackValueAlignment == 2);
+ Label noPaddingNeeded;
+    // If the number of arguments is odd, then we do not need any padding.
+ masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
+ masm.addPtr(Imm32(1), scratch);
+ masm.bind(&noPaddingNeeded);
+ }
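+
+  // For example, with JitStackValueAlignment == 2: argc == 3 needs no padding
+  // because pushing |this| afterwards makes the total Value count even, while
+  // argc == 4 reserves one extra padding Value.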
+
+ // Reserve space for copying the arguments.
+ NativeObject::elementsSizeMustNotOverflow();
+ masm.lshiftPtr(Imm32(ValueShift), scratch);
+ masm.subFromStackPtr(scratch);
+
+#ifdef DEBUG
+  // Put a magic value in the space reserved for padding. Note that this code
+  // cannot be merged with the previous test, as not all architectures can
+  // write below their stack pointers.
+ if (JitStackValueAlignment > 1) {
+ MOZ_ASSERT(JitStackValueAlignment == 2);
+ Label noPaddingNeeded;
+    // If the number of arguments is odd, then we do not need any padding.
+ masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
+ BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
+ masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
+ masm.bind(&noPaddingNeeded);
+ }
+#endif
+}
+
+// Do not bail out after the execution of this function, since the stack no
+// longer corresponds to what is expected by the snapshots.
+void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
+ Register argcreg, Register newTargetAndScratch) {
+  // Align the JitFrameLayout on the JitStackAlignment. Unlike
+  // |emitAllocateSpaceForApply()|, the padding slot is always written with a
+  // magic value (even in non-debug builds), because we can't use
+  // |newTargetAndScratch| as a scratch register before |new.target| has been
+  // pushed onto the stack.
+ if (JitStackValueAlignment > 1) {
+ MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
+ "Stack padding assumes that the frameSize is correct");
+ MOZ_ASSERT(JitStackValueAlignment == 2);
+
+ Label noPaddingNeeded;
+ // If the number of arguments is even, then we do not need any padding.
+ masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
+ masm.pushValue(MagicValue(JS_ARG_POISON));
+ masm.bind(&noPaddingNeeded);
+ }
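+
+  // For example, with JitStackValueAlignment == 2: argc == 4 needs no padding
+  // because |new.target| and |this| bring the total Value count to an even 6,
+  // while argc == 3 pushes one padding Value first to keep the total even.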
+
+ // Push |new.target| after the padding value, but before any arguments.
+ masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
+
+ // Use newTargetAndScratch to calculate stack space (including padding).
+ masm.movePtr(argcreg, newTargetAndScratch);
+
+ // Reserve space for copying the arguments.
+ NativeObject::elementsSizeMustNotOverflow();
+ masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
+ masm.subFromStackPtr(newTargetAndScratch);
+}
+
+// Destroys argvIndex and copyreg.
+void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
+ Register argvIndex, Register copyreg,
+ size_t argvSrcOffset,
+ size_t argvDstOffset) {
+ Label loop;
+ masm.bind(&loop);
+
+  // Because argvIndex is off by 1 and we use the decBranchPtr instruction to
+  // loop back, we have to subtract the size of the word being copied.
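+  // For example, with argvIndex == 2 the first iteration copies the Value at
+  // index 1, the second copies the Value at index 0, and the loop exits once
+  // argvIndex reaches 0.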
+ BaseValueIndex srcPtr(argvSrcBase, argvIndex,
+ int32_t(argvSrcOffset) - sizeof(void*));
+ BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
+ int32_t(argvDstOffset) - sizeof(void*));
+ masm.loadPtr(srcPtr, copyreg);
+ masm.storePtr(copyreg, dstPtr);
+
+  // Handle 32-bit architectures.
+ if (sizeof(Value) == 2 * sizeof(void*)) {
+ BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
+ int32_t(argvSrcOffset) - 2 * sizeof(void*));
+ BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
+ int32_t(argvDstOffset) - 2 * sizeof(void*));
+ masm.loadPtr(srcPtrLow, copyreg);
+ masm.storePtr(copyreg, dstPtrLow);
+ }
+
+ masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
+}
+
+void CodeGenerator::emitRestoreStackPointerFromFP() {
+ // This is used to restore the stack pointer after a call with a dynamic
+ // number of arguments.
+
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+
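+  // Equivalent to |StackPointer = FramePointer - frameSize()|.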
+ int32_t offset = -int32_t(frameSize());
+ masm.computeEffectiveAddress(Address(FramePointer, offset),
+ masm.getStackPointer());
+}
+
+void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
+ Register copyreg, uint32_t extraFormals) {
+ Label end;
+
+ // Skip the copy of arguments if there are none.
+ masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
+
+ // clang-format off
+ //
+ // We are making a copy of the arguments which are above the JitFrameLayout
+ // of the current Ion frame.
+ //
+ // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
+ //
+ // clang-format on
+
+ // Compute the source and destination offsets into the stack.
+ Register argvSrcBase = FramePointer;
+ size_t argvSrcOffset =
+ JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
+ size_t argvDstOffset = 0;
+
+ Register argvIndex = scratch;
+ masm.move32(argcreg, argvIndex);
+
+ // Copy arguments.
+ emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
+ argvDstOffset);
+
+ // Join with all arguments copied and the extra stack usage computed.
+ masm.bind(&end);
+}
+
+void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
+ Register scratch) {
+ // Holds the function nargs. Initially the number of args to the caller.
+ Register argcreg = ToRegister(apply->getArgc());
+ Register copyreg = ToRegister(apply->getTempObject());
+ uint32_t extraFormals = apply->numExtraFormals();
+
+ emitAllocateSpaceForApply(argcreg, scratch);
+
+ emitPushArguments(argcreg, scratch, copyreg, extraFormals);
+
+ // Push |this|.
+ masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
+}
+
+void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
+ // argc and argsObj are mapped to the same calltemp register.
+ MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
+
+ Register tmpArgc = ToRegister(apply->getTempObject());
+ Register argsObj = ToRegister(apply->getArgsObj());
+
+ // Load argc into tmpArgc.
+ Address lengthAddr(argsObj, ArgumentsObject::getInitialLengthSlotOffset());
+ masm.unboxInt32(lengthAddr, tmpArgc);
+ masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpArgc);
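+  // The initial-length slot stores the length shifted left by
+  // PACKED_BITS_COUNT, with flag bits in the low bits, so the shift above
+  // recovers the actual argument count.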
+
+ // Allocate space on the stack for arguments. This modifies scratch.
+ emitAllocateSpaceForApply(tmpArgc, scratch);
+
+  // Load the arguments data.
+ masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
+ argsObj);
+ size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
+
+ // This is the end of the lifetime of argsObj.
+ // After this call, the argsObj register holds the argument count instead.
+ emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
+
+ masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
+}
+
+void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
+ Register srcBaseAndArgc,
+ Register scratch,
+ size_t argvSrcOffset) {
+ // Preconditions:
+ // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
+ // the stack to hold arguments.
+  //   2. |srcBaseAndArgc| + |argvSrcOffset| points to an array of |tmpArgc|
+  //      values.
+  //
+  // Postconditions:
+  //   1. The arguments at |srcBaseAndArgc| + |argvSrcOffset| have been copied
+  //      into the allocated space.
+ // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
+ //
+ // |scratch| is used as a temp register within this function and clobbered.
+
+ Label noCopy, epilogue;
+
+ // Skip the copy of arguments if there are none.
+ masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
+
+ // Copy the values. This code is skipped entirely if there are
+ // no values.
+ size_t argvDstOffset = 0;
+
+ Register argvSrcBase = srcBaseAndArgc;
+ Register copyreg = scratch;
+
+ masm.push(tmpArgc);
+ Register argvIndex = tmpArgc;
+ argvDstOffset += sizeof(void*);
+
+ // Copy
+ emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
+ argvDstOffset);
+
+ // Restore.
+ masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
+ masm.jump(&epilogue);
+
+ // Clear argc if we skipped the copy step.
+ masm.bind(&noCopy);
+ masm.movePtr(ImmWord(0), srcBaseAndArgc);
+
+ // Join with all arguments copied and the extra stack usage computed.
+ // Note, "srcBase" has become "argc".
+ masm.bind(&epilogue);
+}
+
+void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
+ Register scratch) {
+ Register tmpArgc = ToRegister(apply->getTempObject());
+ Register elementsAndArgc = ToRegister(apply->getElements());
+
+ // Invariants guarded in the caller:
+ // - the array is not too long
+ // - the array length equals its initialized length
+
+ // The array length is our argc for the purposes of allocating space.
+ Address length(ToRegister(apply->getElements()),
+ ObjectElements::offsetOfLength());
+ masm.load32(length, tmpArgc);
+
+ // Allocate space for the values.
+ emitAllocateSpaceForApply(tmpArgc, scratch);
+
+ // After this call "elements" has become "argc".
+ size_t elementsOffset = 0;
+ emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
+
+ // Push |this|.
+ masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
+}
+
+void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
+ Register scratch) {
+ MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
+
+ // Holds the function nargs. Initially the number of args to the caller.
+ Register argcreg = ToRegister(construct->getArgc());
+ Register copyreg = ToRegister(construct->getTempObject());
+ uint32_t extraFormals = construct->numExtraFormals();
+
+ // Allocate space for the values.
+ // After this call "newTarget" has become "scratch".
+ emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
+
+ emitPushArguments(argcreg, scratch, copyreg, extraFormals);
+
+ // Push |this|.
+ masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
+}
+
+void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
+ Register scratch) {
+ MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
+
+ Register tmpArgc = ToRegister(construct->getTempObject());
+ Register elementsAndArgc = ToRegister(construct->getElements());
+
+ // Invariants guarded in the caller:
+ // - the array is not too long
+ // - the array length equals its initialized length
+
+ // The array length is our argc for the purposes of allocating space.
+ Address length(ToRegister(construct->getElements()),
+ ObjectElements::offsetOfLength());
+ masm.load32(length, tmpArgc);
+
+ // Allocate space for the values.
+ emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
+
+ // After this call "elements" has become "argc" and "newTarget" has become
+ // "scratch".
+ size_t elementsOffset = 0;
+ emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
+
+ // Push |this|.
+ masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
+}
+
+template <typename T>
+void CodeGenerator::emitApplyGeneric(T* apply) {
+ // Holds the function object.
+ Register calleereg = ToRegister(apply->getFunction());
+
+ // Temporary register for modifying the function object.
+ Register objreg = ToRegister(apply->getTempObject());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
+
+ // Holds the function nargs, computed in the invoker or (for ApplyArray,
+ // ConstructArray, or ApplyArgsObj) in the argument pusher.
+ Register argcreg = ToRegister(apply->getArgc());
+
+ // Copy the arguments of the current function.
+ //
+ // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
+ // compute argc. The argc register and the elements/argsObj register
+ // are the same; argc must not be referenced before the call to
+ // emitPushArguments() and elements/argsObj must not be referenced
+ // after it returns.
+ //
+ // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
+ // with scratch; newTarget must not be referenced after this point.
+ //
+ // objreg is dead across this call.
+ emitPushArguments(apply, scratch);
+
+ masm.checkStackAlignment();
+
+ bool constructing = apply->mir()->isConstructing();
+
+ // If the function is native, only emit the call to InvokeFunction.
+ if (apply->hasSingleTarget() &&
+ apply->getSingleTarget()->isNativeWithoutJitEntry()) {
+ emitCallInvokeFunction(apply);
+
+#ifdef DEBUG
+ // Native constructors are guaranteed to return an Object value, so we never
+ // have to replace a primitive result with the previously allocated Object
+ // from CreateThis.
+ if (constructing) {
+ Label notPrimitive;
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
+ &notPrimitive);
+ masm.assumeUnreachable("native constructors don't return primitives");
+ masm.bind(&notPrimitive);
+ }
+#endif
+
+ emitRestoreStackPointerFromFP();
+ return;
+ }
+
+ Label end, invoke;
+
+ // Unless already known, guard that calleereg is actually a function object.
+ if (!apply->hasSingleTarget()) {
+ masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
+ calleereg, &invoke);
+ }
+
+ // Guard that calleereg is an interpreted function with a JSScript.
+ masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);
+
+ // Guard that callee allows the [[Call]] or [[Construct]] operation required.
+ if (constructing) {
+ masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
+ Assembler::Zero, &invoke);
+ } else {
+ masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
+ calleereg, objreg, &invoke);
+ }
+
+ // Use the slow path if CreateThis was unable to create the |this| object.
+ if (constructing) {
+ Address thisAddr(masm.getStackPointer(), 0);
+ masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
+ }
+
+ // Call with an Ion frame or a rectifier frame.
+ {
+ if (apply->mir()->maybeCrossRealm()) {
+ masm.switchToObjectRealm(calleereg, objreg);
+ }
+
+ // Knowing that calleereg is a non-native function, load jitcode.
+ masm.loadJitCodeRaw(calleereg, objreg);
+
+ masm.PushCalleeToken(calleereg, constructing);
+ masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);
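+    // Note: the frame descriptor above is built from the dynamic argument
+    // count in |argcreg|, unlike the fixed-count calls in visitCallGeneric
+    // and visitCallKnown.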
+
+ Label underflow, rejoin;
+
+ // Check whether the provided arguments satisfy target argc.
+ if (!apply->hasSingleTarget()) {
+ Register nformals = scratch;
+ masm.loadFunctionArgCount(calleereg, nformals);
+ masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
+ } else {
+ masm.branch32(Assembler::Below, argcreg,
+ Imm32(apply->getSingleTarget()->nargs()), &underflow);
+ }
+
+ // Skip the construction of the rectifier frame because we have no
+ // underflow.
+ masm.jump(&rejoin);
+
+ // Argument fixup needed. Get ready to call the argumentsRectifier.
+ {
+ masm.bind(&underflow);
+
+ // Hardcode the address of the argumentsRectifier code.
+ TrampolinePtr argumentsRectifier =
+ gen->jitRuntime()->getArgumentsRectifier();
+ masm.movePtr(argumentsRectifier, objreg);
+ }
+
+ masm.bind(&rejoin);
+
+ // Finally call the function in objreg, as assigned by one of the paths
+ // above.
+ ensureOsiSpace();
+ uint32_t callOffset = masm.callJit(objreg);
+ markSafepointAt(callOffset, apply);
+
+ if (apply->mir()->maybeCrossRealm()) {
+ static_assert(!JSReturnOperand.aliases(ReturnReg),
+ "ReturnReg available as scratch after scripted calls");
+ masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
+ }
+
+ // Discard JitFrameLayout fields still left on the stack.
+ masm.freeStack(sizeof(JitFrameLayout) -
+ JitFrameLayout::bytesPoppedAfterCall());
+ masm.jump(&end);
+ }
+
+ // Handle uncompiled or native functions.
+ {
+ masm.bind(&invoke);
+ emitCallInvokeFunction(apply);
+ }
+
+ masm.bind(&end);
+
+ // If the return value of the constructing function is Primitive,
+ // replace the return value with the Object from CreateThis.
+ if (constructing) {
+ Label notPrimitive;
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
+ &notPrimitive);
+ masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);
+
+#ifdef DEBUG
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
+ &notPrimitive);
+ masm.assumeUnreachable("CreateThis creates an object");
+#endif
+
+ masm.bind(&notPrimitive);
+ }
+
+ // Pop arguments and continue.
+ emitRestoreStackPointerFromFP();
+}
+
+void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
+ LSnapshot* snapshot = apply->snapshot();
+ Register argcreg = ToRegister(apply->getArgc());
+
+ // Ensure that we have a reasonable number of arguments.
+ bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
+
+ emitApplyGeneric(apply);
+}
+
+void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
+ Register argsObj = ToRegister(apply->getArgsObj());
+ Register temp = ToRegister(apply->getTempObject());
+
+ Label bail;
+ masm.loadArgumentsObjectLength(argsObj, temp, &bail);
+ masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
+ bailoutFrom(&bail, apply->snapshot());
+
+ emitApplyGeneric(apply);
+}
+
+void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
+ LSnapshot* snapshot = apply->snapshot();
+ Register tmp = ToRegister(apply->getTempObject());
+
+ Address length(ToRegister(apply->getElements()),
+ ObjectElements::offsetOfLength());
+ masm.load32(length, tmp);
+
+ // Ensure that we have a reasonable number of arguments.
+ bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
+
+ // Ensure that the array does not contain an uninitialized tail.
+
+ Address initializedLength(ToRegister(apply->getElements()),
+ ObjectElements::offsetOfInitializedLength());
+ masm.sub32(initializedLength, tmp);
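+  // tmp now holds (length - initializedLength); a nonzero value means the
+  // array has an uninitialized tail, so bail out below.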
+ bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
+
+ emitApplyGeneric(apply);
+}
+
+void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
+ LSnapshot* snapshot = lir->snapshot();
+ Register argcreg = ToRegister(lir->getArgc());
+
+ // Ensure that we have a reasonable number of arguments.
+ bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
+
+ emitApplyGeneric(lir);
+}
+
+void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
+ LSnapshot* snapshot = lir->snapshot();
+ Register tmp = ToRegister(lir->getTempObject());
+
+ Address length(ToRegister(lir->getElements()),
+ ObjectElements::offsetOfLength());
+ masm.load32(length, tmp);
+
+ // Ensure that we have a reasonable number of arguments.
+ bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
+
+ // Ensure that the array does not contain an uninitialized tail.
+
+ Address initializedLength(ToRegister(lir->getElements()),
+ ObjectElements::offsetOfInitializedLength());
+ masm.sub32(initializedLength, tmp);
+ bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
+
+ emitApplyGeneric(lir);
+}
+
+void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
+
+void CodeGenerator::visitUnreachable(LUnreachable* lir) {
+ masm.assumeUnreachable("end-of-block assumed unreachable");
+}
+
+void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
+ encode(lir->snapshot());
+}
+
+void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
+ masm.assumeUnreachable("must be unreachable");
+}
+
+void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
+ masm.assumeUnreachable("must be unreachable");
+}
+
+// Out-of-line path to report an over-recursed error and fail.
+class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
+ LInstruction* lir_;
+
+ public:
+ explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitCheckOverRecursedFailure(this);
+ }
+
+ LInstruction* lir() const { return lir_; }
+};
+
+void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
+ // If we don't push anything on the stack, skip the check.
+ if (omitOverRecursedCheck()) {
+ return;
+ }
+
+ // Ensure that this frame will not cross the stack limit.
+ // This is a weak check, justified by Ion using the C stack: we must always
+ // be some distance away from the actual limit, since if the limit is
+ // crossed, an error must be thrown, which requires more frames.
+ //
+ // It must always be possible to trespass past the stack limit.
+ // Ion may legally place frames very close to the limit. Calling additional
+ // C functions may then violate the limit without any checking.
+ //
+ // Since Ion frames exist on the C stack, the stack limit may be
+ // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
+
+ CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ // Conditional forward (unlikely) branch to failure.
+ const void* limitAddr = gen->runtime->addressOfJitStackLimit();
+ masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
+ ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCheckOverRecursedFailure(
+ CheckOverRecursedFailure* ool) {
+ // The OOL path is hit if the recursion depth has been exceeded.
+ // Throw an InternalError for over-recursion.
+
+ // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
+ // to save all live registers to avoid crashes if CheckOverRecursed triggers
+ // a GC.
+ saveLive(ool->lir());
+
+ using Fn = bool (*)(JSContext*);
+ callVM<Fn, CheckOverRecursed>(ool->lir());
+
+ restoreLive(ool->lir());
+ masm.jump(ool->rejoin());
+}
+
+IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
+ // If scripts are being profiled, create a new IonScriptCounts for the
+ // profiling data, which will be attached to the associated JSScript or
+ // wasm module after code generation finishes.
+ if (!gen->hasProfilingScripts()) {
+ return nullptr;
+ }
+
+  // This test inhibits IonScriptCounts creation for wasm code, which is
+  // currently incompatible with wasm codegen for two reasons: (1) wasm code
+  // must be serializable, and script count codegen bakes in absolute
+  // addresses; (2) wasm code does not have a JSScript with which to associate
+  // code coverage data.
+ JSScript* script = gen->outerInfo().script();
+ if (!script) {
+ return nullptr;
+ }
+
+ auto counts = MakeUnique<IonScriptCounts>();
+ if (!counts || !counts->init(graph.numBlocks())) {
+ return nullptr;
+ }
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ MBasicBlock* block = graph.getBlock(i)->mir();
+
+ uint32_t offset = 0;
+ char* description = nullptr;
+ if (MResumePoint* resume = block->entryResumePoint()) {
+ // Find a PC offset in the outermost script to use. If this
+ // block is from an inlined script, find a location in the
+ // outer script to associate information about the inlining
+ // with.
+ while (resume->caller()) {
+ resume = resume->caller();
+ }
+ offset = script->pcToOffset(resume->pc());
+
+ if (block->entryResumePoint()->caller()) {
+ // Get the filename and line number of the inner script.
+ JSScript* innerScript = block->info().script();
+ description = js_pod_calloc<char>(200);
+ if (description) {
+ snprintf(description, 200, "%s:%u", innerScript->filename(),
+ innerScript->lineno());
+ }
+ }
+ }
+
+ if (!counts->block(i).init(block->id(), offset, description,
+ block->numSuccessors())) {
+ return nullptr;
+ }
+
+ for (size_t j = 0; j < block->numSuccessors(); j++) {
+ counts->block(i).setSuccessor(
+ j, skipTrivialBlocks(block->getSuccessor(j))->id());
+ }
+ }
+
+ scriptCounts_ = counts.release();
+ return scriptCounts_;
+}
+
+// Structure for managing the state tracked for a block by script counters.
+struct ScriptCountBlockState {
+ IonBlockCounts& block;
+ MacroAssembler& masm;
+
+ Sprinter printer;
+
+ public:
+ ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
+ : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}
+
+ bool init() {
+ if (!printer.init()) {
+ return false;
+ }
+
+ // Bump the hit count for the block at the start. This code is not
+ // included in either the text for the block or the instruction byte
+ // counts.
+ masm.inc64(AbsoluteAddress(block.addressOfHitCount()));
+
+ // Collect human readable assembly for the code generated in the block.
+ masm.setPrinter(&printer);
+
+ return true;
+ }
+
+ void visitInstruction(LInstruction* ins) {
+#ifdef JS_JITSPEW
+ // Prefix stream of assembly instructions with their LIR instruction
+ // name and any associated high level info.
+ if (const char* extra = ins->getExtraName()) {
+ printer.printf("[%s:%s]\n", ins->opName(), extra);
+ } else {
+ printer.printf("[%s]\n", ins->opName());
+ }
+#endif
+ }
+
+ ~ScriptCountBlockState() {
+ masm.setPrinter(nullptr);
+
+ if (!printer.hadOutOfMemory()) {
+ block.setCode(printer.string());
+ }
+ }
+};
+
+void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
+ CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
+ masm.propagateOOM(ionScriptLabels_.append(label));
+
+ // If IonScript::invalidationCount_ != 0, the script has been invalidated.
+ masm.branch32(Assembler::NotEqual,
+ Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
+ invalidated);
+}
+
+#ifdef DEBUG
+void CodeGenerator::emitAssertGCThingResult(Register input,
+ const MDefinition* mir) {
+ MIRType type = mir->type();
+ MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
+ type == MIRType::Symbol || type == MIRType::BigInt);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+
+ Register temp = regs.takeAny();
+ masm.push(temp);
+
+ // Don't check if the script has been invalidated. In that case invalid
+ // types are expected (until we reach the OsiPoint and bailout).
+ Label done;
+ branchIfInvalidated(temp, &done);
+
+# ifndef JS_SIMULATOR
+ // Check that we have a valid GC pointer.
+ // Disable for wasm because we don't have a context on wasm compilation
+ // threads and this needs a context.
+ // Also disable for simulator builds because the C++ call is a lot slower
+ // there than on actual hardware.
+ if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
+ saveVolatile();
+ masm.setupUnalignedABICall(temp);
+ masm.loadJSContext(temp);
+ masm.passABIArg(temp);
+ masm.passABIArg(input);
+
+ switch (type) {
+ case MIRType::Object: {
+ using Fn = void (*)(JSContext* cx, JSObject* obj);
+ masm.callWithABI<Fn, AssertValidObjectPtr>();
+ break;
+ }
+ case MIRType::String: {
+ using Fn = void (*)(JSContext* cx, JSString* str);
+ masm.callWithABI<Fn, AssertValidStringPtr>();
+ break;
+ }
+ case MIRType::Symbol: {
+ using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
+ masm.callWithABI<Fn, AssertValidSymbolPtr>();
+ break;
+ }
+ case MIRType::BigInt: {
+ using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
+ masm.callWithABI<Fn, AssertValidBigIntPtr>();
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ }
+
+ restoreVolatile();
+ }
+# endif
+
+ masm.bind(&done);
+ masm.pop(temp);
+}
+
+void CodeGenerator::emitAssertResultV(const ValueOperand input,
+ const MDefinition* mir) {
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input);
+
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+ masm.push(temp1);
+ masm.push(temp2);
+
+ // Don't check if the script has been invalidated. In that case invalid
+ // types are expected (until we reach the OsiPoint and bailout).
+ Label done;
+ branchIfInvalidated(temp1, &done);
+
+ // Check that we have a valid GC pointer.
+ if (JitOptions.fullDebugChecks) {
+ saveVolatile();
+
+ masm.pushValue(input);
+ masm.moveStackPtrTo(temp1);
+
+ using Fn = void (*)(JSContext* cx, Value* v);
+ masm.setupUnalignedABICall(temp2);
+ masm.loadJSContext(temp2);
+ masm.passABIArg(temp2);
+ masm.passABIArg(temp1);
+ masm.callWithABI<Fn, AssertValidValue>();
+ masm.popValue(input);
+ restoreVolatile();
+ }
+
+ masm.bind(&done);
+ masm.pop(temp2);
+ masm.pop(temp1);
+}
+
+void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
+ MDefinition* mir) {
+ if (lir->numDefs() == 0) {
+ return;
+ }
+
+ MOZ_ASSERT(lir->numDefs() == 1);
+ if (lir->getDef(0)->isBogusTemp()) {
+ return;
+ }
+
+ Register output = ToRegister(lir->getDef(0));
+ emitAssertGCThingResult(output, mir);
+}
+
+void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
+ if (lir->numDefs() == 0) {
+ return;
+ }
+
+ MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
+ if (!lir->getDef(0)->output()->isRegister()) {
+ return;
+ }
+
+ ValueOperand output = ToOutValue(lir);
+
+ emitAssertResultV(output, mir);
+}
+
+void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
+ // In debug builds, check that LIR instructions return valid values.
+
+ MDefinition* mir = ins->mirRaw();
+ if (!mir) {
+ return;
+ }
+
+ switch (mir->type()) {
+ case MIRType::Object:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ emitGCThingResultChecks(ins, mir);
+ break;
+ case MIRType::Value:
+ emitValueResultChecks(ins, mir);
+ break;
+ default:
+ break;
+ }
+}
+
+void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
+ if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
+ return;
+ }
+ if (!lir->snapshot()) {
+ return;
+ }
+ if (lir->isOsiPoint()) {
+ return;
+ }
+
+ masm.comment("emitDebugForceBailing");
+ const void* bailAfterCounterAddr =
+ gen->runtime->addressOfIonBailAfterCounter();
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+
+ Label done, notBail;
+ masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
+ Imm32(0), &done);
+ {
+ Register temp = regs.takeAny();
+
+ masm.push(temp);
+ masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
+ masm.sub32(Imm32(1), temp);
+ masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));
+
+ masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
+ {
+ masm.pop(temp);
+ bailout(lir->snapshot());
+ }
+ masm.bind(&notBail);
+ masm.pop(temp);
+ }
+ masm.bind(&done);
+}
+#endif
+
+bool CodeGenerator::generateBody() {
+ JitSpewCont(JitSpew_Codegen, "\n");
+ AutoCreatedBy acb(masm, "CodeGenerator::generateBody");
+
+ JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
+ IonScriptCounts* counts = maybeCreateScriptCounts();
+
+ const bool compilingWasm = gen->compilingWasm();
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ current = graph.getBlock(i);
+
+ // Don't emit any code for trivial blocks, containing just a goto. Such
+ // blocks are created to split critical edges, and if we didn't end up
+ // putting any instructions in them, we can skip them.
+ if (current->isTrivial()) {
+ continue;
+ }
+
+#ifdef JS_JITSPEW
+ const char* filename = nullptr;
+ size_t lineNumber = 0;
+ unsigned columnNumber = 0;
+ if (current->mir()->info().script()) {
+ filename = current->mir()->info().script()->filename();
+ if (current->mir()->pc()) {
+ lineNumber = PCToLineNumber(current->mir()->info().script(),
+ current->mir()->pc(), &columnNumber);
+ }
+ } else {
+# ifdef DEBUG
+ lineNumber = current->mir()->lineno();
+ columnNumber = current->mir()->columnIndex();
+# endif
+ }
+ JitSpew(JitSpew_Codegen, "--------------------------------");
+ JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
+ filename ? filename : "?", lineNumber, columnNumber,
+ current->mir()->isLoopHeader() ? " (loop header)" : "");
+#endif
+
+ if (current->mir()->isLoopHeader() && compilingWasm) {
+ masm.nopAlign(CodeAlignment);
+ }
+
+ masm.bind(current->label());
+
+ mozilla::Maybe<ScriptCountBlockState> blockCounts;
+ if (counts) {
+ blockCounts.emplace(&counts->block(i), &masm);
+ if (!blockCounts->init()) {
+ return false;
+ }
+ }
+
+ for (LInstructionIterator iter = current->begin(); iter != current->end();
+ iter++) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ perfSpewer_.recordInstruction(masm, *iter);
+#ifdef JS_JITSPEW
+ JitSpewStart(JitSpew_Codegen, " # LIR=%s",
+ iter->opName());
+ if (const char* extra = iter->getExtraName()) {
+ JitSpewCont(JitSpew_Codegen, ":%s", extra);
+ }
+ JitSpewFin(JitSpew_Codegen);
+#endif
+
+ if (counts) {
+ blockCounts->visitInstruction(*iter);
+ }
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (iter->safepoint() && !compilingWasm) {
+ resetOsiPointRegs(iter->safepoint());
+ }
+#endif
+
+ if (!compilingWasm) {
+ if (MDefinition* mir = iter->mirRaw()) {
+ if (!addNativeToBytecodeEntry(mir->trackedSite())) {
+ return false;
+ }
+ }
+ }
+
+ setElement(*iter); // needed to encode correct snapshot location.
+
+#ifdef DEBUG
+ emitDebugForceBailing(*iter);
+#endif
+
+ switch (iter->op()) {
+#ifndef JS_CODEGEN_NONE
+# define LIROP(op) \
+ case LNode::Opcode::op: \
+ visit##op(iter->to##op()); \
+ break;
+ LIR_OPCODE_LIST(LIROP)
+# undef LIROP
+#endif
+ case LNode::Opcode::Invalid:
+ default:
+ MOZ_CRASH("Invalid LIR op");
+ }
+
+#ifdef DEBUG
+ if (!counts) {
+ emitDebugResultChecks(*iter);
+ }
+#endif
+ }
+ if (masm.oom()) {
+ return false;
+ }
+ }
+
+ JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
+ return true;
+}
+
+// Out-of-line object allocation for LNewArray.
+class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
+ LNewArray* lir_;
+
+ public:
+ explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineNewArray(this);
+ }
+
+ LNewArray* lir() const { return lir_; }
+};
+
+void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
+ Register objReg = ToRegister(lir->output());
+
+ MOZ_ASSERT(!lir->isCall());
+ saveLive(lir);
+
+ JSObject* templateObject = lir->mir()->templateObject();
+
+ if (templateObject) {
+ pushArg(ImmGCPtr(templateObject->shape()));
+ pushArg(Imm32(lir->mir()->length()));
+
+ using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
+ callVM<Fn, NewArrayWithShape>(lir);
+ } else {
+ pushArg(Imm32(GenericObject));
+ pushArg(Imm32(lir->mir()->length()));
+
+ using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
+ callVM<Fn, NewArrayOperation>(lir);
+ }
+
+ masm.storeCallPointerResult(objReg);
+
+ MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
+ restoreLive(lir);
+}
+
+void CodeGenerator::visitAtan2D(LAtan2D* lir) {
+ FloatRegister y = ToFloatRegister(lir->y());
+ FloatRegister x = ToFloatRegister(lir->x());
+
+ using Fn = double (*)(double x, double y);
+ masm.setupAlignedABICall();
+ masm.passABIArg(y, MoveOp::DOUBLE);
+ masm.passABIArg(x, MoveOp::DOUBLE);
+ masm.callWithABI<Fn, ecmaAtan2>(MoveOp::DOUBLE);
+
+ MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
+}
+
+void CodeGenerator::visitHypot(LHypot* lir) {
+ uint32_t numArgs = lir->numArgs();
+ masm.setupAlignedABICall();
+
+ for (uint32_t i = 0; i < numArgs; ++i) {
+ masm.passABIArg(ToFloatRegister(lir->getOperand(i)), MoveOp::DOUBLE);
+ }
+
+ switch (numArgs) {
+ case 2: {
+ using Fn = double (*)(double x, double y);
+ masm.callWithABI<Fn, ecmaHypot>(MoveOp::DOUBLE);
+ break;
+ }
+ case 3: {
+ using Fn = double (*)(double x, double y, double z);
+ masm.callWithABI<Fn, hypot3>(MoveOp::DOUBLE);
+ break;
+ }
+ case 4: {
+ using Fn = double (*)(double x, double y, double z, double w);
+ masm.callWithABI<Fn, hypot4>(MoveOp::DOUBLE);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected number of arguments to hypot function.");
+ }
+ MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
+}
+
+void CodeGenerator::visitNewArray(LNewArray* lir) {
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+ DebugOnly<uint32_t> length = lir->mir()->length();
+
+ MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
+
+ if (lir->mir()->isVMCall()) {
+ visitNewArrayCallVM(lir);
+ return;
+ }
+
+ OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ TemplateObject templateObject(lir->mir()->templateObject());
+#ifdef DEBUG
+ size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
+ ObjectElements::VALUES_PER_HEADER;
+ MOZ_ASSERT(length <= numInlineElements,
+ "Inline allocation only supports inline elements");
+#endif
+ masm.createGCObject(objReg, tempReg, templateObject,
+ lir->mir()->initialHeap(), ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
+ visitNewArrayCallVM(ool->lir());
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
+ Register lengthReg = ToRegister(lir->length());
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp0());
+
+ JSObject* templateObject = lir->mir()->templateObject();
+ gc::Heap initialHeap = lir->mir()->initialHeap();
+
+ using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
+ OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
+ lir, ArgList(ImmGCPtr(templateObject), lengthReg),
+ StoreRegisterTo(objReg));
+
+ bool canInline = true;
+ size_t inlineLength = 0;
+ if (templateObject->as<ArrayObject>().hasFixedElements()) {
+ size_t numSlots =
+ gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
+ inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
+ } else {
+ canInline = false;
+ }
+
+ if (canInline) {
+ // Try to do the allocation inline if the template object is big enough
+ // for the length in lengthReg. If the length is bigger we could still
+ // use the template object and not allocate the elements, but it's more
+ // efficient to do a single big allocation than (repeatedly) reallocating
+ // the array later on when filling it.
+ masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
+ ool->entry());
+
+ TemplateObject templateObj(templateObject);
+ masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
+ ool->entry());
+
+ size_t lengthOffset = NativeObject::offsetOfFixedElements() +
+ ObjectElements::offsetOfLength();
+ masm.store32(lengthReg, Address(objReg, lengthOffset));
+ } else {
+ masm.jump(ool->entry());
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewIterator(LNewIterator* lir) {
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp0());
+
+ OutOfLineCode* ool;
+ switch (lir->mir()->type()) {
+ case MNewIterator::ArrayIterator: {
+ using Fn = ArrayIteratorObject* (*)(JSContext*);
+ ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
+ StoreRegisterTo(objReg));
+ break;
+ }
+ case MNewIterator::StringIterator: {
+ using Fn = StringIteratorObject* (*)(JSContext*);
+ ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
+ StoreRegisterTo(objReg));
+ break;
+ }
+ case MNewIterator::RegExpStringIterator: {
+ using Fn = RegExpStringIteratorObject* (*)(JSContext*);
+ ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
+ StoreRegisterTo(objReg));
+ break;
+ }
+ default:
+ MOZ_CRASH("unexpected iterator type");
+ }
+
+ TemplateObject templateObject(lir->mir()->templateObject());
+ masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
+ ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp0());
+ Register lengthReg = ToRegister(lir->temp1());
+ LiveRegisterSet liveRegs = liveVolatileRegs(lir);
+
+ JSObject* templateObject = lir->mir()->templateObject();
+ gc::Heap initialHeap = lir->mir()->initialHeap();
+
+ TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();
+
+ size_t n = ttemplate->length();
+ MOZ_ASSERT(n <= INT32_MAX,
+ "Template objects are only created for int32 lengths");
+
+ using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
+ OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
+ lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
+ StoreRegisterTo(objReg));
+
+ TemplateObject templateObj(templateObject);
+ masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
+
+ masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
+ ttemplate, MacroAssembler::TypedArrayLength::Fixed);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewTypedArrayDynamicLength(
+ LNewTypedArrayDynamicLength* lir) {
+ Register lengthReg = ToRegister(lir->length());
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp0());
+ LiveRegisterSet liveRegs = liveVolatileRegs(lir);
+
+ JSObject* templateObject = lir->mir()->templateObject();
+ gc::Heap initialHeap = lir->mir()->initialHeap();
+
+ TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();
+
+ using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
+ OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
+ lir, ArgList(ImmGCPtr(templateObject), lengthReg),
+ StoreRegisterTo(objReg));
+
+ // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
+ MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));
+
+ TemplateObject templateObj(templateObject);
+ masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
+
+ masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
+ ttemplate,
+ MacroAssembler::TypedArrayLength::Dynamic);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
+ pushArg(ToRegister(lir->array()));
+ pushArg(ImmGCPtr(lir->mir()->templateObject()));
+
+ using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
+ callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
+}
+
+void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
+ LNewTypedArrayFromArrayBuffer* lir) {
+ pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
+ pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
+ pushArg(ToRegister(lir->arrayBuffer()));
+ pushArg(ImmGCPtr(lir->mir()->templateObject()));
+
+ using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
+ HandleValue, HandleValue);
+ callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
+}
+
+void CodeGenerator::visitBindFunction(LBindFunction* lir) {
+ Register target = ToRegister(lir->target());
+ Register temp1 = ToRegister(lir->temp0());
+ Register temp2 = ToRegister(lir->temp1());
+
+ // Try to allocate a new BoundFunctionObject we can pass to the VM function.
+ // If this fails, we set temp1 to nullptr so we do the allocation in C++.
+ TemplateObject templateObject(lir->mir()->templateObject());
+ Label allocOk, allocFailed;
+ masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
+ &allocFailed);
+ masm.jump(&allocOk);
+
+ masm.bind(&allocFailed);
+ masm.movePtr(ImmWord(0), temp1);
+
+ masm.bind(&allocOk);
+
+ // Set temp2 to the address of the first argument on the stack.
+ // Note that the Value slots used for arguments are currently aligned for a
+ // JIT call, even though that's not strictly necessary for calling into C++.
+ uint32_t argc = lir->mir()->numStackArgs();
+ if (JitStackValueAlignment > 1) {
+ argc = AlignBytes(argc, JitStackValueAlignment);
+ }
+ uint32_t unusedStack = UnusedStackBytesForCall(argc);
+ masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
+ temp2);
+
+ pushArg(temp1);
+ pushArg(Imm32(lir->mir()->numStackArgs()));
+ pushArg(temp2);
+ pushArg(target);
+
+ using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
+ uint32_t, Handle<BoundFunctionObject*>);
+ callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
+}
+
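+// [Editorial illustration, not part of the original patch] visitBindFunction
+// above rounds |argc| up to a multiple of JitStackValueAlignment before
+// computing the unused-stack offset. A minimal standalone sketch of that
+// round-up-to-a-power-of-two step, with hypothetical names unrelated to the
+// real AlignBytes helper:
+static uint32_t ExampleRoundUpToAlignment(uint32_t count, uint32_t alignment) {
+  // |alignment| must be a power of two. Adding (alignment - 1) and clearing
+  // the low bits rounds up, e.g. (3, 2) -> 4 and (4, 2) -> 4.
+  return (count + alignment - 1) & ~(alignment - 1);
+}
+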
+void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ JSObject* templateObj = lir->mir()->templateObj();
+
+ using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
+ OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
+ lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));
+
+ TemplateObject templateObject(templateObj);
+ masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
+ ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+// Out-of-line object allocation for JSOp::NewObject.
+class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
+ LNewObject* lir_;
+
+ public:
+ explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineNewObject(this);
+ }
+
+ LNewObject* lir() const { return lir_; }
+};
+
+void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
+ Register objReg = ToRegister(lir->output());
+
+ MOZ_ASSERT(!lir->isCall());
+ saveLive(lir);
+
+ JSObject* templateObject = lir->mir()->templateObject();
+
+ // If we're making a new object with a class prototype (that is, an object
+ // that derives its class from its prototype instead of being
+ // PlainObject::class_'d) from self-hosted code, we need a different init
+ // function.
+ switch (lir->mir()->mode()) {
+ case MNewObject::ObjectLiteral: {
+ MOZ_ASSERT(!templateObject);
+ pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
+ pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
+
+ using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
+ callVM<Fn, NewObjectOperation>(lir);
+ break;
+ }
+ case MNewObject::ObjectCreate: {
+ pushArg(ImmGCPtr(templateObject));
+
+ using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
+ callVM<Fn, ObjectCreateWithTemplate>(lir);
+ break;
+ }
+ }
+
+ masm.storeCallPointerResult(objReg);
+
+ MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
+ restoreLive(lir);
+}
+
+static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
+ uint32_t nfixed) {
+ // Look for StoreFixedSlot instructions following an object allocation
+ // that write to this object before a GC is triggered or this object is
+ // passed to a VM call. If all fixed slots will be initialized, the
+ // allocation code doesn't need to set the slots to |undefined|.
+
+ if (nfixed == 0) {
+ return false;
+ }
+
+ // Keep track of the fixed slots that are initialized. initializedSlots is
+ // a bit mask with a bit for each slot.
+ MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
+ static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
+ "Slot bits must fit in 32 bits");
+ uint32_t initializedSlots = 0;
+ uint32_t numInitialized = 0;
+
+ MInstruction* allocMir = lir->mir();
+ MBasicBlock* block = allocMir->block();
+
+ // Skip the allocation instruction.
+ MInstructionIterator iter = block->begin(allocMir);
+ MOZ_ASSERT(*iter == allocMir);
+ iter++;
+
+ // Handle the leading shape guard, if present.
+ for (; iter != block->end(); iter++) {
+ if (iter->isConstant()) {
+ // This instruction won't trigger a GC or read object slots.
+ continue;
+ }
+ if (iter->isGuardShape()) {
+ auto* guard = iter->toGuardShape();
+ if (guard->object() != allocMir || guard->shape() != shape) {
+ return true;
+ }
+ allocMir = guard;
+ iter++;
+ }
+ break;
+ }
+
+ for (; iter != block->end(); iter++) {
+ if (iter->isConstant() || iter->isPostWriteBarrier()) {
+ // These instructions won't trigger a GC or read object slots.
+ continue;
+ }
+
+ if (iter->isStoreFixedSlot()) {
+ MStoreFixedSlot* store = iter->toStoreFixedSlot();
+ if (store->object() != allocMir) {
+ return true;
+ }
+
+ // We may not initialize this object slot on allocation, so the
+ // pre-barrier could read uninitialized memory. Simply disable
+ // the barrier for this store: the object was just initialized
+ // so the barrier is not necessary.
+ store->setNeedsBarrier(false);
+
+ uint32_t slot = store->slot();
+ MOZ_ASSERT(slot < nfixed);
+ if ((initializedSlots & (1 << slot)) == 0) {
+ numInitialized++;
+ initializedSlots |= (1 << slot);
+
+ if (numInitialized == nfixed) {
+ // All fixed slots will be initialized.
+ MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
+ return false;
+ }
+ }
+ continue;
+ }
+
+ // Unhandled instruction, assume it bails or reads object slots.
+ return true;
+ }
+
+ MOZ_CRASH("Shouldn't get here");
+}
+
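+// [Editorial illustration, not part of the original patch] The bookkeeping in
+// ShouldInitFixedSlots can be shown in isolation: a 32-bit mask records which
+// fixed-slot indices have already been stored, and the scan can stop once all
+// |nfixed| slots have been seen. The helper below is a self-contained sketch
+// of just that bit manipulation; all names are hypothetical and it assumes
+// every recorded slot index is below |nfixed| (and below 32).
+static bool ExampleAllFixedSlotsStored(const uint32_t* storedSlots,
+                                       size_t numStores, uint32_t nfixed) {
+  uint32_t initializedSlots = 0;
+  uint32_t numInitialized = 0;
+  for (size_t i = 0; i < numStores; i++) {
+    uint32_t bit = uint32_t(1) << storedSlots[i];
+    if ((initializedSlots & bit) == 0) {
+      initializedSlots |= bit;
+      if (++numInitialized == nfixed) {
+        return true;  // Every slot 0..nfixed-1 was written at least once.
+      }
+    }
+  }
+  return false;
+}
+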
+void CodeGenerator::visitNewObject(LNewObject* lir) {
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp());
+
+ if (lir->mir()->isVMCall()) {
+ visitNewObjectVMCall(lir);
+ return;
+ }
+
+ OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ TemplateObject templateObject(lir->mir()->templateObject());
+
+ masm.createGCObject(objReg, tempReg, templateObject,
+ lir->mir()->initialHeap(), ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
+ visitNewObjectVMCall(ool->lir());
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
+ Register objReg = ToRegister(lir->output());
+ Register temp0Reg = ToRegister(lir->temp0());
+ Register temp1Reg = ToRegister(lir->temp1());
+ Register shapeReg = ToRegister(lir->temp2());
+
+ auto* mir = lir->mir();
+ const Shape* shape = mir->shape();
+ gc::Heap initialHeap = mir->initialHeap();
+ gc::AllocKind allocKind = mir->allocKind();
+
+ using Fn =
+ JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
+ OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
+ lir,
+ ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
+ Imm32(int32_t(initialHeap))),
+ StoreRegisterTo(objReg));
+
+ bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());
+
+ masm.movePtr(ImmGCPtr(shape), shapeReg);
+ masm.createPlainGCObject(
+ objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
+ mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
+ AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);
+
+#ifdef DEBUG
+ // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
+ // so ensure the newly created object has the correct shape. Should the guard
+ // ever fail, we may end up with uninitialized fixed slots, which can confuse
+ // the GC.
+ Label ok;
+ masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
+ &ok);
+ masm.assumeUnreachable("Newly created object has the correct shape");
+ masm.bind(&ok);
+#endif
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
+ Register objReg = ToRegister(lir->output());
+ Register temp0Reg = ToRegister(lir->temp0());
+ Register shapeReg = ToRegister(lir->temp1());
+
+ auto* mir = lir->mir();
+ uint32_t arrayLength = mir->length();
+
+ gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
+ MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
+ allocKind = ForegroundToBackgroundAllocKind(allocKind);
+
+ uint32_t slotCount = GetGCKindSlots(allocKind);
+ MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
+ uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;
+
+ const Shape* shape = mir->shape();
+
+ NewObjectKind objectKind =
+ mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;
+
+ using Fn =
+ ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
+ OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
+ lir,
+ ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
+ StoreRegisterTo(objReg));
+
+ masm.movePtr(ImmPtr(shape), shapeReg);
+ masm.createArrayWithFixedElements(
+ objReg, shapeReg, temp0Reg, arrayLength, arrayCapacity, allocKind,
+ mir->initialHeap(), ool->entry(),
+ AllocSiteInput(gc::CatchAllAllocSite::Optimized));
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp0());
+ const CompileInfo& info = lir->mir()->block()->info();
+
+ using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
+ OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
+ lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));
+
+ TemplateObject templateObject(lir->mir()->templateObj());
+
+ masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
+ ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
+ Register objReg = ToRegister(lir->output());
+ Register tempReg = ToRegister(lir->temp0());
+
+ CallObject* templateObj = lir->mir()->templateObject();
+
+ using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
+ OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
+ lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
+ StoreRegisterTo(objReg));
+
+ // Inline call object creation, using the OOL path only for tricky cases.
+ TemplateObject templateObject(templateObj);
+ masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
+ ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ StringObject* templateObj = lir->mir()->templateObj();
+
+ using Fn = JSObject* (*)(JSContext*, HandleString);
+ OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
+ StoreRegisterTo(output));
+
+ TemplateObject templateObject(templateObj);
+ masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
+ ool->entry());
+
+ masm.loadStringLength(input, temp);
+
+ masm.storeValue(JSVAL_TYPE_STRING, input,
+ Address(output, StringObject::offsetOfPrimitiveValue()));
+ masm.storeValue(JSVAL_TYPE_INT32, temp,
+ Address(output, StringObject::offsetOfLength()));
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
+ Register obj = ToRegister(lir->object());
+ Register value = ToRegister(lir->value());
+
+ pushArg(value);
+ pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
+ pushArg(obj);
+ pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
+
+ using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
+ HandleObject);
+ callVM<Fn, InitElemGetterSetterOperation>(lir);
+}
+
+void CodeGenerator::visitMutateProto(LMutateProto* lir) {
+ Register objReg = ToRegister(lir->object());
+
+ pushArg(ToValue(lir, LMutateProto::ValueIndex));
+ pushArg(objReg);
+
+ using Fn =
+ bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
+ callVM<Fn, MutatePrototype>(lir);
+}
+
+void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
+ Register obj = ToRegister(lir->object());
+ Register value = ToRegister(lir->value());
+
+ pushArg(value);
+ pushArg(ImmGCPtr(lir->mir()->name()));
+ pushArg(obj);
+ pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
+
+ using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
+ Handle<PropertyName*>, HandleObject);
+ callVM<Fn, InitPropGetterSetterOperation>(lir);
+}
+
+void CodeGenerator::visitCreateThis(LCreateThis* lir) {
+ const LAllocation* callee = lir->callee();
+ const LAllocation* newTarget = lir->newTarget();
+
+ if (newTarget->isConstant()) {
+ pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
+ } else {
+ pushArg(ToRegister(newTarget));
+ }
+
+ if (callee->isConstant()) {
+ pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
+ } else {
+ pushArg(ToRegister(callee));
+ }
+
+ using Fn = bool (*)(JSContext* cx, HandleObject callee,
+ HandleObject newTarget, MutableHandleValue rval);
+ callVM<Fn, jit::CreateThisFromIon>(lir);
+}
+
+void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
+ // This should be getting constructed in the first block only, and not any OSR
+ // entry blocks.
+ MOZ_ASSERT(lir->mir()->block()->id() == 0);
+
+ Register callObj = ToRegister(lir->callObject());
+ Register temp0 = ToRegister(lir->temp0());
+ Label done;
+
+ if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
+ Register objTemp = ToRegister(lir->temp1());
+ Register cxTemp = ToRegister(lir->temp2());
+
+ masm.Push(callObj);
+
+ // Try to allocate an arguments object. This will leave the reserved
+ // slots uninitialized, so it's important we don't GC until we
+ // initialize these slots in ArgumentsObject::finishForIonPure.
+ Label failure;
+ TemplateObject templateObject(templateObj);
+ masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
+ &failure,
+ /* initContents = */ false);
+
+ masm.moveStackPtrTo(temp0);
+ masm.addPtr(Imm32(masm.framePushed()), temp0);
+
+ using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
+ JSObject* scopeChain, ArgumentsObject* obj);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(cxTemp);
+ masm.passABIArg(cxTemp);
+ masm.passABIArg(temp0);
+ masm.passABIArg(callObj);
+ masm.passABIArg(objTemp);
+
+ masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
+ masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
+
+ // Discard saved callObj on the stack.
+ masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
+ masm.jump(&done);
+
+ masm.bind(&failure);
+ masm.Pop(callObj);
+ }
+
+ masm.moveStackPtrTo(temp0);
+ masm.addPtr(Imm32(frameSize()), temp0);
+
+ pushArg(callObj);
+ pushArg(temp0);
+
+ using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
+ callVM<Fn, ArgumentsObject::createForIon>(lir);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitCreateInlinedArgumentsObject(
+ LCreateInlinedArgumentsObject* lir) {
+ Register callObj = ToRegister(lir->getCallObject());
+ Register callee = ToRegister(lir->getCallee());
+ Register argsAddress = ToRegister(lir->temp1());
+ Register argsObj = ToRegister(lir->temp2());
+
+ // TODO: Do we have to worry about alignment here?
+
+ // Create a contiguous array of values for ArgumentsObject::create
+ // by pushing the arguments onto the stack in reverse order.
+ uint32_t argc = lir->mir()->numActuals();
+ for (uint32_t i = 0; i < argc; i++) {
+ uint32_t argNum = argc - i - 1;
+ uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
+ ConstantOrRegister arg =
+ toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
+ masm.Push(arg);
+ }
+ masm.moveStackPtrTo(argsAddress);
+
+ Label done;
+ if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
+ LiveRegisterSet liveRegs;
+ liveRegs.add(callObj);
+ liveRegs.add(callee);
+
+ masm.PushRegsInMask(liveRegs);
+
+ // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
+ // a call instruction.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ allRegs.take(callObj);
+ allRegs.take(callee);
+ allRegs.take(argsObj);
+ allRegs.take(argsAddress);
+
+ Register temp3 = allRegs.takeAny();
+ Register temp4 = allRegs.takeAny();
+
+ // Try to allocate an arguments object. This will leave the reserved slots
+ // uninitialized, so it's important we don't GC until we initialize these
+ // slots in ArgumentsObject::finishForIonPure.
+ Label failure;
+ TemplateObject templateObject(templateObj);
+ masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
+ &failure,
+ /* initContents = */ false);
+
+ Register numActuals = temp3;
+ masm.move32(Imm32(argc), numActuals);
+
+ using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
+ uint32_t, ArgumentsObject*);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp4);
+ masm.passABIArg(temp4);
+ masm.passABIArg(callObj);
+ masm.passABIArg(callee);
+ masm.passABIArg(argsAddress);
+ masm.passABIArg(numActuals);
+ masm.passABIArg(argsObj);
+
+ masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
+ masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
+
+ // Discard saved callObj, callee, and values array on the stack.
+ masm.addToStackPtr(
+ Imm32(masm.PushRegsInMaskSizeInBytes(liveRegs) + argc * sizeof(Value)));
+ masm.jump(&done);
+
+ masm.bind(&failure);
+ masm.PopRegsInMask(liveRegs);
+
+    // Reload argsAddress because it may have been clobbered.
+ masm.moveStackPtrTo(argsAddress);
+ }
+
+ pushArg(Imm32(argc));
+ pushArg(callObj);
+ pushArg(callee);
+ pushArg(argsAddress);
+
+ using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
+ HandleObject, uint32_t);
+ callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);
+
+ // Discard the array of values.
+ masm.freeStack(argc * sizeof(Value));
+
+ masm.bind(&done);
+}
+
+template <class GetInlinedArgument>
+void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
+ Register index,
+ ValueOperand output) {
+ uint32_t numActuals = lir->mir()->numActuals();
+ MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
+
+ // The index has already been bounds-checked, so the code we
+ // generate here should be unreachable. We can end up in this
+ // situation in self-hosted code using GetArgument(), or in a
+ // monomorphically inlined function if we've inlined some CacheIR
+ // that was created for a different caller.
+ if (numActuals == 0) {
+ masm.assumeUnreachable("LGetInlinedArgument: invalid index");
+ return;
+ }
+
+ // Check the first n-1 possible indices.
+ Label done;
+ for (uint32_t i = 0; i < numActuals - 1; i++) {
+ Label skip;
+ ConstantOrRegister arg = toConstantOrRegister(
+ lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
+ masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
+ masm.moveValue(arg, output);
+
+ masm.jump(&done);
+ masm.bind(&skip);
+ }
+
+#ifdef DEBUG
+ Label skip;
+ masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
+ masm.assumeUnreachable("LGetInlinedArgument: invalid index");
+ masm.bind(&skip);
+#endif
+
+ // The index has already been bounds-checked, so load the last argument.
+ uint32_t lastIdx = numActuals - 1;
+ ConstantOrRegister arg =
+ toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
+ lir->mir()->getArg(lastIdx)->type());
+ masm.moveValue(arg, output);
+ masm.bind(&done);
+}
+
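+// [Editorial illustration, not part of the original patch] The branch chain
+// emitted by emitGetInlinedArgument is the unrolled form of a linear scan over
+// a small, compile-time-known argument count: indices 0..n-2 are compared
+// explicitly and the last slot is used otherwise, relying on the caller's
+// bounds check. A scalar sketch with hypothetical names (requires n >= 1):
+static uint32_t ExampleSelectInlinedArg(const uint32_t* args, uint32_t n,
+                                        uint32_t index) {
+  for (uint32_t i = 0; i + 1 < n; i++) {
+    if (index == i) {
+      return args[i];  // Mirrors the branch32/moveValue pair for index i.
+    }
+  }
+  return args[n - 1];  // The index was bounds-checked, so it must be n - 1.
+}
+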
+void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
+ Register index = ToRegister(lir->getIndex());
+ ValueOperand output = ToOutValue(lir);
+
+ emitGetInlinedArgument(lir, index, output);
+}
+
+void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
+ Register index = ToRegister(lir->getIndex());
+ ValueOperand output = ToOutValue(lir);
+
+ uint32_t numActuals = lir->mir()->numActuals();
+
+ if (numActuals == 0) {
+ bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
+ masm.moveValue(UndefinedValue(), output);
+ return;
+ }
+
+ Label outOfBounds, done;
+ masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
+ &outOfBounds);
+
+ emitGetInlinedArgument(lir, index, output);
+ masm.jump(&done);
+
+ masm.bind(&outOfBounds);
+ bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
+ masm.moveValue(UndefinedValue(), output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
+ Register temp = ToRegister(lir->temp0());
+ Register argsObj = ToRegister(lir->argsObject());
+ ValueOperand out = ToOutValue(lir);
+
+ masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
+ temp);
+ Address argAddr(temp, ArgumentsData::offsetOfArgs() +
+ lir->mir()->argno() * sizeof(Value));
+ masm.loadValue(argAddr, out);
+#ifdef DEBUG
+ Label success;
+ masm.branchTestMagic(Assembler::NotEqual, out, &success);
+ masm.assumeUnreachable(
+ "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
+ masm.bind(&success);
+#endif
+}
+
+void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
+ Register temp = ToRegister(lir->getTemp(0));
+ Register argsObj = ToRegister(lir->argsObject());
+ ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);
+
+ masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
+ temp);
+ Address argAddr(temp, ArgumentsData::offsetOfArgs() +
+ lir->mir()->argno() * sizeof(Value));
+ emitPreBarrier(argAddr);
+#ifdef DEBUG
+ Label success;
+ masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
+ masm.assumeUnreachable(
+ "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
+ masm.bind(&success);
+#endif
+ masm.storeValue(value, argAddr);
+}
+
+void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
+ Register temp = ToRegister(lir->temp0());
+ Register argsObj = ToRegister(lir->argsObject());
+ Register index = ToRegister(lir->index());
+ ValueOperand out = ToOutValue(lir);
+
+ Label bail;
+ masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitLoadArgumentsObjectArgHole(
+ LLoadArgumentsObjectArgHole* lir) {
+ Register temp = ToRegister(lir->temp0());
+ Register argsObj = ToRegister(lir->argsObject());
+ Register index = ToRegister(lir->index());
+ ValueOperand out = ToOutValue(lir);
+
+ Label bail;
+ masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
+ Register temp = ToRegister(lir->temp0());
+ Register argsObj = ToRegister(lir->argsObject());
+ Register index = ToRegister(lir->index());
+ Register out = ToRegister(lir->output());
+
+ Label bail;
+ masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
+ Register argsObj = ToRegister(lir->argsObject());
+ Register out = ToRegister(lir->output());
+
+ Label bail;
+ masm.loadArgumentsObjectLength(argsObj, out, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitArrayFromArgumentsObject(
+ LArrayFromArgumentsObject* lir) {
+ pushArg(ToRegister(lir->argsObject()));
+
+ using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
+ callVM<Fn, js::ArrayFromArgumentsObject>(lir);
+}
+
+void CodeGenerator::visitGuardArgumentsObjectFlags(
+ LGuardArgumentsObjectFlags* lir) {
+ Register argsObj = ToRegister(lir->argsObject());
+ Register temp = ToRegister(lir->temp0());
+
+ Label bail;
+ masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
+ Assembler::NonZero, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
+ Register obj = ToRegister(lir->object());
+ Register output = ToRegister(lir->output());
+
+ masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
+ output);
+ masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
+}
+
+void CodeGenerator::visitGuardBoundFunctionIsConstructor(
+ LGuardBoundFunctionIsConstructor* lir) {
+ Register obj = ToRegister(lir->object());
+
+ Label bail;
+ Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
+ masm.branchTest32(Assembler::Zero, flagsSlot,
+ Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
+ ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
+ Register obj = ToRegister(lir->object());
+ Register output = ToRegister(lir->output());
+
+ Label valueIsObject, end;
+
+ masm.branchTestObject(Assembler::Equal, value, &valueIsObject);
+
+ // Value is not an object. Return that other object.
+ masm.movePtr(obj, output);
+ masm.jump(&end);
+
+ // Value is an object. Return unbox(Value).
+ masm.bind(&valueIsObject);
+ Register payload = masm.extractObject(value, output);
+ if (payload != output) {
+ masm.movePtr(payload, output);
+ }
+
+ masm.bind(&end);
+}
+
+class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
+ LBoxNonStrictThis* ins_;
+
+ public:
+ explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineBoxNonStrictThis(this);
+ }
+ LBoxNonStrictThis* ins() const { return ins_; }
+};
+
+void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
+ ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
+ Register output = ToRegister(lir->output());
+
+ auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ masm.fallibleUnboxObject(value, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineBoxNonStrictThis(
+ OutOfLineBoxNonStrictThis* ool) {
+ LBoxNonStrictThis* lir = ool->ins();
+
+ ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
+ Register output = ToRegister(lir->output());
+
+ Label notNullOrUndefined;
+ {
+ Label isNullOrUndefined;
+ ScratchTagScope tag(masm, value);
+ masm.splitTagForTest(value, tag);
+ masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
+ masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
+ masm.bind(&isNullOrUndefined);
+ masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
+ masm.jump(ool->rejoin());
+ }
+
+ masm.bind(&notNullOrUndefined);
+
+ saveLive(lir);
+
+ pushArg(value);
+ using Fn = JSObject* (*)(JSContext*, HandleValue);
+ callVM<Fn, BoxNonStrictThis>(lir);
+
+ StoreRegisterTo(output).generate(this);
+ restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
+ pushArg(ImmGCPtr(lir->mir()->name()));
+ pushArg(ToRegister(lir->env()));
+
+ using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
+ MutableHandleValue);
+ callVM<Fn, ImplicitThisOperation>(lir);
+}
+
+void CodeGenerator::visitArrayLength(LArrayLength* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register output = ToRegister(lir->output());
+
+ Address length(elements, ObjectElements::offsetOfLength());
+ masm.load32(length, output);
+
+ // Bail out if the length doesn't fit in int32.
+ bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
+}
+
+static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
+ const Address& length) {
+ if (index->isConstant()) {
+ masm.store32(Imm32(ToInt32(index) + 1), length);
+ } else {
+ Register newLength = ToRegister(index);
+ masm.add32(Imm32(1), newLength);
+ masm.store32(newLength, length);
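+    // Undo the increment so the register still holds the original index value
+    // for any later uses.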
+ masm.sub32(Imm32(1), newLength);
+ }
+}
+
+void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
+ Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
+ SetLengthFromIndex(masm, lir->index(), length);
+}
+
+void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
+ Register function = ToRegister(lir->function());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+
+ // Get the JSFunction flags.
+ masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
+ output);
+
+ // Functions with a SelfHostedLazyScript must be compiled with the slow-path
+ // before the function length is known. If the length was previously resolved,
+ // the length property may be shadowed.
+ masm.branchTest32(
+ Assembler::NonZero, output,
+ Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
+ &bail);
+
+ masm.loadFunctionLength(function, output, output, &bail);
+
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitFunctionName(LFunctionName* lir) {
+ Register function = ToRegister(lir->function());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+
+ const JSAtomState& names = gen->runtime->names();
+ masm.loadFunctionName(function, output, ImmGCPtr(names.empty), &bail);
+
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+template <class OrderedHashTable>
+static void RangeFront(MacroAssembler&, Register, Register, Register);
+
+template <>
+void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
+ Register front) {
+ masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
+ masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);
+
+ MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
+ "offsetof(Data, element) is 0");
+ static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
+ masm.mulBy3(i, i);
+ masm.lshiftPtr(Imm32(3), i);
+ masm.addPtr(i, front);
+}
+
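+// [Editorial illustration, not part of the original patch] The ValueMap case
+// above scales the index by sizeof(Data) == 24 without a multiply-by-immediate
+// instruction: 24 * i == (3 * i) << 3, hence mulBy3 followed by a 3-bit shift.
+// The same decomposition in plain C++ (hypothetical name):
+static uint32_t ExampleScaleBy24(uint32_t i) {
+  // Matches mulBy3 + lshiftPtr(Imm32(3)); equal to i * 24 modulo 2^32.
+  return (i * 3) << 3;
+}
+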
+template <>
+void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
+ Register front) {
+ masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
+ masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);
+
+ MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
+ "offsetof(Data, element) is 0");
+ static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
+ masm.lshiftPtr(Imm32(4), i);
+ masm.addPtr(i, front);
+}
+
+template <class OrderedHashTable>
+static void RangePopFront(MacroAssembler& masm, Register range, Register front,
+ Register dataLength, Register temp) {
+ Register i = temp;
+
+ masm.add32(Imm32(1),
+ Address(range, OrderedHashTable::Range::offsetOfCount()));
+
+ masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);
+
+ Label done, seek;
+ masm.bind(&seek);
+ masm.add32(Imm32(1), i);
+ masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);
+
+ // We can add sizeof(Data) to |front| to select the next element, because
+ // |front| and |range.ht.data[i]| point to the same location.
+ MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
+ "offsetof(Data, element) is 0");
+ masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);
+
+ masm.branchTestMagic(Assembler::Equal,
+ Address(front, OrderedHashTable::offsetOfEntryKey()),
+ JS_HASH_KEY_EMPTY, &seek);
+
+ masm.bind(&done);
+ masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
+}
+
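+// [Editorial illustration, not part of the original patch] The seek loop in
+// RangePopFront advances |i| (and |front| by sizeof(Data) per step) until it
+// either reaches |dataLength| or finds an entry whose key is not the empty
+// sentinel. A scalar sketch with hypothetical names, using a null pointer to
+// stand in for the JS_HASH_KEY_EMPTY magic value:
+static uint32_t ExampleSeekNextLiveEntry(const void* const* keys, uint32_t i,
+                                         uint32_t dataLength) {
+  i++;
+  while (i < dataLength && keys[i] == nullptr) {
+    i++;
+  }
+  return i;  // Either dataLength (range exhausted) or the next live entry.
+}
+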
+template <class OrderedHashTable>
+static inline void RangeDestruct(MacroAssembler& masm, Register iter,
+ Register range, Register temp0,
+ Register temp1) {
+ Register next = temp0;
+ Register prevp = temp1;
+
+ masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
+ masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
+ masm.storePtr(next, Address(prevp, 0));
+
+ Label hasNoNext;
+ masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);
+
+ masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));
+
+ masm.bind(&hasNoNext);
+
+ Label nurseryAllocated;
+ masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
+ &nurseryAllocated);
+
+ masm.callFreeStub(range);
+
+ masm.bind(&nurseryAllocated);
+}
+
+template <>
+void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
+ Register temp,
+ Register front) {
+ size_t elementsOffset = NativeObject::offsetOfFixedElements();
+
+ Address keyAddress(front, ValueMap::Entry::offsetOfKey());
+ Address valueAddress(front, ValueMap::Entry::offsetOfValue());
+ Address keyElemAddress(result, elementsOffset);
+ Address valueElemAddress(result, elementsOffset + sizeof(Value));
+ masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
+ masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
+ masm.storeValue(keyAddress, keyElemAddress, temp);
+ masm.storeValue(valueAddress, valueElemAddress, temp);
+
+ Label emitBarrier, skipBarrier;
+ masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
+ &emitBarrier);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
+ &skipBarrier);
+ {
+ masm.bind(&emitBarrier);
+ saveVolatile(temp);
+ emitPostWriteBarrier(result);
+ restoreVolatile(temp);
+ }
+ masm.bind(&skipBarrier);
+}
+
+template <>
+void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
+ Register temp,
+ Register front) {
+ size_t elementsOffset = NativeObject::offsetOfFixedElements();
+
+ Address keyAddress(front, ValueSet::offsetOfEntryKey());
+ Address keyElemAddress(result, elementsOffset);
+ masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
+ masm.storeValue(keyAddress, keyElemAddress, temp);
+
+ Label skipBarrier;
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
+ &skipBarrier);
+ {
+ saveVolatile(temp);
+ emitPostWriteBarrier(result);
+ restoreVolatile(temp);
+ }
+ masm.bind(&skipBarrier);
+}
+
+template <class IteratorObject, class OrderedHashTable>
+void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
+ Register iter = ToRegister(lir->iter());
+ Register result = ToRegister(lir->result());
+ Register temp = ToRegister(lir->temp0());
+ Register dataLength = ToRegister(lir->temp1());
+ Register range = ToRegister(lir->temp2());
+ Register output = ToRegister(lir->output());
+
+#ifdef DEBUG
+ // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
+  // only called with the correct iterator class. Assert here that all
+  // self-hosted callers of GetNextEntryForIterator perform this class check.
+ // No Spectre mitigations are needed because this is DEBUG-only code.
+ Label success;
+ masm.branchTestObjClassNoSpectreMitigations(
+ Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
+ masm.assumeUnreachable("Iterator object should have the correct class.");
+ masm.bind(&success);
+#endif
+
+ masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
+ IteratorObject::RangeSlot)),
+ range);
+
+ Label iterAlreadyDone, iterDone, done;
+ masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);
+
+ masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
+ masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
+ dataLength);
+ masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
+ dataLength);
+ masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
+ {
+ masm.Push(iter);
+
+ Register front = iter;
+ RangeFront<OrderedHashTable>(masm, range, temp, front);
+
+ emitLoadIteratorValues<OrderedHashTable>(result, temp, front);
+
+ RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);
+
+ masm.Pop(iter);
+ masm.move32(Imm32(0), output);
+ }
+ masm.jump(&done);
+ {
+ masm.bind(&iterDone);
+
+ RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);
+
+ masm.storeValue(PrivateValue(nullptr),
+ Address(iter, NativeObject::getFixedSlotOffset(
+ IteratorObject::RangeSlot)));
+
+ masm.bind(&iterAlreadyDone);
+
+ masm.move32(Imm32(1), output);
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitGetNextEntryForIterator(
+ LGetNextEntryForIterator* lir) {
+ if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
+ emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
+ } else {
+ MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
+ emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
+ }
+}
+
+// The point of these is to inform Ion of where these values already are; they
+// don't normally generate (much) code.
+void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
+void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
+void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
+
+void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
+ LAllocation* output = lir->getDef(0)->output();
+ MOZ_ASSERT(output->isStackArea());
+ bool tempInit = false;
+ for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
+ // Zero out ref stack results.
+ if (iter.isGcPointer()) {
+ Register temp = ToRegister(lir->temp0());
+ if (!tempInit) {
+ masm.xorPtr(temp, temp);
+ tempInit = true;
+ }
+ masm.storePtr(temp, ToAddress(iter.alloc()));
+ }
+ }
+}
+
+void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
+#ifdef JS_64BIT
+ if (MWasmRegisterResult* mir = lir->mir()) {
+ if (mir->type() == MIRType::Int32) {
+ masm.widenInt32(ToRegister(lir->output()));
+ }
+ }
+#endif
+}
+
+void CodeGenerator::visitWasmCall(LWasmCall* lir) {
+ const MWasmCallBase* callBase = lir->callBase();
+
+  // If this call is inside a Wasm try block, initialise a wasm::TryNote for
+  // this call.
+ bool inTry = callBase->inTry();
+ if (inTry) {
+ size_t tryNoteIndex = callBase->tryNoteIndex();
+ wasm::TryNoteVector& tryNotes = masm.tryNotes();
+ wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
+ tryNote.setTryBodyBegin(masm.currentOffset());
+ }
+
+ MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
+ 0);
+ static_assert(
+ WasmStackAlignment >= ABIStackAlignment &&
+ WasmStackAlignment % ABIStackAlignment == 0,
+ "The wasm stack alignment should subsume the ABI-required alignment");
+
+#ifdef DEBUG
+ Label ok;
+ masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
+ masm.breakpoint();
+ masm.bind(&ok);
+#endif
+
+ // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
+  // instance and pinned regs. The only case where we don't have to
+ // reload the instance and pinned regs is when the callee preserves them.
+ bool reloadRegs = true;
+ bool switchRealm = true;
+
+ const wasm::CallSiteDesc& desc = callBase->desc();
+ const wasm::CalleeDesc& callee = callBase->callee();
+ CodeOffset retOffset;
+ CodeOffset secondRetOffset;
+ switch (callee.which()) {
+ case wasm::CalleeDesc::Func:
+ retOffset = masm.call(desc, callee.funcIndex());
+ reloadRegs = false;
+ switchRealm = false;
+ break;
+ case wasm::CalleeDesc::Import:
+ retOffset = masm.wasmCallImport(desc, callee);
+ break;
+ case wasm::CalleeDesc::AsmJSTable:
+ retOffset = masm.asmCallIndirect(desc, callee);
+ break;
+ case wasm::CalleeDesc::WasmTable: {
+ Label* boundsCheckFailed = nullptr;
+ if (lir->needsBoundsCheck()) {
+ OutOfLineAbortingWasmTrap* ool =
+ new (alloc()) OutOfLineAbortingWasmTrap(
+ wasm::BytecodeOffset(desc.lineOrBytecode()),
+ wasm::Trap::OutOfBounds);
+ if (lir->isCatchable()) {
+ addOutOfLineCode(ool, lir->mirCatchable());
+ } else {
+ addOutOfLineCode(ool, lir->mirUncatchable());
+ }
+ boundsCheckFailed = ool->entry();
+ }
+ Label* nullCheckFailed = nullptr;
+#ifndef WASM_HAS_HEAPREG
+ {
+ OutOfLineAbortingWasmTrap* ool =
+ new (alloc()) OutOfLineAbortingWasmTrap(
+ wasm::BytecodeOffset(desc.lineOrBytecode()),
+ wasm::Trap::IndirectCallToNull);
+ if (lir->isCatchable()) {
+ addOutOfLineCode(ool, lir->mirCatchable());
+ } else {
+ addOutOfLineCode(ool, lir->mirUncatchable());
+ }
+ nullCheckFailed = ool->entry();
+ }
+#endif
+ masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
+ lir->tableSize(), &retOffset, &secondRetOffset);
+ // Register reloading and realm switching are handled dynamically inside
+ // wasmCallIndirect. There are two return offsets, one for each call
+ // instruction (fast path and slow path).
+ reloadRegs = false;
+ switchRealm = false;
+ break;
+ }
+ case wasm::CalleeDesc::Builtin:
+ retOffset = masm.call(desc, callee.builtin());
+ reloadRegs = false;
+ switchRealm = false;
+ break;
+ case wasm::CalleeDesc::BuiltinInstanceMethod:
+ retOffset = masm.wasmCallBuiltinInstanceMethod(
+ desc, callBase->instanceArg(), callee.builtin(),
+ callBase->builtinMethodFailureMode());
+ switchRealm = false;
+ break;
+ case wasm::CalleeDesc::FuncRef:
+ // Register reloading and realm switching are handled dynamically inside
+ // wasmCallRef. There are two return offsets, one for each call
+ // instruction (fast path and slow path).
+ masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
+ reloadRegs = false;
+ switchRealm = false;
+ break;
+ }
+
+ // Note the assembler offset for the associated LSafePoint.
+ markSafepointAt(retOffset.offset(), lir);
+
+ // Now that all the outbound in-memory args are on the stack, note the
+ // required lower boundary point of the associated StackMap.
+ uint32_t framePushedAtStackMapBase =
+ masm.framePushed() - callBase->stackArgAreaSizeUnaligned();
+ lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
+ MOZ_ASSERT(!lir->safepoint()->isWasmTrap());
+
+ // Note the assembler offset and framePushed for use by the adjunct
+ // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
+ if (callee.which() == wasm::CalleeDesc::WasmTable) {
+ lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
+ framePushedAtStackMapBase);
+ }
+
+ if (reloadRegs) {
+ masm.loadPtr(
+ Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
+ InstanceReg);
+ masm.loadWasmPinnedRegsFromInstance();
+ if (switchRealm) {
+ masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
+ }
+ } else {
+ MOZ_ASSERT(!switchRealm);
+ }
+
+ if (inTry) {
+ // Set the end of the try note range
+ size_t tryNoteIndex = callBase->tryNoteIndex();
+ wasm::TryNoteVector& tryNotes = masm.tryNotes();
+ wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
+
+    // Don't set the end of the try note if we've OOM'ed, as the above
+    // instructions may not have been emitted, which would trigger an assert
+    // about zero-length try-notes. This is okay as this compilation will be
+    // thrown away.
+ if (!masm.oom()) {
+ tryNote.setTryBodyEnd(masm.currentOffset());
+ }
+
+ // This instruction or the adjunct safepoint must be the last instruction
+ // in the block. No other instructions may be inserted.
+ LBlock* block = lir->block();
+ MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
+ (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
+ *(++block->rbegin()) == lir));
+
+ // Jump to the fallthrough block
+ jumpToBlock(lir->mirCatchable()->getSuccessor(
+ MWasmCallCatchable::FallthroughBranchIndex));
+ }
+}
+
+void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
+ LBlock* block = lir->block();
+ MWasmCallLandingPrePad* mir = lir->mir();
+ MBasicBlock* mirBlock = mir->block();
+ MBasicBlock* callMirBlock = mir->callBlock();
+
+ // This block must be the pre-pad successor of the call block. No blocks may
+ // be inserted between us, such as for critical edge splitting.
+ MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
+ MWasmCallCatchable::PrePadBranchIndex));
+
+ // This instruction or a move group must be the first instruction in the
+ // block. No other instructions may be inserted.
+ MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
+ *(++block->begin()) == lir));
+
+ wasm::TryNoteVector& tryNotes = masm.tryNotes();
+ wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
+ // Set the entry point for the call try note to be the beginning of this
+ // block. The above assertions (and assertions in visitWasmCall) guarantee
+ // that we are not skipping over instructions that should be executed.
+ tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
+}
+
+void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
+ LWasmCallIndirectAdjunctSafepoint* lir) {
+ markSafepointAt(lir->safepointLocation().offset(), lir);
+ lir->safepoint()->setFramePushedAtStackMapBase(
+ lir->framePushedAtStackMapBase());
+}
+
+template <typename InstructionWithMaybeTrapSite>
+void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
+ InstructionWithMaybeTrapSite* ins) {
+ if (!ins->maybeTrap()) {
+ return;
+ }
+ wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
+ masm.append(wasm::Trap::NullPointerDereference,
+ wasm::TrapSite(masm.currentOffset(), trapOffset));
+}
+
+void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
+ MIRType type = ins->type();
+ MWideningOp wideningOp = ins->wideningOp();
+ Register container = ToRegister(ins->containerRef());
+ Address addr(container, ins->offset());
+ AnyRegister dst = ToAnyRegister(ins->output());
+
+ EmitSignalNullCheckTrapSite(masm, ins);
+ switch (type) {
+ case MIRType::Int32:
+ switch (wideningOp) {
+ case MWideningOp::None:
+ masm.load32(addr, dst.gpr());
+ break;
+ case MWideningOp::FromU16:
+ masm.load16ZeroExtend(addr, dst.gpr());
+ break;
+ case MWideningOp::FromS16:
+ masm.load16SignExtend(addr, dst.gpr());
+ break;
+ case MWideningOp::FromU8:
+ masm.load8ZeroExtend(addr, dst.gpr());
+ break;
+ case MWideningOp::FromS8:
+ masm.load8SignExtend(addr, dst.gpr());
+ break;
+ default:
+ MOZ_CRASH("unexpected widening op in ::visitWasmLoadSlot");
+ }
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(wideningOp == MWideningOp::None);
+ masm.loadFloat32(addr, dst.fpu());
+ break;
+ case MIRType::Double:
+ MOZ_ASSERT(wideningOp == MWideningOp::None);
+ masm.loadDouble(addr, dst.fpu());
+ break;
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ MOZ_ASSERT(wideningOp == MWideningOp::None);
+ masm.loadPtr(addr, dst.gpr());
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ MOZ_ASSERT(wideningOp == MWideningOp::None);
+ masm.loadUnalignedSimd128(addr, dst.fpu());
+ break;
+#endif
+ default:
+ MOZ_CRASH("unexpected type in ::visitWasmLoadSlot");
+ }
+}
+
+void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
+ MIRType type = ins->type();
+ MNarrowingOp narrowingOp = ins->narrowingOp();
+ Register container = ToRegister(ins->containerRef());
+ Address addr(container, ins->offset());
+ AnyRegister src = ToAnyRegister(ins->value());
+ if (type != MIRType::Int32) {
+ MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
+ }
+
+ EmitSignalNullCheckTrapSite(masm, ins);
+ switch (type) {
+ case MIRType::Int32:
+ switch (narrowingOp) {
+ case MNarrowingOp::None:
+ masm.store32(src.gpr(), addr);
+ break;
+ case MNarrowingOp::To16:
+ masm.store16(src.gpr(), addr);
+ break;
+ case MNarrowingOp::To8:
+ masm.store8(src.gpr(), addr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case MIRType::Float32:
+ masm.storeFloat32(src.fpu(), addr);
+ break;
+ case MIRType::Double:
+ masm.storeDouble(src.fpu(), addr);
+ break;
+ case MIRType::Pointer:
+ // This could be correct, but it would be a new usage, so check carefully.
+ MOZ_CRASH("Unexpected type in visitWasmStoreSlot.");
+ case MIRType::RefOrNull:
+ MOZ_CRASH("Bad type in visitWasmStoreSlot. Use LWasmStoreRef.");
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ masm.storeUnalignedSimd128(src.fpu(), addr);
+ break;
+#endif
+ default:
+ MOZ_CRASH("unexpected type in StorePrimitiveValue");
+ }
+}
+
+void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
+ Register elements = ToRegister(ins->elements());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+ masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
+}
+
+void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
+ masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
+ masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmDerivedIndexPointer(
+ LWasmDerivedIndexPointer* ins) {
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+ masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
+}
+
+void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
+ Register instance = ToRegister(ins->instance());
+ Register valueBase = ToRegister(ins->valueBase());
+ size_t offset = ins->offset();
+ Register value = ToRegister(ins->value());
+ Register temp = ToRegister(ins->temp0());
+
+ if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
+ Label skipPreBarrier;
+ wasm::EmitWasmPreBarrierGuard(
+ masm, instance, temp, valueBase, offset, &skipPreBarrier,
+ ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
+ wasm::EmitWasmPreBarrierCall(masm, instance, temp, valueBase, offset);
+ masm.bind(&skipPreBarrier);
+ }
+
+ EmitSignalNullCheckTrapSite(masm, ins);
+ masm.storePtr(value, Address(valueBase, offset));
+ // The postbarrier is handled separately.
+}
+
+// Out-of-line path to update the store buffer for wasm references.
+class OutOfLineWasmCallPostWriteBarrier
+ : public OutOfLineCodeBase<CodeGenerator> {
+ LInstruction* lir_;
+ Register valueBase_;
+ Register temp_;
+ uint32_t valueOffset_;
+
+ public:
+ OutOfLineWasmCallPostWriteBarrier(LInstruction* lir, Register valueBase,
+ Register temp, uint32_t valueOffset)
+ : lir_(lir),
+ valueBase_(valueBase),
+ temp_(temp),
+ valueOffset_(valueOffset) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineWasmCallPostWriteBarrier(this);
+ }
+
+ LInstruction* lir() const { return lir_; }
+ Register valueBase() const { return valueBase_; }
+ Register temp() const { return temp_; }
+ uint32_t valueOffset() const { return valueOffset_; }
+};
+
+void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrier(
+ OutOfLineWasmCallPostWriteBarrier* ool) {
+ saveLiveVolatile(ool->lir());
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ // Fold the value offset into the value base
+ Register valueAddr = ool->valueBase();
+ Register temp = ool->temp();
+ masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);
+
+ // Call Instance::postBarrier
+ masm.setupWasmABICall();
+ masm.passABIArg(InstanceReg);
+ masm.passABIArg(temp);
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
+ mozilla::Some(instanceOffset), MoveOp::GENERAL);
+
+ masm.Pop(InstanceReg);
+ restoreLiveVolatile(ool->lir());
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitWasmPostWriteBarrier(LWasmPostWriteBarrier* lir) {
+ Register object = ToRegister(lir->object());
+ Register value = ToRegister(lir->value());
+ Register valueBase = ToRegister(lir->valueBase());
+ Register temp = ToRegister(lir->temp0());
+ MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
+ auto ool = new (alloc()) OutOfLineWasmCallPostWriteBarrier(
+ lir, valueBase, temp, lir->valueOffset());
+ addOutOfLineCode(ool, lir->mir());
+
+ // If the pointer being stored is null, no barrier.
+ masm.branchTestPtr(Assembler::Zero, value, value, ool->rejoin());
+
+ // If there is a containing object and it is in the nursery, no barrier.
+ masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, ool->rejoin());
+
+ // If the pointer being stored is to a tenured object, no barrier.
+ masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
+ Register container = ToRegister(ins->containerRef());
+ Address addr(container, ins->offset());
+ Register64 output = ToOutRegister64(ins);
+ EmitSignalNullCheckTrapSite(masm, ins);
+ masm.load64(addr, output);
+}
+
+void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
+ Register container = ToRegister(ins->containerRef());
+ Address addr(container, ins->offset());
+ Register64 value = ToRegister64(ins->value());
+ EmitSignalNullCheckTrapSite(masm, ins);
+ masm.store64(value, addr);
+}
+
+void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ masm.loadArrayBufferByteLengthIntPtr(obj, out);
+}
+
+void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ masm.loadArrayBufferViewLengthIntPtr(obj, out);
+}
+
+void CodeGenerator::visitArrayBufferViewByteOffset(
+ LArrayBufferViewByteOffset* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
+}
+
+void CodeGenerator::visitArrayBufferViewElements(
+ LArrayBufferViewElements* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
+}
+
+void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+
+ masm.typedArrayElementSize(obj, out);
+}
+
+void CodeGenerator::visitGuardHasAttachedArrayBuffer(
+ LGuardHasAttachedArrayBuffer* lir) {
+ Register obj = ToRegister(lir->object());
+ Register temp = ToRegister(lir->temp0());
+
+ Label bail;
+ masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+class OutOfLineGuardNumberToIntPtrIndex
+ : public OutOfLineCodeBase<CodeGenerator> {
+ LGuardNumberToIntPtrIndex* lir_;
+
+ public:
+ explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
+ : lir_(lir) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
+ }
+ LGuardNumberToIntPtrIndex* lir() const { return lir_; }
+};
+
+void CodeGenerator::visitGuardNumberToIntPtrIndex(
+ LGuardNumberToIntPtrIndex* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ if (!lir->mir()->supportOOB()) {
+ Label bail;
+ masm.convertDoubleToPtr(input, output, &bail, false);
+ bailoutFrom(&bail, lir->snapshot());
+ return;
+ }
+
+ auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ masm.convertDoubleToPtr(input, output, ool->entry(), false);
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
+ OutOfLineGuardNumberToIntPtrIndex* ool) {
+ // Substitute the invalid index with an arbitrary out-of-bounds index.
+ masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitStringLength(LStringLength* lir) {
+ Register input = ToRegister(lir->string());
+ Register output = ToRegister(lir->output());
+
+ masm.loadStringLength(input, output);
+}
+
+void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
+ Register first = ToRegister(ins->first());
+ Register output = ToRegister(ins->output());
+
+ MOZ_ASSERT(first == output);
+
+ Assembler::Condition cond =
+ ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;
+
+ if (ins->second()->isConstant()) {
+ Label done;
+ masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
+ masm.move32(Imm32(ToInt32(ins->second())), output);
+ masm.bind(&done);
+ } else {
+ Register second = ToRegister(ins->second());
+ masm.cmp32Move32(cond, second, first, second, output);
+ }
+}
+
+void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
+ Register array = ToRegister(ins->array());
+ Register output = ToRegister(ins->output());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ bool isMax = ins->isMax();
+
+ Label bail;
+ masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+}
+
+void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
+ Register array = ToRegister(ins->array());
+ FloatRegister output = ToFloatRegister(ins->output());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
+ bool isMax = ins->isMax();
+
+ Label bail;
+ masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+}
+
+// For Abs*, lowering will have tied input to output on platforms where that is
+// sensible, and otherwise left them untied.
+
+void CodeGenerator::visitAbsI(LAbsI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ if (ins->mir()->fallible()) {
+ Label positive;
+ if (input != output) {
+ masm.move32(input, output);
+ }
+ masm.branchTest32(Assembler::NotSigned, output, output, &positive);
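+    // Negating INT32_MIN overflows, because |-INT32_MIN| == 2^31 doesn't fit
+    // into int32; that's the only case where this path bails out.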
+ Label bail;
+ masm.branchNeg32(Assembler::Overflow, output, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ masm.bind(&positive);
+ } else {
+ masm.abs32(input, output);
+ }
+}
+
+void CodeGenerator::visitAbsD(LAbsD* ins) {
+ masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
+}
+
+void CodeGenerator::visitAbsF(LAbsF* ins) {
+ masm.absFloat32(ToFloatRegister(ins->input()),
+ ToFloatRegister(ins->output()));
+}
+
+void CodeGenerator::visitPowII(LPowII* ins) {
+ Register value = ToRegister(ins->value());
+ Register power = ToRegister(ins->power());
+ Register output = ToRegister(ins->output());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+
+ Label bailout;
+ masm.pow32(value, power, output, temp0, temp1, &bailout);
+ bailoutFrom(&bailout, ins->snapshot());
+}
+
+void CodeGenerator::visitPowI(LPowI* ins) {
+ FloatRegister value = ToFloatRegister(ins->value());
+ Register power = ToRegister(ins->power());
+
+ using Fn = double (*)(double x, int32_t y);
+ masm.setupAlignedABICall();
+ masm.passABIArg(value, MoveOp::DOUBLE);
+ masm.passABIArg(power);
+
+ masm.callWithABI<Fn, js::powi>(MoveOp::DOUBLE);
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
+}
+
+void CodeGenerator::visitPowD(LPowD* ins) {
+ FloatRegister value = ToFloatRegister(ins->value());
+ FloatRegister power = ToFloatRegister(ins->power());
+
+ using Fn = double (*)(double x, double y);
+ masm.setupAlignedABICall();
+ masm.passABIArg(value, MoveOp::DOUBLE);
+ masm.passABIArg(power, MoveOp::DOUBLE);
+ masm.callWithABI<Fn, ecmaPow>(MoveOp::DOUBLE);
+
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
+}
+
+void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
+ Register power = ToRegister(ins->power());
+ Register output = ToRegister(ins->output());
+
+ uint32_t base = ins->base();
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(base));
+
+ uint32_t n = mozilla::FloorLog2(base);
+ MOZ_ASSERT(n != 0);
+
+ // Hacker's Delight, 2nd edition, theorem D2.
+ auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };
+
+  // Bail out if |power| is greater than or equal to |31 / n| (that is,
+  // |log_base(2^31)|) or if it's negative. |2^(n*y) < 2^31| must hold, hence
+  // |n*y < 31|, i.e. |y < 31/n|.
+ //
+ // Note: it's important for this condition to match the code in CacheIR.cpp
+ // (CanAttachInt32Pow) to prevent failure loops.
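+  // E.g. for base == 8 (n == 3) the threshold is ceilingDiv(31, 3) == 11:
+  // 8^10 == 2^30 still fits into int32, whereas 8^11 == 2^33 does not.
+  // Negative powers also bail out, because the comparison below is unsigned.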
+ bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
+ ins->snapshot());
+
+  // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could instead scale
+  // |power| and perform a single shift, but the MacroAssembler lacks the
+  // necessary functionality, like multiplying a register by an immediate.
+  // Lowering therefore restricts how many shift instructions are generated
+  // for this operation.
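+  // E.g. for base == 4 (n == 2) and power == 3 this emits two shifts:
+  // (1 << 3) << 3 == 2^6 == 4^3.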
+ masm.move32(Imm32(1), output);
+ do {
+ masm.lshift32(power, output);
+ n--;
+ } while (n > 0);
+}
+
+void CodeGenerator::visitSqrtD(LSqrtD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.sqrtDouble(input, output);
+}
+
+void CodeGenerator::visitSqrtF(LSqrtF* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.sqrtFloat32(input, output);
+}
+
+void CodeGenerator::visitSignI(LSignI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ masm.signInt32(input, output);
+}
+
+void CodeGenerator::visitSignD(LSignD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.signDouble(input, output);
+}
+
+void CodeGenerator::visitSignDI(LSignDI* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister temp = ToFloatRegister(ins->temp0());
+ Register output = ToRegister(ins->output());
+
+ Label bail;
+ masm.signDoubleToInt32(input, output, temp, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+}
+
+void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
+
+ UnaryMathFunction fun = ins->mir()->function();
+ UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
+
+ masm.setupAlignedABICall();
+
+ masm.passABIArg(input, MoveOp::DOUBLE);
+ masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
+ MoveOp::DOUBLE);
+}
+
+void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(input, MoveOp::FLOAT32);
+
+ using Fn = float (*)(float x);
+ Fn funptr = nullptr;
+ CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
+ switch (ins->mir()->function()) {
+ case UnaryMathFunction::Floor:
+ funptr = floorf;
+ check = CheckUnsafeCallWithABI::DontCheckOther;
+ break;
+ case UnaryMathFunction::Round:
+ funptr = math_roundf_impl;
+ break;
+ case UnaryMathFunction::Trunc:
+ funptr = math_truncf_impl;
+ break;
+ case UnaryMathFunction::Ceil:
+ funptr = ceilf;
+ check = CheckUnsafeCallWithABI::DontCheckOther;
+ break;
+ default:
+ MOZ_CRASH("Unknown or unsupported float32 math function");
+ }
+
+ masm.callWithABI(DynamicFunction<Fn>(funptr), MoveOp::FLOAT32, check);
+}
+
+void CodeGenerator::visitModD(LModD* ins) {
+ MOZ_ASSERT(!gen->compilingWasm());
+
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ FloatRegister rhs = ToFloatRegister(ins->rhs());
+
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
+
+ using Fn = double (*)(double a, double b);
+ masm.setupAlignedABICall();
+ masm.passABIArg(lhs, MoveOp::DOUBLE);
+ masm.passABIArg(rhs, MoveOp::DOUBLE);
+ masm.callWithABI<Fn, NumberMod>(MoveOp::DOUBLE);
+}
+
+void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ uint32_t divisor = ins->divisor();
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));
+
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
+ //
+ // This doesn't work if |d| isn't a power of two, because we may lose too much
+ // precision. For example |Number.MAX_VALUE % 3 == 2|, but
+ // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.
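+  //
+  // Worked example: 7.5 % 2 == copysign(7.5 - 2 * trunc(7.5 / 2), 7.5)
+  // == copysign(1.5, 7.5) == 1.5, and -7.5 % 2 == -1.5, because the result
+  // takes the sign of the dividend.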
+
+ Label done;
+ {
+ ScratchDoubleScope scratch(masm);
+
+ // Subnormals can lead to performance degradation, which can make calling
+ // |fmod| faster than this inline implementation. Work around this issue by
+ // directly returning the input for any value in the interval ]-1, +1[.
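+    // Returning the input unchanged is correct for these values: the divisor
+    // is a positive power of two, i.e. at least 1, so |x % divisor == x|
+    // whenever |x| lies strictly between -1 and +1.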
+ Label notSubnormal;
+ masm.loadConstantDouble(1.0, scratch);
+ masm.loadConstantDouble(-1.0, output);
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
+ &notSubnormal);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
+ &notSubnormal);
+
+ masm.moveDouble(lhs, output);
+ masm.jump(&done);
+
+ masm.bind(&notSubnormal);
+
+ if (divisor == 1) {
+ // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
+ // the multiplication by one in this case.
+ masm.moveDouble(lhs, output);
+ masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
+ masm.subDouble(scratch, output);
+ } else {
+ masm.loadConstantDouble(1.0 / double(divisor), scratch);
+ masm.loadConstantDouble(double(divisor), output);
+
+ masm.mulDouble(lhs, scratch);
+ masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
+ masm.mulDouble(output, scratch);
+
+ masm.moveDouble(lhs, output);
+ masm.subDouble(scratch, output);
+ }
+ }
+
+ masm.copySignDouble(output, lhs, output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ FloatRegister rhs = ToFloatRegister(ins->rhs());
+
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs, MoveOp::DOUBLE);
+ masm.passABIArg(rhs, MoveOp::DOUBLE);
+
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
+ mozilla::Some(instanceOffset), MoveOp::DOUBLE);
+
+ masm.Pop(InstanceReg);
+}
+
+void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // 0n + x == x
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ masm.movePtr(rhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // x + 0n == x
+ Label rhsNonZero;
+ masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&rhsNonZero);
+
+ // Call into the VM when either operand can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigIntNonZero(lhs, temp1, ool->entry());
+ masm.loadBigIntNonZero(rhs, temp2, ool->entry());
+
+ masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigInt(output, temp1);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // x - 0n == x
+ Label rhsNonZero;
+ masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&rhsNonZero);
+
+ // Call into the VM when either operand can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigInt(lhs, temp1, ool->entry());
+ masm.loadBigIntNonZero(rhs, temp2, ool->entry());
+
+ masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigInt(output, temp1);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // 0n * x == 0n
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // x * 0n == 0n
+ Label rhsNonZero;
+ masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
+ masm.movePtr(rhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&rhsNonZero);
+
+ // Call into the VM when either operand can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigIntNonZero(lhs, temp1, ool->entry());
+ masm.loadBigIntNonZero(rhs, temp2, ool->entry());
+
+ masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigInt(output, temp1);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // x / 0 throws an error.
+ if (ins->mir()->canBeDivideByZero()) {
+ masm.branchIfBigIntIsZero(rhs, ool->entry());
+ }
+
+ // 0n / x == 0n
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // Call into the VM when either operand can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigIntNonZero(lhs, temp1, ool->entry());
+ masm.loadBigIntNonZero(rhs, temp2, ool->entry());
+
+  // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
+  // allocation which might trigger a minor GC to free up nursery space. This
+  // requires us to apply the same optimization here, otherwise we'd end up
+  // always entering the OOL call, because the nursery is never evicted.
+ Label notOne;
+ masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&notOne);
+
+ static constexpr auto DigitMin = std::numeric_limits<
+ mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
+
+ // Handle an integer overflow from INT{32,64}_MIN / -1.
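+  // E.g. on 64-bit, INT64_MIN / -1 == 2^63, which doesn't fit into a signed
+  // 64-bit digit, so that combination has to take the VM path.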
+ Label notOverflow;
+ masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
+ masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
+ masm.bind(&notOverflow);
+
+ emitBigIntDiv(ins, temp1, temp2, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // x % 0 throws an error.
+ if (ins->mir()->canBeDivideByZero()) {
+ masm.branchIfBigIntIsZero(rhs, ool->entry());
+ }
+
+ // 0n % x == 0n
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // Call into the VM when either operand can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
+ masm.loadBigIntAbsolute(rhs, temp2, ool->entry());
+
+ // Similar to the case for BigInt division, we must apply the same allocation
+ // optimizations as performed in |BigInt::mod()|.
+ Label notBelow;
+ masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&notBelow);
+
+ // Convert both digits to signed pointer-sized values.
+ masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
+ masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());
+
+ static constexpr auto DigitMin = std::numeric_limits<
+ mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
+
+ // Handle an integer overflow from INT{32,64}_MIN / -1.
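+  // Unlike the division case, the result here is representable:
+  // INT{32,64}_MIN % -1 == 0. Zeroing the dividend yields the same result
+  // (0 % -1 == 0) without performing the overflowing division.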
+ Label notOverflow;
+ masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
+ masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
+ masm.movePtr(ImmWord(0), temp1);
+ masm.bind(&notOverflow);
+
+ emitBigIntMod(ins, temp1, temp2, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // x ** -y throws an error.
+ if (ins->mir()->canBeNegativeExponent()) {
+ masm.branchIfBigIntIsNegative(rhs, ool->entry());
+ }
+
+ Register dest = temp1;
+ Register base = temp2;
+ Register exponent = output;
+
+ Label done;
+ masm.movePtr(ImmWord(1), dest); // p = 1
+
+ // 1n ** y == 1n
+ // -1n ** y == 1n when y is even
+ // -1n ** y == -1n when y is odd
+ Label lhsNotOne;
+ masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
+ Imm32(1), &lhsNotOne);
+ masm.loadFirstBigIntDigitOrZero(lhs, base);
+ masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);
+ {
+ masm.loadFirstBigIntDigitOrZero(rhs, exponent);
+
+ Label lhsNonNegative;
+ masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
+ masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
+ masm.bind(&lhsNonNegative);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ }
+ masm.bind(&lhsNotOne);
+
+ // x ** 0n == 1n
+ masm.branchIfBigIntIsZero(rhs, &done);
+
+ // 0n ** y == 0n with y != 0n
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ {
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ }
+ masm.bind(&lhsNonZero);
+
+ // Call into the VM when the exponent can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigIntAbsolute(rhs, exponent, ool->entry());
+
+  // x ** y with abs(x) > 1 and y >= DigitBits can't be pointer-sized.
+ masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
+ ool->entry());
+
+ // x ** 1n == x
+ Label rhsNotOne;
+ masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);
+ {
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ }
+ masm.bind(&rhsNotOne);
+
+ // Call into the VM when the base operand can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigIntNonZero(lhs, base, ool->entry());
+
+ // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
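+  //
+  // Loop invariant: whenever control is at |start|, the final result equals
+  // p * m^n. E.g. for 3n ** 5n the state (p, m, n) evolves as
+  // (1, 3, 5) -> (3, 9, 2) -> (3, 81, 1), and the last iteration leaves
+  // p == 243 == 3^5.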
+ {
+ // m = base
+ // n = exponent
+
+ Label start, loop;
+ masm.jump(&start);
+ masm.bind(&loop);
+
+ // m *= m
+ masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());
+
+ masm.bind(&start);
+
+ // if ((n & 1) != 0) p *= m
+ Label even;
+ masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
+ masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
+ masm.bind(&even);
+
+ // n >>= 1
+ // if (n == 0) return p
+ masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
+ }
+
+ MOZ_ASSERT(temp1 == dest);
+
+ // Create and return the result.
+ masm.bind(&done);
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigInt(output, temp1);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // 0n & x == 0n
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // x & 0n == 0n
+ Label rhsNonZero;
+ masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
+ masm.movePtr(rhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&rhsNonZero);
+
+ // Call into the VM when either operand can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigIntNonZero(lhs, temp1, ool->entry());
+ masm.loadBigIntNonZero(rhs, temp2, ool->entry());
+
+ masm.andPtr(temp2, temp1);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigInt(output, temp1);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // 0n | x == x
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ masm.movePtr(rhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // x | 0n == x
+ Label rhsNonZero;
+ masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&rhsNonZero);
+
+ // Call into the VM when either operand can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigIntNonZero(lhs, temp1, ool->entry());
+ masm.loadBigIntNonZero(rhs, temp2, ool->entry());
+
+ masm.orPtr(temp2, temp1);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigInt(output, temp1);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // 0n ^ x == x
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ masm.movePtr(rhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // x ^ 0n == x
+ Label rhsNonZero;
+ masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&rhsNonZero);
+
+ // Call into the VM when either operand can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigIntNonZero(lhs, temp1, ool->entry());
+ masm.loadBigIntNonZero(rhs, temp2, ool->entry());
+
+ masm.xorPtr(temp2, temp1);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigInt(output, temp1);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // 0n << x == 0n
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // x << 0n == x
+ Label rhsNonZero;
+ masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&rhsNonZero);
+
+ // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.
+
+ Label rhsTooLarge;
+ masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
+
+ // Call into the VM when the left-hand side operand can't be loaded into a
+ // pointer-sized register.
+ masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
+
+ // Handle shifts exceeding |BigInt::DigitBits| first.
+ Label shift, create;
+ masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
+ {
+ masm.bind(&rhsTooLarge);
+
+ // x << DigitBits with x != 0n always exceeds pointer-sized storage.
+ masm.branchIfBigIntIsNonNegative(rhs, ool->entry());
+
+ // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
+ masm.move32(Imm32(0), temp1);
+ masm.branchIfBigIntIsNonNegative(lhs, &create);
+ masm.move32(Imm32(1), temp1);
+ masm.jump(&create);
+ }
+ masm.bind(&shift);
+
+ Label nonNegative;
+ masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
+ {
+ masm.movePtr(temp1, temp3);
+
+ // |x << -y| is computed as |x >> y|.
+ masm.rshiftPtr(temp2, temp1);
+
+ // For negative numbers, round down if any bit was shifted out.
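+    // E.g. -5n << -1n == -5n >> 1n == -3n: shifting the magnitude 5 right by
+    // one drops a set bit, so the shifted magnitude is bumped from 2 to 3
+    // before the sign is applied.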
+ masm.branchIfBigIntIsNonNegative(lhs, &create);
+
+ // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
+ masm.movePtr(ImmWord(-1), output);
+ masm.lshiftPtr(temp2, output);
+ masm.notPtr(output);
+
+ // Add plus one when |(lhs.digit(0) & mask) != 0|.
+ masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
+ masm.addPtr(ImmWord(1), temp1);
+ masm.jump(&create);
+ }
+ masm.bind(&nonNegative);
+ {
+ masm.movePtr(temp2, temp3);
+
+ // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
+ masm.negPtr(temp2);
+ masm.addPtr(Imm32(BigInt::DigitBits), temp2);
+ masm.movePtr(temp1, output);
+ masm.rshiftPtr(temp2, output);
+
+ // Call into the VM when any bit will be shifted out.
+ masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
+
+ masm.movePtr(temp3, temp2);
+ masm.lshiftPtr(temp2, temp1);
+ }
+ masm.bind(&create);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigIntAbsolute(output, temp1);
+
+ // Set the sign bit when the left-hand side is negative.
+ masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
+ masm.or32(Imm32(BigInt::signBitMask()),
+ Address(output, BigInt::offsetOfFlags()));
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
+ StoreRegisterTo(output));
+
+ // 0n >> x == 0n
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // x >> 0n == x
+ Label rhsNonZero;
+ masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
+ masm.movePtr(lhs, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&rhsNonZero);
+
+ // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.
+
+ Label rhsTooLarge;
+ masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
+
+ // Call into the VM when the left-hand side operand can't be loaded into a
+ // pointer-sized register.
+ masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
+
+ // Handle shifts exceeding |BigInt::DigitBits| first.
+ Label shift, create;
+ masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
+ {
+ masm.bind(&rhsTooLarge);
+
+ // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
+ masm.branchIfBigIntIsNegative(rhs, ool->entry());
+
+ // x >> DigitBits is either 0n or -1n.
+ masm.move32(Imm32(0), temp1);
+ masm.branchIfBigIntIsNonNegative(lhs, &create);
+ masm.move32(Imm32(1), temp1);
+ masm.jump(&create);
+ }
+ masm.bind(&shift);
+
+ Label nonNegative;
+ masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
+ {
+ masm.movePtr(temp2, temp3);
+
+ // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
+ masm.negPtr(temp2);
+ masm.addPtr(Imm32(BigInt::DigitBits), temp2);
+ masm.movePtr(temp1, output);
+ masm.rshiftPtr(temp2, output);
+
+ // Call into the VM when any bit will be shifted out.
+ masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
+
+ // |x >> -y| is computed as |x << y|.
+ masm.movePtr(temp3, temp2);
+ masm.lshiftPtr(temp2, temp1);
+ masm.jump(&create);
+ }
+ masm.bind(&nonNegative);
+ {
+ masm.movePtr(temp1, temp3);
+
+ masm.rshiftPtr(temp2, temp1);
+
+ // For negative numbers, round down if any bit was shifted out.
+ masm.branchIfBigIntIsNonNegative(lhs, &create);
+
+ // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
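+    // The three instructions below use |~(~0 << shift)|, which equals
+    // |(1 << shift) - 1| without needing a separate constant.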
+ masm.movePtr(ImmWord(-1), output);
+ masm.lshiftPtr(temp2, output);
+ masm.notPtr(output);
+
+ // Add plus one when |(lhs.digit(0) & mask) != 0|.
+ masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
+ masm.addPtr(ImmWord(1), temp1);
+ }
+ masm.bind(&create);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigIntAbsolute(output, temp1);
+
+ // Set the sign bit when the left-hand side is negative.
+ masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
+ masm.or32(Imm32(BigInt::signBitMask()),
+ Address(output, BigInt::offsetOfFlags()));
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt);
+ auto* ool =
+ oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));
+
+ // Call into the VM when the input can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigInt(input, temp1, ool->entry());
+ masm.movePtr(ImmWord(1), temp2);
+
+ masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigInt(output, temp1);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt);
+ auto* ool =
+ oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));
+
+ // Call into the VM when the input can't be loaded into a pointer-sized
+ // register.
+ masm.loadBigInt(input, temp1, ool->entry());
+ masm.movePtr(ImmWord(1), temp2);
+
+ masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigInt(output, temp1);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp = ToRegister(ins->temp());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt);
+ auto* ool =
+ oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));
+
+ // -0n == 0n
+ Label lhsNonZero;
+ masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
+ masm.movePtr(input, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&lhsNonZero);
+
+ // Call into the VM when the input uses heap digits.
+ masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
+ ool->entry());
+
+ // Flip the sign bit.
+ masm.xor32(Imm32(BigInt::signBitMask()),
+ Address(output, BigInt::offsetOfFlags()));
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt);
+ auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
+ StoreRegisterTo(output));
+
+ masm.loadBigIntAbsolute(input, temp1, ool->entry());
+
+  // This follows the C++ implementation because it lets us support the full
+  // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
+ Label nonNegative, done;
+ masm.branchIfBigIntIsNonNegative(input, &nonNegative);
+ {
+ // ~(-x) == ~(~(x-1)) == x-1
+ masm.subPtr(Imm32(1), temp1);
+ masm.jump(&done);
+ }
+ masm.bind(&nonNegative);
+ {
+ // ~x == -x-1 == -(x+1)
+ masm.movePtr(ImmWord(1), temp2);
+ masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());
+ }
+ masm.bind(&done);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
+ masm.initializeBigIntAbsolute(output, temp1);
+
+ // Set the sign bit when the input is positive.
+ masm.branchIfBigIntIsNegative(input, ool->rejoin());
+ masm.or32(Imm32(BigInt::signBitMask()),
+ Address(output, BigInt::offsetOfFlags()));
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
+ Register input = ToRegister(lir->input());
+ RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
+ Register output = ToRegister(lir->output());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ using Fn = JSString* (*)(JSContext*, int32_t, int32_t);
+ if (base.is<Register>()) {
+ auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
+ lir, ArgList(input, base.as<Register>()), StoreRegisterTo(output));
+
+ LiveRegisterSet liveRegs = liveVolatileRegs(lir);
+ masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
+ temp1, gen->runtime->staticStrings(),
+ liveRegs, ool->entry());
+ masm.bind(ool->rejoin());
+ } else {
+ auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
+ lir, ArgList(input, Imm32(base.as<int32_t>())),
+ StoreRegisterTo(output));
+
+ masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
+ temp1, gen->runtime->staticStrings(),
+ ool->entry());
+ masm.bind(ool->rejoin());
+ }
+}
+
+void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
+ Register string = ToRegister(lir->string());
+ Register radix = ToRegister(lir->radix());
+ ValueOperand output = ToOutValue(lir);
+ Register temp = ToRegister(lir->temp0());
+
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
+ masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
+ masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
+ masm.bind(&ok);
+#endif
+
+ // Use indexed value as fast path if possible.
+ Label vmCall, done;
+ masm.loadStringIndexValue(string, temp, &vmCall);
+ masm.tagValue(JSVAL_TYPE_INT32, temp, output);
+ masm.jump(&done);
+ {
+ masm.bind(&vmCall);
+
+ pushArg(radix);
+ pushArg(string);
+
+ using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
+ callVM<Fn, js::NumberParseInt>(lir);
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
+ FloatRegister number = ToFloatRegister(lir->number());
+ Register output = ToRegister(lir->output());
+ FloatRegister temp = ToFloatRegister(lir->temp0());
+
+ Label bail;
+ masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
+ masm.branchTruncateDoubleToInt32(number, output, &bail);
+
+ Label ok;
+ masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);
+ {
+ // Accept both +0 and -0 and return 0.
+ masm.loadConstantDouble(0.0, temp);
+ masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);
+
+ // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
+ masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
+ masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);
+ }
+ masm.bind(&ok);
+
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitFloor(LFloor* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+ masm.floorDoubleToInt32(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitFloorF(LFloorF* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+ masm.floorFloat32ToInt32(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitCeil(LCeil* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+ masm.ceilDoubleToInt32(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitCeilF(LCeilF* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+ masm.ceilFloat32ToInt32(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitRound(LRound* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister temp = ToFloatRegister(lir->temp0());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+ masm.roundDoubleToInt32(input, output, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitRoundF(LRoundF* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister temp = ToFloatRegister(lir->temp0());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+ masm.roundFloat32ToInt32(input, output, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitTrunc(LTrunc* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+ masm.truncDoubleToInt32(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitTruncF(LTruncF* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ Label bail;
+ masm.truncFloat32ToInt32(input, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitCompareS(LCompareS* lir) {
+ JSOp op = lir->mir()->jsop();
+ Register left = ToRegister(lir->left());
+ Register right = ToRegister(lir->right());
+ Register output = ToRegister(lir->output());
+
+ OutOfLineCode* ool = nullptr;
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ if (op == JSOp::Eq || op == JSOp::StrictEq) {
+ ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
+ lir, ArgList(left, right), StoreRegisterTo(output));
+ } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
+ ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
+ lir, ArgList(left, right), StoreRegisterTo(output));
+ } else if (op == JSOp::Lt) {
+ ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
+ lir, ArgList(left, right), StoreRegisterTo(output));
+ } else if (op == JSOp::Le) {
+ // Push the operands in reverse order for JSOp::Le:
+ // - |left <= right| is implemented as |right >= left|.
+ ool =
+ oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
+ lir, ArgList(right, left), StoreRegisterTo(output));
+ } else if (op == JSOp::Gt) {
+ // Push the operands in reverse order for JSOp::Gt:
+ // - |left > right| is implemented as |right < left|.
+ ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
+ lir, ArgList(right, left), StoreRegisterTo(output));
+ } else {
+ MOZ_ASSERT(op == JSOp::Ge);
+ ool =
+ oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
+ lir, ArgList(left, right), StoreRegisterTo(output));
+ }
+
+ masm.compareStrings(op, left, right, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+template <typename T, typename CharT>
+static inline T CopyCharacters(const CharT* chars) {
+ T value = 0;
+ std::memcpy(&value, chars, sizeof(T));
+ return value;
+}
+
+template <typename T>
+static inline T CopyCharacters(const JSLinearString* str, size_t index) {
+ JS::AutoCheckCannotGC nogc;
+
+ if (str->hasLatin1Chars()) {
+ MOZ_ASSERT(index + sizeof(T) / sizeof(JS::Latin1Char) <= str->length());
+ return CopyCharacters<T>(str->latin1Chars(nogc) + index);
+ }
+
+ MOZ_ASSERT(sizeof(T) >= sizeof(char16_t));
+ MOZ_ASSERT(index + sizeof(T) / sizeof(char16_t) <= str->length());
+ return CopyCharacters<T>(str->twoByteChars(nogc) + index);
+}
+
+enum class CompareDirection { Forward, Backward };
+
+// NOTE: Clobbers the input when CompareDirection is backward.
+static void CompareCharacters(MacroAssembler& masm, Register input,
+ const JSLinearString* str, Register output,
+ JSOp op, CompareDirection direction, Label* done,
+ Label* oolEntry) {
+ MOZ_ASSERT(input != output);
+
+ size_t length = str->length();
+ MOZ_ASSERT(length > 0);
+
+ CharEncoding encoding =
+ str->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;
+ size_t encodingSize = encoding == CharEncoding::Latin1
+ ? sizeof(JS::Latin1Char)
+ : sizeof(char16_t);
+ size_t byteLength = length * encodingSize;
+
+ // Take the OOL path when the string is a rope or has a different character
+ // representation.
+ masm.branchIfRope(input, oolEntry);
+ if (encoding == CharEncoding::Latin1) {
+ masm.branchTwoByteString(input, oolEntry);
+ } else {
+ JS::AutoCheckCannotGC nogc;
+ if (mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
+ masm.branchLatin1String(input, oolEntry);
+ } else {
+ // This case was already handled in the caller.
+#ifdef DEBUG
+ Label ok;
+ masm.branchTwoByteString(input, &ok);
+ masm.assumeUnreachable("Unexpected Latin-1 string");
+ masm.bind(&ok);
+#endif
+ }
+ }
+
+#ifdef DEBUG
+ {
+ Label ok;
+ masm.branch32(Assembler::AboveOrEqual,
+ Address(input, JSString::offsetOfLength()), Imm32(length),
+ &ok);
+ masm.assumeUnreachable("Input mustn't be smaller than search string");
+ masm.bind(&ok);
+ }
+#endif
+
+ // Load the input string's characters.
+ Register stringChars = output;
+ masm.loadStringChars(input, stringChars, encoding);
+
+ if (direction == CompareDirection::Backward) {
+ masm.loadStringLength(input, input);
+ masm.sub32(Imm32(length), input);
+
+ masm.addToCharPtr(stringChars, input, encoding);
+ }
+
+ // Prefer a single compare-and-set instruction if possible.
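+  // CopyCharacters builds the immediate with a plain memcpy, so its byte
+  // order matches the characters in memory and the constant can be compared
+  // as a single raw 1/2/4/8-byte value.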
+ if (byteLength == 1 || byteLength == 2 || byteLength == 4 ||
+ byteLength == 8) {
+ auto cond = JSOpToCondition(op, /* isSigned = */ false);
+
+ Address addr(stringChars, 0);
+ switch (byteLength) {
+ case 8: {
+ auto x = CopyCharacters<uint64_t>(str, 0);
+ masm.cmp64Set(cond, addr, Imm64(x), output);
+ break;
+ }
+ case 4: {
+ auto x = CopyCharacters<uint32_t>(str, 0);
+ masm.cmp32Set(cond, addr, Imm32(x), output);
+ break;
+ }
+ case 2: {
+ auto x = CopyCharacters<uint16_t>(str, 0);
+ masm.cmp16Set(cond, addr, Imm32(x), output);
+ break;
+ }
+ case 1: {
+ auto x = CopyCharacters<uint8_t>(str, 0);
+ masm.cmp8Set(cond, addr, Imm32(x), output);
+ break;
+ }
+ }
+ } else {
+ Label setNotEqualResult;
+
+ size_t pos = 0;
+ for (size_t stride : {8, 4, 2, 1}) {
+ while (byteLength >= stride) {
+ Address addr(stringChars, pos * encodingSize);
+ switch (stride) {
+ case 8: {
+ auto x = CopyCharacters<uint64_t>(str, pos);
+ masm.branch64(Assembler::NotEqual, addr, Imm64(x),
+ &setNotEqualResult);
+ break;
+ }
+ case 4: {
+ auto x = CopyCharacters<uint32_t>(str, pos);
+ masm.branch32(Assembler::NotEqual, addr, Imm32(x),
+ &setNotEqualResult);
+ break;
+ }
+ case 2: {
+ auto x = CopyCharacters<uint16_t>(str, pos);
+ masm.branch16(Assembler::NotEqual, addr, Imm32(x),
+ &setNotEqualResult);
+ break;
+ }
+ case 1: {
+ auto x = CopyCharacters<uint8_t>(str, pos);
+ masm.branch8(Assembler::NotEqual, addr, Imm32(x),
+ &setNotEqualResult);
+ break;
+ }
+ }
+
+ byteLength -= stride;
+ pos += stride / encodingSize;
+ }
+
+ // Prefer a single comparison for trailing bytes instead of doing
+ // multiple consecutive comparisons.
+ //
+ // For example when comparing against the string "example", emit two
+ // four-byte comparisons against "exam" and "mple" instead of doing
+ // three comparisons against "exam", "pl", and finally "e".
+ if (pos > 0 && byteLength > stride / 2) {
+ MOZ_ASSERT(stride == 8 || stride == 4);
+
+ size_t prev = pos - (stride - byteLength) / encodingSize;
+ Address addr(stringChars, prev * encodingSize);
+ switch (stride) {
+ case 8: {
+ auto x = CopyCharacters<uint64_t>(str, prev);
+ masm.branch64(Assembler::NotEqual, addr, Imm64(x),
+ &setNotEqualResult);
+ break;
+ }
+ case 4: {
+ auto x = CopyCharacters<uint32_t>(str, prev);
+ masm.branch32(Assembler::NotEqual, addr, Imm32(x),
+ &setNotEqualResult);
+ break;
+ }
+ }
+
+ // Break from the loop, because we've finished the complete string.
+ break;
+ }
+ }
+
+ // Falls through if both strings are equal.
+
+ masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
+ masm.jump(done);
+
+ masm.bind(&setNotEqualResult);
+ masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
+ }
+}
+
+void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
+ JSOp op = lir->mir()->jsop();
+ MOZ_ASSERT(IsEqualityOp(op));
+
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ const JSLinearString* str = lir->constant();
+ MOZ_ASSERT(str->length() > 0);
+
+ OutOfLineCode* ool = nullptr;
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ if (op == JSOp::Eq || op == JSOp::StrictEq) {
+ ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
+ lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
+ } else {
+ MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
+ ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
+ lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
+ }
+
+ Label compareChars;
+ {
+ Label notPointerEqual;
+
+ // If operands point to the same instance, the strings are trivially equal.
+ masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str), &notPointerEqual);
+ masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&notPointerEqual);
+
+ Label setNotEqualResult;
+ if (str->isAtom()) {
+ // Atoms cannot be equal to each other if they point to different strings.
+ Imm32 atomBit(JSString::ATOM_BIT);
+ masm.branchTest32(Assembler::NonZero,
+ Address(input, JSString::offsetOfFlags()), atomBit,
+ &setNotEqualResult);
+ }
+
+ if (str->hasTwoByteChars()) {
+ // Pure two-byte strings can't be equal to Latin-1 strings.
+ JS::AutoCheckCannotGC nogc;
+ if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
+ masm.branchLatin1String(input, &setNotEqualResult);
+ }
+ }
+
+ // Strings of different length can never be equal.
+ masm.branch32(Assembler::Equal, Address(input, JSString::offsetOfLength()),
+ Imm32(str->length()), &compareChars);
+
+ masm.bind(&setNotEqualResult);
+ masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
+ masm.jump(ool->rejoin());
+ }
+
+ masm.bind(&compareChars);
+
+ CompareCharacters(masm, input, str, output, op, CompareDirection::Forward,
+ ool->rejoin(), ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
+ JSOp op = lir->mir()->jsop();
+ Register left = ToRegister(lir->left());
+ Register right = ToRegister(lir->right());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+ Register output = ToRegister(lir->output());
+
+ Label notSame;
+ Label compareSign;
+ Label compareLength;
+ Label compareDigit;
+
+ Label* notSameSign;
+ Label* notSameLength;
+ Label* notSameDigit;
+ if (IsEqualityOp(op)) {
+ notSameSign = &notSame;
+ notSameLength = &notSame;
+ notSameDigit = &notSame;
+ } else {
+ notSameSign = &compareSign;
+ notSameLength = &compareLength;
+ notSameDigit = &compareDigit;
+ }
+
+ masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
+ notSameLength, notSameDigit);
+
+ Label done;
+ masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
+ op == JSOp::Ge),
+ output);
+ masm.jump(&done);
+
+ if (IsEqualityOp(op)) {
+ masm.bind(&notSame);
+ masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
+ } else {
+ Label invertWhenNegative;
+
+ // There are two cases when sign(left) != sign(right):
+ // 1. sign(left) = positive and sign(right) = negative,
+ // 2. or the dual case with reversed signs.
+ //
+ // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
+ // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
+ // with |invertWhenNegative|.
+ masm.bind(&compareSign);
+ masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
+ masm.jump(&invertWhenNegative);
+
+ // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
+ // we have to consider the two cases:
+ // 1. len(digits(left)) < len(digits(right))
+ // 2. len(digits(left)) > len(digits(right))
+ //
+ // For |left| <cmp> |right| with cmp=Lt:
+ // Assume both BigInts are positive, then |left < right| is true for case 1
+ // and false for case 2. When both are negative, the result is reversed.
+ //
+ // The other comparison operators can be handled similarly.
+ //
+ // |temp0| holds the digits length of the right-hand side operand.
+ masm.bind(&compareLength);
+ masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
+ Address(left, BigInt::offsetOfLength()), temp0, output);
+ masm.jump(&invertWhenNegative);
+
+ // Similar to the case above, compare the current digit to determine the
+ // overall comparison result.
+ //
+ // |temp1| points to the current digit of the left-hand side operand.
+ // |output| holds the current digit of the right-hand side operand.
+ masm.bind(&compareDigit);
+ masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
+ Address(temp1, 0), output, output);
+
+ Label nonNegative;
+ masm.bind(&invertWhenNegative);
+ masm.branchIfBigIntIsNonNegative(left, &nonNegative);
+ masm.xor32(Imm32(1), output);
+ masm.bind(&nonNegative);
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
+ JSOp op = lir->mir()->jsop();
+ Register left = ToRegister(lir->left());
+ Register right = ToRegister(lir->right());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ Register output = ToRegister(lir->output());
+
+ Label ifTrue, ifFalse;
+ masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);
+
+ Label done;
+ masm.bind(&ifFalse);
+ masm.move32(Imm32(0), output);
+ masm.jump(&done);
+ masm.bind(&ifTrue);
+ masm.move32(Imm32(1), output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
+ JSOp op = lir->mir()->jsop();
+ Register left = ToRegister(lir->left());
+ FloatRegister right = ToFloatRegister(lir->right());
+ Register output = ToRegister(lir->output());
+
+ masm.setupAlignedABICall();
+
+ // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
+ // - |left <= right| is implemented as |right >= left|.
+ // - |left > right| is implemented as |right < left|.
+ if (op == JSOp::Le || op == JSOp::Gt) {
+ masm.passABIArg(right, MoveOp::DOUBLE);
+ masm.passABIArg(left);
+ } else {
+ masm.passABIArg(left);
+ masm.passABIArg(right, MoveOp::DOUBLE);
+ }
+
+ using FnBigIntNumber = bool (*)(BigInt*, double);
+ using FnNumberBigInt = bool (*)(double, BigInt*);
+ switch (op) {
+ case JSOp::Eq: {
+ masm.callWithABI<FnBigIntNumber,
+ jit::BigIntNumberEqual<EqualityKind::Equal>>();
+ break;
+ }
+ case JSOp::Ne: {
+ masm.callWithABI<FnBigIntNumber,
+ jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
+ break;
+ }
+ case JSOp::Lt: {
+ masm.callWithABI<FnBigIntNumber,
+ jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
+ break;
+ }
+ case JSOp::Gt: {
+ masm.callWithABI<FnNumberBigInt,
+ jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
+ break;
+ }
+ case JSOp::Le: {
+ masm.callWithABI<
+ FnNumberBigInt,
+ jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
+ break;
+ }
+ case JSOp::Ge: {
+ masm.callWithABI<
+ FnBigIntNumber,
+ jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
+ break;
+ }
+ default:
+ MOZ_CRASH("unhandled op");
+ }
+
+ masm.storeCallBoolResult(output);
+}
+
+void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
+ JSOp op = lir->mir()->jsop();
+ Register left = ToRegister(lir->left());
+ Register right = ToRegister(lir->right());
+
+ // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
+ // - |left <= right| is implemented as |right >= left|.
+ // - |left > right| is implemented as |right < left|.
+ if (op == JSOp::Le || op == JSOp::Gt) {
+ pushArg(left);
+ pushArg(right);
+ } else {
+ pushArg(right);
+ pushArg(left);
+ }
+
+ using FnBigIntString =
+ bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
+ using FnStringBigInt =
+ bool (*)(JSContext*, HandleString, HandleBigInt, bool*);
+
+ switch (op) {
+ case JSOp::Eq: {
+ constexpr auto Equal = EqualityKind::Equal;
+ callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
+ break;
+ }
+ case JSOp::Ne: {
+ constexpr auto NotEqual = EqualityKind::NotEqual;
+ callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
+ break;
+ }
+ case JSOp::Lt: {
+ constexpr auto LessThan = ComparisonKind::LessThan;
+ callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
+ break;
+ }
+ case JSOp::Gt: {
+ constexpr auto LessThan = ComparisonKind::LessThan;
+ callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
+ break;
+ }
+ case JSOp::Le: {
+ constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
+ callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
+ break;
+ }
+ case JSOp::Ge: {
+ constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
+ callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected compare op");
+ }
+}
+
+void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
+ MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
+ lir->mir()->compareType() == MCompare::Compare_Null);
+
+ JSOp op = lir->mir()->jsop();
+ MOZ_ASSERT(IsLooseEqualityOp(op));
+
+ const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
+ Register output = ToRegister(lir->output());
+
+ auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
+ addOutOfLineCode(ool, lir->mir());
+
+ Label* nullOrLikeUndefined = ool->label1();
+ Label* notNullOrLikeUndefined = ool->label2();
+
+ {
+ ScratchTagScope tag(masm, value);
+ masm.splitTagForTest(value, tag);
+
+ masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
+ masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);
+
+ // Check whether it's a truthy object or a falsy object that emulates
+ // undefined.
+ masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
+ }
+
+ Register objreg =
+ masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
+ branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
+ notNullOrLikeUndefined, output, ool);
+ // fall through
+
+ Label done;
+
+ // It's not null or undefined, and if it's an object it doesn't
+ // emulate undefined, so it's not like undefined.
+ masm.move32(Imm32(op == JSOp::Ne), output);
+ masm.jump(&done);
+
+ masm.bind(nullOrLikeUndefined);
+ masm.move32(Imm32(op == JSOp::Eq), output);
+
+ // Both branches meet here.
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
+ LIsNullOrLikeUndefinedAndBranchV* lir) {
+ MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
+ lir->cmpMir()->compareType() == MCompare::Compare_Null);
+
+ JSOp op = lir->cmpMir()->jsop();
+ MOZ_ASSERT(IsLooseEqualityOp(op));
+
+ const ValueOperand value =
+ ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);
+
+ MBasicBlock* ifTrue = lir->ifTrue();
+ MBasicBlock* ifFalse = lir->ifFalse();
+
+ if (op == JSOp::Ne) {
+ // Swap branches.
+ std::swap(ifTrue, ifFalse);
+ }
+
+ auto* ool = new (alloc()) OutOfLineTestObject();
+ addOutOfLineCode(ool, lir->cmpMir());
+
+ Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
+ Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
+
+ {
+ ScratchTagScope tag(masm, value);
+ masm.splitTagForTest(value, tag);
+
+ masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
+ masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);
+
+ masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
+ }
+
+ // Objects that emulate undefined are loosely equal to null/undefined.
+ Register objreg =
+ masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
+ Register scratch = ToRegister(lir->temp());
+ testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch, ool);
+}
+
+void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
+ MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
+ lir->mir()->compareType() == MCompare::Compare_Null);
+ MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);
+
+ JSOp op = lir->mir()->jsop();
+ MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");
+
+ Register objreg = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
+ addOutOfLineCode(ool, lir->mir());
+
+ Label* emulatesUndefined = ool->label1();
+ Label* doesntEmulateUndefined = ool->label2();
+
+ branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
+ doesntEmulateUndefined, output, ool);
+
+ Label done;
+
+ masm.move32(Imm32(op == JSOp::Ne), output);
+ masm.jump(&done);
+
+ masm.bind(emulatesUndefined);
+ masm.move32(Imm32(op == JSOp::Eq), output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
+ LIsNullOrLikeUndefinedAndBranchT* lir) {
+ MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
+ lir->cmpMir()->compareType() == MCompare::Compare_Null);
+ MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);
+
+ JSOp op = lir->cmpMir()->jsop();
+ MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");
+
+ MBasicBlock* ifTrue = lir->ifTrue();
+ MBasicBlock* ifFalse = lir->ifFalse();
+
+ if (op == JSOp::Ne) {
+ // Swap branches.
+ std::swap(ifTrue, ifFalse);
+ }
+
+ Register input = ToRegister(lir->getOperand(0));
+
+ auto* ool = new (alloc()) OutOfLineTestObject();
+ addOutOfLineCode(ool, lir->cmpMir());
+
+ Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
+ Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
+
+ // Objects that emulate undefined are loosely equal to null/undefined.
+ Register scratch = ToRegister(lir->temp());
+ testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
+}
+
+void CodeGenerator::visitIsNull(LIsNull* lir) {
+ MCompare::CompareType compareType = lir->mir()->compareType();
+ MOZ_ASSERT(compareType == MCompare::Compare_Null);
+
+ JSOp op = lir->mir()->jsop();
+ MOZ_ASSERT(IsStrictEqualityOp(op));
+
+ const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
+ Register output = ToRegister(lir->output());
+
+ Assembler::Condition cond = JSOpToCondition(compareType, op);
+ masm.testNullSet(cond, value, output);
+}
+
+void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
+ MCompare::CompareType compareType = lir->mir()->compareType();
+ MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
+
+ JSOp op = lir->mir()->jsop();
+ MOZ_ASSERT(IsStrictEqualityOp(op));
+
+ const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
+ Register output = ToRegister(lir->output());
+
+ Assembler::Condition cond = JSOpToCondition(compareType, op);
+ masm.testUndefinedSet(cond, value, output);
+}
+
+void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
+ MCompare::CompareType compareType = lir->cmpMir()->compareType();
+ MOZ_ASSERT(compareType == MCompare::Compare_Null);
+
+ JSOp op = lir->cmpMir()->jsop();
+ MOZ_ASSERT(IsStrictEqualityOp(op));
+
+ const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);
+
+ Assembler::Condition cond = JSOpToCondition(compareType, op);
+ testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
+}
+
+void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
+ MCompare::CompareType compareType = lir->cmpMir()->compareType();
+ MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
+
+ JSOp op = lir->cmpMir()->jsop();
+ MOZ_ASSERT(IsStrictEqualityOp(op));
+
+ const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);
+
+ Assembler::Condition cond = JSOpToCondition(compareType, op);
+ testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
+}
+
+void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
+ FloatRegister left = ToFloatRegister(lir->left());
+ FloatRegister right = ToFloatRegister(lir->right());
+ FloatRegister temp = ToFloatRegister(lir->temp0());
+ Register output = ToRegister(lir->output());
+
+ masm.sameValueDouble(left, right, temp, output);
+}
+
+void CodeGenerator::visitSameValue(LSameValue* lir) {
+ ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
+ ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
+ Register output = ToRegister(lir->output());
+
+ using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
+ OutOfLineCode* ool =
+ oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));
+
+ // First check to see if the values have identical bits.
+ // This is correct for SameValue because SameValue(NaN,NaN) is true,
+ // and SameValue(0,-0) is false.
+ masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
+ ool->entry());
+ masm.move32(Imm32(1), output);
+
+  // If the bits differ, the OOL path calls the SameValue VM function and
+  // stores its result; both paths rejoin here.
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
+ Register output) {
+ using Fn =
+ JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
+ OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
+ lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
+ StoreRegisterTo(output));
+
+ const JitRealm* jitRealm = gen->realm->jitRealm();
+ JitCode* stringConcatStub =
+ jitRealm->stringConcatStubNoBarrier(&realmStubsToReadBarrier_);
+ masm.call(stringConcatStub);
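+  // The stub returns nullptr in |output| when it cannot finish the
+  // concatenation itself (e.g. allocation failure or a result longer than
+  // JSString::MAX_LENGTH); in that case fall back to the ConcatStrings VM
+  // call.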
+ masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitConcat(LConcat* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+
+ Register output = ToRegister(lir->output());
+
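+  // The asserts below document the fixed-register contract with the string
+  // concat stub: the LIR pins the operands, temps and output to the exact
+  // CallTempRegs that generateStringConcatStub expects.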
+ MOZ_ASSERT(lhs == CallTempReg0);
+ MOZ_ASSERT(rhs == CallTempReg1);
+ MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
+ MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
+ MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
+ MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
+ MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
+ MOZ_ASSERT(output == CallTempReg5);
+
+ emitConcat(lir, lhs, rhs, output);
+}
+
+static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
+ Register len, Register byteOpScratch,
+ CharEncoding fromEncoding,
+ CharEncoding toEncoding) {
+  // Copy |len| characters from |from| to |to| in the given encodings. Assumes
+  // len > 0 (checked below in debug builds), and when done |to| points to the
+  // next available character.
+
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
+ masm.assumeUnreachable("Length should be greater than 0.");
+ masm.bind(&ok);
+#endif
+
+ MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
+ fromEncoding == CharEncoding::Latin1);
+
+ size_t fromWidth =
+ fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
+ size_t toWidth =
+ toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
+
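+  // Copy one character per iteration: load in the source encoding, store in
+  // the destination encoding, advance both pointers, and loop until |len|
+  // reaches zero.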
+ Label start;
+ masm.bind(&start);
+ masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
+ masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
+ masm.addPtr(Imm32(fromWidth), from);
+ masm.addPtr(Imm32(toWidth), to);
+ masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
+}
+
+static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
+ Register len, Register byteOpScratch,
+ CharEncoding encoding) {
+ CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding);
+}
+
+static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
+ Register destChars, Register temp1,
+ Register temp2) {
+ // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
+ // have to inflate.
+
+ Label isLatin1, done;
+ masm.loadStringLength(input, temp1);
+ masm.branchLatin1String(input, &isLatin1);
+ {
+ masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
+ masm.movePtr(temp2, input);
+ CopyStringChars(masm, destChars, input, temp1, temp2,
+ CharEncoding::TwoByte);
+ masm.jump(&done);
+ }
+ masm.bind(&isLatin1);
+ {
+ masm.loadStringChars(input, temp2, CharEncoding::Latin1);
+ masm.movePtr(temp2, input);
+ CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
+ CharEncoding::TwoByte);
+ }
+ masm.bind(&done);
+}
+
+static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
+ Register length, Register temp,
+ gc::Heap initialStringHeap,
+ Label* failure,
+ CharEncoding encoding) {
+#ifdef DEBUG
+ size_t maxInlineLength;
+ if (encoding == CharEncoding::Latin1) {
+ maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
+ } else {
+ maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
+ }
+
+ Label ok;
+ masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
+ masm.assumeUnreachable("string length too large to be allocated as inline");
+ masm.bind(&ok);
+#endif
+
+ size_t maxThinInlineLength;
+ if (encoding == CharEncoding::Latin1) {
+ maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
+ } else {
+ maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
+ }
+
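+  // Lengths up to the thin-inline maximum fit in a JSThinInlineString;
+  // anything longer (but still inline-able, as asserted above) needs a
+  // JSFatInlineString.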
+ Label isFat, allocDone;
+ masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
+ {
+ uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
+ if (encoding == CharEncoding::Latin1) {
+ flags |= JSString::LATIN1_CHARS_BIT;
+ }
+ masm.newGCString(output, temp, initialStringHeap, failure);
+ masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
+ masm.jump(&allocDone);
+ }
+ masm.bind(&isFat);
+ {
+ uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
+ if (encoding == CharEncoding::Latin1) {
+ flags |= JSString::LATIN1_CHARS_BIT;
+ }
+ masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
+ masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
+ }
+ masm.bind(&allocDone);
+
+ // Store length.
+ masm.store32(length, Address(output, JSString::offsetOfLength()));
+}
+
+static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
+ Register output, Register temp1, Register temp2,
+ Register temp3, gc::Heap initialStringHeap,
+ Label* failure, CharEncoding encoding) {
+ JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
+ (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
+
+ // State: result length in temp2.
+
+ // Ensure both strings are linear.
+ masm.branchIfRope(lhs, failure);
+ masm.branchIfRope(rhs, failure);
+
+ // Allocate a JSThinInlineString or JSFatInlineString.
+ AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
+ failure, encoding);
+
+ // Load chars pointer in temp2.
+ masm.loadInlineStringCharsForStore(output, temp2);
+
+ auto copyChars = [&](Register src) {
+ if (encoding == CharEncoding::TwoByte) {
+ CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
+ } else {
+ masm.loadStringLength(src, temp3);
+ masm.loadStringChars(src, temp1, CharEncoding::Latin1);
+ masm.movePtr(temp1, src);
+ CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
+ }
+ };
+
+ // Copy lhs chars. Note that this advances temp2 to point to the next
+ // char. This also clobbers the lhs register.
+ copyChars(lhs);
+
+ // Copy rhs chars. Clobbers the rhs register.
+ copyChars(rhs);
+}
+
+void CodeGenerator::visitSubstr(LSubstr* lir) {
+ Register string = ToRegister(lir->string());
+ Register begin = ToRegister(lir->begin());
+ Register length = ToRegister(lir->length());
+ Register output = ToRegister(lir->output());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp2 = ToRegister(lir->temp2());
+
+  // On x86 there are not enough registers. In that case reuse the string
+  // register as a temporary.
+ Register temp1 =
+ lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());
+
+ Label isLatin1, notInline, nonZero, nonInput, isInlinedLatin1;
+
+  // For all edge cases, fall back to the C++ variant.
+  // Note: we also use this path upon allocation failure in newGCString and
+  // newGCFatInlineString. To squeeze out even more performance, those
+  // failures could be handled by allocating in OOL code and then returning to
+  // JIT code to fill in all data.
+ using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
+ int32_t len);
+ OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
+ lir, ArgList(string, begin, length), StoreRegisterTo(output));
+ Label* slowPath = ool->entry();
+ Label* done = ool->rejoin();
+
+ // Zero length, return emptystring.
+ masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
+ const JSAtomState& names = gen->runtime->names();
+ masm.movePtr(ImmGCPtr(names.empty), output);
+ masm.jump(done);
+
+ // Substring from 0..|str.length|, return str.
+ masm.bind(&nonZero);
+ masm.branch32(Assembler::NotEqual,
+ Address(string, JSString::offsetOfLength()), length, &nonInput);
+#ifdef DEBUG
+ {
+ Label ok;
+ masm.branchTest32(Assembler::Zero, begin, begin, &ok);
+ masm.assumeUnreachable("length == str.length implies begin == 0");
+ masm.bind(&ok);
+ }
+#endif
+ masm.movePtr(string, output);
+ masm.jump(done);
+
+ // Use slow path for ropes.
+ masm.bind(&nonInput);
+ masm.branchIfRope(string, slowPath);
+
+ // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
+ // notInline if we need a dependent string.
+ {
+ static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
+ JSFatInlineString::MAX_LENGTH_LATIN1);
+ static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
+ JSFatInlineString::MAX_LENGTH_TWO_BYTE);
+
+ // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
+ // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
+ // strings.
+
+ Label isLatin1, allocFat, allocThin, allocDone;
+ masm.branchLatin1String(string, &isLatin1);
+ {
+ masm.branch32(Assembler::Above, length,
+ Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE), &notInline);
+ masm.move32(Imm32(0), temp2);
+ masm.branch32(Assembler::Above, length,
+ Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE), &allocFat);
+ masm.jump(&allocThin);
+ }
+
+ masm.bind(&isLatin1);
+ {
+ masm.branch32(Assembler::Above, length,
+ Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &notInline);
+ masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
+ masm.branch32(Assembler::Above, length,
+ Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
+ }
+
+ masm.bind(&allocThin);
+ {
+ masm.newGCString(output, temp0, initialStringHeap(), slowPath);
+ masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
+ masm.jump(&allocDone);
+ }
+ masm.bind(&allocFat);
+ {
+ masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
+ masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
+ }
+
+ masm.bind(&allocDone);
+ masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
+ masm.store32(length, Address(output, JSString::offsetOfLength()));
+ }
+
+ auto initializeInlineString = [&](CharEncoding encoding) {
+ masm.loadStringChars(string, temp0, encoding);
+ masm.addToCharPtr(temp0, begin, encoding);
+ if (temp1 == string) {
+ masm.push(string);
+ }
+ masm.loadInlineStringCharsForStore(output, temp1);
+ CopyStringChars(masm, temp1, temp0, length, temp2, encoding);
+ masm.loadStringLength(output, length);
+ if (temp1 == string) {
+ masm.pop(string);
+ }
+ masm.jump(done);
+ };
+
+ masm.branchLatin1String(string, &isInlinedLatin1);
+ initializeInlineString(CharEncoding::TwoByte);
+
+ masm.bind(&isInlinedLatin1);
+ initializeInlineString(CharEncoding::Latin1);
+
+ // Handle other cases with a DependentString.
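+  // A dependent string shares the base string's character buffer instead of
+  // copying, so we only store the flags, length, base and chars pointer.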
+ masm.bind(&notInline);
+ masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
+ masm.store32(length, Address(output, JSString::offsetOfLength()));
+ masm.storeDependentStringBase(string, output);
+
+ auto initializeDependentString = [&](CharEncoding encoding) {
+ uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
+ if (encoding == CharEncoding::Latin1) {
+ flags |= JSString::LATIN1_CHARS_BIT;
+ }
+
+ masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
+ masm.loadNonInlineStringChars(string, temp0, encoding);
+ masm.addToCharPtr(temp0, begin, encoding);
+ masm.storeNonInlineStringChars(temp0, output);
+ masm.jump(done);
+ };
+
+ masm.branchLatin1String(string, &isLatin1);
+ initializeDependentString(CharEncoding::TwoByte);
+
+ masm.bind(&isLatin1);
+ initializeDependentString(CharEncoding::Latin1);
+
+ masm.bind(done);
+}
+
+JitCode* JitRealm::generateStringConcatStub(JSContext* cx) {
+ JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");
+
+ TempAllocator temp(&cx->tempLifoAlloc());
+ JitContext jcx(cx);
+ StackMacroAssembler masm(cx, temp);
+ AutoCreatedBy acb(masm, "JitRealm::generateStringConcatStub");
+
+ Register lhs = CallTempReg0;
+ Register rhs = CallTempReg1;
+ Register temp1 = CallTempReg2;
+ Register temp2 = CallTempReg3;
+ Register temp3 = CallTempReg4;
+ Register output = CallTempReg5;
+
+ Label failure;
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ // If lhs is empty, return rhs.
+ Label leftEmpty;
+ masm.loadStringLength(lhs, temp1);
+ masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);
+
+ // If rhs is empty, return lhs.
+ Label rightEmpty;
+ masm.loadStringLength(rhs, temp2);
+ masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);
+
+ masm.add32(temp1, temp2);
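+  // temp2 now holds the combined length |lhs.length + rhs.length|.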
+
+ // Check if we can use a JSInlineString. The result is a Latin1 string if
+ // lhs and rhs are both Latin1, so we AND the flags.
+ Label isInlineTwoByte, isInlineLatin1;
+ masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
+ masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);
+
+ Label isLatin1, notInline;
+ masm.branchTest32(Assembler::NonZero, temp1,
+ Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
+ {
+ masm.branch32(Assembler::BelowOrEqual, temp2,
+ Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
+ &isInlineTwoByte);
+ masm.jump(&notInline);
+ }
+ masm.bind(&isLatin1);
+ {
+ masm.branch32(Assembler::BelowOrEqual, temp2,
+ Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
+ }
+ masm.bind(&notInline);
+
+ // Keep AND'ed flags in temp1.
+
+ // Ensure result length <= JSString::MAX_LENGTH.
+ masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);
+
+ // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
+ // == gc::Heap::Default. (As a result, no post barriers are needed below.)
+ masm.newGCString(output, temp3, initialStringHeap, &failure);
+
+ // Store rope length and flags. temp1 still holds the result of AND'ing the
+ // lhs and rhs flags, so we just have to clear the other flags to get our rope
+ // flags (Latin1 if both lhs and rhs are Latin1).
+ static_assert(JSString::INIT_ROPE_FLAGS == 0,
+ "Rope type flags must have no bits set");
+ masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
+ masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
+ masm.store32(temp2, Address(output, JSString::offsetOfLength()));
+
+ // Store left and right nodes.
+ masm.storeRopeChildren(lhs, rhs, output);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ masm.bind(&leftEmpty);
+ masm.mov(rhs, output);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ masm.bind(&rightEmpty);
+ masm.mov(lhs, output);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ masm.bind(&isInlineTwoByte);
+ ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
+ initialStringHeap, &failure, CharEncoding::TwoByte);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ masm.bind(&isInlineLatin1);
+ ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
+ initialStringHeap, &failure, CharEncoding::Latin1);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ masm.bind(&failure);
+ masm.movePtr(ImmPtr(nullptr), output);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode(cx, CodeKind::Other);
+
+ CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
+#ifdef MOZ_VTUNE
+ vtune::MarkStub(code, "StringConcatStub");
+#endif
+
+ return code;
+}
+
+void JitRuntime::generateFreeStub(MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");
+
+ const Register regSlots = CallTempReg0;
+
+ freeStubOffset_ = startTrampolineCode(masm);
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
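+  // Preserve all volatile registers except |regSlots| so this stub can be
+  // called from arbitrary JIT code without clobbering live values.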
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(regSlots);
+ LiveRegisterSet save(regs.asLiveSet());
+ masm.PushRegsInMask(save);
+
+ const Register regTemp = regs.takeAnyGeneral();
+ MOZ_ASSERT(regTemp != regSlots);
+
+ using Fn = void (*)(void* p);
+ masm.setupUnalignedABICall(regTemp);
+ masm.passABIArg(regSlots);
+ masm.callWithABI<Fn, js_free>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.PopRegsInMask(save);
+
+ masm.ret();
+}
+
+void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");
+
+ lazyLinkStubOffset_ = startTrampolineCode(masm);
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ Register temp0 = regs.takeAny();
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+
+ masm.loadJSContext(temp0);
+ masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
+ masm.moveStackPtrTo(temp1);
+
+ using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
+ masm.setupUnalignedABICall(temp2);
+ masm.passABIArg(temp0);
+ masm.passABIArg(temp1);
+ masm.callWithABI<Fn, LazyLinkTopActivation>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Discard exit frame and restore frame pointer.
+ masm.leaveExitFrame(0);
+ masm.pop(FramePointer);
+
+#ifdef JS_USE_LINK_REGISTER
+ // Restore the return address such that the emitPrologue function of the
+ // CodeGenerator can push it back on the stack with pushReturnAddress.
+ masm.popReturnAddress();
+#endif
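+  // LazyLinkTopActivation returns the entry point of the freshly linked JIT
+  // code; jump straight into it.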
+ masm.jump(ReturnReg);
+}
+
+void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");
+
+ interpreterStubOffset_ = startTrampolineCode(masm);
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ Register temp0 = regs.takeAny();
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+
+ masm.loadJSContext(temp0);
+ masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
+ masm.moveStackPtrTo(temp1);
+
+ using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
+ masm.setupUnalignedABICall(temp2);
+ masm.passABIArg(temp0);
+ masm.passABIArg(temp1);
+ masm.callWithABI<Fn, InvokeFromInterpreterStub>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
+
+ // Discard exit frame and restore frame pointer.
+ masm.leaveExitFrame(0);
+ masm.pop(FramePointer);
+
+ // InvokeFromInterpreterStub stores the return value in argv[0], where the
+ // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
+ // just popped.
+ masm.loadValue(Address(masm.getStackPointer(),
+ JitFrameLayout::offsetOfThis() - sizeof(void*)),
+ JSReturnOperand);
+ masm.ret();
+}
+
+void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
+ doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);
+
+ Label done;
+ masm.branchTestDouble(Assembler::NotEqual, R0, &done);
+
+ masm.unboxDouble(R0, FloatReg0);
+ masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
+ /* negativeZeroCheck = */ false);
+ masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);
+
+ masm.bind(&done);
+ masm.abiret();
+}
+
+void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
+ Register str = ToRegister(lir->str());
+ Register index = ToRegister(lir->index());
+ Register output = ToRegister(lir->output());
+
+ using Fn = JSLinearString* (*)(JSContext*, JSString*);
+ auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
+ lir, ArgList(str), StoreRegisterTo(output));
+
+ masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
+
+ masm.movePtr(str, output);
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
+ Register str = ToRegister(lir->str());
+ Register index = ToRegister(lir->index());
+ Register output = ToRegister(lir->output());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
+ OutOfLineCode* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
+ StoreRegisterTo(output));
+ masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCharCodeAtMaybeOutOfBounds(
+ LCharCodeAtMaybeOutOfBounds* lir) {
+ Register str = ToRegister(lir->str());
+ Register index = ToRegister(lir->index());
+ ValueOperand output = ToOutValue(lir);
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
+ auto* ool = oolCallVM<Fn, jit::CharCodeAt>(
+ lir, ArgList(str, index), StoreRegisterTo(output.scratchReg()));
+
+ // Return NaN for out-of-bounds access.
+ Label done;
+ masm.moveValue(JS::NaNValue(), output);
+
+ masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
+ temp0, &done);
+
+ masm.loadStringChar(str, index, output.scratchReg(), temp0, temp1,
+ ool->entry());
+ masm.bind(ool->rejoin());
+
+ masm.tagValue(JSVAL_TYPE_INT32, output.scratchReg(), output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitCharAtMaybeOutOfBounds(LCharAtMaybeOutOfBounds* lir) {
+ Register str = ToRegister(lir->str());
+ Register index = ToRegister(lir->index());
+ Register output = ToRegister(lir->output());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ using Fn1 = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
+ auto* oolLoadChar = oolCallVM<Fn1, jit::CharCodeAt>(lir, ArgList(str, index),
+ StoreRegisterTo(output));
+
+ using Fn2 = JSLinearString* (*)(JSContext*, int32_t);
+ auto* oolFromCharCode = oolCallVM<Fn2, jit::StringFromCharCode>(
+ lir, ArgList(output), StoreRegisterTo(output));
+
+ // Return the empty string for out-of-bounds access.
+ const JSAtomState& names = gen->runtime->names();
+ masm.movePtr(ImmGCPtr(names.empty), output);
+
+ masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
+ temp0, oolFromCharCode->rejoin());
+
+ masm.loadStringChar(str, index, output, temp0, temp1, oolLoadChar->entry());
+ masm.bind(oolLoadChar->rejoin());
+
+ // OOL path if code >= UNIT_STATIC_LIMIT.
+ masm.boundsCheck32PowerOfTwo(output, StaticStrings::UNIT_STATIC_LIMIT,
+ oolFromCharCode->entry());
+
+ masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable), temp0);
+ masm.loadPtr(BaseIndex(temp0, output, ScalePointer), output);
+
+ masm.bind(oolFromCharCode->rejoin());
+}
+
+void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
+ Register code = ToRegister(lir->code());
+ Register output = ToRegister(lir->output());
+
+ using Fn = JSLinearString* (*)(JSContext*, int32_t);
+ OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCharCode>(
+ lir, ArgList(code), StoreRegisterTo(output));
+
+ // OOL path if code >= UNIT_STATIC_LIMIT.
+ masm.boundsCheck32PowerOfTwo(code, StaticStrings::UNIT_STATIC_LIMIT,
+ ool->entry());
+
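+  // In-bounds char codes map directly to interned single-character strings
+  // in the static strings table.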
+ masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable), output);
+ masm.loadPtr(BaseIndex(output, code, ScalePointer), output);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
+ Register codePoint = ToRegister(lir->codePoint());
+ Register output = ToRegister(lir->output());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ LSnapshot* snapshot = lir->snapshot();
+
+ // The OOL path is only taken when we can't allocate the inline string.
+ using Fn = JSString* (*)(JSContext*, int32_t);
+ OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCodePoint>(
+ lir, ArgList(codePoint), StoreRegisterTo(output));
+
+ Label isTwoByte;
+ Label* done = ool->rejoin();
+
+ static_assert(
+ StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
+ "Latin-1 strings can be loaded from static strings");
+ masm.boundsCheck32PowerOfTwo(codePoint, StaticStrings::UNIT_STATIC_LIMIT,
+ &isTwoByte);
+ {
+ masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable),
+ output);
+ masm.loadPtr(BaseIndex(output, codePoint, ScalePointer), output);
+ masm.jump(done);
+ }
+ masm.bind(&isTwoByte);
+ {
+ // Use a bailout if the input is not a valid code point, because
+ // MFromCodePoint is movable and it'd be observable when a moved
+ // fromCodePoint throws an exception before its actual call site.
+ bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
+ snapshot);
+
+ // Allocate a JSThinInlineString.
+ {
+ static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
+ "JSThinInlineString can hold a supplementary code point");
+
+ uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
+ masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
+ masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
+ }
+
+ Label isSupplementary;
+ masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
+ &isSupplementary);
+ {
+ // Store length.
+ masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));
+
+ // Load chars pointer in temp0.
+ masm.loadInlineStringCharsForStore(output, temp0);
+
+ masm.store16(codePoint, Address(temp0, 0));
+
+ masm.jump(done);
+ }
+ masm.bind(&isSupplementary);
+ {
+ // Store length.
+ masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));
+
+ // Load chars pointer in temp0.
+ masm.loadInlineStringCharsForStore(output, temp0);
+
+ // Inlined unicode::LeadSurrogate(uint32_t).
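+      // lead = 0xD800 + ((codePoint - 0x10000) >> 10), computed here as
+      // (codePoint >> 10) + (0xD800 - (0x10000 >> 10)).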
+ masm.move32(codePoint, temp1);
+ masm.rshift32(Imm32(10), temp1);
+ masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
+ temp1);
+
+ masm.store16(temp1, Address(temp0, 0));
+
+ // Inlined unicode::TrailSurrogate(uint32_t).
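+      // trail = 0xDC00 + (codePoint & 0x3FF); the OR works as an addition
+      // because the low ten bits of 0xDC00 are zero.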
+ masm.move32(codePoint, temp1);
+ masm.and32(Imm32(0x3FF), temp1);
+ masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);
+
+ masm.store16(temp1, Address(temp0, sizeof(char16_t)));
+ }
+ }
+
+ masm.bind(done);
+}
+
+void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
+ pushArg(ToRegister(lir->searchString()));
+ pushArg(ToRegister(lir->string()));
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
+ callVM<Fn, js::StringIndexOf>(lir);
+}
+
+void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
+ pushArg(ToRegister(lir->searchString()));
+ pushArg(ToRegister(lir->string()));
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ callVM<Fn, js::StringStartsWith>(lir);
+}
+
+void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
+ Register string = ToRegister(lir->string());
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ const JSLinearString* searchString = lir->searchString();
+
+ size_t length = searchString->length();
+ MOZ_ASSERT(length > 0);
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ auto* ool = oolCallVM<Fn, js::StringStartsWith>(
+ lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
+
+ masm.move32(Imm32(0), output);
+
+ // Can't be a prefix when the string is smaller than the search string.
+ masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
+ Imm32(length), ool->rejoin());
+
+ // Unwind ropes at the start if possible.
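+  // A rope's left child holds the leading characters, so if it is at least
+  // as long as the search string we can compare against it without
+  // linearizing the whole rope.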
+ Label compare;
+ masm.movePtr(string, temp);
+ masm.branchIfNotRope(temp, &compare);
+
+ Label unwindRope;
+ masm.bind(&unwindRope);
+ masm.loadRopeLeftChild(temp, output);
+ masm.movePtr(output, temp);
+
+ // If the left child is smaller than the search string, jump into the VM to
+ // linearize the string.
+ masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
+ Imm32(length), ool->entry());
+
+ // Otherwise keep unwinding ropes.
+ masm.branchIfRope(temp, &unwindRope);
+
+ masm.bind(&compare);
+
+ // If operands point to the same instance, it's trivially a prefix.
+ Label notPointerEqual;
+ masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
+ &notPointerEqual);
+ masm.move32(Imm32(1), output);
+ masm.jump(ool->rejoin());
+ masm.bind(&notPointerEqual);
+
+ if (searchString->hasTwoByteChars()) {
+ // Pure two-byte strings can't be a prefix of Latin-1 strings.
+ JS::AutoCheckCannotGC nogc;
+ if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
+ Label compareChars;
+ masm.branchTwoByteString(temp, &compareChars);
+ masm.move32(Imm32(0), output);
+ masm.jump(ool->rejoin());
+ masm.bind(&compareChars);
+ }
+ }
+
+ // Otherwise start comparing character by character.
+ CompareCharacters(masm, temp, searchString, output, JSOp::Eq,
+ CompareDirection::Forward, ool->rejoin(), ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
+ pushArg(ToRegister(lir->searchString()));
+ pushArg(ToRegister(lir->string()));
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ callVM<Fn, js::StringEndsWith>(lir);
+}
+
+void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
+ Register string = ToRegister(lir->string());
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ const JSLinearString* searchString = lir->searchString();
+
+ size_t length = searchString->length();
+ MOZ_ASSERT(length > 0);
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ auto* ool = oolCallVM<Fn, js::StringEndsWith>(
+ lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
+
+ masm.move32(Imm32(0), output);
+
+ // Can't be a suffix when the string is smaller than the search string.
+ masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
+ Imm32(length), ool->rejoin());
+
+ // Unwind ropes at the end if possible.
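+  // A rope's right child holds the trailing characters, so if it is at
+  // least as long as the search string we can compare against it without
+  // linearizing the whole rope.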
+ Label compare;
+ masm.movePtr(string, temp);
+ masm.branchIfNotRope(temp, &compare);
+
+ Label unwindRope;
+ masm.bind(&unwindRope);
+ masm.loadRopeRightChild(temp, output);
+ masm.movePtr(output, temp);
+
+ // If the right child is smaller than the search string, jump into the VM to
+ // linearize the string.
+ masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
+ Imm32(length), ool->entry());
+
+ // Otherwise keep unwinding ropes.
+ masm.branchIfRope(temp, &unwindRope);
+
+ masm.bind(&compare);
+
+ // If operands point to the same instance, it's trivially a suffix.
+ Label notPointerEqual;
+ masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
+ &notPointerEqual);
+ masm.move32(Imm32(1), output);
+ masm.jump(ool->rejoin());
+ masm.bind(&notPointerEqual);
+
+ if (searchString->hasTwoByteChars()) {
+ // Pure two-byte strings can't be a suffix of Latin-1 strings.
+ JS::AutoCheckCannotGC nogc;
+ if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
+ Label compareChars;
+ masm.branchTwoByteString(temp, &compareChars);
+ masm.move32(Imm32(0), output);
+ masm.jump(ool->rejoin());
+ masm.bind(&compareChars);
+ }
+ }
+
+ // Otherwise start comparing character by character.
+ CompareCharacters(masm, temp, searchString, output, JSOp::Eq,
+ CompareDirection::Backward, ool->rejoin(), ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
+ Register string = ToRegister(lir->string());
+ Register output = ToRegister(lir->output());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+
+ // On x86 there are not enough registers. In that case reuse the string
+ // register as a temporary.
+ Register temp3 =
+ lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
+ Register temp4 = ToRegister(lir->temp4());
+
+ using Fn = JSString* (*)(JSContext*, HandleString);
+ OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
+ lir, ArgList(string), StoreRegisterTo(output));
+
+ // Take the slow path if the string isn't a linear Latin-1 string.
+ Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
+ Register flags = temp0;
+ masm.load32(Address(string, JSString::offsetOfFlags()), flags);
+ masm.and32(linearLatin1Bits, flags);
+ masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());
+
+ Register length = temp0;
+ masm.loadStringLength(string, length);
+
+ // Return the input if it's the empty string.
+ Label notEmptyString;
+ masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
+ {
+ masm.movePtr(string, output);
+ masm.jump(ool->rejoin());
+ }
+ masm.bind(&notEmptyString);
+
+ Register inputChars = temp1;
+ masm.loadStringChars(string, inputChars, CharEncoding::Latin1);
+
+ Register toLowerCaseTable = temp2;
+ masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);
+
+  // Single-character strings can be retrieved directly from the static
+  // strings cache.
+ Label notSingleElementString;
+ masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
+ {
+ Register current = temp4;
+
+ masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
+ masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
+ current);
+ masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable),
+ output);
+ masm.loadPtr(BaseIndex(output, current, ScalePointer), output);
+
+ masm.jump(ool->rejoin());
+ }
+ masm.bind(&notSingleElementString);
+
+  // Use the OOL path when the string is too long. This avoids scanning a long
+  // string twice (once here and once in the VM) when its upper-case
+  // characters appear only near the end.
+ constexpr int32_t MaxInlineLength = 64;
+ masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());
+
+ {
+ // Check if there are any characters which need to be converted.
+ //
+ // This extra loop gives a small performance improvement for strings which
+ // are already lower cased and lets us avoid calling into the runtime for
+ // non-inline, all lower case strings. But more importantly it avoids
+ // repeated inline allocation failures:
+ // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
+ // |js::StringToLowerCase| runtime function when the result string can't be
+ // allocated inline. And |js::StringToLowerCase| directly returns the input
+ // string when no characters need to be converted. That means it won't
+ // trigger GC to clear up the free nursery space, so the next toLowerCase()
+ // call will again fail to inline allocate the result string.
+ Label hasUpper;
+ {
+ Register checkInputChars = output;
+ masm.movePtr(inputChars, checkInputChars);
+
+ Register current = temp4;
+
+ Label start;
+ masm.bind(&start);
+ masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
+ masm.branch8(Assembler::NotEqual,
+ BaseIndex(toLowerCaseTable, current, TimesOne), current,
+ &hasUpper);
+ masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
+ masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
+
+ // Input is already in lower case.
+ masm.movePtr(string, output);
+ masm.jump(ool->rejoin());
+ }
+ masm.bind(&hasUpper);
+
+ // |length| was clobbered above, reload.
+ masm.loadStringLength(string, length);
+
+ // Call into the runtime when we can't create an inline string.
+ masm.branch32(Assembler::Above, length,
+ Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());
+
+ AllocateThinOrFatInlineString(masm, output, length, temp4,
+ initialStringHeap(), ool->entry(),
+ CharEncoding::Latin1);
+
+ if (temp3 == string) {
+ masm.push(string);
+ }
+
+ Register outputChars = temp3;
+ masm.loadInlineStringCharsForStore(output, outputChars);
+
+ {
+ Register current = temp4;
+
+ Label start;
+ masm.bind(&start);
+ masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
+ masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
+ current);
+ masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
+ masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
+ masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
+ masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
+ }
+
+ if (temp3 == string) {
+ masm.pop(string);
+ }
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
+ pushArg(ToRegister(lir->string()));
+
+ using Fn = JSString* (*)(JSContext*, HandleString);
+ callVM<Fn, js::StringToUpperCase>(lir);
+}
+
+void CodeGenerator::visitStringSplit(LStringSplit* lir) {
+ pushArg(Imm32(INT32_MAX));
+ pushArg(ToRegister(lir->separator()));
+ pushArg(ToRegister(lir->string()));
+
+ using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
+ callVM<Fn, js::StringSplitString>(lir);
+}
+
+void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
+ Address initLength(ToRegister(lir->elements()),
+ ObjectElements::offsetOfInitializedLength());
+ masm.load32(initLength, ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
+ Address initLength(ToRegister(lir->elements()),
+ ObjectElements::offsetOfInitializedLength());
+ SetLengthFromIndex(masm, lir->index(), initLength);
+}
+
+void CodeGenerator::visitNotBI(LNotBI* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
+ Imm32(0), output);
+}
+
+void CodeGenerator::visitNotO(LNotO* lir) {
+ auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
+ addOutOfLineCode(ool, lir->mir());
+
+ Label* ifEmulatesUndefined = ool->label1();
+ Label* ifDoesntEmulateUndefined = ool->label2();
+
+ Register objreg = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
+ ifDoesntEmulateUndefined, output, ool);
+ // fall through
+
+ Label join;
+
+ masm.move32(Imm32(0), output);
+ masm.jump(&join);
+
+ masm.bind(ifEmulatesUndefined);
+ masm.move32(Imm32(1), output);
+
+ masm.bind(&join);
+}
+
+void CodeGenerator::visitNotV(LNotV* lir) {
+ auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
+ addOutOfLineCode(ool, lir->mir());
+
+ Label* ifTruthy = ool->label1();
+ Label* ifFalsy = ool->label2();
+
+ ValueOperand input = ToValue(lir, LNotV::InputIndex);
+ Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
+ FloatRegister floatTemp = ToFloatRegister(lir->temp0());
+ Register output = ToRegister(lir->output());
+ const TypeDataList& observedTypes = lir->mir()->observedTypes();
+
+ testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
+ ifTruthy, ifFalsy, ool);
+
+ Label join;
+
+ // Note that the testValueTruthy call above may choose to fall through
+ // to ifTruthy instead of branching there.
+ masm.bind(ifTruthy);
+ masm.move32(Imm32(0), output);
+ masm.jump(&join);
+
+ masm.bind(ifFalsy);
+ masm.move32(Imm32(1), output);
+
+  // Both branches meet here.
+ masm.bind(&join);
+}
+
+void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
+ const LAllocation* index = lir->index();
+ const LAllocation* length = lir->length();
+ LSnapshot* snapshot = lir->snapshot();
+
+ MIRType type = lir->mir()->type();
+
+ auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
+ if (type == MIRType::Int32) {
+ bailoutCmp32(cond, lhs, rhs, snapshot);
+ } else {
+ MOZ_ASSERT(type == MIRType::IntPtr);
+ bailoutCmpPtr(cond, lhs, rhs, snapshot);
+ }
+ };
+
+ auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
+ int32_t rhs) {
+ if (type == MIRType::Int32) {
+ bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
+ } else {
+ MOZ_ASSERT(type == MIRType::IntPtr);
+ bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
+ }
+ };
+
+ if (index->isConstant()) {
+ // Use uint32 so that the comparison is unsigned.
+ uint32_t idx = ToInt32(index);
+ if (length->isConstant()) {
+ uint32_t len = ToInt32(lir->length());
+ if (idx < len) {
+ return;
+ }
+ bailout(snapshot);
+ return;
+ }
+
+ if (length->isRegister()) {
+ bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
+ } else {
+ bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
+ }
+ return;
+ }
+
+ Register indexReg = ToRegister(index);
+ if (length->isConstant()) {
+ bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
+ } else if (length->isRegister()) {
+ bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
+ } else {
+ bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
+ }
+}
+
+void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
+ int32_t min = lir->mir()->minimum();
+ int32_t max = lir->mir()->maximum();
+ MOZ_ASSERT(max >= min);
+
+ LSnapshot* snapshot = lir->snapshot();
+ MIRType type = lir->mir()->type();
+
+ const LAllocation* length = lir->length();
+ Register temp = ToRegister(lir->getTemp(0));
+
+ auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
+ if (type == MIRType::Int32) {
+ bailoutCmp32(cond, lhs, rhs, snapshot);
+ } else {
+ MOZ_ASSERT(type == MIRType::IntPtr);
+ bailoutCmpPtr(cond, lhs, rhs, snapshot);
+ }
+ };
+
+ auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
+ int32_t rhs) {
+ if (type == MIRType::Int32) {
+ bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
+ } else {
+ MOZ_ASSERT(type == MIRType::IntPtr);
+ bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
+ }
+ };
+
+ if (lir->index()->isConstant()) {
+ int32_t nmin, nmax;
+ int32_t index = ToInt32(lir->index());
+ if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
+ if (length->isRegister()) {
+ bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
+ } else {
+ bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
+ }
+ return;
+ }
+ masm.mov(ImmWord(index), temp);
+ } else {
+ masm.mov(ToRegister(lir->index()), temp);
+ }
+
+ // If the minimum and maximum differ then do an underflow check first.
+ // If the two are the same then doing an unsigned comparison on the
+ // length will also catch a negative index.
+ if (min != max) {
+ if (min != 0) {
+ Label bail;
+ if (type == MIRType::Int32) {
+ masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
+ } else {
+ masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
+ }
+ bailoutFrom(&bail, snapshot);
+ }
+
+ bailoutCmpConstant(Assembler::LessThan, temp, 0);
+
+ if (min != 0) {
+ int32_t diff;
+ if (SafeSub(max, min, &diff)) {
+ max = diff;
+ } else {
+ if (type == MIRType::Int32) {
+ masm.sub32(Imm32(min), temp);
+ } else {
+ masm.subPtr(Imm32(min), temp);
+ }
+ }
+ }
+ }
+
+ // Compute the maximum possible index. No overflow check is needed when
+ // max > 0. We can only wraparound to a negative number, which will test as
+ // larger than all nonnegative numbers in the unsigned comparison, and the
+ // length is required to be nonnegative (else testing a negative length
+ // would succeed on any nonnegative index).
+ if (max != 0) {
+ if (max < 0) {
+ Label bail;
+ if (type == MIRType::Int32) {
+ masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
+ } else {
+ masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
+ }
+ bailoutFrom(&bail, snapshot);
+ } else {
+ if (type == MIRType::Int32) {
+ masm.add32(Imm32(max), temp);
+ } else {
+ masm.addPtr(Imm32(max), temp);
+ }
+ }
+ }
+
+ if (length->isRegister()) {
+ bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
+ } else {
+ bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
+ }
+}
+
+void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
+ int32_t min = lir->mir()->minimum();
+ bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
+ lir->snapshot());
+}
+
+void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
+ MOZ_ASSERT(JitOptions.spectreIndexMasking);
+
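+  // Mask the index to zero when it is not below |length|, so a speculatively
+  // executed out-of-bounds access reads from a safe location.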
+ const LAllocation* length = lir->length();
+ Register index = ToRegister(lir->index());
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->type() == MIRType::Int32) {
+ if (length->isRegister()) {
+ masm.spectreMaskIndex32(index, ToRegister(length), output);
+ } else {
+ masm.spectreMaskIndex32(index, ToAddress(length), output);
+ }
+ } else {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
+ if (length->isRegister()) {
+ masm.spectreMaskIndexPtr(index, ToRegister(length), output);
+ } else {
+ masm.spectreMaskIndexPtr(index, ToAddress(length), output);
+ }
+ }
+}
+
+class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
+ LInstruction* ins_;
+
+ public:
+ explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
+ MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
+ }
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineStoreElementHole(this);
+ }
+
+ MStoreElementHole* mir() const {
+ return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
+ : ins_->toStoreElementHoleT()->mir();
+ }
+ LInstruction* ins() const { return ins_; }
+};
+
+void CodeGenerator::emitStoreHoleCheck(Register elements,
+ const LAllocation* index,
+ LSnapshot* snapshot) {
+ Label bail;
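+  // A stored magic hole value means the element is a hole; bail out so the
+  // VM can handle the store.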
+ if (index->isConstant()) {
+ Address dest(elements, ToInt32(index) * sizeof(js::Value));
+ masm.branchTestMagic(Assembler::Equal, dest, &bail);
+ } else {
+ BaseObjectElementIndex dest(elements, ToRegister(index));
+ masm.branchTestMagic(Assembler::Equal, dest, &bail);
+ }
+ bailoutFrom(&bail, snapshot);
+}
+
+void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
+ MIRType valueType, Register elements,
+ const LAllocation* index) {
+ MOZ_ASSERT(valueType != MIRType::MagicHole);
+ ConstantOrRegister v = ToConstantOrRegister(value, valueType);
+ if (index->isConstant()) {
+ Address dest(elements, ToInt32(index) * sizeof(js::Value));
+ masm.storeUnboxedValue(v, valueType, dest);
+ } else {
+ BaseObjectElementIndex dest(elements, ToRegister(index));
+ masm.storeUnboxedValue(v, valueType, dest);
+ }
+}
+
+void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
+ Register elements = ToRegister(store->elements());
+ const LAllocation* index = store->index();
+
+ if (store->mir()->needsBarrier()) {
+ emitPreBarrier(elements, index);
+ }
+
+ if (store->mir()->needsHoleCheck()) {
+ emitStoreHoleCheck(elements, index, store->snapshot());
+ }
+
+ emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
+ index);
+}
+
+void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
+ const ValueOperand value = ToValue(lir, LStoreElementV::Value);
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* index = lir->index();
+
+ if (lir->mir()->needsBarrier()) {
+ emitPreBarrier(elements, index);
+ }
+
+ if (lir->mir()->needsHoleCheck()) {
+ emitStoreHoleCheck(elements, index, lir->snapshot());
+ }
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
+ masm.storeValue(value, dest);
+ } else {
+ BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
+ masm.storeValue(value, dest);
+ }
+}
+
+void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register index = ToRegister(lir->index());
+
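+  // Storing a hole makes the elements non-packed; set the flag before
+  // writing the magic hole value.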
+ Address elementsFlags(elements, ObjectElements::offsetOfFlags());
+ masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
+
+ BaseObjectElementIndex element(elements, index);
+ masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
+}
+
+void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
+ auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ Register obj = ToRegister(lir->object());
+ Register elements = ToRegister(lir->elements());
+ Register index = ToRegister(lir->index());
+ Register temp = ToRegister(lir->temp0());
+
+ Address initLength(elements, ObjectElements::offsetOfInitializedLength());
+ masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());
+
+ emitPreBarrier(elements, lir->index());
+
+ masm.bind(ool->rejoin());
+ emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
+ lir->index());
+
+ if (ValueNeedsPostBarrier(lir->mir()->value())) {
+ LiveRegisterSet regs = liveVolatileRegs(lir);
+ ConstantOrRegister val =
+ ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
+ emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
+ }
+}
+
+void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
+ auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ Register obj = ToRegister(lir->object());
+ Register elements = ToRegister(lir->elements());
+ Register index = ToRegister(lir->index());
+ const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
+ Register temp = ToRegister(lir->temp0());
+
+ Address initLength(elements, ObjectElements::offsetOfInitializedLength());
+ masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());
+
+ emitPreBarrier(elements, lir->index());
+
+ masm.bind(ool->rejoin());
+ masm.storeValue(value, BaseObjectElementIndex(elements, index));
+
+ if (ValueNeedsPostBarrier(lir->mir()->value())) {
+ LiveRegisterSet regs = liveVolatileRegs(lir);
+ emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
+ ConstantOrRegister(value));
+ }
+}
+
+void CodeGenerator::visitOutOfLineStoreElementHole(
+ OutOfLineStoreElementHole* ool) {
+ Register object, elements, index;
+ LInstruction* ins = ool->ins();
+ mozilla::Maybe<ConstantOrRegister> value;
+ Register temp;
+
+ if (ins->isStoreElementHoleV()) {
+ LStoreElementHoleV* store = ins->toStoreElementHoleV();
+ object = ToRegister(store->object());
+ elements = ToRegister(store->elements());
+ index = ToRegister(store->index());
+ value.emplace(
+ TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
+ temp = ToRegister(store->temp0());
+ } else {
+ LStoreElementHoleT* store = ins->toStoreElementHoleT();
+ object = ToRegister(store->object());
+ elements = ToRegister(store->elements());
+ index = ToRegister(store->index());
+ if (store->value()->isConstant()) {
+ value.emplace(
+ ConstantOrRegister(store->value()->toConstant()->toJSValue()));
+ } else {
+ MIRType valueType = store->mir()->value()->type();
+ value.emplace(
+ TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
+ }
+ temp = ToRegister(store->temp0());
+ }
+
+ Address initLength(elements, ObjectElements::offsetOfInitializedLength());
+
+  // We're out of bounds. We only handle the index == initializedLength case;
+  // if index > initializedLength, bail out. Note that this relies on the
+  // condition flags sticking from the incoming branch.
+  // Also note: this branch does not need Spectre mitigations, doing that for
+  // the capacity check below is sufficient.
+ Label allocElement, addNewElement;
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ // Had to reimplement for MIPS because there are no flags.
+ bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
+#else
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+#endif
+
+ // If index < capacity, we can add a dense element inline. If not, we need
+ // to allocate more elements first.
+ masm.spectreBoundsCheck32(
+ index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
+ &allocElement);
+ masm.jump(&addNewElement);
+
+ masm.bind(&allocElement);
+
+ // Save all live volatile registers, except |temp|.
+ LiveRegisterSet liveRegs = liveVolatileRegs(ins);
+ liveRegs.takeUnchecked(temp);
+ masm.PushRegsInMask(liveRegs);
+
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp);
+ masm.passABIArg(temp);
+ masm.passABIArg(object);
+
+ using Fn = bool (*)(JSContext*, NativeObject*);
+ masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
+ masm.storeCallPointerResult(temp);
+
+ masm.PopRegsInMask(liveRegs);
+ bailoutIfFalseBool(temp, ins->snapshot());
+
+ // Load the reallocated elements pointer.
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);
+
+ masm.bind(&addNewElement);
+
+ // Increment initLength
+ masm.add32(Imm32(1), initLength);
+
+ // If length is now <= index, increment length too.
+ Label skipIncrementLength;
+ Address length(elements, ObjectElements::offsetOfLength());
+ masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
+ masm.add32(Imm32(1), length);
+ masm.bind(&skipIncrementLength);
+
+ // Jump to the inline path where we will store the value.
+ // We rejoin after the prebarrier, because the memory is uninitialized.
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
+ Register obj = ToRegister(lir->object());
+ Register temp1 = ToRegister(lir->temp0());
+ Register temp2 = ToRegister(lir->temp1());
+ ValueOperand out = ToOutValue(lir);
+
+ Label bail;
+ if (lir->mir()->mode() == MArrayPopShift::Pop) {
+ masm.packedArrayPop(obj, out, temp1, temp2, &bail);
+ } else {
+ MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
+ LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
+ masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
+ }
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
+ LArrayPush* ins_;
+
+ public:
+ explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineArrayPush(this);
+ }
+
+ LArrayPush* ins() const { return ins_; }
+};
+
+void CodeGenerator::visitArrayPush(LArrayPush* lir) {
+ Register obj = ToRegister(lir->object());
+ Register elementsTemp = ToRegister(lir->temp0());
+ Register length = ToRegister(lir->output());
+ ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
+ Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
+
+ auto* ool = new (alloc()) OutOfLineArrayPush(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ // Load elements and length.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);
+ masm.load32(Address(elementsTemp, ObjectElements::offsetOfLength()), length);
+
+ // TODO(post-Warp): reuse/share the CacheIR implementation when IonBuilder and
+ // TI are gone (bug 1654180).
+
+ // Bailout if the incremented length does not fit in int32.
+ bailoutCmp32(Assembler::AboveOrEqual, length, Imm32(INT32_MAX),
+ lir->snapshot());
+
+ // Guard length == initializedLength.
+ Address initLength(elementsTemp, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::NotEqual, initLength, length, ool->entry());
+
+ // Guard length < capacity.
+ Address capacity(elementsTemp, ObjectElements::offsetOfCapacity());
+ masm.spectreBoundsCheck32(length, capacity, spectreTemp, ool->entry());
+
+ // Do the store.
+ masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));
+
+ masm.add32(Imm32(1), length);
+
+ // Update length and initialized length.
+ masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
+ masm.store32(length, Address(elementsTemp,
+ ObjectElements::offsetOfInitializedLength()));
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
+ LArrayPush* ins = ool->ins();
+
+ Register object = ToRegister(ins->object());
+ Register temp = ToRegister(ins->temp0());
+ Register output = ToRegister(ins->output());
+ ValueOperand value = ToValue(ins, LArrayPush::ValueIndex);
+
+ // Save all live volatile registers, except |temp| and |output|, because both
+ // are overwritten anyway.
+ LiveRegisterSet liveRegs = liveVolatileRegs(ins);
+ liveRegs.takeUnchecked(temp);
+ liveRegs.takeUnchecked(output);
+
+ masm.PushRegsInMask(liveRegs);
+
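+ // Push the value onto the stack and pass its address as the Value* argument
+ // expected by ArrayPushDensePure.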
+ masm.Push(value);
+ masm.moveStackPtrTo(output);
+
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp);
+ masm.passABIArg(temp);
+ masm.passABIArg(object);
+ masm.passABIArg(output);
+
+ using Fn = bool (*)(JSContext*, ArrayObject*, Value*);
+ masm.callWithABI<Fn, jit::ArrayPushDensePure>();
+ masm.storeCallPointerResult(temp);
+
+ masm.freeStack(sizeof(Value)); // Discard pushed Value.
+
+ MOZ_ASSERT(!liveRegs.has(temp));
+ masm.PopRegsInMask(liveRegs);
+
+ bailoutIfFalseBool(temp, ins->snapshot());
+
+ // Load the new length into the output register.
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), output);
+ masm.load32(Address(output, ObjectElements::offsetOfLength()), output);
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitArraySlice(LArraySlice* lir) {
+ Register object = ToRegister(lir->object());
+ Register begin = ToRegister(lir->begin());
+ Register end = ToRegister(lir->end());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ Label call, fail;
+
+ Label bail;
+ masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+
+ // Try to allocate an object.
+ TemplateObject templateObject(lir->mir()->templateObj());
+ masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
+ &fail);
+
+ masm.jump(&call);
+ {
+ masm.bind(&fail);
+ masm.movePtr(ImmPtr(nullptr), temp0);
+ }
+ masm.bind(&call);
+
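+ // Pass the pre-allocated object (or nullptr if inline allocation failed) to
+ // the VM call; presumably ArraySliceDense allocates the result itself when
+ // given nullptr.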
+ pushArg(temp0);
+ pushArg(end);
+ pushArg(begin);
+ pushArg(object);
+
+ using Fn =
+ JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
+ callVM<Fn, ArraySliceDense>(lir);
+}
+
+void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
+ Register object = ToRegister(lir->object());
+ Register begin = ToRegister(lir->begin());
+ Register end = ToRegister(lir->end());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ Label call, fail;
+
+ // Try to allocate an object.
+ TemplateObject templateObject(lir->mir()->templateObj());
+ masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
+ &fail);
+
+ masm.jump(&call);
+ {
+ masm.bind(&fail);
+ masm.movePtr(ImmPtr(nullptr), temp0);
+ }
+ masm.bind(&call);
+
+ pushArg(temp0);
+ pushArg(end);
+ pushArg(begin);
+ pushArg(object);
+
+ using Fn =
+ JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
+ callVM<Fn, ArgumentsSliceDense>(lir);
+}
+
+#ifdef DEBUG
+void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
+ const RegisterOrInt32& count,
+ Register numActualArgs) {
+ // |begin| must be positive or zero.
+ if (begin.is<Register>()) {
+ Label beginOk;
+ masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
+ &beginOk);
+ masm.assumeUnreachable("begin < 0");
+ masm.bind(&beginOk);
+ } else {
+ MOZ_ASSERT(begin.as<int32_t>() >= 0);
+ }
+
+ // |count| must be positive or zero.
+ if (count.is<Register>()) {
+ Label countOk;
+ masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
+ &countOk);
+ masm.assumeUnreachable("count < 0");
+ masm.bind(&countOk);
+ } else {
+ MOZ_ASSERT(count.as<int32_t>() >= 0);
+ }
+
+ // |begin| must be less-or-equal to |numActualArgs|.
+ Label argsBeginOk;
+ if (begin.is<Register>()) {
+ masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
+ &argsBeginOk);
+ } else {
+ masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
+ Imm32(begin.as<int32_t>()), &argsBeginOk);
+ }
+ masm.assumeUnreachable("begin <= numActualArgs");
+ masm.bind(&argsBeginOk);
+
+ // |count| must be less-or-equal to |numActualArgs|.
+ Label argsCountOk;
+ if (count.is<Register>()) {
+ masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
+ &argsCountOk);
+ } else {
+ masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
+ Imm32(count.as<int32_t>()), &argsCountOk);
+ }
+ masm.assumeUnreachable("count <= numActualArgs");
+ masm.bind(&argsCountOk);
+
+ // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
+ //
+ // Pre-condition: |count| <= |numActualArgs|
+ // Condition to test: |begin + count| <= |numActualArgs|
+ // Transform to: |begin| <= |numActualArgs - count|
+ if (count.is<Register>()) {
+ masm.subPtr(count.as<Register>(), numActualArgs);
+ } else {
+ masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
+ }
+
+ // |begin + count| must be less-or-equal to |numActualArgs|.
+ Label argsBeginCountOk;
+ if (begin.is<Register>()) {
+ masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
+ &argsBeginCountOk);
+ } else {
+ masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
+ Imm32(begin.as<int32_t>()), &argsBeginCountOk);
+ }
+ masm.assumeUnreachable("begin + count <= numActualArgs");
+ masm.bind(&argsBeginCountOk);
+}
+#endif
+
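+// Helper for the arguments/frame slice paths: allocates an array of length
+// |count| from the template object, falling back to the
+// NewArrayObjectEnsureDenseInitLength VM call when the template's capacity is
+// too small or inline allocation fails.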
+template <class ArgumentsSlice>
+void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
+ const RegisterOrInt32& count, Register output,
+ Register temp) {
+ using Fn = ArrayObject* (*)(JSContext*, int32_t);
+ auto* ool = count.match(
+ [&](Register count) {
+ return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
+ lir, ArgList(count), StoreRegisterTo(output));
+ },
+ [&](int32_t count) {
+ return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
+ lir, ArgList(Imm32(count)), StoreRegisterTo(output));
+ });
+
+ TemplateObject templateObject(lir->mir()->templateObj());
+ MOZ_ASSERT(templateObject.isArrayObject());
+
+ auto templateNativeObj = templateObject.asTemplateNativeObject();
+ MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
+ MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
+ MOZ_ASSERT(!templateNativeObj.hasDynamicElements());
+
+ // Check array capacity. Call into the VM if the template object's capacity
+ // is too small.
+ bool tryAllocate = count.match(
+ [&](Register count) {
+ masm.branch32(Assembler::Above, count,
+ Imm32(templateNativeObj.getDenseCapacity()),
+ ool->entry());
+ return true;
+ },
+ [&](int32_t count) {
+ MOZ_ASSERT(count >= 0);
+ if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
+ masm.jump(ool->entry());
+ return false;
+ }
+ return true;
+ });
+
+ if (tryAllocate) {
+ // Try to allocate an object.
+ masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
+ ool->entry());
+
+ auto setInitializedLengthAndLength = [&](auto count) {
+ const int elementsOffset = NativeObject::offsetOfFixedElements();
+
+ // Update initialized length.
+ Address initLength(
+ output, elementsOffset + ObjectElements::offsetOfInitializedLength());
+ masm.store32(count, initLength);
+
+ // Update length.
+ Address length(output, elementsOffset + ObjectElements::offsetOfLength());
+ masm.store32(count, length);
+ };
+
+ // The array object was successfully created. Set the length and initialized
+ // length and then proceed to fill the elements.
+ count.match([&](Register count) { setInitializedLengthAndLength(count); },
+ [&](int32_t count) {
+ if (count > 0) {
+ setInitializedLengthAndLength(Imm32(count));
+ }
+ });
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
+ Register begin = ToRegister(lir->begin());
+ Register count = ToRegister(lir->count());
+ Register temp = ToRegister(lir->temp0());
+ Register output = ToRegister(lir->output());
+
+#ifdef DEBUG
+ masm.loadNumActualArgs(FramePointer, temp);
+ emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
+ temp);
+#endif
+
+ emitNewArray(lir, RegisterOrInt32(count), output, temp);
+
+ Label done;
+ masm.branch32(Assembler::Equal, count, Imm32(0), &done);
+ {
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ allRegs.take(begin);
+ allRegs.take(count);
+ allRegs.take(temp);
+ allRegs.take(output);
+
+ ValueOperand value = allRegs.takeAnyValue();
+
+ LiveRegisterSet liveRegs;
+ liveRegs.add(output);
+ liveRegs.add(begin);
+ liveRegs.add(value);
+
+ masm.PushRegsInMask(liveRegs);
+
+ // Initialize all elements.
+
+ Register elements = output;
+ masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
+
+ Register argIndex = begin;
+
+ Register index = temp;
+ masm.move32(Imm32(0), index);
+
+ size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
+ BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);
+
+ Label loop;
+ masm.bind(&loop);
+
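+ // |argPtr| is based on |argIndex|, which is incremented below, so each
+ // iteration loads the next actual argument.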
+ masm.loadValue(argPtr, value);
+
+ // We don't need a pre-barrier, because the element at |index| is guaranteed
+ // to be a non-GC thing (either uninitialized memory or the magic hole
+ // value).
+ masm.storeValue(value, BaseObjectElementIndex(elements, index));
+
+ masm.add32(Imm32(1), index);
+ masm.add32(Imm32(1), argIndex);
+
+ masm.branch32(Assembler::LessThan, index, count, &loop);
+
+ masm.PopRegsInMask(liveRegs);
+
+ // Emit a post-write barrier if |output| is tenured.
+ //
+ // We expect that |output| is nursery allocated, so it isn't worth the
+ // trouble to check whether any frame argument is a nursery thing, which
+ // would allow us to omit the post-write barrier.
+ masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
+
+ LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
+ volatileRegs.takeUnchecked(temp);
+ if (output.volatile_()) {
+ volatileRegs.addUnchecked(output);
+ }
+
+ masm.PushRegsInMask(volatileRegs);
+ emitPostWriteBarrier(output);
+ masm.PopRegsInMask(volatileRegs);
+ }
+ masm.bind(&done);
+}
+
+CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
+ const LAllocation* allocation) {
+ if (allocation->isConstant()) {
+ return RegisterOrInt32(allocation->toConstant()->toInt32());
+ }
+ return RegisterOrInt32(ToRegister(allocation));
+}
+
+void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
+ RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
+ RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
+ Register temp = ToRegister(lir->temp());
+ Register output = ToRegister(lir->output());
+
+ uint32_t numActuals = lir->mir()->numActuals();
+
+#ifdef DEBUG
+ masm.move32(Imm32(numActuals), temp);
+
+ emitAssertArgumentsSliceBounds(begin, count, temp);
+#endif
+
+ emitNewArray(lir, count, output, temp);
+
+ // We're done if there are no actual arguments.
+ if (numActuals == 0) {
+ return;
+ }
+
+ // Check if any arguments have to be copied.
+ Label done;
+ if (count.is<Register>()) {
+ masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
+ } else if (count.as<int32_t>() == 0) {
+ return;
+ }
+
+ auto getArg = [&](uint32_t i) {
+ return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
+ lir->mir()->getArg(i)->type());
+ };
+
+ auto storeArg = [&](uint32_t i, auto dest) {
+ // We don't need a pre-barrier because the element at |index| is guaranteed
+ // to be a non-GC thing (either uninitialized memory or the magic hole
+ // value).
+ masm.storeConstantOrRegister(getArg(i), dest);
+ };
+
+ // Initialize all elements.
+ if (numActuals == 1) {
+ // There's exactly one argument. We've checked that |count| is non-zero,
+ // which implies that |begin| must be zero.
+ MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);
+
+ Register elements = temp;
+ masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
+
+ storeArg(0, Address(elements, 0));
+ } else if (begin.is<Register>()) {
+ // There is more than one argument and |begin| isn't a compile-time
+ // constant. Iterate through 0..numActuals to search for |begin| and then
+ // start copying |count| arguments from that index.
+
+ LiveGeneralRegisterSet liveRegs;
+ liveRegs.add(output);
+ liveRegs.add(begin.as<Register>());
+
+ masm.PushRegsInMask(liveRegs);
+
+ Register elements = output;
+ masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
+
+ Register argIndex = begin.as<Register>();
+
+ Register index = temp;
+ masm.move32(Imm32(0), index);
+
+ Label doneLoop;
+ for (uint32_t i = 0; i < numActuals; ++i) {
+ Label next;
+ masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);
+
+ storeArg(i, BaseObjectElementIndex(elements, index));
+
+ masm.add32(Imm32(1), index);
+ masm.add32(Imm32(1), argIndex);
+
+ if (count.is<Register>()) {
+ masm.branch32(Assembler::GreaterThanOrEqual, index,
+ count.as<Register>(), &doneLoop);
+ } else {
+ masm.branch32(Assembler::GreaterThanOrEqual, index,
+ Imm32(count.as<int32_t>()), &doneLoop);
+ }
+
+ masm.bind(&next);
+ }
+ masm.bind(&doneLoop);
+
+ masm.PopRegsInMask(liveRegs);
+ } else {
+ // There is more than one argument and |begin| is a compile-time constant.
+
+ Register elements = temp;
+ masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
+
+ int32_t argIndex = begin.as<int32_t>();
+
+ int32_t index = 0;
+
+ Label doneLoop;
+ for (uint32_t i = argIndex; i < numActuals; ++i) {
+ storeArg(i, Address(elements, index * sizeof(Value)));
+
+ index += 1;
+
+ if (count.is<Register>()) {
+ masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
+ Imm32(index), &doneLoop);
+ } else {
+ if (index >= count.as<int32_t>()) {
+ break;
+ }
+ }
+ }
+ masm.bind(&doneLoop);
+ }
+
+ // Determine if we have to emit a post-write barrier.
+ //
+ // If either |begin| or |count| is a constant, use its value directly.
+ // Otherwise assume we copy all inline arguments from 0..numActuals.
+ bool postWriteBarrier = false;
+ uint32_t actualBegin = begin.match([](Register) { return 0; },
+ [](int32_t value) { return value; });
+ uint32_t actualCount =
+ count.match([=](Register) { return numActuals; },
+ [](int32_t value) -> uint32_t { return value; });
+ for (uint32_t i = 0; i < actualCount; ++i) {
+ ConstantOrRegister arg = getArg(actualBegin + i);
+ if (arg.constant()) {
+ Value v = arg.value();
+ if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
+ postWriteBarrier = true;
+ }
+ } else {
+ MIRType type = arg.reg().type();
+ if (type == MIRType::Value || NeedsPostBarrier(type)) {
+ postWriteBarrier = true;
+ }
+ }
+ }
+
+ // Emit a post-write barrier if |output| is tenured and we couldn't
+ // determine at compile-time that no barrier is needed.
+ if (postWriteBarrier) {
+ masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
+
+ LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
+ volatileRegs.takeUnchecked(temp);
+ if (output.volatile_()) {
+ volatileRegs.addUnchecked(output);
+ }
+
+ masm.PushRegsInMask(volatileRegs);
+ emitPostWriteBarrier(output);
+ masm.PopRegsInMask(volatileRegs);
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
+ Register value = ToRegister(lir->value());
+ Register length = ToRegister(lir->length());
+ Register output = ToRegister(lir->output());
+
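+ // Normalize the slice term: a negative |value| is clamped to
+ // max(length + value, 0), a non-negative one to min(value, length).
+ // For example, value = -2 with length = 5 yields 3, and value = 10 with
+ // length = 5 yields 5.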
+ masm.move32(value, output);
+
+ Label positive;
+ masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);
+
+ Label done;
+ masm.add32(length, output);
+ masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
+ masm.move32(Imm32(0), output);
+ masm.jump(&done);
+
+ masm.bind(&positive);
+ masm.cmp32Move32(Assembler::LessThan, length, value, length, output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
+ Label skipCall;
+
+ Register output = ToRegister(lir->output());
+ Register sep = ToRegister(lir->separator());
+ Register array = ToRegister(lir->array());
+ Register temp = ToRegister(lir->temp0());
+
+ // Fast path for simple length <= 1 cases.
+ {
+ masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
+ Address length(temp, ObjectElements::offsetOfLength());
+ Address initLength(temp, ObjectElements::offsetOfInitializedLength());
+
+ // Check for length == 0
+ Label notEmpty;
+ masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
+ const JSAtomState& names = gen->runtime->names();
+ masm.movePtr(ImmGCPtr(names.empty), output);
+ masm.jump(&skipCall);
+
+ masm.bind(&notEmpty);
+ Label notSingleString;
+ // Check for length == 1, initializedLength >= 1, arr[0].isString()
+ masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
+ masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);
+
+ Address elem0(temp, 0);
+ masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);
+
+ // At this point, 'output' can be used as a scratch register, since we're
+ // guaranteed to succeed.
+ masm.unboxString(elem0, output);
+ masm.jump(&skipCall);
+ masm.bind(&notSingleString);
+ }
+
+ pushArg(sep);
+ pushArg(array);
+
+ using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
+ callVM<Fn, jit::ArrayJoin>(lir);
+ masm.bind(&skipCall);
+}
+
+void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
+ LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+ TypedOrValueRegister val =
+ toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
+ lir->mir()->value()->type())
+ .reg();
+ Register output = ToRegister(lir->output());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
+ addIC(lir, allocateIC(ic));
+}
+
+void CodeGenerator::visitOptimizeSpreadCallCache(
+ LOptimizeSpreadCallCache* lir) {
+ LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+ ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
+ ValueOperand output = ToOutValue(lir);
+ Register temp = ToRegister(lir->temp0());
+
+ IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
+ addIC(lir, allocateIC(ic));
+}
+
+void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
+ LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+ Register iter = ToRegister(lir->iter());
+ Register temp = ToRegister(lir->temp0());
+ CompletionKind kind = CompletionKind(lir->mir()->completionKind());
+
+ IonCloseIterIC ic(liveRegs, iter, temp, kind);
+ addIC(lir, allocateIC(ic));
+}
+
+void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
+ const Register obj = ToRegister(lir->iterator());
+ const ValueOperand output = ToOutValue(lir);
+ const Register temp = ToRegister(lir->temp0());
+
+ masm.iteratorMore(obj, output, temp);
+}
+
+void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
+ ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
+ Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
+ Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
+
+ masm.branchTestMagic(Assembler::Equal, input, ifTrue);
+
+ if (!isNextBlock(lir->ifFalse()->lir())) {
+ masm.jump(ifFalse);
+ }
+}
+
+void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
+ const Register obj = ToRegister(lir->object());
+ const Register temp0 = ToRegister(lir->temp0());
+ const Register temp1 = ToRegister(lir->temp1());
+ const Register temp2 = ToRegister(lir->temp2());
+
+ masm.iteratorClose(obj, temp0, temp1, temp2);
+}
+
+void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
+ // Read the number of actual arguments from the JS frame.
+ Register argc = ToRegister(lir->output());
+ masm.loadNumActualArgs(FramePointer, argc);
+}
+
+void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
+ ValueOperand result = ToOutValue(lir);
+ const LAllocation* index = lir->index();
+ size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
+
+ // This instruction is used to access actual arguments and formal arguments.
+ // The number of Values on the stack is |max(numFormals, numActuals)|, so we
+ // assert |index < numFormals || index < numActuals| in debug builds.
+ DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();
+
+ if (index->isConstant()) {
+ int32_t i = index->toConstant()->toInt32();
+#ifdef DEBUG
+ if (uint32_t(i) >= numFormals) {
+ Label ok;
+ Register argc = result.scratchReg();
+ masm.loadNumActualArgs(FramePointer, argc);
+ masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
+ masm.assumeUnreachable("Invalid argument index");
+ masm.bind(&ok);
+ }
+#endif
+ Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
+ masm.loadValue(argPtr, result);
+ } else {
+ Register i = ToRegister(index);
+#ifdef DEBUG
+ Label ok;
+ Register argc = result.scratchReg();
+ masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
+ masm.loadNumActualArgs(FramePointer, argc);
+ masm.branch32(Assembler::Above, argc, i, &ok);
+ masm.assumeUnreachable("Invalid argument index");
+ masm.bind(&ok);
+#endif
+ BaseValueIndex argPtr(FramePointer, i, argvOffset);
+ masm.loadValue(argPtr, result);
+ }
+}
+
+void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
+ ValueOperand result = ToOutValue(lir);
+ Register index = ToRegister(lir->index());
+ Register length = ToRegister(lir->length());
+ Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
+ size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
+
+ Label outOfBounds, done;
+ masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);
+
+ BaseValueIndex argPtr(FramePointer, index, argvOffset);
+ masm.loadValue(argPtr, result);
+ masm.jump(&done);
+
+ masm.bind(&outOfBounds);
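+ // A negative index bails out; an index at or past the number of actual
+ // arguments yields |undefined|.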
+ bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
+ masm.moveValue(UndefinedValue(), result);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitRest(LRest* lir) {
+ Register numActuals = ToRegister(lir->numActuals());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+ unsigned numFormals = lir->mir()->numFormals();
+
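+ // If we have a shape, try to pre-allocate a small array (fixed capacity 2)
+ // inline and hand it to the InitRestParameter VM call below; on allocation
+ // failure (or when there is no shape) we pass nullptr instead.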
+ if (Shape* shape = lir->mir()->shape()) {
+ uint32_t arrayLength = 0;
+ uint32_t arrayCapacity = 2;
+ gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
+ MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
+ allocKind = ForegroundToBackgroundAllocKind(allocKind);
+ MOZ_ASSERT(GetGCKindSlots(allocKind) ==
+ arrayCapacity + ObjectElements::VALUES_PER_HEADER);
+
+ Label joinAlloc, failAlloc;
+ masm.movePtr(ImmGCPtr(shape), temp0);
+ masm.createArrayWithFixedElements(temp2, temp0, temp1, arrayLength,
+ arrayCapacity, allocKind,
+ gc::Heap::Default, &failAlloc);
+ masm.jump(&joinAlloc);
+ {
+ masm.bind(&failAlloc);
+ masm.movePtr(ImmPtr(nullptr), temp2);
+ }
+ masm.bind(&joinAlloc);
+ } else {
+ masm.movePtr(ImmPtr(nullptr), temp2);
+ }
+
+ // Set temp1 to the address of the first actual argument.
+ size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
+ masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);
+
+ // Compute array length: max(numActuals - numFormals, 0).
+ Register lengthReg;
+ if (numFormals) {
+ lengthReg = temp0;
+ Label emptyLength, joinLength;
+ masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
+ &emptyLength);
+ {
+ masm.move32(numActuals, lengthReg);
+ masm.sub32(Imm32(numFormals), lengthReg);
+
+ // Skip formal arguments.
+ masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);
+
+ masm.jump(&joinLength);
+ }
+ masm.bind(&emptyLength);
+ {
+ masm.move32(Imm32(0), lengthReg);
+
+ // Leave temp1 pointing at the start of actuals() when the rest-array
+ // length is zero. We don't use |actuals() + numFormals| because
+ // |numFormals| can be any non-negative int32 value when this MRest was
+ // created from scalar replacement optimizations, and it seems questionable
+ // to compute a Value* pointer that points to who knows where.
+ }
+ masm.bind(&joinLength);
+ } else {
+ // Use numActuals directly when there are no formals.
+ lengthReg = numActuals;
+ }
+
+ pushArg(temp2);
+ pushArg(temp1);
+ pushArg(lengthReg);
+
+ using Fn = JSObject* (*)(JSContext*, uint32_t, Value*, HandleObject);
+ callVM<Fn, InitRestParameter>(lir);
+}
+
+// Create a stackmap from the given safepoint, with the structure:
+//
+//   <reg dump area, if trap>
+//   |       ++ <body (general spill)>
+//   |               ++ <space for Frame>
+//   |                       ++ <inbound args>
+//   |                                           |
+//   Lowest Addr                                 Highest Addr
+//
+// The caller owns the resulting stackmap. This assumes a grow-down stack.
+//
+// For non-debug builds, if the stackmap would contain no pointers, no
+// stackmap is created, and nullptr is returned. For a debug build, a
+// stackmap is always created and returned.
+static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
+ const RegisterOffsets& trapExitLayout,
+ size_t trapExitLayoutNumWords,
+ size_t nInboundStackArgBytes,
+ wasm::StackMap** result) {
+ // Ensure this is defined on all return paths.
+ *result = nullptr;
+
+ // The size of the wasm::Frame itself.
+ const size_t nFrameBytes = sizeof(wasm::Frame);
+
+ // This is the number of bytes in the general spill area, below the Frame.
+ const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();
+
+ // This is the number of bytes in the general spill area, the Frame, and the
+ // incoming args, but not including any trap (register dump) area.
+ const size_t nNonTrapBytes = nBodyBytes + nFrameBytes + nInboundStackArgBytes;
+ MOZ_ASSERT(nNonTrapBytes % sizeof(void*) == 0);
+
+ // This is the total number of bytes covered by the map.
+ const DebugOnly<size_t> nTotalBytes =
+ nNonTrapBytes +
+ (safepoint.isWasmTrap() ? (trapExitLayoutNumWords * sizeof(void*)) : 0);
+
+ // Create the stackmap initially in this vector. Since most frames will
+ // contain 128 or fewer words, heap allocation is avoided in the majority of
+ // cases. vec[0] is for the lowest address in the map, vec[N-1] is for the
+ // highest address in the map.
+ wasm::StackMapBoolVector vec;
+
+ // Keep track of whether we've actually seen any refs.
+ bool hasRefs = false;
+
+ // REG DUMP AREA, if any.
+ const LiveGeneralRegisterSet gcRegs = safepoint.gcRegs();
+ GeneralRegisterForwardIterator gcRegsIter(gcRegs);
+ if (safepoint.isWasmTrap()) {
+ // Deal with roots in registers. This can only happen for safepoints
+ // associated with a trap. For safepoints associated with a call, we
+ // don't expect to have any live values in registers, hence no roots in
+ // registers.
+ if (!vec.appendN(false, trapExitLayoutNumWords)) {
+ return false;
+ }
+ for (; gcRegsIter.more(); ++gcRegsIter) {
+ Register reg = *gcRegsIter;
+ size_t offsetFromTop = trapExitLayout.getOffset(reg);
+
+ // If this doesn't hold, the associated register wasn't saved by
+ // the trap exit stub. Better to crash now than much later, in
+ // some obscure place, and possibly with security consequences.
+ MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);
+
+ // offsetFromTop is an offset in words down from the highest
+ // address in the exit stub save area. Switch it around to be an
+ // offset up from the bottom of the (integer register) save area.
+ size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
+
+ vec[offsetFromBottom] = true;
+ hasRefs = true;
+ }
+ } else {
+ // This map is associated with a call instruction. We expect there to be
+ // no live ref-carrying registers, and if there are we're in deep trouble.
+ MOZ_RELEASE_ASSERT(!gcRegsIter.more());
+ }
+
+ // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
+ // Deal with roots on the stack.
+ size_t wordsSoFar = vec.length();
+ if (!vec.appendN(false, nNonTrapBytes / sizeof(void*))) {
+ return false;
+ }
+ const LSafepoint::SlotList& gcSlots = safepoint.gcSlots();
+ for (SafepointSlotEntry gcSlot : gcSlots) {
+ // The following needs to correspond with JitFrameLayout::slotRef():
+ // gcSlot.stack == 0 means the slot is in the args area.
+ if (gcSlot.stack) {
+ // It's a slot in the body allocation, so .slot is interpreted
+ // as an index downwards from the Frame*
+ MOZ_ASSERT(gcSlot.slot <= nBodyBytes);
+ uint32_t offsetInBytes = nBodyBytes - gcSlot.slot;
+ MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
+ vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
+ } else {
+ // It's an argument slot
+ MOZ_ASSERT(gcSlot.slot < nInboundStackArgBytes);
+ uint32_t offsetInBytes = nBodyBytes + nFrameBytes + gcSlot.slot;
+ MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
+ vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
+ }
+ hasRefs = true;
+ }
+
+#ifndef DEBUG
+ // We saw no references, and this is a non-debug build, so don't bother
+ // building the stackmap.
+ if (!hasRefs) {
+ return true;
+ }
+#endif
+
+ // Convert vec into a wasm::StackMap.
+ MOZ_ASSERT(vec.length() * sizeof(void*) == nTotalBytes);
+ wasm::StackMap* stackMap =
+ wasm::ConvertStackMapBoolVectorToStackMap(vec, hasRefs);
+ if (!stackMap) {
+ return false;
+ }
+ if (safepoint.isWasmTrap()) {
+ stackMap->setExitStubWords(trapExitLayoutNumWords);
+ }
+
+ // Record in the map how far down from the highest address the Frame* is.
+ // Take the opportunity to check that we haven't marked any part of the
+ // Frame itself as a pointer.
+ stackMap->setFrameOffsetFromTop((nInboundStackArgBytes + nFrameBytes) /
+ sizeof(void*));
+#ifdef DEBUG
+ for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
+ MOZ_ASSERT(stackMap->getBit(stackMap->header.numMappedWords -
+ stackMap->header.frameOffsetFromTop + i) == 0);
+ }
+#endif
+
+ *result = stackMap;
+ return true;
+}
+
+bool CodeGenerator::generateWasm(
+ wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
+ const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
+ size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
+ wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
+ AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");
+
+ JitSpew(JitSpew_Codegen, "# Emitting wasm code");
+
+ size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
+
+ wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
+ offsets);
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ // Very large frames are implausible, probably an attack.
+ if (frameSize() > wasm::MaxFrameSize) {
+ return decoder->fail(decoder->beginOffset(), "stack frame is too large");
+ }
+
+ if (omitOverRecursedCheck()) {
+ masm.reserveStack(frameSize());
+ } else {
+ std::pair<CodeOffset, uint32_t> pair =
+ masm.wasmReserveStackChecked(frameSize(), trapOffset);
+ CodeOffset trapInsnOffset = pair.first;
+ size_t nBytesReservedBeforeTrap = pair.second;
+
+ wasm::StackMap* functionEntryStackMap = nullptr;
+ if (!CreateStackMapForFunctionEntryTrap(
+ argTypes, trapExitLayout, trapExitLayoutNumWords,
+ nBytesReservedBeforeTrap, nInboundStackArgBytes,
+ &functionEntryStackMap)) {
+ return false;
+ }
+
+ // In debug builds, we'll always have a stack map, even if there are no
+ // refs to track.
+ MOZ_ASSERT(functionEntryStackMap);
+
+ if (functionEntryStackMap &&
+ !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
+ functionEntryStackMap)) {
+ functionEntryStackMap->destroy();
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+
+ if (!generateBody()) {
+ return false;
+ }
+
+ masm.bind(&returnLabel_);
+ wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
+
+ if (!generateOutOfLineCode()) {
+ return false;
+ }
+
+ masm.flush();
+ if (masm.oom()) {
+ return false;
+ }
+
+ offsets->end = masm.currentOffset();
+
+ MOZ_ASSERT(!masm.failureLabel()->used());
+ MOZ_ASSERT(snapshots_.listSize() == 0);
+ MOZ_ASSERT(snapshots_.RVATableSize() == 0);
+ MOZ_ASSERT(recovers_.size() == 0);
+ MOZ_ASSERT(graph.numConstants() == 0);
+ MOZ_ASSERT(osiIndices_.empty());
+ MOZ_ASSERT(icList_.empty());
+ MOZ_ASSERT(safepoints_.size() == 0);
+ MOZ_ASSERT(!scriptCounts_);
+
+ // Convert the safepoints to stackmaps and add them to our running
+ // collection thereof.
+ for (CodegenSafepointIndex& index : safepointIndices_) {
+ wasm::StackMap* stackMap = nullptr;
+ if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
+ trapExitLayoutNumWords,
+ nInboundStackArgBytes, &stackMap)) {
+ return false;
+ }
+
+ // In debug builds, we'll always have a stack map.
+ MOZ_ASSERT(stackMap);
+ if (!stackMap) {
+ continue;
+ }
+
+ if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
+ stackMap->destroy();
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool CodeGenerator::generate() {
+ AutoCreatedBy acb(masm, "CodeGenerator::generate");
+
+ JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
+ gen->outerInfo().script()->filename(),
+ gen->outerInfo().script()->lineno(),
+ gen->outerInfo().script()->column());
+
+ // Initialize the native code table with an entry for the start of the
+ // top-level script.
+ InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
+ jsbytecode* startPC = tree->script()->code();
+ BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
+ if (!addNativeToBytecodeEntry(startSite)) {
+ return false;
+ }
+
+ if (!safepoints_.init(gen->alloc())) {
+ return false;
+ }
+
+ perfSpewer_.recordOffset(masm, "Prologue");
+ if (!generatePrologue()) {
+ return false;
+ }
+
+ // Reset native => bytecode map table with top-level script and startPc.
+ if (!addNativeToBytecodeEntry(startSite)) {
+ return false;
+ }
+
+ if (!generateBody()) {
+ return false;
+ }
+
+ // Reset native => bytecode map table with top-level script and startPc.
+ if (!addNativeToBytecodeEntry(startSite)) {
+ return false;
+ }
+
+ perfSpewer_.recordOffset(masm, "Epilogue");
+ if (!generateEpilogue()) {
+ return false;
+ }
+
+ // Reset native => bytecode map table with top-level script and startPc.
+ if (!addNativeToBytecodeEntry(startSite)) {
+ return false;
+ }
+
+ perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
+ generateInvalidateEpilogue();
+
+ // native => bytecode entries for OOL code will be added
+ // by CodeGeneratorShared::generateOutOfLineCode
+ perfSpewer_.recordOffset(masm, "OOLCode");
+ if (!generateOutOfLineCode()) {
+ return false;
+ }
+
+ // Add terminal entry.
+ if (!addNativeToBytecodeEntry(startSite)) {
+ return false;
+ }
+
+ // Dump Native to bytecode entries to spew.
+ dumpNativeToBytecodeEntries();
+
+ return !masm.oom();
+}
+
+static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
+ IonCompilationId compilationId,
+ const WarpSnapshot* snapshot,
+ bool* isValid) {
+ MOZ_ASSERT(!*isValid);
+ RecompileInfo recompileInfo(script, compilationId);
+
+ JitZone* jitZone = cx->zone()->jitZone();
+
+ for (const auto* scriptSnapshot : snapshot->scripts()) {
+ JSScript* inlinedScript = scriptSnapshot->script();
+ if (inlinedScript == script) {
+ continue;
+ }
+
+ // TODO(post-Warp): This matches FinishCompilation and is necessary to
+ // ensure in-progress compilations are canceled when an inlined function
+ // becomes a debuggee. See the breakpoint-14.js jit-test.
+ // When TI is gone, try to clean this up by moving AddInlinedCompilations to
+ // WarpOracle so that we can handle this as part of addPendingRecompile
+ // instead of requiring this separate check.
+ if (inlinedScript->isDebuggee()) {
+ *isValid = false;
+ return true;
+ }
+
+ if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
+ return false;
+ }
+ }
+
+ *isValid = true;
+ return true;
+}
+
+bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
+ AutoCreatedBy acb(masm, "CodeGenerator::link");
+
+ // We cancel off-thread Ion compilations in a few places during GC, but if
+ // this compilation was performed off-thread it will already have been
+ // removed from the relevant lists by this point. Don't allow GC here.
+ JS::AutoAssertNoGC nogc(cx);
+
+ RootedScript script(cx, gen->outerInfo().script());
+ MOZ_ASSERT(!script->hasIonScript());
+
+ // Perform any read barriers which were skipped while compiling the
+ // script, which may have happened off-thread.
+ const JitRealm* jr = gen->realm->jitRealm();
+ jr->performStubReadBarriers(realmStubsToReadBarrier_);
+
+ if (scriptCounts_ && !script->hasScriptCounts() &&
+ !script->initScriptCounts(cx)) {
+ return false;
+ }
+
+ IonCompilationId compilationId =
+ cx->runtime()->jitRuntime()->nextCompilationId();
+ JitZone* jitZone = cx->zone()->jitZone();
+ jitZone->currentCompilationIdRef().emplace(compilationId);
+ auto resetCurrentId = mozilla::MakeScopeExit(
+ [jitZone] { jitZone->currentCompilationIdRef().reset(); });
+
+ // Record constraints. If an error occurred, return false, which potentially
+ // prevents future compilations. Otherwise, if an invalidation occurred, skip
+ // the current compilation.
+ bool isValid = false;
+
+ // If an inlined script is invalidated (for example, by attaching
+ // a debugger), we must also invalidate the parent IonScript.
+ if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
+ return false;
+ }
+ if (!isValid) {
+ return true;
+ }
+
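+ // The +1 accounts for the |this| value.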
+ uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);
+
+ // We encode safepoints after the OSI-point offsets have been determined.
+ if (!encodeSafepoints()) {
+ return false;
+ }
+
+ size_t numNurseryObjects = snapshot->nurseryObjects().length();
+
+ IonScript* ionScript = IonScript::New(
+ cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
+ snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
+ graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
+ osiIndices_.length(), icList_.length(), runtimeData_.length(),
+ safepoints_.size());
+ if (!ionScript) {
+ return false;
+ }
+#ifdef DEBUG
+ ionScript->setICHash(snapshot->icHash());
+#endif
+
+ auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
+ // Use js_free instead of IonScript::Destroy: the cache list is still
+ // uninitialized.
+ js_free(ionScript);
+ });
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode(cx, CodeKind::Ion);
+ if (!code) {
+ return false;
+ }
+
+ // Encode native to bytecode map if profiling is enabled.
+ if (isProfilerInstrumentationEnabled()) {
+ // Generate native-to-bytecode main table.
+ IonEntry::ScriptList scriptList;
+ if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
+ return false;
+ }
+
+ uint8_t* ionTableAddr =
+ ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
+ JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;
+
+ // Construct the IonEntry that will go into the global table.
+ auto entry = MakeJitcodeGlobalEntry<IonEntry>(
+ cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
+ if (!entry) {
+ return false;
+ }
+ (void)nativeToBytecodeMap_.release(); // Table is now owned by |entry|.
+
+ // Add entry to the global table.
+ JitcodeGlobalTable* globalTable =
+ cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(std::move(entry))) {
+ return false;
+ }
+
+ // Mark the jitcode as having a bytecode map.
+ code->setHasBytecodeMap();
+ } else {
+ // Add a dummy jitcodeGlobalTable entry.
+ auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
+ code->rawEnd());
+ if (!entry) {
+ return false;
+ }
+
+ // Add entry to the global table.
+ JitcodeGlobalTable* globalTable =
+ cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(std::move(entry))) {
+ return false;
+ }
+
+ // Mark the jitcode as having a bytecode map.
+ code->setHasBytecodeMap();
+ }
+
+ ionScript->setMethod(code);
+
+ // If the Gecko Profiler is enabled, mark IonScript as having been
+ // instrumented accordingly.
+ if (isProfilerInstrumentationEnabled()) {
+ ionScript->setHasProfilingInstrumentation();
+ }
+
+ Assembler::PatchDataWithValueCheck(
+ CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
+ ImmPtr((void*)-1));
+
+ for (CodeOffset offset : ionScriptLabels_) {
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
+ ImmPtr(ionScript), ImmPtr((void*)-1));
+ }
+
+ for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
+ void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
+ ImmPtr(entry), ImmPtr((void*)-1));
+ }
+
+ // For generating inline caches during execution.
+ if (runtimeData_.length()) {
+ ionScript->copyRuntimeData(&runtimeData_[0]);
+ }
+ if (icList_.length()) {
+ ionScript->copyICEntries(&icList_[0]);
+ }
+
+ for (size_t i = 0; i < icInfo_.length(); i++) {
+ IonIC& ic = ionScript->getICFromIndex(i);
+ Assembler::PatchDataWithValueCheck(
+ CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
+ ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
+ Assembler::PatchDataWithValueCheck(
+ CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
+ ImmPtr((void*)-1));
+ }
+
+ JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
+ (void*)code->raw());
+
+ ionScript->setInvalidationEpilogueDataOffset(
+ invalidateEpilogueData_.offset());
+ if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
+ ionScript->setOsrPc(osrPc);
+ ionScript->setOsrEntryOffset(getOsrEntryOffset());
+ }
+ ionScript->setInvalidationEpilogueOffset(invalidate_.offset());
+
+ perfSpewer_.saveProfile(cx, script, code);
+
+#ifdef MOZ_VTUNE
+ vtune::MarkScript(code, script, "ion");
+#endif
+
+ // For marking during GC.
+ if (safepointIndices_.length()) {
+ ionScript->copySafepointIndices(&safepointIndices_[0]);
+ }
+ if (safepoints_.size()) {
+ ionScript->copySafepoints(&safepoints_);
+ }
+
+ // For recovering from an Ion frame.
+ if (osiIndices_.length()) {
+ ionScript->copyOsiIndices(&osiIndices_[0]);
+ }
+ if (snapshots_.listSize()) {
+ ionScript->copySnapshots(&snapshots_);
+ }
+ MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
+ if (recovers_.size()) {
+ ionScript->copyRecovers(&recovers_);
+ }
+ if (graph.numConstants()) {
+ const Value* vp = graph.constantPool();
+ ionScript->copyConstants(vp);
+ for (size_t i = 0; i < graph.numConstants(); i++) {
+ const Value& v = vp[i];
+ if (v.isGCThing()) {
+ if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
+ sb->putWholeCell(script);
+ break;
+ }
+ }
+ }
+ }
+
+ // Attach any generated script counts to the script.
+ if (IonScriptCounts* counts = extractScriptCounts()) {
+ script->addIonCounts(counts);
+ }
+
+ // WARNING: Code after this point must be infallible!
+
+ // Copy the list of nursery objects. Note that the store buffer can add
+ // HeapPtr edges that must be cleared in IonScript::Destroy. See the
+ // infallibility warning above.
+ const auto& nurseryObjects = snapshot->nurseryObjects();
+ for (size_t i = 0; i < nurseryObjects.length(); i++) {
+ ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
+ }
+
+ // Transfer ownership of the IonScript to the JitScript. At this point enough
+ // of the IonScript must be initialized for IonScript::Destroy to work.
+ freeIonScript.release();
+ script->jitScript()->setIonScript(script, ionScript);
+
+ return true;
+}
+
+// An out-of-line path to convert a boxed int32 to either a float or double.
+class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
+ LUnboxFloatingPoint* unboxFloatingPoint_;
+
+ public:
+ explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
+ : unboxFloatingPoint_(unboxFloatingPoint) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineUnboxFloatingPoint(this);
+ }
+
+ LUnboxFloatingPoint* unboxFloatingPoint() const {
+ return unboxFloatingPoint_;
+ }
+};
+
+void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
+ const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
+ const LDefinition* result = lir->output();
+
+ // Out-of-line path to convert an int32 to the floating-point result type,
+ // or to bail out if this instruction is fallible and the value isn't int32.
+ OutOfLineUnboxFloatingPoint* ool =
+ new (alloc()) OutOfLineUnboxFloatingPoint(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ FloatRegister resultReg = ToFloatRegister(result);
+ masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
+ masm.unboxDouble(box, resultReg);
+ if (lir->type() == MIRType::Float32) {
+ masm.convertDoubleToFloat32(resultReg, resultReg);
+ }
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
+ OutOfLineUnboxFloatingPoint* ool) {
+ LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
+ const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);
+
+ if (ins->mir()->fallible()) {
+ Label bail;
+ masm.branchTestInt32(Assembler::NotEqual, value, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ }
+ masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
+ ins->type());
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
+ pushArg(ToRegister(lir->environmentChain()));
+
+ using Fn = JSObject* (*)(JSContext*, JSObject*);
+ callVM<Fn, BindVarOperation>(lir);
+}
+
+void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
+ Register obj = ToRegister(lir->getOperand(0));
+ ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
+ ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);
+
+ Register temp0 = ToRegister(lir->temp0());
+ // See comment in LIROps.yaml (x86 is short on registers)
+#ifndef JS_CODEGEN_X86
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+#endif
+
+ Label cacheHit, done;
+ if (JitOptions.enableWatchtowerMegamorphic) {
+#ifdef JS_CODEGEN_X86
+ masm.emitMegamorphicCachedSetSlot(
+ idVal, obj, temp0, value, &cacheHit,
+ [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
+ EmitPreBarrier(masm, addr, mirType);
+ });
+#else
+ masm.emitMegamorphicCachedSetSlot(
+ idVal, obj, temp0, temp1, temp2, value, &cacheHit,
+ [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
+ EmitPreBarrier(masm, addr, mirType);
+ });
+#endif
+ }
+
+ pushArg(Imm32(lir->mir()->strict()));
+ pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
+ pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
+ pushArg(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
+ callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);
+
+ masm.jump(&done);
+ masm.bind(&cacheHit);
+
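+ // Cache hit: only emit the post-write barrier when |obj| is tenured and
+ // |value| is a nursery cell.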
+ masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
+
+ saveVolatile(temp0);
+ emitPostWriteBarrier(obj);
+ restoreVolatile(temp0);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
+ const Register obj = ToRegister(ins->getOperand(0));
+ size_t slot = ins->mir()->slot();
+ ValueOperand result = ToOutValue(ins);
+
+ masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
+}
+
+void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
+ const Register obj = ToRegister(ins->getOperand(0));
+ size_t slot = ins->mir()->slot();
+ AnyRegister result = ToAnyRegister(ins->getDef(0));
+ MIRType type = ins->mir()->type();
+
+ masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
+ type, result);
+}
+
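+// Load a Value from |src| and unbox it into |dest|. MIRType::Double also
+// accepts int32 values (via ensureDouble); for the other types a fallible
+// load jumps to |fail| when the value has the wrong type.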
+template <typename T>
+static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
+ bool fallible, AnyRegister dest, Label* fail) {
+ if (type == MIRType::Double) {
+ MOZ_ASSERT(dest.isFloat());
+ masm.ensureDouble(src, dest.fpu(), fail);
+ return;
+ }
+ if (fallible) {
+ switch (type) {
+ case MIRType::Int32:
+ masm.fallibleUnboxInt32(src, dest.gpr(), fail);
+ break;
+ case MIRType::Boolean:
+ masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
+ break;
+ case MIRType::Object:
+ masm.fallibleUnboxObject(src, dest.gpr(), fail);
+ break;
+ case MIRType::String:
+ masm.fallibleUnboxString(src, dest.gpr(), fail);
+ break;
+ case MIRType::Symbol:
+ masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
+ break;
+ case MIRType::BigInt:
+ masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
+ break;
+ default:
+ MOZ_CRASH("Unexpected MIRType");
+ }
+ return;
+ }
+ masm.loadUnboxedValue(src, type, dest);
+}
+
+void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
+ const MLoadFixedSlotAndUnbox* mir = ins->mir();
+ MIRType type = mir->type();
+ Register input = ToRegister(ins->object());
+ AnyRegister result = ToAnyRegister(ins->output());
+ size_t slot = mir->slot();
+
+ Address address(input, NativeObject::getFixedSlotOffset(slot));
+
+ Label bail;
+ EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
+ if (mir->fallible()) {
+ bailoutFrom(&bail, ins->snapshot());
+ }
+}
+
+void CodeGenerator::visitLoadDynamicSlotAndUnbox(
+ LLoadDynamicSlotAndUnbox* ins) {
+ const MLoadDynamicSlotAndUnbox* mir = ins->mir();
+ MIRType type = mir->type();
+ Register input = ToRegister(ins->slots());
+ AnyRegister result = ToAnyRegister(ins->output());
+ size_t slot = mir->slot();
+
+ Address address(input, slot * sizeof(JS::Value));
+
+ Label bail;
+ EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
+ if (mir->fallible()) {
+ bailoutFrom(&bail, ins->snapshot());
+ }
+}
+
+void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
+ const MLoadElementAndUnbox* mir = ins->mir();
+ MIRType type = mir->type();
+ Register elements = ToRegister(ins->elements());
+ AnyRegister result = ToAnyRegister(ins->output());
+
+ Label bail;
+ if (ins->index()->isConstant()) {
+ NativeObject::elementsSizeMustNotOverflow();
+ int32_t offset = ToInt32(ins->index()) * sizeof(Value);
+ Address address(elements, offset);
+ EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
+ } else {
+ BaseObjectElementIndex address(elements, ToRegister(ins->index()));
+ EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
+ }
+
+ if (mir->fallible()) {
+ bailoutFrom(&bail, ins->snapshot());
+ }
+}
+
+void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
+ const Register obj = ToRegister(ins->getOperand(0));
+ const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
+ const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());
+
+ Shape* shape = ins->mir()->shape();
+ masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
+ EmitPreBarrier(masm, addr, MIRType::Shape);
+ });
+
+ // Perform the store. No pre-barrier required since this is a new
+ // initialization.
+
+ uint32_t offset = ins->mir()->slotOffset();
+ if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
+ Address slot(obj, offset);
+ masm.storeValue(value, slot);
+ } else {
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
+ Address slot(maybeTemp, offset);
+ masm.storeValue(value, slot);
+ }
+}
+
+void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
+ const Register obj = ToRegister(ins->getOperand(0));
+ const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
+ const Register temp0 = ToRegister(ins->temp0());
+ const Register temp1 = ToRegister(ins->temp1());
+
+ masm.Push(obj);
+ masm.Push(value);
+
+ using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp0);
+ masm.passABIArg(temp0);
+ masm.passABIArg(obj);
+ masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
+ masm.passABIArg(temp1);
+ masm.callWithABI<Fn, NativeObject::growSlotsPure>();
+ masm.storeCallPointerResult(temp0);
+
+ masm.Pop(value);
+ masm.Pop(obj);
+
+ bailoutIfFalseBool(temp0, ins->snapshot());
+
+ masm.storeObjShape(ins->mir()->shape(), obj,
+ [](MacroAssembler& masm, const Address& addr) {
+ EmitPreBarrier(masm, addr, MIRType::Shape);
+ });
+
+ // Perform the store. No pre-barrier required since this is a new
+ // initialization.
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
+ Address slot(temp0, ins->mir()->slotOffset());
+ masm.storeValue(value, slot);
+}
+
+void CodeGenerator::visitAddSlotAndCallAddPropHook(
+ LAddSlotAndCallAddPropHook* ins) {
+ const Register obj = ToRegister(ins->object());
+ const ValueOperand value =
+ ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);
+
+ pushArg(ImmGCPtr(ins->mir()->shape()));
+ pushArg(value);
+ pushArg(obj);
+
+ using Fn =
+ bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
+ callVM<Fn, AddSlotAndCallAddPropHook>(ins);
+}
+
+void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
+ const Register obj = ToRegister(ins->getOperand(0));
+ size_t slot = ins->mir()->slot();
+
+ const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);
+
+ Address address(obj, NativeObject::getFixedSlotOffset(slot));
+ if (ins->mir()->needsBarrier()) {
+ emitPreBarrier(address);
+ }
+
+ masm.storeValue(value, address);
+}
+
+void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
+ const Register obj = ToRegister(ins->getOperand(0));
+ size_t slot = ins->mir()->slot();
+
+ const LAllocation* value = ins->value();
+ MIRType valueType = ins->mir()->value()->type();
+
+ Address address(obj, NativeObject::getFixedSlotOffset(slot));
+ if (ins->mir()->needsBarrier()) {
+ emitPreBarrier(address);
+ }
+
+ ConstantOrRegister nvalue =
+ value->isConstant()
+ ? ConstantOrRegister(value->toConstant()->toJSValue())
+ : TypedOrValueRegister(valueType, ToAnyRegister(value));
+ masm.storeConstantOrRegister(nvalue, address);
+}
+
+void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ Register envChain = ToRegister(ins->envObj());
+ ValueOperand output = ToOutValue(ins);
+ Register temp = ToRegister(ins->temp0());
+
+ IonGetNameIC ic(liveRegs, envChain, output, temp);
+ addIC(ins, allocateIC(ic));
+}
+
+void CodeGenerator::addGetPropertyCache(LInstruction* ins,
+ LiveRegisterSet liveRegs,
+ TypedOrValueRegister value,
+ const ConstantOrRegister& id,
+ ValueOperand output) {
+ CacheKind kind = CacheKind::GetElem;
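+  // Use the more specific GetProp IC kind when the id is a constant,
+  // non-index atom; otherwise keep GetElem.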
+ if (id.constant() && id.value().isString()) {
+ JSString* idString = id.value().toString();
+ if (idString->isAtom() && !idString->asAtom().isIndex()) {
+ kind = CacheKind::GetProp;
+ }
+ }
+ IonGetPropertyIC cache(kind, liveRegs, value, id, output);
+ addIC(ins, allocateIC(cache));
+}
+
+void CodeGenerator::addSetPropertyCache(LInstruction* ins,
+ LiveRegisterSet liveRegs,
+ Register objReg, Register temp,
+ const ConstantOrRegister& id,
+ const ConstantOrRegister& value,
+ bool strict) {
+ CacheKind kind = CacheKind::SetElem;
+ if (id.constant() && id.value().isString()) {
+ JSString* idString = id.value().toString();
+ if (idString->isAtom() && !idString->asAtom().isIndex()) {
+ kind = CacheKind::SetProp;
+ }
+ }
+ IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
+ addIC(ins, allocateIC(cache));
+}
+
+ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
+ size_t n, MIRType type) {
+ if (type == MIRType::Value) {
+ return TypedOrValueRegister(ToValue(lir, n));
+ }
+
+ const LAllocation* value = lir->getOperand(n);
+ if (value->isConstant()) {
+ return ConstantOrRegister(value->toConstant()->toJSValue());
+ }
+
+ return TypedOrValueRegister(type, ToAnyRegister(value));
+}
+
+void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ TypedOrValueRegister value =
+ toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
+ ins->mir()->value()->type())
+ .reg();
+ ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
+ ins->mir()->idval()->type());
+ ValueOperand output = ToOutValue(ins);
+ addGetPropertyCache(ins, liveRegs, value, id, output);
+}
+
+void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ Register obj = ToRegister(ins->obj());
+ TypedOrValueRegister receiver =
+ toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
+ ins->mir()->receiver()->type())
+ .reg();
+ ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
+ ins->mir()->idval()->type());
+ ValueOperand output = ToOutValue(ins);
+
+ CacheKind kind = CacheKind::GetElemSuper;
+ if (id.constant() && id.value().isString()) {
+ JSString* idString = id.value().toString();
+ if (idString->isAtom() && !idString->asAtom().isIndex()) {
+ kind = CacheKind::GetPropSuper;
+ }
+ }
+
+ IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
+ addIC(ins, allocateIC(cache));
+}
+
+void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ Register envChain = ToRegister(ins->environmentChain());
+ Register output = ToRegister(ins->output());
+ Register temp = ToRegister(ins->temp0());
+
+ IonBindNameIC ic(liveRegs, envChain, output, temp);
+ addIC(ins, allocateIC(ic));
+}
+
+void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ TypedOrValueRegister value =
+ toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
+ ins->mir()->value()->type())
+ .reg();
+ TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
+ ins->mir()->idval()->type())
+ .reg();
+ Register output = ToRegister(ins->output());
+
+ IonHasOwnIC cache(liveRegs, value, id, output);
+ addIC(ins, allocateIC(cache));
+}
+
+void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ TypedOrValueRegister value =
+ toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
+ ins->mir()->value()->type())
+ .reg();
+ TypedOrValueRegister id =
+ toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
+ ins->mir()->idval()->type())
+ .reg();
+ Register output = ToRegister(ins->output());
+
+ IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
+ addIC(ins, allocateIC(cache));
+}
+
+void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
+ pushArg(ImmGCPtr(ins->mir()->name()));
+
+ using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
+ callVM<Fn, NewPrivateName>(ins);
+}
+
+void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
+ pushArg(ImmGCPtr(lir->mir()->name()));
+ pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));
+
+ using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
+ if (lir->mir()->strict()) {
+ callVM<Fn, DelPropOperation<true>>(lir);
+ } else {
+ callVM<Fn, DelPropOperation<false>>(lir);
+ }
+}
+
+void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
+ pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
+ pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));
+
+ using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
+ if (lir->mir()->strict()) {
+ callVM<Fn, DelElemOperation<true>>(lir);
+ } else {
+ callVM<Fn, DelElemOperation<false>>(lir);
+ }
+}
+
+void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
+ Register obj = ToRegister(lir->object());
+ Register iterObj = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+ Register temp2 = ToRegister(lir->temp1());
+ Register temp3 = ToRegister(lir->temp2());
+
+ using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
+ OutOfLineCode* ool = (lir->mir()->wantsIndices())
+ ? oolCallVM<Fn, GetIteratorWithIndices>(
+ lir, ArgList(obj), StoreRegisterTo(iterObj))
+ : oolCallVM<Fn, GetIterator>(
+ lir, ArgList(obj), StoreRegisterTo(iterObj));
+
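+  // Try to reuse a cached iterator for the object's shape. If there is no
+  // suitable cached iterator, jump to the OOL VM call.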
+ masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
+ ool->entry());
+
+ Register nativeIter = temp;
+ masm.loadPrivate(
+ Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
+ nativeIter);
+
+ if (lir->mir()->wantsIndices()) {
+ // At least one consumer of the output of this iterator has been optimized
+ // to use iterator indices. If the cached iterator doesn't include indices,
+ // but it was marked to indicate that we can create them if needed, then we
+ // do a VM call to replace the cached iterator with a fresh iterator
+ // including indices.
+ masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
+ NativeIteratorIndices::AvailableOnRequest,
+ ool->entry());
+ }
+
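+  // Mark the iterator as active for |obj| and link it into the list of
+  // active enumerators.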
+ Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
+ masm.storePtr(
+ obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
+ masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
+
+ Register enumeratorsAddr = temp2;
+ masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
+ masm.registerIterator(enumeratorsAddr, nativeIter, temp3);
+
+ // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
+ // We already know that |iterObj| is tenured, so we only have to check |obj|.
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
+ {
+ LiveRegisterSet save = liveVolatileRegs(lir);
+ save.takeUnchecked(temp);
+ save.takeUnchecked(temp2);
+ save.takeUnchecked(temp3);
+ if (iterObj.volatile_()) {
+ save.addUnchecked(iterObj);
+ }
+
+ masm.PushRegsInMask(save);
+ emitPostWriteBarrier(iterObj);
+ masm.PopRegsInMask(save);
+ }
+ masm.bind(&skipBarrier);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
+ pushArg(ToValue(lir, LValueToIterator::ValueIndex));
+
+ using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
+ callVM<Fn, ValueToIterator>(lir);
+}
+
+void CodeGenerator::visitIteratorHasIndicesAndBranch(
+ LIteratorHasIndicesAndBranch* lir) {
+ Register iterator = ToRegister(lir->iterator());
+ Register object = ToRegister(lir->object());
+ Register temp = ToRegister(lir->temp());
+ Register temp2 = ToRegister(lir->temp2());
+ Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
+ Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
+
+ // Check that the iterator has indices available.
+ Address nativeIterAddr(iterator,
+ PropertyIteratorObject::offsetOfIteratorSlot());
+ masm.loadPrivate(nativeIterAddr, temp);
+ masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
+ NativeIteratorIndices::Valid, ifFalse);
+
+ // Guard that the first shape stored in the iterator matches the current
+ // shape of the iterated object.
+ Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
+ masm.loadPtr(firstShapeAddr, temp);
+ masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
+ ifFalse);
+
+ if (!isNextBlock(lir->ifTrue()->lir())) {
+ masm.jump(ifTrue);
+ }
+}
+
+void CodeGenerator::visitLoadSlotByIteratorIndex(
+ LLoadSlotByIteratorIndex* lir) {
+ Register object = ToRegister(lir->object());
+ Register iterator = ToRegister(lir->iterator());
+ Register temp = ToRegister(lir->temp0());
+ Register temp2 = ToRegister(lir->temp1());
+ ValueOperand result = ToOutValue(lir);
+
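+  // The iterator's current property index encodes both an index and a kind
+  // (dynamic slot, fixed slot, or dense element). Dispatch on the kind and
+  // load the value from the matching storage.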
+ masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);
+
+ Label notDynamicSlot, notFixedSlot, done;
+ masm.branch32(Assembler::NotEqual, temp2,
+ Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
+ &notDynamicSlot);
+ masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
+ masm.loadValue(BaseValueIndex(temp2, temp), result);
+ masm.jump(&done);
+
+ masm.bind(&notDynamicSlot);
+ masm.branch32(Assembler::NotEqual, temp2,
+ Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
+ // Fixed slot
+ masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
+ masm.jump(&done);
+ masm.bind(&notFixedSlot);
+
+#ifdef DEBUG
+ Label kindOkay;
+ masm.branch32(Assembler::Equal, temp2,
+ Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
+ masm.assumeUnreachable("Invalid PropertyIndex::Kind");
+ masm.bind(&kindOkay);
+#endif
+
+ // Dense element
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
+ Label indexOkay;
+ Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
+ masm.assumeUnreachable("Dense element out of bounds");
+ masm.bind(&indexOkay);
+
+ masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitStoreSlotByIteratorIndex(
+ LStoreSlotByIteratorIndex* lir) {
+ Register object = ToRegister(lir->object());
+ Register iterator = ToRegister(lir->iterator());
+ ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
+ Register temp = ToRegister(lir->temp0());
+ Register temp2 = ToRegister(lir->temp1());
+
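+  // Compute the address of the slot or element named by the iterator's
+  // current property index, then perform a single barriered store below.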
+ masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);
+
+ Label notDynamicSlot, notFixedSlot, done, doStore;
+ masm.branch32(Assembler::NotEqual, temp2,
+ Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
+ &notDynamicSlot);
+ masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
+ masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
+ masm.jump(&doStore);
+
+ masm.bind(&notDynamicSlot);
+ masm.branch32(Assembler::NotEqual, temp2,
+ Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
+ // Fixed slot
+ masm.computeEffectiveAddress(
+ BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
+ masm.jump(&doStore);
+ masm.bind(&notFixedSlot);
+
+#ifdef DEBUG
+ Label kindOkay;
+ masm.branch32(Assembler::Equal, temp2,
+ Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
+ masm.assumeUnreachable("Invalid PropertyIndex::Kind");
+ masm.bind(&kindOkay);
+#endif
+
+ // Dense element
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
+ Label indexOkay;
+ Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
+ masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
+ masm.assumeUnreachable("Dense element out of bounds");
+ masm.bind(&indexOkay);
+
+ BaseObjectElementIndex elementAddress(temp2, temp);
+ masm.computeEffectiveAddress(elementAddress, temp);
+
+ masm.bind(&doStore);
+ Address storeAddress(temp, 0);
+ emitPreBarrier(storeAddress);
+ masm.storeValue(value, storeAddress);
+
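+  // Skip the post-write barrier if |object| is itself in the nursery or the
+  // stored value is not a nursery cell.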
+ masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
+ masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);
+
+ saveVolatile(temp2);
+ emitPostWriteBarrier(object);
+ restoreVolatile(temp2);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ Register objReg = ToRegister(ins->object());
+ Register temp = ToRegister(ins->temp0());
+
+ ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
+ ins->mir()->idval()->type());
+ ConstantOrRegister value = toConstantOrRegister(
+ ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());
+
+ addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
+ ins->mir()->strict());
+}
+
+void CodeGenerator::visitThrow(LThrow* lir) {
+ pushArg(ToValue(lir, LThrow::ValueIndex));
+
+ using Fn = bool (*)(JSContext*, HandleValue);
+ callVM<Fn, js::ThrowOperation>(lir);
+}
+
+class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
+ LTypeOfV* ins_;
+
+ public:
+ explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineTypeOfV(this);
+ }
+ LTypeOfV* ins() const { return ins_; }
+};
+
+void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
+ switch (type) {
+ case JSVAL_TYPE_OBJECT:
+ masm.move32(Imm32(JSTYPE_OBJECT), output);
+ break;
+ case JSVAL_TYPE_DOUBLE:
+ case JSVAL_TYPE_INT32:
+ masm.move32(Imm32(JSTYPE_NUMBER), output);
+ break;
+ case JSVAL_TYPE_BOOLEAN:
+ masm.move32(Imm32(JSTYPE_BOOLEAN), output);
+ break;
+ case JSVAL_TYPE_UNDEFINED:
+ masm.move32(Imm32(JSTYPE_UNDEFINED), output);
+ break;
+ case JSVAL_TYPE_NULL:
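+      // typeof null is "object".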
+ masm.move32(Imm32(JSTYPE_OBJECT), output);
+ break;
+ case JSVAL_TYPE_STRING:
+ masm.move32(Imm32(JSTYPE_STRING), output);
+ break;
+ case JSVAL_TYPE_SYMBOL:
+ masm.move32(Imm32(JSTYPE_SYMBOL), output);
+ break;
+ case JSVAL_TYPE_BIGINT:
+ masm.move32(Imm32(JSTYPE_BIGINT), output);
+ break;
+ default:
+ MOZ_CRASH("Unsupported JSValueType");
+ }
+}
+
+void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
+ Register output, Label* done,
+ Label* oolObject) {
+ Label notMatch;
+ switch (type) {
+ case JSVAL_TYPE_OBJECT:
+ // The input may be a callable object (result is "function") or
+ // may emulate undefined (result is "undefined"). Use an OOL path.
+ masm.branchTestObject(Assembler::Equal, tag, oolObject);
+ return;
+ case JSVAL_TYPE_DOUBLE:
+ case JSVAL_TYPE_INT32:
+ masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
+ break;
+ default:
+ masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
+ break;
+ }
+
+ emitTypeOfJSType(type, output);
+ masm.jump(done);
+ masm.bind(&notMatch);
+}
+
+void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
+ const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
+ Register output = ToRegister(lir->output());
+ Register tag = masm.extractTag(value, output);
+
+ Label done;
+
+ auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ const std::initializer_list<JSValueType> defaultOrder = {
+ JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
+ JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
+ JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
+
+ mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
+
+ // Generate checks for previously observed types first.
+ // The TypeDataList is sorted by descending frequency.
+ for (auto& observed : lir->mir()->observedTypes()) {
+ JSValueType type = observed.type();
+
+ // Unify number types.
+ if (type == JSVAL_TYPE_INT32) {
+ type = JSVAL_TYPE_DOUBLE;
+ }
+
+ remaining -= type;
+
+ emitTypeOfCheck(type, tag, output, &done, ool->entry());
+ }
+
+ // Generate checks for remaining types.
+ for (auto type : defaultOrder) {
+ if (!remaining.contains(type)) {
+ continue;
+ }
+ remaining -= type;
+
+ if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
+ // We can skip the check for the last remaining type, unless the type is
+ // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
+#ifdef DEBUG
+ emitTypeOfCheck(type, tag, output, &done, ool->entry());
+ masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
+#else
+ emitTypeOfJSType(type, output);
+#endif
+ } else {
+ emitTypeOfCheck(type, tag, output, &done, ool->entry());
+ }
+ }
+ MOZ_ASSERT(remaining.isEmpty());
+
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::emitTypeOfObject(Register obj, Register output,
+ Label* done) {
+ Label slowCheck, isObject, isCallable, isUndefined;
+ masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
+ &isUndefined);
+
+ masm.bind(&isCallable);
+ masm.move32(Imm32(JSTYPE_FUNCTION), output);
+ masm.jump(done);
+
+ masm.bind(&isUndefined);
+ masm.move32(Imm32(JSTYPE_UNDEFINED), output);
+ masm.jump(done);
+
+ masm.bind(&isObject);
+ masm.move32(Imm32(JSTYPE_OBJECT), output);
+ masm.jump(done);
+
+ masm.bind(&slowCheck);
+
+ saveVolatile(output);
+ using Fn = JSType (*)(JSObject*);
+ masm.setupAlignedABICall();
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, js::TypeOfObject>();
+ masm.storeCallInt32Result(output);
+ restoreVolatile(output);
+}
+
+void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
+ LTypeOfV* ins = ool->ins();
+
+ ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
+ Register temp = ToTempUnboxRegister(ins->temp0());
+ Register output = ToRegister(ins->output());
+
+ Register obj = masm.extractObject(input, temp);
+ emitTypeOfObject(obj, output, ool->rejoin());
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
+ Register obj = ToRegister(lir->object());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+ emitTypeOfObject(obj, output, &done);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
+ masm.assumeUnreachable("bad JSType");
+ masm.bind(&ok);
+#endif
+
+ static_assert(JSTYPE_UNDEFINED == 0);
+
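+  // The typeof names are stored contiguously in the runtime's name table,
+  // starting with |undefined|, so the JSType value can be used directly as
+  // an index.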
+ masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
+ masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
+}
+
+class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
+ LTypeOfIsNonPrimitiveV* ins_;
+
+ public:
+ explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
+ : ins_(ins) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
+ }
+ auto* ins() const { return ins_; }
+};
+
+class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
+ LTypeOfIsNonPrimitiveO* ins_;
+
+ public:
+ explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
+ : ins_(ins) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
+ }
+ auto* ins() const { return ins_; }
+};
+
+void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
+ Register output) {
+ saveVolatile(output);
+ using Fn = JSType (*)(JSObject*);
+ masm.setupAlignedABICall();
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, js::TypeOfObject>();
+ masm.storeCallInt32Result(output);
+ restoreVolatile(output);
+
+ auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
+ masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
+}
+
+void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
+ OutOfLineTypeOfIsNonPrimitiveV* ool) {
+ auto* ins = ool->ins();
+ ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
+ Register output = ToRegister(ins->output());
+ Register temp = ToTempUnboxRegister(ins->temp0());
+
+ Register obj = masm.extractObject(input, temp);
+
+ emitTypeOfIsObjectOOL(ins->mir(), obj, output);
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
+ OutOfLineTypeOfIsNonPrimitiveO* ool) {
+ auto* ins = ool->ins();
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ emitTypeOfIsObjectOOL(ins->mir(), input, output);
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
+ Register output, Label* success,
+ Label* fail, Label* slowCheck) {
+ Label* isObject = fail;
+ Label* isFunction = fail;
+ Label* isUndefined = fail;
+
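+  // Route the label for the queried type to |success|; all others keep
+  // pointing at |fail|. typeOfObject then branches to the matching label.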
+ switch (mir->jstype()) {
+ case JSTYPE_UNDEFINED:
+ isUndefined = success;
+ break;
+
+ case JSTYPE_OBJECT:
+ isObject = success;
+ break;
+
+ case JSTYPE_FUNCTION:
+ isFunction = success;
+ break;
+
+ case JSTYPE_STRING:
+ case JSTYPE_NUMBER:
+ case JSTYPE_BOOLEAN:
+ case JSTYPE_SYMBOL:
+ case JSTYPE_BIGINT:
+#ifdef ENABLE_RECORD_TUPLE
+ case JSTYPE_RECORD:
+ case JSTYPE_TUPLE:
+#endif
+ case JSTYPE_LIMIT:
+ MOZ_CRASH("Primitive type");
+ }
+
+ masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);
+
+ auto op = mir->jsop();
+
+ Label done;
+ masm.bind(fail);
+ masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
+ masm.jump(&done);
+ masm.bind(success);
+ masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
+ ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
+ Register output = ToRegister(lir->output());
+ Register temp = ToTempUnboxRegister(lir->temp0());
+
+ auto* mir = lir->mir();
+
+ auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
+ addOutOfLineCode(ool, mir);
+
+ Label success, fail;
+
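+  // Resolve primitive tags directly from the value's tag. Only object
+  // inputs need the typeOfObject check, because objects may be callable or
+  // may emulate undefined.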
+ switch (mir->jstype()) {
+ case JSTYPE_UNDEFINED: {
+ ScratchTagScope tag(masm, input);
+ masm.splitTagForTest(input, tag);
+
+ masm.branchTestUndefined(Assembler::Equal, tag, &success);
+ masm.branchTestObject(Assembler::NotEqual, tag, &fail);
+ break;
+ }
+
+ case JSTYPE_OBJECT: {
+ ScratchTagScope tag(masm, input);
+ masm.splitTagForTest(input, tag);
+
+ masm.branchTestNull(Assembler::Equal, tag, &success);
+ masm.branchTestObject(Assembler::NotEqual, tag, &fail);
+ break;
+ }
+
+ case JSTYPE_FUNCTION: {
+ masm.branchTestObject(Assembler::NotEqual, input, &fail);
+ break;
+ }
+
+ case JSTYPE_STRING:
+ case JSTYPE_NUMBER:
+ case JSTYPE_BOOLEAN:
+ case JSTYPE_SYMBOL:
+ case JSTYPE_BIGINT:
+#ifdef ENABLE_RECORD_TUPLE
+ case JSTYPE_RECORD:
+ case JSTYPE_TUPLE:
+#endif
+ case JSTYPE_LIMIT:
+ MOZ_CRASH("Primitive type");
+ }
+
+ Register obj = masm.extractObject(input, temp);
+
+ emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ auto* mir = lir->mir();
+
+ auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
+ addOutOfLineCode(ool, mir);
+
+ Label success, fail;
+ emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
+ ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
+ Register output = ToRegister(lir->output());
+
+ auto* mir = lir->mir();
+ auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
+
+ switch (mir->jstype()) {
+ case JSTYPE_STRING:
+ masm.testStringSet(cond, input, output);
+ break;
+ case JSTYPE_NUMBER:
+ masm.testNumberSet(cond, input, output);
+ break;
+ case JSTYPE_BOOLEAN:
+ masm.testBooleanSet(cond, input, output);
+ break;
+ case JSTYPE_SYMBOL:
+ masm.testSymbolSet(cond, input, output);
+ break;
+ case JSTYPE_BIGINT:
+ masm.testBigIntSet(cond, input, output);
+ break;
+
+ case JSTYPE_UNDEFINED:
+ case JSTYPE_OBJECT:
+ case JSTYPE_FUNCTION:
+#ifdef ENABLE_RECORD_TUPLE
+ case JSTYPE_RECORD:
+ case JSTYPE_TUPLE:
+#endif
+ case JSTYPE_LIMIT:
+ MOZ_CRASH("Non-primitive type");
+ }
+}
+
+void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
+ pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
+ pushArg(ToRegister(lir->iterator()));
+
+ using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
+ callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
+}
+
+void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
+ LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+ ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
+ ValueOperand output = ToOutValue(lir);
+
+ IonToPropertyKeyIC ic(liveRegs, input, output);
+ addIC(lir, allocateIC(ic));
+}
+
+void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
+ Register elements = ToRegister(load->elements());
+ const ValueOperand out = ToOutValue(load);
+
+ if (load->index()->isConstant()) {
+ NativeObject::elementsSizeMustNotOverflow();
+ int32_t offset = ToInt32(load->index()) * sizeof(Value);
+ masm.loadValue(Address(elements, offset), out);
+ } else {
+ masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
+ out);
+ }
+
+ Label testMagic;
+ masm.branchTestMagic(Assembler::Equal, out, &testMagic);
+ bailoutFrom(&testMagic, load->snapshot());
+}
+
+void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register index = ToRegister(lir->index());
+ Register initLength = ToRegister(lir->initLength());
+ const ValueOperand out = ToOutValue(lir);
+
+ const MLoadElementHole* mir = lir->mir();
+
+ // If the index is out of bounds, load |undefined|. Otherwise, load the
+ // value.
+ Label outOfBounds, done;
+ masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);
+
+ masm.loadValue(BaseObjectElementIndex(elements, index), out);
+
+ // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
+ masm.branchTestMagic(Assembler::NotEqual, out, &done);
+
+ if (mir->needsNegativeIntCheck()) {
+ Label loadUndefined;
+ masm.jump(&loadUndefined);
+
+ masm.bind(&outOfBounds);
+
+ bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
+
+ masm.bind(&loadUndefined);
+ } else {
+ masm.bind(&outOfBounds);
+ }
+ masm.moveValue(UndefinedValue(), out);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToTempRegisterOrInvalid(lir->temp0());
+ AnyRegister out = ToAnyRegister(lir->output());
+
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ Label fail;
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.loadFromTypedArray(storageType, source, out, temp, &fail);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.loadFromTypedArray(storageType, source, out, temp, &fail);
+ }
+
+ if (fail.used()) {
+ bailoutFrom(&fail, lir->snapshot());
+ }
+}
+
+void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ }
+
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* littleEndian = lir->littleEndian();
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
+ AnyRegister out = ToAnyRegister(lir->output());
+
+ const MLoadDataViewElement* mir = lir->mir();
+ Scalar::Type storageType = mir->storageType();
+
+ BaseIndex source(elements, ToRegister(lir->index()), TimesOne);
+
+ bool noSwap = littleEndian->isConstant() &&
+ ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
+
+  // Directly load if no byte swap is needed and the platform supports
+  // unaligned accesses for this access type. (Such support is assumed for
+  // integer types.)
+ if (noSwap && (!Scalar::isFloatingType(storageType) ||
+ MacroAssembler::SupportsFastUnalignedFPAccesses())) {
+ if (!Scalar::isBigIntType(storageType)) {
+ Label fail;
+ masm.loadFromTypedArray(storageType, source, out, temp, &fail);
+
+ if (fail.used()) {
+ bailoutFrom(&fail, lir->snapshot());
+ }
+ } else {
+ masm.load64(source, temp64);
+
+ emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
+ }
+ return;
+ }
+
+ // Load the value into a gpr register.
+ switch (storageType) {
+ case Scalar::Int16:
+ masm.load16UnalignedSignExtend(source, out.gpr());
+ break;
+ case Scalar::Uint16:
+ masm.load16UnalignedZeroExtend(source, out.gpr());
+ break;
+ case Scalar::Int32:
+ masm.load32Unaligned(source, out.gpr());
+ break;
+ case Scalar::Uint32:
+ masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
+ break;
+ case Scalar::Float32:
+ masm.load32Unaligned(source, temp);
+ break;
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.load64Unaligned(source, temp64);
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+
+ if (!noSwap) {
+ // Swap the bytes in the loaded value.
+ Label skip;
+ if (!littleEndian->isConstant()) {
+ masm.branch32(
+ MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
+ ToRegister(littleEndian), Imm32(0), &skip);
+ }
+
+ switch (storageType) {
+ case Scalar::Int16:
+ masm.byteSwap16SignExtend(out.gpr());
+ break;
+ case Scalar::Uint16:
+ masm.byteSwap16ZeroExtend(out.gpr());
+ break;
+ case Scalar::Int32:
+ masm.byteSwap32(out.gpr());
+ break;
+ case Scalar::Uint32:
+ masm.byteSwap32(out.isFloat() ? temp : out.gpr());
+ break;
+ case Scalar::Float32:
+ masm.byteSwap32(temp);
+ break;
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.byteSwap64(temp64);
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+
+ if (skip.used()) {
+ masm.bind(&skip);
+ }
+ }
+
+ // Move the value into the output register.
+ switch (storageType) {
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ break;
+ case Scalar::Uint32:
+ if (out.isFloat()) {
+ masm.convertUInt32ToDouble(temp, out.fpu());
+ } else {
+ // Bail out if the value doesn't fit into a signed int32 value. This
+ // is what allows MLoadDataViewElement to have a type() of
+ // MIRType::Int32 for UInt32 array loads.
+ bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
+ }
+ break;
+ case Scalar::Float32:
+ masm.moveGPRToFloat32(temp, out.fpu());
+ masm.canonicalizeFloat(out.fpu());
+ break;
+ case Scalar::Float64:
+ masm.moveGPR64ToDouble(temp64, out.fpu());
+ masm.canonicalizeDouble(out.fpu());
+ break;
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+void CodeGenerator::visitLoadTypedArrayElementHole(
+ LLoadTypedArrayElementHole* lir) {
+ Register object = ToRegister(lir->object());
+ const ValueOperand out = ToOutValue(lir);
+
+ // Load the length.
+ Register scratch = out.scratchReg();
+ Register scratch2 = ToRegister(lir->temp0());
+ Register index = ToRegister(lir->index());
+ masm.loadArrayBufferViewLengthIntPtr(object, scratch);
+
+ // Load undefined if index >= length.
+ Label outOfBounds, done;
+ masm.spectreBoundsCheckPtr(index, scratch, scratch2, &outOfBounds);
+
+ // Load the elements vector.
+ masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ Label fail;
+ BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
+ MacroAssembler::Uint32Mode uint32Mode =
+ lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
+ : MacroAssembler::Uint32Mode::FailOnDouble;
+ masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
+ &fail);
+ masm.jump(&done);
+
+ masm.bind(&outOfBounds);
+ masm.moveValue(UndefinedValue(), out);
+
+ if (fail.used()) {
+ bailoutFrom(&fail, lir->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
+ LLoadTypedArrayElementHoleBigInt* lir) {
+ Register object = ToRegister(lir->object());
+ const ValueOperand out = ToOutValue(lir);
+
+ // On x86 there are not enough registers. In that case reuse the output's
+ // type register as temporary.
+#ifdef JS_CODEGEN_X86
+ MOZ_ASSERT(lir->temp()->isBogusTemp());
+ Register temp = out.typeReg();
+#else
+ Register temp = ToRegister(lir->temp());
+#endif
+ Register64 temp64 = ToRegister64(lir->temp64());
+
+ // Load the length.
+ Register scratch = out.scratchReg();
+ Register index = ToRegister(lir->index());
+ masm.loadArrayBufferViewLengthIntPtr(object, scratch);
+
+ // Load undefined if index >= length.
+ Label outOfBounds, done;
+ masm.spectreBoundsCheckPtr(index, scratch, temp, &outOfBounds);
+
+ // Load the elements vector.
+ masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
+ masm.load64(source, temp64);
+
+ Register bigInt = out.scratchReg();
+ emitCreateBigInt(lir, arrayType, temp64, bigInt, temp);
+
+ masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
+ masm.jump(&done);
+
+ masm.bind(&outOfBounds);
+ masm.moveValue(UndefinedValue(), out);
+
+ masm.bind(&done);
+}
+
+template <SwitchTableType tableType>
+class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
+ using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
+ using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
+ LabelsVector labels_;
+ CodeLabelsVector codeLabels_;
+ CodeLabel start_;
+ bool isOutOfLine_;
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineSwitch(this);
+ }
+
+ public:
+ explicit OutOfLineSwitch(TempAllocator& alloc)
+ : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}
+
+ CodeLabel* start() { return &start_; }
+
+ CodeLabelsVector& codeLabels() { return codeLabels_; }
+ LabelsVector& labels() { return labels_; }
+
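+  // Jump to the code pointer stored at |index| in the table. On ARM the
+  // table is embedded inline and addressed relative to pc; on other
+  // platforms it is emitted out-of-line and addressed through |start_|.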
+ void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
+ Register base;
+ if (tableType == SwitchTableType::Inline) {
+#if defined(JS_CODEGEN_ARM)
+ base = ::js::jit::pc;
+#else
+ MOZ_CRASH("NYI: SwitchTableType::Inline");
+#endif
+ } else {
+#if defined(JS_CODEGEN_ARM)
+ MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
+#else
+ masm.mov(start(), temp);
+ base = temp;
+#endif
+ }
+ BaseIndex jumpTarget(base, index, ScalePointer);
+ masm.branchToComputedAddress(jumpTarget);
+ }
+
+ // Register an entry in the switch table.
+ void addTableEntry(MacroAssembler& masm) {
+ if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
+ (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ masm.propagateOOM(codeLabels_.append(std::move(cl)));
+ }
+ }
+  // Register the code position to which the table will jump.
+ void addCodeEntry(MacroAssembler& masm) {
+ Label entry;
+ masm.bind(&entry);
+ masm.propagateOOM(labels_.append(std::move(entry)));
+ }
+
+ void setOutOfLine() { isOutOfLine_ = true; }
+};
+
+template <SwitchTableType tableType>
+void CodeGenerator::visitOutOfLineSwitch(
+ OutOfLineSwitch<tableType>* jumpTable) {
+ jumpTable->setOutOfLine();
+ auto& labels = jumpTable->labels();
+
+ if (tableType == SwitchTableType::OutOfLine) {
+#if defined(JS_CODEGEN_ARM)
+ MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#else
+
+# if defined(JS_CODEGEN_ARM64)
+ AutoForbidPoolsAndNops afp(
+ &masm,
+ (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
+# endif
+
+ masm.haltingAlign(sizeof(void*));
+
+ // Bind the address of the jump table and reserve the space for code
+ // pointers to jump in the newly generated code.
+ masm.bind(jumpTable->start());
+ masm.addCodeLabel(*jumpTable->start());
+ for (size_t i = 0, e = labels.length(); i < e; i++) {
+ jumpTable->addTableEntry(masm);
+ }
+#endif
+ }
+
+  // Bind each reserved pointer of the jump table to its target label. The
+ // entries of the jump table need to be absolute addresses and thus must be
+ // patched after codegen is finished.
+ auto& codeLabels = jumpTable->codeLabels();
+ for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
+ auto& cl = codeLabels[i];
+ cl.target()->bind(labels[i].offset());
+ masm.addCodeLabel(cl);
+ }
+}
+
+template void CodeGenerator::visitOutOfLineSwitch(
+ OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
+template void CodeGenerator::visitOutOfLineSwitch(
+ OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
+
+template <typename T>
+static inline void StoreToTypedArray(MacroAssembler& masm,
+ Scalar::Type writeType,
+ const LAllocation* value, const T& dest) {
+ if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
+ masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
+ } else {
+ if (value->isConstant()) {
+ masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
+ } else {
+ masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
+ }
+ }
+}
+
+void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* value = lir->value();
+
+ const MStoreUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type writeType = mir->writeType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ StoreToTypedArray(masm, writeType, value, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ StoreToTypedArray(masm, writeType, value, dest);
+ }
+}
+
+void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp = ToRegister64(lir->temp());
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.storeToTypedBigIntArray(writeType, temp, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.storeToTypedBigIntArray(writeType, temp, dest);
+ }
+}
+
+void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* value = lir->value();
+ const LAllocation* littleEndian = lir->littleEndian();
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
+
+ const MStoreDataViewElement* mir = lir->mir();
+ Scalar::Type writeType = mir->writeType();
+
+ BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);
+
+ bool noSwap = littleEndian->isConstant() &&
+ ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
+
+  // Directly store if no byte swap is needed and the platform supports
+  // unaligned accesses for this access type. (Such support is assumed for
+  // integer types.)
+ if (noSwap && (!Scalar::isFloatingType(writeType) ||
+ MacroAssembler::SupportsFastUnalignedFPAccesses())) {
+ if (!Scalar::isBigIntType(writeType)) {
+ StoreToTypedArray(masm, writeType, value, dest);
+ } else {
+ masm.loadBigInt64(ToRegister(value), temp64);
+ masm.storeToTypedBigIntArray(writeType, temp64, dest);
+ }
+ return;
+ }
+
+ // Load the value into a gpr register.
+ switch (writeType) {
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ if (value->isConstant()) {
+ masm.move32(Imm32(ToInt32(value)), temp);
+ } else {
+ masm.move32(ToRegister(value), temp);
+ }
+ break;
+ case Scalar::Float32: {
+ FloatRegister fvalue = ToFloatRegister(value);
+ masm.canonicalizeFloatIfDeterministic(fvalue);
+ masm.moveFloat32ToGPR(fvalue, temp);
+ break;
+ }
+ case Scalar::Float64: {
+ FloatRegister fvalue = ToFloatRegister(value);
+ masm.canonicalizeDoubleIfDeterministic(fvalue);
+ masm.moveDoubleToGPR64(fvalue, temp64);
+ break;
+ }
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.loadBigInt64(ToRegister(value), temp64);
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+
+ if (!noSwap) {
+ // Swap the bytes in the loaded value.
+ Label skip;
+ if (!littleEndian->isConstant()) {
+ masm.branch32(
+ MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
+ ToRegister(littleEndian), Imm32(0), &skip);
+ }
+
+ switch (writeType) {
+ case Scalar::Int16:
+ masm.byteSwap16SignExtend(temp);
+ break;
+ case Scalar::Uint16:
+ masm.byteSwap16ZeroExtend(temp);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ masm.byteSwap32(temp);
+ break;
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.byteSwap64(temp64);
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+
+ if (skip.used()) {
+ masm.bind(&skip);
+ }
+ }
+
+ // Store the value into the destination.
+ switch (writeType) {
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.store16Unaligned(temp, dest);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ masm.store32Unaligned(temp, dest);
+ break;
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ masm.store64Unaligned(temp64, dest);
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+void CodeGenerator::visitStoreTypedArrayElementHole(
+ LStoreTypedArrayElementHole* lir) {
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* value = lir->value();
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ Register index = ToRegister(lir->index());
+ const LAllocation* length = lir->length();
+ Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
+
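+  // Out-of-bounds writes to typed arrays are ignored, so skip the store
+  // when the index is not below the length.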
+ Label skip;
+ if (length->isRegister()) {
+ masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
+ } else {
+ masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
+ }
+
+ BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
+ StoreToTypedArray(masm, arrayType, value, dest);
+
+ masm.bind(&skip);
+}
+
+void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
+ LStoreTypedArrayElementHoleBigInt* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp = ToRegister64(lir->temp());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ Register index = ToRegister(lir->index());
+ const LAllocation* length = lir->length();
+ Register spectreTemp = temp.scratchReg();
+
+ Label skip;
+ if (length->isRegister()) {
+ masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
+ } else {
+ masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
+ }
+
+ masm.loadBigInt64(value, temp);
+
+ BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
+ masm.storeToTypedBigIntArray(arrayType, temp, dest);
+
+ masm.bind(&skip);
+}
+
+void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
+ Register value = ToRegister(lir->value());
+ Register output = ToRegister(lir->output());
+
+ masm.atomicIsLockFreeJS(value, output);
+}
+
+void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(output == ToRegister(lir->input()));
+ masm.clampIntToUint8(output);
+}
+
+void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ masm.clampDoubleToUint8(input, output);
+}
+
+void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
+ ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
+ FloatRegister tempFloat = ToFloatRegister(lir->temp0());
+ Register output = ToRegister(lir->output());
+
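+  // String inputs are converted through an OOL call to StringToNumber;
+  // inputs that cannot be clamped bail out.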
+ using Fn = bool (*)(JSContext*, JSString*, double*);
+ OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
+ lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
+ Label* stringEntry = oolString->entry();
+ Label* stringRejoin = oolString->rejoin();
+
+ Label fails;
+ masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
+ output, &fails);
+
+ bailoutFrom(&fails, lir->snapshot());
+}
+
+void CodeGenerator::visitInCache(LInCache* ins) {
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+
+ ConstantOrRegister key =
+ toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
+ Register object = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Register temp = ToRegister(ins->temp0());
+
+ IonInIC cache(liveRegs, key, object, output, temp);
+ addIC(ins, allocateIC(cache));
+}
+
+void CodeGenerator::visitInArray(LInArray* lir) {
+ const MInArray* mir = lir->mir();
+ Register elements = ToRegister(lir->elements());
+ Register initLength = ToRegister(lir->initLength());
+ Register output = ToRegister(lir->output());
+
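+  // |index in array| holds if the index is below the initialized length and
+  // the element at that index is not a hole.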
+ Label falseBranch, done, trueBranch;
+
+ if (lir->index()->isConstant()) {
+ int32_t index = ToInt32(lir->index());
+
+ if (index < 0) {
+ MOZ_ASSERT(mir->needsNegativeIntCheck());
+ bailout(lir->snapshot());
+ return;
+ }
+
+ masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
+ &falseBranch);
+
+ NativeObject::elementsSizeMustNotOverflow();
+ Address address = Address(elements, index * sizeof(Value));
+ masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
+ } else {
+ Register index = ToRegister(lir->index());
+
+ Label negativeIntCheck;
+ Label* failedInitLength = &falseBranch;
+ if (mir->needsNegativeIntCheck()) {
+ failedInitLength = &negativeIntCheck;
+ }
+
+ masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);
+
+ BaseObjectElementIndex address(elements, index);
+ masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
+
+ if (mir->needsNegativeIntCheck()) {
+ masm.jump(&trueBranch);
+ masm.bind(&negativeIntCheck);
+
+ bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
+
+ masm.jump(&falseBranch);
+ }
+ }
+
+ masm.bind(&trueBranch);
+ masm.move32(Imm32(1), output);
+ masm.jump(&done);
+
+ masm.bind(&falseBranch);
+ masm.move32(Imm32(0), output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* index = lir->index();
+
+ Label testMagic;
+ if (index->isConstant()) {
+ Address address(elements, ToInt32(index) * sizeof(js::Value));
+ masm.branchTestMagic(Assembler::Equal, address, &testMagic);
+ } else {
+ BaseObjectElementIndex address(elements, ToRegister(index));
+ masm.branchTestMagic(Assembler::Equal, address, &testMagic);
+ }
+ bailoutFrom(&testMagic, lir->snapshot());
+}
+
+void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
+ Register protoReg = ToRegister(ins->rhs());
+ emitInstanceOf(ins, protoReg);
+}
+
+void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
+ Register protoReg = ToRegister(ins->rhs());
+ emitInstanceOf(ins, protoReg);
+}
+
+void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
+  // This path implements fun_hasInstance when the function's prototype is
+  // known to be the object in |protoReg|.
+
+ Label done;
+ Register output = ToRegister(ins->getDef(0));
+
+ // If the lhs is a primitive, the result is false.
+ Register objReg;
+ if (ins->isInstanceOfV()) {
+ Label isObject;
+ ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
+ masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
+ masm.mov(ImmWord(0), output);
+ masm.jump(&done);
+ masm.bind(&isObject);
+ objReg = masm.extractObject(lhsValue, output);
+ } else {
+ objReg = ToRegister(ins->toInstanceOfO()->lhs());
+ }
+
+ // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
+ // This follows the main loop of js::IsPrototypeOf, though additionally breaks
+ // out of the loop on Proxy::LazyProto.
+
+ // Load the lhs's prototype.
+ masm.loadObjProto(objReg, output);
+
+ Label testLazy;
+ {
+ Label loopPrototypeChain;
+ masm.bind(&loopPrototypeChain);
+
+ // Test for the target prototype object.
+ Label notPrototypeObject;
+ masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
+ masm.mov(ImmWord(1), output);
+ masm.jump(&done);
+ masm.bind(&notPrototypeObject);
+
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ // Test for nullptr or Proxy::LazyProto
+ masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);
+
+ // Load the current object's prototype.
+ masm.loadObjProto(output, output);
+
+ masm.jump(&loopPrototypeChain);
+ }
+
+ // Make a VM call if an object with a lazy proto was found on the prototype
+ // chain. This currently occurs only for cross compartment wrappers, which
+ // we do not expect to be compared with non-wrapper functions from this
+ // compartment. Otherwise, we stopped on a nullptr prototype and the output
+ // register is already correct.
+
+ using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
+ auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
+ StoreRegisterTo(output));
+
+ // Regenerate the original lhs object for the VM call.
+ Label regenerate, *lazyEntry;
+ if (objReg != output) {
+ lazyEntry = ool->entry();
+ } else {
+ masm.bind(&regenerate);
+ lazyEntry = &regenerate;
+ if (ins->isInstanceOfV()) {
+ ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
+ objReg = masm.extractObject(lhsValue, output);
+ } else {
+ objReg = ToRegister(ins->toInstanceOfO()->lhs());
+ }
+ MOZ_ASSERT(objReg == output);
+ masm.jump(ool->entry());
+ }
+
+ masm.bind(&testLazy);
+ masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);
+
+ masm.bind(&done);
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
+ // The Lowering ensures that RHS is an object, and that LHS is a value.
+ LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
+ TypedOrValueRegister lhs =
+ TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
+ addIC(ins, allocateIC(ic));
+}
+
+void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
+ const Register JSContextReg = ToRegister(ins->getJSContextReg());
+ const Register ObjectReg = ToRegister(ins->getObjectReg());
+ const Register PrivateReg = ToRegister(ins->getPrivReg());
+ const Register ValueReg = ToRegister(ins->getValueReg());
+
+ Label haveValue;
+ if (ins->mir()->valueMayBeInSlot()) {
+ size_t slot = ins->mir()->domMemberSlotIndex();
+    // It's a bit annoying to redo these slot calculations, which duplicate
+ // LSlots and a few other things like that, but I'm not sure there's a
+ // way to reuse those here.
+ //
+ // If this ever gets fixed to work with proxies (by not assuming that
+ // reserved slot indices, which is what domMemberSlotIndex() returns,
+ // match fixed slot indices), we can reenable MGetDOMProperty for
+ // proxies in IonBuilder.
+ if (slot < NativeObject::MAX_FIXED_SLOTS) {
+ masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
+ JSReturnOperand);
+ } else {
+ // It's a dynamic slot.
+ slot -= NativeObject::MAX_FIXED_SLOTS;
+ // Use PrivateReg as a scratch register for the slots pointer.
+ masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
+ PrivateReg);
+ masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
+ JSReturnOperand);
+ }
+ masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
+ }
+
+ DebugOnly<uint32_t> initialStack = masm.framePushed();
+
+ masm.checkStackAlignment();
+
+ // Make space for the outparam. Pre-initialize it to UndefinedValue so we
+ // can trace it at GC time.
+ masm.Push(UndefinedValue());
+ // We pass the pointer to our out param as an instance of
+ // JSJitGetterCallArgs, since on the binary level it's the same thing.
+ static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
+ masm.moveStackPtrTo(ValueReg);
+
+ masm.Push(ObjectReg);
+
+ LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
+
+ // Rooting will happen at GC time.
+ masm.moveStackPtrTo(ObjectReg);
+
+ Realm* getterRealm = ins->mir()->getterRealm();
+ if (gen->realm->realmPtr() != getterRealm) {
+ // We use JSContextReg as scratch register here.
+ masm.switchToRealm(getterRealm, JSContextReg);
+ }
+
+ uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
+ masm.loadJSContext(JSContextReg);
+ masm.enterFakeExitFrame(JSContextReg, JSContextReg,
+ ExitFrameType::IonDOMGetter);
+
+ markSafepointAt(safepointOffset, ins);
+
+ masm.setupAlignedABICall();
+ masm.loadJSContext(JSContextReg);
+ masm.passABIArg(JSContextReg);
+ masm.passABIArg(ObjectReg);
+ masm.passABIArg(PrivateReg);
+ masm.passABIArg(ValueReg);
+ ensureOsiSpace();
+ masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
+ MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ if (ins->mir()->isInfallible()) {
+ masm.loadValue(Address(masm.getStackPointer(),
+ IonDOMExitFrameLayout::offsetOfResult()),
+ JSReturnOperand);
+ } else {
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ masm.loadValue(Address(masm.getStackPointer(),
+ IonDOMExitFrameLayout::offsetOfResult()),
+ JSReturnOperand);
+ }
+
+ // Switch back to the current realm if needed. Note: if the getter threw an
+ // exception, the exception handler will do this.
+ if (gen->realm->realmPtr() != getterRealm) {
+ static_assert(!JSReturnOperand.aliases(ReturnReg),
+ "Clobbering ReturnReg should not affect the return value");
+ masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
+ }
+
+ // Until C++ code is instrumented against Spectre, prevent speculative
+ // execution from returning any private data.
+ if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
+ masm.speculationBarrier();
+ }
+
+ masm.adjustStack(IonDOMExitFrameLayout::Size());
+
+ masm.bind(&haveValue);
+
+ MOZ_ASSERT(masm.framePushed() == initialStack);
+}
+
+void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
+ // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
+ // use an LLoadFixedSlotV or some subclass of it for this case: that would
+ // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
+ // we'd have to duplicate a bunch of stuff we now get for free from
+ // MGetDOMProperty.
+ //
+ // If this ever gets fixed to work with proxies (by not assuming that
+ // reserved slot indices, which is what domMemberSlotIndex() returns,
+ // match fixed slot indices), we can reenable MGetDOMMember for
+ // proxies in IonBuilder.
+ Register object = ToRegister(ins->object());
+ size_t slot = ins->mir()->domMemberSlotIndex();
+ ValueOperand result = ToOutValue(ins);
+
+ masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
+ result);
+}
+
+void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
+ // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
+ // use an LLoadFixedSlotT or some subclass of it for this case: that would
+ // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
+ // we'd have to duplicate a bunch of stuff we now get for free from
+ // MGetDOMProperty.
+ //
+ // If this ever gets fixed to work with proxies (by not assuming that
+ // reserved slot indices, which is what domMemberSlotIndex() returns,
+ // match fixed slot indices), we can reenable MGetDOMMember for
+ // proxies in IonBuilder.
+ Register object = ToRegister(ins->object());
+ size_t slot = ins->mir()->domMemberSlotIndex();
+ AnyRegister result = ToAnyRegister(ins->getDef(0));
+ MIRType type = ins->mir()->type();
+
+ masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
+ type, result);
+}
+
+void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
+ const Register JSContextReg = ToRegister(ins->getJSContextReg());
+ const Register ObjectReg = ToRegister(ins->getObjectReg());
+ const Register PrivateReg = ToRegister(ins->getPrivReg());
+ const Register ValueReg = ToRegister(ins->getValueReg());
+
+ DebugOnly<uint32_t> initialStack = masm.framePushed();
+
+ masm.checkStackAlignment();
+
+ // Push the argument. Rooting will happen at GC time.
+ ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
+ masm.Push(argVal);
+ // We pass the pointer to the pushed argument as an instance of
+ // JSJitSetterCallArgs, since on the binary level it's the same thing.
+ static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
+ masm.moveStackPtrTo(ValueReg);
+
+ masm.Push(ObjectReg);
+
+ LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
+
+ // Rooting will happen at GC time.
+ masm.moveStackPtrTo(ObjectReg);
+
+ Realm* setterRealm = ins->mir()->setterRealm();
+ if (gen->realm->realmPtr() != setterRealm) {
+ // We use JSContextReg as a scratch register here.
+ masm.switchToRealm(setterRealm, JSContextReg);
+ }
+
+ uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
+ masm.loadJSContext(JSContextReg);
+ masm.enterFakeExitFrame(JSContextReg, JSContextReg,
+ ExitFrameType::IonDOMSetter);
+
+ markSafepointAt(safepointOffset, ins);
+
+ masm.setupAlignedABICall();
+ masm.loadJSContext(JSContextReg);
+ masm.passABIArg(JSContextReg);
+ masm.passABIArg(ObjectReg);
+ masm.passABIArg(PrivateReg);
+ masm.passABIArg(ValueReg);
+ ensureOsiSpace();
+ masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
+ MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Switch back to the current realm if needed. Note: if the setter threw an
+ // exception, the exception handler will do this.
+ if (gen->realm->realmPtr() != setterRealm) {
+ masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
+ }
+
+ masm.adjustStack(IonDOMExitFrameLayout::Size());
+
+ MOZ_ASSERT(masm.framePushed() == initialStack);
+}
+
+void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
+ Register proxy = ToRegister(ins->proxy());
+ ValueOperand out = ToOutValue(ins);
+
+ masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
+ out.scratchReg());
+ masm.loadValue(Address(out.scratchReg(),
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
+ out);
+}
+
+void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
+ LLoadDOMExpandoValueGuardGeneration* ins) {
+ Register proxy = ToRegister(ins->proxy());
+ ValueOperand out = ToOutValue(ins);
+
+ Label bail;
+ masm.loadDOMExpandoValueGuardGeneration(proxy, out,
+ ins->mir()->expandoAndGeneration(),
+ ins->mir()->generation(), &bail);
+ bailoutFrom(&bail, ins->snapshot());
+}
+
+void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
+ LLoadDOMExpandoValueIgnoreGeneration* ins) {
+ Register proxy = ToRegister(ins->proxy());
+ ValueOperand out = ToOutValue(ins);
+
+ masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
+ out.scratchReg());
+
+ // Load the ExpandoAndGeneration* from the PrivateValue.
+ masm.loadPrivate(
+ Address(out.scratchReg(),
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
+ out.scratchReg());
+
+ // Load expandoAndGeneration->expando into the output Value register.
+ masm.loadValue(
+ Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
+}
+
+void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
+ LGuardDOMExpandoMissingOrGuardShape* ins) {
+ Register temp = ToRegister(ins->temp0());
+ ValueOperand input =
+ ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);
+
+ Label done;
+ masm.branchTestUndefined(Assembler::Equal, input, &done);
+
+ masm.debugAssertIsObject(input);
+ masm.unboxObject(input, temp);
+ // The expando object is not used in this case, so we don't need Spectre
+ // mitigations.
+ Label bail;
+ masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
+ ins->mir()->shape(), &bail);
+ bailoutFrom(&bail, ins->snapshot());
+
+ masm.bind(&done);
+}
+
+class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
+ Register object_;
+ Register output_;
+
+ public:
+ OutOfLineIsCallable(Register object, Register output)
+ : object_(object), output_(output) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineIsCallable(this);
+ }
+ Register object() const { return object_; }
+ Register output() const { return output_; }
+};
+
+void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.isCallable(object, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
+ ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
+ Register output = ToRegister(ins->output());
+ Register temp = ToRegister(ins->temp0());
+
+ Label notObject;
+ masm.fallibleUnboxObject(val, temp, &notObject);
+
+ OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.isCallable(temp, output, ool->entry());
+ masm.jump(ool->rejoin());
+
+ masm.bind(&notObject);
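+ // Non-object values are never callable.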
+ masm.move32(Imm32(0), output);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
+ Register object = ool->object();
+ Register output = ool->output();
+
+ saveVolatile(output);
+ using Fn = bool (*)(JSObject* obj);
+ masm.setupAlignedABICall();
+ masm.passABIArg(object);
+ masm.callWithABI<Fn, ObjectIsCallable>();
+ masm.storeCallBoolResult(output);
+ restoreVolatile(output);
+ masm.jump(ool->rejoin());
+}
+
+class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
+ LIsConstructor* ins_;
+
+ public:
+ explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineIsConstructor(this);
+ }
+ LIsConstructor* ins() const { return ins_; }
+};
+
+void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.isConstructor(object, output, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
+ LIsConstructor* ins = ool->ins();
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ saveVolatile(output);
+ using Fn = bool (*)(JSObject* obj);
+ masm.setupAlignedABICall();
+ masm.passABIArg(object);
+ masm.callWithABI<Fn, ObjectIsConstructor>();
+ masm.storeCallBoolResult(output);
+ restoreVolatile(output);
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitIsCrossRealmArrayConstructor(
+ LIsCrossRealmArrayConstructor* ins) {
+ Register object = ToRegister(ins->object());
+ Register output = ToRegister(ins->output());
+
+ masm.setIsCrossRealmArrayConstructor(object, output);
+}
+
+static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
+ Register obj, Register output,
+ Label* notArray = nullptr) {
+ masm.loadObjClassUnsafe(obj, output);
+
+ Label isArray;
+ masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
+ &isArray);
+
+ // Branch to OOL path if it's a proxy.
+ masm.branchTestClassIsProxy(true, output, ool->entry());
+
+ if (notArray) {
+ masm.bind(notArray);
+ }
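+ // Anything that reaches here is neither an Array nor a proxy, so the
+ // result is false.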
+ masm.move32(Imm32(0), output);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&isArray);
+ masm.move32(Imm32(1), output);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
+ Register object = ToRegister(lir->object());
+ Register output = ToRegister(lir->output());
+
+ using Fn = bool (*)(JSContext*, HandleObject, bool*);
+ OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
+ lir, ArgList(object), StoreRegisterTo(output));
+ EmitObjectIsArray(masm, ool, object, output);
+}
+
+void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
+ ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ Label notArray;
+ masm.fallibleUnboxObject(val, temp, &notArray);
+
+ using Fn = bool (*)(JSContext*, HandleObject, bool*);
+ OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
+ lir, ArgList(temp), StoreRegisterTo(output));
+ EmitObjectIsArray(masm, ool, temp, output, &notArray);
+}
+
+void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
+ Register object = ToRegister(lir->object());
+ Register output = ToRegister(lir->output());
+
+ OutOfLineCode* ool = nullptr;
+ if (lir->mir()->isPossiblyWrapped()) {
+ using Fn = bool (*)(JSContext*, JSObject*, bool*);
+ ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
+ lir, ArgList(object), StoreRegisterTo(output));
+ }
+
+ Label notTypedArray;
+ Label done;
+
+ masm.loadObjClassUnsafe(object, output);
+ masm.branchIfClassIsNotTypedArray(output, &notTypedArray);
+
+ masm.move32(Imm32(1), output);
+ masm.jump(&done);
+ masm.bind(&notTypedArray);
+ if (ool) {
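+ // The object may be a wrapper around a typed array, so defer to the VM
+ // call to find out.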
+ masm.branchTestClassIsProxy(true, output, ool->entry());
+ }
+ masm.move32(Imm32(0), output);
+ masm.bind(&done);
+ if (ool) {
+ masm.bind(ool->rejoin());
+ }
+}
+
+void CodeGenerator::visitIsObject(LIsObject* ins) {
+ Register output = ToRegister(ins->output());
+ ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
+ masm.testObjectSet(Assembler::Equal, value, output);
+}
+
+void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
+ ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
+ testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
+}
+
+void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
+ Register output = ToRegister(ins->output());
+ ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);
+
+ Label isNotNull, done;
+ masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);
+
+ masm.move32(Imm32(1), output);
+ masm.jump(&done);
+
+ masm.bind(&isNotNull);
+ masm.testUndefinedSet(Assembler::Equal, value, output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitIsNullOrUndefinedAndBranch(
+ LIsNullOrUndefinedAndBranch* ins) {
+ Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
+ Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
+ ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);
+
+ ScratchTagScope tag(masm, value);
+ masm.splitTagForTest(value, tag);
+
+ masm.branchTestNull(Assembler::Equal, tag, ifTrue);
+ masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);
+
+ if (!isNextBlock(ins->ifFalse()->lir())) {
+ masm.jump(ifFalse);
+ }
+}
+
+void CodeGenerator::loadOutermostJSScript(Register reg) {
+ // The "outermost" JSScript means the script that we are compiling
+ // basically; this is not always the script associated with the
+ // current basic block, which might be an inlined script.
+
+ MIRGraph& graph = current->mir()->graph();
+ MBasicBlock* entryBlock = graph.entryBlock();
+ masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
+}
+
+void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
+ // The current JSScript means the script for the current
+ // basic block. This may be an inlined script.
+
+ JSScript* script = block->info().script();
+ masm.movePtr(ImmGCPtr(script), reg);
+}
+
+void CodeGenerator::visitHasClass(LHasClass* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register output = ToRegister(ins->output());
+
+ masm.loadObjClassUnsafe(lhs, output);
+ masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
+ output);
+}
+
+void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp0());
+
+ // branchTestObjClass may zero the object register on speculative paths
+ // (we should have a defineReuseInput allocation in this case).
+ Register spectreRegToZero = lhs;
+
+ Label notEqual;
+
+ masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
+ temp, spectreRegToZero, &notEqual);
+
+ // Can't return null here, so bail.
+ bailoutFrom(&notEqual, ins->snapshot());
+}
+
+void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp0());
+
+ // branchTestObjClass may zero the object register on speculative paths
+ // (we should have a defineReuseInput allocation in this case).
+ Register spectreRegToZero = lhs;
+
+ Label notEqual;
+
+ masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
+ &notEqual);
+
+ // Can't return null here, so bail.
+ bailoutFrom(&notEqual, ins->snapshot());
+}
+
+void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
+ Register obj = ToRegister(lir->lhs());
+ Register temp = ToRegister(lir->temp0());
+
+ using Fn = JSString* (*)(JSContext*, JSObject*);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp);
+ masm.passABIArg(temp);
+ masm.passABIArg(obj);
+ masm.callWithABI<Fn, js::ObjectClassToString>();
+
+ bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
+}
+
+void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
+
+void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
+
+void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
+ // Don't emit a jump to the return label if this is the last block.
+ if (current->mir() != *gen->graph().poBegin()) {
+ masm.jump(&returnLabel_);
+ }
+}
+
+void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
+ // Don't emit a jump to the return label if this is the last block.
+ if (current->mir() != *gen->graph().poBegin()) {
+ masm.jump(&returnLabel_);
+ }
+}
+
+void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
+ // Don't emit a jump to the return label if this is the last block.
+ if (current->mir() != *gen->graph().poBegin()) {
+ masm.jump(&returnLabel_);
+ }
+}
+
+void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
+ Register input) {
+ // Check the lower bound.
+ if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
+ Label success;
+ if (type == MIRType::Int32 || type == MIRType::Boolean) {
+ masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
+ &success);
+ } else {
+ MOZ_ASSERT(type == MIRType::IntPtr);
+ masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
+ &success);
+ }
+ masm.assumeUnreachable(
+ "Integer input should be equal or higher than Lowerbound.");
+ masm.bind(&success);
+ }
+
+ // Check the upper bound.
+ if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
+ Label success;
+ if (type == MIRType::Int32 || type == MIRType::Boolean) {
+ masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
+ &success);
+ } else {
+ MOZ_ASSERT(type == MIRType::IntPtr);
+ masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
+ &success);
+ }
+ masm.assumeUnreachable(
+ "Integer input should be lower or equal than Upperbound.");
+ masm.bind(&success);
+ }
+
+ // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
+ // r->exponent(), there's nothing to check, because if we ended up in the
+ // integer range checking code, the value is already in an integer register
+ // in the integer range.
+}
+
+void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
+ FloatRegister temp) {
+ // Check the lower bound.
+ if (r->hasInt32LowerBound()) {
+ Label success;
+ masm.loadConstantDouble(r->lower(), temp);
+ if (r->canBeNaN()) {
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
+ }
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
+ &success);
+ masm.assumeUnreachable(
+ "Double input should be equal or higher than Lowerbound.");
+ masm.bind(&success);
+ }
+ // Check the upper bound.
+ if (r->hasInt32UpperBound()) {
+ Label success;
+ masm.loadConstantDouble(r->upper(), temp);
+ if (r->canBeNaN()) {
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
+ }
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
+ masm.assumeUnreachable(
+ "Double input should be lower or equal than Upperbound.");
+ masm.bind(&success);
+ }
+
+ // This code does not yet check r->canHaveFractionalPart(). This would require
+ // new assembler interfaces to make rounding instructions available.
+
+ if (!r->canBeNegativeZero()) {
+ Label success;
+
+ // First, test for being equal to 0.0, which also includes -0.0.
+ masm.loadConstantDouble(0.0, temp);
+ masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
+ &success);
+
+ // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
+ // -Infinity instead of Infinity.
+ masm.loadConstantDouble(1.0, temp);
+ masm.divDouble(input, temp);
+ masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);
+
+ masm.assumeUnreachable("Input shouldn't be negative zero.");
+
+ masm.bind(&success);
+ }
+
+ if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
+ r->exponent() < FloatingPoint<double>::kExponentBias) {
+ // Check the bounds implied by the maximum exponent.
+ Label exponentLoOk;
+ masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
+ &exponentLoOk);
+ masm.assumeUnreachable("Check for exponent failed.");
+ masm.bind(&exponentLoOk);
+
+ Label exponentHiOk;
+ masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
+ &exponentHiOk);
+ masm.assumeUnreachable("Check for exponent failed.");
+ masm.bind(&exponentHiOk);
+ } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
+ // If we think the value can't be NaN, check that it isn't.
+ Label notnan;
+ masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
+ masm.assumeUnreachable("Input shouldn't be NaN.");
+ masm.bind(&notnan);
+
+ // If we think the value also can't be an infinity, check that it isn't.
+ if (!r->canBeInfiniteOrNaN()) {
+ Label notposinf;
+ masm.loadConstantDouble(PositiveInfinity<double>(), temp);
+ masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
+ masm.assumeUnreachable("Input shouldn't be +Inf.");
+ masm.bind(&notposinf);
+
+ Label notneginf;
+ masm.loadConstantDouble(NegativeInfinity<double>(), temp);
+ masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
+ masm.assumeUnreachable("Input shouldn't be -Inf.");
+ masm.bind(&notneginf);
+ }
+ }
+}
+
+void CodeGenerator::visitAssertClass(LAssertClass* ins) {
+ Register obj = ToRegister(ins->input());
+ Register temp = ToRegister(ins->getTemp(0));
+
+ Label success;
+ if (ins->mir()->getClass() == &FunctionClass) {
+ // Allow both possible function classes here.
+ masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
+ temp, &success);
+ } else {
+ masm.branchTestObjClassNoSpectreMitigations(
+ Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
+ }
+ masm.assumeUnreachable("Wrong KnownClass during run-time");
+ masm.bind(&success);
+}
+
+void CodeGenerator::visitAssertShape(LAssertShape* ins) {
+ Register obj = ToRegister(ins->input());
+
+ Label success;
+ masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
+ ins->mir()->shape(), &success);
+ masm.assumeUnreachable("Wrong Shape during run-time");
+ masm.bind(&success);
+}
+
+void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
+ Register input = ToRegister(ins->input());
+ const Range* r = ins->range();
+
+ emitAssertRangeI(ins->mir()->input()->type(), r, input);
+}
+
+void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister temp = ToFloatRegister(ins->temp());
+ const Range* r = ins->range();
+
+ emitAssertRangeD(r, input, temp);
+}
+
+void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister temp = ToFloatRegister(ins->temp());
+ FloatRegister temp2 = ToFloatRegister(ins->temp2());
+
+ const Range* r = ins->range();
+
+ masm.convertFloat32ToDouble(input, temp);
+ emitAssertRangeD(r, temp, temp2);
+}
+
+void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
+ const Range* r = ins->range();
+ const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
+ Label done;
+
+ {
+ ScratchTagScope tag(masm, value);
+ masm.splitTagForTest(value, tag);
+
+ {
+ Label isNotInt32;
+ masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
+ {
+ ScratchTagScopeRelease _(&tag);
+ Register unboxInt32 = ToTempUnboxRegister(ins->temp());
+ Register input = masm.extractInt32(value, unboxInt32);
+ emitAssertRangeI(MIRType::Int32, r, input);
+ masm.jump(&done);
+ }
+ masm.bind(&isNotInt32);
+ }
+
+ {
+ Label isNotDouble;
+ masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
+ {
+ ScratchTagScopeRelease _(&tag);
+ FloatRegister input = ToFloatRegister(ins->floatTemp1());
+ FloatRegister temp = ToFloatRegister(ins->floatTemp2());
+ masm.unboxDouble(value, input);
+ emitAssertRangeD(r, input, temp);
+ masm.jump(&done);
+ }
+ masm.bind(&isNotDouble);
+ }
+ }
+
+ masm.assumeUnreachable("Incorrect range for Value.");
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
+ using Fn = bool (*)(JSContext*);
+ OutOfLineCode* ool =
+ oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());
+
+ const void* interruptAddr = gen->runtime->addressOfInterruptBits();
+ masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
+ ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineResumableWasmTrap(
+ OutOfLineResumableWasmTrap* ool) {
+ LInstruction* lir = ool->lir();
+ masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
+
+ markSafepointAt(masm.currentOffset(), lir);
+
+ // Note that masm.framePushed() doesn't include the register dump area.
+ // That will be taken into account when the StackMap is created from the
+ // LSafepoint.
+ lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
+ lir->safepoint()->setIsWasmTrap();
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitOutOfLineAbortingWasmTrap(
+ OutOfLineAbortingWasmTrap* ool) {
+ masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
+}
+
+void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+
+ OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
+ lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
+ wasm::Trap::CheckInterrupt);
+ addOutOfLineCode(ool, lir->mir());
+ masm.branch32(
+ Assembler::NotEqual,
+ Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
+ Imm32(0), ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ const MWasmTrap* mir = lir->mir();
+
+ masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
+}
+
+void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ const MWasmTrapIfNull* mir = lir->mir();
+ Label nonNull;
+ Register input = ToRegister(lir->object());
+
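+ // A null wasm reference is represented as a null pointer.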
+ masm.branchTestPtr(Assembler::NonZero, input, input, &nonNull);
+ masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
+ masm.bind(&nonNull);
+}
+
+void CodeGenerator::visitWasmGcObjectIsSubtypeOfAbstract(
+ LWasmGcObjectIsSubtypeOfAbstract* ins) {
+ MOZ_ASSERT(gen->compilingWasm());
+
+ const MWasmGcObjectIsSubtypeOfAbstract* mir = ins->mir();
+ MOZ_ASSERT(!mir->destType().isTypeRef());
+
+ Register object = ToRegister(ins->object());
+ Register superSuperTypeVector = Register::Invalid();
+ Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
+ Register scratch2 = Register::Invalid();
+ Register result = ToRegister(ins->output());
+ Label onSuccess;
+ Label onFail;
+ Label join;
+ masm.branchWasmGcObjectIsRefType(
+ object, mir->sourceType(), mir->destType(), &onSuccess,
+ /*onSuccess=*/true, superSuperTypeVector, scratch1, scratch2);
+ masm.bind(&onFail);
+ masm.xor32(result, result);
+ masm.jump(&join);
+ masm.bind(&onSuccess);
+ masm.move32(Imm32(1), result);
+ masm.bind(&join);
+}
+
+void CodeGenerator::visitWasmGcObjectIsSubtypeOfConcrete(
+ LWasmGcObjectIsSubtypeOfConcrete* ins) {
+ MOZ_ASSERT(gen->compilingWasm());
+
+ const MWasmGcObjectIsSubtypeOfConcrete* mir = ins->mir();
+ MOZ_ASSERT(mir->destType().isTypeRef());
+
+ Register object = ToRegister(ins->object());
+ Register superSuperTypeVector = ToRegister(ins->superSuperTypeVector());
+ Register scratch1 = ToRegister(ins->temp0());
+ Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
+ Register result = ToRegister(ins->output());
+ Label onSuccess;
+ Label join;
+ masm.branchWasmGcObjectIsRefType(
+ object, mir->sourceType(), mir->destType(), &onSuccess,
+ /*onSuccess=*/true, superSuperTypeVector, scratch1, scratch2);
+ masm.move32(Imm32(0), result);
+ masm.jump(&join);
+ masm.bind(&onSuccess);
+ masm.move32(Imm32(1), result);
+ masm.bind(&join);
+}
+
+void CodeGenerator::visitWasmGcObjectIsSubtypeOfAbstractAndBranch(
+ LWasmGcObjectIsSubtypeOfAbstractAndBranch* ins) {
+ MOZ_ASSERT(gen->compilingWasm());
+ Register object = ToRegister(ins->object());
+ Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
+ Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
+ Label* onFail = getJumpLabelForBranch(ins->ifFalse());
+ masm.branchWasmGcObjectIsRefType(
+ object, ins->sourceType(), ins->destType(), onSuccess,
+ /*onSuccess=*/true, Register::Invalid(), scratch1, Register::Invalid());
+ masm.jump(onFail);
+}
+
+void CodeGenerator::visitWasmGcObjectIsSubtypeOfConcreteAndBranch(
+ LWasmGcObjectIsSubtypeOfConcreteAndBranch* ins) {
+ MOZ_ASSERT(gen->compilingWasm());
+ Register object = ToRegister(ins->object());
+ Register superSuperTypeVector = ToRegister(ins->superSuperTypeVector());
+ Register scratch1 = ToRegister(ins->temp0());
+ Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
+ Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
+ Label* onFail = getJumpLabelForBranch(ins->ifFalse());
+ masm.branchWasmGcObjectIsRefType(
+ object, ins->sourceType(), ins->destType(), onSuccess,
+ /*onSuccess=*/true, superSuperTypeVector, scratch1, scratch2);
+ masm.jump(onFail);
+}
+
+void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
+ const MWasmBoundsCheck* mir = ins->mir();
+ Register ptr = ToRegister(ins->ptr());
+ Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
+ // When there are no spectre mitigations in place, branching out-of-line to
+ // the trap is a big performance win, but with mitigations it's trickier. See
+ // bug 1680243.
+ if (JitOptions.spectreIndexMasking) {
+ Label ok;
+ masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+ } else {
+ OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
+ mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
+ addOutOfLineCode(ool, mir);
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
+ ool->entry());
+ }
+}
+
+void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
+ const MWasmBoundsCheck* mir = ins->mir();
+ Register64 ptr = ToRegister64(ins->ptr());
+ Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
+ // See above.
+ if (JitOptions.spectreIndexMasking) {
+ Label ok;
+ masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+ } else {
+ OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
+ mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
+ addOutOfLineCode(ool, mir);
+ masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
+ ool->entry());
+ }
+}
+
+void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
+ const MWasmAlignmentCheck* mir = ins->mir();
+ Register ptr = ToRegister(ins->ptr());
+ OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
+ mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
+ addOutOfLineCode(ool, mir);
+ masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
+ ool->entry());
+}
+
+void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
+ const MWasmAlignmentCheck* mir = ins->mir();
+ Register64 ptr = ToRegister64(ins->ptr());
+#ifdef JS_64BIT
+ Register r = ptr.reg;
+#else
+ Register r = ptr.low;
+#endif
+ OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
+ mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
+ addOutOfLineCode(ool, mir);
+ masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
+ ool->entry());
+}
+
+void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
+ switch (ins->mir()->type()) {
+ case MIRType::RefOrNull:
+ case MIRType::Pointer:
+ masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
+ ToRegister(ins->output()));
+ break;
+ case MIRType::Int32:
+ masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
+ ToRegister(ins->output()));
+ break;
+ default:
+ MOZ_CRASH("MIRType not supported in WasmLoadInstance");
+ }
+}
+
+void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
+ MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
+ masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
+ ToOutRegister64(ins));
+}
+
+void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
+ JSScript* script, Register tmp) {
+ // The code depends on the JitScript* not being discarded without also
+ // invalidating Ion code. Assert this.
+#ifdef DEBUG
+ Label ok;
+ masm.movePtr(ImmGCPtr(script), tmp);
+ masm.loadJitScript(tmp, tmp);
+ masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
+ masm.assumeUnreachable("Didn't find JitScript?");
+ masm.bind(&ok);
+#endif
+
+ masm.load32(warmUpCount, tmp);
+ masm.add32(Imm32(1), tmp);
+ masm.store32(tmp, warmUpCount);
+}
+
+void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
+ Register tmp = ToRegister(ins->temp0());
+
+ AbsoluteAddress warmUpCount =
+ AbsoluteAddress(ins->mir()->script()->jitScript())
+ .offset(JitScript::offsetOfWarmUpCount());
+ incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
+}
+
+void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
+ ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
+ Label bail;
+ masm.branchTestMagicValue(Assembler::Equal, inputValue,
+ JS_UNINITIALIZED_LEXICAL, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+}
+
+void CodeGenerator::visitThrowRuntimeLexicalError(
+ LThrowRuntimeLexicalError* ins) {
+ pushArg(Imm32(ins->mir()->errorNumber()));
+
+ using Fn = bool (*)(JSContext*, unsigned);
+ callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
+}
+
+void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
+ pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));
+
+ using Fn = bool (*)(JSContext*, unsigned);
+ callVM<Fn, js::ThrowMsgOperation>(ins);
+}
+
+void CodeGenerator::visitGlobalDeclInstantiation(
+ LGlobalDeclInstantiation* ins) {
+ pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
+ pushArg(ImmGCPtr(ins->mir()->block()->info().script()));
+
+ using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
+ callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
+}
+
+void CodeGenerator::visitDebugger(LDebugger* ins) {
+ Register cx = ToRegister(ins->temp0());
+
+ masm.loadJSContext(cx);
+ using Fn = bool (*)(JSContext* cx);
+ masm.setupAlignedABICall();
+ masm.passABIArg(cx);
+ masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();
+
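+ // Bail out of Ion if some Debugger wants to observe debugger statements
+ // in this global.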
+ Label bail;
+ masm.branchIfTrueBool(ReturnReg, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+}
+
+void CodeGenerator::visitNewTarget(LNewTarget* ins) {
+ ValueOperand output = ToOutValue(ins);
+
+ // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
+ Label notConstructing, done;
+ Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
+ masm.branchTestPtr(Assembler::Zero, calleeToken,
+ Imm32(CalleeToken_FunctionConstructing), &notConstructing);
+
+ Register argvLen = output.scratchReg();
+ masm.loadNumActualArgs(FramePointer, argvLen);
+
+ Label useNFormals;
+
+ size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
+ masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);
+
+ size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
+ {
+ BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
+ masm.loadValue(newTarget, output);
+ masm.jump(&done);
+ }
+
+ masm.bind(&useNFormals);
+
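+ // When there are fewer actual than formal arguments, |new.target| is
+ // stored right after the formal argument slots.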
+ {
+ Address newTarget(FramePointer,
+ argsOffset + (numFormalArgs * sizeof(Value)));
+ masm.loadValue(newTarget, output);
+ masm.jump(&done);
+ }
+
+ // else output = undefined
+ masm.bind(&notConstructing);
+ masm.moveValue(UndefinedValue(), output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
+ ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
+ ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
+ ValueOperand output = ToOutValue(ins);
+
+ using Fn = bool (*)(JSContext*, HandleValue);
+ OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
+ ins, ArgList(returnValue), StoreNothing());
+
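+ // A derived-class constructor may only return an object or undefined; if
+ // it returns undefined, |this| must already have been initialized.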
+ Label noChecks;
+ masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
+ masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
+ masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
+ masm.moveValue(thisValue, output);
+ masm.jump(ool->rejoin());
+ masm.bind(&noChecks);
+ masm.moveValue(returnValue, output);
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
+ ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
+ Register output = ToRegister(ins->output());
+
+ using Fn = bool (*)(JSContext*, CheckIsObjectKind);
+ OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
+ ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());
+
+ masm.fallibleUnboxObject(value, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
+ ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);
+
+ using Fn = bool (*)(JSContext*, HandleValue);
+ OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
+ ins, ArgList(checkValue), StoreNothing());
+ masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
+ masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
+ ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+
+ using Fn = bool (*)(JSContext*, HandleValue);
+ OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
+ ins, ArgList(heritage), StoreNothing());
+
+ masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
+ masm.fallibleUnboxObject(heritage, temp0, ool->entry());
+
+ masm.isConstructor(temp0, temp1, ool->entry());
+ masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCheckThis(LCheckThis* ins) {
+ ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);
+
+ using Fn = bool (*)(JSContext*);
+ OutOfLineCode* ool =
+ oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
+ masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
+ ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);
+
+ using Fn = bool (*)(JSContext*);
+ OutOfLineCode* ool =
+ oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
+ masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitGenerator(LGenerator* lir) {
+ Register callee = ToRegister(lir->callee());
+ Register environmentChain = ToRegister(lir->environmentChain());
+ Register argsObject = ToRegister(lir->argsObject());
+
+ pushArg(argsObject);
+ pushArg(environmentChain);
+ pushArg(ImmGCPtr(current->mir()->info().script()));
+ pushArg(callee);
+
+ using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
+ HandleObject, HandleObject);
+ callVM<Fn, CreateGenerator>(lir);
+}
+
+void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
+ Register generator = ToRegister(lir->generator());
+ ValueOperand valueOrReason = ToValue(lir, LAsyncResolve::ValueOrReasonIndex);
+ AsyncFunctionResolveKind resolveKind = lir->mir()->resolveKind();
+
+ pushArg(Imm32(static_cast<int32_t>(resolveKind)));
+ pushArg(valueOrReason);
+ pushArg(generator);
+
+ using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
+ HandleValue, AsyncFunctionResolveKind);
+ callVM<Fn, js::AsyncFunctionResolve>(lir);
+}
+
+void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
+ ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
+ Register generator = ToRegister(lir->generator());
+
+ pushArg(value);
+ pushArg(generator);
+
+ using Fn =
+ JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
+ HandleValue value);
+ callVM<Fn, js::AsyncFunctionAwait>(lir);
+}
+
+void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
+ ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);
+
+ pushArg(value);
+
+ using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
+ callVM<Fn, js::CanSkipAwait>(lir);
+}
+
+void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
+ ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
+ ValueOperand output = ToOutValue(lir);
+ Register canSkip = ToRegister(lir->canSkip());
+
+ Label cantExtract, finished;
+ masm.branchIfFalseBool(canSkip, &cantExtract);
+
+ pushArg(value);
+
+ using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
+ callVM<Fn, js::ExtractAwaitValue>(lir);
+ masm.jump(&finished);
+ masm.bind(&cantExtract);
+
+ masm.moveValue(value, output);
+
+ masm.bind(&finished);
+}
+
+void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
+ ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
+ pushArg(checkValue);
+ using Fn = bool (*)(JSContext*, HandleValue);
+ callVM<Fn, js::Debug_CheckSelfHosted>(ins);
+}
+
+void CodeGenerator::visitRandom(LRandom* ins) {
+ using mozilla::non_crypto::XorShift128PlusRNG;
+
+ FloatRegister output = ToFloatRegister(ins->output());
+ Register rngReg = ToRegister(ins->temp0());
+
+ Register64 temp1 = ToRegister64(ins->temp1());
+ Register64 temp2 = ToRegister64(ins->temp2());
+
+ const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
+ masm.movePtr(ImmPtr(rng), rngReg);
+
+ masm.randomDouble(rngReg, output, temp1, temp2);
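+ // For differential testing, overwrite the result with a deterministic value.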
+ if (js::SupportDifferentialTesting()) {
+ masm.loadConstantDouble(0.0, output);
+ }
+}
+
+void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ switch (ins->mode()) {
+ case MSignExtendInt32::Byte:
+ masm.move8SignExtend(input, output);
+ break;
+ case MSignExtendInt32::Half:
+ masm.move16SignExtend(input, output);
+ break;
+ }
+}
+
+void CodeGenerator::visitRotate(LRotate* ins) {
+ MRotate* mir = ins->mir();
+ Register input = ToRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ const LAllocation* count = ins->count();
+ if (count->isConstant()) {
+ int32_t c = ToInt32(count) & 0x1F;
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft(Imm32(c), input, dest);
+ } else {
+ masm.rotateRight(Imm32(c), input, dest);
+ }
+ } else {
+ Register creg = ToRegister(count);
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft(creg, input, dest);
+ } else {
+ masm.rotateRight(creg, input, dest);
+ }
+ }
+}
+
+class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
+ LNaNToZero* lir_;
+
+ public:
+ explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineNaNToZero(this);
+ }
+ LNaNToZero* lir() const { return lir_; }
+};
+
+void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
+ FloatRegister output = ToFloatRegister(ool->lir()->output());
+ masm.loadConstantDouble(0.0, output);
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+
+ OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ if (lir->mir()->operandIsNeverNegativeZero()) {
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
+ } else {
+ FloatRegister scratch = ToFloatRegister(lir->temp0());
+ masm.loadConstantDouble(0.0, scratch);
+ masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
+ ool->entry());
+ }
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
+ Register obj = ToRegister(lir->object());
+ Register output = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ masm.setIsPackedArray(obj, output, temp);
+}
+
+void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
+ Register array = ToRegister(lir->array());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+
+ Label bail;
+ masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
+ Register target = ToRegister(lir->target());
+ ValueOperand out = ToOutValue(lir);
+ Register scratch = out.scratchReg();
+
+ using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
+ OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
+ StoreValueTo(out));
+
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ masm.loadObjProto(target, scratch);
+
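+ // The loaded proto is either nullptr (0) for a null proto, LazyProto (1),
+ // or a real JSObject pointer.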
+ Label hasProto;
+ masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);
+
+ // Call into the VM for lazy prototypes.
+ masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());
+
+ masm.moveValue(NullValue(), out);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&hasProto);
+ masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
+ pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));
+
+ using Fn = PlainObject* (*)(JSContext*, HandleValue);
+ callVM<Fn, js::ObjectWithProtoOperation>(lir);
+}
+
+void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
+ Register obj = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ masm.loadObjProto(obj, output);
+
+#ifdef DEBUG
+ // We shouldn't encounter a null or lazy proto.
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ Label done;
+ masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
+ masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
+ masm.bind(&done);
+#endif
+}
+
+void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
+ pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));
+
+ using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
+ callVM<Fn, js::BuiltinObjectOperation>(lir);
+}
+
+void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
+ Register callee = ToRegister(lir->callee());
+ ValueOperand out = ToOutValue(lir);
+ Register temp = ToRegister(lir->temp0());
+
+#ifdef DEBUG
+ Label classCheckDone;
+ masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
+ &classCheckDone);
+ masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
+ masm.bind(&classCheckDone);
+#endif
+
+ // Load prototype of callee
+ masm.loadObjProto(callee, temp);
+
+#ifdef DEBUG
+ // We won't encounter a lazy proto, because |callee| is guaranteed to be a
+ // JSFunction and only proxy objects can have a lazy proto.
+ MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
+
+ Label proxyCheckDone;
+ masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
+ masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
+ masm.bind(&proxyCheckDone);
+#endif
+
+ Label nullProto, done;
+ masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
+
+ // Box prototype and return
+ masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
+ masm.jump(&done);
+
+ masm.bind(&nullProto);
+ masm.moveValue(NullValue(), out);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
+ Register func = ToRegister(lir->function());
+ ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);
+
+ masm.assertFunctionIsExtended(func);
+
+ Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
+
+ emitPreBarrier(addr);
+ masm.storeValue(homeObject, addr);
+}
+
+void CodeGenerator::visitIsTypedArrayConstructor(
+ LIsTypedArrayConstructor* lir) {
+ Register object = ToRegister(lir->object());
+ Register output = ToRegister(lir->output());
+
+ masm.setIsDefinitelyTypedArrayConstructor(object, output);
+}
+
+void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
+ ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
+ Register output = ToRegister(lir->output());
+
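+ // extractTag may return the Value's own type register instead of using
+ // |output|, so copy it over if the two differ.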
+ Register tag = masm.extractTag(value, output);
+ if (tag != output) {
+ masm.mov(tag, output);
+ }
+}
+
+void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+
+ bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());
+
+ // If both lhs and rhs are numbers, differing tags don't prove the values
+ // are unequal (Int32 vs. Double), so we have to bail.
+ Label done;
+ masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
+ masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
+ bailout(lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
+ Register object = ToRegister(lir->object());
+ Register output = ToRegister(lir->output());
+
+ masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
+ masm.unboxObject(
+ Address(output, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
+ output);
+}
+
+void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
+ Register object = ToRegister(lir->object());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+
+ masm.movePropertyKey(lir->mir()->propId(), temp1);
+ masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);
+
+ using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
+ GetterSetter* getterSetter);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp0);
+ masm.passABIArg(temp0);
+ masm.passABIArg(object);
+ masm.passABIArg(temp1);
+ masm.passABIArg(temp2);
+ masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
+
+ bailoutIfFalseBool(ReturnReg, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
+ Register object = ToRegister(lir->object());
+ Register temp = ToRegister(lir->temp0());
+
+ Label bail;
+ masm.branchIfObjectNotExtensible(object, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardInt32IsNonNegative(
+ LGuardInt32IsNonNegative* lir) {
+ Register index = ToRegister(lir->index());
+
+ bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
+}
+
+void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
+ Register input = ToRegister(lir->input());
+
+ bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
+ lir->snapshot());
+ bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
+ lir->snapshot());
+}
+
+void CodeGenerator::visitGuardIndexIsNotDenseElement(
+ LGuardIndexIsNotDenseElement* lir) {
+ Register object = ToRegister(lir->object());
+ Register index = ToRegister(lir->index());
+ Register temp = ToRegister(lir->temp0());
+ Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
+
+ // Load obj->elements.
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
+
+ // Ensure index >= initLength or the element is a hole.
+ Label notDense;
+ Address capacity(temp, ObjectElements::offsetOfInitializedLength());
+ masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);
+
+ BaseValueIndex element(temp, index);
+ masm.branchTestMagic(Assembler::Equal, element, &notDense);
+
+ bailout(lir->snapshot());
+
+ masm.bind(&notDense);
+}
+
+void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
+ LGuardIndexIsValidUpdateOrAdd* lir) {
+ Register object = ToRegister(lir->object());
+ Register index = ToRegister(lir->index());
+ Register temp = ToRegister(lir->temp0());
+ Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
+
+ // Load obj->elements.
+ masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
+
+ Label success;
+
+ // If the length is writable, any index may be added or updated, so branch
+ // to &success.
+ Address flags(temp, ObjectElements::offsetOfFlags());
+ masm.branchTest32(Assembler::Zero, flags,
+ Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
+ &success);
+
+ // Otherwise, ensure index is in bounds.
+ Label bail;
+ Address length(temp, ObjectElements::offsetOfLength());
+ masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
+ masm.bind(&success);
+
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitCallAddOrUpdateSparseElement(
+ LCallAddOrUpdateSparseElement* lir) {
+ Register object = ToRegister(lir->object());
+ Register index = ToRegister(lir->index());
+ ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);
+
+ pushArg(Imm32(lir->mir()->strict()));
+ pushArg(value);
+ pushArg(index);
+ pushArg(object);
+
+ using Fn =
+ bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
+ callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
+}
+
+void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
+ Register object = ToRegister(lir->object());
+ Register index = ToRegister(lir->index());
+
+ pushArg(index);
+ pushArg(object);
+
+ using Fn =
+ bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
+ callVM<Fn, js::GetSparseElementHelper>(lir);
+}
+
+void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
+ Register object = ToRegister(lir->object());
+ Register index = ToRegister(lir->index());
+
+ pushArg(index);
+ pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
+ pushArg(object);
+
+ using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
+ MutableHandleValue);
+ callVM<Fn, js::NativeGetElement>(lir);
+}
+
+void CodeGenerator::visitCallNativeGetElementSuper(
+ LCallNativeGetElementSuper* lir) {
+ Register object = ToRegister(lir->object());
+ Register index = ToRegister(lir->index());
+ ValueOperand receiver =
+ ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);
+
+ pushArg(index);
+ pushArg(receiver);
+ pushArg(object);
+
+ using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
+ MutableHandleValue);
+ callVM<Fn, js::NativeGetElement>(lir);
+}
+
+void CodeGenerator::visitCallObjectHasSparseElement(
+ LCallObjectHasSparseElement* lir) {
+ Register object = ToRegister(lir->object());
+ Register index = ToRegister(lir->index());
+ Register temp0 = ToRegister(lir->temp0());
+ Register temp1 = ToRegister(lir->temp1());
+ Register output = ToRegister(lir->output());
+
+ masm.reserveStack(sizeof(Value));
+ masm.moveStackPtrTo(temp1);
+
+ using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
+ masm.setupAlignedABICall();
+ masm.loadJSContext(temp0);
+ masm.passABIArg(temp0);
+ masm.passABIArg(object);
+ masm.passABIArg(index);
+ masm.passABIArg(temp1);
+ masm.callWithABI<Fn, HasNativeElementPure>();
+ masm.storeCallPointerResult(temp0);
+
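+ // HasNativeElementPure returns false if it couldn't do the lookup without
+ // calling into the VM; otherwise it stored a boolean Value in the reserved
+ // stack slot.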
+ Label bail, ok;
+ uint32_t framePushed = masm.framePushed();
+ masm.branchIfTrueBool(temp0, &ok);
+ masm.adjustStack(sizeof(Value));
+ masm.jump(&bail);
+
+ masm.bind(&ok);
+ masm.setFramePushed(framePushed);
+ masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
+ masm.adjustStack(sizeof(Value));
+
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
+ Register bits = ToRegister(ins->bits());
+ Register input = ToRegister(ins->input());
+
+ pushArg(bits);
+ pushArg(input);
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
+ callVM<Fn, jit::BigIntAsIntN>(ins);
+}
+
+void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp = ToRegister(ins->temp());
+ Register64 temp64 = ToRegister64(ins->temp64());
+ Register output = ToRegister(ins->output());
+
+ Label done, create;
+
+ masm.movePtr(input, output);
+
+ // Load the BigInt value as an int64.
+ masm.loadBigInt64(input, temp64);
+
+ // Create a new BigInt when the input exceeds the int64 range.
+ masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
+ Imm32(64 / BigInt::DigitBits), &create);
+
+ // And create a new BigInt when the value and the BigInt have different signs.
+ Label nonNegative;
+ masm.branchIfBigIntIsNonNegative(input, &nonNegative);
+ masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
+ masm.jump(&done);
+
+ masm.bind(&nonNegative);
+ masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);
+
+ masm.bind(&create);
+ emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp = ToRegister(ins->temp());
+ Register64 temp64 = ToRegister64(ins->temp64());
+ Register output = ToRegister(ins->output());
+
+ Label done, create;
+
+ masm.movePtr(input, output);
+
+ // Load the absolute value of the first digit.
+ masm.loadFirstBigIntDigitOrZero(input, temp);
+
+ // If the absolute value exceeds the int32 range, create a new BigInt.
+ masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);
+
+ // Also create a new BigInt if we have more than one digit.
+ masm.branch32(Assembler::BelowOrEqual,
+ Address(input, BigInt::offsetOfLength()), Imm32(1), &done);
+
+ masm.bind(&create);
+
+ // |temp| stores the absolute value, negate it when the sign flag is set.
+ Label nonNegative;
+ masm.branchIfBigIntIsNonNegative(input, &nonNegative);
+ masm.negPtr(temp);
+ masm.bind(&nonNegative);
+
+ masm.move32To64SignExtend(temp, temp64);
+ emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
+ Register bits = ToRegister(ins->bits());
+ Register input = ToRegister(ins->input());
+
+ pushArg(bits);
+ pushArg(input);
+
+ using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
+ callVM<Fn, jit::BigIntAsUintN>(ins);
+}
+
+void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp = ToRegister(ins->temp());
+ Register64 temp64 = ToRegister64(ins->temp64());
+ Register output = ToRegister(ins->output());
+
+ Label done, create;
+
+ masm.movePtr(input, output);
+
+ // Load the BigInt value as an uint64.
+ masm.loadBigInt64(input, temp64);
+
+ // Create a new BigInt when the input exceeds the uint64 range.
+ masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
+ Imm32(64 / BigInt::DigitBits), &create);
+
+ // And create a new BigInt when the input has the sign flag set.
+ masm.branchIfBigIntIsNonNegative(input, &done);
+
+ masm.bind(&create);
+ emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp = ToRegister(ins->temp());
+ Register64 temp64 = ToRegister64(ins->temp64());
+ Register output = ToRegister(ins->output());
+
+ Label done, create;
+
+ masm.movePtr(input, output);
+
+ // Load the absolute value of the first digit.
+ masm.loadFirstBigIntDigitOrZero(input, temp);
+
+ // If the absolute value exceeds the uint32 range, create a new BigInt.
+#if JS_PUNBOX64
+ masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
+#endif
+
+ // Also create a new BigInt if we have more than one digit.
+ masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
+ Imm32(1), &create);
+
+ // And create a new BigInt when the input has the sign flag set.
+ masm.branchIfBigIntIsNonNegative(input, &done);
+
+ masm.bind(&create);
+
+  // |temp| stores the absolute value; negate it when the sign flag is set.
+ Label nonNegative;
+ masm.branchIfBigIntIsNonNegative(input, &nonNegative);
+ masm.negPtr(temp);
+ masm.bind(&nonNegative);
+
+ masm.move32To64ZeroExtend(temp, temp64);
+ emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
+ ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);
+
+ Label bail;
+ masm.branchTestGCThing(Assembler::Equal, input, &bail);
+ bailoutFrom(&bail, ins->snapshot());
+}
+
+void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
+ ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
+ FloatRegister tempFloat = ToFloatRegister(ins->temp0());
+ ValueOperand output = ToOutValue(ins);
+
+ masm.toHashableNonGCThing(input, output, tempFloat);
+}
+
+void CodeGenerator::visitToHashableString(LToHashableString* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ using Fn = JSAtom* (*)(JSContext*, JSString*);
+ auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
+ StoreRegisterTo(output));
+
+ masm.branchTest32(Assembler::Zero, Address(input, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT), ool->entry());
+ masm.movePtr(input, output);
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
+ ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
+ FloatRegister tempFloat = ToFloatRegister(ins->temp0());
+ ValueOperand output = ToOutValue(ins);
+
+ Register str = output.scratchReg();
+
+ using Fn = JSAtom* (*)(JSContext*, JSString*);
+ auto* ool =
+ oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));
+
+ masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
+}
+
+void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
+ ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
+ Register temp = ToRegister(ins->temp0());
+ Register output = ToRegister(ins->output());
+
+ masm.prepareHashNonGCThing(input, output, temp);
+}
+
+void CodeGenerator::visitHashString(LHashString* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp = ToRegister(ins->temp0());
+ Register output = ToRegister(ins->output());
+
+ masm.prepareHashString(input, output, temp);
+}
+
+void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.prepareHashSymbol(input, output);
+}
+
+void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
+ Register input = ToRegister(ins->input());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register output = ToRegister(ins->output());
+
+ masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
+}
+
+void CodeGenerator::visitHashObject(LHashObject* ins) {
+ Register setObj = ToRegister(ins->setObject());
+ ValueOperand input = ToValue(ins, LHashObject::InputIndex);
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ Register output = ToRegister(ins->output());
+
+ masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
+}
+
+void CodeGenerator::visitHashValue(LHashValue* ins) {
+ Register setObj = ToRegister(ins->setObject());
+ ValueOperand input = ToValue(ins, LHashValue::InputIndex);
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ Register output = ToRegister(ins->output());
+
+ masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
+}
+
+void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
+ Register setObj = ToRegister(ins->setObject());
+ ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
+ Register hash = ToRegister(ins->hash());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register output = ToRegister(ins->output());
+
+ masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
+}
+
+void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
+ Register setObj = ToRegister(ins->setObject());
+ ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
+ Register hash = ToRegister(ins->hash());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ Register output = ToRegister(ins->output());
+
+ masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
+ temp3);
+}
+
+void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
+ Register setObj = ToRegister(ins->setObject());
+ ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
+ Register hash = ToRegister(ins->hash());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ Register output = ToRegister(ins->output());
+
+ masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
+ temp3);
+}
+
+void CodeGenerator::visitSetObjectHasValueVMCall(
+ LSetObjectHasValueVMCall* ins) {
+ pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
+ pushArg(ToRegister(ins->setObject()));
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
+ callVM<Fn, jit::SetObjectHas>(ins);
+}
+
+void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
+ Register setObj = ToRegister(ins->setObject());
+ Register output = ToRegister(ins->output());
+
+ masm.loadSetObjectSize(setObj, output);
+}
+
+void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
+ Register mapObj = ToRegister(ins->mapObject());
+ ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
+ Register hash = ToRegister(ins->hash());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register output = ToRegister(ins->output());
+
+ masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
+}
+
+void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
+ Register mapObj = ToRegister(ins->mapObject());
+ ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
+ Register hash = ToRegister(ins->hash());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ Register output = ToRegister(ins->output());
+
+ masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
+ temp3);
+}
+
+void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
+ Register mapObj = ToRegister(ins->mapObject());
+ ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
+ Register hash = ToRegister(ins->hash());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ Register output = ToRegister(ins->output());
+
+ masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
+ temp3);
+}
+
+void CodeGenerator::visitMapObjectHasValueVMCall(
+ LMapObjectHasValueVMCall* ins) {
+ pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
+ pushArg(ToRegister(ins->mapObject()));
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
+ callVM<Fn, jit::MapObjectHas>(ins);
+}
+
+void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
+ Register mapObj = ToRegister(ins->mapObject());
+ ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
+ Register hash = ToRegister(ins->hash());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ ValueOperand output = ToOutValue(ins);
+
+ masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
+ output.scratchReg());
+}
+
+void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
+ Register mapObj = ToRegister(ins->mapObject());
+ ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
+ Register hash = ToRegister(ins->hash());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ ValueOperand output = ToOutValue(ins);
+
+ masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
+ temp3, output.scratchReg());
+}
+
+void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
+ Register mapObj = ToRegister(ins->mapObject());
+ ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
+ Register hash = ToRegister(ins->hash());
+ Register temp0 = ToRegister(ins->temp0());
+ Register temp1 = ToRegister(ins->temp1());
+ Register temp2 = ToRegister(ins->temp2());
+ Register temp3 = ToRegister(ins->temp3());
+ ValueOperand output = ToOutValue(ins);
+
+ masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
+ temp3, output.scratchReg());
+}
+
+void CodeGenerator::visitMapObjectGetValueVMCall(
+ LMapObjectGetValueVMCall* ins) {
+ pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
+ pushArg(ToRegister(ins->mapObject()));
+
+ using Fn =
+ bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
+ callVM<Fn, jit::MapObjectGet>(ins);
+}
+
+void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
+ Register mapObj = ToRegister(ins->mapObject());
+ Register output = ToRegister(ins->output());
+
+ masm.loadMapObjectSize(mapObj, output);
+}
+
+template <size_t NumDefs>
+void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
+ wasm::JitCallStackArgVector stackArgs;
+ masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
+ if (masm.oom()) {
+ return;
+ }
+
+ MIonToWasmCall* mir = lir->mir();
+ const wasm::FuncExport& funcExport = mir->funcExport();
+ const wasm::FuncType& sig =
+ mir->instance()->metadata().getFuncExportType(funcExport);
+
+ WasmABIArgGenerator abi;
+ for (size_t i = 0; i < lir->numOperands(); i++) {
+ MIRType argMir;
+ switch (sig.args()[i].kind()) {
+ case wasm::ValType::I32:
+ case wasm::ValType::I64:
+ case wasm::ValType::F32:
+ case wasm::ValType::F64:
+ argMir = sig.args()[i].toMIRType();
+ break;
+ case wasm::ValType::V128:
+ MOZ_CRASH("unexpected argument type when calling from ion to wasm");
+ case wasm::ValType::Ref:
+ // temporarilyUnsupportedReftypeForEntry() restricts args to externref
+ MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
+ // Argument is boxed on the JS side to an anyref, so passed as a
+ // pointer here.
+ argMir = sig.args()[i].toMIRType();
+ break;
+ }
+
+ ABIArg arg = abi.next(argMir);
+ switch (arg.kind()) {
+ case ABIArg::GPR:
+ case ABIArg::FPU: {
+ MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
+ stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
+ break;
+ }
+ case ABIArg::Stack: {
+ const LAllocation* larg = lir->getOperand(i);
+ if (larg->isConstant()) {
+ stackArgs.infallibleEmplaceBack(ToInt32(larg));
+ } else if (larg->isGeneralReg()) {
+ stackArgs.infallibleEmplaceBack(ToRegister(larg));
+ } else if (larg->isFloatReg()) {
+ stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
+ } else {
+ // Always use the stack pointer here because GenerateDirectCallFromJit
+ // depends on this.
+ Address addr = ToAddress<BaseRegForAddress::SP>(larg);
+ stackArgs.infallibleEmplaceBack(addr);
+ }
+ break;
+ }
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR: {
+ MOZ_CRASH(
+ "no way to pass i64, and wasm uses hardfp for function calls");
+ }
+#endif
+ case ABIArg::Uninitialized: {
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ }
+ }
+
+ const wasm::ValTypeVector& results = sig.results();
+ if (results.length() == 0) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
+ } else {
+ MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
+ switch (results[0].kind()) {
+ case wasm::ValType::I32:
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
+ MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
+ break;
+ case wasm::ValType::I64:
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
+ break;
+ case wasm::ValType::F32:
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
+ MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
+ break;
+ case wasm::ValType::F64:
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
+ break;
+ case wasm::ValType::V128:
+ MOZ_CRASH("unexpected return type when calling from ion to wasm");
+ case wasm::ValType::Ref:
+        // The wasm stubs layer unboxes anything that needs to be unboxed
+        // and leaves it in a Value. A FuncRef/EqRef could in principle be
+        // left as a raw object pointer, but for now doing so would
+        // complicate the API.
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
+ break;
+ }
+ }
+
+ WasmInstanceObject* instObj = lir->mir()->instanceObject();
+
+ Register scratch = ToRegister(lir->temp());
+
+ uint32_t callOffset;
+ ensureOsiSpace();
+ GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
+ scratch, &callOffset);
+
+ // Add the instance object to the constant pool, so it is transferred to
+ // the owning IonScript and so that it gets traced as long as the IonScript
+ // lives.
+
+ uint32_t unused;
+ masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));
+
+ markSafepointAt(callOffset, lir);
+}
+
+void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
+ emitIonToWasmCallBase(lir);
+}
+void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
+ emitIonToWasmCallBase(lir);
+}
+void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
+ emitIonToWasmCallBase(lir);
+}
+
+void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
+ masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmFence(LWasmFence* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ masm.memoryBarrier(MembarFull);
+}
+
+void CodeGenerator::visitWasmBoxValue(LWasmBoxValue* lir) {
+ ValueOperand input = ToValue(lir, LWasmBoxValue::InputIndex);
+ Register output = ToRegister(lir->output());
+
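+  // Objects pass through as raw pointers, null maps to the null AnyRef
+  // representation, and any other value is boxed through the VM call below.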
+ Label nullValue, objectValue, done;
+ {
+ ScratchTagScope tag(masm, input);
+ masm.splitTagForTest(input, tag);
+ masm.branchTestObject(Assembler::Equal, tag, &objectValue);
+ masm.branchTestNull(Assembler::Equal, tag, &nullValue);
+ }
+
+ using Fn = JSObject* (*)(JSContext*, HandleValue);
+ OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::BoxBoxableValue>(
+ lir, ArgList(input), StoreRegisterTo(output));
+
+ masm.jump(oolBoxValue->entry());
+
+ masm.bind(&nullValue);
+ // See the definition of AnyRef for a discussion of pointer representation.
+ masm.xorPtr(output, output);
+ masm.jump(&done);
+
+ masm.bind(&objectValue);
+ // See the definition of AnyRef for a discussion of pointer representation.
+ masm.unboxObject(input, output);
+
+ masm.bind(&done);
+ masm.bind(oolBoxValue->rejoin());
+}
+
+void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ // See the definition of AnyRef for a discussion of pointer representation.
+ if (input != output) {
+ masm.movePtr(input, output);
+ }
+}
+
+#ifdef FUZZING_JS_FUZZILLI
+void CodeGenerator::emitFuzzilliHashDouble(FloatRegister floatDouble,
+ Register scratch, Register output) {
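+  // Hash the double by adding the high and low 32 bits of its bit pattern.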
+# ifdef JS_PUNBOX64
+ Register64 reg64_1(scratch);
+ Register64 reg64_2(output);
+ masm.moveDoubleToGPR64(floatDouble, reg64_1);
+ masm.move64(reg64_1, reg64_2);
+ masm.rshift64(Imm32(32), reg64_2);
+ masm.add32(scratch, output);
+# else
+ Register64 reg64(scratch, output);
+ masm.moveDoubleToGPR64(floatDouble, reg64);
+ masm.add32(scratch, output);
+# endif
+}
+
+void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
+ Register output) {
+ using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
+ OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
+ lir, ArgList(obj), StoreRegisterTo(output));
+
+ masm.jump(ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::emitFuzzilliHashBigInt(Register bigInt, Register output) {
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::All(),
+ FloatRegisterSet::All());
+ volatileRegs.takeUnchecked(output);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = uint32_t (*)(BigInt* bigInt);
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(bigInt);
+ masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
+ masm.storeCallInt32Result(output);
+
+ masm.PopRegsInMask(volatileRegs);
+}
+
+void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
+ MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Value);
+
+ ValueOperand value = ToValue(ins, 0);
+
+ Label isDouble, isObject, isBigInt, done;
+
+ FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
+ Register scratch = ToRegister(ins->getTemp(0));
+ Register output = ToRegister(ins->output());
+ MOZ_ASSERT(scratch != output);
+
+# ifdef JS_PUNBOX64
+ Register tagReg = ToRegister(ins->getTemp(0));
+ masm.splitTag(value, tagReg);
+# else
+ Register tagReg = value.typeReg();
+# endif
+
+ Label noBigInt;
+ masm.branchTestBigInt(Assembler::NotEqual, tagReg, &noBigInt);
+ masm.unboxBigInt(value, scratch);
+ masm.jump(&isBigInt);
+ masm.bind(&noBigInt);
+
+ Label noObject;
+ masm.branchTestObject(Assembler::NotEqual, tagReg, &noObject);
+ masm.unboxObject(value, scratch);
+ masm.jump(&isObject);
+ masm.bind(&noObject);
+
+ Label noInt32;
+ masm.branchTestInt32(Assembler::NotEqual, tagReg, &noInt32);
+ masm.unboxInt32(value, scratch);
+ masm.convertInt32ToDouble(scratch, scratchFloat);
+ masm.jump(&isDouble);
+ masm.bind(&noInt32);
+
+ Label noNull;
+ masm.branchTestNull(Assembler::NotEqual, tagReg, &noNull);
+ masm.move32(Imm32(1), scratch);
+ masm.convertInt32ToDouble(scratch, scratchFloat);
+ masm.jump(&isDouble);
+ masm.bind(&noNull);
+
+ Label noUndefined;
+ masm.branchTestUndefined(Assembler::NotEqual, tagReg, &noUndefined);
+ masm.move32(Imm32(2), scratch);
+ masm.convertInt32ToDouble(scratch, scratchFloat);
+ masm.jump(&isDouble);
+ masm.bind(&noUndefined);
+
+ Label noBoolean;
+ masm.branchTestBoolean(Assembler::NotEqual, tagReg, &noBoolean);
+ masm.unboxBoolean(value, scratch);
+ masm.add32(Imm32(3), scratch);
+ masm.convertInt32ToDouble(scratch, scratchFloat);
+ masm.jump(&isDouble);
+ masm.bind(&noBoolean);
+
+ Label noDouble;
+ masm.branchTestDouble(Assembler::NotEqual, tagReg, &noDouble);
+ masm.unboxDouble(value, scratchFloat);
+ masm.canonicalizeDoubleIfDeterministic(scratchFloat);
+
+ masm.jump(&isDouble);
+ masm.bind(&noDouble);
+ masm.move32(Imm32(0), output);
+ masm.jump(&done);
+
+ masm.bind(&isBigInt);
+ emitFuzzilliHashBigInt(scratch, output);
+ masm.jump(&done);
+
+ masm.bind(&isObject);
+ emitFuzzilliHashObject(ins, scratch, output);
+ masm.jump(&done);
+
+ masm.bind(&isDouble);
+ emitFuzzilliHashDouble(scratchFloat, scratch, output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
+ const LAllocation* value = ins->value();
+ MIRType mirType = ins->mir()->getOperand(0)->type();
+
+ FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
+ Register scratch = ToRegister(ins->getTemp(0));
+ Register output = ToRegister(ins->output());
+ MOZ_ASSERT(scratch != output);
+
+ if (mirType == MIRType::Object) {
+ MOZ_ASSERT(value->isGeneralReg());
+ masm.mov(value->toGeneralReg()->reg(), scratch);
+ emitFuzzilliHashObject(ins, scratch, output);
+ } else if (mirType == MIRType::BigInt) {
+ MOZ_ASSERT(value->isGeneralReg());
+ masm.mov(value->toGeneralReg()->reg(), scratch);
+ emitFuzzilliHashBigInt(scratch, output);
+ } else if (mirType == MIRType::Double) {
+ MOZ_ASSERT(value->isFloatReg());
+ masm.moveDouble(value->toFloatReg()->reg(), scratchFloat);
+ masm.canonicalizeDoubleIfDeterministic(scratchFloat);
+ emitFuzzilliHashDouble(scratchFloat, scratch, output);
+ } else if (mirType == MIRType::Float32) {
+ MOZ_ASSERT(value->isFloatReg());
+ masm.convertFloat32ToDouble(value->toFloatReg()->reg(), scratchFloat);
+ masm.canonicalizeDoubleIfDeterministic(scratchFloat);
+ emitFuzzilliHashDouble(scratchFloat, scratch, output);
+ } else if (mirType == MIRType::Int32) {
+ MOZ_ASSERT(value->isGeneralReg());
+ masm.mov(value->toGeneralReg()->reg(), scratch);
+ masm.convertInt32ToDouble(scratch, scratchFloat);
+ emitFuzzilliHashDouble(scratchFloat, scratch, output);
+ } else if (mirType == MIRType::Null) {
+ MOZ_ASSERT(value->isBogus());
+ masm.move32(Imm32(1), scratch);
+ masm.convertInt32ToDouble(scratch, scratchFloat);
+ emitFuzzilliHashDouble(scratchFloat, scratch, output);
+ } else if (mirType == MIRType::Undefined) {
+ MOZ_ASSERT(value->isBogus());
+ masm.move32(Imm32(2), scratch);
+ masm.convertInt32ToDouble(scratch, scratchFloat);
+ emitFuzzilliHashDouble(scratchFloat, scratch, output);
+ } else if (mirType == MIRType::Boolean) {
+ MOZ_ASSERT(value->isGeneralReg());
+ masm.mov(value->toGeneralReg()->reg(), scratch);
+ masm.add32(Imm32(3), scratch);
+ masm.convertInt32ToDouble(scratch, scratchFloat);
+ emitFuzzilliHashDouble(scratchFloat, scratch, output);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
+ const LAllocation* value = ins->value();
+ MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Int32);
+ MOZ_ASSERT(value->isGeneralReg());
+
+ Register scratchJSContext = ToRegister(ins->getTemp(0));
+ Register scratch = ToRegister(ins->getTemp(1));
+
+ masm.loadJSContext(scratchJSContext);
+
+ // stats
+ Address addrExecHashInputs(scratchJSContext,
+ offsetof(JSContext, executionHashInputs));
+ masm.load32(addrExecHashInputs, scratch);
+ masm.add32(Imm32(1), scratch);
+ masm.store32(scratch, addrExecHashInputs);
+
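+  // Mix the value into the rolling execution hash:
+  // hash = rotate-left-by-one(hash + value).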
+ Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
+ masm.load32(addrExecHash, scratch);
+ masm.add32(value->toGeneralReg()->reg(), scratch);
+ masm.rotateLeft(Imm32(1), scratch, scratch);
+ masm.store32(scratch, addrExecHash);
+}
+#endif
+
+static_assert(!std::is_polymorphic_v<CodeGenerator>,
+ "CodeGenerator should not have any virtual methods");
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
new file mode 100644
index 0000000000..7240bc07b4
--- /dev/null
+++ b/js/src/jit/CodeGenerator.h
@@ -0,0 +1,447 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CodeGenerator_h
+#define jit_CodeGenerator_h
+
+#include "jit/PerfSpewer.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/CodeGenerator-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/CodeGenerator-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/CodeGenerator-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/CodeGenerator-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/CodeGenerator-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/CodeGenerator-mips64.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/CodeGenerator-loong64.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/CodeGenerator-riscv64.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/CodeGenerator-wasm32.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/CodeGenerator-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+
+namespace wasm {
+class Decoder;
+class StackMaps;
+} // namespace wasm
+
+namespace jit {
+
+class WarpSnapshot;
+
+template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
+class OutOfLineCallVM;
+
+enum class SwitchTableType { Inline, OutOfLine };
+
+template <SwitchTableType tableType>
+class OutOfLineSwitch;
+class OutOfLineTestObject;
+class OutOfLineNewArray;
+class OutOfLineNewObject;
+class CheckOverRecursedFailure;
+class OutOfLineUnboxFloatingPoint;
+class OutOfLineStoreElementHole;
+class OutOfLineTypeOfV;
+class OutOfLineTypeOfIsNonPrimitiveV;
+class OutOfLineTypeOfIsNonPrimitiveO;
+class OutOfLineUpdateCache;
+class OutOfLineICFallback;
+class OutOfLineCallPostWriteBarrier;
+class OutOfLineCallPostWriteElementBarrier;
+class OutOfLineElementPostWriteBarrier;
+class OutOfLineIsCallable;
+class OutOfLineIsConstructor;
+class OutOfLineRegExpMatcher;
+class OutOfLineRegExpSearcher;
+class OutOfLineRegExpExecMatch;
+class OutOfLineRegExpExecTest;
+class OutOfLineRegExpPrototypeOptimizable;
+class OutOfLineRegExpInstanceOptimizable;
+class OutOfLineNaNToZero;
+class OutOfLineResumableWasmTrap;
+class OutOfLineAbortingWasmTrap;
+class OutOfLineGuardNumberToIntPtrIndex;
+class OutOfLineBoxNonStrictThis;
+class OutOfLineArrayPush;
+class OutOfLineWasmCallPostWriteBarrier;
+
+class CodeGenerator final : public CodeGeneratorSpecific {
+ [[nodiscard]] bool generateBody();
+
+ ConstantOrRegister toConstantOrRegister(LInstruction* lir, size_t n,
+ MIRType type);
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ void resetOsiPointRegs(LSafepoint* safepoint);
+ bool shouldVerifyOsiPointRegs(LSafepoint* safepoint);
+ void verifyOsiPointRegs(LSafepoint* safepoint);
+#endif
+
+ void callVMInternal(VMFunctionId id, LInstruction* ins);
+
+ template <typename Fn, Fn fn>
+ void callVM(LInstruction* ins);
+
+ template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
+ inline OutOfLineCode* oolCallVM(LInstruction* ins, const ArgSeq& args,
+ const StoreOutputTo& out);
+
+ template <typename LCallIns>
+ void emitCallNative(LCallIns* call, JSNative native);
+
+ public:
+ CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm = nullptr);
+ ~CodeGenerator();
+
+ [[nodiscard]] bool generate();
+ [[nodiscard]] bool generateWasm(
+ wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
+ const wasm::ArgTypeVector& argTys, const RegisterOffsets& trapExitLayout,
+ size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
+ wasm::StackMaps* stackMaps, wasm::Decoder* decoder);
+
+ [[nodiscard]] bool link(JSContext* cx, const WarpSnapshot* snapshot);
+
+ void emitOOLTestObject(Register objreg, Label* ifTruthy, Label* ifFalsy,
+ Register scratch);
+ void emitIntToString(Register input, Register output, Label* ool);
+
+ void emitTypeOfCheck(JSValueType type, Register tag, Register output,
+ Label* done, Label* oolObject);
+ void emitTypeOfJSType(JSValueType type, Register output);
+ void emitTypeOfObject(Register obj, Register output, Label* done);
+ void emitTypeOfIsObject(MTypeOfIs* mir, Register obj, Register output,
+ Label* success, Label* fail, Label* slowCheck);
+ void emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj, Register output);
+
+ template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
+ void visitOutOfLineCallVM(
+ OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool);
+
+ void visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool);
+ void visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool);
+ void visitOutOfLineRegExpExecMatch(OutOfLineRegExpExecMatch* ool);
+ void visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool);
+ void visitOutOfLineRegExpPrototypeOptimizable(
+ OutOfLineRegExpPrototypeOptimizable* ool);
+ void visitOutOfLineRegExpInstanceOptimizable(
+ OutOfLineRegExpInstanceOptimizable* ool);
+
+ void visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool);
+ void visitOutOfLineTypeOfIsNonPrimitiveV(OutOfLineTypeOfIsNonPrimitiveV* ool);
+ void visitOutOfLineTypeOfIsNonPrimitiveO(OutOfLineTypeOfIsNonPrimitiveO* ool);
+
+ template <SwitchTableType tableType>
+ void visitOutOfLineSwitch(OutOfLineSwitch<tableType>* ool);
+
+ void visitOutOfLineIsCallable(OutOfLineIsCallable* ool);
+ void visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool);
+
+ void visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool);
+
+ void visitOutOfLineResumableWasmTrap(OutOfLineResumableWasmTrap* ool);
+ void visitOutOfLineAbortingWasmTrap(OutOfLineAbortingWasmTrap* ool);
+ void visitCheckOverRecursedFailure(CheckOverRecursedFailure* ool);
+
+ void visitOutOfLineUnboxFloatingPoint(OutOfLineUnboxFloatingPoint* ool);
+ void visitOutOfLineStoreElementHole(OutOfLineStoreElementHole* ool);
+
+ void visitOutOfLineBoxNonStrictThis(OutOfLineBoxNonStrictThis* ool);
+
+ void visitOutOfLineICFallback(OutOfLineICFallback* ool);
+
+ void visitOutOfLineCallPostWriteBarrier(OutOfLineCallPostWriteBarrier* ool);
+ void visitOutOfLineCallPostWriteElementBarrier(
+ OutOfLineCallPostWriteElementBarrier* ool);
+
+ void visitOutOfLineElementPostWriteBarrier(
+ OutOfLineElementPostWriteBarrier* ool);
+
+ void visitOutOfLineNewArray(OutOfLineNewArray* ool);
+ void visitOutOfLineNewObject(OutOfLineNewObject* ool);
+
+ void visitOutOfLineGuardNumberToIntPtrIndex(
+ OutOfLineGuardNumberToIntPtrIndex* ool);
+
+ void visitOutOfLineArrayPush(OutOfLineArrayPush* ool);
+
+ void visitOutOfLineWasmCallPostWriteBarrier(
+ OutOfLineWasmCallPostWriteBarrier* ool);
+
+ private:
+ void emitPostWriteBarrier(const LAllocation* obj);
+ void emitPostWriteBarrier(Register objreg);
+ void emitPostWriteBarrierS(Address address, Register prev, Register next);
+
+ void emitElementPostWriteBarrier(MInstruction* mir,
+ const LiveRegisterSet& liveVolatileRegs,
+ Register obj, const LAllocation* index,
+ Register scratch,
+ const ConstantOrRegister& val,
+ int32_t indexDiff = 0);
+
+ template <class LPostBarrierType, MIRType nurseryType>
+ void visitPostWriteBarrierCommon(LPostBarrierType* lir, OutOfLineCode* ool);
+ template <class LPostBarrierType>
+ void visitPostWriteBarrierCommonV(LPostBarrierType* lir, OutOfLineCode* ool);
+
+ void emitCallInvokeFunction(LInstruction* call, Register callereg,
+ bool isConstructing, bool ignoresReturnValue,
+ uint32_t argc, uint32_t unusedStack);
+ template <typename T>
+ void emitApplyGeneric(T* apply);
+ template <typename T>
+ void emitCallInvokeFunction(T* apply);
+ void emitAllocateSpaceForApply(Register argcreg, Register scratch);
+ void emitAllocateSpaceForConstructAndPushNewTarget(
+ Register argcreg, Register newTargetAndScratch);
+ void emitCopyValuesForApply(Register argvSrcBase, Register argvIndex,
+ Register copyreg, size_t argvSrcOffset,
+ size_t argvDstOffset);
+ void emitRestoreStackPointerFromFP();
+ void emitPushArguments(Register argcreg, Register scratch, Register copyreg,
+ uint32_t extraFormals);
+ void emitPushArrayAsArguments(Register tmpArgc, Register srcBaseAndArgc,
+ Register scratch, size_t argvSrcOffset);
+ void emitPushArguments(LApplyArgsGeneric* apply, Register scratch);
+ void emitPushArguments(LApplyArgsObj* apply, Register scratch);
+ void emitPushArguments(LApplyArrayGeneric* apply, Register scratch);
+ void emitPushArguments(LConstructArgsGeneric* construct, Register scratch);
+ void emitPushArguments(LConstructArrayGeneric* construct, Register scratch);
+
+ template <class GetInlinedArgument>
+ void emitGetInlinedArgument(GetInlinedArgument* lir, Register index,
+ ValueOperand output);
+
+ using RegisterOrInt32 = mozilla::Variant<Register, int32_t>;
+
+ static RegisterOrInt32 ToRegisterOrInt32(const LAllocation* allocation);
+
+#ifdef DEBUG
+ void emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
+ const RegisterOrInt32& count,
+ Register numActualArgs);
+#endif
+
+ template <class ArgumentsSlice>
+ void emitNewArray(ArgumentsSlice* lir, const RegisterOrInt32& count,
+ Register output, Register temp);
+
+ void visitNewArrayCallVM(LNewArray* lir);
+ void visitNewObjectVMCall(LNewObject* lir);
+
+ void emitConcat(LInstruction* lir, Register lhs, Register rhs,
+ Register output);
+
+ void emitInstanceOf(LInstruction* ins, Register protoReg);
+
+ void loadJSScriptForBlock(MBasicBlock* block, Register reg);
+ void loadOutermostJSScript(Register reg);
+
+#ifdef DEBUG
+ void emitAssertResultV(const ValueOperand output, const MDefinition* mir);
+ void emitAssertGCThingResult(Register input, const MDefinition* mir);
+#endif
+
+#ifdef DEBUG
+ void emitDebugForceBailing(LInstruction* lir);
+#endif
+
+ IonScriptCounts* extractScriptCounts() {
+ IonScriptCounts* counts = scriptCounts_;
+ scriptCounts_ = nullptr; // prevent delete in dtor
+ return counts;
+ }
+
+ void addGetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs,
+ TypedOrValueRegister value,
+ const ConstantOrRegister& id, ValueOperand output);
+ void addSetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs,
+ Register objReg, Register temp,
+ const ConstantOrRegister& id,
+ const ConstantOrRegister& value, bool strict);
+
+ template <class IteratorObject, class OrderedHashTable>
+ void emitGetNextEntryForIterator(LGetNextEntryForIterator* lir);
+
+ template <class OrderedHashTable>
+ void emitLoadIteratorValues(Register result, Register temp, Register front);
+
+ void emitStringToInt64(LInstruction* lir, Register input, Register64 output);
+
+ OutOfLineCode* createBigIntOutOfLine(LInstruction* lir, Scalar::Type type,
+ Register64 input, Register output);
+
+ void emitCreateBigInt(LInstruction* lir, Scalar::Type type, Register64 input,
+ Register output, Register maybeTemp);
+
+ template <size_t NumDefs>
+ void emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir);
+
+ IonScriptCounts* maybeCreateScriptCounts();
+
+ void emitWasmCompareAndSelect(LWasmCompareAndSelect* ins);
+
+ void testValueTruthyForType(JSValueType type, ScratchTagScope& tag,
+ const ValueOperand& value, Register tempToUnbox,
+ Register temp, FloatRegister floatTemp,
+ Label* ifTruthy, Label* ifFalsy,
+ OutOfLineTestObject* ool, bool skipTypeTest);
+
+ // Test whether value is truthy or not and jump to the corresponding label.
+ // The control flow falls through when the object is truthy, as an
+ // optimization.
+ void testValueTruthy(const ValueOperand& value, Register tempToUnbox,
+ Register temp, FloatRegister floatTemp,
+ const TypeDataList& observedTypes, Label* ifTruthy,
+ Label* ifFalsy, OutOfLineTestObject* ool);
+
+ // This function behaves like testObjectEmulatesUndefined with the exception
+ // that it can choose to let control flow fall through when the object
+ // doesn't emulate undefined, as an optimization. Use the regular
+ // testObjectEmulatesUndefined when it's required to branch to one of the
+ // two labels.
+ void testObjectEmulatesUndefinedKernel(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch,
+ OutOfLineTestObject* ool);
+
+ // Test whether an object emulates |undefined|. If it does, jump to
+ // |ifEmulatesUndefined|; the caller is responsible for binding this label.
+ // If it doesn't, fall through; the label |ifDoesntEmulateUndefined| (which
+ // must be initially unbound) will be bound at this point.
+ void branchTestObjectEmulatesUndefined(Register objreg,
+ Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch,
+ OutOfLineTestObject* ool);
+
+ // Test whether an object emulates |undefined|, and jump to the
+ // corresponding label.
+ //
+ // This method should be used when subsequent code can't be laid out in a
+ // straight line; if it can, branchTest* should be used instead.
+ void testObjectEmulatesUndefined(Register objreg, Label* ifEmulatesUndefined,
+ Label* ifDoesntEmulateUndefined,
+ Register scratch, OutOfLineTestObject* ool);
+
+ void emitStoreElementTyped(const LAllocation* value, MIRType valueType,
+ Register elements, const LAllocation* index);
+
+ // Bailout if an element about to be written to is a hole.
+ void emitStoreHoleCheck(Register elements, const LAllocation* index,
+ LSnapshot* snapshot);
+
+ void emitAssertRangeI(MIRType type, const Range* r, Register input);
+ void emitAssertRangeD(const Range* r, FloatRegister input,
+ FloatRegister temp);
+
+ void maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
+ OutOfLineCode* ool);
+
+ void incrementWarmUpCounter(AbsoluteAddress warmUpCount, JSScript* script,
+ Register tmp);
+
+ Vector<CodeOffset, 0, JitAllocPolicy> ionScriptLabels_;
+
+  // Used to bake a pointer into the IonScript's list of nursery objects, for
+ // MNurseryObject codegen.
+ struct NurseryObjectLabel {
+ CodeOffset offset;
+ uint32_t nurseryIndex;
+ NurseryObjectLabel(CodeOffset offset, uint32_t nurseryIndex)
+ : offset(offset), nurseryIndex(nurseryIndex) {}
+ };
+ Vector<NurseryObjectLabel, 0, JitAllocPolicy> ionNurseryObjectLabels_;
+
+ void branchIfInvalidated(Register temp, Label* invalidated);
+
+#ifdef DEBUG
+ void emitDebugResultChecks(LInstruction* ins);
+ void emitGCThingResultChecks(LInstruction* lir, MDefinition* mir);
+ void emitValueResultChecks(LInstruction* lir, MDefinition* mir);
+#endif
+
+ // Script counts created during code generation.
+ IonScriptCounts* scriptCounts_;
+
+ IonPerfSpewer perfSpewer_;
+
+ // Bit mask of JitRealm stubs that are to be read-barriered.
+ uint32_t realmStubsToReadBarrier_;
+
+#ifdef FUZZING_JS_FUZZILLI
+ void emitFuzzilliHashDouble(FloatRegister floatDouble, Register scratch,
+ Register output);
+ void emitFuzzilliHashObject(LInstruction* lir, Register obj, Register output);
+ void emitFuzzilliHashBigInt(Register bigInt, Register output);
+#endif
+
+#define LIR_OP(op) void visit##op(L##op* ins);
+ LIR_OPCODE_LIST(LIR_OP)
+#undef LIR_OP
+};
+
+class OutOfLineResumableWasmTrap : public OutOfLineCodeBase<CodeGenerator> {
+ LInstruction* lir_;
+ size_t framePushed_;
+ wasm::BytecodeOffset bytecodeOffset_;
+ wasm::Trap trap_;
+
+ public:
+ OutOfLineResumableWasmTrap(LInstruction* lir, size_t framePushed,
+ wasm::BytecodeOffset bytecodeOffset,
+ wasm::Trap trap)
+ : lir_(lir),
+ framePushed_(framePushed),
+ bytecodeOffset_(bytecodeOffset),
+ trap_(trap) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineResumableWasmTrap(this);
+ }
+ LInstruction* lir() const { return lir_; }
+ size_t framePushed() const { return framePushed_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+ wasm::Trap trap() const { return trap_; }
+};
+
+class OutOfLineAbortingWasmTrap : public OutOfLineCodeBase<CodeGenerator> {
+ wasm::BytecodeOffset bytecodeOffset_;
+ wasm::Trap trap_;
+
+ public:
+ OutOfLineAbortingWasmTrap(wasm::BytecodeOffset bytecodeOffset,
+ wasm::Trap trap)
+ : bytecodeOffset_(bytecodeOffset), trap_(trap) {}
+
+ void accept(CodeGenerator* codegen) override {
+ codegen->visitOutOfLineAbortingWasmTrap(this);
+ }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+ wasm::Trap trap() const { return trap_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CodeGenerator_h */
diff --git a/js/src/jit/CompactBuffer.h b/js/src/jit/CompactBuffer.h
new file mode 100644
index 0000000000..2c7fefcb9a
--- /dev/null
+++ b/js/src/jit/CompactBuffer.h
@@ -0,0 +1,254 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Compactbuffer_h
+#define jit_Compactbuffer_h
+
+#include "mozilla/Assertions.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "js/AllocPolicy.h"
+#include "js/Vector.h"
+
+namespace js {
+namespace jit {
+
+class CompactBufferWriter;
+
+// CompactBuffers are byte streams designed for compressible integers. They
+// have helper functions for writing bytes, fixed-size integers, and
+// variable-sized integers. Variable-sized integers are encoded in 1-5 bytes,
+// each byte containing 7 bits of the integer and a bit which specifies
+// whether the next byte is also part of the integer.
+//
+// Fixed-width integers are also available, in case the actual value will not
+// be known until later.
+
+class CompactBufferReader {
+ const uint8_t* buffer_;
+ const uint8_t* end_;
+
+ uint32_t readVariableLength() {
+ uint32_t val = 0;
+ uint32_t shift = 0;
+ uint8_t byte;
+ while (true) {
+ MOZ_ASSERT(shift < 32);
+ byte = readByte();
+ val |= (uint32_t(byte) >> 1) << shift;
+ shift += 7;
+ if (!(byte & 1)) {
+ return val;
+ }
+ }
+ }
+
+ uint64_t readVariableLength64() {
+ uint64_t val = 0;
+ uint32_t shift = 0;
+ uint8_t byte;
+ while (true) {
+ MOZ_ASSERT(shift < 64);
+ byte = readByte();
+ val |= (uint64_t(byte) >> 1) << shift;
+ shift += 7;
+ if (!(byte & 1)) {
+ return val;
+ }
+ }
+ }
+
+ public:
+ CompactBufferReader(const uint8_t* start, const uint8_t* end)
+ : buffer_(start), end_(end) {}
+ inline explicit CompactBufferReader(const CompactBufferWriter& writer);
+ uint8_t readByte() {
+ MOZ_ASSERT(buffer_ < end_);
+ return *buffer_++;
+ }
+ uint32_t readFixedUint32_t() {
+ uint32_t b0 = readByte();
+ uint32_t b1 = readByte();
+ uint32_t b2 = readByte();
+ uint32_t b3 = readByte();
+ return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
+ }
+ uint16_t readFixedUint16_t() {
+ uint32_t b0 = readByte();
+ uint32_t b1 = readByte();
+ return b0 | (b1 << 8);
+ }
+ uint32_t readNativeEndianUint32_t() {
+ // Must be at 4-byte boundary
+ MOZ_ASSERT(uintptr_t(buffer_) % sizeof(uint32_t) == 0);
+ return *reinterpret_cast<const uint32_t*>(buffer_);
+ }
+ uint32_t readUnsigned() { return readVariableLength(); }
+ uint64_t readUnsigned64() { return readVariableLength64(); }
+ int32_t readSigned() {
+ uint8_t b = readByte();
+ bool isNegative = !!(b & (1 << 0));
+ bool more = !!(b & (1 << 1));
+ int32_t result = b >> 2;
+ if (more) {
+ result |= readUnsigned() << 6;
+ }
+ if (isNegative) {
+ return -result;
+ }
+ return result;
+ }
+ // Reads a value written by writeUnsigned15Bit.
+ uint32_t readUnsigned15Bit() {
+ uint8_t byte = readByte();
+ uint32_t val = byte >> 1;
+ if (byte & 1) {
+ val |= uint32_t(readByte()) << 7;
+ }
+ return val;
+ }
+ void* readRawPointer() {
+ uintptr_t ptrWord = 0;
+ for (unsigned i = 0; i < sizeof(uintptr_t); i++) {
+ ptrWord |= static_cast<uintptr_t>(readByte()) << (i * 8);
+ }
+ return reinterpret_cast<void*>(ptrWord);
+ }
+
+ bool more() const {
+ MOZ_ASSERT(buffer_ <= end_);
+ return buffer_ < end_;
+ }
+
+ void seek(const uint8_t* start, uint32_t offset) {
+ buffer_ = start + offset;
+ MOZ_ASSERT(start < end_);
+ MOZ_ASSERT(buffer_ <= end_);
+ }
+
+ const uint8_t* currentPosition() const { return buffer_; }
+};
+
+class CompactBufferWriter {
+ js::Vector<uint8_t, 32, SystemAllocPolicy> buffer_;
+ bool enoughMemory_;
+
+ public:
+ CompactBufferWriter() : enoughMemory_(true) {}
+
+ void setOOM() { enoughMemory_ = false; }
+
+ // Note: writeByte() takes uint32 to catch implicit casts with a runtime
+ // assert.
+ void writeByte(uint32_t byte) {
+ MOZ_ASSERT(byte <= 0xFF);
+ if (!buffer_.append(byte)) {
+ enoughMemory_ = false;
+ }
+ }
+ void writeByteAt(uint32_t pos, uint32_t byte) {
+ MOZ_ASSERT(byte <= 0xFF);
+ if (!oom()) {
+ buffer_[pos] = byte;
+ }
+ }
+ // Writes a variable-length value similar to writeUnsigned, but optimized for
+ // small 15-bit values that fit in one or two variable-length-encoded bytes.
+ // Must be read using readUnsigned15Bit.
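+  // For example (illustrative), 0x2A encodes as the single byte 0x54, while
+  // 0x1234 encodes as the two bytes 0x69 0x24.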
+ void writeUnsigned15Bit(uint32_t value) {
+ uint8_t byte1 = ((value & 0x7F) << 1) | (value > 0x7F);
+ writeByte(byte1);
+ value >>= 7;
+ if (value) {
+ MOZ_ASSERT(value <= 0xFF);
+ writeByte(value);
+ }
+ }
+ void writeUnsigned(uint32_t value) {
+ do {
+ uint8_t byte = ((value & 0x7F) << 1) | (value > 0x7F);
+ writeByte(byte);
+ value >>= 7;
+ } while (value);
+ }
+ void writeUnsignedAt(uint32_t pos, uint32_t value, uint32_t original) {
+ MOZ_ASSERT(value <= original);
+ do {
+ uint8_t byte = ((value & 0x7F) << 1) | (original > 0x7F);
+ writeByteAt(pos++, byte);
+ value >>= 7;
+ original >>= 7;
+ } while (original);
+ }
+ void writeUnsigned64(uint64_t value) {
+ do {
+ uint8_t byte = ((value & 0x7F) << 1) | (value > 0x7F);
+ writeByte(byte);
+ value >>= 7;
+ } while (value);
+ }
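+  // writeSigned stores the sign in bit 0, a continuation flag in bit 1, and
+  // six payload bits in the leading byte. For example (illustrative),
+  // writeSigned(-5) emits the single byte 0x15, which readSigned() decodes
+  // back to -5.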
+ void writeSigned(int32_t v) {
+ bool isNegative = v < 0;
+ uint32_t value = isNegative ? -v : v;
+ uint8_t byte =
+ ((value & 0x3F) << 2) | ((value > 0x3F) << 1) | uint32_t(isNegative);
+ writeByte(byte);
+
+ // Write out the rest of the bytes, if needed.
+ value >>= 6;
+ if (value == 0) {
+ return;
+ }
+ writeUnsigned(value);
+ }
+ void writeFixedUint32_t(uint32_t value) {
+ writeByte(value & 0xFF);
+ writeByte((value >> 8) & 0xFF);
+ writeByte((value >> 16) & 0xFF);
+ writeByte((value >> 24) & 0xFF);
+ }
+ void writeFixedUint16_t(uint16_t value) {
+ writeByte(value & 0xFF);
+ writeByte(value >> 8);
+ }
+ void writeNativeEndianUint32_t(uint32_t value) {
+ // Must be at 4-byte boundary
+ MOZ_ASSERT_IF(!oom(), length() % sizeof(uint32_t) == 0);
+ writeFixedUint32_t(0);
+ if (oom()) {
+ return;
+ }
+ uint8_t* endPtr = buffer() + length();
+ reinterpret_cast<uint32_t*>(endPtr)[-1] = value;
+ }
+ void writeRawPointer(const void* ptr) {
+ uintptr_t ptrWord = reinterpret_cast<uintptr_t>(ptr);
+ for (unsigned i = 0; i < sizeof(uintptr_t); i++) {
+ writeByte((ptrWord >> (i * 8)) & 0xFF);
+ }
+ }
+ size_t length() const { return buffer_.length(); }
+ uint8_t* buffer() {
+ MOZ_ASSERT(!oom());
+ return &buffer_[0];
+ }
+ const uint8_t* buffer() const {
+ MOZ_ASSERT(!oom());
+ return &buffer_[0];
+ }
+ bool oom() const { return !enoughMemory_; }
+ void propagateOOM(bool success) { enoughMemory_ &= success; }
+};
+
+CompactBufferReader::CompactBufferReader(const CompactBufferWriter& writer)
+ : buffer_(writer.buffer()), end_(writer.buffer() + writer.length()) {}
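+
+// Illustrative sketch only (not part of the original file): round-trip a
+// value through the variable-length encoding described at the top of this
+// file. Each encoded byte carries seven payload bits above a continuation
+// flag in bit 0, so 300 is written as the two bytes 0x59 0x04.
+[[maybe_unused]] inline void ExampleCompactBufferRoundTrip() {
+  CompactBufferWriter writer;
+  writer.writeUnsigned(300);
+  if (writer.oom()) {
+    return;
+  }
+  CompactBufferReader reader(writer);
+  uint32_t roundTripped = reader.readUnsigned();
+  MOZ_ASSERT(roundTripped == 300);
+  (void)roundTripped;
+}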
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Compactbuffer_h */
diff --git a/js/src/jit/CompileInfo.h b/js/src/jit/CompileInfo.h
new file mode 100644
index 0000000000..311c1d30b7
--- /dev/null
+++ b/js/src/jit/CompileInfo.h
@@ -0,0 +1,382 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CompileInfo_h
+#define jit_CompileInfo_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Maybe.h" // mozilla::Maybe, mozilla::Some
+
+#include <algorithm> // std::max
+#include <stdint.h> // uint32_t
+
+#include "jit/CompileWrappers.h" // CompileRuntime
+#include "jit/JitFrames.h" // MinJITStackSize
+#include "jit/shared/Assembler-shared.h"
+#include "js/TypeDecls.h" // jsbytecode
+#include "vm/BindingKind.h" // BindingLocation
+#include "vm/JSAtomState.h" // JSAtomState
+#include "vm/JSFunction.h" // JSFunction
+#include "vm/JSScript.h" // JSScript
+#include "vm/Opcodes.h" // JSOp
+#include "vm/Scope.h" // BindingIter
+
+namespace js {
+
+class ModuleObject;
+
+namespace jit {
+
+class InlineScriptTree;
+
+inline unsigned StartArgSlot(JSScript* script) {
+ // Reserved slots:
+ // Slot 0: Environment chain.
+ // Slot 1: Return value.
+
+ // When needed:
+  //  Slot 2: Arguments object.
+
+ // Note: when updating this, please also update the assert in
+ // SnapshotWriter::startFrame
+ return 2 + (script->needsArgsObj() ? 1 : 0);
+}
+
+inline unsigned CountArgSlots(JSScript* script, JSFunction* fun) {
+ // Slot x + 0: This value.
+ // Slot x + 1: Argument 1.
+ // ...
+ // Slot x + n: Argument n.
+
+ // Note: when updating this, please also update the assert in
+ // SnapshotWriter::startFrame
+ return StartArgSlot(script) + (fun ? fun->nargs() + 1 : 0);
+}
+
+inline unsigned CountArgSlots(JSScript* script, bool hasFun,
+ uint32_t funArgCount) {
+ // Same as the previous function, for use when the JSFunction is not
+ // available.
+ return StartArgSlot(script) + (hasFun ? funArgCount + 1 : 0);
+}
+
+// Contains information about the compilation source for IR being generated.
+class CompileInfo {
+ public:
+ CompileInfo(CompileRuntime* runtime, JSScript* script, JSFunction* fun,
+ jsbytecode* osrPc, bool scriptNeedsArgsObj,
+ InlineScriptTree* inlineScriptTree)
+ : script_(script),
+ fun_(fun),
+ osrPc_(osrPc),
+ scriptNeedsArgsObj_(scriptNeedsArgsObj),
+ hadEagerTruncationBailout_(script->hadEagerTruncationBailout()),
+ hadSpeculativePhiBailout_(script->hadSpeculativePhiBailout()),
+ hadLICMInvalidation_(script->hadLICMInvalidation()),
+ hadReorderingBailout_(script->hadReorderingBailout()),
+ hadBoundsCheckBailout_(script->failedBoundsCheck()),
+ hadUnboxFoldingBailout_(script->hadUnboxFoldingBailout()),
+ mayReadFrameArgsDirectly_(script->mayReadFrameArgsDirectly()),
+ anyFormalIsForwarded_(script->anyFormalIsForwarded()),
+ isDerivedClassConstructor_(script->isDerivedClassConstructor()),
+ inlineScriptTree_(inlineScriptTree) {
+ MOZ_ASSERT_IF(osrPc, JSOp(*osrPc) == JSOp::LoopHead);
+
+ // The function here can flow in from anywhere so look up the canonical
+ // function to ensure that we do not try to embed a nursery pointer in
+ // jit-code. Precisely because it can flow in from anywhere, it's not
+ // guaranteed to be non-lazy. Hence, don't access its script!
+ if (fun_) {
+ fun_ = fun_->baseScript()->function();
+ MOZ_ASSERT(fun_->isTenured());
+ }
+
+ nimplicit_ = StartArgSlot(script) /* env chain and argument obj */
+ + (fun ? 1 : 0); /* this */
+ nargs_ = fun ? fun->nargs() : 0;
+ nlocals_ = script->nfixed();
+
+ // An extra slot is needed for global scopes because InitGLexical (stack
+ // depth 1) is compiled as a SetProp (stack depth 2) on the global lexical
+ // scope.
+ uint32_t extra = script->isGlobalCode() ? 1 : 0;
+ nstack_ = std::max<unsigned>(script->nslots() - script->nfixed(),
+ MinJITStackSize) +
+ extra;
+ nslots_ = nimplicit_ + nargs_ + nlocals_ + nstack_;
+
+ // For derived class constructors, find and cache the frame slot for
+ // the .this binding. This slot is assumed to be always
+ // observable. See isObservableFrameSlot.
+ if (script->isDerivedClassConstructor()) {
+ MOZ_ASSERT(script->functionHasThisBinding());
+ for (BindingIter bi(script); bi; bi++) {
+ if (bi.name() != runtime->names().dotThis) {
+ continue;
+ }
+ BindingLocation loc = bi.location();
+ if (loc.kind() == BindingLocation::Kind::Frame) {
+ thisSlotForDerivedClassConstructor_ =
+ mozilla::Some(localSlot(loc.slot()));
+ break;
+ }
+ }
+ }
+
+    // If the script uses an environment in its body, the environment chain
+ // will need to be observable.
+ needsBodyEnvironmentObject_ = script->needsBodyEnvironment();
+ funNeedsSomeEnvironmentObject_ =
+ fun ? fun->needsSomeEnvironmentObject() : false;
+ }
+
+ explicit CompileInfo(unsigned nlocals)
+ : script_(nullptr),
+ fun_(nullptr),
+ osrPc_(nullptr),
+ scriptNeedsArgsObj_(false),
+ hadEagerTruncationBailout_(false),
+ hadSpeculativePhiBailout_(false),
+ hadLICMInvalidation_(false),
+ hadReorderingBailout_(false),
+ hadBoundsCheckBailout_(false),
+ hadUnboxFoldingBailout_(false),
+ mayReadFrameArgsDirectly_(false),
+ anyFormalIsForwarded_(false),
+ inlineScriptTree_(nullptr),
+ needsBodyEnvironmentObject_(false),
+ funNeedsSomeEnvironmentObject_(false) {
+ nimplicit_ = 0;
+ nargs_ = 0;
+ nlocals_ = nlocals;
+ nstack_ = 1; /* For FunctionCompiler::pushPhiInput/popPhiOutput */
+ nslots_ = nlocals_ + nstack_;
+ }
+
+ JSScript* script() const { return script_; }
+ bool compilingWasm() const { return script() == nullptr; }
+ ModuleObject* module() const { return script_->module(); }
+ jsbytecode* osrPc() const { return osrPc_; }
+ InlineScriptTree* inlineScriptTree() const { return inlineScriptTree_; }
+
+ // It's not safe to access the JSFunction off main thread.
+ bool hasFunMaybeLazy() const { return fun_; }
+ ImmGCPtr funMaybeLazy() const { return ImmGCPtr(fun_); }
+
+ const char* filename() const { return script_->filename(); }
+
+ unsigned lineno() const { return script_->lineno(); }
+
+ // Total number of slots: args, locals, and stack.
+ unsigned nslots() const { return nslots_; }
+
+ // Number of slots needed for env chain, return value,
+ // maybe argumentsobject and this value.
+ unsigned nimplicit() const { return nimplicit_; }
+ // Number of arguments (without counting this value).
+ unsigned nargs() const { return nargs_; }
+ // Number of slots needed for all local variables. This includes "fixed
+ // vars" (see above) and also block-scoped locals.
+ unsigned nlocals() const { return nlocals_; }
+ unsigned ninvoke() const { return nslots_ - nstack_; }
+
+ uint32_t environmentChainSlot() const {
+ MOZ_ASSERT(script());
+ return 0;
+ }
+ uint32_t returnValueSlot() const {
+ MOZ_ASSERT(script());
+ return 1;
+ }
+ uint32_t argsObjSlot() const {
+ MOZ_ASSERT(needsArgsObj());
+ return 2;
+ }
+ uint32_t thisSlot() const {
+ MOZ_ASSERT(hasFunMaybeLazy());
+ MOZ_ASSERT(nimplicit_ > 0);
+ return nimplicit_ - 1;
+ }
+ uint32_t firstArgSlot() const { return nimplicit_; }
+ uint32_t argSlotUnchecked(uint32_t i) const {
+ // During initialization, some routines need to get at arg
+ // slots regardless of how regular argument access is done.
+ MOZ_ASSERT(i < nargs_);
+ return nimplicit_ + i;
+ }
+ uint32_t argSlot(uint32_t i) const {
+ // This should only be accessed when compiling functions for
+ // which argument accesses don't need to go through the
+ // argument object.
+ MOZ_ASSERT(!argsObjAliasesFormals());
+ return argSlotUnchecked(i);
+ }
+ uint32_t firstLocalSlot() const { return nimplicit_ + nargs_; }
+ uint32_t localSlot(uint32_t i) const { return firstLocalSlot() + i; }
+ uint32_t firstStackSlot() const { return firstLocalSlot() + nlocals(); }
+ uint32_t stackSlot(uint32_t i) const { return firstStackSlot() + i; }
+
+ uint32_t totalSlots() const {
+ MOZ_ASSERT(script() && hasFunMaybeLazy());
+ return nimplicit() + nargs() + nlocals();
+ }
+
+ bool hasMappedArgsObj() const { return script()->hasMappedArgsObj(); }
+ bool needsArgsObj() const { return scriptNeedsArgsObj_; }
+ bool argsObjAliasesFormals() const {
+ return scriptNeedsArgsObj_ && script()->hasMappedArgsObj();
+ }
+
+ bool needsBodyEnvironmentObject() const {
+ return needsBodyEnvironmentObject_;
+ }
+
+ enum class SlotObservableKind {
+ // This slot must be preserved because it's observable outside SSA uses.
+ // It can't be recovered before or during bailout.
+ ObservableNotRecoverable,
+
+ // This slot must be preserved because it's observable, but it can be
+ // recovered.
+ ObservableRecoverable,
+
+ // This slot is not observable outside SSA uses.
+ NotObservable,
+ };
+
+ inline SlotObservableKind getSlotObservableKind(uint32_t slot) const {
+ // Locals and expression stack slots.
+ if (slot >= firstLocalSlot()) {
+ // The |this| slot for a derived class constructor is a local slot.
+ // It should never be optimized out, as a Debugger might need to perform
+ // TDZ checks on it via, e.g., an exceptionUnwind handler. The TDZ check
+ // is required for correctness if the handler decides to continue
+ // execution.
+ if (thisSlotForDerivedClassConstructor_ &&
+ *thisSlotForDerivedClassConstructor_ == slot) {
+ return SlotObservableKind::ObservableNotRecoverable;
+ }
+ return SlotObservableKind::NotObservable;
+ }
+
+ // Formal argument slots.
+ if (slot >= firstArgSlot()) {
+ MOZ_ASSERT(hasFunMaybeLazy());
+ MOZ_ASSERT(slot - firstArgSlot() < nargs());
+
+ // Preserve formal arguments if they might be read when creating a rest or
+ // arguments object. In non-strict scripts, Function.arguments can create
+ // an arguments object dynamically so we always preserve the arguments.
+ if (mayReadFrameArgsDirectly_ || !script()->strict()) {
+ return SlotObservableKind::ObservableRecoverable;
+ }
+ return SlotObservableKind::NotObservable;
+ }
+
+ // |this| slot is observable but it can be recovered.
+ if (hasFunMaybeLazy() && slot == thisSlot()) {
+ return SlotObservableKind::ObservableRecoverable;
+ }
+
+ // Environment chain slot.
+ if (slot == environmentChainSlot()) {
+ // If environments can be added in the body (after the prologue) we need
+ // to preserve the environment chain slot. It can't be recovered.
+ if (needsBodyEnvironmentObject()) {
+ return SlotObservableKind::ObservableNotRecoverable;
+ }
+ // If the function may need an arguments object, also preserve the
+ // environment chain because it may be needed to reconstruct the arguments
+ // object during bailout.
+ if (funNeedsSomeEnvironmentObject_ || needsArgsObj()) {
+ return SlotObservableKind::ObservableRecoverable;
+ }
+ return SlotObservableKind::NotObservable;
+ }
+
+ // The arguments object is observable. If it does not escape, it can
+ // be recovered.
+ if (needsArgsObj() && slot == argsObjSlot()) {
+ MOZ_ASSERT(hasFunMaybeLazy());
+ return SlotObservableKind::ObservableRecoverable;
+ }
+
+ MOZ_ASSERT(slot == returnValueSlot());
+ return SlotObservableKind::NotObservable;
+ }
+
+  // Returns true if a slot can be observed outside the current frame while
+ // the frame is active on the stack. This implies that these definitions
+ // would have to be executed and that they cannot be removed even if they
+ // are unused.
+ inline bool isObservableSlot(uint32_t slot) const {
+ SlotObservableKind kind = getSlotObservableKind(slot);
+ return (kind == SlotObservableKind::ObservableNotRecoverable ||
+ kind == SlotObservableKind::ObservableRecoverable);
+ }
+
+  // Returns true if a slot can be recovered before or during a bailout. A
+  // definition that is both observable and recoverable can still be optimized
+  // away, as long as we can compute its value.
+ bool isRecoverableOperand(uint32_t slot) const {
+ SlotObservableKind kind = getSlotObservableKind(slot);
+ return (kind == SlotObservableKind::ObservableRecoverable ||
+ kind == SlotObservableKind::NotObservable);
+ }
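+
+  // A minimal usage sketch (hypothetical caller, not part of this patch): a
+  // pass deciding whether the definition feeding |slot| may be eliminated
+  // could check:
+  //
+  //   if (info.isRecoverableOperand(slot)) {
+  //     // The definition may be marked recoverable and removed, as long as
+  //     // its value can be recomputed during bailout.
+  //   }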
+
+ // Check previous bailout states to prevent doing the same bailout in the
+ // next compilation.
+ bool hadEagerTruncationBailout() const { return hadEagerTruncationBailout_; }
+ bool hadSpeculativePhiBailout() const { return hadSpeculativePhiBailout_; }
+ bool hadLICMInvalidation() const { return hadLICMInvalidation_; }
+ bool hadReorderingBailout() const { return hadReorderingBailout_; }
+ bool hadBoundsCheckBailout() const { return hadBoundsCheckBailout_; }
+ bool hadUnboxFoldingBailout() const { return hadUnboxFoldingBailout_; }
+
+ bool mayReadFrameArgsDirectly() const { return mayReadFrameArgsDirectly_; }
+ bool anyFormalIsForwarded() const { return anyFormalIsForwarded_; }
+
+ bool isDerivedClassConstructor() const { return isDerivedClassConstructor_; }
+
+ private:
+ unsigned nimplicit_;
+ unsigned nargs_;
+ unsigned nlocals_;
+ unsigned nstack_;
+ unsigned nslots_;
+ mozilla::Maybe<unsigned> thisSlotForDerivedClassConstructor_;
+ JSScript* script_;
+ JSFunction* fun_;
+ jsbytecode* osrPc_;
+
+ bool scriptNeedsArgsObj_;
+
+ // Record the state of previous bailouts in order to prevent compiling the
+ // same function identically the next time.
+ bool hadEagerTruncationBailout_;
+ bool hadSpeculativePhiBailout_;
+ bool hadLICMInvalidation_;
+ bool hadReorderingBailout_;
+ bool hadBoundsCheckBailout_;
+ bool hadUnboxFoldingBailout_;
+
+ bool mayReadFrameArgsDirectly_;
+ bool anyFormalIsForwarded_;
+
+ bool isDerivedClassConstructor_;
+
+ InlineScriptTree* inlineScriptTree_;
+
+ // Whether a script needs environments within its body. This informs us
+ // that the environment chain is not easy to reconstruct.
+ bool needsBodyEnvironmentObject_;
+ bool funNeedsSomeEnvironmentObject_;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_CompileInfo_h */
diff --git a/js/src/jit/CompileWrappers.cpp b/js/src/jit/CompileWrappers.cpp
new file mode 100644
index 0000000000..6bbf4fb6d8
--- /dev/null
+++ b/js/src/jit/CompileWrappers.cpp
@@ -0,0 +1,219 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/CompileWrappers.h"
+
+#include "gc/Heap.h"
+#include "gc/Zone.h"
+#include "jit/Ion.h"
+#include "jit/JitRuntime.h"
+#include "vm/Realm.h"
+
+using namespace js;
+using namespace js::jit;
+
+JSRuntime* CompileRuntime::runtime() {
+ return reinterpret_cast<JSRuntime*>(this);
+}
+
+/* static */
+CompileRuntime* CompileRuntime::get(JSRuntime* rt) {
+ return reinterpret_cast<CompileRuntime*>(rt);
+}
+
+#ifdef JS_GC_ZEAL
+const uint32_t* CompileRuntime::addressOfGCZealModeBits() {
+ return runtime()->gc.addressOfZealModeBits();
+}
+#endif
+
+const JitRuntime* CompileRuntime::jitRuntime() {
+ return runtime()->jitRuntime();
+}
+
+GeckoProfilerRuntime& CompileRuntime::geckoProfiler() {
+ return runtime()->geckoProfiler();
+}
+
+bool CompileRuntime::hadOutOfMemory() { return runtime()->hadOutOfMemory; }
+
+bool CompileRuntime::profilingScripts() { return runtime()->profilingScripts; }
+
+const JSAtomState& CompileRuntime::names() { return *runtime()->commonNames; }
+
+const PropertyName* CompileRuntime::emptyString() {
+ return runtime()->emptyString;
+}
+
+const StaticStrings& CompileRuntime::staticStrings() {
+ return *runtime()->staticStrings;
+}
+
+const WellKnownSymbols& CompileRuntime::wellKnownSymbols() {
+ return *runtime()->wellKnownSymbols;
+}
+
+const JSClass* CompileRuntime::maybeWindowProxyClass() {
+ return runtime()->maybeWindowProxyClass();
+}
+
+const void* CompileRuntime::mainContextPtr() {
+ return runtime()->mainContextFromAnyThread();
+}
+
+const void* CompileRuntime::addressOfJitStackLimit() {
+ return runtime()->mainContextFromAnyThread()->addressOfJitStackLimit();
+}
+
+const void* CompileRuntime::addressOfInterruptBits() {
+ return runtime()->mainContextFromAnyThread()->addressOfInterruptBits();
+}
+
+const void* CompileRuntime::addressOfZone() {
+ return runtime()->mainContextFromAnyThread()->addressOfZone();
+}
+
+const void* CompileRuntime::addressOfMegamorphicCache() {
+ return &runtime()->caches().megamorphicCache;
+}
+
+const void* CompileRuntime::addressOfMegamorphicSetPropCache() {
+ return runtime()->caches().megamorphicSetPropCache.get();
+}
+
+const void* CompileRuntime::addressOfStringToAtomCache() {
+ return &runtime()->caches().stringToAtomCache;
+}
+
+const void* CompileRuntime::addressOfLastBufferedWholeCell() {
+ return runtime()->gc.addressOfLastBufferedWholeCell();
+}
+
+const DOMCallbacks* CompileRuntime::DOMcallbacks() {
+ return runtime()->DOMcallbacks;
+}
+
+bool CompileRuntime::runtimeMatches(JSRuntime* rt) { return rt == runtime(); }
+
+Zone* CompileZone::zone() { return reinterpret_cast<Zone*>(this); }
+
+/* static */
+CompileZone* CompileZone::get(Zone* zone) {
+ return reinterpret_cast<CompileZone*>(zone);
+}
+
+CompileRuntime* CompileZone::runtime() {
+ return CompileRuntime::get(zone()->runtimeFromAnyThread());
+}
+
+bool CompileZone::isAtomsZone() { return zone()->isAtomsZone(); }
+
+#ifdef DEBUG
+const void* CompileRuntime::addressOfIonBailAfterCounter() {
+ return runtime()->jitRuntime()->addressOfIonBailAfterCounter();
+}
+#endif
+
+const uint32_t* CompileZone::addressOfNeedsIncrementalBarrier() {
+ // Cast away relaxed atomic wrapper for JIT access to barrier state.
+ const mozilla::Atomic<uint32_t, mozilla::Relaxed>* ptr =
+ zone()->addressOfNeedsIncrementalBarrier();
+ return reinterpret_cast<const uint32_t*>(ptr);
+}
+
+uint32_t* CompileZone::addressOfTenuredAllocCount() {
+ return zone()->addressOfTenuredAllocCount();
+}
+
+gc::FreeSpan** CompileZone::addressOfFreeList(gc::AllocKind allocKind) {
+ return zone()->arenas.addressOfFreeList(allocKind);
+}
+
+bool CompileZone::allocNurseryObjects() {
+ return zone()->allocNurseryObjects();
+}
+
+bool CompileZone::allocNurseryStrings() {
+ return zone()->allocNurseryStrings();
+}
+
+bool CompileZone::allocNurseryBigInts() {
+ return zone()->allocNurseryBigInts();
+}
+
+void* CompileZone::addressOfNurseryPosition() {
+ return zone()->runtimeFromAnyThread()->gc.addressOfNurseryPosition();
+}
+
+void* CompileZone::addressOfNurseryAllocatedSites() {
+ JSRuntime* rt = zone()->runtimeFromAnyThread();
+ return rt->gc.nursery().addressOfNurseryAllocatedSites();
+}
+
+bool CompileZone::canNurseryAllocateStrings() {
+ return zone()->allocNurseryStrings();
+}
+
+bool CompileZone::canNurseryAllocateBigInts() {
+ return zone()->allocNurseryBigInts();
+}
+
+gc::AllocSite* CompileZone::catchAllAllocSite(JS::TraceKind traceKind,
+ gc::CatchAllAllocSite siteKind) {
+ if (siteKind == gc::CatchAllAllocSite::Optimized) {
+ return zone()->optimizedAllocSite();
+ }
+ return zone()->unknownAllocSite(traceKind);
+}
+
+JS::Realm* CompileRealm::realm() { return reinterpret_cast<JS::Realm*>(this); }
+
+/* static */
+CompileRealm* CompileRealm::get(JS::Realm* realm) {
+ return reinterpret_cast<CompileRealm*>(realm);
+}
+
+CompileZone* CompileRealm::zone() { return CompileZone::get(realm()->zone()); }
+
+CompileRuntime* CompileRealm::runtime() {
+ return CompileRuntime::get(realm()->runtimeFromAnyThread());
+}
+
+const mozilla::non_crypto::XorShift128PlusRNG*
+CompileRealm::addressOfRandomNumberGenerator() {
+ return realm()->addressOfRandomNumberGenerator();
+}
+
+const JitRealm* CompileRealm::jitRealm() { return realm()->jitRealm(); }
+
+const GlobalObject* CompileRealm::maybeGlobal() {
+ // This uses unsafeUnbarrieredMaybeGlobal() so as not to trigger the read
+ // barrier on the global from off thread. This is safe because we
+ // abort Ion compilation when we GC.
+ return realm()->unsafeUnbarrieredMaybeGlobal();
+}
+
+const uint32_t* CompileRealm::addressOfGlobalWriteBarriered() {
+ return &realm()->globalWriteBarriered;
+}
+
+bool CompileRealm::hasAllocationMetadataBuilder() {
+ return realm()->hasAllocationMetadataBuilder();
+}
+
+JitCompileOptions::JitCompileOptions()
+ : profilerSlowAssertionsEnabled_(false),
+ offThreadCompilationAvailable_(false) {}
+
+JitCompileOptions::JitCompileOptions(JSContext* cx) {
+ profilerSlowAssertionsEnabled_ =
+ cx->runtime()->geckoProfiler().enabled() &&
+ cx->runtime()->geckoProfiler().slowAssertionsEnabled();
+ offThreadCompilationAvailable_ = OffThreadCompilationAvailable(cx);
+#ifdef DEBUG
+ ionBailAfterEnabled_ = cx->runtime()->jitRuntime()->ionBailAfterEnabled();
+#endif
+}
diff --git a/js/src/jit/CompileWrappers.h b/js/src/jit/CompileWrappers.h
new file mode 100644
index 0000000000..b281e93d67
--- /dev/null
+++ b/js/src/jit/CompileWrappers.h
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_CompileWrappers_h
+#define jit_CompileWrappers_h
+
+#include <stdint.h>
+
+#include "gc/Pretenuring.h"
+#include "js/TypeDecls.h"
+
+struct JSAtomState;
+
+namespace mozilla::non_crypto {
+class XorShift128PlusRNG;
+}
+
+namespace JS {
+enum class TraceKind;
+}
+
+namespace js {
+
+class GeckoProfilerRuntime;
+class GlobalObject;
+struct JSDOMCallbacks;
+class PropertyName;
+class StaticStrings;
+struct WellKnownSymbols;
+
+using DOMCallbacks = struct JSDOMCallbacks;
+
+namespace gc {
+
+enum class AllocKind : uint8_t;
+
+class FreeSpan;
+
+} // namespace gc
+
+namespace jit {
+
+class JitRuntime;
+
+// During Ion compilation we need access to various bits of the current
+// compartment, runtime and so forth. However, since compilation can run off
+// thread while the main thread is mutating the VM, this access needs
+// to be restricted. The classes below give the compiler an interface to access
+// all necessary information in a threadsafe fashion.
+
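+// As a rough usage sketch (illustrative only, not part of any interface
+// contract): compilation code wraps the raw VM objects once and then only
+// touches the wrappers, e.g.
+//
+//   CompileRuntime* runtime = CompileRuntime::get(cx->runtime());
+//   CompileRealm* realm = CompileRealm::get(cx->realm());
+//   CompileZone* zone = realm->zone();
+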
+class CompileRuntime {
+ JSRuntime* runtime();
+
+ public:
+ static CompileRuntime* get(JSRuntime* rt);
+
+#ifdef JS_GC_ZEAL
+ const uint32_t* addressOfGCZealModeBits();
+#endif
+
+ const JitRuntime* jitRuntime();
+
+ // Compilation does not occur off thread when the Gecko Profiler is enabled.
+ GeckoProfilerRuntime& geckoProfiler();
+
+ bool hadOutOfMemory();
+ bool profilingScripts();
+
+ const JSAtomState& names();
+ const PropertyName* emptyString();
+ const StaticStrings& staticStrings();
+ const WellKnownSymbols& wellKnownSymbols();
+ const JSClass* maybeWindowProxyClass();
+
+ const void* mainContextPtr();
+ const void* addressOfJitStackLimit();
+ const void* addressOfInterruptBits();
+ const void* addressOfZone();
+ const void* addressOfMegamorphicCache();
+ const void* addressOfMegamorphicSetPropCache();
+ const void* addressOfStringToAtomCache();
+ const void* addressOfLastBufferedWholeCell();
+
+#ifdef DEBUG
+ const void* addressOfIonBailAfterCounter();
+#endif
+
+ // DOM callbacks must be threadsafe (and will hopefully be removed soon).
+ const DOMCallbacks* DOMcallbacks();
+
+ bool runtimeMatches(JSRuntime* rt);
+};
+
+class CompileZone {
+ friend class MacroAssembler;
+ JS::Zone* zone();
+
+ public:
+ static CompileZone* get(JS::Zone* zone);
+
+ CompileRuntime* runtime();
+ bool isAtomsZone();
+
+ const uint32_t* addressOfNeedsIncrementalBarrier();
+ uint32_t* addressOfTenuredAllocCount();
+ gc::FreeSpan** addressOfFreeList(gc::AllocKind allocKind);
+ bool allocNurseryObjects();
+ bool allocNurseryStrings();
+ bool allocNurseryBigInts();
+ void* addressOfNurseryPosition();
+
+ void* addressOfNurseryAllocatedSites();
+
+ bool canNurseryAllocateStrings();
+ bool canNurseryAllocateBigInts();
+
+ gc::AllocSite* catchAllAllocSite(JS::TraceKind traceKind,
+ gc::CatchAllAllocSite siteKind);
+};
+
+class JitRealm;
+
+class CompileRealm {
+ JS::Realm* realm();
+
+ public:
+ static CompileRealm* get(JS::Realm* realm);
+
+ CompileZone* zone();
+ CompileRuntime* runtime();
+
+ const void* realmPtr() { return realm(); }
+
+ const mozilla::non_crypto::XorShift128PlusRNG*
+ addressOfRandomNumberGenerator();
+
+ const JitRealm* jitRealm();
+
+ const GlobalObject* maybeGlobal();
+ const uint32_t* addressOfGlobalWriteBarriered();
+
+ bool hasAllocationMetadataBuilder();
+};
+
+class JitCompileOptions {
+ public:
+ JitCompileOptions();
+ explicit JitCompileOptions(JSContext* cx);
+
+ bool profilerSlowAssertionsEnabled() const {
+ return profilerSlowAssertionsEnabled_;
+ }
+
+ bool offThreadCompilationAvailable() const {
+ return offThreadCompilationAvailable_;
+ }
+
+#ifdef DEBUG
+ bool ionBailAfterEnabled() const { return ionBailAfterEnabled_; }
+#endif
+
+ private:
+ bool profilerSlowAssertionsEnabled_;
+ bool offThreadCompilationAvailable_;
+#ifdef DEBUG
+ bool ionBailAfterEnabled_ = false;
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_CompileWrappers_h
diff --git a/js/src/jit/Disassemble.cpp b/js/src/jit/Disassemble.cpp
new file mode 100644
index 0000000000..6026df27d6
--- /dev/null
+++ b/js/src/jit/Disassemble.cpp
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Disassemble.h"
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, int32_t
+
+#include "js/Printf.h" // JS_smprintf
+#include "js/Utility.h" // JS::UniqueChars
+
+#if defined(JS_JITSPEW)
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+# include "zydis/ZydisAPI.h" // zydisDisassemble
+# elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm/disasm/Disasm-arm.h" // js::jit::disasm::*
+# include "jit/arm64/vixl/Decoder-vixl.h" // vixl::Decoder
+# include "jit/arm64/vixl/Disasm-vixl.h" // vixl::Disassembler
+# include "jit/arm64/vixl/Instructions-vixl.h" // vixl::Instruction
+# elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/disasm/Disasm-arm.h" // js::jit::disasm::*
+# endif
+#endif
+
+namespace js {
+namespace jit {
+
+#if defined(JS_JITSPEW) && (defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64))
+
+bool HasDisassembler() { return true; }
+
+void Disassemble(uint8_t* code, size_t length, InstrCallback callback) {
+ zydisDisassemble(code, length, callback);
+}
+
+#elif defined(JS_JITSPEW) && defined(JS_CODEGEN_ARM64)
+
+class ARM64Disassembler : public vixl::Disassembler {
+ public:
+ explicit ARM64Disassembler(InstrCallback callback) : callback_(callback) {}
+
+ protected:
+ void ProcessOutput(const vixl::Instruction* instr) override {
+ JS::UniqueChars formatted = JS_smprintf(
+ "0x%p %08x %s", instr, instr->InstructionBits(), GetOutput());
+ callback_(formatted.get());
+ }
+
+ private:
+ InstrCallback callback_;
+};
+
+bool HasDisassembler() { return true; }
+
+void Disassemble(uint8_t* code, size_t length, InstrCallback callback) {
+ ARM64Disassembler dis(callback);
+ vixl::Decoder decoder;
+ decoder.AppendVisitor(&dis);
+
+ uint8_t* instr = code;
+ uint8_t* end = code + length;
+
+ while (instr < end) {
+ decoder.Decode(reinterpret_cast<vixl::Instruction*>(instr));
+
+ instr += sizeof(vixl::Instr);
+ }
+}
+
+#elif defined(JS_JITSPEW) && defined(JS_CODEGEN_ARM)
+
+bool HasDisassembler() { return true; }
+
+void Disassemble(uint8_t* code, size_t length, InstrCallback callback) {
+ disasm::NameConverter converter;
+ disasm::Disassembler d(converter);
+
+ uint8_t* instr = code;
+ uint8_t* end = code + length;
+
+ while (instr < end) {
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+ buffer[0] = '\0';
+ uint8_t* next_instr = instr + d.InstructionDecode(buffer, instr);
+
+ JS::UniqueChars formatted =
+ JS_smprintf("0x%p %08x %s", instr, *reinterpret_cast<int32_t*>(instr),
+ buffer.start());
+ callback(formatted.get());
+
+ instr = next_instr;
+ }
+}
+
+#else
+
+bool HasDisassembler() { return false; }
+
+void Disassemble(uint8_t* code, size_t length, InstrCallback callback) {
+ callback("*** No disassembly available ***\n");
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/Disassemble.h b/js/src/jit/Disassemble.h
new file mode 100644
index 0000000000..ad132a8554
--- /dev/null
+++ b/js/src/jit/Disassemble.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Disassemble_h
+#define jit_Disassemble_h
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace js {
+namespace jit {
+
+using InstrCallback = void (*)(const char* text);
+
+extern bool HasDisassembler();
+extern void Disassemble(uint8_t* code, size_t length, InstrCallback callback);
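+
+// A usage sketch (illustrative only; |code| and |length| are whatever buffer
+// the caller wants to dump). A capture-less lambda converts to InstrCallback:
+//
+//   js::jit::Disassemble(code, length, [](const char* text) {
+//     fprintf(stderr, "%s\n", text);
+//   });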
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Disassemble_h */
diff --git a/js/src/jit/EdgeCaseAnalysis.cpp b/js/src/jit/EdgeCaseAnalysis.cpp
new file mode 100644
index 0000000000..a92ca93ddd
--- /dev/null
+++ b/js/src/jit/EdgeCaseAnalysis.cpp
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/EdgeCaseAnalysis.h"
+
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+EdgeCaseAnalysis::EdgeCaseAnalysis(MIRGenerator* mir, MIRGraph& graph)
+ : mir(mir), graph(graph) {}
+
+bool EdgeCaseAnalysis::analyzeLate() {
+ // Renumber definitions for NeedNegativeZeroCheck under
+ // analyzeEdgeCasesBackward.
+ uint32_t nextId = 0;
+
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); block++) {
+ for (MDefinitionIterator iter(*block); iter; iter++) {
+ if (mir->shouldCancel("Analyze Late (first loop)")) {
+ return false;
+ }
+
+ iter->setId(nextId++);
+ iter->analyzeEdgeCasesForward();
+ }
+ block->lastIns()->setId(nextId++);
+ }
+
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
+ block++) {
+ for (MInstructionReverseIterator riter(block->rbegin());
+ riter != block->rend(); riter++) {
+ if (mir->shouldCancel("Analyze Late (second loop)")) {
+ return false;
+ }
+
+ riter->analyzeEdgeCasesBackward();
+ }
+ }
+
+ return true;
+}
diff --git a/js/src/jit/EdgeCaseAnalysis.h b/js/src/jit/EdgeCaseAnalysis.h
new file mode 100644
index 0000000000..d3e2a58d31
--- /dev/null
+++ b/js/src/jit/EdgeCaseAnalysis.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_EdgeCaseAnalysis_h
+#define jit_EdgeCaseAnalysis_h
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+class EdgeCaseAnalysis {
+ MIRGenerator* mir;
+ MIRGraph& graph;
+
+ public:
+ EdgeCaseAnalysis(MIRGenerator* mir, MIRGraph& graph);
+ [[nodiscard]] bool analyzeLate();
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_EdgeCaseAnalysis_h */
diff --git a/js/src/jit/EffectiveAddressAnalysis.cpp b/js/src/jit/EffectiveAddressAnalysis.cpp
new file mode 100644
index 0000000000..7d78549387
--- /dev/null
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -0,0 +1,256 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/EffectiveAddressAnalysis.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "util/CheckedArithmetic.h"
+
+using namespace js;
+using namespace jit;
+
+static void AnalyzeLsh(TempAllocator& alloc, MLsh* lsh) {
+ if (lsh->type() != MIRType::Int32) {
+ return;
+ }
+
+ if (lsh->isRecoveredOnBailout()) {
+ return;
+ }
+
+ MDefinition* index = lsh->lhs();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ MConstant* shiftValue = lsh->rhs()->maybeConstantValue();
+ if (!shiftValue) {
+ return;
+ }
+
+ if (shiftValue->type() != MIRType::Int32 ||
+ !IsShiftInScaleRange(shiftValue->toInt32())) {
+ return;
+ }
+
+ Scale scale = ShiftToScale(shiftValue->toInt32());
+
+ int32_t displacement = 0;
+ MInstruction* last = lsh;
+ MDefinition* base = nullptr;
+ while (true) {
+ if (!last->hasOneUse()) {
+ break;
+ }
+
+ MUseIterator use = last->usesBegin();
+ if (!use->consumer()->isDefinition() ||
+ !use->consumer()->toDefinition()->isAdd()) {
+ break;
+ }
+
+ MAdd* add = use->consumer()->toDefinition()->toAdd();
+ if (add->type() != MIRType::Int32 || !add->isTruncated()) {
+ break;
+ }
+
+ MDefinition* other = add->getOperand(1 - add->indexOf(*use));
+
+ if (MConstant* otherConst = other->maybeConstantValue()) {
+ displacement += otherConst->toInt32();
+ } else {
+ if (base) {
+ break;
+ }
+ base = other;
+ }
+
+ last = add;
+ if (last->isRecoveredOnBailout()) {
+ return;
+ }
+ }
+
+ if (!base) {
+ uint32_t elemSize = 1 << ScaleToShift(scale);
+ if (displacement % elemSize != 0) {
+ return;
+ }
+
+ if (!last->hasOneUse()) {
+ return;
+ }
+
+ MUseIterator use = last->usesBegin();
+ if (!use->consumer()->isDefinition() ||
+ !use->consumer()->toDefinition()->isBitAnd()) {
+ return;
+ }
+
+ MBitAnd* bitAnd = use->consumer()->toDefinition()->toBitAnd();
+ if (bitAnd->isRecoveredOnBailout()) {
+ return;
+ }
+
+ MDefinition* other = bitAnd->getOperand(1 - bitAnd->indexOf(*use));
+ MConstant* otherConst = other->maybeConstantValue();
+ if (!otherConst || otherConst->type() != MIRType::Int32) {
+ return;
+ }
+
+ uint32_t bitsClearedByShift = elemSize - 1;
+ uint32_t bitsClearedByMask = ~uint32_t(otherConst->toInt32());
+ if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask) {
+ return;
+ }
+
+ bitAnd->replaceAllUsesWith(last);
+ return;
+ }
+
+ if (base->isRecoveredOnBailout()) {
+ return;
+ }
+
+ MEffectiveAddress* eaddr =
+ MEffectiveAddress::New(alloc, base, index, scale, displacement);
+ last->replaceAllUsesWith(eaddr);
+ last->block()->insertAfter(last, eaddr);
+}
+
+// Transform:
+//
+// [AddI]
+// addl $9, %esi
+// [LoadUnboxedScalar]
+// movsd 0x0(%rbx,%rsi,8), %xmm4
+//
+// into:
+//
+// [LoadUnboxedScalar]
+// movsd 0x48(%rbx,%rsi,8), %xmm4
+//
+// This is possible when the AddI is only used by the LoadUnboxedScalar opcode.
+static void AnalyzeLoadUnboxedScalar(MLoadUnboxedScalar* load) {
+ if (load->isRecoveredOnBailout()) {
+ return;
+ }
+
+ if (!load->getOperand(1)->isAdd()) {
+ return;
+ }
+
+ JitSpew(JitSpew_EAA, "analyze: %s%u", load->opName(), load->id());
+
+ MAdd* add = load->getOperand(1)->toAdd();
+
+ if (add->type() != MIRType::Int32 || !add->hasUses() ||
+ add->truncateKind() != TruncateKind::Truncate) {
+ return;
+ }
+
+ MDefinition* lhs = add->lhs();
+ MDefinition* rhs = add->rhs();
+ MDefinition* constant = nullptr;
+ MDefinition* node = nullptr;
+
+ if (lhs->isConstant()) {
+ constant = lhs;
+ node = rhs;
+ } else if (rhs->isConstant()) {
+ constant = rhs;
+ node = lhs;
+  } else {
+    return;
+  }
+
+ MOZ_ASSERT(constant->type() == MIRType::Int32);
+
+ size_t storageSize = Scalar::byteSize(load->storageType());
+ int32_t c1 = load->offsetAdjustment();
+ int32_t c2 = 0;
+ if (!SafeMul(constant->maybeConstantValue()->toInt32(), storageSize, &c2)) {
+ return;
+ }
+
+ int32_t offset = 0;
+ if (!SafeAdd(c1, c2, &offset)) {
+ return;
+ }
+
+ JitSpew(JitSpew_EAA, "set offset: %d + %d = %d on: %s%u", c1, c2, offset,
+ load->opName(), load->id());
+ load->setOffsetAdjustment(offset);
+ load->replaceOperand(1, node);
+
+ if (!add->hasLiveDefUses() && DeadIfUnused(add) &&
+ add->canRecoverOnBailout()) {
+ JitSpew(JitSpew_EAA, "mark as recovered on bailout: %s%u", add->opName(),
+ add->id());
+ add->setRecoveredOnBailoutUnchecked();
+ }
+}
+
+template <typename AsmJSMemoryAccess>
+void EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins) {
+ MDefinition* base = ins->base();
+
+ if (base->isConstant()) {
+    // If the index is within the minimum heap length, we can optimize away
+    // the bounds check. Asm.js accesses always have an int32 base, and the
+    // memory is always a memory32.
+ int32_t imm = base->toConstant()->toInt32();
+ if (imm >= 0) {
+ int32_t end = (uint32_t)imm + ins->byteSize();
+ if (end >= imm && (uint32_t)end <= mir_->minWasmHeapLength()) {
+ ins->removeBoundsCheck();
+ }
+ }
+ }
+}
+
+// This analysis converts patterns of the form:
+// truncate(x + (y << {0,1,2,3}))
+// truncate(x + (y << {0,1,2,3}) + imm32)
+// into a single lea instruction, and patterns of the form:
+// asmload(x + imm32)
+// asmload(x << {0,1,2,3})
+// asmload((x << {0,1,2,3}) + imm32)
+// asmload((x << {0,1,2,3}) & mask) (where mask is redundant
+// with shift)
+// asmload(((x << {0,1,2,3}) + imm32) & mask) (where mask is redundant
+// with shift + imm32)
+// into a single asmload instruction (and for asmstore too).
+//
+// Additionally, we should consider the general forms:
+// truncate(x + y + imm32)
+// truncate((y << {0,1,2,3}) + imm32)
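+//
+// For instance (an illustrative sketch): truncate(x + (y << 3) + 64) can
+// become a single MEffectiveAddress with base x, index y, scale 8 and
+// displacement 64, which the x86/x64 backends emit as one lea instruction.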
+bool EffectiveAddressAnalysis::analyze() {
+ JitSpew(JitSpew_EAA, "Begin");
+ for (ReversePostorderIterator block(graph_.rpoBegin());
+ block != graph_.rpoEnd(); block++) {
+ for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
+ if (!graph_.alloc().ensureBallast()) {
+ return false;
+ }
+
+ // Note that we don't check for MWasmCompareExchangeHeap
+ // or MWasmAtomicBinopHeap, because the backend and the OOB
+ // mechanism don't support non-zero offsets for them yet
+ // (TODO bug 1254935).
+ if (i->isLsh()) {
+ AnalyzeLsh(graph_.alloc(), i->toLsh());
+ } else if (i->isLoadUnboxedScalar()) {
+ AnalyzeLoadUnboxedScalar(i->toLoadUnboxedScalar());
+ } else if (i->isAsmJSLoadHeap()) {
+ analyzeAsmJSHeapAccess(i->toAsmJSLoadHeap());
+ } else if (i->isAsmJSStoreHeap()) {
+ analyzeAsmJSHeapAccess(i->toAsmJSStoreHeap());
+ }
+ }
+ }
+ return true;
+}
diff --git a/js/src/jit/EffectiveAddressAnalysis.h b/js/src/jit/EffectiveAddressAnalysis.h
new file mode 100644
index 0000000000..f66ba758e5
--- /dev/null
+++ b/js/src/jit/EffectiveAddressAnalysis.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_EffectiveAddressAnalysis_h
+#define jit_EffectiveAddressAnalysis_h
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+class EffectiveAddressAnalysis {
+ MIRGenerator* mir_;
+ MIRGraph& graph_;
+
+ template <typename AsmJSMemoryAccess>
+ void analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins);
+
+ public:
+ EffectiveAddressAnalysis(MIRGenerator* mir, MIRGraph& graph)
+ : mir_(mir), graph_(graph) {}
+
+ [[nodiscard]] bool analyze();
+};
+
+} /* namespace jit */
+} /* namespace js */
+
+#endif /* jit_EffectiveAddressAnalysis_h */
diff --git a/js/src/jit/ExecutableAllocator.cpp b/js/src/jit/ExecutableAllocator.cpp
new file mode 100644
index 0000000000..cbae513455
--- /dev/null
+++ b/js/src/jit/ExecutableAllocator.cpp
@@ -0,0 +1,329 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "jit/ExecutableAllocator.h"
+
+#include "js/MemoryMetrics.h"
+#include "util/Poison.h"
+
+using namespace js::jit;
+
+ExecutablePool::~ExecutablePool() {
+#ifdef DEBUG
+ for (size_t bytes : m_codeBytes) {
+ MOZ_ASSERT(bytes == 0);
+ }
+#endif
+
+ MOZ_ASSERT(!isMarked());
+
+ m_allocator->releasePoolPages(this);
+}
+
+void ExecutablePool::release(bool willDestroy) {
+ MOZ_ASSERT(m_refCount != 0);
+ MOZ_ASSERT_IF(willDestroy, m_refCount == 1);
+ if (--m_refCount == 0) {
+ js_delete(this);
+ }
+}
+
+void ExecutablePool::release(size_t n, CodeKind kind) {
+ m_codeBytes[kind] -= n;
+ MOZ_ASSERT(m_codeBytes[kind] < m_allocation.size); // Shouldn't underflow.
+
+ release();
+}
+
+void ExecutablePool::addRef() {
+ // It should be impossible for us to roll over, because only small
+ // pools have multiple holders, and they have one holder per chunk
+ // of generated code, and they only hold 16KB or so of code.
+ MOZ_ASSERT(m_refCount);
+ ++m_refCount;
+ MOZ_ASSERT(m_refCount, "refcount overflow");
+}
+
+void* ExecutablePool::alloc(size_t n, CodeKind kind) {
+ MOZ_ASSERT(n <= available());
+ void* result = m_freePtr;
+ m_freePtr += n;
+
+ m_codeBytes[kind] += n;
+
+ MOZ_MAKE_MEM_UNDEFINED(result, n);
+ return result;
+}
+
+size_t ExecutablePool::available() const {
+ MOZ_ASSERT(m_end >= m_freePtr);
+ return m_end - m_freePtr;
+}
+
+ExecutableAllocator::~ExecutableAllocator() {
+ for (size_t i = 0; i < m_smallPools.length(); i++) {
+ m_smallPools[i]->release(/* willDestroy = */ true);
+ }
+
+ // If this asserts we have a pool leak.
+ MOZ_ASSERT(m_pools.empty());
+}
+
+ExecutablePool* ExecutableAllocator::poolForSize(size_t n) {
+ // Try to fit in an existing small allocator. Use the pool with the
+ // least available space that is big enough (best-fit). This is the
+ // best strategy because (a) it maximizes the chance of the next
+ // allocation fitting in a small pool, and (b) it minimizes the
+ // potential waste when a small pool is next abandoned.
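+  // For example (sketch): with small pools that have 3 KB and 8 KB available
+  // and a 2 KB request, the 3 KB pool is chosen, keeping the larger pool free
+  // for bigger future requests.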
+ ExecutablePool* minPool = nullptr;
+ for (size_t i = 0; i < m_smallPools.length(); i++) {
+ ExecutablePool* pool = m_smallPools[i];
+ if (n <= pool->available() &&
+ (!minPool || pool->available() < minPool->available())) {
+ minPool = pool;
+ }
+ }
+ if (minPool) {
+ minPool->addRef();
+ return minPool;
+ }
+
+  // If the request is large, we just provide an unshared allocator.
+ if (n > ExecutableCodePageSize) {
+ return createPool(n);
+ }
+
+ // Create a new allocator
+ ExecutablePool* pool = createPool(ExecutableCodePageSize);
+ if (!pool) {
+ return nullptr;
+ }
+ // At this point, local |pool| is the owner.
+
+ if (m_smallPools.length() < maxSmallPools) {
+ // We haven't hit the maximum number of live pools; add the new pool.
+ // If append() OOMs, we just return an unshared allocator.
+ if (m_smallPools.append(pool)) {
+ pool->addRef();
+ }
+ } else {
+ // Find the pool with the least space.
+ int iMin = 0;
+ for (size_t i = 1; i < m_smallPools.length(); i++) {
+ if (m_smallPools[i]->available() < m_smallPools[iMin]->available()) {
+ iMin = i;
+ }
+ }
+
+    // If the new allocator will result in more free space than the small
+    // pool with the least space, then we will use it instead.
+ ExecutablePool* minPool = m_smallPools[iMin];
+ if ((pool->available() - n) > minPool->available()) {
+ minPool->release();
+ m_smallPools[iMin] = pool;
+ pool->addRef();
+ }
+ }
+
+ // Pass ownership to the caller.
+ return pool;
+}
+
+/* static */
+size_t ExecutableAllocator::roundUpAllocationSize(size_t request,
+ size_t granularity) {
+ if ((std::numeric_limits<size_t>::max() - granularity) <= request) {
+ return OVERSIZE_ALLOCATION;
+ }
+
+ // Round up to next page boundary
+ size_t size = request + (granularity - 1);
+ size = size & ~(granularity - 1);
+ MOZ_ASSERT(size >= request);
+ return size;
+}
+
+ExecutablePool* ExecutableAllocator::createPool(size_t n) {
+ size_t allocSize = roundUpAllocationSize(n, ExecutableCodePageSize);
+ if (allocSize == OVERSIZE_ALLOCATION) {
+ return nullptr;
+ }
+
+ ExecutablePool::Allocation a = systemAlloc(allocSize);
+ if (!a.pages) {
+ return nullptr;
+ }
+
+ ExecutablePool* pool = js_new<ExecutablePool>(this, a);
+ if (!pool) {
+ systemRelease(a);
+ return nullptr;
+ }
+
+ if (!m_pools.put(pool)) {
+ // Note: this will call |systemRelease(a)|.
+ js_delete(pool);
+ return nullptr;
+ }
+
+ return pool;
+}
+
+void* ExecutableAllocator::alloc(JSContext* cx, size_t n,
+ ExecutablePool** poolp, CodeKind type) {
+ // Caller must ensure 'n' is word-size aligned. If all allocations are
+ // of word sized quantities, then all subsequent allocations will be
+ // aligned.
+ MOZ_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);
+
+ if (n == OVERSIZE_ALLOCATION) {
+ *poolp = nullptr;
+ return nullptr;
+ }
+
+ *poolp = poolForSize(n);
+ if (!*poolp) {
+ return nullptr;
+ }
+
+ // This alloc is infallible because poolForSize() just obtained
+ // (found, or created if necessary) a pool that had enough space.
+ void* result = (*poolp)->alloc(n, type);
+ MOZ_ASSERT(result);
+
+ return result;
+}
+
+void ExecutableAllocator::releasePoolPages(ExecutablePool* pool) {
+ MOZ_ASSERT(pool->m_allocation.pages);
+ systemRelease(pool->m_allocation);
+
+ // Pool may not be present in m_pools if we hit OOM during creation.
+ if (auto ptr = m_pools.lookup(pool)) {
+ m_pools.remove(ptr);
+ }
+}
+
+void ExecutableAllocator::purge() {
+ for (size_t i = 0; i < m_smallPools.length();) {
+ ExecutablePool* pool = m_smallPools[i];
+ if (pool->m_refCount > 1) {
+ // Releasing this pool is not going to deallocate it, so we might as
+ // well hold on to it and reuse it for future allocations.
+ i++;
+ continue;
+ }
+
+ MOZ_ASSERT(pool->m_refCount == 1);
+ pool->release();
+ m_smallPools.erase(&m_smallPools[i]);
+ }
+}
+
+void ExecutableAllocator::addSizeOfCode(JS::CodeSizes* sizes) const {
+ for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront()) {
+ ExecutablePool* pool = r.front();
+ sizes->ion += pool->m_codeBytes[CodeKind::Ion];
+ sizes->baseline += pool->m_codeBytes[CodeKind::Baseline];
+ sizes->regexp += pool->m_codeBytes[CodeKind::RegExp];
+ sizes->other += pool->m_codeBytes[CodeKind::Other];
+ sizes->unused += pool->m_allocation.size - pool->usedCodeBytes();
+ }
+}
+
+/* static */
+void ExecutableAllocator::reprotectPool(JSRuntime* rt, ExecutablePool* pool,
+ ProtectionSetting protection,
+ MustFlushICache flushICache) {
+ char* start = pool->m_allocation.pages;
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!ReprotectRegion(start, pool->m_freePtr - start, protection,
+ flushICache)) {
+ oomUnsafe.crash("ExecutableAllocator::reprotectPool");
+ }
+}
+
+/* static */
+void ExecutableAllocator::poisonCode(JSRuntime* rt,
+ JitPoisonRangeVector& ranges) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+
+#ifdef DEBUG
+ // Make sure no pools have the mark bit set.
+ for (size_t i = 0; i < ranges.length(); i++) {
+ MOZ_ASSERT(!ranges[i].pool->isMarked());
+ }
+#endif
+
+ for (size_t i = 0; i < ranges.length(); i++) {
+ ExecutablePool* pool = ranges[i].pool;
+ if (pool->m_refCount == 1) {
+ // This is the last reference so the release() call below will
+ // unmap the memory. Don't bother poisoning it.
+ continue;
+ }
+
+ MOZ_ASSERT(pool->m_refCount > 1);
+
+ // Use the pool's mark bit to indicate we made the pool writable.
+ // This avoids reprotecting a pool multiple times.
+ if (!pool->isMarked()) {
+ reprotectPool(rt, pool, ProtectionSetting::Writable, MustFlushICache::No);
+ pool->mark();
+ }
+
+ // Note: we use memset instead of js::Poison because we want to poison
+ // JIT code in release builds too. Furthermore, we don't want the
+ // invalid-ObjectValue poisoning js::Poison does in debug builds.
+ memset(ranges[i].start, JS_SWEPT_CODE_PATTERN, ranges[i].size);
+ MOZ_MAKE_MEM_NOACCESS(ranges[i].start, ranges[i].size);
+ }
+
+ // Make the pools executable again and drop references. We don't flush the
+ // ICache here to not add extra overhead.
+ for (size_t i = 0; i < ranges.length(); i++) {
+ ExecutablePool* pool = ranges[i].pool;
+ if (pool->isMarked()) {
+ reprotectPool(rt, pool, ProtectionSetting::Executable,
+ MustFlushICache::No);
+ pool->unmark();
+ }
+ pool->release();
+ }
+}
+
+ExecutablePool::Allocation ExecutableAllocator::systemAlloc(size_t n) {
+ void* allocation = AllocateExecutableMemory(n, ProtectionSetting::Executable,
+ MemCheckKind::MakeNoAccess);
+ ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocation), n};
+ return alloc;
+}
+
+void ExecutableAllocator::systemRelease(
+ const ExecutablePool::Allocation& alloc) {
+ DeallocateExecutableMemory(alloc.pages, alloc.size);
+}
diff --git a/js/src/jit/ExecutableAllocator.h b/js/src/jit/ExecutableAllocator.h
new file mode 100644
index 0000000000..85c01562c3
--- /dev/null
+++ b/js/src/jit/ExecutableAllocator.h
@@ -0,0 +1,205 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef jit_ExecutableAllocator_h
+#define jit_ExecutableAllocator_h
+
+#include "mozilla/EnumeratedArray.h"
+
+#include <limits>
+#include <stddef.h> // for ptrdiff_t
+
+#include "jit/ProcessExecutableMemory.h"
+#include "js/AllocPolicy.h"
+#include "js/HashTable.h"
+#include "js/TypeDecls.h"
+#include "js/Vector.h"
+
+namespace JS {
+struct CodeSizes;
+} // namespace JS
+
+namespace js {
+namespace jit {
+
+enum class CodeKind : uint8_t { Ion, Baseline, RegExp, Other, Count };
+
+class ExecutableAllocator;
+
+// These are reference-counted. A new one starts with a count of 1.
+class ExecutablePool {
+ friend class ExecutableAllocator;
+
+ private:
+ struct Allocation {
+ char* pages;
+ size_t size;
+ };
+
+ ExecutableAllocator* m_allocator;
+ char* m_freePtr;
+ char* m_end;
+ Allocation m_allocation;
+
+ // Reference count for automatic reclamation.
+ unsigned m_refCount : 31;
+
+ // Flag that can be used by algorithms operating on pools.
+ bool m_mark : 1;
+
+ // Number of bytes currently allocated for each CodeKind.
+ mozilla::EnumeratedArray<CodeKind, CodeKind::Count, size_t> m_codeBytes;
+
+ public:
+ void release(bool willDestroy = false);
+ void release(size_t n, CodeKind kind);
+
+ void addRef();
+
+ ExecutablePool(ExecutableAllocator* allocator, Allocation a)
+ : m_allocator(allocator),
+ m_freePtr(a.pages),
+ m_end(m_freePtr + a.size),
+ m_allocation(a),
+ m_refCount(1),
+ m_mark(false) {
+ for (size_t& count : m_codeBytes) {
+ count = 0;
+ }
+ }
+
+ ~ExecutablePool();
+
+ void mark() {
+ MOZ_ASSERT(!m_mark);
+ m_mark = true;
+ }
+ void unmark() {
+ MOZ_ASSERT(m_mark);
+ m_mark = false;
+ }
+ bool isMarked() const { return m_mark; }
+
+ private:
+ ExecutablePool(const ExecutablePool&) = delete;
+ void operator=(const ExecutablePool&) = delete;
+
+ void* alloc(size_t n, CodeKind kind);
+
+ size_t available() const;
+
+ // Returns the number of bytes that are currently in use (referenced by
+ // live JitCode objects).
+ size_t usedCodeBytes() const {
+ size_t res = 0;
+ for (size_t count : m_codeBytes) {
+ res += count;
+ }
+ return res;
+ }
+};
+
+struct JitPoisonRange {
+ jit::ExecutablePool* pool;
+ void* start;
+ size_t size;
+
+ JitPoisonRange(jit::ExecutablePool* pool, void* start, size_t size)
+ : pool(pool), start(start), size(size) {}
+};
+
+typedef Vector<JitPoisonRange, 0, SystemAllocPolicy> JitPoisonRangeVector;
+
+class ExecutableAllocator {
+ public:
+ ExecutableAllocator() = default;
+ ~ExecutableAllocator();
+
+ void purge();
+
+ // alloc() returns a pointer to some memory, and also (by reference) a
+ // pointer to reference-counted pool. The caller owns a reference to the
+ // pool; i.e. alloc() increments the count before returning the object.
+ void* alloc(JSContext* cx, size_t n, ExecutablePool** poolp, CodeKind type);
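+
+  // A rough caller sketch (hypothetical names; error handling elided). The
+  // returned pool reference must eventually be dropped again:
+  //
+  //   ExecutablePool* pool = nullptr;
+  //   void* mem = execAlloc.alloc(cx, bytes, &pool, CodeKind::Baseline);
+  //   if (!mem) { return nullptr; }
+  //   ...
+  //   pool->release(bytes, CodeKind::Baseline);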
+
+ void releasePoolPages(ExecutablePool* pool);
+
+ void addSizeOfCode(JS::CodeSizes* sizes) const;
+
+ private:
+ static const size_t OVERSIZE_ALLOCATION = size_t(-1);
+
+ static size_t roundUpAllocationSize(size_t request, size_t granularity);
+
+ // On OOM, this will return an Allocation where pages is nullptr.
+ ExecutablePool::Allocation systemAlloc(size_t n);
+ static void systemRelease(const ExecutablePool::Allocation& alloc);
+
+ ExecutablePool* createPool(size_t n);
+ ExecutablePool* poolForSize(size_t n);
+
+ static void reprotectPool(JSRuntime* rt, ExecutablePool* pool,
+ ProtectionSetting protection,
+ MustFlushICache flushICache);
+
+ public:
+ [[nodiscard]] static bool makeWritable(void* start, size_t size) {
+ return ReprotectRegion(start, size, ProtectionSetting::Writable,
+ MustFlushICache::No);
+ }
+
+ [[nodiscard]] static bool makeExecutableAndFlushICache(void* start,
+ size_t size) {
+ return ReprotectRegion(start, size, ProtectionSetting::Executable,
+ MustFlushICache::Yes);
+ }
+
+ static void poisonCode(JSRuntime* rt, JitPoisonRangeVector& ranges);
+
+ private:
+ ExecutableAllocator(const ExecutableAllocator&) = delete;
+ void operator=(const ExecutableAllocator&) = delete;
+
+ // These are strong references; they keep pools alive.
+ static const size_t maxSmallPools = 4;
+ typedef js::Vector<ExecutablePool*, maxSmallPools, js::SystemAllocPolicy>
+ SmallExecPoolVector;
+ SmallExecPoolVector m_smallPools;
+
+ // All live pools are recorded here, just for stats purposes. These are
+ // weak references; they don't keep pools alive. When a pool is destroyed
+ // its reference is removed from m_pools.
+ typedef js::HashSet<ExecutablePool*, js::DefaultHasher<ExecutablePool*>,
+ js::SystemAllocPolicy>
+ ExecPoolHashSet;
+ ExecPoolHashSet m_pools; // All pools, just for stats purposes.
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ExecutableAllocator_h */
diff --git a/js/src/jit/FixedList.h b/js/src/jit/FixedList.h
new file mode 100644
index 0000000000..e8422e24d3
--- /dev/null
+++ b/js/src/jit/FixedList.h
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_FixedList_h
+#define jit_FixedList_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Likely.h"
+
+#include <stddef.h>
+
+#include "jit/JitAllocPolicy.h"
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// List of a fixed length, but the length is unknown until runtime.
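+//
+// A minimal usage sketch (assuming a TempAllocator |alloc| and a length |n|):
+//
+//   FixedList<uint32_t> list;
+//   if (!list.init(alloc, n)) {
+//     return false;
+//   }
+//   list[0] = 1;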
+template <typename T>
+class FixedList {
+ T* list_;
+ size_t length_;
+
+ private:
+ FixedList(const FixedList&); // no copy definition.
+ void operator=(const FixedList*); // no assignment definition.
+
+ public:
+ FixedList() : list_(nullptr), length_(0) {}
+
+ // Dynamic memory allocation requires the ability to report failure.
+ [[nodiscard]] bool init(TempAllocator& alloc, size_t length) {
+ if (length == 0) {
+ return true;
+ }
+
+ list_ = alloc.allocateArray<T>(length);
+ if (!list_) {
+ return false;
+ }
+
+ length_ = length;
+ return true;
+ }
+
+ size_t empty() const { return length_ == 0; }
+
+ size_t length() const { return length_; }
+
+ void shrink(size_t num) {
+ MOZ_ASSERT(num < length_);
+ length_ -= num;
+ }
+
+ [[nodiscard]] bool growBy(TempAllocator& alloc, size_t num) {
+ size_t newlength = length_ + num;
+ if (newlength < length_) {
+ return false;
+ }
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(newlength, &bytes))) {
+ return false;
+ }
+ T* list = (T*)alloc.allocate(bytes);
+ if (MOZ_UNLIKELY(!list)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < length_; i++) {
+ list[i] = list_[i];
+ }
+
+ length_ += num;
+ list_ = list;
+ return true;
+ }
+
+ T& operator[](size_t index) {
+ MOZ_ASSERT(index < length_);
+ return list_[index];
+ }
+ const T& operator[](size_t index) const {
+ MOZ_ASSERT(index < length_);
+ return list_[index];
+ }
+
+ T* data() { return list_; }
+
+ T* begin() { return list_; }
+ T* end() { return list_ + length_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_FixedList_h */
diff --git a/js/src/jit/FlushICache.cpp b/js/src/jit/FlushICache.cpp
new file mode 100644
index 0000000000..1e2ec69272
--- /dev/null
+++ b/js/src/jit/FlushICache.cpp
@@ -0,0 +1,132 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/FlushICache.h"
+
+#ifdef JS_CODEGEN_ARM64
+# include "jit/arm64/vixl/MozCachingDecoder.h"
+# include "jit/arm64/vixl/Simulator-vixl.h"
+#endif
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+
+# ifdef __linux__
+# include <linux/version.h>
+# define LINUX_HAS_MEMBARRIER (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
+# else
+# define LINUX_HAS_MEMBARRIER 0
+# endif
+
+# if LINUX_HAS_MEMBARRIER || defined(__android__)
+# include <string.h>
+
+# if LINUX_HAS_MEMBARRIER
+# include <linux/membarrier.h>
+# include <sys/syscall.h>
+# include <sys/utsname.h>
+# include <unistd.h>
+# elif defined(__android__)
+# include <sys/syscall.h>
+# include <unistd.h>
+# else
+# error "Missing platform-specific declarations for membarrier syscall!"
+# endif // __linux__ / ANDROID
+
+static int membarrier(int cmd, int flags) {
+ return syscall(__NR_membarrier, cmd, flags);
+}
+
+// These definitions come from the Linux kernel source, for kernels before 4.16
+// which didn't have access to these membarrier commands.
+# ifndef MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
+# define MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE (1 << 5)
+# endif
+
+# ifndef MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE
+# define MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE (1 << 6)
+# endif
+# endif // LINUX_HAS_MEMBARRIER || defined(__android__)
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+bool CanFlushExecutionContextForAllThreads() {
+# if (LINUX_HAS_MEMBARRIER || defined(__android__))
+  // On Linux, check that the kernel supports membarrier(2), i.e. that it is
+  // Linux 4.16 or later.
+ //
+ // Note: this code has been extracted (August 2020) from
+ // https://android.googlesource.com/platform/art/+/58520dfba31d6eeef75f5babff15e09aa28e5db8/libartbase/base/membarrier.cc#50
+ static constexpr int kRequiredMajor = 4;
+ static constexpr int kRequiredMinor = 16;
+
+ static bool computed = false;
+ static bool kernelHasMembarrier = false;
+
+ if (computed) {
+ return kernelHasMembarrier;
+ }
+
+ struct utsname uts;
+ int major, minor;
+ kernelHasMembarrier = uname(&uts) == 0 && strcmp(uts.sysname, "Linux") == 0 &&
+ sscanf(uts.release, "%d.%d", &major, &minor) == 2 &&
+ major >= kRequiredMajor &&
+ (major != kRequiredMajor || minor >= kRequiredMinor);
+
+ // As a test bed, try to run the syscall with the command registering the
+ // intent to use the actual membarrier we'll want to carry out later.
+ //
+ // IMPORTANT: This is required or else running the membarrier later won't
+ // actually interrupt the threads in this process.
+ if (kernelHasMembarrier &&
+ membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0) != 0) {
+ kernelHasMembarrier = false;
+ }
+
+ computed = true;
+ return kernelHasMembarrier;
+# else
+ // On other platforms, we assume that the syscall for flushing the icache
+ // will flush the execution context for other cores.
+ return true;
+# endif
+}
+
+void FlushExecutionContextForAllThreads() {
+ // Callers must check that this operation is available.
+ MOZ_RELEASE_ASSERT(CanFlushExecutionContextForAllThreads());
+
+# if defined(JS_SIMULATOR_ARM64) && defined(JS_CACHE_SIMULATOR_ARM64)
+ // Emulate what the real hardware would do by emitting a membarrier that'll
+ // interrupt and flush the execution context of all threads.
+ using js::jit::SimulatorProcess;
+ js::jit::AutoLockSimulatorCache alsc;
+ SimulatorProcess::membarrier();
+# elif (LINUX_HAS_MEMBARRIER || defined(__android__))
+ // The caller has checked this can be performed, which will have registered
+ // this process to receive the membarrier. See above.
+ //
+ // membarrier will trigger an inter-processor-interrupt on any active threads
+ // of this process. This is an execution context synchronization event
+ // equivalent to running an `isb` instruction.
+ if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0) != 0) {
+ // Better safe than sorry.
+ MOZ_CRASH("membarrier can't be executed");
+ }
+# else
+ // On other platforms, we assume that the syscall for flushing the icache
+ // will flush the execution context for other cores.
+# endif
+}
+
+} // namespace jit
+} // namespace js
+
+#endif
diff --git a/js/src/jit/FlushICache.h b/js/src/jit/FlushICache.h
new file mode 100644
index 0000000000..dd15bdc8ff
--- /dev/null
+++ b/js/src/jit/FlushICache.h
@@ -0,0 +1,92 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Flush the instruction cache of instructions in an address range. */
+
+#ifndef jit_FlushICache_h
+#define jit_FlushICache_h
+
+#include "mozilla/Assertions.h" // MOZ_CRASH
+
+#include <stddef.h> // size_t
+
+namespace js {
+namespace jit {
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+
+inline void FlushICache(void* code, size_t size) {
+ // No-op. Code and data caches are coherent on x86 and x64.
+}
+
+#elif (defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)) || \
+ (defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+
+// Invalidate the given code range from the icache. This will also flush the
+// execution context for this core. If this code is to be executed on another
+// thread, that thread must perform an execution context flush first using
+// `FlushExecutionContext` below.
+extern void FlushICache(void* code, size_t size);
+
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+
+inline void FlushICache(void* code, size_t size) { MOZ_CRASH(); }
+
+#else
+# error "Unknown architecture!"
+#endif
+
+#if (defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)) || \
+ (defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+
+inline void FlushExecutionContext() {
+ // No-op. Execution context is coherent with instruction cache.
+}
+inline bool CanFlushExecutionContextForAllThreads() { return true; }
+inline void FlushExecutionContextForAllThreads() {
+ // No-op. Execution context is coherent with instruction cache.
+}
+
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+
+inline void FlushExecutionContext() { MOZ_CRASH(); }
+inline bool CanFlushExecutionContextForAllThreads() { MOZ_CRASH(); }
+inline void FlushExecutionContextForAllThreads() { MOZ_CRASH(); }
+
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+
+// ARM and ARM64 must flush the instruction pipeline of the current core
+// before executing newly JIT'ed code. This will remove any stale data from
+// the pipeline that may have referenced invalidated instructions.
+//
+// `FlushICache` will perform this for the thread that compiles the code, but
+// other threads that may execute the code are responsible for calling this
+// function themselves.
+extern void FlushExecutionContext();
+
+// Some platforms can flush the execution context for other threads using a
+// syscall. This is required when JIT'ed code will be published to multiple
+// threads without a synchronization point where a `FlushExecutionContext`
+// could be inserted.
+extern bool CanFlushExecutionContextForAllThreads();
+
+// Flushes the execution context of all threads in this process, equivalent to
+// running `FlushExecutionContext` on every thread.
+//
+// Callers must ensure `CanFlushExecutionContextForAllThreads` is true, or
+// else this will crash.
+extern void FlushExecutionContextForAllThreads();
+
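+// A minimal usage sketch (illustrative only; `compileSomething`, `code`, and
+// `size` are hypothetical placeholders, not part of this API):
+//
+//   uint8_t* code = compileSomething(&size);  // compiler thread
+//   FlushICache(code, size);   // invalidate icache + flush this core
+//
+//   FlushExecutionContext();   // on every other thread that will run `code`
+//
+//   if (CanFlushExecutionContextForAllThreads()) {
+//     FlushExecutionContextForAllThreads();  // or flush every thread at once
+//   }
+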
+#else
+# error "Unknown architecture!"
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_FlushICache_h
diff --git a/js/src/jit/FoldLinearArithConstants.cpp b/js/src/jit/FoldLinearArithConstants.cpp
new file mode 100644
index 0000000000..5cb5ef62f6
--- /dev/null
+++ b/js/src/jit/FoldLinearArithConstants.cpp
@@ -0,0 +1,112 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/FoldLinearArithConstants.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace jit;
+
+namespace js {
+namespace jit {
+
+// Mark this node and its children as RecoveredOnBailout when they are not used.
+// The marked nodes will be removed during DCE. Marking as RecoveredOnBailout is
+// necessary because the Sink pass is run before this pass.
+static void markNodesAsRecoveredOnBailout(MDefinition* def) {
+ if (def->hasLiveDefUses() || !DeadIfUnused(def) ||
+ !def->canRecoverOnBailout()) {
+ return;
+ }
+
+ JitSpew(JitSpew_FLAC, "mark as recovered on bailout: %s%u", def->opName(),
+ def->id());
+ def->setRecoveredOnBailoutUnchecked();
+
+ // Recursively mark nodes that do not have multiple uses. This loop is
+ // necessary because a node could be an unused right-shift-by-zero or an
+ // unused add, and both need to be marked as RecoveredOnBailout.
+ for (size_t i = 0; i < def->numOperands(); i++) {
+ markNodesAsRecoveredOnBailout(def->getOperand(i));
+ }
+}
+
+// Fold AddIs with one variable and two or more constants into one AddI.
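+//
+// For example (illustrative), ((x + 1) + 2) is rewritten as x + 3; the
+// now-unused intermediate add is marked RecoveredOnBailout so DCE removes it.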
+static void AnalyzeAdd(TempAllocator& alloc, MAdd* add) {
+ if (add->type() != MIRType::Int32 || add->isRecoveredOnBailout()) {
+ return;
+ }
+
+ if (!add->hasUses()) {
+ return;
+ }
+
+ JitSpew(JitSpew_FLAC, "analyze add: %s%u", add->opName(), add->id());
+
+ SimpleLinearSum sum = ExtractLinearSum(add);
+ if (sum.constant == 0 || !sum.term) {
+ return;
+ }
+
+ // Determine which operand is the constant.
+ int idx = add->getOperand(0)->isConstant() ? 0 : 1;
+ if (add->getOperand(idx)->isConstant()) {
+ // Do not replace an add where the outcome is the same add instruction.
+ MOZ_ASSERT(add->getOperand(idx)->toConstant()->type() == MIRType::Int32);
+ if (sum.term == add->getOperand(1 - idx) ||
+ sum.constant == add->getOperand(idx)->toConstant()->toInt32()) {
+ return;
+ }
+ }
+
+ MInstruction* rhs = MConstant::New(alloc, Int32Value(sum.constant));
+ add->block()->insertBefore(add, rhs);
+
+ MAdd* addNew = MAdd::New(alloc, sum.term, rhs, add->truncateKind());
+ addNew->setBailoutKind(add->bailoutKind());
+
+ add->replaceAllLiveUsesWith(addNew);
+ add->block()->insertBefore(add, addNew);
+ JitSpew(JitSpew_FLAC, "replaced with: %s%u", addNew->opName(), addNew->id());
+ JitSpew(JitSpew_FLAC, "and constant: %s%u (%d)", rhs->opName(), rhs->id(),
+ sum.constant);
+
+ // Mark the stale nodes as RecoveredOnBailout since the Sink pass has
+ // been run before this pass. DCE will then remove the unused nodes.
+ markNodesAsRecoveredOnBailout(add);
+}
+
+bool FoldLinearArithConstants(MIRGenerator* mir, MIRGraph& graph) {
+ JitSpew(JitSpew_FLAC, "Begin");
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
+ block++) {
+ if (mir->shouldCancel("Fold Linear Arithmetic Constants (main loop)")) {
+ return false;
+ }
+
+ for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+
+ if (mir->shouldCancel("Fold Linear Arithmetic Constants (inner loop)")) {
+ return false;
+ }
+
+ if (i->isAdd()) {
+ AnalyzeAdd(graph.alloc(), i->toAdd());
+ }
+ }
+ }
+ return true;
+}
+
+} /* namespace jit */
+} /* namespace js */
diff --git a/js/src/jit/FoldLinearArithConstants.h b/js/src/jit/FoldLinearArithConstants.h
new file mode 100644
index 0000000000..78f86f02e5
--- /dev/null
+++ b/js/src/jit/FoldLinearArithConstants.h
@@ -0,0 +1,21 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_FoldLinearArithConstants_h
+#define jit_FoldLinearArithConstants_h
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+[[nodiscard]] bool FoldLinearArithConstants(MIRGenerator* mir, MIRGraph& graph);
+
+} /* namespace jit */
+} /* namespace js */
+
+#endif /* jit_FoldLinearArithConstants_h */
diff --git a/js/src/jit/GenerateAtomicOperations.py b/js/src/jit/GenerateAtomicOperations.py
new file mode 100644
index 0000000000..24b5a191cf
--- /dev/null
+++ b/js/src/jit/GenerateAtomicOperations.py
@@ -0,0 +1,873 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This script generates jit/AtomicOperationsGenerated.h
+#
+# See the big comment in jit/AtomicOperations.h for an explanation.
+
+import buildconfig
+
+is_64bit = "JS_64BIT" in buildconfig.defines
+cpu_arch = buildconfig.substs["CPU_ARCH"]
+is_gcc = buildconfig.substs["CC_TYPE"] == "gcc"
+
+
+def fmt_insn(s):
+ return '"' + s + '\\n\\t"\n'
+
+
+def gen_seqcst(fun_name):
+ if cpu_arch in ("x86", "x86_64"):
+ return r"""
+ INLINE_ATTR void %(fun_name)s() {
+ asm volatile ("mfence\n\t" ::: "memory");
+ }""" % {
+ "fun_name": fun_name,
+ }
+ if cpu_arch == "aarch64":
+ return r"""
+ INLINE_ATTR void %(fun_name)s() {
+ asm volatile ("dmb ish\n\t" ::: "memory");
+ }""" % {
+ "fun_name": fun_name,
+ }
+ if cpu_arch == "arm":
+ return r"""
+ INLINE_ATTR void %(fun_name)s() {
+ asm volatile ("dmb sy\n\t" ::: "memory");
+ }""" % {
+ "fun_name": fun_name,
+ }
+ raise Exception("Unexpected arch")
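+
+# For illustration, on x86/x86_64 gen_seqcst("AtomicFenceSeqCst") returns
+# (modulo whitespace):
+#
+#   INLINE_ATTR void AtomicFenceSeqCst() {
+#     asm volatile ("mfence\n\t" ::: "memory");
+#   }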
+
+
+def gen_load(fun_name, cpp_type, size, barrier):
+ # NOTE: the assembly code must match the generated code in:
+ # - CacheIRCompiler::emitAtomicsLoadResult
+ # - LIRGenerator::visitLoadUnboxedScalar
+ # - CodeGenerator::visitAtomicLoad64 (on 64-bit platforms)
+ # - MacroAssembler::wasmLoad
+ if cpu_arch in ("x86", "x86_64"):
+ insns = ""
+ if barrier:
+ insns += fmt_insn("mfence")
+ if size == 8:
+ insns += fmt_insn("movb (%[arg]), %[res]")
+ elif size == 16:
+ insns += fmt_insn("movw (%[arg]), %[res]")
+ elif size == 32:
+ insns += fmt_insn("movl (%[arg]), %[res]")
+ else:
+ assert size == 64
+ insns += fmt_insn("movq (%[arg]), %[res]")
+ if barrier:
+ insns += fmt_insn("mfence")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
+ %(cpp_type)s res;
+ asm volatile (%(insns)s
+ : [res] "=r" (res)
+ : [arg] "r" (arg)
+ : "memory");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "aarch64":
+ insns = ""
+ if barrier:
+ insns += fmt_insn("dmb ish")
+ if size == 8:
+ insns += fmt_insn("ldrb %w[res], [%x[arg]]")
+ elif size == 16:
+ insns += fmt_insn("ldrh %w[res], [%x[arg]]")
+ elif size == 32:
+ insns += fmt_insn("ldr %w[res], [%x[arg]]")
+ else:
+ assert size == 64
+ insns += fmt_insn("ldr %x[res], [%x[arg]]")
+ if barrier:
+ insns += fmt_insn("dmb ish")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
+ %(cpp_type)s res;
+ asm volatile (%(insns)s
+ : [res] "=r" (res)
+ : [arg] "r" (arg)
+ : "memory");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "arm":
+ insns = ""
+ if barrier:
+ insns += fmt_insn("dmb sy")
+ if size == 8:
+ insns += fmt_insn("ldrb %[res], [%[arg]]")
+ elif size == 16:
+ insns += fmt_insn("ldrh %[res], [%[arg]]")
+ else:
+ assert size == 32
+ insns += fmt_insn("ldr %[res], [%[arg]]")
+ if barrier:
+ insns += fmt_insn("dmb sy")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
+ %(cpp_type)s res;
+ asm volatile (%(insns)s
+ : [res] "=r" (res)
+ : [arg] "r" (arg)
+ : "memory");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ raise Exception("Unexpected arch")
+
+
+def gen_store(fun_name, cpp_type, size, barrier):
+ # NOTE: the assembly code must match the generated code in:
+ # - CacheIRCompiler::emitAtomicsStoreResult
+ # - LIRGenerator::visitStoreUnboxedScalar
+ # - CodeGenerator::visitAtomicStore64 (on 64-bit platforms)
+ # - MacroAssembler::wasmStore
+ if cpu_arch in ("x86", "x86_64"):
+ insns = ""
+ if barrier:
+ insns += fmt_insn("mfence")
+ if size == 8:
+ insns += fmt_insn("movb %[val], (%[addr])")
+ elif size == 16:
+ insns += fmt_insn("movw %[val], (%[addr])")
+ elif size == 32:
+ insns += fmt_insn("movl %[val], (%[addr])")
+ else:
+ assert size == 64
+ insns += fmt_insn("movq %[val], (%[addr])")
+ if barrier:
+ insns += fmt_insn("mfence")
+ return """
+ INLINE_ATTR void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ asm volatile (%(insns)s
+ :
+ : [addr] "r" (addr), [val] "r"(val)
+ : "memory");
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "aarch64":
+ insns = ""
+ if barrier:
+ insns += fmt_insn("dmb ish")
+ if size == 8:
+ insns += fmt_insn("strb %w[val], [%x[addr]]")
+ elif size == 16:
+ insns += fmt_insn("strh %w[val], [%x[addr]]")
+ elif size == 32:
+ insns += fmt_insn("str %w[val], [%x[addr]]")
+ else:
+ assert size == 64
+ insns += fmt_insn("str %x[val], [%x[addr]]")
+ if barrier:
+ insns += fmt_insn("dmb ish")
+ return """
+ INLINE_ATTR void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ asm volatile (%(insns)s
+ :
+ : [addr] "r" (addr), [val] "r"(val)
+ : "memory");
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "arm":
+ insns = ""
+ if barrier:
+ insns += fmt_insn("dmb sy")
+ if size == 8:
+ insns += fmt_insn("strb %[val], [%[addr]]")
+ elif size == 16:
+ insns += fmt_insn("strh %[val], [%[addr]]")
+ else:
+ assert size == 32
+ insns += fmt_insn("str %[val], [%[addr]]")
+ if barrier:
+ insns += fmt_insn("dmb sy")
+ return """
+ INLINE_ATTR void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ asm volatile (%(insns)s
+ :
+ : [addr] "r" (addr), [val] "r"(val)
+ : "memory");
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ raise Exception("Unexpected arch")
+
+
+def gen_exchange(fun_name, cpp_type, size):
+ # NOTE: the assembly code must match the generated code in:
+ # - MacroAssembler::atomicExchange
+ # - MacroAssembler::atomicExchange64 (on 64-bit platforms)
+ if cpu_arch in ("x86", "x86_64"):
+ # Request an input/output register for `val` so that we can simply XCHG it
+ # with *addr.
+ insns = ""
+ if size == 8:
+ insns += fmt_insn("xchgb %[val], (%[addr])")
+ elif size == 16:
+ insns += fmt_insn("xchgw %[val], (%[addr])")
+ elif size == 32:
+ insns += fmt_insn("xchgl %[val], (%[addr])")
+ else:
+ assert size == 64
+ insns += fmt_insn("xchgq %[val], (%[addr])")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ asm volatile (%(insns)s
+ : [val] "+r" (val)
+ : [addr] "r" (addr)
+ : "memory");
+ return val;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "aarch64":
+ insns = ""
+ insns += fmt_insn("dmb ish")
+ insns += fmt_insn("0:")
+ if size == 8:
+ insns += fmt_insn("ldxrb %w[res], [%x[addr]]")
+ insns += fmt_insn("stxrb %w[scratch], %w[val], [%x[addr]]")
+ elif size == 16:
+ insns += fmt_insn("ldxrh %w[res], [%x[addr]]")
+ insns += fmt_insn("stxrh %w[scratch], %w[val], [%x[addr]]")
+ elif size == 32:
+ insns += fmt_insn("ldxr %w[res], [%x[addr]]")
+ insns += fmt_insn("stxr %w[scratch], %w[val], [%x[addr]]")
+ else:
+ assert size == 64
+ insns += fmt_insn("ldxr %x[res], [%x[addr]]")
+ insns += fmt_insn("stxr %w[scratch], %x[val], [%x[addr]]")
+ insns += fmt_insn("cbnz %w[scratch], 0b")
+ insns += fmt_insn("dmb ish")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ %(cpp_type)s res;
+ uint32_t scratch;
+ asm volatile (%(insns)s
+ : [res] "=&r"(res), [scratch] "=&r"(scratch)
+ : [addr] "r" (addr), [val] "r"(val)
+ : "memory", "cc");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "arm":
+ insns = ""
+ insns += fmt_insn("dmb sy")
+ insns += fmt_insn("0:")
+ if size == 8:
+ insns += fmt_insn("ldrexb %[res], [%[addr]]")
+ insns += fmt_insn("strexb %[scratch], %[val], [%[addr]]")
+ elif size == 16:
+ insns += fmt_insn("ldrexh %[res], [%[addr]]")
+ insns += fmt_insn("strexh %[scratch], %[val], [%[addr]]")
+ else:
+ assert size == 32
+ insns += fmt_insn("ldrex %[res], [%[addr]]")
+ insns += fmt_insn("strex %[scratch], %[val], [%[addr]]")
+ insns += fmt_insn("cmp %[scratch], #1")
+ insns += fmt_insn("beq 0b")
+ insns += fmt_insn("dmb sy")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ %(cpp_type)s res;
+ uint32_t scratch;
+ asm volatile (%(insns)s
+ : [res] "=&r"(res), [scratch] "=&r"(scratch)
+ : [addr] "r" (addr), [val] "r"(val)
+ : "memory", "cc");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ raise Exception("Unexpected arch")
+
+
+def gen_cmpxchg(fun_name, cpp_type, size):
+ # NOTE: the assembly code must match the generated code in:
+ # - MacroAssembler::compareExchange
+ # - MacroAssembler::compareExchange64
+ if cpu_arch == "x86" and size == 64:
+ # Use a +A constraint to load `oldval` into EDX:EAX as input/output.
+ # `newval` is loaded into ECX:EBX.
+ return r"""
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
+ %(cpp_type)s oldval,
+ %(cpp_type)s newval) {
+ asm volatile ("lock; cmpxchg8b (%%[addr])\n\t"
+ : "+A" (oldval)
+ : [addr] "r" (addr),
+ "b" (uint32_t(newval & 0xffff'ffff)),
+ "c" (uint32_t(newval >> 32))
+ : "memory", "cc");
+ return oldval;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ }
+ if cpu_arch == "arm" and size == 64:
+ return r"""
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
+ %(cpp_type)s oldval,
+ %(cpp_type)s newval) {
+ uint32_t oldval0 = oldval & 0xffff'ffff;
+ uint32_t oldval1 = oldval >> 32;
+ uint32_t newval0 = newval & 0xffff'ffff;
+ uint32_t newval1 = newval >> 32;
+ asm volatile (
+ "dmb sy\n\t"
+ "0: ldrexd r0, r1, [%%[addr]]\n\t"
+ "cmp r0, %%[oldval0]\n\t"
+ "bne 1f\n\t"
+ "cmp r1, %%[oldval1]\n\t"
+ "bne 1f\n\t"
+ "mov r2, %%[newval0]\n\t"
+ "mov r3, %%[newval1]\n\t"
+ "strexd r4, r2, r3, [%%[addr]]\n\t"
+ "cmp r4, #1\n\t"
+ "beq 0b\n\t"
+ "1: dmb sy\n\t"
+ "mov %%[oldval0], r0\n\t"
+ "mov %%[oldval1], r1\n\t"
+ : [oldval0] "+&r" (oldval0), [oldval1] "+&r"(oldval1)
+ : [addr] "r" (addr), [newval0] "r" (newval0), [newval1] "r" (newval1)
+ : "memory", "cc", "r0", "r1", "r2", "r3", "r4");
+ return uint64_t(oldval0) | (uint64_t(oldval1) << 32);
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ }
+ if cpu_arch in ("x86", "x86_64"):
+ # Use a +a constraint to load `oldval` into RAX as input/output register.
+ insns = ""
+ if size == 8:
+ insns += fmt_insn("lock; cmpxchgb %[newval], (%[addr])")
+ elif size == 16:
+ insns += fmt_insn("lock; cmpxchgw %[newval], (%[addr])")
+ elif size == 32:
+ insns += fmt_insn("lock; cmpxchgl %[newval], (%[addr])")
+ else:
+ assert size == 64
+ insns += fmt_insn("lock; cmpxchgq %[newval], (%[addr])")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
+ %(cpp_type)s oldval,
+ %(cpp_type)s newval) {
+ asm volatile (%(insns)s
+ : [oldval] "+a" (oldval)
+ : [addr] "r" (addr), [newval] "r" (newval)
+ : "memory", "cc");
+ return oldval;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "aarch64":
+ insns = ""
+ insns += fmt_insn("dmb ish")
+ insns += fmt_insn("0:")
+ if size == 8:
+ insns += fmt_insn("uxtb %w[scratch], %w[oldval]")
+ insns += fmt_insn("ldxrb %w[res], [%x[addr]]")
+ insns += fmt_insn("cmp %w[res], %w[scratch]")
+ insns += fmt_insn("b.ne 1f")
+ insns += fmt_insn("stxrb %w[scratch], %w[newval], [%x[addr]]")
+ elif size == 16:
+ insns += fmt_insn("uxth %w[scratch], %w[oldval]")
+ insns += fmt_insn("ldxrh %w[res], [%x[addr]]")
+ insns += fmt_insn("cmp %w[res], %w[scratch]")
+ insns += fmt_insn("b.ne 1f")
+ insns += fmt_insn("stxrh %w[scratch], %w[newval], [%x[addr]]")
+ elif size == 32:
+ insns += fmt_insn("mov %w[scratch], %w[oldval]")
+ insns += fmt_insn("ldxr %w[res], [%x[addr]]")
+ insns += fmt_insn("cmp %w[res], %w[scratch]")
+ insns += fmt_insn("b.ne 1f")
+ insns += fmt_insn("stxr %w[scratch], %w[newval], [%x[addr]]")
+ else:
+ assert size == 64
+ insns += fmt_insn("mov %x[scratch], %x[oldval]")
+ insns += fmt_insn("ldxr %x[res], [%x[addr]]")
+ insns += fmt_insn("cmp %x[res], %x[scratch]")
+ insns += fmt_insn("b.ne 1f")
+ insns += fmt_insn("stxr %w[scratch], %x[newval], [%x[addr]]")
+ insns += fmt_insn("cbnz %w[scratch], 0b")
+ insns += fmt_insn("1: dmb ish")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
+ %(cpp_type)s oldval,
+ %(cpp_type)s newval) {
+ %(cpp_type)s res, scratch;
+ asm volatile (%(insns)s
+ : [res] "=&r" (res), [scratch] "=&r" (scratch)
+ : [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
+ : "memory", "cc");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "arm":
+ insns = ""
+ insns += fmt_insn("dmb sy")
+ insns += fmt_insn("0:")
+ if size == 8:
+ insns += fmt_insn("uxtb %[scratch], %[oldval]")
+ insns += fmt_insn("ldrexb %[res], [%[addr]]")
+ insns += fmt_insn("cmp %[res], %[scratch]")
+ insns += fmt_insn("bne 1f")
+ insns += fmt_insn("strexb %[scratch], %[newval], [%[addr]]")
+ elif size == 16:
+ insns += fmt_insn("uxth %[scratch], %[oldval]")
+ insns += fmt_insn("ldrexh %[res], [%[addr]]")
+ insns += fmt_insn("cmp %[res], %[scratch]")
+ insns += fmt_insn("bne 1f")
+ insns += fmt_insn("strexh %[scratch], %[newval], [%[addr]]")
+ else:
+ assert size == 32
+ insns += fmt_insn("mov %[scratch], %[oldval]")
+ insns += fmt_insn("ldrex %[res], [%[addr]]")
+ insns += fmt_insn("cmp %[res], %[scratch]")
+ insns += fmt_insn("bne 1f")
+ insns += fmt_insn("strex %[scratch], %[newval], [%[addr]]")
+ insns += fmt_insn("cmp %[scratch], #1")
+ insns += fmt_insn("beq 0b")
+ insns += fmt_insn("1: dmb sy")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
+ %(cpp_type)s oldval,
+ %(cpp_type)s newval) {
+ %(cpp_type)s res, scratch;
+ asm volatile (%(insns)s
+ : [res] "=&r" (res), [scratch] "=&r" (scratch)
+ : [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
+ : "memory", "cc");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ raise Exception("Unexpected arch")
+
+
+def gen_fetchop(fun_name, cpp_type, size, op):
+ # NOTE: the assembly code must match the generated code in:
+ # - MacroAssembler::atomicFetchOp
+ # - MacroAssembler::atomicFetchOp64 (on 64-bit platforms)
+ if cpu_arch in ("x86", "x86_64"):
+ # The `add` operation can be optimized with XADD.
+ if op == "add":
+ insns = ""
+ if size == 8:
+ insns += fmt_insn("lock; xaddb %[val], (%[addr])")
+ elif size == 16:
+ insns += fmt_insn("lock; xaddw %[val], (%[addr])")
+ elif size == 32:
+ insns += fmt_insn("lock; xaddl %[val], (%[addr])")
+ else:
+ assert size == 64
+ insns += fmt_insn("lock; xaddq %[val], (%[addr])")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ asm volatile (%(insns)s
+ : [val] "+&r" (val)
+ : [addr] "r" (addr)
+ : "memory", "cc");
+ return val;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ # Use a +a constraint to ensure `res` is stored in RAX. This is required
+ # for the CMPXCHG instruction.
+ insns = ""
+ if size == 8:
+ insns += fmt_insn("movb (%[addr]), %[res]")
+ insns += fmt_insn("0: movb %[res], %[scratch]")
+ insns += fmt_insn("OPb %[val], %[scratch]")
+ insns += fmt_insn("lock; cmpxchgb %[scratch], (%[addr])")
+ elif size == 16:
+ insns += fmt_insn("movw (%[addr]), %[res]")
+ insns += fmt_insn("0: movw %[res], %[scratch]")
+ insns += fmt_insn("OPw %[val], %[scratch]")
+ insns += fmt_insn("lock; cmpxchgw %[scratch], (%[addr])")
+ elif size == 32:
+ insns += fmt_insn("movl (%[addr]), %[res]")
+ insns += fmt_insn("0: movl %[res], %[scratch]")
+ insns += fmt_insn("OPl %[val], %[scratch]")
+ insns += fmt_insn("lock; cmpxchgl %[scratch], (%[addr])")
+ else:
+ assert size == 64
+ insns += fmt_insn("movq (%[addr]), %[res]")
+ insns += fmt_insn("0: movq %[res], %[scratch]")
+ insns += fmt_insn("OPq %[val], %[scratch]")
+ insns += fmt_insn("lock; cmpxchgq %[scratch], (%[addr])")
+ insns = insns.replace("OP", op)
+ insns += fmt_insn("jnz 0b")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ %(cpp_type)s res, scratch;
+ asm volatile (%(insns)s
+ : [res] "=&a" (res), [scratch] "=&r" (scratch)
+ : [addr] "r" (addr), [val] "r"(val)
+ : "memory", "cc");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "aarch64":
+ insns = ""
+ insns += fmt_insn("dmb ish")
+ insns += fmt_insn("0:")
+ if size == 8:
+ insns += fmt_insn("ldxrb %w[res], [%x[addr]]")
+ insns += fmt_insn("OP %x[scratch1], %x[res], %x[val]")
+ insns += fmt_insn("stxrb %w[scratch2], %w[scratch1], [%x[addr]]")
+ elif size == 16:
+ insns += fmt_insn("ldxrh %w[res], [%x[addr]]")
+ insns += fmt_insn("OP %x[scratch1], %x[res], %x[val]")
+ insns += fmt_insn("stxrh %w[scratch2], %w[scratch1], [%x[addr]]")
+ elif size == 32:
+ insns += fmt_insn("ldxr %w[res], [%x[addr]]")
+ insns += fmt_insn("OP %x[scratch1], %x[res], %x[val]")
+ insns += fmt_insn("stxr %w[scratch2], %w[scratch1], [%x[addr]]")
+ else:
+ assert size == 64
+ insns += fmt_insn("ldxr %x[res], [%x[addr]]")
+ insns += fmt_insn("OP %x[scratch1], %x[res], %x[val]")
+ insns += fmt_insn("stxr %w[scratch2], %x[scratch1], [%x[addr]]")
+ cpu_op = op
+ if cpu_op == "or":
+ cpu_op = "orr"
+ if cpu_op == "xor":
+ cpu_op = "eor"
+ insns = insns.replace("OP", cpu_op)
+ insns += fmt_insn("cbnz %w[scratch2], 0b")
+ insns += fmt_insn("dmb ish")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ %(cpp_type)s res;
+ uintptr_t scratch1, scratch2;
+ asm volatile (%(insns)s
+ : [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
+ : [addr] "r" (addr), [val] "r"(val)
+ : "memory", "cc");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ if cpu_arch == "arm":
+ insns = ""
+ insns += fmt_insn("dmb sy")
+ insns += fmt_insn("0:")
+ if size == 8:
+ insns += fmt_insn("ldrexb %[res], [%[addr]]")
+ insns += fmt_insn("OP %[scratch1], %[res], %[val]")
+ insns += fmt_insn("strexb %[scratch2], %[scratch1], [%[addr]]")
+ elif size == 16:
+ insns += fmt_insn("ldrexh %[res], [%[addr]]")
+ insns += fmt_insn("OP %[scratch1], %[res], %[val]")
+ insns += fmt_insn("strexh %[scratch2], %[scratch1], [%[addr]]")
+ else:
+ assert size == 32
+ insns += fmt_insn("ldrex %[res], [%[addr]]")
+ insns += fmt_insn("OP %[scratch1], %[res], %[val]")
+ insns += fmt_insn("strex %[scratch2], %[scratch1], [%[addr]]")
+ cpu_op = op
+ if cpu_op == "or":
+ cpu_op = "orr"
+ if cpu_op == "xor":
+ cpu_op = "eor"
+ insns = insns.replace("OP", cpu_op)
+ insns += fmt_insn("cmp %[scratch2], #1")
+ insns += fmt_insn("beq 0b")
+ insns += fmt_insn("dmb sy")
+ return """
+ INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+ %(cpp_type)s res;
+ uintptr_t scratch1, scratch2;
+ asm volatile (%(insns)s
+ : [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
+ : [addr] "r" (addr), [val] "r"(val)
+ : "memory", "cc");
+ return res;
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
+ raise Exception("Unexpected arch")
+
+
+def gen_copy(fun_name, cpp_type, size, unroll, direction):
+ assert direction in ("down", "up")
+ offset = 0
+ if direction == "up":
+ offset = unroll - 1
+ insns = ""
+ for i in range(unroll):
+ if cpu_arch in ("x86", "x86_64"):
+ if size == 1:
+ insns += fmt_insn("movb OFFSET(%[src]), %[scratch]")
+ insns += fmt_insn("movb %[scratch], OFFSET(%[dst])")
+ elif size == 4:
+ insns += fmt_insn("movl OFFSET(%[src]), %[scratch]")
+ insns += fmt_insn("movl %[scratch], OFFSET(%[dst])")
+ else:
+ assert size == 8
+ insns += fmt_insn("movq OFFSET(%[src]), %[scratch]")
+ insns += fmt_insn("movq %[scratch], OFFSET(%[dst])")
+ elif cpu_arch == "aarch64":
+ if size == 1:
+ insns += fmt_insn("ldrb %w[scratch], [%x[src], OFFSET]")
+ insns += fmt_insn("strb %w[scratch], [%x[dst], OFFSET]")
+ else:
+ assert size == 8
+ insns += fmt_insn("ldr %x[scratch], [%x[src], OFFSET]")
+ insns += fmt_insn("str %x[scratch], [%x[dst], OFFSET]")
+ elif cpu_arch == "arm":
+ if size == 1:
+ insns += fmt_insn("ldrb %[scratch], [%[src], #OFFSET]")
+ insns += fmt_insn("strb %[scratch], [%[dst], #OFFSET]")
+ else:
+ assert size == 4
+ insns += fmt_insn("ldr %[scratch], [%[src], #OFFSET]")
+ insns += fmt_insn("str %[scratch], [%[dst], #OFFSET]")
+ else:
+ raise Exception("Unexpected arch")
+ insns = insns.replace("OFFSET", str(offset * size))
+
+ if direction == "down":
+ offset += 1
+ else:
+ offset -= 1
+
+ return """
+ INLINE_ATTR void %(fun_name)s(uint8_t* dst, const uint8_t* src) {
+ %(cpp_type)s* dst_ = reinterpret_cast<%(cpp_type)s*>(dst);
+ const %(cpp_type)s* src_ = reinterpret_cast<const %(cpp_type)s*>(src);
+ %(cpp_type)s scratch;
+ asm volatile (%(insns)s
+ : [scratch] "=&r" (scratch)
+ : [dst] "r" (dst_), [src] "r"(src_)
+ : "memory");
+ }""" % {
+ "cpp_type": cpp_type,
+ "fun_name": fun_name,
+ "insns": insns,
+ }
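+
+# For illustration, gen_copy("AtomicCopyWordUnsynchronized", "uintptr_t", 8, 1,
+# "down") on x86_64 emits one movq load into a scratch register followed by one
+# movq store, i.e. a single unsynchronized word copy.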
+
+
+HEADER_TEMPLATE = """\
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AtomicOperationsGenerated_h
+#define jit_AtomicOperationsGenerated_h
+
+/* This file is generated by jit/GenerateAtomicOperations.py. Do not edit! */
+
+#include "mozilla/Attributes.h"
+
+namespace js {
+namespace jit {
+
+%(contents)s
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_AtomicOperationsGenerated_h
+"""
+
+
+def generate_atomics_header(c_out):
+ contents = ""
+ if cpu_arch in ("x86", "x86_64", "aarch64") or (
+ cpu_arch == "arm" and int(buildconfig.substs["ARM_ARCH"]) >= 7
+ ):
+ contents += "#define JS_HAVE_GENERATED_ATOMIC_OPS 1"
+
+ # `fence` performs a full memory barrier.
+ contents += gen_seqcst("AtomicFenceSeqCst")
+
+ contents += gen_load("AtomicLoad8SeqCst", "uint8_t", 8, True)
+ contents += gen_load("AtomicLoad16SeqCst", "uint16_t", 16, True)
+ contents += gen_load("AtomicLoad32SeqCst", "uint32_t", 32, True)
+ if is_64bit:
+ contents += gen_load("AtomicLoad64SeqCst", "uint64_t", 64, True)
+
+ # These are access-atomic up to sizeof(uintptr_t).
+ contents += gen_load("AtomicLoad8Unsynchronized", "uint8_t", 8, False)
+ contents += gen_load("AtomicLoad16Unsynchronized", "uint16_t", 16, False)
+ contents += gen_load("AtomicLoad32Unsynchronized", "uint32_t", 32, False)
+ if is_64bit:
+ contents += gen_load("AtomicLoad64Unsynchronized", "uint64_t", 64, False)
+
+ contents += gen_store("AtomicStore8SeqCst", "uint8_t", 8, True)
+ contents += gen_store("AtomicStore16SeqCst", "uint16_t", 16, True)
+ contents += gen_store("AtomicStore32SeqCst", "uint32_t", 32, True)
+ if is_64bit:
+ contents += gen_store("AtomicStore64SeqCst", "uint64_t", 64, True)
+
+ # These are access-atomic up to sizeof(uintptr_t).
+ contents += gen_store("AtomicStore8Unsynchronized", "uint8_t", 8, False)
+ contents += gen_store("AtomicStore16Unsynchronized", "uint16_t", 16, False)
+ contents += gen_store("AtomicStore32Unsynchronized", "uint32_t", 32, False)
+ if is_64bit:
+ contents += gen_store("AtomicStore64Unsynchronized", "uint64_t", 64, False)
+
+ # `exchange` takes a cell address and a value. It stores the value in the
+ # cell and returns the value previously in the cell.
+ contents += gen_exchange("AtomicExchange8SeqCst", "uint8_t", 8)
+ contents += gen_exchange("AtomicExchange16SeqCst", "uint16_t", 16)
+ contents += gen_exchange("AtomicExchange32SeqCst", "uint32_t", 32)
+ if is_64bit:
+ contents += gen_exchange("AtomicExchange64SeqCst", "uint64_t", 64)
+
+ # `cmpxchg` takes a cell address, an expected value and a replacement value.
+ # If the value in the cell equals the expected value then the replacement value
+ # is stored in the cell. It always returns the value previously in the cell.
+ contents += gen_cmpxchg("AtomicCmpXchg8SeqCst", "uint8_t", 8)
+ contents += gen_cmpxchg("AtomicCmpXchg16SeqCst", "uint16_t", 16)
+ contents += gen_cmpxchg("AtomicCmpXchg32SeqCst", "uint32_t", 32)
+ contents += gen_cmpxchg("AtomicCmpXchg64SeqCst", "uint64_t", 64)
+
+ # `add` adds a value atomically to the cell and returns the old value in the
+ # cell. (There is no `sub`; just add the negated value.)
+ contents += gen_fetchop("AtomicAdd8SeqCst", "uint8_t", 8, "add")
+ contents += gen_fetchop("AtomicAdd16SeqCst", "uint16_t", 16, "add")
+ contents += gen_fetchop("AtomicAdd32SeqCst", "uint32_t", 32, "add")
+ if is_64bit:
+ contents += gen_fetchop("AtomicAdd64SeqCst", "uint64_t", 64, "add")
+
+ # `and` bitwise-ands a value atomically into the cell and returns the old value
+ # in the cell.
+ contents += gen_fetchop("AtomicAnd8SeqCst", "uint8_t", 8, "and")
+ contents += gen_fetchop("AtomicAnd16SeqCst", "uint16_t", 16, "and")
+ contents += gen_fetchop("AtomicAnd32SeqCst", "uint32_t", 32, "and")
+ if is_64bit:
+ contents += gen_fetchop("AtomicAnd64SeqCst", "uint64_t", 64, "and")
+
+ # `or` bitwise-ors a value atomically into the cell and returns the old value
+ # in the cell.
+ contents += gen_fetchop("AtomicOr8SeqCst", "uint8_t", 8, "or")
+ contents += gen_fetchop("AtomicOr16SeqCst", "uint16_t", 16, "or")
+ contents += gen_fetchop("AtomicOr32SeqCst", "uint32_t", 32, "or")
+ if is_64bit:
+ contents += gen_fetchop("AtomicOr64SeqCst", "uint64_t", 64, "or")
+
+ # `xor` bitwise-xors a value atomically into the cell and returns the old value
+ # in the cell.
+ contents += gen_fetchop("AtomicXor8SeqCst", "uint8_t", 8, "xor")
+ contents += gen_fetchop("AtomicXor16SeqCst", "uint16_t", 16, "xor")
+ contents += gen_fetchop("AtomicXor32SeqCst", "uint32_t", 32, "xor")
+ if is_64bit:
+ contents += gen_fetchop("AtomicXor64SeqCst", "uint64_t", 64, "xor")
+
+ # See comment in jit/AtomicOperations-shared-jit.cpp for an explanation.
+ wordsize = 8 if is_64bit else 4
+ words_in_block = 8
+ blocksize = words_in_block * wordsize
+
+ contents += gen_copy(
+ "AtomicCopyUnalignedBlockDownUnsynchronized",
+ "uint8_t",
+ 1,
+ blocksize,
+ "down",
+ )
+ contents += gen_copy(
+ "AtomicCopyUnalignedBlockUpUnsynchronized", "uint8_t", 1, blocksize, "up"
+ )
+
+ contents += gen_copy(
+ "AtomicCopyUnalignedWordDownUnsynchronized", "uint8_t", 1, wordsize, "down"
+ )
+ contents += gen_copy(
+ "AtomicCopyUnalignedWordUpUnsynchronized", "uint8_t", 1, wordsize, "up"
+ )
+
+ contents += gen_copy(
+ "AtomicCopyBlockDownUnsynchronized",
+ "uintptr_t",
+ wordsize,
+ words_in_block,
+ "down",
+ )
+ contents += gen_copy(
+ "AtomicCopyBlockUpUnsynchronized",
+ "uintptr_t",
+ wordsize,
+ words_in_block,
+ "up",
+ )
+
+ contents += gen_copy(
+ "AtomicCopyWordUnsynchronized", "uintptr_t", wordsize, 1, "down"
+ )
+ contents += gen_copy("AtomicCopyByteUnsynchronized", "uint8_t", 1, 1, "down")
+
+ contents += "\n"
+ contents += (
+ "constexpr size_t JS_GENERATED_ATOMICS_BLOCKSIZE = "
+ + str(blocksize)
+ + ";\n"
+ )
+ contents += (
+ "constexpr size_t JS_GENERATED_ATOMICS_WORDSIZE = " + str(wordsize) + ";\n"
+ )
+
+ # Work around a GCC issue on 32-bit x86 by adding MOZ_NEVER_INLINE.
+ # See bug 1756347.
+ if is_gcc and cpu_arch == "x86":
+ contents = contents.replace("INLINE_ATTR", "MOZ_NEVER_INLINE inline")
+ else:
+ contents = contents.replace("INLINE_ATTR", "inline")
+
+ c_out.write(
+ HEADER_TEMPLATE
+ % {
+ "contents": contents,
+ }
+ )
diff --git a/js/src/jit/GenerateCacheIRFiles.py b/js/src/jit/GenerateCacheIRFiles.py
new file mode 100644
index 0000000000..38ebd9a162
--- /dev/null
+++ b/js/src/jit/GenerateCacheIRFiles.py
@@ -0,0 +1,539 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This script generates jit/CacheIROpsGenerated.h from CacheIROps.yaml
+
+from collections import OrderedDict
+
+import buildconfig
+import six
+import yaml
+from mozbuild.preprocessor import Preprocessor
+
+HEADER_TEMPLATE = """\
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef %(includeguard)s
+#define %(includeguard)s
+
+/* This file is generated by jit/GenerateCacheIRFiles.py. Do not edit! */
+
+%(contents)s
+
+#endif // %(includeguard)s
+"""
+
+
+def generate_header(c_out, includeguard, contents):
+ c_out.write(
+ HEADER_TEMPLATE
+ % {
+ "includeguard": includeguard,
+ "contents": contents,
+ }
+ )
+
+
+def load_yaml(yaml_path):
+ # First invoke preprocessor.py so that we can use #ifdef JS_SIMULATOR in
+ # the YAML file.
+ pp = Preprocessor()
+ pp.context.update(buildconfig.defines["ALLDEFINES"])
+ pp.out = six.StringIO()
+ pp.do_filter("substitution")
+ pp.do_include(yaml_path)
+ contents = pp.out.getvalue()
+
+ # Load into an OrderedDict to ensure order is preserved. Note: Python 3.7+
+ # also preserves ordering for normal dictionaries.
+ # Code based on https://stackoverflow.com/a/21912744.
+ class OrderedLoader(yaml.Loader):
+ pass
+
+ def construct_mapping(loader, node):
+ loader.flatten_mapping(node)
+ return OrderedDict(loader.construct_pairs(node))
+
+ tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
+ OrderedLoader.add_constructor(tag, construct_mapping)
+ return yaml.load(contents, OrderedLoader)
+
+
+# Information for generating CacheIRWriter code for a single argument. Tuple
+# stores the C++ argument type and the CacheIRWriter method to call.
+arg_writer_info = {
+ "ValId": ("ValOperandId", "writeOperandId"),
+ "ObjId": ("ObjOperandId", "writeOperandId"),
+ "StringId": ("StringOperandId", "writeOperandId"),
+ "SymbolId": ("SymbolOperandId", "writeOperandId"),
+ "BooleanId": ("BooleanOperandId", "writeOperandId"),
+ "Int32Id": ("Int32OperandId", "writeOperandId"),
+ "NumberId": ("NumberOperandId", "writeOperandId"),
+ "BigIntId": ("BigIntOperandId", "writeOperandId"),
+ "ValueTagId": ("ValueTagOperandId", "writeOperandId"),
+ "IntPtrId": ("IntPtrOperandId", "writeOperandId"),
+ "RawId": ("OperandId", "writeOperandId"),
+ "ShapeField": ("Shape*", "writeShapeField"),
+ "GetterSetterField": ("GetterSetter*", "writeGetterSetterField"),
+ "ObjectField": ("JSObject*", "writeObjectField"),
+ "StringField": ("JSString*", "writeStringField"),
+ "AtomField": ("JSAtom*", "writeStringField"),
+ "SymbolField": ("JS::Symbol*", "writeSymbolField"),
+ "BaseScriptField": ("BaseScript*", "writeBaseScriptField"),
+ "JitCodeField": ("JitCode*", "writeJitCodeField"),
+ "RawInt32Field": ("uint32_t", "writeRawInt32Field"),
+ "RawPointerField": ("const void*", "writeRawPointerField"),
+ "IdField": ("jsid", "writeIdField"),
+ "ValueField": ("const Value&", "writeValueField"),
+ "RawInt64Field": ("uint64_t", "writeRawInt64Field"),
+ "DoubleField": ("double", "writeDoubleField"),
+ "AllocSiteField": ("gc::AllocSite*", "writeAllocSiteField"),
+ "JSOpImm": ("JSOp", "writeJSOpImm"),
+ "BoolImm": ("bool", "writeBoolImm"),
+ "ByteImm": ("uint32_t", "writeByteImm"), # uint32_t to enable fits-in-byte asserts.
+ "GuardClassKindImm": ("GuardClassKind", "writeGuardClassKindImm"),
+ "ValueTypeImm": ("ValueType", "writeValueTypeImm"),
+ "JSWhyMagicImm": ("JSWhyMagic", "writeJSWhyMagicImm"),
+ "CallFlagsImm": ("CallFlags", "writeCallFlagsImm"),
+ "ScalarTypeImm": ("Scalar::Type", "writeScalarTypeImm"),
+ "UnaryMathFunctionImm": ("UnaryMathFunction", "writeUnaryMathFunctionImm"),
+ "WasmValTypeImm": ("wasm::ValType::Kind", "writeWasmValTypeImm"),
+ "Int32Imm": ("int32_t", "writeInt32Imm"),
+ "UInt32Imm": ("uint32_t", "writeUInt32Imm"),
+ "JSNativeImm": ("JSNative", "writeJSNativeImm"),
+ "StaticStringImm": ("const char*", "writeStaticStringImm"),
+ "AllocKindImm": ("gc::AllocKind", "writeAllocKindImm"),
+ "CompletionKindImm": ("CompletionKind", "writeCompletionKindImm"),
+}
+
+
+def gen_writer_method(name, args, custom_writer):
+ """Generates a CacheIRWRiter method for a single opcode."""
+
+ # Generate a single method that writes the opcode and each argument.
+ # For example:
+ #
+ # void guardShape(ObjOperandId obj, Shape* shape) {
+ # writeOp(CacheOp::GuardShape);
+ # writeOperandId(obj);
+ # writeShapeField(shape);
+ # assertLengthMatches();
+ # }
+ #
+ # The assertLengthMatches() call asserts that the length information in the
+ # arg_length dictionary below matches what is actually written.
+
+ # Method names start with a lowercase letter.
+ method_name = name[0].lower() + name[1:]
+ if custom_writer:
+ method_name += "_"
+
+ method_args = []
+ ret_type = "void"
+ args_code = ""
+ if args:
+ for arg_name, arg_type in six.iteritems(args):
+ cpp_type, write_method = arg_writer_info[arg_type]
+ if arg_name == "result":
+ ret_type = cpp_type
+ args_code += " {} result(newOperandId());\\\n".format(cpp_type)
+ args_code += " writeOperandId(result);\\\n"
+ else:
+ method_args.append("{} {}".format(cpp_type, arg_name))
+ args_code += " {}({});\\\n".format(write_method, arg_name)
+
+ code = ""
+ if custom_writer:
+ code += "private:\\\n"
+ code += "{} {}({}) {{\\\n".format(ret_type, method_name, ", ".join(method_args))
+ code += " writeOp(CacheOp::{});\\\n".format(name)
+ code += args_code
+ code += " assertLengthMatches();\\\n"
+ if ret_type != "void":
+ code += " return result;\\\n"
+ code += "}"
+ if custom_writer:
+ code += "\\\npublic:"
+ return code
+
+
+# Information for generating code using CacheIRReader for a single argument.
+# Tuple stores the C++ type, the suffix used for arguments/variables of this
+# type, and the expression to read this type from CacheIRReader.
+arg_reader_info = {
+ "ValId": ("ValOperandId", "Id", "reader.valOperandId()"),
+ "ObjId": ("ObjOperandId", "Id", "reader.objOperandId()"),
+ "StringId": ("StringOperandId", "Id", "reader.stringOperandId()"),
+ "SymbolId": ("SymbolOperandId", "Id", "reader.symbolOperandId()"),
+ "BooleanId": ("BooleanOperandId", "Id", "reader.booleanOperandId()"),
+ "Int32Id": ("Int32OperandId", "Id", "reader.int32OperandId()"),
+ "NumberId": ("NumberOperandId", "Id", "reader.numberOperandId()"),
+ "BigIntId": ("BigIntOperandId", "Id", "reader.bigIntOperandId()"),
+ "ValueTagId": ("ValueTagOperandId", "Id", "reader.valueTagOperandId()"),
+ "IntPtrId": ("IntPtrOperandId", "Id", "reader.intPtrOperandId()"),
+ "RawId": ("uint32_t", "Id", "reader.rawOperandId()"),
+ "ShapeField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "GetterSetterField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "ObjectField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "StringField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "AtomField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "SymbolField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "BaseScriptField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "JitCodeField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "RawInt32Field": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "RawPointerField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "IdField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "ValueField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "RawInt64Field": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "DoubleField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "AllocSiteField": ("uint32_t", "Offset", "reader.stubOffset()"),
+ "JSOpImm": ("JSOp", "", "reader.jsop()"),
+ "BoolImm": ("bool", "", "reader.readBool()"),
+ "ByteImm": ("uint8_t", "", "reader.readByte()"),
+ "GuardClassKindImm": ("GuardClassKind", "", "reader.guardClassKind()"),
+ "ValueTypeImm": ("ValueType", "", "reader.valueType()"),
+ "JSWhyMagicImm": ("JSWhyMagic", "", "reader.whyMagic()"),
+ "CallFlagsImm": ("CallFlags", "", "reader.callFlags()"),
+ "ScalarTypeImm": ("Scalar::Type", "", "reader.scalarType()"),
+ "UnaryMathFunctionImm": ("UnaryMathFunction", "", "reader.unaryMathFunction()"),
+ "WasmValTypeImm": ("wasm::ValType::Kind", "", "reader.wasmValType()"),
+ "Int32Imm": ("int32_t", "", "reader.int32Immediate()"),
+ "UInt32Imm": ("uint32_t", "", "reader.uint32Immediate()"),
+ "JSNativeImm": ("JSNative", "", "reinterpret_cast<JSNative>(reader.pointer())"),
+ "StaticStringImm": ("const char*", "", "reinterpret_cast<char*>(reader.pointer())"),
+ "AllocKindImm": ("gc::AllocKind", "", "reader.allocKind()"),
+ "CompletionKindImm": ("CompletionKind", "", "reader.completionKind()"),
+}
+
+
+def gen_compiler_method(name, args):
+ """Generates CacheIRCompiler or WarpCacheIRTranspiler header code for a
+ single opcode."""
+
+ method_name = "emit" + name
+
+ # We generate the signature of the method that needs to be implemented and a
+ # separate function forwarding to it. For example:
+ #
+ # [[nodiscard]] bool emitGuardShape(ObjOperandId objId, uint32_t shapeOffset);
+ # [[nodiscard]] bool emitGuardShape(CacheIRReader& reader) {
+ # ObjOperandId objId = reader.objOperandId();
+ # uint32_t shapeOffset = reader.stubOffset();
+ # return emitGuardShape(objId, shapeOffset);
+ # }
+ cpp_args = []
+ method_args = []
+ args_code = ""
+ if args:
+ for arg_name, arg_type in six.iteritems(args):
+ cpp_type, suffix, readexpr = arg_reader_info[arg_type]
+ cpp_name = arg_name + suffix
+ cpp_args.append(cpp_name)
+ method_args.append("{} {}".format(cpp_type, cpp_name))
+ args_code += " {} {} = {};\\\n".format(cpp_type, cpp_name, readexpr)
+
+ # Generate signature.
+ code = "[[nodiscard]] bool {}({});\\\n".format(method_name, ", ".join(method_args))
+
+ # Generate the method forwarding to it.
+ code += "[[nodiscard]] bool {}(CacheIRReader& reader) {{\\\n".format(method_name)
+ code += args_code
+ code += " return {}({});\\\n".format(method_name, ", ".join(cpp_args))
+ code += "}\\\n"
+
+ return code
+
+
+# For each argument type, the method name for printing it.
+arg_spewer_method = {
+ "ValId": "spewOperandId",
+ "ObjId": "spewOperandId",
+ "StringId": "spewOperandId",
+ "SymbolId": "spewOperandId",
+ "BooleanId": "spewOperandId",
+ "Int32Id": "spewOperandId",
+ "NumberId": "spewOperandId",
+ "BigIntId": "spewOperandId",
+ "ValueTagId": "spewOperandId",
+ "IntPtrId": "spewOperandId",
+ "RawId": "spewRawOperandId",
+ "ShapeField": "spewField",
+ "GetterSetterField": "spewField",
+ "ObjectField": "spewField",
+ "StringField": "spewField",
+ "AtomField": "spewField",
+ "SymbolField": "spewField",
+ "BaseScriptField": "spewField",
+ "JitCodeField": "spewField",
+ "RawInt32Field": "spewField",
+ "RawPointerField": "spewField",
+ "IdField": "spewField",
+ "ValueField": "spewField",
+ "RawInt64Field": "spewField",
+ "DoubleField": "spewField",
+ "AllocSiteField": "spewField",
+ "JSOpImm": "spewJSOpImm",
+ "BoolImm": "spewBoolImm",
+ "ByteImm": "spewByteImm",
+ "GuardClassKindImm": "spewGuardClassKindImm",
+ "ValueTypeImm": "spewValueTypeImm",
+ "JSWhyMagicImm": "spewJSWhyMagicImm",
+ "CallFlagsImm": "spewCallFlagsImm",
+ "ScalarTypeImm": "spewScalarTypeImm",
+ "UnaryMathFunctionImm": "spewUnaryMathFunctionImm",
+ "WasmValTypeImm": "spewWasmValTypeImm",
+ "Int32Imm": "spewInt32Imm",
+ "UInt32Imm": "spewUInt32Imm",
+ "JSNativeImm": "spewJSNativeImm",
+ "StaticStringImm": "spewStaticStringImm",
+ "AllocKindImm": "spewAllocKindImm",
+ "CompletionKindImm": "spewCompletionKindImm",
+}
+
+
+def gen_spewer_method(name, args):
+ """Generates spewer code for a single opcode."""
+
+ method_name = "spew" + name
+
+ # Generate code like this:
+ #
+ # void spewGuardShape(CacheIRReader& reader) {
+ # spewOp(CacheOp::GuardShape);
+ # spewOperandId("objId", reader.objOperandId());
+ # spewOperandSeparator();
+ # spewField("shapeOffset", reader.stubOffset());
+ # spewOpEnd();
+ # }
+ args_code = ""
+ if args:
+ is_first = True
+ for arg_name, arg_type in six.iteritems(args):
+ _, suffix, readexpr = arg_reader_info[arg_type]
+ arg_name += suffix
+ spew_method = arg_spewer_method[arg_type]
+ if not is_first:
+ args_code += " spewArgSeparator();\\\n"
+ args_code += ' {}("{}", {});\\\n'.format(spew_method, arg_name, readexpr)
+ is_first = False
+
+ code = "void {}(CacheIRReader& reader) {{\\\n".format(method_name)
+ code += " spewOp(CacheOp::{});\\\n".format(name)
+ code += args_code
+ code += " spewOpEnd();\\\n"
+ code += "}\\\n"
+
+ return code
+
+
+def gen_clone_method(name, args):
+ """Generates code for cloning a single opcode."""
+
+ method_name = "clone" + name
+
+ # Generate code like this:
+ #
+ # void cloneGuardShape(CacheIRReader& reader, CacheIRWriter& writer) {
+ # writer.writeOp(CacheOp::GuardShape);
+ # ObjOperandId objId = reader.objOperandId();
+ # writer.writeOperandId(objId);
+ # uint32_t shapeOffset = reader.stubOffset();
+ # Shape* shape = getShapeField(shapeOffset);
+ # writer.writeShapeField(shape);
+ # writer.assertLengthMatches();
+ # }
+
+ args_code = ""
+ if args:
+ for arg_name, arg_type in six.iteritems(args):
+ if arg_type == "RawId":
+ arg_type = "ValId"
+
+ read_type, suffix, readexpr = arg_reader_info[arg_type]
+ read_name = arg_name + suffix
+ value_name = read_name
+ args_code += " {} {} = {};\\\n".format(read_type, read_name, readexpr)
+
+ write_type, write_method = arg_writer_info[arg_type]
+ if arg_name == "result":
+ args_code += " writer.newOperandId();\\\n"
+ if suffix == "Offset":
+ # If the write function takes T&, the intermediate variable
+ # should be of type T.
+ if write_type.endswith("&"):
+ write_type = write_type[:-1]
+ value_name = arg_name
+ args_code += " {} {} = get{}({});\\\n".format(
+ write_type, value_name, arg_type, read_name
+ )
+ args_code += " writer.{}({});\\\n".format(write_method, value_name)
+
+ code = "void {}".format(method_name)
+ code += "(CacheIRReader& reader, CacheIRWriter& writer) {{\\\n"
+ code += " writer.writeOp(CacheOp::{});\\\n".format(name)
+ code += args_code
+ code += " writer.assertLengthMatches();\\\n"
+ code += "}}\\\n"
+
+ return code
+
+
+# Length in bytes for each argument type, either an integer or a C++ expression.
+# This is used to generate the CacheIROpArgLengths array. CacheIRWriter asserts
+# the number of bytes written matches the value in that array.
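+# For example, an op taking an ObjId and a ShapeField gets the argument-length
+# expression "1 + 1" (one byte for the operand id and one for the shape's
+# stub-field reference).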
+arg_length = {
+ "ValId": 1,
+ "ObjId": 1,
+ "StringId": 1,
+ "SymbolId": 1,
+ "BooleanId": 1,
+ "Int32Id": 1,
+ "NumberId": 1,
+ "BigIntId": 1,
+ "ValueTagId": 1,
+ "IntPtrId": 1,
+ "RawId": 1,
+ "ShapeField": 1,
+ "GetterSetterField": 1,
+ "ObjectField": 1,
+ "StringField": 1,
+ "AtomField": 1,
+ "SymbolField": 1,
+ "BaseScriptField": 1,
+ "JitCodeField": 1,
+ "RawInt32Field": 1,
+ "RawPointerField": 1,
+ "RawInt64Field": 1,
+ "DoubleField": 1,
+ "IdField": 1,
+ "ValueField": 1,
+ "AllocSiteField": 1,
+ "ByteImm": 1,
+ "BoolImm": 1,
+ "CallFlagsImm": 1,
+ "ScalarTypeImm": 1,
+ "UnaryMathFunctionImm": 1,
+ "JSOpImm": 1,
+ "ValueTypeImm": 1,
+ "GuardClassKindImm": 1,
+ "JSWhyMagicImm": 1,
+ "WasmValTypeImm": 1,
+ "Int32Imm": 4,
+ "UInt32Imm": 4,
+ "JSNativeImm": "sizeof(uintptr_t)",
+ "StaticStringImm": "sizeof(uintptr_t)",
+ "AllocKindImm": 1,
+ "CompletionKindImm": 1,
+}
+
+
+def generate_cacheirops_header(c_out, yaml_path):
+ """Generate CacheIROpsGenerated.h from CacheIROps.yaml. The generated file
+ contains a list of all CacheIR ops and generated source code for
+ CacheIRWriter and CacheIRCompiler."""
+
+ data = load_yaml(yaml_path)
+
+ # CACHE_IR_OPS items. Each item stores an opcode name, an argument-length
+ # expression, a transpile flag, and a cost estimate, in the form
+ # _(Name, args_length, transpile, cost_estimate).
+ ops_items = []
+
+ # Generated CacheIRWriter methods.
+ writer_methods = []
+
+ # Generated CacheIRCompiler methods.
+ compiler_shared_methods = []
+ compiler_unshared_methods = []
+
+ # Generated WarpCacheIRTranspiler methods.
+ transpiler_methods = []
+
+ # List of ops supported by WarpCacheIRTranspiler.
+ transpiler_ops = []
+
+ # Generated methods for spewers.
+ spewer_methods = []
+
+ # Generated methods for cloning IC stubs
+ clone_methods = []
+
+ for op in data:
+ name = op["name"]
+
+ args = op["args"]
+ assert args is None or isinstance(args, OrderedDict)
+
+ shared = op["shared"]
+ assert isinstance(shared, bool)
+
+ transpile = op["transpile"]
+ assert isinstance(transpile, bool)
+
+ # Unscored Ops default to UINT32_MAX
+ cost_estimate = op.get("cost_estimate", int(0xFFFFFFFF))
+ assert isinstance(cost_estimate, int)
+
+ custom_writer = op.get("custom_writer", False)
+ assert isinstance(custom_writer, bool)
+
+ if args:
+ args_length = " + ".join([str(arg_length[v]) for v in args.values()])
+ else:
+ args_length = "0"
+
+ transpile_str = "true" if transpile else "false"
+ ops_items.append(
+ "_({}, {}, {}, {})".format(name, args_length, transpile_str, cost_estimate)
+ )
+
+ writer_methods.append(gen_writer_method(name, args, custom_writer))
+
+ if shared:
+ compiler_shared_methods.append(gen_compiler_method(name, args))
+ else:
+ compiler_unshared_methods.append(gen_compiler_method(name, args))
+
+ if transpile:
+ transpiler_methods.append(gen_compiler_method(name, args))
+ transpiler_ops.append("_({})".format(name))
+
+ spewer_methods.append(gen_spewer_method(name, args))
+
+ clone_methods.append(gen_clone_method(name, args))
+
+ contents = "#define CACHE_IR_OPS(_)\\\n"
+ contents += "\\\n".join(ops_items)
+ contents += "\n\n"
+
+ contents += "#define CACHE_IR_WRITER_GENERATED \\\n"
+ contents += "\\\n".join(writer_methods)
+ contents += "\n\n"
+
+ contents += "#define CACHE_IR_COMPILER_SHARED_GENERATED \\\n"
+ contents += "\\\n".join(compiler_shared_methods)
+ contents += "\n\n"
+
+ contents += "#define CACHE_IR_COMPILER_UNSHARED_GENERATED \\\n"
+ contents += "\\\n".join(compiler_unshared_methods)
+ contents += "\n\n"
+
+ contents += "#define CACHE_IR_TRANSPILER_GENERATED \\\n"
+ contents += "\\\n".join(transpiler_methods)
+ contents += "\n\n"
+
+ contents += "#define CACHE_IR_TRANSPILER_OPS(_)\\\n"
+ contents += "\\\n".join(transpiler_ops)
+ contents += "\n\n"
+
+ contents += "#define CACHE_IR_SPEWER_GENERATED \\\n"
+ contents += "\\\n".join(spewer_methods)
+ contents += "\n\n"
+
+ contents += "#define CACHE_IR_CLONE_GENERATED \\\n"
+ contents += "\\\n".join(clone_methods)
+ contents += "\n\n"
+
+ generate_header(c_out, "jit_CacheIROpsGenerated_h", contents)
diff --git a/js/src/jit/GenerateLIRFiles.py b/js/src/jit/GenerateLIRFiles.py
new file mode 100644
index 0000000000..86882966fd
--- /dev/null
+++ b/js/src/jit/GenerateLIRFiles.py
@@ -0,0 +1,298 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This script generates jit/LIROpsGenerated.h (list of LIR instructions)
+# from LIROps.yaml.
+
+from collections import OrderedDict
+
+import buildconfig
+import six
+import yaml
+from mozbuild.preprocessor import Preprocessor
+
+HEADER_TEMPLATE = """\
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef %(includeguard)s
+#define %(includeguard)s
+
+/* This file is generated by jit/GenerateLIRFiles.py. Do not edit! */
+
+%(contents)s
+
+#endif // %(includeguard)s
+"""
+
+
+def load_yaml(yaml_path):
+ # First invoke preprocessor.py so that we can use #ifdef JS_SIMULATOR in
+ # the YAML file.
+ pp = Preprocessor()
+ pp.context.update(buildconfig.defines["ALLDEFINES"])
+ pp.out = six.StringIO()
+ pp.do_filter("substitution")
+ pp.do_include(yaml_path)
+ contents = pp.out.getvalue()
+
+ # Load into an OrderedDict to ensure order is preserved. Note: Python 3.7+
+ # also preserves ordering for normal dictionaries.
+ # Code based on https://stackoverflow.com/a/21912744.
+ class OrderedLoader(yaml.Loader):
+ pass
+
+ def construct_mapping(loader, node):
+ loader.flatten_mapping(node)
+ return OrderedDict(loader.construct_pairs(node))
+
+ tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
+ OrderedLoader.add_constructor(tag, construct_mapping)
+ return yaml.load(contents, OrderedLoader)
+
+
+def generate_header(c_out, includeguard, contents):
+ c_out.write(
+ HEADER_TEMPLATE
+ % {
+ "includeguard": includeguard,
+ "contents": contents,
+ }
+ )
+
+
+operand_types = {
+ "WordSized": "LAllocation",
+ "BoxedValue": "LBoxAllocation",
+ "Int64": "LInt64Allocation",
+}
+
+
+result_types = {
+ "WordSized": "1",
+ "BoxedValue": "BOX_PIECES",
+ "Int64": "INT64_PIECES",
+}
+
+
+def gen_helper_template_value(num_regular_allocs, num_value_allocs, num_int64_allocs):
+ template_str = ""
+ if num_value_allocs:
+ template_str += str(num_value_allocs) + " * BOX_PIECES + "
+ if num_int64_allocs:
+ template_str += str(num_int64_allocs) + " * INT64_PIECES + "
+ template_str += str(num_regular_allocs)
+ return template_str
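+
+# For illustration, gen_helper_template_value(2, 1, 0) returns
+# "1 * BOX_PIECES + 2": one boxed-value operand plus two word-sized operands.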
+
+
+def build_index_def(num_specials_operands, index_value, num_reg_operands, piece):
+ if num_specials_operands:
+ return " static const size_t {} = {} + {} * {};\\\n".format(
+ index_value, num_reg_operands, piece, num_specials_operands
+ )
+ else:
+ return " static const size_t {} = {};\\\n".format(
+ index_value, num_reg_operands
+ )
+
+
+def gen_lir_class(
+ name, result_type, operands, arguments, num_temps, call_instruction, mir_op
+):
+ """Generates class definition for a single LIR opcode."""
+ class_name = "L" + name
+
+ getters = []
+ setters = []
+ # Operand index definitions.
+ oper_indices = []
+ # Parameters for the class constructor.
+ constructor_params = []
+
+ num_reg_operands = 0
+ num_value_operands = 0
+ num_int64_operands = 0
+ if operands:
+ # Get number of LAllocations to use for defining indices.
+ for operand in operands:
+ if operands[operand] == "WordSized":
+ num_reg_operands += 1
+
+ current_reg_oper = 0
+ for operand in operands:
+ op_type = operands[operand]
+ op_alloc_type = operand_types[op_type]
+ constructor_params.append("const " + op_alloc_type + "& " + operand)
+ if op_type == "WordSized":
+ index_value = str(current_reg_oper)
+ current_reg_oper += 1
+ getters.append(
+ " const "
+ + op_alloc_type
+ + "* "
+ + operand
+ + "() { return getOperand("
+ + index_value
+ + "); }"
+ )
+ setters.append(" setOperand(" + index_value + ", " + operand + ");")
+ elif op_type == "BoxedValue":
+ index_value = operand[0].upper() + operand[1:] + "Index"
+ oper_indices.append(
+ build_index_def(
+ num_value_operands, index_value, num_reg_operands, "BOX_PIECES"
+ )
+ )
+ num_value_operands += 1
+ # No getters generated for BoxedValue operands.
+ setters.append(
+ " setBoxOperand(" + index_value + ", " + operand + ");"
+ )
+ elif op_type == "Int64":
+ index_value = operand[0].upper() + operand[1:] + "Index"
+ oper_indices.append(
+ build_index_def(
+ num_int64_operands,
+ index_value,
+ num_reg_operands,
+ "INT64_PIECES",
+ )
+ )
+ num_int64_operands += 1
+ getters.append(
+ " const "
+ + op_alloc_type
+ + " "
+ + operand
+ + "() { return getInt64Operand("
+ + index_value
+ + "); }"
+ )
+ setters.append(
+ " setInt64Operand(" + index_value + ", " + operand + ");"
+ )
+ else:
+ raise Exception("Invalid operand type: " + op_type)
+ if num_temps:
+ for temp in range(num_temps):
+ constructor_params.append("const LDefinition& temp" + str(temp))
+ setters.append(" setTemp(" + str(temp) + ", temp" + str(temp) + ");")
+ getters.append(
+ " const LDefinition* temp"
+ + str(temp)
+ + "() { return getTemp("
+ + str(temp)
+ + "); }"
+ )
+ code = "class {} : public LInstructionHelper<".format(class_name)
+ if result_type:
+ code += result_types[result_type] + ", "
+ else:
+ code += "0, "
+ code += gen_helper_template_value(
+ num_reg_operands, num_value_operands, num_int64_operands
+ )
+ code += ", {}> {{\\\n".format(num_temps)
+ if arguments:
+ for arg_name in arguments:
+ arg_type_sig = arguments[arg_name]
+ constructor_params.append(arg_type_sig + " " + arg_name)
+ code += " " + arg_type_sig + " " + arg_name + "_;\\\n"
+ code += " public:\\\n LIR_HEADER({})\\\n".format(name)
+ code += " explicit {}(".format(class_name)
+ code += ", ".join(constructor_params)
+ code += ") : LInstructionHelper(classOpcode)"
+ if arguments:
+ for arg_name in arguments:
+ code += ", " + arg_name + "_(" + arg_name + ")"
+ code += " {"
+ if call_instruction:
+ code += "\\\n this->setIsCall();"
+ code += "\\\n"
+ code += "\\\n".join(setters)
+ code += "\\\n }\\\n"
+ code += "\\\n".join(getters)
+ if arguments:
+ for arg_name in arguments:
+ code += " " + arguments[arg_name] + " " + arg_name + "() const { "
+ code += "return " + arg_name + "_; }\\\n"
+ code += "\\\n"
+ if operands:
+ code += "\\\n".join(oper_indices)
+ if mir_op:
+ if mir_op is True:
+ code += " M{}* mir() const {{ return mir_->to{}(); }};\\\n".format(
+ name, name
+ )
+ else:
+ code += " M{}* mir() const {{ return mir_->to{}(); }};\\\n".format(
+ mir_op, mir_op
+ )
+ code += "};\\\n"
+ return code
+
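+# Illustrative sketch (hypothetical YAML entry, not copied from LIROps.yaml):
+# an entry such as
+#
+#   - name: HypotheticalAdd
+#     result_type: WordSized
+#     operands:
+#       lhs: WordSized
+#       rhs: WordSized
+#     mir_op: true
+#
+# would make gen_lir_class above emit a class of roughly this shape:
+#
+#   class LHypotheticalAdd : public LInstructionHelper<1, 2, 0> {
+#    public:
+#     LIR_HEADER(HypotheticalAdd)
+#     explicit LHypotheticalAdd(const LAllocation& lhs, const LAllocation& rhs)
+#         : LInstructionHelper(classOpcode) {
+#       setOperand(0, lhs);
+#       setOperand(1, rhs);
+#     }
+#     const LAllocation* lhs() { return getOperand(0); }
+#     const LAllocation* rhs() { return getOperand(1); }
+#     MHypotheticalAdd* mir() const { return mir_->toHypotheticalAdd(); }
+#   };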
+
+def generate_lir_header(c_out, yaml_path):
+ data = load_yaml(yaml_path)
+
+ # LIR_OPCODE_LIST items. Stores the name of each LIR op.
+ ops = []
+
+ # Generated LIR op class definitions.
+ lir_op_classes = []
+
+ for op in data:
+ name = op["name"]
+
+ gen_boilerplate = op.get("gen_boilerplate", True)
+ assert isinstance(gen_boilerplate, bool)
+
+ if gen_boilerplate:
+ result_type = op.get("result_type", None)
+ assert result_type is None or isinstance(result_type, str)
+ if result_type:
+ assert result_types[result_type]
+
+ operands = op.get("operands", None)
+ assert operands is None or isinstance(operands, OrderedDict)
+
+ arguments = op.get("arguments", None)
+ assert arguments is None or isinstance(arguments, OrderedDict)
+
+ num_temps = op.get("num_temps", 0)
+ assert num_temps is None or isinstance(num_temps, int)
+
+ call_instruction = op.get("call_instruction", None)
+ assert call_instruction is None or call_instruction is True
+
+ mir_op = op.get("mir_op", None)
+ assert mir_op is None or mir_op is True or isinstance(mir_op, str)
+
+ lir_op_classes.append(
+ gen_lir_class(
+ name,
+ result_type,
+ operands,
+ arguments,
+ num_temps,
+ call_instruction,
+ mir_op,
+ )
+ )
+
+ ops.append("_({})".format(name))
+
+ contents = "#define LIR_OPCODE_LIST(_)\\\n"
+ contents += "\\\n".join(ops)
+ contents += "\n\n"
+
+ contents += "#define LIR_OPCODE_CLASS_GENERATED \\\n"
+ contents += "\\\n".join(lir_op_classes)
+ contents += "\n\n"
+
+ generate_header(c_out, "jit_LIROpsGenerated_h", contents)
diff --git a/js/src/jit/GenerateMIRFiles.py b/js/src/jit/GenerateMIRFiles.py
new file mode 100644
index 0000000000..3ed92087a3
--- /dev/null
+++ b/js/src/jit/GenerateMIRFiles.py
@@ -0,0 +1,404 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This script generates jit/MIROpsGenerated.h (list of MIR instructions)
+# from MIROps.yaml, as well as MIR op definitions.
+
+from collections import OrderedDict
+
+import buildconfig
+import six
+import yaml
+from mozbuild.preprocessor import Preprocessor
+
+HEADER_TEMPLATE = """\
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef %(includeguard)s
+#define %(includeguard)s
+
+/* This file is generated by jit/GenerateMIRFiles.py. Do not edit! */
+
+%(contents)s
+
+#endif // %(includeguard)s
+"""
+
+
+def generate_header(c_out, includeguard, contents):
+ c_out.write(
+ HEADER_TEMPLATE
+ % {
+ "includeguard": includeguard,
+ "contents": contents,
+ }
+ )
+
+
+def load_yaml(yaml_path):
+ # First invoke preprocessor.py so that we can use #ifdef JS_SIMULATOR in
+ # the YAML file.
+ pp = Preprocessor()
+ pp.context.update(buildconfig.defines["ALLDEFINES"])
+ pp.out = six.StringIO()
+ pp.do_filter("substitution")
+ pp.do_include(yaml_path)
+ contents = pp.out.getvalue()
+
+ # Load into an OrderedDict to ensure order is preserved. Note: Python 3.7+
+ # also preserves ordering for normal dictionaries.
+ # Code based on https://stackoverflow.com/a/21912744.
+ class OrderedLoader(yaml.Loader):
+ pass
+
+ def construct_mapping(loader, node):
+ loader.flatten_mapping(node)
+ return OrderedDict(loader.construct_pairs(node))
+
+ tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
+ OrderedLoader.add_constructor(tag, construct_mapping)
+ return yaml.load(contents, OrderedLoader)
+
+
+type_policies = {
+ "Object": "ObjectPolicy",
+ "Value": "BoxPolicy",
+ "Int32": "UnboxedInt32Policy",
+ "BigInt": "BigIntPolicy",
+ "Boolean": "BooleanPolicy",
+ "Double": "DoublePolicy",
+ "String": "StringPolicy",
+ "Symbol": "SymbolPolicy",
+}
+
+
+def decide_type_policy(types, no_type_policy):
+ if no_type_policy:
+ return "public NoTypePolicy::Data"
+
+ if len(types) == 1:
+ return "public {}<0>::Data".format(type_policies[types[0]])
+
+ type_num = 0
+ mixed_type_policies = []
+ for mir_type in types:
+ policy = type_policies[mir_type]
+ mixed_type_policies.append("{}<{}>".format(policy, type_num))
+ type_num += 1
+
+ return "public MixPolicy<{}>::Data".format(", ".join(mixed_type_policies))
+
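+# For example (illustrative only, not executed by the build):
+#   >>> decide_type_policy(["Object", "Int32"], False)
+#   'public MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>>::Data'
+# which is the spelling used in the generated-class example inside
+# gen_mir_class below.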
+
+mir_base_class = [
+ "MNullaryInstruction",
+ "MUnaryInstruction",
+ "MBinaryInstruction",
+ "MTernaryInstruction",
+ "MQuaternaryInstruction",
+]
+
+
+gc_pointer_types = [
+ "JSObject*",
+ "NativeObject*",
+ "JSFunction*",
+ "BaseScript*",
+ "PropertyName*",
+ "Shape*",
+ "GetterSetter*",
+ "JSAtom*",
+ "ClassBodyScope*",
+ "VarScope*",
+ "NamedLambdaObject*",
+ "RegExpObject*",
+ "JSScript*",
+ "LexicalScope*",
+]
+
+
+def gen_mir_class(
+ name,
+ operands,
+ arguments,
+ no_type_policy,
+ result,
+ guard,
+ movable,
+ folds_to,
+ congruent_to,
+ alias_set,
+ might_alias,
+ possibly_calls,
+ compute_range,
+ can_recover,
+ clone,
+):
+ """Generates class definition for a single MIR opcode."""
+
+ # Generate a MIR opcode class definition.
+ # For example:
+ # class MGuardIndexIsValidUpdateOrAdd
+ # : public MBinaryInstruction,
+ # public MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>>::Data {
+ # explicit MGuardIndexIsValidUpdateOrAdd(MDefinition* object,
+ # MDefinition* index)
+ # : MBinaryInstruction(classOpcode, object, index) {
+ # setGuard();
+ # setMovable();
+ # setResultType(MIRType::Int32);
+ # }
+ # public:
+ # INSTRUCTION_HEADER(GuardIndexIsValidUpdateOrAdd)
+ # TRIVIAL_NEW_WRAPPERS
+ # NAMED_OPERANDS((0, object), (1, index))
+ # AliasSet getAliasSet() const override { return AliasSet::None(); }
+ # bool congruentTo(const MDefinition* ins) const override {
+ # return congruentIfOperandsEqual(ins); }
+ # };
+ #
+
+ type_policy = ""
+ # MIR op constructor operands.
+ mir_operands = []
+ # MIR op base class constructor operands.
+ mir_base_class_operands = []
+ # Types of each constructor operand.
+ mir_types = []
+ # Items for NAMED_OPERANDS.
+ named_operands = []
+ if operands:
+ current_oper_num = 0
+ for oper_name in operands:
+ oper = "MDefinition* " + oper_name
+ mir_operands.append(oper)
+ mir_base_class_operands.append(", " + oper_name)
+ # Collect all the MIR argument types to use for determining the
+ # ops type policy.
+ mir_types.append(operands[oper_name])
+ # Collecting named operands for defining accessors.
+ named_operands.append("({}, {})".format(current_oper_num, oper_name))
+ current_oper_num += 1
+ type_policy = decide_type_policy(mir_types, no_type_policy)
+
+ class_name = "M" + name
+
+ assert len(mir_operands) < 5
+ base_class = mir_base_class[len(mir_operands)]
+ assert base_class
+ if base_class != "MNullaryInstruction":
+ assert type_policy
+ type_policy = ", " + type_policy
+ code = "class {} : public {}{} {{\\\n".format(class_name, base_class, type_policy)
+
+ # Arguments to class constructor that require accessors.
+ mir_args = []
+ if arguments:
+ for arg_name in arguments:
+ arg_type_sig = arguments[arg_name]
+ mir_args.append(arg_type_sig + " " + arg_name)
+ if arg_type_sig in gc_pointer_types:
+ code += " CompilerGCPointer<" + arg_type_sig + ">"
+ else:
+ code += " " + arg_type_sig
+ code += " " + arg_name + "_;\\\n"
+
+ code += " explicit {}({}) : {}(classOpcode{})".format(
+ class_name,
+ ", ".join(mir_operands + mir_args),
+ base_class,
+ "".join(mir_base_class_operands),
+ )
+ if arguments:
+ for arg_name in arguments:
+ code += ", " + arg_name + "_(" + arg_name + ")"
+ code += " {\\\n"
+ if guard:
+ code += " setGuard();\\\n"
+ if movable:
+ code += " setMovable();\\\n"
+ if result:
+ code += " setResultType(MIRType::{});\\\n".format(result)
+ code += " }\\\n public:\\\n"
+ if arguments:
+ for arg_name in arguments:
+ code += " " + arguments[arg_name] + " " + arg_name + "() const { "
+ code += "return " + arg_name + "_; }\\\n"
+ code += " INSTRUCTION_HEADER({})\\\n".format(name)
+ code += " TRIVIAL_NEW_WRAPPERS\\\n"
+ if named_operands:
+ code += " NAMED_OPERANDS({})\\\n".format(", ".join(named_operands))
+ if alias_set:
+ if alias_set == "custom":
+ code += " AliasSet getAliasSet() const override;\\\n"
+ else:
+ assert alias_set == "none"
+ code += (
+ " AliasSet getAliasSet() const override { "
+ "return AliasSet::None(); }\\\n"
+ )
+ if might_alias:
+ code += " AliasType mightAlias(const MDefinition* store) const override;\\\n"
+ if folds_to:
+ code += " MDefinition* foldsTo(TempAllocator& alloc) override;\\\n"
+ if congruent_to:
+ if congruent_to == "custom":
+ code += " bool congruentTo(const MDefinition* ins) const override;\\\n"
+ else:
+ assert congruent_to == "if_operands_equal"
+ code += (
+ " bool congruentTo(const MDefinition* ins) const override { "
+ "return congruentIfOperandsEqual(ins); }\\\n"
+ )
+ if possibly_calls:
+ if possibly_calls == "custom":
+ code += " bool possiblyCalls() const override;\\\n"
+ else:
+ code += " bool possiblyCalls() const override { return true; }\\\n"
+ if compute_range:
+ code += " void computeRange(TempAllocator& alloc) override;\\\n"
+ if can_recover:
+ code += " [[nodiscard]] bool writeRecoverData(\\\n"
+ code += " CompactBufferWriter& writer) const override;\\\n"
+ if can_recover == "custom":
+ code += " bool canRecoverOnBailout() const override;\\\n"
+ else:
+ code += " bool canRecoverOnBailout() const override { return true; }\\\n"
+ if clone:
+ code += " ALLOW_CLONE(" + class_name + ")\\\n"
+ code += "};\\\n"
+ return code
+
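+# Illustrative sketch (hypothetical YAML entry, not copied from MIROps.yaml):
+# the generated-class example shown at the top of gen_mir_class above would
+# correspond to an entry like
+#
+#   - name: GuardIndexIsValidUpdateOrAdd
+#     operands:
+#       object: Object
+#       index: Int32
+#     result_type: Int32
+#     guard: true
+#     movable: true
+#     congruent_to: if_operands_equal
+#     alias_set: none
+#
+# gen_mir_class maps the operand types to the MixPolicy, the operands to
+# NAMED_OPERANDS entries, and the remaining flags to setGuard()/setMovable()/
+# setResultType() calls and the corresponding method overrides.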
+
+def gen_non_gc_pointer_type_assertions(seen_types):
+ """Generates a list of static assertions used to ensure that all argument
+ types seen do not derive from gc::Cell; this catches GC pointer argument
+ types that are missing from the gc_pointer_types list above.
+ """
+ assertions = []
+
+ for seen_type in sorted(seen_types):
+ assertions.append(
+ "static_assert(!std::is_base_of_v<gc::Cell, " + seen_type.strip("*") + ">, "
+ '"Ensure that '
+ + seen_type.strip("*")
+ + ' is added to the gc_pointer_types list in GenerateMIRFiles.py."'
+ ");"
+ )
+
+ return assertions
+
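+# For example (illustrative only), a seen argument type of "uint32_t" yields
+# roughly:
+#   static_assert(!std::is_base_of_v<gc::Cell, uint32_t>,
+#                 "Ensure that uint32_t is added to the gc_pointer_types "
+#                 "list in GenerateMIRFiles.py.");
+# which only fails to compile if the type actually derives from gc::Cell,
+# i.e. if a GC pointer type is missing from gc_pointer_types above.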
+
+def generate_mir_header(c_out, yaml_path):
+ """Generate MIROpsGenerated.h from MIROps.yaml. The generated file
+ has a list of MIR ops and boilerplate for MIR op definitions.
+ """
+
+ data = load_yaml(yaml_path)
+
+ # MIR_OPCODE_LIST items. Stores the name of each MIR op.
+ ops_items = []
+
+ # Generated MIR op class definitions.
+ mir_op_classes = []
+
+ # Unique non-GC pointer types seen as argument types for MIR op constructors.
+ seen_non_gc_pointer_argument_types = set()
+
+ for op in data:
+ name = op["name"]
+
+ ops_items.append("_({})".format(name))
+
+ gen_boilerplate = op.get("gen_boilerplate", True)
+ assert isinstance(gen_boilerplate, bool)
+
+ if gen_boilerplate:
+ operands = op.get("operands", None)
+ assert operands is None or isinstance(operands, OrderedDict)
+
+ arguments = op.get("arguments", None)
+ assert arguments is None or isinstance(arguments, OrderedDict)
+
+ no_type_policy = op.get("type_policy", None)
+ assert no_type_policy is None or no_type_policy == "none"
+
+ result = op.get("result_type", None)
+ assert result is None or isinstance(result, str)
+
+ guard = op.get("guard", None)
+ assert guard is None or guard is True
+
+ movable = op.get("movable", None)
+ assert movable is None or movable is True
+
+ folds_to = op.get("folds_to", None)
+ assert folds_to is None or folds_to == "custom"
+
+ congruent_to = op.get("congruent_to", None)
+ assert (
+ congruent_to is None
+ or congruent_to == "if_operands_equal"
+ or congruent_to == "custom"
+ )
+
+ alias_set = op.get("alias_set", None)
+ assert alias_set is None or alias_set == "none" or alias_set == "custom"
+
+ might_alias = op.get("might_alias", None)
+ assert might_alias is None or might_alias == "custom"
+
+ possibly_calls = op.get("possibly_calls", None)
+ assert possibly_calls is None or possibly_calls is True or possibly_calls == "custom"
+
+ compute_range = op.get("compute_range", None)
+ assert compute_range is None or compute_range == "custom"
+
+ can_recover = op.get("can_recover", None)
+ assert can_recover is None or can_recover is True or can_recover == "custom"
+
+ clone = op.get("clone", None)
+ assert clone is None or clone is True
+
+ code = gen_mir_class(
+ name,
+ operands,
+ arguments,
+ no_type_policy,
+ result,
+ guard,
+ movable,
+ folds_to,
+ congruent_to,
+ alias_set,
+ might_alias,
+ possibly_calls,
+ compute_range,
+ can_recover,
+ clone,
+ )
+ mir_op_classes.append(code)
+
+ if arguments:
+ for argument in arguments:
+ arg_type = arguments[argument]
+ if arg_type not in gc_pointer_types:
+ seen_non_gc_pointer_argument_types.add(arg_type)
+
+ contents = "#define MIR_OPCODE_LIST(_)\\\n"
+ contents += "\\\n".join(ops_items)
+ contents += "\n\n"
+
+ contents += "#define MIR_OPCODE_CLASS_GENERATED \\\n"
+ contents += "\\\n".join(mir_op_classes)
+ contents += "\n\n"
+
+ contents += "#define NON_GC_POINTER_TYPE_ASSERTIONS_GENERATED \\\n"
+ contents += "\\\n".join(
+ gen_non_gc_pointer_type_assertions(seen_non_gc_pointer_argument_types)
+ )
+ contents += "\n\n"
+
+ generate_header(c_out, "jit_MIROpsGenerated_h", contents)
diff --git a/js/src/jit/ICState.h b/js/src/jit/ICState.h
new file mode 100644
index 0000000000..0d8de7dfb4
--- /dev/null
+++ b/js/src/jit/ICState.h
@@ -0,0 +1,216 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ICState_h
+#define jit_ICState_h
+
+#include "mozilla/Assertions.h"
+
+#include <algorithm>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/JitOptions.h"
+
+namespace js {
+namespace jit {
+
+// Used to track trial inlining status for a Baseline IC.
+// See also setTrialInliningState below.
+enum class TrialInliningState : uint8_t {
+ Initial = 0,
+ Candidate,
+ Inlined,
+ MonomorphicInlined,
+ Failure,
+};
+
+// ICState stores information about a Baseline or Ion IC.
+class ICState {
+ public:
+ // When we attach the maximum number of stubs, we discard all stubs and
+ // transition the IC to Megamorphic to attach stubs that are more generic
+ // (handle more cases). If we again attach the maximum number of stubs, we
+ // transition to Generic and (depending on the IC) will either attach a
+ // single stub that handles everything or stop attaching new stubs.
+ //
+ // We also transition to Generic when we repeatedly fail to attach a stub,
+ // to avoid wasting time trying.
+ enum class Mode : uint8_t { Specialized = 0, Megamorphic, Generic };
+
+ private:
+ uint8_t mode_ : 2;
+
+ // The TrialInliningState for a Baseline IC.
+ uint8_t trialInliningState_ : 3;
+
+ // Whether WarpOracle created a snapshot based on stubs attached to this
+ // Baseline IC.
+ bool usedByTranspiler_ : 1;
+
+ // Whether stubs attached to this IC have been folded together into
+ // a single stub. Used as a hint when attaching additional stubs to
+ // try folding them too.
+ bool hasFoldedStub_ : 1;
+
+ // Number of optimized stubs currently attached to this IC.
+ uint8_t numOptimizedStubs_;
+
+ // Number of times we failed to attach a stub.
+ uint8_t numFailures_;
+
+ static const size_t MaxOptimizedStubs = 6;
+
+ void setMode(Mode mode) {
+ mode_ = uint32_t(mode);
+ MOZ_ASSERT(Mode(mode_) == mode, "mode must fit in bitfield");
+ }
+
+ void transition(Mode mode) {
+ MOZ_ASSERT(mode > this->mode());
+ setMode(mode);
+ numFailures_ = 0;
+ }
+
+ MOZ_ALWAYS_INLINE size_t maxFailures() const {
+ // Allow more failures if we attached stubs.
+ static_assert(MaxOptimizedStubs == 6,
+ "numFailures_/maxFailures should fit in uint8_t");
+ size_t res = 5 + size_t(40) * numOptimizedStubs_;
+ MOZ_ASSERT(res <= UINT8_MAX, "numFailures_ should not overflow");
+ return res;
+ }
+
+ public:
+ ICState() { reset(); }
+
+ Mode mode() const { return Mode(mode_); }
+ size_t numOptimizedStubs() const { return numOptimizedStubs_; }
+ bool hasFailures() const { return (numFailures_ != 0); }
+ bool newStubIsFirstStub() const {
+ return (mode() == Mode::Specialized && numOptimizedStubs() == 0);
+ }
+
+ MOZ_ALWAYS_INLINE bool canAttachStub() const {
+ // Note: we cannot assert that numOptimizedStubs_ <= MaxOptimizedStubs
+ // because old-style baseline ICs may attach more stubs than
+ // MaxOptimizedStubs allows.
+ if (mode() == Mode::Generic || JitOptions.disableCacheIR) {
+ return false;
+ }
+ return true;
+ }
+
+ [[nodiscard]] MOZ_ALWAYS_INLINE bool shouldTransition() {
+ // Note: we cannot assert that numOptimizedStubs_ <= MaxOptimizedStubs
+ // because old-style baseline ICs may attach more stubs than
+ // MaxOptimizedStubs allows.
+ if (mode() == Mode::Generic) {
+ return false;
+ }
+ if (numOptimizedStubs_ < MaxOptimizedStubs &&
+ numFailures_ < maxFailures()) {
+ return false;
+ }
+ return true;
+ }
+
+ // If this returns true, we transitioned to a new mode and the caller
+ // should discard all stubs.
+ [[nodiscard]] MOZ_ALWAYS_INLINE bool maybeTransition() {
+ if (!shouldTransition()) {
+ return false;
+ }
+ if (numFailures_ >= maxFailures() || mode() == Mode::Megamorphic) {
+ transition(Mode::Generic);
+ return true;
+ }
+ MOZ_ASSERT(mode() == Mode::Specialized);
+ transition(Mode::Megamorphic);
+ return true;
+ }
+
+ void reset() {
+ setMode(Mode::Specialized);
+#ifdef DEBUG
+ if (JitOptions.forceMegamorphicICs) {
+ setMode(Mode::Megamorphic);
+ }
+#endif
+ trialInliningState_ = uint32_t(TrialInliningState::Initial);
+ usedByTranspiler_ = false;
+ hasFoldedStub_ = false;
+ numOptimizedStubs_ = 0;
+ numFailures_ = 0;
+ }
+ void trackAttached() {
+ // We'd like to assert numOptimizedStubs_ < MaxOptimizedStubs, but
+ // since this code is also used for non-CacheIR Baseline stubs, assert
+ // < 16 for now. Note that we do have the stronger assert in other
+ // methods, because they are only used by CacheIR ICs.
+ MOZ_ASSERT(numOptimizedStubs_ < 16);
+ numOptimizedStubs_++;
+ // As a heuristic, reduce the failure count after each successful attach
+ // to delay hitting Generic mode. Reset to 1 instead of 0 so that
+ // code which inspects state can distinguish no-failures from rare-failures.
+ numFailures_ = std::min(numFailures_, static_cast<uint8_t>(1));
+ }
+ void trackNotAttached() {
+ // Note: we can't assert numFailures_ < maxFailures() because
+ // maxFailures() depends on numOptimizedStubs_ and it's possible a
+ // GC discarded stubs before we got here.
+ numFailures_++;
+ MOZ_ASSERT(numFailures_ > 0, "numFailures_ should not overflow");
+ }
+ void trackUnlinkedStub() {
+ MOZ_ASSERT(numOptimizedStubs_ > 0);
+ numOptimizedStubs_--;
+ }
+ void trackUnlinkedAllStubs() { numOptimizedStubs_ = 0; }
+
+ void clearUsedByTranspiler() { usedByTranspiler_ = false; }
+ void setUsedByTranspiler() { usedByTranspiler_ = true; }
+ bool usedByTranspiler() const { return usedByTranspiler_; }
+
+ void clearHasFoldedStub() { hasFoldedStub_ = false; }
+ void setHasFoldedStub() { hasFoldedStub_ = true; }
+ bool hasFoldedStub() const { return hasFoldedStub_; }
+
+ TrialInliningState trialInliningState() const {
+ return TrialInliningState(trialInliningState_);
+ }
+ void setTrialInliningState(TrialInliningState state) {
+#ifdef DEBUG
+ // Moving to the Failure state is always valid. The other states should
+ // happen in this order:
+ //
+ // Initial -> Candidate --> Inlined
+ // \-> MonomorphicInlined
+ //
+ // This ensures we perform trial inlining at most once per IC site.
+ if (state != TrialInliningState::Failure) {
+ switch (trialInliningState()) {
+ case TrialInliningState::Initial:
+ MOZ_ASSERT(state == TrialInliningState::Candidate);
+ break;
+ case TrialInliningState::Candidate:
+ MOZ_ASSERT(state == TrialInliningState::Candidate ||
+ state == TrialInliningState::Inlined ||
+ state == TrialInliningState::MonomorphicInlined);
+ break;
+ case TrialInliningState::Inlined:
+ case TrialInliningState::MonomorphicInlined:
+ case TrialInliningState::Failure:
+ MOZ_CRASH("Inlined and Failure can only change to Failure");
+ break;
+ }
+ }
+#endif
+
+ trialInliningState_ = uint32_t(state);
+ MOZ_ASSERT(trialInliningState() == state,
+ "TrialInliningState must fit in bitfield");
+ }
+};
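+
+// Illustrative usage sketch (hypothetical caller; the real attach code lives
+// in the Baseline/Ion CacheIR paths): a typical attach site could look like
+//
+//   if (icState.maybeTransition()) {
+//     discardAllStubs();         // hypothetical helper
+//   }
+//   if (icState.canAttachStub()) {
+//     if (tryAttachNewStub()) {  // hypothetical helper
+//       icState.trackAttached();
+//     } else {
+//       icState.trackNotAttached();
+//     }
+//   }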
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ICState_h */
diff --git a/js/src/jit/ICStubSpace.h b/js/src/jit/ICStubSpace.h
new file mode 100644
index 0000000000..cf922231ca
--- /dev/null
+++ b/js/src/jit/ICStubSpace.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ICStubSpace_h
+#define jit_ICStubSpace_h
+
+#include "mozilla/MemoryReporting.h"
+
+#include "ds/LifoAlloc.h"
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// ICStubSpace is an abstraction for allocation policy and storage for CacheIR
+// stub data. There are two kinds of Baseline CacheIR stubs:
+//
+// (1) CacheIR stubs that can make non-tail calls that can GC. These are
+// allocated in a LifoAlloc stored in JitScript.
+// See JitScriptICStubSpace.
+//
+// (2) Other CacheIR stubs (aka optimized IC stubs). Allocated in a per-Zone
+// LifoAlloc and purged when JIT-code is discarded.
+// See OptimizedICStubSpace.
+class ICStubSpace {
+ protected:
+ LifoAlloc allocator_;
+
+ explicit ICStubSpace(size_t chunkSize) : allocator_(chunkSize) {}
+
+ public:
+ inline void* alloc(size_t size) { return allocator_.alloc(size); }
+
+ JS_DECLARE_NEW_METHODS(allocate, alloc, inline)
+
+ void freeAllAfterMinorGC(JS::Zone* zone);
+
+#ifdef DEBUG
+ bool isEmpty() const { return allocator_.isEmpty(); }
+#endif
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return allocator_.sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+// Space for optimized stubs. Every JitZone has a single OptimizedICStubSpace.
+struct OptimizedICStubSpace : public ICStubSpace {
+ static const size_t STUB_DEFAULT_CHUNK_SIZE = 4096;
+
+ public:
+ OptimizedICStubSpace() : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE) {}
+};
+
+// Space for Can-GC stubs. Every JitScript has a JitScriptICStubSpace.
+struct JitScriptICStubSpace : public ICStubSpace {
+ static const size_t STUB_DEFAULT_CHUNK_SIZE = 4096;
+
+ public:
+ JitScriptICStubSpace() : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE) {}
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ICStubSpace_h */
diff --git a/js/src/jit/InlinableNatives.cpp b/js/src/jit/InlinableNatives.cpp
new file mode 100644
index 0000000000..3c9a1310e6
--- /dev/null
+++ b/js/src/jit/InlinableNatives.cpp
@@ -0,0 +1,300 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/InlinableNatives.h"
+
+#ifdef JS_HAS_INTL_API
+# include "builtin/intl/Collator.h"
+# include "builtin/intl/DateTimeFormat.h"
+# include "builtin/intl/DisplayNames.h"
+# include "builtin/intl/ListFormat.h"
+# include "builtin/intl/NumberFormat.h"
+# include "builtin/intl/PluralRules.h"
+# include "builtin/intl/RelativeTimeFormat.h"
+#endif
+#include "builtin/MapObject.h"
+#include "js/experimental/JitInfo.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/AsyncIteration.h"
+#include "vm/Iteration.h"
+#include "vm/SharedArrayObject.h"
+
+using namespace js;
+using namespace js::jit;
+
+#define ADD_NATIVE(native) \
+ const JSJitInfo js::jit::JitInfo_##native{ \
+ {nullptr}, \
+ {uint16_t(InlinableNative::native)}, \
+ {0}, \
+ JSJitInfo::InlinableNative};
+INLINABLE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+
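+// For example, ADD_NATIVE(MathAbs) above expands to (roughly):
+//
+//   const JSJitInfo js::jit::JitInfo_MathAbs{
+//       {nullptr}, {uint16_t(InlinableNative::MathAbs)}, {0},
+//       JSJitInfo::InlinableNative};
+//
+// so every entry of INLINABLE_NATIVE_LIST gets a JitInfo_* constant that
+// records which InlinableNative it corresponds to.
+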
+const JSClass* js::jit::InlinableNativeGuardToClass(InlinableNative native) {
+ switch (native) {
+#ifdef JS_HAS_INTL_API
+ // Intl natives.
+ case InlinableNative::IntlGuardToCollator:
+ return &CollatorObject::class_;
+ case InlinableNative::IntlGuardToDateTimeFormat:
+ return &DateTimeFormatObject::class_;
+ case InlinableNative::IntlGuardToDisplayNames:
+ return &DisplayNamesObject::class_;
+ case InlinableNative::IntlGuardToListFormat:
+ return &ListFormatObject::class_;
+ case InlinableNative::IntlGuardToNumberFormat:
+ return &NumberFormatObject::class_;
+ case InlinableNative::IntlGuardToPluralRules:
+ return &PluralRulesObject::class_;
+ case InlinableNative::IntlGuardToRelativeTimeFormat:
+ return &RelativeTimeFormatObject::class_;
+#else
+ case InlinableNative::IntlGuardToCollator:
+ case InlinableNative::IntlGuardToDateTimeFormat:
+ case InlinableNative::IntlGuardToDisplayNames:
+ case InlinableNative::IntlGuardToListFormat:
+ case InlinableNative::IntlGuardToNumberFormat:
+ case InlinableNative::IntlGuardToPluralRules:
+ case InlinableNative::IntlGuardToRelativeTimeFormat:
+ MOZ_CRASH("Intl API disabled");
+#endif
+
+ // Utility intrinsics.
+ case InlinableNative::IntrinsicGuardToArrayIterator:
+ return &ArrayIteratorObject::class_;
+ case InlinableNative::IntrinsicGuardToMapIterator:
+ return &MapIteratorObject::class_;
+ case InlinableNative::IntrinsicGuardToSetIterator:
+ return &SetIteratorObject::class_;
+ case InlinableNative::IntrinsicGuardToStringIterator:
+ return &StringIteratorObject::class_;
+ case InlinableNative::IntrinsicGuardToRegExpStringIterator:
+ return &RegExpStringIteratorObject::class_;
+ case InlinableNative::IntrinsicGuardToWrapForValidIterator:
+ return &WrapForValidIteratorObject::class_;
+ case InlinableNative::IntrinsicGuardToIteratorHelper:
+ return &IteratorHelperObject::class_;
+ case InlinableNative::IntrinsicGuardToAsyncIteratorHelper:
+ return &AsyncIteratorHelperObject::class_;
+
+ case InlinableNative::IntrinsicGuardToMapObject:
+ return &MapObject::class_;
+ case InlinableNative::IntrinsicGuardToSetObject:
+ return &SetObject::class_;
+ case InlinableNative::IntrinsicGuardToArrayBuffer:
+ return &ArrayBufferObject::class_;
+ case InlinableNative::IntrinsicGuardToSharedArrayBuffer:
+ return &SharedArrayBufferObject::class_;
+
+ default:
+ MOZ_CRASH("Not a GuardTo instruction");
+ }
+}
+
+// Returns true if |native| can be inlined cross-realm. In particular, natives
+// that can allocate objects or throw exceptions shouldn't be inlined
+// cross-realm without careful analysis, because we might use the wrong realm!
+//
+// Note that self-hosting intrinsics are never called cross-realm. See the
+// MOZ_CRASH below.
+//
+// If you are adding a new inlinable native, the safe thing is to |return false|
+// here.
+bool js::jit::CanInlineNativeCrossRealm(InlinableNative native) {
+ switch (native) {
+ case InlinableNative::MathAbs:
+ case InlinableNative::MathFloor:
+ case InlinableNative::MathCeil:
+ case InlinableNative::MathRound:
+ case InlinableNative::MathClz32:
+ case InlinableNative::MathSqrt:
+ case InlinableNative::MathATan2:
+ case InlinableNative::MathHypot:
+ case InlinableNative::MathMax:
+ case InlinableNative::MathMin:
+ case InlinableNative::MathPow:
+ case InlinableNative::MathImul:
+ case InlinableNative::MathFRound:
+ case InlinableNative::MathTrunc:
+ case InlinableNative::MathSign:
+ case InlinableNative::MathSin:
+ case InlinableNative::MathTan:
+ case InlinableNative::MathCos:
+ case InlinableNative::MathExp:
+ case InlinableNative::MathLog:
+ case InlinableNative::MathASin:
+ case InlinableNative::MathATan:
+ case InlinableNative::MathACos:
+ case InlinableNative::MathLog10:
+ case InlinableNative::MathLog2:
+ case InlinableNative::MathLog1P:
+ case InlinableNative::MathExpM1:
+ case InlinableNative::MathCosH:
+ case InlinableNative::MathSinH:
+ case InlinableNative::MathTanH:
+ case InlinableNative::MathACosH:
+ case InlinableNative::MathASinH:
+ case InlinableNative::MathATanH:
+ case InlinableNative::MathCbrt:
+ case InlinableNative::Boolean:
+ return true;
+
+ case InlinableNative::Array:
+ // Cross-realm case handled by inlineArray.
+ return true;
+
+ case InlinableNative::MathRandom:
+ // RNG state is per-realm.
+ return false;
+
+ case InlinableNative::IntlGuardToCollator:
+ case InlinableNative::IntlGuardToDateTimeFormat:
+ case InlinableNative::IntlGuardToDisplayNames:
+ case InlinableNative::IntlGuardToListFormat:
+ case InlinableNative::IntlGuardToNumberFormat:
+ case InlinableNative::IntlGuardToPluralRules:
+ case InlinableNative::IntlGuardToRelativeTimeFormat:
+ case InlinableNative::IsRegExpObject:
+ case InlinableNative::IsPossiblyWrappedRegExpObject:
+ case InlinableNative::RegExpMatcher:
+ case InlinableNative::RegExpSearcher:
+ case InlinableNative::RegExpPrototypeOptimizable:
+ case InlinableNative::RegExpInstanceOptimizable:
+ case InlinableNative::GetFirstDollarIndex:
+ case InlinableNative::IntrinsicNewArrayIterator:
+ case InlinableNative::IntrinsicNewStringIterator:
+ case InlinableNative::IntrinsicNewRegExpStringIterator:
+ case InlinableNative::IntrinsicStringReplaceString:
+ case InlinableNative::IntrinsicStringSplitString:
+ case InlinableNative::IntrinsicUnsafeSetReservedSlot:
+ case InlinableNative::IntrinsicUnsafeGetReservedSlot:
+ case InlinableNative::IntrinsicUnsafeGetObjectFromReservedSlot:
+ case InlinableNative::IntrinsicUnsafeGetInt32FromReservedSlot:
+ case InlinableNative::IntrinsicUnsafeGetStringFromReservedSlot:
+ case InlinableNative::IntrinsicIsCallable:
+ case InlinableNative::IntrinsicIsConstructor:
+ case InlinableNative::IntrinsicToObject:
+ case InlinableNative::IntrinsicIsObject:
+ case InlinableNative::IntrinsicIsCrossRealmArrayConstructor:
+ case InlinableNative::IntrinsicToInteger:
+ case InlinableNative::IntrinsicToLength:
+ case InlinableNative::IntrinsicIsConstructing:
+ case InlinableNative::IntrinsicIsSuspendedGenerator:
+ case InlinableNative::IntrinsicSubstringKernel:
+ case InlinableNative::IntrinsicGuardToArrayIterator:
+ case InlinableNative::IntrinsicGuardToMapIterator:
+ case InlinableNative::IntrinsicGuardToSetIterator:
+ case InlinableNative::IntrinsicGuardToStringIterator:
+ case InlinableNative::IntrinsicGuardToRegExpStringIterator:
+ case InlinableNative::IntrinsicGuardToWrapForValidIterator:
+ case InlinableNative::IntrinsicGuardToIteratorHelper:
+ case InlinableNative::IntrinsicGuardToAsyncIteratorHelper:
+ case InlinableNative::IntrinsicObjectHasPrototype:
+ case InlinableNative::IntrinsicIsPackedArray:
+ case InlinableNative::IntrinsicGuardToMapObject:
+ case InlinableNative::IntrinsicGetNextMapEntryForIterator:
+ case InlinableNative::IntrinsicGuardToSetObject:
+ case InlinableNative::IntrinsicGetNextSetEntryForIterator:
+ case InlinableNative::IntrinsicGuardToArrayBuffer:
+ case InlinableNative::IntrinsicArrayBufferByteLength:
+ case InlinableNative::IntrinsicPossiblyWrappedArrayBufferByteLength:
+ case InlinableNative::IntrinsicGuardToSharedArrayBuffer:
+ case InlinableNative::IntrinsicIsTypedArrayConstructor:
+ case InlinableNative::IntrinsicIsTypedArray:
+ case InlinableNative::IntrinsicIsPossiblyWrappedTypedArray:
+ case InlinableNative::IntrinsicPossiblyWrappedTypedArrayLength:
+ case InlinableNative::IntrinsicRegExpBuiltinExec:
+ case InlinableNative::IntrinsicRegExpBuiltinExecForTest:
+ case InlinableNative::IntrinsicRegExpExec:
+ case InlinableNative::IntrinsicRegExpExecForTest:
+ case InlinableNative::IntrinsicTypedArrayLength:
+ case InlinableNative::IntrinsicTypedArrayByteOffset:
+ case InlinableNative::IntrinsicTypedArrayElementSize:
+ case InlinableNative::IntrinsicArrayIteratorPrototypeOptimizable:
+ MOZ_CRASH("Unexpected cross-realm intrinsic call");
+
+ case InlinableNative::TestBailout:
+ case InlinableNative::TestAssertFloat32:
+ case InlinableNative::TestAssertRecoveredOnBailout:
+ // Testing functions, not worth inlining cross-realm.
+ return false;
+
+ case InlinableNative::ArrayIsArray:
+ case InlinableNative::ArrayJoin:
+ case InlinableNative::ArrayPop:
+ case InlinableNative::ArrayShift:
+ case InlinableNative::ArrayPush:
+ case InlinableNative::ArraySlice:
+ case InlinableNative::AtomicsCompareExchange:
+ case InlinableNative::AtomicsExchange:
+ case InlinableNative::AtomicsLoad:
+ case InlinableNative::AtomicsStore:
+ case InlinableNative::AtomicsAdd:
+ case InlinableNative::AtomicsSub:
+ case InlinableNative::AtomicsAnd:
+ case InlinableNative::AtomicsOr:
+ case InlinableNative::AtomicsXor:
+ case InlinableNative::AtomicsIsLockFree:
+ case InlinableNative::BigIntAsIntN:
+ case InlinableNative::BigIntAsUintN:
+ case InlinableNative::DataViewGetInt8:
+ case InlinableNative::DataViewGetUint8:
+ case InlinableNative::DataViewGetInt16:
+ case InlinableNative::DataViewGetUint16:
+ case InlinableNative::DataViewGetInt32:
+ case InlinableNative::DataViewGetUint32:
+ case InlinableNative::DataViewGetFloat32:
+ case InlinableNative::DataViewGetFloat64:
+ case InlinableNative::DataViewGetBigInt64:
+ case InlinableNative::DataViewGetBigUint64:
+ case InlinableNative::DataViewSetInt8:
+ case InlinableNative::DataViewSetUint8:
+ case InlinableNative::DataViewSetInt16:
+ case InlinableNative::DataViewSetUint16:
+ case InlinableNative::DataViewSetInt32:
+ case InlinableNative::DataViewSetUint32:
+ case InlinableNative::DataViewSetFloat32:
+ case InlinableNative::DataViewSetFloat64:
+ case InlinableNative::DataViewSetBigInt64:
+ case InlinableNative::DataViewSetBigUint64:
+ case InlinableNative::FunctionBind:
+ case InlinableNative::MapGet:
+ case InlinableNative::MapHas:
+ case InlinableNative::Number:
+ case InlinableNative::NumberParseInt:
+ case InlinableNative::NumberToString:
+ case InlinableNative::ReflectGetPrototypeOf:
+ case InlinableNative::SetHas:
+ case InlinableNative::String:
+ case InlinableNative::StringToString:
+ case InlinableNative::StringValueOf:
+ case InlinableNative::StringCharCodeAt:
+ case InlinableNative::StringFromCharCode:
+ case InlinableNative::StringFromCodePoint:
+ case InlinableNative::StringCharAt:
+ case InlinableNative::StringIndexOf:
+ case InlinableNative::StringStartsWith:
+ case InlinableNative::StringEndsWith:
+ case InlinableNative::StringToLowerCase:
+ case InlinableNative::StringToUpperCase:
+ case InlinableNative::Object:
+ case InlinableNative::ObjectCreate:
+ case InlinableNative::ObjectIs:
+ case InlinableNative::ObjectIsPrototypeOf:
+ case InlinableNative::ObjectToString:
+ case InlinableNative::TypedArrayConstructor:
+#ifdef FUZZING_JS_FUZZILLI
+ case InlinableNative::FuzzilliHash:
+#endif
+ // Default to false for most natives.
+ return false;
+
+ case InlinableNative::Limit:
+ break;
+ }
+ MOZ_CRASH("Unknown native");
+}
diff --git a/js/src/jit/InlinableNatives.h b/js/src/jit/InlinableNatives.h
new file mode 100644
index 0000000000..bdb946f8bb
--- /dev/null
+++ b/js/src/jit/InlinableNatives.h
@@ -0,0 +1,240 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_InlinableNatives_h
+#define jit_InlinableNatives_h
+
+#include <stdint.h> // For uint16_t
+
+#ifdef FUZZING_JS_FUZZILLI
+# define INLINABLE_NATIVE_FUZZILLI_LIST(_) _(FuzzilliHash)
+#else
+# define INLINABLE_NATIVE_FUZZILLI_LIST(_)
+#endif
+
+#define INLINABLE_NATIVE_LIST(_) \
+ _(Array) \
+ _(ArrayIsArray) \
+ _(ArrayJoin) \
+ _(ArrayPop) \
+ _(ArrayShift) \
+ _(ArrayPush) \
+ _(ArraySlice) \
+ \
+ _(AtomicsCompareExchange) \
+ _(AtomicsExchange) \
+ _(AtomicsLoad) \
+ _(AtomicsStore) \
+ _(AtomicsAdd) \
+ _(AtomicsSub) \
+ _(AtomicsAnd) \
+ _(AtomicsOr) \
+ _(AtomicsXor) \
+ _(AtomicsIsLockFree) \
+ \
+ _(BigIntAsIntN) \
+ _(BigIntAsUintN) \
+ \
+ _(Boolean) \
+ \
+ _(DataViewGetInt8) \
+ _(DataViewGetUint8) \
+ _(DataViewGetInt16) \
+ _(DataViewGetUint16) \
+ _(DataViewGetInt32) \
+ _(DataViewGetUint32) \
+ _(DataViewGetFloat32) \
+ _(DataViewGetFloat64) \
+ _(DataViewGetBigInt64) \
+ _(DataViewGetBigUint64) \
+ _(DataViewSetInt8) \
+ _(DataViewSetUint8) \
+ _(DataViewSetInt16) \
+ _(DataViewSetUint16) \
+ _(DataViewSetInt32) \
+ _(DataViewSetUint32) \
+ _(DataViewSetFloat32) \
+ _(DataViewSetFloat64) \
+ _(DataViewSetBigInt64) \
+ _(DataViewSetBigUint64) \
+ \
+ _(FunctionBind) \
+ \
+ _(IntlGuardToCollator) \
+ _(IntlGuardToDateTimeFormat) \
+ _(IntlGuardToDisplayNames) \
+ _(IntlGuardToListFormat) \
+ _(IntlGuardToNumberFormat) \
+ _(IntlGuardToPluralRules) \
+ _(IntlGuardToRelativeTimeFormat) \
+ \
+ _(MapGet) \
+ _(MapHas) \
+ \
+ _(MathAbs) \
+ _(MathFloor) \
+ _(MathCeil) \
+ _(MathRound) \
+ _(MathClz32) \
+ _(MathSqrt) \
+ _(MathATan2) \
+ _(MathHypot) \
+ _(MathMax) \
+ _(MathMin) \
+ _(MathPow) \
+ _(MathRandom) \
+ _(MathImul) \
+ _(MathFRound) \
+ _(MathSin) \
+ _(MathTan) \
+ _(MathCos) \
+ _(MathExp) \
+ _(MathLog) \
+ _(MathASin) \
+ _(MathATan) \
+ _(MathACos) \
+ _(MathLog10) \
+ _(MathLog2) \
+ _(MathLog1P) \
+ _(MathExpM1) \
+ _(MathSinH) \
+ _(MathTanH) \
+ _(MathCosH) \
+ _(MathASinH) \
+ _(MathATanH) \
+ _(MathACosH) \
+ _(MathSign) \
+ _(MathTrunc) \
+ _(MathCbrt) \
+ \
+ _(Number) \
+ _(NumberParseInt) \
+ _(NumberToString) \
+ \
+ _(ReflectGetPrototypeOf) \
+ \
+ _(RegExpMatcher) \
+ _(RegExpSearcher) \
+ _(IsRegExpObject) \
+ _(IsPossiblyWrappedRegExpObject) \
+ _(RegExpPrototypeOptimizable) \
+ _(RegExpInstanceOptimizable) \
+ _(GetFirstDollarIndex) \
+ \
+ _(SetHas) \
+ \
+ _(String) \
+ _(StringToString) \
+ _(StringValueOf) \
+ _(StringCharCodeAt) \
+ _(StringFromCharCode) \
+ _(StringFromCodePoint) \
+ _(StringCharAt) \
+ _(StringIndexOf) \
+ _(StringStartsWith) \
+ _(StringEndsWith) \
+ _(StringToLowerCase) \
+ _(StringToUpperCase) \
+ \
+ _(IntrinsicStringReplaceString) \
+ _(IntrinsicStringSplitString) \
+ \
+ _(Object) \
+ _(ObjectCreate) \
+ _(ObjectIs) \
+ _(ObjectIsPrototypeOf) \
+ _(ObjectToString) \
+ \
+ _(TestBailout) \
+ _(TestAssertFloat32) \
+ _(TestAssertRecoveredOnBailout) \
+ \
+ _(IntrinsicUnsafeSetReservedSlot) \
+ _(IntrinsicUnsafeGetReservedSlot) \
+ _(IntrinsicUnsafeGetObjectFromReservedSlot) \
+ _(IntrinsicUnsafeGetInt32FromReservedSlot) \
+ _(IntrinsicUnsafeGetStringFromReservedSlot) \
+ \
+ _(IntrinsicIsCallable) \
+ _(IntrinsicIsConstructor) \
+ _(IntrinsicToObject) \
+ _(IntrinsicIsObject) \
+ _(IntrinsicIsCrossRealmArrayConstructor) \
+ _(IntrinsicToInteger) \
+ _(IntrinsicToLength) \
+ _(IntrinsicIsConstructing) \
+ _(IntrinsicSubstringKernel) \
+ _(IntrinsicObjectHasPrototype) \
+ _(IntrinsicIsPackedArray) \
+ \
+ _(IntrinsicIsSuspendedGenerator) \
+ \
+ _(IntrinsicGuardToArrayIterator) \
+ _(IntrinsicGuardToMapIterator) \
+ _(IntrinsicGuardToSetIterator) \
+ _(IntrinsicGuardToStringIterator) \
+ _(IntrinsicGuardToRegExpStringIterator) \
+ _(IntrinsicGuardToWrapForValidIterator) \
+ _(IntrinsicGuardToIteratorHelper) \
+ _(IntrinsicGuardToAsyncIteratorHelper) \
+ \
+ _(IntrinsicGuardToMapObject) \
+ _(IntrinsicGetNextMapEntryForIterator) \
+ \
+ _(IntrinsicGuardToSetObject) \
+ _(IntrinsicGetNextSetEntryForIterator) \
+ \
+ _(IntrinsicNewArrayIterator) \
+ _(IntrinsicNewStringIterator) \
+ _(IntrinsicNewRegExpStringIterator) \
+ _(IntrinsicArrayIteratorPrototypeOptimizable) \
+ \
+ _(IntrinsicGuardToArrayBuffer) \
+ _(IntrinsicArrayBufferByteLength) \
+ _(IntrinsicPossiblyWrappedArrayBufferByteLength) \
+ \
+ _(IntrinsicGuardToSharedArrayBuffer) \
+ \
+ _(TypedArrayConstructor) \
+ _(IntrinsicIsTypedArrayConstructor) \
+ _(IntrinsicIsTypedArray) \
+ _(IntrinsicIsPossiblyWrappedTypedArray) \
+ _(IntrinsicTypedArrayLength) \
+ _(IntrinsicPossiblyWrappedTypedArrayLength) \
+ _(IntrinsicRegExpBuiltinExec) \
+ _(IntrinsicRegExpBuiltinExecForTest) \
+ _(IntrinsicRegExpExec) \
+ _(IntrinsicRegExpExecForTest) \
+ _(IntrinsicTypedArrayByteOffset) \
+ _(IntrinsicTypedArrayElementSize) \
+ \
+ INLINABLE_NATIVE_FUZZILLI_LIST(_)
+
+struct JSClass;
+class JSJitInfo;
+
+namespace js {
+namespace jit {
+
+enum class InlinableNative : uint16_t {
+#define ADD_NATIVE(native) native,
+ INLINABLE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+ Limit
+};
+
+#define ADD_NATIVE(native) extern const JSJitInfo JitInfo_##native;
+INLINABLE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+
+const JSClass* InlinableNativeGuardToClass(InlinableNative native);
+
+bool CanInlineNativeCrossRealm(InlinableNative native);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_InlinableNatives_h */
diff --git a/js/src/jit/InlineList.h b/js/src/jit/InlineList.h
new file mode 100644
index 0000000000..44e445872b
--- /dev/null
+++ b/js/src/jit/InlineList.h
@@ -0,0 +1,590 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_InlineList_h
+#define jit_InlineList_h
+
+#include "mozilla/Assertions.h"
+
+namespace js {
+
+template <typename T>
+class InlineForwardList;
+template <typename T>
+class InlineForwardListIterator;
+
+template <typename T>
+class InlineForwardListNode {
+ public:
+ InlineForwardListNode() : next(nullptr) {}
+ explicit InlineForwardListNode(InlineForwardListNode<T>* n) : next(n) {}
+
+ InlineForwardListNode(const InlineForwardListNode<T>&) = delete;
+
+ protected:
+ friend class InlineForwardList<T>;
+ friend class InlineForwardListIterator<T>;
+
+ InlineForwardListNode<T>* next;
+};
+
+template <typename T>
+class InlineForwardList : protected InlineForwardListNode<T> {
+ friend class InlineForwardListIterator<T>;
+
+ using Node = InlineForwardListNode<T>;
+
+ Node* tail_;
+#ifdef DEBUG
+ int modifyCount_;
+#endif
+
+ InlineForwardList<T>* thisFromConstructor() { return this; }
+
+ public:
+ InlineForwardList() : tail_(thisFromConstructor()) {
+#ifdef DEBUG
+ modifyCount_ = 0;
+#endif
+ }
+
+ public:
+ using iterator = InlineForwardListIterator<T>;
+
+ public:
+ iterator begin() const { return iterator(this); }
+ iterator begin(Node* item) const { return iterator(this, item); }
+ iterator end() const { return iterator(nullptr); }
+ void removeAt(iterator where) { removeAfter(where.prev, where.iter); }
+ void pushFront(Node* t) { insertAfter(this, t); }
+ void pushBack(Node* t) {
+ MOZ_ASSERT(t->next == nullptr);
+#ifdef DEBUG
+ modifyCount_++;
+#endif
+ tail_->next = t;
+ tail_ = t;
+ }
+ T* popFront() {
+ MOZ_ASSERT(!empty());
+ T* result = static_cast<T*>(this->next);
+ removeAfter(this, result);
+ return result;
+ }
+ T* back() const {
+ MOZ_ASSERT(!empty());
+ return static_cast<T*>(tail_);
+ }
+ void insertAfter(Node* at, Node* item) {
+ MOZ_ASSERT(item->next == nullptr);
+#ifdef DEBUG
+ modifyCount_++;
+#endif
+ if (at == tail_) {
+ tail_ = item;
+ }
+ item->next = at->next;
+ at->next = item;
+ }
+ void removeAfter(Node* at, Node* item) {
+#ifdef DEBUG
+ modifyCount_++;
+#endif
+ if (item == tail_) {
+ tail_ = at;
+ }
+ MOZ_ASSERT(at->next == item);
+ at->next = item->next;
+ item->next = nullptr;
+ }
+ void removeAndIncrement(iterator& where) {
+ // Do not change modifyCount_ here. The iterator can still be used
+ // after calling this method, unlike the other methods that modify
+ // the list.
+ Node* item = where.iter;
+ where.iter = item->next;
+ if (item == tail_) {
+ tail_ = where.prev;
+ }
+ MOZ_ASSERT(where.prev->next == item);
+ where.prev->next = where.iter;
+ item->next = nullptr;
+ }
+ void splitAfter(Node* at, InlineForwardList<T>* to) {
+ MOZ_ASSERT(to->empty());
+ if (!at) {
+ at = this;
+ }
+ if (at == tail_) {
+ return;
+ }
+#ifdef DEBUG
+ modifyCount_++;
+#endif
+ to->next = at->next;
+ to->tail_ = tail_;
+ tail_ = at;
+ at->next = nullptr;
+ }
+ bool empty() const { return tail_ == this; }
+ void clear() {
+ this->next = nullptr;
+ tail_ = this;
+#ifdef DEBUG
+ modifyCount_ = 0;
+#endif
+ }
+};
+
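+// Illustrative usage sketch (hypothetical element type): element types derive
+// from InlineForwardListNode<T>, and the list links the nodes in place without
+// owning them:
+//
+//   struct MyItem : public InlineForwardListNode<MyItem> {
+//     int value = 0;
+//   };
+//
+//   InlineForwardList<MyItem> list;
+//   MyItem a, b;
+//   list.pushBack(&a);
+//   list.pushFront(&b);
+//   for (InlineForwardList<MyItem>::iterator it = list.begin();
+//        it != list.end(); ++it) {
+//     it->value += 1;
+//   }
+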
+template <typename T>
+class InlineForwardListIterator {
+ private:
+ friend class InlineForwardList<T>;
+
+ using Node = InlineForwardListNode<T>;
+
+ explicit InlineForwardListIterator(const InlineForwardList<T>* owner)
+ : prev(const_cast<Node*>(static_cast<const Node*>(owner))),
+ iter(owner ? owner->next : nullptr)
+#ifdef DEBUG
+ ,
+ owner_(owner),
+ modifyCount_(owner ? owner->modifyCount_ : 0)
+#endif
+ {
+ }
+
+ InlineForwardListIterator(const InlineForwardList<T>* owner, Node* node)
+ : prev(nullptr),
+ iter(node)
+#ifdef DEBUG
+ ,
+ owner_(owner),
+ modifyCount_(owner ? owner->modifyCount_ : 0)
+#endif
+ {
+ }
+
+ public:
+ InlineForwardListIterator<T>& operator++() {
+ MOZ_ASSERT(modifyCount_ == owner_->modifyCount_);
+ prev = iter;
+ iter = iter->next;
+ return *this;
+ }
+ InlineForwardListIterator<T> operator++(int) {
+ InlineForwardListIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ T* operator*() const {
+ MOZ_ASSERT(modifyCount_ == owner_->modifyCount_);
+ return static_cast<T*>(iter);
+ }
+ T* operator->() const {
+ MOZ_ASSERT(modifyCount_ == owner_->modifyCount_);
+ return static_cast<T*>(iter);
+ }
+ bool operator!=(const InlineForwardListIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator==(const InlineForwardListIterator<T>& where) const {
+ return iter == where.iter;
+ }
+ explicit operator bool() const { return iter != nullptr; }
+
+ private:
+ Node* prev;
+ Node* iter;
+
+#ifdef DEBUG
+ const InlineForwardList<T>* owner_;
+ int modifyCount_;
+#endif
+};
+
+template <typename T>
+class InlineList;
+template <typename T>
+class InlineListIterator;
+template <typename T>
+class InlineListReverseIterator;
+
+template <typename T>
+class InlineListNode : public InlineForwardListNode<T> {
+ public:
+ InlineListNode() : InlineForwardListNode<T>(nullptr), prev(nullptr) {}
+ InlineListNode(InlineListNode<T>* n, InlineListNode<T>* p)
+ : InlineForwardListNode<T>(n), prev(p) {}
+
+ // Move constructor. Nodes may be moved without being removed from their
+ // containing lists. For example, this allows list nodes to be safely
+ // stored in a resizable Vector -- when the Vector resizes, the new storage
+ // is initialized by this move constructor. |other| is a reference to the
+ // old node which the |this| node here is replacing.
+ InlineListNode(InlineListNode<T>&& other)
+ : InlineForwardListNode<T>(other.next) {
+ InlineListNode<T>* newNext = static_cast<InlineListNode<T>*>(other.next);
+ InlineListNode<T>* newPrev = other.prev;
+ prev = newPrev;
+
+ // Update the pointers in the adjacent nodes to point to this node's new
+ // location.
+ newNext->prev = this;
+ newPrev->next = this;
+ }
+
+ InlineListNode(const InlineListNode<T>&) = delete;
+ void operator=(const InlineListNode<T>&) = delete;
+
+ bool isInList() { return prev != nullptr && this->next != nullptr; }
+
+ protected:
+ friend class InlineList<T>;
+ friend class InlineListIterator<T>;
+ friend class InlineListReverseIterator<T>;
+
+ InlineListNode<T>* prev;
+};
+
+template <typename T>
+class InlineList : protected InlineListNode<T> {
+ using Node = InlineListNode<T>;
+
+ public:
+ InlineList() : InlineListNode<T>(this, this) {}
+
+ public:
+ using iterator = InlineListIterator<T>;
+ using reverse_iterator = InlineListReverseIterator<T>;
+
+ public:
+ iterator begin() const { return iterator(static_cast<Node*>(this->next)); }
+ iterator begin(Node* t) const { return iterator(t); }
+ iterator end() const { return iterator(this); }
+ reverse_iterator rbegin() const { return reverse_iterator(this->prev); }
+ reverse_iterator rbegin(Node* t) const { return reverse_iterator(t); }
+ reverse_iterator rend() const { return reverse_iterator(this); }
+ void pushFront(Node* t) { insertAfter(this, t); }
+ void pushFrontUnchecked(Node* t) { insertAfterUnchecked(this, t); }
+ void pushBack(Node* t) { insertBefore(this, t); }
+ void pushBackUnchecked(Node* t) { insertBeforeUnchecked(this, t); }
+ T* popFront() {
+ MOZ_ASSERT(!empty());
+ T* t = static_cast<T*>(this->next);
+ remove(t);
+ return t;
+ }
+ T* popBack() {
+ MOZ_ASSERT(!empty());
+ T* t = static_cast<T*>(this->prev);
+ remove(t);
+ return t;
+ }
+ T* peekBack() const {
+ iterator iter = end();
+ iter--;
+ return *iter;
+ }
+ void insertBefore(Node* at, Node* item) {
+ MOZ_ASSERT(item->prev == nullptr);
+ MOZ_ASSERT(item->next == nullptr);
+ insertBeforeUnchecked(at, item);
+ }
+ void insertBeforeUnchecked(Node* at, Node* item) {
+ Node* atPrev = at->prev;
+ item->next = at;
+ item->prev = atPrev;
+ atPrev->next = item;
+ at->prev = item;
+ }
+ void insertAfter(Node* at, Node* item) {
+ MOZ_ASSERT(item->prev == nullptr);
+ MOZ_ASSERT(item->next == nullptr);
+ insertAfterUnchecked(at, item);
+ }
+ void insertAfterUnchecked(Node* at, Node* item) {
+ Node* atNext = static_cast<Node*>(at->next);
+ item->next = atNext;
+ item->prev = at;
+ atNext->prev = item;
+ at->next = item;
+ }
+ void remove(Node* t) {
+ Node* tNext = static_cast<Node*>(t->next);
+ Node* tPrev = t->prev;
+ tPrev->next = tNext;
+ tNext->prev = tPrev;
+ t->next = nullptr;
+ t->prev = nullptr;
+ }
+ // Remove |old| from the list and insert |now| in its place.
+ void replace(Node* old, Node* now) {
+ MOZ_ASSERT(now->next == nullptr && now->prev == nullptr);
+ Node* listNext = static_cast<Node*>(old->next);
+ Node* listPrev = old->prev;
+ listPrev->next = now;
+ listNext->prev = now;
+ now->next = listNext;
+ now->prev = listPrev;
+ old->next = nullptr;
+ old->prev = nullptr;
+ }
+ void clear() { this->next = this->prev = this; }
+ bool empty() const { return begin() == end(); }
+ void takeElements(InlineList& l) {
+ MOZ_ASSERT(&l != this, "cannot takeElements from this");
+ Node* lprev = l.prev;
+ static_cast<Node*>(l.next)->prev = this;
+ lprev->next = this->next;
+ static_cast<Node*>(this->next)->prev = l.prev;
+ this->next = l.next;
+ l.clear();
+ }
+};
+
+template <typename T>
+class InlineListIterator {
+ private:
+ friend class InlineList<T>;
+
+ using Node = InlineListNode<T>;
+
+ explicit InlineListIterator(const Node* iter)
+ : iter(const_cast<Node*>(iter)) {}
+
+ public:
+ InlineListIterator<T>& operator++() {
+ iter = static_cast<Node*>(iter->next);
+ return *this;
+ }
+ InlineListIterator<T> operator++(int) {
+ InlineListIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ InlineListIterator<T>& operator--() {
+ iter = iter->prev;
+ return *this;
+ }
+ InlineListIterator<T> operator--(int) {
+ InlineListIterator<T> old(*this);
+ operator--();
+ return old;
+ }
+ T* operator*() const { return static_cast<T*>(iter); }
+ T* operator->() const { return static_cast<T*>(iter); }
+ bool operator!=(const InlineListIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator==(const InlineListIterator<T>& where) const {
+ return iter == where.iter;
+ }
+
+ private:
+ Node* iter;
+};
+
+template <typename T>
+class InlineListReverseIterator {
+ private:
+ friend class InlineList<T>;
+
+ using Node = InlineListNode<T>;
+
+ explicit InlineListReverseIterator(const Node* iter)
+ : iter(const_cast<Node*>(iter)) {}
+
+ public:
+ InlineListReverseIterator<T>& operator++() {
+ iter = iter->prev;
+ return *this;
+ }
+ InlineListReverseIterator<T> operator++(int) {
+ InlineListReverseIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ InlineListReverseIterator<T>& operator--() {
+ iter = static_cast<Node*>(iter->next);
+ return *this;
+ }
+ InlineListReverseIterator<T> operator--(int) {
+ InlineListReverseIterator<T> old(*this);
+ operator--();
+ return old;
+ }
+ T* operator*() { return static_cast<T*>(iter); }
+ T* operator->() { return static_cast<T*>(iter); }
+ bool operator!=(const InlineListReverseIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator==(const InlineListReverseIterator<T>& where) const {
+ return iter == where.iter;
+ }
+
+ private:
+ Node* iter;
+};
+
+// This list type is more or less exactly an InlineForwardList without a
+// sentinel node. It is useful for algorithms that repeatedly merge many
+// singleton lists, rather than for lists that are often empty.
+template <typename T>
+class InlineConcatListIterator;
+template <typename T>
+class InlineConcatList {
+ private:
+ using Node = InlineConcatList<T>;
+
+ InlineConcatList<T>* thisFromConstructor() { return this; }
+
+ public:
+ InlineConcatList() : next(nullptr), tail(thisFromConstructor()) {}
+
+ using iterator = InlineConcatListIterator<T>;
+
+ iterator begin() const { return iterator(this); }
+
+ iterator end() const { return iterator(nullptr); }
+
+ void append(InlineConcatList<T>* adding) {
+ MOZ_ASSERT(tail);
+ MOZ_ASSERT(!tail->next);
+ MOZ_ASSERT(adding->tail);
+ MOZ_ASSERT(!adding->tail->next);
+
+ tail->next = adding;
+ tail = adding->tail;
+ adding->tail = nullptr;
+ }
+
+ protected:
+ friend class InlineConcatListIterator<T>;
+ Node* next;
+ Node* tail;
+};
+
+template <typename T>
+class InlineConcatListIterator {
+ private:
+ friend class InlineConcatList<T>;
+
+ using Node = InlineConcatList<T>;
+
+ explicit InlineConcatListIterator(const Node* iter)
+ : iter(const_cast<Node*>(iter)) {}
+
+ public:
+ InlineConcatListIterator<T>& operator++() {
+ iter = static_cast<Node*>(iter->next);
+ return *this;
+ }
+ InlineConcatListIterator<T> operator++(int) {
+ InlineConcatListIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ T* operator*() const { return static_cast<T*>(iter); }
+ T* operator->() const { return static_cast<T*>(iter); }
+ bool operator!=(const InlineConcatListIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator==(const InlineConcatListIterator<T>& where) const {
+ return iter == where.iter;
+ }
+
+ private:
+ Node* iter;
+};
+
+template <typename T>
+class InlineSpaghettiStack;
+template <typename T>
+class InlineSpaghettiStackNode;
+template <typename T>
+class InlineSpaghettiStackIterator;
+
+template <typename T>
+class InlineSpaghettiStackNode : public InlineForwardListNode<T> {
+ using Parent = InlineForwardListNode<T>;
+
+ public:
+ InlineSpaghettiStackNode() : Parent() {}
+
+ explicit InlineSpaghettiStackNode(InlineSpaghettiStackNode<T>* n)
+ : Parent(n) {}
+
+ InlineSpaghettiStackNode(const InlineSpaghettiStackNode<T>&) = delete;
+
+ protected:
+ friend class InlineSpaghettiStack<T>;
+ friend class InlineSpaghettiStackIterator<T>;
+};
+
+template <typename T>
+class InlineSpaghettiStack : protected InlineSpaghettiStackNode<T> {
+ friend class InlineSpaghettiStackIterator<T>;
+
+ using Node = InlineSpaghettiStackNode<T>;
+
+ public:
+ InlineSpaghettiStack() = default;
+
+ public:
+ using iterator = InlineSpaghettiStackIterator<T>;
+
+ public:
+ iterator begin() const { return iterator(this); }
+ iterator end() const { return iterator(nullptr); }
+
+ void push(Node* t) {
+ MOZ_ASSERT(t->next == nullptr);
+ t->next = this->next;
+ this->next = t;
+ }
+
+ void copy(const InlineSpaghettiStack<T>& stack) { this->next = stack.next; }
+
+ bool empty() const { return this->next == nullptr; }
+};
+
+template <typename T>
+class InlineSpaghettiStackIterator {
+ private:
+ friend class InlineSpaghettiStack<T>;
+
+ using Node = InlineSpaghettiStackNode<T>;
+
+ explicit InlineSpaghettiStackIterator(const InlineSpaghettiStack<T>* owner)
+ : iter(owner ? static_cast<Node*>(owner->next) : nullptr) {}
+
+ public:
+ InlineSpaghettiStackIterator<T>& operator++() {
+ iter = static_cast<Node*>(iter->next);
+ return *this;
+ }
+ InlineSpaghettiStackIterator<T> operator++(int) {
+ InlineSpaghettiStackIterator<T> old(*this);
+ operator++();
+ return old;
+ }
+ T* operator*() const { return static_cast<T*>(iter); }
+ T* operator->() const { return static_cast<T*>(iter); }
+ bool operator!=(const InlineSpaghettiStackIterator<T>& where) const {
+ return iter != where.iter;
+ }
+ bool operator==(const InlineSpaghettiStackIterator<T>& where) const {
+ return iter == where.iter;
+ }
+
+ private:
+ Node* iter;
+};
+
+} // namespace js
+
+#endif /* jit_InlineList_h */
diff --git a/js/src/jit/InlineScriptTree-inl.h b/js/src/jit/InlineScriptTree-inl.h
new file mode 100644
index 0000000000..21d55bb039
--- /dev/null
+++ b/js/src/jit/InlineScriptTree-inl.h
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_InlineScriptTree_inl_h
+#define jit_InlineScriptTree_inl_h
+
+#include "jit/InlineScriptTree.h"
+
+#include "mozilla/Assertions.h"
+
+#include "jit/JitAllocPolicy.h"
+#include "js/TypeDecls.h"
+#include "vm/JSScript.h"
+
+namespace js {
+namespace jit {
+
+InlineScriptTree* InlineScriptTree::New(TempAllocator* allocator,
+ InlineScriptTree* callerTree,
+ jsbytecode* callerPc,
+ JSScript* script) {
+ MOZ_ASSERT_IF(!callerTree, !callerPc);
+ MOZ_ASSERT_IF(callerTree, callerTree->script()->containsPC(callerPc));
+
+ // Allocate a new InlineScriptTree
+ void* treeMem = allocator->allocate(sizeof(InlineScriptTree));
+ if (!treeMem) {
+ return nullptr;
+ }
+
+ // Initialize it.
+ return new (treeMem) InlineScriptTree(callerTree, callerPc, script);
+}
+
+InlineScriptTree* InlineScriptTree::addCallee(TempAllocator* allocator,
+ jsbytecode* callerPc,
+ JSScript* calleeScript) {
+ MOZ_ASSERT(script_ && script_->containsPC(callerPc));
+ InlineScriptTree* calleeTree = New(allocator, this, callerPc, calleeScript);
+ if (!calleeTree) {
+ return nullptr;
+ }
+
+ calleeTree->nextCallee_ = children_;
+ children_ = calleeTree;
+ return calleeTree;
+}
+
+void InlineScriptTree::removeCallee(InlineScriptTree* callee) {
+ InlineScriptTree** prevPtr = &children_;
+ for (InlineScriptTree* child = children_; child; child = child->nextCallee_) {
+ if (child == callee) {
+ *prevPtr = child->nextCallee_;
+ return;
+ }
+ prevPtr = &child->nextCallee_;
+ }
+ MOZ_CRASH("Callee not found");
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_InlineScriptTree_inl_h */
diff --git a/js/src/jit/InlineScriptTree.h b/js/src/jit/InlineScriptTree.h
new file mode 100644
index 0000000000..0e5e2767ff
--- /dev/null
+++ b/js/src/jit/InlineScriptTree.h
@@ -0,0 +1,112 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_InlineScriptTree_h
+#define jit_InlineScriptTree_h
+
+#include "mozilla/Assertions.h"
+
+#include "jit/JitAllocPolicy.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+namespace jit {
+
+// The compiler at various points needs to be able to store references to the
+// current inline path (the sequence of scripts and call-pcs that leads to the
+// current function being inlined).
+//
+// To support this, the compiler uses a tree that records the inlinings done
+// during compilation. (A small usage sketch follows the class below.)
+class InlineScriptTree {
+ // InlineScriptTree for the caller
+ InlineScriptTree* caller_;
+
+ // PC in the caller corresponding to this script.
+ jsbytecode* callerPc_;
+
+ // Script for this entry.
+ JSScript* script_;
+
+ // Child entries (linked together by nextCallee pointer)
+ InlineScriptTree* children_;
+ InlineScriptTree* nextCallee_;
+
+ public:
+ InlineScriptTree(InlineScriptTree* caller, jsbytecode* callerPc,
+ JSScript* script)
+ : caller_(caller),
+ callerPc_(callerPc),
+ script_(script),
+ children_(nullptr),
+ nextCallee_(nullptr) {}
+
+ static inline InlineScriptTree* New(TempAllocator* allocator,
+ InlineScriptTree* caller,
+ jsbytecode* callerPc, JSScript* script);
+
+ inline InlineScriptTree* addCallee(TempAllocator* allocator,
+ jsbytecode* callerPc,
+ JSScript* calleeScript);
+ inline void removeCallee(InlineScriptTree* callee);
+
+ InlineScriptTree* caller() const { return caller_; }
+
+ bool isOutermostCaller() const { return caller_ == nullptr; }
+ bool hasCaller() const { return caller_ != nullptr; }
+
+ jsbytecode* callerPc() const { return callerPc_; }
+
+ JSScript* script() const { return script_; }
+
+ bool hasChildren() const { return children_ != nullptr; }
+ InlineScriptTree* firstChild() const {
+ MOZ_ASSERT(hasChildren());
+ return children_;
+ }
+
+ bool hasNextCallee() const { return nextCallee_ != nullptr; }
+ InlineScriptTree* nextCallee() const {
+ MOZ_ASSERT(hasNextCallee());
+ return nextCallee_;
+ }
+
+ unsigned depth() const {
+ if (isOutermostCaller()) {
+ return 1;
+ }
+ return 1 + caller_->depth();
+ }
+};
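+
+// Sketch of typical construction and traversal (illustrative only; |alloc|,
+// |outerScript|, |callPc| and |calleeScript| are assumed to exist):
+//
+//   InlineScriptTree* root =
+//       InlineScriptTree::New(&alloc, nullptr, nullptr, outerScript);
+//   InlineScriptTree* inlined = root->addCallee(&alloc, callPc, calleeScript);
+//
+//   // Walking caller() reconstructs the inline path, innermost first:
+//   for (InlineScriptTree* t = inlined; t; t = t->caller()) {
+//     // t->script() is the script at this level; if t->hasCaller(),
+//     // t->callerPc() is the call site in the caller.
+//   }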
+
+class BytecodeSite : public TempObject {
+ // InlineScriptTree identifying innermost active function at site.
+ InlineScriptTree* tree_;
+
+ // Bytecode address within innermost active function.
+ jsbytecode* pc_;
+
+ public:
+ // Wasm compilation leaves both fields null.
+ BytecodeSite() : tree_(nullptr), pc_(nullptr) {}
+
+ // Warp compilation sets both fields to non-null values.
+ BytecodeSite(InlineScriptTree* tree, jsbytecode* pc) : tree_(tree), pc_(pc) {
+ MOZ_ASSERT(tree_ != nullptr);
+ MOZ_ASSERT(pc_ != nullptr);
+ }
+
+ InlineScriptTree* tree() const { return tree_; }
+
+ jsbytecode* pc() const { return pc_; }
+
+ JSScript* script() const { return tree_ ? tree_->script() : nullptr; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_InlineScriptTree_h */
diff --git a/js/src/jit/InstructionReordering.cpp b/js/src/jit/InstructionReordering.cpp
new file mode 100644
index 0000000000..b399e0bb8c
--- /dev/null
+++ b/js/src/jit/InstructionReordering.cpp
@@ -0,0 +1,248 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/InstructionReordering.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+static void MoveBefore(MBasicBlock* block, MInstruction* at,
+ MInstruction* ins) {
+ if (at == ins) {
+ return;
+ }
+
+ // Update instruction numbers.
+ for (MInstructionIterator iter(block->begin(at)); *iter != ins; iter++) {
+ MOZ_ASSERT(iter->id() < ins->id());
+ iter->setId(iter->id() + 1);
+ }
+ ins->setId(at->id() - 1);
+ block->moveBefore(at, ins);
+}
+
+static bool IsLastUse(MDefinition* ins, MDefinition* input,
+ MBasicBlock* loopHeader) {
+ // If we are in a loop, this cannot be the last use of any definitions from
+ // outside the loop, as those definitions can be used in future iterations.
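+ // For example, a definition created before a loop but used inside it stays
+ // live on every iteration, so an instruction in the loop body is never
+ // treated as that definition's last use.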
+ if (loopHeader && input->block()->id() < loopHeader->id()) {
+ return false;
+ }
+ for (MUseDefIterator iter(input); iter; iter++) {
+ // Watch for uses defined in blocks which ReorderInstructions hasn't
+ // processed yet. These nodes have not had their ids set yet.
+ if (iter.def()->block()->id() > ins->block()->id()) {
+ return false;
+ }
+ if (iter.def()->id() > ins->id()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static void MoveConstantsToStart(MBasicBlock* block,
+ MInstruction* insertionPoint) {
+ // Move constants with a single use in the current block to the start of the
+ // block. Constants won't be reordered by ReorderInstructions, as they have no
+ // inputs. Moving them up as high as possible can allow their use to be moved
+ // up further, though, and has no cost if the constant is emitted at its use.
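+ //
+ // For example (hypothetical MIR; |c| is a constant with a single use in this
+ // block):
+ //
+ //   before:  x = load A ; y = load B ; c = constant 1 ; z = add y, c
+ //   after:   c = constant 1 ; x = load A ; y = load B ; z = add y, c
+ //
+ // Only |c| moves; the loads and the add keep their relative order here.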
+
+ MInstructionIterator iter(block->begin(insertionPoint));
+ while (iter != block->end()) {
+ MInstruction* ins = *iter;
+ iter++;
+
+ if (!ins->isConstant() || !ins->hasOneUse() ||
+ ins->usesBegin()->consumer()->block() != block ||
+ IsFloatingPointType(ins->type())) {
+ continue;
+ }
+
+ MOZ_ASSERT(ins->isMovable());
+ MOZ_ASSERT(insertionPoint != ins);
+
+ // Note: we don't need to use MoveBefore here because MoveConstantsToStart
+ // is called right before we renumber all instructions in this block.
+ block->moveBefore(insertionPoint, ins);
+ }
+}
+
+bool jit::ReorderInstructions(MIRGraph& graph) {
+ // Renumber all instructions in the graph as we go.
+ size_t nextId = 0;
+
+ // List of the headers of any loops we are in.
+ Vector<MBasicBlock*, 4, SystemAllocPolicy> loopHeaders;
+
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); block++) {
+ // Don't reorder instructions within entry blocks, which have special
+ // requirements.
+ bool isEntryBlock =
+ *block == graph.entryBlock() || *block == graph.osrBlock();
+
+ MInstruction* insertionPoint = nullptr;
+ if (!isEntryBlock) {
+ // Move constants to the start of the block before renumbering all
+ // instructions.
+ insertionPoint = block->safeInsertTop();
+ MoveConstantsToStart(*block, insertionPoint);
+ }
+
+ // Renumber all definitions inside the basic blocks.
+ for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd();
+ iter++) {
+ iter->setId(nextId++);
+ }
+
+ for (MInstructionIterator iter(block->begin()); iter != block->end();
+ iter++) {
+ iter->setId(nextId++);
+ }
+
+ if (isEntryBlock) {
+ continue;
+ }
+
+ if (block->isLoopHeader()) {
+ if (!loopHeaders.append(*block)) {
+ return false;
+ }
+ }
+
+ MBasicBlock* innerLoop = loopHeaders.empty() ? nullptr : loopHeaders.back();
+
+ MInstructionReverseIterator rtop = ++block->rbegin(insertionPoint);
+ for (MInstructionIterator iter(block->begin(insertionPoint));
+ iter != block->end();) {
+ MInstruction* ins = *iter;
+
+ // Filter out some instructions which are never reordered.
+ if (ins->isEffectful() || !ins->isMovable() || ins->resumePoint() ||
+ ins == block->lastIns()) {
+ iter++;
+ continue;
+ }
+
+ // Look for inputs where this instruction is the last use of that
+ // input. If we move this instruction up, the input's lifetime will
+ // be shortened, modulo resume point uses (which don't need to be
+ // stored in a register, and can be handled by the register
+ // allocator by just spilling at some point with no reload).
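+ //
+ // For example (hypothetical MIR):
+ //
+ //   x = load A
+ //   y = load B
+ //   t = someUnrelatedWork
+ //   z = add x, y        // last use of both x and y
+ //
+ // Moving |z| up before |t| shortens the live ranges of x and y, so they no
+ // longer need to be kept alive across the unrelated work.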
+ Vector<MDefinition*, 4, SystemAllocPolicy> lastUsedInputs;
+ for (size_t i = 0; i < ins->numOperands(); i++) {
+ MDefinition* input = ins->getOperand(i);
+ if (!input->isConstant() && IsLastUse(ins, input, innerLoop)) {
+ if (!lastUsedInputs.append(input)) {
+ return false;
+ }
+ }
+ }
+
+ // Don't try to move instructions which aren't the last use of any
+ // of their inputs (we really ought to move these down instead).
+ if (lastUsedInputs.length() < 2) {
+ iter++;
+ continue;
+ }
+
+ MInstruction* target = ins;
+ MInstruction* postCallTarget = nullptr;
+ for (MInstructionReverseIterator riter = ++block->rbegin(ins);
+ riter != rtop; riter++) {
+ MInstruction* prev = *riter;
+ if (prev->isInterruptCheck()) {
+ break;
+ }
+ if (prev->isSetInitializedLength()) {
+ break;
+ }
+
+ // The instruction can't be moved before any of its uses.
+ bool isUse = false;
+ for (size_t i = 0; i < ins->numOperands(); i++) {
+ if (ins->getOperand(i) == prev) {
+ isUse = true;
+ break;
+ }
+ }
+ if (isUse) {
+ break;
+ }
+
+ // The instruction can't be moved before an instruction that
+ // stores to a location read by the instruction.
+ if (prev->isEffectful() &&
+ (ins->getAliasSet().flags() & prev->getAliasSet().flags()) &&
+ ins->mightAlias(prev) != MDefinition::AliasType::NoAlias) {
+ break;
+ }
+
+ // Make sure the instruction will still be the last use of one
+ // of its inputs when moved up this far.
+ for (size_t i = 0; i < lastUsedInputs.length();) {
+ bool found = false;
+ for (size_t j = 0; j < prev->numOperands(); j++) {
+ if (prev->getOperand(j) == lastUsedInputs[i]) {
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ lastUsedInputs[i] = lastUsedInputs.back();
+ lastUsedInputs.popBack();
+ } else {
+ i++;
+ }
+ }
+ if (lastUsedInputs.length() < 2) {
+ break;
+ }
+
+ // If we see a captured call result, either move the instruction before
+ // the corresponding call or don't move it at all.
+ if (prev->isCallResultCapture()) {
+ if (!postCallTarget) {
+ postCallTarget = target;
+ }
+ } else if (postCallTarget) {
+ MOZ_ASSERT(MWasmCallBase::IsWasmCall(prev) ||
+ prev->isIonToWasmCall());
+ postCallTarget = nullptr;
+ }
+
+ // We can move the instruction before this one.
+ target = prev;
+ }
+
+ if (postCallTarget) {
+ // We would have plonked this instruction between a call and its
+ // captured return value. Instead put it after the last corresponding
+ // return value.
+ target = postCallTarget;
+ }
+
+ iter++;
+ MoveBefore(*block, target, ins);
+
+ // Instruction reordering can move a bailing instruction up past a call
+ // that throws an exception, causing spurious bailouts. This should rarely
+ // be an issue in practice, so we only update the bailout kind if we don't
+ // have anything more specific.
+ if (ins->bailoutKind() == BailoutKind::TranspiledCacheIR) {
+ ins->setBailoutKind(BailoutKind::InstructionReordering);
+ }
+ }
+
+ if (block->isLoopBackedge()) {
+ loopHeaders.popBack();
+ }
+ }
+
+ return true;
+}
diff --git a/js/src/jit/InstructionReordering.h b/js/src/jit/InstructionReordering.h
new file mode 100644
index 0000000000..055e6da5b3
--- /dev/null
+++ b/js/src/jit/InstructionReordering.h
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_InstructionReordering_h
+#define jit_InstructionReordering_h
+
+#include "jit/IonAnalysis.h"
+
+namespace js {
+namespace jit {
+
+[[nodiscard]] bool ReorderInstructions(MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_InstructionReordering_h
diff --git a/js/src/jit/InterpreterEntryTrampoline.cpp b/js/src/jit/InterpreterEntryTrampoline.cpp
new file mode 100644
index 0000000000..a58713ff09
--- /dev/null
+++ b/js/src/jit/InterpreterEntryTrampoline.cpp
@@ -0,0 +1,269 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/InterpreterEntryTrampoline.h"
+#include "jit/JitRuntime.h"
+#include "jit/Linker.h"
+#include "vm/Interpreter.h"
+
+#include "gc/Marking-inl.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void js::ClearInterpreterEntryMap(JSRuntime* runtime) {
+ if (runtime->hasJitRuntime() &&
+ runtime->jitRuntime()->hasInterpreterEntryMap()) {
+ runtime->jitRuntime()->getInterpreterEntryMap()->clear();
+ }
+}
+
+void EntryTrampolineMap::traceTrampolineCode(JSTracer* trc) {
+ for (jit::EntryTrampolineMap::Enum e(*this); !e.empty(); e.popFront()) {
+ EntryTrampoline& trampoline = e.front().value();
+ trampoline.trace(trc);
+ }
+}
+
+void EntryTrampolineMap::updateScriptsAfterMovingGC(void) {
+ for (jit::EntryTrampolineMap::Enum e(*this); !e.empty(); e.popFront()) {
+ BaseScript* script = e.front().key();
+ if (IsForwarded(script)) {
+ script = Forwarded(script);
+ e.rekeyFront(script);
+ }
+ }
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void EntryTrampoline::checkTrampolineAfterMovingGC() {
+ JitCode* trampoline = entryTrampoline_;
+ CheckGCThingAfterMovingGC(trampoline);
+}
+
+void EntryTrampolineMap::checkScriptsAfterMovingGC() {
+ for (jit::EntryTrampolineMap::Enum r(*this); !r.empty(); r.popFront()) {
+ BaseScript* script = r.front().key();
+ CheckGCThingAfterMovingGC(script);
+ r.front().value().checkTrampolineAfterMovingGC();
+ auto ptr = lookup(script);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+}
+#endif
+
+void JitRuntime::generateBaselineInterpreterEntryTrampoline(
+ MacroAssembler& masm) {
+ AutoCreatedBy acb(masm,
+ "JitRuntime::generateBaselineInterpreterEntryTrampoline");
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ Register nargs = regs.takeAny();
+ Register callee = regs.takeAny();
+ Register scratch = regs.takeAny();
+
+ // Load callee token and keep it in a register as it will be used often
+ Address calleeTokenAddr(
+ FramePointer, BaselineInterpreterEntryFrameLayout::offsetOfCalleeToken());
+ masm.loadPtr(calleeTokenAddr, callee);
+
+ // Load argc into nargs.
+ masm.loadNumActualArgs(FramePointer, nargs);
+
+ Label notFunction;
+ {
+ // Check whether the callee token is a script or a function.
+ masm.branchTestPtr(Assembler::NonZero, callee, Imm32(CalleeTokenScriptBit),
+ &notFunction);
+
+ // CalleeToken is a function, load |nformals| into scratch
+ masm.movePtr(callee, scratch);
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), scratch);
+ masm.loadFunctionArgCount(scratch, scratch);
+
+ // Take max(nformals, argc).
+ Label noUnderflow;
+ masm.branch32(Assembler::AboveOrEqual, nargs, scratch, &noUnderflow);
+ { masm.movePtr(scratch, nargs); }
+ masm.bind(&noUnderflow);
+
+ // Add 1 to nargs if constructing.
+ static_assert(
+ CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.movePtr(callee, scratch);
+ masm.and32(Imm32(uint32_t(CalleeToken_FunctionConstructing)), scratch);
+ masm.addPtr(scratch, nargs);
+ }
+ masm.bind(&notFunction);
+
+ // Align stack
+ masm.alignJitStackBasedOnNArgs(nargs, /*countIncludesThis = */ false);
+
+ // Point argPtr to the topmost argument.
+ static_assert(sizeof(Value) == 8,
+ "Using TimesEight for scale of sizeof(Value).");
+ BaseIndex topPtrAddr(FramePointer, nargs, TimesEight,
+ sizeof(BaselineInterpreterEntryFrameLayout));
+ Register argPtr = nargs;
+ masm.computeEffectiveAddress(topPtrAddr, argPtr);
+
+ // Load the end address (the address of the callee token) into scratch.
+ masm.computeEffectiveAddress(calleeTokenAddr, scratch);
+
+ // Copy |this|+arguments
+ Label loop;
+ masm.bind(&loop);
+ {
+ masm.pushValue(Address(argPtr, 0));
+ masm.subPtr(Imm32(sizeof(Value)), argPtr);
+ masm.branchPtr(Assembler::Above, argPtr, scratch, &loop);
+ }
+
+ // Copy callee token
+ masm.push(callee);
+
+ // Save a new descriptor using BaselineInterpreterEntry frame type.
+ masm.loadNumActualArgs(FramePointer, scratch);
+ masm.pushFrameDescriptorForJitCall(FrameType::BaselineInterpreterEntry,
+ scratch, scratch);
+
+ // Call into baseline interpreter
+ uint8_t* blinterpAddr = baselineInterpreter().codeRaw();
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+ masm.call(ImmPtr(blinterpAddr));
+
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+void JitRuntime::generateInterpreterEntryTrampoline(MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterEntryTrampoline");
+
+ // If BLI is disabled, we don't need an offset.
+ if (IsBaselineInterpreterEnabled()) {
+ uint32_t offset = startTrampolineCode(masm);
+ if (!vmInterpreterEntryOffset_) {
+ vmInterpreterEntryOffset_ = offset;
+ }
+ }
+
+#ifdef JS_CODEGEN_ARM64
+ // Use the normal stack pointer for the initial pushes.
+ masm.SetStackPointer64(sp);
+
+ // Push lr and fp together to maintain 16-byte alignment.
+ masm.push(lr, FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ // Save the PSP register (r28), and a scratch (r19).
+ masm.push(r19, r28);
+
+ // Setup the PSP so we can use callWithABI below.
+ masm.SetStackPointer64(PseudoStackPointer64);
+ masm.initPseudoStackPtr();
+
+ Register arg0 = IntArgReg0;
+ Register arg1 = IntArgReg1;
+ Register scratch = r19;
+#elif defined(JS_CODEGEN_X86)
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register arg0 = regs.takeAnyGeneral();
+ Register arg1 = regs.takeAnyGeneral();
+ Register scratch = regs.takeAnyGeneral();
+
+ // First two arguments are passed on the stack in 32-bit.
+ Address cxAddr(FramePointer, 2 * sizeof(void*));
+ Address stateAddr(FramePointer, 3 * sizeof(void*));
+ masm.loadPtr(cxAddr, arg0);
+ masm.loadPtr(stateAddr, arg1);
+#else
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ regs.take(IntArgReg0);
+ regs.take(IntArgReg1);
+ Register arg0 = IntArgReg0;
+ Register arg1 = IntArgReg1;
+ Register scratch = regs.takeAnyGeneral();
+#endif
+
+ using Fn = bool (*)(JSContext* cx, js::RunState& state);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(arg0); // cx
+ masm.passABIArg(arg1); // state
+ masm.callWithABI<Fn, Interpret>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+#ifdef JS_CODEGEN_ARM64
+ masm.syncStackPtr();
+ masm.SetStackPointer64(sp);
+
+ // Restore r28 and r19.
+ masm.pop(r28, r19);
+
+ // Restore old fp and pop lr for return.
+ masm.pop(FramePointer, lr);
+ masm.abiret();
+
+ // Reset stack pointer.
+ masm.SetStackPointer64(PseudoStackPointer64);
+#else
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.ret();
+#endif
+}
+
+JitCode* JitRuntime::generateEntryTrampolineForScript(JSContext* cx,
+ JSScript* script) {
+ if (JitSpewEnabled(JitSpew_Codegen)) {
+ UniqueChars funName;
+ if (script->function() && script->function()->displayAtom()) {
+ funName = AtomToPrintableString(cx, script->function()->displayAtom());
+ }
+
+ JitSpew(JitSpew_Codegen,
+ "# Emitting Interpreter Entry Trampoline for %s (%s:%u:%u)",
+ funName ? funName.get() : "*", script->filename(), script->lineno(),
+ script->column());
+ }
+
+ TempAllocator temp(&cx->tempLifoAlloc());
+ JitContext jctx(cx);
+ StackMacroAssembler masm(cx, temp);
+ AutoCreatedBy acb(masm, "JitRuntime::generateEntryTrampolineForScript");
+ PerfSpewerRangeRecorder rangeRecorder(masm);
+
+ if (IsBaselineInterpreterEnabled()) {
+ generateBaselineInterpreterEntryTrampoline(masm);
+ rangeRecorder.recordOffset("BaselineInterpreter", cx, script);
+ }
+
+ generateInterpreterEntryTrampoline(masm);
+ rangeRecorder.recordOffset("Interpreter", cx, script);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode(cx, CodeKind::Other);
+ if (!code) {
+ return nullptr;
+ }
+ rangeRecorder.collectRangesForJitCode(code);
+ JitSpew(JitSpew_Codegen, "# code = %p", code->raw());
+ return code;
+}
diff --git a/js/src/jit/InterpreterEntryTrampoline.h b/js/src/jit/InterpreterEntryTrampoline.h
new file mode 100644
index 0000000000..4f49f3fe13
--- /dev/null
+++ b/js/src/jit/InterpreterEntryTrampoline.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_InterpreterEntryTrampoline_h
+#define jit_InterpreterEntryTrampoline_h
+
+#include "gc/Barrier.h"
+#include "gc/Tracer.h"
+#include "jit/JitCode.h"
+#include "js/AllocPolicy.h"
+#include "js/HashTable.h"
+#include "js/RootingAPI.h"
+
+namespace js {
+
+void ClearInterpreterEntryMap(JSRuntime* runtime);
+
+namespace jit {
+
+/*
+ * The EntryTrampolineMap is used to cache the trampoline code for
+ * each script as they are created. These trampolines are created
+ * only under --emit-interpreter-entry and are used to identify which
+ * script is being interpreted when profiling with external profilers
+ * such as perf.
+ *
+ * The map owns the JitCode objects that are created for each script,
+ * and keeps each one alive at least as long as its associated script, in
+ * case we need to re-enter the trampoline.
+ *
+ * As each script is finalized, the entry is manually removed from
+ * the table in BaseScript::finalize which will also release the
+ * trampoline code associated with it.
+ *
+ * During a moving GC, the table is rekeyed in case any scripts
+ * have relocated.
+ */
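+
+// Rough shape of a lookup-or-create consumer of this map (an illustrative
+// sketch only; the real call sites live elsewhere, and |cx|, |script| and
+// |map| are assumed here):
+//
+//   auto p = map.lookupForAdd(script);
+//   if (!p) {
+//     JitCode* code =
+//         cx->runtime()->jitRuntime()->generateEntryTrampolineForScript(
+//             cx, script);
+//     if (!code || !map.add(p, script, EntryTrampoline(cx, code))) {
+//       return nullptr;  // OOM
+//     }
+//   }
+//   uint8_t* entry = p->value().raw();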
+
+class EntryTrampoline {
+ HeapPtr<JitCode*> entryTrampoline_;
+
+ public:
+ void trace(JSTracer* trc) {
+ TraceNullableEdge(trc, &entryTrampoline_, "interpreter-entry-trampoline");
+ }
+
+ explicit EntryTrampoline(JSContext* cx, JitCode* code) {
+ MOZ_ASSERT(code);
+ entryTrampoline_ = code;
+ }
+
+ uint8_t* raw() {
+ MOZ_ASSERT(entryTrampoline_, "Empty trampoline code.");
+ return entryTrampoline_->raw();
+ }
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkTrampolineAfterMovingGC();
+#endif
+};
+
+using JSScriptToTrampolineMap =
+ HashMap<HeapPtr<BaseScript*>, EntryTrampoline,
+ DefaultHasher<HeapPtr<BaseScript*>>, SystemAllocPolicy>;
+class EntryTrampolineMap : public JSScriptToTrampolineMap {
+ public:
+ void traceTrampolineCode(JSTracer* trc);
+ void updateScriptsAfterMovingGC(void);
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkScriptsAfterMovingGC();
+#endif
+};
+
+} // namespace jit
+} // namespace js
+#endif /* jit_InterpreterEntryTrampoline_h */
diff --git a/js/src/jit/Invalidation.h b/js/src/jit/Invalidation.h
new file mode 100644
index 0000000000..f0a5410967
--- /dev/null
+++ b/js/src/jit/Invalidation.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Invalidation_h
+#define jit_Invalidation_h
+
+#include "jit/IonTypes.h"
+#include "js/AllocPolicy.h"
+#include "js/GCVector.h"
+
+namespace js {
+namespace jit {
+
+class IonScript;
+
+class RecompileInfo {
+ JSScript* script_;
+ IonCompilationId id_;
+
+ public:
+ RecompileInfo(JSScript* script, IonCompilationId id)
+ : script_(script), id_(id) {}
+
+ JSScript* script() const { return script_; }
+
+ IonScript* maybeIonScriptToInvalidate() const;
+
+ bool traceWeak(JSTracer* trc);
+
+ bool operator==(const RecompileInfo& other) const {
+ return script_ == other.script_ && id_ == other.id_;
+ }
+};
+
+// The RecompileInfoVector has a MinInlineCapacity of one so that invalidating a
+// single IonScript doesn't require an allocation.
+using RecompileInfoVector = JS::GCVector<RecompileInfo, 1, SystemAllocPolicy>;
+
+// Called from Zone::discardJitCode().
+void InvalidateAll(JS::GCContext* gcx, JS::Zone* zone);
+void FinishInvalidation(JS::GCContext* gcx, JSScript* script);
+
+// Add compilations involving |script| (outer script or inlined) to the vector.
+void AddPendingInvalidation(jit::RecompileInfoVector& invalid,
+ JSScript* script);
+
+// Walk the stack and invalidate active Ion frames for the invalid scripts.
+void Invalidate(JSContext* cx, const RecompileInfoVector& invalid,
+ bool resetUses = true, bool cancelOffThread = true);
+void Invalidate(JSContext* cx, JSScript* script, bool resetUses = true,
+ bool cancelOffThread = true);
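+
+// Typical use (illustrative sketch; |cx| and |script| are assumed):
+//
+//   RecompileInfoVector invalid;
+//   AddPendingInvalidation(invalid, script);  // this script + inlining callers
+//   Invalidate(cx, invalid);
+//
+// Because of the MinInlineCapacity of one, the common single-IonScript case
+// builds |invalid| without allocating.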
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Invalidation_h */
diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp
new file mode 100644
index 0000000000..3d472aa85f
--- /dev/null
+++ b/js/src/jit/Ion.cpp
@@ -0,0 +1,2631 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Ion.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/ThreadLocal.h"
+
+#include "gc/GCContext.h"
+#include "gc/PublicIterators.h"
+#include "jit/AliasAnalysis.h"
+#include "jit/AlignmentMaskAnalysis.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/BacktrackingAllocator.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineJIT.h"
+#include "jit/CodeGenerator.h"
+#include "jit/CompileInfo.h"
+#include "jit/EdgeCaseAnalysis.h"
+#include "jit/EffectiveAddressAnalysis.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/FoldLinearArithConstants.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/InstructionReordering.h"
+#include "jit/Invalidation.h"
+#include "jit/IonAnalysis.h"
+#include "jit/IonCompileTask.h"
+#include "jit/IonIC.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/IonScript.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRealm.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/JitZone.h"
+#include "jit/LICM.h"
+#include "jit/Linker.h"
+#include "jit/LIR.h"
+#include "jit/Lowering.h"
+#include "jit/PerfSpewer.h"
+#include "jit/RangeAnalysis.h"
+#include "jit/ScalarReplacement.h"
+#include "jit/ScriptFromCalleeToken.h"
+#include "jit/Sink.h"
+#include "jit/ValueNumbering.h"
+#include "jit/WarpBuilder.h"
+#include "jit/WarpOracle.h"
+#include "jit/WasmBCE.h"
+#include "js/Printf.h"
+#include "js/UniquePtr.h"
+#include "util/Memory.h"
+#include "util/WindowsWrapper.h"
+#include "vm/HelperThreads.h"
+#include "vm/Realm.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+
+#include "gc/GC-inl.h"
+#include "gc/StableCellHasher-inl.h"
+#include "jit/InlineScriptTree-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "jit/SafepointIndex-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/Realm-inl.h"
+
+#if defined(ANDROID)
+# include <sys/system_properties.h>
+#endif
+
+using mozilla::CheckedInt;
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+JitRuntime::~JitRuntime() {
+ MOZ_ASSERT(numFinishedOffThreadTasks_ == 0);
+ MOZ_ASSERT(ionLazyLinkListSize_ == 0);
+ MOZ_ASSERT(ionLazyLinkList_.ref().isEmpty());
+
+ // By this point, the jitcode global table should be empty.
+ MOZ_ASSERT_IF(jitcodeGlobalTable_, jitcodeGlobalTable_->empty());
+ js_delete(jitcodeGlobalTable_.ref());
+
+ // interpreterEntryMap should be cleared out during finishRoots()
+ MOZ_ASSERT_IF(interpreterEntryMap_, interpreterEntryMap_->empty());
+ js_delete(interpreterEntryMap_.ref());
+
+ js_delete(jitHintsMap_.ref());
+}
+
+uint32_t JitRuntime::startTrampolineCode(MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "startTrampolineCode");
+
+ masm.assumeUnreachable("Shouldn't get here");
+ masm.flushBuffer();
+ masm.haltingAlign(CodeAlignment);
+ masm.setFramePushed(0);
+ return masm.currentOffset();
+}
+
+bool JitRuntime::initialize(JSContext* cx) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+
+ AutoAllocInAtomsZone az(cx);
+ JitContext jctx(cx);
+
+ if (!generateTrampolines(cx)) {
+ return false;
+ }
+
+ if (!generateBaselineICFallbackCode(cx)) {
+ return false;
+ }
+
+ jitcodeGlobalTable_ = cx->new_<JitcodeGlobalTable>();
+ if (!jitcodeGlobalTable_) {
+ return false;
+ }
+
+ if (!JitOptions.disableJitHints) {
+ jitHintsMap_ = cx->new_<JitHintsMap>();
+ if (!jitHintsMap_) {
+ return false;
+ }
+ }
+
+ if (JitOptions.emitInterpreterEntryTrampoline) {
+ interpreterEntryMap_ = cx->new_<EntryTrampolineMap>();
+ if (!interpreterEntryMap_) {
+ return false;
+ }
+ }
+
+ if (!GenerateBaselineInterpreter(cx, baselineInterpreter_)) {
+ return false;
+ }
+
+ // Initialize the jitCodeRaw of the Runtime's canonical SelfHostedLazyScript
+ // to point to the interpreter trampoline.
+ cx->runtime()->selfHostedLazyScript.ref().jitCodeRaw_ =
+ interpreterStub().value;
+
+ return true;
+}
+
+bool JitRuntime::generateTrampolines(JSContext* cx) {
+ TempAllocator temp(&cx->tempLifoAlloc());
+ StackMacroAssembler masm(cx, temp);
+ PerfSpewerRangeRecorder rangeRecorder(masm);
+
+ Label bailoutTail;
+ JitSpew(JitSpew_Codegen, "# Emitting bailout tail stub");
+ generateBailoutTailStub(masm, &bailoutTail);
+
+ JitSpew(JitSpew_Codegen, "# Emitting bailout handler");
+ generateBailoutHandler(masm, &bailoutTail);
+ rangeRecorder.recordOffset("Trampoline: Bailout");
+
+ JitSpew(JitSpew_Codegen, "# Emitting invalidator");
+ generateInvalidator(masm, &bailoutTail);
+ rangeRecorder.recordOffset("Trampoline: Invalidator");
+
+ // The arguments rectifier has to use the same frame layout as the function
+ // frames it rectifies.
+ static_assert(std::is_base_of_v<JitFrameLayout, RectifierFrameLayout>,
+ "a rectifier frame can be used with jit frame");
+ static_assert(std::is_base_of_v<JitFrameLayout, WasmToJSJitFrameLayout>,
+ "wasm frames simply are jit frames");
+ static_assert(sizeof(JitFrameLayout) == sizeof(WasmToJSJitFrameLayout),
+ "thus a rectifier frame can be used with a wasm frame");
+
+ JitSpew(JitSpew_Codegen, "# Emitting arguments rectifier");
+ generateArgumentsRectifier(masm, ArgumentsRectifierKind::Normal);
+ rangeRecorder.recordOffset("Trampoline: Arguments Rectifier");
+
+ JitSpew(JitSpew_Codegen, "# Emitting trial inlining arguments rectifier");
+ generateArgumentsRectifier(masm, ArgumentsRectifierKind::TrialInlining);
+ rangeRecorder.recordOffset(
+ "Trampoline: Arguments Rectifier (Trial Inlining)");
+
+ JitSpew(JitSpew_Codegen, "# Emitting EnterJIT sequence");
+ generateEnterJIT(cx, masm);
+ rangeRecorder.recordOffset("Trampoline: EnterJIT");
+
+ JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Value");
+ valuePreBarrierOffset_ = generatePreBarrier(cx, masm, MIRType::Value);
+ rangeRecorder.recordOffset("Trampoline: PreBarrier Value");
+
+ JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for String");
+ stringPreBarrierOffset_ = generatePreBarrier(cx, masm, MIRType::String);
+ rangeRecorder.recordOffset("Trampoline: PreBarrier String");
+
+ JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Object");
+ objectPreBarrierOffset_ = generatePreBarrier(cx, masm, MIRType::Object);
+ rangeRecorder.recordOffset("Trampoline: PreBarrier Object");
+
+ JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Shape");
+ shapePreBarrierOffset_ = generatePreBarrier(cx, masm, MIRType::Shape);
+ rangeRecorder.recordOffset("Trampoline: PreBarrier Shape");
+
+ JitSpew(JitSpew_Codegen, "# Emitting free stub");
+ generateFreeStub(masm);
+ rangeRecorder.recordOffset("Trampoline: FreeStub");
+
+ JitSpew(JitSpew_Codegen, "# Emitting lazy link stub");
+ generateLazyLinkStub(masm);
+ rangeRecorder.recordOffset("Trampoline: LazyLinkStub");
+
+ JitSpew(JitSpew_Codegen, "# Emitting interpreter stub");
+ generateInterpreterStub(masm);
+ rangeRecorder.recordOffset("Trampoline: Interpreter");
+
+ JitSpew(JitSpew_Codegen, "# Emitting double-to-int32-value stub");
+ generateDoubleToInt32ValueStub(masm);
+ rangeRecorder.recordOffset("Trampoline: DoubleToInt32ValueStub");
+
+ JitSpew(JitSpew_Codegen, "# Emitting VM function wrappers");
+ if (!generateVMWrappers(cx, masm)) {
+ return false;
+ }
+ rangeRecorder.recordOffset("Trampoline: VM Wrapper");
+
+ JitSpew(JitSpew_Codegen, "# Emitting profiler exit frame tail stub");
+ Label profilerExitTail;
+ generateProfilerExitFrameTailStub(masm, &profilerExitTail);
+ rangeRecorder.recordOffset("Trampoline: ProfilerExitFrameTailStub");
+
+ JitSpew(JitSpew_Codegen, "# Emitting exception tail stub");
+ generateExceptionTailStub(masm, &profilerExitTail, &bailoutTail);
+ rangeRecorder.recordOffset("Trampoline: ExceptionTailStub");
+
+ Linker linker(masm);
+ trampolineCode_ = linker.newCode(cx, CodeKind::Other);
+ if (!trampolineCode_) {
+ return false;
+ }
+
+ rangeRecorder.collectRangesForJitCode(trampolineCode_);
+#ifdef MOZ_VTUNE
+ vtune::MarkStub(trampolineCode_, "Trampolines");
+#endif
+
+ return true;
+}
+
+JitCode* JitRuntime::debugTrapHandler(JSContext* cx,
+ DebugTrapHandlerKind kind) {
+ if (!debugTrapHandlers_[kind]) {
+ // JitRuntime code stubs are shared across compartments and have to
+ // be allocated in the atoms zone.
+ mozilla::Maybe<AutoAllocInAtomsZone> az;
+ if (!cx->zone()->isAtomsZone()) {
+ az.emplace(cx);
+ }
+ debugTrapHandlers_[kind] = generateDebugTrapHandler(cx, kind);
+ }
+ return debugTrapHandlers_[kind];
+}
+
+JitRuntime::IonCompileTaskList& JitRuntime::ionLazyLinkList(JSRuntime* rt) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt),
+ "Should only be mutated by the main thread.");
+ return ionLazyLinkList_.ref();
+}
+
+void JitRuntime::ionLazyLinkListRemove(JSRuntime* rt,
+ jit::IonCompileTask* task) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt),
+ "Should only be mutated by the main thread.");
+ MOZ_ASSERT(rt == task->script()->runtimeFromMainThread());
+ MOZ_ASSERT(ionLazyLinkListSize_ > 0);
+
+ task->removeFrom(ionLazyLinkList(rt));
+ ionLazyLinkListSize_--;
+
+ MOZ_ASSERT(ionLazyLinkList(rt).isEmpty() == (ionLazyLinkListSize_ == 0));
+}
+
+void JitRuntime::ionLazyLinkListAdd(JSRuntime* rt, jit::IonCompileTask* task) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt),
+ "Should only be mutated by the main thread.");
+ MOZ_ASSERT(rt == task->script()->runtimeFromMainThread());
+ ionLazyLinkList(rt).insertFront(task);
+ ionLazyLinkListSize_++;
+}
+
+uint8_t* JitRuntime::allocateIonOsrTempData(size_t size) {
+ // Free the old buffer (if needed) before allocating a new one. Note that we
+ // could use realloc here but it's likely not worth the complexity.
+ freeIonOsrTempData();
+ ionOsrTempData_.ref().reset(static_cast<uint8_t*>(js_malloc(size)));
+ return ionOsrTempData_.ref().get();
+}
+
+void JitRuntime::freeIonOsrTempData() { ionOsrTempData_.ref().reset(); }
+
+JitRealm::JitRealm() : initialStringHeap(gc::Heap::Tenured) {}
+
+void JitRealm::initialize(bool zoneHasNurseryStrings) {
+ setStringsCanBeInNursery(zoneHasNurseryStrings);
+}
+
+template <typename T>
+static T PopNextBitmaskValue(uint32_t* bitmask) {
+ MOZ_ASSERT(*bitmask);
+ uint32_t index = mozilla::CountTrailingZeroes32(*bitmask);
+ *bitmask ^= 1 << index;
+
+ MOZ_ASSERT(index < uint32_t(T::Count));
+ return T(index);
+}
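+
+// Worked example: if *bitmask is 0b0101 (entries 0 and 2 pending), the first
+// call returns T(0) and leaves 0b0100, the second returns T(2) and leaves 0,
+// and the caller's loop then terminates.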
+
+void JitRealm::performStubReadBarriers(uint32_t stubsToBarrier) const {
+ while (stubsToBarrier) {
+ auto stub = PopNextBitmaskValue<StubIndex>(&stubsToBarrier);
+ const WeakHeapPtr<JitCode*>& jitCode = stubs_[stub];
+ MOZ_ASSERT(jitCode);
+ jitCode.get();
+ }
+}
+
+static bool LinkCodeGen(JSContext* cx, CodeGenerator* codegen,
+ HandleScript script, const WarpSnapshot* snapshot) {
+ if (!codegen->link(cx, snapshot)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool LinkBackgroundCodeGen(JSContext* cx, IonCompileTask* task) {
+ CodeGenerator* codegen = task->backgroundCodegen();
+ if (!codegen) {
+ return false;
+ }
+
+ JitContext jctx(cx);
+ RootedScript script(cx, task->script());
+ return LinkCodeGen(cx, codegen, script, task->snapshot());
+}
+
+void jit::LinkIonScript(JSContext* cx, HandleScript calleeScript) {
+ // Get the pending IonCompileTask from the script.
+ MOZ_ASSERT(calleeScript->hasBaselineScript());
+ IonCompileTask* task =
+ calleeScript->baselineScript()->pendingIonCompileTask();
+ calleeScript->baselineScript()->removePendingIonCompileTask(cx->runtime(),
+ calleeScript);
+
+ // Remove from pending.
+ cx->runtime()->jitRuntime()->ionLazyLinkListRemove(cx->runtime(), task);
+
+ {
+ gc::AutoSuppressGC suppressGC(cx);
+ if (!LinkBackgroundCodeGen(cx, task)) {
+ // Silently ignore OOM during code generation. The assembly code
+ // doesn't have code to handle it after linking happened. So it's
+ // not OK to throw a catchable exception from there.
+ cx->clearPendingException();
+ }
+ }
+
+ {
+ AutoLockHelperThreadState lock;
+ FinishOffThreadTask(cx->runtime(), task, lock);
+ }
+}
+
+uint8_t* jit::LazyLinkTopActivation(JSContext* cx,
+ LazyLinkExitFrameLayout* frame) {
+ RootedScript calleeScript(
+ cx, ScriptFromCalleeToken(frame->jsFrame()->calleeToken()));
+
+ LinkIonScript(cx, calleeScript);
+
+ MOZ_ASSERT(calleeScript->hasBaselineScript());
+ MOZ_ASSERT(calleeScript->jitCodeRaw());
+
+ return calleeScript->jitCodeRaw();
+}
+
+/* static */
+void JitRuntime::TraceAtomZoneRoots(JSTracer* trc) {
+ MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());
+
+ // Shared stubs are allocated in the atoms zone, so do not iterate
+ // them after the atoms heap has been "finished."
+ if (trc->runtime()->atomsAreFinished()) {
+ return;
+ }
+
+ Zone* zone = trc->runtime()->atomsZone();
+ for (auto i = zone->cellIterUnsafe<JitCode>(); !i.done(); i.next()) {
+ JitCode* code = i;
+ TraceRoot(trc, &code, "wrapper");
+ }
+}
+
+/* static */
+bool JitRuntime::MarkJitcodeGlobalTableIteratively(GCMarker* marker) {
+ if (marker->runtime()->hasJitRuntime() &&
+ marker->runtime()->jitRuntime()->hasJitcodeGlobalTable()) {
+ return marker->runtime()
+ ->jitRuntime()
+ ->getJitcodeGlobalTable()
+ ->markIteratively(marker);
+ }
+ return false;
+}
+
+/* static */
+void JitRuntime::TraceWeakJitcodeGlobalTable(JSRuntime* rt, JSTracer* trc) {
+ if (rt->hasJitRuntime() && rt->jitRuntime()->hasJitcodeGlobalTable()) {
+ rt->jitRuntime()->getJitcodeGlobalTable()->traceWeak(rt, trc);
+ }
+}
+
+void JitRealm::traceWeak(JSTracer* trc, JS::Realm* realm) {
+ // Any outstanding compilations should have been cancelled by the GC.
+ MOZ_ASSERT(!HasOffThreadIonCompile(realm));
+
+ for (WeakHeapPtr<JitCode*>& stub : stubs_) {
+ TraceWeakEdge(trc, &stub, "JitRealm::stubs_");
+ }
+}
+
+bool JitZone::addInlinedCompilation(const RecompileInfo& info,
+ JSScript* inlined) {
+ MOZ_ASSERT(inlined != info.script());
+
+ auto p = inlinedCompilations_.lookupForAdd(inlined);
+ if (p) {
+ auto& compilations = p->value();
+ if (!compilations.empty() && compilations.back() == info) {
+ return true;
+ }
+ return compilations.append(info);
+ }
+
+ RecompileInfoVector compilations;
+ if (!compilations.append(info)) {
+ return false;
+ }
+ return inlinedCompilations_.add(p, inlined, std::move(compilations));
+}
+
+void jit::AddPendingInvalidation(RecompileInfoVector& invalid,
+ JSScript* script) {
+ MOZ_ASSERT(script);
+
+ CancelOffThreadIonCompile(script);
+
+ // Let the script warm up again before attempting another compile.
+ script->resetWarmUpCounterToDelayIonCompilation();
+
+ JitScript* jitScript = script->maybeJitScript();
+ if (!jitScript) {
+ return;
+ }
+
+ auto addPendingInvalidation = [&invalid](const RecompileInfo& info) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!invalid.append(info)) {
+ // BUG 1536159: For diagnostics, compute the size of the failed
+ // allocation. This presumes the vector growth strategy is to double. This
+ // is only used for crash reporting, so it's not a problem if we get it wrong.
+ size_t allocSize = 2 * sizeof(RecompileInfo) * invalid.capacity();
+ oomUnsafe.crash(allocSize, "Could not update RecompileInfoVector");
+ }
+ };
+
+ // Trigger invalidation of the IonScript.
+ if (jitScript->hasIonScript()) {
+ RecompileInfo info(script, jitScript->ionScript()->compilationId());
+ addPendingInvalidation(info);
+ }
+
+ // Trigger invalidation of any callers inlining this script.
+ auto* inlinedCompilations =
+ script->zone()->jitZone()->maybeInlinedCompilations(script);
+ if (inlinedCompilations) {
+ for (const RecompileInfo& info : *inlinedCompilations) {
+ addPendingInvalidation(info);
+ }
+ script->zone()->jitZone()->removeInlinedCompilations(script);
+ }
+}
+
+IonScript* RecompileInfo::maybeIonScriptToInvalidate() const {
+ // Make sure this is not called under CodeGenerator::link (before the
+ // IonScript is created).
+ MOZ_ASSERT_IF(
+ script_->zone()->jitZone()->currentCompilationId(),
+ script_->zone()->jitZone()->currentCompilationId().ref() != id_);
+
+ if (!script_->hasIonScript() ||
+ script_->ionScript()->compilationId() != id_) {
+ return nullptr;
+ }
+
+ return script_->ionScript();
+}
+
+bool RecompileInfo::traceWeak(JSTracer* trc) {
+ // Sweep the RecompileInfo if either the script is dead or the IonScript has
+ // been invalidated.
+
+ if (!TraceManuallyBarrieredWeakEdge(trc, &script_, "RecompileInfo::script")) {
+ return false;
+ }
+
+ return maybeIonScriptToInvalidate() != nullptr;
+}
+
+void JitZone::traceWeak(JSTracer* trc) {
+ baselineCacheIRStubCodes_.traceWeak(trc);
+ inlinedCompilations_.traceWeak(trc);
+
+ TraceWeakEdge(trc, &lastStubFoldingBailoutChild_,
+ "JitZone::lastStubFoldingBailoutChild_");
+ TraceWeakEdge(trc, &lastStubFoldingBailoutParent_,
+ "JitZone::lastStubFoldingBailoutParent_");
+}
+
+size_t JitRealm::sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this);
+}
+
+void JitZone::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::CodeSizes* code, size_t* jitZone,
+ size_t* baselineStubsOptimized) const {
+ *jitZone += mallocSizeOf(this);
+ *jitZone +=
+ baselineCacheIRStubCodes_.shallowSizeOfExcludingThis(mallocSizeOf);
+ *jitZone += ionCacheIRStubInfoSet_.shallowSizeOfExcludingThis(mallocSizeOf);
+
+ execAlloc().addSizeOfCode(code);
+
+ *baselineStubsOptimized +=
+ optimizedStubSpace_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+void JitCodeHeader::init(JitCode* jitCode) {
+ // As long as JitCode isn't moveable, we can avoid tracing this and
+ // mutating executable data.
+ MOZ_ASSERT(!gc::IsMovableKind(gc::AllocKind::JITCODE));
+ jitCode_ = jitCode;
+}
+
+template <AllowGC allowGC>
+JitCode* JitCode::New(JSContext* cx, uint8_t* code, uint32_t totalSize,
+ uint32_t headerSize, ExecutablePool* pool,
+ CodeKind kind) {
+ uint32_t bufferSize = totalSize - headerSize;
+ JitCode* codeObj =
+ cx->newCell<JitCode, allowGC>(code, bufferSize, headerSize, pool, kind);
+ if (!codeObj) {
+ // The caller already allocated `totalSize` bytes of executable memory.
+ pool->release(totalSize, kind);
+ return nullptr;
+ }
+
+ cx->zone()->incJitMemory(totalSize);
+
+ return codeObj;
+}
+
+template JitCode* JitCode::New<CanGC>(JSContext* cx, uint8_t* code,
+ uint32_t bufferSize, uint32_t headerSize,
+ ExecutablePool* pool, CodeKind kind);
+
+template JitCode* JitCode::New<NoGC>(JSContext* cx, uint8_t* code,
+ uint32_t bufferSize, uint32_t headerSize,
+ ExecutablePool* pool, CodeKind kind);
+
+void JitCode::copyFrom(MacroAssembler& masm) {
+ // Store the JitCode pointer in the JitCodeHeader so we can recover the
+ // gcthing from relocation tables.
+ JitCodeHeader::FromExecutable(raw())->init(this);
+
+ insnSize_ = masm.instructionsSize();
+ masm.executableCopy(raw());
+
+ jumpRelocTableBytes_ = masm.jumpRelocationTableBytes();
+ masm.copyJumpRelocationTable(raw() + jumpRelocTableOffset());
+
+ dataRelocTableBytes_ = masm.dataRelocationTableBytes();
+ masm.copyDataRelocationTable(raw() + dataRelocTableOffset());
+
+ masm.processCodeLabels(raw());
+}
+
+void JitCode::traceChildren(JSTracer* trc) {
+ // Note that we cannot mark invalidated scripts, since we've basically
+ // corrupted the code stream by injecting bailouts.
+ if (invalidated()) {
+ return;
+ }
+
+ if (jumpRelocTableBytes_) {
+ uint8_t* start = raw() + jumpRelocTableOffset();
+ CompactBufferReader reader(start, start + jumpRelocTableBytes_);
+ MacroAssembler::TraceJumpRelocations(trc, this, reader);
+ }
+ if (dataRelocTableBytes_) {
+ uint8_t* start = raw() + dataRelocTableOffset();
+ CompactBufferReader reader(start, start + dataRelocTableBytes_);
+ MacroAssembler::TraceDataRelocations(trc, this, reader);
+ }
+}
+
+void JitCode::finalize(JS::GCContext* gcx) {
+ // If this jitcode had a bytecode map, it must have already been removed.
+#ifdef DEBUG
+ JSRuntime* rt = gcx->runtime();
+ if (hasBytecodeMap_) {
+ MOZ_ASSERT(rt->jitRuntime()->hasJitcodeGlobalTable());
+ MOZ_ASSERT(!rt->jitRuntime()->getJitcodeGlobalTable()->lookup(raw()));
+ }
+#endif
+
+#ifdef MOZ_VTUNE
+ vtune::UnmarkCode(this);
+#endif
+
+ MOZ_ASSERT(pool_);
+
+ // With W^X JIT code, reprotecting memory for each JitCode instance is
+ // slow, so we record the ranges and poison them later all at once. It's
+ // safe to ignore OOM here; it just means we won't poison the code.
+ if (gcx->appendJitPoisonRange(JitPoisonRange(pool_, raw() - headerSize_,
+ headerSize_ + bufferSize_))) {
+ pool_->addRef();
+ }
+ setHeaderPtr(nullptr);
+
+#ifdef JS_ION_PERF
+ // Code buffers are stored inside ExecutablePools. Pools are refcounted.
+ // Releasing the pool may free it. Horrible hack: if we are using perf
+ // integration, we don't want to reuse code addresses, so we just leak the
+ // memory instead.
+ if (!PerfEnabled()) {
+ pool_->release(headerSize_ + bufferSize_, CodeKind(kind_));
+ }
+#else
+ pool_->release(headerSize_ + bufferSize_, CodeKind(kind_));
+#endif
+
+ zone()->decJitMemory(headerSize_ + bufferSize_);
+
+ pool_ = nullptr;
+}
+
+IonScript::IonScript(IonCompilationId compilationId, uint32_t localSlotsSize,
+ uint32_t argumentSlotsSize, uint32_t frameSize)
+ : localSlotsSize_(localSlotsSize),
+ argumentSlotsSize_(argumentSlotsSize),
+ frameSize_(frameSize),
+ compilationId_(compilationId) {}
+
+IonScript* IonScript::New(JSContext* cx, IonCompilationId compilationId,
+ uint32_t localSlotsSize, uint32_t argumentSlotsSize,
+ uint32_t frameSize, size_t snapshotsListSize,
+ size_t snapshotsRVATableSize, size_t recoversSize,
+ size_t constants, size_t nurseryObjects,
+ size_t safepointIndices, size_t osiIndices,
+ size_t icEntries, size_t runtimeSize,
+ size_t safepointsSize) {
+ if (snapshotsListSize >= MAX_BUFFER_SIZE) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ // Verify that the hardcoded sizes in the header are accurate.
+ static_assert(SizeOf_OsiIndex == sizeof(OsiIndex),
+ "IonScript has wrong size for OsiIndex");
+ static_assert(SizeOf_SafepointIndex == sizeof(SafepointIndex),
+ "IonScript has wrong size for SafepointIndex");
+
+ CheckedInt<Offset> allocSize = sizeof(IonScript);
+ allocSize += CheckedInt<Offset>(constants) * sizeof(Value);
+ allocSize += CheckedInt<Offset>(runtimeSize);
+ allocSize += CheckedInt<Offset>(nurseryObjects) * sizeof(HeapPtr<JSObject*>);
+ allocSize += CheckedInt<Offset>(osiIndices) * sizeof(OsiIndex);
+ allocSize += CheckedInt<Offset>(safepointIndices) * sizeof(SafepointIndex);
+ allocSize += CheckedInt<Offset>(icEntries) * sizeof(uint32_t);
+ allocSize += CheckedInt<Offset>(safepointsSize);
+ allocSize += CheckedInt<Offset>(snapshotsListSize);
+ allocSize += CheckedInt<Offset>(snapshotsRVATableSize);
+ allocSize += CheckedInt<Offset>(recoversSize);
+
+ if (!allocSize.isValid()) {
+ ReportAllocationOverflow(cx);
+ return nullptr;
+ }
+
+ void* raw = cx->pod_malloc<uint8_t>(allocSize.value());
+ MOZ_ASSERT(uintptr_t(raw) % alignof(IonScript) == 0);
+ if (!raw) {
+ return nullptr;
+ }
+ IonScript* script = new (raw)
+ IonScript(compilationId, localSlotsSize, argumentSlotsSize, frameSize);
+
+ Offset offsetCursor = sizeof(IonScript);
+
+ MOZ_ASSERT(offsetCursor % alignof(Value) == 0);
+ script->constantTableOffset_ = offsetCursor;
+ offsetCursor += constants * sizeof(Value);
+
+ MOZ_ASSERT(offsetCursor % alignof(uint64_t) == 0);
+ script->runtimeDataOffset_ = offsetCursor;
+ offsetCursor += runtimeSize;
+
+ MOZ_ASSERT(offsetCursor % alignof(HeapPtr<JSObject*>) == 0);
+ script->initElements<HeapPtr<JSObject*>>(offsetCursor, nurseryObjects);
+ script->nurseryObjectsOffset_ = offsetCursor;
+ offsetCursor += nurseryObjects * sizeof(HeapPtr<JSObject*>);
+
+ MOZ_ASSERT(offsetCursor % alignof(OsiIndex) == 0);
+ script->osiIndexOffset_ = offsetCursor;
+ offsetCursor += osiIndices * sizeof(OsiIndex);
+
+ MOZ_ASSERT(offsetCursor % alignof(SafepointIndex) == 0);
+ script->safepointIndexOffset_ = offsetCursor;
+ offsetCursor += safepointIndices * sizeof(SafepointIndex);
+
+ MOZ_ASSERT(offsetCursor % alignof(uint32_t) == 0);
+ script->icIndexOffset_ = offsetCursor;
+ offsetCursor += icEntries * sizeof(uint32_t);
+
+ script->safepointsOffset_ = offsetCursor;
+ offsetCursor += safepointsSize;
+
+ script->snapshotsOffset_ = offsetCursor;
+ offsetCursor += snapshotsListSize;
+
+ script->rvaTableOffset_ = offsetCursor;
+ offsetCursor += snapshotsRVATableSize;
+
+ script->recoversOffset_ = offsetCursor;
+ offsetCursor += recoversSize;
+
+ script->allocBytes_ = offsetCursor;
+
+ MOZ_ASSERT(script->numConstants() == constants);
+ MOZ_ASSERT(script->runtimeSize() == runtimeSize);
+ MOZ_ASSERT(script->numNurseryObjects() == nurseryObjects);
+ MOZ_ASSERT(script->numOsiIndices() == osiIndices);
+ MOZ_ASSERT(script->numSafepointIndices() == safepointIndices);
+ MOZ_ASSERT(script->numICs() == icEntries);
+ MOZ_ASSERT(script->safepointsSize() == safepointsSize);
+ MOZ_ASSERT(script->snapshotsListSize() == snapshotsListSize);
+ MOZ_ASSERT(script->snapshotsRVATableSize() == snapshotsRVATableSize);
+ MOZ_ASSERT(script->recoversSize() == recoversSize);
+ MOZ_ASSERT(script->endOffset() == offsetCursor);
+
+ return script;
+}
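+
+// For reference, the single allocation built above is laid out in offset
+// order as:
+//
+//   [IonScript][constants][runtimeData][nurseryObjects][osiIndices]
+//   [safepointIndices][icEntries][safepoints][snapshots][RVA table][recovers]
+//
+// with endOffset() marking the end of the buffer.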
+
+void IonScript::trace(JSTracer* trc) {
+ if (method_) {
+ TraceEdge(trc, &method_, "method");
+ }
+
+ for (size_t i = 0; i < numConstants(); i++) {
+ TraceEdge(trc, &getConstant(i), "constant");
+ }
+
+ for (size_t i = 0; i < numNurseryObjects(); i++) {
+ TraceEdge(trc, &nurseryObjects()[i], "nursery-object");
+ }
+
+ // Trace caches so that the JSScript pointer can be updated if moved.
+ for (size_t i = 0; i < numICs(); i++) {
+ getICFromIndex(i).trace(trc, this);
+ }
+}
+
+/* static */
+void IonScript::preWriteBarrier(Zone* zone, IonScript* ionScript) {
+ PreWriteBarrier(zone, ionScript);
+}
+
+void IonScript::copySnapshots(const SnapshotWriter* writer) {
+ MOZ_ASSERT(writer->listSize() == snapshotsListSize());
+ memcpy(offsetToPointer<uint8_t>(snapshotsOffset()), writer->listBuffer(),
+ snapshotsListSize());
+
+ MOZ_ASSERT(snapshotsRVATableSize());
+ MOZ_ASSERT(writer->RVATableSize() == snapshotsRVATableSize());
+ memcpy(offsetToPointer<uint8_t>(rvaTableOffset()), writer->RVATableBuffer(),
+ snapshotsRVATableSize());
+}
+
+void IonScript::copyRecovers(const RecoverWriter* writer) {
+ MOZ_ASSERT(writer->size() == recoversSize());
+ memcpy(offsetToPointer<uint8_t>(recoversOffset()), writer->buffer(),
+ recoversSize());
+}
+
+void IonScript::copySafepoints(const SafepointWriter* writer) {
+ MOZ_ASSERT(writer->size() == safepointsSize());
+ memcpy(offsetToPointer<uint8_t>(safepointsOffset()), writer->buffer(),
+ safepointsSize());
+}
+
+void IonScript::copyConstants(const Value* vp) {
+ for (size_t i = 0; i < numConstants(); i++) {
+ constants()[i].init(vp[i]);
+ }
+}
+
+void IonScript::copySafepointIndices(const CodegenSafepointIndex* si) {
+ // Convert each CodegenSafepointIndex to the more compact SafepointIndex form.
+ SafepointIndex* table = safepointIndices();
+ for (size_t i = 0; i < numSafepointIndices(); ++i) {
+ table[i] = SafepointIndex(si[i]);
+ }
+}
+
+void IonScript::copyOsiIndices(const OsiIndex* oi) {
+ memcpy(osiIndices(), oi, numOsiIndices() * sizeof(OsiIndex));
+}
+
+void IonScript::copyRuntimeData(const uint8_t* data) {
+ memcpy(runtimeData(), data, runtimeSize());
+}
+
+void IonScript::copyICEntries(const uint32_t* icEntries) {
+ memcpy(icIndex(), icEntries, numICs() * sizeof(uint32_t));
+
+ // Update the codeRaw_ field in the ICs now that we know the code address.
+ for (size_t i = 0; i < numICs(); i++) {
+ getICFromIndex(i).resetCodeRaw(this);
+ }
+}
+
+const SafepointIndex* IonScript::getSafepointIndex(uint32_t disp) const {
+ MOZ_ASSERT(numSafepointIndices() > 0);
+
+ const SafepointIndex* table = safepointIndices();
+ if (numSafepointIndices() == 1) {
+ MOZ_ASSERT(disp == table[0].displacement());
+ return &table[0];
+ }
+
+ size_t minEntry = 0;
+ size_t maxEntry = numSafepointIndices() - 1;
+ uint32_t min = table[minEntry].displacement();
+ uint32_t max = table[maxEntry].displacement();
+
+ // Assert that the element is in the list.
+ MOZ_ASSERT(min <= disp && disp <= max);
+
+ // Approximate the location of the matching safepoint entry by interpolation.
+ size_t guess = (disp - min) * (maxEntry - minEntry) / (max - min) + minEntry;
+ uint32_t guessDisp = table[guess].displacement();
+
+ if (table[guess].displacement() == disp) {
+ return &table[guess];
+ }
+
+ // A linear scan from the guess should be more efficient when the entries
+ // form small groups distributed roughly evenly across the code,
+ // such as: <... ... ... ... . ... ...>
+ // (A standalone sketch of this search follows the function.)
+ if (guessDisp > disp) {
+ while (--guess >= minEntry) {
+ guessDisp = table[guess].displacement();
+ MOZ_ASSERT(guessDisp >= disp);
+ if (guessDisp == disp) {
+ return &table[guess];
+ }
+ }
+ } else {
+ while (++guess <= maxEntry) {
+ guessDisp = table[guess].displacement();
+ MOZ_ASSERT(guessDisp <= disp);
+ if (guessDisp == disp) {
+ return &table[guess];
+ }
+ }
+ }
+
+ MOZ_CRASH("displacement not found.");
+}
+
+const OsiIndex* IonScript::getOsiIndex(uint32_t disp) const {
+ const OsiIndex* end = osiIndices() + numOsiIndices();
+ for (const OsiIndex* it = osiIndices(); it != end; ++it) {
+ if (it->returnPointDisplacement() == disp) {
+ return it;
+ }
+ }
+
+ MOZ_CRASH("Failed to find OSI point return address");
+}
+
+const OsiIndex* IonScript::getOsiIndex(uint8_t* retAddr) const {
+ JitSpew(JitSpew_IonInvalidate, "IonScript %p has method %p raw %p",
+ (void*)this, (void*)method(), method()->raw());
+
+ MOZ_ASSERT(containsCodeAddress(retAddr));
+ uint32_t disp = retAddr - method()->raw();
+ return getOsiIndex(disp);
+}
+
+void IonScript::Destroy(JS::GCContext* gcx, IonScript* script) {
+ // Make sure there are no pointers into the IonScript's nursery objects list
+ // in the store buffer. Because this can be called during sweeping when
+ // discarding JIT code, we have to lock the store buffer when we find an
+ // object that's (still) in the nursery.
+ mozilla::Maybe<gc::AutoLockStoreBuffer> lock;
+ for (size_t i = 0, len = script->numNurseryObjects(); i < len; i++) {
+ JSObject* obj = script->nurseryObjects()[i];
+ if (!IsInsideNursery(obj)) {
+ continue;
+ }
+ if (lock.isNothing()) {
+ lock.emplace(&gcx->runtime()->gc.storeBuffer());
+ }
+ script->nurseryObjects()[i] = HeapPtr<JSObject*>();
+ }
+
+ // This allocation is tracked by JSScript::setIonScriptImpl.
+ gcx->deleteUntracked(script);
+}
+
+void JS::DeletePolicy<js::jit::IonScript>::operator()(
+ const js::jit::IonScript* script) {
+ IonScript::Destroy(rt_->gcContext(), const_cast<IonScript*>(script));
+}
+
+void IonScript::purgeICs(Zone* zone) {
+ for (size_t i = 0; i < numICs(); i++) {
+ getICFromIndex(i).reset(zone, this);
+ }
+}
+
+namespace js {
+namespace jit {
+
+bool OptimizeMIR(MIRGenerator* mir) {
+ MIRGraph& graph = mir->graph();
+ GraphSpewer& gs = mir->graphSpewer();
+
+ if (mir->shouldCancel("Start")) {
+ return false;
+ }
+
+ gs.spewPass("BuildSSA");
+ AssertBasicGraphCoherency(graph);
+
+ if (JitSpewEnabled(JitSpew_MIRExpressions)) {
+ JitSpewCont(JitSpew_MIRExpressions, "\n");
+ DumpMIRExpressions(JitSpewPrinter(), graph, mir->outerInfo(),
+ "BuildSSA (== input to OptimizeMIR)");
+ }
+
+ if (!JitOptions.disablePruning && !mir->compilingWasm()) {
+ JitSpewCont(JitSpew_Prune, "\n");
+ if (!PruneUnusedBranches(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("Prune Unused Branches");
+ AssertBasicGraphCoherency(graph);
+
+ if (mir->shouldCancel("Prune Unused Branches")) {
+ return false;
+ }
+ }
+
+ {
+ if (!FoldEmptyBlocks(graph)) {
+ return false;
+ }
+ gs.spewPass("Fold Empty Blocks");
+ AssertBasicGraphCoherency(graph);
+
+ if (mir->shouldCancel("Fold Empty Blocks")) {
+ return false;
+ }
+ }
+
+ // Remove trivially dead resume point operands before folding tests, so the
+ // latter pass can optimize more aggressively.
+ if (!mir->compilingWasm()) {
+ if (!EliminateTriviallyDeadResumePointOperands(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("Eliminate trivially dead resume point operands");
+ AssertBasicGraphCoherency(graph);
+
+ if (mir->shouldCancel("Eliminate trivially dead resume point operands")) {
+ return false;
+ }
+ }
+
+ {
+ if (!FoldTests(graph)) {
+ return false;
+ }
+ gs.spewPass("Fold Tests");
+ AssertBasicGraphCoherency(graph);
+
+ if (mir->shouldCancel("Fold Tests")) {
+ return false;
+ }
+ }
+
+ {
+ if (!SplitCriticalEdges(graph)) {
+ return false;
+ }
+ gs.spewPass("Split Critical Edges");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Split Critical Edges")) {
+ return false;
+ }
+ }
+
+ {
+ RenumberBlocks(graph);
+ gs.spewPass("Renumber Blocks");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Renumber Blocks")) {
+ return false;
+ }
+ }
+
+ {
+ if (!BuildDominatorTree(graph)) {
+ return false;
+ }
+ // No spew: graph not changed.
+
+ if (mir->shouldCancel("Dominator Tree")) {
+ return false;
+ }
+ }
+
+ {
+ // Aggressive phi elimination must occur before any code elimination. If the
+ // script contains a try-statement, we only compiled the try block and not
+ // the catch or finally blocks, so in this case it's also invalid to use
+ // aggressive phi elimination.
+ Observability observability = graph.hasTryBlock()
+ ? ConservativeObservability
+ : AggressiveObservability;
+ if (!EliminatePhis(mir, graph, observability)) {
+ return false;
+ }
+ gs.spewPass("Eliminate phis");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Eliminate phis")) {
+ return false;
+ }
+
+ if (!BuildPhiReverseMapping(graph)) {
+ return false;
+ }
+ AssertExtendedGraphCoherency(graph);
+ // No spew: graph not changed.
+
+ if (mir->shouldCancel("Phi reverse mapping")) {
+ return false;
+ }
+ }
+
+ if (!mir->compilingWasm() && !JitOptions.disableIteratorIndices) {
+ if (!OptimizeIteratorIndices(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("Iterator Indices");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Iterator Indices")) {
+ return false;
+ }
+ }
+
+ if (!JitOptions.disableRecoverIns &&
+ mir->optimizationInfo().scalarReplacementEnabled()) {
+ JitSpewCont(JitSpew_Escape, "\n");
+ if (!ScalarReplacement(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("Scalar Replacement");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Scalar Replacement")) {
+ return false;
+ }
+ }
+
+ if (!mir->compilingWasm()) {
+ if (!ApplyTypeInformation(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("Apply types");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Apply types")) {
+ return false;
+ }
+ }
+
+ if (mir->optimizationInfo().amaEnabled()) {
+ AlignmentMaskAnalysis ama(graph);
+ if (!ama.analyze()) {
+ return false;
+ }
+ gs.spewPass("Alignment Mask Analysis");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Alignment Mask Analysis")) {
+ return false;
+ }
+ }
+
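+ // The value numberer is constructed up front so that the same instance can
+ // be reused for the UCE run after range analysis below.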
+ ValueNumberer gvn(mir, graph);
+
+ // Alias analysis is required for LICM and GVN so that we don't move
+ // loads across stores. We also use alias information when removing
+ // redundant shape guards.
+ if (mir->optimizationInfo().licmEnabled() ||
+ mir->optimizationInfo().gvnEnabled() ||
+ mir->optimizationInfo().eliminateRedundantShapeGuardsEnabled()) {
+ {
+ AliasAnalysis analysis(mir, graph);
+ JitSpewCont(JitSpew_Alias, "\n");
+ if (!analysis.analyze()) {
+ return false;
+ }
+
+ gs.spewPass("Alias analysis");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Alias analysis")) {
+ return false;
+ }
+ }
+
+ if (!mir->compilingWasm()) {
+ // Eliminating dead resume point operands requires basic block
+ // instructions to be numbered. Reuse the numbering computed during
+ // alias analysis.
+ if (!EliminateDeadResumePointOperands(mir, graph)) {
+ return false;
+ }
+
+ gs.spewPass("Eliminate dead resume point operands");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Eliminate dead resume point operands")) {
+ return false;
+ }
+ }
+ }
+
+ if (mir->optimizationInfo().gvnEnabled()) {
+ JitSpewCont(JitSpew_GVN, "\n");
+ if (!gvn.run(ValueNumberer::UpdateAliasAnalysis)) {
+ return false;
+ }
+ gs.spewPass("GVN");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("GVN")) {
+ return false;
+ }
+ }
+
+ // LICM can hoist instructions from conditional branches and
+ // trigger bailouts. Disable it if bailing out of a hoisted
+ // instruction has previously invalidated this script.
+ if (mir->licmEnabled()) {
+ JitSpewCont(JitSpew_LICM, "\n");
+ if (!LICM(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("LICM");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("LICM")) {
+ return false;
+ }
+ }
+
+ RangeAnalysis r(mir, graph);
+ if (mir->optimizationInfo().rangeAnalysisEnabled()) {
+ JitSpewCont(JitSpew_Range, "\n");
+ if (!r.addBetaNodes()) {
+ return false;
+ }
+ gs.spewPass("Beta");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("RA Beta")) {
+ return false;
+ }
+
+ if (!r.analyze() || !r.addRangeAssertions()) {
+ return false;
+ }
+ gs.spewPass("Range Analysis");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Range Analysis")) {
+ return false;
+ }
+
+ if (!r.removeBetaNodes()) {
+ return false;
+ }
+ gs.spewPass("De-Beta");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("RA De-Beta")) {
+ return false;
+ }
+
+ if (mir->optimizationInfo().gvnEnabled()) {
+ bool shouldRunUCE = false;
+ if (!r.prepareForUCE(&shouldRunUCE)) {
+ return false;
+ }
+ gs.spewPass("RA check UCE");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("RA check UCE")) {
+ return false;
+ }
+
+ if (shouldRunUCE) {
+ if (!gvn.run(ValueNumberer::DontUpdateAliasAnalysis)) {
+ return false;
+ }
+ gs.spewPass("UCE After RA");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("UCE After RA")) {
+ return false;
+ }
+ }
+ }
+
+ if (mir->optimizationInfo().autoTruncateEnabled()) {
+ if (!r.truncate()) {
+ return false;
+ }
+ gs.spewPass("Truncate Doubles");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Truncate Doubles")) {
+ return false;
+ }
+ }
+ }
+
+ if (!JitOptions.disableRecoverIns) {
+ JitSpewCont(JitSpew_Sink, "\n");
+ if (!Sink(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("Sink");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Sink")) {
+ return false;
+ }
+ }
+
+ if (!JitOptions.disableRecoverIns &&
+ mir->optimizationInfo().rangeAnalysisEnabled()) {
+ JitSpewCont(JitSpew_Range, "\n");
+ if (!r.removeUnnecessaryBitops()) {
+ return false;
+ }
+ gs.spewPass("Remove Unnecessary Bitops");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Remove Unnecessary Bitops")) {
+ return false;
+ }
+ }
+
+ {
+ JitSpewCont(JitSpew_FLAC, "\n");
+ if (!FoldLinearArithConstants(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("Fold Linear Arithmetic Constants");
+ AssertBasicGraphCoherency(graph);
+
+ if (mir->shouldCancel("Fold Linear Arithmetic Constants")) {
+ return false;
+ }
+ }
+
+ if (mir->optimizationInfo().eaaEnabled()) {
+ EffectiveAddressAnalysis eaa(mir, graph);
+ JitSpewCont(JitSpew_EAA, "\n");
+ if (!eaa.analyze()) {
+ return false;
+ }
+ gs.spewPass("Effective Address Analysis");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Effective Address Analysis")) {
+ return false;
+ }
+ }
+
+ // BCE marks bounds checks as dead, so do BCE before DCE.
+ if (mir->compilingWasm()) {
+ JitSpewCont(JitSpew_WasmBCE, "\n");
+ if (!EliminateBoundsChecks(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("Redundant Bounds Check Elimination");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("BCE")) {
+ return false;
+ }
+ }
+
+ {
+ if (!EliminateDeadCode(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("DCE");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("DCE")) {
+ return false;
+ }
+ }
+
+ if (mir->optimizationInfo().instructionReorderingEnabled() &&
+ !mir->outerInfo().hadReorderingBailout()) {
+ if (!ReorderInstructions(graph)) {
+ return false;
+ }
+ gs.spewPass("Reordering");
+
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Reordering")) {
+ return false;
+ }
+ }
+
+ // Make loops contiguous. We do this after GVN/UCE and range analysis,
+ // which can remove CFG edges, exposing more blocks that can be moved.
+ {
+ if (!MakeLoopsContiguous(graph)) {
+ return false;
+ }
+ gs.spewPass("Make loops contiguous");
+ AssertExtendedGraphCoherency(graph);
+
+ if (mir->shouldCancel("Make loops contiguous")) {
+ return false;
+ }
+ }
+ AssertExtendedGraphCoherency(graph, /* underValueNumberer = */ false,
+ /* force = */ true);
+
+ // Remove unreachable blocks created by MBasicBlock::NewFakeLoopPredecessor
+ // to ensure every loop header has two predecessors. (This only happens due
+ // to OSR.) After this point, it is no longer possible to build the
+ // dominator tree.
+ if (!mir->compilingWasm() && graph.osrBlock()) {
+ graph.removeFakeLoopPredecessors();
+ gs.spewPass("Remove fake loop predecessors");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Remove fake loop predecessors")) {
+ return false;
+ }
+ }
+
+ // Passes after this point must not move instructions; these analyses
+ // depend on knowing the final order in which instructions will execute.
+
+ if (mir->optimizationInfo().edgeCaseAnalysisEnabled()) {
+ EdgeCaseAnalysis edgeCaseAnalysis(mir, graph);
+ if (!edgeCaseAnalysis.analyzeLate()) {
+ return false;
+ }
+ gs.spewPass("Edge Case Analysis (Late)");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("Edge Case Analysis (Late)")) {
+ return false;
+ }
+ }
+
+ if (mir->optimizationInfo().eliminateRedundantChecksEnabled()) {
+ // Note: check elimination has to run after all other passes that move
+ // instructions. Since check uses are replaced with the actual index,
+ // code motion after this pass could incorrectly move a load or store
+ // before its bounds check.
+ if (!EliminateRedundantChecks(graph)) {
+ return false;
+ }
+ gs.spewPass("Bounds Check Elimination");
+ AssertGraphCoherency(graph);
+ }
+
+ if (mir->optimizationInfo().eliminateRedundantShapeGuardsEnabled()) {
+ if (!EliminateRedundantShapeGuards(graph)) {
+ return false;
+ }
+ gs.spewPass("Shape Guard Elimination");
+ AssertGraphCoherency(graph);
+ }
+
+ // Run the GC Barrier Elimination pass after instruction reordering, to
+ // ensure we don't move instructions that can trigger GC between stores we
+ // optimize here.
+ if (mir->optimizationInfo().eliminateRedundantGCBarriersEnabled()) {
+ if (!EliminateRedundantGCBarriers(graph)) {
+ return false;
+ }
+ gs.spewPass("GC Barrier Elimination");
+ AssertGraphCoherency(graph);
+ }
+
+ if (!mir->compilingWasm() && !mir->outerInfo().hadUnboxFoldingBailout()) {
+ if (!FoldLoadsWithUnbox(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("FoldLoadsWithUnbox");
+ AssertGraphCoherency(graph);
+ }
+
+ if (!mir->compilingWasm()) {
+ if (!AddKeepAliveInstructions(graph)) {
+ return false;
+ }
+ gs.spewPass("Add KeepAlive Instructions");
+ AssertGraphCoherency(graph);
+ }
+
+ AssertGraphCoherency(graph, /* force = */ true);
+
+ if (JitSpewEnabled(JitSpew_MIRExpressions)) {
+ JitSpewCont(JitSpew_MIRExpressions, "\n");
+ DumpMIRExpressions(JitSpewPrinter(), graph, mir->outerInfo(),
+ "BeforeLIR (== result of OptimizeMIR)");
+ }
+
+ return true;
+}
+
+LIRGraph* GenerateLIR(MIRGenerator* mir) {
+ MIRGraph& graph = mir->graph();
+ GraphSpewer& gs = mir->graphSpewer();
+
+ LIRGraph* lir = mir->alloc().lifoAlloc()->new_<LIRGraph>(&graph);
+ if (!lir || !lir->init()) {
+ return nullptr;
+ }
+
+ LIRGenerator lirgen(mir, graph, *lir);
+ {
+ if (!lirgen.generate()) {
+ return nullptr;
+ }
+ gs.spewPass("Generate LIR");
+
+ if (mir->shouldCancel("Generate LIR")) {
+ return nullptr;
+ }
+ }
+
+#ifdef DEBUG
+ AllocationIntegrityState integrity(*lir);
+#endif
+
+ {
+ IonRegisterAllocator allocator =
+ mir->optimizationInfo().registerAllocator();
+
+ switch (allocator) {
+ case RegisterAllocator_Backtracking:
+ case RegisterAllocator_Testbed: {
+#ifdef DEBUG
+ if (JitOptions.fullDebugChecks) {
+ if (!integrity.record()) {
+ return nullptr;
+ }
+ }
+#endif
+
+ BacktrackingAllocator regalloc(mir, &lirgen, *lir,
+ allocator == RegisterAllocator_Testbed);
+ if (!regalloc.go()) {
+ return nullptr;
+ }
+
+#ifdef DEBUG
+ if (JitOptions.fullDebugChecks) {
+ if (!integrity.check()) {
+ return nullptr;
+ }
+ }
+#endif
+
+ gs.spewPass("Allocate Registers [Backtracking]");
+ break;
+ }
+
+ default:
+ MOZ_CRASH("Bad regalloc");
+ }
+
+ if (mir->shouldCancel("Allocate Registers")) {
+ return nullptr;
+ }
+ }
+
+ return lir;
+}
+
+CodeGenerator* GenerateCode(MIRGenerator* mir, LIRGraph* lir) {
+ auto codegen = MakeUnique<CodeGenerator>(mir, lir);
+ if (!codegen) {
+ return nullptr;
+ }
+
+ if (!codegen->generate()) {
+ return nullptr;
+ }
+
+ return codegen.release();
+}
+
+CodeGenerator* CompileBackEnd(MIRGenerator* mir, WarpSnapshot* snapshot) {
+ // Everything in CompileBackEnd can potentially run on a helper thread.
+ AutoEnterIonBackend enter;
+ AutoSpewEndFunction spewEndFunction(mir);
+
+ {
+ WarpCompilation comp(mir->alloc());
+ WarpBuilder builder(*snapshot, *mir, &comp);
+ if (!builder.build()) {
+ return nullptr;
+ }
+ }
+
+ if (!OptimizeMIR(mir)) {
+ return nullptr;
+ }
+
+ LIRGraph* lir = GenerateLIR(mir);
+ if (!lir) {
+ return nullptr;
+ }
+
+ return GenerateCode(mir, lir);
+}
+
+static AbortReasonOr<WarpSnapshot*> CreateWarpSnapshot(JSContext* cx,
+ MIRGenerator* mirGen,
+ HandleScript script) {
+ // Suppress GC during compilation.
+ gc::AutoSuppressGC suppressGC(cx);
+
+ SpewBeginFunction(mirGen, script);
+
+ WarpOracle oracle(cx, *mirGen, script);
+
+ AbortReasonOr<WarpSnapshot*> result = oracle.createSnapshot();
+
+ MOZ_ASSERT_IF(result.isErr(), result.unwrapErr() == AbortReason::Alloc ||
+ result.unwrapErr() == AbortReason::Error ||
+ result.unwrapErr() == AbortReason::Disable);
+ MOZ_ASSERT_IF(!result.isErr(), result.unwrap());
+
+ return result;
+}
+
+static AbortReason IonCompile(JSContext* cx, HandleScript script,
+ jsbytecode* osrPc) {
+ cx->check(script);
+
+ auto alloc =
+ cx->make_unique<LifoAlloc>(TempAllocator::PreferredLifoChunkSize);
+ if (!alloc) {
+ return AbortReason::Error;
+ }
+
+ if (!cx->realm()->ensureJitRealmExists(cx)) {
+ return AbortReason::Error;
+ }
+
+ if (!cx->realm()->jitRealm()->ensureIonStubsExist(cx)) {
+ return AbortReason::Error;
+ }
+
+ TempAllocator* temp = alloc->new_<TempAllocator>(alloc.get());
+ if (!temp) {
+ return AbortReason::Alloc;
+ }
+
+ MIRGraph* graph = alloc->new_<MIRGraph>(temp);
+ if (!graph) {
+ return AbortReason::Alloc;
+ }
+
+ InlineScriptTree* inlineScriptTree =
+ InlineScriptTree::New(temp, nullptr, nullptr, script);
+ if (!inlineScriptTree) {
+ return AbortReason::Alloc;
+ }
+
+ CompileInfo* info = alloc->new_<CompileInfo>(
+ CompileRuntime::get(cx->runtime()), script, script->function(), osrPc,
+ script->needsArgsObj(), inlineScriptTree);
+ if (!info) {
+ return AbortReason::Alloc;
+ }
+
+ const OptimizationInfo* optimizationInfo =
+ IonOptimizations.get(OptimizationLevel::Normal);
+ const JitCompileOptions options(cx);
+
+ MIRGenerator* mirGen =
+ alloc->new_<MIRGenerator>(CompileRealm::get(cx->realm()), options, temp,
+ graph, info, optimizationInfo);
+ if (!mirGen) {
+ return AbortReason::Alloc;
+ }
+
+ MOZ_ASSERT(!script->baselineScript()->hasPendingIonCompileTask());
+ MOZ_ASSERT(!script->hasIonScript());
+ MOZ_ASSERT(script->canIonCompile());
+
+ if (osrPc) {
+ script->jitScript()->setHadIonOSR();
+ }
+
+ AbortReasonOr<WarpSnapshot*> result = CreateWarpSnapshot(cx, mirGen, script);
+ if (result.isErr()) {
+ return result.unwrapErr();
+ }
+ WarpSnapshot* snapshot = result.unwrap();
+
+ // If possible, compile the script off thread.
+ if (options.offThreadCompilationAvailable()) {
+ JitSpew(JitSpew_IonSyncLogs,
+ "Can't log script %s:%u:%u"
+ ". (Compiled on background thread.)",
+ script->filename(), script->lineno(), script->column());
+
+ IonCompileTask* task = alloc->new_<IonCompileTask>(cx, *mirGen, snapshot);
+ if (!task) {
+ return AbortReason::Alloc;
+ }
+
+ AutoLockHelperThreadState lock;
+ if (!StartOffThreadIonCompile(task, lock)) {
+ JitSpew(JitSpew_IonAbort, "Unable to start off-thread ion compilation.");
+ mirGen->graphSpewer().endFunction();
+ return AbortReason::Alloc;
+ }
+
+ script->jitScript()->setIsIonCompilingOffThread(script);
+
+ // The allocator and associated data will be destroyed after being
+ // processed in the finishedOffThreadCompilations list.
+ (void)alloc.release();
+
+ return AbortReason::NoAbort;
+ }
+
+ bool succeeded = false;
+ {
+ gc::AutoSuppressGC suppressGC(cx);
+ JitContext jctx(cx);
+ UniquePtr<CodeGenerator> codegen(CompileBackEnd(mirGen, snapshot));
+ if (!codegen) {
+ JitSpew(JitSpew_IonAbort, "Failed during back-end compilation.");
+ if (cx->isExceptionPending()) {
+ return AbortReason::Error;
+ }
+ return AbortReason::Disable;
+ }
+
+ succeeded = LinkCodeGen(cx, codegen.get(), script, snapshot);
+ }
+
+ if (succeeded) {
+ return AbortReason::NoAbort;
+ }
+ if (cx->isExceptionPending()) {
+ return AbortReason::Error;
+ }
+ return AbortReason::Disable;
+}
+
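+// Reject frames Ion cannot handle; in particular, frames with too many actual
+// or formal arguments, which could overrun the stack.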
+static bool CheckFrame(JSContext* cx, BaselineFrame* frame) {
+ MOZ_ASSERT(!frame->isDebuggerEvalFrame());
+ MOZ_ASSERT(!frame->isEvalFrame());
+
+ // This check ensures we don't overrun the stack.
+ if (frame->isFunctionFrame()) {
+ if (TooManyActualArguments(frame->numActualArgs())) {
+ JitSpew(JitSpew_IonAbort, "too many actual arguments");
+ return false;
+ }
+
+ if (TooManyFormalArguments(frame->numFormalArgs())) {
+ JitSpew(JitSpew_IonAbort, "too many arguments");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool CanIonCompileOrInlineScript(JSScript* script, const char** reason) {
+ if (script->isForEval()) {
+ // Eval frames are not yet supported. Supporting this will require new
+ // logic in pushBailoutFrame to deal with linking prev.
+ // Additionally, JSOp::GlobalOrEvalDeclInstantiation support will require
+ // baking in isEvalFrame().
+ *reason = "eval script";
+ return false;
+ }
+
+ if (script->isAsync()) {
+ if (script->isModule()) {
+ *reason = "async module";
+ return false;
+ }
+ }
+
+ if (script->hasNonSyntacticScope() && !script->function()) {
+ // Support functions with a non-syntactic global scope but not other
+ // scripts. For global scripts, WarpBuilder currently uses the global
+ // object as the scope chain, which is not valid when the script has a
+ // non-syntactic global scope.
+ *reason = "has non-syntactic global scope";
+ return false;
+ }
+
+ return true;
+}
+
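+// Returns true if the script exceeds the Ion size limits; the limits are
+// stricter when the compilation has to happen on the main thread.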
+static bool ScriptIsTooLarge(JSContext* cx, JSScript* script) {
+ if (!JitOptions.limitScriptSize) {
+ return false;
+ }
+
+ size_t numLocalsAndArgs = NumLocalsAndArgs(script);
+
+ bool canCompileOffThread = OffThreadCompilationAvailable(cx);
+ size_t maxScriptSize = canCompileOffThread
+ ? JitOptions.ionMaxScriptSize
+ : JitOptions.ionMaxScriptSizeMainThread;
+ size_t maxLocalsAndArgs = canCompileOffThread
+ ? JitOptions.ionMaxLocalsAndArgs
+ : JitOptions.ionMaxLocalsAndArgsMainThread;
+
+ if (script->length() > maxScriptSize || numLocalsAndArgs > maxLocalsAndArgs) {
+ JitSpew(JitSpew_IonAbort,
+ "Script too large (%zu bytes) (%zu locals/args) @ %s:%u:%u",
+ script->length(), numLocalsAndArgs, script->filename(),
+ script->lineno(), script->column());
+ return true;
+ }
+
+ return false;
+}
+
+bool CanIonCompileScript(JSContext* cx, JSScript* script) {
+ if (!script->canIonCompile()) {
+ return false;
+ }
+
+ const char* reason = nullptr;
+ if (!CanIonCompileOrInlineScript(script, &reason)) {
+ JitSpew(JitSpew_IonAbort, "%s", reason);
+ return false;
+ }
+
+ if (ScriptIsTooLarge(cx, script)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool CanIonInlineScript(JSScript* script) {
+ if (!script->canIonCompile()) {
+ return false;
+ }
+
+ const char* reason = nullptr;
+ if (!CanIonCompileOrInlineScript(script, &reason)) {
+ JitSpew(JitSpew_Inlining, "Cannot Ion compile script (%s)", reason);
+ return false;
+ }
+
+ return true;
+}
+
+static MethodStatus Compile(JSContext* cx, HandleScript script,
+ BaselineFrame* osrFrame, jsbytecode* osrPc) {
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+ MOZ_ASSERT(jit::IsBaselineJitEnabled(cx));
+
+ MOZ_ASSERT(script->hasBaselineScript());
+ MOZ_ASSERT(!script->baselineScript()->hasPendingIonCompileTask());
+ MOZ_ASSERT(!script->hasIonScript());
+
+ AutoGeckoProfilerEntry pseudoFrame(
+ cx, "Ion script compilation",
+ JS::ProfilingCategoryPair::JS_IonCompilation);
+
+ if (script->isDebuggee() || (osrFrame && osrFrame->isDebuggee())) {
+ JitSpew(JitSpew_IonAbort, "debugging");
+ return Method_Skipped;
+ }
+
+ if (!CanIonCompileScript(cx, script)) {
+ JitSpew(JitSpew_IonAbort, "Aborted compilation of %s:%u:%u",
+ script->filename(), script->lineno(), script->column());
+ return Method_CantCompile;
+ }
+
+ OptimizationLevel optimizationLevel =
+ IonOptimizations.levelForScript(script, osrPc);
+ if (optimizationLevel == OptimizationLevel::DontCompile) {
+ return Method_Skipped;
+ }
+
+ MOZ_ASSERT(optimizationLevel == OptimizationLevel::Normal);
+
+ if (!CanLikelyAllocateMoreExecutableMemory()) {
+ script->resetWarmUpCounterToDelayIonCompilation();
+ return Method_Skipped;
+ }
+
+ MOZ_ASSERT(!script->hasIonScript());
+
+ AbortReason reason = IonCompile(cx, script, osrPc);
+ if (reason == AbortReason::Error) {
+ MOZ_ASSERT(cx->isExceptionPending());
+ return Method_Error;
+ }
+
+ if (reason == AbortReason::Disable) {
+ return Method_CantCompile;
+ }
+
+ if (reason == AbortReason::Alloc) {
+ ReportOutOfMemory(cx);
+ return Method_Error;
+ }
+
+ // Compilation succeeded, we invalidated right away, or an inlining/alloc
+ // abort occurred.
+ if (script->hasIonScript()) {
+ return Method_Compiled;
+ }
+ return Method_Skipped;
+}
+
+} // namespace jit
+} // namespace js
+
+bool jit::OffThreadCompilationAvailable(JSContext* cx) {
+ // Even if off thread compilation is enabled, compilation must still occur
+ // on the main thread in some cases.
+ //
+ // Require cpuCount > 1 so that Ion compilation jobs and active-thread
+ // execution are not competing for the same resources.
+ return cx->runtime()->canUseOffthreadIonCompilation() &&
+ GetHelperThreadCPUCount() > 1 && CanUseExtraThreads();
+}
+
+MethodStatus jit::CanEnterIon(JSContext* cx, RunState& state) {
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+
+ HandleScript script = state.script();
+ MOZ_ASSERT(!script->hasIonScript());
+
+ // Skip if the script has been disabled.
+ if (!script->canIonCompile()) {
+ return Method_Skipped;
+ }
+
+ // Skip if the script is being compiled off thread.
+ if (script->isIonCompilingOffThread()) {
+ return Method_Skipped;
+ }
+
+ if (state.isInvoke()) {
+ InvokeState& invoke = *state.asInvoke();
+
+ if (TooManyActualArguments(invoke.args().length())) {
+ JitSpew(JitSpew_IonAbort, "too many actual args");
+ ForbidCompilation(cx, script);
+ return Method_CantCompile;
+ }
+
+ if (TooManyFormalArguments(
+ invoke.args().callee().as<JSFunction>().nargs())) {
+ JitSpew(JitSpew_IonAbort, "too many args");
+ ForbidCompilation(cx, script);
+ return Method_CantCompile;
+ }
+ }
+
+ // If --ion-eager is used, compile with Baseline first, so that we
+ // can directly enter IonMonkey.
+ if (JitOptions.eagerIonCompilation() && !script->hasBaselineScript()) {
+ MethodStatus status =
+ CanEnterBaselineMethod<BaselineTier::Compiler>(cx, state);
+ if (status != Method_Compiled) {
+ return status;
+ }
+ // Bytecode analysis may forbid compilation for a script.
+ if (!script->canIonCompile()) {
+ return Method_CantCompile;
+ }
+ }
+
+ if (!script->hasBaselineScript()) {
+ return Method_Skipped;
+ }
+
+ MOZ_ASSERT(!script->isIonCompilingOffThread());
+ MOZ_ASSERT(script->canIonCompile());
+
+ // Attempt compilation. Returns Method_Compiled if already compiled.
+ MethodStatus status = Compile(cx, script, /* osrFrame = */ nullptr,
+ /* osrPc = */ nullptr);
+ if (status != Method_Compiled) {
+ if (status == Method_CantCompile) {
+ ForbidCompilation(cx, script);
+ }
+ return status;
+ }
+
+ if (state.script()->baselineScript()->hasPendingIonCompileTask()) {
+ LinkIonScript(cx, state.script());
+ if (!state.script()->hasIonScript()) {
+ return jit::Method_Skipped;
+ }
+ }
+
+ return Method_Compiled;
+}
+
+static MethodStatus BaselineCanEnterAtEntry(JSContext* cx, HandleScript script,
+ BaselineFrame* frame) {
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+ MOZ_ASSERT(script->canIonCompile());
+ MOZ_ASSERT(!script->isIonCompilingOffThread());
+ MOZ_ASSERT(!script->hasIonScript());
+ MOZ_ASSERT(frame->isFunctionFrame());
+
+ // Mark as forbidden if frame can't be handled.
+ if (!CheckFrame(cx, frame)) {
+ ForbidCompilation(cx, script);
+ return Method_CantCompile;
+ }
+
+ if (script->baselineScript()->hasPendingIonCompileTask()) {
+ LinkIonScript(cx, script);
+ if (script->hasIonScript()) {
+ return Method_Compiled;
+ }
+ }
+
+ // Attempt compilation. Returns Method_Compiled if already compiled.
+ MethodStatus status = Compile(cx, script, frame, nullptr);
+ if (status != Method_Compiled) {
+ if (status == Method_CantCompile) {
+ ForbidCompilation(cx, script);
+ }
+ return status;
+ }
+
+ return Method_Compiled;
+}
+
+// Decide if a transition from baseline execution to Ion code should occur.
+// May compile or recompile the target JSScript.
+static MethodStatus BaselineCanEnterAtBranch(JSContext* cx, HandleScript script,
+ BaselineFrame* osrFrame,
+ jsbytecode* pc) {
+ MOZ_ASSERT(jit::IsIonEnabled(cx));
+ MOZ_ASSERT((JSOp)*pc == JSOp::LoopHead);
+
+ // Skip if the script has been disabled.
+ if (!script->canIonCompile()) {
+ return Method_Skipped;
+ }
+
+ // Skip if the script is being compiled off thread.
+ if (script->isIonCompilingOffThread()) {
+ return Method_Skipped;
+ }
+
+ // Optionally ignore on user request.
+ if (!JitOptions.osr) {
+ return Method_Skipped;
+ }
+
+ // Mark as forbidden if frame can't be handled.
+ if (!CheckFrame(cx, osrFrame)) {
+ ForbidCompilation(cx, script);
+ return Method_CantCompile;
+ }
+
+ // If the jitcode still needs to be linked, link it now so that we have a
+ // valid IonScript.
+ if (script->baselineScript()->hasPendingIonCompileTask()) {
+ LinkIonScript(cx, script);
+ }
+
+ // By default we don't recompile on an OSR pc mismatch. Decide whether to
+ // force a recompilation if mismatches happen too often.
+ if (script->hasIonScript()) {
+ if (pc == script->ionScript()->osrPc()) {
+ return Method_Compiled;
+ }
+
+ uint32_t count = script->ionScript()->incrOsrPcMismatchCounter();
+ if (count <= JitOptions.osrPcMismatchesBeforeRecompile &&
+ !JitOptions.eagerIonCompilation()) {
+ return Method_Skipped;
+ }
+
+ JitSpew(JitSpew_IonScripts, "Forcing OSR Mismatch Compilation");
+ Invalidate(cx, script);
+ }
+
+ // Attempt compilation.
+ // - Returns Method_Compiled if the right IonScript is present
+ // (meaning it was already present or a sequential compile finished)
+ // - Returns Method_Skipped if pc doesn't match
+ // (This means a background thread compilation with that pc could have
+ // started or not.)
+ MethodStatus status = Compile(cx, script, osrFrame, pc);
+ if (status != Method_Compiled) {
+ if (status == Method_CantCompile) {
+ ForbidCompilation(cx, script);
+ }
+ return status;
+ }
+
+ // Report that the compilation was skipped when the OSR pc doesn't match.
+ // This can happen when there was still an IonScript available and a
+ // background compilation was started but hasn't finished yet, or when we
+ // didn't force a recompile.
+ if (script->hasIonScript() && pc != script->ionScript()->osrPc()) {
+ return Method_Skipped;
+ }
+
+ return Method_Compiled;
+}
+
+static bool IonCompileScriptForBaseline(JSContext* cx, BaselineFrame* frame,
+ jsbytecode* pc) {
+ MOZ_ASSERT(IsIonEnabled(cx));
+
+ RootedScript script(cx, frame->script());
+ bool isLoopHead = JSOp(*pc) == JSOp::LoopHead;
+
+ // The Baseline JIT code checks for Ion disabled or compiling off-thread.
+ MOZ_ASSERT(script->canIonCompile());
+ MOZ_ASSERT(!script->isIonCompilingOffThread());
+
+ // If an IonScript exists but the PC is not at a loop head, then Ion will be
+ // entered for this script at an appropriate loop head or the next time this
+ // function is called.
+ if (script->hasIonScript() && !isLoopHead) {
+ JitSpew(JitSpew_BaselineOSR, "IonScript exists, but not at loop entry!");
+ // TODO: ASSERT that an ion-script-already-exists checker stub doesn't exist.
+ // TODO: Clear all optimized stubs.
+ // TODO: Add an ion-script-already-exists checker stub.
+ return true;
+ }
+
+ // Ensure that Ion-compiled code is available.
+ JitSpew(JitSpew_BaselineOSR,
+ "WarmUpCounter for %s:%u:%u reached %d at pc %p, trying to switch to "
+ "Ion!",
+ script->filename(), script->lineno(), script->column(),
+ (int)script->getWarmUpCount(), (void*)pc);
+
+ MethodStatus stat;
+ if (isLoopHead) {
+ JitSpew(JitSpew_BaselineOSR, " Compile at loop head!");
+ stat = BaselineCanEnterAtBranch(cx, script, frame, pc);
+ } else if (frame->isFunctionFrame()) {
+ JitSpew(JitSpew_BaselineOSR,
+ " Compile function from top for later entry!");
+ stat = BaselineCanEnterAtEntry(cx, script, frame);
+ } else {
+ return true;
+ }
+
+ if (stat == Method_Error) {
+ JitSpew(JitSpew_BaselineOSR, " Compile with Ion errored!");
+ return false;
+ }
+
+ if (stat == Method_CantCompile) {
+ MOZ_ASSERT(!script->canIonCompile());
+ JitSpew(JitSpew_BaselineOSR, " Can't compile with Ion!");
+ } else if (stat == Method_Skipped) {
+ JitSpew(JitSpew_BaselineOSR, " Skipped compile with Ion!");
+ } else if (stat == Method_Compiled) {
+ JitSpew(JitSpew_BaselineOSR, " Compiled with Ion!");
+ } else {
+ MOZ_CRASH("Invalid MethodStatus!");
+ }
+
+ return true;
+}
+
+bool jit::IonCompileScriptForBaselineAtEntry(JSContext* cx,
+ BaselineFrame* frame) {
+ JSScript* script = frame->script();
+ return IonCompileScriptForBaseline(cx, frame, script->code());
+}
+
+/* clang-format off */
+// The following data is kept in a temporary heap-allocated buffer, stored in
+// JitRuntime (high memory addresses at top, low at bottom):
+//
+// +----->+=================================+ -- <---- High Address
+// | | | |
+// | | ...BaselineFrame... | |-- Copy of BaselineFrame + stack values
+// | | | |
+// | +---------------------------------+ |
+// | | | |
+// | | ...Locals/Stack... | |
+// | | | |
+// | +=================================+ --
+// | | Padding(Maybe Empty) |
+// | +=================================+ --
+// +------|-- baselineFrame | |-- IonOsrTempData
+// | jitcode | |
+// +=================================+ -- <---- Low Address
+//
+// A pointer to the IonOsrTempData is returned.
+/* clang-format on */
+
+static IonOsrTempData* PrepareOsrTempData(JSContext* cx, BaselineFrame* frame,
+ uint32_t frameSize, void* jitcode) {
+ uint32_t numValueSlots = frame->numValueSlots(frameSize);
+
+ // Calculate the amount of space to allocate:
+ // BaselineFrame space:
+ // (sizeof(Value) * numValueSlots)
+ // + sizeof(BaselineFrame)
+ //
+ // IonOsrTempData space:
+ // sizeof(IonOsrTempData)
+
+ size_t frameSpace = sizeof(BaselineFrame) + sizeof(Value) * numValueSlots;
+ size_t ionOsrTempDataSpace = sizeof(IonOsrTempData);
+
+ size_t totalSpace = AlignBytes(frameSpace, sizeof(Value)) +
+ AlignBytes(ionOsrTempDataSpace, sizeof(Value));
+
+ JitRuntime* jrt = cx->runtime()->jitRuntime();
+ uint8_t* buf = jrt->allocateIonOsrTempData(totalSpace);
+ if (!buf) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ IonOsrTempData* info = new (buf) IonOsrTempData();
+ info->jitcode = jitcode;
+
+ // Copy the BaselineFrame + local/stack Values to the buffer. Arguments and
+ // |this| are not copied but left on the stack: the Baseline and Ion frame
+ // share the same frame prefix and Ion won't clobber these values. Note
+ // that info->baselineFrame will point to the *end* of the frame data, like
+ // the frame pointer register in baseline frames.
+ uint8_t* frameStart =
+ (uint8_t*)info + AlignBytes(ionOsrTempDataSpace, sizeof(Value));
+ info->baselineFrame = frameStart + frameSpace;
+
+ memcpy(frameStart, (uint8_t*)frame - numValueSlots * sizeof(Value),
+ frameSpace);
+
+ JitSpew(JitSpew_BaselineOSR, "Allocated IonOsrTempData at %p", info);
+ JitSpew(JitSpew_BaselineOSR, "Jitcode is %p", info->jitcode);
+
+ // All done.
+ return info;
+}
+
+bool jit::IonCompileScriptForBaselineOSR(JSContext* cx, BaselineFrame* frame,
+ uint32_t frameSize, jsbytecode* pc,
+ IonOsrTempData** infoPtr) {
+ MOZ_ASSERT(infoPtr);
+ *infoPtr = nullptr;
+
+ MOZ_ASSERT(frame->debugFrameSize() == frameSize);
+ MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead);
+
+ if (!IonCompileScriptForBaseline(cx, frame, pc)) {
+ return false;
+ }
+
+ RootedScript script(cx, frame->script());
+ if (!script->hasIonScript() || script->ionScript()->osrPc() != pc ||
+ frame->isDebuggee()) {
+ return true;
+ }
+
+ IonScript* ion = script->ionScript();
+ MOZ_ASSERT(cx->runtime()->geckoProfiler().enabled() ==
+ ion->hasProfilingInstrumentation());
+ MOZ_ASSERT(ion->osrPc() == pc);
+
+ ion->resetOsrPcMismatchCounter();
+
+ JitSpew(JitSpew_BaselineOSR, " OSR possible!");
+ void* jitcode = ion->method()->raw() + ion->osrEntryOffset();
+
+ // Prepare the temporary heap copy of the BaselineFrame and its local/stack
+ // values.
+ JitSpew(JitSpew_BaselineOSR, "Got jitcode. Preparing for OSR into ion.");
+ IonOsrTempData* info = PrepareOsrTempData(cx, frame, frameSize, jitcode);
+ if (!info) {
+ return false;
+ }
+
+ *infoPtr = info;
+ return true;
+}
+
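+// Walk the JIT frames of one activation and patch the OSI point of every Ion
+// frame whose script has been invalidated (or of every Ion frame when
+// |invalidateAll| is set) so that it calls the invalidation epilogue on
+// return.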
+static void InvalidateActivation(JS::GCContext* gcx,
+ const JitActivationIterator& activations,
+ bool invalidateAll) {
+ JitSpew(JitSpew_IonInvalidate, "BEGIN invalidating activation");
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (JitOptions.checkOsiPointRegisters) {
+ activations->asJit()->setCheckRegs(false);
+ }
+#endif
+
+ size_t frameno = 1;
+
+ for (OnlyJSJitFrameIter iter(activations); !iter.done(); ++iter, ++frameno) {
+ const JSJitFrameIter& frame = iter.frame();
+ MOZ_ASSERT_IF(frameno == 1, frame.isExitFrame() ||
+ frame.type() == FrameType::Bailout ||
+ frame.type() == FrameType::JSJitToWasm);
+
+#ifdef JS_JITSPEW
+ switch (frame.type()) {
+ case FrameType::Exit:
+ JitSpew(JitSpew_IonInvalidate, "#%zu exit frame @ %p", frameno,
+ frame.fp());
+ break;
+ case FrameType::JSJitToWasm:
+ JitSpew(JitSpew_IonInvalidate, "#%zu wasm exit frame @ %p", frameno,
+ frame.fp());
+ break;
+ case FrameType::BaselineJS:
+ case FrameType::IonJS:
+ case FrameType::Bailout: {
+ MOZ_ASSERT(frame.isScripted());
+ const char* type = "Unknown";
+ if (frame.isIonJS()) {
+ type = "Optimized";
+ } else if (frame.isBaselineJS()) {
+ type = "Baseline";
+ } else if (frame.isBailoutJS()) {
+ type = "Bailing";
+ }
+ JSScript* script = frame.maybeForwardedScript();
+ JitSpew(JitSpew_IonInvalidate,
+ "#%zu %s JS frame @ %p, %s:%u:%u (fun: %p, script: %p, pc %p)",
+ frameno, type, frame.fp(), script->maybeForwardedFilename(),
+ script->lineno(), script->column(), frame.maybeCallee(), script,
+ frame.resumePCinCurrentFrame());
+ break;
+ }
+ case FrameType::BaselineStub:
+ JitSpew(JitSpew_IonInvalidate, "#%zu baseline stub frame @ %p", frameno,
+ frame.fp());
+ break;
+ case FrameType::BaselineInterpreterEntry:
+ JitSpew(JitSpew_IonInvalidate,
+ "#%zu baseline interpreter entry frame @ %p", frameno,
+ frame.fp());
+ break;
+ case FrameType::Rectifier:
+ JitSpew(JitSpew_IonInvalidate, "#%zu rectifier frame @ %p", frameno,
+ frame.fp());
+ break;
+ case FrameType::IonICCall:
+ JitSpew(JitSpew_IonInvalidate, "#%zu ion IC call frame @ %p", frameno,
+ frame.fp());
+ break;
+ case FrameType::CppToJSJit:
+ JitSpew(JitSpew_IonInvalidate, "#%zu entry frame @ %p", frameno,
+ frame.fp());
+ break;
+ case FrameType::WasmToJSJit:
+ JitSpew(JitSpew_IonInvalidate, "#%zu wasm frames @ %p", frameno,
+ frame.fp());
+ break;
+ }
+#endif // JS_JITSPEW
+
+ if (!frame.isIonScripted()) {
+ continue;
+ }
+
+ // See if the frame has already been invalidated.
+ if (frame.checkInvalidation()) {
+ continue;
+ }
+
+ JSScript* script = frame.maybeForwardedScript();
+ if (!script->hasIonScript()) {
+ continue;
+ }
+
+ if (!invalidateAll && !script->ionScript()->invalidated()) {
+ continue;
+ }
+
+ IonScript* ionScript = script->ionScript();
+
+ // Purge ICs before we mark this script as invalidated. This will
+ // prevent lastJump_ from appearing to be a bogus pointer, just
+ // in case anyone tries to read it.
+ ionScript->purgeICs(script->zone());
+
+ // This frame needs to be invalidated. We do the following:
+ //
+ // 1. Increment the reference counter to keep the ionScript alive
+ // for the invalidation bailout or for the exception handler.
+ // 2. Determine safepoint that corresponds to the current call.
+ // 3. From safepoint, get distance to the OSI-patchable offset.
+ // 4. From the IonScript, determine the distance between the
+ // call-patchable offset and the invalidation epilogue.
+ // 5. Patch the OSI point with a call-relative to the
+ // invalidation epilogue.
+ //
+ // The code generator ensures that there's enough space for us
+ // to patch in a call-relative operation at each invalidation
+ // point.
+ //
+ // Note: you can't simplify this mechanism to "just patch the
+ // instruction immediately after the call" because things may
+ // need to move into a well-defined register state (using move
+ // instructions after the call) in order to capture an appropriate
+ // snapshot after the call occurs.
+
+ ionScript->incrementInvalidationCount();
+
+ JitCode* ionCode = ionScript->method();
+
+ // We're about to remove edges from the JSScript to GC things embedded in
+ // the JitCode. Perform a barrier to let the GC know about those edges.
+ PreWriteBarrier(script->zone(), ionCode, [](JSTracer* trc, JitCode* code) {
+ code->traceChildren(trc);
+ });
+
+ ionCode->setInvalidated();
+
+ // Don't adjust OSI points in a bailout path.
+ if (frame.isBailoutJS()) {
+ continue;
+ }
+
+ // Write the delta (from the return address offset to the
+ // IonScript pointer embedded into the invalidation epilogue)
+ // where the safepointed call instruction used to be. We rely on the call
+ // sequence that caused the safepoint being at least the size of a uint32,
+ // which is checked during safepoint index construction.
+ AutoWritableJitCode awjc(ionCode);
+ const SafepointIndex* si =
+ ionScript->getSafepointIndex(frame.resumePCinCurrentFrame());
+ CodeLocationLabel dataLabelToMunge(frame.resumePCinCurrentFrame());
+ ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
+ (frame.resumePCinCurrentFrame() - ionCode->raw());
+ Assembler::PatchWrite_Imm32(dataLabelToMunge, Imm32(delta));
+
+ CodeLocationLabel osiPatchPoint =
+ SafepointReader::InvalidationPatchPoint(ionScript, si);
+ CodeLocationLabel invalidateEpilogue(
+ ionCode, CodeOffset(ionScript->invalidateEpilogueOffset()));
+
+ JitSpew(
+ JitSpew_IonInvalidate,
+ " ! Invalidate ionScript %p (inv count %zu) -> patching osipoint %p",
+ ionScript, ionScript->invalidationCount(), (void*)osiPatchPoint.raw());
+ Assembler::PatchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
+ }
+
+ JitSpew(JitSpew_IonInvalidate, "END invalidating activation");
+}
+
+void jit::InvalidateAll(JS::GCContext* gcx, Zone* zone) {
+ // The caller should previously have cancelled off thread compilation.
+#ifdef DEBUG
+ for (RealmsInZoneIter realm(zone); !realm.done(); realm.next()) {
+ MOZ_ASSERT(!HasOffThreadIonCompile(realm));
+ }
+#endif
+ if (zone->isAtomsZone()) {
+ return;
+ }
+ JSContext* cx = TlsContext.get();
+ for (JitActivationIterator iter(cx); !iter.done(); ++iter) {
+ if (iter->compartment()->zone() == zone) {
+ JitSpew(JitSpew_IonInvalidate, "Invalidating all frames for GC");
+ InvalidateActivation(gcx, iter, true);
+ }
+ }
+}
+
+static void ClearIonScriptAfterInvalidation(JSContext* cx, JSScript* script,
+ IonScript* ionScript,
+ bool resetUses) {
+ // Null out the JitScript's IonScript pointer. The caller is responsible for
+ // destroying the IonScript using the invalidation count mechanism.
+ DebugOnly<IonScript*> clearedIonScript =
+ script->jitScript()->clearIonScript(cx->gcContext(), script);
+ MOZ_ASSERT(clearedIonScript == ionScript);
+
+ // Wait for the scripts to get warm again before doing another
+ // compile, unless we are recompiling *because* a script got hot
+ // (resetUses is false).
+ if (resetUses) {
+ script->resetWarmUpCounterToDelayIonCompilation();
+ }
+}
+
+void jit::Invalidate(JSContext* cx, const RecompileInfoVector& invalid,
+ bool resetUses, bool cancelOffThread) {
+ JitSpew(JitSpew_IonInvalidate, "Start invalidation.");
+
+ // Add an invalidation reference to all invalidated IonScripts to indicate
+ // to the traversal which frames have been invalidated.
+ size_t numInvalidations = 0;
+ for (const RecompileInfo& info : invalid) {
+ if (cancelOffThread) {
+ CancelOffThreadIonCompile(info.script());
+ }
+
+ IonScript* ionScript = info.maybeIonScriptToInvalidate();
+ if (!ionScript) {
+ continue;
+ }
+
+ JitSpew(JitSpew_IonInvalidate, " Invalidate %s:%u:%u, IonScript %p",
+ info.script()->filename(), info.script()->lineno(),
+ info.script()->column(), ionScript);
+
+ // Keep the IonScript alive during the invalidation and flag this
+ // ionScript as being invalidated. This increment is removed by the
+ // second loop below, after the calls to InvalidateActivation.
+ ionScript->incrementInvalidationCount();
+ numInvalidations++;
+ }
+
+ if (!numInvalidations) {
+ JitSpew(JitSpew_IonInvalidate, " No IonScript invalidation.");
+ return;
+ }
+
+ JS::GCContext* gcx = cx->gcContext();
+ for (JitActivationIterator iter(cx); !iter.done(); ++iter) {
+ InvalidateActivation(gcx, iter, false);
+ }
+
+ // Drop the references added above. If a script was never active, its
+ // IonScript will be immediately destroyed. Otherwise, it will be held live
+ // until its last invalidated frame is destroyed.
+ for (const RecompileInfo& info : invalid) {
+ IonScript* ionScript = info.maybeIonScriptToInvalidate();
+ if (!ionScript) {
+ continue;
+ }
+
+ if (ionScript->invalidationCount() == 1) {
+ // decrementInvalidationCount will destroy the IonScript so null out
+ // jitScript->ionScript_ now. We don't want to do this unconditionally
+ // because maybeIonScriptToInvalidate depends on script->ionScript() (we
+ // would leak the IonScript if |invalid| contains duplicates).
+ ClearIonScriptAfterInvalidation(cx, info.script(), ionScript, resetUses);
+ }
+
+ ionScript->decrementInvalidationCount(gcx);
+ numInvalidations--;
+ }
+
+ // Make sure we didn't leak references by invalidating the same IonScript
+ // multiple times in the above loop.
+ MOZ_ASSERT(!numInvalidations);
+
+ // Finally, null out jitScript->ionScript_ for IonScripts that are still on
+ // the stack.
+ for (const RecompileInfo& info : invalid) {
+ if (IonScript* ionScript = info.maybeIonScriptToInvalidate()) {
+ ClearIonScriptAfterInvalidation(cx, info.script(), ionScript, resetUses);
+ }
+ }
+}
+
+void jit::IonScript::invalidate(JSContext* cx, JSScript* script, bool resetUses,
+ const char* reason) {
+ // Note: we could short circuit here if we already invalidated this
+ // IonScript, but jit::Invalidate also cancels off-thread compilations of
+ // |script|.
+ MOZ_RELEASE_ASSERT(invalidated() || script->ionScript() == this);
+
+ JitSpew(JitSpew_IonInvalidate, " Invalidate IonScript %p: %s", this, reason);
+
+ // RecompileInfoVector has inline space for at least one element.
+ RecompileInfoVector list;
+ MOZ_RELEASE_ASSERT(list.reserve(1));
+ list.infallibleEmplaceBack(script, compilationId());
+
+ Invalidate(cx, list, resetUses, true);
+}
+
+void jit::Invalidate(JSContext* cx, JSScript* script, bool resetUses,
+ bool cancelOffThread) {
+ MOZ_ASSERT(script->hasIonScript());
+
+ if (cx->runtime()->geckoProfiler().enabled()) {
+ // Register invalidation with profiler.
+ // Format of event payload string:
+ // "<filename>:<lineno>:<column>"
+
+ // Get the script filename, if any, and its length.
+ const char* filename = script->filename();
+ if (filename == nullptr) {
+ filename = "<unknown>";
+ }
+
+ // Construct the descriptive string.
+ UniqueChars buf =
+ JS_smprintf("%s:%u:%u", filename, script->lineno(), script->column());
+
+ // Ignore the event on allocation failure.
+ if (buf) {
+ cx->runtime()->geckoProfiler().markEvent("Invalidate", buf.get());
+ }
+ }
+
+ // RecompileInfoVector has inline space for at least one element.
+ RecompileInfoVector scripts;
+ MOZ_ASSERT(script->hasIonScript());
+ MOZ_RELEASE_ASSERT(scripts.reserve(1));
+ scripts.infallibleEmplaceBack(script, script->ionScript()->compilationId());
+
+ Invalidate(cx, scripts, resetUses, cancelOffThread);
+}
+
+void jit::FinishInvalidation(JS::GCContext* gcx, JSScript* script) {
+ if (!script->hasIonScript()) {
+ return;
+ }
+
+ // In all cases, null out jitScript->ionScript_ to avoid re-entry.
+ IonScript* ion = script->jitScript()->clearIonScript(gcx, script);
+
+ // If this script has Ion code on the stack, invalidated() will return
+ // true and we have to wait until those frames are gone before destroying
+ // the IonScript.
+ if (!ion->invalidated()) {
+ jit::IonScript::Destroy(gcx, ion);
+ }
+}
+
+void jit::ForbidCompilation(JSContext* cx, JSScript* script) {
+ JitSpew(JitSpew_IonAbort, "Disabling Ion compilation of script %s:%u:%u",
+ script->filename(), script->lineno(), script->column());
+
+ CancelOffThreadIonCompile(script);
+
+ if (script->hasIonScript()) {
+ Invalidate(cx, script, false);
+ }
+
+ script->disableIon();
+}
+
+size_t jit::SizeOfIonData(JSScript* script,
+ mozilla::MallocSizeOf mallocSizeOf) {
+ size_t result = 0;
+
+ if (script->hasIonScript()) {
+ result += script->ionScript()->sizeOfIncludingThis(mallocSizeOf);
+ }
+
+ return result;
+}
+
+// If you change these, please also change the comment in TempAllocator.
+/* static */ const size_t TempAllocator::BallastSize = 16 * 1024;
+/* static */ const size_t TempAllocator::PreferredLifoChunkSize = 32 * 1024;
diff --git a/js/src/jit/Ion.h b/js/src/jit/Ion.h
new file mode 100644
index 0000000000..f48aa71726
--- /dev/null
+++ b/js/src/jit/Ion.h
@@ -0,0 +1,154 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Ion_h
+#define jit_Ion_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MemoryReporting.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jsfriendapi.h"
+#include "jspubtd.h"
+
+#include "jit/BaselineJIT.h"
+#include "jit/IonTypes.h"
+#include "jit/JitContext.h"
+#include "jit/JitOptions.h"
+#include "js/Principals.h"
+#include "js/TypeDecls.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/JSContext.h"
+#include "vm/JSFunction.h"
+#include "vm/JSScript.h"
+
+namespace js {
+
+class RunState;
+
+namespace jit {
+
+class BaselineFrame;
+
+bool CanIonCompileScript(JSContext* cx, JSScript* script);
+bool CanIonInlineScript(JSScript* script);
+
+[[nodiscard]] bool IonCompileScriptForBaselineAtEntry(JSContext* cx,
+ BaselineFrame* frame);
+
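+// Temporary data prepared when OSR-ing from Baseline into Ion: the Ion
+// jitcode entry point plus a heap copy of the BaselineFrame and its
+// local/stack values (see PrepareOsrTempData in Ion.cpp).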
+struct IonOsrTempData {
+ void* jitcode;
+ uint8_t* baselineFrame;
+
+ static constexpr size_t offsetOfJitCode() {
+ return offsetof(IonOsrTempData, jitcode);
+ }
+ static constexpr size_t offsetOfBaselineFrame() {
+ return offsetof(IonOsrTempData, baselineFrame);
+ }
+};
+
+[[nodiscard]] bool IonCompileScriptForBaselineOSR(JSContext* cx,
+ BaselineFrame* frame,
+ uint32_t frameSize,
+ jsbytecode* pc,
+ IonOsrTempData** infoPtr);
+
+MethodStatus CanEnterIon(JSContext* cx, RunState& state);
+
+class MIRGenerator;
+class LIRGraph;
+class CodeGenerator;
+class LazyLinkExitFrameLayout;
+class WarpSnapshot;
+
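+// The Ion back end pipeline: OptimizeMIR runs the MIR optimization passes,
+// GenerateLIR lowers MIR to LIR and allocates registers, GenerateCode emits
+// native code, and CompileBackEnd drives all three (possibly on a helper
+// thread).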
+[[nodiscard]] bool OptimizeMIR(MIRGenerator* mir);
+LIRGraph* GenerateLIR(MIRGenerator* mir);
+CodeGenerator* GenerateCode(MIRGenerator* mir, LIRGraph* lir);
+CodeGenerator* CompileBackEnd(MIRGenerator* mir, WarpSnapshot* snapshot);
+
+void LinkIonScript(JSContext* cx, HandleScript calleescript);
+uint8_t* LazyLinkTopActivation(JSContext* cx, LazyLinkExitFrameLayout* frame);
+
+inline bool IsIonInlinableGetterOrSetterOp(JSOp op) {
+ // JSOp::GetProp, JSOp::CallProp, JSOp::Length, JSOp::GetElem,
+ // and JSOp::CallElem. (Inlined Getters)
+ // JSOp::SetProp, JSOp::SetName, JSOp::SetGName (Inlined Setters)
+ return IsGetPropOp(op) || IsGetElemOp(op) || IsSetPropOp(op);
+}
+
+inline bool IsIonInlinableOp(JSOp op) {
+ // JSOp::Call, JSOp::FunCall, JSOp::Eval, JSOp::New (Normal Callsites) or an
+ // inlinable getter or setter.
+ return (IsInvokeOp(op) && !IsSpreadOp(op)) ||
+ IsIonInlinableGetterOrSetterOp(op);
+}
+
+inline bool TooManyFormalArguments(unsigned nargs) {
+ return nargs >= SNAPSHOT_MAX_NARGS || TooManyActualArguments(nargs);
+}
+
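+// Counts |this|, the fixed slots, and (for function scripts) the formal
+// arguments; e.g. a function with two formals and three fixed slots yields
+// 1 + 3 + 2 = 6.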
+inline size_t NumLocalsAndArgs(JSScript* script) {
+ size_t num = 1 /* this */ + script->nfixed();
+ if (JSFunction* fun = script->function()) {
+ num += fun->nargs();
+ }
+ return num;
+}
+
+// Debugging RAII class which marks the current thread as performing an Ion
+// backend compilation.
+class MOZ_RAII AutoEnterIonBackend {
+ public:
+ AutoEnterIonBackend() {
+#ifdef DEBUG
+ JitContext* jcx = GetJitContext();
+ jcx->enterIonBackend();
+#endif
+ }
+
+#ifdef DEBUG
+ ~AutoEnterIonBackend() {
+ JitContext* jcx = GetJitContext();
+ jcx->leaveIonBackend();
+ }
+#endif
+};
+
+bool OffThreadCompilationAvailable(JSContext* cx);
+
+void ForbidCompilation(JSContext* cx, JSScript* script);
+
+size_t SizeOfIonData(JSScript* script, mozilla::MallocSizeOf mallocSizeOf);
+
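+// Ion can be used when Baseline is enabled and Ion is not disabled for the
+// context, and either the global Ion option is on or the realm has system or
+// add-on principals and jitForTrustedPrincipals is set.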
+inline bool IsIonEnabled(JSContext* cx) {
+ if (MOZ_UNLIKELY(!IsBaselineJitEnabled(cx) || cx->options().disableIon())) {
+ return false;
+ }
+
+ if (MOZ_LIKELY(JitOptions.ion)) {
+ return true;
+ }
+ if (JitOptions.jitForTrustedPrincipals) {
+ JS::Realm* realm = js::GetContextRealm(cx);
+ return realm && JS::GetRealmPrincipals(realm) &&
+ JS::GetRealmPrincipals(realm)->isSystemOrAddonPrincipal();
+ }
+ return false;
+}
+
+// Implemented per-platform. Returns true if the flags will not require
+// further (lazy) computation.
+bool CPUFlagsHaveBeenComputed();
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Ion_h */
diff --git a/js/src/jit/IonAnalysis.cpp b/js/src/jit/IonAnalysis.cpp
new file mode 100644
index 0000000000..d15c0d5df0
--- /dev/null
+++ b/js/src/jit/IonAnalysis.cpp
@@ -0,0 +1,4934 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonAnalysis.h"
+
+#include <algorithm>
+#include <utility> // for ::std::pair
+
+#include "jit/AliasAnalysis.h"
+#include "jit/CompileInfo.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "util/CheckedArithmetic.h"
+
+#include "vm/BytecodeUtil-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+// Stack used by FlagPhiInputsAsImplicitlyUsed. It stores the Phi instruction
+// pointer and the MUseIterator which should be visited next.
+using MPhiUseIteratorStack =
+ Vector<std::pair<MPhi*, MUseIterator>, 16, SystemAllocPolicy>;
+
+// Look for Phi uses with a depth-first search. If any uses are found, the
+// stack of MPhi instructions is returned in the |worklist| argument.
+static bool DepthFirstSearchUse(MIRGenerator* mir,
+ MPhiUseIteratorStack& worklist, MPhi* phi) {
+ // Push a Phi and the next use to iterate over in the worklist.
+ auto push = [&worklist](MPhi* phi, MUseIterator use) -> bool {
+ phi->setInWorklist();
+ return worklist.append(std::make_pair(phi, use));
+ };
+
+#ifdef DEBUG
+ // Used to assert that when we have no uses, we at least visited all the
+ // transitive uses.
+ size_t refUseCount = phi->useCount();
+ size_t useCount = 0;
+#endif
+ MOZ_ASSERT(worklist.empty());
+ if (!push(phi, phi->usesBegin())) {
+ return false;
+ }
+
+ while (!worklist.empty()) {
+ // Resume iterating over the last phi-use pair added by the next loop.
+ auto pair = worklist.popCopy();
+ MPhi* producer = pair.first;
+ MUseIterator use = pair.second;
+ MUseIterator end(producer->usesEnd());
+ producer->setNotInWorklist();
+
+ // Keep going down the tree of uses, skipping (continue)
+ // non-observable/unused cases and Phis which are already listed in the
+ // worklist. Stop (return) as soon as one use is found.
+ while (use != end) {
+ MNode* consumer = (*use)->consumer();
+ MUseIterator it = use;
+ use++;
+#ifdef DEBUG
+ useCount++;
+#endif
+ if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed inner loop")) {
+ return false;
+ }
+
+ if (consumer->isResumePoint()) {
+ MResumePoint* rp = consumer->toResumePoint();
+ // Observable operands are similar to potential uses.
+ if (rp->isObservableOperand(*it)) {
+ return push(producer, use);
+ }
+ continue;
+ }
+
+ MDefinition* cdef = consumer->toDefinition();
+ if (!cdef->isPhi()) {
+ // The producer is explicitly used by a definition.
+ return push(producer, use);
+ }
+
+ MPhi* cphi = cdef->toPhi();
+ if (cphi->getUsageAnalysis() == PhiUsage::Used ||
+ cphi->isImplicitlyUsed()) {
+ // The information got cached on the Phi the last time it
+ // got visited, or when flagging operands of implicitly used
+ // instructions.
+ return push(producer, use);
+ }
+
+ if (cphi->isInWorklist() || cphi == producer) {
+ // We are already iterating over the uses of this Phi instruction, which
+ // means it is part of a loop. Instead of trying to handle loops,
+ // conservatively mark it as used.
+ return push(producer, use);
+ }
+
+ if (cphi->getUsageAnalysis() == PhiUsage::Unused) {
+ // The instruction already got visited and is known to have
+ // no uses. Skip it.
+ continue;
+ }
+
+      // We found another Phi instruction. Move the use iterator to the next
+      // use and push it to the worklist stack. Then continue with a
+      // depth-first search of the new Phi's uses.
+ if (!push(producer, use)) {
+ return false;
+ }
+ producer = cphi;
+ use = producer->usesBegin();
+ end = producer->usesEnd();
+#ifdef DEBUG
+ refUseCount += producer->useCount();
+#endif
+ }
+
+    // When unused, we cannot bubble up this information without iterating
+    // over the rest of the previous Phi instruction's consumers.
+ MOZ_ASSERT(use == end);
+ producer->setUsageAnalysis(PhiUsage::Unused);
+ }
+
+ MOZ_ASSERT(useCount == refUseCount);
+ return true;
+}
+
+static bool FlagPhiInputsAsImplicitlyUsed(MIRGenerator* mir, MBasicBlock* block,
+ MBasicBlock* succ,
+ MPhiUseIteratorStack& worklist) {
+ // When removing an edge between 2 blocks, we might remove the ability of
+ // later phases to figure out that the uses of a Phi should be considered as
+ // a use of all its inputs. Thus we need to mark the Phi inputs as being
+ // implicitly used iff the phi has any uses.
+ //
+ //
+ // +--------------------+ +---------------------+
+ // |12 MFoo 6 | |32 MBar 5 |
+ // | | | |
+ // | ... | | ... |
+ // | | | |
+ // |25 MGoto Block 4 | |43 MGoto Block 4 |
+ // +--------------------+ +---------------------+
+ // | |
+ // | | |
+ // | | |
+ // | +-----X------------------------+
+ // | Edge |
+ // | Removed |
+ // | |
+ // | +------------v-----------+
+ // | |50 MPhi 12 32 |
+ // | | |
+ // | | ... |
+ // | | |
+ // | |70 MReturn 50 |
+ // | +------------------------+
+ // |
+ // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ // |
+ // v
+ //
+ // ^ +--------------------+ +---------------------+
+ // /!\ |12 MConst opt-out | |32 MBar 5 |
+ // '---' | | | |
+ // | ... | | ... |
+ // |78 MBail | | |
+ // |80 MUnreachable | |43 MGoto Block 4 |
+ // +--------------------+ +---------------------+
+ // |
+ // |
+ // |
+ // +---------------+
+ // |
+ // |
+ // |
+ // +------------v-----------+
+ // |50 MPhi 32 |
+ // | |
+ // | ... |
+ // | |
+ // |70 MReturn 50 |
+ // +------------------------+
+ //
+ //
+ // If the inputs of the Phi are not flagged as implicitly used, then
+  // later compilation phases might optimize them out. The problem is that a
+ // bailout will use this value and give it back to baseline, which will then
+ // use the OptimizedOut magic value in a computation.
+ //
+ // Unfortunately, we cannot be too conservative about flagging Phi inputs as
+ // having implicit uses, as this would prevent many optimizations from being
+ // used. Thus, the following code is in charge of flagging Phi instructions
+ // as Unused or Used, and setting ImplicitlyUsed accordingly.
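+  //
+  // As a rough JS-level sketch (illustrative only):
+  //
+  //   var y = c ? (a * 2) : b;
+  //   use(y);
+  //
+  // If the then-branch turns out to always bail, the edge from that branch
+  // to the phi for |y| is removed and |a * 2| loses its only MIR use. The
+  // bailout still has to hand that value back to baseline, so it must be
+  // flagged as implicitly used rather than optimized out.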
+ size_t predIndex = succ->getPredecessorIndex(block);
+ MPhiIterator end = succ->phisEnd();
+ MPhiIterator it = succ->phisBegin();
+ for (; it != end; it++) {
+ MPhi* phi = *it;
+
+ if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed outer loop")) {
+ return false;
+ }
+
+ // We are looking to mark the Phi inputs which are used across the edge
+ // between the |block| and its successor |succ|.
+ MDefinition* def = phi->getOperand(predIndex);
+ if (def->isImplicitlyUsed()) {
+ continue;
+ }
+
+ // If the Phi is either Used or Unused, set the ImplicitlyUsed flag
+ // accordingly.
+ if (phi->getUsageAnalysis() == PhiUsage::Used || phi->isImplicitlyUsed()) {
+ def->setImplicitlyUsedUnchecked();
+ continue;
+ } else if (phi->getUsageAnalysis() == PhiUsage::Unused) {
+ continue;
+ }
+
+    // We do not know whether the Phi is Used or Unused, so iterate over all
+    // uses with a depth-first search. The matching stack is returned in
+    // |worklist| as soon as one use is found.
+ MOZ_ASSERT(worklist.empty());
+ if (!DepthFirstSearchUse(mir, worklist, phi)) {
+ return false;
+ }
+
+ MOZ_ASSERT_IF(worklist.empty(),
+ phi->getUsageAnalysis() == PhiUsage::Unused);
+ if (!worklist.empty()) {
+      // One of the Phis is used; set the Used flag on all the Phis which are
+ // in the use chain.
+ def->setImplicitlyUsedUnchecked();
+ do {
+ auto pair = worklist.popCopy();
+ MPhi* producer = pair.first;
+ producer->setUsageAnalysis(PhiUsage::Used);
+ producer->setNotInWorklist();
+ } while (!worklist.empty());
+ }
+ MOZ_ASSERT(phi->getUsageAnalysis() != PhiUsage::Unknown);
+ }
+
+ return true;
+}
+
+static MInstructionIterator FindFirstInstructionAfterBail(MBasicBlock* block) {
+ MOZ_ASSERT(block->alwaysBails());
+ for (MInstructionIterator it = block->begin(); it != block->end(); it++) {
+ MInstruction* ins = *it;
+ if (ins->isBail()) {
+ it++;
+ return it;
+ }
+ }
+ MOZ_CRASH("Expected MBail in alwaysBails block");
+}
+
+// Given an iterator pointing to the first removed instruction, mark
+// the operands of each removed instruction as having implicit uses.
+static bool FlagOperandsAsImplicitlyUsedAfter(
+ MIRGenerator* mir, MBasicBlock* block, MInstructionIterator firstRemoved) {
+ MOZ_ASSERT(firstRemoved->block() == block);
+
+ const CompileInfo& info = block->info();
+
+ // Flag operands of removed instructions as having implicit uses.
+ MInstructionIterator end = block->end();
+ for (MInstructionIterator it = firstRemoved; it != end; it++) {
+ if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 1)")) {
+ return false;
+ }
+
+ MInstruction* ins = *it;
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ ins->getOperand(i)->setImplicitlyUsedUnchecked();
+ }
+
+ // Flag observable resume point operands as having implicit uses.
+ if (MResumePoint* rp = ins->resumePoint()) {
+      // Note: no need to iterate over the callers of the resume point, as
+ // this is the same as the entry resume point.
+ MOZ_ASSERT(&rp->block()->info() == &info);
+ for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
+ if (info.isObservableSlot(i)) {
+ rp->getOperand(i)->setImplicitlyUsedUnchecked();
+ }
+ }
+ }
+ }
+
+ // Flag Phi inputs of the successors as having implicit uses.
+ MPhiUseIteratorStack worklist;
+ for (size_t i = 0, e = block->numSuccessors(); i < e; i++) {
+ if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 2)")) {
+ return false;
+ }
+
+ if (!FlagPhiInputsAsImplicitlyUsed(mir, block, block->getSuccessor(i),
+ worklist)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool FlagEntryResumePointOperands(MIRGenerator* mir,
+ MBasicBlock* block) {
+ // Flag observable operands of the entry resume point as having implicit uses.
+ MResumePoint* rp = block->entryResumePoint();
+ while (rp) {
+ if (mir->shouldCancel("FlagEntryResumePointOperands")) {
+ return false;
+ }
+
+ const CompileInfo& info = rp->block()->info();
+ for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
+ if (info.isObservableSlot(i)) {
+ rp->getOperand(i)->setImplicitlyUsedUnchecked();
+ }
+ }
+
+ rp = rp->caller();
+ }
+
+ return true;
+}
+
+static bool FlagAllOperandsAsImplicitlyUsed(MIRGenerator* mir,
+ MBasicBlock* block) {
+ return FlagEntryResumePointOperands(mir, block) &&
+ FlagOperandsAsImplicitlyUsedAfter(mir, block, block->begin());
+}
+
+// WarpBuilder sets the alwaysBails flag on blocks that contain an
+// unconditional bailout. We trim any instructions in those blocks
+// after the first unconditional bailout, and remove any blocks that
+// are only reachable through bailing blocks.
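+//
+// A minimal sketch of the shape this pass looks for (illustrative only):
+//
+//            entry
+//            /   \
+//           A     B          A ends in an unconditional MBail.
+//            \   /
+//             join
+//
+// The instructions in A after the MBail are discarded, A's edge to |join| is
+// removed (so |join|'s phis drop their input from A), and any block that is
+// reachable only through A is deleted from the graph.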
+bool jit::PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph) {
+ JitSpew(JitSpew_Prune, "Begin");
+
+ // Pruning is guided by unconditional bailouts. Wasm does not have bailouts.
+ MOZ_ASSERT(!mir->compilingWasm());
+
+ Vector<MBasicBlock*, 16, SystemAllocPolicy> worklist;
+ uint32_t numMarked = 0;
+ bool needsTrim = false;
+
+ auto markReachable = [&](MBasicBlock* block) -> bool {
+ block->mark();
+ numMarked++;
+ if (block->alwaysBails()) {
+ needsTrim = true;
+ }
+ return worklist.append(block);
+ };
+
+ // The entry block is always reachable.
+ if (!markReachable(graph.entryBlock())) {
+ return false;
+ }
+
+ // The OSR entry block is always reachable if it exists.
+ if (graph.osrBlock() && !markReachable(graph.osrBlock())) {
+ return false;
+ }
+
+ // Iteratively mark all reachable blocks.
+ while (!worklist.empty()) {
+ if (mir->shouldCancel("Prune unused branches (marking reachable)")) {
+ return false;
+ }
+ MBasicBlock* block = worklist.popCopy();
+
+ JitSpew(JitSpew_Prune, "Visit block %u:", block->id());
+ JitSpewIndent indent(JitSpew_Prune);
+
+ // If this block always bails, then it does not reach its successors.
+ if (block->alwaysBails()) {
+ continue;
+ }
+
+ for (size_t i = 0; i < block->numSuccessors(); i++) {
+ MBasicBlock* succ = block->getSuccessor(i);
+ if (succ->isMarked()) {
+ continue;
+ }
+ JitSpew(JitSpew_Prune, "Reaches block %u", succ->id());
+ if (!markReachable(succ)) {
+ return false;
+ }
+ }
+ }
+
+ if (!needsTrim && numMarked == graph.numBlocks()) {
+ // There is nothing to prune.
+ graph.unmarkBlocks();
+ return true;
+ }
+
+ JitSpew(JitSpew_Prune, "Remove unreachable instructions and blocks:");
+ JitSpewIndent indent(JitSpew_Prune);
+
+ // The operands of removed instructions may be needed in baseline
+ // after bailing out.
+ for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
+ if (mir->shouldCancel("Prune unused branches (marking operands)")) {
+ return false;
+ }
+
+ MBasicBlock* block = *it++;
+ if (!block->isMarked()) {
+ // If we are removing the block entirely, mark the operands of every
+ // instruction as being implicitly used.
+      if (!FlagAllOperandsAsImplicitlyUsed(mir, block)) {
+        return false;
+      }
+ } else if (block->alwaysBails()) {
+ // If we are only trimming instructions after a bail, only mark operands
+ // of removed instructions.
+ MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
+      if (!FlagOperandsAsImplicitlyUsedAfter(mir, block, firstRemoved)) {
+        return false;
+      }
+ }
+ }
+
+ // Remove the blocks in post-order such that consumers are visited before
+ // the predecessors, the only exception being the Phi nodes of loop headers.
+ for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
+ if (mir->shouldCancel("Prune unused branches (removal loop)")) {
+ return false;
+ }
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+
+ MBasicBlock* block = *it++;
+ if (block->isMarked() && !block->alwaysBails()) {
+ continue;
+ }
+
+ // As we are going to replace/remove the last instruction, we first have
+ // to remove this block from the predecessor list of its successors.
+ size_t numSucc = block->numSuccessors();
+ for (uint32_t i = 0; i < numSucc; i++) {
+ MBasicBlock* succ = block->getSuccessor(i);
+ if (succ->isDead()) {
+ continue;
+ }
+
+ // Our dominators code expects all loop headers to have two predecessors.
+ // If we are removing the normal entry to a loop, but can still reach
+ // the loop header via OSR, we create a fake unreachable predecessor.
+ if (succ->isLoopHeader() && block != succ->backedge()) {
+ MOZ_ASSERT(graph.osrBlock());
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+
+ MBasicBlock* fake = MBasicBlock::NewFakeLoopPredecessor(graph, succ);
+ if (!fake) {
+ return false;
+ }
+ // Mark the block to avoid removing it as unreachable.
+ fake->mark();
+
+ JitSpew(JitSpew_Prune,
+ "Header %u only reachable by OSR. Add fake predecessor %u",
+ succ->id(), fake->id());
+ }
+
+ JitSpew(JitSpew_Prune, "Remove block edge %u -> %u.", block->id(),
+ succ->id());
+ succ->removePredecessor(block);
+ }
+
+ if (!block->isMarked()) {
+ // Remove unreachable blocks from the CFG.
+ JitSpew(JitSpew_Prune, "Remove block %u.", block->id());
+ graph.removeBlock(block);
+ } else {
+ // Remove unreachable instructions after unconditional bailouts.
+ JitSpew(JitSpew_Prune, "Trim block %u.", block->id());
+
+ // Discard all instructions after the first MBail.
+ MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
+ block->discardAllInstructionsStartingAt(firstRemoved);
+
+ if (block->outerResumePoint()) {
+ block->clearOuterResumePoint();
+ }
+
+ block->end(MUnreachable::New(graph.alloc()));
+ }
+ }
+ graph.unmarkBlocks();
+
+ return true;
+}
+
+static bool SplitCriticalEdgesForBlock(MIRGraph& graph, MBasicBlock* block) {
+ if (block->numSuccessors() < 2) {
+ return true;
+ }
+ for (size_t i = 0; i < block->numSuccessors(); i++) {
+ MBasicBlock* target = block->getSuccessor(i);
+ if (target->numPredecessors() < 2) {
+ continue;
+ }
+
+    // Create a simple new block which contains a goto and which splits the
+ // edge between block and target.
+ MBasicBlock* split = MBasicBlock::NewSplitEdge(graph, block, i, target);
+ if (!split) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// A critical edge is an edge which is neither its successor's only predecessor
+// nor its predecessor's only successor. Critical edges must be split to
+// prevent copy-insertion and code motion from affecting other edges.
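+//
+// For instance (illustrative sketch), the edge A -> C below is critical:
+// A has two successors and C has two predecessors.
+//
+//     A     B
+//    / \   /
+//   D   \ /
+//        C
+//
+// SplitCriticalEdgesForBlock replaces the A -> C edge with A -> S -> C,
+// where S is a new block containing only a goto, so that copies inserted
+// for that edge cannot affect the B -> C edge.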
+bool jit::SplitCriticalEdges(MIRGraph& graph) {
+ for (MBasicBlockIterator iter(graph.begin()); iter != graph.end(); iter++) {
+ MBasicBlock* block = *iter;
+ if (!SplitCriticalEdgesForBlock(graph, block)) {
+ return false;
+ }
+ }
+ return true;
+}
+
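+// Test whether |def| computes |x >>> 0|, i.e. an int32-typed MUrsh whose
+// right operand is the constant 0; such a value is an int32 whose bits are
+// to be reinterpreted as a uint32. (The checks below are the exact
+// conditions, including unwrapping MBeta.)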
+bool jit::IsUint32Type(const MDefinition* def) {
+ if (def->isBeta()) {
+ def = def->getOperand(0);
+ }
+
+ if (def->type() != MIRType::Int32) {
+ return false;
+ }
+
+ return def->isUrsh() && def->getOperand(1)->isConstant() &&
+ def->getOperand(1)->toConstant()->type() == MIRType::Int32 &&
+ def->getOperand(1)->toConstant()->toInt32() == 0;
+}
+
+// Determine whether phiBlock/testBlock simply compute a phi and perform a
+// test on it.
+static bool BlockIsSingleTest(MBasicBlock* phiBlock, MBasicBlock* testBlock,
+ MPhi** pphi, MTest** ptest) {
+ *pphi = nullptr;
+ *ptest = nullptr;
+
+ if (phiBlock != testBlock) {
+ MOZ_ASSERT(phiBlock->numSuccessors() == 1 &&
+ phiBlock->getSuccessor(0) == testBlock);
+ if (!phiBlock->begin()->isGoto()) {
+ return false;
+ }
+ }
+
+ auto iter = testBlock->rbegin();
+ if (!iter->isTest()) {
+ return false;
+ }
+ MTest* test = iter->toTest();
+
+ // Unwrap boolean conversion performed through the '!!' idiom.
+ MInstruction* testOrNot = test;
+ bool hasOddNumberOfNots = false;
+ while (++iter != testBlock->rend()) {
+ if (iter->isNot()) {
+ // The MNot must only be used by |testOrNot|.
+ auto* notIns = iter->toNot();
+ if (testOrNot->getOperand(0) != notIns) {
+ return false;
+ }
+ if (!notIns->hasOneUse()) {
+ return false;
+ }
+
+ testOrNot = notIns;
+ hasOddNumberOfNots = !hasOddNumberOfNots;
+ } else {
+      // Fail if there are any instructions other than MNot.
+ return false;
+ }
+ }
+
+ // There's an odd number of MNot, so this can't be the '!!' idiom.
+ if (hasOddNumberOfNots) {
+ return false;
+ }
+
+ MOZ_ASSERT(testOrNot->isTest() || testOrNot->isNot());
+
+ MDefinition* testInput = testOrNot->getOperand(0);
+ if (!testInput->isPhi()) {
+ return false;
+ }
+ MPhi* phi = testInput->toPhi();
+ if (phi->block() != phiBlock) {
+ return false;
+ }
+
+ for (MUseIterator iter = phi->usesBegin(); iter != phi->usesEnd(); ++iter) {
+ MUse* use = *iter;
+ if (use->consumer() == testOrNot) {
+ continue;
+ }
+ if (use->consumer()->isResumePoint()) {
+ MBasicBlock* useBlock = use->consumer()->block();
+ if (useBlock == phiBlock || useBlock == testBlock) {
+ continue;
+ }
+ }
+ return false;
+ }
+
+ for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd();
+ ++iter) {
+ if (*iter != phi) {
+ return false;
+ }
+ }
+
+ if (phiBlock != testBlock && !testBlock->phisEmpty()) {
+ return false;
+ }
+
+ *pphi = phi;
+ *ptest = test;
+
+ return true;
+}
+
+// Determine if value is directly or indirectly the test input.
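+// This also unwraps the '!!' idiom. For example (illustrative only), for
+//
+//   if (!!v) { ... }
+//
+// the test's input is MNot(MNot(v)), and |v| still counts as the test input
+// because the two negations cancel out; an odd number of MNot wrappers does
+// not.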
+static bool IsTestInputMaybeToBool(MTest* test, MDefinition* value) {
+ auto* input = test->input();
+ bool hasEvenNumberOfNots = true;
+ while (true) {
+ // Only accept if there's an even number of MNot.
+ if (input == value && hasEvenNumberOfNots) {
+ return true;
+ }
+
+ // Unwrap boolean conversion performed through the '!!' idiom.
+ if (input->isNot()) {
+ input = input->toNot()->input();
+ hasEvenNumberOfNots = !hasEvenNumberOfNots;
+ continue;
+ }
+
+ return false;
+ }
+}
+
+// Change block so that it ends in a goto to the specified target block.
+// existingPred is an existing predecessor of target with the same values
+// incoming to target as block.
+[[nodiscard]] static bool UpdateGotoSuccessor(TempAllocator& alloc,
+ MBasicBlock* block,
+ MBasicBlock* target,
+ MBasicBlock* existingPred) {
+ MInstruction* ins = block->lastIns();
+ MOZ_ASSERT(ins->isGoto());
+ ins->toGoto()->target()->removePredecessor(block);
+ block->discardLastIns();
+
+ MGoto* newGoto = MGoto::New(alloc, target);
+ block->end(newGoto);
+
+ return target->addPredecessorSameInputsAs(block, existingPred);
+}
+
+// Change block so that it ends in a test of the specified value, going to
+// either ifTrue or ifFalse. existingPred is an existing predecessor of ifTrue
+// or ifFalse with the same values incoming to ifTrue/ifFalse as block.
+// existingPred is not required to be a predecessor of ifTrue/ifFalse if block
+// already ends in a test going to that block on a true/false result.
+[[nodiscard]] static bool UpdateTestSuccessors(
+ TempAllocator& alloc, MBasicBlock* block, MDefinition* value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse, MBasicBlock* existingPred) {
+ MInstruction* ins = block->lastIns();
+ if (ins->isTest()) {
+ MTest* test = ins->toTest();
+ MOZ_ASSERT(test->input() == value);
+
+ if (ifTrue != test->ifTrue()) {
+ test->ifTrue()->removePredecessor(block);
+ if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
+ return false;
+ }
+ MOZ_ASSERT(test->ifTrue() == test->getSuccessor(0));
+ test->replaceSuccessor(0, ifTrue);
+ }
+
+ if (ifFalse != test->ifFalse()) {
+ test->ifFalse()->removePredecessor(block);
+ if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
+ return false;
+ }
+ MOZ_ASSERT(test->ifFalse() == test->getSuccessor(1));
+ test->replaceSuccessor(1, ifFalse);
+ }
+
+ return true;
+ }
+
+ MOZ_ASSERT(ins->isGoto());
+ ins->toGoto()->target()->removePredecessor(block);
+ block->discardLastIns();
+
+ MTest* test = MTest::New(alloc, value, ifTrue, ifFalse);
+ block->end(test);
+
+ if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
+ return false;
+ }
+ if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Look for a diamond pattern:
+ *
+ * initialBlock
+ * / \
+ * trueBranch falseBranch
+ * \ /
+ * phiBlock
+ * |
+ * testBlock
+ */
+static bool IsDiamondPattern(MBasicBlock* initialBlock) {
+ MInstruction* ins = initialBlock->lastIns();
+ if (!ins->isTest()) {
+ return false;
+ }
+ MTest* initialTest = ins->toTest();
+
+ MBasicBlock* trueBranch = initialTest->ifTrue();
+ if (trueBranch->numPredecessors() != 1 || trueBranch->numSuccessors() != 1) {
+ return false;
+ }
+
+ MBasicBlock* falseBranch = initialTest->ifFalse();
+ if (falseBranch->numPredecessors() != 1 ||
+ falseBranch->numSuccessors() != 1) {
+ return false;
+ }
+
+ MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
+ if (phiBlock != falseBranch->getSuccessor(0)) {
+ return false;
+ }
+ if (phiBlock->numPredecessors() != 2) {
+ return false;
+ }
+ return true;
+}
+
+static bool MaybeFoldDiamondConditionBlock(MIRGraph& graph,
+ MBasicBlock* initialBlock) {
+ MOZ_ASSERT(IsDiamondPattern(initialBlock));
+
+ // Optimize the MIR graph to improve the code generated for conditional
+ // operations. A test like 'if (a ? b : c)' normally requires four blocks,
+ // with a phi for the intermediate value. This can be improved to use three
+ // blocks with no phi value.
+
+ /*
+ * Look for a diamond pattern:
+ *
+ * initialBlock
+ * / \
+ * trueBranch falseBranch
+ * \ /
+ * phiBlock
+ * |
+ * testBlock
+ *
+ * Where phiBlock contains a single phi combining values pushed onto the
+ * stack by trueBranch and falseBranch, and testBlock contains a test on
+ * that phi. phiBlock and testBlock may be the same block; generated code
+ * will use different blocks if the (?:) op is in an inlined function.
+ */
+
+ MTest* initialTest = initialBlock->lastIns()->toTest();
+
+ MBasicBlock* trueBranch = initialTest->ifTrue();
+ MBasicBlock* falseBranch = initialTest->ifFalse();
+ if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() ||
+ falseBranch->isLoopBackedge()) {
+ return true;
+ }
+
+ MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
+ MBasicBlock* testBlock = phiBlock;
+ if (testBlock->numSuccessors() == 1) {
+ if (testBlock->isLoopBackedge()) {
+ return true;
+ }
+ testBlock = testBlock->getSuccessor(0);
+ if (testBlock->numPredecessors() != 1) {
+ return true;
+ }
+ }
+
+ MPhi* phi;
+ MTest* finalTest;
+ if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
+ return true;
+ }
+
+ MOZ_ASSERT(phi->numOperands() == 2);
+
+ // Make sure the test block does not have any outgoing loop backedges.
+ if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
+ return false;
+ }
+
+ MDefinition* trueResult =
+ phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
+ MDefinition* falseResult =
+ phi->getOperand(phiBlock->indexForPredecessor(falseBranch));
+
+ // OK, we found the desired pattern, now transform the graph.
+
+ // Remove the phi from phiBlock.
+ phiBlock->discardPhi(*phiBlock->phisBegin());
+
+ // Change the end of the block to a test that jumps directly to successors of
+ // testBlock, rather than to testBlock itself.
+
+ if (IsTestInputMaybeToBool(initialTest, trueResult)) {
+ if (!UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(),
+ testBlock)) {
+ return false;
+ }
+ } else {
+ if (!UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
+ finalTest->ifTrue(), finalTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ }
+
+ if (IsTestInputMaybeToBool(initialTest, falseResult)) {
+ if (!UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ } else {
+ if (!UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
+ finalTest->ifTrue(), finalTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ }
+
+ // Remove phiBlock, if different from testBlock.
+ if (phiBlock != testBlock) {
+ testBlock->removePredecessor(phiBlock);
+ graph.removeBlock(phiBlock);
+ }
+
+ // Remove testBlock itself.
+ finalTest->ifTrue()->removePredecessor(testBlock);
+ finalTest->ifFalse()->removePredecessor(testBlock);
+ graph.removeBlock(testBlock);
+
+ return true;
+}
+
+/*
+ * Look for a triangle pattern:
+ *
+ * initialBlock
+ * / \
+ * trueBranch |
+ * \ /
+ * phiBlock+falseBranch
+ * |
+ * testBlock
+ *
+ * Or:
+ *
+ * initialBlock
+ * / \
+ * | falseBranch
+ * \ /
+ * phiBlock+trueBranch
+ * |
+ * testBlock
+ */
+static bool IsTrianglePattern(MBasicBlock* initialBlock) {
+ MInstruction* ins = initialBlock->lastIns();
+ if (!ins->isTest()) {
+ return false;
+ }
+ MTest* initialTest = ins->toTest();
+
+ MBasicBlock* trueBranch = initialTest->ifTrue();
+ MBasicBlock* falseBranch = initialTest->ifFalse();
+
+ if (trueBranch->numSuccessors() == 1 &&
+ trueBranch->getSuccessor(0) == falseBranch) {
+ if (trueBranch->numPredecessors() != 1) {
+ return false;
+ }
+ if (falseBranch->numPredecessors() != 2) {
+ return false;
+ }
+ return true;
+ }
+
+ if (falseBranch->numSuccessors() == 1 &&
+ falseBranch->getSuccessor(0) == trueBranch) {
+ if (trueBranch->numPredecessors() != 2) {
+ return false;
+ }
+ if (falseBranch->numPredecessors() != 1) {
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+static bool MaybeFoldTriangleConditionBlock(MIRGraph& graph,
+ MBasicBlock* initialBlock) {
+ MOZ_ASSERT(IsTrianglePattern(initialBlock));
+
+ // Optimize the MIR graph to improve the code generated for boolean
+ // operations. A test like 'if (a && b)' normally requires three blocks, with
+ // a phi for the intermediate value. This can be improved to use no phi value.
+
+ /*
+ * Look for a triangle pattern:
+ *
+ * initialBlock
+ * / \
+ * trueBranch |
+ * \ /
+ * phiBlock+falseBranch
+ * |
+ * testBlock
+ *
+ * Or:
+ *
+ * initialBlock
+ * / \
+ * | falseBranch
+ * \ /
+ * phiBlock+trueBranch
+ * |
+ * testBlock
+ *
+ * Where phiBlock contains a single phi combining values pushed onto the stack
+ * by trueBranch and falseBranch, and testBlock contains a test on that phi.
+ * phiBlock and testBlock may be the same block; generated code will use
+ * different blocks if the (&&) op is in an inlined function.
+ */
+
+ MTest* initialTest = initialBlock->lastIns()->toTest();
+
+ MBasicBlock* trueBranch = initialTest->ifTrue();
+ MBasicBlock* falseBranch = initialTest->ifFalse();
+ if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() ||
+ falseBranch->isLoopBackedge()) {
+ return true;
+ }
+
+ MBasicBlock* phiBlock;
+ if (trueBranch->numSuccessors() == 1 &&
+ trueBranch->getSuccessor(0) == falseBranch) {
+ phiBlock = falseBranch;
+ } else {
+ MOZ_ASSERT(falseBranch->getSuccessor(0) == trueBranch);
+ phiBlock = trueBranch;
+ }
+
+ MBasicBlock* testBlock = phiBlock;
+ if (testBlock->numSuccessors() == 1) {
+ MOZ_ASSERT(!testBlock->isLoopBackedge());
+
+ testBlock = testBlock->getSuccessor(0);
+ if (testBlock->numPredecessors() != 1) {
+ return true;
+ }
+ }
+
+ MPhi* phi;
+ MTest* finalTest;
+ if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
+ return true;
+ }
+
+ MOZ_ASSERT(phi->numOperands() == 2);
+
+ // If the phi-operand doesn't match the initial input, we can't fold the test.
+ auto* phiInputForInitialBlock =
+ phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
+ if (!IsTestInputMaybeToBool(initialTest, phiInputForInitialBlock)) {
+ return true;
+ }
+
+ // Make sure the test block does not have any outgoing loop backedges.
+ if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
+ return false;
+ }
+
+ MDefinition* trueResult;
+ MDefinition* falseResult;
+ if (phiBlock == trueBranch) {
+ trueResult = phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
+ falseResult = phi->getOperand(phiBlock->indexForPredecessor(falseBranch));
+ } else {
+ trueResult = phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
+ falseResult = phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
+ }
+
+ // OK, we found the desired pattern, now transform the graph.
+
+ // Remove the phi from phiBlock.
+ phiBlock->discardPhi(*phiBlock->phisBegin());
+
+ // Change the end of the block to a test that jumps directly to successors of
+ // testBlock, rather than to testBlock itself.
+
+ if (phiBlock == trueBranch) {
+ if (!UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
+ finalTest->ifTrue(), initialTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ } else if (IsTestInputMaybeToBool(initialTest, trueResult)) {
+ if (!UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(),
+ testBlock)) {
+ return false;
+ }
+ } else {
+ if (!UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
+ finalTest->ifTrue(), finalTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ }
+
+ if (phiBlock == falseBranch) {
+ if (!UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
+ initialTest->ifTrue(), finalTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ } else if (IsTestInputMaybeToBool(initialTest, falseResult)) {
+ if (!UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ } else {
+ if (!UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
+ finalTest->ifTrue(), finalTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ }
+
+ // Remove phiBlock, if different from testBlock.
+ if (phiBlock != testBlock) {
+ testBlock->removePredecessor(phiBlock);
+ graph.removeBlock(phiBlock);
+ }
+
+ // Remove testBlock itself.
+ finalTest->ifTrue()->removePredecessor(testBlock);
+ finalTest->ifFalse()->removePredecessor(testBlock);
+ graph.removeBlock(testBlock);
+
+ return true;
+}
+
+static bool MaybeFoldConditionBlock(MIRGraph& graph,
+ MBasicBlock* initialBlock) {
+ if (IsDiamondPattern(initialBlock)) {
+ return MaybeFoldDiamondConditionBlock(graph, initialBlock);
+ }
+ if (IsTrianglePattern(initialBlock)) {
+ return MaybeFoldTriangleConditionBlock(graph, initialBlock);
+ }
+ return true;
+}
+
+static bool MaybeFoldTestBlock(MIRGraph& graph, MBasicBlock* initialBlock) {
+ // Handle test expressions on more than two inputs. For example
+ // |if ((x > 10) && (y > 20) && (z > 30)) { ... }|, which results in the below
+ // pattern.
+ //
+ // Look for the pattern:
+ // ┌─────────────────┐
+ // 1 │ 1 compare │
+ // ┌─────┤ 2 test compare1 │
+ // │ └──────┬──────────┘
+ // │ │0
+ // ┌───────▼─────────┐ │
+ // │ 3 compare │ │
+ // │ 4 test compare3 │ └──────────┐
+ // └──┬──────────┬───┘ │
+ // 1│ │0 │
+ // ┌──────────▼──────┐ │ │
+ // │ 5 compare │ └─────────┐ │
+ // │ 6 goto │ │ │
+ // └───────┬─────────┘ │ │
+ // │ │ │
+ // │ ┌──────────────────▼───────▼───────┐
+ // └───►│ 9 phi compare1 compare3 compare5 │
+ // │10 goto │
+ // └────────────────┬─────────────────┘
+ // │
+ // ┌────────▼────────┐
+ // │11 test phi9 │
+ // └─────┬─────┬─────┘
+ // 1│ │0
+ // ┌────────────┐ │ │ ┌─────────────┐
+ // │ TrueBranch │◄────┘ └─────►│ FalseBranch │
+ // └────────────┘ └─────────────┘
+ //
+ // And transform it to:
+ //
+ // ┌─────────────────┐
+ // 1 │ 1 compare │
+ // ┌───┤ 2 test compare1 │
+ // │ └──────────┬──────┘
+ // │ │0
+ // ┌───────▼─────────┐ │
+ // │ 3 compare │ │
+ // │ 4 test compare3 │ │
+ // └──┬─────────┬────┘ │
+ // 1│ │0 │
+ // ┌──────────▼──────┐ │ │
+ // │ 5 compare │ └──────┐ │
+ // │ 6 test compare5 │ │ │
+ // └────┬────────┬───┘ │ │
+ // 1│ │0 │ │
+ // ┌─────▼──────┐ │ ┌───▼──▼──────┐
+ // │ TrueBranch │ └─────────► FalseBranch │
+ // └────────────┘ └─────────────┘
+
+ auto* ins = initialBlock->lastIns();
+ if (!ins->isTest()) {
+ return true;
+ }
+ auto* initialTest = ins->toTest();
+
+ MBasicBlock* trueBranch = initialTest->ifTrue();
+ MBasicBlock* falseBranch = initialTest->ifFalse();
+
+ // MaybeFoldConditionBlock handles the case for two operands.
+ MBasicBlock* phiBlock;
+ if (trueBranch->numPredecessors() > 2) {
+ phiBlock = trueBranch;
+ } else if (falseBranch->numPredecessors() > 2) {
+ phiBlock = falseBranch;
+ } else {
+ return true;
+ }
+
+ MBasicBlock* testBlock = phiBlock;
+ if (testBlock->numSuccessors() == 1) {
+ if (testBlock->isLoopBackedge()) {
+ return true;
+ }
+ testBlock = testBlock->getSuccessor(0);
+ if (testBlock->numPredecessors() != 1) {
+ return true;
+ }
+ }
+
+ MOZ_ASSERT(!phiBlock->isLoopBackedge());
+
+ MPhi* phi = nullptr;
+ MTest* finalTest = nullptr;
+ if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
+ return true;
+ }
+
+ MOZ_ASSERT(phiBlock->numPredecessors() == phi->numOperands());
+
+ // If the phi-operand doesn't match the initial input, we can't fold the test.
+ auto* phiInputForInitialBlock =
+ phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
+ if (!IsTestInputMaybeToBool(initialTest, phiInputForInitialBlock)) {
+ return true;
+ }
+
+ MBasicBlock* newTestBlock = nullptr;
+ MDefinition* newTestInput = nullptr;
+
+ // The block of each phi operand must either end with a test instruction on
+  // that phi operand, or it must be the sole block which ends with a goto
+  // instruction.
+ for (size_t i = 0; i < phiBlock->numPredecessors(); i++) {
+ auto* pred = phiBlock->getPredecessor(i);
+ auto* operand = phi->getOperand(i);
+
+ // Each predecessor must end with either a test or goto instruction.
+ auto* lastIns = pred->lastIns();
+ if (lastIns->isGoto() && !newTestBlock) {
+ newTestBlock = pred;
+ newTestInput = operand;
+ } else if (lastIns->isTest()) {
+ if (!IsTestInputMaybeToBool(lastIns->toTest(), operand)) {
+ return true;
+ }
+ } else {
+ return true;
+ }
+
+ MOZ_ASSERT(!pred->isLoopBackedge());
+ }
+
+ // Ensure we found the single goto block.
+ if (!newTestBlock) {
+ return true;
+ }
+
+ // Make sure the test block does not have any outgoing loop backedges.
+ if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
+ return false;
+ }
+
+ // OK, we found the desired pattern, now transform the graph.
+
+ // Remove the phi from phiBlock.
+ phiBlock->discardPhi(*phiBlock->phisBegin());
+
+ // Create the new test instruction.
+ if (!UpdateTestSuccessors(graph.alloc(), newTestBlock, newTestInput,
+ finalTest->ifTrue(), finalTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+
+ // Update all test instructions to point to the final target.
+ while (phiBlock->numPredecessors()) {
+ mozilla::DebugOnly<size_t> oldNumPred = phiBlock->numPredecessors();
+
+ auto* pred = phiBlock->getPredecessor(0);
+ auto* test = pred->lastIns()->toTest();
+ if (test->ifTrue() == phiBlock) {
+ if (!UpdateTestSuccessors(graph.alloc(), pred, test->input(),
+ finalTest->ifTrue(), test->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(test->ifFalse() == phiBlock);
+ if (!UpdateTestSuccessors(graph.alloc(), pred, test->input(),
+ test->ifTrue(), finalTest->ifFalse(),
+ testBlock)) {
+ return false;
+ }
+ }
+
+ // Ensure we've made progress.
+ MOZ_ASSERT(phiBlock->numPredecessors() + 1 == oldNumPred);
+ }
+
+ // Remove phiBlock, if different from testBlock.
+ if (phiBlock != testBlock) {
+ testBlock->removePredecessor(phiBlock);
+ graph.removeBlock(phiBlock);
+ }
+
+ // Remove testBlock itself.
+ finalTest->ifTrue()->removePredecessor(testBlock);
+ finalTest->ifFalse()->removePredecessor(testBlock);
+ graph.removeBlock(testBlock);
+
+ return true;
+}
+
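+// Fold the block patterns created for conditional and short-circuiting
+// expressions. For example (illustrative), |if (a ? b : c)| produces the
+// diamond pattern handled by MaybeFoldDiamondConditionBlock, |if (a && b)|
+// produces the triangle pattern handled by MaybeFoldTriangleConditionBlock,
+// and longer chains such as |if (a && b && c)| are handled by
+// MaybeFoldTestBlock.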
+bool jit::FoldTests(MIRGraph& graph) {
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
+ block++) {
+ if (!MaybeFoldConditionBlock(graph, *block)) {
+ return false;
+ }
+ if (!MaybeFoldTestBlock(graph, *block)) {
+ return false;
+ }
+ }
+ return true;
+}
+
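+// Fold away trivially empty blocks: a block whose only instruction is its
+// final control instruction, with a single predecessor, a single successor
+// that has no other predecessors, no phis, and no outer resume point, is
+// removed by redirecting its predecessor's edge straight to its successor.
+// (Summary only; the checks below are the exact conditions.)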
+bool jit::FoldEmptyBlocks(MIRGraph& graph) {
+ for (MBasicBlockIterator iter(graph.begin()); iter != graph.end();) {
+ MBasicBlock* block = *iter;
+ iter++;
+
+ if (block->numPredecessors() != 1 || block->numSuccessors() != 1) {
+ continue;
+ }
+
+ if (!block->phisEmpty()) {
+ continue;
+ }
+
+ if (block->outerResumePoint()) {
+ continue;
+ }
+
+ if (*block->begin() != *block->rbegin()) {
+ continue;
+ }
+
+ MBasicBlock* succ = block->getSuccessor(0);
+ MBasicBlock* pred = block->getPredecessor(0);
+
+ if (succ->numPredecessors() != 1) {
+ continue;
+ }
+
+ size_t pos = pred->getSuccessorIndex(block);
+ pred->lastIns()->replaceSuccessor(pos, succ);
+
+ graph.removeBlock(block);
+
+ if (!succ->addPredecessorSameInputsAs(pred, block)) {
+ return false;
+ }
+ succ->removePredecessor(block);
+ }
+ return true;
+}
+
+static void EliminateTriviallyDeadResumePointOperands(MIRGraph& graph,
+ MResumePoint* rp) {
+ // If we will pop the top of the stack immediately after resuming,
+ // then don't preserve the top value in the resume point.
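+  //
+  // This happens, for instance, for plain expression statements (sketch):
+  //
+  //   obj.method();   // call result is pushed, then immediately dropped
+  //                   // by the following JSOp::Pop
+  //
+  // so the top-of-stack operand can be replaced with the optimized-out
+  // constant instead of being kept alive by this resume point.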
+ if (rp->mode() != ResumeMode::ResumeAt) {
+ return;
+ }
+
+ jsbytecode* pc = rp->pc();
+ if (JSOp(*pc) == JSOp::JumpTarget) {
+ pc += JSOpLength_JumpTarget;
+ }
+ if (JSOp(*pc) != JSOp::Pop) {
+ return;
+ }
+
+ size_t top = rp->stackDepth() - 1;
+ MOZ_ASSERT(!rp->isObservableOperand(top));
+
+ MDefinition* def = rp->getOperand(top);
+ if (def->isConstant()) {
+ return;
+ }
+
+ MConstant* constant = rp->block()->optimizedOutConstant(graph.alloc());
+ rp->replaceOperand(top, constant);
+}
+
+// Operands to a resume point which are dead at the point of the resume can be
+// replaced with a magic value. This pass only replaces operands which are
+// trivially dead.
+//
+// This is intended to ensure that extra resume points within a basic block
+// will not artificially extend the lifetimes of any SSA values. This could
+// otherwise occur if the new resume point captured a value which is created
+// between the old and new resume point and is dead at the new resume point.
+bool jit::EliminateTriviallyDeadResumePointOperands(MIRGenerator* mir,
+ MIRGraph& graph) {
+ for (auto* block : graph) {
+ if (MResumePoint* rp = block->entryResumePoint()) {
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+ ::EliminateTriviallyDeadResumePointOperands(graph, rp);
+ }
+ }
+ return true;
+}
+
+// Operands to a resume point which are dead at the point of the resume can be
+// replaced with a magic value. This analysis supports limited detection of
+// dead operands, pruning those which are defined in the resume point's basic
+// block and have no uses outside the block or at points later than the resume
+// point.
+//
+// This is intended to ensure that extra resume points within a basic block
+// will not artificially extend the lifetimes of any SSA values. This could
+// otherwise occur if the new resume point captured a value which is created
+// between the old and new resume point and is dead at the new resume point.
+bool jit::EliminateDeadResumePointOperands(MIRGenerator* mir, MIRGraph& graph) {
+ // If we are compiling try blocks, locals and arguments may be observable
+ // from catch or finally blocks (which Ion does not compile). For now just
+ // disable the pass in this case.
+ if (graph.hasTryBlock()) {
+ return true;
+ }
+
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
+ block++) {
+ if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)")) {
+ return false;
+ }
+
+ if (MResumePoint* rp = block->entryResumePoint()) {
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+ ::EliminateTriviallyDeadResumePointOperands(graph, rp);
+ }
+
+ // The logic below can get confused on infinite loops.
+ if (block->isLoopHeader() && block->backedge() == *block) {
+ continue;
+ }
+
+ for (MInstructionIterator ins = block->begin(); ins != block->end();
+ ins++) {
+ if (MResumePoint* rp = ins->resumePoint()) {
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+ ::EliminateTriviallyDeadResumePointOperands(graph, rp);
+ }
+
+ // No benefit to replacing constant operands with other constants.
+ if (ins->isConstant()) {
+ continue;
+ }
+
+ // Scanning uses does not give us sufficient information to tell
+ // where instructions that are involved in box/unbox operations or
+ // parameter passing might be live. Rewriting uses of these terms
+ // in resume points may affect the interpreter's behavior. Rather
+ // than doing a more sophisticated analysis, just ignore these.
+ if (ins->isUnbox() || ins->isParameter() || ins->isBoxNonStrictThis()) {
+ continue;
+ }
+
+ // Early intermediate values captured by resume points, such as
+ // ArrayState and its allocation, may be legitimately dead in Ion code,
+ // but are still needed if we bail out. They can recover on bailout.
+ if (ins->isRecoveredOnBailout()) {
+ MOZ_ASSERT(ins->canRecoverOnBailout());
+ continue;
+ }
+
+ // If the instruction's behavior has been constant folded into a
+ // separate instruction, we can't determine precisely where the
+ // instruction becomes dead and can't eliminate its uses.
+ if (ins->isImplicitlyUsed()) {
+ continue;
+ }
+
+ // Check if this instruction's result is only used within the
+ // current block, and keep track of its last use in a definition
+ // (not resume point). This requires the instructions in the block
+ // to be numbered, ensured by running this immediately after alias
+ // analysis.
+ uint32_t maxDefinition = 0;
+ for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();
+ uses++) {
+ MNode* consumer = uses->consumer();
+ if (consumer->isResumePoint()) {
+          // If the instruction is captured by one of the resume points, then
+ // it might be observed indirectly while the frame is live on the
+ // stack, so it has to be computed.
+ MResumePoint* resume = consumer->toResumePoint();
+ if (resume->isObservableOperand(*uses)) {
+ maxDefinition = UINT32_MAX;
+ break;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ if (def->block() != *block || def->isBox() || def->isPhi()) {
+ maxDefinition = UINT32_MAX;
+ break;
+ }
+ maxDefinition = std::max(maxDefinition, def->id());
+ }
+ if (maxDefinition == UINT32_MAX) {
+ continue;
+ }
+
+ // Walk the uses a second time, removing any in resume points after
+ // the last use in a definition.
+ for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();) {
+ MUse* use = *uses++;
+ if (use->consumer()->isDefinition()) {
+ continue;
+ }
+ MResumePoint* mrp = use->consumer()->toResumePoint();
+ if (mrp->block() != *block || !mrp->instruction() ||
+ mrp->instruction() == *ins ||
+ mrp->instruction()->id() <= maxDefinition) {
+ continue;
+ }
+
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+
+ // Store an optimized out magic value in place of all dead
+ // resume point operands. Making any such substitution can in
+ // general alter the interpreter's behavior, even though the
+ // code is dead, as the interpreter will still execute opcodes
+          // whose effects cannot be observed. If the magic value were to
+          // flow to, say, a dead property access, the interpreter could
+          // throw an exception; we avoid this problem
+ // by removing dead operands before removing dead code.
+ MConstant* constant =
+ MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
+ block->insertBefore(*(block->begin()), constant);
+ use->replaceProducer(constant);
+ }
+ }
+ }
+
+ return true;
+}
+
+// Test whether |def| would be needed if it had no uses.
+bool js::jit::DeadIfUnused(const MDefinition* def) {
+ // Effectful instructions of course cannot be removed.
+ if (def->isEffectful()) {
+ return false;
+ }
+
+ // Never eliminate guard instructions.
+ if (def->isGuard()) {
+ return false;
+ }
+
+ // Required to be preserved, as the type guard related to this instruction
+ // is part of the semantics of a transformation.
+ if (def->isGuardRangeBailouts()) {
+ return false;
+ }
+
+  // Control instructions have no uses, but also shouldn't be optimized out.
+ if (def->isControlInstruction()) {
+ return false;
+ }
+
+ // Used when lowering to generate the corresponding snapshots and aggregate
+ // the list of recover instructions to be repeated.
+ if (def->isInstruction() && def->toInstruction()->resumePoint()) {
+ return false;
+ }
+
+ return true;
+}
+
+// Similar to DeadIfUnused(), but additionally allows effectful instructions.
+bool js::jit::DeadIfUnusedAllowEffectful(const MDefinition* def) {
+ // Never eliminate guard instructions.
+ if (def->isGuard()) {
+ return false;
+ }
+
+ // Required to be preserved, as the type guard related to this instruction
+ // is part of the semantics of a transformation.
+ if (def->isGuardRangeBailouts()) {
+ return false;
+ }
+
+  // Control instructions have no uses, but also shouldn't be optimized out.
+ if (def->isControlInstruction()) {
+ return false;
+ }
+
+ // Used when lowering to generate the corresponding snapshots and aggregate
+ // the list of recover instructions to be repeated.
+ if (def->isInstruction() && def->toInstruction()->resumePoint()) {
+ // All effectful instructions must have a resume point attached. We're
+ // allowing effectful instructions here, so we have to ignore any resume
+ // points if we want to consider effectful instructions as dead.
+ if (!def->isEffectful()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Test whether |def| may be safely discarded, due to being dead or due to being
+// located in a basic block which has itself been marked for discarding.
+bool js::jit::IsDiscardable(const MDefinition* def) {
+ return !def->hasUses() && (DeadIfUnused(def) || def->block()->isMarked());
+}
+
+// Similar to IsDiscardable(), but additionally allows effectful instructions.
+bool js::jit::IsDiscardableAllowEffectful(const MDefinition* def) {
+ return !def->hasUses() &&
+ (DeadIfUnusedAllowEffectful(def) || def->block()->isMarked());
+}
+
+// Instructions are useless if they are unused and have no side effects.
+// This pass eliminates useless instructions.
+// The graph itself is unchanged.
+bool jit::EliminateDeadCode(MIRGenerator* mir, MIRGraph& graph) {
+ // Traverse in postorder so that we hit uses before definitions.
+ // Traverse instruction list backwards for the same reason.
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
+ block++) {
+ if (mir->shouldCancel("Eliminate Dead Code (main loop)")) {
+ return false;
+ }
+
+ // Remove unused instructions.
+ for (MInstructionReverseIterator iter = block->rbegin();
+ iter != block->rend();) {
+ MInstruction* inst = *iter++;
+ if (js::jit::IsDiscardable(inst)) {
+ block->discard(inst);
+ }
+ }
+ }
+
+ return true;
+}
+
+static inline bool IsPhiObservable(MPhi* phi, Observability observe) {
+ // If the phi has uses which are not reflected in SSA, then behavior in the
+ // interpreter may be affected by removing the phi.
+ if (phi->isImplicitlyUsed()) {
+ return true;
+ }
+
+ // Check for uses of this phi node outside of other phi nodes.
+ // Note that, initially, we skip reading resume points, which we
+ // don't count as actual uses. If the only uses are resume points,
+ // then the SSA name is never consumed by the program. However,
+ // after optimizations have been performed, it's possible that the
+ // actual uses in the program have been (incorrectly) optimized
+ // away, so we must be more conservative and consider resume
+ // points as well.
+ for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
+ MNode* consumer = iter->consumer();
+ if (consumer->isResumePoint()) {
+ MResumePoint* resume = consumer->toResumePoint();
+ if (observe == ConservativeObservability) {
+ return true;
+ }
+ if (resume->isObservableOperand(*iter)) {
+ return true;
+ }
+ } else {
+ MDefinition* def = consumer->toDefinition();
+ if (!def->isPhi()) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+// Handles cases like:
+// x is phi(a, x) --> a
+// x is phi(a, a) --> a
+static inline MDefinition* IsPhiRedundant(MPhi* phi) {
+ MDefinition* first = phi->operandIfRedundant();
+ if (first == nullptr) {
+ return nullptr;
+ }
+
+ // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
+ if (phi->isImplicitlyUsed()) {
+ first->setImplicitlyUsedUnchecked();
+ }
+
+ return first;
+}
+
+bool jit::EliminatePhis(MIRGenerator* mir, MIRGraph& graph,
+ Observability observe) {
+ // Eliminates redundant or unobservable phis from the graph. A
+ // redundant phi is something like b = phi(a, a) or b = phi(a, b),
+ // both of which can be replaced with a. An unobservable phi is
+  // one whose value is never used in the program.
+ //
+ // Note that we must be careful not to eliminate phis representing
+ // values that the interpreter will require later. When the graph
+ // is first constructed, we can be more aggressive, because there
+ // is a greater correspondence between the CFG and the bytecode.
+ // After optimizations such as GVN have been performed, however,
+ // the bytecode and CFG may not correspond as closely to one
+ // another. In that case, we must be more conservative. The flag
+ // |conservativeObservability| is used to indicate that eliminate
+ // phis is being run after some optimizations have been performed,
+ // and thus we should use more conservative rules about
+ // observability. The particular danger is that we can optimize
+ // away uses of a phi because we think they are not executable,
+ // but the foundation for that assumption is false TI information
+ // that will eventually be invalidated. Therefore, if
+ // |conservativeObservability| is set, we will consider any use
+ // from a resume point to be observable. Otherwise, we demand a
+ // use from an actual instruction.
+
+ Vector<MPhi*, 16, SystemAllocPolicy> worklist;
+
+ // Add all observable phis to a worklist. We use the "in worklist" bit to
+ // mean "this phi is live".
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
+ block++) {
+ MPhiIterator iter = block->phisBegin();
+ while (iter != block->phisEnd()) {
+ MPhi* phi = *iter++;
+
+ if (mir->shouldCancel("Eliminate Phis (populate loop)")) {
+ return false;
+ }
+
+      // Flag all phis as unused; only observable phis will be marked as used
+      // when processed by the worklist.
+ phi->setUnused();
+
+ // If the phi is redundant, remove it here.
+ if (MDefinition* redundant = IsPhiRedundant(phi)) {
+ phi->justReplaceAllUsesWith(redundant);
+ block->discardPhi(phi);
+ continue;
+ }
+
+ // Enqueue observable Phis.
+ if (IsPhiObservable(phi, observe)) {
+ phi->setInWorklist();
+ if (!worklist.append(phi)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ // Iteratively mark all phis reachable from live phis.
+ while (!worklist.empty()) {
+ if (mir->shouldCancel("Eliminate Phis (worklist)")) {
+ return false;
+ }
+
+ MPhi* phi = worklist.popCopy();
+ MOZ_ASSERT(phi->isUnused());
+ phi->setNotInWorklist();
+
+ // The removal of Phis can produce newly redundant phis.
+ if (MDefinition* redundant = IsPhiRedundant(phi)) {
+ // Add to the worklist the used phis which are impacted.
+ for (MUseDefIterator it(phi); it; it++) {
+ if (it.def()->isPhi()) {
+ MPhi* use = it.def()->toPhi();
+ if (!use->isUnused()) {
+ use->setUnusedUnchecked();
+ use->setInWorklist();
+ if (!worklist.append(use)) {
+ return false;
+ }
+ }
+ }
+ }
+ phi->justReplaceAllUsesWith(redundant);
+ } else {
+      // Otherwise flag it as used.
+ phi->setNotUnused();
+ }
+
+ // The current phi is/was used, so all its operands are used.
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (!in->isPhi() || !in->isUnused() || in->isInWorklist()) {
+ continue;
+ }
+ in->setInWorklist();
+ if (!worklist.append(in->toPhi())) {
+ return false;
+ }
+ }
+ }
+
+ // Sweep dead phis.
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
+ block++) {
+ MPhiIterator iter = block->phisBegin();
+ while (iter != block->phisEnd()) {
+ MPhi* phi = *iter++;
+ if (phi->isUnused()) {
+ if (!phi->optimizeOutAllUses(graph.alloc())) {
+ return false;
+ }
+ block->discardPhi(phi);
+ }
+ }
+ }
+
+ return true;
+}
+
+namespace {
+
+// The type analysis algorithm inserts conversions and box/unbox instructions
+// to make the IR graph well-typed for future passes.
+//
+// Phi adjustment: If a phi's inputs are all the same type, the phi is
+// specialized to return that type.
+//
+// Input adjustment: Each instruction is asked to apply conversion operations
+// to its inputs. This may include Box, Unbox, or other instruction-specific
+// type conversion operations.
+//
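+// As a rough example (illustrative only): a phi of an Int32 and a Double
+// operand is specialized to Double and an MToDouble is inserted for the
+// Int32 input, while a phi of an Int32 and a String operand cannot be
+// specialized, so it becomes MIRType::Value and box instructions are
+// inserted for its inputs.
+//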
+class TypeAnalyzer {
+ MIRGenerator* mir;
+ MIRGraph& graph;
+ Vector<MPhi*, 0, SystemAllocPolicy> phiWorklist_;
+
+ TempAllocator& alloc() const { return graph.alloc(); }
+
+ bool addPhiToWorklist(MPhi* phi) {
+ if (phi->isInWorklist()) {
+ return true;
+ }
+ if (!phiWorklist_.append(phi)) {
+ return false;
+ }
+ phi->setInWorklist();
+ return true;
+ }
+ MPhi* popPhi() {
+ MPhi* phi = phiWorklist_.popCopy();
+ phi->setNotInWorklist();
+ return phi;
+ }
+
+ [[nodiscard]] bool propagateAllPhiSpecializations();
+
+ bool respecialize(MPhi* phi, MIRType type);
+ bool propagateSpecialization(MPhi* phi);
+ bool specializePhis();
+ bool specializeOsrOnlyPhis();
+ void replaceRedundantPhi(MPhi* phi);
+ bool adjustPhiInputs(MPhi* phi);
+ bool adjustInputs(MDefinition* def);
+ bool insertConversions();
+
+ bool checkFloatCoherency();
+ bool graphContainsFloat32();
+ bool markPhiConsumers();
+ bool markPhiProducers();
+ bool specializeValidFloatOps();
+ bool tryEmitFloatOperations();
+
+ bool shouldSpecializeOsrPhis() const;
+ MIRType guessPhiType(MPhi* phi) const;
+
+ public:
+ TypeAnalyzer(MIRGenerator* mir, MIRGraph& graph) : mir(mir), graph(graph) {}
+
+ bool analyze();
+};
+
+} /* anonymous namespace */
+
+bool TypeAnalyzer::shouldSpecializeOsrPhis() const {
+ // [SMDOC] OSR Phi Type Specialization
+ //
+ // Without special handling for OSR phis, we end up with unspecialized phis
+ // (MIRType::Value) in the loop (pre)header and other blocks, resulting in
+ // unnecessary boxing and unboxing in the loop body.
+ //
+ // To fix this, phi type specialization needs special code to deal with the
+ // OSR entry block. Recall that OSR results in the following basic block
+ // structure:
+ //
+ // +------------------+ +-----------------+
+ // | Code before loop | | OSR entry block |
+ // +------------------+ +-----------------+
+ // | |
+ // | |
+ // | +---------------+ |
+ // +---------> | OSR preheader | <---------+
+ // +---------------+
+ // |
+ // V
+ // +---------------+
+ // | Loop header |<-----+
+ // +---------------+ |
+ // | |
+ // ... |
+ // | |
+ // +---------------+ |
+ // | Loop backedge |------+
+ // +---------------+
+ //
+ // OSR phi specialization happens in three steps:
+ //
+ // (1) Specialize phis but ignore MOsrValue phi inputs. In other words,
+ // pretend the OSR entry block doesn't exist. See guessPhiType.
+ //
+ // (2) Once phi specialization is done, look at the types of loop header phis
+ // and add these types to the corresponding preheader phis. This way, the
+ // types of the preheader phis are based on the code before the loop and
+ // the code in the loop body. These are exactly the types we expect for
+ // the OSR Values. See the last part of TypeAnalyzer::specializePhis.
+ //
+ // (3) For type-specialized preheader phis, add guard/unbox instructions to
+ // the OSR entry block to guard the incoming Value indeed has this type.
+ // This happens in:
+ //
+ // * TypeAnalyzer::adjustPhiInputs: adds a fallible unbox for values that
+ // can be unboxed.
+ //
+ // * TypeAnalyzer::replaceRedundantPhi: adds a type guard for values that
+ // can't be unboxed (null/undefined/magic Values).
+ if (!mir->graph().osrBlock()) {
+ return false;
+ }
+
+ return !mir->outerInfo().hadSpeculativePhiBailout();
+}
+
+// Try to specialize this phi based on its non-cyclic inputs.
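+// Roughly (illustrative summary): operands that are themselves unspecialized
+// phis are skipped, a mix of Int32 and Double operands yields
+// MIRType::Double, operands that cannot be reconciled (say Int32 and String)
+// yield MIRType::Value, MOsrValue inputs may be ignored (see
+// shouldSpecializeOsrPhis), and MIRType::None is returned if every operand
+// was skipped.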
+MIRType TypeAnalyzer::guessPhiType(MPhi* phi) const {
+#ifdef DEBUG
+ // Check that different magic constants aren't flowing together. Ignore
+ // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
+ // away.
+ MIRType magicType = MIRType::None;
+ for (size_t i = 0; i < phi->numOperands(); i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (in->type() == MIRType::MagicHole ||
+ in->type() == MIRType::MagicIsConstructing) {
+ if (magicType == MIRType::None) {
+ magicType = in->type();
+ }
+ MOZ_ASSERT(magicType == in->type());
+ }
+ }
+#endif
+
+ MIRType type = MIRType::None;
+ bool convertibleToFloat32 = false;
+ bool hasOSRValueInput = false;
+ DebugOnly<bool> hasSpecializableInput = false;
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (in->isPhi()) {
+ hasSpecializableInput = true;
+ if (!in->toPhi()->triedToSpecialize()) {
+ continue;
+ }
+ if (in->type() == MIRType::None) {
+ // The operand is a phi we tried to specialize, but we were
+ // unable to guess its type. propagateSpecialization will
+ // propagate the type to this phi when it becomes known.
+ continue;
+ }
+ }
+
+ // See shouldSpecializeOsrPhis comment. This is the first step mentioned
+ // there.
+ if (shouldSpecializeOsrPhis() && in->isOsrValue()) {
+ hasOSRValueInput = true;
+ hasSpecializableInput = true;
+ continue;
+ }
+
+ if (type == MIRType::None) {
+ type = in->type();
+ if (in->canProduceFloat32() &&
+ !mir->outerInfo().hadSpeculativePhiBailout()) {
+ convertibleToFloat32 = true;
+ }
+ continue;
+ }
+
+ if (type == in->type()) {
+ convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
+ } else {
+ if (convertibleToFloat32 && in->type() == MIRType::Float32) {
+ // If we only saw definitions that can be converted into Float32 before
+ // and encounter a Float32 value, promote previous values to Float32
+ type = MIRType::Float32;
+ } else if (IsTypeRepresentableAsDouble(type) &&
+ IsTypeRepresentableAsDouble(in->type())) {
+ // Specialize phis with int32 and double operands as double.
+ type = MIRType::Double;
+ convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
+ } else {
+ return MIRType::Value;
+ }
+ }
+ }
+
+ if (hasOSRValueInput && type == MIRType::Float32) {
+ // TODO(post-Warp): simplify float32 handling in this function or (better)
+ // make the float32 analysis a stand-alone optimization pass instead of
+ // complicating type analysis. See bug 1655773.
+ type = MIRType::Double;
+ }
+
+ MOZ_ASSERT_IF(type == MIRType::None, hasSpecializableInput);
+ return type;
+}
+
+bool TypeAnalyzer::respecialize(MPhi* phi, MIRType type) {
+ if (phi->type() == type) {
+ return true;
+ }
+ phi->specialize(type);
+ return addPhiToWorklist(phi);
+}
+
+bool TypeAnalyzer::propagateSpecialization(MPhi* phi) {
+ MOZ_ASSERT(phi->type() != MIRType::None);
+
+ // Verify that this specialization matches any phis depending on it.
+ for (MUseDefIterator iter(phi); iter; iter++) {
+ if (!iter.def()->isPhi()) {
+ continue;
+ }
+ MPhi* use = iter.def()->toPhi();
+ if (!use->triedToSpecialize()) {
+ continue;
+ }
+ if (use->type() == MIRType::None) {
+ // We tried to specialize this phi, but were unable to guess its
+ // type. Now that we know the type of one of its operands, we can
+ // specialize it. If it can't be specialized as float32, specialize
+ // as double.
+ MIRType type = phi->type();
+ if (type == MIRType::Float32 && !use->canProduceFloat32()) {
+ type = MIRType::Double;
+ }
+ if (!respecialize(use, type)) {
+ return false;
+ }
+ continue;
+ }
+ if (use->type() != phi->type()) {
+      // If one of the two phis is an Int32 phi that can produce Float32 and
+      // the other is a Float32 phi, specialize the use as Float32.
+ if ((use->type() == MIRType::Int32 && use->canProduceFloat32() &&
+ phi->type() == MIRType::Float32) ||
+ (phi->type() == MIRType::Int32 && phi->canProduceFloat32() &&
+ use->type() == MIRType::Float32)) {
+ if (!respecialize(use, MIRType::Float32)) {
+ return false;
+ }
+ continue;
+ }
+
+ // Specialize phis with int32 and double operands as double.
+ if (IsTypeRepresentableAsDouble(use->type()) &&
+ IsTypeRepresentableAsDouble(phi->type())) {
+ if (!respecialize(use, MIRType::Double)) {
+ return false;
+ }
+ continue;
+ }
+
+ // This phi in our use chain can now no longer be specialized.
+ if (!respecialize(use, MIRType::Value)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool TypeAnalyzer::propagateAllPhiSpecializations() {
+ while (!phiWorklist_.empty()) {
+ if (mir->shouldCancel("Specialize Phis (worklist)")) {
+ return false;
+ }
+
+ MPhi* phi = popPhi();
+ if (!propagateSpecialization(phi)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// If branch pruning removes the path from the entry block to the OSR
+// preheader, we may have phis (or chains of phis) with no operands
+// other than OsrValues. These phis will still have MIRType::None.
+// Since we don't have any information about them, we specialize them
+// as MIRType::Value.
+bool TypeAnalyzer::specializeOsrOnlyPhis() {
+ MOZ_ASSERT(graph.osrBlock());
+ MOZ_ASSERT(graph.osrPreHeaderBlock()->numPredecessors() == 1);
+
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
+ block++) {
+ if (mir->shouldCancel("Specialize osr-only phis (main loop)")) {
+ return false;
+ }
+
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
+ if (mir->shouldCancel("Specialize osr-only phis (inner loop)")) {
+ return false;
+ }
+
+ if (phi->type() == MIRType::None) {
+ phi->specialize(MIRType::Value);
+ }
+ }
+ }
+ return true;
+}
+
+bool TypeAnalyzer::specializePhis() {
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
+ block++) {
+ if (mir->shouldCancel("Specialize Phis (main loop)")) {
+ return false;
+ }
+
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
+ if (mir->shouldCancel("Specialize Phis (inner loop)")) {
+ return false;
+ }
+
+ MIRType type = guessPhiType(*phi);
+ phi->specialize(type);
+ if (type == MIRType::None) {
+ // We tried to guess the type but failed because all operands are
+ // phis we still have to visit. Set the triedToSpecialize flag but
+ // don't propagate the type to other phis, propagateSpecialization
+ // will do that once we know the type of one of the operands.
+ continue;
+ }
+ if (!propagateSpecialization(*phi)) {
+ return false;
+ }
+ }
+ }
+
+ if (!propagateAllPhiSpecializations()) {
+ return false;
+ }
+
+ if (shouldSpecializeOsrPhis()) {
+ // See shouldSpecializeOsrPhis comment. This is the second step, propagating
+ // loop header phi types to preheader phis.
+ MBasicBlock* preHeader = graph.osrPreHeaderBlock();
+ MBasicBlock* header = preHeader->getSingleSuccessor();
+
+ if (preHeader->numPredecessors() == 1) {
+ MOZ_ASSERT(preHeader->getPredecessor(0) == graph.osrBlock());
+ // Branch pruning has removed the path from the entry block
+ // to the preheader. Specialize any phis with no non-osr inputs.
+ if (!specializeOsrOnlyPhis()) {
+ return false;
+ }
+ } else if (header->isLoopHeader()) {
+ for (MPhiIterator phi(header->phisBegin()); phi != header->phisEnd();
+ phi++) {
+ MPhi* preHeaderPhi = phi->getOperand(0)->toPhi();
+ MOZ_ASSERT(preHeaderPhi->block() == preHeader);
+
+ if (preHeaderPhi->type() == MIRType::Value) {
+ // Already includes everything.
+ continue;
+ }
+
+ MIRType loopType = phi->type();
+ if (!respecialize(preHeaderPhi, loopType)) {
+ return false;
+ }
+ }
+ if (!propagateAllPhiSpecializations()) {
+ return false;
+ }
+ } else {
+ // Edge case: there is no backedge in this loop. This can happen
+ // if the header is a 'pending' loop header when control flow in
+ // the loop body is terminated unconditionally, or if a block
+ // that dominates the backedge unconditionally bails out. In
+ // this case the header only has the preheader as predecessor
+ // and we don't need to do anything.
+ MOZ_ASSERT(header->numPredecessors() == 1);
+ }
+ }
+
+ MOZ_ASSERT(phiWorklist_.empty());
+ return true;
+}
+
+bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
+ MIRType phiType = phi->type();
+ MOZ_ASSERT(phiType != MIRType::None);
+
+ // If we specialized a type that's not Value, there are 3 cases:
+ // 1. Every input is of that type.
+ // 2. Every observed input is of that type (i.e., some inputs haven't been
+ // executed yet).
+  // 3. Inputs were doubles and int32s, and the phi was specialized to double.
+ if (phiType != MIRType::Value) {
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (in->type() == phiType) {
+ continue;
+ }
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ if (in->isBox() && in->toBox()->input()->type() == phiType) {
+ phi->replaceOperand(i, in->toBox()->input());
+ } else {
+ MInstruction* replacement;
+
+ if (phiType == MIRType::Double && IsFloatType(in->type())) {
+          // Convert int32 and float32 operands to double.
+ replacement = MToDouble::New(alloc(), in);
+ } else if (phiType == MIRType::Float32) {
+ if (in->type() == MIRType::Int32 || in->type() == MIRType::Double) {
+ replacement = MToFloat32::New(alloc(), in);
+ } else {
+ // See comment below
+ if (in->type() != MIRType::Value) {
+ MBox* box = MBox::New(alloc(), in);
+ in->block()->insertBefore(in->block()->lastIns(), box);
+ in = box;
+ }
+
+ MUnbox* unbox =
+ MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
+ unbox->setBailoutKind(BailoutKind::SpeculativePhi);
+ in->block()->insertBefore(in->block()->lastIns(), unbox);
+ replacement = MToFloat32::New(alloc(), in);
+ }
+ } else {
+ // If we know this branch will fail to convert to phiType,
+ // insert a box that'll immediately fail in the fallible unbox
+ // below.
+ if (in->type() != MIRType::Value) {
+ MBox* box = MBox::New(alloc(), in);
+ in->block()->insertBefore(in->block()->lastIns(), box);
+ in = box;
+ }
+
+ // Be optimistic and insert unboxes when the operand is a
+ // value.
+ replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
+ }
+
+ replacement->setBailoutKind(BailoutKind::SpeculativePhi);
+ in->block()->insertBefore(in->block()->lastIns(), replacement);
+ phi->replaceOperand(i, replacement);
+ }
+ }
+
+ return true;
+ }
+
+ // Box every typed input.
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ MDefinition* in = phi->getOperand(i);
+ if (in->type() == MIRType::Value) {
+ continue;
+ }
+
+ // The input is being explicitly unboxed, so sneak past and grab the
+ // original box. Don't bother optimizing if magic values are involved.
+ if (in->isUnbox()) {
+ MDefinition* unboxInput = in->toUnbox()->input();
+ if (!IsMagicType(unboxInput->type()) && phi->typeIncludes(unboxInput)) {
+ in = in->toUnbox()->input();
+ }
+ }
+
+ if (in->type() != MIRType::Value) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ MBasicBlock* pred = phi->block()->getPredecessor(i);
+ in = AlwaysBoxAt(alloc(), pred->lastIns(), in);
+ }
+
+ phi->replaceOperand(i, in);
+ }
+
+ return true;
+}
+
+bool TypeAnalyzer::adjustInputs(MDefinition* def) {
+ // Definitions such as MPhi have no type policy.
+ if (!def->isInstruction()) {
+ return true;
+ }
+
+ MInstruction* ins = def->toInstruction();
+ const TypePolicy* policy = ins->typePolicy();
+ if (policy && !policy->adjustInputs(alloc(), ins)) {
+ return false;
+ }
+ return true;
+}
+
+void TypeAnalyzer::replaceRedundantPhi(MPhi* phi) {
+ MBasicBlock* block = phi->block();
+ js::Value v;
+ switch (phi->type()) {
+ case MIRType::Undefined:
+ v = UndefinedValue();
+ break;
+ case MIRType::Null:
+ v = NullValue();
+ break;
+ case MIRType::MagicOptimizedOut:
+ v = MagicValue(JS_OPTIMIZED_OUT);
+ break;
+ case MIRType::MagicUninitializedLexical:
+ v = MagicValue(JS_UNINITIALIZED_LEXICAL);
+ break;
+ case MIRType::MagicIsConstructing:
+ v = MagicValue(JS_IS_CONSTRUCTING);
+ break;
+ case MIRType::MagicHole:
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+ MConstant* c = MConstant::New(alloc(), v);
+ // The instruction pass will insert the box
+ block->insertBefore(*(block->begin()), c);
+ phi->justReplaceAllUsesWith(c);
+
+ if (shouldSpecializeOsrPhis()) {
+    // See shouldSpecializeOsrPhis comment. This is part of the third step:
+    // guard that the incoming MOsrValue has this type.
+ for (uint32_t i = 0; i < phi->numOperands(); i++) {
+ MDefinition* def = phi->getOperand(i);
+ if (def->type() != phi->type()) {
+ MOZ_ASSERT(def->isOsrValue() || def->isPhi());
+ MOZ_ASSERT(def->type() == MIRType::Value);
+ MGuardValue* guard = MGuardValue::New(alloc(), def, v);
+ guard->setBailoutKind(BailoutKind::SpeculativePhi);
+ def->block()->insertBefore(def->block()->lastIns(), guard);
+ }
+ }
+ }
+}
+
+bool TypeAnalyzer::insertConversions() {
+  // Instructions are processed in reverse postorder: all defs are seen
+  // before their uses. This ensures that output adjustment (which may rewrite
+ // inputs of uses) does not conflict with input adjustment.
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); block++) {
+ if (mir->shouldCancel("Insert Conversions")) {
+ return false;
+ }
+
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
+ iter != end;) {
+ MPhi* phi = *iter++;
+ if (IsNullOrUndefined(phi->type()) || IsMagicType(phi->type())) {
+ // We can replace this phi with a constant.
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ replaceRedundantPhi(phi);
+ block->discardPhi(phi);
+ } else {
+ if (!adjustPhiInputs(phi)) {
+ return false;
+ }
+ }
+ }
+
+ // AdjustInputs can add/remove/mutate instructions before and after the
+ // current instruction. Only increment the iterator after it is finished.
+ for (MInstructionIterator iter(block->begin()); iter != block->end();
+ iter++) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ if (!adjustInputs(*iter)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/* clang-format off */
+//
+// This function tries to emit Float32 specialized operations whenever it's possible.
+// MIR nodes are flagged as:
+// - Producers, when they can create Float32 that might need to be coerced into a Double.
+// Loads in Float32 arrays and conversions to Float32 are producers.
+// - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
+// Stores in Float32 arrays and conversions to Float32 are consumers.
+// - Float32 commutative, when using the Float32 instruction instead of the Double instruction
+// does not result in a compound loss of precision. This is the case for +, -, /, * with 2
+// operands, for instance. However, an addition with 3 operands is not commutative anymore,
+// so an intermediate coercion is needed.
+// Except for phis, all these flags are known after Ion building, so they cannot change during
+// the process.
+//
+// The idea behind the algorithm is easy: whenever we can prove that a commutative operation
+// has only producers as inputs and consumers as uses, we can specialize the operation as a
+// float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
+// if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
+//
+// Phis have a special status. Phis need to be flagged as producers or consumers as they can
+// be inputs or outputs of commutative instructions. Fortunately, producers and consumers
+// properties are such that we can deduce the property using all non-phi inputs first (which form
+// an initial phi graph) and then propagate all properties from one phi to another using a
+// fixed point algorithm. The algorithm is guaranteed to terminate, as each iteration flags no
+// more phis than the previous iteration (so the worst steady state case is all phis being
+// flagged as false).
+//
+// In a nutshell, the algorithm applies three passes:
+// 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND on
+// all its non-phi uses. Then each phi propagates its value to other phis. If after propagation,
+// the flag value changed, we have to reapply the algorithm on all phi operands, as a phi is a
+// consumer if all of its uses are consumers.
+// 2 - Determine which phis are producers. It's the same algorithm, except that we have to reapply
+// the algorithm on all phi uses, as a phi is a producer if all of its operands are producers.
+// 3 - Go through all commutative operations and ensure their inputs are all producers and their
+// uses are all consumers.
+//
+/* clang-format on */
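+//
+// Illustrative example (assuming a Float32Array |f32| and a number |x|): in
+//
+//   f32[0] = Math.fround(x) + f32[0];
+//
+// the Float32Array load and the Math.fround conversion are producers, the
+// Float32Array store is a consumer, and the two-operand addition is Float32
+// commutative, so the addition can be specialized as a Float32 operation.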
+bool TypeAnalyzer::markPhiConsumers() {
+ MOZ_ASSERT(phiWorklist_.empty());
+
+ // Iterate in postorder so worklist is initialized to RPO.
+ for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
+ ++block) {
+ if (mir->shouldCancel(
+ "Ensure Float32 commutativity - Consumer Phis - Initial state")) {
+ return false;
+ }
+
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
+ MOZ_ASSERT(!phi->isInWorklist());
+ bool canConsumeFloat32 = !phi->isImplicitlyUsed();
+ for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
+ MDefinition* usedef = use.def();
+ canConsumeFloat32 &=
+ usedef->isPhi() || usedef->canConsumeFloat32(use.use());
+ }
+ phi->setCanConsumeFloat32(canConsumeFloat32);
+ if (canConsumeFloat32 && !addPhiToWorklist(*phi)) {
+ return false;
+ }
+ }
+ }
+
+ while (!phiWorklist_.empty()) {
+ if (mir->shouldCancel(
+ "Ensure Float32 commutativity - Consumer Phis - Fixed point")) {
+ return false;
+ }
+
+ MPhi* phi = popPhi();
+ MOZ_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));
+
+ bool validConsumer = true;
+ for (MUseDefIterator use(phi); use; use++) {
+ MDefinition* def = use.def();
+ if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
+ validConsumer = false;
+ break;
+ }
+ }
+
+ if (validConsumer) {
+ continue;
+ }
+
+ // Propagate invalidated phis
+ phi->setCanConsumeFloat32(false);
+ for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
+ MDefinition* input = phi->getOperand(i);
+ if (input->isPhi() && !input->isInWorklist() &&
+ input->canConsumeFloat32(nullptr /* unused */)) {
+ if (!addPhiToWorklist(input->toPhi())) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool TypeAnalyzer::markPhiProducers() {
+ MOZ_ASSERT(phiWorklist_.empty());
+
+ // Iterate in reverse postorder so worklist is initialized to PO.
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); ++block) {
+ if (mir->shouldCancel(
+ "Ensure Float32 commutativity - Producer Phis - initial state")) {
+ return false;
+ }
+
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
+ MOZ_ASSERT(!phi->isInWorklist());
+ bool canProduceFloat32 = true;
+ for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e;
+ ++i) {
+ MDefinition* input = phi->getOperand(i);
+ canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
+ }
+ phi->setCanProduceFloat32(canProduceFloat32);
+ if (canProduceFloat32 && !addPhiToWorklist(*phi)) {
+ return false;
+ }
+ }
+ }
+
+ while (!phiWorklist_.empty()) {
+ if (mir->shouldCancel(
+ "Ensure Float32 commutativity - Producer Phis - Fixed point")) {
+ return false;
+ }
+
+ MPhi* phi = popPhi();
+ MOZ_ASSERT(phi->canProduceFloat32());
+
+ bool validProducer = true;
+ for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
+ MDefinition* input = phi->getOperand(i);
+ if (input->isPhi() && !input->canProduceFloat32()) {
+ validProducer = false;
+ break;
+ }
+ }
+
+ if (validProducer) {
+ continue;
+ }
+
+ // Propagate invalidated phis
+ phi->setCanProduceFloat32(false);
+ for (MUseDefIterator use(phi); use; use++) {
+ MDefinition* def = use.def();
+ if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32()) {
+ if (!addPhiToWorklist(def->toPhi())) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool TypeAnalyzer::specializeValidFloatOps() {
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); ++block) {
+ if (mir->shouldCancel("Ensure Float32 commutativity - Instructions")) {
+ return false;
+ }
+
+ for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
+ if (!ins->isFloat32Commutative()) {
+ continue;
+ }
+
+ if (ins->type() == MIRType::Float32) {
+ continue;
+ }
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ // This call will try to specialize the instruction iff all uses are
+ // consumers and all inputs are producers.
+ ins->trySpecializeFloat32(alloc());
+ }
+ }
+ return true;
+}
+
+bool TypeAnalyzer::graphContainsFloat32() {
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); ++block) {
+ for (MDefinitionIterator def(*block); def; def++) {
+ if (mir->shouldCancel(
+ "Ensure Float32 commutativity - Graph contains Float32")) {
+ return false;
+ }
+
+ if (def->type() == MIRType::Float32) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool TypeAnalyzer::tryEmitFloatOperations() {
+  // Asm.js uses ahead-of-time type checks to specialize operations, so there
+  // is no need to check them again at this point.
+ if (mir->compilingWasm()) {
+ return true;
+ }
+
+  // Check ahead of time that there is at least one definition typed as
+  // Float32; otherwise we don't need this pass.
+ if (!graphContainsFloat32()) {
+ return true;
+ }
+
+ // WarpBuilder skips over code that can't be reached except through
+ // a catch block. Locals and arguments may be observable in such
+ // code after bailing out, so we can't rely on seeing all uses.
+ if (graph.hasTryBlock()) {
+ return true;
+ }
+
+ if (!markPhiConsumers()) {
+ return false;
+ }
+ if (!markPhiProducers()) {
+ return false;
+ }
+ if (!specializeValidFloatOps()) {
+ return false;
+ }
+ return true;
+}
+
+bool TypeAnalyzer::checkFloatCoherency() {
+#ifdef DEBUG
+ // Asserts that all Float32 instructions are flowing into Float32 consumers or
+ // specialized operations
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); ++block) {
+ if (mir->shouldCancel("Check Float32 coherency")) {
+ return false;
+ }
+
+ for (MDefinitionIterator def(*block); def; def++) {
+ if (def->type() != MIRType::Float32) {
+ continue;
+ }
+
+ for (MUseDefIterator use(*def); use; use++) {
+ MDefinition* consumer = use.def();
+ MOZ_ASSERT(consumer->isConsistentFloat32Use(use.use()));
+ }
+ }
+ }
+#endif
+ return true;
+}
+
+bool TypeAnalyzer::analyze() {
+ if (!tryEmitFloatOperations()) {
+ return false;
+ }
+ if (!specializePhis()) {
+ return false;
+ }
+ if (!insertConversions()) {
+ return false;
+ }
+ if (!checkFloatCoherency()) {
+ return false;
+ }
+ return true;
+}
+
+bool jit::ApplyTypeInformation(MIRGenerator* mir, MIRGraph& graph) {
+ TypeAnalyzer analyzer(mir, graph);
+
+ if (!analyzer.analyze()) {
+ return false;
+ }
+
+ return true;
+}
+
+void jit::RenumberBlocks(MIRGraph& graph) {
+ size_t id = 0;
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); block++) {
+ block->setId(id++);
+ }
+}
+
+// A utility for code which adds/deletes blocks. Renumber the remaining blocks,
+// recompute dominators, and optionally recompute AliasAnalysis dependencies.
+bool jit::AccountForCFGChanges(MIRGenerator* mir, MIRGraph& graph,
+ bool updateAliasAnalysis,
+ bool underValueNumberer) {
+ // Renumber the blocks and clear out the old dominator info.
+ size_t id = 0;
+ for (ReversePostorderIterator i(graph.rpoBegin()), e(graph.rpoEnd()); i != e;
+ ++i) {
+ i->clearDominatorInfo();
+ i->setId(id++);
+ }
+
+ // Recompute dominator info.
+ if (!BuildDominatorTree(graph)) {
+ return false;
+ }
+
+ // If needed, update alias analysis dependencies.
+ if (updateAliasAnalysis) {
+ if (!AliasAnalysis(mir, graph).analyze()) {
+ return false;
+ }
+ }
+
+ AssertExtendedGraphCoherency(graph, underValueNumberer);
+ return true;
+}
+
+// Remove all blocks not marked with isMarked(). Unmark all remaining blocks.
+// Alias analysis dependencies may be invalid after calling this function.
+bool jit::RemoveUnmarkedBlocks(MIRGenerator* mir, MIRGraph& graph,
+ uint32_t numMarkedBlocks) {
+ if (numMarkedBlocks == graph.numBlocks()) {
+ // If all blocks are marked, no blocks need removal. Just clear the
+ // marks. We'll still need to update the dominator tree below though,
+ // since we may have removed edges even if we didn't remove any blocks.
+ graph.unmarkBlocks();
+ } else {
+ // As we are going to remove edges and basic blocks, we have to mark
+ // instructions which would be needed by baseline if we were to
+    // bail out.
+ for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
+ MBasicBlock* block = *it++;
+ if (block->isMarked()) {
+ continue;
+ }
+
+ FlagAllOperandsAsImplicitlyUsed(mir, block);
+ }
+
+ // Find unmarked blocks and remove them.
+ for (ReversePostorderIterator iter(graph.rpoBegin());
+ iter != graph.rpoEnd();) {
+ MBasicBlock* block = *iter++;
+
+ if (block->isMarked()) {
+ block->unmark();
+ continue;
+ }
+
+ // The block is unreachable. Clear out the loop header flag, as
+ // we're doing the sweep of a mark-and-sweep here, so we no longer
+ // need to worry about whether an unmarked block is a loop or not.
+ if (block->isLoopHeader()) {
+ block->clearLoopHeader();
+ }
+
+ for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
+ block->getSuccessor(i)->removePredecessor(block);
+ }
+ graph.removeBlock(block);
+ }
+ }
+
+ // Renumber the blocks and update the dominator tree.
+ return AccountForCFGChanges(mir, graph, /*updateAliasAnalysis=*/false);
+}
+
+// A Simple, Fast Dominance Algorithm by Cooper et al.
+// Modified to support empty intersections for OSR, and in RPO.
+static MBasicBlock* IntersectDominators(MBasicBlock* block1,
+ MBasicBlock* block2) {
+ MBasicBlock* finger1 = block1;
+ MBasicBlock* finger2 = block2;
+
+ MOZ_ASSERT(finger1);
+ MOZ_ASSERT(finger2);
+
+ // In the original paper, the block ID comparisons are on the postorder index.
+ // This implementation iterates in RPO, so the comparisons are reversed.
+
+ // For this function to be called, the block must have multiple predecessors.
+ // If a finger is then found to be self-dominating, it must therefore be
+ // reachable from multiple roots through non-intersecting control flow.
+ // nullptr is returned in this case, to denote an empty intersection.
+
+ while (finger1->id() != finger2->id()) {
+ while (finger1->id() > finger2->id()) {
+ MBasicBlock* idom = finger1->immediateDominator();
+ if (idom == finger1) {
+ return nullptr; // Empty intersection.
+ }
+ finger1 = idom;
+ }
+
+ while (finger2->id() > finger1->id()) {
+ MBasicBlock* idom = finger2->immediateDominator();
+ if (idom == finger2) {
+ return nullptr; // Empty intersection.
+ }
+ finger2 = idom;
+ }
+ }
+ return finger1;
+}
+
+void jit::ClearDominatorTree(MIRGraph& graph) {
+ for (MBasicBlockIterator iter = graph.begin(); iter != graph.end(); iter++) {
+ iter->clearDominatorInfo();
+ }
+}
+
+static void ComputeImmediateDominators(MIRGraph& graph) {
+ // The default start block is a root and therefore only self-dominates.
+ MBasicBlock* startBlock = graph.entryBlock();
+ startBlock->setImmediateDominator(startBlock);
+
+ // Any OSR block is a root and therefore only self-dominates.
+ MBasicBlock* osrBlock = graph.osrBlock();
+ if (osrBlock) {
+ osrBlock->setImmediateDominator(osrBlock);
+ }
+
+ bool changed = true;
+
+ while (changed) {
+ changed = false;
+
+ ReversePostorderIterator block = graph.rpoBegin();
+
+ // For each block in RPO, intersect all dominators.
+ for (; block != graph.rpoEnd(); block++) {
+ // If a node has once been found to have no exclusive dominator,
+ // it will never have an exclusive dominator, so it may be skipped.
+ if (block->immediateDominator() == *block) {
+ continue;
+ }
+
+ // A block with no predecessors is not reachable from any entry, so
+ // it self-dominates.
+ if (MOZ_UNLIKELY(block->numPredecessors() == 0)) {
+ block->setImmediateDominator(*block);
+ continue;
+ }
+
+ MBasicBlock* newIdom = block->getPredecessor(0);
+
+ // Find the first common dominator.
+ for (size_t i = 1; i < block->numPredecessors(); i++) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ if (pred->immediateDominator() == nullptr) {
+ continue;
+ }
+
+ newIdom = IntersectDominators(pred, newIdom);
+
+ // If there is no common dominator, the block self-dominates.
+ if (newIdom == nullptr) {
+ block->setImmediateDominator(*block);
+ changed = true;
+ break;
+ }
+ }
+
+ if (newIdom && block->immediateDominator() != newIdom) {
+ block->setImmediateDominator(newIdom);
+ changed = true;
+ }
+ }
+ }
+
+#ifdef DEBUG
+ // Assert that all blocks have dominator information.
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end();
+ block++) {
+ MOZ_ASSERT(block->immediateDominator() != nullptr);
+ }
+#endif
+}
+
+bool jit::BuildDominatorTree(MIRGraph& graph) {
+ MOZ_ASSERT(graph.canBuildDominators());
+
+ ComputeImmediateDominators(graph);
+
+ Vector<MBasicBlock*, 4, JitAllocPolicy> worklist(graph.alloc());
+
+ // Traversing through the graph in post-order means that every non-phi use
+ // of a definition is visited before the def itself. Since a def
+ // dominates its uses, by the time we reach a particular
+ // block, we have processed all of its dominated children, so
+ // block->numDominated() is accurate.
+ for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
+ MBasicBlock* child = *i;
+ MBasicBlock* parent = child->immediateDominator();
+
+ // Dominance is defined such that blocks always dominate themselves.
+ child->addNumDominated(1);
+
+ // If the block only self-dominates, it has no definite parent.
+ // Add it to the worklist as a root for pre-order traversal.
+ // This includes all roots. Order does not matter.
+ if (child == parent) {
+ if (!worklist.append(child)) {
+ return false;
+ }
+ continue;
+ }
+
+ if (!parent->addImmediatelyDominatedBlock(child)) {
+ return false;
+ }
+
+ parent->addNumDominated(child->numDominated());
+ }
+
+#ifdef DEBUG
+ // If compiling with OSR, many blocks will self-dominate.
+ // Without OSR, there is only one root block which dominates all.
+ if (!graph.osrBlock()) {
+ MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
+ }
+#endif
+ // Now, iterate through the dominator tree in pre-order and annotate every
+ // block with its index in the traversal.
+ size_t index = 0;
+ while (!worklist.empty()) {
+ MBasicBlock* block = worklist.popCopy();
+ block->setDomIndex(index);
+
+ if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
+ block->immediatelyDominatedBlocksEnd())) {
+ return false;
+ }
+ index++;
+ }
+
+ return true;
+}
+
+bool jit::BuildPhiReverseMapping(MIRGraph& graph) {
+ // Build a mapping such that given a basic block, whose successor has one or
+ // more phis, we can find our specific input to that phi. To make this fast
+ // mapping work we rely on a specific property of our structured control
+ // flow graph: For a block with phis, its predecessors each have only one
+ // successor with phis. Consider each case:
+  // * Blocks with fewer than two predecessors cannot have phis.
+ // * Breaks. A break always has exactly one successor, and the break
+ // catch block has exactly one predecessor for each break, as
+ // well as a final predecessor for the actual loop exit.
+ // * Continues. A continue always has exactly one successor, and the
+ // continue catch block has exactly one predecessor for each
+ // continue, as well as a final predecessor for the actual
+ // loop continuation. The continue itself has exactly one
+ // successor.
+  // * An if. Each branch has exactly one predecessor.
+ // * A switch. Each branch has exactly one predecessor.
+ // * Loop tail. A new block is always created for the exit, and if a
+ // break statement is present, the exit block will forward
+ // directly to the break block.
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end();
+ block++) {
+ if (block->phisEmpty()) {
+ continue;
+ }
+
+ // Assert on the above.
+ for (size_t j = 0; j < block->numPredecessors(); j++) {
+ MBasicBlock* pred = block->getPredecessor(j);
+
+#ifdef DEBUG
+ size_t numSuccessorsWithPhis = 0;
+ for (size_t k = 0; k < pred->numSuccessors(); k++) {
+ MBasicBlock* successor = pred->getSuccessor(k);
+ if (!successor->phisEmpty()) {
+ numSuccessorsWithPhis++;
+ }
+ }
+ MOZ_ASSERT(numSuccessorsWithPhis <= 1);
+#endif
+
+ pred->setSuccessorWithPhis(*block, j);
+ }
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
+static bool CheckSuccessorImpliesPredecessor(MBasicBlock* A, MBasicBlock* B) {
+ // Assuming B = succ(A), verify A = pred(B).
+ for (size_t i = 0; i < B->numPredecessors(); i++) {
+ if (A == B->getPredecessor(i)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool CheckPredecessorImpliesSuccessor(MBasicBlock* A, MBasicBlock* B) {
+ // Assuming B = pred(A), verify A = succ(B).
+ for (size_t i = 0; i < B->numSuccessors(); i++) {
+ if (A == B->getSuccessor(i)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// If you have issues with the usesBalance assertions, then define the macro
+// _DEBUG_CHECK_OPERANDS_USES_BALANCE to spew information on the error output.
+// This output can then be processed with the following awk script to filter and
+// highlight which checks are missing or if there is an unexpected operand /
+// use.
+//
+// define _DEBUG_CHECK_OPERANDS_USES_BALANCE 1
+/*
+
+$ ./js 2>stderr.log
+$ gawk '
+ /^==Check/ { context = ""; state = $2; }
+ /^[a-z]/ { context = context "\n\t" $0; }
+ /^==End/ {
+ if (state == "Operand") {
+ list[context] = list[context] - 1;
+ } else if (state == "Use") {
+ list[context] = list[context] + 1;
+ }
+ }
+ END {
+ for (ctx in list) {
+ if (list[ctx] > 0) {
+ print "Missing operand check", ctx, "\n"
+ }
+ if (list[ctx] < 0) {
+ print "Missing use check", ctx, "\n"
+ }
+ };
+ }' < stderr.log
+
+*/
+
+static void CheckOperand(const MNode* consumer, const MUse* use,
+ int32_t* usesBalance) {
+ MOZ_ASSERT(use->hasProducer());
+ MDefinition* producer = use->producer();
+ MOZ_ASSERT(!producer->isDiscarded());
+ MOZ_ASSERT(producer->block() != nullptr);
+ MOZ_ASSERT(use->consumer() == consumer);
+# ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
+ Fprinter print(stderr);
+ print.printf("==Check Operand\n");
+ use->producer()->dump(print);
+ print.printf(" index: %zu\n", use->consumer()->indexOf(use));
+ use->consumer()->dump(print);
+ print.printf("==End\n");
+# endif
+ --*usesBalance;
+}
+
+static void CheckUse(const MDefinition* producer, const MUse* use,
+ int32_t* usesBalance) {
+ MOZ_ASSERT(!use->consumer()->block()->isDead());
+ MOZ_ASSERT_IF(use->consumer()->isDefinition(),
+ !use->consumer()->toDefinition()->isDiscarded());
+ MOZ_ASSERT(use->consumer()->block() != nullptr);
+ MOZ_ASSERT(use->consumer()->getOperand(use->index()) == producer);
+# ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
+ Fprinter print(stderr);
+ print.printf("==Check Use\n");
+ use->producer()->dump(print);
+ print.printf(" index: %zu\n", use->consumer()->indexOf(use));
+ use->consumer()->dump(print);
+ print.printf("==End\n");
+# endif
+ ++*usesBalance;
+}
+
+// To properly encode entry resume points, we have to ensure that all the
+// operands of the entry resume point are located before the safeInsertTop
+// location.
+static void AssertOperandsBeforeSafeInsertTop(MResumePoint* resume) {
+ MBasicBlock* block = resume->block();
+ if (block == block->graph().osrBlock()) {
+ return;
+ }
+ MInstruction* stop = block->safeInsertTop();
+ for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
+ MDefinition* def = resume->getOperand(i);
+ if (def->block() != block) {
+ continue;
+ }
+ if (def->isPhi()) {
+ continue;
+ }
+
+ for (MInstructionIterator ins = block->begin(); true; ins++) {
+ if (*ins == def) {
+ break;
+ }
+ MOZ_ASSERT(
+ *ins != stop,
+ "Resume point operand located after the safeInsertTop location");
+ }
+ }
+}
+#endif // DEBUG
+
+void jit::AssertBasicGraphCoherency(MIRGraph& graph, bool force) {
+#ifdef DEBUG
+ if (!JitOptions.fullDebugChecks && !force) {
+ return;
+ }
+
+ MOZ_ASSERT(graph.entryBlock()->numPredecessors() == 0);
+ MOZ_ASSERT(graph.entryBlock()->phisEmpty());
+ MOZ_ASSERT(!graph.entryBlock()->unreachable());
+
+ if (MBasicBlock* osrBlock = graph.osrBlock()) {
+ MOZ_ASSERT(osrBlock->numPredecessors() == 0);
+ MOZ_ASSERT(osrBlock->phisEmpty());
+ MOZ_ASSERT(osrBlock != graph.entryBlock());
+ MOZ_ASSERT(!osrBlock->unreachable());
+ }
+
+ if (MResumePoint* resumePoint = graph.entryResumePoint()) {
+ MOZ_ASSERT(resumePoint->block() == graph.entryBlock());
+ }
+
+ // Assert successor and predecessor list coherency.
+ uint32_t count = 0;
+ int32_t usesBalance = 0;
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end();
+ block++) {
+ count++;
+
+ MOZ_ASSERT(&block->graph() == &graph);
+ MOZ_ASSERT(!block->isDead());
+ MOZ_ASSERT_IF(block->outerResumePoint() != nullptr,
+ block->entryResumePoint() != nullptr);
+
+ for (size_t i = 0; i < block->numSuccessors(); i++) {
+ MOZ_ASSERT(
+ CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));
+ }
+
+ for (size_t i = 0; i < block->numPredecessors(); i++) {
+ MOZ_ASSERT(
+ CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));
+ }
+
+ if (MResumePoint* resume = block->entryResumePoint()) {
+ MOZ_ASSERT(!resume->instruction());
+ MOZ_ASSERT(resume->block() == *block);
+ AssertOperandsBeforeSafeInsertTop(resume);
+ }
+ if (MResumePoint* resume = block->outerResumePoint()) {
+ MOZ_ASSERT(!resume->instruction());
+ MOZ_ASSERT(resume->block() == *block);
+ }
+ for (MResumePointIterator iter(block->resumePointsBegin());
+ iter != block->resumePointsEnd(); iter++) {
+      // We cannot yet assert that if there is no instruction then this is
+      // the entry resume point, because we are still storing resume points
+      // in the InlinePropertyTable.
+ MOZ_ASSERT_IF(iter->instruction(),
+ iter->instruction()->block() == *block);
+ for (uint32_t i = 0, e = iter->numOperands(); i < e; i++) {
+ CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
+ }
+ }
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
+ MOZ_ASSERT(phi->numOperands() == block->numPredecessors());
+ MOZ_ASSERT(!phi->isRecoveredOnBailout());
+ MOZ_ASSERT(phi->type() != MIRType::None);
+ MOZ_ASSERT(phi->dependency() == nullptr);
+ }
+ for (MDefinitionIterator iter(*block); iter; iter++) {
+ MOZ_ASSERT(iter->block() == *block);
+ MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType::None);
+ MOZ_ASSERT(!iter->isDiscarded());
+ MOZ_ASSERT_IF(iter->isStart(),
+ *block == graph.entryBlock() || *block == graph.osrBlock());
+ MOZ_ASSERT_IF(iter->isParameter(),
+ *block == graph.entryBlock() || *block == graph.osrBlock());
+ MOZ_ASSERT_IF(iter->isOsrEntry(), *block == graph.osrBlock());
+ MOZ_ASSERT_IF(iter->isOsrValue(), *block == graph.osrBlock());
+
+ // Assert that use chains are valid for this instruction.
+ for (uint32_t i = 0, end = iter->numOperands(); i < end; i++) {
+ CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
+ }
+ for (MUseIterator use(iter->usesBegin()); use != iter->usesEnd(); use++) {
+ CheckUse(*iter, *use, &usesBalance);
+ }
+
+ if (iter->isInstruction()) {
+ if (MResumePoint* resume = iter->toInstruction()->resumePoint()) {
+ MOZ_ASSERT(resume->instruction() == *iter);
+ MOZ_ASSERT(resume->block() == *block);
+ MOZ_ASSERT(resume->block()->entryResumePoint() != nullptr);
+ }
+ }
+
+ if (iter->isRecoveredOnBailout()) {
+ MOZ_ASSERT(!iter->hasLiveDefUses());
+ }
+ }
+
+ // The control instruction is not visited by the MDefinitionIterator.
+ MControlInstruction* control = block->lastIns();
+ MOZ_ASSERT(control->block() == *block);
+ MOZ_ASSERT(!control->hasUses());
+ MOZ_ASSERT(control->type() == MIRType::None);
+ MOZ_ASSERT(!control->isDiscarded());
+ MOZ_ASSERT(!control->isRecoveredOnBailout());
+ MOZ_ASSERT(control->resumePoint() == nullptr);
+ for (uint32_t i = 0, end = control->numOperands(); i < end; i++) {
+ CheckOperand(control, control->getUseFor(i), &usesBalance);
+ }
+ for (size_t i = 0; i < control->numSuccessors(); i++) {
+ MOZ_ASSERT(control->getSuccessor(i));
+ }
+ }
+
+  // In case of issues, see the _DEBUG_CHECK_OPERANDS_USES_BALANCE macro above.
+ MOZ_ASSERT(usesBalance <= 0, "More use checks than operand checks");
+ MOZ_ASSERT(usesBalance >= 0, "More operand checks than use checks");
+ MOZ_ASSERT(graph.numBlocks() == count);
+#endif
+}
+
+#ifdef DEBUG
+static void AssertReversePostorder(MIRGraph& graph) {
+ // Check that every block is visited after all its predecessors (except
+ // backedges).
+ for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd();
+ ++iter) {
+ MBasicBlock* block = *iter;
+ MOZ_ASSERT(!block->isMarked());
+
+ for (size_t i = 0; i < block->numPredecessors(); i++) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ if (!pred->isMarked()) {
+ MOZ_ASSERT(pred->isLoopBackedge());
+ MOZ_ASSERT(block->backedge() == pred);
+ }
+ }
+
+ block->mark();
+ }
+
+ graph.unmarkBlocks();
+}
+#endif
+
+#ifdef DEBUG
+static void AssertDominatorTree(MIRGraph& graph) {
+ // Check dominators.
+
+ MOZ_ASSERT(graph.entryBlock()->immediateDominator() == graph.entryBlock());
+ if (MBasicBlock* osrBlock = graph.osrBlock()) {
+ MOZ_ASSERT(osrBlock->immediateDominator() == osrBlock);
+ } else {
+ MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
+ }
+
+ size_t i = graph.numBlocks();
+ size_t totalNumDominated = 0;
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end();
+ block++) {
+ MOZ_ASSERT(block->dominates(*block));
+
+ MBasicBlock* idom = block->immediateDominator();
+ MOZ_ASSERT(idom->dominates(*block));
+ MOZ_ASSERT(idom == *block || idom->id() < block->id());
+
+ if (idom == *block) {
+ totalNumDominated += block->numDominated();
+ } else {
+ bool foundInParent = false;
+ for (size_t j = 0; j < idom->numImmediatelyDominatedBlocks(); j++) {
+ if (idom->getImmediatelyDominatedBlock(j) == *block) {
+ foundInParent = true;
+ break;
+ }
+ }
+ MOZ_ASSERT(foundInParent);
+ }
+
+ size_t numDominated = 1;
+ for (size_t j = 0; j < block->numImmediatelyDominatedBlocks(); j++) {
+ MBasicBlock* dom = block->getImmediatelyDominatedBlock(j);
+ MOZ_ASSERT(block->dominates(dom));
+ MOZ_ASSERT(dom->id() > block->id());
+ MOZ_ASSERT(dom->immediateDominator() == *block);
+
+ numDominated += dom->numDominated();
+ }
+ MOZ_ASSERT(block->numDominated() == numDominated);
+ MOZ_ASSERT(block->numDominated() <= i);
+ MOZ_ASSERT(block->numSuccessors() != 0 || block->numDominated() == 1);
+ i--;
+ }
+ MOZ_ASSERT(i == 0);
+ MOZ_ASSERT(totalNumDominated == graph.numBlocks());
+}
+#endif
+
+void jit::AssertGraphCoherency(MIRGraph& graph, bool force) {
+#ifdef DEBUG
+ if (!JitOptions.checkGraphConsistency) {
+ return;
+ }
+ if (!JitOptions.fullDebugChecks && !force) {
+ return;
+ }
+ AssertBasicGraphCoherency(graph, force);
+ AssertReversePostorder(graph);
+#endif
+}
+
+#ifdef DEBUG
+static bool IsResumableMIRType(MIRType type) {
+ // see CodeGeneratorShared::encodeAllocation
+ switch (type) {
+ case MIRType::Undefined:
+ case MIRType::Null:
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ case MIRType::Object:
+ case MIRType::Shape:
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicUninitializedLexical:
+ case MIRType::MagicIsConstructing:
+ case MIRType::Value:
+ case MIRType::Simd128:
+ return true;
+
+ case MIRType::MagicHole:
+ case MIRType::None:
+ case MIRType::Slots:
+ case MIRType::Elements:
+ case MIRType::Pointer:
+ case MIRType::Int64:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults:
+ case MIRType::IntPtr:
+ return false;
+ }
+ MOZ_CRASH("Unknown MIRType.");
+}
+
+static void AssertResumableOperands(MNode* node) {
+ for (size_t i = 0, e = node->numOperands(); i < e; ++i) {
+ MDefinition* op = node->getOperand(i);
+ if (op->isRecoveredOnBailout()) {
+ continue;
+ }
+ MOZ_ASSERT(IsResumableMIRType(op->type()),
+ "Resume point cannot encode its operands");
+ }
+}
+
+static void AssertIfResumableInstruction(MDefinition* def) {
+ if (!def->isRecoveredOnBailout()) {
+ return;
+ }
+ AssertResumableOperands(def);
+}
+
+static void AssertResumePointDominatedByOperands(MResumePoint* resume) {
+ for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
+ MDefinition* op = resume->getOperand(i);
+ MOZ_ASSERT(op->block()->dominates(resume->block()),
+ "Resume point is not dominated by its operands");
+ }
+}
+#endif // DEBUG
+
+void jit::AssertExtendedGraphCoherency(MIRGraph& graph, bool underValueNumberer,
+ bool force) {
+ // Checks the basic GraphCoherency but also other conditions that
+ // do not hold immediately (such as the fact that critical edges
+  // are split).
+
+#ifdef DEBUG
+ if (!JitOptions.checkGraphConsistency) {
+ return;
+ }
+ if (!JitOptions.fullDebugChecks && !force) {
+ return;
+ }
+
+ AssertGraphCoherency(graph, force);
+
+ AssertDominatorTree(graph);
+
+ DebugOnly<uint32_t> idx = 0;
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end();
+ block++) {
+ MOZ_ASSERT(block->id() == idx);
+ ++idx;
+
+ // No critical edges:
+ if (block->numSuccessors() > 1) {
+ for (size_t i = 0; i < block->numSuccessors(); i++) {
+ MOZ_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);
+ }
+ }
+
+ if (block->isLoopHeader()) {
+ if (underValueNumberer && block->numPredecessors() == 3) {
+ // Fixup block.
+ MOZ_ASSERT(block->getPredecessor(1)->numPredecessors() == 0);
+ MOZ_ASSERT(graph.osrBlock(),
+ "Fixup blocks should only exists if we have an osr block.");
+ } else {
+ MOZ_ASSERT(block->numPredecessors() == 2);
+ }
+ MBasicBlock* backedge = block->backedge();
+ MOZ_ASSERT(backedge->id() >= block->id());
+ MOZ_ASSERT(backedge->numSuccessors() == 1);
+ MOZ_ASSERT(backedge->getSuccessor(0) == *block);
+ }
+
+ if (!block->phisEmpty()) {
+ for (size_t i = 0; i < block->numPredecessors(); i++) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ MOZ_ASSERT(pred->successorWithPhis() == *block);
+ MOZ_ASSERT(pred->positionInPhiSuccessor() == i);
+ }
+ }
+
+ uint32_t successorWithPhis = 0;
+ for (size_t i = 0; i < block->numSuccessors(); i++) {
+ if (!block->getSuccessor(i)->phisEmpty()) {
+ successorWithPhis++;
+ }
+ }
+
+ MOZ_ASSERT(successorWithPhis <= 1);
+ MOZ_ASSERT((successorWithPhis != 0) ==
+ (block->successorWithPhis() != nullptr));
+
+ // Verify that phi operands dominate the corresponding CFG predecessor
+ // edges.
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
+ iter != end; ++iter) {
+ MPhi* phi = *iter;
+ for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
+ MOZ_ASSERT(
+ phi->getOperand(i)->block()->dominates(block->getPredecessor(i)),
+ "Phi input is not dominated by its operand");
+ }
+ }
+
+ // Verify that instructions are dominated by their operands.
+ for (MInstructionIterator iter(block->begin()), end(block->end());
+ iter != end; ++iter) {
+ MInstruction* ins = *iter;
+ for (size_t i = 0, e = ins->numOperands(); i < e; ++i) {
+ MDefinition* op = ins->getOperand(i);
+ MBasicBlock* opBlock = op->block();
+ MOZ_ASSERT(opBlock->dominates(*block),
+ "Instruction is not dominated by its operands");
+
+ // If the operand is an instruction in the same block, check
+ // that it comes first.
+ if (opBlock == *block && !op->isPhi()) {
+ MInstructionIterator opIter = block->begin(op->toInstruction());
+ do {
+ ++opIter;
+ MOZ_ASSERT(opIter != block->end(),
+ "Operand in same block as instruction does not precede");
+ } while (*opIter != ins);
+ }
+ }
+ AssertIfResumableInstruction(ins);
+ if (MResumePoint* resume = ins->resumePoint()) {
+ AssertResumePointDominatedByOperands(resume);
+ AssertResumableOperands(resume);
+ }
+ }
+
+ // Verify that the block resume points are dominated by their operands.
+ if (MResumePoint* resume = block->entryResumePoint()) {
+ AssertResumePointDominatedByOperands(resume);
+ AssertResumableOperands(resume);
+ }
+ if (MResumePoint* resume = block->outerResumePoint()) {
+ AssertResumePointDominatedByOperands(resume);
+ AssertResumableOperands(resume);
+ }
+ }
+#endif
+}
+
+struct BoundsCheckInfo {
+ MBoundsCheck* check;
+ uint32_t validEnd;
+};
+
+typedef HashMap<uint32_t, BoundsCheckInfo, DefaultHasher<uint32_t>,
+ JitAllocPolicy>
+ BoundsCheckMap;
+
+// Compute a hash for bounds checks which ignores constant offsets in the index.
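+// For instance (illustrative): checks for |a[i]| and |a[i + 2]| against the
+// same length both extract |i| as the term, so they hash identically and can
+// be matched up by FindDominatingBoundsCheck below.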
+static HashNumber BoundsCheckHashIgnoreOffset(MBoundsCheck* check) {
+ SimpleLinearSum indexSum = ExtractLinearSum(check->index());
+ uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
+ uintptr_t length = uintptr_t(check->length());
+ return index ^ length;
+}
+
+static MBoundsCheck* FindDominatingBoundsCheck(BoundsCheckMap& checks,
+ MBoundsCheck* check,
+ size_t index) {
+  // Since we are traversing the dominator tree in pre-order, when we
+  // are looking at the |index|-th block, the next numDominated() blocks
+  // we traverse are precisely the set of blocks that it dominates.
+  //
+  // So, a check stored at traversal index |index| is visible in every block
+  // whose traversal index i satisfies:
+  //   index <= i < index + check->block()->numDominated()
+  // and it becomes invalid after that.
+ HashNumber hash = BoundsCheckHashIgnoreOffset(check);
+ BoundsCheckMap::Ptr p = checks.lookup(hash);
+ if (!p || index >= p->value().validEnd) {
+ // We didn't find a dominating bounds check.
+ BoundsCheckInfo info;
+ info.check = check;
+ info.validEnd = index + check->block()->numDominated();
+
+ if (!checks.put(hash, info)) return nullptr;
+
+ return check;
+ }
+
+ return p->value().check;
+}
+
+static MathSpace ExtractMathSpace(MDefinition* ins) {
+ MOZ_ASSERT(ins->isAdd() || ins->isSub());
+ MBinaryArithInstruction* arith = nullptr;
+ if (ins->isAdd()) {
+ arith = ins->toAdd();
+ } else {
+ arith = ins->toSub();
+ }
+ switch (arith->truncateKind()) {
+ case TruncateKind::NoTruncate:
+ case TruncateKind::TruncateAfterBailouts:
+ // TruncateAfterBailouts is considered as infinite space because the
+ // LinearSum will effectively remove the bailout check.
+ return MathSpace::Infinite;
+ case TruncateKind::IndirectTruncate:
+ case TruncateKind::Truncate:
+ return MathSpace::Modulo;
+ }
+ MOZ_CRASH("Unknown TruncateKind");
+}
+
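+// Helper predicates for ExtractLinearSum: an addition (resp. subtraction) of
+// two constants is only folded in the infinite math space when the operands
+// have matching (resp. opposite) signs, so the running constant moves
+// monotonically in one direction.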
+static bool MonotoneAdd(int32_t lhs, int32_t rhs) {
+ return (lhs >= 0 && rhs >= 0) || (lhs <= 0 && rhs <= 0);
+}
+
+static bool MonotoneSub(int32_t lhs, int32_t rhs) {
+ return (lhs >= 0 && rhs <= 0) || (lhs <= 0 && rhs >= 0);
+}
+
+// Extract a linear sum from ins, if possible (otherwise giving the
+// sum 'ins + 0').
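+//
+// For instance (illustrative): |x + 3| yields {term: x, constant: 3}, a
+// constant |5| yields {term: nullptr, constant: 5}, and anything that cannot
+// be decomposed yields {term: ins, constant: 0}.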
+SimpleLinearSum jit::ExtractLinearSum(MDefinition* ins, MathSpace space,
+ int32_t recursionDepth) {
+ const int32_t SAFE_RECURSION_LIMIT = 100;
+ if (recursionDepth > SAFE_RECURSION_LIMIT) {
+ return SimpleLinearSum(ins, 0);
+ }
+
+ // Unwrap Int32ToIntPtr. This instruction only changes the representation
+ // (int32_t to intptr_t) without affecting the value.
+ if (ins->isInt32ToIntPtr()) {
+ ins = ins->toInt32ToIntPtr()->input();
+ }
+
+ if (ins->isBeta()) {
+ ins = ins->getOperand(0);
+ }
+
+ MOZ_ASSERT(!ins->isInt32ToIntPtr());
+
+ if (ins->type() != MIRType::Int32) {
+ return SimpleLinearSum(ins, 0);
+ }
+
+ if (ins->isConstant()) {
+ return SimpleLinearSum(nullptr, ins->toConstant()->toInt32());
+ }
+
+ if (!ins->isAdd() && !ins->isSub()) {
+ return SimpleLinearSum(ins, 0);
+ }
+
+  // Only allow math which is in the same space.
+ MathSpace insSpace = ExtractMathSpace(ins);
+ if (space == MathSpace::Unknown) {
+ space = insSpace;
+ } else if (space != insSpace) {
+ return SimpleLinearSum(ins, 0);
+ }
+ MOZ_ASSERT(space == MathSpace::Modulo || space == MathSpace::Infinite);
+
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+ if (lhs->type() != MIRType::Int32 || rhs->type() != MIRType::Int32) {
+ return SimpleLinearSum(ins, 0);
+ }
+
+ // Extract linear sums of each operand.
+ SimpleLinearSum lsum = ExtractLinearSum(lhs, space, recursionDepth + 1);
+ SimpleLinearSum rsum = ExtractLinearSum(rhs, space, recursionDepth + 1);
+
+  // LinearSum only considers a single term operand; if both sides have
+  // terms, then ignore the extracted linear sums.
+ if (lsum.term && rsum.term) {
+ return SimpleLinearSum(ins, 0);
+ }
+
+ // Check if this is of the form <SUM> + n or n + <SUM>.
+ if (ins->isAdd()) {
+ int32_t constant;
+ if (space == MathSpace::Modulo) {
+ constant = uint32_t(lsum.constant) + uint32_t(rsum.constant);
+ } else if (!SafeAdd(lsum.constant, rsum.constant, &constant) ||
+ !MonotoneAdd(lsum.constant, rsum.constant)) {
+ return SimpleLinearSum(ins, 0);
+ }
+ return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
+ }
+
+ MOZ_ASSERT(ins->isSub());
+ // Check if this is of the form <SUM> - n.
+ if (lsum.term) {
+ int32_t constant;
+ if (space == MathSpace::Modulo) {
+ constant = uint32_t(lsum.constant) - uint32_t(rsum.constant);
+ } else if (!SafeSub(lsum.constant, rsum.constant, &constant) ||
+ !MonotoneSub(lsum.constant, rsum.constant)) {
+ return SimpleLinearSum(ins, 0);
+ }
+ return SimpleLinearSum(lsum.term, constant);
+ }
+
+ // Ignore any of the form n - <SUM>.
+ return SimpleLinearSum(ins, 0);
+}
+
+// Extract a linear inequality holding when a boolean test goes in the
+// specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
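+//
+// For instance (illustrative): for a test on |x < y| taken in the TRUE_BRANCH
+// direction, this extracts lhs = x + 1, rhs = y and *plessEqual = true,
+// i.e. the inequality x + 1 <= y.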
+bool jit::ExtractLinearInequality(MTest* test, BranchDirection direction,
+ SimpleLinearSum* plhs, MDefinition** prhs,
+ bool* plessEqual) {
+ if (!test->getOperand(0)->isCompare()) {
+ return false;
+ }
+
+ MCompare* compare = test->getOperand(0)->toCompare();
+
+ MDefinition* lhs = compare->getOperand(0);
+ MDefinition* rhs = compare->getOperand(1);
+
+ // TODO: optimize Compare_UInt32
+ if (!compare->isInt32Comparison()) {
+ return false;
+ }
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ JSOp jsop = compare->jsop();
+ if (direction == FALSE_BRANCH) {
+ jsop = NegateCompareOp(jsop);
+ }
+
+ SimpleLinearSum lsum = ExtractLinearSum(lhs);
+ SimpleLinearSum rsum = ExtractLinearSum(rhs);
+
+ if (!SafeSub(lsum.constant, rsum.constant, &lsum.constant)) {
+ return false;
+ }
+
+ // Normalize operations to use <= or >=.
+ switch (jsop) {
+ case JSOp::Le:
+ *plessEqual = true;
+ break;
+ case JSOp::Lt:
+ /* x < y ==> x + 1 <= y */
+ if (!SafeAdd(lsum.constant, 1, &lsum.constant)) {
+ return false;
+ }
+ *plessEqual = true;
+ break;
+ case JSOp::Ge:
+ *plessEqual = false;
+ break;
+ case JSOp::Gt:
+ /* x > y ==> x - 1 >= y */
+ if (!SafeSub(lsum.constant, 1, &lsum.constant)) {
+ return false;
+ }
+ *plessEqual = false;
+ break;
+ default:
+ return false;
+ }
+
+ *plhs = lsum;
+ *prhs = rsum.term;
+
+ return true;
+}
+
+static bool TryEliminateBoundsCheck(BoundsCheckMap& checks, size_t blockIndex,
+ MBoundsCheck* dominated, bool* eliminated) {
+ MOZ_ASSERT(!*eliminated);
+
+ // Replace all uses of the bounds check with the actual index.
+ // This is (a) necessary, because we can coalesce two different
+ // bounds checks and would otherwise use the wrong index and
+ // (b) helps register allocation. Note that this is safe since
+ // no other pass after bounds check elimination moves instructions.
+ dominated->replaceAllUsesWith(dominated->index());
+
+ if (!dominated->isMovable()) {
+ return true;
+ }
+
+ if (!dominated->fallible()) {
+ return true;
+ }
+
+ MBoundsCheck* dominating =
+ FindDominatingBoundsCheck(checks, dominated, blockIndex);
+ if (!dominating) {
+ return false;
+ }
+
+ if (dominating == dominated) {
+ // We didn't find a dominating bounds check.
+ return true;
+ }
+
+ // We found two bounds checks with the same hash number, but we still have
+ // to make sure the lengths and index terms are equal.
+ if (dominating->length() != dominated->length()) {
+ return true;
+ }
+
+ SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
+ SimpleLinearSum sumB = ExtractLinearSum(dominated->index());
+
+ // Both terms should be nullptr or the same definition.
+ if (sumA.term != sumB.term) {
+ return true;
+ }
+
+ // This bounds check is redundant.
+ *eliminated = true;
+
+ // Normalize the ranges according to the constant offsets in the two indexes.
+ int32_t minimumA, maximumA, minimumB, maximumB;
+ if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
+ !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
+ !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
+ !SafeAdd(sumB.constant, dominated->maximum(), &maximumB)) {
+ return false;
+ }
+
+ // Update the dominating check to cover both ranges, denormalizing the
+ // result per the constant offset in the index.
+ int32_t newMinimum, newMaximum;
+ if (!SafeSub(std::min(minimumA, minimumB), sumA.constant, &newMinimum) ||
+ !SafeSub(std::max(maximumA, maximumB), sumA.constant, &newMaximum)) {
+ return false;
+ }
+
+ dominating->setMinimum(newMinimum);
+ dominating->setMaximum(newMaximum);
+ dominating->setBailoutKind(BailoutKind::HoistBoundsCheck);
+
+ return true;
+}
+
+// Eliminate checks which are redundant given each other or other instructions.
+//
+// A bounds check is considered redundant if it's dominated by another bounds
+// check with the same length and the indexes differ by only a constant amount.
+// In this case we eliminate the redundant bounds check and update the other one
+// to cover the ranges of both checks.
+//
+// Bounds checks are added to a hash map and since the hash function ignores
+// differences in constant offset, this offers a fast way to find redundant
+// checks.
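+//
+// For instance (illustrative): if a check of |a[i]| dominates a check of
+// |a[i + 1]| with the same length, the dominated check is removed and the
+// dominating one is widened so that its [minimum, maximum] offset range
+// covers both accesses.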
+bool jit::EliminateRedundantChecks(MIRGraph& graph) {
+ BoundsCheckMap checks(graph.alloc());
+
+ // Stack for pre-order CFG traversal.
+ Vector<MBasicBlock*, 1, JitAllocPolicy> worklist(graph.alloc());
+
+ // The index of the current block in the CFG traversal.
+ size_t index = 0;
+
+ // Add all self-dominating blocks to the worklist.
+ // This includes all roots. Order does not matter.
+ for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
+ MBasicBlock* block = *i;
+ if (block->immediateDominator() == block) {
+ if (!worklist.append(block)) {
+ return false;
+ }
+ }
+ }
+
+ // Starting from each self-dominating block, traverse the CFG in pre-order.
+ while (!worklist.empty()) {
+ MBasicBlock* block = worklist.popCopy();
+
+    // Add all immediately dominated blocks to the worklist.
+ if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
+ block->immediatelyDominatedBlocksEnd())) {
+ return false;
+ }
+
+ for (MDefinitionIterator iter(block); iter;) {
+ MDefinition* def = *iter++;
+
+ if (!def->isBoundsCheck()) {
+ continue;
+ }
+ auto* boundsCheck = def->toBoundsCheck();
+
+ bool eliminated = false;
+ if (!TryEliminateBoundsCheck(checks, index, boundsCheck, &eliminated)) {
+ return false;
+ }
+
+ if (eliminated) {
+ block->discard(boundsCheck);
+ }
+ }
+ index++;
+ }
+
+ MOZ_ASSERT(index == graph.numBlocks());
+
+ return true;
+}
+
+static bool ShapeGuardIsRedundant(MGuardShape* guard,
+ const MDefinition* storeObject,
+ const Shape* storeShape) {
+ const MDefinition* guardObject = guard->object()->skipObjectGuards();
+ if (guardObject != storeObject) {
+ JitSpew(JitSpew_RedundantShapeGuards, "SKIP: different objects (%d vs %d)",
+ guardObject->id(), storeObject->id());
+ return false;
+ }
+
+ const Shape* guardShape = guard->shape();
+ if (guardShape != storeShape) {
+ JitSpew(JitSpew_RedundantShapeGuards, "SKIP: different shapes");
+ return false;
+ }
+
+ return true;
+}
+
+// Eliminate shape guards which are redundant given other instructions.
+//
+// A shape guard is redundant if we can prove that the object being
+// guarded already has the correct shape. The conditions for doing so
+// are as follows:
+//
+// 1. We can see the most recent change to the shape of this object.
+// (This can be an AddAndStoreSlot, an AllocateAndStoreSlot, or the
+//     creation of the object itself.)
+// 2. That mutation dominates the shape guard.
+// 3. The shape that was assigned at that point matches the shape
+// we expect.
+//
+// If all of these conditions hold, then we can remove the shape guard.
+// In debug, we replace it with an AssertShape to help verify correctness.
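+//
+// For instance (illustrative): a GuardShape on the result of a NewPlainObject
+// is removed when the object's initial shape equals the guarded shape and no
+// shape-changing store occurs in between.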
+bool jit::EliminateRedundantShapeGuards(MIRGraph& graph) {
+ JitSpew(JitSpew_RedundantShapeGuards, "Begin");
+
+ for (ReversePostorderIterator block = graph.rpoBegin();
+ block != graph.rpoEnd(); block++) {
+ for (MInstructionIterator insIter(block->begin());
+ insIter != block->end();) {
+ MInstruction* ins = *insIter;
+ insIter++;
+
+ // Skip instructions that aren't shape guards.
+ if (!ins->isGuardShape()) {
+ continue;
+ }
+ MGuardShape* guard = ins->toGuardShape();
+ MDefinition* lastStore = guard->dependency();
+
+ JitSpew(JitSpew_RedundantShapeGuards, "Visit shape guard %d",
+ guard->id());
+ JitSpewIndent spewIndent(JitSpew_RedundantShapeGuards);
+
+ if (lastStore->isDiscarded() || lastStore->block()->isDead() ||
+ !lastStore->block()->dominates(guard->block())) {
+ JitSpew(JitSpew_RedundantShapeGuards,
+ "SKIP: ins %d does not dominate block %d", lastStore->id(),
+ guard->block()->id());
+ continue;
+ }
+
+ if (lastStore->isAddAndStoreSlot()) {
+ auto* add = lastStore->toAddAndStoreSlot();
+ auto* addObject = add->object()->skipObjectGuards();
+ if (!ShapeGuardIsRedundant(guard, addObject, add->shape())) {
+ continue;
+ }
+ } else if (lastStore->isAllocateAndStoreSlot()) {
+ auto* allocate = lastStore->toAllocateAndStoreSlot();
+ auto* allocateObject = allocate->object()->skipObjectGuards();
+ if (!ShapeGuardIsRedundant(guard, allocateObject, allocate->shape())) {
+ continue;
+ }
+ } else if (lastStore->isStart()) {
+ // The guard doesn't depend on any other instruction that is modifying
+ // the object operand, so we check the object operand directly.
+ auto* obj = guard->object()->skipObjectGuards();
+
+ const Shape* initialShape = nullptr;
+ if (obj->isNewObject()) {
+ auto* templateObject = obj->toNewObject()->templateObject();
+ if (!templateObject) {
+ JitSpew(JitSpew_RedundantShapeGuards, "SKIP: no template");
+ continue;
+ }
+ initialShape = templateObject->shape();
+ } else if (obj->isNewPlainObject()) {
+ initialShape = obj->toNewPlainObject()->shape();
+ } else {
+ JitSpew(JitSpew_RedundantShapeGuards,
+ "SKIP: not NewObject or NewPlainObject (%d)", obj->id());
+ continue;
+ }
+ if (initialShape != guard->shape()) {
+ JitSpew(JitSpew_RedundantShapeGuards, "SKIP: shapes don't match");
+ continue;
+ }
+ } else {
+ JitSpew(JitSpew_RedundantShapeGuards,
+ "SKIP: Last store not supported (%d)", lastStore->id());
+ continue;
+ }
+
+#ifdef DEBUG
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+ auto* assert = MAssertShape::New(graph.alloc(), guard->object(),
+ const_cast<Shape*>(guard->shape()));
+ guard->block()->insertBefore(guard, assert);
+#endif
+
+ JitSpew(JitSpew_RedundantShapeGuards, "SUCCESS: Removing shape guard %d",
+ guard->id());
+ guard->replaceAllUsesWith(guard->input());
+ guard->block()->discard(guard);
+ }
+ }
+
+ return true;
+}
+
+static bool TryEliminateGCBarriersForAllocation(TempAllocator& alloc,
+ MInstruction* allocation) {
+ MOZ_ASSERT(allocation->type() == MIRType::Object);
+
+ JitSpew(JitSpew_RedundantGCBarriers, "Analyzing allocation %s",
+ allocation->opName());
+
+ MBasicBlock* block = allocation->block();
+ MInstructionIterator insIter(block->begin(allocation));
+
+ // Skip `allocation`.
+ MOZ_ASSERT(*insIter == allocation);
+ insIter++;
+
+ // Try to optimize the other instructions in the block.
+ while (insIter != block->end()) {
+ MInstruction* ins = *insIter;
+ insIter++;
+ switch (ins->op()) {
+ case MDefinition::Opcode::Constant:
+ case MDefinition::Opcode::Box:
+ case MDefinition::Opcode::Unbox:
+ case MDefinition::Opcode::AssertCanElidePostWriteBarrier:
+ // These instructions can't trigger GC or affect this analysis in other
+ // ways.
+ break;
+ case MDefinition::Opcode::StoreFixedSlot: {
+ auto* store = ins->toStoreFixedSlot();
+ if (store->object() != allocation) {
+ JitSpew(JitSpew_RedundantGCBarriers,
+ "Stopped at StoreFixedSlot for other object");
+ return true;
+ }
+ store->setNeedsBarrier(false);
+ JitSpew(JitSpew_RedundantGCBarriers, "Elided StoreFixedSlot barrier");
+ break;
+ }
+ case MDefinition::Opcode::PostWriteBarrier: {
+ auto* barrier = ins->toPostWriteBarrier();
+ if (barrier->object() != allocation) {
+ JitSpew(JitSpew_RedundantGCBarriers,
+ "Stopped at PostWriteBarrier for other object");
+ return true;
+ }
+#ifdef DEBUG
+ if (!alloc.ensureBallast()) {
+ return false;
+ }
+ MDefinition* value = barrier->value();
+ if (value->type() != MIRType::Value) {
+ value = MBox::New(alloc, value);
+ block->insertBefore(barrier, value->toInstruction());
+ }
+ auto* assert =
+ MAssertCanElidePostWriteBarrier::New(alloc, allocation, value);
+ block->insertBefore(barrier, assert);
+#endif
+ block->discard(barrier);
+ JitSpew(JitSpew_RedundantGCBarriers, "Elided PostWriteBarrier");
+ break;
+ }
+ default:
+ JitSpew(JitSpew_RedundantGCBarriers,
+ "Stopped at unsupported instruction %s", ins->opName());
+ return true;
+ }
+ }
+
+ return true;
+}
+
+bool jit::EliminateRedundantGCBarriers(MIRGraph& graph) {
+ // Peephole optimization for the following pattern:
+ //
+ // 0: MNewCallObject
+ // 1: MStoreFixedSlot(0, ...)
+ // 2: MStoreFixedSlot(0, ...)
+ // 3: MPostWriteBarrier(0, ...)
+ //
+ // If the instructions immediately following the allocation instruction can't
+ // trigger GC and we are storing to the new object's slots, we can elide the
+ // pre-barrier.
+ //
+ // We also eliminate the post barrier and (in debug builds) replace it with an
+ // assertion.
+ //
+ // See also the similar optimizations in WarpBuilder::buildCallObject.
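+ //
+ // Illustrative JS sketch (hypothetical source that tends to produce this
+ // pattern): a function whose locals are captured by a nested closure
+ // typically allocates a call object and immediately initializes its slots:
+ //
+ //   function outer(x) {
+ //     return function inner() { return x; };
+ //   }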
+
+ JitSpew(JitSpew_RedundantGCBarriers, "Begin");
+
+ for (ReversePostorderIterator block = graph.rpoBegin();
+ block != graph.rpoEnd(); block++) {
+ for (MInstructionIterator insIter(block->begin());
+ insIter != block->end();) {
+ MInstruction* ins = *insIter;
+ insIter++;
+
+ if (ins->isNewCallObject()) {
+ if (!TryEliminateGCBarriersForAllocation(graph.alloc(), ins)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use) {
+ MOZ_ASSERT(slotsOrElements->type() == MIRType::Elements ||
+ slotsOrElements->type() == MIRType::Slots);
+
+ if (slotsOrElements->block() != use->block()) {
+ return true;
+ }
+
+ // Allocating a BigInt can GC, so we have to keep the object alive.
+ if (use->type() == MIRType::BigInt) {
+ return true;
+ }
+
+ MBasicBlock* block = use->block();
+ MInstructionIterator iter(block->begin(slotsOrElements));
+ MOZ_ASSERT(*iter == slotsOrElements);
+ ++iter;
+
+ while (true) {
+ if (*iter == use) {
+ return false;
+ }
+
+ switch (iter->op()) {
+ case MDefinition::Opcode::Nop:
+ case MDefinition::Opcode::Constant:
+ case MDefinition::Opcode::KeepAliveObject:
+ case MDefinition::Opcode::Unbox:
+ case MDefinition::Opcode::LoadDynamicSlot:
+ case MDefinition::Opcode::StoreDynamicSlot:
+ case MDefinition::Opcode::LoadFixedSlot:
+ case MDefinition::Opcode::StoreFixedSlot:
+ case MDefinition::Opcode::LoadElement:
+ case MDefinition::Opcode::LoadElementAndUnbox:
+ case MDefinition::Opcode::StoreElement:
+ case MDefinition::Opcode::StoreHoleValueElement:
+ case MDefinition::Opcode::InitializedLength:
+ case MDefinition::Opcode::ArrayLength:
+ case MDefinition::Opcode::BoundsCheck:
+ case MDefinition::Opcode::GuardElementNotHole:
+ case MDefinition::Opcode::SpectreMaskIndex:
+ case MDefinition::Opcode::DebugEnterGCUnsafeRegion:
+ case MDefinition::Opcode::DebugLeaveGCUnsafeRegion:
+ iter++;
+ break;
+ default:
+ return true;
+ }
+ }
+
+ MOZ_CRASH("Unreachable");
+}
+
+bool jit::AddKeepAliveInstructions(MIRGraph& graph) {
+ for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
+ MBasicBlock* block = *i;
+
+ for (MInstructionIterator insIter(block->begin()); insIter != block->end();
+ insIter++) {
+ MInstruction* ins = *insIter;
+ if (ins->type() != MIRType::Elements && ins->type() != MIRType::Slots) {
+ continue;
+ }
+
+ MDefinition* ownerObject;
+ switch (ins->op()) {
+ case MDefinition::Opcode::Elements:
+ case MDefinition::Opcode::ArrayBufferViewElements:
+ MOZ_ASSERT(ins->numOperands() == 1);
+ ownerObject = ins->getOperand(0);
+ break;
+ case MDefinition::Opcode::Slots:
+ ownerObject = ins->toSlots()->object();
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ MOZ_ASSERT(ownerObject->type() == MIRType::Object);
+
+ if (ownerObject->isConstant()) {
+ // Constants are kept alive by other pointers, for instance
+ // ImmGCPtr in JIT code.
+ continue;
+ }
+
+ for (MUseDefIterator uses(ins); uses; uses++) {
+ MInstruction* use = uses.def()->toInstruction();
+
+ if (use->isStoreElementHole()) {
+ // StoreElementHole has an explicit object operand. If GVN
+ // is disabled, we can get different unbox instructions with
+ // the same object as input, so we check for that case.
+ MOZ_ASSERT_IF(!use->toStoreElementHole()->object()->isUnbox() &&
+ !ownerObject->isUnbox(),
+ use->toStoreElementHole()->object() == ownerObject);
+ continue;
+ }
+
+ if (use->isInArray()) {
+ // See StoreElementHole case above.
+ MOZ_ASSERT_IF(
+ !use->toInArray()->object()->isUnbox() && !ownerObject->isUnbox(),
+ use->toInArray()->object() == ownerObject);
+ continue;
+ }
+
+ if (!NeedsKeepAlive(ins, use)) {
+#ifdef DEBUG
+ // These two instructions don't start a GC unsafe region, because they
+ // overwrite their elements register at the very start. This ensures
+ // there's no invalidated elements value kept on the stack.
+ if (use->isApplyArray() || use->isConstructArray()) {
+ continue;
+ }
+
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+
+ // Enter a GC unsafe region while the elements/slots are on the stack.
+ auto* enter = MDebugEnterGCUnsafeRegion::New(graph.alloc());
+ use->block()->insertAfter(ins, enter);
+
+ // Leave the region after the use.
+ auto* leave = MDebugLeaveGCUnsafeRegion::New(graph.alloc());
+ use->block()->insertAfter(use, leave);
+#endif
+ continue;
+ }
+
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+ MKeepAliveObject* keepAlive =
+ MKeepAliveObject::New(graph.alloc(), ownerObject);
+ use->block()->insertAfter(use, keepAlive);
+ }
+ }
+ }
+
+ return true;
+}
+
+bool LinearSum::multiply(int32_t scale) {
+ for (size_t i = 0; i < terms_.length(); i++) {
+ if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale)) {
+ return false;
+ }
+ }
+ return SafeMul(scale, constant_, &constant_);
+}
+
+bool LinearSum::divide(uint32_t scale) {
+ MOZ_ASSERT(scale > 0);
+
+ for (size_t i = 0; i < terms_.length(); i++) {
+ if (terms_[i].scale % scale != 0) {
+ return false;
+ }
+ }
+ if (constant_ % scale != 0) {
+ return false;
+ }
+
+ for (size_t i = 0; i < terms_.length(); i++) {
+ terms_[i].scale /= scale;
+ }
+ constant_ /= scale;
+
+ return true;
+}
+
+bool LinearSum::add(const LinearSum& other, int32_t scale /* = 1 */) {
+ for (size_t i = 0; i < other.terms_.length(); i++) {
+ int32_t newScale = scale;
+ if (!SafeMul(scale, other.terms_[i].scale, &newScale)) {
+ return false;
+ }
+ if (!add(other.terms_[i].term, newScale)) {
+ return false;
+ }
+ }
+ int32_t newConstant = scale;
+ if (!SafeMul(scale, other.constant_, &newConstant)) {
+ return false;
+ }
+ return add(newConstant);
+}
+
+bool LinearSum::add(SimpleLinearSum other, int32_t scale) {
+ if (other.term && !add(other.term, scale)) {
+ return false;
+ }
+
+ int32_t constant;
+ if (!SafeMul(other.constant, scale, &constant)) {
+ return false;
+ }
+
+ return add(constant);
+}
+
+bool LinearSum::add(MDefinition* term, int32_t scale) {
+ MOZ_ASSERT(term);
+
+ if (scale == 0) {
+ return true;
+ }
+
+ if (MConstant* termConst = term->maybeConstantValue()) {
+ int32_t constant = termConst->toInt32();
+ if (!SafeMul(constant, scale, &constant)) {
+ return false;
+ }
+ return add(constant);
+ }
+
+ for (size_t i = 0; i < terms_.length(); i++) {
+ if (term == terms_[i].term) {
+ if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale)) {
+ return false;
+ }
+ if (terms_[i].scale == 0) {
+ terms_[i] = terms_.back();
+ terms_.popBack();
+ }
+ return true;
+ }
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!terms_.append(LinearTerm(term, scale))) {
+ oomUnsafe.crash("LinearSum::add");
+ }
+
+ return true;
+}
+
+bool LinearSum::add(int32_t constant) {
+ return SafeAdd(constant, constant_, &constant_);
+}
+
+void LinearSum::dump(GenericPrinter& out) const {
+ for (size_t i = 0; i < terms_.length(); i++) {
+ int32_t scale = terms_[i].scale;
+ int32_t id = terms_[i].term->id();
+ MOZ_ASSERT(scale);
+ if (scale > 0) {
+ if (i) {
+ out.printf("+");
+ }
+ if (scale == 1) {
+ out.printf("#%d", id);
+ } else {
+ out.printf("%d*#%d", scale, id);
+ }
+ } else if (scale == -1) {
+ out.printf("-#%d", id);
+ } else {
+ out.printf("%d*#%d", scale, id);
+ }
+ }
+ if (constant_ > 0) {
+ out.printf("+%d", constant_);
+ } else if (constant_ < 0) {
+ out.printf("%d", constant_);
+ }
+}
+
+void LinearSum::dump() const {
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+
+MDefinition* jit::ConvertLinearSum(TempAllocator& alloc, MBasicBlock* block,
+ const LinearSum& sum,
+ BailoutKind bailoutKind) {
+ MDefinition* def = nullptr;
+
+ for (size_t i = 0; i < sum.numTerms(); i++) {
+ LinearTerm term = sum.term(i);
+ MOZ_ASSERT(!term.term->isConstant());
+ if (term.scale == 1) {
+ if (def) {
+ def = MAdd::New(alloc, def, term.term, MIRType::Int32);
+ def->setBailoutKind(bailoutKind);
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ } else {
+ def = term.term;
+ }
+ } else if (term.scale == -1) {
+ if (!def) {
+ def = MConstant::New(alloc, Int32Value(0));
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ }
+ def = MSub::New(alloc, def, term.term, MIRType::Int32);
+ def->setBailoutKind(bailoutKind);
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ } else {
+ MOZ_ASSERT(term.scale != 0);
+ MConstant* factor = MConstant::New(alloc, Int32Value(term.scale));
+ block->insertAtEnd(factor);
+ MMul* mul = MMul::New(alloc, term.term, factor, MIRType::Int32);
+ mul->setBailoutKind(bailoutKind);
+ block->insertAtEnd(mul);
+ mul->computeRange(alloc);
+ if (def) {
+ def = MAdd::New(alloc, def, mul, MIRType::Int32);
+ def->setBailoutKind(bailoutKind);
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ } else {
+ def = mul;
+ }
+ }
+ }
+
+ if (!def) {
+ def = MConstant::New(alloc, Int32Value(0));
+ block->insertAtEnd(def->toInstruction());
+ def->computeRange(alloc);
+ }
+
+ return def;
+}
+
+// Mark all the blocks that are in the loop with the given header.
+// Returns the number of blocks marked. Sets *canOsr to true if the loop is
+// reachable from both the normal entry and the OSR entry.
+size_t jit::MarkLoopBlocks(MIRGraph& graph, MBasicBlock* header, bool* canOsr) {
+#ifdef DEBUG
+ for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
+ i != e; ++i) {
+ MOZ_ASSERT(!i->isMarked(), "Some blocks already marked");
+ }
+#endif
+
+ MBasicBlock* osrBlock = graph.osrBlock();
+ *canOsr = false;
+
+ // The blocks are in RPO; start at the loop backedge, which marks the bottom
+ // of the loop, and walk up until we get to the header. Loops may be
+ // discontiguous, so we trace predecessors to determine which blocks are
+ // actually part of the loop. The backedge is always part of the loop, and
+ // so are its predecessors, transitively, up to the loop header or an OSR
+ // entry.
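+ //
+ // For example (illustrative), given blocks in RPO:
+ //
+ //   header -> A -> X -> B -> backedge
+ //
+ // where X is not a (transitive) predecessor of the backedge, the upward walk
+ // marks backedge, B, A and the header, but leaves X unmarked, so X is not
+ // treated as part of the loop.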
+ MBasicBlock* backedge = header->backedge();
+ backedge->mark();
+ size_t numMarked = 1;
+ for (PostorderIterator i = graph.poBegin(backedge);; ++i) {
+ MOZ_ASSERT(
+ i != graph.poEnd(),
+ "Reached the end of the graph while searching for the loop header");
+ MBasicBlock* block = *i;
+ // If we've reached the loop header, we're done.
+ if (block == header) {
+ break;
+ }
+ // A block not marked by the time we reach it is not in the loop.
+ if (!block->isMarked()) {
+ continue;
+ }
+
+ // This block is in the loop; trace to its predecessors.
+ for (size_t p = 0, e = block->numPredecessors(); p != e; ++p) {
+ MBasicBlock* pred = block->getPredecessor(p);
+ if (pred->isMarked()) {
+ continue;
+ }
+
+ // Blocks dominated by the OSR entry are not part of the loop
+ // (unless they aren't reachable from the normal entry).
+ if (osrBlock && pred != header && osrBlock->dominates(pred) &&
+ !osrBlock->dominates(header)) {
+ *canOsr = true;
+ continue;
+ }
+
+ MOZ_ASSERT(pred->id() >= header->id() && pred->id() <= backedge->id(),
+ "Loop block not between loop header and loop backedge");
+
+ pred->mark();
+ ++numMarked;
+
+ // A nested loop may not exit back to the enclosing loop at its
+ // bottom. If we just marked its header, then the whole nested loop
+ // is part of the enclosing loop.
+ if (pred->isLoopHeader()) {
+ MBasicBlock* innerBackedge = pred->backedge();
+ if (!innerBackedge->isMarked()) {
+ // Mark its backedge so that we add all of its blocks to the
+ // outer loop as we walk upwards.
+ innerBackedge->mark();
+ ++numMarked;
+
+ // If the nested loop is not contiguous, we may have already
+ // passed its backedge. If this happens, back up.
+ if (innerBackedge->id() > block->id()) {
+ i = graph.poBegin(innerBackedge);
+ --i;
+ }
+ }
+ }
+ }
+ }
+
+ // If there's no path connecting the header to the backedge, then this isn't
+ // actually a loop. This can happen when the code starts with a loop but GVN
+ // folds some branches away.
+ if (!header->isMarked()) {
+ jit::UnmarkLoopBlocks(graph, header);
+ return 0;
+ }
+
+ return numMarked;
+}
+
+// Unmark all the blocks that are in the loop with the given header.
+void jit::UnmarkLoopBlocks(MIRGraph& graph, MBasicBlock* header) {
+ MBasicBlock* backedge = header->backedge();
+ for (ReversePostorderIterator i = graph.rpoBegin(header);; ++i) {
+ MOZ_ASSERT(i != graph.rpoEnd(),
+ "Reached the end of the graph while searching for the backedge");
+ MBasicBlock* block = *i;
+ if (block->isMarked()) {
+ block->unmark();
+ if (block == backedge) {
+ break;
+ }
+ }
+ }
+
+#ifdef DEBUG
+ for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
+ i != e; ++i) {
+ MOZ_ASSERT(!i->isMarked(), "Not all blocks got unmarked");
+ }
+#endif
+}
+
+bool jit::FoldLoadsWithUnbox(MIRGenerator* mir, MIRGraph& graph) {
+ // This pass folds MLoadFixedSlot, MLoadDynamicSlot, and MLoadElement
+ // instructions followed by MUnbox into a single instruction. For LoadElement
+ // this allows us to fuse the hole check with the type check for the unbox.
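+ //
+ // Illustrative sketch (hypothetical MIR):
+ //
+ //   v1 = LoadFixedSlot obj, slot        ; produces a boxed Value
+ //   v2 = Unbox v1 to Int32 (fallible)
+ //
+ // becomes
+ //
+ //   v2 = LoadFixedSlotAndUnbox obj, slot, Int32 (fallible)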
+
+ for (MBasicBlockIterator block(graph.begin()); block != graph.end();
+ block++) {
+ if (mir->shouldCancel("FoldLoadsWithUnbox")) {
+ return false;
+ }
+
+ for (MInstructionIterator insIter(block->begin());
+ insIter != block->end();) {
+ MInstruction* ins = *insIter;
+ insIter++;
+
+ // We're only interested in loads producing a Value.
+ if (!ins->isLoadFixedSlot() && !ins->isLoadDynamicSlot() &&
+ !ins->isLoadElement()) {
+ continue;
+ }
+ if (ins->type() != MIRType::Value) {
+ continue;
+ }
+
+ MInstruction* load = ins;
+
+ // Ensure there's a single def-use (ignoring resume points) and it's an
+ // unbox. Unwrap MLexicalCheck because it's redundant if we have a
+ // fallible unbox (checked below).
+ MDefinition* defUse = load->maybeSingleDefUse();
+ if (!defUse) {
+ continue;
+ }
+ MLexicalCheck* lexicalCheck = nullptr;
+ if (defUse->isLexicalCheck()) {
+ lexicalCheck = defUse->toLexicalCheck();
+ defUse = lexicalCheck->maybeSingleDefUse();
+ if (!defUse) {
+ continue;
+ }
+ }
+ if (!defUse->isUnbox()) {
+ continue;
+ }
+
+ // For now require the load and unbox to be in the same block. This isn't
+ // strictly necessary but it's the common case and could prevent bailouts
+ // when moving the unbox before a loop.
+ MUnbox* unbox = defUse->toUnbox();
+ if (unbox->block() != *block) {
+ continue;
+ }
+ MOZ_ASSERT_IF(lexicalCheck, lexicalCheck->block() == *block);
+
+ MOZ_ASSERT(!IsMagicType(unbox->type()));
+
+ // If this is a LoadElement or if we have a lexical check between the load
+ // and unbox, we only support folding the load with a fallible unbox so
+ // that we can eliminate the MagicValue check.
+ if ((load->isLoadElement() || lexicalCheck) && !unbox->fallible()) {
+ continue;
+ }
+
+ // Combine the load and unbox into a single MIR instruction.
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+
+ MIRType type = unbox->type();
+ MUnbox::Mode mode = unbox->mode();
+
+ MInstruction* replacement;
+ switch (load->op()) {
+ case MDefinition::Opcode::LoadFixedSlot: {
+ auto* loadIns = load->toLoadFixedSlot();
+ replacement = MLoadFixedSlotAndUnbox::New(
+ graph.alloc(), loadIns->object(), loadIns->slot(), mode, type);
+ break;
+ }
+ case MDefinition::Opcode::LoadDynamicSlot: {
+ auto* loadIns = load->toLoadDynamicSlot();
+ replacement = MLoadDynamicSlotAndUnbox::New(
+ graph.alloc(), loadIns->slots(), loadIns->slot(), mode, type);
+ break;
+ }
+ case MDefinition::Opcode::LoadElement: {
+ auto* loadIns = load->toLoadElement();
+ MOZ_ASSERT(unbox->fallible());
+ replacement = MLoadElementAndUnbox::New(
+ graph.alloc(), loadIns->elements(), loadIns->index(), mode, type);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected instruction");
+ }
+ replacement->setBailoutKind(BailoutKind::UnboxFolding);
+
+ block->insertBefore(load, replacement);
+ unbox->replaceAllUsesWith(replacement);
+ if (lexicalCheck) {
+ lexicalCheck->replaceAllUsesWith(replacement);
+ }
+ load->replaceAllUsesWith(replacement);
+
+ if (lexicalCheck && *insIter == lexicalCheck) {
+ insIter++;
+ }
+ if (*insIter == unbox) {
+ insIter++;
+ }
+ block->discard(unbox);
+ if (lexicalCheck) {
+ block->discard(lexicalCheck);
+ }
+ block->discard(load);
+ }
+ }
+
+ return true;
+}
+
+// Reorder the blocks in the loop starting at the given header to be contiguous.
+static void MakeLoopContiguous(MIRGraph& graph, MBasicBlock* header,
+ size_t numMarked) {
+ MBasicBlock* backedge = header->backedge();
+
+ MOZ_ASSERT(header->isMarked(), "Loop header is not part of loop");
+ MOZ_ASSERT(backedge->isMarked(), "Loop backedge is not part of loop");
+
+ // If there are any blocks between the loop header and the loop backedge
+ // that are not part of the loop, prepare to move them to the end. We keep
+ // them in order, which preserves RPO.
+ ReversePostorderIterator insertIter = graph.rpoBegin(backedge);
+ insertIter++;
+ MBasicBlock* insertPt = *insertIter;
+
+ // Visit all the blocks from the loop header to the loop backedge.
+ size_t headerId = header->id();
+ size_t inLoopId = headerId;
+ size_t notInLoopId = inLoopId + numMarked;
+ ReversePostorderIterator i = graph.rpoBegin(header);
+ for (;;) {
+ MBasicBlock* block = *i++;
+ MOZ_ASSERT(block->id() >= header->id() && block->id() <= backedge->id(),
+ "Loop backedge should be last block in loop");
+
+ if (block->isMarked()) {
+ // This block is in the loop.
+ block->unmark();
+ block->setId(inLoopId++);
+ // If we've reached the loop backedge, we're done!
+ if (block == backedge) {
+ break;
+ }
+ } else {
+ // This block is not in the loop. Move it to the end.
+ graph.moveBlockBefore(insertPt, block);
+ block->setId(notInLoopId++);
+ }
+ }
+ MOZ_ASSERT(header->id() == headerId, "Loop header id changed");
+ MOZ_ASSERT(inLoopId == headerId + numMarked,
+ "Wrong number of blocks kept in loop");
+ MOZ_ASSERT(notInLoopId == (insertIter != graph.rpoEnd() ? insertPt->id()
+ : graph.numBlocks()),
+ "Wrong number of blocks moved out of loop");
+}
+
+// Reorder the blocks in the graph so that loops are contiguous.
+bool jit::MakeLoopsContiguous(MIRGraph& graph) {
+ // Visit all loop headers (in any order).
+ for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
+ MBasicBlock* header = *i;
+ if (!header->isLoopHeader()) {
+ continue;
+ }
+
+ // Mark all blocks that are actually part of the loop.
+ bool canOsr;
+ size_t numMarked = MarkLoopBlocks(graph, header, &canOsr);
+
+ // If the loop isn't a loop, don't try to optimize it.
+ if (numMarked == 0) {
+ continue;
+ }
+
+ // If there's an OSR block entering the loop in the middle, it's tricky,
+ // so don't try to handle it, for now.
+ if (canOsr) {
+ UnmarkLoopBlocks(graph, header);
+ continue;
+ }
+
+ // Move all blocks between header and backedge that aren't marked to
+ // the end of the loop, making the loop itself contiguous.
+ MakeLoopContiguous(graph, header, numMarked);
+ }
+
+ return true;
+}
+
+static MDefinition* SkipUnbox(MDefinition* ins) {
+ if (ins->isUnbox()) {
+ return ins->toUnbox()->input();
+ }
+ return ins;
+}
+
+bool jit::OptimizeIteratorIndices(MIRGenerator* mir, MIRGraph& graph) {
+ bool changed = false;
+
+ for (ReversePostorderIterator blockIter = graph.rpoBegin();
+ blockIter != graph.rpoEnd();) {
+ MBasicBlock* block = *blockIter++;
+ for (MInstructionIterator insIter(block->begin());
+ insIter != block->end();) {
+ MInstruction* ins = *insIter;
+ insIter++;
+ if (!graph.alloc().ensureBallast()) {
+ return false;
+ }
+
+ MDefinition* receiver = nullptr;
+ MDefinition* idVal = nullptr;
+ MDefinition* setValue = nullptr;
+ if (ins->isMegamorphicHasProp() &&
+ ins->toMegamorphicHasProp()->hasOwn()) {
+ receiver = ins->toMegamorphicHasProp()->object();
+ idVal = ins->toMegamorphicHasProp()->idVal();
+ } else if (ins->isHasOwnCache()) {
+ receiver = ins->toHasOwnCache()->value();
+ idVal = ins->toHasOwnCache()->idval();
+ } else if (ins->isMegamorphicLoadSlotByValue()) {
+ receiver = ins->toMegamorphicLoadSlotByValue()->object();
+ idVal = ins->toMegamorphicLoadSlotByValue()->idVal();
+ } else if (ins->isGetPropertyCache()) {
+ receiver = ins->toGetPropertyCache()->value();
+ idVal = ins->toGetPropertyCache()->idval();
+ } else if (ins->isMegamorphicSetElement()) {
+ receiver = ins->toMegamorphicSetElement()->object();
+ idVal = ins->toMegamorphicSetElement()->index();
+ setValue = ins->toMegamorphicSetElement()->value();
+ } else if (ins->isSetPropertyCache()) {
+ receiver = ins->toSetPropertyCache()->object();
+ idVal = ins->toSetPropertyCache()->idval();
+ setValue = ins->toSetPropertyCache()->value();
+ }
+
+ if (!receiver) {
+ continue;
+ }
+
+ // Given the following structure (that occurs inside for-in loops):
+ // obj: some object
+ // iter: ObjectToIterator <obj>
+ // iterNext: IteratorMore <iter>
+ // access: HasProp/GetElem <obj> <iterNext>
+ // If the iterator object has an indices array, we can speed up the
+ // property access:
+ // 1. If the property access is a HasProp looking for own properties,
+ // then the result will always be true if the iterator has indices,
+ // because we only populate the indices array for objects with no
+ // enumerable properties on the prototype.
+ // 2. If the property access is a GetProp, then we can use the contents
+ // of the indices array to find the correct property faster than
+ // the megamorphic cache.
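+ //
+ // An illustrative JS sketch of the kind of code this targets
+ // (hypothetical):
+ //
+ //   for (var key in obj) {
+ //     if (obj.hasOwnProperty(key)) {
+ //       doSomething(obj[key]);
+ //     }
+ //   }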
+ if (!idVal->isIteratorMore()) {
+ continue;
+ }
+ auto* iterNext = idVal->toIteratorMore();
+
+ if (!iterNext->iterator()->isObjectToIterator()) {
+ continue;
+ }
+
+ MObjectToIterator* iter = iterNext->iterator()->toObjectToIterator();
+ if (SkipUnbox(iter->object()) != SkipUnbox(receiver)) {
+ continue;
+ }
+
+ MInstruction* indicesCheck =
+ MIteratorHasIndices::New(graph.alloc(), iter->object(), iter);
+ MInstruction* replacement;
+ if (ins->isHasOwnCache() || ins->isMegamorphicHasProp()) {
+ MOZ_ASSERT(!setValue);
+ replacement = MConstant::New(graph.alloc(), BooleanValue(true));
+ } else if (ins->isMegamorphicLoadSlotByValue() ||
+ ins->isGetPropertyCache()) {
+ MOZ_ASSERT(!setValue);
+ replacement =
+ MLoadSlotByIteratorIndex::New(graph.alloc(), receiver, iter);
+ } else {
+ MOZ_ASSERT(ins->isMegamorphicSetElement() || ins->isSetPropertyCache());
+ MOZ_ASSERT(setValue);
+ replacement = MStoreSlotByIteratorIndex::New(graph.alloc(), receiver,
+ iter, setValue);
+ }
+
+ if (!block->wrapInstructionInFastpath(ins, replacement, indicesCheck)) {
+ return false;
+ }
+
+ iter->setWantsIndices(true);
+ changed = true;
+
+ // Advance to join block.
+ blockIter = graph.rpoBegin(block->getSuccessor(0)->getSuccessor(0));
+ break;
+ }
+ }
+ if (changed && !AccountForCFGChanges(mir, graph,
+ /*updateAliasAnalysis=*/false)) {
+ return false;
+ }
+
+ return true;
+}
+
+void jit::DumpMIRDefinition(GenericPrinter& out, MDefinition* def) {
+#ifdef JS_JITSPEW
+ out.printf("%u = %s.", def->id(), StringFromMIRType(def->type()));
+ if (def->isConstant()) {
+ def->printOpcode(out);
+ } else {
+ MDefinition::PrintOpcodeName(out, def->op());
+ }
+
+ // Get any extra bits of text that the MIR node wants to show us. Both the
+ // vector and the strings added to it belong to this function, so both will
+ // be automatically freed at exit.
+ ExtrasCollector extras;
+ def->getExtras(&extras);
+ for (size_t i = 0; i < extras.count(); i++) {
+ out.printf(" %s", extras.get(i).get());
+ }
+
+ for (size_t i = 0; i < def->numOperands(); i++) {
+ out.printf(" %u", def->getOperand(i)->id());
+ }
+#endif
+}
+
+void jit::DumpMIRExpressions(GenericPrinter& out, MIRGraph& graph,
+ const CompileInfo& info, const char* phase) {
+#ifdef JS_JITSPEW
+ if (!JitSpewEnabled(JitSpew_MIRExpressions)) {
+ return;
+ }
+
+ out.printf("===== %s =====\n", phase);
+
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); block++) {
+ out.printf(" Block%u:\n", block->id());
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
+ iter != end; iter++) {
+ out.printf(" ");
+ jit::DumpMIRDefinition(out, *iter);
+ out.printf("\n");
+ }
+ for (MInstructionIterator iter(block->begin()), end(block->end());
+ iter != end; iter++) {
+ out.printf(" ");
+ DumpMIRDefinition(out, *iter);
+ out.printf("\n");
+ }
+ }
+
+ if (info.compilingWasm()) {
+ out.printf("===== end wasm MIR dump =====\n");
+ } else {
+ out.printf("===== %s:%u =====\n", info.filename(), info.lineno());
+ }
+#endif
+}
diff --git a/js/src/jit/IonAnalysis.h b/js/src/jit/IonAnalysis.h
new file mode 100644
index 0000000000..cd9b087e3e
--- /dev/null
+++ b/js/src/jit/IonAnalysis.h
@@ -0,0 +1,193 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonAnalysis_h
+#define jit_IonAnalysis_h
+
+// This file declares various analysis passes that operate on MIR.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/IonTypes.h"
+#include "jit/JitAllocPolicy.h"
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+
+namespace js {
+
+class JS_PUBLIC_API GenericPrinter;
+class PlainObject;
+
+namespace jit {
+
+class MBasicBlock;
+class MCompare;
+class MDefinition;
+class MIRGenerator;
+class MIRGraph;
+class MTest;
+
+[[nodiscard]] bool PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph);
+
+[[nodiscard]] bool FoldTests(MIRGraph& graph);
+
+[[nodiscard]] bool FoldEmptyBlocks(MIRGraph& graph);
+
+[[nodiscard]] bool SplitCriticalEdges(MIRGraph& graph);
+
+[[nodiscard]] bool OptimizeIteratorIndices(MIRGenerator* mir, MIRGraph& graph);
+
+bool IsUint32Type(const MDefinition* def);
+
+enum Observability { ConservativeObservability, AggressiveObservability };
+
+[[nodiscard]] bool EliminatePhis(MIRGenerator* mir, MIRGraph& graph,
+ Observability observe);
+
+size_t MarkLoopBlocks(MIRGraph& graph, MBasicBlock* header, bool* canOsr);
+
+void UnmarkLoopBlocks(MIRGraph& graph, MBasicBlock* header);
+
+[[nodiscard]] bool MakeLoopsContiguous(MIRGraph& graph);
+
+[[nodiscard]] bool EliminateTriviallyDeadResumePointOperands(MIRGenerator* mir,
+ MIRGraph& graph);
+
+[[nodiscard]] bool EliminateDeadResumePointOperands(MIRGenerator* mir,
+ MIRGraph& graph);
+
+[[nodiscard]] bool EliminateDeadCode(MIRGenerator* mir, MIRGraph& graph);
+
+[[nodiscard]] bool FoldLoadsWithUnbox(MIRGenerator* mir, MIRGraph& graph);
+
+[[nodiscard]] bool ApplyTypeInformation(MIRGenerator* mir, MIRGraph& graph);
+
+void RenumberBlocks(MIRGraph& graph);
+
+[[nodiscard]] bool AccountForCFGChanges(MIRGenerator* mir, MIRGraph& graph,
+ bool updateAliasAnalysis,
+ bool underValueNumberer = false);
+
+[[nodiscard]] bool RemoveUnmarkedBlocks(MIRGenerator* mir, MIRGraph& graph,
+ uint32_t numMarkedBlocks);
+
+void ClearDominatorTree(MIRGraph& graph);
+
+[[nodiscard]] bool BuildDominatorTree(MIRGraph& graph);
+
+[[nodiscard]] bool BuildPhiReverseMapping(MIRGraph& graph);
+
+void AssertBasicGraphCoherency(MIRGraph& graph, bool force = false);
+
+void AssertGraphCoherency(MIRGraph& graph, bool force = false);
+
+void AssertExtendedGraphCoherency(MIRGraph& graph,
+ bool underValueNumberer = false,
+ bool force = false);
+
+[[nodiscard]] bool EliminateRedundantChecks(MIRGraph& graph);
+
+[[nodiscard]] bool EliminateRedundantShapeGuards(MIRGraph& graph);
+
+[[nodiscard]] bool EliminateRedundantGCBarriers(MIRGraph& graph);
+
+[[nodiscard]] bool AddKeepAliveInstructions(MIRGraph& graph);
+
+// Simple linear sum of the form 'n' or 'x + n'.
+struct SimpleLinearSum {
+ MDefinition* term;
+ int32_t constant;
+
+ SimpleLinearSum(MDefinition* term, int32_t constant)
+ : term(term), constant(constant) {}
+};
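+
+// For example, 'x + 5' is represented as SimpleLinearSum(x, 5), and a plain
+// constant 'n' as SimpleLinearSum(nullptr, n) (illustrative; see
+// ExtractLinearSum).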
+
+// Math done in a linear sum can either be done in a modulo space, in which
+// case overflows wrap around, or in the integer space, in which case we have
+// to check that no overflow can happen when summing constants.
+//
+// When the caller does not know which space applies, it is deduced from the
+// definition.
+enum class MathSpace { Modulo, Infinite, Unknown };
+
+SimpleLinearSum ExtractLinearSum(MDefinition* ins,
+ MathSpace space = MathSpace::Unknown,
+ int32_t recursionDepth = 0);
+
+[[nodiscard]] bool ExtractLinearInequality(MTest* test,
+ BranchDirection direction,
+ SimpleLinearSum* plhs,
+ MDefinition** prhs,
+ bool* plessEqual);
+
+struct LinearTerm {
+ MDefinition* term;
+ int32_t scale;
+
+ LinearTerm(MDefinition* term, int32_t scale) : term(term), scale(scale) {}
+};
+
+// General linear sum of the form 'x1*n1 + x2*n2 + ... + n'
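+//
+// For example (illustrative), '2*x + 3*y + 7' is stored as the terms
+// {x, scale 2} and {y, scale 3} together with constant 7.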
+class LinearSum {
+ public:
+ explicit LinearSum(TempAllocator& alloc) : terms_(alloc), constant_(0) {}
+
+ LinearSum(const LinearSum& other)
+ : terms_(other.terms_.allocPolicy()), constant_(other.constant_) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!terms_.appendAll(other.terms_)) {
+ oomUnsafe.crash("LinearSum::LinearSum");
+ }
+ }
+
+ // These return false on an integer overflow, and afterwards the sum must
+ // not be used.
+ [[nodiscard]] bool multiply(int32_t scale);
+ [[nodiscard]] bool add(const LinearSum& other, int32_t scale = 1);
+ [[nodiscard]] bool add(SimpleLinearSum other, int32_t scale = 1);
+ [[nodiscard]] bool add(MDefinition* term, int32_t scale);
+ [[nodiscard]] bool add(int32_t constant);
+
+ // Unlike the functions above, on failure this leaves the sum unchanged, and
+ // it can still be used.
+ [[nodiscard]] bool divide(uint32_t scale);
+
+ int32_t constant() const { return constant_; }
+ size_t numTerms() const { return terms_.length(); }
+ LinearTerm term(size_t i) const { return terms_[i]; }
+ void replaceTerm(size_t i, MDefinition* def) { terms_[i].term = def; }
+
+ void dump(GenericPrinter& out) const;
+ void dump() const;
+
+ private:
+ Vector<LinearTerm, 2, JitAllocPolicy> terms_;
+ int32_t constant_;
+};
+
+// Convert all components of a linear sum (except the constant) and add any
+// new instructions to the end of the block.
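+//
+// For example (illustrative), converting the sum '2*x - y' appends roughly
+//
+//   t0 = MMul x, Constant(2)
+//   t1 = MSub t0, y
+//
+// to the block; the constant term of the sum is deliberately not emitted.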
+MDefinition* ConvertLinearSum(TempAllocator& alloc, MBasicBlock* block,
+ const LinearSum& sum, BailoutKind bailoutKind);
+
+bool DeadIfUnused(const MDefinition* def);
+bool DeadIfUnusedAllowEffectful(const MDefinition* def);
+
+bool IsDiscardable(const MDefinition* def);
+bool IsDiscardableAllowEffectful(const MDefinition* def);
+
+class CompileInfo;
+void DumpMIRExpressions(GenericPrinter& out, MIRGraph& graph,
+ const CompileInfo& info, const char* phase);
+void DumpMIRDefinition(GenericPrinter& out, MDefinition* def);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonAnalysis_h */
diff --git a/js/src/jit/IonCacheIRCompiler.cpp b/js/src/jit/IonCacheIRCompiler.cpp
new file mode 100644
index 0000000000..f57a313898
--- /dev/null
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -0,0 +1,2140 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonCacheIRCompiler.h"
+#include "mozilla/Maybe.h"
+
+#include <algorithm>
+
+#include "jit/CacheIRCompiler.h"
+#include "jit/CacheIRWriter.h"
+#include "jit/IonIC.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitZone.h"
+#include "jit/JSJitFrameIter.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+#include "jit/VMFunctions.h"
+#include "proxy/DeadObjectProxy.h"
+#include "proxy/Proxy.h"
+#include "util/Memory.h"
+#include "vm/StaticStrings.h"
+
+#include "jit/JSJitFrameIter-inl.h"
+#include "jit/MacroAssembler-inl.h"
+#include "jit/VMFunctionList-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Maybe;
+
+namespace JS {
+struct ExpandoAndGeneration;
+}
+
+using JS::ExpandoAndGeneration;
+
+namespace js {
+namespace jit {
+
+// IonCacheIRCompiler compiles CacheIR to IonIC native code.
+IonCacheIRCompiler::IonCacheIRCompiler(JSContext* cx, TempAllocator& alloc,
+ const CacheIRWriter& writer, IonIC* ic,
+ IonScript* ionScript,
+ uint32_t stubDataOffset)
+ : CacheIRCompiler(cx, alloc, writer, stubDataOffset, Mode::Ion,
+ StubFieldPolicy::Constant),
+ writer_(writer),
+ ic_(ic),
+ ionScript_(ionScript),
+ savedLiveRegs_(false) {
+ MOZ_ASSERT(ic_);
+ MOZ_ASSERT(ionScript_);
+}
+
+template <typename T>
+T IonCacheIRCompiler::rawPointerStubField(uint32_t offset) {
+ static_assert(sizeof(T) == sizeof(uintptr_t), "T must have pointer size");
+ return (T)readStubWord(offset, StubField::Type::RawPointer);
+}
+
+template <typename T>
+T IonCacheIRCompiler::rawInt64StubField(uint32_t offset) {
+ static_assert(sizeof(T) == sizeof(int64_t), "T must have int64 size");
+ return (T)readStubInt64(offset, StubField::Type::RawInt64);
+}
+
+template <typename Fn, Fn fn>
+void IonCacheIRCompiler::callVM(MacroAssembler& masm) {
+ VMFunctionId id = VMFunctionToId<Fn, fn>::id;
+ callVMInternal(masm, id);
+}
+
+void IonCacheIRCompiler::pushStubCodePointer() {
+ stubJitCodeOffset_.emplace(masm.PushWithPatch(ImmPtr((void*)-1)));
+}
+
+// AutoSaveLiveRegisters must be used when we make a call that can GC. The
+// constructor ensures all live registers are stored on the stack (where the GC
+// expects them) and the destructor restores these registers.
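+//
+// Typical usage inside an IC emitter (illustrative sketch):
+//
+//   AutoSaveLiveRegisters save(*this);
+//   // ... set up arguments and make a call that can GC ...
+//
+// The live registers are restored automatically when 'save' goes out of
+// scope.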
+AutoSaveLiveRegisters::AutoSaveLiveRegisters(IonCacheIRCompiler& compiler)
+ : compiler_(compiler) {
+ MOZ_ASSERT(compiler_.liveRegs_.isSome());
+ MOZ_ASSERT(compiler_.ic_);
+ compiler_.allocator.saveIonLiveRegisters(
+ compiler_.masm, compiler_.liveRegs_.ref(),
+ compiler_.ic_->scratchRegisterForEntryJump(), compiler_.ionScript_);
+ compiler_.savedLiveRegs_ = true;
+}
+AutoSaveLiveRegisters::~AutoSaveLiveRegisters() {
+ MOZ_ASSERT(compiler_.stubJitCodeOffset_.isSome(),
+ "Must have pushed JitCode* pointer");
+ compiler_.allocator.restoreIonLiveRegisters(compiler_.masm,
+ compiler_.liveRegs_.ref());
+ MOZ_ASSERT(compiler_.masm.framePushed() == compiler_.ionScript_->frameSize());
+}
+
+} // namespace jit
+} // namespace js
+
+void CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm,
+ LiveRegisterSet liveRegs,
+ Register scratch,
+ IonScript* ionScript) {
+ // We have to push all registers in liveRegs on the stack. It's possible we
+ // stored other values in our live registers and stored operands on the
+ // stack (where our live registers should go), so this requires some careful
+ // work. Try to keep it simple by taking one small step at a time.
+
+ // Step 1. Discard any dead operands so we can reuse their registers.
+ freeDeadOperandLocations(masm);
+
+ // Step 2. Figure out the size of our live regs. This is consistent with
+ // the fact that we're using storeRegsInMask to generate the save code and
+ // PopRegsInMask to generate the restore code.
+ size_t sizeOfLiveRegsInBytes = masm.PushRegsInMaskSizeInBytes(liveRegs);
+
+ MOZ_ASSERT(sizeOfLiveRegsInBytes > 0);
+
+ // Step 3. Ensure all non-input operands are on the stack.
+ size_t numInputs = writer_.numInputOperands();
+ for (size_t i = numInputs; i < operandLocations_.length(); i++) {
+ OperandLocation& loc = operandLocations_[i];
+ if (loc.isInRegister()) {
+ spillOperandToStack(masm, &loc);
+ }
+ }
+
+ // Step 4. Restore the register state, but don't discard the stack as
+ // non-input operands are stored there.
+ restoreInputState(masm, /* shouldDiscardStack = */ false);
+
+ // We just restored the input state, so no input operands should be stored
+ // on the stack.
+#ifdef DEBUG
+ for (size_t i = 0; i < numInputs; i++) {
+ const OperandLocation& loc = operandLocations_[i];
+ MOZ_ASSERT(!loc.isOnStack());
+ }
+#endif
+
+ // Step 5. At this point our register state is correct. Stack values,
+ // however, may cover the space where we have to store the live registers.
+ // Move them out of the way.
+
+ bool hasOperandOnStack = false;
+ for (size_t i = numInputs; i < operandLocations_.length(); i++) {
+ OperandLocation& loc = operandLocations_[i];
+ if (!loc.isOnStack()) {
+ continue;
+ }
+
+ hasOperandOnStack = true;
+
+ size_t operandSize = loc.stackSizeInBytes();
+ size_t operandStackPushed = loc.stackPushed();
+ MOZ_ASSERT(operandSize > 0);
+ MOZ_ASSERT(stackPushed_ >= operandStackPushed);
+ MOZ_ASSERT(operandStackPushed >= operandSize);
+
+ // If this operand doesn't cover the live register space, there's
+ // nothing to do.
+ if (operandStackPushed - operandSize >= sizeOfLiveRegsInBytes) {
+ MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
+ continue;
+ }
+
+ // Reserve stack space for the live registers if needed.
+ if (sizeOfLiveRegsInBytes > stackPushed_) {
+ size_t extraBytes = sizeOfLiveRegsInBytes - stackPushed_;
+ MOZ_ASSERT((extraBytes % sizeof(uintptr_t)) == 0);
+ masm.subFromStackPtr(Imm32(extraBytes));
+ stackPushed_ += extraBytes;
+ }
+
+ // Push the operand below the live register space.
+ if (loc.kind() == OperandLocation::PayloadStack) {
+ masm.push(
+ Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
+ stackPushed_ += operandSize;
+ loc.setPayloadStack(stackPushed_, loc.payloadType());
+ continue;
+ }
+ MOZ_ASSERT(loc.kind() == OperandLocation::ValueStack);
+ masm.pushValue(
+ Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
+ stackPushed_ += operandSize;
+ loc.setValueStack(stackPushed_);
+ }
+
+ // Step 6. If we have any operands on the stack, adjust their stackPushed
+ // values to not include sizeOfLiveRegsInBytes (this simplifies code down
+ // the line). Then push/store the live registers.
+ if (hasOperandOnStack) {
+ MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
+ stackPushed_ -= sizeOfLiveRegsInBytes;
+
+ for (size_t i = numInputs; i < operandLocations_.length(); i++) {
+ OperandLocation& loc = operandLocations_[i];
+ if (loc.isOnStack()) {
+ loc.adjustStackPushed(-int32_t(sizeOfLiveRegsInBytes));
+ }
+ }
+
+ size_t stackBottom = stackPushed_ + sizeOfLiveRegsInBytes;
+ masm.storeRegsInMask(liveRegs, Address(masm.getStackPointer(), stackBottom),
+ scratch);
+ masm.setFramePushed(masm.framePushed() + sizeOfLiveRegsInBytes);
+ } else {
+ // If no operands are on the stack, discard the unused stack space.
+ if (stackPushed_ > 0) {
+ masm.addToStackPtr(Imm32(stackPushed_));
+ stackPushed_ = 0;
+ }
+ masm.PushRegsInMask(liveRegs);
+ }
+ freePayloadSlots_.clear();
+ freeValueSlots_.clear();
+
+ MOZ_ASSERT(masm.framePushed() ==
+ ionScript->frameSize() + sizeOfLiveRegsInBytes);
+
+ // Step 7. All live registers and non-input operands are stored on the stack
+ // now, so at this point all registers except for the input registers are
+ // available.
+ availableRegs_.set() = GeneralRegisterSet::Not(inputRegisterSet());
+ availableRegsAfterSpill_.set() = GeneralRegisterSet();
+
+ // Step 8. We restored our input state, so we have to fix up aliased input
+ // registers again.
+ fixupAliasedInputs(masm);
+}
+
+void CacheRegisterAllocator::restoreIonLiveRegisters(MacroAssembler& masm,
+ LiveRegisterSet liveRegs) {
+ masm.PopRegsInMask(liveRegs);
+
+ availableRegs_.set() = GeneralRegisterSet();
+ availableRegsAfterSpill_.set() = GeneralRegisterSet::All();
+}
+
+static void* GetReturnAddressToIonCode(JSContext* cx) {
+ JSJitFrameIter frame(cx->activation()->asJit());
+ MOZ_ASSERT(frame.type() == FrameType::Exit,
+ "An exit frame is expected as update functions are called with a "
+ "VMFunction.");
+
+ void* returnAddr = frame.returnAddress();
+#ifdef DEBUG
+ ++frame;
+ MOZ_ASSERT(frame.isIonJS());
+#endif
+ return returnAddr;
+}
+
+// The AutoSaveLiveRegisters parameter is used to ensure registers were saved.
+void IonCacheIRCompiler::enterStubFrame(MacroAssembler& masm,
+ const AutoSaveLiveRegisters&) {
+ MOZ_ASSERT(!enteredStubFrame_);
+ pushStubCodePointer();
+ masm.PushFrameDescriptor(FrameType::IonJS);
+ masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));
+
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ enteredStubFrame_ = true;
+}
+
+bool IonCacheIRCompiler::init() {
+ if (!allocator.init()) {
+ return false;
+ }
+
+ size_t numInputs = writer_.numInputOperands();
+ MOZ_ASSERT(numInputs == NumInputsForCacheKind(ic_->kind()));
+
+ AllocatableGeneralRegisterSet available;
+
+ switch (ic_->kind()) {
+ case CacheKind::GetProp:
+ case CacheKind::GetElem: {
+ IonGetPropertyIC* ic = ic_->asGetPropertyIC();
+ ValueOperand output = ic->output();
+
+ available.add(output);
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(output);
+
+ MOZ_ASSERT(numInputs == 1 || numInputs == 2);
+
+ allocator.initInputLocation(0, ic->value());
+ if (numInputs > 1) {
+ allocator.initInputLocation(1, ic->id());
+ }
+ break;
+ }
+ case CacheKind::GetPropSuper:
+ case CacheKind::GetElemSuper: {
+ IonGetPropSuperIC* ic = ic_->asGetPropSuperIC();
+ ValueOperand output = ic->output();
+
+ available.add(output);
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(output);
+
+ MOZ_ASSERT(numInputs == 2 || numInputs == 3);
+
+ allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);
+
+ if (ic->kind() == CacheKind::GetPropSuper) {
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(1, ic->receiver());
+ } else {
+ MOZ_ASSERT(numInputs == 3);
+ allocator.initInputLocation(1, ic->id());
+ allocator.initInputLocation(2, ic->receiver());
+ }
+ break;
+ }
+ case CacheKind::SetProp:
+ case CacheKind::SetElem: {
+ IonSetPropertyIC* ic = ic_->asSetPropertyIC();
+
+ available.add(ic->temp());
+
+ liveRegs_.emplace(ic->liveRegs());
+
+ allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);
+
+ if (ic->kind() == CacheKind::SetProp) {
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(1, ic->rhs());
+ } else {
+ MOZ_ASSERT(numInputs == 3);
+ allocator.initInputLocation(1, ic->id());
+ allocator.initInputLocation(2, ic->rhs());
+ }
+ break;
+ }
+ case CacheKind::GetName: {
+ IonGetNameIC* ic = ic_->asGetNameIC();
+ ValueOperand output = ic->output();
+
+ available.add(output);
+ available.add(ic->temp());
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(output);
+
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
+ break;
+ }
+ case CacheKind::BindName: {
+ IonBindNameIC* ic = ic_->asBindNameIC();
+ Register output = ic->output();
+
+ available.add(output);
+ available.add(ic->temp());
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(
+ TypedOrValueRegister(MIRType::Object, AnyRegister(output)));
+
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
+ break;
+ }
+ case CacheKind::GetIterator: {
+ IonGetIteratorIC* ic = ic_->asGetIteratorIC();
+ Register output = ic->output();
+
+ available.add(output);
+ available.add(ic->temp1());
+ available.add(ic->temp2());
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(
+ TypedOrValueRegister(MIRType::Object, AnyRegister(output)));
+
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, ic->value());
+ break;
+ }
+ case CacheKind::OptimizeSpreadCall: {
+ auto* ic = ic_->asOptimizeSpreadCallIC();
+ ValueOperand output = ic->output();
+
+ available.add(output);
+ available.add(ic->temp());
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(output);
+
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, ic->value());
+ break;
+ }
+ case CacheKind::In: {
+ IonInIC* ic = ic_->asInIC();
+ Register output = ic->output();
+
+ available.add(output);
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(
+ TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));
+
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(0, ic->key());
+ allocator.initInputLocation(
+ 1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->object())));
+ break;
+ }
+ case CacheKind::HasOwn: {
+ IonHasOwnIC* ic = ic_->asHasOwnIC();
+ Register output = ic->output();
+
+ available.add(output);
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(
+ TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));
+
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(0, ic->id());
+ allocator.initInputLocation(1, ic->value());
+ break;
+ }
+ case CacheKind::CheckPrivateField: {
+ IonCheckPrivateFieldIC* ic = ic_->asCheckPrivateFieldIC();
+ Register output = ic->output();
+
+ available.add(output);
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(
+ TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));
+
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(0, ic->value());
+ allocator.initInputLocation(1, ic->id());
+ break;
+ }
+ case CacheKind::InstanceOf: {
+ IonInstanceOfIC* ic = ic_->asInstanceOfIC();
+ Register output = ic->output();
+ available.add(output);
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(
+ TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));
+
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(0, ic->lhs());
+ allocator.initInputLocation(
+ 1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->rhs())));
+ break;
+ }
+ case CacheKind::ToPropertyKey: {
+ IonToPropertyKeyIC* ic = ic_->asToPropertyKeyIC();
+ ValueOperand output = ic->output();
+
+ available.add(output);
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(TypedOrValueRegister(output));
+
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, ic->input());
+ break;
+ }
+ case CacheKind::UnaryArith: {
+ IonUnaryArithIC* ic = ic_->asUnaryArithIC();
+ ValueOperand output = ic->output();
+
+ available.add(output);
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(TypedOrValueRegister(output));
+
+ MOZ_ASSERT(numInputs == 1);
+ allocator.initInputLocation(0, ic->input());
+ break;
+ }
+ case CacheKind::BinaryArith: {
+ IonBinaryArithIC* ic = ic_->asBinaryArithIC();
+ ValueOperand output = ic->output();
+
+ available.add(output);
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(TypedOrValueRegister(output));
+
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(0, ic->lhs());
+ allocator.initInputLocation(1, ic->rhs());
+ break;
+ }
+ case CacheKind::Compare: {
+ IonCompareIC* ic = ic_->asCompareIC();
+ Register output = ic->output();
+
+ available.add(output);
+
+ liveRegs_.emplace(ic->liveRegs());
+ outputUnchecked_.emplace(
+ TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));
+
+ MOZ_ASSERT(numInputs == 2);
+ allocator.initInputLocation(0, ic->lhs());
+ allocator.initInputLocation(1, ic->rhs());
+ break;
+ }
+ case CacheKind::CloseIter: {
+ IonCloseIterIC* ic = ic_->asCloseIterIC();
+
+ available.add(ic->temp());
+
+ liveRegs_.emplace(ic->liveRegs());
+ allocator.initInputLocation(0, ic->iter(), JSVAL_TYPE_OBJECT);
+ break;
+ }
+ case CacheKind::Call:
+ case CacheKind::TypeOf:
+ case CacheKind::ToBool:
+ case CacheKind::GetIntrinsic:
+ case CacheKind::NewArray:
+ case CacheKind::NewObject:
+ MOZ_CRASH("Unsupported IC");
+ }
+
+ liveFloatRegs_ = LiveFloatRegisterSet(liveRegs_->fpus());
+
+ allocator.initAvailableRegs(available);
+ allocator.initAvailableRegsAfterSpill();
+ return true;
+}
+
+JitCode* IonCacheIRCompiler::compile(IonICStub* stub) {
+ AutoCreatedBy acb(masm, "IonCacheIRCompiler::compile");
+
+ masm.setFramePushed(ionScript_->frameSize());
+ if (cx_->runtime()->geckoProfiler().enabled()) {
+ masm.enableProfilingInstrumentation();
+ }
+
+ allocator.fixupAliasedInputs(masm);
+
+ CacheIRReader reader(writer_);
+ do {
+ CacheOp op = reader.readOp();
+ perfSpewer_.recordInstruction(masm, op);
+ switch (op) {
+#define DEFINE_OP(op, ...) \
+ case CacheOp::op: \
+ if (!emit##op(reader)) return nullptr; \
+ break;
+ CACHE_IR_OPS(DEFINE_OP)
+#undef DEFINE_OP
+
+ default:
+ MOZ_CRASH("Invalid op");
+ }
+ allocator.nextOp();
+ } while (reader.more());
+
+ masm.assumeUnreachable("Should have returned from IC");
+
+ // Done emitting the main IC code. Now emit the failure paths.
+ for (size_t i = 0; i < failurePaths.length(); i++) {
+ if (!emitFailurePath(i)) {
+ return nullptr;
+ }
+ Register scratch = ic_->scratchRegisterForEntryJump();
+ CodeOffset offset = masm.movWithPatch(ImmWord(-1), scratch);
+ masm.jump(Address(scratch, 0));
+ if (!nextCodeOffsets_.append(offset)) {
+ return nullptr;
+ }
+ }
+
+ Linker linker(masm);
+ Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Ion));
+ if (!newStubCode) {
+ cx_->recoverFromOutOfMemory();
+ return nullptr;
+ }
+
+ for (CodeOffset offset : nextCodeOffsets_) {
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, offset),
+ ImmPtr(stub->nextCodeRawPtr()),
+ ImmPtr((void*)-1));
+ }
+ if (stubJitCodeOffset_) {
+ Assembler::PatchDataWithValueCheck(
+ CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
+ ImmPtr(newStubCode.get()), ImmPtr((void*)-1));
+ }
+
+ return newStubCode;
+}
+
+#ifdef DEBUG
+void IonCacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
+ switch (ic_->kind()) {
+ case CacheKind::GetProp:
+ case CacheKind::GetElem:
+ case CacheKind::GetPropSuper:
+ case CacheKind::GetElemSuper:
+ case CacheKind::GetName:
+ case CacheKind::BindName:
+ case CacheKind::GetIterator:
+ case CacheKind::In:
+ case CacheKind::HasOwn:
+ case CacheKind::CheckPrivateField:
+ case CacheKind::InstanceOf:
+ case CacheKind::UnaryArith:
+ case CacheKind::ToPropertyKey:
+ case CacheKind::OptimizeSpreadCall:
+ case CacheKind::CloseIter:
+ MOZ_CRASH("No float registers available");
+ case CacheKind::SetProp:
+ case CacheKind::SetElem:
+ // FloatReg0 is available per LIRGenerator::visitSetPropertyCache.
+ MOZ_ASSERT(reg == FloatReg0);
+ break;
+ case CacheKind::BinaryArith:
+ case CacheKind::Compare:
+ // FloatReg0 and FloatReg1 are available per
+ // LIRGenerator::visitBinaryCache.
+ MOZ_ASSERT(reg == FloatReg0 || reg == FloatReg1);
+ break;
+ case CacheKind::Call:
+ case CacheKind::TypeOf:
+ case CacheKind::ToBool:
+ case CacheKind::GetIntrinsic:
+ case CacheKind::NewArray:
+ case CacheKind::NewObject:
+ MOZ_CRASH("Unsupported IC");
+ }
+}
+#endif
+
+bool IonCacheIRCompiler::emitGuardShape(ObjOperandId objId,
+ uint32_t shapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ Shape* shape = shapeStubField(shapeOffset);
+
+ bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
+
+ Maybe<AutoScratchRegister> maybeScratch;
+ if (needSpectreMitigations) {
+ maybeScratch.emplace(allocator, masm);
+ }
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ if (needSpectreMitigations) {
+ masm.branchTestObjShape(Assembler::NotEqual, obj, shape, *maybeScratch, obj,
+ failure->label());
+ } else {
+ masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
+ failure->label());
+ }
+
+ return true;
+}
+
+bool IonCacheIRCompiler::emitGuardProto(ObjOperandId objId,
+ uint32_t protoOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ JSObject* proto = objectStubField(protoOffset);
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadObjProto(obj, scratch);
+ masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(proto),
+ failure->label());
+ return true;
+}
+
+bool IonCacheIRCompiler::emitGuardCompartment(ObjOperandId objId,
+ uint32_t globalOffset,
+ uint32_t compartmentOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ JSObject* globalWrapper = objectStubField(globalOffset);
+ JS::Compartment* compartment = compartmentStubField(compartmentOffset);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+  // Verify that the global wrapper is still valid, as
+  // it is a prerequisite for the compartment check.
+ masm.movePtr(ImmGCPtr(globalWrapper), scratch);
+ Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
+ masm.branchPtr(Assembler::Equal, handlerAddr,
+ ImmPtr(&DeadObjectProxy::singleton), failure->label());
+
+ masm.branchTestObjCompartment(Assembler::NotEqual, obj, compartment, scratch,
+ failure->label());
+ return true;
+}
+
+bool IonCacheIRCompiler::emitGuardAnyClass(ObjOperandId objId,
+ uint32_t claspOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ const JSClass* clasp = classStubField(claspOffset);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ if (objectGuardNeedsSpectreMitigations(objId)) {
+ masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
+ failure->label());
+ } else {
+ masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
+ scratch, failure->label());
+ }
+
+ return true;
+}
+
+bool IonCacheIRCompiler::emitGuardHasProxyHandler(ObjOperandId objId,
+ uint32_t handlerOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ const void* handler = proxyHandlerStubField(handlerOffset);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Address handlerAddr(obj, ProxyObject::offsetOfHandler());
+ masm.branchPtr(Assembler::NotEqual, handlerAddr, ImmPtr(handler),
+ failure->label());
+ return true;
+}
+
+bool IonCacheIRCompiler::emitGuardSpecificObject(ObjOperandId objId,
+ uint32_t expectedOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ JSObject* expected = objectStubField(expectedOffset);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchPtr(Assembler::NotEqual, obj, ImmGCPtr(expected),
+ failure->label());
+ return true;
+}
+
+bool IonCacheIRCompiler::emitGuardSpecificFunction(
+ ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
+ return emitGuardSpecificObject(objId, expectedOffset);
+}
+
+bool IonCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
+ uint32_t expectedOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register str = allocator.useRegister(masm, strId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ JSAtom* atom = &stringStubField(expectedOffset)->asAtom();
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch);
+
+ masm.guardSpecificAtom(str, atom, scratch, volatileRegs, failure->label());
+ return true;
+}
+
+bool IonCacheIRCompiler::emitGuardSpecificSymbol(SymbolOperandId symId,
+ uint32_t expectedOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register sym = allocator.useRegister(masm, symId);
+ JS::Symbol* expected = symbolStubField(expectedOffset);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchPtr(Assembler::NotEqual, sym, ImmGCPtr(expected),
+ failure->label());
+ return true;
+}
+
+bool IonCacheIRCompiler::emitLoadValueResult(uint32_t valOffset) {
+ MOZ_CRASH("Baseline-specific op");
+}
+
+bool IonCacheIRCompiler::emitLoadFixedSlotResult(ObjOperandId objId,
+ uint32_t offsetOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ int32_t offset = int32StubField(offsetOffset);
+ masm.loadTypedOrValue(Address(obj, offset), output);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitLoadFixedSlotTypedResult(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValueType) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitLoadDynamicSlotResult(ObjOperandId objId,
+ uint32_t offsetOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ int32_t offset = int32StubField(offsetOffset);
+
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
+ masm.loadTypedOrValue(Address(scratch, offset), output);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCallScriptedGetterResult(
+ ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+ AutoOutputRegister output(*this);
+
+ ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
+
+ JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
+ AutoScratchRegister scratch(allocator, masm);
+
+ MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));
+
+ allocator.discardStack(masm);
+
+ uint32_t framePushedBefore = masm.framePushed();
+
+ enterStubFrame(masm, save);
+
+ // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
+ // so we just have to make sure the stack is aligned after we push the
+ // |this| + argument Values.
+ uint32_t argSize = (target->nargs() + 1) * sizeof(Value);
+ uint32_t padding =
+ ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
+ MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
+ MOZ_ASSERT(padding < JitStackAlignment);
+ masm.reserveStack(padding);
+
+ for (size_t i = 0; i < target->nargs(); i++) {
+ masm.Push(UndefinedValue());
+ }
+ masm.Push(receiver);
+
+ if (!sameRealm) {
+ masm.switchToRealm(target->realm(), scratch);
+ }
+
+ masm.movePtr(ImmGCPtr(target), scratch);
+
+ masm.Push(scratch);
+ masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 0);
+
+ // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address and
+ // frame pointer pushed by the call/callee.
+ MOZ_ASSERT(
+ ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);
+
+ MOZ_ASSERT(target->hasJitEntry());
+ masm.loadJitCodeRaw(scratch, scratch);
+ masm.callJit(scratch);
+
+ if (!sameRealm) {
+ static_assert(!JSReturnOperand.aliases(ReturnReg),
+ "ReturnReg available as scratch after scripted calls");
+ masm.switchToRealm(cx_->realm(), ReturnReg);
+ }
+
+ masm.storeCallResultValue(output);
+
+ // Restore the frame pointer and stack pointer.
+ masm.loadPtr(Address(FramePointer, 0), FramePointer);
+ masm.freeStack(masm.framePushed() - framePushedBefore);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCallInlinedGetterResult(
+ ValOperandId receiverId, uint32_t getterOffset, uint32_t icScriptOffset,
+ bool sameRealm, uint32_t nargsAndFlagsOffset) {
+ MOZ_CRASH("Trial inlining not supported in Ion");
+}
+
+bool IonCacheIRCompiler::emitCallNativeGetterResult(
+ ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+ AutoOutputRegister output(*this);
+
+ ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
+
+ JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
+ MOZ_ASSERT(target->isNativeFun());
+
+ AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType argUintN(allocator, masm, output);
+ AutoScratchRegister argVp(allocator, masm);
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ // Native functions have the signature:
+ // bool (*)(JSContext*, unsigned, Value* vp)
+ // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
+ // are the function arguments.
+
+ // Construct vp array:
+ // Push receiver value for |this|
+ masm.Push(receiver);
+ // Push callee/outparam.
+ masm.Push(ObjectValue(*target));
+
+ // Preload arguments into registers.
+ masm.loadJSContext(argJSContext);
+ masm.move32(Imm32(0), argUintN);
+ masm.moveStackPtrTo(argVp.get());
+
+ // Push marking data for later use.
+ masm.Push(argUintN);
+ pushStubCodePointer();
+
+ if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
+ return false;
+ }
+ masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);
+
+ if (!sameRealm) {
+ masm.switchToRealm(target->realm(), scratch);
+ }
+
+ // Construct and execute call.
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(argJSContext);
+ masm.passABIArg(argUintN);
+ masm.passABIArg(argVp);
+ masm.callWithABI(DynamicFunction<JSNative>(target->native()), MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ if (!sameRealm) {
+ masm.switchToRealm(cx_->realm(), ReturnReg);
+ }
+
+ // Load the outparam vp[0] into output register(s).
+ Address outparam(masm.getStackPointer(),
+ IonOOLNativeExitFrameLayout::offsetOfResult());
+ masm.loadValue(outparam, output.valueReg());
+
+ if (JitOptions.spectreJitToCxxCalls) {
+ masm.speculationBarrier();
+ }
+
+ masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCallDOMGetterResult(ObjOperandId objId,
+ uint32_t jitInfoOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+ AutoOutputRegister output(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+
+ const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);
+
+ allocator.discardStack(masm);
+ enterStubFrame(masm, save);
+
+ masm.Push(obj);
+ masm.Push(ImmPtr(info));
+
+ using Fn =
+ bool (*)(JSContext*, const JSJitInfo*, HandleObject, MutableHandleValue);
+ callVM<Fn, jit::CallDOMGetter>(masm);
+
+ masm.storeCallResultValue(output);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCallDOMSetter(ObjOperandId objId,
+ uint32_t jitInfoOffset,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+ const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);
+
+ allocator.discardStack(masm);
+ enterStubFrame(masm, save);
+
+ masm.Push(val);
+ masm.Push(obj);
+ masm.Push(ImmPtr(info));
+
+ using Fn = bool (*)(JSContext*, const JSJitInfo*, HandleObject, HandleValue);
+ callVM<Fn, jit::CallDOMSetter>(masm);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitProxyGetResult(ObjOperandId objId,
+ uint32_t idOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+ AutoOutputRegister output(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ jsid id = idStubField(idOffset);
+
+ // ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
+ // MutableHandleValue vp)
+ AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType argProxy(allocator, masm, output);
+ AutoScratchRegister argId(allocator, masm);
+ AutoScratchRegister argVp(allocator, masm);
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ // Push stubCode for marking.
+ pushStubCodePointer();
+
+ // Push args on stack first so we can take pointers to make handles.
+ masm.Push(UndefinedValue());
+ masm.moveStackPtrTo(argVp.get());
+
+ masm.Push(id, scratch);
+ masm.moveStackPtrTo(argId.get());
+
+ // Push the proxy. Also used as receiver.
+ masm.Push(obj);
+ masm.moveStackPtrTo(argProxy.get());
+
+ masm.loadJSContext(argJSContext);
+
+ if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
+ return false;
+ }
+ masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLProxy);
+
+ // Make the call.
+ using Fn = bool (*)(JSContext* cx, HandleObject proxy, HandleId id,
+ MutableHandleValue vp);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(argJSContext);
+ masm.passABIArg(argProxy);
+ masm.passABIArg(argId);
+ masm.passABIArg(argVp);
+ masm.callWithABI<Fn, ProxyGetProperty>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ // Load the outparam vp[0] into output register(s).
+ Address outparam(masm.getStackPointer(),
+ IonOOLProxyExitFrameLayout::offsetOfResult());
+ masm.loadValue(outparam, output.valueReg());
+
+ // Spectre mitigation in case of speculative execution within C++ code.
+ if (JitOptions.spectreJitToCxxCalls) {
+ masm.speculationBarrier();
+ }
+
+  // Equivalent to masm.leaveExitFrame plus popping the locals pushed above.
+ masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
+ return true;
+}
+
+bool IonCacheIRCompiler::emitFrameIsConstructingResult() {
+ MOZ_CRASH("Baseline-specific op");
+}
+
+bool IonCacheIRCompiler::emitLoadConstantStringResult(uint32_t strOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ MOZ_CRASH("not used in ion");
+}
+
+bool IonCacheIRCompiler::emitCompareStringResult(JSOp op, StringOperandId lhsId,
+ StringOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+ AutoOutputRegister output(*this);
+
+ Register left = allocator.useRegister(masm, lhsId);
+ Register right = allocator.useRegister(masm, rhsId);
+
+ allocator.discardStack(masm);
+
+ Label slow, done;
+ MOZ_ASSERT(!output.hasValue());
+ masm.compareStrings(op, left, right, output.typedReg().gpr(), &slow);
+
+ masm.jump(&done);
+ masm.bind(&slow);
+
+ enterStubFrame(masm, save);
+
+ // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
+ // - |left <= right| is implemented as |right >= left|.
+ // - |left > right| is implemented as |right < left|.
+ if (op == JSOp::Le || op == JSOp::Gt) {
+ masm.Push(left);
+ masm.Push(right);
+ } else {
+ masm.Push(right);
+ masm.Push(left);
+ }
+
+ using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
+ if (op == JSOp::Eq || op == JSOp::StrictEq) {
+ callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
+ } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
+ callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
+ } else if (op == JSOp::Lt || op == JSOp::Gt) {
+ callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
+ } else {
+ MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
+ callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
+ }
+
+ masm.storeCallBoolResult(output.typedReg().gpr());
+ masm.bind(&done);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitStoreFixedSlot(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ int32_t offset = int32StubField(offsetOffset);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ Address slot(obj, offset);
+ EmitPreBarrier(masm, slot, MIRType::Value);
+ masm.storeConstantOrRegister(val, slot);
+ emitPostBarrierSlot(obj, val, scratch);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitStoreDynamicSlot(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ int32_t offset = int32StubField(offsetOffset);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
+ Address slot(scratch, offset);
+ EmitPreBarrier(masm, slot, MIRType::Value);
+ masm.storeConstantOrRegister(val, slot);
+ emitPostBarrierSlot(obj, val, scratch);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitAddAndStoreSlotShared(
+ CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset, Maybe<uint32_t> numNewSlotsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ int32_t offset = int32StubField(offsetOffset);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+
+ AutoScratchRegister scratch1(allocator, masm);
+
+ Maybe<AutoScratchRegister> scratch2;
+ if (op == CacheOp::AllocateAndStoreDynamicSlot) {
+ scratch2.emplace(allocator, masm);
+ }
+
+ Shape* newShape = shapeStubField(newShapeOffset);
+
+ if (op == CacheOp::AllocateAndStoreDynamicSlot) {
+ // We have to (re)allocate dynamic slots. Do this first, as it's the
+ // only fallible operation here. Note that growSlotsPure is
+ // fallible but does not GC.
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ int32_t numNewSlots = int32StubField(*numNewSlotsOffset);
+ MOZ_ASSERT(numNewSlots > 0);
+
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ masm.PushRegsInMask(save);
+
+ using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
+ masm.setupUnalignedABICall(scratch1);
+ masm.loadJSContext(scratch1);
+ masm.passABIArg(scratch1);
+ masm.passABIArg(obj);
+ masm.move32(Imm32(numNewSlots), scratch2.ref());
+ masm.passABIArg(scratch2.ref());
+ masm.callWithABI<Fn, NativeObject::growSlotsPure>();
+ masm.storeCallPointerResult(scratch1);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch1);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.branchIfFalseBool(scratch1, failure->label());
+ }
+
+ // Update the object's shape.
+ masm.storeObjShape(newShape, obj,
+ [](MacroAssembler& masm, const Address& addr) {
+ EmitPreBarrier(masm, addr, MIRType::Shape);
+ });
+
+ // Perform the store. No pre-barrier required since this is a new
+ // initialization.
+ if (op == CacheOp::AddAndStoreFixedSlot) {
+ Address slot(obj, offset);
+ masm.storeConstantOrRegister(val, slot);
+ } else {
+ MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
+ op == CacheOp::AllocateAndStoreDynamicSlot);
+ masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
+ Address slot(scratch1, offset);
+ masm.storeConstantOrRegister(val, slot);
+ }
+
+ emitPostBarrierSlot(obj, val, scratch1);
+
+ return true;
+}
+
+bool IonCacheIRCompiler::emitAddAndStoreFixedSlot(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId,
+ uint32_t newShapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
+ return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot, objId,
+ offsetOffset, rhsId, newShapeOffset,
+ numNewSlotsOffset);
+}
+
+bool IonCacheIRCompiler::emitAddAndStoreDynamicSlot(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId,
+ uint32_t newShapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
+ return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot, objId,
+ offsetOffset, rhsId, newShapeOffset,
+ numNewSlotsOffset);
+}
+
+bool IonCacheIRCompiler::emitAllocateAndStoreDynamicSlot(
+ ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset, uint32_t numNewSlotsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot, objId,
+ offsetOffset, rhsId, newShapeOffset,
+ mozilla::Some(numNewSlotsOffset));
+}
+
+bool IonCacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
+ Int32OperandId indexId,
+ bool handleOOB) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register str = allocator.useRegister(masm, strId);
+ Register index = allocator.useRegister(masm, indexId);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
+ AutoScratchRegister scratch3(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Bounds check, load string char.
+ Label done;
+ Label loadFailed;
+ if (!handleOOB) {
+ masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
+ scratch1, failure->label());
+ masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
+ failure->label());
+ } else {
+ // Return the empty string for out-of-bounds access.
+ masm.movePtr(ImmGCPtr(cx_->runtime()->emptyString), scratch2);
+
+ // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
+ // guaranteed to see no nested ropes.
+ masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
+ scratch1, &done);
+ masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
+ }
+
+ // Load StaticString for this char. For larger code units perform a VM call.
+ Label vmCall;
+ masm.boundsCheck32PowerOfTwo(scratch1, StaticStrings::UNIT_STATIC_LIMIT,
+ &vmCall);
+ masm.movePtr(ImmPtr(&cx_->staticStrings().unitStaticTable), scratch2);
+ masm.loadPtr(BaseIndex(scratch2, scratch1, ScalePointer), scratch2);
+
+ masm.jump(&done);
+
+ if (handleOOB) {
+ masm.bind(&loadFailed);
+ masm.assumeUnreachable("loadStringChar can't fail for linear strings");
+ }
+
+ {
+ masm.bind(&vmCall);
+
+    // FailurePath and AutoSaveLiveRegisters don't interact well: both modify
+    // the stack and expect that no other stack manipulations are made.
+    // Therefore we need to use an ABI call instead of a VM call here.
+
+ LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ volatileRegs.takeUnchecked(scratch1);
+ volatileRegs.takeUnchecked(scratch2);
+ volatileRegs.takeUnchecked(scratch3);
+ volatileRegs.takeUnchecked(output);
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = JSLinearString* (*)(JSContext* cx, int32_t code);
+ masm.setupUnalignedABICall(scratch2);
+ masm.loadJSContext(scratch2);
+ masm.passABIArg(scratch2);
+ masm.passABIArg(scratch1);
+ masm.callWithABI<Fn, jit::StringFromCharCodeNoGC>();
+ masm.storeCallPointerResult(scratch2);
+
+ masm.PopRegsInMask(volatileRegs);
+
+ masm.branchPtr(Assembler::Equal, scratch2, ImmWord(0), failure->label());
+ }
+
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_STRING, scratch2, output.valueReg());
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCallNativeSetter(ObjOperandId receiverId,
+ uint32_t setterOffset,
+ ValOperandId rhsId,
+ bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+
+ Register receiver = allocator.useRegister(masm, receiverId);
+ JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
+ MOZ_ASSERT(target->isNativeFun());
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+
+ AutoScratchRegister argJSContext(allocator, masm);
+ AutoScratchRegister argVp(allocator, masm);
+ AutoScratchRegister argUintN(allocator, masm);
+#ifndef JS_CODEGEN_X86
+ AutoScratchRegister scratch(allocator, masm);
+#else
+ // Not enough registers on x86.
+ Register scratch = argUintN;
+#endif
+
+ allocator.discardStack(masm);
+
+ // Set up the call:
+ // bool (*)(JSContext*, unsigned, Value* vp)
+ // vp[0] is callee/outparam
+ // vp[1] is |this|
+ // vp[2] is the value
+
+ // Build vp and move the base into argVpReg.
+ masm.Push(val);
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));
+ masm.Push(ObjectValue(*target));
+ masm.moveStackPtrTo(argVp.get());
+
+ // Preload other regs.
+ masm.loadJSContext(argJSContext);
+ masm.move32(Imm32(1), argUintN);
+
+ // Push marking data for later use.
+ masm.Push(argUintN);
+ pushStubCodePointer();
+
+ if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
+ return false;
+ }
+ masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);
+
+ if (!sameRealm) {
+ masm.switchToRealm(target->realm(), scratch);
+ }
+
+ // Make the call.
+ masm.setupUnalignedABICall(scratch);
+#ifdef JS_CODEGEN_X86
+ // Reload argUintN because it was clobbered.
+ masm.move32(Imm32(1), argUintN);
+#endif
+ masm.passABIArg(argJSContext);
+ masm.passABIArg(argUintN);
+ masm.passABIArg(argVp);
+ masm.callWithABI(DynamicFunction<JSNative>(target->native()), MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
+
+ if (!sameRealm) {
+ masm.switchToRealm(cx_->realm(), ReturnReg);
+ }
+
+ masm.adjustStack(IonOOLNativeExitFrameLayout::Size(1));
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCallScriptedSetter(ObjOperandId receiverId,
+ uint32_t setterOffset,
+ ValOperandId rhsId,
+ bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+
+ Register receiver = allocator.useRegister(masm, receiverId);
+ JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+
+ MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+
+ uint32_t framePushedBefore = masm.framePushed();
+
+ enterStubFrame(masm, save);
+
+ // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
+ // so we just have to make sure the stack is aligned after we push the
+ // |this| + argument Values.
+ size_t numArgs = std::max<size_t>(1, target->nargs());
+ uint32_t argSize = (numArgs + 1) * sizeof(Value);
+ uint32_t padding =
+ ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
+ MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
+ MOZ_ASSERT(padding < JitStackAlignment);
+ masm.reserveStack(padding);
+
+ for (size_t i = 1; i < target->nargs(); i++) {
+ masm.Push(UndefinedValue());
+ }
+ masm.Push(val);
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));
+
+ if (!sameRealm) {
+ masm.switchToRealm(target->realm(), scratch);
+ }
+
+ masm.movePtr(ImmGCPtr(target), scratch);
+
+ masm.Push(scratch);
+ masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 1);
+
+ // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address and
+ // frame pointer pushed by the call/callee.
+ MOZ_ASSERT(
+ ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);
+
+ MOZ_ASSERT(target->hasJitEntry());
+ masm.loadJitCodeRaw(scratch, scratch);
+ masm.callJit(scratch);
+
+ if (!sameRealm) {
+ masm.switchToRealm(cx_->realm(), ReturnReg);
+ }
+
+ // Restore the frame pointer and stack pointer.
+ masm.loadPtr(Address(FramePointer, 0), FramePointer);
+ masm.freeStack(masm.framePushed() - framePushedBefore);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCallInlinedSetter(
+ ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
+ uint32_t icScriptOffset, bool sameRealm, uint32_t nargsAndFlagsOffset) {
+ MOZ_CRASH("Trial inlining not supported in Ion");
+}
+
+bool IonCacheIRCompiler::emitCallSetArrayLength(ObjOperandId objId, bool strict,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+
+ allocator.discardStack(masm);
+ enterStubFrame(masm, save);
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
+ callVM<Fn, jit::SetArrayLength>(masm);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitProxySet(ObjOperandId objId, uint32_t idOffset,
+ ValOperandId rhsId, bool strict) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+ jsid id = idStubField(idOffset);
+
+ AutoScratchRegister scratch(allocator, masm);
+
+ allocator.discardStack(masm);
+ enterStubFrame(masm, save);
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(id, scratch);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
+ callVM<Fn, ProxySetProperty>(masm);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitProxySetByValue(ObjOperandId objId,
+ ValOperandId idId,
+ ValOperandId rhsId, bool strict) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+
+ allocator.discardStack(masm);
+ enterStubFrame(masm, save);
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(idVal);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
+ callVM<Fn, ProxySetPropertyByValue>(masm);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper(
+ ObjOperandId objId, Int32OperandId idId, ValOperandId rhsId, bool strict) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ Register id = allocator.useRegister(masm, idId);
+ ValueOperand val = allocator.useValueRegister(masm, rhsId);
+
+ allocator.discardStack(masm);
+ enterStubFrame(masm, save);
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(id);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
+ HandleValue v, bool strict);
+ callVM<Fn, AddOrUpdateSparseElementHelper>(masm);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitMegamorphicSetElement(ObjOperandId objId,
+ ValOperandId idId,
+ ValOperandId rhsId,
+ bool strict) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+
+ Register obj = allocator.useRegister(masm, objId);
+ ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
+ ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
+
+ allocator.discardStack(masm);
+ enterStubFrame(masm, save);
+
+ masm.Push(Imm32(strict));
+ masm.Push(val);
+ masm.Push(idVal);
+ masm.Push(obj);
+
+ using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
+ callVM<Fn, SetElementMegamorphic<false>>(masm);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitReturnFromIC() {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ if (!savedLiveRegs_) {
+ allocator.restoreInputState(masm);
+ }
+
+ uint8_t* rejoinAddr = ic_->rejoinAddr(ionScript_);
+ masm.jump(ImmPtr(rejoinAddr));
+ return true;
+}
+
+bool IonCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape(
+ ValOperandId expandoId, uint32_t shapeOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ ValueOperand val = allocator.useValueRegister(masm, expandoId);
+ Shape* shape = shapeStubField(shapeOffset);
+
+ AutoScratchRegister objScratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label done;
+ masm.branchTestUndefined(Assembler::Equal, val, &done);
+
+ masm.debugAssertIsObject(val);
+ masm.unboxObject(val, objScratch);
+ // The expando object is not used in this case, so we don't need Spectre
+ // mitigations.
+ masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
+ shape, failure->label());
+
+ masm.bind(&done);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration(
+ ObjOperandId objId, uint32_t expandoAndGenerationOffset,
+ uint32_t generationOffset, ValOperandId resultId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ ExpandoAndGeneration* expandoAndGeneration =
+ rawPointerStubField<ExpandoAndGeneration*>(expandoAndGenerationOffset);
+ uint64_t generation = rawInt64StubField<uint64_t>(generationOffset);
+
+ ValueOperand output = allocator.defineValueRegister(masm, resultId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadDOMExpandoValueGuardGeneration(obj, output, expandoAndGeneration,
+ generation, failure->label());
+ return true;
+}
+
+void IonIC::attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
+ CacheKind kind, IonScript* ionScript,
+ bool* attached) {
+ // We shouldn't GC or report OOM (or any other exception) here.
+ AutoAssertNoPendingException aanpe(cx);
+ JS::AutoCheckCannotGC nogc;
+
+ MOZ_ASSERT(!*attached);
+
+ // Do nothing if the IR generator failed or triggered a GC that invalidated
+ // the script.
+ if (writer.failed() || ionScript->invalidated()) {
+ return;
+ }
+
+ JitZone* jitZone = cx->zone()->jitZone();
+
+ constexpr uint32_t stubDataOffset = sizeof(IonICStub);
+ static_assert(stubDataOffset % sizeof(uint64_t) == 0,
+ "Stub fields must be aligned");
+
+ // Try to reuse a previously-allocated CacheIRStubInfo.
+ CacheIRStubKey::Lookup lookup(kind, ICStubEngine::IonIC, writer.codeStart(),
+ writer.codeLength());
+ CacheIRStubInfo* stubInfo = jitZone->getIonCacheIRStubInfo(lookup);
+ if (!stubInfo) {
+    // Allocate the shared CacheIRStubInfo. Note that the
+    // putIonCacheIRStubInfo call below transfers ownership to the
+    // stub info HashSet, so we don't have to worry about freeing it here.
+
+ // For Ion ICs, we don't track/use the makesGCCalls flag, so just pass true.
+ bool makesGCCalls = true;
+ stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::IonIC, makesGCCalls,
+ stubDataOffset, writer);
+ if (!stubInfo) {
+ return;
+ }
+
+ CacheIRStubKey key(stubInfo);
+ if (!jitZone->putIonCacheIRStubInfo(lookup, key)) {
+ return;
+ }
+ }
+
+ MOZ_ASSERT(stubInfo);
+
+ // Ensure we don't attach duplicate stubs. This can happen if a stub failed
+ // for some reason and the IR generator doesn't check for exactly the same
+ // conditions.
+ for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
+ if (stub->stubInfo() != stubInfo) {
+ continue;
+ }
+ if (!writer.stubDataEquals(stub->stubDataStart())) {
+ continue;
+ }
+ return;
+ }
+
+ size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
+
+ // Allocate the IonICStub in the optimized stub space. Ion stubs and
+ // CacheIRStubInfo instances for Ion stubs can be purged on GC. That's okay
+ // because the stub code is rooted separately when we make a VM call, and
+ // stub code should never access the IonICStub after making a VM call. The
+ // IonICStub::poison method poisons the stub to catch bugs in this area.
+ ICStubSpace* stubSpace = cx->zone()->jitZone()->optimizedStubSpace();
+ void* newStubMem = stubSpace->alloc(bytesNeeded);
+ if (!newStubMem) {
+ return;
+ }
+
+ IonICStub* newStub =
+ new (newStubMem) IonICStub(fallbackAddr(ionScript), stubInfo);
+ writer.copyStubData(newStub->stubDataStart());
+
+ TempAllocator temp(&cx->tempLifoAlloc());
+ JitContext jctx(cx);
+ IonCacheIRCompiler compiler(cx, temp, writer, this, ionScript,
+ stubDataOffset);
+ if (!compiler.init()) {
+ return;
+ }
+
+ JitCode* code = compiler.compile(newStub);
+ if (!code) {
+ return;
+ }
+
+ // Record the stub code if perf spewer is enabled.
+ CacheKind stubKind = newStub->stubInfo()->kind();
+ compiler.perfSpewer().saveProfile(cx, script(), code,
+ CacheKindNames[uint8_t(stubKind)]);
+
+ // Add an entry to the profiler's code table, so that the profiler can
+ // identify this as Ion code.
+ if (ionScript->hasProfilingInstrumentation()) {
+ uint8_t* addr = rejoinAddr(ionScript);
+ auto entry = MakeJitcodeGlobalEntry<IonICEntry>(cx, code, code->raw(),
+ code->rawEnd(), addr);
+ if (!entry) {
+ cx->recoverFromOutOfMemory();
+ return;
+ }
+
+ auto* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (!globalTable->addEntry(std::move(entry))) {
+ return;
+ }
+ }
+
+ attachStub(newStub, code);
+ *attached = true;
+}
+
+bool IonCacheIRCompiler::emitCallStringObjectConcatResult(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+ AutoOutputRegister output(*this);
+
+ ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
+ ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
+
+ allocator.discardStack(masm);
+
+ enterStubFrame(masm, save);
+ masm.Push(rhs);
+ masm.Push(lhs);
+
+ using Fn = bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
+ callVM<Fn, DoConcatStringObject>(masm);
+
+ masm.storeCallResultValue(output);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCloseIterScriptedResult(ObjOperandId iterId,
+ ObjOperandId calleeId,
+ CompletionKind kind,
+ uint32_t calleeNargs) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoSaveLiveRegisters save(*this);
+
+ Register iter = allocator.useRegister(masm, iterId);
+ Register callee = allocator.useRegister(masm, calleeId);
+
+ allocator.discardStack(masm);
+
+ uint32_t framePushedBefore = masm.framePushed();
+
+ // Construct IonICCallFrameLayout.
+ enterStubFrame(masm, save);
+
+ uint32_t stubFramePushed = masm.framePushed();
+
+ // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
+ // so we just have to make sure the stack is aligned after we push |this|
+ // and |calleeNargs| undefined arguments.
+ uint32_t argSize = (calleeNargs + 1) * sizeof(Value);
+ uint32_t padding =
+ ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
+ MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
+ MOZ_ASSERT(padding < JitStackAlignment);
+ masm.reserveStack(padding);
+
+ for (uint32_t i = 0; i < calleeNargs; i++) {
+ masm.Push(UndefinedValue());
+ }
+ masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(iter)));
+
+ masm.Push(callee);
+ masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 0);
+
+ masm.loadJitCodeRaw(callee, callee);
+ masm.callJit(callee);
+
+ if (kind != CompletionKind::Throw) {
+ // Verify that the return value is an object.
+ Label success;
+ masm.branchTestObject(Assembler::Equal, JSReturnOperand, &success);
+
+ // We can reuse the same stub frame, but we first have to pop the arguments
+ // from the previous call.
+ uint32_t framePushedAfterCall = masm.framePushed();
+ masm.freeStack(masm.framePushed() - stubFramePushed);
+
+ masm.push(Imm32(int32_t(CheckIsObjectKind::IteratorReturn)));
+ using Fn = bool (*)(JSContext*, CheckIsObjectKind);
+ callVM<Fn, ThrowCheckIsObject>(masm);
+
+ masm.bind(&success);
+ masm.setFramePushed(framePushedAfterCall);
+ }
+
+ // Restore the frame pointer and stack pointer.
+ masm.loadPtr(Address(FramePointer, 0), FramePointer);
+ masm.freeStack(masm.framePushed() - framePushedBefore);
+ return true;
+}
+
+bool IonCacheIRCompiler::emitGuardFunctionScript(ObjOperandId funId,
+ uint32_t expectedOffset,
+ uint32_t nargsAndFlagsOffset) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register fun = allocator.useRegister(masm, funId);
+ AutoScratchRegister scratch(allocator, masm);
+ BaseScript* expected = baseScriptStubField(expectedOffset);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadPrivate(Address(fun, JSFunction::offsetOfJitInfoOrScript()),
+ scratch);
+ masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(expected),
+ failure->label());
+ return true;
+}
+
+bool IonCacheIRCompiler::emitCallScriptedFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitCallBoundScriptedFunction(ObjOperandId calleeId,
+ ObjOperandId targetId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t numBoundArgs) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitCallWasmFunction(
+ ObjOperandId calleeId, Int32OperandId argcId, CallFlags flags,
+ uint32_t argcFixed, uint32_t funcExportOffset, uint32_t instanceOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+#ifdef JS_SIMULATOR
+bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed,
+ uint32_t targetOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitCallDOMFunction(
+ ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
+ CallFlags flags, uint32_t argcFixed, uint32_t targetOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+#else
+bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed,
+ bool ignoresReturnValue) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitCallDOMFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ ObjOperandId thisObjId,
+ CallFlags flags,
+ uint32_t argcFixed) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+#endif
+
+bool IonCacheIRCompiler::emitCallClassHook(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags, uint32_t argcFixed,
+ uint32_t targetOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitCallInlinedFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ uint32_t icScriptOffset,
+ CallFlags flags,
+ uint32_t argcFixed) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitBindFunctionResult(ObjOperandId targetId,
+ uint32_t argc,
+ uint32_t templateObjectOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitSpecializedBindFunctionResult(
+ ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitLoadArgumentFixedSlot(ValOperandId resultId,
+ uint8_t slotIndex) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitLoadArgumentDynamicSlot(ValOperandId resultId,
+ Int32OperandId argcId,
+ uint8_t slotIndex) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitArrayJoinResult(ObjOperandId objId,
+ StringOperandId sepId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitPackedArraySliceResult(
+ uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
+ Int32OperandId endId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitArgumentsSliceResult(uint32_t templateObjectOffset,
+ ObjOperandId argsId,
+ Int32OperandId beginId,
+ Int32OperandId endId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
+ bool isPossiblyWrapped) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitStringFromCharCodeResult(Int32OperandId codeId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitStringFromCodePointResult(Int32OperandId codeId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitMathRandomResult(uint32_t rngOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitReflectGetPrototypeOfResult(ObjOperandId objId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
+ uint32_t claspOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitSameValueResult(ValOperandId lhs,
+ ValOperandId rhs) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitSetHasStringResult(ObjOperandId setId,
+ StringOperandId strId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitMapHasStringResult(ObjOperandId mapId,
+ StringOperandId strId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitMapGetStringResult(ObjOperandId mapId,
+ StringOperandId strId) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitNewArrayObjectResult(uint32_t arrayLength,
+ uint32_t shapeOffset,
+ uint32_t siteOffset) {
+ MOZ_CRASH("NewArray ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitNewPlainObjectResult(uint32_t numFixedSlots,
+ uint32_t numDynamicSlots,
+ gc::AllocKind allocKind,
+ uint32_t shapeOffset,
+ uint32_t siteOffset) {
+ MOZ_CRASH("NewObject ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitCallRegExpMatcherResult(ObjOperandId regexpId,
+ StringOperandId inputId,
+ Int32OperandId lastIndexId,
+ uint32_t stubOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitCallRegExpSearcherResult(
+ ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
+ uint32_t stubOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitRegExpBuiltinExecMatchResult(
+ ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitRegExpBuiltinExecTestResult(
+ ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
+ MOZ_CRASH("Call ICs not used in ion");
+}
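
[Editor's note - illustrative sketch, not part of the patch above. Several of
the scripted-call paths in this file (emitCallScriptedGetterResult,
emitCallScriptedSetter, emitCloseIterScriptedResult) compute a padding so the
stack stays JitStackAlignment-aligned after pushing |this| and the argument
Values. The standalone C++ below mirrors that arithmetic; PaddingFor and the
alignment/Value sizes are assumed stand-ins, not the real jit helpers.]

    #include <cassert>
    #include <cstdint>

    // Assumed stand-in for jit::ComputeByteAlignment: bytes of padding needed
    // so that (bytes + padding) is a multiple of a power-of-two alignment.
    static uint32_t PaddingFor(uint32_t bytes, uint32_t alignment) {
      return (alignment - (bytes % alignment)) % alignment;
    }

    int main() {
      const uint32_t JitStackAlignment = 16;  // assumed value, for illustration
      const uint32_t sizeOfValue = 8;         // assumed js::Value size
      uint32_t framePushed = 24;              // arbitrary example
      uint32_t nargs = 3;                     // callee's formal argument count

      // Mirror the emitter: align for |this| plus nargs argument Values.
      uint32_t argSize = (nargs + 1) * sizeOfValue;
      uint32_t padding = PaddingFor(framePushed + argSize, JitStackAlignment);
      assert((framePushed + padding + argSize) % JitStackAlignment == 0);
      return 0;
    }
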
diff --git a/js/src/jit/IonCacheIRCompiler.h b/js/src/jit/IonCacheIRCompiler.h
new file mode 100644
index 0000000000..aa902a3671
--- /dev/null
+++ b/js/src/jit/IonCacheIRCompiler.h
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonCacheIRCompiler_h
+#define jit_IonCacheIRCompiler_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+
+#include <stdint.h>
+
+#include "jstypes.h"
+
+#include "jit/CacheIR.h"
+#include "jit/CacheIRCompiler.h"
+#include "jit/CacheIROpsGenerated.h"
+#include "jit/CacheIRReader.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "js/Vector.h"
+
+struct JS_PUBLIC_API JSContext;
+
+namespace js {
+namespace jit {
+
+class CacheIRWriter;
+class CodeOffset;
+class IonIC;
+class IonICStub;
+class IonScript;
+class JitCode;
+class MacroAssembler;
+
+// IonCacheIRCompiler compiles CacheIR to IonIC native code.
+class MOZ_RAII IonCacheIRCompiler : public CacheIRCompiler {
+ public:
+ friend class AutoSaveLiveRegisters;
+ friend class AutoCallVM;
+
+ IonCacheIRCompiler(JSContext* cx, TempAllocator& alloc,
+ const CacheIRWriter& writer, IonIC* ic,
+ IonScript* ionScript, uint32_t stubDataOffset);
+
+ [[nodiscard]] bool init();
+ JitCode* compile(IonICStub* stub);
+
+#ifdef DEBUG
+ void assertFloatRegisterAvailable(FloatRegister reg);
+#endif
+
+ IonICPerfSpewer& perfSpewer() { return perfSpewer_; }
+
+ private:
+ const CacheIRWriter& writer_;
+ IonIC* ic_;
+ IonScript* ionScript_;
+
+ Vector<CodeOffset, 4, SystemAllocPolicy> nextCodeOffsets_;
+ mozilla::Maybe<LiveRegisterSet> liveRegs_;
+ mozilla::Maybe<CodeOffset> stubJitCodeOffset_;
+
+ bool savedLiveRegs_;
+
+ IonICPerfSpewer perfSpewer_;
+
+ template <typename T>
+ T rawPointerStubField(uint32_t offset);
+
+ template <typename T>
+ T rawInt64StubField(uint32_t offset);
+
+ void enterStubFrame(MacroAssembler& masm, const AutoSaveLiveRegisters&);
+
+ template <typename Fn, Fn fn>
+ void callVM(MacroAssembler& masm);
+
+ [[nodiscard]] bool emitAddAndStoreSlotShared(
+ CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset, mozilla::Maybe<uint32_t> numNewSlotsOffset);
+
+ void pushStubCodePointer();
+
+ CACHE_IR_COMPILER_UNSHARED_GENERATED
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonCacheIRCompiler_h */
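
[Editor's note - illustrative sketch, not part of the patch above. The
rawPointerStubField / rawInt64StubField templates declared in this header read
typed values back out of the flat stub-data buffer at byte offsets recorded by
the CacheIR writer. The snippet below shows that general idea with a
hypothetical ReadStubField helper; it is not the real accessor implementation.]

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Hypothetical helper: copy a typed field out of a flat byte buffer at a
    // given offset (memcpy avoids unaligned-access undefined behavior).
    template <typename T>
    T ReadStubField(const uint8_t* stubData, uint32_t offset) {
      T value;
      std::memcpy(&value, stubData + offset, sizeof(T));
      return value;
    }

    int main() {
      uint8_t buffer[16] = {};
      uint64_t generation = 42;
      std::memcpy(buffer + 8, &generation, sizeof(generation));

      std::cout << ReadStubField<uint64_t>(buffer, 8) << "\n";  // prints 42
      return 0;
    }
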
diff --git a/js/src/jit/IonCompileTask.cpp b/js/src/jit/IonCompileTask.cpp
new file mode 100644
index 0000000000..0c01112908
--- /dev/null
+++ b/js/src/jit/IonCompileTask.cpp
@@ -0,0 +1,203 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonCompileTask.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/Ion.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitScript.h"
+#include "jit/WarpSnapshot.h"
+#include "vm/HelperThreadState.h"
+#include "vm/JSScript.h"
+
+#include "vm/JSScript-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void IonCompileTask::runHelperThreadTask(AutoLockHelperThreadState& locked) {
+  // This compilation task is now owned by this thread. Unfreeze the
+  // LifoAlloc to allow mutations.
+ alloc().lifoAlloc()->setReadWrite();
+
+ {
+ AutoUnlockHelperThreadState unlock(locked);
+ runTask();
+ }
+
+ FinishOffThreadIonCompile(this, locked);
+
+ JSRuntime* rt = script()->runtimeFromAnyThread();
+
+ // Ping the main thread so that the compiled code can be incorporated at the
+ // next interrupt callback.
+ //
+ // This must happen before the current task is reset. DestroyContext
+  // cancels in-progress Ion compilations before destroying its target
+ // context, and after we reset the current task we are no longer considered
+ // to be Ion compiling.
+ rt->mainContextFromAnyThread()->requestInterrupt(
+ InterruptReason::AttachIonCompilations);
+}
+
+void IonCompileTask::runTask() {
+  // This is the entry point when Ion compilations are run off thread.
+
+ jit::JitContext jctx(mirGen_.realm->runtime());
+ setBackgroundCodegen(jit::CompileBackEnd(&mirGen_, snapshot_));
+}
+
+void IonCompileTask::trace(JSTracer* trc) {
+ if (!mirGen_.runtime->runtimeMatches(trc->runtime())) {
+ return;
+ }
+
+ snapshot_->trace(trc);
+}
+
+IonCompileTask::IonCompileTask(JSContext* cx, MIRGenerator& mirGen,
+ WarpSnapshot* snapshot)
+ : mirGen_(mirGen),
+ snapshot_(snapshot),
+ isExecuting_(cx->isExecutingRef()) {}
+
+size_t IonCompileTask::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ // See js::jit::FreeIonCompileTask.
+ // The IonCompileTask and most of its contents live in the LifoAlloc we point
+ // to.
+
+ size_t result = alloc().lifoAlloc()->sizeOfIncludingThis(mallocSizeOf);
+
+ if (backgroundCodegen_) {
+ result += mallocSizeOf(backgroundCodegen_);
+ }
+
+ return result;
+}
+
+static inline bool TooManyUnlinkedTasks(JSRuntime* rt) {
+ static const size_t MaxUnlinkedTasks = 100;
+ return rt->jitRuntime()->ionLazyLinkListSize() > MaxUnlinkedTasks;
+}
+
+static void MoveFinishedTasksToLazyLinkList(
+ JSRuntime* rt, const AutoLockHelperThreadState& lock) {
+  // Incorporate any off-thread compilations for this runtime that have
+  // finished, failed, or been cancelled.
+
+ GlobalHelperThreadState::IonCompileTaskVector& finished =
+ HelperThreadState().ionFinishedList(lock);
+
+ for (size_t i = 0; i < finished.length(); i++) {
+ // Find a finished task for the runtime.
+ IonCompileTask* task = finished[i];
+ if (task->script()->runtimeFromAnyThread() != rt) {
+ continue;
+ }
+
+ HelperThreadState().remove(finished, &i);
+ rt->jitRuntime()->numFinishedOffThreadTasksRef(lock)--;
+
+ JSScript* script = task->script();
+ MOZ_ASSERT(script->hasBaselineScript());
+ script->baselineScript()->setPendingIonCompileTask(rt, script, task);
+ rt->jitRuntime()->ionLazyLinkListAdd(rt, task);
+ }
+}
+
+static void EagerlyLinkExcessTasks(JSContext* cx,
+ AutoLockHelperThreadState& lock) {
+ JSRuntime* rt = cx->runtime();
+ MOZ_ASSERT(TooManyUnlinkedTasks(rt));
+
+ do {
+ jit::IonCompileTask* task = rt->jitRuntime()->ionLazyLinkList(rt).getLast();
+ RootedScript script(cx, task->script());
+
+ AutoUnlockHelperThreadState unlock(lock);
+ AutoRealm ar(cx, script);
+ jit::LinkIonScript(cx, script);
+ } while (TooManyUnlinkedTasks(rt));
+}
+
+void jit::AttachFinishedCompilations(JSContext* cx) {
+ JSRuntime* rt = cx->runtime();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+
+ if (!rt->jitRuntime() || !rt->jitRuntime()->numFinishedOffThreadTasks()) {
+ return;
+ }
+
+ AutoLockHelperThreadState lock;
+
+ while (true) {
+ MoveFinishedTasksToLazyLinkList(rt, lock);
+
+ if (!TooManyUnlinkedTasks(rt)) {
+ break;
+ }
+
+ EagerlyLinkExcessTasks(cx, lock);
+
+ // Linking releases the lock so we must now check the finished list
+ // again.
+ }
+
+ MOZ_ASSERT(!rt->jitRuntime()->numFinishedOffThreadTasks());
+}
+
+void jit::FreeIonCompileTask(IonCompileTask* task) {
+ // The task is allocated into its LifoAlloc, so destroying that will
+ // destroy the task and all other data accumulated during compilation,
+ // except any final codegen (which includes an assembler and needs to be
+ // explicitly destroyed).
+ js_delete(task->backgroundCodegen());
+ js_delete(task->alloc().lifoAlloc());
+}
+
+void IonFreeTask::runHelperThreadTask(AutoLockHelperThreadState& locked) {
+ {
+ AutoUnlockHelperThreadState unlock(locked);
+ jit::FreeIonCompileTask(task_);
+ }
+
+ js_delete(this);
+}
+
+void jit::FinishOffThreadTask(JSRuntime* runtime, IonCompileTask* task,
+ const AutoLockHelperThreadState& locked) {
+ MOZ_ASSERT(runtime);
+
+ JSScript* script = task->script();
+
+  // Clear the reference to the pending IonCompileTask, if we just finished it.
+ if (script->baselineScript()->hasPendingIonCompileTask() &&
+ script->baselineScript()->pendingIonCompileTask() == task) {
+ script->baselineScript()->removePendingIonCompileTask(runtime, script);
+ }
+
+ // If the task is still in one of the helper thread lists, then remove it.
+ if (task->isInList()) {
+ runtime->jitRuntime()->ionLazyLinkListRemove(runtime, task);
+ }
+
+ // Clean up if compilation did not succeed.
+ if (script->isIonCompilingOffThread()) {
+ script->jitScript()->clearIsIonCompilingOffThread(script);
+
+ const AbortReasonOr<Ok>& status = task->mirGen().getOffThreadStatus();
+ if (status.isErr() && status.inspectErr() == AbortReason::Disable) {
+ script->disableIon();
+ }
+ }
+
+ // Free Ion LifoAlloc off-thread. Free on the main thread if this OOMs.
+ if (!StartOffThreadIonFree(task, locked)) {
+ FreeIonCompileTask(task);
+ }
+}
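
[Editor's note - illustrative sketch, not part of the patch above.
AttachFinishedCompilations moves finished off-thread compilations onto a
lazy-link list and, once that list grows past a cap, links the excess eagerly
so the list stays bounded. The standalone C++ below sketches that throttling
pattern; Task, LinkEagerly, and the deque-based lists are stand-ins, not the
real jit types.]

    #include <cstddef>
    #include <deque>
    #include <iostream>

    struct Task {
      int id;
    };

    constexpr std::size_t MaxUnlinkedTasks = 100;  // mirrors the cap above

    void LinkEagerly(const Task& task) {
      std::cout << "eagerly linking task " << task.id << "\n";
    }

    void AttachFinished(std::deque<Task>& finished, std::deque<Task>& lazyLink) {
      // Park every finished task on the lazy-link list.
      while (!finished.empty()) {
        lazyLink.push_back(finished.front());
        finished.pop_front();
      }

      // If too many tasks are waiting to be linked, link the excess now
      // instead of waiting for the next interrupt callback.
      while (lazyLink.size() > MaxUnlinkedTasks) {
        LinkEagerly(lazyLink.front());
        lazyLink.pop_front();
      }
    }

    int main() {
      std::deque<Task> finished, lazyLink;
      for (int i = 0; i < 105; i++) {
        finished.push_back(Task{i});
      }
      AttachFinished(finished, lazyLink);
      std::cout << lazyLink.size() << " tasks left for lazy linking\n";  // 100
      return 0;
    }
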
diff --git a/js/src/jit/IonCompileTask.h b/js/src/jit/IonCompileTask.h
new file mode 100644
index 0000000000..c997c13bc6
--- /dev/null
+++ b/js/src/jit/IonCompileTask.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonCompileTask_h
+#define jit_IonCompileTask_h
+
+#include "mozilla/LinkedList.h"
+
+#include "jit/MIRGenerator.h"
+
+#include "js/Utility.h"
+#include "vm/HelperThreadTask.h"
+
+struct JS_PUBLIC_API JSContext;
+
+namespace js {
+namespace jit {
+
+class CodeGenerator;
+class WarpSnapshot;
+
+// IonCompileTask represents a single off-thread Ion compilation task.
+class IonCompileTask final : public HelperThreadTask,
+ public mozilla::LinkedListElement<IonCompileTask> {
+ MIRGenerator& mirGen_;
+
+  // If off-thread compilation is successful, the final code generator is
+  // attached here. Code has been generated but not linked (there is not yet
+  // an IonScript). This is heap-allocated and must be explicitly destroyed;
+  // that is done by FinishOffThreadTask().
+ CodeGenerator* backgroundCodegen_ = nullptr;
+
+ WarpSnapshot* snapshot_ = nullptr;
+
+  // Reference to a flag on the JSContext that scheduled this task, used to
+  // determine the priority of compiling this script. Contexts are destroyed
+  // after their pending tasks are removed from the helper threads, so holding
+  // this reference should be safe.
+ const mozilla::Atomic<bool, mozilla::ReleaseAcquire>& isExecuting_;
+
+ public:
+ explicit IonCompileTask(JSContext* cx, MIRGenerator& mirGen,
+ WarpSnapshot* snapshot);
+
+ JSScript* script() { return mirGen_.outerInfo().script(); }
+ MIRGenerator& mirGen() { return mirGen_; }
+ TempAllocator& alloc() { return mirGen_.alloc(); }
+ WarpSnapshot* snapshot() { return snapshot_; }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+ void trace(JSTracer* trc);
+
+ CodeGenerator* backgroundCodegen() const { return backgroundCodegen_; }
+ void setBackgroundCodegen(CodeGenerator* codegen) {
+ backgroundCodegen_ = codegen;
+ }
+
+ // Return whether the main thread which scheduled this task is currently
+ // executing JS code. This changes the way we prioritize tasks.
+ bool isMainThreadRunningJS() const { return isExecuting_; }
+
+ ThreadType threadType() override { return THREAD_TYPE_ION; }
+ void runTask();
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+};
+
+class IonFreeTask : public HelperThreadTask {
+ public:
+ explicit IonFreeTask(IonCompileTask* task) : task_(task) {}
+ IonCompileTask* compileTask() { return task_; }
+
+ ThreadType threadType() override { return THREAD_TYPE_ION_FREE; }
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+
+ private:
+ IonCompileTask* task_;
+};
+
+void AttachFinishedCompilations(JSContext* cx);
+void FinishOffThreadTask(JSRuntime* runtime, IonCompileTask* task,
+ const AutoLockHelperThreadState& lock);
+void FreeIonCompileTask(IonCompileTask* task);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonCompileTask_h */
diff --git a/js/src/jit/IonIC.cpp b/js/src/jit/IonIC.cpp
new file mode 100644
index 0000000000..b6536d6d83
--- /dev/null
+++ b/js/src/jit/IonIC.cpp
@@ -0,0 +1,727 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonIC.h"
+
+#include "jit/CacheIRCompiler.h"
+#include "jit/CacheIRGenerator.h"
+#include "jit/IonScript.h"
+#include "jit/VMFunctions.h"
+#include "util/DiagnosticAssertions.h"
+#include "vm/EqualityOperations.h"
+#include "vm/Iteration.h"
+
+#include "vm/Interpreter-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void IonIC::resetCodeRaw(IonScript* ionScript) {
+ codeRaw_ = fallbackAddr(ionScript);
+}
+
+uint8_t* IonIC::fallbackAddr(IonScript* ionScript) const {
+ return ionScript->method()->raw() + fallbackOffset_;
+}
+
+uint8_t* IonIC::rejoinAddr(IonScript* ionScript) const {
+ return ionScript->method()->raw() + rejoinOffset_;
+}
+
+Register IonIC::scratchRegisterForEntryJump() {
+ switch (kind_) {
+ case CacheKind::GetProp:
+ case CacheKind::GetElem:
+ return asGetPropertyIC()->output().scratchReg();
+ case CacheKind::GetPropSuper:
+ case CacheKind::GetElemSuper:
+ return asGetPropSuperIC()->output().scratchReg();
+ case CacheKind::SetProp:
+ case CacheKind::SetElem:
+ return asSetPropertyIC()->temp();
+ case CacheKind::GetName:
+ return asGetNameIC()->temp();
+ case CacheKind::BindName:
+ return asBindNameIC()->temp();
+ case CacheKind::In:
+ return asInIC()->temp();
+ case CacheKind::HasOwn:
+ return asHasOwnIC()->output();
+ case CacheKind::CheckPrivateField:
+ return asCheckPrivateFieldIC()->output();
+ case CacheKind::GetIterator:
+ return asGetIteratorIC()->temp1();
+ case CacheKind::OptimizeSpreadCall:
+ return asOptimizeSpreadCallIC()->temp();
+ case CacheKind::InstanceOf:
+ return asInstanceOfIC()->output();
+ case CacheKind::UnaryArith:
+ return asUnaryArithIC()->output().scratchReg();
+ case CacheKind::ToPropertyKey:
+ return asToPropertyKeyIC()->output().scratchReg();
+ case CacheKind::BinaryArith:
+ return asBinaryArithIC()->output().scratchReg();
+ case CacheKind::Compare:
+ return asCompareIC()->output();
+ case CacheKind::CloseIter:
+ return asCloseIterIC()->temp();
+ case CacheKind::Call:
+ case CacheKind::TypeOf:
+ case CacheKind::ToBool:
+ case CacheKind::GetIntrinsic:
+ case CacheKind::NewArray:
+ case CacheKind::NewObject:
+ MOZ_CRASH("Unsupported IC");
+ }
+
+ MOZ_CRASH("Invalid kind");
+}
+
+void IonIC::discardStubs(Zone* zone, IonScript* ionScript) {
+ if (firstStub_) {
+    // We are removing edges from the IonIC to GC things. Perform a pre-write
+    // barrier to let the GC know about those edges.
+ PreWriteBarrier(zone, ionScript);
+ }
+
+#ifdef JS_CRASH_DIAGNOSTICS
+ IonICStub* stub = firstStub_;
+ while (stub) {
+ IonICStub* next = stub->next();
+ stub->poison();
+ stub = next;
+ }
+#endif
+
+ firstStub_ = nullptr;
+ resetCodeRaw(ionScript);
+ state_.trackUnlinkedAllStubs();
+}
+
+void IonIC::reset(Zone* zone, IonScript* ionScript) {
+ discardStubs(zone, ionScript);
+ state_.reset();
+}
+
+void IonIC::trace(JSTracer* trc, IonScript* ionScript) {
+ if (script_) {
+ TraceManuallyBarrieredEdge(trc, &script_, "IonIC::script_");
+ }
+
+ uint8_t* nextCodeRaw = codeRaw_;
+ for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
+ JitCode* code = JitCode::FromExecutable(nextCodeRaw);
+ TraceManuallyBarrieredEdge(trc, &code, "ion-ic-code");
+
+ TraceCacheIRStub(trc, stub, stub->stubInfo());
+
+ nextCodeRaw = stub->nextCodeRaw();
+ }
+
+ MOZ_ASSERT(nextCodeRaw == fallbackAddr(ionScript));
+}
+
+// This helper handles ICState updates/transitions while attaching CacheIR
+// stubs.
+template <typename IRGenerator, typename... Args>
+static void TryAttachIonStub(JSContext* cx, IonIC* ic, IonScript* ionScript,
+ Args&&... args) {
+ if (ic->state().maybeTransition()) {
+ ic->discardStubs(cx->zone(), ionScript);
+ }
+
+ if (ic->state().canAttachStub()) {
+ RootedScript script(cx, ic->script());
+ bool attached = false;
+ IRGenerator gen(cx, script, ic->pc(), ic->state(),
+ std::forward<Args>(args)...);
+ switch (gen.tryAttachStub()) {
+ case AttachDecision::Attach:
+ ic->attachCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), ionScript,
+ &attached);
+ break;
+ case AttachDecision::NoAction:
+ break;
+ case AttachDecision::TemporarilyUnoptimizable:
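+        // Treat this as attached so we do not bump the failure counter and
+        // prematurely transition the ICState; a stub may attach later.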
+ attached = true;
+ break;
+ case AttachDecision::Deferred:
+ MOZ_ASSERT_UNREACHABLE("Not expected in generic TryAttachIonStub");
+ break;
+ }
+ if (!attached) {
+ ic->state().trackNotAttached();
+ }
+ }
+}
+
+/* static */
+bool IonGetPropertyIC::update(JSContext* cx, HandleScript outerScript,
+ IonGetPropertyIC* ic, HandleValue val,
+ HandleValue idVal, MutableHandleValue res) {
+ IonScript* ionScript = outerScript->ionScript();
+
+ // Optimized-arguments and other magic values must not escape to Ion ICs.
+ MOZ_ASSERT(!val.isMagic());
+
+ TryAttachIonStub<GetPropIRGenerator>(cx, ic, ionScript, ic->kind(), val,
+ idVal);
+
+ if (ic->kind() == CacheKind::GetProp) {
+ Rooted<PropertyName*> name(cx, idVal.toString()->asAtom().asPropertyName());
+ if (!GetProperty(cx, val, name, res)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(ic->kind() == CacheKind::GetElem);
+ if (!GetElementOperation(cx, val, idVal, res)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* static */
+bool IonGetPropSuperIC::update(JSContext* cx, HandleScript outerScript,
+ IonGetPropSuperIC* ic, HandleObject obj,
+ HandleValue receiver, HandleValue idVal,
+ MutableHandleValue res) {
+ IonScript* ionScript = outerScript->ionScript();
+
+ if (ic->state().maybeTransition()) {
+ ic->discardStubs(cx->zone(), ionScript);
+ }
+
+ RootedValue val(cx, ObjectValue(*obj));
+
+ TryAttachIonStub<GetPropIRGenerator>(cx, ic, ionScript, ic->kind(), val,
+ idVal);
+
+ if (ic->kind() == CacheKind::GetPropSuper) {
+ Rooted<PropertyName*> name(cx, idVal.toString()->asAtom().asPropertyName());
+ if (!GetProperty(cx, obj, receiver, name, res)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(ic->kind() == CacheKind::GetElemSuper);
+
+ JSOp op = JSOp(*ic->pc());
+ MOZ_ASSERT(op == JSOp::GetElemSuper);
+
+ if (!GetObjectElementOperation(cx, op, obj, receiver, idVal, res)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* static */
+bool IonSetPropertyIC::update(JSContext* cx, HandleScript outerScript,
+ IonSetPropertyIC* ic, HandleObject obj,
+ HandleValue idVal, HandleValue rhs) {
+ using DeferType = SetPropIRGenerator::DeferType;
+
+ Rooted<Shape*> oldShape(cx);
+ IonScript* ionScript = outerScript->ionScript();
+
+ bool attached = false;
+ DeferType deferType = DeferType::None;
+
+ if (ic->state().maybeTransition()) {
+ ic->discardStubs(cx->zone(), ionScript);
+ }
+
+ if (ic->state().canAttachStub()) {
+ oldShape = obj->shape();
+
+ RootedValue objv(cx, ObjectValue(*obj));
+ RootedScript script(cx, ic->script());
+ jsbytecode* pc = ic->pc();
+
+ SetPropIRGenerator gen(cx, script, pc, ic->kind(), ic->state(), objv, idVal,
+ rhs);
+ switch (gen.tryAttachStub()) {
+ case AttachDecision::Attach:
+ ic->attachCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), ionScript,
+ &attached);
+ break;
+ case AttachDecision::NoAction:
+ break;
+ case AttachDecision::TemporarilyUnoptimizable:
+ attached = true;
+ break;
+ case AttachDecision::Deferred:
+ deferType = gen.deferType();
+ MOZ_ASSERT(deferType != DeferType::None);
+ break;
+ }
+ }
+
+ jsbytecode* pc = ic->pc();
+ if (ic->kind() == CacheKind::SetElem) {
+ if (JSOp(*pc) == JSOp::InitElemInc) {
+ if (!InitElemIncOperation(cx, obj.as<ArrayObject>(), idVal.toInt32(),
+ rhs)) {
+ return false;
+ }
+ } else if (IsPropertyInitOp(JSOp(*pc))) {
+ if (!InitElemOperation(cx, pc, obj, idVal, rhs)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc)));
+ if (!SetObjectElement(cx, obj, idVal, rhs, ic->strict())) {
+ return false;
+ }
+ }
+ } else {
+ MOZ_ASSERT(ic->kind() == CacheKind::SetProp);
+
+ if (JSOp(*pc) == JSOp::InitGLexical) {
+ RootedScript script(cx, ic->script());
+ MOZ_ASSERT(!script->hasNonSyntacticScope());
+ InitGlobalLexicalOperation(cx, &cx->global()->lexicalEnvironment(),
+ script, pc, rhs);
+ } else if (IsPropertyInitOp(JSOp(*pc))) {
+ Rooted<PropertyName*> name(cx,
+ idVal.toString()->asAtom().asPropertyName());
+ if (!InitPropertyOperation(cx, pc, obj, name, rhs)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(IsPropertySetOp(JSOp(*pc)));
+ Rooted<PropertyName*> name(cx,
+ idVal.toString()->asAtom().asPropertyName());
+ if (!SetProperty(cx, obj, name, rhs, ic->strict(), pc)) {
+ return false;
+ }
+ }
+ }
+
+ if (attached) {
+ return true;
+ }
+
+ // The SetProperty call might have entered this IC recursively, so try
+ // to transition.
+ if (ic->state().maybeTransition()) {
+ ic->discardStubs(cx->zone(), ionScript);
+ }
+
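+  // A deferred AddSlot stub can only be attached after the property operation
+  // above has actually added the slot, so the object's new shape is known.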
+ bool canAttachStub = ic->state().canAttachStub();
+ if (deferType != DeferType::None && canAttachStub) {
+ RootedValue objv(cx, ObjectValue(*obj));
+ RootedScript script(cx, ic->script());
+ jsbytecode* pc = ic->pc();
+ SetPropIRGenerator gen(cx, script, pc, ic->kind(), ic->state(), objv, idVal,
+ rhs);
+ MOZ_ASSERT(deferType == DeferType::AddSlot);
+ AttachDecision decision = gen.tryAttachAddSlotStub(oldShape);
+
+ switch (decision) {
+ case AttachDecision::Attach:
+ ic->attachCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), ionScript,
+ &attached);
+ break;
+ case AttachDecision::NoAction:
+ gen.trackAttached(IRGenerator::NotAttached);
+ break;
+ case AttachDecision::TemporarilyUnoptimizable:
+ case AttachDecision::Deferred:
+ MOZ_ASSERT_UNREACHABLE("Invalid attach result");
+ break;
+ }
+ }
+ if (!attached && canAttachStub) {
+ ic->state().trackNotAttached();
+ }
+
+ return true;
+}
+
+/* static */
+bool IonGetNameIC::update(JSContext* cx, HandleScript outerScript,
+ IonGetNameIC* ic, HandleObject envChain,
+ MutableHandleValue res) {
+ IonScript* ionScript = outerScript->ionScript();
+ jsbytecode* pc = ic->pc();
+ Rooted<PropertyName*> name(cx, ic->script()->getName(pc));
+
+ TryAttachIonStub<GetNameIRGenerator>(cx, ic, ionScript, envChain, name);
+
+ RootedObject obj(cx);
+ RootedObject holder(cx);
+ PropertyResult prop;
+ if (!LookupName(cx, name, envChain, &obj, &holder, &prop)) {
+ return false;
+ }
+
+ if (JSOp(*GetNextPc(pc)) == JSOp::Typeof) {
+ return FetchName<GetNameMode::TypeOf>(cx, obj, holder, name, prop, res);
+ }
+
+ return FetchName<GetNameMode::Normal>(cx, obj, holder, name, prop, res);
+}
+
+/* static */
+JSObject* IonBindNameIC::update(JSContext* cx, HandleScript outerScript,
+ IonBindNameIC* ic, HandleObject envChain) {
+ IonScript* ionScript = outerScript->ionScript();
+ jsbytecode* pc = ic->pc();
+ Rooted<PropertyName*> name(cx, ic->script()->getName(pc));
+
+ TryAttachIonStub<BindNameIRGenerator>(cx, ic, ionScript, envChain, name);
+
+ RootedObject holder(cx);
+ if (!LookupNameUnqualified(cx, name, envChain, &holder)) {
+ return nullptr;
+ }
+
+ return holder;
+}
+
+/* static */
+JSObject* IonGetIteratorIC::update(JSContext* cx, HandleScript outerScript,
+ IonGetIteratorIC* ic, HandleValue value) {
+ IonScript* ionScript = outerScript->ionScript();
+
+ TryAttachIonStub<GetIteratorIRGenerator>(cx, ic, ionScript, value);
+
+ PropertyIteratorObject* iterObj = ValueToIterator(cx, value);
+ if (!iterObj) {
+ return nullptr;
+ }
+
+ return iterObj;
+}
+
+/* static */
+bool IonOptimizeSpreadCallIC::update(JSContext* cx, HandleScript outerScript,
+ IonOptimizeSpreadCallIC* ic,
+ HandleValue value,
+ MutableHandleValue result) {
+ IonScript* ionScript = outerScript->ionScript();
+
+ TryAttachIonStub<OptimizeSpreadCallIRGenerator>(cx, ic, ionScript, value);
+
+ return OptimizeSpreadCall(cx, value, result);
+}
+
+/* static */
+bool IonHasOwnIC::update(JSContext* cx, HandleScript outerScript,
+ IonHasOwnIC* ic, HandleValue val, HandleValue idVal,
+ int32_t* res) {
+ IonScript* ionScript = outerScript->ionScript();
+
+ TryAttachIonStub<HasPropIRGenerator>(cx, ic, ionScript, CacheKind::HasOwn,
+ idVal, val);
+
+ bool found;
+ if (!HasOwnProperty(cx, val, idVal, &found)) {
+ return false;
+ }
+
+ *res = found;
+ return true;
+}
+
+/* static */
+bool IonCheckPrivateFieldIC::update(JSContext* cx, HandleScript outerScript,
+ IonCheckPrivateFieldIC* ic, HandleValue val,
+ HandleValue idVal, bool* res) {
+ IonScript* ionScript = outerScript->ionScript();
+ jsbytecode* pc = ic->pc();
+
+ TryAttachIonStub<CheckPrivateFieldIRGenerator>(
+ cx, ic, ionScript, CacheKind::CheckPrivateField, idVal, val);
+
+ return CheckPrivateFieldOperation(cx, pc, val, idVal, res);
+}
+
+/* static */
+bool IonInIC::update(JSContext* cx, HandleScript outerScript, IonInIC* ic,
+ HandleValue key, HandleObject obj, bool* res) {
+ IonScript* ionScript = outerScript->ionScript();
+ RootedValue objV(cx, ObjectValue(*obj));
+
+ TryAttachIonStub<HasPropIRGenerator>(cx, ic, ionScript, CacheKind::In, key,
+ objV);
+
+ return OperatorIn(cx, key, obj, res);
+}
+
+/* static */
+bool IonInstanceOfIC::update(JSContext* cx, HandleScript outerScript,
+ IonInstanceOfIC* ic, HandleValue lhs,
+ HandleObject rhs, bool* res) {
+ IonScript* ionScript = outerScript->ionScript();
+
+ TryAttachIonStub<InstanceOfIRGenerator>(cx, ic, ionScript, lhs, rhs);
+
+ return InstanceofOperator(cx, rhs, lhs, res);
+}
+
+/* static */
+bool IonToPropertyKeyIC::update(JSContext* cx, HandleScript outerScript,
+ IonToPropertyKeyIC* ic, HandleValue val,
+ MutableHandleValue res) {
+ IonScript* ionScript = outerScript->ionScript();
+
+ TryAttachIonStub<ToPropertyKeyIRGenerator>(cx, ic, ionScript, val);
+
+ return ToPropertyKeyOperation(cx, val, res);
+}
+
+/* static */
+bool IonCloseIterIC::update(JSContext* cx, HandleScript outerScript,
+ IonCloseIterIC* ic, HandleObject iter) {
+ IonScript* ionScript = outerScript->ionScript();
+ CompletionKind kind = ic->completionKind();
+
+ TryAttachIonStub<CloseIterIRGenerator>(cx, ic, ionScript, iter, kind);
+
+ return CloseIterOperation(cx, iter, kind);
+}
+
+/* static */
+bool IonUnaryArithIC::update(JSContext* cx, HandleScript outerScript,
+ IonUnaryArithIC* ic, HandleValue val,
+ MutableHandleValue res) {
+ IonScript* ionScript = outerScript->ionScript();
+ RootedScript script(cx, ic->script());
+ jsbytecode* pc = ic->pc();
+ JSOp op = JSOp(*pc);
+
+ switch (op) {
+ case JSOp::BitNot: {
+ res.set(val);
+ if (!BitNot(cx, res, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Pos: {
+ res.set(val);
+ if (!ToNumber(cx, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Neg: {
+ res.set(val);
+ if (!NegOperation(cx, res, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Inc: {
+ if (!IncOperation(cx, val, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Dec: {
+ if (!DecOperation(cx, val, res)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::ToNumeric: {
+ res.set(val);
+ if (!ToNumeric(cx, res)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+ MOZ_ASSERT(res.isNumeric());
+
+ TryAttachIonStub<UnaryArithIRGenerator>(cx, ic, ionScript, op, val, res);
+
+ return true;
+}
+
+/* static */
+bool IonBinaryArithIC::update(JSContext* cx, HandleScript outerScript,
+ IonBinaryArithIC* ic, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue ret) {
+ IonScript* ionScript = outerScript->ionScript();
+ RootedScript script(cx, ic->script());
+ jsbytecode* pc = ic->pc();
+ JSOp op = JSOp(*pc);
+
+  // Don't pass lhs/rhs directly; we need the original values when
+  // generating stubs.
+ RootedValue lhsCopy(cx, lhs);
+ RootedValue rhsCopy(cx, rhs);
+
+  // Perform the arithmetic operation.
+ switch (op) {
+ case JSOp::Add:
+ // Do an add.
+ if (!AddValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Sub:
+ if (!SubValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Mul:
+ if (!MulValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Div:
+ if (!DivValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Mod:
+ if (!ModValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::Pow:
+ if (!PowValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ case JSOp::BitOr: {
+ if (!BitOr(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::BitXor: {
+ if (!BitXor(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::BitAnd: {
+ if (!BitAnd(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Lsh: {
+ if (!BitLsh(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Rsh: {
+ if (!BitRsh(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ case JSOp::Ursh: {
+ if (!UrshValues(cx, &lhsCopy, &rhsCopy, ret)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Unhandled binary arith op");
+ }
+
+ TryAttachIonStub<BinaryArithIRGenerator>(cx, ic, ionScript, op, lhs, rhs,
+ ret);
+
+ return true;
+}
+
+/* static */
+bool IonCompareIC::update(JSContext* cx, HandleScript outerScript,
+ IonCompareIC* ic, HandleValue lhs, HandleValue rhs,
+ bool* res) {
+ IonScript* ionScript = outerScript->ionScript();
+ RootedScript script(cx, ic->script());
+ jsbytecode* pc = ic->pc();
+ JSOp op = JSOp(*pc);
+
+  // Don't pass lhs/rhs directly; we need the original values when
+  // generating stubs.
+ RootedValue lhsCopy(cx, lhs);
+ RootedValue rhsCopy(cx, rhs);
+
+ // Perform the compare operation.
+ switch (op) {
+ case JSOp::Lt:
+ if (!LessThan(cx, &lhsCopy, &rhsCopy, res)) {
+ return false;
+ }
+ break;
+ case JSOp::Le:
+ if (!LessThanOrEqual(cx, &lhsCopy, &rhsCopy, res)) {
+ return false;
+ }
+ break;
+ case JSOp::Gt:
+ if (!GreaterThan(cx, &lhsCopy, &rhsCopy, res)) {
+ return false;
+ }
+ break;
+ case JSOp::Ge:
+ if (!GreaterThanOrEqual(cx, &lhsCopy, &rhsCopy, res)) {
+ return false;
+ }
+ break;
+ case JSOp::Eq:
+ if (!js::LooselyEqual(cx, lhsCopy, rhsCopy, res)) {
+ return false;
+ }
+ break;
+ case JSOp::Ne:
+ if (!js::LooselyEqual(cx, lhsCopy, rhsCopy, res)) {
+ return false;
+ }
+ *res = !*res;
+ break;
+ case JSOp::StrictEq:
+ if (!js::StrictlyEqual(cx, lhsCopy, rhsCopy, res)) {
+ return false;
+ }
+ break;
+ case JSOp::StrictNe:
+ if (!js::StrictlyEqual(cx, lhsCopy, rhsCopy, res)) {
+ return false;
+ }
+ *res = !*res;
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Unhandled ion compare op");
+ return false;
+ }
+
+ TryAttachIonStub<CompareIRGenerator>(cx, ic, ionScript, op, lhs, rhs);
+
+ return true;
+}
+
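+// The CacheIR stub data is stored in memory immediately after the IonICStub
+// itself, at the offset recorded in the stub's CacheIRStubInfo.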
+uint8_t* IonICStub::stubDataStart() {
+ return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
+}
+
+void IonIC::attachStub(IonICStub* newStub, JitCode* code) {
+ MOZ_ASSERT(newStub);
+ MOZ_ASSERT(code);
+
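+  // Prepend the new stub: codeRaw_ will point at its code, and the previous
+  // first stub (or the fallback path, if this is the first stub) becomes the
+  // code it jumps to on failure.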
+ if (firstStub_) {
+ newStub->setNext(firstStub_, codeRaw_);
+ }
+ firstStub_ = newStub;
+ codeRaw_ = code->raw();
+
+ state_.trackAttached();
+}
diff --git a/js/src/jit/IonIC.h b/js/src/jit/IonIC.h
new file mode 100644
index 0000000000..2f5c61bcc6
--- /dev/null
+++ b/js/src/jit/IonIC.h
@@ -0,0 +1,664 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonIC_h
+#define jit_IonIC_h
+
+#include "jit/CacheIR.h"
+#include "jit/ICState.h"
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+class CacheIRStubInfo;
+class CacheIRWriter;
+class IonScript;
+
+// An optimized stub attached to an IonIC.
+class IonICStub {
+ // Code to jump to when this stub fails. This is either the next optimized
+ // stub or the OOL fallback path.
+ uint8_t* nextCodeRaw_;
+
+ // The next optimized stub in this chain, or nullptr if this is the last
+ // one.
+ IonICStub* next_;
+
+ // Info about this stub.
+ CacheIRStubInfo* stubInfo_;
+
+#ifndef JS_64BIT
+ protected: // Silence Clang warning about unused private fields.
+ // Ensure stub data is 8-byte aligned on 32-bit.
+ uintptr_t padding_ = 0;
+#endif
+
+ public:
+ IonICStub(uint8_t* fallbackCode, CacheIRStubInfo* stubInfo)
+ : nextCodeRaw_(fallbackCode), next_(nullptr), stubInfo_(stubInfo) {}
+
+ uint8_t* nextCodeRaw() const { return nextCodeRaw_; }
+ uint8_t** nextCodeRawPtr() { return &nextCodeRaw_; }
+ CacheIRStubInfo* stubInfo() const { return stubInfo_; }
+ IonICStub* next() const { return next_; }
+
+ uint8_t* stubDataStart();
+
+ void setNext(IonICStub* next, uint8_t* nextCodeRaw) {
+ MOZ_ASSERT(!next_);
+ MOZ_ASSERT(next && nextCodeRaw);
+ next_ = next;
+ nextCodeRaw_ = nextCodeRaw;
+ }
+
+ // Null out pointers when we unlink stubs, to ensure we never use
+ // discarded stubs.
+ void poison() {
+ nextCodeRaw_ = nullptr;
+ next_ = nullptr;
+ stubInfo_ = nullptr;
+ }
+};
+
+class IonGetPropertyIC;
+class IonSetPropertyIC;
+class IonGetPropSuperIC;
+class IonGetNameIC;
+class IonBindNameIC;
+class IonGetIteratorIC;
+class IonHasOwnIC;
+class IonCheckPrivateFieldIC;
+class IonInIC;
+class IonInstanceOfIC;
+class IonCompareIC;
+class IonUnaryArithIC;
+class IonBinaryArithIC;
+class IonToPropertyKeyIC;
+class IonOptimizeSpreadCallIC;
+class IonCloseIterIC;
+
+class IonIC {
+  // This points either at the out-of-line (OOL) fallback path, or at the code
+  // for the first stub.
+ uint8_t* codeRaw_;
+
+ // The first optimized stub, or nullptr.
+ IonICStub* firstStub_;
+
+ // Location of this IC.
+ JSScript* script_;
+ jsbytecode* pc_;
+
+ // The offset of the rejoin location in the IonScript's code (stubs jump to
+ // this location).
+ uint32_t rejoinOffset_;
+
+ // The offset of the OOL path in the IonScript's code that calls the IC's
+ // update function.
+ uint32_t fallbackOffset_;
+
+ CacheKind kind_;
+ ICState state_;
+
+ protected:
+ explicit IonIC(CacheKind kind)
+ : codeRaw_(nullptr),
+ firstStub_(nullptr),
+ script_(nullptr),
+ pc_(nullptr),
+ rejoinOffset_(0),
+ fallbackOffset_(0),
+ kind_(kind),
+ state_() {}
+
+ void attachStub(IonICStub* newStub, JitCode* code);
+
+ public:
+ void setScriptedLocation(JSScript* script, jsbytecode* pc) {
+ MOZ_ASSERT(!script_ && !pc_);
+ MOZ_ASSERT(script && pc);
+ script_ = script;
+ pc_ = pc;
+ }
+
+ JSScript* script() const {
+ MOZ_ASSERT(script_);
+ return script_;
+ }
+ jsbytecode* pc() const {
+ MOZ_ASSERT(pc_);
+ return pc_;
+ }
+
+ // Discard all stubs.
+ void discardStubs(Zone* zone, IonScript* ionScript);
+
+ // Discard all stubs and reset the ICState.
+ void reset(Zone* zone, IonScript* ionScript);
+
+ ICState& state() { return state_; }
+
+ CacheKind kind() const { return kind_; }
+ uint8_t** codeRawPtr() { return &codeRaw_; }
+
+ void setFallbackOffset(CodeOffset offset) {
+ fallbackOffset_ = offset.offset();
+ }
+ void setRejoinOffset(CodeOffset offset) { rejoinOffset_ = offset.offset(); }
+
+ void resetCodeRaw(IonScript* ionScript);
+
+ uint8_t* fallbackAddr(IonScript* ionScript) const;
+ uint8_t* rejoinAddr(IonScript* ionScript) const;
+
+ IonGetPropertyIC* asGetPropertyIC() {
+ MOZ_ASSERT(kind_ == CacheKind::GetProp || kind_ == CacheKind::GetElem);
+ return (IonGetPropertyIC*)this;
+ }
+ IonSetPropertyIC* asSetPropertyIC() {
+ MOZ_ASSERT(kind_ == CacheKind::SetProp || kind_ == CacheKind::SetElem);
+ return (IonSetPropertyIC*)this;
+ }
+ IonGetPropSuperIC* asGetPropSuperIC() {
+ MOZ_ASSERT(kind_ == CacheKind::GetPropSuper ||
+ kind_ == CacheKind::GetElemSuper);
+ return (IonGetPropSuperIC*)this;
+ }
+ IonGetNameIC* asGetNameIC() {
+ MOZ_ASSERT(kind_ == CacheKind::GetName);
+ return (IonGetNameIC*)this;
+ }
+ IonBindNameIC* asBindNameIC() {
+ MOZ_ASSERT(kind_ == CacheKind::BindName);
+ return (IonBindNameIC*)this;
+ }
+ IonGetIteratorIC* asGetIteratorIC() {
+ MOZ_ASSERT(kind_ == CacheKind::GetIterator);
+ return (IonGetIteratorIC*)this;
+ }
+ IonOptimizeSpreadCallIC* asOptimizeSpreadCallIC() {
+ MOZ_ASSERT(kind_ == CacheKind::OptimizeSpreadCall);
+ return (IonOptimizeSpreadCallIC*)this;
+ }
+ IonHasOwnIC* asHasOwnIC() {
+ MOZ_ASSERT(kind_ == CacheKind::HasOwn);
+ return (IonHasOwnIC*)this;
+ }
+ IonCheckPrivateFieldIC* asCheckPrivateFieldIC() {
+ MOZ_ASSERT(kind_ == CacheKind::CheckPrivateField);
+ return (IonCheckPrivateFieldIC*)this;
+ }
+ IonInIC* asInIC() {
+ MOZ_ASSERT(kind_ == CacheKind::In);
+ return (IonInIC*)this;
+ }
+ IonInstanceOfIC* asInstanceOfIC() {
+ MOZ_ASSERT(kind_ == CacheKind::InstanceOf);
+ return (IonInstanceOfIC*)this;
+ }
+ IonCompareIC* asCompareIC() {
+ MOZ_ASSERT(kind_ == CacheKind::Compare);
+ return (IonCompareIC*)this;
+ }
+ IonUnaryArithIC* asUnaryArithIC() {
+ MOZ_ASSERT(kind_ == CacheKind::UnaryArith);
+ return (IonUnaryArithIC*)this;
+ }
+ IonBinaryArithIC* asBinaryArithIC() {
+ MOZ_ASSERT(kind_ == CacheKind::BinaryArith);
+ return (IonBinaryArithIC*)this;
+ }
+ IonToPropertyKeyIC* asToPropertyKeyIC() {
+ MOZ_ASSERT(kind_ == CacheKind::ToPropertyKey);
+ return (IonToPropertyKeyIC*)this;
+ }
+ IonCloseIterIC* asCloseIterIC() {
+ MOZ_ASSERT(kind_ == CacheKind::CloseIter);
+ return (IonCloseIterIC*)this;
+ }
+
+ // Returns the Register to use as scratch when entering IC stubs. This
+ // should either be an output register or a temp.
+ Register scratchRegisterForEntryJump();
+
+ void trace(JSTracer* trc, IonScript* ionScript);
+
+ void attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
+ CacheKind kind, IonScript* ionScript, bool* attached);
+};
+
+class IonGetPropertyIC : public IonIC {
+ private:
+ LiveRegisterSet liveRegs_;
+
+ TypedOrValueRegister value_;
+ ConstantOrRegister id_;
+ ValueOperand output_;
+
+ public:
+ IonGetPropertyIC(CacheKind kind, LiveRegisterSet liveRegs,
+ TypedOrValueRegister value, const ConstantOrRegister& id,
+ ValueOperand output)
+ : IonIC(kind),
+ liveRegs_(liveRegs),
+ value_(value),
+ id_(id),
+ output_(output) {}
+
+ TypedOrValueRegister value() const { return value_; }
+ ConstantOrRegister id() const { return id_; }
+ ValueOperand output() const { return output_; }
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonGetPropertyIC* ic, HandleValue val,
+ HandleValue idVal, MutableHandleValue res);
+};
+
+class IonGetPropSuperIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ Register object_;
+ TypedOrValueRegister receiver_;
+ ConstantOrRegister id_;
+ ValueOperand output_;
+
+ public:
+ IonGetPropSuperIC(CacheKind kind, LiveRegisterSet liveRegs, Register object,
+ TypedOrValueRegister receiver, const ConstantOrRegister& id,
+ ValueOperand output)
+ : IonIC(kind),
+ liveRegs_(liveRegs),
+ object_(object),
+ receiver_(receiver),
+ id_(id),
+ output_(output) {}
+
+ Register object() const { return object_; }
+ TypedOrValueRegister receiver() const { return receiver_; }
+ ConstantOrRegister id() const { return id_; }
+ ValueOperand output() const { return output_; }
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonGetPropSuperIC* ic, HandleObject obj,
+ HandleValue receiver, HandleValue idVal,
+ MutableHandleValue res);
+};
+
+class IonSetPropertyIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ Register object_;
+ Register temp_;
+ ConstantOrRegister id_;
+ ConstantOrRegister rhs_;
+ bool strict_ : 1;
+
+ public:
+ IonSetPropertyIC(CacheKind kind, LiveRegisterSet liveRegs, Register object,
+ Register temp, const ConstantOrRegister& id,
+ const ConstantOrRegister& rhs, bool strict)
+ : IonIC(kind),
+ liveRegs_(liveRegs),
+ object_(object),
+ temp_(temp),
+ id_(id),
+ rhs_(rhs),
+ strict_(strict) {}
+
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+ Register object() const { return object_; }
+ ConstantOrRegister id() const { return id_; }
+ ConstantOrRegister rhs() const { return rhs_; }
+
+ Register temp() const { return temp_; }
+
+ bool strict() const { return strict_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonSetPropertyIC* ic, HandleObject obj,
+ HandleValue idVal, HandleValue rhs);
+};
+
+class IonGetNameIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ Register environment_;
+ ValueOperand output_;
+ Register temp_;
+
+ public:
+ IonGetNameIC(LiveRegisterSet liveRegs, Register environment,
+ ValueOperand output, Register temp)
+ : IonIC(CacheKind::GetName),
+ liveRegs_(liveRegs),
+ environment_(environment),
+ output_(output),
+ temp_(temp) {}
+
+ Register environment() const { return environment_; }
+ ValueOperand output() const { return output_; }
+ Register temp() const { return temp_; }
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonGetNameIC* ic, HandleObject envChain,
+ MutableHandleValue res);
+};
+
+class IonBindNameIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ Register environment_;
+ Register output_;
+ Register temp_;
+
+ public:
+ IonBindNameIC(LiveRegisterSet liveRegs, Register environment, Register output,
+ Register temp)
+ : IonIC(CacheKind::BindName),
+ liveRegs_(liveRegs),
+ environment_(environment),
+ output_(output),
+ temp_(temp) {}
+
+ Register environment() const { return environment_; }
+ Register output() const { return output_; }
+ Register temp() const { return temp_; }
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+ static JSObject* update(JSContext* cx, HandleScript outerScript,
+ IonBindNameIC* ic, HandleObject envChain);
+};
+
+class IonGetIteratorIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+ TypedOrValueRegister value_;
+ Register output_;
+ Register temp1_;
+ Register temp2_;
+
+ public:
+ IonGetIteratorIC(LiveRegisterSet liveRegs, TypedOrValueRegister value,
+ Register output, Register temp1, Register temp2)
+ : IonIC(CacheKind::GetIterator),
+ liveRegs_(liveRegs),
+ value_(value),
+ output_(output),
+ temp1_(temp1),
+ temp2_(temp2) {}
+
+ TypedOrValueRegister value() const { return value_; }
+ Register output() const { return output_; }
+ Register temp1() const { return temp1_; }
+ Register temp2() const { return temp2_; }
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+ static JSObject* update(JSContext* cx, HandleScript outerScript,
+ IonGetIteratorIC* ic, HandleValue value);
+};
+
+class IonOptimizeSpreadCallIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+ ValueOperand value_;
+ ValueOperand output_;
+ Register temp_;
+
+ public:
+ IonOptimizeSpreadCallIC(LiveRegisterSet liveRegs, ValueOperand value,
+ ValueOperand output, Register temp)
+ : IonIC(CacheKind::OptimizeSpreadCall),
+ liveRegs_(liveRegs),
+ value_(value),
+ output_(output),
+ temp_(temp) {}
+
+ ValueOperand value() const { return value_; }
+ ValueOperand output() const { return output_; }
+ Register temp() const { return temp_; }
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+ static bool update(JSContext* cx, HandleScript outerScript,
+ IonOptimizeSpreadCallIC* ic, HandleValue value,
+ MutableHandleValue result);
+};
+
+class IonHasOwnIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ TypedOrValueRegister value_;
+ TypedOrValueRegister id_;
+ Register output_;
+
+ public:
+ IonHasOwnIC(LiveRegisterSet liveRegs, TypedOrValueRegister value,
+ TypedOrValueRegister id, Register output)
+ : IonIC(CacheKind::HasOwn),
+ liveRegs_(liveRegs),
+ value_(value),
+ id_(id),
+ output_(output) {}
+
+ TypedOrValueRegister value() const { return value_; }
+ TypedOrValueRegister id() const { return id_; }
+ Register output() const { return output_; }
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonHasOwnIC* ic, HandleValue val,
+ HandleValue idVal, int32_t* res);
+};
+
+class IonCheckPrivateFieldIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ TypedOrValueRegister value_;
+ TypedOrValueRegister id_;
+ Register output_;
+
+ public:
+ IonCheckPrivateFieldIC(LiveRegisterSet liveRegs, TypedOrValueRegister value,
+ TypedOrValueRegister id, Register output)
+ : IonIC(CacheKind::CheckPrivateField),
+ liveRegs_(liveRegs),
+ value_(value),
+ id_(id),
+ output_(output) {}
+
+ TypedOrValueRegister value() const { return value_; }
+ TypedOrValueRegister id() const { return id_; }
+ Register output() const { return output_; }
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonCheckPrivateFieldIC* ic, HandleValue val,
+ HandleValue idVal, bool* res);
+};
+
+class IonInIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ ConstantOrRegister key_;
+ Register object_;
+ Register output_;
+ Register temp_;
+
+ public:
+ IonInIC(LiveRegisterSet liveRegs, const ConstantOrRegister& key,
+ Register object, Register output, Register temp)
+ : IonIC(CacheKind::In),
+ liveRegs_(liveRegs),
+ key_(key),
+ object_(object),
+ output_(output),
+ temp_(temp) {}
+
+ ConstantOrRegister key() const { return key_; }
+ Register object() const { return object_; }
+ Register output() const { return output_; }
+ Register temp() const { return temp_; }
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonInIC* ic, HandleValue key,
+ HandleObject obj, bool* res);
+};
+
+class IonInstanceOfIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ TypedOrValueRegister lhs_;
+ Register rhs_;
+ Register output_;
+
+ public:
+ IonInstanceOfIC(LiveRegisterSet liveRegs, TypedOrValueRegister lhs,
+ Register rhs, Register output)
+ : IonIC(CacheKind::InstanceOf),
+ liveRegs_(liveRegs),
+ lhs_(lhs),
+ rhs_(rhs),
+ output_(output) {}
+
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+ TypedOrValueRegister lhs() const { return lhs_; }
+ Register rhs() const { return rhs_; }
+ Register output() const { return output_; }
+
+  // This signature mimics that of TryAttachInstanceOfStub in baseline.
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonInstanceOfIC* ic, HandleValue lhs,
+ HandleObject rhs, bool* attached);
+};
+
+class IonCompareIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ TypedOrValueRegister lhs_;
+ TypedOrValueRegister rhs_;
+ Register output_;
+
+ public:
+ IonCompareIC(LiveRegisterSet liveRegs, TypedOrValueRegister lhs,
+ TypedOrValueRegister rhs, Register output)
+ : IonIC(CacheKind::Compare),
+ liveRegs_(liveRegs),
+ lhs_(lhs),
+ rhs_(rhs),
+ output_(output) {}
+
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+ TypedOrValueRegister lhs() const { return lhs_; }
+ TypedOrValueRegister rhs() const { return rhs_; }
+ Register output() const { return output_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonCompareIC* stub, HandleValue lhs,
+ HandleValue rhs, bool* res);
+};
+
+class IonUnaryArithIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ TypedOrValueRegister input_;
+ ValueOperand output_;
+
+ public:
+ IonUnaryArithIC(LiveRegisterSet liveRegs, TypedOrValueRegister input,
+ ValueOperand output)
+ : IonIC(CacheKind::UnaryArith),
+ liveRegs_(liveRegs),
+ input_(input),
+ output_(output) {}
+
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+ TypedOrValueRegister input() const { return input_; }
+ ValueOperand output() const { return output_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonUnaryArithIC* stub, HandleValue val,
+ MutableHandleValue res);
+};
+
+class IonToPropertyKeyIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+ ValueOperand input_;
+ ValueOperand output_;
+
+ public:
+ IonToPropertyKeyIC(LiveRegisterSet liveRegs, ValueOperand input,
+ ValueOperand output)
+ : IonIC(CacheKind::ToPropertyKey),
+ liveRegs_(liveRegs),
+ input_(input),
+ output_(output) {}
+
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+ ValueOperand input() const { return input_; }
+ ValueOperand output() const { return output_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonToPropertyKeyIC* ic, HandleValue val,
+ MutableHandleValue res);
+};
+
+class IonBinaryArithIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ TypedOrValueRegister lhs_;
+ TypedOrValueRegister rhs_;
+ ValueOperand output_;
+
+ public:
+ IonBinaryArithIC(LiveRegisterSet liveRegs, TypedOrValueRegister lhs,
+ TypedOrValueRegister rhs, ValueOperand output)
+ : IonIC(CacheKind::BinaryArith),
+ liveRegs_(liveRegs),
+ lhs_(lhs),
+ rhs_(rhs),
+ output_(output) {}
+
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+ TypedOrValueRegister lhs() const { return lhs_; }
+ TypedOrValueRegister rhs() const { return rhs_; }
+ ValueOperand output() const { return output_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonBinaryArithIC* stub, HandleValue lhs,
+ HandleValue rhs, MutableHandleValue res);
+};
+
+class IonCloseIterIC : public IonIC {
+ LiveRegisterSet liveRegs_;
+
+ Register iter_;
+ Register temp_;
+ CompletionKind completionKind_;
+
+ public:
+ IonCloseIterIC(LiveRegisterSet liveRegs, Register iter, Register temp,
+ CompletionKind completionKind)
+ : IonIC(CacheKind::CloseIter),
+ liveRegs_(liveRegs),
+ iter_(iter),
+ temp_(temp),
+ completionKind_(completionKind) {}
+
+ LiveRegisterSet liveRegs() const { return liveRegs_; }
+ Register temp() const { return temp_; }
+ Register iter() const { return iter_; }
+ CompletionKind completionKind() const { return completionKind_; }
+
+ [[nodiscard]] static bool update(JSContext* cx, HandleScript outerScript,
+ IonCloseIterIC* ic, HandleObject iter);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonIC_h */
diff --git a/js/src/jit/IonOptimizationLevels.cpp b/js/src/jit/IonOptimizationLevels.cpp
new file mode 100644
index 0000000000..7470a11286
--- /dev/null
+++ b/js/src/jit/IonOptimizationLevels.cpp
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonOptimizationLevels.h"
+
+#include "jit/Ion.h"
+#include "vm/JSScript.h"
+
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+const OptimizationLevelInfo IonOptimizations;
+
+void OptimizationInfo::initNormalOptimizationInfo() {
+ level_ = OptimizationLevel::Normal;
+
+ autoTruncate_ = true;
+ eaa_ = true;
+ edgeCaseAnalysis_ = true;
+ eliminateRedundantChecks_ = true;
+ eliminateRedundantShapeGuards_ = true;
+ eliminateRedundantGCBarriers_ = true;
+ inlineInterpreted_ = true;
+ inlineNative_ = true;
+ licm_ = true;
+ gvn_ = true;
+ rangeAnalysis_ = true;
+ reordering_ = true;
+ scalarReplacement_ = true;
+ sink_ = true;
+
+ registerAllocator_ = RegisterAllocator_Backtracking;
+}
+
+void OptimizationInfo::initWasmOptimizationInfo() {
+  // The Wasm optimization level disables some passes that don't work well
+  // with wasm.
+
+ // Take normal option values for not specified values.
+ initNormalOptimizationInfo();
+
+ level_ = OptimizationLevel::Wasm;
+
+ ama_ = true;
+ autoTruncate_ = false;
+ edgeCaseAnalysis_ = false;
+ eliminateRedundantChecks_ = false;
+ eliminateRedundantShapeGuards_ = false;
+ eliminateRedundantGCBarriers_ = false;
+ scalarReplacement_ = false; // wasm has no objects.
+ sink_ = false;
+}
+
+uint32_t OptimizationInfo::compilerWarmUpThreshold(JSScript* script,
+ jsbytecode* pc) const {
+ MOZ_ASSERT(pc == nullptr || pc == script->code() ||
+ JSOp(*pc) == JSOp::LoopHead);
+
+ // The script must not start with a LoopHead op or the code below would be
+ // wrong. See bug 1602681.
+ MOZ_ASSERT_IF(pc && JSOp(*pc) == JSOp::LoopHead, pc > script->code());
+
+ if (pc == script->code()) {
+ pc = nullptr;
+ }
+
+ uint32_t warmUpThreshold = baseCompilerWarmUpThreshold();
+
+ // If the script is too large to compile on the main thread, we can still
+ // compile it off thread. In these cases, increase the warm-up counter
+ // threshold to improve the compilation's type information and hopefully
+ // avoid later recompilation.
+
+ if (script->length() > JitOptions.ionMaxScriptSizeMainThread) {
+ warmUpThreshold *=
+ (script->length() / double(JitOptions.ionMaxScriptSizeMainThread));
+ }
+
+ uint32_t numLocalsAndArgs = NumLocalsAndArgs(script);
+ if (numLocalsAndArgs > JitOptions.ionMaxLocalsAndArgsMainThread) {
+ warmUpThreshold *=
+ (numLocalsAndArgs / double(JitOptions.ionMaxLocalsAndArgsMainThread));
+ }
+
+ if (!pc || JitOptions.eagerIonCompilation()) {
+ return warmUpThreshold;
+ }
+
+ // It's more efficient to enter outer loops, rather than inner loops, via OSR.
+ // To accomplish this, we use a slightly higher threshold for inner loops.
+ // Note that the loop depth is always > 0 so we will prefer non-OSR over OSR.
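+  // For example, with a base threshold of 1000 and no size-based scaling, a
+  // loop at depth 2 would use 1000 + 2 * (1000 / 10) = 1200 as its threshold.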
+ uint32_t loopDepth = LoopHeadDepthHint(pc);
+ MOZ_ASSERT(loopDepth > 0);
+ return warmUpThreshold + loopDepth * (baseCompilerWarmUpThreshold() / 10);
+}
+
+uint32_t OptimizationInfo::recompileWarmUpThreshold(JSScript* script,
+ jsbytecode* pc) const {
+ MOZ_ASSERT(pc == script->code() || JSOp(*pc) == JSOp::LoopHead);
+
+ uint32_t threshold = compilerWarmUpThreshold(script, pc);
+ if (JSOp(*pc) != JSOp::LoopHead || JitOptions.eagerIonCompilation()) {
+ return threshold;
+ }
+
+ // If we're stuck in a long-running loop at a low optimization level, we have
+ // to invalidate to be able to tier up. This is worse than recompiling at
+ // function entry (because in that case we can use the lazy link mechanism and
+ // avoid invalidation completely). Use a very high recompilation threshold for
+ // loop edges so that this only affects very long-running loops.
+
+ uint32_t loopDepth = LoopHeadDepthHint(pc);
+ MOZ_ASSERT(loopDepth > 0);
+ return threshold + loopDepth * (baseCompilerWarmUpThreshold() / 10);
+}
+
+OptimizationLevelInfo::OptimizationLevelInfo() {
+ infos_[OptimizationLevel::Normal].initNormalOptimizationInfo();
+ infos_[OptimizationLevel::Wasm].initWasmOptimizationInfo();
+}
+
+OptimizationLevel OptimizationLevelInfo::levelForScript(JSScript* script,
+ jsbytecode* pc) const {
+ const OptimizationInfo* info = get(OptimizationLevel::Normal);
+ if (script->getWarmUpCount() < info->compilerWarmUpThreshold(script, pc)) {
+ return OptimizationLevel::DontCompile;
+ }
+
+ return OptimizationLevel::Normal;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/IonOptimizationLevels.h b/js/src/jit/IonOptimizationLevels.h
new file mode 100644
index 0000000000..2d899b36ba
--- /dev/null
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -0,0 +1,203 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonOptimizationLevels_h
+#define jit_IonOptimizationLevels_h
+
+#include "mozilla/EnumeratedArray.h"
+
+#include "jstypes.h"
+
+#include "jit/JitOptions.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+namespace jit {
+
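+// Ion optimization levels. Count is a sentinel used to size per-level tables;
+// DontCompile deliberately comes after Count so it has no table entry.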
+enum class OptimizationLevel : uint8_t { Normal, Wasm, Count, DontCompile };
+
+#ifdef JS_JITSPEW
+inline const char* OptimizationLevelString(OptimizationLevel level) {
+ switch (level) {
+ case OptimizationLevel::DontCompile:
+ return "Optimization_DontCompile";
+ case OptimizationLevel::Normal:
+ return "Optimization_Normal";
+ case OptimizationLevel::Wasm:
+ return "Optimization_Wasm";
+ case OptimizationLevel::Count:;
+ }
+ MOZ_CRASH("Invalid OptimizationLevel");
+}
+#endif
+
+// Class representing the Ion optimization settings for an OptimizationLevel.
+class OptimizationInfo {
+ OptimizationLevel level_;
+
+ // Toggles whether Effective Address Analysis is performed.
+ bool eaa_;
+
+ // Toggles whether Alignment Mask Analysis is performed.
+ bool ama_;
+
+ // Toggles whether Edge Case Analysis is used.
+ bool edgeCaseAnalysis_;
+
+ // Toggles whether redundant checks get removed.
+ bool eliminateRedundantChecks_;
+
+ // Toggles whether redundant shape guards get removed.
+ bool eliminateRedundantShapeGuards_;
+
+ // Toggles whether redundant GC barriers get removed.
+ bool eliminateRedundantGCBarriers_;
+
+ // Toggles whether interpreted scripts get inlined.
+ bool inlineInterpreted_;
+
+ // Toggles whether native scripts get inlined.
+ bool inlineNative_;
+
+ // Toggles whether global value numbering is used.
+ bool gvn_;
+
+ // Toggles whether loop invariant code motion is performed.
+ bool licm_;
+
+ // Toggles whether Range Analysis is used.
+ bool rangeAnalysis_;
+
+ // Toggles whether instruction reordering is performed.
+ bool reordering_;
+
+ // Toggles whether Truncation based on Range Analysis is used.
+ bool autoTruncate_;
+
+ // Toggles whether sink is used.
+ bool sink_;
+
+ // Toggles whether scalar replacement is used.
+ bool scalarReplacement_;
+
+ // Describes which register allocator to use.
+ IonRegisterAllocator registerAllocator_;
+
+ uint32_t baseCompilerWarmUpThreshold() const {
+ MOZ_ASSERT(level_ == OptimizationLevel::Normal);
+ return JitOptions.normalIonWarmUpThreshold;
+ }
+
+ public:
+ constexpr OptimizationInfo()
+ : level_(OptimizationLevel::Normal),
+ eaa_(false),
+ ama_(false),
+ edgeCaseAnalysis_(false),
+ eliminateRedundantChecks_(false),
+ eliminateRedundantShapeGuards_(false),
+ eliminateRedundantGCBarriers_(false),
+ inlineInterpreted_(false),
+ inlineNative_(false),
+ gvn_(false),
+ licm_(false),
+ rangeAnalysis_(false),
+ reordering_(false),
+ autoTruncate_(false),
+ sink_(false),
+ scalarReplacement_(false),
+ registerAllocator_(RegisterAllocator_Backtracking) {}
+
+ void initNormalOptimizationInfo();
+ void initWasmOptimizationInfo();
+
+ OptimizationLevel level() const { return level_; }
+
+ bool inlineInterpreted() const {
+ return inlineInterpreted_ && !JitOptions.disableInlining;
+ }
+
+ bool inlineNative() const {
+ return inlineNative_ && !JitOptions.disableInlining;
+ }
+
+ uint32_t compilerWarmUpThreshold(JSScript* script,
+ jsbytecode* pc = nullptr) const;
+
+ uint32_t recompileWarmUpThreshold(JSScript* script, jsbytecode* pc) const;
+
+ bool gvnEnabled() const { return gvn_ && !JitOptions.disableGvn; }
+
+ bool licmEnabled() const { return licm_ && !JitOptions.disableLicm; }
+
+ bool rangeAnalysisEnabled() const {
+ return rangeAnalysis_ && !JitOptions.disableRangeAnalysis;
+ }
+
+ bool instructionReorderingEnabled() const {
+ return reordering_ && !JitOptions.disableInstructionReordering;
+ }
+
+ bool autoTruncateEnabled() const {
+ return autoTruncate_ && rangeAnalysisEnabled();
+ }
+
+ bool sinkEnabled() const { return sink_ && !JitOptions.disableSink; }
+
+ bool eaaEnabled() const { return eaa_ && !JitOptions.disableEaa; }
+
+ bool amaEnabled() const { return ama_ && !JitOptions.disableAma; }
+
+ bool edgeCaseAnalysisEnabled() const {
+ return edgeCaseAnalysis_ && !JitOptions.disableEdgeCaseAnalysis;
+ }
+
+ bool eliminateRedundantChecksEnabled() const {
+ return eliminateRedundantChecks_;
+ }
+
+ bool eliminateRedundantShapeGuardsEnabled() const {
+ return eliminateRedundantShapeGuards_ &&
+ !JitOptions.disableRedundantShapeGuards;
+ }
+
+ bool eliminateRedundantGCBarriersEnabled() const {
+ return eliminateRedundantGCBarriers_ &&
+ !JitOptions.disableRedundantGCBarriers;
+ }
+
+ IonRegisterAllocator registerAllocator() const {
+ return JitOptions.forcedRegisterAllocator.valueOr(registerAllocator_);
+ }
+
+ bool scalarReplacementEnabled() const {
+ return scalarReplacement_ && !JitOptions.disableScalarReplacement;
+ }
+};
+
+class OptimizationLevelInfo {
+ private:
+ mozilla::EnumeratedArray<OptimizationLevel, OptimizationLevel::Count,
+ OptimizationInfo>
+ infos_;
+
+ public:
+ OptimizationLevelInfo();
+
+ const OptimizationInfo* get(OptimizationLevel level) const {
+ return &infos_[level];
+ }
+
+ OptimizationLevel levelForScript(JSScript* script,
+ jsbytecode* pc = nullptr) const;
+};
+
+extern const OptimizationLevelInfo IonOptimizations;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonOptimizationLevels_h */
diff --git a/js/src/jit/IonScript.h b/js/src/jit/IonScript.h
new file mode 100644
index 0000000000..c1f7a4810a
--- /dev/null
+++ b/js/src/jit/IonScript.h
@@ -0,0 +1,590 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonScript_h
+#define jit_IonScript_h
+
+#include "mozilla/MemoryReporting.h" // MallocSizeOf
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t
+
+#include "jstypes.h"
+
+#include "gc/Barrier.h" // HeapPtr{JitCode,Object}
+#include "jit/IonTypes.h" // IonCompilationId
+#include "jit/JitCode.h" // JitCode
+#include "jit/JitOptions.h" // JitOptions
+#include "js/TypeDecls.h" // jsbytecode
+#include "util/TrailingArray.h" // TrailingArray
+
+namespace js {
+namespace jit {
+
+class SnapshotWriter;
+class RecoverWriter;
+class SafepointWriter;
+class CodegenSafepointIndex;
+class SafepointIndex;
+class OsiIndex;
+class IonIC;
+
+// An IonScript attaches Ion-generated information to a JSScript. The header
+// structure is followed by several arrays of data. These trailing arrays have a
+// layout based on offsets (bytes from 'this') stored in the IonScript header.
+//
+// <IonScript itself>
+// --
+// PreBarriered<Value>[] constantTable()
+// uint8_t[] runtimeData()
+// OsiIndex[] osiIndex()
+// SafepointIndex[] safepointIndex()
+// uint32_t[] icIndex()
+// --
+// uint8_t[] safepoints()
+// uint8_t[] snapshots()
+// uint8_t[] snapshotsRVATable()
+// uint8_t[] recovers()
+//
+// Note: These are arranged in order of descending alignment requirements to
+// avoid the need for padding. The `runtimeData` uses uint64_t alignment due to
+// its use of mozilla::AlignedStorage2.
+class alignas(8) IonScript final : public TrailingArray {
+ private:
+  // Offset (in bytes) from `this` to the start of each trailing array. Each
+  // array ends where the following one begins. There is no implicit padding
+  // (except possibly at the very end).
+ Offset constantTableOffset_ = 0; // JS::Value aligned
+ Offset runtimeDataOffset_ = 0; // uint64_t aligned
+ Offset nurseryObjectsOffset_ = 0; // pointer aligned
+ Offset osiIndexOffset_ = 0;
+ Offset safepointIndexOffset_ = 0;
+ Offset icIndexOffset_ = 0;
+ Offset safepointsOffset_ = 0;
+ Offset snapshotsOffset_ = 0;
+ Offset rvaTableOffset_ = 0;
+ Offset recoversOffset_ = 0;
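+
+  // Total number of bytes allocated for this IonScript, including the trailing
+  // arrays; this also serves as the end offset of the last trailing array.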
+ Offset allocBytes_ = 0;
+
+ // Code pointer containing the actual method.
+ HeapPtr<JitCode*> method_ = nullptr;
+
+ // Entrypoint for OSR, or nullptr.
+ jsbytecode* osrPc_ = nullptr;
+
+ // Offset to OSR entrypoint from method_->raw(), or 0.
+ uint32_t osrEntryOffset_ = 0;
+
+ // Offset of the invalidation epilogue (which pushes this IonScript
+ // and calls the invalidation thunk).
+ uint32_t invalidateEpilogueOffset_ = 0;
+
+ // The offset immediately after the IonScript immediate.
+ // NOTE: technically a constant delta from
+ // |invalidateEpilogueOffset_|, so we could hard-code this
+ // per-platform if we want.
+ uint32_t invalidateEpilogueDataOffset_ = 0;
+
+ // Number of bailouts that have occurred for reasons that could be
+ // fixed if we invalidated and recompiled.
+ uint16_t numFixableBailouts_ = 0;
+
+ // Number of bailouts that have occurred for reasons that can't be
+ // fixed by recompiling: for example, bailing out to catch an exception.
+ uint16_t numUnfixableBailouts_ = 0;
+
+ public:
+ enum class LICMState : uint8_t { NeverBailed, Bailed, BailedAndHitFallback };
+
+ private:
+ // Tracks the state of LICM bailouts.
+ LICMState licmState_ = LICMState::NeverBailed;
+
+ // Flag set if IonScript was compiled with profiling enabled.
+ bool hasProfilingInstrumentation_ = false;
+
+ // Number of bytes this function reserves on the stack for slots spilled by
+ // the register allocator.
+ uint32_t localSlotsSize_ = 0;
+
+  // Number of bytes passed in as formal arguments or |this|.
+ uint32_t argumentSlotsSize_ = 0;
+
+ // Frame size is the value that can be added to the StackPointer along
+ // with the frame prefix to get a valid JitFrameLayout.
+ uint32_t frameSize_ = 0;
+
+ // Number of references from invalidation records.
+ uint32_t invalidationCount_ = 0;
+
+ // Identifier of the compilation which produced this code.
+ IonCompilationId compilationId_;
+
+  // Number of times we tried to enter this script via OSR but failed due to
+  // a LoopHead pc other than osrPc_.
+ uint32_t osrPcMismatchCounter_ = 0;
+
+#ifdef DEBUG
+ // A hash of the ICScripts used in this compilation.
+ mozilla::HashNumber icHash_ = 0;
+#endif
+
+ // End of fields.
+
+ private:
+ // Layout helpers
+ Offset constantTableOffset() const { return constantTableOffset_; }
+ Offset runtimeDataOffset() const { return runtimeDataOffset_; }
+ Offset nurseryObjectsOffset() const { return nurseryObjectsOffset_; }
+ Offset osiIndexOffset() const { return osiIndexOffset_; }
+ Offset safepointIndexOffset() const { return safepointIndexOffset_; }
+ Offset icIndexOffset() const { return icIndexOffset_; }
+ Offset safepointsOffset() const { return safepointsOffset_; }
+ Offset snapshotsOffset() const { return snapshotsOffset_; }
+ Offset rvaTableOffset() const { return rvaTableOffset_; }
+ Offset recoversOffset() const { return recoversOffset_; }
+ Offset endOffset() const { return allocBytes_; }
+
+ // Hardcode size of incomplete types. These are verified in Ion.cpp.
+ static constexpr size_t SizeOf_OsiIndex = 2 * sizeof(uint32_t);
+ static constexpr size_t SizeOf_SafepointIndex = 2 * sizeof(uint32_t);
+
+ public:
+ //
+ // Table of constants referenced in snapshots. (JS::Value alignment)
+ //
+ PreBarriered<Value>* constants() {
+    // Nursery constants are manually barriered in CodeGenerator::link(), so a
+    // post barrier is not required.
+ return offsetToPointer<PreBarriered<Value>>(constantTableOffset());
+ }
+ size_t numConstants() const {
+ return numElements<PreBarriered<Value>>(constantTableOffset(),
+ runtimeDataOffset());
+ }
+
+ //
+ // IonIC data structures. (uint64_t alignment)
+ //
+ uint8_t* runtimeData() {
+ return offsetToPointer<uint8_t>(runtimeDataOffset());
+ }
+ size_t runtimeSize() const {
+ return numElements<uint8_t>(runtimeDataOffset(), nurseryObjectsOffset());
+ }
+
+ //
+ // List of (originally) nursery-allocated objects referenced from JIT code.
+ // (JSObject* alignment)
+ //
+ HeapPtr<JSObject*>* nurseryObjects() {
+ return offsetToPointer<HeapPtr<JSObject*>>(nurseryObjectsOffset());
+ }
+ size_t numNurseryObjects() const {
+ return numElements<HeapPtr<JSObject*>>(nurseryObjectsOffset(),
+ osiIndexOffset());
+ }
+ void* addressOfNurseryObject(uint32_t index) {
+ MOZ_ASSERT(index < numNurseryObjects());
+ return &nurseryObjects()[index];
+ }
+
+ //
+ // Map OSI-point displacement to snapshot.
+ //
+ OsiIndex* osiIndices() { return offsetToPointer<OsiIndex>(osiIndexOffset()); }
+ const OsiIndex* osiIndices() const {
+ return offsetToPointer<OsiIndex>(osiIndexOffset());
+ }
+ size_t numOsiIndices() const {
+ return numElements<SizeOf_OsiIndex>(osiIndexOffset(),
+ safepointIndexOffset());
+ }
+
+ //
+ // Map code displacement to safepoint / OSI-patch-delta.
+ //
+ SafepointIndex* safepointIndices() {
+ return offsetToPointer<SafepointIndex>(safepointIndexOffset());
+ }
+ const SafepointIndex* safepointIndices() const {
+ return offsetToPointer<SafepointIndex>(safepointIndexOffset());
+ }
+ size_t numSafepointIndices() const {
+ return numElements<SizeOf_SafepointIndex>(safepointIndexOffset(),
+ icIndexOffset());
+ }
+
+ //
+ // Offset into `runtimeData` for each (variable-length) IonIC.
+ //
+ uint32_t* icIndex() { return offsetToPointer<uint32_t>(icIndexOffset()); }
+ size_t numICs() const {
+ return numElements<uint32_t>(icIndexOffset(), safepointsOffset());
+ }
+
+ //
+ // Safepoint table as a CompactBuffer.
+ //
+ const uint8_t* safepoints() const {
+ return offsetToPointer<uint8_t>(safepointsOffset());
+ }
+ size_t safepointsSize() const {
+ return numElements<uint8_t>(safepointsOffset(), snapshotsOffset());
+ }
+
+ //
+ // Snapshot and RValueAllocation tables as CompactBuffers.
+ //
+ const uint8_t* snapshots() const {
+ return offsetToPointer<uint8_t>(snapshotsOffset());
+ }
+ size_t snapshotsListSize() const {
+ return numElements<uint8_t>(snapshotsOffset(), rvaTableOffset());
+ }
+ size_t snapshotsRVATableSize() const {
+ return numElements<uint8_t>(rvaTableOffset(), recoversOffset());
+ }
+
+ //
+ // Recover instruction table as a CompactBuffer.
+ //
+ const uint8_t* recovers() const {
+ return offsetToPointer<uint8_t>(recoversOffset());
+ }
+ size_t recoversSize() const {
+ return numElements<uint8_t>(recoversOffset(), endOffset());
+ }
+
+ private:
+ IonScript(IonCompilationId compilationId, uint32_t localSlotsSize,
+ uint32_t argumentSlotsSize, uint32_t frameSize);
+
+ public:
+ static IonScript* New(JSContext* cx, IonCompilationId compilationId,
+ uint32_t localSlotsSize, uint32_t argumentSlotsSize,
+ uint32_t frameSize, size_t snapshotsListSize,
+ size_t snapshotsRVATableSize, size_t recoversSize,
+ size_t constants, size_t nurseryObjects,
+ size_t safepointIndices, size_t osiIndices,
+ size_t icEntries, size_t runtimeSize,
+ size_t safepointsSize);
+
+ static void Destroy(JS::GCContext* gcx, IonScript* script);
+
+ void trace(JSTracer* trc);
+
+ static inline size_t offsetOfInvalidationCount() {
+ return offsetof(IonScript, invalidationCount_);
+ }
+
+ public:
+ JitCode* method() const { return method_; }
+ void setMethod(JitCode* code) {
+ MOZ_ASSERT(!invalidated());
+ method_ = code;
+ }
+ void setOsrPc(jsbytecode* osrPc) { osrPc_ = osrPc; }
+ jsbytecode* osrPc() const { return osrPc_; }
+ void setOsrEntryOffset(uint32_t offset) {
+ MOZ_ASSERT(!osrEntryOffset_);
+ osrEntryOffset_ = offset;
+ }
+ uint32_t osrEntryOffset() const { return osrEntryOffset_; }
+ bool containsCodeAddress(uint8_t* addr) const {
+ return method()->raw() <= addr &&
+ addr <= method()->raw() + method()->instructionsSize();
+ }
+ bool containsReturnAddress(uint8_t* addr) const {
+    // This accounts for an off-by-one error caused by the return address of a
+    // bailout sitting outside the range of the containing function.
+ return method()->raw() <= addr &&
+ addr <= method()->raw() + method()->instructionsSize();
+ }
+ void setInvalidationEpilogueOffset(uint32_t offset) {
+ MOZ_ASSERT(!invalidateEpilogueOffset_);
+ invalidateEpilogueOffset_ = offset;
+ }
+ uint32_t invalidateEpilogueOffset() const {
+ MOZ_ASSERT(invalidateEpilogueOffset_);
+ return invalidateEpilogueOffset_;
+ }
+ void setInvalidationEpilogueDataOffset(uint32_t offset) {
+ MOZ_ASSERT(!invalidateEpilogueDataOffset_);
+ invalidateEpilogueDataOffset_ = offset;
+ }
+ uint32_t invalidateEpilogueDataOffset() const {
+ MOZ_ASSERT(invalidateEpilogueDataOffset_);
+ return invalidateEpilogueDataOffset_;
+ }
+
+ void incNumFixableBailouts() { numFixableBailouts_++; }
+ void resetNumFixableBailouts() { numFixableBailouts_ = 0; }
+ void incNumUnfixableBailouts() { numUnfixableBailouts_++; }
+
+ bool shouldInvalidate() const {
+ return numFixableBailouts_ >= JitOptions.frequentBailoutThreshold;
+ }
+ bool shouldInvalidateAndDisable() const {
+ return numUnfixableBailouts_ >= JitOptions.frequentBailoutThreshold * 5;
+ }
+
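+  // LICM bailout bookkeeping: licmState_ moves from NeverBailed to Bailed in
+  // setHadLICMBailout(), and from Bailed to BailedAndHitFallback in
+  // noteBaselineFallback().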
+ LICMState licmState() const { return licmState_; }
+ void setHadLICMBailout() {
+ if (licmState_ == LICMState::NeverBailed) {
+ licmState_ = LICMState::Bailed;
+ }
+ }
+ void noteBaselineFallback() {
+ if (licmState_ == LICMState::Bailed) {
+ licmState_ = LICMState::BailedAndHitFallback;
+ }
+ }
+
+ void setHasProfilingInstrumentation() { hasProfilingInstrumentation_ = true; }
+ void clearHasProfilingInstrumentation() {
+ hasProfilingInstrumentation_ = false;
+ }
+ bool hasProfilingInstrumentation() const {
+ return hasProfilingInstrumentation_;
+ }
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this);
+ }
+ PreBarriered<Value>& getConstant(size_t index) {
+ MOZ_ASSERT(index < numConstants());
+ return constants()[index];
+ }
+ uint32_t localSlotsSize() const { return localSlotsSize_; }
+ uint32_t argumentSlotsSize() const { return argumentSlotsSize_; }
+ uint32_t frameSize() const { return frameSize_; }
+ const SafepointIndex* getSafepointIndex(uint32_t disp) const;
+ const SafepointIndex* getSafepointIndex(uint8_t* retAddr) const {
+ MOZ_ASSERT(containsCodeAddress(retAddr));
+ return getSafepointIndex(retAddr - method()->raw());
+ }
+ const OsiIndex* getOsiIndex(uint32_t disp) const;
+ const OsiIndex* getOsiIndex(uint8_t* retAddr) const;
+
+ IonIC& getICFromIndex(uint32_t index) {
+ MOZ_ASSERT(index < numICs());
+ uint32_t offset = icIndex()[index];
+ return getIC(offset);
+ }
+ inline IonIC& getIC(uint32_t offset) {
+ MOZ_ASSERT(offset < runtimeSize());
+ return *reinterpret_cast<IonIC*>(runtimeData() + offset);
+ }
+ void purgeICs(Zone* zone);
+ void copySnapshots(const SnapshotWriter* writer);
+ void copyRecovers(const RecoverWriter* writer);
+ void copyConstants(const Value* vp);
+ void copySafepointIndices(const CodegenSafepointIndex* si);
+ void copyOsiIndices(const OsiIndex* oi);
+ void copyRuntimeData(const uint8_t* data);
+ void copyICEntries(const uint32_t* icEntries);
+ void copySafepoints(const SafepointWriter* writer);
+
+ bool invalidated() const { return invalidationCount_ != 0; }
+
+ // Invalidate the current compilation.
+ void invalidate(JSContext* cx, JSScript* script, bool resetUses,
+ const char* reason);
+
+ size_t invalidationCount() const { return invalidationCount_; }
+ void incrementInvalidationCount() { invalidationCount_++; }
+ void decrementInvalidationCount(JS::GCContext* gcx) {
+ MOZ_ASSERT(invalidationCount_);
+ invalidationCount_--;
+ if (!invalidationCount_) {
+ Destroy(gcx, this);
+ }
+ }
+ IonCompilationId compilationId() const { return compilationId_; }
+ uint32_t incrOsrPcMismatchCounter() { return ++osrPcMismatchCounter_; }
+ void resetOsrPcMismatchCounter() { osrPcMismatchCounter_ = 0; }
+
+ size_t allocBytes() const { return allocBytes_; }
+
+ static void preWriteBarrier(Zone* zone, IonScript* ionScript);
+
+#ifdef DEBUG
+ mozilla::HashNumber icHash() const { return icHash_; }
+ void setICHash(mozilla::HashNumber hash) { icHash_ = hash; }
+#endif
+};
+
+// Execution information for a basic block which may persist after the
+// accompanying IonScript is destroyed, for use during profiling.
+struct IonBlockCounts {
+ private:
+ uint32_t id_;
+
+ // Approximate bytecode in the outer (not inlined) script this block
+ // was generated from.
+ uint32_t offset_;
+
+ // File and line of the inner script this block was generated from.
+ char* description_;
+
+ // ids for successors of this block.
+ uint32_t numSuccessors_;
+ uint32_t* successors_;
+
+ // Hit count for this block.
+ uint64_t hitCount_;
+
+ // Text information about the code generated for this block.
+ char* code_;
+
+ public:
+ [[nodiscard]] bool init(uint32_t id, uint32_t offset, char* description,
+ uint32_t numSuccessors) {
+ id_ = id;
+ offset_ = offset;
+ description_ = description;
+ numSuccessors_ = numSuccessors;
+ if (numSuccessors) {
+ successors_ = js_pod_calloc<uint32_t>(numSuccessors);
+ if (!successors_) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ void destroy() {
+ js_free(description_);
+ js_free(successors_);
+ js_free(code_);
+ }
+
+ uint32_t id() const { return id_; }
+
+ uint32_t offset() const { return offset_; }
+
+ const char* description() const { return description_; }
+
+ size_t numSuccessors() const { return numSuccessors_; }
+
+ void setSuccessor(size_t i, uint32_t id) {
+ MOZ_ASSERT(i < numSuccessors_);
+ successors_[i] = id;
+ }
+
+ uint32_t successor(size_t i) const {
+ MOZ_ASSERT(i < numSuccessors_);
+ return successors_[i];
+ }
+
+ uint64_t* addressOfHitCount() { return &hitCount_; }
+
+ uint64_t hitCount() const { return hitCount_; }
+
+ void setCode(const char* code) {
+ char* ncode = js_pod_malloc<char>(strlen(code) + 1);
+ if (ncode) {
+ strcpy(ncode, code);
+ code_ = ncode;
+ }
+ }
+
+ const char* code() const { return code_; }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(description_) + mallocSizeOf(successors_) +
+ mallocSizeOf(code_);
+ }
+};
+
+// Execution information for a compiled script which may persist after the
+// IonScript is destroyed, for use during profiling.
+struct IonScriptCounts {
+ private:
+ // Any previous invalidated compilation(s) for the script.
+ IonScriptCounts* previous_ = nullptr;
+
+ // Information about basic blocks in this script.
+ size_t numBlocks_ = 0;
+ IonBlockCounts* blocks_ = nullptr;
+
+ public:
+ IonScriptCounts() = default;
+
+ ~IonScriptCounts() {
+ for (size_t i = 0; i < numBlocks_; i++) {
+ blocks_[i].destroy();
+ }
+ js_free(blocks_);
+ // The list can be long in some corner cases (bug 1140084), so
+ // unroll the recursion.
+ IonScriptCounts* victims = previous_;
+ while (victims) {
+ IonScriptCounts* victim = victims;
+ victims = victim->previous_;
+ victim->previous_ = nullptr;
+ js_delete(victim);
+ }
+ }
+
+ [[nodiscard]] bool init(size_t numBlocks) {
+ blocks_ = js_pod_calloc<IonBlockCounts>(numBlocks);
+ if (!blocks_) {
+ return false;
+ }
+
+ numBlocks_ = numBlocks;
+ return true;
+ }
+
+ size_t numBlocks() const { return numBlocks_; }
+
+ IonBlockCounts& block(size_t i) {
+ MOZ_ASSERT(i < numBlocks_);
+ return blocks_[i];
+ }
+
+ void setPrevious(IonScriptCounts* previous) { previous_ = previous; }
+
+ IonScriptCounts* previous() const { return previous_; }
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t size = 0;
+ auto currCounts = this;
+ do {
+ size += currCounts->sizeOfOneIncludingThis(mallocSizeOf);
+ currCounts = currCounts->previous_;
+ } while (currCounts);
+ return size;
+ }
+
+ size_t sizeOfOneIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t size = mallocSizeOf(this) + mallocSizeOf(blocks_);
+    for (size_t i = 0; i < numBlocks_; i++) {
+      size += blocks_[i].sizeOfExcludingThis(mallocSizeOf);
+    }
+ return size;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+namespace JS {
+
+template <>
+struct DeletePolicy<js::jit::IonScript> {
+ explicit DeletePolicy(JSRuntime* rt) : rt_(rt) {}
+ void operator()(const js::jit::IonScript* script);
+
+ private:
+ JSRuntime* rt_;
+};
+
+} // namespace JS
+
+#endif /* jit_IonScript_h */
diff --git a/js/src/jit/IonTypes.h b/js/src/jit/IonTypes.h
new file mode 100644
index 0000000000..6227253bff
--- /dev/null
+++ b/js/src/jit/IonTypes.h
@@ -0,0 +1,1108 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_IonTypes_h
+#define jit_IonTypes_h
+
+#include "mozilla/HashFunctions.h"
+
+#include <algorithm>
+#include <initializer_list>
+#include <stdint.h>
+
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/Value.h"
+
+namespace js {
+
+// Each IonScript has a unique compilation id. This is used to sweep/ignore
+// constraints for IonScripts that have been invalidated/destroyed.
+class IonCompilationId {
+ // Use two 32-bit integers instead of uint64_t to avoid 8-byte alignment on
+ // some 32-bit platforms.
+ uint32_t idLo_;
+ uint32_t idHi_;
+
+ public:
+ explicit IonCompilationId(uint64_t id)
+ : idLo_(id & UINT32_MAX), idHi_(id >> 32) {}
+ bool operator==(const IonCompilationId& other) const {
+ return idLo_ == other.idLo_ && idHi_ == other.idHi_;
+ }
+ bool operator!=(const IonCompilationId& other) const {
+ return !operator==(other);
+ }
+};
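+// For example, IonCompilationId(0x0000000100000002) stores idLo_ = 2 and
+// idHi_ = 1; comparing both halves is equivalent to comparing the original
+// 64-bit id.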
+
+namespace jit {
+
+using RecoverOffset = uint32_t;
+using SnapshotOffset = uint32_t;
+
+// The maximum size of any buffer associated with an assembler or code object.
+// This is chosen to not overflow a signed integer, leaving room for an extra
+// bit on offsets.
+static const uint32_t MAX_BUFFER_SIZE = (1 << 30) - 1;
+
+// Maximum number of scripted arg slots.
+static const uint32_t SNAPSHOT_MAX_NARGS = 127;
+
+static const SnapshotOffset INVALID_RECOVER_OFFSET = uint32_t(-1);
+static const SnapshotOffset INVALID_SNAPSHOT_OFFSET = uint32_t(-1);
+
+/*
+ * [SMDOC] Avoiding repeated bailouts / invalidations
+ *
+ * To avoid getting trapped in a "compilation -> bailout -> invalidation ->
+ * recompilation -> bailout -> invalidation -> ..." loop, every snapshot in
+ * Warp code is assigned a BailoutKind. If we bail out at that snapshot,
+ * FinishBailoutToBaseline will examine the BailoutKind and take appropriate
+ * action. In general:
+ *
+ * 1. If the bailing instruction comes from transpiled CacheIR, then when we
+ * bail out and continue execution in the baseline interpreter, the
+ * corresponding stub should fail a guard. As a result, we will either
+ * increment the enteredCount for a subsequent stub or attach a new stub,
+ * either of which will prevent WarpOracle from transpiling the failing stub
+ * when we recompile.
+ *
+ * Note: this means that every CacheIR op that can bail out in Warp must
+ * have an equivalent guard in the baseline CacheIR implementation.
+ *
+ * FirstExecution works according to the same principles: we have never hit
+ * this IC before, but after we bail to baseline we will attach a stub and
+ * recompile with better CacheIR information.
+ *
+ * 2. If the bailout occurs because an assumption we made in WarpBuilder was
+ * invalidated, then FinishBailoutToBaseline will set a flag on the script
+ * to avoid that assumption in the future: for example, UninitializedLexical.
+ *
+ * 3. Similarly, if the bailing instruction is generated or modified by a MIR
+ * optimization, then FinishBailoutToBaseline will set a flag on the script
+ * to make that optimization more conservative in the future. Examples
+ * include LICM, EagerTruncation, and HoistBoundsCheck.
+ *
+ * 4. Some bailouts can't be handled in Warp, even after a recompile. For
+ * example, Warp does not support catching exceptions. If this happens
+ * too often, then the cost of bailing out repeatedly outweighs the
+ * benefit of Warp compilation, so we invalidate the script and disable
+ * Warp compilation.
+ *
+ * 5. Some bailouts don't happen in performance-sensitive code: for example,
+ * the |debugger| statement. We just ignore those.
+ */
+enum class BailoutKind : uint8_t {
+ Unknown,
+
+ // An instruction generated by the transpiler. If this instruction bails out,
+ // attaching a new stub in baseline will invalidate the current Warp script
+ // and avoid a bailout loop.
+ TranspiledCacheIR,
+
+ // An instruction generated by stub folding which has been transpiled into
+ // a monomorphic-inlined script. If this instruction bails out, we will
+ // return to baseline and see if we add a new case to the folded stub. If
+ // we do, this should not count as a bailout for the purpose of eventually
+ // invalidating this script. Because the script containing the folded stub
+ // was inlined monomorphically, there's no direct connection between the
+ // inner script and the outer script. We store the inner and outer scripts
+ // so that we know which outer script to notify if we successfully add a
+ // new case to the folded stub.
+ MonomorphicInlinedStubFolding,
+
+ // An optimistic unbox on the cold path for a non-Value phi failed. If this
+ // instruction bails out, we will invalidate the script and mark the
+ // HadSpeculativePhiBailout flag on the script.
+ SpeculativePhi,
+
+ // A conversion inserted by a type policy. If this instruction bails out,
+ // we expect to throw an error. If this happens too frequently, we will
+ // invalidate the current Warp script and disable recompilation.
+ TypePolicy,
+
+  // An instruction hoisted by LICM. If this instruction bails out, we will
+  // bail out to baseline and track whether we subsequently hit the baseline
+  // fallback path. If a hoisted instruction bails out again after that, we
+  // will invalidate the current Warp script and mark the hadLICMInvalidation
+  // flag on the script.
+ LICM,
+
+ // An instruction moved up by InstructionReordering. If this
+ // instruction bails out, we will mark the ReorderingBailout flag on
+ // the script. If this happens too frequently, we will invalidate
+ // the script.
+ InstructionReordering,
+
+ // An instruction created or hoisted by tryHoistBoundsCheck.
+ // If this instruction bails out, we will invalidate the current Warp script
+ // and mark the HoistBoundsCheckBailout flag on the script.
+ HoistBoundsCheck,
+
+ // An eager truncation generated by range analysis.
+ // If this instruction bails out, we will invalidate the current Warp script
+ // and mark the EagerTruncationBailout flag on the script.
+ EagerTruncation,
+
+ // A folded unbox instruction generated by FoldLoadsWithUnbox.
+ // If this instruction bails out, we will invalidate the current Warp script
+ // and mark the UnboxFoldingBailout flag on the script.
+ UnboxFolding,
+
+ // An inevitable bailout (MBail instruction or type barrier that always bails)
+ Inevitable,
+
+ // Bailing out during a VM call. Many possible causes that are hard
+ // to distinguish statically at snapshot construction time.
+ // We just lump them together.
+ DuringVMCall,
+
+ // A spread call or funapply had more than JIT_ARGS_LENGTH_MAX arguments.
+ // We bail out to handle this in the VM. If this happens too frequently,
+ // we will invalidate the current Warp script and disable recompilation.
+ TooManyArguments,
+
+ // We hit an active |debugger;| statement.
+ Debugger,
+
+ // We hit this code for the first time.
+ FirstExecution,
+
+ // A lexical check failed. We will set lexical checks as unmovable.
+ UninitializedLexical,
+
+ // A bailout to baseline from Ion on exception to handle Debugger hooks.
+ IonExceptionDebugMode,
+
+ // A bailout to baseline from Ion on exception to handle a finally block.
+ Finally,
+
+ // We returned to a stack frame after invalidating its IonScript.
+ OnStackInvalidation,
+
+ // We returned to a stack frame while calling the |return| method of an
+ // iterator, and we have to throw an exception because the return value
+ // was not an object.
+ ThrowCheckIsObject,
+
+ // We have executed code that should be unreachable, and need to assert.
+ Unreachable,
+
+ Limit
+};
+
+inline const char* BailoutKindString(BailoutKind kind) {
+ switch (kind) {
+ case BailoutKind::Unknown:
+ return "Unknown";
+ case BailoutKind::TranspiledCacheIR:
+ return "TranspiledCacheIR";
+ case BailoutKind::MonomorphicInlinedStubFolding:
+ return "MonomorphicInlinedStubFolding";
+ case BailoutKind::SpeculativePhi:
+ return "SpeculativePhi";
+ case BailoutKind::TypePolicy:
+ return "TypePolicy";
+ case BailoutKind::LICM:
+ return "LICM";
+ case BailoutKind::InstructionReordering:
+ return "InstructionReordering";
+ case BailoutKind::HoistBoundsCheck:
+ return "HoistBoundsCheck";
+ case BailoutKind::EagerTruncation:
+ return "EagerTruncation";
+ case BailoutKind::UnboxFolding:
+ return "UnboxFolding";
+ case BailoutKind::Inevitable:
+ return "Inevitable";
+ case BailoutKind::DuringVMCall:
+ return "DuringVMCall";
+ case BailoutKind::TooManyArguments:
+ return "TooManyArguments";
+ case BailoutKind::Debugger:
+ return "Debugger";
+ case BailoutKind::FirstExecution:
+ return "FirstExecution";
+ case BailoutKind::UninitializedLexical:
+ return "UninitializedLexical";
+ case BailoutKind::IonExceptionDebugMode:
+ return "IonExceptionDebugMode";
+ case BailoutKind::Finally:
+ return "Finally";
+ case BailoutKind::OnStackInvalidation:
+ return "OnStackInvalidation";
+ case BailoutKind::ThrowCheckIsObject:
+ return "ThrowCheckIsObject";
+ case BailoutKind::Unreachable:
+ return "Unreachable";
+
+ case BailoutKind::Limit:
+ break;
+ }
+
+ MOZ_CRASH("Invalid BailoutKind");
+}
+
+static const uint32_t ELEMENT_TYPE_BITS = 5;
+static const uint32_t ELEMENT_TYPE_SHIFT = 0;
+static const uint32_t ELEMENT_TYPE_MASK = (1 << ELEMENT_TYPE_BITS) - 1;
+static const uint32_t VECTOR_TYPE_BITS = 1;
+static const uint32_t VECTOR_TYPE_SHIFT =
+ ELEMENT_TYPE_BITS + ELEMENT_TYPE_SHIFT;
+static const uint32_t VECTOR_TYPE_MASK = (1 << VECTOR_TYPE_BITS) - 1;
+
+// The integer SIMD types have a lot of operations that do the exact same thing
+// for signed and unsigned integer types. Sometimes it is simpler to treat
+// signed and unsigned integer SIMD types as the same type, using a SimdSign to
+// distinguish the few cases where there is a difference.
+enum class SimdSign {
+  // Signedness is not applicable to this type (i.e., Float or Bool).
+ NotApplicable,
+ // Treat as an unsigned integer with a range 0 .. 2^N-1.
+ Unsigned,
+ // Treat as a signed integer in two's complement encoding.
+ Signed,
+};
+
+class SimdConstant {
+ public:
+ enum Type {
+ Int8x16,
+ Int16x8,
+ Int32x4,
+ Int64x2,
+ Float32x4,
+ Float64x2,
+ Undefined = -1
+ };
+
+ typedef int8_t I8x16[16];
+ typedef int16_t I16x8[8];
+ typedef int32_t I32x4[4];
+ typedef int64_t I64x2[2];
+ typedef float F32x4[4];
+ typedef double F64x2[2];
+
+ private:
+ Type type_;
+ union {
+ I8x16 i8x16;
+ I16x8 i16x8;
+ I32x4 i32x4;
+ I64x2 i64x2;
+ F32x4 f32x4;
+ F64x2 f64x2;
+ } u;
+
+ bool defined() const { return type_ != Undefined; }
+
+ public:
+ // Doesn't have a default constructor, as it would prevent it from being
+ // included in unions.
+
+ static SimdConstant CreateX16(const int8_t* array) {
+ SimdConstant cst;
+ cst.type_ = Int8x16;
+ memcpy(cst.u.i8x16, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX16(int8_t v) {
+ SimdConstant cst;
+ cst.type_ = Int8x16;
+ std::fill_n(cst.u.i8x16, 16, v);
+ return cst;
+ }
+ static SimdConstant CreateX8(const int16_t* array) {
+ SimdConstant cst;
+ cst.type_ = Int16x8;
+ memcpy(cst.u.i16x8, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX8(int16_t v) {
+ SimdConstant cst;
+ cst.type_ = Int16x8;
+ std::fill_n(cst.u.i16x8, 8, v);
+ return cst;
+ }
+ static SimdConstant CreateX4(const int32_t* array) {
+ SimdConstant cst;
+ cst.type_ = Int32x4;
+ memcpy(cst.u.i32x4, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX4(int32_t v) {
+ SimdConstant cst;
+ cst.type_ = Int32x4;
+ std::fill_n(cst.u.i32x4, 4, v);
+ return cst;
+ }
+ static SimdConstant CreateX2(const int64_t* array) {
+ SimdConstant cst;
+ cst.type_ = Int64x2;
+ memcpy(cst.u.i64x2, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX2(int64_t v) {
+ SimdConstant cst;
+ cst.type_ = Int64x2;
+ std::fill_n(cst.u.i64x2, 2, v);
+ return cst;
+ }
+ static SimdConstant CreateX4(const float* array) {
+ SimdConstant cst;
+ cst.type_ = Float32x4;
+ memcpy(cst.u.f32x4, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX4(float v) {
+ SimdConstant cst;
+ cst.type_ = Float32x4;
+ std::fill_n(cst.u.f32x4, 4, v);
+ return cst;
+ }
+ static SimdConstant CreateX2(const double* array) {
+ SimdConstant cst;
+ cst.type_ = Float64x2;
+ memcpy(cst.u.f64x2, array, sizeof(cst.u));
+ return cst;
+ }
+ static SimdConstant SplatX2(double v) {
+ SimdConstant cst;
+ cst.type_ = Float64x2;
+ std::fill_n(cst.u.f64x2, 2, v);
+ return cst;
+ }
+
+ // Overloads for use by templates.
+ static SimdConstant CreateSimd128(const int8_t* array) {
+ return CreateX16(array);
+ }
+ static SimdConstant CreateSimd128(const int16_t* array) {
+ return CreateX8(array);
+ }
+ static SimdConstant CreateSimd128(const int32_t* array) {
+ return CreateX4(array);
+ }
+ static SimdConstant CreateSimd128(const int64_t* array) {
+ return CreateX2(array);
+ }
+ static SimdConstant CreateSimd128(const float* array) {
+ return CreateX4(array);
+ }
+ static SimdConstant CreateSimd128(const double* array) {
+ return CreateX2(array);
+ }
+
+ Type type() const {
+ MOZ_ASSERT(defined());
+ return type_;
+ }
+
+ bool isFloatingType() const {
+ MOZ_ASSERT(defined());
+ return type_ >= Float32x4;
+ }
+
+ bool isIntegerType() const {
+ MOZ_ASSERT(defined());
+ return type_ <= Int64x2;
+ }
+
+ // Get the raw bytes of the constant.
+ const void* bytes() const { return u.i8x16; }
+
+ const I8x16& asInt8x16() const {
+ MOZ_ASSERT(defined() && type_ == Int8x16);
+ return u.i8x16;
+ }
+
+ const I16x8& asInt16x8() const {
+ MOZ_ASSERT(defined() && type_ == Int16x8);
+ return u.i16x8;
+ }
+
+ const I32x4& asInt32x4() const {
+ MOZ_ASSERT(defined() && type_ == Int32x4);
+ return u.i32x4;
+ }
+
+ const I64x2& asInt64x2() const {
+ MOZ_ASSERT(defined() && type_ == Int64x2);
+ return u.i64x2;
+ }
+
+ const F32x4& asFloat32x4() const {
+ MOZ_ASSERT(defined() && type_ == Float32x4);
+ return u.f32x4;
+ }
+
+ const F64x2& asFloat64x2() const {
+ MOZ_ASSERT(defined() && type_ == Float64x2);
+ return u.f64x2;
+ }
+
+ bool bitwiseEqual(const SimdConstant& rhs) const {
+ MOZ_ASSERT(defined() && rhs.defined());
+ return memcmp(&u, &rhs.u, sizeof(u)) == 0;
+ }
+
+ bool isZeroBits() const {
+ MOZ_ASSERT(defined());
+ return u.i64x2[0] == 0 && u.i64x2[1] == 0;
+ }
+
+ bool isOneBits() const {
+ MOZ_ASSERT(defined());
+ return ~u.i64x2[0] == 0 && ~u.i64x2[1] == 0;
+ }
+
+ // SimdConstant is a HashPolicy. Currently we discriminate by type, but it
+ // may be that we should only be discriminating by int vs float.
+ using Lookup = SimdConstant;
+
+ static HashNumber hash(const SimdConstant& val) {
+ uint32_t hash = mozilla::HashBytes(&val.u, sizeof(val.u));
+ return mozilla::AddToHash(hash, val.type_);
+ }
+
+ static bool match(const SimdConstant& lhs, const SimdConstant& rhs) {
+ return lhs.type() == rhs.type() && lhs.bitwiseEqual(rhs);
+ }
+};
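+// A minimal usage sketch (illustrative only; the values are arbitrary):
+//
+//   SimdConstant a = SimdConstant::SplatX4(int32_t(7));
+//   int32_t raw[4] = {7, 7, 7, 7};
+//   SimdConstant b = SimdConstant::CreateX4(raw);
+//   MOZ_ASSERT(SimdConstant::match(a, b));  // same type, same bits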
+
+enum class IntConversionBehavior {
+ // These two try to convert the input to an int32 using ToNumber and
+ // will fail if the resulting int32 isn't strictly equal to the input.
+ Normal, // Succeeds on -0: converts to 0.
+ NegativeZeroCheck, // Fails on -0.
+ // These two will convert the input to an int32 with loss of precision.
+ Truncate,
+ ClampToUint8,
+};
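+// Worked examples (illustrative, following the usual JS conversion rules):
+// for an input of -0.0, Normal yields 0 while NegativeZeroCheck fails; for an
+// input of 300.7, Truncate yields 300 (truncation toward zero) and
+// ClampToUint8 yields 255.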
+
+enum class IntConversionInputKind { NumbersOnly, NumbersOrBoolsOnly, Any };
+
+// The ordering of this enumeration is important: Anything < Value is a
+// specialized type. Furthermore, anything < String has trivial conversion to
+// a number.
+enum class MIRType : uint8_t {
+ Undefined,
+ Null,
+ Boolean,
+ Int32,
+ Int64,
+ IntPtr,
+ Double,
+ Float32,
+ // Types above have trivial conversion to a number.
+ String,
+ Symbol,
+ BigInt,
+ Simd128,
+ // Types above are primitive (including undefined and null).
+ Object,
+ MagicOptimizedOut, // JS_OPTIMIZED_OUT magic value.
+ MagicHole, // JS_ELEMENTS_HOLE magic value.
+ MagicIsConstructing, // JS_IS_CONSTRUCTING magic value.
+ MagicUninitializedLexical, // JS_UNINITIALIZED_LEXICAL magic value.
+ // Types above are specialized.
+ Value,
+ None, // Invalid, used as a placeholder.
+ Slots, // A slots vector
+ Elements, // An elements vector
+ Pointer, // An opaque pointer that receives no special treatment
+ RefOrNull, // Wasm Ref/AnyRef/NullRef: a raw JSObject* or a raw (void*)0
+ StackResults, // Wasm multi-value stack result area, which may contain refs
+ Shape, // A Shape pointer.
+ Last = Shape
+};
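+// The ordering contract above can be spelled out as compile-time checks.
+// These are illustrative, not exhaustive:
+static_assert(MIRType::Float32 < MIRType::String,
+              "types below String convert trivially to a number");
+static_assert(MIRType::Object < MIRType::Value,
+              "types below Value are specialized");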
+
+static inline MIRType TargetWordMIRType() {
+#ifdef JS_64BIT
+ return MIRType::Int64;
+#else
+ return MIRType::Int32;
+#endif
+}
+
+static inline MIRType MIRTypeFromValueType(JSValueType type) {
+ // This function does not deal with magic types. Magic constants should be
+ // filtered out in MIRTypeFromValue.
+ switch (type) {
+ case JSVAL_TYPE_DOUBLE:
+ return MIRType::Double;
+ case JSVAL_TYPE_INT32:
+ return MIRType::Int32;
+ case JSVAL_TYPE_UNDEFINED:
+ return MIRType::Undefined;
+ case JSVAL_TYPE_STRING:
+ return MIRType::String;
+ case JSVAL_TYPE_SYMBOL:
+ return MIRType::Symbol;
+ case JSVAL_TYPE_BIGINT:
+ return MIRType::BigInt;
+ case JSVAL_TYPE_BOOLEAN:
+ return MIRType::Boolean;
+ case JSVAL_TYPE_NULL:
+ return MIRType::Null;
+ case JSVAL_TYPE_OBJECT:
+ return MIRType::Object;
+ case JSVAL_TYPE_UNKNOWN:
+ return MIRType::Value;
+ default:
+ MOZ_CRASH("unexpected jsval type");
+ }
+}
+
+static inline JSValueType ValueTypeFromMIRType(MIRType type) {
+ switch (type) {
+ case MIRType::Undefined:
+ return JSVAL_TYPE_UNDEFINED;
+ case MIRType::Null:
+ return JSVAL_TYPE_NULL;
+ case MIRType::Boolean:
+ return JSVAL_TYPE_BOOLEAN;
+ case MIRType::Int32:
+ return JSVAL_TYPE_INT32;
+ case MIRType::Float32: // Fall through, there's no JSVAL for Float32
+ case MIRType::Double:
+ return JSVAL_TYPE_DOUBLE;
+ case MIRType::String:
+ return JSVAL_TYPE_STRING;
+ case MIRType::Symbol:
+ return JSVAL_TYPE_SYMBOL;
+ case MIRType::BigInt:
+ return JSVAL_TYPE_BIGINT;
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicHole:
+ case MIRType::MagicIsConstructing:
+ case MIRType::MagicUninitializedLexical:
+ return JSVAL_TYPE_MAGIC;
+ default:
+ MOZ_ASSERT(type == MIRType::Object);
+ return JSVAL_TYPE_OBJECT;
+ }
+}
+
+static inline JSValueTag MIRTypeToTag(MIRType type) {
+ return JSVAL_TYPE_TO_TAG(ValueTypeFromMIRType(type));
+}
+
+static inline size_t MIRTypeToSize(MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ return 4;
+ case MIRType::Int64:
+ return 8;
+ case MIRType::Float32:
+ return 4;
+ case MIRType::Double:
+ return 8;
+ case MIRType::Simd128:
+ return 16;
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ return sizeof(uintptr_t);
+ default:
+ MOZ_CRASH("MIRTypeToSize - unhandled case");
+ }
+}
+
+static inline const char* StringFromMIRType(MIRType type) {
+ switch (type) {
+ case MIRType::Undefined:
+ return "Undefined";
+ case MIRType::Null:
+ return "Null";
+ case MIRType::Boolean:
+ return "Bool";
+ case MIRType::Int32:
+ return "Int32";
+ case MIRType::Int64:
+ return "Int64";
+ case MIRType::IntPtr:
+ return "IntPtr";
+ case MIRType::Double:
+ return "Double";
+ case MIRType::Float32:
+ return "Float32";
+ case MIRType::String:
+ return "String";
+ case MIRType::Symbol:
+ return "Symbol";
+ case MIRType::BigInt:
+ return "BigInt";
+ case MIRType::Object:
+ return "Object";
+ case MIRType::MagicOptimizedOut:
+ return "MagicOptimizedOut";
+ case MIRType::MagicHole:
+ return "MagicHole";
+ case MIRType::MagicIsConstructing:
+ return "MagicIsConstructing";
+ case MIRType::MagicUninitializedLexical:
+ return "MagicUninitializedLexical";
+ case MIRType::Value:
+ return "Value";
+ case MIRType::None:
+ return "None";
+ case MIRType::Slots:
+ return "Slots";
+ case MIRType::Elements:
+ return "Elements";
+ case MIRType::Pointer:
+ return "Pointer";
+ case MIRType::RefOrNull:
+ return "RefOrNull";
+ case MIRType::StackResults:
+ return "StackResults";
+ case MIRType::Shape:
+ return "Shape";
+ case MIRType::Simd128:
+ return "Simd128";
+ }
+ MOZ_CRASH("Unknown MIRType.");
+}
+
+static inline bool IsIntType(MIRType type) {
+ return type == MIRType::Int32 || type == MIRType::Int64;
+}
+
+static inline bool IsNumberType(MIRType type) {
+ return type == MIRType::Int32 || type == MIRType::Double ||
+ type == MIRType::Float32 || type == MIRType::Int64;
+}
+
+static inline bool IsNumericType(MIRType type) {
+ return IsNumberType(type) || type == MIRType::BigInt;
+}
+
+static inline bool IsTypeRepresentableAsDouble(MIRType type) {
+ return type == MIRType::Int32 || type == MIRType::Double ||
+ type == MIRType::Float32;
+}
+
+static inline bool IsFloatType(MIRType type) {
+ return type == MIRType::Int32 || type == MIRType::Float32;
+}
+
+static inline bool IsFloatingPointType(MIRType type) {
+ return type == MIRType::Double || type == MIRType::Float32;
+}
+
+static inline bool IsNullOrUndefined(MIRType type) {
+ return type == MIRType::Null || type == MIRType::Undefined;
+}
+
+static inline bool IsMagicType(MIRType type) {
+ return type == MIRType::MagicHole || type == MIRType::MagicOptimizedOut ||
+ type == MIRType::MagicIsConstructing ||
+ type == MIRType::MagicUninitializedLexical;
+}
+
+static inline bool IsNonGCThing(MIRType type) {
+ return type == MIRType::Undefined || type == MIRType::Null ||
+ type == MIRType::Boolean || IsNumberType(type);
+}
+
+static inline MIRType ScalarTypeToMIRType(Scalar::Type type) {
+ switch (type) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Uint8Clamped:
+ return MIRType::Int32;
+ case Scalar::Int64:
+ return MIRType::Int64;
+ case Scalar::Float32:
+ return MIRType::Float32;
+ case Scalar::Float64:
+ return MIRType::Double;
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ MOZ_CRASH("NYI");
+ case Scalar::Simd128:
+ return MIRType::Simd128;
+ case Scalar::MaxTypedArrayViewType:
+ break;
+ }
+ MOZ_CRASH("unexpected kind");
+}
+
+static constexpr bool NeedsPostBarrier(MIRType type) {
+ MOZ_ASSERT(type != MIRType::Value);
+ return type == MIRType::Object || type == MIRType::String ||
+ type == MIRType::BigInt;
+}
+
+#ifdef DEBUG
+
+// Track the pipeline of opcodes which has produced a snapshot.
+# define TRACK_SNAPSHOTS 1
+
+// Make sure registers are not modified between an instruction and
+// its OsiPoint.
+# define CHECK_OSIPOINT_REGISTERS 1
+
+#endif // DEBUG
+
+enum ABIArgType {
+ // A pointer sized integer
+ ArgType_General = 0x1,
+ // A 32-bit integer
+ ArgType_Int32 = 0x2,
+ // A 64-bit integer
+ ArgType_Int64 = 0x3,
+ // A 32-bit floating point number
+ ArgType_Float32 = 0x4,
+ // A 64-bit floating point number
+ ArgType_Float64 = 0x5,
+
+ RetType_Shift = 0x0,
+ ArgType_Shift = 0x3,
+ ArgType_Mask = 0x7
+};
+
+namespace detail {
+
+static constexpr uint64_t MakeABIFunctionType(
+ ABIArgType ret, std::initializer_list<ABIArgType> args) {
+ uint64_t abiType = (uint64_t)ret << RetType_Shift;
+ int i = 1;
+ for (auto arg : args) {
+ abiType |= ((uint64_t)arg << (ArgType_Shift * i));
+ i++;
+ }
+ return abiType;
+}
+
+} // namespace detail
+
+enum ABIFunctionType : uint64_t {
+ // The enum must be explicitly typed to avoid UB: some validly constructed
+ // members are larger than any explicitly declared members.
+
+ // VM functions that take 0-9 non-double arguments
+ // and return a non-double value.
+ Args_General0 = ArgType_General << RetType_Shift,
+ Args_General1 = Args_General0 | (ArgType_General << (ArgType_Shift * 1)),
+ Args_General2 = Args_General1 | (ArgType_General << (ArgType_Shift * 2)),
+ Args_General3 = Args_General2 | (ArgType_General << (ArgType_Shift * 3)),
+ Args_General4 = Args_General3 | (ArgType_General << (ArgType_Shift * 4)),
+ Args_General5 = Args_General4 | (ArgType_General << (ArgType_Shift * 5)),
+ Args_General6 = Args_General5 | (ArgType_General << (ArgType_Shift * 6)),
+ Args_General7 = Args_General6 | (ArgType_General << (ArgType_Shift * 7)),
+ Args_General8 = Args_General7 | (ArgType_General << (ArgType_Shift * 8)),
+
+ // int64 f(double)
+ Args_Int64_Double =
+ (ArgType_Int64 << RetType_Shift) | (ArgType_Float64 << ArgType_Shift),
+
+ // double f()
+ Args_Double_None = ArgType_Float64 << RetType_Shift,
+
+ // int f(double)
+ Args_Int_Double = Args_General0 | (ArgType_Float64 << ArgType_Shift),
+
+ // int f(float32)
+ Args_Int_Float32 = Args_General0 | (ArgType_Float32 << ArgType_Shift),
+
+ // float f(float)
+ Args_Float32_Float32 =
+ (ArgType_Float32 << RetType_Shift) | (ArgType_Float32 << ArgType_Shift),
+
+ // float f(int, int)
+ Args_Float32_IntInt = (ArgType_Float32 << RetType_Shift) |
+ (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)),
+
+ // double f(double)
+ Args_Double_Double = Args_Double_None | (ArgType_Float64 << ArgType_Shift),
+
+ // double f(int)
+ Args_Double_Int = Args_Double_None | (ArgType_General << ArgType_Shift),
+
+ // double f(int, int)
+ Args_Double_IntInt =
+ Args_Double_Int | (ArgType_General << (ArgType_Shift * 2)),
+
+ // double f(double, int)
+ Args_Double_DoubleInt = Args_Double_None |
+ (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_Float64 << (ArgType_Shift * 2)),
+
+ // double f(double, double)
+ Args_Double_DoubleDouble =
+ Args_Double_Double | (ArgType_Float64 << (ArgType_Shift * 2)),
+
+ // float f(float, float)
+ Args_Float32_Float32Float32 =
+ Args_Float32_Float32 | (ArgType_Float32 << (ArgType_Shift * 2)),
+
+ // double f(int, double)
+ Args_Double_IntDouble = Args_Double_None |
+ (ArgType_Float64 << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)),
+
+ // int f(int, double)
+ Args_Int_IntDouble = Args_General0 |
+ (ArgType_Float64 << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)),
+
+ // int f(double, int)
+ Args_Int_DoubleInt = Args_General0 |
+ (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_Float64 << (ArgType_Shift * 2)),
+
+ // double f(double, double, double)
+ Args_Double_DoubleDoubleDouble =
+ Args_Double_DoubleDouble | (ArgType_Float64 << (ArgType_Shift * 3)),
+
+ // double f(double, double, double, double)
+ Args_Double_DoubleDoubleDoubleDouble =
+ Args_Double_DoubleDoubleDouble | (ArgType_Float64 << (ArgType_Shift * 4)),
+
+ // int f(double, int, int)
+ Args_Int_DoubleIntInt = Args_General0 |
+ (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)) |
+ (ArgType_Float64 << (ArgType_Shift * 3)),
+
+ // int f(int, double, int, int)
+ Args_Int_IntDoubleIntInt = Args_General0 |
+ (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)) |
+ (ArgType_Float64 << (ArgType_Shift * 3)) |
+ (ArgType_General << (ArgType_Shift * 4)),
+
+ Args_Int_GeneralGeneralGeneralInt64 =
+ Args_General0 | (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)) |
+ (ArgType_General << (ArgType_Shift * 3)) |
+ (ArgType_Int64 << (ArgType_Shift * 4)),
+
+ Args_Int_GeneralGeneralInt64Int64 = Args_General0 |
+ (ArgType_General << (ArgType_Shift * 1)) |
+ (ArgType_General << (ArgType_Shift * 2)) |
+ (ArgType_Int64 << (ArgType_Shift * 3)) |
+ (ArgType_Int64 << (ArgType_Shift * 4)),
+
+ // int32_t f(...) variants
+ Args_Int32_General =
+ detail::MakeABIFunctionType(ArgType_Int32, {ArgType_General}),
+ Args_Int32_GeneralInt32 = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32}),
+ Args_Int32_GeneralInt32Int32 = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32}),
+ Args_Int32_GeneralInt32Int32Int32Int32 = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_Int32}),
+ Args_Int32_GeneralInt32Int32Int32Int32Int32 = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_Int32, ArgType_Int32}),
+ Args_Int32_GeneralInt32Int32Int32Int32General = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_Int32, ArgType_General}),
+ Args_Int32_GeneralGeneralInt32General = detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_General, ArgType_Int32, ArgType_General}),
+ Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32 =
+ detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_General, ArgType_Int32, ArgType_General,
+ ArgType_Int32, ArgType_Int32, ArgType_Int32}),
+ Args_Int32_GeneralInt32Int32Int32Int32Int32Int32General =
+ detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_Int32, ArgType_Int32, ArgType_General}),
+ Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General =
+ detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Float32, ArgType_Float32,
+ ArgType_Int32, ArgType_Int32, ArgType_Int32, ArgType_General}),
+ Args_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General =
+ detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Float32, ArgType_Float32,
+ ArgType_Float32, ArgType_Float32, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_Int32, ArgType_General}),
+ Args_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General =
+ detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Float32, ArgType_Float32,
+ ArgType_Int32, ArgType_Float32, ArgType_Float32, ArgType_Int32,
+ ArgType_Float32, ArgType_Int32, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_General}),
+ Args_Int32_GeneralInt32Int32Int32General = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_General}),
+ Args_Int32_GeneralInt32Int32Int64 = detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Int32, ArgType_Int64}),
+ Args_Int32_GeneralInt32Int32General = detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Int32, ArgType_General}),
+ Args_Int32_GeneralInt32Int64Int64 = detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Int64, ArgType_Int64}),
+ Args_Int32_GeneralInt32GeneralInt32 = detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_General, ArgType_Int32}),
+ Args_Int32_GeneralInt32GeneralInt32Int32 = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_General,
+ ArgType_Int32, ArgType_Int32}),
+ Args_Int32_GeneralGeneral = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_General}),
+ Args_Int32_GeneralGeneralGeneral = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_General, ArgType_General}),
+ Args_Int32_GeneralGeneralInt32Int32 = detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_General, ArgType_Int32, ArgType_Int32}),
+
+ // general f(...) variants
+ Args_General_GeneralInt32 = detail::MakeABIFunctionType(
+ ArgType_General, {ArgType_General, ArgType_Int32}),
+ Args_General_GeneralInt32Int32 = detail::MakeABIFunctionType(
+ ArgType_General, {ArgType_General, ArgType_Int32, ArgType_Int32}),
+ Args_General_GeneralInt32General = detail::MakeABIFunctionType(
+ ArgType_General, {ArgType_General, ArgType_Int32, ArgType_General}),
+ Args_General_GeneralInt32Int32GeneralInt32 = detail::MakeABIFunctionType(
+ ArgType_General, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_General, ArgType_Int32}),
+ Args_Int32_GeneralInt64Int32Int32Int32 = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int64, ArgType_Int32,
+ ArgType_Int32, ArgType_Int32}),
+ Args_Int32_GeneralInt64Int32 = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int64, ArgType_Int32}),
+ Args_Int32_GeneralInt64Int32Int64 = detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int64, ArgType_Int32, ArgType_Int64}),
+ Args_Int32_GeneralInt64Int32Int64General = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int64, ArgType_Int32,
+ ArgType_Int64, ArgType_General}),
+ Args_Int32_GeneralInt64Int64Int64 = detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int64, ArgType_Int64, ArgType_Int64}),
+ Args_Int32_GeneralInt64Int64General = detail::MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int64, ArgType_Int64, ArgType_General}),
+ Args_Int32_GeneralInt64Int64Int64General = detail::MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int64, ArgType_Int64,
+ ArgType_Int64, ArgType_General}),
+
+ // Functions that return Int64 are tricky because SpiderMonkey's ReturnRegI64
+ // does not match the ABI int64 return register on x86. Wasm only!
+ Args_Int64_General =
+ detail::MakeABIFunctionType(ArgType_Int64, {ArgType_General}),
+ Args_Int64_GeneralInt64 = detail::MakeABIFunctionType(
+ ArgType_Int64, {ArgType_General, ArgType_Int64}),
+
+};
+
+static constexpr ABIFunctionType MakeABIFunctionType(
+ ABIArgType ret, std::initializer_list<ABIArgType> args) {
+ return ABIFunctionType(detail::MakeABIFunctionType(ret, args));
+}
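+// Illustrative check of the packing used above: the return type occupies the
+// low bits and each successive argument occupies the next 3-bit field.
+static_assert(Args_Int32_GeneralInt32 ==
+                  ((uint64_t(ArgType_Int32) << RetType_Shift) |
+                   (uint64_t(ArgType_General) << (ArgType_Shift * 1)) |
+                   (uint64_t(ArgType_Int32) << (ArgType_Shift * 2))),
+              "ABIFunctionType packs the return type low and each argument "
+              "in a successive 3-bit field");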
+
+// Rounding modes for round instructions.
+enum class RoundingMode { Down, Up, NearestTiesToEven, TowardsZero };
+
+// If a function contains no calls, we can assume the caller has checked the
+// stack limit up to this maximum frame size. This works because the jit stack
+// limit has a generous buffer before the real end of the native stack.
+static const uint32_t MAX_UNCHECKED_LEAF_FRAME_SIZE = 64;
+
+// Truncating conversion modifiers.
+using TruncFlags = uint32_t;
+static const TruncFlags TRUNC_UNSIGNED = TruncFlags(1) << 0;
+static const TruncFlags TRUNC_SATURATING = TruncFlags(1) << 1;
+
+enum BranchDirection { FALSE_BRANCH, TRUE_BRANCH };
+
+template <typename T>
+constexpr T SplatByteToUInt(uint8_t val, uint8_t x) {
+ T splatted = val;
+ for (; x > 1; x--) {
+ splatted |= splatted << 8;
+ }
+ return splatted;
+}
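+// Illustrative compile-time check: splatting a byte across four byte lanes.
+static_assert(SplatByteToUInt<uint32_t>(0xAB, 4) == 0xABABABABu,
+              "SplatByteToUInt replicates the byte into x lanes");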
+
+// Resume information for a frame, stored in a resume point.
+enum class ResumeMode : uint8_t {
+ // Innermost frame. Resume at the next bytecode op when bailing out.
+ ResumeAfter,
+
+ // Innermost frame. This resume point captures an additional value
+ // that is not on the expression stack. Resume at the next bytecode
+ // op when bailing out, but first check that the intermediate value
+ // is an object. This is used if calling the |return| method for a
+ // CloseIter causes an invalidation bailout.
+ ResumeAfterCheckIsObject,
+
+ // Innermost frame. Resume at the current bytecode op when bailing out.
+ ResumeAt,
+
+ // Outer frame for an inlined "standard" call at an IsInvokeOp bytecode op.
+ InlinedStandardCall,
+
+ // Outer frame for an inlined js::fun_call at an IsInvokeOp bytecode op.
+ InlinedFunCall,
+
+ // Outer frame for an inlined getter/setter at a Get*/Set* bytecode op.
+ InlinedAccessor,
+
+ Last = InlinedAccessor
+};
+
+inline const char* ResumeModeToString(ResumeMode mode) {
+ switch (mode) {
+ case ResumeMode::ResumeAfter:
+ return "ResumeAfter";
+ case ResumeMode::ResumeAt:
+ return "ResumeAt";
+ case ResumeMode::InlinedStandardCall:
+ return "InlinedStandardCall";
+ case ResumeMode::InlinedFunCall:
+ return "InlinedFunCall";
+ case ResumeMode::InlinedAccessor:
+ return "InlinedAccessor";
+ case ResumeMode::ResumeAfterCheckIsObject:
+ return "ResumeAfterCheckIsObject";
+ }
+ MOZ_CRASH("Invalid mode");
+}
+
+inline bool IsResumeAfter(ResumeMode mode) {
+ switch (mode) {
+ case ResumeMode::ResumeAfter:
+ case ResumeMode::ResumeAfterCheckIsObject:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// The number of intermediate values captured by this resume point
+// that aren't on the expression stack, but are needed during bailouts.
+inline uint32_t NumIntermediateValues(ResumeMode mode) {
+ switch (mode) {
+ case ResumeMode::ResumeAfterCheckIsObject:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_IonTypes_h */
diff --git a/js/src/jit/JSJitFrameIter-inl.h b/js/src/jit/JSJitFrameIter-inl.h
new file mode 100644
index 0000000000..58c56e4ae4
--- /dev/null
+++ b/js/src/jit/JSJitFrameIter-inl.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JSJitFrameIter_inl_h
+#define jit_JSJitFrameIter_inl_h
+
+#include "jit/JSJitFrameIter.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/ScriptFromCalleeToken.h"
+
+namespace js {
+namespace jit {
+
+inline uint8_t* JSJitFrameIter::returnAddress() const {
+ CommonFrameLayout* current = (CommonFrameLayout*)current_;
+ return current->returnAddress();
+}
+
+inline FrameType JSJitFrameIter::prevType() const {
+ CommonFrameLayout* current = (CommonFrameLayout*)current_;
+ return current->prevType();
+}
+
+inline ExitFrameLayout* JSJitFrameIter::exitFrame() const {
+ MOZ_ASSERT(isExitFrame());
+ return (ExitFrameLayout*)fp();
+}
+
+inline JitFrameLayout* JSJitProfilingFrameIterator::framePtr() const {
+ MOZ_ASSERT(!done());
+ return (JitFrameLayout*)fp_;
+}
+
+inline JSScript* JSJitProfilingFrameIterator::frameScript() const {
+ return ScriptFromCalleeToken(framePtr()->calleeToken());
+}
+
+inline BaselineFrame* JSJitFrameIter::baselineFrame() const {
+ MOZ_ASSERT(isBaselineJS());
+ return (BaselineFrame*)(fp() - BaselineFrame::Size());
+}
+
+inline uint32_t JSJitFrameIter::baselineFrameNumValueSlots() const {
+ MOZ_ASSERT(isBaselineJS());
+ return baselineFrame()->numValueSlots(*baselineFrameSize_);
+}
+
+template <typename T>
+bool JSJitFrameIter::isExitFrameLayout() const {
+ if (!isExitFrame()) {
+ return false;
+ }
+ return exitFrame()->is<T>();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JSJitFrameIter_inl_h */
diff --git a/js/src/jit/JSJitFrameIter.cpp b/js/src/jit/JSJitFrameIter.cpp
new file mode 100644
index 0000000000..5d846fbdab
--- /dev/null
+++ b/js/src/jit/JSJitFrameIter.cpp
@@ -0,0 +1,798 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/JSJitFrameIter-inl.h"
+
+#include "jit/CalleeToken.h"
+#include "jit/IonScript.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitScript.h"
+#include "jit/MacroAssembler.h" // js::jit::Assembler::GetPointer
+#include "jit/SafepointIndex.h"
+#include "jit/Safepoints.h"
+#include "jit/ScriptFromCalleeToken.h"
+#include "jit/VMFunctions.h"
+#include "js/friend/DumpFunctions.h" // js::DumpObject, js::DumpValue
+#include "vm/JitActivation.h"
+
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+JSJitFrameIter::JSJitFrameIter(const JitActivation* activation)
+ : JSJitFrameIter(activation, FrameType::Exit, activation->jsExitFP()) {}
+
+JSJitFrameIter::JSJitFrameIter(const JitActivation* activation,
+ FrameType frameType, uint8_t* fp)
+ : current_(fp),
+ type_(frameType),
+ resumePCinCurrentFrame_(nullptr),
+ cachedSafepointIndex_(nullptr),
+ activation_(activation) {
+ MOZ_ASSERT(type_ == FrameType::JSJitToWasm || type_ == FrameType::Exit);
+ if (activation_->bailoutData()) {
+ current_ = activation_->bailoutData()->fp();
+ type_ = FrameType::Bailout;
+ } else {
+ MOZ_ASSERT(!TlsContext.get()->inUnsafeCallWithABI);
+ }
+}
+
+bool JSJitFrameIter::checkInvalidation() const {
+ IonScript* dummy;
+ return checkInvalidation(&dummy);
+}
+
+bool JSJitFrameIter::checkInvalidation(IonScript** ionScriptOut) const {
+ JSScript* script = this->script();
+ if (isBailoutJS()) {
+ *ionScriptOut = activation_->bailoutData()->ionScript();
+ return !script->hasIonScript() || script->ionScript() != *ionScriptOut;
+ }
+
+ uint8_t* returnAddr = resumePCinCurrentFrame();
+ // N.B. the current IonScript is not the same as the frame's
+ // IonScript if the frame has since been invalidated.
+ bool invalidated = !script->hasIonScript() ||
+ !script->ionScript()->containsReturnAddress(returnAddr);
+ if (!invalidated) {
+ return false;
+ }
+
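+  // The invalidation epilogue embeds a pointer to the owning IonScript in the
+  // code stream; the int32 stored immediately before the return address is
+  // the offset from the return address to that embedded pointer.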
+ int32_t invalidationDataOffset = ((int32_t*)returnAddr)[-1];
+ uint8_t* ionScriptDataOffset = returnAddr + invalidationDataOffset;
+ IonScript* ionScript = (IonScript*)Assembler::GetPointer(ionScriptDataOffset);
+ MOZ_ASSERT(ionScript->containsReturnAddress(returnAddr));
+ *ionScriptOut = ionScript;
+ return true;
+}
+
+CalleeToken JSJitFrameIter::calleeToken() const {
+ return ((JitFrameLayout*)current_)->calleeToken();
+}
+
+JSFunction* JSJitFrameIter::callee() const {
+ MOZ_ASSERT(isScripted());
+ MOZ_ASSERT(isFunctionFrame());
+ return CalleeTokenToFunction(calleeToken());
+}
+
+JSFunction* JSJitFrameIter::maybeCallee() const {
+ if (isScripted() && isFunctionFrame()) {
+ return callee();
+ }
+ return nullptr;
+}
+
+bool JSJitFrameIter::isBareExit() const {
+ if (type_ != FrameType::Exit) {
+ return false;
+ }
+ return exitFrame()->isBareExit();
+}
+
+bool JSJitFrameIter::isUnwoundJitExit() const {
+ if (type_ != FrameType::Exit) {
+ return false;
+ }
+ return exitFrame()->isUnwoundJitExit();
+}
+
+bool JSJitFrameIter::isFunctionFrame() const {
+ return CalleeTokenIsFunction(calleeToken());
+}
+
+JSScript* JSJitFrameIter::script() const {
+ MOZ_ASSERT(isScripted());
+ JSScript* script = ScriptFromCalleeToken(calleeToken());
+ MOZ_ASSERT(script);
+ return script;
+}
+
+JSScript* JSJitFrameIter::maybeForwardedScript() const {
+ MOZ_ASSERT(isScripted());
+ if (isBaselineJS()) {
+ return MaybeForwardedScriptFromCalleeToken(baselineFrame()->calleeToken());
+ }
+ JSScript* script = MaybeForwardedScriptFromCalleeToken(calleeToken());
+ MOZ_ASSERT(script);
+ return script;
+}
+
+void JSJitFrameIter::baselineScriptAndPc(JSScript** scriptRes,
+ jsbytecode** pcRes) const {
+ MOZ_ASSERT(isBaselineJS());
+ JSScript* script = this->script();
+ if (scriptRes) {
+ *scriptRes = script;
+ }
+
+ MOZ_ASSERT(pcRes);
+
+ // The Baseline Interpreter stores the bytecode pc in the frame.
+ if (baselineFrame()->runningInInterpreter()) {
+ MOZ_ASSERT(baselineFrame()->interpreterScript() == script);
+ *pcRes = baselineFrame()->interpreterPC();
+ return;
+ }
+
+ // There must be a BaselineScript with a RetAddrEntry for the current return
+ // address.
+ uint8_t* retAddr = resumePCinCurrentFrame();
+ const RetAddrEntry& entry =
+ script->baselineScript()->retAddrEntryFromReturnAddress(retAddr);
+ *pcRes = entry.pc(script);
+}
+
+Value* JSJitFrameIter::actualArgs() const { return jsFrame()->actualArgs(); }
+
+uint8_t* JSJitFrameIter::prevFp() const { return current()->callerFramePtr(); }
+
+// Compute the size of a Baseline frame excluding pushed VMFunction arguments or
+// callee frame headers. This is used to calculate the number of Value slots in
+// the frame. The caller asserts this matches BaselineFrame::debugFrameSize.
+static uint32_t ComputeBaselineFrameSize(const JSJitFrameIter& frame) {
+ MOZ_ASSERT(frame.prevType() == FrameType::BaselineJS);
+
+ uint32_t frameSize = frame.current()->callerFramePtr() - frame.fp();
+
+ if (frame.isBaselineStub()) {
+ return frameSize - BaselineStubFrameLayout::Size();
+ }
+
+ // Note: an UnwoundJit exit frame is a JitFrameLayout that was turned into an
+ // ExitFrameLayout by EnsureUnwoundJitExitFrame. We have to use the original
+ // header size here because that's what we have on the stack.
+ if (frame.isScripted() || frame.isUnwoundJitExit()) {
+ return frameSize - JitFrameLayout::Size();
+ }
+
+ if (frame.isExitFrame()) {
+ frameSize -= ExitFrameLayout::Size();
+ if (frame.exitFrame()->isWrapperExit()) {
+ const VMFunctionData* data = frame.exitFrame()->footer()->function();
+ frameSize -= data->explicitStackSlots() * sizeof(void*);
+ }
+ return frameSize;
+ }
+
+ MOZ_CRASH("Unexpected frame");
+}
+
+void JSJitFrameIter::operator++() {
+ MOZ_ASSERT(!isEntry());
+
+ // Compute BaselineFrame size. In debug builds this is equivalent to
+ // BaselineFrame::debugFrameSize_. This is asserted at the end of this method.
+ if (current()->prevType() == FrameType::BaselineJS) {
+ uint32_t frameSize = ComputeBaselineFrameSize(*this);
+ baselineFrameSize_ = mozilla::Some(frameSize);
+ } else {
+ baselineFrameSize_ = mozilla::Nothing();
+ }
+
+ cachedSafepointIndex_ = nullptr;
+
+ // If the next frame is the entry frame, just exit. Don't update current_,
+ // since the entry and first frames overlap.
+ if (isEntry(current()->prevType())) {
+ type_ = current()->prevType();
+ return;
+ }
+
+ type_ = current()->prevType();
+ resumePCinCurrentFrame_ = current()->returnAddress();
+ current_ = prevFp();
+
+ MOZ_ASSERT_IF(isBaselineJS(),
+ baselineFrame()->debugFrameSize() == *baselineFrameSize_);
+}
+
+uintptr_t* JSJitFrameIter::spillBase() const {
+ MOZ_ASSERT(isIonJS());
+
+ // Get the base address to where safepoint registers are spilled.
+ // Out-of-line calls do not unwind the extra padding space used to
+ // aggregate bailout tables, so we use frameSize instead of frameLocals,
+ // which would only account for local stack slots.
+ return reinterpret_cast<uintptr_t*>(fp() - ionScript()->frameSize());
+}
+
+MachineState JSJitFrameIter::machineState() const {
+ MOZ_ASSERT(isIonScripted());
+
+ // The MachineState is used by GCs for tracing call-sites.
+ if (MOZ_UNLIKELY(isBailoutJS())) {
+ return *activation_->bailoutData()->machineState();
+ }
+
+ SafepointReader reader(ionScript(), safepoint());
+
+ FloatRegisterSet fregs = reader.allFloatSpills().set().reduceSetForPush();
+ GeneralRegisterSet regs = reader.allGprSpills().set();
+
+ uintptr_t* spill = spillBase();
+ uint8_t* spillAlign =
+ alignDoubleSpill(reinterpret_cast<uint8_t*>(spill - regs.size()));
+ char* floatSpill = reinterpret_cast<char*>(spillAlign);
+
+ return MachineState::FromSafepoint(fregs, regs, floatSpill, spill);
+}
+
+JitFrameLayout* JSJitFrameIter::jsFrame() const {
+ MOZ_ASSERT(isScripted());
+ if (isBailoutJS()) {
+ return (JitFrameLayout*)activation_->bailoutData()->fp();
+ }
+
+ return (JitFrameLayout*)fp();
+}
+
+IonScript* JSJitFrameIter::ionScript() const {
+ MOZ_ASSERT(isIonScripted());
+ if (isBailoutJS()) {
+ return activation_->bailoutData()->ionScript();
+ }
+
+ IonScript* ionScript = nullptr;
+ if (checkInvalidation(&ionScript)) {
+ return ionScript;
+ }
+ return ionScriptFromCalleeToken();
+}
+
+IonScript* JSJitFrameIter::ionScriptFromCalleeToken() const {
+ MOZ_ASSERT(isIonJS());
+ MOZ_ASSERT(!checkInvalidation());
+ return script()->ionScript();
+}
+
+const SafepointIndex* JSJitFrameIter::safepoint() const {
+ MOZ_ASSERT(isIonJS());
+ if (!cachedSafepointIndex_) {
+ cachedSafepointIndex_ =
+ ionScript()->getSafepointIndex(resumePCinCurrentFrame());
+ }
+ return cachedSafepointIndex_;
+}
+
+SnapshotOffset JSJitFrameIter::snapshotOffset() const {
+ MOZ_ASSERT(isIonScripted());
+ if (isBailoutJS()) {
+ return activation_->bailoutData()->snapshotOffset();
+ }
+ return osiIndex()->snapshotOffset();
+}
+
+const OsiIndex* JSJitFrameIter::osiIndex() const {
+ MOZ_ASSERT(isIonJS());
+ SafepointReader reader(ionScript(), safepoint());
+ return ionScript()->getOsiIndex(reader.osiReturnPointOffset());
+}
+
+bool JSJitFrameIter::isConstructing() const {
+ return CalleeTokenIsConstructing(calleeToken());
+}
+
+unsigned JSJitFrameIter::numActualArgs() const {
+ if (isScripted()) {
+ return jsFrame()->numActualArgs();
+ }
+
+ MOZ_ASSERT(isExitFrameLayout<NativeExitFrameLayout>());
+ return exitFrame()->as<NativeExitFrameLayout>()->argc();
+}
+
+void JSJitFrameIter::dumpBaseline() const {
+ MOZ_ASSERT(isBaselineJS());
+
+ fprintf(stderr, " JS Baseline frame\n");
+ if (isFunctionFrame()) {
+ fprintf(stderr, " callee fun: ");
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ DumpObject(callee());
+#else
+ fprintf(stderr, "?\n");
+#endif
+ } else {
+ fprintf(stderr, " global frame, no callee\n");
+ }
+
+ fprintf(stderr, " file %s line %u\n", script()->filename(),
+ script()->lineno());
+
+ JSContext* cx = TlsContext.get();
+ RootedScript script(cx);
+ jsbytecode* pc;
+ baselineScriptAndPc(script.address(), &pc);
+
+ fprintf(stderr, " script = %p, pc = %p (offset %u)\n", (void*)script, pc,
+ uint32_t(script->pcToOffset(pc)));
+ fprintf(stderr, " current op: %s\n", CodeName(JSOp(*pc)));
+
+ fprintf(stderr, " actual args: %u\n", numActualArgs());
+
+ for (unsigned i = 0; i < baselineFrameNumValueSlots(); i++) {
+ fprintf(stderr, " slot %u: ", i);
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ Value* v = baselineFrame()->valueSlot(i);
+ DumpValue(*v);
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+}
+
+void JSJitFrameIter::dump() const {
+ switch (type_) {
+ case FrameType::CppToJSJit:
+ fprintf(stderr, " Entry frame\n");
+ fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
+ break;
+ case FrameType::BaselineJS:
+ dumpBaseline();
+ break;
+ case FrameType::BaselineStub:
+ fprintf(stderr, " Baseline stub frame\n");
+ fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
+ break;
+ case FrameType::Bailout:
+ case FrameType::IonJS: {
+ InlineFrameIterator frames(TlsContext.get(), this);
+ for (;;) {
+ frames.dump();
+ if (!frames.more()) {
+ break;
+ }
+ ++frames;
+ }
+ break;
+ }
+ case FrameType::BaselineInterpreterEntry:
+ fprintf(stderr, " Baseline Interpreter Entry frame\n");
+ fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
+ break;
+ case FrameType::Rectifier:
+ fprintf(stderr, " Rectifier frame\n");
+ fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
+ break;
+ case FrameType::IonICCall:
+ fprintf(stderr, " Ion IC call\n");
+ fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
+ break;
+ case FrameType::WasmToJSJit:
+ fprintf(stderr, " Fast wasm-to-JS entry frame\n");
+ fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
+ break;
+ case FrameType::Exit:
+ fprintf(stderr, " Exit frame\n");
+ break;
+ case FrameType::JSJitToWasm:
+ fprintf(stderr, " Wasm exit frame\n");
+ break;
+ };
+ fputc('\n', stderr);
+}
+
+#ifdef DEBUG
+bool JSJitFrameIter::verifyReturnAddressUsingNativeToBytecodeMap() {
+ MOZ_ASSERT(resumePCinCurrentFrame_ != nullptr);
+
+ // Only handle Ion and Baseline frames for now.
+ if (type_ != FrameType::IonJS && type_ != FrameType::BaselineJS) {
+ return true;
+ }
+
+ JSRuntime* rt = TlsContext.get()->runtime();
+
+ // Don't verify while off thread.
+ if (!CurrentThreadCanAccessRuntime(rt)) {
+ return true;
+ }
+
+ // Don't verify if sampling is being suppressed.
+ if (!TlsContext.get()->isProfilerSamplingEnabled()) {
+ return true;
+ }
+
+ if (JS::RuntimeHeapIsMinorCollecting()) {
+ return true;
+ }
+
+ JitRuntime* jitrt = rt->jitRuntime();
+
+ // Look up and print bytecode info for the native address.
+ const JitcodeGlobalEntry* entry =
+ jitrt->getJitcodeGlobalTable()->lookup(resumePCinCurrentFrame_);
+ if (!entry) {
+ return true;
+ }
+
+ JitSpew(JitSpew_Profiling, "Found nativeToBytecode entry for %p: %p - %p",
+ resumePCinCurrentFrame_, entry->nativeStartAddr(),
+ entry->nativeEndAddr());
+
+ BytecodeLocationVector location;
+ uint32_t depth = UINT32_MAX;
+ if (!entry->callStackAtAddr(rt, resumePCinCurrentFrame_, location, &depth)) {
+ return false;
+ }
+ MOZ_ASSERT(depth > 0 && depth != UINT32_MAX);
+ MOZ_ASSERT(location.length() == depth);
+
+ JitSpew(JitSpew_Profiling, "Found bytecode location of depth %u:", depth);
+ for (size_t i = 0; i < location.length(); i++) {
+ JitSpew(JitSpew_Profiling, " %s:%u - %zu",
+ location[i].getDebugOnlyScript()->filename(),
+ location[i].getDebugOnlyScript()->lineno(),
+ size_t(location[i].toRawBytecode() -
+ location[i].getDebugOnlyScript()->code()));
+ }
+
+ if (type_ == FrameType::IonJS) {
+ // Create an InlineFrameIterator here and verify the mapped info against the
+ // iterator info.
+ InlineFrameIterator inlineFrames(TlsContext.get(), this);
+ for (size_t idx = 0; idx < location.length(); idx++) {
+ MOZ_ASSERT(idx < location.length());
+ MOZ_ASSERT_IF(idx < location.length() - 1, inlineFrames.more());
+
+ JitSpew(JitSpew_Profiling, "Match %d: ION %s:%u(%zu) vs N2B %s:%u(%zu)",
+ (int)idx, inlineFrames.script()->filename(),
+ inlineFrames.script()->lineno(),
+ size_t(inlineFrames.pc() - inlineFrames.script()->code()),
+ location[idx].getDebugOnlyScript()->filename(),
+ location[idx].getDebugOnlyScript()->lineno(),
+ size_t(location[idx].toRawBytecode() -
+ location[idx].getDebugOnlyScript()->code()));
+
+ MOZ_ASSERT(inlineFrames.script() == location[idx].getDebugOnlyScript());
+
+ if (inlineFrames.more()) {
+ ++inlineFrames;
+ }
+ }
+ }
+
+ return true;
+}
+#endif // DEBUG
+
+JSJitProfilingFrameIterator::JSJitProfilingFrameIterator(JSContext* cx,
+ void* pc, void* sp) {
+ // If no profilingActivation is live, initialize directly to
+ // end-of-iteration state.
+ if (!cx->profilingActivation()) {
+ type_ = FrameType::CppToJSJit;
+ fp_ = nullptr;
+ resumePCinCurrentFrame_ = nullptr;
+ return;
+ }
+
+ MOZ_ASSERT(cx->profilingActivation()->isJit());
+
+ JitActivation* act = cx->profilingActivation()->asJit();
+
+ // If the top JitActivation has a null lastProfilingFrame, assume that
+ // it's a trivially empty activation, and initialize directly
+ // to end-of-iteration state.
+ if (!act->lastProfilingFrame()) {
+ type_ = FrameType::CppToJSJit;
+ fp_ = nullptr;
+ resumePCinCurrentFrame_ = nullptr;
+ return;
+ }
+
+ // Get the fp from the current profilingActivation
+ fp_ = (uint8_t*)act->lastProfilingFrame();
+
+ // Use fp_ as endStackAddress_. For cases below where we know we're currently
+ // executing JIT code, we use the current stack pointer instead.
+ endStackAddress_ = fp_;
+
+ // Profiler sampling must NOT be suppressed if we are here.
+ MOZ_ASSERT(cx->isProfilerSamplingEnabled());
+
+ // Try initializing with sampler pc
+ if (tryInitWithPC(pc)) {
+ endStackAddress_ = sp;
+ return;
+ }
+
+ // Try initializing with sampler pc using native=>bytecode table.
+ JitcodeGlobalTable* table =
+ cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
+ if (tryInitWithTable(table, pc, /* forLastCallSite = */ false)) {
+ endStackAddress_ = sp;
+ return;
+ }
+
+ // Try initializing with lastProfilingCallSite pc
+ void* lastCallSite = act->lastProfilingCallSite();
+ if (lastCallSite) {
+ if (tryInitWithPC(lastCallSite)) {
+ return;
+ }
+
+ // Try initializing with lastProfilingCallSite pc using native=>bytecode
+ // table.
+ if (tryInitWithTable(table, lastCallSite, /* forLastCallSite = */ true)) {
+ return;
+ }
+ }
+
+ // If nothing matches, for now just assume we are at the start of the last
+ // frame's baseline jit code or interpreter code.
+ type_ = FrameType::BaselineJS;
+ if (frameScript()->hasBaselineScript()) {
+ resumePCinCurrentFrame_ = frameScript()->baselineScript()->method()->raw();
+ } else {
+ MOZ_ASSERT(IsBaselineInterpreterEnabled());
+ resumePCinCurrentFrame_ =
+ cx->runtime()->jitRuntime()->baselineInterpreter().codeRaw();
+ }
+}
+
+template <typename ReturnType = CommonFrameLayout*>
+static inline ReturnType GetPreviousRawFrame(CommonFrameLayout* frame) {
+ return ReturnType(frame->callerFramePtr());
+}
+
+JSJitProfilingFrameIterator::JSJitProfilingFrameIterator(
+ CommonFrameLayout* fp) {
+ endStackAddress_ = fp;
+ moveToNextFrame(fp);
+}
+
+bool JSJitProfilingFrameIterator::tryInitWithPC(void* pc) {
+ JSScript* callee = frameScript();
+
+ // Check for Ion first, since it's more likely for hot code.
+ if (callee->hasIonScript() &&
+ callee->ionScript()->method()->containsNativePC(pc)) {
+ type_ = FrameType::IonJS;
+ resumePCinCurrentFrame_ = pc;
+ return true;
+ }
+
+ // Check for containment in Baseline jitcode second.
+ if (callee->hasBaselineScript() &&
+ callee->baselineScript()->method()->containsNativePC(pc)) {
+ type_ = FrameType::BaselineJS;
+ resumePCinCurrentFrame_ = pc;
+ return true;
+ }
+
+ return false;
+}
+
+bool JSJitProfilingFrameIterator::tryInitWithTable(JitcodeGlobalTable* table,
+ void* pc,
+ bool forLastCallSite) {
+ if (!pc) {
+ return false;
+ }
+
+ const JitcodeGlobalEntry* entry = table->lookup(pc);
+ if (!entry) {
+ return false;
+ }
+
+ JSScript* callee = frameScript();
+
+ MOZ_ASSERT(entry->isIon() || entry->isIonIC() || entry->isBaseline() ||
+ entry->isBaselineInterpreter() || entry->isDummy());
+
+ // Treat dummy lookups as an empty frame sequence.
+ if (entry->isDummy()) {
+ type_ = FrameType::CppToJSJit;
+ fp_ = nullptr;
+ resumePCinCurrentFrame_ = nullptr;
+ return true;
+ }
+
+ // For IonICEntry, use the corresponding IonEntry.
+ if (entry->isIonIC()) {
+ entry = table->lookup(entry->asIonIC().rejoinAddr());
+ MOZ_ASSERT(entry);
+ MOZ_RELEASE_ASSERT(entry->isIon());
+ }
+
+ if (entry->isIon()) {
+ // If the looked-up callee doesn't match the frame callee, don't accept
+ // the lastProfilingCallSite.
+ if (entry->asIon().getScript(0) != callee) {
+ return false;
+ }
+
+ type_ = FrameType::IonJS;
+ resumePCinCurrentFrame_ = pc;
+ return true;
+ }
+
+ if (entry->isBaseline()) {
+ // If the looked-up callee doesn't match the frame callee, don't accept
+ // the lastProfilingCallSite.
+ if (forLastCallSite && entry->asBaseline().script() != callee) {
+ return false;
+ }
+
+ type_ = FrameType::BaselineJS;
+ resumePCinCurrentFrame_ = pc;
+ return true;
+ }
+
+ if (entry->isBaselineInterpreter()) {
+ type_ = FrameType::BaselineJS;
+ resumePCinCurrentFrame_ = pc;
+ return true;
+ }
+
+ return false;
+}
+
+const char* JSJitProfilingFrameIterator::baselineInterpreterLabel() const {
+ MOZ_ASSERT(type_ == FrameType::BaselineJS);
+ return frameScript()->jitScript()->profileString();
+}
+
+void JSJitProfilingFrameIterator::baselineInterpreterScriptPC(
+ JSScript** script, jsbytecode** pc, uint64_t* realmID) const {
+ MOZ_ASSERT(type_ == FrameType::BaselineJS);
+ BaselineFrame* blFrame = (BaselineFrame*)(fp_ - BaselineFrame::Size());
+ *script = frameScript();
+ *pc = (*script)->code();
+
+ if (blFrame->runningInInterpreter() &&
+ blFrame->interpreterScript() == *script) {
+ jsbytecode* interpPC = blFrame->interpreterPC();
+ if ((*script)->containsPC(interpPC)) {
+ *pc = interpPC;
+ }
+
+ *realmID = (*script)->realm()->creationOptions().profilerRealmID();
+ }
+}
+
+void JSJitProfilingFrameIterator::operator++() {
+ JitFrameLayout* frame = framePtr();
+ moveToNextFrame(frame);
+}
+
+void JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame) {
+ /*
+ * fp_ points to a Baseline or Ion frame. The possible call-stack
+ * patterns occurring between this frame and a previous Ion, Baseline or Entry
+ * frame are as follows:
+ *
+ * <Baseline-Or-Ion>
+ * ^
+ * |
+ * ^--- Ion (or Baseline JSOp::Resume)
+ * |
+ * ^--- Baseline Stub <---- Baseline
+ * |
+ * ^--- IonICCall <---- Ion
+ * |
+ * ^--- WasmToJSJit <---- (other wasm frames, not handled by this iterator)
+ * |
+ * ^--- Arguments Rectifier
+ * | ^
+ * | |
+ * | ^--- Ion
+ * | |
+ * | ^--- Baseline Stub <---- Baseline
+ * | |
+ * | ^--- WasmToJSJit <--- (other wasm frames)
+ * | |
+ * | ^--- Entry Frame (CppToJSJit)
+ * |
+ * ^--- Entry Frame (CppToJSJit)
+ * |
+ * ^--- Entry Frame (BaselineInterpreter)
+ * | ^
+ * | |
+ * | ^--- Ion
+ * | |
+ * | ^--- Baseline Stub <---- Baseline
+ * | |
+ * | ^--- WasmToJSJit <--- (other wasm frames)
+ * | |
+ * | ^--- Entry Frame (CppToJSJit)
+ * | |
+ * | ^--- Arguments Rectifier
+ *
+ * NOTE: Keep this in sync with JitRuntime::generateProfilerExitFrameTailStub!
+ */
+
+ // Unwrap baseline interpreter entry frame.
+ if (frame->prevType() == FrameType::BaselineInterpreterEntry) {
+ frame = GetPreviousRawFrame<BaselineInterpreterEntryFrameLayout*>(frame);
+ }
+
+ // Unwrap rectifier frames.
+ if (frame->prevType() == FrameType::Rectifier) {
+ frame = GetPreviousRawFrame<RectifierFrameLayout*>(frame);
+ MOZ_ASSERT(frame->prevType() == FrameType::IonJS ||
+ frame->prevType() == FrameType::BaselineStub ||
+ frame->prevType() == FrameType::WasmToJSJit ||
+ frame->prevType() == FrameType::CppToJSJit);
+ }
+
+ FrameType prevType = frame->prevType();
+ switch (prevType) {
+ case FrameType::IonJS:
+ case FrameType::BaselineJS:
+ resumePCinCurrentFrame_ = frame->returnAddress();
+ fp_ = GetPreviousRawFrame<uint8_t*>(frame);
+ type_ = prevType;
+ return;
+
+ case FrameType::BaselineStub:
+ case FrameType::IonICCall: {
+ FrameType stubPrevType = (prevType == FrameType::BaselineStub)
+ ? FrameType::BaselineJS
+ : FrameType::IonJS;
+ auto* stubFrame = GetPreviousRawFrame<CommonFrameLayout*>(frame);
+ MOZ_ASSERT(stubFrame->prevType() == stubPrevType);
+ resumePCinCurrentFrame_ = stubFrame->returnAddress();
+ fp_ = GetPreviousRawFrame<uint8_t*>(stubFrame);
+ type_ = stubPrevType;
+ return;
+ }
+
+ case FrameType::WasmToJSJit:
+ // No previous JS jit frame; this is a transition frame used to
+ // pass a wasm iterator the correct value of FP.
+ resumePCinCurrentFrame_ = nullptr;
+ fp_ = GetPreviousRawFrame<uint8_t*>(frame);
+ type_ = FrameType::WasmToJSJit;
+ MOZ_ASSERT(!done());
+ return;
+
+ case FrameType::CppToJSJit:
+ // No previous frame, set to nullptr to indicate that
+ // JSJitProfilingFrameIterator is done().
+ resumePCinCurrentFrame_ = nullptr;
+ fp_ = nullptr;
+ type_ = FrameType::CppToJSJit;
+ return;
+
+ case FrameType::BaselineInterpreterEntry:
+ case FrameType::Rectifier:
+ case FrameType::Exit:
+ case FrameType::Bailout:
+ case FrameType::JSJitToWasm:
+ // Rectifier and Baseline Interpreter entry frames are handled before
+ // this switch. The other frame types can't call JS functions directly.
+ break;
+ }
+
+ MOZ_CRASH("Bad frame type.");
+}
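+
+// Illustrative usage sketch (not part of the original patch): how a sampling
+// profiler might walk the JS jit frames of the sampled activation with
+// JSJitProfilingFrameIterator. RecordSample() is a hypothetical callback; the
+// iterator API matches the declarations in JSJitFrameIter.h.
+#if 0
+static void SampleJitStack(JSContext* cx, void* samplePC, void* sampleSP) {
+  for (JSJitProfilingFrameIterator iter(cx, samplePC, sampleSP); !iter.done();
+       ++iter) {
+    // frameType() distinguishes Ion, Baseline and transition frames;
+    // resumePCinCurrentFrame() is the native address to symbolicate.
+    RecordSample(iter.frameType(), iter.resumePCinCurrentFrame(), iter.fp());
+  }
+}
+#endif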
diff --git a/js/src/jit/JSJitFrameIter.h b/js/src/jit/JSJitFrameIter.h
new file mode 100644
index 0000000000..b056da46db
--- /dev/null
+++ b/js/src/jit/JSJitFrameIter.h
@@ -0,0 +1,802 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JSJitFrameIter_h
+#define jit_JSJitFrameIter_h
+
+#include "mozilla/Maybe.h"
+
+#include "jstypes.h"
+
+#include "jit/JitCode.h"
+#include "jit/MachineState.h"
+#include "jit/Snapshots.h"
+#include "js/ProfilingFrameIterator.h"
+#include "vm/JSFunction.h"
+#include "vm/JSScript.h"
+
+namespace js {
+
+class ArgumentsObject;
+
+namespace jit {
+
+enum class FrameType {
+ // A JS frame is analogous to a js::InterpreterFrame, representing one
+ // scripted function activation. IonJS frames are used by the optimizing
+ // compiler.
+ IonJS,
+
+ // JS frame used by the Baseline Interpreter and Baseline JIT.
+ BaselineJS,
+
+ // Frame pushed by Baseline stubs that make non-tail calls, so that the
+ // return address -> ICEntry mapping works.
+ BaselineStub,
+
+ // The entry frame is the initial prologue block transitioning from the VM
+ // into the Ion world.
+ CppToJSJit,
+
+ // This entry frame sits right before the baseline interpreter
+ // so that external profilers can identify which function is being
+ // interpreted. Only used under the --emit-interpreter-entry option.
+ BaselineInterpreterEntry,
+
+ // A rectifier frame sits in between two JS frames, adapting argc != nargs
+ // mismatches in calls.
+ Rectifier,
+
+ // Ion IC calling a scripted getter/setter or a VMFunction.
+ IonICCall,
+
+ // An exit frame is necessary for transitioning from a JS frame into C++.
+ // From within C++, an exit frame is always the last frame in any
+ // JitActivation.
+ Exit,
+
+ // A bailout frame is a special IonJS jit frame after a bailout, and before
+ // the reconstruction of the BaselineJS frame. From within C++, a bailout
+ // frame is always the last frame in a JitActivation iff the bailout frame
+ // information is recorded on the JitActivation.
+ Bailout,
+
+ // A wasm to JS frame is constructed during fast calls from wasm to the JS
+ // jits, used as a marker to interleave JS jit and wasm frames. From the
+ // point of view of JS JITs, this is just another kind of entry frame.
+ WasmToJSJit,
+
+ // A JS to wasm frame is constructed during fast calls from any JS jits to
+ // wasm, and is a special kind of exit frame that doesn't have the exit
+ // footer. From the point of view of the jit, it can be skipped as an exit.
+ JSJitToWasm,
+};
+
+enum class ReadFrameArgsBehavior {
+ // Read all actual arguments. Will invoke the callback numActualArgs times.
+ Actuals,
+
+ // Read all argument values in the stack frame. Will invoke the callback
+ // max(numFormalArgs, numActualArgs) times.
+ ActualsAndFormals,
+};
+
+class CommonFrameLayout;
+class JitFrameLayout;
+class ExitFrameLayout;
+
+class BaselineFrame;
+class JitActivation;
+class SafepointIndex;
+class OsiIndex;
+
+// Iterate over the JIT stack to assert that all invariants are respected.
+// - Check that all entry frames are aligned on JitStackAlignment.
+// - Check that all rectifier frames keep the JitStackAlignment.
+
+void AssertJitStackInvariants(JSContext* cx);
+
+// A JSJitFrameIter can iterate over a linear frame group of JS jit frames
+// only. It will stop at the first frame that is not of the same kind, or at
+// the end of an activation.
+//
+// If you want to handle every kind of frames (including wasm frames), use
+// JitFrameIter. If you want to skip interleaved frames of other kinds, use
+// OnlyJSJitFrameIter.
+
+class JSJitFrameIter {
+ protected:
+ uint8_t* current_;
+ FrameType type_;
+ uint8_t* resumePCinCurrentFrame_;
+
+ // Size of the current Baseline frame. Equivalent to
+ // BaselineFrame::debugFrameSize_ in debug builds.
+ mozilla::Maybe<uint32_t> baselineFrameSize_;
+
+ private:
+ mutable const SafepointIndex* cachedSafepointIndex_;
+ const JitActivation* activation_;
+
+ void dumpBaseline() const;
+
+ public:
+ // See comment above the class.
+ explicit JSJitFrameIter(const JitActivation* activation);
+
+ // A constructor specialized for jit->wasm frames, which starts at a
+ // specific FP.
+ JSJitFrameIter(const JitActivation* activation, FrameType frameType,
+ uint8_t* fp);
+
+ void setResumePCInCurrentFrame(uint8_t* newAddr) {
+ resumePCinCurrentFrame_ = newAddr;
+ }
+
+ // Current frame information.
+ FrameType type() const { return type_; }
+ uint8_t* fp() const { return current_; }
+ const JitActivation* activation() const { return activation_; }
+
+ CommonFrameLayout* current() const { return (CommonFrameLayout*)current_; }
+
+ inline uint8_t* returnAddress() const;
+
+ // Return a pointer to the JitFrameLayout; the iterator is assumed to be
+ // settled on a scripted frame.
+ JitFrameLayout* jsFrame() const;
+
+ inline ExitFrameLayout* exitFrame() const;
+
+ // Returns whether the JS frame has been invalidated and, if so,
+ // places the invalidated Ion script in |ionScript|.
+ bool checkInvalidation(IonScript** ionScript) const;
+ bool checkInvalidation() const;
+
+ bool isExitFrame() const { return type_ == FrameType::Exit; }
+ bool isScripted() const {
+ return type_ == FrameType::BaselineJS || type_ == FrameType::IonJS ||
+ type_ == FrameType::Bailout;
+ }
+ bool isBaselineJS() const { return type_ == FrameType::BaselineJS; }
+ bool isIonScripted() const {
+ return type_ == FrameType::IonJS || type_ == FrameType::Bailout;
+ }
+ bool isIonJS() const { return type_ == FrameType::IonJS; }
+ bool isIonICCall() const { return type_ == FrameType::IonICCall; }
+ bool isBailoutJS() const { return type_ == FrameType::Bailout; }
+ bool isBaselineStub() const { return type_ == FrameType::BaselineStub; }
+ bool isBaselineInterpreterEntry() const {
+ return type_ == FrameType::BaselineInterpreterEntry;
+ }
+ bool isRectifier() const { return type_ == FrameType::Rectifier; }
+ bool isBareExit() const;
+ bool isUnwoundJitExit() const;
+ template <typename T>
+ bool isExitFrameLayout() const;
+
+ static bool isEntry(FrameType type) {
+ return type == FrameType::CppToJSJit || type == FrameType::WasmToJSJit;
+ }
+ bool isEntry() const { return isEntry(type_); }
+
+ bool isFunctionFrame() const;
+
+ bool isConstructing() const;
+
+ void* calleeToken() const;
+ JSFunction* callee() const;
+ JSFunction* maybeCallee() const;
+ unsigned numActualArgs() const;
+ JSScript* script() const;
+ JSScript* maybeForwardedScript() const;
+ void baselineScriptAndPc(JSScript** scriptRes, jsbytecode** pcRes) const;
+ Value* actualArgs() const;
+
+ // Returns the address of the next instruction that will execute in this
+ // frame, once control returns to this frame.
+ uint8_t* resumePCinCurrentFrame() const { return resumePCinCurrentFrame_; }
+
+ // Previous frame information extracted from the current frame.
+ inline FrameType prevType() const;
+ uint8_t* prevFp() const;
+
+ // Functions used to iterate over frames. When prevType is an entry,
+ // the current frame is the last JS jit frame.
+ bool done() const { return isEntry(); }
+ void operator++();
+
+ // Returns the IonScript associated with this JS frame.
+ IonScript* ionScript() const;
+
+ // Returns the IonScript associated with this JS frame; the frame must
+ // not be invalidated.
+ IonScript* ionScriptFromCalleeToken() const;
+
+ // Returns the Safepoint associated with this JS frame. Incurs a lookup
+ // overhead.
+ const SafepointIndex* safepoint() const;
+
+ // Returns the OSI index associated with this JS frame. Incurs a lookup
+ // overhead.
+ const OsiIndex* osiIndex() const;
+
+ // Returns the Snapshot offset associated with this JS frame. Incurs a
+ // lookup overhead.
+ SnapshotOffset snapshotOffset() const;
+
+ uintptr_t* spillBase() const;
+ MachineState machineState() const;
+
+ template <class Op>
+ void unaliasedForEachActual(Op op) const {
+ MOZ_ASSERT(isBaselineJS());
+
+ unsigned nactual = numActualArgs();
+ Value* argv = actualArgs();
+ for (unsigned i = 0; i < nactual; i++) {
+ op(argv[i]);
+ }
+ }
+
+ void dump() const;
+
+ inline BaselineFrame* baselineFrame() const;
+
+ // Returns the number of local and expression stack Values for the current
+ // Baseline frame.
+ inline uint32_t baselineFrameNumValueSlots() const;
+
+ // This function isn't used, but we keep it here (debug-only) because it is
+ // helpful when chasing issues with the jitcode map.
+#ifdef DEBUG
+ bool verifyReturnAddressUsingNativeToBytecodeMap();
+#else
+ bool verifyReturnAddressUsingNativeToBytecodeMap() { return true; }
+#endif
+};
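+
+// Illustrative usage sketch (not part of the original patch): walking every
+// JS jit frame of a JitActivation and dispatching on the frame kind, assuming
+// a hypothetical HandleScriptedFrame() helper.
+#if 0
+static void WalkActivation(const JitActivation* activation) {
+  for (JSJitFrameIter frame(activation); !frame.done(); ++frame) {
+    if (frame.isScripted()) {
+      // BaselineJS, IonJS and Bailout frames carry a script and callee token.
+      HandleScriptedFrame(frame.script());
+    }
+    // Stub, rectifier, IC-call and exit frames carry no bytecode of their own
+    // and are simply stepped over.
+  }
+}
+#endif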
+
+class JitcodeGlobalTable;
+
+class JSJitProfilingFrameIterator {
+ uint8_t* fp_;
+ // See JS::ProfilingFrameIterator::endStackAddress_ comment.
+ void* endStackAddress_ = nullptr;
+ FrameType type_;
+ void* resumePCinCurrentFrame_;
+
+ inline JSScript* frameScript() const;
+ [[nodiscard]] bool tryInitWithPC(void* pc);
+ [[nodiscard]] bool tryInitWithTable(JitcodeGlobalTable* table, void* pc,
+ bool forLastCallSite);
+
+ void moveToNextFrame(CommonFrameLayout* frame);
+
+ public:
+ JSJitProfilingFrameIterator(JSContext* cx, void* pc, void* sp);
+ explicit JSJitProfilingFrameIterator(CommonFrameLayout* exitFP);
+
+ void operator++();
+ bool done() const { return fp_ == nullptr; }
+
+ const char* baselineInterpreterLabel() const;
+ void baselineInterpreterScriptPC(JSScript** script, jsbytecode** pc,
+ uint64_t* realmID) const;
+
+ void* fp() const {
+ MOZ_ASSERT(!done());
+ return fp_;
+ }
+ inline JitFrameLayout* framePtr() const;
+ void* stackAddress() const { return fp(); }
+ FrameType frameType() const {
+ MOZ_ASSERT(!done());
+ return type_;
+ }
+ void* resumePCinCurrentFrame() const {
+ MOZ_ASSERT(!done());
+ return resumePCinCurrentFrame_;
+ }
+
+ void* endStackAddress() const { return endStackAddress_; }
+};
+
+class RInstructionResults {
+ // Vector of results of recover instructions.
+ typedef mozilla::Vector<HeapPtr<Value>, 1, SystemAllocPolicy> Values;
+ UniquePtr<Values> results_;
+
+ // The frame pointer is used as a key to check if the current frame already
+ // bailed out.
+ JitFrameLayout* fp_;
+
+ // Record whether we tried and succeeded at allocating and filling the vector
+ // of recover instruction results, if needed. This flag is needed in order to
+ // avoid evaluating the recover instructions twice.
+ bool initialized_;
+
+ public:
+ explicit RInstructionResults(JitFrameLayout* fp);
+ RInstructionResults(RInstructionResults&& src);
+
+ RInstructionResults& operator=(RInstructionResults&& rhs);
+
+ ~RInstructionResults();
+
+ [[nodiscard]] bool init(JSContext* cx, uint32_t numResults);
+ bool isInitialized() const;
+ size_t length() const;
+
+ JitFrameLayout* frame() const;
+
+ HeapPtr<Value>& operator[](size_t index);
+
+ void trace(JSTracer* trc);
+};
+
+struct MaybeReadFallback {
+ enum FallbackConsequence { Fallback_Invalidate, Fallback_DoNothing };
+
+ JSContext* maybeCx = nullptr;
+ JitActivation* activation = nullptr;
+ const JSJitFrameIter* frame = nullptr;
+ const FallbackConsequence consequence = Fallback_Invalidate;
+
+ MaybeReadFallback() = default;
+
+ MaybeReadFallback(JSContext* cx, JitActivation* activation,
+ const JSJitFrameIter* frame,
+ FallbackConsequence consequence = Fallback_Invalidate)
+ : maybeCx(cx),
+ activation(activation),
+ frame(frame),
+ consequence(consequence) {}
+
+ bool canRecoverResults() { return maybeCx; }
+};
+
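+// Illustrative usage sketch (not part of the original patch): building a
+// fallback so that maybeRead() can recover optimized-out values by evaluating
+// recover instructions, invalidating the Ion code if needed. The |cx|,
+// |activation| and |jitFrame| names are assumed to be in scope.
+#if 0
+MaybeReadFallback fallback(cx, activation, &jitFrame,
+                           MaybeReadFallback::Fallback_Invalidate);
+InlineFrameIterator inlineIter(cx, &jitFrame);
+Value thisVal = inlineIter.thisArgument(fallback);  // may trigger recovery
+#endif
+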
+class RResumePoint;
+
+// Reads frame information in snapshot-encoding order (that is, outermost frame
+// to innermost frame).
+class SnapshotIterator {
+ protected:
+ SnapshotReader snapshot_;
+ RecoverReader recover_;
+ JitFrameLayout* fp_;
+ const MachineState* machine_;
+ IonScript* ionScript_;
+ RInstructionResults* instructionResults_;
+
+ enum class ReadMethod : bool {
+ // Read the normal value.
+ Normal,
+
+ // Read the default value, or the normal value if there is no default.
+ AlwaysDefault,
+ };
+
+ private:
+ // Read a spilled register from the machine state.
+ bool hasRegister(Register reg) const { return machine_->has(reg); }
+ uintptr_t fromRegister(Register reg) const { return machine_->read(reg); }
+
+ bool hasRegister(FloatRegister reg) const { return machine_->has(reg); }
+ template <typename T>
+ T fromRegister(FloatRegister reg) const {
+ return machine_->read<T>(reg);
+ }
+
+ // Read an uintptr_t from the stack.
+ bool hasStack(int32_t offset) const { return true; }
+ uintptr_t fromStack(int32_t offset) const;
+
+ bool hasInstructionResult(uint32_t index) const {
+ return instructionResults_;
+ }
+ bool hasInstructionResults() const { return instructionResults_; }
+ Value fromInstructionResult(uint32_t index) const;
+
+ Value allocationValue(const RValueAllocation& a,
+ ReadMethod rm = ReadMethod::Normal);
+ [[nodiscard]] bool allocationReadable(const RValueAllocation& a,
+ ReadMethod rm = ReadMethod::Normal);
+ void writeAllocationValuePayload(const RValueAllocation& a, const Value& v);
+ void warnUnreadableAllocation();
+
+ public:
+ // Handle iterating over RValueAllocations of the snapshots.
+ inline RValueAllocation readAllocation() {
+ MOZ_ASSERT(moreAllocations());
+ return snapshot_.readAllocation();
+ }
+ void skip() { snapshot_.skipAllocation(); }
+
+ const RResumePoint* resumePoint() const;
+ const RInstruction* instruction() const { return recover_.instruction(); }
+
+ uint32_t numAllocations() const;
+ inline bool moreAllocations() const {
+ return snapshot_.numAllocationsRead() < numAllocations();
+ }
+
+ JitFrameLayout* frame() { return fp_; }
+
+ // Used by recover instruction to store the value back into the instruction
+ // results array.
+ void storeInstructionResult(const Value& v);
+
+ public:
+ // Exhibits frame properties contained in the snapshot.
+ uint32_t pcOffset() const;
+ ResumeMode resumeMode() const;
+
+ bool resumeAfter() const {
+ // Calls in outer frames are never considered resume-after.
+ MOZ_ASSERT_IF(moreFrames(), !IsResumeAfter(resumeMode()));
+ return IsResumeAfter(resumeMode());
+ }
+ inline BailoutKind bailoutKind() const { return snapshot_.bailoutKind(); }
+
+ public:
+ // Read the next instruction available and get ready to either skip it or
+ // evaluate it.
+ inline void nextInstruction() {
+ MOZ_ASSERT(snapshot_.numAllocationsRead() == numAllocations());
+ recover_.nextInstruction();
+ snapshot_.resetNumAllocationsRead();
+ }
+
+ // Skip an Instruction by walking to the next instruction and by skipping
+ // all the allocations corresponding to this instruction.
+ void skipInstruction();
+
+ inline bool moreInstructions() const { return recover_.moreInstructions(); }
+
+ // Register a vector used for storing the results of the evaluation of
+ // recover instructions. This vector should be registered before the
+ // beginning of the iteration. This function is in charge of allocating
+ // enough space for all instruction results, and returns false iff it fails.
+ [[nodiscard]] bool initInstructionResults(MaybeReadFallback& fallback);
+
+ protected:
+ // This function is used internally for computing the result of the recover
+ // instructions.
+ [[nodiscard]] bool computeInstructionResults(
+ JSContext* cx, RInstructionResults* results) const;
+
+ public:
+ // Handle iterating over frames of the snapshots.
+ void nextFrame();
+ void settleOnFrame();
+
+ inline bool moreFrames() const {
+ // The last instruction recovers the innermost frame, so as long as
+ // there are more instructions there are necessarily more frames.
+ return moreInstructions();
+ }
+
+ public:
+ // Combine all information about the current script in order to recover the
+ // contents of baseline frames.
+
+ SnapshotIterator(const JSJitFrameIter& iter,
+ const MachineState* machineState);
+ SnapshotIterator();
+
+ Value read() { return allocationValue(readAllocation()); }
+
+ // Read the |Normal| value unless it is not available and the snapshot
+ // provides a |Default| value. This is useful to avoid invalidating the
+ // frame when we are only interested in a few properties which are provided
+ // by the |Default| value.
+ Value readWithDefault(RValueAllocation* alloc) {
+ *alloc = RValueAllocation();
+ RValueAllocation a = readAllocation();
+ if (allocationReadable(a)) {
+ return allocationValue(a);
+ }
+
+ *alloc = a;
+ return allocationValue(a, ReadMethod::AlwaysDefault);
+ }
+
+ Value maybeRead(const RValueAllocation& a, MaybeReadFallback& fallback);
+ Value maybeRead(MaybeReadFallback& fallback) {
+ RValueAllocation a = readAllocation();
+ return maybeRead(a, fallback);
+ }
+
+ bool tryRead(Value* result);
+
+ void traceAllocation(JSTracer* trc);
+
+ template <class Op>
+ void readFunctionFrameArgs(Op& op, ArgumentsObject** argsObj, Value* thisv,
+ unsigned start, unsigned end, JSScript* script,
+ MaybeReadFallback& fallback) {
+ // Assumes that the common frame arguments have already been read.
+ if (script->needsArgsObj()) {
+ if (argsObj) {
+ Value v = maybeRead(fallback);
+ if (v.isObject()) {
+ *argsObj = &v.toObject().as<ArgumentsObject>();
+ }
+ } else {
+ skip();
+ }
+ }
+
+ if (thisv) {
+ *thisv = maybeRead(fallback);
+ } else {
+ skip();
+ }
+
+ unsigned i = 0;
+ if (end < start) {
+ i = start;
+ }
+
+ for (; i < start; i++) {
+ skip();
+ }
+ for (; i < end; i++) {
+ // We are not always able to read values from the snapshots; some values,
+ // such as non-GC things, may still be live in registers and cause an
+ // error while reading the machine state.
+ Value v = maybeRead(fallback);
+ op(v);
+ }
+ }
+
+ // Iterate over all the allocations and return only the value of the
+ // allocation located at the given index.
+ Value maybeReadAllocByIndex(size_t index);
+
+#ifdef TRACK_SNAPSHOTS
+ void spewBailingFrom() const { snapshot_.spewBailingFrom(); }
+#endif
+};
+
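+// Illustrative usage sketch (not part of the original patch): reading the
+// recovered Values of the innermost frame in snapshot-encoding order.
+// |jitFrame| and |machine| are assumed to be a settled JSJitFrameIter and its
+// MachineState.
+#if 0
+SnapshotIterator snapIter(jitFrame, &machine);
+snapIter.settleOnFrame();
+while (snapIter.moreAllocations()) {
+  Value v;
+  if (!snapIter.tryRead(&v)) {
+    break;  // value not recoverable without a MaybeReadFallback
+  }
+  // ... use |v| ...
+}
+#endif
+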
+// Reads frame information in callstack order (that is, innermost frame to
+// outermost frame).
+class InlineFrameIterator {
+ const JSJitFrameIter* frame_;
+ SnapshotIterator start_;
+ SnapshotIterator si_;
+ uint32_t framesRead_;
+
+ // When the inline-frame-iterator is created, this variable is set to
+ // UINT32_MAX. The first iteration of findNextFrame, which settles on
+ // the innermost frame, then updates this counter to the number of
+ // frames contained in the recover buffer.
+ uint32_t frameCount_;
+
+ // The |calleeTemplate_| field contains either the effective JSFunction or
+ // the template from which it is supposed to be cloned. The |calleeRVA_| is
+ // an Invalid value allocation if the |calleeTemplate_| field is the
+ // effective JSFunction and not its template. Any other value allocation
+ // implies that |calleeTemplate_| is the template JSFunction from which the
+ // effective one would be derived and cached by the Recover instruction
+ // result.
+ RootedFunction calleeTemplate_;
+ RValueAllocation calleeRVA_;
+
+ RootedScript script_;
+ jsbytecode* pc_;
+ uint32_t numActualArgs_;
+
+ // Register state, used by all snapshot iterators.
+ MachineState machine_;
+
+ struct Nop {
+ void operator()(const Value& v) {}
+ };
+
+ private:
+ void findNextFrame();
+ JSObject* computeEnvironmentChain(const Value& envChainValue,
+ MaybeReadFallback& fallback,
+ bool* hasInitialEnv = nullptr) const;
+
+ public:
+ InlineFrameIterator(JSContext* cx, const JSJitFrameIter* iter);
+ InlineFrameIterator(JSContext* cx, const InlineFrameIterator* iter);
+
+ bool more() const { return frame_ && framesRead_ < frameCount_; }
+
+ // Due to optimizations, we are not always capable of reading the callee of
+ // inlined frames without invalidating the IonCode. This function might
+ // return either the effective callee or the template JSFunction which might
+ // have been used to create it.
+ //
+ // As such, |calleeTemplate()| can be used to read most of the metadata
+ // which is preserved across clones.
+ JSFunction* calleeTemplate() const {
+ MOZ_ASSERT(isFunctionFrame());
+ return calleeTemplate_;
+ }
+ JSFunction* maybeCalleeTemplate() const { return calleeTemplate_; }
+
+ JSFunction* callee(MaybeReadFallback& fallback) const;
+
+ unsigned numActualArgs() const {
+ // The number of actual arguments for inline frames is determined by this
+ // iterator based on the caller's bytecode instruction (Call, FunCall,
+ // GetProp/SetProp, etc). For the outer function it's stored in the stack
+ // frame.
+ if (more()) {
+ return numActualArgs_;
+ }
+
+ return frame_->numActualArgs();
+ }
+
+ template <class ArgOp, class LocalOp>
+ void readFrameArgsAndLocals(JSContext* cx, ArgOp& argOp, LocalOp& localOp,
+ JSObject** envChain, bool* hasInitialEnv,
+ Value* rval, ArgumentsObject** argsObj,
+ Value* thisv, ReadFrameArgsBehavior behavior,
+ MaybeReadFallback& fallback) const {
+ SnapshotIterator s(si_);
+
+ // Read the env chain.
+ if (envChain) {
+ Value envChainValue = s.maybeRead(fallback);
+ *envChain =
+ computeEnvironmentChain(envChainValue, fallback, hasInitialEnv);
+ } else {
+ s.skip();
+ }
+
+ // Read return value.
+ if (rval) {
+ *rval = s.maybeRead(fallback);
+ } else {
+ s.skip();
+ }
+
+ // Read arguments, which only function frames have.
+ if (isFunctionFrame()) {
+ unsigned nactual = numActualArgs();
+ unsigned nformal = calleeTemplate()->nargs();
+
+ // Read the formal arguments, which are taken from the inlined frame,
+ // because it will have the updated value when JSOp::SetArg is used.
+ unsigned numFormalsToRead;
+ if (behavior == ReadFrameArgsBehavior::Actuals) {
+ numFormalsToRead = std::min(nactual, nformal);
+ } else {
+ MOZ_ASSERT(behavior == ReadFrameArgsBehavior::ActualsAndFormals);
+ numFormalsToRead = nformal;
+ }
+ s.readFunctionFrameArgs(argOp, argsObj, thisv, 0, numFormalsToRead,
+ script(), fallback);
+
+ // Skip formals we didn't read.
+ for (unsigned i = numFormalsToRead; i < nformal; i++) {
+ s.skip();
+ }
+
+ if (nactual > nformal) {
+ if (more()) {
+ // There is still a parent frame of this inlined frame. All
+ // arguments (including the overflown ones) are the last pushed
+ // values in that parent frame, so we need to read the overflown
+ // arguments from there.
+ InlineFrameIterator it(cx, this);
+ ++it;
+ unsigned argsObjAdj = it.script()->needsArgsObj() ? 1 : 0;
+ bool hasNewTarget = isConstructing();
+ SnapshotIterator parent_s(it.snapshotIterator());
+
+ // Skip over all slots until we get to the last slots
+ // (= the argument slots of the callee). The +3 is for [this],
+ // [returnvalue] and [envchain]; argsObjAdj adds 1 for [argsObj] if needed.
+ MOZ_ASSERT(parent_s.numAllocations() >=
+ nactual + 3 + argsObjAdj + hasNewTarget);
+ unsigned skip = parent_s.numAllocations() - nactual - 3 - argsObjAdj -
+ hasNewTarget;
+ for (unsigned j = 0; j < skip; j++) {
+ parent_s.skip();
+ }
+
+ // Get the overflown arguments
+ parent_s.skip(); // env chain
+ parent_s.skip(); // return value
+ parent_s.readFunctionFrameArgs(argOp, nullptr, nullptr, nformal,
+ nactual, it.script(), fallback);
+ } else {
+ // There is no parent frame to this inlined frame, we can read
+ // from the frame's Value vector directly.
+ Value* argv = frame_->actualArgs();
+ for (unsigned i = nformal; i < nactual; i++) {
+ argOp(argv[i]);
+ }
+ }
+ }
+ }
+
+ // At this point we've read all the formals in s, and can read the
+ // locals.
+ for (unsigned i = 0; i < script()->nfixed(); i++) {
+ localOp(s.maybeRead(fallback));
+ }
+ }
+
+ template <class Op>
+ void unaliasedForEachActual(JSContext* cx, Op op,
+ MaybeReadFallback& fallback) const {
+ Nop nop;
+ readFrameArgsAndLocals(cx, op, nop, nullptr, nullptr, nullptr, nullptr,
+ nullptr, ReadFrameArgsBehavior::Actuals, fallback);
+ }
+
+ JSScript* script() const { return script_; }
+ jsbytecode* pc() const { return pc_; }
+ SnapshotIterator snapshotIterator() const { return si_; }
+ bool isFunctionFrame() const;
+ bool isModuleFrame() const;
+ bool isConstructing() const;
+
+ JSObject* environmentChain(MaybeReadFallback& fallback,
+ bool* hasInitialEnvironment = nullptr) const {
+ SnapshotIterator s(si_);
+
+ // envChain
+ Value v = s.maybeRead(fallback);
+ return computeEnvironmentChain(v, fallback, hasInitialEnvironment);
+ }
+
+ Value thisArgument(MaybeReadFallback& fallback) const {
+ SnapshotIterator s(si_);
+
+ // envChain
+ s.skip();
+
+ // return value
+ s.skip();
+
+ // Arguments object.
+ if (script()->needsArgsObj()) {
+ s.skip();
+ }
+
+ return s.maybeRead(fallback);
+ }
+
+ InlineFrameIterator& operator++() {
+ findNextFrame();
+ return *this;
+ }
+
+ void dump() const;
+
+ void resetOn(const JSJitFrameIter* iter);
+
+ const JSJitFrameIter& frame() const { return *frame_; }
+
+ // Inline frame number, 0 for the outermost (non-inlined) frame.
+ size_t frameNo() const { return frameCount() - framesRead_; }
+ size_t frameCount() const {
+ MOZ_ASSERT(frameCount_ != UINT32_MAX);
+ return frameCount_;
+ }
+
+ private:
+ InlineFrameIterator() = delete;
+ InlineFrameIterator(const InlineFrameIterator& iter) = delete;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JSJitFrameIter_h */
diff --git a/js/src/jit/JSONSpewer.cpp b/js/src/jit/JSONSpewer.cpp
new file mode 100644
index 0000000000..81ce1a2859
--- /dev/null
+++ b/js/src/jit/JSONSpewer.cpp
@@ -0,0 +1,287 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef JS_JITSPEW
+
+# include "jit/JSONSpewer.h"
+
+# include "jit/BacktrackingAllocator.h"
+# include "jit/LIR.h"
+# include "jit/MIR.h"
+# include "jit/MIRGraph.h"
+# include "jit/RangeAnalysis.h"
+
+using namespace js;
+using namespace js::jit;
+
+void JSONSpewer::beginFunction(JSScript* script) {
+ beginObject();
+ formatProperty("name", "%s:%u", script->filename(), script->lineno());
+ beginListProperty("passes");
+}
+
+void JSONSpewer::beginWasmFunction(unsigned funcIndex) {
+ beginObject();
+ formatProperty("name", "wasm-func%u", funcIndex);
+ beginListProperty("passes");
+}
+
+void JSONSpewer::beginPass(const char* pass) {
+ beginObject();
+ property("name", pass);
+}
+
+void JSONSpewer::spewMResumePoint(MResumePoint* rp) {
+ if (!rp) {
+ return;
+ }
+
+ beginObjectProperty("resumePoint");
+
+ if (rp->caller()) {
+ property("caller", rp->caller()->block()->id());
+ }
+
+ property("mode", ResumeModeToString(rp->mode()));
+
+ beginListProperty("operands");
+ for (MResumePoint* iter = rp; iter; iter = iter->caller()) {
+ for (int i = iter->numOperands() - 1; i >= 0; i--) {
+ value(iter->getOperand(i)->id());
+ }
+ if (iter->caller()) {
+ value("|");
+ }
+ }
+ endList();
+
+ endObject();
+}
+
+void JSONSpewer::spewMDef(MDefinition* def) {
+ beginObject();
+
+ property("id", def->id());
+
+ propertyName("opcode");
+ out_.printf("\"");
+ def->printOpcode(out_);
+ out_.printf("\"");
+
+ beginListProperty("attributes");
+# define OUTPUT_ATTRIBUTE(X) \
+ do { \
+ if (def->is##X()) value(#X); \
+ } while (0);
+ MIR_FLAG_LIST(OUTPUT_ATTRIBUTE);
+# undef OUTPUT_ATTRIBUTE
+ endList();
+
+ beginListProperty("inputs");
+ for (size_t i = 0, e = def->numOperands(); i < e; i++) {
+ value(def->getOperand(i)->id());
+ }
+ endList();
+
+ beginListProperty("uses");
+ for (MUseDefIterator use(def); use; use++) {
+ value(use.def()->id());
+ }
+ endList();
+
+ if (!def->isLowered()) {
+ beginListProperty("memInputs");
+ if (def->dependency()) {
+ value(def->dependency()->id());
+ }
+ endList();
+ }
+
+ bool isTruncated = false;
+ if (def->isAdd() || def->isSub() || def->isMod() || def->isMul() ||
+ def->isDiv()) {
+ isTruncated = static_cast<MBinaryArithInstruction*>(def)->isTruncated();
+ }
+
+ if (def->type() != MIRType::None && def->range()) {
+ beginStringProperty("type");
+ def->range()->dump(out_);
+ out_.printf(" : %s%s", StringFromMIRType(def->type()),
+ (isTruncated ? " (t)" : ""));
+ endStringProperty();
+ } else {
+ formatProperty("type", "%s%s", StringFromMIRType(def->type()),
+ (isTruncated ? " (t)" : ""));
+ }
+
+ if (def->isInstruction()) {
+ if (MResumePoint* rp = def->toInstruction()->resumePoint()) {
+ spewMResumePoint(rp);
+ }
+ }
+
+ endObject();
+}
+
+void JSONSpewer::spewMIR(MIRGraph* mir) {
+ beginObjectProperty("mir");
+ beginListProperty("blocks");
+
+ for (MBasicBlockIterator block(mir->begin()); block != mir->end(); block++) {
+ beginObject();
+
+ property("number", block->id());
+
+ beginListProperty("attributes");
+ if (block->hasLastIns()) {
+ if (block->isLoopBackedge()) {
+ value("backedge");
+ }
+ if (block->isLoopHeader()) {
+ value("loopheader");
+ }
+ if (block->isSplitEdge()) {
+ value("splitedge");
+ }
+ }
+ endList();
+
+ beginListProperty("predecessors");
+ for (size_t i = 0; i < block->numPredecessors(); i++) {
+ value(block->getPredecessor(i)->id());
+ }
+ endList();
+
+ beginListProperty("successors");
+ if (block->hasLastIns()) {
+ for (size_t i = 0; i < block->numSuccessors(); i++) {
+ value(block->getSuccessor(i)->id());
+ }
+ }
+ endList();
+
+ beginListProperty("instructions");
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
+ spewMDef(*phi);
+ }
+ for (MInstructionIterator i(block->begin()); i != block->end(); i++) {
+ spewMDef(*i);
+ }
+ endList();
+
+ spewMResumePoint(block->entryResumePoint());
+
+ endObject();
+ }
+
+ endList();
+ endObject();
+}
+
+void JSONSpewer::spewLIns(LNode* ins) {
+ beginObject();
+
+ property("id", ins->id());
+
+ propertyName("opcode");
+ out_.printf("\"");
+ ins->dump(out_);
+ out_.printf("\"");
+
+ beginListProperty("defs");
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ if (ins->isPhi()) {
+ value(ins->toPhi()->getDef(i)->virtualRegister());
+ } else {
+ value(ins->toInstruction()->getDef(i)->virtualRegister());
+ }
+ }
+ endList();
+
+ endObject();
+}
+
+void JSONSpewer::spewLIR(MIRGraph* mir) {
+ beginObjectProperty("lir");
+ beginListProperty("blocks");
+
+ for (MBasicBlockIterator i(mir->begin()); i != mir->end(); i++) {
+ LBlock* block = i->lir();
+ if (!block) {
+ continue;
+ }
+
+ beginObject();
+ property("number", i->id());
+
+ beginListProperty("instructions");
+ for (size_t p = 0; p < block->numPhis(); p++) {
+ spewLIns(block->getPhi(p));
+ }
+ for (LInstructionIterator ins(block->begin()); ins != block->end(); ins++) {
+ spewLIns(*ins);
+ }
+ endList();
+
+ endObject();
+ }
+
+ endList();
+ endObject();
+}
+
+void JSONSpewer::spewRanges(BacktrackingAllocator* regalloc) {
+ beginObjectProperty("ranges");
+ beginListProperty("blocks");
+
+ for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) {
+ beginObject();
+ property("number", bno);
+ beginListProperty("vregs");
+
+ LBlock* lir = regalloc->graph.getBlock(bno);
+ for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++) {
+ for (size_t k = 0; k < ins->numDefs(); k++) {
+ uint32_t id = ins->getDef(k)->virtualRegister();
+ VirtualRegister* vreg = &regalloc->vregs[id];
+
+ beginObject();
+ property("vreg", id);
+ beginListProperty("ranges");
+
+ for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter;
+ iter++) {
+ LiveRange* range = LiveRange::get(*iter);
+
+ beginObject();
+ property("allocation",
+ range->bundle()->allocation().toString().get());
+ property("start", range->from().bits());
+ property("end", range->to().bits());
+ endObject();
+ }
+
+ endList();
+ endObject();
+ }
+ }
+
+ endList();
+ endObject();
+ }
+
+ endList();
+ endObject();
+}
+
+void JSONSpewer::endPass() { endObject(); }
+
+void JSONSpewer::endFunction() {
+ endList();
+ endObject();
+}
+
+#endif /* JS_JITSPEW */
diff --git a/js/src/jit/JSONSpewer.h b/js/src/jit/JSONSpewer.h
new file mode 100644
index 0000000000..6c663c041a
--- /dev/null
+++ b/js/src/jit/JSONSpewer.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JSONSpewer_h
+#define jit_JSONSpewer_h
+
+#ifdef JS_JITSPEW
+
+# include <stdio.h>
+
+# include "js/TypeDecls.h"
+# include "vm/JSONPrinter.h"
+
+namespace js {
+namespace jit {
+
+class BacktrackingAllocator;
+class MDefinition;
+class MIRGraph;
+class MResumePoint;
+class LNode;
+
+class JSONSpewer : JSONPrinter {
+ public:
+ explicit JSONSpewer(GenericPrinter& out) : JSONPrinter(out) {}
+
+ void beginFunction(JSScript* script);
+ void beginWasmFunction(unsigned funcIndex);
+ void beginPass(const char* pass);
+ void spewMDef(MDefinition* def);
+ void spewMResumePoint(MResumePoint* rp);
+ void spewMIR(MIRGraph* mir);
+ void spewLIns(LNode* ins);
+ void spewLIR(MIRGraph* mir);
+ void spewRanges(BacktrackingAllocator* regalloc);
+ void endPass();
+ void endFunction();
+};
+
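+// Illustrative usage sketch (not part of the original patch): spewing one
+// compilation pass of a script as JSON. Using an Fprinter over stderr is an
+// assumption about the GenericPrinter implementation; |script| and |graph|
+// are assumed to be in scope.
+#if 0
+Fprinter out(stderr);
+JSONSpewer spewer(out);
+spewer.beginFunction(script);
+spewer.beginPass("Alias analysis");
+spewer.spewMIR(graph);  // MIR blocks, instructions and resume points
+spewer.spewLIR(graph);  // LIR blocks after lowering
+spewer.endPass();
+spewer.endFunction();
+#endif
+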
+} // namespace jit
+} // namespace js
+
+#endif /* JS_JITSPEW */
+
+#endif /* jit_JSONSpewer_h */
diff --git a/js/src/jit/Jit.cpp b/js/src/jit/Jit.cpp
new file mode 100644
index 0000000000..4566fc013e
--- /dev/null
+++ b/js/src/jit/Jit.cpp
@@ -0,0 +1,214 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Jit.h"
+
+#include "jit/BaselineJIT.h"
+#include "jit/CalleeToken.h"
+#include "jit/Ion.h"
+#include "jit/JitCommon.h"
+#include "jit/JitRuntime.h"
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "vm/Interpreter.h"
+#include "vm/JitActivation.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+
+#include "vm/Activation-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+static EnterJitStatus JS_HAZ_JSNATIVE_CALLER EnterJit(JSContext* cx,
+ RunState& state,
+ uint8_t* code) {
+ // We don't want to call the interpreter stub here (because
+ // C++ -> interpreterStub -> C++ is slower than staying in C++).
+ MOZ_ASSERT(code);
+ MOZ_ASSERT(code != cx->runtime()->jitRuntime()->interpreterStub().value);
+ MOZ_ASSERT(IsBaselineInterpreterEnabled());
+
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return EnterJitStatus::Error;
+ }
+
+ // jit::Bailout(), jit::InvalidationBailout(), and jit::HandleException()
+ // reset the counter to zero, so assert here it's also zero when we enter
+ // JIT code.
+ MOZ_ASSERT(!cx->isInUnsafeRegion());
+
+#ifdef DEBUG
+ // Assert we don't GC before entering JIT code. A GC could discard JIT code
+ // or move the function stored in the CalleeToken (it won't be traced at
+ // this point). We use Maybe<> here so we can call reset() to call the
+ // AutoAssertNoGC destructor before we enter JIT code.
+ mozilla::Maybe<JS::AutoAssertNoGC> nogc;
+ nogc.emplace(cx);
+#endif
+
+ JSScript* script = state.script();
+ size_t numActualArgs;
+ bool constructing;
+ size_t maxArgc;
+ Value* maxArgv;
+ JSObject* envChain;
+ CalleeToken calleeToken;
+
+ if (state.isInvoke()) {
+ const CallArgs& args = state.asInvoke()->args();
+ numActualArgs = args.length();
+
+ if (TooManyActualArguments(numActualArgs)) {
+ // Fall back to the C++ interpreter to avoid running out of stack space.
+ return EnterJitStatus::NotEntered;
+ }
+
+ constructing = state.asInvoke()->constructing();
+ maxArgc = args.length() + 1;
+ maxArgv = args.array() - 1; // -1 to include |this|
+ envChain = nullptr;
+ calleeToken = CalleeToToken(&args.callee().as<JSFunction>(), constructing);
+
+ unsigned numFormals = script->function()->nargs();
+ if (numFormals > numActualArgs) {
+ code = cx->runtime()->jitRuntime()->getArgumentsRectifier().value;
+ }
+ } else {
+ numActualArgs = 0;
+ constructing = false;
+ maxArgc = 0;
+ maxArgv = nullptr;
+ envChain = state.asExecute()->environmentChain();
+ calleeToken = CalleeToToken(state.script());
+ }
+
+ // Caller must construct |this| before invoking the function.
+ MOZ_ASSERT_IF(constructing, maxArgv[0].isObject() ||
+ maxArgv[0].isMagic(JS_UNINITIALIZED_LEXICAL));
+
+ RootedValue result(cx, Int32Value(numActualArgs));
+ {
+ AssertRealmUnchanged aru(cx);
+ ActivationEntryMonitor entryMonitor(cx, calleeToken);
+ JitActivation activation(cx);
+ EnterJitCode enter = cx->runtime()->jitRuntime()->enterJit();
+
+#ifdef DEBUG
+ nogc.reset();
+#endif
+ CALL_GENERATED_CODE(enter, code, maxArgc, maxArgv, /* osrFrame = */ nullptr,
+ calleeToken, envChain, /* osrNumStackValues = */ 0,
+ result.address());
+ }
+
+ // Ensure the counter was reset to zero after exiting from JIT code.
+ MOZ_ASSERT(!cx->isInUnsafeRegion());
+
+ // Release temporary buffer used for OSR into Ion.
+ cx->runtime()->jitRuntime()->freeIonOsrTempData();
+
+ if (result.isMagic()) {
+ MOZ_ASSERT(result.isMagic(JS_ION_ERROR));
+ return EnterJitStatus::Error;
+ }
+
+ // Jit callers wrap primitive constructor return, except for derived
+ // class constructors, which are forced to do it themselves.
+ if (constructing && result.isPrimitive()) {
+ MOZ_ASSERT(maxArgv[0].isObject());
+ result = maxArgv[0];
+ }
+
+ state.setReturnValue(result);
+ return EnterJitStatus::Ok;
+}
+
+// Call the per-script interpreter entry trampoline.
+bool js::jit::EnterInterpreterEntryTrampoline(uint8_t* code, JSContext* cx,
+ RunState* state) {
+ using EnterTrampolineCodePtr = bool (*)(JSContext * cx, RunState*);
+ auto funcPtr = JS_DATA_TO_FUNC_PTR(EnterTrampolineCodePtr, code);
+ return CALL_GENERATED_2(funcPtr, cx, state);
+}
+
+EnterJitStatus js::jit::MaybeEnterJit(JSContext* cx, RunState& state) {
+ if (!IsBaselineInterpreterEnabled()) {
+ // All JITs are disabled.
+ return EnterJitStatus::NotEntered;
+ }
+
+ // JITs do not respect the debugger's OnNativeCall hook, so JIT execution is
+ // disabled if this hook might need to be called.
+ if (cx->insideDebuggerEvaluationWithOnNativeCallHook) {
+ return EnterJitStatus::NotEntered;
+ }
+
+ JSScript* script = state.script();
+
+ uint8_t* code = script->jitCodeRaw();
+
+#ifdef JS_CACHEIR_SPEW
+ cx->spewer().enableSpewing();
+#endif
+
+ do {
+ // Make sure we can enter Baseline Interpreter code. Note that the prologue
+ // has warm-up checks to tier up if needed.
+ if (script->hasJitScript()) {
+ break;
+ }
+
+ script->incWarmUpCounter();
+
+ // Try to Ion-compile.
+ if (jit::IsIonEnabled(cx)) {
+ jit::MethodStatus status = jit::CanEnterIon(cx, state);
+ if (status == jit::Method_Error) {
+ return EnterJitStatus::Error;
+ }
+ if (status == jit::Method_Compiled) {
+ code = script->jitCodeRaw();
+ break;
+ }
+ }
+
+ // Try to Baseline-compile.
+ if (jit::IsBaselineJitEnabled(cx)) {
+ jit::MethodStatus status =
+ jit::CanEnterBaselineMethod<BaselineTier::Compiler>(cx, state);
+ if (status == jit::Method_Error) {
+ return EnterJitStatus::Error;
+ }
+ if (status == jit::Method_Compiled) {
+ code = script->jitCodeRaw();
+ break;
+ }
+ }
+
+ // Try to enter the Baseline Interpreter.
+ if (IsBaselineInterpreterEnabled()) {
+ jit::MethodStatus status =
+ jit::CanEnterBaselineMethod<BaselineTier::Interpreter>(cx, state);
+ if (status == jit::Method_Error) {
+ return EnterJitStatus::Error;
+ }
+ if (status == jit::Method_Compiled) {
+ code = script->jitCodeRaw();
+ break;
+ }
+ }
+
+ return EnterJitStatus::NotEntered;
+ } while (false);
+
+#ifdef JS_CACHEIR_SPEW
+ cx->spewer().disableSpewing();
+#endif
+
+ return EnterJit(cx, state, code);
+}
diff --git a/js/src/jit/Jit.h b/js/src/jit/Jit.h
new file mode 100644
index 0000000000..d9632513b6
--- /dev/null
+++ b/js/src/jit/Jit.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Jit_h
+#define jit_Jit_h
+
+#include "jstypes.h"
+
+struct JS_PUBLIC_API JSContext;
+
+namespace js {
+
+class RunState;
+
+namespace jit {
+
+enum class EnterJitStatus {
+ // An error occurred, either before we entered JIT code or the script threw
+ // an exception. Usually the context will have a pending exception, except
+ // for uncatchable exceptions (interrupts).
+ Error,
+
+ // Entered and returned from JIT code.
+ Ok,
+
+ // We didn't enter JIT code, for instance because the script still has to
+ // warm up or cannot be compiled.
+ NotEntered,
+};
+
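+// Illustrative usage sketch (not part of the original patch): how a caller
+// such as the bytecode interpreter might dispatch on MaybeEnterJit(). The
+// RunScriptInInterpreter() fallback is hypothetical.
+#if 0
+switch (jit::MaybeEnterJit(cx, state)) {
+  case jit::EnterJitStatus::Error:
+    return false;  // an exception (or uncatchable interrupt) is pending
+  case jit::EnterJitStatus::Ok:
+    return true;  // |state| already holds the return value
+  case jit::EnterJitStatus::NotEntered:
+    return RunScriptInInterpreter(cx, state);  // keep warming up in C++
+}
+#endif
+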
+extern bool EnterInterpreterEntryTrampoline(uint8_t* code, JSContext* cx,
+ RunState* state);
+extern EnterJitStatus MaybeEnterJit(JSContext* cx, RunState& state);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Jit_h */
diff --git a/js/src/jit/JitAllocPolicy.h b/js/src/jit/JitAllocPolicy.h
new file mode 100644
index 0000000000..3980f3f2fb
--- /dev/null
+++ b/js/src/jit/JitAllocPolicy.h
@@ -0,0 +1,179 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitAllocPolicy_h
+#define jit_JitAllocPolicy_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Likely.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/TemplateLib.h"
+
+#include <algorithm>
+#include <stddef.h>
+#include <string.h>
+#include <type_traits>
+#include <utility>
+
+#include "ds/LifoAlloc.h"
+#include "jit/InlineList.h"
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+class TempAllocator {
+ LifoAllocScope lifoScope_;
+
+ public:
+ // Most infallible JIT allocations are small, so we use a ballast of 16
+ // KiB. And with a ballast of 16 KiB, a chunk size of 32 KiB works well,
+ // because TempAllocators with a peak allocation size of less than 16 KiB
+ // (which is most of them) only have to allocate a single chunk.
+ static const size_t BallastSize; // 16 KiB
+ static const size_t PreferredLifoChunkSize; // 32 KiB
+
+ explicit TempAllocator(LifoAlloc* lifoAlloc) : lifoScope_(lifoAlloc) {
+ lifoAlloc->setAsInfallibleByDefault();
+ }
+
+ void* allocateInfallible(size_t bytes) {
+ return lifoScope_.alloc().allocInfallible(bytes);
+ }
+
+ [[nodiscard]] void* allocate(size_t bytes) {
+ LifoAlloc::AutoFallibleScope fallibleAllocator(lifoAlloc());
+ return lifoScope_.alloc().allocEnsureUnused(bytes, BallastSize);
+ }
+
+ template <typename T>
+ [[nodiscard]] T* allocateArray(size_t n) {
+ LifoAlloc::AutoFallibleScope fallibleAllocator(lifoAlloc());
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(n, &bytes))) {
+ return nullptr;
+ }
+ return static_cast<T*>(
+ lifoScope_.alloc().allocEnsureUnused(bytes, BallastSize));
+ }
+
+ // View this allocator as a fallible allocator.
+ struct Fallible {
+ TempAllocator& alloc;
+ };
+ Fallible fallible() { return {*this}; }
+
+ LifoAlloc* lifoAlloc() { return &lifoScope_.alloc(); }
+
+ [[nodiscard]] bool ensureBallast() {
+ JS_OOM_POSSIBLY_FAIL_BOOL();
+ return lifoScope_.alloc().ensureUnusedApproximate(BallastSize);
+ }
+};
+
+class JitAllocPolicy {
+ TempAllocator& alloc_;
+
+ public:
+ MOZ_IMPLICIT JitAllocPolicy(TempAllocator& alloc) : alloc_(alloc) {}
+ template <typename T>
+ T* maybe_pod_malloc(size_t numElems) {
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes))) {
+ return nullptr;
+ }
+ return static_cast<T*>(alloc_.allocate(bytes));
+ }
+ template <typename T>
+ T* maybe_pod_calloc(size_t numElems) {
+ T* p = maybe_pod_malloc<T>(numElems);
+ if (MOZ_LIKELY(p)) {
+ memset(p, 0, numElems * sizeof(T));
+ }
+ return p;
+ }
+ template <typename T>
+ T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
+ T* n = pod_malloc<T>(newSize);
+ if (MOZ_UNLIKELY(!n)) {
+ return n;
+ }
+ MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
+ memcpy(n, p, std::min(oldSize * sizeof(T), newSize * sizeof(T)));
+ return n;
+ }
+ template <typename T>
+ T* pod_malloc(size_t numElems) {
+ return maybe_pod_malloc<T>(numElems);
+ }
+ template <typename T>
+ T* pod_calloc(size_t numElems) {
+ return maybe_pod_calloc<T>(numElems);
+ }
+ template <typename T>
+ T* pod_realloc(T* ptr, size_t oldSize, size_t newSize) {
+ return maybe_pod_realloc<T>(ptr, oldSize, newSize);
+ }
+ template <typename T>
+ void free_(T* p, size_t numElems = 0) {}
+ void reportAllocOverflow() const {}
+ [[nodiscard]] bool checkSimulatedOOM() const {
+ return !js::oom::ShouldFailWithOOM();
+ }
+};
+
+struct TempObject {
+ inline void* operator new(size_t nbytes,
+ TempAllocator::Fallible view) noexcept(true) {
+ return view.alloc.allocate(nbytes);
+ }
+ inline void* operator new(size_t nbytes, TempAllocator& alloc) {
+ return alloc.allocateInfallible(nbytes);
+ }
+ template <class T>
+ inline void* operator new(size_t nbytes, T* pos) {
+ static_assert(std::is_convertible_v<T*, TempObject*>,
+ "Placement new argument type must inherit from TempObject");
+ return pos;
+ }
+ template <class T>
+ inline void* operator new(size_t nbytes, mozilla::NotNullTag, T* pos) {
+ static_assert(std::is_convertible_v<T*, TempObject*>,
+ "Placement new argument type must inherit from TempObject");
+ MOZ_ASSERT(pos);
+ return pos;
+ }
+};
+
+template <typename T>
+class TempObjectPool {
+ TempAllocator* alloc_;
+ InlineForwardList<T> freed_;
+
+ public:
+ TempObjectPool() : alloc_(nullptr) {}
+ void setAllocator(TempAllocator& alloc) {
+ MOZ_ASSERT(freed_.empty());
+ alloc_ = &alloc;
+ }
+ template <typename... Args>
+ T* allocate(Args&&... args) {
+ MOZ_ASSERT(alloc_);
+ if (freed_.empty()) {
+ return new (alloc_->fallible()) T(std::forward<Args>(args)...);
+ }
+ T* res = freed_.popFront();
+ return new (res) T(std::forward<Args>(args)...);
+ }
+ void free(T* obj) { freed_.pushFront(obj); }
+ void clear() { freed_.clear(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitAllocPolicy_h */
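For illustration only (not part of the patch), here is how these pieces usually combine: nodes derived from TempObject are placement-allocated from a TempAllocator, and containers parameterized on JitAllocPolicy draw their storage from the same LifoAlloc. MyNode and BuildNodes are hypothetical.

    #include "jit/JitAllocPolicy.h"
    #include "js/Vector.h"

    // Hypothetical IR-style node; types allocated this way derive from
    // TempObject so the placement operator new overloads above apply.
    class MyNode : public js::jit::TempObject {
      int value_;

     public:
      explicit MyNode(int value) : value_(value) {}
      int value() const { return value_; }
    };

    static bool BuildNodes(js::jit::TempAllocator& alloc) {
      // Infallible form: crashes on OOM instead of returning nullptr.
      MyNode* a = new (alloc) MyNode(1);

      // Fallible form: goes through TempAllocator::allocate, so the result
      // must be null-checked.
      MyNode* b = new (alloc.fallible()) MyNode(2);
      if (!b) {
        return false;
      }

      // Vector storage comes from the same LifoAlloc via JitAllocPolicy and
      // is released all at once when the TempAllocator's scope unwinds.
      js::Vector<MyNode*, 0, js::jit::JitAllocPolicy> nodes(alloc);
      return nodes.append(a) && nodes.append(b);
    }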
diff --git a/js/src/jit/JitCode.h b/js/src/jit/JitCode.h
new file mode 100644
index 0000000000..5a96445c12
--- /dev/null
+++ b/js/src/jit/JitCode.h
@@ -0,0 +1,171 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitCode_h
+#define jit_JitCode_h
+
+#include "mozilla/MemoryReporting.h" // MallocSizeOf
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t
+
+#include "jstypes.h"
+
+#include "gc/Allocator.h" // AllowGC
+#include "gc/Cell.h" // gc::TenuredCellWithNonGCPointer
+#include "js/TraceKind.h" // JS::TraceKind
+#include "js/UbiNode.h"  // ubi::{TracerConcrete, Size, CoarseType}
+
+namespace js {
+namespace jit {
+
+class ExecutablePool;
+class JitCode;
+class MacroAssembler;
+
+enum class CodeKind : uint8_t;
+
+// Header at start of raw code buffer
+struct JitCodeHeader {
+ // Link back to corresponding gcthing
+ JitCode* jitCode_;
+
+ void init(JitCode* jitCode);
+
+ static JitCodeHeader* FromExecutable(uint8_t* buffer) {
+ return (JitCodeHeader*)(buffer - sizeof(JitCodeHeader));
+ }
+};
+
+class JitCode : public gc::TenuredCellWithNonGCPointer<uint8_t> {
+ friend class gc::CellAllocator;
+
+ public:
+ // Raw code pointer, stored in the cell header.
+ uint8_t* raw() const { return headerPtr(); }
+
+ protected:
+ ExecutablePool* pool_;
+ uint32_t bufferSize_; // Total buffer size. Does not include headerSize_.
+ uint32_t insnSize_; // Instruction stream size.
+ uint32_t dataSize_; // Size of the read-only data area.
+ uint32_t jumpRelocTableBytes_; // Size of the jump relocation table.
+ uint32_t dataRelocTableBytes_; // Size of the data relocation table.
+ uint8_t headerSize_ : 5; // Number of bytes allocated before codeStart.
+ uint8_t kind_ : 3; // jit::CodeKind, for the memory reporters.
+ bool invalidated_ : 1; // Whether the code object has been invalidated.
+ // This is necessary to prevent GC tracing.
+ bool hasBytecodeMap_ : 1; // Whether the code object has been registered with
+ // native=>bytecode mapping tables.
+
+ JitCode() = delete;
+ JitCode(uint8_t* code, uint32_t bufferSize, uint32_t headerSize,
+ ExecutablePool* pool, CodeKind kind)
+ : TenuredCellWithNonGCPointer(code),
+ pool_(pool),
+ bufferSize_(bufferSize),
+ insnSize_(0),
+ dataSize_(0),
+ jumpRelocTableBytes_(0),
+ dataRelocTableBytes_(0),
+ headerSize_(headerSize),
+ kind_(uint8_t(kind)),
+ invalidated_(false),
+ hasBytecodeMap_(false) {
+ MOZ_ASSERT(CodeKind(kind_) == kind);
+ MOZ_ASSERT(headerSize_ == headerSize);
+ }
+
+ uint32_t dataOffset() const { return insnSize_; }
+ uint32_t jumpRelocTableOffset() const { return dataOffset() + dataSize_; }
+ uint32_t dataRelocTableOffset() const {
+ return jumpRelocTableOffset() + jumpRelocTableBytes_;
+ }
+
+ public:
+ uint8_t* rawEnd() const { return raw() + insnSize_; }
+ bool containsNativePC(const void* addr) const {
+ const uint8_t* addr_u8 = (const uint8_t*)addr;
+ return raw() <= addr_u8 && addr_u8 < rawEnd();
+ }
+ size_t instructionsSize() const { return insnSize_; }
+ size_t bufferSize() const { return bufferSize_; }
+ size_t headerSize() const { return headerSize_; }
+
+ void traceChildren(JSTracer* trc);
+ void finalize(JS::GCContext* gcx);
+ void setInvalidated() { invalidated_ = true; }
+
+ void setHasBytecodeMap() { hasBytecodeMap_ = true; }
+
+ // If this JitCode object has been, effectively, corrupted due to
+ // invalidation patching, then we have to remember this so we don't try to
+ // trace relocation entries that may now be corrupt.
+ bool invalidated() const { return !!invalidated_; }
+
+ template <typename T>
+ T as() const {
+ return JS_DATA_TO_FUNC_PTR(T, raw());
+ }
+
+ void copyFrom(MacroAssembler& masm);
+
+ static JitCode* FromExecutable(uint8_t* buffer) {
+ JitCode* code = JitCodeHeader::FromExecutable(buffer)->jitCode_;
+ MOZ_ASSERT(code->raw() == buffer);
+ return code;
+ }
+
+ static size_t offsetOfCode() { return offsetOfHeaderPtr(); }
+
+ uint8_t* jumpRelocTable() { return raw() + jumpRelocTableOffset(); }
+
+ // Allocates a new JitCode object which will be managed by the GC. If no
+ // object can be allocated, nullptr is returned. On failure, |pool| is
+ // automatically released, so the code may be freed.
+ template <AllowGC allowGC>
+ static JitCode* New(JSContext* cx, uint8_t* code, uint32_t totalSize,
+ uint32_t headerSize, ExecutablePool* pool, CodeKind kind);
+
+ public:
+ static const JS::TraceKind TraceKind = JS::TraceKind::JitCode;
+};
+
+} // namespace jit
+} // namespace js
+
+// JS::ubi::Nodes can point to js::jit::JitCode instances; they're js::gc::Cell
+// instances with no associated compartment.
+namespace JS {
+namespace ubi {
+template <>
+class Concrete<js::jit::JitCode> : TracerConcrete<js::jit::JitCode> {
+ protected:
+ explicit Concrete(js::jit::JitCode* ptr)
+ : TracerConcrete<js::jit::JitCode>(ptr) {}
+
+ public:
+ static void construct(void* storage, js::jit::JitCode* ptr) {
+ new (storage) Concrete(ptr);
+ }
+
+ CoarseType coarseType() const final { return CoarseType::Script; }
+
+ Size size(mozilla::MallocSizeOf mallocSizeOf) const override {
+ Size size = js::gc::Arena::thingSize(get().asTenured().getAllocKind());
+ size += get().bufferSize();
+ size += get().headerSize();
+ return size;
+ }
+
+ const char16_t* typeName() const override { return concreteTypeName; }
+ static const char16_t concreteTypeName[];
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif /* jit_JitCode_h */
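A small sketch, not part of the patch: the JitCodeHeader back-pointer lets a raw entry point be mapped back to its owning JitCode cell. CheckCodeRange and both arguments are hypothetical, and |entry| must be the raw() pointer previously handed out for the code object.

    #include "mozilla/Assertions.h"
    #include "jit/JitCode.h"

    static bool CheckCodeRange(uint8_t* entry, const void* pc) {
      // FromExecutable reads the JitCodeHeader stored just before the buffer.
      js::jit::JitCode* code = js::jit::JitCode::FromExecutable(entry);
      MOZ_ASSERT(code->raw() == entry);

      // True when pc lies inside [raw(), raw() + instructionsSize()).
      return code->containsNativePC(pc);
    }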
diff --git a/js/src/jit/JitCommon.h b/js/src/jit/JitCommon.h
new file mode 100644
index 0000000000..906aa55934
--- /dev/null
+++ b/js/src/jit/JitCommon.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitCommon_h
+#define jit_JitCommon_h
+
+// Various macros used by all JITs.
+
+#include "jit/Simulator.h"
+
+#ifdef JS_SIMULATOR
+// Call into cross-jitted code by following the ABI of the simulated
+// architecture.
+# define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (js::jit::Simulator::Current()->call( \
+ JS_FUNC_TO_DATA_PTR(uint8_t*, entry), 8, intptr_t(p0), intptr_t(p1), \
+ intptr_t(p2), intptr_t(p3), intptr_t(p4), intptr_t(p5), intptr_t(p6), \
+ intptr_t(p7)))
+
+# define CALL_GENERATED_0(entry) \
+ (js::jit::Simulator::Current()->call(JS_FUNC_TO_DATA_PTR(uint8_t*, entry), \
+ 0))
+
+# define CALL_GENERATED_1(entry, p0) \
+ (js::jit::Simulator::Current()->call(JS_FUNC_TO_DATA_PTR(uint8_t*, entry), \
+ 1, intptr_t(p0)))
+
+# define CALL_GENERATED_2(entry, p0, p1) \
+ (js::jit::Simulator::Current()->call(JS_FUNC_TO_DATA_PTR(uint8_t*, entry), \
+ 2, intptr_t(p0), intptr_t(p1)))
+
+# define CALL_GENERATED_3(entry, p0, p1, p2) \
+ (js::jit::Simulator::Current()->call(JS_FUNC_TO_DATA_PTR(uint8_t*, entry), \
+ 3, intptr_t(p0), intptr_t(p1), \
+ intptr_t(p2)))
+
+#else
+
+// Call into jitted code by following the ABI of the native architecture.
+# define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
+
+# define CALL_GENERATED_0(entry) entry()
+# define CALL_GENERATED_1(entry, p0) entry(p0)
+# define CALL_GENERATED_2(entry, p0, p1) entry(p0, p1)
+# define CALL_GENERATED_3(entry, p0, p1, p2) entry(p0, p1, p2)
+
+#endif
+
+#endif // jit_JitCommon_h
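Sketch only, not part of the patch: calling a two-argument stub through CALL_GENERATED_2 keeps the call site identical for native builds and JS_SIMULATOR builds. StubFn, CallStub, and stubCode are hypothetical.

    #include "jit/JitCommon.h"

    using StubFn = intptr_t (*)(intptr_t a, intptr_t b);

    static intptr_t CallStub(uint8_t* stubCode, intptr_t a, intptr_t b) {
      // JS_DATA_TO_FUNC_PTR converts the data pointer to a callable pointer,
      // as done for the interpreter entry trampoline in Jit.cpp above.
      auto fn = JS_DATA_TO_FUNC_PTR(StubFn, stubCode);

      // Natively this expands to fn(a, b); under JS_SIMULATOR it routes the
      // call through Simulator::Current()->call() on the simulated ISA.
      return CALL_GENERATED_2(fn, a, b);
    }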
diff --git a/js/src/jit/JitContext.cpp b/js/src/jit/JitContext.cpp
new file mode 100644
index 0000000000..b8f52307d7
--- /dev/null
+++ b/js/src/jit/JitContext.cpp
@@ -0,0 +1,161 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/JitContext.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/ThreadLocal.h"
+
+#include <stdlib.h>
+
+#include "jit/CacheIRSpewer.h"
+#include "jit/CompileWrappers.h"
+#include "jit/Ion.h"
+#include "jit/JitCode.h"
+#include "jit/JitOptions.h"
+#include "jit/JitSpewer.h"
+#include "jit/MacroAssembler.h"
+#include "jit/PerfSpewer.h"
+#include "js/HeapAPI.h"
+#include "vm/JSContext.h"
+
+#ifdef JS_CODEGEN_ARM64
+# include "jit/arm64/vixl/Cpu-vixl.h"
+#endif
+
+#if defined(ANDROID)
+# include <sys/system_properties.h>
+#endif
+
+using namespace js;
+using namespace js::jit;
+
+namespace js::jit {
+class TempAllocator;
+}
+
+// Assert that JitCode is gc::Cell aligned.
+static_assert(sizeof(JitCode) % gc::CellAlignBytes == 0);
+
+static MOZ_THREAD_LOCAL(JitContext*) TlsJitContext;
+
+static JitContext* CurrentJitContext() {
+ if (!TlsJitContext.init()) {
+ return nullptr;
+ }
+ return TlsJitContext.get();
+}
+
+void jit::SetJitContext(JitContext* ctx) {
+ MOZ_ASSERT(!TlsJitContext.get());
+ TlsJitContext.set(ctx);
+}
+
+JitContext* jit::GetJitContext() {
+ MOZ_ASSERT(CurrentJitContext());
+ return CurrentJitContext();
+}
+
+JitContext* jit::MaybeGetJitContext() { return CurrentJitContext(); }
+
+JitContext::JitContext(CompileRuntime* rt) : runtime(rt) {
+ MOZ_ASSERT(rt);
+ SetJitContext(this);
+}
+
+JitContext::JitContext(JSContext* cx)
+ : cx(cx), runtime(CompileRuntime::get(cx->runtime())) {
+ SetJitContext(this);
+}
+
+JitContext::JitContext() {
+#ifdef DEBUG
+ isCompilingWasm_ = true;
+#endif
+ SetJitContext(this);
+}
+
+JitContext::~JitContext() {
+ MOZ_ASSERT(TlsJitContext.get() == this);
+ TlsJitContext.set(nullptr);
+}
+
+bool jit::InitializeJit() {
+ if (!TlsJitContext.init()) {
+ return false;
+ }
+
+ CheckLogging();
+
+#ifdef JS_CACHEIR_SPEW
+ const char* env = getenv("CACHEIR_LOGS");
+ if (env && env[0] && env[0] != '0') {
+ CacheIRSpewer::singleton().init(env);
+ }
+#endif
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // Compute flags.
+ js::jit::CPUInfo::ComputeFlags();
+#endif
+
+#if defined(JS_CODEGEN_ARM)
+ InitARMFlags();
+#endif
+
+#ifdef JS_CODEGEN_ARM64
+ // Initialize instruction cache flushing.
+ vixl::CPU::SetUp();
+#endif
+
+#ifndef JS_CODEGEN_NONE
+ MOZ_ASSERT(js::jit::CPUFlagsHaveBeenComputed());
+#endif
+
+ // Note: the JIT flags must be initialized after the InitARMFlags call above.
+ // This is the last point at which disableJitBackend may be set to true,
+ // because the HasJitBackend call below reads that flag.
+ if (!MacroAssembler::SupportsFloatingPoint()) {
+ JitOptions.disableJitBackend = true;
+ }
+ JitOptions.supportsUnalignedAccesses =
+ MacroAssembler::SupportsUnalignedAccesses();
+
+ if (HasJitBackend()) {
+ if (!InitProcessExecutableMemory()) {
+ return false;
+ }
+ }
+
+ PerfSpewer::Init();
+ return true;
+}
+
+void jit::ShutdownJit() {
+ if (HasJitBackend() && !JSRuntime::hasLiveRuntimes()) {
+ ReleaseProcessExecutableMemory();
+ }
+}
+
+bool jit::JitSupportsWasmSimd() {
+#if defined(ENABLE_WASM_SIMD)
+ return js::jit::MacroAssembler::SupportsWasmSimd();
+#else
+ return false;
+#endif
+}
+
+bool jit::JitSupportsAtomics() {
+#if defined(JS_CODEGEN_ARM)
+ // Bug 1146902, bug 1077318: Enable Ion inlining of Atomics
+ // operations on ARM only when the CPU has byte, halfword, and
+ // doubleword load-exclusive and store-exclusive instructions,
+ // until we can add support for systems that don't have those.
+ return js::jit::HasLDSTREXBHD();
+#else
+ return true;
+#endif
+}
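For illustration, and not part of the patch: the capability predicates defined above can gate optional feature setup at initialization time. EnableOptionalJitFeatures is hypothetical.

    #include "jit/JitContext.h"

    static void EnableOptionalJitFeatures() {
      // Atomics are only inlined by the JITs when the CPU has the required
      // load/store-exclusive instructions (always true off ARM).
      bool inlineAtomics = js::jit::JitSupportsAtomics();

      // Wasm SIMD additionally depends on the ENABLE_WASM_SIMD build flag.
      bool wasmSimd = js::jit::JitSupportsWasmSimd();

      // ...record the flags in whatever configuration the embedding uses...
      (void)inlineAtomics;
      (void)wasmSimd;
    }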
diff --git a/js/src/jit/JitContext.h b/js/src/jit/JitContext.h
new file mode 100644
index 0000000000..b19f2dd371
--- /dev/null
+++ b/js/src/jit/JitContext.h
@@ -0,0 +1,168 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitContext_h
+#define jit_JitContext_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Result.h"
+
+#include <stdint.h>
+
+#include "jstypes.h"
+
+struct JS_PUBLIC_API JSContext;
+
+namespace js {
+namespace jit {
+
+class CompileRealm;
+class CompileRuntime;
+class TempAllocator;
+
+enum MethodStatus {
+ Method_Error,
+ Method_CantCompile,
+ Method_Skipped,
+ Method_Compiled
+};
+
+// Use only even, non-zero values for errors, to allow using the UnusedZero and
+// HasFreeLSB optimizations for mozilla::Result (see specializations of
+// UnusedZero/HasFreeLSB below).
+enum class AbortReason : uint8_t {
+ NoAbort,
+ Alloc = 2,
+ Disable = 4,
+ Error = 6,
+};
+} // namespace jit
+} // namespace js
+
+namespace mozilla::detail {
+
+template <>
+struct UnusedZero<js::jit::AbortReason> : UnusedZeroEnum<js::jit::AbortReason> {
+};
+
+template <>
+struct HasFreeLSB<js::jit::AbortReason> {
+ static const bool value = true;
+};
+
+} // namespace mozilla::detail
+
+namespace js {
+namespace jit {
+
+template <typename V>
+using AbortReasonOr = mozilla::Result<V, AbortReason>;
+using mozilla::Err;
+using mozilla::Ok;
+
+static_assert(sizeof(AbortReasonOr<Ok>) <= sizeof(uintptr_t),
+ "Unexpected size of AbortReasonOr<Ok>");
+static_assert(mozilla::detail::SelectResultImpl<bool, AbortReason>::value ==
+ mozilla::detail::PackingStrategy::NullIsOk);
+static_assert(sizeof(AbortReasonOr<bool>) <= sizeof(uintptr_t),
+ "Unexpected size of AbortReasonOr<bool>");
+static_assert(sizeof(AbortReasonOr<uint16_t*>) == sizeof(uintptr_t),
+ "Unexpected size of AbortReasonOr<uint16_t*>");
+
+// A JIT context is needed for parts of the compiler backend such as the
+// MacroAssembler. It points to the JSContext (nullptr for off-thread
+// compilations or Wasm compilations) and also stores some extra information,
+// most of it only used in debug builds.
+//
+// JIT contexts must not be nested.
+
+class MOZ_RAII JitContext {
+#ifdef DEBUG
+ // Whether this thread is actively Ion compiling (does not include Wasm or
+ // WarpOracle).
+ bool inIonBackend_ = false;
+
+ bool isCompilingWasm_ = false;
+ bool oom_ = false;
+#endif
+
+ public:
+ // Running context when compiling on the main thread. Not available during
+ // off-thread compilation.
+ JSContext* cx = nullptr;
+
+ // Wrapper with information about the current runtime. nullptr for Wasm
+ // compilations.
+ CompileRuntime* runtime = nullptr;
+
+ // Constructor for compilations happening on the main thread.
+ explicit JitContext(JSContext* cx);
+
+ // Constructor for off-thread Ion compilations.
+ explicit JitContext(CompileRuntime* rt);
+
+ // Constructor for Wasm compilation.
+ JitContext();
+
+ ~JitContext();
+
+#ifdef DEBUG
+ bool isCompilingWasm() { return isCompilingWasm_; }
+ bool setIsCompilingWasm(bool flag) {
+ bool oldFlag = isCompilingWasm_;
+ isCompilingWasm_ = flag;
+ return oldFlag;
+ }
+ bool hasOOM() { return oom_; }
+ void setOOM() { oom_ = true; }
+
+ bool inIonBackend() const { return inIonBackend_; }
+
+ void enterIonBackend() {
+ MOZ_ASSERT(!inIonBackend_);
+ inIonBackend_ = true;
+ }
+ void leaveIonBackend() {
+ MOZ_ASSERT(inIonBackend_);
+ inIonBackend_ = false;
+ }
+#endif
+};
+
+// Process-wide initialization and shutdown of JIT data structures.
+[[nodiscard]] bool InitializeJit();
+void ShutdownJit();
+
+// Get and set the current JIT context.
+JitContext* GetJitContext();
+JitContext* MaybeGetJitContext();
+
+void SetJitContext(JitContext* ctx);
+
+enum JitExecStatus {
+ // The method call had to be aborted due to a stack limit check. This
+ // error indicates that Ion never attempted to clean up frames.
+ JitExec_Aborted,
+
+ // The method call resulted in an error, and IonMonkey has cleaned up
+ // frames.
+ JitExec_Error,
+
+ // The method call succeeded and returned a value.
+ JitExec_Ok
+};
+
+static inline bool IsErrorStatus(JitExecStatus status) {
+ return status == JitExec_Error || status == JitExec_Aborted;
+}
+
+bool JitSupportsWasmSimd();
+bool JitSupportsAtomics();
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitContext_h */
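A sketch of the error style these types support, outside the patch itself: compilation code establishes the RAII JitContext and then reports failures through AbortReasonOr<> rather than pending exceptions. buildSomething is hypothetical; TempAllocator comes from JitAllocPolicy.h earlier in this patch.

    #include "jit/JitAllocPolicy.h"
    #include "jit/JitContext.h"

    static js::jit::AbortReasonOr<bool> buildSomething(
        JSContext* cx, js::jit::TempAllocator& alloc) {
      // Main-thread compilation sets up the (stack-only) JitContext first.
      js::jit::JitContext jctx(cx);

      // AbortReason::Alloc signals OOM during compilation; callers propagate
      // it with MOZ_TRY or explicit isErr() checks.
      if (!alloc.ensureBallast()) {
        return mozilla::Err(js::jit::AbortReason::Alloc);
      }

      // ...build something; AbortReason::Disable would mean "give up on
      // compiling this script without reporting an error"...
      return true;
    }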
diff --git a/js/src/jit/JitFrames-inl.h b/js/src/jit/JitFrames-inl.h
new file mode 100644
index 0000000000..ddbc76e103
--- /dev/null
+++ b/js/src/jit/JitFrames-inl.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitFrames_inl_h
+#define jit_JitFrames_inl_h
+
+#include "jit/JitFrames.h"
+#include "vm/JSContext.h"
+
+#include "jit/JSJitFrameIter-inl.h"
+
+namespace js {
+namespace jit {
+
+inline BaselineFrame* GetTopBaselineFrame(JSContext* cx) {
+ JSJitFrameIter frame(cx->activation()->asJit());
+ MOZ_ASSERT(frame.type() == FrameType::Exit);
+ ++frame;
+ if (frame.isBaselineStub()) {
+ ++frame;
+ }
+ MOZ_ASSERT(frame.isBaselineJS());
+ return frame.baselineFrame();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitFrames_inl_h */
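Not part of the patch, a brief sketch of the typical call site: a VM function invoked from Baseline code uses GetTopBaselineFrame to reach its caller's frame. DoSomethingFromBaseline is hypothetical.

    #include "jit/BaselineFrame.h"
    #include "jit/JitFrames-inl.h"

    static bool DoSomethingFromBaseline(JSContext* cx) {
      // Skips the exit frame (and an optional BaselineStub frame) pushed by
      // the VM call and lands on the calling Baseline frame.
      js::jit::BaselineFrame* frame = js::jit::GetTopBaselineFrame(cx);

      // ...inspect or update |frame|...
      return !!frame;
    }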
diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp
new file mode 100644
index 0000000000..fd65289e61
--- /dev/null
+++ b/js/src/jit/JitFrames.cpp
@@ -0,0 +1,2570 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/JitFrames-inl.h"
+
+#include "mozilla/ScopeExit.h"
+
+#include <algorithm>
+
+#include "builtin/ModuleObject.h"
+#include "gc/GC.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Ion.h"
+#include "jit/IonScript.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/LIR.h"
+#include "jit/PcScriptCache.h"
+#include "jit/Recover.h"
+#include "jit/Safepoints.h"
+#include "jit/ScriptFromCalleeToken.h"
+#include "jit/Snapshots.h"
+#include "jit/VMFunctions.h"
+#include "js/Exception.h"
+#include "js/friend/DumpFunctions.h" // js::DumpObject, js::DumpValue
+#include "vm/Interpreter.h"
+#include "vm/JSContext.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmInstance.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "jit/JSJitFrameIter-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/Probes-inl.h"
+
+namespace js {
+namespace jit {
+
+// Given a slot index, returns the offset, in bytes, of that slot from a
+// JitFrameLayout. Slot distances are uniform across architectures; however,
+// the distance does depend on the size of the frame header.
+static inline int32_t OffsetOfFrameSlot(int32_t slot) { return -slot; }
+
+static inline uint8_t* AddressOfFrameSlot(JitFrameLayout* fp, int32_t slot) {
+ return (uint8_t*)fp + OffsetOfFrameSlot(slot);
+}
+
+static inline uintptr_t ReadFrameSlot(JitFrameLayout* fp, int32_t slot) {
+ return *(uintptr_t*)AddressOfFrameSlot(fp, slot);
+}
+
+static inline void WriteFrameSlot(JitFrameLayout* fp, int32_t slot,
+ uintptr_t value) {
+ *(uintptr_t*)AddressOfFrameSlot(fp, slot) = value;
+}
+
+static inline double ReadFrameDoubleSlot(JitFrameLayout* fp, int32_t slot) {
+ return *(double*)AddressOfFrameSlot(fp, slot);
+}
+
+static inline float ReadFrameFloat32Slot(JitFrameLayout* fp, int32_t slot) {
+ return *(float*)AddressOfFrameSlot(fp, slot);
+}
+
+static inline int32_t ReadFrameInt32Slot(JitFrameLayout* fp, int32_t slot) {
+ return *(int32_t*)AddressOfFrameSlot(fp, slot);
+}
+
+static inline bool ReadFrameBooleanSlot(JitFrameLayout* fp, int32_t slot) {
+ return *(bool*)AddressOfFrameSlot(fp, slot);
+}
+
+static uint32_t NumArgAndLocalSlots(const InlineFrameIterator& frame) {
+ JSScript* script = frame.script();
+ return CountArgSlots(script, frame.maybeCalleeTemplate()) + script->nfixed();
+}
+
+static void CloseLiveIteratorIon(JSContext* cx,
+ const InlineFrameIterator& frame,
+ const TryNote* tn) {
+ MOZ_ASSERT(tn->kind() == TryNoteKind::ForIn ||
+ tn->kind() == TryNoteKind::Destructuring);
+
+ bool isDestructuring = tn->kind() == TryNoteKind::Destructuring;
+ MOZ_ASSERT_IF(!isDestructuring, tn->stackDepth > 0);
+ MOZ_ASSERT_IF(isDestructuring, tn->stackDepth > 1);
+
+ // Save any pending exception, because some recover operations call into
+ // AutoUnsafeCallWithABI functions, which don't allow pending exceptions.
+ JS::AutoSaveExceptionState savedExc(cx);
+
+ SnapshotIterator si = frame.snapshotIterator();
+
+ // Skip stack slots until we reach the iterator object on the stack. For
+ // the destructuring case, we also need to get the "done" value.
+ uint32_t stackSlot = tn->stackDepth;
+ uint32_t adjust = isDestructuring ? 2 : 1;
+ uint32_t skipSlots = NumArgAndLocalSlots(frame) + stackSlot - adjust;
+
+ for (unsigned i = 0; i < skipSlots; i++) {
+ si.skip();
+ }
+
+ MaybeReadFallback recover(cx, cx->activation()->asJit(), &frame.frame(),
+ MaybeReadFallback::Fallback_DoNothing);
+ Value v = si.maybeRead(recover);
+ MOZ_RELEASE_ASSERT(v.isObject());
+ RootedObject iterObject(cx, &v.toObject());
+
+ if (isDestructuring) {
+ RootedValue doneValue(cx, si.read());
+ MOZ_RELEASE_ASSERT(!doneValue.isMagic());
+ bool done = ToBoolean(doneValue);
+ // Do not call IteratorClose if the destructuring iterator is already
+ // done.
+ if (done) {
+ return;
+ }
+ }
+
+ // Restore any pending exception before closing the iterator.
+ savedExc.restore();
+
+ if (cx->isExceptionPending()) {
+ if (tn->kind() == TryNoteKind::ForIn) {
+ CloseIterator(iterObject);
+ } else {
+ IteratorCloseForException(cx, iterObject);
+ }
+ } else {
+ UnwindIteratorForUncatchableException(iterObject);
+ }
+}
+
+class IonTryNoteFilter {
+ uint32_t depth_;
+
+ public:
+ explicit IonTryNoteFilter(const InlineFrameIterator& frame) {
+ uint32_t base = NumArgAndLocalSlots(frame);
+ SnapshotIterator si = frame.snapshotIterator();
+ MOZ_ASSERT(si.numAllocations() >= base);
+ depth_ = si.numAllocations() - base;
+ }
+
+ bool operator()(const TryNote* note) { return note->stackDepth <= depth_; }
+};
+
+class TryNoteIterIon : public TryNoteIter<IonTryNoteFilter> {
+ public:
+ TryNoteIterIon(JSContext* cx, const InlineFrameIterator& frame)
+ : TryNoteIter(cx, frame.script(), frame.pc(), IonTryNoteFilter(frame)) {}
+};
+
+static bool ShouldBailoutForDebugger(JSContext* cx,
+ const InlineFrameIterator& frame,
+ bool hitBailoutException) {
+ if (hitBailoutException) {
+ MOZ_ASSERT(!cx->isPropagatingForcedReturn());
+ return false;
+ }
+
+ // Bail out if we're propagating a forced return from an inlined frame,
+ // even if the realm is no longer a debuggee.
+ if (cx->isPropagatingForcedReturn() && frame.more()) {
+ return true;
+ }
+
+ if (!cx->realm()->isDebuggee()) {
+ return false;
+ }
+
+ // Bail out if there's a catchable exception and we are the debuggee of a
+ // Debugger with a live onExceptionUnwind hook.
+ if (cx->isExceptionPending() &&
+ DebugAPI::hasExceptionUnwindHook(cx->global())) {
+ return true;
+ }
+
+ // Bail out if a Debugger has observed this frame (e.g., for onPop).
+ JitActivation* act = cx->activation()->asJit();
+ RematerializedFrame* rematFrame =
+ act->lookupRematerializedFrame(frame.frame().fp(), frame.frameNo());
+ return rematFrame && rematFrame->isDebuggee();
+}
+
+static void OnLeaveIonFrame(JSContext* cx, const InlineFrameIterator& frame,
+ ResumeFromException* rfe) {
+ bool returnFromThisFrame =
+ cx->isPropagatingForcedReturn() || cx->isClosingGenerator();
+ if (!returnFromThisFrame) {
+ return;
+ }
+
+ JitActivation* act = cx->activation()->asJit();
+ RematerializedFrame* rematFrame = nullptr;
+ {
+ JS::AutoSaveExceptionState savedExc(cx);
+
+ // We can run recover instructions without invalidating because we're
+ // already leaving the frame.
+ MaybeReadFallback::FallbackConsequence consequence =
+ MaybeReadFallback::Fallback_DoNothing;
+ rematFrame = act->getRematerializedFrame(cx, frame.frame(), frame.frameNo(),
+ consequence);
+ if (!rematFrame) {
+ return;
+ }
+ }
+
+ MOZ_ASSERT(!frame.more());
+
+ if (cx->isClosingGenerator()) {
+ HandleClosingGeneratorReturn(cx, rematFrame, /*frameOk=*/true);
+ } else {
+ cx->clearPropagatingForcedReturn();
+ }
+
+ Value& rval = rematFrame->returnValue();
+ MOZ_RELEASE_ASSERT(!rval.isMagic());
+
+ // Set both framePointer and stackPointer to the address of the
+ // JitFrameLayout.
+ rfe->kind = ExceptionResumeKind::ForcedReturnIon;
+ rfe->framePointer = frame.frame().fp();
+ rfe->stackPointer = frame.frame().fp();
+ rfe->exception = rval;
+
+ act->removeIonFrameRecovery(frame.frame().jsFrame());
+ act->removeRematerializedFrame(frame.frame().fp());
+}
+
+static void HandleExceptionIon(JSContext* cx, const InlineFrameIterator& frame,
+ ResumeFromException* rfe,
+ bool* hitBailoutException) {
+ if (ShouldBailoutForDebugger(cx, frame, *hitBailoutException)) {
+ // We do the following:
+ //
+ // 1. Bailout to baseline to reconstruct a baseline frame.
+ // 2. Resume immediately into the exception tail afterwards, and
+ // handle the exception again with the top frame now a baseline
+ // frame.
+ //
+ // An empty exception info denotes that we're propagating an Ion
+ // exception due to debug mode, which BailoutIonToBaseline needs to
+ // know. This is because we might not be able to fully reconstruct up
+ // to the stack depth at the snapshot, as we could've thrown in the
+ // middle of a call.
+ ExceptionBailoutInfo propagateInfo(cx);
+ if (ExceptionHandlerBailout(cx, frame, rfe, propagateInfo)) {
+ return;
+ }
+ *hitBailoutException = true;
+ }
+
+ RootedScript script(cx, frame.script());
+
+ for (TryNoteIterIon tni(cx, frame); !tni.done(); ++tni) {
+ const TryNote* tn = *tni;
+ switch (tn->kind()) {
+ case TryNoteKind::ForIn:
+ case TryNoteKind::Destructuring:
+ CloseLiveIteratorIon(cx, frame, tn);
+ break;
+
+ case TryNoteKind::Catch:
+ // If we're closing a generator, we have to skip catch blocks.
+ if (cx->isClosingGenerator()) {
+ break;
+ }
+
+ if (cx->isExceptionPending()) {
+ // Ion can compile try-catch, but bailing out to catch
+ // exceptions is slow. Reset the warm-up counter so that if we
+ // catch many exceptions we won't Ion-compile the script.
+ script->resetWarmUpCounterToDelayIonCompilation();
+
+ if (*hitBailoutException) {
+ break;
+ }
+
+ // Bailout at the start of the catch block.
+ jsbytecode* catchPC = script->offsetToPC(tn->start + tn->length);
+ ExceptionBailoutInfo excInfo(cx, frame.frameNo(), catchPC,
+ tn->stackDepth);
+ if (ExceptionHandlerBailout(cx, frame, rfe, excInfo)) {
+ // Record exception locations to allow scope unwinding in
+ // |FinishBailoutToBaseline|
+ MOZ_ASSERT(cx->isExceptionPending());
+ rfe->bailoutInfo->tryPC =
+ UnwindEnvironmentToTryPc(frame.script(), tn);
+ rfe->bailoutInfo->faultPC = frame.pc();
+ return;
+ }
+
+ *hitBailoutException = true;
+ MOZ_ASSERT(cx->isExceptionPending());
+ }
+ break;
+
+ case TryNoteKind::Finally: {
+ if (!cx->isExceptionPending()) {
+ // We don't catch uncatchable exceptions.
+ break;
+ }
+
+ script->resetWarmUpCounterToDelayIonCompilation();
+
+ if (*hitBailoutException) {
+ break;
+ }
+
+ // Bailout at the start of the finally block.
+ jsbytecode* finallyPC = script->offsetToPC(tn->start + tn->length);
+ ExceptionBailoutInfo excInfo(cx, frame.frameNo(), finallyPC,
+ tn->stackDepth);
+
+ RootedValue exception(cx);
+ if (!cx->getPendingException(&exception)) {
+ exception = UndefinedValue();
+ }
+ excInfo.setFinallyException(exception.get());
+ cx->clearPendingException();
+
+ if (ExceptionHandlerBailout(cx, frame, rfe, excInfo)) {
+ // Record exception locations to allow scope unwinding in
+ // |FinishBailoutToBaseline|
+ rfe->bailoutInfo->tryPC =
+ UnwindEnvironmentToTryPc(frame.script(), tn);
+ rfe->bailoutInfo->faultPC = frame.pc();
+ return;
+ }
+
+ *hitBailoutException = true;
+ MOZ_ASSERT(cx->isExceptionPending());
+ break;
+ }
+
+ case TryNoteKind::ForOf:
+ case TryNoteKind::Loop:
+ break;
+
+ // TryNoteKind::ForOfIterClose is handled internally by the try note
+ // iterator.
+ default:
+ MOZ_CRASH("Unexpected try note");
+ }
+ }
+
+ OnLeaveIonFrame(cx, frame, rfe);
+}
+
+static void OnLeaveBaselineFrame(JSContext* cx, const JSJitFrameIter& frame,
+ jsbytecode* pc, ResumeFromException* rfe,
+ bool frameOk) {
+ BaselineFrame* baselineFrame = frame.baselineFrame();
+ bool returnFromThisFrame = jit::DebugEpilogue(cx, baselineFrame, pc, frameOk);
+ if (returnFromThisFrame) {
+ rfe->kind = ExceptionResumeKind::ForcedReturnBaseline;
+ rfe->framePointer = frame.fp();
+ rfe->stackPointer = reinterpret_cast<uint8_t*>(baselineFrame);
+ }
+}
+
+static inline void BaselineFrameAndStackPointersFromTryNote(
+ const TryNote* tn, const JSJitFrameIter& frame, uint8_t** framePointer,
+ uint8_t** stackPointer) {
+ JSScript* script = frame.baselineFrame()->script();
+ *framePointer = frame.fp();
+ *stackPointer = *framePointer - BaselineFrame::Size() -
+ (script->nfixed() + tn->stackDepth) * sizeof(Value);
+}
+
+static void SettleOnTryNote(JSContext* cx, const TryNote* tn,
+ const JSJitFrameIter& frame, EnvironmentIter& ei,
+ ResumeFromException* rfe, jsbytecode** pc) {
+ RootedScript script(cx, frame.baselineFrame()->script());
+
+ // Unwind environment chain (pop block objects).
+ if (cx->isExceptionPending()) {
+ UnwindEnvironment(cx, ei, UnwindEnvironmentToTryPc(script, tn));
+ }
+
+ // Compute base pointer and stack pointer.
+ BaselineFrameAndStackPointersFromTryNote(tn, frame, &rfe->framePointer,
+ &rfe->stackPointer);
+
+ // Compute the pc.
+ *pc = script->offsetToPC(tn->start + tn->length);
+}
+
+class BaselineTryNoteFilter {
+ const JSJitFrameIter& frame_;
+
+ public:
+ explicit BaselineTryNoteFilter(const JSJitFrameIter& frame) : frame_(frame) {}
+ bool operator()(const TryNote* note) {
+ BaselineFrame* frame = frame_.baselineFrame();
+
+ uint32_t numValueSlots = frame_.baselineFrameNumValueSlots();
+ MOZ_RELEASE_ASSERT(numValueSlots >= frame->script()->nfixed());
+
+ uint32_t currDepth = numValueSlots - frame->script()->nfixed();
+ return note->stackDepth <= currDepth;
+ }
+};
+
+class TryNoteIterBaseline : public TryNoteIter<BaselineTryNoteFilter> {
+ public:
+ TryNoteIterBaseline(JSContext* cx, const JSJitFrameIter& frame,
+ jsbytecode* pc)
+ : TryNoteIter(cx, frame.script(), pc, BaselineTryNoteFilter(frame)) {}
+};
+
+// Close all live iterators on a BaselineFrame due to exception unwinding. The
+// pc parameter is updated to where the envs have been unwound to.
+static void CloseLiveIteratorsBaselineForUncatchableException(
+ JSContext* cx, const JSJitFrameIter& frame, jsbytecode* pc) {
+ for (TryNoteIterBaseline tni(cx, frame, pc); !tni.done(); ++tni) {
+ const TryNote* tn = *tni;
+ switch (tn->kind()) {
+ case TryNoteKind::ForIn: {
+ uint8_t* framePointer;
+ uint8_t* stackPointer;
+ BaselineFrameAndStackPointersFromTryNote(tn, frame, &framePointer,
+ &stackPointer);
+ Value iterValue(*(Value*)stackPointer);
+ RootedObject iterObject(cx, &iterValue.toObject());
+ UnwindIteratorForUncatchableException(iterObject);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+}
+
+static bool ProcessTryNotesBaseline(JSContext* cx, const JSJitFrameIter& frame,
+ EnvironmentIter& ei,
+ ResumeFromException* rfe, jsbytecode** pc) {
+ MOZ_ASSERT(frame.baselineFrame()->runningInInterpreter(),
+ "Caller must ensure frame is an interpreter frame");
+
+ RootedScript script(cx, frame.baselineFrame()->script());
+
+ for (TryNoteIterBaseline tni(cx, frame, *pc); !tni.done(); ++tni) {
+ const TryNote* tn = *tni;
+
+ MOZ_ASSERT(cx->isExceptionPending());
+ switch (tn->kind()) {
+ case TryNoteKind::Catch: {
+ // If we're closing a generator, we have to skip catch blocks.
+ if (cx->isClosingGenerator()) {
+ break;
+ }
+
+ SettleOnTryNote(cx, tn, frame, ei, rfe, pc);
+
+ // Ion can compile try-catch, but bailing out to catch
+ // exceptions is slow. Reset the warm-up counter so that if we
+ // catch many exceptions we won't Ion-compile the script.
+ script->resetWarmUpCounterToDelayIonCompilation();
+
+ // Resume at the start of the catch block.
+ const BaselineInterpreter& interp =
+ cx->runtime()->jitRuntime()->baselineInterpreter();
+ frame.baselineFrame()->setInterpreterFields(*pc);
+ rfe->kind = ExceptionResumeKind::Catch;
+ rfe->target = interp.interpretOpAddr().value;
+ return true;
+ }
+
+ case TryNoteKind::Finally: {
+ SettleOnTryNote(cx, tn, frame, ei, rfe, pc);
+
+ const BaselineInterpreter& interp =
+ cx->runtime()->jitRuntime()->baselineInterpreter();
+ frame.baselineFrame()->setInterpreterFields(*pc);
+ rfe->kind = ExceptionResumeKind::Finally;
+ rfe->target = interp.interpretOpAddr().value;
+
+ // Drop the exception instead of leaking cross compartment data.
+ if (!cx->getPendingException(
+ MutableHandleValue::fromMarkedLocation(&rfe->exception))) {
+ rfe->exception = UndefinedValue();
+ }
+ cx->clearPendingException();
+ return true;
+ }
+
+ case TryNoteKind::ForIn: {
+ uint8_t* framePointer;
+ uint8_t* stackPointer;
+ BaselineFrameAndStackPointersFromTryNote(tn, frame, &framePointer,
+ &stackPointer);
+ Value iterValue(*reinterpret_cast<Value*>(stackPointer));
+ JSObject* iterObject = &iterValue.toObject();
+ CloseIterator(iterObject);
+ break;
+ }
+
+ case TryNoteKind::Destructuring: {
+ uint8_t* framePointer;
+ uint8_t* stackPointer;
+ BaselineFrameAndStackPointersFromTryNote(tn, frame, &framePointer,
+ &stackPointer);
+ // Note: if this ever changes, also update the
+ // TryNoteKind::Destructuring code in WarpBuilder.cpp!
+ RootedValue doneValue(cx, *(reinterpret_cast<Value*>(stackPointer)));
+ MOZ_RELEASE_ASSERT(!doneValue.isMagic());
+ bool done = ToBoolean(doneValue);
+ if (!done) {
+ Value iterValue(*(reinterpret_cast<Value*>(stackPointer) + 1));
+ RootedObject iterObject(cx, &iterValue.toObject());
+ if (!IteratorCloseForException(cx, iterObject)) {
+ SettleOnTryNote(cx, tn, frame, ei, rfe, pc);
+ return false;
+ }
+ }
+ break;
+ }
+
+ case TryNoteKind::ForOf:
+ case TryNoteKind::Loop:
+ break;
+
+ // TryNoteKind::ForOfIterClose is handled internally by the try note
+ // iterator.
+ default:
+ MOZ_CRASH("Invalid try note");
+ }
+ }
+ return true;
+}
+
+static void HandleExceptionBaseline(JSContext* cx, JSJitFrameIter& frame,
+ CommonFrameLayout* prevFrame,
+ ResumeFromException* rfe) {
+ MOZ_ASSERT(frame.isBaselineJS());
+ MOZ_ASSERT(prevFrame);
+
+ jsbytecode* pc;
+ frame.baselineScriptAndPc(nullptr, &pc);
+
+ // Ensure the BaselineFrame is an interpreter frame. This is easy to do and
+ // simplifies both the code below and the interaction with DebugModeOSR.
+ //
+ // Note that we never return to this frame via the previous frame's return
+ // address. We could set the return address to nullptr to ensure it's never
+ // used, but the profiler expects a non-null return value for its JitCode map
+ // lookup so we have to use an address in the interpreter code instead.
+ if (!frame.baselineFrame()->runningInInterpreter()) {
+ const BaselineInterpreter& interp =
+ cx->runtime()->jitRuntime()->baselineInterpreter();
+ uint8_t* retAddr = interp.codeRaw();
+ BaselineFrame* baselineFrame = frame.baselineFrame();
+
+ // Suppress profiler sampling while we fix up the frame to ensure the
+ // sampler thread doesn't see an inconsistent state.
+ AutoSuppressProfilerSampling suppressProfilerSampling(cx);
+ baselineFrame->switchFromJitToInterpreterForExceptionHandler(cx, pc);
+ prevFrame->setReturnAddress(retAddr);
+
+ // Ensure the current iterator's resumePCInCurrentFrame_ isn't used
+ // anywhere.
+ frame.setResumePCInCurrentFrame(nullptr);
+ }
+
+ bool frameOk = false;
+ RootedScript script(cx, frame.baselineFrame()->script());
+
+ if (script->hasScriptCounts()) {
+ PCCounts* counts = script->getThrowCounts(pc);
+ // If we failed to allocate, then skip the increment and continue to
+ // handle the exception.
+ if (counts) {
+ counts->numExec()++;
+ }
+ }
+
+ bool hasTryNotes = !script->trynotes().empty();
+
+again:
+ if (cx->isExceptionPending()) {
+ if (!cx->isClosingGenerator()) {
+ if (!DebugAPI::onExceptionUnwind(cx, frame.baselineFrame())) {
+ if (!cx->isExceptionPending()) {
+ goto again;
+ }
+ }
+ // Ensure that the debugger hasn't returned 'true' while clearing the
+ // exception state.
+ MOZ_ASSERT(cx->isExceptionPending());
+ }
+
+ if (hasTryNotes) {
+ EnvironmentIter ei(cx, frame.baselineFrame(), pc);
+ if (!ProcessTryNotesBaseline(cx, frame, ei, rfe, &pc)) {
+ goto again;
+ }
+ if (rfe->kind != ExceptionResumeKind::EntryFrame) {
+ // No need to increment the PCCounts number of execution here,
+ // as the interpreter increments any PCCounts if present.
+ MOZ_ASSERT_IF(script->hasScriptCounts(), script->maybeGetPCCounts(pc));
+ return;
+ }
+ }
+
+ frameOk = HandleClosingGeneratorReturn(cx, frame.baselineFrame(), frameOk);
+ } else {
+ if (hasTryNotes) {
+ CloseLiveIteratorsBaselineForUncatchableException(cx, frame, pc);
+ }
+
+ // We may be propagating a forced return from a debugger hook function.
+ if (MOZ_UNLIKELY(cx->isPropagatingForcedReturn())) {
+ cx->clearPropagatingForcedReturn();
+ frameOk = true;
+ }
+ }
+
+ OnLeaveBaselineFrame(cx, frame, pc, rfe, frameOk);
+}
+
+static JitFrameLayout* GetLastProfilingFrame(ResumeFromException* rfe) {
+ switch (rfe->kind) {
+ case ExceptionResumeKind::EntryFrame:
+ case ExceptionResumeKind::Wasm:
+ case ExceptionResumeKind::WasmCatch:
+ return nullptr;
+
+ // The following all return into Baseline or Ion frames.
+ case ExceptionResumeKind::Catch:
+ case ExceptionResumeKind::Finally:
+ case ExceptionResumeKind::ForcedReturnBaseline:
+ case ExceptionResumeKind::ForcedReturnIon:
+ return reinterpret_cast<JitFrameLayout*>(rfe->framePointer);
+
+ // When resuming into a bailed-out ion frame, use the bailout info to
+ // find the frame we are resuming into.
+ case ExceptionResumeKind::Bailout:
+ return reinterpret_cast<JitFrameLayout*>(rfe->bailoutInfo->incomingStack);
+ }
+
+ MOZ_CRASH("Invalid ResumeFromException type!");
+ return nullptr;
+}
+
+void HandleExceptionWasm(JSContext* cx, wasm::WasmFrameIter* iter,
+ ResumeFromException* rfe) {
+ MOZ_ASSERT(cx->activation()->asJit()->hasWasmExitFP());
+ wasm::HandleThrow(cx, *iter, rfe);
+ MOZ_ASSERT(iter->done());
+}
+
+void HandleException(ResumeFromException* rfe) {
+ JSContext* cx = TlsContext.get();
+
+#ifdef DEBUG
+ cx->runtime()->jitRuntime()->clearDisallowArbitraryCode();
+
+ // Reset the counter when we bailed after MDebugEnterGCUnsafeRegion, but
+ // before the matching MDebugLeaveGCUnsafeRegion.
+ //
+ // NOTE: EnterJit ensures the counter is zero when we enter JIT code.
+ cx->resetInUnsafeRegion();
+#endif
+
+ auto resetProfilerFrame = mozilla::MakeScopeExit([=] {
+ if (!cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(
+ cx->runtime())) {
+ return;
+ }
+
+ MOZ_ASSERT(cx->jitActivation == cx->profilingActivation());
+
+ auto* lastProfilingFrame = GetLastProfilingFrame(rfe);
+ cx->jitActivation->setLastProfilingFrame(lastProfilingFrame);
+ });
+
+ rfe->kind = ExceptionResumeKind::EntryFrame;
+
+ JitSpew(JitSpew_IonInvalidate, "handling exception");
+
+ JitActivation* activation = cx->activation()->asJit();
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (JitOptions.checkOsiPointRegisters) {
+ activation->setCheckRegs(false);
+ }
+#endif
+
+ JitFrameIter iter(cx->activation()->asJit(),
+ /* mustUnwindActivation = */ true);
+ CommonFrameLayout* prevJitFrame = nullptr;
+ while (!iter.done()) {
+ if (iter.isWasm()) {
+ prevJitFrame = nullptr;
+ HandleExceptionWasm(cx, &iter.asWasm(), rfe);
+ // If a wasm try-catch handler is found, we can immediately jump to it
+ // and quit iterating through the stack.
+ if (rfe->kind == ExceptionResumeKind::WasmCatch) {
+ return;
+ }
+ if (!iter.done()) {
+ ++iter;
+ }
+ continue;
+ }
+
+ JSJitFrameIter& frame = iter.asJSJit();
+
+ // JIT code can enter same-compartment realms, so reset cx->realm to
+ // this frame's realm.
+ if (frame.isScripted()) {
+ cx->setRealmForJitExceptionHandler(iter.realm());
+ }
+
+ if (frame.isIonJS()) {
+ // Search each inlined frame for live iterator objects, and close
+ // them.
+ InlineFrameIterator frames(cx, &frame);
+
+ // Invalidation state will be the same for all inlined scripts in the
+ // frame.
+ IonScript* ionScript = nullptr;
+ bool invalidated = frame.checkInvalidation(&ionScript);
+
+ // If we hit OOM or overrecursion while bailing out, we don't
+ // attempt to bail out a second time for this Ion frame. Just unwind
+ // and continue at the next frame.
+ bool hitBailoutException = false;
+ for (;;) {
+ HandleExceptionIon(cx, frames, rfe, &hitBailoutException);
+
+ if (rfe->kind == ExceptionResumeKind::Bailout ||
+ rfe->kind == ExceptionResumeKind::ForcedReturnIon) {
+ if (invalidated) {
+ ionScript->decrementInvalidationCount(cx->gcContext());
+ }
+ return;
+ }
+
+ MOZ_ASSERT(rfe->kind == ExceptionResumeKind::EntryFrame);
+
+ // When profiling, each frame popped needs a notification that
+ // the function has exited, so invoke the probe that a function
+ // is exiting.
+
+ JSScript* script = frames.script();
+ probes::ExitScript(cx, script, script->function(),
+ /* popProfilerFrame = */ false);
+ if (!frames.more()) {
+ break;
+ }
+ ++frames;
+ }
+
+ // Remove left-over state which might have been needed for bailout.
+ activation->removeIonFrameRecovery(frame.jsFrame());
+ activation->removeRematerializedFrame(frame.fp());
+
+ // If invalidated, decrement the number of frames remaining on the
+ // stack for the given IonScript.
+ if (invalidated) {
+ ionScript->decrementInvalidationCount(cx->gcContext());
+ }
+
+ } else if (frame.isBaselineJS()) {
+ HandleExceptionBaseline(cx, frame, prevJitFrame, rfe);
+
+ if (rfe->kind != ExceptionResumeKind::EntryFrame &&
+ rfe->kind != ExceptionResumeKind::ForcedReturnBaseline) {
+ return;
+ }
+
+ // Unwind profiler pseudo-stack
+ JSScript* script = frame.script();
+ probes::ExitScript(cx, script, script->function(),
+ /* popProfilerFrame = */ false);
+
+ if (rfe->kind == ExceptionResumeKind::ForcedReturnBaseline) {
+ return;
+ }
+ }
+
+ prevJitFrame = frame.current();
+ ++iter;
+ }
+
+ // Wasm sets its own value of SP in HandleExceptionWasm.
+ if (iter.isJSJit()) {
+ MOZ_ASSERT(rfe->kind == ExceptionResumeKind::EntryFrame);
+ rfe->framePointer = iter.asJSJit().current()->callerFramePtr();
+ rfe->stackPointer =
+ iter.asJSJit().fp() + CommonFrameLayout::offsetOfReturnAddress();
+ }
+}
+
+// Turns a JitFrameLayout into an UnwoundJit ExitFrameLayout.
+void EnsureUnwoundJitExitFrame(JitActivation* act, JitFrameLayout* frame) {
+ ExitFrameLayout* exitFrame = reinterpret_cast<ExitFrameLayout*>(frame);
+
+ if (act->jsExitFP() == (uint8_t*)frame) {
+ // If we already called this function for the current frame, do
+ // nothing.
+ MOZ_ASSERT(exitFrame->isUnwoundJitExit());
+ return;
+ }
+
+#ifdef DEBUG
+ JSJitFrameIter iter(act);
+ while (!iter.isScripted()) {
+ ++iter;
+ }
+ MOZ_ASSERT(iter.current() == frame, "|frame| must be the top JS frame");
+
+ MOZ_ASSERT(!!act->jsExitFP());
+ MOZ_ASSERT((uint8_t*)exitFrame->footer() >= act->jsExitFP(),
+ "Must have space for ExitFooterFrame before jsExitFP");
+#endif
+
+ act->setJSExitFP((uint8_t*)frame);
+ exitFrame->footer()->setUnwoundJitExitFrame();
+ MOZ_ASSERT(exitFrame->isUnwoundJitExit());
+}
+
+JSScript* MaybeForwardedScriptFromCalleeToken(CalleeToken token) {
+ switch (GetCalleeTokenTag(token)) {
+ case CalleeToken_Script:
+ return MaybeForwarded(CalleeTokenToScript(token));
+ case CalleeToken_Function:
+ case CalleeToken_FunctionConstructing: {
+ JSFunction* fun = MaybeForwarded(CalleeTokenToFunction(token));
+ return MaybeForwarded(fun)->nonLazyScript();
+ }
+ }
+ MOZ_CRASH("invalid callee token tag");
+}
+
+CalleeToken TraceCalleeToken(JSTracer* trc, CalleeToken token) {
+ switch (CalleeTokenTag tag = GetCalleeTokenTag(token)) {
+ case CalleeToken_Function:
+ case CalleeToken_FunctionConstructing: {
+ JSFunction* fun = CalleeTokenToFunction(token);
+ TraceRoot(trc, &fun, "jit-callee");
+ return CalleeToToken(fun, tag == CalleeToken_FunctionConstructing);
+ }
+ case CalleeToken_Script: {
+ JSScript* script = CalleeTokenToScript(token);
+ TraceRoot(trc, &script, "jit-script");
+ return CalleeToToken(script);
+ }
+ default:
+ MOZ_CRASH("unknown callee token type");
+ }
+}
+
+uintptr_t* JitFrameLayout::slotRef(SafepointSlotEntry where) {
+ if (where.stack) {
+ return (uintptr_t*)((uint8_t*)this - where.slot);
+ }
+ return (uintptr_t*)((uint8_t*)thisAndActualArgs() + where.slot);
+}
+
+#ifdef JS_NUNBOX32
+static inline uintptr_t ReadAllocation(const JSJitFrameIter& frame,
+ const LAllocation* a) {
+ if (a->isGeneralReg()) {
+ Register reg = a->toGeneralReg()->reg();
+ return frame.machineState().read(reg);
+ }
+ return *frame.jsFrame()->slotRef(SafepointSlotEntry(a));
+}
+#endif
+
+static void TraceThisAndArguments(JSTracer* trc, const JSJitFrameIter& frame,
+ JitFrameLayout* layout) {
+ // Trace |this| and any extra actual arguments for an Ion frame. Tracing
+ // of formal arguments is taken care of by the frame's safepoint/snapshot,
+ // except when the script might have lazy arguments or rest, in which case
+ // we trace them as well. We also have to trace formals if we have a
+ // LazyLink frame or an InterpreterStub frame or a special JSJit to wasm
+ // frame (since wasm doesn't use snapshots).
+
+ if (!CalleeTokenIsFunction(layout->calleeToken())) {
+ return;
+ }
+
+ size_t nargs = layout->numActualArgs();
+ size_t nformals = 0;
+
+ JSFunction* fun = CalleeTokenToFunction(layout->calleeToken());
+ if (frame.type() != FrameType::JSJitToWasm &&
+ !frame.isExitFrameLayout<CalledFromJitExitFrameLayout>() &&
+ !fun->nonLazyScript()->mayReadFrameArgsDirectly()) {
+ nformals = fun->nargs();
+ }
+
+ size_t newTargetOffset = std::max(nargs, fun->nargs());
+
+ Value* argv = layout->thisAndActualArgs();
+
+ // Trace |this|.
+ TraceRoot(trc, argv, "ion-thisv");
+
+ // Trace actual arguments beyond the formals. Note + 1 for thisv.
+ for (size_t i = nformals + 1; i < nargs + 1; i++) {
+ TraceRoot(trc, &argv[i], "ion-argv");
+ }
+
+ // Always trace the new.target from the frame. It's not in the snapshots.
+ // +1 to pass |this|
+ if (CalleeTokenIsConstructing(layout->calleeToken())) {
+ TraceRoot(trc, &argv[1 + newTargetOffset], "ion-newTarget");
+ }
+}
+
+#ifdef JS_NUNBOX32
+static inline void WriteAllocation(const JSJitFrameIter& frame,
+ const LAllocation* a, uintptr_t value) {
+ if (a->isGeneralReg()) {
+ Register reg = a->toGeneralReg()->reg();
+ frame.machineState().write(reg, value);
+ } else {
+ *frame.jsFrame()->slotRef(SafepointSlotEntry(a)) = value;
+ }
+}
+#endif
+
+static void TraceIonJSFrame(JSTracer* trc, const JSJitFrameIter& frame) {
+ JitFrameLayout* layout = (JitFrameLayout*)frame.fp();
+
+ layout->replaceCalleeToken(TraceCalleeToken(trc, layout->calleeToken()));
+
+ IonScript* ionScript = nullptr;
+ if (frame.checkInvalidation(&ionScript)) {
+ // This frame has been invalidated, meaning that its IonScript is no
+ // longer reachable through the callee token (JSFunction/JSScript->ion
+ // is now nullptr or recompiled). Manually trace it here.
+ ionScript->trace(trc);
+ } else {
+ ionScript = frame.ionScriptFromCalleeToken();
+ }
+
+ TraceThisAndArguments(trc, frame, frame.jsFrame());
+
+ const SafepointIndex* si =
+ ionScript->getSafepointIndex(frame.resumePCinCurrentFrame());
+
+ SafepointReader safepoint(ionScript, si);
+
+ // Scan through slots which contain pointers (or on punboxing systems,
+ // actual values).
+ SafepointSlotEntry entry;
+
+ while (safepoint.getGcSlot(&entry)) {
+ uintptr_t* ref = layout->slotRef(entry);
+ TraceGenericPointerRoot(trc, reinterpret_cast<gc::Cell**>(ref),
+ "ion-gc-slot");
+ }
+
+ uintptr_t* spill = frame.spillBase();
+ LiveGeneralRegisterSet gcRegs = safepoint.gcSpills();
+ LiveGeneralRegisterSet valueRegs = safepoint.valueSpills();
+ for (GeneralRegisterBackwardIterator iter(safepoint.allGprSpills());
+ iter.more(); ++iter) {
+ --spill;
+ if (gcRegs.has(*iter)) {
+ TraceGenericPointerRoot(trc, reinterpret_cast<gc::Cell**>(spill),
+ "ion-gc-spill");
+ } else if (valueRegs.has(*iter)) {
+ TraceRoot(trc, reinterpret_cast<Value*>(spill), "ion-value-spill");
+ }
+ }
+
+#ifdef JS_PUNBOX64
+ while (safepoint.getValueSlot(&entry)) {
+ Value* v = (Value*)layout->slotRef(entry);
+ TraceRoot(trc, v, "ion-gc-slot");
+ }
+#else
+ LAllocation type, payload;
+ while (safepoint.getNunboxSlot(&type, &payload)) {
+ JSValueTag tag = JSValueTag(ReadAllocation(frame, &type));
+ uintptr_t rawPayload = ReadAllocation(frame, &payload);
+
+ Value v = Value::fromTagAndPayload(tag, rawPayload);
+ TraceRoot(trc, &v, "ion-torn-value");
+
+ if (v != Value::fromTagAndPayload(tag, rawPayload)) {
+ // GC moved the value, replace the stored payload.
+ rawPayload = v.toNunboxPayload();
+ WriteAllocation(frame, &payload, rawPayload);
+ }
+ }
+#endif
+}
+
+static void TraceBailoutFrame(JSTracer* trc, const JSJitFrameIter& frame) {
+ JitFrameLayout* layout = (JitFrameLayout*)frame.fp();
+
+ layout->replaceCalleeToken(TraceCalleeToken(trc, layout->calleeToken()));
+
+ // We have to trace the list of actual arguments, as only formal arguments
+ // are represented in the Snapshot.
+ TraceThisAndArguments(trc, frame, frame.jsFrame());
+
+ // Under a bailout, we do not have a Safepoint to only iterate over GC-things.
+ // Thus we use a SnapshotIterator to trace all the locations which would be
+ // used to reconstruct the Baseline frame.
+ //
+ // Note that at the time where this function is called, we have not yet
+ // started to reconstruct baseline frames.
+
+ // The vector of recover instructions is already traced as part of the
+ // JitActivation.
+ SnapshotIterator snapIter(frame,
+ frame.activation()->bailoutData()->machineState());
+
+ // For each instruction, we read the allocations without evaluating the
+ // recover instruction, nor reconstructing the frame. We are only looking at
+ // tracing readable allocations.
+ while (true) {
+ while (snapIter.moreAllocations()) {
+ snapIter.traceAllocation(trc);
+ }
+
+ if (!snapIter.moreInstructions()) {
+ break;
+ }
+ snapIter.nextInstruction();
+ }
+}
+
+static void UpdateIonJSFrameForMinorGC(JSRuntime* rt,
+ const JSJitFrameIter& frame) {
+ // Minor GCs may move slots/elements allocated in the nursery. Update
+ // any slots/elements pointers stored in this frame.
+
+ JitFrameLayout* layout = (JitFrameLayout*)frame.fp();
+
+ IonScript* ionScript = nullptr;
+ if (frame.checkInvalidation(&ionScript)) {
+ // This frame has been invalidated, meaning that its IonScript is no
+ // longer reachable through the callee token (JSFunction/JSScript->ion
+ // is now nullptr or recompiled).
+ } else {
+ ionScript = frame.ionScriptFromCalleeToken();
+ }
+
+ Nursery& nursery = rt->gc.nursery();
+
+ const SafepointIndex* si =
+ ionScript->getSafepointIndex(frame.resumePCinCurrentFrame());
+ SafepointReader safepoint(ionScript, si);
+
+ LiveGeneralRegisterSet slotsRegs = safepoint.slotsOrElementsSpills();
+ uintptr_t* spill = frame.spillBase();
+ for (GeneralRegisterBackwardIterator iter(safepoint.allGprSpills());
+ iter.more(); ++iter) {
+ --spill;
+ if (slotsRegs.has(*iter)) {
+ nursery.forwardBufferPointer(spill);
+ }
+ }
+
+  // Skip the GC-slot and value-slot entries so the reader is positioned at
+  // the slots-or-elements entries; the safepoint is read sequentially.
+ SafepointSlotEntry entry;
+ while (safepoint.getGcSlot(&entry)) {
+ }
+
+#ifdef JS_PUNBOX64
+ while (safepoint.getValueSlot(&entry)) {
+ }
+#else
+ LAllocation type, payload;
+ while (safepoint.getNunboxSlot(&type, &payload)) {
+ }
+#endif
+
+ while (safepoint.getSlotsOrElementsSlot(&entry)) {
+ nursery.forwardBufferPointer(layout->slotRef(entry));
+ }
+}
+
+static void TraceBaselineStubFrame(JSTracer* trc, const JSJitFrameIter& frame) {
+ // Trace the ICStub pointer stored in the stub frame. This is necessary
+ // so that we don't destroy the stub code after unlinking the stub.
+
+ MOZ_ASSERT(frame.type() == FrameType::BaselineStub);
+ BaselineStubFrameLayout* layout = (BaselineStubFrameLayout*)frame.fp();
+
+ if (ICStub* stub = layout->maybeStubPtr()) {
+ if (stub->isFallback()) {
+ // Fallback stubs use runtime-wide trampoline code we don't need to trace.
+ MOZ_ASSERT(stub->usesTrampolineCode());
+ } else {
+ MOZ_ASSERT(stub->toCacheIRStub()->makesGCCalls());
+ stub->toCacheIRStub()->trace(trc);
+ }
+ }
+}
+
+static void TraceIonICCallFrame(JSTracer* trc, const JSJitFrameIter& frame) {
+ MOZ_ASSERT(frame.type() == FrameType::IonICCall);
+ IonICCallFrameLayout* layout = (IonICCallFrameLayout*)frame.fp();
+ TraceRoot(trc, layout->stubCode(), "ion-ic-call-code");
+}
+
+#if defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32)
+uint8_t* alignDoubleSpill(uint8_t* pointer) {
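+  // Round the pointer down to ABIStackAlignment so spilled doubles can be
+  // accessed at a properly aligned address.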
+ uintptr_t address = reinterpret_cast<uintptr_t>(pointer);
+ address &= ~(uintptr_t(ABIStackAlignment) - 1);
+ return reinterpret_cast<uint8_t*>(address);
+}
+#endif
+
+#ifdef JS_CODEGEN_MIPS32
+static void TraceJitExitFrameCopiedArguments(JSTracer* trc,
+ const VMFunctionData* f,
+ ExitFooterFrame* footer) {
+ uint8_t* doubleArgs = footer->alignedForABI();
+ if (f->outParam == Type_Handle) {
+ doubleArgs -= sizeof(Value);
+ }
+ doubleArgs -= f->doubleByRefArgs() * sizeof(double);
+
+ for (uint32_t explicitArg = 0; explicitArg < f->explicitArgs; explicitArg++) {
+ if (f->argProperties(explicitArg) == VMFunctionData::DoubleByRef) {
+ // Arguments with double size can only have RootValue type.
+ if (f->argRootType(explicitArg) == VMFunctionData::RootValue) {
+ TraceRoot(trc, reinterpret_cast<Value*>(doubleArgs), "ion-vm-args");
+ } else {
+ MOZ_ASSERT(f->argRootType(explicitArg) == VMFunctionData::RootNone);
+ }
+ doubleArgs += sizeof(double);
+ }
+ }
+}
+#else
+static void TraceJitExitFrameCopiedArguments(JSTracer* trc,
+ const VMFunctionData* f,
+ ExitFooterFrame* footer) {
+  // This is a no-op on other platforms.
+}
+#endif
+
+static void TraceJitExitFrame(JSTracer* trc, const JSJitFrameIter& frame) {
+ ExitFooterFrame* footer = frame.exitFrame()->footer();
+
+  // This corresponds to the case where we have built a fake exit frame which
+ // handles the case of a native function call. We need to trace the argument
+ // vector of the function call, and also new.target if it was a constructing
+ // call.
+ if (frame.isExitFrameLayout<NativeExitFrameLayout>()) {
+ NativeExitFrameLayout* native =
+ frame.exitFrame()->as<NativeExitFrameLayout>();
+ size_t len = native->argc() + 2;
+ Value* vp = native->vp();
+ TraceRootRange(trc, len, vp, "ion-native-args");
+ if (frame.isExitFrameLayout<ConstructNativeExitFrameLayout>()) {
+ TraceRoot(trc, vp + len, "ion-native-new-target");
+ }
+ return;
+ }
+
+ if (frame.isExitFrameLayout<IonOOLNativeExitFrameLayout>()) {
+ IonOOLNativeExitFrameLayout* oolnative =
+ frame.exitFrame()->as<IonOOLNativeExitFrameLayout>();
+ TraceRoot(trc, oolnative->stubCode(), "ion-ool-native-code");
+    TraceRoot(trc, oolnative->vp(), "ion-ool-native-vp");
+ size_t len = oolnative->argc() + 1;
+ TraceRootRange(trc, len, oolnative->thisp(), "ion-ool-native-thisargs");
+ return;
+ }
+
+ if (frame.isExitFrameLayout<IonOOLProxyExitFrameLayout>()) {
+ IonOOLProxyExitFrameLayout* oolproxy =
+ frame.exitFrame()->as<IonOOLProxyExitFrameLayout>();
+ TraceRoot(trc, oolproxy->stubCode(), "ion-ool-proxy-code");
+ TraceRoot(trc, oolproxy->vp(), "ion-ool-proxy-vp");
+ TraceRoot(trc, oolproxy->id(), "ion-ool-proxy-id");
+ TraceRoot(trc, oolproxy->proxy(), "ion-ool-proxy-proxy");
+ return;
+ }
+
+ if (frame.isExitFrameLayout<IonDOMExitFrameLayout>()) {
+ IonDOMExitFrameLayout* dom = frame.exitFrame()->as<IonDOMExitFrameLayout>();
+ TraceRoot(trc, dom->thisObjAddress(), "ion-dom-args");
+ if (dom->isMethodFrame()) {
+ IonDOMMethodExitFrameLayout* method =
+ reinterpret_cast<IonDOMMethodExitFrameLayout*>(dom);
+ size_t len = method->argc() + 2;
+ Value* vp = method->vp();
+ TraceRootRange(trc, len, vp, "ion-dom-args");
+ } else {
+ TraceRoot(trc, dom->vp(), "ion-dom-args");
+ }
+ return;
+ }
+
+ if (frame.isExitFrameLayout<CalledFromJitExitFrameLayout>()) {
+ auto* layout = frame.exitFrame()->as<CalledFromJitExitFrameLayout>();
+ JitFrameLayout* jsLayout = layout->jsFrame();
+ jsLayout->replaceCalleeToken(
+ TraceCalleeToken(trc, jsLayout->calleeToken()));
+ TraceThisAndArguments(trc, frame, jsLayout);
+ return;
+ }
+
+ if (frame.isExitFrameLayout<DirectWasmJitCallFrameLayout>()) {
+ // Nothing needs to be traced here at the moment -- the arguments to the
+ // callee are traced by the callee, and the inlined caller does not push
+ // anything else.
+ return;
+ }
+
+ if (frame.isBareExit() || frame.isUnwoundJitExit()) {
+    // Nothing to trace: this is either a fake exit frame pushed for VM
+    // functions with nothing to trace on the stack, or an unwound
+    // JitFrameLayout.
+ return;
+ }
+
+ MOZ_ASSERT(frame.exitFrame()->isWrapperExit());
+
+ const VMFunctionData* f = footer->function();
+ MOZ_ASSERT(f);
+
+ // Trace arguments of the VM wrapper.
+ uint8_t* argBase = frame.exitFrame()->argBase();
+ for (uint32_t explicitArg = 0; explicitArg < f->explicitArgs; explicitArg++) {
+ switch (f->argRootType(explicitArg)) {
+ case VMFunctionData::RootNone:
+ break;
+ case VMFunctionData::RootObject: {
+ // Sometimes we can bake in HandleObjects to nullptr.
+ JSObject** pobj = reinterpret_cast<JSObject**>(argBase);
+ if (*pobj) {
+ TraceRoot(trc, pobj, "ion-vm-args");
+ }
+ break;
+ }
+ case VMFunctionData::RootString:
+ TraceRoot(trc, reinterpret_cast<JSString**>(argBase), "ion-vm-args");
+ break;
+ case VMFunctionData::RootValue:
+ TraceRoot(trc, reinterpret_cast<Value*>(argBase), "ion-vm-args");
+ break;
+ case VMFunctionData::RootId:
+ TraceRoot(trc, reinterpret_cast<jsid*>(argBase), "ion-vm-args");
+ break;
+ case VMFunctionData::RootCell:
+ TraceGenericPointerRoot(trc, reinterpret_cast<gc::Cell**>(argBase),
+ "ion-vm-args");
+ break;
+ case VMFunctionData::RootBigInt:
+ TraceRoot(trc, reinterpret_cast<JS::BigInt**>(argBase), "ion-vm-args");
+ break;
+ }
+
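+    // Advance argBase past this argument's stack slot(s).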
+ switch (f->argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ case VMFunctionData::WordByRef:
+ argBase += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ case VMFunctionData::DoubleByRef:
+ argBase += 2 * sizeof(void*);
+ break;
+ }
+ }
+
+ if (f->outParam == Type_Handle) {
+ switch (f->outParamRootType) {
+ case VMFunctionData::RootNone:
+ MOZ_CRASH("Handle outparam must have root type");
+ case VMFunctionData::RootObject:
+ TraceRoot(trc, footer->outParam<JSObject*>(), "ion-vm-out");
+ break;
+ case VMFunctionData::RootString:
+ TraceRoot(trc, footer->outParam<JSString*>(), "ion-vm-out");
+ break;
+ case VMFunctionData::RootValue:
+ TraceRoot(trc, footer->outParam<Value>(), "ion-vm-outvp");
+ break;
+ case VMFunctionData::RootId:
+ TraceRoot(trc, footer->outParam<jsid>(), "ion-vm-outvp");
+ break;
+ case VMFunctionData::RootCell:
+ TraceGenericPointerRoot(trc, footer->outParam<gc::Cell*>(),
+ "ion-vm-out");
+ break;
+ case VMFunctionData::RootBigInt:
+ TraceRoot(trc, footer->outParam<JS::BigInt*>(), "ion-vm-out");
+ break;
+ }
+ }
+
+ TraceJitExitFrameCopiedArguments(trc, f, footer);
+}
+
+static void TraceBaselineInterpreterEntryFrame(JSTracer* trc,
+ const JSJitFrameIter& frame) {
+ // Baseline Interpreter entry code generated under --emit-interpreter-entry.
+ BaselineInterpreterEntryFrameLayout* layout =
+ (BaselineInterpreterEntryFrameLayout*)frame.fp();
+ layout->replaceCalleeToken(TraceCalleeToken(trc, layout->calleeToken()));
+ TraceThisAndArguments(trc, frame, layout);
+}
+
+static void TraceRectifierFrame(JSTracer* trc, const JSJitFrameIter& frame) {
+ // Trace thisv.
+ //
+ // Baseline JIT code generated as part of the ICCall_Fallback stub may use
+ // it if we're calling a constructor that returns a primitive value.
+ RectifierFrameLayout* layout = (RectifierFrameLayout*)frame.fp();
+ TraceRoot(trc, &layout->thisv(), "rectifier-thisv");
+}
+
+static void TraceJSJitToWasmFrame(JSTracer* trc, const JSJitFrameIter& frame) {
+ // This is doing a subset of TraceIonJSFrame, since the callee doesn't
+ // have a script.
+ JitFrameLayout* layout = (JitFrameLayout*)frame.fp();
+ layout->replaceCalleeToken(TraceCalleeToken(trc, layout->calleeToken()));
+ TraceThisAndArguments(trc, frame, layout);
+}
+
+static void TraceJitActivation(JSTracer* trc, JitActivation* activation) {
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (JitOptions.checkOsiPointRegisters) {
+ // GC can modify spilled registers, breaking our register checks.
+ // To handle this, we disable these checks for the current VM call
+ // when a GC happens.
+ activation->setCheckRegs(false);
+ }
+#endif
+
+ activation->traceRematerializedFrames(trc);
+ activation->traceIonRecovery(trc);
+
+ // This is used for sanity checking continuity of the sequence of wasm stack
+ // maps as we unwind. It has no functional purpose.
+ uintptr_t highestByteVisitedInPrevWasmFrame = 0;
+
+ for (JitFrameIter frames(activation); !frames.done(); ++frames) {
+ if (frames.isJSJit()) {
+ const JSJitFrameIter& jitFrame = frames.asJSJit();
+ switch (jitFrame.type()) {
+ case FrameType::Exit:
+ TraceJitExitFrame(trc, jitFrame);
+ break;
+ case FrameType::BaselineJS:
+ jitFrame.baselineFrame()->trace(trc, jitFrame);
+ break;
+ case FrameType::IonJS:
+ TraceIonJSFrame(trc, jitFrame);
+ break;
+ case FrameType::BaselineStub:
+ TraceBaselineStubFrame(trc, jitFrame);
+ break;
+ case FrameType::Bailout:
+ TraceBailoutFrame(trc, jitFrame);
+ break;
+ case FrameType::BaselineInterpreterEntry:
+ TraceBaselineInterpreterEntryFrame(trc, jitFrame);
+ break;
+ case FrameType::Rectifier:
+ TraceRectifierFrame(trc, jitFrame);
+ break;
+ case FrameType::IonICCall:
+ TraceIonICCallFrame(trc, jitFrame);
+ break;
+ case FrameType::WasmToJSJit:
+ // Ignore: this is a special marker used to let the
+ // JitFrameIter know the frame above is a wasm frame, handled
+ // in the next iteration.
+ break;
+ case FrameType::JSJitToWasm:
+ TraceJSJitToWasmFrame(trc, jitFrame);
+ break;
+ default:
+ MOZ_CRASH("unexpected frame type");
+ }
+ highestByteVisitedInPrevWasmFrame = 0; /* "unknown" */
+ } else {
+ MOZ_ASSERT(frames.isWasm());
+ uint8_t* nextPC = frames.resumePCinCurrentFrame();
+ MOZ_ASSERT(nextPC != 0);
+ wasm::WasmFrameIter& wasmFrameIter = frames.asWasm();
+ wasm::Instance* instance = wasmFrameIter.instance();
+ wasm::TraceInstanceEdge(trc, instance, "WasmFrameIter instance");
+ highestByteVisitedInPrevWasmFrame = instance->traceFrame(
+ trc, wasmFrameIter, nextPC, highestByteVisitedInPrevWasmFrame);
+ }
+ }
+}
+
+void TraceJitActivations(JSContext* cx, JSTracer* trc) {
+ for (JitActivationIterator activations(cx); !activations.done();
+ ++activations) {
+ TraceJitActivation(trc, activations->asJit());
+ }
+}
+
+void UpdateJitActivationsForMinorGC(JSRuntime* rt) {
+ MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
+ JSContext* cx = rt->mainContextFromOwnThread();
+ for (JitActivationIterator activations(cx); !activations.done();
+ ++activations) {
+ for (OnlyJSJitFrameIter iter(activations); !iter.done(); ++iter) {
+ if (iter.frame().type() == FrameType::IonJS) {
+ UpdateIonJSFrameForMinorGC(rt, iter.frame());
+ }
+ }
+ }
+}
+
+JSScript* GetTopJitJSScript(JSContext* cx) {
+ JSJitFrameIter frame(cx->activation()->asJit());
+ MOZ_ASSERT(frame.type() == FrameType::Exit);
+ ++frame;
+
+ if (frame.isBaselineStub()) {
+ ++frame;
+ MOZ_ASSERT(frame.isBaselineJS());
+ }
+
+ MOZ_ASSERT(frame.isScripted());
+ return frame.script();
+}
+
+void GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes) {
+ JitSpew(JitSpew_IonSnapshots, "Recover PC & Script from the last frame.");
+
+ // Recover the return address so that we can look it up in the
+ // PcScriptCache, as script/pc computation is expensive.
+ JitActivationIterator actIter(cx);
+ OnlyJSJitFrameIter it(actIter);
+ uint8_t* retAddr;
+ if (it.frame().isExitFrame()) {
+ ++it;
+
+    // Skip baseline interpreter entry frames. These can exist before
+    // rectifier frames.
+ if (it.frame().isBaselineInterpreterEntry()) {
+ ++it;
+ }
+
+ // Skip rectifier frames.
+ if (it.frame().isRectifier()) {
+ ++it;
+ MOZ_ASSERT(it.frame().isBaselineStub() || it.frame().isBaselineJS() ||
+ it.frame().isIonJS());
+ }
+
+ // Skip Baseline/Ion stub and IC call frames.
+ if (it.frame().isBaselineStub()) {
+ ++it;
+ MOZ_ASSERT(it.frame().isBaselineJS());
+ } else if (it.frame().isIonICCall()) {
+ ++it;
+ MOZ_ASSERT(it.frame().isIonJS());
+ }
+
+ MOZ_ASSERT(it.frame().isBaselineJS() || it.frame().isIonJS());
+
+ // Don't use the return address and the cache if the BaselineFrame is
+ // running in the Baseline Interpreter. In this case the bytecode pc is
+ // cheap to get, so we won't benefit from the cache, and the return address
+ // does not map to a single bytecode pc.
+ if (it.frame().isBaselineJS() &&
+ it.frame().baselineFrame()->runningInInterpreter()) {
+ it.frame().baselineScriptAndPc(scriptRes, pcRes);
+ return;
+ }
+
+ retAddr = it.frame().resumePCinCurrentFrame();
+ } else {
+ MOZ_ASSERT(it.frame().isBailoutJS());
+ retAddr = it.frame().returnAddress();
+ }
+
+ MOZ_ASSERT(retAddr);
+
+ uint32_t hash = PcScriptCache::Hash(retAddr);
+
+ // Lazily initialize the cache. The allocation may safely fail and will not
+ // GC.
+ if (MOZ_UNLIKELY(cx->ionPcScriptCache == nullptr)) {
+ cx->ionPcScriptCache =
+ MakeUnique<PcScriptCache>(cx->runtime()->gc.gcNumber());
+ }
+
+ if (cx->ionPcScriptCache.ref() &&
+ cx->ionPcScriptCache->get(cx->runtime(), hash, retAddr, scriptRes,
+ pcRes)) {
+ return;
+ }
+
+ // Lookup failed: undertake expensive process to determine script and pc.
+ if (it.frame().isIonJS() || it.frame().isBailoutJS()) {
+ InlineFrameIterator ifi(cx, &it.frame());
+ *scriptRes = ifi.script();
+ *pcRes = ifi.pc();
+ } else {
+ MOZ_ASSERT(it.frame().isBaselineJS());
+ it.frame().baselineScriptAndPc(scriptRes, pcRes);
+ }
+
+ // Add entry to cache.
+ if (cx->ionPcScriptCache.ref()) {
+ cx->ionPcScriptCache->add(hash, retAddr, *pcRes, *scriptRes);
+ }
+}
+
+RInstructionResults::RInstructionResults(JitFrameLayout* fp)
+ : results_(nullptr), fp_(fp), initialized_(false) {}
+
+RInstructionResults::RInstructionResults(RInstructionResults&& src)
+ : results_(std::move(src.results_)),
+ fp_(src.fp_),
+ initialized_(src.initialized_) {
+ src.initialized_ = false;
+}
+
+RInstructionResults& RInstructionResults::operator=(RInstructionResults&& rhs) {
+ MOZ_ASSERT(&rhs != this, "self-moves are prohibited");
+ this->~RInstructionResults();
+ new (this) RInstructionResults(std::move(rhs));
+ return *this;
+}
+
+RInstructionResults::~RInstructionResults() {
+ // results_ is freed by the UniquePtr.
+}
+
+bool RInstructionResults::init(JSContext* cx, uint32_t numResults) {
+ if (numResults) {
+ results_ = cx->make_unique<Values>();
+ if (!results_) {
+ return false;
+ }
+ if (!results_->growBy(numResults)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
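+    // Seed each result slot with a magic value; storeInstructionResult
+    // asserts that a slot still holds this marker before writing to it.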
+ Value guard = MagicValue(JS_ION_BAILOUT);
+ for (size_t i = 0; i < numResults; i++) {
+ (*results_)[i].init(guard);
+ }
+ }
+
+ initialized_ = true;
+ return true;
+}
+
+bool RInstructionResults::isInitialized() const { return initialized_; }
+
+size_t RInstructionResults::length() const { return results_->length(); }
+
+JitFrameLayout* RInstructionResults::frame() const {
+ MOZ_ASSERT(fp_);
+ return fp_;
+}
+
+HeapPtr<Value>& RInstructionResults::operator[](size_t index) {
+ return (*results_)[index];
+}
+
+void RInstructionResults::trace(JSTracer* trc) {
+  // Note: The vector necessarily exists; otherwise this object would not have
+  // been stored on the activation from which the trace function is called.
+ TraceRange(trc, results_->length(), results_->begin(), "ion-recover-results");
+}
+
+SnapshotIterator::SnapshotIterator(const JSJitFrameIter& iter,
+ const MachineState* machineState)
+ : snapshot_(iter.ionScript()->snapshots(), iter.snapshotOffset(),
+ iter.ionScript()->snapshotsRVATableSize(),
+ iter.ionScript()->snapshotsListSize()),
+ recover_(snapshot_, iter.ionScript()->recovers(),
+ iter.ionScript()->recoversSize()),
+ fp_(iter.jsFrame()),
+ machine_(machineState),
+ ionScript_(iter.ionScript()),
+ instructionResults_(nullptr) {}
+
+SnapshotIterator::SnapshotIterator()
+ : snapshot_(nullptr, 0, 0, 0),
+ recover_(snapshot_, nullptr, 0),
+ fp_(nullptr),
+ machine_(nullptr),
+ ionScript_(nullptr),
+ instructionResults_(nullptr) {}
+
+uintptr_t SnapshotIterator::fromStack(int32_t offset) const {
+ return ReadFrameSlot(fp_, offset);
+}
+
+static Value FromObjectPayload(uintptr_t payload) {
+ MOZ_ASSERT(payload != 0);
+ return ObjectValue(*reinterpret_cast<JSObject*>(payload));
+}
+
+static Value FromStringPayload(uintptr_t payload) {
+ return StringValue(reinterpret_cast<JSString*>(payload));
+}
+
+static Value FromSymbolPayload(uintptr_t payload) {
+ return SymbolValue(reinterpret_cast<JS::Symbol*>(payload));
+}
+
+static Value FromBigIntPayload(uintptr_t payload) {
+ return BigIntValue(reinterpret_cast<JS::BigInt*>(payload));
+}
+
+static Value FromTypedPayload(JSValueType type, uintptr_t payload) {
+ switch (type) {
+ case JSVAL_TYPE_INT32:
+ return Int32Value(payload);
+ case JSVAL_TYPE_BOOLEAN:
+ return BooleanValue(!!payload);
+ case JSVAL_TYPE_STRING:
+ return FromStringPayload(payload);
+ case JSVAL_TYPE_SYMBOL:
+ return FromSymbolPayload(payload);
+ case JSVAL_TYPE_BIGINT:
+ return FromBigIntPayload(payload);
+ case JSVAL_TYPE_OBJECT:
+ return FromObjectPayload(payload);
+ default:
+ MOZ_CRASH("unexpected type - needs payload");
+ }
+}
+
+bool SnapshotIterator::allocationReadable(const RValueAllocation& alloc,
+ ReadMethod rm) {
+ // If we have to recover stores, and if we are not interested in the
+ // default value of the instruction, then we have to check if the recover
+ // instruction results are available.
+ if (alloc.needSideEffect() && rm != ReadMethod::AlwaysDefault) {
+ if (!hasInstructionResults()) {
+ return false;
+ }
+ }
+
+ switch (alloc.mode()) {
+ case RValueAllocation::DOUBLE_REG:
+ return hasRegister(alloc.fpuReg());
+
+ case RValueAllocation::TYPED_REG:
+ return hasRegister(alloc.reg2());
+
+#if defined(JS_NUNBOX32)
+ case RValueAllocation::UNTYPED_REG_REG:
+ return hasRegister(alloc.reg()) && hasRegister(alloc.reg2());
+ case RValueAllocation::UNTYPED_REG_STACK:
+ return hasRegister(alloc.reg()) && hasStack(alloc.stackOffset2());
+ case RValueAllocation::UNTYPED_STACK_REG:
+ return hasStack(alloc.stackOffset()) && hasRegister(alloc.reg2());
+ case RValueAllocation::UNTYPED_STACK_STACK:
+ return hasStack(alloc.stackOffset()) && hasStack(alloc.stackOffset2());
+#elif defined(JS_PUNBOX64)
+ case RValueAllocation::UNTYPED_REG:
+ return hasRegister(alloc.reg());
+ case RValueAllocation::UNTYPED_STACK:
+ return hasStack(alloc.stackOffset());
+#endif
+
+ case RValueAllocation::RECOVER_INSTRUCTION:
+ return hasInstructionResult(alloc.index());
+ case RValueAllocation::RI_WITH_DEFAULT_CST:
+ return rm == ReadMethod::AlwaysDefault ||
+ hasInstructionResult(alloc.index());
+
+ default:
+ return true;
+ }
+}
+
+Value SnapshotIterator::allocationValue(const RValueAllocation& alloc,
+ ReadMethod rm) {
+ switch (alloc.mode()) {
+ case RValueAllocation::CONSTANT:
+ return ionScript_->getConstant(alloc.index());
+
+ case RValueAllocation::CST_UNDEFINED:
+ return UndefinedValue();
+
+ case RValueAllocation::CST_NULL:
+ return NullValue();
+
+ case RValueAllocation::DOUBLE_REG:
+ return DoubleValue(fromRegister<double>(alloc.fpuReg()));
+
+ case RValueAllocation::ANY_FLOAT_REG:
+ return Float32Value(fromRegister<float>(alloc.fpuReg()));
+
+ case RValueAllocation::ANY_FLOAT_STACK:
+ return Float32Value(ReadFrameFloat32Slot(fp_, alloc.stackOffset()));
+
+ case RValueAllocation::TYPED_REG:
+ return FromTypedPayload(alloc.knownType(), fromRegister(alloc.reg2()));
+
+ case RValueAllocation::TYPED_STACK: {
+ switch (alloc.knownType()) {
+ case JSVAL_TYPE_DOUBLE:
+ return DoubleValue(ReadFrameDoubleSlot(fp_, alloc.stackOffset2()));
+ case JSVAL_TYPE_INT32:
+ return Int32Value(ReadFrameInt32Slot(fp_, alloc.stackOffset2()));
+ case JSVAL_TYPE_BOOLEAN:
+ return BooleanValue(ReadFrameBooleanSlot(fp_, alloc.stackOffset2()));
+ case JSVAL_TYPE_STRING:
+ return FromStringPayload(fromStack(alloc.stackOffset2()));
+ case JSVAL_TYPE_SYMBOL:
+ return FromSymbolPayload(fromStack(alloc.stackOffset2()));
+ case JSVAL_TYPE_BIGINT:
+ return FromBigIntPayload(fromStack(alloc.stackOffset2()));
+ case JSVAL_TYPE_OBJECT:
+ return FromObjectPayload(fromStack(alloc.stackOffset2()));
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ }
+
+#if defined(JS_NUNBOX32)
+ case RValueAllocation::UNTYPED_REG_REG: {
+ return Value::fromTagAndPayload(JSValueTag(fromRegister(alloc.reg())),
+ fromRegister(alloc.reg2()));
+ }
+
+ case RValueAllocation::UNTYPED_REG_STACK: {
+ return Value::fromTagAndPayload(JSValueTag(fromRegister(alloc.reg())),
+ fromStack(alloc.stackOffset2()));
+ }
+
+ case RValueAllocation::UNTYPED_STACK_REG: {
+ return Value::fromTagAndPayload(
+ JSValueTag(fromStack(alloc.stackOffset())),
+ fromRegister(alloc.reg2()));
+ }
+
+ case RValueAllocation::UNTYPED_STACK_STACK: {
+ return Value::fromTagAndPayload(
+ JSValueTag(fromStack(alloc.stackOffset())),
+ fromStack(alloc.stackOffset2()));
+ }
+#elif defined(JS_PUNBOX64)
+ case RValueAllocation::UNTYPED_REG: {
+ return Value::fromRawBits(fromRegister(alloc.reg()));
+ }
+
+ case RValueAllocation::UNTYPED_STACK: {
+ return Value::fromRawBits(fromStack(alloc.stackOffset()));
+ }
+#endif
+
+ case RValueAllocation::RECOVER_INSTRUCTION:
+ return fromInstructionResult(alloc.index());
+
+ case RValueAllocation::RI_WITH_DEFAULT_CST:
+ if (rm == ReadMethod::Normal && hasInstructionResult(alloc.index())) {
+ return fromInstructionResult(alloc.index());
+ }
+ MOZ_ASSERT(rm == ReadMethod::AlwaysDefault);
+ return ionScript_->getConstant(alloc.index2());
+
+ default:
+ MOZ_CRASH("huh?");
+ }
+}
+
+Value SnapshotIterator::maybeRead(const RValueAllocation& a,
+ MaybeReadFallback& fallback) {
+ if (allocationReadable(a)) {
+ return allocationValue(a);
+ }
+
+ if (fallback.canRecoverResults()) {
+ // Code paths which are calling maybeRead are not always capable of
+ // returning an error code, as these code paths used to be infallible.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!initInstructionResults(fallback)) {
+ oomUnsafe.crash("js::jit::SnapshotIterator::maybeRead");
+ }
+
+ if (allocationReadable(a)) {
+ return allocationValue(a);
+ }
+
+ MOZ_ASSERT_UNREACHABLE("All allocations should be readable.");
+ }
+
+ return UndefinedValue();
+}
+
+bool SnapshotIterator::tryRead(Value* result) {
+ RValueAllocation a = readAllocation();
+ if (allocationReadable(a)) {
+ *result = allocationValue(a);
+ return true;
+ }
+ return false;
+}
+
+void SnapshotIterator::writeAllocationValuePayload(
+ const RValueAllocation& alloc, const Value& v) {
+ MOZ_ASSERT(v.isGCThing());
+
+ switch (alloc.mode()) {
+ case RValueAllocation::CONSTANT:
+ ionScript_->getConstant(alloc.index()) = v;
+ break;
+
+ case RValueAllocation::CST_UNDEFINED:
+ case RValueAllocation::CST_NULL:
+ case RValueAllocation::DOUBLE_REG:
+ case RValueAllocation::ANY_FLOAT_REG:
+ case RValueAllocation::ANY_FLOAT_STACK:
+ MOZ_CRASH("Not a GC thing: Unexpected write");
+ break;
+
+ case RValueAllocation::TYPED_REG:
+ machine_->write(alloc.reg2(), uintptr_t(v.toGCThing()));
+ break;
+
+ case RValueAllocation::TYPED_STACK:
+ switch (alloc.knownType()) {
+ default:
+ MOZ_CRASH("Not a GC thing: Unexpected write");
+ break;
+ case JSVAL_TYPE_STRING:
+ case JSVAL_TYPE_SYMBOL:
+ case JSVAL_TYPE_BIGINT:
+ case JSVAL_TYPE_OBJECT:
+ WriteFrameSlot(fp_, alloc.stackOffset2(), uintptr_t(v.toGCThing()));
+ break;
+ }
+ break;
+
+#if defined(JS_NUNBOX32)
+ case RValueAllocation::UNTYPED_REG_REG:
+ case RValueAllocation::UNTYPED_STACK_REG:
+ machine_->write(alloc.reg2(), uintptr_t(v.toGCThing()));
+ break;
+
+ case RValueAllocation::UNTYPED_REG_STACK:
+ case RValueAllocation::UNTYPED_STACK_STACK:
+ WriteFrameSlot(fp_, alloc.stackOffset2(), uintptr_t(v.toGCThing()));
+ break;
+#elif defined(JS_PUNBOX64)
+ case RValueAllocation::UNTYPED_REG:
+ machine_->write(alloc.reg(), v.asRawBits());
+ break;
+
+ case RValueAllocation::UNTYPED_STACK:
+ WriteFrameSlot(fp_, alloc.stackOffset(), v.asRawBits());
+ break;
+#endif
+
+ case RValueAllocation::RECOVER_INSTRUCTION:
+ MOZ_CRASH("Recover instructions are handled by the JitActivation.");
+ break;
+
+ case RValueAllocation::RI_WITH_DEFAULT_CST:
+ // Assume that we are always going to be writing on the default value
+ // while tracing.
+ ionScript_->getConstant(alloc.index2()) = v;
+ break;
+
+ default:
+ MOZ_CRASH("huh?");
+ }
+}
+
+void SnapshotIterator::traceAllocation(JSTracer* trc) {
+ RValueAllocation alloc = readAllocation();
+ if (!allocationReadable(alloc, ReadMethod::AlwaysDefault)) {
+ return;
+ }
+
+ Value v = allocationValue(alloc, ReadMethod::AlwaysDefault);
+ if (!v.isGCThing()) {
+ return;
+ }
+
+ Value copy = v;
+ TraceRoot(trc, &v, "ion-typed-reg");
+ if (v != copy) {
+ MOZ_ASSERT(SameType(v, copy));
+ writeAllocationValuePayload(alloc, v);
+ }
+}
+
+const RResumePoint* SnapshotIterator::resumePoint() const {
+ return instruction()->toResumePoint();
+}
+
+uint32_t SnapshotIterator::numAllocations() const {
+ return instruction()->numOperands();
+}
+
+uint32_t SnapshotIterator::pcOffset() const {
+ return resumePoint()->pcOffset();
+}
+
+ResumeMode SnapshotIterator::resumeMode() const {
+ return resumePoint()->mode();
+}
+
+void SnapshotIterator::skipInstruction() {
+ MOZ_ASSERT(snapshot_.numAllocationsRead() == 0);
+ size_t numOperands = instruction()->numOperands();
+ for (size_t i = 0; i < numOperands; i++) {
+ skip();
+ }
+ nextInstruction();
+}
+
+bool SnapshotIterator::initInstructionResults(MaybeReadFallback& fallback) {
+ MOZ_ASSERT(fallback.canRecoverResults());
+ JSContext* cx = fallback.maybeCx;
+
+ // If there is only one resume point in the list of instructions, then there
+ // is no instruction to recover, and thus no need to register any results.
+ if (recover_.numInstructions() == 1) {
+ return true;
+ }
+
+ JitFrameLayout* fp = fallback.frame->jsFrame();
+ RInstructionResults* results = fallback.activation->maybeIonFrameRecovery(fp);
+ if (!results) {
+ AutoRealm ar(cx, fallback.frame->script());
+
+ // We are going to run recover instructions. To avoid problems where recover
+ // instructions are not idempotent (for example, if we allocate an object,
+ // object identity may be observable), we should not execute code in the
+ // Ion stack frame afterwards. To avoid doing so, we invalidate the script.
+ // This is not necessary for bailouts or other cases where we are leaving
+ // the frame anyway. We only need it for niche cases like debugger
+ // introspection or Function.arguments.
+ if (fallback.consequence == MaybeReadFallback::Fallback_Invalidate) {
+ ionScript_->invalidate(cx, fallback.frame->script(),
+ /* resetUses = */ false,
+ "Observe recovered instruction.");
+ }
+
+    // Register the list of results on the activation. We need to do that
+    // before we initialize the list, so that if any recover instruction
+    // causes a GC, the results are properly traced by the activation.
+ RInstructionResults tmp(fallback.frame->jsFrame());
+ if (!fallback.activation->registerIonFrameRecovery(std::move(tmp))) {
+ return false;
+ }
+
+ results = fallback.activation->maybeIonFrameRecovery(fp);
+
+ // Start a new snapshot at the beginning of the JSJitFrameIter. This
+ // SnapshotIterator is used for evaluating the content of all recover
+ // instructions. The result is then saved on the JitActivation.
+ MachineState machine = fallback.frame->machineState();
+ SnapshotIterator s(*fallback.frame, &machine);
+ if (!s.computeInstructionResults(cx, results)) {
+      // If the evaluation failed because of OOM, then we discard the
+      // current set of results that we collected so far.
+ fallback.activation->removeIonFrameRecovery(fp);
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(results->isInitialized());
+ MOZ_RELEASE_ASSERT(results->length() == recover_.numInstructions() - 1);
+ instructionResults_ = results;
+ return true;
+}
+
+bool SnapshotIterator::computeInstructionResults(
+ JSContext* cx, RInstructionResults* results) const {
+ MOZ_ASSERT(!results->isInitialized());
+ MOZ_ASSERT(recover_.numInstructionsRead() == 1);
+
+ // The last instruction will always be a resume point.
+ size_t numResults = recover_.numInstructions() - 1;
+ if (!results->isInitialized()) {
+ if (!results->init(cx, numResults)) {
+ return false;
+ }
+
+ // No need to iterate over the only resume point.
+ if (!numResults) {
+ MOZ_ASSERT(results->isInitialized());
+ return true;
+ }
+
+ // Avoid invoking the object metadata callback, which could try to walk the
+ // stack while bailing out.
+ gc::AutoSuppressGC suppressGC(cx);
+ js::AutoSuppressAllocationMetadataBuilder suppressMetadata(cx);
+
+ // Fill with the results of recover instructions.
+ SnapshotIterator s(*this);
+ s.instructionResults_ = results;
+ while (s.moreInstructions()) {
+ // Skip resume point and only interpret recover instructions.
+ if (s.instruction()->isResumePoint()) {
+ s.skipInstruction();
+ continue;
+ }
+
+ if (!s.instruction()->recover(cx, s)) {
+ return false;
+ }
+ s.nextInstruction();
+ }
+ }
+
+ MOZ_ASSERT(results->isInitialized());
+ return true;
+}
+
+void SnapshotIterator::storeInstructionResult(const Value& v) {
+ uint32_t currIns = recover_.numInstructionsRead() - 1;
+ MOZ_ASSERT((*instructionResults_)[currIns].isMagic(JS_ION_BAILOUT));
+ (*instructionResults_)[currIns] = v;
+}
+
+Value SnapshotIterator::fromInstructionResult(uint32_t index) const {
+ MOZ_ASSERT(!(*instructionResults_)[index].isMagic(JS_ION_BAILOUT));
+ return (*instructionResults_)[index];
+}
+
+void SnapshotIterator::settleOnFrame() {
+  // Check that the current instruction can still be used.
+ MOZ_ASSERT(snapshot_.numAllocationsRead() == 0);
+ while (!instruction()->isResumePoint()) {
+ skipInstruction();
+ }
+}
+
+void SnapshotIterator::nextFrame() {
+ nextInstruction();
+ settleOnFrame();
+}
+
+Value SnapshotIterator::maybeReadAllocByIndex(size_t index) {
+ while (index--) {
+ MOZ_ASSERT(moreAllocations());
+ skip();
+ }
+
+ Value s;
+ {
+ // This MaybeReadFallback method cannot GC.
+ JS::AutoSuppressGCAnalysis nogc;
+ MaybeReadFallback fallback;
+ s = maybeRead(fallback);
+ }
+
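+  // Skip the rest of this frame's allocations before returning.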
+ while (moreAllocations()) {
+ skip();
+ }
+
+ return s;
+}
+
+InlineFrameIterator::InlineFrameIterator(JSContext* cx,
+ const JSJitFrameIter* iter)
+ : calleeTemplate_(cx),
+ calleeRVA_(),
+ script_(cx),
+ pc_(nullptr),
+ numActualArgs_(0) {
+ resetOn(iter);
+}
+
+InlineFrameIterator::InlineFrameIterator(JSContext* cx,
+ const InlineFrameIterator* iter)
+ : frame_(iter ? iter->frame_ : nullptr),
+ framesRead_(0),
+ frameCount_(iter ? iter->frameCount_ : UINT32_MAX),
+ calleeTemplate_(cx),
+ calleeRVA_(),
+ script_(cx),
+ pc_(nullptr),
+ numActualArgs_(0) {
+ if (frame_) {
+ machine_ = iter->machine_;
+ start_ = SnapshotIterator(*frame_, &machine_);
+
+    // findNextFrame will iterate to the next frame and initialize everything.
+    // Therefore, to settle on the same frame, we report one less frame read.
+ framesRead_ = iter->framesRead_ - 1;
+ findNextFrame();
+ }
+}
+
+void InlineFrameIterator::resetOn(const JSJitFrameIter* iter) {
+ frame_ = iter;
+ framesRead_ = 0;
+ frameCount_ = UINT32_MAX;
+
+ if (iter) {
+ machine_ = iter->machineState();
+ start_ = SnapshotIterator(*iter, &machine_);
+ findNextFrame();
+ }
+}
+
+void InlineFrameIterator::findNextFrame() {
+ MOZ_ASSERT(more());
+
+ si_ = start_;
+
+ // Read the initial frame out of the C stack.
+ calleeTemplate_ = frame_->maybeCallee();
+ calleeRVA_ = RValueAllocation();
+ script_ = frame_->script();
+ MOZ_ASSERT(script_->hasBaselineScript());
+
+ // Settle on the outermost frame without evaluating any instructions before
+ // looking for a pc.
+ si_.settleOnFrame();
+
+ pc_ = script_->offsetToPC(si_.pcOffset());
+ numActualArgs_ = 0xbadbad;
+
+ // This unfortunately is O(n*m), because we must skip over outer frames
+ // before reading inner ones.
+
+  // The first time (frameCount_ == UINT32_MAX) we do not know the number of
+  // frames that we are going to inspect. So we iterate until there are no
+  // more frames, to settle on the innermost frame and to count the number
+  // of frames.
+ size_t remaining = (frameCount_ != UINT32_MAX) ? frameNo() - 1 : SIZE_MAX;
+
+ size_t i = 1;
+ for (; i <= remaining && si_.moreFrames(); i++) {
+ ResumeMode mode = si_.resumeMode();
+ MOZ_ASSERT(IsIonInlinableOp(JSOp(*pc_)));
+
+ // Recover the number of actual arguments from the script.
+ if (IsInvokeOp(JSOp(*pc_))) {
+ MOZ_ASSERT(mode == ResumeMode::InlinedStandardCall ||
+ mode == ResumeMode::InlinedFunCall);
+ numActualArgs_ = GET_ARGC(pc_);
+ if (mode == ResumeMode::InlinedFunCall && numActualArgs_ > 0) {
+ numActualArgs_--;
+ }
+ } else if (IsGetPropPC(pc_) || IsGetElemPC(pc_)) {
+ MOZ_ASSERT(mode == ResumeMode::InlinedAccessor);
+ numActualArgs_ = 0;
+ } else {
+ MOZ_RELEASE_ASSERT(IsSetPropPC(pc_));
+ MOZ_ASSERT(mode == ResumeMode::InlinedAccessor);
+ numActualArgs_ = 1;
+ }
+
+ // Skip over non-argument slots, as well as |this|.
+ bool skipNewTarget = IsConstructPC(pc_);
+ unsigned skipCount =
+ (si_.numAllocations() - 1) - numActualArgs_ - 1 - skipNewTarget;
+ for (unsigned j = 0; j < skipCount; j++) {
+ si_.skip();
+ }
+
+    // This value should correspond to the function which is being inlined.
+    // The value must be readable to iterate over the inline frame. Most of
+    // the time, these functions are stored as JSFunction constants, in
+    // registers which hold the JSFunction pointer, or in recover
+    // instructions with a default value.
+ Value funval = si_.readWithDefault(&calleeRVA_);
+
+ // Skip extra value allocations.
+ while (si_.moreAllocations()) {
+ si_.skip();
+ }
+
+ si_.nextFrame();
+
+ calleeTemplate_ = &funval.toObject().as<JSFunction>();
+ script_ = calleeTemplate_->nonLazyScript();
+ MOZ_ASSERT(script_->hasBaselineScript());
+
+ pc_ = script_->offsetToPC(si_.pcOffset());
+ }
+
+  // The first time, when we do not know the number of frames, we only settle
+  // on the last frame, and update the number of frames based on the number of
+  // iterations that we have done.
+ if (frameCount_ == UINT32_MAX) {
+ MOZ_ASSERT(!si_.moreFrames());
+ frameCount_ = i;
+ }
+
+ framesRead_++;
+}
+
+JSFunction* InlineFrameIterator::callee(MaybeReadFallback& fallback) const {
+ MOZ_ASSERT(isFunctionFrame());
+ if (calleeRVA_.mode() == RValueAllocation::INVALID ||
+ !fallback.canRecoverResults()) {
+ return calleeTemplate_;
+ }
+
+ SnapshotIterator s(si_);
+ // :TODO: Handle allocation failures from recover instruction.
+ Value funval = s.maybeRead(calleeRVA_, fallback);
+ return &funval.toObject().as<JSFunction>();
+}
+
+JSObject* InlineFrameIterator::computeEnvironmentChain(
+ const Value& envChainValue, MaybeReadFallback& fallback,
+ bool* hasInitialEnv) const {
+ if (envChainValue.isObject()) {
+ if (hasInitialEnv) {
+ if (fallback.canRecoverResults()) {
+ RootedObject obj(fallback.maybeCx, &envChainValue.toObject());
+ *hasInitialEnv = isFunctionFrame() &&
+ callee(fallback)->needsFunctionEnvironmentObjects();
+ return obj;
+ }
+ JS::AutoSuppressGCAnalysis
+ nogc; // If we cannot recover then we cannot GC.
+ *hasInitialEnv = isFunctionFrame() &&
+ callee(fallback)->needsFunctionEnvironmentObjects();
+ }
+
+ return &envChainValue.toObject();
+ }
+
+ // Note we can hit this case even for functions with a CallObject, in case
+ // we are walking the frame during the function prologue, before the env
+ // chain has been initialized.
+ if (isFunctionFrame()) {
+ return callee(fallback)->environment();
+ }
+
+ if (isModuleFrame()) {
+ return script()->module()->environment();
+ }
+
+ // Ion does not handle non-function scripts that have anything other than
+ // the global on their env chain.
+ MOZ_ASSERT(!script()->isForEval());
+ MOZ_ASSERT(!script()->hasNonSyntacticScope());
+ return &script()->global().lexicalEnvironment();
+}
+
+bool InlineFrameIterator::isFunctionFrame() const { return !!calleeTemplate_; }
+
+bool InlineFrameIterator::isModuleFrame() const { return script()->isModule(); }
+
+uintptr_t* MachineState::SafepointState::addressOfRegister(Register reg) const {
+ size_t offset = regs.offsetOfPushedRegister(reg);
+
+ MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
+ uint32_t index = offset / sizeof(uintptr_t);
+
+#ifdef DEBUG
+ // Assert correctness with a slower algorithm in debug builds.
+ uint32_t expectedIndex = 0;
+ bool found = false;
+ for (GeneralRegisterBackwardIterator iter(regs); iter.more(); ++iter) {
+ expectedIndex++;
+ if (*iter == reg) {
+ found = true;
+ break;
+ }
+ }
+ MOZ_ASSERT(found);
+ MOZ_ASSERT(expectedIndex == index);
+#endif
+
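+  // Pushed registers are stored below spillBase, so the register's slot is
+  // |index| words below it.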
+ return spillBase - index;
+}
+
+char* MachineState::SafepointState::addressOfRegister(FloatRegister reg) const {
+ // Note: this could be optimized similar to the GPR case above by implementing
+ // offsetOfPushedRegister for FloatRegisterSet. Float register sets are
+ // complicated though and this case is very uncommon: it's only reachable for
+ // exception bailouts with live float registers.
+ MOZ_ASSERT(!reg.isSimd128());
+ char* ptr = floatSpillBase;
+ for (FloatRegisterBackwardIterator iter(floatRegs); iter.more(); ++iter) {
+ ptr -= (*iter).size();
+ for (uint32_t a = 0; a < (*iter).numAlignedAliased(); a++) {
+      // Only report registers that actually start at this address as starting
+      // here, e.g. d0 should not be reported at s1, only at s0.
+ FloatRegister ftmp = (*iter).alignedAliased(a);
+ if (ftmp == reg) {
+ return ptr;
+ }
+ }
+ }
+ MOZ_CRASH("Invalid register");
+}
+
+uintptr_t MachineState::read(Register reg) const {
+ if (state_.is<BailoutState>()) {
+ return state_.as<BailoutState>().regs[reg.code()].r;
+ }
+ if (state_.is<SafepointState>()) {
+ uintptr_t* addr = state_.as<SafepointState>().addressOfRegister(reg);
+ return *addr;
+ }
+ MOZ_CRASH("Invalid state");
+}
+
+template <typename T>
+T MachineState::read(FloatRegister reg) const {
+#if !defined(JS_CODEGEN_RISCV64)
+ MOZ_ASSERT(reg.size() == sizeof(T));
+#else
+  // RISCV64 always stores FloatRegisters as 64 bits.
+ MOZ_ASSERT(reg.size() == sizeof(double));
+#endif
+
+#if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
+ if (state_.is<BailoutState>()) {
+ uint32_t offset = reg.getRegisterDumpOffsetInBytes();
+ MOZ_ASSERT((offset % sizeof(T)) == 0);
+ MOZ_ASSERT((offset + sizeof(T)) <= sizeof(RegisterDump::FPUArray));
+
+ const BailoutState& state = state_.as<BailoutState>();
+ char* addr = reinterpret_cast<char*>(state.floatRegs.begin()) + offset;
+ return *reinterpret_cast<T*>(addr);
+ }
+ if (state_.is<SafepointState>()) {
+ char* addr = state_.as<SafepointState>().addressOfRegister(reg);
+ return *reinterpret_cast<T*>(addr);
+ }
+#endif
+ MOZ_CRASH("Invalid state");
+}
+
+void MachineState::write(Register reg, uintptr_t value) const {
+ if (state_.is<SafepointState>()) {
+ uintptr_t* addr = state_.as<SafepointState>().addressOfRegister(reg);
+ *addr = value;
+ return;
+ }
+ MOZ_CRASH("Invalid state");
+}
+
+bool InlineFrameIterator::isConstructing() const {
+ // Skip the current frame and look at the caller's.
+ if (more()) {
+ InlineFrameIterator parent(TlsContext.get(), this);
+ ++parent;
+
+ // In the case of a JS frame, look up the pc from the snapshot.
+ JSOp parentOp = JSOp(*parent.pc());
+
+ // Inlined Getters and Setters are never constructing.
+ if (IsIonInlinableGetterOrSetterOp(parentOp)) {
+ return false;
+ }
+
+ MOZ_ASSERT(IsInvokeOp(parentOp) && !IsSpreadOp(parentOp));
+
+ return IsConstructOp(parentOp);
+ }
+
+ return frame_->isConstructing();
+}
+
+void SnapshotIterator::warnUnreadableAllocation() {
+ fprintf(stderr,
+ "Warning! Tried to access unreadable value allocation (possible "
+ "f.arguments).\n");
+}
+
+struct DumpOverflownOp {
+ const unsigned numFormals_;
+ unsigned i_ = 0;
+
+ explicit DumpOverflownOp(unsigned numFormals) : numFormals_(numFormals) {}
+
+ void operator()(const Value& v) {
+ if (i_ >= numFormals_) {
+ fprintf(stderr, " actual (arg %u): ", i_);
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ DumpValue(v);
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+ i_++;
+ }
+};
+
+void InlineFrameIterator::dump() const {
+ MaybeReadFallback fallback;
+
+ if (more()) {
+ fprintf(stderr, " JS frame (inlined)\n");
+ } else {
+ fprintf(stderr, " JS frame\n");
+ }
+
+ bool isFunction = false;
+ if (isFunctionFrame()) {
+ isFunction = true;
+ fprintf(stderr, " callee fun: ");
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ DumpObject(callee(fallback));
+#else
+ fprintf(stderr, "?\n");
+#endif
+ } else {
+ fprintf(stderr, " global frame, no callee\n");
+ }
+
+ fprintf(stderr, " file %s line %u\n", script()->filename(),
+ script()->lineno());
+
+ fprintf(stderr, " script = %p, pc = %p\n", (void*)script(), pc());
+ fprintf(stderr, " current op: %s\n", CodeName(JSOp(*pc())));
+
+ if (!more()) {
+ numActualArgs();
+ }
+
+ SnapshotIterator si = snapshotIterator();
+ fprintf(stderr, " slots: %u\n", si.numAllocations() - 1);
+ for (unsigned i = 0; i < si.numAllocations() - 1; i++) {
+ if (isFunction) {
+ if (i == 0) {
+ fprintf(stderr, " env chain: ");
+ } else if (i == 1) {
+ fprintf(stderr, " this: ");
+ } else if (i - 2 < calleeTemplate()->nargs()) {
+ fprintf(stderr, " formal (arg %u): ", i - 2);
+ } else {
+ if (i - 2 == calleeTemplate()->nargs() &&
+ numActualArgs() > calleeTemplate()->nargs()) {
+ DumpOverflownOp d(calleeTemplate()->nargs());
+ unaliasedForEachActual(TlsContext.get(), d, fallback);
+ }
+
+ fprintf(stderr, " slot %d: ", int(i - 2 - calleeTemplate()->nargs()));
+ }
+    } else {
+      fprintf(stderr, "  slot %u: ", i);
+    }
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ DumpValue(si.maybeRead(fallback));
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+
+ fputc('\n', stderr);
+}
+
+JitFrameLayout* InvalidationBailoutStack::fp() const {
+ return (JitFrameLayout*)(sp() + ionScript_->frameSize());
+}
+
+void InvalidationBailoutStack::checkInvariants() const {
+#ifdef DEBUG
+ JitFrameLayout* frame = fp();
+ CalleeToken token = frame->calleeToken();
+ MOZ_ASSERT(token);
+
+ uint8_t* rawBase = ionScript()->method()->raw();
+ uint8_t* rawLimit = rawBase + ionScript()->method()->instructionsSize();
+ uint8_t* osiPoint = osiPointReturnAddress();
+ MOZ_ASSERT(rawBase <= osiPoint && osiPoint <= rawLimit);
+#endif
+}
+
+void AssertJitStackInvariants(JSContext* cx) {
+ for (JitActivationIterator activations(cx); !activations.done();
+ ++activations) {
+ JitFrameIter iter(activations->asJit());
+ if (iter.isJSJit()) {
+ JSJitFrameIter& frames = iter.asJSJit();
+ size_t prevFrameSize = 0;
+ size_t frameSize = 0;
+ bool isScriptedCallee = false;
+ for (; !frames.done(); ++frames) {
+ size_t calleeFp = reinterpret_cast<size_t>(frames.fp());
+ size_t callerFp = reinterpret_cast<size_t>(frames.prevFp());
+ MOZ_ASSERT(callerFp >= calleeFp);
+ prevFrameSize = frameSize;
+ frameSize = callerFp - calleeFp;
+
+ if (frames.isScripted() &&
+ (frames.prevType() == FrameType::Rectifier ||
+ frames.prevType() == FrameType::BaselineInterpreterEntry)) {
+ MOZ_RELEASE_ASSERT(
+ frameSize % JitStackAlignment == 0,
+ "The rectifier and bli entry frame should keep the alignment");
+
+ size_t expectedFrameSize =
+ sizeof(Value) *
+ (frames.callee()->nargs() + 1 /* |this| argument */ +
+ frames.isConstructing() /* new.target */) +
+ sizeof(JitFrameLayout);
+ MOZ_RELEASE_ASSERT(frameSize >= expectedFrameSize,
+ "The frame is large enough to hold all arguments");
+ MOZ_RELEASE_ASSERT(expectedFrameSize + JitStackAlignment > frameSize,
+ "The frame size is optimal");
+ }
+
+ if (frames.isExitFrame()) {
+          // For the moment, we do not keep the JitStackAlignment for exit
+          // frames.
+ frameSize -= ExitFrameLayout::Size();
+ }
+
+ if (frames.isIonJS()) {
+          // Ideally, we should not have such a requirement, but should keep
+          // the alignment delta as part of the Safepoint such that we can pad
+          // accordingly when making out-of-line calls. In the meantime,
+          // let us have checkpoints where we can guarantee that
+          // everything can properly be aligned before adding complexity.
+ MOZ_RELEASE_ASSERT(
+ frames.ionScript()->frameSize() % JitStackAlignment == 0,
+ "Ensure that if the Ion frame is aligned, then the spill base is "
+ "also aligned");
+
+ if (isScriptedCallee) {
+ MOZ_RELEASE_ASSERT(prevFrameSize % JitStackAlignment == 0,
+ "The ion frame should keep the alignment");
+ }
+ }
+
+ // The stack is dynamically aligned by baseline stubs before calling
+ // any jitted code.
+ if (frames.prevType() == FrameType::BaselineStub && isScriptedCallee) {
+ MOZ_RELEASE_ASSERT(calleeFp % JitStackAlignment == 0,
+ "The baseline stub restores the stack alignment");
+ }
+
+ isScriptedCallee =
+ frames.isScripted() || frames.type() == FrameType::Rectifier;
+ }
+
+ MOZ_RELEASE_ASSERT(
+ JSJitFrameIter::isEntry(frames.type()),
+ "The first frame of a Jit activation should be an entry frame");
+ MOZ_RELEASE_ASSERT(
+ reinterpret_cast<size_t>(frames.fp()) % JitStackAlignment == 0,
+ "The entry frame should be properly aligned");
+ } else {
+ MOZ_ASSERT(iter.isWasm());
+ wasm::WasmFrameIter& frames = iter.asWasm();
+ while (!frames.done()) {
+ ++frames;
+ }
+ }
+ }
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/JitFrames.h b/js/src/jit/JitFrames.h
new file mode 100644
index 0000000000..c8bdcfe0df
--- /dev/null
+++ b/js/src/jit/JitFrames.h
@@ -0,0 +1,748 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitFrames_h
+#define jit_JitFrames_h
+
+#include "mozilla/Assertions.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/CalleeToken.h"
+#include "jit/MachineState.h"
+#include "jit/Registers.h"
+#include "js/Id.h"
+#include "js/TypeDecls.h"
+#include "js/Value.h"
+
+namespace js {
+
+namespace wasm {
+class Instance;
+}
+
+namespace jit {
+
+enum class FrameType;
+class IonScript;
+class JitActivation;
+class JitFrameLayout;
+struct SafepointSlotEntry;
+struct VMFunctionData;
+
+// [SMDOC] JIT Frame Layout
+//
+// Frame Headers:
+//
+// In between every two frames lies a small header describing both frames. This
+// header, minimally, contains a returnAddress word and a descriptor word (See
+// CommonFrameLayout). The descriptor describes the type of the older (caller)
+// frame, whereas the returnAddress describes the address the newer (callee)
+// frame will return to. For JitFrameLayout, the descriptor also stores the
+// number of arguments passed from the caller to the callee frame.
+//
+// Special Frames:
+//
+// Two special frame types exist:
+// - Entry frames begin a JitActivation, and therefore there is exactly one
+// per activation of EnterJit or EnterBaseline. These reuse JitFrameLayout.
+// - Exit frames are necessary to leave JIT code and enter C++, and thus,
+// C++ code will always begin iterating from the topmost exit frame.
+//
+// Approximate Layout:
+//
+// The layout of an Ion frame on the C stack is roughly:
+// argN _
+// ... \ - These are jsvals
+// arg0 /
+// -3 this _/
+// -2 callee
+// -1 descriptor
+// 0 returnAddress
+// .. locals ..
+
+// [SMDOC] Frame Descriptor Layout
+//
+// A frame descriptor word has the following data:
+//
+// high bits: [ numActualArgs |
+// has-cached-saved-frame bit |
+// low bits: frame type ]
+//
+// * numActualArgs: for JitFrameLayout, the number of arguments passed by the
+// caller.
+// * Has-cached-saved-frame bit: Used to power the LiveSavedFrameCache
+// optimization. See the comment in Activation.h
+// * Frame Type: BaselineJS, Exit, etc. (jit::FrameType)
+//
+
+static const uintptr_t FRAMETYPE_BITS = 4;
+static const uintptr_t FRAMETYPE_MASK = (1 << FRAMETYPE_BITS) - 1;
+static const uintptr_t HASCACHEDSAVEDFRAME_BIT = 1 << FRAMETYPE_BITS;
+static const uintptr_t NUMACTUALARGS_SHIFT =
+ FRAMETYPE_BITS + 1 /* HASCACHEDSAVEDFRAME_BIT */;
+
+struct BaselineBailoutInfo;
+
+enum class ExceptionResumeKind : int32_t {
+ // There is no exception handler in this activation.
+ // Return from the entry frame.
+ EntryFrame,
+
+ // The exception was caught in baseline.
+ // Restore state and jump to the catch block.
+ Catch,
+
+ // A finally block must be executed in baseline.
+ // Stash the exception on the stack and jump to the finally block.
+ Finally,
+
+ // We are forcing an early return with a specific return value.
+ // This is used by the debugger and when closing generators.
+ // Immediately return from the current frame with the given value.
+ ForcedReturnBaseline,
+ ForcedReturnIon,
+
+ // This frame is currently executing in Ion, but we must bail out
+ // to baseline before handling the exception.
+ // Jump to the bailout tail stub.
+ Bailout,
+
+ // The innermost frame was a wasm frame.
+ // Return to the wasm entry frame.
+ Wasm,
+
+ // The exception was caught by a wasm catch handler.
+ // Restore state and jump to it.
+ WasmCatch
+};
+
+// Data needed to recover from an exception.
+struct ResumeFromException {
+ uint8_t* framePointer;
+ uint8_t* stackPointer;
+ uint8_t* target;
+ ExceptionResumeKind kind;
+ wasm::Instance* instance;
+
+ // Value to push when resuming into a |finally| block.
+ // Also used by Wasm to send the exception object to the throw stub.
+ JS::Value exception;
+
+ BaselineBailoutInfo* bailoutInfo;
+
+#if defined(JS_CODEGEN_ARM64)
+ uint64_t padding_;
+#endif
+
+ static size_t offsetOfFramePointer() {
+ return offsetof(ResumeFromException, framePointer);
+ }
+ static size_t offsetOfStackPointer() {
+ return offsetof(ResumeFromException, stackPointer);
+ }
+ static size_t offsetOfTarget() {
+ return offsetof(ResumeFromException, target);
+ }
+ static size_t offsetOfKind() { return offsetof(ResumeFromException, kind); }
+ static size_t offsetOfInstance() {
+ return offsetof(ResumeFromException, instance);
+ }
+ static size_t offsetOfException() {
+ return offsetof(ResumeFromException, exception);
+ }
+ static size_t offsetOfBailoutInfo() {
+ return offsetof(ResumeFromException, bailoutInfo);
+ }
+};
+
+#if defined(JS_CODEGEN_ARM64)
+static_assert(sizeof(ResumeFromException) % 16 == 0,
+ "ResumeFromException should be aligned");
+#endif
+
+void HandleException(ResumeFromException* rfe);
+
+void EnsureUnwoundJitExitFrame(JitActivation* act, JitFrameLayout* frame);
+
+void TraceJitActivations(JSContext* cx, JSTracer* trc);
+
+void UpdateJitActivationsForMinorGC(JSRuntime* rt);
+
+static inline uint32_t MakeFrameDescriptor(FrameType type) {
+ return uint32_t(type);
+}
+
+// For JitFrameLayout, the descriptor also stores the number of arguments passed
+// by the caller. Note that |type| is the type of the *older* frame and |argc|
+// is the number of arguments passed to the *newer* frame.
+static inline uint32_t MakeFrameDescriptorForJitCall(FrameType type,
+ uint32_t argc) {
+ uint32_t descriptor = (argc << NUMACTUALARGS_SHIFT) | uint32_t(type);
+ MOZ_ASSERT((descriptor >> NUMACTUALARGS_SHIFT) == argc,
+ "argc must fit in descriptor");
+ return descriptor;
+}
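+
+// For example, with FRAMETYPE_BITS == 4 above (so NUMACTUALARGS_SHIFT == 5),
+// MakeFrameDescriptorForJitCall(type, 2) yields (2 << 5) | uint32_t(type):
+// argc in the high bits, the caller's frame type in the low four bits, and
+// the has-cached-saved-frame bit clear.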
+
+// Returns the JSScript associated with the topmost JIT frame.
+JSScript* GetTopJitJSScript(JSContext* cx);
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_ARM64)
+uint8_t* alignDoubleSpill(uint8_t* pointer);
+#else
+inline uint8_t* alignDoubleSpill(uint8_t* pointer) {
+ // This is a no-op on most platforms.
+ return pointer;
+}
+#endif
+
+// Layout of the frame prefix. This assumes the stack architecture grows down.
+// If this is ever not the case, we'll have to refactor.
+class CommonFrameLayout {
+ uint8_t* callerFramePtr_;
+ uint8_t* returnAddress_;
+ uintptr_t descriptor_;
+
+ public:
+ static constexpr size_t offsetOfDescriptor() {
+ return offsetof(CommonFrameLayout, descriptor_);
+ }
+ uintptr_t descriptor() const { return descriptor_; }
+ static constexpr size_t offsetOfReturnAddress() {
+ return offsetof(CommonFrameLayout, returnAddress_);
+ }
+ FrameType prevType() const { return FrameType(descriptor_ & FRAMETYPE_MASK); }
+ void changePrevType(FrameType type) {
+ descriptor_ &= ~FRAMETYPE_MASK;
+ descriptor_ |= uintptr_t(type);
+ }
+ bool hasCachedSavedFrame() const {
+ return descriptor_ & HASCACHEDSAVEDFRAME_BIT;
+ }
+ void setHasCachedSavedFrame() { descriptor_ |= HASCACHEDSAVEDFRAME_BIT; }
+ void clearHasCachedSavedFrame() { descriptor_ &= ~HASCACHEDSAVEDFRAME_BIT; }
+ uint8_t* returnAddress() const { return returnAddress_; }
+ void setReturnAddress(uint8_t* addr) { returnAddress_ = addr; }
+
+ uint8_t* callerFramePtr() const { return callerFramePtr_; }
+ static constexpr size_t offsetOfCallerFramePtr() {
+ return offsetof(CommonFrameLayout, callerFramePtr_);
+ }
+ static constexpr size_t bytesPoppedAfterCall() {
+ // The return address and frame pointer are popped by the callee/call.
+ return 2 * sizeof(void*);
+ }
+};
+
+class JitFrameLayout : public CommonFrameLayout {
+ CalleeToken calleeToken_;
+
+ public:
+ CalleeToken calleeToken() const { return calleeToken_; }
+ void replaceCalleeToken(CalleeToken calleeToken) {
+ calleeToken_ = calleeToken;
+ }
+
+ static constexpr size_t offsetOfCalleeToken() {
+ return offsetof(JitFrameLayout, calleeToken_);
+ }
+ static constexpr size_t offsetOfThis() { return sizeof(JitFrameLayout); }
+ static constexpr size_t offsetOfActualArgs() {
+ return offsetOfThis() + sizeof(JS::Value);
+ }
+ static constexpr size_t offsetOfActualArg(size_t arg) {
+ return offsetOfActualArgs() + arg * sizeof(JS::Value);
+ }
+
+ JS::Value& thisv() {
+ MOZ_ASSERT(CalleeTokenIsFunction(calleeToken()));
+ return thisAndActualArgs()[0];
+ }
+ JS::Value* thisAndActualArgs() {
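+    // |this| and the actual arguments are laid out immediately after the
+    // JitFrameLayout header (see offsetOfThis above).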
+ MOZ_ASSERT(CalleeTokenIsFunction(calleeToken()));
+ return (JS::Value*)(this + 1);
+ }
+ JS::Value* actualArgs() { return thisAndActualArgs() + 1; }
+ uintptr_t numActualArgs() const {
+ return descriptor() >> NUMACTUALARGS_SHIFT;
+ }
+
+ // Computes a reference to a stack or argument slot, where a slot is a
+ // distance from the base frame pointer, as would be used for LStackSlot
+ // or LArgument.
+ uintptr_t* slotRef(SafepointSlotEntry where);
+
+ static inline size_t Size() { return sizeof(JitFrameLayout); }
+};
+
+class BaselineInterpreterEntryFrameLayout : public JitFrameLayout {
+ public:
+ static inline size_t Size() {
+ return sizeof(BaselineInterpreterEntryFrameLayout);
+ }
+};
+
+class RectifierFrameLayout : public JitFrameLayout {
+ public:
+ static inline size_t Size() { return sizeof(RectifierFrameLayout); }
+};
+
+class WasmToJSJitFrameLayout : public JitFrameLayout {
+ public:
+ static inline size_t Size() { return sizeof(WasmToJSJitFrameLayout); }
+};
+
+class IonICCallFrameLayout : public CommonFrameLayout {
+ protected:
+ // Pointer to root the stub's JitCode.
+ JitCode* stubCode_;
+
+ public:
+ JitCode** stubCode() { return &stubCode_; }
+ static size_t Size() { return sizeof(IonICCallFrameLayout); }
+};
+
+enum class ExitFrameType : uint8_t {
+ CallNative = 0x0,
+ ConstructNative = 0x1,
+ IonDOMGetter = 0x2,
+ IonDOMSetter = 0x3,
+ IonDOMMethod = 0x4,
+ IonOOLNative = 0x5,
+ IonOOLProxy = 0x6,
+ WasmGenericJitEntry = 0x7,
+ DirectWasmJitCall = 0x8,
+ UnwoundJit = 0xFB,
+ InterpreterStub = 0xFC,
+ VMFunction = 0xFD,
+ LazyLink = 0xFE,
+ Bare = 0xFF,
+};
+
+// GC related data used to keep alive data surrounding the Exit frame.
+class ExitFooterFrame {
+ // Stores the ExitFrameType or, for ExitFrameType::VMFunction, the
+ // VMFunctionData*.
+ uintptr_t data_;
+
+ public:
+ static constexpr size_t Size() { return sizeof(ExitFooterFrame); }
+ void setUnwoundJitExitFrame() {
+ data_ = uintptr_t(ExitFrameType::UnwoundJit);
+ }
+ ExitFrameType type() const {
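+    // A VMFunctionData* is always larger than UINT8_MAX, so any data_ value
+    // that fits in a byte is a plain ExitFrameType tag.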
+ static_assert(sizeof(ExitFrameType) == sizeof(uint8_t),
+ "Code assumes ExitFrameType fits in a byte");
+ if (data_ > UINT8_MAX) {
+ return ExitFrameType::VMFunction;
+ }
+ MOZ_ASSERT(ExitFrameType(data_) != ExitFrameType::VMFunction);
+ return ExitFrameType(data_);
+ }
+ inline const VMFunctionData* function() const {
+ MOZ_ASSERT(type() == ExitFrameType::VMFunction);
+ return reinterpret_cast<const VMFunctionData*>(data_);
+ }
+
+#ifdef JS_CODEGEN_MIPS32
+ uint8_t* alignedForABI() {
+ // See: MacroAssemblerMIPSCompat::alignStackPointer()
+ uint8_t* address = reinterpret_cast<uint8_t*>(this);
+ address -= sizeof(intptr_t);
+ return alignDoubleSpill(address);
+ }
+#else
+ uint8_t* alignedForABI() {
+ // This is a no-op on non-MIPS platforms.
+ return reinterpret_cast<uint8_t*>(this);
+ }
+#endif
+
+ // This should only be called for function()->outParam == Type_Handle
+ template <typename T>
+ T* outParam() {
+ uint8_t* address = alignedForABI();
+ return reinterpret_cast<T*>(address - sizeof(T));
+ }
+};
+
+class NativeExitFrameLayout;
+class IonOOLNativeExitFrameLayout;
+class IonOOLProxyExitFrameLayout;
+class IonDOMExitFrameLayout;
+
+// This is the frame layout when we are exiting Ion code and are about to
+// enter platform ABI code.
+class ExitFrameLayout : public CommonFrameLayout {
+ inline uint8_t* top() { return reinterpret_cast<uint8_t*>(this + 1); }
+
+ public:
+ static inline size_t Size() { return sizeof(ExitFrameLayout); }
+ static inline size_t SizeWithFooter() {
+ return Size() + ExitFooterFrame::Size();
+ }
+
+ inline ExitFooterFrame* footer() {
+ uint8_t* sp = reinterpret_cast<uint8_t*>(this);
+ return reinterpret_cast<ExitFooterFrame*>(sp - ExitFooterFrame::Size());
+ }
+
+ // argBase targets the point which precedes the exit frame. The arguments of
+ // each VM wrapper are pushed before the exit frame. This corresponds exactly
+ // to the value of the argBase register of the generateVMWrapper function.
+ inline uint8_t* argBase() {
+ MOZ_ASSERT(isWrapperExit());
+ return top();
+ }
+
+ inline bool isWrapperExit() {
+ return footer()->type() == ExitFrameType::VMFunction;
+ }
+ inline bool isBareExit() { return footer()->type() == ExitFrameType::Bare; }
+ inline bool isUnwoundJitExit() {
+ return footer()->type() == ExitFrameType::UnwoundJit;
+ }
+
+ // See the various exit frame layouts below.
+ template <typename T>
+ inline bool is() {
+ return footer()->type() == T::Type();
+ }
+ template <typename T>
+ inline T* as() {
+ MOZ_ASSERT(this->is<T>());
+ return reinterpret_cast<T*>(footer());
+ }
+};
+
+// Cannot inherit implementation since we need to extend the top of
+// ExitFrameLayout.
+class NativeExitFrameLayout {
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+ uintptr_t argc_;
+
+ // We need to split the Value into 2 fields of 32 bits, otherwise the C++
+ // compiler may add some padding between the fields.
+ uint32_t loCalleeResult_;
+ uint32_t hiCalleeResult_;
+
+ public:
+ static inline size_t Size() { return sizeof(NativeExitFrameLayout); }
+
+ static size_t offsetOfResult() {
+ return offsetof(NativeExitFrameLayout, loCalleeResult_);
+ }
+ inline JS::Value* vp() {
+ return reinterpret_cast<JS::Value*>(&loCalleeResult_);
+ }
+ inline uintptr_t argc() const { return argc_; }
+};
+
+class CallNativeExitFrameLayout : public NativeExitFrameLayout {
+ public:
+ static ExitFrameType Type() { return ExitFrameType::CallNative; }
+};
+
+class ConstructNativeExitFrameLayout : public NativeExitFrameLayout {
+ public:
+ static ExitFrameType Type() { return ExitFrameType::ConstructNative; }
+};
+
+template <>
+inline bool ExitFrameLayout::is<NativeExitFrameLayout>() {
+ return is<CallNativeExitFrameLayout>() ||
+ is<ConstructNativeExitFrameLayout>();
+}
+
+class IonOOLNativeExitFrameLayout {
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+
+ // pointer to root the stub's JitCode
+ JitCode* stubCode_;
+
+ uintptr_t argc_;
+
+ // We need to split the Value into 2 fields of 32 bits, otherwise the C++
+ // compiler may add some padding between the fields.
+ uint32_t loCalleeResult_;
+ uint32_t hiCalleeResult_;
+
+ // Split Value for |this| and args above.
+ uint32_t loThis_;
+ uint32_t hiThis_;
+
+ public:
+ static ExitFrameType Type() { return ExitFrameType::IonOOLNative; }
+
+ static inline size_t Size(size_t argc) {
+ // The frame accounts for the callee/result and |this|, so we only need
+ // args.
+ return sizeof(IonOOLNativeExitFrameLayout) + (argc * sizeof(JS::Value));
+ }
+
+ static size_t offsetOfResult() {
+ return offsetof(IonOOLNativeExitFrameLayout, loCalleeResult_);
+ }
+
+ inline JitCode** stubCode() { return &stubCode_; }
+ inline JS::Value* vp() {
+ return reinterpret_cast<JS::Value*>(&loCalleeResult_);
+ }
+ inline JS::Value* thisp() { return reinterpret_cast<JS::Value*>(&loThis_); }
+ inline uintptr_t argc() const { return argc_; }
+};
+
+// ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
+// MutableHandleValue vp)
+// ProxyCallProperty(JSContext* cx, HandleObject proxy, HandleId id,
+// MutableHandleValue vp)
+// ProxySetProperty(JSContext* cx, HandleObject proxy, HandleId id,
+// MutableHandleValue vp, bool strict)
+class IonOOLProxyExitFrameLayout {
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+
+ // The proxy object.
+ JSObject* proxy_;
+
+ // id for HandleId
+ jsid id_;
+
+ // Space for the MutableHandleValue result. Use two uint32_t fields so the
+ // compiler doesn't add alignment padding.
+ uint32_t vp0_;
+ uint32_t vp1_;
+
+ // pointer to root the stub's JitCode
+ JitCode* stubCode_;
+
+ public:
+ static ExitFrameType Type() { return ExitFrameType::IonOOLProxy; }
+
+ static inline size_t Size() { return sizeof(IonOOLProxyExitFrameLayout); }
+
+ static size_t offsetOfResult() {
+ return offsetof(IonOOLProxyExitFrameLayout, vp0_);
+ }
+
+ inline JitCode** stubCode() { return &stubCode_; }
+ inline JS::Value* vp() { return reinterpret_cast<JS::Value*>(&vp0_); }
+ inline jsid* id() { return &id_; }
+ inline JSObject** proxy() { return &proxy_; }
+};
+
+class IonDOMExitFrameLayout {
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+ JSObject* thisObj;
+
+ // We need to split the Value into 2 fields of 32 bits, otherwise the C++
+ // compiler may add some padding between the fields.
+ uint32_t loCalleeResult_;
+ uint32_t hiCalleeResult_;
+
+ public:
+ static ExitFrameType GetterType() { return ExitFrameType::IonDOMGetter; }
+ static ExitFrameType SetterType() { return ExitFrameType::IonDOMSetter; }
+
+ static inline size_t Size() { return sizeof(IonDOMExitFrameLayout); }
+
+ static size_t offsetOfResult() {
+ return offsetof(IonDOMExitFrameLayout, loCalleeResult_);
+ }
+ inline JS::Value* vp() {
+ return reinterpret_cast<JS::Value*>(&loCalleeResult_);
+ }
+ inline JSObject** thisObjAddress() { return &thisObj; }
+ inline bool isMethodFrame();
+};
+
+struct IonDOMMethodExitFrameLayoutTraits;
+
+class IonDOMMethodExitFrameLayout {
+ protected: // only to silence a clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+ // This must be the last thing pushed, so as to stay common with
+ // IonDOMExitFrameLayout.
+ JSObject* thisObj_;
+ JS::Value* argv_;
+ uintptr_t argc_;
+
+ // We need to split the Value into 2 fields of 32 bits, otherwise the C++
+ // compiler may add some padding between the fields.
+ uint32_t loCalleeResult_;
+ uint32_t hiCalleeResult_;
+
+ friend struct IonDOMMethodExitFrameLayoutTraits;
+
+ public:
+ static ExitFrameType Type() { return ExitFrameType::IonDOMMethod; }
+
+ static inline size_t Size() { return sizeof(IonDOMMethodExitFrameLayout); }
+
+ static size_t offsetOfResult() {
+ return offsetof(IonDOMMethodExitFrameLayout, loCalleeResult_);
+ }
+
+ inline JS::Value* vp() {
+ // The code in visitCallDOMNative depends on this static assert holding
+ static_assert(
+ offsetof(IonDOMMethodExitFrameLayout, loCalleeResult_) ==
+ (offsetof(IonDOMMethodExitFrameLayout, argc_) + sizeof(uintptr_t)));
+ return reinterpret_cast<JS::Value*>(&loCalleeResult_);
+ }
+ inline JSObject** thisObjAddress() { return &thisObj_; }
+ inline uintptr_t argc() { return argc_; }
+};
+
+inline bool IonDOMExitFrameLayout::isMethodFrame() {
+ return footer_.type() == IonDOMMethodExitFrameLayout::Type();
+}
+
+template <>
+inline bool ExitFrameLayout::is<IonDOMExitFrameLayout>() {
+ ExitFrameType type = footer()->type();
+ return type == IonDOMExitFrameLayout::GetterType() ||
+ type == IonDOMExitFrameLayout::SetterType() ||
+ type == IonDOMMethodExitFrameLayout::Type();
+}
+
+template <>
+inline IonDOMExitFrameLayout* ExitFrameLayout::as<IonDOMExitFrameLayout>() {
+ MOZ_ASSERT(is<IonDOMExitFrameLayout>());
+ return reinterpret_cast<IonDOMExitFrameLayout*>(footer());
+}
+
+struct IonDOMMethodExitFrameLayoutTraits {
+ static const size_t offsetOfArgcFromArgv =
+ offsetof(IonDOMMethodExitFrameLayout, argc_) -
+ offsetof(IonDOMMethodExitFrameLayout, argv_);
+};
+
+// Cannot inherit implementation since we need to extend the top of
+// ExitFrameLayout.
+class CalledFromJitExitFrameLayout {
+ protected: // silence clang warning about unused private fields
+ ExitFooterFrame footer_;
+ JitFrameLayout exit_;
+
+ public:
+ static inline size_t Size() { return sizeof(CalledFromJitExitFrameLayout); }
+ inline JitFrameLayout* jsFrame() { return &exit_; }
+ static size_t offsetOfExitFrame() {
+ return offsetof(CalledFromJitExitFrameLayout, exit_);
+ }
+};
+
+class LazyLinkExitFrameLayout : public CalledFromJitExitFrameLayout {
+ public:
+ static ExitFrameType Type() { return ExitFrameType::LazyLink; }
+};
+
+class InterpreterStubExitFrameLayout : public CalledFromJitExitFrameLayout {
+ public:
+ static ExitFrameType Type() { return ExitFrameType::InterpreterStub; }
+};
+
+class WasmGenericJitEntryFrameLayout : CalledFromJitExitFrameLayout {
+ public:
+ static ExitFrameType Type() { return ExitFrameType::WasmGenericJitEntry; }
+};
+
+template <>
+inline bool ExitFrameLayout::is<CalledFromJitExitFrameLayout>() {
+ return is<InterpreterStubExitFrameLayout>() ||
+ is<LazyLinkExitFrameLayout>() || is<WasmGenericJitEntryFrameLayout>();
+}
+
+template <>
+inline CalledFromJitExitFrameLayout*
+ExitFrameLayout::as<CalledFromJitExitFrameLayout>() {
+ MOZ_ASSERT(is<CalledFromJitExitFrameLayout>());
+ uint8_t* sp = reinterpret_cast<uint8_t*>(this);
+ sp -= CalledFromJitExitFrameLayout::offsetOfExitFrame();
+ return reinterpret_cast<CalledFromJitExitFrameLayout*>(sp);
+}
+
+class DirectWasmJitCallFrameLayout {
+ protected: // silence clang warning about unused private fields
+ ExitFooterFrame footer_;
+ ExitFrameLayout exit_;
+
+ public:
+ static ExitFrameType Type() { return ExitFrameType::DirectWasmJitCall; }
+};
+
+class ICStub;
+
+class BaselineStubFrameLayout : public CommonFrameLayout {
+ // Info on the stack
+ //
+ // +-----------------------+
+ // |BaselineStubFrameLayout|
+ // +-----------------------+
+ // | - Descriptor | => Marks end of FrameType::BaselineJS
+ // | - Return address |
+ // | - CallerFramePtr |
+ // +-----------------------+
+ // | - StubPtr | Technically this last field is not part
+ // +-----------------------+ of the frame layout.
+
+ public:
+ static constexpr size_t ICStubOffset = sizeof(void*);
+ static constexpr int ICStubOffsetFromFP = -int(ICStubOffset);
+
+ static inline size_t Size() { return sizeof(BaselineStubFrameLayout); }
+
+ inline ICStub* maybeStubPtr() {
+ uint8_t* fp = reinterpret_cast<uint8_t*>(this);
+ return *reinterpret_cast<ICStub**>(fp - ICStubOffset);
+ }
+};
+
+// An invalidation bailout stack is at the stack pointer for the callee frame.
+class InvalidationBailoutStack {
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+ IonScript* ionScript_;
+ uint8_t* osiPointReturnAddress_;
+
+ public:
+ uint8_t* sp() const {
+ return (uint8_t*)this + sizeof(InvalidationBailoutStack);
+ }
+ JitFrameLayout* fp() const;
+ MachineState machine() { return MachineState::FromBailout(regs_, fpregs_); }
+
+ IonScript* ionScript() const { return ionScript_; }
+ uint8_t* osiPointReturnAddress() const { return osiPointReturnAddress_; }
+ static size_t offsetOfFpRegs() {
+ return offsetof(InvalidationBailoutStack, fpregs_);
+ }
+ static size_t offsetOfRegs() {
+ return offsetof(InvalidationBailoutStack, regs_);
+ }
+
+ void checkInvariants() const;
+};
+
+void GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes);
+
+// Baseline requires one slot for this/argument type checks.
+static const uint32_t MinJITStackSize = 1;
+
+} /* namespace jit */
+} /* namespace js */
+
+#endif /* jit_JitFrames_h */
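
Side note on the tagging trick in ExitFooterFrame::type(): data_ holds either a byte-sized ExitFrameType or a VMFunctionData pointer, and the two cases are told apart purely by magnitude, since a real pointer never fits in a byte. A minimal standalone sketch of that scheme, using hypothetical stand-in types rather than the SpiderMonkey ones:

#include <cassert>
#include <cstdint>

// Stand-ins: a byte-sized frame tag and a fake VMFunctionData.
enum class FrameTag : uint8_t { VMFunction = 0xFD, Bare = 0xFF };
struct FakeVMFunctionData { const char* name; };

class TaggedFooter {
  uintptr_t data_;

 public:
  explicit TaggedFooter(FrameTag tag) : data_(uintptr_t(tag)) {}
  explicit TaggedFooter(const FakeVMFunctionData* fun)
      : data_(reinterpret_cast<uintptr_t>(fun)) {}

  FrameTag tag() const {
    // A real pointer is always larger than a byte-sized tag, so any value
    // above UINT8_MAX must be the VMFunction case.
    if (data_ > UINT8_MAX) {
      return FrameTag::VMFunction;
    }
    return FrameTag(data_);
  }
  const FakeVMFunctionData* function() const {
    assert(tag() == FrameTag::VMFunction);
    return reinterpret_cast<const FakeVMFunctionData*>(data_);
  }
};

int main() {
  static FakeVMFunctionData fun{"SomeVMFunction"};
  TaggedFooter bare(FrameTag::Bare);
  TaggedFooter vm(&fun);
  assert(bare.tag() == FrameTag::Bare);
  assert(vm.tag() == FrameTag::VMFunction && vm.function() == &fun);
  return 0;
}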
diff --git a/js/src/jit/JitHints-inl.h b/js/src/jit/JitHints-inl.h
new file mode 100644
index 0000000000..6316af936e
--- /dev/null
+++ b/js/src/jit/JitHints-inl.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitHints_inl_h
+#define jit_JitHints_inl_h
+
+#include "jit/JitHints.h"
+#include "mozilla/HashFunctions.h"
+
+namespace js::jit {
+
+inline JitHintsMap::ScriptKey JitHintsMap::getScriptKey(
+ JSScript* script) const {
+ if (ScriptKey key = script->filenameHash()) {
+ return mozilla::AddToHash(key, script->sourceStart());
+ }
+ return 0;
+}
+
+inline void JitHintsMap::incrementEntryCount() {
+ // Clear the cache if we've exceeded the entry count (MaxEntries_) that
+ // bounds the false positive rate.
+ if (++entryCount_ > MaxEntries_) {
+ map_.clear();
+ entryCount_ = 0;
+ }
+}
+
+inline void JitHintsMap::setEagerBaselineHint(JSScript* script) {
+ ScriptKey key = getScriptKey(script);
+ if (!key) {
+ return;
+ }
+
+ // If the entry already exists, don't increment entryCount.
+ if (map_.mightContain(key)) {
+ return;
+ }
+
+ // Increment entry count, and possibly clear the cache.
+ incrementEntryCount();
+
+ script->setNoEagerBaselineHint(false);
+ map_.add(key);
+}
+
+inline bool JitHintsMap::mightHaveEagerBaselineHint(JSScript* script) const {
+ if (ScriptKey key = getScriptKey(script)) {
+ return map_.mightContain(key);
+ }
+ script->setNoEagerBaselineHint(true);
+ return false;
+}
+
+} // namespace js::jit
+
+#endif /* jit_JitHints_inl_h */
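
A rough sketch of how a consumer might drive this hint cache: consult the (possibly false-positive) hint before the normal warm-up gate, and record a hint once a script has actually been baseline compiled. The types below are simplified stand-ins (an exact set instead of mozilla::BitBloomFilter, a hypothetical Script struct); only the call pattern mirrors the header above.

#include <cstdint>
#include <string>
#include <unordered_set>

// Hypothetical stand-ins; the real code uses JSScript and mozilla::BitBloomFilter.
struct Script {
  std::string filename;
  uint32_t sourceStart;
  uint32_t warmUpCount;
};

class EagerBaselineHints {
  // An exact set never yields false positives, so this only demonstrates the
  // call pattern, not the bloom filter's memory/accuracy trade-off.
  std::unordered_set<uint64_t> set_;

  static uint64_t key(const Script& s) {
    return std::hash<std::string>{}(s.filename) * 31 + s.sourceStart;
  }

 public:
  void setEagerBaselineHint(const Script& s) { set_.insert(key(s)); }
  bool mightHaveEagerBaselineHint(const Script& s) const {
    return set_.count(key(s)) != 0;
  }
};

// Hypothetical warm-up gate: compile right away if this script was already
// baseline compiled earlier in the process.
static bool shouldBaselineCompile(const Script& s,
                                  const EagerBaselineHints& hints,
                                  uint32_t threshold) {
  return hints.mightHaveEagerBaselineHint(s) || s.warmUpCount >= threshold;
}

int main() {
  EagerBaselineHints hints;
  Script script{"app.js", 0, 3};
  bool eager = shouldBaselineCompile(script, hints, 100);  // false: no hint yet
  hints.setEagerBaselineHint(script);  // record after a successful compile
  bool eagerNextTime = shouldBaselineCompile(script, hints, 100);  // true
  return (!eager && eagerNextTime) ? 0 : 1;
}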
diff --git a/js/src/jit/JitHints.h b/js/src/jit/JitHints.h
new file mode 100644
index 0000000000..34b8da36d6
--- /dev/null
+++ b/js/src/jit/JitHints.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitHints_h
+#define jit_JitHints_h
+
+#include "mozilla/BloomFilter.h"
+#include "vm/JSScript.h"
+
+namespace js::jit {
+
+/*
+ * The JitHintsMap implements a BitBloomFilter to track whether or not a script,
+ * identified by filename+sourceStart, has been baseline compiled before in the
+ * same process. This can occur frequently during navigations.
+ *
+ * The bloom filter allows us to have very efficient storage and lookup costs,
+ * at the expense of occasional false positives. The number of entries added
+ * to the bloom filter is monitored to try to keep the false positive rate
+ * below 1%. If the entry count exceeds MaxEntries_, which indicates the false
+ * positive rate may exceed 1.5%, then the filter is completely cleared to
+ * reset the cache.
+ */
+
+class JitHintsMap {
+ // ScriptKey is a hash on the filename+sourceStart.
+ using ScriptKey = HashNumber;
+
+ static constexpr uint32_t CacheSize_ = 16;
+ mozilla::BitBloomFilter<CacheSize_, ScriptKey> map_;
+
+ /*
+ * MaxEntries_ is the approximate entry count for which the
+ * false positive rate will exceed p=0.015 using k=2 and m=2**CacheSize_.
+ * Formula is as follows:
+ * MaxEntries_ = floor(m / (-k / ln(1-exp(ln(p) / k))))
+ */
+ static constexpr uint32_t MaxEntries_ = 4281;
+ static_assert(CacheSize_ == 16 && MaxEntries_ == 4281,
+ "MaxEntries should be recalculated for given CacheSize.");
+
+ uint32_t entryCount_ = 0;
+
+ ScriptKey getScriptKey(JSScript* script) const;
+ void incrementEntryCount();
+
+ public:
+ void setEagerBaselineHint(JSScript* script);
+ bool mightHaveEagerBaselineHint(JSScript* script) const;
+};
+
+} // namespace js::jit
+#endif /* jit_JitHints_h */
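
As a sanity check of the MaxEntries_ constant, the formula from the comment can be evaluated directly with the stated values p = 0.015, k = 2 and m = 2**16; a small throwaway program:

#include <cmath>
#include <cstdio>

int main() {
  const double p = 0.015;    // target false positive rate
  const double k = 2.0;      // hash functions used by the bloom filter
  const double m = 65536.0;  // 2**CacheSize_ with CacheSize_ == 16

  // MaxEntries_ = floor(m / (-k / ln(1 - exp(ln(p) / k))))
  double maxEntries =
      std::floor(m / (-k / std::log(1.0 - std::exp(std::log(p) / k))));
  std::printf("MaxEntries_ = %.0f\n", maxEntries);  // prints 4281
  return 0;
}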
diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
new file mode 100644
index 0000000000..1dc24f1ce2
--- /dev/null
+++ b/js/src/jit/JitOptions.cpp
@@ -0,0 +1,414 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/JitOptions.h"
+
+#include <cstdlib>
+#include <type_traits>
+
+#include "vm/JSScript.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Maybe;
+
+namespace js {
+namespace jit {
+
+DefaultJitOptions JitOptions;
+
+static void Warn(const char* env, const char* value) {
+ fprintf(stderr, "Warning: I didn't understand %s=\"%s\"\n", env, value);
+}
+
+static Maybe<int> ParseInt(const char* str) {
+ char* endp;
+ int retval = strtol(str, &endp, 0);
+ if (*endp == '\0') {
+ return mozilla::Some(retval);
+ }
+ return mozilla::Nothing();
+}
+
+template <typename T>
+T overrideDefault(const char* param, T dflt) {
+ char* str = getenv(param);
+ if (!str) {
+ return dflt;
+ }
+ if constexpr (std::is_same_v<T, bool>) {
+ if (strcmp(str, "true") == 0 || strcmp(str, "yes") == 0) {
+ return true;
+ }
+ if (strcmp(str, "false") == 0 || strcmp(str, "no") == 0) {
+ return false;
+ }
+ Warn(param, str);
+ } else {
+ Maybe<int> value = ParseInt(str);
+ if (value.isSome()) {
+ return value.ref();
+ }
+ Warn(param, str);
+ }
+ return dflt;
+}
+
+#define SET_DEFAULT(var, dflt) var = overrideDefault("JIT_OPTION_" #var, dflt)
+DefaultJitOptions::DefaultJitOptions() {
+ // Whether to perform expensive graph-consistency DEBUG-only assertions.
+ // It can be useful to disable this to reduce DEBUG-compile time of large
+ // wasm programs.
+ SET_DEFAULT(checkGraphConsistency, true);
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // Emit extra code to verify live regs at the start of a VM call
+ // are not modified before its OsiPoint.
+ SET_DEFAULT(checkOsiPointRegisters, false);
+#endif
+
+ // Whether to enable extra code to perform dynamic validation of
+ // RangeAnalysis results.
+ SET_DEFAULT(checkRangeAnalysis, false);
+
+ // Toggles whether Alignment Mask Analysis is globally disabled.
+ SET_DEFAULT(disableAma, false);
+
+ // Toggles whether Effective Address Analysis is globally disabled.
+ SET_DEFAULT(disableEaa, false);
+
+ // Toggles whether Edge Case Analysis is globally disabled.
+ SET_DEFAULT(disableEdgeCaseAnalysis, false);
+
+ // Toggle whether global value numbering is globally disabled.
+ SET_DEFAULT(disableGvn, false);
+
+ // Toggles whether inlining is globally disabled.
+ SET_DEFAULT(disableInlining, false);
+
+ // Toggles whether loop invariant code motion is globally disabled.
+ SET_DEFAULT(disableLicm, false);
+
+ // Toggle whether branch pruning is globally disabled.
+ SET_DEFAULT(disablePruning, false);
+
+ // Toggles whether the iterator indices optimization is globally disabled.
+ SET_DEFAULT(disableIteratorIndices, false);
+
+ // Toggles whether instruction reordering is globally disabled.
+ SET_DEFAULT(disableInstructionReordering, false);
+
+ // Toggles whether Range Analysis is globally disabled.
+ SET_DEFAULT(disableRangeAnalysis, false);
+
+ // Toggles whether Recover instructions are globally disabled.
+ SET_DEFAULT(disableRecoverIns, false);
+
+ // Toggle whether eager scalar replacement is globally disabled.
+ SET_DEFAULT(disableScalarReplacement, false);
+
+ // Toggles whether CacheIR stubs are used.
+ SET_DEFAULT(disableCacheIR, false);
+
+ // Toggles whether sink code motion is globally disabled.
+ SET_DEFAULT(disableSink, true);
+
+ // Toggles whether redundant shape guard elimination is globally disabled.
+ SET_DEFAULT(disableRedundantShapeGuards, false);
+
+ // Toggles whether redundant GC barrier elimination is globally disabled.
+ SET_DEFAULT(disableRedundantGCBarriers, false);
+
+ // Toggles whether we verify that we don't recompile with the same CacheIR.
+ SET_DEFAULT(disableBailoutLoopCheck, false);
+
+ // Whether the Baseline Interpreter is enabled.
+ SET_DEFAULT(baselineInterpreter, true);
+
+ // Emit baseline interpreter and interpreter entry frames to distinguish which
+ // JSScript is being interpreted by external profilers.
+ // Enabled by default under --enable-perf, otherwise disabled.
+#if defined(JS_ION_PERF)
+ SET_DEFAULT(emitInterpreterEntryTrampoline, true);
+#else
+ SET_DEFAULT(emitInterpreterEntryTrampoline, false);
+#endif
+
+ // Whether the Baseline JIT is enabled.
+ SET_DEFAULT(baselineJit, true);
+
+ // Whether the IonMonkey JIT is enabled.
+ SET_DEFAULT(ion, true);
+
+ // Whether the IonMonkey and Baseline JITs are enabled for Trusted Principals.
+ // (Ignored if ion or baselineJit is set to true.)
+ SET_DEFAULT(jitForTrustedPrincipals, false);
+
+ // Whether the RegExp JIT is enabled.
+ SET_DEFAULT(nativeRegExp, true);
+
+ // Whether Warp should use ICs instead of transpiling Baseline CacheIR.
+ SET_DEFAULT(forceInlineCaches, false);
+
+ // Whether all ICs should be initialized as megamorphic ICs.
+ SET_DEFAULT(forceMegamorphicICs, false);
+
+ // Toggles whether large scripts are rejected.
+ SET_DEFAULT(limitScriptSize, true);
+
+ // Toggles whether functions may be entered at loop headers.
+ SET_DEFAULT(osr, true);
+
+ // Whether the JIT backend (used by JITs, Wasm, Baseline Interpreter) has been
+ // disabled for this process. See JS::DisableJitBackend.
+ SET_DEFAULT(disableJitBackend, false);
+
+ // Whether to enable extra code to perform dynamic validations.
+ SET_DEFAULT(runExtraChecks, false);
+
+ // How many invocations or loop iterations are needed before functions
+ // enter the Baseline Interpreter.
+ SET_DEFAULT(baselineInterpreterWarmUpThreshold, 10);
+
+ // How many invocations or loop iterations are needed before functions
+ // are compiled with the baseline compiler.
+ // Duplicated in all.js - ensure both match.
+ SET_DEFAULT(baselineJitWarmUpThreshold, 100);
+
+ // Disable eager baseline jit hints
+ SET_DEFAULT(disableJitHints, false);
+
+ // How many invocations or loop iterations are needed before functions
+ // are considered for trial inlining.
+ SET_DEFAULT(trialInliningWarmUpThreshold, 500);
+
+ // The initial warm-up count for ICScripts created by trial inlining.
+ //
+ // Note: the difference between trialInliningInitialWarmUpCount and
+ // trialInliningWarmUpThreshold must be:
+ //
+ // * Small enough to allow inlining multiple levels deep before the outer
+ // script reaches its normalIonWarmUpThreshold.
+ //
+ // * Greater than inliningEntryThreshold or no scripts can be inlined.
+ SET_DEFAULT(trialInliningInitialWarmUpCount, 250);
+
+ // How many invocations or loop iterations are needed before functions
+ // are compiled with the Ion compiler at OptimizationLevel::Normal.
+ // Duplicated in all.js - ensure both match.
+ SET_DEFAULT(normalIonWarmUpThreshold, 1500);
+
+ // How many invocations are needed before regexps are compiled to
+ // native code.
+ SET_DEFAULT(regexpWarmUpThreshold, 10);
+
+ // Number of exception bailouts (resuming into catch/finally block) before
+ // we invalidate and forbid Ion compilation.
+ SET_DEFAULT(exceptionBailoutThreshold, 10);
+
+ // Number of bailouts without invalidation before we set
+ // JSScript::hadFrequentBailouts and invalidate.
+ // Duplicated in all.js - ensure both match.
+ SET_DEFAULT(frequentBailoutThreshold, 10);
+
+ // Whether to run all debug checks in debug builds.
+ // Disabling might make it more enjoyable to run JS in debug builds.
+ SET_DEFAULT(fullDebugChecks, true);
+
+ // How many actual arguments are accepted on the C stack.
+ SET_DEFAULT(maxStackArgs, 20'000);
+
+ // How many times we will try to enter a script via OSR before
+ // invalidating the script.
+ SET_DEFAULT(osrPcMismatchesBeforeRecompile, 6000);
+
+ // The bytecode length limit for small function.
+ SET_DEFAULT(smallFunctionMaxBytecodeLength, 130);
+
+ // The minimum entry count for an IC stub before it can be trial-inlined.
+ SET_DEFAULT(inliningEntryThreshold, 100);
+
+ // An artificial testing limit for the maximum supported offset of
+ // pc-relative jump and call instructions.
+ SET_DEFAULT(jumpThreshold, UINT32_MAX);
+
+ // The branch pruning heuristic is based on a scoring system which looks at
+ // different metrics and provides a score. The score is computed as a
+ // projection where each factor defines the weight of each metric. Then this
+ // score is compared against a threshold to prevent a branch from being
+ // removed.
+ SET_DEFAULT(branchPruningHitCountFactor, 1);
+ SET_DEFAULT(branchPruningInstFactor, 10);
+ SET_DEFAULT(branchPruningBlockSpanFactor, 100);
+ SET_DEFAULT(branchPruningEffectfulInstFactor, 3500);
+ SET_DEFAULT(branchPruningThreshold, 4000);
+
+ // Limits on bytecode length and number of locals/arguments for Ion
+ // compilation. There are different (lower) limits for when off-thread Ion
+ // compilation isn't available.
+ SET_DEFAULT(ionMaxScriptSize, 100 * 1000);
+ SET_DEFAULT(ionMaxScriptSizeMainThread, 2 * 1000);
+ SET_DEFAULT(ionMaxLocalsAndArgs, 10 * 1000);
+ SET_DEFAULT(ionMaxLocalsAndArgsMainThread, 256);
+
+ // Force the register allocator to use instead of letting the optimization
+ // pass decide.
+ const char* forcedRegisterAllocatorEnv = "JIT_OPTION_forcedRegisterAllocator";
+ if (const char* env = getenv(forcedRegisterAllocatorEnv)) {
+ forcedRegisterAllocator = LookupRegisterAllocator(env);
+ if (!forcedRegisterAllocator.isSome()) {
+ Warn(forcedRegisterAllocatorEnv, env);
+ }
+ }
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ SET_DEFAULT(spectreIndexMasking, false);
+ SET_DEFAULT(spectreObjectMitigations, false);
+ SET_DEFAULT(spectreStringMitigations, false);
+ SET_DEFAULT(spectreValueMasking, false);
+ SET_DEFAULT(spectreJitToCxxCalls, false);
+#else
+ SET_DEFAULT(spectreIndexMasking, true);
+ SET_DEFAULT(spectreObjectMitigations, true);
+ SET_DEFAULT(spectreStringMitigations, true);
+ SET_DEFAULT(spectreValueMasking, true);
+ SET_DEFAULT(spectreJitToCxxCalls, true);
+#endif
+
+ // This is set to its actual value in InitializeJit.
+ SET_DEFAULT(supportsUnalignedAccesses, false);
+
+ // To access local (non-argument) slots, it's more efficient to use the frame
+ // pointer (FP) instead of the stack pointer (SP) as base register on x86 and
+ // x64 (because instructions are one byte shorter, for example).
+ //
+ // However, because this requires a negative offset from FP, on ARM64 it can
+ // be more efficient to use SP-relative addresses for larger stack frames
+ // because the range for load/store immediate offsets is [-256, 4095] and
+ // offsets outside this range will require an extra instruction.
+ //
+ // We default to FP-relative addresses on x86/x64 and SP-relative on other
+ // platforms, but to improve fuzzing we allow changing this in the shell:
+ //
+ // setJitCompilerOption("base-reg-for-locals", N); // 0 for SP, 1 for FP
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ baseRegForLocals = BaseRegForAddress::FP;
+#else
+ baseRegForLocals = BaseRegForAddress::SP;
+#endif
+
+ // Toggles the optimization whereby offsets are folded into loads and not
+ // included in the bounds check.
+ SET_DEFAULT(wasmFoldOffsets, true);
+
+ // Controls whether two-tiered compilation should be requested when
+ // compiling a new wasm module, independently of other heuristics, and
+ // should be delayed to test both baseline and ion paths in compiled code,
+ // as well as the transition from one tier to the other.
+ SET_DEFAULT(wasmDelayTier2, false);
+
+ // The wasm bytecode size up to which we accumulate functions, in order to
+ // compile efficiently on helper threads. Baseline code compiles much
+ // faster than Ion code so use scaled thresholds (see also bug 1320374).
+ SET_DEFAULT(wasmBatchBaselineThreshold, 10000);
+ SET_DEFAULT(wasmBatchIonThreshold, 1100);
+
+ // Controls how much assertion checking code is emitted
+ SET_DEFAULT(lessDebugCode, false);
+
+ // Whether the MegamorphicCache is enabled.
+ SET_DEFAULT(enableWatchtowerMegamorphic, true);
+
+ SET_DEFAULT(onlyInlineSelfHosted, false);
+ SET_DEFAULT(enableICFramePointers, false);
+
+ SET_DEFAULT(enableWasmJitExit, true);
+ SET_DEFAULT(enableWasmJitEntry, true);
+ SET_DEFAULT(enableWasmIonFastCalls, true);
+#ifdef WASM_CODEGEN_DEBUG
+ SET_DEFAULT(enableWasmImportCallSpew, false);
+ SET_DEFAULT(enableWasmFuncCallSpew, false);
+#endif
+
+ // This is used to control whether regexps tier up from interpreted to
+ // compiled. We control this with --no-native-regexp and
+ // --regexp-warmup-threshold.
+ SET_DEFAULT(regexp_tier_up, true);
+
+ // Dumps a representation of parsed regexps to stderr
+ SET_DEFAULT(trace_regexp_parser, false);
+ // Dumps the calls made to the regexp assembler to stderr
+ SET_DEFAULT(trace_regexp_assembler, false);
+ // Dumps the bytecodes interpreted by the regexp engine to stderr
+ SET_DEFAULT(trace_regexp_bytecodes, false);
+ // Dumps the changes made by the regexp peephole optimizer to stderr
+ SET_DEFAULT(trace_regexp_peephole_optimization, false);
+
+ // ***** Irregexp shim flags *****
+
+ // V8 uses this for differential fuzzing to handle stack overflows.
+ // We address the same problem in StackLimitCheck::HasOverflowed.
+ SET_DEFAULT(correctness_fuzzer_suppressions, false);
+ // Instead of using a flag for this, we provide an implementation of
+ // CanReadUnaligned in SMRegExpMacroAssembler.
+ SET_DEFAULT(enable_regexp_unaligned_accesses, false);
+ // This is used to guard an old prototype implementation of possessive
+ // quantifiers, which never got past the point of adding parser support.
+ SET_DEFAULT(regexp_possessive_quantifier, false);
+ // These affect the default level of optimization. We can still turn
+ // optimization off on a case-by-case basis in CompilePattern - for
+ // example, if a regexp is too long - so we might as well turn these
+ // flags on unconditionally.
+ SET_DEFAULT(regexp_optimization, true);
+#if MOZ_BIG_ENDIAN()
+ // Peephole optimization is not supported on big-endian platforms.
+ SET_DEFAULT(regexp_peephole_optimization, false);
+#else
+ SET_DEFAULT(regexp_peephole_optimization, true);
+#endif
+}
+
+bool DefaultJitOptions::isSmallFunction(JSScript* script) const {
+ return script->length() <= smallFunctionMaxBytecodeLength;
+}
+
+void DefaultJitOptions::enableGvn(bool enable) { disableGvn = !enable; }
+
+void DefaultJitOptions::setEagerBaselineCompilation() {
+ baselineInterpreterWarmUpThreshold = 0;
+ baselineJitWarmUpThreshold = 0;
+ regexpWarmUpThreshold = 0;
+}
+
+void DefaultJitOptions::setEagerIonCompilation() {
+ setEagerBaselineCompilation();
+ normalIonWarmUpThreshold = 0;
+}
+
+void DefaultJitOptions::setFastWarmUp() {
+ baselineInterpreterWarmUpThreshold = 4;
+ baselineJitWarmUpThreshold = 10;
+ trialInliningWarmUpThreshold = 14;
+ trialInliningInitialWarmUpCount = 12;
+ normalIonWarmUpThreshold = 30;
+
+ inliningEntryThreshold = 2;
+ smallFunctionMaxBytecodeLength = 2000;
+}
+
+void DefaultJitOptions::setNormalIonWarmUpThreshold(uint32_t warmUpThreshold) {
+ normalIonWarmUpThreshold = warmUpThreshold;
+}
+
+void DefaultJitOptions::resetNormalIonWarmUpThreshold() {
+ jit::DefaultJitOptions defaultValues;
+ setNormalIonWarmUpThreshold(defaultValues.normalIonWarmUpThreshold);
+}
+
+} // namespace jit
+} // namespace js
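
The SET_DEFAULT machinery above means any option can be overridden at startup through an environment variable named JIT_OPTION_<fieldName>, for example JIT_OPTION_disableInlining=yes or JIT_OPTION_baselineJitWarmUpThreshold=0x10. A condensed, standalone restatement of that parsing logic (std::optional standing in for mozilla::Maybe, with a hypothetical main as the harness):

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <optional>

// Same strtol behaviour as ParseInt() above: base 0 accepts decimal, hex and
// octal, and the whole string must be consumed.
static std::optional<int> parseInt(const char* str) {
  char* endp;
  long value = std::strtol(str, &endp, 0);
  if (*endp == '\0') {
    return static_cast<int>(value);
  }
  return std::nullopt;
}

static bool overrideBool(const char* param, bool dflt) {
  const char* str = std::getenv(param);
  if (!str) {
    return dflt;
  }
  if (!std::strcmp(str, "true") || !std::strcmp(str, "yes")) {
    return true;
  }
  if (!std::strcmp(str, "false") || !std::strcmp(str, "no")) {
    return false;
  }
  std::fprintf(stderr, "Warning: I didn't understand %s=\"%s\"\n", param, str);
  return dflt;
}

static int overrideInt(const char* param, int dflt) {
  const char* str = std::getenv(param);
  if (!str) {
    return dflt;
  }
  if (std::optional<int> value = parseInt(str)) {
    return *value;
  }
  std::fprintf(stderr, "Warning: I didn't understand %s=\"%s\"\n", param, str);
  return dflt;
}

int main() {
  bool disableInlining = overrideBool("JIT_OPTION_disableInlining", false);
  int warmUp = overrideInt("JIT_OPTION_baselineJitWarmUpThreshold", 100);
  std::printf("disableInlining=%d baselineJitWarmUpThreshold=%d\n",
              int(disableInlining), warmUp);
  return 0;
}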
diff --git a/js/src/jit/JitOptions.h b/js/src/jit/JitOptions.h
new file mode 100644
index 0000000000..b0599b012f
--- /dev/null
+++ b/js/src/jit/JitOptions.h
@@ -0,0 +1,184 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitOptions_h
+#define jit_JitOptions_h
+
+#include "mozilla/Maybe.h"
+
+#include "jit/IonTypes.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+namespace jit {
+
+// Possible register allocators which may be used.
+enum IonRegisterAllocator {
+ RegisterAllocator_Backtracking,
+ RegisterAllocator_Testbed,
+};
+
+// Which register to use as base register to access stack slots: frame pointer,
+// stack pointer, or whichever is the default for this platform. See comment
+// for baseRegForLocals in JitOptions.cpp for more information.
+enum class BaseRegForAddress { Default, FP, SP };
+
+static inline mozilla::Maybe<IonRegisterAllocator> LookupRegisterAllocator(
+ const char* name) {
+ if (!strcmp(name, "backtracking")) {
+ return mozilla::Some(RegisterAllocator_Backtracking);
+ }
+ if (!strcmp(name, "testbed")) {
+ return mozilla::Some(RegisterAllocator_Testbed);
+ }
+ return mozilla::Nothing();
+}
+
+struct DefaultJitOptions {
+ bool checkGraphConsistency;
+#ifdef CHECK_OSIPOINT_REGISTERS
+ bool checkOsiPointRegisters;
+#endif
+ bool checkRangeAnalysis;
+ bool runExtraChecks;
+ bool disableJitBackend;
+ bool disableJitHints;
+ bool disableAma;
+ bool disableEaa;
+ bool disableEdgeCaseAnalysis;
+ bool disableGvn;
+ bool disableInlining;
+ bool disableLicm;
+ bool disablePruning;
+ bool disableInstructionReordering;
+ bool disableIteratorIndices;
+ bool disableRangeAnalysis;
+ bool disableRecoverIns;
+ bool disableScalarReplacement;
+ bool disableCacheIR;
+ bool disableSink;
+ bool disableRedundantShapeGuards;
+ bool disableRedundantGCBarriers;
+ bool disableBailoutLoopCheck;
+ bool baselineInterpreter;
+ bool baselineJit;
+ bool ion;
+ bool jitForTrustedPrincipals;
+ bool nativeRegExp;
+ bool forceInlineCaches;
+ bool forceMegamorphicICs;
+ bool fullDebugChecks;
+ bool limitScriptSize;
+ bool osr;
+ bool wasmFoldOffsets;
+ bool wasmDelayTier2;
+ bool lessDebugCode;
+ bool enableWatchtowerMegamorphic;
+ bool onlyInlineSelfHosted;
+ bool enableICFramePointers;
+ bool enableWasmJitExit;
+ bool enableWasmJitEntry;
+ bool enableWasmIonFastCalls;
+#ifdef WASM_CODEGEN_DEBUG
+ bool enableWasmImportCallSpew;
+ bool enableWasmFuncCallSpew;
+#endif
+ bool emitInterpreterEntryTrampoline;
+ uint32_t baselineInterpreterWarmUpThreshold;
+ uint32_t baselineJitWarmUpThreshold;
+ uint32_t trialInliningWarmUpThreshold;
+ uint32_t trialInliningInitialWarmUpCount;
+ uint32_t normalIonWarmUpThreshold;
+ uint32_t regexpWarmUpThreshold;
+ uint32_t exceptionBailoutThreshold;
+ uint32_t frequentBailoutThreshold;
+ uint32_t maxStackArgs;
+ uint32_t osrPcMismatchesBeforeRecompile;
+ uint32_t smallFunctionMaxBytecodeLength;
+ uint32_t inliningEntryThreshold;
+ uint32_t jumpThreshold;
+ uint32_t branchPruningHitCountFactor;
+ uint32_t branchPruningInstFactor;
+ uint32_t branchPruningBlockSpanFactor;
+ uint32_t branchPruningEffectfulInstFactor;
+ uint32_t branchPruningThreshold;
+ uint32_t ionMaxScriptSize;
+ uint32_t ionMaxScriptSizeMainThread;
+ uint32_t ionMaxLocalsAndArgs;
+ uint32_t ionMaxLocalsAndArgsMainThread;
+ uint32_t wasmBatchBaselineThreshold;
+ uint32_t wasmBatchIonThreshold;
+ mozilla::Maybe<IonRegisterAllocator> forcedRegisterAllocator;
+
+ // Spectre mitigation flags. Each mitigation has its own flag so that the
+ // effectiveness of each mitigation can be measured with various proofs of
+ // concept.
+ bool spectreIndexMasking;
+ bool spectreObjectMitigations;
+ bool spectreStringMitigations;
+ bool spectreValueMasking;
+ bool spectreJitToCxxCalls;
+
+ bool supportsUnalignedAccesses;
+ BaseRegForAddress baseRegForLocals;
+
+ // Irregexp shim flags
+ bool correctness_fuzzer_suppressions;
+ bool enable_regexp_unaligned_accesses;
+ bool regexp_possessive_quantifier;
+ bool regexp_optimization;
+ bool regexp_peephole_optimization;
+ bool regexp_tier_up;
+ bool trace_regexp_assembler;
+ bool trace_regexp_bytecodes;
+ bool trace_regexp_parser;
+ bool trace_regexp_peephole_optimization;
+
+ DefaultJitOptions();
+ bool isSmallFunction(JSScript* script) const;
+ void setEagerBaselineCompilation();
+ void setEagerIonCompilation();
+ void setNormalIonWarmUpThreshold(uint32_t warmUpThreshold);
+ void resetNormalIonWarmUpThreshold();
+ void enableGvn(bool val);
+ void setFastWarmUp();
+
+ bool eagerIonCompilation() const { return normalIonWarmUpThreshold == 0; }
+};
+
+extern DefaultJitOptions JitOptions;
+
+inline bool HasJitBackend() {
+#if defined(JS_CODEGEN_NONE)
+ return false;
+#else
+ return !JitOptions.disableJitBackend;
+#endif
+}
+
+inline bool IsBaselineInterpreterEnabled() {
+ return HasJitBackend() && JitOptions.baselineInterpreter;
+}
+
+inline bool TooManyActualArguments(size_t nargs) {
+ return nargs > JitOptions.maxStackArgs;
+}
+
+} // namespace jit
+
+extern mozilla::Atomic<bool> fuzzingSafe;
+
+static inline bool IsFuzzing() {
+#ifdef FUZZING
+ return true;
+#else
+ return fuzzingSafe;
+#endif
+}
+
+} // namespace js
+
+#endif /* jit_JitOptions_h */
diff --git a/js/src/jit/JitRealm.h b/js/src/jit/JitRealm.h
new file mode 100644
index 0000000000..a423d9a1c3
--- /dev/null
+++ b/js/src/jit/JitRealm.h
@@ -0,0 +1,189 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitRealm_h
+#define jit_JitRealm_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/MemoryReporting.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "gc/Barrier.h"
+#include "gc/ZoneAllocator.h"
+#include "js/GCHashTable.h"
+#include "js/RootingAPI.h"
+#include "js/TracingAPI.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+
+MOZ_COLD void ReportOutOfMemory(JSContext* cx);
+
+namespace jit {
+
+class JitCode;
+
+class JitRealm {
+ friend class JitActivation;
+
+ // The JitRealm stores stubs to concatenate strings inline and perform RegExp
+ // calls inline. These bake in zone- and realm-specific pointers and can't be
+ // stored in JitRuntime. They also depend on the value of
+ // 'initialStringHeap' and must be flushed when its value changes.
+ //
+ // These are weak pointers, but they can be accessed during off-thread Ion
+ // compilation and therefore can't use the usual read barrier. Instead, we
+ // record which stubs have been read and perform the appropriate barriers in
+ // CodeGenerator::link().
+
+ enum StubIndex : uint32_t {
+ StringConcat = 0,
+ RegExpMatcher,
+ RegExpSearcher,
+ RegExpExecMatch,
+ RegExpExecTest,
+ Count
+ };
+
+ mozilla::EnumeratedArray<StubIndex, StubIndex::Count, WeakHeapPtr<JitCode*>>
+ stubs_;
+
+ gc::Heap initialStringHeap;
+
+ JitCode* generateStringConcatStub(JSContext* cx);
+ JitCode* generateRegExpMatcherStub(JSContext* cx);
+ JitCode* generateRegExpSearcherStub(JSContext* cx);
+ JitCode* generateRegExpExecMatchStub(JSContext* cx);
+ JitCode* generateRegExpExecTestStub(JSContext* cx);
+
+ JitCode* getStubNoBarrier(StubIndex stub,
+ uint32_t* requiredBarriersOut) const {
+ MOZ_ASSERT(CurrentThreadIsIonCompiling());
+ *requiredBarriersOut |= 1 << uint32_t(stub);
+ return stubs_[stub].unbarrieredGet();
+ }
+
+ public:
+ JitRealm();
+
+ void initialize(bool zoneHasNurseryStrings);
+
+ // Initialize code stubs only used by Ion, not Baseline.
+ [[nodiscard]] bool ensureIonStubsExist(JSContext* cx) {
+ if (stubs_[StringConcat]) {
+ return true;
+ }
+ stubs_[StringConcat] = generateStringConcatStub(cx);
+ return stubs_[StringConcat];
+ }
+
+ void traceWeak(JSTracer* trc, JS::Realm* realm);
+
+ void discardStubs() {
+ for (WeakHeapPtr<JitCode*>& stubRef : stubs_) {
+ stubRef = nullptr;
+ }
+ }
+
+ bool hasStubs() const {
+ for (const WeakHeapPtr<JitCode*>& stubRef : stubs_) {
+ if (stubRef) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void setStringsCanBeInNursery(bool allow) {
+ MOZ_ASSERT(!hasStubs());
+ initialStringHeap = allow ? gc::Heap::Default : gc::Heap::Tenured;
+ }
+ gc::Heap getInitialStringHeap() const { return initialStringHeap; }
+
+ JitCode* stringConcatStubNoBarrier(uint32_t* requiredBarriersOut) const {
+ return getStubNoBarrier(StringConcat, requiredBarriersOut);
+ }
+
+ JitCode* regExpMatcherStubNoBarrier(uint32_t* requiredBarriersOut) const {
+ return getStubNoBarrier(RegExpMatcher, requiredBarriersOut);
+ }
+
+ [[nodiscard]] JitCode* ensureRegExpMatcherStubExists(JSContext* cx) {
+ if (JitCode* code = stubs_[RegExpMatcher]) {
+ return code;
+ }
+ stubs_[RegExpMatcher] = generateRegExpMatcherStub(cx);
+ return stubs_[RegExpMatcher];
+ }
+
+ JitCode* regExpSearcherStubNoBarrier(uint32_t* requiredBarriersOut) const {
+ return getStubNoBarrier(RegExpSearcher, requiredBarriersOut);
+ }
+
+ [[nodiscard]] JitCode* ensureRegExpSearcherStubExists(JSContext* cx) {
+ if (JitCode* code = stubs_[RegExpSearcher]) {
+ return code;
+ }
+ stubs_[RegExpSearcher] = generateRegExpSearcherStub(cx);
+ return stubs_[RegExpSearcher];
+ }
+
+ JitCode* regExpExecMatchStubNoBarrier(uint32_t* requiredBarriersOut) const {
+ return getStubNoBarrier(RegExpExecMatch, requiredBarriersOut);
+ }
+
+ [[nodiscard]] JitCode* ensureRegExpExecMatchStubExists(JSContext* cx) {
+ if (JitCode* code = stubs_[RegExpExecMatch]) {
+ return code;
+ }
+ stubs_[RegExpExecMatch] = generateRegExpExecMatchStub(cx);
+ return stubs_[RegExpExecMatch];
+ }
+
+ JitCode* regExpExecTestStubNoBarrier(uint32_t* requiredBarriersOut) const {
+ return getStubNoBarrier(RegExpExecTest, requiredBarriersOut);
+ }
+
+ [[nodiscard]] JitCode* ensureRegExpExecTestStubExists(JSContext* cx) {
+ if (JitCode* code = stubs_[RegExpExecTest]) {
+ return code;
+ }
+ stubs_[RegExpExecTest] = generateRegExpExecTestStub(cx);
+ return stubs_[RegExpExecTest];
+ }
+
+ // Perform the necessary read barriers on stubs described by the bitmasks
+ // passed in. This function can only be called from the main thread.
+ //
+ // The stub pointers must still be valid by the time these methods are
+ // called. This is arranged by cancelling off-thread Ion compilation at the
+ // start of GC and at the start of sweeping.
+ void performStubReadBarriers(uint32_t stubsToBarrier) const;
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ static constexpr size_t offsetOfRegExpMatcherStub() {
+ return offsetof(JitRealm, stubs_) + RegExpMatcher * sizeof(uintptr_t);
+ }
+ static constexpr size_t offsetOfRegExpSearcherStub() {
+ return offsetof(JitRealm, stubs_) + RegExpSearcher * sizeof(uintptr_t);
+ }
+ static constexpr size_t offsetOfRegExpExecMatchStub() {
+ return offsetof(JitRealm, stubs_) + RegExpExecMatch * sizeof(uintptr_t);
+ }
+ static constexpr size_t offsetOfRegExpExecTestStub() {
+ return offsetof(JitRealm, stubs_) + RegExpExecTest * sizeof(uintptr_t);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitRealm_h */
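
The getStubNoBarrier()/performStubReadBarriers() pair above is a record-then-replay pattern: off-thread reads only set a bit per stub, and the main thread later performs the deferred read barriers for every recorded bit. A minimal generic sketch of that pattern, with placeholder types instead of WeakHeapPtr<JitCode*> and the real GC barrier:

#include <cassert>
#include <cstdint>

enum StubIndex : uint32_t { StringConcat = 0, RegExpMatcher, Count };

struct Stub { bool barriered = false; };  // placeholder for WeakHeapPtr<JitCode*>

class StubTable {
  Stub stubs_[Count];

 public:
  // Off-thread read: no barrier now, just remember which slot was touched.
  Stub* getNoBarrier(StubIndex i, uint32_t* requiredBarriersOut) {
    *requiredBarriersOut |= 1u << uint32_t(i);
    return &stubs_[i];
  }

  // Main-thread replay: perform the deferred barrier for every recorded slot.
  void performReadBarriers(uint32_t stubsToBarrier) {
    for (uint32_t i = 0; i < Count; i++) {
      if (stubsToBarrier & (1u << i)) {
        stubs_[i].barriered = true;  // stands in for the real GC read barrier
      }
    }
  }
};

int main() {
  StubTable table;
  uint32_t required = 0;
  table.getNoBarrier(RegExpMatcher, &required);  // compilation thread
  table.performReadBarriers(required);           // later, on the main thread
  assert(required == (1u << RegExpMatcher));
  return 0;
}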
diff --git a/js/src/jit/JitRuntime.h b/js/src/jit/JitRuntime.h
new file mode 100644
index 0000000000..cc3bd231c4
--- /dev/null
+++ b/js/src/jit/JitRuntime.h
@@ -0,0 +1,451 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitRuntime_h
+#define jit_JitRuntime_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/LinkedList.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h"
+
+#include "jit/ABIFunctions.h"
+#include "jit/BaselineICList.h"
+#include "jit/BaselineJIT.h"
+#include "jit/CalleeToken.h"
+#include "jit/InterpreterEntryTrampoline.h"
+#include "jit/IonCompileTask.h"
+#include "jit/IonTypes.h"
+#include "jit/JitCode.h"
+#include "jit/JitHints.h"
+#include "jit/shared/Assembler-shared.h"
+#include "js/AllocPolicy.h"
+#include "js/ProfilingFrameIterator.h"
+#include "js/TypeDecls.h"
+#include "js/UniquePtr.h"
+#include "js/Vector.h"
+#include "threading/ProtectedData.h"
+#include "vm/GeckoProfiler.h"
+#include "vm/Runtime.h"
+
+class JS_PUBLIC_API JSTracer;
+
+namespace js {
+
+class AutoLockHelperThreadState;
+class GCMarker;
+
+namespace jit {
+
+class FrameSizeClass;
+class JitRealm;
+class Label;
+class MacroAssembler;
+struct VMFunctionData;
+
+enum class TailCallVMFunctionId;
+enum class VMFunctionId;
+
+enum class BaselineICFallbackKind : uint8_t {
+#define DEF_ENUM_KIND(kind) kind,
+ IC_BASELINE_FALLBACK_CODE_KIND_LIST(DEF_ENUM_KIND)
+#undef DEF_ENUM_KIND
+ Count
+};
+
+enum class BailoutReturnKind {
+ GetProp,
+ GetPropSuper,
+ SetProp,
+ GetElem,
+ GetElemSuper,
+ Call,
+ New,
+ Count
+};
+
+// Class storing code and offsets for all Baseline IC fallback trampolines. This
+// is stored in JitRuntime and generated when creating the JitRuntime.
+class BaselineICFallbackCode {
+ JitCode* code_ = nullptr;
+ using OffsetArray =
+ mozilla::EnumeratedArray<BaselineICFallbackKind,
+ BaselineICFallbackKind::Count, uint32_t>;
+ OffsetArray offsets_ = {};
+
+ // Keeps track of the offsets into various baseline stubs' code at the
+ // return point from a called script.
+ using BailoutReturnArray =
+ mozilla::EnumeratedArray<BailoutReturnKind, BailoutReturnKind::Count,
+ uint32_t>;
+ BailoutReturnArray bailoutReturnOffsets_ = {};
+
+ public:
+ BaselineICFallbackCode() = default;
+ BaselineICFallbackCode(const BaselineICFallbackCode&) = delete;
+ void operator=(const BaselineICFallbackCode&) = delete;
+
+ void initOffset(BaselineICFallbackKind kind, uint32_t offset) {
+ offsets_[kind] = offset;
+ }
+ void initCode(JitCode* code) { code_ = code; }
+ void initBailoutReturnOffset(BailoutReturnKind kind, uint32_t offset) {
+ bailoutReturnOffsets_[kind] = offset;
+ }
+ TrampolinePtr addr(BaselineICFallbackKind kind) const {
+ return TrampolinePtr(code_->raw() + offsets_[kind]);
+ }
+ uint8_t* bailoutReturnAddr(BailoutReturnKind kind) const {
+ return code_->raw() + bailoutReturnOffsets_[kind];
+ }
+};
+
+enum class ArgumentsRectifierKind { Normal, TrialInlining };
+
+enum class DebugTrapHandlerKind { Interpreter, Compiler, Count };
+
+using EnterJitCode = void (*)(void*, unsigned int, Value*, InterpreterFrame*,
+ CalleeToken, JSObject*, size_t, Value*);
+
+class JitcodeGlobalTable;
+
+class JitRuntime {
+ private:
+ friend class JitRealm;
+
+ MainThreadData<uint64_t> nextCompilationId_{0};
+
+ // Buffer for OSR from baseline to Ion. To avoid holding on to this for too
+ // long it's also freed in EnterBaseline and EnterJit (after returning from
+ // JIT code).
+ MainThreadData<js::UniquePtr<uint8_t>> ionOsrTempData_{nullptr};
+
+ // Shared exception-handler tail.
+ WriteOnceData<uint32_t> exceptionTailOffset_{0};
+
+ // Shared profiler exit frame tail.
+ WriteOnceData<uint32_t> profilerExitFrameTailOffset_{0};
+
+ // Trampoline for entering JIT code.
+ WriteOnceData<uint32_t> enterJITOffset_{0};
+
+ // Generic bailout table; used if the bailout table overflows.
+ WriteOnceData<uint32_t> bailoutHandlerOffset_{0};
+
+ // Argument-rectifying thunks, in the case of insufficient arguments passed
+ // to a function call site. The return offset is used to rebuild stack frames
+ // when bailing out.
+ WriteOnceData<uint32_t> argumentsRectifierOffset_{0};
+ WriteOnceData<uint32_t> trialInliningArgumentsRectifierOffset_{0};
+ WriteOnceData<uint32_t> argumentsRectifierReturnOffset_{0};
+
+ // Thunk that invalidates an (Ion-compiled) caller on the Ion stack.
+ WriteOnceData<uint32_t> invalidatorOffset_{0};
+
+ // Thunk that calls the GC pre barrier.
+ WriteOnceData<uint32_t> valuePreBarrierOffset_{0};
+ WriteOnceData<uint32_t> stringPreBarrierOffset_{0};
+ WriteOnceData<uint32_t> objectPreBarrierOffset_{0};
+ WriteOnceData<uint32_t> shapePreBarrierOffset_{0};
+
+ // Thunk to call malloc/free.
+ WriteOnceData<uint32_t> freeStubOffset_{0};
+
+ // Thunk called to finish compilation of an IonScript.
+ WriteOnceData<uint32_t> lazyLinkStubOffset_{0};
+
+ // Thunk to enter the interpreter from JIT code.
+ WriteOnceData<uint32_t> interpreterStubOffset_{0};
+
+ // Thunk to convert the value in R0 to int32 if it's a double.
+ // Note: this stub treats -0 as +0 and may clobber R1.scratchReg().
+ WriteOnceData<uint32_t> doubleToInt32ValueStubOffset_{0};
+
+ // Thunk used by the debugger for breakpoint and step mode.
+ mozilla::EnumeratedArray<DebugTrapHandlerKind, DebugTrapHandlerKind::Count,
+ WriteOnceData<JitCode*>>
+ debugTrapHandlers_;
+
+ // BaselineInterpreter state.
+ BaselineInterpreter baselineInterpreter_;
+
+ // Code for trampolines and VMFunction wrappers.
+ WriteOnceData<JitCode*> trampolineCode_{nullptr};
+
+ // Thunk that calls into the C++ interpreter from the interpreter
+ // entry trampoline that is generated with --emit-interpreter-entry
+ WriteOnceData<uint32_t> vmInterpreterEntryOffset_{0};
+
+ // Maps VMFunctionId to the offset of the wrapper code in trampolineCode_.
+ using VMWrapperOffsets = Vector<uint32_t, 0, SystemAllocPolicy>;
+ VMWrapperOffsets functionWrapperOffsets_;
+
+ // Maps TailCallVMFunctionId to the offset of the wrapper code in
+ // trampolineCode_.
+ VMWrapperOffsets tailCallFunctionWrapperOffsets_;
+
+ MainThreadData<BaselineICFallbackCode> baselineICFallbackCode_;
+
+ // Global table of jitcode native address => bytecode address mappings.
+ UnprotectedData<JitcodeGlobalTable*> jitcodeGlobalTable_{nullptr};
+
+ // Map that stores Jit Hints for each script.
+ MainThreadData<JitHintsMap*> jitHintsMap_{nullptr};
+
+ // Map used to collect entry trampolines for the interpreters; it is used by
+ // external profilers to identify which functions are being interpreted.
+ MainThreadData<EntryTrampolineMap*> interpreterEntryMap_{nullptr};
+
+#ifdef DEBUG
+ // The number of possible bailing places encountered before forcefully bailing
+ // in that place if the counter reaches zero. Note that zero also means
+ // inactive.
+ MainThreadData<uint32_t> ionBailAfterCounter_{0};
+
+ // Whether the bailAfter mechanism is enabled. Used to avoid generating the
+ // Ion code instrumentation for ionBailAfterCounter_ if the testing function
+ // isn't used.
+ MainThreadData<bool> ionBailAfterEnabled_{false};
+#endif
+
+ // Number of Ion compilations which were finished off thread and are
+ // waiting to be lazily linked. This is only set while holding the helper
+ // thread state lock, but may be read from at other times.
+ typedef mozilla::Atomic<size_t, mozilla::SequentiallyConsistent>
+ NumFinishedOffThreadTasksType;
+ NumFinishedOffThreadTasksType numFinishedOffThreadTasks_{0};
+
+ // List of Ion compilations waiting to get linked.
+ using IonCompileTaskList = mozilla::LinkedList<js::jit::IonCompileTask>;
+ MainThreadData<IonCompileTaskList> ionLazyLinkList_;
+ MainThreadData<size_t> ionLazyLinkListSize_{0};
+
+#ifdef DEBUG
+ // Flag that can be set from JIT code to indicate it's invalid to call
+ // arbitrary JS code in a particular region. This is checked in RunScript.
+ MainThreadData<uint32_t> disallowArbitraryCode_{false};
+#endif
+
+ bool generateTrampolines(JSContext* cx);
+ bool generateBaselineICFallbackCode(JSContext* cx);
+
+ void generateLazyLinkStub(MacroAssembler& masm);
+ void generateInterpreterStub(MacroAssembler& masm);
+ void generateDoubleToInt32ValueStub(MacroAssembler& masm);
+ void generateProfilerExitFrameTailStub(MacroAssembler& masm,
+ Label* profilerExitTail);
+ void generateExceptionTailStub(MacroAssembler& masm, Label* profilerExitTail,
+ Label* bailoutTail);
+ void generateBailoutTailStub(MacroAssembler& masm, Label* bailoutTail);
+ void generateEnterJIT(JSContext* cx, MacroAssembler& masm);
+ void generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind);
+ void generateBailoutHandler(MacroAssembler& masm, Label* bailoutTail);
+ void generateInvalidator(MacroAssembler& masm, Label* bailoutTail);
+ uint32_t generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type);
+ void generateFreeStub(MacroAssembler& masm);
+ JitCode* generateDebugTrapHandler(JSContext* cx, DebugTrapHandlerKind kind);
+
+ bool generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset);
+
+ template <typename IdT>
+ bool generateVMWrappers(JSContext* cx, MacroAssembler& masm,
+ VMWrapperOffsets& offsets);
+ bool generateVMWrappers(JSContext* cx, MacroAssembler& masm);
+
+ uint32_t startTrampolineCode(MacroAssembler& masm);
+
+ TrampolinePtr trampolineCode(uint32_t offset) const {
+ MOZ_ASSERT(offset > 0);
+ MOZ_ASSERT(offset < trampolineCode_->instructionsSize());
+ return TrampolinePtr(trampolineCode_->raw() + offset);
+ }
+
+ void generateBaselineInterpreterEntryTrampoline(MacroAssembler& masm);
+ void generateInterpreterEntryTrampoline(MacroAssembler& masm);
+
+ public:
+ JitCode* generateEntryTrampolineForScript(JSContext* cx, JSScript* script);
+
+ JitRuntime() = default;
+ ~JitRuntime();
+ [[nodiscard]] bool initialize(JSContext* cx);
+
+ static void TraceAtomZoneRoots(JSTracer* trc);
+ [[nodiscard]] static bool MarkJitcodeGlobalTableIteratively(GCMarker* marker);
+ static void TraceWeakJitcodeGlobalTable(JSRuntime* rt, JSTracer* trc);
+
+ const BaselineICFallbackCode& baselineICFallbackCode() const {
+ return baselineICFallbackCode_.ref();
+ }
+
+ IonCompilationId nextCompilationId() {
+ return IonCompilationId(nextCompilationId_++);
+ }
+
+#ifdef DEBUG
+ bool disallowArbitraryCode() const { return disallowArbitraryCode_; }
+ void clearDisallowArbitraryCode() { disallowArbitraryCode_ = false; }
+ const void* addressOfDisallowArbitraryCode() const {
+ return &disallowArbitraryCode_.refNoCheck();
+ }
+#endif
+
+ uint8_t* allocateIonOsrTempData(size_t size);
+ void freeIonOsrTempData();
+
+ TrampolinePtr getVMWrapper(VMFunctionId funId) const {
+ MOZ_ASSERT(trampolineCode_);
+ return trampolineCode(functionWrapperOffsets_[size_t(funId)]);
+ }
+ TrampolinePtr getVMWrapper(TailCallVMFunctionId funId) const {
+ MOZ_ASSERT(trampolineCode_);
+ return trampolineCode(tailCallFunctionWrapperOffsets_[size_t(funId)]);
+ }
+
+ JitCode* debugTrapHandler(JSContext* cx, DebugTrapHandlerKind kind);
+
+ BaselineInterpreter& baselineInterpreter() { return baselineInterpreter_; }
+
+ TrampolinePtr getGenericBailoutHandler() const {
+ return trampolineCode(bailoutHandlerOffset_);
+ }
+
+ TrampolinePtr getExceptionTail() const {
+ return trampolineCode(exceptionTailOffset_);
+ }
+
+ TrampolinePtr getProfilerExitFrameTail() const {
+ return trampolineCode(profilerExitFrameTailOffset_);
+ }
+
+ TrampolinePtr getArgumentsRectifier(
+ ArgumentsRectifierKind kind = ArgumentsRectifierKind::Normal) const {
+ if (kind == ArgumentsRectifierKind::TrialInlining) {
+ return trampolineCode(trialInliningArgumentsRectifierOffset_);
+ }
+ return trampolineCode(argumentsRectifierOffset_);
+ }
+
+ uint32_t vmInterpreterEntryOffset() { return vmInterpreterEntryOffset_; }
+
+ TrampolinePtr getArgumentsRectifierReturnAddr() const {
+ return trampolineCode(argumentsRectifierReturnOffset_);
+ }
+
+ TrampolinePtr getInvalidationThunk() const {
+ return trampolineCode(invalidatorOffset_);
+ }
+
+ EnterJitCode enterJit() const {
+ return JS_DATA_TO_FUNC_PTR(EnterJitCode,
+ trampolineCode(enterJITOffset_).value);
+ }
+
+ // Return the registers from the native caller frame of the given JIT frame.
+ // Nothing{} if frameStackAddress is NOT pointing at a native-to-JIT entry
+ // frame, or if the information is not accessible/implemented on this
+ // platform.
+ static mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+ getCppEntryRegisters(JitFrameLayout* frameStackAddress);
+
+ TrampolinePtr preBarrier(MIRType type) const {
+ switch (type) {
+ case MIRType::Value:
+ return trampolineCode(valuePreBarrierOffset_);
+ case MIRType::String:
+ return trampolineCode(stringPreBarrierOffset_);
+ case MIRType::Object:
+ return trampolineCode(objectPreBarrierOffset_);
+ case MIRType::Shape:
+ return trampolineCode(shapePreBarrierOffset_);
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ TrampolinePtr freeStub() const { return trampolineCode(freeStubOffset_); }
+
+ TrampolinePtr lazyLinkStub() const {
+ return trampolineCode(lazyLinkStubOffset_);
+ }
+ TrampolinePtr interpreterStub() const {
+ return trampolineCode(interpreterStubOffset_);
+ }
+
+ TrampolinePtr getDoubleToInt32ValueStub() const {
+ return trampolineCode(doubleToInt32ValueStubOffset_);
+ }
+
+ bool hasJitcodeGlobalTable() const { return jitcodeGlobalTable_ != nullptr; }
+
+ JitcodeGlobalTable* getJitcodeGlobalTable() {
+ MOZ_ASSERT(hasJitcodeGlobalTable());
+ return jitcodeGlobalTable_;
+ }
+
+ bool hasJitHintsMap() const { return jitHintsMap_ != nullptr; }
+
+ JitHintsMap* getJitHintsMap() {
+ MOZ_ASSERT(hasJitHintsMap());
+ return jitHintsMap_;
+ }
+
+ bool hasInterpreterEntryMap() const {
+ return interpreterEntryMap_ != nullptr;
+ }
+
+ EntryTrampolineMap* getInterpreterEntryMap() {
+ MOZ_ASSERT(hasInterpreterEntryMap());
+ return interpreterEntryMap_;
+ }
+
+ bool isProfilerInstrumentationEnabled(JSRuntime* rt) {
+ return rt->geckoProfiler().enabled();
+ }
+
+ bool isOptimizationTrackingEnabled(JSRuntime* rt) {
+ return isProfilerInstrumentationEnabled(rt);
+ }
+
+#ifdef DEBUG
+ void* addressOfIonBailAfterCounter() { return &ionBailAfterCounter_; }
+
+  // Set the number of bailout sites after which we forcefully bail.
+  // Zero disables this feature.
+ void setIonBailAfterCounter(uint32_t after) { ionBailAfterCounter_ = after; }
+ bool ionBailAfterEnabled() const { return ionBailAfterEnabled_; }
+ void setIonBailAfterEnabled(bool enabled) { ionBailAfterEnabled_ = enabled; }
+#endif
+
+ size_t numFinishedOffThreadTasks() const {
+ return numFinishedOffThreadTasks_;
+ }
+ NumFinishedOffThreadTasksType& numFinishedOffThreadTasksRef(
+ const AutoLockHelperThreadState& locked) {
+ return numFinishedOffThreadTasks_;
+ }
+
+ IonCompileTaskList& ionLazyLinkList(JSRuntime* rt);
+
+ size_t ionLazyLinkListSize() const { return ionLazyLinkListSize_; }
+
+ void ionLazyLinkListRemove(JSRuntime* rt, js::jit::IonCompileTask* task);
+ void ionLazyLinkListAdd(JSRuntime* rt, js::jit::IonCompileTask* task);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitRuntime_h */
diff --git a/js/src/jit/JitScript-inl.h b/js/src/jit/JitScript-inl.h
new file mode 100644
index 0000000000..e27745ffab
--- /dev/null
+++ b/js/src/jit/JitScript-inl.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitScript_inl_h
+#define jit_JitScript_inl_h
+
+#include "jit/JitScript.h"
+
+#include "mozilla/Assertions.h"
+
+#include "gc/Zone.h"
+#include "jit/JitZone.h"
+#include "vm/JSContext.h"
+#include "vm/JSScript.h"
+
+namespace js {
+namespace jit {
+
+inline AutoKeepJitScripts::AutoKeepJitScripts(JSContext* cx)
+ : zone_(cx->zone()->jitZone()), prev_(zone_->keepJitScripts()) {
+ zone_->setKeepJitScripts(true);
+}
+
+inline AutoKeepJitScripts::~AutoKeepJitScripts() {
+ MOZ_ASSERT(zone_->keepJitScripts());
+ zone_->setKeepJitScripts(prev_);
+}
+
+} // namespace jit
+} // namespace js
+
+inline bool JSScript::ensureHasJitScript(JSContext* cx,
+ js::jit::AutoKeepJitScripts&) {
+ if (hasJitScript()) {
+ return true;
+ }
+ return createJitScript(cx);
+}
+
+#endif /* jit_JitScript_inl_h */
diff --git a/js/src/jit/JitScript.cpp b/js/src/jit/JitScript.cpp
new file mode 100644
index 0000000000..ac0a39cbb5
--- /dev/null
+++ b/js/src/jit/JitScript.cpp
@@ -0,0 +1,732 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/JitScript-inl.h"
+
+#include "mozilla/BinarySearch.h"
+#include "mozilla/CheckedInt.h"
+
+#include <utility>
+
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/BytecodeAnalysis.h"
+#include "jit/IonScript.h"
+#include "jit/JitFrames.h"
+#include "jit/JitSpewer.h"
+#include "jit/ScriptFromCalleeToken.h"
+#include "jit/TrialInlining.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/Compartment.h"
+#include "vm/FrameIter.h" // js::OnlyJSJitFrameIter
+#include "vm/JitActivation.h"
+#include "vm/JSScript.h"
+
+#include "gc/GCContext-inl.h"
+#include "jit/JSJitFrameIter-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::CheckedInt;
+
+JitScript::JitScript(JSScript* script, Offset fallbackStubsOffset,
+ Offset endOffset, const char* profileString)
+ : profileString_(profileString),
+ endOffset_(endOffset),
+ icScript_(script->getWarmUpCount(),
+ fallbackStubsOffset - offsetOfICScript(),
+ endOffset - offsetOfICScript(),
+ /*depth=*/0) {
+ // Ensure the baselineScript_ and ionScript_ fields match the BaselineDisabled
+ // and IonDisabled script flags.
+ if (!script->canBaselineCompile()) {
+ setBaselineScriptImpl(script, BaselineDisabledScriptPtr);
+ }
+ if (!script->canIonCompile()) {
+ setIonScriptImpl(script, IonDisabledScriptPtr);
+ }
+}
+
+#ifdef DEBUG
+JitScript::~JitScript() {
+ // The contents of the stub space are removed and freed separately after the
+ // next minor GC. See prepareForDestruction.
+ MOZ_ASSERT(jitScriptStubSpace_.isEmpty());
+
+ // BaselineScript and IonScript must have been destroyed at this point.
+ MOZ_ASSERT(!hasBaselineScript());
+ MOZ_ASSERT(!hasIonScript());
+}
+#else
+JitScript::~JitScript() = default;
+#endif
+
+bool JSScript::createJitScript(JSContext* cx) {
+ MOZ_ASSERT(!hasJitScript());
+ cx->check(this);
+
+ // Scripts with a JitScript can run in the Baseline Interpreter. Make sure
+ // we don't create a JitScript for scripts we shouldn't Baseline interpret.
+ MOZ_ASSERT_IF(IsBaselineInterpreterEnabled(),
+ CanBaselineInterpretScript(this));
+
+ // Store the profile string in the JitScript if the profiler is enabled.
+ const char* profileString = nullptr;
+ if (cx->runtime()->geckoProfiler().enabled()) {
+ profileString = cx->runtime()->geckoProfiler().profileString(cx, this);
+ if (!profileString) {
+ return false;
+ }
+ }
+
+ static_assert(sizeof(JitScript) % sizeof(uintptr_t) == 0,
+ "Trailing arrays must be aligned properly");
+ static_assert(sizeof(ICEntry) % sizeof(uintptr_t) == 0,
+ "Trailing arrays must be aligned properly");
+
+ static_assert(
+ sizeof(JitScript) == offsetof(JitScript, icScript_) + sizeof(ICScript),
+ "icScript_ must be the last field");
+
+ // Calculate allocation size.
+ CheckedInt<uint32_t> allocSize = sizeof(JitScript);
+ allocSize += CheckedInt<uint32_t>(numICEntries()) * sizeof(ICEntry);
+ allocSize += CheckedInt<uint32_t>(numICEntries()) * sizeof(ICFallbackStub);
+ if (!allocSize.isValid()) {
+ ReportAllocationOverflow(cx);
+ return false;
+ }
+
+ void* raw = cx->pod_malloc<uint8_t>(allocSize.value());
+ MOZ_ASSERT(uintptr_t(raw) % alignof(JitScript) == 0);
+ if (!raw) {
+ return false;
+ }
+
+ size_t fallbackStubsOffset =
+ sizeof(JitScript) + numICEntries() * sizeof(ICEntry);
+
+ UniquePtr<JitScript> jitScript(new (raw) JitScript(
+ this, fallbackStubsOffset, allocSize.value(), profileString));
+
+ // Sanity check the length computation.
+ MOZ_ASSERT(jitScript->numICEntries() == numICEntries());
+
+ jitScript->icScript()->initICEntries(cx, this);
+
+ warmUpData_.initJitScript(jitScript.release());
+ AddCellMemory(this, allocSize.value(), MemoryUse::JitScript);
+
+ // We have a JitScript so we can set the script's jitCodeRaw pointer to the
+ // Baseline Interpreter code.
+ updateJitCodeRaw(cx->runtime());
+
+ return true;
+}
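+
+// For illustration, with N = numICEntries() the single allocation created
+// above is laid out as:
+//
+//   [ JitScript (ends with the embedded ICScript) ]   offset 0
+//   [ N x ICEntry                                 ]   sizeof(JitScript)
+//   [ N x ICFallbackStub                          ]   fallbackStubsOffset
+//
+// so that:
+//
+//   fallbackStubsOffset == sizeof(JitScript) + N * sizeof(ICEntry)
+//   allocSize           == fallbackStubsOffset + N * sizeof(ICFallbackStub)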
+
+void JSScript::maybeReleaseJitScript(JS::GCContext* gcx) {
+ MOZ_ASSERT(hasJitScript());
+
+ if (zone()->jitZone()->keepJitScripts() || jitScript()->hasBaselineScript() ||
+ jitScript()->active()) {
+ return;
+ }
+
+ releaseJitScript(gcx);
+}
+
+void JSScript::releaseJitScript(JS::GCContext* gcx) {
+ MOZ_ASSERT(hasJitScript());
+ MOZ_ASSERT(!hasBaselineScript());
+ MOZ_ASSERT(!hasIonScript());
+
+ gcx->removeCellMemory(this, jitScript()->allocBytes(), MemoryUse::JitScript);
+
+ JitScript::Destroy(zone(), jitScript());
+ warmUpData_.clearJitScript();
+ updateJitCodeRaw(gcx->runtime());
+}
+
+void JSScript::releaseJitScriptOnFinalize(JS::GCContext* gcx) {
+ MOZ_ASSERT(hasJitScript());
+
+ if (hasIonScript()) {
+ IonScript* ion = jitScript()->clearIonScript(gcx, this);
+ jit::IonScript::Destroy(gcx, ion);
+ }
+
+ if (hasBaselineScript()) {
+ BaselineScript* baseline = jitScript()->clearBaselineScript(gcx, this);
+ jit::BaselineScript::Destroy(gcx, baseline);
+ }
+
+ releaseJitScript(gcx);
+}
+
+void JitScript::trace(JSTracer* trc) {
+ icScript_.trace(trc);
+
+ if (hasBaselineScript()) {
+ baselineScript()->trace(trc);
+ }
+
+ if (hasIonScript()) {
+ ionScript()->trace(trc);
+ }
+
+ if (templateEnv_.isSome()) {
+ TraceNullableEdge(trc, templateEnv_.ptr(), "jitscript-template-env");
+ }
+
+ if (hasInliningRoot()) {
+ inliningRoot()->trace(trc);
+ }
+}
+
+void ICScript::trace(JSTracer* trc) {
+ // Mark all IC stub codes hanging off the IC stub entries.
+ for (size_t i = 0; i < numICEntries(); i++) {
+ ICEntry& ent = icEntry(i);
+ ent.trace(trc);
+ }
+}
+
+bool ICScript::addInlinedChild(JSContext* cx, UniquePtr<ICScript> child,
+ uint32_t pcOffset) {
+ MOZ_ASSERT(!hasInlinedChild(pcOffset));
+
+ if (!inlinedChildren_) {
+ inlinedChildren_ = cx->make_unique<Vector<CallSite>>(cx);
+ if (!inlinedChildren_) {
+ return false;
+ }
+ }
+
+ // First reserve space in inlinedChildren_ to ensure that if the ICScript is
+ // added to the inlining root, it can also be added to inlinedChildren_.
+ CallSite callsite(child.get(), pcOffset);
+ if (!inlinedChildren_->reserve(inlinedChildren_->length() + 1)) {
+ return false;
+ }
+ if (!inliningRoot()->addInlinedScript(std::move(child))) {
+ return false;
+ }
+ inlinedChildren_->infallibleAppend(callsite);
+ return true;
+}
+
+ICScript* ICScript::findInlinedChild(uint32_t pcOffset) {
+ for (auto& callsite : *inlinedChildren_) {
+ if (callsite.pcOffset_ == pcOffset) {
+ return callsite.callee_;
+ }
+ }
+ MOZ_CRASH("Inlined child expected at pcOffset");
+}
+
+void ICScript::removeInlinedChild(uint32_t pcOffset) {
+ MOZ_ASSERT(inliningRoot());
+ inlinedChildren_->eraseIf([pcOffset](const CallSite& callsite) -> bool {
+ return callsite.pcOffset_ == pcOffset;
+ });
+}
+
+bool ICScript::hasInlinedChild(uint32_t pcOffset) {
+ if (!inlinedChildren_) {
+ return false;
+ }
+ for (auto& callsite : *inlinedChildren_) {
+ if (callsite.pcOffset_ == pcOffset) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void JitScript::resetWarmUpCount(uint32_t count) {
+ icScript_.resetWarmUpCount(count);
+ if (hasInliningRoot()) {
+ inliningRoot()->resetWarmUpCounts(count);
+ }
+}
+
+void JitScript::ensureProfileString(JSContext* cx, JSScript* script) {
+ MOZ_ASSERT(cx->runtime()->geckoProfiler().enabled());
+
+ if (profileString_) {
+ return;
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ profileString_ = cx->runtime()->geckoProfiler().profileString(cx, script);
+ if (!profileString_) {
+ oomUnsafe.crash("Failed to allocate profile string");
+ }
+}
+
+/* static */
+void JitScript::Destroy(Zone* zone, JitScript* script) {
+ script->prepareForDestruction(zone);
+
+ js_delete(script);
+}
+
+void JitScript::prepareForDestruction(Zone* zone) {
+ // When the script contains pointers to nursery things, the store buffer can
+ // contain entries that point into the fallback stub space. Since we can
+ // destroy scripts outside the context of a GC, this situation could result
+ // in us trying to mark invalid store buffer entries.
+ //
+ // Defer freeing any allocated blocks until after the next minor GC.
+ jitScriptStubSpace_.freeAllAfterMinorGC(zone);
+
+ // Trigger write barriers.
+ baselineScript_.set(zone, nullptr);
+ ionScript_.set(zone, nullptr);
+}
+
+struct FallbackStubs {
+ ICScript* const icScript_;
+
+ explicit FallbackStubs(ICScript* icScript) : icScript_(icScript) {}
+
+ size_t numEntries() const { return icScript_->numICEntries(); }
+ ICFallbackStub* operator[](size_t index) const {
+ return icScript_->fallbackStub(index);
+ }
+};
+
+static bool ComputeBinarySearchMid(FallbackStubs stubs, uint32_t pcOffset,
+ size_t* loc) {
+ return mozilla::BinarySearchIf(
+ stubs, 0, stubs.numEntries(),
+ [pcOffset](const ICFallbackStub* stub) {
+ if (pcOffset < stub->pcOffset()) {
+ return -1;
+ }
+ if (stub->pcOffset() < pcOffset) {
+ return 1;
+ }
+ return 0;
+ },
+ loc);
+}
+
+ICEntry& ICScript::icEntryFromPCOffset(uint32_t pcOffset) {
+ size_t mid;
+ MOZ_ALWAYS_TRUE(ComputeBinarySearchMid(FallbackStubs(this), pcOffset, &mid));
+
+ MOZ_ASSERT(mid < numICEntries());
+
+ ICEntry& entry = icEntry(mid);
+ MOZ_ASSERT(fallbackStubForICEntry(&entry)->pcOffset() == pcOffset);
+ return entry;
+}
+
+ICEntry* ICScript::interpreterICEntryFromPCOffset(uint32_t pcOffset) {
+ // We have to return the entry to store in BaselineFrame::interpreterICEntry
+ // when resuming in the Baseline Interpreter at pcOffset. The bytecode op at
+ // pcOffset does not necessarily have an ICEntry, so we want to return the
+ // first ICEntry for which the following is true:
+ //
+ // entry.pcOffset() >= pcOffset
+ //
+ // Fortunately, ComputeBinarySearchMid returns exactly this entry.
+
+ size_t mid;
+ ComputeBinarySearchMid(FallbackStubs(this), pcOffset, &mid);
+
+ if (mid < numICEntries()) {
+ ICEntry& entry = icEntry(mid);
+ MOZ_ASSERT(fallbackStubForICEntry(&entry)->pcOffset() >= pcOffset);
+ return &entry;
+ }
+
+ // Resuming at a pc after the last ICEntry. Just return nullptr:
+ // BaselineFrame::interpreterICEntry will never be used in this case.
+ return nullptr;
+}
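+
+// For illustration, suppose the fallback stubs of an ICScript have pcOffsets
+// {0, 7, 19, 30}:
+//
+//   icEntryFromPCOffset(19)            -> entry 2 (exact match required)
+//   interpreterICEntryFromPCOffset(10) -> entry 2 (first pcOffset >= 10)
+//   interpreterICEntryFromPCOffset(35) -> nullptr (past the last ICEntry)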
+
+void JitScript::purgeOptimizedStubs(JSScript* script) {
+ MOZ_ASSERT(script->jitScript() == this);
+
+ Zone* zone = script->zone();
+ if (IsAboutToBeFinalizedUnbarriered(script)) {
+ // We're sweeping and the script is dead. Don't purge optimized stubs
+ // because (1) accessing CacheIRStubInfo pointers in ICStubs is invalid
+ // because we may have swept them already when we started (incremental)
+ // sweeping and (2) it's unnecessary because this script will be finalized
+ // soon anyway.
+ return;
+ }
+
+ JitSpew(JitSpew_BaselineIC, "Purging optimized stubs");
+
+ icScript()->purgeOptimizedStubs(zone);
+ if (hasInliningRoot()) {
+ inliningRoot()->purgeOptimizedStubs(zone);
+ }
+#ifdef DEBUG
+ failedICHash_.reset();
+ hasPurgedStubs_ = true;
+#endif
+}
+
+void ICScript::purgeOptimizedStubs(Zone* zone) {
+ for (size_t i = 0; i < numICEntries(); i++) {
+ ICEntry& entry = icEntry(i);
+ ICStub* lastStub = entry.firstStub();
+ while (!lastStub->isFallback()) {
+ lastStub = lastStub->toCacheIRStub()->next();
+ }
+
+ // Unlink all stubs allocated in the optimized space.
+ ICStub* stub = entry.firstStub();
+ ICCacheIRStub* prev = nullptr;
+
+ while (stub != lastStub) {
+ if (!stub->toCacheIRStub()->allocatedInFallbackSpace()) {
+ lastStub->toFallbackStub()->unlinkStub(zone, &entry, prev,
+ stub->toCacheIRStub());
+ stub = stub->toCacheIRStub()->next();
+ continue;
+ }
+
+ prev = stub->toCacheIRStub();
+ stub = stub->toCacheIRStub()->next();
+ }
+
+ lastStub->toFallbackStub()->clearHasFoldedStub();
+ }
+
+#ifdef DEBUG
+ // All remaining stubs must be allocated in the fallback space.
+ for (size_t i = 0; i < numICEntries(); i++) {
+ ICEntry& entry = icEntry(i);
+ ICStub* stub = entry.firstStub();
+ while (!stub->isFallback()) {
+ MOZ_ASSERT(stub->toCacheIRStub()->allocatedInFallbackSpace());
+ stub = stub->toCacheIRStub()->next();
+ }
+ }
+#endif
+}
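+
+// For illustration: given an IC chain [A, B, Fallback] where A was allocated
+// in the optimized stub space and B in the JitScript's fallback stub space,
+// the loop above unlinks A and keeps B, leaving [B, Fallback].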
+
+bool JitScript::ensureHasCachedBaselineJitData(JSContext* cx,
+ HandleScript script) {
+ if (templateEnv_.isSome()) {
+ return true;
+ }
+
+ if (!script->function() ||
+ !script->function()->needsFunctionEnvironmentObjects()) {
+ templateEnv_.emplace();
+ return true;
+ }
+
+ Rooted<EnvironmentObject*> templateEnv(cx);
+ Rooted<JSFunction*> fun(cx, script->function());
+
+ if (fun->needsNamedLambdaEnvironment()) {
+ templateEnv = NamedLambdaObject::createTemplateObject(cx, fun);
+ if (!templateEnv) {
+ return false;
+ }
+ }
+
+ if (fun->needsCallObject()) {
+ templateEnv = CallObject::createTemplateObject(cx, script, templateEnv);
+ if (!templateEnv) {
+ return false;
+ }
+ }
+
+ templateEnv_.emplace(templateEnv);
+ return true;
+}
+
+bool JitScript::ensureHasCachedIonData(JSContext* cx, HandleScript script) {
+ MOZ_ASSERT(script->jitScript() == this);
+
+ if (usesEnvironmentChain_.isSome()) {
+ return true;
+ }
+
+ if (!ensureHasCachedBaselineJitData(cx, script)) {
+ return false;
+ }
+
+ usesEnvironmentChain_.emplace(ScriptUsesEnvironmentChain(script));
+ return true;
+}
+
+void JitScript::setBaselineScriptImpl(JSScript* script,
+ BaselineScript* baselineScript) {
+ JSRuntime* rt = script->runtimeFromMainThread();
+ setBaselineScriptImpl(rt->gcContext(), script, baselineScript);
+}
+
+void JitScript::setBaselineScriptImpl(JS::GCContext* gcx, JSScript* script,
+ BaselineScript* baselineScript) {
+ if (hasBaselineScript()) {
+ gcx->removeCellMemory(script, baselineScript_->allocBytes(),
+ MemoryUse::BaselineScript);
+ baselineScript_.set(script->zone(), nullptr);
+ }
+
+ MOZ_ASSERT(ionScript_ == nullptr || ionScript_ == IonDisabledScriptPtr);
+
+ baselineScript_.set(script->zone(), baselineScript);
+ if (hasBaselineScript()) {
+ AddCellMemory(script, baselineScript_->allocBytes(),
+ MemoryUse::BaselineScript);
+ }
+
+ script->resetWarmUpResetCounter();
+ script->updateJitCodeRaw(gcx->runtime());
+}
+
+void JitScript::setIonScriptImpl(JSScript* script, IonScript* ionScript) {
+ JSRuntime* rt = script->runtimeFromMainThread();
+ setIonScriptImpl(rt->gcContext(), script, ionScript);
+}
+
+void JitScript::setIonScriptImpl(JS::GCContext* gcx, JSScript* script,
+ IonScript* ionScript) {
+ MOZ_ASSERT_IF(ionScript != IonDisabledScriptPtr,
+ !baselineScript()->hasPendingIonCompileTask());
+
+ JS::Zone* zone = script->zone();
+ if (hasIonScript()) {
+ gcx->removeCellMemory(script, ionScript_->allocBytes(),
+ MemoryUse::IonScript);
+ ionScript_.set(zone, nullptr);
+ }
+
+ ionScript_.set(zone, ionScript);
+ MOZ_ASSERT_IF(hasIonScript(), hasBaselineScript());
+ if (hasIonScript()) {
+ AddCellMemory(script, ionScript_->allocBytes(), MemoryUse::IonScript);
+ }
+
+ script->updateJitCodeRaw(gcx->runtime());
+}
+
+#ifdef JS_STRUCTURED_SPEW
+static bool HasEnteredCounters(ICEntry& entry) {
+ ICStub* stub = entry.firstStub();
+ if (stub && !stub->isFallback()) {
+ return true;
+ }
+ return false;
+}
+
+void jit::JitSpewBaselineICStats(JSScript* script, const char* dumpReason) {
+ MOZ_ASSERT(script->hasJitScript());
+ JSContext* cx = TlsContext.get();
+ AutoStructuredSpewer spew(cx, SpewChannel::BaselineICStats, script);
+ if (!spew) {
+ return;
+ }
+
+ JitScript* jitScript = script->jitScript();
+ spew->property("reason", dumpReason);
+ spew->beginListProperty("entries");
+ for (size_t i = 0; i < jitScript->numICEntries(); i++) {
+ ICEntry& entry = jitScript->icEntry(i);
+ ICFallbackStub* fallback = jitScript->fallbackStub(i);
+ if (!HasEnteredCounters(entry)) {
+ continue;
+ }
+
+ uint32_t pcOffset = fallback->pcOffset();
+ jsbytecode* pc = script->offsetToPC(pcOffset);
+
+ unsigned column;
+ unsigned int line = PCToLineNumber(script, pc, &column);
+
+ spew->beginObject();
+ spew->property("op", CodeName(JSOp(*pc)));
+ spew->property("pc", pcOffset);
+ spew->property("line", line);
+ spew->property("column", column);
+
+ spew->beginListProperty("counts");
+ ICStub* stub = entry.firstStub();
+ while (stub && !stub->isFallback()) {
+ uint32_t count = stub->enteredCount();
+ spew->value(count);
+ stub = stub->toCacheIRStub()->next();
+ }
+ spew->endList();
+ spew->property("fallback_count", fallback->enteredCount());
+ spew->endObject();
+ }
+ spew->endList();
+}
+#endif
+
+static void MarkActiveJitScripts(JSContext* cx,
+ const JitActivationIterator& activation) {
+ for (OnlyJSJitFrameIter iter(activation); !iter.done(); ++iter) {
+ const JSJitFrameIter& frame = iter.frame();
+ switch (frame.type()) {
+ case FrameType::BaselineJS:
+ frame.script()->jitScript()->setActive();
+ break;
+ case FrameType::Exit:
+ if (frame.exitFrame()->is<LazyLinkExitFrameLayout>()) {
+ LazyLinkExitFrameLayout* ll =
+ frame.exitFrame()->as<LazyLinkExitFrameLayout>();
+ JSScript* script =
+ ScriptFromCalleeToken(ll->jsFrame()->calleeToken());
+ script->jitScript()->setActive();
+ }
+ break;
+ case FrameType::Bailout:
+ case FrameType::IonJS: {
+ // Keep the JitScript and BaselineScript around, since bailouts from
+ // the ion jitcode need to re-enter into the Baseline code.
+ frame.script()->jitScript()->setActive();
+ for (InlineFrameIterator inlineIter(cx, &frame); inlineIter.more();
+ ++inlineIter) {
+ inlineIter.script()->jitScript()->setActive();
+ }
+ break;
+ }
+ default:;
+ }
+ }
+}
+
+void jit::MarkActiveJitScripts(Zone* zone) {
+ if (zone->isAtomsZone()) {
+ return;
+ }
+ JSContext* cx = TlsContext.get();
+ for (JitActivationIterator iter(cx); !iter.done(); ++iter) {
+ if (iter->compartment()->zone() == zone) {
+ MarkActiveJitScripts(cx, iter);
+ }
+ }
+}
+
+InliningRoot* JitScript::getOrCreateInliningRoot(JSContext* cx,
+ JSScript* script) {
+ if (!inliningRoot_) {
+ inliningRoot_ = js::MakeUnique<InliningRoot>(cx, script);
+ if (!inliningRoot_) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ icScript_.inliningRoot_ = inliningRoot_.get();
+ }
+ return inliningRoot_.get();
+}
+
+gc::AllocSite* JitScript::createAllocSite(JSScript* script) {
+ MOZ_ASSERT(script->jitScript() == this);
+
+ Nursery& nursery = script->runtimeFromMainThread()->gc.nursery();
+ if (!nursery.canCreateAllocSite()) {
+ // Don't block attaching an optimized stub, but don't process allocations
+ // for this site.
+ return script->zone()->unknownAllocSite(JS::TraceKind::Object);
+ }
+
+ if (!allocSites_.reserve(allocSites_.length() + 1)) {
+ return nullptr;
+ }
+
+ ICStubSpace* stubSpace = jitScriptStubSpace();
+ auto* site =
+ static_cast<gc::AllocSite*>(stubSpace->alloc(sizeof(gc::AllocSite)));
+ if (!site) {
+ return nullptr;
+ }
+
+ new (site) gc::AllocSite(script->zone(), script, JS::TraceKind::Object);
+
+ allocSites_.infallibleAppend(site);
+
+ nursery.noteAllocSiteCreated();
+
+ return site;
+}
+
+bool JitScript::resetAllocSites(bool resetNurserySites,
+ bool resetPretenuredSites) {
+ MOZ_ASSERT(resetNurserySites || resetPretenuredSites);
+
+ bool anyReset = false;
+
+ for (gc::AllocSite* site : allocSites_) {
+ if ((resetNurserySites && site->initialHeap() == gc::Heap::Default) ||
+ (resetPretenuredSites && site->initialHeap() == gc::Heap::Tenured)) {
+ if (site->maybeResetState()) {
+ anyReset = true;
+ }
+ }
+ }
+
+ return anyReset;
+}
+
+JitScriptICStubSpace* ICScript::jitScriptStubSpace() {
+ if (isInlined()) {
+ return inliningRoot_->jitScriptStubSpace();
+ }
+ return outerJitScript()->jitScriptStubSpace();
+}
+
+JitScript* ICScript::outerJitScript() {
+ MOZ_ASSERT(!isInlined());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(this);
+ return reinterpret_cast<JitScript*>(ptr - JitScript::offsetOfICScript());
+}
+
+#ifdef DEBUG
+// This hash is used to verify that we do not recompile after a
+// TranspiledCacheIR invalidation with the exact same ICs.
+//
+// It should change iff an ICEntry in this ICScript (or an ICScript
+// inlined into this ICScript) is modified such that we will make a
+// different decision in WarpScriptOracle::maybeInlineIC. This means:
+//
+// 1. The hash will change if we attach a new stub.
+// 2. The hash will change if the entered count of any CacheIR stub
+// other than the first changes from 0.
+// 3. The hash will change if the entered count of the fallback stub
+// changes from 0.
+//
+HashNumber ICScript::hash() {
+ HashNumber h = 0;
+ for (size_t i = 0; i < numICEntries(); i++) {
+ ICStub* stub = icEntry(i).firstStub();
+
+ // Hash the address of the first stub.
+ h = mozilla::AddToHash(h, stub);
+
+ // Hash whether subsequent stubs have entry count 0.
+ if (!stub->isFallback()) {
+ stub = stub->toCacheIRStub()->next();
+ while (!stub->isFallback()) {
+ h = mozilla::AddToHash(h, stub->enteredCount() == 0);
+ stub = stub->toCacheIRStub()->next();
+ }
+ }
+
+ // Hash whether the fallback has entry count 0.
+ MOZ_ASSERT(stub->isFallback());
+ h = mozilla::AddToHash(h, stub->enteredCount() == 0);
+ }
+
+ return h;
+}
+#endif
diff --git a/js/src/jit/JitScript.h b/js/src/jit/JitScript.h
new file mode 100644
index 0000000000..f84630853f
--- /dev/null
+++ b/js/src/jit/JitScript.h
@@ -0,0 +1,543 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitScript_h
+#define jit_JitScript_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "gc/Barrier.h"
+#include "jit/BaselineIC.h"
+#include "jit/ICStubSpace.h"
+#include "js/TypeDecls.h"
+#include "js/UniquePtr.h"
+#include "js/Vector.h"
+#include "util/TrailingArray.h"
+#include "vm/EnvironmentObject.h"
+
+class JS_PUBLIC_API JSScript;
+class JS_PUBLIC_API JSTracer;
+struct JS_PUBLIC_API JSContext;
+
+namespace JS {
+class Zone;
+}
+
+namespace js {
+
+class SystemAllocPolicy;
+
+namespace gc {
+class AllocSite;
+}
+
+namespace jit {
+
+class BaselineScript;
+class InliningRoot;
+class IonScript;
+class JitScript;
+class JitZone;
+
+// Magic BaselineScript value indicating Baseline compilation has been disabled.
+static constexpr uintptr_t BaselineDisabledScript = 0x1;
+
+static BaselineScript* const BaselineDisabledScriptPtr =
+ reinterpret_cast<BaselineScript*>(BaselineDisabledScript);
+
+// Magic IonScript values indicating Ion compilation has been disabled or the
+// script is being Ion-compiled off-thread.
+static constexpr uintptr_t IonDisabledScript = 0x1;
+static constexpr uintptr_t IonCompilingScript = 0x2;
+
+static IonScript* const IonDisabledScriptPtr =
+ reinterpret_cast<IonScript*>(IonDisabledScript);
+static IonScript* const IonCompilingScriptPtr =
+ reinterpret_cast<IonScript*>(IonCompilingScript);
+
+/* [SMDOC] ICScript Lifetimes
+ *
+ * An ICScript owns an array of ICEntries, each of which owns a linked
+ * list of ICStubs.
+ *
+ * A JitScript contains an embedded ICScript. If it has done any trial
+ * inlining, it also owns an InliningRoot. The InliningRoot owns all
+ * of the ICScripts that have been created for inlining into the
+ * corresponding JitScript. This ties the lifetime of the inlined
+ * ICScripts to the lifetime of the JitScript itself.
+ *
+ * We store pointers to ICScripts in two other places: on the stack in
+ * BaselineFrame, and in IC stubs for CallInlinedFunction.
+ *
+ * The ICScript pointer in a BaselineFrame either points to the
+ * ICScript embedded in the JitScript for that frame, or to an inlined
+ * ICScript owned by a caller. In each case, there must be a frame on
+ * the stack corresponding to the JitScript that owns the current
+ * ICScript, which will keep the ICScript alive.
+ *
+ * Each ICStub is owned by an ICScript and, indirectly, a
+ * JitScript. An ICStub that uses CallInlinedFunction contains an
+ * ICScript for use by the callee. The ICStub and the callee ICScript
+ * are always owned by the same JitScript, so the callee ICScript will
+ * not be freed while the ICStub is alive.
+ *
+ * The lifetime of an ICScript is independent of the lifetimes of the
+ * BaselineScript and IonScript/WarpScript to which it
+ * corresponds. They can be destroyed and recreated, and the ICScript
+ * will remain valid.
+ */
+
+class alignas(uintptr_t) ICScript final : public TrailingArray {
+ public:
+ ICScript(uint32_t warmUpCount, Offset fallbackStubsOffset, Offset endOffset,
+ uint32_t depth, InliningRoot* inliningRoot = nullptr)
+ : inliningRoot_(inliningRoot),
+ warmUpCount_(warmUpCount),
+ fallbackStubsOffset_(fallbackStubsOffset),
+ endOffset_(endOffset),
+ depth_(depth) {}
+
+ bool isInlined() const { return depth_ > 0; }
+
+ void initICEntries(JSContext* cx, JSScript* script);
+
+ ICEntry& icEntry(size_t index) {
+ MOZ_ASSERT(index < numICEntries());
+ return icEntries()[index];
+ }
+
+ ICFallbackStub* fallbackStub(size_t index) {
+ MOZ_ASSERT(index < numICEntries());
+ return fallbackStubs() + index;
+ }
+
+ ICEntry* icEntryForStub(const ICFallbackStub* stub) {
+ size_t index = stub - fallbackStubs();
+ MOZ_ASSERT(index < numICEntries());
+ return &icEntry(index);
+ }
+ ICFallbackStub* fallbackStubForICEntry(const ICEntry* entry) {
+ size_t index = entry - icEntries();
+ MOZ_ASSERT(index < numICEntries());
+ return fallbackStub(index);
+ }
+
+ InliningRoot* inliningRoot() const { return inliningRoot_; }
+ uint32_t depth() const { return depth_; }
+
+ void resetWarmUpCount(uint32_t count) { warmUpCount_ = count; }
+
+ static constexpr size_t offsetOfFirstStub(uint32_t entryIndex) {
+ return sizeof(ICScript) + entryIndex * sizeof(ICEntry) +
+ ICEntry::offsetOfFirstStub();
+ }
+
+ static constexpr Offset offsetOfWarmUpCount() {
+ return offsetof(ICScript, warmUpCount_);
+ }
+ static constexpr Offset offsetOfDepth() { return offsetof(ICScript, depth_); }
+
+ static constexpr Offset offsetOfICEntries() { return sizeof(ICScript); }
+ uint32_t numICEntries() const {
+ return numElements<ICEntry>(icEntriesOffset(), fallbackStubsOffset());
+ }
+
+ ICEntry* interpreterICEntryFromPCOffset(uint32_t pcOffset);
+
+ ICEntry& icEntryFromPCOffset(uint32_t pcOffset);
+
+ [[nodiscard]] bool addInlinedChild(JSContext* cx,
+ js::UniquePtr<ICScript> child,
+ uint32_t pcOffset);
+ ICScript* findInlinedChild(uint32_t pcOffset);
+ void removeInlinedChild(uint32_t pcOffset);
+ bool hasInlinedChild(uint32_t pcOffset);
+
+ JitScriptICStubSpace* jitScriptStubSpace();
+ void purgeOptimizedStubs(Zone* zone);
+
+ void trace(JSTracer* trc);
+
+#ifdef DEBUG
+ mozilla::HashNumber hash();
+#endif
+
+ private:
+ class CallSite {
+ public:
+ CallSite(ICScript* callee, uint32_t pcOffset)
+ : callee_(callee), pcOffset_(pcOffset) {}
+ ICScript* callee_;
+ uint32_t pcOffset_;
+ };
+
+ // If this ICScript was created for trial inlining or has another
+ // ICScript inlined into it, a pointer to the root of the inlining
+ // tree. Otherwise, nullptr.
+ InliningRoot* inliningRoot_ = nullptr;
+
+ // ICScripts that have been inlined into this ICScript.
+ js::UniquePtr<Vector<CallSite>> inlinedChildren_;
+
+ // Number of times this copy of the script has been called or has had
+ // backedges taken. Reset if the script's JIT code is forcibly discarded.
+ // See also the ScriptWarmUpData class.
+ mozilla::Atomic<uint32_t, mozilla::Relaxed> warmUpCount_ = {};
+
+ // The offset of the ICFallbackStub array.
+ Offset fallbackStubsOffset_;
+
+ // The size of this allocation.
+ Offset endOffset_;
+
+ // The inlining depth of this ICScript. 0 for the inlining root.
+ uint32_t depth_;
+
+ Offset icEntriesOffset() const { return offsetOfICEntries(); }
+ Offset fallbackStubsOffset() const { return fallbackStubsOffset_; }
+ Offset endOffset() const { return endOffset_; }
+
+ ICEntry* icEntries() { return offsetToPointer<ICEntry>(icEntriesOffset()); }
+
+ ICFallbackStub* fallbackStubs() {
+ return offsetToPointer<ICFallbackStub>(fallbackStubsOffset());
+ }
+
+ JitScript* outerJitScript();
+
+ friend class JitScript;
+};
+
+// [SMDOC] JitScript
+//
+// JitScript stores type inference data, Baseline ICs and other JIT-related data
+// for a script. Scripts with a JitScript can run in the Baseline Interpreter.
+//
+// IC Data
+// =======
+// All IC data for Baseline (Interpreter and JIT) is stored in an ICScript. Each
+// JitScript contains an ICScript as the last field. Additional free-standing
+// ICScripts may be created during trial inlining. Ion has its own IC chains
+// stored in IonScript.
+//
+// For each IC we store an ICEntry, which points to the first ICStub in the
+// chain, and an ICFallbackStub. Note that multiple stubs in the same zone can
+// share Baseline IC code. This works because the stub data is stored in the
+// ICStub instead of being baked into the stub code.
+//
+// Storing this separate from BaselineScript allows us to use the same ICs in
+// the Baseline Interpreter and Baseline JIT. It also simplifies debug mode OSR
+// because the JitScript can be reused when we have to recompile the
+// BaselineScript.
+//
+// The JitScript contains a stub space. This stores the "can GC" CacheIR stubs.
+// These stubs are never purged before destroying the JitScript. Other stubs are
+// stored in the optimized stub space stored in JitZone and can be purged more
+// eagerly. See JitScript::purgeOptimizedStubs.
+//
+// An ICScript contains a list of IC entries and a list of fallback stubs.
+// There's one ICEntry and ICFallbackStub for each JOF_IC bytecode op.
+//
+// The ICScript also contains the warmUpCount for the script.
+//
+// Inlining Data
+// =============
+// JitScript also contains a list of Warp compilations inlining this script, for
+// invalidation.
+//
+// Memory Layout
+// =============
+// JitScript contains an ICScript as the last field. ICScript has trailing
+// (variable length) arrays for ICEntry and ICFallbackStub. The memory layout is
+// as follows:
+//
+// Item | Offset
+// ------------------------+------------------------
+// JitScript | 0
+// -->ICScript (field) |
+// ICEntry[] | icEntriesOffset()
+// ICFallbackStub[] | fallbackStubsOffset()
+//
+// These offsets are also used to compute numICEntries.
+class alignas(uintptr_t) JitScript final : public TrailingArray {
+ friend class ::JSScript;
+
+ // Allocated space for Can-GC CacheIR stubs.
+ JitScriptICStubSpace jitScriptStubSpace_ = {};
+
+ // Profile string used by the profiler for Baseline Interpreter frames.
+ const char* profileString_ = nullptr;
+
+ // Baseline code for the script. Either nullptr, BaselineDisabledScriptPtr or
+ // a valid BaselineScript*.
+ GCStructPtr<BaselineScript*> baselineScript_;
+
+ // Ion code for this script. Either nullptr, IonDisabledScriptPtr,
+ // IonCompilingScriptPtr or a valid IonScript*.
+ GCStructPtr<IonScript*> ionScript_;
+
+ // For functions that need a CallObject and/or NamedLambdaObject, the template
+ // objects used by the Baseline JIT and Ion. If the function needs both a
+ // named lambda object and a call object, the named lambda object template is
+ // linked via the call object's enclosing environment. This field is set the
+ // first time the Baseline JIT compiles this script.
+ mozilla::Maybe<HeapPtr<EnvironmentObject*>> templateEnv_;
+
+ // Analysis data computed lazily the first time this script is compiled or
+ // inlined by WarpBuilder.
+ mozilla::Maybe<bool> usesEnvironmentChain_;
+
+ // The size of this allocation.
+ Offset endOffset_ = 0;
+
+ struct Flags {
+    // Flag set when discarding JIT code to indicate that this script is on
+    // the stack and its type information and JIT code should not be
+    // discarded.
+ bool active : 1;
+
+ // True if this script entered Ion via OSR at a loop header.
+ bool hadIonOSR : 1;
+ };
+ Flags flags_ = {}; // Zero-initialize flags.
+
+ js::UniquePtr<InliningRoot> inliningRoot_;
+
+#ifdef DEBUG
+ // If the last warp compilation invalidated because of TranspiledCacheIR
+ // bailouts, this is a hash of the ICScripts used in that compilation.
+ // When recompiling, we assert that the hash has changed.
+ mozilla::Maybe<mozilla::HashNumber> failedICHash_;
+
+ // To avoid pathological cases, we skip the check if we have purged
+ // stubs due to GC pressure.
+ bool hasPurgedStubs_ = false;
+#endif
+
+ // List of allocation sites referred to by ICs in this script.
+ Vector<gc::AllocSite*, 0, SystemAllocPolicy> allocSites_;
+
+ ICScript icScript_;
+ // End of fields.
+
+ Offset endOffset() const { return endOffset_; }
+
+ public:
+ JitScript(JSScript* script, Offset fallbackStubsOffset, Offset endOffset,
+ const char* profileString);
+
+ ~JitScript();
+
+ [[nodiscard]] bool ensureHasCachedBaselineJitData(JSContext* cx,
+ HandleScript script);
+ [[nodiscard]] bool ensureHasCachedIonData(JSContext* cx, HandleScript script);
+
+ void setHadIonOSR() { flags_.hadIonOSR = true; }
+ bool hadIonOSR() const { return flags_.hadIonOSR; }
+
+ uint32_t numICEntries() const { return icScript_.numICEntries(); }
+
+ bool active() const { return flags_.active; }
+ void setActive() { flags_.active = true; }
+ void resetActive() { flags_.active = false; }
+
+ void ensureProfileString(JSContext* cx, JSScript* script);
+
+ const char* profileString() const {
+ MOZ_ASSERT(profileString_);
+ return profileString_;
+ }
+
+ static void Destroy(Zone* zone, JitScript* script);
+
+ static constexpr Offset offsetOfICEntries() { return sizeof(JitScript); }
+
+ static constexpr size_t offsetOfBaselineScript() {
+ return offsetof(JitScript, baselineScript_);
+ }
+ static constexpr size_t offsetOfIonScript() {
+ return offsetof(JitScript, ionScript_);
+ }
+ static constexpr size_t offsetOfICScript() {
+ return offsetof(JitScript, icScript_);
+ }
+ static constexpr size_t offsetOfWarmUpCount() {
+ return offsetOfICScript() + ICScript::offsetOfWarmUpCount();
+ }
+
+ uint32_t warmUpCount() const { return icScript_.warmUpCount_; }
+ void incWarmUpCount() { icScript_.warmUpCount_++; }
+ void resetWarmUpCount(uint32_t count);
+
+ void prepareForDestruction(Zone* zone);
+
+ JitScriptICStubSpace* jitScriptStubSpace() { return &jitScriptStubSpace_; }
+
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, size_t* data,
+ size_t* fallbackStubs) const {
+ *data += mallocSizeOf(this);
+
+ // |data| already includes the ICStubSpace itself, so use
+ // sizeOfExcludingThis.
+ *fallbackStubs += jitScriptStubSpace_.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ ICEntry& icEntry(size_t index) { return icScript_.icEntry(index); }
+
+ ICFallbackStub* fallbackStub(size_t index) {
+ return icScript_.fallbackStub(index);
+ }
+
+ ICEntry* icEntryForStub(const ICFallbackStub* stub) {
+ return icScript_.icEntryForStub(stub);
+ }
+ ICFallbackStub* fallbackStubForICEntry(const ICEntry* entry) {
+ return icScript_.fallbackStubForICEntry(entry);
+ }
+
+ void trace(JSTracer* trc);
+ void purgeOptimizedStubs(JSScript* script);
+
+ ICEntry& icEntryFromPCOffset(uint32_t pcOffset) {
+ return icScript_.icEntryFromPCOffset(pcOffset);
+  }
+
+ size_t allocBytes() const { return endOffset(); }
+
+ EnvironmentObject* templateEnvironment() const { return templateEnv_.ref(); }
+
+ bool usesEnvironmentChain() const { return *usesEnvironmentChain_; }
+
+ gc::AllocSite* createAllocSite(JSScript* script);
+
+ bool resetAllocSites(bool resetNurserySites, bool resetPretenuredSites);
+
+ private:
+ // Methods to set baselineScript_ to a BaselineScript*, nullptr, or
+ // BaselineDisabledScriptPtr.
+ void setBaselineScriptImpl(JSScript* script, BaselineScript* baselineScript);
+ void setBaselineScriptImpl(JS::GCContext* gcx, JSScript* script,
+ BaselineScript* baselineScript);
+
+ public:
+ // Methods for getting/setting/clearing a BaselineScript*.
+ bool hasBaselineScript() const {
+ bool res = baselineScript_ && baselineScript_ != BaselineDisabledScriptPtr;
+ MOZ_ASSERT_IF(!res, !hasIonScript());
+ return res;
+ }
+ BaselineScript* baselineScript() const {
+ MOZ_ASSERT(hasBaselineScript());
+ return baselineScript_;
+ }
+ void setBaselineScript(JSScript* script, BaselineScript* baselineScript) {
+ MOZ_ASSERT(!hasBaselineScript());
+ setBaselineScriptImpl(script, baselineScript);
+ MOZ_ASSERT(hasBaselineScript());
+ }
+ [[nodiscard]] BaselineScript* clearBaselineScript(JS::GCContext* gcx,
+ JSScript* script) {
+ BaselineScript* baseline = baselineScript();
+ setBaselineScriptImpl(gcx, script, nullptr);
+ return baseline;
+ }
+
+ private:
+ // Methods to set ionScript_ to an IonScript*, nullptr, or one of the special
+ // Ion{Disabled,Compiling}ScriptPtr values.
+ void setIonScriptImpl(JS::GCContext* gcx, JSScript* script,
+ IonScript* ionScript);
+ void setIonScriptImpl(JSScript* script, IonScript* ionScript);
+
+ public:
+ // Methods for getting/setting/clearing an IonScript*.
+ bool hasIonScript() const {
+ bool res = ionScript_ && ionScript_ != IonDisabledScriptPtr &&
+ ionScript_ != IonCompilingScriptPtr;
+ MOZ_ASSERT_IF(res, baselineScript_);
+ return res;
+ }
+ IonScript* ionScript() const {
+ MOZ_ASSERT(hasIonScript());
+ return ionScript_;
+ }
+ void setIonScript(JSScript* script, IonScript* ionScript) {
+ MOZ_ASSERT(!hasIonScript());
+ setIonScriptImpl(script, ionScript);
+ MOZ_ASSERT(hasIonScript());
+ }
+ [[nodiscard]] IonScript* clearIonScript(JS::GCContext* gcx,
+ JSScript* script) {
+ IonScript* ion = ionScript();
+ setIonScriptImpl(gcx, script, nullptr);
+ return ion;
+ }
+
+ // Methods for off-thread compilation.
+ bool isIonCompilingOffThread() const {
+ return ionScript_ == IonCompilingScriptPtr;
+ }
+ void setIsIonCompilingOffThread(JSScript* script) {
+ MOZ_ASSERT(ionScript_ == nullptr);
+ setIonScriptImpl(script, IonCompilingScriptPtr);
+ }
+ void clearIsIonCompilingOffThread(JSScript* script) {
+ MOZ_ASSERT(isIonCompilingOffThread());
+ setIonScriptImpl(script, nullptr);
+ }
+ ICScript* icScript() { return &icScript_; }
+
+ bool hasInliningRoot() const { return !!inliningRoot_; }
+ InliningRoot* inliningRoot() const { return inliningRoot_.get(); }
+ InliningRoot* getOrCreateInliningRoot(JSContext* cx, JSScript* script);
+
+#ifdef DEBUG
+ bool hasFailedICHash() const { return failedICHash_.isSome(); }
+ mozilla::HashNumber getFailedICHash() { return failedICHash_.extract(); }
+ void setFailedICHash(mozilla::HashNumber hash) {
+ MOZ_ASSERT(failedICHash_.isNothing());
+ if (!hasPurgedStubs_) {
+ failedICHash_.emplace(hash);
+ }
+ }
+#endif
+};
+
+// Ensures no JitScripts are purged in the current zone.
+class MOZ_RAII AutoKeepJitScripts {
+ jit::JitZone* zone_;
+ bool prev_;
+
+ AutoKeepJitScripts(const AutoKeepJitScripts&) = delete;
+ void operator=(const AutoKeepJitScripts&) = delete;
+
+ public:
+ explicit inline AutoKeepJitScripts(JSContext* cx);
+ inline ~AutoKeepJitScripts();
+};
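+
+// Typical usage (sketch): keep JitScripts alive while lazily creating one for
+// a script, e.g.
+//
+//   jit::AutoKeepJitScripts keepJitScripts(cx);
+//   if (!script->ensureHasJitScript(cx, keepJitScripts)) {
+//     return false;
+//   }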
+
+// Mark JitScripts on the stack as active, so that they are not discarded
+// during GC.
+void MarkActiveJitScripts(Zone* zone);
+
+#ifdef JS_STRUCTURED_SPEW
+void JitSpewBaselineICStats(JSScript* script, const char* dumpReason);
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitScript_h */
diff --git a/js/src/jit/JitSpewer.cpp b/js/src/jit/JitSpewer.cpp
new file mode 100644
index 0000000000..f00efe7b79
--- /dev/null
+++ b/js/src/jit/JitSpewer.cpp
@@ -0,0 +1,660 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef JS_JITSPEW
+
+# include "jit/JitSpewer.h"
+
+# include "mozilla/Atomics.h"
+# include "mozilla/Sprintf.h"
+
+# include "jit/MIR.h"
+# include "jit/MIRGenerator.h"
+# include "jit/MIRGraph.h"
+# include "threading/LockGuard.h"
+# include "util/GetPidProvider.h" // getpid()
+# include "vm/MutexIDs.h"
+
+# ifndef JIT_SPEW_DIR
+# if defined(_WIN32)
+# define JIT_SPEW_DIR "."
+# elif defined(__ANDROID__)
+# define JIT_SPEW_DIR "/data/local/tmp"
+# else
+# define JIT_SPEW_DIR "/tmp"
+# endif
+# endif
+
+using namespace js;
+using namespace js::jit;
+
+class IonSpewer {
+ private:
+ Mutex outputLock_ MOZ_UNANNOTATED;
+ Fprinter jsonOutput_;
+ bool firstFunction_;
+ bool asyncLogging_;
+ bool inited_;
+
+ void release();
+
+ public:
+ IonSpewer()
+ : outputLock_(mutexid::IonSpewer),
+ firstFunction_(false),
+ asyncLogging_(false),
+ inited_(false) {}
+
+ // File output is terminated safely upon destruction.
+ ~IonSpewer();
+
+ bool init();
+ bool isEnabled() { return inited_; }
+ void setAsyncLogging(bool incremental) { asyncLogging_ = incremental; }
+ bool getAsyncLogging() { return asyncLogging_; }
+
+ void beginFunction();
+ void spewPass(GraphSpewer* gs);
+ void endFunction(GraphSpewer* gs);
+};
+
+// IonSpewer singleton.
+static IonSpewer ionspewer;
+
+static bool LoggingChecked = false;
+static_assert(JitSpew_Terminator <= 64,
+ "Increase the size of the LoggingBits global.");
+static uint64_t LoggingBits = 0;
+static mozilla::Atomic<uint32_t, mozilla::Relaxed> filteredOutCompilations(0);
+
+static const char* const ChannelNames[] = {
+# define JITSPEW_CHANNEL(name) #name,
+ JITSPEW_CHANNEL_LIST(JITSPEW_CHANNEL)
+# undef JITSPEW_CHANNEL
+};
+
+static size_t ChannelIndentLevel[] = {
+# define JITSPEW_CHANNEL(name) 0,
+ JITSPEW_CHANNEL_LIST(JITSPEW_CHANNEL)
+# undef JITSPEW_CHANNEL
+};
+
+// The IONFILTER environment variable specifies an expression to select only
+// certain functions for spewing, to reduce the amount of log data generated.
+static const char* gSpewFilter = nullptr;
+
+static bool FilterContainsLocation(JSScript* function) {
+ // If there is no filter we accept all outputs.
+ if (!gSpewFilter || !gSpewFilter[0]) {
+ return true;
+ }
+
+ // Disable wasm output when filter is set.
+ if (!function) {
+ return false;
+ }
+
+ const char* filename = function->filename();
+ const size_t line = function->lineno();
+ const size_t filelen = strlen(filename);
+ const char* index = strstr(gSpewFilter, filename);
+ while (index) {
+ if (index == gSpewFilter || index[-1] == ',') {
+ if (index[filelen] == 0 || index[filelen] == ',') {
+ return true;
+ }
+ if (index[filelen] == ':' && line != size_t(-1)) {
+ size_t read_line = strtoul(&index[filelen + 1], nullptr, 10);
+ if (read_line == line) {
+ return true;
+ }
+ }
+ }
+ index = strstr(index + filelen, filename);
+ }
+ return false;
+}
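+
+// Example filters accepted by the parser above (comma-separated entries, each
+// a filename optionally followed by ":<line>"):
+//
+//   IONFILTER=main.js             spew every function from main.js
+//   IONFILTER=main.js,util.js:42  also spew the util.js function starting at
+//                                 line 42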
+
+void jit::EnableIonDebugSyncLogging() {
+ ionspewer.init();
+ ionspewer.setAsyncLogging(false);
+ EnableChannel(JitSpew_IonSyncLogs);
+}
+
+void jit::EnableIonDebugAsyncLogging() {
+ ionspewer.init();
+ ionspewer.setAsyncLogging(true);
+}
+
+void IonSpewer::release() {
+ if (jsonOutput_.isInitialized()) {
+ jsonOutput_.finish();
+ }
+ inited_ = false;
+}
+
+bool IonSpewer::init() {
+ if (inited_) {
+ return true;
+ }
+
+ // Filter expression for spewing
+ gSpewFilter = getenv("IONFILTER");
+
+ const size_t bufferLength = 256;
+ char jsonBuffer[bufferLength];
+ const char* jsonFilename = JIT_SPEW_DIR "/ion.json";
+
+ const char* usePid = getenv("ION_SPEW_BY_PID");
+ if (usePid && *usePid != 0) {
+ uint32_t pid = getpid();
+ size_t len;
+ len = SprintfLiteral(jsonBuffer, JIT_SPEW_DIR "/ion%" PRIu32 ".json", pid);
+ if (bufferLength <= len) {
+ fprintf(stderr, "Warning: IonSpewer::init: Cannot serialize file name.");
+ return false;
+ }
+ jsonFilename = jsonBuffer;
+ }
+
+ if (!jsonOutput_.init(jsonFilename)) {
+ release();
+ return false;
+ }
+
+ jsonOutput_.printf("{\n \"functions\": [\n");
+ firstFunction_ = true;
+
+ inited_ = true;
+ return true;
+}
+
+void IonSpewer::beginFunction() {
+  // If we are doing synchronous logging, we spew everything as we go, which
+  // is useful in case of a failure during compilation. When using this mode,
+  // it is recommended to disable off-thread compilation.
+ if (!getAsyncLogging() && !firstFunction_) {
+ LockGuard<Mutex> guard(outputLock_);
+ jsonOutput_.put(","); // separate functions
+ }
+}
+
+void IonSpewer::spewPass(GraphSpewer* gs) {
+ if (!getAsyncLogging()) {
+ LockGuard<Mutex> guard(outputLock_);
+ gs->dump(jsonOutput_);
+ }
+}
+
+void IonSpewer::endFunction(GraphSpewer* gs) {
+ LockGuard<Mutex> guard(outputLock_);
+ if (getAsyncLogging() && !firstFunction_) {
+ jsonOutput_.put(","); // separate functions
+ }
+
+ gs->dump(jsonOutput_);
+ firstFunction_ = false;
+}
+
+IonSpewer::~IonSpewer() {
+ if (!inited_) {
+ return;
+ }
+
+ jsonOutput_.printf("\n]}\n");
+ release();
+}
+
+GraphSpewer::GraphSpewer(TempAllocator* alloc)
+ : graph_(nullptr),
+ jsonPrinter_(alloc->lifoAlloc()),
+ jsonSpewer_(jsonPrinter_) {}
+
+void GraphSpewer::init(MIRGraph* graph, JSScript* function) {
+ MOZ_ASSERT(!isSpewing());
+ if (!ionspewer.isEnabled()) {
+ return;
+ }
+
+ if (!FilterContainsLocation(function)) {
+    // Filter out the logs for this compilation.
+ filteredOutCompilations++;
+ MOZ_ASSERT(!isSpewing());
+ return;
+ }
+
+ graph_ = graph;
+ MOZ_ASSERT(isSpewing());
+}
+
+void GraphSpewer::beginFunction(JSScript* function) {
+ if (!isSpewing()) {
+ return;
+ }
+ jsonSpewer_.beginFunction(function);
+ ionspewer.beginFunction();
+}
+
+void GraphSpewer::beginWasmFunction(unsigned funcIndex) {
+ if (!isSpewing()) {
+ return;
+ }
+ jsonSpewer_.beginWasmFunction(funcIndex);
+ ionspewer.beginFunction();
+}
+
+void GraphSpewer::spewPass(const char* pass) {
+ if (!isSpewing()) {
+ return;
+ }
+
+ jsonSpewer_.beginPass(pass);
+ jsonSpewer_.spewMIR(graph_);
+ jsonSpewer_.spewLIR(graph_);
+ jsonSpewer_.endPass();
+
+ ionspewer.spewPass(this);
+
+ // As this function is used for debugging, we ignore any of the previous
+ // failures and ensure there is enough ballast space, such that we do not
+ // exhaust the ballast space before running the next phase.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!graph_->alloc().ensureBallast()) {
+ oomUnsafe.crash(
+ "Could not ensure enough ballast space after spewing graph "
+ "information.");
+ }
+}
+
+void GraphSpewer::spewPass(const char* pass, BacktrackingAllocator* ra) {
+ if (!isSpewing()) {
+ return;
+ }
+
+ jsonSpewer_.beginPass(pass);
+ jsonSpewer_.spewMIR(graph_);
+ jsonSpewer_.spewLIR(graph_);
+ jsonSpewer_.spewRanges(ra);
+ jsonSpewer_.endPass();
+
+ ionspewer.spewPass(this);
+}
+
+void GraphSpewer::endFunction() {
+ if (!ionspewer.isEnabled()) {
+ return;
+ }
+
+ if (!isSpewing()) {
+ MOZ_ASSERT(filteredOutCompilations != 0);
+ filteredOutCompilations--;
+ return;
+ }
+
+ jsonSpewer_.endFunction();
+
+ ionspewer.endFunction(this);
+ graph_ = nullptr;
+}
+
+void GraphSpewer::dump(Fprinter& jsonOut) {
+ if (!jsonPrinter_.hadOutOfMemory()) {
+ jsonPrinter_.exportInto(jsonOut);
+ } else {
+ jsonOut.put("{}");
+ }
+ jsonOut.flush();
+ jsonPrinter_.clear();
+}
+
+void jit::SpewBeginFunction(MIRGenerator* mir, JSScript* function) {
+ MIRGraph* graph = &mir->graph();
+ mir->graphSpewer().init(graph, function);
+ mir->graphSpewer().beginFunction(function);
+}
+
+void jit::SpewBeginWasmFunction(MIRGenerator* mir, unsigned funcIndex) {
+ MIRGraph* graph = &mir->graph();
+ mir->graphSpewer().init(graph, nullptr);
+ mir->graphSpewer().beginWasmFunction(funcIndex);
+}
+
+AutoSpewEndFunction::~AutoSpewEndFunction() {
+ mir_->graphSpewer().endFunction();
+}
+
+Fprinter& jit::JitSpewPrinter() {
+ static Fprinter out;
+ return out;
+}
+
+static void PrintHelpAndExit(int status = 0) {
+ fflush(nullptr);
+ printf(
+ "\n"
+ "usage: IONFLAGS=option,option,option,... where options can be:\n"
+ "\n"
+ " aborts Compilation abort messages\n"
+ " scripts Compiled scripts\n"
+ " mir MIR information\n"
+ " prune Prune unused branches\n"
+ " escape Escape analysis\n"
+ " alias Alias analysis\n"
+ " alias-sum Alias analysis: shows summaries for every block\n"
+ " gvn Global Value Numbering\n"
+ " licm Loop invariant code motion\n"
+ " flac Fold linear arithmetic constants\n"
+ " eaa Effective address analysis\n"
+ " sink Sink transformation\n"
+ " regalloc Register allocation\n"
+ " inline Inlining\n"
+ " snapshots Snapshot information\n"
+ " codegen Native code generation\n"
+ " bailouts Bailouts\n"
+ " caches Inline caches\n"
+ " osi Invalidation\n"
+ " safepoints Safepoints\n"
+ " pools Literal Pools (ARM only for now)\n"
+ " cacheflush Instruction Cache flushes (ARM only for now)\n"
+ " range Range Analysis\n"
+ " wasmbce Wasm Bounds Check Elimination\n"
+ " shapeguards Redundant shape guard elimination\n"
+ " gcbarriers Redundant GC barrier elimination\n"
+ " logs JSON visualization logging\n"
+ " logs-sync Same as logs, but flushes between each pass (sync. "
+ "compiled functions only).\n"
+ " profiling Profiling-related information\n"
+ " dump-mir-expr Dump the MIR expressions\n"
+ " scriptstats Tracelogger summary stats\n"
+ " warp-snapshots WarpSnapshots created by WarpOracle\n"
+ " warp-transpiler Warp CacheIR transpiler\n"
+ " warp-trial-inlining Trial inlining for Warp\n"
+ " all Everything\n"
+ "\n"
+ " bl-aborts Baseline compiler abort messages\n"
+ " bl-scripts Baseline script-compilation\n"
+ " bl-op Baseline compiler detailed op-specific messages\n"
+ " bl-ic Baseline inline-cache messages\n"
+ " bl-ic-fb Baseline IC fallback stub messages\n"
+ " bl-osr Baseline IC OSR messages\n"
+ " bl-bails Baseline bailouts\n"
+ " bl-dbg-osr Baseline debug mode on stack recompile messages\n"
+ " bl-all All baseline spew\n"
+ "\n"
+ "See also SPEW=help for information on the Structured Spewer."
+ "\n");
+ exit(status);
+}
+
+static bool IsFlag(const char* found, const char* flag) {
+ return strlen(found) == strlen(flag) && strcmp(found, flag) == 0;
+}
+
+void jit::CheckLogging() {
+ if (LoggingChecked) {
+ return;
+ }
+
+ LoggingChecked = true;
+
+ char* env = getenv("IONFLAGS");
+ if (!env) {
+ return;
+ }
+
+ const char* found = strtok(env, ",");
+ while (found) {
+ fprintf(stderr, "found tag: %s\n", found);
+    // Check whether the token we just read is a known flag.
+ if (IsFlag(found, "help")) {
+ PrintHelpAndExit();
+ } else if (IsFlag(found, "aborts")) {
+ EnableChannel(JitSpew_IonAbort);
+ } else if (IsFlag(found, "prune")) {
+ EnableChannel(JitSpew_Prune);
+ } else if (IsFlag(found, "escape")) {
+ EnableChannel(JitSpew_Escape);
+ } else if (IsFlag(found, "alias")) {
+ EnableChannel(JitSpew_Alias);
+ } else if (IsFlag(found, "alias-sum")) {
+ EnableChannel(JitSpew_AliasSummaries);
+ } else if (IsFlag(found, "scripts")) {
+ EnableChannel(JitSpew_IonScripts);
+ } else if (IsFlag(found, "mir")) {
+ EnableChannel(JitSpew_IonMIR);
+ } else if (IsFlag(found, "gvn")) {
+ EnableChannel(JitSpew_GVN);
+ } else if (IsFlag(found, "range")) {
+ EnableChannel(JitSpew_Range);
+ } else if (IsFlag(found, "wasmbce")) {
+ EnableChannel(JitSpew_WasmBCE);
+ } else if (IsFlag(found, "licm")) {
+ EnableChannel(JitSpew_LICM);
+ } else if (IsFlag(found, "flac")) {
+ EnableChannel(JitSpew_FLAC);
+ } else if (IsFlag(found, "eaa")) {
+ EnableChannel(JitSpew_EAA);
+ } else if (IsFlag(found, "sink")) {
+ EnableChannel(JitSpew_Sink);
+ } else if (IsFlag(found, "regalloc")) {
+ EnableChannel(JitSpew_RegAlloc);
+ } else if (IsFlag(found, "inline")) {
+ EnableChannel(JitSpew_Inlining);
+ } else if (IsFlag(found, "snapshots")) {
+ EnableChannel(JitSpew_IonSnapshots);
+ } else if (IsFlag(found, "codegen")) {
+ EnableChannel(JitSpew_Codegen);
+ } else if (IsFlag(found, "bailouts")) {
+ EnableChannel(JitSpew_IonBailouts);
+ } else if (IsFlag(found, "osi")) {
+ EnableChannel(JitSpew_IonInvalidate);
+ } else if (IsFlag(found, "caches")) {
+ EnableChannel(JitSpew_IonIC);
+ } else if (IsFlag(found, "safepoints")) {
+ EnableChannel(JitSpew_Safepoints);
+ } else if (IsFlag(found, "pools")) {
+ EnableChannel(JitSpew_Pools);
+ } else if (IsFlag(found, "cacheflush")) {
+ EnableChannel(JitSpew_CacheFlush);
+ } else if (IsFlag(found, "shapeguards")) {
+ EnableChannel(JitSpew_RedundantShapeGuards);
+ } else if (IsFlag(found, "gcbarriers")) {
+ EnableChannel(JitSpew_RedundantGCBarriers);
+ } else if (IsFlag(found, "logs")) {
+ EnableIonDebugAsyncLogging();
+ } else if (IsFlag(found, "logs-sync")) {
+ EnableIonDebugSyncLogging();
+ } else if (IsFlag(found, "profiling")) {
+ EnableChannel(JitSpew_Profiling);
+ } else if (IsFlag(found, "dump-mir-expr")) {
+ EnableChannel(JitSpew_MIRExpressions);
+ } else if (IsFlag(found, "scriptstats")) {
+ EnableChannel(JitSpew_ScriptStats);
+ } else if (IsFlag(found, "warp-snapshots")) {
+ EnableChannel(JitSpew_WarpSnapshots);
+ } else if (IsFlag(found, "warp-transpiler")) {
+ EnableChannel(JitSpew_WarpTranspiler);
+ } else if (IsFlag(found, "warp-trial-inlining")) {
+ EnableChannel(JitSpew_WarpTrialInlining);
+ } else if (IsFlag(found, "all")) {
+ LoggingBits = uint64_t(-1);
+ } else if (IsFlag(found, "bl-aborts")) {
+ EnableChannel(JitSpew_BaselineAbort);
+ } else if (IsFlag(found, "bl-scripts")) {
+ EnableChannel(JitSpew_BaselineScripts);
+ } else if (IsFlag(found, "bl-op")) {
+ EnableChannel(JitSpew_BaselineOp);
+ } else if (IsFlag(found, "bl-ic")) {
+ EnableChannel(JitSpew_BaselineIC);
+ } else if (IsFlag(found, "bl-ic-fb")) {
+ EnableChannel(JitSpew_BaselineICFallback);
+ } else if (IsFlag(found, "bl-osr")) {
+ EnableChannel(JitSpew_BaselineOSR);
+ } else if (IsFlag(found, "bl-bails")) {
+ EnableChannel(JitSpew_BaselineBailouts);
+ } else if (IsFlag(found, "bl-dbg-osr")) {
+ EnableChannel(JitSpew_BaselineDebugModeOSR);
+ } else if (IsFlag(found, "bl-all")) {
+ EnableChannel(JitSpew_BaselineAbort);
+ EnableChannel(JitSpew_BaselineScripts);
+ EnableChannel(JitSpew_BaselineOp);
+ EnableChannel(JitSpew_BaselineIC);
+ EnableChannel(JitSpew_BaselineICFallback);
+ EnableChannel(JitSpew_BaselineOSR);
+ EnableChannel(JitSpew_BaselineBailouts);
+ EnableChannel(JitSpew_BaselineDebugModeOSR);
+ } else {
+ fprintf(stderr, "Unknown flag.\n");
+ PrintHelpAndExit(64);
+ }
+ found = strtok(nullptr, ",");
+ }
+
+ FILE* spewfh = stderr;
+ const char* filename = getenv("ION_SPEW_FILENAME");
+ if (filename && *filename) {
+ char actual_filename[2048] = {0};
+ SprintfLiteral(actual_filename, "%s.%d", filename, getpid());
+ spewfh = fopen(actual_filename, "w");
+ MOZ_RELEASE_ASSERT(spewfh);
+ setbuf(spewfh, nullptr); // Make unbuffered
+ }
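+  // For example, with ION_SPEW_FILENAME=/tmp/ionspew (an illustrative path),
+  // a process with pid 12345 writes its spew to /tmp/ionspew.12345; when the
+  // variable is unset, spew goes to stderr.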
+ JitSpewPrinter().init(spewfh);
+}
+
+JitSpewIndent::JitSpewIndent(JitSpewChannel channel) : channel_(channel) {
+ ChannelIndentLevel[channel]++;
+}
+
+JitSpewIndent::~JitSpewIndent() { ChannelIndentLevel[channel_]--; }
+
+void jit::JitSpewStartVA(JitSpewChannel channel, const char* fmt, va_list ap) {
+ if (!JitSpewEnabled(channel)) {
+ return;
+ }
+
+ JitSpewHeader(channel);
+ Fprinter& out = JitSpewPrinter();
+ out.vprintf(fmt, ap);
+}
+
+void jit::JitSpewContVA(JitSpewChannel channel, const char* fmt, va_list ap) {
+ if (!JitSpewEnabled(channel)) {
+ return;
+ }
+
+ Fprinter& out = JitSpewPrinter();
+ out.vprintf(fmt, ap);
+}
+
+void jit::JitSpewFin(JitSpewChannel channel) {
+ if (!JitSpewEnabled(channel)) {
+ return;
+ }
+
+ Fprinter& out = JitSpewPrinter();
+ out.put("\n");
+}
+
+void jit::JitSpewVA(JitSpewChannel channel, const char* fmt, va_list ap) {
+ JitSpewStartVA(channel, fmt, ap);
+ JitSpewFin(channel);
+}
+
+void jit::JitSpew(JitSpewChannel channel, const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ JitSpewVA(channel, fmt, ap);
+ va_end(ap);
+}
+
+void jit::JitSpewDef(JitSpewChannel channel, const char* str,
+ MDefinition* def) {
+ if (!JitSpewEnabled(channel)) {
+ return;
+ }
+
+ JitSpewHeader(channel);
+ Fprinter& out = JitSpewPrinter();
+ out.put(str);
+ def->dump(out);
+ def->dumpLocation(out);
+}
+
+void jit::JitSpewStart(JitSpewChannel channel, const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ JitSpewStartVA(channel, fmt, ap);
+ va_end(ap);
+}
+void jit::JitSpewCont(JitSpewChannel channel, const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ JitSpewContVA(channel, fmt, ap);
+ va_end(ap);
+}
+
+void jit::JitSpewHeader(JitSpewChannel channel) {
+ if (!JitSpewEnabled(channel)) {
+ return;
+ }
+
+ Fprinter& out = JitSpewPrinter();
+ out.printf("[%s] ", ChannelNames[channel]);
+ for (size_t i = ChannelIndentLevel[channel]; i != 0; i--) {
+ out.put(" ");
+ }
+}
+
+bool jit::JitSpewEnabled(JitSpewChannel channel) {
+ MOZ_ASSERT(LoggingChecked);
+ return (LoggingBits & (uint64_t(1) << uint32_t(channel))) &&
+ !filteredOutCompilations;
+}
+
+void jit::EnableChannel(JitSpewChannel channel) {
+ MOZ_ASSERT(LoggingChecked);
+ LoggingBits |= uint64_t(1) << uint32_t(channel);
+}
+
+void jit::DisableChannel(JitSpewChannel channel) {
+ MOZ_ASSERT(LoggingChecked);
+ LoggingBits &= ~(uint64_t(1) << uint32_t(channel));
+}
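+
+// Each channel owns one bit of LoggingBits, so JitSpewEnabled() amounts to a
+// single mask test (plus the filteredOutCompilations check). For instance,
+// EnableChannel(JitSpew_GVN) sets bit uint32_t(JitSpew_GVN) and
+// DisableChannel(JitSpew_GVN) clears that same bit.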
+
+const char* js::jit::ValTypeToString(JSValueType type) {
+ switch (type) {
+ case JSVAL_TYPE_DOUBLE:
+ return "Double";
+ case JSVAL_TYPE_INT32:
+ return "Int32";
+ case JSVAL_TYPE_BOOLEAN:
+ return "Boolean";
+ case JSVAL_TYPE_UNDEFINED:
+ return "Undefined";
+ case JSVAL_TYPE_NULL:
+ return "Null";
+ case JSVAL_TYPE_MAGIC:
+ return "Magic";
+ case JSVAL_TYPE_STRING:
+ return "String";
+ case JSVAL_TYPE_SYMBOL:
+ return "Symbol";
+ case JSVAL_TYPE_PRIVATE_GCTHING:
+ return "PrivateGCThing";
+ case JSVAL_TYPE_BIGINT:
+ return "BigInt";
+ case JSVAL_TYPE_OBJECT:
+ return "Object";
+ case JSVAL_TYPE_UNKNOWN:
+ return "None";
+ default:
+ MOZ_CRASH("Unknown JSValueType");
+ }
+}
+
+#endif /* JS_JITSPEW */
diff --git a/js/src/jit/JitSpewer.h b/js/src/jit/JitSpewer.h
new file mode 100644
index 0000000000..5706cedb30
--- /dev/null
+++ b/js/src/jit/JitSpewer.h
@@ -0,0 +1,286 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitSpewer_h
+#define jit_JitSpewer_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerPrintfMacros.h"
+
+#include <stdarg.h>
+
+#include "jit/JSONSpewer.h"
+#include "js/Printer.h"
+#include "js/TypeDecls.h"
+
+enum JSValueType : uint8_t;
+
+namespace js {
+namespace jit {
+
+// New channels may be added below.
+#define JITSPEW_CHANNEL_LIST(_) \
+  /* Information during branch pruning */     \
+ _(Prune) \
+ /* Information during escape analysis */ \
+ _(Escape) \
+ /* Information during alias analysis */ \
+ _(Alias) \
+  /* Alias analysis summaries */              \
+ _(AliasSummaries) \
+ /* Information during GVN */ \
+ _(GVN) \
+ /* Information during sinking */ \
+ _(Sink) \
+ /* Information during Range analysis */ \
+ _(Range) \
+ /* Information during LICM */ \
+ _(LICM) \
+  /* Info about folding linear arithmetic constants */ \
+ _(FLAC) \
+ /* Effective address analysis info */ \
+ _(EAA) \
+ /* Wasm Bounds Check Elimination */ \
+ _(WasmBCE) \
+ /* Information during regalloc */ \
+ _(RegAlloc) \
+ /* Information during inlining */ \
+ _(Inlining) \
+ /* Information during codegen */ \
+ _(Codegen) \
+ /* Debug info about safepoints */ \
+ _(Safepoints) \
+  /* Debug info about Pools */                \
+ _(Pools) \
+ /* Profiling-related information */ \
+ _(Profiling) \
+ /* Debug info about the I$ */ \
+ _(CacheFlush) \
+ /* Info about redundant shape guards */ \
+ _(RedundantShapeGuards) \
+ /* Info about redundant GC barriers */ \
+ _(RedundantGCBarriers) \
+ /* Output a list of MIR expressions */ \
+ _(MIRExpressions) \
+ /* Spew Tracelogger summary stats */ \
+ _(ScriptStats) \
+ \
+ /* BASELINE COMPILER SPEW */ \
+ \
+ /* Aborting Script Compilation. */ \
+ _(BaselineAbort) \
+ /* Script Compilation. */ \
+ _(BaselineScripts) \
+ /* Detailed op-specific spew. */ \
+ _(BaselineOp) \
+ /* Inline caches. */ \
+ _(BaselineIC) \
+ /* Inline cache fallbacks. */ \
+ _(BaselineICFallback) \
+ /* OSR from Baseline => Ion. */ \
+ _(BaselineOSR) \
+ /* Bailouts. */ \
+ _(BaselineBailouts) \
+  /* Debug Mode On-Stack Recompile. */        \
+ _(BaselineDebugModeOSR) \
+ \
+ /* ION COMPILER SPEW */ \
+ \
+ /* Used to abort SSA construction */ \
+ _(IonAbort) \
+ /* Information about compiled scripts */ \
+ _(IonScripts) \
+ /* Info about failing to log script */ \
+ _(IonSyncLogs) \
+ /* Information during MIR building */ \
+ _(IonMIR) \
+ /* Information during bailouts */ \
+ _(IonBailouts) \
+ /* Information during OSI */ \
+ _(IonInvalidate) \
+ /* Debug info about snapshots */ \
+ _(IonSnapshots) \
+ /* Generated inline cache stubs */ \
+ _(IonIC) \
+ \
+ /* WARP SPEW */ \
+ \
+ /* Generated WarpSnapshots */ \
+ _(WarpSnapshots) \
+ /* CacheIR transpiler logging */ \
+ _(WarpTranspiler) \
+ /* Trial inlining for Warp */ \
+ _(WarpTrialInlining)
+
+enum JitSpewChannel {
+#define JITSPEW_CHANNEL(name) JitSpew_##name,
+ JITSPEW_CHANNEL_LIST(JITSPEW_CHANNEL)
+#undef JITSPEW_CHANNEL
+ JitSpew_Terminator
+};
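+
+// The X-macro list above expands to one enumerator per channel, roughly:
+//
+//   enum JitSpewChannel {
+//     JitSpew_Prune,
+//     JitSpew_Escape,
+//     ...
+//     JitSpew_WarpTrialInlining,
+//     JitSpew_Terminator
+//   };
+//
+// JitSpew_Terminator therefore equals the number of channels; because the
+// channels are tracked in a 64-bit bit set (LoggingBits in JitSpewer.cpp),
+// the list must not grow past 64 entries.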
+
+class BacktrackingAllocator;
+class MDefinition;
+class MIRGenerator;
+class MIRGraph;
+class TempAllocator;
+
+// The JitSpewer is only available in debug (JS_JITSPEW) builds.
+// None of the global functions have any effect in non-debug builds.
+#ifdef JS_JITSPEW
+
+// Class made to hold the MIR and LIR graphs of a Wasm / Ion compilation.
+class GraphSpewer {
+ private:
+ MIRGraph* graph_;
+ LSprinter jsonPrinter_;
+ JSONSpewer jsonSpewer_;
+
+ public:
+ explicit GraphSpewer(TempAllocator* alloc);
+
+ bool isSpewing() const { return graph_; }
+ void init(MIRGraph* graph, JSScript* function);
+ void beginFunction(JSScript* function);
+ void beginWasmFunction(unsigned funcIndex);
+ void spewPass(const char* pass);
+ void spewPass(const char* pass, BacktrackingAllocator* ra);
+ void endFunction();
+
+ void dump(Fprinter& json);
+};
+
+void SpewBeginFunction(MIRGenerator* mir, JSScript* function);
+void SpewBeginWasmFunction(MIRGenerator* mir, unsigned funcIndex);
+
+class AutoSpewEndFunction {
+ private:
+ MIRGenerator* mir_;
+
+ public:
+ explicit AutoSpewEndFunction(MIRGenerator* mir) : mir_(mir) {}
+ ~AutoSpewEndFunction();
+};
+
+void CheckLogging();
+Fprinter& JitSpewPrinter();
+
+class JitSpewIndent {
+ JitSpewChannel channel_;
+
+ public:
+ explicit JitSpewIndent(JitSpewChannel channel);
+ ~JitSpewIndent();
+};
+
+void JitSpew(JitSpewChannel channel, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(2, 3);
+void JitSpewStart(JitSpewChannel channel, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(2, 3);
+void JitSpewCont(JitSpewChannel channel, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(2, 3);
+void JitSpewFin(JitSpewChannel channel);
+void JitSpewHeader(JitSpewChannel channel);
+bool JitSpewEnabled(JitSpewChannel channel);
+void JitSpewVA(JitSpewChannel channel, const char* fmt, va_list ap)
+ MOZ_FORMAT_PRINTF(2, 0);
+void JitSpewStartVA(JitSpewChannel channel, const char* fmt, va_list ap)
+ MOZ_FORMAT_PRINTF(2, 0);
+void JitSpewContVA(JitSpewChannel channel, const char* fmt, va_list ap)
+ MOZ_FORMAT_PRINTF(2, 0);
+void JitSpewDef(JitSpewChannel channel, const char* str, MDefinition* def);
+
+void EnableChannel(JitSpewChannel channel);
+void DisableChannel(JitSpewChannel channel);
+void EnableIonDebugSyncLogging();
+void EnableIonDebugAsyncLogging();
+
+const char* ValTypeToString(JSValueType type);
+
+# define JitSpewIfEnabled(channel, fmt, ...) \
+ do { \
+ if (JitSpewEnabled(channel)) { \
+ JitSpew(channel, fmt, __VA_ARGS__); \
+ } \
+ } while (false);
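+
+// Example use, where formatting the arguments would be wasteful when the
+// channel is disabled (getScriptName() is a hypothetical helper):
+//
+//   JitSpewIfEnabled(JitSpew_Codegen, "compiling %s", getScriptName(script));
+//
+// The argument expressions are only evaluated when JitSpewEnabled(channel)
+// returns true.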
+
+#else
+
+class GraphSpewer {
+ public:
+ explicit GraphSpewer(TempAllocator* alloc) {}
+
+  bool isSpewing() const { return false; }
+  void init(MIRGraph* graph, JSScript* function) {}
+  void beginFunction(JSScript* function) {}
+  void beginWasmFunction(unsigned funcIndex) {}
+  void spewPass(const char* pass) {}
+  void spewPass(const char* pass, BacktrackingAllocator* ra) {}
+  void endFunction() {}
+
+  void dump(Fprinter& json) {}
+};
+
+static inline void SpewBeginFunction(MIRGenerator* mir, JSScript* function) {}
+static inline void SpewBeginWasmFunction(MIRGenerator* mir,
+ unsigned funcIndex) {}
+
+class AutoSpewEndFunction {
+ public:
+ explicit AutoSpewEndFunction(MIRGenerator* mir) {}
+ ~AutoSpewEndFunction() {}
+};
+
+static inline void CheckLogging() {}
+static inline Fprinter& JitSpewPrinter() {
+ MOZ_CRASH("No empty backend for JitSpewPrinter");
+}
+
+class JitSpewIndent {
+ public:
+ explicit JitSpewIndent(JitSpewChannel channel) {}
+ ~JitSpewIndent() {}
+};
+
+// The computation of some of the arguments to the spewing functions might be
+// costly, so in these non-spew builds we use variadic macros to discard the
+// arguments entirely.
+static inline void JitSpewCheckArguments(JitSpewChannel channel,
+ const char* fmt) {}
+
+# define JitSpewCheckExpandedArgs(channel, fmt, ...) \
+ JitSpewCheckArguments(channel, fmt)
+# define JitSpewCheckExpandedArgs_(ArgList) \
+ JitSpewCheckExpandedArgs ArgList /* Fix MSVC issue */
+# define JitSpew(...) JitSpewCheckExpandedArgs_((__VA_ARGS__))
+# define JitSpewStart(...) JitSpewCheckExpandedArgs_((__VA_ARGS__))
+# define JitSpewCont(...) JitSpewCheckExpandedArgs_((__VA_ARGS__))
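+
+// With these definitions, a call such as
+//
+//   JitSpew(JitSpew_Codegen, "count=%d", ComputeExpensiveValue());
+//
+// (ComputeExpensiveValue being a hypothetical costly call) expands to
+// JitSpewCheckArguments(JitSpew_Codegen, "count=%d"): the extra arguments are
+// dropped by the preprocessor and never evaluated, while the channel and
+// format string are still checked by the compiler.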
+
+# define JitSpewIfEnabled(channel, fmt, ...) \
+ JitSpewCheckArguments(channel, fmt)
+
+static inline void JitSpewFin(JitSpewChannel channel) {}
+
+static inline void JitSpewHeader(JitSpewChannel channel) {}
+static inline bool JitSpewEnabled(JitSpewChannel channel) { return false; }
+static inline MOZ_FORMAT_PRINTF(2, 0) void JitSpewVA(JitSpewChannel channel,
+ const char* fmt,
+ va_list ap) {}
+static inline void JitSpewDef(JitSpewChannel channel, const char* str,
+ MDefinition* def) {}
+
+static inline void EnableChannel(JitSpewChannel) {}
+static inline void DisableChannel(JitSpewChannel) {}
+static inline void EnableIonDebugSyncLogging() {}
+static inline void EnableIonDebugAsyncLogging() {}
+
+#endif /* JS_JITSPEW */
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitSpewer_h */
diff --git a/js/src/jit/JitZone.h b/js/src/jit/JitZone.h
new file mode 100644
index 0000000000..7489eca256
--- /dev/null
+++ b/js/src/jit/JitZone.h
@@ -0,0 +1,208 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitZone_h
+#define jit_JitZone_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <utility>
+
+#include "gc/Barrier.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/ICStubSpace.h"
+#include "jit/Invalidation.h"
+#include "js/AllocPolicy.h"
+#include "js/GCHashTable.h"
+#include "js/HashTable.h"
+#include "js/TracingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "threading/ProtectedData.h"
+
+namespace JS {
+struct CodeSizes;
+}
+
+namespace js {
+namespace jit {
+
+enum class CacheKind : uint8_t;
+class CacheIRStubInfo;
+class JitCode;
+
+enum class ICStubEngine : uint8_t {
+ // Baseline IC, see BaselineIC.h.
+ Baseline = 0,
+
+ // Ion IC, see IonIC.h.
+ IonIC
+};
+
+struct CacheIRStubKey : public DefaultHasher<CacheIRStubKey> {
+ struct Lookup {
+ CacheKind kind;
+ ICStubEngine engine;
+ const uint8_t* code;
+ uint32_t length;
+
+ Lookup(CacheKind kind, ICStubEngine engine, const uint8_t* code,
+ uint32_t length)
+ : kind(kind), engine(engine), code(code), length(length) {}
+ };
+
+ static HashNumber hash(const Lookup& l);
+ static bool match(const CacheIRStubKey& entry, const Lookup& l);
+
+ UniquePtr<CacheIRStubInfo, JS::FreePolicy> stubInfo;
+
+ explicit CacheIRStubKey(CacheIRStubInfo* info) : stubInfo(info) {}
+ CacheIRStubKey(CacheIRStubKey&& other)
+ : stubInfo(std::move(other.stubInfo)) {}
+
+ void operator=(CacheIRStubKey&& other) {
+ stubInfo = std::move(other.stubInfo);
+ }
+};
+
+struct BaselineCacheIRStubCodeMapGCPolicy {
+ static bool traceWeak(JSTracer* trc, CacheIRStubKey*,
+ WeakHeapPtr<JitCode*>* value) {
+ return TraceWeakEdge(trc, value, "traceWeak");
+ }
+};
+
+class JitZone {
+ // Allocated space for optimized baseline stubs.
+ OptimizedICStubSpace optimizedStubSpace_;
+
+ // Set of CacheIRStubInfo instances used by Ion stubs in this Zone.
+ using IonCacheIRStubInfoSet =
+ HashSet<CacheIRStubKey, CacheIRStubKey, SystemAllocPolicy>;
+ IonCacheIRStubInfoSet ionCacheIRStubInfoSet_;
+
+ // Map CacheIRStubKey to shared JitCode objects.
+ using BaselineCacheIRStubCodeMap =
+ GCHashMap<CacheIRStubKey, WeakHeapPtr<JitCode*>, CacheIRStubKey,
+ SystemAllocPolicy, BaselineCacheIRStubCodeMapGCPolicy>;
+ BaselineCacheIRStubCodeMap baselineCacheIRStubCodes_;
+
+ // Executable allocator for all code except wasm code.
+ MainThreadData<ExecutableAllocator> execAlloc_;
+
+ // HashMap that maps scripts to compilations inlining those scripts.
+ using InlinedScriptMap =
+ GCHashMap<WeakHeapPtr<BaseScript*>, RecompileInfoVector,
+ StableCellHasher<WeakHeapPtr<BaseScript*>>, SystemAllocPolicy>;
+ InlinedScriptMap inlinedCompilations_;
+
+  // The following two fields are a pair of associated scripts. If they are
+  // non-null, the child has been inlined into the parent, and we have bailed
+  // out due to a MonomorphicInlinedStubFolding bailout. If the child wasn't
+  // trial-inlined, we need to track, on the parent's behalf, whether a new
+  // case gets attached to the corresponding folded stub that belongs to the
+  // child.
+ WeakHeapPtr<JSScript*> lastStubFoldingBailoutChild_;
+ WeakHeapPtr<JSScript*> lastStubFoldingBailoutParent_;
+
+ mozilla::Maybe<IonCompilationId> currentCompilationId_;
+ bool keepJitScripts_ = false;
+
+ public:
+ ~JitZone() { MOZ_ASSERT(!keepJitScripts_); }
+
+ void traceWeak(JSTracer* trc);
+
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::CodeSizes* code, size_t* jitZone,
+ size_t* baselineStubsOptimized) const;
+
+ OptimizedICStubSpace* optimizedStubSpace() { return &optimizedStubSpace_; }
+
+ JitCode* getBaselineCacheIRStubCode(const CacheIRStubKey::Lookup& key,
+ CacheIRStubInfo** stubInfo) {
+ auto p = baselineCacheIRStubCodes_.lookup(key);
+ if (p) {
+ *stubInfo = p->key().stubInfo.get();
+ return p->value();
+ }
+ *stubInfo = nullptr;
+ return nullptr;
+ }
+ [[nodiscard]] bool putBaselineCacheIRStubCode(
+ const CacheIRStubKey::Lookup& lookup, CacheIRStubKey& key,
+ JitCode* stubCode) {
+ auto p = baselineCacheIRStubCodes_.lookupForAdd(lookup);
+ MOZ_ASSERT(!p);
+ return baselineCacheIRStubCodes_.add(p, std::move(key), stubCode);
+ }
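+
+  // Sketch of the intended lookup-then-insert pattern (the locals below are
+  // illustrative):
+  //
+  //   CacheIRStubInfo* stubInfo;
+  //   CacheIRStubKey::Lookup lookup(kind, engine, codeStart, codeLength);
+  //   if (JitCode* stub = getBaselineCacheIRStubCode(lookup, &stubInfo)) {
+  //     return stub;  // Reuse the shared stub code.
+  //   }
+  //   // ... compile new stub code and build a CacheIRStubKey key ...
+  //   if (!putBaselineCacheIRStubCode(lookup, key, newStubCode)) {
+  //     return nullptr;  // OOM.
+  //   }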
+
+ CacheIRStubInfo* getIonCacheIRStubInfo(const CacheIRStubKey::Lookup& key) {
+ IonCacheIRStubInfoSet::Ptr p = ionCacheIRStubInfoSet_.lookup(key);
+ return p ? p->stubInfo.get() : nullptr;
+ }
+ [[nodiscard]] bool putIonCacheIRStubInfo(const CacheIRStubKey::Lookup& lookup,
+ CacheIRStubKey& key) {
+ IonCacheIRStubInfoSet::AddPtr p =
+ ionCacheIRStubInfoSet_.lookupForAdd(lookup);
+ MOZ_ASSERT(!p);
+ return ionCacheIRStubInfoSet_.add(p, std::move(key));
+ }
+ void purgeIonCacheIRStubInfo() { ionCacheIRStubInfoSet_.clearAndCompact(); }
+
+ ExecutableAllocator& execAlloc() { return execAlloc_.ref(); }
+ const ExecutableAllocator& execAlloc() const { return execAlloc_.ref(); }
+
+ [[nodiscard]] bool addInlinedCompilation(const RecompileInfo& info,
+ JSScript* inlined);
+
+ RecompileInfoVector* maybeInlinedCompilations(JSScript* inlined) {
+ auto p = inlinedCompilations_.lookup(inlined);
+ return p ? &p->value() : nullptr;
+ }
+
+ void removeInlinedCompilations(JSScript* inlined) {
+ inlinedCompilations_.remove(inlined);
+ }
+
+ void noteStubFoldingBailout(JSScript* child, JSScript* parent) {
+ lastStubFoldingBailoutChild_ = child;
+ lastStubFoldingBailoutParent_ = parent;
+ }
+ bool hasStubFoldingBailoutData(JSScript* child) const {
+ return lastStubFoldingBailoutChild_ &&
+ lastStubFoldingBailoutChild_.get() == child &&
+ lastStubFoldingBailoutParent_;
+ }
+ JSScript* stubFoldingBailoutParent() const {
+ MOZ_ASSERT(lastStubFoldingBailoutChild_);
+ return lastStubFoldingBailoutParent_.get();
+ }
+ void clearStubFoldingBailoutData() {
+ lastStubFoldingBailoutChild_ = nullptr;
+ lastStubFoldingBailoutParent_ = nullptr;
+ }
+
+ bool keepJitScripts() const { return keepJitScripts_; }
+ void setKeepJitScripts(bool keep) { keepJitScripts_ = keep; }
+
+ mozilla::Maybe<IonCompilationId> currentCompilationId() const {
+ return currentCompilationId_;
+ }
+ mozilla::Maybe<IonCompilationId>& currentCompilationIdRef() {
+ return currentCompilationId_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitZone_h */
diff --git a/js/src/jit/JitcodeMap.cpp b/js/src/jit/JitcodeMap.cpp
new file mode 100644
index 0000000000..1c1e21c72e
--- /dev/null
+++ b/js/src/jit/JitcodeMap.cpp
@@ -0,0 +1,1145 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/JitcodeMap.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ScopeExit.h"
+
+#include "gc/Marking.h"
+#include "jit/BaselineJIT.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "js/Vector.h"
+#include "vm/BytecodeLocation.h" // for BytecodeLocation
+#include "vm/GeckoProfiler.h"
+
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSScript-inl.h"
+
+using mozilla::Maybe;
+
+namespace js {
+namespace jit {
+
+static inline JitcodeRegionEntry RegionAtAddr(const IonEntry& entry, void* ptr,
+ uint32_t* ptrOffset) {
+ MOZ_ASSERT(entry.containsPointer(ptr));
+ *ptrOffset = reinterpret_cast<uint8_t*>(ptr) -
+ reinterpret_cast<uint8_t*>(entry.nativeStartAddr());
+
+ uint32_t regionIdx = entry.regionTable()->findRegionEntry(*ptrOffset);
+ MOZ_ASSERT(regionIdx < entry.regionTable()->numRegions());
+
+ return entry.regionTable()->regionEntry(regionIdx);
+}
+
+void* IonEntry::canonicalNativeAddrFor(void* ptr) const {
+ uint32_t ptrOffset;
+ JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
+ return (void*)(((uint8_t*)nativeStartAddr()) + region.nativeOffset());
+}
+
+bool IonEntry::callStackAtAddr(void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const {
+ uint32_t ptrOffset;
+ JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
+ *depth = region.scriptDepth();
+
+ JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
+ MOZ_ASSERT(locationIter.hasMore());
+ bool first = true;
+ while (locationIter.hasMore()) {
+ uint32_t scriptIdx, pcOffset;
+ locationIter.readNext(&scriptIdx, &pcOffset);
+ // For the first entry pushed (innermost frame), the pcOffset is obtained
+ // from the delta-run encodings.
+ if (first) {
+ pcOffset = region.findPcOffset(ptrOffset, pcOffset);
+ first = false;
+ }
+ JSScript* script = getScript(scriptIdx);
+ jsbytecode* pc = script->offsetToPC(pcOffset);
+ if (!results.append(BytecodeLocation(script, pc))) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+uint32_t IonEntry::callStackAtAddr(void* ptr, const char** results,
+ uint32_t maxResults) const {
+ MOZ_ASSERT(maxResults >= 1);
+
+ uint32_t ptrOffset;
+ JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
+
+ JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
+ MOZ_ASSERT(locationIter.hasMore());
+ uint32_t count = 0;
+ while (locationIter.hasMore()) {
+ uint32_t scriptIdx, pcOffset;
+
+ locationIter.readNext(&scriptIdx, &pcOffset);
+ MOZ_ASSERT(getStr(scriptIdx));
+
+ results[count++] = getStr(scriptIdx);
+ if (count >= maxResults) {
+ break;
+ }
+ }
+
+ return count;
+}
+
+uint64_t IonEntry::lookupRealmID(void* ptr) const {
+ uint32_t ptrOffset;
+ JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
+ JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
+ MOZ_ASSERT(locationIter.hasMore());
+ uint32_t scriptIdx, pcOffset;
+ locationIter.readNext(&scriptIdx, &pcOffset);
+
+ JSScript* script = getScript(scriptIdx);
+ return script->realm()->creationOptions().profilerRealmID();
+}
+
+IonEntry::~IonEntry() {
+ // The region table is stored at the tail of the compacted data,
+ // which means the start of the region table is a pointer to
+ // the _middle_ of the memory space allocated for it.
+ //
+ // When freeing it, obtain the payload start pointer first.
+ MOZ_ASSERT(regionTable_);
+ js_free((void*)(regionTable_->payloadStart()));
+ regionTable_ = nullptr;
+}
+
+static IonEntry& IonEntryForIonIC(JSRuntime* rt, const IonICEntry* icEntry) {
+ // The table must have an IonEntry for the IC's rejoin address.
+ auto* table = rt->jitRuntime()->getJitcodeGlobalTable();
+ auto* entry = table->lookup(icEntry->rejoinAddr());
+ MOZ_ASSERT(entry);
+ MOZ_RELEASE_ASSERT(entry->isIon());
+ return entry->asIon();
+}
+
+void* IonICEntry::canonicalNativeAddrFor(void* ptr) const { return ptr; }
+
+bool IonICEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const {
+ const IonEntry& entry = IonEntryForIonIC(rt, this);
+ return entry.callStackAtAddr(rejoinAddr(), results, depth);
+}
+
+uint32_t IonICEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
+ const char** results,
+ uint32_t maxResults) const {
+ const IonEntry& entry = IonEntryForIonIC(rt, this);
+ return entry.callStackAtAddr(rejoinAddr(), results, maxResults);
+}
+
+uint64_t IonICEntry::lookupRealmID(JSRuntime* rt, void* ptr) const {
+ const IonEntry& entry = IonEntryForIonIC(rt, this);
+ return entry.lookupRealmID(rejoinAddr());
+}
+
+void* BaselineEntry::canonicalNativeAddrFor(void* ptr) const {
+ // TODO: We can't yet normalize Baseline addresses until we unify
+ // BaselineScript's PCMappingEntries with JitcodeGlobalTable.
+ return ptr;
+}
+
+bool BaselineEntry::callStackAtAddr(void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const {
+ MOZ_ASSERT(containsPointer(ptr));
+ MOZ_ASSERT(script_->hasBaselineScript());
+
+ uint8_t* addr = reinterpret_cast<uint8_t*>(ptr);
+ jsbytecode* pc =
+ script_->baselineScript()->approximatePcForNativeAddress(script_, addr);
+ if (!results.append(BytecodeLocation(script_, pc))) {
+ return false;
+ }
+
+ *depth = 1;
+
+ return true;
+}
+
+uint32_t BaselineEntry::callStackAtAddr(void* ptr, const char** results,
+ uint32_t maxResults) const {
+ MOZ_ASSERT(containsPointer(ptr));
+ MOZ_ASSERT(maxResults >= 1);
+
+ results[0] = str();
+ return 1;
+}
+
+uint64_t BaselineEntry::lookupRealmID() const {
+ return script_->realm()->creationOptions().profilerRealmID();
+}
+
+void* BaselineInterpreterEntry::canonicalNativeAddrFor(void* ptr) const {
+ return ptr;
+}
+
+bool BaselineInterpreterEntry::callStackAtAddr(void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const {
+ MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
+}
+
+uint32_t BaselineInterpreterEntry::callStackAtAddr(void* ptr,
+ const char** results,
+ uint32_t maxResults) const {
+ MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
+}
+
+uint64_t BaselineInterpreterEntry::lookupRealmID() const {
+ MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
+}
+
+const JitcodeGlobalEntry* JitcodeGlobalTable::lookupForSampler(
+ void* ptr, JSRuntime* rt, uint64_t samplePosInBuffer) {
+ JitcodeGlobalEntry* entry = lookupInternal(ptr);
+ if (!entry) {
+ return nullptr;
+ }
+
+ entry->setSamplePositionInBuffer(samplePosInBuffer);
+
+ // IonIC entries must keep their corresponding Ion entries alive.
+ if (entry->isIonIC()) {
+ IonEntry& ionEntry = IonEntryForIonIC(rt, &entry->asIonIC());
+ ionEntry.setSamplePositionInBuffer(samplePosInBuffer);
+ }
+
+ // JitcodeGlobalEntries are marked at the end of the mark phase. A read
+ // barrier is not needed. Any JS frames sampled during the sweep phase of
+ // the GC must be on stack, and on-stack frames must already be marked at
+ // the beginning of the sweep phase. It's not possible to assert this here
+ // as we may be off main thread when called from the gecko profiler.
+
+ return entry;
+}
+
+JitcodeGlobalEntry* JitcodeGlobalTable::lookupInternal(void* ptr) {
+ // Search for an entry containing the one-byte range starting at |ptr|.
+ JitCodeRange range(ptr, static_cast<uint8_t*>(ptr) + 1);
+
+ if (JitCodeRange** entry = tree_.maybeLookup(&range)) {
+ MOZ_ASSERT((*entry)->containsPointer(ptr));
+ return static_cast<JitcodeGlobalEntry*>(*entry);
+ }
+
+ return nullptr;
+}
+
+bool JitcodeGlobalTable::addEntry(UniqueJitcodeGlobalEntry entry) {
+ MOZ_ASSERT(entry->isIon() || entry->isIonIC() || entry->isBaseline() ||
+ entry->isBaselineInterpreter() || entry->isDummy());
+
+ // Assert the new entry does not have a code range that's equal to (or
+ // contained in) one of the existing entries, because that would confuse the
+ // AVL tree.
+ MOZ_ASSERT(!tree_.maybeLookup(entry.get()));
+
+ // Suppress profiler sampling while data structures are being mutated.
+ AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
+
+ if (!entries_.append(std::move(entry))) {
+ return false;
+ }
+ if (!tree_.insert(entries_.back().get())) {
+ entries_.popBack();
+ return false;
+ }
+
+ return true;
+}
+
+void JitcodeGlobalTable::setAllEntriesAsExpired() {
+ AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
+ for (EntryVector::Range r(entries_.all()); !r.empty(); r.popFront()) {
+ auto& entry = r.front();
+ entry->setAsExpired();
+ }
+}
+
+bool JitcodeGlobalTable::markIteratively(GCMarker* marker) {
+ // JitcodeGlobalTable must keep entries that are in the sampler buffer
+ // alive. This conditionality is akin to holding the entries weakly.
+ //
+ // If this table were marked at the beginning of the mark phase, then
+ // sampling would require a read barrier for sampling in between
+ // incremental GC slices. However, invoking read barriers from the sampler
+ // is wildly unsafe. The sampler may run at any time, including during GC
+ // itself.
+ //
+ // Instead, JitcodeGlobalTable is marked at the beginning of the sweep
+ // phase, along with weak references. The key assumption is the
+ // following. At the beginning of the sweep phase, any JS frames that the
+ // sampler may put in its buffer that are not already there at the
+ // beginning of the mark phase must have already been marked, as either 1)
+ // the frame was on-stack at the beginning of the sweep phase, or 2) the
+ // frame was pushed between incremental sweep slices. Frames of case 1)
+ // are already marked. Frames of case 2) must have been reachable to have
+ // been newly pushed, and thus are already marked.
+ //
+ // The approach above obviates the need for read barriers. The assumption
+ // above is checked in JitcodeGlobalTable::lookupForSampler.
+
+ MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());
+
+ AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
+
+ // If the profiler is off, rangeStart will be Nothing() and all entries are
+ // considered to be expired.
+ Maybe<uint64_t> rangeStart =
+ marker->runtime()->profilerSampleBufferRangeStart();
+
+ bool markedAny = false;
+ for (EntryVector::Range r(entries_.all()); !r.empty(); r.popFront()) {
+ auto& entry = r.front();
+
+ // If an entry is not sampled, reset its buffer position to the invalid
+ // position, and conditionally mark the rest of the entry if its
+ // JitCode is not already marked. This conditional marking ensures
+ // that so long as the JitCode *may* be sampled, we keep any
+ // information that may be handed out to the sampler, like tracked
+ // types used by optimizations and scripts used for pc to line number
+ // mapping, alive as well.
+ if (!rangeStart || !entry->isSampled(*rangeStart)) {
+ entry->setAsExpired();
+ if (!entry->isJitcodeMarkedFromAnyThread(marker->runtime())) {
+ continue;
+ }
+ }
+
+ // The table is runtime-wide. Not all zones may be participating in
+ // the GC.
+ if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
+ continue;
+ }
+
+ markedAny |= entry->trace(marker->tracer());
+ }
+
+ return markedAny;
+}
+
+void JitcodeGlobalTable::traceWeak(JSRuntime* rt, JSTracer* trc) {
+ AutoSuppressProfilerSampling suppressSampling(rt->mainContextFromOwnThread());
+
+ entries_.eraseIf([&](auto& entry) {
+ if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
+ return false;
+ }
+
+ if (TraceManuallyBarrieredWeakEdge(
+ trc, entry->jitcodePtr(),
+ "JitcodeGlobalTable::JitcodeGlobalEntry::jitcode_")) {
+ entry->traceWeak(trc);
+ return false;
+ }
+
+ // We have to remove the entry.
+#ifdef DEBUG
+ Maybe<uint64_t> rangeStart = rt->profilerSampleBufferRangeStart();
+ MOZ_ASSERT_IF(rangeStart, !entry->isSampled(*rangeStart));
+#endif
+ tree_.remove(entry.get());
+ return true;
+ });
+
+ MOZ_ASSERT(tree_.empty() == entries_.empty());
+}
+
+bool JitcodeGlobalEntry::traceJitcode(JSTracer* trc) {
+ if (!IsMarkedUnbarriered(trc->runtime(), jitcode_)) {
+ TraceManuallyBarrieredEdge(trc, &jitcode_,
+ "jitcodglobaltable-baseentry-jitcode");
+ return true;
+ }
+ return false;
+}
+
+bool JitcodeGlobalEntry::isJitcodeMarkedFromAnyThread(JSRuntime* rt) {
+ return IsMarkedUnbarriered(rt, jitcode_);
+}
+
+bool BaselineEntry::trace(JSTracer* trc) {
+ if (!IsMarkedUnbarriered(trc->runtime(), script_)) {
+ TraceManuallyBarrieredEdge(trc, &script_,
+ "jitcodeglobaltable-baselineentry-script");
+ return true;
+ }
+ return false;
+}
+
+void BaselineEntry::traceWeak(JSTracer* trc) {
+ MOZ_ALWAYS_TRUE(
+ TraceManuallyBarrieredWeakEdge(trc, &script_, "BaselineEntry::script_"));
+}
+
+bool IonEntry::trace(JSTracer* trc) {
+ bool tracedAny = false;
+
+ JSRuntime* rt = trc->runtime();
+ for (auto& pair : scriptList_) {
+ if (!IsMarkedUnbarriered(rt, pair.script)) {
+ TraceManuallyBarrieredEdge(trc, &pair.script,
+ "jitcodeglobaltable-ionentry-script");
+ tracedAny = true;
+ }
+ }
+
+ return tracedAny;
+}
+
+void IonEntry::traceWeak(JSTracer* trc) {
+ for (auto& pair : scriptList_) {
+ JSScript** scriptp = &pair.script;
+ MOZ_ALWAYS_TRUE(
+ TraceManuallyBarrieredWeakEdge(trc, scriptp, "IonEntry script"));
+ }
+}
+
+bool IonICEntry::trace(JSTracer* trc) {
+ IonEntry& entry = IonEntryForIonIC(trc->runtime(), this);
+ return entry.trace(trc);
+}
+
+void IonICEntry::traceWeak(JSTracer* trc) {
+ IonEntry& entry = IonEntryForIonIC(trc->runtime(), this);
+ entry.traceWeak(trc);
+}
+
+[[nodiscard]] bool JitcodeGlobalEntry::callStackAtAddr(
+ JSRuntime* rt, void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const {
+ switch (kind()) {
+ case Kind::Ion:
+ return asIon().callStackAtAddr(ptr, results, depth);
+ case Kind::IonIC:
+ return asIonIC().callStackAtAddr(rt, ptr, results, depth);
+ case Kind::Baseline:
+ return asBaseline().callStackAtAddr(ptr, results, depth);
+ case Kind::BaselineInterpreter:
+ return asBaselineInterpreter().callStackAtAddr(ptr, results, depth);
+ case Kind::Dummy:
+ return asDummy().callStackAtAddr(rt, ptr, results, depth);
+ }
+ MOZ_CRASH("Invalid kind");
+}
+
+uint32_t JitcodeGlobalEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
+ const char** results,
+ uint32_t maxResults) const {
+ switch (kind()) {
+ case Kind::Ion:
+ return asIon().callStackAtAddr(ptr, results, maxResults);
+ case Kind::IonIC:
+ return asIonIC().callStackAtAddr(rt, ptr, results, maxResults);
+ case Kind::Baseline:
+ return asBaseline().callStackAtAddr(ptr, results, maxResults);
+ case Kind::BaselineInterpreter:
+ return asBaselineInterpreter().callStackAtAddr(ptr, results, maxResults);
+ case Kind::Dummy:
+ return asDummy().callStackAtAddr(rt, ptr, results, maxResults);
+ }
+ MOZ_CRASH("Invalid kind");
+}
+
+uint64_t JitcodeGlobalEntry::lookupRealmID(JSRuntime* rt, void* ptr) const {
+ switch (kind()) {
+ case Kind::Ion:
+ return asIon().lookupRealmID(ptr);
+ case Kind::IonIC:
+ return asIonIC().lookupRealmID(rt, ptr);
+ case Kind::Baseline:
+ return asBaseline().lookupRealmID();
+ case Kind::Dummy:
+ return asDummy().lookupRealmID();
+ case Kind::BaselineInterpreter:
+ break;
+ }
+ MOZ_CRASH("Invalid kind");
+}
+
+bool JitcodeGlobalEntry::trace(JSTracer* trc) {
+ bool tracedAny = traceJitcode(trc);
+ switch (kind()) {
+ case Kind::Ion:
+ tracedAny |= asIon().trace(trc);
+ break;
+ case Kind::IonIC:
+ tracedAny |= asIonIC().trace(trc);
+ break;
+ case Kind::Baseline:
+ tracedAny |= asBaseline().trace(trc);
+ break;
+ case Kind::BaselineInterpreter:
+ case Kind::Dummy:
+ break;
+ }
+ return tracedAny;
+}
+
+void JitcodeGlobalEntry::traceWeak(JSTracer* trc) {
+ switch (kind()) {
+ case Kind::Ion:
+ asIon().traceWeak(trc);
+ break;
+ case Kind::IonIC:
+ asIonIC().traceWeak(trc);
+ break;
+ case Kind::Baseline:
+ asBaseline().traceWeak(trc);
+ break;
+ case Kind::BaselineInterpreter:
+ case Kind::Dummy:
+ break;
+ }
+}
+
+void* JitcodeGlobalEntry::canonicalNativeAddrFor(JSRuntime* rt,
+ void* ptr) const {
+ switch (kind()) {
+ case Kind::Ion:
+ return asIon().canonicalNativeAddrFor(ptr);
+ case Kind::IonIC:
+ return asIonIC().canonicalNativeAddrFor(ptr);
+ case Kind::Baseline:
+ return asBaseline().canonicalNativeAddrFor(ptr);
+ case Kind::Dummy:
+ return asDummy().canonicalNativeAddrFor(rt, ptr);
+ case Kind::BaselineInterpreter:
+ break;
+ }
+ MOZ_CRASH("Invalid kind");
+}
+
+// static
+void JitcodeGlobalEntry::DestroyPolicy::operator()(JitcodeGlobalEntry* entry) {
+ switch (entry->kind()) {
+ case JitcodeGlobalEntry::Kind::Ion:
+ js_delete(&entry->asIon());
+ break;
+ case JitcodeGlobalEntry::Kind::IonIC:
+ js_delete(&entry->asIonIC());
+ break;
+ case JitcodeGlobalEntry::Kind::Baseline:
+ js_delete(&entry->asBaseline());
+ break;
+ case JitcodeGlobalEntry::Kind::BaselineInterpreter:
+ js_delete(&entry->asBaselineInterpreter());
+ break;
+ case JitcodeGlobalEntry::Kind::Dummy:
+ js_delete(&entry->asDummy());
+ break;
+ }
+}
+
+/* static */
+void JitcodeRegionEntry::WriteHead(CompactBufferWriter& writer,
+ uint32_t nativeOffset, uint8_t scriptDepth) {
+ writer.writeUnsigned(nativeOffset);
+ writer.writeByte(scriptDepth);
+}
+
+/* static */
+void JitcodeRegionEntry::ReadHead(CompactBufferReader& reader,
+ uint32_t* nativeOffset,
+ uint8_t* scriptDepth) {
+ *nativeOffset = reader.readUnsigned();
+ *scriptDepth = reader.readByte();
+}
+
+/* static */
+void JitcodeRegionEntry::WriteScriptPc(CompactBufferWriter& writer,
+ uint32_t scriptIdx, uint32_t pcOffset) {
+ writer.writeUnsigned(scriptIdx);
+ writer.writeUnsigned(pcOffset);
+}
+
+/* static */
+void JitcodeRegionEntry::ReadScriptPc(CompactBufferReader& reader,
+ uint32_t* scriptIdx, uint32_t* pcOffset) {
+ *scriptIdx = reader.readUnsigned();
+ *pcOffset = reader.readUnsigned();
+}
+
+/* static */
+void JitcodeRegionEntry::WriteDelta(CompactBufferWriter& writer,
+ uint32_t nativeDelta, int32_t pcDelta) {
+ if (pcDelta >= 0) {
+ // 1 and 2-byte formats possible.
+
+ // NNNN-BBB0
+ if (pcDelta <= ENC1_PC_DELTA_MAX && nativeDelta <= ENC1_NATIVE_DELTA_MAX) {
+ uint8_t encVal = ENC1_MASK_VAL | (pcDelta << ENC1_PC_DELTA_SHIFT) |
+ (nativeDelta << ENC1_NATIVE_DELTA_SHIFT);
+ writer.writeByte(encVal);
+ return;
+ }
+
+ // NNNN-NNNN BBBB-BB01
+ if (pcDelta <= ENC2_PC_DELTA_MAX && nativeDelta <= ENC2_NATIVE_DELTA_MAX) {
+ uint16_t encVal = ENC2_MASK_VAL | (pcDelta << ENC2_PC_DELTA_SHIFT) |
+ (nativeDelta << ENC2_NATIVE_DELTA_SHIFT);
+ writer.writeByte(encVal & 0xff);
+ writer.writeByte((encVal >> 8) & 0xff);
+ return;
+ }
+ }
+
+ // NNNN-NNNN NNNB-BBBB BBBB-B011
+ if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
+ nativeDelta <= ENC3_NATIVE_DELTA_MAX) {
+ uint32_t encVal =
+ ENC3_MASK_VAL |
+ ((uint32_t(pcDelta) << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
+ (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
+ writer.writeByte(encVal & 0xff);
+ writer.writeByte((encVal >> 8) & 0xff);
+ writer.writeByte((encVal >> 16) & 0xff);
+ return;
+ }
+
+ // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
+ if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
+ nativeDelta <= ENC4_NATIVE_DELTA_MAX) {
+ uint32_t encVal =
+ ENC4_MASK_VAL |
+ ((uint32_t(pcDelta) << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
+ (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
+ writer.writeByte(encVal & 0xff);
+ writer.writeByte((encVal >> 8) & 0xff);
+ writer.writeByte((encVal >> 16) & 0xff);
+ writer.writeByte((encVal >> 24) & 0xff);
+ return;
+ }
+
+ // Should never get here.
+ MOZ_CRASH("pcDelta/nativeDelta values are too large to encode.");
+}
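+
+// Worked example for the one-byte format NNNN-BBB0: a pcDelta of 3 and a
+// nativeDelta of 10 both fit that format, so the single byte written is
+// ENC1_MASK_VAL | (3 << ENC1_PC_DELTA_SHIFT) | (10 << ENC1_NATIVE_DELTA_SHIFT),
+// i.e. the native delta fills the high nibble ("NNNN"), the pc delta the next
+// three bits ("BBB"), and the low tag bit stays clear.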
+
+/* static */
+void JitcodeRegionEntry::ReadDelta(CompactBufferReader& reader,
+ uint32_t* nativeDelta, int32_t* pcDelta) {
+ // NB:
+ // It's possible to get nativeDeltas with value 0 in two cases:
+ //
+ // 1. The last region's run. This is because the region table's start
+ // must be 4-byte aligned, and we must insert padding bytes to align the
+ // payload section before emitting the table.
+ //
+ // 2. A zero-offset nativeDelta with a negative pcDelta.
+ //
+ // So if nativeDelta is zero, then pcDelta must be <= 0.
+
+ // NNNN-BBB0
+ const uint32_t firstByte = reader.readByte();
+ if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
+ uint32_t encVal = firstByte;
+ *nativeDelta = encVal >> ENC1_NATIVE_DELTA_SHIFT;
+ *pcDelta = (encVal & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT;
+ MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
+ return;
+ }
+
+ // NNNN-NNNN BBBB-BB01
+ const uint32_t secondByte = reader.readByte();
+ if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
+ uint32_t encVal = firstByte | secondByte << 8;
+ *nativeDelta = encVal >> ENC2_NATIVE_DELTA_SHIFT;
+ *pcDelta = (encVal & ENC2_PC_DELTA_MASK) >> ENC2_PC_DELTA_SHIFT;
+ MOZ_ASSERT(*pcDelta != 0);
+ MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
+ return;
+ }
+
+ // NNNN-NNNN NNNB-BBBB BBBB-B011
+ const uint32_t thirdByte = reader.readByte();
+ if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
+ uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
+ *nativeDelta = encVal >> ENC3_NATIVE_DELTA_SHIFT;
+
+ uint32_t pcDeltaU = (encVal & ENC3_PC_DELTA_MASK) >> ENC3_PC_DELTA_SHIFT;
+ // Fix sign if necessary.
+ if (pcDeltaU > static_cast<uint32_t>(ENC3_PC_DELTA_MAX)) {
+ pcDeltaU |= ~ENC3_PC_DELTA_MAX;
+ }
+ *pcDelta = pcDeltaU;
+ MOZ_ASSERT(*pcDelta != 0);
+ MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
+ return;
+ }
+
+ // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
+ MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
+ const uint32_t fourthByte = reader.readByte();
+ uint32_t encVal =
+ firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
+ *nativeDelta = encVal >> ENC4_NATIVE_DELTA_SHIFT;
+
+ uint32_t pcDeltaU = (encVal & ENC4_PC_DELTA_MASK) >> ENC4_PC_DELTA_SHIFT;
+ // fix sign if necessary
+ if (pcDeltaU > static_cast<uint32_t>(ENC4_PC_DELTA_MAX)) {
+ pcDeltaU |= ~ENC4_PC_DELTA_MAX;
+ }
+ *pcDelta = pcDeltaU;
+
+ MOZ_ASSERT(*pcDelta != 0);
+ MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
+}
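+
+// Note on the "fix sign" steps above: for a k-bit two's-complement pc-delta
+// field the maximum (ENC3_PC_DELTA_MAX / ENC4_PC_DELTA_MAX) is 2^(k-1) - 1,
+// i.e. all ones below the sign bit. A decoded value above that maximum has
+// the field's sign bit set, and OR-ing in the complement of the maximum sets
+// every higher bit, sign-extending the field to the negative pcDelta that was
+// originally encoded.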
+
+/* static */
+uint32_t JitcodeRegionEntry::ExpectedRunLength(const NativeToBytecode* entry,
+ const NativeToBytecode* end) {
+ MOZ_ASSERT(entry < end);
+
+ // We always use the first entry, so runLength starts at 1
+ uint32_t runLength = 1;
+
+ uint32_t curNativeOffset = entry->nativeOffset.offset();
+ uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);
+
+ for (auto nextEntry = entry + 1; nextEntry != end; nextEntry += 1) {
+ // If the next run moves to a different inline site, stop the run.
+ if (nextEntry->tree != entry->tree) {
+ break;
+ }
+
+ uint32_t nextNativeOffset = nextEntry->nativeOffset.offset();
+ uint32_t nextBytecodeOffset =
+ nextEntry->tree->script()->pcToOffset(nextEntry->pc);
+ MOZ_ASSERT(nextNativeOffset >= curNativeOffset);
+
+ uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
+ int32_t bytecodeDelta =
+ int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
+
+ // If deltas are too large (very unlikely), stop the run.
+ if (!IsDeltaEncodeable(nativeDelta, bytecodeDelta)) {
+ break;
+ }
+
+ runLength++;
+
+ // If the run has grown to its maximum length, stop the run.
+ if (runLength == MAX_RUN_LENGTH) {
+ break;
+ }
+
+ curNativeOffset = nextNativeOffset;
+ curBytecodeOffset = nextBytecodeOffset;
+ }
+
+ return runLength;
+}
+
+struct JitcodeMapBufferWriteSpewer {
+#ifdef JS_JITSPEW
+ CompactBufferWriter* writer;
+ uint32_t startPos;
+
+ static const uint32_t DumpMaxBytes = 50;
+
+ explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w)
+ : writer(&w), startPos(writer->length()) {}
+
+ void spewAndAdvance(const char* name) {
+ if (writer->oom()) {
+ return;
+ }
+
+ uint32_t curPos = writer->length();
+ const uint8_t* start = writer->buffer() + startPos;
+ const uint8_t* end = writer->buffer() + curPos;
+ const char* MAP = "0123456789ABCDEF";
+ uint32_t bytes = end - start;
+
+ char buffer[DumpMaxBytes * 3];
+ for (uint32_t i = 0; i < bytes; i++) {
+ buffer[i * 3] = MAP[(start[i] >> 4) & 0xf];
+ buffer[i * 3 + 1] = MAP[(start[i] >> 0) & 0xf];
+ buffer[i * 3 + 2] = ' ';
+ }
+ if (bytes >= DumpMaxBytes) {
+ buffer[DumpMaxBytes * 3 - 1] = '\0';
+ } else {
+ buffer[bytes * 3 - 1] = '\0';
+ }
+
+ JitSpew(JitSpew_Profiling, "%s@%d[%d bytes] - %s", name, int(startPos),
+ int(bytes), buffer);
+
+ // Move to the end of the current buffer.
+ startPos = writer->length();
+ }
+#else // !JS_JITSPEW
+ explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w) {}
+ void spewAndAdvance(const char* name) {}
+#endif // JS_JITSPEW
+};
+
+// Write a run, starting at the given NativeToBytecode entry, into the given
+// buffer writer.
+/* static */
+bool JitcodeRegionEntry::WriteRun(CompactBufferWriter& writer,
+ const IonEntry::ScriptList& scriptList,
+ uint32_t runLength,
+ const NativeToBytecode* entry) {
+ MOZ_ASSERT(runLength > 0);
+ MOZ_ASSERT(runLength <= MAX_RUN_LENGTH);
+
+ // Calculate script depth.
+ MOZ_ASSERT(entry->tree->depth() <= 0xff);
+ uint8_t scriptDepth = entry->tree->depth();
+ uint32_t regionNativeOffset = entry->nativeOffset.offset();
+
+ JitcodeMapBufferWriteSpewer spewer(writer);
+
+ // Write the head info.
+ JitSpew(JitSpew_Profiling, " Head Info: nativeOffset=%d scriptDepth=%d",
+ int(regionNativeOffset), int(scriptDepth));
+ WriteHead(writer, regionNativeOffset, scriptDepth);
+ spewer.spewAndAdvance(" ");
+
+ // Write each script/pc pair.
+ {
+ InlineScriptTree* curTree = entry->tree;
+ jsbytecode* curPc = entry->pc;
+ for (uint8_t i = 0; i < scriptDepth; i++) {
+ // Find the index of the script within the list.
+ // NB: scriptList is guaranteed to contain curTree->script()
+ uint32_t scriptIdx = 0;
+ for (; scriptIdx < scriptList.length(); scriptIdx++) {
+ if (scriptList[scriptIdx].script == curTree->script()) {
+ break;
+ }
+ }
+ MOZ_ASSERT(scriptIdx < scriptList.length());
+
+ uint32_t pcOffset = curTree->script()->pcToOffset(curPc);
+
+ JitSpew(JitSpew_Profiling, " Script/PC %d: scriptIdx=%d pcOffset=%d",
+ int(i), int(scriptIdx), int(pcOffset));
+ WriteScriptPc(writer, scriptIdx, pcOffset);
+ spewer.spewAndAdvance(" ");
+
+ MOZ_ASSERT_IF(i < scriptDepth - 1, curTree->hasCaller());
+ curPc = curTree->callerPc();
+ curTree = curTree->caller();
+ }
+ }
+
+ // Start writing runs.
+ uint32_t curNativeOffset = entry->nativeOffset.offset();
+ uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);
+
+ JitSpew(JitSpew_Profiling,
+ " Writing Delta Run from nativeOffset=%d bytecodeOffset=%d",
+ int(curNativeOffset), int(curBytecodeOffset));
+
+ // Skip first entry because it is implicit in the header. Start at subsequent
+ // entry.
+ for (uint32_t i = 1; i < runLength; i++) {
+ MOZ_ASSERT(entry[i].tree == entry->tree);
+
+ uint32_t nextNativeOffset = entry[i].nativeOffset.offset();
+ uint32_t nextBytecodeOffset =
+ entry[i].tree->script()->pcToOffset(entry[i].pc);
+ MOZ_ASSERT(nextNativeOffset >= curNativeOffset);
+
+ uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
+ int32_t bytecodeDelta =
+ int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
+ MOZ_ASSERT(IsDeltaEncodeable(nativeDelta, bytecodeDelta));
+
+ JitSpew(JitSpew_Profiling,
+ " RunEntry native: %d-%d [%d] bytecode: %d-%d [%d]",
+ int(curNativeOffset), int(nextNativeOffset), int(nativeDelta),
+ int(curBytecodeOffset), int(nextBytecodeOffset),
+ int(bytecodeDelta));
+ WriteDelta(writer, nativeDelta, bytecodeDelta);
+
+ // Spew the bytecode in these ranges.
+ if (curBytecodeOffset < nextBytecodeOffset) {
+ JitSpewStart(JitSpew_Profiling, " OPS: ");
+ uint32_t curBc = curBytecodeOffset;
+ while (curBc < nextBytecodeOffset) {
+ jsbytecode* pc = entry[i].tree->script()->offsetToPC(curBc);
+#ifdef JS_JITSPEW
+ JSOp op = JSOp(*pc);
+ JitSpewCont(JitSpew_Profiling, "%s ", CodeName(op));
+#endif
+ curBc += GetBytecodeLength(pc);
+ }
+ JitSpewFin(JitSpew_Profiling);
+ }
+ spewer.spewAndAdvance(" ");
+
+ curNativeOffset = nextNativeOffset;
+ curBytecodeOffset = nextBytecodeOffset;
+ }
+
+ if (writer.oom()) {
+ return false;
+ }
+
+ return true;
+}
+
+void JitcodeRegionEntry::unpack() {
+ CompactBufferReader reader(data_, end_);
+ ReadHead(reader, &nativeOffset_, &scriptDepth_);
+ MOZ_ASSERT(scriptDepth_ > 0);
+
+ scriptPcStack_ = reader.currentPosition();
+ // Skip past script/pc stack
+ for (unsigned i = 0; i < scriptDepth_; i++) {
+ uint32_t scriptIdx, pcOffset;
+ ReadScriptPc(reader, &scriptIdx, &pcOffset);
+ }
+
+ deltaRun_ = reader.currentPosition();
+}
+
+uint32_t JitcodeRegionEntry::findPcOffset(uint32_t queryNativeOffset,
+ uint32_t startPcOffset) const {
+ DeltaIterator iter = deltaIterator();
+ uint32_t curNativeOffset = nativeOffset();
+ uint32_t curPcOffset = startPcOffset;
+ while (iter.hasMore()) {
+ uint32_t nativeDelta;
+ int32_t pcDelta;
+ iter.readNext(&nativeDelta, &pcDelta);
+
+ // The start address of the next delta-run entry is counted towards
+ // the current delta-run entry, because return addresses should
+ // associate with the bytecode op prior (the call) not the op after.
+ if (queryNativeOffset <= curNativeOffset + nativeDelta) {
+ break;
+ }
+ curNativeOffset += nativeDelta;
+ curPcOffset += pcDelta;
+ }
+ return curPcOffset;
+}
+
+uint32_t JitcodeIonTable::findRegionEntry(uint32_t nativeOffset) const {
+ static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
+ uint32_t regions = numRegions();
+ MOZ_ASSERT(regions > 0);
+
+ // For small region lists, just search linearly.
+ if (regions <= LINEAR_SEARCH_THRESHOLD) {
+ JitcodeRegionEntry previousEntry = regionEntry(0);
+ for (uint32_t i = 1; i < regions; i++) {
+ JitcodeRegionEntry nextEntry = regionEntry(i);
+ MOZ_ASSERT(nextEntry.nativeOffset() >= previousEntry.nativeOffset());
+
+ // See note in binary-search code below about why we use '<=' here
+ // instead of '<'. Short explanation: regions are closed at their
+ // ending addresses, and open at their starting addresses.
+ if (nativeOffset <= nextEntry.nativeOffset()) {
+ return i - 1;
+ }
+
+ previousEntry = nextEntry;
+ }
+ // If nothing found, assume it falls within last region.
+ return regions - 1;
+ }
+
+ // For larger ones, binary search the region table.
+ uint32_t idx = 0;
+ uint32_t count = regions;
+ while (count > 1) {
+ uint32_t step = count / 2;
+ uint32_t mid = idx + step;
+ JitcodeRegionEntry midEntry = regionEntry(mid);
+
+ // A region memory range is closed at its ending address, not starting
+ // address. This is because the return address for calls must associate
+ // with the call's bytecode PC, not the PC of the bytecode operator after
+ // the call.
+ //
+ // So a query is < an entry if the query nativeOffset is <= the start
+ // address of the entry, and a query is >= an entry if the query
+ // nativeOffset is > the start address of an entry.
+ if (nativeOffset <= midEntry.nativeOffset()) {
+ // Target entry is below midEntry.
+ count = step;
+ } else { // if (nativeOffset > midEntry.nativeOffset())
+ // Target entry is at midEntry or above.
+ idx = mid;
+ count -= step;
+ }
+ }
+ return idx;
+}
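+
+// For example, with regions starting at native offsets 0, 16 and 40, a query
+// at offset 16 resolves to the first region (index 0): region ranges are open
+// at their start and closed at their end, so offset 16 still belongs to the
+// run that ends there, while a query at offset 17 resolves to region 1.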
+
+/* static */
+bool JitcodeIonTable::WriteIonTable(CompactBufferWriter& writer,
+ const IonEntry::ScriptList& scriptList,
+ const NativeToBytecode* start,
+ const NativeToBytecode* end,
+ uint32_t* tableOffsetOut,
+ uint32_t* numRegionsOut) {
+ MOZ_ASSERT(tableOffsetOut != nullptr);
+ MOZ_ASSERT(numRegionsOut != nullptr);
+ MOZ_ASSERT(writer.length() == 0);
+ MOZ_ASSERT(scriptList.length() > 0);
+
+ JitSpew(JitSpew_Profiling,
+ "Writing native to bytecode map for %s:%u:%u (%zu entries)",
+ scriptList[0].script->filename(), scriptList[0].script->lineno(),
+ scriptList[0].script->column(),
+ mozilla::PointerRangeSize(start, end));
+
+ JitSpew(JitSpew_Profiling, " ScriptList of size %u",
+ unsigned(scriptList.length()));
+ for (uint32_t i = 0; i < scriptList.length(); i++) {
+ JitSpew(JitSpew_Profiling, " Script %u - %s:%u:%u", i,
+ scriptList[i].script->filename(), scriptList[i].script->lineno(),
+ scriptList[i].script->column());
+ }
+
+ // Write out runs first. Keep a vector tracking the positive offsets from
+ // payload start to the run.
+ const NativeToBytecode* curEntry = start;
+ js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;
+
+ while (curEntry != end) {
+ // Calculate the length of the next run.
+ uint32_t runLength = JitcodeRegionEntry::ExpectedRunLength(curEntry, end);
+ MOZ_ASSERT(runLength > 0);
+ MOZ_ASSERT(runLength <= uintptr_t(end - curEntry));
+ JitSpew(JitSpew_Profiling, " Run at entry %d, length %d, buffer offset %d",
+ int(curEntry - start), int(runLength), int(writer.length()));
+
+ // Store the offset of the run.
+ if (!runOffsets.append(writer.length())) {
+ return false;
+ }
+
+ // Encode the run.
+ if (!JitcodeRegionEntry::WriteRun(writer, scriptList, runLength,
+ curEntry)) {
+ return false;
+ }
+
+ curEntry += runLength;
+ }
+
+ // Done encoding regions. About to start table. Ensure we are aligned to 4
+ // bytes since table is composed of uint32_t values.
+ uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
+ if (padding == sizeof(uint32_t)) {
+ padding = 0;
+ }
+ JitSpew(JitSpew_Profiling, " Padding %d bytes after run @%d", int(padding),
+ int(writer.length()));
+ for (uint32_t i = 0; i < padding; i++) {
+ writer.writeByte(0);
+ }
+
+ // Now at start of table.
+ uint32_t tableOffset = writer.length();
+
+ // The table being written at this point will be accessed directly via
+ // uint32_t pointers, so all writes below use native endianness.
+
+ // Write out numRegions
+ JitSpew(JitSpew_Profiling, " Writing numRuns=%d", int(runOffsets.length()));
+ writer.writeNativeEndianUint32_t(runOffsets.length());
+
+ // Write out region offset table. The offsets in |runOffsets| are currently
+ // forward offsets from the beginning of the buffer. We convert them to
+ // backwards offsets from the start of the table before writing them into
+ // their table entries.
+ for (uint32_t i = 0; i < runOffsets.length(); i++) {
+ JitSpew(JitSpew_Profiling, " Run %d offset=%d backOffset=%d @%d", int(i),
+ int(runOffsets[i]), int(tableOffset - runOffsets[i]),
+ int(writer.length()));
+ writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
+ }
+
+ if (writer.oom()) {
+ return false;
+ }
+
+ *tableOffsetOut = tableOffset;
+ *numRegionsOut = runOffsets.length();
+ return true;
+}
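+
+// Alignment example: if the encoded runs end at byte 13, three zero bytes of
+// padding are written so the table starts at offset 16; a run that began at
+// buffer offset 5 is then stored in the table as the back-offset 16 - 5 = 11.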
+
+} // namespace jit
+} // namespace js
+
+JS::ProfiledFrameHandle::ProfiledFrameHandle(JSRuntime* rt,
+ js::jit::JitcodeGlobalEntry& entry,
+ void* addr, const char* label,
+ uint32_t depth)
+ : rt_(rt),
+ entry_(entry),
+ addr_(addr),
+ canonicalAddr_(nullptr),
+ label_(label),
+ depth_(depth) {
+ if (!canonicalAddr_) {
+ canonicalAddr_ = entry_.canonicalNativeAddrFor(rt_, addr_);
+ }
+}
+
+JS_PUBLIC_API JS::ProfilingFrameIterator::FrameKind
+JS::ProfiledFrameHandle::frameKind() const {
+ if (entry_.isBaselineInterpreter()) {
+ return JS::ProfilingFrameIterator::Frame_BaselineInterpreter;
+ }
+ if (entry_.isBaseline()) {
+ return JS::ProfilingFrameIterator::Frame_Baseline;
+ }
+ return JS::ProfilingFrameIterator::Frame_Ion;
+}
+
+JS_PUBLIC_API uint64_t JS::ProfiledFrameHandle::realmID() const {
+ return entry_.lookupRealmID(rt_, addr_);
+}
+
+JS_PUBLIC_API JS::ProfiledFrameRange JS::GetProfiledFrames(JSContext* cx,
+ void* addr) {
+ JSRuntime* rt = cx->runtime();
+ js::jit::JitcodeGlobalTable* table =
+ rt->jitRuntime()->getJitcodeGlobalTable();
+ js::jit::JitcodeGlobalEntry* entry = table->lookup(addr);
+
+ ProfiledFrameRange result(rt, addr, entry);
+
+ if (entry) {
+ result.depth_ = entry->callStackAtAddr(rt, addr, result.labels_,
+ MOZ_ARRAY_LENGTH(result.labels_));
+ }
+ return result;
+}
+
+JS::ProfiledFrameHandle JS::ProfiledFrameRange::Iter::operator*() const {
+ // The iterator iterates in high depth to low depth order. index_ goes up,
+ // and the depth we need to pass to ProfiledFrameHandle goes down.
+ uint32_t depth = range_.depth_ - 1 - index_;
+ return ProfiledFrameHandle(range_.rt_, *range_.entry_, range_.addr_,
+ range_.labels_[depth], depth);
+}
diff --git a/js/src/jit/JitcodeMap.h b/js/src/jit/JitcodeMap.h
new file mode 100644
index 0000000000..b4ed8ae7ff
--- /dev/null
+++ b/js/src/jit/JitcodeMap.h
@@ -0,0 +1,808 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitcodeMap_h
+#define jit_JitcodeMap_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF, MOZ_CRASH
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t, uint64_t
+
+#include "ds/AvlTree.h" // AvlTree
+#include "jit/CompactBuffer.h" // CompactBufferReader, CompactBufferWriter
+#include "jit/shared/Assembler-shared.h" // CodeOffset
+#include "js/AllocPolicy.h" // SystemAllocPolicy
+#include "js/TypeDecls.h" // jsbytecode
+#include "js/Vector.h" // Vector
+#include "vm/BytecodeLocation.h" // BytecodeLocation
+
+class JSScript;
+class JSTracer;
+struct JSRuntime;
+
+namespace JS {
+class Zone;
+} // namespace JS
+
+namespace js {
+
+class GCMarker;
+
+namespace jit {
+
+class InlineScriptTree;
+
+/*
+ * The jitcode map implements tables to allow mapping from addresses in jitcode
+ * to the list of (JSScript*, jsbytecode*) pairs that are implicitly active in
+ * the frame at that point in the native code.
+ *
+ * To represent this information efficiently, a multi-level table is used.
+ *
+ * At the top level, a global AVL-tree of JitcodeGlobalEntry entries describes
+ * the mapping for each individual JitCode generated by compiles. The entries
+ * are ordered by their nativeStartAddr.
+ *
+ * Every entry in the table is of fixed size, but there are different entry
+ * types, distinguished by the kind field.
+ */
+
+class JitcodeGlobalTable;
+class JitcodeIonTable;
+class JitcodeRegionEntry;
+
+struct NativeToBytecode {
+ CodeOffset nativeOffset;
+ InlineScriptTree* tree;
+ jsbytecode* pc;
+};
+
+// Describes range [start, end) of JIT-generated code.
+class JitCodeRange {
+ protected:
+ void* const nativeStartAddr_;
+ void* const nativeEndAddr_;
+
+ public:
+ JitCodeRange(void* start, void* end)
+ : nativeStartAddr_(start), nativeEndAddr_(end) {
+ MOZ_ASSERT(start < end);
+ }
+
+ // Comparator used by the AvlTree.
+ static int compare(const JitCodeRange* r1, const JitCodeRange* r2) {
+ // JitCodeRange includes 'start' but excludes 'end'.
+ if (r1->nativeEndAddr_ <= r2->nativeStartAddr_) {
+ return -1;
+ }
+ if (r1->nativeStartAddr_ >= r2->nativeEndAddr_) {
+ return 1;
+ }
+ return 0;
+ }
+
+ void* nativeStartAddr() const { return nativeStartAddr_; }
+ void* nativeEndAddr() const { return nativeEndAddr_; }
+
+ bool containsPointer(void* ptr) const {
+ return nativeStartAddr() <= ptr && ptr < nativeEndAddr();
+ }
+};
+
+typedef Vector<BytecodeLocation, 0, SystemAllocPolicy> BytecodeLocationVector;
+
+class IonEntry;
+class IonICEntry;
+class BaselineEntry;
+class BaselineInterpreterEntry;
+class DummyEntry;
+
+// Base class for all entries.
+class JitcodeGlobalEntry : public JitCodeRange {
+ protected:
+ JitCode* jitcode_;
+ // If this entry is referenced from the profiler buffer, this is the
+ // position where the most recent sample that references it starts.
+ // Otherwise set to kNoSampleInBuffer.
+ static const uint64_t kNoSampleInBuffer = UINT64_MAX;
+ uint64_t samplePositionInBuffer_ = kNoSampleInBuffer;
+
+ public:
+ enum class Kind : uint8_t {
+ Ion,
+ IonIC,
+ Baseline,
+ BaselineInterpreter,
+ Dummy
+ };
+
+ protected:
+ Kind kind_;
+
+ JitcodeGlobalEntry(Kind kind, JitCode* code, void* nativeStartAddr,
+ void* nativeEndAddr)
+ : JitCodeRange(nativeStartAddr, nativeEndAddr),
+ jitcode_(code),
+ kind_(kind) {
+ MOZ_ASSERT(code);
+ MOZ_ASSERT(nativeStartAddr);
+ MOZ_ASSERT(nativeEndAddr);
+ }
+
+ // Protected destructor to ensure this is called through DestroyPolicy.
+ ~JitcodeGlobalEntry() = default;
+
+ JitcodeGlobalEntry(const JitcodeGlobalEntry& other) = delete;
+ void operator=(const JitcodeGlobalEntry& other) = delete;
+
+ public:
+ struct DestroyPolicy {
+ void operator()(JitcodeGlobalEntry* entry);
+ };
+
+ void setSamplePositionInBuffer(uint64_t bufferWritePos) {
+ samplePositionInBuffer_ = bufferWritePos;
+ }
+ void setAsExpired() { samplePositionInBuffer_ = kNoSampleInBuffer; }
+ bool isSampled(uint64_t bufferRangeStart) {
+ if (samplePositionInBuffer_ == kNoSampleInBuffer) {
+ return false;
+ }
+ return bufferRangeStart <= samplePositionInBuffer_;
+ }
+
+ Kind kind() const { return kind_; }
+ bool isIon() const { return kind() == Kind::Ion; }
+ bool isIonIC() const { return kind() == Kind::IonIC; }
+ bool isBaseline() const { return kind() == Kind::Baseline; }
+ bool isBaselineInterpreter() const {
+ return kind() == Kind::BaselineInterpreter;
+ }
+ bool isDummy() const { return kind() == Kind::Dummy; }
+
+ inline const IonEntry& asIon() const;
+ inline const IonICEntry& asIonIC() const;
+ inline const BaselineEntry& asBaseline() const;
+ inline const BaselineInterpreterEntry& asBaselineInterpreter() const;
+ inline const DummyEntry& asDummy() const;
+
+ inline IonEntry& asIon();
+ inline IonICEntry& asIonIC();
+ inline BaselineEntry& asBaseline();
+ inline BaselineInterpreterEntry& asBaselineInterpreter();
+ inline DummyEntry& asDummy();
+
+ JitCode* jitcode() const { return jitcode_; }
+ JitCode** jitcodePtr() { return &jitcode_; }
+ Zone* zone() const { return jitcode()->zone(); }
+
+ bool traceJitcode(JSTracer* trc);
+ bool isJitcodeMarkedFromAnyThread(JSRuntime* rt);
+
+ bool trace(JSTracer* trc);
+ void traceWeak(JSTracer* trc);
+ uint64_t lookupRealmID(JSRuntime* rt, void* ptr) const;
+ void* canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const;
+
+ // Read the inline call stack at a given point in the native code and append
+ // it to the given vector. The innermost (script,pc) pair is appended first,
+ // the outermost last.
+ //
+ // Returns false on memory failure.
+ [[nodiscard]] bool callStackAtAddr(JSRuntime* rt, void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const;
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const;
+};
+
+using UniqueJitcodeGlobalEntry =
+ UniquePtr<JitcodeGlobalEntry, JitcodeGlobalEntry::DestroyPolicy>;
+
+template <typename T, typename... Args>
+inline UniqueJitcodeGlobalEntry MakeJitcodeGlobalEntry(JSContext* cx,
+ Args&&... args) {
+ UniqueJitcodeGlobalEntry res(js_new<T>(std::forward<Args>(args)...));
+ if (!res) {
+ ReportOutOfMemory(cx);
+ }
+ return res;
+}
+
+class IonEntry : public JitcodeGlobalEntry {
+ public:
+ struct ScriptNamePair {
+ JSScript* script;
+ UniqueChars str;
+ ScriptNamePair(JSScript* script, UniqueChars str)
+ : script(script), str(std::move(str)) {}
+ };
+ using ScriptList = Vector<ScriptNamePair, 2, SystemAllocPolicy>;
+
+ private:
+ ScriptList scriptList_;
+
+ // regionTable_ points to the start of the region table within the
+ // packed map for the compile represented by this entry. Since the
+ // region table occurs at the tail of the memory region, this pointer
+ // points somewhere inside the region memory space, and not to the start
+ // of the memory space.
+ const JitcodeIonTable* regionTable_;
+
+ public:
+ IonEntry(JitCode* code, void* nativeStartAddr, void* nativeEndAddr,
+ ScriptList&& scriptList, JitcodeIonTable* regionTable)
+ : JitcodeGlobalEntry(Kind::Ion, code, nativeStartAddr, nativeEndAddr),
+ scriptList_(std::move(scriptList)),
+ regionTable_(regionTable) {
+ MOZ_ASSERT(regionTable);
+ }
+
+ ~IonEntry();
+
+ ScriptList& scriptList() { return scriptList_; }
+
+ size_t numScripts() const { return scriptList_.length(); }
+
+ JSScript* getScript(unsigned idx) const {
+ MOZ_ASSERT(idx < numScripts());
+ return scriptList_[idx].script;
+ }
+
+ const char* getStr(unsigned idx) const {
+ MOZ_ASSERT(idx < numScripts());
+ return scriptList_[idx].str.get();
+ }
+
+ const JitcodeIonTable* regionTable() const { return regionTable_; }
+
+ void* canonicalNativeAddrFor(void* ptr) const;
+
+ [[nodiscard]] bool callStackAtAddr(void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ uint64_t lookupRealmID(void* ptr) const;
+
+ bool trace(JSTracer* trc);
+ void traceWeak(JSTracer* trc);
+};
+
+class IonICEntry : public JitcodeGlobalEntry {
+ // Address for this IC in the IonScript code. Most operations on IonICEntry
+ // use this to forward to the IonEntry.
+ void* rejoinAddr_;
+
+ public:
+ IonICEntry(JitCode* code, void* nativeStartAddr, void* nativeEndAddr,
+ void* rejoinAddr)
+ : JitcodeGlobalEntry(Kind::IonIC, code, nativeStartAddr, nativeEndAddr),
+ rejoinAddr_(rejoinAddr) {
+ MOZ_ASSERT(rejoinAddr_);
+ }
+
+ void* rejoinAddr() const { return rejoinAddr_; }
+
+ void* canonicalNativeAddrFor(void* ptr) const;
+
+ [[nodiscard]] bool callStackAtAddr(JSRuntime* rt, void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ uint64_t lookupRealmID(JSRuntime* rt, void* ptr) const;
+
+ bool trace(JSTracer* trc);
+ void traceWeak(JSTracer* trc);
+};
+
+class BaselineEntry : public JitcodeGlobalEntry {
+ JSScript* script_;
+ UniqueChars str_;
+
+ public:
+ BaselineEntry(JitCode* code, void* nativeStartAddr, void* nativeEndAddr,
+ JSScript* script, UniqueChars str)
+ : JitcodeGlobalEntry(Kind::Baseline, code, nativeStartAddr,
+ nativeEndAddr),
+ script_(script),
+ str_(std::move(str)) {
+ MOZ_ASSERT(script_);
+ MOZ_ASSERT(str_);
+ }
+
+ JSScript* script() const { return script_; }
+
+ const char* str() const { return str_.get(); }
+
+ void* canonicalNativeAddrFor(void* ptr) const;
+
+ [[nodiscard]] bool callStackAtAddr(void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ uint64_t lookupRealmID() const;
+
+ bool trace(JSTracer* trc);
+ void traceWeak(JSTracer* trc);
+};
+
+class BaselineInterpreterEntry : public JitcodeGlobalEntry {
+ public:
+ BaselineInterpreterEntry(JitCode* code, void* nativeStartAddr,
+ void* nativeEndAddr)
+ : JitcodeGlobalEntry(Kind::BaselineInterpreter, code, nativeStartAddr,
+ nativeEndAddr) {}
+
+ void* canonicalNativeAddrFor(void* ptr) const;
+
+ [[nodiscard]] bool callStackAtAddr(void* ptr, BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ uint64_t lookupRealmID() const;
+};
+
+// Dummy entries are created for jitcode generated when profiling is not
+// turned on, so that they have representation in the global table if they are
+// on the stack when profiling is enabled.
+class DummyEntry : public JitcodeGlobalEntry {
+ public:
+ DummyEntry(JitCode* code, void* nativeStartAddr, void* nativeEndAddr)
+ : JitcodeGlobalEntry(Kind::Dummy, code, nativeStartAddr, nativeEndAddr) {}
+
+ void* canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const {
+ return nullptr;
+ }
+
+ [[nodiscard]] bool callStackAtAddr(JSRuntime* rt, void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const {
+ return true;
+ }
+
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const {
+ return 0;
+ }
+
+ uint64_t lookupRealmID() const { return 0; }
+};
+
+inline const IonEntry& JitcodeGlobalEntry::asIon() const {
+ MOZ_ASSERT(isIon());
+ return *static_cast<const IonEntry*>(this);
+}
+
+inline const IonICEntry& JitcodeGlobalEntry::asIonIC() const {
+ MOZ_ASSERT(isIonIC());
+ return *static_cast<const IonICEntry*>(this);
+}
+
+inline const BaselineEntry& JitcodeGlobalEntry::asBaseline() const {
+ MOZ_ASSERT(isBaseline());
+ return *static_cast<const BaselineEntry*>(this);
+}
+
+inline const BaselineInterpreterEntry&
+JitcodeGlobalEntry::asBaselineInterpreter() const {
+ MOZ_ASSERT(isBaselineInterpreter());
+ return *static_cast<const BaselineInterpreterEntry*>(this);
+}
+
+inline const DummyEntry& JitcodeGlobalEntry::asDummy() const {
+ MOZ_ASSERT(isDummy());
+ return *static_cast<const DummyEntry*>(this);
+}
+
+inline IonEntry& JitcodeGlobalEntry::asIon() {
+ MOZ_ASSERT(isIon());
+ return *static_cast<IonEntry*>(this);
+}
+
+inline IonICEntry& JitcodeGlobalEntry::asIonIC() {
+ MOZ_ASSERT(isIonIC());
+ return *static_cast<IonICEntry*>(this);
+}
+
+inline BaselineEntry& JitcodeGlobalEntry::asBaseline() {
+ MOZ_ASSERT(isBaseline());
+ return *static_cast<BaselineEntry*>(this);
+}
+
+inline BaselineInterpreterEntry& JitcodeGlobalEntry::asBaselineInterpreter() {
+ MOZ_ASSERT(isBaselineInterpreter());
+ return *static_cast<BaselineInterpreterEntry*>(this);
+}
+
+inline DummyEntry& JitcodeGlobalEntry::asDummy() {
+ MOZ_ASSERT(isDummy());
+ return *static_cast<DummyEntry*>(this);
+}
+
+// Global table of JitcodeGlobalEntry entries.
+class JitcodeGlobalTable {
+ private:
+ // Vector containing (and owning) all entries. This is unsorted and used for
+ // iterating over all entries, because the AvlTree currently doesn't support
+ // modifications while iterating.
+ using EntryVector = Vector<UniqueJitcodeGlobalEntry, 0, SystemAllocPolicy>;
+ EntryVector entries_;
+
+ // AVL tree containing all entries in the Vector above. This is used to
+ // efficiently look up the entry corresponding to a native code address.
+ using EntryTree = AvlTree<JitCodeRange*, JitCodeRange>;
+ static const size_t LIFO_CHUNK_SIZE = 16 * 1024;
+ LifoAlloc alloc_;
+ EntryTree tree_;
+
+ public:
+ JitcodeGlobalTable() : alloc_(LIFO_CHUNK_SIZE), tree_(&alloc_) {}
+
+ bool empty() const {
+ MOZ_ASSERT(entries_.empty() == tree_.empty());
+ return entries_.empty();
+ }
+
+ JitcodeGlobalEntry* lookup(void* ptr) { return lookupInternal(ptr); }
+
+ const JitcodeGlobalEntry* lookupForSampler(void* ptr, JSRuntime* rt,
+ uint64_t samplePosInBuffer);
+
+ [[nodiscard]] bool addEntry(UniqueJitcodeGlobalEntry entry);
+
+ void setAllEntriesAsExpired();
+ [[nodiscard]] bool markIteratively(GCMarker* marker);
+ void traceWeak(JSRuntime* rt, JSTracer* trc);
+
+ private:
+ JitcodeGlobalEntry* lookupInternal(void* ptr);
+};
+
+// clang-format off
+/*
+ * Container class for main jitcode table.
+ * The Region table's memory is structured as follows:
+ *
+ * +------------------------------------------------+ |
+ * | Region 1 Run | |
+ * |------------------------------------------------| |
+ * | Region 2 Run | |
+ * | | |
+ * | | |
+ * |------------------------------------------------| |
+ * | Region 3 Run | |
+ * | | |
+ * |------------------------------------------------| |-- Payload
+ * | | |
+ * | ... | |
+ * | | |
+ * |------------------------------------------------| |
+ * | Region M Run | |
+ * | | |
+ * +================================================+ <- RegionTable pointer points here
+ * | uint32_t numRegions = M | |
+ * +------------------------------------------------+ |
+ * | Region 1 | |
+ * | uint32_t entryOffset = size(Payload) | |
+ * +------------------------------------------------+ |
+ * | | |-- Table
+ * | ... | |
+ * | | |
+ * +------------------------------------------------+ |
+ * | Region M | |
+ * | uint32_t entryOffset | |
+ * +------------------------------------------------+ |
+ *
+ * The region table is composed of two sections: a tail section that contains a table of
+ * fixed-size entries containing offsets into the head section, and a head section that
+ * holds a sequence of variable-sized runs. The table in the tail section serves to
+ * locate the variable-length encoded structures in the head section.
+ *
+ * The entryOffsets in the table indicate the byte offset to subtract from the regionTable
+ * pointer to arrive at the encoded region in the payload.
+ *
+ *
+ * Variable-length entries in payload
+ * ----------------------------------
+ * The entryOffsets in the region table's fixed-sized entries refer to a location within the
+ * variable-length payload section. This location contains a compactly encoded "run" of
+ * mappings.
+ *
+ * Each run starts by describing the offset within the native code it starts at, and the
+ * sequence of (JSScript*, jsbytecode*) pairs active at that site. Following that, there
+ * are a number of variable-length entries encoding (nativeOffsetDelta, bytecodeOffsetDelta)
+ * pairs for the run.
+ *
+ * VarUint32 nativeOffset;
+ * - The offset from nativeStartAddr in the global table entry at which
+ * the jitcode for this region starts.
+ *
+ * Uint8_t scriptDepth;
+ * - The depth of inlined scripts for this region.
+ *
+ * List<VarUint32> inlineScriptPcStack;
+ * - We encode (2 * scriptDepth) VarUint32s here. Each pair of uint32s is taken
+ * as an index into the scriptList in the global table entry and a pcOffset,
+ * respectively.
+ *
+ * List<NativeAndBytecodeDelta> deltaRun;
+ * - The rest of the entry is a deltaRun that stores a series of variable-length
+ * encoded NativeAndBytecodeDelta datums.
+ */
+// clang-format on
+class JitcodeRegionEntry {
+ private:
+ static const unsigned MAX_RUN_LENGTH = 100;
+
+ public:
+ static void WriteHead(CompactBufferWriter& writer, uint32_t nativeOffset,
+ uint8_t scriptDepth);
+ static void ReadHead(CompactBufferReader& reader, uint32_t* nativeOffset,
+ uint8_t* scriptDepth);
+
+ static void WriteScriptPc(CompactBufferWriter& writer, uint32_t scriptIdx,
+ uint32_t pcOffset);
+ static void ReadScriptPc(CompactBufferReader& reader, uint32_t* scriptIdx,
+ uint32_t* pcOffset);
+
+ static void WriteDelta(CompactBufferWriter& writer, uint32_t nativeDelta,
+ int32_t pcDelta);
+ static void ReadDelta(CompactBufferReader& reader, uint32_t* nativeDelta,
+ int32_t* pcDelta);
+
+ // Given a pointer into an array of NativeToBytecode (and a pointer to the end
+ // of the array), compute the number of entries that would be consumed by
+ // outputting a run starting at this one.
+ static uint32_t ExpectedRunLength(const NativeToBytecode* entry,
+ const NativeToBytecode* end);
+
+ // Write a run, starting at the given NativeToBytecode entry, into the given
+ // buffer writer.
+ [[nodiscard]] static bool WriteRun(CompactBufferWriter& writer,
+ const IonEntry::ScriptList& scriptList,
+ uint32_t runLength,
+ const NativeToBytecode* entry);
+
+ // Delta Run entry formats are encoded little-endian:
+ //
+ // byte 0
+ // NNNN-BBB0
+ // Single byte format. nativeDelta in [0, 15], pcDelta in [0, 7]
+ //
+ static const uint32_t ENC1_MASK = 0x1;
+ static const uint32_t ENC1_MASK_VAL = 0x0;
+
+ static const uint32_t ENC1_NATIVE_DELTA_MAX = 0xf;
+ static const unsigned ENC1_NATIVE_DELTA_SHIFT = 4;
+
+ static const uint32_t ENC1_PC_DELTA_MASK = 0x0e;
+ static const int32_t ENC1_PC_DELTA_MAX = 0x7;
+ static const unsigned ENC1_PC_DELTA_SHIFT = 1;
+
+ // byte 1 byte 0
+ // NNNN-NNNN BBBB-BB01
+ // Two-byte format. nativeDelta in [0, 255], pcDelta in [0, 63]
+ //
+ static const uint32_t ENC2_MASK = 0x3;
+ static const uint32_t ENC2_MASK_VAL = 0x1;
+
+ static const uint32_t ENC2_NATIVE_DELTA_MAX = 0xff;
+ static const unsigned ENC2_NATIVE_DELTA_SHIFT = 8;
+
+ static const uint32_t ENC2_PC_DELTA_MASK = 0x00fc;
+ static const int32_t ENC2_PC_DELTA_MAX = 0x3f;
+ static const unsigned ENC2_PC_DELTA_SHIFT = 2;
+
+ // byte 2 byte 1 byte 0
+ // NNNN-NNNN NNNB-BBBB BBBB-B011
+ // Three-byte format. nativeDelta in [0, 2047], pcDelta in [-512, 511]
+ //
+ static const uint32_t ENC3_MASK = 0x7;
+ static const uint32_t ENC3_MASK_VAL = 0x3;
+
+ static const uint32_t ENC3_NATIVE_DELTA_MAX = 0x7ff;
+ static const unsigned ENC3_NATIVE_DELTA_SHIFT = 13;
+
+ static const uint32_t ENC3_PC_DELTA_MASK = 0x001ff8;
+ static const int32_t ENC3_PC_DELTA_MAX = 0x1ff;
+ static const int32_t ENC3_PC_DELTA_MIN = -ENC3_PC_DELTA_MAX - 1;
+ static const unsigned ENC3_PC_DELTA_SHIFT = 3;
+
+ // byte 3 byte 2 byte 1 byte 0
+ // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
+ // Four-byte format. nativeDelta in [0, 65535],
+ // pcDelta in [-4096, 4095]
+ static const uint32_t ENC4_MASK = 0x7;
+ static const uint32_t ENC4_MASK_VAL = 0x7;
+
+ static const uint32_t ENC4_NATIVE_DELTA_MAX = 0xffff;
+ static const unsigned ENC4_NATIVE_DELTA_SHIFT = 16;
+
+ static const uint32_t ENC4_PC_DELTA_MASK = 0x0000fff8;
+ static const int32_t ENC4_PC_DELTA_MAX = 0xfff;
+ static const int32_t ENC4_PC_DELTA_MIN = -ENC4_PC_DELTA_MAX - 1;
+ static const unsigned ENC4_PC_DELTA_SHIFT = 3;
+
+ static bool IsDeltaEncodeable(uint32_t nativeDelta, int32_t pcDelta) {
+ return (nativeDelta <= ENC4_NATIVE_DELTA_MAX) &&
+ (pcDelta >= ENC4_PC_DELTA_MIN) && (pcDelta <= ENC4_PC_DELTA_MAX);
+ }
+
+ private:
+ const uint8_t* data_;
+ const uint8_t* end_;
+
+ // Unpacked state from jitcode entry.
+ uint32_t nativeOffset_;
+ uint8_t scriptDepth_;
+ const uint8_t* scriptPcStack_;
+ const uint8_t* deltaRun_;
+
+ void unpack();
+
+ public:
+ JitcodeRegionEntry(const uint8_t* data, const uint8_t* end)
+ : data_(data),
+ end_(end),
+ nativeOffset_(0),
+ scriptDepth_(0),
+ scriptPcStack_(nullptr),
+ deltaRun_(nullptr) {
+ MOZ_ASSERT(data_ < end_);
+ unpack();
+ MOZ_ASSERT(scriptPcStack_ < end_);
+ MOZ_ASSERT(deltaRun_ <= end_);
+ }
+
+ uint32_t nativeOffset() const { return nativeOffset_; }
+ uint32_t scriptDepth() const { return scriptDepth_; }
+
+ class ScriptPcIterator {
+ private:
+ const uint8_t* start_;
+ const uint8_t* end_;
+#ifdef DEBUG
+ uint32_t count_;
+#endif
+ uint32_t idx_;
+ const uint8_t* cur_;
+
+ public:
+ ScriptPcIterator(const uint8_t* start, const uint8_t* end, uint32_t count)
+ : start_(start),
+ end_(end),
+#ifdef DEBUG
+ count_(count),
+#endif
+ idx_(0),
+ cur_(start_) {
+ }
+
+ bool hasMore() const {
+ MOZ_ASSERT((idx_ == count_) == (cur_ == end_));
+ MOZ_ASSERT((idx_ < count_) == (cur_ < end_));
+ return cur_ < end_;
+ }
+
+ void readNext(uint32_t* scriptIdxOut, uint32_t* pcOffsetOut) {
+ MOZ_ASSERT(scriptIdxOut);
+ MOZ_ASSERT(pcOffsetOut);
+ MOZ_ASSERT(hasMore());
+
+ CompactBufferReader reader(cur_, end_);
+ ReadScriptPc(reader, scriptIdxOut, pcOffsetOut);
+
+ cur_ = reader.currentPosition();
+ MOZ_ASSERT(cur_ <= end_);
+
+ idx_++;
+ MOZ_ASSERT_IF(idx_ == count_, cur_ == end_);
+ }
+
+ void reset() {
+ idx_ = 0;
+ cur_ = start_;
+ }
+ };
+
+ ScriptPcIterator scriptPcIterator() const {
+ // End of script+pc sequence is the start of the delta run.
+ return ScriptPcIterator(scriptPcStack_, deltaRun_, scriptDepth_);
+ }
+
+ class DeltaIterator {
+ private:
+ const uint8_t* start_;
+ const uint8_t* end_;
+ const uint8_t* cur_;
+
+ public:
+ DeltaIterator(const uint8_t* start, const uint8_t* end)
+ : start_(start), end_(end), cur_(start) {}
+
+ bool hasMore() const {
+ MOZ_ASSERT(cur_ <= end_);
+ return cur_ < end_;
+ }
+
+ void readNext(uint32_t* nativeDeltaOut, int32_t* pcDeltaOut) {
+ MOZ_ASSERT(nativeDeltaOut != nullptr);
+ MOZ_ASSERT(pcDeltaOut != nullptr);
+
+ MOZ_ASSERT(hasMore());
+
+ CompactBufferReader reader(cur_, end_);
+ ReadDelta(reader, nativeDeltaOut, pcDeltaOut);
+
+ cur_ = reader.currentPosition();
+ MOZ_ASSERT(cur_ <= end_);
+ }
+
+ void reset() { cur_ = start_; }
+ };
+ DeltaIterator deltaIterator() const { return DeltaIterator(deltaRun_, end_); }
+
+ uint32_t findPcOffset(uint32_t queryNativeOffset,
+ uint32_t startPcOffset) const;
+};
+
+class JitcodeIonTable {
+ private:
+ /* Variable length payload section "below" here. */
+ uint32_t numRegions_;
+ uint32_t regionOffsets_[1];
+
+ const uint8_t* payloadEnd() const {
+ return reinterpret_cast<const uint8_t*>(this);
+ }
+
+ public:
+ JitcodeIonTable() = delete;
+
+ uint32_t numRegions() const { return numRegions_; }
+
+ uint32_t regionOffset(uint32_t regionIndex) const {
+ MOZ_ASSERT(regionIndex < numRegions());
+ return regionOffsets_[regionIndex];
+ }
+
+ JitcodeRegionEntry regionEntry(uint32_t regionIndex) const {
+ const uint8_t* regionStart = payloadEnd() - regionOffset(regionIndex);
+ const uint8_t* regionEnd = payloadEnd();
+ if (regionIndex < numRegions_ - 1) {
+ regionEnd -= regionOffset(regionIndex + 1);
+ }
+ return JitcodeRegionEntry(regionStart, regionEnd);
+ }
+
+ uint32_t findRegionEntry(uint32_t offset) const;
+
+ const uint8_t* payloadStart() const {
+ // The beginning of the payload and the beginning of the first region are
+ // the same.
+ return payloadEnd() - regionOffset(0);
+ }
+
+ [[nodiscard]] static bool WriteIonTable(
+ CompactBufferWriter& writer, const IonEntry::ScriptList& scriptList,
+ const NativeToBytecode* start, const NativeToBytecode* end,
+ uint32_t* tableOffsetOut, uint32_t* numRegionsOut);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitcodeMap_h */
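
To make the single-byte (ENC1) delta format concrete, the following self-contained sketch packs and unpacks one delta pair. It mirrors only the bit layout documented above; the real WriteDelta/ReadDelta in JitcodeMap.cpp also handle the two-, three- and four-byte formats and go through CompactBufferWriter/CompactBufferReader rather than raw bytes.

#include <cassert>
#include <cstdint>

// ENC1: byte layout NNNN-BBB0, nativeDelta in [0, 15], pcDelta in [0, 7].
static uint8_t EncodeDelta1(uint32_t nativeDelta, int32_t pcDelta) {
  assert(nativeDelta <= 0xf);
  assert(pcDelta >= 0 && pcDelta <= 0x7);
  return uint8_t((nativeDelta << 4) | (uint32_t(pcDelta) << 1) | 0x0);
}

static void DecodeDelta1(uint8_t b, uint32_t* nativeDelta, int32_t* pcDelta) {
  assert((b & 0x1) == 0x0);             // ENC1_MASK / ENC1_MASK_VAL
  *pcDelta = int32_t((b & 0x0e) >> 1);  // ENC1_PC_DELTA_MASK, _SHIFT
  *nativeDelta = uint32_t(b) >> 4;      // ENC1_NATIVE_DELTA_SHIFT
}

// Example: nativeDelta 5 and pcDelta 3 pack to 0x56.
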
diff --git a/js/src/jit/Jitdump.h b/js/src/jit/Jitdump.h
new file mode 100644
index 0000000000..a1d61bd69e
--- /dev/null
+++ b/js/src/jit/Jitdump.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitDump_h
+#define jit_JitDump_h
+
+/*
+ This file provides the necessary data structures to meet the JitDump
+ specification as of
+ https://github.com/torvalds/linux/blob/f2906aa863381afb0015a9eb7fefad885d4e5a56/tools/perf/Documentation/jitdump-specification.txt
+*/
+
+#include <stdint.h>  // uint32_t, uint64_t
+
+namespace js {
+namespace jit {
+
+// JitDump record types
+enum {
+ JIT_CODE_LOAD = 0,
+ JIT_CODE_MOVE,
+ JIT_CODE_DEBUG_INFO,
+ JIT_CODE_CLOSE,
+ JIT_CODE_UNWINDING_INFO
+};
+
+// File header
+struct JitDumpHeader {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t total_size;
+ uint32_t elf_mach;
+ uint32_t pad1;
+ uint32_t pid;
+ uint64_t timestamp;
+ uint64_t flags;
+};
+
+// Header for each record
+struct JitDumpRecordHeader {
+ uint32_t id;
+ uint32_t total_size;
+ uint64_t timestamp;
+};
+
+// Load record
+struct JitDumpLoadRecord {
+ JitDumpRecordHeader header;
+
+ // Payload
+ uint32_t pid;
+ uint32_t tid;
+ uint64_t vma;
+ uint64_t code_addr;
+ uint64_t code_size;
+ uint64_t code_index;
+};
+
+// Debug record
+struct JitDumpDebugRecord {
+ JitDumpRecordHeader header;
+
+ // Debug header
+ uint64_t code_addr;
+ uint64_t nr_entry;
+};
+
+struct JitDumpDebugEntry {
+ uint64_t code_addr;
+ uint32_t line;
+ uint32_t discrim;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitDump_h */
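
As an illustration of how these records are meant to be used, the sketch below fills in a JIT_CODE_LOAD record for one compiled function. Per the linked specification, total_size covers the record header, the fixed payload, the NUL-terminated function name and the raw code bytes, which the caller writes out right after the record; the pid, tid and timestamp are left as caller-supplied parameters here to avoid platform-specific calls.

#include <cstdint>
#include <cstring>

#include "jit/Jitdump.h"

// Fill a JIT_CODE_LOAD record; the caller then writes the record, the
// NUL-terminated |name| and |codeSize| bytes of code to the jitdump file.
static void FillLoadRecord(js::jit::JitDumpLoadRecord* rec, uint32_t pid,
                           uint32_t tid, uint64_t timestamp, const char* name,
                           const void* code, uint64_t codeSize,
                           uint64_t codeIndex) {
  memset(rec, 0, sizeof(*rec));
  rec->header.id = js::jit::JIT_CODE_LOAD;
  rec->header.total_size =
      uint32_t(sizeof(*rec) + strlen(name) + 1 + codeSize);
  rec->header.timestamp = timestamp;
  rec->pid = pid;
  rec->tid = tid;
  rec->vma = reinterpret_cast<uint64_t>(code);
  rec->code_addr = rec->vma;
  rec->code_size = codeSize;
  rec->code_index = codeIndex;
}
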
diff --git a/js/src/jit/KnownClass.cpp b/js/src/jit/KnownClass.cpp
new file mode 100644
index 0000000000..fcb7715f5f
--- /dev/null
+++ b/js/src/jit/KnownClass.cpp
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/KnownClass.h"
+
+#include "jit/MIR.h"
+#include "vm/ArrayObject.h"
+#include "vm/Iteration.h"
+#include "vm/JSFunction.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/RegExpObject.h"
+
+using namespace js;
+using namespace js::jit;
+
+KnownClass jit::GetObjectKnownClass(const MDefinition* def) {
+ MOZ_ASSERT(def->type() == MIRType::Object);
+
+ switch (def->op()) {
+ case MDefinition::Opcode::NewArray:
+ case MDefinition::Opcode::NewArrayDynamicLength:
+ case MDefinition::Opcode::NewArrayObject:
+ case MDefinition::Opcode::Rest:
+ case MDefinition::Opcode::ArgumentsSlice:
+ case MDefinition::Opcode::FrameArgumentsSlice:
+ case MDefinition::Opcode::InlineArgumentsSlice:
+ return KnownClass::Array;
+
+ case MDefinition::Opcode::NewObject:
+ case MDefinition::Opcode::NewPlainObject:
+ case MDefinition::Opcode::CreateThis:
+ return KnownClass::PlainObject;
+
+ case MDefinition::Opcode::Lambda:
+ case MDefinition::Opcode::FunctionWithProto:
+ return KnownClass::Function;
+
+ case MDefinition::Opcode::RegExp:
+ return KnownClass::RegExp;
+
+ case MDefinition::Opcode::NewIterator:
+ switch (def->toNewIterator()->type()) {
+ case MNewIterator::ArrayIterator:
+ return KnownClass::ArrayIterator;
+ case MNewIterator::StringIterator:
+ return KnownClass::StringIterator;
+ case MNewIterator::RegExpStringIterator:
+ return KnownClass::RegExpStringIterator;
+ }
+ MOZ_CRASH("unreachable");
+
+ case MDefinition::Opcode::Phi: {
+ if (def->numOperands() == 0) {
+ return KnownClass::None;
+ }
+
+ MDefinition* op = def->getOperand(0);
+ // Check for Phis to avoid recursion for now.
+ if (op->isPhi()) {
+ return KnownClass::None;
+ }
+
+ KnownClass known = GetObjectKnownClass(op);
+ if (known == KnownClass::None) {
+ return KnownClass::None;
+ }
+
+ for (size_t i = 1; i < def->numOperands(); i++) {
+ op = def->getOperand(i);
+ if (op->isPhi() || GetObjectKnownClass(op) != known) {
+ return KnownClass::None;
+ }
+ }
+
+ return known;
+ }
+
+ default:
+ break;
+ }
+
+ return KnownClass::None;
+}
+
+const JSClass* jit::GetObjectKnownJSClass(const MDefinition* def) {
+ switch (GetObjectKnownClass(def)) {
+ case KnownClass::PlainObject:
+ return &PlainObject::class_;
+ case KnownClass::Array:
+ return &ArrayObject::class_;
+ case KnownClass::Function:
+ return &FunctionClass;
+ case KnownClass::RegExp:
+ return &RegExpObject::class_;
+ case KnownClass::ArrayIterator:
+ return &ArrayIteratorObject::class_;
+ case KnownClass::StringIterator:
+ return &StringIteratorObject::class_;
+ case KnownClass::RegExpStringIterator:
+ return &RegExpStringIteratorObject::class_;
+ case KnownClass::None:
+ break;
+ }
+
+ return nullptr;
+}
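
As a usage sketch, a MIR transform can consult GetObjectKnownJSClass to prove a class guard redundant when the operand's class is statically known. The helper below is hypothetical and only illustrates the check; the actual folding logic lives elsewhere in the MIR code.

#include "jit/KnownClass.h"

// A guard on |guardedClass| is redundant if the operand's class is known
// statically and matches the guarded class.
static bool GuardIsRedundant(const js::jit::MDefinition* obj,
                             const JSClass* guardedClass) {
  const JSClass* known = js::jit::GetObjectKnownJSClass(obj);
  return known != nullptr && known == guardedClass;
}
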
diff --git a/js/src/jit/KnownClass.h b/js/src/jit/KnownClass.h
new file mode 100644
index 0000000000..862c0f1cb8
--- /dev/null
+++ b/js/src/jit/KnownClass.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_KnownClass_h
+#define jit_KnownClass_h
+
+#include "jspubtd.h"
+
+namespace js {
+namespace jit {
+
+class MDefinition;
+
+// Users of this enum often can't handle Proxy and Wrapper classes,
+// or non-Function callables.
+enum class KnownClass {
+ PlainObject,
+ Array,
+ Function,
+ RegExp,
+ ArrayIterator,
+ StringIterator,
+ RegExpStringIterator,
+ None
+};
+
+KnownClass GetObjectKnownClass(const MDefinition* def);
+const JSClass* GetObjectKnownJSClass(const MDefinition* def);
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_KnownClass_h
diff --git a/js/src/jit/LICM.cpp b/js/src/jit/LICM.cpp
new file mode 100644
index 0000000000..ba00199fc7
--- /dev/null
+++ b/js/src/jit/LICM.cpp
@@ -0,0 +1,367 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/LICM.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+// There are two constants which control whether a loop is LICM'd or is left
+// unchanged. For rationale see comment in jit::LICM() below.
+//
+// A bit of quick profiling with the wasm Embenchen suite on x64 shows that
+// the threshold pair (100,25) has either no effect or gives a small net
+// reduction in memory traffic, compared to unconstrained LICMing. Halving
+// them to (50,12) gives a small overall increase in memory traffic,
+// suggesting it excludes too many loops from LICM. Doubling them to (200,50)
+// gives a win that is even smaller than (100,25), hence (100,25) seems the
+// best choice.
+//
+// If a loop has more than this number of basic blocks in its body, it won't
+// be LICM'd.
+static constexpr size_t LargestAllowedLoop = 100;
+
+// If a loop contains an MTableSwitch instruction that has more than this many
+// successors, it won't be LICM'd.
+static constexpr size_t LargestAllowedTableSwitch = 25;
+
+// Test whether any instruction in the loop possiblyCalls().
+static bool LoopContainsPossibleCall(MIRGraph& graph, MBasicBlock* header,
+ MBasicBlock* backedge) {
+ for (auto i(graph.rpoBegin(header));; ++i) {
+ MOZ_ASSERT(i != graph.rpoEnd(),
+ "Reached end of graph searching for blocks in loop");
+ MBasicBlock* block = *i;
+ if (!block->isMarked()) {
+ continue;
+ }
+
+ for (auto insIter(block->begin()), insEnd(block->end()); insIter != insEnd;
+ ++insIter) {
+ MInstruction* ins = *insIter;
+ if (ins->possiblyCalls()) {
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " Possible call found at %s%u", ins->opName(),
+ ins->id());
+#endif
+ return true;
+ }
+ }
+
+ if (block == backedge) {
+ break;
+ }
+ }
+ return false;
+}
+
+// Tests whether any instruction in the loop is a table-switch with more than
+// `LargestAllowedTableSwitch` successors. If it returns true, it also
+// returns the actual number of successors of the instruction in question,
+// although that is used only for statistics/debug printing.
+static bool LoopContainsBigTableSwitch(MIRGraph& graph, MBasicBlock* header,
+ /*OUT*/ size_t* numSuccessors) {
+ MBasicBlock* backedge = header->backedge();
+
+ for (auto i(graph.rpoBegin(header));; ++i) {
+ MOZ_ASSERT(i != graph.rpoEnd(),
+ "Reached end of graph searching for blocks in loop");
+ MBasicBlock* block = *i;
+ if (!block->isMarked()) {
+ continue;
+ }
+
+ for (auto insIter(block->begin()), insEnd(block->end()); insIter != insEnd;
+ ++insIter) {
+ MInstruction* ins = *insIter;
+ if (ins->isTableSwitch() &&
+ ins->toTableSwitch()->numSuccessors() > LargestAllowedTableSwitch) {
+ *numSuccessors = ins->toTableSwitch()->numSuccessors();
+ return true;
+ }
+ }
+
+ if (block == backedge) {
+ break;
+ }
+ }
+ return false;
+}
+
+// When a nested loop has no exits back into what would be its parent loop,
+// MarkLoopBlocks on the parent loop doesn't mark the blocks of the nested
+// loop, since they technically aren't part of the loop. However, AliasAnalysis
+// currently does consider such nested loops to be part of their parent
+// loops. Consequently, we can't use IsInLoop on dependency() values; we must
+// test whether a dependency() is *before* the loop, even if it is not
+// technically in the loop.
+static bool IsBeforeLoop(MDefinition* ins, MBasicBlock* header) {
+ return ins->block()->id() < header->id();
+}
+
+// Test whether the given instruction is inside the loop (and thus not
+// loop-invariant).
+static bool IsInLoop(MDefinition* ins) { return ins->block()->isMarked(); }
+
+// Test whether the given instruction is cheap and not worth hoisting unless
+// one of its users will be hoisted as well.
+static bool RequiresHoistedUse(const MDefinition* ins, bool hasCalls) {
+ if (ins->isBox()) {
+ MOZ_ASSERT(!ins->toBox()->input()->isBox(),
+ "Box of a box could lead to unbounded recursion");
+ return true;
+ }
+
+ // Integer constants are usually cheap and aren't worth hoisting on their
+ // own, in general. Floating-point constants typically are worth hoisting,
+ // unless they'll end up being spilled (e.g. due to a call).
+ if (ins->isConstant() && (!IsFloatingPointType(ins->type()) || hasCalls)) {
+ return true;
+ }
+
+ return false;
+}
+
+// Test whether the given instruction has any operands defined within the loop.
+static bool HasOperandInLoop(MInstruction* ins, bool hasCalls) {
+ // An instruction is only loop invariant if it and all of its operands can
+ // be safely hoisted into the loop preheader.
+ for (size_t i = 0, e = ins->numOperands(); i != e; ++i) {
+ MDefinition* op = ins->getOperand(i);
+
+ if (!IsInLoop(op)) {
+ continue;
+ }
+
+ if (RequiresHoistedUse(op, hasCalls)) {
+ // Recursively test for loop invariance. Note that the recursion is
+ // bounded because we require RequiresHoistedUse to be set at each
+ // level.
+ if (!HasOperandInLoop(op->toInstruction(), hasCalls)) {
+ continue;
+ }
+ }
+
+ return true;
+ }
+ return false;
+}
+
+// Test whether the given instruction is hoistable, ignoring memory
+// dependencies.
+static bool IsHoistableIgnoringDependency(MInstruction* ins, bool hasCalls) {
+ return ins->isMovable() && !ins->isEffectful() &&
+ !HasOperandInLoop(ins, hasCalls);
+}
+
+// Test whether the given instruction has a memory dependency inside the loop.
+static bool HasDependencyInLoop(MInstruction* ins, MBasicBlock* header) {
+ // Don't hoist if this instruction depends on a store inside the loop.
+ if (MDefinition* dep = ins->dependency()) {
+ return !IsBeforeLoop(dep, header);
+ }
+ return false;
+}
+
+// Test whether the given instruction is hoistable.
+static bool IsHoistable(MInstruction* ins, MBasicBlock* header, bool hasCalls) {
+ return IsHoistableIgnoringDependency(ins, hasCalls) &&
+ !HasDependencyInLoop(ins, header);
+}
+
+// In preparation for hoisting an instruction, hoist any of its operands which
+// were too cheap to hoist on their own.
+static void MoveDeferredOperands(MInstruction* ins, MInstruction* hoistPoint,
+ bool hasCalls) {
+ // If any of our operands were waiting for a user to be hoisted, make a note
+ // to hoist them.
+ for (size_t i = 0, e = ins->numOperands(); i != e; ++i) {
+ MDefinition* op = ins->getOperand(i);
+ if (!IsInLoop(op)) {
+ continue;
+ }
+ MOZ_ASSERT(RequiresHoistedUse(op, hasCalls),
+ "Deferred loop-invariant operand is not cheap");
+ MInstruction* opIns = op->toInstruction();
+
+ // Recursively move the operands. Note that the recursion is bounded
+ // because we require RequiresHoistedUse to be set at each level.
+ MoveDeferredOperands(opIns, hoistPoint, hasCalls);
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM,
+ " Hoisting %s%u (now that a user will be hoisted)",
+ opIns->opName(), opIns->id());
+#endif
+
+ opIns->block()->moveBefore(hoistPoint, opIns);
+ opIns->setBailoutKind(BailoutKind::LICM);
+ }
+}
+
+static void VisitLoopBlock(MBasicBlock* block, MBasicBlock* header,
+ MInstruction* hoistPoint, bool hasCalls) {
+ for (auto insIter(block->begin()), insEnd(block->end()); insIter != insEnd;) {
+ MInstruction* ins = *insIter++;
+
+ if (!IsHoistable(ins, header, hasCalls)) {
+#ifdef JS_JITSPEW
+ if (IsHoistableIgnoringDependency(ins, hasCalls)) {
+ JitSpew(JitSpew_LICM,
+ " %s%u isn't hoistable due to dependency on %s%u",
+ ins->opName(), ins->id(), ins->dependency()->opName(),
+ ins->dependency()->id());
+ }
+#endif
+ continue;
+ }
+
+ // Don't hoist a cheap constant if it doesn't enable us to hoist one of
+ // its uses. We want those instructions as close as possible to their
+ // use, to minimize register pressure.
+ if (RequiresHoistedUse(ins, hasCalls)) {
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " %s%u will be hoisted only if its users are",
+ ins->opName(), ins->id());
+#endif
+ continue;
+ }
+
+ // Hoist operands which were too cheap to hoist on their own.
+ MoveDeferredOperands(ins, hoistPoint, hasCalls);
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " Hoisting %s%u", ins->opName(), ins->id());
+#endif
+
+ // Move the instruction to the hoistPoint.
+ block->moveBefore(hoistPoint, ins);
+ ins->setBailoutKind(BailoutKind::LICM);
+ }
+}
+
+static void VisitLoop(MIRGraph& graph, MBasicBlock* header) {
+ MInstruction* hoistPoint = header->loopPredecessor()->lastIns();
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " Visiting loop with header block%u, hoisting to %s%u",
+ header->id(), hoistPoint->opName(), hoistPoint->id());
+#endif
+
+ MBasicBlock* backedge = header->backedge();
+
+ // This indicates whether the loop contains calls or other things which
+ // clobber most or all floating-point registers. In such loops,
+ // floating-point constants should not be hoisted unless it enables further
+ // hoisting.
+ bool hasCalls = LoopContainsPossibleCall(graph, header, backedge);
+
+ for (auto i(graph.rpoBegin(header));; ++i) {
+ MOZ_ASSERT(i != graph.rpoEnd(),
+ "Reached end of graph searching for blocks in loop");
+ MBasicBlock* block = *i;
+ if (!block->isMarked()) {
+ continue;
+ }
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_LICM, " Visiting block%u", block->id());
+#endif
+
+ VisitLoopBlock(block, header, hoistPoint, hasCalls);
+
+ if (block == backedge) {
+ break;
+ }
+ }
+}
+
+bool jit::LICM(MIRGenerator* mir, MIRGraph& graph) {
+ JitSpew(JitSpew_LICM, "Beginning LICM pass");
+
+ // Iterate in RPO to visit outer loops before inner loops. We'd hoist the
+ // same things either way, but outer first means we do a little less work.
+ for (auto i(graph.rpoBegin()), e(graph.rpoEnd()); i != e; ++i) {
+ MBasicBlock* header = *i;
+ if (!header->isLoopHeader()) {
+ continue;
+ }
+
+ bool canOsr;
+ size_t numBlocks = MarkLoopBlocks(graph, header, &canOsr);
+
+ if (numBlocks == 0) {
+ JitSpew(JitSpew_LICM,
+ " Skipping loop with header block%u -- contains zero blocks",
+ header->id());
+ continue;
+ }
+
+ // There are various reasons why we might choose not to LICM a given loop:
+ //
+ // (a) Hoisting out of a loop that has an entry from the OSR block in
+ // addition to its normal entry is tricky. In theory we could clone
+ // the instruction and insert phis. In practice we don't bother.
+ //
+ // (b) If the loop contains a large number of blocks, we play safe and
+ // punt, in order to reduce the risk of creating excessive register
+ // pressure by hoisting lots of values out of the loop. In a larger
+ // loop there's more likely to be duplication of invariant expressions
+ // within the loop body, and that duplication will be GVN'd but only
+ // within the scope of the loop body, so there's less loss from not
+ // lifting them out of the loop entirely.
+ //
+ // (c) If the loop contains a multiway switch with many successors, there
+ // could be paths with low probabilities, from which LICMing will be a
+ // net loss, especially if a large number of values are hoisted out.
+ // See bug 1708381 for a spectacular example and bug 1712078 for
+ // further discussion.
+ //
+ // It's preferable to perform test (c) only if (a) and (b) pass since (c)
+ // is more expensive to determine -- requiring a visit to all the MIR
+ // nodes -- than (a) or (b), which only involve visiting all blocks.
+
+ bool doVisit = true;
+ if (canOsr) {
+ JitSpew(JitSpew_LICM, " Skipping loop with header block%u due to OSR",
+ header->id());
+ doVisit = false;
+ } else if (numBlocks > LargestAllowedLoop) {
+ JitSpew(JitSpew_LICM,
+ " Skipping loop with header block%u "
+ "due to too many blocks (%u > thresh %u)",
+ header->id(), (uint32_t)numBlocks, (uint32_t)LargestAllowedLoop);
+ doVisit = false;
+ } else {
+ size_t switchSize = 0;
+ if (LoopContainsBigTableSwitch(graph, header, &switchSize)) {
+ JitSpew(JitSpew_LICM,
+ " Skipping loop with header block%u "
+ "due to oversize tableswitch (%u > thresh %u)",
+ header->id(), (uint32_t)switchSize,
+ (uint32_t)LargestAllowedTableSwitch);
+ doVisit = false;
+ }
+ }
+
+ if (doVisit) {
+ VisitLoop(graph, header);
+ }
+
+ UnmarkLoopBlocks(graph, header);
+
+ if (mir->shouldCancel("LICM (main loop)")) {
+ return false;
+ }
+ }
+
+ return true;
+}
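
The pass operates on MIR rather than source code, but its effect corresponds to the familiar source-level transformation sketched below: an invariant computation is moved out of the loop body into the preheader, which is what VisitLoopBlock above does for hoistable MIR instructions.

#include <cstddef>

// Before: the invariant product x * y is recomputed on every iteration.
void beforeLicm(double* a, size_t n, double x, double y) {
  for (size_t i = 0; i < n; i++) {
    a[i] = x * y + double(i);
  }
}

// After: the invariant computation is hoisted to the loop preheader.
void afterLicm(double* a, size_t n, double x, double y) {
  double hoisted = x * y;
  for (size_t i = 0; i < n; i++) {
    a[i] = hoisted + double(i);
  }
}
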
diff --git a/js/src/jit/LICM.h b/js/src/jit/LICM.h
new file mode 100644
index 0000000000..38012615f5
--- /dev/null
+++ b/js/src/jit/LICM.h
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_LICM_h
+#define jit_LICM_h
+
+// This file declares the Loop Invariant Code Motion (LICM) optimization pass.
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+[[nodiscard]] bool LICM(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_LICM_h */
diff --git a/js/src/jit/LIR.cpp b/js/src/jit/LIR.cpp
new file mode 100644
index 0000000000..2795c5b322
--- /dev/null
+++ b/js/src/jit/LIR.cpp
@@ -0,0 +1,780 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/LIR.h"
+
+#include "mozilla/ScopeExit.h"
+
+#include <type_traits>
+
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "js/Printf.h"
+#include "util/Unicode.h"
+
+using namespace js;
+using namespace js::jit;
+
+const char* const js::jit::LIROpNames[] = {
+#define OPNAME(op, ...) #op,
+ LIR_OPCODE_LIST(OPNAME)
+#undef OPNAME
+};
+
+LIRGraph::LIRGraph(MIRGraph* mir)
+ : blocks_(),
+ constantPool_(mir->alloc()),
+ constantPoolMap_(mir->alloc()),
+ safepoints_(mir->alloc()),
+ nonCallSafepoints_(mir->alloc()),
+ numVirtualRegisters_(0),
+ numInstructions_(1), // First id is 1.
+ localSlotsSize_(0),
+ argumentSlotCount_(0),
+ mir_(*mir) {}
+
+bool LIRGraph::addConstantToPool(const Value& v, uint32_t* index) {
+ ConstantPoolMap::AddPtr p = constantPoolMap_.lookupForAdd(v);
+ if (p) {
+ *index = p->value();
+ return true;
+ }
+ *index = constantPool_.length();
+ return constantPool_.append(v) && constantPoolMap_.add(p, v, *index);
+}
+
+bool LIRGraph::noteNeedsSafepoint(LInstruction* ins) {
+ // Instructions with safepoints must be in linear order.
+ MOZ_ASSERT_IF(!safepoints_.empty(), safepoints_.back()->id() < ins->id());
+ if (!ins->isCall() && !nonCallSafepoints_.append(ins)) {
+ return false;
+ }
+ return safepoints_.append(ins);
+}
+
+#ifdef JS_JITSPEW
+void LIRGraph::dump(GenericPrinter& out) {
+ for (size_t i = 0; i < numBlocks(); i++) {
+ getBlock(i)->dump(out);
+ out.printf("\n");
+ }
+}
+
+void LIRGraph::dump() {
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+#endif
+
+LBlock::LBlock(MBasicBlock* from)
+ : block_(from), phis_(), entryMoveGroup_(nullptr), exitMoveGroup_(nullptr) {
+ from->assignLir(this);
+}
+
+bool LBlock::init(TempAllocator& alloc) {
+ // Count the number of LPhis we'll need.
+ size_t numLPhis = 0;
+ for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
+ MPhi* phi = *i;
+ switch (phi->type()) {
+ case MIRType::Value:
+ numLPhis += BOX_PIECES;
+ break;
+ case MIRType::Int64:
+ numLPhis += INT64_PIECES;
+ break;
+ default:
+ numLPhis += 1;
+ break;
+ }
+ }
+
+ // Allocate space for the LPhis.
+ if (!phis_.init(alloc, numLPhis)) {
+ return false;
+ }
+
+ // For each MIR phi, set up LIR phis as appropriate. We'll fill in their
+ // operands on each incoming edge, and set their definitions at the start of
+ // their defining block.
+ size_t phiIndex = 0;
+ size_t numPreds = block_->numPredecessors();
+ for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
+ MPhi* phi = *i;
+ MOZ_ASSERT(phi->numOperands() == numPreds);
+
+ int numPhis;
+ switch (phi->type()) {
+ case MIRType::Value:
+ numPhis = BOX_PIECES;
+ break;
+ case MIRType::Int64:
+ numPhis = INT64_PIECES;
+ break;
+ default:
+ numPhis = 1;
+ break;
+ }
+ for (int i = 0; i < numPhis; i++) {
+ LAllocation* inputs = alloc.allocateArray<LAllocation>(numPreds);
+ if (!inputs) {
+ return false;
+ }
+
+ void* addr = &phis_[phiIndex++];
+ LPhi* lphi = new (addr) LPhi(phi, inputs);
+ lphi->setBlock(this);
+ }
+ }
+ return true;
+}
+
+const LInstruction* LBlock::firstInstructionWithId() const {
+ for (LInstructionIterator i(instructions_.begin()); i != instructions_.end();
+ ++i) {
+ if (i->id()) {
+ return *i;
+ }
+ }
+ return nullptr;
+}
+
+LMoveGroup* LBlock::getEntryMoveGroup(TempAllocator& alloc) {
+ if (entryMoveGroup_) {
+ return entryMoveGroup_;
+ }
+ entryMoveGroup_ = LMoveGroup::New(alloc);
+ insertBefore(*begin(), entryMoveGroup_);
+ return entryMoveGroup_;
+}
+
+LMoveGroup* LBlock::getExitMoveGroup(TempAllocator& alloc) {
+ if (exitMoveGroup_) {
+ return exitMoveGroup_;
+ }
+ exitMoveGroup_ = LMoveGroup::New(alloc);
+ insertBefore(*rbegin(), exitMoveGroup_);
+ return exitMoveGroup_;
+}
+
+#ifdef JS_JITSPEW
+void LBlock::dump(GenericPrinter& out) {
+ out.printf("block%u:\n", mir()->id());
+ for (size_t i = 0; i < numPhis(); ++i) {
+ getPhi(i)->dump(out);
+ out.printf("\n");
+ }
+ for (LInstructionIterator iter = begin(); iter != end(); iter++) {
+ iter->dump(out);
+ if (iter->safepoint()) {
+ out.printf(" SAFEPOINT(0x%p) ", iter->safepoint());
+ }
+ out.printf("\n");
+ }
+}
+
+void LBlock::dump() {
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+#endif
+
+static size_t TotalOperandCount(LRecoverInfo* recoverInfo) {
+ size_t accum = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ if (!it->isRecoveredOnBailout()) {
+ accum++;
+ }
+ }
+ return accum;
+}
+
+LRecoverInfo::LRecoverInfo(TempAllocator& alloc)
+ : instructions_(alloc), recoverOffset_(INVALID_RECOVER_OFFSET) {}
+
+LRecoverInfo* LRecoverInfo::New(MIRGenerator* gen, MResumePoint* mir) {
+ LRecoverInfo* recoverInfo = new (gen->alloc()) LRecoverInfo(gen->alloc());
+ if (!recoverInfo || !recoverInfo->init(mir)) {
+ return nullptr;
+ }
+
+ JitSpew(JitSpew_IonSnapshots, "Generating LIR recover info %p from MIR (%p)",
+ (void*)recoverInfo, (void*)mir);
+
+ return recoverInfo;
+}
+
+// de-virtualise MResumePoint::getOperand calls.
+template <typename Node>
+bool LRecoverInfo::appendOperands(Node* ins) {
+ for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
+ MDefinition* def = ins->getOperand(i);
+
+ // As there are no cycles in the data flow (ignoring MPhi), a definition
+ // that isInWorklist() is already in the instruction vector and is not
+ // merely being processed by a caller of the current function.
+ if (def->isRecoveredOnBailout() && !def->isInWorklist()) {
+ if (!appendDefinition(def)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool LRecoverInfo::appendDefinition(MDefinition* def) {
+ MOZ_ASSERT(def->isRecoveredOnBailout());
+ def->setInWorklist();
+ auto clearWorklistFlagOnFailure =
+ mozilla::MakeScopeExit([&] { def->setNotInWorklist(); });
+
+ if (!appendOperands(def)) {
+ return false;
+ }
+
+ if (!instructions_.append(def)) {
+ return false;
+ }
+
+ clearWorklistFlagOnFailure.release();
+ return true;
+}
+
+bool LRecoverInfo::appendResumePoint(MResumePoint* rp) {
+ // Stores should be recovered first.
+ for (auto iter(rp->storesBegin()), end(rp->storesEnd()); iter != end;
+ ++iter) {
+ if (!appendDefinition(iter->operand)) {
+ return false;
+ }
+ }
+
+ if (rp->caller() && !appendResumePoint(rp->caller())) {
+ return false;
+ }
+
+ if (!appendOperands(rp)) {
+ return false;
+ }
+
+ return instructions_.append(rp);
+}
+
+bool LRecoverInfo::init(MResumePoint* rp) {
+ // Before exiting this function, remove temporary flags from all definitions
+ // added in the vector.
+ auto clearWorklistFlags = mozilla::MakeScopeExit([&] {
+ for (MNode** it = begin(); it != end(); it++) {
+ if (!(*it)->isDefinition()) {
+ continue;
+ }
+ (*it)->toDefinition()->setNotInWorklist();
+ }
+ });
+
+ // Sort operations in the order in which we need to restore the stack. This
+ // implies that outer frames, as well as operations needed to recover the
+ // current frame, are located before the current frame. The inner-most
+ // resume point should be the last element in the list.
+ if (!appendResumePoint(rp)) {
+ return false;
+ }
+
+ MOZ_ASSERT(mir() == rp);
+ return true;
+}
+
+LSnapshot::LSnapshot(LRecoverInfo* recoverInfo, BailoutKind kind)
+ : slots_(nullptr),
+ recoverInfo_(recoverInfo),
+ snapshotOffset_(INVALID_SNAPSHOT_OFFSET),
+ numSlots_(TotalOperandCount(recoverInfo) * BOX_PIECES),
+ bailoutKind_(kind) {}
+
+bool LSnapshot::init(MIRGenerator* gen) {
+ slots_ = gen->allocate<LAllocation>(numSlots_);
+ return !!slots_;
+}
+
+LSnapshot* LSnapshot::New(MIRGenerator* gen, LRecoverInfo* recover,
+ BailoutKind kind) {
+ LSnapshot* snapshot = new (gen->alloc()) LSnapshot(recover, kind);
+ if (!snapshot || !snapshot->init(gen)) {
+ return nullptr;
+ }
+
+ JitSpew(JitSpew_IonSnapshots, "Generating LIR snapshot %p from recover (%p)",
+ (void*)snapshot, (void*)recover);
+
+ return snapshot;
+}
+
+void LSnapshot::rewriteRecoveredInput(LUse input) {
+ // Mark any operands of this snapshot that use the same virtual register as
+ // |input| as being equal to the instruction's result.
+ for (size_t i = 0; i < numEntries(); i++) {
+ if (getEntry(i)->isUse() &&
+ getEntry(i)->toUse()->virtualRegister() == input.virtualRegister()) {
+ setEntry(i, LUse(input.virtualRegister(), LUse::RECOVERED_INPUT));
+ }
+ }
+}
+
+#ifdef JS_JITSPEW
+void LNode::printName(GenericPrinter& out, Opcode op) {
+ static const char* const names[] = {
+# define LIROP(x) #x,
+ LIR_OPCODE_LIST(LIROP)
+# undef LIROP
+ };
+ const char* name = names[uint32_t(op)];
+ size_t len = strlen(name);
+ for (size_t i = 0; i < len; i++) {
+ out.printf("%c", unicode::ToLowerCase(name[i]));
+ }
+}
+
+void LNode::printName(GenericPrinter& out) { printName(out, op()); }
+#endif
+
+bool LAllocation::aliases(const LAllocation& other) const {
+ if (isFloatReg() && other.isFloatReg()) {
+ return toFloatReg()->reg().aliases(other.toFloatReg()->reg());
+ }
+ return *this == other;
+}
+
+#ifdef JS_JITSPEW
+static const char* DefTypeName(LDefinition::Type type) {
+ switch (type) {
+ case LDefinition::GENERAL:
+ return "g";
+ case LDefinition::INT32:
+ return "i";
+ case LDefinition::OBJECT:
+ return "o";
+ case LDefinition::SLOTS:
+ return "s";
+ case LDefinition::FLOAT32:
+ return "f";
+ case LDefinition::DOUBLE:
+ return "d";
+ case LDefinition::SIMD128:
+ return "simd128";
+ case LDefinition::STACKRESULTS:
+ return "stackresults";
+# ifdef JS_NUNBOX32
+ case LDefinition::TYPE:
+ return "t";
+ case LDefinition::PAYLOAD:
+ return "p";
+# else
+ case LDefinition::BOX:
+ return "x";
+# endif
+ }
+ MOZ_CRASH("Invalid type");
+}
+
+UniqueChars LDefinition::toString() const {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ UniqueChars buf;
+ if (isBogusTemp()) {
+ buf = JS_smprintf("bogus");
+ } else {
+ buf = JS_smprintf("v%u<%s>", virtualRegister(), DefTypeName(type()));
+ if (buf) {
+ if (policy() == LDefinition::FIXED) {
+ buf = JS_sprintf_append(std::move(buf), ":%s",
+ output()->toString().get());
+ } else if (policy() == LDefinition::MUST_REUSE_INPUT) {
+ buf = JS_sprintf_append(std::move(buf), ":tied(%u)", getReusedInput());
+ }
+ }
+ }
+
+ if (!buf) {
+ oomUnsafe.crash("LDefinition::toString()");
+ }
+
+ return buf;
+}
+
+static UniqueChars PrintUse(const LUse* use) {
+ switch (use->policy()) {
+ case LUse::REGISTER:
+ return JS_smprintf("v%u:R", use->virtualRegister());
+ case LUse::FIXED:
+ return JS_smprintf("v%u:F:%s", use->virtualRegister(),
+ AnyRegister::FromCode(use->registerCode()).name());
+ case LUse::ANY:
+ return JS_smprintf("v%u:A", use->virtualRegister());
+ case LUse::KEEPALIVE:
+ return JS_smprintf("v%u:KA", use->virtualRegister());
+ case LUse::STACK:
+ return JS_smprintf("v%u:S", use->virtualRegister());
+ case LUse::RECOVERED_INPUT:
+ return JS_smprintf("v%u:RI", use->virtualRegister());
+ default:
+ MOZ_CRASH("invalid use policy");
+ }
+}
+
+UniqueChars LAllocation::toString() const {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ UniqueChars buf;
+ if (isBogus()) {
+ buf = JS_smprintf("bogus");
+ } else {
+ switch (kind()) {
+ case LAllocation::CONSTANT_VALUE:
+ case LAllocation::CONSTANT_INDEX: {
+ const MConstant* c = toConstant();
+ switch (c->type()) {
+ case MIRType::Int32:
+ buf = JS_smprintf("%d", c->toInt32());
+ break;
+ case MIRType::Int64:
+ buf = JS_smprintf("%" PRId64, c->toInt64());
+ break;
+ case MIRType::IntPtr:
+ buf = JS_smprintf("%" PRIxPTR, c->toIntPtr());
+ break;
+ case MIRType::String:
+ // If a JSContext is available, output the actual string.
+ if (JSContext* maybeCx = TlsContext.get()) {
+ Sprinter spr(maybeCx);
+ if (!spr.init()) {
+ oomUnsafe.crash("LAllocation::toString()");
+ }
+ spr.putString(c->toString());
+ buf = spr.release();
+ } else {
+ buf = JS_smprintf("string");
+ }
+ break;
+ case MIRType::Symbol:
+ buf = JS_smprintf("sym");
+ break;
+ case MIRType::Object:
+ case MIRType::Null:
+ buf = JS_smprintf("obj %p", c->toObjectOrNull());
+ break;
+ case MIRType::Shape:
+ buf = JS_smprintf("shape");
+ break;
+ default:
+ if (c->isTypeRepresentableAsDouble()) {
+ buf = JS_smprintf("%g", c->numberToDouble());
+ } else {
+ buf = JS_smprintf("const");
+ }
+ }
+ } break;
+ case LAllocation::GPR:
+ buf = JS_smprintf("%s", toGeneralReg()->reg().name());
+ break;
+ case LAllocation::FPU:
+ buf = JS_smprintf("%s", toFloatReg()->reg().name());
+ break;
+ case LAllocation::STACK_SLOT:
+ buf = JS_smprintf("stack:%u", toStackSlot()->slot());
+ break;
+ case LAllocation::ARGUMENT_SLOT:
+ buf = JS_smprintf("arg:%u", toArgument()->index());
+ break;
+ case LAllocation::STACK_AREA:
+ buf = JS_smprintf("stackarea:%u+%u", toStackArea()->base(),
+ toStackArea()->size());
+ break;
+ case LAllocation::USE:
+ buf = PrintUse(toUse());
+ break;
+ default:
+ MOZ_CRASH("what?");
+ }
+ }
+
+ if (!buf) {
+ oomUnsafe.crash("LAllocation::toString()");
+ }
+
+ return buf;
+}
+
+void LAllocation::dump() const { fprintf(stderr, "%s\n", toString().get()); }
+
+void LDefinition::dump() const { fprintf(stderr, "%s\n", toString().get()); }
+
+template <typename T>
+static void PrintOperands(GenericPrinter& out, T* node) {
+ size_t numOperands = node->numOperands();
+
+ for (size_t i = 0; i < numOperands; i++) {
+ out.printf(" (%s)", node->getOperand(i)->toString().get());
+ if (i != numOperands - 1) {
+ out.printf(",");
+ }
+ }
+}
+
+void LNode::printOperands(GenericPrinter& out) {
+ if (isMoveGroup()) {
+ toMoveGroup()->printOperands(out);
+ return;
+ }
+ if (isInteger()) {
+ out.printf(" (%d)", toInteger()->i32());
+ return;
+ }
+ if (isInteger64()) {
+ out.printf(" (%" PRId64 ")", toInteger64()->i64());
+ return;
+ }
+
+ if (isPhi()) {
+ PrintOperands(out, toPhi());
+ } else {
+ PrintOperands(out, toInstruction());
+ }
+}
+#endif
+
+void LInstruction::assignSnapshot(LSnapshot* snapshot) {
+ MOZ_ASSERT(!snapshot_);
+ snapshot_ = snapshot;
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_IonSnapshots)) {
+ JitSpewHeader(JitSpew_IonSnapshots);
+ Fprinter& out = JitSpewPrinter();
+ out.printf("Assigning snapshot %p to instruction %p (", (void*)snapshot,
+ (void*)this);
+ printName(out);
+ out.printf(")\n");
+ }
+#endif
+}
+
+#ifdef JS_JITSPEW
+static size_t NumSuccessorsHelper(const LNode* ins) { return 0; }
+
+template <size_t Succs, size_t Operands, size_t Temps>
+static size_t NumSuccessorsHelper(
+ const LControlInstructionHelper<Succs, Operands, Temps>* ins) {
+ return Succs;
+}
+
+static size_t NumSuccessors(const LInstruction* ins) {
+ switch (ins->op()) {
+ default:
+ MOZ_CRASH("Unexpected LIR op");
+# define LIROP(x) \
+ case LNode::Opcode::x: \
+ return NumSuccessorsHelper(ins->to##x());
+ LIR_OPCODE_LIST(LIROP)
+# undef LIROP
+ }
+}
+
+static MBasicBlock* GetSuccessorHelper(const LNode* ins, size_t i) {
+ MOZ_CRASH("Unexpected instruction with successors");
+}
+
+template <size_t Succs, size_t Operands, size_t Temps>
+static MBasicBlock* GetSuccessorHelper(
+ const LControlInstructionHelper<Succs, Operands, Temps>* ins, size_t i) {
+ return ins->getSuccessor(i);
+}
+
+static MBasicBlock* GetSuccessor(const LInstruction* ins, size_t i) {
+ MOZ_ASSERT(i < NumSuccessors(ins));
+
+ switch (ins->op()) {
+ default:
+ MOZ_CRASH("Unexpected LIR op");
+# define LIROP(x) \
+ case LNode::Opcode::x: \
+ return GetSuccessorHelper(ins->to##x(), i);
+ LIR_OPCODE_LIST(LIROP)
+# undef LIROP
+ }
+}
+#endif
+
+#ifdef JS_JITSPEW
+void LNode::dump(GenericPrinter& out) {
+ if (numDefs() != 0) {
+ out.printf("{");
+ for (size_t i = 0; i < numDefs(); i++) {
+ const LDefinition* def =
+ isPhi() ? toPhi()->getDef(i) : toInstruction()->getDef(i);
+ out.printf("%s", def->toString().get());
+ if (i != numDefs() - 1) {
+ out.printf(", ");
+ }
+ }
+ out.printf("} <- ");
+ }
+
+ printName(out);
+ printOperands(out);
+
+ if (isInstruction()) {
+ LInstruction* ins = toInstruction();
+ size_t numTemps = ins->numTemps();
+ if (numTemps > 0) {
+ out.printf(" t=(");
+ for (size_t i = 0; i < numTemps; i++) {
+ out.printf("%s", ins->getTemp(i)->toString().get());
+ if (i != numTemps - 1) {
+ out.printf(", ");
+ }
+ }
+ out.printf(")");
+ }
+
+ size_t numSuccessors = NumSuccessors(ins);
+ if (numSuccessors > 0) {
+ out.printf(" s=(");
+ for (size_t i = 0; i < numSuccessors; i++) {
+ MBasicBlock* succ = GetSuccessor(ins, i);
+ out.printf("block%u", succ->id());
+ if (i != numSuccessors - 1) {
+ out.printf(", ");
+ }
+ }
+ out.printf(")");
+ }
+ }
+}
+
+void LNode::dump() {
+ Fprinter out(stderr);
+ dump(out);
+ out.printf("\n");
+ out.finish();
+}
+
+const char* LNode::getExtraName() const {
+ switch (op()) {
+ default:
+ MOZ_CRASH("Unexpected LIR op");
+# define LIROP(x) \
+ case LNode::Opcode::x: \
+ return to##x()->extraName();
+ LIR_OPCODE_LIST(LIROP)
+# undef LIROP
+ }
+}
+#endif
+
+void LInstruction::initSafepoint(TempAllocator& alloc) {
+ MOZ_ASSERT(!safepoint_);
+ safepoint_ = new (alloc) LSafepoint(alloc);
+ MOZ_ASSERT(safepoint_);
+}
+
+bool LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type) {
+#ifdef DEBUG
+ MOZ_ASSERT(from != to);
+ for (size_t i = 0; i < moves_.length(); i++) {
+ MOZ_ASSERT(to != moves_[i].to());
+ }
+
+ // Check that SIMD moves are aligned according to ABI requirements.
+ // clang-format off
+# ifdef ENABLE_WASM_SIMD
+ // Alignment is not currently required for SIMD on x86/x64/arm64. See also
+ // CodeGeneratorShared::CodeGeneratorShared and in general everywhere
+  // SimdMemoryAlignment is used. Likely, alignment requirements will return.
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
+ defined(JS_CODEGEN_ARM64)
+ // No need for any check on x86/x64/arm64.
+# else
+# error "Need to consider SIMD alignment on this target."
+ // The following code may be of use if we need alignment checks on
+ // some future target.
+ //if (LDefinition(type).type() == LDefinition::SIMD128) {
+ // MOZ_ASSERT(from.isMemory() || from.isFloatReg());
+ // if (from.isMemory()) {
+ // if (from.isArgument()) {
+ // MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
+ // } else {
+ // MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
+ // }
+ // }
+ // MOZ_ASSERT(to.isMemory() || to.isFloatReg());
+ // if (to.isMemory()) {
+ // if (to.isArgument()) {
+ // MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0);
+ // } else {
+ // MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0);
+ // }
+ // }
+ //}
+# endif
+# endif
+ // clang-format on
+
+#endif
+ return moves_.append(LMove(from, to, type));
+}
+
+bool LMoveGroup::addAfter(LAllocation from, LAllocation to,
+ LDefinition::Type type) {
+  // Transform the operands of this move so that performing it
+  // simultaneously with the existing moves in the group has the same
+  // effect as if the original move took place after the existing moves.
+
+ for (size_t i = 0; i < moves_.length(); i++) {
+ if (moves_[i].to() == from) {
+ from = moves_[i].from();
+ break;
+ }
+ }
+
+ if (from == to) {
+ return true;
+ }
+
+ for (size_t i = 0; i < moves_.length(); i++) {
+ if (to == moves_[i].to()) {
+ moves_[i] = LMove(from, to, type);
+ return true;
+ }
+ }
+
+ return add(from, to, type);
+}
+
+#ifdef JS_JITSPEW
+void LMoveGroup::printOperands(GenericPrinter& out) {
+ for (size_t i = 0; i < numMoves(); i++) {
+ const LMove& move = getMove(i);
+ out.printf(" [%s -> %s", move.from().toString().get(),
+ move.to().toString().get());
+ out.printf(", %s", DefTypeName(move.type()));
+ out.printf("]");
+ if (i != numMoves() - 1) {
+ out.printf(",");
+ }
+ }
+}
+#endif
+
+#define LIROP(x) \
+ static_assert(!std::is_polymorphic_v<L##x>, \
+ "LIR instructions should not have virtual methods");
+LIR_OPCODE_LIST(LIROP)
+#undef LIROP
diff --git a/js/src/jit/LIR.h b/js/src/jit/LIR.h
new file mode 100644
index 0000000000..ecb54c5085
--- /dev/null
+++ b/js/src/jit/LIR.h
@@ -0,0 +1,2000 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_LIR_h
+#define jit_LIR_h
+
+// This file declares the core data structures for LIR: storage allocations for
+// inputs and outputs, as well as the interface instructions must conform to.
+
+#include "mozilla/Array.h"
+#include "mozilla/Casting.h"
+
+#include "jit/Bailouts.h"
+#include "jit/FixedList.h"
+#include "jit/InlineList.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/LIROpsGenerated.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "jit/Registers.h"
+#include "jit/Safepoints.h"
+#include "util/Memory.h"
+
+namespace js {
+namespace jit {
+
+class LUse;
+class LGeneralReg;
+class LFloatReg;
+class LStackSlot;
+class LStackArea;
+class LArgument;
+class LConstantIndex;
+class LInstruction;
+class LDefinition;
+class MBasicBlock;
+class MIRGenerator;
+
+static const uint32_t VREG_INCREMENT = 1;
+
+static const uint32_t THIS_FRAME_ARGSLOT = 0;
+
+#if defined(JS_NUNBOX32)
+# define BOX_PIECES 2
+static const uint32_t VREG_TYPE_OFFSET = 0;
+static const uint32_t VREG_DATA_OFFSET = 1;
+static const uint32_t TYPE_INDEX = 0;
+static const uint32_t PAYLOAD_INDEX = 1;
+static const uint32_t INT64LOW_INDEX = 0;
+static const uint32_t INT64HIGH_INDEX = 1;
+#elif defined(JS_PUNBOX64)
+# define BOX_PIECES 1
+#else
+# error "Unknown!"
+#endif
+
+static const uint32_t INT64_PIECES = sizeof(int64_t) / sizeof(uintptr_t);
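+
+// For example, on a 64-bit (punbox) target both BOX_PIECES and INT64_PIECES
+// are 1, while on a 32-bit (nunbox) target both are 2: boxed values are
+// tracked as (type, payload) pairs and int64 values as (high, low) pairs.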
+
+// Represents storage for an operand. For constants, the kind is stored in the
+// low (alignment) bits of the pointer, and the untagged pointer is a pointer
+// to an MConstant.
+class LAllocation {
+ uintptr_t bits_;
+
+  // 3 bits gives us enough for an interesting set of Kinds and also fits
+  // within the alignment bits of the stored MConstant pointers, which are
+  // always 8-byte aligned.
+ static const uintptr_t KIND_BITS = 3;
+ static const uintptr_t KIND_SHIFT = 0;
+ static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;
+
+ protected:
+#ifdef JS_64BIT
+ static const uintptr_t DATA_BITS = sizeof(uint32_t) * 8;
+#else
+ static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS;
+#endif
+ static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;
+
+ public:
+ enum Kind {
+ CONSTANT_VALUE, // MConstant*.
+ CONSTANT_INDEX, // Constant arbitrary index.
+ USE, // Use of a virtual register, with physical allocation policy.
+ GPR, // General purpose register.
+ FPU, // Floating-point register.
+ STACK_SLOT, // Stack slot.
+ STACK_AREA, // Stack area.
+ ARGUMENT_SLOT // Argument slot.
+ };
+
+ static const uintptr_t DATA_MASK = (uintptr_t(1) << DATA_BITS) - 1;
+
+ protected:
+ uint32_t data() const {
+ MOZ_ASSERT(!hasIns());
+ return mozilla::AssertedCast<uint32_t>(bits_ >> DATA_SHIFT);
+ }
+ void setData(uintptr_t data) {
+ MOZ_ASSERT(!hasIns());
+ MOZ_ASSERT(data <= DATA_MASK);
+ bits_ &= ~(DATA_MASK << DATA_SHIFT);
+ bits_ |= (data << DATA_SHIFT);
+ }
+ void setKindAndData(Kind kind, uintptr_t data) {
+ MOZ_ASSERT(data <= DATA_MASK);
+ bits_ = (uintptr_t(kind) << KIND_SHIFT) | data << DATA_SHIFT;
+ MOZ_ASSERT(!hasIns());
+ }
+
+ bool hasIns() const { return isStackArea(); }
+ const LInstruction* ins() const {
+ MOZ_ASSERT(hasIns());
+ return reinterpret_cast<const LInstruction*>(bits_ &
+ ~(KIND_MASK << KIND_SHIFT));
+ }
+ LInstruction* ins() {
+ MOZ_ASSERT(hasIns());
+ return reinterpret_cast<LInstruction*>(bits_ & ~(KIND_MASK << KIND_SHIFT));
+ }
+ void setKindAndIns(Kind kind, LInstruction* ins) {
+ uintptr_t data = reinterpret_cast<uintptr_t>(ins);
+ MOZ_ASSERT((data & (KIND_MASK << KIND_SHIFT)) == 0);
+ bits_ = data | (uintptr_t(kind) << KIND_SHIFT);
+ MOZ_ASSERT(hasIns());
+ }
+
+ LAllocation(Kind kind, uintptr_t data) { setKindAndData(kind, data); }
+ LAllocation(Kind kind, LInstruction* ins) { setKindAndIns(kind, ins); }
+ explicit LAllocation(Kind kind) { setKindAndData(kind, 0); }
+
+ public:
+ LAllocation() : bits_(0) { MOZ_ASSERT(isBogus()); }
+
+ // The MConstant pointer must have its low bits cleared.
+ explicit LAllocation(const MConstant* c) {
+ MOZ_ASSERT(c);
+ bits_ = uintptr_t(c);
+ MOZ_ASSERT((bits_ & (KIND_MASK << KIND_SHIFT)) == 0);
+ bits_ |= CONSTANT_VALUE << KIND_SHIFT;
+ }
+ inline explicit LAllocation(AnyRegister reg);
+
+ Kind kind() const { return (Kind)((bits_ >> KIND_SHIFT) & KIND_MASK); }
+
+ bool isBogus() const { return bits_ == 0; }
+ bool isUse() const { return kind() == USE; }
+ bool isConstant() const { return isConstantValue() || isConstantIndex(); }
+ bool isConstantValue() const { return kind() == CONSTANT_VALUE; }
+ bool isConstantIndex() const { return kind() == CONSTANT_INDEX; }
+ bool isGeneralReg() const { return kind() == GPR; }
+ bool isFloatReg() const { return kind() == FPU; }
+ bool isStackSlot() const { return kind() == STACK_SLOT; }
+ bool isStackArea() const { return kind() == STACK_AREA; }
+ bool isArgument() const { return kind() == ARGUMENT_SLOT; }
+ bool isRegister() const { return isGeneralReg() || isFloatReg(); }
+ bool isRegister(bool needFloat) const {
+ return needFloat ? isFloatReg() : isGeneralReg();
+ }
+ bool isMemory() const { return isStackSlot() || isArgument(); }
+ inline uint32_t memorySlot() const;
+ inline LUse* toUse();
+ inline const LUse* toUse() const;
+ inline const LGeneralReg* toGeneralReg() const;
+ inline const LFloatReg* toFloatReg() const;
+ inline const LStackSlot* toStackSlot() const;
+ inline LStackArea* toStackArea();
+ inline const LStackArea* toStackArea() const;
+ inline const LArgument* toArgument() const;
+ inline const LConstantIndex* toConstantIndex() const;
+ inline AnyRegister toRegister() const;
+
+ const MConstant* toConstant() const {
+ MOZ_ASSERT(isConstantValue());
+ return reinterpret_cast<const MConstant*>(bits_ &
+ ~(KIND_MASK << KIND_SHIFT));
+ }
+
+ bool operator==(const LAllocation& other) const {
+ return bits_ == other.bits_;
+ }
+
+ bool operator!=(const LAllocation& other) const {
+ return bits_ != other.bits_;
+ }
+
+ HashNumber hash() const { return bits_; }
+
+ bool aliases(const LAllocation& other) const;
+
+#ifdef JS_JITSPEW
+ UniqueChars toString() const;
+ void dump() const;
+#endif
+};
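+
+// Usage sketch (illustrative): a default-constructed LAllocation is the
+// distinguished "bogus" allocation; concrete allocations are created through
+// the subclasses below (LUse, LGeneralReg, LStackSlot, ...) and inspected
+// with the is*()/to*() helpers.
+//
+//   LAllocation a;
+//   MOZ_ASSERT(a.isBogus());
+//   LAllocation slot = LStackSlot(8);
+//   MOZ_ASSERT(slot.isStackSlot() && slot.memorySlot() == 8);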
+
+class LUse : public LAllocation {
+ static const uint32_t POLICY_BITS = 3;
+ static const uint32_t POLICY_SHIFT = 0;
+ static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
+#ifdef JS_CODEGEN_ARM64
+ static const uint32_t REG_BITS = 7;
+#else
+ static const uint32_t REG_BITS = 6;
+#endif
+ static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS;
+ static const uint32_t REG_MASK = (1 << REG_BITS) - 1;
+
+ // Whether the physical register for this operand may be reused for a def.
+ static const uint32_t USED_AT_START_BITS = 1;
+ static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS;
+ static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1;
+
+ // The REG field will hold the register code for any Register or
+ // FloatRegister, though not for an AnyRegister.
+ static_assert(std::max(Registers::Total, FloatRegisters::Total) <=
+ REG_MASK + 1,
+ "The field must be able to represent any register code");
+
+ public:
+ // Virtual registers get the remaining bits.
+ static const uint32_t VREG_BITS =
+ DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS);
+ static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS;
+ static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;
+
+ enum Policy {
+ // Input should be in a read-only register or stack slot.
+ ANY,
+
+ // Input must be in a read-only register.
+ REGISTER,
+
+ // Input must be in a specific, read-only register.
+ FIXED,
+
+ // Keep the used virtual register alive, and use whatever allocation is
+ // available. This is similar to ANY but hints to the register allocator
+ // that it is never useful to optimize this site.
+ KEEPALIVE,
+
+ // Input must be allocated on the stack. Only used when extracting stack
+ // results from stack result areas.
+ STACK,
+
+ // For snapshot inputs, indicates that the associated instruction will
+ // write this input to its output register before bailing out.
+ // The register allocator may thus allocate that output register, and
+ // does not need to keep the virtual register alive (alternatively,
+ // this may be treated as KEEPALIVE).
+ RECOVERED_INPUT
+ };
+
+ void set(Policy policy, uint32_t reg, bool usedAtStart) {
+ MOZ_ASSERT(reg <= REG_MASK, "Register code must fit in field");
+ setKindAndData(USE, (policy << POLICY_SHIFT) | (reg << REG_SHIFT) |
+ ((usedAtStart ? 1 : 0) << USED_AT_START_SHIFT));
+ }
+
+ public:
+ LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) {
+ set(policy, 0, usedAtStart);
+ setVirtualRegister(vreg);
+ }
+ explicit LUse(Policy policy, bool usedAtStart = false) {
+ set(policy, 0, usedAtStart);
+ }
+ explicit LUse(Register reg, bool usedAtStart = false) {
+ set(FIXED, reg.code(), usedAtStart);
+ }
+ explicit LUse(FloatRegister reg, bool usedAtStart = false) {
+ set(FIXED, reg.code(), usedAtStart);
+ }
+ LUse(Register reg, uint32_t virtualRegister, bool usedAtStart = false) {
+ set(FIXED, reg.code(), usedAtStart);
+ setVirtualRegister(virtualRegister);
+ }
+ LUse(FloatRegister reg, uint32_t virtualRegister, bool usedAtStart = false) {
+ set(FIXED, reg.code(), usedAtStart);
+ setVirtualRegister(virtualRegister);
+ }
+
+ void setVirtualRegister(uint32_t index) {
+ MOZ_ASSERT(index < VREG_MASK);
+
+ uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT);
+ setData(old | (index << VREG_SHIFT));
+ }
+
+ Policy policy() const {
+ Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK);
+ return policy;
+ }
+ uint32_t virtualRegister() const {
+ uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK;
+ MOZ_ASSERT(index != 0);
+ return index;
+ }
+ uint32_t registerCode() const {
+ MOZ_ASSERT(policy() == FIXED);
+ return (data() >> REG_SHIFT) & REG_MASK;
+ }
+ bool isFixedRegister() const { return policy() == FIXED; }
+ bool usedAtStart() const {
+ return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK);
+ }
+};
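+
+// Usage sketch (illustrative; the virtual register numbers and the register
+// name SomeReg are hypothetical):
+//
+//   LUse any(/* vreg = */ 5, LUse::ANY);   // any register or stack slot
+//   LUse fixed(SomeReg, /* virtualRegister = */ 6, /* usedAtStart = */ true);
+//   MOZ_ASSERT(fixed.policy() == LUse::FIXED);
+//   MOZ_ASSERT(fixed.usedAtStart());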
+
+static const uint32_t MAX_VIRTUAL_REGISTERS = LUse::VREG_MASK;
+
+class LBoxAllocation {
+#ifdef JS_NUNBOX32
+ LAllocation type_;
+ LAllocation payload_;
+#else
+ LAllocation value_;
+#endif
+
+ public:
+#ifdef JS_NUNBOX32
+ LBoxAllocation(LAllocation type, LAllocation payload)
+ : type_(type), payload_(payload) {}
+
+ LAllocation type() const { return type_; }
+ LAllocation payload() const { return payload_; }
+#else
+ explicit LBoxAllocation(LAllocation value) : value_(value) {}
+
+ LAllocation value() const { return value_; }
+#endif
+};
+
+template <class ValT>
+class LInt64Value {
+#if JS_BITS_PER_WORD == 32
+ ValT high_;
+ ValT low_;
+#else
+ ValT value_;
+#endif
+
+ public:
+ LInt64Value() = default;
+
+#if JS_BITS_PER_WORD == 32
+ LInt64Value(ValT high, ValT low) : high_(high), low_(low) {}
+
+ ValT high() const { return high_; }
+ ValT low() const { return low_; }
+
+ const ValT* pointerHigh() const { return &high_; }
+ const ValT* pointerLow() const { return &low_; }
+#else
+ explicit LInt64Value(ValT value) : value_(value) {}
+
+ ValT value() const { return value_; }
+ const ValT* pointer() const { return &value_; }
+#endif
+};
+
+using LInt64Allocation = LInt64Value<LAllocation>;
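+
+// Usage sketch (illustrative; highAlloc, lowAlloc and alloc are hypothetical
+// allocations): on 32-bit targets an int64 value is carried by two
+// allocations, on 64-bit targets by one.
+//
+//   #if JS_BITS_PER_WORD == 32
+//     LInt64Allocation i64(highAlloc, lowAlloc);
+//   #else
+//     LInt64Allocation i64(alloc);
+//   #endif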
+
+class LGeneralReg : public LAllocation {
+ public:
+ explicit LGeneralReg(Register reg) : LAllocation(GPR, reg.code()) {}
+
+ Register reg() const { return Register::FromCode(data()); }
+};
+
+class LFloatReg : public LAllocation {
+ public:
+ explicit LFloatReg(FloatRegister reg) : LAllocation(FPU, reg.code()) {}
+
+ FloatRegister reg() const { return FloatRegister::FromCode(data()); }
+};
+
+// Arbitrary constant index.
+class LConstantIndex : public LAllocation {
+ explicit LConstantIndex(uint32_t index)
+ : LAllocation(CONSTANT_INDEX, index) {}
+
+ public:
+ static LConstantIndex FromIndex(uint32_t index) {
+ return LConstantIndex(index);
+ }
+
+ uint32_t index() const { return data(); }
+};
+
+// Stack slots are indices into the stack. The indices are byte indices.
+class LStackSlot : public LAllocation {
+ public:
+ explicit LStackSlot(uint32_t slot) : LAllocation(STACK_SLOT, slot) {}
+
+ uint32_t slot() const { return data(); }
+};
+
+// Stack area indicates a contiguous stack allocation meant to receive call
+// results that don't fit in registers.
+class LStackArea : public LAllocation {
+ public:
+ explicit LStackArea(LInstruction* stackArea)
+ : LAllocation(STACK_AREA, stackArea) {}
+
+ // Byte index of base of stack area, in the same coordinate space as
+ // LStackSlot::slot().
+ inline uint32_t base() const;
+ inline void setBase(uint32_t base);
+
+ // Size in bytes of the stack area.
+ inline uint32_t size() const;
+ inline uint32_t alignment() const { return 8; }
+
+ class ResultIterator {
+ const LStackArea& alloc_;
+ uint32_t idx_;
+
+ public:
+ explicit ResultIterator(const LStackArea& alloc) : alloc_(alloc), idx_(0) {}
+
+ inline bool done() const;
+ inline void next();
+ inline LAllocation alloc() const;
+ inline bool isGcPointer() const;
+
+ explicit operator bool() const { return !done(); }
+ };
+
+ ResultIterator results() const { return ResultIterator(*this); }
+
+ inline LStackSlot resultAlloc(LInstruction* lir, LDefinition* def) const;
+};
+
+// Arguments are reverse indices into the stack. The indices are byte indices.
+class LArgument : public LAllocation {
+ public:
+ explicit LArgument(uint32_t index) : LAllocation(ARGUMENT_SLOT, index) {}
+
+ uint32_t index() const { return data(); }
+};
+
+inline uint32_t LAllocation::memorySlot() const {
+ MOZ_ASSERT(isMemory());
+ return isStackSlot() ? toStackSlot()->slot() : toArgument()->index();
+}
+
+// Represents storage for a definition.
+class LDefinition {
+ // Bits containing policy, type, and virtual register.
+ uint32_t bits_;
+
+  // Before register allocation, this optionally contains a fixed allocation.
+  // The register allocator assigns a physical allocation to this field if
+  // none was fixed.
+ //
+ // Right now, pre-allocated outputs are limited to the following:
+ // * Physical argument stack slots.
+ // * Physical registers.
+ LAllocation output_;
+
+ static const uint32_t TYPE_BITS = 4;
+ static const uint32_t TYPE_SHIFT = 0;
+ static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1;
+ static const uint32_t POLICY_BITS = 2;
+ static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS;
+ static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
+
+ static const uint32_t VREG_BITS =
+ (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS);
+ static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS;
+ static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;
+
+ public:
+ // Note that definitions, by default, are always allocated a register,
+ // unless the policy specifies that an input can be re-used and that input
+ // is a stack slot.
+ enum Policy {
+ // The policy is predetermined by the LAllocation attached to this
+ // definition. The allocation may be:
+ // * A register, which may not appear as any fixed temporary.
+ // * A stack slot or argument.
+ //
+ // Register allocation will not modify a fixed allocation.
+ FIXED,
+
+ // A random register of an appropriate class will be assigned.
+ REGISTER,
+
+ // An area on the stack must be assigned. Used when defining stack results
+ // and stack result areas.
+ STACK,
+
+ // One definition per instruction must re-use the first input
+ // allocation, which (for now) must be a register.
+ MUST_REUSE_INPUT
+ };
+
+ enum Type {
+ GENERAL, // Generic, integer or pointer-width data (GPR).
+ INT32, // int32 data (GPR).
+ OBJECT, // Pointer that may be collected as garbage (GPR).
+ SLOTS, // Slots/elements pointer that may be moved by minor GCs (GPR).
+ FLOAT32, // 32-bit floating-point value (FPU).
+ DOUBLE, // 64-bit floating-point value (FPU).
+ SIMD128, // 128-bit SIMD vector (FPU).
+ STACKRESULTS, // A variable-size stack allocation that may contain objects.
+#ifdef JS_NUNBOX32
+ // A type virtual register must be followed by a payload virtual
+ // register, as both will be tracked as a single gcthing.
+ TYPE,
+ PAYLOAD
+#else
+ BOX // Joined box, for punbox systems. (GPR, gcthing)
+#endif
+ };
+
+ void set(uint32_t index, Type type, Policy policy) {
+ static_assert(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
+ bits_ =
+ (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
+#ifndef ENABLE_WASM_SIMD
+ MOZ_ASSERT(this->type() != SIMD128);
+#endif
+ }
+
+ public:
+ LDefinition(uint32_t index, Type type, Policy policy = REGISTER) {
+ set(index, type, policy);
+ }
+
+ explicit LDefinition(Type type, Policy policy = REGISTER) {
+ set(0, type, policy);
+ }
+
+ LDefinition(Type type, const LAllocation& a) : output_(a) {
+ set(0, type, FIXED);
+ }
+
+ LDefinition(uint32_t index, Type type, const LAllocation& a) : output_(a) {
+ set(index, type, FIXED);
+ }
+
+ LDefinition() : bits_(0) { MOZ_ASSERT(isBogusTemp()); }
+
+ static LDefinition BogusTemp() { return LDefinition(); }
+
+ Policy policy() const {
+ return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
+ }
+ Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); }
+
+ static bool isFloatRegCompatible(Type type, FloatRegister reg) {
+#ifdef JS_CODEGEN_RISCV64
+ if (type == FLOAT32 || type == DOUBLE) {
+ return reg.isSingle() || reg.isDouble();
+ }
+#else
+ if (type == FLOAT32) {
+ return reg.isSingle();
+ }
+ if (type == DOUBLE) {
+ return reg.isDouble();
+ }
+#endif
+ MOZ_ASSERT(type == SIMD128);
+ return reg.isSimd128();
+ }
+
+ bool isCompatibleReg(const AnyRegister& r) const {
+ if (isFloatReg() && r.isFloat()) {
+ return isFloatRegCompatible(type(), r.fpu());
+ }
+ return !isFloatReg() && !r.isFloat();
+ }
+ bool isCompatibleDef(const LDefinition& other) const {
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
+ if (isFloatReg() && other.isFloatReg()) {
+ return type() == other.type();
+ }
+ return !isFloatReg() && !other.isFloatReg();
+#else
+ return isFloatReg() == other.isFloatReg();
+#endif
+ }
+
+ static bool isFloatReg(Type type) {
+ return type == FLOAT32 || type == DOUBLE || type == SIMD128;
+ }
+ bool isFloatReg() const { return isFloatReg(type()); }
+
+ uint32_t virtualRegister() const {
+ uint32_t index = (bits_ >> VREG_SHIFT) & VREG_MASK;
+ // MOZ_ASSERT(index != 0);
+ return index;
+ }
+ LAllocation* output() { return &output_; }
+ const LAllocation* output() const { return &output_; }
+ bool isFixed() const { return policy() == FIXED; }
+ bool isBogusTemp() const { return isFixed() && output()->isBogus(); }
+ void setVirtualRegister(uint32_t index) {
+ MOZ_ASSERT(index < VREG_MASK);
+ bits_ &= ~(VREG_MASK << VREG_SHIFT);
+ bits_ |= index << VREG_SHIFT;
+ }
+ void setOutput(const LAllocation& a) {
+ output_ = a;
+ if (!a.isUse()) {
+ bits_ &= ~(POLICY_MASK << POLICY_SHIFT);
+ bits_ |= FIXED << POLICY_SHIFT;
+ }
+ }
+ void setReusedInput(uint32_t operand) {
+ output_ = LConstantIndex::FromIndex(operand);
+ }
+ uint32_t getReusedInput() const {
+ MOZ_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT);
+ return output_.toConstantIndex()->index();
+ }
+
+ static inline Type TypeFrom(MIRType type) {
+ switch (type) {
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ // The stack slot allocator doesn't currently support allocating
+ // 1-byte slots, so for now we lower MIRType::Boolean into INT32.
+ static_assert(sizeof(bool) <= sizeof(int32_t),
+ "bool doesn't fit in an int32 slot");
+ return LDefinition::INT32;
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ case MIRType::Object:
+ case MIRType::RefOrNull:
+ return LDefinition::OBJECT;
+ case MIRType::Double:
+ return LDefinition::DOUBLE;
+ case MIRType::Float32:
+ return LDefinition::FLOAT32;
+#if defined(JS_PUNBOX64)
+ case MIRType::Value:
+ return LDefinition::BOX;
+#endif
+ case MIRType::Slots:
+ case MIRType::Elements:
+ return LDefinition::SLOTS;
+ case MIRType::Pointer:
+ case MIRType::IntPtr:
+ return LDefinition::GENERAL;
+#if defined(JS_PUNBOX64)
+ case MIRType::Int64:
+ return LDefinition::GENERAL;
+#endif
+ case MIRType::StackResults:
+ return LDefinition::STACKRESULTS;
+ case MIRType::Simd128:
+ return LDefinition::SIMD128;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+ }
+
+ UniqueChars toString() const;
+
+#ifdef JS_JITSPEW
+ void dump() const;
+#endif
+};
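+
+// Usage sketch (illustrative; the virtual register number is hypothetical):
+// a definition pairs a virtual register with a type and an allocation policy;
+// LDefinition::TypeFrom maps MIR types onto these LIR types.
+//
+//   LDefinition d(/* index = */ 7, LDefinition::TypeFrom(MIRType::Int32));
+//   MOZ_ASSERT(d.type() == LDefinition::INT32);
+//   MOZ_ASSERT(d.policy() == LDefinition::REGISTER);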
+
+class LInt64Definition : public LInt64Value<LDefinition> {
+ public:
+ using LInt64Value<LDefinition>::LInt64Value;
+
+ static LInt64Definition BogusTemp() { return LInt64Definition(); }
+
+ bool isBogusTemp() const {
+#if JS_BITS_PER_WORD == 32
+ MOZ_ASSERT(high().isBogusTemp() == low().isBogusTemp());
+ return high().isBogusTemp();
+#else
+ return value().isBogusTemp();
+#endif
+ }
+};
+
+// Forward declarations of LIR types.
+#define LIROP(op) class L##op;
+LIR_OPCODE_LIST(LIROP)
+#undef LIROP
+
+class LSnapshot;
+class LSafepoint;
+class LElementVisitor;
+
+constexpr size_t MaxNumLInstructionOperands = 63;
+
+// The common base class for LPhi and LInstruction.
+class LNode {
+ protected:
+ MDefinition* mir_;
+
+ private:
+ LBlock* block_;
+ uint32_t id_;
+
+ protected:
+ // Bitfields below are all uint32_t to make sure MSVC packs them correctly.
+ uint32_t op_ : 10;
+ uint32_t isCall_ : 1;
+
+ // LPhi::numOperands() may not fit in this bitfield, so we only use this
+ // field for LInstruction.
+ uint32_t nonPhiNumOperands_ : 6;
+ static_assert((1 << 6) - 1 == MaxNumLInstructionOperands,
+ "packing constraints");
+
+ // For LInstruction, the first operand is stored at offset
+ // sizeof(LInstruction) + nonPhiOperandsOffset_ * sizeof(uintptr_t).
+ uint32_t nonPhiOperandsOffset_ : 5;
+ uint32_t numDefs_ : 4;
+ uint32_t numTemps_ : 4;
+
+ public:
+ enum class Opcode {
+#define LIROP(name) name,
+ LIR_OPCODE_LIST(LIROP)
+#undef LIROP
+ Invalid
+ };
+
+ LNode(Opcode op, uint32_t nonPhiNumOperands, uint32_t numDefs,
+ uint32_t numTemps)
+ : mir_(nullptr),
+ block_(nullptr),
+ id_(0),
+ op_(uint32_t(op)),
+ isCall_(false),
+ nonPhiNumOperands_(nonPhiNumOperands),
+ nonPhiOperandsOffset_(0),
+ numDefs_(numDefs),
+ numTemps_(numTemps) {
+ MOZ_ASSERT(op < Opcode::Invalid);
+ MOZ_ASSERT(op_ == uint32_t(op), "opcode must fit in bitfield");
+ MOZ_ASSERT(nonPhiNumOperands_ == nonPhiNumOperands,
+ "nonPhiNumOperands must fit in bitfield");
+ MOZ_ASSERT(numDefs_ == numDefs, "numDefs must fit in bitfield");
+ MOZ_ASSERT(numTemps_ == numTemps, "numTemps must fit in bitfield");
+ }
+
+ const char* opName() {
+ switch (op()) {
+#define LIR_NAME_INS(name) \
+ case Opcode::name: \
+ return #name;
+ LIR_OPCODE_LIST(LIR_NAME_INS)
+#undef LIR_NAME_INS
+ default:
+ MOZ_CRASH("Invalid op");
+ }
+ }
+
+ // Hook for opcodes to add extra high level detail about what code will be
+ // emitted for the op.
+ private:
+ const char* extraName() const { return nullptr; }
+
+ public:
+#ifdef JS_JITSPEW
+ const char* getExtraName() const;
+#endif
+
+ Opcode op() const { return Opcode(op_); }
+
+ bool isInstruction() const { return op() != Opcode::Phi; }
+ inline LInstruction* toInstruction();
+ inline const LInstruction* toInstruction() const;
+
+ // Returns the number of outputs of this instruction. If an output is
+ // unallocated, it is an LDefinition, defining a virtual register.
+ size_t numDefs() const { return numDefs_; }
+
+ bool isCall() const { return isCall_; }
+
+ // Does this call preserve the given register?
+ // By default, it is assumed that all registers are clobbered by a call.
+ inline bool isCallPreserved(AnyRegister reg) const;
+
+ uint32_t id() const { return id_; }
+ void setId(uint32_t id) {
+ MOZ_ASSERT(!id_);
+ MOZ_ASSERT(id);
+ id_ = id;
+ }
+ void setMir(MDefinition* mir) { mir_ = mir; }
+ MDefinition* mirRaw() const {
+ /* Untyped MIR for this op. Prefer mir() methods in subclasses. */
+ return mir_;
+ }
+ LBlock* block() const { return block_; }
+ void setBlock(LBlock* block) { block_ = block; }
+
+ // For an instruction which has a MUST_REUSE_INPUT output, whether that
+ // output register will be restored to its original value when bailing out.
+ inline bool recoversInput() const;
+
+#ifdef JS_JITSPEW
+ void dump(GenericPrinter& out);
+ void dump();
+ static void printName(GenericPrinter& out, Opcode op);
+ void printName(GenericPrinter& out);
+ void printOperands(GenericPrinter& out);
+#endif
+
+ public:
+ // Opcode testing and casts.
+#define LIROP(name) \
+ bool is##name() const { return op() == Opcode::name; } \
+ inline L##name* to##name(); \
+ inline const L##name* to##name() const;
+ LIR_OPCODE_LIST(LIROP)
+#undef LIROP
+
+// Note: GenerateOpcodeFiles.py generates LIROpsGenerated.h based on this
+// macro.
+#define LIR_HEADER(opcode) \
+ static constexpr LNode::Opcode classOpcode = LNode::Opcode::opcode;
+};
+
+extern const char* const LIROpNames[];
+inline const char* LIRCodeName(LNode::Opcode op) {
+ return LIROpNames[static_cast<size_t>(op)];
+}
+
+class LInstruction : public LNode,
+ public TempObject,
+ public InlineListNode<LInstruction> {
+ // This snapshot could be set after a ResumePoint. It is used to restart
+ // from the resume point pc.
+ LSnapshot* snapshot_;
+
+ // Structure capturing the set of stack slots and registers which are known
+ // to hold either gcthings or Values.
+ LSafepoint* safepoint_;
+
+ LMoveGroup* inputMoves_;
+ LMoveGroup* fixReuseMoves_;
+ LMoveGroup* movesAfter_;
+
+ protected:
+ LInstruction(Opcode opcode, uint32_t numOperands, uint32_t numDefs,
+ uint32_t numTemps)
+ : LNode(opcode, numOperands, numDefs, numTemps),
+ snapshot_(nullptr),
+ safepoint_(nullptr),
+ inputMoves_(nullptr),
+ fixReuseMoves_(nullptr),
+ movesAfter_(nullptr) {}
+
+ void setIsCall() { isCall_ = true; }
+
+ public:
+ inline LDefinition* getDef(size_t index);
+
+ void setDef(size_t index, const LDefinition& def) { *getDef(index) = def; }
+
+ LAllocation* getOperand(size_t index) const {
+ MOZ_ASSERT(index < numOperands());
+ MOZ_ASSERT(nonPhiOperandsOffset_ > 0);
+ uintptr_t p = reinterpret_cast<uintptr_t>(this + 1) +
+ nonPhiOperandsOffset_ * sizeof(uintptr_t);
+ return reinterpret_cast<LAllocation*>(p) + index;
+ }
+ void setOperand(size_t index, const LAllocation& a) {
+ *getOperand(index) = a;
+ }
+
+ void initOperandsOffset(size_t offset) {
+ MOZ_ASSERT(nonPhiOperandsOffset_ == 0);
+ MOZ_ASSERT(offset >= sizeof(LInstruction));
+ MOZ_ASSERT(((offset - sizeof(LInstruction)) % sizeof(uintptr_t)) == 0);
+ offset = (offset - sizeof(LInstruction)) / sizeof(uintptr_t);
+ nonPhiOperandsOffset_ = offset;
+ MOZ_ASSERT(nonPhiOperandsOffset_ == offset, "offset must fit in bitfield");
+ }
+
+ // Returns information about temporary registers needed. Each temporary
+ // register is an LDefinition with a fixed or virtual register and
+ // either GENERAL, FLOAT32, or DOUBLE type.
+ size_t numTemps() const { return numTemps_; }
+ inline LDefinition* getTemp(size_t index);
+
+ LSnapshot* snapshot() const { return snapshot_; }
+ LSafepoint* safepoint() const { return safepoint_; }
+ LMoveGroup* inputMoves() const { return inputMoves_; }
+ void setInputMoves(LMoveGroup* moves) { inputMoves_ = moves; }
+ LMoveGroup* fixReuseMoves() const { return fixReuseMoves_; }
+ void setFixReuseMoves(LMoveGroup* moves) { fixReuseMoves_ = moves; }
+ LMoveGroup* movesAfter() const { return movesAfter_; }
+ void setMovesAfter(LMoveGroup* moves) { movesAfter_ = moves; }
+ uint32_t numOperands() const { return nonPhiNumOperands_; }
+ void assignSnapshot(LSnapshot* snapshot);
+ void initSafepoint(TempAllocator& alloc);
+
+ class InputIterator;
+};
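+
+// Usage sketch (illustrative; `ins` is a hypothetical LInstruction*): the
+// operands, definitions and temps of an instruction are reached through the
+// accessors above.
+//
+//   for (size_t i = 0; i < ins->numOperands(); i++) {
+//     const LAllocation* a = ins->getOperand(i);
+//     // ...
+//   }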
+
+LInstruction* LNode::toInstruction() {
+ MOZ_ASSERT(isInstruction());
+ return static_cast<LInstruction*>(this);
+}
+
+const LInstruction* LNode::toInstruction() const {
+ MOZ_ASSERT(isInstruction());
+ return static_cast<const LInstruction*>(this);
+}
+
+class LElementVisitor {
+#ifdef TRACK_SNAPSHOTS
+ LInstruction* ins_ = nullptr;
+#endif
+
+ protected:
+#ifdef TRACK_SNAPSHOTS
+ LInstruction* instruction() { return ins_; }
+
+ void setElement(LInstruction* ins) { ins_ = ins; }
+#else
+ void setElement(LInstruction* ins) {}
+#endif
+};
+
+using LInstructionIterator = InlineList<LInstruction>::iterator;
+using LInstructionReverseIterator = InlineList<LInstruction>::reverse_iterator;
+
+class MPhi;
+
+// Phi is a pseudo-instruction that emits no code, and is an annotation for the
+// register allocator. Like its equivalent in MIR, phis are collected at the
+// top of blocks and are meant to be executed in parallel, choosing the input
+// corresponding to the predecessor taken in the control flow graph.
+class LPhi final : public LNode {
+ LAllocation* const inputs_;
+ LDefinition def_;
+
+ public:
+ LIR_HEADER(Phi)
+
+ LPhi(MPhi* ins, LAllocation* inputs)
+ : LNode(classOpcode,
+ /* nonPhiNumOperands = */ 0,
+ /* numDefs = */ 1,
+ /* numTemps = */ 0),
+ inputs_(inputs) {
+ setMir(ins);
+ }
+
+ LDefinition* getDef(size_t index) {
+ MOZ_ASSERT(index == 0);
+ return &def_;
+ }
+ void setDef(size_t index, const LDefinition& def) {
+ MOZ_ASSERT(index == 0);
+ def_ = def;
+ }
+ size_t numOperands() const { return mir_->toPhi()->numOperands(); }
+ LAllocation* getOperand(size_t index) {
+ MOZ_ASSERT(index < numOperands());
+ return &inputs_[index];
+ }
+ void setOperand(size_t index, const LAllocation& a) {
+ MOZ_ASSERT(index < numOperands());
+ inputs_[index] = a;
+ }
+
+ // Phis don't have temps, so calling numTemps/getTemp is pointless.
+ size_t numTemps() const = delete;
+ LDefinition* getTemp(size_t index) = delete;
+};
+
+class LMoveGroup;
+class LBlock {
+ MBasicBlock* block_;
+ FixedList<LPhi> phis_;
+ InlineList<LInstruction> instructions_;
+ LMoveGroup* entryMoveGroup_;
+ LMoveGroup* exitMoveGroup_;
+ Label label_;
+
+ public:
+ explicit LBlock(MBasicBlock* block);
+ [[nodiscard]] bool init(TempAllocator& alloc);
+
+ void add(LInstruction* ins) {
+ ins->setBlock(this);
+ instructions_.pushBack(ins);
+ }
+ size_t numPhis() const { return phis_.length(); }
+ LPhi* getPhi(size_t index) { return &phis_[index]; }
+ const LPhi* getPhi(size_t index) const { return &phis_[index]; }
+ MBasicBlock* mir() const { return block_; }
+ LInstructionIterator begin() { return instructions_.begin(); }
+ LInstructionIterator begin(LInstruction* at) {
+ return instructions_.begin(at);
+ }
+ LInstructionIterator end() { return instructions_.end(); }
+ LInstructionReverseIterator rbegin() { return instructions_.rbegin(); }
+ LInstructionReverseIterator rbegin(LInstruction* at) {
+ return instructions_.rbegin(at);
+ }
+ LInstructionReverseIterator rend() { return instructions_.rend(); }
+ InlineList<LInstruction>& instructions() { return instructions_; }
+ void insertAfter(LInstruction* at, LInstruction* ins) {
+ instructions_.insertAfter(at, ins);
+ }
+ void insertBefore(LInstruction* at, LInstruction* ins) {
+ instructions_.insertBefore(at, ins);
+ }
+ const LNode* firstElementWithId() const {
+ return !phis_.empty() ? static_cast<const LNode*>(getPhi(0))
+ : firstInstructionWithId();
+ }
+ uint32_t firstId() const { return firstElementWithId()->id(); }
+ uint32_t lastId() const { return lastInstructionWithId()->id(); }
+ const LInstruction* firstInstructionWithId() const;
+ const LInstruction* lastInstructionWithId() const {
+ const LInstruction* last = *instructions_.rbegin();
+ MOZ_ASSERT(last->id());
+ // The last instruction is a control flow instruction which does not have
+ // any output.
+ MOZ_ASSERT(last->numDefs() == 0);
+ return last;
+ }
+
+ // Return the label to branch to when branching to this block.
+ Label* label() {
+ MOZ_ASSERT(!isTrivial());
+ return &label_;
+ }
+
+ LMoveGroup* getEntryMoveGroup(TempAllocator& alloc);
+ LMoveGroup* getExitMoveGroup(TempAllocator& alloc);
+
+  // Test whether this basic block is empty except for a simple goto and is
+  // not a loop header. No code will be emitted for such blocks.
+ bool isTrivial() { return begin()->isGoto() && !mir()->isLoopHeader(); }
+
+#ifdef JS_JITSPEW
+ void dump(GenericPrinter& out);
+ void dump();
+#endif
+};
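+
+// Usage sketch (illustrative; `block` is a hypothetical LBlock*): the
+// instructions of a block are visited with the iterators above.
+//
+//   for (LInstructionIterator it = block->begin(); it != block->end(); it++) {
+//     LInstruction* ins = *it;
+//     // ...
+//   }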
+
+namespace details {
+template <size_t Defs, size_t Temps>
+class LInstructionFixedDefsTempsHelper : public LInstruction {
+ mozilla::Array<LDefinition, Defs + Temps> defsAndTemps_;
+
+ protected:
+ LInstructionFixedDefsTempsHelper(Opcode opcode, uint32_t numOperands)
+ : LInstruction(opcode, numOperands, Defs, Temps) {}
+
+ public:
+ // Override the methods in LInstruction with more optimized versions
+ // for when we know the exact instruction type.
+ LDefinition* getDef(size_t index) {
+ MOZ_ASSERT(index < Defs);
+ return &defsAndTemps_[index];
+ }
+ LDefinition* getTemp(size_t index) {
+ MOZ_ASSERT(index < Temps);
+ return &defsAndTemps_[Defs + index];
+ }
+ LInt64Definition getInt64Temp(size_t index) {
+ MOZ_ASSERT(index + INT64_PIECES <= Temps);
+#if JS_BITS_PER_WORD == 32
+ return LInt64Definition(defsAndTemps_[Defs + index + INT64HIGH_INDEX],
+ defsAndTemps_[Defs + index + INT64LOW_INDEX]);
+#else
+ return LInt64Definition(defsAndTemps_[Defs + index]);
+#endif
+ }
+
+ void setDef(size_t index, const LDefinition& def) {
+ MOZ_ASSERT(index < Defs);
+ defsAndTemps_[index] = def;
+ }
+ void setTemp(size_t index, const LDefinition& a) {
+ MOZ_ASSERT(index < Temps);
+ defsAndTemps_[Defs + index] = a;
+ }
+ void setInt64Temp(size_t index, const LInt64Definition& a) {
+#if JS_BITS_PER_WORD == 32
+ setTemp(index, a.low());
+ setTemp(index + 1, a.high());
+#else
+ setTemp(index, a.value());
+#endif
+ }
+
+ // Default accessors, assuming a single input and output, respectively.
+ const LAllocation* input() {
+ MOZ_ASSERT(numOperands() == 1);
+ return getOperand(0);
+ }
+ const LDefinition* output() {
+ MOZ_ASSERT(numDefs() == 1);
+ return getDef(0);
+ }
+ static size_t offsetOfDef(size_t index) {
+ using T = LInstructionFixedDefsTempsHelper<0, 0>;
+ return offsetof(T, defsAndTemps_) + index * sizeof(LDefinition);
+ }
+ static size_t offsetOfTemp(uint32_t numDefs, uint32_t index) {
+ using T = LInstructionFixedDefsTempsHelper<0, 0>;
+ return offsetof(T, defsAndTemps_) + (numDefs + index) * sizeof(LDefinition);
+ }
+};
+} // namespace details
+
+inline LDefinition* LInstruction::getDef(size_t index) {
+ MOZ_ASSERT(index < numDefs());
+ using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
+ uint8_t* p = reinterpret_cast<uint8_t*>(this) + T::offsetOfDef(index);
+ return reinterpret_cast<LDefinition*>(p);
+}
+
+inline LDefinition* LInstruction::getTemp(size_t index) {
+ MOZ_ASSERT(index < numTemps());
+ using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
+ uint8_t* p =
+ reinterpret_cast<uint8_t*>(this) + T::offsetOfTemp(numDefs(), index);
+ return reinterpret_cast<LDefinition*>(p);
+}
+
+template <size_t Defs, size_t Operands, size_t Temps>
+class LInstructionHelper
+ : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
+ mozilla::Array<LAllocation, Operands> operands_;
+
+ protected:
+ explicit LInstructionHelper(LNode::Opcode opcode)
+ : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
+ Operands) {
+ static_assert(
+ Operands == 0 || sizeof(operands_) == Operands * sizeof(LAllocation),
+ "mozilla::Array should not contain other fields");
+ if (Operands > 0) {
+ using T = LInstructionHelper<Defs, Operands, Temps>;
+ this->initOperandsOffset(offsetof(T, operands_));
+ }
+ }
+
+ public:
+ // Override the methods in LInstruction with more optimized versions
+ // for when we know the exact instruction type.
+ LAllocation* getOperand(size_t index) { return &operands_[index]; }
+ void setOperand(size_t index, const LAllocation& a) { operands_[index] = a; }
+ void setBoxOperand(size_t index, const LBoxAllocation& alloc) {
+#ifdef JS_NUNBOX32
+ operands_[index + TYPE_INDEX] = alloc.type();
+ operands_[index + PAYLOAD_INDEX] = alloc.payload();
+#else
+ operands_[index] = alloc.value();
+#endif
+ }
+ void setInt64Operand(size_t index, const LInt64Allocation& alloc) {
+#if JS_BITS_PER_WORD == 32
+ operands_[index + INT64LOW_INDEX] = alloc.low();
+ operands_[index + INT64HIGH_INDEX] = alloc.high();
+#else
+ operands_[index] = alloc.value();
+#endif
+ }
+ const LInt64Allocation getInt64Operand(size_t offset) {
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(operands_[offset + INT64HIGH_INDEX],
+ operands_[offset + INT64LOW_INDEX]);
+#else
+ return LInt64Allocation(operands_[offset]);
+#endif
+ }
+};
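+
+// Illustrative sketch (the opcode MyAdd is hypothetical and would have to be
+// declared in the LIR opcode list): concrete LIR instructions typically
+// derive from LInstructionHelper with fixed def/operand/temp counts and
+// declare their opcode with LIR_HEADER.
+//
+//   class LMyAdd : public LInstructionHelper<1, 2, 0> {
+//    public:
+//     LIR_HEADER(MyAdd)
+//     LMyAdd(const LAllocation& lhs, const LAllocation& rhs)
+//         : LInstructionHelper(classOpcode) {
+//       setOperand(0, lhs);
+//       setOperand(1, rhs);
+//     }
+//   };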
+
+template <size_t Defs, size_t Temps>
+class LVariadicInstruction
+ : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
+ protected:
+ LVariadicInstruction(LNode::Opcode opcode, size_t numOperands)
+ : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
+ numOperands) {}
+
+ public:
+ void setBoxOperand(size_t index, const LBoxAllocation& a) {
+#ifdef JS_NUNBOX32
+ this->setOperand(index + TYPE_INDEX, a.type());
+ this->setOperand(index + PAYLOAD_INDEX, a.payload());
+#else
+ this->setOperand(index, a.value());
+#endif
+ }
+};
+
+template <size_t Defs, size_t Operands, size_t Temps>
+class LCallInstructionHelper
+ : public LInstructionHelper<Defs, Operands, Temps> {
+ protected:
+ explicit LCallInstructionHelper(LNode::Opcode opcode)
+ : LInstructionHelper<Defs, Operands, Temps>(opcode) {
+ this->setIsCall();
+ }
+};
+
+template <size_t Defs, size_t Temps>
+class LBinaryCallInstructionHelper
+ : public LCallInstructionHelper<Defs, 2, Temps> {
+ protected:
+ explicit LBinaryCallInstructionHelper(LNode::Opcode opcode)
+ : LCallInstructionHelper<Defs, 2, Temps>(opcode) {}
+
+ public:
+ const LAllocation* lhs() { return this->getOperand(0); }
+ const LAllocation* rhs() { return this->getOperand(1); }
+};
+
+class LRecoverInfo : public TempObject {
+ public:
+ typedef Vector<MNode*, 2, JitAllocPolicy> Instructions;
+
+ private:
+ // List of instructions needed to recover the stack frames.
+ // Outer frames are stored before inner frames.
+ Instructions instructions_;
+
+ // Cached offset where this resume point is encoded.
+ RecoverOffset recoverOffset_;
+
+ explicit LRecoverInfo(TempAllocator& alloc);
+ [[nodiscard]] bool init(MResumePoint* mir);
+
+  // Fill the instruction vector such that all instructions needed for the
+  // recovery are pushed before the current instruction.
+ template <typename Node>
+ [[nodiscard]] bool appendOperands(Node* ins);
+ [[nodiscard]] bool appendDefinition(MDefinition* def);
+ [[nodiscard]] bool appendResumePoint(MResumePoint* rp);
+
+ public:
+ static LRecoverInfo* New(MIRGenerator* gen, MResumePoint* mir);
+
+  // Resume point of the innermost function.
+ MResumePoint* mir() const { return instructions_.back()->toResumePoint(); }
+ RecoverOffset recoverOffset() const { return recoverOffset_; }
+ void setRecoverOffset(RecoverOffset offset) {
+ MOZ_ASSERT(recoverOffset_ == INVALID_RECOVER_OFFSET);
+ recoverOffset_ = offset;
+ }
+
+ MNode** begin() { return instructions_.begin(); }
+ MNode** end() { return instructions_.end(); }
+ size_t numInstructions() const { return instructions_.length(); }
+
+ class OperandIter {
+ private:
+ MNode** it_;
+ MNode** end_;
+ size_t op_;
+ size_t opEnd_;
+ MResumePoint* rp_;
+ MNode* node_;
+
+ public:
+ explicit OperandIter(LRecoverInfo* recoverInfo)
+ : it_(recoverInfo->begin()),
+ end_(recoverInfo->end()),
+ op_(0),
+ opEnd_(0),
+ rp_(nullptr),
+ node_(nullptr) {
+ settle();
+ }
+
+ void settle() {
+ opEnd_ = (*it_)->numOperands();
+ while (opEnd_ == 0) {
+ ++it_;
+ op_ = 0;
+ opEnd_ = (*it_)->numOperands();
+ }
+ node_ = *it_;
+ if (node_->isResumePoint()) {
+ rp_ = node_->toResumePoint();
+ }
+ }
+
+ MDefinition* operator*() {
+ if (rp_) { // de-virtualize MResumePoint::getOperand calls.
+ return rp_->getOperand(op_);
+ }
+ return node_->getOperand(op_);
+ }
+ MDefinition* operator->() {
+ if (rp_) { // de-virtualize MResumePoint::getOperand calls.
+ return rp_->getOperand(op_);
+ }
+ return node_->getOperand(op_);
+ }
+
+ OperandIter& operator++() {
+ ++op_;
+ if (op_ != opEnd_) {
+ return *this;
+ }
+ op_ = 0;
+ ++it_;
+ node_ = rp_ = nullptr;
+ if (!*this) {
+ settle();
+ }
+ return *this;
+ }
+
+ explicit operator bool() const { return it_ == end_; }
+
+#ifdef DEBUG
+ bool canOptimizeOutIfUnused();
+#endif
+ };
+};
+
+// An LSnapshot is the reflection of an MResumePoint in LIR. Unlike
+// MResumePoints, they cannot be shared, as they are filled in by the register
+// allocator in order to capture the precise low-level stack state in between an
+// instruction's input and output. During code generation, LSnapshots are
+// compressed and saved in the compiled script.
+class LSnapshot : public TempObject {
+ private:
+ LAllocation* slots_;
+ LRecoverInfo* recoverInfo_;
+ SnapshotOffset snapshotOffset_;
+ uint32_t numSlots_;
+ BailoutKind bailoutKind_;
+
+ LSnapshot(LRecoverInfo* recover, BailoutKind kind);
+ [[nodiscard]] bool init(MIRGenerator* gen);
+
+ public:
+ static LSnapshot* New(MIRGenerator* gen, LRecoverInfo* recover,
+ BailoutKind kind);
+
+ size_t numEntries() const { return numSlots_; }
+ size_t numSlots() const { return numSlots_ / BOX_PIECES; }
+ LAllocation* payloadOfSlot(size_t i) {
+ MOZ_ASSERT(i < numSlots());
+ size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 1);
+ return getEntry(entryIndex);
+ }
+#ifdef JS_NUNBOX32
+ LAllocation* typeOfSlot(size_t i) {
+ MOZ_ASSERT(i < numSlots());
+ size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 2);
+ return getEntry(entryIndex);
+ }
+#endif
+ LAllocation* getEntry(size_t i) {
+ MOZ_ASSERT(i < numSlots_);
+ return &slots_[i];
+ }
+ void setEntry(size_t i, const LAllocation& alloc) {
+ MOZ_ASSERT(i < numSlots_);
+ slots_[i] = alloc;
+ }
+ LRecoverInfo* recoverInfo() const { return recoverInfo_; }
+ MResumePoint* mir() const { return recoverInfo()->mir(); }
+ SnapshotOffset snapshotOffset() const { return snapshotOffset_; }
+ void setSnapshotOffset(SnapshotOffset offset) {
+ MOZ_ASSERT(snapshotOffset_ == INVALID_SNAPSHOT_OFFSET);
+ snapshotOffset_ = offset;
+ }
+ BailoutKind bailoutKind() const { return bailoutKind_; }
+ void rewriteRecoveredInput(LUse input);
+};
+
+struct SafepointSlotEntry {
+ // Flag indicating whether this is a slot in the stack or argument space.
+ uint32_t stack : 1;
+
+ // Byte offset of the slot, as in LStackSlot or LArgument.
+ uint32_t slot : 31;
+
+ SafepointSlotEntry() : stack(0), slot(0) {}
+ SafepointSlotEntry(bool stack, uint32_t slot) : stack(stack), slot(slot) {}
+ explicit SafepointSlotEntry(const LAllocation* a)
+ : stack(a->isStackSlot()), slot(a->memorySlot()) {}
+};
+
+struct SafepointNunboxEntry {
+ uint32_t typeVreg;
+ LAllocation type;
+ LAllocation payload;
+
+ SafepointNunboxEntry() : typeVreg(0) {}
+ SafepointNunboxEntry(uint32_t typeVreg, LAllocation type, LAllocation payload)
+ : typeVreg(typeVreg), type(type), payload(payload) {}
+};
+
+class LSafepoint : public TempObject {
+ using SlotEntry = SafepointSlotEntry;
+ using NunboxEntry = SafepointNunboxEntry;
+
+ public:
+ typedef Vector<SlotEntry, 0, JitAllocPolicy> SlotList;
+ typedef Vector<NunboxEntry, 0, JitAllocPolicy> NunboxList;
+
+ private:
+ // The information in a safepoint describes the registers and gc related
+ // values that are live at the start of the associated instruction.
+
+ // The set of registers which are live at an OOL call made within the
+ // instruction. This includes any registers for inputs which are not
+ // use-at-start, any registers for temps, and any registers live after the
+ // call except outputs of the instruction.
+ //
+ // For call instructions, the live regs are empty. Call instructions may
+ // have register inputs or temporaries, which will *not* be in the live
+ // registers: if passed to the call, the values passed will be marked via
+ // TraceJitExitFrame, and no registers can be live after the instruction
+ // except its outputs.
+ LiveRegisterSet liveRegs_;
+
+ // The subset of liveRegs which contains gcthing pointers.
+ LiveGeneralRegisterSet gcRegs_;
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // Clobbered regs of the current instruction. This set is never written to
+ // the safepoint; it's only used by assertions during compilation.
+ LiveRegisterSet clobberedRegs_;
+#endif
+
+ // Offset to a position in the safepoint stream, or
+ // INVALID_SAFEPOINT_OFFSET.
+ uint32_t safepointOffset_;
+
+ // Assembler buffer displacement to OSI point's call location.
+ uint32_t osiCallPointOffset_;
+
+ // List of slots which have gcthing pointers.
+ SlotList gcSlots_;
+
+#ifdef JS_NUNBOX32
+ // List of registers (in liveRegs) and slots which contain pieces of Values.
+ NunboxList nunboxParts_;
+#elif JS_PUNBOX64
+ // List of slots which have Values.
+ SlotList valueSlots_;
+
+ // The subset of liveRegs which have Values.
+ LiveGeneralRegisterSet valueRegs_;
+#endif
+
+ // The subset of liveRegs which contains pointers to slots/elements.
+ LiveGeneralRegisterSet slotsOrElementsRegs_;
+
+ // List of slots which have slots/elements pointers.
+ SlotList slotsOrElementsSlots_;
+
+ // Wasm only: with what kind of instruction is this LSafepoint associated?
+ // true => wasm trap, false => wasm call.
+ bool isWasmTrap_;
+
+ // Wasm only: what is the value of masm.framePushed() that corresponds to
+ // the lowest-addressed word covered by the StackMap that we will generate
+ // from this LSafepoint? This depends on the instruction:
+ //
+ // if isWasmTrap_ == true:
+ // masm.framePushed() unmodified. Note that when constructing the
+ // StackMap we will add entries below this point to take account of
+ // registers dumped on the stack as a result of the trap.
+ //
+ // if isWasmTrap_ == false:
+ // masm.framePushed() - StackArgAreaSizeUnaligned(arg types for the call),
+ // because the map does not include the outgoing args themselves, but
+ // it does cover any and all alignment space above them.
+ uint32_t framePushedAtStackMapBase_;
+
+ public:
+ void assertInvariants() {
+ // Every register in valueRegs and gcRegs should also be in liveRegs.
+#ifndef JS_NUNBOX32
+ MOZ_ASSERT((valueRegs().bits() & ~liveRegs().gprs().bits()) == 0);
+#endif
+ MOZ_ASSERT((gcRegs().bits() & ~liveRegs().gprs().bits()) == 0);
+ }
+
+ explicit LSafepoint(TempAllocator& alloc)
+ : safepointOffset_(INVALID_SAFEPOINT_OFFSET),
+ osiCallPointOffset_(0),
+ gcSlots_(alloc),
+#ifdef JS_NUNBOX32
+ nunboxParts_(alloc),
+#else
+ valueSlots_(alloc),
+#endif
+ slotsOrElementsSlots_(alloc),
+ isWasmTrap_(false),
+ framePushedAtStackMapBase_(0) {
+ assertInvariants();
+ }
+ void addLiveRegister(AnyRegister reg) {
+ liveRegs_.addUnchecked(reg);
+ assertInvariants();
+ }
+ const LiveRegisterSet& liveRegs() const { return liveRegs_; }
+#ifdef CHECK_OSIPOINT_REGISTERS
+ void addClobberedRegister(AnyRegister reg) {
+ clobberedRegs_.addUnchecked(reg);
+ assertInvariants();
+ }
+ const LiveRegisterSet& clobberedRegs() const { return clobberedRegs_; }
+#endif
+ void addGcRegister(Register reg) {
+ gcRegs_.addUnchecked(reg);
+ assertInvariants();
+ }
+ LiveGeneralRegisterSet gcRegs() const { return gcRegs_; }
+ [[nodiscard]] bool addGcSlot(bool stack, uint32_t slot) {
+ bool result = gcSlots_.append(SlotEntry(stack, slot));
+ if (result) {
+ assertInvariants();
+ }
+ return result;
+ }
+ SlotList& gcSlots() { return gcSlots_; }
+
+ SlotList& slotsOrElementsSlots() { return slotsOrElementsSlots_; }
+ LiveGeneralRegisterSet slotsOrElementsRegs() const {
+ return slotsOrElementsRegs_;
+ }
+ void addSlotsOrElementsRegister(Register reg) {
+ slotsOrElementsRegs_.addUnchecked(reg);
+ assertInvariants();
+ }
+ [[nodiscard]] bool addSlotsOrElementsSlot(bool stack, uint32_t slot) {
+ bool result = slotsOrElementsSlots_.append(SlotEntry(stack, slot));
+ if (result) {
+ assertInvariants();
+ }
+ return result;
+ }
+ [[nodiscard]] bool addSlotsOrElementsPointer(LAllocation alloc) {
+ if (alloc.isMemory()) {
+ return addSlotsOrElementsSlot(alloc.isStackSlot(), alloc.memorySlot());
+ }
+ MOZ_ASSERT(alloc.isRegister());
+ addSlotsOrElementsRegister(alloc.toRegister().gpr());
+ assertInvariants();
+ return true;
+ }
+ bool hasSlotsOrElementsPointer(LAllocation alloc) const {
+ if (alloc.isRegister()) {
+ return slotsOrElementsRegs().has(alloc.toRegister().gpr());
+ }
+ for (size_t i = 0; i < slotsOrElementsSlots_.length(); i++) {
+ const SlotEntry& entry = slotsOrElementsSlots_[i];
+ if (entry.stack == alloc.isStackSlot() &&
+ entry.slot == alloc.memorySlot()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ [[nodiscard]] bool addGcPointer(LAllocation alloc) {
+ if (alloc.isMemory()) {
+ return addGcSlot(alloc.isStackSlot(), alloc.memorySlot());
+ }
+ if (alloc.isRegister()) {
+ addGcRegister(alloc.toRegister().gpr());
+ }
+ assertInvariants();
+ return true;
+ }
+
+ bool hasGcPointer(LAllocation alloc) const {
+ if (alloc.isRegister()) {
+ return gcRegs().has(alloc.toRegister().gpr());
+ }
+ MOZ_ASSERT(alloc.isMemory());
+ for (size_t i = 0; i < gcSlots_.length(); i++) {
+ if (gcSlots_[i].stack == alloc.isStackSlot() &&
+ gcSlots_[i].slot == alloc.memorySlot()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Return true if all GC-managed pointers from `alloc` are recorded in this
+ // safepoint.
+ bool hasAllGcPointersFromStackArea(LAllocation alloc) const {
+ for (LStackArea::ResultIterator iter = alloc.toStackArea()->results(); iter;
+ iter.next()) {
+ if (iter.isGcPointer() && !hasGcPointer(iter.alloc())) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+#ifdef JS_NUNBOX32
+ [[nodiscard]] bool addNunboxParts(uint32_t typeVreg, LAllocation type,
+ LAllocation payload) {
+ bool result = nunboxParts_.append(NunboxEntry(typeVreg, type, payload));
+ if (result) {
+ assertInvariants();
+ }
+ return result;
+ }
+
+ [[nodiscard]] bool addNunboxType(uint32_t typeVreg, LAllocation type) {
+ for (size_t i = 0; i < nunboxParts_.length(); i++) {
+ if (nunboxParts_[i].type == type) {
+ return true;
+ }
+ if (nunboxParts_[i].type == LUse(typeVreg, LUse::ANY)) {
+ nunboxParts_[i].type = type;
+ return true;
+ }
+ }
+
+ // vregs for nunbox pairs are adjacent, with the type coming first.
+ uint32_t payloadVreg = typeVreg + 1;
+ bool result = nunboxParts_.append(
+ NunboxEntry(typeVreg, type, LUse(payloadVreg, LUse::ANY)));
+ if (result) {
+ assertInvariants();
+ }
+ return result;
+ }
+
+ [[nodiscard]] bool addNunboxPayload(uint32_t payloadVreg,
+ LAllocation payload) {
+ for (size_t i = 0; i < nunboxParts_.length(); i++) {
+ if (nunboxParts_[i].payload == payload) {
+ return true;
+ }
+ if (nunboxParts_[i].payload == LUse(payloadVreg, LUse::ANY)) {
+ nunboxParts_[i].payload = payload;
+ return true;
+ }
+ }
+
+ // vregs for nunbox pairs are adjacent, with the type coming first.
+ uint32_t typeVreg = payloadVreg - 1;
+ bool result = nunboxParts_.append(
+ NunboxEntry(typeVreg, LUse(typeVreg, LUse::ANY), payload));
+ if (result) {
+ assertInvariants();
+ }
+ return result;
+ }
+
+ LAllocation findTypeAllocation(uint32_t typeVreg) {
+ // Look for some allocation for the specified type vreg, to go with a
+ // partial nunbox entry for the payload. Note that we don't need to
+ // look at the value slots in the safepoint, as these aren't used by
+ // register allocators which add partial nunbox entries.
+ for (size_t i = 0; i < nunboxParts_.length(); i++) {
+ if (nunboxParts_[i].typeVreg == typeVreg &&
+ !nunboxParts_[i].type.isUse()) {
+ return nunboxParts_[i].type;
+ }
+ }
+ return LUse(typeVreg, LUse::ANY);
+ }
+
+# ifdef DEBUG
+ bool hasNunboxPayload(LAllocation payload) const {
+ for (size_t i = 0; i < nunboxParts_.length(); i++) {
+ if (nunboxParts_[i].payload == payload) {
+ return true;
+ }
+ }
+ return false;
+ }
+# endif
+
+ NunboxList& nunboxParts() { return nunboxParts_; }
+
+#elif JS_PUNBOX64
+ [[nodiscard]] bool addValueSlot(bool stack, uint32_t slot) {
+ bool result = valueSlots_.append(SlotEntry(stack, slot));
+ if (result) {
+ assertInvariants();
+ }
+ return result;
+ }
+ SlotList& valueSlots() { return valueSlots_; }
+
+ bool hasValueSlot(bool stack, uint32_t slot) const {
+ for (size_t i = 0; i < valueSlots_.length(); i++) {
+ if (valueSlots_[i].stack == stack && valueSlots_[i].slot == slot) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void addValueRegister(Register reg) {
+ valueRegs_.add(reg);
+ assertInvariants();
+ }
+ LiveGeneralRegisterSet valueRegs() const { return valueRegs_; }
+
+ [[nodiscard]] bool addBoxedValue(LAllocation alloc) {
+ if (alloc.isRegister()) {
+ Register reg = alloc.toRegister().gpr();
+ if (!valueRegs().has(reg)) {
+ addValueRegister(reg);
+ }
+ return true;
+ }
+ if (hasValueSlot(alloc.isStackSlot(), alloc.memorySlot())) {
+ return true;
+ }
+ return addValueSlot(alloc.isStackSlot(), alloc.memorySlot());
+ }
+
+ bool hasBoxedValue(LAllocation alloc) const {
+ if (alloc.isRegister()) {
+ return valueRegs().has(alloc.toRegister().gpr());
+ }
+ return hasValueSlot(alloc.isStackSlot(), alloc.memorySlot());
+ }
+
+#endif // JS_PUNBOX64
+
+ bool encoded() const { return safepointOffset_ != INVALID_SAFEPOINT_OFFSET; }
+ uint32_t offset() const {
+ MOZ_ASSERT(encoded());
+ return safepointOffset_;
+ }
+ void setOffset(uint32_t offset) { safepointOffset_ = offset; }
+ uint32_t osiReturnPointOffset() const {
+ // In general, pointer arithmetic on code is bad, but in this case,
+ // getting the return address from a call instruction, stepping over pools
+ // would be wrong.
+ return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
+ }
+ uint32_t osiCallPointOffset() const { return osiCallPointOffset_; }
+ void setOsiCallPointOffset(uint32_t osiCallPointOffset) {
+ MOZ_ASSERT(!osiCallPointOffset_);
+ osiCallPointOffset_ = osiCallPointOffset;
+ }
+
+ bool isWasmTrap() const { return isWasmTrap_; }
+ void setIsWasmTrap() { isWasmTrap_ = true; }
+
+ uint32_t framePushedAtStackMapBase() const {
+ return framePushedAtStackMapBase_;
+ }
+ void setFramePushedAtStackMapBase(uint32_t n) {
+ MOZ_ASSERT(framePushedAtStackMapBase_ == 0);
+ framePushedAtStackMapBase_ = n;
+ }
+};
+
+class LInstruction::InputIterator {
+ private:
+ LInstruction& ins_;
+ size_t idx_;
+ bool snapshot_;
+
+ void handleOperandsEnd() {
+ // Iterate on the snapshot when iteration over all operands is done.
+ if (!snapshot_ && idx_ == ins_.numOperands() && ins_.snapshot()) {
+ idx_ = 0;
+ snapshot_ = true;
+ }
+ }
+
+ public:
+ explicit InputIterator(LInstruction& ins)
+ : ins_(ins), idx_(0), snapshot_(false) {
+ handleOperandsEnd();
+ }
+
+ bool more() const {
+ if (snapshot_) {
+ return idx_ < ins_.snapshot()->numEntries();
+ }
+ if (idx_ < ins_.numOperands()) {
+ return true;
+ }
+ if (ins_.snapshot() && ins_.snapshot()->numEntries()) {
+ return true;
+ }
+ return false;
+ }
+
+ bool isSnapshotInput() const { return snapshot_; }
+
+ void next() {
+ MOZ_ASSERT(more());
+ idx_++;
+ handleOperandsEnd();
+ }
+
+ void replace(const LAllocation& alloc) {
+ if (snapshot_) {
+ ins_.snapshot()->setEntry(idx_, alloc);
+ } else {
+ ins_.setOperand(idx_, alloc);
+ }
+ }
+
+ LAllocation* operator*() const {
+ if (snapshot_) {
+ return ins_.snapshot()->getEntry(idx_);
+ }
+ return ins_.getOperand(idx_);
+ }
+
+ LAllocation* operator->() const { return **this; }
+};
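+
+// Illustrative usage sketch (not part of this patch): given an
+// `LInstruction* ins`, every operand and snapshot input can be visited and,
+// if needed, replaced through the iterator above, e.g.:
+//
+//   for (LInstruction::InputIterator it(*ins); it.more(); it.next()) {
+//     LAllocation* alloc = *it;
+//     // it.isSnapshotInput() tells whether `alloc` came from the snapshot
+//     // rather than from the instruction's operand list.
+//   }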
+
+class LIRGraph {
+ struct ValueHasher {
+ using Lookup = Value;
+ static HashNumber hash(const Value& v) { return HashNumber(v.asRawBits()); }
+ static bool match(const Value& lhs, const Value& rhs) { return lhs == rhs; }
+ };
+
+ FixedList<LBlock> blocks_;
+
+ // constantPool_ is a mozilla::Vector, not a js::Vector, because
+ // js::Vector<Value> is prohibited as unsafe. This particular Vector of
+ // Values is safe because it is only used within the scope of an
+ // AutoSuppressGC (in IonCompile), which inhibits GC.
+ mozilla::Vector<Value, 0, JitAllocPolicy> constantPool_;
+ typedef HashMap<Value, uint32_t, ValueHasher, JitAllocPolicy> ConstantPoolMap;
+ ConstantPoolMap constantPoolMap_;
+ Vector<LInstruction*, 0, JitAllocPolicy> safepoints_;
+ Vector<LInstruction*, 0, JitAllocPolicy> nonCallSafepoints_;
+ uint32_t numVirtualRegisters_;
+ uint32_t numInstructions_;
+
+ // Size of stack slots needed for local spills.
+ uint32_t localSlotsSize_;
+ // Number of JS::Value stack slots needed for argument construction for calls.
+ uint32_t argumentSlotCount_;
+
+ MIRGraph& mir_;
+
+ public:
+ explicit LIRGraph(MIRGraph* mir);
+
+ [[nodiscard]] bool init() {
+ return blocks_.init(mir_.alloc(), mir_.numBlocks());
+ }
+ MIRGraph& mir() const { return mir_; }
+ size_t numBlocks() const { return blocks_.length(); }
+ LBlock* getBlock(size_t i) { return &blocks_[i]; }
+ uint32_t numBlockIds() const { return mir_.numBlockIds(); }
+ [[nodiscard]] bool initBlock(MBasicBlock* mir) {
+ auto* block = &blocks_[mir->id()];
+ auto* lir = new (block) LBlock(mir);
+ return lir->init(mir_.alloc());
+ }
+ uint32_t getVirtualRegister() {
+ numVirtualRegisters_ += VREG_INCREMENT;
+ return numVirtualRegisters_;
+ }
+ uint32_t numVirtualRegisters() const {
+ // Virtual registers are 1-based, not 0-based, so add one as a
+ // convenience for 0-based arrays.
+ return numVirtualRegisters_ + 1;
+ }
+ uint32_t getInstructionId() { return numInstructions_++; }
+ uint32_t numInstructions() const { return numInstructions_; }
+ void setLocalSlotsSize(uint32_t localSlotsSize) {
+ localSlotsSize_ = localSlotsSize;
+ }
+ uint32_t localSlotsSize() const { return localSlotsSize_; }
+ void setArgumentSlotCount(uint32_t argumentSlotCount) {
+ argumentSlotCount_ = argumentSlotCount;
+ }
+ uint32_t argumentSlotCount() const { return argumentSlotCount_; }
+ [[nodiscard]] bool addConstantToPool(const Value& v, uint32_t* index);
+ size_t numConstants() const { return constantPool_.length(); }
+ Value* constantPool() { return &constantPool_[0]; }
+
+ bool noteNeedsSafepoint(LInstruction* ins);
+ size_t numNonCallSafepoints() const { return nonCallSafepoints_.length(); }
+ LInstruction* getNonCallSafepoint(size_t i) const {
+ return nonCallSafepoints_[i];
+ }
+ size_t numSafepoints() const { return safepoints_.length(); }
+ LInstruction* getSafepoint(size_t i) const { return safepoints_[i]; }
+
+#ifdef JS_JITSPEW
+ void dump(GenericPrinter& out);
+ void dump();
+#endif
+};
+
+LAllocation::LAllocation(AnyRegister reg) {
+ if (reg.isFloat()) {
+ *this = LFloatReg(reg.fpu());
+ } else {
+ *this = LGeneralReg(reg.gpr());
+ }
+}
+
+AnyRegister LAllocation::toRegister() const {
+ MOZ_ASSERT(isRegister());
+ if (isFloatReg()) {
+ return AnyRegister(toFloatReg()->reg());
+ }
+ return AnyRegister(toGeneralReg()->reg());
+}
+
+} // namespace jit
+} // namespace js
+
+#include "jit/shared/LIR-shared.h"
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+# if defined(JS_CODEGEN_X86)
+# include "jit/x86/LIR-x86.h"
+# elif defined(JS_CODEGEN_X64)
+# include "jit/x64/LIR-x64.h"
+# endif
+# include "jit/x86-shared/LIR-x86-shared.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/LIR-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/LIR-arm64.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/LIR-loong64.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/LIR-riscv64.h"
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+# if defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/LIR-mips32.h"
+# elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/LIR-mips64.h"
+# endif
+# include "jit/mips-shared/LIR-mips-shared.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/LIR-wasm32.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/LIR-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+#undef LIR_HEADER
+
+namespace js {
+namespace jit {
+
+#define LIROP(name) \
+ L##name* LNode::to##name() { \
+ MOZ_ASSERT(is##name()); \
+ return static_cast<L##name*>(this); \
+ } \
+ const L##name* LNode::to##name() const { \
+ MOZ_ASSERT(is##name()); \
+ return static_cast<const L##name*>(this); \
+ }
+LIR_OPCODE_LIST(LIROP)
+#undef LIROP
+
+#define LALLOC_CAST(type) \
+ L##type* LAllocation::to##type() { \
+ MOZ_ASSERT(is##type()); \
+ return static_cast<L##type*>(this); \
+ }
+#define LALLOC_CONST_CAST(type) \
+ const L##type* LAllocation::to##type() const { \
+ MOZ_ASSERT(is##type()); \
+ return static_cast<const L##type*>(this); \
+ }
+
+LALLOC_CAST(Use)
+LALLOC_CONST_CAST(Use)
+LALLOC_CONST_CAST(GeneralReg)
+LALLOC_CONST_CAST(FloatReg)
+LALLOC_CONST_CAST(StackSlot)
+LALLOC_CAST(StackArea)
+LALLOC_CONST_CAST(StackArea)
+LALLOC_CONST_CAST(Argument)
+LALLOC_CONST_CAST(ConstantIndex)
+
+#undef LALLOC_CAST
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_LIR_h */
diff --git a/js/src/jit/LIROps.yaml b/js/src/jit/LIROps.yaml
new file mode 100644
index 0000000000..de497278e0
--- /dev/null
+++ b/js/src/jit/LIROps.yaml
@@ -0,0 +1,3972 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# [SMDOC] LIR Opcodes
+# =======================
+# This file defines all LIR opcodes as well as LIR opcode class
+# definitions. It is parsed by GenerateLIRFiles.py at build time to
+# create LIROpsGenerated.h. Each opcode consists of a
+# name and a set of attributes that are described below. Unless
+# marked as required, attributes are optional.
+#
+# name [required]
+# ====
+# Opcode name.
+# Possible values:
+# - opcode string: used as the name for LIR opcode.
+#
+# gen_boilerplate
+# ===============
+# Used to decide to generate LIR boilerplate.
+# - true (default): auto generate boilerplate for this LIR opcode
+# - false: do not generate boilerplate for this LIR opcode
+#
+# result_type
+# ===========
+# Specifies the result type that is produced by this LIR instruction.
+# The result type can be any of the following: WordSized, BoxedValue,
+# or Int64.
+# - attribute not specified (default): there is no result produced
+# by this LIR instruction
+# - result type: sets result type for this LIR instruction
+#
+# operands
+# ========
+# A list of operands to the LIR node. Each operand will be
+# passed into and set in the instruction's constructor. A simple getter
+# will also be auto-generated for the operand. Each operand in the
+# following list is defined by its name and a type.
+# The type can be WordSized, BoxedValue, or Int64.
+#
+# For example:
+# operands:
+# lhs: BoxedValue
+# rhs: WordSized
+#
+# Will result in:
+# explicit LInstanceOfV(const LBoxAllocation& lhs, const LAllocation& rhs)
+# : LInstructionHelper(classOpcode) {
+# setBoxOperand(lhsIndex, lhs);
+# setOperand(rhsIndex, rhs);
+# }
+# const LAllocation* rhs() { return getOperand(rhsIndex); }
+#
+# static const size_t lhsIndex = 0;
+# static const size_t rhsIndex = BOX_PIECES;
+#
+# - attribute not specified (default): no code generated
+# - list of operand names with their types: operand getters and setters
+# are generated, and the operands are passed into the constructor
+#
+# arguments
+# =========
+# A list of non-LIR node arguments to the LIR op class constructor
+# that are passed along with the operands. The arguments require
+# both a name and a full type signature for each item in the list.
+#
+# For example:
+# offset: size_t
+# type: MIRType
+#
+# For each argument a private variable declaration will be autogenerated
+# in the LIR op class, as well as a simple accessor for that variable. The
+# above arguments list will result in the following declarations and
+# accessors:
+#
+# size_t offset_;
+# MIRType type_;
+#
+# size_t offset() const { return offset_; }
+# MIRType type() const { return type_; }
+#
+# - attribute not specified (default): no code generated
+# - argument list: argument names and their full type signature
+#
+# num_temps
+# =========
+# Specifies the number of temporary virtual registers, LDefinitions, used by
+# this LIR op.
+# - attribute not specified (default): number of temps is set to 0
+# - number of LDefinition temps: sets the number of temps (maximum 15)
+#
+# call_instruction
+# ================
+# Used to define call instructions.
+# - attribute not specified (default): no code generated
+# - true: generates a call to setIsCall in the op's constructor
+#
+# mir_op
+# ======
+# If a LIR instruction corresponds one-to-one with a particular MIR
+# instruction, this will generate a method that returns that MIR
+# instruction.
+# - attribute not specified (default): no code generated
+# - true: generates a method to return the corresponding MIR instruction
+# - mir string: generates a method to return the specified MIR instruction
+#
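+# Putting it together, a hypothetical entry using the attributes above could
+# look like the commented-out sketch below. The opcode name and its fields
+# are invented purely for illustration and are not part of this list:
+#
+#   - name: MyHypotheticalOp
+#     result_type: WordSized
+#     operands:
+#       object: WordSized
+#       value: BoxedValue
+#     arguments:
+#       offset: size_t
+#     num_temps: 1
+#     call_instruction: true
+#     mir_op: true
+#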
+
+- name: Phi
+ gen_boilerplate: false
+
+- name: Box
+ gen_boilerplate: false
+
+- name: OsiPoint
+ gen_boilerplate: false
+
+- name: MoveGroup
+ gen_boilerplate: false
+
+# Constant 32-bit integer.
+- name: Integer
+ result_type: WordSized
+ arguments:
+ i32: int32_t
+
+# Constant 64-bit integer.
+- name: Integer64
+ result_type: Int64
+ arguments:
+ i64: int64_t
+
+# Constant pointer.
+- name: Pointer
+ result_type: WordSized
+ arguments:
+ gcptr: gc::Cell*
+
+# Constant double.
+- name: Double
+ result_type: WordSized
+ arguments:
+ value: double
+
+# Constant float32.
+- name: Float32
+ result_type: WordSized
+ arguments:
+ value: float
+
+- name: Value
+ gen_boilerplate: false
+
+- name: NurseryObject
+ result_type: WordSized
+ mir_op: true
+
+# Formal argument for a function, returning a box. Formal arguments are
+# initially read from the stack.
+- name: Parameter
+ result_type: BoxedValue
+
+# Stack offset for a word-sized immutable input value to a frame.
+- name: Callee
+ result_type: WordSized
+
+- name: IsConstructing
+ result_type: WordSized
+
+- name: Goto
+ gen_boilerplate: false
+
+- name: NewArray
+ gen_boilerplate: false
+
+- name: NewArrayDynamicLength
+ result_type: WordSized
+ operands:
+ length: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: NewIterator
+ result_type: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: NewTypedArray
+ result_type: WordSized
+ num_temps: 2
+ mir_op: true
+
+- name: NewTypedArrayDynamicLength
+ result_type: WordSized
+ operands:
+ length: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: NewTypedArrayFromArray
+ result_type: WordSized
+ operands:
+ array: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: NewTypedArrayFromArrayBuffer
+ result_type: WordSized
+ operands:
+ arrayBuffer: WordSized
+ byteOffset: BoxedValue
+ length: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: BindFunction
+ result_type: WordSized
+ operands:
+ target: WordSized
+ call_instruction: true
+ num_temps: 2
+ mir_op: true
+
+- name: NewBoundFunction
+ result_type: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: NewObject
+ gen_boilerplate: false
+
+- name: NewPlainObject
+ result_type: WordSized
+ num_temps: 3
+ mir_op: true
+
+- name: NewArrayObject
+ result_type: WordSized
+ num_temps: 2
+ mir_op: true
+
+# Allocates a new NamedLambdaObject.
+#
+# This instruction generates two possible instruction sets:
+# (1) An inline allocation of the named lambda object is attempted.
+# (2) Otherwise, a callVM creates a new object.
+#
+- name: NewNamedLambdaObject
+ result_type: WordSized
+ num_temps: 1
+ mir_op: true
+
+# Allocates a new CallObject.
+#
+# This instruction generates two possible instruction sets:
+# (1) If the call object is extensible, this is a callVM to create the
+# call object.
+# (2) Otherwise, an inline allocation of the call object is attempted.
+#
+- name: NewCallObject
+ result_type: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: NewStringObject
+ result_type: WordSized
+ operands:
+ input: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: InitElemGetterSetter
+ operands:
+ object: WordSized
+ id: BoxedValue
+ value: WordSized
+ call_instruction: true
+ mir_op: true
+
+# Takes in an Object and a Value.
+- name: MutateProto
+ operands:
+ object: WordSized
+ value: BoxedValue
+ call_instruction: true
+
+- name: InitPropGetterSetter
+ operands:
+ object: WordSized
+ value: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: CheckOverRecursed
+ mir_op: true
+
+- name: WasmTrap
+ mir_op: true
+
+- name: WasmTrapIfNull
+ operands:
+ object: WordSized
+ mir_op: true
+
+- name: WasmGcObjectIsSubtypeOfConcrete
+ mir_op: true
+ operands:
+ object: WordSized
+ superSuperTypeVector: WordSized
+ result_type: WordSized
+ num_temps: 2
+
+- name: WasmGcObjectIsSubtypeOfAbstract
+ mir_op: true
+ operands:
+ object: WordSized
+ result_type: WordSized
+ num_temps: 1
+
+- name: WasmGcObjectIsSubtypeOfConcreteAndBranch
+ gen_boilerplate: false
+
+- name: WasmGcObjectIsSubtypeOfAbstractAndBranch
+ gen_boilerplate: false
+
+- name: WasmReinterpret
+ gen_boilerplate: false
+
+- name: WasmReinterpretFromI64
+ gen_boilerplate: false
+
+- name: WasmReinterpretToI64
+ gen_boilerplate: false
+
+- name: Rotate
+ gen_boilerplate: false
+
+- name: RotateI64
+ gen_boilerplate: false
+
+- name: InterruptCheck
+ mir_op: true
+
+- name: WasmInterruptCheck
+ operands:
+ instance: WordSized
+ mir_op: true
+
+- name: TypeOfV
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ num_temps: 1
+ mir_op: TypeOf
+
+- name: TypeOfO
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: TypeOf
+
+- name: TypeOfName
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: true
+
+- name: TypeOfIsNonPrimitiveV
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ num_temps: 1
+ mir_op: TypeOfIs
+
+- name: TypeOfIsNonPrimitiveO
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: TypeOfIs
+
+- name: TypeOfIsPrimitive
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ mir_op: TypeOfIs
+
+- name: ToAsyncIter
+ result_type: WordSized
+ operands:
+ iterator: WordSized
+ nextMethod: BoxedValue
+ call_instruction: true
+
+- name: ToPropertyKeyCache
+ result_type: BoxedValue
+ operands:
+ input: BoxedValue
+ mir_op: true
+
+# Allocate an object for |new| on the caller side, when there is no known
+# templateObject or prototype.
+- name: CreateThis
+ result_type: BoxedValue
+ operands:
+ callee: WordSized
+ newTarget: WordSized
+ call_instruction: true
+ mir_op: true
+
+# Allocate a new arguments object for the frame.
+- name: CreateArgumentsObject
+ result_type: WordSized
+ operands:
+ callObject: WordSized
+ num_temps: 3
+ call_instruction: true
+ mir_op: true
+
+- name: CreateInlinedArgumentsObject
+ gen_boilerplate: false
+
+- name: GetInlinedArgument
+ gen_boilerplate: false
+
+- name: GetInlinedArgumentHole
+ gen_boilerplate: false
+
+# Get argument from arguments object.
+- name: GetArgumentsObjectArg
+ result_type: BoxedValue
+ operands:
+ argsObject: WordSized
+ num_temps: 1
+ mir_op: true
+
+# Set argument on arguments object.
+- name: SetArgumentsObjectArg
+ operands:
+ argsObject: WordSized
+ value: BoxedValue
+ num_temps: 1
+ mir_op: true
+
+# Load an element from an arguments object.
+- name: LoadArgumentsObjectArg
+ result_type: BoxedValue
+ operands:
+ argsObject: WordSized
+ index: WordSized
+ num_temps: 1
+
+# Load an element from an arguments object. Handles out-of-bounds accesses.
+- name: LoadArgumentsObjectArgHole
+ result_type: BoxedValue
+ operands:
+ argsObject: WordSized
+ index: WordSized
+ num_temps: 1
+
+# Return true if the element exists in the arguments object.
+- name: InArgumentsObjectArg
+ result_type: WordSized
+ operands:
+ argsObject: WordSized
+ index: WordSized
+ num_temps: 1
+
+# Return |arguments.length| unless it has been overridden.
+- name: ArgumentsObjectLength
+ result_type: WordSized
+ operands:
+ argsObject: WordSized
+
+# Create an array from an arguments object.
+- name: ArrayFromArgumentsObject
+ result_type: WordSized
+ operands:
+ argsObject: WordSized
+ call_instruction: true
+ mir_op: true
+
+# Guard that the given flags are not set on the arguments object.
+- name: GuardArgumentsObjectFlags
+ operands:
+ argsObject: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: BoundFunctionNumArgs
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+- name: GuardBoundFunctionIsConstructor
+ operands:
+ object: WordSized
+
+# If the Value is an Object, return unbox(Value).
+# Otherwise, return the other Object.
+- name: ReturnFromCtor
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ object: WordSized
+
+- name: BoxNonStrictThis
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ mir_op: true
+
+- name: ImplicitThis
+ result_type: BoxedValue
+ operands:
+ env: WordSized
+ call_instruction: true
+ mir_op: true
+
+# Writes a typed argument for a function call to the frame's argument vector.
+- name: StackArgT
+ operands:
+ arg: WordSized
+ arguments:
+ # Index into frame-scope argument vector.
+ argslot: uint32_t
+ type: MIRType
+
+# Writes an untyped (boxed Value) argument for a function call to the frame's
+# argument vector.
+- name: StackArgV
+ operands:
+ value: BoxedValue
+ arguments:
+ # Index into frame-scope argument vector.
+ argslot: uint32_t
+
+- name: CallGeneric
+ gen_boilerplate: false
+
+- name: CallKnown
+ gen_boilerplate: false
+
+- name: CallNative
+ gen_boilerplate: false
+
+- name: CallDOMNative
+ gen_boilerplate: false
+
+- name: CallClassHook
+ gen_boilerplate: false
+
+- name: Bail
+
+- name: Unreachable
+ gen_boilerplate: false
+
+- name: EncodeSnapshot
+
+- name: UnreachableResultV
+ gen_boilerplate: false
+
+- name: UnreachableResultT
+ result_type: WordSized
+
+- name: GetDOMProperty
+ gen_boilerplate: false
+
+- name: GetDOMMemberV
+ gen_boilerplate: false
+
+- name: GetDOMMemberT
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: GetDOMMember
+
+- name: SetDOMProperty
+ gen_boilerplate: false
+
+- name: LoadDOMExpandoValue
+ result_type: BoxedValue
+ operands:
+ proxy: WordSized
+ mir_op: true
+
+- name: LoadDOMExpandoValueGuardGeneration
+ result_type: BoxedValue
+ operands:
+ proxy: WordSized
+ mir_op: true
+
+- name: LoadDOMExpandoValueIgnoreGeneration
+ result_type: BoxedValue
+ operands:
+ proxy: WordSized
+ mir_op: true
+
+- name: GuardDOMExpandoMissingOrGuardShape
+ operands:
+ input: BoxedValue
+ num_temps: 1
+ mir_op: true
+
+- name: ApplyArgsGeneric
+ gen_boilerplate: false
+
+- name: ApplyArgsObj
+ gen_boilerplate: false
+
+- name: ApplyArrayGeneric
+ gen_boilerplate: false
+
+- name: ConstructArgsGeneric
+ gen_boilerplate: false
+
+- name: ConstructArrayGeneric
+ gen_boilerplate: false
+
+- name: TestIAndBranch
+ gen_boilerplate: false
+
+- name: TestI64AndBranch
+ gen_boilerplate: false
+
+- name: TestDAndBranch
+ gen_boilerplate: false
+
+- name: TestFAndBranch
+ gen_boilerplate: false
+
+- name: TestBIAndBranch
+ gen_boilerplate: false
+
+- name: TestOAndBranch
+ gen_boilerplate: false
+
+- name: TestVAndBranch
+ gen_boilerplate: false
+
+- name: Compare
+ gen_boilerplate: false
+
+- name: CompareI64
+ gen_boilerplate: false
+
+- name: CompareI64AndBranch
+ gen_boilerplate: false
+
+- name: CompareAndBranch
+ gen_boilerplate: false
+
+- name: CompareD
+ result_type: WordSized
+ operands:
+ left: WordSized
+ right: WordSized
+ mir_op: Compare
+
+- name: CompareF
+ result_type: WordSized
+ operands:
+ left: WordSized
+ right: WordSized
+ mir_op: Compare
+
+- name: CompareDAndBranch
+ gen_boilerplate: false
+
+- name: CompareFAndBranch
+ gen_boilerplate: false
+
+- name: CompareS
+ result_type: WordSized
+ operands:
+ left: WordSized
+ right: WordSized
+ mir_op: Compare
+
+- name: CompareSInline
+ result_type: WordSized
+ operands:
+ input: WordSized
+ arguments:
+ constant: JSLinearString*
+ mir_op: Compare
+
+- name: CompareBigInt
+ result_type: WordSized
+ operands:
+ left: WordSized
+ right: WordSized
+ num_temps: 3
+ mir_op: Compare
+
+- name: CompareBigIntInt32
+ result_type: WordSized
+ operands:
+ left: WordSized
+ right: WordSized
+ num_temps: 2
+ mir_op: Compare
+
+- name: CompareBigIntDouble
+ result_type: WordSized
+ operands:
+ left: WordSized
+ right: WordSized
+ call_instruction: true
+ mir_op: Compare
+
+- name: CompareBigIntString
+ result_type: WordSized
+ operands:
+ left: WordSized
+ right: WordSized
+ call_instruction: true
+ mir_op: Compare
+
+- name: BitAndAndBranch
+ gen_boilerplate: false
+
+# Takes a value and tests whether it is null, undefined, or is an object that
+# emulates |undefined|, as determined by the JSCLASS_EMULATES_UNDEFINED class
+# flag on unwrapped objects. See also js::EmulatesUndefined.
+- name: IsNullOrLikeUndefinedV
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ num_temps: 1
+ mir_op: Compare
+
+# Takes an object pointer and tests whether it is an object that emulates
+# |undefined|, as above.
+- name: IsNullOrLikeUndefinedT
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: Compare
+
+# Takes a value and tests whether it is null.
+- name: IsNull
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ mir_op: Compare
+
+# Takes a value and tests whether it is undefined.
+- name: IsUndefined
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ mir_op: Compare
+
+- name: IsNullOrLikeUndefinedAndBranchV
+ gen_boilerplate: false
+
+- name: IsNullOrLikeUndefinedAndBranchT
+ gen_boilerplate: false
+
+- name: IsNullAndBranch
+ gen_boilerplate: false
+
+- name: IsUndefinedAndBranch
+ gen_boilerplate: false
+
+- name: SameValueDouble
+ result_type: WordSized
+ operands:
+ left: WordSized
+ right: WordSized
+ num_temps: 1
+
+- name: SameValue
+ result_type: WordSized
+ operands:
+ lhs: BoxedValue
+ rhs: BoxedValue
+
+# Not operation on an integer.
+- name: NotI
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+# Not operation on an int64.
+- name: NotI64
+ result_type: WordSized
+ operands:
+ inputI64: Int64
+
+# Not operation on a double.
+- name: NotD
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: Not
+
+# Not operation on a float32.
+- name: NotF
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: Not
+
+# Not operation on a BigInt.
+- name: NotBI
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: Not
+
+# Boolean complement operation on an object.
+- name: NotO
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: Not
+
+# Boolean complement operation on a value.
+- name: NotV
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ num_temps: 2
+ mir_op: Not
+
+- name: BitNotI
+ gen_boilerplate: false
+
+- name: BitNotI64
+ gen_boilerplate: false
+
+- name: BitOpI
+ gen_boilerplate: false
+
+- name: BitOpI64
+ gen_boilerplate: false
+
+- name: ShiftI
+ gen_boilerplate: false
+
+- name: ShiftI64
+ gen_boilerplate: false
+
+- name: SignExtendInt32
+ result_type: WordSized
+ operands:
+ num: WordSized
+ arguments:
+ mode: MSignExtendInt32::Mode
+
+- name: SignExtendInt64
+ gen_boilerplate: false
+
+- name: UrshD
+ gen_boilerplate: false
+
+- name: Return
+ gen_boilerplate: false
+
+- name: Throw
+ operands:
+ value: BoxedValue
+ call_instruction: true
+
+- name: MinMaxI
+ gen_boilerplate: false
+
+- name: MinMaxD
+ gen_boilerplate: false
+
+- name: MinMaxF
+ gen_boilerplate: false
+
+- name: MinMaxArrayI
+ gen_boilerplate: false
+
+- name: MinMaxArrayD
+ gen_boilerplate: false
+
+# Negative of integer
+- name: NegI
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Negative of an int64
+- name: NegI64
+ result_type: Int64
+ operands:
+ num: Int64
+
+# Negative of double
+- name: NegD
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Negative of float32
+- name: NegF
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Absolute value of an integer.
+- name: AbsI
+ result_type: WordSized
+ operands:
+ num: WordSized
+ mir_op: Abs
+
+# Absolute value of a double.
+- name: AbsD
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Absolute value of a float32.
+- name: AbsF
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+- name: CopySignD
+ gen_boilerplate: false
+
+- name: CopySignF
+ gen_boilerplate: false
+
+# Count leading zeroes on an int32.
+- name: ClzI
+ result_type: WordSized
+ operands:
+ num: WordSized
+ mir_op: Clz
+
+# Count leading zeroes on an int64.
+- name: ClzI64
+ result_type: Int64
+ operands:
+ num: Int64
+ mir_op: Clz
+
+# Count trailing zeroes on an int32.
+- name: CtzI
+ result_type: WordSized
+ operands:
+ num: WordSized
+ mir_op: Ctz
+
+# Count trailing zeroes on an int64.
+- name: CtzI64
+ result_type: Int64
+ operands:
+ num: Int64
+ mir_op: Ctz
+
+# Count population on an int32.
+- name: PopcntI
+ result_type: WordSized
+ operands:
+ num: WordSized
+ num_temps: 1
+ mir_op: Popcnt
+
+# Count population on an int64.
+- name: PopcntI64
+ result_type: Int64
+ operands:
+ num: Int64
+ num_temps: 1
+ mir_op: Popcnt
+
+- name: SqrtD
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+- name: SqrtF
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+- name: Atan2D
+ gen_boilerplate: false
+
+- name: Hypot
+ gen_boilerplate: false
+
+# Double raised to an integer power.
+- name: PowI
+ result_type: WordSized
+ operands:
+ value: WordSized
+ power: WordSized
+ call_instruction: true
+
+# Integer raised to an integer power.
+- name: PowII
+ result_type: WordSized
+ operands:
+ value: WordSized
+ power: WordSized
+ num_temps: 2
+ mir_op: Pow
+
+# Double raised to a double power.
+- name: PowD
+ result_type: WordSized
+ operands:
+ value: WordSized
+ power: WordSized
+ call_instruction: true
+
+# Constant of a power of two raised to an integer power.
+- name: PowOfTwoI
+ result_type: WordSized
+ operands:
+ power: WordSized
+ arguments:
+ base: uint32_t
+
+# Sign value of an integer.
+- name: SignI
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Sign value of a double.
+- name: SignD
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Sign value of a double with expected int32 result.
+- name: SignDI
+ result_type: WordSized
+ operands:
+ input: WordSized
+ num_temps: 1
+
+- name: MathFunctionD
+ gen_boilerplate: false
+
+- name: MathFunctionF
+ gen_boilerplate: false
+
+- name: AddI
+ gen_boilerplate: false
+
+- name: AddI64
+ gen_boilerplate: false
+
+- name: SubI
+ gen_boilerplate: false
+
+- name: SubI64
+ gen_boilerplate: false
+
+- name: MulI64
+ gen_boilerplate: false
+
+- name: MathD
+ gen_boilerplate: false
+
+- name: MathF
+ gen_boilerplate: false
+
+- name: ModD
+ gen_boilerplate: false
+
+- name: ModPowTwoD
+ gen_boilerplate: false
+
+- name: WasmBuiltinModD
+ result_type: WordSized
+ operands:
+ lhs: WordSized
+ rhs: WordSized
+ instance: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: BigIntAdd
+ gen_boilerplate: false
+
+- name: BigIntSub
+ gen_boilerplate: false
+
+- name: BigIntMul
+ gen_boilerplate: false
+
+- name: BigIntDiv
+ gen_boilerplate: false
+
+- name: BigIntMod
+ gen_boilerplate: false
+
+- name: BigIntPow
+ gen_boilerplate: false
+
+- name: BigIntBitAnd
+ gen_boilerplate: false
+
+- name: BigIntBitOr
+ gen_boilerplate: false
+
+- name: BigIntBitXor
+ gen_boilerplate: false
+
+- name: BigIntLsh
+ gen_boilerplate: false
+
+- name: BigIntRsh
+ gen_boilerplate: false
+
+- name: BigIntIncrement
+ gen_boilerplate: false
+
+- name: BigIntDecrement
+ gen_boilerplate: false
+
+- name: BigIntNegate
+ gen_boilerplate: false
+
+- name: BigIntBitNot
+ gen_boilerplate: false
+
+- name: Int32ToStringWithBase
+ result_type: WordSized
+ operands:
+ input: WordSized
+ base: WordSized
+ num_temps: 2
+ mir_op: true
+
+- name: NumberParseInt
+ result_type: BoxedValue
+ operands:
+ string: WordSized
+ radix: WordSized
+ num_temps: 1
+ call_instruction: true
+ mir_op: true
+
+- name: DoubleParseInt
+ result_type: WordSized
+ operands:
+ number: WordSized
+ num_temps: 1
+ mir_op: true
+
+# Adds two strings, returning a string.
+- name: Concat
+ result_type: WordSized
+ operands:
+ lhs: WordSized
+ rhs: WordSized
+ num_temps: 5
+
+# Linearize a string before a character access.
+- name: LinearizeForCharAccess
+ result_type: WordSized
+ operands:
+ str: WordSized
+ index: WordSized
+ mir_op: true
+
+# Get uint16 character code from a string.
+- name: CharCodeAt
+ result_type: WordSized
+ operands:
+ str: WordSized
+ index: WordSized
+ num_temps: 2
+
+# Get uint16 character code from a string. Return NaN on out-of-bounds.
+- name: CharCodeAtMaybeOutOfBounds
+ result_type: BoxedValue
+ operands:
+ str: WordSized
+ index: WordSized
+ num_temps: 2
+
+# Get uint16 character code from a string and convert it to a string. Return
+# the empty string on out-of-bounds.
+- name: CharAtMaybeOutOfBounds
+ result_type: WordSized
+ operands:
+ str: WordSized
+ index: WordSized
+ num_temps: 2
+
+# Convert uint16 character code to a string.
+- name: FromCharCode
+ result_type: WordSized
+ operands:
+ code: WordSized
+
+# Convert uint32 code point to a string.
+- name: FromCodePoint
+ result_type: WordSized
+ operands:
+ codePoint: WordSized
+ num_temps: 2
+
+# Search for the first index of the search string.
+- name: StringIndexOf
+ result_type: WordSized
+ operands:
+ string: WordSized
+ searchString: WordSized
+ call_instruction: true
+
+# Test if a string starts with the search string
+- name: StringStartsWith
+ result_type: WordSized
+ operands:
+ string: WordSized
+ searchString: WordSized
+ call_instruction: true
+
+# Test if a string starts with the constant search string
+- name: StringStartsWithInline
+ result_type: WordSized
+ operands:
+ string: WordSized
+ arguments:
+ searchString: JSLinearString*
+ num_temps: 1
+
+# Test if a string ends with the search string
+- name: StringEndsWith
+ result_type: WordSized
+ operands:
+ string: WordSized
+ searchString: WordSized
+ call_instruction: true
+
+# Test if a string ends with the constant search string
+- name: StringEndsWithInline
+ result_type: WordSized
+ operands:
+ string: WordSized
+ arguments:
+ searchString: JSLinearString*
+ num_temps: 1
+
+# Calls the ToLowerCase case conversion function. Inlines the case conversion
+# when possible.
+- name: StringToLowerCase
+ result_type: WordSized
+ operands:
+ string: WordSized
+ num_temps: 5
+ mir_op: StringConvertCase
+
+# Calls the ToUpperCase case conversion function.
+- name: StringToUpperCase
+ result_type: WordSized
+ operands:
+ string: WordSized
+ call_instruction: true
+ mir_op: StringConvertCase
+
+- name: StringSplit
+ result_type: WordSized
+ operands:
+ string: WordSized
+ separator: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: Substr
+ result_type: WordSized
+ operands:
+ string: WordSized
+ begin: WordSized
+ length: WordSized
+ num_temps: 3
+ mir_op: StringSplit
+
+- name: Int32ToDouble
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+- name: Float32ToDouble
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+- name: DoubleToFloat32
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+- name: Int32ToFloat32
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+# Convert a value to a double.
+- name: ValueToDouble
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ mir_op: ToDouble
+
+# Convert a value to a float32.
+- name: ValueToFloat32
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ mir_op: ToFloat32
+
+- name: ValueToInt32
+ gen_boilerplate: false
+
+# Convert a value to a BigInt.
+- name: ValueToBigInt
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ mir_op: ToBigInt
+
+# Convert a double to an int32.
+# Input: floating-point Register
+# Output: 32-bit integer
+# Bailout: if the double cannot be converted to an integer.
+- name: DoubleToInt32
+ result_type: WordSized
+ operands:
+ in: WordSized
+ mir_op: ToNumberInt32
+
+# Convert a float32 to an int32.
+# Input: floating-point Register
+# Output: 32-bit integer
+# Bailout: if the float32 cannot be converted to an integer.
+- name: Float32ToInt32
+ result_type: WordSized
+ operands:
+ in: WordSized
+ mir_op: ToNumberInt32
+
+# Convert a double to a truncated int32.
+# Input: floating-point Register
+# Output: 32-bit integer
+- name: TruncateDToInt32
+ result_type: WordSized
+ operands:
+ in: WordSized
+ num_temps: 1
+ mir_op: TruncateToInt32
+
+# Convert a double to a truncated int32 with instance offset because we need it
+# for the slow ool path.
+- name: WasmBuiltinTruncateDToInt32
+ result_type: WordSized
+ operands:
+ in: WordSized
+ instance: WordSized
+ num_temps: 1
+ mir_op: WasmBuiltinTruncateToInt32
+
+# Convert a float32 to a truncated int32.
+# Input: floating-point Register
+# Output: 32-bit integer
+- name: TruncateFToInt32
+ result_type: WordSized
+ operands:
+ in: WordSized
+ num_temps: 1
+ mir_op: TruncateToInt32
+
+# Convert a float32 to a truncated int32 with instance offset because we need
+# it for the slow ool path.
+- name: WasmBuiltinTruncateFToInt32
+ result_type: WordSized
+ operands:
+ in: WordSized
+ instance: WordSized
+ num_temps: 1
+ mir_op: WasmBuiltinTruncateToInt32
+
+- name: WasmTruncateToInt32
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: true
+
+- name: WrapInt64ToInt32
+ result_type: WordSized
+ operands:
+ input: Int64
+ mir_op: true
+
+- name: ExtendInt32ToInt64
+ result_type: Int64
+ operands:
+ input: WordSized
+ mir_op: true
+
+# Convert a boolean value to a string.
+- name: BooleanToString
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: ToString
+
+# Convert an integer hosted on one definition to a string with a function call.
+- name: IntToString
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: ToString
+
+# Convert a double hosted on one definition to a string with a function call.
+- name: DoubleToString
+ result_type: WordSized
+ operands:
+ input: WordSized
+ num_temps: 1
+ mir_op: ToString
+
+# Convert a primitive to a string with a function call.
+- name: ValueToString
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ num_temps: 1
+ mir_op: ToString
+
+- name: PowHalfD
+ gen_boilerplate: false
+
+- name: NaNToZero
+ result_type: WordSized
+ operands:
+ input: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: OsrEntry
+ gen_boilerplate: false
+
+# Materialize a Value stored in an interpreter frame for OSR.
+- name: OsrValue
+ result_type: BoxedValue
+ operands:
+ entry: WordSized
+ mir_op: true
+
+# Materialize a JSObject env chain stored in an interpreter frame for OSR.
+- name: OsrEnvironmentChain
+ result_type: WordSized
+ operands:
+ entry: WordSized
+ mir_op: true
+
+# Materialize the return value stored in an interpreter frame for OSR.
+- name: OsrReturnValue
+ result_type: BoxedValue
+ operands:
+ entry: WordSized
+ mir_op: true
+
+- name: OsrArgumentsObject
+ result_type: WordSized
+ operands:
+ entry: WordSized
+ mir_op: true
+
+- name: RegExp
+ result_type: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: RegExpMatcher
+ result_type: BoxedValue
+ operands:
+ regexp: WordSized
+ string: WordSized
+ lastIndex: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: RegExpSearcher
+ result_type: WordSized
+ operands:
+ regexp: WordSized
+ string: WordSized
+ lastIndex: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: RegExpExecMatch
+ result_type: BoxedValue
+ operands:
+ regexp: WordSized
+ string: WordSized
+ call_instruction: true
+ num_temps: 0
+ mir_op: true
+
+- name: RegExpExecTest
+ result_type: WordSized
+ operands:
+ regexp: WordSized
+ string: WordSized
+ call_instruction: true
+ num_temps: 0
+ mir_op: true
+
+- name: RegExpPrototypeOptimizable
+ result_type: WordSized
+ operands:
+ object: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: RegExpInstanceOptimizable
+ result_type: WordSized
+ operands:
+ object: WordSized
+ proto: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: GetFirstDollarIndex
+ result_type: WordSized
+ operands:
+ str: WordSized
+ num_temps: 3
+
+- name: StringReplace
+ result_type: WordSized
+ operands:
+ string: WordSized
+ pattern: WordSized
+ replacement: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: BinaryValueCache
+ result_type: BoxedValue
+ operands:
+ lhs: BoxedValue
+ rhs: BoxedValue
+ # Takes two temps: these are intended to be FloatReg0 and FloatReg1
+ # to allow the actual cache code to safely clobber those values without
+ # save and restore.
+ num_temps: 2
+ mir_op: BinaryCache
+
+- name: BinaryBoolCache
+ result_type: WordSized
+ operands:
+ lhs: BoxedValue
+ rhs: BoxedValue
+ # Takes two temps: these are intended to be FloatReg0 and FloatReg1
+ # to allow the actual cache code to safely clobber those values without
+ # save and restore.
+ num_temps: 2
+ mir_op: BinaryCache
+
+- name: UnaryCache
+ result_type: BoxedValue
+ operands:
+ input: BoxedValue
+ mir_op_cast: true
+
+- name: ModuleMetadata
+ result_type: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: DynamicImport
+ result_type: WordSized
+ operands:
+ specifier: BoxedValue
+ options: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: Lambda
+ result_type: WordSized
+ operands:
+ environmentChain: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: FunctionWithProto
+ result_type: WordSized
+ operands:
+ envChain: WordSized
+ prototype: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: SetFunName
+ result_type: WordSized
+ operands:
+ fun: WordSized
+ name: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: KeepAliveObject
+ operands:
+ object: WordSized
+
+- name: DebugEnterGCUnsafeRegion
+ num_temps: 1
+
+- name: DebugLeaveGCUnsafeRegion
+ num_temps: 1
+
+# Load the "slots" member out of a JSObject.
+# Input: JSObject pointer
+# Output: slots pointer
+- name: Slots
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+# Load the "elements" member out of a JSObject.
+# Input: JSObject pointer
+# Output: elements pointer
+- name: Elements
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: true
+
+# Load the initialized length from an elements header.
+- name: InitializedLength
+ result_type: WordSized
+ operands:
+ elements: WordSized
+
+# Store to the initialized length in an elements header. Note the input is an
+# *index*, one less than the desired initialized length.
+- name: SetInitializedLength
+ operands:
+ elements: WordSized
+ index: WordSized
+
+# Load the length from an elements header.
+- name: ArrayLength
+ result_type: WordSized
+ operands:
+ elements: WordSized
+
+# Store to the length in an elements header. Note the input is an *index*,
+# one less than the desired length.
+- name: SetArrayLength
+ operands:
+ elements: WordSized
+ index: WordSized
+
+# Load the "length" property of a function.
+- name: FunctionLength
+ result_type: WordSized
+ operands:
+ function: WordSized
+
+# Load the "name" property of a function.
+- name: FunctionName
+ result_type: WordSized
+ operands:
+ function: WordSized
+
+- name: GetNextEntryForIterator
+ result_type: WordSized
+ operands:
+ iter: WordSized
+ result: WordSized
+ num_temps: 3
+ mir_op: true
+
+- name: ArrayBufferByteLength
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+- name: ArrayBufferViewLength
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: true
+
+# Read the byteOffset of an array buffer view.
+- name: ArrayBufferViewByteOffset
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+# Load an array buffer view's elements vector.
+- name: ArrayBufferViewElements
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+# Return the element size of a typed array.
+- name: TypedArrayElementSize
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+- name: GuardHasAttachedArrayBuffer
+ operands:
+ object: WordSized
+ num_temps: 1
+
+# Double to IntPtr, eligible for accessing into a TypedArray or DataView. If
+# the index isn't exactly representable as an IntPtr, depending on the
+# supportOOB flag on the MIR instruction, either bail out or produce an IntPtr
+# which is equivalent to an OOB access.
+- name: GuardNumberToIntPtrIndex
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: true
+
+# Bailout if index >= length.
+- name: BoundsCheck
+ operands:
+ index: WordSized
+ length: WordSized
+ mir_op: true
+
+- name: BoundsCheckRange
+ gen_boilerplate: false
+
+# Bailout if index < minimum.
+- name: BoundsCheckLower
+ operands:
+ index: WordSized
+ mir_op: true
+
+- name: SpectreMaskIndex
+ result_type: WordSized
+ operands:
+ index: WordSized
+ length: WordSized
+ mir_op: true
+
+- name: LoadElementV
+ gen_boilerplate: false
+
+- name: InArray
+ result_type: WordSized
+ operands:
+ elements: WordSized
+ index: WordSized
+ initLength: WordSized
+ mir_op: true
+
+- name: GuardElementNotHole
+ operands:
+ elements: WordSized
+ index: WordSized
+
+- name: LoadElementHole
+ gen_boilerplate: false
+
+- name: StoreElementV
+ gen_boilerplate: false
+
+- name: StoreElementT
+ gen_boilerplate: false
+
+- name: StoreHoleValueElement
+ operands:
+ elements: WordSized
+ index: WordSized
+
+# Like LStoreElementV, but supports indexes >= initialized length.
+- name: StoreElementHoleV
+ operands:
+ object: WordSized
+ elements: WordSized
+ index: WordSized
+ value: BoxedValue
+ num_temps: 1
+ mir_op: StoreElementHole
+
+# Like LStoreElementT, but supports indexes >= initialized length.
+- name: StoreElementHoleT
+ operands:
+ object: WordSized
+ elements: WordSized
+ index: WordSized
+ value: WordSized
+ num_temps: 1
+ mir_op: StoreElementHole
+
+- name: ArrayPopShift
+ gen_boilerplate: false
+
+- name: ArrayPush
+ result_type: WordSized
+ operands:
+ object: WordSized
+ value: BoxedValue
+ num_temps: 2
+ mir_op: true
+
+- name: ArraySlice
+ result_type: WordSized
+ operands:
+ object: WordSized
+ begin: WordSized
+ end: WordSized
+ num_temps: 2
+ call_instruction: true
+ mir_op: true
+
+- name: ArgumentsSlice
+ result_type: WordSized
+ operands:
+ object: WordSized
+ begin: WordSized
+ end: WordSized
+ num_temps: 2
+ call_instruction: true
+ mir_op: true
+
+- name: FrameArgumentsSlice
+ result_type: WordSized
+ operands:
+ begin: WordSized
+ count: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: InlineArgumentsSlice
+ gen_boilerplate: false
+
+- name: NormalizeSliceTerm
+ result_type: WordSized
+ operands:
+ value: WordSized
+ length: WordSized
+ mir_op: true
+
+- name: ArrayJoin
+ result_type: WordSized
+ operands:
+ array: WordSized
+ separator: WordSized
+ num_temps: 1
+ call_instruction: true
+ mir_op: true
+
+- name: LoadUnboxedScalar
+ result_type: WordSized
+ operands:
+ elements: WordSized
+ index: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: LoadUnboxedBigInt
+ gen_boilerplate: false
+
+- name: LoadDataViewElement
+ gen_boilerplate: false
+
+- name: LoadTypedArrayElementHole
+ result_type: BoxedValue
+ operands:
+ object: WordSized
+ index: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: LoadTypedArrayElementHoleBigInt
+ gen_boilerplate: false
+
+- name: StoreUnboxedScalar
+ operands:
+ elements: WordSized
+ index: WordSized
+ value: WordSized
+ mir_op: true
+
+- name: StoreUnboxedBigInt
+ gen_boilerplate: false
+
+- name: StoreDataViewElement
+ gen_boilerplate: false
+
+- name: StoreTypedArrayElementHole
+ operands:
+ elements: WordSized
+ length: WordSized
+ index: WordSized
+ value: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: StoreTypedArrayElementHoleBigInt
+ gen_boilerplate: false
+
+- name: AtomicIsLockFree
+ result_type: WordSized
+ operands:
+ value: WordSized
+
+- name: CompareExchangeTypedArrayElement
+ gen_boilerplate: false
+
+- name: AtomicExchangeTypedArrayElement
+ gen_boilerplate: false
+
+- name: AtomicTypedArrayElementBinop
+ gen_boilerplate: false
+
+- name: AtomicTypedArrayElementBinopForEffect
+ gen_boilerplate: false
+
+- name: AtomicLoad64
+ gen_boilerplate: false
+
+- name: AtomicStore64
+ gen_boilerplate: false
+
+- name: CompareExchangeTypedArrayElement64
+ gen_boilerplate: false
+
+- name: AtomicExchangeTypedArrayElement64
+ gen_boilerplate: false
+
+- name: AtomicTypedArrayElementBinop64
+ gen_boilerplate: false
+
+- name: AtomicTypedArrayElementBinopForEffect64
+ gen_boilerplate: false
+
+- name: EffectiveAddress
+ result_type: WordSized
+ operands:
+ base: WordSized
+ index: WordSized
+ mir_op: true
+
+- name: ClampIToUint8
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+- name: ClampDToUint8
+ result_type: WordSized
+ operands:
+ in: WordSized
+ num_temps: 1
+
+- name: ClampVToUint8
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ num_temps: 1
+ mir_op: ClampToUint8
+
+# Load a boxed value from an object's fixed slot.
+- name: LoadFixedSlotV
+ result_type: BoxedValue
+ operands:
+ object: WordSized
+ mir_op: LoadFixedSlot
+
+# Load a typed value from an object's fixed slot.
+- name: LoadFixedSlotT
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: LoadFixedSlot
+
+- name: LoadFixedSlotAndUnbox
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: true
+
+- name: LoadDynamicSlotAndUnbox
+ result_type: WordSized
+ operands:
+ slots: WordSized
+ mir_op: true
+
+- name: LoadElementAndUnbox
+ result_type: WordSized
+ operands:
+ elements: WordSized
+ index: WordSized
+ mir_op: true
+
+- name: AddAndStoreSlot
+ operands:
+ object: WordSized
+ value: BoxedValue
+ num_temps: 1
+ mir_op: true
+
+- name: AllocateAndStoreSlot
+ operands:
+ object: WordSized
+ value: BoxedValue
+ num_temps: 2
+ call_instruction: true
+ mir_op: true
+
+- name: AddSlotAndCallAddPropHook
+ operands:
+ object: WordSized
+ value: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+# Store a boxed value to an object's fixed slot.
+- name: StoreFixedSlotV
+ operands:
+ obj: WordSized
+ value: BoxedValue
+ mir_op: StoreFixedSlot
+
+# Store a typed value to an object's fixed slot.
+- name: StoreFixedSlotT
+ operands:
+ obj: WordSized
+ value: WordSized
+ mir_op: StoreFixedSlot
+
+# Note, Name ICs always return a Value. There are no V/T variants.
+- name: GetNameCache
+ result_type: BoxedValue
+ operands:
+ envObj: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: CallGetIntrinsicValue
+ result_type: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: GetPropSuperCache
+ result_type: BoxedValue
+ operands:
+ obj: WordSized
+ receiver: BoxedValue
+ id: BoxedValue
+ mir_op: true
+
+# Patchable jump to stubs generated for a GetProperty cache, which loads a
+# boxed value.
+- name: GetPropertyCache
+ result_type: BoxedValue
+ operands:
+ value: BoxedValue
+ id: BoxedValue
+ mir_op: true
+
+- name: BindNameCache
+ result_type: WordSized
+ operands:
+ environmentChain: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: CallBindVar
+ result_type: WordSized
+ operands:
+ environmentChain: WordSized
+ mir_op: true
+
+# Load a value from an object's dslots or a slots vector.
+- name: LoadDynamicSlotV
+ result_type: BoxedValue
+ operands:
+ in: WordSized
+ mir_op: LoadDynamicSlot
+
+# Store a value to an object's dslots or a slots vector.
+- name: StoreDynamicSlotV
+ operands:
+ slots: WordSized
+ value: BoxedValue
+ mir_op: StoreDynamicSlot
+
+# Store a typed value to an object's dslots or a slots vector. This has a
+# few advantages over LStoreDynamicSlotV:
+# 1) We can bypass storing the type tag if the slot has the same type as
+# the value.
+# 2) Better register allocation: we can store constants and FP regs directly
+# without requiring a second register for the value.
+- name: StoreDynamicSlotT
+ operands:
+ slots: WordSized
+ value: WordSized
+ mir_op: StoreDynamicSlot
+
+# Read length field of a JSString*.
+- name: StringLength
+ result_type: WordSized
+ operands:
+ string: WordSized
+
+# Takes the floor of a double precision number and converts it to an int32.
+# Implements Math.floor().
+- name: Floor
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Takes the floor of a single precision number and converts it to an int32.
+# Implements Math.floor().
+- name: FloorF
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Takes the ceiling of a double precision number and converts it to an int32.
+# Implements Math.ceil().
+- name: Ceil
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Takes the ceiling of a single precision number and converts it to an int32.
+# Implements Math.ceil().
+- name: CeilF
+ result_type: WordSized
+ operands:
+ string: WordSized
+
+# Rounds a double precision number and converts it to an int32.
+# Implements Math.round().
+- name: Round
+ result_type: WordSized
+ operands:
+ num: WordSized
+ num_temps: 1
+ mir_op: true
+
+# Rounds a single precision number and converts it to an int32.
+# Implements Math.round().
+- name: RoundF
+ result_type: WordSized
+ operands:
+ num: WordSized
+ num_temps: 1
+ mir_op: Round
+
+# Truncates a double precision number and converts it to an int32.
+# Implements Math.trunc().
+- name: Trunc
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Truncates a single precision number and converts it to an int32.
+# Implements Math.trunc().
+- name: TruncF
+ result_type: WordSized
+ operands:
+ num: WordSized
+
+# Rounds a double precision number according to mir()->roundingMode(),
+# and keeps a double output.
+- name: NearbyInt
+ result_type: WordSized
+ operands:
+ num: WordSized
+ mir_op: true
+
+# Rounds a single precision number according to mir()->roundingMode(),
+# and keeps a single output.
+- name: NearbyIntF
+ result_type: WordSized
+ operands:
+ num: WordSized
+ mir_op: NearbyInt
+
+# Load a function's call environment.
+- name: FunctionEnvironment
+ result_type: WordSized
+ operands:
+ function: WordSized
+
+- name: HomeObject
+ result_type: WordSized
+ operands:
+ function: WordSized
+
+- name: HomeObjectSuperBase
+ result_type: BoxedValue
+ operands:
+ homeObject: WordSized
+
+- name: NewLexicalEnvironmentObject
+ result_type: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: NewClassBodyEnvironmentObject
+ result_type: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: NewVarEnvironmentObject
+ result_type: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: MegamorphicSetElement
+ operands:
+ object: WordSized
+ index: BoxedValue
+ value: BoxedValue
+ # On x86 we do not have enough registers to use 3 temps for this *and* take
+ # five words worth of operands. Since it's 32-bit, though, we get two
+ # registers from pushing `value`, which doesn't get used until the end
+ # anyway. This is somewhat klunky, but oh well.
+#ifdef JS_CODEGEN_X86
+ num_temps: 1
+#else
+ num_temps: 3
+#endif
+ call_instruction: true
+ mir_op: true
+
+- name: CallDeleteProperty
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ call_instruction: true
+ mir_op: DeleteProperty
+
+- name: CallDeleteElement
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ index: BoxedValue
+ call_instruction: true
+ mir_op: DeleteElement
+
+- name: ObjectToIterator
+ result_type: WordSized
+ operands:
+ object: WordSized
+ num_temps: 3
+ mir_op: true
+
+- name: ValueToIterator
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ call_instruction: true
+ mir_op: ValueToIterator
+
+- name: IteratorHasIndicesAndBranch
+ gen_boilerplate: false
+
+- name: LoadSlotByIteratorIndex
+ result_type: BoxedValue
+ operands:
+ object: WordSized
+ iterator: WordSized
+ num_temps: 2
+
+- name: StoreSlotByIteratorIndex
+ operands:
+ object: WordSized
+ iterator: WordSized
+ value: BoxedValue
+ num_temps: 2
+ mir_op: true
+
+# Patchable jump to stubs generated for a SetProperty cache.
+- name: SetPropertyCache
+ operands:
+ object: WordSized
+ id: BoxedValue
+ value: BoxedValue
+ # Takes an additional temp: this is intended to be FloatReg0 to allow the
+ # actual cache code to safely clobber that value without save and restore.
+ num_temps: 2
+ mir_op: true
+
+- name: GetIteratorCache
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ num_temps: 2
+ mir_op: true
+
+- name: OptimizeSpreadCallCache
+ result_type: BoxedValue
+ operands:
+ value: BoxedValue
+ num_temps: 1
+ mir_op: true
+
+- name: IteratorMore
+ result_type: BoxedValue
+ operands:
+ iterator: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: IsNoIterAndBranch
+ gen_boilerplate: false
+
+- name: IteratorEnd
+ operands:
+ object: WordSized
+ num_temps: 3
+ mir_op: true
+
+- name: CloseIterCache
+ operands:
+ iter: WordSized
+ num_temps: 1
+ mir_op: true
+
+# Read the number of actual arguments.
+- name: ArgumentsLength
+ result_type: WordSized
+
+# Load a value from the actual arguments.
+- name: GetFrameArgument
+ result_type: BoxedValue
+ operands:
+ index: WordSized
+
+# Load a value from the actual arguments.
+# Returns undefined if |index| is greater than or equal to |length|. Bails out if
+# |index| is negative.
+- name: GetFrameArgumentHole
+ result_type: BoxedValue
+ operands:
+ index: WordSized
+ length: WordSized
+ num_temps: 1
+
+# Create the rest parameter.
+- name: Rest
+ result_type: WordSized
+ operands:
+ numActuals: WordSized
+ num_temps: 3
+ call_instruction: true
+ mir_op: true
+
+- name: Int32ToIntPtr
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: true
+
+- name: NonNegativeIntPtrToInt32
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+- name: IntPtrToDouble
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+- name: AdjustDataViewLength
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: true
+
+# Convert a Boolean to an Int64, following ToBigInt.
+- name: BooleanToInt64
+ result_type: Int64
+ operands:
+ input: WordSized
+ mir_op: ToInt64
+
+# Convert a String to an Int64, following ToBigInt.
+- name: StringToInt64
+ result_type: Int64
+ operands:
+ input: WordSized
+ mir_op: ToInt64
+
+# Simulate ToBigInt on a Value and produce a matching Int64.
+- name: ValueToInt64
+ result_type: Int64
+ operands:
+ input: BoxedValue
+ num_temps: 1
+ mir_op: ToInt64
+
+# Truncate a BigInt to an unboxed int64.
+- name: TruncateBigIntToInt64
+ result_type: Int64
+ operands:
+ input: WordSized
+ mir_op: true
+
+# Create a new BigInt* from an unboxed int64.
+- name: Int64ToBigInt
+ result_type: WordSized
+ operands:
+ input: Int64
+ num_temps: 1
+ mir_op: true
+
+# Generational write barrier used when writing an object to another object.
+- name: PostWriteBarrierO
+ operands:
+ object: WordSized
+ value: WordSized
+ num_temps: 1
+ mir_op: PostWriteBarrier
+
+# Generational write barrier used when writing a string to an object.
+- name: PostWriteBarrierS
+ operands:
+ object: WordSized
+ value: WordSized
+ num_temps: 1
+ mir_op: PostWriteBarrier
+
+# Generational write barrier used when writing a BigInt to an object.
+- name: PostWriteBarrierBI
+ operands:
+ object: WordSized
+ value: WordSized
+ num_temps: 1
+ mir_op: PostWriteBarrier
+
+# Generational write barrier used when writing a value to another object.
+- name: PostWriteBarrierV
+ operands:
+ object: WordSized
+ value: BoxedValue
+ num_temps: 1
+ mir_op: PostWriteBarrier
+
+# Generational write barrier used when writing an object to another object's
+# elements.
+- name: PostWriteElementBarrierO
+ operands:
+ object: WordSized
+ value: WordSized
+ index: WordSized
+ num_temps: 1
+ mir_op: PostWriteElementBarrier
+
+# Generational write barrier used when writing a string to an object's
+# elements.
+- name: PostWriteElementBarrierS
+ operands:
+ object: WordSized
+ value: WordSized
+ index: WordSized
+ num_temps: 1
+ mir_op: PostWriteElementBarrier
+
+# Generational write barrier used when writing a BigInt to an object's
+# elements.
+- name: PostWriteElementBarrierBI
+ operands:
+ object: WordSized
+ value: WordSized
+ index: WordSized
+ num_temps: 1
+ mir_op: PostWriteElementBarrier
+
+# Generational write barrier used when writing a value to another object's
+# elements.
+- name: PostWriteElementBarrierV
+ operands:
+ object: WordSized
+ index: WordSized
+ value: BoxedValue
+ num_temps: 1
+ mir_op: PostWriteElementBarrier
+
+# Assert in debug mode that a post write barrier can be elided.
+- name: AssertCanElidePostWriteBarrier
+ operands:
+ object: WordSized
+ value: BoxedValue
+ num_temps: 1
+
+# Guard against an object's identity.
+- name: GuardObjectIdentity
+ operands:
+ input: WordSized
+ expected: WordSized
+ mir_op: true
+
+# Guard against a function's identity.
+- name: GuardSpecificFunction
+ operands:
+ input: WordSized
+ expected: WordSized
+
+- name: GuardSpecificAtom
+ operands:
+ str: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: GuardSpecificSymbol
+ operands:
+ symbol: WordSized
+ mir_op: true
+
+- name: GuardSpecificInt32
+ operands:
+ num: WordSized
+ mir_op: true
+
+- name: GuardStringToIndex
+ result_type: WordSized
+ operands:
+ string: WordSized
+
+- name: GuardStringToInt32
+ result_type: WordSized
+ operands:
+ string: WordSized
+ num_temps: 1
+
+- name: GuardStringToDouble
+ result_type: WordSized
+ operands:
+ string: WordSized
+ num_temps: 2
+
+- name: GuardShape
+ result_type: WordSized
+ operands:
+ in: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: GuardMultipleShapes
+ result_type: WordSized
+ operands:
+ object: WordSized
+ shapeList: WordSized
+ num_temps: 4
+ mir_op: true
+
+- name: GuardProto
+ operands:
+ object: WordSized
+ expected: WordSized
+ num_temps: 1
+
+- name: GuardNullProto
+ operands:
+ object: WordSized
+ num_temps: 1
+
+- name: GuardIsNativeObject
+ operands:
+ object: WordSized
+ num_temps: 1
+
+- name: GuardGlobalGeneration
+ mir_op: true
+ num_temps: 1
+
+- name: GuardIsProxy
+ operands:
+ object: WordSized
+ num_temps: 1
+
+- name: GuardIsNotProxy
+ operands:
+ object: WordSized
+ num_temps: 1
+
+- name: GuardIsNotDOMProxy
+ operands:
+ proxy: WordSized
+ num_temps: 1
+
+- name: ProxyGet
+ result_type: BoxedValue
+ operands:
+ proxy: WordSized
+ num_temps: 1
+ call_instruction: true
+ mir_op: true
+
+- name: ProxyGetByValue
+ result_type: BoxedValue
+ operands:
+ proxy: WordSized
+ id: BoxedValue
+ call_instruction: true
+
+- name: ProxyHasProp
+ result_type: BoxedValue
+ operands:
+ proxy: WordSized
+ id: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: ProxySet
+ operands:
+ proxy: WordSized
+ rhs: BoxedValue
+ num_temps: 1
+ call_instruction: true
+ mir_op: true
+
+- name: ProxySetByValue
+ operands:
+ proxy: WordSized
+ id: BoxedValue
+ rhs: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: CallSetArrayLength
+ operands:
+ obj: WordSized
+ rhs: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: MegamorphicLoadSlot
+ result_type: BoxedValue
+ operands:
+ object: WordSized
+ num_temps: 4
+ call_instruction: true
+ mir_op: true
+
+- name: MegamorphicLoadSlotByValue
+ result_type: BoxedValue
+ operands:
+ object: WordSized
+ id: BoxedValue
+ num_temps: 3
+ call_instruction: true
+ mir_op: true
+
+- name: MegamorphicStoreSlot
+ operands:
+ object: WordSized
+ rhs: BoxedValue
+#ifdef JS_CODEGEN_X86
+ num_temps: 1
+#else
+ num_temps: 3
+#endif
+ call_instruction: true
+ mir_op: true
+
+- name: MegamorphicHasProp
+ result_type: WordSized
+ operands:
+ object: WordSized
+ id: BoxedValue
+ num_temps: 3
+ call_instruction: true
+ mir_op: true
+
+- name: GuardIsNotArrayBufferMaybeShared
+ operands:
+ object: WordSized
+ num_temps: 1
+
+- name: GuardIsTypedArray
+ operands:
+ object: WordSized
+ num_temps: 1
+
+- name: GuardNoDenseElements
+ operands:
+ in: WordSized
+ num_temps: 1
+
+- name: InCache
+ result_type: WordSized
+ operands:
+ lhs: BoxedValue
+ rhs: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: HasOwnCache
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ id: BoxedValue
+ mir_op: true
+
+- name: CheckPrivateFieldCache
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ id: BoxedValue
+ mir_op: true
+
+- name: NewPrivateName
+ result_type: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: InstanceOfO
+ result_type: WordSized
+ operands:
+ lhs: WordSized
+ rhs: WordSized
+ mir_op: InstanceOf
+
+- name: InstanceOfV
+ result_type: WordSized
+ operands:
+ lhs: BoxedValue
+ rhs: WordSized
+ mir_op: InstanceOf
+
+- name: InstanceOfCache
+ gen_boilerplate: false
+
+- name: IsCallableO
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: IsCallable
+
+- name: IsCallableV
+ result_type: WordSized
+ operands:
+ object: BoxedValue
+ num_temps: 1
+ mir_op: IsCallable
+
+- name: IsConstructor
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: true
+
+- name: IsCrossRealmArrayConstructor
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+- name: IsArrayO
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: IsArray
+
+- name: IsArrayV
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ num_temps: 1
+ mir_op: IsArray
+
+- name: IsTypedArray
+ result_type: WordSized
+ operands:
+ object: WordSized
+ mir_op: true
+
+- name: IsObject
+ result_type: WordSized
+ operands:
+ object: BoxedValue
+ mir_op: true
+
+- name: IsObjectAndBranch
+ gen_boilerplate: false
+
+- name: IsNullOrUndefined
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ mir_op: true
+
+- name: IsNullOrUndefinedAndBranch
+ gen_boilerplate: false
+
+- name: HasClass
+ result_type: WordSized
+ operands:
+ lhs: WordSized
+ mir_op: true
+
+- name: GuardToClass
+ result_type: WordSized
+ operands:
+ lhs: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: GuardToFunction
+ result_type: WordSized
+ operands:
+ lhs: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: ObjectClassToString
+ result_type: WordSized
+ operands:
+ lhs: WordSized
+ num_temps: 1
+ call_instruction: true
+ mir_op: true
+
+- name: WasmSelect
+ gen_boilerplate: false
+
+- name: WasmSelectI64
+ gen_boilerplate: false
+
+- name: WasmCompareAndSelect
+ gen_boilerplate: false
+
+- name: WasmAddOffset
+ result_type: WordSized
+ operands:
+ base: WordSized
+ mir_op: true
+
+- name: WasmAddOffset64
+ result_type: Int64
+ operands:
+ base: Int64
+ mir_op: WasmAddOffset
+
+- name: WasmBoundsCheck
+ result_type: WordSized
+ operands:
+ ptr: WordSized
+ boundsCheckLimit: WordSized
+ mir_op: true
+
+- name: WasmBoundsCheck64
+ gen_boilerplate: false
+
+- name: WasmExtendU32Index
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: true
+
+- name: WasmWrapU32Index
+ result_type: WordSized
+ operands:
+ input: WordSized
+ mir_op: true
+
+- name: WasmAlignmentCheck
+ operands:
+ ptr: WordSized
+ mir_op: true
+
+- name: WasmAlignmentCheck64
+ operands:
+ ptr: Int64
+ mir_op: WasmAlignmentCheck
+
+- name: WasmLoadInstance
+ result_type: WordSized
+ operands:
+ instance: WordSized
+ mir_op: true
+
+- name: WasmLoadInstance64
+ result_type: Int64
+ operands:
+ instance: WordSized
+ mir_op: WasmLoadInstance
+
+- name: WasmHeapBase
+ result_type: WordSized
+ operands:
+ instance: WordSized
+ mir_op: true
+
+- name: WasmLoad
+ gen_boilerplate: false
+
+- name: WasmLoadI64
+ gen_boilerplate: false
+
+- name: WasmStore
+ gen_boilerplate: false
+
+- name: WasmStoreI64
+ gen_boilerplate: false
+
+- name: AsmJSLoadHeap
+ result_type: WordSized
+ operands:
+ ptr: WordSized
+ boundsCheckLimit: WordSized
+ memoryBase: WordSized
+ mir_op: true
+
+- name: AsmJSStoreHeap
+ result_type: WordSized
+ operands:
+ ptr: WordSized
+ value: WordSized
+ boundsCheckLimit: WordSized
+ memoryBase: WordSized
+ mir_op: true
+
+- name: WasmCompareExchangeHeap
+ gen_boilerplate: false
+
+- name: WasmFence
+
+- name: WasmAtomicExchangeHeap
+ gen_boilerplate: false
+
+- name: WasmAtomicBinopHeap
+ gen_boilerplate: false
+
+- name: WasmAtomicBinopHeapForEffect
+ gen_boilerplate: false
+
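+# Note on entries with an `arguments` block (such as the wasm slot loads and
+# stores below): roughly speaking, `operands` name register allocations the
+# instruction consumes at run time, while `arguments` are plain compile-time
+# values (offsets, MIR types, trap info) carried on the generated LIR class.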
+- name: WasmLoadSlot
+ result_type: WordSized
+ operands:
+ containerRef: WordSized
+ arguments:
+ offset: size_t
+ type: MIRType
+ wideningOp: MWideningOp
+ maybeTrap: MaybeTrapSiteInfo
+
+- name: WasmLoadSlotI64
+ result_type: Int64
+ operands:
+ containerRef: WordSized
+ arguments:
+ offset: size_t
+ maybeTrap: MaybeTrapSiteInfo
+
+- name: WasmStoreSlot
+ operands:
+ value: WordSized
+ containerRef: WordSized
+ arguments:
+ offset: size_t
+ type: MIRType
+ narrowingOp: MNarrowingOp
+ maybeTrap: MaybeTrapSiteInfo
+
+- name: WasmStoreSlotI64
+ operands:
+ value: Int64
+ containerRef: WordSized
+ arguments:
+ offset: size_t
+ maybeTrap: MaybeTrapSiteInfo
+
+- name: WasmLoadTableElement
+ result_type: WordSized
+ operands:
+ elements: WordSized
+ index: WordSized
+
+- name: WasmDerivedPointer
+ gen_boilerplate: false
+
+- name: WasmDerivedIndexPointer
+ gen_boilerplate: false
+
+- name: WasmStoreRef
+ operands:
+ instance: WordSized
+ valueBase: WordSized
+ value: WordSized
+ arguments:
+ offset: uint32_t
+ maybeTrap: MaybeTrapSiteInfo
+ preBarrierKind: WasmPreBarrierKind
+ num_temps: 1
+ mir_op: true
+
+# Generational write barrier used when writing an object to another object.
+- name: WasmPostWriteBarrier
+ operands:
+ instance: WordSized
+ object: WordSized
+ valueBase: WordSized
+ value: WordSized
+ arguments:
+ valueOffset: uint32_t
+ num_temps: 1
+ mir_op: true
+
+- name: WasmParameter
+ result_type: WordSized
+
+- name: WasmParameterI64
+ gen_boilerplate: false
+
+- name: WasmReturn
+ operands:
+ rval: WordSized
+ instance: WordSized
+
+- name: WasmReturnI64
+ operands:
+ rval: Int64
+ instance: WordSized
+
+- name: WasmReturnVoid
+ operands:
+ rval: WordSized
+
+- name: WasmStackArg
+ operands:
+ arg: WordSized
+ mir_op: true
+
+- name: WasmStackArgI64
+ operands:
+ arg: Int64
+ mir_op: WasmStackArg
+
+- name: WasmNullConstant
+ result_type: WordSized
+
+- name: WasmCallIndirectAdjunctSafepoint
+ gen_boilerplate: false
+
+- name: WasmCall
+ gen_boilerplate: false
+
+- name: WasmCallLandingPrePad
+ mir_op: true
+
+- name: WasmRegisterResult
+ gen_boilerplate: false
+
+- name: WasmRegisterPairResult
+ gen_boilerplate: false
+
+- name: WasmStackResultArea
+ result_type: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: WasmStackResult
+ gen_boilerplate: false
+
+- name: WasmStackResult64
+ gen_boilerplate: false
+
+- name: AssertRangeI
+ gen_boilerplate: false
+
+- name: AssertRangeD
+ gen_boilerplate: false
+
+- name: AssertRangeF
+ gen_boilerplate: false
+
+- name: AssertRangeV
+ gen_boilerplate: false
+
+- name: AssertClass
+ operands:
+ input: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: AssertShape
+ operands:
+ input: WordSized
+ mir_op: true
+
+- name: GuardValue
+ operands:
+ input: BoxedValue
+ mir_op: true
+
+- name: GuardNullOrUndefined
+ operands:
+ input: BoxedValue
+ mir_op: true
+
+- name: GuardIsNotObject
+ operands:
+ input: BoxedValue
+ mir_op: true
+
+- name: GuardFunctionFlags
+ operands:
+ function: WordSized
+ mir_op: true
+
+- name: GuardFunctionIsNonBuiltinCtor
+ operands:
+ function: WordSized
+ num_temps: 1
+
+- name: GuardFunctionKind
+ operands:
+ function: WordSized
+ num_temps: 1
+ mir_op: true
+
+- name: GuardFunctionScript
+ operands:
+ function: WordSized
+ mir_op: true
+
+- name: IncrementWarmUpCounter
+ num_temps: 1
+ mir_op: true
+
+- name: LexicalCheck
+ operands:
+ input: BoxedValue
+ mir_op: true
+
+- name: ThrowRuntimeLexicalError
+ call_instruction: true
+ mir_op: true
+
+- name: ThrowMsg
+ call_instruction: true
+ mir_op: true
+
+- name: GlobalDeclInstantiation
+ mir_op: true
+
+- name: MemoryBarrier
+ gen_boilerplate: false
+
+- name: Debugger
+ num_temps: 1
+ call_instruction: true
+
+- name: NewTarget
+ result_type: BoxedValue
+
+- name: Random
+ gen_boilerplate: false
+
+- name: CheckReturn
+ result_type: BoxedValue
+ operands:
+ returnValue: BoxedValue
+ thisValue: BoxedValue
+
+- name: CheckIsObj
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ mir_op: true
+
+- name: CheckObjCoercible
+ operands:
+ value: BoxedValue
+
+- name: CheckClassHeritage
+ operands:
+ heritage: BoxedValue
+ num_temps: 2
+
+- name: CheckThis
+ operands:
+ value: BoxedValue
+
+- name: CheckThisReinit
+ operands:
+ thisValue: BoxedValue
+
+- name: Generator
+ result_type: WordSized
+ operands:
+ callee: WordSized
+ environmentChain: WordSized
+ argsObject: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: AsyncResolve
+ result_type: WordSized
+ operands:
+ generator: WordSized
+ valueOrReason: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: AsyncAwait
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ generator: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: CanSkipAwait
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: MaybeExtractAwaitValue
+ result_type: BoxedValue
+ operands:
+ value: BoxedValue
+ canSkip: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: DebugCheckSelfHosted
+ operands:
+ value: BoxedValue
+ call_instruction: true
+
+- name: IsPackedArray
+ result_type: WordSized
+ operands:
+ object: WordSized
+ num_temps: 1
+
+- name: GuardArrayIsPacked
+ operands:
+ array: WordSized
+ num_temps: 2
+ mir_op: true
+
+- name: GetPrototypeOf
+ result_type: BoxedValue
+ operands:
+ target: WordSized
+
+- name: ObjectWithProto
+ result_type: WordSized
+ operands:
+ prototype: BoxedValue
+ call_instruction: true
+
+- name: ObjectStaticProto
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+- name: BuiltinObject
+ result_type: WordSized
+ call_instruction: true
+ mir_op: true
+
+- name: SuperFunction
+ result_type: BoxedValue
+ operands:
+ callee: WordSized
+ num_temps: 1
+
+- name: InitHomeObject
+ result_type: WordSized
+ operands:
+ function: WordSized
+ homeObject: BoxedValue
+
+- name: IsTypedArrayConstructor
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+- name: LoadValueTag
+ result_type: WordSized
+ operands:
+ value: BoxedValue
+
+- name: GuardTagNotEqual
+ operands:
+ lhs: WordSized
+ rhs: WordSized
+
+- name: LoadWrapperTarget
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+- name: GuardHasGetterSetter
+ operands:
+ object: WordSized
+ num_temps: 3
+ call_instruction: true
+ mir_op: true
+
+- name: GuardIsExtensible
+ operands:
+ object: WordSized
+ num_temps: 1
+
+- name: GuardInt32IsNonNegative
+ operands:
+ index: WordSized
+
+- name: GuardInt32Range
+ operands:
+ input: WordSized
+ mir_op: true
+
+- name: GuardIndexIsNotDenseElement
+ operands:
+ object: WordSized
+ index: WordSized
+ num_temps: 2
+
+- name: GuardIndexIsValidUpdateOrAdd
+ operands:
+ object: WordSized
+ index: WordSized
+ num_temps: 2
+
+- name: CallAddOrUpdateSparseElement
+ operands:
+ object: WordSized
+ index: WordSized
+ value: BoxedValue
+ call_instruction: true
+ mir_op: true
+
+- name: CallGetSparseElement
+ result_type: BoxedValue
+ operands:
+ object: WordSized
+ index: WordSized
+ call_instruction: true
+
+- name: CallNativeGetElement
+ result_type: BoxedValue
+ operands:
+ object: WordSized
+ index: WordSized
+ call_instruction: true
+
+- name: CallNativeGetElementSuper
+ result_type: BoxedValue
+ operands:
+ object: WordSized
+ index: WordSized
+ receiver: BoxedValue
+ call_instruction: true
+
+- name: CallObjectHasSparseElement
+ result_type: WordSized
+ operands:
+ object: WordSized
+ index: WordSized
+ num_temps: 2
+ call_instruction: true
+
+- name: BigIntAsIntN
+ result_type: WordSized
+ operands:
+ bits: WordSized
+ input: WordSized
+ call_instruction: true
+
+- name: BigIntAsIntN64
+ gen_boilerplate: false
+
+- name: BigIntAsIntN32
+ gen_boilerplate: false
+
+- name: GuardNonGCThing
+ operands:
+ input: BoxedValue
+
+- name: ToHashableNonGCThing
+ result_type: BoxedValue
+ operands:
+ input: BoxedValue
+ num_temps: 1
+
+- name: ToHashableString
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+- name: ToHashableValue
+ result_type: BoxedValue
+ operands:
+ input: BoxedValue
+ num_temps: 1
+
+- name: HashNonGCThing
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+ num_temps: 1
+
+- name: HashString
+ result_type: WordSized
+ operands:
+ input: WordSized
+ num_temps: 1
+
+- name: HashSymbol
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+- name: HashBigInt
+ result_type: WordSized
+ operands:
+ input: WordSized
+ num_temps: 3
+
+- name: HashObject
+ result_type: WordSized
+ operands:
+ setObject: WordSized
+ input: BoxedValue
+ num_temps: 4
+
+- name: HashValue
+ result_type: WordSized
+ operands:
+ setObject: WordSized
+ input: BoxedValue
+ num_temps: 4
+
+- name: SetObjectHasNonBigInt
+ result_type: WordSized
+ operands:
+ setObject: WordSized
+ input: BoxedValue
+ hash: WordSized
+ num_temps: 2
+
+- name: SetObjectHasBigInt
+ result_type: WordSized
+ operands:
+ setObject: WordSized
+ input: BoxedValue
+ hash: WordSized
+ num_temps: 4
+
+- name: SetObjectHasValue
+ result_type: WordSized
+ operands:
+ setObject: WordSized
+ input: BoxedValue
+ hash: WordSized
+ num_temps: 4
+
+- name: SetObjectHasValueVMCall
+ result_type: WordSized
+ call_instruction: true
+ operands:
+ setObject: WordSized
+ input: BoxedValue
+
+- name: SetObjectSize
+ result_type: WordSized
+ operands:
+ setObject: WordSized
+
+- name: MapObjectHasNonBigInt
+ result_type: WordSized
+ operands:
+ mapObject: WordSized
+ input: BoxedValue
+ hash: WordSized
+ num_temps: 2
+
+- name: MapObjectHasBigInt
+ result_type: WordSized
+ operands:
+ mapObject: WordSized
+ input: BoxedValue
+ hash: WordSized
+ num_temps: 4
+
+- name: MapObjectHasValue
+ result_type: WordSized
+ operands:
+ mapObject: WordSized
+ input: BoxedValue
+ hash: WordSized
+ num_temps: 4
+
+- name: MapObjectHasValueVMCall
+ result_type: WordSized
+ call_instruction: true
+ operands:
+ mapObject: WordSized
+ input: BoxedValue
+
+- name: MapObjectGetNonBigInt
+ result_type: BoxedValue
+ operands:
+ mapObject: WordSized
+ input: BoxedValue
+ hash: WordSized
+ num_temps: 2
+
+- name: MapObjectGetBigInt
+ result_type: BoxedValue
+ operands:
+ mapObject: WordSized
+ input: BoxedValue
+ hash: WordSized
+ num_temps: 4
+
+- name: MapObjectGetValue
+ result_type: BoxedValue
+ operands:
+ mapObject: WordSized
+ input: BoxedValue
+ hash: WordSized
+ num_temps: 4
+
+- name: MapObjectGetValueVMCall
+ result_type: BoxedValue
+ call_instruction: true
+ operands:
+ mapObject: WordSized
+ input: BoxedValue
+
+- name: MapObjectSize
+ result_type: WordSized
+ operands:
+ mapObject: WordSized
+
+- name: BigIntAsUintN
+ result_type: WordSized
+ operands:
+ bits: WordSized
+ input: WordSized
+ call_instruction: true
+
+- name: BigIntAsUintN64
+ gen_boilerplate: false
+
+- name: BigIntAsUintN32
+ gen_boilerplate: false
+
+- name: IonToWasmCall
+ gen_boilerplate: false
+
+- name: IonToWasmCallV
+ gen_boilerplate: false
+
+- name: IonToWasmCallI64
+ gen_boilerplate: false
+
+- name: WasmBoxValue
+ result_type: WordSized
+ operands:
+ input: BoxedValue
+
+- name: WasmAnyRefFromJSObject
+ result_type: WordSized
+ operands:
+ input: WordSized
+
+# Constant Simd128
+- name: Simd128
+ result_type: WordSized
+ arguments:
+ simd128: SimdConstant
+
+- name: WasmTernarySimd128
+ gen_boilerplate: false
+
+- name: WasmBinarySimd128
+ gen_boilerplate: false
+
+- name: WasmBinarySimd128WithConstant
+ gen_boilerplate: false
+
+- name: WasmVariableShiftSimd128
+ gen_boilerplate: false
+
+- name: WasmConstantShiftSimd128
+ gen_boilerplate: false
+
+- name: WasmSignReplicationSimd128
+ gen_boilerplate: false
+
+- name: WasmShuffleSimd128
+ gen_boilerplate: false
+
+- name: WasmPermuteSimd128
+ gen_boilerplate: false
+
+- name: WasmReplaceLaneSimd128
+ gen_boilerplate: false
+
+- name: WasmReplaceInt64LaneSimd128
+ gen_boilerplate: false
+
+- name: WasmScalarToSimd128
+ gen_boilerplate: false
+
+- name: WasmInt64ToSimd128
+ gen_boilerplate: false
+
+- name: WasmUnarySimd128
+ gen_boilerplate: false
+
+- name: WasmReduceSimd128
+ gen_boilerplate: false
+
+- name: WasmReduceAndBranchSimd128
+ gen_boilerplate: false
+
+- name: WasmReduceSimd128ToInt64
+ gen_boilerplate: false
+
+- name: WasmLoadLaneSimd128
+ gen_boilerplate: false
+
+- name: WasmStoreLaneSimd128
+ gen_boilerplate: false
+
+- name: Unbox
+ gen_boilerplate: false
+
+- name: UnboxFloatingPoint
+ gen_boilerplate: false
+
+- name: WasmUint32ToDouble
+ gen_boilerplate: false
+
+- name: WasmUint32ToFloat32
+ gen_boilerplate: false
+
+- name: DivI
+ gen_boilerplate: false
+
+- name: ModI
+ gen_boilerplate: false
+
+- name: DivPowTwoI
+ gen_boilerplate: false
+
+- name: ModPowTwoI
+ gen_boilerplate: false
+
+- name: TableSwitch
+ gen_boilerplate: false
+
+- name: TableSwitchV
+ gen_boilerplate: false
+
+- name: MulI
+ gen_boilerplate: false
+
+#ifdef JS_CODEGEN_X86
+- name: BoxFloatingPoint
+ gen_boilerplate: false
+
+- name: DivOrModI64
+ gen_boilerplate: false
+
+- name: UDivOrModI64
+ gen_boilerplate: false
+
+- name: DivOrModConstantI
+ gen_boilerplate: false
+
+- name: UDivOrMod
+ gen_boilerplate: false
+
+- name: UDivOrModConstant
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt64
+ gen_boilerplate: false
+
+- name: Int64ToFloatingPoint
+ gen_boilerplate: false
+
+- name: WasmAtomicLoadI64
+ gen_boilerplate: false
+
+- name: WasmAtomicStoreI64
+ gen_boilerplate: false
+
+- name: WasmCompareExchangeI64
+ gen_boilerplate: false
+
+- name: WasmAtomicBinopI64
+ gen_boilerplate: false
+
+- name: WasmAtomicExchangeI64
+ gen_boilerplate: false
+#endif
+
+#ifdef JS_CODEGEN_X64
+- name: DivOrModI64
+ gen_boilerplate: false
+
+- name: UDivOrModI64
+ gen_boilerplate: false
+
+- name: DivOrModConstantI
+ gen_boilerplate: false
+
+- name: UDivOrMod
+ gen_boilerplate: false
+
+- name: UDivOrModConstant
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt64
+ gen_boilerplate: false
+
+- name: Int64ToFloatingPoint
+ gen_boilerplate: false
+#endif
+
+#ifdef JS_CODEGEN_ARM
+- name: BoxFloatingPoint
+ gen_boilerplate: false
+
+- name: DivOrModI64
+ gen_boilerplate: false
+
+- name: UDivOrModI64
+ gen_boilerplate: false
+
+- name: SoftDivI
+ gen_boilerplate: false
+
+- name: SoftModI
+ gen_boilerplate: false
+
+- name: ModMaskI
+ gen_boilerplate: false
+
+- name: UDiv
+ gen_boilerplate: false
+
+- name: UMod
+ gen_boilerplate: false
+
+- name: SoftUDivOrMod
+ gen_boilerplate: false
+
+- name: Int64ToFloatingPointCall
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt64
+ gen_boilerplate: false
+
+- name: WasmAtomicLoadI64
+ gen_boilerplate: false
+
+- name: WasmAtomicStoreI64
+ gen_boilerplate: false
+
+- name: WasmCompareExchangeI64
+ gen_boilerplate: false
+
+- name: WasmAtomicBinopI64
+ gen_boilerplate: false
+
+- name: WasmAtomicExchangeI64
+ gen_boilerplate: false
+#endif
+
+#ifdef JS_CODEGEN_ARM64
+- name: DivOrModI64
+ gen_boilerplate: false
+
+- name: UDivOrModI64
+ gen_boilerplate: false
+
+- name: DivConstantI
+ gen_boilerplate: false
+
+- name: UDivConstantI
+ gen_boilerplate: false
+
+- name: ModMaskI
+ gen_boilerplate: false
+
+- name: UDiv
+ gen_boilerplate: false
+
+- name: UMod
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt64
+ gen_boilerplate: false
+
+- name: Int64ToFloatingPoint
+ gen_boilerplate: false
+#endif
+
+#ifdef JS_CODEGEN_MIPS32
+- name: BoxFloatingPoint
+ gen_boilerplate: false
+
+- name: DivOrModI64
+ gen_boilerplate: false
+
+- name: UDivOrModI64
+ gen_boilerplate: false
+
+- name: UDivOrMod
+ gen_boilerplate: false
+
+- name: ModMaskI
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt64
+ gen_boilerplate: false
+
+- name: Int64ToFloatingPoint
+ gen_boilerplate: false
+
+- name: WasmUnalignedLoad
+ gen_boilerplate: false
+
+- name: WasmUnalignedLoadI64
+ gen_boilerplate: false
+
+- name: WasmUnalignedStore
+ gen_boilerplate: false
+
+- name: WasmUnalignedStoreI64
+ gen_boilerplate: false
+
+- name: WasmAtomicLoadI64
+ gen_boilerplate: false
+
+- name: WasmAtomicStoreI64
+ gen_boilerplate: false
+
+- name: WasmCompareExchangeI64
+ gen_boilerplate: false
+
+- name: WasmAtomicBinopI64
+ gen_boilerplate: false
+
+- name: WasmAtomicExchangeI64
+ gen_boilerplate: false
+#endif
+
+#ifdef JS_CODEGEN_MIPS64
+- name: DivOrModI64
+ gen_boilerplate: false
+
+- name: UDivOrMod
+ gen_boilerplate: false
+
+- name: UDivOrModI64
+ gen_boilerplate: false
+
+- name: ModMaskI
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt64
+ gen_boilerplate: false
+
+- name: Int64ToFloatingPoint
+ gen_boilerplate: false
+
+- name: WasmUnalignedLoad
+ gen_boilerplate: false
+
+- name: WasmUnalignedLoadI64
+ gen_boilerplate: false
+
+- name: WasmUnalignedStore
+ gen_boilerplate: false
+
+- name: WasmUnalignedStoreI64
+ gen_boilerplate: false
+
+- name: WasmCompareExchangeI64
+ gen_boilerplate: false
+
+- name: WasmAtomicBinopI64
+ gen_boilerplate: false
+
+- name: WasmAtomicExchangeI64
+ gen_boilerplate: false
+#endif
+
+#ifdef JS_CODEGEN_LOONG64
+- name: DivOrModI64
+ gen_boilerplate: false
+
+- name: UDivOrMod
+ gen_boilerplate: false
+
+- name: UDivOrModI64
+ gen_boilerplate: false
+
+- name: ModMaskI
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt64
+ gen_boilerplate: false
+
+- name: Int64ToFloatingPoint
+ gen_boilerplate: false
+
+- name: WasmCompareExchangeI64
+ gen_boilerplate: false
+
+- name: WasmAtomicBinopI64
+ gen_boilerplate: false
+
+- name: WasmAtomicExchangeI64
+ gen_boilerplate: false
+#endif
+
+#ifdef JS_CODEGEN_RISCV64
+- name: DivOrModI64
+ gen_boilerplate: false
+
+- name: UDivOrMod
+ gen_boilerplate: false
+
+- name: UDivOrModI64
+ gen_boilerplate: false
+
+- name: ModMaskI
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt64
+ gen_boilerplate: false
+
+- name: Int64ToFloatingPoint
+ gen_boilerplate: false
+
+- name: WasmCompareExchangeI64
+ gen_boilerplate: false
+
+- name: WasmAtomicBinopI64
+ gen_boilerplate: false
+
+- name: WasmAtomicExchangeI64
+ gen_boilerplate: false
+#endif
+
+#ifdef FUZZING_JS_FUZZILLI
+- name: FuzzilliHashT
+ gen_boilerplate: false
+
+- name: FuzzilliHashV
+ gen_boilerplate: false
+
+- name: FuzzilliHashStore
+ gen_boilerplate: false
+#endif
diff --git a/js/src/jit/Label.cpp b/js/src/jit/Label.cpp
new file mode 100644
index 0000000000..e1c9db76c4
--- /dev/null
+++ b/js/src/jit/Label.cpp
@@ -0,0 +1,29 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Label.h"
+
+#include "mozilla/Assertions.h"
+
+#include "jit/CompileWrappers.h"
+#include "jit/JitContext.h"
+#include "js/Utility.h"
+
+namespace js::jit {
+
+#ifdef DEBUG
+Label::~Label() {
+ // The assertion below doesn't hold if an error occurred.
+ JitContext* context = MaybeGetJitContext();
+ bool hadError =
+ js::oom::HadSimulatedOOM() ||
+ (context && context->runtime && context->runtime->hadOutOfMemory()) ||
+ (context && !context->runtime && context->hasOOM());
+ MOZ_ASSERT_IF(!hadError, !used());
+}
+#endif
+
+} // namespace js::jit
diff --git a/js/src/jit/Label.h b/js/src/jit/Label.h
new file mode 100644
index 0000000000..bf78d3c5b8
--- /dev/null
+++ b/js/src/jit/Label.h
@@ -0,0 +1,106 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Label_h
+#define jit_Label_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+namespace js {
+namespace jit {
+
+struct LabelBase {
+ private:
+ // We use uint32_t instead of bool to ensure MSVC packs these fields
+ // correctly.
+ uint32_t bound_ : 1;
+
+ // offset_ < INVALID_OFFSET means that the label is either bound or has
+ // incoming uses and needs to be bound.
+ uint32_t offset_ : 31;
+
+ void operator=(const LabelBase& label) = delete;
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ public:
+#endif
+ static const uint32_t INVALID_OFFSET = 0x7fffffff; // UINT31_MAX.
+
+ public:
+ LabelBase() : bound_(false), offset_(INVALID_OFFSET) {}
+
+ // If the label is bound, all incoming edges have been patched and any
+ // future incoming edges will be immediately patched.
+ bool bound() const { return bound_; }
+ int32_t offset() const {
+ MOZ_ASSERT(bound() || used());
+ return offset_;
+ }
+ // Returns whether the label is not bound, but has incoming uses.
+ bool used() const { return !bound() && offset_ < INVALID_OFFSET; }
+ // Binds the label, fixing its final position in the code stream.
+ void bind(int32_t offset) {
+ MOZ_ASSERT(!bound());
+ MOZ_ASSERT(offset >= 0);
+ MOZ_ASSERT(uint32_t(offset) < INVALID_OFFSET);
+ offset_ = offset;
+ bound_ = true;
+ MOZ_ASSERT(offset_ == offset, "offset fits in 31 bits");
+ }
+ // Marks the label as neither bound nor used.
+ void reset() {
+ offset_ = INVALID_OFFSET;
+ bound_ = false;
+ }
+ // Sets the label's latest used position.
+ void use(int32_t offset) {
+ MOZ_ASSERT(!bound());
+ MOZ_ASSERT(offset >= 0);
+ MOZ_ASSERT(uint32_t(offset) < INVALID_OFFSET);
+ offset_ = offset;
+ MOZ_ASSERT(offset_ == offset, "offset fits in 31 bits");
+ }
+};
+
+// A label represents a position in an assembly buffer that may or may not have
+// already been generated. A label can be either "bound" or "unbound": the
+// former means its position is known, the latter that its position is not
+// yet known.
+//
+// A jump to an unbound label adds that jump to the label's incoming queue. A
+// jump to a bound label automatically computes the jump distance. The process
+// of binding a label automatically corrects all incoming jumps.
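+//
+// Illustrative sketch of the use/bind life cycle with a MacroAssembler (the
+// exact branch helpers vary by backend):
+//
+//   Label done;                 // starts out unbound and unused
+//   masm.branch32(..., &done);  // records an incoming use of the label
+//   ...                         // code run when the branch is not taken
+//   masm.bind(&done);           // fixes the position and patches the use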
+class Label : public LabelBase {
+ public:
+#ifdef DEBUG
+ ~Label();
+#endif
+};
+
+static_assert(sizeof(Label) == sizeof(uint32_t),
+ "Label should have same size as uint32_t");
+
+// Label's destructor asserts that if it has been used it has also been bound.
+// In the case of long-lived labels, however, failed compilation (e.g. OOM) will
+// trigger this failure innocuously. This Label silences the assertion.
+class NonAssertingLabel : public Label {
+ public:
+#ifdef DEBUG
+ ~NonAssertingLabel() {
+ if (used()) {
+ bind(0);
+ }
+ }
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_Label_h
diff --git a/js/src/jit/Linker.cpp b/js/src/jit/Linker.cpp
new file mode 100644
index 0000000000..aedebcecbe
--- /dev/null
+++ b/js/src/jit/Linker.cpp
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Linker.h"
+
+#include "jit/JitZone.h"
+#include "util/Memory.h"
+
+#include "gc/StoreBuffer-inl.h"
+
+namespace js {
+namespace jit {
+
+JitCode* Linker::newCode(JSContext* cx, CodeKind kind) {
+ JS::AutoAssertNoGC nogc(cx);
+ if (masm.oom()) {
+ return fail(cx);
+ }
+
+ static const size_t ExecutableAllocatorAlignment = sizeof(void*);
+ static_assert(CodeAlignment >= ExecutableAllocatorAlignment,
+ "Unexpected alignment requirements");
+
+ // We require enough bytes for the code, header, and worst-case alignment
+ // padding.
+ size_t bytesNeeded = masm.bytesNeeded() + sizeof(JitCodeHeader) +
+ (CodeAlignment - ExecutableAllocatorAlignment);
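+
+ // Rough layout of the resulting allocation (sketch): the JitCodeHeader,
+ // then any padding needed to reach a CodeAlignment boundary, then the code
+ // itself; `headerSize` below is the header-plus-padding prefix.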
+ if (bytesNeeded >= MAX_BUFFER_SIZE) {
+ return fail(cx);
+ }
+
+ // ExecutableAllocator requires bytesNeeded to be aligned.
+ bytesNeeded = AlignBytes(bytesNeeded, ExecutableAllocatorAlignment);
+
+ JitZone* jitZone = cx->zone()->getJitZone(cx);
+ if (!jitZone) {
+ // Note: don't call fail(cx) here, getJitZone reports OOM.
+ return nullptr;
+ }
+
+ ExecutablePool* pool;
+ uint8_t* result =
+ (uint8_t*)jitZone->execAlloc().alloc(cx, bytesNeeded, &pool, kind);
+ if (!result) {
+ return fail(cx);
+ }
+
+ // The JitCodeHeader will be stored right before the code buffer.
+ uint8_t* codeStart = result + sizeof(JitCodeHeader);
+
+ // Bump the code up to a nice alignment.
+ codeStart = (uint8_t*)AlignBytes((uintptr_t)codeStart, CodeAlignment);
+ MOZ_ASSERT(codeStart + masm.bytesNeeded() <= result + bytesNeeded);
+ uint32_t headerSize = codeStart - result;
+ JitCode* code =
+ JitCode::New<NoGC>(cx, codeStart, bytesNeeded, headerSize, pool, kind);
+ if (!code) {
+ return fail(cx);
+ }
+ if (masm.oom()) {
+ return fail(cx);
+ }
+ awjcf.emplace(result, bytesNeeded);
+ if (!awjcf->makeWritable()) {
+ return fail(cx);
+ }
+ code->copyFrom(masm);
+ masm.link(code);
+ if (masm.embedsNurseryPointers()) {
+ cx->runtime()->gc.storeBuffer().putWholeCell(code);
+ }
+ return code;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/Linker.h b/js/src/jit/Linker.h
new file mode 100644
index 0000000000..01df1c819a
--- /dev/null
+++ b/js/src/jit/Linker.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Linker_h
+#define jit_Linker_h
+
+#include "mozilla/Maybe.h"
+
+#include <stdint.h>
+
+#include "jstypes.h"
+
+#include "jit/AutoWritableJitCode.h"
+#include "jit/MacroAssembler.h"
+#include "vm/Runtime.h"
+
+struct JS_PUBLIC_API JSContext;
+
+namespace js {
+namespace jit {
+
+class JitCode;
+
+enum class CodeKind : uint8_t;
+
+class Linker {
+ MacroAssembler& masm;
+ mozilla::Maybe<AutoWritableJitCodeFallible> awjcf;
+
+ JitCode* fail(JSContext* cx) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ public:
+ // Construct a linker with a rooted macro assembler.
+ explicit Linker(MacroAssembler& masm) : masm(masm) { masm.finish(); }
+
+ // Create a new JitCode object and populate it with the contents of the
+ // macro assembler buffer.
+ //
+ // This method cannot GC. Errors are reported to the context.
+ JitCode* newCode(JSContext* cx, CodeKind kind);
+};
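+
+// Typical use, as an illustrative sketch (the CodeKind passed depends on the
+// caller; error handling follows the comment on newCode above):
+//
+//   Linker linker(masm);
+//   JitCode* code = linker.newCode(cx, CodeKind::Other);
+//   if (!code) {
+//     return nullptr;  // newCode already reported the error to cx.
+//   }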
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Linker_h */
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
new file mode 100644
index 0000000000..781a536252
--- /dev/null
+++ b/js/src/jit/Lowering.cpp
@@ -0,0 +1,7172 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Lowering.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <type_traits>
+
+#include "jit/ABIArgGenerator.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/JitSpewer.h"
+#include "jit/LIR.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "jit/SharedICRegisters.h"
+#include "js/experimental/JitInfo.h" // JSJitInfo
+#include "util/Memory.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmInstanceData.h"
+#include "wasm/WasmJS.h" // for wasm::ReportSimdAnalysis
+
+#include "jit/shared/Lowering-shared-inl.h"
+#include "vm/BytecodeUtil-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using JS::GenericNaN;
+using mozilla::DebugOnly;
+
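+// A brief orientation to the helpers used throughout this file (informal
+// summary, not authoritative documentation): define()/defineBox()/
+// defineReturn() attach an output policy to the new LIR instruction (a
+// register, a boxed Value, or the fixed return registers, respectively);
+// add() is used for instructions with no definition; assignSafepoint()
+// records a GC safepoint for instructions that may call into the VM; and
+// assignSnapshot() attaches the bailout state used if the instruction needs
+// to deoptimize.
+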
+LBoxAllocation LIRGenerator::useBoxFixedAtStart(MDefinition* mir,
+ ValueOperand op) {
+#if defined(JS_NUNBOX32)
+ return useBoxFixed(mir, op.typeReg(), op.payloadReg(), true);
+#elif defined(JS_PUNBOX64)
+ return useBoxFixed(mir, op.valueReg(), op.scratchReg(), true);
+#endif
+}
+
+LBoxAllocation LIRGenerator::useBoxAtStart(MDefinition* mir,
+ LUse::Policy policy) {
+ return useBox(mir, policy, /* useAtStart = */ true);
+}
+
+void LIRGenerator::visitParameter(MParameter* param) {
+ ptrdiff_t offset;
+ if (param->index() == MParameter::THIS_SLOT) {
+ offset = THIS_FRAME_ARGSLOT;
+ } else {
+ offset = 1 + param->index();
+ }
+
+ LParameter* ins = new (alloc()) LParameter;
+ defineBox(ins, param, LDefinition::FIXED);
+
+ offset *= sizeof(Value);
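+ // On NUNBOX32 a boxed Value occupies two 4-byte words (type tag and
+ // payload); which word sits at the lower address depends on endianness,
+ // hence the +4 adjustments below. On PUNBOX64 the whole Value is one word.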
+#if defined(JS_NUNBOX32)
+# if MOZ_BIG_ENDIAN()
+ ins->getDef(0)->setOutput(LArgument(offset));
+ ins->getDef(1)->setOutput(LArgument(offset + 4));
+# else
+ ins->getDef(0)->setOutput(LArgument(offset + 4));
+ ins->getDef(1)->setOutput(LArgument(offset));
+# endif
+#elif defined(JS_PUNBOX64)
+ ins->getDef(0)->setOutput(LArgument(offset));
+#endif
+}
+
+void LIRGenerator::visitCallee(MCallee* ins) {
+ define(new (alloc()) LCallee(), ins);
+}
+
+void LIRGenerator::visitIsConstructing(MIsConstructing* ins) {
+ define(new (alloc()) LIsConstructing(), ins);
+}
+
+void LIRGenerator::visitGoto(MGoto* ins) {
+ add(new (alloc()) LGoto(ins->target()));
+}
+
+void LIRGenerator::visitTableSwitch(MTableSwitch* tableswitch) {
+ MDefinition* opd = tableswitch->getOperand(0);
+
+ // There should be at least 1 successor. The default case!
+ MOZ_ASSERT(tableswitch->numSuccessors() > 0);
+
+ // If there are no cases, the default case is always taken.
+ if (tableswitch->numSuccessors() == 1) {
+ add(new (alloc()) LGoto(tableswitch->getDefault()));
+ return;
+ }
+
+ // If we don't know the type.
+ if (opd->type() == MIRType::Value) {
+ LTableSwitchV* lir = newLTableSwitchV(tableswitch);
+ add(lir);
+ return;
+ }
+
+ // Case indices are numeric, so other types will always go to the default
+ // case.
+ if (opd->type() != MIRType::Int32 && opd->type() != MIRType::Double) {
+ add(new (alloc()) LGoto(tableswitch->getDefault()));
+ return;
+ }
+
+ // Return an LTableSwitch, capable of handling either an integer or
+ // floating-point index.
+ LAllocation index;
+ LDefinition tempInt;
+ if (opd->type() == MIRType::Int32) {
+ index = useRegisterAtStart(opd);
+ tempInt = tempCopy(opd, 0);
+ } else {
+ index = useRegister(opd);
+ tempInt = temp(LDefinition::GENERAL);
+ }
+ add(newLTableSwitch(index, tempInt, tableswitch));
+}
+
+void LIRGenerator::visitCheckOverRecursed(MCheckOverRecursed* ins) {
+ LCheckOverRecursed* lir = new (alloc()) LCheckOverRecursed();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewArray(MNewArray* ins) {
+ LNewArray* lir = new (alloc()) LNewArray(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewArrayDynamicLength(MNewArrayDynamicLength* ins) {
+ MDefinition* length = ins->length();
+ MOZ_ASSERT(length->type() == MIRType::Int32);
+
+ LNewArrayDynamicLength* lir =
+ new (alloc()) LNewArrayDynamicLength(useRegister(length), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewIterator(MNewIterator* ins) {
+ LNewIterator* lir = new (alloc()) LNewIterator(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewTypedArray(MNewTypedArray* ins) {
+ LNewTypedArray* lir = new (alloc()) LNewTypedArray(temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewTypedArrayDynamicLength(
+ MNewTypedArrayDynamicLength* ins) {
+ MDefinition* length = ins->length();
+ MOZ_ASSERT(length->type() == MIRType::Int32);
+
+ LNewTypedArrayDynamicLength* lir =
+ new (alloc()) LNewTypedArrayDynamicLength(useRegister(length), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewTypedArrayFromArray(MNewTypedArrayFromArray* ins) {
+ MDefinition* array = ins->array();
+ MOZ_ASSERT(array->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LNewTypedArrayFromArray(useRegisterAtStart(array));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewTypedArrayFromArrayBuffer(
+ MNewTypedArrayFromArrayBuffer* ins) {
+ MDefinition* arrayBuffer = ins->arrayBuffer();
+ MDefinition* byteOffset = ins->byteOffset();
+ MDefinition* length = ins->length();
+ MOZ_ASSERT(arrayBuffer->type() == MIRType::Object);
+ MOZ_ASSERT(byteOffset->type() == MIRType::Value);
+ MOZ_ASSERT(length->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LNewTypedArrayFromArrayBuffer(
+ useRegisterAtStart(arrayBuffer), useBoxAtStart(byteOffset),
+ useBoxAtStart(length));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewObject(MNewObject* ins) {
+ LNewObject* lir = new (alloc()) LNewObject(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBindFunction(MBindFunction* ins) {
+ MDefinition* target = ins->target();
+ MOZ_ASSERT(target->type() == MIRType::Object);
+
+ if (!lowerCallArguments(ins)) {
+ abort(AbortReason::Alloc, "OOM: LIRGenerator::visitBindFunction");
+ return;
+ }
+
+ auto* lir = new (alloc())
+ LBindFunction(useFixedAtStart(target, CallTempReg0),
+ tempFixed(CallTempReg1), tempFixed(CallTempReg2));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewBoundFunction(MNewBoundFunction* ins) {
+ auto* lir = new (alloc()) LNewBoundFunction(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewPlainObject(MNewPlainObject* ins) {
+ LNewPlainObject* lir = new (alloc()) LNewPlainObject(temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewArrayObject(MNewArrayObject* ins) {
+ LNewArrayObject* lir = new (alloc()) LNewArrayObject(temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewNamedLambdaObject(MNewNamedLambdaObject* ins) {
+ LNewNamedLambdaObject* lir = new (alloc()) LNewNamedLambdaObject(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewCallObject(MNewCallObject* ins) {
+ LNewCallObject* lir = new (alloc()) LNewCallObject(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewStringObject(MNewStringObject* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::String);
+
+ LNewStringObject* lir =
+ new (alloc()) LNewStringObject(useRegister(ins->input()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitInitElemGetterSetter(MInitElemGetterSetter* ins) {
+ LInitElemGetterSetter* lir = new (alloc()) LInitElemGetterSetter(
+ useRegisterAtStart(ins->object()), useBoxAtStart(ins->id()),
+ useRegisterAtStart(ins->value()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitMutateProto(MMutateProto* ins) {
+ LMutateProto* lir = new (alloc()) LMutateProto(
+ useRegisterAtStart(ins->object()), useBoxAtStart(ins->value()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitInitPropGetterSetter(MInitPropGetterSetter* ins) {
+ LInitPropGetterSetter* lir = new (alloc()) LInitPropGetterSetter(
+ useRegisterAtStart(ins->object()), useRegisterAtStart(ins->value()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCreateThis(MCreateThis* ins) {
+ LCreateThis* lir =
+ new (alloc()) LCreateThis(useRegisterOrConstantAtStart(ins->callee()),
+ useRegisterOrConstantAtStart(ins->newTarget()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCreateArgumentsObject(MCreateArgumentsObject* ins) {
+ LAllocation callObj = useRegisterAtStart(ins->getCallObject());
+ LCreateArgumentsObject* lir = new (alloc())
+ LCreateArgumentsObject(callObj, tempFixed(CallTempReg0),
+ tempFixed(CallTempReg1), tempFixed(CallTempReg2));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCreateInlinedArgumentsObject(
+ MCreateInlinedArgumentsObject* ins) {
+ LAllocation callObj = useRegisterAtStart(ins->getCallObject());
+ LAllocation callee = useRegisterAtStart(ins->getCallee());
+ uint32_t numActuals = ins->numActuals();
+ uint32_t numOperands = numActuals * BOX_PIECES +
+ LCreateInlinedArgumentsObject::NumNonArgumentOperands;
+
+ auto* lir = allocateVariadic<LCreateInlinedArgumentsObject>(
+ numOperands, tempFixed(CallTempReg0), tempFixed(CallTempReg1));
+ if (!lir) {
+ abort(AbortReason::Alloc,
+ "OOM: LIRGenerator::visitCreateInlinedArgumentsObject");
+ return;
+ }
+
+ lir->setOperand(LCreateInlinedArgumentsObject::CallObj, callObj);
+ lir->setOperand(LCreateInlinedArgumentsObject::Callee, callee);
+ for (uint32_t i = 0; i < numActuals; i++) {
+ MDefinition* arg = ins->getArg(i);
+ uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(i);
+ lir->setBoxOperand(index, useBoxOrTypedOrConstant(arg,
+ /*useConstant = */ true,
+ /*useAtStart = */ true));
+ }
+
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGetInlinedArgument(MGetInlinedArgument* ins) {
+#if defined(JS_PUNBOX64)
+ // On 64-bit architectures, we don't support boxing a typed register
+ // in-place without using a scratch register, so the result register
+ // can't be the same as any of the inputs. Fortunately, those
+ // architectures have registers to spare.
+ const bool useAtStart = false;
+#else
+ const bool useAtStart = true;
+#endif
+
+ LAllocation index =
+ useAtStart ? useRegisterAtStart(ins->index()) : useRegister(ins->index());
+ uint32_t numActuals = ins->numActuals();
+ uint32_t numOperands =
+ numActuals * BOX_PIECES + LGetInlinedArgument::NumNonArgumentOperands;
+
+ auto* lir = allocateVariadic<LGetInlinedArgument>(numOperands);
+ if (!lir) {
+ abort(AbortReason::Alloc, "OOM: LIRGenerator::visitGetInlinedArgument");
+ return;
+ }
+
+ lir->setOperand(LGetInlinedArgument::Index, index);
+ for (uint32_t i = 0; i < numActuals; i++) {
+ MDefinition* arg = ins->getArg(i);
+ uint32_t index = LGetInlinedArgument::ArgIndex(i);
+ lir->setBoxOperand(
+ index, useBoxOrTypedOrConstant(arg,
+ /*useConstant = */ true, useAtStart));
+ }
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitGetInlinedArgumentHole(MGetInlinedArgumentHole* ins) {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_MIPS64)
+ // On some 64-bit architectures, we don't support boxing a typed
+ // register in-place without using a scratch register, so the result
+ // register can't be the same as any of the inputs. Fortunately,
+ // those architectures have registers to spare.
+ const bool useAtStart = false;
+#else
+ const bool useAtStart = true;
+#endif
+
+ LAllocation index =
+ useAtStart ? useRegisterAtStart(ins->index()) : useRegister(ins->index());
+ uint32_t numActuals = ins->numActuals();
+ uint32_t numOperands =
+ numActuals * BOX_PIECES + LGetInlinedArgumentHole::NumNonArgumentOperands;
+
+ auto* lir = allocateVariadic<LGetInlinedArgumentHole>(numOperands);
+ if (!lir) {
+ abort(AbortReason::Alloc, "OOM: LIRGenerator::visitGetInlinedArgumentHole");
+ return;
+ }
+
+ lir->setOperand(LGetInlinedArgumentHole::Index, index);
+ for (uint32_t i = 0; i < numActuals; i++) {
+ MDefinition* arg = ins->getArg(i);
+ uint32_t index = LGetInlinedArgumentHole::ArgIndex(i);
+ lir->setBoxOperand(
+ index, useBoxOrTypedOrConstant(arg,
+ /*useConstant = */ true, useAtStart));
+ }
+ assignSnapshot(lir, ins->bailoutKind());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitGetArgumentsObjectArg(MGetArgumentsObjectArg* ins) {
+ LAllocation argsObj = useRegister(ins->argsObject());
+ LGetArgumentsObjectArg* lir =
+ new (alloc()) LGetArgumentsObjectArg(argsObj, temp());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitSetArgumentsObjectArg(MSetArgumentsObjectArg* ins) {
+ LAllocation argsObj = useRegister(ins->argsObject());
+ LSetArgumentsObjectArg* lir = new (alloc())
+ LSetArgumentsObjectArg(argsObj, useBox(ins->value()), temp());
+ add(lir, ins);
+}
+
+void LIRGenerator::visitLoadArgumentsObjectArg(MLoadArgumentsObjectArg* ins) {
+ MDefinition* argsObj = ins->argsObject();
+ MOZ_ASSERT(argsObj->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ auto* lir = new (alloc())
+ LLoadArgumentsObjectArg(useRegister(argsObj), useRegister(index), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitLoadArgumentsObjectArgHole(
+ MLoadArgumentsObjectArgHole* ins) {
+ MDefinition* argsObj = ins->argsObject();
+ MOZ_ASSERT(argsObj->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LLoadArgumentsObjectArgHole(
+ useRegister(argsObj), useRegister(index), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitInArgumentsObjectArg(MInArgumentsObjectArg* ins) {
+ MDefinition* argsObj = ins->argsObject();
+ MOZ_ASSERT(argsObj->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ auto* lir = new (alloc())
+ LInArgumentsObjectArg(useRegister(argsObj), useRegister(index), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitArgumentsObjectLength(MArgumentsObjectLength* ins) {
+ MDefinition* argsObj = ins->argsObject();
+ MOZ_ASSERT(argsObj->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LArgumentsObjectLength(useRegister(argsObj));
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitArrayFromArgumentsObject(
+ MArrayFromArgumentsObject* ins) {
+ MDefinition* argsObj = ins->argsObject();
+ MOZ_ASSERT(argsObj->type() == MIRType::Object);
+
+ auto* lir =
+ new (alloc()) LArrayFromArgumentsObject(useRegisterAtStart(argsObj));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGuardArgumentsObjectFlags(
+ MGuardArgumentsObjectFlags* ins) {
+ MDefinition* argsObj = ins->argsObject();
+ MOZ_ASSERT(argsObj->type() == MIRType::Object);
+
+ auto* lir =
+ new (alloc()) LGuardArgumentsObjectFlags(useRegister(argsObj), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, argsObj);
+}
+
+void LIRGenerator::visitBoundFunctionNumArgs(MBoundFunctionNumArgs* ins) {
+ MDefinition* obj = ins->object();
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LBoundFunctionNumArgs(useRegisterAtStart(obj));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGuardBoundFunctionIsConstructor(
+ MGuardBoundFunctionIsConstructor* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc())
+ LGuardBoundFunctionIsConstructor(useRegister(ins->object()));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitReturnFromCtor(MReturnFromCtor* ins) {
+ LReturnFromCtor* lir = new (alloc())
+ LReturnFromCtor(useBox(ins->value()), useRegister(ins->object()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitBoxNonStrictThis(MBoxNonStrictThis* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->input()->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LBoxNonStrictThis(useBox(ins->input()));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitImplicitThis(MImplicitThis* ins) {
+ MDefinition* env = ins->envChain();
+ MOZ_ASSERT(env->type() == MIRType::Object);
+
+ LImplicitThis* lir = new (alloc()) LImplicitThis(useRegisterAtStart(env));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+template <typename T>
+bool LIRGenerator::lowerCallArguments(T* call) {
+ uint32_t argc = call->numStackArgs();
+
+ // Align the arguments of a call such that the callee would keep the same
+ // alignment as the caller.
+ uint32_t baseSlot = 0;
+ if (JitStackValueAlignment > 1) {
+ baseSlot = AlignBytes(argc, JitStackValueAlignment);
+ } else {
+ baseSlot = argc;
+ }
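+
+ // For example, if JitStackValueAlignment were 2 (padding argc up to an even
+ // number of Value slots), argc == 3 would give baseSlot = AlignBytes(3, 2)
+ // = 4, i.e. one extra padding slot.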
+
+ // Save the maximum number of arguments, so that we can use one unique
+ // frame size.
+ if (baseSlot > maxargslots_) {
+ maxargslots_ = baseSlot;
+ }
+
+ for (size_t i = 0; i < argc; i++) {
+ MDefinition* arg = call->getArg(i);
+ uint32_t argslot = baseSlot - i;
+
+ // Values take a slow path.
+ if (arg->type() == MIRType::Value) {
+ LStackArgV* stack = new (alloc()) LStackArgV(useBox(arg), argslot);
+ add(stack);
+ } else {
+ // Known types can move constant types and/or payloads.
+ LStackArgT* stack = new (alloc())
+ LStackArgT(useRegisterOrConstant(arg), argslot, arg->type());
+ add(stack);
+ }
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void LIRGenerator::visitCall(MCall* call) {
+ MOZ_ASSERT(call->getCallee()->type() == MIRType::Object);
+
+ // In case of oom, skip the rest of the allocations.
+ if (!lowerCallArguments(call)) {
+ abort(AbortReason::Alloc, "OOM: LIRGenerator::visitCall");
+ return;
+ }
+
+ WrappedFunction* target = call->getSingleTarget();
+
+ LInstruction* lir;
+
+ if (call->isCallDOMNative()) {
+ // Call DOM functions.
+ MOZ_ASSERT(target && target->isNativeWithoutJitEntry());
+ Register cxReg, objReg, privReg, argsReg;
+ GetTempRegForIntArg(0, 0, &cxReg);
+ GetTempRegForIntArg(1, 0, &objReg);
+ GetTempRegForIntArg(2, 0, &privReg);
+ mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &argsReg);
+ MOZ_ASSERT(ok, "How can we not have four temp registers?");
+ lir = new (alloc()) LCallDOMNative(tempFixed(cxReg), tempFixed(objReg),
+ tempFixed(privReg), tempFixed(argsReg));
+ } else if (target) {
+ // Call known functions.
+ if (target->isNativeWithoutJitEntry()) {
+ Register cxReg, numReg, vpReg, tmpReg;
+ GetTempRegForIntArg(0, 0, &cxReg);
+ GetTempRegForIntArg(1, 0, &numReg);
+ GetTempRegForIntArg(2, 0, &vpReg);
+
+ // Even though this is just a temp reg, use the same API to avoid
+ // register collisions.
+ mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &tmpReg);
+ MOZ_ASSERT(ok, "How can we not have four temp registers?");
+
+ lir = new (alloc()) LCallNative(tempFixed(cxReg), tempFixed(numReg),
+ tempFixed(vpReg), tempFixed(tmpReg));
+ } else {
+ lir = new (alloc()) LCallKnown(useRegisterAtStart(call->getCallee()),
+ tempFixed(CallTempReg0));
+ }
+ } else {
+ // Call anything, using the most generic code.
+ lir = new (alloc())
+ LCallGeneric(useRegisterAtStart(call->getCallee()),
+ tempFixed(CallTempReg0), tempFixed(CallTempReg1));
+ }
+ defineReturn(lir, call);
+ assignSafepoint(lir, call);
+}
+
+void LIRGenerator::visitCallClassHook(MCallClassHook* call) {
+ MDefinition* callee = call->getCallee();
+ MOZ_ASSERT(callee->type() == MIRType::Object);
+
+ // In case of oom, skip the rest of the allocations.
+ if (!lowerCallArguments(call)) {
+ abort(AbortReason::Alloc, "OOM: LIRGenerator::visitCallClassHook");
+ return;
+ }
+
+ Register cxReg, numReg, vpReg, tmpReg;
+ GetTempRegForIntArg(0, 0, &cxReg);
+ GetTempRegForIntArg(1, 0, &numReg);
+ GetTempRegForIntArg(2, 0, &vpReg);
+
+ // Even though this is just a temp reg, use the same API to avoid
+ // register collisions.
+ mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &tmpReg);
+ MOZ_ASSERT(ok, "How can we not have four temp registers?");
+
+ auto* lir = new (alloc())
+ LCallClassHook(useRegisterAtStart(callee), tempFixed(cxReg),
+ tempFixed(numReg), tempFixed(vpReg), tempFixed(tmpReg));
+ defineReturn(lir, call);
+ assignSafepoint(lir, call);
+}
+
+void LIRGenerator::visitApplyArgs(MApplyArgs* apply) {
+ MOZ_ASSERT(apply->getFunction()->type() == MIRType::Object);
+
+ // Assert that CallTempReg2, which is used as a temp below, does not alias
+ // the return value registers; otherwise the return value would be erased.
+ static_assert(CallTempReg2 != JSReturnReg_Type);
+ static_assert(CallTempReg2 != JSReturnReg_Data);
+
+ LApplyArgsGeneric* lir = new (alloc()) LApplyArgsGeneric(
+ useFixedAtStart(apply->getFunction(), CallTempReg3),
+ useFixedAtStart(apply->getArgc(), CallTempReg0),
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
+ tempFixed(CallTempReg1), // object register
+ tempFixed(CallTempReg2)); // stack counter register
+
+ // A bailout is needed if the arguments array has too many values.
+ assignSnapshot(lir, apply->bailoutKind());
+
+ defineReturn(lir, apply);
+ assignSafepoint(lir, apply);
+}
+
+void LIRGenerator::visitApplyArgsObj(MApplyArgsObj* apply) {
+ MOZ_ASSERT(apply->getFunction()->type() == MIRType::Object);
+
+ // Assert that CallTempReg2, which is used as a temp below, does not alias
+ // the return value registers; otherwise the return value would be erased.
+ static_assert(CallTempReg2 != JSReturnReg_Type);
+ static_assert(CallTempReg2 != JSReturnReg_Data);
+
+ LApplyArgsObj* lir = new (alloc()) LApplyArgsObj(
+ useFixedAtStart(apply->getFunction(), CallTempReg3),
+ useFixedAtStart(apply->getArgsObj(), CallTempReg0),
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
+ tempFixed(CallTempReg1), // object register
+ tempFixed(CallTempReg2)); // stack counter register
+
+ // A bailout is needed if the arguments array has too many values.
+ assignSnapshot(lir, apply->bailoutKind());
+
+ defineReturn(lir, apply);
+ assignSafepoint(lir, apply);
+}
+
+void LIRGenerator::visitApplyArray(MApplyArray* apply) {
+ MOZ_ASSERT(apply->getFunction()->type() == MIRType::Object);
+
+ // Assert that CallTempReg2, which is used as a temp below, does not alias
+ // the return value registers; otherwise the return value would be erased.
+ static_assert(CallTempReg2 != JSReturnReg_Type);
+ static_assert(CallTempReg2 != JSReturnReg_Data);
+
+ LApplyArrayGeneric* lir = new (alloc()) LApplyArrayGeneric(
+ useFixedAtStart(apply->getFunction(), CallTempReg3),
+ useFixedAtStart(apply->getElements(), CallTempReg0),
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
+ tempFixed(CallTempReg1), // object register
+ tempFixed(CallTempReg2)); // stack counter register
+
+ // A bailout is needed if the array has too many values, or if there is
+ // unused (uninitialized) space at the end of the array.
+ assignSnapshot(lir, apply->bailoutKind());
+
+ defineReturn(lir, apply);
+ assignSafepoint(lir, apply);
+}
+
+void LIRGenerator::visitConstructArgs(MConstructArgs* mir) {
+ MOZ_ASSERT(mir->getFunction()->type() == MIRType::Object);
+ MOZ_ASSERT(mir->getArgc()->type() == MIRType::Int32);
+ MOZ_ASSERT(mir->getNewTarget()->type() == MIRType::Object);
+ MOZ_ASSERT(mir->getThis()->type() == MIRType::Value);
+
+ // Assert that CallTempReg2, which is used as a temp below, does not alias
+ // the return value registers; otherwise the return value would be erased.
+ static_assert(CallTempReg2 != JSReturnReg_Type);
+ static_assert(CallTempReg2 != JSReturnReg_Data);
+
+ auto* lir = new (alloc()) LConstructArgsGeneric(
+ useFixedAtStart(mir->getFunction(), CallTempReg3),
+ useFixedAtStart(mir->getArgc(), CallTempReg0),
+ useFixedAtStart(mir->getNewTarget(), CallTempReg1),
+ useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5),
+ tempFixed(CallTempReg2));
+
+ // A bailout is needed if the arguments array has too many values.
+ assignSnapshot(lir, mir->bailoutKind());
+
+ defineReturn(lir, mir);
+ assignSafepoint(lir, mir);
+}
+
+void LIRGenerator::visitConstructArray(MConstructArray* mir) {
+ MOZ_ASSERT(mir->getFunction()->type() == MIRType::Object);
+ MOZ_ASSERT(mir->getElements()->type() == MIRType::Elements);
+ MOZ_ASSERT(mir->getNewTarget()->type() == MIRType::Object);
+ MOZ_ASSERT(mir->getThis()->type() == MIRType::Value);
+
+ // Assert that CallTempReg2, which is used as a temp below, does not alias
+ // the return value registers; otherwise the return value would be erased.
+ static_assert(CallTempReg2 != JSReturnReg_Type);
+ static_assert(CallTempReg2 != JSReturnReg_Data);
+
+ auto* lir = new (alloc()) LConstructArrayGeneric(
+ useFixedAtStart(mir->getFunction(), CallTempReg3),
+ useFixedAtStart(mir->getElements(), CallTempReg0),
+ useFixedAtStart(mir->getNewTarget(), CallTempReg1),
+ useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5),
+ tempFixed(CallTempReg2));
+
+ // A bailout is needed if the array has too many values, or if there is
+ // unused (uninitialized) space at the end of the array.
+ assignSnapshot(lir, mir->bailoutKind());
+
+ defineReturn(lir, mir);
+ assignSafepoint(lir, mir);
+}
+
+void LIRGenerator::visitBail(MBail* bail) {
+ LBail* lir = new (alloc()) LBail();
+ assignSnapshot(lir, bail->bailoutKind());
+ add(lir, bail);
+}
+
+void LIRGenerator::visitUnreachable(MUnreachable* unreachable) {
+ LUnreachable* lir = new (alloc()) LUnreachable();
+ add(lir, unreachable);
+}
+
+void LIRGenerator::visitEncodeSnapshot(MEncodeSnapshot* mir) {
+ LEncodeSnapshot* lir = new (alloc()) LEncodeSnapshot();
+ assignSnapshot(lir, mir->bailoutKind());
+ add(lir, mir);
+}
+
+void LIRGenerator::visitUnreachableResult(MUnreachableResult* mir) {
+ if (mir->type() == MIRType::Value) {
+ auto* lir = new (alloc()) LUnreachableResultV();
+ defineBox(lir, mir);
+ } else {
+ auto* lir = new (alloc()) LUnreachableResultT();
+ define(lir, mir);
+ }
+}
+
+void LIRGenerator::visitAssertFloat32(MAssertFloat32* assertion) {
+ MIRType type = assertion->input()->type();
+ DebugOnly<bool> checkIsFloat32 = assertion->mustBeFloat32();
+
+ if (type != MIRType::Value && !JitOptions.eagerIonCompilation()) {
+ MOZ_ASSERT_IF(checkIsFloat32, type == MIRType::Float32);
+ MOZ_ASSERT_IF(!checkIsFloat32, type != MIRType::Float32);
+ }
+}
+
+void LIRGenerator::visitAssertRecoveredOnBailout(
+ MAssertRecoveredOnBailout* assertion) {
+ MOZ_CRASH("AssertRecoveredOnBailout nodes are always recovered on bailouts.");
+}
+
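+// If the left-hand side is a constant, swap the operands and reverse the
+// comparison so that the constant ends up on the right; e.g. `1 < x` is
+// rewritten as `x > 1`.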
+[[nodiscard]] static JSOp ReorderComparison(JSOp op, MDefinition** lhsp,
+ MDefinition** rhsp) {
+ MDefinition* lhs = *lhsp;
+ MDefinition* rhs = *rhsp;
+
+ if (lhs->maybeConstantValue()) {
+ *rhsp = lhs;
+ *lhsp = rhs;
+ return ReverseCompareOp(op);
+ }
+ return op;
+}
+
+void LIRGenerator::visitTest(MTest* test) {
+ MDefinition* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ // String is converted to length of string in the type analysis phase (see
+ // TestPolicy).
+ MOZ_ASSERT(opd->type() != MIRType::String);
+
+ // Testing a constant.
+ if (MConstant* constant = opd->maybeConstantValue()) {
+ bool b;
+ if (constant->valueToBoolean(&b)) {
+ add(new (alloc()) LGoto(b ? ifTrue : ifFalse));
+ return;
+ }
+ }
+
+ if (opd->type() == MIRType::Value) {
+ auto* lir = new (alloc()) LTestVAndBranch(
+ ifTrue, ifFalse, useBox(opd), tempDouble(), tempToUnbox(), temp());
+ add(lir, test);
+ return;
+ }
+
+ // Objects are truthy, except when they might emulate undefined.
+ if (opd->type() == MIRType::Object) {
+ add(new (alloc())
+ LTestOAndBranch(useRegister(opd), ifTrue, ifFalse, temp()),
+ test);
+ return;
+ }
+
+ // These must be explicitly sniffed out since they are constants and have
+ // no payload.
+ if (opd->type() == MIRType::Undefined || opd->type() == MIRType::Null) {
+ add(new (alloc()) LGoto(ifFalse));
+ return;
+ }
+
+ // All symbols are truthy.
+ if (opd->type() == MIRType::Symbol) {
+ add(new (alloc()) LGoto(ifTrue));
+ return;
+ }
+
+ // Try to match the pattern
+ // test=MTest(
+ // comp=MCompare(
+ // {EQ,NE} for {Int,UInt}{32,64},
+ // bitAnd={MBitAnd,MWasmBinaryBitwise(And{32,64})}(x, y),
+ // MConstant(0)
+ // )
+ // )
+ // and produce a single LBitAndAndBranch node. This requires both `comp`
+ // and `bitAnd` to be marked emit-at-uses. Since we can't use
+ // LBitAndAndBranch to represent a 64-bit AND on a 32-bit target, the 64-bit
+ // case is restricted to 64-bit targets.
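+ // For example, a branch on `(x & 0xff) != 0` is lowered to a single
+ // test-and-branch instead of materializing the AND result and then comparing
+ // it against zero.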
+ if (opd->isCompare() && opd->isEmittedAtUses()) {
+#ifdef JS_64BIT
+ constexpr bool targetIs64 = true;
+#else
+ constexpr bool targetIs64 = false;
+#endif
+ MCompare* comp = opd->toCompare();
+ Assembler::Condition compCond =
+ JSOpToCondition(comp->compareType(), comp->jsop());
+ MDefinition* compL = comp->getOperand(0);
+ MDefinition* compR = comp->getOperand(1);
+ if ((comp->compareType() == MCompare::Compare_Int32 ||
+ comp->compareType() == MCompare::Compare_UInt32 ||
+ (targetIs64 && comp->compareType() == MCompare::Compare_Int64) ||
+ (targetIs64 && comp->compareType() == MCompare::Compare_UInt64)) &&
+ (compCond == Assembler::Equal || compCond == Assembler::NotEqual) &&
+ compR->isConstant() &&
+ (compR->toConstant()->isInt32(0) ||
+ (targetIs64 && compR->toConstant()->isInt64(0))) &&
+ (compL->isBitAnd() || (compL->isWasmBinaryBitwise() &&
+ compL->toWasmBinaryBitwise()->subOpcode() ==
+ MWasmBinaryBitwise::SubOpcode::And))) {
+ // The MCompare is OK; now check its first operand (the and-ish node).
+ MDefinition* bitAnd = compL;
+ MDefinition* bitAndL = bitAnd->getOperand(0);
+ MDefinition* bitAndR = bitAnd->getOperand(1);
+ MIRType bitAndLTy = bitAndL->type();
+ MIRType bitAndRTy = bitAndR->type();
+ if (bitAnd->isEmittedAtUses() && bitAndLTy == bitAndRTy &&
+ (bitAndLTy == MIRType::Int32 ||
+ (targetIs64 && bitAndLTy == MIRType::Int64))) {
+ // Pattern match succeeded.
+ ReorderCommutative(&bitAndL, &bitAndR, test);
+ if (compCond == Assembler::Equal) {
+ compCond = Assembler::Zero;
+ } else if (compCond == Assembler::NotEqual) {
+ compCond = Assembler::NonZero;
+ } else {
+ MOZ_ASSERT_UNREACHABLE("inequality operators cannot be folded");
+ }
+ MOZ_ASSERT_IF(!targetIs64, bitAndLTy == MIRType::Int32);
+ lowerForBitAndAndBranch(
+ new (alloc()) LBitAndAndBranch(
+ ifTrue, ifFalse, bitAndLTy == MIRType::Int64, compCond),
+ test, bitAndL, bitAndR);
+ return;
+ }
+ }
+ }
+
+ // Check if the operand for this test is a compare operation. If it is, we
+ // want to emit an LCompare*AndBranch rather than an LTest*AndBranch, to fuse
+ // the compare and jump instructions.
+ if (opd->isCompare() && opd->isEmittedAtUses()) {
+ MCompare* comp = opd->toCompare();
+ MDefinition* left = comp->lhs();
+ MDefinition* right = comp->rhs();
+
+ // Try to fold the comparison so that we don't have to handle all cases.
+ bool result;
+ if (comp->tryFold(&result)) {
+ add(new (alloc()) LGoto(result ? ifTrue : ifFalse));
+ return;
+ }
+
+ // Emit LCompare*AndBranch.
+
+ // Compare and branch null/undefined.
+ // The second operand has known null/undefined type,
+ // so just test the first operand.
+ if (comp->compareType() == MCompare::Compare_Null ||
+ comp->compareType() == MCompare::Compare_Undefined) {
+ if (left->type() == MIRType::Object) {
+ auto* lir = new (alloc()) LIsNullOrLikeUndefinedAndBranchT(
+ comp, useRegister(left), ifTrue, ifFalse, temp());
+ add(lir, test);
+ return;
+ }
+
+ if (IsLooseEqualityOp(comp->jsop())) {
+ auto* lir = new (alloc()) LIsNullOrLikeUndefinedAndBranchV(
+ comp, ifTrue, ifFalse, useBox(left), temp(), tempToUnbox());
+ add(lir, test);
+ return;
+ }
+
+ if (comp->compareType() == MCompare::Compare_Null) {
+ auto* lir =
+ new (alloc()) LIsNullAndBranch(comp, ifTrue, ifFalse, useBox(left));
+ add(lir, test);
+ return;
+ }
+
+ auto* lir = new (alloc())
+ LIsUndefinedAndBranch(comp, ifTrue, ifFalse, useBox(left));
+ add(lir, test);
+ return;
+ }
+
+ // Compare and branch Int32, Symbol, Object, or RefOrNull pointers.
+ if (comp->isInt32Comparison() ||
+ comp->compareType() == MCompare::Compare_UInt32 ||
+ comp->compareType() == MCompare::Compare_UIntPtr ||
+ comp->compareType() == MCompare::Compare_Object ||
+ comp->compareType() == MCompare::Compare_Symbol ||
+ comp->compareType() == MCompare::Compare_RefOrNull) {
+ JSOp op = ReorderComparison(comp->jsop(), &left, &right);
+ LAllocation lhs = useRegister(left);
+ LAllocation rhs;
+ if (comp->isInt32Comparison() ||
+ comp->compareType() == MCompare::Compare_UInt32 ||
+ comp->compareType() == MCompare::Compare_UIntPtr) {
+ rhs = useAnyOrInt32Constant(right);
+ } else {
+ rhs = useAny(right);
+ }
+ LCompareAndBranch* lir =
+ new (alloc()) LCompareAndBranch(comp, op, lhs, rhs, ifTrue, ifFalse);
+ add(lir, test);
+ return;
+ }
+
+ // Compare and branch Int64.
+ if (comp->compareType() == MCompare::Compare_Int64 ||
+ comp->compareType() == MCompare::Compare_UInt64) {
+ JSOp op = ReorderComparison(comp->jsop(), &left, &right);
+ lowerForCompareI64AndBranch(test, comp, op, left, right, ifTrue, ifFalse);
+ return;
+ }
+
+ // Compare and branch doubles.
+ if (comp->isDoubleComparison()) {
+ LAllocation lhs = useRegister(left);
+ LAllocation rhs = useRegister(right);
+ LCompareDAndBranch* lir =
+ new (alloc()) LCompareDAndBranch(comp, lhs, rhs, ifTrue, ifFalse);
+ add(lir, test);
+ return;
+ }
+
+ // Compare and branch floats.
+ if (comp->isFloat32Comparison()) {
+ LAllocation lhs = useRegister(left);
+ LAllocation rhs = useRegister(right);
+ LCompareFAndBranch* lir =
+ new (alloc()) LCompareFAndBranch(comp, lhs, rhs, ifTrue, ifFalse);
+ add(lir, test);
+ return;
+ }
+ }
+
+ // Check if the operand for this test is a bitand operation. If it is, we want
+ // to emit an LBitAndAndBranch rather than an LTest*AndBranch.
+ if (opd->isBitAnd() && opd->isEmittedAtUses()) {
+ MDefinition* lhs = opd->getOperand(0);
+ MDefinition* rhs = opd->getOperand(1);
+ if (lhs->type() == MIRType::Int32 && rhs->type() == MIRType::Int32) {
+ ReorderCommutative(&lhs, &rhs, test);
+ lowerForBitAndAndBranch(new (alloc()) LBitAndAndBranch(ifTrue, ifFalse,
+ /*is64=*/false),
+ test, lhs, rhs);
+ return;
+ }
+ }
+
+#if defined(ENABLE_WASM_SIMD) && \
+ (defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
+ defined(JS_CODEGEN_ARM64))
+ // Check if the operand for this test is an any_true/all_true SIMD operation.
+ // If it is, we want to emit an LWasmReduceAndBranchSimd128 node to avoid
+ // generating an intermediate boolean result.
+ if (opd->isWasmReduceSimd128() && opd->isEmittedAtUses()) {
+ MWasmReduceSimd128* node = opd->toWasmReduceSimd128();
+ if (canFoldReduceSimd128AndBranch(node->simdOp())) {
+# ifdef DEBUG
+ js::wasm::ReportSimdAnalysis("simd128-to-scalar-and-branch -> folded");
+# endif
+ auto* lir = new (alloc()) LWasmReduceAndBranchSimd128(
+ useRegister(node->input()), node->simdOp(), ifTrue, ifFalse);
+ add(lir, test);
+ return;
+ }
+ }
+#endif
+
+ if (opd->isIsObject() && opd->isEmittedAtUses()) {
+ MDefinition* input = opd->toIsObject()->input();
+ MOZ_ASSERT(input->type() == MIRType::Value);
+
+ LIsObjectAndBranch* lir =
+ new (alloc()) LIsObjectAndBranch(ifTrue, ifFalse, useBoxAtStart(input));
+ add(lir, test);
+ return;
+ }
+
+ if (opd->isWasmGcObjectIsSubtypeOfAbstract() && opd->isEmittedAtUses()) {
+ MWasmGcObjectIsSubtypeOfAbstract* isSubTypeOf =
+ opd->toWasmGcObjectIsSubtypeOfAbstract();
+ LAllocation object = useRegister(isSubTypeOf->object());
+ // As in visitWasmGcObjectIsSubtypeOfAbstract, we know we do not need
+ // scratch2 and superSuperTypeVector because we know this is not a concrete
+ // type.
+ LDefinition scratch1 = MacroAssembler::needScratch1ForBranchWasmGcRefType(
+ isSubTypeOf->destType())
+ ? temp()
+ : LDefinition();
+ add(new (alloc()) LWasmGcObjectIsSubtypeOfAbstractAndBranch(
+ ifTrue, ifFalse, isSubTypeOf->sourceType(), isSubTypeOf->destType(),
+ object, scratch1),
+ test);
+ return;
+ }
+
+ if (opd->isWasmGcObjectIsSubtypeOfConcrete() && opd->isEmittedAtUses()) {
+ MWasmGcObjectIsSubtypeOfConcrete* isSubTypeOf =
+ opd->toWasmGcObjectIsSubtypeOfConcrete();
+ LAllocation object = useRegister(isSubTypeOf->object());
+ // As in visitWasmGcObjectIsSubtypeOfConcrete, we know we need scratch1 and
+ // superSuperTypeVector because we know this is a concrete type.
+ LAllocation superSuperTypeVector =
+ useRegister(isSubTypeOf->superSuperTypeVector());
+ LDefinition scratch1 = temp();
+ LDefinition scratch2 = MacroAssembler::needScratch2ForBranchWasmGcRefType(
+ isSubTypeOf->destType())
+ ? temp()
+ : LDefinition();
+ add(new (alloc()) LWasmGcObjectIsSubtypeOfConcreteAndBranch(
+ ifTrue, ifFalse, isSubTypeOf->sourceType(), isSubTypeOf->destType(),
+ object, superSuperTypeVector, scratch1, scratch2),
+ test);
+ return;
+ }
+
+ if (opd->isIsNullOrUndefined() && opd->isEmittedAtUses()) {
+ MIsNullOrUndefined* isNullOrUndefined = opd->toIsNullOrUndefined();
+ MDefinition* input = isNullOrUndefined->value();
+ MOZ_ASSERT(input->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LIsNullOrUndefinedAndBranch(
+ isNullOrUndefined, ifTrue, ifFalse, useBoxAtStart(input));
+ add(lir, test);
+ return;
+ }
+
+ if (opd->isIsNoIter()) {
+ MOZ_ASSERT(opd->isEmittedAtUses());
+
+ MDefinition* input = opd->toIsNoIter()->input();
+ MOZ_ASSERT(input->type() == MIRType::Value);
+
+ LIsNoIterAndBranch* lir =
+ new (alloc()) LIsNoIterAndBranch(ifTrue, ifFalse, useBox(input));
+ add(lir, test);
+ return;
+ }
+
+ if (opd->isIteratorHasIndices()) {
+ MOZ_ASSERT(opd->isEmittedAtUses());
+
+ MDefinition* object = opd->toIteratorHasIndices()->object();
+ MDefinition* iterator = opd->toIteratorHasIndices()->iterator();
+ LIteratorHasIndicesAndBranch* lir = new (alloc())
+ LIteratorHasIndicesAndBranch(ifTrue, ifFalse, useRegister(object),
+ useRegister(iterator), temp(), temp());
+ add(lir, test);
+ return;
+ }
+
+ switch (opd->type()) {
+ case MIRType::Double:
+ add(new (alloc()) LTestDAndBranch(useRegister(opd), ifTrue, ifFalse));
+ break;
+ case MIRType::Float32:
+ add(new (alloc()) LTestFAndBranch(useRegister(opd), ifTrue, ifFalse));
+ break;
+ case MIRType::Int32:
+ case MIRType::Boolean:
+ add(new (alloc()) LTestIAndBranch(useRegister(opd), ifTrue, ifFalse));
+ break;
+ case MIRType::Int64:
+ add(new (alloc())
+ LTestI64AndBranch(useInt64Register(opd), ifTrue, ifFalse));
+ break;
+ case MIRType::BigInt:
+ add(new (alloc()) LTestBIAndBranch(useRegister(opd), ifTrue, ifFalse));
+ break;
+ default:
+ MOZ_CRASH("Bad type");
+ }
+}
+
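+// A compare can be emitted at its use point when it has at most one use and
+// that use is an MTest or MWasmSelect; the compare then folds into the
+// consumer's lowering (e.g. an LCompare*AndBranch) instead of materializing a
+// boolean result.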
+static inline bool CanEmitCompareAtUses(MInstruction* ins) {
+ if (!ins->canEmitAtUses()) {
+ return false;
+ }
+
+ // If the result is never used, we can trivially defer emission to the use
+ // point: no such point exists, so no code is ever emitted for it.
+ MUseIterator iter(ins->usesBegin());
+ if (iter == ins->usesEnd()) {
+ return true;
+ }
+
+ // If the first use isn't of the expected form, the answer is No.
+ MNode* node = iter->consumer();
+ if (!node->isDefinition()) {
+ return false;
+ }
+
+ MDefinition* use = node->toDefinition();
+ if (!use->isTest() && !use->isWasmSelect()) {
+ return false;
+ }
+
+ // Emission can be deferred to the first use point, but only if there are no
+ // other use points.
+ iter++;
+ return iter == ins->usesEnd();
+}
+
+static bool CanCompareCharactersInline(const JSLinearString* linear) {
+ size_t length = linear->length();
+
+ // Limit the number of inline instructions used for character comparisons.
+ // Use the same instruction limit for both encodings, which means the
+ // two-byte character cutoff is half the Latin-1 cutoff.
+ constexpr size_t Latin1StringCompareCutoff = 32;
+ constexpr size_t TwoByteStringCompareCutoff = 16;
+
+ return length > 0 &&
+ (linear->hasLatin1Chars() ? length <= Latin1StringCompareCutoff
+ : length <= TwoByteStringCompareCutoff);
+}
+
+void LIRGenerator::visitCompare(MCompare* comp) {
+ MDefinition* left = comp->lhs();
+ MDefinition* right = comp->rhs();
+
+ // Try to fold the comparison so that we don't have to handle all cases.
+ bool result;
+ if (comp->tryFold(&result)) {
+ define(new (alloc()) LInteger(result), comp);
+ return;
+ }
+
+ // Move this below the emitAtUses call if we ever implement
+ // LCompareSAndBranch. Doing so now wouldn't be wrong, but it serves no
+ // purpose yet, and keeping the code here avoids confusion.
+ if (comp->compareType() == MCompare::Compare_String) {
+ if (IsEqualityOp(comp->jsop())) {
+ MConstant* constant = nullptr;
+ if (left->isConstant()) {
+ constant = left->toConstant();
+ } else if (right->isConstant()) {
+ constant = right->toConstant();
+ }
+
+ if (constant) {
+ JSLinearString* linear = &constant->toString()->asLinear();
+
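+ // For a sufficiently short constant, the equality check is emitted as
+ // inline character comparisons on the fast path (e.g. `s === "abc"`),
+ // avoiding a VM call.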
+ if (CanCompareCharactersInline(linear)) {
+ MDefinition* input = left->isConstant() ? right : left;
+
+ auto* lir = new (alloc()) LCompareSInline(useRegister(input), linear);
+ define(lir, comp);
+ assignSafepoint(lir, comp);
+ return;
+ }
+ }
+ }
+
+ LCompareS* lir =
+ new (alloc()) LCompareS(useRegister(left), useRegister(right));
+ define(lir, comp);
+ assignSafepoint(lir, comp);
+ return;
+ }
+
+ // Compare two BigInts.
+ if (comp->compareType() == MCompare::Compare_BigInt) {
+ auto* lir = new (alloc()) LCompareBigInt(
+ useRegister(left), useRegister(right), temp(), temp(), temp());
+ define(lir, comp);
+ return;
+ }
+
+ // Compare BigInt with Int32.
+ if (comp->compareType() == MCompare::Compare_BigInt_Int32) {
+ auto* lir = new (alloc()) LCompareBigIntInt32(
+ useRegister(left), useRegister(right), temp(), temp());
+ define(lir, comp);
+ return;
+ }
+
+ // Compare BigInt with Double.
+ if (comp->compareType() == MCompare::Compare_BigInt_Double) {
+ auto* lir = new (alloc()) LCompareBigIntDouble(useRegisterAtStart(left),
+ useRegisterAtStart(right));
+ defineReturn(lir, comp);
+ return;
+ }
+
+ // Compare BigInt with String.
+ if (comp->compareType() == MCompare::Compare_BigInt_String) {
+ auto* lir = new (alloc()) LCompareBigIntString(useRegisterAtStart(left),
+ useRegisterAtStart(right));
+ defineReturn(lir, comp);
+ assignSafepoint(lir, comp);
+ return;
+ }
+
+ // Sniff out whether the output of this compare is used only for branching.
+ // If it is, then we will emit an LCompare*AndBranch instruction in place
+ // of this compare and any test that uses this compare. Thus, we can
+ // ignore this Compare.
+ if (CanEmitCompareAtUses(comp)) {
+ emitAtUses(comp);
+ return;
+ }
+
+ // Compare Null and Undefined.
+ if (comp->compareType() == MCompare::Compare_Null ||
+ comp->compareType() == MCompare::Compare_Undefined) {
+ if (left->type() == MIRType::Object) {
+ define(new (alloc()) LIsNullOrLikeUndefinedT(useRegister(left)), comp);
+ return;
+ }
+
+ if (IsLooseEqualityOp(comp->jsop())) {
+ auto* lir =
+ new (alloc()) LIsNullOrLikeUndefinedV(useBox(left), tempToUnbox());
+ define(lir, comp);
+ return;
+ }
+
+ if (comp->compareType() == MCompare::Compare_Null) {
+ auto* lir = new (alloc()) LIsNull(useBox(left));
+ define(lir, comp);
+ return;
+ }
+
+ auto* lir = new (alloc()) LIsUndefined(useBox(left));
+ define(lir, comp);
+ return;
+ }
+
+ // Compare Int32, Symbol, Object or Wasm pointers.
+ if (comp->isInt32Comparison() ||
+ comp->compareType() == MCompare::Compare_UInt32 ||
+ comp->compareType() == MCompare::Compare_UIntPtr ||
+ comp->compareType() == MCompare::Compare_Object ||
+ comp->compareType() == MCompare::Compare_Symbol ||
+ comp->compareType() == MCompare::Compare_RefOrNull) {
+ JSOp op = ReorderComparison(comp->jsop(), &left, &right);
+ LAllocation lhs = useRegister(left);
+ LAllocation rhs;
+ if (comp->isInt32Comparison() ||
+ comp->compareType() == MCompare::Compare_UInt32 ||
+ comp->compareType() == MCompare::Compare_UIntPtr) {
+ rhs = useAnyOrInt32Constant(right);
+ } else {
+ rhs = useAny(right);
+ }
+ define(new (alloc()) LCompare(op, lhs, rhs), comp);
+ return;
+ }
+
+ // Compare Int64.
+ if (comp->compareType() == MCompare::Compare_Int64 ||
+ comp->compareType() == MCompare::Compare_UInt64) {
+ JSOp op = ReorderComparison(comp->jsop(), &left, &right);
+ define(new (alloc()) LCompareI64(op, useInt64Register(left),
+ useInt64OrConstant(right)),
+ comp);
+ return;
+ }
+
+ // Compare doubles.
+ if (comp->isDoubleComparison()) {
+ define(new (alloc()) LCompareD(useRegister(left), useRegister(right)),
+ comp);
+ return;
+ }
+
+ // Compare float32.
+ if (comp->isFloat32Comparison()) {
+ define(new (alloc()) LCompareF(useRegister(left), useRegister(right)),
+ comp);
+ return;
+ }
+
+ MOZ_CRASH("Unrecognized compare type.");
+}
+
+void LIRGenerator::visitSameValueDouble(MSameValueDouble* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ MOZ_ASSERT(rhs->type() == MIRType::Double);
+
+ auto* lir = new (alloc())
+ LSameValueDouble(useRegister(lhs), useRegister(rhs), tempDouble());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitSameValue(MSameValue* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Value);
+ MOZ_ASSERT(rhs->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LSameValue(useBox(lhs), useBox(rhs));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::lowerBitOp(JSOp op, MBinaryInstruction* ins) {
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+ MOZ_ASSERT(IsIntType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+ ReorderCommutative(&lhs, &rhs, ins);
+ lowerForALU(new (alloc()) LBitOpI(op), ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+ MOZ_ASSERT(rhs->type() == MIRType::Int64);
+ ReorderCommutative(&lhs, &rhs, ins);
+ lowerForALUInt64(new (alloc()) LBitOpI64(op), ins, lhs, rhs);
+ return;
+ }
+
+ MOZ_CRASH("Unhandled integer specialization");
+}
+
+void LIRGenerator::visitTypeOf(MTypeOf* ins) {
+ MDefinition* opd = ins->input();
+
+ if (opd->type() == MIRType::Object) {
+ auto* lir = new (alloc()) LTypeOfO(useRegister(opd));
+ define(lir, ins);
+ return;
+ }
+
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LTypeOfV* lir = new (alloc()) LTypeOfV(useBox(opd), tempToUnbox());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitTypeOfName(MTypeOfName* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LTypeOfName(useRegister(input));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitTypeOfIs(MTypeOfIs* ins) {
+ MDefinition* input = ins->input();
+
+ MOZ_ASSERT(input->type() == MIRType::Object ||
+ input->type() == MIRType::Value);
+
+ switch (ins->jstype()) {
+ case JSTYPE_UNDEFINED:
+ case JSTYPE_OBJECT:
+ case JSTYPE_FUNCTION: {
+ if (input->type() == MIRType::Object) {
+ auto* lir = new (alloc()) LTypeOfIsNonPrimitiveO(useRegister(input));
+ define(lir, ins);
+ } else {
+ auto* lir =
+ new (alloc()) LTypeOfIsNonPrimitiveV(useBox(input), tempToUnbox());
+ define(lir, ins);
+ }
+ return;
+ }
+
+ case JSTYPE_STRING:
+ case JSTYPE_NUMBER:
+ case JSTYPE_BOOLEAN:
+ case JSTYPE_SYMBOL:
+ case JSTYPE_BIGINT: {
+ MOZ_ASSERT(input->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LTypeOfIsPrimitive(useBoxAtStart(input));
+ define(lir, ins);
+ return;
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ case JSTYPE_RECORD:
+ case JSTYPE_TUPLE:
+#endif
+ case JSTYPE_LIMIT:
+ break;
+ }
+ MOZ_CRASH("Unhandled JSType");
+}
+
+void LIRGenerator::visitToAsyncIter(MToAsyncIter* ins) {
+ LToAsyncIter* lir = new (alloc()) LToAsyncIter(
+ useRegisterAtStart(ins->iterator()), useBoxAtStart(ins->nextMethod()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitToPropertyKeyCache(MToPropertyKeyCache* ins) {
+ MDefinition* input = ins->getOperand(0);
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LToPropertyKeyCache(useBox(input));
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBitNot(MBitNot* ins) {
+ MDefinition* input = ins->getOperand(0);
+
+ if (ins->type() == MIRType::Int32) {
+ MOZ_ASSERT(input->type() == MIRType::Int32);
+ lowerForALU(new (alloc()) LBitNotI(), ins, input);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(input->type() == MIRType::Int64);
+ lowerForALUInt64(new (alloc()) LBitNotI64(), ins, input);
+ return;
+ }
+
+ MOZ_CRASH("Unhandled integer specialization");
+}
+
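+// Similarly, a bitwise AND whose operands are both Int32 or both Int64 can be
+// emitted at its single use when that use is a test, or a compare that itself
+// folds into a branch; see the LBitAndAndBranch pattern match in visitTest.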
+static bool CanEmitBitAndAtUses(MInstruction* ins) {
+ if (!ins->canEmitAtUses()) {
+ return false;
+ }
+
+ MIRType tyL = ins->getOperand(0)->type();
+ MIRType tyR = ins->getOperand(1)->type();
+ if (tyL != tyR || (tyL != MIRType::Int32 && tyL != MIRType::Int64)) {
+ return false;
+ }
+
+ MUseIterator iter(ins->usesBegin());
+ if (iter == ins->usesEnd()) {
+ return false;
+ }
+
+ MNode* node = iter->consumer();
+ if (!node->isDefinition() || !node->toDefinition()->isInstruction()) {
+ return false;
+ }
+
+ MInstruction* use = node->toDefinition()->toInstruction();
+ if (!use->isTest() && !(use->isCompare() && CanEmitCompareAtUses(use))) {
+ return false;
+ }
+
+ iter++;
+ return iter == ins->usesEnd();
+}
+
+void LIRGenerator::visitBitAnd(MBitAnd* ins) {
+ // Sniff out whether the output of this bitand is used only for branching.
+ // If it is, then we will emit an LBitAndAndBranch instruction in place
+ // of this bitand and any test that uses this bitand. Thus, we can
+ // ignore this BitAnd.
+ if (CanEmitBitAndAtUses(ins)) {
+ emitAtUses(ins);
+ } else {
+ lowerBitOp(JSOp::BitAnd, ins);
+ }
+}
+
+void LIRGenerator::visitBitOr(MBitOr* ins) { lowerBitOp(JSOp::BitOr, ins); }
+
+void LIRGenerator::visitBitXor(MBitXor* ins) { lowerBitOp(JSOp::BitXor, ins); }
+
+void LIRGenerator::visitWasmBinaryBitwise(MWasmBinaryBitwise* ins) {
+ switch (ins->subOpcode()) {
+ case MWasmBinaryBitwise::SubOpcode::And:
+ if (CanEmitBitAndAtUses(ins)) {
+ emitAtUses(ins);
+ } else {
+ lowerBitOp(JSOp::BitAnd, ins);
+ }
+ break;
+ case MWasmBinaryBitwise::SubOpcode::Or:
+ lowerBitOp(JSOp::BitOr, ins);
+ break;
+ case MWasmBinaryBitwise::SubOpcode::Xor:
+ lowerBitOp(JSOp::BitXor, ins);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void LIRGenerator::lowerShiftOp(JSOp op, MShiftInstruction* ins) {
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ if (op == JSOp::Ursh && ins->type() == MIRType::Double) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+ lowerUrshD(ins->toUrsh());
+ return;
+ }
+
+ MOZ_ASSERT(IsIntType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LShiftI* lir = new (alloc()) LShiftI(op);
+ if (op == JSOp::Ursh) {
+ if (ins->toUrsh()->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ }
+ lowerForShift(lir, ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+ MOZ_ASSERT(rhs->type() == MIRType::Int64);
+ lowerForShiftInt64(new (alloc()) LShiftI64(op), ins, lhs, rhs);
+ return;
+ }
+
+ MOZ_CRASH("Unhandled integer specialization");
+}
+
+void LIRGenerator::visitLsh(MLsh* ins) { lowerShiftOp(JSOp::Lsh, ins); }
+
+void LIRGenerator::visitRsh(MRsh* ins) { lowerShiftOp(JSOp::Rsh, ins); }
+
+void LIRGenerator::visitUrsh(MUrsh* ins) { lowerShiftOp(JSOp::Ursh, ins); }
+
+void LIRGenerator::visitSignExtendInt32(MSignExtendInt32* ins) {
+ LInstructionHelper<1, 1, 0>* lir;
+
+ if (ins->mode() == MSignExtendInt32::Byte) {
+ lir = new (alloc())
+ LSignExtendInt32(useByteOpRegisterAtStart(ins->input()), ins->mode());
+ } else {
+ lir = new (alloc())
+ LSignExtendInt32(useRegisterAtStart(ins->input()), ins->mode());
+ }
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitRotate(MRotate* ins) {
+ MDefinition* input = ins->input();
+ MDefinition* count = ins->count();
+
+ if (ins->type() == MIRType::Int32) {
+ auto* lir = new (alloc()) LRotate();
+ lowerForShift(lir, ins, input, count);
+ } else if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc()) LRotateI64();
+ lowerForShiftInt64(lir, ins, input, count);
+ } else {
+ MOZ_CRASH("unexpected type in visitRotate");
+ }
+}
+
+void LIRGenerator::visitFloor(MFloor* ins) {
+ MIRType type = ins->input()->type();
+ MOZ_ASSERT(IsFloatingPointType(type));
+
+ LInstructionHelper<1, 1, 0>* lir;
+ if (type == MIRType::Double) {
+ lir = new (alloc()) LFloor(useRegister(ins->input()));
+ } else {
+ lir = new (alloc()) LFloorF(useRegister(ins->input()));
+ }
+
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitCeil(MCeil* ins) {
+ MIRType type = ins->input()->type();
+ MOZ_ASSERT(IsFloatingPointType(type));
+
+ LInstructionHelper<1, 1, 0>* lir;
+ if (type == MIRType::Double) {
+ lir = new (alloc()) LCeil(useRegister(ins->input()));
+ } else {
+ lir = new (alloc()) LCeilF(useRegister(ins->input()));
+ }
+
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitRound(MRound* ins) {
+ MIRType type = ins->input()->type();
+ MOZ_ASSERT(IsFloatingPointType(type));
+
+ LInstructionHelper<1, 1, 1>* lir;
+ if (type == MIRType::Double) {
+ lir = new (alloc()) LRound(useRegister(ins->input()), tempDouble());
+ } else {
+ lir = new (alloc()) LRoundF(useRegister(ins->input()), tempFloat32());
+ }
+
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitTrunc(MTrunc* ins) {
+ MIRType type = ins->input()->type();
+ MOZ_ASSERT(IsFloatingPointType(type));
+
+ LInstructionHelper<1, 1, 0>* lir;
+ if (type == MIRType::Double) {
+ lir = new (alloc()) LTrunc(useRegister(ins->input()));
+ } else {
+ lir = new (alloc()) LTruncF(useRegister(ins->input()));
+ }
+
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitNearbyInt(MNearbyInt* ins) {
+ MIRType inputType = ins->input()->type();
+ MOZ_ASSERT(IsFloatingPointType(inputType));
+ MOZ_ASSERT(ins->type() == inputType);
+
+ LInstructionHelper<1, 1, 0>* lir;
+ if (inputType == MIRType::Double) {
+ lir = new (alloc()) LNearbyInt(useRegisterAtStart(ins->input()));
+ } else {
+ lir = new (alloc()) LNearbyIntF(useRegisterAtStart(ins->input()));
+ }
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitMinMax(MMinMax* ins) {
+ MDefinition* first = ins->getOperand(0);
+ MDefinition* second = ins->getOperand(1);
+
+ ReorderCommutative(&first, &second, ins);
+
+ LMinMaxBase* lir;
+ switch (ins->type()) {
+ case MIRType::Int32:
+ lir = new (alloc())
+ LMinMaxI(useRegisterAtStart(first), useRegisterOrConstant(second));
+ break;
+ case MIRType::Float32:
+ lir = new (alloc())
+ LMinMaxF(useRegisterAtStart(first), useRegister(second));
+ break;
+ case MIRType::Double:
+ lir = new (alloc())
+ LMinMaxD(useRegisterAtStart(first), useRegister(second));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ // Input reuse is OK (for now) even on ARM64: floating min/max are fairly
+ // expensive due to SNaN -> QNaN conversion, and int min/max is for asm.js.
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitMinMaxArray(MMinMaxArray* ins) {
+ LInstructionHelper<1, 1, 3>* lir;
+ if (ins->type() == MIRType::Int32) {
+ lir = new (alloc())
+ LMinMaxArrayI(useRegisterAtStart(ins->array()), temp(), temp(), temp());
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ lir = new (alloc()) LMinMaxArrayD(useRegisterAtStart(ins->array()),
+ tempDouble(), temp(), temp());
+ }
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+LInstructionHelper<1, 1, 0>* LIRGenerator::allocateAbs(MAbs* ins,
+ LAllocation input) {
+ MDefinition* num = ins->input();
+ MOZ_ASSERT(IsNumberType(num->type()));
+
+ LInstructionHelper<1, 1, 0>* lir;
+ switch (num->type()) {
+ case MIRType::Int32:
+ lir = new (alloc()) LAbsI(input);
+ // Needed to handle abs(INT32_MIN), which does not fit in an int32.
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ break;
+ case MIRType::Float32:
+ lir = new (alloc()) LAbsF(input);
+ break;
+ case MIRType::Double:
+ lir = new (alloc()) LAbsD(input);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ return lir;
+}
+
+void LIRGenerator::visitClz(MClz* ins) {
+ MDefinition* num = ins->num();
+
+ MOZ_ASSERT(IsIntType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ LClzI* lir = new (alloc()) LClzI(useRegisterAtStart(num));
+ define(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LClzI64(useInt64RegisterAtStart(num));
+ defineInt64(lir, ins);
+}
+
+void LIRGenerator::visitCtz(MCtz* ins) {
+ MDefinition* num = ins->num();
+
+ MOZ_ASSERT(IsIntType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ LCtzI* lir = new (alloc()) LCtzI(useRegisterAtStart(num));
+ define(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LCtzI64(useInt64RegisterAtStart(num));
+ defineInt64(lir, ins);
+}
+
+void LIRGenerator::visitPopcnt(MPopcnt* ins) {
+ MDefinition* num = ins->num();
+
+ MOZ_ASSERT(IsIntType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ LPopcntI* lir = new (alloc()) LPopcntI(useRegisterAtStart(num), temp());
+ define(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LPopcntI64(useInt64RegisterAtStart(num), temp());
+ defineInt64(lir, ins);
+}
+
+void LIRGenerator::visitSqrt(MSqrt* ins) {
+ MDefinition* num = ins->input();
+ MOZ_ASSERT(IsFloatingPointType(num->type()));
+
+ LInstructionHelper<1, 1, 0>* lir;
+ if (num->type() == MIRType::Double) {
+ lir = new (alloc()) LSqrtD(useRegisterAtStart(num));
+ } else {
+ lir = new (alloc()) LSqrtF(useRegisterAtStart(num));
+ }
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtan2(MAtan2* ins) {
+ MDefinition* y = ins->y();
+ MOZ_ASSERT(y->type() == MIRType::Double);
+
+ MDefinition* x = ins->x();
+ MOZ_ASSERT(x->type() == MIRType::Double);
+
+ LAtan2D* lir =
+ new (alloc()) LAtan2D(useRegisterAtStart(y), useRegisterAtStart(x));
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitHypot(MHypot* ins) {
+ LHypot* lir = nullptr;
+ uint32_t length = ins->numOperands();
+ for (uint32_t i = 0; i < length; ++i) {
+ MOZ_ASSERT(ins->getOperand(i)->type() == MIRType::Double);
+ }
+
+ switch (length) {
+ case 2:
+ lir = new (alloc()) LHypot(useRegisterAtStart(ins->getOperand(0)),
+ useRegisterAtStart(ins->getOperand(1)));
+ break;
+ case 3:
+ lir = new (alloc()) LHypot(useRegisterAtStart(ins->getOperand(0)),
+ useRegisterAtStart(ins->getOperand(1)),
+ useRegisterAtStart(ins->getOperand(2)));
+ break;
+ case 4:
+ lir = new (alloc()) LHypot(useRegisterAtStart(ins->getOperand(0)),
+ useRegisterAtStart(ins->getOperand(1)),
+ useRegisterAtStart(ins->getOperand(2)),
+ useRegisterAtStart(ins->getOperand(3)));
+ break;
+ default:
+ MOZ_CRASH("Unexpected number of arguments to LHypot.");
+ }
+
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitPow(MPow* ins) {
+ MDefinition* input = ins->input();
+ MDefinition* power = ins->power();
+
+ if (ins->type() == MIRType::Int32) {
+ MOZ_ASSERT(input->type() == MIRType::Int32);
+ MOZ_ASSERT(power->type() == MIRType::Int32);
+
+ if (input->isConstant()) {
+ // Restrict this optimization to |base <= 256| to avoid generating too
+ // many consecutive shift instructions.
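+ // A power-of-two base can then be computed with shifts; e.g. 8 ** power
+ // equals 1 << (3 * power) for in-range, non-negative powers (see
+ // lowerPowOfTwoI).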
+ int32_t base = input->toConstant()->toInt32();
+ if (2 <= base && base <= 256 && mozilla::IsPowerOfTwo(uint32_t(base))) {
+ lowerPowOfTwoI(ins);
+ return;
+ }
+ }
+
+ auto* lir = new (alloc())
+ LPowII(useRegister(input), useRegister(power), temp(), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+ return;
+ }
+
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ MOZ_ASSERT(power->type() == MIRType::Int32 ||
+ power->type() == MIRType::Double);
+
+ LInstruction* lir;
+ if (power->type() == MIRType::Int32) {
+ lir = new (alloc())
+ LPowI(useRegisterAtStart(input), useRegisterAtStart(power));
+ } else {
+ lir = new (alloc())
+ LPowD(useRegisterAtStart(input), useRegisterAtStart(power));
+ }
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitSign(MSign* ins) {
+ if (ins->type() == ins->input()->type()) {
+ LInstructionHelper<1, 1, 0>* lir;
+ if (ins->type() == MIRType::Int32) {
+ lir = new (alloc()) LSignI(useRegister(ins->input()));
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ lir = new (alloc()) LSignD(useRegister(ins->input()));
+ }
+ define(lir, ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->input()->type() == MIRType::Double);
+
+ auto* lir = new (alloc()) LSignDI(useRegister(ins->input()), tempDouble());
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+ }
+}
+
+void LIRGenerator::visitMathFunction(MMathFunction* ins) {
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+ MOZ_ASSERT(ins->type() == ins->input()->type());
+
+ LInstruction* lir;
+ if (ins->type() == MIRType::Double) {
+ lir = new (alloc()) LMathFunctionD(useRegisterAtStart(ins->input()));
+ } else {
+ lir = new (alloc()) LMathFunctionF(useRegisterAtStart(ins->input()));
+ }
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitRandom(MRandom* ins) {
+ auto* lir = new (alloc()) LRandom(temp(), tempInt64(), tempInt64());
+ define(lir, ins);
+}
+
+// Try to mark an add or sub instruction as able to recover its input when
+// bailing out.
+template <typename S, typename T>
+static void MaybeSetRecoversInput(S* mir, T* lir) {
+ MOZ_ASSERT(lir->mirRaw() == mir);
+ if (!mir->fallible() || !lir->snapshot()) {
+ return;
+ }
+
+ if (lir->output()->policy() != LDefinition::MUST_REUSE_INPUT) {
+ return;
+ }
+
+ // The original operands to an add or sub can't be recovered if they both
+ // use the same register.
+ if (lir->lhs()->isUse() && lir->rhs()->isUse() &&
+ lir->lhs()->toUse()->virtualRegister() ==
+ lir->rhs()->toUse()->virtualRegister()) {
+ return;
+ }
+
+ // Add and sub instructions whose operands are two different values can
+ // recover the input they clobbered via MUST_REUSE_INPUT. Thus, a copy of
+ // that input does not need to be kept alive in the snapshot for the
+ // instruction.
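+ // For example, an LAddI that reuses its LHS register can, in effect,
+ // recompute the original LHS during a bailout as (result - rhs), so the
+ // snapshot need not keep a separate copy of it alive.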
+
+ lir->setRecoversInput();
+
+ const LUse* input = lir->getOperand(lir->output()->getReusedInput())->toUse();
+ lir->snapshot()->rewriteRecoveredInput(*input);
+}
+
+void LIRGenerator::visitAdd(MAdd* ins) {
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(IsNumberType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ ReorderCommutative(&lhs, &rhs, ins);
+ LAddI* lir = new (alloc()) LAddI;
+
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+
+ lowerForALU(lir, ins, lhs, rhs);
+ MaybeSetRecoversInput(ins, lir);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+ ReorderCommutative(&lhs, &rhs, ins);
+ LAddI64* lir = new (alloc()) LAddI64;
+ lowerForALUInt64(lir, ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Double) {
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ ReorderCommutative(&lhs, &rhs, ins);
+ lowerForFPU(new (alloc()) LMathD(JSOp::Add), ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Float32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Float32);
+ ReorderCommutative(&lhs, &rhs, ins);
+ lowerForFPU(new (alloc()) LMathF(JSOp::Add), ins, lhs, rhs);
+ return;
+ }
+
+ MOZ_CRASH("Unhandled number specialization");
+}
+
+void LIRGenerator::visitSub(MSub* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(IsNumberType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+
+ LSubI* lir = new (alloc()) LSubI;
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+
+ // If our LHS is a constant 0 and we don't have to worry about results that
+ // can't be represented as an int32, we can optimize to an LNegI.
+ if (!ins->fallible() && lhs->isConstant() &&
+ lhs->toConstant()->toInt32() == 0) {
+ lowerNegI(ins, rhs);
+ return;
+ }
+
+ lowerForALU(lir, ins, lhs, rhs);
+ MaybeSetRecoversInput(ins, lir);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+
+ // If our LHS is a constant 0, we can optimize to an LNegI64.
+ if (lhs->isConstant() && lhs->toConstant()->toInt64() == 0) {
+ lowerNegI64(ins, rhs);
+ return;
+ }
+
+ LSubI64* lir = new (alloc()) LSubI64;
+ lowerForALUInt64(lir, ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Double) {
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ lowerForFPU(new (alloc()) LMathD(JSOp::Sub), ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Float32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Float32);
+ lowerForFPU(new (alloc()) LMathF(JSOp::Sub), ins, lhs, rhs);
+ return;
+ }
+
+ MOZ_CRASH("Unhandled number specialization");
+}
+
+void LIRGenerator::visitMul(MMul* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(IsNumberType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ ReorderCommutative(&lhs, &rhs, ins);
+
+ // If our RHS is a constant -1 and we don't have to worry about results that
+ // can't be represented as an int32, we can optimize to an LNegI.
+ if (!ins->fallible() && rhs->isConstant() &&
+ rhs->toConstant()->toInt32() == -1) {
+ lowerNegI(ins, lhs);
+ return;
+ }
+
+ lowerMulI(ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+ ReorderCommutative(&lhs, &rhs, ins);
+
+ // If our RHS is a constant -1, we can optimize to an LNegI64.
+ if (rhs->isConstant() && rhs->toConstant()->toInt64() == -1) {
+ lowerNegI64(ins, lhs);
+ return;
+ }
+
+ LMulI64* lir = new (alloc()) LMulI64;
+ lowerForMulInt64(lir, ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Double) {
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ ReorderCommutative(&lhs, &rhs, ins);
+
+ // If our RHS is a constant -1.0, we can optimize to an LNegD.
+ if (!ins->mustPreserveNaN() && rhs->isConstant() &&
+ rhs->toConstant()->toDouble() == -1.0) {
+ defineReuseInput(new (alloc()) LNegD(useRegisterAtStart(lhs)), ins, 0);
+ return;
+ }
+
+ lowerForFPU(new (alloc()) LMathD(JSOp::Mul), ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Float32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Float32);
+ ReorderCommutative(&lhs, &rhs, ins);
+
+ // Apply the same optimization as for doubles: a constant -1.0f RHS
+ // becomes an LNegF.
+ if (!ins->mustPreserveNaN() && rhs->isConstant() &&
+ rhs->toConstant()->toFloat32() == -1.0f) {
+ defineReuseInput(new (alloc()) LNegF(useRegisterAtStart(lhs)), ins, 0);
+ return;
+ }
+
+ lowerForFPU(new (alloc()) LMathF(JSOp::Mul), ins, lhs, rhs);
+ return;
+ }
+
+ MOZ_CRASH("Unhandled number specialization");
+}
+
+void LIRGenerator::visitDiv(MDiv* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(IsNumberType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ lowerDivI(ins);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(lhs->type() == MIRType::Int64);
+ lowerDivI64(ins);
+ return;
+ }
+
+ if (ins->type() == MIRType::Double) {
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ lowerForFPU(new (alloc()) LMathD(JSOp::Div), ins, lhs, rhs);
+ return;
+ }
+
+ if (ins->type() == MIRType::Float32) {
+ MOZ_ASSERT(lhs->type() == MIRType::Float32);
+ lowerForFPU(new (alloc()) LMathF(JSOp::Div), ins, lhs, rhs);
+ return;
+ }
+
+ MOZ_CRASH("Unhandled number specialization");
+}
+
+void LIRGenerator::visitWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ lowerWasmBuiltinDivI64(div);
+}
+
+void LIRGenerator::visitWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ lowerWasmBuiltinModI64(mod);
+}
+
+void LIRGenerator::visitBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ lowerBuiltinInt64ToFloatingPoint(ins);
+}
+
+void LIRGenerator::visitWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ lowerWasmBuiltinTruncateToInt64(ins);
+}
+
+void LIRGenerator::visitWasmBuiltinModD(MWasmBuiltinModD* ins) {
+ MOZ_ASSERT(gen->compilingWasm());
+ LWasmBuiltinModD* lir = new (alloc()) LWasmBuiltinModD(
+ useRegisterAtStart(ins->lhs()), useRegisterAtStart(ins->rhs()),
+ useFixedAtStart(ins->instance(), InstanceReg));
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitMod(MMod* ins) {
+ MOZ_ASSERT(ins->lhs()->type() == ins->rhs()->type());
+ MOZ_ASSERT(IsNumberType(ins->type()));
+
+ if (ins->type() == MIRType::Int32) {
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Int32);
+ lowerModI(ins);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(ins->type() == MIRType::Int64);
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Int64);
+ lowerModI64(ins);
+ return;
+ }
+
+ if (ins->type() == MIRType::Double) {
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Double);
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Double);
+
+ MOZ_ASSERT(!gen->compilingWasm());
+
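+ // If the divisor is a positive power-of-two constant (e.g. x % 8.0), the
+ // remainder can be computed inline using a truncating round instead of
+ // calling out to the generic double-modulo path.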
+ if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero)) {
+ if (ins->rhs()->isConstant()) {
+ double d = ins->rhs()->toConstant()->toDouble();
+ int32_t div;
+ if (mozilla::NumberIsInt32(d, &div) && div > 0 &&
+ mozilla::IsPowerOfTwo(uint32_t(div))) {
+ auto* lir = new (alloc()) LModPowTwoD(useRegister(ins->lhs()), div);
+ define(lir, ins);
+ return;
+ }
+ }
+ }
+
+ LModD* lir = new (alloc())
+ LModD(useRegisterAtStart(ins->lhs()), useRegisterAtStart(ins->rhs()));
+ defineReturn(lir, ins);
+ return;
+ }
+
+ MOZ_CRASH("Unhandled number specialization");
+}
+
+void LIRGenerator::visitBigIntAdd(MBigIntAdd* ins) {
+ auto* lir = new (alloc()) LBigIntAdd(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntSub(MBigIntSub* ins) {
+ auto* lir = new (alloc()) LBigIntSub(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntMul(MBigIntMul* ins) {
+ auto* lir = new (alloc()) LBigIntMul(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntDiv(MBigIntDiv* ins) { lowerBigIntDiv(ins); }
+
+void LIRGenerator::visitBigIntMod(MBigIntMod* ins) { lowerBigIntMod(ins); }
+
+void LIRGenerator::visitBigIntPow(MBigIntPow* ins) {
+ auto* lir = new (alloc()) LBigIntPow(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntBitAnd(MBigIntBitAnd* ins) {
+ auto* lir = new (alloc()) LBigIntBitAnd(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntBitOr(MBigIntBitOr* ins) {
+ auto* lir = new (alloc()) LBigIntBitOr(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntBitXor(MBigIntBitXor* ins) {
+ auto* lir = new (alloc()) LBigIntBitXor(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntLsh(MBigIntLsh* ins) { lowerBigIntLsh(ins); }
+
+void LIRGenerator::visitBigIntRsh(MBigIntRsh* ins) { lowerBigIntRsh(ins); }
+
+void LIRGenerator::visitBigIntIncrement(MBigIntIncrement* ins) {
+ auto* lir =
+ new (alloc()) LBigIntIncrement(useRegister(ins->input()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntDecrement(MBigIntDecrement* ins) {
+ auto* lir =
+ new (alloc()) LBigIntDecrement(useRegister(ins->input()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntNegate(MBigIntNegate* ins) {
+ auto* lir = new (alloc()) LBigIntNegate(useRegister(ins->input()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntBitNot(MBigIntBitNot* ins) {
+ auto* lir =
+ new (alloc()) LBigIntBitNot(useRegister(ins->input()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitInt32ToStringWithBase(MInt32ToStringWithBase* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+
+ int32_t baseInt =
+ ins->base()->isConstant() ? ins->base()->toConstant()->toInt32() : 0;
+
+ LAllocation base;
+ if (2 <= baseInt && baseInt <= 36) {
+ base = useRegisterOrConstant(ins->base());
+ } else {
+ base = useRegister(ins->base());
+ }
+
+ auto* lir = new (alloc())
+ LInt32ToStringWithBase(useRegister(ins->input()), base, temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNumberParseInt(MNumberParseInt* ins) {
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->radix()->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LNumberParseInt(useRegisterAtStart(ins->string()),
+ useRegisterAtStart(ins->radix()),
+ tempFixed(CallTempReg0));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitDoubleParseInt(MDoubleParseInt* ins) {
+ MOZ_ASSERT(ins->number()->type() == MIRType::Double);
+
+ auto* lir =
+ new (alloc()) LDoubleParseInt(useRegister(ins->number()), tempDouble());
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitConcat(MConcat* ins) {
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ MOZ_ASSERT(lhs->type() == MIRType::String);
+ MOZ_ASSERT(rhs->type() == MIRType::String);
+ MOZ_ASSERT(ins->type() == MIRType::String);
+
+ LConcat* lir = new (alloc()) LConcat(
+ useFixedAtStart(lhs, CallTempReg0), useFixedAtStart(rhs, CallTempReg1),
+ tempFixed(CallTempReg0), tempFixed(CallTempReg1), tempFixed(CallTempReg2),
+ tempFixed(CallTempReg3), tempFixed(CallTempReg4));
+ defineFixed(lir, ins, LAllocation(AnyRegister(CallTempReg5)));
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitLinearizeForCharAccess(MLinearizeForCharAccess* ins) {
+ MDefinition* str = ins->string();
+ MDefinition* idx = ins->index();
+
+ MOZ_ASSERT(str->type() == MIRType::String);
+ MOZ_ASSERT(idx->type() == MIRType::Int32);
+
+ auto* lir =
+ new (alloc()) LLinearizeForCharAccess(useRegister(str), useRegister(idx));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCharCodeAt(MCharCodeAt* ins) {
+ MDefinition* str = ins->string();
+ MDefinition* idx = ins->index();
+
+ MOZ_ASSERT(str->type() == MIRType::String);
+ MOZ_ASSERT(idx->type() == MIRType::Int32);
+
+ LCharCodeAt* lir = new (alloc())
+ LCharCodeAt(useRegister(str), useRegister(idx), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCharCodeAtMaybeOutOfBounds(
+ MCharCodeAtMaybeOutOfBounds* ins) {
+ MDefinition* str = ins->string();
+ MDefinition* idx = ins->index();
+
+ MOZ_ASSERT(str->type() == MIRType::String);
+ MOZ_ASSERT(idx->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LCharCodeAtMaybeOutOfBounds(
+ useRegister(str), useRegister(idx), temp(), temp());
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCharAtMaybeOutOfBounds(MCharAtMaybeOutOfBounds* ins) {
+ MDefinition* str = ins->string();
+ MDefinition* idx = ins->index();
+
+ MOZ_ASSERT(str->type() == MIRType::String);
+ MOZ_ASSERT(idx->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LCharAtMaybeOutOfBounds(
+ useRegister(str), useRegister(idx), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitFromCharCode(MFromCharCode* ins) {
+ MDefinition* code = ins->getOperand(0);
+
+ MOZ_ASSERT(code->type() == MIRType::Int32);
+
+ LFromCharCode* lir = new (alloc()) LFromCharCode(useRegister(code));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitFromCodePoint(MFromCodePoint* ins) {
+ MDefinition* codePoint = ins->getOperand(0);
+
+ MOZ_ASSERT(codePoint->type() == MIRType::Int32);
+
+ LFromCodePoint* lir =
+ new (alloc()) LFromCodePoint(useRegister(codePoint), temp(), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitStringIndexOf(MStringIndexOf* ins) {
+ auto* string = ins->string();
+ MOZ_ASSERT(string->type() == MIRType::String);
+
+ auto* searchStr = ins->searchString();
+ MOZ_ASSERT(searchStr->type() == MIRType::String);
+
+ auto* lir = new (alloc())
+ LStringIndexOf(useRegisterAtStart(string), useRegisterAtStart(searchStr));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitStringStartsWith(MStringStartsWith* ins) {
+ auto* string = ins->string();
+ MOZ_ASSERT(string->type() == MIRType::String);
+
+ auto* searchStr = ins->searchString();
+ MOZ_ASSERT(searchStr->type() == MIRType::String);
+
+ if (searchStr->isConstant()) {
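+    // A sufficiently short constant search string can have its characters
+    // compared inline; otherwise we fall through to the general path below.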
+ JSLinearString* linear = &searchStr->toConstant()->toString()->asLinear();
+
+ if (CanCompareCharactersInline(linear)) {
+ auto* lir = new (alloc())
+ LStringStartsWithInline(useRegister(string), temp(), linear);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+ }
+
+ auto* lir = new (alloc()) LStringStartsWith(useRegisterAtStart(string),
+ useRegisterAtStart(searchStr));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitStringEndsWith(MStringEndsWith* ins) {
+ auto* string = ins->string();
+ MOZ_ASSERT(string->type() == MIRType::String);
+
+ auto* searchStr = ins->searchString();
+ MOZ_ASSERT(searchStr->type() == MIRType::String);
+
+ if (searchStr->isConstant()) {
+ JSLinearString* linear = &searchStr->toConstant()->toString()->asLinear();
+
+ if (CanCompareCharactersInline(linear)) {
+ auto* lir = new (alloc())
+ LStringEndsWithInline(useRegister(string), temp(), linear);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+ }
+
+ auto* lir = new (alloc()) LStringEndsWith(useRegisterAtStart(string),
+ useRegisterAtStart(searchStr));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitStringConvertCase(MStringConvertCase* ins) {
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+
+ if (ins->mode() == MStringConvertCase::LowerCase) {
+#ifdef JS_CODEGEN_X86
+    // Due to the lack of registers on x86, we reuse the string register as a
+    // temporary. As a result we only need four temporary registers and take a
+    // bogus temporary as the fifth argument.
+ LDefinition temp4 = LDefinition::BogusTemp();
+#else
+ LDefinition temp4 = temp();
+#endif
+ auto* lir = new (alloc())
+ LStringToLowerCase(useRegister(ins->string()), temp(), temp(), temp(),
+ temp4, tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ } else {
+ auto* lir =
+ new (alloc()) LStringToUpperCase(useRegisterAtStart(ins->string()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+ }
+}
+
+void LIRGenerator::visitStart(MStart* start) {}
+
+void LIRGenerator::visitNop(MNop* nop) {}
+
+void LIRGenerator::visitLimitedTruncate(MLimitedTruncate* nop) {
+ redefine(nop, nop->input());
+}
+
+void LIRGenerator::visitOsrEntry(MOsrEntry* entry) {
+ LOsrEntry* lir = new (alloc()) LOsrEntry(temp());
+ defineFixed(lir, entry, LAllocation(AnyRegister(OsrFrameReg)));
+}
+
+void LIRGenerator::visitOsrValue(MOsrValue* value) {
+ LOsrValue* lir = new (alloc()) LOsrValue(useRegister(value->entry()));
+ defineBox(lir, value);
+}
+
+void LIRGenerator::visitOsrReturnValue(MOsrReturnValue* value) {
+ LOsrReturnValue* lir =
+ new (alloc()) LOsrReturnValue(useRegister(value->entry()));
+ defineBox(lir, value);
+}
+
+void LIRGenerator::visitOsrEnvironmentChain(MOsrEnvironmentChain* object) {
+ LOsrEnvironmentChain* lir =
+ new (alloc()) LOsrEnvironmentChain(useRegister(object->entry()));
+ define(lir, object);
+}
+
+void LIRGenerator::visitOsrArgumentsObject(MOsrArgumentsObject* object) {
+ LOsrArgumentsObject* lir =
+ new (alloc()) LOsrArgumentsObject(useRegister(object->entry()));
+ define(lir, object);
+}
+
+void LIRGenerator::visitToDouble(MToDouble* convert) {
+ MDefinition* opd = convert->input();
+ mozilla::DebugOnly<MToFPInstruction::ConversionKind> conversion =
+ convert->conversion();
+
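+  // Dispatch on the operand's static type: boxed Values take the fallible
+  // conversion path, numeric inputs get a direct conversion, and a Double
+  // input is simply redefined.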
+ switch (opd->type()) {
+ case MIRType::Value: {
+ LValueToDouble* lir = new (alloc()) LValueToDouble(useBox(opd));
+ assignSnapshot(lir, convert->bailoutKind());
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Null:
+ MOZ_ASSERT(conversion == MToFPInstruction::NonStringPrimitives);
+ lowerConstantDouble(0, convert);
+ break;
+
+ case MIRType::Undefined:
+ MOZ_ASSERT(conversion == MToFPInstruction::NonStringPrimitives);
+ lowerConstantDouble(GenericNaN(), convert);
+ break;
+
+ case MIRType::Boolean:
+ MOZ_ASSERT(conversion == MToFPInstruction::NonStringPrimitives);
+ [[fallthrough]];
+
+ case MIRType::Int32: {
+ LInt32ToDouble* lir =
+ new (alloc()) LInt32ToDouble(useRegisterAtStart(opd));
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Float32: {
+ LFloat32ToDouble* lir =
+ new (alloc()) LFloat32ToDouble(useRegisterAtStart(opd));
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Double:
+ redefine(convert, opd);
+ break;
+
+ default:
+ // Objects might be effectful. Symbols will throw.
+ // Strings are complicated - we don't handle them yet.
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void LIRGenerator::visitToFloat32(MToFloat32* convert) {
+ MDefinition* opd = convert->input();
+ mozilla::DebugOnly<MToFloat32::ConversionKind> conversion =
+ convert->conversion();
+
+ switch (opd->type()) {
+ case MIRType::Value: {
+ LValueToFloat32* lir = new (alloc()) LValueToFloat32(useBox(opd));
+ assignSnapshot(lir, convert->bailoutKind());
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Null:
+ MOZ_ASSERT(conversion == MToFPInstruction::NonStringPrimitives);
+ lowerConstantFloat32(0, convert);
+ break;
+
+ case MIRType::Undefined:
+ MOZ_ASSERT(conversion == MToFPInstruction::NonStringPrimitives);
+ lowerConstantFloat32(GenericNaN(), convert);
+ break;
+
+ case MIRType::Boolean:
+ MOZ_ASSERT(conversion == MToFPInstruction::NonStringPrimitives);
+ [[fallthrough]];
+
+ case MIRType::Int32: {
+ LInt32ToFloat32* lir =
+ new (alloc()) LInt32ToFloat32(useRegisterAtStart(opd));
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Double: {
+ LDoubleToFloat32* lir =
+ new (alloc()) LDoubleToFloat32(useRegisterAtStart(opd));
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Float32:
+ redefine(convert, opd);
+ break;
+
+ default:
+ // Objects might be effectful. Symbols will throw.
+ // Strings are complicated - we don't handle them yet.
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void LIRGenerator::visitToNumberInt32(MToNumberInt32* convert) {
+ MDefinition* opd = convert->input();
+
+ switch (opd->type()) {
+ case MIRType::Value: {
+ auto* lir = new (alloc()) LValueToInt32(useBox(opd), tempDouble(), temp(),
+ LValueToInt32::NORMAL);
+ assignSnapshot(lir, convert->bailoutKind());
+ define(lir, convert);
+ if (lir->mode() == LValueToInt32::TRUNCATE) {
+ assignSafepoint(lir, convert);
+ }
+ break;
+ }
+
+ case MIRType::Null:
+ MOZ_ASSERT(convert->conversion() == IntConversionInputKind::Any);
+ define(new (alloc()) LInteger(0), convert);
+ break;
+
+ case MIRType::Boolean:
+ MOZ_ASSERT(convert->conversion() == IntConversionInputKind::Any ||
+ convert->conversion() ==
+ IntConversionInputKind::NumbersOrBoolsOnly);
+ redefine(convert, opd);
+ break;
+
+ case MIRType::Int32:
+ redefine(convert, opd);
+ break;
+
+ case MIRType::Float32: {
+ LFloat32ToInt32* lir = new (alloc()) LFloat32ToInt32(useRegister(opd));
+ assignSnapshot(lir, convert->bailoutKind());
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::Double: {
+ LDoubleToInt32* lir = new (alloc()) LDoubleToInt32(useRegister(opd));
+ assignSnapshot(lir, convert->bailoutKind());
+ define(lir, convert);
+ break;
+ }
+
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ case MIRType::Object:
+ case MIRType::Undefined:
+ // Objects might be effectful. Symbols and BigInts throw. Undefined
+ // coerces to NaN, not int32.
+ MOZ_CRASH("ToInt32 invalid input type");
+
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void LIRGenerator::visitBooleanToInt32(MBooleanToInt32* convert) {
+ MDefinition* opd = convert->input();
+ MOZ_ASSERT(opd->type() == MIRType::Boolean);
+ redefine(convert, opd);
+}
+
+void LIRGenerator::visitTruncateToInt32(MTruncateToInt32* truncate) {
+ MDefinition* opd = truncate->input();
+
+ switch (opd->type()) {
+ case MIRType::Value: {
+ LValueToInt32* lir = new (alloc()) LValueToInt32(
+ useBox(opd), tempDouble(), temp(), LValueToInt32::TRUNCATE);
+ assignSnapshot(lir, truncate->bailoutKind());
+ define(lir, truncate);
+ assignSafepoint(lir, truncate);
+ break;
+ }
+
+ case MIRType::Null:
+ case MIRType::Undefined:
+ define(new (alloc()) LInteger(0), truncate);
+ break;
+
+ case MIRType::Int32:
+ case MIRType::Boolean:
+ redefine(truncate, opd);
+ break;
+
+ case MIRType::Double:
+ // May call into JS::ToInt32() on the slow OOL path.
+ gen->setNeedsStaticStackAlignment();
+ lowerTruncateDToInt32(truncate);
+ break;
+
+ case MIRType::Float32:
+ // May call into JS::ToInt32() on the slow OOL path.
+ gen->setNeedsStaticStackAlignment();
+ lowerTruncateFToInt32(truncate);
+ break;
+
+ default:
+ // Objects might be effectful. Symbols throw.
+ // Strings are complicated - we don't handle them yet.
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void LIRGenerator::visitInt32ToIntPtr(MInt32ToIntPtr* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+#ifdef JS_64BIT
+ // If the result is only used by instructions that expect a bounds-checked
+ // index, we must have eliminated or hoisted a bounds check and we can assume
+ // the index is non-negative. This lets us generate more efficient code.
+ if (ins->canBeNegative()) {
+ bool canBeNegative = false;
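+    // Inspect every use: if any consumer is not one of the bounds-check-aware
+    // loads/stores listed below, we must assume the index can be negative.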
+ for (MUseDefIterator iter(ins); iter; iter++) {
+ if (!iter.def()->isSpectreMaskIndex() &&
+ !iter.def()->isLoadUnboxedScalar() &&
+ !iter.def()->isStoreUnboxedScalar() &&
+ !iter.def()->isLoadDataViewElement() &&
+ !iter.def()->isStoreDataViewElement()) {
+ canBeNegative = true;
+ break;
+ }
+ }
+ if (!canBeNegative) {
+ ins->setCanNotBeNegative();
+ }
+ }
+
+ if (ins->canBeNegative()) {
+ auto* lir = new (alloc()) LInt32ToIntPtr(useAnyAtStart(input));
+ define(lir, ins);
+ } else {
+ redefine(ins, input);
+ }
+#else
+ // On 32-bit platforms this is a no-op.
+ redefine(ins, input);
+#endif
+}
+
+void LIRGenerator::visitNonNegativeIntPtrToInt32(
+ MNonNegativeIntPtrToInt32* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::IntPtr);
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+
+#ifdef JS_64BIT
+ auto* lir =
+ new (alloc()) LNonNegativeIntPtrToInt32(useRegisterAtStart(input));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReuseInput(lir, ins, 0);
+#else
+ // On 32-bit platforms this is a no-op.
+ redefine(ins, input);
+#endif
+}
+
+void LIRGenerator::visitWasmExtendU32Index(MWasmExtendU32Index* ins) {
+#ifdef JS_64BIT
+  // Technically this produces an Int64 register, which we could clean up, but
+  // since this is a 64-bit-only operation it doesn't actually matter.
+
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::Int64);
+
+ // Input reuse is OK even on ARM64 because this node *must* reuse its input in
+ // order not to generate any code at all, as is the intent.
+ auto* lir = new (alloc()) LWasmExtendU32Index(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+#else
+ MOZ_CRASH("64-bit only");
+#endif
+}
+
+void LIRGenerator::visitWasmWrapU32Index(MWasmWrapU32Index* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Int64);
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+
+ // Tricky: On 64-bit, this just returns its input (except on MIPS64 there may
+ // be a sign/zero extension). On 32-bit, it returns the low register of the
+ // input, and should generate no code.
+
+ // If this assertion does not hold then using "input" unadorned as an alias
+ // for the low register will not work.
+#if defined(JS_NUNBOX32)
+ static_assert(INT64LOW_INDEX == 0);
+#endif
+
+ auto* lir = new (alloc()) LWasmWrapU32Index(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitIntPtrToDouble(MIntPtrToDouble* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::IntPtr);
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+
+ auto* lir = new (alloc()) LIntPtrToDouble(useRegister(input));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAdjustDataViewLength(MAdjustDataViewLength* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::IntPtr);
+
+ auto* lir = new (alloc()) LAdjustDataViewLength(useRegisterAtStart(input));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitToBigInt(MToBigInt* ins) {
+ MDefinition* opd = ins->input();
+
+ switch (opd->type()) {
+ case MIRType::Value: {
+ auto* lir = new (alloc()) LValueToBigInt(useBox(opd));
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ case MIRType::BigInt:
+ redefine(ins, opd);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void LIRGenerator::visitToInt64(MToInt64* ins) {
+ MDefinition* opd = ins->input();
+
+ switch (opd->type()) {
+ case MIRType::Value: {
+ auto* lir = new (alloc()) LValueToInt64(useBox(opd), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineInt64(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ case MIRType::Boolean: {
+ auto* lir = new (alloc()) LBooleanToInt64(useRegisterAtStart(opd));
+ defineInt64(lir, ins);
+ break;
+ }
+
+ case MIRType::String: {
+ auto* lir = new (alloc()) LStringToInt64(useRegister(opd));
+ defineInt64(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ // An Int64 may be passed here from a BigInt to Int64 conversion.
+ case MIRType::Int64: {
+ redefine(ins, opd);
+ break;
+ }
+
+ default:
+ // Undefined, Null, Number, and Symbol throw.
+ // Objects may be effectful.
+ // BigInt operands are eliminated by the type policy.
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void LIRGenerator::visitTruncateBigIntToInt64(MTruncateBigIntToInt64* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::BigInt);
+ auto* lir = new (alloc()) LTruncateBigIntToInt64(useRegister(ins->input()));
+ defineInt64(lir, ins);
+}
+
+void LIRGenerator::visitInt64ToBigInt(MInt64ToBigInt* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int64);
+ auto* lir =
+ new (alloc()) LInt64ToBigInt(useInt64Register(ins->input()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmTruncateToInt32(MWasmTruncateToInt32* ins) {
+ MDefinition* input = ins->input();
+ switch (input->type()) {
+ case MIRType::Double:
+ case MIRType::Float32: {
+ auto* lir = new (alloc()) LWasmTruncateToInt32(useRegisterAtStart(input));
+ define(lir, ins);
+ break;
+ }
+ default:
+ MOZ_CRASH("unexpected type in WasmTruncateToInt32");
+ }
+}
+
+void LIRGenerator::visitWasmBuiltinTruncateToInt32(
+ MWasmBuiltinTruncateToInt32* truncate) {
+ mozilla::DebugOnly<MDefinition*> opd = truncate->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ // May call into JS::ToInt32() on the slow OOL path.
+ gen->setNeedsStaticStackAlignment();
+ lowerWasmBuiltinTruncateToInt32(truncate);
+}
+
+void LIRGenerator::visitWasmBoxValue(MWasmBoxValue* ins) {
+ LWasmBoxValue* lir = new (alloc()) LWasmBoxValue(useBox(ins->input()));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmAnyRefFromJSObject(MWasmAnyRefFromJSObject* ins) {
+ LWasmAnyRefFromJSObject* lir =
+ new (alloc()) LWasmAnyRefFromJSObject(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWrapInt64ToInt32(MWrapInt64ToInt32* ins) {
+ define(new (alloc()) LWrapInt64ToInt32(useInt64AtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitToString(MToString* ins) {
+ MDefinition* opd = ins->input();
+
+ switch (opd->type()) {
+ case MIRType::Null: {
+ const JSAtomState& names = gen->runtime->names();
+ LPointer* lir = new (alloc()) LPointer(names.null);
+ define(lir, ins);
+ break;
+ }
+
+ case MIRType::Undefined: {
+ const JSAtomState& names = gen->runtime->names();
+ LPointer* lir = new (alloc()) LPointer(names.undefined);
+ define(lir, ins);
+ break;
+ }
+
+ case MIRType::Boolean: {
+ LBooleanToString* lir = new (alloc()) LBooleanToString(useRegister(opd));
+ define(lir, ins);
+ break;
+ }
+
+ case MIRType::Double: {
+ LDoubleToString* lir =
+ new (alloc()) LDoubleToString(useRegister(opd), temp());
+
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ case MIRType::Int32: {
+ LIntToString* lir = new (alloc()) LIntToString(useRegister(opd));
+
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ case MIRType::String:
+ redefine(ins, ins->input());
+ break;
+
+ case MIRType::Value: {
+ LValueToString* lir =
+ new (alloc()) LValueToString(useBox(opd), tempToUnbox());
+ if (ins->needsSnapshot()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ default:
+ // Float32, symbols, bigint, and objects are not supported.
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void LIRGenerator::visitRegExp(MRegExp* ins) {
+ LRegExp* lir = new (alloc()) LRegExp(temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitRegExpMatcher(MRegExpMatcher* ins) {
+ MOZ_ASSERT(ins->regexp()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->lastIndex()->type() == MIRType::Int32);
+
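+  // The inputs are passed in the fixed registers expected by the regexp
+  // matcher stub; the result comes back through the usual call return value.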
+ LRegExpMatcher* lir = new (alloc()) LRegExpMatcher(
+ useFixedAtStart(ins->regexp(), RegExpMatcherRegExpReg),
+ useFixedAtStart(ins->string(), RegExpMatcherStringReg),
+ useFixedAtStart(ins->lastIndex(), RegExpMatcherLastIndexReg));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitRegExpSearcher(MRegExpSearcher* ins) {
+ MOZ_ASSERT(ins->regexp()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->lastIndex()->type() == MIRType::Int32);
+
+ LRegExpSearcher* lir = new (alloc()) LRegExpSearcher(
+ useFixedAtStart(ins->regexp(), RegExpSearcherRegExpReg),
+ useFixedAtStart(ins->string(), RegExpSearcherStringReg),
+ useFixedAtStart(ins->lastIndex(), RegExpSearcherLastIndexReg));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitRegExpExecMatch(MRegExpExecMatch* ins) {
+ MOZ_ASSERT(ins->regexp()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+
+ auto* lir = new (alloc())
+ LRegExpExecMatch(useFixedAtStart(ins->regexp(), RegExpMatcherRegExpReg),
+ useFixedAtStart(ins->string(), RegExpMatcherStringReg));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitRegExpExecTest(MRegExpExecTest* ins) {
+ MOZ_ASSERT(ins->regexp()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+
+ auto* lir = new (alloc())
+ LRegExpExecTest(useFixedAtStart(ins->regexp(), RegExpExecTestRegExpReg),
+ useFixedAtStart(ins->string(), RegExpExecTestStringReg));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitRegExpPrototypeOptimizable(
+ MRegExpPrototypeOptimizable* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ LRegExpPrototypeOptimizable* lir = new (alloc())
+ LRegExpPrototypeOptimizable(useRegister(ins->object()), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitRegExpInstanceOptimizable(
+ MRegExpInstanceOptimizable* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->proto()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ LRegExpInstanceOptimizable* lir = new (alloc()) LRegExpInstanceOptimizable(
+ useRegister(ins->object()), useRegister(ins->proto()), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGetFirstDollarIndex(MGetFirstDollarIndex* ins) {
+ MOZ_ASSERT(ins->str()->type() == MIRType::String);
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ LGetFirstDollarIndex* lir = new (alloc())
+ LGetFirstDollarIndex(useRegister(ins->str()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitStringReplace(MStringReplace* ins) {
+ MOZ_ASSERT(ins->pattern()->type() == MIRType::String);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->replacement()->type() == MIRType::String);
+
+ LStringReplace* lir = new (alloc())
+ LStringReplace(useRegisterOrConstantAtStart(ins->string()),
+ useRegisterAtStart(ins->pattern()),
+ useRegisterOrConstantAtStart(ins->replacement()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBinaryCache(MBinaryCache* ins) {
+ MDefinition* lhs = ins->getOperand(0);
+ MDefinition* rhs = ins->getOperand(1);
+
+ MOZ_ASSERT(ins->type() == MIRType::Value || ins->type() == MIRType::Boolean);
+ LInstruction* lir;
+ if (ins->type() == MIRType::Value) {
+ LBinaryValueCache* valueLir = new (alloc()) LBinaryValueCache(
+ useBox(lhs), useBox(rhs), tempFixed(FloatReg0), tempFixed(FloatReg1));
+ defineBox(valueLir, ins);
+ lir = valueLir;
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ LBinaryBoolCache* boolLir = new (alloc()) LBinaryBoolCache(
+ useBox(lhs), useBox(rhs), tempFixed(FloatReg0), tempFixed(FloatReg1));
+ define(boolLir, ins);
+ lir = boolLir;
+ }
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitUnaryCache(MUnaryCache* ins) {
+ MDefinition* input = ins->getOperand(0);
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ LUnaryCache* lir = new (alloc()) LUnaryCache(useBox(input));
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitModuleMetadata(MModuleMetadata* ins) {
+ LModuleMetadata* lir = new (alloc()) LModuleMetadata();
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitDynamicImport(MDynamicImport* ins) {
+ LDynamicImport* lir = new (alloc()) LDynamicImport(
+ useBoxAtStart(ins->specifier()), useBoxAtStart(ins->options()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitLambda(MLambda* ins) {
+ MOZ_ASSERT(ins->environmentChain()->type() == MIRType::Object);
+
+ auto* lir =
+ new (alloc()) LLambda(useRegister(ins->environmentChain()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitFunctionWithProto(MFunctionWithProto* ins) {
+ MOZ_ASSERT(ins->environmentChain()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->prototype()->type() == MIRType::Object);
+
+ auto* lir = new (alloc())
+ LFunctionWithProto(useRegisterAtStart(ins->environmentChain()),
+ useRegisterAtStart(ins->prototype()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitSetFunName(MSetFunName* ins) {
+ MOZ_ASSERT(ins->fun()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->name()->type() == MIRType::Value);
+
+ LSetFunName* lir = new (alloc())
+ LSetFunName(useRegisterAtStart(ins->fun()), useBoxAtStart(ins->name()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewLexicalEnvironmentObject(
+ MNewLexicalEnvironmentObject* ins) {
+ auto* lir = new (alloc()) LNewLexicalEnvironmentObject(temp());
+
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewClassBodyEnvironmentObject(
+ MNewClassBodyEnvironmentObject* ins) {
+ auto* lir = new (alloc()) LNewClassBodyEnvironmentObject(temp());
+
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewVarEnvironmentObject(MNewVarEnvironmentObject* ins) {
+ auto* lir = new (alloc()) LNewVarEnvironmentObject(temp());
+
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitKeepAliveObject(MKeepAliveObject* ins) {
+ MDefinition* obj = ins->object();
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ add(new (alloc()) LKeepAliveObject(useKeepalive(obj)), ins);
+}
+
+void LIRGenerator::visitDebugEnterGCUnsafeRegion(
+ MDebugEnterGCUnsafeRegion* ins) {
+ add(new (alloc()) LDebugEnterGCUnsafeRegion(temp()), ins);
+}
+
+void LIRGenerator::visitDebugLeaveGCUnsafeRegion(
+ MDebugLeaveGCUnsafeRegion* ins) {
+ add(new (alloc()) LDebugLeaveGCUnsafeRegion(temp()), ins);
+}
+
+void LIRGenerator::visitSlots(MSlots* ins) {
+ define(new (alloc()) LSlots(useRegisterAtStart(ins->object())), ins);
+}
+
+void LIRGenerator::visitElements(MElements* ins) {
+ define(new (alloc()) LElements(useRegisterAtStart(ins->object())), ins);
+}
+
+void LIRGenerator::visitLoadDynamicSlot(MLoadDynamicSlot* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+ defineBox(new (alloc()) LLoadDynamicSlotV(useRegisterAtStart(ins->slots())),
+ ins);
+}
+
+void LIRGenerator::visitFunctionEnvironment(MFunctionEnvironment* ins) {
+ define(new (alloc())
+ LFunctionEnvironment(useRegisterAtStart(ins->function())),
+ ins);
+}
+
+void LIRGenerator::visitHomeObject(MHomeObject* ins) {
+ define(new (alloc()) LHomeObject(useRegisterAtStart(ins->function())), ins);
+}
+
+void LIRGenerator::visitHomeObjectSuperBase(MHomeObjectSuperBase* ins) {
+ MOZ_ASSERT(ins->homeObject()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ auto lir =
+ new (alloc()) LHomeObjectSuperBase(useRegisterAtStart(ins->homeObject()));
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitInterruptCheck(MInterruptCheck* ins) {
+ LInstruction* lir = new (alloc()) LInterruptCheck();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmInterruptCheck(MWasmInterruptCheck* ins) {
+ auto* lir =
+ new (alloc()) LWasmInterruptCheck(useRegisterAtStart(ins->instance()));
+ add(lir, ins);
+ assignWasmSafepoint(lir);
+}
+
+void LIRGenerator::visitWasmTrap(MWasmTrap* ins) {
+ add(new (alloc()) LWasmTrap, ins);
+}
+
+void LIRGenerator::visitWasmTrapIfNull(MWasmTrapIfNull* ins) {
+ auto* lir = new (alloc()) LWasmTrapIfNull(useRegister(ins->value()));
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmReinterpret(MWasmReinterpret* ins) {
+ if (ins->type() == MIRType::Int64) {
+ defineInt64(new (alloc())
+ LWasmReinterpretToI64(useRegisterAtStart(ins->input())),
+ ins);
+ } else if (ins->input()->type() == MIRType::Int64) {
+ define(new (alloc())
+ LWasmReinterpretFromI64(useInt64RegisterAtStart(ins->input())),
+ ins);
+ } else {
+ define(new (alloc()) LWasmReinterpret(useRegisterAtStart(ins->input())),
+ ins);
+ }
+}
+
+void LIRGenerator::visitStoreDynamicSlot(MStoreDynamicSlot* ins) {
+ LInstruction* lir;
+
+ switch (ins->value()->type()) {
+ case MIRType::Value:
+ lir = new (alloc())
+ LStoreDynamicSlotV(useRegister(ins->slots()), useBox(ins->value()));
+ add(lir, ins);
+ break;
+
+ case MIRType::Double:
+ add(new (alloc()) LStoreDynamicSlotT(useRegister(ins->slots()),
+ useRegister(ins->value())),
+ ins);
+ break;
+
+ case MIRType::Float32:
+ MOZ_CRASH("Float32 shouldn't be stored in a slot.");
+
+ default:
+ add(new (alloc()) LStoreDynamicSlotT(useRegister(ins->slots()),
+ useRegisterOrConstant(ins->value())),
+ ins);
+ break;
+ }
+}
+
+// Returns true iff |def| is a constant that's either not a GC thing or is not
+// allocated in the nursery.
+static bool IsNonNurseryConstant(MDefinition* def) {
+ if (!def->isConstant()) {
+ return false;
+ }
+ Value v = def->toConstant()->toJSValue();
+ return !v.isGCThing() || !IsInsideNursery(v.toGCThing());
+}
+
+void LIRGenerator::visitPostWriteBarrier(MPostWriteBarrier* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ // LPostWriteBarrier assumes that if it has a constant object then that
+ // object is tenured, and does not need to be tested for being in the
+ // nursery. Ensure that assumption holds by lowering constant nursery
+ // objects to a register.
+ bool useConstantObject = IsNonNurseryConstant(ins->object());
+
+ switch (ins->value()->type()) {
+ case MIRType::Object: {
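+      // The generated barrier code needs a scratch register on some platforms;
+      // needTempForPostBarrier() reports whether this target is one of them.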
+ LDefinition tmp =
+ needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteBarrierO* lir = new (alloc())
+ LPostWriteBarrierO(useConstantObject ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->value()), tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ case MIRType::String: {
+ LDefinition tmp =
+ needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteBarrierS* lir = new (alloc())
+ LPostWriteBarrierS(useConstantObject ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->value()), tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ case MIRType::BigInt: {
+ LDefinition tmp =
+ needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ auto* lir = new (alloc())
+ LPostWriteBarrierBI(useConstantObject ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->value()), tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ case MIRType::Value: {
+ LDefinition tmp =
+ needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteBarrierV* lir = new (alloc())
+ LPostWriteBarrierV(useConstantObject ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useBox(ins->value()), tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ default:
+      // Currently, only objects, strings, and bigints can be in the nursery.
+      // Other instruction types cannot hold nursery pointers.
+ break;
+ }
+}
+
+void LIRGenerator::visitPostWriteElementBarrier(MPostWriteElementBarrier* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ // LPostWriteElementBarrier assumes that if it has a constant object then that
+ // object is tenured, and does not need to be tested for being in the
+ // nursery. Ensure that assumption holds by lowering constant nursery
+ // objects to a register.
+ bool useConstantObject =
+ ins->object()->isConstant() &&
+ !IsInsideNursery(&ins->object()->toConstant()->toObject());
+
+ switch (ins->value()->type()) {
+ case MIRType::Object: {
+ LDefinition tmp =
+ needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteElementBarrierO* lir = new (alloc()) LPostWriteElementBarrierO(
+ useConstantObject ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->value()), useRegister(ins->index()), tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ case MIRType::String: {
+ LDefinition tmp =
+ needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteElementBarrierS* lir = new (alloc()) LPostWriteElementBarrierS(
+ useConstantObject ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->value()), useRegister(ins->index()), tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ case MIRType::BigInt: {
+ LDefinition tmp =
+ needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ auto* lir = new (alloc()) LPostWriteElementBarrierBI(
+ useConstantObject ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->value()), useRegister(ins->index()), tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ case MIRType::Value: {
+ LDefinition tmp =
+ needTempForPostBarrier() ? temp() : LDefinition::BogusTemp();
+ LPostWriteElementBarrierV* lir = new (alloc()) LPostWriteElementBarrierV(
+ useConstantObject ? useOrConstant(ins->object())
+ : useRegister(ins->object()),
+ useRegister(ins->index()), useBox(ins->value()), tmp);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+ default:
+ // Currently, only objects, strings, and bigints can be in the nursery.
+ // Other instruction types cannot hold nursery pointers.
+ break;
+ }
+}
+
+void LIRGenerator::visitAssertCanElidePostWriteBarrier(
+ MAssertCanElidePostWriteBarrier* ins) {
+ auto* lir = new (alloc()) LAssertCanElidePostWriteBarrier(
+ useRegister(ins->object()), useBox(ins->value()), temp());
+ add(lir, ins);
+}
+
+void LIRGenerator::visitArrayLength(MArrayLength* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ auto* lir = new (alloc()) LArrayLength(useRegisterAtStart(ins->elements()));
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitSetArrayLength(MSetArrayLength* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ MOZ_ASSERT(ins->index()->isConstant());
+ add(new (alloc()) LSetArrayLength(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index())),
+ ins);
+}
+
+void LIRGenerator::visitFunctionLength(MFunctionLength* ins) {
+ MOZ_ASSERT(ins->function()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LFunctionLength(useRegister(ins->function()));
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitFunctionName(MFunctionName* ins) {
+ MOZ_ASSERT(ins->function()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LFunctionName(useRegister(ins->function()));
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGetNextEntryForIterator(MGetNextEntryForIterator* ins) {
+ MOZ_ASSERT(ins->iter()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->result()->type() == MIRType::Object);
+ auto lir = new (alloc()) LGetNextEntryForIterator(useRegister(ins->iter()),
+ useRegister(ins->result()),
+ temp(), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitArrayBufferByteLength(MArrayBufferByteLength* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto* lir =
+ new (alloc()) LArrayBufferByteLength(useRegisterAtStart(ins->object()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitArrayBufferViewLength(MArrayBufferViewLength* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto* lir =
+ new (alloc()) LArrayBufferViewLength(useRegisterAtStart(ins->object()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitArrayBufferViewByteOffset(
+ MArrayBufferViewByteOffset* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto* lir = new (alloc())
+ LArrayBufferViewByteOffset(useRegisterAtStart(ins->object()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitArrayBufferViewElements(MArrayBufferViewElements* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Elements);
+ define(new (alloc())
+ LArrayBufferViewElements(useRegisterAtStart(ins->object())),
+ ins);
+}
+
+void LIRGenerator::visitTypedArrayElementSize(MTypedArrayElementSize* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ define(new (alloc())
+ LTypedArrayElementSize(useRegisterAtStart(ins->object())),
+ ins);
+}
+
+void LIRGenerator::visitGuardHasAttachedArrayBuffer(
+ MGuardHasAttachedArrayBuffer* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc())
+ LGuardHasAttachedArrayBuffer(useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardNumberToIntPtrIndex(
+ MGuardNumberToIntPtrIndex* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+
+ auto* lir = new (alloc()) LGuardNumberToIntPtrIndex(useRegister(input));
+ if (!ins->supportOOB()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
+}
+
+void LIRGenerator::visitInitializedLength(MInitializedLength* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ define(new (alloc()) LInitializedLength(useRegisterAtStart(ins->elements())),
+ ins);
+}
+
+void LIRGenerator::visitSetInitializedLength(MSetInitializedLength* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ MOZ_ASSERT(ins->index()->isConstant());
+ add(new (alloc()) LSetInitializedLength(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index())),
+ ins);
+}
+
+void LIRGenerator::visitNot(MNot* ins) {
+ MDefinition* op = ins->input();
+
+ // String is converted to length of string in the type analysis phase (see
+ // TestPolicy).
+ MOZ_ASSERT(op->type() != MIRType::String);
+
+ // - boolean: x xor 1
+  // - int32 or int64: LNotI(x) / LNotI64(x)
+  // - double or float32: LNotD(x) / LNotF(x)
+ // - null or undefined: true
+ // - symbol: false
+ // - bigint: LNotBI(x)
+ // - object: false if it never emulates undefined, else LNotO(x)
+ switch (op->type()) {
+ case MIRType::Boolean: {
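+      // !b is computed as (b xor 1); materialize the constant 1 as a MIR node
+      // so it can feed the ALU lowering below.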
+ MConstant* cons = MConstant::New(alloc(), Int32Value(1));
+ ins->block()->insertBefore(ins, cons);
+ lowerForALU(new (alloc()) LBitOpI(JSOp::BitXor), ins, op, cons);
+ break;
+ }
+ case MIRType::Int32:
+ define(new (alloc()) LNotI(useRegisterAtStart(op)), ins);
+ break;
+ case MIRType::Int64:
+ define(new (alloc()) LNotI64(useInt64RegisterAtStart(op)), ins);
+ break;
+ case MIRType::Double:
+ define(new (alloc()) LNotD(useRegister(op)), ins);
+ break;
+ case MIRType::Float32:
+ define(new (alloc()) LNotF(useRegister(op)), ins);
+ break;
+ case MIRType::Undefined:
+ case MIRType::Null:
+ define(new (alloc()) LInteger(1), ins);
+ break;
+ case MIRType::Symbol:
+ define(new (alloc()) LInteger(0), ins);
+ break;
+ case MIRType::BigInt:
+ define(new (alloc()) LNotBI(useRegisterAtStart(op)), ins);
+ break;
+ case MIRType::Object:
+ define(new (alloc()) LNotO(useRegister(op)), ins);
+ break;
+ case MIRType::Value: {
+ auto* lir = new (alloc()) LNotV(useBox(op), tempDouble(), tempToUnbox());
+ define(lir, ins);
+ break;
+ }
+
+ default:
+ MOZ_CRASH("Unexpected MIRType.");
+ }
+}
+
+void LIRGenerator::visitBoundsCheck(MBoundsCheck* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Int32 || ins->type() == MIRType::IntPtr);
+ MOZ_ASSERT(ins->index()->type() == ins->type());
+ MOZ_ASSERT(ins->length()->type() == ins->type());
+
+ if (!ins->fallible()) {
+ return;
+ }
+
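+  // Use the range variant when the access has a non-zero minimum or maximum
+  // offset; otherwise a simple index-versus-length check suffices.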
+ LInstruction* check;
+ if (ins->minimum() || ins->maximum()) {
+ check = new (alloc())
+ LBoundsCheckRange(useRegisterOrInt32Constant(ins->index()),
+ useAny(ins->length()), temp());
+ } else {
+ check = new (alloc()) LBoundsCheck(useRegisterOrInt32Constant(ins->index()),
+ useAnyOrInt32Constant(ins->length()));
+ }
+ assignSnapshot(check, ins->bailoutKind());
+ add(check, ins);
+}
+
+void LIRGenerator::visitSpectreMaskIndex(MSpectreMaskIndex* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Int32 || ins->type() == MIRType::IntPtr);
+ MOZ_ASSERT(ins->index()->type() == ins->type());
+ MOZ_ASSERT(ins->length()->type() == ins->type());
+
+ auto* lir = new (alloc())
+ LSpectreMaskIndex(useRegister(ins->index()), useAny(ins->length()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitBoundsCheckLower(MBoundsCheckLower* ins) {
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ if (!ins->fallible()) {
+ return;
+ }
+
+ LInstruction* check =
+ new (alloc()) LBoundsCheckLower(useRegister(ins->index()));
+ assignSnapshot(check, ins->bailoutKind());
+ add(check, ins);
+}
+
+void LIRGenerator::visitInArray(MInArray* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->initLength()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+
+ auto* lir = new (alloc()) LInArray(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index()),
+ useRegister(ins->initLength()));
+ if (ins->needsNegativeIntCheck()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGuardElementNotHole(MGuardElementNotHole* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ auto* guard = new (alloc())
+ LGuardElementNotHole(useRegisterAtStart(ins->elements()),
+ useRegisterOrConstantAtStart(ins->index()));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+}
+
+void LIRGenerator::visitLoadElement(MLoadElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LLoadElementV(useRegister(ins->elements()),
+ useRegisterOrConstant(ins->index()));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitLoadElementHole(MLoadElementHole* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->initLength()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ LLoadElementHole* lir = new (alloc())
+ LLoadElementHole(useRegister(ins->elements()), useRegister(ins->index()),
+ useRegister(ins->initLength()));
+ if (ins->needsNegativeIntCheck()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitStoreElement(MStoreElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ switch (ins->value()->type()) {
+ case MIRType::Value: {
+ LInstruction* lir =
+ new (alloc()) LStoreElementV(elements, index, useBox(ins->value()));
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ add(lir, ins);
+ break;
+ }
+
+ default: {
+ const LAllocation value = useRegisterOrNonDoubleConstant(ins->value());
+ LInstruction* lir = new (alloc()) LStoreElementT(elements, index, value);
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ add(lir, ins);
+ break;
+ }
+ }
+}
+
+void LIRGenerator::visitStoreHoleValueElement(MStoreHoleValueElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LStoreHoleValueElement(useRegister(ins->elements()),
+ useRegister(ins->index()));
+ add(lir, ins);
+}
+
+static bool BoundsCheckNeedsSpectreTemp() {
+ // On x86, spectreBoundsCheck32 can emit better code if it has a scratch
+ // register and index masking is enabled.
+#ifdef JS_CODEGEN_X86
+ return JitOptions.spectreIndexMasking;
+#else
+ return false;
+#endif
+}
+
+void LIRGenerator::visitStoreElementHole(MStoreElementHole* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse object = useRegister(ins->object());
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegister(ins->index());
+
+ LInstruction* lir;
+ switch (ins->value()->type()) {
+ case MIRType::Value:
+ lir = new (alloc()) LStoreElementHoleV(object, elements, index,
+ useBox(ins->value()), temp());
+ break;
+
+ default: {
+ const LAllocation value = useRegisterOrNonDoubleConstant(ins->value());
+ lir = new (alloc())
+ LStoreElementHoleT(object, elements, index, value, temp());
+ break;
+ }
+ }
+
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitEffectiveAddress(MEffectiveAddress* ins) {
+ define(new (alloc()) LEffectiveAddress(useRegister(ins->base()),
+ useRegister(ins->index())),
+ ins);
+}
+
+void LIRGenerator::visitArrayPopShift(MArrayPopShift* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ auto* lir =
+ new (alloc()) LArrayPopShift(useRegister(ins->object()), temp(), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineBox(lir, ins);
+
+ if (ins->mode() == MArrayPopShift::Shift) {
+ assignSafepoint(lir, ins);
+ }
+}
+
+void LIRGenerator::visitArrayPush(MArrayPush* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->value()->type() == MIRType::Value);
+
+ LUse object = useRegister(ins->object());
+
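+  // See BoundsCheckNeedsSpectreTemp above: x86 with index masking enabled
+  // wants an extra scratch register for the inline bounds check.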
+ LDefinition spectreTemp =
+ BoundsCheckNeedsSpectreTemp() ? temp() : LDefinition::BogusTemp();
+
+ auto* lir = new (alloc())
+ LArrayPush(object, useBox(ins->value()), temp(), spectreTemp);
+  // We will bail out before pushing if the length would overflow INT32_MAX.
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitArraySlice(MArraySlice* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->begin()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->end()->type() == MIRType::Int32);
+
+ LArraySlice* lir = new (alloc()) LArraySlice(
+ useRegisterAtStart(ins->object()), useRegisterAtStart(ins->begin()),
+ useRegisterAtStart(ins->end()), tempFixed(CallTempReg0),
+ tempFixed(CallTempReg1));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitArgumentsSlice(MArgumentsSlice* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->begin()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->end()->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LArgumentsSlice(
+ useRegisterAtStart(ins->object()), useRegisterAtStart(ins->begin()),
+ useRegisterAtStart(ins->end()), tempFixed(CallTempReg0),
+ tempFixed(CallTempReg1));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitFrameArgumentsSlice(MFrameArgumentsSlice* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->begin()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->count()->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LFrameArgumentsSlice(
+ useRegister(ins->begin()), useRegister(ins->count()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitInlineArgumentsSlice(MInlineArgumentsSlice* ins) {
+ LAllocation begin = useRegisterOrConstant(ins->begin());
+ LAllocation count = useRegisterOrConstant(ins->count());
+ uint32_t numActuals = ins->numActuals();
+ uint32_t numOperands =
+ numActuals * BOX_PIECES + LInlineArgumentsSlice::NumNonArgumentOperands;
+
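+  // The LIR node is variadic: the leading |begin| and |count| operands are
+  // followed by one boxed operand per inlined actual argument.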
+ auto* lir = allocateVariadic<LInlineArgumentsSlice>(numOperands, temp());
+ if (!lir) {
+ abort(AbortReason::Alloc, "OOM: LIRGenerator::visitInlineArgumentsSlice");
+ return;
+ }
+
+ lir->setOperand(LInlineArgumentsSlice::Begin, begin);
+ lir->setOperand(LInlineArgumentsSlice::Count, count);
+ for (uint32_t i = 0; i < numActuals; i++) {
+ MDefinition* arg = ins->getArg(i);
+ uint32_t index = LInlineArgumentsSlice::ArgIndex(i);
+ lir->setBoxOperand(index,
+ useBoxOrTypedOrConstant(arg, /*useConstant = */ true));
+ }
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNormalizeSliceTerm(MNormalizeSliceTerm* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->length()->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LNormalizeSliceTerm(useRegister(ins->value()),
+ useRegister(ins->length()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitArrayJoin(MArrayJoin* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::String);
+ MOZ_ASSERT(ins->array()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->sep()->type() == MIRType::String);
+
+ auto* lir = new (alloc())
+ LArrayJoin(useRegisterAtStart(ins->array()),
+ useRegisterAtStart(ins->sep()), tempFixed(CallTempReg0));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitStringSplit(MStringSplit* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ MOZ_ASSERT(ins->separator()->type() == MIRType::String);
+
+ LStringSplit* lir = new (alloc()) LStringSplit(
+ useRegisterAtStart(ins->string()), useRegisterAtStart(ins->separator()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+ MOZ_ASSERT(IsNumericType(ins->type()) || ins->type() == MIRType::Boolean);
+
+ if (Scalar::isBigIntType(ins->storageType()) &&
+ ins->requiresMemoryBarrier()) {
+ lowerAtomicLoad64(ins);
+ return;
+ }
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrIndexConstant(
+ ins->index(), ins->storageType(), ins->offsetAdjustment());
+
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ Synchronization sync = Synchronization::Load();
+ if (ins->requiresMemoryBarrier()) {
+ LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierBefore);
+ add(fence, ins);
+ }
+
+ if (!Scalar::isBigIntType(ins->storageType())) {
+ // We need a temp register for Uint32Array with known double result.
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->storageType() == Scalar::Uint32 &&
+ IsFloatingPointType(ins->type())) {
+ tempDef = temp();
+ }
+
+ auto* lir = new (alloc()) LLoadUnboxedScalar(elements, index, tempDef);
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::BigInt);
+
+ auto* lir =
+ new (alloc()) LLoadUnboxedBigInt(elements, index, temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ }
+
+ if (ins->requiresMemoryBarrier()) {
+ LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierAfter);
+ add(fence, ins);
+ }
+}
+
+void LIRGenerator::visitLoadDataViewElement(MLoadDataViewElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ MOZ_ASSERT(IsNumericType(ins->type()));
+
+ const LUse elements = useRegister(ins->elements());
+ const LUse index = useRegister(ins->index());
+ const LAllocation littleEndian = useRegisterOrConstant(ins->littleEndian());
+
+ // We need a temp register for:
+ // - Uint32Array with known double result,
+ // - Float32Array,
+ // - and BigInt64Array and BigUint64Array.
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if ((ins->storageType() == Scalar::Uint32 &&
+ IsFloatingPointType(ins->type())) ||
+ ins->storageType() == Scalar::Float32) {
+ tempDef = temp();
+ }
+ if (Scalar::isBigIntType(ins->storageType())) {
+#ifdef JS_CODEGEN_X86
+ // There are not enough registers on x86.
+ if (littleEndian.isConstant()) {
+ tempDef = temp();
+ }
+#else
+ tempDef = temp();
+#endif
+ }
+
+ // We also need a separate 64-bit temp register for:
+ // - Float64Array
+ // - and BigInt64Array and BigUint64Array.
+ LInt64Definition temp64Def = LInt64Definition::BogusTemp();
+ if (Scalar::byteSize(ins->storageType()) == 8) {
+ temp64Def = tempInt64();
+ }
+
+ auto* lir = new (alloc())
+ LLoadDataViewElement(elements, index, littleEndian, tempDef, temp64Def);
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
+ if (Scalar::isBigIntType(ins->storageType())) {
+ assignSafepoint(lir, ins);
+ }
+}
+
+void LIRGenerator::visitClampToUint8(MClampToUint8* ins) {
+ MDefinition* in = ins->input();
+
+ switch (in->type()) {
+ case MIRType::Boolean:
+ redefine(ins, in);
+ break;
+
+ case MIRType::Int32:
+ defineReuseInput(new (alloc()) LClampIToUint8(useRegisterAtStart(in)),
+ ins, 0);
+ break;
+
+ case MIRType::Double:
+ // LClampDToUint8 clobbers its input register. Making it available as
+ // a temp copy describes this behavior to the register allocator.
+ define(new (alloc())
+ LClampDToUint8(useRegisterAtStart(in), tempCopy(in, 0)),
+ ins);
+ break;
+
+ case MIRType::Value: {
+ LClampVToUint8* lir =
+ new (alloc()) LClampVToUint8(useBox(in), tempDouble());
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ break;
+ }
+
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void LIRGenerator::visitLoadTypedArrayElementHole(
+ MLoadTypedArrayElementHole* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ const LUse object = useRegister(ins->object());
+ const LAllocation index = useRegister(ins->index());
+
+ if (!Scalar::isBigIntType(ins->arrayType())) {
+ auto* lir = new (alloc()) LLoadTypedArrayElementHole(object, index, temp());
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ defineBox(lir, ins);
+ } else {
+#ifdef JS_CODEGEN_X86
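+    // There are not enough registers on x86.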
+ LDefinition tmp = LDefinition::BogusTemp();
+#else
+ LDefinition tmp = temp();
+#endif
+
+ auto* lir = new (alloc())
+ LLoadTypedArrayElementHoleBigInt(object, index, tmp, tempInt64());
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+ }
+}
+
+void LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (ins->isFloatWrite()) {
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32,
+ ins->value()->type() == MIRType::Float32);
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Float64,
+ ins->value()->type() == MIRType::Double);
+ } else if (ins->isBigIntWrite()) {
+ MOZ_ASSERT(ins->value()->type() == MIRType::BigInt);
+ } else {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
+ }
+
+ if (ins->isBigIntWrite() && ins->requiresMemoryBarrier()) {
+ lowerAtomicStore64(ins);
+ return;
+ }
+
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value;
+
+ // For byte arrays, the value has to be in a byte register on x86.
+ if (ins->isByteWrite()) {
+ value = useByteOpRegisterOrNonDoubleConstant(ins->value());
+ } else if (ins->isBigIntWrite()) {
+ value = useRegister(ins->value());
+ } else {
+ value = useRegisterOrNonDoubleConstant(ins->value());
+ }
+
+ // Optimization opportunity for atomics: on some platforms there
+ // is a store instruction that incorporates the necessary
+ // barriers, and we could use that instead of separate barrier and
+ // store instructions. See bug #1077027.
+ //
+ // NOTE: the generated code must match the assembly code in gen_store in
+ // GenerateAtomicOperations.py
+ Synchronization sync = Synchronization::Store();
+ if (ins->requiresMemoryBarrier()) {
+ LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierBefore);
+ add(fence, ins);
+ }
+ if (!ins->isBigIntWrite()) {
+ add(new (alloc()) LStoreUnboxedScalar(elements, index, value), ins);
+ } else {
+ add(new (alloc()) LStoreUnboxedBigInt(elements, index, value, tempInt64()),
+ ins);
+ }
+ if (ins->requiresMemoryBarrier()) {
+ LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierAfter);
+ add(fence, ins);
+ }
+}
+
+void LIRGenerator::visitStoreDataViewElement(MStoreDataViewElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+ MOZ_ASSERT(ins->littleEndian()->type() == MIRType::Boolean);
+
+ if (ins->isFloatWrite()) {
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32,
+ ins->value()->type() == MIRType::Float32);
+ MOZ_ASSERT_IF(ins->writeType() == Scalar::Float64,
+ ins->value()->type() == MIRType::Double);
+ } else if (ins->isBigIntWrite()) {
+ MOZ_ASSERT(ins->value()->type() == MIRType::BigInt);
+ } else {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
+ }
+
+ LUse elements = useRegister(ins->elements());
+ LUse index = useRegister(ins->index());
+ LAllocation value;
+ if (ins->isBigIntWrite()) {
+ value = useRegister(ins->value());
+ } else {
+ value = useRegisterOrNonDoubleConstant(ins->value());
+ }
+ LAllocation littleEndian = useRegisterOrConstant(ins->littleEndian());
+
+ LDefinition tempDef = LDefinition::BogusTemp();
+ LInt64Definition temp64Def = LInt64Definition::BogusTemp();
+ if (Scalar::byteSize(ins->writeType()) < 8) {
+ tempDef = temp();
+ } else {
+ temp64Def = tempInt64();
+ }
+
+ add(new (alloc()) LStoreDataViewElement(elements, index, value, littleEndian,
+ tempDef, temp64Def),
+ ins);
+}
+
+void LIRGenerator::visitStoreTypedArrayElementHole(
+ MStoreTypedArrayElementHole* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+ MOZ_ASSERT(ins->length()->type() == MIRType::IntPtr);
+
+ if (ins->isFloatWrite()) {
+ MOZ_ASSERT_IF(ins->arrayType() == Scalar::Float32,
+ ins->value()->type() == MIRType::Float32);
+ MOZ_ASSERT_IF(ins->arrayType() == Scalar::Float64,
+ ins->value()->type() == MIRType::Double);
+ } else if (ins->isBigIntWrite()) {
+ MOZ_ASSERT(ins->value()->type() == MIRType::BigInt);
+ } else {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
+ }
+
+ LUse elements = useRegister(ins->elements());
+ LAllocation length = useAny(ins->length());
+ LAllocation index = useRegister(ins->index());
+
+ // For byte arrays, the value has to be in a byte register on x86.
+ LAllocation value;
+ if (ins->isByteWrite()) {
+ value = useByteOpRegisterOrNonDoubleConstant(ins->value());
+ } else if (ins->isBigIntWrite()) {
+ value = useRegister(ins->value());
+ } else {
+ value = useRegisterOrNonDoubleConstant(ins->value());
+ }
+
+ if (!ins->isBigIntWrite()) {
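+    // Some platforms need an extra temp to apply Spectre index masking in the
+    // bounds check.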
+ LDefinition spectreTemp =
+ BoundsCheckNeedsSpectreTemp() ? temp() : LDefinition::BogusTemp();
+ auto* lir = new (alloc()) LStoreTypedArrayElementHole(
+ elements, length, index, value, spectreTemp);
+ add(lir, ins);
+ } else {
+ auto* lir = new (alloc()) LStoreTypedArrayElementHoleBigInt(
+ elements, length, index, value, tempInt64());
+ add(lir, ins);
+ }
+}
+
+void LIRGenerator::visitLoadFixedSlot(MLoadFixedSlot* ins) {
+ MDefinition* obj = ins->object();
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ MIRType type = ins->type();
+
+ if (type == MIRType::Value) {
+ LLoadFixedSlotV* lir =
+ new (alloc()) LLoadFixedSlotV(useRegisterAtStart(obj));
+ defineBox(lir, ins);
+ } else {
+ LLoadFixedSlotT* lir =
+ new (alloc()) LLoadFixedSlotT(useRegisterForTypedLoad(obj, type));
+ define(lir, ins);
+ }
+}
+
+void LIRGenerator::visitLoadFixedSlotAndUnbox(MLoadFixedSlotAndUnbox* ins) {
+ MDefinition* obj = ins->object();
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ LLoadFixedSlotAndUnbox* lir =
+ new (alloc()) LLoadFixedSlotAndUnbox(useRegisterAtStart(obj));
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
+}
+
+void LIRGenerator::visitLoadDynamicSlotAndUnbox(MLoadDynamicSlotAndUnbox* ins) {
+ MDefinition* slots = ins->slots();
+ MOZ_ASSERT(slots->type() == MIRType::Slots);
+
+ auto* lir = new (alloc()) LLoadDynamicSlotAndUnbox(useRegisterAtStart(slots));
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
+}
+
+void LIRGenerator::visitLoadElementAndUnbox(MLoadElementAndUnbox* ins) {
+ MDefinition* elements = ins->elements();
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ auto* lir = new (alloc())
+ LLoadElementAndUnbox(useRegister(elements), useRegisterOrConstant(index));
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAddAndStoreSlot(MAddAndStoreSlot* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ LDefinition maybeTemp = LDefinition::BogusTemp();
+ if (ins->kind() != MAddAndStoreSlot::Kind::FixedSlot) {
+ maybeTemp = temp();
+ }
+
+ auto* lir = new (alloc()) LAddAndStoreSlot(useRegister(ins->object()),
+ useBox(ins->value()), maybeTemp);
+ add(lir, ins);
+}
+
+void LIRGenerator::visitAllocateAndStoreSlot(MAllocateAndStoreSlot* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LAllocateAndStoreSlot(
+ useRegisterAtStart(ins->object()), useBoxAtStart(ins->value()),
+ tempFixed(CallTempReg0), tempFixed(CallTempReg1));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+}
+
+void LIRGenerator::visitAddSlotAndCallAddPropHook(
+ MAddSlotAndCallAddPropHook* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->value()->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LAddSlotAndCallAddPropHook(
+ useRegisterAtStart(ins->object()), useBoxAtStart(ins->value()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitStoreFixedSlot(MStoreFixedSlot* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ if (ins->value()->type() == MIRType::Value) {
+ LStoreFixedSlotV* lir = new (alloc())
+ LStoreFixedSlotV(useRegister(ins->object()), useBox(ins->value()));
+ add(lir, ins);
+ } else {
+ LStoreFixedSlotT* lir = new (alloc()) LStoreFixedSlotT(
+ useRegister(ins->object()), useRegisterOrConstant(ins->value()));
+ add(lir, ins);
+ }
+}
+
+void LIRGenerator::visitGetNameCache(MGetNameCache* ins) {
+ MOZ_ASSERT(ins->envObj()->type() == MIRType::Object);
+
+ // Emit an overrecursed check: this is necessary because the cache can
+ // attach a scripted getter stub that calls this script recursively.
+ gen->setNeedsOverrecursedCheck();
+
+ LGetNameCache* lir =
+ new (alloc()) LGetNameCache(useRegister(ins->envObj()), temp());
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCallGetIntrinsicValue(MCallGetIntrinsicValue* ins) {
+ LCallGetIntrinsicValue* lir = new (alloc()) LCallGetIntrinsicValue();
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGetPropSuperCache(MGetPropSuperCache* ins) {
+ MDefinition* obj = ins->object();
+ MDefinition* receiver = ins->receiver();
+ MDefinition* id = ins->idval();
+
+ gen->setNeedsOverrecursedCheck();
+
+ bool useConstId =
+ id->type() == MIRType::String || id->type() == MIRType::Symbol;
+
+ auto* lir = new (alloc())
+ LGetPropSuperCache(useRegister(obj), useBoxOrTyped(receiver),
+ useBoxOrTypedOrConstant(id, useConstId));
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGetPropertyCache(MGetPropertyCache* ins) {
+ MDefinition* value = ins->value();
+ MOZ_ASSERT(value->type() == MIRType::Object ||
+ value->type() == MIRType::Value);
+
+ MDefinition* id = ins->idval();
+ MOZ_ASSERT(id->type() == MIRType::String || id->type() == MIRType::Symbol ||
+ id->type() == MIRType::Int32 || id->type() == MIRType::Value);
+
+ // Emit an overrecursed check: this is necessary because the cache can
+ // attach a scripted getter stub that calls this script recursively.
+ gen->setNeedsOverrecursedCheck();
+
+ // If this is a GetProp, the id is a constant string. Allow passing it as a
+ // constant to reduce register allocation pressure.
+ bool useConstId =
+ id->type() == MIRType::String || id->type() == MIRType::Symbol;
+
+ auto* lir = new (alloc()) LGetPropertyCache(
+ useBoxOrTyped(value), useBoxOrTypedOrConstant(id, useConstId));
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBindNameCache(MBindNameCache* ins) {
+ MOZ_ASSERT(ins->envChain()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ LBindNameCache* lir =
+ new (alloc()) LBindNameCache(useRegister(ins->envChain()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCallBindVar(MCallBindVar* ins) {
+ MOZ_ASSERT(ins->environmentChain()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ LCallBindVar* lir =
+ new (alloc()) LCallBindVar(useRegister(ins->environmentChain()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGuardObjectIdentity(MGuardObjectIdentity* ins) {
+ LGuardObjectIdentity* guard = new (alloc()) LGuardObjectIdentity(
+ useRegister(ins->object()), useRegister(ins->expected()));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardSpecificFunction(MGuardSpecificFunction* ins) {
+ auto* guard = new (alloc()) LGuardSpecificFunction(
+ useRegister(ins->function()), useRegister(ins->expected()));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->function());
+}
+
+void LIRGenerator::visitGuardSpecificAtom(MGuardSpecificAtom* ins) {
+ auto* guard =
+ new (alloc()) LGuardSpecificAtom(useRegister(ins->str()), temp());
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->str());
+ assignSafepoint(guard, ins);
+}
+
+void LIRGenerator::visitGuardSpecificSymbol(MGuardSpecificSymbol* ins) {
+ auto* guard = new (alloc()) LGuardSpecificSymbol(useRegister(ins->symbol()));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->symbol());
+}
+
+void LIRGenerator::visitGuardSpecificInt32(MGuardSpecificInt32* ins) {
+ auto* guard = new (alloc()) LGuardSpecificInt32(useRegister(ins->num()));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->num());
+}
+
+void LIRGenerator::visitGuardStringToIndex(MGuardStringToIndex* ins) {
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ auto* guard = new (alloc()) LGuardStringToIndex(useRegister(ins->string()));
+ assignSnapshot(guard, ins->bailoutKind());
+ define(guard, ins);
+ assignSafepoint(guard, ins);
+}
+
+void LIRGenerator::visitGuardStringToInt32(MGuardStringToInt32* ins) {
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ auto* guard =
+ new (alloc()) LGuardStringToInt32(useRegister(ins->string()), temp());
+ assignSnapshot(guard, ins->bailoutKind());
+ define(guard, ins);
+ assignSafepoint(guard, ins);
+}
+
+void LIRGenerator::visitGuardStringToDouble(MGuardStringToDouble* ins) {
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ auto* guard = new (alloc())
+ LGuardStringToDouble(useRegister(ins->string()), temp(), temp());
+ assignSnapshot(guard, ins->bailoutKind());
+ define(guard, ins);
+ assignSafepoint(guard, ins);
+}
+
+void LIRGenerator::visitGuardNoDenseElements(MGuardNoDenseElements* ins) {
+ auto* guard =
+ new (alloc()) LGuardNoDenseElements(useRegister(ins->object()), temp());
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardShape(MGuardShape* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
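+  // With Spectre mitigations the guard also sanitizes the object register on
+  // a shape mismatch, so the output must reuse the input; without them the
+  // guard only bails out and the original object definition is reused.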
+ if (JitOptions.spectreObjectMitigations) {
+ auto* lir =
+ new (alloc()) LGuardShape(useRegisterAtStart(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReuseInput(lir, ins, 0);
+ } else {
+ auto* lir = new (alloc())
+ LGuardShape(useRegister(ins->object()), LDefinition::BogusTemp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+ }
+}
+
+void LIRGenerator::visitGuardMultipleShapes(MGuardMultipleShapes* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ if (JitOptions.spectreObjectMitigations) {
+ auto* lir = new (alloc()) LGuardMultipleShapes(
+ useRegisterAtStart(ins->object()), useRegister(ins->shapeList()),
+ temp(), temp(), temp(), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReuseInput(lir, ins, 0);
+ } else {
+ auto* lir = new (alloc()) LGuardMultipleShapes(
+ useRegister(ins->object()), useRegister(ins->shapeList()), temp(),
+ temp(), temp(), LDefinition::BogusTemp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+ }
+}
+
+void LIRGenerator::visitGuardProto(MGuardProto* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->expected()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LGuardProto(useRegister(ins->object()),
+ useRegister(ins->expected()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardNullProto(MGuardNullProto* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LGuardNullProto(useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardIsNativeObject(MGuardIsNativeObject* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir =
+ new (alloc()) LGuardIsNativeObject(useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardGlobalGeneration(MGuardGlobalGeneration* ins) {
+ auto* lir = new (alloc()) LGuardGlobalGeneration(temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+}
+
+void LIRGenerator::visitGuardIsProxy(MGuardIsProxy* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LGuardIsProxy(useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardIsNotProxy(MGuardIsNotProxy* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir =
+ new (alloc()) LGuardIsNotProxy(useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardIsNotDOMProxy(MGuardIsNotDOMProxy* ins) {
+ MOZ_ASSERT(ins->proxy()->type() == MIRType::Object);
+
+ auto* lir =
+ new (alloc()) LGuardIsNotDOMProxy(useRegister(ins->proxy()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->proxy());
+}
+
+void LIRGenerator::visitProxyGet(MProxyGet* ins) {
+ MOZ_ASSERT(ins->proxy()->type() == MIRType::Object);
+ auto* lir = new (alloc())
+ LProxyGet(useRegisterAtStart(ins->proxy()), tempFixed(CallTempReg0));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitProxyGetByValue(MProxyGetByValue* ins) {
+ MOZ_ASSERT(ins->proxy()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->idVal()->type() == MIRType::Value);
+ auto* lir = new (alloc()) LProxyGetByValue(useRegisterAtStart(ins->proxy()),
+ useBoxAtStart(ins->idVal()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitProxyHasProp(MProxyHasProp* ins) {
+ MOZ_ASSERT(ins->proxy()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->idVal()->type() == MIRType::Value);
+ auto* lir = new (alloc()) LProxyHasProp(useRegisterAtStart(ins->proxy()),
+ useBoxAtStart(ins->idVal()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitProxySet(MProxySet* ins) {
+ MOZ_ASSERT(ins->proxy()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Value);
+ auto* lir = new (alloc())
+ LProxySet(useRegisterAtStart(ins->proxy()), useBoxAtStart(ins->rhs()),
+ tempFixed(CallTempReg0));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitProxySetByValue(MProxySetByValue* ins) {
+ MOZ_ASSERT(ins->proxy()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->idVal()->type() == MIRType::Value);
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Value);
+ auto* lir = new (alloc())
+ LProxySetByValue(useRegisterAtStart(ins->proxy()),
+ useBoxAtStart(ins->idVal()), useBoxAtStart(ins->rhs()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCallSetArrayLength(MCallSetArrayLength* ins) {
+ MOZ_ASSERT(ins->obj()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Value);
+ auto* lir = new (alloc()) LCallSetArrayLength(useRegisterAtStart(ins->obj()),
+ useBoxAtStart(ins->rhs()));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitMegamorphicLoadSlot(MMegamorphicLoadSlot* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ auto* lir = new (alloc())
+ LMegamorphicLoadSlot(useRegisterAtStart(ins->object()),
+ tempFixed(CallTempReg0), tempFixed(CallTempReg1),
+ tempFixed(CallTempReg2), tempFixed(CallTempReg3));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitMegamorphicLoadSlotByValue(
+ MMegamorphicLoadSlotByValue* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->idVal()->type() == MIRType::Value);
+ auto* lir = new (alloc()) LMegamorphicLoadSlotByValue(
+ useRegisterAtStart(ins->object()), useBoxAtStart(ins->idVal()),
+ tempFixed(CallTempReg0), tempFixed(CallTempReg1),
+ tempFixed(CallTempReg2));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitMegamorphicStoreSlot(MMegamorphicStoreSlot* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Value);
+
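+  // As with MegamorphicSetElement below, x86 is short on registers, so the
+  // operands are pinned to fixed call temps (see the comment in LIROps.yaml).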
+#ifdef JS_CODEGEN_X86
+ auto* lir = new (alloc()) LMegamorphicStoreSlot(
+ useFixedAtStart(ins->object(), CallTempReg0),
+ useBoxFixedAtStart(ins->rhs(), CallTempReg1, CallTempReg2),
+ tempFixed(CallTempReg5));
+#else
+ auto* lir = new (alloc())
+ LMegamorphicStoreSlot(useRegisterAtStart(ins->object()),
+ useBoxAtStart(ins->rhs()), tempFixed(CallTempReg0),
+ tempFixed(CallTempReg1), tempFixed(CallTempReg2));
+#endif
+
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitMegamorphicHasProp(MMegamorphicHasProp* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->idVal()->type() == MIRType::Value);
+ auto* lir = new (alloc())
+ LMegamorphicHasProp(useRegisterAtStart(ins->object()),
+ useBoxAtStart(ins->idVal()), tempFixed(CallTempReg0),
+ tempFixed(CallTempReg1), tempFixed(CallTempReg2));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitGuardIsNotArrayBufferMaybeShared(
+ MGuardIsNotArrayBufferMaybeShared* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc())
+ LGuardIsNotArrayBufferMaybeShared(useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardIsTypedArray(MGuardIsTypedArray* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir =
+ new (alloc()) LGuardIsTypedArray(useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitNurseryObject(MNurseryObject* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LNurseryObject();
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGuardValue(MGuardValue* ins) {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Value);
+ auto* lir = new (alloc()) LGuardValue(useBox(ins->value()));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->value());
+}
+
+void LIRGenerator::visitGuardNullOrUndefined(MGuardNullOrUndefined* ins) {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Value);
+ auto* lir = new (alloc()) LGuardNullOrUndefined(useBox(ins->value()));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->value());
+}
+
+void LIRGenerator::visitGuardIsNotObject(MGuardIsNotObject* ins) {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Value);
+ auto* lir = new (alloc()) LGuardIsNotObject(useBox(ins->value()));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->value());
+}
+
+void LIRGenerator::visitGuardFunctionFlags(MGuardFunctionFlags* ins) {
+ MOZ_ASSERT(ins->function()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LGuardFunctionFlags(useRegister(ins->function()));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->function());
+}
+
+void LIRGenerator::visitGuardFunctionIsNonBuiltinCtor(
+ MGuardFunctionIsNonBuiltinCtor* ins) {
+ MOZ_ASSERT(ins->function()->type() == MIRType::Object);
+
+ auto* lir = new (alloc())
+ LGuardFunctionIsNonBuiltinCtor(useRegister(ins->function()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->function());
+}
+
+void LIRGenerator::visitGuardFunctionKind(MGuardFunctionKind* ins) {
+ MOZ_ASSERT(ins->function()->type() == MIRType::Object);
+
+ auto* lir =
+ new (alloc()) LGuardFunctionKind(useRegister(ins->function()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->function());
+}
+
+void LIRGenerator::visitGuardFunctionScript(MGuardFunctionScript* ins) {
+ MOZ_ASSERT(ins->function()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LGuardFunctionScript(useRegister(ins->function()));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->function());
+}
+
+void LIRGenerator::visitAssertRange(MAssertRange* ins) {
+ MDefinition* input = ins->input();
+ LInstruction* lir = nullptr;
+
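+  // The assertion re-checks the range computed by range analysis at runtime;
+  // the LIR op depends on the representation of the checked value.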
+ switch (input->type()) {
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ case MIRType::IntPtr:
+ lir = new (alloc()) LAssertRangeI(useRegisterAtStart(input));
+ break;
+
+ case MIRType::Double:
+ lir = new (alloc()) LAssertRangeD(useRegister(input), tempDouble());
+ break;
+
+ case MIRType::Float32:
+ lir = new (alloc())
+ LAssertRangeF(useRegister(input), tempDouble(), tempDouble());
+ break;
+
+ case MIRType::Value:
+ lir = new (alloc()) LAssertRangeV(useBox(input), tempToUnbox(),
+ tempDouble(), tempDouble());
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected Range for MIRType");
+ break;
+ }
+
+ lir->setMir(ins);
+ add(lir);
+}
+
+void LIRGenerator::visitAssertClass(MAssertClass* ins) {
+ auto* lir =
+ new (alloc()) LAssertClass(useRegisterAtStart(ins->input()), temp());
+ add(lir, ins);
+}
+
+void LIRGenerator::visitAssertShape(MAssertShape* ins) {
+ auto* lir = new (alloc()) LAssertShape(useRegisterAtStart(ins->input()));
+ add(lir, ins);
+}
+
+void LIRGenerator::visitDeleteProperty(MDeleteProperty* ins) {
+ LCallDeleteProperty* lir =
+ new (alloc()) LCallDeleteProperty(useBoxAtStart(ins->value()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitDeleteElement(MDeleteElement* ins) {
+ LCallDeleteElement* lir = new (alloc()) LCallDeleteElement(
+ useBoxAtStart(ins->value()), useBoxAtStart(ins->index()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitObjectToIterator(MObjectToIterator* ins) {
+ auto* lir = new (alloc())
+ LObjectToIterator(useRegister(ins->object()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitValueToIterator(MValueToIterator* ins) {
+ auto* lir = new (alloc()) LValueToIterator(useBoxAtStart(ins->value()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitLoadSlotByIteratorIndex(MLoadSlotByIteratorIndex* ins) {
+ auto* lir = new (alloc()) LLoadSlotByIteratorIndex(
+ useRegisterAtStart(ins->object()), useRegisterAtStart(ins->iterator()),
+ temp(), temp());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitStoreSlotByIteratorIndex(
+ MStoreSlotByIteratorIndex* ins) {
+ auto* lir = new (alloc()) LStoreSlotByIteratorIndex(
+ useRegister(ins->object()), useRegister(ins->iterator()),
+ useBox(ins->value()), temp(), temp());
+ add(lir, ins);
+}
+
+void LIRGenerator::visitIteratorHasIndices(MIteratorHasIndices* ins) {
+ MOZ_ASSERT(ins->hasOneUse());
+ emitAtUses(ins);
+}
+
+void LIRGenerator::visitSetPropertyCache(MSetPropertyCache* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ MDefinition* id = ins->idval();
+ MOZ_ASSERT(id->type() == MIRType::String || id->type() == MIRType::Symbol ||
+ id->type() == MIRType::Int32 || id->type() == MIRType::Value);
+
+ // If this is a SetProp, the id is a constant string. Allow passing it as a
+ // constant to reduce register allocation pressure.
+ bool useConstId =
+ id->type() == MIRType::String || id->type() == MIRType::Symbol;
+ bool useConstValue = IsNonNurseryConstant(ins->value());
+
+ // Emit an overrecursed check: this is necessary because the cache can
+ // attach a scripted setter stub that calls this script recursively.
+ gen->setNeedsOverrecursedCheck();
+
+ // We need a double temp register for TypedArray or TypedObject stubs.
+ LDefinition tempD = tempFixed(FloatReg0);
+
+ LInstruction* lir = new (alloc()) LSetPropertyCache(
+ useRegister(ins->object()), useBoxOrTypedOrConstant(id, useConstId),
+ useBoxOrTypedOrConstant(ins->value(), useConstValue), temp(), tempD);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitMegamorphicSetElement(MMegamorphicSetElement* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Value);
+ MOZ_ASSERT(ins->value()->type() == MIRType::Value);
+
+ // See comment in LIROps.yaml (x86 is short on registers)
+#ifdef JS_CODEGEN_X86
+ auto* lir = new (alloc()) LMegamorphicSetElement(
+ useFixedAtStart(ins->object(), CallTempReg0),
+ useBoxFixedAtStart(ins->index(), CallTempReg1, CallTempReg2),
+ useBoxFixedAtStart(ins->value(), CallTempReg3, CallTempReg4),
+ tempFixed(CallTempReg5));
+#else
+ auto* lir = new (alloc()) LMegamorphicSetElement(
+ useRegisterAtStart(ins->object()), useBoxAtStart(ins->index()),
+ useBoxAtStart(ins->value()), tempFixed(CallTempReg0),
+ tempFixed(CallTempReg1), tempFixed(CallTempReg2));
+#endif
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGetIteratorCache(MGetIteratorCache* ins) {
+ MDefinition* value = ins->value();
+ MOZ_ASSERT(value->type() == MIRType::Object ||
+ value->type() == MIRType::Value);
+
+ LGetIteratorCache* lir =
+ new (alloc()) LGetIteratorCache(useBoxOrTyped(value), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitOptimizeSpreadCallCache(MOptimizeSpreadCallCache* ins) {
+ MDefinition* value = ins->value();
+ MOZ_ASSERT(value->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LOptimizeSpreadCallCache(useBox(value), temp());
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitIteratorMore(MIteratorMore* ins) {
+ LIteratorMore* lir =
+ new (alloc()) LIteratorMore(useRegister(ins->iterator()), temp());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitIsNoIter(MIsNoIter* ins) {
+ MOZ_ASSERT(ins->hasOneUse());
+ emitAtUses(ins);
+}
+
+void LIRGenerator::visitIteratorEnd(MIteratorEnd* ins) {
+ LIteratorEnd* lir = new (alloc())
+ LIteratorEnd(useRegister(ins->iterator()), temp(), temp(), temp());
+ add(lir, ins);
+}
+
+void LIRGenerator::visitCloseIterCache(MCloseIterCache* ins) {
+ LCloseIterCache* lir =
+ new (alloc()) LCloseIterCache(useRegister(ins->iter()), temp());
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitStringLength(MStringLength* ins) {
+ MOZ_ASSERT(ins->string()->type() == MIRType::String);
+ define(new (alloc()) LStringLength(useRegisterAtStart(ins->string())), ins);
+}
+
+void LIRGenerator::visitArgumentsLength(MArgumentsLength* ins) {
+ define(new (alloc()) LArgumentsLength(), ins);
+}
+
+void LIRGenerator::visitGetFrameArgument(MGetFrameArgument* ins) {
+ LGetFrameArgument* lir =
+ new (alloc()) LGetFrameArgument(useRegisterOrConstant(ins->index()));
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitGetFrameArgumentHole(MGetFrameArgumentHole* ins) {
+ LDefinition spectreTemp =
+ BoundsCheckNeedsSpectreTemp() ? temp() : LDefinition::BogusTemp();
+
+ auto* lir = new (alloc()) LGetFrameArgumentHole(
+ useRegister(ins->index()), useRegister(ins->length()), spectreTemp);
+ assignSnapshot(lir, ins->bailoutKind());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitNewTarget(MNewTarget* ins) {
+ LNewTarget* lir = new (alloc()) LNewTarget();
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitRest(MRest* ins) {
+ MOZ_ASSERT(ins->numActuals()->type() == MIRType::Int32);
+
+ LRest* lir = new (alloc())
+ LRest(useRegisterAtStart(ins->numActuals()), tempFixed(CallTempReg0),
+ tempFixed(CallTempReg1), tempFixed(CallTempReg2));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitThrow(MThrow* ins) {
+ MDefinition* value = ins->getOperand(0);
+ MOZ_ASSERT(value->type() == MIRType::Value);
+
+ LThrow* lir = new (alloc()) LThrow(useBoxAtStart(value));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitInCache(MInCache* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::String || lhs->type() == MIRType::Symbol ||
+ lhs->type() == MIRType::Int32 || lhs->type() == MIRType::Value);
+ MOZ_ASSERT(rhs->type() == MIRType::Object);
+
+ LInCache* lir =
+ new (alloc()) LInCache(useBoxOrTyped(lhs), useRegister(rhs), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitHasOwnCache(MHasOwnCache* ins) {
+ MDefinition* value = ins->value();
+ MOZ_ASSERT(value->type() == MIRType::Object ||
+ value->type() == MIRType::Value);
+
+ MDefinition* id = ins->idval();
+ MOZ_ASSERT(id->type() == MIRType::String || id->type() == MIRType::Symbol ||
+ id->type() == MIRType::Int32 || id->type() == MIRType::Value);
+
+ // Emit an overrecursed check: this is necessary because the cache can
+ // attach a scripted getter stub that calls this script recursively.
+ gen->setNeedsOverrecursedCheck();
+
+ LHasOwnCache* lir =
+ new (alloc()) LHasOwnCache(useBoxOrTyped(value), useBoxOrTyped(id));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCheckPrivateFieldCache(MCheckPrivateFieldCache* ins) {
+ MDefinition* value = ins->value();
+ MOZ_ASSERT(value->type() == MIRType::Object ||
+ value->type() == MIRType::Value);
+
+ MDefinition* id = ins->idval();
+ MOZ_ASSERT(id->type() == MIRType::String || id->type() == MIRType::Symbol ||
+ id->type() == MIRType::Int32 || id->type() == MIRType::Value);
+
+ LCheckPrivateFieldCache* lir = new (alloc())
+ LCheckPrivateFieldCache(useBoxOrTyped(value), useBoxOrTyped(id));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitNewPrivateName(MNewPrivateName* ins) {
+ auto* lir = new (alloc()) LNewPrivateName();
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitInstanceOf(MInstanceOf* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Value || lhs->type() == MIRType::Object);
+ MOZ_ASSERT(rhs->type() == MIRType::Object);
+
+ if (lhs->type() == MIRType::Object) {
+ auto* lir = new (alloc()) LInstanceOfO(useRegister(lhs), useRegister(rhs));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ } else {
+ auto* lir = new (alloc()) LInstanceOfV(useBox(lhs), useRegister(rhs));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ }
+}
+
+void LIRGenerator::visitInstanceOfCache(MInstanceOfCache* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Value);
+ MOZ_ASSERT(rhs->type() == MIRType::Object);
+
+ LInstanceOfCache* lir =
+ new (alloc()) LInstanceOfCache(useBox(lhs), useRegister(rhs));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitIsArray(MIsArray* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+
+ if (ins->value()->type() == MIRType::Object) {
+ LIsArrayO* lir = new (alloc()) LIsArrayO(useRegister(ins->value()));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ } else {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Value);
+ LIsArrayV* lir = new (alloc()) LIsArrayV(useBox(ins->value()), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ }
+}
+
+void LIRGenerator::visitIsTypedArray(MIsTypedArray* ins) {
+ MOZ_ASSERT(ins->value()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+
+ auto* lir = new (alloc()) LIsTypedArray(useRegister(ins->value()));
+ define(lir, ins);
+
+ if (ins->isPossiblyWrapped()) {
+ assignSafepoint(lir, ins);
+ }
+}
+
+void LIRGenerator::visitIsCallable(MIsCallable* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+
+ if (ins->object()->type() == MIRType::Object) {
+ define(new (alloc()) LIsCallableO(useRegister(ins->object())), ins);
+ } else {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Value);
+ define(new (alloc()) LIsCallableV(useBox(ins->object()), temp()), ins);
+ }
+}
+
+void LIRGenerator::visitIsConstructor(MIsConstructor* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ define(new (alloc()) LIsConstructor(useRegister(ins->object())), ins);
+}
+
+void LIRGenerator::visitIsCrossRealmArrayConstructor(
+ MIsCrossRealmArrayConstructor* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ define(new (alloc())
+ LIsCrossRealmArrayConstructor(useRegister(ins->object())),
+ ins);
+}
+
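+// An instruction qualifies for emit-at-uses here only when its single use is a
+// test, so the check can be folded directly into the branch.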
+static bool CanEmitAtUseForSingleTest(MInstruction* ins) {
+ if (!ins->canEmitAtUses()) {
+ return false;
+ }
+
+ MUseIterator iter(ins->usesBegin());
+ if (iter == ins->usesEnd()) {
+ return false;
+ }
+
+ MNode* node = iter->consumer();
+ if (!node->isDefinition()) {
+ return false;
+ }
+
+ if (!node->toDefinition()->isTest()) {
+ return false;
+ }
+
+ iter++;
+ return iter == ins->usesEnd();
+}
+
+void LIRGenerator::visitIsObject(MIsObject* ins) {
+ if (CanEmitAtUseForSingleTest(ins)) {
+ emitAtUses(ins);
+ return;
+ }
+
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+ LIsObject* lir = new (alloc()) LIsObject(useBoxAtStart(opd));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitIsNullOrUndefined(MIsNullOrUndefined* ins) {
+ if (CanEmitAtUseForSingleTest(ins)) {
+ emitAtUses(ins);
+ return;
+ }
+
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+ LIsNullOrUndefined* lir =
+ new (alloc()) LIsNullOrUndefined(useBoxAtStart(opd));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitHasClass(MHasClass* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ define(new (alloc()) LHasClass(useRegister(ins->object())), ins);
+}
+
+void LIRGenerator::visitGuardToClass(MGuardToClass* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ LGuardToClass* lir =
+ new (alloc()) LGuardToClass(useRegisterAtStart(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitGuardToFunction(MGuardToFunction* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ LGuardToFunction* lir =
+ new (alloc()) LGuardToFunction(useRegisterAtStart(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitObjectClassToString(MObjectClassToString* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::String);
+ auto* lir = new (alloc()) LObjectClassToString(
+ useRegisterAtStart(ins->object()), tempFixed(CallTempReg0));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitWasmAddOffset(MWasmAddOffset* ins) {
+ MOZ_ASSERT(ins->offset());
+ if (ins->base()->type() == MIRType::Int32) {
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->offset() <= UINT32_MAX); // Because memory32
+ define(new (alloc()) LWasmAddOffset(useRegisterAtStart(ins->base())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Int64);
+#ifdef JS_64BIT
+ defineInt64(new (alloc())
+ LWasmAddOffset64(useInt64RegisterAtStart(ins->base())),
+ ins);
+#else
+    // Avoid the situation where the input is (a,b) and the output is (b,a).
+ defineInt64ReuseInput(
+ new (alloc()) LWasmAddOffset64(useInt64RegisterAtStart(ins->base())),
+ ins, 0);
+#endif
+ }
+}
+
+void LIRGenerator::visitWasmLoadInstance(MWasmLoadInstance* ins) {
+ if (ins->type() == MIRType::Int64) {
+#ifdef JS_PUNBOX64
+ LAllocation instance = useRegisterAtStart(ins->instance());
+#else
+ // Avoid reusing instance for a 64-bit output pair as the load clobbers the
+ // first half of that pair before loading the second half.
+ LAllocation instance = useRegister(ins->instance());
+#endif
+ auto* lir = new (alloc()) LWasmLoadInstance64(instance);
+ defineInt64(lir, ins);
+ } else {
+ auto* lir =
+ new (alloc()) LWasmLoadInstance(useRegisterAtStart(ins->instance()));
+ define(lir, ins);
+ }
+}
+
+void LIRGenerator::visitWasmStoreInstance(MWasmStoreInstance* ins) {
+ MDefinition* value = ins->value();
+ if (value->type() == MIRType::Int64) {
+#ifdef JS_PUNBOX64
+ LAllocation instance = useRegisterAtStart(ins->instance());
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+#else
+ LAllocation instance = useRegister(ins->instance());
+ LInt64Allocation valueAlloc = useInt64Register(value);
+#endif
+ add(new (alloc()) LWasmStoreSlotI64(valueAlloc, instance, ins->offset(),
+ mozilla::Nothing()),
+ ins);
+ } else {
+ MOZ_ASSERT(value->type() != MIRType::RefOrNull);
+ LAllocation instance = useRegisterAtStart(ins->instance());
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ add(new (alloc())
+ LWasmStoreSlot(valueAlloc, instance, ins->offset(), value->type(),
+ MNarrowingOp::None, mozilla::Nothing()),
+ ins);
+ }
+}
+
+void LIRGenerator::visitWasmBoundsCheck(MWasmBoundsCheck* ins) {
+ MOZ_ASSERT(!ins->isRedundant());
+
+ MDefinition* index = ins->index();
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+
+ MOZ_ASSERT(boundsCheckLimit->type() == index->type());
+
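+  // With Spectre index masking the check also sanitizes the index and
+  // redefines it (reusing the input register); otherwise it only traps on
+  // out-of-bounds and produces no value.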
+ if (index->type() == MIRType::Int64) {
+ if (JitOptions.spectreIndexMasking) {
+ auto* lir = new (alloc()) LWasmBoundsCheck64(
+ useInt64RegisterAtStart(index), useInt64Register(boundsCheckLimit));
+ defineInt64ReuseInput(lir, ins, 0);
+ } else {
+ auto* lir = new (alloc())
+ LWasmBoundsCheck64(useInt64RegisterAtStart(index),
+ useInt64RegisterAtStart(boundsCheckLimit));
+ add(lir, ins);
+ }
+ } else {
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ if (JitOptions.spectreIndexMasking) {
+ auto* lir = new (alloc()) LWasmBoundsCheck(useRegisterAtStart(index),
+ useRegister(boundsCheckLimit));
+ defineReuseInput(lir, ins, 0);
+ } else {
+ auto* lir = new (alloc()) LWasmBoundsCheck(
+ useRegisterAtStart(index), useRegisterAtStart(boundsCheckLimit));
+ add(lir, ins);
+ }
+ }
+}
+
+void LIRGenerator::visitWasmAlignmentCheck(MWasmAlignmentCheck* ins) {
+ MDefinition* index = ins->index();
+ if (index->type() == MIRType::Int64) {
+ auto* lir =
+ new (alloc()) LWasmAlignmentCheck64(useInt64RegisterAtStart(index));
+ add(lir, ins);
+ } else {
+ auto* lir = new (alloc()) LWasmAlignmentCheck(useRegisterAtStart(index));
+ add(lir, ins);
+ }
+}
+
+void LIRGenerator::visitWasmLoadInstanceDataField(
+ MWasmLoadInstanceDataField* ins) {
+ size_t offs = wasm::Instance::offsetInData(ins->instanceDataOffset());
+ if (ins->type() == MIRType::Int64) {
+#ifdef JS_PUNBOX64
+ LAllocation instance = useRegisterAtStart(ins->instance());
+#else
+ // Avoid reusing instance for the output pair as the load clobbers the first
+ // half of that pair before loading the second half.
+ LAllocation instance = useRegister(ins->instance());
+#endif
+ defineInt64(new (alloc())
+ LWasmLoadSlotI64(instance, offs, mozilla::Nothing()),
+ ins);
+ } else {
+ LAllocation instance = useRegisterAtStart(ins->instance());
+ define(new (alloc()) LWasmLoadSlot(instance, offs, ins->type(),
+ MWideningOp::None, mozilla::Nothing()),
+ ins);
+ }
+}
+
+void LIRGenerator::visitWasmLoadGlobalCell(MWasmLoadGlobalCell* ins) {
+ if (ins->type() == MIRType::Int64) {
+#ifdef JS_PUNBOX64
+ LAllocation cellPtr = useRegisterAtStart(ins->cellPtr());
+#else
+ // Avoid reusing cellPtr for the output pair as the load clobbers the first
+ // half of that pair before loading the second half.
+ LAllocation cellPtr = useRegister(ins->cellPtr());
+#endif
+ defineInt64(new (alloc())
+ LWasmLoadSlotI64(cellPtr, /*offset=*/0, mozilla::Nothing()),
+ ins);
+ } else {
+ LAllocation cellPtr = useRegisterAtStart(ins->cellPtr());
+ define(new (alloc()) LWasmLoadSlot(cellPtr, /*offset=*/0, ins->type(),
+ MWideningOp::None, mozilla::Nothing()),
+ ins);
+ }
+}
+
+void LIRGenerator::visitWasmLoadTableElement(MWasmLoadTableElement* ins) {
+ LAllocation elements = useRegisterAtStart(ins->elements());
+ LAllocation index = useRegisterAtStart(ins->index());
+ define(new (alloc()) LWasmLoadTableElement(elements, index), ins);
+}
+
+void LIRGenerator::visitWasmStoreInstanceDataField(
+ MWasmStoreInstanceDataField* ins) {
+ MDefinition* value = ins->value();
+ size_t offs = wasm::Instance::offsetInData(ins->instanceDataOffset());
+ if (value->type() == MIRType::Int64) {
+#ifdef JS_PUNBOX64
+ LAllocation instance = useRegisterAtStart(ins->instance());
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+#else
+ LAllocation instance = useRegister(ins->instance());
+ LInt64Allocation valueAlloc = useInt64Register(value);
+#endif
+ add(new (alloc())
+ LWasmStoreSlotI64(valueAlloc, instance, offs, mozilla::Nothing()),
+ ins);
+ } else {
+ MOZ_ASSERT(value->type() != MIRType::RefOrNull);
+ LAllocation instance = useRegisterAtStart(ins->instance());
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ add(new (alloc()) LWasmStoreSlot(valueAlloc, instance, offs, value->type(),
+ MNarrowingOp::None, mozilla::Nothing()),
+ ins);
+ }
+}
+
+void LIRGenerator::visitWasmStoreGlobalCell(MWasmStoreGlobalCell* ins) {
+ MDefinition* value = ins->value();
+ size_t offs = 0;
+ if (value->type() == MIRType::Int64) {
+#ifdef JS_PUNBOX64
+ LAllocation cellPtr = useRegisterAtStart(ins->cellPtr());
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+#else
+ LAllocation cellPtr = useRegister(ins->cellPtr());
+ LInt64Allocation valueAlloc = useInt64Register(value);
+#endif
+ add(new (alloc())
+ LWasmStoreSlotI64(valueAlloc, cellPtr, offs, mozilla::Nothing()));
+ } else {
+ MOZ_ASSERT(value->type() != MIRType::RefOrNull);
+ LAllocation cellPtr = useRegisterAtStart(ins->cellPtr());
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ add(new (alloc()) LWasmStoreSlot(valueAlloc, cellPtr, offs, value->type(),
+ MNarrowingOp::None, mozilla::Nothing()));
+ }
+}
+
+void LIRGenerator::visitWasmStoreStackResult(MWasmStoreStackResult* ins) {
+ MDefinition* stackResultArea = ins->stackResultArea();
+ MDefinition* value = ins->value();
+ size_t offs = ins->offset();
+ LInstruction* lir;
+ if (value->type() == MIRType::Int64) {
+ lir = new (alloc())
+ LWasmStoreSlotI64(useInt64Register(value), useRegister(stackResultArea),
+ offs, mozilla::Nothing());
+ } else {
+ MOZ_ASSERT(value->type() != MIRType::RefOrNull);
+ lir = new (alloc())
+ LWasmStoreSlot(useRegister(value), useRegister(stackResultArea), offs,
+ value->type(), MNarrowingOp::None, mozilla::Nothing());
+ }
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmDerivedPointer(MWasmDerivedPointer* ins) {
+ LAllocation base = useRegisterAtStart(ins->base());
+ define(new (alloc()) LWasmDerivedPointer(base), ins);
+}
+
+void LIRGenerator::visitWasmDerivedIndexPointer(MWasmDerivedIndexPointer* ins) {
+ LAllocation base = useRegisterAtStart(ins->base());
+ LAllocation index = useRegisterAtStart(ins->index());
+ define(new (alloc()) LWasmDerivedIndexPointer(base, index), ins);
+}
+
+void LIRGenerator::visitWasmStoreRef(MWasmStoreRef* ins) {
+ LAllocation instance = useRegister(ins->instance());
+ LAllocation valueBase = useFixed(ins->valueBase(), PreBarrierReg);
+ LAllocation value = useRegister(ins->value());
+ uint32_t valueOffset = ins->offset();
+ add(new (alloc())
+ LWasmStoreRef(instance, valueBase, value, temp(), valueOffset,
+ mozilla::Nothing(), ins->preBarrierKind()),
+ ins);
+}
+
+void LIRGenerator::visitWasmPostWriteBarrier(MWasmPostWriteBarrier* ins) {
+ LWasmPostWriteBarrier* lir = new (alloc()) LWasmPostWriteBarrier(
+ useFixed(ins->instance(), InstanceReg), useRegister(ins->object()),
+ useRegister(ins->valueBase()), useRegister(ins->value()), temp(),
+ ins->valueOffset());
+ add(lir, ins);
+ assignWasmSafepoint(lir);
+}
+
+void LIRGenerator::visitWasmParameter(MWasmParameter* ins) {
+ ABIArg abi = ins->abi();
+ if (ins->type() == MIRType::StackResults) {
+ // Functions that return stack results receive an extra incoming parameter
+ // with type MIRType::StackResults. This value is a pointer to fresh
+ // memory. Here we treat it as if it were in fact MIRType::Pointer.
+ auto* lir = new (alloc()) LWasmParameter;
+ LDefinition def(LDefinition::TypeFrom(MIRType::Pointer),
+ LDefinition::FIXED);
+ def.setOutput(abi.argInRegister() ? LAllocation(abi.reg())
+ : LArgument(abi.offsetFromArgBase()));
+ define(lir, ins, def);
+ return;
+ }
+ if (abi.argInRegister()) {
+#if defined(JS_NUNBOX32)
+ if (abi.isGeneralRegPair()) {
+ defineInt64Fixed(
+ new (alloc()) LWasmParameterI64, ins,
+ LInt64Allocation(LAllocation(AnyRegister(abi.gpr64().high)),
+ LAllocation(AnyRegister(abi.gpr64().low))));
+ return;
+ }
+#endif
+ defineFixed(new (alloc()) LWasmParameter, ins, LAllocation(abi.reg()));
+ return;
+ }
+ if (ins->type() == MIRType::Int64) {
+ MOZ_ASSERT(!abi.argInRegister());
+ defineInt64Fixed(
+ new (alloc()) LWasmParameterI64, ins,
+#if defined(JS_NUNBOX32)
+ LInt64Allocation(LArgument(abi.offsetFromArgBase() + INT64HIGH_OFFSET),
+ LArgument(abi.offsetFromArgBase() + INT64LOW_OFFSET))
+#else
+ LInt64Allocation(LArgument(abi.offsetFromArgBase()))
+#endif
+ );
+ } else {
+ MOZ_ASSERT(IsNumberType(ins->type()) || ins->type() == MIRType::RefOrNull
+#ifdef ENABLE_WASM_SIMD
+ || ins->type() == MIRType::Simd128
+#endif
+ );
+ defineFixed(new (alloc()) LWasmParameter, ins,
+ LArgument(abi.offsetFromArgBase()));
+ }
+}
+
+void LIRGenerator::visitWasmReturn(MWasmReturn* ins) {
+ MDefinition* rval = ins->getOperand(0);
+ MDefinition* instance = ins->getOperand(1);
+
+ if (rval->type() == MIRType::Int64) {
+ add(new (alloc()) LWasmReturnI64(useInt64Fixed(rval, ReturnReg64),
+ useFixed(instance, InstanceReg)));
+ return;
+ }
+
+ LAllocation returnReg;
+ if (rval->type() == MIRType::Float32) {
+ returnReg = useFixed(rval, ReturnFloat32Reg);
+ } else if (rval->type() == MIRType::Double) {
+ returnReg = useFixed(rval, ReturnDoubleReg);
+#ifdef ENABLE_WASM_SIMD
+ } else if (rval->type() == MIRType::Simd128) {
+ returnReg = useFixed(rval, ReturnSimd128Reg);
+#endif
+ } else if (rval->type() == MIRType::Int32 ||
+ rval->type() == MIRType::RefOrNull) {
+ returnReg = useFixed(rval, ReturnReg);
+ } else {
+ MOZ_CRASH("Unexpected wasm return type");
+ }
+
+ LWasmReturn* lir =
+ new (alloc()) LWasmReturn(useFixed(instance, InstanceReg), returnReg);
+ add(lir);
+}
+
+void LIRGenerator::visitWasmReturnVoid(MWasmReturnVoid* ins) {
+ MDefinition* instance = ins->getOperand(0);
+ LWasmReturnVoid* lir =
+ new (alloc()) LWasmReturnVoid(useFixed(instance, InstanceReg));
+ add(lir);
+}
+
+void LIRGenerator::visitWasmStackArg(MWasmStackArg* ins) {
+ if (ins->arg()->type() == MIRType::Int64) {
+ add(new (alloc())
+ LWasmStackArgI64(useInt64RegisterOrConstantAtStart(ins->arg())),
+ ins);
+ } else if (IsFloatingPointType(ins->arg()->type())) {
+ MOZ_ASSERT(!ins->arg()->isEmittedAtUses());
+ add(new (alloc()) LWasmStackArg(useRegisterAtStart(ins->arg())), ins);
+ } else {
+ add(new (alloc()) LWasmStackArg(useRegisterOrConstantAtStart(ins->arg())),
+ ins);
+ }
+}
+
+void LIRGenerator::visitWasmRegisterResult(MWasmRegisterResult* ins) {
+ auto* lir = new (alloc()) LWasmRegisterResult();
+ uint32_t vreg = getVirtualRegister();
+ MOZ_ASSERT(ins->type() != MIRType::Int64);
+ auto type = LDefinition::TypeFrom(ins->type());
+ lir->setDef(0, LDefinition(vreg, type, LGeneralReg(ins->loc())));
+ ins->setVirtualRegister(vreg);
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmFloatRegisterResult(MWasmFloatRegisterResult* ins) {
+ auto* lir = new (alloc()) LWasmRegisterResult();
+ uint32_t vreg = getVirtualRegister();
+ auto type = LDefinition::TypeFrom(ins->type());
+ lir->setDef(0, LDefinition(vreg, type, LFloatReg(ins->loc())));
+ ins->setVirtualRegister(vreg);
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmRegister64Result(MWasmRegister64Result* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Int64);
+ uint32_t vreg = getVirtualRegister();
+
+#if defined(JS_NUNBOX32)
+ auto* lir = new (alloc()) LWasmRegisterPairResult();
+ lir->setDef(INT64LOW_INDEX,
+ LDefinition(vreg + INT64LOW_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ins->loc().low)));
+ lir->setDef(INT64HIGH_INDEX,
+ LDefinition(vreg + INT64HIGH_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ins->loc().high)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ auto* lir = new (alloc()) LWasmRegisterResult();
+ lir->setDef(
+ 0, LDefinition(vreg, LDefinition::GENERAL, LGeneralReg(ins->loc().reg)));
+#else
+# error expected either JS_NUNBOX32 or JS_PUNBOX64
+#endif
+
+ ins->setVirtualRegister(vreg);
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmStackResultArea(MWasmStackResultArea* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::StackResults);
+ auto* lir = new (alloc()) LWasmStackResultArea(temp());
+ uint32_t vreg = getVirtualRegister();
+ lir->setDef(0,
+ LDefinition(vreg, LDefinition::STACKRESULTS, LDefinition::STACK));
+ ins->setVirtualRegister(vreg);
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmStackResult(MWasmStackResult* ins) {
+ MWasmStackResultArea* area = ins->resultArea()->toWasmStackResultArea();
+ LDefinition::Policy pol = LDefinition::STACK;
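+  // Results that are not returned in registers are read back from the stack
+  // result area, so both the area operand and the definitions use the STACK
+  // policy.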
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc()) LWasmStackResult64;
+ lir->setOperand(0, use(area, LUse(LUse::STACK, /* usedAtStart = */ true)));
+ uint32_t vreg = getVirtualRegister();
+ LDefinition::Type typ = LDefinition::GENERAL;
+#if defined(JS_NUNBOX32)
+ getVirtualRegister();
+ lir->setDef(INT64LOW_INDEX, LDefinition(vreg + INT64LOW_INDEX, typ, pol));
+ lir->setDef(INT64HIGH_INDEX, LDefinition(vreg + INT64HIGH_INDEX, typ, pol));
+#else
+ lir->setDef(0, LDefinition(vreg, typ, pol));
+#endif
+ ins->setVirtualRegister(vreg);
+ add(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LWasmStackResult;
+ lir->setOperand(0, use(area, LUse(LUse::STACK, /* usedAtStart = */ true)));
+ uint32_t vreg = getVirtualRegister();
+ LDefinition::Type typ = LDefinition::TypeFrom(ins->type());
+ lir->setDef(0, LDefinition(vreg, typ, pol));
+ ins->setVirtualRegister(vreg);
+ add(lir, ins);
+}
+
+template <class MWasmCallT>
+void LIRGenerator::visitWasmCall(MWasmCallT ins) {
+ bool needsBoundsCheck = true;
+ mozilla::Maybe<uint32_t> tableSize;
+
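+  // For table calls, the bounds check can be elided when the index is a
+  // constant below the table's minimum length; a statically known size
+  // (min == max) is recorded for codegen.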
+ if (ins->callee().isTable()) {
+ MDefinition* index = ins->getOperand(ins->numArgs());
+
+ if (ins->callee().which() == wasm::CalleeDesc::WasmTable) {
+ uint32_t minLength = ins->callee().wasmTableMinLength();
+ mozilla::Maybe<uint32_t> maxLength = ins->callee().wasmTableMaxLength();
+ if (index->isConstant() &&
+ uint32_t(index->toConstant()->toInt32()) < minLength) {
+ needsBoundsCheck = false;
+ }
+ if (maxLength.isSome() && *maxLength == minLength) {
+ tableSize = maxLength;
+ }
+ }
+ }
+
+ auto* lir = allocateVariadic<LWasmCall>(ins->numOperands(), needsBoundsCheck,
+ tableSize);
+ if (!lir) {
+ abort(AbortReason::Alloc, "OOM: LIRGenerator::lowerWasmCall");
+ return;
+ }
+
+ for (unsigned i = 0; i < ins->numArgs(); i++) {
+ lir->setOperand(
+ i, useFixedAtStart(ins->getOperand(i), ins->registerForArg(i)));
+ }
+
+ if (ins->callee().isTable()) {
+ MDefinition* index = ins->getOperand(ins->numArgs());
+ lir->setOperand(ins->numArgs(),
+ useFixedAtStart(index, WasmTableCallIndexReg));
+ }
+ if (ins->callee().isFuncRef()) {
+ MDefinition* ref = ins->getOperand(ins->numArgs());
+ lir->setOperand(ins->numArgs(), useFixedAtStart(ref, WasmCallRefReg));
+ }
+
+ add(lir, ins);
+ assignWasmSafepoint(lir);
+
+  // A WasmCall through a WasmTable emits two call instructions, each of which
+  // needs its own safepoint. Create the second one here; the adjunct node does
+  // nothing else, and its codegen only records that safepoint.
+ if (ins->callee().which() == wasm::CalleeDesc::WasmTable) {
+ auto* adjunctSafepoint = new (alloc()) LWasmCallIndirectAdjunctSafepoint();
+ add(adjunctSafepoint);
+ assignWasmSafepoint(adjunctSafepoint);
+ lir->setAdjunctSafepoint(adjunctSafepoint);
+ }
+}
+
+void LIRGenerator::visitWasmCallCatchable(MWasmCallCatchable* ins) {
+ visitWasmCall(ins);
+}
+
+void LIRGenerator::visitWasmCallUncatchable(MWasmCallUncatchable* ins) {
+ visitWasmCall(ins);
+}
+
+void LIRGenerator::visitWasmCallLandingPrePad(MWasmCallLandingPrePad* ins) {
+ add(new (alloc()) LWasmCallLandingPrePad, ins);
+}
+
+void LIRGenerator::visitSetDOMProperty(MSetDOMProperty* ins) {
+ MDefinition* val = ins->value();
+
+ Register cxReg, objReg, privReg, valueReg;
+ GetTempRegForIntArg(0, 0, &cxReg);
+ GetTempRegForIntArg(1, 0, &objReg);
+ GetTempRegForIntArg(2, 0, &privReg);
+ GetTempRegForIntArg(3, 0, &valueReg);
+
+ // Keep using GetTempRegForIntArg, since we want to make sure we
+ // don't clobber registers we're already using.
+ Register tempReg1, tempReg2;
+ GetTempRegForIntArg(4, 0, &tempReg1);
+ mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(5, 0, &tempReg2);
+ MOZ_ASSERT(ok, "How can we not have six temp registers?");
+
+ LSetDOMProperty* lir = new (alloc())
+ LSetDOMProperty(tempFixed(cxReg), useFixedAtStart(ins->object(), objReg),
+ useBoxFixedAtStart(val, tempReg1, tempReg2),
+ tempFixed(privReg), tempFixed(valueReg));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGetDOMProperty(MGetDOMProperty* ins) {
+ Register cxReg, objReg, privReg, valueReg;
+ GetTempRegForIntArg(0, 0, &cxReg);
+ GetTempRegForIntArg(1, 0, &objReg);
+ GetTempRegForIntArg(2, 0, &privReg);
+ mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &valueReg);
+ MOZ_ASSERT(ok, "How can we not have four temp registers?");
+ LGetDOMProperty* lir = new (alloc())
+ LGetDOMProperty(tempFixed(cxReg), useFixedAtStart(ins->object(), objReg),
+ tempFixed(privReg), tempFixed(valueReg));
+
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGetDOMMember(MGetDOMMember* ins) {
+ MOZ_ASSERT(ins->isDomMovable(), "Members had better be movable");
+ // We wish we could assert that ins->domAliasSet() == JSJitInfo::AliasNone,
+ // but some MGetDOMMembers are for [Pure], not [Constant] properties, whose
+ // value can in fact change as a result of DOM setters and method calls.
+ MOZ_ASSERT(ins->domAliasSet() != JSJitInfo::AliasEverything,
+ "Member gets had better not alias the world");
+
+ MDefinition* obj = ins->object();
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ MIRType type = ins->type();
+
+ if (type == MIRType::Value) {
+ LGetDOMMemberV* lir = new (alloc()) LGetDOMMemberV(useRegisterAtStart(obj));
+ defineBox(lir, ins);
+ } else {
+ LGetDOMMemberT* lir =
+ new (alloc()) LGetDOMMemberT(useRegisterForTypedLoad(obj, type));
+ define(lir, ins);
+ }
+}
+
+void LIRGenerator::visitLoadDOMExpandoValue(MLoadDOMExpandoValue* ins) {
+ MOZ_ASSERT(ins->proxy()->type() == MIRType::Object);
+ auto* lir =
+ new (alloc()) LLoadDOMExpandoValue(useRegisterAtStart(ins->proxy()));
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitLoadDOMExpandoValueGuardGeneration(
+ MLoadDOMExpandoValueGuardGeneration* ins) {
+ MOZ_ASSERT(ins->proxy()->type() == MIRType::Object);
+ auto* lir = new (alloc())
+ LLoadDOMExpandoValueGuardGeneration(useRegisterAtStart(ins->proxy()));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
+ MLoadDOMExpandoValueIgnoreGeneration* ins) {
+ MOZ_ASSERT(ins->proxy()->type() == MIRType::Object);
+ auto* lir = new (alloc())
+ LLoadDOMExpandoValueIgnoreGeneration(useRegisterAtStart(ins->proxy()));
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitGuardDOMExpandoMissingOrGuardShape(
+ MGuardDOMExpandoMissingOrGuardShape* ins) {
+ MOZ_ASSERT(ins->expando()->type() == MIRType::Value);
+ auto* lir = new (alloc())
+ LGuardDOMExpandoMissingOrGuardShape(useBox(ins->expando()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->expando());
+}
+
+void LIRGenerator::visitIncrementWarmUpCounter(MIncrementWarmUpCounter* ins) {
+ LIncrementWarmUpCounter* lir = new (alloc()) LIncrementWarmUpCounter(temp());
+ add(lir, ins);
+}
+
+void LIRGenerator::visitLexicalCheck(MLexicalCheck* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Value);
+ LLexicalCheck* lir = new (alloc()) LLexicalCheck(useBox(input));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, input);
+}
+
+void LIRGenerator::visitThrowRuntimeLexicalError(
+ MThrowRuntimeLexicalError* ins) {
+ LThrowRuntimeLexicalError* lir = new (alloc()) LThrowRuntimeLexicalError();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitThrowMsg(MThrowMsg* ins) {
+ LThrowMsg* lir = new (alloc()) LThrowMsg();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGlobalDeclInstantiation(MGlobalDeclInstantiation* ins) {
+ LGlobalDeclInstantiation* lir = new (alloc()) LGlobalDeclInstantiation();
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitDebugger(MDebugger* ins) {
+ auto* lir = new (alloc()) LDebugger(tempFixed(CallTempReg0));
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+}
+
+void LIRGenerator::visitAtomicIsLockFree(MAtomicIsLockFree* ins) {
+ define(new (alloc()) LAtomicIsLockFree(useRegister(ins->input())), ins);
+}
+
+void LIRGenerator::visitCheckReturn(MCheckReturn* ins) {
+ MDefinition* retVal = ins->returnValue();
+ MDefinition* thisVal = ins->thisValue();
+ MOZ_ASSERT(retVal->type() == MIRType::Value);
+ MOZ_ASSERT(thisVal->type() == MIRType::Value);
+
+ auto* lir =
+ new (alloc()) LCheckReturn(useBoxAtStart(retVal), useBoxAtStart(thisVal));
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCheckIsObj(MCheckIsObj* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Value);
+
+ LCheckIsObj* lir = new (alloc()) LCheckIsObj(useBox(input));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCheckObjCoercible(MCheckObjCoercible* ins) {
+ MDefinition* checkVal = ins->checkValue();
+ MOZ_ASSERT(checkVal->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LCheckObjCoercible(useBoxAtStart(checkVal));
+ redefine(ins, checkVal);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCheckClassHeritage(MCheckClassHeritage* ins) {
+ MDefinition* heritage = ins->heritage();
+ MOZ_ASSERT(heritage->type() == MIRType::Value);
+
+ auto* lir =
+ new (alloc()) LCheckClassHeritage(useBox(heritage), temp(), temp());
+ redefine(ins, heritage);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCheckThis(MCheckThis* ins) {
+ MDefinition* thisValue = ins->thisValue();
+ MOZ_ASSERT(thisValue->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LCheckThis(useBoxAtStart(thisValue));
+ redefine(ins, thisValue);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCheckThisReinit(MCheckThisReinit* ins) {
+ MDefinition* thisValue = ins->thisValue();
+ MOZ_ASSERT(thisValue->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LCheckThisReinit(useBoxAtStart(thisValue));
+ redefine(ins, thisValue);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGenerator(MGenerator* ins) {
+ auto* lir =
+ new (alloc()) LGenerator(useRegisterAtStart(ins->callee()),
+ useRegisterAtStart(ins->environmentChain()),
+ useRegisterAtStart(ins->argsObject()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitAsyncResolve(MAsyncResolve* ins) {
+ auto* lir = new (alloc()) LAsyncResolve(useRegisterAtStart(ins->generator()),
+ useBoxAtStart(ins->valueOrReason()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitAsyncAwait(MAsyncAwait* ins) {
+ MOZ_ASSERT(ins->generator()->type() == MIRType::Object);
+ auto* lir = new (alloc()) LAsyncAwait(useBoxAtStart(ins->value()),
+ useRegisterAtStart(ins->generator()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCanSkipAwait(MCanSkipAwait* ins) {
+ auto* lir = new (alloc()) LCanSkipAwait(useBoxAtStart(ins->value()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitMaybeExtractAwaitValue(MMaybeExtractAwaitValue* ins) {
+ auto* lir = new (alloc()) LMaybeExtractAwaitValue(
+ useBoxAtStart(ins->value()), useRegisterAtStart(ins->canSkip()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitDebugCheckSelfHosted(MDebugCheckSelfHosted* ins) {
+ MDefinition* checkVal = ins->checkValue();
+ MOZ_ASSERT(checkVal->type() == MIRType::Value);
+
+ LDebugCheckSelfHosted* lir =
+ new (alloc()) LDebugCheckSelfHosted(useBoxAtStart(checkVal));
+ redefine(ins, checkVal);
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitIsPackedArray(MIsPackedArray* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+
+ auto lir = new (alloc()) LIsPackedArray(useRegister(ins->object()), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
+ MOZ_ASSERT(ins->array()->type() == MIRType::Object);
+
+ auto* lir = new (alloc())
+ LGuardArrayIsPacked(useRegister(ins->array()), temp(), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->array());
+}
+
+void LIRGenerator::visitGetPrototypeOf(MGetPrototypeOf* ins) {
+ MOZ_ASSERT(ins->target()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ auto lir = new (alloc()) LGetPrototypeOf(useRegister(ins->target()));
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitObjectWithProto(MObjectWithProto* ins) {
+ MOZ_ASSERT(ins->prototype()->type() == MIRType::Value);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LObjectWithProto(useBoxAtStart(ins->prototype()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitObjectStaticProto(MObjectStaticProto* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ auto* lir =
+ new (alloc()) LObjectStaticProto(useRegisterAtStart(ins->object()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitBuiltinObject(MBuiltinObject* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LBuiltinObject();
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitReturn(MReturn* ret) {
+ return visitReturnImpl(ret->getOperand(0));
+}
+
+void LIRGenerator::visitGeneratorReturn(MGeneratorReturn* ret) {
+ return visitReturnImpl(ret->getOperand(0), true);
+}
+
+void LIRGenerator::visitSuperFunction(MSuperFunction* ins) {
+ MOZ_ASSERT(ins->callee()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LSuperFunction(useRegister(ins->callee()), temp());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitInitHomeObject(MInitHomeObject* ins) {
+ MDefinition* function = ins->function();
+ MOZ_ASSERT(function->type() == MIRType::Object);
+
+ MDefinition* homeObject = ins->homeObject();
+ MOZ_ASSERT(homeObject->type() == MIRType::Value);
+
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ auto* lir = new (alloc())
+ LInitHomeObject(useRegisterAtStart(function), useBoxAtStart(homeObject));
+ redefine(ins, function);
+ add(lir, ins);
+}
+
+void LIRGenerator::visitIsTypedArrayConstructor(MIsTypedArrayConstructor* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LIsTypedArrayConstructor(useRegister(object));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitLoadValueTag(MLoadValueTag* ins) {
+ MDefinition* value = ins->value();
+ MOZ_ASSERT(value->type() == MIRType::Value);
+
+ define(new (alloc()) LLoadValueTag(useBoxAtStart(value)), ins);
+}
+
+void LIRGenerator::visitGuardTagNotEqual(MGuardTagNotEqual* ins) {
+ MDefinition* lhs = ins->lhs();
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+
+ MDefinition* rhs = ins->rhs();
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ auto* guard =
+ new (alloc()) LGuardTagNotEqual(useRegister(lhs), useRegister(rhs));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+}
+
+void LIRGenerator::visitLoadWrapperTarget(MLoadWrapperTarget* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ define(new (alloc()) LLoadWrapperTarget(useRegisterAtStart(object)), ins);
+}
+
+void LIRGenerator::visitGuardHasGetterSetter(MGuardHasGetterSetter* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ auto* guard = new (alloc())
+ LGuardHasGetterSetter(useRegisterAtStart(object), tempFixed(CallTempReg0),
+ tempFixed(CallTempReg1), tempFixed(CallTempReg2));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, object);
+}
+
+void LIRGenerator::visitGuardIsExtensible(MGuardIsExtensible* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ auto* guard = new (alloc()) LGuardIsExtensible(useRegister(object), temp());
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, object);
+}
+
+void LIRGenerator::visitGuardInt32IsNonNegative(MGuardInt32IsNonNegative* ins) {
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ auto* guard = new (alloc()) LGuardInt32IsNonNegative(useRegister(index));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, index);
+}
+
+void LIRGenerator::visitGuardInt32Range(MGuardInt32Range* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Int32);
+
+ auto* guard = new (alloc()) LGuardInt32Range(useRegister(input));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, input);
+}
+
+void LIRGenerator::visitGuardIndexIsNotDenseElement(
+ MGuardIndexIsNotDenseElement* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
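+  // Some platforms need an extra temp register for Spectre index masking in
+  // the bounds check; otherwise a bogus temp is passed.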
+ LDefinition spectreTemp =
+ BoundsCheckNeedsSpectreTemp() ? temp() : LDefinition::BogusTemp();
+
+ auto* guard = new (alloc()) LGuardIndexIsNotDenseElement(
+ useRegister(object), useRegister(index), temp(), spectreTemp);
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, index);
+}
+
+void LIRGenerator::visitGuardIndexIsValidUpdateOrAdd(
+ MGuardIndexIsValidUpdateOrAdd* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ LDefinition spectreTemp =
+ BoundsCheckNeedsSpectreTemp() ? temp() : LDefinition::BogusTemp();
+
+ auto* guard = new (alloc()) LGuardIndexIsValidUpdateOrAdd(
+ useRegister(object), useRegister(index), temp(), spectreTemp);
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, index);
+}
+
+void LIRGenerator::visitCallAddOrUpdateSparseElement(
+ MCallAddOrUpdateSparseElement* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ MDefinition* value = ins->value();
+ MOZ_ASSERT(value->type() == MIRType::Value);
+
+ auto* lir = new (alloc()) LCallAddOrUpdateSparseElement(
+ useRegisterAtStart(object), useRegisterAtStart(index),
+ useBoxAtStart(value));
+ add(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCallGetSparseElement(MCallGetSparseElement* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LCallGetSparseElement(useRegisterAtStart(object),
+ useRegisterAtStart(index));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCallNativeGetElement(MCallNativeGetElement* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LCallNativeGetElement(useRegisterAtStart(object),
+ useRegisterAtStart(index));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCallNativeGetElementSuper(
+ MCallNativeGetElementSuper* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ MDefinition* receiver = ins->receiver();
+
+ auto* lir = new (alloc()) LCallNativeGetElementSuper(
+ useRegisterAtStart(object), useRegisterAtStart(index),
+ useBoxAtStart(receiver));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCallObjectHasSparseElement(
+ MCallObjectHasSparseElement* ins) {
+ MDefinition* object = ins->object();
+ MOZ_ASSERT(object->type() == MIRType::Object);
+
+ MDefinition* index = ins->index();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LCallObjectHasSparseElement(
+ useRegisterAtStart(object), useRegisterAtStart(index),
+ tempFixed(CallTempReg0), tempFixed(CallTempReg1));
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitBigIntAsIntN(MBigIntAsIntN* ins) {
+ MOZ_ASSERT(ins->bits()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->input()->type() == MIRType::BigInt);
+
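+  // When the bit count is a known constant of 64 or 32, a specialized LIR
+  // node can be used instead of the generic call-out below.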
+ if (ins->bits()->isConstant()) {
+ int32_t bits = ins->bits()->toConstant()->toInt32();
+ if (bits == 64) {
+ auto* lir = new (alloc())
+ LBigIntAsIntN64(useRegister(ins->input()), temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+ if (bits == 32) {
+ auto* lir = new (alloc())
+ LBigIntAsIntN32(useRegister(ins->input()), temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+ }
+
+ auto* lir = new (alloc()) LBigIntAsIntN(useRegisterAtStart(ins->bits()),
+ useRegisterAtStart(ins->input()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitBigIntAsUintN(MBigIntAsUintN* ins) {
+ MOZ_ASSERT(ins->bits()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->input()->type() == MIRType::BigInt);
+
+ if (ins->bits()->isConstant()) {
+ int32_t bits = ins->bits()->toConstant()->toInt32();
+ if (bits == 64) {
+ auto* lir = new (alloc())
+ LBigIntAsUintN64(useRegister(ins->input()), temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+ if (bits == 32) {
+ auto* lir = new (alloc())
+ LBigIntAsUintN32(useRegister(ins->input()), temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+ }
+
+ auto* lir = new (alloc()) LBigIntAsUintN(useRegisterAtStart(ins->bits()),
+ useRegisterAtStart(ins->input()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitGuardNonGCThing(MGuardNonGCThing* ins) {
+ MDefinition* input = ins->input();
+
+ auto* guard = new (alloc()) LGuardNonGCThing(useBox(input));
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, input);
+}
+
+void LIRGenerator::visitToHashableNonGCThing(MToHashableNonGCThing* ins) {
+ auto* lir =
+ new (alloc()) LToHashableNonGCThing(useBox(ins->input()), tempDouble());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitToHashableString(MToHashableString* ins) {
+ auto* lir = new (alloc()) LToHashableString(useRegister(ins->input()));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitToHashableValue(MToHashableValue* ins) {
+ auto* lir =
+ new (alloc()) LToHashableValue(useBox(ins->input()), tempDouble());
+ defineBox(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitHashNonGCThing(MHashNonGCThing* ins) {
+ auto* lir = new (alloc()) LHashNonGCThing(useBox(ins->input()), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitHashString(MHashString* ins) {
+ auto* lir = new (alloc()) LHashString(useRegister(ins->input()), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitHashSymbol(MHashSymbol* ins) {
+ auto* lir = new (alloc()) LHashSymbol(useRegister(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitHashBigInt(MHashBigInt* ins) {
+ auto* lir = new (alloc())
+ LHashBigInt(useRegister(ins->input()), temp(), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitHashObject(MHashObject* ins) {
+ auto* lir =
+ new (alloc()) LHashObject(useRegister(ins->set()), useBox(ins->input()),
+ temp(), temp(), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitHashValue(MHashValue* ins) {
+ auto* lir =
+ new (alloc()) LHashValue(useRegister(ins->set()), useBox(ins->input()),
+ temp(), temp(), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitSetObjectHasNonBigInt(MSetObjectHasNonBigInt* ins) {
+ auto* lir = new (alloc())
+ LSetObjectHasNonBigInt(useRegister(ins->set()), useBox(ins->value()),
+ useRegister(ins->hash()), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitSetObjectHasBigInt(MSetObjectHasBigInt* ins) {
+ auto* lir = new (alloc()) LSetObjectHasBigInt(
+ useRegister(ins->set()), useBox(ins->value()), useRegister(ins->hash()),
+ temp(), temp(), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitSetObjectHasValue(MSetObjectHasValue* ins) {
+ auto* lir = new (alloc()) LSetObjectHasValue(
+ useRegister(ins->set()), useBox(ins->value()), useRegister(ins->hash()),
+ temp(), temp(), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitSetObjectHasValueVMCall(MSetObjectHasValueVMCall* ins) {
+ auto* lir = new (alloc()) LSetObjectHasValueVMCall(
+ useRegisterAtStart(ins->set()), useBoxAtStart(ins->value()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitSetObjectSize(MSetObjectSize* ins) {
+ auto* lir = new (alloc()) LSetObjectSize(useRegisterAtStart(ins->set()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitMapObjectHasNonBigInt(MMapObjectHasNonBigInt* ins) {
+ auto* lir = new (alloc())
+ LMapObjectHasNonBigInt(useRegister(ins->map()), useBox(ins->value()),
+ useRegister(ins->hash()), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitMapObjectHasBigInt(MMapObjectHasBigInt* ins) {
+ auto* lir = new (alloc()) LMapObjectHasBigInt(
+ useRegister(ins->map()), useBox(ins->value()), useRegister(ins->hash()),
+ temp(), temp(), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitMapObjectHasValue(MMapObjectHasValue* ins) {
+ auto* lir = new (alloc()) LMapObjectHasValue(
+ useRegister(ins->map()), useBox(ins->value()), useRegister(ins->hash()),
+ temp(), temp(), temp(), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitMapObjectHasValueVMCall(MMapObjectHasValueVMCall* ins) {
+ auto* lir = new (alloc()) LMapObjectHasValueVMCall(
+ useRegisterAtStart(ins->map()), useBoxAtStart(ins->value()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitMapObjectGetNonBigInt(MMapObjectGetNonBigInt* ins) {
+ auto* lir = new (alloc())
+ LMapObjectGetNonBigInt(useRegister(ins->map()), useBox(ins->value()),
+ useRegister(ins->hash()), temp(), temp());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitMapObjectGetBigInt(MMapObjectGetBigInt* ins) {
+ auto* lir = new (alloc()) LMapObjectGetBigInt(
+ useRegister(ins->map()), useBox(ins->value()), useRegister(ins->hash()),
+ temp(), temp(), temp(), temp());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitMapObjectGetValue(MMapObjectGetValue* ins) {
+ auto* lir = new (alloc()) LMapObjectGetValue(
+ useRegister(ins->map()), useBox(ins->value()), useRegister(ins->hash()),
+ temp(), temp(), temp(), temp());
+ defineBox(lir, ins);
+}
+
+void LIRGenerator::visitMapObjectGetValueVMCall(MMapObjectGetValueVMCall* ins) {
+ auto* lir = new (alloc()) LMapObjectGetValueVMCall(
+ useRegisterAtStart(ins->map()), useBoxAtStart(ins->value()));
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitMapObjectSize(MMapObjectSize* ins) {
+ auto* lir = new (alloc()) LMapObjectSize(useRegisterAtStart(ins->map()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitConstant(MConstant* ins) {
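+  // Most non-floating-point constants can be emitted at their use sites
+  // instead of being given a LIR definition here.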
+ if (!IsFloatingPointType(ins->type()) && ins->canEmitAtUses()) {
+ emitAtUses(ins);
+ return;
+ }
+
+ switch (ins->type()) {
+ case MIRType::Double:
+ define(new (alloc()) LDouble(ins->toDouble()), ins);
+ break;
+ case MIRType::Float32:
+ define(new (alloc()) LFloat32(ins->toFloat32()), ins);
+ break;
+ case MIRType::Boolean:
+ define(new (alloc()) LInteger(ins->toBoolean()), ins);
+ break;
+ case MIRType::Int32:
+ define(new (alloc()) LInteger(ins->toInt32()), ins);
+ break;
+ case MIRType::Int64:
+ defineInt64(new (alloc()) LInteger64(ins->toInt64()), ins);
+ break;
+ case MIRType::IntPtr:
+#ifdef JS_64BIT
+ defineInt64(new (alloc()) LInteger64(ins->toIntPtr()), ins);
+#else
+ define(new (alloc()) LInteger(ins->toIntPtr()), ins);
+#endif
+ break;
+ case MIRType::String:
+ define(new (alloc()) LPointer(ins->toString()), ins);
+ break;
+ case MIRType::Symbol:
+ define(new (alloc()) LPointer(ins->toSymbol()), ins);
+ break;
+ case MIRType::BigInt:
+ define(new (alloc()) LPointer(ins->toBigInt()), ins);
+ break;
+ case MIRType::Object:
+ define(new (alloc()) LPointer(&ins->toObject()), ins);
+ break;
+ case MIRType::Shape:
+ MOZ_ASSERT(ins->isEmittedAtUses());
+ break;
+ default:
+ // Constants of special types (undefined, null) should never flow into
+ // here directly. Operations blindly consuming them require a Box.
+ MOZ_CRASH("unexpected constant type");
+ }
+}
+
+void LIRGenerator::visitConstantProto(MConstantProto* ins) {
+ JSObject* obj = &ins->protoObject()->toConstant()->toObject();
+ define(new (alloc()) LPointer(obj), ins);
+}
+
+void LIRGenerator::visitWasmNullConstant(MWasmNullConstant* ins) {
+ define(new (alloc()) LWasmNullConstant(), ins);
+}
+
+void LIRGenerator::visitWasmFloatConstant(MWasmFloatConstant* ins) {
+ switch (ins->type()) {
+ case MIRType::Double:
+ define(new (alloc()) LDouble(ins->toDouble()), ins);
+ break;
+ case MIRType::Float32:
+ define(new (alloc()) LFloat32(ins->toFloat32()), ins);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ define(new (alloc()) LSimd128(ins->toSimd128()), ins);
+ break;
+#endif
+ default:
+ MOZ_CRASH("unexpected constant type");
+ }
+}
+
+#ifdef JS_JITSPEW
+static void SpewResumePoint(MBasicBlock* block, MInstruction* ins,
+ MResumePoint* resumePoint) {
+ Fprinter& out = JitSpewPrinter();
+ out.printf("Current resume point %p details:\n", (void*)resumePoint);
+ out.printf(" frame count: %u\n", resumePoint->frameCount());
+
+ if (ins) {
+ out.printf(" taken after: ");
+ ins->printName(out);
+ } else {
+ out.printf(" taken at block %u entry", block->id());
+ }
+ out.printf("\n");
+
+ out.printf(" pc: %p (script: %p, offset: %d)\n", (void*)resumePoint->pc(),
+ (void*)resumePoint->block()->info().script(),
+ int(resumePoint->block()->info().script()->pcToOffset(
+ resumePoint->pc())));
+
+ for (size_t i = 0, e = resumePoint->numOperands(); i < e; i++) {
+ MDefinition* in = resumePoint->getOperand(i);
+ out.printf(" slot%u: ", (unsigned)i);
+ in->printName(out);
+ out.printf("\n");
+ }
+}
+#endif
+
+void LIRGenerator::visitInstructionDispatch(MInstruction* ins) {
+#ifdef JS_CODEGEN_NONE
+ // Don't compile the switch-statement below so that we don't have to define
+ // the platform-specific visit* methods for the none-backend.
+ MOZ_CRASH();
+#else
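+  // Dispatch to the per-opcode visit* method; the cases are generated from
+  // MIR_OPCODE_LIST.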
+ switch (ins->op()) {
+# define MIR_OP(op) \
+ case MDefinition::Opcode::op: \
+ visit##op(ins->to##op()); \
+ break;
+ MIR_OPCODE_LIST(MIR_OP)
+# undef MIR_OP
+ default:
+ MOZ_CRASH("Invalid instruction");
+ }
+#endif
+}
+
+void LIRGeneratorShared::visitEmittedAtUses(MInstruction* ins) {
+ static_cast<LIRGenerator*>(this)->visitInstructionDispatch(ins);
+}
+
+bool LIRGenerator::visitInstruction(MInstruction* ins) {
+ MOZ_ASSERT(!errored());
+
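+  // Instructions that are recovered on bailout are not lowered to LIR; they
+  // are recomputed from the snapshot during bailout instead.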
+ if (ins->isRecoveredOnBailout()) {
+ MOZ_ASSERT(!JitOptions.disableRecoverIns);
+ return true;
+ }
+
+ if (!gen->ensureBallast()) {
+ return false;
+ }
+ visitInstructionDispatch(ins);
+
+ if (ins->resumePoint()) {
+ updateResumeState(ins);
+ }
+
+#ifdef DEBUG
+ ins->setInWorklistUnchecked();
+#endif
+
+ // If no safepoint was created, there's no need for an OSI point.
+ if (LOsiPoint* osiPoint = popOsiPoint()) {
+ add(osiPoint);
+ }
+
+ return !errored();
+}
+
+bool LIRGenerator::definePhis() {
+ size_t lirIndex = 0;
+ MBasicBlock* block = current->mir();
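+  // Value and Int64 phis expand to multiple LIR virtual registers, so they
+  // advance the LIR index by more than one slot.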
+ for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
+ if (phi->type() == MIRType::Value) {
+ defineUntypedPhi(*phi, lirIndex);
+ lirIndex += BOX_PIECES;
+ } else if (phi->type() == MIRType::Int64) {
+ defineInt64Phi(*phi, lirIndex);
+ lirIndex += INT64_PIECES;
+ } else {
+ defineTypedPhi(*phi, lirIndex);
+ lirIndex += 1;
+ }
+ }
+ return !errored();
+}
+
+void LIRGenerator::updateResumeState(MInstruction* ins) {
+ lastResumePoint_ = ins->resumePoint();
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_IonSnapshots) && lastResumePoint_) {
+ SpewResumePoint(nullptr, ins, lastResumePoint_);
+ }
+#endif
+}
+
+void LIRGenerator::updateResumeState(MBasicBlock* block) {
+  // Note: RangeAnalysis can flag blocks as unreachable, but they are only
+  // removed if GVN (including UCE) is enabled.
+ MOZ_ASSERT_IF(!mir()->compilingWasm() && !block->unreachable(),
+ block->entryResumePoint());
+ MOZ_ASSERT_IF(block->unreachable(), !mir()->optimizationInfo().gvnEnabled());
+ lastResumePoint_ = block->entryResumePoint();
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_IonSnapshots) && lastResumePoint_) {
+ SpewResumePoint(block, nullptr, lastResumePoint_);
+ }
+#endif
+}
+
+bool LIRGenerator::visitBlock(MBasicBlock* block) {
+ current = block->lir();
+ updateResumeState(block);
+
+ if (!definePhis()) {
+ return false;
+ }
+
+ MOZ_ASSERT_IF(block->unreachable(), !mir()->optimizationInfo().gvnEnabled());
+ for (MInstructionIterator iter = block->begin(); *iter != block->lastIns();
+ iter++) {
+ if (!visitInstruction(*iter)) {
+ return false;
+ }
+ }
+
+ if (block->successorWithPhis()) {
+ // If we have a successor with phis, lower the phi input now that we
+ // are approaching the join point.
+ MBasicBlock* successor = block->successorWithPhis();
+ uint32_t position = block->positionInPhiSuccessor();
+ size_t lirIndex = 0;
+ for (MPhiIterator phi(successor->phisBegin()); phi != successor->phisEnd();
+ phi++) {
+ if (!gen->ensureBallast()) {
+ return false;
+ }
+
+ MDefinition* opd = phi->getOperand(position);
+ ensureDefined(opd);
+
+ MOZ_ASSERT(opd->type() == phi->type());
+
+ if (phi->type() == MIRType::Value) {
+ lowerUntypedPhiInput(*phi, position, successor->lir(), lirIndex);
+ lirIndex += BOX_PIECES;
+ } else if (phi->type() == MIRType::Int64) {
+ lowerInt64PhiInput(*phi, position, successor->lir(), lirIndex);
+ lirIndex += INT64_PIECES;
+ } else {
+ lowerTypedPhiInput(*phi, position, successor->lir(), lirIndex);
+ lirIndex += 1;
+ }
+ }
+ }
+
+ // Now emit the last instruction, which is some form of branch.
+ if (!visitInstruction(block->lastIns())) {
+ return false;
+ }
+
+ return true;
+}
+
+void LIRGenerator::visitNaNToZero(MNaNToZero* ins) {
+ MDefinition* input = ins->input();
+
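+  // If the input can be neither NaN nor -0, the operation is the identity and
+  // the input can simply be reused.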
+ if (ins->operandIsNeverNaN() && ins->operandIsNeverNegativeZero()) {
+ redefine(ins, input);
+ return;
+ }
+ LNaNToZero* lir =
+ new (alloc()) LNaNToZero(useRegisterAtStart(input), tempDouble());
+ defineReuseInput(lir, ins, 0);
+}
+
+bool LIRGenerator::generate() {
+ // Create all blocks and prep all phis beforehand.
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); block++) {
+ if (gen->shouldCancel("Lowering (preparation loop)")) {
+ return false;
+ }
+
+ if (!lirGraph_.initBlock(*block)) {
+ return false;
+ }
+ }
+
+ for (ReversePostorderIterator block(graph.rpoBegin());
+ block != graph.rpoEnd(); block++) {
+ if (gen->shouldCancel("Lowering (main loop)")) {
+ return false;
+ }
+
+ if (!visitBlock(*block)) {
+ return false;
+ }
+ }
+
+ lirGraph_.setArgumentSlotCount(maxargslots_);
+ return true;
+}
+
+void LIRGenerator::visitPhi(MPhi* phi) {
+ // Phi nodes are not lowered because they are only meaningful for the register
+ // allocator.
+ MOZ_CRASH("Unexpected Phi node during Lowering.");
+}
+
+void LIRGenerator::visitBeta(MBeta* beta) {
+  // Beta nodes are supposed to be removed before lowering, because they only
+  // carry the range information used by Range Analysis.
+ MOZ_CRASH("Unexpected Beta node during Lowering.");
+}
+
+void LIRGenerator::visitObjectState(MObjectState* objState) {
+ // ObjectState nodes are always recovered on bailouts
+ MOZ_CRASH("Unexpected ObjectState node during Lowering.");
+}
+
+void LIRGenerator::visitArrayState(MArrayState* objState) {
+ // ArrayState nodes are always recovered on bailouts
+ MOZ_CRASH("Unexpected ArrayState node during Lowering.");
+}
+
+void LIRGenerator::visitIonToWasmCall(MIonToWasmCall* ins) {
+ // The instruction needs a temp register:
+ // - that's not the FramePointer, since wasm is going to use it in the
+ // function.
+ // - that's not aliasing an input register.
+ LDefinition scratch = tempFixed(ABINonArgReg0);
+
+ // Note that since this is a LIR call instruction, regalloc will prevent
+ // the use*AtStart below from reusing any of the temporaries.
+
+ LInstruction* lir;
+ if (ins->type() == MIRType::Value) {
+ lir = allocateVariadic<LIonToWasmCallV>(ins->numOperands(), scratch);
+ } else if (ins->type() == MIRType::Int64) {
+ lir = allocateVariadic<LIonToWasmCallI64>(ins->numOperands(), scratch);
+ } else {
+ lir = allocateVariadic<LIonToWasmCall>(ins->numOperands(), scratch);
+ }
+ if (!lir) {
+ abort(AbortReason::Alloc, "OOM: LIRGenerator::visitIonToWasmCall");
+ return;
+ }
+
+ ABIArgGenerator abi;
+ for (unsigned i = 0; i < ins->numOperands(); i++) {
+ MDefinition* argDef = ins->getOperand(i);
+ ABIArg arg = abi.next(ToMIRType(argDef->type()));
+ switch (arg.kind()) {
+ case ABIArg::GPR:
+ case ABIArg::FPU:
+ lir->setOperand(i, useFixedAtStart(argDef, arg.reg()));
+ break;
+ case ABIArg::Stack:
+ lir->setOperand(i, useAtStart(argDef));
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ MOZ_CRASH(
+ "no way to pass i64, and wasm uses hardfp for function calls");
+#endif
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ }
+
+ defineReturn(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmSelect(MWasmSelect* ins) {
+ MDefinition* condExpr = ins->condExpr();
+
+ // Pick off specific cases that we can do with LWasmCompareAndSelect to avoid
+ // generating a boolean that we then have to test again.
+ if (condExpr->isCompare() && condExpr->isEmittedAtUses()) {
+ MCompare* comp = condExpr->toCompare();
+ MCompare::CompareType compTy = comp->compareType();
+ if (canSpecializeWasmCompareAndSelect(compTy, ins->type())) {
+ JSOp jsop = comp->jsop();
+ // We don't currently generate any other JSOPs for the comparison, and if
+ // that changes, we want to know about it. Hence this assertion.
+ MOZ_ASSERT(jsop == JSOp::Eq || jsop == JSOp::Ne || jsop == JSOp::Lt ||
+ jsop == JSOp::Gt || jsop == JSOp::Le || jsop == JSOp::Ge);
+ MDefinition* lhs = comp->lhs();
+ MDefinition* rhs = comp->rhs();
+ jsop = ReorderComparison(jsop, &lhs, &rhs);
+ lowerWasmCompareAndSelect(ins, lhs, rhs, compTy, jsop);
+ return;
+ }
+ }
+ // Fall through to code that generates a boolean and selects on that.
+
+ if (ins->type() == MIRType::Int64) {
+ lowerWasmSelectI64(ins);
+ return;
+ }
+
+ lowerWasmSelectI(ins);
+}
+
+void LIRGenerator::visitWasmFence(MWasmFence* ins) {
+ add(new (alloc()) LWasmFence, ins);
+}
+
+void LIRGenerator::visitWasmLoadField(MWasmLoadField* ins) {
+ uint32_t offs = ins->offset();
+ LAllocation obj = useRegister(ins->obj());
+ MWideningOp wideningOp = ins->wideningOp();
+ if (ins->type() == MIRType::Int64) {
+ MOZ_RELEASE_ASSERT(wideningOp == MWideningOp::None);
+ defineInt64(new (alloc()) LWasmLoadSlotI64(obj, offs, ins->maybeTrap()),
+ ins);
+ } else {
+ define(new (alloc()) LWasmLoadSlot(obj, offs, ins->type(), wideningOp,
+ ins->maybeTrap()),
+ ins);
+ }
+}
+
+void LIRGenerator::visitWasmLoadFieldKA(MWasmLoadFieldKA* ins) {
+ uint32_t offs = ins->offset();
+ LAllocation obj = useRegister(ins->obj());
+ MWideningOp wideningOp = ins->wideningOp();
+ if (ins->type() == MIRType::Int64) {
+ MOZ_RELEASE_ASSERT(wideningOp == MWideningOp::None);
+ defineInt64(new (alloc()) LWasmLoadSlotI64(obj, offs, ins->maybeTrap()),
+ ins);
+ } else {
+ define(new (alloc()) LWasmLoadSlot(obj, offs, ins->type(), wideningOp,
+ ins->maybeTrap()),
+ ins);
+ }
+ add(new (alloc()) LKeepAliveObject(useKeepalive(ins->ka())), ins);
+}
+
+void LIRGenerator::visitWasmStoreFieldKA(MWasmStoreFieldKA* ins) {
+ MDefinition* value = ins->value();
+ uint32_t offs = ins->offset();
+ MNarrowingOp narrowingOp = ins->narrowingOp();
+ LAllocation obj = useRegister(ins->obj());
+ LInstruction* lir;
+ if (value->type() == MIRType::Int64) {
+ MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
+ lir = new (alloc())
+ LWasmStoreSlotI64(useInt64Register(value), obj, offs, ins->maybeTrap());
+ } else {
+ lir = new (alloc())
+ LWasmStoreSlot(useRegister(value), obj, offs, value->type(),
+ narrowingOp, ins->maybeTrap());
+ }
+ add(lir, ins);
+ add(new (alloc()) LKeepAliveObject(useKeepalive(ins->ka())), ins);
+}
+
+void LIRGenerator::visitWasmStoreFieldRefKA(MWasmStoreFieldRefKA* ins) {
+ LAllocation instance = useRegister(ins->instance());
+ LAllocation obj = useFixed(ins->obj(), PreBarrierReg);
+ LAllocation value = useRegister(ins->value());
+ uint32_t offset = ins->offset();
+ add(new (alloc()) LWasmStoreRef(instance, obj, value, temp(), offset,
+ ins->maybeTrap(), ins->preBarrierKind()),
+ ins);
+ add(new (alloc()) LKeepAliveObject(useKeepalive(ins->ka())), ins);
+}
+
+void LIRGenerator::visitWasmGcObjectIsSubtypeOfAbstract(
+ MWasmGcObjectIsSubtypeOfAbstract* ins) {
+ if (CanEmitAtUseForSingleTest(ins)) {
+ emitAtUses(ins);
+ return;
+ }
+
+ // See comment on MacroAssembler::branchWasmGcObjectIsRefType.
+ // We know we do not need scratch2 and superSuperTypeVector because we know
+ // this is not a concrete type.
+ MOZ_ASSERT(
+ !MacroAssembler::needScratch2ForBranchWasmGcRefType(ins->destType()));
+ MOZ_ASSERT(!MacroAssembler::needSuperSuperTypeVectorForBranchWasmGcRefType(
+ ins->destType()));
+
+ LAllocation object = useRegister(ins->object());
+ LDefinition scratch1 =
+ MacroAssembler::needScratch1ForBranchWasmGcRefType(ins->destType())
+ ? temp()
+ : LDefinition();
+ define(new (alloc()) LWasmGcObjectIsSubtypeOfAbstract(object, scratch1), ins);
+}
+
+void LIRGenerator::visitWasmGcObjectIsSubtypeOfConcrete(
+ MWasmGcObjectIsSubtypeOfConcrete* ins) {
+ if (CanEmitAtUseForSingleTest(ins)) {
+ emitAtUses(ins);
+ return;
+ }
+
+ // See comment on MacroAssembler::branchWasmGcObjectIsRefType.
+ // We know we need scratch1 and superSuperTypeVector because we know this is a
+ // concrete type.
+ MOZ_ASSERT(MacroAssembler::needSuperSuperTypeVectorForBranchWasmGcRefType(
+ ins->destType()));
+ MOZ_ASSERT(
+ MacroAssembler::needScratch1ForBranchWasmGcRefType(ins->destType()));
+
+ LAllocation object = useRegister(ins->object());
+ LAllocation superSuperTypeVector = useRegister(ins->superSuperTypeVector());
+ LDefinition scratch1 = temp();
+ LDefinition scratch2 =
+ MacroAssembler::needScratch2ForBranchWasmGcRefType(ins->destType())
+ ? temp()
+ : LDefinition();
+ define(new (alloc()) LWasmGcObjectIsSubtypeOfConcrete(
+ object, superSuperTypeVector, scratch1, scratch2),
+ ins);
+}
+
+#ifdef FUZZING_JS_FUZZILLI
+void LIRGenerator::visitFuzzilliHash(MFuzzilliHash* ins) {
+ MDefinition* value = ins->getOperand(0);
+
+ if (value->type() == MIRType::Undefined || value->type() == MIRType::Null) {
+ define(new (alloc()) LFuzzilliHashT(LAllocation(), temp(), tempDouble()),
+ ins);
+ } else if (value->type() == MIRType::Int32 ||
+ value->type() == MIRType::Double ||
+ value->type() == MIRType::Float32 ||
+ value->type() == MIRType::Boolean ||
+ value->type() == MIRType::BigInt) {
+ define(new (alloc())
+ LFuzzilliHashT(useRegister(value), temp(), tempDouble()),
+ ins);
+ } else if (value->type() == MIRType::Object) {
+ LFuzzilliHashT* lir =
+ new (alloc()) LFuzzilliHashT(useRegister(value), temp(), tempDouble());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ } else if (value->type() == MIRType::Value) {
+ LFuzzilliHashV* lir =
+ new (alloc()) LFuzzilliHashV(useBox(value), temp(), tempDouble());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ } else {
+ define(new (alloc()) LInteger(0), ins);
+ }
+}
+
+void LIRGenerator::visitFuzzilliHashStore(MFuzzilliHashStore* ins) {
+ MDefinition* value = ins->getOperand(0);
+ MOZ_ASSERT(value->type() == MIRType::Int32);
+ add(new (alloc()) LFuzzilliHashStore(useRegister(value), temp(), temp()),
+ ins);
+}
+#endif
+
+static_assert(!std::is_polymorphic_v<LIRGenerator>,
+ "LIRGenerator should not have any virtual methods");
+
+#ifdef JS_CODEGEN_NONE
+void LIRGenerator::visitReturnImpl(MDefinition*, bool) { MOZ_CRASH(); }
+#endif
diff --git a/js/src/jit/Lowering.h b/js/src/jit/Lowering.h
new file mode 100644
index 0000000000..4031424c37
--- /dev/null
+++ b/js/src/jit/Lowering.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Lowering_h
+#define jit_Lowering_h
+
+// This file declares the structures that are used for attaching LIR to a
+// MIRGraph.
+
+#include "jit/LIR.h"
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/Lowering-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/Lowering-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/Lowering-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/Lowering-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Lowering-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Lowering-mips64.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/Lowering-loong64.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/Lowering-riscv64.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/Lowering-wasm32.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/Lowering-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
+class LIRGenerator final : public LIRGeneratorSpecific {
+ void updateResumeState(MInstruction* ins);
+ void updateResumeState(MBasicBlock* block);
+
+  // The maximum depth, for frame size class determination.
+ uint32_t maxargslots_;
+
+ public:
+ LIRGenerator(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorSpecific(gen, graph, lirGraph), maxargslots_(0) {}
+
+ [[nodiscard]] bool generate();
+
+ private:
+ LBoxAllocation useBoxFixedAtStart(MDefinition* mir, Register reg1,
+ Register reg2) {
+ return useBoxFixed(mir, reg1, reg2, /* useAtStart = */ true);
+ }
+
+ LBoxAllocation useBoxFixedAtStart(MDefinition* mir, ValueOperand op);
+ LBoxAllocation useBoxAtStart(MDefinition* mir,
+ LUse::Policy policy = LUse::REGISTER);
+
+ void lowerBitOp(JSOp op, MBinaryInstruction* ins);
+ void lowerShiftOp(JSOp op, MShiftInstruction* ins);
+ LInstructionHelper<1, 1, 0>* allocateAbs(MAbs* ins, LAllocation input);
+ bool definePhis();
+
+ template <typename T>
+ [[nodiscard]] bool lowerCallArguments(T* call);
+
+ friend class LIRGeneratorShared;
+ void visitInstructionDispatch(MInstruction* ins);
+
+ void visitReturnImpl(MDefinition* def, bool isGenerator = false);
+
+ [[nodiscard]] bool visitInstruction(MInstruction* ins);
+ [[nodiscard]] bool visitBlock(MBasicBlock* block);
+
+#define MIR_OP(op) void visit##op(M##op* ins);
+ MIR_OPCODE_LIST(MIR_OP)
+#undef MIR_OP
+
+ template <class MWasmCallT>
+ void visitWasmCall(MWasmCallT ins);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Lowering_h */
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
new file mode 100644
index 0000000000..a4b2191a82
--- /dev/null
+++ b/js/src/jit/MIR.cpp
@@ -0,0 +1,7261 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/MIR.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ScopeExit.h"
+
+#include <array>
+#include <utility>
+
+#include "jslibmath.h"
+#include "jsmath.h"
+#include "jsnum.h"
+
+#include "builtin/RegExp.h"
+#include "jit/AtomicOperations.h"
+#include "jit/CompileInfo.h"
+#include "jit/KnownClass.h"
+#include "jit/MIRGraph.h"
+#include "jit/RangeAnalysis.h"
+#include "jit/VMFunctions.h"
+#include "jit/WarpBuilderShared.h"
+#include "js/Conversions.h"
+#include "js/experimental/JitInfo.h" // JSJitInfo, JSTypedMethodJitInfo
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "util/Text.h"
+#include "util/Unicode.h"
+#include "vm/Iteration.h" // js::NativeIterator
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/Uint8Clamped.h"
+#include "wasm/WasmCode.h"
+
+#include "vm/JSAtom-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::ToInt32;
+
+using mozilla::CheckedInt;
+using mozilla::DebugOnly;
+using mozilla::IsFloat32Representable;
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+using mozilla::NumbersAreIdentical;
+
+NON_GC_POINTER_TYPE_ASSERTIONS_GENERATED
+
+#ifdef DEBUG
+size_t MUse::index() const { return consumer()->indexOf(this); }
+#endif
+
+template <size_t Op>
+static void ConvertDefinitionToDouble(TempAllocator& alloc, MDefinition* def,
+ MInstruction* consumer) {
+ MInstruction* replace = MToDouble::New(alloc, def);
+ consumer->replaceOperand(Op, replace);
+ consumer->block()->insertBefore(consumer, replace);
+}
+
+template <size_t Arity, size_t Index>
+static void ConvertOperandToDouble(MAryInstruction<Arity>* def,
+ TempAllocator& alloc) {
+ static_assert(Index < Arity);
+ auto* operand = def->getOperand(Index);
+ if (operand->type() == MIRType::Float32) {
+ ConvertDefinitionToDouble<Index>(alloc, operand, def);
+ }
+}
+
+template <size_t Arity, size_t... ISeq>
+static void ConvertOperandsToDouble(MAryInstruction<Arity>* def,
+ TempAllocator& alloc,
+ std::index_sequence<ISeq...>) {
+ (ConvertOperandToDouble<Arity, ISeq>(def, alloc), ...);
+}
+
+template <size_t Arity>
+static void ConvertOperandsToDouble(MAryInstruction<Arity>* def,
+ TempAllocator& alloc) {
+ ConvertOperandsToDouble<Arity>(def, alloc, std::make_index_sequence<Arity>{});
+}
+
+template <size_t Arity, size_t... ISeq>
+static bool AllOperandsCanProduceFloat32(MAryInstruction<Arity>* def,
+ std::index_sequence<ISeq...>) {
+ return (def->getOperand(ISeq)->canProduceFloat32() && ...);
+}
+
+template <size_t Arity>
+static bool AllOperandsCanProduceFloat32(MAryInstruction<Arity>* def) {
+ return AllOperandsCanProduceFloat32<Arity>(def,
+ std::make_index_sequence<Arity>{});
+}
+
+static bool CheckUsesAreFloat32Consumers(const MInstruction* ins) {
+ if (ins->isImplicitlyUsed()) {
+ return false;
+ }
+ bool allConsumerUses = true;
+ for (MUseDefIterator use(ins); allConsumerUses && use; use++) {
+ allConsumerUses &= use.def()->canConsumeFloat32(use.use());
+ }
+ return allConsumerUses;
+}
+
+#ifdef JS_JITSPEW
+static const char* OpcodeName(MDefinition::Opcode op) {
+ static const char* const names[] = {
+# define NAME(x) #x,
+ MIR_OPCODE_LIST(NAME)
+# undef NAME
+ };
+ return names[unsigned(op)];
+}
+
+void MDefinition::PrintOpcodeName(GenericPrinter& out, Opcode op) {
+ const char* name = OpcodeName(op);
+ size_t len = strlen(name);
+ for (size_t i = 0; i < len; i++) {
+ out.printf("%c", unicode::ToLowerCase(name[i]));
+ }
+}
+
+uint32_t js::jit::GetMBasicBlockId(const MBasicBlock* block) {
+ return block->id();
+}
+#endif
+
+static MConstant* EvaluateInt64ConstantOperands(TempAllocator& alloc,
+ MBinaryInstruction* ins) {
+ MDefinition* left = ins->getOperand(0);
+ MDefinition* right = ins->getOperand(1);
+
+ if (!left->isConstant() || !right->isConstant()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(left->type() == MIRType::Int64);
+ MOZ_ASSERT(right->type() == MIRType::Int64);
+
+ int64_t lhs = left->toConstant()->toInt64();
+ int64_t rhs = right->toConstant()->toInt64();
+ int64_t ret;
+
+ switch (ins->op()) {
+ case MDefinition::Opcode::BitAnd:
+ ret = lhs & rhs;
+ break;
+ case MDefinition::Opcode::BitOr:
+ ret = lhs | rhs;
+ break;
+ case MDefinition::Opcode::BitXor:
+ ret = lhs ^ rhs;
+ break;
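+    // 64-bit shift counts are masked to six bits, i.e. taken modulo 64.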
+ case MDefinition::Opcode::Lsh:
+ ret = lhs << (rhs & 0x3F);
+ break;
+ case MDefinition::Opcode::Rsh:
+ ret = lhs >> (rhs & 0x3F);
+ break;
+ case MDefinition::Opcode::Ursh:
+ ret = uint64_t(lhs) >> (uint64_t(rhs) & 0x3F);
+ break;
+ case MDefinition::Opcode::Add:
+ ret = lhs + rhs;
+ break;
+ case MDefinition::Opcode::Sub:
+ ret = lhs - rhs;
+ break;
+ case MDefinition::Opcode::Mul:
+ ret = lhs * rhs;
+ break;
+ case MDefinition::Opcode::Div:
+ if (rhs == 0) {
+ // Division by zero will trap at runtime.
+ return nullptr;
+ }
+ if (ins->toDiv()->isUnsigned()) {
+ ret = int64_t(uint64_t(lhs) / uint64_t(rhs));
+ } else if (lhs == INT64_MIN || rhs == -1) {
+ // Overflow will trap at runtime.
+ return nullptr;
+ } else {
+ ret = lhs / rhs;
+ }
+ break;
+ case MDefinition::Opcode::Mod:
+ if (rhs == 0) {
+ // Division by zero will trap at runtime.
+ return nullptr;
+ }
+ if (!ins->toMod()->isUnsigned() && (lhs < 0 || rhs < 0)) {
+ // Handle all negative values at runtime, for simplicity.
+ return nullptr;
+ }
+ ret = int64_t(uint64_t(lhs) % uint64_t(rhs));
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+
+ return MConstant::NewInt64(alloc, ret);
+}
+
+static MConstant* EvaluateConstantOperands(TempAllocator& alloc,
+ MBinaryInstruction* ins,
+ bool* ptypeChange = nullptr) {
+ MDefinition* left = ins->getOperand(0);
+ MDefinition* right = ins->getOperand(1);
+
+ MOZ_ASSERT(IsTypeRepresentableAsDouble(left->type()));
+ MOZ_ASSERT(IsTypeRepresentableAsDouble(right->type()));
+
+ if (!left->isConstant() || !right->isConstant()) {
+ return nullptr;
+ }
+
+ MConstant* lhs = left->toConstant();
+ MConstant* rhs = right->toConstant();
+ double ret = JS::GenericNaN();
+
+ switch (ins->op()) {
+ case MDefinition::Opcode::BitAnd:
+ ret = double(lhs->toInt32() & rhs->toInt32());
+ break;
+ case MDefinition::Opcode::BitOr:
+ ret = double(lhs->toInt32() | rhs->toInt32());
+ break;
+ case MDefinition::Opcode::BitXor:
+ ret = double(lhs->toInt32() ^ rhs->toInt32());
+ break;
+ case MDefinition::Opcode::Lsh:
+ ret = double(uint32_t(lhs->toInt32()) << (rhs->toInt32() & 0x1F));
+ break;
+ case MDefinition::Opcode::Rsh:
+ ret = double(lhs->toInt32() >> (rhs->toInt32() & 0x1F));
+ break;
+ case MDefinition::Opcode::Ursh:
+ ret = double(uint32_t(lhs->toInt32()) >> (rhs->toInt32() & 0x1F));
+ break;
+ case MDefinition::Opcode::Add:
+ ret = lhs->numberToDouble() + rhs->numberToDouble();
+ break;
+ case MDefinition::Opcode::Sub:
+ ret = lhs->numberToDouble() - rhs->numberToDouble();
+ break;
+ case MDefinition::Opcode::Mul:
+ ret = lhs->numberToDouble() * rhs->numberToDouble();
+ break;
+ case MDefinition::Opcode::Div:
+ if (ins->toDiv()->isUnsigned()) {
+ if (rhs->isInt32(0)) {
+ if (ins->toDiv()->trapOnError()) {
+ return nullptr;
+ }
+ ret = 0.0;
+ } else {
+ ret = double(uint32_t(lhs->toInt32()) / uint32_t(rhs->toInt32()));
+ }
+ } else {
+ ret = NumberDiv(lhs->numberToDouble(), rhs->numberToDouble());
+ }
+ break;
+ case MDefinition::Opcode::Mod:
+ if (ins->toMod()->isUnsigned()) {
+ if (rhs->isInt32(0)) {
+ if (ins->toMod()->trapOnError()) {
+ return nullptr;
+ }
+ ret = 0.0;
+ } else {
+ ret = double(uint32_t(lhs->toInt32()) % uint32_t(rhs->toInt32()));
+ }
+ } else {
+ ret = NumberMod(lhs->numberToDouble(), rhs->numberToDouble());
+ }
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+
+ if (ins->type() == MIRType::Float32) {
+ return MConstant::NewFloat32(alloc, float(ret));
+ }
+ if (ins->type() == MIRType::Double) {
+ return MConstant::New(alloc, DoubleValue(ret));
+ }
+
+ Value retVal;
+ retVal.setNumber(JS::CanonicalizeNaN(ret));
+
+ // If this was an int32 operation but the result isn't an int32 (for
+ // example, a division where the numerator isn't evenly divisible by the
+ // denominator), decline folding.
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+ if (!retVal.isInt32()) {
+ if (ptypeChange) {
+ *ptypeChange = true;
+ }
+ return nullptr;
+ }
+
+ return MConstant::New(alloc, retVal);
+}
+
+static MMul* EvaluateExactReciprocal(TempAllocator& alloc, MDiv* ins) {
+ // we should fold only when it is a floating point operation
+ if (!IsFloatingPointType(ins->type())) {
+ return nullptr;
+ }
+
+ MDefinition* left = ins->getOperand(0);
+ MDefinition* right = ins->getOperand(1);
+
+ if (!right->isConstant()) {
+ return nullptr;
+ }
+
+ int32_t num;
+ if (!mozilla::NumberIsInt32(right->toConstant()->numberToDouble(), &num)) {
+ return nullptr;
+ }
+
+ // check if rhs is a power of two
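+  // (only the reciprocal of a power of two is exactly representable, so the
+  // fold keeps x / c bit-identical to x * (1 / c))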
+ if (mozilla::Abs(num) & (mozilla::Abs(num) - 1)) {
+ return nullptr;
+ }
+
+ Value ret;
+ ret.setDouble(1.0 / double(num));
+
+ MConstant* foldedRhs;
+ if (ins->type() == MIRType::Float32) {
+ foldedRhs = MConstant::NewFloat32(alloc, ret.toDouble());
+ } else {
+ foldedRhs = MConstant::New(alloc, ret);
+ }
+
+ MOZ_ASSERT(foldedRhs->type() == ins->type());
+ ins->block()->insertBefore(ins, foldedRhs);
+
+ MMul* mul = MMul::New(alloc, left, foldedRhs, ins->type());
+ mul->setMustPreserveNaN(ins->mustPreserveNaN());
+ return mul;
+}
+
+#ifdef JS_JITSPEW
+const char* MDefinition::opName() const { return OpcodeName(op()); }
+
+void MDefinition::printName(GenericPrinter& out) const {
+ PrintOpcodeName(out, op());
+ out.printf("%u", id());
+}
+#endif
+
+HashNumber MDefinition::valueHash() const {
+ HashNumber out = HashNumber(op());
+ for (size_t i = 0, e = numOperands(); i < e; i++) {
+ out = addU32ToHash(out, getOperand(i)->id());
+ }
+ if (MDefinition* dep = dependency()) {
+ out = addU32ToHash(out, dep->id());
+ }
+ return out;
+}
+
+HashNumber MNullaryInstruction::valueHash() const {
+ HashNumber hash = HashNumber(op());
+ if (MDefinition* dep = dependency()) {
+ hash = addU32ToHash(hash, dep->id());
+ }
+ MOZ_ASSERT(hash == MDefinition::valueHash());
+ return hash;
+}
+
+HashNumber MUnaryInstruction::valueHash() const {
+ HashNumber hash = HashNumber(op());
+ hash = addU32ToHash(hash, getOperand(0)->id());
+ if (MDefinition* dep = dependency()) {
+ hash = addU32ToHash(hash, dep->id());
+ }
+ MOZ_ASSERT(hash == MDefinition::valueHash());
+ return hash;
+}
+
+HashNumber MBinaryInstruction::valueHash() const {
+ HashNumber hash = HashNumber(op());
+ hash = addU32ToHash(hash, getOperand(0)->id());
+ hash = addU32ToHash(hash, getOperand(1)->id());
+ if (MDefinition* dep = dependency()) {
+ hash = addU32ToHash(hash, dep->id());
+ }
+ MOZ_ASSERT(hash == MDefinition::valueHash());
+ return hash;
+}
+
+HashNumber MTernaryInstruction::valueHash() const {
+ HashNumber hash = HashNumber(op());
+ hash = addU32ToHash(hash, getOperand(0)->id());
+ hash = addU32ToHash(hash, getOperand(1)->id());
+ hash = addU32ToHash(hash, getOperand(2)->id());
+ if (MDefinition* dep = dependency()) {
+ hash = addU32ToHash(hash, dep->id());
+ }
+ MOZ_ASSERT(hash == MDefinition::valueHash());
+ return hash;
+}
+
+HashNumber MQuaternaryInstruction::valueHash() const {
+ HashNumber hash = HashNumber(op());
+ hash = addU32ToHash(hash, getOperand(0)->id());
+ hash = addU32ToHash(hash, getOperand(1)->id());
+ hash = addU32ToHash(hash, getOperand(2)->id());
+ hash = addU32ToHash(hash, getOperand(3)->id());
+ if (MDefinition* dep = dependency()) {
+ hash = addU32ToHash(hash, dep->id());
+ }
+ MOZ_ASSERT(hash == MDefinition::valueHash());
+ return hash;
+}
+
+const MDefinition* MDefinition::skipObjectGuards() const {
+ const MDefinition* result = this;
+ // These instructions don't modify the object and just guard specific
+ // properties.
+ while (true) {
+ if (result->isGuardShape()) {
+ result = result->toGuardShape()->object();
+ continue;
+ }
+ if (result->isGuardNullProto()) {
+ result = result->toGuardNullProto()->object();
+ continue;
+ }
+ if (result->isGuardProto()) {
+ result = result->toGuardProto()->object();
+ continue;
+ }
+
+ break;
+ }
+
+ return result;
+}
+
+bool MDefinition::congruentIfOperandsEqual(const MDefinition* ins) const {
+ if (op() != ins->op()) {
+ return false;
+ }
+
+ if (type() != ins->type()) {
+ return false;
+ }
+
+ if (isEffectful() || ins->isEffectful()) {
+ return false;
+ }
+
+ if (numOperands() != ins->numOperands()) {
+ return false;
+ }
+
+ for (size_t i = 0, e = numOperands(); i < e; i++) {
+ if (getOperand(i) != ins->getOperand(i)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+MDefinition* MDefinition::foldsTo(TempAllocator& alloc) {
+ // In the default case, there are no constants to fold.
+ return this;
+}
+
+bool MDefinition::mightBeMagicType() const {
+ if (IsMagicType(type())) {
+ return true;
+ }
+
+ if (MIRType::Value != type()) {
+ return false;
+ }
+
+ return true;
+}
+
+bool MDefinition::definitelyType(std::initializer_list<MIRType> types) const {
+#ifdef DEBUG
+ // Only support specialized, non-magic types.
+ auto isSpecializedNonMagic = [](MIRType type) {
+ return type <= MIRType::Object;
+ };
+#endif
+
+ MOZ_ASSERT(types.size() > 0);
+ MOZ_ASSERT(std::all_of(types.begin(), types.end(), isSpecializedNonMagic));
+
+ if (type() == MIRType::Value) {
+ return false;
+ }
+
+ return std::find(types.begin(), types.end(), type()) != types.end();
+}
+
+MDefinition* MInstruction::foldsToStore(TempAllocator& alloc) {
+ if (!dependency()) {
+ return nullptr;
+ }
+
+ MDefinition* store = dependency();
+ if (mightAlias(store) != AliasType::MustAlias) {
+ return nullptr;
+ }
+
+ if (!store->block()->dominates(block())) {
+ return nullptr;
+ }
+
+ MDefinition* value;
+ switch (store->op()) {
+ case Opcode::StoreFixedSlot:
+ value = store->toStoreFixedSlot()->value();
+ break;
+ case Opcode::StoreDynamicSlot:
+ value = store->toStoreDynamicSlot()->value();
+ break;
+ case Opcode::StoreElement:
+ value = store->toStoreElement()->value();
+ break;
+ default:
+ MOZ_CRASH("unknown store");
+ }
+
+  // If the types match, then we return the value that is used as the
+  // argument of the store.
+ if (value->type() != type()) {
+ // If we expect to read a type which is more generic than the type seen
+ // by the store, then we box the value used by the store.
+ if (type() != MIRType::Value) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(value->type() < MIRType::Value);
+ MBox* box = MBox::New(alloc, value);
+ value = box;
+ }
+
+ return value;
+}
+
+void MDefinition::analyzeEdgeCasesForward() {}
+
+void MDefinition::analyzeEdgeCasesBackward() {}
+
+void MInstruction::setResumePoint(MResumePoint* resumePoint) {
+ MOZ_ASSERT(!resumePoint_);
+ resumePoint_ = resumePoint;
+ resumePoint_->setInstruction(this);
+}
+
+void MInstruction::stealResumePoint(MInstruction* other) {
+ MResumePoint* resumePoint = other->resumePoint_;
+ other->resumePoint_ = nullptr;
+
+ resumePoint->resetInstruction();
+ setResumePoint(resumePoint);
+}
+
+void MInstruction::moveResumePointAsEntry() {
+ MOZ_ASSERT(isNop());
+ block()->clearEntryResumePoint();
+ block()->setEntryResumePoint(resumePoint_);
+ resumePoint_->resetInstruction();
+ resumePoint_ = nullptr;
+}
+
+void MInstruction::clearResumePoint() {
+ resumePoint_->resetInstruction();
+ block()->discardPreAllocatedResumePoint(resumePoint_);
+ resumePoint_ = nullptr;
+}
+
+MDefinition* MTest::foldsDoubleNegation(TempAllocator& alloc) {
+ MDefinition* op = getOperand(0);
+
+ if (op->isNot()) {
+ // If the operand of the Not is itself a Not, they cancel out.
+ MDefinition* opop = op->getOperand(0);
+ if (opop->isNot()) {
+ return MTest::New(alloc, opop->toNot()->input(), ifTrue(), ifFalse());
+ }
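+    // Otherwise a single Not is absorbed by swapping the branch targets.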
+ return MTest::New(alloc, op->toNot()->input(), ifFalse(), ifTrue());
+ }
+ return nullptr;
+}
+
+MDefinition* MTest::foldsConstant(TempAllocator& alloc) {
+ MDefinition* op = getOperand(0);
+ if (MConstant* opConst = op->maybeConstantValue()) {
+ bool b;
+ if (opConst->valueToBoolean(&b)) {
+ return MGoto::New(alloc, b ? ifTrue() : ifFalse());
+ }
+ }
+ return nullptr;
+}
+
+MDefinition* MTest::foldsTypes(TempAllocator& alloc) {
+ MDefinition* op = getOperand(0);
+
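+  // Inputs of these types have statically known truthiness: undefined and
+  // null are always falsy, symbols are always truthy.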
+ switch (op->type()) {
+ case MIRType::Undefined:
+ case MIRType::Null:
+ return MGoto::New(alloc, ifFalse());
+ case MIRType::Symbol:
+ return MGoto::New(alloc, ifTrue());
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+class UsesIterator {
+ MDefinition* def_;
+
+ public:
+ explicit UsesIterator(MDefinition* def) : def_(def) {}
+ auto begin() const { return def_->usesBegin(); }
+ auto end() const { return def_->usesEnd(); }
+};
+
+static bool AllInstructionsDeadIfUnused(MBasicBlock* block) {
+ for (auto* ins : *block) {
+ // Skip trivial instructions.
+ if (ins->isNop() || ins->isGoto()) {
+ continue;
+ }
+
+ // All uses must be within the current block.
+ for (auto* use : UsesIterator(ins)) {
+ if (use->consumer()->block() != block) {
+ return false;
+ }
+ }
+
+ // All instructions within this block must be dead if unused.
+ if (!DeadIfUnused(ins)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+MDefinition* MTest::foldsNeedlessControlFlow(TempAllocator& alloc) {
+  // All instructions within both successors must be dead if unused.
+ if (!AllInstructionsDeadIfUnused(ifTrue()) ||
+ !AllInstructionsDeadIfUnused(ifFalse())) {
+ return nullptr;
+ }
+
+ // Both successors must have the same target successor.
+ if (ifTrue()->numSuccessors() != 1 || ifFalse()->numSuccessors() != 1) {
+ return nullptr;
+ }
+ if (ifTrue()->getSuccessor(0) != ifFalse()->getSuccessor(0)) {
+ return nullptr;
+ }
+
+ // The target successor's phis must be redundant. Redundant phis should have
+ // been removed in an earlier pass, so only check if any phis are present,
+ // which is a stronger condition.
+ if (ifTrue()->successorWithPhis()) {
+ return nullptr;
+ }
+
+ return MGoto::New(alloc, ifTrue());
+}
+
+MDefinition* MTest::foldsTo(TempAllocator& alloc) {
+ if (MDefinition* def = foldsDoubleNegation(alloc)) {
+ return def;
+ }
+
+ if (MDefinition* def = foldsConstant(alloc)) {
+ return def;
+ }
+
+ if (MDefinition* def = foldsTypes(alloc)) {
+ return def;
+ }
+
+ if (MDefinition* def = foldsNeedlessControlFlow(alloc)) {
+ return def;
+ }
+
+ return this;
+}
+
+AliasSet MThrow::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ExceptionState);
+}
+
+AliasSet MNewArrayDynamicLength::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ExceptionState);
+}
+
+AliasSet MNewTypedArrayDynamicLength::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ExceptionState);
+}
+
+#ifdef JS_JITSPEW
+void MDefinition::printOpcode(GenericPrinter& out) const {
+ PrintOpcodeName(out, op());
+ for (size_t j = 0, e = numOperands(); j < e; j++) {
+ out.printf(" ");
+ if (getUseFor(j)->hasProducer()) {
+ getOperand(j)->printName(out);
+ out.printf(":%s", StringFromMIRType(getOperand(j)->type()));
+ } else {
+ out.printf("(null)");
+ }
+ }
+}
+
+void MDefinition::dump(GenericPrinter& out) const {
+ printName(out);
+ out.printf(":%s", StringFromMIRType(type()));
+ out.printf(" = ");
+ printOpcode(out);
+ out.printf("\n");
+
+ if (isInstruction()) {
+ if (MResumePoint* resume = toInstruction()->resumePoint()) {
+ resume->dump(out);
+ }
+ }
+}
+
+void MDefinition::dump() const {
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+
+void MDefinition::dumpLocation(GenericPrinter& out) const {
+ MResumePoint* rp = nullptr;
+ const char* linkWord = nullptr;
+ if (isInstruction() && toInstruction()->resumePoint()) {
+ rp = toInstruction()->resumePoint();
+ linkWord = "at";
+ } else {
+ rp = block()->entryResumePoint();
+ linkWord = "after";
+ }
+
+ while (rp) {
+ JSScript* script = rp->block()->info().script();
+ uint32_t lineno = PCToLineNumber(rp->block()->info().script(), rp->pc());
+ out.printf(" %s %s:%u\n", linkWord, script->filename(), lineno);
+ rp = rp->caller();
+ linkWord = "in";
+ }
+}
+
+void MDefinition::dumpLocation() const {
+ Fprinter out(stderr);
+ dumpLocation(out);
+ out.finish();
+}
+#endif
+
+#ifdef DEBUG
+bool MDefinition::trackedSiteMatchesBlock(const BytecodeSite* site) const {
+ return site == block()->trackedSite();
+}
+#endif
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+size_t MDefinition::useCount() const {
+ size_t count = 0;
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
+ count++;
+ }
+ return count;
+}
+
+size_t MDefinition::defUseCount() const {
+ size_t count = 0;
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
+ if ((*i)->consumer()->isDefinition()) {
+ count++;
+ }
+ }
+ return count;
+}
+#endif
+
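+// Returns true if this definition has exactly one use (of any kind,
+// including resume point uses).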
+bool MDefinition::hasOneUse() const {
+ MUseIterator i(uses_.begin());
+ if (i == uses_.end()) {
+ return false;
+ }
+ i++;
+ return i == uses_.end();
+}
+
+bool MDefinition::hasOneDefUse() const {
+ bool hasOneDefUse = false;
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
+ if (!(*i)->consumer()->isDefinition()) {
+ continue;
+ }
+
+    // We already have a definition use, so this is more than one.
+ if (hasOneDefUse) {
+ return false;
+ }
+
+ // We saw one definition. Loop to test if there is another.
+ hasOneDefUse = true;
+ }
+
+ return hasOneDefUse;
+}
+
+bool MDefinition::hasDefUses() const {
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
+ if ((*i)->consumer()->isDefinition()) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool MDefinition::hasLiveDefUses() const {
+ for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
+ MNode* ins = (*i)->consumer();
+ if (ins->isDefinition()) {
+ if (!ins->toDefinition()->isRecoveredOnBailout()) {
+ return true;
+ }
+ } else {
+ MOZ_ASSERT(ins->isResumePoint());
+ if (!ins->toResumePoint()->isRecoverableOperand(*i)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+MDefinition* MDefinition::maybeSingleDefUse() const {
+ MUseDefIterator use(this);
+ if (!use) {
+ // No def-uses.
+ return nullptr;
+ }
+
+ MDefinition* useDef = use.def();
+
+ use++;
+ if (use) {
+ // More than one def-use.
+ return nullptr;
+ }
+
+ return useDef;
+}
+
+MDefinition* MDefinition::maybeMostRecentlyAddedDefUse() const {
+ MUseDefIterator use(this);
+ if (!use) {
+ // No def-uses.
+ return nullptr;
+ }
+
+ MDefinition* mostRecentUse = use.def();
+
+#ifdef DEBUG
+ // This function relies on addUse adding new uses to the front of the list.
+ // Check this invariant by asserting the next few uses are 'older'. Skip this
+ // for phis because setBackedge can add a new use for a loop phi even if the
+ // loop body has a use with an id greater than the loop phi's id.
+ if (!mostRecentUse->isPhi()) {
+ static constexpr size_t NumUsesToCheck = 3;
+ use++;
+ for (size_t i = 0; use && i < NumUsesToCheck; i++, use++) {
+ MOZ_ASSERT(use.def()->id() <= mostRecentUse->id());
+ }
+ }
+#endif
+
+ return mostRecentUse;
+}
+
+void MDefinition::replaceAllUsesWith(MDefinition* dom) {
+ for (size_t i = 0, e = numOperands(); i < e; ++i) {
+ getOperand(i)->setImplicitlyUsedUnchecked();
+ }
+
+ justReplaceAllUsesWith(dom);
+}
+
+void MDefinition::justReplaceAllUsesWith(MDefinition* dom) {
+ MOZ_ASSERT(dom != nullptr);
+ MOZ_ASSERT(dom != this);
+
+  // Carry over the fact that the value has uses which are no longer
+  // inspectable in the graph.
+ if (isImplicitlyUsed()) {
+ dom->setImplicitlyUsedUnchecked();
+ }
+
+ for (MUseIterator i(usesBegin()), e(usesEnd()); i != e; ++i) {
+ i->setProducerUnchecked(dom);
+ }
+ dom->uses_.takeElements(uses_);
+}
+
+bool MDefinition::optimizeOutAllUses(TempAllocator& alloc) {
+ for (MUseIterator i(usesBegin()), e(usesEnd()); i != e;) {
+ MUse* use = *i++;
+ MConstant* constant = use->consumer()->block()->optimizedOutConstant(alloc);
+ if (!alloc.ensureBallast()) {
+ return false;
+ }
+
+ // Update the resume point operand to use the optimized-out constant.
+ use->setProducerUnchecked(constant);
+ constant->addUseUnchecked(use);
+ }
+
+ // Remove dangling pointers.
+ this->uses_.clear();
+ return true;
+}
+
+void MDefinition::replaceAllLiveUsesWith(MDefinition* dom) {
+ for (MUseIterator i(usesBegin()), e(usesEnd()); i != e;) {
+ MUse* use = *i++;
+ MNode* consumer = use->consumer();
+ if (consumer->isResumePoint()) {
+ continue;
+ }
+ if (consumer->isDefinition() &&
+ consumer->toDefinition()->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ // Update the operand to use the dominating definition.
+ use->replaceProducer(dom);
+ }
+}
+
+MConstant* MConstant::New(TempAllocator& alloc, const Value& v) {
+ return new (alloc) MConstant(alloc, v);
+}
+
+MConstant* MConstant::New(TempAllocator::Fallible alloc, const Value& v) {
+ return new (alloc) MConstant(alloc.alloc, v);
+}
+
+MConstant* MConstant::NewFloat32(TempAllocator& alloc, double d) {
+ MOZ_ASSERT(std::isnan(d) || d == double(float(d)));
+ return new (alloc) MConstant(float(d));
+}
+
+MConstant* MConstant::NewInt64(TempAllocator& alloc, int64_t i) {
+ return new (alloc) MConstant(MIRType::Int64, i);
+}
+
+MConstant* MConstant::NewIntPtr(TempAllocator& alloc, intptr_t i) {
+ return new (alloc) MConstant(MIRType::IntPtr, i);
+}
+
+MConstant* MConstant::New(TempAllocator& alloc, const Value& v, MIRType type) {
+ if (type == MIRType::Float32) {
+ return NewFloat32(alloc, v.toNumber());
+ }
+ MConstant* res = New(alloc, v);
+ MOZ_ASSERT(res->type() == type);
+ return res;
+}
+
+MConstant* MConstant::NewObject(TempAllocator& alloc, JSObject* v) {
+ return new (alloc) MConstant(v);
+}
+
+MConstant* MConstant::NewShape(TempAllocator& alloc, Shape* s) {
+ return new (alloc) MConstant(s);
+}
+
+static MIRType MIRTypeFromValue(const js::Value& vp) {
+ if (vp.isDouble()) {
+ return MIRType::Double;
+ }
+ if (vp.isMagic()) {
+ switch (vp.whyMagic()) {
+ case JS_OPTIMIZED_OUT:
+ return MIRType::MagicOptimizedOut;
+ case JS_ELEMENTS_HOLE:
+ return MIRType::MagicHole;
+ case JS_IS_CONSTRUCTING:
+ return MIRType::MagicIsConstructing;
+ case JS_UNINITIALIZED_LEXICAL:
+ return MIRType::MagicUninitializedLexical;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Unexpected magic constant");
+ }
+ }
+ return MIRTypeFromValueType(vp.extractNonDoubleType());
+}
+
+MConstant::MConstant(TempAllocator& alloc, const js::Value& vp)
+ : MNullaryInstruction(classOpcode) {
+ setResultType(MIRTypeFromValue(vp));
+
+ MOZ_ASSERT(payload_.asBits == 0);
+
+ switch (type()) {
+ case MIRType::Undefined:
+ case MIRType::Null:
+ break;
+ case MIRType::Boolean:
+ payload_.b = vp.toBoolean();
+ break;
+ case MIRType::Int32:
+ payload_.i32 = vp.toInt32();
+ break;
+ case MIRType::Double:
+ payload_.d = vp.toDouble();
+ break;
+ case MIRType::String:
+ MOZ_ASSERT(!IsInsideNursery(vp.toString()));
+ MOZ_ASSERT(vp.toString()->isLinear());
+ payload_.str = vp.toString();
+ break;
+ case MIRType::Symbol:
+ payload_.sym = vp.toSymbol();
+ break;
+ case MIRType::BigInt:
+ MOZ_ASSERT(!IsInsideNursery(vp.toBigInt()));
+ payload_.bi = vp.toBigInt();
+ break;
+ case MIRType::Object:
+ MOZ_ASSERT(!IsInsideNursery(&vp.toObject()));
+ payload_.obj = &vp.toObject();
+ break;
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicHole:
+ case MIRType::MagicIsConstructing:
+ case MIRType::MagicUninitializedLexical:
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+
+ setMovable();
+}
+
+MConstant::MConstant(JSObject* obj) : MNullaryInstruction(classOpcode) {
+ MOZ_ASSERT(!IsInsideNursery(obj));
+ setResultType(MIRType::Object);
+ payload_.obj = obj;
+ setMovable();
+}
+
+MConstant::MConstant(Shape* shape) : MNullaryInstruction(classOpcode) {
+ setResultType(MIRType::Shape);
+ payload_.shape = shape;
+ setMovable();
+}
+
+MConstant::MConstant(float f) : MNullaryInstruction(classOpcode) {
+ setResultType(MIRType::Float32);
+ payload_.f = f;
+ setMovable();
+}
+
+MConstant::MConstant(MIRType type, int64_t i)
+ : MNullaryInstruction(classOpcode) {
+ MOZ_ASSERT(type == MIRType::Int64 || type == MIRType::IntPtr);
+ setResultType(type);
+ if (type == MIRType::Int64) {
+ payload_.i64 = i;
+ } else {
+ payload_.iptr = i;
+ }
+ setMovable();
+}
+
+#ifdef DEBUG
+void MConstant::assertInitializedPayload() const {
+ // valueHash() and equals() expect the unused payload bits to be
+ // initialized to zero. Assert this in debug builds.
+
+ switch (type()) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+# if MOZ_LITTLE_ENDIAN()
+ MOZ_ASSERT((payload_.asBits >> 32) == 0);
+# else
+ MOZ_ASSERT((payload_.asBits << 32) == 0);
+# endif
+ break;
+ case MIRType::Boolean:
+# if MOZ_LITTLE_ENDIAN()
+ MOZ_ASSERT((payload_.asBits >> 1) == 0);
+# else
+ MOZ_ASSERT((payload_.asBits & ~(1ULL << 56)) == 0);
+# endif
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ break;
+ case MIRType::String:
+ case MIRType::Object:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ case MIRType::IntPtr:
+ case MIRType::Shape:
+# if MOZ_LITTLE_ENDIAN()
+ MOZ_ASSERT_IF(JS_BITS_PER_WORD == 32, (payload_.asBits >> 32) == 0);
+# else
+ MOZ_ASSERT_IF(JS_BITS_PER_WORD == 32, (payload_.asBits << 32) == 0);
+# endif
+ break;
+ default:
+ MOZ_ASSERT(IsNullOrUndefined(type()) || IsMagicType(type()));
+ MOZ_ASSERT(payload_.asBits == 0);
+ break;
+ }
+}
+#endif
+
+static HashNumber ConstantValueHash(MIRType type, uint64_t payload) {
+ // Build a 64-bit value holding both the payload and the type.
+ static const size_t TypeBits = 8;
+ static const size_t TypeShift = 64 - TypeBits;
+ MOZ_ASSERT(uintptr_t(type) <= (1 << TypeBits) - 1);
+ uint64_t bits = (uint64_t(type) << TypeShift) ^ payload;
+
+  // Fold all 64 bits into the 32-bit result. It's tempting to just discard
+  // half of the bits, as this is just a hash; however, there are many common
+  // patterns of values where only the low or the high bits vary, so
+  // discarding either half would lead to excessive hash collisions.
+ return (HashNumber)bits ^ (HashNumber)(bits >> 32);
+}
+
+HashNumber MConstant::valueHash() const {
+ static_assert(sizeof(Payload) == sizeof(uint64_t),
+ "Code below assumes payload fits in 64 bits");
+
+ assertInitializedPayload();
+ return ConstantValueHash(type(), payload_.asBits);
+}
+
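+// Hash the proto constant together with the receiver object's id, if any.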
+HashNumber MConstantProto::valueHash() const {
+ HashNumber hash = protoObject()->valueHash();
+ const MDefinition* receiverObject = getReceiverObject();
+ if (receiverObject) {
+ hash = addU32ToHash(hash, receiverObject->id());
+ }
+ return hash;
+}
+
+bool MConstant::congruentTo(const MDefinition* ins) const {
+ return ins->isConstant() && equals(ins->toConstant());
+}
+
+#ifdef JS_JITSPEW
+void MConstant::printOpcode(GenericPrinter& out) const {
+ PrintOpcodeName(out, op());
+ out.printf(" ");
+ switch (type()) {
+ case MIRType::Undefined:
+ out.printf("undefined");
+ break;
+ case MIRType::Null:
+ out.printf("null");
+ break;
+ case MIRType::Boolean:
+ out.printf(toBoolean() ? "true" : "false");
+ break;
+ case MIRType::Int32:
+ out.printf("0x%x", uint32_t(toInt32()));
+ break;
+ case MIRType::Int64:
+ out.printf("0x%" PRIx64, uint64_t(toInt64()));
+ break;
+ case MIRType::IntPtr:
+ out.printf("0x%" PRIxPTR, uintptr_t(toIntPtr()));
+ break;
+ case MIRType::Double:
+ out.printf("%.16g", toDouble());
+ break;
+ case MIRType::Float32: {
+ float val = toFloat32();
+ out.printf("%.16g", val);
+ break;
+ }
+ case MIRType::Object:
+ if (toObject().is<JSFunction>()) {
+ JSFunction* fun = &toObject().as<JSFunction>();
+ if (fun->displayAtom()) {
+ out.put("function ");
+ EscapedStringPrinter(out, fun->displayAtom(), 0);
+ } else {
+ out.put("unnamed function");
+ }
+ if (fun->hasBaseScript()) {
+ BaseScript* script = fun->baseScript();
+ out.printf(" (%s:%u)", script->filename() ? script->filename() : "",
+ script->lineno());
+ }
+ out.printf(" at %p", (void*)fun);
+ break;
+ }
+ out.printf("object %p (%s)", (void*)&toObject(),
+ toObject().getClass()->name);
+ break;
+ case MIRType::Symbol:
+ out.printf("symbol at %p", (void*)toSymbol());
+ break;
+ case MIRType::BigInt:
+ out.printf("BigInt at %p", (void*)toBigInt());
+ break;
+ case MIRType::String:
+ out.printf("string %p", (void*)toString());
+ break;
+ case MIRType::Shape:
+ out.printf("shape at %p", (void*)toShape());
+ break;
+ case MIRType::MagicHole:
+ out.printf("magic hole");
+ break;
+ case MIRType::MagicIsConstructing:
+ out.printf("magic is-constructing");
+ break;
+ case MIRType::MagicOptimizedOut:
+ out.printf("magic optimized-out");
+ break;
+ case MIRType::MagicUninitializedLexical:
+ out.printf("magic uninitialized-lexical");
+ break;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+#endif
+
+bool MConstant::canProduceFloat32() const {
+ if (!isTypeRepresentableAsDouble()) {
+ return false;
+ }
+
+ if (type() == MIRType::Int32) {
+ return IsFloat32Representable(static_cast<double>(toInt32()));
+ }
+ if (type() == MIRType::Double) {
+ return IsFloat32Representable(toDouble());
+ }
+ MOZ_ASSERT(type() == MIRType::Float32);
+ return true;
+}
+
+Value MConstant::toJSValue() const {
+ // Wasm has types like int64 that cannot be stored as js::Value. It also
+ // doesn't want the NaN canonicalization enforced by js::Value.
+ MOZ_ASSERT(!IsCompilingWasm());
+
+ switch (type()) {
+ case MIRType::Undefined:
+ return UndefinedValue();
+ case MIRType::Null:
+ return NullValue();
+ case MIRType::Boolean:
+ return BooleanValue(toBoolean());
+ case MIRType::Int32:
+ return Int32Value(toInt32());
+ case MIRType::Double:
+ return DoubleValue(toDouble());
+ case MIRType::Float32:
+ return Float32Value(toFloat32());
+ case MIRType::String:
+ return StringValue(toString());
+ case MIRType::Symbol:
+ return SymbolValue(toSymbol());
+ case MIRType::BigInt:
+ return BigIntValue(toBigInt());
+ case MIRType::Object:
+ return ObjectValue(toObject());
+ case MIRType::Shape:
+ return PrivateGCThingValue(toShape());
+ case MIRType::MagicOptimizedOut:
+ return MagicValue(JS_OPTIMIZED_OUT);
+ case MIRType::MagicHole:
+ return MagicValue(JS_ELEMENTS_HOLE);
+ case MIRType::MagicIsConstructing:
+ return MagicValue(JS_IS_CONSTRUCTING);
+ case MIRType::MagicUninitializedLexical:
+ return MagicValue(JS_UNINITIALIZED_LEXICAL);
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+}
+
+bool MConstant::valueToBoolean(bool* res) const {
+ switch (type()) {
+ case MIRType::Boolean:
+ *res = toBoolean();
+ return true;
+ case MIRType::Int32:
+ *res = toInt32() != 0;
+ return true;
+ case MIRType::Int64:
+ *res = toInt64() != 0;
+ return true;
+ case MIRType::Double:
+ *res = !std::isnan(toDouble()) && toDouble() != 0.0;
+ return true;
+ case MIRType::Float32:
+ *res = !std::isnan(toFloat32()) && toFloat32() != 0.0f;
+ return true;
+ case MIRType::Null:
+ case MIRType::Undefined:
+ *res = false;
+ return true;
+ case MIRType::Symbol:
+ *res = true;
+ return true;
+ case MIRType::BigInt:
+ *res = !toBigInt()->isZero();
+ return true;
+ case MIRType::String:
+ *res = toString()->length() != 0;
+ return true;
+ case MIRType::Object:
+ // TODO(Warp): Lazy groups have been removed.
+ // We have to call EmulatesUndefined but that reads obj->group->clasp
+ // and so it's racy when the object has a lazy group. The main callers
+ // of this (MTest, MNot) already know how to fold the object case, so
+ // just give up.
+ return false;
+ default:
+ MOZ_ASSERT(IsMagicType(type()));
+ return false;
+ }
+}
+
+HashNumber MWasmFloatConstant::valueHash() const {
+#ifdef ENABLE_WASM_SIMD
+ return ConstantValueHash(type(), u.bits_[0] ^ u.bits_[1]);
+#else
+ return ConstantValueHash(type(), u.bits_[0]);
+#endif
+}
+
+bool MWasmFloatConstant::congruentTo(const MDefinition* ins) const {
+ return ins->isWasmFloatConstant() && type() == ins->type() &&
+#ifdef ENABLE_WASM_SIMD
+ u.bits_[1] == ins->toWasmFloatConstant()->u.bits_[1] &&
+#endif
+ u.bits_[0] == ins->toWasmFloatConstant()->u.bits_[0];
+}
+
+HashNumber MWasmNullConstant::valueHash() const {
+ return ConstantValueHash(MIRType::RefOrNull, 0);
+}
+
+#ifdef JS_JITSPEW
+void MControlInstruction::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ for (size_t j = 0; j < numSuccessors(); j++) {
+ if (getSuccessor(j)) {
+ out.printf(" block%u", getSuccessor(j)->id());
+ } else {
+ out.printf(" (null-to-be-patched)");
+ }
+ }
+}
+
+void MCompare::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" %s", CodeName(jsop()));
+}
+
+void MTypeOfIs::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" %s", CodeName(jsop()));
+
+ const char* name = "";
+ switch (jstype()) {
+ case JSTYPE_UNDEFINED:
+ name = "undefined";
+ break;
+ case JSTYPE_OBJECT:
+ name = "object";
+ break;
+ case JSTYPE_FUNCTION:
+ name = "function";
+ break;
+ case JSTYPE_STRING:
+ name = "string";
+ break;
+ case JSTYPE_NUMBER:
+ name = "number";
+ break;
+ case JSTYPE_BOOLEAN:
+ name = "boolean";
+ break;
+ case JSTYPE_SYMBOL:
+ name = "symbol";
+ break;
+ case JSTYPE_BIGINT:
+ name = "bigint";
+ break;
+# ifdef ENABLE_RECORD_TUPLE
+ case JSTYPE_RECORD:
+ case JSTYPE_TUPLE:
+# endif
+ case JSTYPE_LIMIT:
+ MOZ_CRASH("Unexpected type");
+ }
+ out.printf(" '%s'", name);
+}
+
+void MLoadUnboxedScalar::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" %s", Scalar::name(storageType()));
+}
+
+void MLoadDataViewElement::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" %s", Scalar::name(storageType()));
+}
+
+void MAssertRange::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.put(" ");
+ assertedRange()->dump(out);
+}
+
+void MNearbyInt::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ const char* roundingModeStr = nullptr;
+ switch (roundingMode_) {
+ case RoundingMode::Up:
+ roundingModeStr = "(up)";
+ break;
+ case RoundingMode::Down:
+ roundingModeStr = "(down)";
+ break;
+ case RoundingMode::NearestTiesToEven:
+ roundingModeStr = "(nearest ties even)";
+ break;
+ case RoundingMode::TowardsZero:
+ roundingModeStr = "(towards zero)";
+ break;
+ }
+ out.printf(" %s", roundingModeStr);
+}
+#endif
+
+AliasSet MRandom::getAliasSet() const { return AliasSet::Store(AliasSet::RNG); }
+
+MDefinition* MSign::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+ if (!input->isConstant() ||
+ !input->toConstant()->isTypeRepresentableAsDouble()) {
+ return this;
+ }
+
+ double in = input->toConstant()->numberToDouble();
+ double out = js::math_sign_impl(in);
+
+ if (type() == MIRType::Int32) {
+ // Decline folding if this is an int32 operation, but the result type
+ // isn't an int32.
+ Value outValue = NumberValue(out);
+ if (!outValue.isInt32()) {
+ return this;
+ }
+
+ return MConstant::New(alloc, outValue);
+ }
+
+ return MConstant::New(alloc, DoubleValue(out));
+}
+
+const char* MMathFunction::FunctionName(UnaryMathFunction function) {
+ return GetUnaryMathFunctionName(function);
+}
+
+#ifdef JS_JITSPEW
+void MMathFunction::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" %s", FunctionName(function()));
+}
+#endif
+
+MDefinition* MMathFunction::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+ if (!input->isConstant() ||
+ !input->toConstant()->isTypeRepresentableAsDouble()) {
+ return this;
+ }
+
+ UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(function());
+
+ double in = input->toConstant()->numberToDouble();
+
+ // The function pointer call can't GC.
+ JS::AutoSuppressGCAnalysis nogc;
+ double out = funPtr(in);
+
+ if (input->type() == MIRType::Float32) {
+ return MConstant::NewFloat32(alloc, out);
+ }
+ return MConstant::New(alloc, DoubleValue(out));
+}
+
+MDefinition* MAtomicIsLockFree::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+ if (!input->isConstant() || input->type() != MIRType::Int32) {
+ return this;
+ }
+
+ int32_t i = input->toConstant()->toInt32();
+ return MConstant::New(alloc, BooleanValue(AtomicOperations::isLockfreeJS(i)));
+}
+
+// Define |THIS_SLOT| as part of this translation unit, as it is used to
+// specialize the parameterized |New| function calls introduced by
+// TRIVIAL_NEW_WRAPPERS.
+const int32_t MParameter::THIS_SLOT;
+
+#ifdef JS_JITSPEW
+void MParameter::printOpcode(GenericPrinter& out) const {
+ PrintOpcodeName(out, op());
+ if (index() == THIS_SLOT) {
+ out.printf(" THIS_SLOT");
+ } else {
+ out.printf(" %d", index());
+ }
+}
+#endif
+
+HashNumber MParameter::valueHash() const {
+ HashNumber hash = MDefinition::valueHash();
+ hash = addU32ToHash(hash, index_);
+ return hash;
+}
+
+bool MParameter::congruentTo(const MDefinition* ins) const {
+ if (!ins->isParameter()) {
+ return false;
+ }
+
+ return ins->toParameter()->index() == index_;
+}
+
+WrappedFunction::WrappedFunction(JSFunction* nativeFun, uint16_t nargs,
+ FunctionFlags flags)
+ : nativeFun_(nativeFun), nargs_(nargs), flags_(flags) {
+ MOZ_ASSERT_IF(nativeFun, isNativeWithoutJitEntry());
+
+#ifdef DEBUG
+ // If we are not running off-main thread we can assert that the
+ // metadata is consistent.
+ if (!CanUseExtraThreads() && nativeFun) {
+ MOZ_ASSERT(nativeFun->nargs() == nargs);
+
+ MOZ_ASSERT(nativeFun->isNativeWithoutJitEntry() ==
+ isNativeWithoutJitEntry());
+ MOZ_ASSERT(nativeFun->hasJitEntry() == hasJitEntry());
+ MOZ_ASSERT(nativeFun->isConstructor() == isConstructor());
+ MOZ_ASSERT(nativeFun->isClassConstructor() == isClassConstructor());
+ }
+#endif
+}
+
+MCall* MCall::New(TempAllocator& alloc, WrappedFunction* target, size_t maxArgc,
+ size_t numActualArgs, bool construct, bool ignoresReturnValue,
+ bool isDOMCall, mozilla::Maybe<DOMObjectKind> objectKind) {
+ MOZ_ASSERT(isDOMCall == objectKind.isSome());
+ MOZ_ASSERT(maxArgc >= numActualArgs);
+ MCall* ins;
+ if (isDOMCall) {
+ MOZ_ASSERT(!construct);
+ ins = new (alloc) MCallDOMNative(target, numActualArgs, *objectKind);
+ } else {
+ ins =
+ new (alloc) MCall(target, numActualArgs, construct, ignoresReturnValue);
+ }
+ if (!ins->init(alloc, maxArgc + NumNonArgumentOperands)) {
+ return nullptr;
+ }
+ return ins;
+}
+
+AliasSet MCallDOMNative::getAliasSet() const {
+ const JSJitInfo* jitInfo = getJitInfo();
+
+ // If we don't know anything about the types of our arguments, we have to
+ // assume that type-coercions can have side-effects, so we need to alias
+ // everything.
+ if (jitInfo->aliasSet() == JSJitInfo::AliasEverything ||
+ !jitInfo->isTypedMethodJitInfo()) {
+ return AliasSet::Store(AliasSet::Any);
+ }
+
+ uint32_t argIndex = 0;
+ const JSTypedMethodJitInfo* methodInfo =
+ reinterpret_cast<const JSTypedMethodJitInfo*>(jitInfo);
+ for (const JSJitInfo::ArgType* argType = methodInfo->argTypes;
+ *argType != JSJitInfo::ArgTypeListEnd; ++argType, ++argIndex) {
+ if (argIndex >= numActualArgs()) {
+ // Passing through undefined can't have side-effects
+ continue;
+ }
+ // getArg(0) is "this", so skip it
+ MDefinition* arg = getArg(argIndex + 1);
+ MIRType actualType = arg->type();
+ // The only way to reliably avoid side-effects given the information we
+ // have here is if we're passing in a known primitive value to an
+ // argument that expects a primitive value.
+ //
+ // XXXbz maybe we need to communicate better information. For example,
+ // a sequence argument will sort of unavoidably have side effects, while
+ // a typed array argument won't have any, but both are claimed to be
+ // JSJitInfo::Object. But if we do that, we need to watch out for our
+ // movability/DCE-ability bits: if we have an arg type that can reliably
+ // throw an exception on conversion, that might not affect our alias set
+ // per se, but it should prevent us being moved or DCE-ed, unless we
+ // know the incoming things match that arg type and won't throw.
+ //
+ if ((actualType == MIRType::Value || actualType == MIRType::Object) ||
+ (*argType & JSJitInfo::Object)) {
+ return AliasSet::Store(AliasSet::Any);
+ }
+ }
+
+ // We checked all the args, and they check out. So we only alias DOM
+ // mutations or alias nothing, depending on the alias set in the jitinfo.
+ if (jitInfo->aliasSet() == JSJitInfo::AliasNone) {
+ return AliasSet::None();
+ }
+
+ MOZ_ASSERT(jitInfo->aliasSet() == JSJitInfo::AliasDOMSets);
+ return AliasSet::Load(AliasSet::DOMProperty);
+}
+
+void MCallDOMNative::computeMovable() {
+ // We are movable if the jitinfo says we can be and if we're also not
+ // effectful. The jitinfo can't check for the latter, since it depends on
+ // the types of our arguments.
+ const JSJitInfo* jitInfo = getJitInfo();
+
+ MOZ_ASSERT_IF(jitInfo->isMovable,
+ jitInfo->aliasSet() != JSJitInfo::AliasEverything);
+
+ if (jitInfo->isMovable && !isEffectful()) {
+ setMovable();
+ }
+}
+
+bool MCallDOMNative::congruentTo(const MDefinition* ins) const {
+ if (!isMovable()) {
+ return false;
+ }
+
+ if (!ins->isCall()) {
+ return false;
+ }
+
+ const MCall* call = ins->toCall();
+
+ if (!call->isCallDOMNative()) {
+ return false;
+ }
+
+ if (getSingleTarget() != call->getSingleTarget()) {
+ return false;
+ }
+
+ if (isConstructing() != call->isConstructing()) {
+ return false;
+ }
+
+ if (numActualArgs() != call->numActualArgs()) {
+ return false;
+ }
+
+ if (!congruentIfOperandsEqual(call)) {
+ return false;
+ }
+
+ // The other call had better be movable at this point!
+ MOZ_ASSERT(call->isMovable());
+
+ return true;
+}
+
+const JSJitInfo* MCallDOMNative::getJitInfo() const {
+ MOZ_ASSERT(getSingleTarget()->hasJitInfo());
+ return getSingleTarget()->jitInfo();
+}
+
+MCallClassHook* MCallClassHook::New(TempAllocator& alloc, JSNative target,
+ uint32_t argc, bool constructing) {
+ auto* ins = new (alloc) MCallClassHook(target, constructing);
+
+  // Operands: callee + |this| + the argc arguments + (if constructing)
+  // newTarget.
+ uint32_t numOperands = 2 + argc + constructing;
+
+ if (!ins->init(alloc, numOperands)) {
+ return nullptr;
+ }
+
+ return ins;
+}
+
+MDefinition* MStringLength::foldsTo(TempAllocator& alloc) {
+ if (string()->isConstant()) {
+ JSString* str = string()->toConstant()->toString();
+ return MConstant::New(alloc, Int32Value(str->length()));
+ }
+
+ // MFromCharCode returns a one-element string.
+ if (string()->isFromCharCode()) {
+ return MConstant::New(alloc, Int32Value(1));
+ }
+
+ return this;
+}
+
+MDefinition* MConcat::foldsTo(TempAllocator& alloc) {
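+  // Concatenation with the empty string is a no-op: fold "" + x => x and
+  // x + "" => x.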
+ if (lhs()->isConstant() && lhs()->toConstant()->toString()->empty()) {
+ return rhs();
+ }
+
+ if (rhs()->isConstant() && rhs()->toConstant()->toString()->empty()) {
+ return lhs();
+ }
+
+ return this;
+}
+
+MDefinition* MCharCodeAt::foldsTo(TempAllocator& alloc) {
+ MDefinition* string = this->string();
+ if (!string->isConstant() && !string->isFromCharCode()) {
+ return this;
+ }
+
+ MDefinition* index = this->index();
+ if (index->isSpectreMaskIndex()) {
+ index = index->toSpectreMaskIndex()->index();
+ }
+ if (!index->isConstant()) {
+ return this;
+ }
+ int32_t idx = index->toConstant()->toInt32();
+
+ // Handle the pattern |s[idx].charCodeAt(0)|.
+ if (string->isFromCharCode()) {
+ if (idx != 0) {
+ return this;
+ }
+
+ // Simplify |CharCodeAt(FromCharCode(CharCodeAt(s, idx)), 0)| to just
+ // |CharCodeAt(s, idx)|.
+ auto* charCode = string->toFromCharCode()->code();
+ if (!charCode->isCharCodeAt()) {
+ return this;
+ }
+
+ return charCode;
+ }
+
+ JSLinearString* str = &string->toConstant()->toString()->asLinear();
+ if (idx < 0 || uint32_t(idx) >= str->length()) {
+ return this;
+ }
+
+ char16_t ch = str->latin1OrTwoByteChar(idx);
+ return MConstant::New(alloc, Int32Value(ch));
+}
+
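+// Returns true if every operand can produce a Float32 value; otherwise the
+// operands are converted to Double and false is returned.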
+template <size_t Arity>
+[[nodiscard]] static bool EnsureFloatInputOrConvert(
+ MAryInstruction<Arity>* owner, TempAllocator& alloc) {
+ MOZ_ASSERT(!IsFloatingPointType(owner->type()),
+ "Floating point types must check consumers");
+
+ if (AllOperandsCanProduceFloat32(owner)) {
+ return true;
+ }
+ ConvertOperandsToDouble(owner, alloc);
+ return false;
+}
+
+template <size_t Arity>
+[[nodiscard]] static bool EnsureFloatConsumersAndInputOrConvert(
+ MAryInstruction<Arity>* owner, TempAllocator& alloc) {
+ MOZ_ASSERT(IsFloatingPointType(owner->type()),
+ "Integer types don't need to check consumers");
+
+ if (AllOperandsCanProduceFloat32(owner) &&
+ CheckUsesAreFloat32Consumers(owner)) {
+ return true;
+ }
+ ConvertOperandsToDouble(owner, alloc);
+ return false;
+}
+
+void MFloor::trySpecializeFloat32(TempAllocator& alloc) {
+ MOZ_ASSERT(type() == MIRType::Int32);
+ if (EnsureFloatInputOrConvert(this, alloc)) {
+ specialization_ = MIRType::Float32;
+ }
+}
+
+void MCeil::trySpecializeFloat32(TempAllocator& alloc) {
+ MOZ_ASSERT(type() == MIRType::Int32);
+ if (EnsureFloatInputOrConvert(this, alloc)) {
+ specialization_ = MIRType::Float32;
+ }
+}
+
+void MRound::trySpecializeFloat32(TempAllocator& alloc) {
+ MOZ_ASSERT(type() == MIRType::Int32);
+ if (EnsureFloatInputOrConvert(this, alloc)) {
+ specialization_ = MIRType::Float32;
+ }
+}
+
+void MTrunc::trySpecializeFloat32(TempAllocator& alloc) {
+ MOZ_ASSERT(type() == MIRType::Int32);
+ if (EnsureFloatInputOrConvert(this, alloc)) {
+ specialization_ = MIRType::Float32;
+ }
+}
+
+void MNearbyInt::trySpecializeFloat32(TempAllocator& alloc) {
+ if (EnsureFloatConsumersAndInputOrConvert(this, alloc)) {
+ specialization_ = MIRType::Float32;
+ setResultType(MIRType::Float32);
+ }
+}
+
+MGoto* MGoto::New(TempAllocator& alloc, MBasicBlock* target) {
+ return new (alloc) MGoto(target);
+}
+
+MGoto* MGoto::New(TempAllocator::Fallible alloc, MBasicBlock* target) {
+ MOZ_ASSERT(target);
+ return new (alloc) MGoto(target);
+}
+
+MGoto* MGoto::New(TempAllocator& alloc) { return new (alloc) MGoto(nullptr); }
+
+#ifdef JS_JITSPEW
+void MUnbox::printOpcode(GenericPrinter& out) const {
+ PrintOpcodeName(out, op());
+ out.printf(" ");
+ getOperand(0)->printName(out);
+ out.printf(" ");
+
+ switch (type()) {
+ case MIRType::Int32:
+ out.printf("to Int32");
+ break;
+ case MIRType::Double:
+ out.printf("to Double");
+ break;
+ case MIRType::Boolean:
+ out.printf("to Boolean");
+ break;
+ case MIRType::String:
+ out.printf("to String");
+ break;
+ case MIRType::Symbol:
+ out.printf("to Symbol");
+ break;
+ case MIRType::BigInt:
+ out.printf("to BigInt");
+ break;
+ case MIRType::Object:
+ out.printf("to Object");
+ break;
+ default:
+ break;
+ }
+
+ switch (mode()) {
+ case Fallible:
+ out.printf(" (fallible)");
+ break;
+ case Infallible:
+ out.printf(" (infallible)");
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+MDefinition* MUnbox::foldsTo(TempAllocator& alloc) {
+ if (input()->isBox()) {
+ MDefinition* unboxed = input()->toBox()->input();
+
+ // Fold MUnbox(MBox(x)) => x if types match.
+ if (unboxed->type() == type()) {
+ if (fallible()) {
+ unboxed->setImplicitlyUsedUnchecked();
+ }
+ return unboxed;
+ }
+
+ // Fold MUnbox(MBox(x)) => MToDouble(x) if possible.
+ if (type() == MIRType::Double &&
+ IsTypeRepresentableAsDouble(unboxed->type())) {
+ if (unboxed->isConstant()) {
+ return MConstant::New(
+ alloc, DoubleValue(unboxed->toConstant()->numberToDouble()));
+ }
+
+ return MToDouble::New(alloc, unboxed);
+ }
+
+ // MUnbox<Int32>(MBox<Double>(x)) will always fail, even if x can be
+ // represented as an Int32. Fold to avoid unnecessary bailouts.
+ if (type() == MIRType::Int32 && unboxed->type() == MIRType::Double) {
+ auto* folded = MToNumberInt32::New(alloc, unboxed,
+ IntConversionInputKind::NumbersOnly);
+ folded->setGuard();
+ return folded;
+ }
+ }
+
+ return this;
+}
+
+#ifdef DEBUG
+void MPhi::assertLoopPhi() const {
+ // getLoopPredecessorOperand and getLoopBackedgeOperand rely on these
+ // predecessors being at known indices.
+ if (block()->numPredecessors() == 2) {
+ MBasicBlock* pred = block()->getPredecessor(0);
+ MBasicBlock* back = block()->getPredecessor(1);
+ MOZ_ASSERT(pred == block()->loopPredecessor());
+ MOZ_ASSERT(pred->successorWithPhis() == block());
+ MOZ_ASSERT(pred->positionInPhiSuccessor() == 0);
+ MOZ_ASSERT(back == block()->backedge());
+ MOZ_ASSERT(back->successorWithPhis() == block());
+ MOZ_ASSERT(back->positionInPhiSuccessor() == 1);
+ } else {
+ // After we remove fake loop predecessors for loop headers that
+ // are only reachable via OSR, the only predecessor is the
+ // loop backedge.
+ MOZ_ASSERT(block()->numPredecessors() == 1);
+ MOZ_ASSERT(block()->graph().osrBlock());
+ MOZ_ASSERT(!block()->graph().canBuildDominators());
+ MBasicBlock* back = block()->getPredecessor(0);
+ MOZ_ASSERT(back == block()->backedge());
+ MOZ_ASSERT(back->successorWithPhis() == block());
+ MOZ_ASSERT(back->positionInPhiSuccessor() == 0);
+ }
+}
+#endif
+
+MDefinition* MPhi::getLoopPredecessorOperand() const {
+ // This should not be called after removing fake loop predecessors.
+ MOZ_ASSERT(block()->numPredecessors() == 2);
+ assertLoopPhi();
+ return getOperand(0);
+}
+
+MDefinition* MPhi::getLoopBackedgeOperand() const {
+ assertLoopPhi();
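+  // Once fake loop predecessors have been removed, the backedge is the only
+  // predecessor (index 0); otherwise it is at index 1.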
+ uint32_t idx = block()->numPredecessors() == 2 ? 1 : 0;
+ return getOperand(idx);
+}
+
+void MPhi::removeOperand(size_t index) {
+ MOZ_ASSERT(index < numOperands());
+ MOZ_ASSERT(getUseFor(index)->index() == index);
+ MOZ_ASSERT(getUseFor(index)->consumer() == this);
+
+ // If we have phi(..., a, b, c, d, ..., z) and we plan
+ // on removing a, then first shift downward so that we have
+ // phi(..., b, c, d, ..., z, z):
+ MUse* p = inputs_.begin() + index;
+ MUse* e = inputs_.end();
+ p->producer()->removeUse(p);
+ for (; p < e - 1; ++p) {
+ MDefinition* producer = (p + 1)->producer();
+ p->setProducerUnchecked(producer);
+ producer->replaceUse(p + 1, p);
+ }
+
+ // truncate the inputs_ list:
+ inputs_.popBack();
+}
+
+void MPhi::removeAllOperands() {
+ for (MUse& p : inputs_) {
+ p.producer()->removeUse(&p);
+ }
+ inputs_.clear();
+}
+
+MDefinition* MPhi::foldsTernary(TempAllocator& alloc) {
+  /* Check whether this MPhi is a ternary construct.
+   * This is a very loose term, as it actually only checks for
+ *
+ * MTest X
+ * / \
+ * ... ...
+ * \ /
+ * MPhi X Y
+ *
+ * Which we will simply call:
+ * x ? x : y or x ? y : x
+ */
+
+ if (numOperands() != 2) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(block()->numPredecessors() == 2);
+
+ MBasicBlock* pred = block()->immediateDominator();
+ if (!pred || !pred->lastIns()->isTest()) {
+ return nullptr;
+ }
+
+ MTest* test = pred->lastIns()->toTest();
+
+ // True branch may only dominate one edge of MPhi.
+ if (test->ifTrue()->dominates(block()->getPredecessor(0)) ==
+ test->ifTrue()->dominates(block()->getPredecessor(1))) {
+ return nullptr;
+ }
+
+ // False branch may only dominate one edge of MPhi.
+ if (test->ifFalse()->dominates(block()->getPredecessor(0)) ==
+ test->ifFalse()->dominates(block()->getPredecessor(1))) {
+ return nullptr;
+ }
+
+ // True and false branch must dominate different edges of MPhi.
+ if (test->ifTrue()->dominates(block()->getPredecessor(0)) ==
+ test->ifFalse()->dominates(block()->getPredecessor(0))) {
+ return nullptr;
+ }
+
+ // We found a ternary construct.
+ bool firstIsTrueBranch =
+ test->ifTrue()->dominates(block()->getPredecessor(0));
+ MDefinition* trueDef = firstIsTrueBranch ? getOperand(0) : getOperand(1);
+ MDefinition* falseDef = firstIsTrueBranch ? getOperand(1) : getOperand(0);
+
+ // Accept either
+ // testArg ? testArg : constant or
+ // testArg ? constant : testArg
+ if (!trueDef->isConstant() && !falseDef->isConstant()) {
+ return nullptr;
+ }
+
+ MConstant* c =
+ trueDef->isConstant() ? trueDef->toConstant() : falseDef->toConstant();
+ MDefinition* testArg = (trueDef == c) ? falseDef : trueDef;
+ if (testArg != test->input()) {
+ return nullptr;
+ }
+
+  // This check should be a tautology, except that the constant might be the
+  // result of the removal of a branch. In that case the domination scope of
+  // the block holding the constant might be incomplete. This condition is
+  // used to prevent doing this optimization based on incomplete information.
+  //
+  // When GVN removes a branch, it updates the domination rules before trying
+  // to fold this MPhi again, so this condition does not inhibit the
+  // optimization.
+ MBasicBlock* truePred = block()->getPredecessor(firstIsTrueBranch ? 0 : 1);
+ MBasicBlock* falsePred = block()->getPredecessor(firstIsTrueBranch ? 1 : 0);
+ if (!trueDef->block()->dominates(truePred) ||
+ !falseDef->block()->dominates(falsePred)) {
+ return nullptr;
+ }
+
+ // If testArg is an int32 type we can:
+ // - fold testArg ? testArg : 0 to testArg
+ // - fold testArg ? 0 : testArg to 0
+ if (testArg->type() == MIRType::Int32 && c->numberToDouble() == 0) {
+ testArg->setGuardRangeBailoutsUnchecked();
+
+ // When folding to the constant we need to hoist it.
+ if (trueDef == c && !c->block()->dominates(block())) {
+ c->block()->moveBefore(pred->lastIns(), c);
+ }
+ return trueDef;
+ }
+
+  // If testArg is a double type we can:
+ // - fold testArg ? testArg : 0.0 to MNaNToZero(testArg)
+ if (testArg->type() == MIRType::Double &&
+ mozilla::IsPositiveZero(c->numberToDouble()) && c != trueDef) {
+ MNaNToZero* replace = MNaNToZero::New(alloc, testArg);
+ test->block()->insertBefore(test, replace);
+ return replace;
+ }
+
+ // If testArg is a string type we can:
+ // - fold testArg ? testArg : "" to testArg
+ // - fold testArg ? "" : testArg to ""
+ if (testArg->type() == MIRType::String &&
+ c->toString() == GetJitContext()->runtime->emptyString()) {
+ // When folding to the constant we need to hoist it.
+ if (trueDef == c && !c->block()->dominates(block())) {
+ c->block()->moveBefore(pred->lastIns(), c);
+ }
+ return trueDef;
+ }
+
+ return nullptr;
+}
+
+MDefinition* MPhi::operandIfRedundant() {
+ if (inputs_.length() == 0) {
+ return nullptr;
+ }
+
+ // If this phi is redundant (e.g., phi(a,a) or b=phi(a,this)),
+ // returns the operand that it will always be equal to (a, in
+ // those two cases).
+ MDefinition* first = getOperand(0);
+ for (size_t i = 1, e = numOperands(); i < e; i++) {
+ MDefinition* op = getOperand(i);
+ if (op != first && op != this) {
+ return nullptr;
+ }
+ }
+ return first;
+}
+
+MDefinition* MPhi::foldsTo(TempAllocator& alloc) {
+ if (MDefinition* def = operandIfRedundant()) {
+ return def;
+ }
+
+ if (MDefinition* def = foldsTernary(alloc)) {
+ return def;
+ }
+
+ return this;
+}
+
+bool MPhi::congruentTo(const MDefinition* ins) const {
+ if (!ins->isPhi()) {
+ return false;
+ }
+
+ // Phis in different blocks may have different control conditions.
+ // For example, these phis:
+ //
+ // if (p)
+ // goto a
+ // a:
+ // t = phi(x, y)
+ //
+ // if (q)
+ // goto b
+ // b:
+ // s = phi(x, y)
+ //
+  // have identical operands, but they are not equivalent because t is
+ // effectively p?x:y and s is effectively q?x:y.
+ //
+ // For now, consider phis in different blocks incongruent.
+ if (ins->block() != block()) {
+ return false;
+ }
+
+ return congruentIfOperandsEqual(ins);
+}
+
+void MPhi::updateForReplacement(MPhi* other) {
+  // This function fixes the current phi's flags when it is used as a
+  // replacement for the other phi instruction |other|.
+  //
+  // When merging the usage analyses, Used wins over the other states (Unused
+  // and Unknown). If neither phi is Used, the merge is Unknown unless both
+  // are Unused.
+ if (usageAnalysis_ == PhiUsage::Used ||
+ other->usageAnalysis_ == PhiUsage::Used) {
+ usageAnalysis_ = PhiUsage::Used;
+ } else if (usageAnalysis_ != other->usageAnalysis_) {
+ // this == unused && other == unknown
+ // or this == unknown && other == unused
+ usageAnalysis_ = PhiUsage::Unknown;
+ } else {
+ // this == unused && other == unused
+    // or this == unknown && other == unknown
+ MOZ_ASSERT(usageAnalysis_ == PhiUsage::Unused ||
+ usageAnalysis_ == PhiUsage::Unknown);
+ MOZ_ASSERT(usageAnalysis_ == other->usageAnalysis_);
+ }
+}
+
+/* static */
+bool MPhi::markIteratorPhis(const PhiVector& iterators) {
+ // Find and mark phis that must transitively hold an iterator live.
+
+ Vector<MPhi*, 8, SystemAllocPolicy> worklist;
+
+ for (MPhi* iter : iterators) {
+ if (!iter->isInWorklist()) {
+ if (!worklist.append(iter)) {
+ return false;
+ }
+ iter->setInWorklist();
+ }
+ }
+
+ while (!worklist.empty()) {
+ MPhi* phi = worklist.popCopy();
+ phi->setNotInWorklist();
+
+ phi->setIterator();
+ phi->setImplicitlyUsedUnchecked();
+
+ for (MUseDefIterator iter(phi); iter; iter++) {
+ MDefinition* use = iter.def();
+ if (!use->isInWorklist() && use->isPhi() && !use->toPhi()->isIterator()) {
+ if (!worklist.append(use->toPhi())) {
+ return false;
+ }
+ use->setInWorklist();
+ }
+ }
+ }
+
+ return true;
+}
+
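+// Returns true if this phi's type can represent a value of |def|'s type
+// (Int32 widens to Double; only a Value phi can hold an arbitrary Value).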
+bool MPhi::typeIncludes(MDefinition* def) {
+ MOZ_ASSERT(!IsMagicType(def->type()));
+
+ if (def->type() == MIRType::Int32 && this->type() == MIRType::Double) {
+ return true;
+ }
+
+ if (def->type() == MIRType::Value) {
+ // This phi must be able to be any value.
+ return this->type() == MIRType::Value;
+ }
+
+ return this->mightBeType(def->type());
+}
+
+void MCallBase::addArg(size_t argnum, MDefinition* arg) {
+ // The operand vector is initialized in reverse order by WarpBuilder.
+ // It cannot be checked for consistency until all arguments are added.
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ initOperand(argnum + NumNonArgumentOperands, arg);
+}
+
+static inline bool IsConstant(MDefinition* def, double v) {
+ if (!def->isConstant()) {
+ return false;
+ }
+
+ return NumbersAreIdentical(def->toConstant()->numberToDouble(), v);
+}
+
+MDefinition* MBinaryBitwiseInstruction::foldsTo(TempAllocator& alloc) {
+ // Identity operations are removed (for int32 only) in foldUnnecessaryBitop.
+
+ if (type() == MIRType::Int32) {
+ if (MDefinition* folded = EvaluateConstantOperands(alloc, this)) {
+ return folded;
+ }
+ } else if (type() == MIRType::Int64) {
+ if (MDefinition* folded = EvaluateInt64ConstantOperands(alloc, this)) {
+ return folded;
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MBinaryBitwiseInstruction::foldUnnecessaryBitop() {
+ // It's probably OK to perform this optimization only for int32, as it will
+ // have the greatest effect for asm.js code that is compiled with the JS
+ // pipeline, and that code will not see int64 values.
+
+ if (type() != MIRType::Int32) {
+ return this;
+ }
+
+ // Fold unsigned shift right operator when the second operand is zero and
+ // the only use is an unsigned modulo. Thus, the expression
+ // |(x >>> 0) % y| becomes |x % y|.
+ if (isUrsh() && IsUint32Type(this)) {
+ MDefinition* defUse = maybeSingleDefUse();
+ if (defUse && defUse->isMod() && defUse->toMod()->isUnsigned()) {
+ return getOperand(0);
+ }
+ }
+
+ // Eliminate bitwise operations that are no-ops when used on integer
+ // inputs, such as (x | 0).
+
+ MDefinition* lhs = getOperand(0);
+ MDefinition* rhs = getOperand(1);
+
+ if (IsConstant(lhs, 0)) {
+ return foldIfZero(0);
+ }
+
+ if (IsConstant(rhs, 0)) {
+ return foldIfZero(1);
+ }
+
+ if (IsConstant(lhs, -1)) {
+ return foldIfNegOne(0);
+ }
+
+ if (IsConstant(rhs, -1)) {
+ return foldIfNegOne(1);
+ }
+
+ if (lhs == rhs) {
+ return foldIfEqual();
+ }
+
+ if (maskMatchesRightRange) {
+ MOZ_ASSERT(lhs->isConstant());
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ return foldIfAllBitsSet(0);
+ }
+
+ if (maskMatchesLeftRange) {
+ MOZ_ASSERT(rhs->isConstant());
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+ return foldIfAllBitsSet(1);
+ }
+
+ return this;
+}
+
+static inline bool CanProduceNegativeZero(MDefinition* def) {
+ // Test if this instruction can produce negative zero even when bailing out
+ // and changing types.
+ switch (def->op()) {
+ case MDefinition::Opcode::Constant:
+ if (def->type() == MIRType::Double &&
+ def->toConstant()->toDouble() == -0.0) {
+ return true;
+ }
+ [[fallthrough]];
+ case MDefinition::Opcode::BitAnd:
+ case MDefinition::Opcode::BitOr:
+ case MDefinition::Opcode::BitXor:
+ case MDefinition::Opcode::BitNot:
+ case MDefinition::Opcode::Lsh:
+ case MDefinition::Opcode::Rsh:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static inline bool NeedNegativeZeroCheck(MDefinition* def) {
+ if (def->isGuard() || def->isGuardRangeBailouts()) {
+ return true;
+ }
+
+ // Test if all uses have the same semantics for -0 and 0
+ for (MUseIterator use = def->usesBegin(); use != def->usesEnd(); use++) {
+ if (use->consumer()->isResumePoint()) {
+ return true;
+ }
+
+ MDefinition* use_def = use->consumer()->toDefinition();
+ switch (use_def->op()) {
+ case MDefinition::Opcode::Add: {
+        // If the add is truncating, -0 and 0 are observed as the same.
+ if (use_def->toAdd()->isTruncated()) {
+ break;
+ }
+
+ // x + y gives -0, when both x and y are -0
+
+ // Figure out the order in which the addition's operands will
+ // execute. EdgeCaseAnalysis::analyzeLate has renumbered the MIR
+ // definitions for us so that this just requires comparing ids.
+ MDefinition* first = use_def->toAdd()->lhs();
+ MDefinition* second = use_def->toAdd()->rhs();
+ if (first->id() > second->id()) {
+ std::swap(first, second);
+ }
+ // Negative zero checks can be removed on the first executed
+ // operand only if it is guaranteed the second executed operand
+ // will produce a value other than -0. While the second is
+ // typed as an int32, a bailout taken between execution of the
+ // operands may change that type and cause a -0 to flow to the
+ // second.
+ //
+ // There is no way to test whether there are any bailouts
+ // between execution of the operands, so remove negative
+ // zero checks from the first only if the second's type is
+ // independent from type changes that may occur after bailing.
+ if (def == first && CanProduceNegativeZero(second)) {
+ return true;
+ }
+
+ // The negative zero check can always be removed on the second
+ // executed operand; by the time this executes the first will have
+ // been evaluated as int32 and the addition's result cannot be -0.
+ break;
+ }
+ case MDefinition::Opcode::Sub: {
+        // If the sub is truncating, -0 and 0 are observed as the same.
+ if (use_def->toSub()->isTruncated()) {
+ break;
+ }
+
+        // x - y gives -0, when x is -0 and y is 0
+
+        // We can remove the negative zero check on the rhs only if we are
+        // sure the lhs isn't negative zero.
+
+        // The lhs is typed as an integer (i.e. not -0.0), but it can bail out
+        // and change type. This should be fine if the lhs is executed first.
+        // However, if the rhs is executed first, the lhs can bail, change
+        // type and become -0.0 while the rhs has already been optimized not
+        // to distinguish between zero and negative zero.
+ MDefinition* lhs = use_def->toSub()->lhs();
+ MDefinition* rhs = use_def->toSub()->rhs();
+ if (rhs->id() < lhs->id() && CanProduceNegativeZero(lhs)) {
+ return true;
+ }
+
+ [[fallthrough]];
+ }
+ case MDefinition::Opcode::StoreElement:
+ case MDefinition::Opcode::StoreHoleValueElement:
+ case MDefinition::Opcode::LoadElement:
+ case MDefinition::Opcode::LoadElementHole:
+ case MDefinition::Opcode::LoadUnboxedScalar:
+ case MDefinition::Opcode::LoadDataViewElement:
+ case MDefinition::Opcode::LoadTypedArrayElementHole:
+ case MDefinition::Opcode::CharCodeAt:
+ case MDefinition::Opcode::Mod:
+ case MDefinition::Opcode::InArray:
+ // Only allowed to remove check when definition is the second operand
+ if (use_def->getOperand(0) == def) {
+ return true;
+ }
+ for (size_t i = 2, e = use_def->numOperands(); i < e; i++) {
+ if (use_def->getOperand(i) == def) {
+ return true;
+ }
+ }
+ break;
+ case MDefinition::Opcode::BoundsCheck:
+ // Only allowed to remove check when definition is the first operand
+ if (use_def->toBoundsCheck()->getOperand(1) == def) {
+ return true;
+ }
+ break;
+ case MDefinition::Opcode::ToString:
+ case MDefinition::Opcode::FromCharCode:
+ case MDefinition::Opcode::FromCodePoint:
+ case MDefinition::Opcode::TableSwitch:
+ case MDefinition::Opcode::Compare:
+ case MDefinition::Opcode::BitAnd:
+ case MDefinition::Opcode::BitOr:
+ case MDefinition::Opcode::BitXor:
+ case MDefinition::Opcode::Abs:
+ case MDefinition::Opcode::TruncateToInt32:
+ // Always allowed to remove check. No matter which operand.
+ break;
+ case MDefinition::Opcode::StoreElementHole:
+ case MDefinition::Opcode::StoreTypedArrayElementHole:
+ case MDefinition::Opcode::PostWriteElementBarrier:
+ // Only allowed to remove check when definition is the third operand.
+ for (size_t i = 0, e = use_def->numOperands(); i < e; i++) {
+ if (i == 2) {
+ continue;
+ }
+ if (use_def->getOperand(i) == def) {
+ return true;
+ }
+ }
+ break;
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifdef JS_JITSPEW
+void MBinaryArithInstruction::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+
+ switch (type()) {
+ case MIRType::Int32:
+ if (isDiv()) {
+ out.printf(" [%s]", toDiv()->isUnsigned() ? "uint32" : "int32");
+ } else if (isMod()) {
+ out.printf(" [%s]", toMod()->isUnsigned() ? "uint32" : "int32");
+ } else {
+ out.printf(" [int32]");
+ }
+ break;
+ case MIRType::Int64:
+ if (isDiv()) {
+ out.printf(" [%s]", toDiv()->isUnsigned() ? "uint64" : "int64");
+ } else if (isMod()) {
+ out.printf(" [%s]", toMod()->isUnsigned() ? "uint64" : "int64");
+ } else {
+ out.printf(" [int64]");
+ }
+ break;
+ case MIRType::Float32:
+ out.printf(" [float]");
+ break;
+ case MIRType::Double:
+ out.printf(" [double]");
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+MDefinition* MRsh::foldsTo(TempAllocator& alloc) {
+ MDefinition* f = MBinaryBitwiseInstruction::foldsTo(alloc);
+
+ if (f != this) {
+ return f;
+ }
+
+ MDefinition* lhs = getOperand(0);
+ MDefinition* rhs = getOperand(1);
+
+ // It's probably OK to perform this optimization only for int32, as it will
+ // have the greatest effect for asm.js code that is compiled with the JS
+ // pipeline, and that code will not see int64 values.
+
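+  // Match the sign-extension pattern (x << N) >> N: N == 16 folds to a
+  // half-word sign extension and N == 24 to a byte sign extension.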
+ if (!lhs->isLsh() || !rhs->isConstant() || rhs->type() != MIRType::Int32) {
+ return this;
+ }
+
+ if (!lhs->getOperand(1)->isConstant() ||
+ lhs->getOperand(1)->type() != MIRType::Int32) {
+ return this;
+ }
+
+ uint32_t shift = rhs->toConstant()->toInt32();
+ uint32_t shift_lhs = lhs->getOperand(1)->toConstant()->toInt32();
+ if (shift != shift_lhs) {
+ return this;
+ }
+
+ switch (shift) {
+ case 16:
+ return MSignExtendInt32::New(alloc, lhs->getOperand(0),
+ MSignExtendInt32::Half);
+ case 24:
+ return MSignExtendInt32::New(alloc, lhs->getOperand(0),
+ MSignExtendInt32::Byte);
+ }
+
+ return this;
+}
+
+MDefinition* MBinaryArithInstruction::foldsTo(TempAllocator& alloc) {
+ MOZ_ASSERT(IsNumberType(type()));
+
+ MDefinition* lhs = getOperand(0);
+ MDefinition* rhs = getOperand(1);
+
+ if (type() == MIRType::Int64) {
+ MOZ_ASSERT(!isTruncated());
+
+ if (MConstant* folded = EvaluateInt64ConstantOperands(alloc, this)) {
+ if (!folded->block()) {
+ block()->insertBefore(this, folded);
+ }
+ return folded;
+ }
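+    // Only addition and multiplication have a two-sided identity element, so
+    // don't attempt the identity folds below for the other operations.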
+ if (isSub() || isDiv() || isMod()) {
+ return this;
+ }
+ if (rhs->isConstant() &&
+ rhs->toConstant()->toInt64() == int64_t(getIdentity())) {
+ return lhs;
+ }
+ if (lhs->isConstant() &&
+ lhs->toConstant()->toInt64() == int64_t(getIdentity())) {
+ return rhs;
+ }
+ return this;
+ }
+
+ if (MConstant* folded = EvaluateConstantOperands(alloc, this)) {
+ if (isTruncated()) {
+ if (!folded->block()) {
+ block()->insertBefore(this, folded);
+ }
+ if (folded->type() != MIRType::Int32) {
+ return MTruncateToInt32::New(alloc, folded);
+ }
+ }
+ return folded;
+ }
+
+ if (mustPreserveNaN_) {
+ return this;
+ }
+
+  // -0 + 0 = +0, so we can't fold x + 0 => x: the result would change when x
+  // is -0.
+ if (isAdd() && type() != MIRType::Int32) {
+ return this;
+ }
+
+ if (IsConstant(rhs, getIdentity())) {
+ if (isTruncated()) {
+ return MTruncateToInt32::New(alloc, lhs);
+ }
+ return lhs;
+ }
+
+ // subtraction isn't commutative. So we can't remove subtraction when lhs
+ // equals 0
+ if (isSub()) {
+ return this;
+ }
+
+ if (IsConstant(lhs, getIdentity())) {
+ if (isTruncated()) {
+ return MTruncateToInt32::New(alloc, rhs);
+ }
+ return rhs; // id op x => x
+ }
+
+ return this;
+}
+
+void MBinaryArithInstruction::trySpecializeFloat32(TempAllocator& alloc) {
+ MOZ_ASSERT(IsNumberType(type()));
+
+ // Do not use Float32 if we can use int32.
+ if (type() == MIRType::Int32) {
+ return;
+ }
+
+ if (EnsureFloatConsumersAndInputOrConvert(this, alloc)) {
+ setResultType(MIRType::Float32);
+ }
+}
+
+void MMinMax::trySpecializeFloat32(TempAllocator& alloc) {
+ if (type() == MIRType::Int32) {
+ return;
+ }
+
+ MDefinition* left = lhs();
+ MDefinition* right = rhs();
+
+ if ((left->canProduceFloat32() ||
+ (left->isMinMax() && left->type() == MIRType::Float32)) &&
+ (right->canProduceFloat32() ||
+ (right->isMinMax() && right->type() == MIRType::Float32))) {
+ setResultType(MIRType::Float32);
+ } else {
+ ConvertOperandsToDouble(this, alloc);
+ }
+}
+
+MDefinition* MMinMax::foldsTo(TempAllocator& alloc) {
+ MOZ_ASSERT(lhs()->type() == type());
+ MOZ_ASSERT(rhs()->type() == type());
+
+ if (lhs() == rhs()) {
+ return lhs();
+ }
+
+ // Fold min/max operations with same inputs.
+ if (lhs()->isMinMax() || rhs()->isMinMax()) {
+ auto* other = lhs()->isMinMax() ? lhs()->toMinMax() : rhs()->toMinMax();
+ auto* operand = lhs()->isMinMax() ? rhs() : lhs();
+
+ if (operand == other->lhs() || operand == other->rhs()) {
+ if (isMax() == other->isMax()) {
+ // min(x, min(x, y)) = min(x, y)
+ // max(x, max(x, y)) = max(x, y)
+ return other;
+ }
+ if (!IsFloatingPointType(type())) {
+ // When neither value is NaN:
+ // max(x, min(x, y)) = x
+ // min(x, max(x, y)) = x
+
+ // Ensure that any bailouts that we depend on to guarantee that |y| is
+ // Int32 are not removed.
+ auto* otherOp = operand == other->lhs() ? other->rhs() : other->lhs();
+ otherOp->setGuardRangeBailoutsUnchecked();
+
+ return operand;
+ }
+ }
+ }
+
+ if (!lhs()->isConstant() && !rhs()->isConstant()) {
+ return this;
+ }
+
+ auto foldConstants = [&alloc](MDefinition* lhs, MDefinition* rhs,
+ bool isMax) -> MConstant* {
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->toConstant()->isTypeRepresentableAsDouble());
+ MOZ_ASSERT(rhs->toConstant()->isTypeRepresentableAsDouble());
+
+ double lnum = lhs->toConstant()->numberToDouble();
+ double rnum = rhs->toConstant()->numberToDouble();
+
+ double result;
+ if (isMax) {
+ result = js::math_max_impl(lnum, rnum);
+ } else {
+ result = js::math_min_impl(lnum, rnum);
+ }
+
+    // The folded MConstant should have the same MIRType as the original
+    // inputs.
+ if (lhs->type() == MIRType::Int32) {
+ int32_t cast;
+ if (mozilla::NumberEqualsInt32(result, &cast)) {
+ return MConstant::New(alloc, Int32Value(cast));
+ }
+ return nullptr;
+ }
+ if (lhs->type() == MIRType::Float32) {
+ return MConstant::NewFloat32(alloc, result);
+ }
+ MOZ_ASSERT(lhs->type() == MIRType::Double);
+ return MConstant::New(alloc, DoubleValue(result));
+ };
+
+ // Directly apply math utility to compare the rhs() and lhs() when
+ // they are both constants.
+ if (lhs()->isConstant() && rhs()->isConstant()) {
+ if (!lhs()->toConstant()->isTypeRepresentableAsDouble() ||
+ !rhs()->toConstant()->isTypeRepresentableAsDouble()) {
+ return this;
+ }
+
+ if (auto* folded = foldConstants(lhs(), rhs(), isMax())) {
+ return folded;
+ }
+ }
+
+ MDefinition* operand = lhs()->isConstant() ? rhs() : lhs();
+ MConstant* constant =
+ lhs()->isConstant() ? lhs()->toConstant() : rhs()->toConstant();
+
+ if (operand->isToDouble() &&
+ operand->getOperand(0)->type() == MIRType::Int32) {
+ // min(int32, cte >= INT32_MAX) = int32
+ if (!isMax() && constant->isTypeRepresentableAsDouble() &&
+ constant->numberToDouble() >= INT32_MAX) {
+ MLimitedTruncate* limit = MLimitedTruncate::New(
+ alloc, operand->getOperand(0), TruncateKind::NoTruncate);
+ block()->insertBefore(this, limit);
+ MToDouble* toDouble = MToDouble::New(alloc, limit);
+ return toDouble;
+ }
+
+ // max(int32, cte <= INT32_MIN) = int32
+ if (isMax() && constant->isTypeRepresentableAsDouble() &&
+ constant->numberToDouble() <= INT32_MIN) {
+ MLimitedTruncate* limit = MLimitedTruncate::New(
+ alloc, operand->getOperand(0), TruncateKind::NoTruncate);
+ block()->insertBefore(this, limit);
+ MToDouble* toDouble = MToDouble::New(alloc, limit);
+ return toDouble;
+ }
+ }
+
+ auto foldLength = [](MDefinition* operand, MConstant* constant,
+ bool isMax) -> MDefinition* {
+ if ((operand->isArrayLength() || operand->isArrayBufferViewLength() ||
+ operand->isArgumentsLength() || operand->isStringLength()) &&
+ constant->type() == MIRType::Int32) {
+ // (Array|ArrayBufferView|Arguments|String)Length is always >= 0.
+ // max(array.length, cte <= 0) = array.length
+ // min(array.length, cte <= 0) = cte
+ if (constant->toInt32() <= 0) {
+ return isMax ? operand : constant;
+ }
+ }
+ return nullptr;
+ };
+
+ if (auto* folded = foldLength(operand, constant, isMax())) {
+ return folded;
+ }
+
+ // Attempt to fold nested min/max operations which are produced by
+ // self-hosted built-in functions.
+ if (operand->isMinMax()) {
+ auto* other = operand->toMinMax();
+ MOZ_ASSERT(other->lhs()->type() == type());
+ MOZ_ASSERT(other->rhs()->type() == type());
+
+ MConstant* otherConstant = nullptr;
+ MDefinition* otherOperand = nullptr;
+ if (other->lhs()->isConstant()) {
+ otherConstant = other->lhs()->toConstant();
+ otherOperand = other->rhs();
+ } else if (other->rhs()->isConstant()) {
+ otherConstant = other->rhs()->toConstant();
+ otherOperand = other->lhs();
+ }
+
+ if (otherConstant && constant->isTypeRepresentableAsDouble() &&
+ otherConstant->isTypeRepresentableAsDouble()) {
+ if (isMax() == other->isMax()) {
+ // Fold min(x, min(y, z)) to min(min(x, y), z) with constant min(x, y).
+ // Fold max(x, max(y, z)) to max(max(x, y), z) with constant max(x, y).
+ if (auto* left = foldConstants(constant, otherConstant, isMax())) {
+ block()->insertBefore(this, left);
+ return MMinMax::New(alloc, left, otherOperand, type(), isMax());
+ }
+ } else {
+ // Fold min(x, max(y, z)) to max(min(x, y), min(x, z)).
+ // Fold max(x, min(y, z)) to min(max(x, y), max(x, z)).
+ //
+ // But only do this when min(x, z) can also be simplified.
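+        //
+        // For example, max(0, min(16, arr.length)) distributes to
+        // min(max(0, 16), max(0, arr.length)), which then simplifies to
+        // min(16, arr.length) because max(0, arr.length) is arr.length.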
+ if (auto* right = foldLength(otherOperand, constant, isMax())) {
+ if (auto* left = foldConstants(constant, otherConstant, isMax())) {
+ block()->insertBefore(this, left);
+ return MMinMax::New(alloc, left, right, type(), !isMax());
+ }
+ }
+ }
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MPow::foldsConstant(TempAllocator& alloc) {
+ // Both `x` and `p` in `x^p` must be constants in order to precompute.
+ if (!input()->isConstant() || !power()->isConstant()) {
+ return nullptr;
+ }
+ if (!power()->toConstant()->isTypeRepresentableAsDouble()) {
+ return nullptr;
+ }
+ if (!input()->toConstant()->isTypeRepresentableAsDouble()) {
+ return nullptr;
+ }
+
+ double x = input()->toConstant()->numberToDouble();
+ double p = power()->toConstant()->numberToDouble();
+ double result = js::ecmaPow(x, p);
+ if (type() == MIRType::Int32) {
+ int32_t cast;
+ if (!mozilla::NumberIsInt32(result, &cast)) {
+ // Reject folding if the result isn't an int32, because we'll bail anyway.
+ return nullptr;
+ }
+ return MConstant::New(alloc, Int32Value(cast));
+ }
+ return MConstant::New(alloc, DoubleValue(result));
+}
+
+MDefinition* MPow::foldsConstantPower(TempAllocator& alloc) {
+ // If `p` in `x^p` isn't constant, we can't apply these folds.
+ if (!power()->isConstant()) {
+ return nullptr;
+ }
+ if (!power()->toConstant()->isTypeRepresentableAsDouble()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(type() == MIRType::Double || type() == MIRType::Int32);
+
+  // NOTE: The optimizations must match the optimizations used in |js::ecmaPow|
+  // and |js::powi|, respectively, to avoid differential testing issues.
+
+ double pow = power()->toConstant()->numberToDouble();
+
+ // Math.pow(x, 0.5) is a sqrt with edge-case detection.
+ if (pow == 0.5) {
+ MOZ_ASSERT(type() == MIRType::Double);
+ return MPowHalf::New(alloc, input());
+ }
+
+ // Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5), even for edge cases.
+ if (pow == -0.5) {
+ MOZ_ASSERT(type() == MIRType::Double);
+ MPowHalf* half = MPowHalf::New(alloc, input());
+ block()->insertBefore(this, half);
+ MConstant* one = MConstant::New(alloc, DoubleValue(1.0));
+ block()->insertBefore(this, one);
+ return MDiv::New(alloc, one, half, MIRType::Double);
+ }
+
+ // Math.pow(x, 1) == x.
+ if (pow == 1.0) {
+ return input();
+ }
+
+ auto multiply = [this, &alloc](MDefinition* lhs, MDefinition* rhs) {
+ MMul* mul = MMul::New(alloc, lhs, rhs, type());
+ mul->setBailoutKind(bailoutKind());
+
+    // Multiplying a number by itself can't yield negative zero.
+ mul->setCanBeNegativeZero(lhs != rhs && canBeNegativeZero());
+ return mul;
+ };
+
+ // Math.pow(x, 2) == x*x.
+ if (pow == 2.0) {
+ return multiply(input(), input());
+ }
+
+ // Math.pow(x, 3) == x*x*x.
+ if (pow == 3.0) {
+ MMul* mul1 = multiply(input(), input());
+ block()->insertBefore(this, mul1);
+ return multiply(input(), mul1);
+ }
+
+ // Math.pow(x, 4) == y*y, where y = x*x.
+ if (pow == 4.0) {
+ MMul* y = multiply(input(), input());
+ block()->insertBefore(this, y);
+ return multiply(y, y);
+ }
+
+ // No optimization
+ return nullptr;
+}
+
+MDefinition* MPow::foldsTo(TempAllocator& alloc) {
+ if (MDefinition* def = foldsConstant(alloc)) {
+ return def;
+ }
+ if (MDefinition* def = foldsConstantPower(alloc)) {
+ return def;
+ }
+ return this;
+}
+
+MDefinition* MInt32ToIntPtr::foldsTo(TempAllocator& alloc) {
+ MDefinition* def = input();
+ if (def->isConstant()) {
+ int32_t i = def->toConstant()->toInt32();
+ return MConstant::NewIntPtr(alloc, intptr_t(i));
+ }
+
+ if (def->isNonNegativeIntPtrToInt32()) {
+ return def->toNonNegativeIntPtrToInt32()->input();
+ }
+
+ return this;
+}
+
+bool MAbs::fallible() const {
+ return !implicitTruncate_ && (!range() || !range()->hasInt32Bounds());
+}
+
+void MAbs::trySpecializeFloat32(TempAllocator& alloc) {
+ // Do not use Float32 if we can use int32.
+ if (input()->type() == MIRType::Int32) {
+ return;
+ }
+
+ if (EnsureFloatConsumersAndInputOrConvert(this, alloc)) {
+ setResultType(MIRType::Float32);
+ }
+}
+
+MDefinition* MDiv::foldsTo(TempAllocator& alloc) {
+ MOZ_ASSERT(IsNumberType(type()));
+
+ if (type() == MIRType::Int64) {
+ if (MDefinition* folded = EvaluateInt64ConstantOperands(alloc, this)) {
+ return folded;
+ }
+ return this;
+ }
+
+ if (MDefinition* folded = EvaluateConstantOperands(alloc, this)) {
+ return folded;
+ }
+
+ if (MDefinition* folded = EvaluateExactReciprocal(alloc, this)) {
+ return folded;
+ }
+
+ return this;
+}
+
+void MDiv::analyzeEdgeCasesForward() {
+ // This is only meaningful when doing integer division.
+ if (type() != MIRType::Int32) {
+ return;
+ }
+
+ MOZ_ASSERT(lhs()->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs()->type() == MIRType::Int32);
+
+ // Try removing divide by zero check
+ if (rhs()->isConstant() && !rhs()->toConstant()->isInt32(0)) {
+ canBeDivideByZero_ = false;
+ }
+
+ // If lhs is a constant int != INT32_MIN, then
+ // negative overflow check can be skipped.
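+  // (INT32_MIN / -1 is the only Int32 division that overflows, so excluding
+  // either operand is sufficient.)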
+ if (lhs()->isConstant() && !lhs()->toConstant()->isInt32(INT32_MIN)) {
+ canBeNegativeOverflow_ = false;
+ }
+
+ // If rhs is a constant int != -1, likewise.
+ if (rhs()->isConstant() && !rhs()->toConstant()->isInt32(-1)) {
+ canBeNegativeOverflow_ = false;
+ }
+
+ // If lhs is != 0, then negative zero check can be skipped.
+ if (lhs()->isConstant() && !lhs()->toConstant()->isInt32(0)) {
+ setCanBeNegativeZero(false);
+ }
+
+ // If rhs is >= 0, likewise.
+ if (rhs()->isConstant() && rhs()->type() == MIRType::Int32) {
+ if (rhs()->toConstant()->toInt32() >= 0) {
+ setCanBeNegativeZero(false);
+ }
+ }
+}
+
+void MDiv::analyzeEdgeCasesBackward() {
+ if (canBeNegativeZero() && !NeedNegativeZeroCheck(this)) {
+ setCanBeNegativeZero(false);
+ }
+}
+
+bool MDiv::fallible() const { return !isTruncated(); }
+
+MDefinition* MMod::foldsTo(TempAllocator& alloc) {
+ MOZ_ASSERT(IsNumberType(type()));
+
+ if (type() == MIRType::Int64) {
+ if (MDefinition* folded = EvaluateInt64ConstantOperands(alloc, this)) {
+ return folded;
+ }
+ } else {
+ if (MDefinition* folded = EvaluateConstantOperands(alloc, this)) {
+ return folded;
+ }
+ }
+ return this;
+}
+
+void MMod::analyzeEdgeCasesForward() {
+ // These optimizations make sense only for integer division
+ if (type() != MIRType::Int32) {
+ return;
+ }
+
+ if (rhs()->isConstant() && !rhs()->toConstant()->isInt32(0)) {
+ canBeDivideByZero_ = false;
+ }
+
+ if (rhs()->isConstant()) {
+ int32_t n = rhs()->toConstant()->toInt32();
+ if (n > 0 && !IsPowerOfTwo(uint32_t(n))) {
+ canBePowerOfTwoDivisor_ = false;
+ }
+ }
+}
+
+bool MMod::fallible() const {
+ return !isTruncated() &&
+ (isUnsigned() || canBeDivideByZero() || canBeNegativeDividend());
+}
+
+void MMathFunction::trySpecializeFloat32(TempAllocator& alloc) {
+ if (EnsureFloatConsumersAndInputOrConvert(this, alloc)) {
+ setResultType(MIRType::Float32);
+ specialization_ = MIRType::Float32;
+ }
+}
+
+bool MMathFunction::isFloat32Commutative() const {
+ switch (function_) {
+ case UnaryMathFunction::Floor:
+ case UnaryMathFunction::Ceil:
+ case UnaryMathFunction::Round:
+ case UnaryMathFunction::Trunc:
+ return true;
+ default:
+ return false;
+ }
+}
+
+MHypot* MHypot::New(TempAllocator& alloc, const MDefinitionVector& vector) {
+ uint32_t length = vector.length();
+ MHypot* hypot = new (alloc) MHypot;
+ if (!hypot->init(alloc, length)) {
+ return nullptr;
+ }
+
+ for (uint32_t i = 0; i < length; ++i) {
+ hypot->initOperand(i, vector[i]);
+ }
+ return hypot;
+}
+
+bool MAdd::fallible() const {
+  // The add is fallible if range analysis does not say that it is finite, AND
+  // the truncation analysis shows that there are non-truncated uses.
+ if (truncateKind() >= TruncateKind::IndirectTruncate) {
+ return false;
+ }
+ if (range() && range()->hasInt32Bounds()) {
+ return false;
+ }
+ return true;
+}
+
+bool MSub::fallible() const {
+ // see comment in MAdd::fallible()
+ if (truncateKind() >= TruncateKind::IndirectTruncate) {
+ return false;
+ }
+ if (range() && range()->hasInt32Bounds()) {
+ return false;
+ }
+ return true;
+}
+
+MDefinition* MSub::foldsTo(TempAllocator& alloc) {
+ MDefinition* out = MBinaryArithInstruction::foldsTo(alloc);
+ if (out != this) {
+ return out;
+ }
+
+ if (type() != MIRType::Int32) {
+ return this;
+ }
+
+ // Optimize X - X to 0. This optimization is only valid for Int32
+ // values. Subtracting a floating point value from itself returns
+ // NaN when the operand is either Infinity or NaN.
+ if (lhs() == rhs()) {
+ // Ensure that any bailouts that we depend on to guarantee that X
+ // is Int32 are not removed.
+ lhs()->setGuardRangeBailoutsUnchecked();
+ return MConstant::New(alloc, Int32Value(0));
+ }
+
+ return this;
+}
+
+MDefinition* MMul::foldsTo(TempAllocator& alloc) {
+ MDefinition* out = MBinaryArithInstruction::foldsTo(alloc);
+ if (out != this) {
+ return out;
+ }
+
+ if (type() != MIRType::Int32) {
+ return this;
+ }
+
+ if (lhs() == rhs()) {
+ setCanBeNegativeZero(false);
+ }
+
+ return this;
+}
+
+void MMul::analyzeEdgeCasesForward() {
+  // Try to remove the check for negative zero.
+  // This only makes sense for integer multiplication.
+ if (type() != MIRType::Int32) {
+ return;
+ }
+
+ // If lhs is > 0, no need for negative zero check.
+ if (lhs()->isConstant() && lhs()->type() == MIRType::Int32) {
+ if (lhs()->toConstant()->toInt32() > 0) {
+ setCanBeNegativeZero(false);
+ }
+ }
+
+ // If rhs is > 0, likewise.
+ if (rhs()->isConstant() && rhs()->type() == MIRType::Int32) {
+ if (rhs()->toConstant()->toInt32() > 0) {
+ setCanBeNegativeZero(false);
+ }
+ }
+}
+
+void MMul::analyzeEdgeCasesBackward() {
+ if (canBeNegativeZero() && !NeedNegativeZeroCheck(this)) {
+ setCanBeNegativeZero(false);
+ }
+}
+
+bool MMul::canOverflow() const {
+ if (isTruncated()) {
+ return false;
+ }
+ return !range() || !range()->hasInt32Bounds();
+}
+
+bool MUrsh::fallible() const {
+ if (bailoutsDisabled()) {
+ return false;
+ }
+ return !range() || !range()->hasInt32Bounds();
+}
+
+MIRType MCompare::inputType() {
+ switch (compareType_) {
+ case Compare_Undefined:
+ return MIRType::Undefined;
+ case Compare_Null:
+ return MIRType::Null;
+ case Compare_UInt32:
+ case Compare_Int32:
+ return MIRType::Int32;
+ case Compare_UIntPtr:
+ return MIRType::IntPtr;
+ case Compare_Double:
+ return MIRType::Double;
+ case Compare_Float32:
+ return MIRType::Float32;
+ case Compare_String:
+ return MIRType::String;
+ case Compare_Symbol:
+ return MIRType::Symbol;
+ case Compare_Object:
+ return MIRType::Object;
+ case Compare_BigInt:
+ case Compare_BigInt_Int32:
+ case Compare_BigInt_Double:
+ case Compare_BigInt_String:
+ return MIRType::BigInt;
+ default:
+ MOZ_CRASH("No known conversion");
+ }
+}
+
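+// A definition is known to produce a UInt32 value if it is |x >>> 0| with
+// bailouts disabled (the usual JS idiom for forcing an unsigned value) or a
+// non-negative Int32 constant; |*pwrapped| receives the value that can then
+// be treated as unsigned.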
+static inline bool MustBeUInt32(MDefinition* def, MDefinition** pwrapped) {
+ if (def->isUrsh()) {
+ *pwrapped = def->toUrsh()->lhs();
+ MDefinition* rhs = def->toUrsh()->rhs();
+ return def->toUrsh()->bailoutsDisabled() && rhs->maybeConstantValue() &&
+ rhs->maybeConstantValue()->isInt32(0);
+ }
+
+ if (MConstant* defConst = def->maybeConstantValue()) {
+ *pwrapped = defConst;
+ return defConst->type() == MIRType::Int32 && defConst->toInt32() >= 0;
+ }
+
+ *pwrapped = nullptr; // silence GCC warning
+ return false;
+}
+
+/* static */
+bool MBinaryInstruction::unsignedOperands(MDefinition* left,
+ MDefinition* right) {
+ MDefinition* replace;
+ if (!MustBeUInt32(left, &replace)) {
+ return false;
+ }
+ if (replace->type() != MIRType::Int32) {
+ return false;
+ }
+ if (!MustBeUInt32(right, &replace)) {
+ return false;
+ }
+ if (replace->type() != MIRType::Int32) {
+ return false;
+ }
+ return true;
+}
+
+bool MBinaryInstruction::unsignedOperands() {
+ return unsignedOperands(getOperand(0), getOperand(1));
+}
+
+void MBinaryInstruction::replaceWithUnsignedOperands() {
+ MOZ_ASSERT(unsignedOperands());
+
+ for (size_t i = 0; i < numOperands(); i++) {
+ MDefinition* replace;
+ MustBeUInt32(getOperand(i), &replace);
+ if (replace == getOperand(i)) {
+ continue;
+ }
+
+ getOperand(i)->setImplicitlyUsedUnchecked();
+ replaceOperand(i, replace);
+ }
+}
+
+MDefinition* MBitNot::foldsTo(TempAllocator& alloc) {
+ if (type() == MIRType::Int64) {
+ return this;
+ }
+ MOZ_ASSERT(type() == MIRType::Int32);
+
+ MDefinition* input = getOperand(0);
+
+ if (input->isConstant()) {
+ js::Value v = Int32Value(~(input->toConstant()->toInt32()));
+ return MConstant::New(alloc, v);
+ }
+
+ if (input->isBitNot()) {
+ MOZ_ASSERT(input->toBitNot()->type() == MIRType::Int32);
+ MOZ_ASSERT(input->toBitNot()->getOperand(0)->type() == MIRType::Int32);
+ return MTruncateToInt32::New(alloc,
+ input->toBitNot()->input()); // ~~x => x | 0
+ }
+
+ return this;
+}
+
+static void AssertKnownClass(TempAllocator& alloc, MInstruction* ins,
+ MDefinition* obj) {
+#ifdef DEBUG
+ const JSClass* clasp = GetObjectKnownJSClass(obj);
+ MOZ_ASSERT(clasp);
+
+ auto* assert = MAssertClass::New(alloc, obj, clasp);
+ ins->block()->insertBefore(ins, assert);
+#endif
+}
+
+MDefinition* MBoxNonStrictThis::foldsTo(TempAllocator& alloc) {
+ MDefinition* in = input();
+ if (in->isBox()) {
+ in = in->toBox()->input();
+ }
+
+ if (in->type() == MIRType::Object) {
+ return in;
+ }
+
+ return this;
+}
+
+AliasSet MLoadArgumentsObjectArg::getAliasSet() const {
+ return AliasSet::Load(AliasSet::Any);
+}
+
+AliasSet MLoadArgumentsObjectArgHole::getAliasSet() const {
+ return AliasSet::Load(AliasSet::Any);
+}
+
+AliasSet MInArgumentsObjectArg::getAliasSet() const {
+ // Loads |arguments.length|, but not the actual element, so we can use the
+ // same alias-set as MArgumentsObjectLength.
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot);
+}
+
+AliasSet MArgumentsObjectLength::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot);
+}
+
+bool MGuardArgumentsObjectFlags::congruentTo(const MDefinition* ins) const {
+ if (!ins->isGuardArgumentsObjectFlags() ||
+ ins->toGuardArgumentsObjectFlags()->flags() != flags()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MGuardArgumentsObjectFlags::getAliasSet() const {
+ // The flags are packed with the length in a fixed private slot.
+ return AliasSet::Load(AliasSet::FixedSlot);
+}
+
+MDefinition* MReturnFromCtor::foldsTo(TempAllocator& alloc) {
+ MDefinition* rval = value();
+ if (rval->isBox()) {
+ rval = rval->toBox()->input();
+ }
+
+ if (rval->type() == MIRType::Object) {
+ return rval;
+ }
+
+ if (rval->type() != MIRType::Value) {
+ return object();
+ }
+
+ return this;
+}
+
+MDefinition* MTypeOf::foldsTo(TempAllocator& alloc) {
+ MDefinition* unboxed = input();
+ if (unboxed->isBox()) {
+ unboxed = unboxed->toBox()->input();
+ }
+
+ JSType type;
+ switch (unboxed->type()) {
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::Int32:
+ type = JSTYPE_NUMBER;
+ break;
+ case MIRType::String:
+ type = JSTYPE_STRING;
+ break;
+ case MIRType::Symbol:
+ type = JSTYPE_SYMBOL;
+ break;
+ case MIRType::BigInt:
+ type = JSTYPE_BIGINT;
+ break;
+ case MIRType::Null:
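+      // typeof null is "object".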
+ type = JSTYPE_OBJECT;
+ break;
+ case MIRType::Undefined:
+ type = JSTYPE_UNDEFINED;
+ break;
+ case MIRType::Boolean:
+ type = JSTYPE_BOOLEAN;
+ break;
+ case MIRType::Object: {
+ KnownClass known = GetObjectKnownClass(unboxed);
+ if (known != KnownClass::None) {
+ if (known == KnownClass::Function) {
+ type = JSTYPE_FUNCTION;
+ } else {
+ type = JSTYPE_OBJECT;
+ }
+
+ AssertKnownClass(alloc, this, unboxed);
+ break;
+ }
+ [[fallthrough]];
+ }
+ default:
+ return this;
+ }
+
+ return MConstant::New(alloc, Int32Value(static_cast<int32_t>(type)));
+}
+
+MDefinition* MTypeOfName::foldsTo(TempAllocator& alloc) {
+ MOZ_ASSERT(input()->type() == MIRType::Int32);
+
+ if (!input()->isConstant()) {
+ return this;
+ }
+
+ static_assert(JSTYPE_UNDEFINED == 0);
+
+ int32_t type = input()->toConstant()->toInt32();
+ MOZ_ASSERT(JSTYPE_UNDEFINED <= type && type < JSTYPE_LIMIT);
+
+ JSString* name =
+ TypeName(static_cast<JSType>(type), GetJitContext()->runtime->names());
+ return MConstant::New(alloc, StringValue(name));
+}
+
+MUrsh* MUrsh::NewWasm(TempAllocator& alloc, MDefinition* left,
+ MDefinition* right, MIRType type) {
+ MUrsh* ins = new (alloc) MUrsh(left, right, type);
+
+  // Since Ion has no UInt32 type, we use Int32 with a special exception to
+  // the type rules: we can return values in (INT32_MIN,UINT32_MAX] and still
+  // claim an Int32 type without bailing out. This is necessary because we
+  // can't have bailouts in wasm code.
+ ins->bailoutsDisabled_ = true;
+
+ return ins;
+}
+
+MResumePoint* MResumePoint::New(TempAllocator& alloc, MBasicBlock* block,
+ jsbytecode* pc, ResumeMode mode) {
+ MResumePoint* resume = new (alloc) MResumePoint(block, pc, mode);
+ if (!resume->init(alloc)) {
+ block->discardPreAllocatedResumePoint(resume);
+ return nullptr;
+ }
+ resume->inherit(block);
+ return resume;
+}
+
+MResumePoint::MResumePoint(MBasicBlock* block, jsbytecode* pc, ResumeMode mode)
+ : MNode(block, Kind::ResumePoint),
+ pc_(pc),
+ instruction_(nullptr),
+ mode_(mode) {
+ block->addResumePoint(this);
+}
+
+bool MResumePoint::init(TempAllocator& alloc) {
+ return operands_.init(alloc, block()->stackDepth());
+}
+
+MResumePoint* MResumePoint::caller() const {
+ return block()->callerResumePoint();
+}
+
+void MResumePoint::inherit(MBasicBlock* block) {
+ // FixedList doesn't initialize its elements, so do unchecked inits.
+ for (size_t i = 0; i < stackDepth(); i++) {
+ initOperand(i, block->getSlot(i));
+ }
+}
+
+void MResumePoint::addStore(TempAllocator& alloc, MDefinition* store,
+ const MResumePoint* cache) {
+ MOZ_ASSERT(block()->outerResumePoint() != this);
+ MOZ_ASSERT_IF(cache, !cache->stores_.empty());
+
+ if (cache && cache->stores_.begin()->operand == store) {
+ // If the last resume point had the same side-effect stack, then we can
+ // reuse the current side effect without cloning it. This is a simple
+ // way to share common context by making a spaghetti stack.
+ if (++cache->stores_.begin() == stores_.begin()) {
+ stores_.copy(cache->stores_);
+ return;
+ }
+ }
+
+ // Ensure that the store would not be deleted by DCE.
+ MOZ_ASSERT(store->isEffectful());
+
+ MStoreToRecover* top = new (alloc) MStoreToRecover(store);
+ stores_.push(top);
+}
+
+#ifdef JS_JITSPEW
+void MResumePoint::dump(GenericPrinter& out) const {
+ out.printf("resumepoint mode=");
+
+ switch (mode()) {
+ case ResumeMode::ResumeAt:
+ if (instruction_) {
+ out.printf("ResumeAt(%u)", instruction_->id());
+ } else {
+ out.printf("ResumeAt");
+ }
+ break;
+ default:
+ out.put(ResumeModeToString(mode()));
+ break;
+ }
+
+ if (MResumePoint* c = caller()) {
+ out.printf(" (caller in block%u)", c->block()->id());
+ }
+
+ for (size_t i = 0; i < numOperands(); i++) {
+ out.printf(" ");
+ if (operands_[i].hasProducer()) {
+ getOperand(i)->printName(out);
+ } else {
+ out.printf("(null)");
+ }
+ }
+ out.printf("\n");
+}
+
+void MResumePoint::dump() const {
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+#endif
+
+bool MResumePoint::isObservableOperand(MUse* u) const {
+ return isObservableOperand(indexOf(u));
+}
+
+bool MResumePoint::isObservableOperand(size_t index) const {
+ return block()->info().isObservableSlot(index);
+}
+
+bool MResumePoint::isRecoverableOperand(MUse* u) const {
+ return block()->info().isRecoverableOperand(indexOf(u));
+}
+
+MDefinition* MTruncateBigIntToInt64::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+
+ if (input->isBox()) {
+ input = input->getOperand(0);
+ }
+
+ // If the operand converts an I64 to BigInt, drop both conversions.
+ if (input->isInt64ToBigInt()) {
+ return input->getOperand(0);
+ }
+
+ // Fold this operation if the input operand is constant.
+ if (input->isConstant()) {
+ return MConstant::NewInt64(
+ alloc, BigInt::toInt64(input->toConstant()->toBigInt()));
+ }
+
+ return this;
+}
+
+MDefinition* MToInt64::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+
+ if (input->isBox()) {
+ input = input->getOperand(0);
+ }
+
+ // Unwrap MInt64ToBigInt: MToInt64(MInt64ToBigInt(int64)) = int64.
+ if (input->isInt64ToBigInt()) {
+ return input->getOperand(0);
+ }
+
+ // When the input is an Int64 already, just return it.
+ if (input->type() == MIRType::Int64) {
+ return input;
+ }
+
+ // Fold this operation if the input operand is constant.
+ if (input->isConstant()) {
+ switch (input->type()) {
+ case MIRType::Boolean:
+ return MConstant::NewInt64(alloc, input->toConstant()->toBoolean());
+ default:
+ break;
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MToNumberInt32::foldsTo(TempAllocator& alloc) {
+ // Fold this operation if the input operand is constant.
+ if (MConstant* cst = input()->maybeConstantValue()) {
+ switch (cst->type()) {
+ case MIRType::Null:
+ if (conversion() == IntConversionInputKind::Any) {
+ return MConstant::New(alloc, Int32Value(0));
+ }
+ break;
+ case MIRType::Boolean:
+ if (conversion() == IntConversionInputKind::Any ||
+ conversion() == IntConversionInputKind::NumbersOrBoolsOnly) {
+ return MConstant::New(alloc, Int32Value(cst->toBoolean()));
+ }
+ break;
+ case MIRType::Int32:
+ return MConstant::New(alloc, Int32Value(cst->toInt32()));
+ case MIRType::Float32:
+ case MIRType::Double:
+ int32_t ival;
+        // Only values within the Int32 range can be substituted as a
+        // constant.
+ if (mozilla::NumberIsInt32(cst->numberToDouble(), &ival)) {
+ return MConstant::New(alloc, Int32Value(ival));
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ MDefinition* input = getOperand(0);
+ if (input->isBox()) {
+ input = input->toBox()->input();
+ }
+
+  // Do not fold the TruncateToInt32 node when the input is uint32 (e.g. ursh
+  // with a zero constant). Consider the test jit-test/tests/ion/bug1247880.js,
+  // where the relevant code is: |(imul(1, x >>> 0) % 2)|. The imul operator
+  // is folded to a MTruncateToInt32 node, which will result in this MIR:
+  // MMod(MTruncateToInt32(MUrsh(x, MConstant(0))), MConstant(2)). Note that
+  // the MUrsh node's type is int32 (since uint32 is not implemented), and
+  // that would fold the MTruncateToInt32 node. This would make the modulo
+  // unsigned, while it should have been signed.
+ if (input->type() == MIRType::Int32 && !IsUint32Type(input)) {
+ return input;
+ }
+
+ return this;
+}
+
+MDefinition* MBooleanToInt32::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+ MOZ_ASSERT(input->type() == MIRType::Boolean);
+
+ if (input->isConstant()) {
+ return MConstant::New(alloc, Int32Value(input->toConstant()->toBoolean()));
+ }
+
+ return this;
+}
+
+void MToNumberInt32::analyzeEdgeCasesBackward() {
+ if (!NeedNegativeZeroCheck(this)) {
+ setNeedsNegativeZeroCheck(false);
+ }
+}
+
+MDefinition* MTruncateToInt32::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+ if (input->isBox()) {
+ input = input->getOperand(0);
+ }
+
+  // Do not fold the TruncateToInt32 node when the input is uint32 (e.g. ursh
+  // with a zero constant). Consider the test jit-test/tests/ion/bug1247880.js,
+  // where the relevant code is: |(imul(1, x >>> 0) % 2)|. The imul operator
+  // is folded to a MTruncateToInt32 node, which will result in this MIR:
+  // MMod(MTruncateToInt32(MUrsh(x, MConstant(0))), MConstant(2)). Note that
+  // the MUrsh node's type is int32 (since uint32 is not implemented), and
+  // that would fold the MTruncateToInt32 node. This would make the modulo
+  // unsigned, while it should have been signed.
+ if (input->type() == MIRType::Int32 && !IsUint32Type(input)) {
+ return input;
+ }
+
+ if (input->type() == MIRType::Double && input->isConstant()) {
+ int32_t ret = ToInt32(input->toConstant()->toDouble());
+ return MConstant::New(alloc, Int32Value(ret));
+ }
+
+ return this;
+}
+
+MDefinition* MWasmTruncateToInt32::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+ if (input->type() == MIRType::Int32) {
+ return input;
+ }
+
+ if (input->type() == MIRType::Double && input->isConstant()) {
+ double d = input->toConstant()->toDouble();
+ if (std::isnan(d)) {
+ return this;
+ }
+
+ if (!isUnsigned() && d <= double(INT32_MAX) && d >= double(INT32_MIN)) {
+ return MConstant::New(alloc, Int32Value(ToInt32(d)));
+ }
+
+ if (isUnsigned() && d <= double(UINT32_MAX) && d >= 0) {
+ return MConstant::New(alloc, Int32Value(ToInt32(d)));
+ }
+ }
+
+ if (input->type() == MIRType::Float32 && input->isConstant()) {
+ double f = double(input->toConstant()->toFloat32());
+ if (std::isnan(f)) {
+ return this;
+ }
+
+ if (!isUnsigned() && f <= double(INT32_MAX) && f >= double(INT32_MIN)) {
+ return MConstant::New(alloc, Int32Value(ToInt32(f)));
+ }
+
+ if (isUnsigned() && f <= double(UINT32_MAX) && f >= 0) {
+ return MConstant::New(alloc, Int32Value(ToInt32(f)));
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MWrapInt64ToInt32::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = this->input();
+ if (input->isConstant()) {
+ uint64_t c = input->toConstant()->toInt64();
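+    // For example, wrapping the constant 0x0000000100000005 yields 5 for the
+    // bottom half and 1 for the top half.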
+ int32_t output = bottomHalf() ? int32_t(c) : int32_t(c >> 32);
+ return MConstant::New(alloc, Int32Value(output));
+ }
+
+ return this;
+}
+
+MDefinition* MExtendInt32ToInt64::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = this->input();
+ if (input->isConstant()) {
+ int32_t c = input->toConstant()->toInt32();
+ int64_t res = isUnsigned() ? int64_t(uint32_t(c)) : int64_t(c);
+ return MConstant::NewInt64(alloc, res);
+ }
+
+ return this;
+}
+
+MDefinition* MSignExtendInt32::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = this->input();
+ if (input->isConstant()) {
+ int32_t c = input->toConstant()->toInt32();
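+    // For example, in Byte mode 0xFF sign-extends to -1 and 0x7F stays 127;
+    // in Half mode 0xFFFF becomes -1 and 0x7FFF stays 32767.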
+ int32_t res;
+ switch (mode_) {
+ case Byte:
+ res = int32_t(int8_t(c & 0xFF));
+ break;
+ case Half:
+ res = int32_t(int16_t(c & 0xFFFF));
+ break;
+ }
+ return MConstant::New(alloc, Int32Value(res));
+ }
+
+ return this;
+}
+
+MDefinition* MSignExtendInt64::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = this->input();
+ if (input->isConstant()) {
+ int64_t c = input->toConstant()->toInt64();
+ int64_t res;
+ switch (mode_) {
+ case Byte:
+ res = int64_t(int8_t(c & 0xFF));
+ break;
+ case Half:
+ res = int64_t(int16_t(c & 0xFFFF));
+ break;
+ case Word:
+ res = int64_t(int32_t(c & 0xFFFFFFFFU));
+ break;
+ }
+ return MConstant::NewInt64(alloc, res);
+ }
+
+ return this;
+}
+
+MDefinition* MToDouble::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+ if (input->isBox()) {
+ input = input->getOperand(0);
+ }
+
+ if (input->type() == MIRType::Double) {
+ return input;
+ }
+
+ if (input->isConstant() &&
+ input->toConstant()->isTypeRepresentableAsDouble()) {
+ return MConstant::New(alloc,
+ DoubleValue(input->toConstant()->numberToDouble()));
+ }
+
+ return this;
+}
+
+MDefinition* MToFloat32::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = getOperand(0);
+ if (input->isBox()) {
+ input = input->getOperand(0);
+ }
+
+ if (input->type() == MIRType::Float32) {
+ return input;
+ }
+
+ // If x is a Float32, Float32(Double(x)) == x
+ if (!mustPreserveNaN_ && input->isToDouble() &&
+ input->toToDouble()->input()->type() == MIRType::Float32) {
+ return input->toToDouble()->input();
+ }
+
+ if (input->isConstant() &&
+ input->toConstant()->isTypeRepresentableAsDouble()) {
+ return MConstant::NewFloat32(alloc,
+ float(input->toConstant()->numberToDouble()));
+ }
+
+ // Fold ToFloat32(ToDouble(int32)) to ToFloat32(int32).
+ if (input->isToDouble() &&
+ input->toToDouble()->input()->type() == MIRType::Int32) {
+ return MToFloat32::New(alloc, input->toToDouble()->input());
+ }
+
+ return this;
+}
+
+MDefinition* MToString::foldsTo(TempAllocator& alloc) {
+ MDefinition* in = input();
+ if (in->isBox()) {
+ in = in->getOperand(0);
+ }
+
+ if (in->type() == MIRType::String) {
+ return in;
+ }
+ return this;
+}
+
+MDefinition* MClampToUint8::foldsTo(TempAllocator& alloc) {
+ if (MConstant* inputConst = input()->maybeConstantValue()) {
+ if (inputConst->isTypeRepresentableAsDouble()) {
+ int32_t clamped = ClampDoubleToUint8(inputConst->numberToDouble());
+ return MConstant::New(alloc, Int32Value(clamped));
+ }
+ }
+ return this;
+}
+
+bool MCompare::tryFoldEqualOperands(bool* result) {
+ if (lhs() != rhs()) {
+ return false;
+ }
+
+  // Intuitively, if lhs === rhs we could simply return true (or false for
+  // !==). However, NaN !== NaN is true, so we spend some effort ruling out
+  // that case.
+
+ if (!IsStrictEqualityOp(jsop())) {
+ return false;
+ }
+
+ MOZ_ASSERT(
+ compareType_ == Compare_Undefined || compareType_ == Compare_Null ||
+ compareType_ == Compare_Int32 || compareType_ == Compare_UInt32 ||
+ compareType_ == Compare_UInt64 || compareType_ == Compare_Double ||
+ compareType_ == Compare_Float32 || compareType_ == Compare_UIntPtr ||
+ compareType_ == Compare_String || compareType_ == Compare_Object ||
+ compareType_ == Compare_Symbol || compareType_ == Compare_BigInt ||
+ compareType_ == Compare_BigInt_Int32 ||
+ compareType_ == Compare_BigInt_Double ||
+ compareType_ == Compare_BigInt_String);
+
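+  // Floating point comparisons can only be folded when the operands are known
+  // to never be NaN, because |NaN === NaN| is false.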
+ if (isDoubleComparison() || isFloat32Comparison()) {
+ if (!operandsAreNeverNaN()) {
+ return false;
+ }
+ }
+
+ lhs()->setGuardRangeBailoutsUnchecked();
+
+ *result = (jsop() == JSOp::StrictEq);
+ return true;
+}
+
+static JSType TypeOfName(JSLinearString* str) {
+ static constexpr std::array types = {
+ JSTYPE_UNDEFINED, JSTYPE_OBJECT, JSTYPE_FUNCTION, JSTYPE_STRING,
+ JSTYPE_NUMBER, JSTYPE_BOOLEAN, JSTYPE_SYMBOL, JSTYPE_BIGINT,
+#ifdef ENABLE_RECORD_TUPLE
+ JSTYPE_RECORD, JSTYPE_TUPLE,
+#endif
+ };
+ static_assert(types.size() == JSTYPE_LIMIT);
+
+ const JSAtomState& names = GetJitContext()->runtime->names();
+ for (auto type : types) {
+ if (EqualStrings(str, TypeName(type, names))) {
+ return type;
+ }
+ }
+ return JSTYPE_LIMIT;
+}
+
+static mozilla::Maybe<std::pair<MTypeOfName*, JSType>> IsTypeOfCompare(
+ MCompare* ins) {
+ if (!IsEqualityOp(ins->jsop())) {
+ return mozilla::Nothing();
+ }
+ if (ins->compareType() != MCompare::Compare_String) {
+ return mozilla::Nothing();
+ }
+
+ auto* lhs = ins->lhs();
+ auto* rhs = ins->rhs();
+
+ MOZ_ASSERT(ins->type() == MIRType::Boolean);
+ MOZ_ASSERT(lhs->type() == MIRType::String);
+ MOZ_ASSERT(rhs->type() == MIRType::String);
+
+ if (!lhs->isTypeOfName() && !rhs->isTypeOfName()) {
+ return mozilla::Nothing();
+ }
+ if (!lhs->isConstant() && !rhs->isConstant()) {
+ return mozilla::Nothing();
+ }
+
+ auto* typeOfName =
+ lhs->isTypeOfName() ? lhs->toTypeOfName() : rhs->toTypeOfName();
+ MOZ_ASSERT(typeOfName->input()->isTypeOf());
+
+ auto* constant = lhs->isConstant() ? lhs->toConstant() : rhs->toConstant();
+
+ JSType type = TypeOfName(&constant->toString()->asLinear());
+ return mozilla::Some(std::pair(typeOfName, type));
+}
+
+bool MCompare::tryFoldTypeOf(bool* result) {
+ auto typeOfPair = IsTypeOfCompare(this);
+ if (!typeOfPair) {
+ return false;
+ }
+ auto [typeOfName, type] = *typeOfPair;
+ auto* typeOf = typeOfName->input()->toTypeOf();
+
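+  // If the operand can never have the compared type, the comparison has a
+  // known result: e.g. |typeof x === "number"| is false (and |!==| is true)
+  // when |x| cannot be an Int32, Float32, or Double.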
+ switch (type) {
+ case JSTYPE_BOOLEAN:
+ if (!typeOf->input()->mightBeType(MIRType::Boolean)) {
+ *result = (jsop() == JSOp::StrictNe || jsop() == JSOp::Ne);
+ return true;
+ }
+ break;
+ case JSTYPE_NUMBER:
+ if (!typeOf->input()->mightBeType(MIRType::Int32) &&
+ !typeOf->input()->mightBeType(MIRType::Float32) &&
+ !typeOf->input()->mightBeType(MIRType::Double)) {
+ *result = (jsop() == JSOp::StrictNe || jsop() == JSOp::Ne);
+ return true;
+ }
+ break;
+ case JSTYPE_STRING:
+ if (!typeOf->input()->mightBeType(MIRType::String)) {
+ *result = (jsop() == JSOp::StrictNe || jsop() == JSOp::Ne);
+ return true;
+ }
+ break;
+ case JSTYPE_SYMBOL:
+ if (!typeOf->input()->mightBeType(MIRType::Symbol)) {
+ *result = (jsop() == JSOp::StrictNe || jsop() == JSOp::Ne);
+ return true;
+ }
+ break;
+ case JSTYPE_BIGINT:
+ if (!typeOf->input()->mightBeType(MIRType::BigInt)) {
+ *result = (jsop() == JSOp::StrictNe || jsop() == JSOp::Ne);
+ return true;
+ }
+ break;
+ case JSTYPE_OBJECT:
+ if (!typeOf->input()->mightBeType(MIRType::Object) &&
+ !typeOf->input()->mightBeType(MIRType::Null)) {
+ *result = (jsop() == JSOp::StrictNe || jsop() == JSOp::Ne);
+ return true;
+ }
+ break;
+ case JSTYPE_UNDEFINED:
+ if (!typeOf->input()->mightBeType(MIRType::Object) &&
+ !typeOf->input()->mightBeType(MIRType::Undefined)) {
+ *result = (jsop() == JSOp::StrictNe || jsop() == JSOp::Ne);
+ return true;
+ }
+ break;
+ case JSTYPE_FUNCTION:
+ if (!typeOf->input()->mightBeType(MIRType::Object)) {
+ *result = (jsop() == JSOp::StrictNe || jsop() == JSOp::Ne);
+ return true;
+ }
+ break;
+ case JSTYPE_LIMIT:
+ *result = (jsop() == JSOp::StrictNe || jsop() == JSOp::Ne);
+ return true;
+#ifdef ENABLE_RECORD_TUPLE
+ case JSTYPE_RECORD:
+ case JSTYPE_TUPLE:
+ MOZ_CRASH("Records and Tuples are not supported yet.");
+#endif
+ }
+
+ return false;
+}
+
+bool MCompare::tryFold(bool* result) {
+ JSOp op = jsop();
+
+ if (tryFoldEqualOperands(result)) {
+ return true;
+ }
+
+ if (tryFoldTypeOf(result)) {
+ return true;
+ }
+
+ if (compareType_ == Compare_Null || compareType_ == Compare_Undefined) {
+ // The LHS is the value we want to test against null or undefined.
+ if (IsStrictEqualityOp(op)) {
+ if (lhs()->type() == inputType()) {
+ *result = (op == JSOp::StrictEq);
+ return true;
+ }
+ if (!lhs()->mightBeType(inputType())) {
+ *result = (op == JSOp::StrictNe);
+ return true;
+ }
+ } else {
+ MOZ_ASSERT(IsLooseEqualityOp(op));
+ if (IsNullOrUndefined(lhs()->type())) {
+ *result = (op == JSOp::Eq);
+ return true;
+ }
+ if (!lhs()->mightBeType(MIRType::Null) &&
+ !lhs()->mightBeType(MIRType::Undefined) &&
+ !lhs()->mightBeType(MIRType::Object)) {
+ *result = (op == JSOp::Ne);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ return false;
+}
+
+template <typename T>
+static bool FoldComparison(JSOp op, T left, T right) {
+ switch (op) {
+ case JSOp::Lt:
+ return left < right;
+ case JSOp::Le:
+ return left <= right;
+ case JSOp::Gt:
+ return left > right;
+ case JSOp::Ge:
+ return left >= right;
+ case JSOp::StrictEq:
+ case JSOp::Eq:
+ return left == right;
+ case JSOp::StrictNe:
+ case JSOp::Ne:
+ return left != right;
+ default:
+ MOZ_CRASH("Unexpected op.");
+ }
+}
+
+bool MCompare::evaluateConstantOperands(TempAllocator& alloc, bool* result) {
+ if (type() != MIRType::Boolean && type() != MIRType::Int32) {
+ return false;
+ }
+
+ MDefinition* left = getOperand(0);
+ MDefinition* right = getOperand(1);
+
+ if (compareType() == Compare_Double) {
+ // Optimize "MCompare MConstant (MToDouble SomethingInInt32Range).
+ // In most cases the MToDouble was added, because the constant is
+ // a double.
+ // e.g. v < 9007199254740991, where v is an int32 is always true.
+ if (!lhs()->isConstant() && !rhs()->isConstant()) {
+ return false;
+ }
+
+ MDefinition* operand = left->isConstant() ? right : left;
+ MConstant* constant =
+ left->isConstant() ? left->toConstant() : right->toConstant();
+ MOZ_ASSERT(constant->type() == MIRType::Double);
+ double cte = constant->toDouble();
+
+ if (operand->isToDouble() &&
+ operand->getOperand(0)->type() == MIRType::Int32) {
+ bool replaced = false;
+ switch (jsop_) {
+ case JSOp::Lt:
+ if (cte > INT32_MAX || cte < INT32_MIN) {
+ *result = !((constant == lhs()) ^ (cte < INT32_MIN));
+ replaced = true;
+ }
+ break;
+ case JSOp::Le:
+ if (constant == lhs()) {
+ if (cte > INT32_MAX || cte <= INT32_MIN) {
+ *result = (cte <= INT32_MIN);
+ replaced = true;
+ }
+ } else {
+ if (cte >= INT32_MAX || cte < INT32_MIN) {
+ *result = (cte >= INT32_MIN);
+ replaced = true;
+ }
+ }
+ break;
+ case JSOp::Gt:
+ if (cte > INT32_MAX || cte < INT32_MIN) {
+ *result = !((constant == rhs()) ^ (cte < INT32_MIN));
+ replaced = true;
+ }
+ break;
+ case JSOp::Ge:
+ if (constant == lhs()) {
+ if (cte >= INT32_MAX || cte < INT32_MIN) {
+ *result = (cte >= INT32_MAX);
+ replaced = true;
+ }
+ } else {
+ if (cte > INT32_MAX || cte <= INT32_MIN) {
+ *result = (cte <= INT32_MIN);
+ replaced = true;
+ }
+ }
+ break;
+ case JSOp::StrictEq: // Fall through.
+ case JSOp::Eq:
+ if (cte > INT32_MAX || cte < INT32_MIN) {
+ *result = false;
+ replaced = true;
+ }
+ break;
+ case JSOp::StrictNe: // Fall through.
+ case JSOp::Ne:
+ if (cte > INT32_MAX || cte < INT32_MIN) {
+ *result = true;
+ replaced = true;
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected op.");
+ }
+ if (replaced) {
+ MLimitedTruncate* limit = MLimitedTruncate::New(
+ alloc, operand->getOperand(0), TruncateKind::NoTruncate);
+ limit->setGuardUnchecked();
+ block()->insertBefore(this, limit);
+ return true;
+ }
+ }
+
+ // Optimize comparison against NaN.
+ if (std::isnan(cte)) {
+ switch (jsop_) {
+ case JSOp::Lt:
+ case JSOp::Le:
+ case JSOp::Gt:
+ case JSOp::Ge:
+ case JSOp::Eq:
+ case JSOp::StrictEq:
+ *result = false;
+ break;
+ case JSOp::Ne:
+ case JSOp::StrictNe:
+ *result = true;
+ break;
+ default:
+ MOZ_CRASH("Unexpected op.");
+ }
+ return true;
+ }
+ }
+
+ if (!left->isConstant() || !right->isConstant()) {
+ return false;
+ }
+
+ MConstant* lhs = left->toConstant();
+ MConstant* rhs = right->toConstant();
+
+ // Fold away some String equality comparisons.
+ if (lhs->type() == MIRType::String && rhs->type() == MIRType::String) {
+ int32_t comp = 0; // Default to equal.
+ if (left != right) {
+ comp = CompareStrings(&lhs->toString()->asLinear(),
+ &rhs->toString()->asLinear());
+ }
+ *result = FoldComparison(jsop_, comp, 0);
+ return true;
+ }
+
+ if (compareType_ == Compare_UInt32) {
+ *result = FoldComparison(jsop_, uint32_t(lhs->toInt32()),
+ uint32_t(rhs->toInt32()));
+ return true;
+ }
+
+ if (compareType_ == Compare_Int64) {
+ *result = FoldComparison(jsop_, lhs->toInt64(), rhs->toInt64());
+ return true;
+ }
+
+ if (compareType_ == Compare_UInt64) {
+ *result = FoldComparison(jsop_, uint64_t(lhs->toInt64()),
+ uint64_t(rhs->toInt64()));
+ return true;
+ }
+
+ if (lhs->isTypeRepresentableAsDouble() &&
+ rhs->isTypeRepresentableAsDouble()) {
+ *result =
+ FoldComparison(jsop_, lhs->numberToDouble(), rhs->numberToDouble());
+ return true;
+ }
+
+ return false;
+}
+
+MDefinition* MCompare::tryFoldTypeOf(TempAllocator& alloc) {
+ auto typeOfPair = IsTypeOfCompare(this);
+ if (!typeOfPair) {
+ return this;
+ }
+ auto [typeOfName, type] = *typeOfPair;
+ auto* typeOf = typeOfName->input()->toTypeOf();
+
+ auto* input = typeOf->input();
+ MOZ_ASSERT(input->type() == MIRType::Value ||
+ input->type() == MIRType::Object);
+
+ // Constant typeof folding handles the other cases.
+ MOZ_ASSERT_IF(input->type() == MIRType::Object, type == JSTYPE_UNDEFINED ||
+ type == JSTYPE_OBJECT ||
+ type == JSTYPE_FUNCTION);
+
+ MOZ_ASSERT(type != JSTYPE_LIMIT, "unknown typeof strings folded earlier");
+
+ // If there's only a single use, assume this |typeof| is used in a simple
+ // comparison context.
+ //
+ // if (typeof thing === "number") { ... }
+ //
+ // It'll be compiled into something similar to:
+ //
+ // if (IsNumber(thing)) { ... }
+ //
+ // This heuristic can go wrong when repeated |typeof| are used in consecutive
+ // if-statements.
+ //
+ // if (typeof thing === "number") { ... }
+ // else if (typeof thing === "string") { ... }
+ // ... repeated for all possible types
+ //
+  // In that case it'd be more efficient to emit MTypeOf compared to MTypeOfIs.
+  // We don't yet handle that case, because it'd require a separate
+  // optimization pass to correctly detect it.
+ if (typeOfName->hasOneUse()) {
+ return MTypeOfIs::New(alloc, input, jsop(), type);
+ }
+
+ MConstant* cst = MConstant::New(alloc, Int32Value(type));
+ block()->insertBefore(this, cst);
+
+ return MCompare::New(alloc, typeOf, cst, jsop(), MCompare::Compare_Int32);
+}
+
+MDefinition* MCompare::tryFoldCharCompare(TempAllocator& alloc) {
+ if (compareType() != Compare_String) {
+ return this;
+ }
+
+ MDefinition* left = lhs();
+ MOZ_ASSERT(left->type() == MIRType::String);
+
+ MDefinition* right = rhs();
+ MOZ_ASSERT(right->type() == MIRType::String);
+
+ // |str[i]| is compiled as |MFromCharCode(MCharCodeAt(str, i))|.
+ auto isCharAccess = [](MDefinition* ins) {
+ return ins->isFromCharCode() &&
+ ins->toFromCharCode()->input()->isCharCodeAt();
+ };
+
+ if (left->isConstant() || right->isConstant()) {
+ // Try to optimize |MConstant(string) <compare> (MFromCharCode MCharCodeAt)|
+ // as |MConstant(charcode) <compare> MCharCodeAt|.
+ MConstant* constant;
+ MDefinition* operand;
+ if (left->isConstant()) {
+ constant = left->toConstant();
+ operand = right;
+ } else {
+ constant = right->toConstant();
+ operand = left;
+ }
+
+ if (constant->toString()->length() != 1 || !isCharAccess(operand)) {
+ return this;
+ }
+
+ char16_t charCode = constant->toString()->asLinear().latin1OrTwoByteChar(0);
+ MConstant* charCodeConst = MConstant::New(alloc, Int32Value(charCode));
+ block()->insertBefore(this, charCodeConst);
+
+ MDefinition* charCodeAt = operand->toFromCharCode()->input();
+
+ if (left->isConstant()) {
+ left = charCodeConst;
+ right = charCodeAt;
+ } else {
+ left = charCodeAt;
+ right = charCodeConst;
+ }
+ } else if (isCharAccess(left) && isCharAccess(right)) {
+ // Try to optimize |(MFromCharCode MCharCodeAt) <compare> (MFromCharCode
+ // MCharCodeAt)| as |MCharCodeAt <compare> MCharCodeAt|.
+
+ left = left->toFromCharCode()->input();
+ right = right->toFromCharCode()->input();
+ } else {
+ return this;
+ }
+
+ return MCompare::New(alloc, left, right, jsop(), MCompare::Compare_Int32);
+}
+
+MDefinition* MCompare::tryFoldStringCompare(TempAllocator& alloc) {
+ if (compareType() != Compare_String) {
+ return this;
+ }
+
+ MDefinition* left = lhs();
+ MOZ_ASSERT(left->type() == MIRType::String);
+
+ MDefinition* right = rhs();
+ MOZ_ASSERT(right->type() == MIRType::String);
+
+ if (!left->isConstant() && !right->isConstant()) {
+ return this;
+ }
+
+ // Try to optimize |string <compare> MConstant("")| as |MStringLength(string)
+ // <compare> MConstant(0)|.
+
+ MConstant* constant =
+ left->isConstant() ? left->toConstant() : right->toConstant();
+ if (!constant->toString()->empty()) {
+ return this;
+ }
+
+ MDefinition* operand = left->isConstant() ? right : left;
+
+ auto* strLength = MStringLength::New(alloc, operand);
+ block()->insertBefore(this, strLength);
+
+ auto* zero = MConstant::New(alloc, Int32Value(0));
+ block()->insertBefore(this, zero);
+
+ if (left->isConstant()) {
+ left = zero;
+ right = strLength;
+ } else {
+ left = strLength;
+ right = zero;
+ }
+
+ return MCompare::New(alloc, left, right, jsop(), MCompare::Compare_Int32);
+}
+
+MDefinition* MCompare::tryFoldStringSubstring(TempAllocator& alloc) {
+ if (compareType() != Compare_String) {
+ return this;
+ }
+ if (!IsEqualityOp(jsop())) {
+ return this;
+ }
+
+ auto* left = lhs();
+ MOZ_ASSERT(left->type() == MIRType::String);
+
+ auto* right = rhs();
+ MOZ_ASSERT(right->type() == MIRType::String);
+
+ // One operand must be a constant string.
+ if (!left->isConstant() && !right->isConstant()) {
+ return this;
+ }
+
+ // The constant string must be non-empty.
+ auto* constant =
+ left->isConstant() ? left->toConstant() : right->toConstant();
+ if (constant->toString()->empty()) {
+ return this;
+ }
+
+ // The other operand must be a substring operation.
+ auto* operand = left->isConstant() ? right : left;
+ if (!operand->isSubstr()) {
+ return this;
+ }
+
+ // We want to match this pattern:
+ // Substr(string, Constant(0), Min(Constant(length), StringLength(string)))
+ auto* substr = operand->toSubstr();
+
+ auto isConstantZero = [](auto* def) {
+ return def->isConstant() && def->toConstant()->isInt32(0);
+ };
+
+ if (!isConstantZero(substr->begin())) {
+ return this;
+ }
+
+ auto* length = substr->length();
+ if (length->isBitOr()) {
+ // Unnecessary bit-ops haven't yet been removed.
+ auto* bitOr = length->toBitOr();
+ if (isConstantZero(bitOr->lhs())) {
+ length = bitOr->rhs();
+ } else if (isConstantZero(bitOr->rhs())) {
+ length = bitOr->lhs();
+ }
+ }
+ if (!length->isMinMax() || length->toMinMax()->isMax()) {
+ return this;
+ }
+
+ auto* min = length->toMinMax();
+ if (!min->lhs()->isConstant() && !min->rhs()->isConstant()) {
+ return this;
+ }
+
+ auto* minConstant = min->lhs()->isConstant() ? min->lhs()->toConstant()
+ : min->rhs()->toConstant();
+
+ auto* minOperand = min->lhs()->isConstant() ? min->rhs() : min->lhs();
+ if (!minOperand->isStringLength() ||
+ minOperand->toStringLength()->string() != substr->string()) {
+ return this;
+ }
+
+  static_assert(JSString::MAX_LENGTH < INT32_MAX,
+                "string length can be cast to int32_t");
+
+ // Ensure the string length matches the substring's length.
+ if (!minConstant->isInt32(int32_t(constant->toString()->length()))) {
+ return this;
+ }
+
+ // Now fold code like |str.substring(0, 2) == "aa"| to |str.startsWith("aa")|.
+
+ auto* startsWith = MStringStartsWith::New(alloc, substr->string(), constant);
+ if (jsop() == JSOp::Eq || jsop() == JSOp::StrictEq) {
+ return startsWith;
+ }
+
+ // Invert for inequality.
+ MOZ_ASSERT(jsop() == JSOp::Ne || jsop() == JSOp::StrictNe);
+
+ block()->insertBefore(this, startsWith);
+ return MNot::New(alloc, startsWith);
+}
+
+MDefinition* MCompare::tryFoldStringIndexOf(TempAllocator& alloc) {
+ if (compareType() != Compare_Int32) {
+ return this;
+ }
+ if (!IsEqualityOp(jsop())) {
+ return this;
+ }
+
+ auto* left = lhs();
+ MOZ_ASSERT(left->type() == MIRType::Int32);
+
+ auto* right = rhs();
+ MOZ_ASSERT(right->type() == MIRType::Int32);
+
+ // One operand must be a constant integer.
+ if (!left->isConstant() && !right->isConstant()) {
+ return this;
+ }
+
+ // The constant must be zero.
+ auto* constant =
+ left->isConstant() ? left->toConstant() : right->toConstant();
+ if (!constant->isInt32(0)) {
+ return this;
+ }
+
+ // The other operand must be an indexOf operation.
+ auto* operand = left->isConstant() ? right : left;
+ if (!operand->isStringIndexOf()) {
+ return this;
+ }
+
+ // Fold |str.indexOf(searchStr) == 0| to |str.startsWith(searchStr)|.
+
+ auto* indexOf = operand->toStringIndexOf();
+ auto* startsWith =
+ MStringStartsWith::New(alloc, indexOf->string(), indexOf->searchString());
+ if (jsop() == JSOp::Eq || jsop() == JSOp::StrictEq) {
+ return startsWith;
+ }
+
+ // Invert for inequality.
+ MOZ_ASSERT(jsop() == JSOp::Ne || jsop() == JSOp::StrictNe);
+
+ block()->insertBefore(this, startsWith);
+ return MNot::New(alloc, startsWith);
+}
+
+MDefinition* MCompare::foldsTo(TempAllocator& alloc) {
+ bool result;
+
+ if (tryFold(&result) || evaluateConstantOperands(alloc, &result)) {
+ if (type() == MIRType::Int32) {
+ return MConstant::New(alloc, Int32Value(result));
+ }
+
+ MOZ_ASSERT(type() == MIRType::Boolean);
+ return MConstant::New(alloc, BooleanValue(result));
+ }
+
+ if (MDefinition* folded = tryFoldTypeOf(alloc); folded != this) {
+ return folded;
+ }
+
+ if (MDefinition* folded = tryFoldCharCompare(alloc); folded != this) {
+ return folded;
+ }
+
+ if (MDefinition* folded = tryFoldStringCompare(alloc); folded != this) {
+ return folded;
+ }
+
+ if (MDefinition* folded = tryFoldStringSubstring(alloc); folded != this) {
+ return folded;
+ }
+
+ if (MDefinition* folded = tryFoldStringIndexOf(alloc); folded != this) {
+ return folded;
+ }
+
+ return this;
+}
+
+void MCompare::trySpecializeFloat32(TempAllocator& alloc) {
+ if (AllOperandsCanProduceFloat32(this) && compareType_ == Compare_Double) {
+ compareType_ = Compare_Float32;
+ } else {
+ ConvertOperandsToDouble(this, alloc);
+ }
+}
+
+MDefinition* MNot::foldsTo(TempAllocator& alloc) {
+ // Fold if the input is constant
+ if (MConstant* inputConst = input()->maybeConstantValue()) {
+ bool b;
+ if (inputConst->valueToBoolean(&b)) {
+ if (type() == MIRType::Int32 || type() == MIRType::Int64) {
+ return MConstant::New(alloc, Int32Value(!b));
+ }
+ return MConstant::New(alloc, BooleanValue(!b));
+ }
+ }
+
+ // If the operand of the Not is itself a Not, they cancel out. But we can't
+  // always convert Not(Not(x)) to x because that may lose the conversion to
+ // boolean. We can simplify Not(Not(Not(x))) to Not(x) though.
+ MDefinition* op = getOperand(0);
+ if (op->isNot()) {
+ MDefinition* opop = op->getOperand(0);
+ if (opop->isNot()) {
+ return opop;
+ }
+ }
+
+ // Not of an undefined or null value is always true
+ if (input()->type() == MIRType::Undefined ||
+ input()->type() == MIRType::Null) {
+ return MConstant::New(alloc, BooleanValue(true));
+ }
+
+ // Not of a symbol is always false.
+ if (input()->type() == MIRType::Symbol) {
+ return MConstant::New(alloc, BooleanValue(false));
+ }
+
+ return this;
+}
+
+void MNot::trySpecializeFloat32(TempAllocator& alloc) {
+ (void)EnsureFloatInputOrConvert(this, alloc);
+}
+
+#ifdef JS_JITSPEW
+void MBeta::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+
+ out.printf(" ");
+ comparison_->dump(out);
+}
+#endif
+
+AliasSet MCreateThis::getAliasSet() const {
+ return AliasSet::Load(AliasSet::Any);
+}
+
+bool MGetArgumentsObjectArg::congruentTo(const MDefinition* ins) const {
+ if (!ins->isGetArgumentsObjectArg()) {
+ return false;
+ }
+ if (ins->toGetArgumentsObjectArg()->argno() != argno()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MGetArgumentsObjectArg::getAliasSet() const {
+ return AliasSet::Load(AliasSet::Any);
+}
+
+AliasSet MSetArgumentsObjectArg::getAliasSet() const {
+ return AliasSet::Store(AliasSet::Any);
+}
+
+MObjectState::MObjectState(MObjectState* state)
+ : MVariadicInstruction(classOpcode),
+ numSlots_(state->numSlots_),
+ numFixedSlots_(state->numFixedSlots_) {
+ // This instruction is only used as a summary for bailout paths.
+ setResultType(MIRType::Object);
+ setRecoveredOnBailout();
+}
+
+MObjectState::MObjectState(JSObject* templateObject)
+ : MObjectState(templateObject->as<NativeObject>().shape()) {}
+
+MObjectState::MObjectState(const Shape* shape)
+ : MVariadicInstruction(classOpcode) {
+ // This instruction is only used as a summary for bailout paths.
+ setResultType(MIRType::Object);
+ setRecoveredOnBailout();
+
+ numSlots_ = shape->asShared().slotSpan();
+ numFixedSlots_ = shape->asShared().numFixedSlots();
+}
+
+/* static */
+JSObject* MObjectState::templateObjectOf(MDefinition* obj) {
+ // MNewPlainObject uses a shape constant, not an object.
+ MOZ_ASSERT(!obj->isNewPlainObject());
+
+ if (obj->isNewObject()) {
+ return obj->toNewObject()->templateObject();
+ } else if (obj->isNewCallObject()) {
+ return obj->toNewCallObject()->templateObject();
+ } else if (obj->isNewIterator()) {
+ return obj->toNewIterator()->templateObject();
+ }
+
+ MOZ_CRASH("unreachable");
+}
+
+bool MObjectState::init(TempAllocator& alloc, MDefinition* obj) {
+ if (!MVariadicInstruction::init(alloc, numSlots() + 1)) {
+ return false;
+ }
+ // +1, for the Object.
+ initOperand(0, obj);
+ return true;
+}
+
+void MObjectState::initFromTemplateObject(TempAllocator& alloc,
+ MDefinition* undefinedVal) {
+ if (object()->isNewPlainObject()) {
+ MOZ_ASSERT(object()->toNewPlainObject()->shape()->asShared().slotSpan() ==
+ numSlots());
+ for (size_t i = 0; i < numSlots(); i++) {
+ initSlot(i, undefinedVal);
+ }
+ return;
+ }
+
+ JSObject* templateObject = templateObjectOf(object());
+
+ // Initialize all the slots of the object state with the value contained in
+  // the template object. This is needed to account for values which are baked
+  // into the template objects and not visible to IonMonkey, such as the
+ // uninitialized-lexical magic value of call objects.
+
+ MOZ_ASSERT(templateObject->is<NativeObject>());
+ NativeObject& nativeObject = templateObject->as<NativeObject>();
+ MOZ_ASSERT(nativeObject.slotSpan() == numSlots());
+
+ for (size_t i = 0; i < numSlots(); i++) {
+ Value val = nativeObject.getSlot(i);
+ MDefinition* def = undefinedVal;
+ if (!val.isUndefined()) {
+ MConstant* ins = MConstant::New(alloc, val);
+ block()->insertBefore(this, ins);
+ def = ins;
+ }
+ initSlot(i, def);
+ }
+}
+
+MObjectState* MObjectState::New(TempAllocator& alloc, MDefinition* obj) {
+ MObjectState* res;
+ if (obj->isNewPlainObject()) {
+ const Shape* shape = obj->toNewPlainObject()->shape();
+ res = new (alloc) MObjectState(shape);
+ } else {
+ JSObject* templateObject = templateObjectOf(obj);
+ MOZ_ASSERT(templateObject, "Unexpected object creation.");
+ res = new (alloc) MObjectState(templateObject);
+ }
+
+ if (!res || !res->init(alloc, obj)) {
+ return nullptr;
+ }
+ return res;
+}
+
+MObjectState* MObjectState::Copy(TempAllocator& alloc, MObjectState* state) {
+ MObjectState* res = new (alloc) MObjectState(state);
+ if (!res || !res->init(alloc, state->object())) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < res->numSlots(); i++) {
+ res->initSlot(i, state->getSlot(i));
+ }
+ return res;
+}
+
+MArrayState::MArrayState(MDefinition* arr) : MVariadicInstruction(classOpcode) {
+ // This instruction is only used as a summary for bailout paths.
+ setResultType(MIRType::Object);
+ setRecoveredOnBailout();
+ if (arr->isNewArrayObject()) {
+ numElements_ = arr->toNewArrayObject()->length();
+ } else {
+ numElements_ = arr->toNewArray()->length();
+ }
+}
+
+bool MArrayState::init(TempAllocator& alloc, MDefinition* obj,
+ MDefinition* len) {
+ if (!MVariadicInstruction::init(alloc, numElements() + 2)) {
+ return false;
+ }
+ // +1, for the Array object.
+ initOperand(0, obj);
+ // +1, for the length value of the array.
+ initOperand(1, len);
+ return true;
+}
+
+void MArrayState::initFromTemplateObject(TempAllocator& alloc,
+ MDefinition* undefinedVal) {
+ for (size_t i = 0; i < numElements(); i++) {
+ initElement(i, undefinedVal);
+ }
+}
+
+MArrayState* MArrayState::New(TempAllocator& alloc, MDefinition* arr,
+ MDefinition* initLength) {
+ MArrayState* res = new (alloc) MArrayState(arr);
+ if (!res || !res->init(alloc, arr, initLength)) {
+ return nullptr;
+ }
+ return res;
+}
+
+MArrayState* MArrayState::Copy(TempAllocator& alloc, MArrayState* state) {
+ MDefinition* arr = state->array();
+ MDefinition* len = state->initializedLength();
+ MArrayState* res = new (alloc) MArrayState(arr);
+ if (!res || !res->init(alloc, arr, len)) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < res->numElements(); i++) {
+ res->initElement(i, state->getElement(i));
+ }
+ return res;
+}
+
+MNewArray::MNewArray(uint32_t length, MConstant* templateConst,
+ gc::Heap initialHeap, bool vmCall)
+ : MUnaryInstruction(classOpcode, templateConst),
+ length_(length),
+ initialHeap_(initialHeap),
+ vmCall_(vmCall) {
+ setResultType(MIRType::Object);
+}
+
+MDefinition::AliasType MLoadFixedSlot::mightAlias(
+ const MDefinition* def) const {
+ if (def->isStoreFixedSlot()) {
+ const MStoreFixedSlot* store = def->toStoreFixedSlot();
+ if (store->slot() != slot()) {
+ return AliasType::NoAlias;
+ }
+ if (store->object() != object()) {
+ return AliasType::MayAlias;
+ }
+ return AliasType::MustAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+MDefinition* MLoadFixedSlot::foldsTo(TempAllocator& alloc) {
+ if (MDefinition* def = foldsToStore(alloc)) {
+ return def;
+ }
+
+ return this;
+}
+
+MDefinition::AliasType MLoadFixedSlotAndUnbox::mightAlias(
+ const MDefinition* def) const {
+ if (def->isStoreFixedSlot()) {
+ const MStoreFixedSlot* store = def->toStoreFixedSlot();
+ if (store->slot() != slot()) {
+ return AliasType::NoAlias;
+ }
+ if (store->object() != object()) {
+ return AliasType::MayAlias;
+ }
+ return AliasType::MustAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+MDefinition* MLoadFixedSlotAndUnbox::foldsTo(TempAllocator& alloc) {
+ if (MDefinition* def = foldsToStore(alloc)) {
+ return def;
+ }
+
+ return this;
+}
+
+MDefinition* MWasmExtendU32Index::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = this->input();
+ if (input->isConstant()) {
+ return MConstant::NewInt64(
+ alloc, int64_t(uint32_t(input->toConstant()->toInt32())));
+ }
+
+ return this;
+}
+
+MDefinition* MWasmWrapU32Index::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = this->input();
+ if (input->isConstant()) {
+ return MConstant::New(
+ alloc, Int32Value(int32_t(uint32_t(input->toConstant()->toInt64()))));
+ }
+
+ return this;
+}
+
+// Some helpers for folding wasm and/or/xor on int32/64 values. Rather than
+// duplicating these for 32-bit and 64-bit values, all folding is done on
+// 64-bit values, and the result is masked for the 32-bit case.
+
+const uint64_t Low32Mask = uint64_t(0xFFFFFFFFULL);
+
+// Routines to check and disassemble values.
+
+static bool IsIntegralConstant(const MDefinition* def) {
+ return def->isConstant() &&
+ (def->type() == MIRType::Int32 || def->type() == MIRType::Int64);
+}
+
+static uint64_t GetIntegralConstant(const MDefinition* def) {
+ if (def->type() == MIRType::Int32) {
+ return uint64_t(def->toConstant()->toInt32()) & Low32Mask;
+ }
+ return uint64_t(def->toConstant()->toInt64());
+}
+
+static bool IsIntegralConstantZero(const MDefinition* def) {
+ return IsIntegralConstant(def) && GetIntegralConstant(def) == 0;
+}
+
+static bool IsIntegralConstantOnes(const MDefinition* def) {
+ uint64_t ones = def->type() == MIRType::Int32 ? Low32Mask : ~uint64_t(0);
+ return IsIntegralConstant(def) && GetIntegralConstant(def) == ones;
+}
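+
+// For example, the Int32 constant -1 is widened here to 0x00000000FFFFFFFF,
+// so IsIntegralConstantOnes compares it against Low32Mask for Int32 values
+// and against ~0 for Int64 values.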
+
+// Routines to create values.
+static MDefinition* ToIntegralConstant(TempAllocator& alloc, MIRType ty,
+ uint64_t val) {
+ switch (ty) {
+ case MIRType::Int32:
+ return MConstant::New(alloc,
+ Int32Value(int32_t(uint32_t(val & Low32Mask))));
+ case MIRType::Int64:
+ return MConstant::NewInt64(alloc, int64_t(val));
+ default:
+ MOZ_CRASH();
+ }
+}
+
+static MDefinition* ZeroOfType(TempAllocator& alloc, MIRType ty) {
+ return ToIntegralConstant(alloc, ty, 0);
+}
+
+static MDefinition* OnesOfType(TempAllocator& alloc, MIRType ty) {
+ return ToIntegralConstant(alloc, ty, ~uint64_t(0));
+}
+
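+// The folds below are the usual bitwise identities: x&x = x|x = x and
+// x^x = 0; zero is the identity for or/xor and annihilates and; all-ones is
+// the identity for and, annihilates or, and turns xor into a bitwise negation
+// (MBitNot).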
+MDefinition* MWasmBinaryBitwise::foldsTo(TempAllocator& alloc) {
+ MOZ_ASSERT(op() == Opcode::WasmBinaryBitwise);
+ MOZ_ASSERT(type() == MIRType::Int32 || type() == MIRType::Int64);
+
+ MDefinition* argL = getOperand(0);
+ MDefinition* argR = getOperand(1);
+ MOZ_ASSERT(argL->type() == type() && argR->type() == type());
+
+ // The args are the same (SSA name)
+ if (argL == argR) {
+ switch (subOpcode()) {
+ case SubOpcode::And:
+ case SubOpcode::Or:
+ return argL;
+ case SubOpcode::Xor:
+ return ZeroOfType(alloc, type());
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ // Both args constant
+ if (IsIntegralConstant(argL) && IsIntegralConstant(argR)) {
+ uint64_t valL = GetIntegralConstant(argL);
+ uint64_t valR = GetIntegralConstant(argR);
+ uint64_t val = valL;
+ switch (subOpcode()) {
+ case SubOpcode::And:
+ val &= valR;
+ break;
+ case SubOpcode::Or:
+ val |= valR;
+ break;
+ case SubOpcode::Xor:
+ val ^= valR;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ return ToIntegralConstant(alloc, type(), val);
+ }
+
+ // Left arg is zero
+ if (IsIntegralConstantZero(argL)) {
+ switch (subOpcode()) {
+ case SubOpcode::And:
+ return ZeroOfType(alloc, type());
+ case SubOpcode::Or:
+ case SubOpcode::Xor:
+ return argR;
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ // Right arg is zero
+ if (IsIntegralConstantZero(argR)) {
+ switch (subOpcode()) {
+ case SubOpcode::And:
+ return ZeroOfType(alloc, type());
+ case SubOpcode::Or:
+ case SubOpcode::Xor:
+ return argL;
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ // Left arg is ones
+ if (IsIntegralConstantOnes(argL)) {
+ switch (subOpcode()) {
+ case SubOpcode::And:
+ return argR;
+ case SubOpcode::Or:
+ return OnesOfType(alloc, type());
+ case SubOpcode::Xor:
+ return MBitNot::New(alloc, argR);
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ // Right arg is ones
+ if (IsIntegralConstantOnes(argR)) {
+ switch (subOpcode()) {
+ case SubOpcode::And:
+ return argL;
+ case SubOpcode::Or:
+ return OnesOfType(alloc, type());
+ case SubOpcode::Xor:
+ return MBitNot::New(alloc, argL);
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MWasmAddOffset::foldsTo(TempAllocator& alloc) {
+ MDefinition* baseArg = base();
+ if (!baseArg->isConstant()) {
+ return this;
+ }
+
+ if (baseArg->type() == MIRType::Int32) {
+ CheckedInt<uint32_t> ptr = baseArg->toConstant()->toInt32();
+ ptr += offset();
+ if (!ptr.isValid()) {
+ return this;
+ }
+ return MConstant::New(alloc, Int32Value(ptr.value()));
+ }
+
+ MOZ_ASSERT(baseArg->type() == MIRType::Int64);
+ CheckedInt<uint64_t> ptr = baseArg->toConstant()->toInt64();
+ ptr += offset();
+ if (!ptr.isValid()) {
+ return this;
+ }
+ return MConstant::NewInt64(alloc, ptr.value());
+}
+
+bool MWasmAlignmentCheck::congruentTo(const MDefinition* ins) const {
+ if (!ins->isWasmAlignmentCheck()) {
+ return false;
+ }
+ const MWasmAlignmentCheck* check = ins->toWasmAlignmentCheck();
+ return byteSize_ == check->byteSize() && congruentIfOperandsEqual(check);
+}
+
+MDefinition::AliasType MAsmJSLoadHeap::mightAlias(
+ const MDefinition* def) const {
+ if (def->isAsmJSStoreHeap()) {
+ const MAsmJSStoreHeap* store = def->toAsmJSStoreHeap();
+ if (store->accessType() != accessType()) {
+ return AliasType::MayAlias;
+ }
+ if (!base()->isConstant() || !store->base()->isConstant()) {
+ return AliasType::MayAlias;
+ }
+ const MConstant* otherBase = store->base()->toConstant();
+ if (base()->toConstant()->equals(otherBase)) {
+ return AliasType::MayAlias;
+ }
+ return AliasType::NoAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+bool MAsmJSLoadHeap::congruentTo(const MDefinition* ins) const {
+ if (!ins->isAsmJSLoadHeap()) {
+ return false;
+ }
+ const MAsmJSLoadHeap* load = ins->toAsmJSLoadHeap();
+ return load->accessType() == accessType() && congruentIfOperandsEqual(load);
+}
+
+MDefinition::AliasType MWasmLoadInstanceDataField::mightAlias(
+ const MDefinition* def) const {
+ if (def->isWasmStoreInstanceDataField()) {
+ const MWasmStoreInstanceDataField* store =
+ def->toWasmStoreInstanceDataField();
+ return store->instanceDataOffset() == instanceDataOffset_
+ ? AliasType::MayAlias
+ : AliasType::NoAlias;
+ }
+
+ return AliasType::MayAlias;
+}
+
+MDefinition::AliasType MWasmLoadGlobalCell::mightAlias(
+ const MDefinition* def) const {
+ if (def->isWasmStoreGlobalCell()) {
+ // No globals of different type can alias. See bug 1467415 comment 3.
+ if (type() != def->toWasmStoreGlobalCell()->value()->type()) {
+ return AliasType::NoAlias;
+ }
+
+ // We could do better here. We're dealing with two indirect globals.
+    // If at least one of them is created in this module, then they
+ // can't alias -- in other words they can only alias if they are both
+ // imported. That would require having a flag on globals to indicate
+ // which are imported. See bug 1467415 comment 3, 4th rule.
+ }
+
+ return AliasType::MayAlias;
+}
+
+HashNumber MWasmLoadInstanceDataField::valueHash() const {
+ // Same comment as in MWasmLoadInstanceDataField::congruentTo() applies here.
+ HashNumber hash = MDefinition::valueHash();
+ hash = addU32ToHash(hash, instanceDataOffset_);
+ return hash;
+}
+
+bool MWasmLoadInstanceDataField::congruentTo(const MDefinition* ins) const {
+ if (!ins->isWasmLoadInstanceDataField()) {
+ return false;
+ }
+
+ const MWasmLoadInstanceDataField* other = ins->toWasmLoadInstanceDataField();
+
+ // We don't need to consider the isConstant_ markings here, because
+ // equivalence of offsets implies equivalence of constness.
+ bool sameOffsets = instanceDataOffset_ == other->instanceDataOffset_;
+ MOZ_ASSERT_IF(sameOffsets, isConstant_ == other->isConstant_);
+
+ // We omit checking congruence of the operands. There is only one
+ // operand, the instance pointer, and it only ever has one value within the
+ // domain of optimization. If that should ever change then operand
+ // congruence checking should be reinstated.
+ return sameOffsets /* && congruentIfOperandsEqual(other) */;
+}
+
+MDefinition* MWasmLoadInstanceDataField::foldsTo(TempAllocator& alloc) {
+ if (!dependency() || !dependency()->isWasmStoreInstanceDataField()) {
+ return this;
+ }
+
+ MWasmStoreInstanceDataField* store =
+ dependency()->toWasmStoreInstanceDataField();
+ if (!store->block()->dominates(block())) {
+ return this;
+ }
+
+ if (store->instanceDataOffset() != instanceDataOffset()) {
+ return this;
+ }
+
+ if (store->value()->type() != type()) {
+ return this;
+ }
+
+ return store->value();
+}
+
+bool MWasmLoadGlobalCell::congruentTo(const MDefinition* ins) const {
+ if (!ins->isWasmLoadGlobalCell()) {
+ return false;
+ }
+ const MWasmLoadGlobalCell* other = ins->toWasmLoadGlobalCell();
+ return congruentIfOperandsEqual(other);
+}
+
+#ifdef ENABLE_WASM_SIMD
+MDefinition* MWasmTernarySimd128::foldsTo(TempAllocator& alloc) {
+ if (simdOp() == wasm::SimdOp::V128Bitselect) {
+ if (v2()->op() == MDefinition::Opcode::WasmFloatConstant) {
+ int8_t shuffle[16];
+ if (specializeBitselectConstantMaskAsShuffle(shuffle)) {
+ return BuildWasmShuffleSimd128(alloc, shuffle, v0(), v1());
+ }
+ } else if (canRelaxBitselect()) {
+ return MWasmTernarySimd128::New(alloc, v0(), v1(), v2(),
+ wasm::SimdOp::I8x16RelaxedLaneSelect);
+ }
+ }
+ return this;
+}
+
+inline static bool MatchSpecificShift(MDefinition* instr,
+ wasm::SimdOp simdShiftOp,
+ int shiftValue) {
+ return instr->isWasmShiftSimd128() &&
+ instr->toWasmShiftSimd128()->simdOp() == simdShiftOp &&
+ instr->toWasmShiftSimd128()->rhs()->isConstant() &&
+ instr->toWasmShiftSimd128()->rhs()->toConstant()->toInt32() ==
+ shiftValue;
+}
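+// For example, when called with I16x8ShrS and shiftValue 8, the helper above
+// matches the MIR produced for (i16x8.shr_s v (i32.const 8)).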
+
+// Matches an MIR subtree that represents the PMADDUBSW instruction generated
+// by Emscripten. The a and b out-parameters return the subtrees that
+// correspond to the operands of the instruction, if a match is found.
+static bool MatchPmaddubswSequence(MWasmBinarySimd128* lhs,
+ MWasmBinarySimd128* rhs, MDefinition** a,
+ MDefinition** b) {
+ MOZ_ASSERT(lhs->simdOp() == wasm::SimdOp::I16x8Mul &&
+ rhs->simdOp() == wasm::SimdOp::I16x8Mul);
+  // Emscripten/LLVM produces the following sequence for _mm_maddubs_epi16:
+ //
+ // return _mm_adds_epi16(
+ // _mm_mullo_epi16(
+ // _mm_and_si128(__a, _mm_set1_epi16(0x00FF)),
+ // _mm_srai_epi16(_mm_slli_epi16(__b, 8), 8)),
+ // _mm_mullo_epi16(_mm_srli_epi16(__a, 8), _mm_srai_epi16(__b, 8)));
+ //
+  // This roughly corresponds to the following MIR:
+ // MWasmBinarySimd128[I16x8AddSatS]
+ // |-- lhs: MWasmBinarySimd128[I16x8Mul] (lhs)
+ // | |-- lhs: MWasmBinarySimd128WithConstant[V128And] (op0)
+ // | | |-- lhs: a
+ // | | -- rhs: SimdConstant::SplatX8(0x00FF)
+ // | -- rhs: MWasmShiftSimd128[I16x8ShrS] (op1)
+ // | |-- lhs: MWasmShiftSimd128[I16x8Shl]
+ // | | |-- lhs: b
+ // | | -- rhs: MConstant[8]
+ // | -- rhs: MConstant[8]
+ // -- rhs: MWasmBinarySimd128[I16x8Mul] (rhs)
+ // |-- lhs: MWasmShiftSimd128[I16x8ShrU] (op2)
+ // | |-- lhs: a
+ // | |-- rhs: MConstant[8]
+ // -- rhs: MWasmShiftSimd128[I16x8ShrS] (op3)
+ // |-- lhs: b
+ // -- rhs: MConstant[8]
+
+ // The I16x8AddSatS and I16x8Mul are commutative, so their operands
+ // may be swapped. Rearrange op0, op1, op2, op3 to be in the order
+ // noted above.
+ MDefinition *op0 = lhs->lhs(), *op1 = lhs->rhs(), *op2 = rhs->lhs(),
+ *op3 = rhs->rhs();
+ if (op1->isWasmBinarySimd128WithConstant()) {
+ // Move MWasmBinarySimd128WithConstant[V128And] as first operand in lhs.
+ std::swap(op0, op1);
+ } else if (op3->isWasmBinarySimd128WithConstant()) {
+ // Move MWasmBinarySimd128WithConstant[V128And] as first operand in rhs.
+ std::swap(op2, op3);
+ }
+ if (op2->isWasmBinarySimd128WithConstant()) {
+    // The lhs and rhs are swapped.
+    // Make MWasmBinarySimd128WithConstant[V128And] be op0.
+ std::swap(op0, op2);
+ std::swap(op1, op3);
+ }
+ if (op2->isWasmShiftSimd128() &&
+ op2->toWasmShiftSimd128()->simdOp() == wasm::SimdOp::I16x8ShrS) {
+    // op2 and op3 appear to be in the wrong order; swap them.
+ std::swap(op2, op3);
+ }
+
+  // Check the SIMD opcodes and constant values of the instructions assigned
+  // to op0, op1, op2, op3 (see the diagram above).
+ const uint16_t const00FF[8] = {255, 255, 255, 255, 255, 255, 255, 255};
+ if (!op0->isWasmBinarySimd128WithConstant() ||
+ op0->toWasmBinarySimd128WithConstant()->simdOp() !=
+ wasm::SimdOp::V128And ||
+ memcmp(op0->toWasmBinarySimd128WithConstant()->rhs().bytes(), const00FF,
+ 16) != 0 ||
+ !MatchSpecificShift(op1, wasm::SimdOp::I16x8ShrS, 8) ||
+ !MatchSpecificShift(op2, wasm::SimdOp::I16x8ShrU, 8) ||
+ !MatchSpecificShift(op3, wasm::SimdOp::I16x8ShrS, 8) ||
+ !MatchSpecificShift(op1->toWasmShiftSimd128()->lhs(),
+ wasm::SimdOp::I16x8Shl, 8)) {
+ return false;
+ }
+
+  // Check that the instructions' subtree arguments match the a and b
+  // assignments. This may depend on GVN behavior.
+ MDefinition* maybeA = op0->toWasmBinarySimd128WithConstant()->lhs();
+ MDefinition* maybeB = op3->toWasmShiftSimd128()->lhs();
+ if (maybeA != op2->toWasmShiftSimd128()->lhs() ||
+ maybeB != op1->toWasmShiftSimd128()->lhs()->toWasmShiftSimd128()->lhs()) {
+ return false;
+ }
+
+ *a = maybeA;
+ *b = maybeB;
+ return true;
+}
+
+MDefinition* MWasmBinarySimd128::foldsTo(TempAllocator& alloc) {
+ if (simdOp() == wasm::SimdOp::I8x16Swizzle && rhs()->isWasmFloatConstant()) {
+ // Specialize swizzle(v, constant) as shuffle(mask, v, zero) to trigger all
+ // our shuffle optimizations. We don't report this rewriting as the report
+ // will be overwritten by the subsequent shuffle analysis.
+ int8_t shuffleMask[16];
+ memcpy(shuffleMask, rhs()->toWasmFloatConstant()->toSimd128().bytes(), 16);
+ for (int i = 0; i < 16; i++) {
+ // Out-of-bounds lanes reference the zero vector; in many cases, the zero
+ // vector is removed by subsequent optimizations.
+ if (shuffleMask[i] < 0 || shuffleMask[i] > 15) {
+ shuffleMask[i] = 16;
+ }
+ }
+ MWasmFloatConstant* zero =
+ MWasmFloatConstant::NewSimd128(alloc, SimdConstant::SplatX4(0));
+ if (!zero) {
+ return nullptr;
+ }
+ block()->insertBefore(this, zero);
+ return BuildWasmShuffleSimd128(alloc, shuffleMask, lhs(), zero);
+ }
+
+ // Specialize var OP const / const OP var when possible.
+ //
+  // As the LIR layer can't directly handle v128 constants as part of its
+  // normal machinery, we specialize some nodes here if they have single-use
+  // v128 constant arguments. The purpose is to generate code that inlines the
+  // constant in the instruction stream, using either a rip-relative load+op or
+  // a quickly-synthesized constant in a scratch register on x64. The general
+  // assumption is that this is better than generating the constant into an
+  // allocatable register, since that register value could not be reused.
+  // (This ignores the possibility that the constant load could be hoisted.)
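+  //
+  // For example, when one operand is a single-use v128 constant and the
+  // operation supports it (specializeForConstantRhs()), the node is rewritten
+  // below into MWasmBinarySimd128WithConstant, keeping the constant out of an
+  // allocatable register.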
+
+ if (lhs()->isWasmFloatConstant() != rhs()->isWasmFloatConstant() &&
+ specializeForConstantRhs()) {
+ if (isCommutative() && lhs()->isWasmFloatConstant() && lhs()->hasOneUse()) {
+ return MWasmBinarySimd128WithConstant::New(
+ alloc, rhs(), lhs()->toWasmFloatConstant()->toSimd128(), simdOp());
+ }
+
+ if (rhs()->isWasmFloatConstant() && rhs()->hasOneUse()) {
+ return MWasmBinarySimd128WithConstant::New(
+ alloc, lhs(), rhs()->toWasmFloatConstant()->toSimd128(), simdOp());
+ }
+ }
+
+ // Check special encoding for PMADDUBSW.
+ if (canPmaddubsw() && simdOp() == wasm::SimdOp::I16x8AddSatS &&
+ lhs()->isWasmBinarySimd128() && rhs()->isWasmBinarySimd128() &&
+ lhs()->toWasmBinarySimd128()->simdOp() == wasm::SimdOp::I16x8Mul &&
+ rhs()->toWasmBinarySimd128()->simdOp() == wasm::SimdOp::I16x8Mul) {
+ MDefinition *a, *b;
+ if (MatchPmaddubswSequence(lhs()->toWasmBinarySimd128(),
+ rhs()->toWasmBinarySimd128(), &a, &b)) {
+ return MWasmBinarySimd128::New(alloc, a, b, /* commutative = */ false,
+ wasm::SimdOp::MozPMADDUBSW);
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MWasmScalarToSimd128::foldsTo(TempAllocator& alloc) {
+# ifdef DEBUG
+ auto logging = mozilla::MakeScopeExit([&] {
+ js::wasm::ReportSimdAnalysis("scalar-to-simd128 -> constant folded");
+ });
+# endif
+ if (input()->isConstant()) {
+ MConstant* c = input()->toConstant();
+ switch (simdOp()) {
+ case wasm::SimdOp::I8x16Splat:
+ return MWasmFloatConstant::NewSimd128(
+ alloc, SimdConstant::SplatX16(c->toInt32()));
+ case wasm::SimdOp::I16x8Splat:
+ return MWasmFloatConstant::NewSimd128(
+ alloc, SimdConstant::SplatX8(c->toInt32()));
+ case wasm::SimdOp::I32x4Splat:
+ return MWasmFloatConstant::NewSimd128(
+ alloc, SimdConstant::SplatX4(c->toInt32()));
+ case wasm::SimdOp::I64x2Splat:
+ return MWasmFloatConstant::NewSimd128(
+ alloc, SimdConstant::SplatX2(c->toInt64()));
+ default:
+# ifdef DEBUG
+ logging.release();
+# endif
+ return this;
+ }
+ }
+ if (input()->isWasmFloatConstant()) {
+ MWasmFloatConstant* c = input()->toWasmFloatConstant();
+ switch (simdOp()) {
+ case wasm::SimdOp::F32x4Splat:
+ return MWasmFloatConstant::NewSimd128(
+ alloc, SimdConstant::SplatX4(c->toFloat32()));
+ case wasm::SimdOp::F64x2Splat:
+ return MWasmFloatConstant::NewSimd128(
+ alloc, SimdConstant::SplatX2(c->toDouble()));
+ default:
+# ifdef DEBUG
+ logging.release();
+# endif
+ return this;
+ }
+ }
+# ifdef DEBUG
+ logging.release();
+# endif
+ return this;
+}
+
+template <typename T>
+static bool AllTrue(const T& v) {
+ constexpr size_t count = sizeof(T) / sizeof(*v);
+ static_assert(count == 16 || count == 8 || count == 4 || count == 2);
+ bool result = true;
+ for (unsigned i = 0; i < count; i++) {
+ result = result && v[i] != 0;
+ }
+ return result;
+}
+
+template <typename T>
+static int32_t Bitmask(const T& v) {
+ constexpr size_t count = sizeof(T) / sizeof(*v);
+ constexpr size_t shift = 8 * sizeof(*v) - 1;
+ static_assert(shift == 7 || shift == 15 || shift == 31 || shift == 63);
+ int32_t result = 0;
+ for (unsigned i = 0; i < count; i++) {
+ result = result | int32_t(((v[i] >> shift) & 1) << i);
+ }
+ return result;
+}
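+
+// AllTrue and Bitmask mirror the wasm all_true and bitmask reductions on a
+// constant vector: for instance, Bitmask over an int8x16 vector packs the 16
+// lane sign bits into bits 0..15 of the result.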
+
+MDefinition* MWasmReduceSimd128::foldsTo(TempAllocator& alloc) {
+# ifdef DEBUG
+ auto logging = mozilla::MakeScopeExit([&] {
+ js::wasm::ReportSimdAnalysis("simd128-to-scalar -> constant folded");
+ });
+# endif
+ if (input()->isWasmFloatConstant()) {
+ SimdConstant c = input()->toWasmFloatConstant()->toSimd128();
+ int32_t i32Result = 0;
+ switch (simdOp()) {
+ case wasm::SimdOp::V128AnyTrue:
+ i32Result = !c.isZeroBits();
+ break;
+ case wasm::SimdOp::I8x16AllTrue:
+ i32Result = AllTrue(
+ SimdConstant::CreateSimd128((int8_t*)c.bytes()).asInt8x16());
+ break;
+ case wasm::SimdOp::I8x16Bitmask:
+ i32Result = Bitmask(
+ SimdConstant::CreateSimd128((int8_t*)c.bytes()).asInt8x16());
+ break;
+ case wasm::SimdOp::I16x8AllTrue:
+ i32Result = AllTrue(
+ SimdConstant::CreateSimd128((int16_t*)c.bytes()).asInt16x8());
+ break;
+ case wasm::SimdOp::I16x8Bitmask:
+ i32Result = Bitmask(
+ SimdConstant::CreateSimd128((int16_t*)c.bytes()).asInt16x8());
+ break;
+ case wasm::SimdOp::I32x4AllTrue:
+ i32Result = AllTrue(
+ SimdConstant::CreateSimd128((int32_t*)c.bytes()).asInt32x4());
+ break;
+ case wasm::SimdOp::I32x4Bitmask:
+ i32Result = Bitmask(
+ SimdConstant::CreateSimd128((int32_t*)c.bytes()).asInt32x4());
+ break;
+ case wasm::SimdOp::I64x2AllTrue:
+ i32Result = AllTrue(
+ SimdConstant::CreateSimd128((int64_t*)c.bytes()).asInt64x2());
+ break;
+ case wasm::SimdOp::I64x2Bitmask:
+ i32Result = Bitmask(
+ SimdConstant::CreateSimd128((int64_t*)c.bytes()).asInt64x2());
+ break;
+ case wasm::SimdOp::I8x16ExtractLaneS:
+ i32Result =
+ SimdConstant::CreateSimd128((int8_t*)c.bytes()).asInt8x16()[imm()];
+ break;
+ case wasm::SimdOp::I8x16ExtractLaneU:
+ i32Result = int32_t(SimdConstant::CreateSimd128((int8_t*)c.bytes())
+ .asInt8x16()[imm()]) &
+ 0xFF;
+ break;
+ case wasm::SimdOp::I16x8ExtractLaneS:
+ i32Result =
+ SimdConstant::CreateSimd128((int16_t*)c.bytes()).asInt16x8()[imm()];
+ break;
+ case wasm::SimdOp::I16x8ExtractLaneU:
+ i32Result = int32_t(SimdConstant::CreateSimd128((int16_t*)c.bytes())
+ .asInt16x8()[imm()]) &
+ 0xFFFF;
+ break;
+ case wasm::SimdOp::I32x4ExtractLane:
+ i32Result =
+ SimdConstant::CreateSimd128((int32_t*)c.bytes()).asInt32x4()[imm()];
+ break;
+ case wasm::SimdOp::I64x2ExtractLane:
+ return MConstant::NewInt64(
+ alloc, SimdConstant::CreateSimd128((int64_t*)c.bytes())
+ .asInt64x2()[imm()]);
+ case wasm::SimdOp::F32x4ExtractLane:
+ return MWasmFloatConstant::NewFloat32(
+ alloc, SimdConstant::CreateSimd128((float*)c.bytes())
+ .asFloat32x4()[imm()]);
+ case wasm::SimdOp::F64x2ExtractLane:
+ return MWasmFloatConstant::NewDouble(
+ alloc, SimdConstant::CreateSimd128((double*)c.bytes())
+ .asFloat64x2()[imm()]);
+ default:
+# ifdef DEBUG
+ logging.release();
+# endif
+ return this;
+ }
+ return MConstant::New(alloc, Int32Value(i32Result), MIRType::Int32);
+ }
+# ifdef DEBUG
+ logging.release();
+# endif
+ return this;
+}
+#endif // ENABLE_WASM_SIMD
+
+MDefinition::AliasType MLoadDynamicSlot::mightAlias(
+ const MDefinition* def) const {
+ if (def->isStoreDynamicSlot()) {
+ const MStoreDynamicSlot* store = def->toStoreDynamicSlot();
+ if (store->slot() != slot()) {
+ return AliasType::NoAlias;
+ }
+
+ if (store->slots() != slots()) {
+ return AliasType::MayAlias;
+ }
+
+ return AliasType::MustAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+HashNumber MLoadDynamicSlot::valueHash() const {
+ HashNumber hash = MDefinition::valueHash();
+ hash = addU32ToHash(hash, slot_);
+ return hash;
+}
+
+MDefinition* MLoadDynamicSlot::foldsTo(TempAllocator& alloc) {
+ if (MDefinition* def = foldsToStore(alloc)) {
+ return def;
+ }
+
+ return this;
+}
+
+#ifdef JS_JITSPEW
+void MLoadDynamicSlot::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" (slot %u)", slot());
+}
+
+void MLoadDynamicSlotAndUnbox::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" (slot %zu)", slot());
+}
+
+void MStoreDynamicSlot::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" (slot %u)", slot());
+}
+
+void MLoadFixedSlot::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" (slot %zu)", slot());
+}
+
+void MLoadFixedSlotAndUnbox::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" (slot %zu)", slot());
+}
+
+void MStoreFixedSlot::printOpcode(GenericPrinter& out) const {
+ MDefinition::printOpcode(out);
+ out.printf(" (slot %zu)", slot());
+}
+#endif
+
+MDefinition* MGuardFunctionScript::foldsTo(TempAllocator& alloc) {
+ MDefinition* in = input();
+ if (in->isLambda() &&
+ in->toLambda()->templateFunction()->baseScript() == expected()) {
+ return in;
+ }
+ return this;
+}
+
+MDefinition* MFunctionEnvironment::foldsTo(TempAllocator& alloc) {
+ if (input()->isLambda()) {
+ return input()->toLambda()->environmentChain();
+ }
+ if (input()->isFunctionWithProto()) {
+ return input()->toFunctionWithProto()->environmentChain();
+ }
+ return this;
+}
+
+static bool AddIsANonZeroAdditionOf(MAdd* add, MDefinition* ins) {
+ if (add->lhs() != ins && add->rhs() != ins) {
+ return false;
+ }
+ MDefinition* other = (add->lhs() == ins) ? add->rhs() : add->lhs();
+ if (!IsNumberType(other->type())) {
+ return false;
+ }
+ if (!other->isConstant()) {
+ return false;
+ }
+ if (other->toConstant()->numberToDouble() == 0) {
+ return false;
+ }
+ return true;
+}
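+
+// For example, AddIsANonZeroAdditionOf matches (i + 1) against i, but not
+// (i + 0) or (i + j) with a non-constant j.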
+
+// Skip over instructions that usually appear between the actual index
+// value being used and the MLoadElement.
+// They don't modify the index value in a meaningful way.
+static MDefinition* SkipUninterestingInstructions(MDefinition* ins) {
+ // Drop the MToNumberInt32 added by the TypePolicy for double and float
+ // values.
+ if (ins->isToNumberInt32()) {
+ return SkipUninterestingInstructions(ins->toToNumberInt32()->input());
+ }
+
+  // Ignore the bounds check, which doesn't modify the index.
+ if (ins->isBoundsCheck()) {
+ return SkipUninterestingInstructions(ins->toBoundsCheck()->index());
+ }
+
+ // Masking the index for Spectre-mitigation is not observable.
+ if (ins->isSpectreMaskIndex()) {
+ return SkipUninterestingInstructions(ins->toSpectreMaskIndex()->index());
+ }
+
+ return ins;
+}
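+
+// For example, the helper above resolves
+// SpectreMaskIndex(BoundsCheck(ToNumberInt32(i))) back to the underlying i.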
+
+static bool DefinitelyDifferentValue(MDefinition* ins1, MDefinition* ins2) {
+ ins1 = SkipUninterestingInstructions(ins1);
+ ins2 = SkipUninterestingInstructions(ins2);
+
+ if (ins1 == ins2) {
+ return false;
+ }
+
+  // For constants, check that they are not equal.
+ if (ins1->isConstant() && ins2->isConstant()) {
+ MConstant* cst1 = ins1->toConstant();
+ MConstant* cst2 = ins2->toConstant();
+
+ if (!cst1->isTypeRepresentableAsDouble() ||
+ !cst2->isTypeRepresentableAsDouble()) {
+ return false;
+ }
+
+ // Be conservative and only allow values that fit into int32.
+ int32_t n1, n2;
+ if (!mozilla::NumberIsInt32(cst1->numberToDouble(), &n1) ||
+ !mozilla::NumberIsInt32(cst2->numberToDouble(), &n2)) {
+ return false;
+ }
+
+ return n1 != n2;
+ }
+
+  // Check if "ins1 = ins2 + constant" with a non-zero constant, which would
+  // make the two instructions have different values.
+ if (ins1->isAdd()) {
+ if (AddIsANonZeroAdditionOf(ins1->toAdd(), ins2)) {
+ return true;
+ }
+ }
+ if (ins2->isAdd()) {
+ if (AddIsANonZeroAdditionOf(ins2->toAdd(), ins1)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+MDefinition::AliasType MLoadElement::mightAlias(const MDefinition* def) const {
+ if (def->isStoreElement()) {
+ const MStoreElement* store = def->toStoreElement();
+ if (store->index() != index()) {
+ if (DefinitelyDifferentValue(store->index(), index())) {
+ return AliasType::NoAlias;
+ }
+ return AliasType::MayAlias;
+ }
+
+ if (store->elements() != elements()) {
+ return AliasType::MayAlias;
+ }
+
+ return AliasType::MustAlias;
+ }
+ return AliasType::MayAlias;
+}
+
+MDefinition* MLoadElement::foldsTo(TempAllocator& alloc) {
+ if (MDefinition* def = foldsToStore(alloc)) {
+ return def;
+ }
+
+ return this;
+}
+
+MDefinition* MWasmUnsignedToDouble::foldsTo(TempAllocator& alloc) {
+ if (input()->isConstant()) {
+ return MConstant::New(
+ alloc, DoubleValue(uint32_t(input()->toConstant()->toInt32())));
+ }
+
+ return this;
+}
+
+MDefinition* MWasmUnsignedToFloat32::foldsTo(TempAllocator& alloc) {
+ if (input()->isConstant()) {
+ double dval = double(uint32_t(input()->toConstant()->toInt32()));
+ if (IsFloat32Representable(dval)) {
+ return MConstant::NewFloat32(alloc, float(dval));
+ }
+ }
+
+ return this;
+}
+
+MWasmCallCatchable* MWasmCallCatchable::New(TempAllocator& alloc,
+ const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee,
+ const Args& args,
+ uint32_t stackArgAreaSizeUnaligned,
+ const MWasmCallTryDesc& tryDesc,
+ MDefinition* tableIndexOrRef) {
+ MOZ_ASSERT(tryDesc.inTry);
+
+ MWasmCallCatchable* call = new (alloc) MWasmCallCatchable(
+ desc, callee, stackArgAreaSizeUnaligned, tryDesc.tryNoteIndex);
+
+ call->setSuccessor(FallthroughBranchIndex, tryDesc.fallthroughBlock);
+ call->setSuccessor(PrePadBranchIndex, tryDesc.prePadBlock);
+
+ MOZ_ASSERT_IF(callee.isTable() || callee.isFuncRef(), tableIndexOrRef);
+ if (!call->initWithArgs(alloc, call, args, tableIndexOrRef)) {
+ return nullptr;
+ }
+
+ return call;
+}
+
+MWasmCallUncatchable* MWasmCallUncatchable::New(
+ TempAllocator& alloc, const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee, const Args& args,
+ uint32_t stackArgAreaSizeUnaligned, MDefinition* tableIndexOrRef) {
+ MWasmCallUncatchable* call =
+ new (alloc) MWasmCallUncatchable(desc, callee, stackArgAreaSizeUnaligned);
+
+ MOZ_ASSERT_IF(callee.isTable() || callee.isFuncRef(), tableIndexOrRef);
+ if (!call->initWithArgs(alloc, call, args, tableIndexOrRef)) {
+ return nullptr;
+ }
+
+ return call;
+}
+
+MWasmCallUncatchable* MWasmCallUncatchable::NewBuiltinInstanceMethodCall(
+ TempAllocator& alloc, const wasm::CallSiteDesc& desc,
+ const wasm::SymbolicAddress builtin, wasm::FailureMode failureMode,
+ const ABIArg& instanceArg, const Args& args,
+ uint32_t stackArgAreaSizeUnaligned) {
+ auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
+ MWasmCallUncatchable* call = MWasmCallUncatchable::New(
+ alloc, desc, callee, args, stackArgAreaSizeUnaligned, nullptr);
+ if (!call) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(instanceArg != ABIArg());
+ call->instanceArg_ = instanceArg;
+ call->builtinMethodFailureMode_ = failureMode;
+ return call;
+}
+
+void MSqrt::trySpecializeFloat32(TempAllocator& alloc) {
+ if (EnsureFloatConsumersAndInputOrConvert(this, alloc)) {
+ setResultType(MIRType::Float32);
+ specialization_ = MIRType::Float32;
+ }
+}
+
+MDefinition* MClz::foldsTo(TempAllocator& alloc) {
+ if (num()->isConstant()) {
+ MConstant* c = num()->toConstant();
+ if (type() == MIRType::Int32) {
+ int32_t n = c->toInt32();
+ if (n == 0) {
+ return MConstant::New(alloc, Int32Value(32));
+ }
+ return MConstant::New(alloc,
+ Int32Value(mozilla::CountLeadingZeroes32(n)));
+ }
+ int64_t n = c->toInt64();
+ if (n == 0) {
+ return MConstant::NewInt64(alloc, int64_t(64));
+ }
+ return MConstant::NewInt64(alloc,
+ int64_t(mozilla::CountLeadingZeroes64(n)));
+ }
+
+ return this;
+}
+
+MDefinition* MCtz::foldsTo(TempAllocator& alloc) {
+ if (num()->isConstant()) {
+ MConstant* c = num()->toConstant();
+ if (type() == MIRType::Int32) {
+ int32_t n = num()->toConstant()->toInt32();
+ if (n == 0) {
+ return MConstant::New(alloc, Int32Value(32));
+ }
+ return MConstant::New(alloc,
+ Int32Value(mozilla::CountTrailingZeroes32(n)));
+ }
+ int64_t n = c->toInt64();
+ if (n == 0) {
+ return MConstant::NewInt64(alloc, int64_t(64));
+ }
+ return MConstant::NewInt64(alloc,
+ int64_t(mozilla::CountTrailingZeroes64(n)));
+ }
+
+ return this;
+}
+
+MDefinition* MPopcnt::foldsTo(TempAllocator& alloc) {
+ if (num()->isConstant()) {
+ MConstant* c = num()->toConstant();
+ if (type() == MIRType::Int32) {
+ int32_t n = num()->toConstant()->toInt32();
+ return MConstant::New(alloc, Int32Value(mozilla::CountPopulation32(n)));
+ }
+ int64_t n = c->toInt64();
+ return MConstant::NewInt64(alloc, int64_t(mozilla::CountPopulation64(n)));
+ }
+
+ return this;
+}
+
+MDefinition* MBoundsCheck::foldsTo(TempAllocator& alloc) {
+ if (type() == MIRType::Int32 && index()->isConstant() &&
+ length()->isConstant()) {
+ uint32_t len = length()->toConstant()->toInt32();
+ uint32_t idx = index()->toConstant()->toInt32();
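+    // The check is redundant only if every index in the inclusive range
+    // [idx + minimum(), idx + maximum()] is below the constant length.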
+ if (idx + uint32_t(minimum()) < len && idx + uint32_t(maximum()) < len) {
+ return index();
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MTableSwitch::foldsTo(TempAllocator& alloc) {
+ MDefinition* op = getOperand(0);
+
+ // If we only have one successor, convert to a plain goto to the only
+ // successor. TableSwitch indices are numeric; other types will always go to
+ // the only successor.
+ if (numSuccessors() == 1 ||
+ (op->type() != MIRType::Value && !IsNumberType(op->type()))) {
+ return MGoto::New(alloc, getDefault());
+ }
+
+ if (MConstant* opConst = op->maybeConstantValue()) {
+ if (op->type() == MIRType::Int32) {
+ int32_t i = opConst->toInt32() - low_;
+ MBasicBlock* target;
+ if (size_t(i) < numCases()) {
+ target = getCase(size_t(i));
+ } else {
+ target = getDefault();
+ }
+ MOZ_ASSERT(target);
+ return MGoto::New(alloc, target);
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MArrayJoin::foldsTo(TempAllocator& alloc) {
+ MDefinition* arr = array();
+
+ if (!arr->isStringSplit()) {
+ return this;
+ }
+
+ setRecoveredOnBailout();
+ if (arr->hasLiveDefUses()) {
+ setNotRecoveredOnBailout();
+ return this;
+ }
+
+ // The MStringSplit won't generate any code.
+ arr->setRecoveredOnBailout();
+
+  // We're replacing foo.split(bar).join(baz) with foo.replace(bar, baz).
+  // The MStringSplit could be recovered by a bailout: as we are removing its
+  // last use, and its result could be captured by a resume point, this
+  // MStringSplit will be executed on the bailout path.
+ MDefinition* string = arr->toStringSplit()->string();
+ MDefinition* pattern = arr->toStringSplit()->separator();
+ MDefinition* replacement = sep();
+
+ MStringReplace* substr =
+ MStringReplace::New(alloc, string, pattern, replacement);
+ substr->setFlatReplacement();
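+  // The flat replacement performs a literal (non-regexp) replacement of the
+  // separator, which is exactly what split/join requires.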
+ return substr;
+}
+
+MDefinition* MGetFirstDollarIndex::foldsTo(TempAllocator& alloc) {
+ MDefinition* strArg = str();
+ if (!strArg->isConstant()) {
+ return this;
+ }
+
+ JSLinearString* str = &strArg->toConstant()->toString()->asLinear();
+ int32_t index = GetFirstDollarIndexRawFlat(str);
+ return MConstant::New(alloc, Int32Value(index));
+}
+
+AliasSet MThrowRuntimeLexicalError::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ExceptionState);
+}
+
+AliasSet MSlots::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+MDefinition::AliasType MSlots::mightAlias(const MDefinition* store) const {
+  // ArrayPush only modifies object elements, not object slots.
+ if (store->isArrayPush()) {
+ return AliasType::NoAlias;
+ }
+ return MInstruction::mightAlias(store);
+}
+
+AliasSet MElements::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MInitializedLength::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MSetInitializedLength::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ObjectFields);
+}
+
+AliasSet MArrayLength::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MSetArrayLength::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ObjectFields);
+}
+
+AliasSet MFunctionLength::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot);
+}
+
+AliasSet MFunctionName::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot);
+}
+
+AliasSet MArrayBufferByteLength::getAliasSet() const {
+ return AliasSet::Load(AliasSet::FixedSlot);
+}
+
+AliasSet MArrayBufferViewLength::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ArrayBufferViewLengthOrOffset);
+}
+
+AliasSet MArrayBufferViewByteOffset::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ArrayBufferViewLengthOrOffset);
+}
+
+AliasSet MArrayBufferViewElements::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MGuardHasAttachedArrayBuffer::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot);
+}
+
+AliasSet MArrayPush::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
+}
+
+MDefinition* MGuardNumberToIntPtrIndex::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = this->input();
+
+ if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
+ return MInt32ToIntPtr::New(alloc, input->getOperand(0));
+ }
+
+ if (!input->isConstant()) {
+ return this;
+ }
+
+ // Fold constant double representable as intptr to intptr.
+ int64_t ival;
+ if (!mozilla::NumberEqualsInt64(input->toConstant()->toDouble(), &ival)) {
+    // If not representable as an int64, this access is equivalent to an OOB
+    // access. So replace it with a known int64/intptr value which also
+    // produces an OOB access. If we don't support OOB accesses, we have to
+    // bail out.
+ if (!supportOOB()) {
+ return this;
+ }
+ ival = -1;
+ }
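+  // An intptr index of -1 is always out of bounds, so the OOB behavior of the
+  // original access is preserved.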
+
+ if (ival < INTPTR_MIN || ival > INTPTR_MAX) {
+ return this;
+ }
+
+ return MConstant::NewIntPtr(alloc, intptr_t(ival));
+}
+
+MDefinition* MIsObject::foldsTo(TempAllocator& alloc) {
+ if (!object()->isBox()) {
+ return this;
+ }
+
+ MDefinition* unboxed = object()->getOperand(0);
+ if (unboxed->type() == MIRType::Object) {
+ return MConstant::New(alloc, BooleanValue(true));
+ }
+
+ return this;
+}
+
+MDefinition* MIsNullOrUndefined::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = value();
+ if (input->isBox()) {
+ input = input->toBox()->input();
+ }
+
+ if (input->definitelyType({MIRType::Null, MIRType::Undefined})) {
+ return MConstant::New(alloc, BooleanValue(true));
+ }
+
+ if (!input->mightBeType(MIRType::Null) &&
+ !input->mightBeType(MIRType::Undefined)) {
+ return MConstant::New(alloc, BooleanValue(false));
+ }
+
+ return this;
+}
+
+AliasSet MHomeObjectSuperBase::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+MDefinition* MGuardValue::foldsTo(TempAllocator& alloc) {
+ if (MConstant* cst = value()->maybeConstantValue()) {
+ if (cst->toJSValue() == expected()) {
+ return value();
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MGuardNullOrUndefined::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = value();
+ if (input->isBox()) {
+ input = input->toBox()->input();
+ }
+
+ if (input->definitelyType({MIRType::Null, MIRType::Undefined})) {
+ return value();
+ }
+
+ return this;
+}
+
+MDefinition* MGuardIsNotObject::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = value();
+ if (input->isBox()) {
+ input = input->toBox()->input();
+ }
+
+ if (!input->mightBeType(MIRType::Object)) {
+ return value();
+ }
+
+ return this;
+}
+
+MDefinition* MGuardObjectIdentity::foldsTo(TempAllocator& alloc) {
+ if (object()->isConstant() && expected()->isConstant()) {
+ JSObject* obj = &object()->toConstant()->toObject();
+ JSObject* other = &expected()->toConstant()->toObject();
+ if (!bailOnEquality()) {
+ if (obj == other) {
+ return object();
+ }
+ } else {
+ if (obj != other) {
+ return object();
+ }
+ }
+ }
+
+ if (!bailOnEquality() && object()->isNurseryObject() &&
+ expected()->isNurseryObject()) {
+ uint32_t objIndex = object()->toNurseryObject()->nurseryIndex();
+ uint32_t otherIndex = expected()->toNurseryObject()->nurseryIndex();
+ if (objIndex == otherIndex) {
+ return object();
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MGuardSpecificFunction::foldsTo(TempAllocator& alloc) {
+ if (function()->isConstant() && expected()->isConstant()) {
+ JSObject* fun = &function()->toConstant()->toObject();
+ JSObject* other = &expected()->toConstant()->toObject();
+ if (fun == other) {
+ return function();
+ }
+ }
+
+ if (function()->isNurseryObject() && expected()->isNurseryObject()) {
+ uint32_t funIndex = function()->toNurseryObject()->nurseryIndex();
+ uint32_t otherIndex = expected()->toNurseryObject()->nurseryIndex();
+ if (funIndex == otherIndex) {
+ return function();
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MGuardSpecificAtom::foldsTo(TempAllocator& alloc) {
+ if (str()->isConstant()) {
+ JSString* s = str()->toConstant()->toString();
+ if (s->isAtom()) {
+ JSAtom* cstAtom = &s->asAtom();
+ if (cstAtom == atom()) {
+ return str();
+ }
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MGuardSpecificSymbol::foldsTo(TempAllocator& alloc) {
+ if (symbol()->isConstant()) {
+ if (symbol()->toConstant()->toSymbol() == expected()) {
+ return symbol();
+ }
+ }
+
+ return this;
+}
+
+MDefinition* MGuardSpecificInt32::foldsTo(TempAllocator& alloc) {
+ if (num()->isConstant() && num()->toConstant()->isInt32(expected())) {
+ return num();
+ }
+ return this;
+}
+
+bool MCallBindVar::congruentTo(const MDefinition* ins) const {
+ if (!ins->isCallBindVar()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+bool MGuardShape::congruentTo(const MDefinition* ins) const {
+ if (!ins->isGuardShape()) {
+ return false;
+ }
+ if (shape() != ins->toGuardShape()->shape()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MGuardShape::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+MDefinition::AliasType MGuardShape::mightAlias(const MDefinition* store) const {
+  // These instructions only modify object elements, not the shape.
+ if (store->isStoreElementHole() || store->isArrayPush()) {
+ return AliasType::NoAlias;
+ }
+ if (object()->isConstantProto()) {
+ const MDefinition* receiverObject =
+ object()->toConstantProto()->getReceiverObject();
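+    // Stores to the receiver object (even stores that change the receiver's
+    // own shape) cannot change the shape of its constant prototype, so they
+    // don't alias this guard.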
+ switch (store->op()) {
+ case MDefinition::Opcode::StoreFixedSlot:
+ if (store->toStoreFixedSlot()->object()->skipObjectGuards() ==
+ receiverObject) {
+ return AliasType::NoAlias;
+ }
+ break;
+ case MDefinition::Opcode::StoreDynamicSlot:
+ if (store->toStoreDynamicSlot()
+ ->slots()
+ ->toSlots()
+ ->object()
+ ->skipObjectGuards() == receiverObject) {
+ return AliasType::NoAlias;
+ }
+ break;
+ case MDefinition::Opcode::AddAndStoreSlot:
+ if (store->toAddAndStoreSlot()->object()->skipObjectGuards() ==
+ receiverObject) {
+ return AliasType::NoAlias;
+ }
+ break;
+ case MDefinition::Opcode::AllocateAndStoreSlot:
+ if (store->toAllocateAndStoreSlot()->object()->skipObjectGuards() ==
+ receiverObject) {
+ return AliasType::NoAlias;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return MInstruction::mightAlias(store);
+}
+
+AliasSet MGuardMultipleShapes::getAliasSet() const {
+ // Note: This instruction loads the elements of the ListObject used to
+ // store the list of shapes, but that object is internal and not exposed
+ // to script, so it doesn't have to be in the alias set.
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MGuardGlobalGeneration::getAliasSet() const {
+ return AliasSet::Load(AliasSet::GlobalGenerationCounter);
+}
+
+bool MGuardGlobalGeneration::congruentTo(const MDefinition* ins) const {
+ return ins->isGuardGlobalGeneration() &&
+ ins->toGuardGlobalGeneration()->expected() == expected() &&
+ ins->toGuardGlobalGeneration()->generationAddr() == generationAddr();
+}
+
+MDefinition* MGuardIsNotProxy::foldsTo(TempAllocator& alloc) {
+ KnownClass known = GetObjectKnownClass(object());
+ if (known == KnownClass::None) {
+ return this;
+ }
+
+ MOZ_ASSERT(!GetObjectKnownJSClass(object())->isProxyObject());
+ AssertKnownClass(alloc, this, object());
+ return object();
+}
+
+AliasSet MMegamorphicLoadSlotByValue::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot);
+}
+
+MDefinition* MMegamorphicLoadSlotByValue::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = idVal();
+ if (input->isBox()) {
+ input = input->toBox()->input();
+ }
+
+ MDefinition* result = this;
+
+ if (input->isConstant()) {
+ MConstant* constant = input->toConstant();
+ if (constant->type() == MIRType::Symbol) {
+ PropertyKey id = PropertyKey::Symbol(constant->toSymbol());
+ result = MMegamorphicLoadSlot::New(alloc, object(), id);
+ }
+
+ if (constant->type() == MIRType::String) {
+ JSString* str = constant->toString();
+ if (str->isAtom() && !str->asAtom().isIndex()) {
+ PropertyKey id = PropertyKey::NonIntAtom(str);
+ result = MMegamorphicLoadSlot::New(alloc, object(), id);
+ }
+ }
+ }
+
+ if (result != this) {
+ result->setDependency(dependency());
+ }
+
+ return result;
+}
+
+bool MMegamorphicLoadSlot::congruentTo(const MDefinition* ins) const {
+ if (!ins->isMegamorphicLoadSlot()) {
+ return false;
+ }
+ if (ins->toMegamorphicLoadSlot()->name() != name()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MMegamorphicLoadSlot::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot);
+}
+
+bool MMegamorphicHasProp::congruentTo(const MDefinition* ins) const {
+ if (!ins->isMegamorphicHasProp()) {
+ return false;
+ }
+ if (ins->toMegamorphicHasProp()->hasOwn() != hasOwn()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MMegamorphicHasProp::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot);
+}
+
+bool MNurseryObject::congruentTo(const MDefinition* ins) const {
+ if (!ins->isNurseryObject()) {
+ return false;
+ }
+ return nurseryIndex() == ins->toNurseryObject()->nurseryIndex();
+}
+
+AliasSet MGuardFunctionIsNonBuiltinCtor::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+bool MGuardFunctionKind::congruentTo(const MDefinition* ins) const {
+ if (!ins->isGuardFunctionKind()) {
+ return false;
+ }
+ if (expected() != ins->toGuardFunctionKind()->expected()) {
+ return false;
+ }
+ if (bailOnEquality() != ins->toGuardFunctionKind()->bailOnEquality()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MGuardFunctionKind::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+bool MGuardFunctionScript::congruentTo(const MDefinition* ins) const {
+ if (!ins->isGuardFunctionScript()) {
+ return false;
+ }
+ if (expected() != ins->toGuardFunctionScript()->expected()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MGuardFunctionScript::getAliasSet() const {
+ // A JSFunction's BaseScript pointer is immutable. Relazification of
+ // self-hosted functions is an exception to this, but we don't use this
+ // guard for self-hosted functions.
+ MOZ_ASSERT(!flags_.isSelfHostedOrIntrinsic());
+ return AliasSet::None();
+}
+
+bool MGuardSpecificAtom::congruentTo(const MDefinition* ins) const {
+ if (!ins->isGuardSpecificAtom()) {
+ return false;
+ }
+ if (atom() != ins->toGuardSpecificAtom()->atom()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+MDefinition* MGuardStringToIndex::foldsTo(TempAllocator& alloc) {
+ if (!string()->isConstant()) {
+ return this;
+ }
+
+ JSString* str = string()->toConstant()->toString();
+
+ int32_t index = GetIndexFromString(str);
+ if (index < 0) {
+ return this;
+ }
+
+ return MConstant::New(alloc, Int32Value(index));
+}
+
+MDefinition* MGuardStringToInt32::foldsTo(TempAllocator& alloc) {
+ if (!string()->isConstant()) {
+ return this;
+ }
+
+ JSLinearString* str = &string()->toConstant()->toString()->asLinear();
+ double number = LinearStringToNumber(str);
+
+ int32_t n;
+ if (!mozilla::NumberIsInt32(number, &n)) {
+ return this;
+ }
+
+ return MConstant::New(alloc, Int32Value(n));
+}
+
+MDefinition* MGuardStringToDouble::foldsTo(TempAllocator& alloc) {
+ if (!string()->isConstant()) {
+ return this;
+ }
+
+ JSLinearString* str = &string()->toConstant()->toString()->asLinear();
+ double number = LinearStringToNumber(str);
+ return MConstant::New(alloc, DoubleValue(number));
+}
+
+AliasSet MGuardNoDenseElements::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MIteratorHasIndices::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MAllocateAndStoreSlot::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ObjectFields | AliasSet::DynamicSlot);
+}
+
+AliasSet MLoadDOMExpandoValue::getAliasSet() const {
+ return AliasSet::Load(AliasSet::DOMProxyExpando);
+}
+
+AliasSet MLoadDOMExpandoValueIgnoreGeneration::getAliasSet() const {
+ return AliasSet::Load(AliasSet::DOMProxyExpando);
+}
+
+bool MGuardDOMExpandoMissingOrGuardShape::congruentTo(
+ const MDefinition* ins) const {
+ if (!ins->isGuardDOMExpandoMissingOrGuardShape()) {
+ return false;
+ }
+ if (shape() != ins->toGuardDOMExpandoMissingOrGuardShape()->shape()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MGuardDOMExpandoMissingOrGuardShape::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+MDefinition* MGuardToClass::foldsTo(TempAllocator& alloc) {
+ const JSClass* clasp = GetObjectKnownJSClass(object());
+ if (!clasp || getClass() != clasp) {
+ return this;
+ }
+
+ AssertKnownClass(alloc, this, object());
+ return object();
+}
+
+MDefinition* MGuardToFunction::foldsTo(TempAllocator& alloc) {
+ if (GetObjectKnownClass(object()) != KnownClass::Function) {
+ return this;
+ }
+
+ AssertKnownClass(alloc, this, object());
+ return object();
+}
+
+MDefinition* MHasClass::foldsTo(TempAllocator& alloc) {
+ const JSClass* clasp = GetObjectKnownJSClass(object());
+ if (!clasp) {
+ return this;
+ }
+
+ AssertKnownClass(alloc, this, object());
+ return MConstant::New(alloc, BooleanValue(getClass() == clasp));
+}
+
+MDefinition* MIsCallable::foldsTo(TempAllocator& alloc) {
+ if (input()->type() != MIRType::Object) {
+ return this;
+ }
+
+ KnownClass known = GetObjectKnownClass(input());
+ if (known == KnownClass::None) {
+ return this;
+ }
+
+ AssertKnownClass(alloc, this, input());
+ return MConstant::New(alloc, BooleanValue(known == KnownClass::Function));
+}
+
+MDefinition* MIsArray::foldsTo(TempAllocator& alloc) {
+ if (input()->type() != MIRType::Object) {
+ return this;
+ }
+
+ KnownClass known = GetObjectKnownClass(input());
+ if (known == KnownClass::None) {
+ return this;
+ }
+
+ AssertKnownClass(alloc, this, input());
+ return MConstant::New(alloc, BooleanValue(known == KnownClass::Array));
+}
+
+AliasSet MObjectClassToString::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot);
+}
+
+MDefinition* MGuardIsNotArrayBufferMaybeShared::foldsTo(TempAllocator& alloc) {
+ switch (GetObjectKnownClass(object())) {
+ case KnownClass::PlainObject:
+ case KnownClass::Array:
+ case KnownClass::Function:
+ case KnownClass::RegExp:
+ case KnownClass::ArrayIterator:
+ case KnownClass::StringIterator:
+ case KnownClass::RegExpStringIterator: {
+ AssertKnownClass(alloc, this, object());
+ return object();
+ }
+ case KnownClass::None:
+ break;
+ }
+
+ return this;
+}
+
+MDefinition* MCheckIsObj::foldsTo(TempAllocator& alloc) {
+ if (!input()->isBox()) {
+ return this;
+ }
+
+ MDefinition* unboxed = input()->getOperand(0);
+ if (unboxed->type() == MIRType::Object) {
+ return unboxed;
+ }
+
+ return this;
+}
+
+static bool IsBoxedObject(MDefinition* def) {
+ MOZ_ASSERT(def->type() == MIRType::Value);
+
+ if (def->isBox()) {
+ return def->toBox()->input()->type() == MIRType::Object;
+ }
+
+  // Construct calls always return a boxed object.
+ //
+ // TODO: We should consider encoding this directly in the graph instead of
+ // having to special case it here.
+ if (def->isCall()) {
+ return def->toCall()->isConstructing();
+ }
+ if (def->isConstructArray()) {
+ return true;
+ }
+ if (def->isConstructArgs()) {
+ return true;
+ }
+
+ return false;
+}
+
+MDefinition* MCheckReturn::foldsTo(TempAllocator& alloc) {
+ auto* returnVal = returnValue();
+ if (!returnVal->isBox()) {
+ return this;
+ }
+
+ auto* unboxedReturnVal = returnVal->toBox()->input();
+ if (unboxedReturnVal->type() == MIRType::Object) {
+ return returnVal;
+ }
+
+ if (unboxedReturnVal->type() != MIRType::Undefined) {
+ return this;
+ }
+
+ auto* thisVal = thisValue();
+ if (IsBoxedObject(thisVal)) {
+ return thisVal;
+ }
+
+ return this;
+}
+
+MDefinition* MCheckThis::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = thisValue();
+ if (!input->isBox()) {
+ return this;
+ }
+
+ MDefinition* unboxed = input->getOperand(0);
+ if (unboxed->mightBeMagicType()) {
+ return this;
+ }
+
+ return input;
+}
+
+MDefinition* MCheckThisReinit::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = thisValue();
+ if (!input->isBox()) {
+ return this;
+ }
+
+ MDefinition* unboxed = input->getOperand(0);
+ if (unboxed->type() != MIRType::MagicUninitializedLexical) {
+ return this;
+ }
+
+ return input;
+}
+
+MDefinition* MCheckObjCoercible::foldsTo(TempAllocator& alloc) {
+ MDefinition* input = checkValue();
+ if (!input->isBox()) {
+ return this;
+ }
+
+ MDefinition* unboxed = input->getOperand(0);
+ if (unboxed->mightBeType(MIRType::Null) ||
+ unboxed->mightBeType(MIRType::Undefined)) {
+ return this;
+ }
+
+ return input;
+}
+
+AliasSet MCheckObjCoercible::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ExceptionState);
+}
+
+AliasSet MCheckReturn::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ExceptionState);
+}
+
+AliasSet MCheckThis::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ExceptionState);
+}
+
+AliasSet MCheckThisReinit::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ExceptionState);
+}
+
+AliasSet MIsPackedArray::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MGuardArrayIsPacked::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MSuperFunction::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MInitHomeObject::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ObjectFields);
+}
+
+AliasSet MLoadWrapperTarget::getAliasSet() const {
+ return AliasSet::Load(AliasSet::Any);
+}
+
+AliasSet MGuardHasGetterSetter::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+bool MGuardHasGetterSetter::congruentTo(const MDefinition* ins) const {
+ if (!ins->isGuardHasGetterSetter()) {
+ return false;
+ }
+ if (ins->toGuardHasGetterSetter()->propId() != propId()) {
+ return false;
+ }
+ if (ins->toGuardHasGetterSetter()->getterSetter() != getterSetter()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MGuardIsExtensible::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MGuardIndexIsNotDenseElement::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::Element);
+}
+
+AliasSet MGuardIndexIsValidUpdateOrAdd::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields);
+}
+
+AliasSet MCallObjectHasSparseElement::getAliasSet() const {
+ return AliasSet::Load(AliasSet::Element | AliasSet::ObjectFields |
+ AliasSet::FixedSlot | AliasSet::DynamicSlot);
+}
+
+AliasSet MLoadSlotByIteratorIndex::getAliasSet() const {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot | AliasSet::Element);
+}
+
+AliasSet MStoreSlotByIteratorIndex::getAliasSet() const {
+ return AliasSet::Store(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot | AliasSet::Element);
+}
+
+MDefinition* MGuardInt32IsNonNegative::foldsTo(TempAllocator& alloc) {
+ MOZ_ASSERT(index()->type() == MIRType::Int32);
+
+ MDefinition* input = index();
+ if (!input->isConstant() || input->toConstant()->toInt32() < 0) {
+ return this;
+ }
+ return input;
+}
+
+MDefinition* MGuardInt32Range::foldsTo(TempAllocator& alloc) {
+ MOZ_ASSERT(input()->type() == MIRType::Int32);
+ MOZ_ASSERT(minimum() <= maximum());
+
+ MDefinition* in = input();
+ if (!in->isConstant()) {
+ return this;
+ }
+ int32_t cst = in->toConstant()->toInt32();
+ if (cst < minimum() || cst > maximum()) {
+ return this;
+ }
+ return in;
+}
+
+MDefinition* MGuardNonGCThing::foldsTo(TempAllocator& alloc) {
+ if (!input()->isBox()) {
+ return this;
+ }
+
+ MDefinition* unboxed = input()->getOperand(0);
+ if (!IsNonGCThing(unboxed->type())) {
+ return this;
+ }
+ return input();
+}
+
+AliasSet MSetObjectHasNonBigInt::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MSetObjectHasBigInt::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MSetObjectHasValue::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MSetObjectHasValueVMCall::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MSetObjectSize::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MMapObjectHasNonBigInt::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MMapObjectHasBigInt::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MMapObjectHasValue::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MMapObjectHasValueVMCall::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MMapObjectGetNonBigInt::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MMapObjectGetBigInt::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MMapObjectGetValue::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MMapObjectGetValueVMCall::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+AliasSet MMapObjectSize::getAliasSet() const {
+ return AliasSet::Load(AliasSet::MapOrSetHashTable);
+}
+
+MIonToWasmCall* MIonToWasmCall::New(TempAllocator& alloc,
+ WasmInstanceObject* instanceObj,
+ const wasm::FuncExport& funcExport) {
+ const wasm::FuncType& funcType =
+ instanceObj->instance().metadata().getFuncExportType(funcExport);
+ const wasm::ValTypeVector& results = funcType.results();
+ MIRType resultType = MIRType::Value;
+ // At the JS boundary some wasm types must be represented as a Value, and in
+ // addition a void return requires an Undefined value.
+ if (results.length() > 0 && !results[0].isEncodedAsJSValueOnEscape()) {
+ MOZ_ASSERT(results.length() == 1,
+ "multiple returns not implemented for inlined Wasm calls");
+ resultType = results[0].toMIRType();
+ }
+
+ auto* ins = new (alloc) MIonToWasmCall(instanceObj, resultType, funcExport);
+ if (!ins->init(alloc, funcType.args().length())) {
+ return nullptr;
+ }
+ return ins;
+}
+
+MBindFunction* MBindFunction::New(TempAllocator& alloc, MDefinition* target,
+ uint32_t argc, JSObject* templateObj) {
+ auto* ins = new (alloc) MBindFunction(templateObj);
+ if (!ins->init(alloc, NumNonArgumentOperands + argc)) {
+ return nullptr;
+ }
+ ins->initOperand(0, target);
+ return ins;
+}
+
+#ifdef DEBUG
+bool MIonToWasmCall::isConsistentFloat32Use(MUse* use) const {
+ const wasm::FuncType& funcType =
+ instance()->metadata().getFuncExportType(funcExport_);
+ return funcType.args()[use->index()].kind() == wasm::ValType::F32;
+}
+#endif
+
+MCreateInlinedArgumentsObject* MCreateInlinedArgumentsObject::New(
+ TempAllocator& alloc, MDefinition* callObj, MDefinition* callee,
+ MDefinitionVector& args, ArgumentsObject* templateObj) {
+ MCreateInlinedArgumentsObject* ins =
+ new (alloc) MCreateInlinedArgumentsObject(templateObj);
+
+ uint32_t argc = args.length();
+ MOZ_ASSERT(argc <= ArgumentsObject::MaxInlinedArgs);
+
+ if (!ins->init(alloc, argc + NumNonArgumentOperands)) {
+ return nullptr;
+ }
+
+ ins->initOperand(0, callObj);
+ ins->initOperand(1, callee);
+ for (uint32_t i = 0; i < argc; i++) {
+ ins->initOperand(i + NumNonArgumentOperands, args[i]);
+ }
+
+ return ins;
+}
+
+MGetInlinedArgument* MGetInlinedArgument::New(
+ TempAllocator& alloc, MDefinition* index,
+ MCreateInlinedArgumentsObject* args) {
+ MGetInlinedArgument* ins = new (alloc) MGetInlinedArgument();
+
+ uint32_t argc = args->numActuals();
+ MOZ_ASSERT(argc <= ArgumentsObject::MaxInlinedArgs);
+
+ if (!ins->init(alloc, argc + NumNonArgumentOperands)) {
+ return nullptr;
+ }
+
+ ins->initOperand(0, index);
+ for (uint32_t i = 0; i < argc; i++) {
+ ins->initOperand(i + NumNonArgumentOperands, args->getArg(i));
+ }
+
+ return ins;
+}
+
+MGetInlinedArgument* MGetInlinedArgument::New(TempAllocator& alloc,
+ MDefinition* index,
+ const CallInfo& callInfo) {
+ MGetInlinedArgument* ins = new (alloc) MGetInlinedArgument();
+
+ uint32_t argc = callInfo.argc();
+ MOZ_ASSERT(argc <= ArgumentsObject::MaxInlinedArgs);
+
+ if (!ins->init(alloc, argc + NumNonArgumentOperands)) {
+ return nullptr;
+ }
+
+ ins->initOperand(0, index);
+ for (uint32_t i = 0; i < argc; i++) {
+ ins->initOperand(i + NumNonArgumentOperands, callInfo.getArg(i));
+ }
+
+ return ins;
+}
+
+MDefinition* MGetInlinedArgument::foldsTo(TempAllocator& alloc) {
+ MDefinition* indexDef = SkipUninterestingInstructions(index());
+ if (!indexDef->isConstant() || indexDef->type() != MIRType::Int32) {
+ return this;
+ }
+
+ int32_t indexConst = indexDef->toConstant()->toInt32();
+ if (indexConst < 0 || uint32_t(indexConst) >= numActuals()) {
+ return this;
+ }
+
+ MDefinition* arg = getArg(indexConst);
+ if (arg->type() != MIRType::Value) {
+ arg = MBox::New(alloc, arg);
+ }
+
+ return arg;
+}
+
+MGetInlinedArgumentHole* MGetInlinedArgumentHole::New(
+ TempAllocator& alloc, MDefinition* index,
+ MCreateInlinedArgumentsObject* args) {
+ auto* ins = new (alloc) MGetInlinedArgumentHole();
+
+ uint32_t argc = args->numActuals();
+ MOZ_ASSERT(argc <= ArgumentsObject::MaxInlinedArgs);
+
+ if (!ins->init(alloc, argc + NumNonArgumentOperands)) {
+ return nullptr;
+ }
+
+ ins->initOperand(0, index);
+ for (uint32_t i = 0; i < argc; i++) {
+ ins->initOperand(i + NumNonArgumentOperands, args->getArg(i));
+ }
+
+ return ins;
+}
+
+MDefinition* MGetInlinedArgumentHole::foldsTo(TempAllocator& alloc) {
+ MDefinition* indexDef = SkipUninterestingInstructions(index());
+ if (!indexDef->isConstant() || indexDef->type() != MIRType::Int32) {
+ return this;
+ }
+
+ int32_t indexConst = indexDef->toConstant()->toInt32();
+ if (indexConst < 0) {
+ return this;
+ }
+
+ MDefinition* arg;
+ if (uint32_t(indexConst) < numActuals()) {
+ arg = getArg(indexConst);
+
+ if (arg->type() != MIRType::Value) {
+ arg = MBox::New(alloc, arg);
+ }
+ } else {
+ auto* undefined = MConstant::New(alloc, UndefinedValue());
+ block()->insertBefore(this, undefined);
+
+ arg = MBox::New(alloc, undefined);
+ }
+
+ return arg;
+}
+
+MInlineArgumentsSlice* MInlineArgumentsSlice::New(
+ TempAllocator& alloc, MDefinition* begin, MDefinition* count,
+ MCreateInlinedArgumentsObject* args, JSObject* templateObj,
+ gc::Heap initialHeap) {
+ auto* ins = new (alloc) MInlineArgumentsSlice(templateObj, initialHeap);
+
+ uint32_t argc = args->numActuals();
+ MOZ_ASSERT(argc <= ArgumentsObject::MaxInlinedArgs);
+
+ if (!ins->init(alloc, argc + NumNonArgumentOperands)) {
+ return nullptr;
+ }
+
+ ins->initOperand(0, begin);
+ ins->initOperand(1, count);
+ for (uint32_t i = 0; i < argc; i++) {
+ ins->initOperand(i + NumNonArgumentOperands, args->getArg(i));
+ }
+
+ return ins;
+}
+
+MDefinition* MNormalizeSliceTerm::foldsTo(TempAllocator& alloc) {
+ auto* length = this->length();
+ if (!length->isConstant() && !length->isArgumentsLength()) {
+ return this;
+ }
+
+ if (length->isConstant()) {
+ int32_t lengthConst = length->toConstant()->toInt32();
+ MOZ_ASSERT(lengthConst >= 0);
+
+ // Result is always zero when |length| is zero.
+ if (lengthConst == 0) {
+ return length;
+ }
+
+ auto* value = this->value();
+ if (value->isConstant()) {
+ int32_t valueConst = value->toConstant()->toInt32();
+
+ int32_t normalized;
+ if (valueConst < 0) {
+ normalized = std::max(valueConst + lengthConst, 0);
+ } else {
+ normalized = std::min(valueConst, lengthConst);
+ }
+
+ if (normalized == valueConst) {
+ return value;
+ }
+ if (normalized == lengthConst) {
+ return length;
+ }
+ return MConstant::New(alloc, Int32Value(normalized));
+ }
+
+ return this;
+ }
+
+ auto* value = this->value();
+ if (value->isConstant()) {
+ int32_t valueConst = value->toConstant()->toInt32();
+
+ // Minimum of |value| and |length|.
+ if (valueConst > 0) {
+ bool isMax = false;
+ return MMinMax::New(alloc, value, length, MIRType::Int32, isMax);
+ }
+
+ // Maximum of |value + length| and zero.
+ if (valueConst < 0) {
+ // Safe to truncate because |length| is never negative.
+ auto* add = MAdd::New(alloc, value, length, TruncateKind::Truncate);
+ block()->insertBefore(this, add);
+
+ auto* zero = MConstant::New(alloc, Int32Value(0));
+ block()->insertBefore(this, zero);
+
+ bool isMax = true;
+ return MMinMax::New(alloc, add, zero, MIRType::Int32, isMax);
+ }
+
+ // Directly return the value when it's zero.
+ return value;
+ }
+
+ // Normalizing MArgumentsLength is a no-op.
+ if (value->isArgumentsLength()) {
+ return value;
+ }
+
+ return this;
+}
+
+bool MWasmShiftSimd128::congruentTo(const MDefinition* ins) const {
+ return ins->toWasmShiftSimd128()->simdOp() == simdOp_ &&
+ congruentIfOperandsEqual(ins);
+}
+
+bool MWasmShuffleSimd128::congruentTo(const MDefinition* ins) const {
+ return ins->toWasmShuffleSimd128()->shuffle().equals(&shuffle_) &&
+ congruentIfOperandsEqual(ins);
+}
+
+bool MWasmUnarySimd128::congruentTo(const MDefinition* ins) const {
+ return ins->toWasmUnarySimd128()->simdOp() == simdOp_ &&
+ congruentIfOperandsEqual(ins);
+}
+
+#ifdef ENABLE_WASM_SIMD
+MWasmShuffleSimd128* jit::BuildWasmShuffleSimd128(TempAllocator& alloc,
+ const int8_t* control,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ SimdShuffle s =
+ AnalyzeSimdShuffle(SimdConstant::CreateX16(control), lhs, rhs);
+ switch (s.opd) {
+ case SimdShuffle::Operand::LEFT:
+      // When SimdShuffle::Operand is LEFT the right operand is not used, so
+      // drop the reference to rhs.
+ rhs = lhs;
+ break;
+ case SimdShuffle::Operand::RIGHT:
+      // When SimdShuffle::Operand is RIGHT the left operand is not used, so
+      // drop the reference to lhs.
+ lhs = rhs;
+ break;
+ default:
+ break;
+ }
+ return MWasmShuffleSimd128::New(alloc, lhs, rhs, s);
+}
+#endif // ENABLE_WASM_SIMD
+
+static MDefinition* FoldTrivialWasmCasts(TempAllocator& alloc,
+ wasm::RefType sourceType,
+ wasm::RefType destType) {
+ // Upcasts are trivially valid.
+ if (wasm::RefType::isSubTypeOf(sourceType, destType)) {
+ return MConstant::New(alloc, Int32Value(1), MIRType::Int32);
+ }
+
+ // If two types are completely disjoint, then all casts between them are
+ // impossible.
+ if (!wasm::RefType::castPossible(destType, sourceType)) {
+ return MConstant::New(alloc, Int32Value(0), MIRType::Int32);
+ }
+
+ return nullptr;
+}
+
+MDefinition* MWasmGcObjectIsSubtypeOfAbstract::foldsTo(TempAllocator& alloc) {
+ MDefinition* folded = FoldTrivialWasmCasts(alloc, sourceType(), destType());
+ if (folded) {
+ return folded;
+ }
+ return this;
+}
+
+MDefinition* MWasmGcObjectIsSubtypeOfConcrete::foldsTo(TempAllocator& alloc) {
+ MDefinition* folded = FoldTrivialWasmCasts(alloc, sourceType(), destType());
+ if (folded) {
+ return folded;
+ }
+ return this;
+}
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
new file mode 100644
index 0000000000..2449e3588a
--- /dev/null
+++ b/js/src/jit/MIR.h
@@ -0,0 +1,11613 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Everything needed to build actual MIR instructions: the actual opcodes and
+ * instructions, the instruction interface, and use chains.
+ */
+
+#ifndef jit_MIR_h
+#define jit_MIR_h
+
+#include "mozilla/Array.h"
+#include "mozilla/HashFunctions.h"
+#ifdef JS_JITSPEW
+# include "mozilla/Attributes.h" // MOZ_STACK_CLASS
+#endif
+#include "mozilla/MacroForEach.h"
+#ifdef JS_JITSPEW
+# include "mozilla/Sprintf.h"
+# include "mozilla/Vector.h"
+#endif
+
+#include <algorithm>
+#include <initializer_list>
+
+#include "NamespaceImports.h"
+
+#include "jit/AtomicOp.h"
+#include "jit/FixedList.h"
+#include "jit/InlineList.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIROpsGenerated.h"
+#include "jit/ShuffleAnalysis.h"
+#include "jit/TypeData.h"
+#include "jit/TypePolicy.h"
+#include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}Op, JSJitInfo
+#include "js/HeapAPI.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/Value.h"
+#include "js/Vector.h"
+#include "vm/BigIntType.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/JSContext.h"
+#include "vm/RegExpObject.h"
+#include "vm/TypedArrayObject.h"
+#include "wasm/WasmJS.h" // for WasmInstanceObject
+
+namespace JS {
+struct ExpandoAndGeneration;
+}
+
+namespace js {
+
+namespace wasm {
+class FuncExport;
+extern uint32_t MIRTypeToABIResultSize(jit::MIRType);
+} // namespace wasm
+
+class JS_PUBLIC_API GenericPrinter;
+class NativeIteratorListHead;
+class StringObject;
+
+enum class UnaryMathFunction : uint8_t;
+
+bool CurrentThreadIsIonCompiling();
+
+namespace jit {
+
+class CallInfo;
+
+#ifdef JS_JITSPEW
+// Helper for debug printing. Avoids creating a MIR.h <--> MIRGraph.h cycle.
+// Implementation of this needs to see inside `MBasicBlock`; that is possible
+// in MIR.cpp since it also includes MIRGraph.h, whereas this file does not.
+class MBasicBlock;
+uint32_t GetMBasicBlockId(const MBasicBlock* block);
+
+// Helper class for debug printing. This class allows `::getExtras` methods
+// to add strings to be printed, on a per-MIR-node basis. The strings are
+// copied into storage owned by this class when `::add` is called, so the
+// `::getExtras` methods do not need to be concerned about storage management.
+class MOZ_STACK_CLASS ExtrasCollector {
+ mozilla::Vector<UniqueChars, 4> strings_;
+
+ public:
+ // Add `str` to the collection. A copy, owned by this object, is made. In
+ // case of OOM the call has no effect.
+ void add(const char* str) {
+ UniqueChars dup = DuplicateString(str);
+ if (dup) {
+ (void)strings_.append(std::move(dup));
+ }
+ }
+ size_t count() const { return strings_.length(); }
+ UniqueChars get(size_t ix) { return std::move(strings_[ix]); }
+};
+#endif
+
+// Forward declarations of MIR types.
+#define FORWARD_DECLARE(op) class M##op;
+MIR_OPCODE_LIST(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
+
+// MDefinition visitor which ignores non-overloaded visit functions.
+class MDefinitionVisitorDefaultNoop {
+ public:
+#define VISIT_INS(op) \
+ void visit##op(M##op*) {}
+ MIR_OPCODE_LIST(VISIT_INS)
+#undef VISIT_INS
+};
+
+class BytecodeSite;
+class CompactBufferWriter;
+class Range;
+
+#define MIR_FLAG_LIST(_) \
+ _(InWorklist) \
+ _(EmittedAtUses) \
+ _(Commutative) \
+ _(Movable) /* Allow passes like LICM to move this instruction */ \
+ _(Lowered) /* (Debug only) has a virtual register */ \
+ _(Guard) /* Not removable if uses == 0 */ \
+ \
+  /* Flag an instruction to be considered a Guard if the instruction bails   \
+   * out on some inputs.                                                     \
+   *                                                                         \
+   * Some optimizations can replace an instruction and leave its operands    \
+   * unused. When the type information of an operand was used as a           \
+   * predicate of the transformation, we have to flag the operands as        \
+   * GuardRangeBailouts.                                                     \
+ * \
+ * This flag prevents further optimization of instructions, which \
+ * might remove the run-time checks (bailout conditions) used as a \
+ * predicate of the previous transformation. \
+ */ \
+ _(GuardRangeBailouts) \
+ \
+ /* Some instructions have uses that aren't directly represented in the \
+ * graph, and need to be handled specially. As an example, this is used to \
+ * keep the flagged instruction in resume points, not substituting with an \
+ * UndefinedValue. This can be used by call inlining when a function \
+ * argument is not used by the inlined instructions. It is also used \
+ * to annotate instructions which were used in removed branches. \
+ */ \
+ _(ImplicitlyUsed) \
+ \
+ /* The instruction has been marked dead for lazy removal from resume \
+ * points. \
+ */ \
+ _(Unused) \
+ \
+ /* Marks if the current instruction should go to the bailout paths instead \
+ * of producing code as part of the control flow. This flag can only be set \
+ * on instructions which are only used by ResumePoint or by other flagged \
+ * instructions. \
+ */ \
+ _(RecoveredOnBailout) \
+ \
+ /* Some instructions might represent an object, but the memory of these \
+ * objects might be incomplete if we have not recovered all the stores which \
+ * were supposed to happen before. This flag is used to annotate \
+ * instructions which might return a pointer to a memory area which is not \
+ * yet fully initialized. This flag is used to ensure that stores are \
+ * executed before returning the value. \
+ */ \
+ _(IncompleteObject) \
+ \
+ /* For WebAssembly, there are functions with multiple results. Instead of \
+ * having the results defined by one call instruction, they are instead \
+ * captured in subsequent result capture instructions, because modelling \
+ * multi-value results in Ion is too complicated. However since they \
+ * capture ambient live registers, it would be an error to move an unrelated \
+ * instruction between the call and the result capture. This flag is used \
+ * to prevent code motion from moving instructions in invalid ways. \
+ */ \
+ _(CallResultCapture) \
+ \
+ /* The current instruction got discarded from the MIR Graph. This is useful \
+ * when we want to iterate over resume points and instructions, while \
+ * handling instructions which are discarded without reporting to the \
+ * iterator. \
+ */ \
+ _(Discarded)
+
+class MDefinition;
+class MInstruction;
+class MBasicBlock;
+class MNode;
+class MUse;
+class MPhi;
+class MIRGraph;
+class MResumePoint;
+class MControlInstruction;
+
+// Represents a use of a node.
+class MUse : public TempObject, public InlineListNode<MUse> {
+ // Grant access to setProducerUnchecked.
+ friend class MDefinition;
+ friend class MPhi;
+
+ MDefinition* producer_; // MDefinition that is being used.
+ MNode* consumer_; // The node that is using this operand.
+
+ // Low-level unchecked edit method for replaceAllUsesWith and
+ // MPhi::removeOperand. This doesn't update use lists!
+ // replaceAllUsesWith and MPhi::removeOperand do that manually.
+ void setProducerUnchecked(MDefinition* producer) {
+ MOZ_ASSERT(consumer_);
+ MOZ_ASSERT(producer_);
+ MOZ_ASSERT(producer);
+ producer_ = producer;
+ }
+
+ public:
+ // Default constructor for use in vectors.
+ MUse() : producer_(nullptr), consumer_(nullptr) {}
+
+ // Move constructor for use in vectors. When an MUse is moved, it stays
+ // in its containing use list.
+ MUse(MUse&& other)
+ : InlineListNode<MUse>(std::move(other)),
+ producer_(other.producer_),
+ consumer_(other.consumer_) {}
+
+ // Construct an MUse initialized with |producer| and |consumer|.
+ MUse(MDefinition* producer, MNode* consumer) {
+ initUnchecked(producer, consumer);
+ }
+
+ // Set this use, which was previously clear.
+ inline void init(MDefinition* producer, MNode* consumer);
+ // Like init, but works even when the use contains uninitialized data.
+ inline void initUnchecked(MDefinition* producer, MNode* consumer);
+ // Like initUnchecked, but set the producer to nullptr.
+ inline void initUncheckedWithoutProducer(MNode* consumer);
+ // Set this use, which was not previously clear.
+ inline void replaceProducer(MDefinition* producer);
+ // Clear this use.
+ inline void releaseProducer();
+
+ MDefinition* producer() const {
+ MOZ_ASSERT(producer_ != nullptr);
+ return producer_;
+ }
+ bool hasProducer() const { return producer_ != nullptr; }
+ MNode* consumer() const {
+ MOZ_ASSERT(consumer_ != nullptr);
+ return consumer_;
+ }
+
+#ifdef DEBUG
+ // Return the operand index of this MUse in its consumer. This is DEBUG-only
+ // as normal code should instead call indexOf on the cast consumer directly,
+ // to allow it to be devirtualized and inlined.
+ size_t index() const;
+#endif
+};
+
+using MUseIterator = InlineList<MUse>::iterator;
+
+// A node is an entry in the MIR graph. It has two kinds:
+// MInstruction: an instruction which appears in the IR stream.
+// MResumePoint: a list of instructions that correspond to the state of the
+// interpreter/Baseline stack.
+//
+// Nodes can hold references to MDefinitions. Each MDefinition has a list of
+// nodes holding such a reference (its use chain).
+class MNode : public TempObject {
+ protected:
+ enum class Kind { Definition = 0, ResumePoint };
+
+ private:
+ static const uintptr_t KindMask = 0x1;
+ uintptr_t blockAndKind_;
+
+ Kind kind() const { return Kind(blockAndKind_ & KindMask); }
+
+ protected:
+ explicit MNode(const MNode& other) : blockAndKind_(other.blockAndKind_) {}
+
+ MNode(MBasicBlock* block, Kind kind) { setBlockAndKind(block, kind); }
+
+ void setBlockAndKind(MBasicBlock* block, Kind kind) {
+ blockAndKind_ = uintptr_t(block) | uintptr_t(kind);
+ MOZ_ASSERT(this->block() == block);
+ }
+
+ MBasicBlock* definitionBlock() const {
+ MOZ_ASSERT(isDefinition());
+ static_assert(unsigned(Kind::Definition) == 0,
+ "Code below relies on low bit being 0");
+ return reinterpret_cast<MBasicBlock*>(blockAndKind_);
+ }
+ MBasicBlock* resumePointBlock() const {
+ MOZ_ASSERT(isResumePoint());
+ static_assert(unsigned(Kind::ResumePoint) == 1,
+ "Code below relies on low bit being 1");
+ // Use a subtraction: if the caller does block()->foo, the compiler
+ // will be able to fold it with the load.
+ return reinterpret_cast<MBasicBlock*>(blockAndKind_ - 1);
+ }
+
+ public:
+ // Returns the definition at a given operand.
+ virtual MDefinition* getOperand(size_t index) const = 0;
+ virtual size_t numOperands() const = 0;
+ virtual size_t indexOf(const MUse* u) const = 0;
+
+ bool isDefinition() const { return kind() == Kind::Definition; }
+ bool isResumePoint() const { return kind() == Kind::ResumePoint; }
+ MBasicBlock* block() const {
+ return reinterpret_cast<MBasicBlock*>(blockAndKind_ & ~KindMask);
+ }
+ MBasicBlock* caller() const;
+
+ // Sets an already set operand, updating use information. If you're looking
+ // for setOperand, this is probably what you want.
+ virtual void replaceOperand(size_t index, MDefinition* operand) = 0;
+
+ // Resets the operand to an uninitialized state, breaking the link
+ // with the previous operand's producer.
+ void releaseOperand(size_t index) { getUseFor(index)->releaseProducer(); }
+ bool hasOperand(size_t index) const {
+ return getUseFor(index)->hasProducer();
+ }
+
+ inline MDefinition* toDefinition();
+ inline MResumePoint* toResumePoint();
+
+ [[nodiscard]] virtual bool writeRecoverData(
+ CompactBufferWriter& writer) const;
+
+#ifdef JS_JITSPEW
+ virtual void dump(GenericPrinter& out) const = 0;
+ virtual void dump() const = 0;
+#endif
+
+ protected:
+ // Need visibility on getUseFor to avoid O(n^2) complexity.
+ friend void AssertBasicGraphCoherency(MIRGraph& graph, bool force);
+
+ // Gets the MUse corresponding to given operand.
+ virtual MUse* getUseFor(size_t index) = 0;
+ virtual const MUse* getUseFor(size_t index) const = 0;
+};
+
+class AliasSet {
+ private:
+ uint32_t flags_;
+
+ public:
+ enum Flag {
+ None_ = 0,
+ ObjectFields = 1 << 0, // shape, class, slots, length etc.
+ Element = 1 << 1, // A Value member of obj->elements or
+ // a typed object.
+    UnboxedElement = 1 << 2,   // An unboxed scalar or reference member of
+                               // a typed object.
+ DynamicSlot = 1 << 3, // A Value member of obj->slots.
+ FixedSlot = 1 << 4, // A Value member of obj->fixedSlots().
+ DOMProperty = 1 << 5, // A DOM property
+ WasmInstanceData = 1 << 6, // An asm.js/wasm private global var
+ WasmHeap = 1 << 7, // An asm.js/wasm heap load
+ WasmHeapMeta = 1 << 8, // The asm.js/wasm heap base pointer and
+ // bounds check limit, in Instance.
+ ArrayBufferViewLengthOrOffset =
+ 1 << 9, // An array buffer view's length or byteOffset
+ WasmGlobalCell = 1 << 10, // A wasm global cell
+ WasmTableElement = 1 << 11, // An element of a wasm table
+ WasmTableMeta = 1 << 12, // A wasm table elements pointer and
+ // length field, in instance data.
+ WasmStackResult = 1 << 13, // A stack result from the current function
+
+ // JSContext's exception state. This is used on instructions like MThrow
+ // or MNewArrayDynamicLength that throw exceptions (other than OOM) but have
+ // no other side effect, to ensure that they get their own up-to-date resume
+ // point. (This resume point will be used when constructing the Baseline
+ // frame during exception bailouts.)
+ ExceptionState = 1 << 14,
+
+ // Used for instructions that load the privateSlot of DOM proxies and
+ // the ExpandoAndGeneration.
+ DOMProxyExpando = 1 << 15,
+
+ // Hash table of a Map or Set object.
+ MapOrSetHashTable = 1 << 16,
+
+ // Internal state of the random number generator
+ RNG = 1 << 17,
+
+ // The pendingException slot on the wasm instance object.
+ WasmPendingException = 1 << 18,
+
+ // The fuzzilliHash slot
+ FuzzilliHash = 1 << 19,
+
+ // The WasmStructObject::inlineData_[..] storage area
+ WasmStructInlineDataArea = 1 << 20,
+
+ // The WasmStructObject::outlineData_ pointer only
+ WasmStructOutlineDataPointer = 1 << 21,
+
+ // The malloc'd block that WasmStructObject::outlineData_ points at
+ WasmStructOutlineDataArea = 1 << 22,
+
+ // The WasmArrayObject::numElements_ field
+ WasmArrayNumElements = 1 << 23,
+
+ // The WasmArrayObject::data_ pointer only
+ WasmArrayDataPointer = 1 << 24,
+
+ // The malloc'd block that WasmArrayObject::data_ points at
+ WasmArrayDataArea = 1 << 25,
+
+ // The generation counter associated with the global object
+ GlobalGenerationCounter = 1 << 26,
+
+ Last = GlobalGenerationCounter,
+
+ Any = Last | (Last - 1),
+ NumCategories = 27,
+
+ // Indicates load or store.
+ Store_ = 1 << 31
+ };
+
+ static_assert((1 << NumCategories) - 1 == Any,
+ "NumCategories must include all flags present in Any");
+
+ explicit AliasSet(uint32_t flags) : flags_(flags) {}
+
+ public:
+ inline bool isNone() const { return flags_ == None_; }
+ uint32_t flags() const { return flags_ & Any; }
+ inline bool isStore() const { return !!(flags_ & Store_); }
+ inline bool isLoad() const { return !isStore() && !isNone(); }
+ inline AliasSet operator|(const AliasSet& other) const {
+ return AliasSet(flags_ | other.flags_);
+ }
+ inline AliasSet operator&(const AliasSet& other) const {
+ return AliasSet(flags_ & other.flags_);
+ }
+ static AliasSet None() { return AliasSet(None_); }
+ static AliasSet Load(uint32_t flags) {
+ MOZ_ASSERT(flags && !(flags & Store_));
+ return AliasSet(flags);
+ }
+ static AliasSet Store(uint32_t flags) {
+ MOZ_ASSERT(flags && !(flags & Store_));
+ return AliasSet(flags | Store_);
+ }
+};
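+
+// Illustrative example: MLoadSlotByIteratorIndex reports
+// AliasSet::Load(ObjectFields | FixedSlot | DynamicSlot | Element), while the
+// matching MStoreSlotByIteratorIndex reports the same flags via
+// AliasSet::Store (see their getAliasSet() implementations in MIR.cpp).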
+
+typedef Vector<MDefinition*, 6, JitAllocPolicy> MDefinitionVector;
+typedef Vector<MInstruction*, 6, JitAllocPolicy> MInstructionVector;
+
+// When a floating-point value is used by nodes which would prefer to
+// receive integer inputs, we may be able to help by computing our result
+// into an integer directly.
+//
+// A value can be truncated in 4 different ways:
+// 1. Ignore Infinities (x / 0 --> 0).
+// 2. Ignore overflow (INT_MIN / -1 == (INT_MAX + 1) --> INT_MIN)
+// 3. Ignore negative zeros. (-0 --> 0)
+// 4. Ignore remainder. (3 / 4 --> 0)
+//
+// Indirect truncation is used to represent that we are interested in the
+// truncated result, but only if it can safely flow into operations which
+// are computed modulo 2^32, such as (2) and (3). Infinities are not safe,
+// as they would have absorbed other math operations. Remainders are not
+// safe, as fractions can be scaled up by multiplication.
+//
+// Division is a particularly interesting node here because it covers all 4
+// cases even when its own operands are integers.
+//
+// Note that these enum values are ordered from least value-modifying to
+// most value-modifying, and code relies on this ordering.
+enum class TruncateKind {
+ // No correction.
+ NoTruncate = 0,
+ // An integer is desired, but we can't skip bailout checks.
+ TruncateAfterBailouts = 1,
+ // The value will be truncated after some arithmetic (see above).
+ IndirectTruncate = 2,
+ // Direct and infallible truncation to int32.
+ Truncate = 3
+};
+
+// An MDefinition is an SSA name.
+class MDefinition : public MNode {
+ friend class MBasicBlock;
+
+ public:
+ enum class Opcode : uint16_t {
+#define DEFINE_OPCODES(op) op,
+ MIR_OPCODE_LIST(DEFINE_OPCODES)
+#undef DEFINE_OPCODES
+ };
+
+ private:
+ InlineList<MUse> uses_; // Use chain.
+ uint32_t id_; // Instruction ID, which after block re-ordering
+ // is sorted within a basic block.
+ Opcode op_; // Opcode.
+ uint16_t flags_; // Bit flags.
+ Range* range_; // Any computed range for this def.
+ union {
+ MDefinition*
+ loadDependency_; // Implicit dependency (store, call, etc.) of this
+ // instruction. Used by alias analysis, GVN and LICM.
+ uint32_t virtualRegister_; // Used by lowering to map definitions to
+ // virtual registers.
+ };
+
+  // Track bailouts by storing the current pc in the MIR instruction. Also
+  // used for profiling and keeping track of what the last known pc was.
+ const BytecodeSite* trackedSite_;
+
+ // If we generate a bailout path for this instruction, this is the
+ // bailout kind that will be encoded in the snapshot. When we bail out,
+ // FinishBailoutToBaseline may take action based on the bailout kind to
+ // prevent bailout loops. (For example, if an instruction bails out after
+ // being hoisted by LICM, we will disable LICM when recompiling the script.)
+ BailoutKind bailoutKind_;
+
+ MIRType resultType_; // Representation of result type.
+
+ private:
+ enum Flag {
+ None = 0,
+#define DEFINE_FLAG(flag) flag,
+ MIR_FLAG_LIST(DEFINE_FLAG)
+#undef DEFINE_FLAG
+ Total
+ };
+
+ bool hasFlags(uint32_t flags) const { return (flags_ & flags) == flags; }
+ void removeFlags(uint32_t flags) { flags_ &= ~flags; }
+ void setFlags(uint32_t flags) { flags_ |= flags; }
+
+ // Calling isDefinition or isResumePoint on MDefinition is unnecessary.
+ bool isDefinition() const = delete;
+ bool isResumePoint() const = delete;
+
+ protected:
+ void setInstructionBlock(MBasicBlock* block, const BytecodeSite* site) {
+ MOZ_ASSERT(isInstruction());
+ setBlockAndKind(block, Kind::Definition);
+ setTrackedSite(site);
+ }
+
+ void setPhiBlock(MBasicBlock* block) {
+ MOZ_ASSERT(isPhi());
+ setBlockAndKind(block, Kind::Definition);
+ }
+
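+  // Note (illustrative): the combine below is equivalent to
+  // |data + hash * 65599| (65599 == 2^16 + 2^6 - 1), a multiplier also used
+  // by the sdbm string hash.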
+ static HashNumber addU32ToHash(HashNumber hash, uint32_t data) {
+ return data + (hash << 6) + (hash << 16) - hash;
+ }
+
+ static HashNumber addU64ToHash(HashNumber hash, uint64_t data) {
+ hash = addU32ToHash(hash, uint32_t(data));
+ hash = addU32ToHash(hash, uint32_t(data >> 32));
+ return hash;
+ }
+
+ public:
+ explicit MDefinition(Opcode op)
+ : MNode(nullptr, Kind::Definition),
+ id_(0),
+ op_(op),
+ flags_(0),
+ range_(nullptr),
+ loadDependency_(nullptr),
+ trackedSite_(nullptr),
+ bailoutKind_(BailoutKind::Unknown),
+ resultType_(MIRType::None) {}
+
+ // Copying a definition leaves the list of uses empty.
+ explicit MDefinition(const MDefinition& other)
+ : MNode(other),
+ id_(0),
+ op_(other.op_),
+ flags_(other.flags_),
+ range_(other.range_),
+ loadDependency_(other.loadDependency_),
+ trackedSite_(other.trackedSite_),
+ bailoutKind_(other.bailoutKind_),
+ resultType_(other.resultType_) {}
+
+ Opcode op() const { return op_; }
+
+#ifdef JS_JITSPEW
+ const char* opName() const;
+ void printName(GenericPrinter& out) const;
+ static void PrintOpcodeName(GenericPrinter& out, Opcode op);
+ virtual void printOpcode(GenericPrinter& out) const;
+ void dump(GenericPrinter& out) const override;
+ void dump() const override;
+ void dumpLocation(GenericPrinter& out) const;
+ void dumpLocation() const;
+ // Dump any other stuff the node wants to have printed in `extras`. The
+ // added strings are copied, with the `ExtrasCollector` taking ownership of
+ // the copies.
+ virtual void getExtras(ExtrasCollector* extras) {}
+#endif
+
+ // Also for LICM. Test whether this definition is likely to be a call, which
+ // would clobber all or many of the floating-point registers, such that
+ // hoisting floating-point constants out of containing loops isn't likely to
+ // be worthwhile.
+ virtual bool possiblyCalls() const { return false; }
+
+ MBasicBlock* block() const { return definitionBlock(); }
+
+ private:
+#ifdef DEBUG
+ bool trackedSiteMatchesBlock(const BytecodeSite* site) const;
+#endif
+
+ void setTrackedSite(const BytecodeSite* site) {
+ MOZ_ASSERT(site);
+ MOZ_ASSERT(trackedSiteMatchesBlock(site),
+ "tracked bytecode site should match block bytecode site");
+ trackedSite_ = site;
+ }
+
+ public:
+ const BytecodeSite* trackedSite() const {
+ MOZ_ASSERT(trackedSite_,
+ "missing tracked bytecode site; node not assigned to a block?");
+ MOZ_ASSERT(trackedSiteMatchesBlock(trackedSite_),
+ "tracked bytecode site should match block bytecode site");
+ return trackedSite_;
+ }
+
+ BailoutKind bailoutKind() const { return bailoutKind_; }
+ void setBailoutKind(BailoutKind kind) { bailoutKind_ = kind; }
+
+ // Return the range of this value, *before* any bailout checks. Contrast
+ // this with the type() method, and the Range constructor which takes an
+ // MDefinition*, which describe the value *after* any bailout checks.
+ //
+  // Warning: Range analysis removes bit-operations such as '| 0' at the end
+  // of its transformations. Using this function to analyse operands after
+  // the truncate phase of range analysis will lead to errors. Instead,
+  // define collectRangeInfoPreTrunc() to set the right flags, which depend
+  // on the range of the inputs.
+ Range* range() const {
+ MOZ_ASSERT(type() != MIRType::None);
+ return range_;
+ }
+ void setRange(Range* range) {
+ MOZ_ASSERT(type() != MIRType::None);
+ range_ = range;
+ }
+
+ virtual HashNumber valueHash() const;
+ virtual bool congruentTo(const MDefinition* ins) const { return false; }
+ const MDefinition* skipObjectGuards() const;
+ bool congruentIfOperandsEqual(const MDefinition* ins) const;
+ virtual MDefinition* foldsTo(TempAllocator& alloc);
+ virtual void analyzeEdgeCasesForward();
+ virtual void analyzeEdgeCasesBackward();
+
+  // |canTruncate| reports whether this instruction supports truncation. If
+  // |canTruncate| returns true, then |truncate| is called on the same
+  // instruction to mutate it, for example by updating the return type, the
+  // range, and the specialization of the instruction.
+ virtual bool canTruncate() const;
+ virtual void truncate(TruncateKind kind);
+
+ // Determine what kind of truncate this node prefers for the operand at the
+ // given index.
+ virtual TruncateKind operandTruncateKind(size_t index) const;
+
+ // Compute an absolute or symbolic range for the value of this node.
+ virtual void computeRange(TempAllocator& alloc) {}
+
+ // Collect information from the pre-truncated ranges.
+ virtual void collectRangeInfoPreTrunc() {}
+
+ uint32_t id() const {
+ MOZ_ASSERT(block());
+ return id_;
+ }
+ void setId(uint32_t id) { id_ = id; }
+
+#define FLAG_ACCESSOR(flag) \
+ bool is##flag() const { \
+ static_assert(Flag::Total <= sizeof(flags_) * 8, \
+ "Flags should fit in flags_ field"); \
+ return hasFlags(1 << flag); \
+ } \
+ void set##flag() { \
+ MOZ_ASSERT(!hasFlags(1 << flag)); \
+ setFlags(1 << flag); \
+ } \
+ void setNot##flag() { \
+ MOZ_ASSERT(hasFlags(1 << flag)); \
+ removeFlags(1 << flag); \
+ } \
+ void set##flag##Unchecked() { setFlags(1 << flag); } \
+ void setNot##flag##Unchecked() { removeFlags(1 << flag); }
+
+ MIR_FLAG_LIST(FLAG_ACCESSOR)
+#undef FLAG_ACCESSOR
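+
+  // For example, the |Movable| entry in MIR_FLAG_LIST expands to isMovable(),
+  // setMovable(), setNotMovable(), and the corresponding *Unchecked variants.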
+
+ // Return the type of this value. This may be speculative, and enforced
+ // dynamically with the use of bailout checks. If all the bailout checks
+ // pass, the value will have this type.
+ //
+ // Unless this is an MUrsh that has bailouts disabled, which, as a special
+ // case, may return a value in (INT32_MAX,UINT32_MAX] even when its type()
+ // is MIRType::Int32.
+ MIRType type() const { return resultType_; }
+
+ bool mightBeType(MIRType type) const {
+ MOZ_ASSERT(type != MIRType::Value);
+
+ if (type == this->type()) {
+ return true;
+ }
+
+ if (this->type() == MIRType::Value) {
+ return true;
+ }
+
+ return false;
+ }
+
+ bool mightBeMagicType() const;
+
+ // Return true if the result-set types are a subset of the given types.
+ bool definitelyType(std::initializer_list<MIRType> types) const;
+
+ // Float32 specialization operations (see big comment in IonAnalysis before
+ // the Float32 specialization algorithm).
+ virtual bool isFloat32Commutative() const { return false; }
+ virtual bool canProduceFloat32() const { return false; }
+ virtual bool canConsumeFloat32(MUse* use) const { return false; }
+ virtual void trySpecializeFloat32(TempAllocator& alloc) {}
+#ifdef DEBUG
+  // Used during the pass that checks that Float32 values flow into valid
+  // MDefinitions.
+ virtual bool isConsistentFloat32Use(MUse* use) const {
+ return type() == MIRType::Float32 || canConsumeFloat32(use);
+ }
+#endif
+
+ // Returns the beginning of this definition's use chain.
+ MUseIterator usesBegin() const { return uses_.begin(); }
+
+ // Returns the end of this definition's use chain.
+ MUseIterator usesEnd() const { return uses_.end(); }
+
+ bool canEmitAtUses() const { return !isEmittedAtUses(); }
+
+ // Removes a use at the given position
+ void removeUse(MUse* use) { uses_.remove(use); }
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ // Number of uses of this instruction. This function is only available
+ // in DEBUG mode since it requires traversing the list. Most users should
+ // use hasUses() or hasOneUse() instead.
+ size_t useCount() const;
+
+ // Number of uses of this instruction (only counting MDefinitions, ignoring
+ // MResumePoints). This function is only available in DEBUG mode since it
+ // requires traversing the list. Most users should use hasUses() or
+ // hasOneUse() instead.
+ size_t defUseCount() const;
+#endif
+
+ // Test whether this MDefinition has exactly one use.
+ bool hasOneUse() const;
+
+ // Test whether this MDefinition has exactly one use.
+ // (only counting MDefinitions, ignoring MResumePoints)
+ bool hasOneDefUse() const;
+
+ // Test whether this MDefinition has at least one use.
+ // (only counting MDefinitions, ignoring MResumePoints)
+ bool hasDefUses() const;
+
+ // Test whether this MDefinition has at least one non-recovered use.
+ // (only counting MDefinitions, ignoring MResumePoints)
+ bool hasLiveDefUses() const;
+
+ bool hasUses() const { return !uses_.empty(); }
+
+ // If this MDefinition has a single use (ignoring MResumePoints), returns that
+ // use's definition. Else returns nullptr.
+ MDefinition* maybeSingleDefUse() const;
+
+ // Returns the most recently added use (ignoring MResumePoints) for this
+ // MDefinition. Returns nullptr if there are no uses. Note that this relies on
+ // addUse adding new uses to the front of the list, and should only be called
+ // during MIR building (before optimization passes make changes to the uses).
+ MDefinition* maybeMostRecentlyAddedDefUse() const;
+
+ void addUse(MUse* use) {
+ MOZ_ASSERT(use->producer() == this);
+ uses_.pushFront(use);
+ }
+ void addUseUnchecked(MUse* use) {
+ MOZ_ASSERT(use->producer() == this);
+ uses_.pushFrontUnchecked(use);
+ }
+ void replaceUse(MUse* old, MUse* now) {
+ MOZ_ASSERT(now->producer() == this);
+ uses_.replace(old, now);
+ }
+
+ // Replace the current instruction by a dominating instruction |dom| in all
+ // uses of the current instruction.
+ void replaceAllUsesWith(MDefinition* dom);
+
+ // Like replaceAllUsesWith, but doesn't set ImplicitlyUsed on |this|'s
+ // operands.
+ void justReplaceAllUsesWith(MDefinition* dom);
+
+  // Replace the current instruction by an optimized-out constant in all uses
+  // of the current instruction. Note that optimized-out constants should not
+  // be observed, and thus should not flow into any computation.
+ [[nodiscard]] bool optimizeOutAllUses(TempAllocator& alloc);
+
+  // Replace the current instruction by a dominating instruction |dom| in all
+  // instructions, but keep the current instruction for resume points and
+  // instructions which are recovered on bailouts.
+ void replaceAllLiveUsesWith(MDefinition* dom);
+
+ void setVirtualRegister(uint32_t vreg) {
+ virtualRegister_ = vreg;
+ setLoweredUnchecked();
+ }
+ uint32_t virtualRegister() const {
+ MOZ_ASSERT(isLowered());
+ return virtualRegister_;
+ }
+
+ public:
+ // Opcode testing and casts.
+ template <typename MIRType>
+ bool is() const {
+ return op() == MIRType::classOpcode;
+ }
+ template <typename MIRType>
+ MIRType* to() {
+ MOZ_ASSERT(this->is<MIRType>());
+ return static_cast<MIRType*>(this);
+ }
+ template <typename MIRType>
+ const MIRType* to() const {
+ MOZ_ASSERT(this->is<MIRType>());
+ return static_cast<const MIRType*>(this);
+ }
+#define OPCODE_CASTS(opcode) \
+ bool is##opcode() const { return this->is<M##opcode>(); } \
+ M##opcode* to##opcode() { return this->to<M##opcode>(); } \
+ const M##opcode* to##opcode() const { return this->to<M##opcode>(); }
+ MIR_OPCODE_LIST(OPCODE_CASTS)
+#undef OPCODE_CASTS
+
+ inline MConstant* maybeConstantValue();
+
+ inline MInstruction* toInstruction();
+ inline const MInstruction* toInstruction() const;
+ bool isInstruction() const { return !isPhi(); }
+
+ virtual bool isControlInstruction() const { return false; }
+ inline MControlInstruction* toControlInstruction();
+
+ void setResultType(MIRType type) { resultType_ = type; }
+ virtual AliasSet getAliasSet() const {
+ // Instructions are effectful by default.
+ return AliasSet::Store(AliasSet::Any);
+ }
+
+#ifdef DEBUG
+ bool hasDefaultAliasSet() const {
+ AliasSet set = getAliasSet();
+ return set.isStore() && set.flags() == AliasSet::Flag::Any;
+ }
+#endif
+
+ MDefinition* dependency() const {
+ if (getAliasSet().isStore()) {
+ return nullptr;
+ }
+ return loadDependency_;
+ }
+ void setDependency(MDefinition* dependency) {
+ MOZ_ASSERT(!getAliasSet().isStore());
+ loadDependency_ = dependency;
+ }
+ bool isEffectful() const { return getAliasSet().isStore(); }
+
+#ifdef DEBUG
+ bool needsResumePoint() const {
+ // Return whether this instruction should have its own resume point.
+ return isEffectful();
+ }
+#endif
+
+ enum class AliasType : uint32_t { NoAlias = 0, MayAlias = 1, MustAlias = 2 };
+ virtual AliasType mightAlias(const MDefinition* store) const {
+ // Return whether this load may depend on the specified store, given
+ // that the alias sets intersect. This may be refined to exclude
+ // possible aliasing in cases where alias set flags are too imprecise.
+ if (!(getAliasSet().flags() & store->getAliasSet().flags())) {
+ return AliasType::NoAlias;
+ }
+ MOZ_ASSERT(!isEffectful() && store->isEffectful());
+ return AliasType::MayAlias;
+ }
+
+ virtual bool canRecoverOnBailout() const { return false; }
+};
+
+// An MUseDefIterator walks over uses in a definition, skipping any use that is
+// not a definition. Items from the use list must not be deleted during
+// iteration.
+class MUseDefIterator {
+ const MDefinition* def_;
+ MUseIterator current_;
+
+ MUseIterator search(MUseIterator start) {
+ MUseIterator i(start);
+ for (; i != def_->usesEnd(); i++) {
+ if (i->consumer()->isDefinition()) {
+ return i;
+ }
+ }
+ return def_->usesEnd();
+ }
+
+ public:
+ explicit MUseDefIterator(const MDefinition* def)
+ : def_(def), current_(search(def->usesBegin())) {}
+
+ explicit operator bool() const { return current_ != def_->usesEnd(); }
+ MUseDefIterator operator++() {
+ MOZ_ASSERT(current_ != def_->usesEnd());
+ ++current_;
+ current_ = search(current_);
+ return *this;
+ }
+ MUseDefIterator operator++(int) {
+ MUseDefIterator old(*this);
+ operator++();
+ return old;
+ }
+ MUse* use() const { return *current_; }
+ MDefinition* def() const { return current_->consumer()->toDefinition(); }
+};
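+
+// Illustrative usage (sketch): walk the consumers of |def| that are
+// themselves definitions.
+//
+//   for (MUseDefIterator iter(def); iter; iter++) {
+//     MDefinition* consumer = iter.def();
+//     // ... inspect |consumer| ...
+//   }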
+
+// Helper class to check that GC pointers embedded in MIR instructions are not
+// in the nursery. Off-thread compilation and nursery GCs can happen in
+// parallel. Nursery pointers are handled with MNurseryObject and the
+// nurseryObjects lists in WarpSnapshot and IonScript.
+//
+// These GC things are rooted through the WarpSnapshot. Compacting GCs cancel
+// off-thread compilations.
+template <typename T>
+class CompilerGCPointer {
+ js::gc::Cell* ptr_;
+
+ public:
+ explicit CompilerGCPointer(T ptr) : ptr_(ptr) {
+ MOZ_ASSERT_IF(ptr, !IsInsideNursery(ptr));
+ MOZ_ASSERT_IF(!CurrentThreadIsIonCompiling(), TlsContext.get()->suppressGC);
+ }
+
+ operator T() const { return static_cast<T>(ptr_); }
+ T operator->() const { return static_cast<T>(ptr_); }
+
+ private:
+ CompilerGCPointer() = delete;
+ CompilerGCPointer(const CompilerGCPointer<T>&) = delete;
+ CompilerGCPointer<T>& operator=(const CompilerGCPointer<T>&) = delete;
+};
+
+using CompilerObject = CompilerGCPointer<JSObject*>;
+using CompilerNativeObject = CompilerGCPointer<NativeObject*>;
+using CompilerFunction = CompilerGCPointer<JSFunction*>;
+using CompilerBaseScript = CompilerGCPointer<BaseScript*>;
+using CompilerPropertyName = CompilerGCPointer<PropertyName*>;
+using CompilerShape = CompilerGCPointer<Shape*>;
+using CompilerGetterSetter = CompilerGCPointer<GetterSetter*>;
+
+// An instruction is an SSA name that is inserted into a basic block's IR
+// stream.
+class MInstruction : public MDefinition, public InlineListNode<MInstruction> {
+ MResumePoint* resumePoint_;
+
+ protected:
+  // All MInstructions use the "MFoo::New(alloc)" notation instead of the
+  // TempObject new operator. This code redefines the new operator as
+  // protected and delegates to the TempObject new operator, which prevents
+  // calls to "new (alloc) MFoo" outside of MFoo members.
+ inline void* operator new(size_t nbytes,
+ TempAllocator::Fallible view) noexcept(true) {
+ return TempObject::operator new(nbytes, view);
+ }
+ inline void* operator new(size_t nbytes, TempAllocator& alloc) {
+ return TempObject::operator new(nbytes, alloc);
+ }
+ template <class T>
+ inline void* operator new(size_t nbytes, T* pos) {
+ return TempObject::operator new(nbytes, pos);
+ }
+
+ public:
+ explicit MInstruction(Opcode op) : MDefinition(op), resumePoint_(nullptr) {}
+
+ // Copying an instruction leaves the resume point as empty.
+ explicit MInstruction(const MInstruction& other)
+ : MDefinition(other), resumePoint_(nullptr) {}
+
+  // Convenience function for replacing a load by the value of the store if
+  // the types match, and boxing the value if they do not.
+ MDefinition* foldsToStore(TempAllocator& alloc);
+
+ void setResumePoint(MResumePoint* resumePoint);
+ void stealResumePoint(MInstruction* other);
+
+ void moveResumePointAsEntry();
+ void clearResumePoint();
+ MResumePoint* resumePoint() const { return resumePoint_; }
+
+ // For instructions which can be cloned with new inputs, with all other
+ // information being the same. clone() implementations do not need to worry
+ // about cloning generic MInstruction/MDefinition state like flags and
+ // resume points.
+ virtual bool canClone() const { return false; }
+ virtual MInstruction* clone(TempAllocator& alloc,
+ const MDefinitionVector& inputs) const {
+ MOZ_CRASH();
+ }
+
+ // Instructions needing to hook into type analysis should return a
+ // TypePolicy.
+ virtual const TypePolicy* typePolicy() = 0;
+ virtual MIRType typePolicySpecialization() = 0;
+};
+
+// Note: GenerateOpcodeFiles.py generates MOpcodesGenerated.h based on the
+// INSTRUCTION_HEADER* macros.
+#define INSTRUCTION_HEADER_WITHOUT_TYPEPOLICY(opcode) \
+ static const Opcode classOpcode = Opcode::opcode; \
+ using MThisOpcode = M##opcode;
+
+#define INSTRUCTION_HEADER(opcode) \
+ INSTRUCTION_HEADER_WITHOUT_TYPEPOLICY(opcode) \
+ virtual const TypePolicy* typePolicy() override; \
+ virtual MIRType typePolicySpecialization() override;
+
+#define ALLOW_CLONE(typename) \
+ bool canClone() const override { return true; } \
+ MInstruction* clone(TempAllocator& alloc, const MDefinitionVector& inputs) \
+ const override { \
+ MInstruction* res = new (alloc) typename(*this); \
+ for (size_t i = 0; i < numOperands(); i++) \
+ res->replaceOperand(i, inputs[i]); \
+ return res; \
+ }
+
+// Adds MFoo::New functions which mirror the arguments of the constructors.
+// Opcodes using this macro can be constructed with a TempAllocator, or with
+// the fallible version of the TempAllocator.
+#define TRIVIAL_NEW_WRAPPERS \
+ template <typename... Args> \
+ static MThisOpcode* New(TempAllocator& alloc, Args&&... args) { \
+ return new (alloc) MThisOpcode(std::forward<Args>(args)...); \
+ } \
+ template <typename... Args> \
+ static MThisOpcode* New(TempAllocator::Fallible alloc, Args&&... args) { \
+ return new (alloc) MThisOpcode(std::forward<Args>(args)...); \
+ }
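+
+// For example, MLimitedTruncate (below) uses TRIVIAL_NEW_WRAPPERS, so callers
+// can write |MLimitedTruncate::New(alloc, input, limit)| and the arguments
+// are forwarded to its private constructor.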
+
+// These macros are syntactic sugar for writing getOperand accessors. They
+// are meant to be used in the body of MIR instructions as follows:
+//
+// public:
+// INSTRUCTION_HEADER(Foo)
+// NAMED_OPERANDS((0, lhs), (1, rhs))
+//
+// The above example defines two accessors: one named "lhs" accessing the
+// first operand, and one named "rhs" accessing the second operand.
+#define NAMED_OPERAND_ACCESSOR(Index, Name) \
+ MDefinition* Name() const { return getOperand(Index); }
+#define NAMED_OPERAND_ACCESSOR_APPLY(Args) NAMED_OPERAND_ACCESSOR Args
+#define NAMED_OPERANDS(...) \
+ MOZ_FOR_EACH(NAMED_OPERAND_ACCESSOR_APPLY, (), (__VA_ARGS__))
+
+template <size_t Arity>
+class MAryInstruction : public MInstruction {
+ mozilla::Array<MUse, Arity> operands_;
+
+ protected:
+ MUse* getUseFor(size_t index) final { return &operands_[index]; }
+ const MUse* getUseFor(size_t index) const final { return &operands_[index]; }
+ void initOperand(size_t index, MDefinition* operand) {
+ operands_[index].init(operand, this);
+ }
+
+ public:
+ MDefinition* getOperand(size_t index) const final {
+ return operands_[index].producer();
+ }
+ size_t numOperands() const final { return Arity; }
+#ifdef DEBUG
+ static const size_t staticNumOperands = Arity;
+#endif
+ size_t indexOf(const MUse* u) const final {
+ MOZ_ASSERT(u >= &operands_[0]);
+ MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
+ return u - &operands_[0];
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final {
+ operands_[index].replaceProducer(operand);
+ }
+
+ explicit MAryInstruction(Opcode op) : MInstruction(op) {}
+
+ explicit MAryInstruction(const MAryInstruction<Arity>& other)
+ : MInstruction(other) {
+ for (int i = 0; i < (int)Arity;
+ i++) { // N.B. use |int| to avoid warnings when Arity == 0
+ operands_[i].init(other.operands_[i].producer(), this);
+ }
+ }
+};
+
+class MNullaryInstruction : public MAryInstruction<0>,
+ public NoTypePolicy::Data {
+ protected:
+ explicit MNullaryInstruction(Opcode op) : MAryInstruction(op) {}
+
+ HashNumber valueHash() const override;
+};
+
+class MUnaryInstruction : public MAryInstruction<1> {
+ protected:
+ MUnaryInstruction(Opcode op, MDefinition* ins) : MAryInstruction(op) {
+ initOperand(0, ins);
+ }
+
+ HashNumber valueHash() const override;
+
+ public:
+ NAMED_OPERANDS((0, input))
+};
+
+class MBinaryInstruction : public MAryInstruction<2> {
+ protected:
+ MBinaryInstruction(Opcode op, MDefinition* left, MDefinition* right)
+ : MAryInstruction(op) {
+ initOperand(0, left);
+ initOperand(1, right);
+ }
+
+ public:
+ NAMED_OPERANDS((0, lhs), (1, rhs))
+
+ protected:
+ HashNumber valueHash() const override;
+
+ bool binaryCongruentTo(const MDefinition* ins) const {
+ if (op() != ins->op()) {
+ return false;
+ }
+
+ if (type() != ins->type()) {
+ return false;
+ }
+
+ if (isEffectful() || ins->isEffectful()) {
+ return false;
+ }
+
+ const MDefinition* left = getOperand(0);
+ const MDefinition* right = getOperand(1);
+ if (isCommutative() && left->id() > right->id()) {
+ std::swap(left, right);
+ }
+
+ const MBinaryInstruction* bi = static_cast<const MBinaryInstruction*>(ins);
+ const MDefinition* insLeft = bi->getOperand(0);
+ const MDefinition* insRight = bi->getOperand(1);
+ if (bi->isCommutative() && insLeft->id() > insRight->id()) {
+ std::swap(insLeft, insRight);
+ }
+
+ return left == insLeft && right == insRight;
+ }
+
+ public:
+  // Return whether the operands to this instruction are both unsigned.
+ static bool unsignedOperands(MDefinition* left, MDefinition* right);
+ bool unsignedOperands();
+
+ // Replace any wrapping operands with the underlying int32 operands
+ // in case of unsigned operands.
+ void replaceWithUnsignedOperands();
+};
+
+class MTernaryInstruction : public MAryInstruction<3> {
+ protected:
+ MTernaryInstruction(Opcode op, MDefinition* first, MDefinition* second,
+ MDefinition* third)
+ : MAryInstruction(op) {
+ initOperand(0, first);
+ initOperand(1, second);
+ initOperand(2, third);
+ }
+
+ HashNumber valueHash() const override;
+};
+
+class MQuaternaryInstruction : public MAryInstruction<4> {
+ protected:
+ MQuaternaryInstruction(Opcode op, MDefinition* first, MDefinition* second,
+ MDefinition* third, MDefinition* fourth)
+ : MAryInstruction(op) {
+ initOperand(0, first);
+ initOperand(1, second);
+ initOperand(2, third);
+ initOperand(3, fourth);
+ }
+
+ HashNumber valueHash() const override;
+};
+
+template <class T>
+class MVariadicT : public T {
+ FixedList<MUse> operands_;
+
+ protected:
+ explicit MVariadicT(typename T::Opcode op) : T(op) {}
+ [[nodiscard]] bool init(TempAllocator& alloc, size_t length) {
+ return operands_.init(alloc, length);
+ }
+ void initOperand(size_t index, MDefinition* operand) {
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ operands_[index].initUnchecked(operand, this);
+ }
+ MUse* getUseFor(size_t index) final { return &operands_[index]; }
+ const MUse* getUseFor(size_t index) const final { return &operands_[index]; }
+
+  // The MWasmCallBase mixin performs initialization for its subclasses.
+ friend class MWasmCallBase;
+
+ public:
+ // Will assert if called before initialization.
+ MDefinition* getOperand(size_t index) const final {
+ return operands_[index].producer();
+ }
+ size_t numOperands() const final { return operands_.length(); }
+ size_t indexOf(const MUse* u) const final {
+ MOZ_ASSERT(u >= &operands_[0]);
+ MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
+ return u - &operands_[0];
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final {
+ operands_[index].replaceProducer(operand);
+ }
+};
+
+// An instruction with a variable number of operands. Note that for variadic
+// instructions the MFoo::New functions fallibly initialize the operands_
+// array, so their result must be checked for OOM.
+using MVariadicInstruction = MVariadicT<MInstruction>;
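+
+// Illustrative pattern (sketch): the variadic New() functions defined earlier
+// in MIR.cpp (e.g. MGetInlinedArgument::New) return nullptr when init()
+// fails, so callers must null-check the result.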
+
+MIR_OPCODE_CLASS_GENERATED
+
+// Truncation barrier. This is intended for protecting its input against
+// follow-up truncation optimizations.
+class MLimitedTruncate : public MUnaryInstruction,
+ public ConvertToInt32Policy<0>::Data {
+ TruncateKind truncate_;
+ TruncateKind truncateLimit_;
+
+ MLimitedTruncate(MDefinition* input, TruncateKind limit)
+ : MUnaryInstruction(classOpcode, input),
+ truncate_(TruncateKind::NoTruncate),
+ truncateLimit_(limit) {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(LimitedTruncate)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+ TruncateKind truncateKind() const { return truncate_; }
+ void setTruncateKind(TruncateKind kind) { truncate_ = kind; }
+};
+
+// A constant js::Value.
+class MConstant : public MNullaryInstruction {
+ struct Payload {
+ union {
+ bool b;
+ int32_t i32;
+ int64_t i64;
+ intptr_t iptr;
+ float f;
+ double d;
+ JSString* str;
+ JS::Symbol* sym;
+ BigInt* bi;
+ JSObject* obj;
+ Shape* shape;
+ uint64_t asBits;
+ };
+ Payload() : asBits(0) {}
+ };
+
+ Payload payload_;
+
+ static_assert(sizeof(Payload) == sizeof(uint64_t),
+ "asBits must be big enough for all payload bits");
+
+#ifdef DEBUG
+ void assertInitializedPayload() const;
+#else
+ void assertInitializedPayload() const {}
+#endif
+
+ MConstant(TempAllocator& alloc, const Value& v);
+ explicit MConstant(JSObject* obj);
+ explicit MConstant(Shape* shape);
+ explicit MConstant(float f);
+ explicit MConstant(MIRType type, int64_t i);
+
+ public:
+ INSTRUCTION_HEADER(Constant)
+ static MConstant* New(TempAllocator& alloc, const Value& v);
+ static MConstant* New(TempAllocator::Fallible alloc, const Value& v);
+ static MConstant* New(TempAllocator& alloc, const Value& v, MIRType type);
+ static MConstant* NewFloat32(TempAllocator& alloc, double d);
+ static MConstant* NewInt64(TempAllocator& alloc, int64_t i);
+ static MConstant* NewIntPtr(TempAllocator& alloc, intptr_t i);
+ static MConstant* NewObject(TempAllocator& alloc, JSObject* v);
+ static MConstant* NewShape(TempAllocator& alloc, Shape* s);
+ static MConstant* Copy(TempAllocator& alloc, MConstant* src) {
+ return new (alloc) MConstant(*src);
+ }
+
+ // Try to convert this constant to boolean, similar to js::ToBoolean.
+ // Returns false if the type is MIRType::Magic* or MIRType::Object.
+ [[nodiscard]] bool valueToBoolean(bool* res) const;
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override;
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+
+ bool canProduceFloat32() const override;
+
+ ALLOW_CLONE(MConstant)
+
+ bool equals(const MConstant* other) const {
+ assertInitializedPayload();
+ return type() == other->type() && payload_.asBits == other->payload_.asBits;
+ }
+
+ bool toBoolean() const {
+ MOZ_ASSERT(type() == MIRType::Boolean);
+ return payload_.b;
+ }
+ int32_t toInt32() const {
+ MOZ_ASSERT(type() == MIRType::Int32);
+ return payload_.i32;
+ }
+ int64_t toInt64() const {
+ MOZ_ASSERT(type() == MIRType::Int64);
+ return payload_.i64;
+ }
+ intptr_t toIntPtr() const {
+ MOZ_ASSERT(type() == MIRType::IntPtr);
+ return payload_.iptr;
+ }
+ bool isInt32(int32_t i) const {
+ return type() == MIRType::Int32 && payload_.i32 == i;
+ }
+ bool isInt64(int64_t i) const {
+ return type() == MIRType::Int64 && payload_.i64 == i;
+ }
+ const double& toDouble() const {
+ MOZ_ASSERT(type() == MIRType::Double);
+ return payload_.d;
+ }
+ const float& toFloat32() const {
+ MOZ_ASSERT(type() == MIRType::Float32);
+ return payload_.f;
+ }
+ JSString* toString() const {
+ MOZ_ASSERT(type() == MIRType::String);
+ return payload_.str;
+ }
+ JS::Symbol* toSymbol() const {
+ MOZ_ASSERT(type() == MIRType::Symbol);
+ return payload_.sym;
+ }
+ BigInt* toBigInt() const {
+ MOZ_ASSERT(type() == MIRType::BigInt);
+ return payload_.bi;
+ }
+ JSObject& toObject() const {
+ MOZ_ASSERT(type() == MIRType::Object);
+ return *payload_.obj;
+ }
+ JSObject* toObjectOrNull() const {
+ if (type() == MIRType::Object) {
+ return payload_.obj;
+ }
+ MOZ_ASSERT(type() == MIRType::Null);
+ return nullptr;
+ }
+ Shape* toShape() const {
+ MOZ_ASSERT(type() == MIRType::Shape);
+ return payload_.shape;
+ }
+
+ bool isTypeRepresentableAsDouble() const {
+ return IsTypeRepresentableAsDouble(type());
+ }
+ double numberToDouble() const {
+ MOZ_ASSERT(isTypeRepresentableAsDouble());
+ if (type() == MIRType::Int32) {
+ return toInt32();
+ }
+ if (type() == MIRType::Double) {
+ return toDouble();
+ }
+ return toFloat32();
+ }
+
+ // Convert this constant to a js::Value. Float32 constants will be stored
+ // as DoubleValue and NaNs are canonicalized. Callers must be careful: not
+ // all constants can be represented by js::Value (wasm supports int64).
+ Value toJSValue() const;
+};
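+
+// Usage sketch, assuming a TempAllocator& `alloc` and a JSObject*
+// `someObject` are in scope (hypothetical names):
+//
+//   MConstant* c32 = MConstant::New(alloc, Int32Value(42));
+//   MConstant* c64 = MConstant::NewInt64(alloc, int64_t(1) << 40);
+//   MConstant* cobj = MConstant::NewObject(alloc, someObject);
+//
+// Int64/IntPtr constants have no js::Value form, so toJSValue() is only
+// meaningful for the JS-representable types (see the comment above).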
+
+class MWasmNullConstant : public MNullaryInstruction {
+ explicit MWasmNullConstant() : MNullaryInstruction(classOpcode) {
+ setResultType(MIRType::RefOrNull);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmNullConstant)
+ TRIVIAL_NEW_WRAPPERS
+
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override {
+ return ins->isWasmNullConstant();
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ ALLOW_CLONE(MWasmNullConstant)
+};
+
+// Floating-point value as created by wasm. Just a constant value, used to
+// effectively inhibit all the MIR optimizations. This uses the same LIR nodes
+// as an MConstant of the same type would.
+class MWasmFloatConstant : public MNullaryInstruction {
+ union {
+ float f32_;
+ double f64_;
+#ifdef ENABLE_WASM_SIMD
+ int8_t s128_[16];
+ uint64_t bits_[2];
+#else
+ uint64_t bits_[1];
+#endif
+ } u;
+
+ explicit MWasmFloatConstant(MIRType type) : MNullaryInstruction(classOpcode) {
+ u.bits_[0] = 0;
+#ifdef ENABLE_WASM_SIMD
+ u.bits_[1] = 0;
+#endif
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmFloatConstant)
+
+ static MWasmFloatConstant* NewDouble(TempAllocator& alloc, double d) {
+ auto* ret = new (alloc) MWasmFloatConstant(MIRType::Double);
+ ret->u.f64_ = d;
+ return ret;
+ }
+
+ static MWasmFloatConstant* NewFloat32(TempAllocator& alloc, float f) {
+ auto* ret = new (alloc) MWasmFloatConstant(MIRType::Float32);
+ ret->u.f32_ = f;
+ return ret;
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ static MWasmFloatConstant* NewSimd128(TempAllocator& alloc,
+ const SimdConstant& s) {
+ auto* ret = new (alloc) MWasmFloatConstant(MIRType::Simd128);
+ memcpy(ret->u.s128_, s.bytes(), 16);
+ return ret;
+ }
+#endif
+
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override;
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ const double& toDouble() const {
+ MOZ_ASSERT(type() == MIRType::Double);
+ return u.f64_;
+ }
+ const float& toFloat32() const {
+ MOZ_ASSERT(type() == MIRType::Float32);
+ return u.f32_;
+ }
+#ifdef ENABLE_WASM_SIMD
+ const SimdConstant toSimd128() const {
+ MOZ_ASSERT(type() == MIRType::Simd128);
+ return SimdConstant::CreateX16(u.s128_);
+ }
+#endif
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[64];
+ switch (type()) {
+ case MIRType::Float32:
+ SprintfLiteral(buf, "f32{%e}", (double)u.f32_);
+ break;
+ case MIRType::Double:
+ SprintfLiteral(buf, "f64{%e}", u.f64_);
+ break;
+# ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ SprintfLiteral(buf, "v128{[1]=%016llx:[0]=%016llx}",
+ (unsigned long long int)u.bits_[1],
+ (unsigned long long int)u.bits_[0]);
+ break;
+# endif
+ default:
+ SprintfLiteral(buf, "!!getExtras: missing case!!");
+ break;
+ }
+ extras->add(buf);
+ }
+#endif
+};
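+
+// Usage sketch, assuming a TempAllocator& `alloc` is in scope:
+//
+//   MWasmFloatConstant* d = MWasmFloatConstant::NewDouble(alloc, 3.0);
+//   MWasmFloatConstant* f = MWasmFloatConstant::NewFloat32(alloc, 1.5f);
+//
+// Per the class comment, these are deliberately opaque to MIR optimizations,
+// even though they lower to the same LIR nodes as an MConstant of the same
+// type.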
+
+class MParameter : public MNullaryInstruction {
+ int32_t index_;
+
+ explicit MParameter(int32_t index)
+ : MNullaryInstruction(classOpcode), index_(index) {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Parameter)
+ TRIVIAL_NEW_WRAPPERS
+
+ static const int32_t THIS_SLOT = -1;
+ int32_t index() const { return index_; }
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override;
+};
+
+class MControlInstruction : public MInstruction {
+ protected:
+ explicit MControlInstruction(Opcode op) : MInstruction(op) {}
+
+ public:
+ virtual size_t numSuccessors() const = 0;
+ virtual MBasicBlock* getSuccessor(size_t i) const = 0;
+ virtual void replaceSuccessor(size_t i, MBasicBlock* successor) = 0;
+
+ void initSuccessor(size_t i, MBasicBlock* successor) {
+ MOZ_ASSERT(!getSuccessor(i));
+ replaceSuccessor(i, successor);
+ }
+
+ bool isControlInstruction() const override { return true; }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+};
+
+class MTableSwitch final : public MControlInstruction,
+ public NoFloatPolicy<0>::Data {
+ // The successors of the tableswitch:
+ // - Successor 0 = the default case
+ // - Successors 1 and higher = the cases
+ Vector<MBasicBlock*, 0, JitAllocPolicy> successors_;
+ // Index into successors_ sorted on case index
+ Vector<size_t, 0, JitAllocPolicy> cases_;
+
+ MUse operand_;
+ int32_t low_;
+ int32_t high_;
+
+ void initOperand(size_t index, MDefinition* operand) {
+ MOZ_ASSERT(index == 0);
+ operand_.init(operand, this);
+ }
+
+ MTableSwitch(TempAllocator& alloc, MDefinition* ins, int32_t low,
+ int32_t high)
+ : MControlInstruction(classOpcode),
+ successors_(alloc),
+ cases_(alloc),
+ low_(low),
+ high_(high) {
+ initOperand(0, ins);
+ }
+
+ protected:
+ MUse* getUseFor(size_t index) override {
+ MOZ_ASSERT(index == 0);
+ return &operand_;
+ }
+
+ const MUse* getUseFor(size_t index) const override {
+ MOZ_ASSERT(index == 0);
+ return &operand_;
+ }
+
+ public:
+ INSTRUCTION_HEADER(TableSwitch)
+
+ static MTableSwitch* New(TempAllocator& alloc, MDefinition* ins, int32_t low,
+ int32_t high) {
+ return new (alloc) MTableSwitch(alloc, ins, low, high);
+ }
+
+ size_t numSuccessors() const override { return successors_.length(); }
+
+ [[nodiscard]] bool addSuccessor(MBasicBlock* successor, size_t* index) {
+ MOZ_ASSERT(successors_.length() < (size_t)(high_ - low_ + 2));
+ MOZ_ASSERT(!successors_.empty());
+ *index = successors_.length();
+ return successors_.append(successor);
+ }
+
+ MBasicBlock* getSuccessor(size_t i) const override {
+ MOZ_ASSERT(i < numSuccessors());
+ return successors_[i];
+ }
+
+ void replaceSuccessor(size_t i, MBasicBlock* successor) override {
+ MOZ_ASSERT(i < numSuccessors());
+ successors_[i] = successor;
+ }
+
+ int32_t low() const { return low_; }
+
+ int32_t high() const { return high_; }
+
+ MBasicBlock* getDefault() const { return getSuccessor(0); }
+
+ MBasicBlock* getCase(size_t i) const { return getSuccessor(cases_[i]); }
+
+ [[nodiscard]] bool addDefault(MBasicBlock* block, size_t* index = nullptr) {
+ MOZ_ASSERT(successors_.empty());
+ if (index) {
+ *index = 0;
+ }
+ return successors_.append(block);
+ }
+
+ [[nodiscard]] bool addCase(size_t successorIndex) {
+ return cases_.append(successorIndex);
+ }
+
+ size_t numCases() const { return high() - low() + 1; }
+
+ MDefinition* getOperand(size_t index) const override {
+ MOZ_ASSERT(index == 0);
+ return operand_.producer();
+ }
+
+ size_t numOperands() const override { return 1; }
+
+ size_t indexOf(const MUse* u) const final {
+ MOZ_ASSERT(u == getUseFor(0));
+ return 0;
+ }
+
+ void replaceOperand(size_t index, MDefinition* operand) final {
+ MOZ_ASSERT(index == 0);
+ operand_.replaceProducer(operand);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
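+
+// Sketch of how the successor list is populated, assuming `alloc`, an Int32
+// MDefinition* `index`, and MBasicBlock* `defaultBlock`/`caseBlock` are in
+// scope (hypothetical names):
+//
+//   MTableSwitch* ts = MTableSwitch::New(alloc, index, /* low = */ 0,
+//                                        /* high = */ 0);
+//   if (!ts->addDefault(defaultBlock)) return false;       // successor 0
+//   size_t succIndex;
+//   if (!ts->addSuccessor(caseBlock, &succIndex)) return false;
+//   if (!ts->addCase(succIndex)) return false;             // case 0
+//
+// getDefault() then returns successor 0, and getCase(0) maps through cases_
+// to the successor that was added for the case.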
+
+template <size_t Arity, size_t Successors>
+class MAryControlInstruction : public MControlInstruction {
+ mozilla::Array<MUse, Arity> operands_;
+ mozilla::Array<MBasicBlock*, Successors> successors_;
+
+ protected:
+ explicit MAryControlInstruction(Opcode op) : MControlInstruction(op) {}
+ void setSuccessor(size_t index, MBasicBlock* successor) {
+ successors_[index] = successor;
+ }
+
+ MUse* getUseFor(size_t index) final { return &operands_[index]; }
+ const MUse* getUseFor(size_t index) const final { return &operands_[index]; }
+ void initOperand(size_t index, MDefinition* operand) {
+ operands_[index].init(operand, this);
+ }
+
+ public:
+ MDefinition* getOperand(size_t index) const final {
+ return operands_[index].producer();
+ }
+ size_t numOperands() const final { return Arity; }
+ size_t indexOf(const MUse* u) const final {
+ MOZ_ASSERT(u >= &operands_[0]);
+ MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
+ return u - &operands_[0];
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final {
+ operands_[index].replaceProducer(operand);
+ }
+ size_t numSuccessors() const final { return Successors; }
+ MBasicBlock* getSuccessor(size_t i) const final { return successors_[i]; }
+ void replaceSuccessor(size_t i, MBasicBlock* succ) final {
+ successors_[i] = succ;
+ }
+};
+
+template <size_t Successors>
+class MVariadicControlInstruction : public MVariadicT<MControlInstruction> {
+ mozilla::Array<MBasicBlock*, Successors> successors_;
+
+ protected:
+ explicit MVariadicControlInstruction(Opcode op)
+ : MVariadicT<MControlInstruction>(op) {}
+ void setSuccessor(size_t index, MBasicBlock* successor) {
+ successors_[index] = successor;
+ }
+
+ public:
+ size_t numSuccessors() const final { return Successors; }
+ MBasicBlock* getSuccessor(size_t i) const final { return successors_[i]; }
+ void replaceSuccessor(size_t i, MBasicBlock* succ) final {
+ successors_[i] = succ;
+ }
+};
+
+// Jump to the start of another basic block.
+class MGoto : public MAryControlInstruction<0, 1>, public NoTypePolicy::Data {
+ explicit MGoto(MBasicBlock* target) : MAryControlInstruction(classOpcode) {
+ setSuccessor(TargetIndex, target);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Goto)
+ static MGoto* New(TempAllocator& alloc, MBasicBlock* target);
+ static MGoto* New(TempAllocator::Fallible alloc, MBasicBlock* target);
+
+ // Variant that may patch the target later.
+ static MGoto* New(TempAllocator& alloc);
+
+ static constexpr size_t TargetIndex = 0;
+
+ MBasicBlock* target() { return getSuccessor(TargetIndex); }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[64];
+ SprintfLiteral(buf, "Block%u", GetMBasicBlockId(target()));
+ extras->add(buf);
+ }
+#endif
+};
+
+// Tests if the input instruction evaluates to true or false, and jumps to the
+// start of a corresponding basic block.
+class MTest : public MAryControlInstruction<1, 2>, public TestPolicy::Data {
+ // It is allowable to specify `trueBranch` or `falseBranch` as nullptr and
+ // patch it in later.
+ MTest(MDefinition* ins, MBasicBlock* trueBranch, MBasicBlock* falseBranch)
+ : MAryControlInstruction(classOpcode) {
+ initOperand(0, ins);
+ setSuccessor(TrueBranchIndex, trueBranch);
+ setSuccessor(FalseBranchIndex, falseBranch);
+ }
+
+ TypeDataList observedTypes_;
+
+ public:
+ INSTRUCTION_HEADER(Test)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, input))
+
+ const TypeDataList& observedTypes() const { return observedTypes_; }
+ void setObservedTypes(const TypeDataList& observed) {
+ observedTypes_ = observed;
+ }
+
+ static constexpr size_t TrueBranchIndex = 0;
+ static constexpr size_t FalseBranchIndex = 1;
+
+ MBasicBlock* ifTrue() const { return getSuccessor(TrueBranchIndex); }
+ MBasicBlock* ifFalse() const { return getSuccessor(FalseBranchIndex); }
+ MBasicBlock* branchSuccessor(BranchDirection dir) const {
+ return (dir == TRUE_BRANCH) ? ifTrue() : ifFalse();
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ MDefinition* foldsDoubleNegation(TempAllocator& alloc);
+ MDefinition* foldsConstant(TempAllocator& alloc);
+ MDefinition* foldsTypes(TempAllocator& alloc);
+ MDefinition* foldsNeedlessControlFlow(TempAllocator& alloc);
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[64];
+ SprintfLiteral(buf, "true->Block%u false->Block%u",
+ GetMBasicBlockId(ifTrue()), GetMBasicBlockId(ifFalse()));
+ extras->add(buf);
+ }
+#endif
+};
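+
+// Usage sketch, assuming `alloc`, an MDefinition* `cond`, and MBasicBlock*
+// `thenBlock`/`elseBlock` are in scope (hypothetical names):
+//
+//   MTest* test = MTest::New(alloc, cond, thenBlock, elseBlock);
+//   // test->ifTrue() == thenBlock, test->ifFalse() == elseBlock
+//
+// Per the constructor comment, either branch may be passed as nullptr and
+// patched in later (e.g. via initSuccessor()).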
+
+// Returns from this function to the previous caller.
+class MReturn : public MAryControlInstruction<1, 0>,
+ public BoxInputsPolicy::Data {
+ explicit MReturn(MDefinition* ins) : MAryControlInstruction(classOpcode) {
+ initOperand(0, ins);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Return)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, input))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MNewArray : public MUnaryInstruction, public NoTypePolicy::Data {
+ private:
+ // Number of elements to allocate for the array.
+ uint32_t length_;
+
+ // Heap where the array should be allocated.
+ gc::Heap initialHeap_;
+
+ bool vmCall_;
+
+ MNewArray(uint32_t length, MConstant* templateConst, gc::Heap initialHeap,
+ bool vmCall = false);
+
+ public:
+ INSTRUCTION_HEADER(NewArray)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MNewArray* NewVM(TempAllocator& alloc, uint32_t length,
+ MConstant* templateConst, gc::Heap initialHeap) {
+ return new (alloc) MNewArray(length, templateConst, initialHeap, true);
+ }
+
+ uint32_t length() const { return length_; }
+
+ JSObject* templateObject() const {
+ return getOperand(0)->toConstant()->toObjectOrNull();
+ }
+
+ gc::Heap initialHeap() const { return initialHeap_; }
+
+ bool isVMCall() const { return vmCall_; }
+
+ // NewArray is marked as non-effectful because all our allocations are
+ // either lazy when we are using "new Array(length)" or bounded by the
+ // script or the stack size when we are using "new Array(...)" or "[...]"
+ // notations. So we might have to allocate the array twice if we bail
+ // during the computation of the first element of the square bracket
+ // notation.
+ virtual AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ // The template object can safely be used in the recover instruction
+ // because it can never be mutated by any other function execution.
+ return templateObject() != nullptr;
+ }
+};
+
+class MNewTypedArray : public MUnaryInstruction, public NoTypePolicy::Data {
+ gc::Heap initialHeap_;
+
+ MNewTypedArray(MConstant* templateConst, gc::Heap initialHeap)
+ : MUnaryInstruction(classOpcode, templateConst),
+ initialHeap_(initialHeap) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewTypedArray)
+ TRIVIAL_NEW_WRAPPERS
+
+ TypedArrayObject* templateObject() const {
+ return &getOperand(0)->toConstant()->toObject().as<TypedArrayObject>();
+ }
+
+ gc::Heap initialHeap() const { return initialHeap_; }
+
+ virtual AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+class MNewObject : public MUnaryInstruction, public NoTypePolicy::Data {
+ public:
+ enum Mode { ObjectLiteral, ObjectCreate };
+
+ private:
+ gc::Heap initialHeap_;
+ Mode mode_;
+ bool vmCall_;
+
+ MNewObject(MConstant* templateConst, gc::Heap initialHeap, Mode mode,
+ bool vmCall = false)
+ : MUnaryInstruction(classOpcode, templateConst),
+ initialHeap_(initialHeap),
+ mode_(mode),
+ vmCall_(vmCall) {
+ if (mode == ObjectLiteral) {
+ MOZ_ASSERT(!templateObject());
+ } else {
+ MOZ_ASSERT(templateObject());
+ }
+ setResultType(MIRType::Object);
+
+ // The constant is kept separate in an MConstant; this way we can safely
+ // mark it during GC if we recover the object allocation. Otherwise, by
+ // making it emittedAtUses, we do not produce a register allocation for
+ // it and instead inline its content into the code produced by the
+ // CodeGenerator.
+ if (templateConst->toConstant()->type() == MIRType::Object) {
+ templateConst->setEmittedAtUses();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MNewObject* NewVM(TempAllocator& alloc, MConstant* templateConst,
+ gc::Heap initialHeap, Mode mode) {
+ return new (alloc) MNewObject(templateConst, initialHeap, mode, true);
+ }
+
+ Mode mode() const { return mode_; }
+
+ JSObject* templateObject() const {
+ return getOperand(0)->toConstant()->toObjectOrNull();
+ }
+
+ gc::Heap initialHeap() const { return initialHeap_; }
+
+ bool isVMCall() const { return vmCall_; }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ // The template object can safely be used in the recover instruction
+ // because it can never be mutated by any other function execution.
+ return templateObject() != nullptr;
+ }
+};
+
+class MNewPlainObject : public MUnaryInstruction, public NoTypePolicy::Data {
+ private:
+ uint32_t numFixedSlots_;
+ uint32_t numDynamicSlots_;
+ gc::AllocKind allocKind_;
+ gc::Heap initialHeap_;
+
+ MNewPlainObject(MConstant* shapeConst, uint32_t numFixedSlots,
+ uint32_t numDynamicSlots, gc::AllocKind allocKind,
+ gc::Heap initialHeap)
+ : MUnaryInstruction(classOpcode, shapeConst),
+ numFixedSlots_(numFixedSlots),
+ numDynamicSlots_(numDynamicSlots),
+ allocKind_(allocKind),
+ initialHeap_(initialHeap) {
+ setResultType(MIRType::Object);
+
+ // The shape constant is kept separate in an MConstant. This way we can
+ // safely mark it during GC if we recover the object allocation. Otherwise,
+ // by making it emittedAtUses, we do not produce a register allocation for it
+ // and instead inline its content into the code produced by the CodeGenerator.
+ MOZ_ASSERT(shapeConst->toConstant()->type() == MIRType::Shape);
+ shapeConst->setEmittedAtUses();
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewPlainObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ const Shape* shape() const { return getOperand(0)->toConstant()->toShape(); }
+
+ uint32_t numFixedSlots() const { return numFixedSlots_; }
+ uint32_t numDynamicSlots() const { return numDynamicSlots_; }
+ gc::AllocKind allocKind() const { return allocKind_; }
+ gc::Heap initialHeap() const { return initialHeap_; }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MNewArrayObject : public MUnaryInstruction, public NoTypePolicy::Data {
+ private:
+ uint32_t length_;
+ gc::Heap initialHeap_;
+
+ MNewArrayObject(TempAllocator& alloc, MConstant* shapeConst, uint32_t length,
+ gc::Heap initialHeap)
+ : MUnaryInstruction(classOpcode, shapeConst),
+ length_(length),
+ initialHeap_(initialHeap) {
+ setResultType(MIRType::Object);
+ MOZ_ASSERT(shapeConst->toConstant()->type() == MIRType::Shape);
+ shapeConst->setEmittedAtUses();
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewArrayObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MNewArrayObject* New(TempAllocator& alloc, MConstant* shapeConst,
+ uint32_t length, gc::Heap initialHeap) {
+ return new (alloc) MNewArrayObject(alloc, shapeConst, length, initialHeap);
+ }
+
+ const Shape* shape() const { return getOperand(0)->toConstant()->toShape(); }
+
+ // See MNewArray::getAliasSet comment.
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ uint32_t length() const { return length_; }
+ gc::Heap initialHeap() const { return initialHeap_; }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+class MNewIterator : public MUnaryInstruction, public NoTypePolicy::Data {
+ public:
+ enum Type {
+ ArrayIterator,
+ StringIterator,
+ RegExpStringIterator,
+ };
+
+ private:
+ Type type_;
+
+ MNewIterator(MConstant* templateConst, Type type)
+ : MUnaryInstruction(classOpcode, templateConst), type_(type) {
+ setResultType(MIRType::Object);
+ templateConst->setEmittedAtUses();
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewIterator)
+ TRIVIAL_NEW_WRAPPERS
+
+ Type type() const { return type_; }
+
+ JSObject* templateObject() {
+ return getOperand(0)->toConstant()->toObjectOrNull();
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+// Represent the content of all slots of an object. This instruction is not
+// lowered and is not used to generate code.
+class MObjectState : public MVariadicInstruction,
+ public NoFloatPolicyAfter<1>::Data {
+ private:
+ uint32_t numSlots_;
+ uint32_t numFixedSlots_;
+
+ explicit MObjectState(JSObject* templateObject);
+ explicit MObjectState(const Shape* shape);
+ explicit MObjectState(MObjectState* state);
+
+ [[nodiscard]] bool init(TempAllocator& alloc, MDefinition* obj);
+
+ void initSlot(uint32_t slot, MDefinition* def) { initOperand(slot + 1, def); }
+
+ public:
+ INSTRUCTION_HEADER(ObjectState)
+ NAMED_OPERANDS((0, object))
+
+ // Return the template object of any object creation which can be recovered
+ // on bailout.
+ static JSObject* templateObjectOf(MDefinition* obj);
+
+ static MObjectState* New(TempAllocator& alloc, MDefinition* obj);
+ static MObjectState* Copy(TempAllocator& alloc, MObjectState* state);
+
+ // As we might read uninitialized properties, we have to copy the
+ // initial values from the template object.
+ void initFromTemplateObject(TempAllocator& alloc, MDefinition* undefinedVal);
+
+ size_t numFixedSlots() const { return numFixedSlots_; }
+ size_t numSlots() const { return numSlots_; }
+
+ MDefinition* getSlot(uint32_t slot) const { return getOperand(slot + 1); }
+ void setSlot(uint32_t slot, MDefinition* def) {
+ replaceOperand(slot + 1, def);
+ }
+
+ bool hasFixedSlot(uint32_t slot) const {
+ return slot < numSlots() && slot < numFixedSlots();
+ }
+ MDefinition* getFixedSlot(uint32_t slot) const {
+ MOZ_ASSERT(slot < numFixedSlots());
+ return getSlot(slot);
+ }
+ void setFixedSlot(uint32_t slot, MDefinition* def) {
+ MOZ_ASSERT(slot < numFixedSlots());
+ setSlot(slot, def);
+ }
+
+ bool hasDynamicSlot(uint32_t slot) const {
+ return numFixedSlots() < numSlots() && slot < numSlots() - numFixedSlots();
+ }
+ MDefinition* getDynamicSlot(uint32_t slot) const {
+ return getSlot(slot + numFixedSlots());
+ }
+ void setDynamicSlot(uint32_t slot, MDefinition* def) {
+ setSlot(slot + numFixedSlots(), def);
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
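+
+// Note on the operand layout above: operand 0 is the object and operands
+// 1..numSlots() hold the slot values, so getSlot(i) reads getOperand(i + 1).
+// Dynamic slots are addressed relative to the fixed ones:
+//
+//   // Sketch, assuming an MObjectState* `state` with 4 fixed slots:
+//   MDefinition* firstDynamic = state->getDynamicSlot(0);  // == getSlot(4)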
+
+// Represent the contents of all elements of an array. This instruction is not
+// lowered and is not used to generate code.
+class MArrayState : public MVariadicInstruction,
+ public NoFloatPolicyAfter<2>::Data {
+ private:
+ uint32_t numElements_;
+
+ explicit MArrayState(MDefinition* arr);
+
+ [[nodiscard]] bool init(TempAllocator& alloc, MDefinition* obj,
+ MDefinition* len);
+
+ void initElement(uint32_t index, MDefinition* def) {
+ initOperand(index + 2, def);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ArrayState)
+ NAMED_OPERANDS((0, array), (1, initializedLength))
+
+ static MArrayState* New(TempAllocator& alloc, MDefinition* arr,
+ MDefinition* initLength);
+ static MArrayState* Copy(TempAllocator& alloc, MArrayState* state);
+
+ void initFromTemplateObject(TempAllocator& alloc, MDefinition* undefinedVal);
+
+ void setInitializedLength(MDefinition* def) { replaceOperand(1, def); }
+
+ size_t numElements() const { return numElements_; }
+
+ MDefinition* getElement(uint32_t index) const {
+ return getOperand(index + 2);
+ }
+ void setElement(uint32_t index, MDefinition* def) {
+ replaceOperand(index + 2, def);
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+// WrappedFunction stores information about a function that can safely be used
+// off-thread. In particular, a function's flags can be modified on the main
+// thread as functions are relazified and delazified, so we must be careful not
+// to access these flags off-thread.
+class WrappedFunction : public TempObject {
+ // If this is a native function without a JitEntry, the JSFunction*.
+ CompilerFunction nativeFun_;
+ uint16_t nargs_;
+ js::FunctionFlags flags_;
+
+ public:
+ WrappedFunction(JSFunction* nativeFun, uint16_t nargs, FunctionFlags flags);
+
+ // Note: When adding new accessors be sure to add consistency asserts
+ // to the constructor.
+
+ size_t nargs() const { return nargs_; }
+
+ bool isNativeWithoutJitEntry() const {
+ return flags_.isNativeWithoutJitEntry();
+ }
+ bool hasJitEntry() const { return flags_.hasJitEntry(); }
+ bool isConstructor() const { return flags_.isConstructor(); }
+ bool isClassConstructor() const { return flags_.isClassConstructor(); }
+
+ // These fields never change; they can be accessed off the main thread.
+ JSNative native() const {
+ MOZ_ASSERT(isNativeWithoutJitEntry());
+ return nativeFun_->nativeUnchecked();
+ }
+ bool hasJitInfo() const {
+ return flags_.isBuiltinNative() && nativeFun_->jitInfoUnchecked();
+ }
+ const JSJitInfo* jitInfo() const {
+ MOZ_ASSERT(hasJitInfo());
+ return nativeFun_->jitInfoUnchecked();
+ }
+
+ JSFunction* rawNativeJSFunction() const { return nativeFun_; }
+};
+
+enum class DOMObjectKind : uint8_t { Proxy, Native };
+
+class MCallBase : public MVariadicInstruction, public CallPolicy::Data {
+ protected:
+ // The callee, this, and the actual arguments are all operands of MCall.
+ static const size_t CalleeOperandIndex = 0;
+ static const size_t NumNonArgumentOperands = 1;
+
+ explicit MCallBase(Opcode op) : MVariadicInstruction(op) {}
+
+ public:
+ void initCallee(MDefinition* func) { initOperand(CalleeOperandIndex, func); }
+ MDefinition* getCallee() const { return getOperand(CalleeOperandIndex); }
+
+ void replaceCallee(MInstruction* newfunc) {
+ replaceOperand(CalleeOperandIndex, newfunc);
+ }
+
+ void addArg(size_t argnum, MDefinition* arg);
+
+ MDefinition* getArg(uint32_t index) const {
+ return getOperand(NumNonArgumentOperands + index);
+ }
+
+ // The number of stack arguments is the maximum of the number of formal
+ // arguments and the number of actual arguments. The number of stack
+ // arguments includes the |undefined| padding added in case of underflow.
+ // It also includes |this|.
+ uint32_t numStackArgs() const {
+ return numOperands() - NumNonArgumentOperands;
+ }
+ uint32_t paddedNumStackArgs() const {
+ if (JitStackValueAlignment > 1) {
+ return AlignBytes(numStackArgs(), JitStackValueAlignment);
+ }
+ return numStackArgs();
+ }
+
+ static size_t IndexOfThis() { return NumNonArgumentOperands; }
+ static size_t IndexOfArgument(size_t index) {
+ return NumNonArgumentOperands + index + 1; // +1 to skip |this|.
+ }
+ static size_t IndexOfStackArg(size_t index) {
+ return NumNonArgumentOperands + index;
+ }
+};
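+
+// Note on the operand layout shared by MCall and MCallClassHook: operand 0 is
+// the callee and the stack arguments (starting with |this|) follow it.
+//
+//   // Sketch, assuming an MCallBase* `call` with at least one actual arg:
+//   MDefinition* callee = call->getCallee();  // operand 0
+//   MDefinition* thisv = call->getArg(0);     // operand 1, i.e. |this|
+//   MDefinition* arg0 = call->getArg(1);      // operand 2, first actual arg
+//
+// IndexOfThis() == 1 and IndexOfArgument(0) == 2 describe the same layout.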
+
+class MCall : public MCallBase {
+ protected:
+ // Monomorphic cache for MCalls with a single JSFunction target.
+ WrappedFunction* target_;
+
+ // Original value of argc from the bytecode.
+ uint32_t numActualArgs_;
+
+ // True if the call is for JSOp::New or JSOp::SuperCall.
+ bool construct_ : 1;
+
+ // True if the caller does not use the return value.
+ bool ignoresReturnValue_ : 1;
+
+ bool needsClassCheck_ : 1;
+ bool maybeCrossRealm_ : 1;
+ bool needsThisCheck_ : 1;
+
+ MCall(WrappedFunction* target, uint32_t numActualArgs, bool construct,
+ bool ignoresReturnValue)
+ : MCallBase(classOpcode),
+ target_(target),
+ numActualArgs_(numActualArgs),
+ construct_(construct),
+ ignoresReturnValue_(ignoresReturnValue),
+ needsClassCheck_(true),
+ maybeCrossRealm_(true),
+ needsThisCheck_(false) {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Call)
+ static MCall* New(TempAllocator& alloc, WrappedFunction* target,
+ size_t maxArgc, size_t numActualArgs, bool construct,
+ bool ignoresReturnValue, bool isDOMCall,
+ mozilla::Maybe<DOMObjectKind> objectKind);
+
+ bool needsClassCheck() const { return needsClassCheck_; }
+ void disableClassCheck() { needsClassCheck_ = false; }
+
+ bool maybeCrossRealm() const { return maybeCrossRealm_; }
+ void setNotCrossRealm() { maybeCrossRealm_ = false; }
+
+ bool needsThisCheck() const { return needsThisCheck_; }
+ void setNeedsThisCheck() {
+ MOZ_ASSERT(construct_);
+ needsThisCheck_ = true;
+ }
+
+ // For monomorphic callsites.
+ WrappedFunction* getSingleTarget() const { return target_; }
+
+ bool isConstructing() const { return construct_; }
+
+ bool ignoresReturnValue() const { return ignoresReturnValue_; }
+
+ // Does not include |this|.
+ uint32_t numActualArgs() const { return numActualArgs_; }
+
+ bool possiblyCalls() const override { return true; }
+
+ virtual bool isCallDOMNative() const { return false; }
+
+ // A method that can be called to tell the MCall to figure out whether it's
+ // movable or not. This can't be done in the constructor, because it
+ // depends on the arguments to the call, and those aren't passed to the
+ // constructor but are set up later via addArg.
+ virtual void computeMovable() {}
+};
+
+class MCallDOMNative : public MCall {
+ // A helper class for MCalls for DOM natives. Note that this is NOT
+ // actually a separate MIR op from MCall, because all sorts of places use
+ // isCall() to check for calls and all we really want is to overload a few
+ // virtual things from MCall.
+
+ DOMObjectKind objectKind_;
+
+ MCallDOMNative(WrappedFunction* target, uint32_t numActualArgs,
+ DOMObjectKind objectKind)
+ : MCall(target, numActualArgs, false, false), objectKind_(objectKind) {
+ MOZ_ASSERT(getJitInfo()->type() != JSJitInfo::InlinableNative);
+
+ // If our jitinfo is not marked eliminatable, that means that our C++
+ // implementation is fallible or that it never wants to be eliminated or
+ // that we have no hope of ever doing the sort of argument analysis that
+ // would allow us to determine that we're side-effect-free. In the
+ // latter case we wouldn't get DCEd no matter what, but for the former
+ // two cases we have to explicitly say that we can't be DCEd.
+ if (!getJitInfo()->isEliminatable) {
+ setGuard();
+ }
+ }
+
+ friend MCall* MCall::New(TempAllocator& alloc, WrappedFunction* target,
+ size_t maxArgc, size_t numActualArgs, bool construct,
+ bool ignoresReturnValue, bool isDOMCall,
+ mozilla::Maybe<DOMObjectKind> objectKind);
+
+ const JSJitInfo* getJitInfo() const;
+
+ public:
+ DOMObjectKind objectKind() const { return objectKind_; }
+
+ virtual AliasSet getAliasSet() const override;
+
+ virtual bool congruentTo(const MDefinition* ins) const override;
+
+ virtual bool isCallDOMNative() const override { return true; }
+
+ virtual void computeMovable() override;
+};
+
+// Used to invoke a JSClass call/construct hook.
+class MCallClassHook : public MCallBase {
+ const JSNative target_;
+ bool constructing_ : 1;
+ bool ignoresReturnValue_ : 1;
+
+ MCallClassHook(JSNative target, bool constructing)
+ : MCallBase(classOpcode),
+ target_(target),
+ constructing_(constructing),
+ ignoresReturnValue_(false) {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CallClassHook)
+ static MCallClassHook* New(TempAllocator& alloc, JSNative target,
+ uint32_t argc, bool constructing);
+
+ JSNative target() const { return target_; }
+ bool isConstructing() const { return constructing_; }
+
+ uint32_t numActualArgs() const {
+ uint32_t thisAndNewTarget = 1 + constructing_;
+ MOZ_ASSERT(numStackArgs() >= thisAndNewTarget);
+ return numStackArgs() - thisAndNewTarget;
+ }
+
+ bool maybeCrossRealm() const { return true; }
+
+ bool ignoresReturnValue() const { return ignoresReturnValue_; }
+ void setIgnoresReturnValue() { ignoresReturnValue_ = true; }
+
+ bool possiblyCalls() const override { return true; }
+};
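+
+// Note: numActualArgs() above subtracts |this| and, when constructing,
+// |new.target| from the stack-argument count. For a constructing hook call
+// with two actual arguments the stack arguments are |this|, both arguments,
+// and |new.target|, so numStackArgs() == 4 and
+// numActualArgs() == 4 - (1 + 1) == 2.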
+
+// fun.apply(self, arguments)
+class MApplyArgs : public MTernaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>,
+ BoxPolicy<2>>::Data {
+ // Single target from CacheIR, or nullptr
+ WrappedFunction* target_;
+ // Number of extra initial formals to skip.
+ uint32_t numExtraFormals_;
+ bool maybeCrossRealm_ = true;
+ bool ignoresReturnValue_ = false;
+
+ MApplyArgs(WrappedFunction* target, MDefinition* fun, MDefinition* argc,
+ MDefinition* self, uint32_t numExtraFormals = 0)
+ : MTernaryInstruction(classOpcode, fun, argc, self),
+ target_(target),
+ numExtraFormals_(numExtraFormals) {
+ MOZ_ASSERT(argc->type() == MIRType::Int32);
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ApplyArgs)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getFunction), (1, getArgc), (2, getThis))
+
+ WrappedFunction* getSingleTarget() const { return target_; }
+
+ uint32_t numExtraFormals() const { return numExtraFormals_; }
+
+ bool maybeCrossRealm() const { return maybeCrossRealm_; }
+ void setNotCrossRealm() { maybeCrossRealm_ = false; }
+
+ bool ignoresReturnValue() const { return ignoresReturnValue_; }
+ void setIgnoresReturnValue() { ignoresReturnValue_ = true; }
+
+ bool isConstructing() const { return false; }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+class MApplyArgsObj
+ : public MTernaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, BoxPolicy<2>>::Data {
+ WrappedFunction* target_;
+ bool maybeCrossRealm_ = true;
+ bool ignoresReturnValue_ = false;
+
+ MApplyArgsObj(WrappedFunction* target, MDefinition* fun, MDefinition* argsObj,
+ MDefinition* thisArg)
+ : MTernaryInstruction(classOpcode, fun, argsObj, thisArg),
+ target_(target) {
+ MOZ_ASSERT(argsObj->type() == MIRType::Object);
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ApplyArgsObj)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getFunction), (1, getArgsObj), (2, getThis))
+
+ WrappedFunction* getSingleTarget() const { return target_; }
+
+ bool maybeCrossRealm() const { return maybeCrossRealm_; }
+ void setNotCrossRealm() { maybeCrossRealm_ = false; }
+
+ bool ignoresReturnValue() const { return ignoresReturnValue_; }
+ void setIgnoresReturnValue() { ignoresReturnValue_ = true; }
+
+ bool isConstructing() const { return false; }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+// fun.apply(fn, array)
+class MApplyArray : public MTernaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, BoxPolicy<2>>::Data {
+ // Single target from CacheIR, or nullptr
+ WrappedFunction* target_;
+ bool maybeCrossRealm_ = true;
+ bool ignoresReturnValue_ = false;
+
+ MApplyArray(WrappedFunction* target, MDefinition* fun, MDefinition* elements,
+ MDefinition* self)
+ : MTernaryInstruction(classOpcode, fun, elements, self), target_(target) {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ApplyArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getFunction), (1, getElements), (2, getThis))
+
+ WrappedFunction* getSingleTarget() const { return target_; }
+
+ bool maybeCrossRealm() const { return maybeCrossRealm_; }
+ void setNotCrossRealm() { maybeCrossRealm_ = false; }
+
+ bool ignoresReturnValue() const { return ignoresReturnValue_; }
+ void setIgnoresReturnValue() { ignoresReturnValue_ = true; }
+
+ bool isConstructing() const { return false; }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+// |new F(...arguments)| and |super(...arguments)|.
+class MConstructArgs : public MQuaternaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>,
+ BoxPolicy<2>, ObjectPolicy<3>>::Data {
+ // Single target from CacheIR, or nullptr
+ WrappedFunction* target_;
+ // Number of extra initial formals to skip.
+ uint32_t numExtraFormals_;
+ bool maybeCrossRealm_ = true;
+
+ MConstructArgs(WrappedFunction* target, MDefinition* fun, MDefinition* argc,
+ MDefinition* thisValue, MDefinition* newTarget,
+ uint32_t numExtraFormals = 0)
+ : MQuaternaryInstruction(classOpcode, fun, argc, thisValue, newTarget),
+ target_(target),
+ numExtraFormals_(numExtraFormals) {
+ MOZ_ASSERT(argc->type() == MIRType::Int32);
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ConstructArgs)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getFunction), (1, getArgc), (2, getThis),
+ (3, getNewTarget))
+
+ WrappedFunction* getSingleTarget() const { return target_; }
+
+ uint32_t numExtraFormals() const { return numExtraFormals_; }
+
+ bool maybeCrossRealm() const { return maybeCrossRealm_; }
+ void setNotCrossRealm() { maybeCrossRealm_ = false; }
+
+ bool ignoresReturnValue() const { return false; }
+ bool isConstructing() const { return true; }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+// |new F(...args)| and |super(...args)|.
+class MConstructArray
+ : public MQuaternaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, BoxPolicy<2>, ObjectPolicy<3>>::Data {
+ // Single target from CacheIR, or nullptr
+ WrappedFunction* target_;
+ bool maybeCrossRealm_ = true;
+ bool needsThisCheck_ = false;
+
+ MConstructArray(WrappedFunction* target, MDefinition* fun,
+ MDefinition* elements, MDefinition* thisValue,
+ MDefinition* newTarget)
+ : MQuaternaryInstruction(classOpcode, fun, elements, thisValue,
+ newTarget),
+ target_(target) {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ConstructArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getFunction), (1, getElements), (2, getThis),
+ (3, getNewTarget))
+
+ WrappedFunction* getSingleTarget() const { return target_; }
+
+ bool maybeCrossRealm() const { return maybeCrossRealm_; }
+ void setNotCrossRealm() { maybeCrossRealm_ = false; }
+
+ bool needsThisCheck() const { return needsThisCheck_; }
+ void setNeedsThisCheck() { needsThisCheck_ = true; }
+
+ bool ignoresReturnValue() const { return false; }
+ bool isConstructing() const { return true; }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+class MBail : public MNullaryInstruction {
+ explicit MBail(BailoutKind kind) : MNullaryInstruction(classOpcode) {
+ setBailoutKind(kind);
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Bail)
+
+ static MBail* New(TempAllocator& alloc, BailoutKind kind) {
+ return new (alloc) MBail(kind);
+ }
+ static MBail* New(TempAllocator& alloc) {
+ return new (alloc) MBail(BailoutKind::Inevitable);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MUnreachable : public MAryControlInstruction<0, 0>,
+ public NoTypePolicy::Data {
+ MUnreachable() : MAryControlInstruction(classOpcode) {}
+
+ public:
+ INSTRUCTION_HEADER(Unreachable)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MAssertRecoveredOnBailout : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ bool mustBeRecovered_;
+
+ MAssertRecoveredOnBailout(MDefinition* ins, bool mustBeRecovered)
+ : MUnaryInstruction(classOpcode, ins), mustBeRecovered_(mustBeRecovered) {
+ setResultType(MIRType::Value);
+ setRecoveredOnBailout();
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(AssertRecoveredOnBailout)
+ TRIVIAL_NEW_WRAPPERS
+
+ // Needed to assert that float32 instructions are correctly recovered.
+ bool canConsumeFloat32(MUse* use) const override { return true; }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+class MAssertFloat32 : public MUnaryInstruction, public NoTypePolicy::Data {
+ bool mustBeFloat32_;
+
+ MAssertFloat32(MDefinition* value, bool mustBeFloat32)
+ : MUnaryInstruction(classOpcode, value), mustBeFloat32_(mustBeFloat32) {}
+
+ public:
+ INSTRUCTION_HEADER(AssertFloat32)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool canConsumeFloat32(MUse* use) const override { return true; }
+
+ bool mustBeFloat32() const { return mustBeFloat32_; }
+};
+
+class MCompare : public MBinaryInstruction, public ComparePolicy::Data {
+ public:
+ enum CompareType {
+
+ // Anything compared to Undefined
+ Compare_Undefined,
+
+ // Anything compared to Null
+ Compare_Null,
+
+ // Int32 compared to Int32
+ // Boolean compared to Boolean
+ Compare_Int32,
+
+ // Int32 compared as unsigneds
+ Compare_UInt32,
+
+ // Int64 compared to Int64.
+ Compare_Int64,
+
+ // Int64 compared as unsigneds.
+ Compare_UInt64,
+
+ // IntPtr compared as unsigneds.
+ Compare_UIntPtr,
+
+ // Double compared to Double
+ Compare_Double,
+
+ // Float compared to Float
+ Compare_Float32,
+
+ // String compared to String
+ Compare_String,
+
+ // Symbol compared to Symbol
+ Compare_Symbol,
+
+ // Object compared to Object
+ Compare_Object,
+
+ // BigInt compared to BigInt
+ Compare_BigInt,
+
+ // BigInt compared to Int32
+ Compare_BigInt_Int32,
+
+ // BigInt compared to Double
+ Compare_BigInt_Double,
+
+ // BigInt compared to String
+ Compare_BigInt_String,
+
+ // Wasm Ref/AnyRef/NullRef compared to Ref/AnyRef/NullRef
+ Compare_RefOrNull,
+ };
+
+ private:
+ CompareType compareType_;
+ JSOp jsop_;
+ bool operandsAreNeverNaN_;
+
+ // When a floating-point comparison is converted to an integer comparison
+ // (when range analysis proves it safe), we need to convert the operands
+ // to integer as well.
+ bool truncateOperands_;
+
+ MCompare(MDefinition* left, MDefinition* right, JSOp jsop,
+ CompareType compareType)
+ : MBinaryInstruction(classOpcode, left, right),
+ compareType_(compareType),
+ jsop_(jsop),
+ operandsAreNeverNaN_(false),
+ truncateOperands_(false) {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Compare)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MCompare* NewWasm(TempAllocator& alloc, MDefinition* left,
+ MDefinition* right, JSOp jsop,
+ CompareType compareType) {
+ MOZ_ASSERT(compareType == Compare_Int32 || compareType == Compare_UInt32 ||
+ compareType == Compare_Int64 || compareType == Compare_UInt64 ||
+ compareType == Compare_Double ||
+ compareType == Compare_Float32 ||
+ compareType == Compare_RefOrNull);
+ auto* ins = MCompare::New(alloc, left, right, jsop, compareType);
+ ins->setResultType(MIRType::Int32);
+ return ins;
+ }
+
+ [[nodiscard]] bool tryFold(bool* result);
+ [[nodiscard]] bool evaluateConstantOperands(TempAllocator& alloc,
+ bool* result);
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ CompareType compareType() const { return compareType_; }
+ bool isInt32Comparison() const { return compareType() == Compare_Int32; }
+ bool isDoubleComparison() const { return compareType() == Compare_Double; }
+ bool isFloat32Comparison() const { return compareType() == Compare_Float32; }
+ bool isNumericComparison() const {
+ return isInt32Comparison() || isDoubleComparison() || isFloat32Comparison();
+ }
+ MIRType inputType();
+
+ JSOp jsop() const { return jsop_; }
+ bool operandsAreNeverNaN() const { return operandsAreNeverNaN_; }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+ void collectRangeInfoPreTrunc() override;
+
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+ bool isFloat32Commutative() const override { return true; }
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ // Both sides of the compare can be Float32
+ return compareType_ == Compare_Float32;
+ }
+#endif
+
+ ALLOW_CLONE(MCompare)
+
+ private:
+ [[nodiscard]] bool tryFoldEqualOperands(bool* result);
+ [[nodiscard]] bool tryFoldTypeOf(bool* result);
+ [[nodiscard]] MDefinition* tryFoldTypeOf(TempAllocator& alloc);
+ [[nodiscard]] MDefinition* tryFoldCharCompare(TempAllocator& alloc);
+ [[nodiscard]] MDefinition* tryFoldStringCompare(TempAllocator& alloc);
+ [[nodiscard]] MDefinition* tryFoldStringSubstring(TempAllocator& alloc);
+ [[nodiscard]] MDefinition* tryFoldStringIndexOf(TempAllocator& alloc);
+
+ public:
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!binaryCongruentTo(ins)) {
+ return false;
+ }
+ return compareType() == ins->toCompare()->compareType() &&
+ jsop() == ins->toCompare()->jsop();
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ switch (compareType_) {
+ case Compare_Undefined:
+ case Compare_Null:
+ case Compare_Int32:
+ case Compare_UInt32:
+ case Compare_Double:
+ case Compare_Float32:
+ case Compare_String:
+ case Compare_Symbol:
+ case Compare_Object:
+ case Compare_BigInt:
+ case Compare_BigInt_Int32:
+ case Compare_BigInt_Double:
+ case Compare_BigInt_String:
+ return true;
+
+ case Compare_Int64:
+ case Compare_UInt64:
+ case Compare_UIntPtr:
+ case Compare_RefOrNull:
+ return false;
+ }
+ MOZ_CRASH("unexpected compare type");
+ }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ const char* ty = nullptr;
+ switch (compareType_) {
+ case Compare_Undefined:
+ ty = "Undefined";
+ break;
+ case Compare_Null:
+ ty = "Null";
+ break;
+ case Compare_Int32:
+ ty = "Int32";
+ break;
+ case Compare_UInt32:
+ ty = "UInt32";
+ break;
+ case Compare_Int64:
+ ty = "Int64";
+ break;
+ case Compare_UInt64:
+ ty = "UInt64";
+ break;
+ case Compare_UIntPtr:
+ ty = "UIntPtr";
+ break;
+ case Compare_Double:
+ ty = "Double";
+ break;
+ case Compare_Float32:
+ ty = "Float32";
+ break;
+ case Compare_String:
+ ty = "String";
+ break;
+ case Compare_Symbol:
+ ty = "Symbol";
+ break;
+ case Compare_Object:
+ ty = "Object";
+ break;
+ case Compare_BigInt:
+ ty = "BigInt";
+ break;
+ case Compare_BigInt_Int32:
+ ty = "BigInt_Int32";
+ break;
+ case Compare_BigInt_Double:
+ ty = "BigInt_Double";
+ break;
+ case Compare_BigInt_String:
+ ty = "BigInt_String";
+ break;
+ case Compare_RefOrNull:
+ ty = "RefOrNull";
+ break;
+ default:
+ ty = "!!unknown!!";
+ break;
+ };
+ char buf[64];
+ SprintfLiteral(buf, "ty=%s jsop=%s", ty, CodeName(jsop()));
+ extras->add(buf);
+ }
+#endif
+};
+
+// Takes a typed value and returns an untyped value.
+class MBox : public MUnaryInstruction, public NoTypePolicy::Data {
+ explicit MBox(MDefinition* ins) : MUnaryInstruction(classOpcode, ins) {
+ // Cannot box a box.
+ MOZ_ASSERT(ins->type() != MIRType::Value);
+
+ setResultType(MIRType::Value);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Box)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ ALLOW_CLONE(MBox)
+};
+
+// Note: the op may have been inverted during lowering (to put constants in a
+// position where they can be immediates), so it is important to use
+// lir->jsop() instead of mir->jsop() when the former is present.
+static inline Assembler::Condition JSOpToCondition(
+ MCompare::CompareType compareType, JSOp op) {
+ bool isSigned = (compareType != MCompare::Compare_UInt32 &&
+ compareType != MCompare::Compare_UInt64 &&
+ compareType != MCompare::Compare_UIntPtr);
+ return JSOpToCondition(op, isSigned);
+}
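+
+// Sketch: the compare type only decides whether the signed or the unsigned
+// condition is chosen.
+//
+//   Assembler::Condition cond =
+//       JSOpToCondition(MCompare::Compare_UInt32, JSOp::Lt);
+//   // isSigned is false for Compare_UInt32, so `cond` is an unsigned
+//   // below-style condition rather than a signed less-than.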
+
+// Takes a typed value and checks if it is a certain type. If so, the payload
+// is unpacked and returned as that type. Otherwise, it is considered a
+// deoptimization.
+class MUnbox final : public MUnaryInstruction, public BoxInputsPolicy::Data {
+ public:
+ enum Mode {
+ Fallible, // Check the type, and deoptimize if unexpected.
+ Infallible, // Type guard is not necessary.
+ };
+
+ private:
+ Mode mode_;
+
+ MUnbox(MDefinition* ins, MIRType type, Mode mode)
+ : MUnaryInstruction(classOpcode, ins), mode_(mode) {
+ // Only allow unboxing a non MIRType::Value when input and output types
+ // don't match. This is often used to force a bailout. Boxing happens
+ // during type analysis.
+ MOZ_ASSERT_IF(ins->type() != MIRType::Value, type != ins->type());
+
+ MOZ_ASSERT(type == MIRType::Boolean || type == MIRType::Int32 ||
+ type == MIRType::Double || type == MIRType::String ||
+ type == MIRType::Symbol || type == MIRType::BigInt ||
+ type == MIRType::Object);
+
+ setResultType(type);
+ setMovable();
+
+ if (mode_ == Fallible) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(Unbox)
+ TRIVIAL_NEW_WRAPPERS
+
+ Mode mode() const { return mode_; }
+ bool fallible() const { return mode() != Infallible; }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isUnbox() || ins->toUnbox()->mode() != mode()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ ALLOW_CLONE(MUnbox)
+};
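+
+// Usage sketch, assuming `alloc` and a boxed (MIRType::Value) MDefinition*
+// `value` are in scope (hypothetical names):
+//
+//   MUnbox* unboxed =
+//       MUnbox::New(alloc, value, MIRType::Int32, MUnbox::Fallible);
+//
+// In Fallible mode the instruction acts as a guard: if `value` does not hold
+// an Int32 at runtime, execution bails out instead of producing a payload of
+// the wrong type.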
+
+class MAssertRange : public MUnaryInstruction, public NoTypePolicy::Data {
+ // This is the range checked by the assertion. Don't confuse this with the
+ // range_ member or the range() accessor. Since MAssertRange doesn't return
+ // a value, it doesn't use those.
+ const Range* assertedRange_;
+
+ MAssertRange(MDefinition* ins, const Range* assertedRange)
+ : MUnaryInstruction(classOpcode, ins), assertedRange_(assertedRange) {
+ setGuard();
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(AssertRange)
+ TRIVIAL_NEW_WRAPPERS
+
+ const Range* assertedRange() const { return assertedRange_; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+};
+
+class MAssertClass : public MUnaryInstruction, public NoTypePolicy::Data {
+ const JSClass* class_;
+
+ MAssertClass(MDefinition* obj, const JSClass* clasp)
+ : MUnaryInstruction(classOpcode, obj), class_(clasp) {
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ setGuard();
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(AssertClass)
+ TRIVIAL_NEW_WRAPPERS
+
+ const JSClass* getClass() const { return class_; }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MAssertShape : public MUnaryInstruction, public NoTypePolicy::Data {
+ CompilerShape shape_;
+
+ MAssertShape(MDefinition* obj, Shape* shape)
+ : MUnaryInstruction(classOpcode, obj), shape_(shape) {
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+
+ setGuard();
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(AssertShape)
+ TRIVIAL_NEW_WRAPPERS
+
+ const Shape* shape() const { return shape_; }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// Eager initialization of arguments object.
+class MCreateArgumentsObject : public MUnaryInstruction,
+ public ObjectPolicy<0>::Data {
+ CompilerGCPointer<ArgumentsObject*> templateObj_;
+
+ MCreateArgumentsObject(MDefinition* callObj, ArgumentsObject* templateObj)
+ : MUnaryInstruction(classOpcode, callObj), templateObj_(templateObj) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CreateArgumentsObject)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, getCallObject))
+
+ ArgumentsObject* templateObject() const { return templateObj_; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool possiblyCalls() const override { return true; }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+// Eager initialization of arguments object for inlined function
+class MCreateInlinedArgumentsObject : public MVariadicInstruction,
+ public NoFloatPolicyAfter<0>::Data {
+ CompilerGCPointer<ArgumentsObject*> templateObj_;
+
+ explicit MCreateInlinedArgumentsObject(ArgumentsObject* templateObj)
+ : MVariadicInstruction(classOpcode), templateObj_(templateObj) {
+ setResultType(MIRType::Object);
+ }
+
+ static const size_t NumNonArgumentOperands = 2;
+
+ public:
+ INSTRUCTION_HEADER(CreateInlinedArgumentsObject)
+ static MCreateInlinedArgumentsObject* New(TempAllocator& alloc,
+ MDefinition* callObj,
+ MDefinition* callee,
+ MDefinitionVector& args,
+ ArgumentsObject* templateObj);
+ NAMED_OPERANDS((0, getCallObject), (1, getCallee))
+
+ ArgumentsObject* templateObject() const { return templateObj_; }
+
+ MDefinition* getArg(uint32_t idx) const {
+ return getOperand(idx + NumNonArgumentOperands);
+ }
+ uint32_t numActuals() const { return numOperands() - NumNonArgumentOperands; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool possiblyCalls() const override { return true; }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+class MGetInlinedArgument
+ : public MVariadicInstruction,
+ public MixPolicy<UnboxedInt32Policy<0>, NoFloatPolicyAfter<1>>::Data {
+ MGetInlinedArgument() : MVariadicInstruction(classOpcode) {
+ setResultType(MIRType::Value);
+ }
+
+ static const size_t NumNonArgumentOperands = 1;
+
+ public:
+ INSTRUCTION_HEADER(GetInlinedArgument)
+ static MGetInlinedArgument* New(TempAllocator& alloc, MDefinition* index,
+ MCreateInlinedArgumentsObject* args);
+ static MGetInlinedArgument* New(TempAllocator& alloc, MDefinition* index,
+ const CallInfo& callInfo);
+ NAMED_OPERANDS((0, index))
+
+ MDefinition* getArg(uint32_t idx) const {
+ return getOperand(idx + NumNonArgumentOperands);
+ }
+ uint32_t numActuals() const { return numOperands() - NumNonArgumentOperands; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+class MGetInlinedArgumentHole
+ : public MVariadicInstruction,
+ public MixPolicy<UnboxedInt32Policy<0>, NoFloatPolicyAfter<1>>::Data {
+ MGetInlinedArgumentHole() : MVariadicInstruction(classOpcode) {
+ setGuard();
+ setResultType(MIRType::Value);
+ }
+
+ static const size_t NumNonArgumentOperands = 1;
+
+ public:
+ INSTRUCTION_HEADER(GetInlinedArgumentHole)
+ static MGetInlinedArgumentHole* New(TempAllocator& alloc, MDefinition* index,
+ MCreateInlinedArgumentsObject* args);
+ NAMED_OPERANDS((0, index))
+
+ MDefinition* getArg(uint32_t idx) const {
+ return getOperand(idx + NumNonArgumentOperands);
+ }
+ uint32_t numActuals() const { return numOperands() - NumNonArgumentOperands; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+class MInlineArgumentsSlice
+ : public MVariadicInstruction,
+ public MixPolicy<UnboxedInt32Policy<0>, UnboxedInt32Policy<1>,
+ NoFloatPolicyAfter<2>>::Data {
+ JSObject* templateObj_;
+ gc::Heap initialHeap_;
+
+ MInlineArgumentsSlice(JSObject* templateObj, gc::Heap initialHeap)
+ : MVariadicInstruction(classOpcode),
+ templateObj_(templateObj),
+ initialHeap_(initialHeap) {
+ setResultType(MIRType::Object);
+ }
+
+ static const size_t NumNonArgumentOperands = 2;
+
+ public:
+ INSTRUCTION_HEADER(InlineArgumentsSlice)
+ static MInlineArgumentsSlice* New(TempAllocator& alloc, MDefinition* begin,
+ MDefinition* count,
+ MCreateInlinedArgumentsObject* args,
+ JSObject* templateObj,
+ gc::Heap initialHeap);
+ NAMED_OPERANDS((0, begin), (1, count))
+
+ JSObject* templateObj() const { return templateObj_; }
+ gc::Heap initialHeap() const { return initialHeap_; }
+
+ MDefinition* getArg(uint32_t idx) const {
+ return getOperand(idx + NumNonArgumentOperands);
+ }
+ uint32_t numActuals() const { return numOperands() - NumNonArgumentOperands; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+// Allocates a new BoundFunctionObject and calls
+// BoundFunctionObject::functionBindImpl. This instruction can have arbitrary
+// side-effects because the GetProperty calls for length/name can call into JS.
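+// For illustration: for a call like f.bind(thisArg, 1, 2), f is the target in
+// operand 0 and the arguments passed to bind follow as the remaining operands.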
+class MBindFunction
+ : public MVariadicInstruction,
+ public MixPolicy<ObjectPolicy<0>, NoFloatPolicyAfter<1>>::Data {
+ CompilerGCPointer<JSObject*> templateObj_;
+
+ explicit MBindFunction(JSObject* templateObj)
+ : MVariadicInstruction(classOpcode), templateObj_(templateObj) {
+ setResultType(MIRType::Object);
+ }
+
+ // The target object is operand 0.
+ static const size_t NumNonArgumentOperands = 1;
+
+ public:
+ INSTRUCTION_HEADER(BindFunction)
+ static MBindFunction* New(TempAllocator& alloc, MDefinition* target,
+ uint32_t argc, JSObject* templateObj);
+ NAMED_OPERANDS((0, target))
+
+ JSObject* templateObject() const { return templateObj_; }
+
+ MDefinition* getArg(uint32_t idx) const {
+ return getOperand(idx + NumNonArgumentOperands);
+ }
+ void initArg(size_t i, MDefinition* arg) {
+ initOperand(NumNonArgumentOperands + i, arg);
+ }
+ uint32_t numStackArgs() const {
+ return numOperands() - NumNonArgumentOperands;
+ }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+class MToFPInstruction : public MUnaryInstruction, public ToDoublePolicy::Data {
+ public:
+ // Types of values which can be converted.
+ enum ConversionKind { NonStringPrimitives, NumbersOnly };
+
+ private:
+ ConversionKind conversion_;
+
+ protected:
+ MToFPInstruction(Opcode op, MDefinition* def,
+ ConversionKind conversion = NonStringPrimitives)
+ : MUnaryInstruction(op, def), conversion_(conversion) {}
+
+ public:
+ ConversionKind conversion() const { return conversion_; }
+};
+
+// Converts a primitive (either typed or untyped) to a double. If the input is
+// not primitive at runtime, a bailout occurs.
+class MToDouble : public MToFPInstruction {
+ private:
+ TruncateKind implicitTruncate_;
+
+ explicit MToDouble(MDefinition* def,
+ ConversionKind conversion = NonStringPrimitives)
+ : MToFPInstruction(classOpcode, def, conversion),
+ implicitTruncate_(TruncateKind::NoTruncate) {
+ setResultType(MIRType::Double);
+ setMovable();
+
+ // Guard unless the conversion is known to be non-effectful & non-throwing.
+ if (!def->definitelyType({MIRType::Undefined, MIRType::Null,
+ MIRType::Boolean, MIRType::Int32, MIRType::Double,
+ MIRType::Float32, MIRType::String})) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToDouble)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isToDouble() || ins->toToDouble()->conversion() != conversion()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+
+ TruncateKind truncateKind() const { return implicitTruncate_; }
+ void setTruncateKind(TruncateKind kind) {
+ implicitTruncate_ = std::max(implicitTruncate_, kind);
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ if (input()->type() == MIRType::Value) {
+ return false;
+ }
+ if (input()->type() == MIRType::Symbol) {
+ return false;
+ }
+ if (input()->type() == MIRType::BigInt) {
+ return false;
+ }
+
+ return true;
+ }
+
+ ALLOW_CLONE(MToDouble)
+};
+
+// Converts a primitive (either typed or untyped) to a float32. If the input is
+// not primitive at runtime, a bailout occurs.
+class MToFloat32 : public MToFPInstruction {
+ bool mustPreserveNaN_;
+
+ explicit MToFloat32(MDefinition* def,
+ ConversionKind conversion = NonStringPrimitives)
+ : MToFPInstruction(classOpcode, def, conversion),
+ mustPreserveNaN_(false) {
+ setResultType(MIRType::Float32);
+ setMovable();
+
+ // Guard unless the conversion is known to be non-effectful & non-throwing.
+ if (!def->definitelyType({MIRType::Undefined, MIRType::Null,
+ MIRType::Boolean, MIRType::Int32, MIRType::Double,
+ MIRType::Float32, MIRType::String})) {
+ setGuard();
+ }
+ }
+
+ explicit MToFloat32(MDefinition* def, bool mustPreserveNaN)
+ : MToFloat32(def) {
+ mustPreserveNaN_ = mustPreserveNaN;
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToFloat32)
+ TRIVIAL_NEW_WRAPPERS
+
+ virtual MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins)) {
+ return false;
+ }
+ auto* other = ins->toToFloat32();
+ return other->conversion() == conversion() &&
+ other->mustPreserveNaN_ == mustPreserveNaN_;
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ bool canConsumeFloat32(MUse* use) const override { return true; }
+ bool canProduceFloat32() const override { return true; }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MToFloat32)
+};
+
+// Converts a uint32 to a float32 (coming from wasm).
+class MWasmUnsignedToFloat32 : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ explicit MWasmUnsignedToFloat32(MDefinition* def)
+ : MUnaryInstruction(classOpcode, def) {
+ setResultType(MIRType::Float32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmUnsignedToFloat32)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool canProduceFloat32() const override { return true; }
+};
+
+class MWrapInt64ToInt32 : public MUnaryInstruction, public NoTypePolicy::Data {
+ bool bottomHalf_;
+
+ explicit MWrapInt64ToInt32(MDefinition* def, bool bottomHalf = true)
+ : MUnaryInstruction(classOpcode, def), bottomHalf_(bottomHalf) {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WrapInt64ToInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isWrapInt64ToInt32()) {
+ return false;
+ }
+ if (ins->toWrapInt64ToInt32()->bottomHalf() != bottomHalf()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool bottomHalf() const { return bottomHalf_; }
+};
+
+class MExtendInt32ToInt64 : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ bool isUnsigned_;
+
+ MExtendInt32ToInt64(MDefinition* def, bool isUnsigned)
+ : MUnaryInstruction(classOpcode, def), isUnsigned_(isUnsigned) {
+ setResultType(MIRType::Int64);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ExtendInt32ToInt64)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const { return isUnsigned_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isExtendInt32ToInt64()) {
+ return false;
+ }
+ if (ins->toExtendInt32ToInt64()->isUnsigned_ != isUnsigned_) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// The same as MWasmTruncateToInt64, but with an Instance dependency.
+// It is currently used only on ARM, because on ARM we need to call a builtin
+// to truncate to i64.
+class MWasmBuiltinTruncateToInt64 : public MAryInstruction<2>,
+ public NoTypePolicy::Data {
+ TruncFlags flags_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MWasmBuiltinTruncateToInt64(MDefinition* def, MDefinition* instance,
+ TruncFlags flags,
+ wasm::BytecodeOffset bytecodeOffset)
+ : MAryInstruction(classOpcode),
+ flags_(flags),
+ bytecodeOffset_(bytecodeOffset) {
+ initOperand(0, def);
+ initOperand(1, instance);
+
+ setResultType(MIRType::Int64);
+ setGuard(); // neither removable nor movable because of possible
+ // side-effects.
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBuiltinTruncateToInt64)
+ NAMED_OPERANDS((0, input), (1, instance));
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const { return flags_ & TRUNC_UNSIGNED; }
+ bool isSaturating() const { return flags_ & TRUNC_SATURATING; }
+ TruncFlags flags() const { return flags_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmBuiltinTruncateToInt64()->flags() == flags_;
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MWasmTruncateToInt64 : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ TruncFlags flags_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MWasmTruncateToInt64(MDefinition* def, TruncFlags flags,
+ wasm::BytecodeOffset bytecodeOffset)
+ : MUnaryInstruction(classOpcode, def),
+ flags_(flags),
+ bytecodeOffset_(bytecodeOffset) {
+ setResultType(MIRType::Int64);
+ setGuard(); // neither removable nor movable because of possible
+ // side-effects.
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmTruncateToInt64)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const { return flags_ & TRUNC_UNSIGNED; }
+ bool isSaturating() const { return flags_ & TRUNC_SATURATING; }
+ TruncFlags flags() const { return flags_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmTruncateToInt64()->flags() == flags_;
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// Truncate a value to an int32, with wasm semantics: this will trap when the
+// value is out of range.
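+// For example, a non-saturating i32 truncation traps on NaN and on values
+// outside the int32 range, while the saturating variant (TRUNC_SATURATING)
+// clamps out-of-range values and maps NaN to zero instead of trapping.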
+class MWasmTruncateToInt32 : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ TruncFlags flags_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ explicit MWasmTruncateToInt32(MDefinition* def, TruncFlags flags,
+ wasm::BytecodeOffset bytecodeOffset)
+ : MUnaryInstruction(classOpcode, def),
+ flags_(flags),
+ bytecodeOffset_(bytecodeOffset) {
+ setResultType(MIRType::Int32);
+ setGuard(); // neither removable nor movable because of possible
+ // side-effects.
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmTruncateToInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const { return flags_ & TRUNC_UNSIGNED; }
+ bool isSaturating() const { return flags_ & TRUNC_SATURATING; }
+ TruncFlags flags() const { return flags_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmTruncateToInt32()->flags() == flags_;
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// Converts an int32 value to intptr by sign-extending it.
+class MInt32ToIntPtr : public MUnaryInstruction,
+ public UnboxedInt32Policy<0>::Data {
+ bool canBeNegative_ = true;
+
+ explicit MInt32ToIntPtr(MDefinition* def)
+ : MUnaryInstruction(classOpcode, def) {
+ setResultType(MIRType::IntPtr);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Int32ToIntPtr)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool canBeNegative() const { return canBeNegative_; }
+ void setCanNotBeNegative() { canBeNegative_ = false; }
+
+ void computeRange(TempAllocator& alloc) override;
+ void collectRangeInfoPreTrunc() override;
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// Converts an IntPtr value >= 0 to Int32. Bails out if the value > INT32_MAX.
+class MNonNegativeIntPtrToInt32 : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ explicit MNonNegativeIntPtrToInt32(MDefinition* def)
+ : MUnaryInstruction(classOpcode, def) {
+ MOZ_ASSERT(def->type() == MIRType::IntPtr);
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(NonNegativeIntPtrToInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ void computeRange(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// Converts an IntPtr value to Double.
+class MIntPtrToDouble : public MUnaryInstruction, public NoTypePolicy::Data {
+ explicit MIntPtrToDouble(MDefinition* def)
+ : MUnaryInstruction(classOpcode, def) {
+ MOZ_ASSERT(def->type() == MIRType::IntPtr);
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(IntPtrToDouble)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// Subtracts (byteSize - 1) from the input value. Bails out if the result is
+// negative. This is used to implement bounds checks for DataView accesses.
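+// For example, for a Float64 access (byteSize 8) on a DataView of byteLength
+// 16 this computes 16 - 7 = 9, so the valid start offsets are 0..8.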
+class MAdjustDataViewLength : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ const uint32_t byteSize_;
+
+ MAdjustDataViewLength(MDefinition* input, uint32_t byteSize)
+ : MUnaryInstruction(classOpcode, input), byteSize_(byteSize) {
+ MOZ_ASSERT(input->type() == MIRType::IntPtr);
+ MOZ_ASSERT(byteSize > 1);
+ setResultType(MIRType::IntPtr);
+ setMovable();
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(AdjustDataViewLength)
+ TRIVIAL_NEW_WRAPPERS
+
+ uint32_t byteSize() const { return byteSize_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isAdjustDataViewLength()) {
+ return false;
+ }
+ if (ins->toAdjustDataViewLength()->byteSize() != byteSize()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MInt64ToFloatingPoint : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ bool isUnsigned_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MInt64ToFloatingPoint(MDefinition* def, MIRType type,
+ wasm::BytecodeOffset bytecodeOffset, bool isUnsigned)
+ : MUnaryInstruction(classOpcode, def),
+ isUnsigned_(isUnsigned),
+ bytecodeOffset_(bytecodeOffset) {
+ MOZ_ASSERT(IsFloatingPointType(type));
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Int64ToFloatingPoint)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const { return isUnsigned_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isInt64ToFloatingPoint()) {
+ return false;
+ }
+ if (ins->toInt64ToFloatingPoint()->isUnsigned_ != isUnsigned_) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// It is currently used only on ARM, because on ARM we need to call a builtin
+// to convert i64 to float.
+class MBuiltinInt64ToFloatingPoint : public MAryInstruction<2>,
+ public NoTypePolicy::Data {
+ bool isUnsigned_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MBuiltinInt64ToFloatingPoint(MDefinition* def, MDefinition* instance,
+ MIRType type,
+ wasm::BytecodeOffset bytecodeOffset,
+ bool isUnsigned)
+ : MAryInstruction(classOpcode),
+ isUnsigned_(isUnsigned),
+ bytecodeOffset_(bytecodeOffset) {
+ MOZ_ASSERT(IsFloatingPointType(type));
+ initOperand(0, def);
+ initOperand(1, instance);
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(BuiltinInt64ToFloatingPoint)
+ NAMED_OPERANDS((0, input), (1, instance));
+ TRIVIAL_NEW_WRAPPERS
+
+ bool isUnsigned() const { return isUnsigned_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isBuiltinInt64ToFloatingPoint()) {
+ return false;
+ }
+ if (ins->toBuiltinInt64ToFloatingPoint()->isUnsigned_ != isUnsigned_) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// Applies ECMA's ToNumber on a primitive (either typed or untyped) and expects
+// the result to be precisely representable as an Int32, otherwise bails.
+//
+// If the input is not primitive at runtime, a bailout occurs. If the input
+// cannot be converted to an int32 without loss (e.g. 5.5 or undefined), a
+// bailout also occurs.
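+// For example, an input of 5.0 converts to 5, while 5.5, undefined (NaN), or
+// a negative zero (when the check is required) all cause a bailout.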
+class MToNumberInt32 : public MUnaryInstruction, public ToInt32Policy::Data {
+ bool needsNegativeZeroCheck_;
+ IntConversionInputKind conversion_;
+
+ explicit MToNumberInt32(MDefinition* def, IntConversionInputKind conversion =
+ IntConversionInputKind::Any)
+ : MUnaryInstruction(classOpcode, def),
+ needsNegativeZeroCheck_(true),
+ conversion_(conversion) {
+ setResultType(MIRType::Int32);
+ setMovable();
+
+ // Guard unless the conversion is known to be non-effectful & non-throwing.
+ if (!def->definitelyType({MIRType::Undefined, MIRType::Null,
+ MIRType::Boolean, MIRType::Int32, MIRType::Double,
+ MIRType::Float32, MIRType::String})) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToNumberInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+  // This only has backwards information flow.
+ void analyzeEdgeCasesBackward() override;
+
+ bool needsNegativeZeroCheck() const { return needsNegativeZeroCheck_; }
+ void setNeedsNegativeZeroCheck(bool needsCheck) {
+ needsNegativeZeroCheck_ = needsCheck;
+ }
+
+ IntConversionInputKind conversion() const { return conversion_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isToNumberInt32() ||
+ ins->toToNumberInt32()->conversion() != conversion()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void computeRange(TempAllocator& alloc) override;
+ void collectRangeInfoPreTrunc() override;
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+
+ ALLOW_CLONE(MToNumberInt32)
+};
+
+// Converts a value or typed input to a truncated int32, for use with bitwise
+// operations. This is an infallible ValueToECMAInt32.
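+// For example, with bitwise-or semantics (x | 0): 5.5 truncates to 5, -1.5 to
+// -1, and NaN, Infinity and -0 all truncate to 0.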
+class MTruncateToInt32 : public MUnaryInstruction, public ToInt32Policy::Data {
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ explicit MTruncateToInt32(
+ MDefinition* def,
+ wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset())
+ : MUnaryInstruction(classOpcode, def), bytecodeOffset_(bytecodeOffset) {
+ setResultType(MIRType::Int32);
+ setMovable();
+
+ // Guard unless the conversion is known to be non-effectful & non-throwing.
+ if (mightHaveSideEffects(def)) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(TruncateToInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ static bool mightHaveSideEffects(MDefinition* def) {
+ return !def->definitelyType(
+ {MIRType::Undefined, MIRType::Null, MIRType::Boolean, MIRType::Int32,
+ MIRType::Double, MIRType::Float32, MIRType::String});
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ void computeRange(TempAllocator& alloc) override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ return input()->type() < MIRType::Symbol;
+ }
+
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ ALLOW_CLONE(MTruncateToInt32)
+};
+
+// It is like MTruncateToInt32, but with an instance dependency.
+class MWasmBuiltinTruncateToInt32 : public MAryInstruction<2>,
+ public ToInt32Policy::Data {
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MWasmBuiltinTruncateToInt32(
+ MDefinition* def, MDefinition* instance,
+ wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset())
+ : MAryInstruction(classOpcode), bytecodeOffset_(bytecodeOffset) {
+ initOperand(0, def);
+ initOperand(1, instance);
+ setResultType(MIRType::Int32);
+ setMovable();
+
+ // Guard unless the conversion is known to be non-effectful & non-throwing.
+ if (MTruncateToInt32::mightHaveSideEffects(def)) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBuiltinTruncateToInt32)
+ NAMED_OPERANDS((0, input), (1, instance))
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ ALLOW_CLONE(MWasmBuiltinTruncateToInt32)
+};
+
+// Converts a primitive (either typed or untyped) to a BigInt. If the input is
+// not primitive at runtime, a bailout occurs.
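+// For example, ToBigInt(true) yields 1n, while a Number input always throws,
+// which is why only Boolean and BigInt inputs avoid the guard.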
+class MToBigInt : public MUnaryInstruction, public ToBigIntPolicy::Data {
+ private:
+ explicit MToBigInt(MDefinition* def) : MUnaryInstruction(classOpcode, def) {
+ setResultType(MIRType::BigInt);
+ setMovable();
+
+ // Guard unless the conversion is known to be non-effectful & non-throwing.
+ if (!def->definitelyType({MIRType::Boolean, MIRType::BigInt})) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToBigInt)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ ALLOW_CLONE(MToBigInt)
+};
+
+// Takes a Value or typed input and returns a suitable Int64 using the
+// ToBigInt algorithm, possibly calling out to the VM for string inputs, etc.
+class MToInt64 : public MUnaryInstruction, public ToInt64Policy::Data {
+ explicit MToInt64(MDefinition* def) : MUnaryInstruction(classOpcode, def) {
+ setResultType(MIRType::Int64);
+ setMovable();
+
+ // Guard unless the conversion is known to be non-effectful & non-throwing.
+ if (!def->definitelyType(
+ {MIRType::Boolean, MIRType::BigInt, MIRType::Int64})) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToInt64)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MToInt64)
+};
+
+// Takes a BigInt pointer and returns its toInt64 value.
+class MTruncateBigIntToInt64 : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ explicit MTruncateBigIntToInt64(MDefinition* def)
+ : MUnaryInstruction(classOpcode, def) {
+ MOZ_ASSERT(def->type() == MIRType::BigInt);
+ setResultType(MIRType::Int64);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(TruncateBigIntToInt64)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MTruncateBigIntToInt64)
+};
+
+// Takes an Int64 and returns a fresh BigInt pointer.
+class MInt64ToBigInt : public MUnaryInstruction, public NoTypePolicy::Data {
+ explicit MInt64ToBigInt(MDefinition* def)
+ : MUnaryInstruction(classOpcode, def) {
+ MOZ_ASSERT(def->type() == MIRType::Int64);
+ setResultType(MIRType::BigInt);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Int64ToBigInt)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ ALLOW_CLONE(MInt64ToBigInt)
+};
+
+// Converts any type to a string.
+class MToString : public MUnaryInstruction, public ToStringPolicy::Data {
+ public:
+  // MToString has two modes for handling object/symbol arguments: if the
+  // to-string conversion happens as part of another opcode, we have to bail
+  // out to Baseline. If the conversion is for a stand-alone JSOp, we can
+  // support side-effects.
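+  // For illustration: converting an object runs its toString/valueOf via
+  // ToPrimitive and converting a Symbol throws, so those inputs either bail
+  // out or are treated as effectful, depending on the mode.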
+ enum class SideEffectHandling { Bailout, Supported };
+
+ private:
+ SideEffectHandling sideEffects_;
+ bool mightHaveSideEffects_ = false;
+
+ MToString(MDefinition* def, SideEffectHandling sideEffects)
+ : MUnaryInstruction(classOpcode, def), sideEffects_(sideEffects) {
+ setResultType(MIRType::String);
+
+ if (!def->definitelyType({MIRType::Undefined, MIRType::Null,
+ MIRType::Boolean, MIRType::Int32, MIRType::Double,
+ MIRType::Float32, MIRType::String,
+ MIRType::BigInt})) {
+ mightHaveSideEffects_ = true;
+ }
+
+ // If this instruction is not effectful, mark it as movable and set the
+ // Guard flag if needed. If the operation is effectful it won't be
+ // optimized anyway so there's no need to set any flags.
+ if (!isEffectful()) {
+ setMovable();
+      // Objects might override toString; Symbol throws. We bail out in those
+      // cases and run the side-effects in Baseline instead.
+ if (mightHaveSideEffects_) {
+ setGuard();
+ }
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(ToString)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isToString()) {
+ return false;
+ }
+ if (sideEffects_ != ins->toToString()->sideEffects_) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ if (supportSideEffects() && mightHaveSideEffects_) {
+ return AliasSet::Store(AliasSet::Any);
+ }
+ return AliasSet::None();
+ }
+
+ bool mightHaveSideEffects() const { return mightHaveSideEffects_; }
+
+ bool supportSideEffects() const {
+ return sideEffects_ == SideEffectHandling::Supported;
+ }
+
+ bool needsSnapshot() const {
+ return sideEffects_ == SideEffectHandling::Bailout && mightHaveSideEffects_;
+ }
+
+ ALLOW_CLONE(MToString)
+};
+
+class MBitNot : public MUnaryInstruction, public BitwisePolicy::Data {
+ explicit MBitNot(MDefinition* input) : MUnaryInstruction(classOpcode, input) {
+ setResultType(MIRType::Int32);
+ if (input->type() == MIRType::Int64) {
+ setResultType(MIRType::Int64);
+ }
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(BitNot)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void computeRange(TempAllocator& alloc) override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBitNot)
+};
+
+class MTypeOf : public MUnaryInstruction,
+ public BoxExceptPolicy<0, MIRType::Object>::Data {
+ explicit MTypeOf(MDefinition* def) : MUnaryInstruction(classOpcode, def) {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+ TypeDataList observed_;
+
+ public:
+ INSTRUCTION_HEADER(TypeOf)
+ TRIVIAL_NEW_WRAPPERS
+
+ void setObservedTypes(const TypeDataList& observed) { observed_ = observed; }
+ bool hasObservedTypes() const { return observed_.count() > 0; }
+ const TypeDataList& observedTypes() const { return observed_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+class MTypeOfIs : public MUnaryInstruction, public NoTypePolicy::Data {
+ JSOp jsop_;
+ JSType jstype_;
+
+ MTypeOfIs(MDefinition* def, JSOp jsop, JSType jstype)
+ : MUnaryInstruction(classOpcode, def), jsop_(jsop), jstype_(jstype) {
+ MOZ_ASSERT(def->type() == MIRType::Object || def->type() == MIRType::Value);
+
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(TypeOfIs)
+ TRIVIAL_NEW_WRAPPERS
+
+ JSOp jsop() const { return jsop_; }
+ JSType jstype() const { return jstype_; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins)) {
+ return false;
+ }
+ return jsop() == ins->toTypeOfIs()->jsop() &&
+ jstype() == ins->toTypeOfIs()->jstype();
+ }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+};
+
+class MBinaryBitwiseInstruction : public MBinaryInstruction,
+ public BitwisePolicy::Data {
+ protected:
+ MBinaryBitwiseInstruction(Opcode op, MDefinition* left, MDefinition* right,
+ MIRType type)
+ : MBinaryInstruction(op, left, right),
+ maskMatchesLeftRange(false),
+ maskMatchesRightRange(false) {
+ MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64 ||
+ (isUrsh() && type == MIRType::Double));
+ setResultType(type);
+ setMovable();
+ }
+
+ bool maskMatchesLeftRange;
+ bool maskMatchesRightRange;
+
+ public:
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ MDefinition* foldUnnecessaryBitop();
+ virtual MDefinition* foldIfZero(size_t operand) = 0;
+ virtual MDefinition* foldIfNegOne(size_t operand) = 0;
+ virtual MDefinition* foldIfEqual() = 0;
+ virtual MDefinition* foldIfAllBitsSet(size_t operand) = 0;
+ void collectRangeInfoPreTrunc() override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return binaryCongruentTo(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ TruncateKind operandTruncateKind(size_t index) const override;
+};
+
+class MBitAnd : public MBinaryBitwiseInstruction {
+ MBitAnd(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryBitwiseInstruction(classOpcode, left, right, type) {
+ setCommutative();
+ }
+
+ public:
+ INSTRUCTION_HEADER(BitAnd)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldIfZero(size_t operand) override {
+ return getOperand(operand); // 0 & x => 0;
+ }
+ MDefinition* foldIfNegOne(size_t operand) override {
+ return getOperand(1 - operand); // x & -1 => x
+ }
+ MDefinition* foldIfEqual() override {
+ return getOperand(0); // x & x => x;
+ }
+ MDefinition* foldIfAllBitsSet(size_t operand) override {
+ // e.g. for uint16: x & 0xffff => x;
+ return getOperand(1 - operand);
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBitAnd)
+};
+
+class MBitOr : public MBinaryBitwiseInstruction {
+ MBitOr(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryBitwiseInstruction(classOpcode, left, right, type) {
+ setCommutative();
+ }
+
+ public:
+ INSTRUCTION_HEADER(BitOr)
+ TRIVIAL_NEW_WRAPPERS
+
+  MDefinition* foldIfZero(size_t operand) override {
+    // 0 | x => x, so if the i-th operand is 0, return the (1-i)-th operand.
+    return getOperand(1 - operand);
+  }
+ MDefinition* foldIfNegOne(size_t operand) override {
+ return getOperand(operand); // x | -1 => -1
+ }
+ MDefinition* foldIfEqual() override {
+ return getOperand(0); // x | x => x
+ }
+ MDefinition* foldIfAllBitsSet(size_t operand) override { return this; }
+ void computeRange(TempAllocator& alloc) override;
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBitOr)
+};
+
+class MBitXor : public MBinaryBitwiseInstruction {
+ MBitXor(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryBitwiseInstruction(classOpcode, left, right, type) {
+ setCommutative();
+ }
+
+ public:
+ INSTRUCTION_HEADER(BitXor)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldIfZero(size_t operand) override {
+ return getOperand(1 - operand); // 0 ^ x => x
+ }
+ MDefinition* foldIfNegOne(size_t operand) override { return this; }
+ MDefinition* foldIfEqual() override { return this; }
+ MDefinition* foldIfAllBitsSet(size_t operand) override { return this; }
+ void computeRange(TempAllocator& alloc) override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBitXor)
+};
+
+class MShiftInstruction : public MBinaryBitwiseInstruction {
+ protected:
+ MShiftInstruction(Opcode op, MDefinition* left, MDefinition* right,
+ MIRType type)
+ : MBinaryBitwiseInstruction(op, left, right, type) {}
+
+ public:
+ MDefinition* foldIfNegOne(size_t operand) override { return this; }
+ MDefinition* foldIfEqual() override { return this; }
+ MDefinition* foldIfAllBitsSet(size_t operand) override { return this; }
+};
+
+class MLsh : public MShiftInstruction {
+ MLsh(MDefinition* left, MDefinition* right, MIRType type)
+ : MShiftInstruction(classOpcode, left, right, type) {}
+
+ public:
+ INSTRUCTION_HEADER(Lsh)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldIfZero(size_t operand) override {
+ // 0 << x => 0
+ // x << 0 => x
+ return getOperand(0);
+ }
+
+ void computeRange(TempAllocator& alloc) override;
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MLsh)
+};
+
+class MRsh : public MShiftInstruction {
+ MRsh(MDefinition* left, MDefinition* right, MIRType type)
+ : MShiftInstruction(classOpcode, left, right, type) {}
+
+ public:
+ INSTRUCTION_HEADER(Rsh)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldIfZero(size_t operand) override {
+ // 0 >> x => 0
+ // x >> 0 => x
+ return getOperand(0);
+ }
+ void computeRange(TempAllocator& alloc) override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MRsh)
+};
+
+class MUrsh : public MShiftInstruction {
+ bool bailoutsDisabled_;
+
+ MUrsh(MDefinition* left, MDefinition* right, MIRType type)
+ : MShiftInstruction(classOpcode, left, right, type),
+ bailoutsDisabled_(false) {}
+
+ public:
+ INSTRUCTION_HEADER(Ursh)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MUrsh* NewWasm(TempAllocator& alloc, MDefinition* left,
+ MDefinition* right, MIRType type);
+
+ MDefinition* foldIfZero(size_t operand) override {
+ // 0 >>> x => 0
+ if (operand == 0) {
+ return getOperand(0);
+ }
+
+ return this;
+ }
+
+ bool bailoutsDisabled() const { return bailoutsDisabled_; }
+
+ bool fallible() const;
+
+ void computeRange(TempAllocator& alloc) override;
+ void collectRangeInfoPreTrunc() override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MUrsh)
+};
+
+class MSignExtendInt32 : public MUnaryInstruction, public NoTypePolicy::Data {
+ public:
+ enum Mode { Byte, Half };
+
+ private:
+ Mode mode_;
+
+ MSignExtendInt32(MDefinition* op, Mode mode)
+ : MUnaryInstruction(classOpcode, op), mode_(mode) {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SignExtendInt32)
+ TRIVIAL_NEW_WRAPPERS
+
+ Mode mode() const { return mode_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins)) {
+ return false;
+ }
+ return ins->isSignExtendInt32() && ins->toSignExtendInt32()->mode_ == mode_;
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MSignExtendInt32)
+};
+
+class MSignExtendInt64 : public MUnaryInstruction, public NoTypePolicy::Data {
+ public:
+ enum Mode { Byte, Half, Word };
+
+ private:
+ Mode mode_;
+
+ MSignExtendInt64(MDefinition* op, Mode mode)
+ : MUnaryInstruction(classOpcode, op), mode_(mode) {
+ setResultType(MIRType::Int64);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(SignExtendInt64)
+ TRIVIAL_NEW_WRAPPERS
+
+ Mode mode() const { return mode_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins)) {
+ return false;
+ }
+ return ins->isSignExtendInt64() && ins->toSignExtendInt64()->mode_ == mode_;
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ ALLOW_CLONE(MSignExtendInt64)
+};
+
+class MBinaryArithInstruction : public MBinaryInstruction,
+ public ArithPolicy::Data {
+  // The implicit truncate flag is set by the truncate backward range analysis
+  // optimization phase and by wasm pre-processing. It is used in
+  // NeedNegativeZeroCheck to check whether the result of a multiplication
+  // needs to produce a -0 double value, and for avoiding overflow checks.
+
+  // This situation arises when the multiplication cannot be truncated even
+  // though all of its uses truncate the result, such as when the range
+  // analysis detects a precision loss in the multiplication.
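+  // For example, in (a * b) | 0 every use truncates the product, so an int32
+  // multiply can skip its overflow check unless range analysis detects a
+  // precision loss.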
+ TruncateKind implicitTruncate_;
+
+ // Whether we must preserve NaN semantics, and in particular not fold
+ // (x op id) or (id op x) to x, or replace a division by a multiply of the
+ // exact reciprocal.
+ bool mustPreserveNaN_;
+
+ protected:
+ MBinaryArithInstruction(Opcode op, MDefinition* left, MDefinition* right,
+ MIRType type)
+ : MBinaryInstruction(op, left, right),
+ implicitTruncate_(TruncateKind::NoTruncate),
+ mustPreserveNaN_(false) {
+ MOZ_ASSERT(IsNumberType(type));
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ void setMustPreserveNaN(bool b) { mustPreserveNaN_ = b; }
+ bool mustPreserveNaN() const { return mustPreserveNaN_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ virtual double getIdentity() = 0;
+
+ void setSpecialization(MIRType type) {
+ MOZ_ASSERT(IsNumberType(type));
+ setResultType(type);
+ }
+
+ virtual void trySpecializeFloat32(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!binaryCongruentTo(ins)) {
+ return false;
+ }
+ const auto* other = static_cast<const MBinaryArithInstruction*>(ins);
+ return other->mustPreserveNaN_ == mustPreserveNaN_;
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool isTruncated() const {
+ return implicitTruncate_ == TruncateKind::Truncate;
+ }
+ TruncateKind truncateKind() const { return implicitTruncate_; }
+ void setTruncateKind(TruncateKind kind) {
+ implicitTruncate_ = std::max(implicitTruncate_, kind);
+ }
+};
+
+class MMinMax : public MBinaryInstruction, public ArithPolicy::Data {
+ bool isMax_;
+
+ MMinMax(MDefinition* left, MDefinition* right, MIRType type, bool isMax)
+ : MBinaryInstruction(classOpcode, left, right), isMax_(isMax) {
+ MOZ_ASSERT(IsNumberType(type));
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(MinMax)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MMinMax* NewWasm(TempAllocator& alloc, MDefinition* left,
+ MDefinition* right, MIRType type, bool isMax) {
+ return New(alloc, left, right, type, isMax);
+ }
+
+ bool isMax() const { return isMax_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!congruentIfOperandsEqual(ins)) {
+ return false;
+ }
+ const MMinMax* other = ins->toMinMax();
+ return other->isMax() == isMax();
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void computeRange(TempAllocator& alloc) override;
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MMinMax)
+};
+
+class MMinMaxArray : public MUnaryInstruction, public SingleObjectPolicy::Data {
+ bool isMax_;
+
+ MMinMaxArray(MDefinition* array, MIRType type, bool isMax)
+ : MUnaryInstruction(classOpcode, array), isMax_(isMax) {
+ MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Double);
+ setResultType(type);
+
+ // We can't DCE this, even if the result is unused, in case one of the
+ // elements of the array is an object with a `valueOf` function that
+ // must be called.
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(MinMaxArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, array))
+
+ bool isMax() const { return isMax_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isMinMaxArray() || ins->toMinMaxArray()->isMax() != isMax()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::Element);
+ }
+};
+
+class MAbs : public MUnaryInstruction, public ArithPolicy::Data {
+ bool implicitTruncate_;
+
+ MAbs(MDefinition* num, MIRType type)
+ : MUnaryInstruction(classOpcode, num), implicitTruncate_(false) {
+ MOZ_ASSERT(IsNumberType(type));
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Abs)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MAbs* NewWasm(TempAllocator& alloc, MDefinition* num, MIRType type) {
+ auto* ins = new (alloc) MAbs(num, type);
+ if (type == MIRType::Int32) {
+ ins->implicitTruncate_ = true;
+ }
+ return ins;
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ bool fallible() const;
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void computeRange(TempAllocator& alloc) override;
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MAbs)
+};
+
+class MClz : public MUnaryInstruction, public BitwisePolicy::Data {
+ bool operandIsNeverZero_;
+
+ explicit MClz(MDefinition* num, MIRType type)
+ : MUnaryInstruction(classOpcode, num), operandIsNeverZero_(false) {
+ MOZ_ASSERT(IsIntType(type));
+ MOZ_ASSERT(IsNumberType(num->type()));
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Clz)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, num))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool operandIsNeverZero() const { return operandIsNeverZero_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void computeRange(TempAllocator& alloc) override;
+ void collectRangeInfoPreTrunc() override;
+};
+
+class MCtz : public MUnaryInstruction, public BitwisePolicy::Data {
+ bool operandIsNeverZero_;
+
+ explicit MCtz(MDefinition* num, MIRType type)
+ : MUnaryInstruction(classOpcode, num), operandIsNeverZero_(false) {
+ MOZ_ASSERT(IsIntType(type));
+ MOZ_ASSERT(IsNumberType(num->type()));
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Ctz)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, num))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool operandIsNeverZero() const { return operandIsNeverZero_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void computeRange(TempAllocator& alloc) override;
+ void collectRangeInfoPreTrunc() override;
+};
+
+class MPopcnt : public MUnaryInstruction, public BitwisePolicy::Data {
+ explicit MPopcnt(MDefinition* num, MIRType type)
+ : MUnaryInstruction(classOpcode, num) {
+ MOZ_ASSERT(IsNumberType(num->type()));
+ MOZ_ASSERT(IsIntType(type));
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Popcnt)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, num))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void computeRange(TempAllocator& alloc) override;
+};
+
+// Inline implementation of Math.sqrt().
+class MSqrt : public MUnaryInstruction, public FloatingPointPolicy<0>::Data {
+ MSqrt(MDefinition* num, MIRType type) : MUnaryInstruction(classOpcode, num) {
+ setResultType(type);
+ specialization_ = type;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Sqrt)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void computeRange(TempAllocator& alloc) override;
+
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MSqrt)
+};
+
+class MCopySign : public MBinaryInstruction, public NoTypePolicy::Data {
+ MCopySign(MDefinition* lhs, MDefinition* rhs, MIRType type)
+ : MBinaryInstruction(classOpcode, lhs, rhs) {
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(CopySign)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ ALLOW_CLONE(MCopySign)
+};
+
+// Inline implementation of Math.hypot().
+class MHypot : public MVariadicInstruction, public AllDoublePolicy::Data {
+ MHypot() : MVariadicInstruction(classOpcode) {
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Hypot)
+ static MHypot* New(TempAllocator& alloc, const MDefinitionVector& vector);
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool possiblyCalls() const override { return true; }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ bool canClone() const override { return true; }
+
+ MInstruction* clone(TempAllocator& alloc,
+ const MDefinitionVector& inputs) const override {
+ return MHypot::New(alloc, inputs);
+ }
+};
+
+// Inline implementation of Math.pow().
+//
+// Supports the following three specializations:
+//
+// 1. MPow(FloatingPoint, FloatingPoint) -> Double
+// - The most general mode, calls js::ecmaPow.
+// - Never performs a bailout.
+// 2. MPow(FloatingPoint, Int32) -> Double
+// - Optimization to call js::powi instead of js::ecmaPow.
+// - Never performs a bailout.
+// 3. MPow(Int32, Int32) -> Int32
+// - Performs the complete exponentiation operation in assembly code.
+// - Bails out if the result doesn't fit in Int32.
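+//
+// For illustration: with two int32 operands, 2 ** 3 uses specialization 3 and
+// bails out if the result overflows int32; a double base or power falls back
+// to specialization 2 or 1.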
+class MPow : public MBinaryInstruction, public PowPolicy::Data {
+  // If false, the result is guaranteed to never be negative zero, as long as
+  // the power is a positive number.
+ bool canBeNegativeZero_;
+
+ MPow(MDefinition* input, MDefinition* power, MIRType specialization)
+ : MBinaryInstruction(classOpcode, input, power) {
+ MOZ_ASSERT(specialization == MIRType::Int32 ||
+ specialization == MIRType::Double);
+ setResultType(specialization);
+ setMovable();
+
+ // The result can't be negative zero if the base is an Int32 value.
+ canBeNegativeZero_ = input->type() != MIRType::Int32;
+ }
+
+ // Helpers for `foldsTo`
+ MDefinition* foldsConstant(TempAllocator& alloc);
+ MDefinition* foldsConstantPower(TempAllocator& alloc);
+
+ bool canBeNegativeZero() const { return canBeNegativeZero_; }
+
+ public:
+ INSTRUCTION_HEADER(Pow)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* input() const { return lhs(); }
+ MDefinition* power() const { return rhs(); }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool possiblyCalls() const override { return type() != MIRType::Int32; }
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MPow)
+};
+
+// Inline implementation of Math.pow(x, 0.5), which subtly differs from
+// Math.sqrt(x).
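+// For example, Math.pow(-Infinity, 0.5) is Infinity and Math.pow(-0, 0.5) is
+// +0, whereas Math.sqrt(-Infinity) is NaN and Math.sqrt(-0) is -0.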
+class MPowHalf : public MUnaryInstruction, public DoublePolicy<0>::Data {
+ bool operandIsNeverNegativeInfinity_;
+ bool operandIsNeverNegativeZero_;
+ bool operandIsNeverNaN_;
+
+ explicit MPowHalf(MDefinition* input)
+ : MUnaryInstruction(classOpcode, input),
+ operandIsNeverNegativeInfinity_(false),
+ operandIsNeverNegativeZero_(false),
+ operandIsNeverNaN_(false) {
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(PowHalf)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ bool operandIsNeverNegativeInfinity() const {
+ return operandIsNeverNegativeInfinity_;
+ }
+ bool operandIsNeverNegativeZero() const {
+ return operandIsNeverNegativeZero_;
+ }
+ bool operandIsNeverNaN() const { return operandIsNeverNaN_; }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void collectRangeInfoPreTrunc() override;
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MPowHalf)
+};
+
+class MSign : public MUnaryInstruction, public SignPolicy::Data {
+ private:
+ MSign(MDefinition* input, MIRType resultType)
+ : MUnaryInstruction(classOpcode, input) {
+ MOZ_ASSERT(IsNumberType(input->type()));
+ MOZ_ASSERT(resultType == MIRType::Int32 || resultType == MIRType::Double);
+ specialization_ = input->type();
+ setResultType(resultType);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Sign)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ void computeRange(TempAllocator& alloc) override;
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MSign)
+};
+
+class MMathFunction : public MUnaryInstruction,
+ public FloatingPointPolicy<0>::Data {
+ UnaryMathFunction function_;
+
+ // A nullptr cache means this function will neither access nor update the
+ // cache.
+ MMathFunction(MDefinition* input, UnaryMathFunction function)
+ : MUnaryInstruction(classOpcode, input), function_(function) {
+ setResultType(MIRType::Double);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(MathFunction)
+ TRIVIAL_NEW_WRAPPERS
+
+ UnaryMathFunction function() const { return function_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isMathFunction()) {
+ return false;
+ }
+ if (ins->toMathFunction()->function() != function()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool possiblyCalls() const override { return true; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ static const char* FunctionName(UnaryMathFunction function);
+
+ bool isFloat32Commutative() const override;
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+
+ void computeRange(TempAllocator& alloc) override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MMathFunction)
+};
+
+class MAdd : public MBinaryArithInstruction {
+ MAdd(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryArithInstruction(classOpcode, left, right, type) {
+ setCommutative();
+ }
+
+ MAdd(MDefinition* left, MDefinition* right, TruncateKind truncateKind)
+ : MAdd(left, right, MIRType::Int32) {
+ setTruncateKind(truncateKind);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Add)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MAdd* NewWasm(TempAllocator& alloc, MDefinition* left,
+ MDefinition* right, MIRType type) {
+ auto* ret = new (alloc) MAdd(left, right, type);
+ if (type == MIRType::Int32) {
+ ret->setTruncateKind(TruncateKind::Truncate);
+ }
+ return ret;
+ }
+
+ bool isFloat32Commutative() const override { return true; }
+
+ double getIdentity() override { return 0; }
+
+ bool fallible() const;
+ void computeRange(TempAllocator& alloc) override;
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MAdd)
+};
+
+class MSub : public MBinaryArithInstruction {
+ MSub(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryArithInstruction(classOpcode, left, right, type) {}
+
+ public:
+ INSTRUCTION_HEADER(Sub)
+ TRIVIAL_NEW_WRAPPERS
+
+ static MSub* NewWasm(TempAllocator& alloc, MDefinition* left,
+ MDefinition* right, MIRType type, bool mustPreserveNaN) {
+ auto* ret = new (alloc) MSub(left, right, type);
+ ret->setMustPreserveNaN(mustPreserveNaN);
+ if (type == MIRType::Int32) {
+ ret->setTruncateKind(TruncateKind::Truncate);
+ }
+ return ret;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ double getIdentity() override { return 0; }
+
+ bool isFloat32Commutative() const override { return true; }
+
+ bool fallible() const;
+ void computeRange(TempAllocator& alloc) override;
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MSub)
+};
+
+class MMul : public MBinaryArithInstruction {
+ public:
+ enum Mode { Normal, Integer };
+
+ private:
+  // Annotation that the result could be a negative zero
+  // and that we need to guard against this during execution.
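+  // For example, (-1 * 0) evaluates to -0, which an int32 result cannot
+  // represent.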
+ bool canBeNegativeZero_;
+
+ Mode mode_;
+
+ MMul(MDefinition* left, MDefinition* right, MIRType type, Mode mode)
+ : MBinaryArithInstruction(classOpcode, left, right, type),
+ canBeNegativeZero_(true),
+ mode_(mode) {
+ setCommutative();
+ if (mode == Integer) {
+ // This implements the required behavior for Math.imul, which
+ // can never fail and always truncates its output to int32.
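+      // For example, Math.imul(0x7fffffff, 2) is -2: the product wraps to
+      // int32 without any bailout.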
+ canBeNegativeZero_ = false;
+ setTruncateKind(TruncateKind::Truncate);
+ }
+ MOZ_ASSERT_IF(mode != Integer, mode == Normal);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Mul)
+
+ static MMul* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MIRType type, Mode mode = Normal) {
+ return new (alloc) MMul(left, right, type, mode);
+ }
+ static MMul* NewWasm(TempAllocator& alloc, MDefinition* left,
+ MDefinition* right, MIRType type, Mode mode,
+ bool mustPreserveNaN) {
+ auto* ret = new (alloc) MMul(left, right, type, mode);
+ ret->setMustPreserveNaN(mustPreserveNaN);
+ return ret;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void analyzeEdgeCasesForward() override;
+ void analyzeEdgeCasesBackward() override;
+ void collectRangeInfoPreTrunc() override;
+
+ double getIdentity() override { return 1; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isMul()) {
+ return false;
+ }
+
+ const MMul* mul = ins->toMul();
+ if (canBeNegativeZero_ != mul->canBeNegativeZero()) {
+ return false;
+ }
+
+ if (mode_ != mul->mode()) {
+ return false;
+ }
+
+ if (mustPreserveNaN() != mul->mustPreserveNaN()) {
+ return false;
+ }
+
+ return binaryCongruentTo(ins);
+ }
+
+ bool canOverflow() const;
+
+ bool canBeNegativeZero() const { return canBeNegativeZero_; }
+ void setCanBeNegativeZero(bool negativeZero) {
+ canBeNegativeZero_ = negativeZero;
+ }
+
+ bool fallible() const { return canBeNegativeZero_ || canOverflow(); }
+
+ bool isFloat32Commutative() const override { return true; }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ Mode mode() const { return mode_; }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MMul)
+};
+
+class MDiv : public MBinaryArithInstruction {
+ bool canBeNegativeZero_;
+ bool canBeNegativeOverflow_;
+ bool canBeDivideByZero_;
+ bool canBeNegativeDividend_;
+ bool unsigned_; // If false, signedness will be derived from operands
+ bool trapOnError_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MDiv(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryArithInstruction(classOpcode, left, right, type),
+ canBeNegativeZero_(true),
+ canBeNegativeOverflow_(true),
+ canBeDivideByZero_(true),
+ canBeNegativeDividend_(true),
+ unsigned_(false),
+ trapOnError_(false) {}
+
+ public:
+ INSTRUCTION_HEADER(Div)
+
+ static MDiv* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MIRType type) {
+ return new (alloc) MDiv(left, right, type);
+ }
+ static MDiv* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MIRType type, bool unsignd, bool trapOnError = false,
+ wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset(),
+ bool mustPreserveNaN = false) {
+ auto* div = new (alloc) MDiv(left, right, type);
+ div->unsigned_ = unsignd;
+ div->trapOnError_ = trapOnError;
+ div->bytecodeOffset_ = bytecodeOffset;
+ if (trapOnError) {
+ div->setGuard(); // not removable because of possible side-effects.
+ div->setNotMovable();
+ }
+ div->setMustPreserveNaN(mustPreserveNaN);
+ if (type == MIRType::Int32) {
+ div->setTruncateKind(TruncateKind::Truncate);
+ }
+ return div;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ void analyzeEdgeCasesForward() override;
+ void analyzeEdgeCasesBackward() override;
+
+ double getIdentity() override { MOZ_CRASH("not used"); }
+
+ bool canBeNegativeZero() const { return canBeNegativeZero_; }
+ void setCanBeNegativeZero(bool negativeZero) {
+ canBeNegativeZero_ = negativeZero;
+ }
+
+ bool canBeNegativeOverflow() const { return canBeNegativeOverflow_; }
+
+ bool canBeDivideByZero() const { return canBeDivideByZero_; }
+
+ bool canBeNegativeDividend() const {
+ // "Dividend" is an ambiguous concept for unsigned truncated
+ // division, because of the truncation procedure:
+ // ((x>>>0)/2)|0, for example, gets transformed in
+ // MDiv::truncate into a node with lhs representing x (not
+ // x>>>0) and rhs representing the constant 2; in other words,
+ // the MIR node corresponds to "cast operands to unsigned and
+ // divide" operation. In this case, is the dividend x or is it
+ // x>>>0? In order to resolve such ambiguities, we disallow
+ // the usage of this method for unsigned division.
+ MOZ_ASSERT(!unsigned_);
+ return canBeNegativeDividend_;
+ }
+
+ bool isUnsigned() const { return unsigned_; }
+
+ bool isTruncatedIndirectly() const {
+ return truncateKind() >= TruncateKind::IndirectTruncate;
+ }
+
+ bool canTruncateInfinities() const { return isTruncated(); }
+ bool canTruncateRemainder() const { return isTruncated(); }
+ bool canTruncateOverflow() const {
+ return isTruncated() || isTruncatedIndirectly();
+ }
+ bool canTruncateNegativeZero() const {
+ return isTruncated() || isTruncatedIndirectly();
+ }
+
+ bool trapOnError() const { return trapOnError_; }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(bytecodeOffset_.isValid());
+ return bytecodeOffset_;
+ }
+
+ bool isFloat32Commutative() const override { return true; }
+
+ void computeRange(TempAllocator& alloc) override;
+ bool fallible() const;
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+ void collectRangeInfoPreTrunc() override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!MBinaryArithInstruction::congruentTo(ins)) {
+ return false;
+ }
+ const MDiv* other = ins->toDiv();
+ MOZ_ASSERT(other->trapOnError() == trapOnError_);
+ return unsigned_ == other->isUnsigned();
+ }
+
+ ALLOW_CLONE(MDiv)
+};
+
+class MWasmBuiltinDivI64 : public MAryInstruction<3>, public ArithPolicy::Data {
+ bool canBeNegativeZero_;
+ bool canBeNegativeOverflow_;
+ bool canBeDivideByZero_;
+ bool canBeNegativeDividend_;
+ bool unsigned_; // If false, signedness will be derived from operands
+ bool trapOnError_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MWasmBuiltinDivI64(MDefinition* left, MDefinition* right,
+ MDefinition* instance)
+ : MAryInstruction(classOpcode),
+ canBeNegativeZero_(true),
+ canBeNegativeOverflow_(true),
+ canBeDivideByZero_(true),
+ canBeNegativeDividend_(true),
+ unsigned_(false),
+ trapOnError_(false) {
+ initOperand(0, left);
+ initOperand(1, right);
+ initOperand(2, instance);
+
+ setResultType(MIRType::Int64);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBuiltinDivI64)
+
+ NAMED_OPERANDS((0, lhs), (1, rhs), (2, instance))
+
+ static MWasmBuiltinDivI64* New(
+ TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MDefinition* instance, bool unsignd, bool trapOnError = false,
+ wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset()) {
+ auto* wasm64Div = new (alloc) MWasmBuiltinDivI64(left, right, instance);
+ wasm64Div->unsigned_ = unsignd;
+ wasm64Div->trapOnError_ = trapOnError;
+ wasm64Div->bytecodeOffset_ = bytecodeOffset;
+ if (trapOnError) {
+ wasm64Div->setGuard(); // not removable because of possible side-effects.
+ wasm64Div->setNotMovable();
+ }
+ return wasm64Div;
+ }
+
+ bool canBeNegativeZero() const { return canBeNegativeZero_; }
+ void setCanBeNegativeZero(bool negativeZero) {
+ canBeNegativeZero_ = negativeZero;
+ }
+
+ bool canBeNegativeOverflow() const { return canBeNegativeOverflow_; }
+
+ bool canBeDivideByZero() const { return canBeDivideByZero_; }
+
+ bool canBeNegativeDividend() const {
+ // "Dividend" is an ambiguous concept for unsigned truncated
+ // division, because of the truncation procedure:
+ // ((x>>>0)/2)|0, for example, gets transformed in
+ // MWasmDiv::truncate into a node with lhs representing x (not
+ // x>>>0) and rhs representing the constant 2; in other words,
+ // the MIR node corresponds to "cast operands to unsigned and
+ // divide" operation. In this case, is the dividend x or is it
+ // x>>>0? In order to resolve such ambiguities, we disallow
+ // the usage of this method for unsigned division.
+ MOZ_ASSERT(!unsigned_);
+ return canBeNegativeDividend_;
+ }
+
+ bool isUnsigned() const { return unsigned_; }
+
+ bool trapOnError() const { return trapOnError_; }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(bytecodeOffset_.isValid());
+ return bytecodeOffset_;
+ }
+
+ ALLOW_CLONE(MWasmBuiltinDivI64)
+};
+
+class MMod : public MBinaryArithInstruction {
+ bool unsigned_; // If false, signedness will be derived from operands
+ bool canBeNegativeDividend_;
+ bool canBePowerOfTwoDivisor_;
+ bool canBeDivideByZero_;
+ bool trapOnError_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MMod(MDefinition* left, MDefinition* right, MIRType type)
+ : MBinaryArithInstruction(classOpcode, left, right, type),
+ unsigned_(false),
+ canBeNegativeDividend_(true),
+ canBePowerOfTwoDivisor_(true),
+ canBeDivideByZero_(true),
+ trapOnError_(false) {}
+
+ public:
+ INSTRUCTION_HEADER(Mod)
+
+ static MMod* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MIRType type) {
+ return new (alloc) MMod(left, right, type);
+ }
+ static MMod* New(
+ TempAllocator& alloc, MDefinition* left, MDefinition* right, MIRType type,
+ bool unsignd, bool trapOnError = false,
+ wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset()) {
+ auto* mod = new (alloc) MMod(left, right, type);
+ mod->unsigned_ = unsignd;
+ mod->trapOnError_ = trapOnError;
+ mod->bytecodeOffset_ = bytecodeOffset;
+ if (trapOnError) {
+ mod->setGuard(); // not removable because of possible side-effects.
+ mod->setNotMovable();
+ }
+ if (type == MIRType::Int32) {
+ mod->setTruncateKind(TruncateKind::Truncate);
+ }
+ return mod;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ double getIdentity() override { MOZ_CRASH("not used"); }
+
+ bool canBeNegativeDividend() const {
+ MOZ_ASSERT(type() == MIRType::Int32 || type() == MIRType::Int64);
+ MOZ_ASSERT(!unsigned_);
+ return canBeNegativeDividend_;
+ }
+
+ bool canBeDivideByZero() const {
+ MOZ_ASSERT(type() == MIRType::Int32 || type() == MIRType::Int64);
+ return canBeDivideByZero_;
+ }
+
+ bool canBePowerOfTwoDivisor() const {
+ MOZ_ASSERT(type() == MIRType::Int32);
+ return canBePowerOfTwoDivisor_;
+ }
+
+ void analyzeEdgeCasesForward() override;
+
+ bool isUnsigned() const { return unsigned_; }
+
+ bool trapOnError() const { return trapOnError_; }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(bytecodeOffset_.isValid());
+ return bytecodeOffset_;
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ bool fallible() const;
+
+ void computeRange(TempAllocator& alloc) override;
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+ void collectRangeInfoPreTrunc() override;
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return MBinaryArithInstruction::congruentTo(ins) &&
+ unsigned_ == ins->toMod()->isUnsigned();
+ }
+
+ bool possiblyCalls() const override { return type() == MIRType::Double; }
+
+ ALLOW_CLONE(MMod)
+};
+
+class MWasmBuiltinModD : public MAryInstruction<3>, public ArithPolicy::Data {
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MWasmBuiltinModD(MDefinition* left, MDefinition* right, MDefinition* instance,
+ MIRType type)
+ : MAryInstruction(classOpcode) {
+ initOperand(0, left);
+ initOperand(1, right);
+ initOperand(2, instance);
+
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBuiltinModD)
+ NAMED_OPERANDS((0, lhs), (1, rhs), (2, instance))
+
+ static MWasmBuiltinModD* New(
+ TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MDefinition* instance, MIRType type,
+ wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset()) {
+ auto* wasmBuiltinModD =
+ new (alloc) MWasmBuiltinModD(left, right, instance, type);
+ wasmBuiltinModD->bytecodeOffset_ = bytecodeOffset;
+ return wasmBuiltinModD;
+ }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(bytecodeOffset_.isValid());
+ return bytecodeOffset_;
+ }
+
+ ALLOW_CLONE(MWasmBuiltinModD)
+};
+
+class MWasmBuiltinModI64 : public MAryInstruction<3>, public ArithPolicy::Data {
+ bool unsigned_; // If false, signedness will be derived from operands
+ bool canBeNegativeDividend_;
+ bool canBeDivideByZero_;
+ bool trapOnError_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MWasmBuiltinModI64(MDefinition* left, MDefinition* right,
+ MDefinition* instance)
+ : MAryInstruction(classOpcode),
+ unsigned_(false),
+ canBeNegativeDividend_(true),
+ canBeDivideByZero_(true),
+ trapOnError_(false) {
+ initOperand(0, left);
+ initOperand(1, right);
+ initOperand(2, instance);
+
+ setResultType(MIRType::Int64);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBuiltinModI64)
+
+ NAMED_OPERANDS((0, lhs), (1, rhs), (2, instance))
+
+ static MWasmBuiltinModI64* New(
+ TempAllocator& alloc, MDefinition* left, MDefinition* right,
+ MDefinition* instance, bool unsignd, bool trapOnError = false,
+ wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset()) {
+ auto* mod = new (alloc) MWasmBuiltinModI64(left, right, instance);
+ mod->unsigned_ = unsignd;
+ mod->trapOnError_ = trapOnError;
+ mod->bytecodeOffset_ = bytecodeOffset;
+ if (trapOnError) {
+ mod->setGuard(); // not removable because of possible side-effects.
+ mod->setNotMovable();
+ }
+ return mod;
+ }
+
+ bool canBeNegativeDividend() const {
+ MOZ_ASSERT(!unsigned_);
+ return canBeNegativeDividend_;
+ }
+
+ bool canBeDivideByZero() const { return canBeDivideByZero_; }
+
+ bool isUnsigned() const { return unsigned_; }
+
+ bool trapOnError() const { return trapOnError_; }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(bytecodeOffset_.isValid());
+ return bytecodeOffset_;
+ }
+
+ ALLOW_CLONE(MWasmBuiltinModI64)
+};
+
+class MBigIntBinaryArithInstruction : public MBinaryInstruction,
+ public BigIntArithPolicy::Data {
+ protected:
+ MBigIntBinaryArithInstruction(Opcode op, MDefinition* left,
+ MDefinition* right)
+ : MBinaryInstruction(op, left, right) {
+ setResultType(MIRType::BigInt);
+ setMovable();
+ }
+
+ public:
+ bool congruentTo(const MDefinition* ins) const override {
+ return binaryCongruentTo(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MBigIntAdd : public MBigIntBinaryArithInstruction {
+ MBigIntAdd(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ setCommutative();
+
+ // Don't guard this instruction even though adding two BigInts can throw
+    // JSMSG_BIGINT_TOO_LARGE. This matches the behavior when concatenating
+    // too-large strings in MConcat.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntAdd)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntAdd)
+};
+
+class MBigIntSub : public MBigIntBinaryArithInstruction {
+ MBigIntSub(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ // See MBigIntAdd for why we don't guard this instruction.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntSub)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntSub)
+};
+
+class MBigIntMul : public MBigIntBinaryArithInstruction {
+ MBigIntMul(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ setCommutative();
+
+ // See MBigIntAdd for why we don't guard this instruction.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntMul)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntMul)
+};
+
+class MBigIntDiv : public MBigIntBinaryArithInstruction {
+ bool canBeDivideByZero_;
+
+ MBigIntDiv(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ MOZ_ASSERT(right->type() == MIRType::BigInt);
+ canBeDivideByZero_ =
+ !right->isConstant() || right->toConstant()->toBigInt()->isZero();
+
+ // Throws when the divisor is zero.
+ if (canBeDivideByZero_) {
+ setGuard();
+ setNotMovable();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntDiv)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool canBeDivideByZero() const { return canBeDivideByZero_; }
+
+ AliasSet getAliasSet() const override {
+ if (canBeDivideByZero()) {
+ return AliasSet::Store(AliasSet::ExceptionState);
+ }
+ return AliasSet::None();
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return !canBeDivideByZero(); }
+
+ ALLOW_CLONE(MBigIntDiv)
+};
+
+class MBigIntMod : public MBigIntBinaryArithInstruction {
+ bool canBeDivideByZero_;
+
+ MBigIntMod(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ MOZ_ASSERT(right->type() == MIRType::BigInt);
+ canBeDivideByZero_ =
+ !right->isConstant() || right->toConstant()->toBigInt()->isZero();
+
+ // Throws when the divisor is zero.
+ if (canBeDivideByZero_) {
+ setGuard();
+ setNotMovable();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntMod)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool canBeDivideByZero() const { return canBeDivideByZero_; }
+
+ AliasSet getAliasSet() const override {
+ if (canBeDivideByZero()) {
+ return AliasSet::Store(AliasSet::ExceptionState);
+ }
+ return AliasSet::None();
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return !canBeDivideByZero(); }
+
+ ALLOW_CLONE(MBigIntMod)
+};
+
+class MBigIntPow : public MBigIntBinaryArithInstruction {
+ bool canBeNegativeExponent_;
+
+ MBigIntPow(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ MOZ_ASSERT(right->type() == MIRType::BigInt);
+ canBeNegativeExponent_ =
+ !right->isConstant() || right->toConstant()->toBigInt()->isNegative();
+
+ // Throws when the exponent is negative.
+ if (canBeNegativeExponent_) {
+ setGuard();
+ setNotMovable();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntPow)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool canBeNegativeExponent() const { return canBeNegativeExponent_; }
+
+ AliasSet getAliasSet() const override {
+ if (canBeNegativeExponent()) {
+ return AliasSet::Store(AliasSet::ExceptionState);
+ }
+ return AliasSet::None();
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return !canBeNegativeExponent(); }
+
+ ALLOW_CLONE(MBigIntPow)
+};
+
+class MBigIntBitAnd : public MBigIntBinaryArithInstruction {
+ MBigIntBitAnd(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ setCommutative();
+
+ // We don't need to guard this instruction because it can only fail on OOM.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntBitAnd)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntBitAnd)
+};
+
+class MBigIntBitOr : public MBigIntBinaryArithInstruction {
+ MBigIntBitOr(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ setCommutative();
+
+ // We don't need to guard this instruction because it can only fail on OOM.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntBitOr)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntBitOr)
+};
+
+class MBigIntBitXor : public MBigIntBinaryArithInstruction {
+ MBigIntBitXor(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ setCommutative();
+
+ // We don't need to guard this instruction because it can only fail on OOM.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntBitXor)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntBitXor)
+};
+
+class MBigIntLsh : public MBigIntBinaryArithInstruction {
+ MBigIntLsh(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ // See MBigIntAdd for why we don't guard this instruction.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntLsh)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntLsh)
+};
+
+class MBigIntRsh : public MBigIntBinaryArithInstruction {
+ MBigIntRsh(MDefinition* left, MDefinition* right)
+ : MBigIntBinaryArithInstruction(classOpcode, left, right) {
+ // See MBigIntAdd for why we don't guard this instruction.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntRsh)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntRsh)
+};
+
+class MBigIntUnaryArithInstruction : public MUnaryInstruction,
+ public BigIntArithPolicy::Data {
+ protected:
+ MBigIntUnaryArithInstruction(Opcode op, MDefinition* input)
+ : MUnaryInstruction(op, input) {
+ setResultType(MIRType::BigInt);
+ setMovable();
+ }
+
+ public:
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MBigIntIncrement : public MBigIntUnaryArithInstruction {
+ explicit MBigIntIncrement(MDefinition* input)
+ : MBigIntUnaryArithInstruction(classOpcode, input) {
+ // See MBigIntAdd for why we don't guard this instruction.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntIncrement)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntIncrement)
+};
+
+class MBigIntDecrement : public MBigIntUnaryArithInstruction {
+ explicit MBigIntDecrement(MDefinition* input)
+ : MBigIntUnaryArithInstruction(classOpcode, input) {
+ // See MBigIntAdd for why we don't guard this instruction.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntDecrement)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntDecrement)
+};
+
+class MBigIntNegate : public MBigIntUnaryArithInstruction {
+ explicit MBigIntNegate(MDefinition* input)
+ : MBigIntUnaryArithInstruction(classOpcode, input) {
+ // We don't need to guard this instruction because it can only fail on OOM.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntNegate)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntNegate)
+};
+
+class MBigIntBitNot : public MBigIntUnaryArithInstruction {
+ explicit MBigIntBitNot(MDefinition* input)
+ : MBigIntUnaryArithInstruction(classOpcode, input) {
+ // See MBigIntAdd for why we don't guard this instruction.
+ }
+
+ public:
+ INSTRUCTION_HEADER(BigIntBitNot)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MBigIntBitNot)
+};
+
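+// Concatenate two strings. At least one operand is known to be a string; the
+// other is converted to a string by the type policy if necessary.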
+class MConcat : public MBinaryInstruction,
+ public MixPolicy<ConvertToStringPolicy<0>,
+ ConvertToStringPolicy<1>>::Data {
+ MConcat(MDefinition* left, MDefinition* right)
+ : MBinaryInstruction(classOpcode, left, right) {
+    // At least one input should definitely be a string.
+ MOZ_ASSERT(left->type() == MIRType::String ||
+ right->type() == MIRType::String);
+
+ setMovable();
+ setResultType(MIRType::String);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Concat)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MConcat)
+};
+
+class MStringConvertCase : public MUnaryInstruction,
+ public StringPolicy<0>::Data {
+ public:
+ enum Mode { LowerCase, UpperCase };
+
+ private:
+ Mode mode_;
+
+ MStringConvertCase(MDefinition* string, Mode mode)
+ : MUnaryInstruction(classOpcode, string), mode_(mode) {
+ setResultType(MIRType::String);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(StringConvertCase)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, string))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toStringConvertCase()->mode() == mode();
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool possiblyCalls() const override { return true; }
+ Mode mode() const { return mode_; }
+};
+
+// This is a 3 state flag used by FlagPhiInputsAsImplicitlyUsed to record and
+// propagate the information about the consumers of a Phi instruction. This is
+// then used to set ImplicitlyUsed flags on the inputs of such Phi instructions.
+enum class PhiUsage : uint8_t { Unknown, Unused, Used };
+
+using PhiVector = Vector<MPhi*, 4, JitAllocPolicy>;
+
+class MPhi final : public MDefinition,
+ public InlineListNode<MPhi>,
+ public NoTypePolicy::Data {
+ using InputVector = js::Vector<MUse, 2, JitAllocPolicy>;
+ InputVector inputs_;
+
+ TruncateKind truncateKind_;
+ bool triedToSpecialize_;
+ bool isIterator_;
+ bool canProduceFloat32_;
+ bool canConsumeFloat32_;
+  // Record the state of the data flow before any mutation is made to the
+  // control flow, so that removing branches is properly accounted for.
+ PhiUsage usageAnalysis_;
+
+ protected:
+ MUse* getUseFor(size_t index) override {
+ MOZ_ASSERT(index < numOperands());
+ return &inputs_[index];
+ }
+ const MUse* getUseFor(size_t index) const override { return &inputs_[index]; }
+
+ public:
+ INSTRUCTION_HEADER_WITHOUT_TYPEPOLICY(Phi)
+ virtual const TypePolicy* typePolicy();
+ virtual MIRType typePolicySpecialization();
+
+ MPhi(TempAllocator& alloc, MIRType resultType)
+ : MDefinition(classOpcode),
+ inputs_(alloc),
+ truncateKind_(TruncateKind::NoTruncate),
+ triedToSpecialize_(false),
+ isIterator_(false),
+ canProduceFloat32_(false),
+ canConsumeFloat32_(false),
+ usageAnalysis_(PhiUsage::Unknown) {
+ setResultType(resultType);
+ }
+
+ static MPhi* New(TempAllocator& alloc, MIRType resultType = MIRType::Value) {
+ return new (alloc) MPhi(alloc, resultType);
+ }
+ static MPhi* New(TempAllocator::Fallible alloc,
+ MIRType resultType = MIRType::Value) {
+ return new (alloc) MPhi(alloc.alloc, resultType);
+ }
+
+ void removeOperand(size_t index);
+ void removeAllOperands();
+
+ MDefinition* getOperand(size_t index) const override {
+ return inputs_[index].producer();
+ }
+ size_t numOperands() const override { return inputs_.length(); }
+ size_t indexOf(const MUse* u) const final {
+ MOZ_ASSERT(u >= &inputs_[0]);
+ MOZ_ASSERT(u <= &inputs_[numOperands() - 1]);
+ return u - &inputs_[0];
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final {
+ inputs_[index].replaceProducer(operand);
+ }
+ bool triedToSpecialize() const { return triedToSpecialize_; }
+ void specialize(MIRType type) {
+ triedToSpecialize_ = true;
+ setResultType(type);
+ }
+
+#ifdef DEBUG
+ // Assert that this is a phi in a loop header with a unique predecessor and
+ // a unique backedge.
+ void assertLoopPhi() const;
+#else
+ void assertLoopPhi() const {}
+#endif
+
+ // Assuming this phi is in a loop header with a unique loop entry, return
+ // the phi operand along the loop entry.
+ MDefinition* getLoopPredecessorOperand() const;
+
+ // Assuming this phi is in a loop header with a unique loop entry, return
+ // the phi operand along the loop backedge.
+ MDefinition* getLoopBackedgeOperand() const;
+
+ // Whether this phi's type already includes information for def.
+ bool typeIncludes(MDefinition* def);
+
+ // Mark all phis in |iterators|, and the phis they flow into, as having
+ // implicit uses.
+ [[nodiscard]] static bool markIteratorPhis(const PhiVector& iterators);
+
+ // Initializes the operands vector to the given capacity,
+ // permitting use of addInput() instead of addInputSlow().
+ [[nodiscard]] bool reserveLength(size_t length) {
+ return inputs_.reserve(length);
+ }
+
+ // Use only if capacity has been reserved by reserveLength
+ void addInput(MDefinition* ins) {
+ MOZ_ASSERT_IF(type() != MIRType::Value, ins->type() == type());
+ inputs_.infallibleEmplaceBack(ins, this);
+ }
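+
+  // Illustrative call pattern (names here are placeholders):
+  //   if (!phi->reserveLength(numPredecessors)) {
+  //     return false;
+  //   }
+  //   for (MDefinition* def : predecessorDefs) {
+  //     phi->addInput(def);
+  //   }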
+
+ // Appends a new input to the input vector. May perform reallocation.
+ // Prefer reserveLength() and addInput() instead, where possible.
+ [[nodiscard]] bool addInputSlow(MDefinition* ins) {
+ MOZ_ASSERT_IF(type() != MIRType::Value, ins->type() == type());
+ return inputs_.emplaceBack(ins, this);
+ }
+
+  // Appends a new input to the input vector. Infallible because
+  // we know the input fits in the vector's inline storage.
+ void addInlineInput(MDefinition* ins) {
+ MOZ_ASSERT(inputs_.length() < InputVector::InlineLength);
+ MOZ_ALWAYS_TRUE(addInputSlow(ins));
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ MDefinition* foldsTernary(TempAllocator& alloc);
+
+ bool congruentTo(const MDefinition* ins) const override;
+
+ // Mark this phi-node as having replaced all uses of |other|, as during GVN.
+ // For use when GVN eliminates phis which are not equivalent to one another.
+ void updateForReplacement(MPhi* other);
+
+ bool isIterator() const { return isIterator_; }
+ void setIterator() { isIterator_ = true; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void computeRange(TempAllocator& alloc) override;
+
+ MDefinition* operandIfRedundant();
+
+ bool canProduceFloat32() const override { return canProduceFloat32_; }
+
+ void setCanProduceFloat32(bool can) { canProduceFloat32_ = can; }
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return canConsumeFloat32_;
+ }
+
+ void setCanConsumeFloat32(bool can) { canConsumeFloat32_ = can; }
+
+ TruncateKind operandTruncateKind(size_t index) const override;
+ bool canTruncate() const override;
+ void truncate(TruncateKind kind) override;
+
+ PhiUsage getUsageAnalysis() const { return usageAnalysis_; }
+ void setUsageAnalysis(PhiUsage pu) {
+ MOZ_ASSERT(usageAnalysis_ == PhiUsage::Unknown);
+ usageAnalysis_ = pu;
+ MOZ_ASSERT(usageAnalysis_ != PhiUsage::Unknown);
+ }
+};
+
+// The goal of a Beta node is to split a def at a conditionally taken
+// branch, so that uses dominated by it have a different name.
+class MBeta : public MUnaryInstruction, public NoTypePolicy::Data {
+ private:
+ // This is the range induced by a comparison and branch in a preceding
+ // block. Note that this does not reflect any range constraints from
+ // the input value itself, so this value may differ from the range()
+ // range after it is computed.
+ const Range* comparison_;
+
+ MBeta(MDefinition* val, const Range* comp)
+ : MUnaryInstruction(classOpcode, val), comparison_(comp) {
+ setResultType(val->type());
+ }
+
+ public:
+ INSTRUCTION_HEADER(Beta)
+ TRIVIAL_NEW_WRAPPERS
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ void computeRange(TempAllocator& alloc) override;
+};
+
+// If the input evaluates to false (i.e., it's NaN, 0 or -0), 0 is returned;
+// otherwise the input is returned.
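+// For example: NaNToZero(NaN) = 0, NaNToZero(-0) = 0, NaNToZero(3.5) = 3.5.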
+class MNaNToZero : public MUnaryInstruction, public DoublePolicy<0>::Data {
+ bool operandIsNeverNaN_;
+ bool operandIsNeverNegativeZero_;
+
+ explicit MNaNToZero(MDefinition* input)
+ : MUnaryInstruction(classOpcode, input),
+ operandIsNeverNaN_(false),
+ operandIsNeverNegativeZero_(false) {
+ setResultType(MIRType::Double);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(NaNToZero)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool operandIsNeverNaN() const { return operandIsNeverNaN_; }
+
+ bool operandIsNeverNegativeZero() const {
+ return operandIsNeverNegativeZero_;
+ }
+
+ void collectRangeInfoPreTrunc() override;
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ void computeRange(TempAllocator& alloc) override;
+
+ bool writeRecoverData(CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MNaNToZero)
+};
+
+// MIR representation of a Value on the OSR BaselineFrame.
+// The Value is indexed off of OsrFrameReg.
+class MOsrValue : public MUnaryInstruction, public NoTypePolicy::Data {
+ private:
+ ptrdiff_t frameOffset_;
+
+ MOsrValue(MOsrEntry* entry, ptrdiff_t frameOffset)
+ : MUnaryInstruction(classOpcode, entry), frameOffset_(frameOffset) {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(OsrValue)
+ TRIVIAL_NEW_WRAPPERS
+
+ ptrdiff_t frameOffset() const { return frameOffset_; }
+
+ MOsrEntry* entry() { return getOperand(0)->toOsrEntry(); }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// MIR representation of a JSObject scope chain pointer on the OSR
+// BaselineFrame. The pointer is indexed off of OsrFrameReg.
+class MOsrEnvironmentChain : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ private:
+ explicit MOsrEnvironmentChain(MOsrEntry* entry)
+ : MUnaryInstruction(classOpcode, entry) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(OsrEnvironmentChain)
+ TRIVIAL_NEW_WRAPPERS
+
+ MOsrEntry* entry() { return getOperand(0)->toOsrEntry(); }
+};
+
+// MIR representation of a JSObject ArgumentsObject pointer on the OSR
+// BaselineFrame. The pointer is indexed off of OsrFrameReg.
+class MOsrArgumentsObject : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ private:
+ explicit MOsrArgumentsObject(MOsrEntry* entry)
+ : MUnaryInstruction(classOpcode, entry) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(OsrArgumentsObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ MOsrEntry* entry() { return getOperand(0)->toOsrEntry(); }
+};
+
+// MIR representation of the return value on the OSR BaselineFrame.
+// The Value is indexed off of OsrFrameReg.
+class MOsrReturnValue : public MUnaryInstruction, public NoTypePolicy::Data {
+ private:
+ explicit MOsrReturnValue(MOsrEntry* entry)
+ : MUnaryInstruction(classOpcode, entry) {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(OsrReturnValue)
+ TRIVIAL_NEW_WRAPPERS
+
+ MOsrEntry* entry() { return getOperand(0)->toOsrEntry(); }
+};
+
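+// A binary operation implemented with an inline cache: both operands are
+// boxed Values and the result type is supplied by the caller.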
+class MBinaryCache : public MBinaryInstruction,
+ public MixPolicy<BoxPolicy<0>, BoxPolicy<1>>::Data {
+ explicit MBinaryCache(MDefinition* left, MDefinition* right, MIRType resType)
+ : MBinaryInstruction(classOpcode, left, right) {
+ setResultType(resType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(BinaryCache)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+// Check whether we need to fire the interrupt handler (in wasm code).
+class MWasmInterruptCheck : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MWasmInterruptCheck(MDefinition* instance,
+ wasm::BytecodeOffset bytecodeOffset)
+ : MUnaryInstruction(classOpcode, instance),
+ bytecodeOffset_(bytecodeOffset) {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmInterruptCheck)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, instance))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+};
+
+// Directly jumps to the indicated trap, leaving Wasm code and reporting a
+// runtime error.
+
+class MWasmTrap : public MAryControlInstruction<0, 0>,
+ public NoTypePolicy::Data {
+ wasm::Trap trap_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ explicit MWasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset)
+ : MAryControlInstruction(classOpcode),
+ trap_(trap),
+ bytecodeOffset_(bytecodeOffset) {}
+
+ public:
+ INSTRUCTION_HEADER(WasmTrap)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ wasm::Trap trap() const { return trap_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+};
+
+// Checks if a value is JS_UNINITIALIZED_LEXICAL, bailing out if so and leaving
+// it to baseline to throw at the correct pc.
+class MLexicalCheck : public MUnaryInstruction, public BoxPolicy<0>::Data {
+ explicit MLexicalCheck(MDefinition* input)
+ : MUnaryInstruction(classOpcode, input) {
+ setResultType(MIRType::Value);
+ setMovable();
+ setGuard();
+
+ // If this instruction bails out, we will set a flag to prevent
+ // lexical checks in this script from being moved.
+ setBailoutKind(BailoutKind::UninitializedLexical);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LexicalCheck)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+};
+
+// Unconditionally throw a known error number.
+class MThrowMsg : public MNullaryInstruction {
+ const ThrowMsgKind throwMsgKind_;
+
+ explicit MThrowMsg(ThrowMsgKind throwMsgKind)
+ : MNullaryInstruction(classOpcode), throwMsgKind_(throwMsgKind) {
+ setGuard();
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ThrowMsg)
+ TRIVIAL_NEW_WRAPPERS
+
+ ThrowMsgKind throwMsgKind() const { return throwMsgKind_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ExceptionState);
+ }
+};
+
+class MGetFirstDollarIndex : public MUnaryInstruction,
+ public StringPolicy<0>::Data {
+ explicit MGetFirstDollarIndex(MDefinition* str)
+ : MUnaryInstruction(classOpcode, str) {
+ setResultType(MIRType::Int32);
+
+ // Codegen assumes string length > 0. Don't allow LICM to move this
+ // before the .length > 1 check in RegExpReplace in RegExp.js.
+ MOZ_ASSERT(!isMovable());
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetFirstDollarIndex)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, str))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+class MStringReplace : public MTernaryInstruction,
+ public MixPolicy<StringPolicy<0>, StringPolicy<1>,
+ StringPolicy<2>>::Data {
+ private:
+ bool isFlatReplacement_;
+
+ MStringReplace(MDefinition* string, MDefinition* pattern,
+ MDefinition* replacement)
+ : MTernaryInstruction(classOpcode, string, pattern, replacement),
+ isFlatReplacement_(false) {
+ setMovable();
+ setResultType(MIRType::String);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StringReplace)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, string), (1, pattern), (2, replacement))
+
+ void setFlatReplacement() {
+ MOZ_ASSERT(!isFlatReplacement_);
+ isFlatReplacement_ = true;
+ }
+
+ bool isFlatReplacement() const { return isFlatReplacement_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isStringReplace()) {
+ return false;
+ }
+ if (isFlatReplacement_ != ins->toStringReplace()->isFlatReplacement()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override {
+ if (isFlatReplacement_) {
+ MOZ_ASSERT(!pattern()->isRegExp());
+ return true;
+ }
+ return false;
+ }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+class MLambda : public MBinaryInstruction, public SingleObjectPolicy::Data {
+ MLambda(MDefinition* envChain, MConstant* cst)
+ : MBinaryInstruction(classOpcode, envChain, cst) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Lambda)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, environmentChain))
+
+ MConstant* functionOperand() const { return getOperand(1)->toConstant(); }
+ JSFunction* templateFunction() const {
+ return &functionOperand()->toObject().as<JSFunction>();
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+class MFunctionWithProto : public MTernaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>,
+ ObjectPolicy<2>>::Data {
+ CompilerFunction fun_;
+
+ MFunctionWithProto(MDefinition* envChain, MDefinition* prototype,
+ MConstant* cst)
+ : MTernaryInstruction(classOpcode, envChain, prototype, cst),
+ fun_(&cst->toObject().as<JSFunction>()) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(FunctionWithProto)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, environmentChain), (1, prototype))
+
+ MConstant* functionOperand() const { return getOperand(2)->toConstant(); }
+ JSFunction* function() const { return fun_; }
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+class MGetNextEntryForIterator
+ : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>>::Data {
+ public:
+ enum Mode { Map, Set };
+
+ private:
+ Mode mode_;
+
+ explicit MGetNextEntryForIterator(MDefinition* iter, MDefinition* result,
+ Mode mode)
+ : MBinaryInstruction(classOpcode, iter, result), mode_(mode) {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetNextEntryForIterator)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, iter), (1, result))
+
+ Mode mode() const { return mode_; }
+};
+
+// Convert a Double into an IntPtr value for accessing a TypedArray or DataView
+// element. If the input is non-finite, not an integer, negative, or outside the
+// IntPtr range, either bails out or produces a value which is known to trigger
+// an out-of-bounds access (this depends on the supportOOB flag).
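+// For example, 3.0 converts to index 3, while 3.5 or -1.0 either bails out
+// (supportOOB = false) or yields a known out-of-bounds index (supportOOB =
+// true).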
+class MGuardNumberToIntPtrIndex : public MUnaryInstruction,
+ public DoublePolicy<0>::Data {
+ // If true, produce an out-of-bounds index for non-IntPtr doubles instead of
+ // bailing out.
+ const bool supportOOB_;
+
+ MGuardNumberToIntPtrIndex(MDefinition* def, bool supportOOB)
+ : MUnaryInstruction(classOpcode, def), supportOOB_(supportOOB) {
+ MOZ_ASSERT(def->type() == MIRType::Double);
+ setResultType(MIRType::IntPtr);
+ setMovable();
+ if (!supportOOB) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardNumberToIntPtrIndex)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool supportOOB() const { return supportOOB_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardNumberToIntPtrIndex()) {
+ return false;
+ }
+ if (ins->toGuardNumberToIntPtrIndex()->supportOOB() != supportOOB()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ ALLOW_CLONE(MGuardNumberToIntPtrIndex)
+};
+
+// Perform !-operation
+class MNot : public MUnaryInstruction, public TestPolicy::Data {
+ bool operandIsNeverNaN_;
+ TypeDataList observedTypes_;
+
+ explicit MNot(MDefinition* input)
+ : MUnaryInstruction(classOpcode, input), operandIsNeverNaN_(false) {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ static MNot* NewInt32(TempAllocator& alloc, MDefinition* input) {
+ MOZ_ASSERT(input->type() == MIRType::Int32 ||
+ input->type() == MIRType::Int64);
+ auto* ins = new (alloc) MNot(input);
+ ins->setResultType(MIRType::Int32);
+ return ins;
+ }
+
+ INSTRUCTION_HEADER(Not)
+ TRIVIAL_NEW_WRAPPERS
+
+ void setObservedTypes(const TypeDataList& observed) {
+ observedTypes_ = observed;
+ }
+ const TypeDataList& observedTypes() const { return observedTypes_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool operandIsNeverNaN() const { return operandIsNeverNaN_; }
+
+ virtual AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void collectRangeInfoPreTrunc() override;
+
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+ bool isFloat32Commutative() const override { return true; }
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+// Bailout if index + minimum < 0 or index + maximum >= length. The length used
+// in a bounds check must not be negative, or the wrong result may be computed
+// (unsigned comparisons may be used).
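+// For example, with minimum = -1 and maximum = 2 the check passes only for
+// indices satisfying index >= 1 and index + 2 < length.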
+class MBoundsCheck
+ : public MBinaryInstruction,
+ public MixPolicy<Int32OrIntPtrPolicy<0>, Int32OrIntPtrPolicy<1>>::Data {
+ // Range over which to perform the bounds check, may be modified by GVN.
+ int32_t minimum_;
+ int32_t maximum_;
+ bool fallible_;
+
+ MBoundsCheck(MDefinition* index, MDefinition* length)
+ : MBinaryInstruction(classOpcode, index, length),
+ minimum_(0),
+ maximum_(0),
+ fallible_(true) {
+ setGuard();
+ setMovable();
+ MOZ_ASSERT(index->type() == MIRType::Int32 ||
+ index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(index->type() == length->type());
+
+ // Returns the checked index.
+ setResultType(index->type());
+ }
+
+ public:
+ INSTRUCTION_HEADER(BoundsCheck)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, index), (1, length))
+
+ int32_t minimum() const { return minimum_; }
+ void setMinimum(int32_t n) {
+ MOZ_ASSERT(fallible_);
+ minimum_ = n;
+ }
+ int32_t maximum() const { return maximum_; }
+ void setMaximum(int32_t n) {
+ MOZ_ASSERT(fallible_);
+ maximum_ = n;
+ }
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isBoundsCheck()) {
+ return false;
+ }
+ const MBoundsCheck* other = ins->toBoundsCheck();
+ if (minimum() != other->minimum() || maximum() != other->maximum()) {
+ return false;
+ }
+ if (fallible() != other->fallible()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(other);
+ }
+ virtual AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void computeRange(TempAllocator& alloc) override;
+ bool fallible() const { return fallible_; }
+ void collectRangeInfoPreTrunc() override;
+
+ ALLOW_CLONE(MBoundsCheck)
+};
+
+// Bailout if index < minimum.
+class MBoundsCheckLower : public MUnaryInstruction,
+ public UnboxedInt32Policy<0>::Data {
+ int32_t minimum_;
+ bool fallible_;
+
+ explicit MBoundsCheckLower(MDefinition* index)
+ : MUnaryInstruction(classOpcode, index), minimum_(0), fallible_(true) {
+ setGuard();
+ setMovable();
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(BoundsCheckLower)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, index))
+
+ int32_t minimum() const { return minimum_; }
+ void setMinimum(int32_t n) { minimum_ = n; }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool fallible() const { return fallible_; }
+ void collectRangeInfoPreTrunc() override;
+};
+
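+// Spectre mitigation: mask |index| using |length| so that a misspeculated
+// out-of-bounds access cannot be used to read arbitrary memory; the masked
+// index is returned.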
+class MSpectreMaskIndex
+ : public MBinaryInstruction,
+ public MixPolicy<Int32OrIntPtrPolicy<0>, Int32OrIntPtrPolicy<1>>::Data {
+ MSpectreMaskIndex(MDefinition* index, MDefinition* length)
+ : MBinaryInstruction(classOpcode, index, length) {
+ // Note: this instruction does not need setGuard(): if there are no uses
+ // it's fine for DCE to eliminate this instruction.
+ setMovable();
+ MOZ_ASSERT(index->type() == MIRType::Int32 ||
+ index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(index->type() == length->type());
+
+ // Returns the masked index.
+ setResultType(index->type());
+ }
+
+ public:
+ INSTRUCTION_HEADER(SpectreMaskIndex)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, index), (1, length))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ virtual AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void computeRange(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MSpectreMaskIndex)
+};
+
+// Load a value from a dense array's element vector. Bails out if the element is
+// a hole.
+class MLoadElement : public MBinaryInstruction, public NoTypePolicy::Data {
+ MLoadElement(MDefinition* elements, MDefinition* index)
+ : MBinaryInstruction(classOpcode, elements, index) {
+ // Uses may be optimized away based on this instruction's result
+ // type. This means it's invalid to DCE this instruction, as we
+ // have to invalidate when we read a hole.
+ setGuard();
+ setResultType(MIRType::Value);
+ setMovable();
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasType mightAlias(const MDefinition* store) const override;
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::Element);
+ }
+
+ ALLOW_CLONE(MLoadElement)
+};
+
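+// Load a value from a dense array's element vector and unbox it to the given
+// type. In fallible mode the unbox acts as a guard and bails out on a type
+// mismatch.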
+class MLoadElementAndUnbox : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ MUnbox::Mode mode_;
+
+ MLoadElementAndUnbox(MDefinition* elements, MDefinition* index,
+ MUnbox::Mode mode, MIRType type)
+ : MBinaryInstruction(classOpcode, elements, index), mode_(mode) {
+ setResultType(type);
+ setMovable();
+ if (mode_ == MUnbox::Fallible) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadElementAndUnbox)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ MUnbox::Mode mode() const { return mode_; }
+ bool fallible() const { return mode_ != MUnbox::Infallible; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadElementAndUnbox() ||
+ mode() != ins->toLoadElementAndUnbox()->mode()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::Element);
+ }
+
+ ALLOW_CLONE(MLoadElementAndUnbox);
+};
+
+// Load a value from the elements vector of a native object. If the index is
+// out-of-bounds, or the indexed slot has a hole, undefined is returned instead.
+class MLoadElementHole : public MTernaryInstruction, public NoTypePolicy::Data {
+ bool needsNegativeIntCheck_ = true;
+
+ MLoadElementHole(MDefinition* elements, MDefinition* index,
+ MDefinition* initLength)
+ : MTernaryInstruction(classOpcode, elements, index, initLength) {
+ setResultType(MIRType::Value);
+ setMovable();
+
+ // Set the guard flag to make sure we bail when we see a negative
+ // index. We can clear this flag (and needsNegativeIntCheck_) in
+ // collectRangeInfoPreTrunc.
+ setGuard();
+
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(initLength->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadElementHole)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, initLength))
+
+ bool needsNegativeIntCheck() const { return needsNegativeIntCheck_; }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadElementHole()) {
+ return false;
+ }
+ const MLoadElementHole* other = ins->toLoadElementHole();
+ if (needsNegativeIntCheck() != other->needsNegativeIntCheck()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(other);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::Element);
+ }
+ void collectRangeInfoPreTrunc() override;
+
+ ALLOW_CLONE(MLoadElementHole)
+};
+
+// Store a value to a dense array's elements vector.
+class MStoreElement : public MTernaryInstruction,
+ public NoFloatPolicy<2>::Data {
+ bool needsHoleCheck_;
+ bool needsBarrier_;
+
+ MStoreElement(MDefinition* elements, MDefinition* index, MDefinition* value,
+ bool needsHoleCheck, bool needsBarrier)
+ : MTernaryInstruction(classOpcode, elements, index, value) {
+ needsHoleCheck_ = needsHoleCheck;
+ needsBarrier_ = needsBarrier;
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(value->type() != MIRType::MagicHole);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+ static MStoreElement* NewUnbarriered(TempAllocator& alloc,
+ MDefinition* elements,
+ MDefinition* index, MDefinition* value,
+ bool needsHoleCheck) {
+ return new (alloc)
+ MStoreElement(elements, index, value, needsHoleCheck, false);
+ }
+
+ static MStoreElement* NewBarriered(TempAllocator& alloc,
+ MDefinition* elements, MDefinition* index,
+ MDefinition* value, bool needsHoleCheck) {
+ return new (alloc)
+ MStoreElement(elements, index, value, needsHoleCheck, true);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::Element);
+ }
+ bool needsBarrier() const { return needsBarrier_; }
+ bool needsHoleCheck() const { return needsHoleCheck_; }
+ bool fallible() const { return needsHoleCheck(); }
+
+ ALLOW_CLONE(MStoreElement)
+};
+
+// Stores MagicValue(JS_ELEMENTS_HOLE) and marks the elements as non-packed.
+class MStoreHoleValueElement : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ MStoreHoleValueElement(MDefinition* elements, MDefinition* index)
+ : MBinaryInstruction(classOpcode, elements, index) {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreHoleValueElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::Element | AliasSet::ObjectFields);
+ }
+
+ ALLOW_CLONE(MStoreHoleValueElement)
+};
+
+// Like MStoreElement, but also supports index == initialized length. The
+// downside is that we cannot hoist the elements vector and bounds check, since
+// this instruction may update the (initialized) length and reallocate the
+// elements vector.
+class MStoreElementHole
+ : public MQuaternaryInstruction,
+ public MixPolicy<SingleObjectPolicy, NoFloatPolicy<3>>::Data {
+ MStoreElementHole(MDefinition* object, MDefinition* elements,
+ MDefinition* index, MDefinition* value)
+ : MQuaternaryInstruction(classOpcode, object, elements, index, value) {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(value->type() != MIRType::MagicHole);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreElementHole)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, elements), (2, index), (3, value))
+
+ AliasSet getAliasSet() const override {
+ // StoreElementHole can update the initialized length, the array length
+ // or reallocate obj->elements.
+ return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
+ }
+
+ ALLOW_CLONE(MStoreElementHole)
+};
+
+// Array.prototype.pop or Array.prototype.shift on a dense array.
+class MArrayPopShift : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ public:
+ enum Mode { Pop, Shift };
+
+ private:
+ Mode mode_;
+
+ MArrayPopShift(MDefinition* object, Mode mode)
+ : MUnaryInstruction(classOpcode, object), mode_(mode) {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(ArrayPopShift)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+  Mode mode() const { return mode_; }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
+ }
+
+ ALLOW_CLONE(MArrayPopShift)
+};
+
+// All barriered operations - MCompareExchangeTypedArrayElement,
+// MExchangeTypedArrayElement, and MAtomicTypedArrayElementBinop, as
+// well as MLoadUnboxedScalar and MStoreUnboxedScalar when they are
+// marked as requiring a memory barrier - have the following
+// attributes:
+//
+// - Not movable
+// - Not removable
+// - Not congruent with any other instruction
+// - Effectful (they alias every TypedArray store)
+//
+// The intended effect of those constraints is to prevent all loads
+// and stores preceding the barriered operation from being moved to
+// after the barriered operation, and vice versa, and to prevent the
+// barriered operation from being removed or hoisted.
+
+enum MemoryBarrierRequirement {
+ DoesNotRequireMemoryBarrier,
+ DoesRequireMemoryBarrier
+};
+
+// Also see comments at MemoryBarrierRequirement, above.
+
+// Load an unboxed scalar value from an array buffer view or other object.
+class MLoadUnboxedScalar : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ int32_t offsetAdjustment_ = 0;
+ Scalar::Type storageType_;
+ bool requiresBarrier_;
+
+ MLoadUnboxedScalar(
+ MDefinition* elements, MDefinition* index, Scalar::Type storageType,
+ MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
+ : MBinaryInstruction(classOpcode, elements, index),
+ storageType_(storageType),
+ requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier) {
+ setResultType(MIRType::Value);
+ if (requiresBarrier_) {
+ setGuard(); // Not removable or movable
+ } else {
+ setMovable();
+ }
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadUnboxedScalar)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ Scalar::Type storageType() const { return storageType_; }
+ bool fallible() const {
+    // Bail out if the result does not fit in an int32.
+ return storageType_ == Scalar::Uint32 && type() == MIRType::Int32;
+ }
+ bool requiresMemoryBarrier() const { return requiresBarrier_; }
+ int32_t offsetAdjustment() const { return offsetAdjustment_; }
+ void setOffsetAdjustment(int32_t offsetAdjustment) {
+ offsetAdjustment_ = offsetAdjustment;
+ }
+ AliasSet getAliasSet() const override {
+    // When a barrier is needed, make the instruction effectful by
+ // giving it a "store" effect.
+ if (requiresBarrier_) {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+ return AliasSet::Load(AliasSet::UnboxedElement);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (requiresBarrier_) {
+ return false;
+ }
+ if (!ins->isLoadUnboxedScalar()) {
+ return false;
+ }
+ const MLoadUnboxedScalar* other = ins->toLoadUnboxedScalar();
+ if (storageType_ != other->storageType_) {
+ return false;
+ }
+ if (offsetAdjustment() != other->offsetAdjustment()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(other);
+ }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ void computeRange(TempAllocator& alloc) override;
+
+ bool canProduceFloat32() const override {
+ return storageType_ == Scalar::Float32;
+ }
+
+ ALLOW_CLONE(MLoadUnboxedScalar)
+};
+
+// Load an unboxed scalar value from a dataview object.
+class MLoadDataViewElement : public MTernaryInstruction,
+ public NoTypePolicy::Data {
+ Scalar::Type storageType_;
+
+ MLoadDataViewElement(MDefinition* elements, MDefinition* index,
+ MDefinition* littleEndian, Scalar::Type storageType)
+ : MTernaryInstruction(classOpcode, elements, index, littleEndian),
+ storageType_(storageType) {
+ setResultType(MIRType::Value);
+ setMovable();
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(littleEndian->type() == MIRType::Boolean);
+ MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+ MOZ_ASSERT(Scalar::byteSize(storageType) > 1);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadDataViewElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, littleEndian))
+
+ Scalar::Type storageType() const { return storageType_; }
+ bool fallible() const {
+    // Bail out if the result does not fit in an int32.
+ return storageType_ == Scalar::Uint32 && type() == MIRType::Int32;
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::UnboxedElement);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadDataViewElement()) {
+ return false;
+ }
+ const MLoadDataViewElement* other = ins->toLoadDataViewElement();
+ if (storageType_ != other->storageType_) {
+ return false;
+ }
+ return congruentIfOperandsEqual(other);
+ }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ void computeRange(TempAllocator& alloc) override;
+
+ bool canProduceFloat32() const override {
+ return storageType_ == Scalar::Float32;
+ }
+
+ ALLOW_CLONE(MLoadDataViewElement)
+};
+
+// Load a value from a typed array. Out-of-bounds accesses are handled in-line.
+class MLoadTypedArrayElementHole : public MBinaryInstruction,
+ public SingleObjectPolicy::Data {
+ Scalar::Type arrayType_;
+ bool forceDouble_;
+
+ MLoadTypedArrayElementHole(MDefinition* object, MDefinition* index,
+ Scalar::Type arrayType, bool forceDouble)
+ : MBinaryInstruction(classOpcode, object, index),
+ arrayType_(arrayType),
+ forceDouble_(forceDouble) {
+ setResultType(MIRType::Value);
+ setMovable();
+ MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadTypedArrayElementHole)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, index))
+
+ Scalar::Type arrayType() const { return arrayType_; }
+ bool forceDouble() const { return forceDouble_; }
+ bool fallible() const {
+ return arrayType_ == Scalar::Uint32 && !forceDouble_;
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadTypedArrayElementHole()) {
+ return false;
+ }
+ const MLoadTypedArrayElementHole* other =
+ ins->toLoadTypedArrayElementHole();
+ if (arrayType() != other->arrayType()) {
+ return false;
+ }
+ if (forceDouble() != other->forceDouble()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(other);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::UnboxedElement | AliasSet::ObjectFields |
+ AliasSet::ArrayBufferViewLengthOrOffset);
+ }
+ bool canProduceFloat32() const override {
+ return arrayType_ == Scalar::Float32;
+ }
+
+ ALLOW_CLONE(MLoadTypedArrayElementHole)
+};
+
+// Base class for MIR ops that write unboxed scalar values.
+class StoreUnboxedScalarBase {
+ Scalar::Type writeType_;
+
+ protected:
+ explicit StoreUnboxedScalarBase(Scalar::Type writeType)
+ : writeType_(writeType) {
+ MOZ_ASSERT(isIntegerWrite() || isFloatWrite() || isBigIntWrite());
+ }
+
+ public:
+ Scalar::Type writeType() const { return writeType_; }
+ bool isByteWrite() const {
+ return writeType_ == Scalar::Int8 || writeType_ == Scalar::Uint8 ||
+ writeType_ == Scalar::Uint8Clamped;
+ }
+ bool isIntegerWrite() const {
+ return isByteWrite() || writeType_ == Scalar::Int16 ||
+ writeType_ == Scalar::Uint16 || writeType_ == Scalar::Int32 ||
+ writeType_ == Scalar::Uint32;
+ }
+ bool isFloatWrite() const {
+ return writeType_ == Scalar::Float32 || writeType_ == Scalar::Float64;
+ }
+ bool isBigIntWrite() const { return Scalar::isBigIntType(writeType_); }
+};
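+
+// For example, a Scalar::Uint8Clamped write counts as both a byte write and
+// an integer write, a Scalar::Float64 write is only a float write, and a
+// Scalar::BigInt64 write is only a BigInt write.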
+
+// Store an unboxed scalar value to an array buffer view or other object.
+class MStoreUnboxedScalar : public MTernaryInstruction,
+ public StoreUnboxedScalarBase,
+ public StoreUnboxedScalarPolicy::Data {
+ bool requiresBarrier_;
+
+ MStoreUnboxedScalar(
+ MDefinition* elements, MDefinition* index, MDefinition* value,
+ Scalar::Type storageType,
+ MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
+ : MTernaryInstruction(classOpcode, elements, index, value),
+ StoreUnboxedScalarBase(storageType),
+ requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier) {
+ if (requiresBarrier_) {
+ setGuard(); // Not removable or movable
+ }
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreUnboxedScalar)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+ bool requiresMemoryBarrier() const { return requiresBarrier_; }
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return use == getUseFor(2) && writeType() == Scalar::Float32;
+ }
+
+ ALLOW_CLONE(MStoreUnboxedScalar)
+};
+
+// Store an unboxed scalar value to a dataview object.
+class MStoreDataViewElement : public MQuaternaryInstruction,
+ public StoreUnboxedScalarBase,
+ public StoreDataViewElementPolicy::Data {
+ MStoreDataViewElement(MDefinition* elements, MDefinition* index,
+ MDefinition* value, MDefinition* littleEndian,
+ Scalar::Type storageType)
+ : MQuaternaryInstruction(classOpcode, elements, index, value,
+ littleEndian),
+ StoreUnboxedScalarBase(storageType) {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+ MOZ_ASSERT(Scalar::byteSize(storageType) > 1);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreDataViewElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value), (3, littleEndian))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return use == getUseFor(2) && writeType() == Scalar::Float32;
+ }
+
+ ALLOW_CLONE(MStoreDataViewElement)
+};
+
+class MStoreTypedArrayElementHole : public MQuaternaryInstruction,
+ public StoreUnboxedScalarBase,
+ public StoreTypedArrayHolePolicy::Data {
+ MStoreTypedArrayElementHole(MDefinition* elements, MDefinition* length,
+ MDefinition* index, MDefinition* value,
+ Scalar::Type arrayType)
+ : MQuaternaryInstruction(classOpcode, elements, length, index, value),
+ StoreUnboxedScalarBase(arrayType) {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(length->type() == MIRType::IntPtr);
+ MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreTypedArrayElementHole)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, length), (2, index), (3, value))
+
+ Scalar::Type arrayType() const { return writeType(); }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+ TruncateKind operandTruncateKind(size_t index) const override;
+
+ bool canConsumeFloat32(MUse* use) const override {
+ return use == getUseFor(3) && arrayType() == Scalar::Float32;
+ }
+
+ ALLOW_CLONE(MStoreTypedArrayElementHole)
+};
+
+// Compute an "effective address", i.e., a compound computation of the form:
+// base + index * scale + displacement
+class MEffectiveAddress : public MBinaryInstruction, public NoTypePolicy::Data {
+ MEffectiveAddress(MDefinition* base, MDefinition* index, Scale scale,
+ int32_t displacement)
+ : MBinaryInstruction(classOpcode, base, index),
+ scale_(scale),
+ displacement_(displacement) {
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ setMovable();
+ setResultType(MIRType::Int32);
+ }
+
+ Scale scale_;
+ int32_t displacement_;
+
+ public:
+ INSTRUCTION_HEADER(EffectiveAddress)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* base() const { return lhs(); }
+ MDefinition* index() const { return rhs(); }
+ Scale scale() const { return scale_; }
+ int32_t displacement() const { return displacement_; }
+
+ ALLOW_CLONE(MEffectiveAddress)
+};
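+
+// As an illustration (the values are arbitrary): with base = 0x1000,
+// index = 3, scale = TimesEight and displacement = 16, MEffectiveAddress
+// computes 0x1000 + 3 * 8 + 16 = 0x1028, which matches what a single x86
+// lea instruction can produce.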
+
+// Clamp input to range [0, 255] for Uint8ClampedArray.
+class MClampToUint8 : public MUnaryInstruction, public ClampPolicy::Data {
+ explicit MClampToUint8(MDefinition* input)
+ : MUnaryInstruction(classOpcode, input) {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ClampToUint8)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ void computeRange(TempAllocator& alloc) override;
+
+ ALLOW_CLONE(MClampToUint8)
+};
+
+class MLoadFixedSlot : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ size_t slot_;
+
+ protected:
+ MLoadFixedSlot(MDefinition* obj, size_t slot)
+ : MUnaryInstruction(classOpcode, obj), slot_(slot) {
+ setResultType(MIRType::Value);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadFixedSlot)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ size_t slot() const { return slot_; }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadFixedSlot()) {
+ return false;
+ }
+ if (slot() != ins->toLoadFixedSlot()->slot()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::FixedSlot);
+ }
+
+ AliasType mightAlias(const MDefinition* store) const override;
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ ALLOW_CLONE(MLoadFixedSlot)
+};
+
+class MLoadFixedSlotAndUnbox : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ size_t slot_;
+ MUnbox::Mode mode_;
+
+ MLoadFixedSlotAndUnbox(MDefinition* obj, size_t slot, MUnbox::Mode mode,
+ MIRType type)
+ : MUnaryInstruction(classOpcode, obj), slot_(slot), mode_(mode) {
+ setResultType(type);
+ setMovable();
+ if (mode_ == MUnbox::Fallible) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadFixedSlotAndUnbox)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ size_t slot() const { return slot_; }
+ MUnbox::Mode mode() const { return mode_; }
+ bool fallible() const { return mode_ != MUnbox::Infallible; }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadFixedSlotAndUnbox() ||
+ slot() != ins->toLoadFixedSlotAndUnbox()->slot() ||
+ mode() != ins->toLoadFixedSlotAndUnbox()->mode()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::FixedSlot);
+ }
+
+ AliasType mightAlias(const MDefinition* store) const override;
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ ALLOW_CLONE(MLoadFixedSlotAndUnbox);
+};
+
+class MLoadDynamicSlotAndUnbox : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ size_t slot_;
+ MUnbox::Mode mode_;
+
+ MLoadDynamicSlotAndUnbox(MDefinition* slots, size_t slot, MUnbox::Mode mode,
+ MIRType type)
+ : MUnaryInstruction(classOpcode, slots), slot_(slot), mode_(mode) {
+ setResultType(type);
+ setMovable();
+ if (mode_ == MUnbox::Fallible) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadDynamicSlotAndUnbox)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, slots))
+
+ size_t slot() const { return slot_; }
+ MUnbox::Mode mode() const { return mode_; }
+ bool fallible() const { return mode_ != MUnbox::Infallible; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadDynamicSlotAndUnbox() ||
+ slot() != ins->toLoadDynamicSlotAndUnbox()->slot() ||
+ mode() != ins->toLoadDynamicSlotAndUnbox()->mode()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::DynamicSlot);
+ }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ ALLOW_CLONE(MLoadDynamicSlotAndUnbox);
+};
+
+class MStoreFixedSlot
+ : public MBinaryInstruction,
+ public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1>>::Data {
+ bool needsBarrier_;
+ size_t slot_;
+
+ MStoreFixedSlot(MDefinition* obj, MDefinition* rval, size_t slot,
+ bool barrier)
+ : MBinaryInstruction(classOpcode, obj, rval),
+ needsBarrier_(barrier),
+ slot_(slot) {}
+
+ public:
+ INSTRUCTION_HEADER(StoreFixedSlot)
+ NAMED_OPERANDS((0, object), (1, value))
+
+ static MStoreFixedSlot* NewUnbarriered(TempAllocator& alloc, MDefinition* obj,
+ size_t slot, MDefinition* rval) {
+ return new (alloc) MStoreFixedSlot(obj, rval, slot, false);
+ }
+ static MStoreFixedSlot* NewBarriered(TempAllocator& alloc, MDefinition* obj,
+ size_t slot, MDefinition* rval) {
+ return new (alloc) MStoreFixedSlot(obj, rval, slot, true);
+ }
+
+ size_t slot() const { return slot_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::FixedSlot);
+ }
+ bool needsBarrier() const { return needsBarrier_; }
+ void setNeedsBarrier(bool needsBarrier = true) {
+ needsBarrier_ = needsBarrier;
+ }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ ALLOW_CLONE(MStoreFixedSlot)
+};
+
+class MGetPropertyCache : public MBinaryInstruction,
+ public MixPolicy<BoxExceptPolicy<0, MIRType::Object>,
+ CacheIdPolicy<1>>::Data {
+ MGetPropertyCache(MDefinition* obj, MDefinition* id)
+ : MBinaryInstruction(classOpcode, obj, id) {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetPropertyCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value), (1, idval))
+};
+
+class MGetPropSuperCache
+ : public MTernaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, BoxExceptPolicy<1, MIRType::Object>,
+ CacheIdPolicy<2>>::Data {
+ MGetPropSuperCache(MDefinition* obj, MDefinition* receiver, MDefinition* id)
+ : MTernaryInstruction(classOpcode, obj, receiver, id) {
+ setResultType(MIRType::Value);
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetPropSuperCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, receiver), (2, idval))
+};
+
+// Guard that the object's proto is |expected|.
+class MGuardProto : public MBinaryInstruction, public SingleObjectPolicy::Data {
+ MGuardProto(MDefinition* obj, MDefinition* expected)
+ : MBinaryInstruction(classOpcode, obj, expected) {
+ MOZ_ASSERT(expected->isConstant() || expected->isNurseryObject());
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardProto)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, expected))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+ AliasType mightAlias(const MDefinition* def) const override {
+ // These instructions never modify the [[Prototype]].
+ if (def->isAddAndStoreSlot() || def->isAllocateAndStoreSlot()) {
+ return AliasType::NoAlias;
+ }
+ return AliasType::MayAlias;
+ }
+};
+
+// Guard that the object has no proto.
+class MGuardNullProto : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ explicit MGuardNullProto(MDefinition* obj)
+ : MUnaryInstruction(classOpcode, obj) {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardNullProto)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+ AliasType mightAlias(const MDefinition* def) const override {
+ // These instructions never modify the [[Prototype]].
+ if (def->isAddAndStoreSlot() || def->isAllocateAndStoreSlot()) {
+ return AliasType::NoAlias;
+ }
+ return AliasType::MayAlias;
+ }
+};
+
+// Guard on a specific Value.
+class MGuardValue : public MUnaryInstruction, public BoxInputsPolicy::Data {
+ Value expected_;
+
+ MGuardValue(MDefinition* val, const Value& expected)
+ : MUnaryInstruction(classOpcode, val), expected_(expected) {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardValue)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value))
+
+ Value expected() const { return expected_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardValue()) {
+ return false;
+ }
+ if (expected() != ins->toGuardValue()->expected()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// Guard on function flags
+class MGuardFunctionFlags : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ // At least one of the expected flags must be set, but not necessarily all
+ // expected flags.
+ uint16_t expectedFlags_;
+
+  // None of the unexpected flags may be set.
+ uint16_t unexpectedFlags_;
+
+ explicit MGuardFunctionFlags(MDefinition* fun, uint16_t expectedFlags,
+ uint16_t unexpectedFlags)
+ : MUnaryInstruction(classOpcode, fun),
+ expectedFlags_(expectedFlags),
+ unexpectedFlags_(unexpectedFlags) {
+ MOZ_ASSERT((expectedFlags & unexpectedFlags) == 0,
+ "Can't guard inconsistent flags");
+ MOZ_ASSERT((expectedFlags | unexpectedFlags) != 0,
+ "Can't guard zero flags");
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardFunctionFlags)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, function))
+
+  uint16_t expectedFlags() const { return expectedFlags_; }
+  uint16_t unexpectedFlags() const { return unexpectedFlags_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardFunctionFlags()) {
+ return false;
+ }
+ if (expectedFlags() != ins->toGuardFunctionFlags()->expectedFlags()) {
+ return false;
+ }
+ if (unexpectedFlags() != ins->toGuardFunctionFlags()->unexpectedFlags()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+};
+
+// Guard on an object's identity, inclusively or exclusively.
+class MGuardObjectIdentity : public MBinaryInstruction,
+ public SingleObjectPolicy::Data {
+ bool bailOnEquality_;
+
+ MGuardObjectIdentity(MDefinition* obj, MDefinition* expected,
+ bool bailOnEquality)
+ : MBinaryInstruction(classOpcode, obj, expected),
+ bailOnEquality_(bailOnEquality) {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardObjectIdentity)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, expected))
+
+ bool bailOnEquality() const { return bailOnEquality_; }
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardObjectIdentity()) {
+ return false;
+ }
+ if (bailOnEquality() != ins->toGuardObjectIdentity()->bailOnEquality()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+// Guard on a specific JSFunction. Used instead of MGuardObjectIdentity,
+// so we can store some metadata related to the expected function.
+class MGuardSpecificFunction : public MBinaryInstruction,
+ public SingleObjectPolicy::Data {
+ uint16_t nargs_;
+ FunctionFlags flags_;
+
+ MGuardSpecificFunction(MDefinition* obj, MDefinition* expected,
+ uint16_t nargs, FunctionFlags flags)
+ : MBinaryInstruction(classOpcode, obj, expected),
+ nargs_(nargs),
+ flags_(flags) {
+ MOZ_ASSERT(expected->isConstant() || expected->isNurseryObject());
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardSpecificFunction)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, function), (1, expected))
+
+ uint16_t nargs() const { return nargs_; }
+ FunctionFlags flags() const { return flags_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardSpecificFunction()) {
+ return false;
+ }
+
+ auto* other = ins->toGuardSpecificFunction();
+ if (nargs() != other->nargs() ||
+ flags().toRaw() != other->flags().toRaw()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(other);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MGuardSpecificSymbol : public MUnaryInstruction,
+ public SymbolPolicy<0>::Data {
+ CompilerGCPointer<JS::Symbol*> expected_;
+
+ MGuardSpecificSymbol(MDefinition* symbol, JS::Symbol* expected)
+ : MUnaryInstruction(classOpcode, symbol), expected_(expected) {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Symbol);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardSpecificSymbol)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, symbol))
+
+ JS::Symbol* expected() const { return expected_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardSpecificSymbol()) {
+ return false;
+ }
+ if (expected() != ins->toGuardSpecificSymbol()->expected()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MGuardTagNotEqual
+ : public MBinaryInstruction,
+ public MixPolicy<UnboxedInt32Policy<0>, UnboxedInt32Policy<1>>::Data {
+ MGuardTagNotEqual(MDefinition* left, MDefinition* right)
+ : MBinaryInstruction(classOpcode, left, right) {
+ setGuard();
+ setMovable();
+ setCommutative();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardTagNotEqual)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return binaryCongruentTo(ins);
+ }
+};
+
+// Load from vp[slot] (slots that are not inline in an object).
+class MLoadDynamicSlot : public MUnaryInstruction, public NoTypePolicy::Data {
+ uint32_t slot_;
+
+ MLoadDynamicSlot(MDefinition* slots, uint32_t slot)
+ : MUnaryInstruction(classOpcode, slots), slot_(slot) {
+ setResultType(MIRType::Value);
+ setMovable();
+ MOZ_ASSERT(slots->type() == MIRType::Slots);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadDynamicSlot)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, slots))
+
+ uint32_t slot() const { return slot_; }
+
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadDynamicSlot()) {
+ return false;
+ }
+ if (slot() != ins->toLoadDynamicSlot()->slot()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ MOZ_ASSERT(slots()->type() == MIRType::Slots);
+ return AliasSet::Load(AliasSet::DynamicSlot);
+ }
+ AliasType mightAlias(const MDefinition* store) const override;
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ ALLOW_CLONE(MLoadDynamicSlot)
+};
+
+class MAddAndStoreSlot
+ : public MBinaryInstruction,
+ public MixPolicy<SingleObjectPolicy, BoxPolicy<1>>::Data {
+ public:
+ enum class Kind {
+ FixedSlot,
+ DynamicSlot,
+ };
+
+ private:
+ Kind kind_;
+ uint32_t slotOffset_;
+ CompilerShape shape_;
+
+ MAddAndStoreSlot(MDefinition* obj, MDefinition* value, Kind kind,
+ uint32_t slotOffset, Shape* shape)
+ : MBinaryInstruction(classOpcode, obj, value),
+ kind_(kind),
+ slotOffset_(slotOffset),
+ shape_(shape) {}
+
+ public:
+ INSTRUCTION_HEADER(AddAndStoreSlot)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value))
+
+ Kind kind() const { return kind_; }
+ uint32_t slotOffset() const { return slotOffset_; }
+ Shape* shape() const { return shape_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::ObjectFields |
+ (kind() == Kind::FixedSlot ? AliasSet::FixedSlot
+ : AliasSet::DynamicSlot));
+ }
+};
+
+// Store to vp[slot] (slots that are not inline in an object).
+class MStoreDynamicSlot : public MBinaryInstruction,
+ public NoFloatPolicy<1>::Data {
+ uint32_t slot_;
+ bool needsBarrier_;
+
+ MStoreDynamicSlot(MDefinition* slots, uint32_t slot, MDefinition* value,
+ bool barrier)
+ : MBinaryInstruction(classOpcode, slots, value),
+ slot_(slot),
+ needsBarrier_(barrier) {
+ MOZ_ASSERT(slots->type() == MIRType::Slots);
+ }
+
+ public:
+ INSTRUCTION_HEADER(StoreDynamicSlot)
+ NAMED_OPERANDS((0, slots), (1, value))
+
+ static MStoreDynamicSlot* NewUnbarriered(TempAllocator& alloc,
+ MDefinition* slots, uint32_t slot,
+ MDefinition* value) {
+ return new (alloc) MStoreDynamicSlot(slots, slot, value, false);
+ }
+ static MStoreDynamicSlot* NewBarriered(TempAllocator& alloc,
+ MDefinition* slots, uint32_t slot,
+ MDefinition* value) {
+ return new (alloc) MStoreDynamicSlot(slots, slot, value, true);
+ }
+
+ uint32_t slot() const { return slot_; }
+ bool needsBarrier() const { return needsBarrier_; }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::DynamicSlot);
+ }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ ALLOW_CLONE(MStoreDynamicSlot)
+};
+
+class MSetPropertyCache : public MTernaryInstruction,
+ public MixPolicy<SingleObjectPolicy, CacheIdPolicy<1>,
+ NoFloatPolicy<2>>::Data {
+ bool strict_ : 1;
+
+ MSetPropertyCache(MDefinition* obj, MDefinition* id, MDefinition* value,
+ bool strict)
+ : MTernaryInstruction(classOpcode, obj, id, value), strict_(strict) {}
+
+ public:
+ INSTRUCTION_HEADER(SetPropertyCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, idval), (2, value))
+
+ bool strict() const { return strict_; }
+};
+
+class MMegamorphicSetElement : public MTernaryInstruction,
+ public MegamorphicSetElementPolicy::Data {
+ bool strict_;
+
+ MMegamorphicSetElement(MDefinition* object, MDefinition* index,
+ MDefinition* value, bool strict)
+ : MTernaryInstruction(classOpcode, object, index, value),
+ strict_(strict) {}
+
+ public:
+ INSTRUCTION_HEADER(MegamorphicSetElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, index), (2, value))
+
+ bool strict() const { return strict_; }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+class MSetDOMProperty : public MBinaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, BoxPolicy<1>>::Data {
+ const JSJitSetterOp func_;
+ Realm* setterRealm_;
+ DOMObjectKind objectKind_;
+
+ MSetDOMProperty(const JSJitSetterOp func, DOMObjectKind objectKind,
+ Realm* setterRealm, MDefinition* obj, MDefinition* val)
+ : MBinaryInstruction(classOpcode, obj, val),
+ func_(func),
+ setterRealm_(setterRealm),
+ objectKind_(objectKind) {}
+
+ public:
+ INSTRUCTION_HEADER(SetDOMProperty)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value))
+
+ JSJitSetterOp fun() const { return func_; }
+ Realm* setterRealm() const { return setterRealm_; }
+ DOMObjectKind objectKind() const { return objectKind_; }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+class MGetDOMPropertyBase : public MVariadicInstruction,
+ public ObjectPolicy<0>::Data {
+ const JSJitInfo* info_;
+
+ protected:
+ MGetDOMPropertyBase(Opcode op, const JSJitInfo* jitinfo)
+ : MVariadicInstruction(op), info_(jitinfo) {
+ MOZ_ASSERT(jitinfo);
+ MOZ_ASSERT(jitinfo->type() == JSJitInfo::Getter);
+
+ // We are movable iff the jitinfo says we can be.
+ if (isDomMovable()) {
+ MOZ_ASSERT(jitinfo->aliasSet() != JSJitInfo::AliasEverything);
+ setMovable();
+ } else {
+ // If we're not movable, that means we shouldn't be DCEd either,
+ // because we might throw an exception when called, and getting rid
+ // of that is observable.
+ setGuard();
+ }
+
+ setResultType(MIRType::Value);
+ }
+
+ const JSJitInfo* info() const { return info_; }
+
+ [[nodiscard]] bool init(TempAllocator& alloc, MDefinition* obj,
+ MDefinition* guard, MDefinition* globalGuard) {
+ MOZ_ASSERT(obj);
+ // guard can be null.
+ // globalGuard can be null.
+ size_t operandCount = 1;
+ if (guard) {
+ ++operandCount;
+ }
+ if (globalGuard) {
+ ++operandCount;
+ }
+ if (!MVariadicInstruction::init(alloc, operandCount)) {
+ return false;
+ }
+ initOperand(0, obj);
+
+ size_t operandIndex = 1;
+    // Pin the guard, if we have one, as an operand in case we want to
+    // hoist later.
+ if (guard) {
+ initOperand(operandIndex++, guard);
+ }
+
+ // And the same for the global guard, if we have one.
+ if (globalGuard) {
+ initOperand(operandIndex, globalGuard);
+ }
+
+ return true;
+ }
+
+ public:
+ NAMED_OPERANDS((0, object))
+
+ JSJitGetterOp fun() const { return info_->getter; }
+ bool isInfallible() const { return info_->isInfallible; }
+ bool isDomMovable() const { return info_->isMovable; }
+ JSJitInfo::AliasSet domAliasSet() const { return info_->aliasSet(); }
+ size_t domMemberSlotIndex() const {
+ MOZ_ASSERT(info_->isAlwaysInSlot || info_->isLazilyCachedInSlot);
+ return info_->slotIndex;
+ }
+ bool valueMayBeInSlot() const { return info_->isLazilyCachedInSlot; }
+
+ bool baseCongruentTo(const MGetDOMPropertyBase* ins) const {
+ if (!isDomMovable()) {
+ return false;
+ }
+
+ // Checking the jitinfo is the same as checking the constant function
+ if (!(info() == ins->info())) {
+ return false;
+ }
+
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ JSJitInfo::AliasSet aliasSet = domAliasSet();
+ if (aliasSet == JSJitInfo::AliasNone) {
+ return AliasSet::None();
+ }
+ if (aliasSet == JSJitInfo::AliasDOMSets) {
+ return AliasSet::Load(AliasSet::DOMProperty);
+ }
+ MOZ_ASSERT(aliasSet == JSJitInfo::AliasEverything);
+ return AliasSet::Store(AliasSet::Any);
+ }
+};
+
+class MGetDOMProperty : public MGetDOMPropertyBase {
+ Realm* getterRealm_;
+ DOMObjectKind objectKind_;
+
+ MGetDOMProperty(const JSJitInfo* jitinfo, DOMObjectKind objectKind,
+ Realm* getterRealm)
+ : MGetDOMPropertyBase(classOpcode, jitinfo),
+ getterRealm_(getterRealm),
+ objectKind_(objectKind) {}
+
+ public:
+ INSTRUCTION_HEADER(GetDOMProperty)
+
+ static MGetDOMProperty* New(TempAllocator& alloc, const JSJitInfo* info,
+ DOMObjectKind objectKind, Realm* getterRealm,
+ MDefinition* obj, MDefinition* guard,
+ MDefinition* globalGuard) {
+ auto* res = new (alloc) MGetDOMProperty(info, objectKind, getterRealm);
+ if (!res || !res->init(alloc, obj, guard, globalGuard)) {
+ return nullptr;
+ }
+ return res;
+ }
+
+ Realm* getterRealm() const { return getterRealm_; }
+ DOMObjectKind objectKind() const { return objectKind_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGetDOMProperty()) {
+ return false;
+ }
+
+ if (ins->toGetDOMProperty()->getterRealm() != getterRealm()) {
+ return false;
+ }
+
+ return baseCongruentTo(ins->toGetDOMProperty());
+ }
+
+ bool possiblyCalls() const override { return true; }
+};
+
+class MGetDOMMember : public MGetDOMPropertyBase {
+ explicit MGetDOMMember(const JSJitInfo* jitinfo)
+ : MGetDOMPropertyBase(classOpcode, jitinfo) {
+ setResultType(MIRTypeFromValueType(jitinfo->returnType()));
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetDOMMember)
+
+ static MGetDOMMember* New(TempAllocator& alloc, const JSJitInfo* info,
+ MDefinition* obj, MDefinition* guard,
+ MDefinition* globalGuard) {
+ auto* res = new (alloc) MGetDOMMember(info);
+ if (!res || !res->init(alloc, obj, guard, globalGuard)) {
+ return nullptr;
+ }
+ return res;
+ }
+
+ bool possiblyCalls() const override { return false; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGetDOMMember()) {
+ return false;
+ }
+
+ return baseCongruentTo(ins->toGetDOMMember());
+ }
+};
+
+class MLoadDOMExpandoValueGuardGeneration : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ JS::ExpandoAndGeneration* expandoAndGeneration_;
+ uint64_t generation_;
+
+ MLoadDOMExpandoValueGuardGeneration(
+ MDefinition* proxy, JS::ExpandoAndGeneration* expandoAndGeneration,
+ uint64_t generation)
+ : MUnaryInstruction(classOpcode, proxy),
+ expandoAndGeneration_(expandoAndGeneration),
+ generation_(generation) {
+ setGuard();
+ setMovable();
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(LoadDOMExpandoValueGuardGeneration)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, proxy))
+
+ JS::ExpandoAndGeneration* expandoAndGeneration() const {
+ return expandoAndGeneration_;
+ }
+ uint64_t generation() const { return generation_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isLoadDOMExpandoValueGuardGeneration()) {
+ return false;
+ }
+ const auto* other = ins->toLoadDOMExpandoValueGuardGeneration();
+ if (expandoAndGeneration() != other->expandoAndGeneration() ||
+ generation() != other->generation()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::DOMProxyExpando);
+ }
+};
+
+// Inlined assembly for Math.floor(double | float32) -> int32.
+class MFloor : public MUnaryInstruction, public FloatingPointPolicy<0>::Data {
+ explicit MFloor(MDefinition* num) : MUnaryInstruction(classOpcode, num) {
+ setResultType(MIRType::Int32);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Floor)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ void computeRange(TempAllocator& alloc) override;
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MFloor)
+};
+
+// Inlined assembly version for Math.ceil(double | float32) -> int32.
+class MCeil : public MUnaryInstruction, public FloatingPointPolicy<0>::Data {
+ explicit MCeil(MDefinition* num) : MUnaryInstruction(classOpcode, num) {
+ setResultType(MIRType::Int32);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Ceil)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ void computeRange(TempAllocator& alloc) override;
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MCeil)
+};
+
+// Inlined version of Math.round(double | float32) -> int32.
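+// Halfway cases round toward +Infinity, following Math.round: for example,
+// Math.round(2.5) is 3 and Math.round(-2.5) is -2.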
+class MRound : public MUnaryInstruction, public FloatingPointPolicy<0>::Data {
+ explicit MRound(MDefinition* num) : MUnaryInstruction(classOpcode, num) {
+ setResultType(MIRType::Int32);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Round)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MRound)
+};
+
+// Inlined version of Math.trunc(double | float32) -> int32.
+class MTrunc : public MUnaryInstruction, public FloatingPointPolicy<0>::Data {
+ explicit MTrunc(MDefinition* num) : MUnaryInstruction(classOpcode, num) {
+ setResultType(MIRType::Int32);
+ specialization_ = MIRType::Double;
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Trunc)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MTrunc)
+};
+
+// NearbyInt rounds the floating-point input to the nearest integer, according
+// to the RoundingMode.
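+// For example, for an input of -1.5, RoundingMode::Down yields -2.0,
+// RoundingMode::Up yields -1.0, and RoundingMode::TowardsZero yields -1.0.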
+class MNearbyInt : public MUnaryInstruction,
+ public FloatingPointPolicy<0>::Data {
+ RoundingMode roundingMode_;
+
+ explicit MNearbyInt(MDefinition* num, MIRType resultType,
+ RoundingMode roundingMode)
+ : MUnaryInstruction(classOpcode, num), roundingMode_(roundingMode) {
+ MOZ_ASSERT(HasAssemblerSupport(roundingMode));
+
+ MOZ_ASSERT(IsFloatingPointType(resultType));
+ setResultType(resultType);
+ specialization_ = resultType;
+
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(NearbyInt)
+ TRIVIAL_NEW_WRAPPERS
+
+ static bool HasAssemblerSupport(RoundingMode mode) {
+ return Assembler::HasRoundInstruction(mode);
+ }
+
+ RoundingMode roundingMode() const { return roundingMode_; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool isFloat32Commutative() const override { return true; }
+ void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toNearbyInt()->roundingMode() == roundingMode_;
+ }
+
+#ifdef JS_JITSPEW
+ void printOpcode(GenericPrinter& out) const override;
+#endif
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+
+ bool canRecoverOnBailout() const override {
+ switch (roundingMode_) {
+ case RoundingMode::Up:
+ case RoundingMode::Down:
+ case RoundingMode::TowardsZero:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ ALLOW_CLONE(MNearbyInt)
+};
+
+class MGetIteratorCache : public MUnaryInstruction,
+ public BoxExceptPolicy<0, MIRType::Object>::Data {
+ explicit MGetIteratorCache(MDefinition* val)
+ : MUnaryInstruction(classOpcode, val) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GetIteratorCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value))
+};
+
+// Implementation for the 'in' operator using an inline cache.
+class MInCache : public MBinaryInstruction,
+ public MixPolicy<CacheIdPolicy<0>, ObjectPolicy<1>>::Data {
+ MInCache(MDefinition* key, MDefinition* obj)
+ : MBinaryInstruction(classOpcode, key, obj) {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(InCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, key), (1, object))
+};
+
+// Test whether the index is within the array bounds and the element is not
+// a hole.
+class MInArray : public MQuaternaryInstruction, public ObjectPolicy<3>::Data {
+ bool needsNegativeIntCheck_;
+
+ MInArray(MDefinition* elements, MDefinition* index, MDefinition* initLength,
+ MDefinition* object)
+ : MQuaternaryInstruction(classOpcode, elements, index, initLength,
+ object),
+ needsNegativeIntCheck_(true) {
+ setResultType(MIRType::Boolean);
+ setMovable();
+
+ // Set the guard flag to make sure we bail when we see a negative index.
+ // We can clear this flag (and needsNegativeIntCheck_) in
+ // collectRangeInfoPreTrunc.
+ setGuard();
+
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ MOZ_ASSERT(initLength->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(InArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, initLength), (3, object))
+
+ bool needsNegativeIntCheck() const { return needsNegativeIntCheck_; }
+ void collectRangeInfoPreTrunc() override;
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::Element);
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isInArray()) {
+ return false;
+ }
+ const MInArray* other = ins->toInArray();
+ if (needsNegativeIntCheck() != other->needsNegativeIntCheck()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(other);
+ }
+};
+
+// Bail when the element is a hole.
+class MGuardElementNotHole : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ MGuardElementNotHole(MDefinition* elements, MDefinition* index)
+ : MBinaryInstruction(classOpcode, elements, index) {
+ setMovable();
+ setGuard();
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardElementNotHole)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::Element);
+ }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+};
+
+class MCheckPrivateFieldCache
+ : public MBinaryInstruction,
+ public MixPolicy<BoxExceptPolicy<0, MIRType::Object>,
+ CacheIdPolicy<1>>::Data {
+ MCheckPrivateFieldCache(MDefinition* obj, MDefinition* id)
+ : MBinaryInstruction(classOpcode, obj, id) {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(CheckPrivateFieldCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value), (1, idval))
+};
+
+class MHasOwnCache : public MBinaryInstruction,
+ public MixPolicy<BoxExceptPolicy<0, MIRType::Object>,
+ CacheIdPolicy<1>>::Data {
+ MHasOwnCache(MDefinition* obj, MDefinition* id)
+ : MBinaryInstruction(classOpcode, obj, id) {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(HasOwnCache)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value), (1, idval))
+};
+
+// Implementation for instanceof operator with specific rhs.
+class MInstanceOf : public MBinaryInstruction,
+ public MixPolicy<BoxExceptPolicy<0, MIRType::Object>,
+ ObjectPolicy<1>>::Data {
+ MInstanceOf(MDefinition* obj, MDefinition* proto)
+ : MBinaryInstruction(classOpcode, obj, proto) {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(InstanceOf)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+// Given a value being written to another object, update the generational
+// store buffer if the value is in the nursery and the object is in the
+// tenured heap.
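+//
+// A simplified sketch of how a builder might pair a slot store with this
+// barrier (|current|, |alloc|, |obj|, |slot| and |value| stand in for the
+// builder's state and are not part of this patch):
+//
+//   current->add(MStoreFixedSlot::NewUnbarriered(alloc, obj, slot, value));
+//   current->add(MPostWriteBarrier::New(alloc, obj, value));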
+class MPostWriteBarrier : public MBinaryInstruction,
+ public ObjectPolicy<0>::Data {
+ MPostWriteBarrier(MDefinition* obj, MDefinition* value)
+ : MBinaryInstruction(classOpcode, obj, value) {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(PostWriteBarrier)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ // During lowering, values that neither have object nor value MIR type
+ // are ignored, thus Float32 can show up at this point without any issue.
+ return use == getUseFor(1);
+ }
+#endif
+
+ ALLOW_CLONE(MPostWriteBarrier)
+};
+
+// Given a value being written to another object's elements at the specified
+// index, update the generational store buffer if the value is in the nursery
+// and the object is in the tenured heap.
+class MPostWriteElementBarrier
+ : public MTernaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<2>>::Data {
+ MPostWriteElementBarrier(MDefinition* obj, MDefinition* value,
+ MDefinition* index)
+ : MTernaryInstruction(classOpcode, obj, value, index) {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(PostWriteElementBarrier)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, value), (2, index))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override {
+ // During lowering, values that neither have object nor value MIR type
+ // are ignored, thus Float32 can show up at this point without any issue.
+ return use == getUseFor(1);
+ }
+#endif
+
+ ALLOW_CLONE(MPostWriteElementBarrier)
+};
+
+class MNewCallObject : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ public:
+ INSTRUCTION_HEADER(NewCallObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ explicit MNewCallObject(MConstant* templateObj)
+ : MUnaryInstruction(classOpcode, templateObj) {
+ setResultType(MIRType::Object);
+ }
+
+ CallObject* templateObject() const {
+ return &getOperand(0)->toConstant()->toObject().as<CallObject>();
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+};
+
+class MNewStringObject : public MUnaryInstruction,
+ public ConvertToStringPolicy<0>::Data {
+ CompilerObject templateObj_;
+
+ MNewStringObject(MDefinition* input, JSObject* templateObj)
+ : MUnaryInstruction(classOpcode, input), templateObj_(templateObj) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ INSTRUCTION_HEADER(NewStringObject)
+ TRIVIAL_NEW_WRAPPERS
+
+ StringObject* templateObj() const;
+};
+
+// This is an alias for MLoadFixedSlot.
+class MEnclosingEnvironment : public MLoadFixedSlot {
+ explicit MEnclosingEnvironment(MDefinition* obj)
+ : MLoadFixedSlot(obj, EnvironmentObject::enclosingEnvironmentSlot()) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ static MEnclosingEnvironment* New(TempAllocator& alloc, MDefinition* obj) {
+ return new (alloc) MEnclosingEnvironment(obj);
+ }
+
+ AliasSet getAliasSet() const override {
+ // EnvironmentObject reserved slots are immutable.
+ return AliasSet::None();
+ }
+};
+
+// This is an element of a spaghetti stack used to represent the memory
+// context that has to be restored in case of a bailout.
+struct MStoreToRecover : public TempObject,
+ public InlineSpaghettiStackNode<MStoreToRecover> {
+ MDefinition* operand;
+
+ explicit MStoreToRecover(MDefinition* operand) : operand(operand) {}
+};
+
+using MStoresToRecoverList = InlineSpaghettiStack<MStoreToRecover>;
+
+// A resume point contains the information needed to reconstruct the Baseline
+// Interpreter state from a position in Warp JIT code. A resume point is a
+// mapping of stack slots to MDefinitions.
+//
+// We capture stack state at critical points:
+// * (1) At the beginning of every basic block.
+// * (2) After every effectful operation.
+//
+// As long as these two properties are maintained, instructions can be moved,
+// hoisted, or eliminated without problems, and ops without side effects do not
+// need to worry about capturing state at precisely the right point in time.
+//
+// Effectful instructions, of course, need to capture state after completion,
+// where the interpreter will not attempt to repeat the operation. For this,
+// ResumeAfter must be used. The state is attached directly to the effectful
+// instruction to ensure that no intermediate instructions could be injected
+// in between by a future analysis pass.
+//
+// During LIR construction, if an instruction can bail back to the interpreter,
+// we create an LSnapshot, which uses the last known resume point to request
+// register/stack assignments for every live value.
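+//
+// As a small illustration (not tied to any particular script): for a loop, a
+// resume point is captured at the loop header (a basic block entry), and
+// another is captured after each effectful instruction in the body, such as
+// an MCall. If a later instruction in the body bails out, the Baseline
+// Interpreter resumes from that captured state without re-running the call.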
+class MResumePoint final : public MNode
+#ifdef DEBUG
+ ,
+ public InlineForwardListNode<MResumePoint>
+#endif
+{
+ private:
+ friend class MBasicBlock;
+ friend void AssertBasicGraphCoherency(MIRGraph& graph, bool force);
+
+ // List of stack slots needed to reconstruct the BaselineFrame.
+ FixedList<MUse> operands_;
+
+ // List of stores needed to reconstruct the content of objects which are
+ // emulated by EmulateStateOf variants.
+ MStoresToRecoverList stores_;
+
+ jsbytecode* pc_;
+ MInstruction* instruction_;
+ ResumeMode mode_;
+ bool isDiscarded_ = false;
+
+ MResumePoint(MBasicBlock* block, jsbytecode* pc, ResumeMode mode);
+ void inherit(MBasicBlock* state);
+
+ // Calling isDefinition or isResumePoint on MResumePoint is unnecessary.
+ bool isDefinition() const = delete;
+ bool isResumePoint() const = delete;
+
+ void setBlock(MBasicBlock* block) {
+ setBlockAndKind(block, Kind::ResumePoint);
+ }
+
+ protected:
+ // Initializes operands_ to an empty array of a fixed length.
+ // The array may then be filled in by inherit().
+ [[nodiscard]] bool init(TempAllocator& alloc);
+
+ void clearOperand(size_t index) {
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ operands_[index].initUncheckedWithoutProducer(this);
+ }
+
+ MUse* getUseFor(size_t index) override { return &operands_[index]; }
+ const MUse* getUseFor(size_t index) const override {
+ return &operands_[index];
+ }
+
+ public:
+ static MResumePoint* New(TempAllocator& alloc, MBasicBlock* block,
+ jsbytecode* pc, ResumeMode mode);
+
+ MBasicBlock* block() const { return resumePointBlock(); }
+
+ size_t numAllocatedOperands() const { return operands_.length(); }
+ uint32_t stackDepth() const { return numAllocatedOperands(); }
+ size_t numOperands() const override { return numAllocatedOperands(); }
+ size_t indexOf(const MUse* u) const final {
+ MOZ_ASSERT(u >= &operands_[0]);
+ MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
+ return u - &operands_[0];
+ }
+ void initOperand(size_t index, MDefinition* operand) {
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ operands_[index].initUnchecked(operand, this);
+ }
+ void replaceOperand(size_t index, MDefinition* operand) final {
+ operands_[index].replaceProducer(operand);
+ }
+
+ bool isObservableOperand(MUse* u) const;
+ bool isObservableOperand(size_t index) const;
+ bool isRecoverableOperand(MUse* u) const;
+
+ MDefinition* getOperand(size_t index) const override {
+ return operands_[index].producer();
+ }
+ jsbytecode* pc() const { return pc_; }
+ MResumePoint* caller() const;
+ uint32_t frameCount() const {
+ uint32_t count = 1;
+ for (MResumePoint* it = caller(); it; it = it->caller()) {
+ count++;
+ }
+ return count;
+ }
+ MInstruction* instruction() { return instruction_; }
+ void setInstruction(MInstruction* ins) {
+ MOZ_ASSERT(!instruction_);
+ instruction_ = ins;
+ }
+ void resetInstruction() {
+ MOZ_ASSERT(instruction_);
+ instruction_ = nullptr;
+ }
+ ResumeMode mode() const { return mode_; }
+
+ void releaseUses() {
+ for (size_t i = 0, e = numOperands(); i < e; i++) {
+ if (operands_[i].hasProducer()) {
+ operands_[i].releaseProducer();
+ }
+ }
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+
+ // Register a store instruction on the current resume point. This
+ // instruction would be recovered when we are bailing out. The |cache|
+  // argument can be any resume point; it is used to share memory if we are
+ // doing the same modification.
+ void addStore(TempAllocator& alloc, MDefinition* store,
+ const MResumePoint* cache = nullptr);
+
+ MStoresToRecoverList::iterator storesBegin() const { return stores_.begin(); }
+ MStoresToRecoverList::iterator storesEnd() const { return stores_.end(); }
+
+ void setDiscarded() { isDiscarded_ = true; }
+ bool isDiscarded() const { return isDiscarded_; }
+
+#ifdef JS_JITSPEW
+ virtual void dump(GenericPrinter& out) const override;
+ virtual void dump() const override;
+#endif
+};
+
+class MIsCallable : public MUnaryInstruction,
+ public BoxExceptPolicy<0, MIRType::Object>::Data {
+ explicit MIsCallable(MDefinition* object)
+ : MUnaryInstruction(classOpcode, object) {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(IsCallable)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MHasClass : public MUnaryInstruction, public SingleObjectPolicy::Data {
+ const JSClass* class_;
+
+ MHasClass(MDefinition* object, const JSClass* clasp)
+ : MUnaryInstruction(classOpcode, object), class_(clasp) {
+ MOZ_ASSERT(object->type() == MIRType::Object);
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(HasClass)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ const JSClass* getClass() const { return class_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isHasClass()) {
+ return false;
+ }
+ if (getClass() != ins->toHasClass()->getClass()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+};
+
+class MGuardToClass : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ const JSClass* class_;
+
+ MGuardToClass(MDefinition* object, const JSClass* clasp)
+ : MUnaryInstruction(classOpcode, object), class_(clasp) {
+ MOZ_ASSERT(object->type() == MIRType::Object);
+ MOZ_ASSERT(!clasp->isJSFunction(), "Use MGuardToFunction instead");
+ setResultType(MIRType::Object);
+ setMovable();
+
+ // We will bail out if the class type is incorrect, so we need to ensure we
+ // don't eliminate this instruction
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardToClass)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ const JSClass* getClass() const { return class_; }
+ bool isArgumentsObjectClass() const {
+ return class_ == &MappedArgumentsObject::class_ ||
+ class_ == &UnmappedArgumentsObject::class_;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardToClass()) {
+ return false;
+ }
+ if (getClass() != ins->toGuardToClass()->getClass()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+};
+
+class MGuardToFunction : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ explicit MGuardToFunction(MDefinition* object)
+ : MUnaryInstruction(classOpcode, object) {
+ MOZ_ASSERT(object->type() == MIRType::Object);
+ setResultType(MIRType::Object);
+ setMovable();
+
+ // We will bail out if the class type is incorrect, so we need to ensure we
+ // don't eliminate this instruction
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardToFunction)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardToFunction()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+};
+
+// Note: we might call a proxy trap, so this instruction is effectful.
+class MIsArray : public MUnaryInstruction,
+ public BoxExceptPolicy<0, MIRType::Object>::Data {
+ explicit MIsArray(MDefinition* value)
+ : MUnaryInstruction(classOpcode, value) {
+ setResultType(MIRType::Boolean);
+ }
+
+ public:
+ INSTRUCTION_HEADER(IsArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value))
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+class MIsTypedArray : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ bool possiblyWrapped_;
+
+ explicit MIsTypedArray(MDefinition* value, bool possiblyWrapped)
+ : MUnaryInstruction(classOpcode, value),
+ possiblyWrapped_(possiblyWrapped) {
+ setResultType(MIRType::Boolean);
+
+ if (possiblyWrapped) {
+ // Proxy checks may throw, so we're neither removable nor movable.
+ setGuard();
+ } else {
+ setMovable();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(IsTypedArray)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value))
+
+ bool isPossiblyWrapped() const { return possiblyWrapped_; }
+ AliasSet getAliasSet() const override {
+ if (isPossiblyWrapped()) {
+ return AliasSet::Store(AliasSet::Any);
+ }
+ return AliasSet::None();
+ }
+};
+
+// Allocate the generator object for a frame.
+class MGenerator : public MTernaryInstruction,
+ public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>>::Data {
+ explicit MGenerator(MDefinition* callee, MDefinition* environmentChain,
+ MDefinition* argsObject)
+ : MTernaryInstruction(classOpcode, callee, environmentChain, argsObject) {
+ setResultType(MIRType::Object);
+  }
+
+ public:
+ INSTRUCTION_HEADER(Generator)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, callee), (1, environmentChain), (2, argsObject))
+};
+
+class MMaybeExtractAwaitValue : public MBinaryInstruction,
+ public BoxPolicy<0>::Data {
+ explicit MMaybeExtractAwaitValue(MDefinition* value, MDefinition* canSkip)
+ : MBinaryInstruction(classOpcode, value, canSkip) {
+ setResultType(MIRType::Value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(MaybeExtractAwaitValue)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value), (1, canSkip))
+};
+
+class MAtomicIsLockFree : public MUnaryInstruction,
+ public ConvertToInt32Policy<0>::Data {
+ explicit MAtomicIsLockFree(MDefinition* value)
+ : MUnaryInstruction(classOpcode, value) {
+ setResultType(MIRType::Boolean);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(AtomicIsLockFree)
+ TRIVIAL_NEW_WRAPPERS
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ [[nodiscard]] bool writeRecoverData(
+ CompactBufferWriter& writer) const override;
+ bool canRecoverOnBailout() const override { return true; }
+
+ ALLOW_CLONE(MAtomicIsLockFree)
+};
+
+class MCompareExchangeTypedArrayElement
+ : public MQuaternaryInstruction,
+ public MixPolicy<TruncateToInt32OrToBigIntPolicy<2>,
+ TruncateToInt32OrToBigIntPolicy<3>>::Data {
+ Scalar::Type arrayType_;
+
+ explicit MCompareExchangeTypedArrayElement(MDefinition* elements,
+ MDefinition* index,
+ Scalar::Type arrayType,
+ MDefinition* oldval,
+ MDefinition* newval)
+ : MQuaternaryInstruction(classOpcode, elements, index, oldval, newval),
+ arrayType_(arrayType) {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ setGuard(); // Not removable
+ }
+
+ public:
+ INSTRUCTION_HEADER(CompareExchangeTypedArrayElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, oldval), (3, newval))
+
+ bool isByteArray() const {
+ return (arrayType_ == Scalar::Int8 || arrayType_ == Scalar::Uint8);
+ }
+ Scalar::Type arrayType() const { return arrayType_; }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+};
+
+class MAtomicExchangeTypedArrayElement
+ : public MTernaryInstruction,
+ public TruncateToInt32OrToBigIntPolicy<2>::Data {
+ Scalar::Type arrayType_;
+
+ MAtomicExchangeTypedArrayElement(MDefinition* elements, MDefinition* index,
+ MDefinition* value, Scalar::Type arrayType)
+ : MTernaryInstruction(classOpcode, elements, index, value),
+ arrayType_(arrayType) {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(arrayType <= Scalar::Uint32 || Scalar::isBigIntType(arrayType));
+ setGuard(); // Not removable
+ }
+
+ public:
+ INSTRUCTION_HEADER(AtomicExchangeTypedArrayElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+ bool isByteArray() const {
+ return (arrayType_ == Scalar::Int8 || arrayType_ == Scalar::Uint8);
+ }
+ Scalar::Type arrayType() const { return arrayType_; }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+};
+
+class MAtomicTypedArrayElementBinop
+ : public MTernaryInstruction,
+ public TruncateToInt32OrToBigIntPolicy<2>::Data {
+ private:
+ AtomicOp op_;
+ Scalar::Type arrayType_;
+ bool forEffect_;
+
+ explicit MAtomicTypedArrayElementBinop(AtomicOp op, MDefinition* elements,
+ MDefinition* index,
+ Scalar::Type arrayType,
+ MDefinition* value, bool forEffect)
+ : MTernaryInstruction(classOpcode, elements, index, value),
+ op_(op),
+ arrayType_(arrayType),
+ forEffect_(forEffect) {
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
+ MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(arrayType <= Scalar::Uint32 || Scalar::isBigIntType(arrayType));
+ setGuard(); // Not removable
+ }
+
+ public:
+ INSTRUCTION_HEADER(AtomicTypedArrayElementBinop)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+ bool isByteArray() const {
+ return (arrayType_ == Scalar::Int8 || arrayType_ == Scalar::Uint8);
+ }
+ AtomicOp operation() const { return op_; }
+ Scalar::Type arrayType() const { return arrayType_; }
+ bool isForEffect() const { return forEffect_; }
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::UnboxedElement);
+ }
+};
+
+class MDebugger : public MNullaryInstruction {
+ MDebugger() : MNullaryInstruction(classOpcode) {
+ setBailoutKind(BailoutKind::Debugger);
+ }
+
+ public:
+ INSTRUCTION_HEADER(Debugger)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+// Used to load the prototype of an object known to have
+// a static prototype.
+class MObjectStaticProto : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ explicit MObjectStaticProto(MDefinition* object)
+ : MUnaryInstruction(classOpcode, object) {
+ setResultType(MIRType::Object);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(ObjectStaticProto)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::ObjectFields);
+ }
+ AliasType mightAlias(const MDefinition* def) const override {
+ // These instructions never modify the [[Prototype]].
+ if (def->isAddAndStoreSlot() || def->isAllocateAndStoreSlot() ||
+ def->isStoreElementHole() || def->isArrayPush()) {
+ return AliasType::NoAlias;
+ }
+ return AliasType::MayAlias;
+ }
+};
+
+class MConstantProto : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+  // NOTE: we're not going to actually use the underlying receiver object for
+  // anything. It is only here to give extra information to
+  // MGuardShape::mightAlias. Accordingly, we don't take it as an operand, but
+  // instead just keep a pointer to it. This means we need to ensure it's not
+  // discarded before we try to access it. If it is discarded, we basically
+  // just become an MConstant for the object's proto, which is fine.
+ const MDefinition* receiverObject_;
+
+ explicit MConstantProto(MDefinition* protoObject,
+ const MDefinition* receiverObject)
+ : MUnaryInstruction(classOpcode, protoObject),
+ receiverObject_(receiverObject) {
+ MOZ_ASSERT(protoObject->isConstant());
+ setResultType(MIRType::Object);
+ setMovable();
+ }
+
+ ALLOW_CLONE(MConstantProto)
+
+ public:
+ INSTRUCTION_HEADER(ConstantProto)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, protoObject))
+
+ HashNumber valueHash() const override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ if (this == ins) {
+ return true;
+ }
+ const MDefinition* receiverObject = getReceiverObject();
+ return congruentIfOperandsEqual(ins) && receiverObject &&
+ receiverObject == ins->toConstantProto()->getReceiverObject();
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ const MDefinition* getReceiverObject() const {
+ if (receiverObject_->isDiscarded()) {
+ return nullptr;
+ }
+ return receiverObject_;
+ }
+};
+
+class MObjectToIterator : public MUnaryInstruction,
+ public ObjectPolicy<0>::Data {
+ NativeIteratorListHead* enumeratorsAddr_;
+ bool wantsIndices_ = false;
+
+ explicit MObjectToIterator(MDefinition* object,
+ NativeIteratorListHead* enumeratorsAddr)
+ : MUnaryInstruction(classOpcode, object),
+ enumeratorsAddr_(enumeratorsAddr) {
+ setResultType(MIRType::Object);
+ }
+
+ public:
+ NativeIteratorListHead* enumeratorsAddr() const { return enumeratorsAddr_; }
+ INSTRUCTION_HEADER(ObjectToIterator)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ bool wantsIndices() const { return wantsIndices_; }
+ void setWantsIndices(bool value) { wantsIndices_ = value; }
+};
+
+// Flips the input's sign bit, independently of the rest of the number's
+// payload. Note this is different from multiplying by minus one, which goes
+// through floating-point arithmetic and, for example, is not guaranteed to
+// preserve NaN payloads.
+class MWasmNeg : public MUnaryInstruction, public NoTypePolicy::Data {
+ MWasmNeg(MDefinition* op, MIRType type) : MUnaryInstruction(classOpcode, op) {
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmNeg)
+ TRIVIAL_NEW_WRAPPERS
+};
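+
+// For illustration only: a minimal sketch of what "flip the sign bit,
+// independently of the payload" means, written in plain C++ (assumes
+// <cstdint> and <cstring> are available; the helper name is made up).
+static inline double ExampleFlipSignBit(double x) {
+  uint64_t bits;
+  memcpy(&bits, &x, sizeof(bits));
+  bits ^= uint64_t(1) << 63;  // toggle only the sign bit; leave payload as-is
+  memcpy(&x, &bits, sizeof(x));
+  return x;
+}
+// Unlike `x * -1.0`, which goes through floating-point arithmetic, this
+// preserves NaN payloads bit-for-bit and merely toggles the sign.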
+
+// Machine-level bitwise AND/OR/XOR, avoiding all JS-level complexity embodied
+// in MBinaryBitwiseInstruction.
+class MWasmBinaryBitwise : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ public:
+ enum class SubOpcode { And, Or, Xor };
+
+ protected:
+ MWasmBinaryBitwise(MDefinition* left, MDefinition* right, MIRType type,
+ SubOpcode subOpcode)
+ : MBinaryInstruction(classOpcode, left, right), subOpcode_(subOpcode) {
+ MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
+ setResultType(type);
+ setMovable();
+ setCommutative();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBinaryBitwise)
+ TRIVIAL_NEW_WRAPPERS
+
+ SubOpcode subOpcode() const { return subOpcode_; }
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return ins->isWasmBinaryBitwise() &&
+ ins->toWasmBinaryBitwise()->subOpcode() == subOpcode() &&
+ binaryCongruentTo(ins);
+ }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ const char* what = "!!unknown!!";
+ switch (subOpcode()) {
+ case SubOpcode::And:
+ what = "And";
+ break;
+ case SubOpcode::Or:
+ what = "Or";
+ break;
+ case SubOpcode::Xor:
+ what = "Xor";
+ break;
+ }
+ extras->add(what);
+ }
+#endif
+
+ private:
+ SubOpcode subOpcode_;
+
+ ALLOW_CLONE(MWasmBinaryBitwise)
+};
+
+class MWasmLoadInstance : public MUnaryInstruction, public NoTypePolicy::Data {
+ uint32_t offset_;
+ AliasSet aliases_;
+
+ explicit MWasmLoadInstance(MDefinition* instance, uint32_t offset,
+ MIRType type, AliasSet aliases)
+ : MUnaryInstruction(classOpcode, instance),
+ offset_(offset),
+ aliases_(aliases) {
+ // Different instance data have different alias classes and only those
+ // classes are allowed.
+ MOZ_ASSERT(
+ aliases_.flags() == AliasSet::Load(AliasSet::WasmHeapMeta).flags() ||
+ aliases_.flags() == AliasSet::Load(AliasSet::WasmTableMeta).flags() ||
+ aliases_.flags() ==
+ AliasSet::Load(AliasSet::WasmPendingException).flags() ||
+ aliases_.flags() == AliasSet::None().flags());
+
+ // The only types supported at the moment.
+ MOZ_ASSERT(type == MIRType::Pointer || type == MIRType::Int32 ||
+ type == MIRType::Int64 || type == MIRType::RefOrNull);
+
+ setMovable();
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmLoadInstance)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, instance))
+
+ uint32_t offset() const { return offset_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return op() == ins->op() &&
+ offset() == ins->toWasmLoadInstance()->offset() &&
+ type() == ins->type();
+ }
+
+ HashNumber valueHash() const override {
+ return addU32ToHash(HashNumber(op()), offset());
+ }
+
+ AliasSet getAliasSet() const override { return aliases_; }
+};
+
+class MWasmStoreInstance : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ uint32_t offset_;
+ AliasSet aliases_;
+
+ explicit MWasmStoreInstance(MDefinition* instance, MDefinition* value,
+ uint32_t offset, MIRType type, AliasSet aliases)
+ : MBinaryInstruction(classOpcode, instance, value),
+ offset_(offset),
+ aliases_(aliases) {
+ // Different instance data have different alias classes and only those
+ // classes are allowed.
+ MOZ_ASSERT(aliases_.flags() ==
+ AliasSet::Store(AliasSet::WasmPendingException).flags());
+
+ // The only types supported at the moment.
+ MOZ_ASSERT(type == MIRType::Pointer || type == MIRType::Int32 ||
+ type == MIRType::Int64 || type == MIRType::RefOrNull);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmStoreInstance)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, instance), (1, value))
+
+ uint32_t offset() const { return offset_; }
+
+ AliasSet getAliasSet() const override { return aliases_; }
+};
+
+class MWasmHeapBase : public MUnaryInstruction, public NoTypePolicy::Data {
+ AliasSet aliases_;
+
+ explicit MWasmHeapBase(MDefinition* instance, AliasSet aliases)
+ : MUnaryInstruction(classOpcode, instance), aliases_(aliases) {
+ setMovable();
+ setResultType(MIRType::Pointer);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmHeapBase)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, instance))
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return ins->isWasmHeapBase();
+ }
+
+ AliasSet getAliasSet() const override { return aliases_; }
+};
+
+// For memory32, bounds check nodes are of type Int32 on 32-bit systems for both
+// wasm and asm.js code, as well as on 64-bit systems for asm.js code and for
+// wasm code that is known to have a bounds check limit that fits into 32 bits.
+// They are of type Int64 only on 64-bit systems for wasm code with 4GB heaps.
+// There is no way for nodes of both types to be present in the same function.
+// Should this change, then BCE must be updated to take type into account.
+//
+// For memory64, bounds check nodes are always of type Int64.
+
+class MWasmBoundsCheck : public MBinaryInstruction, public NoTypePolicy::Data {
+ public:
+ enum Target {
+ // Linear memory at index zero, which is the only memory allowed so far.
+ Memory0,
+ // Everything else. Currently comprises tables, and arrays in the GC
+ // proposal.
+ Unknown
+ };
+
+ private:
+ wasm::BytecodeOffset bytecodeOffset_;
+ Target target_;
+
+ explicit MWasmBoundsCheck(MDefinition* index, MDefinition* boundsCheckLimit,
+ wasm::BytecodeOffset bytecodeOffset, Target target)
+ : MBinaryInstruction(classOpcode, index, boundsCheckLimit),
+ bytecodeOffset_(bytecodeOffset),
+ target_(target) {
+ MOZ_ASSERT(index->type() == boundsCheckLimit->type());
+
+ // Bounds check is effectful: it throws for OOB.
+ setGuard();
+
+ if (JitOptions.spectreIndexMasking) {
+ setResultType(index->type());
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBoundsCheck)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, index), (1, boundsCheckLimit))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool isMemory() const { return target_ == MWasmBoundsCheck::Memory0; }
+
+ bool isRedundant() const { return !isGuard(); }
+
+ void setRedundant() { setNotGuard(); }
+
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+};
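+
+// Rough sketch of the check this node stands for; the actual code is emitted
+// by the backends, and the helper below (name, signature, 32-bit flavour) is
+// an assumption made for the example only.
+static inline uint32_t ExampleBoundsCheck32(uint32_t index, uint32_t limit,
+                                            bool* oob) {
+  // The node traps when the index is not below the limit; this is why it is
+  // marked as a guard above.
+  *oob = index >= limit;
+  // With JitOptions.spectreIndexMasking the node also produces a result: the
+  // index, forced to zero when the check fails, so that a misspeculated
+  // access cannot read out of bounds.
+  return *oob ? 0 : index;
+}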
+
+class MWasmAddOffset : public MUnaryInstruction, public NoTypePolicy::Data {
+ uint64_t offset_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ MWasmAddOffset(MDefinition* base, uint64_t offset,
+ wasm::BytecodeOffset bytecodeOffset)
+ : MUnaryInstruction(classOpcode, base),
+ offset_(offset),
+ bytecodeOffset_(bytecodeOffset) {
+ setGuard();
+ MOZ_ASSERT(base->type() == MIRType::Int32 ||
+ base->type() == MIRType::Int64);
+ setResultType(base->type());
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmAddOffset)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, base))
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ uint64_t offset() const { return offset_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+};
+
+class MWasmAlignmentCheck : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ uint32_t byteSize_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ explicit MWasmAlignmentCheck(MDefinition* index, uint32_t byteSize,
+ wasm::BytecodeOffset bytecodeOffset)
+ : MUnaryInstruction(classOpcode, index),
+ byteSize_(byteSize),
+ bytecodeOffset_(bytecodeOffset) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(byteSize));
+ // Alignment check is effectful: it throws for unaligned.
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmAlignmentCheck)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, index))
+
+ bool congruentTo(const MDefinition* ins) const override;
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ uint32_t byteSize() const { return byteSize_; }
+
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+};
+
+class MWasmLoad
+ : public MVariadicInstruction, // memoryBase is nullptr on some platforms
+ public NoTypePolicy::Data {
+ wasm::MemoryAccessDesc access_;
+
+ explicit MWasmLoad(const wasm::MemoryAccessDesc& access, MIRType resultType)
+ : MVariadicInstruction(classOpcode), access_(access) {
+ setGuard();
+ setResultType(resultType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmLoad)
+  NAMED_OPERANDS((0, base), (1, memoryBase))
+
+ static MWasmLoad* New(TempAllocator& alloc, MDefinition* memoryBase,
+ MDefinition* base, const wasm::MemoryAccessDesc& access,
+ MIRType resultType) {
+ MWasmLoad* load = new (alloc) MWasmLoad(access, resultType);
+ if (!load->init(alloc, 1 + !!memoryBase)) {
+ return nullptr;
+ }
+
+ load->initOperand(0, base);
+ if (memoryBase) {
+ load->initOperand(1, memoryBase);
+ }
+
+ return load;
+ }
+
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+
+ AliasSet getAliasSet() const override {
+ // When a barrier is needed, make the instruction effectful by giving
+ // it a "store" effect.
+ if (access_.isAtomic()) {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+ return AliasSet::Load(AliasSet::WasmHeap);
+ }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[64];
+ SprintfLiteral(buf, "(offs=%lld)", (long long int)access().offset64());
+ extras->add(buf);
+ }
+#endif
+};
+
+class MWasmStore : public MVariadicInstruction, public NoTypePolicy::Data {
+ wasm::MemoryAccessDesc access_;
+
+ explicit MWasmStore(const wasm::MemoryAccessDesc& access)
+ : MVariadicInstruction(classOpcode), access_(access) {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmStore)
+ NAMED_OPERANDS((0, base), (1, value), (2, memoryBase))
+
+ static MWasmStore* New(TempAllocator& alloc, MDefinition* memoryBase,
+ MDefinition* base,
+ const wasm::MemoryAccessDesc& access,
+ MDefinition* value) {
+ MWasmStore* store = new (alloc) MWasmStore(access);
+ if (!store->init(alloc, 2 + !!memoryBase)) {
+ return nullptr;
+ }
+
+ store->initOperand(0, base);
+ store->initOperand(1, value);
+ if (memoryBase) {
+ store->initOperand(2, memoryBase);
+ }
+
+ return store;
+ }
+
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[64];
+ SprintfLiteral(buf, "(offs=%lld)", (long long int)access().offset64());
+ extras->add(buf);
+ }
+#endif
+};
+
+class MAsmJSMemoryAccess {
+ Scalar::Type accessType_;
+ bool needsBoundsCheck_;
+
+ public:
+ explicit MAsmJSMemoryAccess(Scalar::Type accessType)
+ : accessType_(accessType), needsBoundsCheck_(true) {
+ MOZ_ASSERT(accessType != Scalar::Uint8Clamped);
+ }
+
+ Scalar::Type accessType() const { return accessType_; }
+ unsigned byteSize() const { return TypedArrayElemSize(accessType()); }
+ bool needsBoundsCheck() const { return needsBoundsCheck_; }
+
+ wasm::MemoryAccessDesc access() const {
+ return wasm::MemoryAccessDesc(accessType_, Scalar::byteSize(accessType_), 0,
+ wasm::BytecodeOffset());
+ }
+
+ void removeBoundsCheck() { needsBoundsCheck_ = false; }
+};
+
+class MAsmJSLoadHeap
+ : public MVariadicInstruction, // 1 plus optional memoryBase and
+ // boundsCheckLimit
+ public MAsmJSMemoryAccess,
+ public NoTypePolicy::Data {
+ uint32_t memoryBaseIndex_;
+
+ explicit MAsmJSLoadHeap(uint32_t memoryBaseIndex, Scalar::Type accessType)
+ : MVariadicInstruction(classOpcode),
+ MAsmJSMemoryAccess(accessType),
+ memoryBaseIndex_(memoryBaseIndex) {
+ setResultType(ScalarTypeToMIRType(accessType));
+ }
+
+ public:
+ INSTRUCTION_HEADER(AsmJSLoadHeap)
+ NAMED_OPERANDS((0, base), (1, boundsCheckLimit))
+
+ static MAsmJSLoadHeap* New(TempAllocator& alloc, MDefinition* memoryBase,
+ MDefinition* base, MDefinition* boundsCheckLimit,
+ Scalar::Type accessType) {
+ uint32_t nextIndex = 2;
+ uint32_t memoryBaseIndex = memoryBase ? nextIndex++ : UINT32_MAX;
+
+ MAsmJSLoadHeap* load =
+ new (alloc) MAsmJSLoadHeap(memoryBaseIndex, accessType);
+ if (!load->init(alloc, nextIndex)) {
+ return nullptr;
+ }
+
+ load->initOperand(0, base);
+ load->initOperand(1, boundsCheckLimit);
+ if (memoryBase) {
+ load->initOperand(memoryBaseIndex, memoryBase);
+ }
+
+ return load;
+ }
+
+ bool hasMemoryBase() const { return memoryBaseIndex_ != UINT32_MAX; }
+ MDefinition* memoryBase() const {
+ MOZ_ASSERT(hasMemoryBase());
+ return getOperand(memoryBaseIndex_);
+ }
+
+ bool congruentTo(const MDefinition* ins) const override;
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::WasmHeap);
+ }
+ AliasType mightAlias(const MDefinition* def) const override;
+};
+
+class MAsmJSStoreHeap
+ : public MVariadicInstruction, // 2 plus optional memoryBase and
+ // boundsCheckLimit
+ public MAsmJSMemoryAccess,
+ public NoTypePolicy::Data {
+ uint32_t memoryBaseIndex_;
+
+ explicit MAsmJSStoreHeap(uint32_t memoryBaseIndex, Scalar::Type accessType)
+ : MVariadicInstruction(classOpcode),
+ MAsmJSMemoryAccess(accessType),
+ memoryBaseIndex_(memoryBaseIndex) {}
+
+ public:
+ INSTRUCTION_HEADER(AsmJSStoreHeap)
+ NAMED_OPERANDS((0, base), (1, value), (2, boundsCheckLimit))
+
+ static MAsmJSStoreHeap* New(TempAllocator& alloc, MDefinition* memoryBase,
+ MDefinition* base, MDefinition* boundsCheckLimit,
+ Scalar::Type accessType, MDefinition* v) {
+ uint32_t nextIndex = 3;
+ uint32_t memoryBaseIndex = memoryBase ? nextIndex++ : UINT32_MAX;
+
+ MAsmJSStoreHeap* store =
+ new (alloc) MAsmJSStoreHeap(memoryBaseIndex, accessType);
+ if (!store->init(alloc, nextIndex)) {
+ return nullptr;
+ }
+
+ store->initOperand(0, base);
+ store->initOperand(1, v);
+ store->initOperand(2, boundsCheckLimit);
+ if (memoryBase) {
+ store->initOperand(memoryBaseIndex, memoryBase);
+ }
+
+ return store;
+ }
+
+ bool hasMemoryBase() const { return memoryBaseIndex_ != UINT32_MAX; }
+ MDefinition* memoryBase() const {
+ MOZ_ASSERT(hasMemoryBase());
+ return getOperand(memoryBaseIndex_);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+class MWasmCompareExchangeHeap : public MVariadicInstruction,
+ public NoTypePolicy::Data {
+ wasm::MemoryAccessDesc access_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ explicit MWasmCompareExchangeHeap(const wasm::MemoryAccessDesc& access,
+ wasm::BytecodeOffset bytecodeOffset)
+ : MVariadicInstruction(classOpcode),
+ access_(access),
+ bytecodeOffset_(bytecodeOffset) {
+ setGuard(); // Not removable
+ setResultType(ScalarTypeToMIRType(access.type()));
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmCompareExchangeHeap)
+ NAMED_OPERANDS((0, base), (1, oldValue), (2, newValue), (3, instance),
+ (4, memoryBase))
+
+ static MWasmCompareExchangeHeap* New(TempAllocator& alloc,
+ wasm::BytecodeOffset bytecodeOffset,
+ MDefinition* memoryBase,
+ MDefinition* base,
+ const wasm::MemoryAccessDesc& access,
+ MDefinition* oldv, MDefinition* newv,
+ MDefinition* instance) {
+ MWasmCompareExchangeHeap* cas =
+ new (alloc) MWasmCompareExchangeHeap(access, bytecodeOffset);
+ if (!cas->init(alloc, 4 + !!memoryBase)) {
+ return nullptr;
+ }
+ cas->initOperand(0, base);
+ cas->initOperand(1, oldv);
+ cas->initOperand(2, newv);
+ cas->initOperand(3, instance);
+ if (memoryBase) {
+ cas->initOperand(4, memoryBase);
+ }
+ return cas;
+ }
+
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+class MWasmAtomicExchangeHeap : public MVariadicInstruction,
+ public NoTypePolicy::Data {
+ wasm::MemoryAccessDesc access_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ explicit MWasmAtomicExchangeHeap(const wasm::MemoryAccessDesc& access,
+ wasm::BytecodeOffset bytecodeOffset)
+ : MVariadicInstruction(classOpcode),
+ access_(access),
+ bytecodeOffset_(bytecodeOffset) {
+ setGuard(); // Not removable
+ setResultType(ScalarTypeToMIRType(access.type()));
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmAtomicExchangeHeap)
+ NAMED_OPERANDS((0, base), (1, value), (2, instance), (3, memoryBase))
+
+ static MWasmAtomicExchangeHeap* New(TempAllocator& alloc,
+ wasm::BytecodeOffset bytecodeOffset,
+ MDefinition* memoryBase,
+ MDefinition* base,
+ const wasm::MemoryAccessDesc& access,
+ MDefinition* value,
+ MDefinition* instance) {
+ MWasmAtomicExchangeHeap* xchg =
+ new (alloc) MWasmAtomicExchangeHeap(access, bytecodeOffset);
+ if (!xchg->init(alloc, 3 + !!memoryBase)) {
+ return nullptr;
+ }
+
+ xchg->initOperand(0, base);
+ xchg->initOperand(1, value);
+ xchg->initOperand(2, instance);
+ if (memoryBase) {
+ xchg->initOperand(3, memoryBase);
+ }
+
+ return xchg;
+ }
+
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+class MWasmAtomicBinopHeap : public MVariadicInstruction,
+ public NoTypePolicy::Data {
+ AtomicOp op_;
+ wasm::MemoryAccessDesc access_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ explicit MWasmAtomicBinopHeap(AtomicOp op,
+ const wasm::MemoryAccessDesc& access,
+ wasm::BytecodeOffset bytecodeOffset)
+ : MVariadicInstruction(classOpcode),
+ op_(op),
+ access_(access),
+ bytecodeOffset_(bytecodeOffset) {
+ setGuard(); // Not removable
+ setResultType(ScalarTypeToMIRType(access.type()));
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmAtomicBinopHeap)
+ NAMED_OPERANDS((0, base), (1, value), (2, instance), (3, memoryBase))
+
+ static MWasmAtomicBinopHeap* New(TempAllocator& alloc,
+ wasm::BytecodeOffset bytecodeOffset,
+ AtomicOp op, MDefinition* memoryBase,
+ MDefinition* base,
+ const wasm::MemoryAccessDesc& access,
+ MDefinition* v, MDefinition* instance) {
+ MWasmAtomicBinopHeap* binop =
+ new (alloc) MWasmAtomicBinopHeap(op, access, bytecodeOffset);
+ if (!binop->init(alloc, 3 + !!memoryBase)) {
+ return nullptr;
+ }
+
+ binop->initOperand(0, base);
+ binop->initOperand(1, v);
+ binop->initOperand(2, instance);
+ if (memoryBase) {
+ binop->initOperand(3, memoryBase);
+ }
+
+ return binop;
+ }
+
+ AtomicOp operation() const { return op_; }
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+class MWasmLoadInstanceDataField : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ MWasmLoadInstanceDataField(MIRType type, unsigned instanceDataOffset,
+ bool isConstant, MDefinition* instance)
+ : MUnaryInstruction(classOpcode, instance),
+ instanceDataOffset_(instanceDataOffset),
+ isConstant_(isConstant) {
+ MOZ_ASSERT(IsNumberType(type) || type == MIRType::Simd128 ||
+ type == MIRType::Pointer || type == MIRType::RefOrNull);
+ setResultType(type);
+ setMovable();
+ }
+
+ unsigned instanceDataOffset_;
+ bool isConstant_;
+
+ public:
+ INSTRUCTION_HEADER(WasmLoadInstanceDataField)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, instance))
+
+ unsigned instanceDataOffset() const { return instanceDataOffset_; }
+
+ HashNumber valueHash() const override;
+ bool congruentTo(const MDefinition* ins) const override;
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ AliasSet getAliasSet() const override {
+ return isConstant_ ? AliasSet::None()
+ : AliasSet::Load(AliasSet::WasmInstanceData);
+ }
+
+ AliasType mightAlias(const MDefinition* def) const override;
+};
+
+class MWasmLoadGlobalCell : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ MWasmLoadGlobalCell(MIRType type, MDefinition* cellPtr)
+ : MUnaryInstruction(classOpcode, cellPtr) {
+ setResultType(type);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmLoadGlobalCell)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, cellPtr))
+
+ // The default valueHash is good enough, because there are no non-operand
+ // fields.
+ bool congruentTo(const MDefinition* ins) const override;
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::WasmGlobalCell);
+ }
+
+ AliasType mightAlias(const MDefinition* def) const override;
+};
+
+class MWasmLoadTableElement : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ MWasmLoadTableElement(MDefinition* elements, MDefinition* index)
+ : MBinaryInstruction(classOpcode, elements, index) {
+ setResultType(MIRType::RefOrNull);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmLoadTableElement)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, elements))
+ NAMED_OPERANDS((1, index))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::WasmTableElement);
+ }
+};
+
+class MWasmStoreInstanceDataField : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ MWasmStoreInstanceDataField(unsigned instanceDataOffset, MDefinition* value,
+ MDefinition* instance)
+ : MBinaryInstruction(classOpcode, value, instance),
+ instanceDataOffset_(instanceDataOffset) {}
+
+ unsigned instanceDataOffset_;
+
+ public:
+ INSTRUCTION_HEADER(WasmStoreInstanceDataField)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value), (1, instance))
+
+ unsigned instanceDataOffset() const { return instanceDataOffset_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmInstanceData);
+ }
+};
+
+class MWasmStoreGlobalCell : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ MWasmStoreGlobalCell(MDefinition* value, MDefinition* cellPtr)
+ : MBinaryInstruction(classOpcode, value, cellPtr) {}
+
+ public:
+ INSTRUCTION_HEADER(WasmStoreGlobalCell)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, value), (1, cellPtr))
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmGlobalCell);
+ }
+};
+
+class MWasmStoreStackResult : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ MWasmStoreStackResult(MDefinition* stackResultArea, uint32_t offset,
+ MDefinition* value)
+ : MBinaryInstruction(classOpcode, stackResultArea, value),
+ offset_(offset) {}
+
+ uint32_t offset_;
+
+ public:
+ INSTRUCTION_HEADER(WasmStoreStackResult)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, stackResultArea), (1, value))
+
+ uint32_t offset() const { return offset_; }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmStackResult);
+ }
+};
+
+// Represents a known-good derived pointer into an object or memory region (in
+// the most general sense) that will not move while the derived pointer is live.
+// The `offset` *must* be a valid offset into the object represented by `base`;
+// hence overflow in the address calculation will never be an issue. `offset`
+// must be representable as a 31-bit unsigned integer.
+//
+// DO NOT use this with a base value of any JS-heap-resident object type.
+// Such a value would need to be adjusted during GC, yet we have no mechanism
+// to do that. See bug 1810090.
+
+class MWasmDerivedPointer : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ MWasmDerivedPointer(MDefinition* base, size_t offset)
+ : MUnaryInstruction(classOpcode, base), offset_(uint32_t(offset)) {
+ MOZ_ASSERT(offset <= INT32_MAX);
+ // Do not change this to allow `base` to be a GC-heap allocated type.
+ MOZ_ASSERT(base->type() == MIRType::Pointer ||
+ base->type() == TargetWordMIRType());
+ setResultType(MIRType::Pointer);
+ setMovable();
+ }
+
+ uint32_t offset_;
+
+ public:
+ INSTRUCTION_HEADER(WasmDerivedPointer)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, base))
+
+ uint32_t offset() const { return offset_; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmDerivedPointer()->offset() == offset();
+ }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[64];
+ SprintfLiteral(buf, "(offs=%lld)", (long long int)offset_);
+ extras->add(buf);
+ }
+#endif
+
+ ALLOW_CLONE(MWasmDerivedPointer)
+};
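+
+// For illustration: the value an MWasmDerivedPointer computes is simply
+// `base + offset` with a compile-time-constant, known-in-bounds offset, so
+// the addition can never wrap (illustrative helper; a char* stands in for the
+// untyped base pointer).
+static inline char* ExampleDerivedPointer(char* base, uint32_t offset) {
+  // offset <= INT32_MAX and the region behind `base` does not move while the
+  // derived pointer is live, per the comment above.
+  return base + offset;
+}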
+
+// As with MWasmDerivedPointer, DO NOT use this with a base value of any
+// JS-heap-resident object type.
+class MWasmDerivedIndexPointer : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ MWasmDerivedIndexPointer(MDefinition* base, MDefinition* index, Scale scale)
+ : MBinaryInstruction(classOpcode, base, index), scale_(scale) {
+ // Do not change this to allow `base` to be a GC-heap allocated type.
+ MOZ_ASSERT(base->type() == MIRType::Pointer);
+ setResultType(MIRType::Pointer);
+ setMovable();
+ }
+
+ Scale scale_;
+
+ public:
+ INSTRUCTION_HEADER(WasmDerivedIndexPointer)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, base))
+ NAMED_OPERANDS((1, index))
+
+ Scale scale() const { return scale_; }
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmDerivedIndexPointer()->scale() == scale();
+ }
+
+ ALLOW_CLONE(MWasmDerivedIndexPointer)
+};
+
+// Whether to perform a pre-write barrier for a wasm store reference.
+enum class WasmPreBarrierKind : uint8_t { None, Normal };
+
+// Stores a reference to an address. This performs a pre-barrier on the address,
+// but not a post-barrier. A post-barrier must be performed separately, if it's
+// required. The accessed location is `valueBase + valueOffset`; the latter
+// must be representable as a 31-bit unsigned integer.
+
+class MWasmStoreRef : public MAryInstruction<3>, public NoTypePolicy::Data {
+ uint32_t offset_;
+ AliasSet::Flag aliasSet_;
+ WasmPreBarrierKind preBarrierKind_;
+
+ MWasmStoreRef(MDefinition* instance, MDefinition* valueBase,
+ size_t valueOffset, MDefinition* value, AliasSet::Flag aliasSet,
+ WasmPreBarrierKind preBarrierKind)
+ : MAryInstruction<3>(classOpcode),
+ offset_(uint32_t(valueOffset)),
+ aliasSet_(aliasSet),
+ preBarrierKind_(preBarrierKind) {
+ MOZ_ASSERT(valueOffset <= INT32_MAX);
+ MOZ_ASSERT(valueBase->type() == MIRType::Pointer ||
+ valueBase->type() == MIRType::StackResults);
+ MOZ_ASSERT(value->type() == MIRType::RefOrNull);
+ initOperand(0, instance);
+ initOperand(1, valueBase);
+ initOperand(2, value);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmStoreRef)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, instance), (1, valueBase), (2, value))
+
+ uint32_t offset() const { return offset_; }
+ AliasSet getAliasSet() const override { return AliasSet::Store(aliasSet_); }
+ WasmPreBarrierKind preBarrierKind() const { return preBarrierKind_; }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[64];
+ SprintfLiteral(buf, "(offs=%lld)", (long long int)offset_);
+ extras->add(buf);
+ }
+#endif
+};
+
+// Given a value being written to another object, update the generational store
+// buffer if the value is in the nursery and the object is in the tenured heap.
+class MWasmPostWriteBarrier : public MQuaternaryInstruction,
+ public NoTypePolicy::Data {
+ uint32_t valueOffset_;
+
+ MWasmPostWriteBarrier(MDefinition* instance, MDefinition* object,
+ MDefinition* valueBase, uint32_t valueOffset,
+ MDefinition* value)
+ : MQuaternaryInstruction(classOpcode, instance, object, valueBase, value),
+ valueOffset_(valueOffset) {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmPostWriteBarrier)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, instance), (1, object), (2, valueBase), (3, value))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ uint32_t valueOffset() const { return valueOffset_; }
+
+ ALLOW_CLONE(MWasmPostWriteBarrier)
+};
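+
+// Sketch of the barrier condition this node implements. Everything below is a
+// toy model (made-up store buffer type, <vector> assumed), not the real GC
+// interface.
+struct ExampleStoreBuffer {
+  std::vector<void**> slots;  // remembered tenured -> nursery edges
+};
+static inline void ExamplePostWriteBarrier(ExampleStoreBuffer& sb,
+                                           bool objectIsTenured,
+                                           bool valueIsInNursery,
+                                           void** slot) {
+  // A minor GC traces only the nursery and the store buffer, so a pointer
+  // from tenured memory into the nursery must be recorded here for the stored
+  // value to be found and updated.
+  if (objectIsTenured && valueIsInNursery) {
+    sb.slots.push_back(slot);
+  }
+}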
+
+class MWasmParameter : public MNullaryInstruction {
+ ABIArg abi_;
+
+ MWasmParameter(ABIArg abi, MIRType mirType)
+ : MNullaryInstruction(classOpcode), abi_(abi) {
+ setResultType(mirType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmParameter)
+ TRIVIAL_NEW_WRAPPERS
+
+ ABIArg abi() const { return abi_; }
+};
+
+class MWasmReturn : public MAryControlInstruction<2, 0>,
+ public NoTypePolicy::Data {
+ MWasmReturn(MDefinition* ins, MDefinition* instance)
+ : MAryControlInstruction(classOpcode) {
+ initOperand(0, ins);
+ initOperand(1, instance);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmReturn)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MWasmReturnVoid : public MAryControlInstruction<1, 0>,
+ public NoTypePolicy::Data {
+ explicit MWasmReturnVoid(MDefinition* instance)
+ : MAryControlInstruction(classOpcode) {
+ initOperand(0, instance);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmReturnVoid)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MWasmStackArg : public MUnaryInstruction, public NoTypePolicy::Data {
+ MWasmStackArg(uint32_t spOffset, MDefinition* ins)
+ : MUnaryInstruction(classOpcode, ins), spOffset_(spOffset) {}
+
+ uint32_t spOffset_;
+
+ public:
+ INSTRUCTION_HEADER(WasmStackArg)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, arg))
+
+ uint32_t spOffset() const { return spOffset_; }
+ void incrementOffset(uint32_t inc) { spOffset_ += inc; }
+};
+
+template <typename Location>
+class MWasmResultBase : public MNullaryInstruction {
+ Location loc_;
+
+ protected:
+ MWasmResultBase(Opcode op, MIRType type, Location loc)
+ : MNullaryInstruction(op), loc_(loc) {
+ setResultType(type);
+ setCallResultCapture();
+ }
+
+ public:
+ Location loc() { return loc_; }
+};
+
+class MWasmRegisterResult : public MWasmResultBase<Register> {
+ MWasmRegisterResult(MIRType type, Register reg)
+ : MWasmResultBase(classOpcode, type, reg) {}
+
+ public:
+ INSTRUCTION_HEADER(WasmRegisterResult)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MWasmFloatRegisterResult : public MWasmResultBase<FloatRegister> {
+ MWasmFloatRegisterResult(MIRType type, FloatRegister reg)
+ : MWasmResultBase(classOpcode, type, reg) {}
+
+ public:
+ INSTRUCTION_HEADER(WasmFloatRegisterResult)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MWasmRegister64Result : public MWasmResultBase<Register64> {
+ explicit MWasmRegister64Result(Register64 reg)
+ : MWasmResultBase(classOpcode, MIRType::Int64, reg) {}
+
+ public:
+ INSTRUCTION_HEADER(WasmRegister64Result)
+ TRIVIAL_NEW_WRAPPERS
+};
+
+class MWasmStackResultArea : public MNullaryInstruction {
+ public:
+ class StackResult {
+ // Offset in bytes from lowest address of stack result area.
+ uint32_t offset_;
+ MIRType type_;
+
+ public:
+ StackResult() : type_(MIRType::Undefined) {}
+ StackResult(uint32_t offset, MIRType type) : offset_(offset), type_(type) {}
+
+ bool initialized() const { return type_ != MIRType::Undefined; }
+ uint32_t offset() const {
+ MOZ_ASSERT(initialized());
+ return offset_;
+ }
+ MIRType type() const {
+ MOZ_ASSERT(initialized());
+ return type_;
+ }
+ uint32_t endOffset() const {
+ return offset() + wasm::MIRTypeToABIResultSize(type());
+ }
+ };
+
+ private:
+ FixedList<StackResult> results_;
+ uint32_t base_;
+
+ explicit MWasmStackResultArea()
+ : MNullaryInstruction(classOpcode), base_(UINT32_MAX) {
+ setResultType(MIRType::StackResults);
+ }
+
+ void assertInitialized() const {
+ MOZ_ASSERT(results_.length() != 0);
+#ifdef DEBUG
+ for (size_t i = 0; i < results_.length(); i++) {
+ MOZ_ASSERT(results_[i].initialized());
+ }
+#endif
+ }
+
+ bool baseInitialized() const { return base_ != UINT32_MAX; }
+
+ public:
+ INSTRUCTION_HEADER(WasmStackResultArea)
+ TRIVIAL_NEW_WRAPPERS
+
+ [[nodiscard]] bool init(TempAllocator& alloc, size_t stackResultCount) {
+ MOZ_ASSERT(results_.length() == 0);
+ MOZ_ASSERT(stackResultCount > 0);
+ if (!results_.init(alloc, stackResultCount)) {
+ return false;
+ }
+ for (size_t n = 0; n < stackResultCount; n++) {
+ results_[n] = StackResult();
+ }
+ return true;
+ }
+
+ size_t resultCount() const { return results_.length(); }
+ const StackResult& result(size_t n) const {
+ MOZ_ASSERT(results_[n].initialized());
+ return results_[n];
+ }
+ void initResult(size_t n, const StackResult& loc) {
+ MOZ_ASSERT(!results_[n].initialized());
+ MOZ_ASSERT((n == 0) == (loc.offset() == 0));
+ MOZ_ASSERT_IF(n > 0, loc.offset() >= result(n - 1).endOffset());
+ results_[n] = loc;
+ }
+
+ uint32_t byteSize() const {
+ assertInitialized();
+ return result(resultCount() - 1).endOffset();
+ }
+
+ // Stack index indicating base of stack area.
+ uint32_t base() const {
+ MOZ_ASSERT(baseInitialized());
+ return base_;
+ }
+ void setBase(uint32_t base) {
+ MOZ_ASSERT(!baseInitialized());
+ base_ = base;
+ MOZ_ASSERT(baseInitialized());
+ }
+};
+
+class MWasmStackResult : public MUnaryInstruction, public NoTypePolicy::Data {
+ uint32_t resultIdx_;
+
+ MWasmStackResult(MWasmStackResultArea* resultArea, size_t idx)
+ : MUnaryInstruction(classOpcode, resultArea), resultIdx_(idx) {
+ setResultType(result().type());
+ setCallResultCapture();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmStackResult)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, resultArea))
+
+ const MWasmStackResultArea::StackResult& result() const {
+ return resultArea()->toWasmStackResultArea()->result(resultIdx_);
+ }
+};
+
+// Arguments for constructing a catchable wasm call inside of a try block.
+struct MWasmCallTryDesc {
+ bool inTry;
+ uint32_t relativeTryDepth;
+ size_t tryNoteIndex;
+ MBasicBlock* fallthroughBlock;
+ MBasicBlock* prePadBlock;
+
+ MWasmCallTryDesc()
+ : inTry(false),
+ relativeTryDepth(0),
+ tryNoteIndex(0),
+ fallthroughBlock(nullptr),
+ prePadBlock(nullptr) {}
+};
+
+// Mixin class for wasm calls that may or may not be catchable.
+class MWasmCallBase {
+ public:
+ struct Arg {
+ AnyRegister reg;
+ MDefinition* def;
+ Arg(AnyRegister reg, MDefinition* def) : reg(reg), def(def) {}
+ };
+ typedef Vector<Arg, 8, SystemAllocPolicy> Args;
+
+ protected:
+ wasm::CallSiteDesc desc_;
+ wasm::CalleeDesc callee_;
+ wasm::FailureMode builtinMethodFailureMode_;
+ FixedList<AnyRegister> argRegs_;
+ uint32_t stackArgAreaSizeUnaligned_;
+ ABIArg instanceArg_;
+ bool inTry_;
+ size_t tryNoteIndex_;
+
+ MWasmCallBase(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
+ uint32_t stackArgAreaSizeUnaligned, bool inTry,
+ size_t tryNoteIndex)
+ : desc_(desc),
+ callee_(callee),
+ builtinMethodFailureMode_(wasm::FailureMode::Infallible),
+ stackArgAreaSizeUnaligned_(stackArgAreaSizeUnaligned),
+ inTry_(inTry),
+ tryNoteIndex_(tryNoteIndex) {}
+
+ template <class MVariadicT>
+ [[nodiscard]] bool initWithArgs(TempAllocator& alloc, MVariadicT* ins,
+ const Args& args,
+ MDefinition* tableIndexOrRef) {
+ if (!argRegs_.init(alloc, args.length())) {
+ return false;
+ }
+ for (size_t i = 0; i < argRegs_.length(); i++) {
+ argRegs_[i] = args[i].reg;
+ }
+
+ if (!ins->init(alloc, argRegs_.length() + (tableIndexOrRef ? 1 : 0))) {
+ return false;
+ }
+ // FixedList doesn't initialize its elements, so do an unchecked init.
+ for (size_t i = 0; i < argRegs_.length(); i++) {
+ ins->initOperand(i, args[i].def);
+ }
+ if (tableIndexOrRef) {
+ ins->initOperand(argRegs_.length(), tableIndexOrRef);
+ }
+ return true;
+ }
+
+ public:
+ static bool IsWasmCall(MDefinition* def) {
+ return def->isWasmCallCatchable() || def->isWasmCallUncatchable();
+ }
+
+ size_t numArgs() const { return argRegs_.length(); }
+ AnyRegister registerForArg(size_t index) const {
+ MOZ_ASSERT(index < numArgs());
+ return argRegs_[index];
+ }
+ const wasm::CallSiteDesc& desc() const { return desc_; }
+ const wasm::CalleeDesc& callee() const { return callee_; }
+ wasm::FailureMode builtinMethodFailureMode() const {
+ MOZ_ASSERT(callee_.which() == wasm::CalleeDesc::BuiltinInstanceMethod);
+ return builtinMethodFailureMode_;
+ }
+ uint32_t stackArgAreaSizeUnaligned() const {
+ return stackArgAreaSizeUnaligned_;
+ }
+
+ const ABIArg& instanceArg() const { return instanceArg_; }
+
+ bool inTry() const { return inTry_; }
+ size_t tryNoteIndex() const { return tryNoteIndex_; }
+};
+
+// A wasm call that is catchable. This instruction is a control instruction,
+// and terminates the block it is on. A normal return will proceed in the
+// fallthrough block. An exceptional return will unwind into the landing pad
+// block for this call. The landing pad block must begin with an
+// MWasmCallLandingPrePad.
+class MWasmCallCatchable final : public MVariadicControlInstruction<2>,
+ public MWasmCallBase,
+ public NoTypePolicy::Data {
+ MWasmCallCatchable(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee,
+ uint32_t stackArgAreaSizeUnaligned, size_t tryNoteIndex)
+ : MVariadicControlInstruction(classOpcode),
+ MWasmCallBase(desc, callee, stackArgAreaSizeUnaligned, true,
+ tryNoteIndex) {}
+
+ public:
+ INSTRUCTION_HEADER(WasmCallCatchable)
+
+ static MWasmCallCatchable* New(TempAllocator& alloc,
+ const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee,
+ const Args& args,
+ uint32_t stackArgAreaSizeUnaligned,
+ const MWasmCallTryDesc& tryDesc,
+ MDefinition* tableIndexOrRef = nullptr);
+
+ bool possiblyCalls() const override { return true; }
+
+ static const size_t FallthroughBranchIndex = 0;
+ static const size_t PrePadBranchIndex = 1;
+};
+
+// A wasm call that is not catchable. This instruction is not a control
+// instruction, and therefore is not a block terminator.
+class MWasmCallUncatchable final : public MVariadicInstruction,
+ public MWasmCallBase,
+ public NoTypePolicy::Data {
+ MWasmCallUncatchable(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee,
+ uint32_t stackArgAreaSizeUnaligned)
+ : MVariadicInstruction(classOpcode),
+ MWasmCallBase(desc, callee, stackArgAreaSizeUnaligned, false, 0) {}
+
+ public:
+ INSTRUCTION_HEADER(WasmCallUncatchable)
+
+ static MWasmCallUncatchable* New(TempAllocator& alloc,
+ const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee,
+ const Args& args,
+ uint32_t stackArgAreaSizeUnaligned,
+ MDefinition* tableIndexOrRef = nullptr);
+
+ static MWasmCallUncatchable* NewBuiltinInstanceMethodCall(
+ TempAllocator& alloc, const wasm::CallSiteDesc& desc,
+ const wasm::SymbolicAddress builtin, wasm::FailureMode failureMode,
+ const ABIArg& instanceArg, const Args& args,
+ uint32_t stackArgAreaSizeUnaligned);
+
+ bool possiblyCalls() const override { return true; }
+};
+
+// A marker instruction for a block which is the landing pad for a catchable
+// wasm call. This instruction does not emit any code, only filling in
+// metadata. This instruction must be the first instruction added to the
+// landing pad block.
+class MWasmCallLandingPrePad : public MNullaryInstruction {
+ // The block of the call that may unwind to this landing pad.
+ MBasicBlock* callBlock_;
+ // The index of the try note to initialize a landing pad for.
+ size_t tryNoteIndex_;
+
+ explicit MWasmCallLandingPrePad(MBasicBlock* callBlock, size_t tryNoteIndex)
+ : MNullaryInstruction(classOpcode),
+ callBlock_(callBlock),
+ tryNoteIndex_(tryNoteIndex) {
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmCallLandingPrePad)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ size_t tryNoteIndex() { return tryNoteIndex_; }
+ MBasicBlock* callBlock() { return callBlock_; }
+};
+
+class MWasmSelect : public MTernaryInstruction, public NoTypePolicy::Data {
+ MWasmSelect(MDefinition* trueExpr, MDefinition* falseExpr,
+ MDefinition* condExpr)
+ : MTernaryInstruction(classOpcode, trueExpr, falseExpr, condExpr) {
+ MOZ_ASSERT(condExpr->type() == MIRType::Int32);
+ MOZ_ASSERT(trueExpr->type() == falseExpr->type());
+ setResultType(trueExpr->type());
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmSelect)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, trueExpr), (1, falseExpr), (2, condExpr))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ ALLOW_CLONE(MWasmSelect)
+};
+
+class MWasmReinterpret : public MUnaryInstruction, public NoTypePolicy::Data {
+ MWasmReinterpret(MDefinition* val, MIRType toType)
+ : MUnaryInstruction(classOpcode, val) {
+ switch (val->type()) {
+ case MIRType::Int32:
+ MOZ_ASSERT(toType == MIRType::Float32);
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(toType == MIRType::Int32);
+ break;
+ case MIRType::Double:
+ MOZ_ASSERT(toType == MIRType::Int64);
+ break;
+ case MIRType::Int64:
+ MOZ_ASSERT(toType == MIRType::Double);
+ break;
+ default:
+ MOZ_CRASH("unexpected reinterpret conversion");
+ }
+ setMovable();
+ setResultType(toType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmReinterpret)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+
+ ALLOW_CLONE(MWasmReinterpret)
+};
+
+class MRotate : public MBinaryInstruction, public NoTypePolicy::Data {
+ bool isLeftRotate_;
+
+ MRotate(MDefinition* input, MDefinition* count, MIRType type,
+ bool isLeftRotate)
+ : MBinaryInstruction(classOpcode, input, count),
+ isLeftRotate_(isLeftRotate) {
+ setMovable();
+ setResultType(type);
+ // Prevent reordering. Although there's no problem eliding call result
+ // definitions, there's also no need, as they cause no codegen.
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(Rotate)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, input), (1, count))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ ins->toRotate()->isLeftRotate() == isLeftRotate_;
+ }
+
+ bool isLeftRotate() const { return isLeftRotate_; }
+
+ ALLOW_CLONE(MRotate)
+};
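+
+// For reference, a portable way to express the 32-bit rotation this node
+// models (illustrative helper only; not taken from the codebase):
+static inline uint32_t ExampleRotateLeft32(uint32_t x, uint32_t count) {
+  count &= 31;  // rotate counts are taken modulo the bit width
+  return (x << count) | (x >> ((32 - count) & 31));  // avoids a shift by 32
+}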
+
+// Wasm SIMD.
+//
+// See comment in WasmIonCompile.cpp for a justification for these nodes.
+
+// (v128, v128, v128) -> v128 effect-free operation.
+class MWasmTernarySimd128 : public MTernaryInstruction,
+ public NoTypePolicy::Data {
+ wasm::SimdOp simdOp_;
+
+ MWasmTernarySimd128(MDefinition* v0, MDefinition* v1, MDefinition* v2,
+ wasm::SimdOp simdOp)
+ : MTernaryInstruction(classOpcode, v0, v1, v2), simdOp_(simdOp) {
+ setMovable();
+ setResultType(MIRType::Simd128);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmTernarySimd128)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, v0), (1, v1), (2, v2))
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+ // If the control mask of a bitselect allows the operation to be specialized
+ // as a shuffle and it is profitable to specialize it on this platform, return
+ // true and the appropriate shuffle mask.
+ bool specializeBitselectConstantMaskAsShuffle(int8_t shuffle[16]);
+  // Checks whether a more relaxed version of lane select can be used. Returns
+  // true if the bit-mask input is expected to be all 0s or all 1s within each
+  // 8-bit lane, false otherwise.
+ bool canRelaxBitselect();
+#endif
+
+ wasm::SimdOp simdOp() const { return simdOp_; }
+
+ ALLOW_CLONE(MWasmTernarySimd128)
+};
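+
+// Sketch of the bitselect -> shuffle specialization mentioned above. The lane
+// numbering convention (0..15 = first operand, 16..31 = second operand) is an
+// assumption made for the example, not necessarily the compiler's encoding.
+static inline bool ExampleBitselectMaskToShuffle(const uint8_t control[16],
+                                                 int8_t shuffle[16]) {
+  // bitselect(a, b, control) == (a & control) | (b & ~control), so a control
+  // byte of all-ones picks the byte from `a` and all-zeroes picks it from `b`.
+  for (int i = 0; i < 16; i++) {
+    if (control[i] == 0xFF) {
+      shuffle[i] = int8_t(i);
+    } else if (control[i] == 0x00) {
+      shuffle[i] = int8_t(16 + i);
+    } else {
+      return false;  // mixed bits within a byte cannot be expressed as shuffle
+    }
+  }
+  return true;
+}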
+
+// (v128, v128) -> v128 effect-free operations.
+class MWasmBinarySimd128 : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ wasm::SimdOp simdOp_;
+
+ MWasmBinarySimd128(MDefinition* lhs, MDefinition* rhs, bool commutative,
+ wasm::SimdOp simdOp)
+ : MBinaryInstruction(classOpcode, lhs, rhs), simdOp_(simdOp) {
+ setMovable();
+ setResultType(MIRType::Simd128);
+ if (commutative) {
+ setCommutative();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBinarySimd128)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ return ins->toWasmBinarySimd128()->simdOp() == simdOp_ &&
+ congruentIfOperandsEqual(ins);
+ }
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+
+  // Checks if the pmaddubsw operation is supported.
+ bool canPmaddubsw();
+#endif
+
+ wasm::SimdOp simdOp() const { return simdOp_; }
+
+ // Platform-dependent specialization.
+ bool specializeForConstantRhs();
+
+ ALLOW_CLONE(MWasmBinarySimd128)
+};
+
+// (v128, const) -> v128 effect-free operations.
+class MWasmBinarySimd128WithConstant : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ SimdConstant rhs_;
+ wasm::SimdOp simdOp_;
+
+ MWasmBinarySimd128WithConstant(MDefinition* lhs, const SimdConstant& rhs,
+ wasm::SimdOp simdOp)
+ : MUnaryInstruction(classOpcode, lhs), rhs_(rhs), simdOp_(simdOp) {
+ setMovable();
+ setResultType(MIRType::Simd128);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmBinarySimd128WithConstant)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ return ins->toWasmBinarySimd128WithConstant()->simdOp() == simdOp_ &&
+ congruentIfOperandsEqual(ins) &&
+ rhs_.bitwiseEqual(ins->toWasmBinarySimd128WithConstant()->rhs());
+ }
+
+ wasm::SimdOp simdOp() const { return simdOp_; }
+ MDefinition* lhs() const { return input(); }
+ const SimdConstant& rhs() const { return rhs_; }
+
+ ALLOW_CLONE(MWasmBinarySimd128WithConstant)
+};
+
+// (v128, scalar, imm) -> v128 effect-free operations.
+class MWasmReplaceLaneSimd128 : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ uint32_t laneIndex_;
+ wasm::SimdOp simdOp_;
+
+ MWasmReplaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
+ uint32_t laneIndex, wasm::SimdOp simdOp)
+ : MBinaryInstruction(classOpcode, lhs, rhs),
+ laneIndex_(laneIndex),
+ simdOp_(simdOp) {
+ setMovable();
+ setResultType(MIRType::Simd128);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmReplaceLaneSimd128)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ return ins->toWasmReplaceLaneSimd128()->simdOp() == simdOp_ &&
+ ins->toWasmReplaceLaneSimd128()->laneIndex() == laneIndex_ &&
+ congruentIfOperandsEqual(ins);
+ }
+
+ uint32_t laneIndex() const { return laneIndex_; }
+ wasm::SimdOp simdOp() const { return simdOp_; }
+
+ ALLOW_CLONE(MWasmReplaceLaneSimd128)
+};
+
+// (scalar) -> v128 effect-free operations.
+class MWasmScalarToSimd128 : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ wasm::SimdOp simdOp_;
+
+ MWasmScalarToSimd128(MDefinition* src, wasm::SimdOp simdOp)
+ : MUnaryInstruction(classOpcode, src), simdOp_(simdOp) {
+ setMovable();
+ setResultType(MIRType::Simd128);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmScalarToSimd128)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ return ins->toWasmScalarToSimd128()->simdOp() == simdOp_ &&
+ congruentIfOperandsEqual(ins);
+ }
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+#endif
+
+ wasm::SimdOp simdOp() const { return simdOp_; }
+
+ ALLOW_CLONE(MWasmScalarToSimd128)
+};
+
+// (v128, imm) -> scalar effect-free operations.
+class MWasmReduceSimd128 : public MUnaryInstruction, public NoTypePolicy::Data {
+ wasm::SimdOp simdOp_;
+ uint32_t imm_;
+
+ MWasmReduceSimd128(MDefinition* src, wasm::SimdOp simdOp, MIRType outType,
+ uint32_t imm)
+ : MUnaryInstruction(classOpcode, src), simdOp_(simdOp), imm_(imm) {
+ setMovable();
+ setResultType(outType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmReduceSimd128)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ return ins->toWasmReduceSimd128()->simdOp() == simdOp_ &&
+ ins->toWasmReduceSimd128()->imm() == imm_ &&
+ congruentIfOperandsEqual(ins);
+ }
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+#endif
+
+ uint32_t imm() const { return imm_; }
+ wasm::SimdOp simdOp() const { return simdOp_; }
+
+ ALLOW_CLONE(MWasmReduceSimd128)
+};
+
+class MWasmLoadLaneSimd128
+ : public MVariadicInstruction, // memoryBase is nullptr on some platforms
+ public NoTypePolicy::Data {
+ wasm::MemoryAccessDesc access_;
+ uint32_t laneSize_;
+ uint32_t laneIndex_;
+ uint32_t memoryBaseIndex_;
+
+ MWasmLoadLaneSimd128(const wasm::MemoryAccessDesc& access, uint32_t laneSize,
+ uint32_t laneIndex, uint32_t memoryBaseIndex)
+ : MVariadicInstruction(classOpcode),
+ access_(access),
+ laneSize_(laneSize),
+ laneIndex_(laneIndex),
+ memoryBaseIndex_(memoryBaseIndex) {
+ MOZ_ASSERT(!access_.isAtomic());
+ setGuard();
+ setResultType(MIRType::Simd128);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmLoadLaneSimd128)
+ NAMED_OPERANDS((0, base), (1, value));
+
+ static MWasmLoadLaneSimd128* New(TempAllocator& alloc,
+ MDefinition* memoryBase, MDefinition* base,
+ const wasm::MemoryAccessDesc& access,
+ uint32_t laneSize, uint32_t laneIndex,
+ MDefinition* value) {
+ uint32_t nextIndex = 2;
+ uint32_t memoryBaseIndex = memoryBase ? nextIndex++ : UINT32_MAX;
+
+ MWasmLoadLaneSimd128* load = new (alloc)
+ MWasmLoadLaneSimd128(access, laneSize, laneIndex, memoryBaseIndex);
+ if (!load->init(alloc, nextIndex)) {
+ return nullptr;
+ }
+
+ load->initOperand(0, base);
+ load->initOperand(1, value);
+ if (memoryBase) {
+ load->initOperand(memoryBaseIndex, memoryBase);
+ }
+
+ return load;
+ }
+
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+ uint32_t laneSize() const { return laneSize_; }
+ uint32_t laneIndex() const { return laneIndex_; }
+ bool hasMemoryBase() const { return memoryBaseIndex_ != UINT32_MAX; }
+ MDefinition* memoryBase() const {
+ MOZ_ASSERT(hasMemoryBase());
+ return getOperand(memoryBaseIndex_);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Load(AliasSet::WasmHeap);
+ }
+};
+
+class MWasmStoreLaneSimd128 : public MVariadicInstruction,
+ public NoTypePolicy::Data {
+ wasm::MemoryAccessDesc access_;
+ uint32_t laneSize_;
+ uint32_t laneIndex_;
+ uint32_t memoryBaseIndex_;
+
+ explicit MWasmStoreLaneSimd128(const wasm::MemoryAccessDesc& access,
+ uint32_t laneSize, uint32_t laneIndex,
+ uint32_t memoryBaseIndex)
+ : MVariadicInstruction(classOpcode),
+ access_(access),
+ laneSize_(laneSize),
+ laneIndex_(laneIndex),
+ memoryBaseIndex_(memoryBaseIndex) {
+ MOZ_ASSERT(!access_.isAtomic());
+ setGuard();
+ setResultType(MIRType::Simd128);
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmStoreLaneSimd128)
+ NAMED_OPERANDS((0, base), (1, value))
+
+ static MWasmStoreLaneSimd128* New(TempAllocator& alloc,
+ MDefinition* memoryBase, MDefinition* base,
+ const wasm::MemoryAccessDesc& access,
+ uint32_t laneSize, uint32_t laneIndex,
+ MDefinition* value) {
+ uint32_t nextIndex = 2;
+ uint32_t memoryBaseIndex = memoryBase ? nextIndex++ : UINT32_MAX;
+
+ MWasmStoreLaneSimd128* store = new (alloc)
+ MWasmStoreLaneSimd128(access, laneSize, laneIndex, memoryBaseIndex);
+ if (!store->init(alloc, nextIndex)) {
+ return nullptr;
+ }
+
+ store->initOperand(0, base);
+ store->initOperand(1, value);
+ if (memoryBase) {
+ store->initOperand(memoryBaseIndex, memoryBase);
+ }
+
+ return store;
+ }
+
+ const wasm::MemoryAccessDesc& access() const { return access_; }
+ uint32_t laneSize() const { return laneSize_; }
+ uint32_t laneIndex() const { return laneIndex_; }
+ bool hasMemoryBase() const { return memoryBaseIndex_ != UINT32_MAX; }
+ MDefinition* memoryBase() const {
+ MOZ_ASSERT(hasMemoryBase());
+ return getOperand(memoryBaseIndex_);
+ }
+
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::WasmHeap);
+ }
+};
+
+// End Wasm SIMD
+
+// Used by MIR building to represent the bytecode result of an operation for
+// which an MBail was generated, to balance the basic block's MDefinition stack.
+class MUnreachableResult : public MNullaryInstruction {
+ explicit MUnreachableResult(MIRType type) : MNullaryInstruction(classOpcode) {
+ MOZ_ASSERT(type != MIRType::None);
+ setResultType(type);
+ }
+
+ public:
+ INSTRUCTION_HEADER(UnreachableResult)
+ TRIVIAL_NEW_WRAPPERS
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins);
+ }
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
+class MIonToWasmCall final : public MVariadicInstruction,
+ public NoTypePolicy::Data {
+ CompilerGCPointer<WasmInstanceObject*> instanceObj_;
+ const wasm::FuncExport& funcExport_;
+
+ MIonToWasmCall(WasmInstanceObject* instanceObj, MIRType resultType,
+ const wasm::FuncExport& funcExport)
+ : MVariadicInstruction(classOpcode),
+ instanceObj_(instanceObj),
+ funcExport_(funcExport) {
+ setResultType(resultType);
+ }
+
+ public:
+ INSTRUCTION_HEADER(IonToWasmCall);
+
+ static MIonToWasmCall* New(TempAllocator& alloc,
+ WasmInstanceObject* instanceObj,
+ const wasm::FuncExport& funcExport);
+
+ void initArg(size_t i, MDefinition* arg) { initOperand(i, arg); }
+
+ WasmInstanceObject* instanceObject() const { return instanceObj_; }
+ wasm::Instance* instance() const { return &instanceObj_->instance(); }
+ const wasm::FuncExport& funcExport() const { return funcExport_; }
+ bool possiblyCalls() const override { return true; }
+#ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override;
+#endif
+};
+
+// For accesses to wasm object fields, we need to be able to describe 8- and
+// 16-bit accesses. But MIRType can't represent those. Hence these two
+// supplemental enums, used for reading and writing fields respectively.
+
+// Indicates how to widen an 8- or 16-bit value (when it is read from memory).
+enum class MWideningOp : uint8_t { None, FromU16, FromS16, FromU8, FromS8 };
+
+#ifdef JS_JITSPEW
+static inline const char* StringFromMWideningOp(MWideningOp op) {
+ switch (op) {
+ case MWideningOp::None:
+ return "None";
+ case MWideningOp::FromU16:
+ return "FromU16";
+ case MWideningOp::FromS16:
+ return "FromS16";
+ case MWideningOp::FromU8:
+ return "FromU8";
+ case MWideningOp::FromS8:
+ return "FromS8";
+ default:
+ break;
+ }
+ MOZ_CRASH("Unknown MWideningOp");
+}
+#endif
+
+// Indicates how to narrow a 32-bit value (when it is written to memory). The
+// operation is a simple truncate.
+enum class MNarrowingOp : uint8_t { None, To16, To8 };
+
+#ifdef JS_JITSPEW
+static inline const char* StringFromMNarrowingOp(MNarrowingOp op) {
+ switch (op) {
+ case MNarrowingOp::None:
+ return "None";
+ case MNarrowingOp::To16:
+ return "To16";
+ case MNarrowingOp::To8:
+ return "To8";
+ default:
+ break;
+ }
+ MOZ_CRASH("Unknown MNarrowingOp");
+}
+#endif
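+
+// As an illustrative (non-normative) sketch of the scalar semantics these two
+// enums describe, assuming ordinary two's-complement conversions:
+//
+//   int32_t widenFromS8(uint32_t loaded) {     // MWideningOp::FromS8
+//     return int32_t(int8_t(uint8_t(loaded))); // sign-extend the low 8 bits
+//   }
+//   int32_t widenFromU16(uint32_t loaded) {    // MWideningOp::FromU16
+//     return int32_t(uint16_t(loaded));        // zero-extend the low 16 bits
+//   }
+//   uint8_t narrowTo8(int32_t value) {         // MNarrowingOp::To8
+//     return uint8_t(value);                   // simple truncation
+//   }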
+
+// Provides information about a potential trap at the instruction's machine
+// code, e.g. a null pointer dereference.
+struct TrapSiteInfo {
+ wasm::BytecodeOffset offset;
+ explicit TrapSiteInfo(wasm::BytecodeOffset offset_) : offset(offset_) {}
+};
+
+typedef mozilla::Maybe<TrapSiteInfo> MaybeTrapSiteInfo;
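+
+// As an illustrative sketch (with `bytecodeOffset` standing in for a
+// wasm::BytecodeOffset already at hand), a caller can request trap handling
+// on the field accesses below with:
+//
+//   MaybeTrapSiteInfo trap = mozilla::Some(TrapSiteInfo(bytecodeOffset));
+//
+// The default mozilla::Nothing() leaves the access non-guarding; a present
+// value makes the instruction a guard (see the setGuard() calls below).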
+
+// Load an object field stored at a fixed offset from a base pointer. This
+// field may be any value type, including references. No barriers are
+// performed. The offset must be representable as a 31-bit unsigned integer.
+class MWasmLoadField : public MUnaryInstruction, public NoTypePolicy::Data {
+ uint32_t offset_;
+ MWideningOp wideningOp_;
+ AliasSet aliases_;
+ MaybeTrapSiteInfo maybeTrap_;
+
+ MWasmLoadField(MDefinition* obj, uint32_t offset, MIRType type,
+ MWideningOp wideningOp, AliasSet aliases,
+ MaybeTrapSiteInfo maybeTrap = mozilla::Nothing())
+ : MUnaryInstruction(classOpcode, obj),
+ offset_(uint32_t(offset)),
+ wideningOp_(wideningOp),
+ aliases_(aliases),
+ maybeTrap_(maybeTrap) {
+ MOZ_ASSERT(offset <= INT32_MAX);
+ // "if you want to widen the value when it is loaded, the destination type
+ // must be Int32".
+ MOZ_ASSERT_IF(wideningOp != MWideningOp::None, type == MIRType::Int32);
+ MOZ_ASSERT(
+ aliases.flags() ==
+ AliasSet::Load(AliasSet::WasmStructOutlineDataPointer).flags() ||
+ aliases.flags() ==
+ AliasSet::Load(AliasSet::WasmArrayNumElements).flags() ||
+ aliases.flags() ==
+ AliasSet::Load(AliasSet::WasmArrayDataPointer).flags() ||
+ aliases.flags() == AliasSet::Load(AliasSet::Any).flags());
+ setResultType(type);
+ if (maybeTrap_) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmLoadField)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, obj))
+
+ uint32_t offset() const { return offset_; }
+ MWideningOp wideningOp() const { return wideningOp_; }
+ AliasSet getAliasSet() const override { return aliases_; }
+ MaybeTrapSiteInfo maybeTrap() const { return maybeTrap_; }
+
+ bool congruentTo(const MDefinition* ins) const override {
+ // In the limited case where this insn is used to read
+ // WasmStructObject::outlineData_ (the field itself, not what it points
+ // at), we allow commoning up to happen. This is OK because
+ // WasmStructObject::outlineData_ is readonly for the life of the
+ // WasmStructObject.
+ if (!ins->isWasmLoadField()) {
+ return false;
+ }
+ const MWasmLoadField* other = ins->toWasmLoadField();
+    return congruentIfOperandsEqual(ins) &&
+ offset() == other->offset() && wideningOp() == other->wideningOp() &&
+ getAliasSet().flags() == other->getAliasSet().flags() &&
+ getAliasSet().flags() ==
+ AliasSet::Load(AliasSet::WasmStructOutlineDataPointer).flags();
+ }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[96];
+ SprintfLiteral(buf, "(offs=%lld, wideningOp=%s)", (long long int)offset_,
+ StringFromMWideningOp(wideningOp_));
+ extras->add(buf);
+ }
+#endif
+};
+
+// Loads a value from a location, denoted as a fixed offset from a base
+// pointer, which (it is assumed) is within a wasm object. This field may be
+// any value type, including references. No barriers are performed.
+//
+// This instruction takes a pointer to a second object `ka`, which it is
+// necessary to keep alive. It is expected that `ka` holds a reference to
+// `obj`, but this is not enforced and no code is generated to access `ka`.
+// This instruction extends the lifetime of `ka` so that it, and hence `obj`,
+// cannot be collected while `obj` is live. This is necessary if `obj` does
+// not point to a GC-managed object. `offset` must be representable as a
+// 31-bit unsigned integer.
+class MWasmLoadFieldKA : public MBinaryInstruction, public NoTypePolicy::Data {
+ uint32_t offset_;
+ MWideningOp wideningOp_;
+ AliasSet aliases_;
+ MaybeTrapSiteInfo maybeTrap_;
+
+ MWasmLoadFieldKA(MDefinition* ka, MDefinition* obj, size_t offset,
+ MIRType type, MWideningOp wideningOp, AliasSet aliases,
+ MaybeTrapSiteInfo maybeTrap = mozilla::Nothing())
+ : MBinaryInstruction(classOpcode, ka, obj),
+ offset_(uint32_t(offset)),
+ wideningOp_(wideningOp),
+ aliases_(aliases),
+ maybeTrap_(maybeTrap) {
+ MOZ_ASSERT(offset <= INT32_MAX);
+ MOZ_ASSERT_IF(wideningOp != MWideningOp::None, type == MIRType::Int32);
+ MOZ_ASSERT(
+ aliases.flags() ==
+ AliasSet::Load(AliasSet::WasmStructInlineDataArea).flags() ||
+ aliases.flags() ==
+ AliasSet::Load(AliasSet::WasmStructOutlineDataArea).flags() ||
+ aliases.flags() ==
+ AliasSet::Load(AliasSet::WasmArrayDataArea).flags() ||
+ aliases.flags() == AliasSet::Load(AliasSet::Any).flags());
+ setResultType(type);
+ if (maybeTrap_) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmLoadFieldKA)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, ka), (1, obj))
+
+ uint32_t offset() const { return offset_; }
+ MWideningOp wideningOp() const { return wideningOp_; }
+ AliasSet getAliasSet() const override { return aliases_; }
+ MaybeTrapSiteInfo maybeTrap() const { return maybeTrap_; }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[96];
+ SprintfLiteral(buf, "(offs=%lld, wideningOp=%s)", (long long int)offset_,
+ StringFromMWideningOp(wideningOp_));
+ extras->add(buf);
+ }
+#endif
+};
+
+// Stores a non-reference value to a location, denoted as a fixed offset from
+// a base pointer, which (it is assumed) is within a wasm object. This field
+// may be any value type, _excluding_ references. References _must_ use the
+// 'Ref' variant of this instruction. The offset must be representable as a
+// 31-bit unsigned integer.
+//
+// This instruction takes a second object `ka` that must be kept alive, as
+// described for MWasmLoadFieldKA above.
+class MWasmStoreFieldKA : public MTernaryInstruction,
+ public NoTypePolicy::Data {
+ uint32_t offset_;
+ MNarrowingOp narrowingOp_;
+ AliasSet aliases_;
+ MaybeTrapSiteInfo maybeTrap_;
+
+ MWasmStoreFieldKA(MDefinition* ka, MDefinition* obj, size_t offset,
+ MDefinition* value, MNarrowingOp narrowingOp,
+ AliasSet aliases,
+ MaybeTrapSiteInfo maybeTrap = mozilla::Nothing())
+ : MTernaryInstruction(classOpcode, ka, obj, value),
+ offset_(uint32_t(offset)),
+ narrowingOp_(narrowingOp),
+ aliases_(aliases),
+ maybeTrap_(maybeTrap) {
+ MOZ_ASSERT(offset <= INT32_MAX);
+ MOZ_ASSERT(value->type() != MIRType::RefOrNull);
+ // "if you want to narrow the value when it is stored, the source type
+ // must be Int32".
+ MOZ_ASSERT_IF(narrowingOp != MNarrowingOp::None,
+ value->type() == MIRType::Int32);
+ MOZ_ASSERT(
+ aliases.flags() ==
+ AliasSet::Store(AliasSet::WasmStructInlineDataArea).flags() ||
+ aliases.flags() ==
+ AliasSet::Store(AliasSet::WasmStructOutlineDataArea).flags() ||
+ aliases.flags() ==
+ AliasSet::Store(AliasSet::WasmArrayDataArea).flags() ||
+ aliases.flags() == AliasSet::Store(AliasSet::Any).flags());
+ if (maybeTrap_) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmStoreFieldKA)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, ka), (1, obj), (2, value))
+
+ uint32_t offset() const { return offset_; }
+ MNarrowingOp narrowingOp() const { return narrowingOp_; }
+ AliasSet getAliasSet() const override { return aliases_; }
+ MaybeTrapSiteInfo maybeTrap() const { return maybeTrap_; }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[96];
+ SprintfLiteral(buf, "(offs=%lld, narrowingOp=%s)", (long long int)offset_,
+ StringFromMNarrowingOp(narrowingOp_));
+ extras->add(buf);
+ }
+#endif
+};
+
+// Stores a reference value to a location, denoted as a fixed offset from a
+// base pointer, which (it is assumed) is within a wasm object. This
+// instruction emits a pre-barrier. A post barrier _must_ be performed
+// separately. The offset must be representable as a 31-bit unsigned integer.
+//
+// This instruction takes a second object `ka` that must be kept alive, as
+// described for MWasmLoadFieldKA above.
+class MWasmStoreFieldRefKA : public MAryInstruction<4>,
+ public NoTypePolicy::Data {
+ uint32_t offset_;
+ AliasSet aliases_;
+ MaybeTrapSiteInfo maybeTrap_;
+ WasmPreBarrierKind preBarrierKind_;
+
+ MWasmStoreFieldRefKA(MDefinition* instance, MDefinition* ka, MDefinition* obj,
+ size_t offset, MDefinition* value, AliasSet aliases,
+ MaybeTrapSiteInfo maybeTrap,
+ WasmPreBarrierKind preBarrierKind)
+ : MAryInstruction<4>(classOpcode),
+ offset_(uint32_t(offset)),
+ aliases_(aliases),
+ maybeTrap_(maybeTrap),
+ preBarrierKind_(preBarrierKind) {
+ MOZ_ASSERT(obj->type() == TargetWordMIRType() ||
+ obj->type() == MIRType::Pointer ||
+ obj->type() == MIRType::RefOrNull);
+ MOZ_ASSERT(offset <= INT32_MAX);
+ MOZ_ASSERT(value->type() == MIRType::RefOrNull);
+ MOZ_ASSERT(
+ aliases.flags() ==
+ AliasSet::Store(AliasSet::WasmStructInlineDataArea).flags() ||
+ aliases.flags() ==
+ AliasSet::Store(AliasSet::WasmStructOutlineDataArea).flags() ||
+ aliases.flags() ==
+ AliasSet::Store(AliasSet::WasmArrayDataArea).flags() ||
+ aliases.flags() == AliasSet::Store(AliasSet::Any).flags());
+ initOperand(0, instance);
+ initOperand(1, ka);
+ initOperand(2, obj);
+ initOperand(3, value);
+ if (maybeTrap_) {
+ setGuard();
+ }
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmStoreFieldRefKA)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, instance), (1, ka), (2, obj), (3, value))
+
+ uint32_t offset() const { return offset_; }
+ AliasSet getAliasSet() const override { return aliases_; }
+ MaybeTrapSiteInfo maybeTrap() const { return maybeTrap_; }
+ WasmPreBarrierKind preBarrierKind() const { return preBarrierKind_; }
+
+#ifdef JS_JITSPEW
+ void getExtras(ExtrasCollector* extras) override {
+ char buf[64];
+ SprintfLiteral(buf, "(offs=%lld)", (long long int)offset_);
+ extras->add(buf);
+ }
+#endif
+};
+
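+// Tests if the WasmGcObject `object` is a subtype of the abstract heap type
+// `destType`; the destination type must not be a concrete type definition
+// (destType.isTypeRef() is asserted false). Concrete casts use
+// MWasmGcObjectIsSubtypeOfConcrete below.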
+class MWasmGcObjectIsSubtypeOfAbstract : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ wasm::RefType sourceType_;
+ wasm::RefType destType_;
+
+ MWasmGcObjectIsSubtypeOfAbstract(MDefinition* object,
+ wasm::RefType sourceType,
+ wasm::RefType destType)
+ : MUnaryInstruction(classOpcode, object),
+ sourceType_(sourceType),
+ destType_(destType) {
+ MOZ_ASSERT(!destType.isTypeRef());
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmGcObjectIsSubtypeOfAbstract)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ wasm::RefType sourceType() const { return sourceType_; };
+ wasm::RefType destType() const { return destType_; };
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ sourceType() ==
+ ins->toWasmGcObjectIsSubtypeOfAbstract()->sourceType() &&
+ destType() == ins->toWasmGcObjectIsSubtypeOfAbstract()->destType();
+ }
+
+ HashNumber valueHash() const override {
+ HashNumber hn = MUnaryInstruction::valueHash();
+ hn = addU64ToHash(hn, sourceType().packed().bits());
+ hn = addU64ToHash(hn, destType().packed().bits());
+ return hn;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+// Tests if the WasmGcObject, `object`, is a subtype of `superSuperTypeVector`.
+// The actual super type definition must be known at compile time, so that the
+// subtyping depth of the super type can be used.
+class MWasmGcObjectIsSubtypeOfConcrete : public MBinaryInstruction,
+ public NoTypePolicy::Data {
+ wasm::RefType sourceType_;
+ wasm::RefType destType_;
+
+ MWasmGcObjectIsSubtypeOfConcrete(MDefinition* object,
+ MDefinition* superSuperTypeVector,
+ wasm::RefType sourceType,
+ wasm::RefType destType)
+ : MBinaryInstruction(classOpcode, object, superSuperTypeVector),
+ sourceType_(sourceType),
+ destType_(destType) {
+ MOZ_ASSERT(destType.isTypeRef());
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(WasmGcObjectIsSubtypeOfConcrete)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object), (1, superSuperTypeVector))
+
+ wasm::RefType sourceType() const { return sourceType_; };
+ wasm::RefType destType() const { return destType_; };
+
+ bool congruentTo(const MDefinition* ins) const override {
+ return congruentIfOperandsEqual(ins) &&
+ sourceType() ==
+ ins->toWasmGcObjectIsSubtypeOfConcrete()->sourceType() &&
+ destType() == ins->toWasmGcObjectIsSubtypeOfConcrete()->destType();
+ }
+
+ HashNumber valueHash() const override {
+ HashNumber hn = MBinaryInstruction::valueHash();
+ hn = addU64ToHash(hn, sourceType().packed().bits());
+ hn = addU64ToHash(hn, destType().packed().bits());
+ return hn;
+ }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+#ifdef FUZZING_JS_FUZZILLI
+class MFuzzilliHash : public MUnaryInstruction, public NoTypePolicy::Data {
+ explicit MFuzzilliHash(MDefinition* obj)
+ : MUnaryInstruction(classOpcode, obj) {
+ setResultType(MIRType::Int32);
+ setMovable();
+ }
+
+ public:
+ INSTRUCTION_HEADER(FuzzilliHash);
+ TRIVIAL_NEW_WRAPPERS
+ ALLOW_CLONE(MFuzzilliHash)
+
+# ifdef DEBUG
+ bool isConsistentFloat32Use(MUse* use) const override { return true; }
+# endif
+
+ AliasSet getAliasSet() const override {
+ MDefinition* obj = getOperand(0);
+ if (obj->type() == MIRType::Object || obj->type() == MIRType::Value) {
+ return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::DynamicSlot | AliasSet::Element |
+ AliasSet::UnboxedElement);
+ }
+ return AliasSet::None();
+ }
+};
+
+class MFuzzilliHashStore : public MUnaryInstruction, public NoTypePolicy::Data {
+ explicit MFuzzilliHashStore(MDefinition* obj)
+ : MUnaryInstruction(classOpcode, obj) {
+ MOZ_ASSERT(obj->type() == MIRType::Int32);
+
+ setResultType(MIRType::None);
+ }
+
+ public:
+ INSTRUCTION_HEADER(FuzzilliHashStore);
+ TRIVIAL_NEW_WRAPPERS
+ ALLOW_CLONE(MFuzzilliHashStore)
+
+  // This is a store and hence effectful; however, no other load can
+  // alias with the store.
+ AliasSet getAliasSet() const override {
+ return AliasSet::Store(AliasSet::FuzzilliHash);
+ }
+};
+#endif
+
+#undef INSTRUCTION_HEADER
+
+void MUse::init(MDefinition* producer, MNode* consumer) {
+ MOZ_ASSERT(!consumer_, "Initializing MUse that already has a consumer");
+ MOZ_ASSERT(!producer_, "Initializing MUse that already has a producer");
+ initUnchecked(producer, consumer);
+}
+
+void MUse::initUnchecked(MDefinition* producer, MNode* consumer) {
+ MOZ_ASSERT(consumer, "Initializing to null consumer");
+ consumer_ = consumer;
+ producer_ = producer;
+ producer_->addUseUnchecked(this);
+}
+
+void MUse::initUncheckedWithoutProducer(MNode* consumer) {
+ MOZ_ASSERT(consumer, "Initializing to null consumer");
+ consumer_ = consumer;
+ producer_ = nullptr;
+}
+
+void MUse::replaceProducer(MDefinition* producer) {
+ MOZ_ASSERT(consumer_, "Resetting MUse without a consumer");
+ producer_->removeUse(this);
+ producer_ = producer;
+ producer_->addUse(this);
+}
+
+void MUse::releaseProducer() {
+ MOZ_ASSERT(consumer_, "Clearing MUse without a consumer");
+ producer_->removeUse(this);
+ producer_ = nullptr;
+}
+
+// Implement cast functions now that the compiler can see the inheritance.
+
+MDefinition* MNode::toDefinition() {
+ MOZ_ASSERT(isDefinition());
+ return (MDefinition*)this;
+}
+
+MResumePoint* MNode::toResumePoint() {
+ MOZ_ASSERT(isResumePoint());
+ return (MResumePoint*)this;
+}
+
+MInstruction* MDefinition::toInstruction() {
+ MOZ_ASSERT(!isPhi());
+ return (MInstruction*)this;
+}
+
+const MInstruction* MDefinition::toInstruction() const {
+ MOZ_ASSERT(!isPhi());
+ return (const MInstruction*)this;
+}
+
+MControlInstruction* MDefinition::toControlInstruction() {
+ MOZ_ASSERT(isControlInstruction());
+ return (MControlInstruction*)this;
+}
+
+MConstant* MDefinition::maybeConstantValue() {
+ MDefinition* op = this;
+ if (op->isBox()) {
+ op = op->toBox()->input();
+ }
+ if (op->isConstant()) {
+ return op->toConstant();
+ }
+ return nullptr;
+}
+
+#ifdef ENABLE_WASM_SIMD
+MWasmShuffleSimd128* BuildWasmShuffleSimd128(TempAllocator& alloc,
+ const int8_t* control,
+ MDefinition* lhs,
+ MDefinition* rhs);
+#endif // ENABLE_WASM_SIMD
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MIR_h */
diff --git a/js/src/jit/MIRGenerator.h b/js/src/jit/MIRGenerator.h
new file mode 100644
index 0000000000..4026a69117
--- /dev/null
+++ b/js/src/jit/MIRGenerator.h
@@ -0,0 +1,183 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MIRGenerator_h
+#define jit_MIRGenerator_h
+
+// This file declares the data structures used to build a control-flow graph
+// containing MIR.
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Result.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/CompileInfo.h"
+#include "jit/CompileWrappers.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/JitContext.h"
+#include "jit/JitSpewer.h"
+#include "jit/PerfSpewer.h"
+#include "js/Utility.h"
+#include "vm/GeckoProfiler.h"
+
+namespace js {
+namespace jit {
+
+class JitRuntime;
+class MIRGraph;
+class OptimizationInfo;
+
+class MIRGenerator final {
+ public:
+ MIRGenerator(CompileRealm* realm, const JitCompileOptions& options,
+ TempAllocator* alloc, MIRGraph* graph,
+ const CompileInfo* outerInfo,
+ const OptimizationInfo* optimizationInfo);
+
+ void initMinWasmHeapLength(uint64_t init) { minWasmHeapLength_ = init; }
+
+ TempAllocator& alloc() { return *alloc_; }
+ MIRGraph& graph() { return *graph_; }
+ [[nodiscard]] bool ensureBallast() { return alloc().ensureBallast(); }
+ const JitRuntime* jitRuntime() const { return runtime->jitRuntime(); }
+ const CompileInfo& outerInfo() const { return *outerInfo_; }
+ const OptimizationInfo& optimizationInfo() const {
+ return *optimizationInfo_;
+ }
+ bool hasProfilingScripts() const {
+ return runtime && runtime->profilingScripts();
+ }
+
+ template <typename T>
+ T* allocate(size_t count = 1) {
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) {
+ return nullptr;
+ }
+ return static_cast<T*>(alloc().allocate(bytes));
+ }
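+  // Illustrative use (with `gen` a MIRGenerator* and `n` a hypothetical
+  // element count):
+  //   uint32_t* buf = gen->allocate<uint32_t>(n);
+  // The result is nullptr if the size computation overflows or the
+  // underlying allocation fails.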
+
+  // Sets an error state and prints a message. Returns an error result so the
+  // failure can be propagated up.
+ mozilla::GenericErrorResult<AbortReason> abort(AbortReason r);
+ mozilla::GenericErrorResult<AbortReason> abort(AbortReason r,
+ const char* message, ...)
+ MOZ_FORMAT_PRINTF(3, 4);
+
+ mozilla::GenericErrorResult<AbortReason> abortFmt(AbortReason r,
+ const char* message,
+ va_list ap)
+ MOZ_FORMAT_PRINTF(3, 0);
+
+  // Collect the evaluation result of phases after WarpOracle, so that
+  // off-thread compilation can report which error was encountered.
+ void setOffThreadStatus(AbortReasonOr<Ok>&& result) {
+ MOZ_ASSERT(offThreadStatus_.isOk());
+ offThreadStatus_ = std::move(result);
+ }
+ const AbortReasonOr<Ok>& getOffThreadStatus() const {
+ return offThreadStatus_;
+ }
+
+ [[nodiscard]] bool instrumentedProfiling() {
+ if (!instrumentedProfilingIsCached_) {
+ instrumentedProfiling_ = runtime->geckoProfiler().enabled();
+ instrumentedProfilingIsCached_ = true;
+ }
+ return instrumentedProfiling_;
+ }
+
+ bool isProfilerInstrumentationEnabled() {
+ return !compilingWasm() && instrumentedProfiling();
+ }
+
+ gc::Heap initialStringHeap() const {
+ return stringsCanBeInNursery_ ? gc::Heap::Default : gc::Heap::Tenured;
+ }
+
+ gc::Heap initialBigIntHeap() const {
+ return bigIntsCanBeInNursery_ ? gc::Heap::Default : gc::Heap::Tenured;
+ }
+
+ // Whether the main thread is trying to cancel this build.
+ bool shouldCancel(const char* why) { return cancelBuild_; }
+ void cancel() { cancelBuild_ = true; }
+
+ bool compilingWasm() const { return outerInfo_->compilingWasm(); }
+
+ uint32_t wasmMaxStackArgBytes() const {
+ MOZ_ASSERT(compilingWasm());
+ return wasmMaxStackArgBytes_;
+ }
+ void initWasmMaxStackArgBytes(uint32_t n) {
+ MOZ_ASSERT(compilingWasm());
+ MOZ_ASSERT(wasmMaxStackArgBytes_ == 0);
+ wasmMaxStackArgBytes_ = n;
+ }
+ uint64_t minWasmHeapLength() const { return minWasmHeapLength_; }
+
+ void setNeedsOverrecursedCheck() { needsOverrecursedCheck_ = true; }
+ bool needsOverrecursedCheck() const { return needsOverrecursedCheck_; }
+
+ void setNeedsStaticStackAlignment() { needsStaticStackAlignment_ = true; }
+ bool needsStaticStackAlignment() const { return needsStaticStackAlignment_; }
+
+ public:
+ CompileRealm* realm;
+ CompileRuntime* runtime;
+
+ private:
+ // The CompileInfo for the outermost script.
+ const CompileInfo* outerInfo_;
+
+ const OptimizationInfo* optimizationInfo_;
+ TempAllocator* alloc_;
+ MIRGraph* graph_;
+ AbortReasonOr<Ok> offThreadStatus_;
+ mozilla::Atomic<bool, mozilla::Relaxed> cancelBuild_;
+
+ uint32_t wasmMaxStackArgBytes_;
+ bool needsOverrecursedCheck_;
+ bool needsStaticStackAlignment_;
+
+ bool instrumentedProfiling_;
+ bool instrumentedProfilingIsCached_;
+ bool stringsCanBeInNursery_;
+ bool bigIntsCanBeInNursery_;
+
+ bool disableLICM_ = false;
+
+ public:
+ void disableLICM() { disableLICM_ = true; }
+ bool licmEnabled() const;
+
+ private:
+ uint64_t minWasmHeapLength_;
+
+ IonPerfSpewer wasmPerfSpewer_;
+
+ public:
+ IonPerfSpewer& perfSpewer() { return wasmPerfSpewer_; }
+
+ public:
+ const JitCompileOptions options;
+
+ private:
+ GraphSpewer gs_;
+
+ public:
+ GraphSpewer& graphSpewer() { return gs_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MIRGenerator_h */
diff --git a/js/src/jit/MIRGraph.cpp b/js/src/jit/MIRGraph.cpp
new file mode 100644
index 0000000000..86a2bf4ad3
--- /dev/null
+++ b/js/src/jit/MIRGraph.cpp
@@ -0,0 +1,1414 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/MIRGraph.h"
+
+#include "jit/CompileInfo.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+
+using namespace js;
+using namespace js::jit;
+
+MIRGenerator::MIRGenerator(CompileRealm* realm,
+ const JitCompileOptions& options,
+ TempAllocator* alloc, MIRGraph* graph,
+ const CompileInfo* info,
+ const OptimizationInfo* optimizationInfo)
+ : realm(realm),
+ runtime(realm ? realm->runtime() : nullptr),
+ outerInfo_(info),
+ optimizationInfo_(optimizationInfo),
+ alloc_(alloc),
+ graph_(graph),
+ offThreadStatus_(Ok()),
+ cancelBuild_(false),
+ wasmMaxStackArgBytes_(0),
+ needsOverrecursedCheck_(false),
+ needsStaticStackAlignment_(false),
+ instrumentedProfiling_(false),
+ instrumentedProfilingIsCached_(false),
+ stringsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateStrings()
+ : false),
+ bigIntsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateBigInts()
+ : false),
+ minWasmHeapLength_(0),
+ options(options),
+ gs_(alloc) {}
+
+bool MIRGenerator::licmEnabled() const {
+ return optimizationInfo().licmEnabled() && !disableLICM_ &&
+ !outerInfo().hadLICMInvalidation();
+}
+
+mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(AbortReason r) {
+ if (JitSpewEnabled(JitSpew_IonAbort)) {
+ switch (r) {
+ case AbortReason::Alloc:
+ JitSpew(JitSpew_IonAbort, "AbortReason::Alloc");
+ break;
+ case AbortReason::Disable:
+ JitSpew(JitSpew_IonAbort, "AbortReason::Disable");
+ break;
+ case AbortReason::Error:
+ JitSpew(JitSpew_IonAbort, "AbortReason::Error");
+ break;
+ case AbortReason::NoAbort:
+ MOZ_CRASH("Abort with AbortReason::NoAbort");
+ break;
+ }
+ }
+ return Err(std::move(r));
+}
+
+mozilla::GenericErrorResult<AbortReason> MIRGenerator::abortFmt(
+ AbortReason r, const char* message, va_list ap) {
+ JitSpewVA(JitSpew_IonAbort, message, ap);
+ return Err(std::move(r));
+}
+
+mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(
+ AbortReason r, const char* message, ...) {
+ va_list ap;
+ va_start(ap, message);
+ auto forward = abortFmt(r, message, ap);
+ va_end(ap);
+ return forward;
+}
+
+void MIRGraph::addBlock(MBasicBlock* block) {
+ MOZ_ASSERT(block);
+ block->setId(blockIdGen_++);
+ blocks_.pushBack(block);
+ numBlocks_++;
+}
+
+void MIRGraph::insertBlockAfter(MBasicBlock* at, MBasicBlock* block) {
+ block->setId(blockIdGen_++);
+ blocks_.insertAfter(at, block);
+ numBlocks_++;
+}
+
+void MIRGraph::insertBlockBefore(MBasicBlock* at, MBasicBlock* block) {
+ block->setId(blockIdGen_++);
+ blocks_.insertBefore(at, block);
+ numBlocks_++;
+}
+
+void MIRGraph::removeBlock(MBasicBlock* block) {
+  // Remove a block from the graph. It will also clean up the block.
+
+ if (block == osrBlock_) {
+ osrBlock_ = nullptr;
+ }
+
+ if (returnAccumulator_) {
+ size_t i = 0;
+ while (i < returnAccumulator_->length()) {
+ if ((*returnAccumulator_)[i] == block) {
+ returnAccumulator_->erase(returnAccumulator_->begin() + i);
+ } else {
+ i++;
+ }
+ }
+ }
+
+ block->clear();
+ block->markAsDead();
+
+ if (block->isInList()) {
+ blocks_.remove(block);
+ numBlocks_--;
+ }
+}
+
+void MIRGraph::unmarkBlocks() {
+ for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++) {
+ i->unmark();
+ }
+}
+
+MBasicBlock* MBasicBlock::New(MIRGraph& graph, size_t stackDepth,
+ const CompileInfo& info, MBasicBlock* maybePred,
+ BytecodeSite* site, Kind kind) {
+ MOZ_ASSERT(site->pc() != nullptr);
+
+ MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
+ if (!block->init()) {
+ return nullptr;
+ }
+
+ if (!block->inherit(graph.alloc(), stackDepth, maybePred, 0)) {
+ return nullptr;
+ }
+
+ return block;
+}
+
+MBasicBlock* MBasicBlock::NewPopN(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site,
+ Kind kind, uint32_t popped) {
+ MOZ_ASSERT(site->pc() != nullptr);
+
+ MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
+ if (!block->init()) {
+ return nullptr;
+ }
+
+ if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, popped)) {
+ return nullptr;
+ }
+
+ return block;
+}
+
+MBasicBlock* MBasicBlock::NewPendingLoopHeader(MIRGraph& graph,
+ const CompileInfo& info,
+ MBasicBlock* pred,
+ BytecodeSite* site) {
+ MOZ_ASSERT(site->pc() != nullptr);
+
+ MBasicBlock* block =
+ new (graph.alloc()) MBasicBlock(graph, info, site, PENDING_LOOP_HEADER);
+ if (!block->init()) {
+ return nullptr;
+ }
+
+ if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, 0)) {
+ return nullptr;
+ }
+
+ return block;
+}
+
+MBasicBlock* MBasicBlock::NewSplitEdge(MIRGraph& graph, MBasicBlock* pred,
+ size_t predEdgeIdx, MBasicBlock* succ) {
+ MBasicBlock* split = nullptr;
+ if (!succ->pc()) {
+    // The successor does not have a PC; this is a Wasm compilation.
+ split = MBasicBlock::New(graph, succ->info(), pred, SPLIT_EDGE);
+ if (!split) {
+ return nullptr;
+ }
+
+ // Insert the split edge block in-between.
+ split->end(MGoto::New(graph.alloc(), succ));
+ } else {
+    // The successor has a PC; this is a Warp compilation.
+ MResumePoint* succEntry = succ->entryResumePoint();
+
+ BytecodeSite* site =
+ new (graph.alloc()) BytecodeSite(succ->trackedTree(), succEntry->pc());
+ split =
+ new (graph.alloc()) MBasicBlock(graph, succ->info(), site, SPLIT_EDGE);
+
+ if (!split->init()) {
+ return nullptr;
+ }
+
+ // A split edge is used to simplify the graph to avoid having a
+ // predecessor with multiple successors as well as a successor with
+ // multiple predecessors. As instructions can be moved in this
+ // split-edge block, we need to give this block a resume point. To do
+ // so, we copy the entry resume points of the successor and filter the
+ // phis to keep inputs from the current edge.
+
+ // Propagate the caller resume point from the inherited block.
+ split->callerResumePoint_ = succ->callerResumePoint();
+
+    // Split-edge blocks are created after the interpreter stack emulation.
+    // Thus, there is no need for creating slots.
+ split->stackPosition_ = succEntry->stackDepth();
+
+ // Create a resume point using our initial stack position.
+ MResumePoint* splitEntry = new (graph.alloc())
+ MResumePoint(split, succEntry->pc(), ResumeMode::ResumeAt);
+ if (!splitEntry->init(graph.alloc())) {
+ return nullptr;
+ }
+ split->entryResumePoint_ = splitEntry;
+
+ // Insert the split edge block in-between.
+ split->end(MGoto::New(graph.alloc(), succ));
+
+    // The target entry resume point might have phi operands; keep the
+    // operands of the phi coming from our edge.
+ size_t succEdgeIdx = succ->indexForPredecessor(pred);
+
+ for (size_t i = 0, e = splitEntry->numOperands(); i < e; i++) {
+ MDefinition* def = succEntry->getOperand(i);
+ // This early in the pipeline, we have no recover instructions in
+ // any entry resume point.
+ if (def->block() == succ) {
+ if (def->isPhi()) {
+ def = def->toPhi()->getOperand(succEdgeIdx);
+ } else {
+ // The phi-operand may already have been optimized out.
+ MOZ_ASSERT(def->isConstant());
+ MOZ_ASSERT(def->type() == MIRType::MagicOptimizedOut);
+
+ def = split->optimizedOutConstant(graph.alloc());
+ }
+ }
+
+ splitEntry->initOperand(i, def);
+ }
+
+    // On the wasm path above, the New variant already appends the
+    // predecessor, so this append must stay inside this branch rather than
+    // below, where the rest of the graph is modified for both paths.
+ if (!split->predecessors_.append(pred)) {
+ return nullptr;
+ }
+ }
+
+ split->setLoopDepth(succ->loopDepth());
+
+ graph.insertBlockAfter(pred, split);
+
+ pred->replaceSuccessor(predEdgeIdx, split);
+ succ->replacePredecessor(pred, split);
+ return split;
+}
+
+void MBasicBlock::moveToNewBlock(MInstruction* ins, MBasicBlock* dst) {
+ MOZ_ASSERT(ins->block() == this);
+ MOZ_ASSERT(!dst->hasLastIns());
+ instructions_.remove(ins);
+ ins->setInstructionBlock(dst, dst->trackedSite());
+ if (MResumePoint* rp = ins->resumePoint()) {
+ removeResumePoint(rp);
+ dst->addResumePoint(rp);
+ rp->setBlock(dst);
+ }
+ dst->instructions_.pushBack(ins);
+}
+
+void MBasicBlock::moveOuterResumePointTo(MBasicBlock* dest) {
+ if (MResumePoint* outer = outerResumePoint()) {
+ removeResumePoint(outer);
+ outerResumePoint_ = nullptr;
+ dest->setOuterResumePoint(outer);
+ dest->addResumePoint(outer);
+ outer->setBlock(dest);
+ }
+}
+
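+// Wraps |ins| in a fastpath/slowpath diamond: every instruction after |ins|
+// is moved into a new join block, this block is terminated with an MTest on
+// |condition| that branches to a fastpath block holding |fastpath| and a
+// slowpath block holding |ins| itself, and, if |ins| has uses, a phi in the
+// join block merges the two results.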
+bool MBasicBlock::wrapInstructionInFastpath(MInstruction* ins,
+ MInstruction* fastpath,
+ MInstruction* condition) {
+ MOZ_ASSERT(ins->block() == this);
+ MOZ_ASSERT(!ins->isControlInstruction());
+
+ MInstructionIterator rest(begin(ins));
+ rest++;
+
+ MResumePoint* resumeBeforeIns = activeResumePoint(ins);
+ MResumePoint* resumeAfterIns = activeResumePoint(*rest);
+
+ // Create the join block.
+ MBasicBlock* join = MBasicBlock::NewInternal(graph_, this, resumeAfterIns);
+ if (!join) {
+ return false;
+ }
+
+ // Update the successors of the current block.
+ for (uint32_t i = 0; i < numSuccessors(); i++) {
+ getSuccessor(i)->replacePredecessor(this, join);
+ }
+ if (successorWithPhis()) {
+ join->setSuccessorWithPhis(successorWithPhis(), positionInPhiSuccessor());
+ clearSuccessorWithPhis();
+ }
+
+ // Copy all instructions after |ins| into the join block.
+ while (rest != end()) {
+ MInstruction* ins = *rest++;
+ moveToNewBlock(ins, join);
+ }
+ MOZ_ASSERT(!hasLastIns());
+ MOZ_ASSERT(join->hasLastIns());
+
+ graph_.insertBlockAfter(this, join);
+
+ // Create the fast path block.
+ MBasicBlock* fastpathBlock =
+ MBasicBlock::NewInternal(graph_, this, resumeBeforeIns);
+ if (!fastpathBlock) {
+ return false;
+ }
+ graph_.insertBlockAfter(this, fastpathBlock);
+ fastpathBlock->add(fastpath);
+ fastpathBlock->end(MGoto::New(graph_.alloc(), join));
+
+ // Create the slowpath block.
+ MBasicBlock* slowpathBlock =
+ MBasicBlock::NewInternal(graph_, this, resumeBeforeIns);
+ if (!slowpathBlock) {
+ return false;
+ }
+ graph_.insertBlockAfter(fastpathBlock, slowpathBlock);
+ moveToNewBlock(ins, slowpathBlock);
+ slowpathBlock->end(MGoto::New(graph_.alloc(), join));
+
+ // Connect current block to fastpath and slowpath.
+ add(condition);
+ end(MTest::New(graph_.alloc(), condition, fastpathBlock, slowpathBlock));
+
+ // Update predecessors.
+ if (!fastpathBlock->addPredecessorWithoutPhis(this) ||
+ !slowpathBlock->addPredecessorWithoutPhis(this) ||
+ !join->addPredecessorWithoutPhis(fastpathBlock) ||
+ !join->addPredecessorWithoutPhis(slowpathBlock)) {
+ return false;
+ }
+
+ if (ins->hasUses()) {
+ // Insert phi.
+ MPhi* phi = MPhi::New(graph_.alloc());
+ if (!phi->reserveLength(2)) {
+ return false;
+ }
+ phi->addInput(fastpath);
+ fastpathBlock->setSuccessorWithPhis(join, 0);
+ phi->addInput(ins);
+ slowpathBlock->setSuccessorWithPhis(join, 1);
+ join->addPhi(phi);
+
+ for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e;) {
+ MUse* use = *i++;
+ if (use->consumer() != phi && use->consumer() != ins->resumePoint()) {
+ use->replaceProducer(phi);
+ }
+ }
+ }
+
+ moveOuterResumePointTo(join);
+
+ return true;
+}
+
+MBasicBlock* MBasicBlock::NewInternal(MIRGraph& graph, MBasicBlock* orig,
+ MResumePoint* resumePoint) {
+ jsbytecode* pc = IsResumeAfter(resumePoint->mode())
+ ? GetNextPc(resumePoint->pc())
+ : resumePoint->pc();
+
+ BytecodeSite* site =
+ new (graph.alloc()) BytecodeSite(orig->trackedTree(), pc);
+ MBasicBlock* block =
+ new (graph.alloc()) MBasicBlock(graph, orig->info(), site, INTERNAL);
+ if (!block->init()) {
+ return nullptr;
+ }
+
+ // Propagate the caller resume point from the original block.
+ block->callerResumePoint_ = orig->callerResumePoint();
+
+ // Copy the resume point.
+ block->stackPosition_ = resumePoint->stackDepth();
+ MResumePoint* entryResumePoint =
+ new (graph.alloc()) MResumePoint(block, pc, ResumeMode::ResumeAt);
+ if (!entryResumePoint->init(graph.alloc())) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < resumePoint->stackDepth(); i++) {
+ entryResumePoint->initOperand(i, resumePoint->getOperand(i));
+ }
+ block->entryResumePoint_ = entryResumePoint;
+
+ block->setLoopDepth(orig->loopDepth());
+
+ return block;
+}
+
+MBasicBlock* MBasicBlock::New(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, Kind kind) {
+ BytecodeSite* site = new (graph.alloc()) BytecodeSite();
+ MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
+ if (!block->init()) {
+ return nullptr;
+ }
+
+ if (pred) {
+ block->stackPosition_ = pred->stackPosition_;
+
+ if (block->kind_ == PENDING_LOOP_HEADER) {
+ size_t nphis = block->stackPosition_;
+
+ size_t nfree = graph.phiFreeListLength();
+
+ TempAllocator& alloc = graph.alloc();
+ MPhi* phis = nullptr;
+ if (nphis > nfree) {
+ phis = alloc.allocateArray<MPhi>(nphis - nfree);
+ if (!phis) {
+ return nullptr;
+ }
+ }
+
+ // Note: Phis are inserted in the same order as the slots.
+ for (size_t i = 0; i < nphis; i++) {
+ MDefinition* predSlot = pred->getSlot(i);
+
+ MOZ_ASSERT(predSlot->type() != MIRType::Value);
+
+ MPhi* phi;
+ if (i < nfree) {
+ phi = graph.takePhiFromFreeList();
+ } else {
+ phi = phis + (i - nfree);
+ }
+ new (phi) MPhi(alloc, predSlot->type());
+
+ phi->addInlineInput(predSlot);
+
+        // Append the phi to the block and use it for the slot.
+ block->addPhi(phi);
+ block->setSlot(i, phi);
+ }
+ } else {
+ if (!block->ensureHasSlots(0)) {
+ return nullptr;
+ }
+ block->copySlots(pred);
+ }
+
+ if (!block->predecessors_.append(pred)) {
+ return nullptr;
+ }
+ }
+
+ return block;
+}
+
+// Create an empty and unreachable block which jumps to |header|. Used
+// when the normal entry into a loop is removed (but the loop is still
+// reachable due to OSR) to preserve the invariant that every loop
+// header has two predecessors, which is needed for building the
+// dominator tree. The new block is inserted immediately before the
+// header, which preserves the graph ordering (post-order/RPO). These
+// blocks will all be removed before lowering.
+MBasicBlock* MBasicBlock::NewFakeLoopPredecessor(MIRGraph& graph,
+ MBasicBlock* header) {
+ MOZ_ASSERT(graph.osrBlock());
+
+ MBasicBlock* backedge = header->backedge();
+ MBasicBlock* fake = MBasicBlock::New(graph, header->info(), nullptr,
+ MBasicBlock::FAKE_LOOP_PRED);
+ if (!fake) {
+ return nullptr;
+ }
+
+ graph.insertBlockBefore(header, fake);
+ fake->setUnreachable();
+
+ // Create fake defs to use as inputs for any phis in |header|.
+ for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd());
+ iter != end; ++iter) {
+ if (!graph.alloc().ensureBallast()) {
+ return nullptr;
+ }
+ MPhi* phi = *iter;
+ auto* fakeDef = MUnreachableResult::New(graph.alloc(), phi->type());
+ fake->add(fakeDef);
+ if (!phi->addInputSlow(fakeDef)) {
+ return nullptr;
+ }
+ }
+
+ fake->end(MGoto::New(graph.alloc(), header));
+
+ if (!header->addPredecessorWithoutPhis(fake)) {
+ return nullptr;
+ }
+
+ // The backedge is always the last predecessor, but we have added a
+ // new pred. Restore |backedge| as |header|'s loop backedge.
+ header->clearLoopHeader();
+ header->setLoopHeader(backedge);
+
+ return fake;
+}
+
+void MIRGraph::removeFakeLoopPredecessors() {
+ MOZ_ASSERT(osrBlock());
+ size_t id = 0;
+ for (ReversePostorderIterator it = rpoBegin(); it != rpoEnd();) {
+ MBasicBlock* block = *it++;
+ if (block->isFakeLoopPred()) {
+ MOZ_ASSERT(block->unreachable());
+ MBasicBlock* succ = block->getSingleSuccessor();
+ succ->removePredecessor(block);
+ removeBlock(block);
+ } else {
+ block->setId(id++);
+ }
+ }
+#ifdef DEBUG
+ canBuildDominators_ = false;
+#endif
+}
+
+MBasicBlock::MBasicBlock(MIRGraph& graph, const CompileInfo& info,
+ BytecodeSite* site, Kind kind)
+ : graph_(graph),
+ info_(info),
+ predecessors_(graph.alloc()),
+ stackPosition_(info_.firstStackSlot()),
+ id_(0),
+ domIndex_(0),
+ numDominated_(0),
+ lir_(nullptr),
+ callerResumePoint_(nullptr),
+ entryResumePoint_(nullptr),
+ outerResumePoint_(nullptr),
+ successorWithPhis_(nullptr),
+ positionInPhiSuccessor_(0),
+ loopDepth_(0),
+ kind_(kind),
+ mark_(false),
+ immediatelyDominated_(graph.alloc()),
+ immediateDominator_(nullptr),
+ trackedSite_(site),
+ lineno_(0u),
+ columnIndex_(0u) {
+ MOZ_ASSERT(trackedSite_, "trackedSite_ is non-nullptr");
+}
+
+bool MBasicBlock::init() { return slots_.init(graph_.alloc(), info_.nslots()); }
+
+bool MBasicBlock::increaseSlots(size_t num) {
+ return slots_.growBy(graph_.alloc(), num);
+}
+
+bool MBasicBlock::ensureHasSlots(size_t num) {
+ size_t depth = stackDepth() + num;
+ if (depth > nslots()) {
+ if (!increaseSlots(depth - nslots())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void MBasicBlock::copySlots(MBasicBlock* from) {
+ MOZ_ASSERT(stackPosition_ <= from->stackPosition_);
+ MOZ_ASSERT(stackPosition_ <= nslots());
+
+ MDefinition** thisSlots = slots_.begin();
+ MDefinition** fromSlots = from->slots_.begin();
+ for (size_t i = 0, e = stackPosition_; i < e; ++i) {
+ thisSlots[i] = fromSlots[i];
+ }
+}
+
+bool MBasicBlock::inherit(TempAllocator& alloc, size_t stackDepth,
+ MBasicBlock* maybePred, uint32_t popped) {
+ MOZ_ASSERT_IF(maybePred, maybePred->stackDepth() == stackDepth);
+
+ MOZ_ASSERT(stackDepth >= popped);
+ stackDepth -= popped;
+ stackPosition_ = stackDepth;
+
+ if (maybePred && kind_ != PENDING_LOOP_HEADER) {
+ copySlots(maybePred);
+ }
+
+ MOZ_ASSERT(info_.nslots() >= stackPosition_);
+ MOZ_ASSERT(!entryResumePoint_);
+
+ // Propagate the caller resume point from the inherited block.
+ callerResumePoint_ = maybePred ? maybePred->callerResumePoint() : nullptr;
+
+ // Create a resume point using our initial stack state.
+ entryResumePoint_ =
+ new (alloc) MResumePoint(this, pc(), ResumeMode::ResumeAt);
+ if (!entryResumePoint_->init(alloc)) {
+ return false;
+ }
+
+ if (maybePred) {
+ if (!predecessors_.append(maybePred)) {
+ return false;
+ }
+
+ if (kind_ == PENDING_LOOP_HEADER) {
+ for (size_t i = 0; i < stackDepth; i++) {
+ MPhi* phi = MPhi::New(alloc.fallible());
+ if (!phi) {
+ return false;
+ }
+ phi->addInlineInput(maybePred->getSlot(i));
+ addPhi(phi);
+ setSlot(i, phi);
+ entryResumePoint()->initOperand(i, phi);
+ }
+ } else {
+ for (size_t i = 0; i < stackDepth; i++) {
+ entryResumePoint()->initOperand(i, getSlot(i));
+ }
+ }
+ } else {
+ /*
+ * Don't leave the operands uninitialized for the caller, as it may not
+ * initialize them later on.
+ */
+ for (size_t i = 0; i < stackDepth; i++) {
+ entryResumePoint()->clearOperand(i);
+ }
+ }
+
+ return true;
+}
+
+void MBasicBlock::inheritSlots(MBasicBlock* parent) {
+ stackPosition_ = parent->stackPosition_;
+ copySlots(parent);
+}
+
+bool MBasicBlock::initEntrySlots(TempAllocator& alloc) {
+ // Remove the previous resume point.
+ discardResumePoint(entryResumePoint_);
+
+ // Create a resume point using our initial stack state.
+ entryResumePoint_ =
+ MResumePoint::New(alloc, this, pc(), ResumeMode::ResumeAt);
+ if (!entryResumePoint_) {
+ return false;
+ }
+ return true;
+}
+
+MDefinition* MBasicBlock::environmentChain() {
+ return getSlot(info().environmentChainSlot());
+}
+
+MDefinition* MBasicBlock::argumentsObject() {
+ return getSlot(info().argsObjSlot());
+}
+
+void MBasicBlock::setEnvironmentChain(MDefinition* scopeObj) {
+ setSlot(info().environmentChainSlot(), scopeObj);
+}
+
+void MBasicBlock::setArgumentsObject(MDefinition* argsObj) {
+ setSlot(info().argsObjSlot(), argsObj);
+}
+
+void MBasicBlock::pick(int32_t depth) {
+ // pick takes a value and moves it to the top.
+ // pick(-2):
+ // A B C D E
+ // A B D C E [ swapAt(-2) ]
+ // A B D E C [ swapAt(-1) ]
+ for (; depth < 0; depth++) {
+ swapAt(depth);
+ }
+}
+
+void MBasicBlock::unpick(int32_t depth) {
+ // unpick takes the value on top of the stack and moves it under the depth-th
+  // element.
+ // unpick(-2):
+ // A B C D E
+ // A B C E D [ swapAt(-1) ]
+ // A B E C D [ swapAt(-2) ]
+ for (int32_t n = -1; n >= depth; n--) {
+ swapAt(n);
+ }
+}
+
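+// Swaps the stack slot at (negative) |depth| with the slot directly below it,
+// e.g. swapAt(-1) exchanges the top two entries of the virtual stack.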
+void MBasicBlock::swapAt(int32_t depth) {
+ uint32_t lhsDepth = stackPosition_ + depth - 1;
+ uint32_t rhsDepth = stackPosition_ + depth;
+
+ MDefinition* temp = slots_[lhsDepth];
+ slots_[lhsDepth] = slots_[rhsDepth];
+ slots_[rhsDepth] = temp;
+}
+
+void MBasicBlock::discardLastIns() { discard(lastIns()); }
+
+MConstant* MBasicBlock::optimizedOutConstant(TempAllocator& alloc) {
+ // If the first instruction is a MConstant(MagicValue(JS_OPTIMIZED_OUT))
+ // then reuse it.
+ MInstruction* ins = *begin();
+ if (ins->type() == MIRType::MagicOptimizedOut) {
+ return ins->toConstant();
+ }
+
+ MConstant* constant = MConstant::New(alloc, MagicValue(JS_OPTIMIZED_OUT));
+ insertBefore(ins, constant);
+ return constant;
+}
+
+void MBasicBlock::moveBefore(MInstruction* at, MInstruction* ins) {
+ // Remove |ins| from the current block.
+ MOZ_ASSERT(ins->block() == this);
+ instructions_.remove(ins);
+
+ // Insert into new block, which may be distinct.
+ // Uses and operands are untouched.
+ ins->setInstructionBlock(at->block(), at->trackedSite());
+ at->block()->instructions_.insertBefore(at, ins);
+}
+
+MInstruction* MBasicBlock::safeInsertTop(MDefinition* ins, IgnoreTop ignore) {
+ MOZ_ASSERT(graph().osrBlock() != this,
+ "We are not supposed to add any instruction in OSR blocks.");
+
+ // Beta nodes and interrupt checks are required to be located at the
+ // beginnings of basic blocks, so we must insert new instructions after any
+ // such instructions.
+ MInstructionIterator insertIter =
+ !ins || ins->isPhi() ? begin() : begin(ins->toInstruction());
+ while (insertIter->isBeta() || insertIter->isInterruptCheck() ||
+ insertIter->isConstant() || insertIter->isParameter() ||
+ (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout())) {
+ insertIter++;
+ }
+
+ return *insertIter;
+}
+
+void MBasicBlock::discardResumePoint(
+ MResumePoint* rp, ReferencesType refType /* = RefType_Default */) {
+ if (refType & RefType_DiscardOperands) {
+ rp->releaseUses();
+ }
+ rp->setDiscarded();
+ removeResumePoint(rp);
+}
+
+void MBasicBlock::removeResumePoint(MResumePoint* rp) {
+#ifdef DEBUG
+ MResumePointIterator iter = resumePointsBegin();
+ while (*iter != rp) {
+ // We should reach it before reaching the end.
+ MOZ_ASSERT(iter != resumePointsEnd());
+ iter++;
+ }
+ resumePoints_.removeAt(iter);
+#endif
+}
+
+void MBasicBlock::prepareForDiscard(
+ MInstruction* ins, ReferencesType refType /* = RefType_Default */) {
+ // Only remove instructions from the same basic block. This is needed for
+ // correctly removing the resume point if any.
+ MOZ_ASSERT(ins->block() == this);
+
+ MResumePoint* rp = ins->resumePoint();
+ if ((refType & RefType_DiscardResumePoint) && rp) {
+ discardResumePoint(rp, refType);
+ }
+
+  // We need to assert that instructions have no uses after removing their
+  // resume point operands, as they could be captured by their own resume
+  // point.
+ MOZ_ASSERT_IF(refType & RefType_AssertNoUses, !ins->hasUses());
+
+ const uint32_t InstructionOperands =
+ RefType_DiscardOperands | RefType_DiscardInstruction;
+ if ((refType & InstructionOperands) == InstructionOperands) {
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ ins->releaseOperand(i);
+ }
+ }
+
+ ins->setDiscarded();
+}
+
+void MBasicBlock::discard(MInstruction* ins) {
+ prepareForDiscard(ins);
+ instructions_.remove(ins);
+}
+
+void MBasicBlock::discardIgnoreOperands(MInstruction* ins) {
+#ifdef DEBUG
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ MOZ_ASSERT(!ins->hasOperand(i));
+ }
+#endif
+
+ prepareForDiscard(ins, RefType_IgnoreOperands);
+ instructions_.remove(ins);
+}
+
+void MBasicBlock::discardAllInstructions() {
+ MInstructionIterator iter = begin();
+ discardAllInstructionsStartingAt(iter);
+}
+
+void MBasicBlock::discardAllInstructionsStartingAt(MInstructionIterator iter) {
+ while (iter != end()) {
+ // Discard operands and resume point operands and flag the instruction
+ // as discarded. Also we do not assert that we have no uses as blocks
+ // might be removed in reverse post order.
+ MInstruction* ins = *iter++;
+ prepareForDiscard(ins, RefType_DefaultNoAssert);
+ instructions_.remove(ins);
+ }
+}
+
+void MBasicBlock::discardAllPhis() {
+ for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
+ iter->removeAllOperands();
+ }
+
+ for (MBasicBlock** pred = predecessors_.begin(); pred != predecessors_.end();
+ pred++) {
+ (*pred)->clearSuccessorWithPhis();
+ }
+
+ phis_.clear();
+}
+
+void MBasicBlock::discardAllResumePoints(bool discardEntry) {
+ if (outerResumePoint_) {
+ clearOuterResumePoint();
+ }
+
+ if (discardEntry && entryResumePoint_) {
+ clearEntryResumePoint();
+ }
+
+#ifdef DEBUG
+ if (!entryResumePoint()) {
+ MOZ_ASSERT(resumePointsEmpty());
+ } else {
+ MResumePointIterator iter(resumePointsBegin());
+ MOZ_ASSERT(iter != resumePointsEnd());
+ iter++;
+ MOZ_ASSERT(iter == resumePointsEnd());
+ }
+#endif
+}
+
+void MBasicBlock::clear() {
+ discardAllInstructions();
+ discardAllResumePoints();
+ discardAllPhis();
+}
+
+void MBasicBlock::insertBefore(MInstruction* at, MInstruction* ins) {
+ MOZ_ASSERT(at->block() == this);
+ ins->setInstructionBlock(this, at->trackedSite());
+ graph().allocDefinitionId(ins);
+ instructions_.insertBefore(at, ins);
+}
+
+void MBasicBlock::insertAfter(MInstruction* at, MInstruction* ins) {
+ MOZ_ASSERT(at->block() == this);
+ ins->setInstructionBlock(this, at->trackedSite());
+ graph().allocDefinitionId(ins);
+ instructions_.insertAfter(at, ins);
+}
+
+void MBasicBlock::insertAtEnd(MInstruction* ins) {
+ if (hasLastIns()) {
+ insertBefore(lastIns(), ins);
+ } else {
+ add(ins);
+ }
+}
+
+void MBasicBlock::addPhi(MPhi* phi) {
+ phis_.pushBack(phi);
+ phi->setPhiBlock(this);
+ graph().allocDefinitionId(phi);
+}
+
+void MBasicBlock::discardPhi(MPhi* phi) {
+ MOZ_ASSERT(!phis_.empty());
+
+ phi->removeAllOperands();
+ phi->setDiscarded();
+
+ phis_.remove(phi);
+
+ if (phis_.empty()) {
+ for (MBasicBlock* pred : predecessors_) {
+ pred->clearSuccessorWithPhis();
+ }
+ }
+}
+
+MResumePoint* MBasicBlock::activeResumePoint(MInstruction* ins) {
+ for (MInstructionReverseIterator iter = rbegin(ins); iter != rend(); iter++) {
+ if (iter->resumePoint() && *iter != ins) {
+ return iter->resumePoint();
+ }
+ }
+
+ // If none, take the entry resume point.
+ return entryResumePoint();
+}
+
+void MBasicBlock::flagOperandsOfPrunedBranches(MInstruction* ins) {
+ MResumePoint* rp = activeResumePoint(ins);
+
+  // The only blocks which do not have any entryResumePoint in Ion are the
+  // SplitEdge blocks. SplitEdge blocks only have a Goto instruction before
+  // the Range Analysis phase. In adjustInputs, we are manipulating
+  // instructions which have a TypePolicy. So, as a Goto has no operands and
+  // no type policy, the entry resume point should exist.
+ MOZ_ASSERT(rp);
+
+ // Flag all operands as being potentially used.
+ while (rp) {
+ for (size_t i = 0, end = rp->numOperands(); i < end; i++) {
+ rp->getOperand(i)->setImplicitlyUsedUnchecked();
+ }
+ rp = rp->caller();
+ }
+}
+
+bool MBasicBlock::addPredecessor(TempAllocator& alloc, MBasicBlock* pred) {
+ return addPredecessorPopN(alloc, pred, 0);
+}
+
+bool MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred,
+ uint32_t popped) {
+ MOZ_ASSERT(pred);
+ MOZ_ASSERT(predecessors_.length() > 0);
+
+ // Predecessors must be finished, and at the correct stack depth.
+ MOZ_ASSERT(pred->hasLastIns());
+ MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);
+
+ for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
+ MDefinition* mine = getSlot(i);
+ MDefinition* other = pred->getSlot(i);
+
+ if (mine != other) {
+ MIRType phiType = mine->type();
+ if (phiType != other->type()) {
+ phiType = MIRType::Value;
+ }
+
+ // If the current instruction is a phi, and it was created in this
+ // basic block, then we have already placed this phi and should
+ // instead append to its operands.
+ if (mine->isPhi() && mine->block() == this) {
+ MOZ_ASSERT(predecessors_.length());
+ MOZ_ASSERT(!mine->hasDefUses(),
+ "should only change type of newly created phis");
+ mine->setResultType(phiType);
+ if (!mine->toPhi()->addInputSlow(other)) {
+ return false;
+ }
+ } else {
+ // Otherwise, create a new phi node.
+ MPhi* phi = MPhi::New(alloc.fallible(), phiType);
+ if (!phi) {
+ return false;
+ }
+ addPhi(phi);
+
+ // Prime the phi for each predecessor, so input(x) comes from
+ // predecessor(x).
+ if (!phi->reserveLength(predecessors_.length() + 1)) {
+ return false;
+ }
+
+ for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds;
+ ++j) {
+ MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
+ phi->addInput(mine);
+ }
+ phi->addInput(other);
+
+ setSlot(i, phi);
+ if (entryResumePoint()) {
+ entryResumePoint()->replaceOperand(i, phi);
+ }
+ }
+ }
+ }
+
+ return predecessors_.append(pred);
+}
+
+bool MBasicBlock::addPredecessorSameInputsAs(MBasicBlock* pred,
+ MBasicBlock* existingPred) {
+ MOZ_ASSERT(pred);
+ MOZ_ASSERT(predecessors_.length() > 0);
+
+ // Predecessors must be finished, and at the correct stack depth.
+ MOZ_ASSERT(pred->hasLastIns());
+ MOZ_ASSERT(!pred->successorWithPhis());
+
+ if (!phisEmpty()) {
+ size_t existingPosition = indexForPredecessor(existingPred);
+ for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
+ if (!iter->addInputSlow(iter->getOperand(existingPosition))) {
+ return false;
+ }
+ }
+ }
+
+ if (!predecessors_.append(pred)) {
+ return false;
+ }
+ return true;
+}
+
+bool MBasicBlock::addPredecessorWithoutPhis(MBasicBlock* pred) {
+ // Predecessors must be finished.
+ MOZ_ASSERT(pred && pred->hasLastIns());
+ return predecessors_.append(pred);
+}
+
+bool MBasicBlock::addImmediatelyDominatedBlock(MBasicBlock* child) {
+ return immediatelyDominated_.append(child);
+}
+
+void MBasicBlock::removeImmediatelyDominatedBlock(MBasicBlock* child) {
+ for (size_t i = 0;; ++i) {
+ MOZ_ASSERT(i < immediatelyDominated_.length(),
+ "Dominated block to remove not present");
+ if (immediatelyDominated_[i] == child) {
+ immediatelyDominated_[i] = immediatelyDominated_.back();
+ immediatelyDominated_.popBack();
+ return;
+ }
+ }
+}
+
+bool MBasicBlock::setBackedge(MBasicBlock* pred) {
+ // Predecessors must be finished, and at the correct stack depth.
+ MOZ_ASSERT(hasLastIns());
+ MOZ_ASSERT(pred->hasLastIns());
+ MOZ_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());
+
+ // We must be a pending loop header
+ MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);
+
+ // Add exit definitions to each corresponding phi at the entry.
+ if (!inheritPhisFromBackedge(pred)) {
+ return false;
+ }
+
+ // We are now a loop header proper
+ kind_ = LOOP_HEADER;
+
+ return predecessors_.append(pred);
+}
+
+bool MBasicBlock::setBackedgeWasm(MBasicBlock* pred, size_t paramCount) {
+ // Predecessors must be finished, and at the correct stack depth.
+ MOZ_ASSERT(hasLastIns());
+ MOZ_ASSERT(pred->hasLastIns());
+ MOZ_ASSERT(stackDepth() + paramCount == pred->stackDepth());
+
+ // We must be a pending loop header
+ MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);
+
+ // Add exit definitions to each corresponding phi at the entry.
+ // Note: Phis are inserted in the same order as the slots. (see
+ // MBasicBlock::New)
+ size_t slot = 0;
+ for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
+ MPhi* entryDef = *phi;
+ MDefinition* exitDef = pred->getSlot(slot);
+
+ // Assert that we already placed phis for each slot.
+ MOZ_ASSERT(entryDef->block() == this);
+
+ // Assert that the phi already has the correct type.
+ MOZ_ASSERT(entryDef->type() == exitDef->type());
+ MOZ_ASSERT(entryDef->type() != MIRType::Value);
+
+ if (entryDef == exitDef) {
+ // If the exit def is the same as the entry def, make a redundant
+ // phi. Since loop headers have exactly two incoming edges, we
+ // know that that's just the first input.
+ //
+ // Note that we eliminate later rather than now, to avoid any
+ // weirdness around pending continue edges which might still hold
+ // onto phis.
+ exitDef = entryDef->getOperand(0);
+ }
+
+ // Phis always have room for 2 operands, so this can't fail.
+ MOZ_ASSERT(phi->numOperands() == 1);
+ entryDef->addInlineInput(exitDef);
+
+ // Two cases here: phis that correspond to locals, and phis that correspond
+ // to loop parameters. Only phis for locals go in slots.
+ if (slot < stackDepth()) {
+ setSlot(slot, entryDef);
+ }
+ }
+
+ // We are now a loop header proper
+ kind_ = LOOP_HEADER;
+
+ return predecessors_.append(pred);
+}
+
+void MBasicBlock::clearLoopHeader() {
+ MOZ_ASSERT(isLoopHeader());
+ kind_ = NORMAL;
+}
+
+void MBasicBlock::setLoopHeader(MBasicBlock* newBackedge) {
+ MOZ_ASSERT(!isLoopHeader());
+ kind_ = LOOP_HEADER;
+
+ size_t numPreds = numPredecessors();
+ MOZ_ASSERT(numPreds != 0);
+
+ size_t lastIndex = numPreds - 1;
+ size_t oldIndex = 0;
+ for (;; ++oldIndex) {
+ MOZ_ASSERT(oldIndex < numPreds);
+ MBasicBlock* pred = getPredecessor(oldIndex);
+ if (pred == newBackedge) {
+ break;
+ }
+ }
+
+ // Set the loop backedge to be the last element in predecessors_.
+ std::swap(predecessors_[oldIndex], predecessors_[lastIndex]);
+
+ // If we have phis, reorder their operands accordingly.
+ if (!phisEmpty()) {
+ getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
+ getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
+ for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
+ MPhi* phi = *iter;
+ MDefinition* last = phi->getOperand(oldIndex);
+ MDefinition* old = phi->getOperand(lastIndex);
+ phi->replaceOperand(oldIndex, old);
+ phi->replaceOperand(lastIndex, last);
+ }
+ }
+
+ MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
+ MOZ_ASSERT(backedge() == newBackedge);
+}
+
+size_t MBasicBlock::getSuccessorIndex(MBasicBlock* block) const {
+ MOZ_ASSERT(lastIns());
+ for (size_t i = 0; i < numSuccessors(); i++) {
+ if (getSuccessor(i) == block) {
+ return i;
+ }
+ }
+ MOZ_CRASH("Invalid successor");
+}
+
+size_t MBasicBlock::getPredecessorIndex(MBasicBlock* block) const {
+ for (size_t i = 0, e = numPredecessors(); i < e; ++i) {
+ if (getPredecessor(i) == block) {
+ return i;
+ }
+ }
+ MOZ_CRASH("Invalid predecessor");
+}
+
+void MBasicBlock::replaceSuccessor(size_t pos, MBasicBlock* split) {
+ MOZ_ASSERT(lastIns());
+
+ // Note, during split-critical-edges, successors-with-phis is not yet set.
+ // During PAA, this case is handled before we enter.
+ MOZ_ASSERT_IF(successorWithPhis_, successorWithPhis_ != getSuccessor(pos));
+
+ lastIns()->replaceSuccessor(pos, split);
+}
+
+void MBasicBlock::replacePredecessor(MBasicBlock* old, MBasicBlock* split) {
+ for (size_t i = 0; i < numPredecessors(); i++) {
+ if (getPredecessor(i) == old) {
+ predecessors_[i] = split;
+
+#ifdef DEBUG
+ // The same block should not appear twice in the predecessor list.
+ for (size_t j = i; j < numPredecessors(); j++) {
+ MOZ_ASSERT(predecessors_[j] != old);
+ }
+#endif
+
+ return;
+ }
+ }
+
+ MOZ_CRASH("predecessor was not found");
+}
+
+void MBasicBlock::clearDominatorInfo() {
+ setImmediateDominator(nullptr);
+ immediatelyDominated_.clear();
+ numDominated_ = 0;
+}
+
+void MBasicBlock::removePredecessorWithoutPhiOperands(MBasicBlock* pred,
+ size_t predIndex) {
+ // If we're removing the last backedge, this is no longer a loop.
+ if (isLoopHeader() && hasUniqueBackedge() && backedge() == pred) {
+ clearLoopHeader();
+ }
+
+ // Adjust phis. Note that this can leave redundant phis behind.
+ // Don't adjust successorWithPhis() if we haven't constructed this
+ // information yet.
+ if (pred->successorWithPhis()) {
+ MOZ_ASSERT(pred->positionInPhiSuccessor() == predIndex);
+ pred->clearSuccessorWithPhis();
+ for (size_t j = predIndex + 1; j < numPredecessors(); j++) {
+ getPredecessor(j)->setSuccessorWithPhis(this, j - 1);
+ }
+ }
+
+ // Remove from pred list.
+ predecessors_.erase(predecessors_.begin() + predIndex);
+}
+
+void MBasicBlock::removePredecessor(MBasicBlock* pred) {
+ size_t predIndex = getPredecessorIndex(pred);
+
+ // Remove the phi operands.
+ for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
+ iter->removeOperand(predIndex);
+ }
+
+ // Now we can call the underlying function, which expects that phi
+ // operands have been removed.
+ removePredecessorWithoutPhiOperands(pred, predIndex);
+}
+
+bool MBasicBlock::inheritPhisFromBackedge(MBasicBlock* backedge) {
+ // We must be a pending loop header
+ MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);
+
+ size_t stackDepth = entryResumePoint()->stackDepth();
+ for (size_t slot = 0; slot < stackDepth; slot++) {
+ // Get the value stack-slot of the back edge.
+ MDefinition* exitDef = backedge->getSlot(slot);
+
+ // Get the value of the loop header.
+ MDefinition* loopDef = entryResumePoint()->getOperand(slot);
+ if (loopDef->block() != this) {
+ // If we are finishing a pending loop header, then we need to ensure
+      // that all operands are phis. This is usually the case, except for
+      // objects/arrays built with generators, in which case we share the
+ // same allocations across all blocks.
+ MOZ_ASSERT(loopDef->block()->id() < id());
+ MOZ_ASSERT(loopDef == exitDef);
+ continue;
+ }
+
+ // Phis are allocated by NewPendingLoopHeader.
+ MPhi* entryDef = loopDef->toPhi();
+ MOZ_ASSERT(entryDef->block() == this);
+
+ if (entryDef == exitDef) {
+ // If the exit def is the same as the entry def, make a redundant
+ // phi. Since loop headers have exactly two incoming edges, we
+ // know that that's just the first input.
+ //
+ // Note that we eliminate later rather than now, to avoid any
+ // weirdness around pending continue edges which might still hold
+ // onto phis.
+ exitDef = entryDef->getOperand(0);
+ }
+
+ if (!entryDef->addInputSlow(exitDef)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+MTest* MBasicBlock::immediateDominatorBranch(BranchDirection* pdirection) {
+ *pdirection = FALSE_BRANCH;
+
+ if (numPredecessors() != 1) {
+ return nullptr;
+ }
+
+ MBasicBlock* dom = immediateDominator();
+ if (dom != getPredecessor(0)) {
+ return nullptr;
+ }
+
+ // Look for a trailing MTest branching to this block.
+ MInstruction* ins = dom->lastIns();
+ if (ins->isTest()) {
+ MTest* test = ins->toTest();
+
+ MOZ_ASSERT(test->ifTrue() == this || test->ifFalse() == this);
+ if (test->ifTrue() == this && test->ifFalse() == this) {
+ return nullptr;
+ }
+
+ *pdirection = (test->ifTrue() == this) ? TRUE_BRANCH : FALSE_BRANCH;
+ return test;
+ }
+
+ return nullptr;
+}
+
+void MBasicBlock::dumpStack(GenericPrinter& out) {
+#ifdef DEBUG
+ out.printf(" %-3s %-16s %-6s %-10s\n", "#", "name", "copyOf", "first/next");
+ out.printf("-------------------------------------------\n");
+ for (uint32_t i = 0; i < stackPosition_; i++) {
+ out.printf(" %-3u", i);
+ out.printf(" %-16p\n", (void*)slots_[i]);
+ }
+#endif
+}
+
+void MBasicBlock::dumpStack() {
+ Fprinter out(stderr);
+ dumpStack(out);
+ out.finish();
+}
+
+void MIRGraph::dump(GenericPrinter& out) {
+#ifdef JS_JITSPEW
+ for (MBasicBlockIterator iter(begin()); iter != end(); iter++) {
+ iter->dump(out);
+ out.printf("\n");
+ }
+#endif
+}
+
+void MIRGraph::dump() {
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
+
+void MBasicBlock::dump(GenericPrinter& out) {
+#ifdef JS_JITSPEW
+ out.printf("block%u:%s%s%s\n", id(), isLoopHeader() ? " (loop header)" : "",
+ unreachable() ? " (unreachable)" : "",
+ isMarked() ? " (marked)" : "");
+ if (MResumePoint* resume = entryResumePoint()) {
+ resume->dump(out);
+ }
+ for (MPhiIterator iter(phisBegin()); iter != phisEnd(); iter++) {
+ iter->dump(out);
+ }
+ for (MInstructionIterator iter(begin()); iter != end(); iter++) {
+ iter->dump(out);
+ }
+#endif
+}
+
+void MBasicBlock::dump() {
+ Fprinter out(stderr);
+ dump(out);
+ out.finish();
+}
diff --git a/js/src/jit/MIRGraph.h b/js/src/jit/MIRGraph.h
new file mode 100644
index 0000000000..f455e8ab4b
--- /dev/null
+++ b/js/src/jit/MIRGraph.h
@@ -0,0 +1,901 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MIRGraph_h
+#define jit_MIRGraph_h
+
+// This file declares the data structures used to build a control-flow graph
+// containing MIR.
+
+#include "jit/CompileInfo.h"
+#include "jit/FixedList.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/MIR.h"
+
+namespace js {
+namespace jit {
+
+class MBasicBlock;
+class MIRGraph;
+class MStart;
+
+class MDefinitionIterator;
+
+using MInstructionIterator = InlineListIterator<MInstruction>;
+using MInstructionReverseIterator = InlineListReverseIterator<MInstruction>;
+using MPhiIterator = InlineListIterator<MPhi>;
+
+#ifdef DEBUG
+using MResumePointIterator = InlineForwardListIterator<MResumePoint>;
+#endif
+
+class LBlock;
+
+class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock> {
+ public:
+ enum Kind {
+ NORMAL,
+ PENDING_LOOP_HEADER,
+ LOOP_HEADER,
+ SPLIT_EDGE,
+ FAKE_LOOP_PRED,
+ INTERNAL,
+ DEAD
+ };
+
+ private:
+ MBasicBlock(MIRGraph& graph, const CompileInfo& info, BytecodeSite* site,
+ Kind kind);
+ [[nodiscard]] bool init();
+ void copySlots(MBasicBlock* from);
+ [[nodiscard]] bool inherit(TempAllocator& alloc, size_t stackDepth,
+ MBasicBlock* maybePred, uint32_t popped);
+
+ // This block cannot be reached by any means.
+ bool unreachable_ = false;
+
+ // This block will unconditionally bail out.
+ bool alwaysBails_ = false;
+
+ // Pushes a copy of a local variable or argument.
+ void pushVariable(uint32_t slot) { push(slots_[slot]); }
+
+ // Sets a variable slot to the top of the stack, correctly creating copies
+ // as needed.
+ void setVariable(uint32_t slot) {
+ MOZ_ASSERT(stackPosition_ > info_.firstStackSlot());
+ setSlot(slot, slots_[stackPosition_ - 1]);
+ }
+
+ enum ReferencesType {
+ RefType_None = 0,
+
+ // Assert that the instruction is unused.
+ RefType_AssertNoUses = 1 << 0,
+
+ // Discard the operands of the resume point / instructions if the
+ // following flag are given too.
+ RefType_DiscardOperands = 1 << 1,
+ RefType_DiscardResumePoint = 1 << 2,
+ RefType_DiscardInstruction = 1 << 3,
+
+ // Discard operands of the instruction and its resume point.
+ RefType_DefaultNoAssert = RefType_DiscardOperands |
+ RefType_DiscardResumePoint |
+ RefType_DiscardInstruction,
+
+ // Discard everything and assert that the instruction is not used.
+ RefType_Default = RefType_AssertNoUses | RefType_DefaultNoAssert,
+
+ // Discard resume point operands only, without discarding the operands
+ // of the current instruction. Asserts that the instruction is unused.
+ RefType_IgnoreOperands = RefType_AssertNoUses | RefType_DiscardOperands |
+ RefType_DiscardResumePoint
+ };
+
+ void discardResumePoint(MResumePoint* rp,
+ ReferencesType refType = RefType_Default);
+ void removeResumePoint(MResumePoint* rp);
+
+ // Remove all references to an instruction such that it can be removed from
+  // the list of instructions, without keeping any dangling pointer to it. This
+ // includes the operands of the instruction, and the resume point if
+ // present.
+ void prepareForDiscard(MInstruction* ins,
+ ReferencesType refType = RefType_Default);
+
+ public:
+ ///////////////////////////////////////////////////////
+ ////////// BEGIN GRAPH BUILDING INSTRUCTIONS //////////
+ ///////////////////////////////////////////////////////
+
+ // Creates a new basic block for a MIR generator. If |pred| is not nullptr,
+ // its slots and stack depth are initialized from |pred|.
+ static MBasicBlock* New(MIRGraph& graph, size_t stackDepth,
+ const CompileInfo& info, MBasicBlock* maybePred,
+ BytecodeSite* site, Kind kind);
+ static MBasicBlock* New(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, Kind kind);
+ static MBasicBlock* NewPopN(MIRGraph& graph, const CompileInfo& info,
+ MBasicBlock* pred, BytecodeSite* site, Kind kind,
+ uint32_t popn);
+ static MBasicBlock* NewPendingLoopHeader(MIRGraph& graph,
+ const CompileInfo& info,
+ MBasicBlock* pred,
+ BytecodeSite* site);
+ static MBasicBlock* NewSplitEdge(MIRGraph& graph, MBasicBlock* pred,
+ size_t predEdgeIdx, MBasicBlock* succ);
+ static MBasicBlock* NewFakeLoopPredecessor(MIRGraph& graph,
+ MBasicBlock* header);
+
+ // Create a new basic block for internal control flow not present in the
+ // original CFG.
+ static MBasicBlock* NewInternal(MIRGraph& graph, MBasicBlock* orig,
+ MResumePoint* activeResumePoint);
+
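+  // Note: this relies on domIndex() being assigned such that the blocks
+  // dominated by |this| occupy the contiguous index range
+  // [domIndex(), domIndex() + numDominated()). The unsigned subtraction also
+  // handles the case where |other| precedes |this| in that ordering.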
+ bool dominates(const MBasicBlock* other) const {
+ return other->domIndex() - domIndex() < numDominated();
+ }
+
+ void setId(uint32_t id) { id_ = id; }
+
+ // Mark this block (and only this block) as unreachable.
+ void setUnreachable() {
+ MOZ_ASSERT(!unreachable_);
+ setUnreachableUnchecked();
+ }
+ void setUnreachableUnchecked() { unreachable_ = true; }
+ bool unreachable() const { return unreachable_; }
+
+ void setAlwaysBails() { alwaysBails_ = true; }
+ bool alwaysBails() const { return alwaysBails_; }
+
+ // Move the definition to the top of the stack.
+ void pick(int32_t depth);
+
+ // Move the top of the stack definition under the depth-th stack value.
+ void unpick(int32_t depth);
+
+ // Exchange 2 stack slots at the defined depth
+ void swapAt(int32_t depth);
+
+ // Note: most of the methods below are hot. Do not un-inline them without
+ // measuring the impact.
+
+ // Gets the instruction associated with various slot types.
+ MDefinition* peek(int32_t depth) {
+ MOZ_ASSERT(depth < 0);
+ MOZ_ASSERT(stackPosition_ + depth >= info_.firstStackSlot());
+ return peekUnchecked(depth);
+ }
+
+ MDefinition* peekUnchecked(int32_t depth) {
+ MOZ_ASSERT(depth < 0);
+ return getSlot(stackPosition_ + depth);
+ }
+
+ MDefinition* environmentChain();
+ MDefinition* argumentsObject();
+
+ // Increase the number of slots available
+ [[nodiscard]] bool increaseSlots(size_t num);
+ [[nodiscard]] bool ensureHasSlots(size_t num);
+
+ // Initializes a slot value; must not be called for normal stack
+ // operations, as it will not create new SSA names for copies.
+ void initSlot(uint32_t slot, MDefinition* ins) {
+ slots_[slot] = ins;
+ if (entryResumePoint()) {
+ entryResumePoint()->initOperand(slot, ins);
+ }
+ }
+
+ // Sets the instruction associated with various slot types. The
+ // instruction must lie at the top of the stack.
+ void setLocal(uint32_t local) { setVariable(info_.localSlot(local)); }
+ void setArg(uint32_t arg) { setVariable(info_.argSlot(arg)); }
+ void setSlot(uint32_t slot, MDefinition* ins) { slots_[slot] = ins; }
+
+ // Tracks an instruction as being pushed onto the operand stack.
+ void push(MDefinition* ins) {
+ MOZ_ASSERT(stackPosition_ < nslots());
+ slots_[stackPosition_++] = ins;
+ }
+ void pushArg(uint32_t arg) { pushVariable(info_.argSlot(arg)); }
+ void pushArgUnchecked(uint32_t arg) {
+ pushVariable(info_.argSlotUnchecked(arg));
+ }
+ void pushLocal(uint32_t local) { pushVariable(info_.localSlot(local)); }
+ void pushSlot(uint32_t slot) { pushVariable(slot); }
+ void setEnvironmentChain(MDefinition* ins);
+ void setArgumentsObject(MDefinition* ins);
+
+ // Returns the top of the stack, then decrements the virtual stack pointer.
+ MDefinition* pop() {
+ MOZ_ASSERT(stackPosition_ > info_.firstStackSlot());
+ return slots_[--stackPosition_];
+ }
+ void popn(uint32_t n) {
+ MOZ_ASSERT(stackPosition_ - n >= info_.firstStackSlot());
+ MOZ_ASSERT(stackPosition_ >= stackPosition_ - n);
+ stackPosition_ -= n;
+ }
+
+ // Adds an instruction to this block's instruction list.
+ inline void add(MInstruction* ins);
+
+ // Marks the last instruction of the block; no further instructions
+ // can be added.
+ void end(MControlInstruction* ins) {
+ MOZ_ASSERT(!hasLastIns()); // Existing control instructions should be
+ // removed first.
+ MOZ_ASSERT(ins);
+ add(ins);
+ }
+
+ // Adds a phi instruction, but does not set successorWithPhis.
+ void addPhi(MPhi* phi);
+
+ // Adds a resume point to this block.
+ void addResumePoint(MResumePoint* resume) {
+#ifdef DEBUG
+ resumePoints_.pushFront(resume);
+#endif
+ }
+
+ // Discard pre-allocated resume point.
+ void discardPreAllocatedResumePoint(MResumePoint* resume) {
+ MOZ_ASSERT(!resume->instruction());
+ discardResumePoint(resume);
+ }
+
+ // Adds a predecessor. Every predecessor must have the same exit stack
+ // depth as the entry state to this block. Adding a predecessor
+ // automatically creates phi nodes and rewrites uses as needed.
+ [[nodiscard]] bool addPredecessor(TempAllocator& alloc, MBasicBlock* pred);
+ [[nodiscard]] bool addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred,
+ uint32_t popped);
+
+ // Add a predecessor which won't introduce any new phis to this block.
+ // This may be called after the contents of this block have been built.
+ [[nodiscard]] bool addPredecessorSameInputsAs(MBasicBlock* pred,
+ MBasicBlock* existingPred);
+
+ // Stranger utilities used for inlining.
+ [[nodiscard]] bool addPredecessorWithoutPhis(MBasicBlock* pred);
+ void inheritSlots(MBasicBlock* parent);
+ [[nodiscard]] bool initEntrySlots(TempAllocator& alloc);
+
+ // Replaces an edge for a given block with a new block. This is
+ // used for critical edge splitting.
+ //
+ // Note: If successorWithPhis is set, you must not be replacing it.
+ void replacePredecessor(MBasicBlock* old, MBasicBlock* split);
+ void replaceSuccessor(size_t pos, MBasicBlock* split);
+
+ // Removes `pred` from the predecessor list. If this block defines phis,
+ // removes the entry for `pred` and updates the indices of later entries.
+ // This may introduce redundant phis if the new block has fewer
+ // than two predecessors.
+ void removePredecessor(MBasicBlock* pred);
+
+ // A version of removePredecessor which expects that phi operands to
+ // |pred| have already been removed.
+ void removePredecessorWithoutPhiOperands(MBasicBlock* pred, size_t predIndex);
+
+ // Resets all the dominator info so that it can be recomputed.
+ void clearDominatorInfo();
+
+ // Sets a back edge. This places phi nodes and rewrites instructions within
+ // the current loop as necessary.
+ [[nodiscard]] bool setBackedge(MBasicBlock* block);
+ [[nodiscard]] bool setBackedgeWasm(MBasicBlock* block, size_t paramCount);
+
+ // Resets a LOOP_HEADER block to a NORMAL block. This is needed when
+ // optimizations remove the backedge.
+ void clearLoopHeader();
+
+ // Sets a block to a LOOP_HEADER block, with newBackedge as its backedge.
+ // This is needed when optimizations remove the normal entry to a loop
+ // with multiple entries.
+ void setLoopHeader(MBasicBlock* newBackedge);
+
+ // Propagates backedge slots into phis operands of the loop header.
+ [[nodiscard]] bool inheritPhisFromBackedge(MBasicBlock* backedge);
+
+ void insertBefore(MInstruction* at, MInstruction* ins);
+ void insertAfter(MInstruction* at, MInstruction* ins);
+
+ void insertAtEnd(MInstruction* ins);
+
+ // Move an instruction. Movement may cross block boundaries.
+ void moveBefore(MInstruction* at, MInstruction* ins);
+
+ enum IgnoreTop { IgnoreNone = 0, IgnoreRecover = 1 << 0 };
+
+ // Locate the top of the |block|, where it is safe to insert a new
+ // instruction.
+ MInstruction* safeInsertTop(MDefinition* ins = nullptr,
+ IgnoreTop ignore = IgnoreNone);
+
+ // Removes an instruction with the intention to discard it.
+ void discard(MInstruction* ins);
+ void discardLastIns();
+ void discardAllInstructions();
+ void discardAllInstructionsStartingAt(MInstructionIterator iter);
+ void discardAllPhis();
+ void discardAllResumePoints(bool discardEntry = true);
+ void clear();
+
+ // Splits this block in two at a given instruction, inserting a new control
+ // flow diamond with |ins| in the slow path, |fastpath| in the other, and
+ // |condition| determining which path to take.
+ bool wrapInstructionInFastpath(MInstruction* ins, MInstruction* fastpath,
+ MInstruction* condition);
+
+ void moveOuterResumePointTo(MBasicBlock* dest);
+
+ // Move an instruction from this block to a block that has not yet been
+ // terminated.
+ void moveToNewBlock(MInstruction* ins, MBasicBlock* dst);
+
+ // Same as |void discard(MInstruction* ins)| but assuming that
+ // all operands are already discarded.
+ void discardIgnoreOperands(MInstruction* ins);
+
+ // Discards a phi instruction and updates predecessor successorWithPhis.
+ void discardPhi(MPhi* phi);
+
+  // Some instructions which guard against some MIRType value, or against a
+  // type expectation, should be considered as removing a potential branch
+  // where the guard does not hold. We need to register such instructions in
+  // order to do destructive optimizations correctly, such as Range Analysis.
+ void flagOperandsOfPrunedBranches(MInstruction* ins);
+
+ // Mark this block as having been removed from the graph.
+ void markAsDead() {
+ MOZ_ASSERT(kind_ != DEAD);
+ kind_ = DEAD;
+ }
+
+ ///////////////////////////////////////////////////////
+ /////////// END GRAPH BUILDING INSTRUCTIONS ///////////
+ ///////////////////////////////////////////////////////
+
+ MIRGraph& graph() { return graph_; }
+ const CompileInfo& info() const { return info_; }
+ jsbytecode* pc() const { return trackedSite_->pc(); }
+ uint32_t nslots() const { return slots_.length(); }
+ uint32_t id() const { return id_; }
+ uint32_t numPredecessors() const { return predecessors_.length(); }
+
+ uint32_t domIndex() const {
+ MOZ_ASSERT(!isDead());
+ return domIndex_;
+ }
+ void setDomIndex(uint32_t d) { domIndex_ = d; }
+
+ MBasicBlock* getPredecessor(uint32_t i) const { return predecessors_[i]; }
+ size_t indexForPredecessor(MBasicBlock* block) const {
+ // This should only be called before critical edge splitting.
+ MOZ_ASSERT(!block->successorWithPhis());
+
+ for (size_t i = 0; i < predecessors_.length(); i++) {
+ if (predecessors_[i] == block) {
+ return i;
+ }
+ }
+ MOZ_CRASH();
+ }
+ bool hasAnyIns() const { return !instructions_.empty(); }
+ bool hasLastIns() const {
+ return hasAnyIns() && instructions_.rbegin()->isControlInstruction();
+ }
+ MControlInstruction* lastIns() const {
+ MOZ_ASSERT(hasLastIns());
+ return instructions_.rbegin()->toControlInstruction();
+ }
+ // Find or allocate an optimized out constant.
+ MConstant* optimizedOutConstant(TempAllocator& alloc);
+ MPhiIterator phisBegin() const { return phis_.begin(); }
+ MPhiIterator phisBegin(MPhi* at) const { return phis_.begin(at); }
+ MPhiIterator phisEnd() const { return phis_.end(); }
+ bool phisEmpty() const { return phis_.empty(); }
+#ifdef DEBUG
+ MResumePointIterator resumePointsBegin() const {
+ return resumePoints_.begin();
+ }
+ MResumePointIterator resumePointsEnd() const { return resumePoints_.end(); }
+ bool resumePointsEmpty() const { return resumePoints_.empty(); }
+#endif
+ MInstructionIterator begin() { return instructions_.begin(); }
+ MInstructionIterator begin(MInstruction* at) {
+ MOZ_ASSERT(at->block() == this);
+ return instructions_.begin(at);
+ }
+ MInstructionIterator end() { return instructions_.end(); }
+ MInstructionReverseIterator rbegin() { return instructions_.rbegin(); }
+ MInstructionReverseIterator rbegin(MInstruction* at) {
+ MOZ_ASSERT(at->block() == this);
+ return instructions_.rbegin(at);
+ }
+ MInstructionReverseIterator rend() { return instructions_.rend(); }
+
+ bool isLoopHeader() const { return kind_ == LOOP_HEADER; }
+ bool isPendingLoopHeader() const { return kind_ == PENDING_LOOP_HEADER; }
+
+ bool hasUniqueBackedge() const {
+ MOZ_ASSERT(isLoopHeader());
+ MOZ_ASSERT(numPredecessors() >= 1);
+ if (numPredecessors() == 1 || numPredecessors() == 2) {
+ return true;
+ }
+ if (numPredecessors() == 3) {
+ // fixup block added by NewFakeLoopPredecessor
+ return getPredecessor(1)->numPredecessors() == 0;
+ }
+ return false;
+ }
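+  // The unique backedge, when present, is always stored as the last
+  // predecessor (see setBackedge, setBackedgeWasm and setLoopHeader).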
+ MBasicBlock* backedge() const {
+ MOZ_ASSERT(hasUniqueBackedge());
+ return getPredecessor(numPredecessors() - 1);
+ }
+ MBasicBlock* loopHeaderOfBackedge() const {
+ MOZ_ASSERT(isLoopBackedge());
+ return getSuccessor(numSuccessors() - 1);
+ }
+ MBasicBlock* loopPredecessor() const {
+ MOZ_ASSERT(isLoopHeader());
+ return getPredecessor(0);
+ }
+ bool isLoopBackedge() const {
+ if (!numSuccessors()) {
+ return false;
+ }
+ MBasicBlock* lastSuccessor = getSuccessor(numSuccessors() - 1);
+ return lastSuccessor->isLoopHeader() &&
+ lastSuccessor->hasUniqueBackedge() &&
+ lastSuccessor->backedge() == this;
+ }
+ bool isSplitEdge() const { return kind_ == SPLIT_EDGE; }
+ bool isDead() const { return kind_ == DEAD; }
+ bool isFakeLoopPred() const { return kind_ == FAKE_LOOP_PRED; }
+
+ uint32_t stackDepth() const { return stackPosition_; }
+ bool isMarked() const { return mark_; }
+ void mark() {
+ MOZ_ASSERT(!mark_, "Marking already-marked block");
+ markUnchecked();
+ }
+ void markUnchecked() { mark_ = true; }
+ void unmark() {
+    MOZ_ASSERT(mark_, "Unmarking unmarked block");
+ unmarkUnchecked();
+ }
+ void unmarkUnchecked() { mark_ = false; }
+
+ MBasicBlock* immediateDominator() const { return immediateDominator_; }
+
+ void setImmediateDominator(MBasicBlock* dom) { immediateDominator_ = dom; }
+
+ MTest* immediateDominatorBranch(BranchDirection* pdirection);
+
+ size_t numImmediatelyDominatedBlocks() const {
+ return immediatelyDominated_.length();
+ }
+
+ MBasicBlock* getImmediatelyDominatedBlock(size_t i) const {
+ return immediatelyDominated_[i];
+ }
+
+ MBasicBlock** immediatelyDominatedBlocksBegin() {
+ return immediatelyDominated_.begin();
+ }
+
+ MBasicBlock** immediatelyDominatedBlocksEnd() {
+ return immediatelyDominated_.end();
+ }
+
+ // Return the number of blocks dominated by this block. All blocks
+ // dominate at least themselves, so this will always be non-zero.
+ size_t numDominated() const {
+ MOZ_ASSERT(numDominated_ != 0);
+ return numDominated_;
+ }
+
+ void addNumDominated(size_t n) { numDominated_ += n; }
+
+ // Add |child| to this block's immediately-dominated set.
+ bool addImmediatelyDominatedBlock(MBasicBlock* child);
+
+ // Remove |child| from this block's immediately-dominated set.
+ void removeImmediatelyDominatedBlock(MBasicBlock* child);
+
+ // This function retrieves the internal instruction associated with a
+ // slot, and should not be used for normal stack operations. It is an
+ // internal helper that is also used to enhance spew.
+ MDefinition* getSlot(uint32_t index) {
+ MOZ_ASSERT(index < stackPosition_);
+ return slots_[index];
+ }
+
+ MResumePoint* entryResumePoint() const { return entryResumePoint_; }
+ void setEntryResumePoint(MResumePoint* rp) { entryResumePoint_ = rp; }
+ void clearEntryResumePoint() {
+ discardResumePoint(entryResumePoint_);
+ entryResumePoint_ = nullptr;
+ }
+ MResumePoint* outerResumePoint() const { return outerResumePoint_; }
+ void setOuterResumePoint(MResumePoint* outer) {
+ MOZ_ASSERT(!outerResumePoint_);
+ outerResumePoint_ = outer;
+ }
+ void clearOuterResumePoint() {
+ discardResumePoint(outerResumePoint_);
+ outerResumePoint_ = nullptr;
+ }
+ MResumePoint* callerResumePoint() const { return callerResumePoint_; }
+ void setCallerResumePoint(MResumePoint* caller) {
+ callerResumePoint_ = caller;
+ }
+
+ LBlock* lir() const { return lir_; }
+ void assignLir(LBlock* lir) {
+ MOZ_ASSERT(!lir_);
+ lir_ = lir;
+ }
+
+ MBasicBlock* successorWithPhis() const { return successorWithPhis_; }
+ uint32_t positionInPhiSuccessor() const {
+ MOZ_ASSERT(successorWithPhis());
+ return positionInPhiSuccessor_;
+ }
+ void setSuccessorWithPhis(MBasicBlock* successor, uint32_t id) {
+ successorWithPhis_ = successor;
+ positionInPhiSuccessor_ = id;
+ }
+ void clearSuccessorWithPhis() { successorWithPhis_ = nullptr; }
+ size_t numSuccessors() const {
+ MOZ_ASSERT(lastIns());
+ return lastIns()->numSuccessors();
+ }
+ MBasicBlock* getSuccessor(size_t index) const {
+ MOZ_ASSERT(lastIns());
+ return lastIns()->getSuccessor(index);
+ }
+ MBasicBlock* getSingleSuccessor() const {
+ MOZ_ASSERT(numSuccessors() == 1);
+ return getSuccessor(0);
+ }
+ size_t getSuccessorIndex(MBasicBlock*) const;
+ size_t getPredecessorIndex(MBasicBlock*) const;
+
+ void setLoopDepth(uint32_t loopDepth) { loopDepth_ = loopDepth; }
+ uint32_t loopDepth() const { return loopDepth_; }
+
+ void dumpStack(GenericPrinter& out);
+ void dumpStack();
+
+ void dump(GenericPrinter& out);
+ void dump();
+
+ BytecodeSite* trackedSite() const { return trackedSite_; }
+ InlineScriptTree* trackedTree() const { return trackedSite_->tree(); }
+
+ // Find the previous resume point that would be used if this instruction
+ // bails out.
+ MResumePoint* activeResumePoint(MInstruction* ins);
+
+ private:
+ MIRGraph& graph_;
+ const CompileInfo& info_; // Each block originates from a particular script.
+ InlineList<MInstruction> instructions_;
+ Vector<MBasicBlock*, 1, JitAllocPolicy> predecessors_;
+ InlineList<MPhi> phis_;
+ FixedList<MDefinition*> slots_;
+ uint32_t stackPosition_;
+ uint32_t id_;
+ uint32_t domIndex_; // Index in the dominator tree.
+ uint32_t numDominated_;
+ LBlock* lir_;
+
+  // Copy of a dominator block's outerResumePoint_ which holds the state of
+  // the caller frame at the time of the call. If not null, this implies that
+  // this basic block corresponds to an inlined script.
+ MResumePoint* callerResumePoint_;
+
+ // Resume point holding baseline-like frame for the PC corresponding to the
+ // entry of this basic block.
+ MResumePoint* entryResumePoint_;
+
+ // Resume point holding baseline-like frame for the PC corresponding to the
+ // beginning of the call-site which is being inlined after this block.
+ MResumePoint* outerResumePoint_;
+
+#ifdef DEBUG
+ // Unordered list used to verify that all the resume points which are
+ // registered are correctly removed when a basic block is removed.
+ InlineForwardList<MResumePoint> resumePoints_;
+#endif
+
+ MBasicBlock* successorWithPhis_;
+ uint32_t positionInPhiSuccessor_;
+ uint32_t loopDepth_;
+ Kind kind_ : 8;
+
+ // Utility mark for traversal algorithms.
+ bool mark_;
+
+ Vector<MBasicBlock*, 1, JitAllocPolicy> immediatelyDominated_;
+ MBasicBlock* immediateDominator_;
+
+  // Track bailouts by storing the current pc in MIR instructions added at
+ // this cycle. This is also used for tracking calls and optimizations when
+ // profiling.
+ BytecodeSite* trackedSite_;
+
+ unsigned lineno_;
+ unsigned columnIndex_;
+
+ public:
+ void setLineno(unsigned l) { lineno_ = l; }
+ unsigned lineno() const { return lineno_; }
+ void setColumnIndex(unsigned c) { columnIndex_ = c; }
+ unsigned columnIndex() const { return columnIndex_; }
+};
+
+using MBasicBlockIterator = InlineListIterator<MBasicBlock>;
+using ReversePostorderIterator = InlineListIterator<MBasicBlock>;
+using PostorderIterator = InlineListReverseIterator<MBasicBlock>;
+
+using MIRGraphReturns = Vector<MBasicBlock*, 1, JitAllocPolicy>;
+
+class MIRGraph {
+ InlineList<MBasicBlock> blocks_;
+ TempAllocator* alloc_;
+ MIRGraphReturns* returnAccumulator_;
+ uint32_t blockIdGen_;
+ uint32_t idGen_;
+ MBasicBlock* osrBlock_;
+
+ size_t numBlocks_;
+ bool hasTryBlock_;
+
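+  // Free list of phis which passes may recycle instead of allocating new ones
+  // (see addPhiToFreeList / takePhiFromFreeList below).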
+ InlineList<MPhi> phiFreeList_;
+ size_t phiFreeListLength_;
+
+ public:
+ explicit MIRGraph(TempAllocator* alloc)
+ : alloc_(alloc),
+ returnAccumulator_(nullptr),
+ blockIdGen_(0),
+ idGen_(0),
+ osrBlock_(nullptr),
+ numBlocks_(0),
+ hasTryBlock_(false),
+ phiFreeListLength_(0) {}
+
+ TempAllocator& alloc() const { return *alloc_; }
+
+ void addBlock(MBasicBlock* block);
+ void insertBlockAfter(MBasicBlock* at, MBasicBlock* block);
+ void insertBlockBefore(MBasicBlock* at, MBasicBlock* block);
+
+ void unmarkBlocks();
+
+ void setReturnAccumulator(MIRGraphReturns* accum) {
+ returnAccumulator_ = accum;
+ }
+ MIRGraphReturns* returnAccumulator() const { return returnAccumulator_; }
+
+ [[nodiscard]] bool addReturn(MBasicBlock* returnBlock) {
+ if (!returnAccumulator_) {
+ return true;
+ }
+
+ return returnAccumulator_->append(returnBlock);
+ }
+
+ MBasicBlock* entryBlock() { return *blocks_.begin(); }
+ MBasicBlockIterator begin() { return blocks_.begin(); }
+ MBasicBlockIterator begin(MBasicBlock* at) { return blocks_.begin(at); }
+ MBasicBlockIterator end() { return blocks_.end(); }
+ PostorderIterator poBegin() { return blocks_.rbegin(); }
+ PostorderIterator poBegin(MBasicBlock* at) { return blocks_.rbegin(at); }
+ PostorderIterator poEnd() { return blocks_.rend(); }
+ ReversePostorderIterator rpoBegin() { return blocks_.begin(); }
+ ReversePostorderIterator rpoBegin(MBasicBlock* at) {
+ return blocks_.begin(at);
+ }
+ ReversePostorderIterator rpoEnd() { return blocks_.end(); }
+ void removeBlock(MBasicBlock* block);
+ void moveBlockToEnd(MBasicBlock* block) {
+ blocks_.remove(block);
+ MOZ_ASSERT_IF(!blocks_.empty(), block->id());
+ blocks_.pushBack(block);
+ }
+ void moveBlockBefore(MBasicBlock* at, MBasicBlock* block) {
+ MOZ_ASSERT(block->id());
+ blocks_.remove(block);
+ blocks_.insertBefore(at, block);
+ }
+ void moveBlockAfter(MBasicBlock* at, MBasicBlock* block) {
+ MOZ_ASSERT(block->id());
+ blocks_.remove(block);
+ blocks_.insertAfter(at, block);
+ }
+ size_t numBlocks() const { return numBlocks_; }
+ uint32_t numBlockIds() const { return blockIdGen_; }
+ void allocDefinitionId(MDefinition* ins) { ins->setId(idGen_++); }
+ uint32_t getNumInstructionIds() { return idGen_; }
+ MResumePoint* entryResumePoint() { return entryBlock()->entryResumePoint(); }
+
+ void setOsrBlock(MBasicBlock* osrBlock) {
+ MOZ_ASSERT(!osrBlock_);
+ osrBlock_ = osrBlock;
+ }
+ MBasicBlock* osrBlock() const { return osrBlock_; }
+
+ MBasicBlock* osrPreHeaderBlock() const {
+ return osrBlock() ? osrBlock()->getSingleSuccessor() : nullptr;
+ }
+
+ bool hasTryBlock() const { return hasTryBlock_; }
+ void setHasTryBlock() { hasTryBlock_ = true; }
+
+ void dump(GenericPrinter& out);
+ void dump();
+
+ void addPhiToFreeList(MPhi* phi) {
+ phiFreeList_.pushBack(phi);
+ phiFreeListLength_++;
+ }
+ size_t phiFreeListLength() const { return phiFreeListLength_; }
+ MPhi* takePhiFromFreeList() {
+ MOZ_ASSERT(phiFreeListLength_ > 0);
+ phiFreeListLength_--;
+ return phiFreeList_.popBack();
+ }
+
+ void removeFakeLoopPredecessors();
+
+#ifdef DEBUG
+ // Dominators can't be built after we remove fake loop predecessors.
+ private:
+ bool canBuildDominators_ = true;
+
+ public:
+ bool canBuildDominators() const { return canBuildDominators_; }
+#endif
+};
+
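+// Iterates over the phis and then the instructions of an MBasicBlock,
+// stopping before the block's final control instruction (see more()).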
+class MDefinitionIterator {
+ friend class MBasicBlock;
+ friend class MNodeIterator;
+
+ private:
+ MBasicBlock* block_;
+ MPhiIterator phiIter_;
+ MInstructionIterator iter_;
+
+ bool atPhi() const { return phiIter_ != block_->phisEnd(); }
+
+ MDefinition* getIns() {
+ if (atPhi()) {
+ return *phiIter_;
+ }
+ return *iter_;
+ }
+
+ bool more() const { return atPhi() || (*iter_) != block_->lastIns(); }
+
+ public:
+ explicit MDefinitionIterator(MBasicBlock* block)
+ : block_(block), phiIter_(block->phisBegin()), iter_(block->begin()) {}
+
+ MDefinitionIterator operator++() {
+ MOZ_ASSERT(more());
+ if (atPhi()) {
+ ++phiIter_;
+ } else {
+ ++iter_;
+ }
+ return *this;
+ }
+
+ MDefinitionIterator operator++(int) {
+ MDefinitionIterator old(*this);
+ operator++();
+ return old;
+ }
+
+ explicit operator bool() const { return more(); }
+
+ MDefinition* operator*() { return getIns(); }
+
+ MDefinition* operator->() { return getIns(); }
+};
+
+// Iterates over all resume points, phis, and instructions of an MBasicBlock.
+// Resume points are visited as long as they have not been discarded.
+class MNodeIterator {
+ private:
+ // If this is non-null, the resume point that we will visit next (unless
+ // it has been discarded). Initialized to the entry resume point.
+ // Otherwise, resume point of the most recently visited instruction.
+ MResumePoint* resumePoint_;
+
+ mozilla::DebugOnly<MInstruction*> lastInstruction_ = nullptr;
+
+ // Definition iterator which is one step ahead when visiting resume points.
+ // This is in order to avoid incrementing the iterator while it is settled
+ // on a discarded instruction.
+ MDefinitionIterator defIter_;
+
+ MBasicBlock* block() const { return defIter_.block_; }
+
+ bool atResumePoint() const {
+ MOZ_ASSERT_IF(lastInstruction_ && !lastInstruction_->isDiscarded(),
+ lastInstruction_->resumePoint() == resumePoint_);
+ return resumePoint_ && !resumePoint_->isDiscarded();
+ }
+
+ MNode* getNode() {
+ if (atResumePoint()) {
+ return resumePoint_;
+ }
+ return *defIter_;
+ }
+
+ void next() {
+ if (!atResumePoint()) {
+ if (defIter_->isInstruction()) {
+ resumePoint_ = defIter_->toInstruction()->resumePoint();
+ lastInstruction_ = defIter_->toInstruction();
+ }
+ defIter_++;
+ } else {
+ resumePoint_ = nullptr;
+ lastInstruction_ = nullptr;
+ }
+ }
+
+ bool more() const { return defIter_ || atResumePoint(); }
+
+ public:
+ explicit MNodeIterator(MBasicBlock* block)
+ : resumePoint_(block->entryResumePoint()), defIter_(block) {
+ MOZ_ASSERT(bool(block->entryResumePoint()) == atResumePoint());
+ }
+
+ MNodeIterator operator++(int) {
+ MNodeIterator old(*this);
+ if (more()) {
+ next();
+ }
+ return old;
+ }
+
+ explicit operator bool() const { return more(); }
+
+ MNode* operator*() { return getNode(); }
+
+ MNode* operator->() { return getNode(); }
+};
+
+void MBasicBlock::add(MInstruction* ins) {
+ MOZ_ASSERT(!hasLastIns());
+ ins->setInstructionBlock(this, trackedSite_);
+ graph().allocDefinitionId(ins);
+ instructions_.pushBack(ins);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MIRGraph_h */
diff --git a/js/src/jit/MIROps.yaml b/js/src/jit/MIROps.yaml
new file mode 100644
index 0000000000..fac9918429
--- /dev/null
+++ b/js/src/jit/MIROps.yaml
@@ -0,0 +1,3064 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# [SMDOC] MIR Opcodes
+# =======================
+# This file defines all MIR opcodes. It is parsed by GenerateMIRFiles.py
+# at build time to create MIROpsGenerated.h. Each opcode consists of a
+# name and a set of attributes that are described below. A few of the
+# attributes below allow setting the value to "custom", meaning the
+# method will be declared for the MIR op, but will need to be implemented
+# in C++ (typically done in MIR.cpp). Unless marked as required, attributes
+# are optional.
+#
+# name [required]
+# ====
+# Opcode name.
+# Possible values:
+# - opcode string: used as the name for MIR opcode.
+#
+# gen_boilerplate
+# ===============
+# Used to decide whether to generate MIR boilerplate.
+# - true (default): auto generate boilerplate for this MIR opcode
+# - false: do not generate boilerplate for this MIR opcode
+#
+# operands
+# ========
+# A list of operands for the MIR op class constructor. Each operand is a
+# MIR node. The operand kind is specified using one of the kinds from
+# the MIRType enum in IonTypes.h. The specified types for the
+# operands will decide the type policy for the instruction.
+#
+# The naming of operands is how the NAMED_OPERANDS macro will define
+# its operands.
+#
+# For example:
+# object: Object
+# id: Value
+# value: Object
+#
+# Will result in an instruction having the type policy of:
+# MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, ObjectPolicy<2>>
+# and a named operands definition that looks like the following:
+# NAMED_OPERANDS((0, object), (1, idValue), (2, value))
+#
+# - attribute not specified (default): no code generated
+# - operand list: MIRTypes (See MIRType in jit/IonTypes.h)
+#
+# arguments
+# =========
+# A list of non-MIR node arguments to the MIR op class constructor
+# that are passed along with the operands. The arguments require
+# both a name and a full type signature for each item in the list.
+#
+# For example:
+# templateObject: JSObject*
+# initialHeap: gc::Heap
+#
+# For each argument a private variable declaration will be autogenerated
+# in the MIR op class, as well as simple accessor for that variable. If
+# the type of the variable is a GC pointer it will be automatically
+# wrapped by CompilerGCPointer. The above arguments list will result in
+# the following declarations and accessors:
+#
+# CompilerGCPointer<JSObject*> templateObject_;
+# gc::Heap initialHeap_;
+#
+# JSObject* templateObject() const { return templateObject_; }
+# gc::Heap initialHeap() const { return initialHeap_; }
+#
+# - attribute not specified (default): no code generated
+# - operand list: argument names and their full type signature
+#
+# type_policy
+# ============
+# If this attribute is present, then the type policy for that opcode will be
+# NoTypePolicy. This is used for opcodes that should have no type policy.
+# - attribute not specified (default): no code generated, type policy
+#     is based on the operands
+# - none: defines the type policy as opcode's NoTypePolicy
+#
+# result_type
+# ===========
+# Defines the result type of the MIR opcode.
+# - attribute not specified (default): no code is generated
+# - MIRType string: Will add a call to setResultType to the opcode constructor.
+# This will set the MIR opcodes result type to whatever the
+# specified MIRType is (See MIRType in jit/IonTypes.h).
+#
+# guard
+# =====
+# Set if the opcode is a guard instruction and is used for checks in optimizations
+# such as range analysis and value numbering.
+# - attribute not specified (default): no code generated
+# - true: adds setGuard to opcode constructor
+#
+# movable
+# =======
+# Defines the movable MIR flag for movable instructions. This is used for knowing
+# whether we can hoist an instruction.
+# - attribute not specified (default): no code generated
+# - true: adds setMovable call in opcode constructor
+#
+# folds_to
+# ========
+# The foldsTo method is used for determining if an instruction can be folded
+# into a simpler instruction or for constant folding, depending on its operands.
+# - attribute not specified (default): no code generated, no constants to fold
+# - custom: custom C++ implementation
+#
+# congruent_to
+# ============
+# Used by ValueNumbering to determine if two values are congruent.
+# - attribute not specified (default): no code generated, congruentTo(foo) returns
+# false
+# - if_operands_equal: congruentTo(foo) will return congruentIfOperandsEqual(foo)
+# - custom: custom C++ implementation
+#
+# alias_set
+# =========
+# Defines the getAliasSet function for a MIR op. The alias set is used for alias
+# analysis. The default alias set is Any.
+# - attribute not specified (default): no code generated, alias set is Any
+#   - none: this is the most common case; it will set the alias set to None.
+# - custom: custom C++ implementation in MIR.cpp
+#
+# possibly_calls
+# ==============
+# Defines if an opcode can possibly call.
+# - attribute not specified (default): no code generated, opcode does not call
+# - true: possiblyCalls returns true
+# - custom: custom C++ implementation
+#
+# compute_range
+# =============
+# Computes and sets the range value for a MIR node, which is then used in range
+# analysis.
+# - attribute not specified (default): no code generated, range is not set for node
+# - custom: custom C++ implementation in RangeAnalysis.cpp
+#
+# can_recover
+# ===========
+# Indicates whether this instruction can be recovered on bailout.
+# Possible values:
+# - attribute not specified (default): no code generated, canRecoverOnBailout
+# returns false
+# - true: canRecoverOnBailout returns true
+# - custom: canRecoverOnBailout has a custom C++ implementation
+# If the value is either 'true' or 'custom', writeRecoverData has a custom C++
+# implementation.
+#
+# clone
+# =====
+# Allows cloning for that MIR op.
+# - attribute not specified (default): no code generated
+# - true: allows cloning
+#
+
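+# As a purely hypothetical illustration (ExampleOp is not a real opcode in
+# this file and is kept commented out), an entry combining several of the
+# attributes described above might look like this:
+#
+#   - name: ExampleOp
+#     operands:
+#       object: Object
+#       index: Int32
+#     arguments:
+#       templateObject: JSObject*
+#     result_type: Value
+#     guard: true
+#     movable: true
+#     congruent_to: if_operands_equal
+#     alias_set: custom
+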
+# TODO(no-TI): try to remove this instruction.
+- name: Start
+
+# Instruction marking the entrypoint for on-stack replacement.
+# OSR may occur at loop headers (at JSOp::LoopHead).
+# There is at most one MOsrEntry per MIRGraph.
+- name: OsrEntry
+ result_type: Pointer
+
+- name: Nop
+ alias_set: none
+ clone: true
+
+- name: LimitedTruncate
+ gen_boilerplate: false
+
+- name: Constant
+ gen_boilerplate: false
+
+- name: WasmNullConstant
+ gen_boilerplate: false
+
+- name: WasmFloatConstant
+ gen_boilerplate: false
+
+- name: Parameter
+ gen_boilerplate: false
+
+- name: Callee
+ result_type: Object
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: IsConstructing
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: TableSwitch
+ gen_boilerplate: false
+
+- name: Goto
+ gen_boilerplate: false
+
+- name: Test
+ gen_boilerplate: false
+
+- name: Return
+ gen_boilerplate: false
+
+- name: Throw
+ operands:
+ ins: Value
+ alias_set: custom
+ possibly_calls: true
+
+- name: NewArray
+ gen_boilerplate: false
+
+- name: NewArrayDynamicLength
+ operands:
+ length: Int32
+ arguments:
+ templateObject: JSObject*
+ initialHeap: gc::Heap
+ result_type: Object
+ # Need to throw if length is negative.
+ guard: true
+ # Throws if length is negative.
+ alias_set: custom
+
+- name: NewTypedArray
+ gen_boilerplate: false
+
+- name: NewTypedArrayDynamicLength
+ operands:
+ length: Int32
+ arguments:
+ templateObject: JSObject*
+ initialHeap: gc::Heap
+ result_type: Object
+ guard: true
+ # Throws if length is negative.
+ alias_set: custom
+
+# Create a new TypedArray from an Array (or Array-like object) or a TypedArray.
+- name: NewTypedArrayFromArray
+ operands:
+ array: Object
+ arguments:
+ templateObject: JSObject*
+ initialHeap: gc::Heap
+ result_type: Object
+ guard: true
+ possibly_calls: true
+
+# Create a new TypedArray from an ArrayBuffer (or SharedArrayBuffer).
+- name: NewTypedArrayFromArrayBuffer
+ operands:
+ arrayBuffer: Object
+ byteOffset: Value
+ length: Value
+ arguments:
+ templateObject: JSObject*
+ initialHeap: gc::Heap
+ result_type: Object
+ guard: true
+ possibly_calls: true
+
+- name: NewObject
+ gen_boilerplate: false
+
+- name: NewPlainObject
+ gen_boilerplate: false
+
+- name: NewArrayObject
+ gen_boilerplate: false
+
+- name: NewIterator
+ gen_boilerplate: false
+
+- name: ObjectState
+ gen_boilerplate: false
+
+- name: ArrayState
+ gen_boilerplate: false
+
+- name: BindFunction
+ gen_boilerplate: false
+
+- name: NewBoundFunction
+ arguments:
+ templateObj: JSObject*
+ result_type: Object
+ alias_set: none
+
+- name: BoundFunctionNumArgs
+ operands:
+ object: Object
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ # A bound function's state is immutable, so there is no
+ # implicit dependency.
+ alias_set: none
+
+- name: GuardBoundFunctionIsConstructor
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ # The is-constructor flag is immutable for a bound function.
+ alias_set: none
+
+# Setting __proto__ in an object literal.
+- name: MutateProto
+ operands:
+ object: Object
+ value: Value
+ result_type: None
+ possibly_calls: true
+
+- name: InitPropGetterSetter
+ operands:
+ object: Object
+ value: Object
+ arguments:
+ name: PropertyName*
+
+- name: InitElemGetterSetter
+ operands:
+ object: Object
+ id: Value
+ value: Object
+
+- name: Call
+ gen_boilerplate: false
+
+- name: CallClassHook
+ gen_boilerplate: false
+
+- name: ApplyArgs
+ gen_boilerplate: false
+
+- name: ApplyArgsObj
+ gen_boilerplate: false
+
+- name: ApplyArray
+ gen_boilerplate: false
+
+- name: ConstructArgs
+ gen_boilerplate: false
+
+- name: ConstructArray
+ gen_boilerplate: false
+
+- name: Bail
+ gen_boilerplate: false
+
+- name: Unreachable
+ gen_boilerplate: false
+
+# This op serves as a way to force the encoding of a snapshot, even if there
+# is no resume point using it. This is useful to run MAssertRecoveredOnBailout
+# assertions.
+- name: EncodeSnapshot
+ guard: true
+
+- name: AssertRecoveredOnBailout
+ gen_boilerplate: false
+
+- name: AssertFloat32
+ gen_boilerplate: false
+
+- name: Compare
+ gen_boilerplate: false
+
+- name: SameValueDouble
+ operands:
+ left: Double
+ right: Double
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ clone: true
+
+- name: SameValue
+ operands:
+ left: Value
+ right: Value
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ clone: true
+
+- name: Box
+ gen_boilerplate: false
+
+- name: Unbox
+ gen_boilerplate: false
+
+- name: AssertRange
+ gen_boilerplate: false
+
+- name: AssertClass
+ gen_boilerplate: false
+
+- name: AssertShape
+ gen_boilerplate: false
+
+# Caller-side allocation of |this| for |new|:
+# Constructs |this| when possible, else MagicValue(JS_IS_CONSTRUCTING).
+- name: CreateThis
+ operands:
+ callee: Object
+ newTarget: Object
+ result_type: Value
+ # Performs a property read from |newTarget| iff |newTarget| is a JSFunction
+ # with an own |.prototype| property.
+ alias_set: custom
+ possibly_calls: true
+
+- name: CreateArgumentsObject
+ gen_boilerplate: false
+
+- name: CreateInlinedArgumentsObject
+ gen_boilerplate: false
+
+- name: GetInlinedArgument
+ gen_boilerplate: false
+
+- name: GetInlinedArgumentHole
+ gen_boilerplate: false
+
+- name: GetArgumentsObjectArg
+ operands:
+ argsObject: Object
+ arguments:
+ argno: size_t
+ result_type: Value
+ congruent_to: custom
+ alias_set: custom
+
+- name: SetArgumentsObjectArg
+ operands:
+ argsObject: Object
+ value: Value
+ arguments:
+ argno: size_t
+ alias_set: custom
+
+# Load |arguments[index]| from a mapped or unmapped arguments object. Bails out
+# when any elements were overridden or deleted. Also bails out if the index is
+# out of bounds.
+- name: LoadArgumentsObjectArg
+ operands:
+ argsObject: Object
+ index: Int32
+ result_type: Value
+ guard: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+# Load |arguments[index]| from a mapped or unmapped arguments object. Bails out
+# when any elements were overridden or deleted. Returns undefined if the index is
+# out of bounds.
+- name: LoadArgumentsObjectArgHole
+ operands:
+ argsObject: Object
+ index: Int32
+ result_type: Value
+ guard: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: InArgumentsObjectArg
+ operands:
+ argsObject: Object
+ index: Int32
+ result_type: Boolean
+ guard: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+# Load |arguments.length|. Bails out if the length has been overridden.
+- name: ArgumentsObjectLength
+ operands:
+ argsObject: Object
+ result_type: Int32
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ # Even though the "length" property is lazily resolved, it acts similar to
+ # a normal property load, so we can treat this operation like any other
+ # property read.
+ alias_set: custom
+
+# Create an array from an arguments object.
+- name: ArrayFromArgumentsObject
+ operands:
+ argsObject: Object
+ arguments:
+ shape: Shape*
+ result_type: Object
+ possibly_calls: true
+
+# Guard that the given flags are not set on the arguments object.
+- name: GuardArgumentsObjectFlags
+ operands:
+ argsObject: Object
+ arguments:
+ flags: uint32_t
+ result_type: Object
+ movable: true
+ guard: true
+ congruent_to: custom
+ # The flags are packed with the length in a fixed private slot.
+ alias_set: custom
+
+# Given a MIRType::Value A and a MIRType::Object B:
+# If the Value may be safely unboxed to an Object, return Object(A).
+# Otherwise, return B.
+# Used to implement return behavior for inlined constructors.
+- name: ReturnFromCtor
+ operands:
+ value: Value
+ object: Object
+ result_type: Object
+ folds_to: custom
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: ToDouble
+ gen_boilerplate: false
+
+- name: ToFloat32
+ gen_boilerplate: false
+
+# Converts a uint32 to a double (coming from wasm).
+- name: WasmUnsignedToDouble
+ operands:
+ def: Int32
+ type_policy: none
+ result_type: Double
+ movable: true
+ folds_to: custom
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: WasmUnsignedToFloat32
+ gen_boilerplate: false
+
+- name: WrapInt64ToInt32
+ gen_boilerplate: false
+
+- name: ExtendInt32ToInt64
+ gen_boilerplate: false
+
+- name: WasmBuiltinTruncateToInt64
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt64
+ gen_boilerplate: false
+
+- name: WasmTruncateToInt32
+ gen_boilerplate: false
+
+# Store a JS Value that can't be represented as an AnyRef pointer into an
+# object that holds the value (opaquely) as such a pointer.
+- name: WasmBoxValue
+ operands:
+ def: Value
+ result_type: RefOrNull
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: WasmAnyRefFromJSObject
+ operands:
+ def: Object
+ type_policy: none
+ result_type: RefOrNull
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: Int32ToIntPtr
+ gen_boilerplate: false
+
+- name: NonNegativeIntPtrToInt32
+ gen_boilerplate: false
+
+- name: IntPtrToDouble
+ gen_boilerplate: false
+
+- name: AdjustDataViewLength
+ gen_boilerplate: false
+
+- name: Int64ToFloatingPoint
+ gen_boilerplate: false
+
+- name: BuiltinInt64ToFloatingPoint
+ gen_boilerplate: false
+
+- name: ToNumberInt32
+ gen_boilerplate: false
+
+- name: BooleanToInt32
+ operands:
+ input: Boolean
+ result_type: Int32
+ movable: true
+ compute_range: custom
+ folds_to: custom
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: TruncateToInt32
+ gen_boilerplate: false
+
+- name: WasmBuiltinTruncateToInt32
+ gen_boilerplate: false
+
+- name: ToBigInt
+ gen_boilerplate: false
+
+- name: ToInt64
+ gen_boilerplate: false
+
+- name: TruncateBigIntToInt64
+ gen_boilerplate: false
+
+- name: Int64ToBigInt
+ gen_boilerplate: false
+
+- name: ToString
+ gen_boilerplate: false
+
+- name: BitNot
+ gen_boilerplate: false
+
+- name: TypeOf
+ gen_boilerplate: false
+
+- name: TypeOfName
+ operands:
+ input: Int32
+ result_type: String
+ movable: true
+ folds_to: custom
+ congruent_to: if_operands_equal
+ alias_set: none
+ can_recover: true
+
+- name: TypeOfIs
+ gen_boilerplate: false
+
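+# Create an async-from-sync iterator wrapping a sync iterator and its |next|
+# method (CreateAsyncFromSyncIterator), used by for-await-of.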
+- name: ToAsyncIter
+ operands:
+ iterator: Object
+ nextMethod: Value
+ result_type: Object
+
+- name: ToPropertyKeyCache
+ operands:
+ input: Value
+ result_type: Value
+
+- name: BitAnd
+ gen_boilerplate: false
+
+- name: BitOr
+ gen_boilerplate: false
+
+- name: BitXor
+ gen_boilerplate: false
+
+- name: Lsh
+ gen_boilerplate: false
+
+- name: Rsh
+ gen_boilerplate: false
+
+- name: Ursh
+ gen_boilerplate: false
+
+- name: SignExtendInt32
+ gen_boilerplate: false
+
+- name: SignExtendInt64
+ gen_boilerplate: false
+
+- name: MinMax
+ gen_boilerplate: false
+
+- name: MinMaxArray
+ gen_boilerplate: false
+
+- name: Abs
+ gen_boilerplate: false
+
+- name: Clz
+ gen_boilerplate: false
+
+- name: Ctz
+ gen_boilerplate: false
+
+- name: Popcnt
+ gen_boilerplate: false
+
+- name: Sqrt
+ gen_boilerplate: false
+
+- name: CopySign
+ gen_boilerplate: false
+
+# Inline implementation of atan2 (arctangent of y/x).
+- name: Atan2
+ operands:
+ y: Double
+ x: Double
+ result_type: Double
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ possibly_calls: true
+ can_recover: true
+ clone: true
+
+- name: Hypot
+ gen_boilerplate: false
+
+- name: Pow
+ gen_boilerplate: false
+
+- name: PowHalf
+ gen_boilerplate: false
+
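+# Result of a call to Math.random().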
+- name: Random
+ result_type: Double
+ alias_set: custom
+ possibly_calls: true
+ compute_range: custom
+ can_recover: custom
+ clone: true
+
+- name: Sign
+ gen_boilerplate: false
+
+- name: MathFunction
+ gen_boilerplate: false
+
+- name: Add
+ gen_boilerplate: false
+
+- name: Sub
+ gen_boilerplate: false
+
+- name: Mul
+ gen_boilerplate: false
+
+- name: Div
+ gen_boilerplate: false
+
+- name: WasmBuiltinDivI64
+ gen_boilerplate: false
+
+- name: Mod
+ gen_boilerplate: false
+
+- name: WasmBuiltinModD
+ gen_boilerplate: false
+
+- name: WasmBuiltinModI64
+ gen_boilerplate: false
+
+- name: BigIntAdd
+ gen_boilerplate: false
+
+- name: BigIntSub
+ gen_boilerplate: false
+
+- name: BigIntMul
+ gen_boilerplate: false
+
+- name: BigIntDiv
+ gen_boilerplate: false
+
+- name: BigIntMod
+ gen_boilerplate: false
+
+- name: BigIntPow
+ gen_boilerplate: false
+
+- name: BigIntBitAnd
+ gen_boilerplate: false
+
+- name: BigIntBitOr
+ gen_boilerplate: false
+
+- name: BigIntBitXor
+ gen_boilerplate: false
+
+- name: BigIntLsh
+ gen_boilerplate: false
+
+- name: BigIntRsh
+ gen_boilerplate: false
+
+- name: BigIntIncrement
+ gen_boilerplate: false
+
+- name: BigIntDecrement
+ gen_boilerplate: false
+
+- name: BigIntNegate
+ gen_boilerplate: false
+
+- name: BigIntBitNot
+ gen_boilerplate: false
+
+- name: Int32ToStringWithBase
+ operands:
+ input: Int32
+ base: Int32
+ result_type: String
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: NumberParseInt
+ operands:
+ string: String
+ radix: Int32
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ possibly_calls: true
+
+- name: DoubleParseInt
+ operands:
+ number: Double
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: Concat
+ gen_boilerplate: false
+
+- name: LinearizeForCharAccess
+ operands:
+ string: String
+ index: Int32
+ result_type: String
+ movable: true
+ congruent_to: if_operands_equal
+ # Strings are immutable, so there is no implicit dependency.
+ alias_set: none
+
+- name: CharCodeAt
+ operands:
+ string: String
+ index: Int32
+ result_type: Int32
+ movable: true
+ folds_to: custom
+ congruent_to: if_operands_equal
+ # Strings are immutable, so there is no implicit dependency.
+ alias_set: none
+ compute_range: custom
+ can_recover: true
+ clone: true
+
+# Similar to CharCodeAt, but also supports out-of-bounds access.
+- name: CharCodeAtMaybeOutOfBounds
+ operands:
+ string: String
+ index: Int32
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ # Strings are immutable, so there is no implicit dependency.
+ alias_set: none
+
+# Like CharCodeAtMaybeOutOfBounds, this operation also supports out-of-bounds access.
+- name: CharAtMaybeOutOfBounds
+ operands:
+ string: String
+ index: Int32
+ result_type: String
+ movable: true
+ congruent_to: if_operands_equal
+ # Strings are immutable, so there is no implicit dependency.
+ alias_set: none
+
+- name: FromCharCode
+ operands:
+ code: Int32
+ result_type: String
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ can_recover: true
+ clone: true
+
+- name: FromCodePoint
+ operands:
+ codePoint: Int32
+ result_type: String
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ clone: true
+
+- name: StringIndexOf
+ operands:
+ string: String
+ searchString: String
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ possibly_calls: true
+
+- name: StringStartsWith
+ operands:
+ string: String
+ searchString: String
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ possibly_calls: true
+
+- name: StringEndsWith
+ operands:
+ string: String
+ searchString: String
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ possibly_calls: true
+
+- name: StringConvertCase
+ gen_boilerplate: false
+
+- name: StringSplit
+ operands:
+ string: String
+ separator: String
+ result_type: Object
+ possibly_calls: true
+ # Although this instruction returns a new array, we don't have to mark
+ # it as a store instruction; see also MNewArray.
+ alias_set: none
+ can_recover: true
+
+- name: BoxNonStrictThis
+ operands:
+ def: Value
+ arguments:
+ globalThis: JSObject*
+ result_type: Object
+ folds_to: custom
+ possibly_calls: true
+ # This instruction can allocate a new object for wrapped primitives, but
+ # has no effect on existing objects.
+ alias_set: none
+
+- name: ImplicitThis
+ operands:
+ envChain: Object
+ arguments:
+ name: PropertyName*
+ result_type: Value
+ possibly_calls: true
+
+- name: Phi
+ gen_boilerplate: false
+
+- name: Beta
+ gen_boilerplate: false
+
+- name: NaNToZero
+ gen_boilerplate: false
+
+- name: OsrValue
+ gen_boilerplate: false
+
+- name: OsrEnvironmentChain
+ gen_boilerplate: false
+
+- name: OsrArgumentsObject
+ gen_boilerplate: false
+
+- name: OsrReturnValue
+ gen_boilerplate: false
+
+- name: BinaryCache
+ gen_boilerplate: false
+
+- name: UnaryCache
+ operands:
+ input: Value
+ result_type: Value
+
+# Check whether the call stack has exceeded the recursion limit.
+- name: CheckOverRecursed
+ guard: true
+ alias_set: none
+
+# Check whether we need to fire the interrupt handler.
+- name: InterruptCheck
+ guard: true
+ alias_set: none
+
+- name: WasmInterruptCheck
+ gen_boilerplate: false
+
+- name: WasmTrap
+ gen_boilerplate: false
+
+# Trap if the given value is null.
+- name: WasmTrapIfNull
+ operands:
+ value: RefOrNull
+ arguments:
+ trap: wasm::Trap
+ bytecodeOffset: wasm::BytecodeOffset
+ guard: true
+ type_policy: none
+ result_type: None
+
+- name: LexicalCheck
+ gen_boilerplate: false
+
+# Unconditionally throw an uninitialized let error.
+- name: ThrowRuntimeLexicalError
+ arguments:
+ errorNumber: unsigned
+ result_type: None
+ guard: true
+ alias_set: custom
+
+- name: ThrowMsg
+ gen_boilerplate: false
+
+# In the prologues of global and eval scripts, check for redeclarations and
+# initialize bindings.
+- name: GlobalDeclInstantiation
+ guard: true
+
+- name: RegExp
+ arguments:
+ source: RegExpObject*
+ hasShared: bool
+ result_type: Object
+ possibly_calls: true
+ alias_set: none
+
+- name: RegExpMatcher
+ operands:
+ regexp: Object
+ string: String
+ lastIndex: Int32
+ result_type: Value
+ possibly_calls: true
+ can_recover: true
+
+- name: RegExpSearcher
+ operands:
+ regexp: Object
+ string: String
+ lastIndex: Int32
+ result_type: Int32
+ possibly_calls: true
+ can_recover: true
+
+- name: RegExpExecMatch
+ operands:
+ regexp: Object
+ string: String
+ result_type: Value
+ possibly_calls: true
+ can_recover: false
+
+- name: RegExpExecTest
+ operands:
+ regexp: Object
+ string: String
+ result_type: Boolean
+ possibly_calls: true
+ can_recover: false
+
+- name: RegExpPrototypeOptimizable
+ operands:
+ object: Object
+ result_type: Boolean
+ alias_set: none
+
+- name: RegExpInstanceOptimizable
+ operands:
+ object: Object
+ proto: Object
+ result_type: Boolean
+ alias_set: none
+
+- name: GetFirstDollarIndex
+ gen_boilerplate: false
+
+- name: StringReplace
+ gen_boilerplate: false
+
+- name: Substr
+ operands:
+ string: String
+ begin: Int32
+ length: Int32
+ result_type: String
+ congruent_to: if_operands_equal
+ alias_set: none
+ can_recover: true
+
+- name: ModuleMetadata
+ arguments:
+ module: JSObject*
+ result_type: Object
+
+- name: DynamicImport
+ operands:
+ specifier: Value
+ options: Value
+ result_type: Object
+
+- name: Lambda
+ gen_boilerplate: false
+
+- name: FunctionWithProto
+ gen_boilerplate: false
+
+- name: SetFunName
+ operands:
+ fun: Object
+ name: Value
+ arguments:
+ prefixKind: uint8_t
+ result_type: None
+ possibly_calls: true
+
+# Returns obj->slots.
+- name: Slots
+ operands:
+ object: Object
+ result_type: Slots
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ might_alias: custom
+ clone: true
+
+# Returns obj->elements.
+- name: Elements
+ operands:
+ object: Object
+ result_type: Elements
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ clone: true
+
+# Load the initialized length from an elements header.
+- name: InitializedLength
+ operands:
+ elements: Elements
+ type_policy: none
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ compute_range: custom
+ clone: true
+
+- name: SetInitializedLength
+ operands:
+ elements: Elements
+ index: Int32
+ type_policy: none
+ alias_set: custom
+ clone: true
+
+# Load the array length from an elements header.
+- name: ArrayLength
+ operands:
+ elements: Elements
+ type_policy: none
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ compute_range: custom
+ clone: true
+
+# Store to the length in an elements header. Note the input is an *index*, one
+# less than the desired length.
+- name: SetArrayLength
+ operands:
+ elements: Elements
+ index: Int32
+ type_policy: none
+ alias_set: custom
+ # By default no, unless built as a recovered instruction.
+ can_recover: custom
+
+# Load the function length. Bails for functions with lazy scripts or a
+# resolved "length" property.
+- name: FunctionLength
+ operands:
+ function: Object
+ result_type: Int32
+ guard: true
+ congruent_to: if_operands_equal
+ # Even though the "length" property is lazily resolved, it acts similar to
+ # a normal property load, so we can treat this operation like any other
+ # property read.
+ alias_set: custom
+
+# Load the function name. Bails for bound functions when the bound function
+# name prefix isn't present or functions with a resolved "name" property.
+- name: FunctionName
+ operands:
+ function: Object
+ result_type: String
+ guard: true
+ congruent_to: if_operands_equal
+ # Even though the "name" property is lazily resolved, it acts similar to
+ # a normal property load, so we can treat this operation like any other
+ # property read.
+ alias_set: custom
+
+- name: GetNextEntryForIterator
+ gen_boilerplate: false
+
+# Read the byte length of an array buffer as IntPtr.
+- name: ArrayBufferByteLength
+ operands:
+ object: Object
+ result_type: IntPtr
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+# Read the length of an array buffer view.
+- name: ArrayBufferViewLength
+ operands:
+ object: Object
+ result_type: IntPtr
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ compute_range: custom
+
+- name: ArrayBufferViewByteOffset
+ operands:
+ object: Object
+ result_type: IntPtr
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ compute_range: custom
+
+# Read the elements pointer of an array buffer view.
+- name: ArrayBufferViewElements
+ operands:
+ object: Object
+ result_type: Elements
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ clone: true
+
+# Return the element size of a typed array.
+- name: TypedArrayElementSize
+ operands:
+ object: Object
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ # Class is immutable. See also MHasClass.
+ alias_set: none
+ compute_range: custom
+
+# Guard an ArrayBufferView has an attached ArrayBuffer.
+- name: GuardHasAttachedArrayBuffer
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: GuardNumberToIntPtrIndex
+ gen_boilerplate: false
+
+- name: KeepAliveObject
+ operands:
+ object: Object
+ result_type: None
+ guard: true
+
+- name: DebugEnterGCUnsafeRegion
+ result_type: None
+ guard: true
+ alias_set: none
+
+- name: DebugLeaveGCUnsafeRegion
+ result_type: None
+ guard: true
+ alias_set: none
+
+- name: Not
+ gen_boilerplate: false
+
+- name: BoundsCheck
+ gen_boilerplate: false
+
+- name: BoundsCheckLower
+ gen_boilerplate: false
+
+- name: SpectreMaskIndex
+ gen_boilerplate: false
+
+- name: LoadElement
+ gen_boilerplate: false
+
+- name: LoadElementAndUnbox
+ gen_boilerplate: false
+
+- name: LoadElementHole
+ gen_boilerplate: false
+
+- name: StoreElement
+ gen_boilerplate: false
+
+- name: StoreHoleValueElement
+ gen_boilerplate: false
+
+- name: StoreElementHole
+ gen_boilerplate: false
+
+- name: ArrayPopShift
+ gen_boilerplate: false
+
+# Array.prototype.push on a dense array. Returns the new array length.
+- name: ArrayPush
+ operands:
+ object: Object
+ value: Value
+ result_type: Int32
+ alias_set: custom
+ compute_range: custom
+ clone: true
+
+# Array.prototype.slice on a dense array.
+- name: ArraySlice
+ operands:
+ object: Object
+ begin: Int32
+ end: Int32
+ arguments:
+ templateObj: JSObject*
+ initialHeap: gc::Heap
+ result_type: Object
+ possibly_calls: true
+
+# Array.prototype.slice on an arguments object.
+- name: ArgumentsSlice
+ operands:
+ object: Object
+ begin: Int32
+ end: Int32
+ arguments:
+ templateObj: JSObject*
+ initialHeap: gc::Heap
+ result_type: Object
+ possibly_calls: true
+
+# Array.prototype.slice on the current frame's actual arguments.
+- name: FrameArgumentsSlice
+ operands:
+ begin: Int32
+ count: Int32
+ arguments:
+ templateObj: JSObject*
+ initialHeap: gc::Heap
+ result_type: Object
+ alias_set: none
+ possibly_calls: true
+
+# Array.prototype.slice on an inlined frame's arguments.
+- name: InlineArgumentsSlice
+ gen_boilerplate: false
+
+- name: NormalizeSliceTerm
+ operands:
+ value: Int32
+ length: Int32
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ folds_to: custom
+
+# MArrayJoin doesn't override |getAliasSet()|, because Array.prototype.join
+# might coerce the elements of the Array to strings. This coercion might
+# cause the evaluation of JavaScript code.
+- name: ArrayJoin
+ operands:
+ array: Object
+ sep: String
+ result_type: String
+ possibly_calls: true
+ folds_to: custom
+
+- name: LoadUnboxedScalar
+ gen_boilerplate: false
+
+- name: LoadDataViewElement
+ gen_boilerplate: false
+
+- name: LoadTypedArrayElementHole
+ gen_boilerplate: false
+
+- name: StoreUnboxedScalar
+ gen_boilerplate: false
+
+- name: StoreDataViewElement
+ gen_boilerplate: false
+
+- name: StoreTypedArrayElementHole
+ gen_boilerplate: false
+
+- name: EffectiveAddress
+ gen_boilerplate: false
+
+- name: ClampToUint8
+ gen_boilerplate: false
+
+- name: LoadFixedSlot
+ gen_boilerplate: false
+
+- name: LoadFixedSlotAndUnbox
+ gen_boilerplate: false
+
+- name: LoadDynamicSlotAndUnbox
+ gen_boilerplate: false
+
+- name: StoreFixedSlot
+ gen_boilerplate: false
+
+- name: GetPropertyCache
+ gen_boilerplate: false
+
+- name: HomeObjectSuperBase
+ operands:
+ homeObject: Object
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: GetPropSuperCache
+ gen_boilerplate: false
+
+- name: BindNameCache
+ operands:
+ envChain: Object
+ result_type: Object
+
+- name: CallBindVar
+ operands:
+ environmentChain: Object
+ result_type: Object
+ movable: true
+ congruent_to: custom
+ alias_set: none
+
+- name: GuardShape
+ operands:
+ object: Object
+ arguments:
+ shape: Shape*
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: custom
+ alias_set: custom
+ might_alias: custom
+
+- name: GuardMultipleShapes
+ operands:
+ object: Object
+ shapeList: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: GuardProto
+ gen_boilerplate: false
+
+- name: GuardNullProto
+ gen_boilerplate: false
+
+# Guard the object is a native object.
+- name: GuardIsNativeObject
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: GuardGlobalGeneration
+ arguments:
+ expected: uint32_t
+ generationAddr: const void*
+ result_type: None
+ guard: true
+ movable: true
+ alias_set: custom
+ congruent_to: custom
+
+- name: GuardIsProxy
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: GuardIsNotDOMProxy
+ operands:
+ proxy: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: GuardIsNotProxy
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+- name: ProxyGet
+ operands:
+ proxy: Object
+ arguments:
+ id: jsid
+ result_type: Value
+ possibly_calls: true
+
+- name: ProxyGetByValue
+ operands:
+ proxy: Object
+ idVal: Value
+ result_type: Value
+ possibly_calls: true
+
+- name: ProxyHasProp
+ operands:
+ proxy: Object
+ idVal: Value
+ arguments:
+ hasOwn: bool
+ result_type: Boolean
+ possibly_calls: true
+
+- name: ProxySet
+ operands:
+ proxy: Object
+ rhs: Value
+ arguments:
+ id: jsid
+ strict: bool
+ possibly_calls: true
+
+- name: ProxySetByValue
+ operands:
+ proxy: Object
+ idVal: Value
+ rhs: Value
+ arguments:
+ strict: bool
+ possibly_calls: true
+
+- name: CallSetArrayLength
+ operands:
+ obj: Object
+ rhs: Value
+ arguments:
+ strict: bool
+ possibly_calls: true
+
+- name: MegamorphicLoadSlot
+ operands:
+ object: Object
+ arguments:
+ name: PropertyKey
+ result_type: Value
+ # Bails when non-native or accessor properties are encountered, so we can't
+ # DCE this instruction.
+ guard: true
+ possibly_calls: true
+ congruent_to: custom
+ alias_set: custom
+
+- name: MegamorphicLoadSlotByValue
+ operands:
+ object: Object
+ idVal: Value
+ result_type: Value
+ # Bails when non-native or accessor properties are encountered, so we can't
+ # DCE this instruction.
+ guard: true
+ folds_to: custom
+ congruent_to: if_operands_equal
+ alias_set: custom
+ possibly_calls: true
+
+- name: MegamorphicStoreSlot
+ operands:
+ object: Object
+ rhs: Value
+ arguments:
+ name: PropertyKey
+ strict: bool
+ possibly_calls: true
+
+- name: MegamorphicHasProp
+ operands:
+ object: Object
+ idVal: Value
+ arguments:
+ hasOwn: bool
+ result_type: Boolean
+ # Bails when non-native or accessor properties are encountered, so we can't
+ # DCE this instruction.
+ guard: true
+ congruent_to: custom
+ alias_set: custom
+ possibly_calls: true
+
+- name: GuardIsNotArrayBufferMaybeShared
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+- name: GuardIsTypedArray
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+# Loads a specific JSObject* that was originally nursery-allocated.
+# See also WarpObjectField.
+- name: NurseryObject
+ arguments:
+ # Index in the Vector of objects stored in the WarpSnapshot.
+ nurseryIndex: uint32_t
+ result_type: Object
+ movable: true
+ congruent_to: custom
+ alias_set: none
+
+- name: GuardValue
+ gen_boilerplate: false
+
+- name: GuardNullOrUndefined
+ operands:
+ value: Value
+ result_type: Value
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+- name: GuardIsNotObject
+ operands:
+ value: Value
+ result_type: Value
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+- name: GuardFunctionFlags
+ gen_boilerplate: false
+
+- name: GuardFunctionIsNonBuiltinCtor
+ operands:
+ function: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: GuardFunctionKind
+ operands:
+ function: Object
+ arguments:
+ expected: FunctionFlags::FunctionKind
+ bailOnEquality: bool
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: custom
+ alias_set: custom
+
+- name: GuardFunctionScript
+ operands:
+ function: Object
+ arguments:
+ expected: BaseScript*
+ nargs: uint16_t
+ flags: FunctionFlags
+ result_type: Object
+ guard: true
+ movable: true
+ folds_to: custom
+ congruent_to: custom
+ # A JSFunction's BaseScript pointer is immutable. Relazification of
+ # self-hosted functions is an exception to this, but we don't use this
+ # guard for self-hosted functions.
+ alias_set: custom
+
+- name: GuardObjectIdentity
+ gen_boilerplate: false
+
+- name: GuardSpecificFunction
+ gen_boilerplate: false
+
+- name: GuardSpecificAtom
+ operands:
+ str: String
+ arguments:
+ atom: JSAtom*
+ result_type: String
+ guard: true
+ movable: true
+ congruent_to: custom
+ folds_to: custom
+ alias_set: none
+
+- name: GuardSpecificSymbol
+ gen_boilerplate: false
+
+- name: GuardSpecificInt32
+ operands:
+ num: Int32
+ arguments:
+ expected: int32_t
+ result_type: Int32
+ guard: true
+ movable: true
+ folds_to: custom
+ alias_set: none
+
+- name: GuardStringToIndex
+ operands:
+ string: String
+ result_type: Int32
+ # Mark as guard because this instruction must not be eliminated. For
+ # example, if the string is not an index the operation could change from a
+ # typed array load to a getter call.
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+- name: GuardStringToInt32
+ operands:
+ string: String
+ result_type: Int32
+ # Mark as guard to prevent the issue described in MGuardStringToIndex's
+ # constructor.
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+- name: GuardStringToDouble
+ operands:
+ string: String
+ result_type: Double
+ # Mark as guard to prevent the issue described in MGuardStringToIndex's
+ # constructor.
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+- name: GuardNoDenseElements
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ alias_set: custom
+
+- name: GuardTagNotEqual
+ gen_boilerplate: false
+
+- name: LoadDynamicSlot
+ gen_boilerplate: false
+
+# Inline call to access a function's environment (scope chain).
+- name: FunctionEnvironment
+ operands:
+ function: Object
+ result_type: Object
+ movable: true
+ folds_to: custom
+ # A function's environment is fixed.
+ alias_set: none
+
+# Allocate a new BlockLexicalEnvironmentObject.
+- name: NewLexicalEnvironmentObject
+ operands:
+ templateObj: Object
+ result_type: Object
+ alias_set: none
+
+# Allocate a new ClassBodyEnvironmentObject.
+- name: NewClassBodyEnvironmentObject
+ operands:
+ templateObj: Object
+ result_type: Object
+ alias_set: none
+
+- name: NewVarEnvironmentObject
+ operands:
+ templateObj: Object
+ result_type: Object
+ alias_set: none
+
+- name: HomeObject
+ operands:
+ function: Object
+ result_type: Object
+ movable: true
+ # A function's [[HomeObject]] is fixed.
+ alias_set: none
+
+- name: AddAndStoreSlot
+ gen_boilerplate: false
+
+- name: AllocateAndStoreSlot
+ operands:
+ object: Object
+ value: Value
+ arguments:
+ slotOffset: uint32_t
+ shape: Shape*
+ numNewSlots: uint32_t
+ possibly_calls: true
+ alias_set: custom
+
+- name: AddSlotAndCallAddPropHook
+ operands:
+ object: Object
+ value: Value
+ arguments:
+ shape: Shape*
+ possibly_calls: true
+
+- name: StoreDynamicSlot
+ gen_boilerplate: false
+
+- name: GetNameCache
+ operands:
+ envObj: Object
+ result_type: Value
+
+- name: CallGetIntrinsicValue
+ arguments:
+ name: PropertyName*
+ result_type: Value
+ possibly_calls: true
+
+- name: DeleteProperty
+ operands:
+ value: Value
+ arguments:
+ name: PropertyName*
+ strict: bool
+ result_type: Boolean
+
+- name: DeleteElement
+ operands:
+ value: Value
+ index: Value
+ arguments:
+ strict: bool
+ result_type: Boolean
+
+- name: SetPropertyCache
+ gen_boilerplate: false
+
+- name: MegamorphicSetElement
+ gen_boilerplate: false
+
+- name: SetDOMProperty
+ gen_boilerplate: false
+
+- name: GetDOMProperty
+ gen_boilerplate: false
+
+- name: GetDOMMember
+ gen_boilerplate: false
+
+- name: ObjectToIterator
+ gen_boilerplate: false
+
+- name: ValueToIterator
+ operands:
+ value: Value
+ result_type: Object
+
+- name: IteratorHasIndices
+ operands:
+ object: Object
+ iterator: Object
+ result_type: Boolean
+ alias_set: custom
+
+- name: LoadSlotByIteratorIndex
+ operands:
+ object: Object
+ iterator: Object # TODO: add MIRType::NativeIterator?
+ result_type: Value
+ alias_set: custom
+
+- name: StoreSlotByIteratorIndex
+ operands:
+ object: Object
+ iterator: Object
+ value: Value
+ alias_set: custom
+
+# Load the expando private value from a DOM proxy. The expando is stored in
+# the proxy object's private slot.
+# This is either an UndefinedValue (no expando), ObjectValue (the expando
+# object), or PrivateValue(ExpandoAndGeneration*).
+- name: LoadDOMExpandoValue
+ operands:
+ proxy: Object
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: LoadDOMExpandoValueGuardGeneration
+ gen_boilerplate: false
+
+- name: LoadDOMExpandoValueIgnoreGeneration
+ operands:
+ proxy: Object
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+# Takes an expando Value as input, then guards that it's either UndefinedValue
+# or an object with the expected shape.
+- name: GuardDOMExpandoMissingOrGuardShape
+ operands:
+ expando: Value
+ arguments:
+ shape: Shape*
+ result_type: Value
+ guard: true
+ movable: true
+ congruent_to: custom
+ alias_set: custom
+
+- name: StringLength
+ operands:
+ string: String
+ result_type: Int32
+ movable: true
+ folds_to: custom
+ congruent_to: if_operands_equal
+ # The string |length| property is immutable, so there is no
+ # implicit dependency.
+ alias_set: none
+ compute_range: custom
+ can_recover: true
+ clone: true
+
+- name: Floor
+ gen_boilerplate: false
+
+- name: Ceil
+ gen_boilerplate: false
+
+- name: Round
+ gen_boilerplate: false
+
+- name: Trunc
+ gen_boilerplate: false
+
+- name: NearbyInt
+ gen_boilerplate: false
+
+- name: GetIteratorCache
+ gen_boilerplate: false
+
+- name: OptimizeSpreadCallCache
+ operands:
+ value: Value
+ result_type: Value
+
+- name: IteratorMore
+ operands:
+ iterator: Object
+ result_type: Value
+
+- name: IsNoIter
+ operands:
+ def: Object
+ result_type: Boolean
+ type_policy: none
+ movable: true
+ alias_set: none
+
+- name: IteratorEnd
+ operands:
+ iterator: Object
+
+- name: CloseIterCache
+ operands:
+ iter: Object
+ arguments:
+ completionKind: uint8_t
+ possibly_calls: true
+
+- name: InCache
+ gen_boilerplate: false
+
+- name: InArray
+ gen_boilerplate: false
+
+- name: GuardElementNotHole
+ gen_boilerplate: false
+
+- name: NewPrivateName
+ arguments:
+ name: JSAtom*
+ result_type: Symbol
+ possibly_calls: true
+
+- name: CheckPrivateFieldCache
+ gen_boilerplate: false
+
+- name: HasOwnCache
+ gen_boilerplate: false
+
+- name: InstanceOf
+ gen_boilerplate: false
+
+# Implementation for instanceof operator with unknown rhs.
+- name: InstanceOfCache
+ operands:
+ obj: Value
+ proto: Object
+ result_type: Boolean
+
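+# Load the number of actual arguments of the current frame.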
+- name: ArgumentsLength
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ # Arguments |length| cannot be mutated by Ion Code.
+ alias_set: none
+ compute_range: custom
+ can_recover: true
+
+# This MIR instruction is used to get an argument from the actual arguments.
+- name: GetFrameArgument
+ operands:
+ index: Int32
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ # This instruction is never aliased, because ops like JSOp::SetArg don't
+ # write to the argument frames. We create an arguments object in that case.
+ alias_set: none
+
+# This MIR instruction is used to get an argument from the actual arguments.
+# Returns undefined if |index| is greater than or equal to |length|. Bails
+# out if |index| is negative.
+- name: GetFrameArgumentHole
+ operands:
+ index: Int32
+ length: Int32
+ result_type: Value
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ # This instruction is never aliased, because ops like JSOp::SetArg don't
+ # write to the argument frames. We create an arguments object in that case.
+ alias_set: none
+
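+# Load the |new.target| value of the current frame.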
+- name: NewTarget
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: Rest
+ operands:
+ numActuals: Int32
+ arguments:
+ numFormals: unsigned
+ shape: Shape*
+ result_type: Object
+ possibly_calls: true
+ alias_set: none
+ can_recover: true
+
+- name: PostWriteBarrier
+ gen_boilerplate: false
+
+- name: PostWriteElementBarrier
+ gen_boilerplate: false
+
+- name: AssertCanElidePostWriteBarrier
+ operands:
+ object: Object
+ value: Value
+ result_type: None
+ guard: true
+ alias_set: none
+
+- name: NewNamedLambdaObject
+ arguments:
+ templateObj: NamedLambdaObject*
+ result_type: Object
+ alias_set: none
+
+- name: NewCallObject
+ gen_boilerplate: false
+
+- name: NewStringObject
+ gen_boilerplate: false
+
+- name: IsCallable
+ gen_boilerplate: false
+
+- name: IsConstructor
+ operands:
+ object: Object
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: IsCrossRealmArrayConstructor
+ operands:
+ object: Object
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: IsObject
+ operands:
+ object: Value
+ result_type: Boolean
+ movable: true
+ folds_to: custom
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: IsNullOrUndefined
+ operands:
+ value: Value
+ result_type: Boolean
+ movable: true
+ folds_to: custom
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: HasClass
+ gen_boilerplate: false
+
+- name: GuardToClass
+ gen_boilerplate: false
+
+- name: GuardToFunction
+ gen_boilerplate: false
+
+- name: IsArray
+ gen_boilerplate: false
+
+- name: IsTypedArray
+ gen_boilerplate: false
+
+- name: ObjectClassToString
+ operands:
+ object: Object
+ result_type: String
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ possibly_calls: true
+ # Tests @@toStringTag is neither present on this object nor on any object
+ # of the prototype chain.
+ alias_set: custom
+
+- name: CheckReturn
+ operands:
+ returnValue: Value
+ thisValue: Value
+ result_type: Value
+ guard: true
+ folds_to: custom
+ alias_set: custom
+
+- name: CheckThis
+ operands:
+ thisValue: Value
+ result_type: Value
+ guard: true
+ folds_to: custom
+ alias_set: custom
+
+- name: AsyncResolve
+ operands:
+ generator: Object
+ valueOrReason: Value
+ arguments:
+ resolveKind: AsyncFunctionResolveKind
+ result_type: Object
+
+# Returns from this function to the previous caller; this looks like a regular
+# unary instruction and is used to lie to the MIR generator about suspending
+# ops like Yield/Await, which are emitted like returns but are MIR-built like
+# regular instructions.
+- name: GeneratorReturn
+ operands:
+ input: Value
+ guard: true
+ alias_set: none
+
+- name: AsyncAwait
+ operands:
+ value: Value
+ generator: Object
+ result_type: Object
+
+- name: CheckThisReinit
+ operands:
+ thisValue: Value
+ result_type: Value
+ guard: true
+ folds_to: custom
+ alias_set: custom
+
+- name: Generator
+ gen_boilerplate: false
+
+- name: CanSkipAwait
+ operands:
+ value: Value
+ result_type: Boolean
+
+- name: MaybeExtractAwaitValue
+ gen_boilerplate: false
+
+- name: IncrementWarmUpCounter
+ arguments:
+ script: JSScript*
+ alias_set: none
+
+- name: AtomicIsLockFree
+ gen_boilerplate: false
+
+- name: CompareExchangeTypedArrayElement
+ gen_boilerplate: false
+
+- name: AtomicExchangeTypedArrayElement
+ gen_boilerplate: false
+
+- name: AtomicTypedArrayElementBinop
+ gen_boilerplate: false
+
+- name: Debugger
+ gen_boilerplate: false
+
+- name: CheckIsObj
+ operands:
+ value: Value
+ arguments:
+ checkKind: uint8_t
+ result_type: Object
+ guard: true
+ folds_to: custom
+ alias_set: none
+
+- name: CheckObjCoercible
+ operands:
+ checkValue: Value
+ result_type: Value
+ guard: true
+ folds_to: custom
+ # Throws on null or undefined.
+ alias_set: custom
+
+- name: CheckClassHeritage
+ operands:
+ heritage: Value
+ result_type: Value
+ guard: true
+
+- name: DebugCheckSelfHosted
+ operands:
+ checkValue: Value
+ result_type: Value
+ guard: true
+
+- name: IsPackedArray
+ operands:
+ object: Object
+ result_type: Boolean
+ movable: true
+ alias_set: custom
+
+- name: GuardArrayIsPacked
+ operands:
+ array: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: GetPrototypeOf
+ operands:
+ target: Object
+ result_type: Value
+ # May throw if target is a proxy.
+ guard: true
+
+- name: ObjectWithProto
+ operands:
+ prototype: Value
+ result_type: Object
+ # May throw if prototype is neither an object nor null.
+ guard: true
+ possibly_calls: true
+
+- name: ObjectStaticProto
+ gen_boilerplate: false
+
+# This is basically just a limited case of Constant, for objects which are
+# the prototype of another object and will be used for a GuardShape. It
+# includes a reference to the receiver object so we can eliminate redundant
+# shape guards.
+- name: ConstantProto
+ gen_boilerplate: false
+
+- name: BuiltinObject
+ arguments:
+ builtinObjectKind: BuiltinObjectKind
+ result_type: Object
+ possibly_calls: true
+
+- name: SuperFunction
+ operands:
+ callee: Object
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: InitHomeObject
+ operands:
+ function: Object
+ homeObject: Value
+ result_type: Object
+ alias_set: custom
+
+# Return true if the object is definitely a TypedArray constructor, but not
+# necessarily from the currently active realm. Return false if the object is
+# not a TypedArray constructor or if it's a wrapper.
+- name: IsTypedArrayConstructor
+ operands:
+ object: Object
+ result_type: Boolean
+ alias_set: none
+
+# Load the JSValueTag on all platforms except ARM64. See the comments in
+# MacroAssembler-arm64.h for the |cmpTag(Register, ImmTag)| method for why
+# ARM64 doesn't use the raw JSValueTag, but instead a modified tag value. That
+# modified tag value can't be directly compared against JSValueTag constants.
+- name: LoadValueTag
+ operands:
+ value: Value
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+# Load the target object from a proxy wrapper. The target is stored in the
+# proxy object's private slot.
+- name: LoadWrapperTarget
+ operands:
+ object: Object
+ result_type: Object
+ movable: true
+ congruent_to: if_operands_equal
+ # Can't use |AliasSet::None| because the target changes on navigation.
+ # TODO: Investigate using a narrower or a custom alias set.
+ alias_set: custom
+
+# Guard the accessor shape is present on the object or its prototype chain.
+- name: GuardHasGetterSetter
+ operands:
+ object: Object
+ arguments:
+ propId: jsid
+ getterSetter: GetterSetter*
+ result_type: Object
+ guard: true
+ movable: true
+ possibly_calls: true
+ congruent_to: custom
+ alias_set: custom
+
+- name: GuardIsExtensible
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: GuardInt32IsNonNegative
+ operands:
+ index: Int32
+ result_type: Int32
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+- name: GuardInt32Range
+ operands:
+ input: Int32
+ arguments:
+ minimum: int32_t
+ maximum: int32_t
+ result_type: Int32
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+# Guard the input index is either greater than the dense initialized length of
+# an object, or a hole element.
+- name: GuardIndexIsNotDenseElement
+ operands:
+ object: Object
+ index: Int32
+ result_type: Int32
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+# Guard an array object's length can be updated successfully when adding an
+# element at the input index.
+- name: GuardIndexIsValidUpdateOrAdd
+ operands:
+ object: Object
+ index: Int32
+ result_type: Int32
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+# Add or update a sparse element of an ArrayObject or PlainObject. It's allowed
+# for the sparse element to be already present on the object. It may also be an
+# accessor property, so this instruction is always marked as effectful.
+- name: CallAddOrUpdateSparseElement
+ operands:
+ object: Object
+ index: Int32
+ value: Value
+ arguments:
+ strict: bool
+ possibly_calls: true
+
+# Get a sparse element from an ArrayObject or PlainObject, possibly by calling
+# an accessor property.
+- name: CallGetSparseElement
+ operands:
+ object: Object
+ index: Int32
+ result_type: Value
+ possibly_calls: true
+
+- name: CallNativeGetElement
+ operands:
+ object: Object
+ index: Int32
+ result_type: Value
+ possibly_calls: true
+
+- name: CallNativeGetElementSuper
+ operands:
+ object: Object
+ index: Int32
+ receiver: Value
+ result_type: Value
+ possibly_calls: true
+
+# Test if a native object has an own element (sparse or dense) at an index.
+- name: CallObjectHasSparseElement
+ operands:
+ object: Object
+ index: Int32
+ result_type: Boolean
+ guard: true
+ congruent_to: if_operands_equal
+ possibly_calls: true
+ alias_set: custom
+
+- name: BigIntAsIntN
+ operands:
+ bits: Int32
+ input: BigInt
+ result_type: BigInt
+ movable: true
+ congruent_to: if_operands_equal
+ possibly_calls: true
+ alias_set: none
+ can_recover: true
+ clone: true
+
+- name: BigIntAsUintN
+ operands:
+ bits: Int32
+ input: BigInt
+ result_type: BigInt
+ movable: true
+ congruent_to: if_operands_equal
+ possibly_calls: true
+ alias_set: none
+ can_recover: true
+ clone: true
+
+- name: GuardNonGCThing
+ operands:
+ input: Value
+ result_type: Value
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ alias_set: none
+
+- name: ToHashableNonGCThing
+ operands:
+ input: Value
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: ToHashableString
+ operands:
+ input: String
+ result_type: String
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ possibly_calls: true
+
+- name: ToHashableValue
+ operands:
+ input: Value
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+ possibly_calls: true
+
+- name: HashNonGCThing
+ operands:
+ input: Value
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: HashString
+ operands:
+ input: String
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: HashSymbol
+ operands:
+ input: Symbol
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: HashBigInt
+ operands:
+ input: BigInt
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
+- name: HashObject
+ operands:
+ set: Object
+ input: Value
+ result_type: Int32
+ # In contrast to the previous hash operations, we can't move this
+ # instruction, because the hashcode is computed from the object's address,
+ # which can change when the object is moved by the GC.
+ movable: false
+ alias_set: none
+
+- name: HashValue
+ operands:
+ set: Object
+ input: Value
+ result_type: Int32
+ movable: false
+ alias_set: none
+
+- name: SetObjectHasNonBigInt
+ operands:
+ set: Object
+ value: Value
+ hash: Int32
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: SetObjectHasBigInt
+ operands:
+ set: Object
+ value: Value
+ hash: Int32
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: SetObjectHasValue
+ operands:
+ set: Object
+ value: Value
+ hash: Int32
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: SetObjectHasValueVMCall
+ operands:
+ set: Object
+ value: Value
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ possibly_calls: true
+
+- name: SetObjectSize
+ operands:
+ set: Object
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: MapObjectHasNonBigInt
+ operands:
+ map: Object
+ value: Value
+ hash: Int32
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: MapObjectHasBigInt
+ operands:
+ map: Object
+ value: Value
+ hash: Int32
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: MapObjectHasValue
+ operands:
+ map: Object
+ value: Value
+ hash: Int32
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: MapObjectHasValueVMCall
+ operands:
+ map: Object
+ value: Value
+ result_type: Boolean
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ possibly_calls: true
+
+- name: MapObjectGetNonBigInt
+ operands:
+ map: Object
+ value: Value
+ hash: Int32
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: MapObjectGetBigInt
+ operands:
+ map: Object
+ value: Value
+ hash: Int32
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: MapObjectGetValue
+ operands:
+ map: Object
+ value: Value
+ hash: Int32
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: MapObjectGetValueVMCall
+ operands:
+ map: Object
+ value: Value
+ result_type: Value
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ possibly_calls: true
+
+- name: MapObjectSize
+ operands:
+ map: Object
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+- name: WasmNeg
+ gen_boilerplate: false
+
+- name: WasmBinaryBitwise
+ gen_boilerplate: false
+
+- name: WasmLoadInstance
+ gen_boilerplate: false
+
+- name: WasmStoreInstance
+ gen_boilerplate: false
+
+- name: WasmHeapBase
+ gen_boilerplate: false
+
+- name: WasmBoundsCheck
+ gen_boilerplate: false
+
+- name: WasmExtendU32Index
+ operands:
+ input: Int32
+ result_type: Int64
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ type_policy: none
+ alias_set: none
+
+- name: WasmWrapU32Index
+ operands:
+ input: Int64
+ result_type: Int32
+ movable: true
+ congruent_to: if_operands_equal
+ folds_to: custom
+ type_policy: none
+ alias_set: none
+
+- name: WasmAddOffset
+ gen_boilerplate: false
+
+- name: WasmAlignmentCheck
+ gen_boilerplate: false
+
+- name: WasmLoad
+ gen_boilerplate: false
+
+- name: WasmStore
+ gen_boilerplate: false
+
+- name: AsmJSLoadHeap
+ gen_boilerplate: false
+
+- name: AsmJSStoreHeap
+ gen_boilerplate: false
+
+- name: WasmFence
+ guard: true
+ alias_set: none
+ clone: true
+
+- name: WasmCompareExchangeHeap
+ gen_boilerplate: false
+
+- name: WasmAtomicExchangeHeap
+ gen_boilerplate: false
+
+- name: WasmAtomicBinopHeap
+ gen_boilerplate: false
+
+- name: WasmLoadInstanceDataField
+ gen_boilerplate: false
+
+- name: WasmLoadGlobalCell
+ gen_boilerplate: false
+
+- name: WasmLoadTableElement
+ gen_boilerplate: false
+
+- name: WasmStoreInstanceDataField
+ gen_boilerplate: false
+
+- name: WasmStoreGlobalCell
+ gen_boilerplate: false
+
+- name: WasmStoreStackResult
+ gen_boilerplate: false
+
+- name: WasmDerivedPointer
+ gen_boilerplate: false
+
+- name: WasmDerivedIndexPointer
+ gen_boilerplate: false
+
+- name: WasmStoreRef
+ gen_boilerplate: false
+
+- name: WasmPostWriteBarrier
+ gen_boilerplate: false
+
+- name: WasmParameter
+ gen_boilerplate: false
+
+- name: WasmReturn
+ gen_boilerplate: false
+
+- name: WasmReturnVoid
+ gen_boilerplate: false
+
+- name: WasmStackArg
+ gen_boilerplate: false
+
+- name: WasmRegisterResult
+ gen_boilerplate: false
+
+- name: WasmFloatRegisterResult
+ gen_boilerplate: false
+
+- name: WasmRegister64Result
+ gen_boilerplate: false
+
+- name: WasmStackResultArea
+ gen_boilerplate: false
+
+- name: WasmStackResult
+ gen_boilerplate: false
+
+- name: WasmCallCatchable
+ gen_boilerplate: false
+
+- name: WasmCallUncatchable
+ gen_boilerplate: false
+
+- name: WasmCallLandingPrePad
+ gen_boilerplate: false
+
+- name: WasmSelect
+ gen_boilerplate: false
+
+- name: WasmReinterpret
+ gen_boilerplate: false
+
+- name: Rotate
+ gen_boilerplate: false
+
+- name: WasmBinarySimd128
+ gen_boilerplate: false
+
+- name: WasmBinarySimd128WithConstant
+ gen_boilerplate: false
+
+# (v128, i32) -> v128 effect-free shift operations.
+- name: WasmShiftSimd128
+ operands:
+ lhs: Simd128
+ rhs: Int32
+ arguments:
+ simdOp: wasm::SimdOp
+ type_policy: none
+ result_type: Simd128
+ movable: true
+ congruent_to: custom
+ alias_set: none
+ clone: true
+
+# (v128, v128, mask) -> v128 effect-free operation.
+- name: WasmShuffleSimd128
+ operands:
+ lhs: Simd128
+ rhs: Simd128
+ arguments:
+ shuffle: SimdShuffle
+ type_policy: none
+ result_type: Simd128
+ movable: true
+ congruent_to: custom
+ alias_set: none
+ clone: true
+
+- name: WasmReplaceLaneSimd128
+ gen_boilerplate: false
+
+- name: WasmUnarySimd128
+ operands:
+ src: Simd128
+ arguments:
+ simdOp: wasm::SimdOp
+ type_policy: none
+ result_type: Simd128
+ movable: true
+ congruent_to: custom
+ alias_set: none
+ clone: true
+
+- name: WasmTernarySimd128
+ gen_boilerplate: false
+
+- name: WasmScalarToSimd128
+ gen_boilerplate: false
+
+- name: WasmReduceSimd128
+ gen_boilerplate: false
+
+- name: WasmLoadLaneSimd128
+ gen_boilerplate: false
+
+- name: WasmStoreLaneSimd128
+ gen_boilerplate: false
+
+- name: UnreachableResult
+ gen_boilerplate: false
+
+- name: IonToWasmCall
+ gen_boilerplate: false
+
+- name: WasmLoadField
+ gen_boilerplate: false
+
+- name: WasmLoadFieldKA
+ gen_boilerplate: false
+
+- name: WasmStoreFieldKA
+ gen_boilerplate: false
+
+- name: WasmStoreFieldRefKA
+ gen_boilerplate: false
+
+- name: WasmGcObjectIsSubtypeOfConcrete
+ gen_boilerplate: false
+
+- name: WasmGcObjectIsSubtypeOfAbstract
+ gen_boilerplate: false
+
+#ifdef FUZZING_JS_FUZZILLI
+- name: FuzzilliHash
+ gen_boilerplate: false
+
+- name: FuzzilliHashStore
+ gen_boilerplate: false
+#endif
diff --git a/js/src/jit/MachineState.h b/js/src/jit/MachineState.h
new file mode 100644
index 0000000000..63d04ae6ea
--- /dev/null
+++ b/js/src/jit/MachineState.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MachineState_h
+#define jit_MachineState_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Variant.h"
+
+#include <stdint.h>
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js::jit {
+
+// Information needed to recover machine register state. This supports two
+// different modes:
+//
+// * Bailouts: all registers are pushed on the stack as part of the bailout
+// process, so MachineState simply points to these FPU/GPR arrays.
+// See RegisterDump and BailoutStack.
+//
+// * Safepoints: live registers are pushed on the stack before a VM call, so
+// MachineState stores the register sets and a pointer to the stack memory
+// where these registers were pushed. This is also used by exception bailouts.
+class MOZ_STACK_CLASS MachineState {
+ struct NullState {};
+
+ struct BailoutState {
+ RegisterDump::FPUArray& floatRegs;
+ RegisterDump::GPRArray& regs;
+
+ BailoutState(RegisterDump::FPUArray& floatRegs,
+ RegisterDump::GPRArray& regs)
+ : floatRegs(floatRegs), regs(regs) {}
+ };
+
+ struct SafepointState {
+ FloatRegisterSet floatRegs;
+ GeneralRegisterSet regs;
+ // Pointers to the start of the pushed |floatRegs| and |regs| on the stack.
+ // This is the value of the stack pointer right before the first register
+ // was pushed.
+ char* floatSpillBase;
+ uintptr_t* spillBase;
+
+ SafepointState(const FloatRegisterSet& floatRegs,
+ const GeneralRegisterSet& regs, char* floatSpillBase,
+ uintptr_t* spillBase)
+ : floatRegs(floatRegs),
+ regs(regs),
+ floatSpillBase(floatSpillBase),
+ spillBase(spillBase) {}
+ uintptr_t* addressOfRegister(Register reg) const;
+ char* addressOfRegister(FloatRegister reg) const;
+ };
+ using State = mozilla::Variant<NullState, BailoutState, SafepointState>;
+ State state_{NullState()};
+
+ public:
+ MachineState() = default;
+ MachineState(const MachineState& other) = default;
+ MachineState& operator=(const MachineState& other) = default;
+
+ static MachineState FromBailout(RegisterDump::GPRArray& regs,
+ RegisterDump::FPUArray& fpregs) {
+ MachineState res;
+ res.state_.emplace<BailoutState>(fpregs, regs);
+ return res;
+ }
+
+ static MachineState FromSafepoint(const FloatRegisterSet& floatRegs,
+ const GeneralRegisterSet& regs,
+ char* floatSpillBase,
+ uintptr_t* spillBase) {
+ MachineState res;
+ res.state_.emplace<SafepointState>(floatRegs, regs, floatSpillBase,
+ spillBase);
+ return res;
+ }
+
+ bool has(Register reg) const {
+ if (state_.is<BailoutState>()) {
+ return true;
+ }
+ return state_.as<SafepointState>().regs.hasRegisterIndex(reg);
+ }
+ bool has(FloatRegister reg) const {
+ if (state_.is<BailoutState>()) {
+ return true;
+ }
+ return state_.as<SafepointState>().floatRegs.hasRegisterIndex(reg);
+ }
+
+ uintptr_t read(Register reg) const;
+ template <typename T>
+ T read(FloatRegister reg) const;
+
+ // Used by moving GCs to update pointers.
+ void write(Register reg, uintptr_t value) const;
+};
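+
+// Illustrative use only (not part of this header): a bailout consumer builds
+// the state from a register dump and then queries individual registers:
+//
+//   MachineState machine = MachineState::FromBailout(regs, fpregs);
+//   if (machine.has(reg)) {
+//     uintptr_t value = machine.read(reg);
+//   }
+//
+// where |regs|, |fpregs| and |reg| are hypothetical locals of the matching
+// types.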
+
+} // namespace js::jit
+
+#endif /* jit_MachineState_h */
diff --git a/js/src/jit/MacroAssembler-inl.h b/js/src/jit/MacroAssembler-inl.h
new file mode 100644
index 0000000000..2fbe55d4cf
--- /dev/null
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -0,0 +1,1090 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MacroAssembler_inl_h
+#define jit_MacroAssembler_inl_h
+
+#include "jit/MacroAssembler.h"
+
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "gc/Zone.h"
+#include "jit/CalleeToken.h"
+#include "jit/CompileWrappers.h"
+#include "jit/JitFrames.h"
+#include "jit/JSJitFrameIter.h"
+#include "util/DifferentialTesting.h"
+#include "vm/BigIntType.h"
+#include "vm/JSObject.h"
+#include "vm/ProxyObject.h"
+#include "vm/Runtime.h"
+#include "vm/StringType.h"
+
+#include "jit/ABIFunctionList-inl.h"
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/MacroAssembler-x86-inl.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/MacroAssembler-x64-inl.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/MacroAssembler-arm-inl.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/MacroAssembler-arm64-inl.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/MacroAssembler-mips32-inl.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/MacroAssembler-mips64-inl.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/MacroAssembler-loong64-inl.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/MacroAssembler-riscv64-inl.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/MacroAssembler-wasm32-inl.h"
+#elif !defined(JS_CODEGEN_NONE)
+# error "Unknown architecture!"
+#endif
+
+#include "wasm/WasmBuiltins.h"
+
+namespace js {
+namespace jit {
+
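+// Wrap a raw C++ function pointer in a DynFn so it can be passed to the
+// callWithABI(DynFn, ...) overload defined below.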
+template <typename Sig>
+DynFn DynamicFunction(Sig fun) {
+ ABIFunctionSignature<Sig> sig;
+ return DynFn{sig.address(fun)};
+}
+
+// Helper for generatePreBarrier.
+inline DynFn JitPreWriteBarrier(MIRType type) {
+ switch (type) {
+ case MIRType::Value: {
+ using Fn = void (*)(JSRuntime * rt, Value * vp);
+ return DynamicFunction<Fn>(JitValuePreWriteBarrier);
+ }
+ case MIRType::String: {
+ using Fn = void (*)(JSRuntime * rt, JSString * *stringp);
+ return DynamicFunction<Fn>(JitStringPreWriteBarrier);
+ }
+ case MIRType::Object: {
+ using Fn = void (*)(JSRuntime * rt, JSObject * *objp);
+ return DynamicFunction<Fn>(JitObjectPreWriteBarrier);
+ }
+ case MIRType::Shape: {
+ using Fn = void (*)(JSRuntime * rt, Shape * *shapep);
+ return DynamicFunction<Fn>(JitShapePreWriteBarrier);
+ }
+ default:
+ MOZ_CRASH();
+ }
+}
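+
+// Illustrative sketch (not from this patch): callers typically invoke the
+// selected barrier through the ABI call machinery, roughly as
+//
+//   masm.setupUnalignedABICall(scratch);
+//   masm.passABIArg(runtimeReg);
+//   masm.passABIArg(cellReg);
+//   masm.callWithABI(JitPreWriteBarrier(MIRType::Object));
+//
+// where |scratch|, |runtimeReg| and |cellReg| are hypothetical registers.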
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
+CodeOffset MacroAssembler::PushWithPatch(ImmWord word) {
+ framePushed_ += sizeof(word.value);
+ return pushWithPatch(word);
+}
+
+CodeOffset MacroAssembler::PushWithPatch(ImmPtr imm) {
+ return PushWithPatch(ImmWord(uintptr_t(imm.value)));
+}
+
+// ===============================================================
+// Simple call functions.
+
+void MacroAssembler::call(TrampolinePtr code) { call(ImmPtr(code.value)); }
+
+CodeOffset MacroAssembler::call(const wasm::CallSiteDesc& desc,
+ const Register reg) {
+ CodeOffset l = call(reg);
+ append(desc, l);
+ return l;
+}
+
+CodeOffset MacroAssembler::call(const wasm::CallSiteDesc& desc,
+ uint32_t funcIndex) {
+ CodeOffset l = callWithPatch();
+ append(desc, l, funcIndex);
+ return l;
+}
+
+void MacroAssembler::call(const wasm::CallSiteDesc& desc, wasm::Trap trap) {
+ CodeOffset l = callWithPatch();
+ append(desc, l, trap);
+}
+
+CodeOffset MacroAssembler::call(const wasm::CallSiteDesc& desc,
+ wasm::SymbolicAddress imm) {
+ MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm),
+ "only for functions which may appear in profiler");
+ CodeOffset raOffset = call(imm);
+ append(desc, raOffset);
+ return raOffset;
+}
+
+// ===============================================================
+// ABI function calls.
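+//
+// Typical calling sequence (summary, not new API): set up the call with
+// setupUnalignedABICall(scratch), pass each argument in order with
+// passABIArg(...), then invoke one of the callWithABI overloads below.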
+
+void MacroAssembler::passABIArg(Register reg) {
+ passABIArg(MoveOperand(reg), MoveOp::GENERAL);
+}
+
+void MacroAssembler::passABIArg(FloatRegister reg, MoveOp::Type type) {
+ passABIArg(MoveOperand(reg), type);
+}
+
+void MacroAssembler::callWithABI(DynFn fun, MoveOp::Type result,
+ CheckUnsafeCallWithABI check) {
+ AutoProfilerCallInstrumentation profiler(*this);
+ callWithABINoProfiler(fun.address, result, check);
+}
+
+template <typename Sig, Sig fun>
+void MacroAssembler::callWithABI(MoveOp::Type result,
+ CheckUnsafeCallWithABI check) {
+ ABIFunction<Sig, fun> abiFun;
+ AutoProfilerCallInstrumentation profiler(*this);
+ callWithABINoProfiler(abiFun.address(), result, check);
+}
+
+void MacroAssembler::callWithABI(Register fun, MoveOp::Type result) {
+ AutoProfilerCallInstrumentation profiler(*this);
+ callWithABINoProfiler(fun, result);
+}
+
+void MacroAssembler::callWithABI(const Address& fun, MoveOp::Type result) {
+ AutoProfilerCallInstrumentation profiler(*this);
+ callWithABINoProfiler(fun, result);
+}
+
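+ // Under the simulator, each ABI argument type is folded into signature_ so
+ // that signature() can reconstruct the full ABIFunctionType of the call.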
+void MacroAssembler::appendSignatureType(MoveOp::Type type) {
+#ifdef JS_SIMULATOR
+ signature_ <<= ArgType_Shift;
+ switch (type) {
+ case MoveOp::GENERAL:
+ signature_ |= ArgType_General;
+ break;
+ case MoveOp::DOUBLE:
+ signature_ |= ArgType_Float64;
+ break;
+ case MoveOp::FLOAT32:
+ signature_ |= ArgType_Float32;
+ break;
+ default:
+ MOZ_CRASH("Invalid argument type");
+ }
+#endif
+}
+
+ABIFunctionType MacroAssembler::signature() const {
+#ifdef JS_SIMULATOR
+# ifdef DEBUG
+ switch (signature_) {
+ case Args_General0:
+ case Args_General1:
+ case Args_General2:
+ case Args_General3:
+ case Args_General4:
+ case Args_General5:
+ case Args_General6:
+ case Args_General7:
+ case Args_General8:
+ case Args_Double_None:
+ case Args_Int_Double:
+ case Args_Float32_Float32:
+ case Args_Int_Float32:
+ case Args_Double_Double:
+ case Args_Double_Int:
+ case Args_Double_DoubleInt:
+ case Args_Double_DoubleDouble:
+ case Args_Double_IntDouble:
+ case Args_Int_IntDouble:
+ case Args_Int_DoubleInt:
+ case Args_Int_DoubleIntInt:
+ case Args_Int_IntDoubleIntInt:
+ case Args_Double_DoubleDoubleDouble:
+ case Args_Double_DoubleDoubleDoubleDouble:
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+# endif // DEBUG
+
+ return ABIFunctionType(signature_);
+#else
+ // No simulator enabled.
+ MOZ_CRASH("Only available for making calls within a simulator.");
+#endif
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t MacroAssembler::callJitNoProfiler(Register callee) {
+#ifdef JS_USE_LINK_REGISTER
+ // The return address is pushed by the callee.
+ call(callee);
+#else
+ callAndPushReturnAddress(callee);
+#endif
+ return currentOffset();
+}
+
+uint32_t MacroAssembler::callJit(Register callee) {
+ AutoProfilerCallInstrumentation profiler(*this);
+ uint32_t ret = callJitNoProfiler(callee);
+ return ret;
+}
+
+uint32_t MacroAssembler::callJit(JitCode* callee) {
+ AutoProfilerCallInstrumentation profiler(*this);
+ call(callee);
+ return currentOffset();
+}
+
+uint32_t MacroAssembler::callJit(TrampolinePtr code) {
+ AutoProfilerCallInstrumentation profiler(*this);
+ call(code);
+ return currentOffset();
+}
+
+uint32_t MacroAssembler::callJit(ImmPtr callee) {
+ AutoProfilerCallInstrumentation profiler(*this);
+ call(callee);
+ return currentOffset();
+}
+
+void MacroAssembler::pushFrameDescriptor(FrameType type) {
+ uint32_t descriptor = MakeFrameDescriptor(type);
+ push(Imm32(descriptor));
+}
+
+void MacroAssembler::PushFrameDescriptor(FrameType type) {
+ uint32_t descriptor = MakeFrameDescriptor(type);
+ Push(Imm32(descriptor));
+}
+
+void MacroAssembler::pushFrameDescriptorForJitCall(FrameType type,
+ uint32_t argc) {
+ uint32_t descriptor = MakeFrameDescriptorForJitCall(type, argc);
+ push(Imm32(descriptor));
+}
+
+void MacroAssembler::PushFrameDescriptorForJitCall(FrameType type,
+ uint32_t argc) {
+ uint32_t descriptor = MakeFrameDescriptorForJitCall(type, argc);
+ Push(Imm32(descriptor));
+}
+
+void MacroAssembler::pushFrameDescriptorForJitCall(FrameType type,
+ Register argc,
+ Register scratch) {
+ if (argc != scratch) {
+ mov(argc, scratch);
+ }
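+ // Build the descriptor in place: shift argc into the actual-args field and
+ // OR in the frame type tag, mirroring MakeFrameDescriptorForJitCall.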
+ lshift32(Imm32(NUMACTUALARGS_SHIFT), scratch);
+ or32(Imm32(int32_t(type)), scratch);
+ push(scratch);
+}
+
+void MacroAssembler::PushFrameDescriptorForJitCall(FrameType type,
+ Register argc,
+ Register scratch) {
+ pushFrameDescriptorForJitCall(type, argc, scratch);
+ framePushed_ += sizeof(uintptr_t);
+}
+
+void MacroAssembler::loadNumActualArgs(Register framePtr, Register dest) {
+ loadPtr(Address(framePtr, JitFrameLayout::offsetOfDescriptor()), dest);
+ rshift32(Imm32(NUMACTUALARGS_SHIFT), dest);
+}
+
+void MacroAssembler::PushCalleeToken(Register callee, bool constructing) {
+ if (constructing) {
+ orPtr(Imm32(CalleeToken_FunctionConstructing), callee);
+ Push(callee);
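+ // Clear the tag again so |callee| still holds the untagged JSFunction
+ // pointer for the caller.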
+ andPtr(Imm32(uint32_t(CalleeTokenMask)), callee);
+ } else {
+ static_assert(CalleeToken_Function == 0,
+ "Non-constructing call requires no tagging");
+ Push(callee);
+ }
+}
+
+void MacroAssembler::loadFunctionFromCalleeToken(Address token, Register dest) {
+#ifdef DEBUG
+ Label ok;
+ loadPtr(token, dest);
+ andPtr(Imm32(uint32_t(~CalleeTokenMask)), dest);
+ branchPtr(Assembler::Equal, dest, Imm32(CalleeToken_Function), &ok);
+ branchPtr(Assembler::Equal, dest, Imm32(CalleeToken_FunctionConstructing),
+ &ok);
+ assumeUnreachable("Unexpected CalleeToken tag");
+ bind(&ok);
+#endif
+ loadPtr(token, dest);
+ andPtr(Imm32(uint32_t(CalleeTokenMask)), dest);
+}
+
+uint32_t MacroAssembler::buildFakeExitFrame(Register scratch) {
+ mozilla::DebugOnly<uint32_t> initialDepth = framePushed();
+
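+ // Lay out a descriptor, a fake return address and the frame pointer so the
+ // stack matches an exit frame; the returned offset identifies the fake
+ // return address.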
+ PushFrameDescriptor(FrameType::IonJS);
+ uint32_t retAddr = pushFakeReturnAddress(scratch);
+ Push(FramePointer);
+
+ MOZ_ASSERT(framePushed() == initialDepth + ExitFrameLayout::Size());
+ return retAddr;
+}
+
+// ===============================================================
+// Exit frame footer.
+
+void MacroAssembler::enterExitFrame(Register cxreg, Register scratch,
+ const VMFunctionData* f) {
+ MOZ_ASSERT(f);
+ linkExitFrame(cxreg, scratch);
+ // Push VMFunction pointer, to mark arguments.
+ Push(ImmPtr(f));
+}
+
+void MacroAssembler::enterFakeExitFrame(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ linkExitFrame(cxreg, scratch);
+ Push(Imm32(int32_t(type)));
+}
+
+void MacroAssembler::enterFakeExitFrameForNative(Register cxreg,
+ Register scratch,
+ bool isConstructing) {
+ enterFakeExitFrame(cxreg, scratch,
+ isConstructing ? ExitFrameType::ConstructNative
+ : ExitFrameType::CallNative);
+}
+
+void MacroAssembler::leaveExitFrame(size_t extraFrame) {
+ freeStack(ExitFooterFrame::Size() + extraFrame);
+}
+
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveValue(const ConstantOrRegister& src,
+ const ValueOperand& dest) {
+ if (src.constant()) {
+ moveValue(src.value(), dest);
+ return;
+ }
+
+ moveValue(src.reg(), dest);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(ImmPtr imm, Register dest) {
+ addPtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+
+// ===============================================================
+// Branch functions
+
+template <class L>
+void MacroAssembler::branchIfFalseBool(Register reg, L label) {
+ // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
+ branchTest32(Assembler::Zero, reg, Imm32(0xFF), label);
+}
+
+void MacroAssembler::branchIfTrueBool(Register reg, Label* label) {
+ // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
+ branchTest32(Assembler::NonZero, reg, Imm32(0xFF), label);
+}
+
+void MacroAssembler::branchIfRope(Register str, Label* label) {
+ Address flags(str, JSString::offsetOfFlags());
+ branchTest32(Assembler::Zero, flags, Imm32(JSString::LINEAR_BIT), label);
+}
+
+void MacroAssembler::branchIfNotRope(Register str, Label* label) {
+ Address flags(str, JSString::offsetOfFlags());
+ branchTest32(Assembler::NonZero, flags, Imm32(JSString::LINEAR_BIT), label);
+}
+
+void MacroAssembler::branchLatin1String(Register string, Label* label) {
+ branchTest32(Assembler::NonZero, Address(string, JSString::offsetOfFlags()),
+ Imm32(JSString::LATIN1_CHARS_BIT), label);
+}
+
+void MacroAssembler::branchTwoByteString(Register string, Label* label) {
+ branchTest32(Assembler::Zero, Address(string, JSString::offsetOfFlags()),
+ Imm32(JSString::LATIN1_CHARS_BIT), label);
+}
+
+void MacroAssembler::branchIfBigIntIsNegative(Register bigInt, Label* label) {
+ branchTest32(Assembler::NonZero, Address(bigInt, BigInt::offsetOfFlags()),
+ Imm32(BigInt::signBitMask()), label);
+}
+
+void MacroAssembler::branchIfBigIntIsNonNegative(Register bigInt,
+ Label* label) {
+ branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
+ Imm32(BigInt::signBitMask()), label);
+}
+
+void MacroAssembler::branchIfBigIntIsZero(Register bigInt, Label* label) {
+ branch32(Assembler::Equal, Address(bigInt, BigInt::offsetOfLength()),
+ Imm32(0), label);
+}
+
+void MacroAssembler::branchIfBigIntIsNonZero(Register bigInt, Label* label) {
+ branch32(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
+ Imm32(0), label);
+}
+
+void MacroAssembler::branchTestFunctionFlags(Register fun, uint32_t flags,
+ Condition cond, Label* label) {
+ Address address(fun, JSFunction::offsetOfFlagsAndArgCount());
+ branchTest32(cond, address, Imm32(flags), label);
+}
+
+void MacroAssembler::branchIfNotFunctionIsNonBuiltinCtor(Register fun,
+ Register scratch,
+ Label* label) {
+ // Guard the function has the BASESCRIPT and CONSTRUCTOR flags and does NOT
+ // have the SELF_HOSTED flag.
+ // This is equivalent to JSFunction::isNonBuiltinConstructor.
+ constexpr int32_t mask = FunctionFlags::BASESCRIPT |
+ FunctionFlags::SELF_HOSTED |
+ FunctionFlags::CONSTRUCTOR;
+ constexpr int32_t expected =
+ FunctionFlags::BASESCRIPT | FunctionFlags::CONSTRUCTOR;
+
+ load32(Address(fun, JSFunction::offsetOfFlagsAndArgCount()), scratch);
+ and32(Imm32(mask), scratch);
+ branch32(Assembler::NotEqual, scratch, Imm32(expected), label);
+}
+
+void MacroAssembler::branchIfFunctionHasNoJitEntry(Register fun,
+ bool isConstructing,
+ Label* label) {
+ uint16_t flags = FunctionFlags::HasJitEntryFlags(isConstructing);
+ branchTestFunctionFlags(fun, flags, Assembler::Zero, label);
+}
+
+void MacroAssembler::branchIfFunctionHasJitEntry(Register fun,
+ bool isConstructing,
+ Label* label) {
+ uint16_t flags = FunctionFlags::HasJitEntryFlags(isConstructing);
+ branchTestFunctionFlags(fun, flags, Assembler::NonZero, label);
+}
+
+void MacroAssembler::branchIfScriptHasJitScript(Register script, Label* label) {
+ static_assert(ScriptWarmUpData::JitScriptTag == 0,
+ "Code below depends on tag value");
+ branchTestPtr(Assembler::Zero,
+ Address(script, JSScript::offsetOfWarmUpData()),
+ Imm32(ScriptWarmUpData::TagMask), label);
+}
+
+void MacroAssembler::branchIfScriptHasNoJitScript(Register script,
+ Label* label) {
+ static_assert(ScriptWarmUpData::JitScriptTag == 0,
+ "Code below depends on tag value");
+ static_assert(BaseScript::offsetOfWarmUpData() ==
+ SelfHostedLazyScript::offsetOfWarmUpData(),
+ "SelfHostedLazyScript and BaseScript must use same layout for "
+ "warmUpData_");
+ branchTestPtr(Assembler::NonZero,
+ Address(script, JSScript::offsetOfWarmUpData()),
+ Imm32(ScriptWarmUpData::TagMask), label);
+}
+
+void MacroAssembler::loadJitScript(Register script, Register dest) {
+#ifdef DEBUG
+ Label ok;
+ branchIfScriptHasJitScript(script, &ok);
+ assumeUnreachable("Script has no JitScript!");
+ bind(&ok);
+#endif
+
+ static_assert(ScriptWarmUpData::JitScriptTag == 0,
+ "Code below depends on tag value");
+ loadPtr(Address(script, JSScript::offsetOfWarmUpData()), dest);
+}
+
+void MacroAssembler::loadFunctionArgCount(Register func, Register output) {
+ load32(Address(func, JSFunction::offsetOfFlagsAndArgCount()), output);
+ rshift32(Imm32(JSFunction::ArgCountShift), output);
+}
+
+void MacroAssembler::branchIfObjectEmulatesUndefined(Register objReg,
+ Register scratch,
+ Label* slowCheck,
+ Label* label) {
+ // The branches to out-of-line code here implement a conservative version
+ // of the JSObject::isWrapper test performed in EmulatesUndefined.
+ loadObjClassUnsafe(objReg, scratch);
+
+ branchTestClassIsProxy(true, scratch, slowCheck);
+
+ Address flags(scratch, JSClass::offsetOfFlags());
+ branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
+ label);
+}
+
+void MacroAssembler::branchFunctionKind(Condition cond,
+ FunctionFlags::FunctionKind kind,
+ Register fun, Register scratch,
+ Label* label) {
+ Address address(fun, JSFunction::offsetOfFlagsAndArgCount());
+ load32(address, scratch);
+ and32(Imm32(FunctionFlags::FUNCTION_KIND_MASK), scratch);
+ branch32(cond, scratch, Imm32(kind), label);
+}
+
+void MacroAssembler::branchTestObjClass(Condition cond, Register obj,
+ const JSClass* clasp, Register scratch,
+ Register spectreRegToZero,
+ Label* label) {
+ MOZ_ASSERT(obj != scratch);
+ MOZ_ASSERT(scratch != spectreRegToZero);
+
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
+ branchPtr(cond, Address(scratch, BaseShape::offsetOfClasp()), ImmPtr(clasp),
+ label);
+
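+ // With Spectre mitigations enabled, conditionally zero |spectreRegToZero| so
+ // code executed speculatively past a mispredicted class guard cannot use the
+ // object pointer.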
+ if (JitOptions.spectreObjectMitigations) {
+ spectreZeroRegister(cond, scratch, spectreRegToZero);
+ }
+}
+
+void MacroAssembler::branchTestObjClassNoSpectreMitigations(
+ Condition cond, Register obj, const JSClass* clasp, Register scratch,
+ Label* label) {
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
+ branchPtr(cond, Address(scratch, BaseShape::offsetOfClasp()), ImmPtr(clasp),
+ label);
+}
+
+void MacroAssembler::branchTestObjClass(Condition cond, Register obj,
+ const Address& clasp, Register scratch,
+ Register spectreRegToZero,
+ Label* label) {
+ MOZ_ASSERT(obj != scratch);
+ MOZ_ASSERT(scratch != spectreRegToZero);
+
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
+ loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ branchPtr(cond, clasp, scratch, label);
+
+ if (JitOptions.spectreObjectMitigations) {
+ spectreZeroRegister(cond, scratch, spectreRegToZero);
+ }
+}
+
+void MacroAssembler::branchTestObjClassNoSpectreMitigations(
+ Condition cond, Register obj, const Address& clasp, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(obj != scratch);
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
+ loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ branchPtr(cond, clasp, scratch, label);
+}
+
+void MacroAssembler::branchTestObjClass(Condition cond, Register obj,
+ Register clasp, Register scratch,
+ Register spectreRegToZero,
+ Label* label) {
+ MOZ_ASSERT(obj != scratch);
+ MOZ_ASSERT(scratch != spectreRegToZero);
+
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
+ loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ branchPtr(cond, clasp, scratch, label);
+
+ if (JitOptions.spectreObjectMitigations) {
+ spectreZeroRegister(cond, scratch, spectreRegToZero);
+ }
+}
+
+void MacroAssembler::branchTestClassIsFunction(Condition cond, Register clasp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ if (cond == Assembler::Equal) {
+ branchPtr(Assembler::Equal, clasp, ImmPtr(&FunctionClass), label);
+ branchPtr(Assembler::Equal, clasp, ImmPtr(&ExtendedFunctionClass), label);
+ return;
+ }
+
+ Label isFunction;
+ branchPtr(Assembler::Equal, clasp, ImmPtr(&FunctionClass), &isFunction);
+ branchPtr(Assembler::NotEqual, clasp, ImmPtr(&ExtendedFunctionClass), label);
+ bind(&isFunction);
+}
+
+void MacroAssembler::branchTestObjIsFunction(Condition cond, Register obj,
+ Register scratch,
+ Register spectreRegToZero,
+ Label* label) {
+ MOZ_ASSERT(scratch != spectreRegToZero);
+
+ branchTestObjIsFunctionNoSpectreMitigations(cond, obj, scratch, label);
+
+ if (JitOptions.spectreObjectMitigations) {
+ spectreZeroRegister(cond, scratch, spectreRegToZero);
+ }
+}
+
+void MacroAssembler::branchTestObjIsFunctionNoSpectreMitigations(
+ Condition cond, Register obj, Register scratch, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(obj != scratch);
+
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
+ loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ branchTestClassIsFunction(cond, scratch, label);
+}
+
+void MacroAssembler::branchTestObjShape(Condition cond, Register obj,
+ const Shape* shape, Register scratch,
+ Register spectreRegToZero,
+ Label* label) {
+ MOZ_ASSERT(obj != scratch);
+ MOZ_ASSERT(spectreRegToZero != scratch);
+
+ if (JitOptions.spectreObjectMitigations) {
+ move32(Imm32(0), scratch);
+ }
+
+ branchPtr(cond, Address(obj, JSObject::offsetOfShape()), ImmGCPtr(shape),
+ label);
+
+ if (JitOptions.spectreObjectMitigations) {
+ spectreMovePtr(cond, scratch, spectreRegToZero);
+ }
+}
+
+void MacroAssembler::branchTestObjShapeNoSpectreMitigations(Condition cond,
+ Register obj,
+ const Shape* shape,
+ Label* label) {
+ branchPtr(cond, Address(obj, JSObject::offsetOfShape()), ImmGCPtr(shape),
+ label);
+}
+
+void MacroAssembler::branchTestObjShape(Condition cond, Register obj,
+ Register shape, Register scratch,
+ Register spectreRegToZero,
+ Label* label) {
+ MOZ_ASSERT(obj != scratch);
+ MOZ_ASSERT(obj != shape);
+ MOZ_ASSERT(spectreRegToZero != scratch);
+
+ if (JitOptions.spectreObjectMitigations) {
+ move32(Imm32(0), scratch);
+ }
+
+ branchPtr(cond, Address(obj, JSObject::offsetOfShape()), shape, label);
+
+ if (JitOptions.spectreObjectMitigations) {
+ spectreMovePtr(cond, scratch, spectreRegToZero);
+ }
+}
+
+void MacroAssembler::branchTestObjShapeNoSpectreMitigations(Condition cond,
+ Register obj,
+ Register shape,
+ Label* label) {
+ branchPtr(cond, Address(obj, JSObject::offsetOfShape()), shape, label);
+}
+
+void MacroAssembler::branchTestObjShapeUnsafe(Condition cond, Register obj,
+ Register shape, Label* label) {
+ branchTestObjShapeNoSpectreMitigations(cond, obj, shape, label);
+}
+
+void MacroAssembler::branchTestClassIsProxy(bool proxy, Register clasp,
+ Label* label) {
+ branchTest32(proxy ? Assembler::NonZero : Assembler::Zero,
+ Address(clasp, JSClass::offsetOfFlags()),
+ Imm32(JSCLASS_IS_PROXY), label);
+}
+
+void MacroAssembler::branchTestObjectIsProxy(bool proxy, Register object,
+ Register scratch, Label* label) {
+ constexpr uint32_t ShiftedMask = (Shape::kindMask() << Shape::kindShift());
+ static_assert(uint32_t(Shape::Kind::Proxy) == 0,
+ "branchTest32 below depends on proxy kind being 0");
+ loadPtr(Address(object, JSObject::offsetOfShape()), scratch);
+ branchTest32(proxy ? Assembler::Zero : Assembler::NonZero,
+ Address(scratch, Shape::offsetOfImmutableFlags()),
+ Imm32(ShiftedMask), label);
+}
+
+void MacroAssembler::branchTestObjectIsWasmGcObject(bool isGcObject,
+ Register object,
+ Register scratch,
+ Label* label) {
+ constexpr uint32_t ShiftedMask = (Shape::kindMask() << Shape::kindShift());
+ constexpr uint32_t ShiftedKind =
+ (uint32_t(Shape::Kind::WasmGC) << Shape::kindShift());
+ MOZ_ASSERT(object != scratch);
+
+ loadPtr(Address(object, JSObject::offsetOfShape()), scratch);
+ load32(Address(scratch, Shape::offsetOfImmutableFlags()), scratch);
+ and32(Imm32(ShiftedMask), scratch);
+ branch32(isGcObject ? Assembler::Equal : Assembler::NotEqual, scratch,
+ Imm32(ShiftedKind), label);
+}
+
+void MacroAssembler::branchTestProxyHandlerFamily(Condition cond,
+ Register proxy,
+ Register scratch,
+ const void* handlerp,
+ Label* label) {
+#ifdef DEBUG
+ Label ok;
+ branchTestObjectIsProxy(true, proxy, scratch, &ok);
+ assumeUnreachable("Expected ProxyObject in branchTestProxyHandlerFamily");
+ bind(&ok);
+#endif
+
+ Address handlerAddr(proxy, ProxyObject::offsetOfHandler());
+ loadPtr(handlerAddr, scratch);
+ Address familyAddr(scratch, BaseProxyHandler::offsetOfFamily());
+ branchPtr(cond, familyAddr, ImmPtr(handlerp), label);
+}
+
+void MacroAssembler::branchTestNeedsIncrementalBarrier(Condition cond,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ CompileZone* zone = realm()->zone();
+ const uint32_t* needsBarrierAddr = zone->addressOfNeedsIncrementalBarrier();
+ branchTest32(cond, AbsoluteAddress(needsBarrierAddr), Imm32(0x1), label);
+}
+
+void MacroAssembler::branchTestNeedsIncrementalBarrierAnyZone(
+ Condition cond, Label* label, Register scratch) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ if (maybeRealm_) {
+ branchTestNeedsIncrementalBarrier(cond, label);
+ } else {
+ // We are compiling the interpreter or another runtime-wide trampoline, so
+ // we have to load cx->zone.
+ loadPtr(AbsoluteAddress(runtime()->addressOfZone()), scratch);
+ Address needsBarrierAddr(scratch, Zone::offsetOfNeedsIncrementalBarrier());
+ branchTest32(cond, needsBarrierAddr, Imm32(0x1), label);
+ }
+}
+
+void MacroAssembler::branchTestMagicValue(Condition cond,
+ const ValueOperand& val,
+ JSWhyMagic why, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ branchTestValue(cond, val, MagicValue(why), label);
+}
+
+void MacroAssembler::branchDoubleNotInInt64Range(Address src, Register temp,
+ Label* fail) {
+ using mozilla::FloatingPoint;
+
+ // Tests if double is in [INT64_MIN; INT64_MAX] range
+ uint32_t EXPONENT_MASK = 0x7ff00000;
+ uint32_t EXPONENT_SHIFT = FloatingPoint<double>::kExponentShift - 32;
+ uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::kExponentBias + 63)
+ << EXPONENT_SHIFT;
+
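+ // Only the high word of the double is loaded: a biased exponent of at least
+ // bias+63 means the magnitude is at least 2^63, or the value is NaN or
+ // Infinity.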
+ load32(Address(src.base, src.offset + sizeof(int32_t)), temp);
+ and32(Imm32(EXPONENT_MASK), temp);
+ branch32(Assembler::GreaterThanOrEqual, temp, Imm32(TOO_BIG_EXPONENT), fail);
+}
+
+void MacroAssembler::branchDoubleNotInUInt64Range(Address src, Register temp,
+ Label* fail) {
+ using mozilla::FloatingPoint;
+
+ // Note: branches to |fail| on -0.0.
+ // Tests if the double is in the [0; UINT64_MAX] range.
+ // The sign bit is included in the exponent mask, so a negative value or a
+ // too-large exponent is rejected with a single comparison.
+ uint32_t EXPONENT_MASK = 0xfff00000;
+ uint32_t EXPONENT_SHIFT = FloatingPoint<double>::kExponentShift - 32;
+ uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::kExponentBias + 64)
+ << EXPONENT_SHIFT;
+
+ load32(Address(src.base, src.offset + sizeof(int32_t)), temp);
+ and32(Imm32(EXPONENT_MASK), temp);
+ branch32(Assembler::AboveOrEqual, temp, Imm32(TOO_BIG_EXPONENT), fail);
+}
+
+void MacroAssembler::branchFloat32NotInInt64Range(Address src, Register temp,
+ Label* fail) {
+ using mozilla::FloatingPoint;
+
+ // Tests if float is in [INT64_MIN; INT64_MAX] range
+ uint32_t EXPONENT_MASK = 0x7f800000;
+ uint32_t EXPONENT_SHIFT = FloatingPoint<float>::kExponentShift;
+ uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::kExponentBias + 63)
+ << EXPONENT_SHIFT;
+
+ load32(src, temp);
+ and32(Imm32(EXPONENT_MASK), temp);
+ branch32(Assembler::GreaterThanOrEqual, temp, Imm32(TOO_BIG_EXPONENT), fail);
+}
+
+void MacroAssembler::branchFloat32NotInUInt64Range(Address src, Register temp,
+ Label* fail) {
+ using mozilla::FloatingPoint;
+
+ // Note: branches to |fail| on -0.0.
+ // Tests if the float is in the [0; UINT64_MAX] range.
+ // The sign bit is included in the exponent mask, so a negative value or a
+ // too-large exponent is rejected with a single comparison.
+ uint32_t EXPONENT_MASK = 0xff800000;
+ uint32_t EXPONENT_SHIFT = FloatingPoint<float>::kExponentShift;
+ uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::kExponentBias + 64)
+ << EXPONENT_SHIFT;
+
+ load32(src, temp);
+ and32(Imm32(EXPONENT_MASK), temp);
+ branch32(Assembler::AboveOrEqual, temp, Imm32(TOO_BIG_EXPONENT), fail);
+}
+
+// ========================================================================
+// Canonicalization primitives.
+void MacroAssembler::canonicalizeFloat(FloatRegister reg) {
+ Label notNaN;
+ branchFloat(DoubleOrdered, reg, reg, &notNaN);
+ loadConstantFloat32(float(JS::GenericNaN()), reg);
+ bind(&notNaN);
+}
+
+void MacroAssembler::canonicalizeFloatIfDeterministic(FloatRegister reg) {
+ // See the comment in TypedArrayObjectTemplate::getElement.
+ if (js::SupportDifferentialTesting()) {
+ canonicalizeFloat(reg);
+ }
+}
+
+void MacroAssembler::canonicalizeDouble(FloatRegister reg) {
+ Label notNaN;
+ branchDouble(DoubleOrdered, reg, reg, &notNaN);
+ loadConstantDouble(JS::GenericNaN(), reg);
+ bind(&notNaN);
+}
+
+void MacroAssembler::canonicalizeDoubleIfDeterministic(FloatRegister reg) {
+ // See the comment in TypedArrayObjectTemplate::getElement.
+ if (js::SupportDifferentialTesting()) {
+ canonicalizeDouble(reg);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
+template <class T>
+void MacroAssembler::storeDouble(FloatRegister src, const T& dest) {
+ canonicalizeDoubleIfDeterministic(src);
+ storeUncanonicalizedDouble(src, dest);
+}
+
+template void MacroAssembler::storeDouble(FloatRegister src,
+ const Address& dest);
+template void MacroAssembler::storeDouble(FloatRegister src,
+ const BaseIndex& dest);
+
+template <class T>
+void MacroAssembler::boxDouble(FloatRegister src, const T& dest) {
+ storeDouble(src, dest);
+}
+
+template <class T>
+void MacroAssembler::storeFloat32(FloatRegister src, const T& dest) {
+ canonicalizeFloatIfDeterministic(src);
+ storeUncanonicalizedFloat32(src, dest);
+}
+
+template void MacroAssembler::storeFloat32(FloatRegister src,
+ const Address& dest);
+template void MacroAssembler::storeFloat32(FloatRegister src,
+ const BaseIndex& dest);
+
+template <typename T>
+void MacroAssembler::fallibleUnboxInt32(const T& src, Register dest,
+ Label* fail) {
+ // Int32Value can be unboxed efficiently with unboxInt32, so use that.
+ branchTestInt32(Assembler::NotEqual, src, fail);
+ unboxInt32(src, dest);
+}
+
+template <typename T>
+void MacroAssembler::fallibleUnboxBoolean(const T& src, Register dest,
+ Label* fail) {
+ // BooleanValue can be unboxed efficiently with unboxBoolean, so use that.
+ branchTestBoolean(Assembler::NotEqual, src, fail);
+ unboxBoolean(src, dest);
+}
+
+template <typename T>
+void MacroAssembler::fallibleUnboxObject(const T& src, Register dest,
+ Label* fail) {
+ fallibleUnboxPtr(src, dest, JSVAL_TYPE_OBJECT, fail);
+}
+
+template <typename T>
+void MacroAssembler::fallibleUnboxString(const T& src, Register dest,
+ Label* fail) {
+ fallibleUnboxPtr(src, dest, JSVAL_TYPE_STRING, fail);
+}
+
+template <typename T>
+void MacroAssembler::fallibleUnboxSymbol(const T& src, Register dest,
+ Label* fail) {
+ fallibleUnboxPtr(src, dest, JSVAL_TYPE_SYMBOL, fail);
+}
+
+template <typename T>
+void MacroAssembler::fallibleUnboxBigInt(const T& src, Register dest,
+ Label* fail) {
+ fallibleUnboxPtr(src, dest, JSVAL_TYPE_BIGINT, fail);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+#ifndef JS_CODEGEN_ARM64
+
+template <typename T>
+void MacroAssembler::branchTestStackPtr(Condition cond, T t, Label* label) {
+ branchTestPtr(cond, getStackPointer(), t, label);
+}
+
+template <typename T>
+void MacroAssembler::branchStackPtr(Condition cond, T rhs, Label* label) {
+ branchPtr(cond, getStackPointer(), rhs, label);
+}
+
+template <typename T>
+void MacroAssembler::branchStackPtrRhs(Condition cond, T lhs, Label* label) {
+ branchPtr(cond, lhs, getStackPointer(), label);
+}
+
+template <typename T>
+void MacroAssembler::addToStackPtr(T t) {
+ addPtr(t, getStackPointer());
+}
+
+template <typename T>
+void MacroAssembler::addStackPtrTo(T t) {
+ addPtr(getStackPointer(), t);
+}
+
+void MacroAssembler::reserveStack(uint32_t amount) {
+ subFromStackPtr(Imm32(amount));
+ adjustFrame(amount);
+}
+#endif // !JS_CODEGEN_ARM64
+
+void MacroAssembler::loadObjClassUnsafe(Register obj, Register dest) {
+ loadPtr(Address(obj, JSObject::offsetOfShape()), dest);
+ loadPtr(Address(dest, Shape::offsetOfBaseShape()), dest);
+ loadPtr(Address(dest, BaseShape::offsetOfClasp()), dest);
+}
+
+template <typename EmitPreBarrier>
+void MacroAssembler::storeObjShape(Register shape, Register obj,
+ EmitPreBarrier emitPreBarrier) {
+ MOZ_ASSERT(shape != obj);
+ Address shapeAddr(obj, JSObject::offsetOfShape());
+ emitPreBarrier(*this, shapeAddr);
+ storePtr(shape, shapeAddr);
+}
+
+template <typename EmitPreBarrier>
+void MacroAssembler::storeObjShape(Shape* shape, Register obj,
+ EmitPreBarrier emitPreBarrier) {
+ Address shapeAddr(obj, JSObject::offsetOfShape());
+ emitPreBarrier(*this, shapeAddr);
+ storePtr(ImmGCPtr(shape), shapeAddr);
+}
+
+void MacroAssembler::loadObjProto(Register obj, Register dest) {
+ loadPtr(Address(obj, JSObject::offsetOfShape()), dest);
+ loadPtr(Address(dest, Shape::offsetOfBaseShape()), dest);
+ loadPtr(Address(dest, BaseShape::offsetOfProto()), dest);
+}
+
+void MacroAssembler::loadStringLength(Register str, Register dest) {
+ load32(Address(str, JSString::offsetOfLength()), dest);
+}
+
+void MacroAssembler::assertStackAlignment(uint32_t alignment,
+ int32_t offset /* = 0 */) {
+#ifdef DEBUG
+ Label ok, bad;
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+
+ // Reduce the offset modulo the alignment so it is non-negative.
+ offset %= alignment;
+ if (offset < 0) {
+ offset += alignment;
+ }
+
+ // Check that every bit set in |offset| is also set in the stack pointer.
+ uint32_t off = offset;
+ while (off) {
+ uint32_t lowestBit = 1 << mozilla::CountTrailingZeroes32(off);
+ branchTestStackPtr(Assembler::Zero, Imm32(lowestBit), &bad);
+ off ^= lowestBit;
+ }
+
+ // Check that all remaining bits are zero.
+ branchTestStackPtr(Assembler::Zero, Imm32((alignment - 1) ^ offset), &ok);
+
+ bind(&bad);
+ breakpoint();
+ bind(&ok);
+#endif
+}
+
+void MacroAssembler::storeCallBoolResult(Register reg) {
+ convertBoolToInt32(ReturnReg, reg);
+}
+
+void MacroAssembler::storeCallInt32Result(Register reg) {
+#if JS_BITS_PER_WORD == 32
+ storeCallPointerResult(reg);
+#else
+ // Ensure the upper 32 bits are cleared.
+ move32(ReturnReg, reg);
+#endif
+}
+
+void MacroAssembler::storeCallResultValue(AnyRegister dest, JSValueType type) {
+ unboxValue(JSReturnOperand, dest, type);
+}
+
+void MacroAssembler::storeCallResultValue(TypedOrValueRegister dest) {
+ if (dest.hasValue()) {
+ storeCallResultValue(dest.valueReg());
+ } else {
+ storeCallResultValue(dest.typedReg(), ValueTypeFromMIRType(dest.type()));
+ }
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MacroAssembler_inl_h */
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
new file mode 100644
index 0000000000..87e1aff967
--- /dev/null
+++ b/js/src/jit/MacroAssembler.cpp
@@ -0,0 +1,6671 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/MacroAssembler-inl.h"
+
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "jit/AtomicOp.h"
+#include "jit/AtomicOperations.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineJIT.h"
+#include "jit/JitFrames.h"
+#include "jit/JitOptions.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitScript.h"
+#include "jit/MoveEmitter.h"
+#include "jit/ReciprocalMulConstants.h"
+#include "jit/SharedICHelpers.h"
+#include "jit/SharedICRegisters.h"
+#include "jit/Simulator.h"
+#include "jit/VMFunctions.h"
+#include "js/Conversions.h"
+#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "vm/ArgumentsObject.h"
+#include "vm/ArrayBufferViewObject.h"
+#include "vm/BoundFunctionObject.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/Iteration.h"
+#include "vm/JSContext.h"
+#include "vm/TypedArrayObject.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCodegenConstants.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmGcObject.h"
+#include "wasm/WasmInstanceData.h"
+#include "wasm/WasmMemory.h"
+#include "wasm/WasmTypeDef.h"
+#include "wasm/WasmValidate.h"
+
+#include "jit/TemplateObject-inl.h"
+#include "vm/BytecodeUtil-inl.h"
+#include "vm/Interpreter-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using JS::ToInt32;
+
+using mozilla::CheckedInt;
+
+TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
+ const JitRuntime* rt = runtime()->jitRuntime();
+ return rt->preBarrier(type);
+}
+
+template <typename S, typename T>
+static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
+ const S& value, const T& dest) {
+ switch (arrayType) {
+ case Scalar::Float32:
+ masm.storeFloat32(value, dest);
+ break;
+ case Scalar::Float64:
+ masm.storeDouble(value, dest);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
+ FloatRegister value,
+ const BaseIndex& dest) {
+ StoreToTypedFloatArray(*this, arrayType, value, dest);
+}
+void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
+ FloatRegister value,
+ const Address& dest) {
+ StoreToTypedFloatArray(*this, arrayType, value, dest);
+}
+
+template <typename S, typename T>
+static void StoreToTypedBigIntArray(MacroAssembler& masm,
+ Scalar::Type arrayType, const S& value,
+ const T& dest) {
+ MOZ_ASSERT(Scalar::isBigIntType(arrayType));
+ masm.store64(value, dest);
+}
+
+void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
+ Register64 value,
+ const BaseIndex& dest) {
+ StoreToTypedBigIntArray(*this, arrayType, value, dest);
+}
+void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
+ Register64 value,
+ const Address& dest) {
+ StoreToTypedBigIntArray(*this, arrayType, value, dest);
+}
+
+void MacroAssembler::boxUint32(Register source, ValueOperand dest,
+ Uint32Mode mode, Label* fail) {
+ switch (mode) {
+ // Fail if the value does not fit in an int32.
+ case Uint32Mode::FailOnDouble: {
+ branchTest32(Assembler::Signed, source, source, fail);
+ tagValue(JSVAL_TYPE_INT32, source, dest);
+ break;
+ }
+ case Uint32Mode::ForceDouble: {
+ // Always convert the value to double.
+ ScratchDoubleScope fpscratch(*this);
+ convertUInt32ToDouble(source, fpscratch);
+ boxDouble(fpscratch, dest, fpscratch);
+ break;
+ }
+ }
+}
+
+template <typename T>
+void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
+ AnyRegister dest, Register temp,
+ Label* fail) {
+ switch (arrayType) {
+ case Scalar::Int8:
+ load8SignExtend(src, dest.gpr());
+ break;
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ load8ZeroExtend(src, dest.gpr());
+ break;
+ case Scalar::Int16:
+ load16SignExtend(src, dest.gpr());
+ break;
+ case Scalar::Uint16:
+ load16ZeroExtend(src, dest.gpr());
+ break;
+ case Scalar::Int32:
+ load32(src, dest.gpr());
+ break;
+ case Scalar::Uint32:
+ if (dest.isFloat()) {
+ load32(src, temp);
+ convertUInt32ToDouble(temp, dest.fpu());
+ } else {
+ load32(src, dest.gpr());
+
+ // Bail out if the value doesn't fit into a signed int32 value. This
+ // is what allows MLoadUnboxedScalar to have a type() of
+ // MIRType::Int32 for UInt32 array loads.
+ branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
+ }
+ break;
+ case Scalar::Float32:
+ loadFloat32(src, dest.fpu());
+ canonicalizeFloat(dest.fpu());
+ break;
+ case Scalar::Float64:
+ loadDouble(src, dest.fpu());
+ canonicalizeDouble(dest.fpu());
+ break;
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
+ const Address& src,
+ AnyRegister dest,
+ Register temp, Label* fail);
+template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
+ const BaseIndex& src,
+ AnyRegister dest,
+ Register temp, Label* fail);
+
+template <typename T>
+void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
+ const ValueOperand& dest,
+ Uint32Mode uint32Mode, Register temp,
+ Label* fail) {
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
+ InvalidReg, nullptr);
+ tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
+ break;
+ case Scalar::Uint32:
+ // Don't clobber dest when we could fail; use temp instead.
+ load32(src, temp);
+ boxUint32(temp, dest, uint32Mode, fail);
+ break;
+ case Scalar::Float32: {
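+ // Load into the single-precision alias of the double scratch register,
+ // then widen to double before boxing the value.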
+ ScratchDoubleScope dscratch(*this);
+ FloatRegister fscratch = dscratch.asSingle();
+ loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
+ dest.scratchReg(), nullptr);
+ convertFloat32ToDouble(fscratch, dscratch);
+ boxDouble(dscratch, dest, dscratch);
+ break;
+ }
+ case Scalar::Float64: {
+ ScratchDoubleScope fpscratch(*this);
+ loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
+ dest.scratchReg(), nullptr);
+ boxDouble(fpscratch, dest, fpscratch);
+ break;
+ }
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
+ const Address& src,
+ const ValueOperand& dest,
+ Uint32Mode uint32Mode,
+ Register temp, Label* fail);
+template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
+ const BaseIndex& src,
+ const ValueOperand& dest,
+ Uint32Mode uint32Mode,
+ Register temp, Label* fail);
+
+template <typename T>
+void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
+ const T& src, Register bigInt,
+ Register64 temp) {
+ MOZ_ASSERT(Scalar::isBigIntType(arrayType));
+
+ load64(src, temp);
+ initializeBigInt64(arrayType, bigInt, temp);
+}
+
+template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
+ const Address& src,
+ Register bigInt,
+ Register64 temp);
+template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
+ const BaseIndex& src,
+ Register bigInt,
+ Register64 temp);
+
+// Inlined version of gc::CheckAllocatorState that checks the bare essentials
+// and bails for anything that cannot be handled with our jit allocators.
+void MacroAssembler::checkAllocatorState(Label* fail) {
+ // Don't execute the inline path if GC probes are built in.
+#ifdef JS_GC_PROBES
+ jump(fail);
+#endif
+
+#ifdef JS_GC_ZEAL
+ // Don't execute the inline path if gc zeal or tracing are active.
+ const uint32_t* ptrZealModeBits = runtime()->addressOfGCZealModeBits();
+ branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
+ fail);
+#endif
+
+ // Don't execute the inline path if the realm has an object metadata callback,
+ // as the metadata to use for the object may vary between executions of the
+ // op.
+ if (realm()->hasAllocationMetadataBuilder()) {
+ jump(fail);
+ }
+}
+
+bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
+ gc::Heap initialHeap) {
+ // Note that Ion elides barriers on writes to objects known to be in the
+ // nursery, so any allocation that can be made into the nursery must be made
+ // into the nursery, even if the nursery is disabled. At runtime these will
+ // take the out-of-line path, which is required to insert a barrier for the
+ // initializing writes.
+ return IsNurseryAllocable(allocKind) && initialHeap != gc::Heap::Tenured;
+}
+
+// Inline version of Nursery::allocateObject. If the object has dynamic slots,
+// this fills in the slots_ pointer.
+void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
+ gc::AllocKind allocKind,
+ size_t nDynamicSlots, Label* fail,
+ const AllocSiteInput& allocSite) {
+ MOZ_ASSERT(IsNurseryAllocable(allocKind));
+
+ // Currently the JIT does not nursery-allocate foreground-finalized
+ // objects. Nursery allocation is allowed for classes that support it and
+ // set the JSCLASS_SKIP_NURSERY_FINALIZE flag, but that is hard to assert
+ // here, so disallow all foreground-finalized objects for now.
+ MOZ_ASSERT(!IsForegroundFinalized(allocKind));
+
+ // We still need to allocate in the nursery, per the comment in
+ // shouldNurseryAllocate; however, we need to insert into the
+ // mallocedBuffers set, so bail to do the nursery allocation in the
+ // interpreter.
+ if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
+ jump(fail);
+ return;
+ }
+
+ // Check whether this allocation site needs pretenuring. This dynamic check
+ // only happens for baseline code.
+ if (allocSite.is<Register>()) {
+ Register site = allocSite.as<Register>();
+ branchTestPtr(Assembler::NonZero,
+ Address(site, gc::AllocSite::offsetOfScriptAndState()),
+ Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);
+ }
+
+ // No explicit check for nursery.isEnabled() is needed, as the comparison
+ // with the nursery's end will always fail in such cases.
+ CompileZone* zone = realm()->zone();
+ size_t thingSize = gc::Arena::thingSize(allocKind);
+ size_t totalSize = thingSize;
+ if (nDynamicSlots) {
+ totalSize += ObjectSlots::allocSize(nDynamicSlots);
+ }
+ MOZ_ASSERT(totalSize < INT32_MAX);
+ MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
+
+ bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::Object,
+ totalSize, allocSite);
+
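+ // Dynamic slots are stored behind an ObjectSlots header placed right after
+ // the object: initialize its capacity, dictionary slot span and unique-id
+ // fields, then point the object's slots_ at the first slot.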
+ if (nDynamicSlots) {
+ store32(Imm32(nDynamicSlots),
+ Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
+ store32(
+ Imm32(0),
+ Address(result, thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
+ store64(Imm64(ObjectSlots::NoUniqueIdInDynamicSlots),
+ Address(result, thingSize + ObjectSlots::offsetOfMaybeUniqueId()));
+ computeEffectiveAddress(
+ Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
+ storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
+ }
+}
+
+// Inlined version of FreeSpan::allocate. This does not fill in slots_.
+void MacroAssembler::freeListAllocate(Register result, Register temp,
+ gc::AllocKind allocKind, Label* fail) {
+ CompileZone* zone = realm()->zone();
+ int thingSize = int(gc::Arena::thingSize(allocKind));
+
+ Label fallback;
+ Label success;
+
+ // Load the first and last offsets of |zone|'s free list for |allocKind|.
+ // If there is no room remaining in the span, fall back to get the next one.
+ gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
+ loadPtr(AbsoluteAddress(ptrFreeList), temp);
+ load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
+ load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
+ branch32(Assembler::AboveOrEqual, result, temp, &fallback);
+
+ // Bump the offset for the next allocation.
+ add32(Imm32(thingSize), result);
+ loadPtr(AbsoluteAddress(ptrFreeList), temp);
+ store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
+ sub32(Imm32(thingSize), result);
+ addPtr(temp, result); // Turn the offset into a pointer.
+ jump(&success);
+
+ bind(&fallback);
+ // If there are no free spans left, we bail to finish the allocation. The
+ // interpreter will call the GC allocator to set up a new arena to allocate
+ // from, after which we can resume allocating in the jit.
+ branchTest32(Assembler::Zero, result, result, fail);
+ loadPtr(AbsoluteAddress(ptrFreeList), temp);
+ addPtr(temp, result); // Turn the offset into a pointer.
+ Push(result);
+ // Update the free list to point to the next span (which may be empty).
+ load32(Address(result, 0), result);
+ store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
+ Pop(result);
+
+ bind(&success);
+
+ if (runtime()->geckoProfiler().enabled()) {
+ uint32_t* countAddress = zone->addressOfTenuredAllocCount();
+ movePtr(ImmPtr(countAddress), temp);
+ add32(Imm32(1), Address(temp, 0));
+ }
+}
+
+void MacroAssembler::callFreeStub(Register slots) {
+ // This register must match the one in JitRuntime::generateFreeStub.
+ const Register regSlots = CallTempReg0;
+
+ push(regSlots);
+ movePtr(slots, regSlots);
+ call(runtime()->jitRuntime()->freeStub());
+ pop(regSlots);
+}
+
+// Inlined equivalent of gc::AllocateObject, without failure case handling.
+void MacroAssembler::allocateObject(Register result, Register temp,
+ gc::AllocKind allocKind,
+ uint32_t nDynamicSlots,
+ gc::Heap initialHeap, Label* fail,
+ const AllocSiteInput& allocSite) {
+ MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
+
+ checkAllocatorState(fail);
+
+ if (shouldNurseryAllocate(allocKind, initialHeap)) {
+ MOZ_ASSERT(initialHeap == gc::Heap::Default);
+ return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
+ allocSite);
+ }
+
+ // Fall back to calling into the VM to allocate objects in the tenured heap
+ // that have dynamic slots.
+ if (nDynamicSlots) {
+ jump(fail);
+ return;
+ }
+
+ return freeListAllocate(result, temp, allocKind, fail);
+}
+
+void MacroAssembler::createGCObject(Register obj, Register temp,
+ const TemplateObject& templateObj,
+ gc::Heap initialHeap, Label* fail,
+ bool initContents /* = true */) {
+ gc::AllocKind allocKind = templateObj.getAllocKind();
+ MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
+
+ uint32_t nDynamicSlots = 0;
+ if (templateObj.isNativeObject()) {
+ const TemplateNativeObject& ntemplate =
+ templateObj.asTemplateNativeObject();
+ nDynamicSlots = ntemplate.numDynamicSlots();
+ }
+
+ allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
+ initGCThing(obj, temp, templateObj, initContents);
+}
+
+void MacroAssembler::createPlainGCObject(
+ Register result, Register shape, Register temp, Register temp2,
+ uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
+ gc::Heap initialHeap, Label* fail, const AllocSiteInput& allocSite,
+ bool initContents /* = true */) {
+ MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
+ MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
+
+ // Allocate object.
+ allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
+ allocSite);
+
+ // Initialize shape field.
+ storePtr(shape, Address(result, JSObject::offsetOfShape()));
+
+ // If the object has dynamic slots, allocateObject will initialize
+ // the slots field. If not, we must initialize it now.
+ if (numDynamicSlots == 0) {
+ storePtr(ImmPtr(emptyObjectSlots),
+ Address(result, NativeObject::offsetOfSlots()));
+ }
+
+ // Initialize elements field.
+ storePtr(ImmPtr(emptyObjectElements),
+ Address(result, NativeObject::offsetOfElements()));
+
+ // Initialize fixed slots.
+ if (initContents) {
+ fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
+ temp, 0, numFixedSlots);
+ }
+
+ // Initialize dynamic slots.
+ if (numDynamicSlots > 0) {
+ loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
+ fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
+ }
+}
+
+void MacroAssembler::createArrayWithFixedElements(
+ Register result, Register shape, Register temp, uint32_t arrayLength,
+ uint32_t arrayCapacity, gc::AllocKind allocKind, gc::Heap initialHeap,
+ Label* fail, const AllocSiteInput& allocSite) {
+ MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
+ MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
+ MOZ_ASSERT(result != temp);
+
+ // This only supports allocating arrays with fixed elements and does not
+ // support any dynamic slots or elements.
+ MOZ_ASSERT(arrayCapacity >= arrayLength);
+ MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
+ arrayCapacity + ObjectElements::VALUES_PER_HEADER);
+
+ // Allocate object.
+ allocateObject(result, temp, allocKind, 0, initialHeap, fail, allocSite);
+
+ // Initialize shape field.
+ storePtr(shape, Address(result, JSObject::offsetOfShape()));
+
+ // There are no dynamic slots.
+ storePtr(ImmPtr(emptyObjectSlots),
+ Address(result, NativeObject::offsetOfSlots()));
+
+ // Initialize elements pointer for fixed (inline) elements.
+ computeEffectiveAddress(
+ Address(result, NativeObject::offsetOfFixedElements()), temp);
+ storePtr(temp, Address(result, NativeObject::offsetOfElements()));
+
+ // Initialize elements header.
+ store32(Imm32(ObjectElements::FIXED),
+ Address(temp, ObjectElements::offsetOfFlags()));
+ store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
+ store32(Imm32(arrayCapacity),
+ Address(temp, ObjectElements::offsetOfCapacity()));
+ store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));
+}
+
+// Inline version of Nursery::allocateString.
+void MacroAssembler::nurseryAllocateString(Register result, Register temp,
+ gc::AllocKind allocKind,
+ Label* fail) {
+ MOZ_ASSERT(IsNurseryAllocable(allocKind));
+
+ // No explicit check for nursery.isEnabled() is needed, as the comparison
+ // with the nursery's end will always fail in such cases.
+
+ CompileZone* zone = realm()->zone();
+ size_t thingSize = gc::Arena::thingSize(allocKind);
+ bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::String,
+ thingSize);
+}
+
+// Inline version of Nursery::allocateBigInt.
+void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
+ Label* fail) {
+ MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));
+
+ // No explicit check for nursery.isEnabled() is needed, as the comparison
+ // with the nursery's end will always fail in such cases.
+
+ CompileZone* zone = realm()->zone();
+ size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);
+
+ bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::BigInt,
+ thingSize);
+}
+
+static bool IsNurseryAllocEnabled(CompileZone* zone, JS::TraceKind kind) {
+ switch (kind) {
+ case JS::TraceKind::Object:
+ return zone->allocNurseryObjects();
+ case JS::TraceKind::String:
+ return zone->allocNurseryStrings();
+ case JS::TraceKind::BigInt:
+ return zone->allocNurseryBigInts();
+ default:
+ MOZ_CRASH("Bad nursery allocation kind");
+ }
+}
+
+void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
+ Label* fail, CompileZone* zone,
+ JS::TraceKind traceKind, uint32_t size,
+ const AllocSiteInput& allocSite) {
+ MOZ_ASSERT(size >= gc::MinCellSize);
+
+ uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
+ MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
+ MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
+
+ // We know statically whether nursery allocation is enabled for a particular
+ // kind because we discard JIT code when this changes.
+ if (!IsNurseryAllocEnabled(zone, traceKind)) {
+ jump(fail);
+ return;
+ }
+
+ // Access currentEnd_ through a 32-bit offset from the nursery's position_
+ // field to avoid a second 64-bit immediate load.
+ void* posAddr = zone->addressOfNurseryPosition();
+ int32_t endOffset = Nursery::offsetOfCurrentEndFromPosition();
+
+ movePtr(ImmPtr(posAddr), temp);
+ loadPtr(Address(temp, 0), result);
+ addPtr(Imm32(totalSize), result);
+ branchPtr(Assembler::Below, Address(temp, endOffset), result, fail);
+ storePtr(result, Address(temp, 0));
+ subPtr(Imm32(size), result);
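+ // |result| now points at the start of the new cell, just past its nursery
+ // cell header; the header word itself is stored at
+ // result - nurseryCellHeaderSize below.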
+
+ if (allocSite.is<gc::CatchAllAllocSite>()) {
+ // No allocation site supplied. This is the case when called from Warp, or
+ // from places that don't support pretenuring.
+ gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
+ gc::AllocSite* site = zone->catchAllAllocSite(traceKind, siteKind);
+ uintptr_t headerWord = gc::NurseryCellHeader::MakeValue(site, traceKind);
+ storePtr(ImmWord(headerWord),
+ Address(result, -js::Nursery::nurseryCellHeaderSize()));
+
+ // Update the catch-all allocation site count for strings, or whenever the
+ // profiler is enabled. The count is used to calculate the nursery
+ // allocation count, and the string count determines whether to disable
+ // nursery string allocation.
+ if (traceKind == JS::TraceKind::String ||
+ runtime()->geckoProfiler().enabled()) {
+ uint32_t* countAddress = site->nurseryAllocCountAddress();
+ CheckedInt<int32_t> counterOffset =
+ (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
+ CheckedInt<uintptr_t>(uintptr_t(posAddr)))
+ .toChecked<int32_t>();
+ if (counterOffset.isValid()) {
+ add32(Imm32(1), Address(temp, counterOffset.value()));
+ } else {
+ movePtr(ImmPtr(countAddress), temp);
+ add32(Imm32(1), Address(temp, 0));
+ }
+ }
+ } else {
+ // Update allocation site and store pointer in the nursery cell header. This
+ // is only used from baseline.
+ Register site = allocSite.as<Register>();
+ updateAllocSite(temp, result, zone, site);
+ // See NurseryCellHeader::MakeValue.
+ orPtr(Imm32(int32_t(traceKind)), site);
+ storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
+ }
+}
+
+// Update the allocation site in the same way as Nursery::allocateCell.
+void MacroAssembler::updateAllocSite(Register temp, Register result,
+ CompileZone* zone, Register site) {
+ Label done;
+
+ add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));
+
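+ // If this is the site's first nursery allocation (the count just became 1),
+ // link the site into the zone's list of nursery-allocated sites.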
+ branch32(Assembler::NotEqual,
+ Address(site, gc::AllocSite::offsetOfNurseryAllocCount()), Imm32(1),
+ &done);
+
+ loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
+ storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
+ storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));
+
+ bind(&done);
+}
+
+// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
+// allocation requested but unsuccessful.
+void MacroAssembler::allocateString(Register result, Register temp,
+ gc::AllocKind allocKind,
+ gc::Heap initialHeap, Label* fail) {
+ MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
+ allocKind == gc::AllocKind::FAT_INLINE_STRING);
+
+ checkAllocatorState(fail);
+
+ if (shouldNurseryAllocate(allocKind, initialHeap)) {
+ MOZ_ASSERT(initialHeap == gc::Heap::Default);
+ return nurseryAllocateString(result, temp, allocKind, fail);
+ }
+
+ freeListAllocate(result, temp, allocKind, fail);
+}
+
+void MacroAssembler::newGCString(Register result, Register temp,
+ gc::Heap initialHeap, Label* fail) {
+ allocateString(result, temp, js::gc::AllocKind::STRING, initialHeap, fail);
+}
+
+void MacroAssembler::newGCFatInlineString(Register result, Register temp,
+ gc::Heap initialHeap, Label* fail) {
+ allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
+ initialHeap, fail);
+}
+
+void MacroAssembler::newGCBigInt(Register result, Register temp,
+ gc::Heap initialHeap, Label* fail) {
+ checkAllocatorState(fail);
+
+ if (shouldNurseryAllocate(gc::AllocKind::BIGINT, initialHeap)) {
+ MOZ_ASSERT(initialHeap == gc::Heap::Default);
+ return nurseryAllocateBigInt(result, temp, fail);
+ }
+
+ freeListAllocate(result, temp, gc::AllocKind::BIGINT, fail);
+}
+
+void MacroAssembler::copySlotsFromTemplate(
+ Register obj, const TemplateNativeObject& templateObj, uint32_t start,
+ uint32_t end) {
+ uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
+ for (unsigned i = start; i < nfixed; i++) {
+ // Template objects are not exposed to script and therefore immutable.
+ // However, regexp template objects are sometimes used directly (when
+ // the cloning is not observable), and therefore we can end up with a
+ // non-zero lastIndex. Detect this case here and just substitute 0, to
+ // avoid racing with the main thread updating this slot.
+ Value v;
+ if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
+ v = Int32Value(0);
+ } else {
+ v = templateObj.getSlot(i);
+ }
+ storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
+ }
+}
+
+void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
+ uint32_t start, uint32_t end,
+ const Value& v) {
+ MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));
+
+ if (start >= end) {
+ return;
+ }
+
+#ifdef JS_NUNBOX32
+ // We only have a single spare register, so do the initialization as two
+ // strided writes of the tag and body.
+ Address addr = base;
+ move32(Imm32(v.toNunboxPayload()), temp);
+ for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
+ store32(temp, ToPayload(addr));
+ }
+
+ addr = base;
+ move32(Imm32(v.toNunboxTag()), temp);
+ for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
+ store32(temp, ToType(addr));
+ }
+#else
+ moveValue(v, ValueOperand(temp));
+ for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtr<Value>)) {
+ storePtr(temp, base);
+ }
+#endif
+}
+
+void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
+ uint32_t start, uint32_t end) {
+ fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
+}
+
+void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
+ uint32_t start, uint32_t end) {
+ fillSlotsWithConstantValue(base, temp, start, end,
+ MagicValue(JS_UNINITIALIZED_LEXICAL));
+}
+
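+ // Scan the template object's slots from the end to find the run of trailing
+ // undefined slots and any run of uninitialized-lexical slots immediately
+ // before it, so callers can fill those ranges with constants instead of
+ // copying them from the template.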
+static std::pair<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
+ const TemplateNativeObject& templateObj, uint32_t nslots) {
+ MOZ_ASSERT(nslots == templateObj.slotSpan());
+ MOZ_ASSERT(nslots > 0);
+
+ uint32_t first = nslots;
+ for (; first != 0; --first) {
+ if (templateObj.getSlot(first - 1) != UndefinedValue()) {
+ break;
+ }
+ }
+ uint32_t startOfUndefined = first;
+
+ if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
+ for (; first != 0; --first) {
+ if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
+ break;
+ }
+ }
+ }
+ uint32_t startOfUninitialized = first;
+
+ return {startOfUninitialized, startOfUndefined};
+}
+
+void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
+ Register lengthReg,
+ LiveRegisterSet liveRegs, Label* fail,
+ TypedArrayObject* templateObj,
+ TypedArrayLength lengthKind) {
+ MOZ_ASSERT(!templateObj->hasBuffer());
+
+ constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
+ constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);
+
+ static_assert(
+ TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
+ "fixed inline element data assumed to begin after the data slot");
+
+ static_assert(
+ TypedArrayObject::INLINE_BUFFER_LIMIT ==
+ JSObject::MAX_BYTE_SIZE - dataOffset,
+ "typed array inline buffer is limited by the maximum object byte size");
+
+ // Initialise data elements to zero.
+ size_t length = templateObj->length();
+ MOZ_ASSERT(length <= INT32_MAX,
+ "Template objects are only created for int32 lengths");
+ size_t nbytes = length * templateObj->bytesPerElement();
+
+ if (lengthKind == TypedArrayLength::Fixed &&
+ nbytes <= TypedArrayObject::INLINE_BUFFER_LIMIT) {
+ MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());
+
+ // Store data elements inside the remaining JSObject slots.
+ computeEffectiveAddress(Address(obj, dataOffset), temp);
+ storePrivateValue(temp, Address(obj, dataSlotOffset));
+
+ // Write enough zero pointers into fixed data to zero every
+ // element. (This zeroes past the end of a byte count that's
+ // not a multiple of pointer size. That's okay, because fixed
+    // data is allocated as whole 8-byte HeapSlots (each at least pointer
+    // sized), and we won't inline unless the desired memory fits in that
+ // space.)
+    static_assert(sizeof(HeapSlot) == 8, "Assumed 8-byte HeapSlots");
+
+ size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
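+    // For example, nbytes = 5 rounds up to 8 bytes: a single pointer-sized
+    // store on 64-bit, or two stores on 32-bit.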
+ for (size_t i = 0; i < numZeroPointers; i++) {
+ storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
+ }
+ MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
+ } else {
+ if (lengthKind == TypedArrayLength::Fixed) {
+ move32(Imm32(length), lengthReg);
+ }
+
+ // Ensure volatile |obj| is saved across the call.
+ if (obj.volatile_()) {
+ liveRegs.addUnchecked(obj);
+ }
+
+ // Allocate a buffer on the heap to store the data elements.
+ PushRegsInMask(liveRegs);
+ using Fn = void (*)(JSContext* cx, TypedArrayObject* obj, int32_t count);
+ setupUnalignedABICall(temp);
+ loadJSContext(temp);
+ passABIArg(temp);
+ passABIArg(obj);
+ passABIArg(lengthReg);
+ callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
+ PopRegsInMask(liveRegs);
+
+ // Fail when data slot is UndefinedValue.
+ branchTestUndefined(Assembler::Equal, Address(obj, dataSlotOffset), fail);
+ }
+}
+
+void MacroAssembler::initGCSlots(Register obj, Register temp,
+ const TemplateNativeObject& templateObj) {
+ MOZ_ASSERT(!templateObj.isArrayObject());
+
+ // Slots of non-array objects are required to be initialized.
+ // Use the values currently in the template object.
+ uint32_t nslots = templateObj.slotSpan();
+ if (nslots == 0) {
+ return;
+ }
+
+ uint32_t nfixed = templateObj.numUsedFixedSlots();
+ uint32_t ndynamic = templateObj.numDynamicSlots();
+
+ // Attempt to group slot writes such that we minimize the amount of
+ // duplicated data we need to embed in code and load into registers. In
+ // general, most template object slots will be undefined except for any
+ // reserved slots. Since reserved slots come first, we split the object
+ // logically into independent non-UndefinedValue writes to the head and
+ // duplicated writes of UndefinedValue to the tail. For the majority of
+ // objects, the "tail" will be the entire slot range.
+ //
+ // The template object may be a CallObject, in which case we need to
+ // account for uninitialized lexical slots as well as undefined
+  // slots. Uninitialized lexical slots appear in CallObjects if the function
+  // has parameter expressions, in which case closed-over parameters have
+ // TDZ. Uninitialized slots come before undefined slots in CallObjects.
+ auto [startOfUninitialized, startOfUndefined] =
+ FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots);
+ MOZ_ASSERT(startOfUninitialized <= nfixed); // Reserved slots must be fixed.
+ MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
+ MOZ_ASSERT_IF(!templateObj.isCallObject() &&
+ !templateObj.isBlockLexicalEnvironmentObject(),
+ startOfUninitialized == startOfUndefined);
+
+ // Copy over any preserved reserved slots.
+ copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);
+
+ // Fill the rest of the fixed slots with undefined and uninitialized.
+ size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
+ fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
+ std::min(startOfUndefined, nfixed));
+
+ if (startOfUndefined < nfixed) {
+ offset = NativeObject::getFixedSlotOffset(startOfUndefined);
+ fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
+ nfixed);
+ }
+
+ if (ndynamic) {
+ // We are short one register to do this elegantly. Borrow the obj
+ // register briefly for our slots base address.
+ push(obj);
+ loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
+
+ // Fill uninitialized slots if necessary. Otherwise initialize all
+ // slots to undefined.
+ if (startOfUndefined > nfixed) {
+ MOZ_ASSERT(startOfUninitialized != startOfUndefined);
+ fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
+ startOfUndefined - nfixed);
+ size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
+ fillSlotsWithUndefined(Address(obj, offset), temp,
+ startOfUndefined - nfixed, ndynamic);
+ } else {
+ fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
+ }
+
+ pop(obj);
+ }
+}
+
+void MacroAssembler::initGCThing(Register obj, Register temp,
+ const TemplateObject& templateObj,
+ bool initContents) {
+ // Fast initialization of an empty object returned by allocateObject().
+
+ storePtr(ImmGCPtr(templateObj.shape()),
+ Address(obj, JSObject::offsetOfShape()));
+
+ if (templateObj.isNativeObject()) {
+ const TemplateNativeObject& ntemplate =
+ templateObj.asTemplateNativeObject();
+ MOZ_ASSERT(!ntemplate.hasDynamicElements());
+
+ // If the object has dynamic slots, the slots member has already been
+ // filled in.
+ if (ntemplate.numDynamicSlots() == 0) {
+ storePtr(ImmPtr(emptyObjectSlots),
+ Address(obj, NativeObject::offsetOfSlots()));
+ }
+
+ if (ntemplate.isArrayObject()) {
+ // Can't skip initializing reserved slots.
+ MOZ_ASSERT(initContents);
+
+ int elementsOffset = NativeObject::offsetOfFixedElements();
+
+ computeEffectiveAddress(Address(obj, elementsOffset), temp);
+ storePtr(temp, Address(obj, NativeObject::offsetOfElements()));
+
+ // Fill in the elements header.
+ store32(
+ Imm32(ntemplate.getDenseCapacity()),
+ Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
+ store32(Imm32(ntemplate.getDenseInitializedLength()),
+ Address(obj, elementsOffset +
+ ObjectElements::offsetOfInitializedLength()));
+ store32(Imm32(ntemplate.getArrayLength()),
+ Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
+ store32(Imm32(ObjectElements::FIXED),
+ Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
+ } else if (ntemplate.isArgumentsObject()) {
+ // The caller will initialize the reserved slots.
+ MOZ_ASSERT(!initContents);
+ storePtr(ImmPtr(emptyObjectElements),
+ Address(obj, NativeObject::offsetOfElements()));
+ } else {
+ // If the target type could be a TypedArray that maps shared memory
+ // then this would need to store emptyObjectElementsShared in that case.
+ MOZ_ASSERT(!ntemplate.isSharedMemory());
+
+ // Can't skip initializing reserved slots.
+ MOZ_ASSERT(initContents);
+
+ storePtr(ImmPtr(emptyObjectElements),
+ Address(obj, NativeObject::offsetOfElements()));
+
+ initGCSlots(obj, temp, ntemplate);
+ }
+ } else {
+ MOZ_CRASH("Unknown object");
+ }
+
+#ifdef JS_GC_PROBES
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+
+ regs.takeUnchecked(obj);
+ Register temp2 = regs.takeAnyGeneral();
+
+ using Fn = void (*)(JSObject* obj);
+ setupUnalignedABICall(temp2);
+ passABIArg(obj);
+ callWithABI<Fn, TraceCreateObject>();
+
+ PopRegsInMask(save);
+#endif
+}
+
+void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
+ Register result, Label* fail) {
+ MOZ_ASSERT(left != result);
+ MOZ_ASSERT(right != result);
+ MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));
+
+ Label notPointerEqual;
+ // If operands point to the same instance, the strings are trivially equal.
+ branchPtr(Assembler::NotEqual, left, right,
+ IsEqualityOp(op) ? &notPointerEqual : fail);
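+  // A string compares equal to itself, so ==, ===, <=, and >= are true for
+  // pointer-equal operands, while !=, !==, <, and > are false.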
+ move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
+ op == JSOp::Ge),
+ result);
+
+ if (IsEqualityOp(op)) {
+ Label done;
+ jump(&done);
+
+ bind(&notPointerEqual);
+
+ Label leftIsNotAtom;
+ Label setNotEqualResult;
+ // Atoms cannot be equal to each other if they point to different strings.
+ Imm32 atomBit(JSString::ATOM_BIT);
+ branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
+ atomBit, &leftIsNotAtom);
+ branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
+ atomBit, &setNotEqualResult);
+
+ bind(&leftIsNotAtom);
+ // Strings of different length can never be equal.
+ loadStringLength(left, result);
+ branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
+ result, fail);
+
+ bind(&setNotEqualResult);
+ move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::loadStringChars(Register str, Register dest,
+ CharEncoding encoding) {
+ MOZ_ASSERT(str != dest);
+
+ if (JitOptions.spectreStringMitigations) {
+ if (encoding == CharEncoding::Latin1) {
+ // If the string is a rope, zero the |str| register. The code below
+ // depends on str->flags so this should block speculative execution.
+ movePtr(ImmWord(0), dest);
+ test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::LINEAR_BIT), dest, str);
+ } else {
+ // If we're loading TwoByte chars, there's an additional risk:
+ // if the string has Latin1 chars, we could read out-of-bounds. To
+ // prevent this, we check both the Linear and Latin1 bits. We don't
+ // have a scratch register, so we use these flags also to block
+ // speculative execution, similar to the use of 0 above.
+ MOZ_ASSERT(encoding == CharEncoding::TwoByte);
+ static constexpr uint32_t Mask =
+ JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
+ static_assert(Mask < 1024,
+ "Mask should be a small, near-null value to ensure we "
+ "block speculative execution when it's used as string "
+ "pointer");
+ move32(Imm32(Mask), dest);
+ and32(Address(str, JSString::offsetOfFlags()), dest);
+ cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
+ str);
+ }
+ }
+
+ // Load the inline chars.
+ computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
+ dest);
+
+ // If it's not an inline string, load the non-inline chars. Use a
+ // conditional move to prevent speculative execution.
+ test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::INLINE_CHARS_BIT),
+ Address(str, JSString::offsetOfNonInlineChars()), dest);
+}
+
+void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
+ CharEncoding encoding) {
+ MOZ_ASSERT(str != dest);
+
+ if (JitOptions.spectreStringMitigations) {
+ // If the string is a rope, has inline chars, or has a different
+ // character encoding, set str to a near-null value to prevent
+ // speculative execution below (when reading str->nonInlineChars).
+
+ static constexpr uint32_t Mask = JSString::LINEAR_BIT |
+ JSString::INLINE_CHARS_BIT |
+ JSString::LATIN1_CHARS_BIT;
+ static_assert(Mask < 1024,
+ "Mask should be a small, near-null value to ensure we "
+ "block speculative execution when it's used as string "
+ "pointer");
+
+ uint32_t expectedBits = JSString::LINEAR_BIT;
+ if (encoding == CharEncoding::Latin1) {
+ expectedBits |= JSString::LATIN1_CHARS_BIT;
+ }
+
+ move32(Imm32(Mask), dest);
+ and32(Address(str, JSString::offsetOfFlags()), dest);
+
+ cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
+ }
+
+ loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
+}
+
+void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
+ MOZ_ASSERT(chars != str);
+ storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
+}
+
+void MacroAssembler::loadInlineStringCharsForStore(Register str,
+ Register dest) {
+ computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
+ dest);
+}
+
+void MacroAssembler::loadInlineStringChars(Register str, Register dest,
+ CharEncoding encoding) {
+ MOZ_ASSERT(str != dest);
+
+ if (JitOptions.spectreStringMitigations) {
+ // Making this Spectre-safe is a bit complicated: using
+ // computeEffectiveAddress and then zeroing the output register if
+ // non-inline is not sufficient: when the index is very large, it would
+ // allow reading |nullptr + index|. Just fall back to loadStringChars
+ // for now.
+ loadStringChars(str, dest, encoding);
+ } else {
+ computeEffectiveAddress(
+ Address(str, JSInlineString::offsetOfInlineStorage()), dest);
+ }
+}
+
+void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
+ MOZ_ASSERT(str != dest);
+
+ if (JitOptions.spectreStringMitigations) {
+ // Zero the output register if the input was not a rope.
+ movePtr(ImmWord(0), dest);
+ test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::LINEAR_BIT),
+ Address(str, JSRope::offsetOfLeft()), dest);
+ } else {
+ loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
+ }
+}
+
+void MacroAssembler::loadRopeRightChild(Register str, Register dest) {
+ MOZ_ASSERT(str != dest);
+
+ if (JitOptions.spectreStringMitigations) {
+ // Zero the output register if the input was not a rope.
+ movePtr(ImmWord(0), dest);
+ test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::LINEAR_BIT),
+ Address(str, JSRope::offsetOfRight()), dest);
+ } else {
+ loadPtr(Address(str, JSRope::offsetOfRight()), dest);
+ }
+}
+
+void MacroAssembler::storeRopeChildren(Register left, Register right,
+ Register str) {
+ storePtr(left, Address(str, JSRope::offsetOfLeft()));
+ storePtr(right, Address(str, JSRope::offsetOfRight()));
+}
+
+void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
+ MOZ_ASSERT(str != dest);
+
+ if (JitOptions.spectreStringMitigations) {
+ // If the string is not a dependent string, zero the |str| register.
+ // The code below loads str->base so this should block speculative
+ // execution.
+ movePtr(ImmWord(0), dest);
+ test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::DEPENDENT_BIT), dest, str);
+ }
+
+ loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
+}
+
+void MacroAssembler::storeDependentStringBase(Register base, Register str) {
+ storePtr(base, Address(str, JSDependentString::offsetOfBase()));
+}
+
+void MacroAssembler::loadRopeChild(Register str, Register index,
+ Register output, Label* isLinear) {
+ // This follows JSString::getChar.
+ branchIfNotRope(str, isLinear);
+
+ loadRopeLeftChild(str, output);
+
+ // Check if the index is contained in the leftChild.
+ Label loadedChild;
+ branch32(Assembler::Above, Address(output, JSString::offsetOfLength()), index,
+ &loadedChild);
+
+ // The index must be in the rightChild.
+ loadRopeRightChild(str, output);
+
+ bind(&loadedChild);
+}
+
+void MacroAssembler::branchIfCanLoadStringChar(Register str, Register index,
+ Register scratch, Label* label) {
+ loadRopeChild(str, index, scratch, label);
+
+  // Branch if the loaded (left or right) child is linear.
+ branchIfNotRope(scratch, label);
+}
+
+void MacroAssembler::branchIfNotCanLoadStringChar(Register str, Register index,
+ Register scratch,
+ Label* label) {
+ Label done;
+ loadRopeChild(str, index, scratch, &done);
+
+ // Branch if the left or right side is another rope.
+ branchIfRope(scratch, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::loadStringChar(Register str, Register index,
+ Register output, Register scratch1,
+ Register scratch2, Label* fail) {
+ MOZ_ASSERT(str != output);
+ MOZ_ASSERT(str != index);
+ MOZ_ASSERT(index != output);
+ MOZ_ASSERT(output != scratch1);
+ MOZ_ASSERT(output != scratch2);
+
+ // Use scratch1 for the index (adjusted below).
+ move32(index, scratch1);
+ movePtr(str, output);
+
+ // This follows JSString::getChar.
+ Label notRope;
+ branchIfNotRope(str, &notRope);
+
+ loadRopeLeftChild(str, output);
+
+ // Check if the index is contained in the leftChild.
+ Label loadedChild, notInLeft;
+ spectreBoundsCheck32(scratch1, Address(output, JSString::offsetOfLength()),
+ scratch2, &notInLeft);
+ jump(&loadedChild);
+
+ // The index must be in the rightChild.
+ // index -= rope->leftChild()->length()
+ bind(&notInLeft);
+ sub32(Address(output, JSString::offsetOfLength()), scratch1);
+ loadRopeRightChild(str, output);
+
+ // If the left or right side is another rope, give up.
+ bind(&loadedChild);
+ branchIfRope(output, fail);
+
+ bind(&notRope);
+
+ Label isLatin1, done;
+  // Check the encoding of the loaded child rather than |str| itself,
+  // because a TwoByte rope might have a Latin1 child.
+ branchLatin1String(output, &isLatin1);
+ loadStringChars(output, scratch2, CharEncoding::TwoByte);
+ loadChar(scratch2, scratch1, output, CharEncoding::TwoByte);
+ jump(&done);
+
+ bind(&isLatin1);
+ loadStringChars(output, scratch2, CharEncoding::Latin1);
+ loadChar(scratch2, scratch1, output, CharEncoding::Latin1);
+
+ bind(&done);
+}
+
+void MacroAssembler::loadStringIndexValue(Register str, Register dest,
+ Label* fail) {
+ MOZ_ASSERT(str != dest);
+
+ load32(Address(str, JSString::offsetOfFlags()), dest);
+
+ // Does not have a cached index value.
+ branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);
+
+ // Extract the index.
+ rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
+}
+
+void MacroAssembler::loadChar(Register chars, Register index, Register dest,
+ CharEncoding encoding, int32_t offset /* = 0 */) {
+ if (encoding == CharEncoding::Latin1) {
+ loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
+ } else {
+ loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
+ }
+}
+
+void MacroAssembler::addToCharPtr(Register chars, Register index,
+ CharEncoding encoding) {
+ if (encoding == CharEncoding::Latin1) {
+ static_assert(sizeof(char) == 1,
+ "Latin-1 string index shouldn't need scaling");
+ addPtr(index, chars);
+ } else {
+ computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
+ }
+}
+
+void MacroAssembler::loadStringFromUnit(Register unit, Register dest,
+ const StaticStrings& staticStrings) {
+ movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
+ loadPtr(BaseIndex(dest, unit, ScalePointer), dest);
+}
+
+void MacroAssembler::loadLengthTwoString(Register c1, Register c2,
+ Register dest,
+ const StaticStrings& staticStrings) {
+ // Compute (toSmallCharTable[c1] << SMALL_CHAR_BITS) + toSmallCharTable[c2]
+ // to obtain the index into `StaticStrings::length2StaticTable`.
+ static_assert(sizeof(StaticStrings::SmallChar) == 1);
+
+ movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
+ load8ZeroExtend(BaseIndex(dest, c1, Scale::TimesOne), c1);
+ load8ZeroExtend(BaseIndex(dest, c2, Scale::TimesOne), c2);
+
+ lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), c1);
+ add32(c2, c1);
+
+ // Look up the string from the computed index.
+ movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
+ loadPtr(BaseIndex(dest, c1, ScalePointer), dest);
+}
+
+void MacroAssembler::loadInt32ToStringWithBase(
+ Register input, Register base, Register dest, Register scratch1,
+ Register scratch2, const StaticStrings& staticStrings,
+ const LiveRegisterSet& volatileRegs, Label* fail) {
+#ifdef DEBUG
+ Label baseBad, baseOk;
+ branch32(Assembler::LessThan, base, Imm32(2), &baseBad);
+ branch32(Assembler::LessThanOrEqual, base, Imm32(36), &baseOk);
+ bind(&baseBad);
+ assumeUnreachable("base must be in range [2, 36]");
+ bind(&baseOk);
+#endif
+
+ // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
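+  // For example, r = 11: '0' + 11 is past '9', so adding 'a' - '0' - 10
+  // yields 'b'.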
+ auto toChar = [this, base](Register r) {
+#ifdef DEBUG
+ Label ok;
+ branch32(Assembler::Below, r, base, &ok);
+ assumeUnreachable("bad digit");
+ bind(&ok);
+#else
+ // Silence unused lambda capture warning.
+ (void)base;
+#endif
+
+ Label done;
+ add32(Imm32('0'), r);
+ branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
+ add32(Imm32('a' - '0' - 10), r);
+ bind(&done);
+ };
+
+ // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
+ Label lengthTwo, done;
+ branch32(Assembler::AboveOrEqual, input, base, &lengthTwo);
+ {
+ move32(input, scratch1);
+ toChar(scratch1);
+
+ loadStringFromUnit(scratch1, dest, staticStrings);
+
+ jump(&done);
+ }
+ bind(&lengthTwo);
+
+ // Compute |base * base|.
+ move32(base, scratch1);
+ mul32(scratch1, scratch1);
+
+ // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
+ branch32(Assembler::AboveOrEqual, input, scratch1, fail);
+ {
+ // Compute |scratch1 = input / base| and |scratch2 = input % base|.
+ move32(input, scratch1);
+ flexibleDivMod32(base, scratch1, scratch2, true, volatileRegs);
+
+    // Compute the digit characters of the quotient and remainder.
+ toChar(scratch1);
+ toChar(scratch2);
+
+ // Look up the 2-character digit string in the small-char table.
+ loadLengthTwoString(scratch1, scratch2, dest, staticStrings);
+ }
+ bind(&done);
+}
+
+void MacroAssembler::loadInt32ToStringWithBase(
+ Register input, int32_t base, Register dest, Register scratch1,
+ Register scratch2, const StaticStrings& staticStrings, Label* fail) {
+ MOZ_ASSERT(2 <= base && base <= 36, "base must be in range [2, 36]");
+
+ // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
+ auto toChar = [this, base](Register r) {
+#ifdef DEBUG
+ Label ok;
+ branch32(Assembler::Below, r, Imm32(base), &ok);
+ assumeUnreachable("bad digit");
+ bind(&ok);
+#endif
+
+ if (base <= 10) {
+ add32(Imm32('0'), r);
+ } else {
+ Label done;
+ add32(Imm32('0'), r);
+ branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
+ add32(Imm32('a' - '0' - 10), r);
+ bind(&done);
+ }
+ };
+
+ // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
+ Label lengthTwo, done;
+ branch32(Assembler::AboveOrEqual, input, Imm32(base), &lengthTwo);
+ {
+ move32(input, scratch1);
+ toChar(scratch1);
+
+ loadStringFromUnit(scratch1, dest, staticStrings);
+
+ jump(&done);
+ }
+ bind(&lengthTwo);
+
+ // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
+ branch32(Assembler::AboveOrEqual, input, Imm32(base * base), fail);
+ {
+ // Compute |scratch1 = input / base| and |scratch2 = input % base|.
+ if (mozilla::IsPowerOfTwo(uint32_t(base))) {
+ uint32_t shift = mozilla::FloorLog2(base);
+
+ move32(input, scratch1);
+ rshift32(Imm32(shift), scratch1);
+
+ move32(input, scratch2);
+ and32(Imm32((uint32_t(1) << shift) - 1), scratch2);
+ } else {
+ // The following code matches CodeGenerator::visitUDivOrModConstant()
+ // for x86-shared. Also see Hacker's Delight 2nd edition, chapter 10-8
+ // "Unsigned Division by 7" for the case when |rmc.multiplier| exceeds
+ // UINT32_MAX and we need to adjust the shift amount.
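+      //
+      // As a concrete (hypothetical) example: for base = 10, one valid pair
+      // of constants is multiplier = 0xCCCCCCCD with shiftAmount = 3, since
+      // (n * 0xCCCCCCCD) >> 35 == n / 10 for every uint32 n; the exact values
+      // depend on computeUnsignedDivisionConstants().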
+
+ auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(base);
+
+      // We first compute |q = (M * n) >> 32|, where M = rmc.multiplier.
+ mulHighUnsigned32(Imm32(rmc.multiplier), input, scratch1);
+
+ if (rmc.multiplier > UINT32_MAX) {
+ // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
+ // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
+ // contradicting the proof of correctness in computeDivisionConstants.
+ MOZ_ASSERT(rmc.shiftAmount > 0);
+ MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));
+
+ // Compute |t = (n - q) / 2|.
+ move32(input, scratch2);
+ sub32(scratch1, scratch2);
+ rshift32(Imm32(1), scratch2);
+
+ // Compute |t = (n - q) / 2 + q = (n + q) / 2|.
+ add32(scratch2, scratch1);
+
+ // Finish the computation |q = floor(n / d)|.
+ rshift32(Imm32(rmc.shiftAmount - 1), scratch1);
+ } else {
+ rshift32(Imm32(rmc.shiftAmount), scratch1);
+ }
+
+ // Compute the remainder from |r = n - q * d|.
+ move32(scratch1, dest);
+ mul32(Imm32(base), dest);
+ move32(input, scratch2);
+ sub32(dest, scratch2);
+ }
+
+    // Compute the digit characters of the quotient and remainder.
+ toChar(scratch1);
+ toChar(scratch2);
+
+ // Look up the 2-character digit string in the small-char table.
+ loadLengthTwoString(scratch1, scratch2, dest, staticStrings);
+ }
+ bind(&done);
+}
+
+void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
+ MOZ_ASSERT(digits != bigInt);
+
+ // Load the inline digits.
+ computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
+ digits);
+
+ // If inline digits aren't used, load the heap digits. Use a conditional move
+ // to prevent speculative execution.
+ cmp32LoadPtr(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
+ Imm32(int32_t(BigInt::inlineDigitsLength())),
+ Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
+}
+
+void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
+ // This code follows the implementation of |BigInt::toUint64()|. We're also
+  // using it for inline callers of |BigInt::toInt64()|, which works because
+  // all supported JIT architectures use a two's complement representation for
+ // int64 values, which means the WrapToSigned call in toInt64() is a no-op.
+
+ Label done, nonZero;
+
+ branchIfBigIntIsNonZero(bigInt, &nonZero);
+ {
+ move64(Imm64(0), dest);
+ jump(&done);
+ }
+ bind(&nonZero);
+
+#ifdef JS_PUNBOX64
+ Register digits = dest.reg;
+#else
+ Register digits = dest.high;
+#endif
+
+ loadBigIntDigits(bigInt, digits);
+
+#if JS_PUNBOX64
+ // Load the first digit into the destination register.
+ load64(Address(digits, 0), dest);
+#else
+ // Load the first digit into the destination register's low value.
+ load32(Address(digits, 0), dest.low);
+
+ // And conditionally load the second digit into the high value register.
+ Label twoDigits, digitsDone;
+ branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
+ Imm32(1), &twoDigits);
+ {
+ move32(Imm32(0), dest.high);
+ jump(&digitsDone);
+ }
+ {
+ bind(&twoDigits);
+ load32(Address(digits, sizeof(BigInt::Digit)), dest.high);
+ }
+ bind(&digitsDone);
+#endif
+
+ branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
+ Imm32(BigInt::signBitMask()), &done);
+ neg64(dest);
+
+ bind(&done);
+}
+
+void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt,
+ Register dest) {
+ Label done, nonZero;
+ branchIfBigIntIsNonZero(bigInt, &nonZero);
+ {
+ movePtr(ImmWord(0), dest);
+ jump(&done);
+ }
+ bind(&nonZero);
+
+ loadBigIntDigits(bigInt, dest);
+
+ // Load the first digit into the destination register.
+ loadPtr(Address(dest, 0), dest);
+
+ bind(&done);
+}
+
+void MacroAssembler::loadBigInt(Register bigInt, Register dest, Label* fail) {
+ Label done, nonZero;
+ branchIfBigIntIsNonZero(bigInt, &nonZero);
+ {
+ movePtr(ImmWord(0), dest);
+ jump(&done);
+ }
+ bind(&nonZero);
+
+ loadBigIntNonZero(bigInt, dest, fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::loadBigIntNonZero(Register bigInt, Register dest,
+ Label* fail) {
+ MOZ_ASSERT(bigInt != dest);
+
+#ifdef DEBUG
+ Label nonZero;
+ branchIfBigIntIsNonZero(bigInt, &nonZero);
+ assumeUnreachable("Unexpected zero BigInt");
+ bind(&nonZero);
+#endif
+
+ branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
+ Imm32(1), fail);
+
+ static_assert(BigInt::inlineDigitsLength() > 0,
+ "Single digit BigInts use inline storage");
+
+ // Load the first inline digit into the destination register.
+ loadPtr(Address(bigInt, BigInt::offsetOfInlineDigits()), dest);
+
+ // Return as a signed pointer.
+ bigIntDigitToSignedPtr(bigInt, dest, fail);
+}
+
+void MacroAssembler::bigIntDigitToSignedPtr(Register bigInt, Register digit,
+ Label* fail) {
+ // BigInt digits are stored as absolute numbers. Take the failure path when
+ // the digit can't be stored in intptr_t.
+ branchTestPtr(Assembler::Signed, digit, digit, fail);
+
+ // Negate |dest| when the BigInt is negative.
+ Label nonNegative;
+ branchIfBigIntIsNonNegative(bigInt, &nonNegative);
+ negPtr(digit);
+ bind(&nonNegative);
+}
+
+void MacroAssembler::loadBigIntAbsolute(Register bigInt, Register dest,
+ Label* fail) {
+ MOZ_ASSERT(bigInt != dest);
+
+ branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
+ Imm32(1), fail);
+
+ static_assert(BigInt::inlineDigitsLength() > 0,
+ "Single digit BigInts use inline storage");
+
+  // Load the first inline digit into the destination register, or zero if
+  // the BigInt has no digits.
+ movePtr(ImmWord(0), dest);
+ cmp32LoadPtr(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
+ Imm32(0), Address(bigInt, BigInt::offsetOfInlineDigits()), dest);
+}
+
+void MacroAssembler::initializeBigInt64(Scalar::Type type, Register bigInt,
+ Register64 val) {
+ MOZ_ASSERT(Scalar::isBigIntType(type));
+
+ store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));
+
+ Label done, nonZero;
+ branch64(Assembler::NotEqual, val, Imm64(0), &nonZero);
+ {
+ store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
+ jump(&done);
+ }
+ bind(&nonZero);
+
+ if (type == Scalar::BigInt64) {
+ // Set the sign-bit for negative values and then continue with the two's
+ // complement.
+ Label isPositive;
+ branch64(Assembler::GreaterThan, val, Imm64(0), &isPositive);
+ {
+ store32(Imm32(BigInt::signBitMask()),
+ Address(bigInt, BigInt::offsetOfFlags()));
+ neg64(val);
+ }
+ bind(&isPositive);
+ }
+
+ store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));
+
+ static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
+ "BigInt Digit size matches uintptr_t, so there's a single "
+ "store on 64-bit and up to two stores on 32-bit");
+
+#ifndef JS_PUNBOX64
+ Label singleDigit;
+ branchTest32(Assembler::Zero, val.high, val.high, &singleDigit);
+ store32(Imm32(2), Address(bigInt, BigInt::offsetOfLength()));
+ bind(&singleDigit);
+
+ // We can perform a single store64 on 32-bit platforms, because inline
+ // storage can store at least two 32-bit integers.
+ static_assert(BigInt::inlineDigitsLength() >= 2,
+ "BigInt inline storage can store at least two digits");
+#endif
+
+ store64(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));
+
+ bind(&done);
+}
+
+void MacroAssembler::initializeBigInt(Register bigInt, Register val) {
+ store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));
+
+ Label done, nonZero;
+ branchTestPtr(Assembler::NonZero, val, val, &nonZero);
+ {
+ store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
+ jump(&done);
+ }
+ bind(&nonZero);
+
+ // Set the sign-bit for negative values and then continue with the two's
+ // complement.
+ Label isPositive;
+ branchTestPtr(Assembler::NotSigned, val, val, &isPositive);
+ {
+ store32(Imm32(BigInt::signBitMask()),
+ Address(bigInt, BigInt::offsetOfFlags()));
+ negPtr(val);
+ }
+ bind(&isPositive);
+
+ store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));
+
+ static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
+ "BigInt Digit size matches uintptr_t");
+
+ storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));
+
+ bind(&done);
+}
+
+void MacroAssembler::initializeBigIntAbsolute(Register bigInt, Register val) {
+ store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));
+
+ Label done, nonZero;
+ branchTestPtr(Assembler::NonZero, val, val, &nonZero);
+ {
+ store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
+ jump(&done);
+ }
+ bind(&nonZero);
+
+ store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));
+
+ static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
+ "BigInt Digit size matches uintptr_t");
+
+ storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));
+
+ bind(&done);
+}
+
+void MacroAssembler::copyBigIntWithInlineDigits(Register src, Register dest,
+ Register temp,
+ gc::Heap initialHeap,
+ Label* fail) {
+ branch32(Assembler::Above, Address(src, BigInt::offsetOfLength()),
+ Imm32(int32_t(BigInt::inlineDigitsLength())), fail);
+
+ newGCBigInt(dest, temp, initialHeap, fail);
+
+ // Copy the sign-bit, but not any of the other bits used by the GC.
+ load32(Address(src, BigInt::offsetOfFlags()), temp);
+ and32(Imm32(BigInt::signBitMask()), temp);
+ store32(temp, Address(dest, BigInt::offsetOfFlags()));
+
+ // Copy the length.
+ load32(Address(src, BigInt::offsetOfLength()), temp);
+ store32(temp, Address(dest, BigInt::offsetOfLength()));
+
+ // Copy the digits.
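+  // All inline digit slots are copied unconditionally, not just |length|
+  // digits.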
+ Address srcDigits(src, js::BigInt::offsetOfInlineDigits());
+ Address destDigits(dest, js::BigInt::offsetOfInlineDigits());
+
+ for (size_t i = 0; i < BigInt::inlineDigitsLength(); i++) {
+ static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
+ "BigInt Digit size matches uintptr_t");
+
+ loadPtr(srcDigits, temp);
+ storePtr(temp, destDigits);
+
+ srcDigits = Address(src, srcDigits.offset + sizeof(BigInt::Digit));
+ destDigits = Address(dest, destDigits.offset + sizeof(BigInt::Digit));
+ }
+}
+
+void MacroAssembler::compareBigIntAndInt32(JSOp op, Register bigInt,
+ Register int32, Register scratch1,
+ Register scratch2, Label* ifTrue,
+ Label* ifFalse) {
+ MOZ_ASSERT(IsLooseEqualityOp(op) || IsRelationalOp(op));
+
+ static_assert(std::is_same_v<BigInt::Digit, uintptr_t>,
+ "BigInt digit can be loaded in a pointer-sized register");
+ static_assert(sizeof(BigInt::Digit) >= sizeof(uint32_t),
+ "BigInt digit stores at least an uint32");
+
+ // Test for too large numbers.
+ //
+  // If the absolute value of the BigInt can't be expressed in a uint32/uint64,
+ // the result of the comparison is a constant.
+ if (op == JSOp::Eq || op == JSOp::Ne) {
+ Label* tooLarge = op == JSOp::Eq ? ifFalse : ifTrue;
+ branch32(Assembler::GreaterThan,
+ Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
+ tooLarge);
+ } else {
+ Label doCompare;
+ branch32(Assembler::LessThanOrEqual,
+ Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
+ &doCompare);
+
+ // Still need to take the sign-bit into account for relational operations.
+ if (op == JSOp::Lt || op == JSOp::Le) {
+ branchIfBigIntIsNegative(bigInt, ifTrue);
+ jump(ifFalse);
+ } else {
+ branchIfBigIntIsNegative(bigInt, ifFalse);
+ jump(ifTrue);
+ }
+
+ bind(&doCompare);
+ }
+
+ // Test for mismatched signs and, if the signs are equal, load |abs(x)| in
+ // |scratch1| and |abs(y)| in |scratch2| and then compare the absolute numbers
+ // against each other.
+ {
+ // Jump to |ifTrue| resp. |ifFalse| if the BigInt is strictly less than
+ // resp. strictly greater than the int32 value, depending on the comparison
+ // operator.
+ Label* greaterThan;
+ Label* lessThan;
+ if (op == JSOp::Eq) {
+ greaterThan = ifFalse;
+ lessThan = ifFalse;
+ } else if (op == JSOp::Ne) {
+ greaterThan = ifTrue;
+ lessThan = ifTrue;
+ } else if (op == JSOp::Lt || op == JSOp::Le) {
+ greaterThan = ifFalse;
+ lessThan = ifTrue;
+ } else {
+ MOZ_ASSERT(op == JSOp::Gt || op == JSOp::Ge);
+ greaterThan = ifTrue;
+ lessThan = ifFalse;
+ }
+
+ // BigInt digits are always stored as an absolute number.
+ loadFirstBigIntDigitOrZero(bigInt, scratch1);
+
+ // Load the int32 into |scratch2| and negate it for negative numbers.
+ move32(int32, scratch2);
+
+ Label isNegative, doCompare;
+ branchIfBigIntIsNegative(bigInt, &isNegative);
+ branch32(Assembler::LessThan, int32, Imm32(0), greaterThan);
+ jump(&doCompare);
+
+ // We rely on |neg32(INT32_MIN)| staying INT32_MIN, because we're using an
+ // unsigned comparison below.
+ bind(&isNegative);
+ branch32(Assembler::GreaterThanOrEqual, int32, Imm32(0), lessThan);
+ neg32(scratch2);
+
+ // Not all supported platforms (e.g. MIPS64) zero-extend 32-bit operations,
+ // so we need to explicitly clear any high 32-bits.
+ move32ZeroExtendToPtr(scratch2, scratch2);
+
+ // Reverse the relational comparator for negative numbers.
+ // |-x < -y| <=> |+x > +y|.
+ // |-x ≤ -y| <=> |+x ≥ +y|.
+ // |-x > -y| <=> |+x < +y|.
+ // |-x ≥ -y| <=> |+x ≤ +y|.
+ JSOp reversed = ReverseCompareOp(op);
+ if (reversed != op) {
+ branchPtr(JSOpToCondition(reversed, /* isSigned = */ false), scratch1,
+ scratch2, ifTrue);
+ jump(ifFalse);
+ }
+
+ bind(&doCompare);
+ branchPtr(JSOpToCondition(op, /* isSigned = */ false), scratch1, scratch2,
+ ifTrue);
+ }
+}
+
+void MacroAssembler::equalBigInts(Register left, Register right, Register temp1,
+ Register temp2, Register temp3,
+ Register temp4, Label* notSameSign,
+ Label* notSameLength, Label* notSameDigit) {
+ MOZ_ASSERT(left != temp1);
+ MOZ_ASSERT(right != temp1);
+ MOZ_ASSERT(right != temp2);
+
+  // Jump to |notSameSign| when the signs aren't the same.
+ load32(Address(left, BigInt::offsetOfFlags()), temp1);
+ xor32(Address(right, BigInt::offsetOfFlags()), temp1);
+ branchTest32(Assembler::NonZero, temp1, Imm32(BigInt::signBitMask()),
+ notSameSign);
+
+ // Jump to |notSameLength| when the digits length is different.
+ load32(Address(right, BigInt::offsetOfLength()), temp1);
+ branch32(Assembler::NotEqual, Address(left, BigInt::offsetOfLength()), temp1,
+ notSameLength);
+
+ // Both BigInts have the same sign and the same number of digits. Loop
+ // over each digit, starting with the left-most one, and break from the
+  // loop when the first non-matching digit is found.
+
+ loadBigIntDigits(left, temp2);
+ loadBigIntDigits(right, temp3);
+
+ static_assert(sizeof(BigInt::Digit) == sizeof(void*),
+ "BigInt::Digit is pointer sized");
+
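+  // Point |temp2| and |temp3| just past the last digit; the loop below walks
+  // backwards from the most significant digit.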
+ computeEffectiveAddress(BaseIndex(temp2, temp1, ScalePointer), temp2);
+ computeEffectiveAddress(BaseIndex(temp3, temp1, ScalePointer), temp3);
+
+ Label start, loop;
+ jump(&start);
+ bind(&loop);
+
+ subPtr(Imm32(sizeof(BigInt::Digit)), temp2);
+ subPtr(Imm32(sizeof(BigInt::Digit)), temp3);
+
+ loadPtr(Address(temp3, 0), temp4);
+ branchPtr(Assembler::NotEqual, Address(temp2, 0), temp4, notSameDigit);
+
+ bind(&start);
+ branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);
+
+ // No different digits were found, both BigInts are equal to each other.
+}
+
+void MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
+ Label* isObject, Label* isCallable,
+ Label* isUndefined) {
+ loadObjClassUnsafe(obj, scratch);
+
+ // Proxies can emulate undefined and have complex isCallable behavior.
+ branchTestClassIsProxy(true, scratch, slow);
+
+ // JSFunctions are always callable.
+ branchTestClassIsFunction(Assembler::Equal, scratch, isCallable);
+
+ // Objects that emulate undefined.
+ Address flags(scratch, JSClass::offsetOfFlags());
+ branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
+ isUndefined);
+
+ // Handle classes with a call hook.
+ branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClass, cOps)),
+ ImmPtr(nullptr), isObject);
+
+ loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
+ branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClassOps, call)),
+ ImmPtr(nullptr), isObject);
+
+ jump(isCallable);
+}
+
+void MacroAssembler::isCallableOrConstructor(bool isCallable, Register obj,
+ Register output, Label* isProxy) {
+ MOZ_ASSERT(obj != output);
+
+ Label notFunction, hasCOps, done;
+ loadObjClassUnsafe(obj, output);
+
+ // An object is callable iff:
+ // is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
+  // An object is a constructor iff:
+ // ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
+ // (getClass()->cOps && getClass()->cOps->construct)).
+ branchTestClassIsFunction(Assembler::NotEqual, output, &notFunction);
+ if (isCallable) {
+ move32(Imm32(1), output);
+ } else {
+ static_assert(mozilla::IsPowerOfTwo(uint32_t(FunctionFlags::CONSTRUCTOR)),
+ "FunctionFlags::CONSTRUCTOR has only one bit set");
+
+ load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), output);
+ rshift32(Imm32(mozilla::FloorLog2(uint32_t(FunctionFlags::CONSTRUCTOR))),
+ output);
+ and32(Imm32(1), output);
+ }
+ jump(&done);
+
+ bind(&notFunction);
+
+ if (!isCallable) {
+ // For bound functions, we need to check the isConstructor flag.
+ Label notBoundFunction;
+ branchPtr(Assembler::NotEqual, output, ImmPtr(&BoundFunctionObject::class_),
+ &notBoundFunction);
+
+ static_assert(BoundFunctionObject::IsConstructorFlag == 0b1,
+ "AND operation results in boolean value");
+ unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()), output);
+ and32(Imm32(BoundFunctionObject::IsConstructorFlag), output);
+ jump(&done);
+
+ bind(&notBoundFunction);
+ }
+
+ // Just skim proxies off. Their notion of isCallable()/isConstructor() is
+ // more complicated.
+ branchTestClassIsProxy(true, output, isProxy);
+
+ branchPtr(Assembler::NonZero, Address(output, offsetof(JSClass, cOps)),
+ ImmPtr(nullptr), &hasCOps);
+ move32(Imm32(0), output);
+ jump(&done);
+
+ bind(&hasCOps);
+ loadPtr(Address(output, offsetof(JSClass, cOps)), output);
+ size_t opsOffset =
+ isCallable ? offsetof(JSClassOps, call) : offsetof(JSClassOps, construct);
+ cmpPtrSet(Assembler::NonZero, Address(output, opsOffset), ImmPtr(nullptr),
+ output);
+
+ bind(&done);
+}
+
+void MacroAssembler::loadJSContext(Register dest) {
+ movePtr(ImmPtr(runtime()->mainContextPtr()), dest);
+}
+
+static const uint8_t* ContextRealmPtr(CompileRuntime* rt) {
+ return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
+ JSContext::offsetOfRealm());
+}
+
+void MacroAssembler::switchToRealm(Register realm) {
+ storePtr(realm, AbsoluteAddress(ContextRealmPtr(runtime())));
+}
+
+void MacroAssembler::switchToRealm(const void* realm, Register scratch) {
+ MOZ_ASSERT(realm);
+
+ movePtr(ImmPtr(realm), scratch);
+ switchToRealm(scratch);
+}
+
+void MacroAssembler::switchToObjectRealm(Register obj, Register scratch) {
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
+ loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
+ switchToRealm(scratch);
+}
+
+void MacroAssembler::switchToBaselineFrameRealm(Register scratch) {
+ Address envChain(FramePointer,
+ BaselineFrame::reverseOffsetOfEnvironmentChain());
+ loadPtr(envChain, scratch);
+ switchToObjectRealm(scratch, scratch);
+}
+
+void MacroAssembler::switchToWasmInstanceRealm(Register scratch1,
+ Register scratch2) {
+ loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), scratch1);
+ loadPtr(Address(InstanceReg, wasm::Instance::offsetOfRealm()), scratch2);
+ storePtr(scratch2, Address(scratch1, JSContext::offsetOfRealm()));
+}
+
+void MacroAssembler::debugAssertContextRealm(const void* realm,
+ Register scratch) {
+#ifdef DEBUG
+ Label ok;
+ movePtr(ImmPtr(realm), scratch);
+ branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
+ scratch, &ok);
+ assumeUnreachable("Unexpected context realm");
+ bind(&ok);
+#endif
+}
+
+void MacroAssembler::setIsCrossRealmArrayConstructor(Register obj,
+ Register output) {
+#ifdef DEBUG
+ Label notProxy;
+ branchTestObjectIsProxy(false, obj, output, &notProxy);
+ assumeUnreachable("Unexpected proxy in setIsCrossRealmArrayConstructor");
+ bind(&notProxy);
+#endif
+
+ // The object's realm must not be cx->realm.
+ Label isFalse, done;
+ loadPtr(Address(obj, JSObject::offsetOfShape()), output);
+ loadPtr(Address(output, Shape::offsetOfBaseShape()), output);
+ loadPtr(Address(output, BaseShape::offsetOfRealm()), output);
+ branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
+ output, &isFalse);
+
+ // The object must be a function.
+ branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);
+
+ // The function must be the ArrayConstructor native.
+ branchPtr(Assembler::NotEqual,
+ Address(obj, JSFunction::offsetOfNativeOrEnv()),
+ ImmPtr(js::ArrayConstructor), &isFalse);
+
+ move32(Imm32(1), output);
+ jump(&done);
+
+ bind(&isFalse);
+ move32(Imm32(0), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::setIsDefinitelyTypedArrayConstructor(Register obj,
+ Register output) {
+ Label isFalse, isTrue, done;
+
+ // The object must be a function. (Wrappers are not supported.)
+ branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);
+
+ // Load the native into |output|.
+ loadPtr(Address(obj, JSFunction::offsetOfNativeOrEnv()), output);
+
+ auto branchIsTypedArrayCtor = [&](Scalar::Type type) {
+ // The function must be a TypedArrayConstructor native (from any realm).
+ JSNative constructor = TypedArrayConstructorNative(type);
+ branchPtr(Assembler::Equal, output, ImmPtr(constructor), &isTrue);
+ };
+
+#define TYPED_ARRAY_CONSTRUCTOR_NATIVE(_, T, N) \
+ branchIsTypedArrayCtor(Scalar::N);
+ JS_FOR_EACH_TYPED_ARRAY(TYPED_ARRAY_CONSTRUCTOR_NATIVE)
+#undef TYPED_ARRAY_CONSTRUCTOR_NATIVE
+
+ // Falls through to the false case.
+
+ bind(&isFalse);
+ move32(Imm32(0), output);
+ jump(&done);
+
+ bind(&isTrue);
+ move32(Imm32(1), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::loadMegamorphicCache(Register dest) {
+ movePtr(ImmPtr(runtime()->addressOfMegamorphicCache()), dest);
+}
+void MacroAssembler::loadMegamorphicSetPropCache(Register dest) {
+ movePtr(ImmPtr(runtime()->addressOfMegamorphicSetPropCache()), dest);
+}
+
+void MacroAssembler::loadStringToAtomCacheLastLookups(Register dest) {
+ uintptr_t cachePtr = uintptr_t(runtime()->addressOfStringToAtomCache());
+ void* offset = (void*)(cachePtr + StringToAtomCache::offsetOfLastLookups());
+ movePtr(ImmPtr(offset), dest);
+}
+
+void MacroAssembler::loadAtomHash(Register id, Register outHash, Label* done) {
+ Label doneInner, fatInline;
+ if (!done) {
+ done = &doneInner;
+ }
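+  // Fat inline atoms store their hash at a different offset than normal
+  // atoms, so check the FAT_INLINE_MASK flags before loading it.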
+ move32(Imm32(JSString::FAT_INLINE_MASK), outHash);
+ and32(Address(id, JSString::offsetOfFlags()), outHash);
+
+ branch32(Assembler::Equal, outHash, Imm32(JSString::FAT_INLINE_MASK),
+ &fatInline);
+ load32(Address(id, NormalAtom::offsetOfHash()), outHash);
+ jump(done);
+ bind(&fatInline);
+ load32(Address(id, FatInlineAtom::offsetOfHash()), outHash);
+ jump(done);
+ bind(&doneInner);
+}
+
+void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
+ Register outHash,
+ Label* cacheMiss) {
+ Label isString, isSymbol, isNull, isUndefined, done, nonAtom, atom,
+ lastLookupAtom;
+
+ {
+ ScratchTagScope tag(*this, value);
+ splitTagForTest(value, tag);
+ branchTestString(Assembler::Equal, tag, &isString);
+ branchTestSymbol(Assembler::Equal, tag, &isSymbol);
+ branchTestNull(Assembler::Equal, tag, &isNull);
+ branchTestUndefined(Assembler::NotEqual, tag, cacheMiss);
+ }
+
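+  // undefined (fall-through) and null are looked up via their canonical name
+  // atoms.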
+ const JSAtomState& names = runtime()->names();
+ movePropertyKey(PropertyKey::NonIntAtom(names.undefined), outId);
+ move32(Imm32(names.undefined->hash()), outHash);
+ jump(&done);
+
+ bind(&isNull);
+ movePropertyKey(PropertyKey::NonIntAtom(names.null), outId);
+ move32(Imm32(names.null->hash()), outHash);
+ jump(&done);
+
+ bind(&isSymbol);
+ unboxSymbol(value, outId);
+ load32(Address(outId, JS::Symbol::offsetOfHash()), outHash);
+ orPtr(Imm32(PropertyKey::SymbolTypeTag), outId);
+ jump(&done);
+
+ bind(&isString);
+ unboxString(value, outId);
+ branchTest32(Assembler::Zero, Address(outId, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT), &nonAtom);
+
+ bind(&atom);
+ loadAtomHash(outId, outHash, &done);
+
+ bind(&nonAtom);
+ loadStringToAtomCacheLastLookups(outHash);
+
+ // Compare each entry in the StringToAtomCache's lastLookups_ array
+ size_t stringOffset = StringToAtomCache::LastLookup::offsetOfString();
+ branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
+ &lastLookupAtom);
+ for (size_t i = 0; i < StringToAtomCache::NumLastLookups - 1; ++i) {
+ addPtr(Imm32(sizeof(StringToAtomCache::LastLookup)), outHash);
+ branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
+ &lastLookupAtom);
+ }
+
+ // Couldn't find us in the cache, so fall back to the C++ call
+ jump(cacheMiss);
+
+ // We found a hit in the lastLookups_ array! Load the associated atom
+ // and jump back up to our usual atom handling code
+ bind(&lastLookupAtom);
+ size_t atomOffset = StringToAtomCache::LastLookup::offsetOfAtom();
+ loadPtr(Address(outHash, atomOffset), outId);
+ jump(&atom);
+
+ bind(&done);
+}
+
+void MacroAssembler::emitExtractValueFromMegamorphicCacheEntry(
+ Register obj, Register entry, Register scratch1, Register scratch2,
+ ValueOperand output, Label* cacheHit, Label* cacheMiss) {
+ Label isMissing, dynamicSlot, protoLoopHead, protoLoopTail;
+
+ // scratch2 = entry->numHops_
+ load8ZeroExtend(Address(entry, MegamorphicCache::Entry::offsetOfNumHops()),
+ scratch2);
+ // if (scratch2 == NumHopsForMissingOwnProperty) goto cacheMiss
+ branch32(Assembler::Equal, scratch2,
+ Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
+ cacheMiss);
+ // if (scratch2 == NumHopsForMissingProperty) goto isMissing
+ branch32(Assembler::Equal, scratch2,
+ Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
+ &isMissing);
+
+ // NOTE: Where this is called, `output` can actually alias `obj`, and before
+ // the last cacheMiss branch above we can't write to `obj`, so we can't
+  // use `output`'s scratch register there. However, a cache miss is impossible
+ // now, so we're free to use `output` as we like.
+ Register outputScratch = output.scratchReg();
+ if (!outputScratch.aliases(obj)) {
+ // We're okay with paying this very slight extra cost to avoid a potential
+ // footgun of writing to what callers understand as only an input register.
+ movePtr(obj, outputScratch);
+ }
+ branchTest32(Assembler::Zero, scratch2, scratch2, &protoLoopTail);
+ bind(&protoLoopHead);
+ loadObjProto(outputScratch, outputScratch);
+ branchSub32(Assembler::NonZero, Imm32(1), scratch2, &protoLoopHead);
+ bind(&protoLoopTail);
+
+ // scratch1 = entry->slotOffset()
+ load32(Address(entry, MegamorphicCacheEntry::offsetOfSlotOffset()), scratch1);
+
+ // scratch2 = slotOffset.offset()
+ move32(scratch1, scratch2);
+ rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch2);
+
+ // if (!slotOffset.isFixedSlot()) goto dynamicSlot
+ branchTest32(Assembler::Zero, scratch1,
+ Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);
+ // output = outputScratch[scratch2]
+ loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
+ jump(cacheHit);
+
+ bind(&dynamicSlot);
+ // output = outputScratch->slots_[scratch2]
+ loadPtr(Address(outputScratch, NativeObject::offsetOfSlots()), outputScratch);
+ loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
+ jump(cacheHit);
+
+ bind(&isMissing);
+ // output = undefined
+ moveValue(UndefinedValue(), output);
+ jump(cacheHit);
+}
+
+template <typename IdOperandType>
+void MacroAssembler::emitMegamorphicCacheLookupByValueCommon(
+ IdOperandType id, Register obj, Register scratch1, Register scratch2,
+ Register outEntryPtr, Label* cacheMiss, Label* cacheMissWithEntry) {
+  // A lot of this code is shared with emitMegamorphicCacheLookup. It would
+  // be nice to avoid the duplication, but this version takes the id in a
+  // ValueOperand instead of baking it in as an immediate and only needs a
+  // Register for the output value, so the deduplicated version turned out to
+  // be harder to read.
+
+ // outEntryPtr = obj->shape()
+ loadPtr(Address(obj, JSObject::offsetOfShape()), outEntryPtr);
+
+ movePtr(outEntryPtr, scratch2);
+
+ // outEntryPtr = (outEntryPtr >> 3) ^ (outEntryPtr >> 13) + idHash
+ rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
+ rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
+ xorPtr(scratch2, outEntryPtr);
+
+ if constexpr (std::is_same<IdOperandType, ValueOperand>::value) {
+ loadAtomOrSymbolAndHash(id, scratch1, scratch2, cacheMiss);
+ } else {
+ static_assert(std::is_same<IdOperandType, Register>::value);
+ movePtr(id, scratch1);
+ loadAtomHash(scratch1, scratch2, nullptr);
+ }
+ addPtr(scratch2, outEntryPtr);
+
+ // outEntryPtr %= MegamorphicCache::NumEntries
+ constexpr size_t cacheSize = MegamorphicCache::NumEntries;
+ static_assert(mozilla::IsPowerOfTwo(cacheSize));
+ size_t cacheMask = cacheSize - 1;
+ and32(Imm32(cacheMask), outEntryPtr);
+
+ loadMegamorphicCache(scratch2);
+ // outEntryPtr = &scratch2->entries_[outEntryPtr]
+ constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
+ static_assert(sizeof(void*) == 4 || entrySize == 24);
+ if constexpr (sizeof(void*) == 4) {
+ mul32(Imm32(entrySize), outEntryPtr);
+ computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
+ MegamorphicCache::offsetOfEntries()),
+ outEntryPtr);
+ } else {
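+    // entrySize is 24 bytes: first compute outEntryPtr * 3 (base + index * 2),
+    // then scale by 8 in the next address computation.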
+ computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
+ outEntryPtr);
+ computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
+ MegamorphicCache::offsetOfEntries()),
+ outEntryPtr);
+ }
+
+ // if (outEntryPtr->key_ != scratch1) goto cacheMissWithEntry
+ branchPtr(Assembler::NotEqual,
+ Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
+ scratch1, cacheMissWithEntry);
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
+
+ // if (outEntryPtr->shape_ != scratch1) goto cacheMissWithEntry
+ branchPtr(Assembler::NotEqual,
+ Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
+ scratch1, cacheMissWithEntry);
+
+ // scratch2 = scratch2->generation_
+ load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
+ scratch2);
+ load16ZeroExtend(
+ Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
+ scratch1);
+ // if (outEntryPtr->generation_ != scratch2) goto cacheMissWithEntry
+ branch32(Assembler::NotEqual, scratch1, scratch2, cacheMissWithEntry);
+}
+
+void MacroAssembler::emitMegamorphicCacheLookup(
+ PropertyKey id, Register obj, Register scratch1, Register scratch2,
+ Register outEntryPtr, ValueOperand output, Label* cacheHit) {
+ Label cacheMiss, isMissing, dynamicSlot, protoLoopHead, protoLoopTail;
+
+ // scratch1 = obj->shape()
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
+
+ movePtr(scratch1, outEntryPtr);
+ movePtr(scratch1, scratch2);
+
+ // outEntryPtr = (scratch1 >> 3) ^ (scratch1 >> 13) + hash(id)
+ rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
+ rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
+ xorPtr(scratch2, outEntryPtr);
+ addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), outEntryPtr);
+
+ // outEntryPtr %= MegamorphicCache::NumEntries
+ constexpr size_t cacheSize = MegamorphicCache::NumEntries;
+ static_assert(mozilla::IsPowerOfTwo(cacheSize));
+ size_t cacheMask = cacheSize - 1;
+ and32(Imm32(cacheMask), outEntryPtr);
+
+ loadMegamorphicCache(scratch2);
+ // outEntryPtr = &scratch2->entries_[outEntryPtr]
+ constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
+ static_assert(sizeof(void*) == 4 || entrySize == 24);
+ if constexpr (sizeof(void*) == 4) {
+ mul32(Imm32(entrySize), outEntryPtr);
+ computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
+ MegamorphicCache::offsetOfEntries()),
+ outEntryPtr);
+ } else {
+ computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
+ outEntryPtr);
+ computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
+ MegamorphicCache::offsetOfEntries()),
+ outEntryPtr);
+ }
+
+ // if (outEntryPtr->shape_ != scratch1) goto cacheMiss
+ branchPtr(Assembler::NotEqual,
+ Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
+ scratch1, &cacheMiss);
+
+ // if (outEntryPtr->key_ != id) goto cacheMiss
+ movePropertyKey(id, scratch1);
+ branchPtr(Assembler::NotEqual,
+ Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
+ scratch1, &cacheMiss);
+
+ // scratch2 = scratch2->generation_
+ load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
+ scratch2);
+ load16ZeroExtend(
+ Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
+ scratch1);
+ // if (outEntryPtr->generation_ != scratch2) goto cacheMiss
+ branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);
+
+ emitExtractValueFromMegamorphicCacheEntry(
+ obj, outEntryPtr, scratch1, scratch2, output, cacheHit, &cacheMiss);
+
+ bind(&cacheMiss);
+}
+
+template <typename IdOperandType>
+void MacroAssembler::emitMegamorphicCacheLookupByValue(
+ IdOperandType id, Register obj, Register scratch1, Register scratch2,
+ Register outEntryPtr, ValueOperand output, Label* cacheHit) {
+ Label cacheMiss, cacheMissWithEntry;
+ emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
+ outEntryPtr, &cacheMiss,
+ &cacheMissWithEntry);
+ emitExtractValueFromMegamorphicCacheEntry(obj, outEntryPtr, scratch1,
+ scratch2, output, cacheHit,
+ &cacheMissWithEntry);
+ bind(&cacheMiss);
+ xorPtr(outEntryPtr, outEntryPtr);
+ bind(&cacheMissWithEntry);
+}
+
+template void MacroAssembler::emitMegamorphicCacheLookupByValue<ValueOperand>(
+ ValueOperand id, Register obj, Register scratch1, Register scratch2,
+ Register outEntryPtr, ValueOperand output, Label* cacheHit);
+
+template void MacroAssembler::emitMegamorphicCacheLookupByValue<Register>(
+ Register id, Register obj, Register scratch1, Register scratch2,
+ Register outEntryPtr, ValueOperand output, Label* cacheHit);
+
+void MacroAssembler::emitMegamorphicCacheLookupExists(
+ ValueOperand id, Register obj, Register scratch1, Register scratch2,
+ Register outEntryPtr, Register output, Label* cacheHit, bool hasOwn) {
+ Label cacheMiss, cacheMissWithEntry, cacheHitFalse;
+ emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
+ outEntryPtr, &cacheMiss,
+ &cacheMissWithEntry);
+
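+  // numHops_ is the number of prototype hops from the object to the holder of
+  // the property (0 means an own property). Two sentinel values mark a
+  // property that is missing entirely and one that is only known to be
+  // missing as an own property.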
+ // scratch1 = outEntryPtr->numHops_
+ load8ZeroExtend(
+ Address(outEntryPtr, MegamorphicCache::Entry::offsetOfNumHops()),
+ scratch1);
+
+ branch32(Assembler::Equal, scratch1,
+ Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
+ &cacheHitFalse);
+
+ if (hasOwn) {
+ branch32(Assembler::NotEqual, scratch1, Imm32(0), &cacheHitFalse);
+ } else {
+ branch32(Assembler::Equal, scratch1,
+ Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
+ &cacheMissWithEntry);
+ }
+
+ move32(Imm32(1), output);
+ jump(cacheHit);
+
+ bind(&cacheHitFalse);
+ xor32(output, output);
+ jump(cacheHit);
+
+ bind(&cacheMiss);
+ xorPtr(outEntryPtr, outEntryPtr);
+ bind(&cacheMissWithEntry);
+}
+
+void MacroAssembler::extractCurrentIndexAndKindFromIterator(Register iterator,
+ Register outIndex,
+ Register outKind) {
+  // Load the NativeIterator from the iterator object's iterator slot.
+ Address nativeIterAddr(iterator,
+ PropertyIteratorObject::offsetOfIteratorSlot());
+ loadPrivate(nativeIterAddr, outIndex);
+
+ // Compute offset of propertyCursor_ from propertiesBegin()
+ loadPtr(Address(outIndex, NativeIterator::offsetOfPropertyCursor()), outKind);
+ subPtr(Address(outIndex, NativeIterator::offsetOfShapesEnd()), outKind);
+
+ // Compute offset of current index from indicesBegin(). Note that because
+ // propertyCursor has already been incremented, this is actually the offset
+ // of the next index. We adjust accordingly below.
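+  // The property cursor walks GCPtr<JSLinearString*> entries while the
+  // indices are PropertyIndex values, so scale the byte offset down by their
+  // size ratio.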
+ size_t indexAdjustment =
+ sizeof(GCPtr<JSLinearString*>) / sizeof(PropertyIndex);
+ if (indexAdjustment != 1) {
+ MOZ_ASSERT(indexAdjustment == 2);
+ rshift32(Imm32(1), outKind);
+ }
+
+ // Load current index.
+ loadPtr(Address(outIndex, NativeIterator::offsetOfPropertiesEnd()), outIndex);
+ load32(BaseIndex(outIndex, outKind, Scale::TimesOne,
+ -int32_t(sizeof(PropertyIndex))),
+ outIndex);
+
+ // Extract kind.
+ move32(outIndex, outKind);
+ rshift32(Imm32(PropertyIndex::KindShift), outKind);
+
+ // Extract index.
+ and32(Imm32(PropertyIndex::IndexMask), outIndex);
+}
+
+template <typename IdType>
+void MacroAssembler::emitMegamorphicCachedSetSlot(
+ IdType id, Register obj, Register scratch1,
+#ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
+ Register scratch2, Register scratch3,
+#endif
+ ValueOperand value, Label* cacheHit,
+ void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType)) {
+ Label cacheMiss, dynamicSlot, doAdd, doSet, doAddDynamic, doSetDynamic;
+
+#ifdef JS_CODEGEN_X86
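+  // x86 does not have enough registers for two extra scratches (see the
+  // #ifndef above), so spill |value| and reuse its type and payload registers
+  // as scratch2/scratch3; |value| is popped again before it is used.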
+ pushValue(value);
+ Register scratch2 = value.typeReg();
+ Register scratch3 = value.payloadReg();
+#endif
+
+ // outEntryPtr = obj->shape()
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch3);
+
+ movePtr(scratch3, scratch2);
+
+ // scratch3 = (scratch3 >> 3) ^ (scratch3 >> 13) + idHash
+ rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift1), scratch3);
+ rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift2), scratch2);
+ xorPtr(scratch2, scratch3);
+
+ if constexpr (std::is_same<IdType, ValueOperand>::value) {
+ loadAtomOrSymbolAndHash(id, scratch1, scratch2, &cacheMiss);
+ addPtr(scratch2, scratch3);
+ } else {
+ static_assert(std::is_same<IdType, PropertyKey>::value);
+ addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), scratch3);
+ movePropertyKey(id, scratch1);
+ }
+
+ // scratch3 %= MegamorphicSetPropCache::NumEntries
+ constexpr size_t cacheSize = MegamorphicSetPropCache::NumEntries;
+ static_assert(mozilla::IsPowerOfTwo(cacheSize));
+ size_t cacheMask = cacheSize - 1;
+ and32(Imm32(cacheMask), scratch3);
+
+ loadMegamorphicSetPropCache(scratch2);
+ // scratch3 = &scratch2->entries_[scratch3]
+ constexpr size_t entrySize = sizeof(MegamorphicSetPropCache::Entry);
+ mul32(Imm32(entrySize), scratch3);
+ computeEffectiveAddress(BaseIndex(scratch2, scratch3, TimesOne,
+ MegamorphicSetPropCache::offsetOfEntries()),
+ scratch3);
+
+ // if (scratch3->key_ != scratch1) goto cacheMiss
+ branchPtr(Assembler::NotEqual,
+ Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfKey()),
+ scratch1, &cacheMiss);
+
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
+ // if (scratch3->shape_ != scratch1) goto cacheMiss
+ branchPtr(Assembler::NotEqual,
+ Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfShape()),
+ scratch1, &cacheMiss);
+
+ // scratch2 = scratch2->generation_
+ load16ZeroExtend(
+ Address(scratch2, MegamorphicSetPropCache::offsetOfGeneration()),
+ scratch2);
+ load16ZeroExtend(
+ Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfGeneration()),
+ scratch1);
+ // if (scratch3->generation_ != scratch2) goto cacheMiss
+ branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);
+
+ // scratch2 = entry->slotOffset()
+ load32(
+ Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfSlotOffset()),
+ scratch2);
+
+ // scratch1 = slotOffset.offset()
+ move32(scratch2, scratch1);
+ rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch1);
+
+ Address afterShapePtr(scratch3,
+ MegamorphicSetPropCache::Entry::offsetOfAfterShape());
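+  // entry->afterShape_ is null for a plain set of an existing slot; when
+  // non-null it is the shape the object takes after the new property is
+  // added.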
+
+ // if (!slotOffset.isFixedSlot()) goto dynamicSlot
+ branchTest32(Assembler::Zero, scratch2,
+ Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);
+
+  // Calculate the fixed-slot address in scratch1. Jump to doSet if
+  // entry->afterShape_ is null (a plain set of an existing slot), else jump
+  // (or fall through) to doAdd.
+ addPtr(obj, scratch1);
+ branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSet);
+ jump(&doAdd);
+
+ bind(&dynamicSlot);
+ branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSetDynamic);
+
+ Address slotAddr(scratch1, 0);
+
+ // If entry->newCapacity_ is nonzero, we need to grow the slots on the
+ // object. Otherwise just jump straight to a dynamic add.
+ load16ZeroExtend(
+ Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfNewCapacity()),
+ scratch2);
+ branchTest32(Assembler::Zero, scratch2, scratch2, &doAddDynamic);
+
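+  // Grow the object's dynamic slots to the recorded capacity by calling
+  // NativeObject::growSlotsPure. All volatile registers are saved around the
+  // call, and |tmp| is chosen so it does not alias |obj| or |scratch2|.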
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+
+ PushRegsInMask(save);
+
+ regs.takeUnchecked(scratch2);
+ Register tmp;
+ if (regs.has(obj)) {
+ regs.takeUnchecked(obj);
+ tmp = regs.takeAnyGeneral();
+ regs.addUnchecked(obj);
+ } else {
+ tmp = regs.takeAnyGeneral();
+ }
+
+ using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
+ setupUnalignedABICall(tmp);
+ loadJSContext(tmp);
+ passABIArg(tmp);
+ passABIArg(obj);
+ passABIArg(scratch2);
+ callWithABI<Fn, NativeObject::growSlotsPure>();
+ storeCallPointerResult(scratch2);
+ PopRegsInMask(save);
+
+ branchIfFalseBool(scratch2, &cacheMiss);
+
+ bind(&doAddDynamic);
+ addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
+
+ bind(&doAdd);
+ // scratch3 = entry->afterShape()
+ loadPtr(
+ Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfAfterShape()),
+ scratch3);
+
+ storeObjShape(scratch3, obj,
+ [emitPreBarrier](MacroAssembler& masm, const Address& addr) {
+ emitPreBarrier(masm, addr, MIRType::Shape);
+ });
+#ifdef JS_CODEGEN_X86
+ popValue(value);
+#endif
+ storeValue(value, slotAddr);
+ jump(cacheHit);
+
+ bind(&doSetDynamic);
+ addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
+ bind(&doSet);
+ guardedCallPreBarrier(slotAddr, MIRType::Value);
+
+#ifdef JS_CODEGEN_X86
+ popValue(value);
+#endif
+ storeValue(value, slotAddr);
+ jump(cacheHit);
+
+ bind(&cacheMiss);
+#ifdef JS_CODEGEN_X86
+ popValue(value);
+#endif
+}
+
+template void MacroAssembler::emitMegamorphicCachedSetSlot<PropertyKey>(
+ PropertyKey id, Register obj, Register scratch1,
+#ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
+ Register scratch2, Register scratch3,
+#endif
+ ValueOperand value, Label* cacheHit,
+ void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
+
+template void MacroAssembler::emitMegamorphicCachedSetSlot<ValueOperand>(
+ ValueOperand id, Register obj, Register scratch1,
+#ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
+ Register scratch2, Register scratch3,
+#endif
+ ValueOperand value, Label* cacheHit,
+ void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
+
+void MacroAssembler::guardNonNegativeIntPtrToInt32(Register reg, Label* fail) {
+#ifdef DEBUG
+ Label ok;
+ branchPtr(Assembler::NotSigned, reg, reg, &ok);
+ assumeUnreachable("Unexpected negative value");
+ bind(&ok);
+#endif
+
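+  // On 32-bit platforms an intptr_t always fits in an int32_t, so only 64-bit
+  // builds need the range check.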
+#ifdef JS_64BIT
+ branchPtr(Assembler::Above, reg, Imm32(INT32_MAX), fail);
+#endif
+}
+
+void MacroAssembler::loadArrayBufferByteLengthIntPtr(Register obj,
+ Register output) {
+ Address slotAddr(obj, ArrayBufferObject::offsetOfByteLengthSlot());
+ loadPrivate(slotAddr, output);
+}
+
+void MacroAssembler::loadArrayBufferViewByteOffsetIntPtr(Register obj,
+ Register output) {
+ Address slotAddr(obj, ArrayBufferViewObject::byteOffsetOffset());
+ loadPrivate(slotAddr, output);
+}
+
+void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj,
+ Register output) {
+ Address slotAddr(obj, ArrayBufferViewObject::lengthOffset());
+ loadPrivate(slotAddr, output);
+}
+
+void MacroAssembler::loadDOMExpandoValueGuardGeneration(
+ Register obj, ValueOperand output,
+ JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
+ Label* fail) {
+ loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
+ output.scratchReg());
+ loadValue(Address(output.scratchReg(),
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
+ output);
+
+  // Guard that the proxy's private slot holds the expected
+  // ExpandoAndGeneration*.
+ branchTestValue(Assembler::NotEqual, output,
+ PrivateValue(expandoAndGeneration), fail);
+
+ // Guard expandoAndGeneration->generation matches the expected generation.
+ Address generationAddr(output.payloadOrValueReg(),
+ JS::ExpandoAndGeneration::offsetOfGeneration());
+ branch64(Assembler::NotEqual, generationAddr, Imm64(generation), fail);
+
+ // Load expandoAndGeneration->expando into the output Value register.
+ loadValue(Address(output.payloadOrValueReg(),
+ JS::ExpandoAndGeneration::offsetOfExpando()),
+ output);
+}
+
+void MacroAssembler::loadJitActivation(Register dest) {
+ loadJSContext(dest);
+ loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
+}
+
+void MacroAssembler::guardSpecificAtom(Register str, JSAtom* atom,
+ Register scratch,
+ const LiveRegisterSet& volatileRegs,
+ Label* fail) {
+ Label done;
+ branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);
+
+ // The pointers are not equal, so if the input string is also an atom it
+ // must be a different string.
+ branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT), fail);
+
+ // Check the length.
+ branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
+ Imm32(atom->length()), fail);
+
+ // We have a non-atomized string with the same length. Call a helper
+ // function to do the comparison.
+ PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSString* str1, JSString* str2);
+ setupUnalignedABICall(scratch);
+ movePtr(ImmGCPtr(atom), scratch);
+ passABIArg(scratch);
+ passABIArg(str);
+ callWithABI<Fn, EqualStringsHelperPure>();
+ storeCallPointerResult(scratch);
+
+ MOZ_ASSERT(!volatileRegs.has(scratch));
+ PopRegsInMask(volatileRegs);
+ branchIfFalseBool(scratch, fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::guardStringToInt32(Register str, Register output,
+ Register scratch,
+ LiveRegisterSet volatileRegs,
+ Label* fail) {
+ Label vmCall, done;
+ // Use indexed value as fast path if possible.
+ loadStringIndexValue(str, output, &vmCall);
+ jump(&done);
+ {
+ bind(&vmCall);
+
+    // Reserve space to hold the int32_t result of the call. Use a
+    // pointer-sized slot to avoid misaligning the stack on 64-bit platforms.
+ reserveStack(sizeof(uintptr_t));
+ moveStackPtrTo(output);
+
+ volatileRegs.takeUnchecked(scratch);
+ if (output.volatile_()) {
+ volatileRegs.addUnchecked(output);
+ }
+ PushRegsInMask(volatileRegs);
+
+ using Fn = bool (*)(JSContext* cx, JSString* str, int32_t* result);
+ setupUnalignedABICall(scratch);
+ loadJSContext(scratch);
+ passABIArg(scratch);
+ passABIArg(str);
+ passABIArg(output);
+ callWithABI<Fn, GetInt32FromStringPure>();
+ storeCallPointerResult(scratch);
+
+ PopRegsInMask(volatileRegs);
+
+ Label ok;
+ branchIfTrueBool(scratch, &ok);
+ {
+ // OOM path, recovered by GetInt32FromStringPure.
+ //
+ // Use addToStackPtr instead of freeStack as freeStack tracks stack height
+ // flow-insensitively, and using it twice would confuse the stack height
+ // tracking.
+ addToStackPtr(Imm32(sizeof(uintptr_t)));
+ jump(fail);
+ }
+ bind(&ok);
+ load32(Address(output, 0), output);
+ freeStack(sizeof(uintptr_t));
+ }
+ bind(&done);
+}
+
+void MacroAssembler::generateBailoutTail(Register scratch,
+ Register bailoutInfo) {
+ Label bailoutFailed;
+ branchIfFalseBool(ReturnReg, &bailoutFailed);
+
+ // Finish bailing out to Baseline.
+ {
+ // Prepare a register set for use in this case.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
+ !regs.has(AsRegister(getStackPointer())));
+ regs.take(bailoutInfo);
+
+ Register temp = regs.takeAny();
+
+#ifdef DEBUG
+ // Assert the stack pointer points to the JitFrameLayout header. Copying
+ // starts here.
+ Label ok;
+ loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
+ temp);
+ branchStackPtr(Assembler::Equal, temp, &ok);
+ assumeUnreachable("Unexpected stack pointer value");
+ bind(&ok);
+#endif
+
+ Register copyCur = regs.takeAny();
+ Register copyEnd = regs.takeAny();
+
+ // Copy data onto stack.
+ loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)),
+ copyCur);
+ loadPtr(
+ Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)),
+ copyEnd);
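+    // Walk copyCur down from copyStackTop to copyStackBottom, pushing one
+    // word of the reconstructed frame data onto the stack per iteration.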
+ {
+ Label copyLoop;
+ Label endOfCopy;
+ bind(&copyLoop);
+ branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
+ subPtr(Imm32(sizeof(uintptr_t)), copyCur);
+ subFromStackPtr(Imm32(sizeof(uintptr_t)));
+ loadPtr(Address(copyCur, 0), temp);
+ storePtr(temp, Address(getStackPointer(), 0));
+ jump(&copyLoop);
+ bind(&endOfCopy);
+ }
+
+ loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)),
+ FramePointer);
+
+ // Enter exit frame for the FinishBailoutToBaseline call.
+ pushFrameDescriptor(FrameType::BaselineJS);
+ push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
+ push(FramePointer);
+ // No GC things to mark on the stack, push a bare token.
+ loadJSContext(scratch);
+ enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ // Save needed values onto stack temporarily.
+ push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
+
+ // Call a stub to free allocated memory and create arguments objects.
+ using Fn = bool (*)(BaselineBailoutInfo* bailoutInfoArg);
+ setupUnalignedABICall(temp);
+ passABIArg(bailoutInfo);
+ callWithABI<Fn, FinishBailoutToBaseline>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+ branchIfFalseBool(ReturnReg, exceptionLabel());
+
+ // Restore values where they need to be and resume execution.
+ AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!enterRegs.has(FramePointer));
+ Register jitcodeReg = enterRegs.takeAny();
+
+ pop(jitcodeReg);
+
+ // Discard exit frame.
+ addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
+
+ jump(jitcodeReg);
+ }
+
+ bind(&bailoutFailed);
+ {
+ // jit::Bailout or jit::InvalidationBailout failed and returned false. The
+ // Ion frame has already been discarded and the stack pointer points to the
+ // JitFrameLayout header. Turn it into an ExitFrameLayout, similar to
+ // EnsureUnwoundJitExitFrame, and call the exception handler.
+ loadJSContext(scratch);
+ enterFakeExitFrame(scratch, scratch, ExitFrameType::UnwoundJit);
+ jump(exceptionLabel());
+ }
+}
+
+void MacroAssembler::loadJitCodeRaw(Register func, Register dest) {
+ static_assert(BaseScript::offsetOfJitCodeRaw() ==
+ SelfHostedLazyScript::offsetOfJitCodeRaw(),
+ "SelfHostedLazyScript and BaseScript must use same layout for "
+ "jitCodeRaw_");
+ static_assert(
+ BaseScript::offsetOfJitCodeRaw() == wasm::JumpTableJitEntryOffset,
+ "Wasm exported functions jit entries must use same layout for "
+ "jitCodeRaw_");
+ loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
+ loadPtr(Address(dest, BaseScript::offsetOfJitCodeRaw()), dest);
+}
+
+void MacroAssembler::loadBaselineJitCodeRaw(Register func, Register dest,
+ Label* failure) {
+ // Load JitScript
+ loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
+ if (failure) {
+ branchIfScriptHasNoJitScript(dest, failure);
+ }
+ loadJitScript(dest, dest);
+
+ // Load BaselineScript
+ loadPtr(Address(dest, JitScript::offsetOfBaselineScript()), dest);
+ if (failure) {
+ static_assert(BaselineDisabledScript == 0x1);
+ branchPtr(Assembler::BelowOrEqual, dest, ImmWord(BaselineDisabledScript),
+ failure);
+ }
+
+ // Load Baseline jitcode
+ loadPtr(Address(dest, BaselineScript::offsetOfMethod()), dest);
+ loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
+}
+
+void MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest) {
+ if (framePtr != dest) {
+ movePtr(framePtr, dest);
+ }
+ subPtr(Imm32(BaselineFrame::Size()), dest);
+}
+
+static const uint8_t* ContextInlinedICScriptPtr(CompileRuntime* rt) {
+ return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
+ JSContext::offsetOfInlinedICScript());
+}
+
+void MacroAssembler::storeICScriptInJSContext(Register icScript) {
+ storePtr(icScript, AbsoluteAddress(ContextInlinedICScriptPtr(runtime())));
+}
+
+void MacroAssembler::handleFailure() {
+ // Re-entry code is irrelevant because the exception will leave the
+ // running function and never come back
+ TrampolinePtr excTail = runtime()->jitRuntime()->getExceptionTail();
+ jump(excTail);
+}
+
+void MacroAssembler::assumeUnreachable(const char* output) {
+#ifdef JS_MASM_VERBOSE
+ if (!IsCompilingWasm()) {
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+ Register temp = regs.takeAnyGeneral();
+
+ using Fn = void (*)(const char* output);
+ setupUnalignedABICall(temp);
+ movePtr(ImmPtr(output), temp);
+ passABIArg(temp);
+ callWithABI<Fn, AssumeUnreachable>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ PopRegsInMask(save);
+ }
+#endif
+
+ breakpoint();
+}
+
+void MacroAssembler::printf(const char* output) {
+#ifdef JS_MASM_VERBOSE
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+
+ Register temp = regs.takeAnyGeneral();
+
+ using Fn = void (*)(const char* output);
+ setupUnalignedABICall(temp);
+ movePtr(ImmPtr(output), temp);
+ passABIArg(temp);
+ callWithABI<Fn, Printf0>();
+
+ PopRegsInMask(save);
+#endif
+}
+
+void MacroAssembler::printf(const char* output, Register value) {
+#ifdef JS_MASM_VERBOSE
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(save);
+
+ regs.takeUnchecked(value);
+
+ Register temp = regs.takeAnyGeneral();
+
+ using Fn = void (*)(const char* output, uintptr_t value);
+ setupUnalignedABICall(temp);
+ movePtr(ImmPtr(output), temp);
+ passABIArg(temp);
+ passABIArg(value);
+ callWithABI<Fn, Printf1>();
+
+ PopRegsInMask(save);
+#endif
+}
+
+void MacroAssembler::convertInt32ValueToDouble(ValueOperand val) {
+ Label done;
+ branchTestInt32(Assembler::NotEqual, val, &done);
+ unboxInt32(val, val.scratchReg());
+ ScratchDoubleScope fpscratch(*this);
+ convertInt32ToDouble(val.scratchReg(), fpscratch);
+ boxDouble(fpscratch, val, fpscratch);
+ bind(&done);
+}
+
+void MacroAssembler::convertValueToFloatingPoint(ValueOperand value,
+ FloatRegister output,
+ Label* fail,
+ MIRType outputType) {
+ Label isDouble, isInt32, isBool, isNull, done;
+
+ {
+ ScratchTagScope tag(*this, value);
+ splitTagForTest(value, tag);
+
+ branchTestDouble(Assembler::Equal, tag, &isDouble);
+ branchTestInt32(Assembler::Equal, tag, &isInt32);
+ branchTestBoolean(Assembler::Equal, tag, &isBool);
+ branchTestNull(Assembler::Equal, tag, &isNull);
+ branchTestUndefined(Assembler::NotEqual, tag, fail);
+ }
+
+ // fall-through: undefined
+ loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output,
+ outputType);
+ jump(&done);
+
+ bind(&isNull);
+ loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
+ jump(&done);
+
+ bind(&isBool);
+ boolValueToFloatingPoint(value, output, outputType);
+ jump(&done);
+
+ bind(&isInt32);
+ int32ValueToFloatingPoint(value, output, outputType);
+ jump(&done);
+
+ // On some non-multiAlias platforms, unboxDouble may use the scratch register,
+ // so do not merge code paths here.
+ bind(&isDouble);
+ if (outputType == MIRType::Float32 && hasMultiAlias()) {
+ ScratchDoubleScope tmp(*this);
+ unboxDouble(value, tmp);
+ convertDoubleToFloat32(tmp, output);
+ } else {
+ FloatRegister tmp = output.asDouble();
+ unboxDouble(value, tmp);
+ if (outputType == MIRType::Float32) {
+ convertDoubleToFloat32(tmp, output);
+ }
+ }
+
+ bind(&done);
+}
+
+void MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest,
+ bool widenFloatToDouble,
+ bool compilingWasm,
+ wasm::BytecodeOffset callOffset) {
+ if (compilingWasm) {
+ Push(InstanceReg);
+ }
+ int32_t framePushedAfterInstance = framePushed();
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ ScratchDoubleScope fpscratch(*this);
+ if (widenFloatToDouble) {
+ convertFloat32ToDouble(src, fpscratch);
+ src = fpscratch;
+ }
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ FloatRegister srcSingle;
+ if (widenFloatToDouble) {
+ MOZ_ASSERT(src.isSingle());
+ srcSingle = src;
+ src = src.asDouble();
+ Push(srcSingle);
+ convertFloat32ToDouble(srcSingle, src);
+ }
+#else
+ // Also see below
+ MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
+#endif
+
+ MOZ_ASSERT(src.isDouble());
+
+ if (compilingWasm) {
+ int32_t instanceOffset = framePushed() - framePushedAfterInstance;
+ setupWasmABICall();
+ passABIArg(src, MoveOp::DOUBLE);
+ callWithABI(callOffset, wasm::SymbolicAddress::ToInt32,
+ mozilla::Some(instanceOffset));
+ } else {
+ using Fn = int32_t (*)(double);
+ setupUnalignedABICall(dest);
+ passABIArg(src, MoveOp::DOUBLE);
+ callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ storeCallInt32Result(dest);
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ // Nothing
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ if (widenFloatToDouble) {
+ Pop(srcSingle);
+ }
+#else
+ MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
+#endif
+
+ if (compilingWasm) {
+ Pop(InstanceReg);
+ }
+}
+
+void MacroAssembler::convertDoubleToInt(FloatRegister src, Register output,
+ FloatRegister temp, Label* truncateFail,
+ Label* fail,
+ IntConversionBehavior behavior) {
+ switch (behavior) {
+ case IntConversionBehavior::Normal:
+ case IntConversionBehavior::NegativeZeroCheck:
+ convertDoubleToInt32(
+ src, output, fail,
+ behavior == IntConversionBehavior::NegativeZeroCheck);
+ break;
+ case IntConversionBehavior::Truncate:
+ branchTruncateDoubleMaybeModUint32(src, output,
+ truncateFail ? truncateFail : fail);
+ break;
+ case IntConversionBehavior::ClampToUint8:
+ // Clamping clobbers the input register, so use a temp.
+ if (src != temp) {
+ moveDouble(src, temp);
+ }
+ clampDoubleToUint8(temp, output);
+ break;
+ }
+}
+
+void MacroAssembler::convertValueToInt(
+ ValueOperand value, Label* handleStringEntry, Label* handleStringRejoin,
+ Label* truncateDoubleSlow, Register stringReg, FloatRegister temp,
+ Register output, Label* fail, IntConversionBehavior behavior,
+ IntConversionInputKind conversion) {
+ Label done, isInt32, isBool, isDouble, isNull, isString;
+
+ bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
+ behavior == IntConversionBehavior::ClampToUint8) &&
+ handleStringEntry && handleStringRejoin;
+
+ MOZ_ASSERT_IF(handleStrings, conversion == IntConversionInputKind::Any);
+
+ {
+ ScratchTagScope tag(*this, value);
+ splitTagForTest(value, tag);
+
+ branchTestInt32(Equal, tag, &isInt32);
+ if (conversion == IntConversionInputKind::Any ||
+ conversion == IntConversionInputKind::NumbersOrBoolsOnly) {
+ branchTestBoolean(Equal, tag, &isBool);
+ }
+ branchTestDouble(Equal, tag, &isDouble);
+
+ if (conversion == IntConversionInputKind::Any) {
+ // If we are not truncating, we fail for anything that's not
+ // null. Otherwise we might be able to handle strings and undefined.
+ switch (behavior) {
+ case IntConversionBehavior::Normal:
+ case IntConversionBehavior::NegativeZeroCheck:
+ branchTestNull(Assembler::NotEqual, tag, fail);
+ break;
+
+ case IntConversionBehavior::Truncate:
+ case IntConversionBehavior::ClampToUint8:
+ branchTestNull(Equal, tag, &isNull);
+ if (handleStrings) {
+ branchTestString(Equal, tag, &isString);
+ }
+ branchTestUndefined(Assembler::NotEqual, tag, fail);
+ break;
+ }
+ } else {
+ jump(fail);
+ }
+ }
+
+ // The value is null or undefined in truncation contexts - just emit 0.
+ if (conversion == IntConversionInputKind::Any) {
+ if (isNull.used()) {
+ bind(&isNull);
+ }
+ mov(ImmWord(0), output);
+ jump(&done);
+ }
+
+ // |output| needs to be different from |stringReg| to load string indices.
+ bool handleStringIndices = handleStrings && output != stringReg;
+
+ // First try loading a string index. If that fails, try converting a string
+ // into a double, then jump to the double case.
+ Label handleStringIndex;
+ if (handleStrings) {
+ bind(&isString);
+ unboxString(value, stringReg);
+ if (handleStringIndices) {
+ loadStringIndexValue(stringReg, output, handleStringEntry);
+ jump(&handleStringIndex);
+ } else {
+ jump(handleStringEntry);
+ }
+ }
+
+ // Try converting double into integer.
+ if (isDouble.used() || handleStrings) {
+ if (isDouble.used()) {
+ bind(&isDouble);
+ unboxDouble(value, temp);
+ }
+
+ if (handleStrings) {
+ bind(handleStringRejoin);
+ }
+
+ convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
+ jump(&done);
+ }
+
+ // Just unbox a bool, the result is 0 or 1.
+ if (isBool.used()) {
+ bind(&isBool);
+ unboxBoolean(value, output);
+ jump(&done);
+ }
+
+ // Integers can be unboxed.
+ if (isInt32.used() || handleStringIndices) {
+ if (isInt32.used()) {
+ bind(&isInt32);
+ unboxInt32(value, output);
+ }
+
+ if (handleStringIndices) {
+ bind(&handleStringIndex);
+ }
+
+ if (behavior == IntConversionBehavior::ClampToUint8) {
+ clampIntToUint8(output);
+ }
+ }
+
+ bind(&done);
+}
+
+void MacroAssembler::finish() {
+ if (failureLabel_.used()) {
+ bind(&failureLabel_);
+ handleFailure();
+ }
+
+ MacroAssemblerSpecific::finish();
+
+ MOZ_RELEASE_ASSERT(
+ size() <= MaxCodeBytesPerProcess,
+ "AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");
+
+ if (bytesNeeded() > MaxCodeBytesPerProcess) {
+ setOOM();
+ }
+}
+
+void MacroAssembler::link(JitCode* code) {
+ MOZ_ASSERT(!oom());
+ linkProfilerCallSites(code);
+}
+
+MacroAssembler::AutoProfilerCallInstrumentation::
+ AutoProfilerCallInstrumentation(MacroAssembler& masm) {
+ if (!masm.emitProfilingInstrumentation_) {
+ return;
+ }
+
+ Register reg = CallTempReg0;
+ Register reg2 = CallTempReg1;
+ masm.push(reg);
+ masm.push(reg2);
+
+ CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
+ masm.loadJSContext(reg2);
+ masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
+ masm.storePtr(reg,
+ Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));
+
+ masm.appendProfilerCallSite(label);
+
+ masm.pop(reg2);
+ masm.pop(reg);
+}
+
+void MacroAssembler::linkProfilerCallSites(JitCode* code) {
+ for (size_t i = 0; i < profilerCallSites_.length(); i++) {
+ CodeOffset offset = profilerCallSites_[i];
+ CodeLocationLabel location(code, offset);
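+    // Replace the placeholder (-1) written by AutoProfilerCallInstrumentation
+    // with the final address of the call site now that the code is linked.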
+ PatchDataWithValueCheck(location, ImmPtr(location.raw()),
+ ImmPtr((void*)-1));
+ }
+}
+
+void MacroAssembler::alignJitStackBasedOnNArgs(Register nargs,
+ bool countIncludesThis) {
+ // The stack should already be aligned to the size of a value.
+ assertStackAlignment(sizeof(Value), 0);
+
+ static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
+ "JitStackValueAlignment is either 1 or 2.");
+ if (JitStackValueAlignment == 1) {
+ return;
+ }
+ // A jit frame is composed of the following:
+ //
+ // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
+ // \________JitFrameLayout_________/
+ // (The stack grows this way --->)
+ //
+ // We want to ensure that |raddr|, the return address, is 16-byte aligned.
+ // (Note: if 8-byte alignment was sufficient, we would have already
+ // returned above.)
+
+ // JitFrameLayout does not affect the alignment, so we can ignore it.
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "JitFrameLayout doesn't affect stack alignment");
+
+ // Therefore, we need to ensure that |this| is aligned.
+ // This implies that |argN| must be aligned if N is even,
+ // and offset by |sizeof(Value)| if N is odd.
+
+ // Depending on the context of the caller, it may be easier to pass in a
+ // register that has already been modified to include |this|. If that is the
+ // case, we want to flip the direction of the test.
+ Assembler::Condition condition =
+ countIncludesThis ? Assembler::NonZero : Assembler::Zero;
+
+ Label alignmentIsOffset, end;
+ branchTestPtr(condition, nargs, Imm32(1), &alignmentIsOffset);
+
+ // |argN| should be aligned to 16 bytes.
+ andToStackPtr(Imm32(~(JitStackAlignment - 1)));
+ jump(&end);
+
+ // |argN| should be offset by 8 bytes from 16-byte alignment.
+ // We already know that it is 8-byte aligned, so the only possibilities are:
+ // a) It is 16-byte aligned, and we must offset it by 8 bytes.
+ // b) It is not 16-byte aligned, and therefore already has the right offset.
+ // Therefore, we test to see if it is 16-byte aligned, and adjust it if it is.
+ bind(&alignmentIsOffset);
+ branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
+ subFromStackPtr(Imm32(sizeof(Value)));
+
+ bind(&end);
+}
+
+void MacroAssembler::alignJitStackBasedOnNArgs(uint32_t argc,
+ bool countIncludesThis) {
+ // The stack should already be aligned to the size of a value.
+ assertStackAlignment(sizeof(Value), 0);
+
+ static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
+ "JitStackValueAlignment is either 1 or 2.");
+ if (JitStackValueAlignment == 1) {
+ return;
+ }
+
+ // See above for full explanation.
+ uint32_t nArgs = argc + !countIncludesThis;
+ if (nArgs % 2 == 0) {
+ // |argN| should be 16-byte aligned
+ andToStackPtr(Imm32(~(JitStackAlignment - 1)));
+ } else {
+    // |argN| should be offset by 8 bytes from 16-byte alignment. If the
+    // stack is currently 16-byte aligned, add the 8-byte padding slot;
+    // otherwise it already has the right offset.
+ Label end;
+ branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
+ subFromStackPtr(Imm32(sizeof(Value)));
+ bind(&end);
+ assertStackAlignment(JitStackAlignment, sizeof(Value));
+ }
+}
+
+// ===============================================================
+
+MacroAssembler::MacroAssembler(TempAllocator& alloc,
+ CompileRuntime* maybeRuntime,
+ CompileRealm* maybeRealm)
+ : maybeRuntime_(maybeRuntime),
+ maybeRealm_(maybeRealm),
+ wasmMaxOffsetGuardLimit_(0),
+ framePushed_(0),
+#ifdef DEBUG
+ inCall_(false),
+#endif
+ dynamicAlignment_(false),
+ emitProfilingInstrumentation_(false) {
+ moveResolver_.setAllocator(alloc);
+}
+
+StackMacroAssembler::StackMacroAssembler(JSContext* cx, TempAllocator& alloc)
+ : MacroAssembler(alloc, CompileRuntime::get(cx->runtime()),
+ CompileRealm::get(cx->realm())) {}
+
+IonHeapMacroAssembler::IonHeapMacroAssembler(TempAllocator& alloc,
+ CompileRealm* realm)
+ : MacroAssembler(alloc, realm->runtime(), realm) {
+ MOZ_ASSERT(CurrentThreadIsIonCompiling());
+}
+
+WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc, bool limitedSize)
+ : MacroAssembler(alloc) {
+#if defined(JS_CODEGEN_ARM64)
+ // Stubs + builtins + the baseline compiler all require the native SP,
+ // not the PSP.
+ SetStackPointer64(sp);
+#endif
+ if (!limitedSize) {
+ setUnlimitedBuffer();
+ }
+}
+
+WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc,
+ const wasm::ModuleEnvironment& env,
+ bool limitedSize)
+ : MacroAssembler(alloc) {
+#if defined(JS_CODEGEN_ARM64)
+ // Stubs + builtins + the baseline compiler all require the native SP,
+ // not the PSP.
+ SetStackPointer64(sp);
+#endif
+ setWasmMaxOffsetGuardLimit(
+ wasm::GetMaxOffsetGuardLimit(env.hugeMemoryEnabled()));
+ if (!limitedSize) {
+ setUnlimitedBuffer();
+ }
+}
+
+bool MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr,
+ AutoSaveLiveRegisters& save) {
+ return buildOOLFakeExitFrame(fakeReturnAddr);
+}
+
+#ifndef JS_CODEGEN_ARM64
+void MacroAssembler::subFromStackPtr(Register reg) {
+ subPtr(reg, getStackPointer());
+}
+#endif // JS_CODEGEN_ARM64
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
+void MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set) {
+ PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
+}
+
+void MacroAssembler::PopRegsInMask(LiveRegisterSet set) {
+ PopRegsInMaskIgnore(set, LiveRegisterSet());
+}
+
+void MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set) {
+ PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
+}
+
+void MacroAssembler::Push(PropertyKey key, Register scratchReg) {
+ if (key.isGCThing()) {
+ // If we're pushing a gcthing, then we can't just push the tagged key
+ // value since the GC won't have any idea that the push instruction
+ // carries a reference to a gcthing. Need to unpack the pointer,
+ // push it using ImmGCPtr, and then rematerialize the PropertyKey at
+ // runtime.
+
+ if (key.isString()) {
+ JSString* str = key.toString();
+ MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
+ static_assert(PropertyKey::StringTypeTag == 0,
+ "need to orPtr StringTypeTag if it's not 0");
+ Push(ImmGCPtr(str));
+ } else {
+ MOZ_ASSERT(key.isSymbol());
+ movePropertyKey(key, scratchReg);
+ Push(scratchReg);
+ }
+ } else {
+ MOZ_ASSERT(key.isInt());
+ Push(ImmWord(key.asRawBits()));
+ }
+}
+
+void MacroAssembler::movePropertyKey(PropertyKey key, Register dest) {
+ if (key.isGCThing()) {
+ // See comment in |Push(PropertyKey, ...)| above for an explanation.
+ if (key.isString()) {
+ JSString* str = key.toString();
+ MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
+      static_assert(PropertyKey::StringTypeTag == 0,
+                    "need to orPtr StringTypeTag if it's not 0");
+ movePtr(ImmGCPtr(str), dest);
+ } else {
+ MOZ_ASSERT(key.isSymbol());
+ JS::Symbol* sym = key.toSymbol();
+ movePtr(ImmGCPtr(sym), dest);
+ orPtr(Imm32(PropertyKey::SymbolTypeTag), dest);
+ }
+ } else {
+ MOZ_ASSERT(key.isInt());
+ movePtr(ImmWord(key.asRawBits()), dest);
+ }
+}
+
+void MacroAssembler::Push(TypedOrValueRegister v) {
+ if (v.hasValue()) {
+ Push(v.valueReg());
+ } else if (IsFloatingPointType(v.type())) {
+ FloatRegister reg = v.typedReg().fpu();
+ if (v.type() == MIRType::Float32) {
+ ScratchDoubleScope fpscratch(*this);
+ convertFloat32ToDouble(reg, fpscratch);
+ PushBoxed(fpscratch);
+ } else {
+ PushBoxed(reg);
+ }
+ } else {
+ Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
+ }
+}
+
+void MacroAssembler::Push(const ConstantOrRegister& v) {
+ if (v.constant()) {
+ Push(v.value());
+ } else {
+ Push(v.reg());
+ }
+}
+
+void MacroAssembler::Push(const Address& addr) {
+ push(addr);
+ framePushed_ += sizeof(uintptr_t);
+}
+
+void MacroAssembler::Push(const ValueOperand& val) {
+ pushValue(val);
+ framePushed_ += sizeof(Value);
+}
+
+void MacroAssembler::Push(const Value& val) {
+ pushValue(val);
+ framePushed_ += sizeof(Value);
+}
+
+void MacroAssembler::Push(JSValueType type, Register reg) {
+ pushValue(type, reg);
+ framePushed_ += sizeof(Value);
+}
+
+void MacroAssembler::Push(const Register64 reg) {
+#if JS_BITS_PER_WORD == 64
+ Push(reg.reg);
+#else
+ MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
+ Push(reg.high);
+ Push(reg.low);
+#endif
+}
+
+void MacroAssembler::PushEmptyRooted(VMFunctionData::RootType rootType) {
+ switch (rootType) {
+ case VMFunctionData::RootNone:
+ MOZ_CRASH("Handle must have root type");
+ case VMFunctionData::RootObject:
+ case VMFunctionData::RootString:
+ case VMFunctionData::RootCell:
+ case VMFunctionData::RootBigInt:
+ Push(ImmPtr(nullptr));
+ break;
+ case VMFunctionData::RootValue:
+ Push(UndefinedValue());
+ break;
+ case VMFunctionData::RootId:
+ Push(ImmWord(JS::PropertyKey::Void().asRawBits()));
+ break;
+ }
+}
+
+void MacroAssembler::popRooted(VMFunctionData::RootType rootType,
+ Register cellReg, const ValueOperand& valueReg) {
+ switch (rootType) {
+ case VMFunctionData::RootNone:
+ MOZ_CRASH("Handle must have root type");
+ case VMFunctionData::RootObject:
+ case VMFunctionData::RootString:
+ case VMFunctionData::RootCell:
+ case VMFunctionData::RootId:
+ case VMFunctionData::RootBigInt:
+ Pop(cellReg);
+ break;
+ case VMFunctionData::RootValue:
+ Pop(valueReg);
+ break;
+ }
+}
+
+void MacroAssembler::adjustStack(int amount) {
+ if (amount > 0) {
+ freeStack(amount);
+ } else if (amount < 0) {
+ reserveStack(-amount);
+ }
+}
+
+void MacroAssembler::freeStack(uint32_t amount) {
+ MOZ_ASSERT(amount <= framePushed_);
+ if (amount) {
+ addToStackPtr(Imm32(amount));
+ }
+ framePushed_ -= amount;
+}
+
+void MacroAssembler::freeStack(Register amount) { addToStackPtr(amount); }
+
+// ===============================================================
+// ABI function calls.
+template <class ABIArgGeneratorT>
+void MacroAssembler::setupABICallHelper() {
+#ifdef DEBUG
+ MOZ_ASSERT(!inCall_);
+ inCall_ = true;
+#endif
+
+#ifdef JS_SIMULATOR
+ signature_ = 0;
+#endif
+
+ // Reinitialize the ABIArg generator.
+ abiArgs_ = ABIArgGeneratorT();
+
+#if defined(JS_CODEGEN_ARM)
+ // On ARM, we need to know what ABI we are using, either in the
+ // simulator, or based on the configure flags.
+# if defined(JS_SIMULATOR_ARM)
+ abiArgs_.setUseHardFp(UseHardFpABI());
+# elif defined(JS_CODEGEN_ARM_HARDFP)
+ abiArgs_.setUseHardFp(true);
+# else
+ abiArgs_.setUseHardFp(false);
+# endif
+#endif
+
+#if defined(JS_CODEGEN_MIPS32)
+  // On MIPS, the system ABI uses general register pairs to encode double
+  // arguments after one or two integer-like arguments. Unfortunately, the
+  // Lowering phase cannot express this at the moment, so we enforce the
+  // system ABI here.
+ abiArgs_.enforceO32ABI();
+#endif
+}
+
+void MacroAssembler::setupNativeABICall() {
+ setupABICallHelper<ABIArgGenerator>();
+}
+
+void MacroAssembler::setupWasmABICall() {
+ MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
+ setupABICallHelper<WasmABIArgGenerator>();
+
+#if defined(JS_CODEGEN_ARM)
+ // The builtin thunk does the FP -> GPR moving on soft-FP, so
+ // use hard fp unconditionally.
+ abiArgs_.setUseHardFp(true);
+#endif
+ dynamicAlignment_ = false;
+}
+
+void MacroAssembler::setupAlignedABICall() {
+ MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
+ setupNativeABICall();
+ dynamicAlignment_ = false;
+}
+
+void MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type) {
+ MOZ_ASSERT(inCall_);
+ appendSignatureType(type);
+
+ ABIArg arg;
+ switch (type) {
+ case MoveOp::FLOAT32:
+ arg = abiArgs_.next(MIRType::Float32);
+ break;
+ case MoveOp::DOUBLE:
+ arg = abiArgs_.next(MIRType::Double);
+ break;
+ case MoveOp::GENERAL:
+ arg = abiArgs_.next(MIRType::Pointer);
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+
+ MoveOperand to(*this, arg);
+ if (from == to) {
+ return;
+ }
+
+ if (oom()) {
+ return;
+ }
+ propagateOOM(moveResolver_.addMove(from, to, type));
+}
+
+void MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result,
+ CheckUnsafeCallWithABI check) {
+ appendSignatureType(result);
+#ifdef JS_SIMULATOR
+ fun = Simulator::RedirectNativeFunction(fun, signature());
+#endif
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+
+#ifdef DEBUG
+ if (check == CheckUnsafeCallWithABI::Check) {
+ push(ReturnReg);
+ loadJSContext(ReturnReg);
+ Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
+ store32(Imm32(1), flagAddr);
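+    // The callee is expected to create an AutoUnsafeCallWithABI, whose
+    // destructor clears this flag; the check after the call below asserts
+    // that it did.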
+ pop(ReturnReg);
+ // On arm64, SP may be < PSP now (that's OK).
+ // eg testcase: tests/bug1375074.js
+ }
+#endif
+
+ call(ImmPtr(fun));
+
+ callWithABIPost(stackAdjust, result);
+
+#ifdef DEBUG
+ if (check == CheckUnsafeCallWithABI::Check) {
+ Label ok;
+ push(ReturnReg);
+ loadJSContext(ReturnReg);
+ Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
+ branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
+ assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
+ bind(&ok);
+ pop(ReturnReg);
+ // On arm64, SP may be < PSP now (that's OK).
+ // eg testcase: tests/bug1375074.js
+ }
+#endif
+}
+
+CodeOffset MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode,
+ wasm::SymbolicAddress imm,
+ mozilla::Maybe<int32_t> instanceOffset,
+ MoveOp::Type result) {
+ MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust, /* callFromWasm = */ true);
+
+ // The instance register is used in builtin thunks and must be set.
+ if (instanceOffset) {
+ loadPtr(Address(getStackPointer(), *instanceOffset + stackAdjust),
+ InstanceReg);
+ } else {
+ MOZ_CRASH("instanceOffset is Nothing only for unsupported abi calls.");
+ }
+ CodeOffset raOffset = call(
+ wasm::CallSiteDesc(bytecode.offset(), wasm::CallSite::Symbolic), imm);
+
+ callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);
+
+ return raOffset;
+}
+
+void MacroAssembler::callDebugWithABI(wasm::SymbolicAddress imm,
+ MoveOp::Type result) {
+ MOZ_ASSERT(!wasm::NeedsBuiltinThunk(imm));
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust, /* callFromWasm = */ false);
+ call(imm);
+ callWithABIPost(stackAdjust, result, /* callFromWasm = */ false);
+}
+
+// ===============================================================
+// Exit frame footer.
+
+void MacroAssembler::linkExitFrame(Register cxreg, Register scratch) {
+ loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
+ storeStackPtr(Address(scratch, JitActivation::offsetOfPackedExitFP()));
+}
+
+// ===============================================================
+// Simple value-shuffling helpers, to hide MoveResolver verbosity
+// in common cases.
+
+void MacroAssembler::moveRegPair(Register src0, Register src1, Register dst0,
+ Register dst1, MoveOp::Type type) {
+ MoveResolver& moves = moveResolver();
+ if (src0 != dst0) {
+ propagateOOM(moves.addMove(MoveOperand(src0), MoveOperand(dst0), type));
+ }
+ if (src1 != dst1) {
+ propagateOOM(moves.addMove(MoveOperand(src1), MoveOperand(dst1), type));
+ }
+ propagateOOM(moves.resolve());
+ if (oom()) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moves);
+ emitter.finish();
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::pow32(Register base, Register power, Register dest,
+ Register temp1, Register temp2, Label* onOver) {
+ // Inline int32-specialized implementation of js::powi with overflow
+ // detection.
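+  //
+  // The loop below is square-and-multiply (binary exponentiation):
+  //
+  //   result = 1;
+  //   while (true) {
+  //     if (n & 1) result *= runningSquare;  // overflow -> onOver
+  //     n >>= 1;
+  //     if (n == 0) break;
+  //     runningSquare *= runningSquare;      // overflow -> onOver
+  //   }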
+
+ move32(Imm32(1), dest); // result = 1
+
+ // x^y where x == 1 returns 1 for any y.
+ Label done;
+ branch32(Assembler::Equal, base, Imm32(1), &done);
+
+ move32(base, temp1); // runningSquare = x
+ move32(power, temp2); // n = y
+
+  // x^y where y < 0 returns a non-int32 value for any x != 1, except when the
+  // magnitude of y is so large that the result is no longer representable as
+  // a double with a fractional part. We can't easily determine when y is too
+  // large, so we bail here.
+ // Note: it's important for this condition to match the code in CacheIR.cpp
+ // (CanAttachInt32Pow) to prevent failure loops.
+ Label start;
+ branchTest32(Assembler::NotSigned, power, power, &start);
+ jump(onOver);
+
+ Label loop;
+ bind(&loop);
+
+ // runningSquare *= runningSquare
+ branchMul32(Assembler::Overflow, temp1, temp1, onOver);
+
+ bind(&start);
+
+ // if ((n & 1) != 0) result *= runningSquare
+ Label even;
+ branchTest32(Assembler::Zero, temp2, Imm32(1), &even);
+ branchMul32(Assembler::Overflow, temp1, dest, onOver);
+ bind(&even);
+
+ // n >>= 1
+ // if (n == 0) return result
+ branchRshift32(Assembler::NonZero, Imm32(1), temp2, &loop);
+
+ bind(&done);
+}
+
+void MacroAssembler::signInt32(Register input, Register output) {
+ MOZ_ASSERT(input != output);
+
+ Label done;
+ move32(input, output);
+ rshift32Arithmetic(Imm32(31), output);
+ branch32(Assembler::LessThanOrEqual, input, Imm32(0), &done);
+ move32(Imm32(1), output);
+ bind(&done);
+}
+
+void MacroAssembler::signDouble(FloatRegister input, FloatRegister output) {
+ MOZ_ASSERT(input != output);
+
+ Label done, zeroOrNaN, negative;
+ loadConstantDouble(0.0, output);
+ branchDouble(Assembler::DoubleEqualOrUnordered, input, output, &zeroOrNaN);
+ branchDouble(Assembler::DoubleLessThan, input, output, &negative);
+
+ loadConstantDouble(1.0, output);
+ jump(&done);
+
+ bind(&negative);
+ loadConstantDouble(-1.0, output);
+ jump(&done);
+
+ bind(&zeroOrNaN);
+ moveDouble(input, output);
+
+ bind(&done);
+}
+
+void MacroAssembler::signDoubleToInt32(FloatRegister input, Register output,
+ FloatRegister temp, Label* fail) {
+ MOZ_ASSERT(input != temp);
+
+ Label done, zeroOrNaN, negative;
+ loadConstantDouble(0.0, temp);
+ branchDouble(Assembler::DoubleEqualOrUnordered, input, temp, &zeroOrNaN);
+ branchDouble(Assembler::DoubleLessThan, input, temp, &negative);
+
+ move32(Imm32(1), output);
+ jump(&done);
+
+ bind(&negative);
+ move32(Imm32(-1), output);
+ jump(&done);
+
+ // Fail for NaN and negative zero.
+ bind(&zeroOrNaN);
+ branchDouble(Assembler::DoubleUnordered, input, input, fail);
+
+ // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
+ // is -Infinity instead of Infinity.
+ loadConstantDouble(1.0, temp);
+ divDouble(input, temp);
+ branchDouble(Assembler::DoubleLessThan, temp, input, fail);
+ move32(Imm32(0), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::randomDouble(Register rng, FloatRegister dest,
+ Register64 temp0, Register64 temp1) {
+ using mozilla::non_crypto::XorShift128PlusRNG;
+
+ static_assert(
+ sizeof(XorShift128PlusRNG) == 2 * sizeof(uint64_t),
+ "Code below assumes XorShift128PlusRNG contains two uint64_t values");
+
+ Address state0Addr(rng, XorShift128PlusRNG::offsetOfState0());
+ Address state1Addr(rng, XorShift128PlusRNG::offsetOfState1());
+
+ Register64 s0Reg = temp0;
+ Register64 s1Reg = temp1;
+
+ // uint64_t s1 = mState[0];
+ load64(state0Addr, s1Reg);
+
+ // s1 ^= s1 << 23;
+ move64(s1Reg, s0Reg);
+ lshift64(Imm32(23), s1Reg);
+ xor64(s0Reg, s1Reg);
+
+ // s1 ^= s1 >> 17
+ move64(s1Reg, s0Reg);
+ rshift64(Imm32(17), s1Reg);
+ xor64(s0Reg, s1Reg);
+
+ // const uint64_t s0 = mState[1];
+ load64(state1Addr, s0Reg);
+
+ // mState[0] = s0;
+ store64(s0Reg, state0Addr);
+
+ // s1 ^= s0
+ xor64(s0Reg, s1Reg);
+
+ // s1 ^= s0 >> 26
+ rshift64(Imm32(26), s0Reg);
+ xor64(s0Reg, s1Reg);
+
+ // mState[1] = s1
+ store64(s1Reg, state1Addr);
+
+ // s1 += mState[0]
+ load64(state0Addr, s0Reg);
+ add64(s0Reg, s1Reg);
+
+ // See comment in XorShift128PlusRNG::nextDouble().
+ static constexpr int MantissaBits =
+ mozilla::FloatingPoint<double>::kExponentShift + 1;
+ static constexpr double ScaleInv = double(1) / (1ULL << MantissaBits);
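+  // Mask s1 down to its low MantissaBits bits and scale by 2^-MantissaBits
+  // (ScaleInv), producing a uniformly distributed double in [0, 1).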
+
+ and64(Imm64((1ULL << MantissaBits) - 1), s1Reg);
+
+ // Note: we know s1Reg isn't signed after the and64 so we can use the faster
+ // convertInt64ToDouble instead of convertUInt64ToDouble.
+ convertInt64ToDouble(s1Reg, dest);
+
+ // dest *= ScaleInv
+ mulDoublePtr(ImmPtr(&ScaleInv), s0Reg.scratchReg(), dest);
+}
+
+void MacroAssembler::sameValueDouble(FloatRegister left, FloatRegister right,
+ FloatRegister temp, Register dest) {
+ Label nonEqual, isSameValue, isNotSameValue;
+ branchDouble(Assembler::DoubleNotEqualOrUnordered, left, right, &nonEqual);
+ {
+ // First, test for being equal to 0.0, which also includes -0.0.
+ loadConstantDouble(0.0, temp);
+ branchDouble(Assembler::DoubleNotEqual, left, temp, &isSameValue);
+
+ // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
+ // is -Infinity instead of Infinity.
+ Label isNegInf;
+ loadConstantDouble(1.0, temp);
+ divDouble(left, temp);
+ branchDouble(Assembler::DoubleLessThan, temp, left, &isNegInf);
+ {
+ loadConstantDouble(1.0, temp);
+ divDouble(right, temp);
+ branchDouble(Assembler::DoubleGreaterThan, temp, right, &isSameValue);
+ jump(&isNotSameValue);
+ }
+ bind(&isNegInf);
+ {
+ loadConstantDouble(1.0, temp);
+ divDouble(right, temp);
+ branchDouble(Assembler::DoubleLessThan, temp, right, &isSameValue);
+ jump(&isNotSameValue);
+ }
+ }
+ bind(&nonEqual);
+ {
+ // Test if both values are NaN.
+ branchDouble(Assembler::DoubleOrdered, left, left, &isNotSameValue);
+ branchDouble(Assembler::DoubleOrdered, right, right, &isNotSameValue);
+ }
+
+ Label done;
+ bind(&isSameValue);
+ move32(Imm32(1), dest);
+ jump(&done);
+
+ bind(&isNotSameValue);
+ move32(Imm32(0), dest);
+
+ bind(&done);
+}
+
+void MacroAssembler::minMaxArrayInt32(Register array, Register result,
+ Register temp1, Register temp2,
+ Register temp3, bool isMax, Label* fail) {
+ // array must be a packed array. Load its elements.
+ Register elements = temp1;
+ loadPtr(Address(array, NativeObject::offsetOfElements()), elements);
+
+ // Load the length and guard that it is non-zero.
+ Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
+ load32(lengthAddr, temp3);
+ branchTest32(Assembler::Zero, temp3, temp3, fail);
+
+ // Compute the address of the last element.
+ Register elementsEnd = temp2;
+ BaseObjectElementIndex elementsEndAddr(elements, temp3,
+ -int32_t(sizeof(Value)));
+ computeEffectiveAddress(elementsEndAddr, elementsEnd);
+
+ // Load the first element into result.
+ fallibleUnboxInt32(Address(elements, 0), result, fail);
+
+ Label loop, done;
+ bind(&loop);
+
+ // Check whether we're done.
+ branchPtr(Assembler::Equal, elements, elementsEnd, &done);
+
+ // If not, advance to the next element and load it.
+ addPtr(Imm32(sizeof(Value)), elements);
+ fallibleUnboxInt32(Address(elements, 0), temp3, fail);
+
+ // Update result if necessary.
+ Assembler::Condition cond =
+ isMax ? Assembler::GreaterThan : Assembler::LessThan;
+ cmp32Move32(cond, temp3, result, temp3, result);
+
+ jump(&loop);
+ bind(&done);
+}
+
+void MacroAssembler::minMaxArrayNumber(Register array, FloatRegister result,
+ FloatRegister floatTemp, Register temp1,
+ Register temp2, bool isMax,
+ Label* fail) {
+ // array must be a packed array. Load its elements.
+ Register elements = temp1;
+ loadPtr(Address(array, NativeObject::offsetOfElements()), elements);
+
+ // Load the length and check if the array is empty.
+ Label isEmpty;
+ Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
+ load32(lengthAddr, temp2);
+ branchTest32(Assembler::Zero, temp2, temp2, &isEmpty);
+
+ // Compute the address of the last element.
+ Register elementsEnd = temp2;
+ BaseObjectElementIndex elementsEndAddr(elements, temp2,
+ -int32_t(sizeof(Value)));
+ computeEffectiveAddress(elementsEndAddr, elementsEnd);
+
+ // Load the first element into result.
+ ensureDouble(Address(elements, 0), result, fail);
+
+ Label loop, done;
+ bind(&loop);
+
+ // Check whether we're done.
+ branchPtr(Assembler::Equal, elements, elementsEnd, &done);
+
+ // If not, advance to the next element and load it into floatTemp.
+ addPtr(Imm32(sizeof(Value)), elements);
+ ensureDouble(Address(elements, 0), floatTemp, fail);
+
+ // Update result if necessary.
+ if (isMax) {
+ maxDouble(floatTemp, result, /* handleNaN = */ true);
+ } else {
+ minDouble(floatTemp, result, /* handleNaN = */ true);
+ }
+ jump(&loop);
+
+ // With no arguments, min/max return +Infinity/-Infinity respectively.
+ bind(&isEmpty);
+ if (isMax) {
+ loadConstantDouble(mozilla::NegativeInfinity<double>(), result);
+ } else {
+ loadConstantDouble(mozilla::PositiveInfinity<double>(), result);
+ }
+
+ bind(&done);
+}
+
+void MacroAssembler::branchIfNotRegExpPrototypeOptimizable(Register proto,
+ Register temp,
+ Label* fail) {
+ loadJSContext(temp);
+ loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
+ size_t offset = Realm::offsetOfRegExps() +
+ RegExpRealm::offsetOfOptimizableRegExpPrototypeShape();
+ loadPtr(Address(temp, offset), temp);
+ branchTestObjShapeUnsafe(Assembler::NotEqual, proto, temp, fail);
+}
+
+void MacroAssembler::branchIfNotRegExpInstanceOptimizable(Register regexp,
+ Register temp,
+ Label* label) {
+ loadJSContext(temp);
+ loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
+ size_t offset = Realm::offsetOfRegExps() +
+ RegExpRealm::offsetOfOptimizableRegExpInstanceShape();
+ loadPtr(Address(temp, offset), temp);
+ branchTestObjShapeUnsafe(Assembler::NotEqual, regexp, temp, label);
+}
+
+void MacroAssembler::loadRegExpLastIndex(Register regexp, Register string,
+ Register lastIndex,
+ Label* notFoundZeroLastIndex) {
+ Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
+ Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
+ Address stringLength(string, JSString::offsetOfLength());
+
+ Label notGlobalOrSticky, loadedLastIndex;
+
+ branchTest32(Assembler::Zero, flagsSlot,
+ Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
+ &notGlobalOrSticky);
+ {
+ // It's a global or sticky regular expression. Emit the following code:
+ //
+ // lastIndex = regexp.lastIndex
+ // if lastIndex > string.length:
+ // jump to notFoundZeroLastIndex (skip the regexp match/test operation)
+ //
+ // The `notFoundZeroLastIndex` code should set regexp.lastIndex to 0 and
+ // treat this as a not-found result.
+ //
+ // See steps 5-8 in js::RegExpBuiltinExec.
+ //
+ // Earlier guards must have ensured regexp.lastIndex is a non-negative
+ // integer.
+#ifdef DEBUG
+ {
+ Label ok;
+ branchTestInt32(Assembler::Equal, lastIndexSlot, &ok);
+ assumeUnreachable("Expected int32 value for lastIndex");
+ bind(&ok);
+ }
+#endif
+ unboxInt32(lastIndexSlot, lastIndex);
+#ifdef DEBUG
+ {
+ Label ok;
+ branchTest32(Assembler::NotSigned, lastIndex, lastIndex, &ok);
+ assumeUnreachable("Expected non-negative lastIndex");
+ bind(&ok);
+ }
+#endif
+ branch32(Assembler::Below, stringLength, lastIndex, notFoundZeroLastIndex);
+ jump(&loadedLastIndex);
+ }
+
+ bind(&notGlobalOrSticky);
+ move32(Imm32(0), lastIndex);
+
+ bind(&loadedLastIndex);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::loadFunctionLength(Register func,
+ Register funFlagsAndArgCount,
+ Register output, Label* slowPath) {
+#ifdef DEBUG
+ {
+ // These flags should already have been checked by caller.
+ Label ok;
+ uint32_t FlagsToCheck =
+ FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH;
+ branchTest32(Assembler::Zero, funFlagsAndArgCount, Imm32(FlagsToCheck),
+ &ok);
+ assumeUnreachable("The function flags should already have been checked.");
+ bind(&ok);
+ }
+#endif // DEBUG
+
+ // NOTE: `funFlagsAndArgCount` and `output` must be allowed to alias.
+
+ // Load the target function's length.
+ Label isInterpreted, lengthLoaded;
+ branchTest32(Assembler::NonZero, funFlagsAndArgCount,
+ Imm32(FunctionFlags::BASESCRIPT), &isInterpreted);
+ {
+    // The length property of a native function is stored with the flags.
+ move32(funFlagsAndArgCount, output);
+ rshift32(Imm32(JSFunction::ArgCountShift), output);
+ jump(&lengthLoaded);
+ }
+ bind(&isInterpreted);
+ {
+ // Load the length property of an interpreted function.
+ loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), output);
+ loadPtr(Address(output, JSScript::offsetOfSharedData()), output);
+ branchTestPtr(Assembler::Zero, output, output, slowPath);
+ loadPtr(Address(output, SharedImmutableScriptData::offsetOfISD()), output);
+ load16ZeroExtend(Address(output, ImmutableScriptData::offsetOfFunLength()),
+ output);
+ }
+ bind(&lengthLoaded);
+}
+
+void MacroAssembler::loadFunctionName(Register func, Register output,
+ ImmGCPtr emptyString, Label* slowPath) {
+ MOZ_ASSERT(func != output);
+
+ // Get the JSFunction flags.
+ load32(Address(func, JSFunction::offsetOfFlagsAndArgCount()), output);
+
+ // If the name was previously resolved, the name property may be shadowed.
+ branchTest32(Assembler::NonZero, output, Imm32(FunctionFlags::RESOLVED_NAME),
+ slowPath);
+
+ Label noName, done;
+ branchTest32(Assembler::NonZero, output,
+ Imm32(FunctionFlags::HAS_GUESSED_ATOM), &noName);
+
+ Address atomAddr(func, JSFunction::offsetOfAtom());
+ branchTestUndefined(Assembler::Equal, atomAddr, &noName);
+ unboxString(atomAddr, output);
+ jump(&done);
+
+ {
+ bind(&noName);
+
+ // An absent name property defaults to the empty string.
+ movePtr(emptyString, output);
+ }
+
+ bind(&done);
+}
+
+void MacroAssembler::assertFunctionIsExtended(Register func) {
+#ifdef DEBUG
+ Label extended;
+ branchTestFunctionFlags(func, FunctionFlags::EXTENDED, Assembler::NonZero,
+ &extended);
+ assumeUnreachable("Function is not extended");
+ bind(&extended);
+#endif
+}
+
+void MacroAssembler::branchTestType(Condition cond, Register tag,
+ JSValueType type, Label* label) {
+ switch (type) {
+ case JSVAL_TYPE_DOUBLE:
+ branchTestDouble(cond, tag, label);
+ break;
+ case JSVAL_TYPE_INT32:
+ branchTestInt32(cond, tag, label);
+ break;
+ case JSVAL_TYPE_BOOLEAN:
+ branchTestBoolean(cond, tag, label);
+ break;
+ case JSVAL_TYPE_UNDEFINED:
+ branchTestUndefined(cond, tag, label);
+ break;
+ case JSVAL_TYPE_NULL:
+ branchTestNull(cond, tag, label);
+ break;
+ case JSVAL_TYPE_MAGIC:
+ branchTestMagic(cond, tag, label);
+ break;
+ case JSVAL_TYPE_STRING:
+ branchTestString(cond, tag, label);
+ break;
+ case JSVAL_TYPE_SYMBOL:
+ branchTestSymbol(cond, tag, label);
+ break;
+ case JSVAL_TYPE_BIGINT:
+ branchTestBigInt(cond, tag, label);
+ break;
+ case JSVAL_TYPE_OBJECT:
+ branchTestObject(cond, tag, label);
+ break;
+ default:
+ MOZ_CRASH("Unexpected value type");
+ }
+}
+
+void MacroAssembler::branchTestObjShapeList(
+ Condition cond, Register obj, Register shapeElements, Register shapeScratch,
+ Register endScratch, Register spectreScratch, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ bool needSpectreMitigations = spectreScratch != InvalidReg;
+
+ Label done;
+ Label* onMatch = cond == Assembler::Equal ? label : &done;
+
+ // Load the object's shape pointer into shapeScratch, and prepare to compare
+ // it with the shapes in the list. On 64-bit, we box the shape. On 32-bit,
+ // we only have to compare the 32-bit payload.
+#ifdef JS_PUNBOX64
+ loadPtr(Address(obj, JSObject::offsetOfShape()), endScratch);
+ tagValue(JSVAL_TYPE_PRIVATE_GCTHING, endScratch, ValueOperand(shapeScratch));
+#else
+ loadPtr(Address(obj, JSObject::offsetOfShape()), shapeScratch);
+#endif
+
+ // Compute end pointer.
+ Address lengthAddr(shapeElements,
+ ObjectElements::offsetOfInitializedLength());
+ load32(lengthAddr, endScratch);
+ BaseObjectElementIndex endPtrAddr(shapeElements, endScratch);
+ computeEffectiveAddress(endPtrAddr, endScratch);
+
+ Label loop;
+ bind(&loop);
+
+ // Compare the object's shape with a shape from the list. Note that on 64-bit
+ // this includes the tag bits, but on 32-bit we only compare the low word of
+ // the value. This is fine because the list of shapes is never exposed and the
+ // tag is guaranteed to be PrivateGCThing.
+ if (needSpectreMitigations) {
+ move32(Imm32(0), spectreScratch);
+ }
+ branchPtr(Assembler::Equal, Address(shapeElements, 0), shapeScratch, onMatch);
+ if (needSpectreMitigations) {
+ spectreMovePtr(Assembler::Equal, spectreScratch, obj);
+ }
+
+ // Advance to next shape and loop if not finished.
+ addPtr(Imm32(sizeof(Value)), shapeElements);
+ branchPtr(Assembler::Below, shapeElements, endScratch, &loop);
+
+ if (cond == Assembler::NotEqual) {
+ jump(label);
+ bind(&done);
+ }
+}
+
+void MacroAssembler::branchTestObjCompartment(Condition cond, Register obj,
+ const Address& compartment,
+ Register scratch, Label* label) {
+ MOZ_ASSERT(obj != scratch);
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
+ loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
+ loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
+ branchPtr(cond, compartment, scratch, label);
+}
+
+void MacroAssembler::branchTestObjCompartment(
+ Condition cond, Register obj, const JS::Compartment* compartment,
+ Register scratch, Label* label) {
+ MOZ_ASSERT(obj != scratch);
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
+ loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
+ loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
+ branchPtr(cond, scratch, ImmPtr(compartment), label);
+}
+
+void MacroAssembler::branchIfNonNativeObj(Register obj, Register scratch,
+ Label* label) {
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ branchTest32(Assembler::Zero,
+ Address(scratch, Shape::offsetOfImmutableFlags()),
+ Imm32(Shape::isNativeBit()), label);
+}
+
+void MacroAssembler::branchIfObjectNotExtensible(Register obj, Register scratch,
+ Label* label) {
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+
+ // Spectre-style checks are not needed here because we do not interpret data
+ // based on this check.
+ static_assert(sizeof(ObjectFlags) == sizeof(uint16_t));
+ load16ZeroExtend(Address(scratch, Shape::offsetOfObjectFlags()), scratch);
+ branchTest32(Assembler::NonZero, scratch,
+ Imm32(uint32_t(ObjectFlag::NotExtensible)), label);
+}
+
+void MacroAssembler::wasmTrap(wasm::Trap trap,
+ wasm::BytecodeOffset bytecodeOffset) {
+ uint32_t trapOffset = wasmTrapInstruction().offset();
+ MOZ_ASSERT_IF(!oom(),
+ currentOffset() - trapOffset == WasmTrapInstructionLength);
+
+ append(trap, wasm::TrapSite(trapOffset, bytecodeOffset));
+}
+
+std::pair<CodeOffset, uint32_t> MacroAssembler::wasmReserveStackChecked(
+ uint32_t amount, wasm::BytecodeOffset trapOffset) {
+ if (amount > MAX_UNCHECKED_LEAF_FRAME_SIZE) {
+ // The frame is large. Don't bump sp until after the stack limit check so
+ // that the trap handler isn't called with a wild sp.
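+ // That is, trap if |sp < amount| (the subtraction below would wrap) or if
+ // |sp - amount| does not stay above the instance's stack limit.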
+ Label ok;
+ Register scratch = ABINonArgReg0;
+ moveStackPtrTo(scratch);
+
+ Label trap;
+ branchPtr(Assembler::Below, scratch, Imm32(amount), &trap);
+ subPtr(Imm32(amount), scratch);
+ branchPtr(Assembler::Below,
+ Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
+ scratch, &ok);
+
+ bind(&trap);
+ wasmTrap(wasm::Trap::StackOverflow, trapOffset);
+ CodeOffset trapInsnOffset = CodeOffset(currentOffset());
+
+ bind(&ok);
+ reserveStack(amount);
+ return std::pair<CodeOffset, uint32_t>(trapInsnOffset, 0);
+ }
+
+ reserveStack(amount);
+ Label ok;
+ branchStackPtrRhs(Assembler::Below,
+ Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
+ &ok);
+ wasmTrap(wasm::Trap::StackOverflow, trapOffset);
+ CodeOffset trapInsnOffset = CodeOffset(currentOffset());
+ bind(&ok);
+ return std::pair<CodeOffset, uint32_t>(trapInsnOffset, amount);
+}
+
+CodeOffset MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee) {
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
+
+ // Load the callee, before the caller's registers are clobbered.
+ uint32_t instanceDataOffset = callee.importInstanceDataOffset();
+ loadPtr(
+ Address(InstanceReg, wasm::Instance::offsetInData(
+ instanceDataOffset +
+ offsetof(wasm::FuncImportInstanceData, code))),
+ ABINonArgReg0);
+
+#if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
+ static_assert(ABINonArgReg0 != InstanceReg, "by constraint");
+#endif
+
+ // Switch to the callee's realm.
+ loadPtr(
+ Address(InstanceReg, wasm::Instance::offsetInData(
+ instanceDataOffset +
+ offsetof(wasm::FuncImportInstanceData, realm))),
+ ABINonArgReg1);
+ loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), ABINonArgReg2);
+ storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));
+
+ // Switch to the callee's instance and pinned registers and make the call.
+ loadPtr(Address(InstanceReg,
+ wasm::Instance::offsetInData(
+ instanceDataOffset +
+ offsetof(wasm::FuncImportInstanceData, instance))),
+ InstanceReg);
+
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
+ loadWasmPinnedRegsFromInstance();
+
+ return call(desc, ABINonArgReg0);
+}
+
+CodeOffset MacroAssembler::wasmCallBuiltinInstanceMethod(
+ const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
+ wasm::SymbolicAddress builtin, wasm::FailureMode failureMode) {
+ MOZ_ASSERT(instanceArg != ABIArg());
+
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
+
+ if (instanceArg.kind() == ABIArg::GPR) {
+ movePtr(InstanceReg, instanceArg.gpr());
+ } else if (instanceArg.kind() == ABIArg::Stack) {
+ storePtr(InstanceReg,
+ Address(getStackPointer(), instanceArg.offsetFromArgBase()));
+ } else {
+ MOZ_CRASH("Unknown abi passing style for pointer");
+ }
+
+ CodeOffset ret = call(desc, builtin);
+
+ if (failureMode != wasm::FailureMode::Infallible) {
+ Label noTrap;
+ switch (failureMode) {
+ case wasm::FailureMode::Infallible:
+ MOZ_CRASH();
+ case wasm::FailureMode::FailOnNegI32:
+ branchTest32(Assembler::NotSigned, ReturnReg, ReturnReg, &noTrap);
+ break;
+ case wasm::FailureMode::FailOnNullPtr:
+ branchTestPtr(Assembler::NonZero, ReturnReg, ReturnReg, &noTrap);
+ break;
+ case wasm::FailureMode::FailOnInvalidRef:
+ branchPtr(Assembler::NotEqual, ReturnReg,
+ ImmWord(uintptr_t(wasm::AnyRef::invalid().forCompiledCode())),
+ &noTrap);
+ break;
+ }
+ wasmTrap(wasm::Trap::ThrowReported,
+ wasm::BytecodeOffset(desc.lineOrBytecode()));
+ bind(&noTrap);
+ }
+
+ return ret;
+}
+
+CodeOffset MacroAssembler::asmCallIndirect(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee) {
+ MOZ_ASSERT(callee.which() == wasm::CalleeDesc::AsmJSTable);
+
+ const Register scratch = WasmTableCallScratchReg0;
+ const Register index = WasmTableCallIndexReg;
+
+ // Optimization opportunity: when offsetof(FunctionTableElem, code) == 0, as
+ // it is at present, we can probably generate better code here by folding
+ // the address computation into the load.
+
+ static_assert(sizeof(wasm::FunctionTableElem) == 8 ||
+ sizeof(wasm::FunctionTableElem) == 16,
+ "elements of function tables are two words");
+
+ // asm.js tables require no signature check, and have had their index
+ // masked into range and thus need no bounds check.
+ loadPtr(
+ Address(InstanceReg, wasm::Instance::offsetInData(
+ callee.tableFunctionBaseInstanceDataOffset())),
+ scratch);
+ if (sizeof(wasm::FunctionTableElem) == 8) {
+ computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
+ } else {
+ lshift32(Imm32(4), index);
+ addPtr(index, scratch);
+ }
+ loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
+ return call(desc, scratch);
+}
+
+// In principle, call_indirect requires an expensive context switch to the
+// callee's instance and realm before the call and an almost equally expensive
+// switch back to the caller's instance and realm after it. However, if the
+// caller's instance is the same as the callee's instance then no context
+// switch is required, and it only takes a compare-and-branch at run-time to
+// test this - all values are in registers already. We therefore generate two
+// call paths, one for the fast call without the context switch (which
+// additionally avoids a null check) and one for the slow call with the
+// context switch.
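+//
+// An illustrative sketch of the dispatch (not the emitted code; helper names
+// are invented for exposition):
+//
+//   elem = table[index];
+//   if (elem.instance == callerInstance) {
+//     call elem.code;                           // fast path
+//   } else {
+//     save caller instance;
+//     switch to elem.instance and its realm;    // includes the null check
+//     call elem.code;
+//     restore caller instance, pinned registers and realm;
+//   }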
+
+void MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee,
+ Label* boundsCheckFailedLabel,
+ Label* nullCheckFailedLabel,
+ mozilla::Maybe<uint32_t> tableSize,
+ CodeOffset* fastCallOffset,
+ CodeOffset* slowCallOffset) {
+ static_assert(sizeof(wasm::FunctionTableElem) == 2 * sizeof(void*),
+ "Exactly two pointers or index scaling won't work correctly");
+ MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);
+
+ const int shift = sizeof(wasm::FunctionTableElem) == 8 ? 3 : 4;
+ wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
+ const Register calleeScratch = WasmTableCallScratchReg0;
+ const Register index = WasmTableCallIndexReg;
+
+ // Check the table index and throw if out-of-bounds.
+ //
+ // Frequently the table size is known, so optimize for that. Otherwise
+ // compare with a memory operand when that's possible. (There's little sense
+ // in hoisting the load of the bound into a register at a higher level and
+ // reusing that register, because a hoisted value would either have to be
+ // spilled and re-loaded before the next call_indirect, or would be abandoned
+ // because we could not trust that a hoisted value would not have changed.)
+
+ if (boundsCheckFailedLabel) {
+ if (tableSize.isSome()) {
+ branch32(Assembler::Condition::AboveOrEqual, index, Imm32(*tableSize),
+ boundsCheckFailedLabel);
+ } else {
+ branch32(
+ Assembler::Condition::BelowOrEqual,
+ Address(InstanceReg, wasm::Instance::offsetInData(
+ callee.tableLengthInstanceDataOffset())),
+ index, boundsCheckFailedLabel);
+ }
+ }
+
+ // Write the functype-id into the ABI functype-id register.
+
+ const wasm::CallIndirectId callIndirectId = callee.wasmTableSigId();
+ switch (callIndirectId.kind()) {
+ case wasm::CallIndirectIdKind::Global:
+ loadPtr(Address(InstanceReg, wasm::Instance::offsetInData(
+ callIndirectId.instanceDataOffset())),
+ WasmTableCallSigReg);
+ break;
+ case wasm::CallIndirectIdKind::Immediate:
+ move32(Imm32(callIndirectId.immediate()), WasmTableCallSigReg);
+ break;
+ case wasm::CallIndirectIdKind::AsmJS:
+ case wasm::CallIndirectIdKind::None:
+ break;
+ }
+
+ // Load the base pointer of the table and compute the address of the callee in
+ // the table.
+
+ loadPtr(
+ Address(InstanceReg, wasm::Instance::offsetInData(
+ callee.tableFunctionBaseInstanceDataOffset())),
+ calleeScratch);
+ shiftIndex32AndAdd(index, shift, calleeScratch);
+
+ // Load the callee instance and decide whether to take the fast path or the
+ // slow path.
+
+ Label fastCall;
+ Label done;
+ const Register newInstanceTemp = WasmTableCallScratchReg1;
+ loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, instance)),
+ newInstanceTemp);
+ branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);
+
+ // Slow path: Save context, check for null, setup new context, call, restore
+ // context.
+ //
+ // TODO: The slow path could usefully be out-of-line, and the test above
+ // would then just fall through to the fast path. This would keep the
+ // fast-path code dense and give correct static prediction for the branch
+ // (forward conditional branches are normally predicted not taken).
+
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
+ movePtr(newInstanceTemp, InstanceReg);
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
+
+#ifdef WASM_HAS_HEAPREG
+ // Use the null pointer exception resulting from loading HeapReg from a null
+ // instance to handle a call to a null slot.
+ MOZ_ASSERT(nullCheckFailedLabel == nullptr);
+ loadWasmPinnedRegsFromInstance(mozilla::Some(trapOffset));
+#else
+ MOZ_ASSERT(nullCheckFailedLabel != nullptr);
+ branchTestPtr(Assembler::Zero, InstanceReg, InstanceReg,
+ nullCheckFailedLabel);
+
+ loadWasmPinnedRegsFromInstance();
+#endif
+ switchToWasmInstanceRealm(index, WasmTableCallScratchReg1);
+
+ loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
+ calleeScratch);
+
+ *slowCallOffset = call(desc, calleeScratch);
+
+ // Restore registers and realm and join up with the fast path.
+
+ loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
+ InstanceReg);
+ loadWasmPinnedRegsFromInstance();
+ switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
+ jump(&done);
+
+ // Fast path: just load the code pointer and go. The instance and heap
+ // register are the same as in the caller, and nothing will be null.
+ //
+ // (In particular, the code pointer will not be null: if it were, the instance
+ // would have been null, and then it would not have been equivalent to our
+ // current instance. So no null check is needed on the fast path.)
+
+ bind(&fastCall);
+
+ loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
+ calleeScratch);
+
+ // We use a different type of call site for the fast call since the instance
+ // slots in the frame do not have valid values.
+
+ wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
+ wasm::CallSiteDesc::IndirectFast);
+ *fastCallOffset = call(newDesc, calleeScratch);
+
+ bind(&done);
+}
+
+void MacroAssembler::wasmCallRef(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee,
+ CodeOffset* fastCallOffset,
+ CodeOffset* slowCallOffset) {
+ MOZ_ASSERT(callee.which() == wasm::CalleeDesc::FuncRef);
+ const Register calleeScratch = WasmCallRefCallScratchReg0;
+ const Register calleeFnObj = WasmCallRefReg;
+
+ // Load from the function's WASM_INSTANCE_SLOT extended slot, and decide
+ // whether to take the fast path or the slow path. Register this load
+ // instruction as a trap site so that a null function reference faults and
+ // reports a null-pointer-dereference trap.
+
+ Label fastCall;
+ Label done;
+ const Register newInstanceTemp = WasmCallRefCallScratchReg1;
+ size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
+ FunctionExtended::WASM_INSTANCE_SLOT);
+ static_assert(FunctionExtended::WASM_INSTANCE_SLOT < wasm::NullPtrGuardSize);
+ wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
+ append(wasm::Trap::NullPointerDereference,
+ wasm::TrapSite(currentOffset(), trapOffset));
+ loadPtr(Address(calleeFnObj, instanceSlotOffset), newInstanceTemp);
+ branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);
+
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
+ movePtr(newInstanceTemp, InstanceReg);
+ storePtr(InstanceReg,
+ Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
+
+ loadWasmPinnedRegsFromInstance();
+ switchToWasmInstanceRealm(WasmCallRefCallScratchReg0,
+ WasmCallRefCallScratchReg1);
+
+ // Get funcUncheckedCallEntry() from the function's
+ // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
+ size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
+ FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
+ loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);
+
+ *slowCallOffset = call(desc, calleeScratch);
+
+ // Restore this caller's registers and realm, and join up with the fast path.
+ loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
+ InstanceReg);
+ loadWasmPinnedRegsFromInstance();
+ switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
+ jump(&done);
+
+ // Fast path: just load WASM_FUNC_UNCHECKED_ENTRY_SLOT value and go.
+ // The instance and pinned registers are the same as in the caller.
+
+ bind(&fastCall);
+
+ loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);
+
+ // We use a different type of call site for the fast call since the instance
+ // slots in the frame do not have valid values.
+
+ wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
+ wasm::CallSiteDesc::FuncRefFast);
+ *fastCallOffset = call(newDesc, calleeScratch);
+
+ bind(&done);
+}
+
+bool MacroAssembler::needScratch1ForBranchWasmGcRefType(wasm::RefType type) {
+ MOZ_ASSERT(type.isValid());
+ MOZ_ASSERT(type.isAnyHierarchy());
+ return !type.isNone() && !type.isAny();
+}
+
+bool MacroAssembler::needScratch2ForBranchWasmGcRefType(wasm::RefType type) {
+ MOZ_ASSERT(type.isValid());
+ MOZ_ASSERT(type.isAnyHierarchy());
+ return type.isTypeRef() &&
+ type.typeDef()->subTypingDepth() >= wasm::MinSuperTypeVectorLength;
+}
+
+bool MacroAssembler::needSuperSuperTypeVectorForBranchWasmGcRefType(
+ wasm::RefType type) {
+ return type.isTypeRef();
+}
+
+void MacroAssembler::branchWasmGcObjectIsRefType(
+ Register object, wasm::RefType sourceType, wasm::RefType destType,
+ Label* label, bool onSuccess, Register superSuperTypeVector,
+ Register scratch1, Register scratch2) {
+ MOZ_ASSERT(sourceType.isValid());
+ MOZ_ASSERT(destType.isValid());
+ MOZ_ASSERT(sourceType.isAnyHierarchy());
+ MOZ_ASSERT(destType.isAnyHierarchy());
+ MOZ_ASSERT_IF(needScratch1ForBranchWasmGcRefType(destType),
+ scratch1 != Register::Invalid());
+ MOZ_ASSERT_IF(needScratch2ForBranchWasmGcRefType(destType),
+ scratch2 != Register::Invalid());
+ MOZ_ASSERT_IF(needSuperSuperTypeVectorForBranchWasmGcRefType(destType),
+ superSuperTypeVector != Register::Invalid());
+
+ Label fallthrough;
+ Label* successLabel = onSuccess ? label : &fallthrough;
+ Label* failLabel = onSuccess ? &fallthrough : label;
+ Label* nullLabel = destType.isNullable() ? successLabel : failLabel;
+
+ // Check for null.
+ if (sourceType.isNullable()) {
+ branchTestPtr(Assembler::Zero, object, object, nullLabel);
+ }
+
+ // The only value that can inhabit 'none' is null. So, early out if we got
+ // not-null.
+ if (destType.isNone()) {
+ jump(failLabel);
+ bind(&fallthrough);
+ return;
+ }
+
+ if (destType.isAny()) {
+ // No further checks for 'any'
+ jump(successLabel);
+ bind(&fallthrough);
+ return;
+ }
+
+ // 'destType' is now 'eq' or lower, which currently will always be a GC
+ // object. Test for non-GC objects.
+ MOZ_ASSERT(scratch1 != Register::Invalid());
+ if (!wasm::RefType::isSubTypeOf(sourceType, wasm::RefType::eq())) {
+ branchTestObjectIsWasmGcObject(false, object, scratch1, failLabel);
+ }
+
+ if (destType.isEq()) {
+ // No further checks for 'eq'
+ jump(successLabel);
+ bind(&fallthrough);
+ return;
+ }
+
+ // 'destType' is now 'struct', 'array', or a concrete type. (Bottom types
+ // were handled above.)
+ //
+ // Casting to a concrete type only requires a simple check on the
+ // object's superTypeVector. Casting to an abstract type (struct, array)
+ // requires loading the object's superTypeVector->typeDef->kind, and checking
+ // that it is correct.
+
+ loadPtr(Address(object, int32_t(WasmGcObject::offsetOfSuperTypeVector())),
+ scratch1);
+ if (destType.isTypeRef()) {
+ // concrete type, do superTypeVector check
+ branchWasmSuperTypeVectorIsSubtype(scratch1, superSuperTypeVector, scratch2,
+ destType.typeDef()->subTypingDepth(),
+ successLabel, true);
+ } else {
+ // abstract type, do kind check
+ loadPtr(Address(scratch1,
+ int32_t(wasm::SuperTypeVector::offsetOfSelfTypeDef())),
+ scratch1);
+ load8ZeroExtend(Address(scratch1, int32_t(wasm::TypeDef::offsetOfKind())),
+ scratch1);
+ branch32(Assembler::Equal, scratch1, Imm32(int32_t(destType.typeDefKind())),
+ successLabel);
+ }
+
+ // The cast failed.
+ jump(failLabel);
+ bind(&fallthrough);
+}
+
+void MacroAssembler::branchWasmSuperTypeVectorIsSubtype(
+ Register subSuperTypeVector, Register superSuperTypeVector,
+ Register scratch, uint32_t superTypeDepth, Label* label, bool onSuccess) {
+ MOZ_ASSERT_IF(superTypeDepth >= wasm::MinSuperTypeVectorLength,
+ scratch != Register::Invalid());
+
+ // We generate just different enough code for 'is' subtype vs 'is not'
+ // subtype that we handle them separately.
+ if (onSuccess) {
+ Label failed;
+
+ // At this point, we could generate a fast success check which jumps to
+ // `label` if `subSuperTypeVector == superSuperTypeVector`. However,
+ // profiling of Barista-3 suggests this is hardly worth anything, whereas
+ // omitting it gives smaller code and in particular one fewer conditional
+ // branch. So it is omitted:
+ //
+ // branchPtr(Assembler::Equal, subSuperTypeVector, superSuperTypeVector,
+ // label);
+
+ // Emit a bounds check if the super type depth may be out-of-bounds.
+ if (superTypeDepth >= wasm::MinSuperTypeVectorLength) {
+ // Slowest path: the super type vector's length must be bounds-checked.
+ load32(
+ Address(subSuperTypeVector, wasm::SuperTypeVector::offsetOfLength()),
+ scratch);
+ branch32(Assembler::LessThanOrEqual, scratch, Imm32(superTypeDepth),
+ &failed);
+ }
+
+ // Load the `superTypeDepth` entry from subSuperTypeVector. This
+ // will be `superSuperTypeVector` if `subSuperTypeVector` is indeed a
+ // subtype.
+ loadPtr(
+ Address(subSuperTypeVector,
+ wasm::SuperTypeVector::offsetOfTypeDefInVector(superTypeDepth)),
+ subSuperTypeVector);
+ branchPtr(Assembler::Equal, subSuperTypeVector, superSuperTypeVector,
+ label);
+
+ // Fallthrough to the failed case
+ bind(&failed);
+ return;
+ }
+
+ // Emit a bounds check if the super type depth may be out-of-bounds.
+ if (superTypeDepth >= wasm::MinSuperTypeVectorLength) {
+ load32(Address(subSuperTypeVector, wasm::SuperTypeVector::offsetOfLength()),
+ scratch);
+ branch32(Assembler::LessThanOrEqual, scratch, Imm32(superTypeDepth), label);
+ }
+
+ // Load the `superTypeDepth` entry from subSuperTypeVector. This will be
+ // `superSuperTypeVector` if `subSuperTypeVector` is indeed a subtype.
+ loadPtr(
+ Address(subSuperTypeVector,
+ wasm::SuperTypeVector::offsetOfTypeDefInVector(superTypeDepth)),
+ subSuperTypeVector);
+ branchPtr(Assembler::NotEqual, subSuperTypeVector, superSuperTypeVector,
+ label);
+ // Fallthrough to the success case
+}
+
+void MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc) {
+ CodeOffset offset = nopPatchableToCall();
+ append(desc, offset);
+}
+
+void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type,
+ Register temp1, Register temp2,
+ Register temp3, Label* noBarrier) {
+ MOZ_ASSERT(temp1 != PreBarrierReg);
+ MOZ_ASSERT(temp2 != PreBarrierReg);
+ MOZ_ASSERT(temp3 != PreBarrierReg);
+
+ // Load the GC thing in temp1.
+ if (type == MIRType::Value) {
+ unboxGCThingForGCBarrier(Address(PreBarrierReg, 0), temp1);
+ } else {
+ MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
+ type == MIRType::Shape);
+ loadPtr(Address(PreBarrierReg, 0), temp1);
+ }
+
+#ifdef DEBUG
+ // The caller should have checked for null pointers.
+ Label nonZero;
+ branchTestPtr(Assembler::NonZero, temp1, temp1, &nonZero);
+ assumeUnreachable("JIT pre-barrier: unexpected nullptr");
+ bind(&nonZero);
+#endif
+
+ // Load the chunk address in temp2.
+ movePtr(temp1, temp2);
+ andPtr(Imm32(int32_t(~gc::ChunkMask)), temp2);
+
+ // If the GC thing is in the nursery, we don't need to barrier it.
+ if (type == MIRType::Value || type == MIRType::Object ||
+ type == MIRType::String) {
+ branchPtr(Assembler::NotEqual, Address(temp2, gc::ChunkStoreBufferOffset),
+ ImmWord(0), noBarrier);
+ } else {
+#ifdef DEBUG
+ Label isTenured;
+ branchPtr(Assembler::Equal, Address(temp2, gc::ChunkStoreBufferOffset),
+ ImmWord(0), &isTenured);
+ assumeUnreachable("JIT pre-barrier: unexpected nursery pointer");
+ bind(&isTenured);
+#endif
+ }
+
+ // Determine the bit index and store in temp1.
+ //
+ // bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit +
+ // static_cast<uint32_t>(colorBit);
+ static_assert(gc::CellBytesPerMarkBit == 8,
+ "Calculation below relies on this");
+ static_assert(size_t(gc::ColorBit::BlackBit) == 0,
+ "Calculation below relies on this");
+ andPtr(Imm32(gc::ChunkMask), temp1);
+ rshiftPtr(Imm32(3), temp1);
+
+ static_assert(gc::MarkBitmapWordBits == JS_BITS_PER_WORD,
+ "Calculation below relies on this");
+
+ // Load the bitmap word in temp2.
+ //
+ // word = chunk.bitmap[bit / MarkBitmapWordBits];
+
+ // Fold the adjustment for the fact that arenas don't start at the beginning
+ // of the chunk into the offset to the chunk bitmap.
+ const size_t firstArenaAdjustment = gc::FirstArenaAdjustmentBits / CHAR_BIT;
+ const intptr_t offset =
+ intptr_t(gc::ChunkMarkBitmapOffset) - intptr_t(firstArenaAdjustment);
+
+ movePtr(temp1, temp3);
+#if JS_BITS_PER_WORD == 64
+ rshiftPtr(Imm32(6), temp1);
+ loadPtr(BaseIndex(temp2, temp1, TimesEight, offset), temp2);
+#else
+ rshiftPtr(Imm32(5), temp1);
+ loadPtr(BaseIndex(temp2, temp1, TimesFour, offset), temp2);
+#endif
+
+ // Load the mask in temp1.
+ //
+ // mask = uintptr_t(1) << (bit % MarkBitmapWordBits);
+ andPtr(Imm32(gc::MarkBitmapWordBits - 1), temp3);
+ move32(Imm32(1), temp1);
+#ifdef JS_CODEGEN_X64
+ MOZ_ASSERT(temp3 == rcx);
+ shlq_cl(temp1);
+#elif JS_CODEGEN_X86
+ MOZ_ASSERT(temp3 == ecx);
+ shll_cl(temp1);
+#elif JS_CODEGEN_ARM
+ ma_lsl(temp3, temp1, temp1);
+#elif JS_CODEGEN_ARM64
+ Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64));
+#elif JS_CODEGEN_MIPS32
+ ma_sll(temp1, temp1, temp3);
+#elif JS_CODEGEN_MIPS64
+ ma_dsll(temp1, temp1, temp3);
+#elif JS_CODEGEN_LOONG64
+ as_sll_d(temp1, temp1, temp3);
+#elif JS_CODEGEN_RISCV64
+ sll(temp1, temp1, temp3);
+#elif JS_CODEGEN_WASM32
+ MOZ_CRASH();
+#elif JS_CODEGEN_NONE
+ MOZ_CRASH();
+#else
+# error "Unknown architecture"
+#endif
+
+ // No barrier is needed if the bit is set, |word & mask != 0|.
+ branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
+}
+
+// ========================================================================
+// JS atomic operations.
+
+void MacroAssembler::atomicIsLockFreeJS(Register value, Register output) {
+ // Keep this in sync with isLockfreeJS() in jit/AtomicOperations.h.
+ static_assert(AtomicOperations::isLockfreeJS(1)); // Implementation artifact
+ static_assert(AtomicOperations::isLockfreeJS(2)); // Implementation artifact
+ static_assert(AtomicOperations::isLockfreeJS(4)); // Spec requirement
+ static_assert(AtomicOperations::isLockfreeJS(8)); // Implementation artifact
+
+ Label done;
+ move32(Imm32(1), output);
+ branch32(Assembler::Equal, value, Imm32(8), &done);
+ branch32(Assembler::Equal, value, Imm32(4), &done);
+ branch32(Assembler::Equal, value, Imm32(2), &done);
+ branch32(Assembler::Equal, value, Imm32(1), &done);
+ move32(Imm32(0), output);
+ bind(&done);
+}
+
+// ========================================================================
+// Spectre Mitigations.
+
+void MacroAssembler::spectreMaskIndex32(Register index, Register length,
+ Register output) {
+ MOZ_ASSERT(JitOptions.spectreIndexMasking);
+ MOZ_ASSERT(length != output);
+ MOZ_ASSERT(index != output);
+
+ move32(Imm32(0), output);
+ cmp32Move32(Assembler::Below, index, length, index, output);
+}
+
+void MacroAssembler::spectreMaskIndex32(Register index, const Address& length,
+ Register output) {
+ MOZ_ASSERT(JitOptions.spectreIndexMasking);
+ MOZ_ASSERT(index != length.base);
+ MOZ_ASSERT(length.base != output);
+ MOZ_ASSERT(index != output);
+
+ move32(Imm32(0), output);
+ cmp32Move32(Assembler::Below, index, length, index, output);
+}
+
+void MacroAssembler::spectreMaskIndexPtr(Register index, Register length,
+ Register output) {
+ MOZ_ASSERT(JitOptions.spectreIndexMasking);
+ MOZ_ASSERT(length != output);
+ MOZ_ASSERT(index != output);
+
+ movePtr(ImmWord(0), output);
+ cmpPtrMovePtr(Assembler::Below, index, length, index, output);
+}
+
+void MacroAssembler::spectreMaskIndexPtr(Register index, const Address& length,
+ Register output) {
+ MOZ_ASSERT(JitOptions.spectreIndexMasking);
+ MOZ_ASSERT(index != length.base);
+ MOZ_ASSERT(length.base != output);
+ MOZ_ASSERT(index != output);
+
+ movePtr(ImmWord(0), output);
+ cmpPtrMovePtr(Assembler::Below, index, length, index, output);
+}
+
+void MacroAssembler::boundsCheck32PowerOfTwo(Register index, uint32_t length,
+ Label* failure) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(length));
+ branch32(Assembler::AboveOrEqual, index, Imm32(length), failure);
+
+ // Note: it's fine to clobber the input register, because architecturally the
+ // mask is a no-op once the bounds check has passed: it only matters for
+ // speculative execution.
+ if (JitOptions.spectreIndexMasking) {
+ and32(Imm32(length - 1), index);
+ }
+}
+
+void MacroAssembler::loadWasmPinnedRegsFromInstance(
+ mozilla::Maybe<wasm::BytecodeOffset> trapOffset) {
+#ifdef WASM_HAS_HEAPREG
+ static_assert(wasm::Instance::offsetOfMemoryBase() < 4096,
+ "We count only on the low page being inaccessible");
+ if (trapOffset) {
+ append(wasm::Trap::IndirectCallToNull,
+ wasm::TrapSite(currentOffset(), *trapOffset));
+ }
+ loadPtr(Address(InstanceReg, wasm::Instance::offsetOfMemoryBase()), HeapReg);
+#else
+ MOZ_ASSERT(!trapOffset);
+#endif
+}
+
+//}}} check_macroassembler_style
+
+#ifdef JS_64BIT
+void MacroAssembler::debugAssertCanonicalInt32(Register r) {
+# ifdef DEBUG
+ if (!js::jit::JitOptions.lessDebugCode) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
+ Label ok;
+ branchPtr(Assembler::BelowOrEqual, r, ImmWord(UINT32_MAX), &ok);
+ breakpoint();
+ bind(&ok);
+# elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
+ Label ok;
+ ScratchRegisterScope scratch(asMasm());
+ move32SignExtendToPtr(r, scratch);
+ branchPtr(Assembler::Equal, r, scratch, &ok);
+ breakpoint();
+ bind(&ok);
+# else
+ MOZ_CRASH("IMPLEMENT ME");
+# endif
+ }
+# endif
+}
+#endif
+
+void MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
+ memoryBarrier(sync.barrierBefore);
+}
+
+void MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
+ memoryBarrier(sync.barrierAfter);
+}
+
+void MacroAssembler::debugAssertIsObject(const ValueOperand& val) {
+#ifdef DEBUG
+ Label ok;
+ branchTestObject(Assembler::Equal, val, &ok);
+ assumeUnreachable("Expected an object!");
+ bind(&ok);
+#endif
+}
+
+void MacroAssembler::debugAssertObjHasFixedSlots(Register obj,
+ Register scratch) {
+#ifdef DEBUG
+ Label hasFixedSlots;
+ loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
+ branchTest32(Assembler::NonZero,
+ Address(scratch, Shape::offsetOfImmutableFlags()),
+ Imm32(NativeShape::fixedSlotsMask()), &hasFixedSlots);
+ assumeUnreachable("Expected a fixed slot");
+ bind(&hasFixedSlots);
+#endif
+}
+
+void MacroAssembler::debugAssertObjectHasClass(Register obj, Register scratch,
+ const JSClass* clasp) {
+#ifdef DEBUG
+ Label done;
+ branchTestObjClassNoSpectreMitigations(Assembler::Equal, obj, clasp, scratch,
+ &done);
+ assumeUnreachable("Class check failed");
+ bind(&done);
+#endif
+}
+
+void MacroAssembler::branchArrayIsNotPacked(Register array, Register temp1,
+ Register temp2, Label* label) {
+ loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
+
+ // Test length == initializedLength.
+ Address initLength(temp1, ObjectElements::offsetOfInitializedLength());
+ load32(Address(temp1, ObjectElements::offsetOfLength()), temp2);
+ branch32(Assembler::NotEqual, initLength, temp2, label);
+
+ // Test the NON_PACKED flag.
+ Address flags(temp1, ObjectElements::offsetOfFlags());
+ branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::NON_PACKED),
+ label);
+}
+
+void MacroAssembler::setIsPackedArray(Register obj, Register output,
+ Register temp) {
+ // Ensure it's an ArrayObject.
+ Label notPackedArray;
+ branchTestObjClass(Assembler::NotEqual, obj, &ArrayObject::class_, temp, obj,
+ &notPackedArray);
+
+ branchArrayIsNotPacked(obj, temp, output, &notPackedArray);
+
+ Label done;
+ move32(Imm32(1), output);
+ jump(&done);
+
+ bind(&notPackedArray);
+ move32(Imm32(0), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::packedArrayPop(Register array, ValueOperand output,
+ Register temp1, Register temp2,
+ Label* fail) {
+ // Load obj->elements in temp1.
+ loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
+
+ // Check flags.
+ static constexpr uint32_t UnhandledFlags =
+ ObjectElements::Flags::NON_PACKED |
+ ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
+ ObjectElements::Flags::NOT_EXTENSIBLE |
+ ObjectElements::Flags::MAYBE_IN_ITERATION;
+ Address flags(temp1, ObjectElements::offsetOfFlags());
+ branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);
+
+ // Load length in temp2. Ensure length == initializedLength.
+ Address lengthAddr(temp1, ObjectElements::offsetOfLength());
+ Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
+ load32(lengthAddr, temp2);
+ branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);
+
+ // Result is |undefined| if length == 0.
+ Label notEmpty, done;
+ branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
+ {
+ moveValue(UndefinedValue(), output);
+ jump(&done);
+ }
+
+ bind(&notEmpty);
+
+ // Load the last element.
+ sub32(Imm32(1), temp2);
+ BaseObjectElementIndex elementAddr(temp1, temp2);
+ loadValue(elementAddr, output);
+
+ // Pre-barrier the element because we're removing it from the array.
+ EmitPreBarrier(*this, elementAddr, MIRType::Value);
+
+ // Update length and initializedLength.
+ store32(temp2, lengthAddr);
+ store32(temp2, initLengthAddr);
+
+ bind(&done);
+}
+
+void MacroAssembler::packedArrayShift(Register array, ValueOperand output,
+ Register temp1, Register temp2,
+ LiveRegisterSet volatileRegs,
+ Label* fail) {
+ // Load obj->elements in temp1.
+ loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
+
+ // Check flags.
+ static constexpr uint32_t UnhandledFlags =
+ ObjectElements::Flags::NON_PACKED |
+ ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
+ ObjectElements::Flags::NOT_EXTENSIBLE |
+ ObjectElements::Flags::MAYBE_IN_ITERATION;
+ Address flags(temp1, ObjectElements::offsetOfFlags());
+ branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);
+
+ // Load length in temp2. Ensure length == initializedLength.
+ Address lengthAddr(temp1, ObjectElements::offsetOfLength());
+ Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
+ load32(lengthAddr, temp2);
+ branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);
+
+ // Result is |undefined| if length == 0.
+ Label notEmpty, done;
+ branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
+ {
+ moveValue(UndefinedValue(), output);
+ jump(&done);
+ }
+
+ bind(&notEmpty);
+
+ // Load the first element.
+ Address elementAddr(temp1, 0);
+ loadValue(elementAddr, output);
+
+ // Move the other elements and update the initializedLength/length. This will
+ // also trigger pre-barriers.
+ {
+ // Ensure output is in volatileRegs. Don't preserve temp1 and temp2.
+ volatileRegs.takeUnchecked(temp1);
+ volatileRegs.takeUnchecked(temp2);
+ if (output.hasVolatileReg()) {
+ volatileRegs.addUnchecked(output);
+ }
+
+ PushRegsInMask(volatileRegs);
+
+ using Fn = void (*)(ArrayObject* arr);
+ setupUnalignedABICall(temp1);
+ passABIArg(array);
+ callWithABI<Fn, ArrayShiftMoveElements>();
+
+ PopRegsInMask(volatileRegs);
+ }
+
+ bind(&done);
+}
+
+void MacroAssembler::loadArgumentsObjectElement(Register obj, Register index,
+ ValueOperand output,
+ Register temp, Label* fail) {
+ Register temp2 = output.scratchReg();
+
+ // Get initial length value.
+ unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);
+
+ // Ensure no overridden elements.
+ branchTest32(Assembler::NonZero, temp,
+ Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);
+
+ // Bounds check.
+ rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
+ spectreBoundsCheck32(index, temp, temp2, fail);
+
+ // Load ArgumentsData.
+ loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);
+
+ // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
+ BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
+ branchTestMagic(Assembler::Equal, argValue, fail);
+ loadValue(argValue, output);
+}
+
+void MacroAssembler::loadArgumentsObjectElementHole(Register obj,
+ Register index,
+ ValueOperand output,
+ Register temp,
+ Label* fail) {
+ Register temp2 = output.scratchReg();
+
+ // Get initial length value.
+ unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);
+
+ // Ensure no overridden elements.
+ branchTest32(Assembler::NonZero, temp,
+ Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);
+
+ // Bounds check.
+ Label outOfBounds, done;
+ rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
+ spectreBoundsCheck32(index, temp, temp2, &outOfBounds);
+
+ // Load ArgumentsData.
+ loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);
+
+ // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
+ BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
+ branchTestMagic(Assembler::Equal, argValue, fail);
+ loadValue(argValue, output);
+ jump(&done);
+
+ bind(&outOfBounds);
+ branch32(Assembler::LessThan, index, Imm32(0), fail);
+ moveValue(UndefinedValue(), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::loadArgumentsObjectElementExists(
+ Register obj, Register index, Register output, Register temp, Label* fail) {
+ // Ensure the index is non-negative.
+ branch32(Assembler::LessThan, index, Imm32(0), fail);
+
+ // Get initial length value.
+ unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);
+
+ // Ensure no overridden or deleted elements.
+ branchTest32(Assembler::NonZero, temp,
+ Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);
+
+ // Compare index against the length.
+ rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
+ cmp32Set(Assembler::LessThan, index, temp, output);
+}
+
+void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output,
+ Label* fail) {
+ // Get initial length value.
+ unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
+ output);
+
+ // Test if length has been overridden.
+ branchTest32(Assembler::NonZero, output,
+ Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT), fail);
+
+ // Shift out arguments length and return it.
+ rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
+}
+
+void MacroAssembler::branchTestArgumentsObjectFlags(Register obj, Register temp,
+ uint32_t flags,
+ Condition cond,
+ Label* label) {
+ MOZ_ASSERT((flags & ~ArgumentsObject::PACKED_BITS_MASK) == 0);
+
+ // Get initial length value.
+ unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);
+
+ // Test flags.
+ branchTest32(cond, temp, Imm32(flags), label);
+}
+
+static constexpr bool ValidateSizeRange(Scalar::Type from, Scalar::Type to) {
+ for (Scalar::Type type = from; type < to; type = Scalar::Type(type + 1)) {
+ if (TypedArrayElemSize(type) != TypedArrayElemSize(from)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void MacroAssembler::typedArrayElementSize(Register obj, Register output) {
+ static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
+ static_assert(
+ (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
+ "BigUint64 is the last typed array class");
+
+ Label one, two, four, eight, done;
+
+ loadObjClassUnsafe(obj, output);
+
+ static_assert(ValidateSizeRange(Scalar::Int8, Scalar::Int16),
+ "element size is one in [Int8, Int16)");
+ branchPtr(Assembler::Below, output,
+ ImmPtr(TypedArrayObject::classForType(Scalar::Int16)), &one);
+
+ static_assert(ValidateSizeRange(Scalar::Int16, Scalar::Int32),
+ "element size is two in [Int16, Int32)");
+ branchPtr(Assembler::Below, output,
+ ImmPtr(TypedArrayObject::classForType(Scalar::Int32)), &two);
+
+ static_assert(ValidateSizeRange(Scalar::Int32, Scalar::Float64),
+ "element size is four in [Int32, Float64)");
+ branchPtr(Assembler::Below, output,
+ ImmPtr(TypedArrayObject::classForType(Scalar::Float64)), &four);
+
+ static_assert(ValidateSizeRange(Scalar::Float64, Scalar::Uint8Clamped),
+ "element size is eight in [Float64, Uint8Clamped)");
+ branchPtr(Assembler::Below, output,
+ ImmPtr(TypedArrayObject::classForType(Scalar::Uint8Clamped)),
+ &eight);
+
+ static_assert(ValidateSizeRange(Scalar::Uint8Clamped, Scalar::BigInt64),
+ "element size is one in [Uint8Clamped, BigInt64)");
+ branchPtr(Assembler::Below, output,
+ ImmPtr(TypedArrayObject::classForType(Scalar::BigInt64)), &one);
+
+ static_assert(
+ ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
+ "element size is eight in [BigInt64, MaxTypedArrayViewType)");
+ // Fall through for BigInt64 and BigUint64
+
+ bind(&eight);
+ move32(Imm32(8), output);
+ jump(&done);
+
+ bind(&four);
+ move32(Imm32(4), output);
+ jump(&done);
+
+ bind(&two);
+ move32(Imm32(2), output);
+ jump(&done);
+
+ bind(&one);
+ move32(Imm32(1), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchIfClassIsNotTypedArray(Register clasp,
+ Label* notTypedArray) {
+ static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
+ const JSClass* firstTypedArrayClass =
+ TypedArrayObject::classForType(Scalar::Int8);
+
+ static_assert(
+ (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
+ "BigUint64 is the last typed array class");
+ const JSClass* lastTypedArrayClass =
+ TypedArrayObject::classForType(Scalar::BigUint64);
+
+ branchPtr(Assembler::Below, clasp, ImmPtr(firstTypedArrayClass),
+ notTypedArray);
+ branchPtr(Assembler::Above, clasp, ImmPtr(lastTypedArrayClass),
+ notTypedArray);
+}
+
+void MacroAssembler::branchIfHasDetachedArrayBuffer(Register obj, Register temp,
+ Label* label) {
+ // Inline implementation of ArrayBufferViewObject::hasDetachedBuffer().
+
+ // Load obj->elements in temp.
+ loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
+
+ // Shared buffers can't be detached.
+ Label done;
+ branchTest32(Assembler::NonZero,
+ Address(temp, ObjectElements::offsetOfFlags()),
+ Imm32(ObjectElements::SHARED_MEMORY), &done);
+
+ // An ArrayBufferView with a null buffer has never had its buffer exposed, so
+ // it cannot have been detached.
+ fallibleUnboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), temp,
+ &done);
+
+ // Load the ArrayBuffer flags and branch if the detached flag is set.
+ unboxInt32(Address(temp, ArrayBufferObject::offsetOfFlagsSlot()), temp);
+ branchTest32(Assembler::NonZero, temp, Imm32(ArrayBufferObject::DETACHED),
+ label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni,
+ Label* notReusable) {
+ // See NativeIterator::isReusable.
+ Address flagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());
+
+#ifdef DEBUG
+ Label niIsInitialized;
+ branchTest32(Assembler::NonZero, flagsAddr,
+ Imm32(NativeIterator::Flags::Initialized), &niIsInitialized);
+ assumeUnreachable(
+ "Expected a NativeIterator that's been completely "
+ "initialized");
+ bind(&niIsInitialized);
+#endif
+
+ branchTest32(Assembler::NonZero, flagsAddr,
+ Imm32(NativeIterator::Flags::NotReusable), notReusable);
+}
+
+void MacroAssembler::branchNativeIteratorIndices(Condition cond, Register ni,
+ Register temp,
+ NativeIteratorIndices kind,
+ Label* label) {
+ Address iterFlagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());
+ load32(iterFlagsAddr, temp);
+ and32(Imm32(NativeIterator::IndicesMask), temp);
+ uint32_t shiftedKind = uint32_t(kind) << NativeIterator::IndicesShift;
+ branch32(cond, temp, Imm32(shiftedKind), label);
+}
+
+static void LoadNativeIterator(MacroAssembler& masm, Register obj,
+ Register dest) {
+ MOZ_ASSERT(obj != dest);
+
+#ifdef DEBUG
+ // Assert we have a PropertyIteratorObject.
+ Label ok;
+ masm.branchTestObjClass(Assembler::Equal, obj,
+ &PropertyIteratorObject::class_, dest, obj, &ok);
+ masm.assumeUnreachable("Expected PropertyIteratorObject!");
+ masm.bind(&ok);
+#endif
+
+ // Load NativeIterator object.
+ Address slotAddr(obj, PropertyIteratorObject::offsetOfIteratorSlot());
+ masm.loadPrivate(slotAddr, dest);
+}
+
+// The ShapeCachePtr may be used to cache an iterator for for-in. Return that
+// iterator in |dest| if:
+// - the shape cache pointer exists and stores a native iterator
+// - the iterator is reusable
+// - the iterated object has no dense elements
+// - the shapes of each object on the proto chain of |obj| match the cached
+// shapes
+// - the proto chain has no dense elements
+// Otherwise, jump to |failure|.
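+//
+// Roughly, as an illustrative sketch (helper names are invented for
+// exposition, not real APIs):
+//
+//   cache = obj->shape()->cachePtr();
+//   if (!cache.isIterator()) goto failure;
+//   if (hasDenseElements(obj)) goto failure;
+//   dest = cache.iteratorObject();
+//   if (!dest->nativeIterator()->isReusable()) goto failure;
+//   expected = dest->nativeIterator()->shapes() + 1;  // skip |obj|'s shape
+//   for (proto = obj->proto(); proto; proto = proto->proto(), expected++) {
+//     if (hasDenseElements(proto) || proto->shape() != *expected)
+//       goto failure;
+//   }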
+void MacroAssembler::maybeLoadIteratorFromShape(Register obj, Register dest,
+ Register temp, Register temp2,
+ Register temp3,
+ Label* failure) {
+ // Register usage:
+ // obj: always contains the input object
+ // temp: walks the obj->shape->baseshape->proto->shape->... chain
+ // temp2: points to the native iterator. Incremented to walk the shapes array.
+ // temp3: scratch space
+ // dest: stores the resulting PropertyIteratorObject on success
+
+ Label success;
+ Register shapeAndProto = temp;
+ Register nativeIterator = temp2;
+
+ // Load ShapeCache from shape.
+ loadPtr(Address(obj, JSObject::offsetOfShape()), shapeAndProto);
+ loadPtr(Address(shapeAndProto, Shape::offsetOfCachePtr()), dest);
+
+ // Check if it's an iterator.
+ movePtr(dest, temp3);
+ andPtr(Imm32(ShapeCachePtr::MASK), temp3);
+ branch32(Assembler::NotEqual, temp3, Imm32(ShapeCachePtr::ITERATOR), failure);
+
+ // If we've cached an iterator, |obj| must be a native object.
+#ifdef DEBUG
+ Label nonNative;
+ branchIfNonNativeObj(obj, temp3, &nonNative);
+#endif
+
+ // Verify that |obj| has no dense elements.
+ loadPtr(Address(obj, NativeObject::offsetOfElements()), temp3);
+ branch32(Assembler::NotEqual,
+ Address(temp3, ObjectElements::offsetOfInitializedLength()),
+ Imm32(0), failure);
+
+ // Clear tag bits from iterator object. |dest| is now valid.
+ // Load the native iterator and verify that it's reusable.
+ andPtr(Imm32(~ShapeCachePtr::MASK), dest);
+ LoadNativeIterator(*this, dest, nativeIterator);
+ branchIfNativeIteratorNotReusable(nativeIterator, failure);
+
+ // We have to compare the shapes in the native iterator with the shapes on the
+ // proto chain to ensure the cached iterator is still valid. The shape array
+ // always starts at a fixed offset from the base of the NativeIterator, so
+ // instead of using an instruction outside the loop to initialize a pointer to
+ // the shapes array, we can bake it into the offset and reuse the pointer to
+ // the NativeIterator. We add |sizeof(Shape*)| to start at the second shape.
+ // (The first shape corresponds to the object itself. We don't have to check
+ // it, because we got the iterator via the shape.)
+ size_t nativeIteratorProtoShapeOffset =
+ NativeIterator::offsetOfFirstShape() + sizeof(Shape*);
+
+ // Loop over the proto chain. At the head of the loop, |shapeAndProto| holds
+ // the shape of the current object, and |nativeIterator| plus a fixed offset
+ // points to the expected shape of its proto in the iterator's shapes array.
+ Label protoLoop;
+ bind(&protoLoop);
+
+ // Load the proto. If the proto is null, then we're done.
+ loadPtr(Address(shapeAndProto, Shape::offsetOfBaseShape()), shapeAndProto);
+ loadPtr(Address(shapeAndProto, BaseShape::offsetOfProto()), shapeAndProto);
+ branchPtr(Assembler::Equal, shapeAndProto, ImmPtr(nullptr), &success);
+
+#ifdef DEBUG
+ // We have guarded every shape up until this point, so we know that the proto
+ // is a native object.
+ branchIfNonNativeObj(shapeAndProto, temp3, &nonNative);
+#endif
+
+ // Verify that the proto has no dense elements.
+ loadPtr(Address(shapeAndProto, NativeObject::offsetOfElements()), temp3);
+ branch32(Assembler::NotEqual,
+ Address(temp3, ObjectElements::offsetOfInitializedLength()),
+ Imm32(0), failure);
+
+ // Compare the shape of the proto to the expected shape.
+ loadPtr(Address(shapeAndProto, JSObject::offsetOfShape()), shapeAndProto);
+ loadPtr(Address(nativeIterator, nativeIteratorProtoShapeOffset), temp3);
+ branchPtr(Assembler::NotEqual, shapeAndProto, temp3, failure);
+
+ // Advance |nativeIterator| to the next expected shape and jump back to the
+ // top of the loop.
+ addPtr(Imm32(sizeof(Shape*)), nativeIterator);
+ jump(&protoLoop);
+
+#ifdef DEBUG
+ bind(&nonNative);
+ assumeUnreachable("Expected NativeObject in maybeLoadIteratorFromShape");
+#endif
+
+ bind(&success);
+}
+
+void MacroAssembler::iteratorMore(Register obj, ValueOperand output,
+ Register temp) {
+ Label done;
+ Register outputScratch = output.scratchReg();
+ LoadNativeIterator(*this, obj, outputScratch);
+
+ // If propertyCursor_ < propertiesEnd_, load the next string and advance
+ // the cursor. Otherwise return MagicValue(JS_NO_ITER_VALUE).
+ Label iterDone;
+ Address cursorAddr(outputScratch, NativeIterator::offsetOfPropertyCursor());
+ Address cursorEndAddr(outputScratch, NativeIterator::offsetOfPropertiesEnd());
+ loadPtr(cursorAddr, temp);
+ branchPtr(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);
+
+ // Get next string.
+ loadPtr(Address(temp, 0), temp);
+
+ // Increase the cursor.
+ addPtr(Imm32(sizeof(GCPtr<JSLinearString*>)), cursorAddr);
+
+ tagValue(JSVAL_TYPE_STRING, temp, output);
+ jump(&done);
+
+ bind(&iterDone);
+ moveValue(MagicValue(JS_NO_ITER_VALUE), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
+ Register temp3) {
+ LoadNativeIterator(*this, obj, temp1);
+
+ // The shared iterator used for for-in with null/undefined is immutable and
+ // unlinked. See NativeIterator::isEmptyIteratorSingleton.
+ Label done;
+ branchTest32(Assembler::NonZero,
+ Address(temp1, NativeIterator::offsetOfFlagsAndCount()),
+ Imm32(NativeIterator::Flags::IsEmptyIteratorSingleton), &done);
+
+ // Clear active bit.
+ and32(Imm32(~NativeIterator::Flags::Active),
+ Address(temp1, NativeIterator::offsetOfFlagsAndCount()));
+
+ // Clear objectBeingIterated.
+ Address iterObjAddr(temp1, NativeIterator::offsetOfObjectBeingIterated());
+ guardedCallPreBarrierAnyZone(iterObjAddr, MIRType::Object, temp2);
+ storePtr(ImmPtr(nullptr), iterObjAddr);
+
+ // Reset property cursor.
+ loadPtr(Address(temp1, NativeIterator::offsetOfShapesEnd()), temp2);
+ storePtr(temp2, Address(temp1, NativeIterator::offsetOfPropertyCursor()));
+
+ // Unlink from the iterator list.
+ const Register next = temp2;
+ const Register prev = temp3;
+ loadPtr(Address(temp1, NativeIterator::offsetOfNext()), next);
+ loadPtr(Address(temp1, NativeIterator::offsetOfPrev()), prev);
+ storePtr(prev, Address(next, NativeIterator::offsetOfPrev()));
+ storePtr(next, Address(prev, NativeIterator::offsetOfNext()));
+#ifdef DEBUG
+ storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfNext()));
+ storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfPrev()));
+#endif
+
+ bind(&done);
+}
+
+void MacroAssembler::registerIterator(Register enumeratorsList, Register iter,
+ Register temp) {
+ // iter->next = list
+ storePtr(enumeratorsList, Address(iter, NativeIterator::offsetOfNext()));
+
+ // iter->prev = list->prev
+ loadPtr(Address(enumeratorsList, NativeIterator::offsetOfPrev()), temp);
+ storePtr(temp, Address(iter, NativeIterator::offsetOfPrev()));
+
+ // list->prev->next = iter
+ storePtr(iter, Address(temp, NativeIterator::offsetOfNext()));
+
+ // list->prev = iter
+ storePtr(iter, Address(enumeratorsList, NativeIterator::offsetOfPrev()));
+}
+
+void MacroAssembler::toHashableNonGCThing(ValueOperand value,
+ ValueOperand result,
+ FloatRegister tempFloat) {
+ // Inline implementation of |HashableValue::setValue()|.
+
+#ifdef DEBUG
+ Label ok;
+ branchTestGCThing(Assembler::NotEqual, value, &ok);
+ assumeUnreachable("Unexpected GC thing");
+ bind(&ok);
+#endif
+
+ Label useInput, done;
+ branchTestDouble(Assembler::NotEqual, value, &useInput);
+ {
+ Register int32 = result.scratchReg();
+ unboxDouble(value, tempFloat);
+
+ // Normalize int32-valued doubles to int32 and negative zero to +0.
+ Label canonicalize;
+ convertDoubleToInt32(tempFloat, int32, &canonicalize, false);
+ {
+ tagValue(JSVAL_TYPE_INT32, int32, result);
+ jump(&done);
+ }
+ bind(&canonicalize);
+ {
+ // Normalize the sign bit of a NaN.
+ branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
+ moveValue(JS::NaNValue(), result);
+ jump(&done);
+ }
+ }
+
+ bind(&useInput);
+ moveValue(value, result);
+
+ bind(&done);
+}
+
+void MacroAssembler::toHashableValue(ValueOperand value, ValueOperand result,
+ FloatRegister tempFloat,
+ Label* atomizeString, Label* tagString) {
+ // Inline implementation of |HashableValue::setValue()|.
+
+ ScratchTagScope tag(*this, value);
+ splitTagForTest(value, tag);
+
+ Label notString, useInput, done;
+ branchTestString(Assembler::NotEqual, tag, &notString);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ Register str = result.scratchReg();
+ unboxString(value, str);
+
+ branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT), &useInput);
+
+ jump(atomizeString);
+ bind(tagString);
+
+ tagValue(JSVAL_TYPE_STRING, str, result);
+ jump(&done);
+ }
+ bind(&notString);
+ branchTestDouble(Assembler::NotEqual, tag, &useInput);
+ {
+ ScratchTagScopeRelease _(&tag);
+
+ Register int32 = result.scratchReg();
+ unboxDouble(value, tempFloat);
+
+ Label canonicalize;
+ convertDoubleToInt32(tempFloat, int32, &canonicalize, false);
+ {
+ tagValue(JSVAL_TYPE_INT32, int32, result);
+ jump(&done);
+ }
+ bind(&canonicalize);
+ {
+ branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
+ moveValue(JS::NaNValue(), result);
+ jump(&done);
+ }
+ }
+
+ bind(&useInput);
+ moveValue(value, result);
+
+ bind(&done);
+}
+
+void MacroAssembler::scrambleHashCode(Register result) {
+ // Inline implementation of |mozilla::ScrambleHashCode()|.
+
+ mul32(Imm32(mozilla::kGoldenRatioU32), result);
+}
+
+void MacroAssembler::prepareHashNonGCThing(ValueOperand value, Register result,
+ Register temp) {
+ // Inline implementation of |OrderedHashTable::prepareHash()| and
+ // |mozilla::HashGeneric(v.asRawBits())|.
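+  //
+  // In scalar form this computes roughly (a sketch):
+  //
+  //   HashNumber h = AddU32ToHash(0, uint32_t(bits));       // low word
+  //   h = AddU32ToHash(h, uint32_t(uint64_t(bits) >> 32));  // high word
+  //   return ScrambleHashCode(h);
+  //
+  // where AddU32ToHash(h, v) == kGoldenRatioU32 * (RotateLeft(h, 5) ^ v).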
+
+#ifdef DEBUG
+ Label ok;
+ branchTestGCThing(Assembler::NotEqual, value, &ok);
+ assumeUnreachable("Unexpected GC thing");
+ bind(&ok);
+#endif
+
+ // uint32_t v1 = static_cast<uint32_t>(aValue);
+#ifdef JS_PUNBOX64
+ move64To32(value.toRegister64(), result);
+#else
+ move32(value.payloadReg(), result);
+#endif
+
+ // uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
+#ifdef JS_PUNBOX64
+ auto r64 = Register64(temp);
+ move64(value.toRegister64(), r64);
+ rshift64Arithmetic(Imm32(32), r64);
+#else
+ // TODO: This seems like a bug in mozilla::detail::AddUintptrToHash().
+ // The uint64_t input is first converted to uintptr_t and then back to
+ // uint64_t. But |uint64_t(uintptr_t(bits))| actually only clears the high
+ // bits, so this computation:
+ //
+ // aValue = uintptr_t(bits)
+ // v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32)
+ //
+ // really just sets |v2 = 0|. And that means the xor-operation in AddU32ToHash
+ // can be optimized away, because |x ^ 0 = x|.
+ //
+ // Filed as bug 1718516.
+#endif
+
+ // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
+ // with |aHash = 0| and |aValue = v1|.
+ mul32(Imm32(mozilla::kGoldenRatioU32), result);
+
+ // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
+ // with |aHash = <above hash>| and |aValue = v2|.
+ rotateLeft(Imm32(5), result, result);
+#ifdef JS_PUNBOX64
+ xor32(temp, result);
+#endif
+
+ // Combine |mul32| and |scrambleHashCode| by directly multiplying with
+ // |kGoldenRatioU32 * kGoldenRatioU32|.
+ //
+ // mul32(Imm32(mozilla::kGoldenRatioU32), result);
+ //
+ // scrambleHashCode(result);
+ mul32(Imm32(mozilla::kGoldenRatioU32 * mozilla::kGoldenRatioU32), result);
+}
+
+void MacroAssembler::prepareHashString(Register str, Register result,
+ Register temp) {
+ // Inline implementation of |OrderedHashTable::prepareHash()| and
+ // |JSAtom::hash()|.
+
+#ifdef DEBUG
+ Label ok;
+ branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT), &ok);
+ assumeUnreachable("Unexpected non-atom string");
+ bind(&ok);
+#endif
+
+ move32(Imm32(JSString::FAT_INLINE_MASK), temp);
+ and32(Address(str, JSString::offsetOfFlags()), temp);
+
+ // Set |result| to 1 for FatInlineAtoms.
+ move32(Imm32(0), result);
+ cmp32Set(Assembler::Equal, temp, Imm32(JSString::FAT_INLINE_MASK), result);
+
+ // Use a computed load for branch-free code.
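+  // |result| is 0 for a NormalAtom and 1 for a FatInlineAtom, so the hash can
+  // be loaded from |str + NormalAtom::offsetOfHash() + result * offsetDiff|
+  // without branching.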
+
+ static_assert(FatInlineAtom::offsetOfHash() > NormalAtom::offsetOfHash());
+
+ constexpr size_t offsetDiff =
+ FatInlineAtom::offsetOfHash() - NormalAtom::offsetOfHash();
+ static_assert(mozilla::IsPowerOfTwo(offsetDiff));
+
+ uint8_t shift = mozilla::FloorLog2Size(offsetDiff);
+ if (IsShiftInScaleRange(shift)) {
+ load32(
+ BaseIndex(str, result, ShiftToScale(shift), NormalAtom::offsetOfHash()),
+ result);
+ } else {
+ lshift32(Imm32(shift), result);
+ load32(BaseIndex(str, result, TimesOne, NormalAtom::offsetOfHash()),
+ result);
+ }
+
+ scrambleHashCode(result);
+}
+
+void MacroAssembler::prepareHashSymbol(Register sym, Register result) {
+ // Inline implementation of |OrderedHashTable::prepareHash()| and
+ // |Symbol::hash()|.
+
+ load32(Address(sym, JS::Symbol::offsetOfHash()), result);
+
+ scrambleHashCode(result);
+}
+
+void MacroAssembler::prepareHashBigInt(Register bigInt, Register result,
+ Register temp1, Register temp2,
+ Register temp3) {
+ // Inline implementation of |OrderedHashTable::prepareHash()| and
+ // |BigInt::hash()|.
+
+ // Inline implementation of |mozilla::AddU32ToHash()|.
+ auto addU32ToHash = [&](auto toAdd) {
+ rotateLeft(Imm32(5), result, result);
+ xor32(toAdd, result);
+ mul32(Imm32(mozilla::kGoldenRatioU32), result);
+ };
+
+ move32(Imm32(0), result);
+
+ // Inline |mozilla::HashBytes()|.
+
+ load32(Address(bigInt, BigInt::offsetOfLength()), temp1);
+ loadBigIntDigits(bigInt, temp2);
+
+ Label start, loop;
+ jump(&start);
+ bind(&loop);
+
+ {
+ // Compute |AddToHash(AddToHash(hash, data), sizeof(Digit))|.
+#if defined(JS_CODEGEN_MIPS64)
+ // Hash the lower 32-bits.
+ addU32ToHash(Address(temp2, 0));
+
+ // Hash the upper 32-bits.
+ addU32ToHash(Address(temp2, sizeof(int32_t)));
+#elif JS_PUNBOX64
+ // Use a single 64-bit load on non-MIPS64 platforms.
+ loadPtr(Address(temp2, 0), temp3);
+
+ // Hash the lower 32-bits.
+ addU32ToHash(temp3);
+
+ // Hash the upper 32-bits.
+ rshiftPtr(Imm32(32), temp3);
+ addU32ToHash(temp3);
+#else
+ addU32ToHash(Address(temp2, 0));
+#endif
+ }
+ addPtr(Imm32(sizeof(BigInt::Digit)), temp2);
+
+ bind(&start);
+ branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);
+
+ // Compute |mozilla::AddToHash(h, isNegative())|.
+ {
+ static_assert(mozilla::IsPowerOfTwo(BigInt::signBitMask()));
+
+ load32(Address(bigInt, BigInt::offsetOfFlags()), temp1);
+ and32(Imm32(BigInt::signBitMask()), temp1);
+ rshift32(Imm32(mozilla::FloorLog2(BigInt::signBitMask())), temp1);
+
+ addU32ToHash(temp1);
+ }
+
+ scrambleHashCode(result);
+}
+
+void MacroAssembler::prepareHashObject(Register setObj, ValueOperand value,
+ Register result, Register temp1,
+ Register temp2, Register temp3,
+ Register temp4) {
+#ifdef JS_PUNBOX64
+ // Inline implementation of |OrderedHashTable::prepareHash()| and
+ // |HashCodeScrambler::scramble(v.asRawBits())|.
+
+ // Load the |ValueSet| or |ValueMap|.
+ static_assert(SetObject::getDataSlotOffset() ==
+ MapObject::getDataSlotOffset());
+ loadPrivate(Address(setObj, SetObject::getDataSlotOffset()), temp1);
+
+  // Load |HashCodeScrambler::mK0| and |HashCodeScrambler::mK1|.
+ static_assert(ValueSet::offsetOfImplHcsK0() == ValueMap::offsetOfImplHcsK0());
+ static_assert(ValueSet::offsetOfImplHcsK1() == ValueMap::offsetOfImplHcsK1());
+ auto k0 = Register64(temp1);
+ auto k1 = Register64(temp2);
+ load64(Address(temp1, ValueSet::offsetOfImplHcsK1()), k1);
+ load64(Address(temp1, ValueSet::offsetOfImplHcsK0()), k0);
+
+ // Hash numbers are 32-bit values, so only hash the lower double-word.
+ static_assert(sizeof(mozilla::HashNumber) == 4);
+ move32To64ZeroExtend(value.valueReg(), Register64(result));
+
+ // Inline implementation of |SipHasher::sipHash()|.
+ auto m = Register64(result);
+ auto v0 = Register64(temp3);
+ auto v1 = Register64(temp4);
+ auto v2 = k0;
+ auto v3 = k1;
+
+ auto sipRound = [&]() {
+ // mV0 = WrappingAdd(mV0, mV1);
+ add64(v1, v0);
+
+ // mV1 = RotateLeft(mV1, 13);
+ rotateLeft64(Imm32(13), v1, v1, InvalidReg);
+
+ // mV1 ^= mV0;
+ xor64(v0, v1);
+
+ // mV0 = RotateLeft(mV0, 32);
+ rotateLeft64(Imm32(32), v0, v0, InvalidReg);
+
+ // mV2 = WrappingAdd(mV2, mV3);
+ add64(v3, v2);
+
+ // mV3 = RotateLeft(mV3, 16);
+ rotateLeft64(Imm32(16), v3, v3, InvalidReg);
+
+ // mV3 ^= mV2;
+ xor64(v2, v3);
+
+ // mV0 = WrappingAdd(mV0, mV3);
+ add64(v3, v0);
+
+ // mV3 = RotateLeft(mV3, 21);
+ rotateLeft64(Imm32(21), v3, v3, InvalidReg);
+
+ // mV3 ^= mV0;
+ xor64(v0, v3);
+
+ // mV2 = WrappingAdd(mV2, mV1);
+ add64(v1, v2);
+
+ // mV1 = RotateLeft(mV1, 17);
+ rotateLeft64(Imm32(17), v1, v1, InvalidReg);
+
+ // mV1 ^= mV2;
+ xor64(v2, v1);
+
+ // mV2 = RotateLeft(mV2, 32);
+ rotateLeft64(Imm32(32), v2, v2, InvalidReg);
+ };
+
+ // 1. Initialization.
+ // mV0 = aK0 ^ UINT64_C(0x736f6d6570736575);
+ move64(Imm64(0x736f6d6570736575), v0);
+ xor64(k0, v0);
+
+ // mV1 = aK1 ^ UINT64_C(0x646f72616e646f6d);
+ move64(Imm64(0x646f72616e646f6d), v1);
+ xor64(k1, v1);
+
+ // mV2 = aK0 ^ UINT64_C(0x6c7967656e657261);
+ MOZ_ASSERT(v2 == k0);
+ xor64(Imm64(0x6c7967656e657261), v2);
+
+ // mV3 = aK1 ^ UINT64_C(0x7465646279746573);
+ MOZ_ASSERT(v3 == k1);
+ xor64(Imm64(0x7465646279746573), v3);
+
+ // 2. Compression.
+ // mV3 ^= aM;
+ xor64(m, v3);
+
+ // sipRound();
+ sipRound();
+
+ // mV0 ^= aM;
+ xor64(m, v0);
+
+ // 3. Finalization.
+ // mV2 ^= 0xff;
+ xor64(Imm64(0xff), v2);
+
+ // for (int i = 0; i < 3; i++) sipRound();
+ for (int i = 0; i < 3; i++) {
+ sipRound();
+ }
+
+ // return mV0 ^ mV1 ^ mV2 ^ mV3;
+ xor64(v1, v0);
+ xor64(v2, v3);
+ xor64(v3, v0);
+
+ move64To32(v0, result);
+
+ scrambleHashCode(result);
+#else
+ MOZ_CRASH("Not implemented");
+#endif
+}
+
+void MacroAssembler::prepareHashValue(Register setObj, ValueOperand value,
+ Register result, Register temp1,
+ Register temp2, Register temp3,
+ Register temp4) {
+ Label isString, isObject, isSymbol, isBigInt;
+ {
+ ScratchTagScope tag(*this, value);
+ splitTagForTest(value, tag);
+
+ branchTestString(Assembler::Equal, tag, &isString);
+ branchTestObject(Assembler::Equal, tag, &isObject);
+ branchTestSymbol(Assembler::Equal, tag, &isSymbol);
+ branchTestBigInt(Assembler::Equal, tag, &isBigInt);
+ }
+
+ Label done;
+ {
+ prepareHashNonGCThing(value, result, temp1);
+ jump(&done);
+ }
+ bind(&isString);
+ {
+ unboxString(value, temp1);
+ prepareHashString(temp1, result, temp2);
+ jump(&done);
+ }
+ bind(&isObject);
+ {
+ prepareHashObject(setObj, value, result, temp1, temp2, temp3, temp4);
+ jump(&done);
+ }
+ bind(&isSymbol);
+ {
+ unboxSymbol(value, temp1);
+ prepareHashSymbol(temp1, result);
+ jump(&done);
+ }
+ bind(&isBigInt);
+ {
+ unboxBigInt(value, temp1);
+ prepareHashBigInt(temp1, result, temp2, temp3, temp4);
+
+ // Fallthrough to |done|.
+ }
+
+ bind(&done);
+}
+
+template <typename OrderedHashTable>
+void MacroAssembler::orderedHashTableLookup(Register setOrMapObj,
+ ValueOperand value, Register hash,
+ Register entryTemp, Register temp1,
+ Register temp2, Register temp3,
+ Register temp4, Label* found,
+ IsBigInt isBigInt) {
+ // Inline implementation of |OrderedHashTable::lookup()|.
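+  //
+  // In scalar form this is roughly (a sketch; member names are illustrative):
+  //
+  //   for (Data* e = hashTable[hash >> hashShift]; e; e = e->chain) {
+  //     if (e->element matches |value|)  // bitwise or BigInt-aware equality
+  //       goto found;
+  //   }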
+
+ MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp3 == InvalidReg);
+ MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp4 == InvalidReg);
+
+#ifdef DEBUG
+ Label ok;
+ if (isBigInt == IsBigInt::No) {
+ branchTestBigInt(Assembler::NotEqual, value, &ok);
+ assumeUnreachable("Unexpected BigInt");
+ } else if (isBigInt == IsBigInt::Yes) {
+ branchTestBigInt(Assembler::Equal, value, &ok);
+ assumeUnreachable("Unexpected non-BigInt");
+ }
+ bind(&ok);
+#endif
+
+#ifdef DEBUG
+ PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+
+ pushValue(value);
+ moveStackPtrTo(temp2);
+
+ setupUnalignedABICall(temp1);
+ loadJSContext(temp1);
+ passABIArg(temp1);
+ passABIArg(setOrMapObj);
+ passABIArg(temp2);
+ passABIArg(hash);
+
+ if constexpr (std::is_same_v<OrderedHashTable, ValueSet>) {
+ using Fn =
+ void (*)(JSContext*, SetObject*, const Value*, mozilla::HashNumber);
+ callWithABI<Fn, jit::AssertSetObjectHash>();
+ } else {
+ using Fn =
+ void (*)(JSContext*, MapObject*, const Value*, mozilla::HashNumber);
+ callWithABI<Fn, jit::AssertMapObjectHash>();
+ }
+
+ popValue(value);
+ PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+#endif
+
+ // Load the |ValueSet| or |ValueMap|.
+ static_assert(SetObject::getDataSlotOffset() ==
+ MapObject::getDataSlotOffset());
+ loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), temp1);
+
+ // Load the bucket.
+ move32(hash, entryTemp);
+ load32(Address(temp1, OrderedHashTable::offsetOfImplHashShift()), temp2);
+ flexibleRshift32(temp2, entryTemp);
+
+ loadPtr(Address(temp1, OrderedHashTable::offsetOfImplHashTable()), temp2);
+ loadPtr(BaseIndex(temp2, entryTemp, ScalePointer), entryTemp);
+
+ // Search for a match in this bucket.
+ Label start, loop;
+ jump(&start);
+ bind(&loop);
+ {
+ // Inline implementation of |HashableValue::operator==|.
+
+ static_assert(OrderedHashTable::offsetOfImplDataElement() == 0,
+ "offsetof(Data, element) is 0");
+ auto keyAddr = Address(entryTemp, OrderedHashTable::offsetOfEntryKey());
+
+ if (isBigInt == IsBigInt::No) {
+ // Two HashableValues are equal if they have equal bits.
+ branch64(Assembler::Equal, keyAddr, value.toRegister64(), found);
+ } else {
+#ifdef JS_PUNBOX64
+ auto key = ValueOperand(temp1);
+#else
+ auto key = ValueOperand(temp1, temp2);
+#endif
+
+ loadValue(keyAddr, key);
+
+ // Two HashableValues are equal if they have equal bits.
+ branch64(Assembler::Equal, key.toRegister64(), value.toRegister64(),
+ found);
+
+ // BigInt values are considered equal if they represent the same
+ // mathematical value.
+ Label next;
+ fallibleUnboxBigInt(key, temp2, &next);
+ if (isBigInt == IsBigInt::Yes) {
+ unboxBigInt(value, temp1);
+ } else {
+ fallibleUnboxBigInt(value, temp1, &next);
+ }
+ equalBigInts(temp1, temp2, temp3, temp4, temp1, temp2, &next, &next,
+ &next);
+ jump(found);
+ bind(&next);
+ }
+ }
+ loadPtr(Address(entryTemp, OrderedHashTable::offsetOfImplDataChain()),
+ entryTemp);
+ bind(&start);
+ branchTestPtr(Assembler::NonZero, entryTemp, entryTemp, &loop);
+}
+
+void MacroAssembler::setObjectHas(Register setObj, ValueOperand value,
+ Register hash, Register result,
+ Register temp1, Register temp2,
+ Register temp3, Register temp4,
+ IsBigInt isBigInt) {
+ Label found;
+ orderedHashTableLookup<ValueSet>(setObj, value, hash, result, temp1, temp2,
+ temp3, temp4, &found, isBigInt);
+
+ Label done;
+ move32(Imm32(0), result);
+ jump(&done);
+
+ bind(&found);
+ move32(Imm32(1), result);
+ bind(&done);
+}
+
+void MacroAssembler::mapObjectHas(Register mapObj, ValueOperand value,
+ Register hash, Register result,
+ Register temp1, Register temp2,
+ Register temp3, Register temp4,
+ IsBigInt isBigInt) {
+ Label found;
+ orderedHashTableLookup<ValueMap>(mapObj, value, hash, result, temp1, temp2,
+ temp3, temp4, &found, isBigInt);
+
+ Label done;
+ move32(Imm32(0), result);
+ jump(&done);
+
+ bind(&found);
+ move32(Imm32(1), result);
+ bind(&done);
+}
+
+void MacroAssembler::mapObjectGet(Register mapObj, ValueOperand value,
+ Register hash, ValueOperand result,
+ Register temp1, Register temp2,
+ Register temp3, Register temp4,
+ Register temp5, IsBigInt isBigInt) {
+ Label found;
+ orderedHashTableLookup<ValueMap>(mapObj, value, hash, temp1, temp2, temp3,
+ temp4, temp5, &found, isBigInt);
+
+ Label done;
+ moveValue(UndefinedValue(), result);
+ jump(&done);
+
+ // |temp1| holds the found entry.
+ bind(&found);
+ loadValue(Address(temp1, ValueMap::Entry::offsetOfValue()), result);
+
+ bind(&done);
+}
+
+template <typename OrderedHashTable>
+void MacroAssembler::loadOrderedHashTableCount(Register setOrMapObj,
+ Register result) {
+ // Inline implementation of |OrderedHashTable::count()|.
+
+ // Load the |ValueSet| or |ValueMap|.
+ static_assert(SetObject::getDataSlotOffset() ==
+ MapObject::getDataSlotOffset());
+ loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), result);
+
+ // Load the live count.
+ load32(Address(result, OrderedHashTable::offsetOfImplLiveCount()), result);
+}
+
+void MacroAssembler::loadSetObjectSize(Register setObj, Register result) {
+ loadOrderedHashTableCount<ValueSet>(setObj, result);
+}
+
+void MacroAssembler::loadMapObjectSize(Register mapObj, Register result) {
+ loadOrderedHashTableCount<ValueMap>(mapObj, result);
+}
+
+// Can't push large frames blindly on Windows, so we must touch frame memory
+// incrementally, with no more than 4096 - 1 bytes between touches.
+//
+// This is used across all platforms for simplicity.
+void MacroAssembler::touchFrameValues(Register numStackValues,
+ Register scratch1, Register scratch2) {
+ const size_t FRAME_TOUCH_INCREMENT = 2048;
+ static_assert(FRAME_TOUCH_INCREMENT < 4096 - 1,
+ "Frame increment is too large");
+
+ moveStackPtrTo(scratch2);
+
+ mov(numStackValues, scratch1);
+ lshiftPtr(Imm32(3), scratch1);
+ {
+ // Note: this loop needs to update the stack pointer register because older
+ // Linux kernels check the distance between the touched address and RSP.
+ // See bug 1839669 comment 47.
+ Label touchFrameLoop;
+ Label touchFrameLoopEnd;
+ bind(&touchFrameLoop);
+ branchSub32(Assembler::Signed, Imm32(FRAME_TOUCH_INCREMENT), scratch1,
+ &touchFrameLoopEnd);
+ subFromStackPtr(Imm32(FRAME_TOUCH_INCREMENT));
+ store32(Imm32(0), Address(getStackPointer(), 0));
+ jump(&touchFrameLoop);
+ bind(&touchFrameLoopEnd);
+ }
+
+ moveToStackPtr(scratch2);
+}
+
+namespace js {
+namespace jit {
+
+#ifdef DEBUG
+template <class RegisterType>
+AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(
+ MacroAssembler& masm, RegisterType reg)
+ : RegisterType(reg), masm_(masm), released_(false) {
+ masm.debugTrackedRegisters_.add(reg);
+}
+
+template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(
+ MacroAssembler& masm, Register reg);
+template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(
+ MacroAssembler& masm, FloatRegister reg);
+#endif // DEBUG
+
+#ifdef DEBUG
+template <class RegisterType>
+AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope() {
+ if (!released_) {
+ release();
+ }
+}
+
+template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
+template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();
+
+template <class RegisterType>
+void AutoGenericRegisterScope<RegisterType>::release() {
+ MOZ_ASSERT(!released_);
+ released_ = true;
+ const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
+ masm_.debugTrackedRegisters_.take(reg);
+}
+
+template void AutoGenericRegisterScope<Register>::release();
+template void AutoGenericRegisterScope<FloatRegister>::release();
+
+template <class RegisterType>
+void AutoGenericRegisterScope<RegisterType>::reacquire() {
+ MOZ_ASSERT(released_);
+ released_ = false;
+ const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
+ masm_.debugTrackedRegisters_.add(reg);
+}
+
+template void AutoGenericRegisterScope<Register>::reacquire();
+template void AutoGenericRegisterScope<FloatRegister>::reacquire();
+
+#endif // DEBUG
+
+} // namespace jit
+
+} // namespace js
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
new file mode 100644
index 0000000000..9b77830276
--- /dev/null
+++ b/js/src/jit/MacroAssembler.h
@@ -0,0 +1,5611 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MacroAssembler_h
+#define jit_MacroAssembler_h
+
+#include "mozilla/EndianUtils.h"
+#include "mozilla/MacroForEach.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Variant.h"
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/MacroAssembler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/MacroAssembler-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/MacroAssembler-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/MacroAssembler-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/MacroAssembler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/MacroAssembler-mips64.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/MacroAssembler-loong64.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/MacroAssembler-riscv64.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/MacroAssembler-wasm32.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/MacroAssembler-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+#include "jit/ABIArgGenerator.h"
+#include "jit/ABIFunctions.h"
+#include "jit/AtomicOp.h"
+#include "jit/IonTypes.h"
+#include "jit/MoveResolver.h"
+#include "jit/VMFunctions.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "util/Memory.h"
+#include "vm/FunctionFlags.h"
+#include "vm/Opcodes.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmFrame.h"
+
+// [SMDOC] MacroAssembler multi-platform overview
+//
+// * How to read/write MacroAssembler method declarations:
+//
+// The following macros are made to avoid #ifdef around each method declaration
+// of the Macro Assembler, and they are also used as a hint on the location of
+// the implementations of each method. For example, the following declaration
+//
+// void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
+//
+// suggests the MacroAssembler::Pop(FloatRegister) method is implemented in
+// x86-shared/MacroAssembler-x86-shared.h, and also in arm/MacroAssembler-arm.h.
+//
+// - If there is no annotation, then there is only one generic definition in
+// MacroAssembler.cpp.
+//
+// - If the declaration is "inline", then the method definition(s) would be in
+// the "-inl.h" variant of the same file(s).
+//
+// The script check_macroassembler_style.py (which runs on every build) is
+// used to verify that method definitions match the annotation on the method
+// declarations. If there is any difference, then you either forgot to define
+// the method in one of the macro assemblers, or you forgot to update the
+// annotation of the macro assembler declaration.
+//
+// Some convenient short-cuts are used to avoid repeating the same list of
+// architectures on each method declaration, such as PER_ARCH and
+// PER_SHARED_ARCH.
+//
+// Functions that are architecture-agnostic and the same for all
+// architectures, but that must be defined inline *in this header* to avoid
+// used-before-defined warnings/errors that would occur if the definitions
+// were in MacroAssembler-inl.h, should use the OOL_IN_HEADER marker at the
+// end of the declaration:
+//
+// inline uint32_t framePushed() const OOL_IN_HEADER;
+//
+// Such functions should then be defined immediately after MacroAssembler's
+// definition, for example:
+//
+// //{{{ check_macroassembler_style
+// inline uint32_t
+// MacroAssembler::framePushed() const
+// {
+// return framePushed_;
+// }
+// //}}} check_macroassembler_style
+
+#define ALL_ARCH mips32, mips64, arm, arm64, x86, x64, loong64, riscv64, wasm32
+#define ALL_SHARED_ARCH \
+ arm, arm64, loong64, riscv64, x86_shared, mips_shared, wasm32
+
+// * How this macro works:
+//
+// DEFINED_ON is a macro which checks whether, for the current architecture,
+// the method is defined on the macro assembler or not.
+//
+// For each architecture, we have a macro named DEFINED_ON_arch. This macro is
+// empty if this is not the current architecture. Otherwise it is set to either
+// "define" or "crash" ("crash" is only used for the none target so far).
+//
+// The DEFINED_ON macro maps the list of architecture names given as arguments
+// to a list of macro names. For example,
+//
+// DEFINED_ON(arm, x86_shared)
+//
+// is expanded to
+//
+// DEFINED_ON_none DEFINED_ON_arm DEFINED_ON_x86_shared
+//
+// which are later expanded on ARM, x86, x64 by DEFINED_ON_EXPAND_ARCH_RESULTS
+// to
+//
+// define
+//
+// or if the JIT is disabled or set to no architecture to
+//
+// crash
+//
+// or to nothing, if the current architecture is not listed in the list of
+// arguments of DEFINED_ON. Note that only one of the DEFINED_ON_arch macros
+// contributes to the non-empty result, which is the macro of the current
+// architecture if it is listed in the arguments of DEFINED_ON.
+//
+// This result is appended to DEFINED_ON_RESULT_ before expanding the macro,
+// which results in either no annotation, a MOZ_CRASH(), or a "= delete"
+// annotation on the method declaration.
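+//
+// For example, on a MIPS64 build the declaration
+//
+//   void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
+//
+// ends up selecting DEFINED_ON_RESULT_ (none of the listed architectures
+// match) and so expands to
+//
+//   void Pop(FloatRegister t) = delete;
+//
+// whereas on x86, x64 or ARM it expands to a plain declaration, to be defined
+// in the corresponding platform-specific file.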
+
+#define DEFINED_ON_x86
+#define DEFINED_ON_x64
+#define DEFINED_ON_x86_shared
+#define DEFINED_ON_arm
+#define DEFINED_ON_arm64
+#define DEFINED_ON_mips32
+#define DEFINED_ON_mips64
+#define DEFINED_ON_mips_shared
+#define DEFINED_ON_loong64
+#define DEFINED_ON_riscv64
+#define DEFINED_ON_wasm32
+#define DEFINED_ON_none
+
+// Specialize for each architecture.
+#if defined(JS_CODEGEN_X86)
+# undef DEFINED_ON_x86
+# define DEFINED_ON_x86 define
+# undef DEFINED_ON_x86_shared
+# define DEFINED_ON_x86_shared define
+#elif defined(JS_CODEGEN_X64)
+# undef DEFINED_ON_x64
+# define DEFINED_ON_x64 define
+# undef DEFINED_ON_x86_shared
+# define DEFINED_ON_x86_shared define
+#elif defined(JS_CODEGEN_ARM)
+# undef DEFINED_ON_arm
+# define DEFINED_ON_arm define
+#elif defined(JS_CODEGEN_ARM64)
+# undef DEFINED_ON_arm64
+# define DEFINED_ON_arm64 define
+#elif defined(JS_CODEGEN_MIPS32)
+# undef DEFINED_ON_mips32
+# define DEFINED_ON_mips32 define
+# undef DEFINED_ON_mips_shared
+# define DEFINED_ON_mips_shared define
+#elif defined(JS_CODEGEN_MIPS64)
+# undef DEFINED_ON_mips64
+# define DEFINED_ON_mips64 define
+# undef DEFINED_ON_mips_shared
+# define DEFINED_ON_mips_shared define
+#elif defined(JS_CODEGEN_LOONG64)
+# undef DEFINED_ON_loong64
+# define DEFINED_ON_loong64 define
+#elif defined(JS_CODEGEN_RISCV64)
+# undef DEFINED_ON_riscv64
+# define DEFINED_ON_riscv64 define
+#elif defined(JS_CODEGEN_WASM32)
+# undef DEFINED_ON_wasm32
+# define DEFINED_ON_wasm32 define
+#elif defined(JS_CODEGEN_NONE)
+# undef DEFINED_ON_none
+# define DEFINED_ON_none crash
+#else
+# error "Unknown architecture!"
+#endif
+
+#define DEFINED_ON_RESULT_crash \
+ { MOZ_CRASH(); }
+#define DEFINED_ON_RESULT_define
+#define DEFINED_ON_RESULT_ = delete
+
+#define DEFINED_ON_DISPATCH_RESULT_2(Macro, Result) Macro##Result
+#define DEFINED_ON_DISPATCH_RESULT(...) \
+ DEFINED_ON_DISPATCH_RESULT_2(DEFINED_ON_RESULT_, __VA_ARGS__)
+
+// We need to let the evaluation of MOZ_FOR_EACH terminate.
+#define DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult) \
+ DEFINED_ON_DISPATCH_RESULT ParenResult
+#define DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult) \
+ DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult)
+#define DEFINED_ON_EXPAND_ARCH_RESULTS(ParenResult) \
+ DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult)
+
+#define DEFINED_ON_FWDARCH(Arch) DEFINED_ON_##Arch
+#define DEFINED_ON_MAP_ON_ARCHS(ArchList) \
+ DEFINED_ON_EXPAND_ARCH_RESULTS( \
+ (MOZ_FOR_EACH(DEFINED_ON_FWDARCH, (), ArchList)))
+
+#define DEFINED_ON(...) DEFINED_ON_MAP_ON_ARCHS((none, __VA_ARGS__))
+
+#define PER_ARCH DEFINED_ON(ALL_ARCH)
+#define PER_SHARED_ARCH DEFINED_ON(ALL_SHARED_ARCH)
+#define OOL_IN_HEADER
+
+namespace JS {
+struct ExpandoAndGeneration;
+}
+
+namespace js {
+
+class StaticStrings;
+class TypedArrayObject;
+
+enum class NativeIteratorIndices : uint32_t;
+
+namespace wasm {
+class CalleeDesc;
+class CallSiteDesc;
+class BytecodeOffset;
+class MemoryAccessDesc;
+
+struct ModuleEnvironment;
+
+enum class FailureMode : uint8_t;
+enum class SimdOp;
+enum class SymbolicAddress;
+enum class Trap;
+} // namespace wasm
+
+namespace jit {
+
+// Defined in JitFrames.h
+enum class ExitFrameType : uint8_t;
+
+class AutoSaveLiveRegisters;
+class CompileZone;
+class TemplateNativeObject;
+class TemplateObject;
+
+enum class CheckUnsafeCallWithABI {
+ // Require the callee to use AutoUnsafeCallWithABI.
+ Check,
+
+ // We pushed an exit frame so this callWithABI can safely GC and walk the
+ // stack.
+ DontCheckHasExitFrame,
+
+ // Don't check this callWithABI uses AutoUnsafeCallWithABI, for instance
+ // because we're calling a simple helper function (like malloc or js_free)
+ // that we can't change and/or that we know won't GC.
+ DontCheckOther,
+};
+
+// This is a global function made to create the DynFn type in a controlled
+// environment which checks that the function signature has been registered
+// as an ABI function signature.
+template <typename Sig>
+static inline DynFn DynamicFunction(Sig fun);
+
+enum class CharEncoding { Latin1, TwoByte };
+
+constexpr uint32_t WasmCallerInstanceOffsetBeforeCall =
+ wasm::FrameWithInstances::callerInstanceOffsetWithoutFrame();
+constexpr uint32_t WasmCalleeInstanceOffsetBeforeCall =
+ wasm::FrameWithInstances::calleeInstanceOffsetWithoutFrame();
+
+// Allocation sites may be passed to GC thing allocation methods either via a
+// register (for baseline compilation) or an enum indicating one of the
+// catch-all allocation sites (for optimized compilation).
+struct AllocSiteInput
+ : public mozilla::Variant<Register, gc::CatchAllAllocSite> {
+ using Base = mozilla::Variant<Register, gc::CatchAllAllocSite>;
+ AllocSiteInput() : Base(gc::CatchAllAllocSite::Unknown) {}
+ explicit AllocSiteInput(gc::CatchAllAllocSite catchAll) : Base(catchAll) {}
+ explicit AllocSiteInput(Register reg) : Base(reg) {}
+};
+
+// [SMDOC] Code generation invariants (incomplete)
+//
+// ## 64-bit GPRs carrying 32-bit values
+//
+// At least at the end of every JS or Wasm operation (= SpiderMonkey bytecode or
+// Wasm bytecode; this is necessarily a little vague), if a 64-bit GPR has a
+// 32-bit value, then the upper 32 bits of the register may be predictable in
+// accordance with platform-specific rules, as follows.
+//
+// - On x64 and arm64, the upper bits are zero
+// - On mips64 and loongarch64 the upper bits are the sign extension of the
+// lower bits
+// - (On risc-v the rule is not settled yet. Sign extension is the most
+//   likely rule, but "unpredictable" is an option.)
+//
+// In most cases no extra work needs to be done to maintain the invariant:
+//
+// - 32-bit operations on x64 and arm64 zero-extend the result to 64 bits.
+// These operations ignore the upper bits of the inputs.
+// - 32-bit operations on mips64 sign-extend the result to 64 bits (even many
+// that are labeled as "unsigned", eg ADDU, though not all, eg LU).
+// Additionally, the inputs to many 32-bit operations must be properly
+// sign-extended to avoid "unpredictable" behavior, and our simulators check
+// that inputs conform.
+// - (32-bit operations on risc-v and loongarch64 sign-extend, much as mips, but
+// appear to ignore the upper bits of the inputs.)
+//
+// The upshot of these invariants is, among other things, that:
+//
+// - No code needs to be generated when a 32-bit value is extended to 64 bits
+// or a 64-bit value is wrapped to 32 bits, if the upper bits are known to be
+// correct because they resulted from an operation that produced them
+// predictably.
+// - Literal loads must be careful to avoid instructions that might extend the
+// literal in the wrong way.
+// - Code that produces values using intermediate values with non-canonical
+// extensions must extend according to platform conventions before being
+// "done".
+//
+// All optimizations are necessarily platform-specific and should only be used
+// in platform-specific code. We may add architectures in the future that do
+// not follow the patterns of the few architectures we already have.
+//
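+// For example, x64-specific code may rely on a 32-bit |add32| having zeroed
+// the upper half of its destination register, and so widen the value to 64
+// bits without emitting an extra instruction; mips64-specific code would
+// instead rely on the sign-extension rule above.
+//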
+// Also see MacroAssembler::debugAssertCanonicalInt32().
+
+// The public entrypoint for emitting assembly. Note that a MacroAssembler can
+// use cx->lifoAlloc, so take care not to interleave masm use with other
+// lifoAlloc use if one will be destroyed before the other.
+class MacroAssembler : public MacroAssemblerSpecific {
+ private:
+ // Information about the current JSRuntime. This is nullptr only for Wasm
+ // compilations.
+ CompileRuntime* maybeRuntime_ = nullptr;
+
+ // Information about the current Realm. This is nullptr for Wasm compilations
+ // and when compiling JitRuntime trampolines.
+ CompileRealm* maybeRealm_ = nullptr;
+
+ // Labels for handling exceptions and failures.
+ NonAssertingLabel failureLabel_;
+
+ protected:
+ // Constructor is protected. Use one of the derived classes!
+ explicit MacroAssembler(TempAllocator& alloc,
+ CompileRuntime* maybeRuntime = nullptr,
+ CompileRealm* maybeRealm = nullptr);
+
+ public:
+ MoveResolver& moveResolver() {
+ // As an optimization, the MoveResolver is a persistent data structure
+ // shared between visitors in the CodeGenerator. This assertion
+ // checks that state is not leaking from visitor to visitor
+ // via an unresolved addMove().
+ MOZ_ASSERT(moveResolver_.hasNoPendingMoves());
+ return moveResolver_;
+ }
+
+ size_t instructionsSize() const { return size(); }
+
+ CompileRealm* realm() const {
+ MOZ_ASSERT(maybeRealm_);
+ return maybeRealm_;
+ }
+ CompileRuntime* runtime() const {
+ MOZ_ASSERT(maybeRuntime_);
+ return maybeRuntime_;
+ }
+
+#ifdef JS_HAS_HIDDEN_SP
+ void Push(RegisterOrSP reg);
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ // `op` should be a shift operation. Return true if a variable-width shift
+ // operation on this architecture should pre-mask the shift count, and if so,
+ // return the mask in `*mask`.
+ static bool MustMaskShiftCountSimd128(wasm::SimdOp op, int32_t* mask);
+#endif
+
+ private:
+ // The value returned by GetMaxOffsetGuardLimit() in WasmTypes.h
+ uint32_t wasmMaxOffsetGuardLimit_;
+
+ public:
+ uint32_t wasmMaxOffsetGuardLimit() const { return wasmMaxOffsetGuardLimit_; }
+ void setWasmMaxOffsetGuardLimit(uint32_t limit) {
+ wasmMaxOffsetGuardLimit_ = limit;
+ }
+
+ //{{{ check_macroassembler_decl_style
+ public:
+ // ===============================================================
+ // MacroAssembler high-level usage.
+
+ // Flushes the assembly buffer, on platforms that need it.
+ void flush() PER_SHARED_ARCH;
+
+ // Add a comment that is visible in the pretty printed assembly code.
+ void comment(const char* msg) PER_SHARED_ARCH;
+
+ // ===============================================================
+ // Frame manipulation functions.
+
+ inline uint32_t framePushed() const OOL_IN_HEADER;
+ inline void setFramePushed(uint32_t framePushed) OOL_IN_HEADER;
+ inline void adjustFrame(int32_t value) OOL_IN_HEADER;
+
+ // Adjust the frame, to account for implicit modification of the stack
+  // pointer, such that the callee can remove arguments on behalf of the
+ // caller.
+ inline void implicitPop(uint32_t bytes) OOL_IN_HEADER;
+
+ private:
+ // This field is used to statically (at compilation time) emulate a frame
+ // pointer by keeping track of stack manipulations.
+ //
+ // It is maintained by all stack manipulation functions below.
+ uint32_t framePushed_;
+
+ public:
+ // ===============================================================
+ // Stack manipulation functions -- sets of registers.
+
+ // Approximately speaking, the following routines must use the same memory
+ // layout. Any inconsistencies will certainly lead to crashing in generated
+ // code:
+ //
+ // MacroAssembler::PushRegsInMaskSizeInBytes
+ // MacroAssembler::PushRegsInMask
+ // MacroAssembler::storeRegsInMask
+ // MacroAssembler::PopRegsInMask
+ // MacroAssembler::PopRegsInMaskIgnore
+ // FloatRegister::getRegisterDumpOffsetInBytes
+ // (no class) PushRegisterDump
+ // (union) RegisterContent
+ // JitRuntime::generateInvalidator
+ // JitRuntime::generateBailoutHandler
+ // JSJitFrameIter::machineState
+ //
+ // To be more exact, the invariants are:
+ //
+ // * The save area is conceptually viewed as starting at a highest address
+ // (really, at "highest address - 1") and working down to some lower
+ // address.
+ //
+ // * PushRegsInMask, storeRegsInMask and PopRegsInMask{Ignore} must use
+ // exactly the same memory layout, when starting from the abovementioned
+ // highest address.
+ //
+ // * PushRegsInMaskSizeInBytes must produce a value which is exactly equal
+ // to the change in the machine's stack pointer register as a result of
+ // calling PushRegsInMask or PopRegsInMask{Ignore}. This value must be at
+ // least uintptr_t-aligned on the target, and may be more aligned than that.
+ //
+ // * PushRegsInMaskSizeInBytes must produce a value which is greater than or
+ // equal to the amount of space used by storeRegsInMask.
+ //
+ // * Hence, regardless of whether the save area is created with
+ // storeRegsInMask or PushRegsInMask, it is guaranteed to fit inside an
+ // area of size calculated by PushRegsInMaskSizeInBytes.
+ //
+ // * For the `ignore` argument of PopRegsInMaskIgnore, equality checking
+ // for the floating point/SIMD registers is done on the basis of the
+ // underlying physical register, regardless of width. For example, if the
+ // to-restore set contains v17 (the SIMD register with encoding 17) and
+ // the ignore set contains d17 (the double register with encoding 17) then
+ // no part of the physical register with encoding 17 will be restored.
+ // (This is probably not true on arm32, since that has aliased float32
+ // registers; but none of our other targets do.)
+ //
+  //   * {Push,store}RegsInMask are further constrained as
+ // follows: when given the argument AllFloatRegisters, the resulting
+ // memory area must contain exactly all the SIMD/FP registers for the
+ // target at their widest width (that we care about). [We have no targets
+ // where the SIMD registers and FP register sets are disjoint.] They must
+ // be packed end-to-end with no holes, with the register with the lowest
+ // encoding number (0), as returned by FloatRegister::encoding(), at the
+ // abovementioned highest address, register 1 just below that, etc.
+ //
+ // Furthermore the sizeof(RegisterContent) must equal the size of a SIMD
+ // register in the abovementioned array.
+ //
+ // Furthermore the value returned by
+ // FloatRegister::getRegisterDumpOffsetInBytes must be a correct index
+ // into the abovementioned array. Given the constraints, the only correct
+ // value is `reg.encoding() * sizeof(RegisterContent)`.
+ //
+ // Note that some of the routines listed above are JS-only, and do not support
+ // SIMD registers. They are otherwise part of the same equivalence class.
+ // Register spilling for e.g. OOL VM calls is implemented using
+ // PushRegsInMask, and recovered on bailout using machineState. This requires
+ // the same layout to be used in machineState, and therefore in all other code
+ // that can spill registers that are recovered on bailout. Implementations of
+ // JitRuntime::generate{Invalidator,BailoutHandler} should either call
+ // PushRegsInMask, or check carefully to be sure that they generate the same
+ // layout.
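+  //
+  // A minimal usage sketch (the register set here is illustrative):
+  //
+  //   LiveRegisterSet volatileRegs(RegisterSet::Volatile());
+  //   size_t bytes = masm.PushRegsInMaskSizeInBytes(volatileRegs);
+  //   masm.PushRegsInMask(volatileRegs);  // moves the stack pointer by |bytes|
+  //   ...
+  //   masm.PopRegsInMask(volatileRegs);   // restores the same layout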
+
+ // The size of the area used by PushRegsInMask.
+ size_t PushRegsInMaskSizeInBytes(LiveRegisterSet set)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ void PushRegsInMask(LiveRegisterSet set)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+ void PushRegsInMask(LiveGeneralRegisterSet set);
+
+ // Like PushRegsInMask, but instead of pushing the registers, store them to
+ // |dest|. |dest| should point to the end of the reserved space, so the
+ // first register will be stored at |dest.offset - sizeof(register)|. It is
+ // required that |dest.offset| is at least as large as the value computed by
+ // PushRegsInMaskSizeInBytes for this |set|. In other words, |dest.base|
+ // must point to either the lowest address in the save area, or some address
+ // below that.
+ void storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ void PopRegsInMask(LiveRegisterSet set);
+ void PopRegsInMask(LiveGeneralRegisterSet set);
+ void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ // ===============================================================
+ // Stack manipulation functions -- single registers/values.
+
+ void Push(const Operand op) DEFINED_ON(x86_shared);
+ void Push(Register reg) PER_SHARED_ARCH;
+ void Push(Register reg1, Register reg2, Register reg3, Register reg4)
+ DEFINED_ON(arm64);
+ void Push(const Imm32 imm) PER_SHARED_ARCH;
+ void Push(const ImmWord imm) PER_SHARED_ARCH;
+ void Push(const ImmPtr imm) PER_SHARED_ARCH;
+ void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
+ void Push(FloatRegister reg) PER_SHARED_ARCH;
+ void PushBoxed(FloatRegister reg) PER_ARCH;
+ void PushFlags() DEFINED_ON(x86_shared);
+ void Push(PropertyKey key, Register scratchReg);
+ void Push(const Address& addr);
+ void Push(TypedOrValueRegister v);
+ void Push(const ConstantOrRegister& v);
+ void Push(const ValueOperand& val);
+ void Push(const Value& val);
+ void Push(JSValueType type, Register reg);
+ void Push(const Register64 reg);
+ void PushEmptyRooted(VMFunctionData::RootType rootType);
+ inline CodeOffset PushWithPatch(ImmWord word);
+ inline CodeOffset PushWithPatch(ImmPtr imm);
+
+ void Pop(const Operand op) DEFINED_ON(x86_shared);
+ void Pop(Register reg) PER_SHARED_ARCH;
+ void Pop(FloatRegister t) PER_SHARED_ARCH;
+ void Pop(const ValueOperand& val) PER_SHARED_ARCH;
+ void PopFlags() DEFINED_ON(x86_shared);
+ void PopStackPtr()
+ DEFINED_ON(arm, mips_shared, x86_shared, loong64, riscv64, wasm32);
+ void popRooted(VMFunctionData::RootType rootType, Register cellReg,
+ const ValueOperand& valueReg);
+
+ // Move the stack pointer based on the requested amount.
+ void adjustStack(int amount);
+ void freeStack(uint32_t amount);
+
+ // Warning: This method does not update the framePushed() counter.
+ void freeStack(Register amount);
+
+ private:
+ // ===============================================================
+ // Register allocation fields.
+#ifdef DEBUG
+ friend AutoRegisterScope;
+ friend AutoFloatRegisterScope;
+ // Used to track register scopes for debug builds.
+ // Manipulated by the AutoGenericRegisterScope class.
+ AllocatableRegisterSet debugTrackedRegisters_;
+#endif // DEBUG
+
+ public:
+ // ===============================================================
+ // Simple call functions.
+
+ // The returned CodeOffset is the assembler offset for the instruction
+ // immediately following the call; that is, for the return point.
+ CodeOffset call(Register reg) PER_SHARED_ARCH;
+ CodeOffset call(Label* label) PER_SHARED_ARCH;
+
+ void call(const Address& addr) PER_SHARED_ARCH;
+ void call(ImmWord imm) PER_SHARED_ARCH;
+ // Call a target native function, which is neither traceable nor movable.
+ void call(ImmPtr imm) PER_SHARED_ARCH;
+ CodeOffset call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
+ inline CodeOffset call(const wasm::CallSiteDesc& desc,
+ wasm::SymbolicAddress imm);
+
+ // Call a target JitCode, which must be traceable, and may be movable.
+ void call(JitCode* c) PER_SHARED_ARCH;
+
+ inline void call(TrampolinePtr code);
+
+ inline CodeOffset call(const wasm::CallSiteDesc& desc, const Register reg);
+ inline CodeOffset call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex);
+ inline void call(const wasm::CallSiteDesc& desc, wasm::Trap trap);
+
+ CodeOffset callWithPatch() PER_SHARED_ARCH;
+ void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
+
+ // Push the return address and make a call. On platforms where this function
+ // is not defined, push the link register (pushReturnAddress) at the entry
+ // point of the callee.
+ void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared);
+ void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared);
+
+ // These do not adjust framePushed().
+ void pushReturnAddress()
+ DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);
+ void popReturnAddress()
+ DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);
+
+ // Useful for dealing with two-valued returns.
+ void moveRegPair(Register src0, Register src1, Register dst0, Register dst1,
+ MoveOp::Type type = MoveOp::GENERAL);
+
+ public:
+ // ===============================================================
+ // Patchable near/far jumps.
+
+ // "Far jumps" provide the ability to jump to any uint32_t offset from any
+ // other uint32_t offset without using a constant pool (thus returning a
+ // simple CodeOffset instead of a CodeOffsetJump).
+ CodeOffset farJumpWithPatch() PER_SHARED_ARCH;
+ void patchFarJump(CodeOffset farJump, uint32_t targetOffset) PER_SHARED_ARCH;
+
+  // Emit a nop that can later be patched into a call with an int32 relative
+  // displacement (and patched back into a nop).
+ CodeOffset nopPatchableToCall() PER_SHARED_ARCH;
+ void nopPatchableToCall(const wasm::CallSiteDesc& desc);
+ static void patchNopToCall(uint8_t* callsite,
+ uint8_t* target) PER_SHARED_ARCH;
+ static void patchCallToNop(uint8_t* callsite) PER_SHARED_ARCH;
+
+ // These methods are like movWithPatch/PatchDataWithValueCheck but allow
+ // using pc-relative addressing on certain platforms (RIP-relative LEA on x64,
+ // ADR instruction on arm64).
+ //
+ // Note: "Near" applies to ARM64 where the target must be within 1 MB (this is
+ // release-asserted).
+ CodeOffset moveNearAddressWithPatch(Register dest) PER_ARCH;
+ static void patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target)
+ DEFINED_ON(x86, x64, arm, arm64, loong64, riscv64, wasm32, mips_shared);
+
+ public:
+ // ===============================================================
+ // [SMDOC] JIT-to-C++ Function Calls (callWithABI)
+ //
+ // callWithABI is used to make a call using the standard C/C++ system ABI.
+ //
+  // callWithABI is a low-level interface for making calls; as such, every call
+  // made with callWithABI should be organized in 6 steps: spilling live
+ // registers, aligning the stack, listing arguments of the called function,
+ // calling a function pointer, extracting the returned value and restoring
+ // live registers.
+ //
+ // A more detailed example of the six stages:
+ //
+ // 1) Saving of registers that are live. This will vary depending on which
+ // SpiderMonkey compiler you are working on. Registers that shouldn't be
+ // restored can be excluded.
+ //
+ // LiveRegisterSet volatileRegs(...);
+ // volatileRegs.take(scratch);
+ // masm.PushRegsInMask(volatileRegs);
+ //
+ // 2) Align the stack to perform the call with the correct stack alignment.
+ //
+ // When the stack pointer alignment is unknown and cannot be corrected
+ // when generating the code, setupUnalignedABICall must be used to
+ // dynamically align the stack pointer to the expectation of the ABI.
+ // When the stack pointer is known at JIT compilation time, the stack can
+ // be fixed manually and setupAlignedABICall and setupWasmABICall can be
+ // used.
+ //
+ // setupWasmABICall is a special case of setupAlignedABICall as
+  //      SpiderMonkey's WebAssembly implementation mostly follows the system
+ // ABI, except for float/double arguments, which always use floating
+ // point registers, even if this is not supported by the system ABI.
+ //
+ // masm.setupUnalignedABICall(scratch);
+ //
+ // 3) Passing arguments. Arguments are passed left-to-right.
+ //
+ // masm.passABIArg(scratch);
+  //      masm.passABIArg(FloatOp0, MoveOp::DOUBLE);
+ //
+  //    Note how float register arguments are annotated with MoveOp::DOUBLE.
+ //
+ // Concerning stack-relative address, see the note on passABIArg.
+ //
+ // 4) Make the call:
+ //
+ // using Fn = int32_t (*)(int32_t)
+ // masm.callWithABI<Fn, Callee>();
+ //
+ // In the case where the call returns a double, that needs to be
+  //      indicated to callWithABI like this:
+ //
+ // using Fn = double (*)(int32_t)
+ // masm.callWithABI<Fn, Callee>(MoveOp::DOUBLE);
+ //
+ // There are overloads to allow calls to registers and addresses.
+ //
+ // 5) Take care of the result
+ //
+ // masm.storeCallPointerResult(scratch1);
+ // masm.storeCallBoolResult(scratch1);
+ // masm.storeCallInt32Result(scratch1);
+ // masm.storeCallFloatResult(scratch1);
+ //
+ // 6) Restore the potentially clobbered volatile registers
+ //
+ // masm.PopRegsInMask(volatileRegs);
+ //
+ // If expecting a returned value, this call should use
+ // PopRegsInMaskIgnore to filter out the registers which are containing
+ // the returned value.
+ //
+ // Unless an exit frame is pushed prior to the setupABICall, the callee
+  // should not GC. To ensure this is the case, callWithABI is instrumented to
+ // make sure that in the default case callees are annotated with an
+ // AutoUnsafeCallWithABI on the stack.
+ //
+ // A callWithABI can opt out of checking, if for example it is known there
+ // is an exit frame, or the callee is known not to GC.
+ //
+ // If your callee needs to be able to GC, consider using a VMFunction, or
+ // create a fake exit frame, and instrument the TraceJitExitFrame
+ // accordingly.
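+  //
+  // Putting the six steps together (a sketch; |SomeHelper|, |argReg| and
+  // |scratch| are illustrative):
+  //
+  //    LiveRegisterSet volatileRegs(RegisterSet::Volatile());
+  //    volatileRegs.take(scratch);
+  //    masm.PushRegsInMask(volatileRegs);
+  //
+  //    masm.setupUnalignedABICall(scratch);
+  //    masm.passABIArg(argReg);
+  //
+  //    using Fn = int32_t (*)(int32_t);
+  //    masm.callWithABI<Fn, SomeHelper>();
+  //    masm.storeCallInt32Result(scratch);
+  //
+  //    masm.PopRegsInMask(volatileRegs);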
+
+ // Setup a call to C/C++ code, given the assumption that the framePushed
+ // accurately defines the state of the stack, and that the top of the stack
+ // was properly aligned. Note that this only supports cdecl.
+ //
+ // As a rule of thumb, this can be used in CodeGenerator but not in CacheIR or
+ // Baseline code (because the stack is not aligned to ABIStackAlignment).
+ void setupAlignedABICall();
+
+ // As setupAlignedABICall, but for WebAssembly native ABI calls, which pass
+ // through a builtin thunk that uses the wasm ABI. All the wasm ABI calls
+ // can be native, since we always know the stack alignment a priori.
+ void setupWasmABICall();
+
+ // Setup an ABI call for when the alignment is not known. This may need a
+ // scratch register.
+ void setupUnalignedABICall(Register scratch) PER_ARCH;
+
+ // Arguments must be assigned to a C/C++ call in order. They are moved
+ // in parallel immediately before performing the call. This process may
+ // temporarily use more stack, in which case esp-relative addresses will be
+ // automatically adjusted. It is extremely important that esp-relative
+ // addresses are computed *after* setupABICall(). Furthermore, no
+ // operations should be emitted while setting arguments.
+ void passABIArg(const MoveOperand& from, MoveOp::Type type);
+ inline void passABIArg(Register reg);
+ inline void passABIArg(FloatRegister reg, MoveOp::Type type);
+
+ inline void callWithABI(
+ DynFn fun, MoveOp::Type result = MoveOp::GENERAL,
+ CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
+ template <typename Sig, Sig fun>
+ inline void callWithABI(
+ MoveOp::Type result = MoveOp::GENERAL,
+ CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
+ inline void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
+ inline void callWithABI(const Address& fun,
+ MoveOp::Type result = MoveOp::GENERAL);
+
+ CodeOffset callWithABI(wasm::BytecodeOffset offset, wasm::SymbolicAddress fun,
+ mozilla::Maybe<int32_t> instanceOffset,
+ MoveOp::Type result = MoveOp::GENERAL);
+ void callDebugWithABI(wasm::SymbolicAddress fun,
+ MoveOp::Type result = MoveOp::GENERAL);
+
+ private:
+ // Reinitialize the variables which have to be cleared before making a call
+ // with callWithABI.
+ template <class ABIArgGeneratorT>
+ void setupABICallHelper();
+
+ // Reinitialize the variables which have to be cleared before making a call
+  // with the native ABI.
+ void setupNativeABICall();
+
+ // Reserve the stack and resolve the arguments move.
+ void callWithABIPre(uint32_t* stackAdjust,
+ bool callFromWasm = false) PER_ARCH;
+
+ // Emits a call to a C/C++ function, resolving all argument moves.
+ void callWithABINoProfiler(void* fun, MoveOp::Type result,
+ CheckUnsafeCallWithABI check);
+ void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
+ void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;
+
+ // Restore the stack to its state before the setup function call.
+ void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm = false) PER_ARCH;
+
+ // Create the signature to be able to decode the arguments of a native
+ // function, when calling a function within the simulator.
+ inline void appendSignatureType(MoveOp::Type type);
+ inline ABIFunctionType signature() const;
+
+ // Private variables used to handle moves between registers given as
+ // arguments to passABIArg and the list of ABI registers expected for the
+ // signature of the function.
+ MoveResolver moveResolver_;
+
+  // Architecture-specific implementation which specifies how registers & stack
+ // offsets are used for calling a function.
+ ABIArgGenerator abiArgs_;
+
+#ifdef DEBUG
+  // Flag used to assert that we use ABI functions in the right context.
+ bool inCall_;
+#endif
+
+  // If set by setupUnalignedABICall, then callWithABI will pop the saved stack
+  // pointer from the stack after the call.
+ bool dynamicAlignment_;
+
+#ifdef JS_SIMULATOR
+ // The signature is used to accumulate all types of arguments which are used
+ // by the caller. This is used by the simulators to decode the arguments
+ // properly, and cast the function pointer to the right type.
+ uint32_t signature_;
+#endif
+
+ public:
+ // ===============================================================
+ // Jit Frames.
+ //
+ // These functions are used to build the content of the Jit frames. See
+ // CommonFrameLayout class, and all its derivatives. The content should be
+  // pushed in the reverse order of the fields of the structures, such that
+ // the structures can be used to interpret the content of the stack.
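+  //
+  // For a plain JS-to-JS call this typically looks like (a sketch; the frame
+  // type and registers are illustrative):
+  //
+  //   ... push the actual arguments ...
+  //   masm.PushCalleeToken(calleeReg, /* constructing = */ false);
+  //   masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
+  //   uint32_t returnOffset = masm.callJit(calleeCodeReg);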
+
+ // Call the Jit function, and push the return address (or let the callee
+ // push the return address).
+ //
+ // These functions return the offset of the return address, in order to use
+ // the return address to index the safepoints, which are used to list all
+ // live registers.
+ inline uint32_t callJitNoProfiler(Register callee);
+ inline uint32_t callJit(Register callee);
+ inline uint32_t callJit(JitCode* code);
+ inline uint32_t callJit(TrampolinePtr code);
+ inline uint32_t callJit(ImmPtr callee);
+
+ // The frame descriptor is the second field of all Jit frames, pushed before
+ // calling the Jit function. See CommonFrameLayout::descriptor_.
+ inline void pushFrameDescriptor(FrameType type);
+ inline void PushFrameDescriptor(FrameType type);
+
+ // For JitFrameLayout, the descriptor also stores the number of arguments
+ // passed by the caller. See MakeFrameDescriptorForJitCall.
+ inline void pushFrameDescriptorForJitCall(FrameType type, uint32_t argc);
+ inline void pushFrameDescriptorForJitCall(FrameType type, Register argc,
+ Register scratch);
+ inline void PushFrameDescriptorForJitCall(FrameType type, uint32_t argc);
+ inline void PushFrameDescriptorForJitCall(FrameType type, Register argc,
+ Register scratch);
+
+ // Load the number of actual arguments from the frame's JitFrameLayout.
+ inline void loadNumActualArgs(Register framePtr, Register dest);
+
+  // Push the callee token of a JSFunction whose pointer is stored in the
+  // |callee| register. The callee token is packed with a |constructing| flag
+  // indicating whether the JS function is called with "new" or not.
+ inline void PushCalleeToken(Register callee, bool constructing);
+
+ // Unpack a callee token located at the |token| address, and return the
+ // JSFunction pointer in the |dest| register.
+ inline void loadFunctionFromCalleeToken(Address token, Register dest);
+
+ // This function emulates a call by pushing an exit frame on the stack,
+ // except that the fake-function is inlined within the body of the caller.
+ //
+ // This function assumes that the current frame is an IonJS frame.
+ //
+ // This function returns the offset of the /fake/ return address, in order to
+ // use the return address to index the safepoints, which are used to list all
+ // live registers.
+ //
+ // This function should be balanced with a call to adjustStack, to pop the
+ // exit frame and emulate the return statement of the inlined function.
+ inline uint32_t buildFakeExitFrame(Register scratch);
+
+ private:
+ // This function is used by buildFakeExitFrame to push a fake return address
+ // on the stack. This fake return address should never be used for resuming
+ // any execution, and can even be an invalid pointer into the instruction
+  // stream, as long as it does not alias any other return address.
+ uint32_t pushFakeReturnAddress(Register scratch) PER_SHARED_ARCH;
+
+ public:
+ // ===============================================================
+ // Exit frame footer.
+ //
+ // When calling outside the Jit we push an exit frame. To mark the stack
+ // correctly, we have to push additional information, called the Exit frame
+ // footer, which is used to identify how the stack is marked.
+ //
+ // See JitFrames.h, and TraceJitExitFrame in JitFrames.cpp.
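+  //
+  // A typical VM wrapper brackets the native call roughly like this (a
+  // sketch):
+  //
+  //   masm.enterExitFrame(cxReg, scratch, &f);  // push and link the footer
+  //   ... pass arguments and call the VM function ...
+  //   masm.leaveExitFrame();                    // pop the footer again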
+
+ // Push stub code and the VMFunctionData pointer.
+ inline void enterExitFrame(Register cxreg, Register scratch,
+ const VMFunctionData* f);
+
+ // Push an exit frame token to identify which fake exit frame this footer
+ // corresponds to.
+ inline void enterFakeExitFrame(Register cxreg, Register scratch,
+ ExitFrameType type);
+
+ // Push an exit frame token for a native call.
+ inline void enterFakeExitFrameForNative(Register cxreg, Register scratch,
+ bool isConstructing);
+
+ // Pop ExitFrame footer in addition to the extra frame.
+ inline void leaveExitFrame(size_t extraFrame = 0);
+
+ private:
+ // Save the top of the stack into JitActivation::packedExitFP of the
+ // current thread, which should be the location of the latest exit frame.
+ void linkExitFrame(Register cxreg, Register scratch);
+
+ public:
+ // ===============================================================
+ // Move instructions
+
+ inline void move64(Imm64 imm, Register64 dest) PER_ARCH;
+ inline void move64(Register64 src, Register64 dest) PER_ARCH;
+
+ inline void moveFloat32ToGPR(FloatRegister src,
+ Register dest) PER_SHARED_ARCH;
+ inline void moveGPRToFloat32(Register src,
+ FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void moveDoubleToGPR64(FloatRegister src, Register64 dest) PER_ARCH;
+ inline void moveGPR64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;
+
+ inline void move8SignExtend(Register src, Register dest) PER_SHARED_ARCH;
+ inline void move16SignExtend(Register src, Register dest) PER_SHARED_ARCH;
+
+ // move64To32 will clear the high bits of `dest` on 64-bit systems.
+ inline void move64To32(Register64 src, Register dest) PER_ARCH;
+
+ inline void move32To64ZeroExtend(Register src, Register64 dest) PER_ARCH;
+
+ inline void move8To64SignExtend(Register src, Register64 dest) PER_ARCH;
+ inline void move16To64SignExtend(Register src, Register64 dest) PER_ARCH;
+ inline void move32To64SignExtend(Register src, Register64 dest) PER_ARCH;
+
+ inline void move32SignExtendToPtr(Register src, Register dest) PER_ARCH;
+ inline void move32ZeroExtendToPtr(Register src, Register dest) PER_ARCH;
+
+ // Copy a constant, typed-register, or a ValueOperand into a ValueOperand
+ // destination.
+ inline void moveValue(const ConstantOrRegister& src,
+ const ValueOperand& dest);
+ void moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) PER_ARCH;
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) PER_ARCH;
+ void moveValue(const Value& src, const ValueOperand& dest) PER_ARCH;
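+  // Usage sketch (illustrative): materialize a constant Value, e.g.
+  //   masm.moveValue(UndefinedValue(), output);
+  // where |output| is a ValueOperand.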
+
+ void movePropertyKey(PropertyKey key, Register dest);
+
+ // ===============================================================
+ // Load instructions
+
+ inline void load32SignExtendToPtr(const Address& src, Register dest) PER_ARCH;
+
+ inline void loadAbiReturnAddress(Register dest) PER_SHARED_ARCH;
+
+ public:
+ // ===============================================================
+ // Logical instructions
+
+ inline void not32(Register reg) PER_SHARED_ARCH;
+ inline void notPtr(Register reg) PER_ARCH;
+
+ inline void and32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void and32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+ inline void and32(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
+ inline void and32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
+ inline void and32(const Address& src, Register dest) PER_SHARED_ARCH;
+
+ inline void andPtr(Register src, Register dest) PER_ARCH;
+ inline void andPtr(Imm32 imm, Register dest) PER_ARCH;
+
+ inline void and64(Imm64 imm, Register64 dest) PER_ARCH;
+ inline void or64(Imm64 imm, Register64 dest) PER_ARCH;
+ inline void xor64(Imm64 imm, Register64 dest) PER_ARCH;
+
+ inline void or32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void or32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+ inline void or32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
+
+ inline void orPtr(Register src, Register dest) PER_ARCH;
+ inline void orPtr(Imm32 imm, Register dest) PER_ARCH;
+
+ inline void and64(Register64 src, Register64 dest) PER_ARCH;
+ inline void or64(Register64 src, Register64 dest) PER_ARCH;
+ inline void xor64(Register64 src, Register64 dest) PER_ARCH;
+
+ inline void xor32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+ inline void xor32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
+ inline void xor32(const Address& src, Register dest) PER_SHARED_ARCH;
+
+ inline void xorPtr(Register src, Register dest) PER_ARCH;
+ inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;
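+  // Usage sketch (illustrative): mask a register down to its low byte with
+  //   masm.and32(Imm32(0xFF), reg);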
+
+ inline void and64(const Operand& src, Register64 dest)
+ DEFINED_ON(x64, mips64, loong64, riscv64);
+ inline void or64(const Operand& src, Register64 dest)
+ DEFINED_ON(x64, mips64, loong64, riscv64);
+ inline void xor64(const Operand& src, Register64 dest)
+ DEFINED_ON(x64, mips64, loong64, riscv64);
+
+ // ===============================================================
+ // Swap instructions
+
+ // Swap the two lower bytes and sign extend the result to 32-bit.
+ inline void byteSwap16SignExtend(Register reg) PER_SHARED_ARCH;
+
+ // Swap the two lower bytes and zero extend the result to 32-bit.
+ inline void byteSwap16ZeroExtend(Register reg) PER_SHARED_ARCH;
+
+ // Swap all four bytes in a 32-bit integer.
+ inline void byteSwap32(Register reg) PER_SHARED_ARCH;
+
+ // Swap all eight bytes in a 64-bit integer.
+ inline void byteSwap64(Register64 reg) PER_ARCH;
+
+ // ===============================================================
+ // Arithmetic functions
+
+  // Condition flags aren't guaranteed to be set by these functions: for
+  // example, x86 always sets condition flags, but ARM64 won't unless
+  // explicitly requested. Instead, use branch(Add|Sub|Mul|Neg) to test
+  // condition flags after performing arithmetic operations.
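+  // For example, an overflow-checked increment uses the branching form
+  // rather than add32 (sketch; |reg| and |overflow| are hypothetical):
+  //   masm.branchAdd32(Assembler::Overflow, Imm32(1), reg, &overflow);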
+
+ inline void add32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void add32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+ inline void add32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
+ inline void add32(Imm32 imm, const AbsoluteAddress& dest)
+ DEFINED_ON(x86_shared);
+
+ inline void addPtr(Register src, Register dest) PER_ARCH;
+ inline void addPtr(Register src1, Register src2, Register dest)
+ DEFINED_ON(arm64);
+ inline void addPtr(Imm32 imm, Register dest) PER_ARCH;
+ inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
+ inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
+ inline void addPtr(ImmPtr imm, Register dest);
+ inline void addPtr(Imm32 imm, const Address& dest)
+ DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
+ inline void addPtr(Imm32 imm, const AbsoluteAddress& dest)
+ DEFINED_ON(x86, x64);
+ inline void addPtr(const Address& src, Register dest)
+ DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
+
+ inline void add64(Register64 src, Register64 dest) PER_ARCH;
+ inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
+ inline void add64(Imm64 imm, Register64 dest) PER_ARCH;
+ inline void add64(const Operand& src, Register64 dest)
+ DEFINED_ON(x64, mips64, loong64, riscv64);
+
+ inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+  // Compute dest=SP-imm where dest is a pointer register and not SP. The
+  // offset returned from sub32FromStackPtrWithPatch() must be passed to
+  // patchSub32FromStackPtr().
+ inline CodeOffset sub32FromStackPtrWithPatch(Register dest) PER_ARCH;
+ inline void patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) PER_ARCH;
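+  // Patch pattern sketch (illustrative):
+  //   CodeOffset offset = masm.sub32FromStackPtrWithPatch(dest);
+  //   ... later, once the final value is known ...
+  //   masm.patchSub32FromStackPtr(offset, Imm32(frameSize));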
+
+ inline void addDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+ inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);
+
+ inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
+ inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;
+
+ inline void subPtr(Register src, Register dest) PER_ARCH;
+ inline void subPtr(Register src, const Address& dest)
+ DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
+ inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
+ inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64);
+ inline void subPtr(const Address& addr, Register dest)
+ DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
+
+ inline void sub64(Register64 src, Register64 dest) PER_ARCH;
+ inline void sub64(Imm64 imm, Register64 dest) PER_ARCH;
+ inline void sub64(const Operand& src, Register64 dest)
+ DEFINED_ON(x64, mips64, loong64, riscv64);
+
+ inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH;
+ inline void mul32(Imm32 imm, Register srcDest) PER_SHARED_ARCH;
+
+ inline void mul32(Register src1, Register src2, Register dest, Label* onOver)
+ DEFINED_ON(arm64);
+
+  // Store the high word of the unsigned multiplication in |dest|.
+ inline void mulHighUnsigned32(Imm32 imm, Register src,
+ Register dest) PER_ARCH;
+
+ inline void mulPtr(Register rhs, Register srcDest) PER_ARCH;
+
+ inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64);
+ inline void mul64(const Operand& src, const Register64& dest,
+ const Register temp)
+ DEFINED_ON(x64, mips64, loong64, riscv64);
+ inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH;
+ inline void mul64(Imm64 imm, const Register64& dest, const Register temp)
+ DEFINED_ON(x86, x64, arm, mips32, mips64, loong64, riscv64);
+ inline void mul64(const Register64& src, const Register64& dest,
+ const Register temp) PER_ARCH;
+ inline void mul64(const Register64& src1, const Register64& src2,
+ const Register64& dest) DEFINED_ON(arm64);
+ inline void mul64(Imm64 src1, const Register64& src2, const Register64& dest)
+ DEFINED_ON(arm64);
+
+ inline void mulBy3(Register src, Register dest) PER_ARCH;
+
+ inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+ inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
+ DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
+
+ // Perform an integer division, returning the integer part rounded toward
+ // zero. rhs must not be zero, and the division must not overflow.
+ //
+ // On ARM, the chip must have hardware division instructions.
+ inline void quotient32(Register rhs, Register srcDest, bool isUnsigned)
+ DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);
+
+ // As above, but srcDest must be eax and tempEdx must be edx.
+ inline void quotient32(Register rhs, Register srcDest, Register tempEdx,
+ bool isUnsigned) DEFINED_ON(x86_shared);
+
+ // Perform an integer division, returning the remainder part.
+ // rhs must not be zero, and the division must not overflow.
+ //
+ // On ARM, the chip must have hardware division instructions.
+ inline void remainder32(Register rhs, Register srcDest, bool isUnsigned)
+ DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);
+
+ // As above, but srcDest must be eax and tempEdx must be edx.
+ inline void remainder32(Register rhs, Register srcDest, Register tempEdx,
+ bool isUnsigned) DEFINED_ON(x86_shared);
+
+  // Perform an integer division, returning the remainder. rhs must not be
+  // zero, and the division must not overflow.
+  //
+  // This variant preserves registers, and doesn't require hardware division
+  // instructions on ARM (will call out to a runtime routine).
+  //
+  // rhs is preserved, srcDest is clobbered.
+ void flexibleRemainder32(Register rhs, Register srcDest, bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs)
+ DEFINED_ON(mips_shared, arm, arm64, x86_shared, loong64, riscv64, wasm32);
+
+ // Perform an integer division, returning the integer part rounded toward
+ // zero. rhs must not be zero, and the division must not overflow.
+ //
+ // This variant preserves registers, and doesn't require hardware division
+ // instructions on ARM (will call out to a runtime routine).
+ //
+  // rhs is preserved, srcDest is clobbered.
+ void flexibleQuotient32(Register rhs, Register srcDest, bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs)
+ DEFINED_ON(mips_shared, arm, arm64, x86_shared, loong64, riscv64);
+
+  // Perform an integer division, returning the integer part rounded toward
+  // zero. rhs must not be zero, and the division must not overflow. The
+  // remainder is stored into the third argument register (|remOutput|).
+  //
+  // This variant preserves registers, and doesn't require hardware division
+  // instructions on ARM (will call out to a runtime routine).
+  //
+  // rhs is preserved, srcDest and remOutput are clobbered.
+ void flexibleDivMod32(Register rhs, Register srcDest, Register remOutput,
+ bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs)
+ DEFINED_ON(mips_shared, arm, arm64, x86_shared, loong64, riscv64, wasm32);
+
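+  // Usage sketch for flexibleDivMod32 (illustrative): compute quotient and
+  // remainder in one go while preserving |rhs|; |volatileRegs| is assumed to
+  // describe the registers live across the call-out:
+  //   masm.flexibleDivMod32(rhsReg, srcDestReg, remReg,
+  //                         /* isUnsigned = */ false, volatileRegs);
+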
+ inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+ inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void inc64(AbsoluteAddress dest) PER_ARCH;
+
+ inline void neg32(Register reg) PER_SHARED_ARCH;
+ inline void neg64(Register64 reg) PER_ARCH;
+ inline void negPtr(Register reg) PER_ARCH;
+
+ inline void negateFloat(FloatRegister reg) PER_SHARED_ARCH;
+
+ inline void negateDouble(FloatRegister reg) PER_SHARED_ARCH;
+
+ inline void abs32(Register src, Register dest) PER_SHARED_ARCH;
+ inline void absFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+ inline void absDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ inline void sqrtFloat32(FloatRegister src,
+ FloatRegister dest) PER_SHARED_ARCH;
+ inline void sqrtDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
+
+ void floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) PER_SHARED_ARCH;
+ void floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) PER_SHARED_ARCH;
+
+ void ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) PER_SHARED_ARCH;
+ void ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) PER_SHARED_ARCH;
+
+ void roundFloat32ToInt32(FloatRegister src, Register dest, FloatRegister temp,
+ Label* fail) PER_SHARED_ARCH;
+ void roundDoubleToInt32(FloatRegister src, Register dest, FloatRegister temp,
+ Label* fail) PER_SHARED_ARCH;
+
+ void truncFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) PER_SHARED_ARCH;
+ void truncDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) PER_SHARED_ARCH;
+
+ void nearbyIntDouble(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) PER_SHARED_ARCH;
+ void nearbyIntFloat32(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) PER_SHARED_ARCH;
+
+ void signInt32(Register input, Register output);
+ void signDouble(FloatRegister input, FloatRegister output);
+ void signDoubleToInt32(FloatRegister input, Register output,
+ FloatRegister temp, Label* fail);
+
+ void copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) PER_SHARED_ARCH;
+ void copySignFloat32(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) DEFINED_ON(x86_shared, arm64);
+
+ // Returns a random double in range [0, 1) in |dest|. The |rng| register must
+ // hold a pointer to a mozilla::non_crypto::XorShift128PlusRNG.
+ void randomDouble(Register rng, FloatRegister dest, Register64 temp0,
+ Register64 temp1);
+
+ // srcDest = {min,max}{Float32,Double}(srcDest, other)
+ // For min and max, handle NaN specially if handleNaN is true.
+
+ inline void minFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) PER_SHARED_ARCH;
+ inline void minDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) PER_SHARED_ARCH;
+
+ inline void maxFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) PER_SHARED_ARCH;
+ inline void maxDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) PER_SHARED_ARCH;
+
+ void minMaxArrayInt32(Register array, Register result, Register temp1,
+ Register temp2, Register temp3, bool isMax,
+ Label* fail);
+ void minMaxArrayNumber(Register array, FloatRegister result,
+ FloatRegister floatTemp, Register temp1,
+ Register temp2, bool isMax, Label* fail);
+
+ // Compute |pow(base, power)| and store the result in |dest|. If the result
+ // exceeds the int32 range, jumps to |onOver|.
+ // |base| and |power| are preserved, the other input registers are clobbered.
+ void pow32(Register base, Register power, Register dest, Register temp1,
+ Register temp2, Label* onOver);
+
+ void sameValueDouble(FloatRegister left, FloatRegister right,
+ FloatRegister temp, Register dest);
+
+ void branchIfNotRegExpPrototypeOptimizable(Register proto, Register temp,
+ Label* label);
+ void branchIfNotRegExpInstanceOptimizable(Register regexp, Register temp,
+ Label* label);
+
+ void loadRegExpLastIndex(Register regexp, Register string, Register lastIndex,
+ Label* notFoundZeroLastIndex);
+
+ // ===============================================================
+ // Shift functions
+
+ // For shift-by-register there may be platform-specific variations, for
+ // example, x86 will perform the shift mod 32 but ARM will perform the shift
+ // mod 256.
+ //
+ // For shift-by-immediate the platform assembler may restrict the immediate,
+ // for example, the ARM assembler requires the count for 32-bit shifts to be
+ // in the range [0,31].
+
+ inline void lshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
+ inline void rshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
+ inline void rshift32Arithmetic(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
+
+ inline void lshiftPtr(Imm32 imm, Register dest) PER_ARCH;
+ inline void rshiftPtr(Imm32 imm, Register dest) PER_ARCH;
+ inline void rshiftPtr(Imm32 imm, Register src, Register dest)
+ DEFINED_ON(arm64);
+ inline void rshiftPtrArithmetic(Imm32 imm, Register dest) PER_ARCH;
+
+ inline void lshift64(Imm32 imm, Register64 dest) PER_ARCH;
+ inline void rshift64(Imm32 imm, Register64 dest) PER_ARCH;
+ inline void rshift64Arithmetic(Imm32 imm, Register64 dest) PER_ARCH;
+
+ // On x86_shared these have the constraint that shift must be in CL.
+ inline void lshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
+ inline void rshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
+ inline void rshift32Arithmetic(Register shift,
+ Register srcDest) PER_SHARED_ARCH;
+ inline void lshiftPtr(Register shift, Register srcDest) PER_ARCH;
+ inline void rshiftPtr(Register shift, Register srcDest) PER_ARCH;
+
+ // These variants do not have the above constraint, but may emit some extra
+ // instructions on x86_shared. They also handle shift >= 32 consistently by
+ // masking with 0x1F (either explicitly or relying on the hardware to do
+ // that).
+ inline void flexibleLshift32(Register shift,
+ Register srcDest) PER_SHARED_ARCH;
+ inline void flexibleRshift32(Register shift,
+ Register srcDest) PER_SHARED_ARCH;
+ inline void flexibleRshift32Arithmetic(Register shift,
+ Register srcDest) PER_SHARED_ARCH;
+
+ inline void lshift64(Register shift, Register64 srcDest) PER_ARCH;
+ inline void rshift64(Register shift, Register64 srcDest) PER_ARCH;
+ inline void rshift64Arithmetic(Register shift, Register64 srcDest) PER_ARCH;
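+
+  // Sketch (illustrative): the flexible 32-bit variants avoid the x86_shared
+  // CL constraint, e.g.
+  //   masm.flexibleLshift32(shiftReg, valueReg);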
+
+ // ===============================================================
+ // Rotation functions
+ // Note: - on x86 and x64 the count register must be in CL.
+ // - on x64 the temp register should be InvalidReg.
+
+ inline void rotateLeft(Imm32 count, Register input,
+ Register dest) PER_SHARED_ARCH;
+ inline void rotateLeft(Register count, Register input,
+ Register dest) PER_SHARED_ARCH;
+ inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest)
+ DEFINED_ON(x64);
+ inline void rotateLeft64(Register count, Register64 input, Register64 dest)
+ DEFINED_ON(x64);
+ inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest,
+ Register temp) PER_ARCH;
+ inline void rotateLeft64(Register count, Register64 input, Register64 dest,
+ Register temp) PER_ARCH;
+
+ inline void rotateRight(Imm32 count, Register input,
+ Register dest) PER_SHARED_ARCH;
+ inline void rotateRight(Register count, Register input,
+ Register dest) PER_SHARED_ARCH;
+ inline void rotateRight64(Imm32 count, Register64 input, Register64 dest)
+ DEFINED_ON(x64);
+ inline void rotateRight64(Register count, Register64 input, Register64 dest)
+ DEFINED_ON(x64);
+ inline void rotateRight64(Imm32 count, Register64 input, Register64 dest,
+ Register temp) PER_ARCH;
+ inline void rotateRight64(Register count, Register64 input, Register64 dest,
+ Register temp) PER_ARCH;
+
+ // ===============================================================
+ // Bit counting functions
+
+ // knownNotZero may be true only if the src is known not to be zero.
+ inline void clz32(Register src, Register dest,
+ bool knownNotZero) PER_SHARED_ARCH;
+ inline void ctz32(Register src, Register dest,
+ bool knownNotZero) PER_SHARED_ARCH;
+
+ inline void clz64(Register64 src, Register dest) PER_ARCH;
+ inline void ctz64(Register64 src, Register dest) PER_ARCH;
+
+ // On x86_shared, temp may be Invalid only if the chip has the POPCNT
+ // instruction. On ARM, temp may never be Invalid.
+ inline void popcnt32(Register src, Register dest,
+ Register temp) PER_SHARED_ARCH;
+
+ // temp may be invalid only if the chip has the POPCNT instruction.
+ inline void popcnt64(Register64 src, Register64 dest, Register temp) PER_ARCH;
+
+ // ===============================================================
+ // Condition functions
+
+ inline void cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) PER_SHARED_ARCH;
+
+ inline void cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) PER_SHARED_ARCH;
+
+ template <typename T1, typename T2>
+ inline void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
+ wasm32);
+
+ // Only the NotEqual and Equal conditions are allowed.
+ inline void cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) PER_ARCH;
+
+ template <typename T1, typename T2>
+ inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) PER_ARCH;
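+
+  // Usage sketch (illustrative): materialize a comparison result as a
+  // boolean (0 or 1) in |dest|:
+  //   masm.cmp32Set(Assembler::Equal, lhsReg, Imm32(0), dest);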
+
+ // ===============================================================
+ // Branch functions
+
+ inline void branch8(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) PER_SHARED_ARCH;
+
+  // Compares the byte in |lhs| against |rhs| using an 8-bit comparison on
+  // x86/x64 or a 32-bit comparison on all other platforms. For cross-platform
+  // compatible code, the caller should ensure |rhs| is a zero- or
+  // sign-extended byte value, as appropriate.
+ inline void branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branch16(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ template <class L>
+ inline void branch32(Condition cond, Register lhs, Register rhs,
+ L label) PER_SHARED_ARCH;
+ template <class L>
+ inline void branch32(Condition cond, Register lhs, Imm32 rhs,
+ L label) PER_SHARED_ARCH;
+
+ inline void branch32(Condition cond, Register lhs, const Address& rhs,
+ Label* label) DEFINED_ON(arm64);
+
+ inline void branch32(Condition cond, const Address& lhs, Register rhs,
+ Label* label) PER_SHARED_ARCH;
+ inline void branch32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+ inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+
+ inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) DEFINED_ON(arm, x86_shared);
+ inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branch32(Condition cond, const Operand& lhs, Register rhs,
+ Label* label) DEFINED_ON(x86_shared);
+ inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs,
+ Label* label) DEFINED_ON(x86_shared);
+
+ inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+
+  // The supported conditions are Equal, NotEqual, LessThan(orEqual),
+  // GreaterThan(orEqual), Below(orEqual) and Above(orEqual). When no fail
+  // label is provided, execution falls through to the next instruction;
+  // otherwise it jumps to the fail label.
+ inline void branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail = nullptr) PER_ARCH;
+ inline void branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail = nullptr) PER_ARCH;
+ // Only the NotEqual and Equal conditions are allowed for the branch64
+ // variants with Address as lhs.
+ inline void branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) PER_ARCH;
+ inline void branch64(Condition cond, const Address& lhs, Register64 rhs,
+ Label* label) PER_ARCH;
+
+ // Compare the value at |lhs| with the value at |rhs|. The scratch
+ // register *must not* be the base of |lhs| or |rhs|.
+ inline void branch64(Condition cond, const Address& lhs, const Address& rhs,
+ Register scratch, Label* label) PER_ARCH;
+
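+  // Usage sketch (illustrative): branch when a 64-bit value is zero and fall
+  // through otherwise:
+  //   masm.branch64(Assembler::Equal, val64, Imm64(0), &isZero);
+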
+ template <class L>
+ inline void branchPtr(Condition cond, Register lhs, Register rhs,
+ L label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, Register lhs, ImmWord rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ template <class L>
+ inline void branchPtr(Condition cond, const Address& lhs, Register rhs,
+ L label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchPtr(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+ inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+
+ inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+
+ // Given a pointer to a GC Cell, retrieve the StoreBuffer pointer from its
+ // chunk header, or nullptr if it is in the tenured heap.
+ void loadStoreBuffer(Register ptr, Register buffer) PER_ARCH;
+
+ void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+ void branchPtrInNurseryChunk(Condition cond, const Address& address,
+ Register temp, Label* label) DEFINED_ON(x86);
+ void branchValueIsNurseryCell(Condition cond, const Address& address,
+ Register temp, Label* label) PER_ARCH;
+ void branchValueIsNurseryCell(Condition cond, ValueOperand value,
+ Register temp, Label* label) PER_ARCH;
+
+  // Compares a Value (lhs) that holds a private pointer boxed inside a
+  // js::Value against a raw pointer (rhs).
+ inline void branchPrivatePtr(Condition cond, const Address& lhs, Register rhs,
+ Label* label) PER_ARCH;
+
+ inline void branchFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) PER_SHARED_ARCH;
+
+  // Truncate a double/float32 to int32; when the value doesn't fit in an
+  // int32, jump to the failure label. This particular variant is allowed to
+  // return the value modulo 2**32, which isn't implemented on all
+  // architectures. E.g. the x64 variants will do this only in the int64_t
+  // range.
+ inline void branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest, Label* fail)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+ inline void branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest, Label* fail)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+
+  // Truncate a double/float32 to intptr; when the value doesn't fit, jump to
+  // the failure label.
+ inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest,
+ Label* fail) DEFINED_ON(x86, x64);
+ inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest,
+ Label* fail) DEFINED_ON(x86, x64);
+
+  // Truncate a double/float32 to int32; when the value doesn't fit, jump to
+  // the failure label.
+ inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+ inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) PER_ARCH;
+
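+  // Usage sketch (illustrative):
+  //   masm.branchTruncateDoubleToInt32(fpReg, intReg, &bailout);
+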
+ inline void branchDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) PER_SHARED_ARCH;
+
+ inline void branchDoubleNotInInt64Range(Address src, Register temp,
+ Label* fail);
+ inline void branchDoubleNotInUInt64Range(Address src, Register temp,
+ Label* fail);
+ inline void branchFloat32NotInInt64Range(Address src, Register temp,
+ Label* fail);
+ inline void branchFloat32NotInUInt64Range(Address src, Register temp,
+ Label* fail);
+
+ template <typename T>
+ inline void branchAdd32(Condition cond, T src, Register dest,
+ Label* label) PER_SHARED_ARCH;
+ template <typename T>
+ inline void branchSub32(Condition cond, T src, Register dest,
+ Label* label) PER_SHARED_ARCH;
+ template <typename T>
+ inline void branchMul32(Condition cond, T src, Register dest,
+ Label* label) PER_SHARED_ARCH;
+ template <typename T>
+ inline void branchRshift32(Condition cond, T src, Register dest,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branchNeg32(Condition cond, Register reg,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branchAdd64(Condition cond, Imm64 imm, Register64 dest,
+ Label* label) DEFINED_ON(x86, arm, wasm32);
+
+ template <typename T>
+ inline void branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) PER_SHARED_ARCH;
+
+ template <typename T>
+ inline void branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branchMulPtr(Condition cond, Register src, Register dest,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ template <class L>
+ inline void branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) PER_SHARED_ARCH;
+ template <class L>
+ inline void branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) PER_SHARED_ARCH;
+  inline void branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+
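+  // Usage sketch (illustrative; |FLAG_MASK| is a hypothetical bit mask):
+  //   masm.branchTest32(Assembler::NonZero, flagsReg, Imm32(FLAG_MASK),
+  //                     &hasFlag);
+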
+ template <class L>
+ inline void branchTestPtr(Condition cond, Register lhs, Register rhs,
+ L label) PER_SHARED_ARCH;
+ inline void branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) PER_SHARED_ARCH;
+
+ template <class L>
+ inline void branchTest64(Condition cond, Register64 lhs, Register64 rhs,
+ Register temp, L label) PER_ARCH;
+
+ // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
+ template <class L>
+ inline void branchIfFalseBool(Register reg, L label);
+
+ // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
+ inline void branchIfTrueBool(Register reg, Label* label);
+
+ inline void branchIfRope(Register str, Label* label);
+ inline void branchIfNotRope(Register str, Label* label);
+
+ inline void branchLatin1String(Register string, Label* label);
+ inline void branchTwoByteString(Register string, Label* label);
+
+ inline void branchIfBigIntIsNegative(Register bigInt, Label* label);
+ inline void branchIfBigIntIsNonNegative(Register bigInt, Label* label);
+ inline void branchIfBigIntIsZero(Register bigInt, Label* label);
+ inline void branchIfBigIntIsNonZero(Register bigInt, Label* label);
+
+ inline void branchTestFunctionFlags(Register fun, uint32_t flags,
+ Condition cond, Label* label);
+
+ inline void branchIfNotFunctionIsNonBuiltinCtor(Register fun,
+ Register scratch,
+ Label* label);
+
+ inline void branchIfFunctionHasNoJitEntry(Register fun, bool isConstructing,
+ Label* label);
+ inline void branchIfFunctionHasJitEntry(Register fun, bool isConstructing,
+ Label* label);
+
+ inline void branchIfScriptHasJitScript(Register script, Label* label);
+ inline void branchIfScriptHasNoJitScript(Register script, Label* label);
+ inline void loadJitScript(Register script, Register dest);
+
+ // Loads the function's argument count.
+ inline void loadFunctionArgCount(Register func, Register output);
+
+ // Loads the function length. This handles interpreted, native, and bound
+ // functions. The caller is responsible for checking that INTERPRETED_LAZY and
+ // RESOLVED_LENGTH flags are not set.
+ void loadFunctionLength(Register func, Register funFlagsAndArgCount,
+ Register output, Label* slowPath);
+
+ // Loads the function name. This handles interpreted, native, and bound
+ // functions.
+ void loadFunctionName(Register func, Register output, ImmGCPtr emptyString,
+ Label* slowPath);
+
+ void assertFunctionIsExtended(Register func);
+
+ inline void branchFunctionKind(Condition cond,
+ FunctionFlags::FunctionKind kind, Register fun,
+ Register scratch, Label* label);
+
+ inline void branchIfObjectEmulatesUndefined(Register objReg, Register scratch,
+ Label* slowCheck, Label* label);
+
+ // For all methods below: spectreRegToZero is a register that will be zeroed
+ // on speculatively executed code paths (when the branch should be taken but
+ // branch prediction speculates it isn't). Usually this will be the object
+ // register but the caller may pass a different register.
+
+ inline void branchTestObjClass(Condition cond, Register obj,
+ const JSClass* clasp, Register scratch,
+ Register spectreRegToZero, Label* label);
+ inline void branchTestObjClassNoSpectreMitigations(Condition cond,
+ Register obj,
+ const JSClass* clasp,
+ Register scratch,
+ Label* label);
+
+ inline void branchTestObjClass(Condition cond, Register obj,
+ const Address& clasp, Register scratch,
+ Register spectreRegToZero, Label* label);
+ inline void branchTestObjClassNoSpectreMitigations(Condition cond,
+ Register obj,
+ const Address& clasp,
+ Register scratch,
+ Label* label);
+
+ inline void branchTestObjClass(Condition cond, Register obj, Register clasp,
+ Register scratch, Register spectreRegToZero,
+ Label* label);
+
+ inline void branchTestObjShape(Condition cond, Register obj,
+ const Shape* shape, Register scratch,
+ Register spectreRegToZero, Label* label);
+ inline void branchTestObjShapeNoSpectreMitigations(Condition cond,
+ Register obj,
+ const Shape* shape,
+ Label* label);
+
+ void branchTestObjShapeList(Condition cond, Register obj,
+ Register shapeElements, Register shapeScratch,
+ Register endScratch, Register spectreScratch,
+ Label* label);
+
+ inline void branchTestClassIsFunction(Condition cond, Register clasp,
+ Label* label);
+ inline void branchTestObjIsFunction(Condition cond, Register obj,
+ Register scratch,
+ Register spectreRegToZero, Label* label);
+ inline void branchTestObjIsFunctionNoSpectreMitigations(Condition cond,
+ Register obj,
+ Register scratch,
+ Label* label);
+
+ inline void branchTestObjShape(Condition cond, Register obj, Register shape,
+ Register scratch, Register spectreRegToZero,
+ Label* label);
+ inline void branchTestObjShapeNoSpectreMitigations(Condition cond,
+ Register obj,
+ Register shape,
+ Label* label);
+
+ // TODO: audit/fix callers to be Spectre safe.
+ inline void branchTestObjShapeUnsafe(Condition cond, Register obj,
+ Register shape, Label* label);
+
+ void branchTestObjCompartment(Condition cond, Register obj,
+ const Address& compartment, Register scratch,
+ Label* label);
+ void branchTestObjCompartment(Condition cond, Register obj,
+ const JS::Compartment* compartment,
+ Register scratch, Label* label);
+
+ void branchIfNonNativeObj(Register obj, Register scratch, Label* label);
+
+ void branchIfObjectNotExtensible(Register obj, Register scratch,
+ Label* label);
+
+ inline void branchTestClassIsProxy(bool proxy, Register clasp, Label* label);
+
+ inline void branchTestObjectIsProxy(bool proxy, Register object,
+ Register scratch, Label* label);
+
+ inline void branchTestProxyHandlerFamily(Condition cond, Register proxy,
+ Register scratch,
+ const void* handlerp, Label* label);
+
+ inline void branchTestObjectIsWasmGcObject(bool isGcObject, Register obj,
+ Register scratch, Label* label);
+
+ inline void branchTestNeedsIncrementalBarrier(Condition cond, Label* label);
+ inline void branchTestNeedsIncrementalBarrierAnyZone(Condition cond,
+ Label* label,
+ Register scratch);
+
+  // Perform a type-test on the tag of a Value (32-bit boxing), or on the
+  // tagged value itself (64-bit boxing).
+ inline void branchTestUndefined(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestInt32(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestDouble(Condition cond, Register tag, Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+ inline void branchTestNumber(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestBoolean(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestString(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestSymbol(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestBigInt(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestNull(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestObject(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestPrimitive(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestMagic(Condition cond, Register tag,
+ Label* label) PER_SHARED_ARCH;
+ void branchTestType(Condition cond, Register tag, JSValueType type,
+ Label* label);
+
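+  // Usage sketch (illustrative): dispatch on an unboxed tag, e.g.
+  //   masm.branchTestInt32(Assembler::NotEqual, tagReg, &notInt32);
+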
+  // Perform a type-test on a Value, addressed by Address or BaseIndex, or
+  // loaded into a ValueOperand.
+  // BaseIndex and ValueOperand variants clobber the ScratchReg on x64.
+  // All variants clobber the ScratchReg on arm64.
+ inline void branchTestUndefined(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestUndefined(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestUndefined(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestInt32(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestInt32(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestDouble(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestDouble(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestBoolean(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestBoolean(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestBoolean(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestString(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestString(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestString(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestSymbol(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestSymbol(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestBigInt(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestNull(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestNull(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ // Clobbers the ScratchReg on x64.
+ inline void branchTestObject(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestObject(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestGCThing(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestGCThing(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestGCThing(Condition cond, const ValueOperand& value,
+ Label* label) PER_SHARED_ARCH;
+
+ inline void branchTestPrimitive(Condition cond, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestMagic(Condition cond, const Address& address,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestMagic(Condition cond, const BaseIndex& address,
+ Label* label) PER_SHARED_ARCH;
+ template <class L>
+ inline void branchTestMagic(Condition cond, const ValueOperand& value,
+ L label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ inline void branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) PER_ARCH;
+
+ inline void branchTestMagicValue(Condition cond, const ValueOperand& val,
+ JSWhyMagic why, Label* label);
+
+ void branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) PER_ARCH;
+
+ inline void branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) PER_ARCH;
+
+  // Checks whether the given Value evaluates to true or false in a boolean
+  // context. The type of the value should match the type of the method.
+ inline void branchTestInt32Truthy(bool truthy, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, x86_shared,
+ wasm32);
+ inline void branchTestDoubleTruthy(bool truthy, FloatRegister reg,
+ Label* label) PER_SHARED_ARCH;
+ inline void branchTestBooleanTruthy(bool truthy, const ValueOperand& value,
+ Label* label) PER_ARCH;
+ inline void branchTestStringTruthy(bool truthy, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+ inline void branchTestBigIntTruthy(bool truthy, const ValueOperand& value,
+ Label* label)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
+ x86_shared);
+
+ // Create an unconditional branch to the address given as argument.
+ inline void branchToComputedAddress(const BaseIndex& address) PER_ARCH;
+
+ private:
+ template <typename T, typename S, typename L>
+ inline void branchPtrImpl(Condition cond, const T& lhs, const S& rhs, L label)
+ DEFINED_ON(x86_shared);
+
+ void branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
+ DEFINED_ON(x86);
+ template <typename T>
+ void branchValueIsNurseryCellImpl(Condition cond, const T& value,
+ Register temp, Label* label)
+ DEFINED_ON(arm64, x64, mips64, loong64, riscv64);
+
+ template <typename T>
+ inline void branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestInt32Impl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestDoubleImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestNumberImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestBooleanImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestStringImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestSymbolImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestBigIntImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestNullImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestObjectImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T>
+ inline void branchTestGCThingImpl(Condition cond, const T& t,
+ Label* label) PER_SHARED_ARCH;
+ template <typename T>
+ inline void branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
+ DEFINED_ON(arm, arm64, x86_shared);
+ template <typename T, class L>
+ inline void branchTestMagicImpl(Condition cond, const T& t, L label)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ public:
+ template <typename T>
+ inline void testNumberSet(Condition cond, const T& src,
+ Register dest) PER_SHARED_ARCH;
+ template <typename T>
+ inline void testBooleanSet(Condition cond, const T& src,
+ Register dest) PER_SHARED_ARCH;
+ template <typename T>
+ inline void testStringSet(Condition cond, const T& src,
+ Register dest) PER_SHARED_ARCH;
+ template <typename T>
+ inline void testSymbolSet(Condition cond, const T& src,
+ Register dest) PER_SHARED_ARCH;
+ template <typename T>
+ inline void testBigIntSet(Condition cond, const T& src,
+ Register dest) PER_SHARED_ARCH;
+
+ public:
+ // The fallibleUnbox* methods below combine a Value type check with an unbox.
+ // Especially on 64-bit platforms this can be implemented more efficiently
+ // than a separate branch + unbox.
+ //
+ // |src| and |dest| can be the same register, but |dest| may hold garbage on
+ // failure.
+ inline void fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) PER_ARCH;
+ inline void fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) PER_ARCH;
+ inline void fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) PER_ARCH;
+ template <typename T>
+ inline void fallibleUnboxInt32(const T& src, Register dest, Label* fail);
+ template <typename T>
+ inline void fallibleUnboxBoolean(const T& src, Register dest, Label* fail);
+ template <typename T>
+ inline void fallibleUnboxObject(const T& src, Register dest, Label* fail);
+ template <typename T>
+ inline void fallibleUnboxString(const T& src, Register dest, Label* fail);
+ template <typename T>
+ inline void fallibleUnboxSymbol(const T& src, Register dest, Label* fail);
+ template <typename T>
+ inline void fallibleUnboxBigInt(const T& src, Register dest, Label* fail);
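+
+  // Usage sketch (illustrative): unbox an int32 or bail out:
+  //   masm.fallibleUnboxInt32(valueAddr, dest, &bailout);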
+
+ inline void cmp32Move32(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest)
+ DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86_shared);
+
+ inline void cmp32Move32(Condition cond, Register lhs, const Address& rhs,
+ Register src, Register dest)
+ DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86_shared);
+
+ inline void cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) PER_ARCH;
+
+ inline void cmpPtrMovePtr(Condition cond, Register lhs, const Address& rhs,
+ Register src, Register dest) PER_ARCH;
+
+ inline void cmp32Load32(Condition cond, Register lhs, const Address& rhs,
+ const Address& src, Register dest)
+ DEFINED_ON(arm, arm64, loong64, riscv64, mips_shared, x86_shared);
+
+ inline void cmp32Load32(Condition cond, Register lhs, Register rhs,
+ const Address& src, Register dest)
+ DEFINED_ON(arm, arm64, loong64, riscv64, mips_shared, x86_shared);
+
+ inline void cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest)
+ DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86, x64);
+
+ inline void cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest)
+ DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86, x64);
+
+ inline void test32LoadPtr(Condition cond, const Address& addr, Imm32 mask,
+ const Address& src, Register dest)
+ DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86, x64);
+
+ inline void test32MovePtr(Condition cond, const Address& addr, Imm32 mask,
+ Register src, Register dest)
+ DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86, x64);
+
+ // Conditional move for Spectre mitigations.
+ inline void spectreMovePtr(Condition cond, Register src, Register dest)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+
+ // Zeroes dest if the condition is true.
+ inline void spectreZeroRegister(Condition cond, Register scratch,
+ Register dest)
+ DEFINED_ON(arm, arm64, mips_shared, x86_shared, loong64, riscv64, wasm32);
+
+ // Performs a bounds check and zeroes the index register if out-of-bounds
+ // (to mitigate Spectre).
+ private:
+ inline void spectreBoundsCheck32(Register index, const Operand& length,
+ Register maybeScratch, Label* failure)
+ DEFINED_ON(x86);
+
+ public:
+ inline void spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch, Label* failure)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+ inline void spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch, Label* failure)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+
+ inline void spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch, Label* failure)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+ inline void spectreBoundsCheckPtr(Register index, const Address& length,
+ Register maybeScratch, Label* failure)
+ DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
+
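+  // Usage sketch (illustrative): check |index| against |lengthReg| and jump
+  // to |oob| when out of bounds; on the in-bounds path |index| is safe to use
+  // even under speculation:
+  //   masm.spectreBoundsCheck32(index, lengthReg, scratch, &oob);
+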
+ // ========================================================================
+ // Canonicalization primitives.
+ inline void canonicalizeDouble(FloatRegister reg);
+ inline void canonicalizeDoubleIfDeterministic(FloatRegister reg);
+
+ inline void canonicalizeFloat(FloatRegister reg);
+ inline void canonicalizeFloatIfDeterministic(FloatRegister reg);
+
+ public:
+ // ========================================================================
+ // Memory access primitives.
+ inline void storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
+ wasm32);
+ inline void storeUncanonicalizedDouble(FloatRegister src,
+ const BaseIndex& dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
+ wasm32);
+ inline void storeUncanonicalizedDouble(FloatRegister src, const Operand& dest)
+ DEFINED_ON(x86_shared);
+
+ template <class T>
+ inline void storeDouble(FloatRegister src, const T& dest);
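+
+  // Relationship sketch (an assumption about the inline implementation, not
+  // shown here): storeDouble() is expected to canonicalize when deterministic
+  // behavior is requested and then delegate to the uncanonicalized store,
+  // roughly:
+  //   canonicalizeDoubleIfDeterministic(src);
+  //   storeUncanonicalizedDouble(src, dest);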
+
+ template <class T>
+ inline void boxDouble(FloatRegister src, const T& dest);
+
+ using MacroAssemblerSpecific::boxDouble;
+
+ inline void storeUncanonicalizedFloat32(FloatRegister src,
+ const Address& dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
+ wasm32);
+ inline void storeUncanonicalizedFloat32(FloatRegister src,
+ const BaseIndex& dest)
+ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
+ wasm32);
+ inline void storeUncanonicalizedFloat32(FloatRegister src,
+ const Operand& dest)
+ DEFINED_ON(x86_shared);
+
+ template <class T>
+ inline void storeFloat32(FloatRegister src, const T& dest);
+
+ template <typename T>
+ void storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest) PER_ARCH;
+
+ inline void memoryBarrier(MemoryBarrierBits barrier) PER_SHARED_ARCH;
+
+ public:
+ // ========================================================================
+ // Wasm SIMD
+ //
+  // Naming is "operationSimd128" when the operation acts on the whole vector;
+  // otherwise it's "operation<Type><Size>x<Lanes>".
+ //
+ // For microarchitectural reasons we can in principle get a performance win by
+ // using int or float specific instructions in the operationSimd128 case when
+ // we know that subsequent operations on the result are int or float oriented.
+ // In practice, we don't care about that yet.
+ //
+ // The order of operations here follows those in the SIMD overview document,
+ // https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md.
+ //
+ // Since we must target Intel SSE indefinitely and SSE is one-address or
+ // two-address, the x86 porting interfaces are nearly all one-address or
+ // two-address. Likewise there are two-address ARM64 interfaces to support
+ // the baseline compiler. But there are also three-address ARM64 interfaces
+  // as the ARM64 Ion back-end can use those. In the future, three-address
+  // x86 interfaces may be added for AVX2 or similar.
+ //
+ // Conventions for argument order and naming and semantics:
+ // - Condition codes come first.
+ // - Other immediates (masks, shift counts) come next.
+ // - Operands come next:
+ // - For a binary two-address operator where the left-hand-side has the
+ // same type as the result, one register parameter is normally named
+ // `lhsDest` and is both the left-hand side and destination; the other
+ // parameter is named `rhs` and is the right-hand side. `rhs` comes
+ // first, `lhsDest` second. `rhs` and `lhsDest` may be the same register
+ // (if rhs is a register).
+ // - For a binary three-address operator the order is `lhs`, `rhs`, `dest`,
+ // and generally these registers may be the same.
+ // - For a unary operator, the input is named `src` and the output is named
+ // `dest`. `src` comes first, `dest` second. `src` and `dest` may be
+ // the same register (if `src` is a register).
+ // - Temp registers follow operands and are named `temp` if there's only one,
+ // otherwise `temp1`, `temp2`, etc regardless of type. GPR temps precede
+ // FPU temps. If there are several temps then they must be distinct
+ // registers, and they must be distinct from the operand registers unless
+ // noted.
+
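+  // Conventions illustrated (sketch; register names are hypothetical): the
+  // two-address form updates |lhsDest| in place, while the three-address form
+  // writes |dest|:
+  //   masm.replaceLaneInt32x4(0, rhsGpr, lhsDestSimd);        // two-address
+  //   masm.replaceLaneInt32x4(0, lhsSimd, rhsGpr, destSimd);  // three-address
+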
+ // Moves
+
+ inline void moveSimd128(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Constants
+
+ inline void loadConstantSimd128(const SimdConstant& v, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Splat
+
+ inline void splatX16(Register src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void splatX16(uint32_t srcLane, FloatRegister src, FloatRegister dest)
+ DEFINED_ON(arm64);
+
+ inline void splatX8(Register src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void splatX8(uint32_t srcLane, FloatRegister src, FloatRegister dest)
+ DEFINED_ON(arm64);
+
+ inline void splatX4(Register src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void splatX4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void splatX2(Register64 src, FloatRegister dest)
+ DEFINED_ON(x86, x64, arm64);
+
+ inline void splatX2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Extract lane as scalar. Float extraction does not canonicalize the value.
+
+ inline void extractLaneInt8x16(uint32_t lane, FloatRegister src,
+ Register dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtractLaneInt8x16(uint32_t lane, FloatRegister src,
+ Register dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void extractLaneInt16x8(uint32_t lane, FloatRegister src,
+ Register dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtractLaneInt16x8(uint32_t lane, FloatRegister src,
+ Register dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void extractLaneInt32x4(uint32_t lane, FloatRegister src,
+ Register dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void extractLaneInt64x2(uint32_t lane, FloatRegister src,
+ Register64 dest) DEFINED_ON(x86, x64, arm64);
+
+ inline void extractLaneFloat32x4(uint32_t lane, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void extractLaneFloat64x2(uint32_t lane, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Replace lane value
+
+ inline void replaceLaneInt8x16(unsigned lane, FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void replaceLaneInt8x16(unsigned lane, Register rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void replaceLaneInt16x8(unsigned lane, FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void replaceLaneInt16x8(unsigned lane, Register rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void replaceLaneInt32x4(unsigned lane, FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void replaceLaneInt32x4(unsigned lane, Register rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void replaceLaneInt64x2(unsigned lane, FloatRegister lhs,
+ Register64 rhs, FloatRegister dest)
+ DEFINED_ON(x86, x64);
+
+ inline void replaceLaneInt64x2(unsigned lane, Register64 rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86, x64, arm64);
+
+ inline void replaceLaneFloat32x4(unsigned lane, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ inline void replaceLaneFloat32x4(unsigned lane, FloatRegister rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void replaceLaneFloat64x2(unsigned lane, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ inline void replaceLaneFloat64x2(unsigned lane, FloatRegister rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Shuffle - blend and permute with immediate indices, and its many
+ // specializations. Lane values other than those mentioned are illegal.
+
+ // lane values 0..31
+ inline void shuffleInt8x16(const uint8_t lanes[16], FloatRegister rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void shuffleInt8x16(const uint8_t lanes[16], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Lane values must be 0 (select from lhs) or FF (select from rhs).
+ // The behavior is undefined for lane values that are neither 0 nor FF.
+  // On x86_shared it is required that lhs == dest.
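+  // For example (illustrative only), a lane mask of
+  //   { 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF }
+  // keeps the even-numbered lanes of `lhs` and takes the odd-numbered lanes
+  // from `rhs`.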
+ inline void blendInt8x16(const uint8_t lanes[16], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest,
+ FloatRegister temp) DEFINED_ON(x86_shared);
+
+ // Lane values must be 0 (select from lhs) or FF (select from rhs).
+ // The behavior is undefined for lane values that are neither 0 nor FF.
+ inline void blendInt8x16(const uint8_t lanes[16], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(arm64);
+
+ // Lane values must be 0 (select from lhs) or FFFF (select from rhs).
+ // The behavior is undefined for lane values that are neither 0 nor FFFF.
+  // On x86_shared it is required that lhs == dest.
+ inline void blendInt16x8(const uint16_t lanes[8], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Mask lane values must be ~0 or 0. The former selects from lhs and the
+ // latter from rhs.
+ // The implementation works effectively for I8x16, I16x8, I32x4, and I64x2.
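+  // Informally (not from the interface documentation), the ~0/0 mask shape is
+  // exactly what the SIMD compare operations in this class produce, so a
+  // compare result can be used directly as the mask to select lanes from `lhs`
+  // where the predicate held and from `rhs` where it did not.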
+ inline void laneSelectSimd128(FloatRegister mask, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void interleaveHighInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void interleaveHighInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void interleaveHighInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void interleaveHighInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void interleaveLowInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void interleaveLowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void interleaveLowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void interleaveLowInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Permute - permute with immediate indices.
+
+ // lane values 0..15
+ inline void permuteInt8x16(const uint8_t lanes[16], FloatRegister src,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ // lane values 0..7
+ inline void permuteInt16x8(const uint16_t lanes[8], FloatRegister src,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+  // lane values 0..3 [sic]; the indices are relative to the high half of the
+  // vector.
+ inline void permuteHighInt16x8(const uint16_t lanes[4], FloatRegister src,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // lane values 0..3.
+ inline void permuteLowInt16x8(const uint16_t lanes[4], FloatRegister src,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // lane values 0..3
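+  // For example (illustrative), lanes {3, 2, 1, 0} reverses the order of the
+  // four 32-bit lanes.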
+ inline void permuteInt32x4(const uint32_t lanes[4], FloatRegister src,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ // Funnel shift by immediate count:
+ // low_16_bytes_of((lhs ++ rhs) >> shift*8), shift must be < 16
+ inline void concatAndRightShiftSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, uint32_t shift)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Rotate right by immediate count:
+ // low_16_bytes_of((src ++ src) >> shift*8), shift must be < 16
+ inline void rotateRightSimd128(FloatRegister src, FloatRegister dest,
+ uint32_t shift) DEFINED_ON(arm64);
+
+ // Shift bytes with immediate count, shifting in zeroes. Shift count 0..15.
+
+ inline void leftShiftSimd128(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void rightShiftSimd128(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Reverse bytes in lanes.
+
+ inline void reverseInt16x8(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void reverseInt32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void reverseInt64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Swizzle - permute with variable indices. `rhs` holds the lanes parameter.
+
+ inline void swizzleInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void swizzleInt8x16Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Integer Add
+
+ inline void addInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void addInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void addInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void addInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void addInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void addInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void addInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void addInt64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // Integer Subtract
+
+ inline void subInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void subInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void subInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void subInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void subInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void subInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void subInt64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void subInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ // Integer Multiply
+
+ inline void mulInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void mulInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void mulInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void mulInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+  // On x86_shared it is required that lhs == dest
+ inline void mulInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp)
+ DEFINED_ON(x86_shared);
+
+ inline void mulInt64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest, FloatRegister temp)
+ DEFINED_ON(x86_shared);
+
+ inline void mulInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) DEFINED_ON(arm64);
+
+  // Note that for the extMul opcodes the NxM designation describes the input
+  // lanes; the output lanes are twice as wide.
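+  // For example (informal reading), extMulLowInt8x16 multiplies the low eight
+  // int8 lanes of `lhs` and `rhs` pairwise and writes the eight sign-extended
+  // int16 products to `dest`; the "High" variants use the high input lanes and
+  // the "unsigned" variants zero-extend the inputs instead.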
+ inline void extMulLowInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void extMulHighInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtMulLowInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtMulHighInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void extMulLowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void extMulHighInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtMulLowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtMulHighInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void extMulLowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void extMulHighInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtMulLowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtMulHighInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void q15MulrSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Integer Negate
+
+ inline void negInt8x16(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void negInt16x8(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void negInt32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void negInt64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Saturating integer add
+
+ inline void addSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void addSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedAddSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedAddSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void addSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void addSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedAddSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedAddSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // Saturating integer subtract
+
+ inline void subSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void subSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedSubSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedSubSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void subSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void subSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedSubSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedSubSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // Lane-wise integer minimum
+
+ inline void minInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void minInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedMinInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedMinInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void minInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void minInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedMinInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedMinInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void minInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void minInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedMinInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedMinInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // Lane-wise integer maximum
+
+ inline void maxInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void maxInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedMaxInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedMaxInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void maxInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void maxInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedMaxInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedMaxInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void maxInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void maxInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedMaxInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedMaxInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // Lane-wise integer rounding average
+
+ inline void unsignedAverageInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedAverageInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Lane-wise integer absolute value
+
+ inline void absInt8x16(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void absInt16x8(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void absInt32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void absInt64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Left shift by scalar. Immediates and variable shifts must have been
+ // masked; shifts of zero will work but may or may not generate code.
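+  //
+  // A minimal sketch of that masking (illustrative only; `shiftReg` is a
+  // hypothetical register): before a variable i8x16 shift the caller would do
+  // something like
+  //
+  //   masm.and32(Imm32(7), shiftReg);   // 8-bit lanes => mask count to 0..7
+  //
+  // and use 15/31/63 respectively for the wider lane sizes.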
+
+ inline void leftShiftInt8x16(Register rhs, FloatRegister lhsDest,
+ FloatRegister temp) DEFINED_ON(x86_shared);
+
+ inline void leftShiftInt8x16(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void leftShiftInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void leftShiftInt16x8(Register rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared);
+
+ inline void leftShiftInt16x8(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void leftShiftInt16x8(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void leftShiftInt32x4(Register rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared);
+
+ inline void leftShiftInt32x4(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void leftShiftInt32x4(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void leftShiftInt64x2(Register rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared);
+
+ inline void leftShiftInt64x2(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void leftShiftInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Right shift by scalar. Immediates and variable shifts must have been
+ // masked; shifts of zero will work but may or may not generate code.
+
+ inline void rightShiftInt8x16(Register rhs, FloatRegister lhsDest,
+ FloatRegister temp) DEFINED_ON(x86_shared);
+
+ inline void rightShiftInt8x16(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void rightShiftInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedRightShiftInt8x16(Register rhs, FloatRegister lhsDest,
+ FloatRegister temp)
+ DEFINED_ON(x86_shared);
+
+ inline void unsignedRightShiftInt8x16(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void unsignedRightShiftInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void rightShiftInt16x8(Register rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared);
+
+ inline void rightShiftInt16x8(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void rightShiftInt16x8(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedRightShiftInt16x8(Register rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared);
+
+ inline void unsignedRightShiftInt16x8(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void unsignedRightShiftInt16x8(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void rightShiftInt32x4(Register rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared);
+
+ inline void rightShiftInt32x4(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void rightShiftInt32x4(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedRightShiftInt32x4(Register rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared);
+
+ inline void unsignedRightShiftInt32x4(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void unsignedRightShiftInt32x4(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void rightShiftInt64x2(Register rhs, FloatRegister lhsDest,
+ FloatRegister temp) DEFINED_ON(x86_shared);
+
+ inline void rightShiftInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void rightShiftInt64x2(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void unsignedRightShiftInt64x2(Register rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared);
+
+ inline void unsignedRightShiftInt64x2(FloatRegister lhs, Register rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void unsignedRightShiftInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Sign replication operation
+
+ inline void signReplicationInt8x16(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ inline void signReplicationInt16x8(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ inline void signReplicationInt32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ inline void signReplicationInt64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ // Bitwise and, or, xor, not
+
+ inline void bitwiseAndSimd128(FloatRegister rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void bitwiseAndSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void bitwiseAndSimd128(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void bitwiseOrSimd128(FloatRegister rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void bitwiseOrSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void bitwiseOrSimd128(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void bitwiseXorSimd128(FloatRegister rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void bitwiseXorSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void bitwiseXorSimd128(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void bitwiseNotSimd128(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+  // Bitwise AND with complement: dest = lhs & ~rhs, note only arm64 can do it.
+ inline void bitwiseAndNotSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister lhsDest) DEFINED_ON(arm64);
+
+ // Bitwise AND with complement: dest = ~lhs & rhs, note this is not what Wasm
+ // wants but what the x86 hardware offers. Hence the name.
+
+ inline void bitwiseNotAndSimd128(FloatRegister rhs, FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void bitwiseNotAndSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared);
+
+ // Bitwise select
+
+ inline void bitwiseSelectSimd128(FloatRegister mask, FloatRegister onTrue,
+ FloatRegister onFalse, FloatRegister dest,
+ FloatRegister temp) DEFINED_ON(x86_shared);
+
+ inline void bitwiseSelectSimd128(FloatRegister onTrue, FloatRegister onFalse,
+ FloatRegister maskDest) DEFINED_ON(arm64);
+
+ // Population count
+
+ inline void popcntInt8x16(FloatRegister src, FloatRegister dest,
+ FloatRegister temp) DEFINED_ON(x86_shared);
+
+ inline void popcntInt8x16(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(arm64);
+
+  // Any lane true, i.e., any bit set
+
+ inline void anyTrueSimd128(FloatRegister src, Register dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // All lanes true
+
+ inline void allTrueInt8x16(FloatRegister src, Register dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void allTrueInt16x8(FloatRegister src, Register dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void allTrueInt32x4(FloatRegister src, Register dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void allTrueInt64x2(FloatRegister src, Register dest)
+ DEFINED_ON(x86_shared, arm64);
+
+  // Bitmask, i.e., extract and compress the high bits of all lanes
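+  //
+  // For example (informal reading), bitmaskInt8x16 sets bit i of `dest` to the
+  // most significant bit of lane i of `src`, yielding a 16-bit mask
+  // zero-extended into the destination register.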
+
+ inline void bitmaskInt8x16(FloatRegister src, Register dest)
+ DEFINED_ON(x86_shared);
+
+ inline void bitmaskInt8x16(FloatRegister src, Register dest,
+ FloatRegister temp) DEFINED_ON(arm64);
+
+ inline void bitmaskInt16x8(FloatRegister src, Register dest)
+ DEFINED_ON(x86_shared);
+
+ inline void bitmaskInt16x8(FloatRegister src, Register dest,
+ FloatRegister temp) DEFINED_ON(arm64);
+
+ inline void bitmaskInt32x4(FloatRegister src, Register dest)
+ DEFINED_ON(x86_shared);
+
+ inline void bitmaskInt32x4(FloatRegister src, Register dest,
+ FloatRegister temp) DEFINED_ON(arm64);
+
+ inline void bitmaskInt64x2(FloatRegister src, Register dest)
+ DEFINED_ON(x86_shared);
+
+ inline void bitmaskInt64x2(FloatRegister src, Register dest,
+ FloatRegister temp) DEFINED_ON(arm64);
+
+ // Comparisons (integer and floating-point)
+
+ inline void compareInt8x16(Assembler::Condition cond, FloatRegister rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // On x86_shared, limited to !=, ==, <=, >
+ inline void compareInt8x16(Assembler::Condition cond, FloatRegister lhs,
+ const SimdConstant& rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ // On arm64, use any integer comparison condition.
+ inline void compareInt8x16(Assembler::Condition cond, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void compareInt16x8(Assembler::Condition cond, FloatRegister rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void compareInt16x8(Assembler::Condition cond, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // On x86_shared, limited to !=, ==, <=, >
+ inline void compareInt16x8(Assembler::Condition cond, FloatRegister lhs,
+ const SimdConstant& rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+  inline void compareInt32x4(Assembler::Condition cond, FloatRegister rhs,
+                             FloatRegister lhsDest)
+      DEFINED_ON(x86_shared, arm64);
+
+  // On x86_shared, limited to !=, ==, <=, >
+  inline void compareInt32x4(Assembler::Condition cond, FloatRegister lhs,
+                             const SimdConstant& rhs, FloatRegister dest)
+      DEFINED_ON(x86_shared);
+
+ // On arm64, use any integer comparison condition.
+ inline void compareInt32x4(Assembler::Condition cond, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void compareForEqualityInt64x2(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ inline void compareForOrderingInt64x2(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2)
+ DEFINED_ON(x86_shared);
+
+ inline void compareInt64x2(Assembler::Condition cond, FloatRegister rhs,
+ FloatRegister lhsDest) DEFINED_ON(arm64);
+
+ inline void compareInt64x2(Assembler::Condition cond, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(arm64);
+
+ inline void compareFloat32x4(Assembler::Condition cond, FloatRegister rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // On x86_shared, limited to ==, !=, <, <=
+ inline void compareFloat32x4(Assembler::Condition cond, FloatRegister lhs,
+ const SimdConstant& rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ // On x86_shared, limited to ==, !=, <, <=
+  // On arm64, use any floating-point comparison condition.
+ inline void compareFloat32x4(Assembler::Condition cond, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void compareFloat64x2(Assembler::Condition cond, FloatRegister rhs,
+ FloatRegister lhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // On x86_shared, limited to ==, !=, <, <=
+ inline void compareFloat64x2(Assembler::Condition cond, FloatRegister lhs,
+ const SimdConstant& rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ // On x86_shared, limited to ==, !=, <, <=
+  // On arm64, use any floating-point comparison condition.
+ inline void compareFloat64x2(Assembler::Condition cond, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Load
+
+ inline void loadUnalignedSimd128(const Operand& src, FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ inline void loadUnalignedSimd128(const Address& src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void loadUnalignedSimd128(const BaseIndex& src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Store
+
+ inline void storeUnalignedSimd128(FloatRegister src, const Address& dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void storeUnalignedSimd128(FloatRegister src, const BaseIndex& dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Floating point negation
+
+ inline void negFloat32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void negFloat64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Floating point absolute value
+
+ inline void absFloat32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void absFloat64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // NaN-propagating minimum
+
+ inline void minFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) DEFINED_ON(x86_shared);
+
+ inline void minFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
+ DEFINED_ON(arm64);
+
+ inline void minFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void minFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) DEFINED_ON(x86_shared);
+
+ inline void minFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
+ DEFINED_ON(arm64);
+
+ inline void minFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ // NaN-propagating maximum
+
+ inline void maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) DEFINED_ON(x86_shared);
+
+ inline void maxFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
+ DEFINED_ON(arm64);
+
+ inline void maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ inline void maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) DEFINED_ON(x86_shared);
+
+ inline void maxFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
+ DEFINED_ON(arm64);
+
+ inline void maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(arm64);
+
+ // Floating add
+
+ inline void addFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void addFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void addFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void addFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // Floating subtract
+
+ inline void subFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void subFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void subFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void subFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // Floating division
+
+ inline void divFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void divFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void divFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void divFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // Floating Multiply
+
+ inline void mulFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void mulFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void mulFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void mulFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ // Pairwise add
+
+ inline void extAddPairwiseInt8x16(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtAddPairwiseInt8x16(FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void extAddPairwiseInt16x8(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedExtAddPairwiseInt16x8(FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Floating square root
+
+ inline void sqrtFloat32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void sqrtFloat64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Integer to floating point with rounding
+
+ inline void convertInt32x4ToFloat32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedConvertInt32x4ToFloat32x4(FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void convertInt32x4ToFloat64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedConvertInt32x4ToFloat64x2(FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Floating point to integer with saturation
+
+ inline void truncSatFloat32x4ToInt32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
+ FloatRegister dest,
+ FloatRegister temp)
+ DEFINED_ON(x86_shared);
+
+ inline void unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(arm64);
+
+ inline void truncSatFloat64x2ToInt32x4(FloatRegister src, FloatRegister dest,
+ FloatRegister temp)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedTruncSatFloat64x2ToInt32x4(FloatRegister src,
+ FloatRegister dest,
+ FloatRegister temp)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void truncFloat32x4ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedTruncFloat32x4ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void truncFloat64x2ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedTruncFloat64x2ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Floating point narrowing
+
+ inline void convertFloat64x2ToFloat32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Floating point widening
+
+ inline void convertFloat32x4ToFloat64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Integer to integer narrowing
+
+ inline void narrowInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void narrowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedNarrowInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedNarrowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void narrowInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void narrowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedNarrowInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void unsignedNarrowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Integer to integer widening
+
+ inline void widenLowInt8x16(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void widenHighInt8x16(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedWidenLowInt8x16(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedWidenHighInt8x16(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void widenLowInt16x8(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void widenHighInt16x8(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedWidenLowInt16x8(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedWidenHighInt16x8(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void widenLowInt32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedWidenLowInt32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void widenHighInt32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void unsignedWidenHighInt32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Compare-based minimum/maximum
+ //
+ // On x86, the signature is (rhsDest, lhs); on arm64 it is (rhs, lhsDest).
+ //
+ // The masm preprocessor can't deal with multiple declarations with identical
+ // signatures even if they are on different platforms, hence the weird
+ // argument names.
+
+ inline void pseudoMinFloat32x4(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void pseudoMinFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void pseudoMinFloat64x2(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void pseudoMinFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void pseudoMaxFloat32x4(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void pseudoMaxFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void pseudoMaxFloat64x2(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void pseudoMaxFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Widening/pairwise integer dot product
+
+ inline void widenDotInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared, arm64);
+
+ inline void widenDotInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) DEFINED_ON(x86_shared);
+
+ inline void dotInt8x16Int7x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void dotInt8x16Int7x16ThenAdd(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared);
+
+ inline void dotInt8x16Int7x16ThenAdd(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp)
+ DEFINED_ON(arm64);
+
+ // Floating point rounding
+
+ inline void ceilFloat32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void ceilFloat64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void floorFloat32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void floorFloat64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void truncFloat32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void truncFloat64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void nearestFloat32x4(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void nearestFloat64x2(FloatRegister src, FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ // Floating multiply-accumulate: srcDest [+-]= src1 * src2
+
+ inline void fmaFloat32x4(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) DEFINED_ON(x86_shared, arm64);
+
+ inline void fnmaFloat32x4(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void fmaFloat64x2(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) DEFINED_ON(x86_shared, arm64);
+
+ inline void fnmaFloat64x2(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void minFloat32x4Relaxed(FloatRegister src, FloatRegister srcDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void minFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void maxFloat32x4Relaxed(FloatRegister src, FloatRegister srcDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void maxFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void minFloat64x2Relaxed(FloatRegister src, FloatRegister srcDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void minFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void maxFloat64x2Relaxed(FloatRegister src, FloatRegister srcDest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void maxFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ inline void q15MulrInt16x8Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest)
+ DEFINED_ON(x86_shared, arm64);
+
+ public:
+ // ========================================================================
+ // Truncate floating point.
+
+ // Undefined behaviour when truncation is outside Int64 range.
+ // Needs a temp register if SSE3 is not present.
+ inline void truncateFloat32ToInt64(Address src, Address dest, Register temp)
+ DEFINED_ON(x86_shared);
+ inline void truncateFloat32ToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+ DEFINED_ON(x86, x64);
+ inline void truncateDoubleToInt64(Address src, Address dest, Register temp)
+ DEFINED_ON(x86_shared);
+ inline void truncateDoubleToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+ DEFINED_ON(x86, x64);
+
+ public:
+ // ========================================================================
+ // Convert floating point.
+
+ // temp required on x86 and x64; must be undefined on mips64 and loong64.
+ void convertUInt64ToFloat32(Register64 src, FloatRegister dest, Register temp)
+ DEFINED_ON(arm64, mips64, loong64, riscv64, wasm32, x64, x86);
+
+ void convertInt64ToFloat32(Register64 src, FloatRegister dest)
+ DEFINED_ON(arm64, mips64, loong64, riscv64, wasm32, x64, x86);
+
+ bool convertUInt64ToDoubleNeedsTemp() PER_ARCH;
+
+ // temp required when convertUInt64ToDoubleNeedsTemp() returns true.
+ void convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) PER_ARCH;
+
+ void convertInt64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;
+
+ void convertIntPtrToDouble(Register src, FloatRegister dest) PER_ARCH;
+
+ public:
+ // ========================================================================
+ // wasm support
+
+ CodeOffset wasmTrapInstruction() PER_SHARED_ARCH;
+
+ void wasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset);
+
+ // Load all pinned regs via InstanceReg. If the trapOffset is something,
+ // give the first load a trap descriptor with type IndirectCallToNull, so that
+ // a null instance will cause a trap.
+ void loadWasmPinnedRegsFromInstance(
+ mozilla::Maybe<wasm::BytecodeOffset> trapOffset = mozilla::Nothing());
+
+ // Returns a pair: the offset of the undefined (trapping) instruction, and
+ // the number of extra bytes of stack allocated prior to the trap
+ // instruction proper.
+ std::pair<CodeOffset, uint32_t> wasmReserveStackChecked(
+ uint32_t amount, wasm::BytecodeOffset trapOffset);
+
+ // Emit a bounds check against the wasm heap limit, jumping to 'ok' if 'cond'
+ // holds; this can be the label either of the access or of the trap. The
+ // label should name a code position greater than the position of the bounds
+ // check.
+ //
+ // If JitOptions.spectreMaskIndex is true, a no-op speculation barrier is
+ // emitted in the code stream after the check to prevent an OOB access from
+ // being executed speculatively. (On current tier-1 platforms the barrier is
+ // a conditional saturation of 'index' to 'boundsCheckLimit', using the same
+ // condition as the check.) If the condition is such that the bounds check
+ // branches out of line to the trap, the barrier will actually be executed
+ // when the bounds check passes.
+ //
+ // On 32-bit systems for both wasm and asm.js, and on 64-bit systems for
+ // asm.js, heap lengths are limited to 2GB. On 64-bit systems for wasm,
+ // 32-bit heap lengths are limited to 4GB, and 64-bit heap lengths will be
+ // limited to something much larger.
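+  //
+  // A minimal sketch of the intended use (illustrative only; the label,
+  // registers, and trap wiring are hypothetical, not the exact code the
+  // compilers emit):
+  //
+  //   Label ok;
+  //   masm.wasmBoundsCheck32(Assembler::Below, index, boundsCheckLimit, &ok);
+  //   masm.wasmTrap(wasm::Trap::OutOfBounds, bytecodeOffset);
+  //   masm.bind(&ok);
+  //   // ... emit the heap access ...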
+
+ void wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, loong64, riscv64,
+ wasm32);
+
+ void wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok)
+ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, loong64, riscv64,
+ wasm32);
+
+ void wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok)
+ DEFINED_ON(arm64, mips64, x64, x86, arm, loong64, riscv64, wasm32);
+
+ void wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok)
+ DEFINED_ON(arm64, mips64, x64, x86, arm, loong64, riscv64, wasm32);
+
+ // Each wasm load/store instruction appends its own wasm::Trap::OutOfBounds.
+ void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
+ AnyRegister out) DEFINED_ON(x86, x64);
+ void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr,
+ Register64 out) DEFINED_ON(x86, x64);
+ void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Operand dstAddr) DEFINED_ON(x86, x64);
+ void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
+ Operand dstAddr) DEFINED_ON(x86);
+
+ // For all the ARM/MIPS/LOONG64 wasmLoad and wasmStore functions below, `ptr`
+ // MUST equal `ptrScratch`, and that register will be updated based on
+ // conditions listed below (where it is only mentioned as `ptr`).
+
+ // `ptr` will be updated if access.offset() != 0 or access.type() ==
+ // Scalar::Int64.
+ void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register ptrScratch, AnyRegister output)
+ DEFINED_ON(arm, loong64, riscv64, mips_shared);
+ void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register ptrScratch, Register64 output)
+ DEFINED_ON(arm, mips32, mips64, loong64, riscv64);
+ void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Register memoryBase, Register ptr, Register ptrScratch)
+ DEFINED_ON(arm, loong64, riscv64, mips_shared);
+ void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr, Register ptrScratch)
+ DEFINED_ON(arm, mips32, mips64, loong64, riscv64);
+
+  // These accept a general memoryBase + ptr + offset (in `access`); the offset
+  // is always smaller than the guard region.  If the offset is nonzero they
+  // will insert an additional add; if the offset is also large, that add may
+  // require a temporary register to hold the offset, plus instructions to set
+  // it up.
+ void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, AnyRegister output) DEFINED_ON(arm64);
+ void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register64 output) DEFINED_ON(arm64);
+ void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Register memoryBase, Register ptr) DEFINED_ON(arm64);
+ void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr) DEFINED_ON(arm64);
+
+ // `ptr` will always be updated.
+ void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register output, Register tmp)
+ DEFINED_ON(mips32, mips64);
+
+ // MIPS: `ptr` will always be updated.
+ void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, FloatRegister output,
+ Register tmp1) DEFINED_ON(mips32, mips64);
+
+ // `ptr` will always be updated.
+ void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output,
+ Register tmp) DEFINED_ON(mips32, mips64);
+
+ // MIPS: `ptr` will always be updated.
+ void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register tmp)
+ DEFINED_ON(mips32, mips64);
+
+ // `ptr` will always be updated.
+ void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access,
+ FloatRegister floatValue, Register memoryBase,
+ Register ptr, Register ptrScratch, Register tmp)
+ DEFINED_ON(mips32, mips64);
+
+ // `ptr` will always be updated.
+ void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch, Register tmp)
+ DEFINED_ON(mips32, mips64);
+
+ // wasm specific methods, used in both the wasm baseline compiler and ion.
+
+ // The truncate-to-int32 methods do not bind the rejoin label; clients must
+ // do so if oolWasmTruncateCheckF64ToI32() can jump to it.
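+  //
+  // A rough sketch of the expected wiring (illustrative, not the exact
+  // compiler code): the inline path calls wasmTruncateDoubleToInt32 with an
+  // `oolEntry` label and binds its own rejoin label at the resume point; the
+  // out-of-line path then calls oolWasmTruncateCheckF64ToI32 with that rejoin
+  // label so the check can branch back once the edge case has been handled.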
+ void wasmTruncateDoubleToUInt32(FloatRegister input, Register output,
+ bool isSaturating, Label* oolEntry) PER_ARCH;
+ void wasmTruncateDoubleToInt32(FloatRegister input, Register output,
+ bool isSaturating,
+ Label* oolEntry) PER_SHARED_ARCH;
+ void oolWasmTruncateCheckF64ToI32(FloatRegister input, Register output,
+ TruncFlags flags, wasm::BytecodeOffset off,
+ Label* rejoin)
+ DEFINED_ON(arm, arm64, x86_shared, mips_shared, loong64, riscv64, wasm32);
+
+ void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output,
+ bool isSaturating, Label* oolEntry) PER_ARCH;
+ void wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
+ bool isSaturating,
+ Label* oolEntry) PER_SHARED_ARCH;
+ void oolWasmTruncateCheckF32ToI32(FloatRegister input, Register output,
+ TruncFlags flags, wasm::BytecodeOffset off,
+ Label* rejoin)
+ DEFINED_ON(arm, arm64, x86_shared, mips_shared, loong64, riscv64, wasm32);
+
+ // The truncate-to-int64 methods will always bind the `oolRejoin` label
+ // after the last emitted instruction.
+ void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output,
+ bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble)
+ DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
+ void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output,
+ bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble)
+ DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
+ void oolWasmTruncateCheckF64ToI64(FloatRegister input, Register64 output,
+ TruncFlags flags, wasm::BytecodeOffset off,
+ Label* rejoin)
+ DEFINED_ON(arm, arm64, x86_shared, mips_shared, loong64, riscv64, wasm32);
+
+ void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output,
+ bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble)
+ DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
+ void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output,
+ bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble)
+ DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
+ void oolWasmTruncateCheckF32ToI64(FloatRegister input, Register64 output,
+ TruncFlags flags, wasm::BytecodeOffset off,
+ Label* rejoin)
+ DEFINED_ON(arm, arm64, x86_shared, mips_shared, loong64, riscv64, wasm32);
+
+ // This function takes care of loading the callee's instance and pinned regs
+ // but it is the caller's responsibility to save/restore instance or pinned
+ // regs.
+ CodeOffset wasmCallImport(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee);
+
+ // WasmTableCallIndexReg must contain the index of the indirect call. This is
+ // for wasm calls only.
+ //
+ // Indirect calls use a dual-path mechanism where a run-time test determines
+ // whether a context switch is needed (slow path) or not (fast path). This
+ // gives rise to two call instructions, both of which need safe points. As
+ // per normal, the call offsets are the code offsets at the end of the call
+ // instructions (the return points).
+ //
+ // `boundsCheckFailedLabel` is non-null iff a bounds check is required.
+ // `nullCheckFailedLabel` is non-null only on platforms that can't fold the
+ // null check into the rest of the call instructions.
+ void wasmCallIndirect(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee,
+ Label* boundsCheckFailedLabel,
+ Label* nullCheckFailedLabel,
+ mozilla::Maybe<uint32_t> tableSize,
+ CodeOffset* fastCallOffset, CodeOffset* slowCallOffset);
+
+ // This function takes care of loading the callee's instance and address from
+ // pinned reg.
+ void wasmCallRef(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee, CodeOffset* fastCallOffset,
+ CodeOffset* slowCallOffset);
+
+ // WasmTableCallIndexReg must contain the index of the indirect call.
+ // This is for asm.js calls only.
+ CodeOffset asmCallIndirect(const wasm::CallSiteDesc& desc,
+ const wasm::CalleeDesc& callee);
+
+ // This function takes care of loading the pointer to the current instance
+ // as the implicit first argument. It preserves instance and pinned registers.
+ // (instance & pinned regs are non-volatile registers in the system ABI).
+ CodeOffset wasmCallBuiltinInstanceMethod(const wasm::CallSiteDesc& desc,
+ const ABIArg& instanceArg,
+ wasm::SymbolicAddress builtin,
+ wasm::FailureMode failureMode);
+
+ // Perform a subtype check that `object` is a subtype of `type`, branching to
+ // `label` depending on `onSuccess`. `type` must be in the `any` hierarchy.
+ //
+ // `superSuperTypeVector` is required iff the destination type is a concrete
+ // type. `scratch1` is required iff the destination type is eq or lower and
+ // not none. `scratch2` is required iff the destination type is a concrete
+ // type and its `subTypingDepth` is >= wasm::MinSuperTypeVectorLength.
+ //
+ // `object` and `superSuperTypeVector` are preserved. Scratch registers are
+ // clobbered.
+ void branchWasmGcObjectIsRefType(Register object, wasm::RefType sourceType,
+ wasm::RefType destType, Label* label,
+ bool onSuccess,
+ Register superSuperTypeVector,
+ Register scratch1, Register scratch2);
+ static bool needScratch1ForBranchWasmGcRefType(wasm::RefType type);
+ static bool needScratch2ForBranchWasmGcRefType(wasm::RefType type);
+ static bool needSuperSuperTypeVectorForBranchWasmGcRefType(
+ wasm::RefType type);
+
+ // Perform a subtype check that `subSuperTypeVector` is a subtype of
+ // `superSuperTypeVector`, branching to `label` depending on `onSuccess`.
+ // This method is a specialization of the general
+ // `wasm::TypeDef::isSubTypeOf` method for the case where the
+ // `superSuperTypeVector` is statically known, which is the case for all
+ // wasm instructions.
+ //
+ // `scratch` is required iff the `subTypeDepth` is >=
+ // wasm::MinSuperTypeVectorLength. `subSuperTypeVector` is clobbered by this
+ // method. `superSuperTypeVector` is preserved.
+ void branchWasmSuperTypeVectorIsSubtype(Register subSuperTypeVector,
+ Register superSuperTypeVector,
+ Register scratch,
+ uint32_t superTypeDepth, Label* label,
+ bool onSuccess);
+
+ // Compute ptr += (indexTemp32 << shift) where shift can be any value < 32.
+ // May destroy indexTemp32. The value of indexTemp32 must be positive, and it
+ // is implementation-defined what happens if bits are lost or the value
+ // becomes negative through the shift. On 64-bit systems, the high 32 bits of
+ // indexTemp32 must be zero, not garbage.
+ void shiftIndex32AndAdd(Register indexTemp32, int shift,
+ Register pointer) PER_SHARED_ARCH;
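+
+ // Usage sketch (illustrative register names, not part of this interface):
+ // compute the address of a 32-bit element, i.e. ptr += index * 4:
+ //
+ //   masm.shiftIndex32AndAdd(indexReg, 2, elementsReg);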
+
+ // The System ABI frequently states that the high bits of a 64-bit register
+ // that holds a 32-bit return value are unpredictable, and C++ compilers will
+ // indeed generate code that leaves garbage in the upper bits.
+ //
+ // Adjust the contents of the 64-bit register `r` to conform to our internal
+ // convention, which requires predictable high bits. In practice, this means
+ // that the 32-bit value will be zero-extended or sign-extended to 64 bits as
+ // appropriate for the platform.
+ void widenInt32(Register r) DEFINED_ON(arm64, x64, mips64, loong64, riscv64);
+
+ // As enterFakeExitFrame(), but using register conventions appropriate for
+ // wasm stubs.
+ void enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) PER_SHARED_ARCH;
+
+ public:
+ // ========================================================================
+ // Barrier functions.
+
+ void emitPreBarrierFastPath(JSRuntime* rt, MIRType type, Register temp1,
+ Register temp2, Register temp3, Label* noBarrier);
+
+ public:
+ // ========================================================================
+ // Clamping functions.
+
+ inline void clampIntToUint8(Register reg) PER_SHARED_ARCH;
+
+ public:
+ // ========================================================================
+ // Primitive atomic operations.
+ //
+ // If the access is from JS and the eventual destination of the result is a
+ // js::Value, it's probably best to use the JS-specific versions of these,
+ // see further below.
+ //
+ // Temp registers must be defined unless otherwise noted in the per-function
+ // constraints.
+
+ // 8-bit, 16-bit, and 32-bit wide operations.
+ //
+ // The 8-bit and 16-bit operations zero-extend or sign-extend the result to
+ // 32 bits, according to `type`. On 64-bit systems, the upper 32 bits of the
+ // result will be zero on some platforms (eg, on x64) and will be the sign
+ // extension of the lower bits on other platforms (eg, MIPS).
+
+ // CompareExchange with memory. Return the value that was in memory,
+ // whether we wrote or not.
+ //
+ // x86-shared: `output` must be eax.
+ // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
+ // and 16-bit wide operations.
+
+ void compareExchange(Scalar::Type type, const Synchronization& sync,
+ const Address& mem, Register expected,
+ Register replacement, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void compareExchange(Scalar::Type type, const Synchronization& sync,
+ const BaseIndex& mem, Register expected,
+ Register replacement, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void compareExchange(Scalar::Type type, const Synchronization& sync,
+ const Address& mem, Register expected,
+ Register replacement, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void compareExchange(Scalar::Type type, const Synchronization& sync,
+ const BaseIndex& mem, Register expected,
+ Register replacement, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
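+
+ // Usage sketch (illustrative registers; on x86-shared `output` must be eax,
+ // and the barrier argument shown is only an assumed example):
+ //
+ //   masm.compareExchange(Scalar::Int32, Synchronization::Full(),
+ //                        Address(objReg, offsetOfField), expectedReg,
+ //                        replacementReg, outputReg);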
+
+ // x86: `expected` and `output` must be edx:eax; `replacement` is ecx:ebx.
+ // x64: `output` must be rax.
+ // ARM: Registers must be distinct; `replacement` and `output` must be
+ // (even,odd) pairs.
+
+ void compareExchange64(const Synchronization& sync, const Address& mem,
+ Register64 expected, Register64 replacement,
+ Register64 output)
+ DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
+
+ void compareExchange64(const Synchronization& sync, const BaseIndex& mem,
+ Register64 expected, Register64 replacement,
+ Register64 output)
+ DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
+
+ // Exchange with memory. Return the value initially in memory.
+ // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
+ // and 16-bit wide operations.
+
+ void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ const Address& mem, Register value, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ const BaseIndex& mem, Register value, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ const Address& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ const BaseIndex& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ // x86: `value` must be ecx:ebx; `output` must be edx:eax.
+ // ARM: `value` and `output` must be distinct and (even,odd) pairs.
+ // ARM64: `value` and `output` must be distinct.
+
+ void atomicExchange64(const Synchronization& sync, const Address& mem,
+ Register64 value, Register64 output)
+ DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
+
+ void atomicExchange64(const Synchronization& sync, const BaseIndex& mem,
+ Register64 value, Register64 output)
+ DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
+
+ // Read-modify-write with memory. Return the value in memory before the
+ // operation.
+ //
+ // x86-shared:
+ // For 8-bit operations, `value` and `output` must have a byte subregister.
+ // For Add and Sub, `temp` must be invalid.
+ // For And, Or, and Xor, `output` must be eax and `temp` must have a byte
+ // subregister.
+ //
+ // ARM: Registers `value` and `output` must differ.
+ // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
+ // and 16-bit wide operations; `value` and `output` must differ.
+
+ void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, Register value, const Address& mem,
+ Register temp, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, Imm32 value, const Address& mem,
+ Register temp, Register output) DEFINED_ON(x86_shared);
+
+ void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, Register value, const BaseIndex& mem,
+ Register temp, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, Imm32 value, const BaseIndex& mem,
+ Register temp, Register output) DEFINED_ON(x86_shared);
+
+ void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output) DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output) DEFINED_ON(mips_shared, loong64, riscv64);
+
+ // x86:
+ // `temp` must be ecx:ebx; `output` must be edx:eax.
+ // x64:
+ // For Add and Sub, `temp` is ignored.
+ // For And, Or, and Xor, `output` must be rax.
+ // ARM:
+ // `temp` and `output` must be (even,odd) pairs and distinct from `value`.
+ // ARM64:
+ // Registers `value`, `temp`, and `output` must all differ.
+
+ void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem, Register64 temp,
+ Register64 output)
+ DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);
+
+ void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ const Address& value, const Address& mem,
+ Register64 temp, Register64 output) DEFINED_ON(x86);
+
+ void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem, Register64 temp,
+ Register64 output)
+ DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);
+
+ void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ const Address& value, const BaseIndex& mem,
+ Register64 temp, Register64 output) DEFINED_ON(x86);
+
+ // x64:
+ // `value` can be any register.
+ // ARM:
+ // `temp` must be an (even,odd) pair and distinct from `value`.
+ // ARM64:
+ // Registers `value` and `temp` must differ.
+
+ void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem) DEFINED_ON(x64);
+
+ void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem, Register64 temp)
+ DEFINED_ON(arm, arm64, mips64, loong64, riscv64);
+
+ void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem) DEFINED_ON(x64);
+
+ void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem, Register64 temp)
+ DEFINED_ON(arm, arm64, mips64, loong64, riscv64);
+
+ // 64-bit atomic load. On 64-bit systems, use regular load with
+ // Synchronization::Load, not this method.
+ //
+ // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
+ // ARM: `output` must be (even,odd) pair.
+
+ void atomicLoad64(const Synchronization& sync, const Address& mem,
+ Register64 temp, Register64 output) DEFINED_ON(x86);
+
+ void atomicLoad64(const Synchronization& sync, const BaseIndex& mem,
+ Register64 temp, Register64 output) DEFINED_ON(x86);
+
+ void atomicLoad64(const Synchronization& sync, const Address& mem,
+ Register64 output) DEFINED_ON(arm);
+
+ void atomicLoad64(const Synchronization& sync, const BaseIndex& mem,
+ Register64 output) DEFINED_ON(arm);
+
+ // 64-bit atomic store. On 64-bit systems, use regular store with
+ // Synchronization::Store, not this method.
+ //
+ // x86: `value` must be ecx:ebx; `temp` must be edx:eax.
+ // ARM: `value` and `temp` must be (even,odd) pairs.
+
+ void atomicStore64(const Synchronization& sync, const Address& mem,
+ Register64 value, Register64 temp) DEFINED_ON(x86, arm);
+
+ void atomicStore64(const Synchronization& sync, const BaseIndex& mem,
+ Register64 value, Register64 temp) DEFINED_ON(x86, arm);
+
+ // ========================================================================
+ // Wasm atomic operations.
+ //
+ // Constraints, when omitted, are exactly as for the primitive operations
+ // above.
+
+ void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register expected,
+ Register replacement, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register expected,
+ Register replacement, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register expected,
+ Register replacement, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register expected,
+ Register replacement, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register value, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register value, Register output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register value, const Address& mem, Register temp,
+ Register output) DEFINED_ON(arm, arm64, x86_shared);
+
+ void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Imm32 value, const Address& mem, Register temp,
+ Register output) DEFINED_ON(x86_shared);
+
+ void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register value, const BaseIndex& mem, Register temp,
+ Register output) DEFINED_ON(arm, arm64, x86_shared);
+
+ void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Imm32 value, const BaseIndex& mem, Register temp,
+ Register output) DEFINED_ON(x86_shared);
+
+ void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register value, const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ // Read-modify-write with memory. Return no value.
+ //
+ // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
+ // and 16-bit wide operations.
+
+ void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register value, const Address& mem, Register temp)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Imm32 value, const Address& mem, Register temp)
+ DEFINED_ON(x86_shared);
+
+ void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register value, const BaseIndex& mem, Register temp)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Imm32 value, const BaseIndex& mem, Register temp)
+ DEFINED_ON(x86_shared);
+
+ void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ // 64-bit wide operations.
+
+ // 64-bit atomic load. On 64-bit systems, use regular wasm load with
+ // Synchronization::Load, not this method.
+ //
+ // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
+ // ARM: `temp` should be invalid; `output` must be (even,odd) pair.
+ // MIPS32: `temp` should be invalid.
+
+ void wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 temp, Register64 output)
+ DEFINED_ON(arm, mips32, x86, wasm32);
+
+ void wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) DEFINED_ON(arm, mips32, x86, wasm32);
+
+ // x86: `expected` must be the same as `output`, and must be edx:eax.
+ // x86: `replacement` must be ecx:ebx.
+ // x64: `output` must be rax.
+ // ARM: Registers must be distinct; `replacement` and `output` must be
+ // (even,odd) pairs.
+ // ARM64: The base register in `mem` must not overlap `output`.
+ // MIPS: Registers must be distinct.
+
+ void wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 expected,
+ Register64 replacement,
+ Register64 output) PER_ARCH;
+
+ void wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 expected,
+ Register64 replacement,
+ Register64 output) PER_ARCH;
+
+ // x86: `value` must be ecx:ebx; `output` must be edx:eax.
+ // ARM: Registers must be distinct; `value` and `output` must be (even,odd)
+ // pairs.
+ // MIPS: Registers must be distinct.
+
+ void wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 value,
+ Register64 output) PER_ARCH;
+
+ void wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) PER_ARCH;
+
+ // x86: `output` must be edx:eax, `temp` must be ecx:ebx.
+ // x64: For And, Or, and Xor, `output` must be rax.
+ // ARM: Registers must be distinct; `temp` and `output` must be (even,odd)
+ // pairs.
+ // MIPS: Registers must be distinct.
+ // MIPS32: `temp` should be invalid.
+
+ void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp, Register64 output)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, x64);
+
+ void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp, Register64 output)
+ DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, x64);
+
+ void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ const Address& value, const Address& mem,
+ Register64 temp, Register64 output) DEFINED_ON(x86);
+
+ void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ const Address& value, const BaseIndex& mem,
+ Register64 temp, Register64 output) DEFINED_ON(x86);
+
+ // Here `value` can be any register.
+
+ void wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register64 value, const BaseIndex& mem)
+ DEFINED_ON(x64);
+
+ void wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp) DEFINED_ON(arm64);
+
+ // ========================================================================
+ // JS atomic operations.
+ //
+ // Here the arrayType must be a type that is valid for JS. As of 2017 that
+ // is an 8-bit, 16-bit, or 32-bit integer type.
+ //
+ // If arrayType is Scalar::Uint32 then:
+ //
+ // - `output` must be a float register
+ // - if the operation takes one temp register then `temp` must be defined
+ // - if the operation takes two temp registers then `temp2` must be defined.
+ //
+ // Otherwise `output` must be a GPR and `temp`/`temp2` should be InvalidReg.
+ // (`temp1` must always be valid.)
+ //
+ // For additional register constraints, see the primitive 32-bit operations
+ // and/or wasm operations above.
+
+ void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ const Address& mem, Register expected,
+ Register replacement, Register temp,
+ AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
+
+ void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ const BaseIndex& mem, Register expected,
+ Register replacement, Register temp,
+ AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
+
+ void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ const Address& mem, Register expected,
+ Register replacement, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register temp,
+ AnyRegister output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ const BaseIndex& mem, Register expected,
+ Register replacement, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register temp,
+ AnyRegister output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ const Address& mem, Register value, Register temp,
+ AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ const BaseIndex& mem, Register value, Register temp,
+ AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ const Address& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register temp,
+ AnyRegister output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp, AnyRegister output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Register value, const Address& mem,
+ Register temp1, Register temp2, AnyRegister output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Register value, const BaseIndex& mem,
+ Register temp1, Register temp2, AnyRegister output)
+ DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Imm32 value, const Address& mem,
+ Register temp1, Register temp2, AnyRegister output)
+ DEFINED_ON(x86_shared);
+
+ void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Imm32 value, const BaseIndex& mem,
+ Register temp1, Register temp2, AnyRegister output)
+ DEFINED_ON(x86_shared);
+
+ void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp, AnyRegister output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp, AnyRegister output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Register value, const Address& mem,
+ Register temp) DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Register value, const BaseIndex& mem,
+ Register temp) DEFINED_ON(arm, arm64, x86_shared);
+
+ void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Imm32 value, const Address& mem,
+ Register temp) DEFINED_ON(x86_shared);
+
+ void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Imm32 value, const BaseIndex& mem,
+ Register temp) DEFINED_ON(x86_shared);
+
+ void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ AtomicOp op, Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp)
+ DEFINED_ON(mips_shared, loong64, riscv64);
+
+ void atomicIsLockFreeJS(Register value, Register output);
+
+ // ========================================================================
+ // Spectre Mitigations.
+ //
+ // Spectre attacks are side-channel attacks based on cache pollution or
+ // slow execution of some instructions. We have multiple Spectre mitigations
+ // possible:
+ //
+ // - Stop speculative execution with memory barriers. Memory barriers
+ // force all branches depending on loads to be resolved, and thus
+ // resolve all mis-speculated paths.
+ //
+ // - Use conditional move instructions. Some CPUs have a branch predictor,
+ // and not a flag predictor. In such cases, using a conditional move
+ // instruction to zero some pointer/index is enough to add a
+ // data dependency which prevents any further execution until the load is
+ // resolved.
+
+ void spectreMaskIndex32(Register index, Register length, Register output);
+ void spectreMaskIndex32(Register index, const Address& length,
+ Register output);
+ void spectreMaskIndexPtr(Register index, Register length, Register output);
+ void spectreMaskIndexPtr(Register index, const Address& length,
+ Register output);
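+
+ // Usage sketch (illustrative registers): mask an index so that a
+ // mis-speculated out-of-bounds access is clamped to index 0 instead of
+ // reading arbitrary memory:
+ //
+ //   masm.spectreMaskIndex32(indexReg, lengthReg, maskedIndexReg);
+ //   // ... then load the element using maskedIndexReg ...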
+
+ // The length must be a power of two. Performs a bounds check and Spectre
+ // index masking.
+ void boundsCheck32PowerOfTwo(Register index, uint32_t length, Label* failure);
+
+ void speculationBarrier() PER_SHARED_ARCH;
+
+ //}}} check_macroassembler_decl_style
+ public:
+ // Unsafe here means the caller is responsible for Spectre mitigations if
+ // needed. Prefer branchTestObjClass or one of the other masm helpers!
+ inline void loadObjClassUnsafe(Register obj, Register dest);
+
+ template <typename EmitPreBarrier>
+ inline void storeObjShape(Register shape, Register obj,
+ EmitPreBarrier emitPreBarrier);
+ template <typename EmitPreBarrier>
+ inline void storeObjShape(Shape* shape, Register obj,
+ EmitPreBarrier emitPreBarrier);
+
+ inline void loadObjProto(Register obj, Register dest);
+
+ inline void loadStringLength(Register str, Register dest);
+
+ void loadStringChars(Register str, Register dest, CharEncoding encoding);
+
+ void loadNonInlineStringChars(Register str, Register dest,
+ CharEncoding encoding);
+ void loadNonInlineStringCharsForStore(Register str, Register dest);
+ void storeNonInlineStringChars(Register chars, Register str);
+
+ void loadInlineStringChars(Register str, Register dest,
+ CharEncoding encoding);
+ void loadInlineStringCharsForStore(Register str, Register dest);
+
+ private:
+ void loadRopeChild(Register str, Register index, Register output,
+ Label* isLinear);
+
+ public:
+ void branchIfCanLoadStringChar(Register str, Register index, Register scratch,
+ Label* label);
+ void branchIfNotCanLoadStringChar(Register str, Register index,
+ Register scratch, Label* label);
+
+ void loadStringChar(Register str, Register index, Register output,
+ Register scratch1, Register scratch2, Label* fail);
+
+ void loadRopeLeftChild(Register str, Register dest);
+ void loadRopeRightChild(Register str, Register dest);
+ void storeRopeChildren(Register left, Register right, Register str);
+
+ void loadDependentStringBase(Register str, Register dest);
+ void storeDependentStringBase(Register base, Register str);
+
+ void loadStringIndexValue(Register str, Register dest, Label* fail);
+
+ /**
+ * Store the character in |src| to |dest|.
+ */
+ template <typename T>
+ void storeChar(const T& src, Address dest, CharEncoding encoding) {
+ if (encoding == CharEncoding::Latin1) {
+ store8(src, dest);
+ } else {
+ store16(src, dest);
+ }
+ }
+
+ /**
+ * Load the character at |src| into |dest|.
+ */
+ template <typename T>
+ void loadChar(const T& src, Register dest, CharEncoding encoding) {
+ if (encoding == CharEncoding::Latin1) {
+ load8ZeroExtend(src, dest);
+ } else {
+ load16ZeroExtend(src, dest);
+ }
+ }
+
+ /**
+ * Load the character at |chars[index + offset]| into |dest|. The optional
+ * offset argument is not scaled to the character encoding.
+ */
+ void loadChar(Register chars, Register index, Register dest,
+ CharEncoding encoding, int32_t offset = 0);
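+
+ // For example (sketch, with illustrative registers),
+ //   loadChar(charsReg, indexReg, destReg, encoding, /* offset = */ 1)
+ // loads |chars[index + 1]|, e.g. to peek at the next code unit.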
+
+ /**
+ * Add |index| to |chars| so that |chars| now points at |chars[index]|.
+ */
+ void addToCharPtr(Register chars, Register index, CharEncoding encoding);
+
+ private:
+ void loadStringFromUnit(Register unit, Register dest,
+ const StaticStrings& staticStrings);
+ void loadLengthTwoString(Register c1, Register c2, Register dest,
+ const StaticStrings& staticStrings);
+
+ public:
+ /**
+ * Load the string representation of |input| in base |base|. Jumps to |fail|
+ * when the string representation needs to be allocated dynamically.
+ */
+ void loadInt32ToStringWithBase(Register input, Register base, Register dest,
+ Register scratch1, Register scratch2,
+ const StaticStrings& staticStrings,
+ const LiveRegisterSet& volatileRegs,
+ Label* fail);
+ void loadInt32ToStringWithBase(Register input, int32_t base, Register dest,
+ Register scratch1, Register scratch2,
+ const StaticStrings& staticStrings,
+ Label* fail);
+
+ /**
+ * Load the BigInt digits from |bigInt| into |digits|.
+ */
+ void loadBigIntDigits(Register bigInt, Register digits);
+
+ /**
+ * Load the first [u]int64 value from |bigInt| into |dest|.
+ */
+ void loadBigInt64(Register bigInt, Register64 dest);
+
+ /**
+ * Load the first digit from |bigInt| into |dest|. Handles the case when the
+ * BigInt digits length is zero.
+ *
+ * Note: A BigInt digit is a pointer-sized value.
+ */
+ void loadFirstBigIntDigitOrZero(Register bigInt, Register dest);
+
+ /**
+ * Load the number stored in |bigInt| into |dest|. Handles the case when the
+ * BigInt digits length is zero. Jumps to |fail| when the number can't be
+ * saved into a single pointer-sized register.
+ */
+ void loadBigInt(Register bigInt, Register dest, Label* fail);
+
+ /**
+ * Load the number stored in |bigInt| into |dest|. Doesn't handle the case
+ * when the BigInt digits length is zero. Jumps to |fail| when the number
+ * can't be saved into a single pointer-sized register.
+ */
+ void loadBigIntNonZero(Register bigInt, Register dest, Label* fail);
+
+ /**
+ * Load the absolute number stored in |bigInt| into |dest|. Handles the case
+ * when the BigInt digits length is zero. Jumps to |fail| when the number
+ * can't be saved into a single pointer-sized register.
+ */
+ void loadBigIntAbsolute(Register bigInt, Register dest, Label* fail);
+
+ /**
+ * Modifies the BigInt digit in place into a signed pointer-sized value.
+ * Jumps to |fail| when the digit exceeds the representable range.
+ */
+ void bigIntDigitToSignedPtr(Register bigInt, Register digit, Label* fail);
+
+ /**
+ * Initialize a BigInt from |val|. Clobbers |val|!
+ */
+ void initializeBigInt64(Scalar::Type type, Register bigInt, Register64 val);
+
+ /**
+ * Initialize a BigInt from the signed, pointer-sized register |val|.
+ * Clobbers |val|!
+ */
+ void initializeBigInt(Register bigInt, Register val);
+
+ /**
+ * Initialize a BigInt from the pointer-sized register |val|.
+ */
+ void initializeBigIntAbsolute(Register bigInt, Register val);
+
+ /**
+ * Copy a BigInt. Jumps to |fail| on allocation failure or when the BigInt
+ * digits need to be heap allocated.
+ */
+ void copyBigIntWithInlineDigits(Register src, Register dest, Register temp,
+ gc::Heap initialHeap, Label* fail);
+
+ /**
+ * Compare a BigInt and an Int32 value. Falls through to the false case.
+ */
+ void compareBigIntAndInt32(JSOp op, Register bigInt, Register int32,
+ Register scratch1, Register scratch2,
+ Label* ifTrue, Label* ifFalse);
+
+ /**
+ * Compare two BigInts for equality. Falls through if both BigInts are equal
+ * to each other.
+ *
+ * - When we jump to |notSameLength|, |temp1| holds the length of the right
+ * operand.
+ * - When we jump to |notSameDigit|, |temp2| points to the current digit of
+ * the left operand and |temp4| holds the current digit of the right
+ * operand.
+ */
+ void equalBigInts(Register left, Register right, Register temp1,
+ Register temp2, Register temp3, Register temp4,
+ Label* notSameSign, Label* notSameLength,
+ Label* notSameDigit);
+
+ void loadJSContext(Register dest);
+
+ void switchToRealm(Register realm);
+ void switchToRealm(const void* realm, Register scratch);
+ void switchToObjectRealm(Register obj, Register scratch);
+ void switchToBaselineFrameRealm(Register scratch);
+ void switchToWasmInstanceRealm(Register scratch1, Register scratch2);
+ void debugAssertContextRealm(const void* realm, Register scratch);
+
+ void loadJitActivation(Register dest);
+
+ void guardSpecificAtom(Register str, JSAtom* atom, Register scratch,
+ const LiveRegisterSet& volatileRegs, Label* fail);
+
+ void guardStringToInt32(Register str, Register output, Register scratch,
+ LiveRegisterSet volatileRegs, Label* fail);
+
+ template <typename T>
+ void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {
+ if (dest.hasValue()) {
+ loadValue(src, dest.valueReg());
+ } else {
+ loadUnboxedValue(src, dest.type(), dest.typedReg());
+ }
+ }
+
+ template <typename T>
+ void storeTypedOrValue(TypedOrValueRegister src, const T& dest) {
+ if (src.hasValue()) {
+ storeValue(src.valueReg(), dest);
+ } else if (IsFloatingPointType(src.type())) {
+ FloatRegister reg = src.typedReg().fpu();
+ if (src.type() == MIRType::Float32) {
+ ScratchDoubleScope fpscratch(*this);
+ convertFloat32ToDouble(reg, fpscratch);
+ boxDouble(fpscratch, dest);
+ } else {
+ boxDouble(reg, dest);
+ }
+ } else {
+ storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
+ }
+ }
+
+ template <typename T>
+ void storeConstantOrRegister(const ConstantOrRegister& src, const T& dest) {
+ if (src.constant()) {
+ storeValue(src.value(), dest);
+ } else {
+ storeTypedOrValue(src.reg(), dest);
+ }
+ }
+
+ void storeCallPointerResult(Register reg) {
+ if (reg != ReturnReg) {
+ mov(ReturnReg, reg);
+ }
+ }
+
+ inline void storeCallBoolResult(Register reg);
+ inline void storeCallInt32Result(Register reg);
+
+ void storeCallFloatResult(FloatRegister reg) {
+ if (reg != ReturnDoubleReg) {
+ moveDouble(ReturnDoubleReg, reg);
+ }
+ }
+
+ inline void storeCallResultValue(AnyRegister dest, JSValueType type);
+
+ void storeCallResultValue(ValueOperand dest) {
+#if defined(JS_NUNBOX32)
+ // Reshuffle the return registers used for a call result to store into
+ // dest, using ReturnReg as a scratch register if necessary. This must
+ // only be called after returning from a call, at a point when the
+ // return register is not live. XXX would be better to allow wrappers
+ // to store the return value to different places.
+ if (dest.typeReg() == JSReturnReg_Data) {
+ if (dest.payloadReg() == JSReturnReg_Type) {
+ // swap the two registers.
+ mov(JSReturnReg_Type, ReturnReg);
+ mov(JSReturnReg_Data, JSReturnReg_Type);
+ mov(ReturnReg, JSReturnReg_Data);
+ } else {
+ mov(JSReturnReg_Data, dest.payloadReg());
+ mov(JSReturnReg_Type, dest.typeReg());
+ }
+ } else {
+ mov(JSReturnReg_Type, dest.typeReg());
+ mov(JSReturnReg_Data, dest.payloadReg());
+ }
+#elif defined(JS_PUNBOX64)
+ if (dest.valueReg() != JSReturnReg) {
+ mov(JSReturnReg, dest.valueReg());
+ }
+#else
+# error "Bad architecture"
+#endif
+ }
+
+ inline void storeCallResultValue(TypedOrValueRegister dest);
+
+ private:
+ TrampolinePtr preBarrierTrampoline(MIRType type);
+
+ template <typename T>
+ void unguardedCallPreBarrier(const T& address, MIRType type) {
+ Label done;
+ if (type == MIRType::Value) {
+ branchTestGCThing(Assembler::NotEqual, address, &done);
+ } else if (type == MIRType::Object || type == MIRType::String) {
+ branchPtr(Assembler::Equal, address, ImmWord(0), &done);
+ }
+
+ Push(PreBarrierReg);
+ computeEffectiveAddress(address, PreBarrierReg);
+
+ TrampolinePtr preBarrier = preBarrierTrampoline(type);
+
+ call(preBarrier);
+ Pop(PreBarrierReg);
+ // On arm64, SP may be < PSP now (that's OK).
+ // eg testcase: tests/auto-regress/bug702915.js
+ bind(&done);
+ }
+
+ public:
+ template <typename T>
+ void guardedCallPreBarrier(const T& address, MIRType type) {
+ Label done;
+ branchTestNeedsIncrementalBarrier(Assembler::Zero, &done);
+ unguardedCallPreBarrier(address, type);
+ bind(&done);
+ }
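+
+ // Usage sketch (illustrative object register and slot offset): emit the
+ // pre-barrier before overwriting a slot that may hold a GC thing.
+ //
+ //   masm.guardedCallPreBarrier(Address(objReg, slotOffset), MIRType::Value);
+ //   masm.storeValue(newVal, Address(objReg, slotOffset));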
+
+ // Like guardedCallPreBarrier, but this can also be called from runtime-wide
+ // trampolines because it loads cx->zone (instead of baking in the current
+ // Zone) if JitContext::realm is nullptr.
+ template <typename T>
+ void guardedCallPreBarrierAnyZone(const T& address, MIRType type,
+ Register scratch) {
+ Label done;
+ branchTestNeedsIncrementalBarrierAnyZone(Assembler::Zero, &done, scratch);
+ unguardedCallPreBarrier(address, type);
+ bind(&done);
+ }
+
+ enum class Uint32Mode { FailOnDouble, ForceDouble };
+
+ void boxUint32(Register source, ValueOperand dest, Uint32Mode uint32Mode,
+ Label* fail);
+
+ template <typename T>
+ void loadFromTypedArray(Scalar::Type arrayType, const T& src,
+ AnyRegister dest, Register temp, Label* fail);
+
+ template <typename T>
+ void loadFromTypedArray(Scalar::Type arrayType, const T& src,
+ const ValueOperand& dest, Uint32Mode uint32Mode,
+ Register temp, Label* fail);
+
+ template <typename T>
+ void loadFromTypedBigIntArray(Scalar::Type arrayType, const T& src,
+ Register bigInt, Register64 temp);
+
+ template <typename S, typename T>
+ void storeToTypedIntArray(Scalar::Type arrayType, const S& value,
+ const T& dest) {
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ store8(value, dest);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ store16(value, dest);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ store32(value, dest);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+ }
+
+ void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
+ const BaseIndex& dest);
+ void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
+ const Address& dest);
+
+ void storeToTypedBigIntArray(Scalar::Type arrayType, Register64 value,
+ const BaseIndex& dest);
+ void storeToTypedBigIntArray(Scalar::Type arrayType, Register64 value,
+ const Address& dest);
+
+ void memoryBarrierBefore(const Synchronization& sync);
+ void memoryBarrierAfter(const Synchronization& sync);
+
+ void debugAssertIsObject(const ValueOperand& val);
+ void debugAssertObjHasFixedSlots(Register obj, Register scratch);
+
+ void debugAssertObjectHasClass(Register obj, Register scratch,
+ const JSClass* clasp);
+
+ void branchArrayIsNotPacked(Register array, Register temp1, Register temp2,
+ Label* label);
+
+ void setIsPackedArray(Register obj, Register output, Register temp);
+
+ void packedArrayPop(Register array, ValueOperand output, Register temp1,
+ Register temp2, Label* fail);
+ void packedArrayShift(Register array, ValueOperand output, Register temp1,
+ Register temp2, LiveRegisterSet volatileRegs,
+ Label* fail);
+
+ void loadArgumentsObjectElement(Register obj, Register index,
+ ValueOperand output, Register temp,
+ Label* fail);
+ void loadArgumentsObjectElementHole(Register obj, Register index,
+ ValueOperand output, Register temp,
+ Label* fail);
+ void loadArgumentsObjectElementExists(Register obj, Register index,
+ Register output, Register temp,
+ Label* fail);
+
+ void loadArgumentsObjectLength(Register obj, Register output, Label* fail);
+
+ void branchTestArgumentsObjectFlags(Register obj, Register temp,
+ uint32_t flags, Condition cond,
+ Label* label);
+
+ void typedArrayElementSize(Register obj, Register output);
+ void branchIfClassIsNotTypedArray(Register clasp, Label* notTypedArray);
+
+ void branchIfHasDetachedArrayBuffer(Register obj, Register temp,
+ Label* label);
+
+ void branchIfNativeIteratorNotReusable(Register ni, Label* notReusable);
+ void branchNativeIteratorIndices(Condition cond, Register ni, Register temp,
+ NativeIteratorIndices kind, Label* label);
+
+ void maybeLoadIteratorFromShape(Register obj, Register dest, Register temp,
+ Register temp2, Register temp3,
+ Label* failure);
+
+ void iteratorMore(Register obj, ValueOperand output, Register temp);
+ void iteratorClose(Register obj, Register temp1, Register temp2,
+ Register temp3);
+ void registerIterator(Register enumeratorsList, Register iter, Register temp);
+
+ void toHashableNonGCThing(ValueOperand value, ValueOperand result,
+ FloatRegister tempFloat);
+
+ void toHashableValue(ValueOperand value, ValueOperand result,
+ FloatRegister tempFloat, Label* atomizeString,
+ Label* tagString);
+
+ private:
+ void scrambleHashCode(Register result);
+
+ public:
+ void prepareHashNonGCThing(ValueOperand value, Register result,
+ Register temp);
+ void prepareHashString(Register str, Register result, Register temp);
+ void prepareHashSymbol(Register sym, Register result);
+ void prepareHashBigInt(Register bigInt, Register result, Register temp1,
+ Register temp2, Register temp3);
+ void prepareHashObject(Register setObj, ValueOperand value, Register result,
+ Register temp1, Register temp2, Register temp3,
+ Register temp4);
+ void prepareHashValue(Register setObj, ValueOperand value, Register result,
+ Register temp1, Register temp2, Register temp3,
+ Register temp4);
+
+ private:
+ enum class IsBigInt { No, Yes, Maybe };
+
+ /**
+ * Search for a value in an OrderedHashTable.
+ *
+ * When we jump to |found|, |entryTemp| holds the found hashtable entry.
+ */
+ template <typename OrderedHashTable>
+ void orderedHashTableLookup(Register setOrMapObj, ValueOperand value,
+ Register hash, Register entryTemp, Register temp1,
+ Register temp3, Register temp4, Register temp5,
+ Label* found, IsBigInt isBigInt);
+
+ void setObjectHas(Register setObj, ValueOperand value, Register hash,
+ Register result, Register temp1, Register temp2,
+ Register temp3, Register temp4, IsBigInt isBigInt);
+
+ void mapObjectHas(Register mapObj, ValueOperand value, Register hash,
+ Register result, Register temp1, Register temp2,
+ Register temp3, Register temp4, IsBigInt isBigInt);
+
+ void mapObjectGet(Register mapObj, ValueOperand value, Register hash,
+ ValueOperand result, Register temp1, Register temp2,
+ Register temp3, Register temp4, Register temp5,
+ IsBigInt isBigInt);
+
+ public:
+ void setObjectHasNonBigInt(Register setObj, ValueOperand value, Register hash,
+ Register result, Register temp1, Register temp2) {
+ return setObjectHas(setObj, value, hash, result, temp1, temp2, InvalidReg,
+ InvalidReg, IsBigInt::No);
+ }
+ void setObjectHasBigInt(Register setObj, ValueOperand value, Register hash,
+ Register result, Register temp1, Register temp2,
+ Register temp3, Register temp4) {
+ return setObjectHas(setObj, value, hash, result, temp1, temp2, temp3, temp4,
+ IsBigInt::Yes);
+ }
+ void setObjectHasValue(Register setObj, ValueOperand value, Register hash,
+ Register result, Register temp1, Register temp2,
+ Register temp3, Register temp4) {
+ return setObjectHas(setObj, value, hash, result, temp1, temp2, temp3, temp4,
+ IsBigInt::Maybe);
+ }
+
+ void mapObjectHasNonBigInt(Register mapObj, ValueOperand value, Register hash,
+ Register result, Register temp1, Register temp2) {
+ return mapObjectHas(mapObj, value, hash, result, temp1, temp2, InvalidReg,
+ InvalidReg, IsBigInt::No);
+ }
+ void mapObjectHasBigInt(Register mapObj, ValueOperand value, Register hash,
+ Register result, Register temp1, Register temp2,
+ Register temp3, Register temp4) {
+ return mapObjectHas(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
+ IsBigInt::Yes);
+ }
+ void mapObjectHasValue(Register mapObj, ValueOperand value, Register hash,
+ Register result, Register temp1, Register temp2,
+ Register temp3, Register temp4) {
+ return mapObjectHas(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
+ IsBigInt::Maybe);
+ }
+
+ void mapObjectGetNonBigInt(Register mapObj, ValueOperand value, Register hash,
+ ValueOperand result, Register temp1,
+ Register temp2, Register temp3) {
+ return mapObjectGet(mapObj, value, hash, result, temp1, temp2, temp3,
+ InvalidReg, InvalidReg, IsBigInt::No);
+ }
+ void mapObjectGetBigInt(Register mapObj, ValueOperand value, Register hash,
+ ValueOperand result, Register temp1, Register temp2,
+ Register temp3, Register temp4, Register temp5) {
+ return mapObjectGet(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
+ temp5, IsBigInt::Yes);
+ }
+ void mapObjectGetValue(Register mapObj, ValueOperand value, Register hash,
+ ValueOperand result, Register temp1, Register temp2,
+ Register temp3, Register temp4, Register temp5) {
+ return mapObjectGet(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
+ temp5, IsBigInt::Maybe);
+ }
+
+ private:
+ template <typename OrderedHashTable>
+ void loadOrderedHashTableCount(Register setOrMapObj, Register result);
+
+ public:
+ void loadSetObjectSize(Register setObj, Register result);
+ void loadMapObjectSize(Register mapObj, Register result);
+
+ // Inline version of js_TypedArray_uint8_clamp_double.
+ // This function clobbers the input register.
+ void clampDoubleToUint8(FloatRegister input, Register output) PER_ARCH;
+
+ using MacroAssemblerSpecific::ensureDouble;
+
+ template <typename S>
+ void ensureDouble(const S& source, FloatRegister dest, Label* failure) {
+ Label isDouble, done;
+ branchTestDouble(Assembler::Equal, source, &isDouble);
+ branchTestInt32(Assembler::NotEqual, source, failure);
+
+ convertInt32ToDouble(source, dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+ }
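+
+ // For example (sketch), to coerce an int32-or-double Value in |val| into
+ // |fpReg|, jumping to |bail| for any other type:
+ //
+ //   masm.ensureDouble(val, fpReg, bail);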
+
+ // Inline allocation.
+ private:
+ void checkAllocatorState(Label* fail);
+ bool shouldNurseryAllocate(gc::AllocKind allocKind, gc::Heap initialHeap);
+ void nurseryAllocateObject(
+ Register result, Register temp, gc::AllocKind allocKind,
+ size_t nDynamicSlots, Label* fail,
+ const AllocSiteInput& allocSite = AllocSiteInput());
+ void bumpPointerAllocate(Register result, Register temp, Label* fail,
+ CompileZone* zone, JS::TraceKind traceKind,
+ uint32_t size,
+ const AllocSiteInput& allocSite = AllocSiteInput());
+ void updateAllocSite(Register temp, Register result, CompileZone* zone,
+ Register site);
+
+ void freeListAllocate(Register result, Register temp, gc::AllocKind allocKind,
+ Label* fail);
+ void allocateObject(Register result, Register temp, gc::AllocKind allocKind,
+ uint32_t nDynamicSlots, gc::Heap initialHeap, Label* fail,
+ const AllocSiteInput& allocSite = AllocSiteInput());
+ void nurseryAllocateString(Register result, Register temp,
+ gc::AllocKind allocKind, Label* fail);
+ void allocateString(Register result, Register temp, gc::AllocKind allocKind,
+ gc::Heap initialHeap, Label* fail);
+ void nurseryAllocateBigInt(Register result, Register temp, Label* fail);
+ void copySlotsFromTemplate(Register obj,
+ const TemplateNativeObject& templateObj,
+ uint32_t start, uint32_t end);
+ void fillSlotsWithConstantValue(Address addr, Register temp, uint32_t start,
+ uint32_t end, const Value& v);
+ void fillSlotsWithUndefined(Address addr, Register temp, uint32_t start,
+ uint32_t end);
+ void fillSlotsWithUninitialized(Address addr, Register temp, uint32_t start,
+ uint32_t end);
+
+ void initGCSlots(Register obj, Register temp,
+ const TemplateNativeObject& templateObj);
+
+ public:
+ void callFreeStub(Register slots);
+ void createGCObject(Register result, Register temp,
+ const TemplateObject& templateObj, gc::Heap initialHeap,
+ Label* fail, bool initContents = true);
+
+ void createPlainGCObject(Register result, Register shape, Register temp,
+ Register temp2, uint32_t numFixedSlots,
+ uint32_t numDynamicSlots, gc::AllocKind allocKind,
+ gc::Heap initialHeap, Label* fail,
+ const AllocSiteInput& allocSite,
+ bool initContents = true);
+
+ void createArrayWithFixedElements(
+ Register result, Register shape, Register temp, uint32_t arrayLength,
+ uint32_t arrayCapacity, gc::AllocKind allocKind, gc::Heap initialHeap,
+ Label* fail, const AllocSiteInput& allocSite = AllocSiteInput());
+
+ void initGCThing(Register obj, Register temp,
+ const TemplateObject& templateObj, bool initContents = true);
+
+ enum class TypedArrayLength { Fixed, Dynamic };
+
+ void initTypedArraySlots(Register obj, Register temp, Register lengthReg,
+ LiveRegisterSet liveRegs, Label* fail,
+ TypedArrayObject* templateObj,
+ TypedArrayLength lengthKind);
+
+ void newGCString(Register result, Register temp, gc::Heap initialHeap,
+ Label* fail);
+ void newGCFatInlineString(Register result, Register temp,
+ gc::Heap initialHeap, Label* fail);
+
+ void newGCBigInt(Register result, Register temp, gc::Heap initialHeap,
+ Label* fail);
+
+ // Compares two strings for equality based on the JSOp.
+ // This checks for identical pointers, atoms, and lengths, and fails for
+ // everything else.
+ void compareStrings(JSOp op, Register left, Register right, Register result,
+ Label* fail);
+
+ // Result of the typeof operation. Falls back to slow-path for proxies.
+ void typeOfObject(Register objReg, Register scratch, Label* slow,
+ Label* isObject, Label* isCallable, Label* isUndefined);
+
+ // Implementation of IsCallable. Doesn't handle proxies.
+ void isCallable(Register obj, Register output, Label* isProxy) {
+ isCallableOrConstructor(true, obj, output, isProxy);
+ }
+ void isConstructor(Register obj, Register output, Label* isProxy) {
+ isCallableOrConstructor(false, obj, output, isProxy);
+ }
+
+ void setIsCrossRealmArrayConstructor(Register obj, Register output);
+
+ void setIsDefinitelyTypedArrayConstructor(Register obj, Register output);
+
+ void loadMegamorphicCache(Register dest);
+ void loadStringToAtomCacheLastLookups(Register dest);
+ void loadMegamorphicSetPropCache(Register dest);
+
+ void loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
+ Register outHash, Label* cacheMiss);
+
+ void loadAtomHash(Register id, Register hash, Label* done);
+
+ void emitExtractValueFromMegamorphicCacheEntry(
+ Register obj, Register entry, Register scratch1, Register scratch2,
+ ValueOperand output, Label* cacheHit, Label* cacheMiss);
+
+ template <typename IdOperandType>
+ void emitMegamorphicCacheLookupByValueCommon(
+ IdOperandType id, Register obj, Register scratch1, Register scratch2,
+ Register outEntryPtr, Label* cacheMiss, Label* cacheMissWithEntry);
+
+ void emitMegamorphicCacheLookup(PropertyKey id, Register obj,
+ Register scratch1, Register scratch2,
+ Register outEntryPtr, ValueOperand output,
+ Label* cacheHit);
+
+ // NOTE: |id| must either be a ValueOperand or a Register. If it is a
+ // Register, we assume that it is an atom.
+ template <typename IdOperandType>
+ void emitMegamorphicCacheLookupByValue(IdOperandType id, Register obj,
+ Register scratch1, Register scratch2,
+ Register outEntryPtr,
+ ValueOperand output, Label* cacheHit);
+
+ void emitMegamorphicCacheLookupExists(ValueOperand id, Register obj,
+ Register scratch1, Register scratch2,
+ Register outEntryPtr, Register output,
+ Label* cacheHit, bool hasOwn);
+
+ // Given a PropertyIteratorObject with valid indices, extract the current
+ // PropertyIndex, storing the index in |outIndex| and the kind in |outKind|.
+ void extractCurrentIndexAndKindFromIterator(Register iterator,
+ Register outIndex,
+ Register outKind);
+
+ template <typename IdType>
+#ifdef JS_CODEGEN_X86
+ // See MegamorphicSetElement in LIROps.yaml
+ void emitMegamorphicCachedSetSlot(IdType id, Register obj, Register scratch1,
+ ValueOperand value, Label* cacheHit,
+ void (*emitPreBarrier)(MacroAssembler&,
+ const Address&,
+ MIRType));
+#else
+ void emitMegamorphicCachedSetSlot(
+ IdType id, Register obj, Register scratch1, Register scratch2,
+ Register scratch3, ValueOperand value, Label* cacheHit,
+ void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
+#endif
+
+ void loadDOMExpandoValueGuardGeneration(
+ Register obj, ValueOperand output,
+ JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
+ Label* fail);
+
+ void guardNonNegativeIntPtrToInt32(Register reg, Label* fail);
+
+ void loadArrayBufferByteLengthIntPtr(Register obj, Register output);
+ void loadArrayBufferViewByteOffsetIntPtr(Register obj, Register output);
+ void loadArrayBufferViewLengthIntPtr(Register obj, Register output);
+
+ private:
+ void isCallableOrConstructor(bool isCallable, Register obj, Register output,
+ Label* isProxy);
+
+ public:
+ // Generates code used to complete a bailout.
+ void generateBailoutTail(Register scratch, Register bailoutInfo);
+
+ public:
+#ifndef JS_CODEGEN_ARM64
+ // StackPointer manipulation functions.
+ // On ARM64, the StackPointer is implemented as two synchronized registers.
+ // Code shared across platforms must use these functions to be valid.
+ template <typename T>
+ inline void addToStackPtr(T t);
+ template <typename T>
+ inline void addStackPtrTo(T t);
+
+ void subFromStackPtr(Imm32 imm32)
+ DEFINED_ON(mips32, mips64, loong64, riscv64, wasm32, arm, x86, x64);
+ void subFromStackPtr(Register reg);
+
+ template <typename T>
+ void subStackPtrFrom(T t) {
+ subPtr(getStackPointer(), t);
+ }
+
+ template <typename T>
+ void andToStackPtr(T t) {
+ andPtr(t, getStackPointer());
+ }
+
+ template <typename T>
+ void moveToStackPtr(T t) {
+ movePtr(t, getStackPointer());
+ }
+ template <typename T>
+ void moveStackPtrTo(T t) {
+ movePtr(getStackPointer(), t);
+ }
+
+ template <typename T>
+ void loadStackPtr(T t) {
+ loadPtr(t, getStackPointer());
+ }
+ template <typename T>
+ void storeStackPtr(T t) {
+ storePtr(getStackPointer(), t);
+ }
+
+ // StackPointer testing functions.
+ // On ARM64, sp can function as the zero register depending on context.
+ // Code shared across platforms must use these functions to be valid.
+ template <typename T>
+ inline void branchTestStackPtr(Condition cond, T t, Label* label);
+ template <typename T>
+ inline void branchStackPtr(Condition cond, T rhs, Label* label);
+ template <typename T>
+ inline void branchStackPtrRhs(Condition cond, T lhs, Label* label);
+
+ // Move the stack pointer based on the requested amount.
+ inline void reserveStack(uint32_t amount);
+#else // !JS_CODEGEN_ARM64
+ void reserveStack(uint32_t amount);
+#endif
+
+ public:
+ void enableProfilingInstrumentation() {
+ emitProfilingInstrumentation_ = true;
+ }
+
+ private:
+ // This class is used to surround call sites throughout the assembler. It is
+ // used by the callWithABI and callJit functions, except for the variants
+ // suffixed with NoProfiler.
+ class MOZ_RAII AutoProfilerCallInstrumentation {
+ public:
+ explicit AutoProfilerCallInstrumentation(MacroAssembler& masm);
+ ~AutoProfilerCallInstrumentation() = default;
+ };
+ friend class AutoProfilerCallInstrumentation;
+
+ void appendProfilerCallSite(CodeOffset label) {
+ propagateOOM(profilerCallSites_.append(label));
+ }
+
+ // Fix up the code pointers to be written for locations where profilerCallSite
+ // emitted moves of RIP to a register.
+ void linkProfilerCallSites(JitCode* code);
+
+  // This field is used to manage profiling instrumentation output. If
+  // enabled, instrumentation will be emitted around call sites.
+ bool emitProfilingInstrumentation_;
+
+ // Record locations of the call sites.
+ Vector<CodeOffset, 0, SystemAllocPolicy> profilerCallSites_;
+
+ public:
+ void loadJitCodeRaw(Register func, Register dest);
+ void loadBaselineJitCodeRaw(Register func, Register dest,
+ Label* failure = nullptr);
+ void storeICScriptInJSContext(Register icScript);
+
+ void loadBaselineFramePtr(Register framePtr, Register dest);
+
+ void pushBaselineFramePtr(Register framePtr, Register scratch) {
+ loadBaselineFramePtr(framePtr, scratch);
+ push(scratch);
+ }
+
+ void PushBaselineFramePtr(Register framePtr, Register scratch) {
+ loadBaselineFramePtr(framePtr, scratch);
+ Push(scratch);
+ }
+
+ using MacroAssemblerSpecific::movePtr;
+
+ void movePtr(TrampolinePtr ptr, Register dest) {
+ movePtr(ImmPtr(ptr.value), dest);
+ }
+
+ private:
+ void handleFailure();
+
+ public:
+ Label* exceptionLabel() {
+ // Exceptions are currently handled the same way as sequential failures.
+ return &failureLabel_;
+ }
+
+ Label* failureLabel() { return &failureLabel_; }
+
+ void finish();
+ void link(JitCode* code);
+
+ void assumeUnreachable(const char* output);
+
+ void printf(const char* output);
+ void printf(const char* output, Register value);
+
+#define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2) \
+ MOZ_ASSERT(IsFloatingPointType(type)); \
+ if (type == MIRType::Double) \
+ method##Double(arg1d, arg2); \
+ else \
+ method##Float32(arg1f, arg2);
+
+ void loadConstantFloatingPoint(double d, float f, FloatRegister dest,
+ MIRType destType) {
+ DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
+ }
+ void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest,
+ MIRType destType) {
+ DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
+ }
+ void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest,
+ MIRType destType) {
+ DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
+ }
+ void convertInt32ToFloatingPoint(Register src, FloatRegister dest,
+ MIRType destType) {
+ DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
+ }
+
+#undef DISPATCH_FLOATING_POINT_OP
+
+ void convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
+ Label* fail, MIRType outputType);
+
+ void outOfLineTruncateSlow(FloatRegister src, Register dest,
+ bool widenFloatToDouble, bool compilingWasm,
+ wasm::BytecodeOffset callOffset);
+
+ void convertInt32ValueToDouble(ValueOperand val);
+
+ void convertValueToDouble(ValueOperand value, FloatRegister output,
+ Label* fail) {
+ convertValueToFloatingPoint(value, output, fail, MIRType::Double);
+ }
+
+ void convertValueToFloat(ValueOperand value, FloatRegister output,
+ Label* fail) {
+ convertValueToFloatingPoint(value, output, fail, MIRType::Float32);
+ }
+
+ //
+ // Functions for converting values to int.
+ //
+ void convertDoubleToInt(FloatRegister src, Register output,
+ FloatRegister temp, Label* truncateFail, Label* fail,
+ IntConversionBehavior behavior);
+
+ // Strings may be handled by providing labels to jump to when the behavior
+ // is truncation or clamping. The subroutine, usually an OOL call, is
+  // passed the unboxed string in |stringReg| and should convert it to a
+  // double and store the result in |temp|.
+ void convertValueToInt(
+ ValueOperand value, Label* handleStringEntry, Label* handleStringRejoin,
+ Label* truncateDoubleSlow, Register stringReg, FloatRegister temp,
+ Register output, Label* fail, IntConversionBehavior behavior,
+ IntConversionInputKind conversion = IntConversionInputKind::Any);
+
+ // This carries over the MToNumberInt32 operation on the ValueOperand
+ // input; see comment at the top of this class.
+ void convertValueToInt32(
+ ValueOperand value, FloatRegister temp, Register output, Label* fail,
+ bool negativeZeroCheck,
+ IntConversionInputKind conversion = IntConversionInputKind::Any) {
+ convertValueToInt(
+ value, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
+ negativeZeroCheck ? IntConversionBehavior::NegativeZeroCheck
+ : IntConversionBehavior::Normal,
+ conversion);
+ }
+
+ // This carries over the MTruncateToInt32 operation on the ValueOperand
+ // input; see the comment at the top of this class.
+ void truncateValueToInt32(ValueOperand value, Label* handleStringEntry,
+ Label* handleStringRejoin,
+ Label* truncateDoubleSlow, Register stringReg,
+ FloatRegister temp, Register output, Label* fail) {
+ convertValueToInt(value, handleStringEntry, handleStringRejoin,
+ truncateDoubleSlow, stringReg, temp, output, fail,
+ IntConversionBehavior::Truncate);
+ }
+
+ void truncateValueToInt32(ValueOperand value, FloatRegister temp,
+ Register output, Label* fail) {
+ truncateValueToInt32(value, nullptr, nullptr, nullptr, InvalidReg, temp,
+ output, fail);
+ }
+
+ // Convenience functions for clamping values to uint8.
+ void clampValueToUint8(ValueOperand value, Label* handleStringEntry,
+ Label* handleStringRejoin, Register stringReg,
+ FloatRegister temp, Register output, Label* fail) {
+ convertValueToInt(value, handleStringEntry, handleStringRejoin, nullptr,
+ stringReg, temp, output, fail,
+ IntConversionBehavior::ClampToUint8);
+ }
+
+ [[nodiscard]] bool icBuildOOLFakeExitFrame(void* fakeReturnAddr,
+ AutoSaveLiveRegisters& save);
+
+ // Align the stack pointer based on the number of arguments which are pushed
+ // on the stack, such that the JitFrameLayout would be correctly aligned on
+ // the JitStackAlignment.
+ void alignJitStackBasedOnNArgs(Register nargs, bool countIncludesThis);
+ void alignJitStackBasedOnNArgs(uint32_t argc, bool countIncludesThis);
+
+ inline void assertStackAlignment(uint32_t alignment, int32_t offset = 0);
+
+ void touchFrameValues(Register numStackValues, Register scratch1,
+ Register scratch2);
+
+#ifdef JS_64BIT
+ // See comment block "64-bit GPRs carrying 32-bit values" above. This asserts
+ // that the high bits of the register are appropriate for the architecture and
+ // the value in the low bits.
+ void debugAssertCanonicalInt32(Register r);
+#endif
+};
+
+// StackMacroAssembler checks that no GC will happen while it's on the stack.
+class MOZ_RAII StackMacroAssembler : public MacroAssembler {
+ JS::AutoCheckCannotGC nogc;
+
+ public:
+ StackMacroAssembler(JSContext* cx, TempAllocator& alloc);
+};
+
+// WasmMacroAssembler does not contain GC pointers, so it doesn't need the no-GC
+// checking StackMacroAssembler has.
+class MOZ_RAII WasmMacroAssembler : public MacroAssembler {
+ public:
+ explicit WasmMacroAssembler(TempAllocator& alloc, bool limitedSize = true);
+ explicit WasmMacroAssembler(TempAllocator& alloc,
+ const wasm::ModuleEnvironment& env,
+ bool limitedSize = true);
+ ~WasmMacroAssembler() { assertNoGCThings(); }
+};
+
+// Heap-allocated MacroAssembler used for Ion off-thread code generation.
+// GC cancels off-thread compilations.
+class IonHeapMacroAssembler : public MacroAssembler {
+ public:
+ IonHeapMacroAssembler(TempAllocator& alloc, CompileRealm* realm);
+};
+
+//{{{ check_macroassembler_style
+inline uint32_t MacroAssembler::framePushed() const { return framePushed_; }
+
+inline void MacroAssembler::setFramePushed(uint32_t framePushed) {
+ framePushed_ = framePushed;
+}
+
+inline void MacroAssembler::adjustFrame(int32_t value) {
+ MOZ_ASSERT_IF(value < 0, framePushed_ >= uint32_t(-value));
+ setFramePushed(framePushed_ + value);
+}
+
+inline void MacroAssembler::implicitPop(uint32_t bytes) {
+ MOZ_ASSERT(bytes % sizeof(intptr_t) == 0);
+ MOZ_ASSERT(bytes <= INT32_MAX);
+ adjustFrame(-int32_t(bytes));
+}
+//}}} check_macroassembler_style
+
+static inline Assembler::DoubleCondition JSOpToDoubleCondition(JSOp op) {
+ switch (op) {
+ case JSOp::Eq:
+ case JSOp::StrictEq:
+ return Assembler::DoubleEqual;
+ case JSOp::Ne:
+ case JSOp::StrictNe:
+ return Assembler::DoubleNotEqualOrUnordered;
+ case JSOp::Lt:
+ return Assembler::DoubleLessThan;
+ case JSOp::Le:
+ return Assembler::DoubleLessThanOrEqual;
+ case JSOp::Gt:
+ return Assembler::DoubleGreaterThan;
+ case JSOp::Ge:
+ return Assembler::DoubleGreaterThanOrEqual;
+ default:
+ MOZ_CRASH("Unexpected comparison operation");
+ }
+}
+
+// Note: the op may have been inverted during lowering (to put constants in a
+// position where they can be immediates), so it is important to use the
+// lir->jsop() instead of the mir->jsop() when it is present.
+static inline Assembler::Condition JSOpToCondition(JSOp op, bool isSigned) {
+ if (isSigned) {
+ switch (op) {
+ case JSOp::Eq:
+ case JSOp::StrictEq:
+ return Assembler::Equal;
+ case JSOp::Ne:
+ case JSOp::StrictNe:
+ return Assembler::NotEqual;
+ case JSOp::Lt:
+ return Assembler::LessThan;
+ case JSOp::Le:
+ return Assembler::LessThanOrEqual;
+ case JSOp::Gt:
+ return Assembler::GreaterThan;
+ case JSOp::Ge:
+ return Assembler::GreaterThanOrEqual;
+ default:
+ MOZ_CRASH("Unrecognized comparison operation");
+ }
+ } else {
+ switch (op) {
+ case JSOp::Eq:
+ case JSOp::StrictEq:
+ return Assembler::Equal;
+ case JSOp::Ne:
+ case JSOp::StrictNe:
+ return Assembler::NotEqual;
+ case JSOp::Lt:
+ return Assembler::Below;
+ case JSOp::Le:
+ return Assembler::BelowOrEqual;
+ case JSOp::Gt:
+ return Assembler::Above;
+ case JSOp::Ge:
+ return Assembler::AboveOrEqual;
+ default:
+ MOZ_CRASH("Unrecognized comparison operation");
+ }
+ }
+}
+
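+// Illustrative example (assuming ComputeByteAlignment returns the padding
+// needed to round its first argument up to a multiple of |alignment|): with
+// alignment == 16, 8 bytes already pushed and 20 bytes to push,
+// ComputeByteAlignment(28, 16) == 4, so 24 bytes are reserved and the total
+// of 32 pushed bytes stays 16-byte aligned.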
+static inline size_t StackDecrementForCall(uint32_t alignment,
+ size_t bytesAlreadyPushed,
+ size_t bytesToPush) {
+ return bytesToPush +
+ ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
+}
+
+// Helper for generatePreBarrier.
+inline DynFn JitPreWriteBarrier(MIRType type);
+} // namespace jit
+
+} // namespace js
+
+#endif /* jit_MacroAssembler_h */
diff --git a/js/src/jit/MoveEmitter.h b/js/src/jit/MoveEmitter.h
new file mode 100644
index 0000000000..7ce7743018
--- /dev/null
+++ b/js/src/jit/MoveEmitter.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MoveEmitter_h
+#define jit_MoveEmitter_h
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+# include "jit/x86-shared/MoveEmitter-x86-shared.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/MoveEmitter-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/MoveEmitter-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/MoveEmitter-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/MoveEmitter-mips64.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/MoveEmitter-loong64.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/MoveEmitter-riscv64.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/MoveEmitter-wasm32.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/MoveEmitter-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+#endif /* jit_MoveEmitter_h */
diff --git a/js/src/jit/MoveResolver.cpp b/js/src/jit/MoveResolver.cpp
new file mode 100644
index 0000000000..4a9edd8e74
--- /dev/null
+++ b/js/src/jit/MoveResolver.cpp
@@ -0,0 +1,443 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/MoveResolver.h"
+
+#include "mozilla/ScopeExit.h"
+
+#include "jit/MacroAssembler.h"
+#include "jit/RegisterSets.h"
+
+using namespace js;
+using namespace js::jit;
+
+MoveOperand::MoveOperand(MacroAssembler& masm, const ABIArg& arg) : disp_(0) {
+ switch (arg.kind()) {
+ case ABIArg::GPR:
+ kind_ = Kind::Reg;
+ code_ = arg.gpr().code();
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ kind_ = Kind::RegPair;
+ code_ = arg.evenGpr().code();
+ MOZ_ASSERT(code_ % 2 == 0);
+ MOZ_ASSERT(code_ + 1 == arg.oddGpr().code());
+ break;
+#endif
+ case ABIArg::FPU:
+ kind_ = Kind::FloatReg;
+ code_ = arg.fpu().code();
+ break;
+ case ABIArg::Stack:
+ kind_ = Kind::Memory;
+ if (IsHiddenSP(masm.getStackPointer())) {
+ MOZ_CRASH(
+ "Hidden SP cannot be represented as register code on this "
+ "platform");
+ } else {
+ code_ = AsRegister(masm.getStackPointer()).code();
+ }
+ disp_ = arg.offsetFromArgBase();
+ break;
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+}
+
+MoveResolver::MoveResolver() : numCycles_(0), curCycles_(0) {}
+
+void MoveResolver::resetState() {
+ numCycles_ = 0;
+ curCycles_ = 0;
+}
+
+bool MoveResolver::addMove(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type) {
+ // Assert that we're not doing no-op moves.
+ MOZ_ASSERT(!(from == to));
+ PendingMove* pm = movePool_.allocate(from, to, type);
+ if (!pm) {
+ return false;
+ }
+ pending_.pushBack(pm);
+ return true;
+}
+
+// Given move (A -> B), this function attempts to find any move (B -> *) in the
+// pending move list, and returns the first one.
+MoveResolver::PendingMove* MoveResolver::findBlockingMove(
+ const PendingMove* last) {
+ for (PendingMoveIterator iter = pending_.begin(); iter != pending_.end();
+ iter++) {
+ PendingMove* other = *iter;
+
+ if (other->from().aliases(last->to())) {
+      // We now have pairs in the form (A -> X) (X -> Y). The second pair
+      // blocks the move in the first pair, so return it.
+ return other;
+ }
+ }
+
+ // No blocking moves found.
+ return nullptr;
+}
+
+// Given move (A -> B), this function attempts to find any move (B -> *) in the
+// move list iterator, and returns the first one.
+// N.B. It is unclear if a single move can complete more than one cycle, so to
+// be conservative, this function operates on iterators, so the caller can
+// process all instructions that start a cycle.
+MoveResolver::PendingMove* MoveResolver::findCycledMove(
+ PendingMoveIterator* iter, PendingMoveIterator end,
+ const PendingMove* last) {
+ for (; *iter != end; (*iter)++) {
+ PendingMove* other = **iter;
+ if (other->from().aliases(last->to())) {
+      // We now have pairs in the form (A -> X) (X -> Y). The second pair
+      // blocks the move in the first pair, so return it.
+ (*iter)++;
+ return other;
+ }
+ }
+ // No blocking moves found.
+ return nullptr;
+}
+
+#ifdef JS_CODEGEN_ARM
+static inline bool MoveIsDouble(const MoveOperand& move) {
+ if (!move.isFloatReg()) {
+ return false;
+ }
+ return move.floatReg().isDouble();
+}
+#endif
+
+#ifdef JS_CODEGEN_ARM
+static inline bool MoveIsSingle(const MoveOperand& move) {
+ if (!move.isFloatReg()) {
+ return false;
+ }
+ return move.floatReg().isSingle();
+}
+#endif
+
+#ifdef JS_CODEGEN_ARM
+bool MoveResolver::isDoubleAliasedAsSingle(const MoveOperand& move) {
+ if (!MoveIsDouble(move)) {
+ return false;
+ }
+
+ for (auto iter = pending_.begin(); iter != pending_.end(); ++iter) {
+ PendingMove* other = *iter;
+ if (other->from().aliases(move) && MoveIsSingle(other->from())) {
+ return true;
+ }
+ if (other->to().aliases(move) && MoveIsSingle(other->to())) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+#ifdef JS_CODEGEN_ARM
+static MoveOperand SplitIntoLowerHalf(const MoveOperand& move) {
+ if (MoveIsDouble(move)) {
+ FloatRegister lowerSingle = move.floatReg().asSingle();
+ return MoveOperand(lowerSingle);
+ }
+
+ MOZ_ASSERT(move.isMemoryOrEffectiveAddress());
+ return move;
+}
+#endif
+
+#ifdef JS_CODEGEN_ARM
+static MoveOperand SplitIntoUpperHalf(const MoveOperand& move) {
+ if (MoveIsDouble(move)) {
+ FloatRegister lowerSingle = move.floatReg().asSingle();
+ FloatRegister upperSingle =
+ VFPRegister(lowerSingle.code() + 1, VFPRegister::Single);
+ return MoveOperand(upperSingle);
+ }
+
+ MOZ_ASSERT(move.isMemoryOrEffectiveAddress());
+ return MoveOperand(move.base(), move.disp() + sizeof(float));
+}
+#endif
+
+// Resolves the pending_ list to a list in orderedMoves_.
+bool MoveResolver::resolve() {
+ resetState();
+ orderedMoves_.clear();
+
+ // Upon return from this function, the pending_ list must be cleared.
+ auto clearPending = mozilla::MakeScopeExit([this]() { pending_.clear(); });
+
+#ifdef JS_CODEGEN_ARM
+ // Some of ARM's double registers alias two of its single registers,
+ // but the algorithm below assumes that every register can participate
+ // in at most one cycle. To satisfy the algorithm, any double registers
+ // that may conflict are split into their single-register halves.
+ //
+ // This logic is only applicable because ARM only uses registers d0-d15,
+ // all of which alias s0-s31. Double registers d16-d31 are unused.
+ // Therefore there is never a double move that cannot be split.
+ // If this changes in the future, the algorithm will have to be fixed.
+
+ bool splitDoubles = false;
+ for (auto iter = pending_.begin(); iter != pending_.end(); ++iter) {
+ PendingMove* pm = *iter;
+
+ if (isDoubleAliasedAsSingle(pm->from()) ||
+ isDoubleAliasedAsSingle(pm->to())) {
+ splitDoubles = true;
+ break;
+ }
+ }
+
+ if (splitDoubles) {
+ for (auto iter = pending_.begin(); iter != pending_.end(); ++iter) {
+ PendingMove* pm = *iter;
+
+ if (!MoveIsDouble(pm->from()) && !MoveIsDouble(pm->to())) {
+ continue;
+ }
+
+ MoveOperand fromLower = SplitIntoLowerHalf(pm->from());
+ MoveOperand toLower = SplitIntoLowerHalf(pm->to());
+
+ PendingMove* lower =
+ movePool_.allocate(fromLower, toLower, MoveOp::FLOAT32);
+ if (!lower) {
+ return false;
+ }
+
+ // Insert the new node before the current position to not affect
+ // iteration.
+ pending_.insertBefore(pm, lower);
+
+ // Overwrite pm in place for the upper move. Iteration proceeds as normal.
+ MoveOperand fromUpper = SplitIntoUpperHalf(pm->from());
+ MoveOperand toUpper = SplitIntoUpperHalf(pm->to());
+ pm->overwrite(fromUpper, toUpper, MoveOp::FLOAT32);
+ }
+ }
+#endif
+
+ InlineList<PendingMove> stack;
+
+ // This is a depth-first-search without recursion, which tries to find
+ // cycles in a list of moves.
+ //
+ // Algorithm.
+ //
+ // S = Traversal stack.
+ // P = Pending move list.
+ // O = Ordered list of moves.
+ //
+ // As long as there are pending moves in P:
+ // Let |root| be any pending move removed from P
+ // Add |root| to the traversal stack.
+ // As long as S is not empty:
+ // Let |L| be the most recent move added to S.
+ //
+ // Find any pending move M whose source is L's destination, thus
+ // preventing L's move until M has completed.
+ //
+ // If a move M was found,
+ // Remove M from the pending list.
+ // If M's destination is |root|,
+ // Annotate M and |root| as cycles.
+ // Add M to S.
+ // do not Add M to O, since M may have other conflictors in P
+ // that have not yet been processed.
+ // Otherwise,
+ // Add M to S.
+ // Otherwise,
+ // Remove L from S.
+ // Add L to O.
+ //
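+  // Illustrative example: for the parallel moves {a -> b, b -> a}, the walk
+  // above finds that each move blocks the other, so both are emitted in
+  // order with one annotated as the cycle's begin and the other as its end;
+  // the MoveEmitter can then break the cycle by spilling one value to a
+  // temporary (cycle) slot.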
+ while (!pending_.empty()) {
+ PendingMove* pm = pending_.popBack();
+
+ // Add this pending move to the cycle detection stack.
+ stack.pushBack(pm);
+
+ while (!stack.empty()) {
+ PendingMove* blocking = findBlockingMove(stack.peekBack());
+
+ if (blocking) {
+ PendingMoveIterator stackiter = stack.begin();
+ PendingMove* cycled = findCycledMove(&stackiter, stack.end(), blocking);
+ if (cycled) {
+ // Find the cycle's start.
+ // We annotate cycles at each move in the cycle, and
+ // assert that we do not find two cycles in one move chain
+ // traversal (which would indicate two moves to the same
+ // destination).
+ // Since there can be more than one cycle, find them all.
+ do {
+ cycled->setCycleEnd(curCycles_);
+ cycled = findCycledMove(&stackiter, stack.end(), blocking);
+ } while (cycled);
+
+ blocking->setCycleBegin(pm->type(), curCycles_);
+ curCycles_++;
+ pending_.remove(blocking);
+ stack.pushBack(blocking);
+ } else {
+ // This is a new link in the move chain, so keep
+ // searching for a cycle.
+ pending_.remove(blocking);
+ stack.pushBack(blocking);
+ }
+ } else {
+ // Otherwise, pop the last move on the search stack because it's
+ // complete and not participating in a cycle. The resulting
+ // move can safely be added to the ordered move list.
+ PendingMove* done = stack.popBack();
+ if (!addOrderedMove(*done)) {
+ return false;
+ }
+ movePool_.free(done);
+ }
+ }
+    // If the current queue is empty, it is certain that
+    // all previous cycles cannot conflict with future cycles,
+    // so reset the counter of pending cycles, while keeping a high-water mark.
+ if (numCycles_ < curCycles_) {
+ numCycles_ = curCycles_;
+ }
+ curCycles_ = 0;
+ }
+
+ return true;
+}
+
+bool MoveResolver::addOrderedMove(const MoveOp& move) {
+ // Sometimes the register allocator generates move groups where multiple
+ // moves have the same source. Try to optimize these cases when the source
+ // is in memory and the target of one of the moves is in a register.
+ MOZ_ASSERT(!move.from().aliases(move.to()));
+
+ if (!move.from().isMemory() || move.isCycleBegin() || move.isCycleEnd()) {
+ return orderedMoves_.append(move);
+ }
+
+ // Look for an earlier move with the same source, where no intervening move
+ // touches either the source or destination of the new move.
+ for (int i = orderedMoves_.length() - 1; i >= 0; i--) {
+ const MoveOp& existing = orderedMoves_[i];
+
+ if (existing.from() == move.from() && !existing.to().aliases(move.to()) &&
+ existing.type() == move.type() && !existing.isCycleBegin() &&
+ !existing.isCycleEnd()) {
+ MoveOp* after = orderedMoves_.begin() + i + 1;
+ if (existing.to().isGeneralReg() || existing.to().isFloatReg()) {
+ MoveOp nmove(existing.to(), move.to(), move.type());
+ return orderedMoves_.insert(after, nmove);
+ } else if (move.to().isGeneralReg() || move.to().isFloatReg()) {
+ MoveOp nmove(move.to(), existing.to(), move.type());
+ orderedMoves_[i] = move;
+ return orderedMoves_.insert(after, nmove);
+ }
+ }
+
+ if (existing.aliases(move)) {
+ break;
+ }
+ }
+
+ return orderedMoves_.append(move);
+}
+
+void MoveResolver::reorderMove(size_t from, size_t to) {
+ MOZ_ASSERT(from != to);
+
+ MoveOp op = orderedMoves_[from];
+ if (from < to) {
+ for (size_t i = from; i < to; i++) {
+ orderedMoves_[i] = orderedMoves_[i + 1];
+ }
+ } else {
+ for (size_t i = from; i > to; i--) {
+ orderedMoves_[i] = orderedMoves_[i - 1];
+ }
+ }
+ orderedMoves_[to] = op;
+}
+
+void MoveResolver::sortMemoryToMemoryMoves() {
+ // Try to reorder memory->memory moves so that they are executed right
+ // before a move that clobbers some register. This will allow the move
+ // emitter to use that clobbered register as a scratch register for the
+ // memory->memory move, if necessary.
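+  // For example (illustrative), [A(mem) -> r0, B(mem) -> C(mem)] can be
+  // reordered to [B(mem) -> C(mem), A(mem) -> r0], because r0 is about to be
+  // clobbered anyway and may serve as the scratch register for the
+  // memory->memory move.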
+ for (size_t i = 0; i < orderedMoves_.length(); i++) {
+ const MoveOp& base = orderedMoves_[i];
+ if (!base.from().isMemory() || !base.to().isMemory()) {
+ continue;
+ }
+ if (base.type() != MoveOp::GENERAL && base.type() != MoveOp::INT32) {
+ continue;
+ }
+
+ // Look for an earlier move clobbering a register.
+ bool found = false;
+ for (int j = i - 1; j >= 0; j--) {
+ const MoveOp& previous = orderedMoves_[j];
+ if (previous.aliases(base) || previous.isCycleBegin() ||
+ previous.isCycleEnd()) {
+ break;
+ }
+
+ if (previous.to().isGeneralReg()) {
+ reorderMove(i, j);
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ continue;
+ }
+
+ // Look for a later move clobbering a register.
+ if (i + 1 < orderedMoves_.length()) {
+ bool found = false, skippedRegisterUse = false;
+ for (size_t j = i + 1; j < orderedMoves_.length(); j++) {
+ const MoveOp& later = orderedMoves_[j];
+ if (later.aliases(base) || later.isCycleBegin() || later.isCycleEnd()) {
+ break;
+ }
+
+ if (later.to().isGeneralReg()) {
+ if (skippedRegisterUse) {
+ reorderMove(i, j);
+ found = true;
+ } else {
+ // There is no move that uses a register between the
+ // original memory->memory move and this move that
+ // clobbers a register. The move should already be able
+ // to use a scratch register, so don't shift anything
+ // around.
+ }
+ break;
+ }
+
+ if (later.from().isGeneralReg()) {
+ skippedRegisterUse = true;
+ }
+ }
+
+ if (found) {
+ // Redo the search for memory->memory moves at the current
+ // index, so we don't skip the move just shifted back.
+ i--;
+ }
+ }
+ }
+}
diff --git a/js/src/jit/MoveResolver.h b/js/src/jit/MoveResolver.h
new file mode 100644
index 0000000000..d87182da3b
--- /dev/null
+++ b/js/src/jit/MoveResolver.h
@@ -0,0 +1,309 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MoveResolver_h
+#define jit_MoveResolver_h
+
+#include <algorithm>
+
+#include "jit/InlineList.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+class MacroAssembler;
+
+// This is similar to Operand, but carries more information. We're also not
+// guaranteed that Operand looks like this on all ISAs.
+class MoveOperand {
+ public:
+ enum class Kind : uint8_t {
+ // A register in the "integer", aka "general purpose", class.
+ Reg,
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ // Two consecutive "integer" registers (aka "general purpose"). The even
+ // register contains the lower part, the odd register has the high bits
+ // of the content.
+ RegPair,
+#endif
+ // A register in the "float" register class.
+ FloatReg,
+ // A memory region.
+ Memory,
+ // The address of a memory region.
+ EffectiveAddress
+ };
+
+ private:
+ Kind kind_;
+ uint8_t code_;
+ int32_t disp_;
+
+ static_assert(std::max(Registers::Total, FloatRegisters::Total) <= UINT8_MAX,
+ "Any register code must fit in code_");
+
+ public:
+ MoveOperand() = delete;
+ explicit MoveOperand(Register reg)
+ : kind_(Kind::Reg), code_(reg.code()), disp_(0) {}
+ explicit MoveOperand(FloatRegister reg)
+ : kind_(Kind::FloatReg), code_(reg.code()), disp_(0) {}
+ MoveOperand(Register reg, int32_t disp, Kind kind = Kind::Memory)
+ : kind_(kind), code_(reg.code()), disp_(disp) {
+ MOZ_ASSERT(isMemoryOrEffectiveAddress());
+
+ // With a zero offset, this is a plain reg-to-reg move.
+ if (disp == 0 && kind_ == Kind::EffectiveAddress) {
+ kind_ = Kind::Reg;
+ }
+ }
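+  // For example (illustrative), MoveOperand(base, 8) denotes the value stored
+  // at [base + 8], while MoveOperand(base, 8, Kind::EffectiveAddress) denotes
+  // the address base + 8 itself.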
+ explicit MoveOperand(const Address& addr, Kind kind = Kind::Memory)
+ : MoveOperand(AsRegister(addr.base), addr.offset, kind) {}
+ MoveOperand(MacroAssembler& masm, const ABIArg& arg);
+ MoveOperand(const MoveOperand& other) = default;
+ bool isFloatReg() const { return kind_ == Kind::FloatReg; }
+ bool isGeneralReg() const { return kind_ == Kind::Reg; }
+ bool isGeneralRegPair() const {
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ return kind_ == Kind::RegPair;
+#else
+ return false;
+#endif
+ }
+ bool isMemory() const { return kind_ == Kind::Memory; }
+ bool isEffectiveAddress() const { return kind_ == Kind::EffectiveAddress; }
+ bool isMemoryOrEffectiveAddress() const {
+ return isMemory() || isEffectiveAddress();
+ }
+ Register reg() const {
+ MOZ_ASSERT(isGeneralReg());
+ return Register::FromCode(code_);
+ }
+ Register evenReg() const {
+ MOZ_ASSERT(isGeneralRegPair());
+ return Register::FromCode(code_);
+ }
+ Register oddReg() const {
+ MOZ_ASSERT(isGeneralRegPair());
+ return Register::FromCode(code_ + 1);
+ }
+ FloatRegister floatReg() const {
+ MOZ_ASSERT(isFloatReg());
+ return FloatRegister::FromCode(code_);
+ }
+ Register base() const {
+ MOZ_ASSERT(isMemoryOrEffectiveAddress());
+ return Register::FromCode(code_);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(isMemoryOrEffectiveAddress());
+ return disp_;
+ }
+
+ bool aliases(MoveOperand other) const {
+ // These are not handled presently, but Memory and EffectiveAddress
+ // only appear in controlled circumstances in the trampoline code
+ // which ensures these cases never come up.
+
+ MOZ_ASSERT_IF(isMemoryOrEffectiveAddress() && other.isGeneralReg(),
+ base() != other.reg());
+ MOZ_ASSERT_IF(other.isMemoryOrEffectiveAddress() && isGeneralReg(),
+ other.base() != reg());
+
+    // Check if one of the operands is a register pair, in which case we
+    // have to check it against any other register or register pair.
+ if (isGeneralRegPair() || other.isGeneralRegPair()) {
+ if (isGeneralRegPair() && other.isGeneralRegPair()) {
+ // Assume that register pairs are aligned on even registers.
+ MOZ_ASSERT(!evenReg().aliases(other.oddReg()));
+ MOZ_ASSERT(!oddReg().aliases(other.evenReg()));
+        // Register pairs are composed of consecutive registers, thus
+        // if the first registers are aliased, then the second registers
+        // are aliased too.
+ MOZ_ASSERT(evenReg().aliases(other.evenReg()) ==
+ oddReg().aliases(other.oddReg()));
+ return evenReg().aliases(other.evenReg());
+ } else if (other.isGeneralReg()) {
+ MOZ_ASSERT(isGeneralRegPair());
+ return evenReg().aliases(other.reg()) || oddReg().aliases(other.reg());
+ } else if (isGeneralReg()) {
+ MOZ_ASSERT(other.isGeneralRegPair());
+ return other.evenReg().aliases(reg()) || other.oddReg().aliases(reg());
+ }
+ return false;
+ }
+
+ if (kind_ != other.kind_) {
+ return false;
+ }
+ if (kind_ == Kind::FloatReg) {
+ return floatReg().aliases(other.floatReg());
+ }
+ if (code_ != other.code_) {
+ return false;
+ }
+ if (isMemoryOrEffectiveAddress()) {
+ return disp_ == other.disp_;
+ }
+ return true;
+ }
+
+ bool operator==(const MoveOperand& other) const {
+ if (kind_ != other.kind_) {
+ return false;
+ }
+ if (code_ != other.code_) {
+ return false;
+ }
+ if (isMemoryOrEffectiveAddress()) {
+ return disp_ == other.disp_;
+ }
+ return true;
+ }
+ bool operator!=(const MoveOperand& other) const { return !operator==(other); }
+};
+
+// This represents a move operation.
+class MoveOp {
+ protected:
+ MoveOperand from_;
+ MoveOperand to_;
+ int32_t cycleBeginSlot_ = -1;
+ int32_t cycleEndSlot_ = -1;
+ bool cycleBegin_ = false;
+ bool cycleEnd_ = false;
+
+ public:
+ enum Type : uint8_t { GENERAL, INT32, FLOAT32, DOUBLE, SIMD128 };
+
+ protected:
+ Type type_;
+
+ // If cycleBegin_ is true, endCycleType_ is the type of the move at the end
+ // of the cycle. For example, given these moves:
+ // INT32 move a -> b
+ // GENERAL move b -> a
+ // the move resolver starts by copying b into a temporary location, so that
+  // the last move can read it. This copy needs to use type GENERAL.
+ Type endCycleType_;
+
+ public:
+ MoveOp() = delete;
+ MoveOp(const MoveOperand& from, const MoveOperand& to, Type type)
+ : from_(from),
+ to_(to),
+ type_(type),
+ endCycleType_(GENERAL) // initialize to silence UBSan warning
+ {}
+
+ bool isCycleBegin() const { return cycleBegin_; }
+ bool isCycleEnd() const { return cycleEnd_; }
+ uint32_t cycleBeginSlot() const {
+ MOZ_ASSERT(cycleBeginSlot_ != -1);
+ return cycleBeginSlot_;
+ }
+ uint32_t cycleEndSlot() const {
+ MOZ_ASSERT(cycleEndSlot_ != -1);
+ return cycleEndSlot_;
+ }
+ const MoveOperand& from() const { return from_; }
+ const MoveOperand& to() const { return to_; }
+ Type type() const { return type_; }
+ Type endCycleType() const {
+ MOZ_ASSERT(isCycleBegin());
+ return endCycleType_;
+ }
+ bool aliases(const MoveOperand& op) const {
+ return from().aliases(op) || to().aliases(op);
+ }
+ bool aliases(const MoveOp& other) const {
+ return aliases(other.from()) || aliases(other.to());
+ }
+#ifdef JS_CODEGEN_ARM
+ void overwrite(MoveOperand& from, MoveOperand& to, Type type) {
+ from_ = from;
+ to_ = to;
+ type_ = type;
+ }
+#endif
+};
+
+class MoveResolver {
+ private:
+ struct PendingMove : public MoveOp,
+ public TempObject,
+ public InlineListNode<PendingMove> {
+ PendingMove() = delete;
+
+ PendingMove(const MoveOperand& from, const MoveOperand& to, Type type)
+ : MoveOp(from, to, type) {}
+
+ void setCycleBegin(Type endCycleType, int cycleSlot) {
+ MOZ_ASSERT(!cycleBegin_);
+ cycleBegin_ = true;
+ cycleBeginSlot_ = cycleSlot;
+ endCycleType_ = endCycleType;
+ }
+ void setCycleEnd(int cycleSlot) {
+ MOZ_ASSERT(!cycleEnd_);
+ cycleEnd_ = true;
+ cycleEndSlot_ = cycleSlot;
+ }
+ };
+
+ using PendingMoveIterator = InlineList<MoveResolver::PendingMove>::iterator;
+
+ js::Vector<MoveOp, 16, SystemAllocPolicy> orderedMoves_;
+ int numCycles_;
+ int curCycles_;
+ TempObjectPool<PendingMove> movePool_;
+
+ InlineList<PendingMove> pending_;
+
+ PendingMove* findBlockingMove(const PendingMove* last);
+ PendingMove* findCycledMove(PendingMoveIterator* stack,
+ PendingMoveIterator end,
+ const PendingMove* first);
+ [[nodiscard]] bool addOrderedMove(const MoveOp& move);
+ void reorderMove(size_t from, size_t to);
+
+ // Internal reset function. Does not clear lists.
+ void resetState();
+
+#ifdef JS_CODEGEN_ARM
+ bool isDoubleAliasedAsSingle(const MoveOperand& move);
+#endif
+
+ public:
+ MoveResolver();
+
+  // Resolves a move group into one ordered list of moves. These moves must
+ // be executed in the order provided. Some moves may indicate that they
+ // participate in a cycle. For every cycle there are two such moves, and it
+ // is guaranteed that cycles do not nest inside each other in the list.
+ //
+ // After calling addMove() for each parallel move, resolve() performs the
+ // cycle resolution algorithm. Calling addMove() again resets the resolver.
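+  //
+  // A typical (hypothetical) use looks like:
+  //
+  //   MoveResolver resolver;
+  //   resolver.setAllocator(alloc);
+  //   if (!resolver.addMove(from, to, MoveOp::GENERAL) ||
+  //       !resolver.resolve()) {
+  //     return false;
+  //   }
+  //   // Then emit getMove(0) .. getMove(numMoves() - 1) in order.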
+ [[nodiscard]] bool addMove(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type);
+ [[nodiscard]] bool resolve();
+ void sortMemoryToMemoryMoves();
+
+ size_t numMoves() const { return orderedMoves_.length(); }
+ const MoveOp& getMove(size_t i) const { return orderedMoves_[i]; }
+ uint32_t numCycles() const { return numCycles_; }
+ bool hasNoPendingMoves() const { return pending_.empty(); }
+ void setAllocator(TempAllocator& alloc) { movePool_.setAllocator(alloc); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MoveResolver_h */
diff --git a/js/src/jit/PcScriptCache.h b/js/src/jit/PcScriptCache.h
new file mode 100644
index 0000000000..c83c479c85
--- /dev/null
+++ b/js/src/jit/PcScriptCache.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_PcScriptCache_h
+#define jit_PcScriptCache_h
+
+#include "mozilla/Array.h"
+#include "js/TypeDecls.h"
+#include "vm/Runtime.h"
+
+// Defines a fixed-size hash table solely for the purpose of caching
+// jit::GetPcScript(). One cache is attached to each JSRuntime; it functions as
+// if cleared on GC.
+
+namespace js {
+namespace jit {
+
+struct PcScriptCacheEntry {
+ uint8_t* returnAddress; // Key into the hash table.
+ jsbytecode* pc; // Cached PC.
+ JSScript* script; // Cached script.
+};
+
+struct PcScriptCache {
+ private:
+ static const uint32_t Length = 73;
+
+ // GC number at the time the cache was filled or created.
+ // Storing and checking against this number allows us to not bother
+ // clearing this cache on every GC -- only when actually necessary.
+ uint64_t gcNumber;
+
+ // List of cache entries.
+ mozilla::Array<PcScriptCacheEntry, Length> entries;
+
+ public:
+ explicit PcScriptCache(uint64_t gcNumber) { clear(gcNumber); }
+
+ void clear(uint64_t gcNumber) {
+ for (uint32_t i = 0; i < Length; i++) {
+ entries[i].returnAddress = nullptr;
+ }
+ this->gcNumber = gcNumber;
+ }
+
+  // Get a value from the cache. May lazily clear the cache if a GC occurred.
+ [[nodiscard]] bool get(JSRuntime* rt, uint32_t hash, uint8_t* addr,
+ JSScript** scriptRes, jsbytecode** pcRes) {
+ // If a GC occurred, lazily clear the cache now.
+ if (gcNumber != rt->gc.gcNumber()) {
+ clear(rt->gc.gcNumber());
+ return false;
+ }
+
+ if (entries[hash].returnAddress != addr) {
+ return false;
+ }
+
+ *scriptRes = entries[hash].script;
+ if (pcRes) {
+ *pcRes = entries[hash].pc;
+ }
+
+ return true;
+ }
+
+ void add(uint32_t hash, uint8_t* addr, jsbytecode* pc, JSScript* script) {
+ MOZ_ASSERT(addr);
+ MOZ_ASSERT(pc);
+ MOZ_ASSERT(script);
+ entries[hash].returnAddress = addr;
+ entries[hash].pc = pc;
+ entries[hash].script = script;
+ }
+
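+  // The multiplier 2654435761 is the usual Knuth multiplicative-hashing
+  // constant (a prime close to 2^32 / phi); the >> 3 drops the low bits of
+  // the return address before hashing.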
+ static uint32_t Hash(uint8_t* addr) {
+ uint32_t key = (uint32_t)((uintptr_t)addr);
+ return ((key >> 3) * 2654435761u) % Length;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_PcScriptCache_h */
diff --git a/js/src/jit/PerfSpewer.cpp b/js/src/jit/PerfSpewer.cpp
new file mode 100644
index 0000000000..a5262edefd
--- /dev/null
+++ b/js/src/jit/PerfSpewer.cpp
@@ -0,0 +1,1218 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Printf.h"
+
+#if defined(JS_ION_PERF) && defined(XP_UNIX)
+# include <fcntl.h>
+# include <sys/mman.h>
+# include <sys/stat.h>
+# include <unistd.h>
+#endif
+
+#if defined(JS_ION_PERF) && defined(XP_LINUX) && !defined(ANDROID) && \
+ defined(__GLIBC__)
+# include <dlfcn.h>
+# include <sys/syscall.h>
+# include <sys/types.h>
+# include <unistd.h>
+# define gettid() static_cast<pid_t>(syscall(__NR_gettid))
+#endif
+
+#if defined(JS_ION_PERF) && defined(XP_MACOSX)
+# include <pthread.h>
+# include <unistd.h>
+
+pid_t gettid_pthread() {
+ uint64_t tid;
+ if (pthread_threadid_np(nullptr, &tid) != 0) {
+ return 0;
+ }
+ // Truncate the tid to 32 bits. macOS thread IDs are usually small enough.
+ // And even if we do end up truncating, it doesn't matter much for Jitdump
+ // as long as the process ID is correct.
+ return pid_t(tid);
+}
+# define gettid() gettid_pthread()
+
+const char* get_current_dir_name_cwd() {
+ constexpr size_t CWD_MAX = 256;
+ char* buffer = (char*)malloc(CWD_MAX);
+ if (getcwd(buffer, CWD_MAX) == nullptr) {
+ buffer[0] = 0;
+ }
+ return buffer;
+}
+# define get_current_dir_name() get_current_dir_name_cwd()
+#endif
+
+#include "jit/PerfSpewer.h"
+
+#include <atomic>
+
+#include "jit/Jitdump.h"
+#include "jit/JitSpewer.h"
+#include "jit/LIR.h"
+#include "jit/MIR.h"
+#include "js/JitCodeAPI.h"
+#include "js/Printf.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/MutexIDs.h"
+
+#ifdef XP_WIN
+# include <windef.h>
+# include <codecvt>
+# include <evntprov.h>
+# include <locale>
+# include <string>
+# include <windows.h>
+
+const GUID PROVIDER_JSCRIPT9 = {
+ 0x57277741,
+ 0x3638,
+ 0x4a4b,
+ {0xbd, 0xba, 0x0a, 0xc6, 0xe4, 0x5d, 0xa5, 0x6c}};
+const EVENT_DESCRIPTOR MethodLoad = {0x9, 0x0, 0x0, 0x4, 0xa, 0x1, 0x1};
+
+static REGHANDLE sETWRegistrationHandle = NULL;
+
+static std::atomic<bool> etwCollection = false;
+#endif
+
+using namespace js;
+using namespace js::jit;
+
+enum class PerfModeType { None, Function, Source, IR, IROperands };
+
+static std::atomic<bool> geckoProfiling = false;
+static std::atomic<PerfModeType> PerfMode = PerfModeType::None;
+
+// Mutex to guard access to the profiler vectors and jitdump file if perf
+// profiling is enabled.
+static js::Mutex PerfMutex(mutexid::PerfSpewer);
+
+static PersistentRooted<GCVector<JitCode*, 0, js::SystemAllocPolicy>>
+ jitCodeVector;
+static ProfilerJitCodeVector profilerData;
+
+static bool IsGeckoProfiling() { return geckoProfiling; }
+#ifdef JS_ION_PERF
+static UniqueChars spew_dir;
+static FILE* JitDumpFilePtr = nullptr;
+static void* mmap_address = nullptr;
+static bool IsPerfProfiling() { return JitDumpFilePtr != nullptr; }
+#endif
+
+AutoLockPerfSpewer::AutoLockPerfSpewer() { PerfMutex.lock(); }
+
+AutoLockPerfSpewer::~AutoLockPerfSpewer() { PerfMutex.unlock(); }
+
+#ifdef JS_ION_PERF
+static uint64_t GetMonotonicTimestamp() {
+ struct timespec ts = {};
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000 + ts.tv_nsec;
+}
+
+// values are from /usr/include/elf.h
+static uint32_t GetMachineEncoding() {
+# if defined(JS_CODEGEN_X86)
+ return 3; // EM_386
+# elif defined(JS_CODEGEN_X64)
+ return 62; // EM_X86_64
+# elif defined(JS_CODEGEN_ARM)
+ return 40; // EM_ARM
+# elif defined(JS_CODEGEN_ARM64)
+ return 183; // EM_AARCH64
+# elif defined(JS_CODEGEN_MIPS32)
+ return 8; // EM_MIPS
+# elif defined(JS_CODEGEN_MIPS64)
+ return 8; // EM_MIPS
+# else
+ return 0; // Unsupported
+# endif
+}
+
+static void WriteToJitDumpFile(const void* addr, uint32_t size,
+ AutoLockPerfSpewer& lock) {
+ MOZ_RELEASE_ASSERT(JitDumpFilePtr);
+ size_t rv = fwrite(addr, 1, size, JitDumpFilePtr);
+ MOZ_RELEASE_ASSERT(rv == size);
+}
+
+static void WriteJitDumpDebugEntry(uint64_t addr, const char* filename,
+ uint32_t lineno, uint32_t colno,
+ AutoLockPerfSpewer& lock) {
+ JitDumpDebugEntry entry = {addr, lineno, colno};
+ WriteToJitDumpFile(&entry, sizeof(entry), lock);
+ WriteToJitDumpFile(filename, strlen(filename) + 1, lock);
+}
+
+static bool FileExists(const char* filename) {
+ // We don't currently dump external resources to disk.
+ if (strncmp(filename, "http", 4) == 0) {
+ return false;
+ }
+
+ struct stat buf = {};
+ return stat(filename, &buf) == 0;
+}
+
+static void writeJitDumpHeader(AutoLockPerfSpewer& lock) {
+ JitDumpHeader header = {};
+ header.magic = 0x4A695444;
+ header.version = 1;
+ header.total_size = sizeof(header);
+ header.elf_mach = GetMachineEncoding();
+ header.pad1 = 0;
+ header.pid = getpid();
+ header.timestamp = GetMonotonicTimestamp();
+ header.flags = 0;
+
+ WriteToJitDumpFile(&header, sizeof(header), lock);
+}
+
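+// Note (usage sketch, not required by this code): the jitdump file produced
+// below is typically consumed by Linux perf, e.g. by recording with
+// `perf record -k mono` and merging the JIT symbols afterwards with
+// `perf inject --jit`.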
+static bool openJitDump() {
+ if (JitDumpFilePtr) {
+ return true;
+ }
+ AutoLockPerfSpewer lock;
+
+ const ssize_t bufferSize = 256;
+ char filenameBuffer[bufferSize];
+
+  // We want to write absolute filenames into the debug info, or else the
+  // filenames in the perf report cannot be resolved.
+ if (getenv("PERF_SPEW_DIR")) {
+ char* env_dir = getenv("PERF_SPEW_DIR");
+ if (env_dir[0] == '/') {
+ spew_dir = JS_smprintf("%s", env_dir);
+ } else {
+ const char* dir = get_current_dir_name();
+ spew_dir = JS_smprintf("%s/%s", dir, env_dir);
+ free((void*)dir);
+ }
+ } else {
+ fprintf(stderr, "Please define PERF_SPEW_DIR as an output directory.\n");
+ return false;
+ }
+
+ if (snprintf(filenameBuffer, bufferSize, "%s/jit-%d.dump", spew_dir.get(),
+ getpid()) >= bufferSize) {
+ return false;
+ }
+
+ MOZ_ASSERT(!JitDumpFilePtr);
+
+ int fd = open(filenameBuffer, O_CREAT | O_TRUNC | O_RDWR, 0666);
+ JitDumpFilePtr = fdopen(fd, "w+");
+
+ if (!JitDumpFilePtr) {
+ return false;
+ }
+
+# ifdef XP_LINUX
+ // We need to mmap the jitdump file for perf to find it.
+ long page_size = sysconf(_SC_PAGESIZE);
+ mmap_address =
+ mmap(nullptr, page_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
+ if (mmap_address == MAP_FAILED) {
+ PerfMode = PerfModeType::None;
+ return false;
+ }
+# endif
+
+ writeJitDumpHeader(lock);
+ return true;
+}
+
+static void CheckPerf() {
+ static bool PerfChecked = false;
+
+ if (!PerfChecked) {
+ const char* env = getenv("IONPERF");
+ if (env == nullptr) {
+ PerfMode = PerfModeType::None;
+ fprintf(stderr,
+ "Warning: JIT perf reporting requires IONPERF set to \"func\" "
+ ", \"src\" or \"ir\". ");
+ fprintf(stderr, "Perf mapping will be deactivated.\n");
+ } else if (!strcmp(env, "src")) {
+ PerfMode = PerfModeType::Source;
+ } else if (!strcmp(env, "ir")) {
+ PerfMode = PerfModeType::IR;
+ } else if (!strcmp(env, "ir-ops")) {
+# ifdef JS_JITSPEW
+ PerfMode = PerfModeType::IROperands;
+# else
+ fprintf(stderr,
+ "Warning: IONPERF=ir-ops requires --enable-jitspew to be "
+ "enabled, defaulting to IONPERF=ir\n");
+ PerfMode = PerfModeType::IR;
+# endif
+ } else if (!strcmp(env, "func")) {
+ PerfMode = PerfModeType::Function;
+ } else {
+ fprintf(stderr, "Use IONPERF=func to record at function granularity\n");
+ fprintf(stderr,
+ "Use IONPERF=ir to record and annotate assembly with IR\n");
+ fprintf(stderr,
+ "Use IONPERF=src to record and annotate assembly with source, if "
+ "available locally\n");
+ exit(0);
+ }
+
+ if (PerfMode != PerfModeType::None) {
+ if (openJitDump()) {
+ PerfChecked = true;
+ return;
+ }
+
+ fprintf(stderr, "Failed to open perf map file. Disabling IONPERF.\n");
+ PerfMode = PerfModeType::None;
+ }
+ PerfChecked = true;
+ }
+}
+#endif
+
+#ifdef XP_WIN
+void NTAPI ETWEnableCallback(LPCGUID aSourceId, ULONG aIsEnabled, UCHAR aLevel,
+ ULONGLONG aMatchAnyKeyword,
+ ULONGLONG aMatchAllKeyword,
+ PEVENT_FILTER_DESCRIPTOR aFilterData,
+ PVOID aCallbackContext) {
+ // This is called on a CRT worker thread. This means this might race with
+ // our main thread, but that is okay.
+ etwCollection = aIsEnabled;
+ PerfMode = aIsEnabled ? PerfModeType::Function : PerfModeType::None;
+}
+
+void RegisterETW() {
+ static bool sHasRegisteredETW = false;
+ if (!sHasRegisteredETW) {
+ if (getenv("ETW_ENABLED")) {
+ EventRegister(&PROVIDER_JSCRIPT9, // GUID that identifies the provider
+ ETWEnableCallback, // Callback for enabling collection
+ NULL, // Context not used
+ &sETWRegistrationHandle // Used when calling EventWrite
+ // and EventUnregister
+ );
+ }
+ sHasRegisteredETW = true;
+ }
+}
+#endif
+
+/* static */
+void PerfSpewer::Init() {
+#ifdef JS_ION_PERF
+ CheckPerf();
+#endif
+#ifdef XP_WIN
+ RegisterETW();
+#endif
+}
+
+static void DisablePerfSpewer(AutoLockPerfSpewer& lock) {
+ fprintf(stderr, "Warning: Disabling PerfSpewer.");
+
+ geckoProfiling = false;
+#ifdef XP_WIN
+ etwCollection = false;
+#endif
+ PerfMode = PerfModeType::None;
+#ifdef JS_ION_PERF
+ long page_size = sysconf(_SC_PAGESIZE);
+ munmap(mmap_address, page_size);
+ fclose(JitDumpFilePtr);
+ JitDumpFilePtr = nullptr;
+#endif
+}
+
+void js::jit::ResetPerfSpewer(bool enabled) {
+ AutoLockPerfSpewer lock;
+
+ profilerData.clear();
+ jitCodeVector.clear();
+ geckoProfiling = enabled;
+}
+
+static JS::JitCodeRecord* CreateProfilerEntry(AutoLockPerfSpewer& lock) {
+ if (!IsGeckoProfiling()) {
+ return nullptr;
+ }
+
+ if (!profilerData.growBy(1)) {
+ DisablePerfSpewer(lock);
+ return nullptr;
+ }
+ return &profilerData.back();
+}
+
+static JS::JitCodeIRInfo* CreateProfilerIREntry(JS::JitCodeRecord* record,
+ AutoLockPerfSpewer& lock) {
+ if (!record) {
+ return nullptr;
+ }
+
+ if (!record->irInfo.growBy(1)) {
+ DisablePerfSpewer(lock);
+ return nullptr;
+ }
+ return &record->irInfo.back();
+}
+
+static JS::JitCodeSourceInfo* CreateProfilerSourceEntry(
+ JS::JitCodeRecord* record, AutoLockPerfSpewer& lock) {
+ if (!record) {
+ return nullptr;
+ }
+
+ if (!record->sourceInfo.growBy(1)) {
+ DisablePerfSpewer(lock);
+ return nullptr;
+ }
+ return &record->sourceInfo.back();
+}
+
+JS::JitOpcodeDictionary::JitOpcodeDictionary() {
+ MOZ_ASSERT(JS_IsInitialized());
+
+#define COPY_JSOP_OPCODE(name, ...) \
+ if (!baselineDictionary.append(JS_smprintf(#name))) { \
+ return; \
+ }
+ FOR_EACH_OPCODE(COPY_JSOP_OPCODE)
+#undef COPY_JSOP_OPCODE
+
+#define COPY_LIR_OPCODE(name, ...) \
+ if (!ionDictionary.append(JS_smprintf(#name))) { \
+ return; \
+ }
+ LIR_OPCODE_LIST(COPY_LIR_OPCODE)
+#undef COPY_LIR_OPCODE
+
+#define COPY_CACHEIR_OPCODE(name, ...) \
+ if (!icDictionary.append(JS_smprintf(#name))) { \
+ return; \
+ }
+ CACHE_IR_OPS(COPY_CACHEIR_OPCODE)
+#undef COPY_CACHEIR_OPCODE
+}
+
+// API to access JitCode data for the Gecko Profiler.
+void JS::JitCodeIterator::getDataForIndex(size_t IteratorIndex) {
+ if (IteratorIndex >= profilerData.length()) {
+ data = nullptr;
+ } else {
+ data = &profilerData[IteratorIndex];
+ }
+}
+
+JS::JitCodeIterator::JitCodeIterator() : iteratorIndex(0) {
+ MOZ_ASSERT(JS_IsInitialized());
+ PerfMutex.lock();
+ getDataForIndex(0);
+}
+
+JS::JitCodeIterator::~JitCodeIterator() { PerfMutex.unlock(); }
+
+static bool PerfSrcEnabled() {
+ return PerfMode == PerfModeType::Source || geckoProfiling;
+}
+
+#ifdef JS_JITSPEW
+static bool PerfIROpsEnabled() { return PerfMode == PerfModeType::IROperands; }
+#endif
+
+static bool PerfIREnabled() {
+ return (PerfMode == PerfModeType::IROperands) ||
+ (PerfMode == PerfModeType::IR) || geckoProfiling;
+}
+
+static bool PerfFuncEnabled() {
+ return PerfMode == PerfModeType::Function || geckoProfiling;
+}
+
+bool js::jit::PerfEnabled() {
+ return PerfSrcEnabled() || PerfIREnabled() || PerfFuncEnabled();
+}
+
+void InlineCachePerfSpewer::recordInstruction(MacroAssembler& masm,
+ CacheOp op) {
+ if (!PerfIREnabled()) {
+ return;
+ }
+ AutoLockPerfSpewer lock;
+
+ if (!opcodes_.emplaceBack(masm.currentOffset(), static_cast<unsigned>(op))) {
+ opcodes_.clear();
+ DisablePerfSpewer(lock);
+ }
+}
+
+#define CHECK_RETURN(x) \
+ if (!(x)) { \
+ AutoLockPerfSpewer lock; \
+ DisablePerfSpewer(lock); \
+ return; \
+ }
+
+void IonPerfSpewer::recordInstruction(MacroAssembler& masm, LInstruction* ins) {
+ if (!PerfIREnabled() && !PerfSrcEnabled()) {
+ return;
+ }
+
+ LNode::Opcode op = ins->op();
+ UniqueChars opcodeStr;
+
+ jsbytecode* bytecodepc = nullptr;
+ if (MDefinition* mir = ins->mirRaw()) {
+ bytecodepc = mir->trackedSite()->pc();
+ }
+
+#ifdef JS_JITSPEW
+ if (PerfIROpsEnabled()) {
+ Sprinter buf;
+ CHECK_RETURN(buf.init());
+ CHECK_RETURN(buf.put(LIRCodeName(op)));
+ ins->printOperands(buf);
+ opcodeStr = buf.release();
+ }
+#endif
+ if (!opcodes_.emplaceBack(masm.currentOffset(), static_cast<unsigned>(op),
+ opcodeStr, bytecodepc)) {
+ opcodes_.clear();
+ AutoLockPerfSpewer lock;
+ DisablePerfSpewer(lock);
+ }
+}
+
+#ifdef JS_JITSPEW
+static void PrintStackValue(StackValue* stackVal, CompilerFrameInfo& frame,
+ Sprinter& buf) {
+ switch (stackVal->kind()) {
+ /****** Constant ******/
+ case StackValue::Constant: {
+ js::Value constantVal = stackVal->constant();
+ if (constantVal.isInt32()) {
+ CHECK_RETURN(buf.jsprintf("%d", constantVal.toInt32()));
+ } else if (constantVal.isObjectOrNull()) {
+ CHECK_RETURN(buf.jsprintf("obj:%p", constantVal.toObjectOrNull()));
+ } else if (constantVal.isString()) {
+ CHECK_RETURN(buf.put("str:"));
+ CHECK_RETURN(buf.putString(constantVal.toString()));
+ } else if (constantVal.isNumber()) {
+ CHECK_RETURN(buf.jsprintf("num:%f", constantVal.toNumber()));
+ } else if (constantVal.isSymbol()) {
+ CHECK_RETURN(buf.put("sym:"));
+ constantVal.toSymbol()->dump(buf);
+ } else {
+ CHECK_RETURN(buf.jsprintf("raw:%" PRIx64, constantVal.asRawBits()));
+ }
+ } break;
+ /****** Register ******/
+ case StackValue::Register: {
+ Register reg = stackVal->reg().payloadOrValueReg();
+ CHECK_RETURN(buf.put(reg.name()));
+ } break;
+ /****** Stack ******/
+ case StackValue::Stack:
+ CHECK_RETURN(buf.put("stack"));
+ break;
+ /****** ThisSlot ******/
+ case StackValue::ThisSlot: {
+# ifdef JS_HAS_HIDDEN_SP
+ CHECK_RETURN(buf.put("this"));
+# else
+ Address addr = frame.addressOfThis();
+ CHECK_RETURN(buf.jsprintf("this:%s(%d)", addr.base.name(), addr.offset));
+# endif
+ } break;
+ /****** LocalSlot ******/
+ case StackValue::LocalSlot:
+ CHECK_RETURN(buf.jsprintf("local:%u", stackVal->localSlot()));
+ break;
+ /****** ArgSlot ******/
+ case StackValue::ArgSlot:
+ CHECK_RETURN(buf.jsprintf("arg:%u", stackVal->argSlot()));
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected kind");
+ break;
+ }
+}
+#endif
+
+void BaselinePerfSpewer::recordInstruction(JSContext* cx, MacroAssembler& masm,
+ jsbytecode* pc,
+ CompilerFrameInfo& frame) {
+ if (!PerfIREnabled()) {
+ return;
+ }
+
+ JSOp op = JSOp(*pc);
+ UniqueChars opcodeStr;
+
+#ifdef JS_JITSPEW
+ if (PerfIROpsEnabled()) {
+ JSScript* script = frame.script;
+ unsigned numOperands = js::StackUses(op, pc);
+
+ Sprinter buf(cx);
+ CHECK_RETURN(buf.init());
+ CHECK_RETURN(buf.put(js::CodeName(op)));
+
+ switch (op) {
+ case JSOp::SetName:
+ case JSOp::SetGName:
+ case JSOp::BindName:
+ case JSOp::BindGName:
+ case JSOp::GetName:
+ case JSOp::GetGName: {
+ // Emit the name used for these ops
+ Rooted<PropertyName*> name(cx, script->getName(pc));
+ CHECK_RETURN(buf.put(" "));
+ CHECK_RETURN(buf.putString(name));
+ } break;
+ default:
+ break;
+ }
+
+ // Output should be "JSOp (operand1), (operand2), ..."
+ for (unsigned i = 1; i <= numOperands; i++) {
+ CHECK_RETURN(buf.put(" ("));
+ StackValue* stackVal = frame.peek(-int(i));
+ PrintStackValue(stackVal, frame, buf);
+
+ if (i < numOperands) {
+ CHECK_RETURN(buf.put("),"));
+ } else {
+ CHECK_RETURN(buf.put(")"));
+ }
+ }
+ opcodeStr = buf.release();
+ }
+#endif
+
+ if (!opcodes_.emplaceBack(masm.currentOffset(), static_cast<unsigned>(op),
+ opcodeStr)) {
+ opcodes_.clear();
+ AutoLockPerfSpewer lock;
+ DisablePerfSpewer(lock);
+ }
+}
+
+const char* BaselinePerfSpewer::CodeName(unsigned op) {
+ return js::CodeName(static_cast<JSOp>(op));
+}
+
+const char* BaselineInterpreterPerfSpewer::CodeName(unsigned op) {
+ return js::CodeName(static_cast<JSOp>(op));
+}
+
+const char* IonPerfSpewer::CodeName(unsigned op) {
+ return js::jit::LIRCodeName(static_cast<LNode::Opcode>(op));
+}
+const char* InlineCachePerfSpewer::CodeName(unsigned op) {
+ return js::jit::CacheIRCodeName(static_cast<CacheOp>(op));
+}
+
+void PerfSpewer::CollectJitCodeInfo(UniqueChars& function_name, JitCode* code,
+ JS::JitCodeRecord* profilerRecord,
+ AutoLockPerfSpewer& lock) {
+ // Hold the JitCode objects here so they are not GC'd during profiling.
+ if (IsGeckoProfiling()) {
+ if (!jitCodeVector.append(code)) {
+ DisablePerfSpewer(lock);
+ }
+ }
+
+ CollectJitCodeInfo(function_name, reinterpret_cast<void*>(code->raw()),
+ code->instructionsSize(), profilerRecord, lock);
+}
+
+void PerfSpewer::CollectJitCodeInfo(UniqueChars& function_name, void* code_addr,
+ uint64_t code_size,
+ JS::JitCodeRecord* profilerRecord,
+ AutoLockPerfSpewer& lock) {
+#ifdef JS_ION_PERF
+ static uint64_t codeIndex = 1;
+
+ if (IsPerfProfiling()) {
+ JitDumpLoadRecord record = {};
+
+ record.header.id = JIT_CODE_LOAD;
+ record.header.total_size =
+ sizeof(record) + strlen(function_name.get()) + 1 + code_size;
+ record.header.timestamp = GetMonotonicTimestamp();
+ record.pid = getpid();
+ record.tid = gettid();
+ record.vma = uint64_t(code_addr);
+ record.code_addr = uint64_t(code_addr);
+ record.code_size = code_size;
+ record.code_index = codeIndex++;
+
+ WriteToJitDumpFile(&record, sizeof(record), lock);
+ WriteToJitDumpFile(function_name.get(), strlen(function_name.get()) + 1,
+ lock);
+ WriteToJitDumpFile(code_addr, code_size, lock);
+ }
+#endif
+#ifdef XP_WIN
+ if (etwCollection) {
+ void* scriptContextId = NULL;
+ uint32_t flags = 0;
+ uint64_t map = 0;
+ uint64_t assembly = 0;
+ uint32_t line_col = 0;
+ uint32_t method = 0;
+
+ int name_len = strlen(function_name.get());
+ std::wstring name(name_len + 1, '\0');
+ if (MultiByteToWideChar(CP_UTF8, 0, function_name.get(), name_len,
+ name.data(), name.size()) == 0) {
+ DisablePerfSpewer(lock);
+ return;
+ }
+
+ EVENT_DATA_DESCRIPTOR EventData[10];
+
+ EventDataDescCreate(&EventData[0], &scriptContextId, sizeof(PVOID));
+ EventDataDescCreate(&EventData[1], &code_addr, sizeof(PVOID));
+ EventDataDescCreate(&EventData[2], &code_size, sizeof(unsigned __int64));
+ EventDataDescCreate(&EventData[3], &method, sizeof(uint32_t));
+ EventDataDescCreate(&EventData[4], &flags, sizeof(const unsigned short));
+ EventDataDescCreate(&EventData[5], &map, sizeof(const unsigned short));
+ EventDataDescCreate(&EventData[6], &assembly, sizeof(unsigned __int64));
+ EventDataDescCreate(&EventData[7], &line_col, sizeof(const unsigned int));
+ EventDataDescCreate(&EventData[8], &line_col, sizeof(const unsigned int));
+ EventDataDescCreate(&EventData[9], name.c_str(),
+ sizeof(wchar_t) * (name.length() + 1));
+
+ ULONG result = EventWrite(
+ sETWRegistrationHandle, // From EventRegister
+ &MethodLoad, // EVENT_DESCRIPTOR generated from the manifest
+ (ULONG)10, // Size of the array of EVENT_DATA_DESCRIPTORs
+ EventData // Array of descriptors that contain the event data
+ );
+
+ if (result != ERROR_SUCCESS) {
+ DisablePerfSpewer(lock);
+ return;
+ }
+ }
+#endif
+
+ if (IsGeckoProfiling()) {
+ profilerRecord->instructionSize = code_size;
+ profilerRecord->code_addr = uint64_t(code_addr);
+ profilerRecord->functionName = std::move(function_name);
+ }
+}
+
+void PerfSpewer::recordOffset(MacroAssembler& masm, const char* msg) {
+ if (!PerfIREnabled()) {
+ return;
+ }
+
+ UniqueChars offsetStr = DuplicateString(msg);
+ if (!opcodes_.emplaceBack(masm.currentOffset(), offsetStr)) {
+ opcodes_.clear();
+ AutoLockPerfSpewer lock;
+ DisablePerfSpewer(lock);
+ }
+}
+
+void PerfSpewer::saveJitCodeIRInfo(JitCode* code,
+ JS::JitCodeRecord* profilerRecord,
+ AutoLockPerfSpewer& lock) {
+#ifdef JS_ION_PERF
+ static uint32_t filenameCounter = 0;
+ UniqueChars scriptFilename;
+ FILE* scriptFile = nullptr;
+
+ if (IsPerfProfiling()) {
+ scriptFilename = JS_smprintf("%s/jitdump-script-%u.%u.txt", spew_dir.get(),
+ filenameCounter++, getpid());
+ scriptFile = fopen(scriptFilename.get(), "w");
+ if (!scriptFile) {
+ DisablePerfSpewer(lock);
+ return;
+ }
+
+ JitDumpDebugRecord debug_record = {};
+ uint64_t n_records = opcodes_.length();
+
+ debug_record.header.id = JIT_CODE_DEBUG_INFO;
+ debug_record.header.total_size =
+ sizeof(debug_record) + n_records * (sizeof(JitDumpDebugEntry) +
+ strlen(scriptFilename.get()) + 1);
+ debug_record.header.timestamp = GetMonotonicTimestamp();
+ debug_record.code_addr = uint64_t(code->raw());
+ debug_record.nr_entry = n_records;
+
+ WriteToJitDumpFile(&debug_record, sizeof(debug_record), lock);
+ }
+#endif
+
+ if (profilerRecord) {
+ profilerRecord->tier = GetTier();
+ }
+
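+  // Under perf, each entry becomes one line of the side-car script file plus
+  // a debug record mapping its code address to that line; the same info is
+  // handed to the profiler as JitCodeIRInfo entries.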
+ for (size_t i = 0; i < opcodes_.length(); i++) {
+ OpcodeEntry& entry = opcodes_[i];
+#ifdef JS_ION_PERF
+ if (IsPerfProfiling()) {
+ // If a string was recorded for this offset, use that instead.
+ if (entry.str) {
+ fprintf(scriptFile, "%s\n", entry.str.get());
+ } else {
+ fprintf(scriptFile, "%s\n", CodeName(entry.opcode));
+ }
+ uint64_t addr = uint64_t(code->raw()) + entry.offset;
+ uint64_t lineno = i + 1;
+ WriteJitDumpDebugEntry(addr, scriptFilename.get(), lineno, 0, lock);
+ }
+#endif
+
+ if (JS::JitCodeIRInfo* irInfo =
+ CreateProfilerIREntry(profilerRecord, lock)) {
+ irInfo->offset = entry.offset;
+ irInfo->opcode = entry.opcode;
+ // Profiler API now owns this string, if defined.
+ irInfo->str = std::move(entry.str);
+ }
+ }
+ opcodes_.clear();
+
+#ifdef JS_ION_PERF
+ if (IsPerfProfiling()) {
+ fclose(scriptFile);
+ }
+#endif
+}
+
+void BaselinePerfSpewer::saveJitCodeSourceInfo(
+ JSScript* script, JitCode* code, JS::JitCodeRecord* profilerRecord,
+ AutoLockPerfSpewer& lock) {
+ const char* filename = script->filename();
+ if (!filename) {
+ return;
+ }
+
+#ifdef JS_ION_PERF
+ bool perfProfiling = IsPerfProfiling() && FileExists(filename);
+
+ // If we are using perf, we need to know the number of debug entries ahead of
+ // time for the header.
+ if (perfProfiling) {
+ JitDumpDebugRecord debug_record = {};
+ uint64_t n_records = 0;
+
+ for (SrcNoteIterator iter(script->notes()); !iter.atEnd(); ++iter) {
+ const auto* const sn = *iter;
+ switch (sn->type()) {
+ case SrcNoteType::SetLine:
+ case SrcNoteType::NewLine:
+ case SrcNoteType::ColSpan:
+ if (sn->delta() > 0) {
+ n_records++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Nothing to do
+ if (n_records == 0) {
+ return;
+ }
+
+ debug_record.header.id = JIT_CODE_DEBUG_INFO;
+ debug_record.header.total_size =
+ sizeof(debug_record) +
+ n_records * (sizeof(JitDumpDebugEntry) + strlen(filename) + 1);
+
+ debug_record.header.timestamp = GetMonotonicTimestamp();
+ debug_record.code_addr = uint64_t(code->raw());
+ debug_record.nr_entry = n_records;
+
+ WriteToJitDumpFile(&debug_record, sizeof(debug_record), lock);
+ }
+#endif
+
+ uint32_t lineno = script->lineno();
+ uint32_t colno = script->column();
+ uint64_t offset = 0;
+ for (SrcNoteIterator iter(script->notes()); !iter.atEnd(); ++iter) {
+ const auto* sn = *iter;
+ offset += sn->delta();
+
+ SrcNoteType type = sn->type();
+ if (type == SrcNoteType::SetLine) {
+ lineno = SrcNote::SetLine::getLine(sn, script->lineno());
+ colno = 0;
+ } else if (type == SrcNoteType::NewLine) {
+ lineno++;
+ colno = 0;
+ } else if (type == SrcNoteType::ColSpan) {
+ colno += SrcNote::ColSpan::getSpan(sn);
+ } else {
+ continue;
+ }
+
+ // Don't add entries that won't change the offset
+ if (sn->delta() <= 0) {
+ continue;
+ }
+
+ if (JS::JitCodeSourceInfo* srcInfo =
+ CreateProfilerSourceEntry(profilerRecord, lock)) {
+ srcInfo->offset = offset;
+ srcInfo->lineno = lineno;
+ srcInfo->colno = colno;
+ srcInfo->filename = JS_smprintf("%s", filename);
+ }
+
+#ifdef JS_ION_PERF
+ if (perfProfiling) {
+ WriteJitDumpDebugEntry(uint64_t(code->raw()) + offset, filename, lineno,
+ colno, lock);
+ }
+#endif
+ }
+}
+
+void IonPerfSpewer::saveJitCodeSourceInfo(JSScript* script, JitCode* code,
+ JS::JitCodeRecord* profilerRecord,
+ AutoLockPerfSpewer& lock) {
+ const char* filename = script->filename();
+ if (!filename) {
+ return;
+ }
+
+#ifdef JS_ION_PERF
+ bool perfProfiling = IsPerfProfiling() && FileExists(filename);
+
+ if (perfProfiling) {
+ JitDumpDebugRecord debug_record = {};
+
+ uint64_t n_records = 0;
+ for (OpcodeEntry& entry : opcodes_) {
+ if (entry.bytecodepc) {
+ n_records++;
+ }
+ }
+
+ debug_record.header.id = JIT_CODE_DEBUG_INFO;
+ debug_record.header.total_size =
+ sizeof(debug_record) +
+ n_records * (sizeof(JitDumpDebugEntry) + strlen(filename) + 1);
+ debug_record.header.timestamp = GetMonotonicTimestamp();
+ debug_record.code_addr = uint64_t(code->raw());
+ debug_record.nr_entry = n_records;
+
+ WriteToJitDumpFile(&debug_record, sizeof(debug_record), lock);
+ }
+#endif
+ uint32_t lineno = 0;
+ uint32_t colno = 0;
+
+ for (OpcodeEntry& entry : opcodes_) {
+ jsbytecode* pc = entry.bytecodepc;
+ if (!pc) {
+ continue;
+ }
+ // We could probably make this a bit faster by caching the previous pc
+ // offset, but it currently doesn't seem noticeable when testing.
+ lineno = PCToLineNumber(script, pc, &colno);
+
+ if (JS::JitCodeSourceInfo* srcInfo =
+ CreateProfilerSourceEntry(profilerRecord, lock)) {
+ srcInfo->offset = entry.offset;
+ srcInfo->lineno = lineno;
+ srcInfo->colno = colno;
+ srcInfo->filename = JS_smprintf("%s", filename);
+ }
+
+#ifdef JS_ION_PERF
+ if (perfProfiling) {
+ WriteJitDumpDebugEntry(uint64_t(code->raw()) + entry.offset, filename,
+ lineno, colno, lock);
+ }
+#endif
+ }
+}
+
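+// Builds a symbol name of the form "<tier>: <function> (<file>:<line>:<col>)",
+// inserting the stub name after the tier when one is given. Anonymous
+// functions are printed as "*".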
+static UniqueChars GetFunctionDesc(const char* tierName, JSContext* cx,
+ JSScript* script,
+ const char* stubName = nullptr) {
+ MOZ_ASSERT(script && tierName && cx);
+ UniqueChars funName;
+ if (script->function() && script->function()->displayAtom()) {
+ funName = AtomToPrintableString(cx, script->function()->displayAtom());
+ }
+
+ if (stubName) {
+ return JS_smprintf("%s: %s : %s (%s:%u:%u)", tierName, stubName,
+ funName ? funName.get() : "*", script->filename(),
+ script->lineno(), script->column());
+ }
+ return JS_smprintf("%s: %s (%s:%u:%u)", tierName,
+ funName ? funName.get() : "*", script->filename(),
+ script->lineno(), script->column());
+}
+
+void PerfSpewer::saveDebugInfo(JSScript* script, JitCode* code,
+ JS::JitCodeRecord* profilerRecord,
+ AutoLockPerfSpewer& lock) {
+ MOZ_ASSERT(code);
+ if (PerfIREnabled()) {
+ saveJitCodeIRInfo(code, profilerRecord, lock);
+ } else if (PerfSrcEnabled() && script) {
+ saveJitCodeSourceInfo(script, code, profilerRecord, lock);
+ }
+}
+
+void PerfSpewer::saveProfile(JitCode* code, UniqueChars& desc,
+ JSScript* script) {
+ MOZ_ASSERT(PerfEnabled());
+ MOZ_ASSERT(code && desc);
+ AutoLockPerfSpewer lock;
+ JS::JitCodeRecord* profilerRecord = CreateProfilerEntry(lock);
+
+ saveDebugInfo(script, code, profilerRecord, lock);
+ CollectJitCodeInfo(desc, code, profilerRecord, lock);
+}
+
+void IonICPerfSpewer::saveProfile(JSContext* cx, JSScript* script,
+ JitCode* code, const char* stubName) {
+ if (!PerfEnabled()) {
+ return;
+ }
+ UniqueChars desc = GetFunctionDesc("IonIC", cx, script, stubName);
+ PerfSpewer::saveProfile(code, desc, nullptr);
+}
+
+void BaselineICPerfSpewer::saveProfile(JitCode* code, const char* stubName) {
+ if (!PerfEnabled()) {
+ return;
+ }
+ UniqueChars desc = JS_smprintf("BaselineIC: %s", stubName);
+ PerfSpewer::saveProfile(code, desc, nullptr);
+}
+
+void BaselinePerfSpewer::saveProfile(JSContext* cx, JSScript* script,
+ JitCode* code) {
+ if (!PerfEnabled()) {
+ return;
+ }
+ UniqueChars desc = GetFunctionDesc("Baseline", cx, script);
+ PerfSpewer::saveProfile(code, desc, script);
+}
+
+void BaselineInterpreterPerfSpewer::saveProfile(JitCode* code) {
+ if (!PerfEnabled()) {
+ return;
+ }
+
+ enum class SpewKind { Uninitialized, SingleSym, MultiSym };
+
+ // Check which type of Baseline Interpreter Spew is requested.
+ static SpewKind kind = SpewKind::Uninitialized;
+ if (kind == SpewKind::Uninitialized) {
+ if (getenv("IONPERF_SINGLE_BLINTERP")) {
+ kind = SpewKind::SingleSym;
+ } else {
+ kind = SpewKind::MultiSym;
+ }
+ }
+
+ // For SingleSym, just emit one "BaselineInterpreter" symbol
+ // and emit the opcodes as IR if IONPERF=ir is used.
+ if (kind == SpewKind::SingleSym) {
+ UniqueChars desc = DuplicateString("BaselineInterpreter");
+ PerfSpewer::saveProfile(code, desc, nullptr);
+ return;
+ }
+
+ // For MultiSym, split up each opcode into its own symbol.
+ // No IR is emitted in this case, so we can skip PerfSpewer::saveProfile.
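+  // Each recorded offset starts that entry's range and the next entry's
+  // offset ends it, so the final recorded entry only serves as an end marker.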
+ MOZ_ASSERT(kind == SpewKind::MultiSym);
+ for (size_t i = 1; i < opcodes_.length(); i++) {
+ uintptr_t base = uintptr_t(code->raw()) + opcodes_[i - 1].offset;
+ uintptr_t size = opcodes_[i].offset - opcodes_[i - 1].offset;
+
+ UniqueChars rangeName;
+ if (opcodes_[i - 1].str) {
+ rangeName = JS_smprintf("BlinterpOp: %s", opcodes_[i - 1].str.get());
+ } else {
+ rangeName =
+ JS_smprintf("BlinterpOp: %s", CodeName(opcodes_[i - 1].opcode));
+ }
+
+    // If rangeName is null, we probably went OOM.
+ if (!rangeName) {
+ AutoLockPerfSpewer lock;
+ DisablePerfSpewer(lock);
+ return;
+ }
+
+ MOZ_ASSERT(base + size <=
+ uintptr_t(code->raw()) + code->instructionsSize());
+ CollectPerfSpewerJitCodeProfile(base, size, rangeName.get());
+ }
+}
+
+void BaselineInterpreterPerfSpewer::recordOffset(MacroAssembler& masm,
+ JSOp op) {
+ if (!PerfEnabled()) {
+ return;
+ }
+
+ if (!opcodes_.emplaceBack(masm.currentOffset(), unsigned(op))) {
+ opcodes_.clear();
+ AutoLockPerfSpewer lock;
+ DisablePerfSpewer(lock);
+ }
+}
+
+void BaselineInterpreterPerfSpewer::recordOffset(MacroAssembler& masm,
+ const char* name) {
+ if (!PerfEnabled()) {
+ return;
+ }
+
+ UniqueChars desc = DuplicateString(name);
+ if (!opcodes_.emplaceBack(masm.currentOffset(), desc)) {
+ opcodes_.clear();
+ AutoLockPerfSpewer lock;
+ DisablePerfSpewer(lock);
+ }
+}
+
+void IonPerfSpewer::saveProfile(JSContext* cx, JSScript* script,
+ JitCode* code) {
+ if (!PerfEnabled()) {
+ return;
+ }
+ UniqueChars desc = GetFunctionDesc("Ion", cx, script);
+ PerfSpewer::saveProfile(code, desc, script);
+}
+
+void js::jit::CollectPerfSpewerJitCodeProfile(JitCode* code, const char* msg) {
+ if (!code || !PerfEnabled()) {
+ return;
+ }
+
+ size_t size = code->instructionsSize();
+ if (size > 0) {
+ AutoLockPerfSpewer lock;
+
+ JS::JitCodeRecord* profilerRecord = CreateProfilerEntry(lock);
+ UniqueChars desc = JS_smprintf("%s", msg);
+ PerfSpewer::CollectJitCodeInfo(desc, code, profilerRecord, lock);
+ }
+}
+
+void js::jit::CollectPerfSpewerJitCodeProfile(uintptr_t base, uint64_t size,
+ const char* msg) {
+ if (!PerfEnabled()) {
+ return;
+ }
+
+ if (size > 0) {
+ AutoLockPerfSpewer lock;
+
+ JS::JitCodeRecord* profilerRecord = CreateProfilerEntry(lock);
+ UniqueChars desc = JS_smprintf("%s", msg);
+ PerfSpewer::CollectJitCodeInfo(desc, reinterpret_cast<void*>(base), size,
+ profilerRecord, lock);
+ }
+}
+
+void js::jit::CollectPerfSpewerWasmMap(uintptr_t base, uintptr_t size,
+ const char* filename,
+ const char* annotation) {
+ if (size == 0U || !PerfEnabled()) {
+ return;
+ }
+ AutoLockPerfSpewer lock;
+
+ JS::JitCodeRecord* profilerRecord = CreateProfilerEntry(lock);
+ UniqueChars desc = JS_smprintf("%s: Function %s", filename, annotation);
+ PerfSpewer::CollectJitCodeInfo(desc, reinterpret_cast<void*>(base),
+ uint64_t(size), profilerRecord, lock);
+}
+
+void js::jit::CollectPerfSpewerWasmFunctionMap(uintptr_t base, uintptr_t size,
+ const char* filename,
+ unsigned lineno,
+ const char* funcName) {
+ if (size == 0U || !PerfEnabled()) {
+ return;
+ }
+ AutoLockPerfSpewer lock;
+
+ JS::JitCodeRecord* profilerRecord = CreateProfilerEntry(lock);
+ UniqueChars desc =
+ JS_smprintf("%s:%u: Function %s", filename, lineno, funcName);
+ PerfSpewer::CollectJitCodeInfo(desc, reinterpret_cast<void*>(base),
+ uint64_t(size), profilerRecord, lock);
+}
+
+void js::jit::PerfSpewerRangeRecorder::appendEntry(UniqueChars& desc) {
+ if (!ranges.append(std::make_pair(masm.currentOffset(), std::move(desc)))) {
+ AutoLockPerfSpewer lock;
+ DisablePerfSpewer(lock);
+ ranges.clear();
+ }
+}
+
+void js::jit::PerfSpewerRangeRecorder::recordOffset(const char* name) {
+ if (!PerfEnabled()) {
+ return;
+ }
+ UniqueChars desc = DuplicateString(name);
+ appendEntry(desc);
+}
+
+void js::jit::PerfSpewerRangeRecorder::recordOffset(const char* name,
+ JSContext* cx,
+ JSScript* script) {
+ if (!PerfEnabled()) {
+ return;
+ }
+ UniqueChars desc = GetFunctionDesc(name, cx, script);
+ appendEntry(desc);
+}
+
+void js::jit::PerfSpewerRangeRecorder::collectRangesForJitCode(JitCode* code) {
+ if (!PerfEnabled() || ranges.empty()) {
+ return;
+ }
+
+ uintptr_t basePtr = uintptr_t(code->raw());
+ uintptr_t offsetStart = 0;
+
+ for (OffsetPair& pair : ranges) {
+ uint32_t offsetEnd = std::get<0>(pair);
+ uintptr_t rangeSize = uintptr_t(offsetEnd - offsetStart);
+ const char* rangeName = std::get<1>(pair).get();
+
+ CollectPerfSpewerJitCodeProfile(basePtr + offsetStart, rangeSize,
+ rangeName);
+ offsetStart = offsetEnd;
+ }
+
+ MOZ_ASSERT(offsetStart <= code->instructionsSize());
+ ranges.clear();
+}
diff --git a/js/src/jit/PerfSpewer.h b/js/src/jit/PerfSpewer.h
new file mode 100644
index 0000000000..659194526d
--- /dev/null
+++ b/js/src/jit/PerfSpewer.h
@@ -0,0 +1,207 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_PerfSpewer_h
+#define jit_PerfSpewer_h
+
+#ifdef JS_ION_PERF
+# include <stdio.h>
+#endif
+#include "jit/BaselineFrameInfo.h"
+#include "jit/CacheIR.h"
+#include "jit/JitCode.h"
+#include "jit/LIR.h"
+#include "js/AllocPolicy.h"
+#include "js/JitCodeAPI.h"
+#include "js/Vector.h"
+#include "vm/JSScript.h"
+
+namespace js::jit {
+
+using ProfilerJitCodeVector = Vector<JS::JitCodeRecord, 0, SystemAllocPolicy>;
+
+void ResetPerfSpewer(bool enabled);
+
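+// RAII guard serializing access to the spewer's shared state (the jitdump
+// file and the profiler's JitCodeRecord buffers).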
+struct AutoLockPerfSpewer {
+ AutoLockPerfSpewer();
+ ~AutoLockPerfSpewer();
+};
+
+class MBasicBlock;
+class MacroAssembler;
+
+bool PerfEnabled();
+
+class PerfSpewer {
+ protected:
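+  // One (assembler offset, opcode or label) pair recorded while emitting
+  // code; the collected entries are consumed when the finished JitCode is
+  // profiled.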
+ struct OpcodeEntry {
+ uint32_t offset = 0;
+ unsigned opcode = 0;
+ jsbytecode* bytecodepc = nullptr;
+
+ // This string is used to replace the opcode, to define things like
+ // Prologue/Epilogue, or to add operand info.
+ UniqueChars str;
+
+ OpcodeEntry(uint32_t offset_, unsigned opcode_, UniqueChars& str_,
+ jsbytecode* pc)
+ : offset(offset_), opcode(opcode_), bytecodepc(pc) {
+ str = std::move(str_);
+ }
+
+ OpcodeEntry(uint32_t offset_, unsigned opcode_, UniqueChars& str_)
+ : offset(offset_), opcode(opcode_) {
+ str = std::move(str_);
+ }
+ OpcodeEntry(uint32_t offset_, UniqueChars& str_) : offset(offset_) {
+ str = std::move(str_);
+ }
+ OpcodeEntry(uint32_t offset_, unsigned opcode_)
+ : offset(offset_), opcode(opcode_) {}
+
+ OpcodeEntry(OpcodeEntry&& copy) {
+ offset = copy.offset;
+ opcode = copy.opcode;
+ bytecodepc = copy.bytecodepc;
+ str = std::move(copy.str);
+ }
+
+    // Copying is deleted so the UniqueChars member is never duplicated.
+ OpcodeEntry(OpcodeEntry& copy) = delete;
+ };
+ Vector<OpcodeEntry, 0, SystemAllocPolicy> opcodes_;
+
+ uint32_t lir_opcode_length = 0;
+ uint32_t js_opcode_length = 0;
+
+ virtual JS::JitTier GetTier() { return JS::JitTier::Other; }
+
+ virtual const char* CodeName(unsigned op) = 0;
+
+ virtual void saveJitCodeSourceInfo(JSScript* script, JitCode* code,
+ JS::JitCodeRecord* record,
+ AutoLockPerfSpewer& lock) = 0;
+
+ void saveDebugInfo(JSScript* script, JitCode* code,
+ JS::JitCodeRecord* profilerRecord,
+ AutoLockPerfSpewer& lock);
+
+ void saveProfile(JitCode* code, UniqueChars& desc, JSScript* script);
+
+ void saveJitCodeIRInfo(JitCode* code, JS::JitCodeRecord* profilerRecord,
+ AutoLockPerfSpewer& lock);
+
+ public:
+ PerfSpewer() = default;
+
+ void recordOffset(MacroAssembler& masm, const char*);
+
+ static void Init();
+
+ static void CollectJitCodeInfo(UniqueChars& function_name, JitCode* code,
+ JS::JitCodeRecord*, AutoLockPerfSpewer& lock);
+ static void CollectJitCodeInfo(UniqueChars& function_name, void* code_addr,
+ uint64_t code_size,
+ JS::JitCodeRecord* profilerRecord,
+ AutoLockPerfSpewer& lock);
+};
+
+void CollectPerfSpewerJitCodeProfile(JitCode* code, const char* msg);
+void CollectPerfSpewerJitCodeProfile(uintptr_t base, uint64_t size,
+ const char* msg);
+
+void CollectPerfSpewerWasmMap(uintptr_t base, uintptr_t size,
+ const char* filename, const char* annotation);
+void CollectPerfSpewerWasmFunctionMap(uintptr_t base, uintptr_t size,
+ const char* filename, unsigned lineno,
+ const char* funcName);
+
+class IonPerfSpewer : public PerfSpewer {
+ JS::JitTier GetTier() override { return JS::JitTier::Ion; }
+ const char* CodeName(unsigned op) override;
+
+ void saveJitCodeSourceInfo(JSScript* script, JitCode* code,
+ JS::JitCodeRecord* record,
+ AutoLockPerfSpewer& lock) override;
+
+ public:
+ void recordInstruction(MacroAssembler& masm, LInstruction* ins);
+ void saveProfile(JSContext* cx, JSScript* script, JitCode* code);
+};
+
+class BaselineInterpreterPerfSpewer : public PerfSpewer {
+ JS::JitTier GetTier() override { return JS::JitTier::Baseline; }
+ const char* CodeName(unsigned op) override;
+
+ // Do nothing, BaselineInterpreter has no source to reference.
+ void saveJitCodeSourceInfo(JSScript* script, JitCode* code,
+ JS::JitCodeRecord* record,
+ AutoLockPerfSpewer& lock) override {}
+
+ public:
+ void recordOffset(MacroAssembler& masm, JSOp op);
+ void recordOffset(MacroAssembler& masm, const char* name);
+ void saveProfile(JitCode* code);
+};
+
+class BaselinePerfSpewer : public PerfSpewer {
+ JS::JitTier GetTier() override { return JS::JitTier::Baseline; }
+ const char* CodeName(unsigned op) override;
+
+ void saveJitCodeSourceInfo(JSScript* script, JitCode* code,
+ JS::JitCodeRecord* record,
+ AutoLockPerfSpewer& lock) override;
+
+ public:
+ void recordInstruction(JSContext* cx, MacroAssembler& masm, jsbytecode* pc,
+ CompilerFrameInfo& frame);
+ void saveProfile(JSContext* cx, JSScript* script, JitCode* code);
+};
+
+class InlineCachePerfSpewer : public PerfSpewer {
+ JS::JitTier GetTier() override { return JS::JitTier::IC; }
+ const char* CodeName(unsigned op) override;
+
+ void saveJitCodeSourceInfo(JSScript* script, JitCode* code,
+ JS::JitCodeRecord* record,
+ AutoLockPerfSpewer& lock) override {
+ // IC stubs have no source code to reference.
+ return;
+ }
+
+ public:
+ void recordInstruction(MacroAssembler& masm, CacheOp op);
+};
+
+class BaselineICPerfSpewer : public InlineCachePerfSpewer {
+ public:
+ void saveProfile(JitCode* code, const char* stubName);
+};
+
+class IonICPerfSpewer : public InlineCachePerfSpewer {
+ public:
+ void saveProfile(JSContext* cx, JSScript* script, JitCode* code,
+ const char* stubName);
+};
+
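+// Collects named offsets while code is being assembled;
+// collectRangesForJitCode() then emits one profiling entry per recorded range
+// of the finished JitCode.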
+class PerfSpewerRangeRecorder {
+ using OffsetPair = std::tuple<uint32_t, UniqueChars>;
+ Vector<OffsetPair, 0, js::SystemAllocPolicy> ranges;
+
+ MacroAssembler& masm;
+
+ void appendEntry(UniqueChars& desc);
+
+ public:
+  explicit PerfSpewerRangeRecorder(MacroAssembler& masm_) : masm(masm_) {}
+ void recordOffset(const char* name);
+ void recordOffset(const char* name, JSContext* cx, JSScript* script);
+ void collectRangesForJitCode(JitCode* code);
+};
+
+} // namespace js::jit
+
+#endif /* jit_PerfSpewer_h */
diff --git a/js/src/jit/ProcessExecutableMemory.cpp b/js/src/jit/ProcessExecutableMemory.cpp
new file mode 100644
index 0000000000..3cf38adcba
--- /dev/null
+++ b/js/src/jit/ProcessExecutableMemory.cpp
@@ -0,0 +1,935 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/ProcessExecutableMemory.h"
+
+#include "mozilla/Array.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/TaggedAnonymousMemory.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+#include <errno.h>
+
+#include "jsfriendapi.h"
+#include "jsmath.h"
+
+#include "gc/Memory.h"
+#include "jit/FlushICache.h" // js::jit::FlushICache
+#include "jit/JitOptions.h"
+#include "threading/LockGuard.h"
+#include "threading/Mutex.h"
+#include "util/Memory.h"
+#include "util/Poison.h"
+#include "util/WindowsWrapper.h"
+#include "vm/MutexIDs.h"
+
+#ifdef XP_WIN
+# include "mozilla/StackWalk_windows.h"
+# include "mozilla/WindowsVersion.h"
+#elif defined(__wasi__)
+# if defined(JS_CODEGEN_WASM32)
+# include <cstdlib>
+# else
+// Nothing.
+# endif
+#else
+# include <sys/mman.h>
+# include <unistd.h>
+#endif
+
+#ifdef MOZ_VALGRIND
+# include <valgrind/valgrind.h>
+#endif
+
+using namespace js;
+using namespace js::jit;
+
+#ifdef XP_WIN
+# if defined(HAVE_64BIT_BUILD)
+# define NEED_JIT_UNWIND_HANDLING
+# endif
+
+static void* ComputeRandomAllocationAddress() {
+ /*
+ * Inspiration is V8's OS::Allocate in platform-win32.cc.
+ *
+ * VirtualAlloc takes 64K chunks out of the virtual address space, so we
+   * keep 64 KiB alignment (the low 16 bits of the address are zero).
+ *
+ * x86: V8 comments say that keeping addresses in the [64MiB, 1GiB) range
+ * tries to avoid system default DLL mapping space. In the end, we get 13
+ * bits of randomness in our selection.
+ * x64: [2GiB, 4TiB), with 25 bits of randomness.
+ */
+# ifdef HAVE_64BIT_BUILD
+ static const uintptr_t base = 0x0000000080000000;
+ static const uintptr_t mask = 0x000003ffffff0000;
+# elif defined(_M_IX86) || defined(__i386__)
+ static const uintptr_t base = 0x04000000;
+ static const uintptr_t mask = 0x3fff0000;
+# else
+# error "Unsupported architecture"
+# endif
+
+ uint64_t rand = js::GenerateRandomSeed();
+ return (void*)(base | (rand & mask));
+}
+
+# ifdef NEED_JIT_UNWIND_HANDLING
+static js::JitExceptionHandler sJitExceptionHandler;
+static bool sHasInstalledFunctionTable = false;
+# endif
+
+JS_PUBLIC_API void js::SetJitExceptionHandler(JitExceptionHandler handler) {
+# ifdef NEED_JIT_UNWIND_HANDLING
+ MOZ_ASSERT(!sJitExceptionHandler);
+ sJitExceptionHandler = handler;
+# else
+ // Just do nothing if unwind handling is disabled.
+# endif
+}
+
+# ifdef NEED_JIT_UNWIND_HANDLING
+# if defined(_M_ARM64)
+// See the ".xdata records" section of
+// https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
+// These records can have various fields present or absent depending on the
+// bits set in the header. Our struct will use one 32-bit slot for unwind codes,
+// and no slots for epilog scopes.
+struct UnwindData {
+ uint32_t functionLength : 18;
+ uint32_t version : 2;
+ uint32_t hasExceptionHandler : 1;
+ uint32_t packedEpilog : 1;
+ uint32_t epilogCount : 5;
+ uint32_t codeWords : 5;
+ uint8_t unwindCodes[4];
+ uint32_t exceptionHandler;
+};
+
+static const unsigned ThunkLength = 20;
+# else
+// From documentation for UNWIND_INFO on
+// https://learn.microsoft.com/en-us/cpp/build/exception-handling-x64
+struct UnwindInfo {
+ uint8_t version : 3;
+ uint8_t flags : 5;
+ uint8_t sizeOfPrologue;
+ uint8_t countOfUnwindCodes;
+ uint8_t frameRegister : 4;
+ uint8_t frameOffset : 4;
+};
+static const unsigned ThunkLength = 12;
+union UnwindCode {
+ struct {
+ uint8_t codeOffset;
+ uint8_t unwindOp : 4;
+ uint8_t opInfo : 4;
+ };
+ uint16_t frameOffset;
+};
+
+static constexpr int kNumberOfUnwindCodes = 2;
+static constexpr int kPushRbpInstructionLength = 1;
+static constexpr int kMovRbpRspInstructionLength = 3;
+static constexpr int kRbpPrefixCodes = 2;
+static constexpr int kRbpPrefixLength =
+ kPushRbpInstructionLength + kMovRbpRspInstructionLength;
+
+struct UnwindData {
+ UnwindInfo unwindInfo;
+ UnwindCode unwindCodes[kNumberOfUnwindCodes];
+ uint32_t exceptionHandler;
+
+ UnwindData() {
+ static constexpr int kOpPushNonvol = 0;
+ static constexpr int kOpSetFPReg = 3;
+
+ unwindInfo.version = 1;
+ unwindInfo.flags = UNW_FLAG_EHANDLER;
+ unwindInfo.sizeOfPrologue = kRbpPrefixLength;
+ unwindInfo.countOfUnwindCodes = kRbpPrefixCodes;
+ unwindInfo.frameRegister = 5;
+ unwindInfo.frameOffset = 0;
+
+    // Offsets here are specified relative to the beginning of the -next-
+    // instruction.
+ unwindCodes[0].codeOffset = kRbpPrefixLength; // movq rbp, rsp
+ unwindCodes[0].unwindOp = kOpSetFPReg;
+ unwindCodes[0].opInfo = 0;
+
+ unwindCodes[1].codeOffset = kPushRbpInstructionLength; // push rbp
+ unwindCodes[1].unwindOp = kOpPushNonvol;
+ unwindCodes[1].opInfo = 5;
+ }
+};
+# endif
+
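+// Constructed in-place at the start of the reserved first page of the
+// executable memory region; see RegisterExecutableMemory below.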
+struct ExceptionHandlerRecord {
+ void* dynamicTable;
+ UnwindData unwindData;
+ uint8_t thunk[ThunkLength];
+ RUNTIME_FUNCTION runtimeFunction;
+};
+
+// This function must match the function pointer type PEXCEPTION_HANDLER
+// mentioned in:
+// http://msdn.microsoft.com/en-us/library/ssa62fwe.aspx.
+// This type is rather elusive in documentation; Wine is the best I've found:
+// http://source.winehq.org/source/include/winnt.h
+static DWORD ExceptionHandler(PEXCEPTION_RECORD exceptionRecord,
+ _EXCEPTION_REGISTRATION_RECORD*, PCONTEXT context,
+ _EXCEPTION_REGISTRATION_RECORD**) {
+ if (sJitExceptionHandler) {
+ return sJitExceptionHandler(exceptionRecord, context);
+ }
+
+ return ExceptionContinueSearch;
+}
+
+PRUNTIME_FUNCTION RuntimeFunctionCallback(DWORD64 ControlPc, PVOID Context);
+
+// Required for enabling stack walking on Windows using external tools.
+NTSYSAPI DWORD NTAPI RtlAddGrowableFunctionTable(
+ PVOID* DynamicTable, PRUNTIME_FUNCTION FunctionTable, DWORD EntryCount,
+ DWORD MaximumEntryCount, ULONG_PTR RangeBase, ULONG_PTR RangeEnd);
+
+// For an explanation of the problem being solved here, see
+// SetJitExceptionFilter in jsfriendapi.h.
+static bool RegisterExecutableMemory(void* p, size_t bytes, size_t pageSize) {
+ if (!VirtualAlloc(p, pageSize, MEM_COMMIT, PAGE_READWRITE)) {
+ MOZ_CRASH();
+ }
+
+ // A page was reserved inside this structure for the record. This is because
+  // all entries in the record are described as an offset from the start of the
+ // memory region. We construct the record there.
+ ExceptionHandlerRecord* r = new (p) ExceptionHandlerRecord();
+ void* handler = JS_FUNC_TO_DATA_PTR(void*, ExceptionHandler);
+
+ // Because the .xdata format on ARM64 can only encode sizes up to 1M (much
+ // too small for our JIT code regions), we register a function table callback
+ // to provide RUNTIME_FUNCTIONs at runtime. Windows doesn't seem to care about
+ // the size fields on RUNTIME_FUNCTIONs that are created in this way, so the
+ // same RUNTIME_FUNCTION can work for any address in the region. We'll set up
+ // a generic one now and the callback can just return a pointer to it.
+
+ // All these fields are specified to be offsets from the base of the
+ // executable code (which is 'p'), even if they have 'Address' in their
+ // names. In particular, exceptionHandler is a ULONG offset which is a
+ // 32-bit integer. Since 'p' can be farther than INT32_MAX away from
+ // sJitExceptionHandler, we must generate a little thunk inside the
+ // record. The record is put on its own page so that we can take away write
+ // access to protect against accidental clobbering.
+
+# if defined(_M_ARM64)
+ if (!sJitExceptionHandler) {
+ return false;
+ }
+
+ r->runtimeFunction.BeginAddress = pageSize;
+ r->runtimeFunction.UnwindData = offsetof(ExceptionHandlerRecord, unwindData);
+ static_assert(offsetof(ExceptionHandlerRecord, unwindData) % 4 == 0,
+ "The ARM64 .pdata format requires that exception information "
+ "RVAs be 4-byte aligned.");
+
+ memset(&r->unwindData, 0, sizeof(r->unwindData));
+ r->unwindData.hasExceptionHandler = true;
+ r->unwindData.exceptionHandler = offsetof(ExceptionHandlerRecord, thunk);
+
+ // Use a fake unwind code to make the Windows unwinder do _something_. If the
+ // PC and SP both stay unchanged, we'll fail the unwinder's sanity checks and
+ // it won't call our exception handler.
+ r->unwindData.codeWords = 1; // one 32-bit word gives us up to 4 codes
+ r->unwindData.unwindCodes[0] =
+ 0b00000001; // alloc_s small stack of size 1*16
+ r->unwindData.unwindCodes[1] = 0b11100100; // end
+
+ uint32_t* thunk = (uint32_t*)r->thunk;
+ uint16_t* addr = (uint16_t*)&handler;
+
+ // xip0/r16 should be safe to clobber: Windows just used it to call our thunk.
+ const uint8_t reg = 16;
+
+ // Say `handler` is 0x4444333322221111, then:
+ thunk[0] = 0xd2800000 | addr[0] << 5 | reg; // mov xip0, 1111
+ thunk[1] = 0xf2a00000 | addr[1] << 5 | reg; // movk xip0, 2222 lsl #0x10
+ thunk[2] = 0xf2c00000 | addr[2] << 5 | reg; // movk xip0, 3333 lsl #0x20
+ thunk[3] = 0xf2e00000 | addr[3] << 5 | reg; // movk xip0, 4444 lsl #0x30
+ thunk[4] = 0xd61f0000 | reg << 5; // br xip0
+# else
+ r->runtimeFunction.BeginAddress = pageSize;
+ r->runtimeFunction.EndAddress = (DWORD)bytes;
+ r->runtimeFunction.UnwindData = offsetof(ExceptionHandlerRecord, unwindData);
+ r->unwindData.exceptionHandler = offsetof(ExceptionHandlerRecord, thunk);
+
+ // mov imm64, rax
+ r->thunk[0] = 0x48;
+ r->thunk[1] = 0xb8;
+ memcpy(&r->thunk[2], &handler, 8);
+
+ // jmp rax
+ r->thunk[10] = 0xff;
+ r->thunk[11] = 0xe0;
+# endif
+
+ BOOLEAN result = false;
+
+ // RtlAddGrowableFunctionTable is only available in Windows 8.1 and higher.
+ // This can be simplified if our compile target changes.
+ HMODULE ntdll_module =
+ LoadLibraryExW(L"ntdll.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
+
+ static decltype(&::RtlAddGrowableFunctionTable) addGrowableFunctionTable =
+ reinterpret_cast<decltype(&::RtlAddGrowableFunctionTable)>(
+ ::GetProcAddress(ntdll_module, "RtlAddGrowableFunctionTable"));
+
+ // AddGrowableFunctionTable will write into the region. We must therefore
+  // only write-protect it after this has been called.
+ if (addGrowableFunctionTable) {
+ // XXX NB: The profiler believes this function is only called from the main
+ // thread. If that ever becomes untrue, the profiler must be updated
+ // immediately.
+ AutoSuppressStackWalking suppress;
+ result = addGrowableFunctionTable(&r->dynamicTable, &r->runtimeFunction, 1,
+ 1, (ULONG_PTR)p,
+ (ULONG_PTR)p + bytes - pageSize) == S_OK;
+ } else {
+ if (!sJitExceptionHandler) {
+ // No point installing this.
+ return false;
+ }
+ // XXX NB: The profiler believes this function is only called from the main
+ // thread. If that ever becomes untrue, the profiler must be updated
+ // immediately.
+ AutoSuppressStackWalking suppress;
+ result =
+ RtlInstallFunctionTableCallback((DWORD64)p | 0x3, (DWORD64)p, bytes,
+ RuntimeFunctionCallback, NULL, NULL);
+ }
+
+ DWORD oldProtect;
+ if (result && !VirtualProtect(p, pageSize, PAGE_EXECUTE_READ, &oldProtect)) {
+ MOZ_CRASH();
+ }
+
+ return result;
+}
+
+static void UnregisterExecutableMemory(void* p, size_t bytes, size_t pageSize) {
+ // There's no such thing as RtlUninstallFunctionTableCallback, so there's
+ // nothing to do here.
+}
+# endif
+
+static void* ReserveProcessExecutableMemory(size_t bytes) {
+# ifdef NEED_JIT_UNWIND_HANDLING
+ size_t pageSize = gc::SystemPageSize();
+ // Always reserve space for the unwind information.
+ bytes += pageSize;
+# endif
+
+ void* p = nullptr;
+ for (size_t i = 0; i < 10; i++) {
+ void* randomAddr = ComputeRandomAllocationAddress();
+ p = VirtualAlloc(randomAddr, bytes, MEM_RESERVE, PAGE_NOACCESS);
+ if (p) {
+ break;
+ }
+ }
+
+ if (!p) {
+ // Try again without randomization.
+ p = VirtualAlloc(nullptr, bytes, MEM_RESERVE, PAGE_NOACCESS);
+ if (!p) {
+ return nullptr;
+ }
+ }
+
+# ifdef NEED_JIT_UNWIND_HANDLING
+ if (RegisterExecutableMemory(p, bytes, pageSize)) {
+ sHasInstalledFunctionTable = true;
+ } else {
+ if (sJitExceptionHandler) {
+ // This should have succeeded if we have an exception handler. Bail.
+ VirtualFree(p, 0, MEM_RELEASE);
+ return nullptr;
+ }
+ }
+
+ // Skip the first page where we might have allocated an exception handler
+ // record.
+ p = (uint8_t*)p + pageSize;
+ bytes -= pageSize;
+
+ RegisterJitCodeRegion((uint8_t*)p, bytes);
+# endif
+ return p;
+}
+
+static void DeallocateProcessExecutableMemory(void* addr, size_t bytes) {
+# ifdef NEED_JIT_UNWIND_HANDLING
+ UnregisterJitCodeRegion((uint8_t*)addr, bytes);
+
+ size_t pageSize = gc::SystemPageSize();
+ addr = (uint8_t*)addr - pageSize;
+
+ if (sHasInstalledFunctionTable) {
+ UnregisterExecutableMemory(addr, bytes, pageSize);
+ }
+# endif
+
+ VirtualFree(addr, 0, MEM_RELEASE);
+}
+
+static DWORD ProtectionSettingToFlags(ProtectionSetting protection) {
+ switch (protection) {
+ case ProtectionSetting::Protected:
+ return PAGE_NOACCESS;
+ case ProtectionSetting::Writable:
+ return PAGE_READWRITE;
+ case ProtectionSetting::Executable:
+ return PAGE_EXECUTE_READ;
+ }
+ MOZ_CRASH();
+}
+
+[[nodiscard]] static bool CommitPages(void* addr, size_t bytes,
+ ProtectionSetting protection) {
+ void* p = VirtualAlloc(addr, bytes, MEM_COMMIT,
+ ProtectionSettingToFlags(protection));
+ if (!p) {
+ return false;
+ }
+ MOZ_RELEASE_ASSERT(p == addr);
+ return true;
+}
+
+static void DecommitPages(void* addr, size_t bytes) {
+ if (!VirtualFree(addr, bytes, MEM_DECOMMIT)) {
+ MOZ_CRASH("DecommitPages failed");
+ }
+}
+#elif defined(__wasi__)
+# if defined(JS_CODEGEN_WASM32)
+static void* ReserveProcessExecutableMemory(size_t bytes) {
+ return malloc(bytes);
+}
+
+static void DeallocateProcessExecutableMemory(void* addr, size_t bytes) {
+ free(addr);
+}
+
+[[nodiscard]] static bool CommitPages(void* addr, size_t bytes,
+ ProtectionSetting protection) {
+ return true;
+}
+
+static void DecommitPages(void* addr, size_t bytes) {}
+
+# else
+static void* ReserveProcessExecutableMemory(size_t bytes) {
+ MOZ_CRASH("NYI for WASI.");
+ return nullptr;
+}
+static void DeallocateProcessExecutableMemory(void* addr, size_t bytes) {
+ MOZ_CRASH("NYI for WASI.");
+}
+[[nodiscard]] static bool CommitPages(void* addr, size_t bytes,
+ ProtectionSetting protection) {
+ MOZ_CRASH("NYI for WASI.");
+ return false;
+}
+static void DecommitPages(void* addr, size_t bytes) {
+ MOZ_CRASH("NYI for WASI.");
+}
+# endif
+#else // !XP_WIN && !__wasi__
+# ifndef MAP_NORESERVE
+# define MAP_NORESERVE 0
+# endif
+
+static void* ComputeRandomAllocationAddress() {
+# ifdef __OpenBSD__
+  // OpenBSD already has random mmap, and the idea that all x64 CPUs have a
+  // 48-bit address space is not correct. Returning nullptr allows OpenBSD to
+  // do the right thing.
+ return nullptr;
+# else
+ uint64_t rand = js::GenerateRandomSeed();
+
+# ifdef HAVE_64BIT_BUILD
+ // x64 CPUs have a 48-bit address space and on some platforms the OS will
+ // give us access to 47 bits, so to be safe we right shift by 18 to leave
+ // 46 bits.
+ rand >>= 18;
+# else
+ // On 32-bit, right shift by 34 to leave 30 bits, range [0, 1GiB). Then add
+ // 512MiB to get range [512MiB, 1.5GiB), or [0x20000000, 0x60000000). This
+ // is based on V8 comments in platform-posix.cc saying this range is
+ // relatively unpopulated across a variety of kernels.
+ rand >>= 34;
+ rand += 512 * 1024 * 1024;
+# endif
+
+ // Ensure page alignment.
+ uintptr_t mask = ~uintptr_t(gc::SystemPageSize() - 1);
+ return (void*)uintptr_t(rand & mask);
+# endif
+}
+
+static void* ReserveProcessExecutableMemory(size_t bytes) {
+ // Note that randomAddr is just a hint: if the address is not available
+ // mmap will pick a different address.
+ void* randomAddr = ComputeRandomAllocationAddress();
+ void* p = MozTaggedAnonymousMmap(randomAddr, bytes, PROT_NONE,
+ MAP_NORESERVE | MAP_PRIVATE | MAP_ANON, -1,
+ 0, "js-executable-memory");
+ if (p == MAP_FAILED) {
+ return nullptr;
+ }
+ return p;
+}
+
+static void DeallocateProcessExecutableMemory(void* addr, size_t bytes) {
+ mozilla::DebugOnly<int> result = munmap(addr, bytes);
+ MOZ_ASSERT(!result || errno == ENOMEM);
+}
+
+static unsigned ProtectionSettingToFlags(ProtectionSetting protection) {
+# ifdef MOZ_VALGRIND
+ // If we're configured for Valgrind and running on it, use a slacker
+ // scheme that doesn't change execute permissions, since doing so causes
+ // Valgrind a lot of extra overhead re-JITting code that loses and later
+ // regains execute permission. See bug 1338179.
+ if (RUNNING_ON_VALGRIND) {
+ switch (protection) {
+ case ProtectionSetting::Protected:
+ return PROT_NONE;
+ case ProtectionSetting::Writable:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ case ProtectionSetting::Executable:
+ return PROT_READ | PROT_EXEC;
+ }
+ MOZ_CRASH();
+ }
+ // If we get here, we're configured for Valgrind but not running on
+ // it, so use the standard scheme.
+# endif
+ switch (protection) {
+ case ProtectionSetting::Protected:
+ return PROT_NONE;
+ case ProtectionSetting::Writable:
+ return PROT_READ | PROT_WRITE;
+ case ProtectionSetting::Executable:
+ return PROT_READ | PROT_EXEC;
+ }
+ MOZ_CRASH();
+}
+
+[[nodiscard]] static bool CommitPages(void* addr, size_t bytes,
+ ProtectionSetting protection) {
+ void* p = MozTaggedAnonymousMmap(
+ addr, bytes, ProtectionSettingToFlags(protection),
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0, "js-executable-memory");
+ if (p == MAP_FAILED) {
+ return false;
+ }
+ MOZ_RELEASE_ASSERT(p == addr);
+ return true;
+}
+
+static void DecommitPages(void* addr, size_t bytes) {
+ // Use mmap with MAP_FIXED and PROT_NONE. Inspired by jemalloc's
+ // pages_decommit.
+ void* p = MozTaggedAnonymousMmap(addr, bytes, PROT_NONE,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0,
+ "js-executable-memory");
+ MOZ_RELEASE_ASSERT(addr == p);
+}
+#endif
+
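+// Fixed-size bit set tracking which pages of the executable memory region are
+// currently allocated. NumBits must be a multiple of the 32-bit storage word
+// size.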
+template <size_t NumBits>
+class PageBitSet {
+ using WordType = uint32_t;
+ static const size_t BitsPerWord = sizeof(WordType) * 8;
+
+ static_assert((NumBits % BitsPerWord) == 0,
+ "NumBits must be a multiple of BitsPerWord");
+ static const size_t NumWords = NumBits / BitsPerWord;
+
+ mozilla::Array<WordType, NumWords> words_;
+
+ uint32_t indexToWord(uint32_t index) const {
+ MOZ_ASSERT(index < NumBits);
+ return index / BitsPerWord;
+ }
+ WordType indexToBit(uint32_t index) const {
+ MOZ_ASSERT(index < NumBits);
+ return WordType(1) << (index % BitsPerWord);
+ }
+
+ public:
+ void init() { mozilla::PodArrayZero(words_); }
+ bool contains(size_t index) const {
+ uint32_t word = indexToWord(index);
+ return words_[word] & indexToBit(index);
+ }
+ void insert(size_t index) {
+ MOZ_ASSERT(!contains(index));
+ uint32_t word = indexToWord(index);
+ words_[word] |= indexToBit(index);
+ }
+ void remove(size_t index) {
+ MOZ_ASSERT(contains(index));
+ uint32_t word = indexToWord(index);
+ words_[word] &= ~indexToBit(index);
+ }
+
+#ifdef DEBUG
+ bool empty() const {
+ for (size_t i = 0; i < NumWords; i++) {
+ if (words_[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+#endif
+};
+
+// Per-process executable memory allocator. It reserves a block of memory of
+// MaxCodeBytesPerProcess bytes, then allocates/deallocates pages from that.
+//
+// This has a number of benefits compared to raw mmap/VirtualAlloc:
+//
+// * More resilient against certain attacks.
+//
+// * Behaves more consistently across platforms: it avoids the 64K granularity
+// issues on Windows, for instance.
+//
+// * On x64, near jumps can be used for jumps to other JIT pages.
+//
+// * On Win64, we have to register the exception handler only once (at process
+// startup). This saves some memory and avoids RtlAddFunctionTable profiler
+// deadlocks.
+class ProcessExecutableMemory {
+ static_assert(
+ (MaxCodeBytesPerProcess % ExecutableCodePageSize) == 0,
+ "MaxCodeBytesPerProcess must be a multiple of ExecutableCodePageSize");
+ static const size_t MaxCodePages =
+ MaxCodeBytesPerProcess / ExecutableCodePageSize;
+
+ // Start of the MaxCodeBytesPerProcess memory block or nullptr if
+ // uninitialized. Note that this is NOT guaranteed to be aligned to
+ // ExecutableCodePageSize.
+ uint8_t* base_;
+
+ // The fields below should only be accessed while we hold the lock.
+ Mutex lock_ MOZ_UNANNOTATED;
+
+ // pagesAllocated_ is an Atomic so that bytesAllocated does not have to
+ // take the lock.
+ mozilla::Atomic<size_t, mozilla::ReleaseAcquire> pagesAllocated_;
+
+ // Page where we should try to allocate next.
+ size_t cursor_;
+
+ mozilla::Maybe<mozilla::non_crypto::XorShift128PlusRNG> rng_;
+ PageBitSet<MaxCodePages> pages_;
+
+ public:
+ ProcessExecutableMemory()
+ : base_(nullptr),
+ lock_(mutexid::ProcessExecutableRegion),
+ pagesAllocated_(0),
+ cursor_(0),
+ rng_(),
+ pages_() {}
+
+ [[nodiscard]] bool init() {
+ pages_.init();
+
+ MOZ_RELEASE_ASSERT(!initialized());
+ MOZ_RELEASE_ASSERT(HasJitBackend());
+ MOZ_RELEASE_ASSERT(gc::SystemPageSize() <= ExecutableCodePageSize);
+
+ void* p = ReserveProcessExecutableMemory(MaxCodeBytesPerProcess);
+ if (!p) {
+ return false;
+ }
+
+ base_ = static_cast<uint8_t*>(p);
+
+ mozilla::Array<uint64_t, 2> seed;
+ GenerateXorShift128PlusSeed(seed);
+ rng_.emplace(seed[0], seed[1]);
+ return true;
+ }
+
+ uint8_t* base() const { return base_; }
+
+ bool initialized() const { return base_ != nullptr; }
+
+ size_t bytesAllocated() const {
+ MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);
+ return pagesAllocated_ * ExecutableCodePageSize;
+ }
+
+ void release() {
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(pages_.empty());
+ MOZ_ASSERT(pagesAllocated_ == 0);
+ DeallocateProcessExecutableMemory(base_, MaxCodeBytesPerProcess);
+ base_ = nullptr;
+ rng_.reset();
+ MOZ_ASSERT(!initialized());
+ }
+
+ void assertValidAddress(void* p, size_t bytes) const {
+ MOZ_RELEASE_ASSERT(p >= base_ &&
+ uintptr_t(p) + bytes <=
+ uintptr_t(base_) + MaxCodeBytesPerProcess);
+ }
+
+ bool containsAddress(const void* p) const {
+ return p >= base_ &&
+ uintptr_t(p) < uintptr_t(base_) + MaxCodeBytesPerProcess;
+ }
+
+ void* allocate(size_t bytes, ProtectionSetting protection,
+ MemCheckKind checkKind);
+ void deallocate(void* addr, size_t bytes, bool decommit);
+};
+
+void* ProcessExecutableMemory::allocate(size_t bytes,
+ ProtectionSetting protection,
+ MemCheckKind checkKind) {
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(HasJitBackend());
+ MOZ_ASSERT(bytes > 0);
+ MOZ_ASSERT((bytes % ExecutableCodePageSize) == 0);
+
+ size_t numPages = bytes / ExecutableCodePageSize;
+
+ // Take the lock and try to allocate.
+ void* p = nullptr;
+ {
+ LockGuard<Mutex> guard(lock_);
+ MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);
+
+ // Check if we have enough pages available.
+ if (pagesAllocated_ + numPages >= MaxCodePages) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(bytes <= MaxCodeBytesPerProcess);
+
+ // Maybe skip a page to make allocations less predictable.
+ size_t page = cursor_ + (rng_.ref().next() % 2);
+
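+    // Linearly scan for a run of numPages free pages, starting at the
+    // (slightly randomized) cursor and wrapping back to page 0 when the
+    // candidate run would extend past the end of the region.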
+ for (size_t i = 0; i < MaxCodePages; i++) {
+ // Make sure page + numPages - 1 is a valid index.
+ if (page + numPages > MaxCodePages) {
+ page = 0;
+ }
+
+ bool available = true;
+ for (size_t j = 0; j < numPages; j++) {
+ if (pages_.contains(page + j)) {
+ available = false;
+ break;
+ }
+ }
+ if (!available) {
+ page++;
+ continue;
+ }
+
+ // Mark the pages as unavailable.
+ for (size_t j = 0; j < numPages; j++) {
+ pages_.insert(page + j);
+ }
+
+ pagesAllocated_ += numPages;
+ MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);
+
+ // If we allocated a small number of pages, move cursor_ to the
+ // next page. We don't do this for larger allocations to avoid
+ // skipping a large number of small holes.
+ if (numPages <= 2) {
+ cursor_ = page + numPages;
+ }
+
+ p = base_ + page * ExecutableCodePageSize;
+ break;
+ }
+ if (!p) {
+ return nullptr;
+ }
+ }
+
+ // Commit the pages after releasing the lock.
+ if (!CommitPages(p, bytes, protection)) {
+ deallocate(p, bytes, /* decommit = */ false);
+ return nullptr;
+ }
+
+ SetMemCheckKind(p, bytes, checkKind);
+
+ return p;
+}
+
+void ProcessExecutableMemory::deallocate(void* addr, size_t bytes,
+ bool decommit) {
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(addr);
+ MOZ_ASSERT((uintptr_t(addr) % gc::SystemPageSize()) == 0);
+ MOZ_ASSERT(bytes > 0);
+ MOZ_ASSERT((bytes % ExecutableCodePageSize) == 0);
+
+ assertValidAddress(addr, bytes);
+
+ size_t firstPage =
+ (static_cast<uint8_t*>(addr) - base_) / ExecutableCodePageSize;
+ size_t numPages = bytes / ExecutableCodePageSize;
+
+ // Decommit before taking the lock.
+ MOZ_MAKE_MEM_NOACCESS(addr, bytes);
+ if (decommit) {
+ DecommitPages(addr, bytes);
+ }
+
+ LockGuard<Mutex> guard(lock_);
+ MOZ_ASSERT(numPages <= pagesAllocated_);
+ pagesAllocated_ -= numPages;
+
+ for (size_t i = 0; i < numPages; i++) {
+ pages_.remove(firstPage + i);
+ }
+
+ // Move the cursor back so we can reuse pages instead of fragmenting the
+ // whole region.
+ if (firstPage < cursor_) {
+ cursor_ = firstPage;
+ }
+}
+
+static ProcessExecutableMemory execMemory;
+
+void* js::jit::AllocateExecutableMemory(size_t bytes,
+ ProtectionSetting protection,
+ MemCheckKind checkKind) {
+ return execMemory.allocate(bytes, protection, checkKind);
+}
+
+void js::jit::DeallocateExecutableMemory(void* addr, size_t bytes) {
+ execMemory.deallocate(addr, bytes, /* decommit = */ true);
+}
+
+bool js::jit::InitProcessExecutableMemory() { return execMemory.init(); }
+
+void js::jit::ReleaseProcessExecutableMemory() { execMemory.release(); }
+
+size_t js::jit::LikelyAvailableExecutableMemory() {
+ // Round down available memory to the closest MB.
+ return MaxCodeBytesPerProcess -
+ AlignBytes(execMemory.bytesAllocated(), 0x100000U);
+}
+
+bool js::jit::CanLikelyAllocateMoreExecutableMemory() {
+  // Use an 8 MB buffer.
+ static const size_t BufferSize = 8 * 1024 * 1024;
+
+ MOZ_ASSERT(execMemory.bytesAllocated() <= MaxCodeBytesPerProcess);
+
+ return execMemory.bytesAllocated() + BufferSize <= MaxCodeBytesPerProcess;
+}
+
+bool js::jit::AddressIsInExecutableMemory(const void* p) {
+ return execMemory.containsAddress(p);
+}
+
+bool js::jit::ReprotectRegion(void* start, size_t size,
+ ProtectionSetting protection,
+ MustFlushICache flushICache) {
+#if defined(JS_CODEGEN_WASM32)
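+  // The wasm32 allocator above is malloc-based and CommitPages is a no-op,
+  // so there is nothing to reprotect here.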
+ return true;
+#endif
+
+ // Flush ICache when making code executable, before we modify |size|.
+ if (flushICache == MustFlushICache::Yes) {
+ MOZ_ASSERT(protection == ProtectionSetting::Executable);
+ jit::FlushICache(start, size);
+ }
+
+ // Calculate the start of the page containing this region,
+ // and account for this extra memory within size.
+ size_t pageSize = gc::SystemPageSize();
+ intptr_t startPtr = reinterpret_cast<intptr_t>(start);
+ intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
+ void* pageStart = reinterpret_cast<void*>(pageStartPtr);
+ size += (startPtr - pageStartPtr);
+
+ // Round size up
+ size += (pageSize - 1);
+ size &= ~(pageSize - 1);
+
+ MOZ_ASSERT((uintptr_t(pageStart) % pageSize) == 0);
+
+ execMemory.assertValidAddress(pageStart, size);
+
+ // On weak memory systems, make sure new code is visible on all cores before
+ // addresses of the code are made public. Now is the latest moment in time
+ // when we can do that, and we're assuming that every other thread that has
+ // written into the memory that is being reprotected here has synchronized
+ // with this thread in such a way that the memory writes have become visible
+ // and we therefore only need to execute the fence once here. See bug 1529933
+ // for a longer discussion of why this is both necessary and sufficient.
+ //
+ // We use the C++ fence here -- and not AtomicOperations::fenceSeqCst() --
+ // primarily because ReprotectRegion will be called while we construct our own
+ // jitted atomics. But the C++ fence is sufficient and correct, too.
+#ifdef __wasi__
+ MOZ_CRASH("NYI FOR WASI.");
+#else
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+
+# ifdef XP_WIN
+ DWORD flags = ProtectionSettingToFlags(protection);
+  // This is essentially a VirtualProtect, but with lighter impact on
+ // antivirus analysis. See bug 1823634.
+ if (!VirtualAlloc(pageStart, size, MEM_COMMIT, flags)) {
+ return false;
+ }
+# else
+ unsigned flags = ProtectionSettingToFlags(protection);
+ if (mprotect(pageStart, size, flags)) {
+ return false;
+ }
+# endif
+#endif // __wasi__
+
+ execMemory.assertValidAddress(pageStart, size);
+ return true;
+}
+
+#if defined(XP_WIN) && defined(NEED_JIT_UNWIND_HANDLING)
+static PRUNTIME_FUNCTION RuntimeFunctionCallback(DWORD64 ControlPc,
+ PVOID Context) {
+ MOZ_ASSERT(sJitExceptionHandler);
+
+ // RegisterExecutableMemory already set up the runtime function in the
+ // exception-data page preceding the allocation.
+ uint8_t* p = execMemory.base();
+ if (!p) {
+ return nullptr;
+ }
+ return (PRUNTIME_FUNCTION)(p - gc::SystemPageSize() +
+ offsetof(ExceptionHandlerRecord, runtimeFunction));
+}
+#endif
diff --git a/js/src/jit/ProcessExecutableMemory.h b/js/src/jit/ProcessExecutableMemory.h
new file mode 100644
index 0000000000..51747634f3
--- /dev/null
+++ b/js/src/jit/ProcessExecutableMemory.h
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ProcessExecutableMemory_h
+#define jit_ProcessExecutableMemory_h
+
+#include "util/Poison.h"
+
+namespace js {
+namespace jit {
+
+// Limit on the number of bytes of executable memory to prevent JIT spraying
+// attacks.
+#if JS_BITS_PER_WORD == 32
+static const size_t MaxCodeBytesPerProcess = 140 * 1024 * 1024;
+#else
+// This is the largest number <= INT32_MAX that satisfies the various
+// alignment static asserts. The INT32_MAX limit is required for making a
+// single call to RtlInstallFunctionTableCallback(). (This limit could be
+// relaxed in the future by making multiple calls.)
+static const size_t MaxCodeBytesPerProcess = 2044 * 1024 * 1024;
+#endif
+
+// Limit on the number of bytes of code memory per buffer. This limit comes
+// about because we encode an unresolved relative unconditional branch during
+// assembly as a branch instruction that carries the absolute offset of the next
+// branch instruction in the chain of branches that all reference the same
+// unresolved label. For this architecture to work, no branch instruction may
+// lie at an offset greater than the maximum forward branch distance. This is
+// true on both ARM and ARM64.
+//
+// Notably, even though we know that the offsets thus encoded are always
+// positive offsets, we use only the positive part of the signed range of the
+// branch offset.
+//
+// On ARM-32, we are limited by BOffImm::IsInRange(), which checks that the
+// offset is no greater than 2^25-4 in the offset's 26-bit signed field.
+//
+// On ARM-64, we are limited by Instruction::ImmBranchMaxForwardOffset(), which
+// checks that the offset is no greater than 2^27-4 in the offset's 28-bit
+// signed field.
+//
+// On MIPS, there are no limitations because the assembler has to implement
+// jump chaining to be effective at all (jump offsets are quite small).
+//
+// On x86 and x64, there are no limitations here because the assembler
+// MOZ_CRASHes if the 32-bit offset is exceeded.
+
+#if defined(JS_CODEGEN_ARM)
+static const size_t MaxCodeBytesPerBuffer = (1 << 25) - 4;
+#elif defined(JS_CODEGEN_ARM64)
+static const size_t MaxCodeBytesPerBuffer = (1 << 27) - 4;
+#else
+static const size_t MaxCodeBytesPerBuffer = MaxCodeBytesPerProcess;
+#endif
+
+// Executable code is allocated in 64K chunks. ExecutableAllocator uses pools
+// that are at least this big. Code we allocate does not necessarily have 64K
+// alignment though.
+static const size_t ExecutableCodePageSize = 64 * 1024;
+
+enum class ProtectionSetting {
+ Protected, // Not readable, writable, or executable.
+ Writable,
+ Executable,
+};
+
+// Whether the instruction cache must be flushed.
+enum class MustFlushICache { No, Yes };
+
+[[nodiscard]] extern bool ReprotectRegion(void* start, size_t size,
+ ProtectionSetting protection,
+ MustFlushICache flushICache);
+
+// Functions called at process start-up/shutdown to initialize/release the
+// executable memory region.
+[[nodiscard]] extern bool InitProcessExecutableMemory();
+extern void ReleaseProcessExecutableMemory();
+
+// Allocate/deallocate executable pages.
+extern void* AllocateExecutableMemory(size_t bytes,
+ ProtectionSetting protection,
+ MemCheckKind checkKind);
+extern void DeallocateExecutableMemory(void* addr, size_t bytes);
+
+// Returns true if we can allocate a few more MB of executable code without
+// hitting our code limit. This function can be used to stop compiling things
+// that are optional (like Baseline and Ion code) when we're about to reach the
+// limit, so we are less likely to OOM or crash. Note that the limit is
+// per-process, so other threads can also allocate code after we call this
+// function.
+extern bool CanLikelyAllocateMoreExecutableMemory();
+
+// Returns a rough guess of how much executable memory remains available,
+// rounded down to the nearest MB. Note this can fluctuate as other threads
+// within the process allocate executable memory.
+extern size_t LikelyAvailableExecutableMemory();
+
+// Returns whether |p| is stored in the executable code buffer.
+extern bool AddressIsInExecutableMemory(const void* p);
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_ProcessExecutableMemory_h
diff --git a/js/src/jit/RangeAnalysis.cpp b/js/src/jit/RangeAnalysis.cpp
new file mode 100644
index 0000000000..64b27b98ff
--- /dev/null
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -0,0 +1,3679 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/RangeAnalysis.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+#include "jsmath.h"
+
+#include "jit/CompileInfo.h"
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "util/CheckedArithmetic.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/Uint8Clamped.h"
+
+#include "vm/BytecodeUtil-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using JS::ToInt32;
+using mozilla::Abs;
+using mozilla::CountLeadingZeroes32;
+using mozilla::ExponentComponent;
+using mozilla::FloorLog2;
+using mozilla::IsNegativeZero;
+using mozilla::NegativeInfinity;
+using mozilla::NumberEqualsInt32;
+using mozilla::PositiveInfinity;
+
+// [SMDOC] IonMonkey Range Analysis
+//
+// This algorithm is based on the paper "Eliminating Range Checks Using
+// Static Single Assignment Form" by Gough and Klaeren.
+//
+// We associate a range object with each SSA name, and the ranges are consulted
+// in order to determine whether overflow is possible for arithmetic
+// computations.
+//
+// An important source of range information that requires care to take
+// advantage of is conditional control flow. Consider the code below:
+//
+// if (x < 0) {
+// y = x + 2000000000;
+// } else {
+// if (x < 1000000000) {
+// y = x * 2;
+// } else {
+// y = x - 3000000000;
+// }
+// }
+//
+// The arithmetic operations in this code cannot overflow, but it is not
+// sufficient to simply associate each name with a range, since the information
+// differs between basic blocks. The traditional dataflow approach would be to
+// associate ranges with (name, basic block) pairs. This solution is not
+// satisfying, since we lose the benefit of SSA form: in SSA form, each
+// definition has a unique name, so there is no need to track information about
+// the control flow of the program.
+//
+// The approach used here is to add a new form of pseudo operation called a
+// beta node, which associates range information with a value. These beta
+// instructions take one argument and additionally have an auxiliary constant
+// range associated with them. Operationally, beta nodes are just copies, but
+// the invariant expressed by beta node copies is that the output will fall
+// inside the range given by the beta node. Gough and Klaeren refer to SSA
+// extended with these beta nodes as XSA form. The following shows the example
+// code transformed into XSA form:
+//
+// if (x < 0) {
+// x1 = Beta(x, [INT_MIN, -1]);
+// y1 = x1 + 2000000000;
+// } else {
+// x2 = Beta(x, [0, INT_MAX]);
+// if (x2 < 1000000000) {
+// x3 = Beta(x2, [INT_MIN, 999999999]);
+// y2 = x3*2;
+// } else {
+// x4 = Beta(x2, [1000000000, INT_MAX]);
+// y3 = x4 - 3000000000;
+// }
+// y4 = Phi(y2, y3);
+// }
+// y = Phi(y1, y4);
+//
+// We insert beta nodes for the purposes of range analysis (they might also be
+// useful for other forms of bounds check elimination) and remove them
+// after range analysis is performed. The remaining compiler phases do not ever
+// encounter beta nodes.
+
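+// For a phi use, the value only flows in along one predecessor edge, so
+// dominance is checked against that predecessor rather than the phi's own
+// block.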
+static bool IsDominatedUse(MBasicBlock* block, MUse* use) {
+ MNode* n = use->consumer();
+ bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();
+
+ if (isPhi) {
+ MPhi* phi = n->toDefinition()->toPhi();
+ return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
+ }
+
+ return block->dominates(n->block());
+}
+
+static inline void SpewRange(MDefinition* def) {
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None &&
+ def->range()) {
+ JitSpewHeader(JitSpew_Range);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" ");
+ def->printName(out);
+ out.printf(" has range ");
+ def->range()->dump(out);
+ out.printf("\n");
+ }
+#endif
+}
+
+#ifdef JS_JITSPEW
+static const char* TruncateKindString(TruncateKind kind) {
+ switch (kind) {
+ case TruncateKind::NoTruncate:
+ return "NoTruncate";
+ case TruncateKind::TruncateAfterBailouts:
+ return "TruncateAfterBailouts";
+ case TruncateKind::IndirectTruncate:
+ return "IndirectTruncate";
+ case TruncateKind::Truncate:
+ return "Truncate";
+ default:
+ MOZ_CRASH("Unknown truncate kind.");
+ }
+}
+
+static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
+ bool shouldClone) {
+ if (JitSpewEnabled(JitSpew_Range)) {
+ JitSpewHeader(JitSpew_Range);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" ");
+ out.printf("truncating ");
+ def->printName(out);
+ out.printf(" (kind: %s, clone: %d)\n", TruncateKindString(kind),
+ shouldClone);
+ }
+}
+#else
+static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
+ bool shouldClone) {}
+#endif
+
+TempAllocator& RangeAnalysis::alloc() const { return graph_.alloc(); }
+
+void RangeAnalysis::replaceDominatedUsesWith(MDefinition* orig,
+ MDefinition* dom,
+ MBasicBlock* block) {
+ for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd();) {
+ MUse* use = *i++;
+ if (use->consumer() != dom && IsDominatedUse(block, use)) {
+ use->replaceProducer(dom);
+ }
+ }
+}
+
+bool RangeAnalysis::addBetaNodes() {
+ JitSpew(JitSpew_Range, "Adding beta nodes");
+
+ for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
+ MBasicBlock* block = *i;
+ JitSpew(JitSpew_Range, "Looking at block %u", block->id());
+
+ BranchDirection branch_dir;
+ MTest* test = block->immediateDominatorBranch(&branch_dir);
+
+ if (!test || !test->getOperand(0)->isCompare()) {
+ continue;
+ }
+
+ MCompare* compare = test->getOperand(0)->toCompare();
+
+ if (!compare->isNumericComparison()) {
+ continue;
+ }
+
+ // TODO: support unsigned comparisons
+ if (compare->compareType() == MCompare::Compare_UInt32) {
+ continue;
+ }
+
+ // isNumericComparison should return false for UIntPtr.
+ MOZ_ASSERT(compare->compareType() != MCompare::Compare_UIntPtr);
+
+ MDefinition* left = compare->getOperand(0);
+ MDefinition* right = compare->getOperand(1);
+ double bound;
+ double conservativeLower = NegativeInfinity<double>();
+ double conservativeUpper = PositiveInfinity<double>();
+ MDefinition* val = nullptr;
+
+ JSOp jsop = compare->jsop();
+
+ if (branch_dir == FALSE_BRANCH) {
+ jsop = NegateCompareOp(jsop);
+ conservativeLower = GenericNaN();
+ conservativeUpper = GenericNaN();
+ }
+
+ MConstant* leftConst = left->maybeConstantValue();
+ MConstant* rightConst = right->maybeConstantValue();
+ if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
+ bound = leftConst->numberToDouble();
+ val = right;
+ jsop = ReverseCompareOp(jsop);
+ } else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
+ bound = rightConst->numberToDouble();
+ val = left;
+ } else if (left->type() == MIRType::Int32 &&
+ right->type() == MIRType::Int32) {
+ MDefinition* smaller = nullptr;
+ MDefinition* greater = nullptr;
+ if (jsop == JSOp::Lt) {
+ smaller = left;
+ greater = right;
+ } else if (jsop == JSOp::Gt) {
+ smaller = right;
+ greater = left;
+ }
+ if (smaller && greater) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ MBeta* beta;
+ beta = MBeta::New(
+ alloc(), smaller,
+ Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
+ block->insertBefore(*block->begin(), beta);
+ replaceDominatedUsesWith(smaller, beta, block);
+ JitSpew(JitSpew_Range, " Adding beta node for smaller %u",
+ smaller->id());
+ beta = MBeta::New(
+ alloc(), greater,
+ Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
+ block->insertBefore(*block->begin(), beta);
+ replaceDominatedUsesWith(greater, beta, block);
+ JitSpew(JitSpew_Range, " Adding beta node for greater %u",
+ greater->id());
+ }
+ continue;
+ } else {
+ continue;
+ }
+
+ // At this point, one of the operands of the compare is a constant, and
+ // val is the other operand.
+ MOZ_ASSERT(val);
+
+ Range comp;
+ switch (jsop) {
+ case JSOp::Le:
+ comp.setDouble(conservativeLower, bound);
+ break;
+ case JSOp::Lt:
+ // For integers, if x < c, the upper bound of x is c-1.
+ if (val->type() == MIRType::Int32) {
+ int32_t intbound;
+ if (NumberEqualsInt32(bound, &intbound) &&
+ SafeSub(intbound, 1, &intbound)) {
+ bound = intbound;
+ }
+ }
+ comp.setDouble(conservativeLower, bound);
+
+ // Negative zero is not less than zero.
+ if (bound == 0) {
+ comp.refineToExcludeNegativeZero();
+ }
+ break;
+ case JSOp::Ge:
+ comp.setDouble(bound, conservativeUpper);
+ break;
+ case JSOp::Gt:
+ // For integers, if x > c, the lower bound of x is c+1.
+ if (val->type() == MIRType::Int32) {
+ int32_t intbound;
+ if (NumberEqualsInt32(bound, &intbound) &&
+ SafeAdd(intbound, 1, &intbound)) {
+ bound = intbound;
+ }
+ }
+ comp.setDouble(bound, conservativeUpper);
+
+ // Negative zero is not greater than zero.
+ if (bound == 0) {
+ comp.refineToExcludeNegativeZero();
+ }
+ break;
+ case JSOp::StrictEq:
+ case JSOp::Eq:
+ comp.setDouble(bound, bound);
+ break;
+ case JSOp::StrictNe:
+ case JSOp::Ne:
+ // Negative zero is not not-equal to zero.
+ if (bound == 0) {
+ comp.refineToExcludeNegativeZero();
+ break;
+ }
+ continue; // well, we could have
+ // [-\inf, bound-1] U [bound+1, \inf] but we only use
+ // contiguous ranges.
+ default:
+ continue;
+ }
+
+ if (JitSpewEnabled(JitSpew_Range)) {
+ JitSpewHeader(JitSpew_Range);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" Adding beta node for %u with range ", val->id());
+ comp.dump(out);
+ out.printf("\n");
+ }
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ MBeta* beta = MBeta::New(alloc(), val, new (alloc()) Range(comp));
+ block->insertBefore(*block->begin(), beta);
+ replaceDominatedUsesWith(val, beta, block);
+ }
+
+ return true;
+}
+
+bool RangeAnalysis::removeBetaNodes() {
+ JitSpew(JitSpew_Range, "Removing beta nodes");
+
+ for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
+ MBasicBlock* block = *i;
+ for (MDefinitionIterator iter(*i); iter;) {
+ MDefinition* def = *iter++;
+ if (def->isBeta()) {
+ auto* beta = def->toBeta();
+ MDefinition* op = beta->input();
+ JitSpew(JitSpew_Range, " Removing beta node %u for %u", beta->id(),
+ op->id());
+ beta->justReplaceAllUsesWith(op);
+ block->discard(beta);
+ } else {
+ // We only place Beta nodes at the beginning of basic
+ // blocks, so if we see something else, we can move on
+ // to the next block.
+ break;
+ }
+ }
+ }
+ return true;
+}
+
+void SymbolicBound::dump(GenericPrinter& out) const {
+ if (loop) {
+ out.printf("[loop] ");
+ }
+ sum.dump(out);
+}
+
+void SymbolicBound::dump() const {
+ Fprinter out(stderr);
+ dump(out);
+ out.printf("\n");
+ out.finish();
+}
+
+// Test whether the given range's exponent tells us anything that its lower
+// and upper bound values don't.
+static bool IsExponentInteresting(const Range* r) {
+ // If it lacks either a lower or upper bound, the exponent is interesting.
+ if (!r->hasInt32Bounds()) {
+ return true;
+ }
+
+ // Otherwise if there's no fractional part, the lower and upper bounds,
+ // which are integers, are perfectly precise.
+ if (!r->canHaveFractionalPart()) {
+ return false;
+ }
+
+ // Otherwise, if the bounds are conservatively rounded across a power-of-two
+ // boundary, the exponent may imply a tighter range.
+ return FloorLog2(std::max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
+}
+
+void Range::dump(GenericPrinter& out) const {
+ assertInvariants();
+
+ // Floating-point or Integer subset.
+ if (canHaveFractionalPart_) {
+ out.printf("F");
+ } else {
+ out.printf("I");
+ }
+
+ out.printf("[");
+
+ if (!hasInt32LowerBound_) {
+ out.printf("?");
+ } else {
+ out.printf("%d", lower_);
+ }
+ if (symbolicLower_) {
+ out.printf(" {");
+ symbolicLower_->dump(out);
+ out.printf("}");
+ }
+
+ out.printf(", ");
+
+ if (!hasInt32UpperBound_) {
+ out.printf("?");
+ } else {
+ out.printf("%d", upper_);
+ }
+ if (symbolicUpper_) {
+ out.printf(" {");
+ symbolicUpper_->dump(out);
+ out.printf("}");
+ }
+
+ out.printf("]");
+
+ bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
+ bool includesNegativeInfinity =
+ max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
+ bool includesPositiveInfinity =
+ max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
+ bool includesNegativeZero = canBeNegativeZero_;
+
+ if (includesNaN || includesNegativeInfinity || includesPositiveInfinity ||
+ includesNegativeZero) {
+ out.printf(" (");
+ bool first = true;
+ if (includesNaN) {
+ if (first) {
+ first = false;
+ } else {
+ out.printf(" ");
+ }
+ out.printf("U NaN");
+ }
+ if (includesNegativeInfinity) {
+ if (first) {
+ first = false;
+ } else {
+ out.printf(" ");
+ }
+ out.printf("U -Infinity");
+ }
+ if (includesPositiveInfinity) {
+ if (first) {
+ first = false;
+ } else {
+ out.printf(" ");
+ }
+ out.printf("U Infinity");
+ }
+ if (includesNegativeZero) {
+ if (first) {
+ first = false;
+ } else {
+ out.printf(" ");
+ }
+ out.printf("U -0");
+ }
+ out.printf(")");
+ }
+ if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this)) {
+ out.printf(" (< pow(2, %d+1))", max_exponent_);
+ }
+}
+
+void Range::dump() const {
+ Fprinter out(stderr);
+ dump(out);
+ out.printf("\n");
+ out.finish();
+}
+
+Range* Range::intersect(TempAllocator& alloc, const Range* lhs,
+ const Range* rhs, bool* emptyRange) {
+ *emptyRange = false;
+
+ if (!lhs && !rhs) {
+ return nullptr;
+ }
+
+ if (!lhs) {
+ return new (alloc) Range(*rhs);
+ }
+ if (!rhs) {
+ return new (alloc) Range(*lhs);
+ }
+
+ int32_t newLower = std::max(lhs->lower_, rhs->lower_);
+ int32_t newUpper = std::min(lhs->upper_, rhs->upper_);
+
+ // If upper < lower, then we have conflicting constraints. Consider:
+ //
+ // if (x < 0) {
+ // if (x > 0) {
+ // [Some code.]
+ // }
+ // }
+ //
+ // In this case, the block is unreachable.
+ if (newUpper < newLower) {
+ // If both ranges can be NaN, the result can still be NaN.
+ if (!lhs->canBeNaN() || !rhs->canBeNaN()) {
+ *emptyRange = true;
+ }
+ return nullptr;
+ }
+
+ bool newHasInt32LowerBound =
+ lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
+ bool newHasInt32UpperBound =
+ lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;
+
+ FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
+ lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_);
+ NegativeZeroFlag newMayIncludeNegativeZero =
+ NegativeZeroFlag(lhs->canBeNegativeZero_ && rhs->canBeNegativeZero_);
+
+ uint16_t newExponent = std::min(lhs->max_exponent_, rhs->max_exponent_);
+
+ // NaN is a special value which is neither greater than infinity nor less than
+ // negative infinity. When we intersect two ranges like [?, 0] and [0, ?], we
+ // can end up thinking we have both a lower and upper bound, even though NaN
+ // is still possible. In this case, just be conservative, since any case where
+ // we can have NaN is not especially interesting.
+ if (newHasInt32LowerBound && newHasInt32UpperBound &&
+ newExponent == IncludesInfinityAndNaN) {
+ return nullptr;
+ }
+
+ // If one of the ranges has a fractional part and the other doesn't, it's
+ // possible that we will have computed a newExponent that's more precise
+ // than our newLower and newUpper. This is unusual, so we handle it here
+ // instead of in optimize().
+ //
+ // For example, consider the range F[0,1.5]. Range analysis represents the
+ // lower and upper bound as integers, so we'd actually have
+ // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
+ // more precise upper bound than the integer upper bound.
+ //
+ // When intersecting such a range with an integer range, the fractional part
+ // of the range is dropped. The max exponent of 0 remains valid, so the
+ // upper bound needs to be adjusted to 1.
+ //
+ // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
+ // the naive intersection is I[2,2], but since the max exponent tells us
+ // that the value is always less than 2, the intersection is actually empty.
+ if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
+ (lhs->canHaveFractionalPart() && newHasInt32LowerBound &&
+ newHasInt32UpperBound && newLower == newUpper)) {
+ refineInt32BoundsByExponent(newExponent, &newLower, &newHasInt32LowerBound,
+ &newUpper, &newHasInt32UpperBound);
+
+ // If we're intersecting two ranges that don't overlap, this could also
+ // push the bounds past each other, since the actual intersection is
+ // the empty set.
+ if (newLower > newUpper) {
+ *emptyRange = true;
+ return nullptr;
+ }
+ }
+
+ return new (alloc)
+ Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
+ newCanHaveFractionalPart, newMayIncludeNegativeZero, newExponent);
+}
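+// Illustration of Range::intersect above: intersecting I[0, 10] with
+// I[5, 20] keeps the overlap I[5, 10], while intersecting I[0, 3] with
+// I[5, 9] has no overlap, so *emptyRange is set and MBeta::computeRange will
+// mark the dominated block unreachable.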
+
+void Range::unionWith(const Range* other) {
+ int32_t newLower = std::min(lower_, other->lower_);
+ int32_t newUpper = std::max(upper_, other->upper_);
+
+ bool newHasInt32LowerBound =
+ hasInt32LowerBound_ && other->hasInt32LowerBound_;
+ bool newHasInt32UpperBound =
+ hasInt32UpperBound_ && other->hasInt32UpperBound_;
+
+ FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
+ canHaveFractionalPart_ || other->canHaveFractionalPart_);
+ NegativeZeroFlag newMayIncludeNegativeZero =
+ NegativeZeroFlag(canBeNegativeZero_ || other->canBeNegativeZero_);
+
+ uint16_t newExponent = std::max(max_exponent_, other->max_exponent_);
+
+ rawInitialize(newLower, newHasInt32LowerBound, newUpper,
+ newHasInt32UpperBound, newCanHaveFractionalPart,
+ newMayIncludeNegativeZero, newExponent);
+}
+
+Range::Range(const MDefinition* def)
+ : symbolicLower_(nullptr), symbolicUpper_(nullptr) {
+ if (const Range* other = def->range()) {
+ // The instruction has range information; use it.
+ *this = *other;
+
+ // Simulate the effect of converting the value to its type.
+ // Note: we cannot clamp here, since ranges aren't allowed to shrink
+ // and truncation can increase the range again. So we apply wrapAround to
+ // mimic a possible truncation.
+ switch (def->type()) {
+ case MIRType::Int32:
+ // MToNumberInt32 cannot truncate. So we can safely clamp.
+ if (def->isToNumberInt32()) {
+ clampToInt32();
+ } else {
+ wrapAroundToInt32();
+ }
+ break;
+ case MIRType::Boolean:
+ wrapAroundToBoolean();
+ break;
+ case MIRType::None:
+ MOZ_CRASH("Asking for the range of an instruction with no value");
+ default:
+ break;
+ }
+ } else {
+ // Otherwise just use type information. We can trust the type here
+ // because we don't care what value the instruction actually produces,
+ // but what value we might get after we get past the bailouts.
+ switch (def->type()) {
+ case MIRType::Int32:
+ setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
+ break;
+ case MIRType::Boolean:
+ setInt32(0, 1);
+ break;
+ case MIRType::None:
+ MOZ_CRASH("Asking for the range of an instruction with no value");
+ default:
+ setUnknown();
+ break;
+ }
+ }
+
+ // As a special case, MUrsh is permitted to claim a result type of
+ // MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
+ // bailouts. If range analysis hasn't ruled out values in
+ // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
+ // use as either a uint32 or an int32.
+ if (!hasInt32UpperBound() && def->isUrsh() &&
+ def->toUrsh()->bailoutsDisabled() && def->type() != MIRType::Int64) {
+ lower_ = INT32_MIN;
+ }
+
+ assertInvariants();
+}
+
+static uint16_t ExponentImpliedByDouble(double d) {
+ // Handle the special values.
+ if (std::isnan(d)) {
+ return Range::IncludesInfinityAndNaN;
+ }
+ if (std::isinf(d)) {
+ return Range::IncludesInfinity;
+ }
+
+ // Otherwise take the exponent part and clamp it at zero, since the Range
+ // class doesn't track fractional ranges.
+ return uint16_t(std::max(int_fast16_t(0), ExponentComponent(d)));
+}
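+// For example, ExponentImpliedByDouble(5.0) is 2 (5.0 == 1.25 * 2^2), while
+// values with magnitude below 1, such as 0.5, clamp to an exponent of 0.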
+
+void Range::setDouble(double l, double h) {
+ MOZ_ASSERT(!(l > h));
+
+ // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
+ if (l >= INT32_MIN && l <= INT32_MAX) {
+ lower_ = int32_t(::floor(l));
+ hasInt32LowerBound_ = true;
+ } else if (l >= INT32_MAX) {
+ lower_ = INT32_MAX;
+ hasInt32LowerBound_ = true;
+ } else {
+ lower_ = INT32_MIN;
+ hasInt32LowerBound_ = false;
+ }
+ if (h >= INT32_MIN && h <= INT32_MAX) {
+ upper_ = int32_t(::ceil(h));
+ hasInt32UpperBound_ = true;
+ } else if (h <= INT32_MIN) {
+ upper_ = INT32_MIN;
+ hasInt32UpperBound_ = true;
+ } else {
+ upper_ = INT32_MAX;
+ hasInt32UpperBound_ = false;
+ }
+
+ // Infer max_exponent_.
+ uint16_t lExp = ExponentImpliedByDouble(l);
+ uint16_t hExp = ExponentImpliedByDouble(h);
+ max_exponent_ = std::max(lExp, hExp);
+
+ canHaveFractionalPart_ = ExcludesFractionalParts;
+ canBeNegativeZero_ = ExcludesNegativeZero;
+
+ // Infer the canHaveFractionalPart_ setting. We can have a
+ // fractional part if the range crosses through the neighborhood of zero. We
+ // won't have a fractional value if the value is always beyond the point at
+ // which double precision can't represent fractional values.
+ uint16_t minExp = std::min(lExp, hExp);
+ bool includesNegative = std::isnan(l) || l < 0;
+ bool includesPositive = std::isnan(h) || h > 0;
+ bool crossesZero = includesNegative && includesPositive;
+ if (crossesZero || minExp < MaxTruncatableExponent) {
+ canHaveFractionalPart_ = IncludesFractionalParts;
+ }
+
+ // Infer the canBeNegativeZero_ setting. We can have a negative zero if
+ // either bound is zero.
+ if (!(l > 0) && !(h < 0)) {
+ canBeNegativeZero_ = IncludesNegativeZero;
+ }
+
+ optimize();
+}
+
+void Range::setDoubleSingleton(double d) {
+ setDouble(d, d);
+
+ // The above setDouble call is for comparisons, and treats negative zero
+ // as equal to zero. We're aiming for a minimum range, so we can clear the
+ // negative zero flag if the value isn't actually negative zero.
+ if (!IsNegativeZero(d)) {
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ }
+
+ assertInvariants();
+}
+
+static inline bool MissingAnyInt32Bounds(const Range* lhs, const Range* rhs) {
+ return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
+}
+
+Range* Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ int64_t l = (int64_t)lhs->lower_ + (int64_t)rhs->lower_;
+ if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound()) {
+ l = NoInt32LowerBound;
+ }
+
+ int64_t h = (int64_t)lhs->upper_ + (int64_t)rhs->upper_;
+ if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound()) {
+ h = NoInt32UpperBound;
+ }
+
+ // The exponent is at most one greater than the greater of the operands'
+ // exponents, except for NaN and infinity cases.
+ uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
+ if (e <= Range::MaxFiniteExponent) {
+ ++e;
+ }
+
+ // Infinity + -Infinity is NaN.
+ if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
+ e = Range::IncludesInfinityAndNaN;
+ }
+
+ return new (alloc) Range(
+ l, h,
+ FractionalPartFlag(lhs->canHaveFractionalPart() ||
+ rhs->canHaveFractionalPart()),
+ NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeNegativeZero()),
+ e);
+}
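+// For example, adding I[0, 10] and I[5, 5] gives I[5, 15]. If the 64-bit sum
+// of the upper bounds exceeds INT32_MAX, the result is simply marked as
+// having no int32 upper bound rather than wrapping around.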
+
+Range* Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ int64_t l = (int64_t)lhs->lower_ - (int64_t)rhs->upper_;
+ if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound()) {
+ l = NoInt32LowerBound;
+ }
+
+ int64_t h = (int64_t)lhs->upper_ - (int64_t)rhs->lower_;
+ if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound()) {
+ h = NoInt32UpperBound;
+ }
+
+ // The exponent is at most one greater than the greater of the operands'
+ // exponents, except for NaN and infinity cases.
+ uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
+ if (e <= Range::MaxFiniteExponent) {
+ ++e;
+ }
+
+ // Infinity - Infinity is NaN.
+ if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
+ e = Range::IncludesInfinityAndNaN;
+ }
+
+ return new (alloc)
+ Range(l, h,
+ FractionalPartFlag(lhs->canHaveFractionalPart() ||
+ rhs->canHaveFractionalPart()),
+ NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeZero()), e);
+}
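+// For example, subtracting I[3, 5] from I[0, 10] gives I[-5, 7]: the new
+// lower bound is lhs.lower - rhs.upper and the new upper bound is
+// lhs.upper - rhs.lower.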
+
+Range* Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+
+ // If both numbers can be negative, result can be negative in the whole range
+ if (lhs->lower() < 0 && rhs->lower() < 0) {
+ return Range::NewInt32Range(alloc, INT32_MIN,
+ std::max(lhs->upper(), rhs->upper()));
+ }
+
+ // At most one of the numbers can be negative, so:
+ // - the result can't be negative, and
+ // - the upper bound is the minimum of both upper bounds,
+ int32_t lower = 0;
+ int32_t upper = std::min(lhs->upper(), rhs->upper());
+
+ // EXCEPT when the upper bound of the non-negative number is the max value,
+ // because a negative operand can let all of the other operand's bits
+ // through, e.g. -1 & 5 = 5.
+ if (lhs->lower() < 0) {
+ upper = rhs->upper();
+ }
+ if (rhs->lower() < 0) {
+ upper = lhs->upper();
+ }
+
+ return Range::NewInt32Range(alloc, lower, upper);
+}
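+// For example, Range::and_ of [0, 15] and [0, 255] gives [0, 15], while
+// [-1, 5] & [0, 255] keeps 255 as the upper bound because a negative lhs
+// such as -1 can let all of rhs's bits through.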
+
+Range* Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+ // When one operand is always 0 or always -1, it's a special case where we
+ // can compute a fully precise result. Handling these up front also
+ // protects the code below from calling CountLeadingZeroes32 with a zero
+ // operand or from shifting an int32_t by 32.
+ if (lhs->lower() == lhs->upper()) {
+ if (lhs->lower() == 0) {
+ return new (alloc) Range(*rhs);
+ }
+ if (lhs->lower() == -1) {
+ return new (alloc) Range(*lhs);
+ }
+ }
+ if (rhs->lower() == rhs->upper()) {
+ if (rhs->lower() == 0) {
+ return new (alloc) Range(*lhs);
+ }
+ if (rhs->lower() == -1) {
+ return new (alloc) Range(*rhs);
+ }
+ }
+
+ // The code below uses CountLeadingZeroes32, which has undefined behavior
+ // if its operand is 0. We rely on the code above to protect it.
+ MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
+ MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
+ MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
+ MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);
+
+ int32_t lower = INT32_MIN;
+ int32_t upper = INT32_MAX;
+
+ if (lhs->lower() >= 0 && rhs->lower() >= 0) {
+ // Both operands are non-negative, so the result won't be less than either.
+ lower = std::max(lhs->lower(), rhs->lower());
+ // The result will have leading zeros where both operands have leading
+ // zeros. CountLeadingZeroes32 of a non-negative int32 will at least be 1 to
+ // account for the bit of sign.
+ upper = int32_t(UINT32_MAX >> std::min(CountLeadingZeroes32(lhs->upper()),
+ CountLeadingZeroes32(rhs->upper())));
+ } else {
+ // The result will have leading ones where either operand has leading ones.
+ if (lhs->upper() < 0) {
+ unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
+ lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
+ upper = -1;
+ }
+ if (rhs->upper() < 0) {
+ unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
+ lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
+ upper = -1;
+ }
+ }
+
+ return Range::NewInt32Range(alloc, lower, upper);
+}
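+// For example, Range::or_ of [1, 3] and [4, 4] gives [4, 7]: the lower bound
+// is the larger of the two lower bounds, and the shared leading zero bits
+// (here all but the low three) cap the upper bound at 7.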
+
+Range* Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+ int32_t lhsLower = lhs->lower();
+ int32_t lhsUpper = lhs->upper();
+ int32_t rhsLower = rhs->lower();
+ int32_t rhsUpper = rhs->upper();
+ bool invertAfter = false;
+
+ // If either operand is negative, bitwise-negate it, and arrange to negate
+ // the result; ~((~x)^y) == x^y. If both are negative the negations on the
+ // result cancel each other out; effectively this is (~x)^(~y) == x^y.
+ // These transformations reduce the number of cases we have to handle below.
+ if (lhsUpper < 0) {
+ lhsLower = ~lhsLower;
+ lhsUpper = ~lhsUpper;
+ std::swap(lhsLower, lhsUpper);
+ invertAfter = !invertAfter;
+ }
+ if (rhsUpper < 0) {
+ rhsLower = ~rhsLower;
+ rhsUpper = ~rhsUpper;
+ std::swap(rhsLower, rhsUpper);
+ invertAfter = !invertAfter;
+ }
+
+ // Handle cases where lhs or rhs is always zero specially, because they're
+ // easy cases where we can be perfectly precise, and because it protects the
+ // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
+ // undefined behavior.
+ int32_t lower = INT32_MIN;
+ int32_t upper = INT32_MAX;
+ if (lhsLower == 0 && lhsUpper == 0) {
+ upper = rhsUpper;
+ lower = rhsLower;
+ } else if (rhsLower == 0 && rhsUpper == 0) {
+ upper = lhsUpper;
+ lower = lhsLower;
+ } else if (lhsLower >= 0 && rhsLower >= 0) {
+ // Both operands are non-negative. The result will be non-negative.
+ lower = 0;
+ // To compute the upper value, take each operand's upper value and
+ // set all bits that don't correspond to leading zero bits in the
+ // other to one. For each one, this gives an upper bound for the
+ // result, so we can take the minimum between the two.
+ unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
+ unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
+ upper = std::min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
+ lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
+ }
+
+ // If we bitwise-negated one (but not both) of the operands above, apply the
+ // bitwise-negate to the result, completing ~((~x)^y) == x^y.
+ if (invertAfter) {
+ lower = ~lower;
+ upper = ~upper;
+ std::swap(lower, upper);
+ }
+
+ return Range::NewInt32Range(alloc, lower, upper);
+}
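+// For example, Range::xor_ of [0, 3] and [0, 4] gives [0, 7]: both operands
+// are non-negative, so the lower bound is 0, and each upper bound is widened
+// by the other operand's possible low bits, yielding min(4 | 3, 3 | 7) == 7.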
+
+Range* Range::not_(TempAllocator& alloc, const Range* op) {
+ MOZ_ASSERT(op->isInt32());
+ return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
+}
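+// For example, Range::not_ of I[0, 5] is I[-6, -1]: since ~x == -x - 1, the
+// bounds swap roles.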
+
+Range* Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
+ lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
+
+ NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(
+ (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
+ (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));
+
+ uint16_t exponent;
+ if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
+ // Two finite values.
+ exponent = lhs->numBits() + rhs->numBits() - 1;
+ if (exponent > Range::MaxFiniteExponent) {
+ exponent = Range::IncludesInfinity;
+ }
+ } else if (!lhs->canBeNaN() && !rhs->canBeNaN() &&
+ !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
+ !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN())) {
+ // Two values that multiplied together won't produce a NaN.
+ exponent = Range::IncludesInfinity;
+ } else {
+ // Could be anything.
+ exponent = Range::IncludesInfinityAndNaN;
+ }
+
+ if (MissingAnyInt32Bounds(lhs, rhs)) {
+ return new (alloc)
+ Range(NoInt32LowerBound, NoInt32UpperBound, newCanHaveFractionalPart,
+ newMayIncludeNegativeZero, exponent);
+ }
+ int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
+ int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
+ int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
+ int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
+ return new (alloc)
+ Range(std::min(std::min(a, b), std::min(c, d)),
+ std::max(std::max(a, b), std::max(c, d)), newCanHaveFractionalPart,
+ newMayIncludeNegativeZero, exponent);
+}
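+// For example, Range::mul of I[2, 3] and I[-5, 4] gives I[-15, 12]: the four
+// corner products are -10, 8, -15 and 12, and the result takes their minimum
+// and maximum.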
+
+Range* Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
+ MOZ_ASSERT(lhs->isInt32());
+ int32_t shift = c & 0x1f;
+
+ // If the shift doesn't lose bits or shift bits into the sign bit, we
+ // can simply compute the correct range by shifting.
+ if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) ==
+ lhs->lower() &&
+ (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) ==
+ lhs->upper()) {
+ return Range::NewInt32Range(alloc, uint32_t(lhs->lower()) << shift,
+ uint32_t(lhs->upper()) << shift);
+ }
+
+ return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
+}
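+// For example, I[0, 3] << 2 gives I[0, 12], since no bits are shifted out or
+// into the sign bit, whereas I[0, 0x40000000] << 1 would shift into the sign
+// bit and so falls back to the full int32 range.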
+
+Range* Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
+ MOZ_ASSERT(lhs->isInt32());
+ int32_t shift = c & 0x1f;
+ return Range::NewInt32Range(alloc, lhs->lower() >> shift,
+ lhs->upper() >> shift);
+}
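+// For example, I[-8, 17] >> 2 gives I[-2, 4], because an arithmetic shift of
+// both bounds preserves their order.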
+
+Range* Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c) {
+ // ursh's left operand is uint32, not int32, but for range analysis we
+ // currently approximate it as int32. We assume here that the range has
+ // already been adjusted accordingly by our callers.
+ MOZ_ASSERT(lhs->isInt32());
+
+ int32_t shift = c & 0x1f;
+
+ // If the value is always non-negative or always negative, we can simply
+ // compute the correct range by shifting.
+ if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
+ return Range::NewUInt32Range(alloc, uint32_t(lhs->lower()) >> shift,
+ uint32_t(lhs->upper()) >> shift);
+ }
+
+ // Otherwise return the most general range after the shift.
+ return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
+}
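+// For example, I[0, 255] >>> 4 gives I[0, 15], while a range that spans
+// negative values, such as I[-1, 0], falls back to [0, UINT32_MAX >> 4].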
+
+Range* Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+ return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
+}
+
+Range* Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+
+ // Canonicalize the shift range to 0 to 31.
+ int32_t shiftLower = rhs->lower();
+ int32_t shiftUpper = rhs->upper();
+ if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
+ shiftLower = 0;
+ shiftUpper = 31;
+ } else {
+ shiftLower &= 0x1f;
+ shiftUpper &= 0x1f;
+ if (shiftLower > shiftUpper) {
+ shiftLower = 0;
+ shiftUpper = 31;
+ }
+ }
+ MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);
+
+ // The lhs bounds are signed, thus the minimum is the lower bound shifted by
+ // the smallest shift if it is negative, or by the biggest shift otherwise.
+ // The maximum is computed the opposite way.
+ int32_t lhsLower = lhs->lower();
+ int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
+ int32_t lhsUpper = lhs->upper();
+ int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;
+
+ return Range::NewInt32Range(alloc, min, max);
+}
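+// For example, shifting I[-16, 100] right by a shift in the range [1, 3]
+// gives I[-8, 50]: both extremes come from the smallest shift, which keeps
+// the negative lower bound most negative and the positive upper bound
+// largest.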
+
+Range* Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ // ursh's left operand is uint32, not int32, but for range analysis we
+ // currently approximate it as int32. We assume here that the range has
+ // already been adjusted accordingly by our callers.
+ MOZ_ASSERT(lhs->isInt32());
+ MOZ_ASSERT(rhs->isInt32());
+ return Range::NewUInt32Range(
+ alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
+}
+
+Range* Range::abs(TempAllocator& alloc, const Range* op) {
+ int32_t l = op->lower_;
+ int32_t u = op->upper_;
+ FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;
+
+ // Abs never produces a negative zero.
+ NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;
+
+ return new (alloc) Range(
+ std::max(std::max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u), true,
+ std::max(std::max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
+ op->hasInt32Bounds() && l != INT32_MIN, canHaveFractionalPart,
+ canBeNegativeZero, op->max_exponent_);
+}
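+// For example, Range::abs of I[-5, 3] is I[0, 5]; for a range containing
+// INT32_MIN, the upper bound is clamped to INT32_MAX and the int32 upper
+// bound flag is cleared.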
+
+Range* Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ // If either operand is NaN, the result is NaN.
+ if (lhs->canBeNaN() || rhs->canBeNaN()) {
+ return nullptr;
+ }
+
+ FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
+ lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
+ NegativeZeroFlag newMayIncludeNegativeZero =
+ NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);
+
+ return new (alloc) Range(std::min(lhs->lower_, rhs->lower_),
+ lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
+ std::min(lhs->upper_, rhs->upper_),
+ lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
+ newCanHaveFractionalPart, newMayIncludeNegativeZero,
+ std::max(lhs->max_exponent_, rhs->max_exponent_));
+}
+
+Range* Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
+ // If either operand is NaN, the result is NaN.
+ if (lhs->canBeNaN() || rhs->canBeNaN()) {
+ return nullptr;
+ }
+
+ FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
+ lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
+ NegativeZeroFlag newMayIncludeNegativeZero =
+ NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);
+
+ return new (alloc) Range(std::max(lhs->lower_, rhs->lower_),
+ lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
+ std::max(lhs->upper_, rhs->upper_),
+ lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
+ newCanHaveFractionalPart, newMayIncludeNegativeZero,
+ std::max(lhs->max_exponent_, rhs->max_exponent_));
+}
+
+Range* Range::floor(TempAllocator& alloc, const Range* op) {
+ Range* copy = new (alloc) Range(*op);
+ // Decrement the lower bound of the copied range if op has a fractional part
+ // and its lower bound is Int32-defined. We also avoid decrementing when op
+ // has a fractional part but lower_ >= JSVAL_INT_MAX.
+ if (op->canHaveFractionalPart() && op->hasInt32LowerBound()) {
+ copy->setLowerInit(int64_t(copy->lower_) - 1);
+ }
+
+ // Also refine max_exponent_ because floor may have decremented the int
+ // value. If we have int32-defined bounds, just deduce it from those bounds.
+ // Otherwise the value's max_exponent_ may have changed; because we want to
+ // maintain an overestimate, we increment it when we can.
+ if (copy->hasInt32Bounds()) {
+ copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
+ } else if (copy->max_exponent_ < MaxFiniteExponent) {
+ copy->max_exponent_++;
+ }
+
+ copy->canHaveFractionalPart_ = ExcludesFractionalParts;
+ copy->assertInvariants();
+ return copy;
+}
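+// For example, flooring a fractional range F[-1, 3] produces I[-2, 3]: the
+// lower bound is decremented conservatively and the fractional-part flag is
+// cleared.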
+
+Range* Range::ceil(TempAllocator& alloc, const Range* op) {
+ Range* copy = new (alloc) Range(*op);
+
+ // We need to refine max_exponent_ because ceil may have incremented the int
+ // value. If we have got int32 bounds defined, just deduce it using the
+ // defined bounds. Else we can just increment its value, as we are looking to
+ // maintain an over estimation.
+ if (copy->hasInt32Bounds()) {
+ copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
+ } else if (copy->max_exponent_ < MaxFiniteExponent) {
+ copy->max_exponent_++;
+ }
+
+ // If the range is definitely above 0 or below -1, we don't need to include
+ // -0; otherwise we do.
+
+ copy->canBeNegativeZero_ = ((copy->lower_ > 0) || (copy->upper_ <= -1))
+ ? copy->canBeNegativeZero_
+ : IncludesNegativeZero;
+
+ copy->canHaveFractionalPart_ = ExcludesFractionalParts;
+ copy->assertInvariants();
+ return copy;
+}
+
+Range* Range::sign(TempAllocator& alloc, const Range* op) {
+ if (op->canBeNaN()) {
+ return nullptr;
+ }
+
+ return new (alloc) Range(std::max(std::min(op->lower_, 1), -1),
+ std::max(std::min(op->upper_, 1), -1),
+ Range::ExcludesFractionalParts,
+ NegativeZeroFlag(op->canBeNegativeZero()), 0);
+}
+
+Range* Range::NaNToZero(TempAllocator& alloc, const Range* op) {
+ Range* copy = new (alloc) Range(*op);
+ if (copy->canBeNaN()) {
+ copy->max_exponent_ = Range::IncludesInfinity;
+ if (!copy->canBeZero()) {
+ Range zero;
+ zero.setDoubleSingleton(0);
+ copy->unionWith(&zero);
+ }
+ }
+ copy->refineToExcludeNegativeZero();
+ return copy;
+}
+
+bool Range::negativeZeroMul(const Range* lhs, const Range* rhs) {
+ // The result can only be negative zero if both sides are finite and they
+ // have differing signs.
+ return (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
+ (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative());
+}
+
+bool Range::update(const Range* other) {
+ bool changed = lower_ != other->lower_ ||
+ hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
+ upper_ != other->upper_ ||
+ hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
+ canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
+ canBeNegativeZero_ != other->canBeNegativeZero_ ||
+ max_exponent_ != other->max_exponent_;
+ if (changed) {
+ lower_ = other->lower_;
+ hasInt32LowerBound_ = other->hasInt32LowerBound_;
+ upper_ = other->upper_;
+ hasInt32UpperBound_ = other->hasInt32UpperBound_;
+ canHaveFractionalPart_ = other->canHaveFractionalPart_;
+ canBeNegativeZero_ = other->canBeNegativeZero_;
+ max_exponent_ = other->max_exponent_;
+ assertInvariants();
+ }
+
+ return changed;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Range Computation for MIR Nodes
+///////////////////////////////////////////////////////////////////////////////
+
+void MPhi::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32 && type() != MIRType::Double) {
+ return;
+ }
+
+ Range* range = nullptr;
+ for (size_t i = 0, e = numOperands(); i < e; i++) {
+ if (getOperand(i)->block()->unreachable()) {
+ JitSpew(JitSpew_Range, "Ignoring unreachable input %u",
+ getOperand(i)->id());
+ continue;
+ }
+
+ // Peek at the pre-bailout range so we can take a short-cut; if any of
+ // the operands has an unknown range, this phi has an unknown range.
+ if (!getOperand(i)->range()) {
+ return;
+ }
+
+ Range input(getOperand(i));
+
+ if (range) {
+ range->unionWith(&input);
+ } else {
+ range = new (alloc) Range(input);
+ }
+ }
+
+ setRange(range);
+}
+
+void MBeta::computeRange(TempAllocator& alloc) {
+ bool emptyRange = false;
+
+ Range opRange(getOperand(0));
+ Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
+ if (emptyRange) {
+ JitSpew(JitSpew_Range, "Marking block for inst %u unreachable", id());
+ block()->setUnreachableUnchecked();
+ } else {
+ setRange(range);
+ }
+}
+
+void MConstant::computeRange(TempAllocator& alloc) {
+ if (isTypeRepresentableAsDouble()) {
+ double d = numberToDouble();
+ setRange(Range::NewDoubleSingletonRange(alloc, d));
+ } else if (type() == MIRType::Boolean) {
+ bool b = toBoolean();
+ setRange(Range::NewInt32Range(alloc, b, b));
+ }
+}
+
+void MCharCodeAt::computeRange(TempAllocator& alloc) {
+ // ECMA 262 says that the integer will be non-negative and at most 65535.
+ setRange(Range::NewInt32Range(alloc, 0, 65535));
+}
+
+void MClampToUint8::computeRange(TempAllocator& alloc) {
+ setRange(Range::NewUInt32Range(alloc, 0, 255));
+}
+
+void MBitAnd::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32) {
+ return;
+ }
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+ right.wrapAroundToInt32();
+
+ setRange(Range::and_(alloc, &left, &right));
+}
+
+void MBitOr::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32) {
+ return;
+ }
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+ right.wrapAroundToInt32();
+
+ setRange(Range::or_(alloc, &left, &right));
+}
+
+void MBitXor::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32) {
+ return;
+ }
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+ right.wrapAroundToInt32();
+
+ setRange(Range::xor_(alloc, &left, &right));
+}
+
+void MBitNot::computeRange(TempAllocator& alloc) {
+ if (type() == MIRType::Int64) {
+ return;
+ }
+ MOZ_ASSERT(type() == MIRType::Int32);
+
+ Range op(getOperand(0));
+ op.wrapAroundToInt32();
+
+ setRange(Range::not_(alloc, &op));
+}
+
+void MLsh::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32) {
+ return;
+ }
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+
+ MConstant* rhsConst = getOperand(1)->maybeConstantValue();
+ if (rhsConst && rhsConst->type() == MIRType::Int32) {
+ int32_t c = rhsConst->toInt32();
+ setRange(Range::lsh(alloc, &left, c));
+ return;
+ }
+
+ right.wrapAroundToShiftCount();
+ setRange(Range::lsh(alloc, &left, &right));
+}
+
+void MRsh::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32) {
+ return;
+ }
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ left.wrapAroundToInt32();
+
+ MConstant* rhsConst = getOperand(1)->maybeConstantValue();
+ if (rhsConst && rhsConst->type() == MIRType::Int32) {
+ int32_t c = rhsConst->toInt32();
+ setRange(Range::rsh(alloc, &left, c));
+ return;
+ }
+
+ right.wrapAroundToShiftCount();
+ setRange(Range::rsh(alloc, &left, &right));
+}
+
+void MUrsh::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32) {
+ return;
+ }
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+
+ // ursh can be thought of as converting its left operand to uint32, or it
+ // can be thought of as converting its left operand to int32, and then
+ // reinterpreting the int32 bits as a uint32 value. Both approaches yield
+ // the same result. Since we lack support for full uint32 ranges, we use
+ // the second interpretation, though it does cause us to be conservative.
+ left.wrapAroundToInt32();
+ right.wrapAroundToShiftCount();
+
+ MConstant* rhsConst = getOperand(1)->maybeConstantValue();
+ if (rhsConst && rhsConst->type() == MIRType::Int32) {
+ int32_t c = rhsConst->toInt32();
+ setRange(Range::ursh(alloc, &left, c));
+ } else {
+ setRange(Range::ursh(alloc, &left, &right));
+ }
+
+ MOZ_ASSERT(range()->lower() >= 0);
+}
+
+void MAbs::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32 && type() != MIRType::Double) {
+ return;
+ }
+
+ Range other(getOperand(0));
+ Range* next = Range::abs(alloc, &other);
+ if (implicitTruncate_) {
+ next->wrapAroundToInt32();
+ }
+ setRange(next);
+}
+
+void MFloor::computeRange(TempAllocator& alloc) {
+ Range other(getOperand(0));
+ setRange(Range::floor(alloc, &other));
+}
+
+void MCeil::computeRange(TempAllocator& alloc) {
+ Range other(getOperand(0));
+ setRange(Range::ceil(alloc, &other));
+}
+
+void MClz::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32) {
+ return;
+ }
+ setRange(Range::NewUInt32Range(alloc, 0, 32));
+}
+
+void MCtz::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32) {
+ return;
+ }
+ setRange(Range::NewUInt32Range(alloc, 0, 32));
+}
+
+void MPopcnt::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32) {
+ return;
+ }
+ setRange(Range::NewUInt32Range(alloc, 0, 32));
+}
+
+void MMinMax::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32 && type() != MIRType::Double) {
+ return;
+ }
+
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ setRange(isMax() ? Range::max(alloc, &left, &right)
+ : Range::min(alloc, &left, &right));
+}
+
+void MAdd::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32 && type() != MIRType::Double) {
+ return;
+ }
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ Range* next = Range::add(alloc, &left, &right);
+ if (isTruncated()) {
+ next->wrapAroundToInt32();
+ }
+ setRange(next);
+}
+
+void MSub::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32 && type() != MIRType::Double) {
+ return;
+ }
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ Range* next = Range::sub(alloc, &left, &right);
+ if (isTruncated()) {
+ next->wrapAroundToInt32();
+ }
+ setRange(next);
+}
+
+void MMul::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32 && type() != MIRType::Double) {
+ return;
+ }
+ Range left(getOperand(0));
+ Range right(getOperand(1));
+ if (canBeNegativeZero()) {
+ canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
+ }
+ Range* next = Range::mul(alloc, &left, &right);
+ if (!next->canBeNegativeZero()) {
+ canBeNegativeZero_ = false;
+ }
+ // Truncated multiplications could overflow in both directions
+ if (isTruncated()) {
+ next->wrapAroundToInt32();
+ }
+ setRange(next);
+}
+
+void MMod::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32 && type() != MIRType::Double) {
+ return;
+ }
+ Range lhs(getOperand(0));
+ Range rhs(getOperand(1));
+
+ // If either operand is a NaN, the result is NaN. This also conservatively
+ // handles Infinity cases.
+ if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
+ return;
+ }
+
+ // If RHS can be zero, the result can be NaN.
+ if (rhs.lower() <= 0 && rhs.upper() >= 0) {
+ return;
+ }
+
+ // If both operands are non-negative integers, we can optimize this to an
+ // unsigned mod.
+ if (type() == MIRType::Int32 && rhs.lower() > 0) {
+ bool hasDoubles = lhs.lower() < 0 || lhs.canHaveFractionalPart() ||
+ rhs.canHaveFractionalPart();
+ // It is not possible to check that lhs.lower() >= 0, since the range
+ // of a ursh with rhs a 0 constant is wrapped around the int32 range in
+ // Range::Range(). However, IsUint32Type() will only return true for
+ // nodes that lie in the range [0, UINT32_MAX].
+ bool hasUint32s =
+ IsUint32Type(getOperand(0)) &&
+ getOperand(1)->type() == MIRType::Int32 &&
+ (IsUint32Type(getOperand(1)) || getOperand(1)->isConstant());
+ if (!hasDoubles || hasUint32s) {
+ unsigned_ = true;
+ }
+ }
+
+ // For unsigned mod, we have to convert both operands to unsigned.
+ // Note that we handled the case of a zero rhs above.
+ if (unsigned_) {
+ // The result of an unsigned mod will never be unsigned-greater than
+ // either operand.
+ uint32_t lhsBound = std::max<uint32_t>(lhs.lower(), lhs.upper());
+ uint32_t rhsBound = std::max<uint32_t>(rhs.lower(), rhs.upper());
+
+ // If either range crosses through -1 as a signed value, it could be
+ // the maximum unsigned value when interpreted as unsigned. If the range
+ // doesn't include -1, then the simple max value we computed above is
+ // correct.
+ if (lhs.lower() <= -1 && lhs.upper() >= -1) {
+ lhsBound = UINT32_MAX;
+ }
+ if (rhs.lower() <= -1 && rhs.upper() >= -1) {
+ rhsBound = UINT32_MAX;
+ }
+
+ // The result will never be equal to the rhs, and we shouldn't have
+ // any rounding to worry about.
+ MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
+ --rhsBound;
+
+ // This gives us two upper bounds, so we can take the best one.
+ setRange(Range::NewUInt32Range(alloc, 0, std::min(lhsBound, rhsBound)));
+ return;
+ }
+
+ // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
+ // First, the absolute value of the result will always be less than the
+ // absolute value of rhs. (And if rhs is zero, the result is NaN).
+ int64_t a = Abs<int64_t>(rhs.lower());
+ int64_t b = Abs<int64_t>(rhs.upper());
+ if (a == 0 && b == 0) {
+ return;
+ }
+ int64_t rhsAbsBound = std::max(a, b);
+
+ // If the value is known to be integer, less-than abs(rhs) is equivalent
+ // to less-than-or-equal abs(rhs)-1. This is important for being able to
+ // say that the result of x%256 is an 8-bit unsigned number.
+ if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) {
+ --rhsAbsBound;
+ }
+
+ // Next, the absolute value of the result will never be greater than the
+ // absolute value of lhs.
+ int64_t lhsAbsBound =
+ std::max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));
+
+ // This gives us two upper bounds, so we can take the best one.
+ int64_t absBound = std::min(lhsAbsBound, rhsAbsBound);
+
+ // Now consider the sign of the result.
+ // If lhs is non-negative, the result will be non-negative.
+ // If lhs is non-positive, the result will be non-positive.
+ int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
+ int64_t upper = lhs.upper() <= 0 ? 0 : absBound;
+
+ Range::FractionalPartFlag newCanHaveFractionalPart =
+ Range::FractionalPartFlag(lhs.canHaveFractionalPart() ||
+ rhs.canHaveFractionalPart());
+
+ // If the lhs can have the sign bit set and we can return a zero, it'll be a
+ // negative zero.
+ Range::NegativeZeroFlag newMayIncludeNegativeZero =
+ Range::NegativeZeroFlag(lhs.canHaveSignBitSet());
+
+ setRange(new (alloc) Range(lower, upper, newCanHaveFractionalPart,
+ newMayIncludeNegativeZero,
+ std::min(lhs.exponent(), rhs.exponent())));
+}
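+// For example, an int32 lhs in [-10, 100] modulo a constant 3 gets the range
+// I[-2, 2]: the magnitude is bounded by abs(rhs) - 1 for integer operands,
+// and the sign follows the sign of lhs.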
+
+void MDiv::computeRange(TempAllocator& alloc) {
+ if (type() != MIRType::Int32 && type() != MIRType::Double) {
+ return;
+ }
+ Range lhs(getOperand(0));
+ Range rhs(getOperand(1));
+
+ // If either operand is a NaN, the result is NaN. This also conservatively
+ // handles Infinity cases.
+ if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
+ return;
+ }
+
+ // Something simple for now: When dividing by a positive rhs, the result
+ // won't be further from zero than lhs.
+ if (lhs.lower() >= 0 && rhs.lower() >= 1) {
+ setRange(new (alloc) Range(0, lhs.upper(), Range::IncludesFractionalParts,
+ Range::IncludesNegativeZero, lhs.exponent()));
+ } else if (unsigned_ && rhs.lower() >= 1) {
+ // We shouldn't set the unsigned flag if the inputs can have
+ // fractional parts.
+ MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
+ // We shouldn't set the unsigned flag if the inputs can be
+ // negative zero.
+ MOZ_ASSERT(!lhs.canBeNegativeZero() && !rhs.canBeNegativeZero());
+ // Unsigned division by a non-zero rhs will return a uint32 value.
+ setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
+ }
+}
+
+void MSqrt::computeRange(TempAllocator& alloc) {
+ Range input(getOperand(0));
+
+ // If either operand is a NaN, the result is NaN. This also conservatively
+ // handles Infinity cases.
+ if (!input.hasInt32Bounds()) {
+ return;
+ }
+
+ // Sqrt of a negative non-zero value is NaN.
+ if (input.lower() < 0) {
+ return;
+ }
+
+ // Something simple for now: When taking the sqrt of a positive value, the
+ // result won't be further from zero than the input.
+ // And, sqrt of an integer may have a fractional part.
+ setRange(new (alloc) Range(0, input.upper(), Range::IncludesFractionalParts,
+ input.canBeNegativeZero(), input.exponent()));
+}
+
+void MToDouble::computeRange(TempAllocator& alloc) {
+ setRange(new (alloc) Range(getOperand(0)));
+}
+
+void MToFloat32::computeRange(TempAllocator& alloc) {}
+
+void MTruncateToInt32::computeRange(TempAllocator& alloc) {
+ Range* output = new (alloc) Range(getOperand(0));
+ output->wrapAroundToInt32();
+ setRange(output);
+}
+
+void MToNumberInt32::computeRange(TempAllocator& alloc) {
+ // No clamping since this computes the range *before* bailouts.
+ setRange(new (alloc) Range(getOperand(0)));
+}
+
+void MBooleanToInt32::computeRange(TempAllocator& alloc) {
+ setRange(Range::NewUInt32Range(alloc, 0, 1));
+}
+
+void MLimitedTruncate::computeRange(TempAllocator& alloc) {
+ Range* output = new (alloc) Range(input());
+ setRange(output);
+}
+
+static Range* GetArrayBufferViewRange(TempAllocator& alloc, Scalar::Type type) {
+ switch (type) {
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8:
+ return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
+ case Scalar::Uint16:
+ return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
+ case Scalar::Uint32:
+ return Range::NewUInt32Range(alloc, 0, UINT32_MAX);
+
+ case Scalar::Int8:
+ return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
+ case Scalar::Int16:
+ return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
+ case Scalar::Int32:
+ return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
+
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::MaxTypedArrayViewType:
+ break;
+ }
+ return nullptr;
+}
+
+void MLoadUnboxedScalar::computeRange(TempAllocator& alloc) {
+ // We have an Int32 type and if this is a UInt32 load it may produce a value
+ // outside of our range, but we have a bailout to handle those cases.
+ setRange(GetArrayBufferViewRange(alloc, storageType()));
+}
+
+void MLoadDataViewElement::computeRange(TempAllocator& alloc) {
+ // We have an Int32 type and if this is a UInt32 load it may produce a value
+ // outside of our range, but we have a bailout to handle those cases.
+ setRange(GetArrayBufferViewRange(alloc, storageType()));
+}
+
+void MArrayLength::computeRange(TempAllocator& alloc) {
+ // Array lengths can go up to UINT32_MAX. We will bail out if the array
+ // length > INT32_MAX.
+ MOZ_ASSERT(type() == MIRType::Int32);
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+}
+
+void MInitializedLength::computeRange(TempAllocator& alloc) {
+ setRange(
+ Range::NewUInt32Range(alloc, 0, NativeObject::MAX_DENSE_ELEMENTS_COUNT));
+}
+
+void MArrayBufferViewLength::computeRange(TempAllocator& alloc) {
+ if constexpr (ArrayBufferObject::MaxByteLength <= INT32_MAX) {
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+ }
+}
+
+void MArrayBufferViewByteOffset::computeRange(TempAllocator& alloc) {
+ if constexpr (ArrayBufferObject::MaxByteLength <= INT32_MAX) {
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+ }
+}
+
+void MTypedArrayElementSize::computeRange(TempAllocator& alloc) {
+ constexpr auto MaxTypedArraySize = sizeof(double);
+
+#define ASSERT_MAX_SIZE(_, T, N) \
+ static_assert(sizeof(T) <= MaxTypedArraySize, \
+ "unexpected typed array type exceeding 64-bits storage");
+ JS_FOR_EACH_TYPED_ARRAY(ASSERT_MAX_SIZE)
+#undef ASSERT_MAX_SIZE
+
+ setRange(Range::NewUInt32Range(alloc, 0, MaxTypedArraySize));
+}
+
+void MStringLength::computeRange(TempAllocator& alloc) {
+ static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
+ "NewUInt32Range requires a uint32 value");
+ setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
+}
+
+void MArgumentsLength::computeRange(TempAllocator& alloc) {
+ // This is a conservative upper bound on what |TooManyActualArguments|
+ // checks. If exceeded, Ion will not be entered in the first place.
+ static_assert(ARGS_LENGTH_MAX <= UINT32_MAX,
+ "NewUInt32Range requires a uint32 value");
+ setRange(Range::NewUInt32Range(alloc, 0, ARGS_LENGTH_MAX));
+}
+
+void MBoundsCheck::computeRange(TempAllocator& alloc) {
+ // Just transfer the incoming index range to the output. The length() is
+ // also interesting, but it is handled as a bailout check, and we're
+ // computing a pre-bailout range here.
+ setRange(new (alloc) Range(index()));
+}
+
+void MSpectreMaskIndex::computeRange(TempAllocator& alloc) {
+ // Just transfer the incoming index range to the output for now.
+ setRange(new (alloc) Range(index()));
+}
+
+void MInt32ToIntPtr::computeRange(TempAllocator& alloc) {
+ setRange(new (alloc) Range(input()));
+}
+
+void MNonNegativeIntPtrToInt32::computeRange(TempAllocator& alloc) {
+ // We will bail out if the IntPtr value > INT32_MAX.
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+}
+
+void MArrayPush::computeRange(TempAllocator& alloc) {
+ // MArrayPush returns the new array length. It bails out if the new length
+ // doesn't fit in an Int32.
+ MOZ_ASSERT(type() == MIRType::Int32);
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+}
+
+void MMathFunction::computeRange(TempAllocator& alloc) {
+ Range opRange(getOperand(0));
+ switch (function()) {
+ case UnaryMathFunction::SinNative:
+ case UnaryMathFunction::SinFdlibm:
+ case UnaryMathFunction::CosNative:
+ case UnaryMathFunction::CosFdlibm:
+ if (!opRange.canBeInfiniteOrNaN()) {
+ setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void MSign::computeRange(TempAllocator& alloc) {
+ Range opRange(getOperand(0));
+ setRange(Range::sign(alloc, &opRange));
+}
+
+void MRandom::computeRange(TempAllocator& alloc) {
+ Range* r = Range::NewDoubleRange(alloc, 0.0, 1.0);
+
+ // Random never returns negative zero.
+ r->refineToExcludeNegativeZero();
+
+ setRange(r);
+}
+
+void MNaNToZero::computeRange(TempAllocator& alloc) {
+ Range other(input());
+ setRange(Range::NaNToZero(alloc, &other));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Range Analysis
+///////////////////////////////////////////////////////////////////////////////
+
+static BranchDirection NegateBranchDirection(BranchDirection dir) {
+ return (dir == FALSE_BRANCH) ? TRUE_BRANCH : FALSE_BRANCH;
+}
+
+bool RangeAnalysis::analyzeLoop(MBasicBlock* header) {
+ MOZ_ASSERT(header->hasUniqueBackedge());
+
+ // Try to compute an upper bound on the number of times the loop backedge
+ // will be taken. Look for tests that dominate the backedge and which have
+ // an edge leaving the loop body.
+ MBasicBlock* backedge = header->backedge();
+
+ // Ignore trivial infinite loops.
+ if (backedge == header) {
+ return true;
+ }
+
+ bool canOsr;
+ size_t numBlocks = MarkLoopBlocks(graph_, header, &canOsr);
+
+ // Ignore broken loops.
+ if (numBlocks == 0) {
+ return true;
+ }
+
+ LoopIterationBound* iterationBound = nullptr;
+
+ MBasicBlock* block = backedge;
+ do {
+ BranchDirection direction;
+ MTest* branch = block->immediateDominatorBranch(&direction);
+
+ if (block == block->immediateDominator()) {
+ break;
+ }
+
+ block = block->immediateDominator();
+
+ if (branch) {
+ direction = NegateBranchDirection(direction);
+ MBasicBlock* otherBlock = branch->branchSuccessor(direction);
+ if (!otherBlock->isMarked()) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ iterationBound = analyzeLoopIterationCount(header, branch, direction);
+ if (iterationBound) {
+ break;
+ }
+ }
+ }
+ } while (block != header);
+
+ if (!iterationBound) {
+ UnmarkLoopBlocks(graph_, header);
+ return true;
+ }
+
+ if (!loopIterationBounds.append(iterationBound)) {
+ return false;
+ }
+
+#ifdef DEBUG
+ if (JitSpewEnabled(JitSpew_Range)) {
+ Sprinter sp(GetJitContext()->cx);
+ if (!sp.init()) {
+ return false;
+ }
+ iterationBound->boundSum.dump(sp);
+ JitSpew(JitSpew_Range, "computed symbolic bound on backedges: %s",
+ sp.string());
+ }
+#endif
+
+ // Try to compute symbolic bounds for the phi nodes at the head of this
+ // loop, expressed in terms of the iteration bound just computed.
+
+ for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd();
+ iter++) {
+ analyzeLoopPhi(iterationBound, *iter);
+ }
+
+ if (!mir->compilingWasm() && !mir->outerInfo().hadBoundsCheckBailout()) {
+ // Try to hoist any bounds checks from the loop using symbolic bounds.
+
+ Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());
+
+ for (ReversePostorderIterator iter(graph_.rpoBegin(header));
+ iter != graph_.rpoEnd(); iter++) {
+ MBasicBlock* block = *iter;
+ if (!block->isMarked()) {
+ continue;
+ }
+
+ for (MDefinitionIterator iter(block); iter; iter++) {
+ MDefinition* def = *iter;
+ if (def->isBoundsCheck() && def->isMovable()) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
+ if (!hoistedChecks.append(def->toBoundsCheck())) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ // Note: replace all uses of the original bounds check with the
+ // actual index. This is usually done during bounds check elimination,
+ // but in this case it's safe to do it here since the load/store is
+ // definitely not loop-invariant, so we will never move it before
+ // one of the bounds checks we just added.
+ for (size_t i = 0; i < hoistedChecks.length(); i++) {
+ MBoundsCheck* ins = hoistedChecks[i];
+ ins->replaceAllUsesWith(ins->index());
+ ins->block()->discard(ins);
+ }
+ }
+
+ UnmarkLoopBlocks(graph_, header);
+ return true;
+}
+
+// Unbox beta nodes in order to hoist instructions properly, and not be limited
+// by the beta nodes which are added after each branch.
+static inline MDefinition* DefinitionOrBetaInputDefinition(MDefinition* ins) {
+ while (ins->isBeta()) {
+ ins = ins->toBeta()->input();
+ }
+ return ins;
+}
+
+LoopIterationBound* RangeAnalysis::analyzeLoopIterationCount(
+ MBasicBlock* header, MTest* test, BranchDirection direction) {
+ SimpleLinearSum lhs(nullptr, 0);
+ MDefinition* rhs;
+ bool lessEqual;
+ if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual)) {
+ return nullptr;
+ }
+
+ // Ensure the rhs is a loop invariant term.
+ if (rhs && rhs->block()->isMarked()) {
+ if (lhs.term && lhs.term->block()->isMarked()) {
+ return nullptr;
+ }
+ MDefinition* temp = lhs.term;
+ lhs.term = rhs;
+ rhs = temp;
+ if (!SafeSub(0, lhs.constant, &lhs.constant)) {
+ return nullptr;
+ }
+ lessEqual = !lessEqual;
+ }
+
+ MOZ_ASSERT_IF(rhs, !rhs->block()->isMarked());
+
+ // Ensure the lhs is a phi node from the start of the loop body.
+ if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header) {
+ return nullptr;
+ }
+
+ // Check that the value of the lhs changes by a constant amount with each
+ // loop iteration. This requires that the lhs be written in every loop
+ // iteration with a value that is a constant difference from its value at
+ // the start of the iteration.
+
+ if (lhs.term->toPhi()->numOperands() != 2) {
+ return nullptr;
+ }
+
+ // The first operand of the phi should be the lhs' value at the start of
+ // the first executed iteration, and not a value written which could
+ // replace the second operand below during the middle of execution.
+ MDefinition* lhsInitial = lhs.term->toPhi()->getLoopPredecessorOperand();
+ if (lhsInitial->block()->isMarked()) {
+ return nullptr;
+ }
+
+ // The second operand of the phi should be a value written by an add/sub
+ // in every loop iteration, i.e. in a block which dominates the backedge.
+ MDefinition* lhsWrite = DefinitionOrBetaInputDefinition(
+ lhs.term->toPhi()->getLoopBackedgeOperand());
+ if (!lhsWrite->isAdd() && !lhsWrite->isSub()) {
+ return nullptr;
+ }
+ if (!lhsWrite->block()->isMarked()) {
+ return nullptr;
+ }
+ MBasicBlock* bb = header->backedge();
+ for (; bb != lhsWrite->block() && bb != header;
+ bb = bb->immediateDominator()) {
+ }
+ if (bb != lhsWrite->block()) {
+ return nullptr;
+ }
+
+ SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);
+
+ // Check that the value of the lhs at the backedge is of the form
+ // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
+ // of the iteration, and not that written to lhs in a previous iteration,
+ // as such a previous value could not appear directly in the addition:
+ // it could not be stored in lhs as the lhs add/sub executes in every
+ // iteration, and if it were stored in another variable its use here would
+ // be as an operand to a phi node for that variable.
+ if (lhsModified.term != lhs.term) {
+ return nullptr;
+ }
+
+ LinearSum iterationBound(alloc());
+ LinearSum currentIteration(alloc());
+
+ if (lhsModified.constant == 1 && !lessEqual) {
+ // The value of lhs is 'initial(lhs) + iterCount' and this will end
+ // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
+ // on the number of backedges executed is:
+ //
+ // initial(lhs) + iterCount + lhsN == rhs
+    // iterCount == rhs - initial(lhs) - lhsN
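+    //
+    // For example, in a loop like |for (i = 0; i < n; i++)| we have
+    // lhsN == 0 and initial(lhs) == 0, so this gives iterCount == n.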
+
+ if (rhs) {
+ if (!iterationBound.add(rhs, 1)) {
+ return nullptr;
+ }
+ }
+ if (!iterationBound.add(lhsInitial, -1)) {
+ return nullptr;
+ }
+
+ int32_t lhsConstant;
+ if (!SafeSub(0, lhs.constant, &lhsConstant)) {
+ return nullptr;
+ }
+ if (!iterationBound.add(lhsConstant)) {
+ return nullptr;
+ }
+
+ if (!currentIteration.add(lhs.term, 1)) {
+ return nullptr;
+ }
+ if (!currentIteration.add(lhsInitial, -1)) {
+ return nullptr;
+ }
+ } else if (lhsModified.constant == -1 && lessEqual) {
+ // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
+ // case, an upper bound on the number of backedges executed is:
+ //
+ // initial(lhs) - iterCount + lhsN == rhs
+ // iterCount == initial(lhs) - rhs + lhsN
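+    //
+    // For example, in a loop like |for (i = n; i > 0; i--)| with lhsN == 0
+    // this gives iterCount == initial(lhs) == n.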
+
+ if (!iterationBound.add(lhsInitial, 1)) {
+ return nullptr;
+ }
+ if (rhs) {
+ if (!iterationBound.add(rhs, -1)) {
+ return nullptr;
+ }
+ }
+ if (!iterationBound.add(lhs.constant)) {
+ return nullptr;
+ }
+
+ if (!currentIteration.add(lhsInitial, 1)) {
+ return nullptr;
+ }
+ if (!currentIteration.add(lhs.term, -1)) {
+ return nullptr;
+ }
+ } else {
+ return nullptr;
+ }
+
+ return new (alloc())
+ LoopIterationBound(header, test, iterationBound, currentIteration);
+}
+
+void RangeAnalysis::analyzeLoopPhi(LoopIterationBound* loopBound, MPhi* phi) {
+ // Given a bound on the number of backedges taken, compute an upper and
+ // lower bound for a phi node that may change by a constant amount each
+ // iteration. Unlike for the case when computing the iteration bound
+ // itself, the phi does not need to change the same amount every iteration,
+ // but is required to change at most N and be either nondecreasing or
+ // nonincreasing.
+
+ MOZ_ASSERT(phi->numOperands() == 2);
+
+ MDefinition* initial = phi->getLoopPredecessorOperand();
+ if (initial->block()->isMarked()) {
+ return;
+ }
+
+ SimpleLinearSum modified =
+ ExtractLinearSum(phi->getLoopBackedgeOperand(), MathSpace::Infinite);
+
+ if (modified.term != phi || modified.constant == 0) {
+ return;
+ }
+
+ if (!phi->range()) {
+ phi->setRange(new (alloc()) Range(phi));
+ }
+
+ LinearSum initialSum(alloc());
+ if (!initialSum.add(initial, 1)) {
+ return;
+ }
+
+ // The phi may change by N each iteration, and is either nondecreasing or
+ // nonincreasing. initial(phi) is either a lower or upper bound for the
+ // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
+ // at all points within the loop, provided that loopBound >= 0.
+ //
+ // We are more interested, however, in the bound for phi at points
+ // dominated by the loop bound's test; if the test dominates e.g. a bounds
+ // check we want to hoist from the loop, using the value of the phi at the
+ // head of the loop for this will usually be too imprecise to hoist the
+ // check. These points will execute only if the backedge executes at least
+ // one more time (as the test passed and the test dominates the backedge),
+ // so we know both that loopBound >= 1 and that the phi's value has changed
+ // at most loopBound - 1 times. Thus, another upper or lower bound for the
+ // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
+ // ensure that loopBound >= 0.
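+  //
+  // For example, for the counter phi |i| of |for (i = 0; i < n; i++)| the
+  // symbolic lower bound is initial(phi) == 0 and, at points dominated by
+  // the loop test, the symbolic upper bound is 0 + (loopBound - 1) * 1,
+  // i.e. n - 1.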
+
+ LinearSum limitSum(loopBound->boundSum);
+ if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum)) {
+ return;
+ }
+
+ int32_t negativeConstant;
+ if (!SafeSub(0, modified.constant, &negativeConstant) ||
+ !limitSum.add(negativeConstant)) {
+ return;
+ }
+
+ Range* initRange = initial->range();
+ if (modified.constant > 0) {
+ if (initRange && initRange->hasInt32LowerBound()) {
+ phi->range()->refineLower(initRange->lower());
+ }
+ phi->range()->setSymbolicLower(
+ SymbolicBound::New(alloc(), nullptr, initialSum));
+ phi->range()->setSymbolicUpper(
+ SymbolicBound::New(alloc(), loopBound, limitSum));
+ } else {
+ if (initRange && initRange->hasInt32UpperBound()) {
+ phi->range()->refineUpper(initRange->upper());
+ }
+ phi->range()->setSymbolicUpper(
+ SymbolicBound::New(alloc(), nullptr, initialSum));
+ phi->range()->setSymbolicLower(
+ SymbolicBound::New(alloc(), loopBound, limitSum));
+ }
+
+ JitSpew(JitSpew_Range, "added symbolic range on %u", phi->id());
+ SpewRange(phi);
+}
+
+// Whether bound is valid at the specified bounds check instruction in a loop,
+// and may be used to hoist ins.
+static inline bool SymbolicBoundIsValid(MBasicBlock* header, MBoundsCheck* ins,
+ const SymbolicBound* bound) {
+ if (!bound->loop) {
+ return true;
+ }
+ if (ins->block() == header) {
+ return false;
+ }
+ MBasicBlock* bb = ins->block()->immediateDominator();
+ while (bb != header && bb != bound->loop->test->block()) {
+ bb = bb->immediateDominator();
+ }
+ return bb == bound->loop->test->block();
+}
+
+bool RangeAnalysis::tryHoistBoundsCheck(MBasicBlock* header,
+ MBoundsCheck* ins) {
+ // The bounds check's length must be loop invariant or a constant.
+ MDefinition* length = DefinitionOrBetaInputDefinition(ins->length());
+ if (length->block()->isMarked() && !length->isConstant()) {
+ return false;
+ }
+
+ // The bounds check's index should not be loop invariant (else we would
+ // already have hoisted it during LICM).
+ SimpleLinearSum index = ExtractLinearSum(ins->index());
+ if (!index.term || !index.term->block()->isMarked()) {
+ return false;
+ }
+
+ // Check for a symbolic lower and upper bound on the index. If either
+ // condition depends on an iteration bound for the loop, only hoist if
+ // the bounds check is dominated by the iteration bound's test.
+ if (!index.term->range()) {
+ return false;
+ }
+ const SymbolicBound* lower = index.term->range()->symbolicLower();
+ if (!lower || !SymbolicBoundIsValid(header, ins, lower)) {
+ return false;
+ }
+ const SymbolicBound* upper = index.term->range()->symbolicUpper();
+ if (!upper || !SymbolicBoundIsValid(header, ins, upper)) {
+ return false;
+ }
+
+ MBasicBlock* preLoop = header->loopPredecessor();
+ MOZ_ASSERT(!preLoop->isMarked());
+
+ MDefinition* lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum,
+ BailoutKind::HoistBoundsCheck);
+ if (!lowerTerm) {
+ return false;
+ }
+
+ MDefinition* upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum,
+ BailoutKind::HoistBoundsCheck);
+ if (!upperTerm) {
+ return false;
+ }
+
+ // We are checking that index + indexConstant >= 0, and know that
+ // index >= lowerTerm + lowerConstant. Thus, check that:
+ //
+ // lowerTerm + lowerConstant + indexConstant >= 0
+ // lowerTerm >= -lowerConstant - indexConstant
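+  //
+  // For example (illustrative), if the index is |i - 2| (indexConstant == -2)
+  // and the symbolic lower bound on |i| is |start + 0|, the hoisted check
+  // becomes |start >= 2|.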
+
+ int32_t lowerConstant = 0;
+ if (!SafeSub(lowerConstant, index.constant, &lowerConstant)) {
+ return false;
+ }
+ if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant)) {
+ return false;
+ }
+
+ // We are checking that index < boundsLength, and know that
+ // index <= upperTerm + upperConstant. Thus, check that:
+ //
+ // upperTerm + upperConstant < boundsLength
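+  //
+  // For example (illustrative), if the symbolic upper bound on |i| is |n - 1|
+  // and indexConstant == -2, the hoisted check becomes |n - 3 < boundsLength|.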
+
+ int32_t upperConstant = index.constant;
+ if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant)) {
+ return false;
+ }
+
+ // Hoist the loop invariant lower bounds checks.
+ MBoundsCheckLower* lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
+ lowerCheck->setMinimum(lowerConstant);
+ lowerCheck->computeRange(alloc());
+ lowerCheck->collectRangeInfoPreTrunc();
+ lowerCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
+ preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
+
+ // A common pattern for iterating over typed arrays is this:
+ //
+ // for (var i = 0; i < ta.length; i++) {
+ // use ta[i];
+ // }
+ //
+ // Here |upperTerm| (= ta.length) is a NonNegativeIntPtrToInt32 instruction.
+ // Unwrap this if |length| is also an IntPtr so that we don't add an
+ // unnecessary bounds check and Int32ToIntPtr below.
+ if (upperTerm->isNonNegativeIntPtrToInt32() &&
+ length->type() == MIRType::IntPtr) {
+ upperTerm = upperTerm->toNonNegativeIntPtrToInt32()->input();
+ }
+
+ // Hoist the loop invariant upper bounds checks.
+ if (upperTerm != length || upperConstant >= 0) {
+ // Hoist the bound check's length if it isn't already loop invariant.
+ if (length->block()->isMarked()) {
+ MOZ_ASSERT(length->isConstant());
+ MInstruction* lengthIns = length->toInstruction();
+ lengthIns->block()->moveBefore(preLoop->lastIns(), lengthIns);
+ }
+
+ // If the length is IntPtr, convert the upperTerm to that as well for the
+ // bounds check.
+ if (length->type() == MIRType::IntPtr &&
+ upperTerm->type() == MIRType::Int32) {
+ upperTerm = MInt32ToIntPtr::New(alloc(), upperTerm);
+ upperTerm->computeRange(alloc());
+ upperTerm->collectRangeInfoPreTrunc();
+ preLoop->insertBefore(preLoop->lastIns(), upperTerm->toInstruction());
+ }
+
+ MBoundsCheck* upperCheck = MBoundsCheck::New(alloc(), upperTerm, length);
+ upperCheck->setMinimum(upperConstant);
+ upperCheck->setMaximum(upperConstant);
+ upperCheck->computeRange(alloc());
+ upperCheck->collectRangeInfoPreTrunc();
+ upperCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
+ preLoop->insertBefore(preLoop->lastIns(), upperCheck);
+ }
+
+ return true;
+}
+
+bool RangeAnalysis::analyze() {
+ JitSpew(JitSpew_Range, "Doing range propagation");
+
+ for (ReversePostorderIterator iter(graph_.rpoBegin());
+ iter != graph_.rpoEnd(); iter++) {
+ MBasicBlock* block = *iter;
+ // No blocks are supposed to be unreachable, except when we have an OSR
+    // block, in which case the Value Numbering phase adds fixup blocks which
+ // are unreachable.
+ MOZ_ASSERT(!block->unreachable() || graph_.osrBlock());
+
+ // If the block's immediate dominator is unreachable, the block is
+ // unreachable. Iterating in RPO, we'll always see the immediate
+ // dominator before the block.
+ if (block->immediateDominator()->unreachable()) {
+ block->setUnreachableUnchecked();
+ continue;
+ }
+
+ for (MDefinitionIterator iter(block); iter; iter++) {
+ MDefinition* def = *iter;
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ def->computeRange(alloc());
+ JitSpew(JitSpew_Range, "computing range on %u", def->id());
+ SpewRange(def);
+ }
+
+ // Beta node range analysis may have marked this block unreachable. If
+ // so, it's no longer interesting to continue processing it.
+ if (block->unreachable()) {
+ continue;
+ }
+
+ if (block->isLoopHeader()) {
+ if (!analyzeLoop(block)) {
+ return false;
+ }
+ }
+
+ // First pass at collecting range info - while the beta nodes are still
+ // around and before truncation.
+ for (MInstructionIterator iter(block->begin()); iter != block->end();
+ iter++) {
+ iter->collectRangeInfoPreTrunc();
+ }
+ }
+
+ return true;
+}
+
+bool RangeAnalysis::addRangeAssertions() {
+ if (!JitOptions.checkRangeAnalysis) {
+ return true;
+ }
+
+ // Check the computed range for this instruction, if the option is set. Note
+ // that this code is quite invasive; it adds numerous additional
+ // instructions for each MInstruction with a computed range, and it uses
+ // registers, so it also affects register allocation.
+ for (ReversePostorderIterator iter(graph_.rpoBegin());
+ iter != graph_.rpoEnd(); iter++) {
+ MBasicBlock* block = *iter;
+
+ // Do not add assertions in unreachable blocks.
+ if (block->unreachable()) {
+ continue;
+ }
+
+ for (MDefinitionIterator iter(block); iter; iter++) {
+ MDefinition* ins = *iter;
+
+ // Perform range checking for all numeric and numeric-like types.
+ if (!IsNumberType(ins->type()) && ins->type() != MIRType::Boolean &&
+ ins->type() != MIRType::Value && ins->type() != MIRType::IntPtr) {
+ continue;
+ }
+
+ // MIsNoIter is fused with the MTest that follows it and emitted as
+ // LIsNoIterAndBranch. Similarly, MIteratorHasIndices is fused to
+ // become LIteratorHasIndicesAndBranch. Skip them to avoid complicating
+ // lowering.
+ if (ins->isIsNoIter() || ins->isIteratorHasIndices()) {
+ MOZ_ASSERT(ins->hasOneUse());
+ continue;
+ }
+
+ Range r(ins);
+
+ MOZ_ASSERT_IF(ins->type() == MIRType::Int64, r.isUnknown());
+
+ // Don't insert assertions if there's nothing interesting to assert.
+ if (r.isUnknown() ||
+ (ins->type() == MIRType::Int32 && r.isUnknownInt32())) {
+ continue;
+ }
+
+ // Don't add a use to an instruction that is recovered on bailout.
+ if (ins->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ MAssertRange* guard =
+ MAssertRange::New(alloc(), ins, new (alloc()) Range(r));
+
+ // Beta nodes and interrupt checks are required to be located at the
+ // beginnings of basic blocks, so we must insert range assertions
+ // after any such instructions.
+ MInstruction* insertAt = nullptr;
+ if (block->graph().osrBlock() == block) {
+ insertAt = ins->toInstruction();
+ } else {
+ insertAt = block->safeInsertTop(ins);
+ }
+
+ if (insertAt == *iter) {
+ block->insertAfter(insertAt, guard);
+ } else {
+ block->insertBefore(insertAt, guard);
+ }
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Range based Truncation
+///////////////////////////////////////////////////////////////////////////////
+
+void Range::clampToInt32() {
+ if (isInt32()) {
+ return;
+ }
+ int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
+ int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
+ setInt32(l, h);
+}
+
+void Range::wrapAroundToInt32() {
+ if (!hasInt32Bounds()) {
+ setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
+ } else if (canHaveFractionalPart()) {
+ // Clearing the fractional field may provide an opportunity to refine
+ // lower_ or upper_.
+ canHaveFractionalPart_ = ExcludesFractionalParts;
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ refineInt32BoundsByExponent(max_exponent_, &lower_, &hasInt32LowerBound_,
+ &upper_, &hasInt32UpperBound_);
+
+ assertInvariants();
+ } else {
+ // If nothing else, we can clear the negative zero flag.
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ }
+ MOZ_ASSERT(isInt32());
+}
+
+void Range::wrapAroundToShiftCount() {
+ wrapAroundToInt32();
+ if (lower() < 0 || upper() >= 32) {
+ setInt32(0, 31);
+ }
+}
+
+void Range::wrapAroundToBoolean() {
+ wrapAroundToInt32();
+ if (!isBoolean()) {
+ setInt32(0, 1);
+ }
+ MOZ_ASSERT(isBoolean());
+}
+
+bool MDefinition::canTruncate() const {
+ // No procedure defined for truncating this instruction.
+ return false;
+}
+
+void MDefinition::truncate(TruncateKind kind) {
+ MOZ_CRASH("No procedure defined for truncating this instruction.");
+}
+
+bool MConstant::canTruncate() const { return IsFloatingPointType(type()); }
+
+void MConstant::truncate(TruncateKind kind) {
+ MOZ_ASSERT(canTruncate());
+
+  // Truncate the double to int, since all uses truncate it.
+ int32_t res = ToInt32(numberToDouble());
+ payload_.asBits = 0;
+ payload_.i32 = res;
+ setResultType(MIRType::Int32);
+ if (range()) {
+ range()->setInt32(res, res);
+ }
+}
+
+bool MPhi::canTruncate() const {
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void MPhi::truncate(TruncateKind kind) {
+ MOZ_ASSERT(canTruncate());
+ truncateKind_ = kind;
+ setResultType(MIRType::Int32);
+ if (kind >= TruncateKind::IndirectTruncate && range()) {
+ range()->wrapAroundToInt32();
+ }
+}
+
+bool MAdd::canTruncate() const {
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void MAdd::truncate(TruncateKind kind) {
+ MOZ_ASSERT(canTruncate());
+
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+
+ setSpecialization(MIRType::Int32);
+ if (truncateKind() >= TruncateKind::IndirectTruncate && range()) {
+ range()->wrapAroundToInt32();
+ }
+}
+
+bool MSub::canTruncate() const {
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void MSub::truncate(TruncateKind kind) {
+ MOZ_ASSERT(canTruncate());
+
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+ setSpecialization(MIRType::Int32);
+ if (truncateKind() >= TruncateKind::IndirectTruncate && range()) {
+ range()->wrapAroundToInt32();
+ }
+}
+
+bool MMul::canTruncate() const {
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void MMul::truncate(TruncateKind kind) {
+ MOZ_ASSERT(canTruncate());
+
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+ setSpecialization(MIRType::Int32);
+ if (truncateKind() >= TruncateKind::IndirectTruncate) {
+ setCanBeNegativeZero(false);
+ if (range()) {
+ range()->wrapAroundToInt32();
+ }
+ }
+}
+
+bool MDiv::canTruncate() const {
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void MDiv::truncate(TruncateKind kind) {
+ MOZ_ASSERT(canTruncate());
+
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+ setSpecialization(MIRType::Int32);
+
+ // Divisions where the lhs and rhs are unsigned and the result is
+ // truncated can be lowered more efficiently.
+ if (unsignedOperands()) {
+ replaceWithUnsignedOperands();
+ unsigned_ = true;
+ }
+}
+
+bool MMod::canTruncate() const {
+ return type() == MIRType::Double || type() == MIRType::Int32;
+}
+
+void MMod::truncate(TruncateKind kind) {
+ // As for division, handle unsigned modulus with a truncated result.
+ MOZ_ASSERT(canTruncate());
+
+ // Remember analysis, needed for fallible checks.
+ setTruncateKind(kind);
+ setSpecialization(MIRType::Int32);
+
+ if (unsignedOperands()) {
+ replaceWithUnsignedOperands();
+ unsigned_ = true;
+ }
+}
+
+bool MToDouble::canTruncate() const {
+ MOZ_ASSERT(type() == MIRType::Double);
+ return true;
+}
+
+void MToDouble::truncate(TruncateKind kind) {
+ MOZ_ASSERT(canTruncate());
+ setTruncateKind(kind);
+
+ // We use the return type to flag that this MToDouble should be replaced by
+ // a MTruncateToInt32 when modifying the graph.
+ setResultType(MIRType::Int32);
+ if (truncateKind() >= TruncateKind::IndirectTruncate) {
+ if (range()) {
+ range()->wrapAroundToInt32();
+ }
+ }
+}
+
+bool MLimitedTruncate::canTruncate() const { return true; }
+
+void MLimitedTruncate::truncate(TruncateKind kind) {
+ MOZ_ASSERT(canTruncate());
+ setTruncateKind(kind);
+ setResultType(MIRType::Int32);
+ if (kind >= TruncateKind::IndirectTruncate && range()) {
+ range()->wrapAroundToInt32();
+ }
+}
+
+bool MCompare::canTruncate() const {
+ if (!isDoubleComparison()) {
+ return false;
+ }
+
+ // If both operands are naturally in the int32 range, we can convert from
+ // a double comparison to being an int32 comparison.
+ if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32()) {
+ return false;
+ }
+
+ return true;
+}
+
+void MCompare::truncate(TruncateKind kind) {
+ MOZ_ASSERT(canTruncate());
+ compareType_ = Compare_Int32;
+
+ // Truncating the operands won't change their value because we don't force a
+ // truncation, but it will change their type, which we need because we
+ // now expect integer inputs.
+ truncateOperands_ = true;
+}
+
+TruncateKind MDefinition::operandTruncateKind(size_t index) const {
+ // Generic routine: We don't know anything.
+ return TruncateKind::NoTruncate;
+}
+
+TruncateKind MPhi::operandTruncateKind(size_t index) const {
+ // The truncation applied to a phi is effectively applied to the phi's
+ // operands.
+ return truncateKind_;
+}
+
+TruncateKind MTruncateToInt32::operandTruncateKind(size_t index) const {
+ // This operator is an explicit truncate to int32.
+ return TruncateKind::Truncate;
+}
+
+TruncateKind MBinaryBitwiseInstruction::operandTruncateKind(
+ size_t index) const {
+ // The bitwise operators truncate to int32.
+ return TruncateKind::Truncate;
+}
+
+TruncateKind MLimitedTruncate::operandTruncateKind(size_t index) const {
+ return std::min(truncateKind(), truncateLimit_);
+}
+
+TruncateKind MAdd::operandTruncateKind(size_t index) const {
+ // This operator is doing some arithmetic. If its result is truncated,
+ // it's an indirect truncate for its operands.
+ return std::min(truncateKind(), TruncateKind::IndirectTruncate);
+}
+
+TruncateKind MSub::operandTruncateKind(size_t index) const {
+ // See the comment in MAdd::operandTruncateKind.
+ return std::min(truncateKind(), TruncateKind::IndirectTruncate);
+}
+
+TruncateKind MMul::operandTruncateKind(size_t index) const {
+ // See the comment in MAdd::operandTruncateKind.
+ return std::min(truncateKind(), TruncateKind::IndirectTruncate);
+}
+
+TruncateKind MToDouble::operandTruncateKind(size_t index) const {
+ // MToDouble propagates its truncate kind to its operand.
+ return truncateKind();
+}
+
+TruncateKind MStoreUnboxedScalar::operandTruncateKind(size_t index) const {
+ // An integer store truncates the stored value.
+ return (index == 2 && isIntegerWrite()) ? TruncateKind::Truncate
+ : TruncateKind::NoTruncate;
+}
+
+TruncateKind MStoreDataViewElement::operandTruncateKind(size_t index) const {
+ // An integer store truncates the stored value.
+ return (index == 2 && isIntegerWrite()) ? TruncateKind::Truncate
+ : TruncateKind::NoTruncate;
+}
+
+TruncateKind MStoreTypedArrayElementHole::operandTruncateKind(
+ size_t index) const {
+ // An integer store truncates the stored value.
+ return (index == 3 && isIntegerWrite()) ? TruncateKind::Truncate
+ : TruncateKind::NoTruncate;
+}
+
+TruncateKind MDiv::operandTruncateKind(size_t index) const {
+ return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
+}
+
+TruncateKind MMod::operandTruncateKind(size_t index) const {
+ return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
+}
+
+TruncateKind MCompare::operandTruncateKind(size_t index) const {
+ // If we're doing an int32 comparison on operands which were previously
+ // floating-point, convert them!
+ MOZ_ASSERT_IF(truncateOperands_, isInt32Comparison());
+ return truncateOperands_ ? TruncateKind::TruncateAfterBailouts
+ : TruncateKind::NoTruncate;
+}
+
+static bool TruncateTest(TempAllocator& alloc, MTest* test) {
+ // If all possible inputs to the test are either int32 or boolean,
+ // convert those inputs to int32 so that an int32 test can be performed.
+
+ if (test->input()->type() != MIRType::Value) {
+ return true;
+ }
+
+ if (!test->input()->isPhi() || !test->input()->hasOneDefUse() ||
+ test->input()->isImplicitlyUsed()) {
+ return true;
+ }
+
+ MPhi* phi = test->input()->toPhi();
+ for (size_t i = 0; i < phi->numOperands(); i++) {
+ MDefinition* def = phi->getOperand(i);
+ if (!def->isBox()) {
+ return true;
+ }
+ MDefinition* inner = def->getOperand(0);
+ if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32) {
+ return true;
+ }
+ }
+
+ for (size_t i = 0; i < phi->numOperands(); i++) {
+ MDefinition* inner = phi->getOperand(i)->getOperand(0);
+ if (inner->type() != MIRType::Int32) {
+ if (!alloc.ensureBallast()) {
+ return false;
+ }
+ MBasicBlock* block = inner->block();
+ inner = MToNumberInt32::New(alloc, inner);
+ block->insertBefore(block->lastIns(), inner->toInstruction());
+ }
+ MOZ_ASSERT(inner->type() == MIRType::Int32);
+ phi->replaceOperand(i, inner);
+ }
+
+ phi->setResultType(MIRType::Int32);
+ return true;
+}
+
+// Truncating an instruction's result is an optimization which implies
+// knowing all uses of that instruction. This implies that if one of
+// the uses got removed, then Range Analysis is not allowed to make
+// any modification which can change the result, especially if the
+// result can be observed.
+//
+// This corner case is easiest to understand with UCE examples, but it
+// might also happen with type inference assumptions. Note: type
+// inference implicitly adds branches where other types might be
+// flowing in.
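+//
+// For example (illustrative): if UCE removes the only branch in which the
+// boxed result of an addition was observed, the remaining uses might all
+// truncate, while a resume point still captures the addition; cloning below
+// keeps an untruncated version of the value around for the bailout path.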
+static bool CloneForDeadBranches(TempAllocator& alloc,
+ MInstruction* candidate) {
+ // Compare returns a boolean so it doesn't have to be recovered on bailout
+ // because the output would remain correct.
+ if (candidate->isCompare()) {
+ return true;
+ }
+
+ MOZ_ASSERT(candidate->canClone());
+ if (!alloc.ensureBallast()) {
+ return false;
+ }
+
+ MDefinitionVector operands(alloc);
+ size_t end = candidate->numOperands();
+ if (!operands.reserve(end)) {
+ return false;
+ }
+ for (size_t i = 0; i < end; ++i) {
+ operands.infallibleAppend(candidate->getOperand(i));
+ }
+
+ MInstruction* clone = candidate->clone(alloc, operands);
+ if (!clone) {
+ return false;
+ }
+ clone->setRange(nullptr);
+
+ // Set ImplicitlyUsed flag on the cloned instruction in order to chain recover
+  // instructions for the bailout path.
+ clone->setImplicitlyUsedUnchecked();
+
+ candidate->block()->insertBefore(candidate, clone);
+
+ if (!candidate->maybeConstantValue()) {
+ MOZ_ASSERT(clone->canRecoverOnBailout());
+ clone->setRecoveredOnBailout();
+ }
+
+ // Replace the candidate by its recovered on bailout clone within recovered
+ // instructions and resume points operands.
+ for (MUseIterator i(candidate->usesBegin()); i != candidate->usesEnd();) {
+ MUse* use = *i++;
+ MNode* ins = use->consumer();
+ if (ins->isDefinition() && !ins->toDefinition()->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ use->replaceProducer(clone);
+ }
+
+ return true;
+}
+
+// Examine all the users of |candidate| and determine the most aggressive
+// truncate kind that satisfies all of them.
+static TruncateKind ComputeRequestedTruncateKind(MDefinition* candidate,
+ bool* shouldClone) {
+ bool isCapturedResult =
+ false; // Check if used by a recovered instruction or a resume point.
+ bool isObservableResult =
+ false; // Check if it can be read from another frame.
+ bool isRecoverableResult = true; // Check if it can safely be reconstructed.
+ bool isImplicitlyUsed = candidate->isImplicitlyUsed();
+ bool hasTryBlock = candidate->block()->graph().hasTryBlock();
+
+ TruncateKind kind = TruncateKind::Truncate;
+ for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd();
+ use++) {
+ if (use->consumer()->isResumePoint()) {
+      // Truncation is a destructive optimization; as such, we need to pay
+      // attention to removed branches and refrain from destructive
+      // optimizations if we have no alternative. (See the ImplicitlyUsed
+      // flag.)
+ isCapturedResult = true;
+ isObservableResult =
+ isObservableResult ||
+ use->consumer()->toResumePoint()->isObservableOperand(*use);
+ isRecoverableResult =
+ isRecoverableResult &&
+ use->consumer()->toResumePoint()->isRecoverableOperand(*use);
+ continue;
+ }
+
+ MDefinition* consumer = use->consumer()->toDefinition();
+ if (consumer->isRecoveredOnBailout()) {
+ isCapturedResult = true;
+ isImplicitlyUsed = isImplicitlyUsed || consumer->isImplicitlyUsed();
+ continue;
+ }
+
+ TruncateKind consumerKind =
+ consumer->operandTruncateKind(consumer->indexOf(*use));
+ kind = std::min(kind, consumerKind);
+ if (kind == TruncateKind::NoTruncate) {
+ break;
+ }
+ }
+
+  // We cannot do full truncation on guarded instructions.
+ if (candidate->isGuard() || candidate->isGuardRangeBailouts()) {
+ kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
+ }
+
+ // If the value naturally produces an int32 value (before bailout checks)
+ // that needs no conversion, we don't have to worry about resume points
+ // seeing truncated values.
+ bool needsConversion = !candidate->range() || !candidate->range()->isInt32();
+
+ // If the instruction is explicitly truncated (not indirectly) by all its
+ // uses and if it is not implicitly used, then we can safely encode its
+ // truncated result as part of the resume point operands. This is safe,
+ // because even if we resume with a truncated double, the next baseline
+ // instruction operating on this instruction is going to be a no-op.
+ //
+ // Note, that if the result can be observed from another frame, then this
+ // optimization is not safe. Similarly, if this function contains a try
+ // block, the result could be observed from a catch block, which we do
+ // not compile.
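+  //
+  // For example (illustrative): a double addition whose only use is a
+  // bitwise-or with zero can resume with the truncated int32 value, because
+  // the baseline bitwise-or truncates its operand anyway and produces the
+  // same result either way.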
+ bool safeToConvert = kind == TruncateKind::Truncate && !isImplicitlyUsed &&
+ !isObservableResult && !hasTryBlock;
+
+ // If the candidate instruction appears as operand of a resume point or a
+ // recover instruction, and we have to truncate its result, then we might
+ // have to either recover the result during the bailout, or avoid the
+ // truncation.
+ if (isCapturedResult && needsConversion && !safeToConvert) {
+ // If the result can be recovered from all the resume points (not needed
+ // for iterating over the inlined frames), and this instruction can be
+ // recovered on bailout, then we can clone it and use the cloned
+ // instruction to encode the recover instruction. Otherwise, we should
+ // keep the original result and bailout if the value is not in the int32
+ // range.
+ if (!JitOptions.disableRecoverIns && isRecoverableResult &&
+ candidate->canRecoverOnBailout()) {
+ *shouldClone = true;
+ } else {
+ kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
+ }
+ }
+
+ return kind;
+}
+
+static TruncateKind ComputeTruncateKind(MDefinition* candidate,
+ bool* shouldClone) {
+  // Compare operations might coerce their inputs to int32 if the ranges are
+  // correct. So we do not need to check if all uses are coerced.
+ if (candidate->isCompare()) {
+ return TruncateKind::TruncateAfterBailouts;
+ }
+
+  // Set truncated flag if range analysis ensures that it has no
+ // rounding errors and no fractional part. Note that we can't use
+ // the MDefinition Range constructor, because we need to know if
+ // the value will have rounding errors before any bailout checks.
+ const Range* r = candidate->range();
+ bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();
+
+ // Special case integer division and modulo: a/b can be infinite, and a%b
+ // can be NaN but cannot actually have rounding errors induced by truncation.
+ if ((candidate->isDiv() || candidate->isMod()) &&
+ candidate->type() == MIRType::Int32) {
+ canHaveRoundingErrors = false;
+ }
+
+ if (canHaveRoundingErrors) {
+ return TruncateKind::NoTruncate;
+ }
+
+ // Ensure all observable uses are truncated.
+ return ComputeRequestedTruncateKind(candidate, shouldClone);
+}
+
+static void RemoveTruncatesOnOutput(MDefinition* truncated) {
+  // Compare returns a boolean so it doesn't have any output truncates.
+ if (truncated->isCompare()) {
+ return;
+ }
+
+ MOZ_ASSERT(truncated->type() == MIRType::Int32);
+ MOZ_ASSERT(Range(truncated).isInt32());
+
+ for (MUseDefIterator use(truncated); use; use++) {
+ MDefinition* def = use.def();
+    if (!def->isTruncateToInt32() && !def->isToNumberInt32()) {
+ continue;
+ }
+
+ def->replaceAllUsesWith(truncated);
+ }
+}
+
+void RangeAnalysis::adjustTruncatedInputs(MDefinition* truncated) {
+ MBasicBlock* block = truncated->block();
+ for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
+ TruncateKind kind = truncated->operandTruncateKind(i);
+ if (kind == TruncateKind::NoTruncate) {
+ continue;
+ }
+
+ MDefinition* input = truncated->getOperand(i);
+ if (input->type() == MIRType::Int32) {
+ continue;
+ }
+
+ if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
+ truncated->replaceOperand(i, input->getOperand(0));
+ } else {
+ MInstruction* op;
+ if (kind == TruncateKind::TruncateAfterBailouts) {
+ MOZ_ASSERT(!mir->outerInfo().hadEagerTruncationBailout());
+ op = MToNumberInt32::New(alloc(), truncated->getOperand(i));
+ op->setBailoutKind(BailoutKind::EagerTruncation);
+ } else {
+ op = MTruncateToInt32::New(alloc(), truncated->getOperand(i));
+ }
+
+ if (truncated->isPhi()) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ pred->insertBefore(pred->lastIns(), op);
+ } else {
+ block->insertBefore(truncated->toInstruction(), op);
+ }
+ truncated->replaceOperand(i, op);
+ }
+ }
+
+ if (truncated->isToDouble()) {
+ truncated->replaceAllUsesWith(truncated->toToDouble()->getOperand(0));
+ block->discard(truncated->toToDouble());
+ }
+}
+
+bool RangeAnalysis::canTruncate(MDefinition* def, TruncateKind kind) const {
+ if (kind == TruncateKind::NoTruncate) {
+ return false;
+ }
+
+ // Range Analysis is sometimes eager to do optimizations, even if we
+  // are not able to truncate an instruction. In such a case, we
+  // speculatively compile the instruction to an int32 instruction
+  // while adding a guard. This is what is implied by
+  // TruncateAfterBailouts.
+ //
+ // If a previous compilation was invalidated because a speculative
+ // truncation bailed out, we no longer attempt to make this kind of
+ // eager optimization.
+ if (mir->outerInfo().hadEagerTruncationBailout()) {
+ if (kind == TruncateKind::TruncateAfterBailouts) {
+ return false;
+ }
+ // MDiv and MMod always require TruncateAfterBailout for their operands.
+ // See MDiv::operandTruncateKind and MMod::operandTruncateKind.
+ if (def->isDiv() || def->isMod()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Iterate backward over all instructions and attempt to truncate each
+// instruction which satisfies the following list of predicates: it has been
+// analyzed by range analysis, its range has no rounding errors, and all of
+// its uses truncate the result.
+//
+// If the truncation of the operation is successful, then the instruction is
+// queued so that the graph can later be updated to restore type correctness
+// by converting the operands that need to be truncated.
+//
+// We iterate backward because it is likely that a truncated operation truncates
+// some of its operands.
+bool RangeAnalysis::truncate() {
+  JitSpew(JitSpew_Range, "Do range-based truncation (backward loop)");
+
+ // Automatic truncation is disabled for wasm because the truncation logic
+ // is based on IonMonkey which assumes that we can bailout if the truncation
+ // logic fails. As wasm code has no bailout mechanism, it is safer to avoid
+ // any automatic truncations.
+ MOZ_ASSERT(!mir->compilingWasm());
+
+ Vector<MDefinition*, 16, SystemAllocPolicy> worklist;
+
+ for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd();
+ block++) {
+ for (MInstructionReverseIterator iter(block->rbegin());
+ iter != block->rend(); iter++) {
+ if (iter->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ if (iter->type() == MIRType::None) {
+ if (iter->isTest()) {
+ if (!TruncateTest(alloc(), iter->toTest())) {
+ return false;
+ }
+ }
+ continue;
+ }
+
+ // Remember all bitop instructions for folding after range analysis.
+ switch (iter->op()) {
+ case MDefinition::Opcode::BitAnd:
+ case MDefinition::Opcode::BitOr:
+ case MDefinition::Opcode::BitXor:
+ case MDefinition::Opcode::Lsh:
+ case MDefinition::Opcode::Rsh:
+ case MDefinition::Opcode::Ursh:
+ if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter))) {
+ return false;
+ }
+ break;
+ default:;
+ }
+
+ bool shouldClone = false;
+ TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
+
+ // Truncate this instruction if possible.
+ if (!canTruncate(*iter, kind) || !iter->canTruncate()) {
+ continue;
+ }
+
+ SpewTruncate(*iter, kind, shouldClone);
+
+ // If needed, clone the current instruction for keeping it for the
+ // bailout path. This give us the ability to truncate instructions
+ // even after the removal of branches.
+ if (shouldClone && !CloneForDeadBranches(alloc(), *iter)) {
+ return false;
+ }
+
+ // TruncateAfterBailouts keeps the bailout code as-is and
+ // continues with truncated operations, with the expectation
+ // that we are unlikely to bail out. If we do bail out, then we
+ // will set a flag in FinishBailoutToBaseline to prevent eager
+ // truncation when we recompile, to avoid bailout loops.
+ if (kind == TruncateKind::TruncateAfterBailouts) {
+ iter->setBailoutKind(BailoutKind::EagerTruncation);
+ }
+
+ iter->truncate(kind);
+
+ // Delay updates of inputs/outputs to avoid creating node which
+ // would be removed by the truncation of the next operations.
+ iter->setInWorklist();
+ if (!worklist.append(*iter)) {
+ return false;
+ }
+ }
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
+ iter != end; ++iter) {
+ bool shouldClone = false;
+ TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
+
+ // Truncate this phi if possible.
+ if (shouldClone || !canTruncate(*iter, kind) || !iter->canTruncate()) {
+ continue;
+ }
+
+ SpewTruncate(*iter, kind, shouldClone);
+
+ iter->truncate(kind);
+
+ // Delay updates of inputs/outputs to avoid creating node which
+ // would be removed by the truncation of the next operations.
+ iter->setInWorklist();
+ if (!worklist.append(*iter)) {
+ return false;
+ }
+ }
+ }
+
+ // Update inputs/outputs of truncated instructions.
+ JitSpew(JitSpew_Range, "Do graph type fixup (dequeue)");
+ while (!worklist.empty()) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ MDefinition* def = worklist.popCopy();
+ def->setNotInWorklist();
+ RemoveTruncatesOnOutput(def);
+ adjustTruncatedInputs(def);
+ }
+
+ return true;
+}
+
+bool RangeAnalysis::removeUnnecessaryBitops() {
+ JitSpew(JitSpew_Range, "Begin (removeUnnecessaryBitops)");
+  // Note: This operation changes the semantics of the program in a way which
+  // only works with Int32. Recover instructions added by the Sink phase
+  // expect the MIR graph to still have valid flow as if these were double
+ // operations instead of Int32 operations. Thus, this phase should be
+ // executed after the Sink phase, and before DCE.
+
+ // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
+ // input. This is done after range analysis rather than during GVN as the
+ // presence of the bitop can change which instructions are truncated.
+ for (size_t i = 0; i < bitops.length(); i++) {
+ MBinaryBitwiseInstruction* ins = bitops[i];
+ if (ins->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ MDefinition* folded = ins->foldUnnecessaryBitop();
+ if (folded != ins) {
+ ins->replaceAllLiveUsesWith(folded);
+ ins->setRecoveredOnBailout();
+ }
+ }
+
+ bitops.clear();
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Collect Range information of operands
+///////////////////////////////////////////////////////////////////////////////
+
+void MInArray::collectRangeInfoPreTrunc() {
+ Range indexRange(index());
+ if (indexRange.isFiniteNonNegative()) {
+ needsNegativeIntCheck_ = false;
+ setNotGuard();
+ }
+}
+
+void MLoadElementHole::collectRangeInfoPreTrunc() {
+ Range indexRange(index());
+ if (indexRange.isFiniteNonNegative()) {
+ needsNegativeIntCheck_ = false;
+ setNotGuard();
+ }
+}
+
+void MInt32ToIntPtr::collectRangeInfoPreTrunc() {
+ Range inputRange(input());
+ if (inputRange.isFiniteNonNegative()) {
+ canBeNegative_ = false;
+ }
+}
+
+void MClz::collectRangeInfoPreTrunc() {
+ Range inputRange(input());
+ if (!inputRange.canBeZero()) {
+ operandIsNeverZero_ = true;
+ }
+}
+
+void MCtz::collectRangeInfoPreTrunc() {
+ Range inputRange(input());
+ if (!inputRange.canBeZero()) {
+ operandIsNeverZero_ = true;
+ }
+}
+
+void MDiv::collectRangeInfoPreTrunc() {
+ Range lhsRange(lhs());
+ Range rhsRange(rhs());
+
+ // Test if Dividend is non-negative.
+ if (lhsRange.isFiniteNonNegative()) {
+ canBeNegativeDividend_ = false;
+ }
+
+ // Try removing divide by zero check.
+ if (!rhsRange.canBeZero()) {
+ canBeDivideByZero_ = false;
+ }
+
+ // If lhsRange does not contain INT32_MIN in its range,
+ // negative overflow check can be skipped.
+ if (!lhsRange.contains(INT32_MIN)) {
+ canBeNegativeOverflow_ = false;
+ }
+
+ // If rhsRange does not contain -1 likewise.
+ if (!rhsRange.contains(-1)) {
+ canBeNegativeOverflow_ = false;
+ }
+
+ // If lhsRange does not contain a zero,
+ // negative zero check can be skipped.
+ if (!lhsRange.canBeZero()) {
+ canBeNegativeZero_ = false;
+ }
+
+ // If rhsRange >= 0 negative zero check can be skipped.
+ if (rhsRange.isFiniteNonNegative()) {
+ canBeNegativeZero_ = false;
+ }
+
+ if (fallible()) {
+ setGuardRangeBailoutsUnchecked();
+ }
+}
+
+void MMul::collectRangeInfoPreTrunc() {
+ Range lhsRange(lhs());
+ Range rhsRange(rhs());
+
+  // If lhsRange contains only strictly positive values, we can skip the
+  // negative zero check.
+ if (lhsRange.isFiniteNonNegative() && !lhsRange.canBeZero()) {
+ setCanBeNegativeZero(false);
+ }
+
+ // Likewise rhsRange.
+ if (rhsRange.isFiniteNonNegative() && !rhsRange.canBeZero()) {
+ setCanBeNegativeZero(false);
+ }
+
+  // If rhsRange and lhsRange contain only non-negative integers,
+  // we can skip the negative zero check.
+ if (rhsRange.isFiniteNonNegative() && lhsRange.isFiniteNonNegative()) {
+ setCanBeNegativeZero(false);
+ }
+
+  // If both rhsRange and lhsRange are strictly negative, we can skip the
+  // negative zero check.
+ if (rhsRange.isFiniteNegative() && lhsRange.isFiniteNegative()) {
+ setCanBeNegativeZero(false);
+ }
+}
+
+void MMod::collectRangeInfoPreTrunc() {
+ Range lhsRange(lhs());
+ Range rhsRange(rhs());
+ if (lhsRange.isFiniteNonNegative()) {
+ canBeNegativeDividend_ = false;
+ }
+ if (!rhsRange.canBeZero()) {
+ canBeDivideByZero_ = false;
+ }
+ if (type() == MIRType::Int32 && fallible()) {
+ setGuardRangeBailoutsUnchecked();
+ }
+}
+
+void MToNumberInt32::collectRangeInfoPreTrunc() {
+ Range inputRange(input());
+ if (!inputRange.canBeNegativeZero()) {
+ needsNegativeZeroCheck_ = false;
+ }
+}
+
+void MBoundsCheck::collectRangeInfoPreTrunc() {
+ Range indexRange(index());
+ Range lengthRange(length());
+ if (!indexRange.hasInt32LowerBound() || !indexRange.hasInt32UpperBound()) {
+ return;
+ }
+ if (!lengthRange.hasInt32LowerBound() || lengthRange.canBeNaN()) {
+ return;
+ }
+
+ int64_t indexLower = indexRange.lower();
+ int64_t indexUpper = indexRange.upper();
+ int64_t lengthLower = lengthRange.lower();
+ int64_t min = minimum();
+ int64_t max = maximum();
+
+ if (indexLower + min >= 0 && indexUpper + max < lengthLower) {
+ fallible_ = false;
+ }
+}
+
+void MBoundsCheckLower::collectRangeInfoPreTrunc() {
+ Range indexRange(index());
+ if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_) {
+ fallible_ = false;
+ }
+}
+
+void MCompare::collectRangeInfoPreTrunc() {
+ if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN()) {
+ operandsAreNeverNaN_ = true;
+ }
+}
+
+void MNot::collectRangeInfoPreTrunc() {
+ if (!Range(input()).canBeNaN()) {
+ operandIsNeverNaN_ = true;
+ }
+}
+
+void MPowHalf::collectRangeInfoPreTrunc() {
+ Range inputRange(input());
+ if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound()) {
+ operandIsNeverNegativeInfinity_ = true;
+ }
+ if (!inputRange.canBeNegativeZero()) {
+ operandIsNeverNegativeZero_ = true;
+ }
+ if (!inputRange.canBeNaN()) {
+ operandIsNeverNaN_ = true;
+ }
+}
+
+void MUrsh::collectRangeInfoPreTrunc() {
+ if (type() == MIRType::Int64) {
+ return;
+ }
+
+ Range lhsRange(lhs()), rhsRange(rhs());
+
+ // As in MUrsh::computeRange(), convert the inputs.
+ lhsRange.wrapAroundToInt32();
+ rhsRange.wrapAroundToShiftCount();
+
+ // If the most significant bit of our result is always going to be zero,
+ // we can optimize by disabling bailout checks for enforcing an int32 range.
+ if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1) {
+ bailoutsDisabled_ = true;
+ }
+}
+
+static bool DoesMaskMatchRange(int32_t mask, Range& range) {
+  // Check that the range is non-negative, because the bitand operator in
+  // `(-3) & 0xff` can't be eliminated.
+ if (range.lower() >= 0) {
+ MOZ_ASSERT(range.isInt32());
+ // Check that the mask value has all bits set given the range upper bound.
+ // Note that the upper bound does not have to be exactly the mask value. For
+ // example, consider `x & 0xfff` where `x` is a uint8. That expression can
+ // still be optimized to `x`.
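+    // Concretely, with an upper bound of 255: bits == 8, maskNeeded == 0xff,
+    // and (0xfff & 0xff) == 0xff, so the mask matches.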
+ int bits = 1 + FloorLog2(range.upper());
+ uint32_t maskNeeded = (bits == 32) ? 0xffffffff : (uint32_t(1) << bits) - 1;
+ if ((mask & maskNeeded) == maskNeeded) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void MBinaryBitwiseInstruction::collectRangeInfoPreTrunc() {
+ Range lhsRange(lhs());
+ Range rhsRange(rhs());
+
+ if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
+ DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange)) {
+ maskMatchesRightRange = true;
+ }
+
+ if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
+ DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange)) {
+ maskMatchesLeftRange = true;
+ }
+}
+
+void MNaNToZero::collectRangeInfoPreTrunc() {
+ Range inputRange(input());
+
+ if (!inputRange.canBeNaN()) {
+ operandIsNeverNaN_ = true;
+ }
+ if (!inputRange.canBeNegativeZero()) {
+ operandIsNeverNegativeZero_ = true;
+ }
+}
+
+bool RangeAnalysis::prepareForUCE(bool* shouldRemoveDeadCode) {
+ *shouldRemoveDeadCode = false;
+
+ for (ReversePostorderIterator iter(graph_.rpoBegin());
+ iter != graph_.rpoEnd(); iter++) {
+ MBasicBlock* block = *iter;
+
+ if (!block->unreachable()) {
+ continue;
+ }
+
+ // Filter out unreachable fake entries.
+ if (block->numPredecessors() == 0) {
+ // Ignore fixup blocks added by the Value Numbering phase, in order
+      // to keep the dominator tree as-is when we have OSR blocks which are
+ // no longer reachable from the main entry point of the graph.
+ MOZ_ASSERT(graph_.osrBlock());
+ continue;
+ }
+
+ MControlInstruction* cond = block->getPredecessor(0)->lastIns();
+ if (!cond->isTest()) {
+ continue;
+ }
+
+ // Replace the condition of the test control instruction by a constant
+    // chosen based on which of the successors has the unreachable flag which is
+ // added by MBeta::computeRange on its own block.
+ MTest* test = cond->toTest();
+ MDefinition* condition = test->input();
+
+ // If the false-branch is unreachable, then the test condition must be true.
+ // If the true-branch is unreachable, then the test condition must be false.
+ MOZ_ASSERT(block == test->ifTrue() || block == test->ifFalse());
+ bool value = block == test->ifFalse();
+ MConstant* constant =
+ MConstant::New(alloc().fallible(), BooleanValue(value));
+ if (!constant) {
+ return false;
+ }
+
+ condition->setGuardRangeBailoutsUnchecked();
+
+ test->block()->insertBefore(test, constant);
+
+ test->replaceOperand(0, constant);
+ JitSpew(JitSpew_Range,
+ "Update condition of %u to reflect unreachable branches.",
+ test->id());
+
+ *shouldRemoveDeadCode = true;
+ }
+
+ return tryRemovingGuards();
+}
+
+bool RangeAnalysis::tryRemovingGuards() {
+ MDefinitionVector guards(alloc());
+
+ for (ReversePostorderIterator block = graph_.rpoBegin();
+ block != graph_.rpoEnd(); block++) {
+ for (MDefinitionIterator iter(*block); iter; iter++) {
+ if (!iter->isGuardRangeBailouts()) {
+ continue;
+ }
+
+ iter->setInWorklist();
+ if (!guards.append(*iter)) {
+ return false;
+ }
+ }
+ }
+
+ // Flag all fallible instructions which were indirectly used in the
+ // computation of the condition, such that we do not ignore
+ // bailout-paths which are used to shrink the input range of the
+ // operands of the condition.
+ for (size_t i = 0; i < guards.length(); i++) {
+ MDefinition* guard = guards[i];
+
+ // If this ins is a guard even without guardRangeBailouts,
+    // there is no reason to try to hoist the guardRangeBailouts check.
+ guard->setNotGuardRangeBailouts();
+ if (!DeadIfUnused(guard)) {
+ guard->setGuardRangeBailouts();
+ continue;
+ }
+ guard->setGuardRangeBailouts();
+
+ if (!guard->isPhi()) {
+ if (!guard->range()) {
+ continue;
+ }
+
+ // Filter the range of the instruction based on its MIRType.
+ Range typeFilteredRange(guard);
+
+      // If the output range is updated by adding the inner range,
+      // then the MIRType acts as an effectful filter. As we do not know
+      // whether this filtered range might change the result of the
+      // previous comparison, we have to keep this instruction as a guard,
+      // because it has to bail out in order to restrict the range to its
+      // MIRType.
+ if (typeFilteredRange.update(guard->range())) {
+ continue;
+ }
+ }
+
+ guard->setNotGuardRangeBailouts();
+
+ // Propagate the guard to its operands.
+ for (size_t op = 0, e = guard->numOperands(); op < e; op++) {
+ MDefinition* operand = guard->getOperand(op);
+
+ // Already marked.
+ if (operand->isInWorklist()) {
+ continue;
+ }
+
+ MOZ_ASSERT(!operand->isGuardRangeBailouts());
+
+ operand->setInWorklist();
+ operand->setGuardRangeBailouts();
+ if (!guards.append(operand)) {
+ return false;
+ }
+ }
+ }
+
+ for (size_t i = 0; i < guards.length(); i++) {
+ MDefinition* guard = guards[i];
+ guard->setNotInWorklist();
+ }
+
+ return true;
+}
diff --git a/js/src/jit/RangeAnalysis.h b/js/src/jit/RangeAnalysis.h
new file mode 100644
index 0000000000..f9dfa29d09
--- /dev/null
+++ b/js/src/jit/RangeAnalysis.h
@@ -0,0 +1,683 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RangeAnalysis_h
+#define jit_RangeAnalysis_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <stdint.h>
+
+#include "jit/IonAnalysis.h"
+#include "jit/IonTypes.h"
+#include "jit/JitAllocPolicy.h"
+#include "js/AllocPolicy.h"
+#include "js/Value.h"
+#include "js/Vector.h"
+
+namespace js {
+
+class JS_PUBLIC_API GenericPrinter;
+
+namespace jit {
+
+class MBasicBlock;
+class MBinaryBitwiseInstruction;
+class MBoundsCheck;
+class MDefinition;
+class MIRGenerator;
+class MIRGraph;
+class MPhi;
+class MTest;
+
+enum class TruncateKind;
+
+// An upper bound computed on the number of backedges a loop will take.
+// This count only includes backedges taken while running Ion code: for OSR
+// loops, this will exclude iterations that executed in the interpreter or in
+// baseline compiled code.
+struct LoopIterationBound : public TempObject {
+ // Loop for which this bound applies.
+ MBasicBlock* header;
+
+ // Test from which this bound was derived; after executing exactly 'bound'
+ // times this test will exit the loop. Code in the loop body which this
+  // test dominates (this includes the backedge) will execute at most 'bound'
+ // times. Other code in the loop will execute at most '1 + Max(bound, 0)'
+ // times.
+ MTest* test;
+
+ // Symbolic bound computed for the number of backedge executions. The terms
+ // in this bound are all loop invariant.
+ LinearSum boundSum;
+
+ // Linear sum for the number of iterations already executed, at the start
+ // of the loop header. This will use loop invariant terms and header phis.
+ LinearSum currentSum;
+
+ LoopIterationBound(MBasicBlock* header, MTest* test,
+ const LinearSum& boundSum, const LinearSum& currentSum)
+ : header(header),
+ test(test),
+ boundSum(boundSum),
+ currentSum(currentSum) {}
+};
+
+typedef Vector<LoopIterationBound*, 0, SystemAllocPolicy>
+ LoopIterationBoundVector;
+
+// A symbolic upper or lower bound computed for a term.
+struct SymbolicBound : public TempObject {
+ private:
+ SymbolicBound(LoopIterationBound* loop, const LinearSum& sum)
+ : loop(loop), sum(sum) {}
+
+ public:
+ // Any loop iteration bound from which this was derived.
+ //
+ // If non-nullptr, then 'sum' is only valid within the loop body, at
+ // points dominated by the loop bound's test (see LoopIterationBound).
+ //
+ // If nullptr, then 'sum' is always valid.
+ LoopIterationBound* loop;
+
+ static SymbolicBound* New(TempAllocator& alloc, LoopIterationBound* loop,
+ const LinearSum& sum) {
+ return new (alloc) SymbolicBound(loop, sum);
+ }
+
+ // Computed symbolic bound, see above.
+ LinearSum sum;
+
+ void dump(GenericPrinter& out) const;
+ void dump() const;
+};
+
+class RangeAnalysis {
+ protected:
+ bool blockDominates(MBasicBlock* b, MBasicBlock* b2);
+ void replaceDominatedUsesWith(MDefinition* orig, MDefinition* dom,
+ MBasicBlock* block);
+
+ protected:
+ MIRGenerator* mir;
+ MIRGraph& graph_;
+ Vector<MBinaryBitwiseInstruction*, 16, SystemAllocPolicy> bitops;
+
+ TempAllocator& alloc() const;
+
+ public:
+ RangeAnalysis(MIRGenerator* mir, MIRGraph& graph) : mir(mir), graph_(graph) {}
+ [[nodiscard]] bool addBetaNodes();
+ [[nodiscard]] bool analyze();
+ [[nodiscard]] bool addRangeAssertions();
+ [[nodiscard]] bool removeBetaNodes();
+ [[nodiscard]] bool prepareForUCE(bool* shouldRemoveDeadCode);
+ [[nodiscard]] bool tryRemovingGuards();
+ [[nodiscard]] bool truncate();
+ [[nodiscard]] bool removeUnnecessaryBitops();
+
+ private:
+ bool canTruncate(MDefinition* def, TruncateKind kind) const;
+ void adjustTruncatedInputs(MDefinition* def);
+
+ // Any iteration bounds discovered for loops in the graph.
+ LoopIterationBoundVector loopIterationBounds;
+
+ private:
+ [[nodiscard]] bool analyzeLoop(MBasicBlock* header);
+ LoopIterationBound* analyzeLoopIterationCount(MBasicBlock* header,
+ MTest* test,
+ BranchDirection direction);
+ void analyzeLoopPhi(LoopIterationBound* loopBound, MPhi* phi);
+ [[nodiscard]] bool tryHoistBoundsCheck(MBasicBlock* header,
+ MBoundsCheck* ins);
+};
+
+class Range : public TempObject {
+ public:
+ // Int32 values are signed. INT32_MAX is pow(2,31)-1 and INT32_MIN is
+ // -pow(2,31), so the greatest exponent we need is 31.
+ static const uint16_t MaxInt32Exponent = 31;
+
+ // UInt32 values are unsigned. UINT32_MAX is pow(2,32)-1, so it's the
+ // greatest value that has an exponent of 31.
+ static const uint16_t MaxUInt32Exponent = 31;
+
+ // Maximal exponent under which we have no precision loss on double
+ // operations. Double has 52 bits of mantissa, so 2^52+1 cannot be
+ // represented without loss.
+ static const uint16_t MaxTruncatableExponent =
+ mozilla::FloatingPoint<double>::kExponentShift;
+
+ // Maximum exponent for finite values.
+ static const uint16_t MaxFiniteExponent =
+ mozilla::FloatingPoint<double>::kExponentBias;
+
+ // A special exponent value representing all non-NaN values. This
+ // includes finite values and the infinities.
+ static const uint16_t IncludesInfinity = MaxFiniteExponent + 1;
+
+ // A special exponent value representing all possible double-precision
+ // values. This includes finite values, the infinities, and NaNs.
+ static const uint16_t IncludesInfinityAndNaN = UINT16_MAX;
+
+ // This range class uses int32_t ranges, but has several interfaces which
+ // use int64_t, which either hold an int32_t value or one of the following
+ // special values meaning a value beyond the int32 range, potentially
+ // including infinity or NaN. These special values are guaranteed to
+ // compare greater than, and less than, respectively, any int32_t value.
+ static const int64_t NoInt32UpperBound = int64_t(JSVAL_INT_MAX) + 1;
+ static const int64_t NoInt32LowerBound = int64_t(JSVAL_INT_MIN) - 1;
+
+ enum FractionalPartFlag : bool {
+ ExcludesFractionalParts = false,
+ IncludesFractionalParts = true
+ };
+ enum NegativeZeroFlag : bool {
+ ExcludesNegativeZero = false,
+ IncludesNegativeZero = true
+ };
+
+ private:
+ // Absolute ranges.
+ //
+ // We represent ranges where the endpoints can be in the set:
+ // {-infty} U [INT_MIN, INT_MAX] U {infty}. A bound of +/-
+ // infty means that the value may have overflowed in that
+ // direction. When computing the range of an integer
+ // instruction, the ranges of the operands can be clamped to
+ // [INT_MIN, INT_MAX], since if they had overflowed they would
+ // no longer be integers. This is important for optimizations
+ // and somewhat subtle.
+ //
+ // N.B.: All of the operations that compute new ranges based
+ // on existing ranges will ignore the hasInt32*Bound_ flags of the
+ // input ranges; that is, they implicitly clamp the ranges of
+ // the inputs to [INT_MIN, INT_MAX]. Therefore, while our range might
+ // be unbounded (and could overflow), when using this information to
+ // propagate through other ranges, we disregard this fact; if that code
+ // executes, then the overflow did not occur, so we may safely assume
+ // that the range is [INT_MIN, INT_MAX] instead.
+ //
+ // To facilitate this trick, we maintain the invariants that:
+ // 1) hasInt32LowerBound_ == false implies lower_ == JSVAL_INT_MIN
+ // 2) hasInt32UpperBound_ == false implies upper_ == JSVAL_INT_MAX
+ //
+ // As a second and less precise range analysis, we represent the maximal
+ // exponent taken by a value. The exponent is calculated by taking the
+ // absolute value and looking at the position of the highest bit. All
+ // exponent computations have to be over-estimations of the actual result;
+ // for Int32 values this over-approximation is rectified (see optimize()).
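+ //
+ // As an illustrative example (an assumption, not from the original text):
+ // the double value 2^40 exceeds INT32_MAX, so a range containing it has
+ // hasInt32UpperBound_ == false and upper_ == JSVAL_INT_MAX, while
+ // max_exponent_ (here >= 40) still records how large the value can get.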
+
+ MOZ_INIT_OUTSIDE_CTOR int32_t lower_;
+ MOZ_INIT_OUTSIDE_CTOR int32_t upper_;
+
+ MOZ_INIT_OUTSIDE_CTOR bool hasInt32LowerBound_;
+ MOZ_INIT_OUTSIDE_CTOR bool hasInt32UpperBound_;
+
+ MOZ_INIT_OUTSIDE_CTOR FractionalPartFlag canHaveFractionalPart_ : 1;
+ MOZ_INIT_OUTSIDE_CTOR NegativeZeroFlag canBeNegativeZero_ : 1;
+ MOZ_INIT_OUTSIDE_CTOR uint16_t max_exponent_;
+
+ // Any symbolic lower or upper bound computed for this term.
+ const SymbolicBound* symbolicLower_;
+ const SymbolicBound* symbolicUpper_;
+
+ // This function simply makes several MOZ_ASSERTs to verify the internal
+ // consistency of this range.
+ void assertInvariants() const {
+ // Basic sanity :).
+ MOZ_ASSERT(lower_ <= upper_);
+
+ // When hasInt32LowerBound_ or hasInt32UpperBound_ are false, we set
+ // lower_ and upper_ to these specific values as it simplifies the
+ // implementation in some places.
+ MOZ_ASSERT_IF(!hasInt32LowerBound_, lower_ == JSVAL_INT_MIN);
+ MOZ_ASSERT_IF(!hasInt32UpperBound_, upper_ == JSVAL_INT_MAX);
+
+ // max_exponent_ must be one of three possible things.
+ MOZ_ASSERT(max_exponent_ <= MaxFiniteExponent ||
+ max_exponent_ == IncludesInfinity ||
+ max_exponent_ == IncludesInfinityAndNaN);
+
+ // Forbid the max_exponent_ field from implying better bounds for
+ // lower_/upper_ fields. We have to add 1 to the max_exponent_ when
+ // canHaveFractionalPart_ is true in order to accommodate
+ // fractional offsets. For example, 2147483647.9 is greater than
+ // INT32_MAX, so a range containing that value will have
+ // hasInt32UpperBound_ set to false; however, that value also has
+ // exponent 30, which is strictly less than MaxInt32Exponent. For
+ // another example, 1.9 has an exponent of 0 but requires upper_ to be
+ // at least 2, which has exponent 1.
+ mozilla::DebugOnly<uint32_t> adjustedExponent =
+ max_exponent_ + (canHaveFractionalPart_ ? 1 : 0);
+ MOZ_ASSERT_IF(!hasInt32LowerBound_ || !hasInt32UpperBound_,
+ adjustedExponent >= MaxInt32Exponent);
+ MOZ_ASSERT(adjustedExponent >= mozilla::FloorLog2(mozilla::Abs(upper_)));
+ MOZ_ASSERT(adjustedExponent >= mozilla::FloorLog2(mozilla::Abs(lower_)));
+
+ // The following are essentially static assertions, but FloorLog2 isn't
+ // trivially suitable for constexpr :(.
+ MOZ_ASSERT(mozilla::FloorLog2(JSVAL_INT_MIN) == MaxInt32Exponent);
+ MOZ_ASSERT(mozilla::FloorLog2(JSVAL_INT_MAX) == 30);
+ MOZ_ASSERT(mozilla::FloorLog2(UINT32_MAX) == MaxUInt32Exponent);
+ MOZ_ASSERT(mozilla::FloorLog2(0) == 0);
+ }
+
+ // Set the lower_ and hasInt32LowerBound_ values.
+ void setLowerInit(int64_t x) {
+ if (x > JSVAL_INT_MAX) {
+ lower_ = JSVAL_INT_MAX;
+ hasInt32LowerBound_ = true;
+ } else if (x < JSVAL_INT_MIN) {
+ lower_ = JSVAL_INT_MIN;
+ hasInt32LowerBound_ = false;
+ } else {
+ lower_ = int32_t(x);
+ hasInt32LowerBound_ = true;
+ }
+ }
+ // Set the upper_ and hasInt32UpperBound_ values.
+ void setUpperInit(int64_t x) {
+ if (x > JSVAL_INT_MAX) {
+ upper_ = JSVAL_INT_MAX;
+ hasInt32UpperBound_ = false;
+ } else if (x < JSVAL_INT_MIN) {
+ upper_ = JSVAL_INT_MIN;
+ hasInt32UpperBound_ = true;
+ } else {
+ upper_ = int32_t(x);
+ hasInt32UpperBound_ = true;
+ }
+ }
+
+ // Compute the least exponent value that would be compatible with the
+ // values of lower() and upper().
+ //
+ // Note:
+ // exponent of JSVAL_INT_MIN == 31
+ // exponent of JSVAL_INT_MAX == 30
+ uint16_t exponentImpliedByInt32Bounds() const {
+ // The number of bits needed to encode |max| is FloorLog2(max) plus one.
+ uint32_t max = std::max(mozilla::Abs(lower()), mozilla::Abs(upper()));
+ uint16_t result = mozilla::FloorLog2(max);
+ MOZ_ASSERT(result ==
+ (max == 0 ? 0 : mozilla::ExponentComponent(double(max))));
+ return result;
+ }
+
+ // When converting a range which contains fractional values to a range
+ // containing only integers, the old max_exponent_ value may imply a better
+ // lower and/or upper bound than was previously available, because they no
+ // longer need to be conservative about fractional offsets and the ends of
+ // the range.
+ //
+ // Given an exponent value and pointers to the lower and upper bound values,
+ // this function refines the lower and upper bound values to the tightest
+ // bound for integer values implied by the exponent.
+ static void refineInt32BoundsByExponent(uint16_t e, int32_t* l, bool* lb,
+ int32_t* h, bool* hb) {
+ if (e < MaxInt32Exponent) {
+ // pow(2, e+1)-1 is the maximum absolute value representable with
+ // exponent e.
+ int32_t limit = (uint32_t(1) << (e + 1)) - 1;
+ *h = std::min(*h, limit);
+ *l = std::max(*l, -limit);
+ *hb = true;
+ *lb = true;
+ }
+ }
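+
+ // For example (a worked instance of the formula above): with e == 3 the
+ // largest absolute value compatible with that exponent is pow(2,4)-1 == 15,
+ // so int32 bounds of [-100, 100] would be refined to [-15, 15].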
+
+ // If the value of any of the fields implies a stronger possible value for
+ // any other field, update that field to the stronger value. The range must
+ // be completely valid before and it is guaranteed to be kept valid.
+ void optimize() {
+ assertInvariants();
+
+ if (hasInt32Bounds()) {
+ // Examine lower() and upper(), and if they imply a better exponent
+ // bound than max_exponent_, set that value as the new
+ // max_exponent_.
+ uint16_t newExponent = exponentImpliedByInt32Bounds();
+ if (newExponent < max_exponent_) {
+ max_exponent_ = newExponent;
+ assertInvariants();
+ }
+
+ // If we have a completely precise range, the value is an integer,
+ // since we can only represent integers.
+ if (canHaveFractionalPart_ && lower_ == upper_) {
+ canHaveFractionalPart_ = ExcludesFractionalParts;
+ assertInvariants();
+ }
+ }
+
+ // If the range doesn't include zero, it doesn't include negative zero.
+ if (canBeNegativeZero_ && !canBeZero()) {
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ assertInvariants();
+ }
+ }
+
+ // Set the range fields to the given raw values.
+ void rawInitialize(int32_t l, bool lb, int32_t h, bool hb,
+ FractionalPartFlag canHaveFractionalPart,
+ NegativeZeroFlag canBeNegativeZero, uint16_t e) {
+ lower_ = l;
+ upper_ = h;
+ hasInt32LowerBound_ = lb;
+ hasInt32UpperBound_ = hb;
+ canHaveFractionalPart_ = canHaveFractionalPart;
+ canBeNegativeZero_ = canBeNegativeZero;
+ max_exponent_ = e;
+ optimize();
+ }
+
+ // Construct a range from the given raw values.
+ Range(int32_t l, bool lb, int32_t h, bool hb,
+ FractionalPartFlag canHaveFractionalPart,
+ NegativeZeroFlag canBeNegativeZero, uint16_t e)
+ : symbolicLower_(nullptr), symbolicUpper_(nullptr) {
+ rawInitialize(l, lb, h, hb, canHaveFractionalPart, canBeNegativeZero, e);
+ }
+
+ public:
+ Range() : symbolicLower_(nullptr), symbolicUpper_(nullptr) { setUnknown(); }
+
+ Range(int64_t l, int64_t h, FractionalPartFlag canHaveFractionalPart,
+ NegativeZeroFlag canBeNegativeZero, uint16_t e)
+ : symbolicLower_(nullptr), symbolicUpper_(nullptr) {
+ set(l, h, canHaveFractionalPart, canBeNegativeZero, e);
+ }
+
+ Range(const Range& other)
+ : lower_(other.lower_),
+ upper_(other.upper_),
+ hasInt32LowerBound_(other.hasInt32LowerBound_),
+ hasInt32UpperBound_(other.hasInt32UpperBound_),
+ canHaveFractionalPart_(other.canHaveFractionalPart_),
+ canBeNegativeZero_(other.canBeNegativeZero_),
+ max_exponent_(other.max_exponent_),
+ symbolicLower_(nullptr),
+ symbolicUpper_(nullptr) {
+ assertInvariants();
+ }
+
+ // Construct a range from the given MDefinition. This differs from the
+ // MDefinition's range() method in that it describes the range of values
+ // *after* any bailout checks.
+ explicit Range(const MDefinition* def);
+
+ static Range* NewInt32Range(TempAllocator& alloc, int32_t l, int32_t h) {
+ return new (alloc) Range(l, h, ExcludesFractionalParts,
+ ExcludesNegativeZero, MaxInt32Exponent);
+ }
+
+ // Construct an int32 range containing just i. This is just a convenience
+ // wrapper around NewInt32Range.
+ static Range* NewInt32SingletonRange(TempAllocator& alloc, int32_t i) {
+ return NewInt32Range(alloc, i, i);
+ }
+
+ static Range* NewUInt32Range(TempAllocator& alloc, uint32_t l, uint32_t h) {
+ // For now, just pass them to the constructor as int64_t values.
+ // They'll become unbounded if they're not in the int32_t range.
+ return new (alloc) Range(l, h, ExcludesFractionalParts,
+ ExcludesNegativeZero, MaxUInt32Exponent);
+ }
+
+ // Construct a range containing values >= l and <= h. Note that this
+ // function treats negative zero as equal to zero, as >= and <= do. If the
+ // range includes zero, it is assumed to include negative zero too.
+ static Range* NewDoubleRange(TempAllocator& alloc, double l, double h) {
+ if (std::isnan(l) && std::isnan(h)) {
+ return nullptr;
+ }
+
+ Range* r = new (alloc) Range();
+ r->setDouble(l, h);
+ return r;
+ }
+
+ // Construct the strictest possible range containing d, or null if d is NaN.
+ // This function treats negative zero as distinct from zero, since this
+ // makes the strictest possible range containing zero a range which
+ // contains one value rather than two.
+ static Range* NewDoubleSingletonRange(TempAllocator& alloc, double d) {
+ if (std::isnan(d)) {
+ return nullptr;
+ }
+
+ Range* r = new (alloc) Range();
+ r->setDoubleSingleton(d);
+ return r;
+ }
+
+ void dump(GenericPrinter& out) const;
+ void dump() const;
+ [[nodiscard]] bool update(const Range* other);
+
+ // Unlike the other operations, unionWith is an in-place
+ // modification. This is to avoid a bunch of useless extra
+ // copying when chaining together unions when handling Phi
+ // nodes.
+ void unionWith(const Range* other);
+ static Range* intersect(TempAllocator& alloc, const Range* lhs,
+ const Range* rhs, bool* emptyRange);
+ static Range* add(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* sub(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* mul(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* and_(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* or_(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* not_(TempAllocator& alloc, const Range* op);
+ static Range* lsh(TempAllocator& alloc, const Range* lhs, int32_t c);
+ static Range* rsh(TempAllocator& alloc, const Range* lhs, int32_t c);
+ static Range* ursh(TempAllocator& alloc, const Range* lhs, int32_t c);
+ static Range* lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* abs(TempAllocator& alloc, const Range* op);
+ static Range* min(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* max(TempAllocator& alloc, const Range* lhs, const Range* rhs);
+ static Range* floor(TempAllocator& alloc, const Range* op);
+ static Range* ceil(TempAllocator& alloc, const Range* op);
+ static Range* sign(TempAllocator& alloc, const Range* op);
+ static Range* NaNToZero(TempAllocator& alloc, const Range* op);
+
+ [[nodiscard]] static bool negativeZeroMul(const Range* lhs, const Range* rhs);
+
+ bool isUnknownInt32() const {
+ return isInt32() && lower() == INT32_MIN && upper() == INT32_MAX;
+ }
+
+ bool isUnknown() const {
+ return !hasInt32LowerBound_ && !hasInt32UpperBound_ &&
+ canHaveFractionalPart_ && canBeNegativeZero_ &&
+ max_exponent_ == IncludesInfinityAndNaN;
+ }
+
+ bool hasInt32LowerBound() const { return hasInt32LowerBound_; }
+ bool hasInt32UpperBound() const { return hasInt32UpperBound_; }
+
+ // Test whether the value is known to be within [INT32_MIN,INT32_MAX].
+ // Note that this does not necessarily mean the value is an integer.
+ bool hasInt32Bounds() const {
+ return hasInt32LowerBound() && hasInt32UpperBound();
+ }
+
+ // Test whether the value is known to be representable as an int32.
+ bool isInt32() const {
+ return hasInt32Bounds() && !canHaveFractionalPart_ && !canBeNegativeZero_;
+ }
+
+ // Test whether the given value is known to be either 0 or 1.
+ bool isBoolean() const {
+ return lower() >= 0 && upper() <= 1 && !canHaveFractionalPart_ &&
+ !canBeNegativeZero_;
+ }
+
+ bool canHaveRoundingErrors() const {
+ return canHaveFractionalPart_ || canBeNegativeZero_ ||
+ max_exponent_ >= MaxTruncatableExponent;
+ }
+
+ // Test if an integer x belongs to the range.
+ bool contains(int32_t x) const { return x >= lower_ && x <= upper_; }
+
+ // Test whether the range contains zero (of either sign).
+ bool canBeZero() const { return contains(0); }
+
+ // Test whether the range contains NaN values.
+ bool canBeNaN() const { return max_exponent_ == IncludesInfinityAndNaN; }
+
+ // Test whether the range contains infinities or NaN values.
+ bool canBeInfiniteOrNaN() const { return max_exponent_ >= IncludesInfinity; }
+
+ FractionalPartFlag canHaveFractionalPart() const {
+ return canHaveFractionalPart_;
+ }
+
+ NegativeZeroFlag canBeNegativeZero() const { return canBeNegativeZero_; }
+
+ uint16_t exponent() const {
+ MOZ_ASSERT(!canBeInfiniteOrNaN());
+ return max_exponent_;
+ }
+
+ uint16_t numBits() const {
+ return exponent() + 1; // 2^0 -> 1
+ }
+
+ // Return the lower bound. Asserts that the value has an int32 bound.
+ int32_t lower() const {
+ MOZ_ASSERT(hasInt32LowerBound());
+ return lower_;
+ }
+
+ // Return the upper bound. Asserts that the value has an int32 bound.
+ int32_t upper() const {
+ MOZ_ASSERT(hasInt32UpperBound());
+ return upper_;
+ }
+
+ // Test whether all values in this range are finite and negative.
+ bool isFiniteNegative() const { return upper_ < 0 && !canBeInfiniteOrNaN(); }
+
+ // Test whether all values in this range are finite and non-negative.
+ bool isFiniteNonNegative() const {
+ return lower_ >= 0 && !canBeInfiniteOrNaN();
+ }
+
+ // Test whether a value in this range can possibly be a finite
+ // negative value. Note that "negative zero" is not considered negative.
+ bool canBeFiniteNegative() const { return lower_ < 0; }
+
+ // Test whether a value in this range can possibly be a finite
+ // non-negative value.
+ bool canBeFiniteNonNegative() const { return upper_ >= 0; }
+
+ // Test whether a value in this range can have the sign bit set (not
+ // counting NaN, where the sign bit is meaningless).
+ bool canHaveSignBitSet() const {
+ return !hasInt32LowerBound() || canBeFiniteNegative() ||
+ canBeNegativeZero();
+ }
+
+ // Set this range to have a lower bound not less than x.
+ void refineLower(int32_t x) {
+ assertInvariants();
+ hasInt32LowerBound_ = true;
+ lower_ = std::max(lower_, x);
+ optimize();
+ }
+
+ // Set this range to have an upper bound not greater than x.
+ void refineUpper(int32_t x) {
+ assertInvariants();
+ hasInt32UpperBound_ = true;
+ upper_ = std::min(upper_, x);
+ optimize();
+ }
+
+ // Set this range to exclude negative zero.
+ void refineToExcludeNegativeZero() {
+ assertInvariants();
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ optimize();
+ }
+
+ void setInt32(int32_t l, int32_t h) {
+ hasInt32LowerBound_ = true;
+ hasInt32UpperBound_ = true;
+ lower_ = l;
+ upper_ = h;
+ canHaveFractionalPart_ = ExcludesFractionalParts;
+ canBeNegativeZero_ = ExcludesNegativeZero;
+ max_exponent_ = exponentImpliedByInt32Bounds();
+ assertInvariants();
+ }
+
+ // Set this range to include values >= l and <= h. Note that this
+ // function treats negative zero as equal to zero, as >= and <= do. If the
+ // range includes zero, it is assumed to include negative zero too.
+ void setDouble(double l, double h);
+
+ // Set this range to the narrowest possible range containing d.
+ // This function treats negative zero as distinct from zero, since this
+ // makes the narrowest possible range containing zero a range which
+ // contains one value rather than two.
+ void setDoubleSingleton(double d);
+
+ void setUnknown() {
+ set(NoInt32LowerBound, NoInt32UpperBound, IncludesFractionalParts,
+ IncludesNegativeZero, IncludesInfinityAndNaN);
+ MOZ_ASSERT(isUnknown());
+ }
+
+ void set(int64_t l, int64_t h, FractionalPartFlag canHaveFractionalPart,
+ NegativeZeroFlag canBeNegativeZero, uint16_t e) {
+ max_exponent_ = e;
+ canHaveFractionalPart_ = canHaveFractionalPart;
+ canBeNegativeZero_ = canBeNegativeZero;
+ setLowerInit(l);
+ setUpperInit(h);
+ optimize();
+ }
+
+ // Make the lower end of this range at least INT32_MIN, and make
+ // the upper end of this range at most INT32_MAX.
+ void clampToInt32();
+
+ // If this range exceeds int32_t range, at either or both ends, change
+ // it to int32_t range. Otherwise do nothing.
+ void wrapAroundToInt32();
+
+ // If this range exceeds [0, 32) range, at either or both ends, change
+ // it to the [0, 32) range. Otherwise do nothing.
+ void wrapAroundToShiftCount();
+
+ // If this range exceeds [0, 1] range, at either or both ends, change
+ // it to the [0, 1] range. Otherwise do nothing.
+ void wrapAroundToBoolean();
+
+ const SymbolicBound* symbolicLower() const { return symbolicLower_; }
+ const SymbolicBound* symbolicUpper() const { return symbolicUpper_; }
+
+ void setSymbolicLower(SymbolicBound* bound) { symbolicLower_ = bound; }
+ void setSymbolicUpper(SymbolicBound* bound) { symbolicUpper_ = bound; }
+};
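+
+// Illustrative usage sketch (hypothetical values; a minimal example of the
+// interval arithmetic above, not code from this patch):
+//   Range* a = Range::NewInt32Range(alloc, 0, 10);
+//   Range* b = Range::NewInt32Range(alloc, 5, 20);
+//   Range* s = Range::add(alloc, a, b);
+//   // s is expected to cover [5, 30]: lower() == 5 and upper() == 30.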
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_RangeAnalysis_h */
diff --git a/js/src/jit/ReciprocalMulConstants.cpp b/js/src/jit/ReciprocalMulConstants.cpp
new file mode 100644
index 0000000000..956c2e62d9
--- /dev/null
+++ b/js/src/jit/ReciprocalMulConstants.cpp
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/ReciprocalMulConstants.h"
+
+#include "mozilla/Assertions.h"
+
+using namespace js::jit;
+
+ReciprocalMulConstants ReciprocalMulConstants::computeDivisionConstants(
+ uint32_t d, int maxLog) {
+ MOZ_ASSERT(maxLog >= 2 && maxLog <= 32);
+ // In what follows, 0 < d < 2^maxLog and d is not a power of 2.
+ MOZ_ASSERT(d < (uint64_t(1) << maxLog) && (d & (d - 1)) != 0);
+
+ // Speeding up division by non power-of-2 constants is possible by
+ // calculating, during compilation, a value M such that high-order
+ // bits of M*n correspond to the result of the division of n by d.
+ // No value of M can serve this purpose for arbitrarily big values
+ // of n but, for optimizing integer division, we're just concerned
+ // with values of n whose absolute value is bounded (by fitting in
+ // an integer type, say). With this in mind, we'll find a constant
+ // M as above that works for -2^maxLog <= n < 2^maxLog; maxLog can
+ // then be 31 for signed division or 32 for unsigned division.
+ //
+ // The original presentation of this technique appears in Hacker's
+ // Delight, a book by Henry S. Warren, Jr. A proof of correctness
+ // for our version follows; we'll denote maxLog by L in the proof,
+ // for conciseness.
+ //
+ // Formally, for |d| < 2^L, we'll compute two magic values M and s
+ // in the ranges 0 <= M < 2^(L+1) and 0 <= s <= L such that
+ // (M * n) >> (32 + s) = floor(n/d) if 0 <= n < 2^L
+ // (M * n) >> (32 + s) = ceil(n/d) - 1 if -2^L <= n < 0.
+ //
+ // Define p = 32 + s, M = ceil(2^p/d), and assume that s satisfies
+ // M - 2^p/d <= 2^(p-L)/d. (1)
+ // (Observe that p = CeilLog32(d) + L satisfies this, as the right
+ // side of (1) is at least one in this case). Then,
+ //
+ // a) If p <= CeilLog32(d) + L, then M < 2^(L+1) - 1.
+ // Proof: Indeed, M is monotone in p and, for p equal to the above
+ // value, the bounds 2^L > d >= 2^(p-L-1) + 1 readily imply that
+ //      2^p / d = 2^p/(d - 1) * (d - 1)/d
+ //             <= 2^(L+1) * (1 - 1/d) < 2^(L+1) - 2.
+ // The claim follows by applying the ceiling function.
+ //
+ // b) For any 0 <= n < 2^L, floor(Mn/2^p) = floor(n/d).
+ // Proof: Put x = floor(Mn/2^p); it's the unique integer for which
+ // Mn/2^p - 1 < x <= Mn/2^p. (2)
+ // Using M >= 2^p/d on the LHS and (1) on the RHS, we get
+ // n/d - 1 < x <= n/d + n/(2^L d) < n/d + 1/d.
+ // Since x is an integer, it's not in the interval (n/d, (n+1)/d),
+ // and so n/d - 1 < x <= n/d, which implies x = floor(n/d).
+ //
+ // c) For any -2^L <= n < 0, floor(Mn/2^p) + 1 = ceil(n/d).
+ // Proof: The proof is similar. Equation (2) holds as above. Using
+ // M > 2^p/d (d isn't a power of 2) on the RHS and (1) on the LHS,
+ // n/d + n/(2^L d) - 1 < x < n/d.
+ //   Using n >= -2^L and adding 1,
+ // n/d - 1/d < x + 1 < n/d + 1.
+ // Since x + 1 is an integer, this implies n/d <= x + 1 < n/d + 1.
+ // In other words, x + 1 = ceil(n/d).
+ //
+ // Condition (1) isn't necessary for the existence of M and s with
+ // the properties above. Hacker's Delight provides a slightly less
+ // restrictive condition when d >= 196611, at the cost of a 3-page
+ // proof of correctness, for the case L = 31.
+ //
+ // Note that, since d*M - 2^p = d - (2^p)%d, (1) can be written as
+ // 2^(p-L) >= d - (2^p)%d.
+ // In order to avoid overflow in the (2^p) % d calculation, we can
+ // compute it as (2^p-1) % d + 1, where 2^p-1 can then be computed
+ // without overflow as UINT64_MAX >> (64-p).
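+ //
+ // Worked example (illustrative, not part of the original derivation): for
+ // d = 7 and maxLog = 32, the loop below settles on p = 35, because
+ // 2^(35-32) = 8 >= 7 - (2^35 % 7) = 3, while smaller p fail the test.
+ // This yields multiplier = ceil(2^35 / 7) = 4908534053 and shiftAmount = 3,
+ // and indeed, in exact arithmetic, (4908534053 * n) >> 35 == n / 7 for all
+ // 0 <= n < 2^32 (e.g. n = 100 gives 14). Note that this multiplier exceeds
+ // 2^32, the situation handled by the adjustment described below.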
+
+ // We now compute the least p >= 32 with the property above...
+ int32_t p = 32;
+ while ((uint64_t(1) << (p - maxLog)) + (UINT64_MAX >> (64 - p)) % d + 1 < d) {
+ p++;
+ }
+
+ // ...and the corresponding M. For either the signed (L=31) or the
+ // unsigned (L=32) case, this value can be too large to fit in L bits
+ // (cf. item a). Codegen can still multiply by M by multiplying by
+ // (M - 2^L) and adjusting the value afterwards, if this is the case.
+ ReciprocalMulConstants rmc;
+ rmc.multiplier = (UINT64_MAX >> (64 - p)) / d + 1;
+ rmc.shiftAmount = p - 32;
+
+ return rmc;
+}
diff --git a/js/src/jit/ReciprocalMulConstants.h b/js/src/jit/ReciprocalMulConstants.h
new file mode 100644
index 0000000000..d305a4caa5
--- /dev/null
+++ b/js/src/jit/ReciprocalMulConstants.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ReciprocalMulConstants_h
+#define jit_ReciprocalMulConstants_h
+
+#include <stdint.h>
+
+namespace js::jit {
+
+struct ReciprocalMulConstants {
+ int64_t multiplier;
+ int32_t shiftAmount;
+
+ static ReciprocalMulConstants computeSignedDivisionConstants(uint32_t d) {
+ return computeDivisionConstants(d, 31);
+ }
+
+ static ReciprocalMulConstants computeUnsignedDivisionConstants(uint32_t d) {
+ return computeDivisionConstants(d, 32);
+ }
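+
+ // Illustrative sketch of intended use (hypothetical code, not part of this
+ // header): for an unsigned n and a non-power-of-two constant d,
+ //   ReciprocalMulConstants rmc =
+ //       ReciprocalMulConstants::computeUnsignedDivisionConstants(d);
+ //   uint32_t q = uint32_t((uint64_t(rmc.multiplier) * n) >>
+ //                         (32 + rmc.shiftAmount));
+ // yields q == n / d whenever multiplier fits in 32 bits; when it does not
+ // (it may be as large as roughly 2^33), codegen instead multiplies by
+ // (multiplier - 2^32) and adjusts the result, as described in
+ // ReciprocalMulConstants.cpp.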
+
+ private:
+ static ReciprocalMulConstants computeDivisionConstants(uint32_t d,
+ int maxLog);
+};
+
+} // namespace js::jit
+
+#endif /* jit_ReciprocalMulConstants_h */
diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
new file mode 100644
index 0000000000..14fc051eb7
--- /dev/null
+++ b/js/src/jit/Recover.cpp
@@ -0,0 +1,2116 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Recover.h"
+
+#include "jsmath.h"
+
+#include "builtin/Object.h"
+#include "builtin/RegExp.h"
+#include "builtin/String.h"
+#include "jit/Bailouts.h"
+#include "jit/CompileInfo.h"
+#include "jit/Ion.h"
+#include "jit/JitSpewer.h"
+#include "jit/JSJitFrameIter.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "jit/VMFunctions.h"
+#include "util/DifferentialTesting.h"
+#include "vm/BigIntType.h"
+#include "vm/EqualityOperations.h"
+#include "vm/Interpreter.h"
+#include "vm/Iteration.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/StringType.h"
+
+#include "vm/Interpreter-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+bool MNode::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_CRASH("This instruction is not serializable");
+}
+
+void RInstruction::readRecoverData(CompactBufferReader& reader,
+ RInstructionStorage* raw) {
+ uint32_t op = reader.readUnsigned();
+ switch (Opcode(op)) {
+#define MATCH_OPCODES_(op) \
+ case Recover_##op: \
+ static_assert(sizeof(R##op) <= sizeof(RInstructionStorage), \
+ "storage space must be big enough to store R" #op); \
+ static_assert(alignof(R##op) <= alignof(RInstructionStorage), \
+ "storage space must be aligned adequate to store R" #op); \
+ new (raw->addr()) R##op(reader); \
+ break;
+
+ RECOVER_OPCODE_LIST(MATCH_OPCODES_)
+#undef MATCH_OPCODES_
+
+ case Recover_Invalid:
+ default:
+ MOZ_CRASH("Bad decoding of the previous instruction?");
+ }
+}
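+
+// For a single opcode, MATCH_OPCODES_ above expands to roughly the following
+// (illustrative, using RBitNot as an example; assertion messages elided):
+//   case Recover_BitNot:
+//     static_assert(sizeof(RBitNot) <= sizeof(RInstructionStorage), ...);
+//     static_assert(alignof(RBitNot) <= alignof(RInstructionStorage), ...);
+//     new (raw->addr()) RBitNot(reader);
+//     break;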
+
+bool MResumePoint::writeRecoverData(CompactBufferWriter& writer) const {
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ResumePoint));
+
+ MBasicBlock* bb = block();
+ bool hasFun = bb->info().hasFunMaybeLazy();
+ uint32_t nargs = bb->info().nargs();
+ JSScript* script = bb->info().script();
+ uint32_t exprStack = stackDepth() - bb->info().ninvoke();
+
+#ifdef DEBUG
+ // Ensure that all snapshots which are encoded can safely be used for
+ // bailouts.
+ uint32_t numIntermediate = NumIntermediateValues(mode());
+ if (JSContext* cx = GetJitContext()->cx) {
+ if (!AssertBailoutStackDepth(cx, script, pc(), mode(),
+ exprStack - numIntermediate)) {
+ return false;
+ }
+ }
+#endif
+
+ uint32_t formalArgs = CountArgSlots(script, hasFun, nargs);
+
+ // Test that we honor the maximum number of arguments at all times. This is
+ // a sanity check and not an algorithm limit, so the check might be a bit
+ // too loose. +4 accounts for the scope chain, return value, this value,
+ // and possibly an arguments object.
+ MOZ_ASSERT(formalArgs < SNAPSHOT_MAX_NARGS + 4);
+
+#ifdef JS_JITSPEW
+ uint32_t implicit = StartArgSlot(script);
+#endif
+ uint32_t nallocs = formalArgs + script->nfixed() + exprStack;
+
+ JitSpew(JitSpew_IonSnapshots,
+ "Starting frame; implicit %u, formals %u, fixed %zu, exprs %u",
+ implicit, formalArgs - implicit, script->nfixed(), exprStack);
+
+ uint32_t pcOff = script->pcToOffset(pc());
+ JitSpew(JitSpew_IonSnapshots, "Writing pc offset %u, mode %s, nslots %u",
+ pcOff, ResumeModeToString(mode()), nallocs);
+
+ uint32_t pcOffAndMode =
+ (pcOff << RResumePoint::PCOffsetShift) | uint32_t(mode());
+ MOZ_RELEASE_ASSERT((pcOffAndMode >> RResumePoint::PCOffsetShift) == pcOff,
+ "pcOff doesn't fit in pcOffAndMode");
+ writer.writeUnsigned(pcOffAndMode);
+
+ writer.writeUnsigned(nallocs);
+ return true;
+}
+
+RResumePoint::RResumePoint(CompactBufferReader& reader) {
+ pcOffsetAndMode_ = reader.readUnsigned();
+ numOperands_ = reader.readUnsigned();
+ JitSpew(JitSpew_IonSnapshots,
+ "Read RResumePoint (pc offset %u, mode %s, nslots %u)", pcOffset(),
+ ResumeModeToString(mode()), numOperands_);
+}
+
+bool RResumePoint::recover(JSContext* cx, SnapshotIterator& iter) const {
+ MOZ_CRASH("This instruction is not recoverable.");
+}
+
+bool MBitNot::writeRecoverData(CompactBufferWriter& writer) const {
+ // 64-bit int bitnots exist only when compiling wasm; they exist for
+ // neither JS nor asm.js, so we don't expect them here.
+ MOZ_ASSERT(type() != MIRType::Int64);
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BitNot));
+ return true;
+}
+
+RBitNot::RBitNot(CompactBufferReader& reader) {}
+
+bool RBitNot::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue operand(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::BitNot(cx, &operand, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBitAnd::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BitAnd));
+ return true;
+}
+
+RBitAnd::RBitAnd(CompactBufferReader& reader) {}
+
+bool RBitAnd::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ if (!js::BitAnd(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBitOr::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BitOr));
+ return true;
+}
+
+RBitOr::RBitOr(CompactBufferReader& reader) {}
+
+bool RBitOr::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ if (!js::BitOr(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBitXor::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BitXor));
+ return true;
+}
+
+RBitXor::RBitXor(CompactBufferReader& reader) {}
+
+bool RBitXor::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::BitXor(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MLsh::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Lsh));
+ return true;
+}
+
+RLsh::RLsh(CompactBufferReader& reader) {}
+
+bool RLsh::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ if (!js::BitLsh(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MRsh::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Rsh));
+ return true;
+}
+
+RRsh::RRsh(CompactBufferReader& reader) {}
+
+bool RRsh::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ if (!js::BitRsh(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MUrsh::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Ursh));
+ return true;
+}
+
+RUrsh::RUrsh(CompactBufferReader& reader) {}
+
+bool RUrsh::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+
+ RootedValue result(cx);
+ if (!js::UrshValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MSignExtendInt32::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_SignExtendInt32));
+ MOZ_ASSERT(Mode(uint8_t(mode_)) == mode_);
+ writer.writeByte(uint8_t(mode_));
+ return true;
+}
+
+RSignExtendInt32::RSignExtendInt32(CompactBufferReader& reader) {
+ mode_ = reader.readByte();
+}
+
+bool RSignExtendInt32::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue operand(cx, iter.read());
+
+ int32_t i;
+ if (!ToInt32(cx, operand, &i)) {
+ return false;
+ }
+
+ int32_t result;
+ switch (MSignExtendInt32::Mode(mode_)) {
+ case MSignExtendInt32::Byte:
+ result = static_cast<int8_t>(i);
+ break;
+ case MSignExtendInt32::Half:
+ result = static_cast<int16_t>(i);
+ break;
+ }
+
+ iter.storeInstructionResult(JS::Int32Value(result));
+ return true;
+}
+
+bool MAdd::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Add));
+ writer.writeByte(type() == MIRType::Float32);
+ return true;
+}
+
+RAdd::RAdd(CompactBufferReader& reader) {
+ isFloatOperation_ = reader.readByte();
+}
+
+bool RAdd::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+ if (!js::AddValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ // MIRType::Float32 is a specialization embedding the fact that the result is
+ // rounded to a Float32.
+ if (isFloatOperation_ && !RoundFloat32(cx, result, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MSub::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Sub));
+ writer.writeByte(type() == MIRType::Float32);
+ return true;
+}
+
+RSub::RSub(CompactBufferReader& reader) {
+ isFloatOperation_ = reader.readByte();
+}
+
+bool RSub::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+ if (!js::SubValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ // MIRType::Float32 is a specialization embedding the fact that the result is
+ // rounded to a Float32.
+ if (isFloatOperation_ && !RoundFloat32(cx, result, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MMul::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Mul));
+ writer.writeByte(type() == MIRType::Float32);
+ MOZ_ASSERT(Mode(uint8_t(mode_)) == mode_);
+ writer.writeByte(uint8_t(mode_));
+ return true;
+}
+
+RMul::RMul(CompactBufferReader& reader) {
+ isFloatOperation_ = reader.readByte();
+ mode_ = reader.readByte();
+}
+
+bool RMul::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ if (MMul::Mode(mode_) == MMul::Normal) {
+ if (!js::MulValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ // MIRType::Float32 is a specialization embedding the fact that the
+ // result is rounded to a Float32.
+ if (isFloatOperation_ && !RoundFloat32(cx, result, &result)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(MMul::Mode(mode_) == MMul::Integer);
+ if (!js::math_imul_handle(cx, lhs, rhs, &result)) {
+ return false;
+ }
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MDiv::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Div));
+ writer.writeByte(type() == MIRType::Float32);
+ return true;
+}
+
+RDiv::RDiv(CompactBufferReader& reader) {
+ isFloatOperation_ = reader.readByte();
+}
+
+bool RDiv::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::DivValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ // MIRType::Float32 is a specialization embedding the fact that the result is
+ // rounded to a Float32.
+ if (isFloatOperation_ && !RoundFloat32(cx, result, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MMod::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Mod));
+ return true;
+}
+
+RMod::RMod(CompactBufferReader& reader) {}
+
+bool RMod::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+ if (!js::ModValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MNot::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Not));
+ return true;
+}
+
+RNot::RNot(CompactBufferReader& reader) {}
+
+bool RNot::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue v(cx, iter.read());
+ RootedValue result(cx);
+
+ result.setBoolean(!ToBoolean(v));
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntAdd::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntAdd));
+ return true;
+}
+
+RBigIntAdd::RBigIntAdd(CompactBufferReader& reader) {}
+
+bool RBigIntAdd::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ if (!js::AddValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntSub::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntSub));
+ return true;
+}
+
+RBigIntSub::RBigIntSub(CompactBufferReader& reader) {}
+
+bool RBigIntSub::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ if (!js::SubValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntMul::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntMul));
+ return true;
+}
+
+RBigIntMul::RBigIntMul(CompactBufferReader& reader) {}
+
+bool RBigIntMul::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ if (!js::MulValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntDiv::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntDiv));
+ return true;
+}
+
+RBigIntDiv::RBigIntDiv(CompactBufferReader& reader) {}
+
+bool RBigIntDiv::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ MOZ_ASSERT(!rhs.toBigInt()->isZero(),
+ "division by zero throws and therefore can't be recovered");
+ if (!js::DivValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntMod::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntMod));
+ return true;
+}
+
+RBigIntMod::RBigIntMod(CompactBufferReader& reader) {}
+
+bool RBigIntMod::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ MOZ_ASSERT(!rhs.toBigInt()->isZero(),
+ "division by zero throws and therefore can't be recovered");
+ if (!js::ModValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntPow::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntPow));
+ return true;
+}
+
+RBigIntPow::RBigIntPow(CompactBufferReader& reader) {}
+
+bool RBigIntPow::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ MOZ_ASSERT(!rhs.toBigInt()->isNegative(),
+ "negative exponent throws and therefore can't be recovered");
+ if (!js::PowValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntBitAnd::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntBitAnd));
+ return true;
+}
+
+RBigIntBitAnd::RBigIntBitAnd(CompactBufferReader& reader) {}
+
+bool RBigIntBitAnd::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ if (!js::BitAnd(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntBitOr::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntBitOr));
+ return true;
+}
+
+RBigIntBitOr::RBigIntBitOr(CompactBufferReader& reader) {}
+
+bool RBigIntBitOr::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ if (!js::BitOr(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntBitXor::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntBitXor));
+ return true;
+}
+
+RBigIntBitXor::RBigIntBitXor(CompactBufferReader& reader) {}
+
+bool RBigIntBitXor::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ if (!js::BitXor(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntLsh::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntLsh));
+ return true;
+}
+
+RBigIntLsh::RBigIntLsh(CompactBufferReader& reader) {}
+
+bool RBigIntLsh::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ if (!js::BitLsh(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntRsh::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntRsh));
+ return true;
+}
+
+RBigIntRsh::RBigIntRsh(CompactBufferReader& reader) {}
+
+bool RBigIntRsh::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
+ if (!js::BitRsh(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntIncrement::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntIncrement));
+ return true;
+}
+
+RBigIntIncrement::RBigIntIncrement(CompactBufferReader& reader) {}
+
+bool RBigIntIncrement::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue operand(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(operand.isBigInt());
+ if (!js::IncOperation(cx, operand, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntDecrement::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntDecrement));
+ return true;
+}
+
+RBigIntDecrement::RBigIntDecrement(CompactBufferReader& reader) {}
+
+bool RBigIntDecrement::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue operand(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(operand.isBigInt());
+ if (!js::DecOperation(cx, operand, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntNegate::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntNegate));
+ return true;
+}
+
+RBigIntNegate::RBigIntNegate(CompactBufferReader& reader) {}
+
+bool RBigIntNegate::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue operand(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(operand.isBigInt());
+ if (!js::NegOperation(cx, &operand, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MBigIntBitNot::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntBitNot));
+ return true;
+}
+
+RBigIntBitNot::RBigIntBitNot(CompactBufferReader& reader) {}
+
+bool RBigIntBitNot::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue operand(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(operand.isBigInt());
+ if (!js::BitNot(cx, &operand, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MCompare::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Compare));
+
+ static_assert(sizeof(JSOp) == sizeof(uint8_t));
+ writer.writeByte(uint8_t(jsop_));
+ return true;
+}
+
+RCompare::RCompare(CompactBufferReader& reader) {
+ jsop_ = JSOp(reader.readByte());
+
+ MOZ_ASSERT(IsEqualityOp(jsop_) || IsRelationalOp(jsop_));
+}
+
+bool RCompare::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+
+ bool result;
+ switch (jsop_) {
+ case JSOp::Eq:
+ case JSOp::Ne:
+ if (!js::LooselyEqual(cx, lhs, rhs, &result)) {
+ return false;
+ }
+ if (jsop_ == JSOp::Ne) {
+ result = !result;
+ }
+ break;
+ case JSOp::StrictEq:
+ case JSOp::StrictNe:
+ if (!StrictlyEqual(cx, lhs, rhs, &result)) {
+ return false;
+ }
+ if (jsop_ == JSOp::StrictNe) {
+ result = !result;
+ }
+ break;
+ case JSOp::Lt:
+ if (!js::LessThan(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+ break;
+ case JSOp::Le:
+ if (!js::LessThanOrEqual(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+ break;
+ case JSOp::Gt:
+ if (!js::GreaterThan(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+ break;
+ case JSOp::Ge:
+ if (!js::GreaterThanOrEqual(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected op.");
+ }
+
+ iter.storeInstructionResult(BooleanValue(result));
+ return true;
+}
+
+bool MConcat::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Concat));
+ return true;
+}
+
+RConcat::RConcat(CompactBufferReader& reader) {}
+
+bool RConcat::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue lhs(cx, iter.read());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!lhs.isObject() && !rhs.isObject());
+ if (!js::AddValues(cx, &lhs, &rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+RStringLength::RStringLength(CompactBufferReader& reader) {}
+
+bool RStringLength::recover(JSContext* cx, SnapshotIterator& iter) const {
+ JSString* string = iter.read().toString();
+
+ static_assert(JSString::MAX_LENGTH <= INT32_MAX,
+ "Can cast string length to int32_t");
+
+ iter.storeInstructionResult(Int32Value(int32_t(string->length())));
+ return true;
+}
+
+bool MStringLength::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_StringLength));
+ return true;
+}
+
+bool MArgumentsLength::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ArgumentsLength));
+ return true;
+}
+
+RArgumentsLength::RArgumentsLength(CompactBufferReader& reader) {}
+
+bool RArgumentsLength::recover(JSContext* cx, SnapshotIterator& iter) const {
+ uintptr_t numActualArgs = iter.frame()->numActualArgs();
+
+ static_assert(ARGS_LENGTH_MAX <= INT32_MAX,
+ "Can cast arguments count to int32_t");
+ MOZ_ASSERT(numActualArgs <= ARGS_LENGTH_MAX);
+
+ iter.storeInstructionResult(JS::Int32Value(int32_t(numActualArgs)));
+ return true;
+}
+
+bool MFloor::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Floor));
+ return true;
+}
+
+RFloor::RFloor(CompactBufferReader& reader) {}
+
+bool RFloor::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double num = iter.read().toNumber();
+ double result = js::math_floor_impl(num);
+
+ iter.storeInstructionResult(NumberValue(result));
+ return true;
+}
+
+bool MCeil::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Ceil));
+ return true;
+}
+
+RCeil::RCeil(CompactBufferReader& reader) {}
+
+bool RCeil::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double num = iter.read().toNumber();
+ double result = js::math_ceil_impl(num);
+
+ iter.storeInstructionResult(NumberValue(result));
+ return true;
+}
+
+bool MRound::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Round));
+ return true;
+}
+
+RRound::RRound(CompactBufferReader& reader) {}
+
+bool RRound::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double num = iter.read().toNumber();
+ double result = js::math_round_impl(num);
+
+ iter.storeInstructionResult(NumberValue(result));
+ return true;
+}
+
+bool MTrunc::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Trunc));
+ return true;
+}
+
+RTrunc::RTrunc(CompactBufferReader& reader) {}
+
+bool RTrunc::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double num = iter.read().toNumber();
+ double result = js::math_trunc_impl(num);
+
+ iter.storeInstructionResult(NumberValue(result));
+ return true;
+}
+
+bool MCharCodeAt::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_CharCodeAt));
+ return true;
+}
+
+RCharCodeAt::RCharCodeAt(CompactBufferReader& reader) {}
+
+bool RCharCodeAt::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedString lhs(cx, iter.read().toString());
+ RootedValue rhs(cx, iter.read());
+ RootedValue result(cx);
+
+ if (!js::str_charCodeAt_impl(cx, lhs, rhs, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MFromCharCode::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_FromCharCode));
+ return true;
+}
+
+RFromCharCode::RFromCharCode(CompactBufferReader& reader) {}
+
+bool RFromCharCode::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue operand(cx, iter.read());
+ RootedValue result(cx);
+
+ MOZ_ASSERT(!operand.isObject());
+ if (!js::str_fromCharCode_one_arg(cx, operand, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MPow::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Pow));
+ return true;
+}
+
+RPow::RPow(CompactBufferReader& reader) {}
+
+bool RPow::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double base = iter.read().toNumber();
+ double power = iter.read().toNumber();
+ double result = ecmaPow(base, power);
+
+ iter.storeInstructionResult(NumberValue(result));
+ return true;
+}
+
+bool MPowHalf::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_PowHalf));
+ return true;
+}
+
+RPowHalf::RPowHalf(CompactBufferReader& reader) {}
+
+bool RPowHalf::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double base = iter.read().toNumber();
+ double power = 0.5;
+ double result = ecmaPow(base, power);
+
+ iter.storeInstructionResult(NumberValue(result));
+ return true;
+}
+
+bool MMinMax::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_MinMax));
+ writer.writeByte(isMax_);
+ return true;
+}
+
+RMinMax::RMinMax(CompactBufferReader& reader) { isMax_ = reader.readByte(); }
+
+bool RMinMax::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double x = iter.read().toNumber();
+ double y = iter.read().toNumber();
+
+ double result;
+ if (isMax_) {
+ result = js::math_max_impl(x, y);
+ } else {
+ result = js::math_min_impl(x, y);
+ }
+
+ iter.storeInstructionResult(NumberValue(result));
+ return true;
+}
+
+bool MAbs::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Abs));
+ return true;
+}
+
+RAbs::RAbs(CompactBufferReader& reader) {}
+
+bool RAbs::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double num = iter.read().toNumber();
+ double result = js::math_abs_impl(num);
+
+ iter.storeInstructionResult(NumberValue(result));
+ return true;
+}
+
+bool MSqrt::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Sqrt));
+ writer.writeByte(type() == MIRType::Float32);
+ return true;
+}
+
+RSqrt::RSqrt(CompactBufferReader& reader) {
+ isFloatOperation_ = reader.readByte();
+}
+
+bool RSqrt::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double num = iter.read().toNumber();
+ double result = js::math_sqrt_impl(num);
+
+ // MIRType::Float32 is a specialization embedding the fact that the result is
+ // rounded to a Float32.
+ if (isFloatOperation_) {
+ result = js::RoundFloat32(result);
+ }
+
+ iter.storeInstructionResult(DoubleValue(result));
+ return true;
+}
+
+bool MAtan2::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Atan2));
+ return true;
+}
+
+RAtan2::RAtan2(CompactBufferReader& reader) {}
+
+bool RAtan2::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double y = iter.read().toNumber();
+ double x = iter.read().toNumber();
+ double result = js::ecmaAtan2(y, x);
+
+ iter.storeInstructionResult(DoubleValue(result));
+ return true;
+}
+
+bool MHypot::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Hypot));
+ writer.writeUnsigned(uint32_t(numOperands()));
+ return true;
+}
+
+RHypot::RHypot(CompactBufferReader& reader)
+ : numOperands_(reader.readUnsigned()) {}
+
+bool RHypot::recover(JSContext* cx, SnapshotIterator& iter) const {
+ JS::RootedValueVector vec(cx);
+
+ if (!vec.reserve(numOperands_)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numOperands_; ++i) {
+ vec.infallibleAppend(iter.read());
+ }
+
+ RootedValue result(cx);
+
+ if (!js::math_hypot_handle(cx, vec, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MNearbyInt::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ switch (roundingMode_) {
+ case RoundingMode::Up:
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Ceil));
+ return true;
+ case RoundingMode::Down:
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Floor));
+ return true;
+ case RoundingMode::TowardsZero:
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Trunc));
+ return true;
+ default:
+ MOZ_CRASH("Unsupported rounding mode.");
+ }
+}
+
+RNearbyInt::RNearbyInt(CompactBufferReader& reader) {
+ roundingMode_ = reader.readByte();
+}
+
+bool RNearbyInt::recover(JSContext* cx, SnapshotIterator& iter) const {
+ MOZ_CRASH("Unsupported rounding mode.");
+}
+
+bool MSign::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Sign));
+ return true;
+}
+
+RSign::RSign(CompactBufferReader& reader) {}
+
+bool RSign::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double num = iter.read().toNumber();
+ double result = js::math_sign_impl(num);
+
+ iter.storeInstructionResult(NumberValue(result));
+ return true;
+}
+
+bool MMathFunction::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ switch (function_) {
+ case UnaryMathFunction::Ceil:
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Ceil));
+ return true;
+ case UnaryMathFunction::Floor:
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Floor));
+ return true;
+ case UnaryMathFunction::Round:
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Round));
+ return true;
+ case UnaryMathFunction::Trunc:
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Trunc));
+ return true;
+ case UnaryMathFunction::SinNative:
+ case UnaryMathFunction::SinFdlibm:
+ case UnaryMathFunction::CosNative:
+ case UnaryMathFunction::CosFdlibm:
+ case UnaryMathFunction::TanNative:
+ case UnaryMathFunction::TanFdlibm:
+ case UnaryMathFunction::Log:
+ case UnaryMathFunction::Exp:
+ case UnaryMathFunction::ACos:
+ case UnaryMathFunction::ASin:
+ case UnaryMathFunction::ATan:
+ case UnaryMathFunction::Log10:
+ case UnaryMathFunction::Log2:
+ case UnaryMathFunction::Log1P:
+ case UnaryMathFunction::ExpM1:
+ case UnaryMathFunction::CosH:
+ case UnaryMathFunction::SinH:
+ case UnaryMathFunction::TanH:
+ case UnaryMathFunction::ACosH:
+ case UnaryMathFunction::ASinH:
+ case UnaryMathFunction::ATanH:
+ case UnaryMathFunction::Cbrt:
+ static_assert(sizeof(UnaryMathFunction) == sizeof(uint8_t));
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_MathFunction));
+ writer.writeByte(uint8_t(function_));
+ return true;
+ }
+ MOZ_CRASH("Unknown math function.");
+}
+
+RMathFunction::RMathFunction(CompactBufferReader& reader) {
+ function_ = UnaryMathFunction(reader.readByte());
+}
+
+bool RMathFunction::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double num = iter.read().toNumber();
+
+ double result;
+ switch (function_) {
+ case UnaryMathFunction::SinNative:
+ result = js::math_sin_native_impl(num);
+ break;
+ case UnaryMathFunction::SinFdlibm:
+ result = js::math_sin_fdlibm_impl(num);
+ break;
+ case UnaryMathFunction::CosNative:
+ result = js::math_cos_native_impl(num);
+ break;
+ case UnaryMathFunction::CosFdlibm:
+ result = js::math_cos_fdlibm_impl(num);
+ break;
+ case UnaryMathFunction::TanNative:
+ result = js::math_tan_native_impl(num);
+ break;
+ case UnaryMathFunction::TanFdlibm:
+ result = js::math_tan_fdlibm_impl(num);
+ break;
+ case UnaryMathFunction::Log:
+ result = js::math_log_impl(num);
+ break;
+ case UnaryMathFunction::Exp:
+ result = js::math_exp_impl(num);
+ break;
+ case UnaryMathFunction::ACos:
+ result = js::math_acos_impl(num);
+ break;
+ case UnaryMathFunction::ASin:
+ result = js::math_asin_impl(num);
+ break;
+ case UnaryMathFunction::ATan:
+ result = js::math_atan_impl(num);
+ break;
+ case UnaryMathFunction::Log10:
+ result = js::math_log10_impl(num);
+ break;
+ case UnaryMathFunction::Log2:
+ result = js::math_log2_impl(num);
+ break;
+ case UnaryMathFunction::Log1P:
+ result = js::math_log1p_impl(num);
+ break;
+ case UnaryMathFunction::ExpM1:
+ result = js::math_expm1_impl(num);
+ break;
+ case UnaryMathFunction::CosH:
+ result = js::math_cosh_impl(num);
+ break;
+ case UnaryMathFunction::SinH:
+ result = js::math_sinh_impl(num);
+ break;
+ case UnaryMathFunction::TanH:
+ result = js::math_tanh_impl(num);
+ break;
+ case UnaryMathFunction::ACosH:
+ result = js::math_acosh_impl(num);
+ break;
+ case UnaryMathFunction::ASinH:
+ result = js::math_asinh_impl(num);
+ break;
+ case UnaryMathFunction::ATanH:
+ result = js::math_atanh_impl(num);
+ break;
+ case UnaryMathFunction::Cbrt:
+ result = js::math_cbrt_impl(num);
+ break;
+
+ case UnaryMathFunction::Trunc:
+ case UnaryMathFunction::Floor:
+ case UnaryMathFunction::Ceil:
+ case UnaryMathFunction::Round:
+ // These have their own recover instructions.
+ MOZ_CRASH("Unexpected rounding math function.");
+ }
+
+ iter.storeInstructionResult(DoubleValue(result));
+ return true;
+}
+
+bool MRandom::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(this->canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Random));
+ return true;
+}
+
+bool MRandom::canRecoverOnBailout() const {
+ return !js::SupportDifferentialTesting();
+}
+
+RRandom::RRandom(CompactBufferReader& reader) {}
+
+bool RRandom::recover(JSContext* cx, SnapshotIterator& iter) const {
+ iter.storeInstructionResult(DoubleValue(math_random_impl(cx)));
+ return true;
+}
+
+bool MStringSplit::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_StringSplit));
+ return true;
+}
+
+RStringSplit::RStringSplit(CompactBufferReader& reader) {}
+
+bool RStringSplit::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedString str(cx, iter.read().toString());
+ RootedString sep(cx, iter.read().toString());
+
+ JSObject* res = StringSplitString(cx, str, sep, INT32_MAX);
+ if (!res) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*res));
+ return true;
+}
+
+bool MNaNToZero::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NaNToZero));
+ return true;
+}
+
+RNaNToZero::RNaNToZero(CompactBufferReader& reader) {}
+
+bool RNaNToZero::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double v = iter.read().toNumber();
+ if (std::isnan(v) || mozilla::IsNegativeZero(v)) {
+ v = 0.0;
+ }
+
+ iter.storeInstructionResult(DoubleValue(v));
+ return true;
+}
+
+bool MRegExpMatcher::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_RegExpMatcher));
+ return true;
+}
+
+RRegExpMatcher::RRegExpMatcher(CompactBufferReader& reader) {}
+
+bool RRegExpMatcher::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedObject regexp(cx, &iter.read().toObject());
+ RootedString input(cx, iter.read().toString());
+ int32_t lastIndex = iter.read().toInt32();
+
+ RootedValue result(cx);
+ if (!RegExpMatcherRaw(cx, regexp, input, lastIndex, nullptr, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(result);
+ return true;
+}
+
+bool MRegExpSearcher::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_RegExpSearcher));
+ return true;
+}
+
+RRegExpSearcher::RRegExpSearcher(CompactBufferReader& reader) {}
+
+bool RRegExpSearcher::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedObject regexp(cx, &iter.read().toObject());
+ RootedString input(cx, iter.read().toString());
+ int32_t lastIndex = iter.read().toInt32();
+
+ int32_t result;
+ if (!RegExpSearcherRaw(cx, regexp, input, lastIndex, nullptr, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(Int32Value(result));
+ return true;
+}
+
+bool MTypeOf::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_TypeOf));
+ return true;
+}
+
+RTypeOf::RTypeOf(CompactBufferReader& reader) {}
+
+bool RTypeOf::recover(JSContext* cx, SnapshotIterator& iter) const {
+ JS::Value v = iter.read();
+
+ iter.storeInstructionResult(Int32Value(TypeOfValue(v)));
+ return true;
+}
+
+bool MTypeOfName::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_TypeOfName));
+ return true;
+}
+
+RTypeOfName::RTypeOfName(CompactBufferReader& reader) {}
+
+bool RTypeOfName::recover(JSContext* cx, SnapshotIterator& iter) const {
+ int32_t type = iter.read().toInt32();
+ MOZ_ASSERT(JSTYPE_UNDEFINED <= type && type < JSTYPE_LIMIT);
+
+ JSString* name = TypeName(JSType(type), *cx->runtime()->commonNames);
+ iter.storeInstructionResult(StringValue(name));
+ return true;
+}
+
+bool MToDouble::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ToDouble));
+ return true;
+}
+
+RToDouble::RToDouble(CompactBufferReader& reader) {}
+
+bool RToDouble::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue v(cx, iter.read());
+
+ MOZ_ASSERT(!v.isObject());
+ MOZ_ASSERT(!v.isSymbol());
+ MOZ_ASSERT(!v.isBigInt());
+
+ double dbl;
+ if (!ToNumber(cx, v, &dbl)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(DoubleValue(dbl));
+ return true;
+}
+
+bool MToFloat32::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ToFloat32));
+ return true;
+}
+
+RToFloat32::RToFloat32(CompactBufferReader& reader) {}
+
+bool RToFloat32::recover(JSContext* cx, SnapshotIterator& iter) const {
+ double num = iter.read().toNumber();
+ double result = js::RoundFloat32(num);
+
+ iter.storeInstructionResult(DoubleValue(result));
+ return true;
+}
+
+bool MTruncateToInt32::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_TruncateToInt32));
+ return true;
+}
+
+RTruncateToInt32::RTruncateToInt32(CompactBufferReader& reader) {}
+
+bool RTruncateToInt32::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue value(cx, iter.read());
+
+ int32_t trunc;
+ if (!JS::ToInt32(cx, value, &trunc)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(Int32Value(trunc));
+ return true;
+}
+
+bool MNewObject::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewObject));
+
+ // Recover instructions are only supported if we have a template object.
+ MOZ_ASSERT(mode_ == MNewObject::ObjectCreate);
+ return true;
+}
+
+RNewObject::RNewObject(CompactBufferReader& reader) {}
+
+bool RNewObject::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedObject templateObject(cx, &iter.read().toObject());
+
+ // See CodeGenerator::visitNewObjectVMCall.
+ // Note that recover instructions are only used if mode == ObjectCreate.
+ JSObject* resultObject =
+ ObjectCreateWithTemplate(cx, templateObject.as<PlainObject>());
+ if (!resultObject) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*resultObject));
+ return true;
+}
+
+bool MNewPlainObject::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewPlainObject));
+
+ MOZ_ASSERT(gc::AllocKind(uint8_t(allocKind_)) == allocKind_);
+ writer.writeByte(uint8_t(allocKind_));
+ MOZ_ASSERT(gc::Heap(uint8_t(initialHeap_)) == initialHeap_);
+ writer.writeByte(uint8_t(initialHeap_));
+ return true;
+}
+
+RNewPlainObject::RNewPlainObject(CompactBufferReader& reader) {
+ allocKind_ = gc::AllocKind(reader.readByte());
+ MOZ_ASSERT(gc::IsValidAllocKind(allocKind_));
+ initialHeap_ = gc::Heap(reader.readByte());
+ MOZ_ASSERT(initialHeap_ == gc::Heap::Default ||
+ initialHeap_ == gc::Heap::Tenured);
+}
+
+bool RNewPlainObject::recover(JSContext* cx, SnapshotIterator& iter) const {
+ Rooted<SharedShape*> shape(cx,
+ &iter.read().toGCCellPtr().as<Shape>().asShared());
+
+ // See CodeGenerator::visitNewPlainObject.
+ JSObject* resultObject =
+ NewPlainObjectOptimizedFallback(cx, shape, allocKind_, initialHeap_);
+ if (!resultObject) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*resultObject));
+ return true;
+}
+
+bool MNewArrayObject::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewArrayObject));
+
+ writer.writeUnsigned(length_);
+ MOZ_ASSERT(gc::Heap(uint8_t(initialHeap_)) == initialHeap_);
+ writer.writeByte(uint8_t(initialHeap_));
+ return true;
+}
+
+RNewArrayObject::RNewArrayObject(CompactBufferReader& reader) {
+ length_ = reader.readUnsigned();
+ initialHeap_ = gc::Heap(reader.readByte());
+ MOZ_ASSERT(initialHeap_ == gc::Heap::Default ||
+ initialHeap_ == gc::Heap::Tenured);
+}
+
+bool RNewArrayObject::recover(JSContext* cx, SnapshotIterator& iter) const {
+ iter.read(); // Skip unused shape field.
+
+ NewObjectKind kind =
+ initialHeap_ == gc::Heap::Tenured ? TenuredObject : GenericObject;
+ JSObject* array = NewArrayOperation(cx, length_, kind);
+ if (!array) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*array));
+ return true;
+}
+
+bool MNewTypedArray::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewTypedArray));
+ return true;
+}
+
+RNewTypedArray::RNewTypedArray(CompactBufferReader& reader) {}
+
+bool RNewTypedArray::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedObject templateObject(cx, &iter.read().toObject());
+
+ size_t length = templateObject.as<TypedArrayObject>()->length();
+ MOZ_ASSERT(length <= INT32_MAX,
+ "Template objects are only created for int32 lengths");
+
+ JSObject* resultObject =
+ NewTypedArrayWithTemplateAndLength(cx, templateObject, length);
+ if (!resultObject) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*resultObject));
+ return true;
+}
+
+bool MNewArray::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewArray));
+ writer.writeUnsigned(length());
+ return true;
+}
+
+RNewArray::RNewArray(CompactBufferReader& reader) {
+ count_ = reader.readUnsigned();
+}
+
+bool RNewArray::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedObject templateObject(cx, &iter.read().toObject());
+ Rooted<Shape*> shape(cx, templateObject->shape());
+
+ ArrayObject* resultObject = NewArrayWithShape(cx, count_, shape);
+ if (!resultObject) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*resultObject));
+ return true;
+}
+
+bool MNewIterator::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewIterator));
+ writer.writeByte(type_);
+ return true;
+}
+
+RNewIterator::RNewIterator(CompactBufferReader& reader) {
+ type_ = reader.readByte();
+}
+
+bool RNewIterator::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedObject templateObject(cx, &iter.read().toObject());
+
+ JSObject* resultObject = nullptr;
+ switch (MNewIterator::Type(type_)) {
+ case MNewIterator::ArrayIterator:
+ resultObject = NewArrayIterator(cx);
+ break;
+ case MNewIterator::StringIterator:
+ resultObject = NewStringIterator(cx);
+ break;
+ case MNewIterator::RegExpStringIterator:
+ resultObject = NewRegExpStringIterator(cx);
+ break;
+ }
+
+ if (!resultObject) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*resultObject));
+ return true;
+}
+
+bool MLambda::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Lambda));
+ return true;
+}
+
+RLambda::RLambda(CompactBufferReader& reader) {}
+
+bool RLambda::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedObject scopeChain(cx, &iter.read().toObject());
+ RootedFunction fun(cx, &iter.read().toObject().as<JSFunction>());
+
+ JSObject* resultObject = js::Lambda(cx, fun, scopeChain);
+ if (!resultObject) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*resultObject));
+ return true;
+}
+
+bool MFunctionWithProto::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_FunctionWithProto));
+ return true;
+}
+
+RFunctionWithProto::RFunctionWithProto(CompactBufferReader& reader) {}
+
+bool RFunctionWithProto::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedObject scopeChain(cx, &iter.read().toObject());
+ RootedObject prototype(cx, &iter.read().toObject());
+ RootedFunction fun(cx, &iter.read().toObject().as<JSFunction>());
+
+ JSObject* resultObject =
+ js::FunWithProtoOperation(cx, fun, scopeChain, prototype);
+ if (!resultObject) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*resultObject));
+ return true;
+}
+
+bool MNewCallObject::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_NewCallObject));
+ return true;
+}
+
+RNewCallObject::RNewCallObject(CompactBufferReader& reader) {}
+
+bool RNewCallObject::recover(JSContext* cx, SnapshotIterator& iter) const {
+ Rooted<CallObject*> templateObj(cx, &iter.read().toObject().as<CallObject>());
+
+ Rooted<SharedShape*> shape(cx, templateObj->sharedShape());
+
+ JSObject* resultObject = CallObject::createWithShape(cx, shape);
+ if (!resultObject) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*resultObject));
+ return true;
+}
+
+bool MObjectState::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ObjectState));
+ writer.writeUnsigned(numSlots());
+ return true;
+}
+
+RObjectState::RObjectState(CompactBufferReader& reader) {
+ numSlots_ = reader.readUnsigned();
+}
+
+bool RObjectState::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedObject object(cx, &iter.read().toObject());
+ Handle<NativeObject*> nativeObject = object.as<NativeObject>();
+ MOZ_ASSERT(nativeObject->slotSpan() == numSlots());
+
+ for (size_t i = 0; i < numSlots(); i++) {
+ Value val = iter.read();
+ nativeObject->setSlot(i, val);
+ }
+
+ iter.storeInstructionResult(ObjectValue(*object));
+ return true;
+}
+
+bool MArrayState::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ArrayState));
+ writer.writeUnsigned(numElements());
+ return true;
+}
+
+RArrayState::RArrayState(CompactBufferReader& reader) {
+ numElements_ = reader.readUnsigned();
+}
+
+bool RArrayState::recover(JSContext* cx, SnapshotIterator& iter) const {
+ ArrayObject* object = &iter.read().toObject().as<ArrayObject>();
+ uint32_t initLength = iter.read().toInt32();
+
+ MOZ_ASSERT(object->getDenseInitializedLength() == 0,
+ "initDenseElement call below relies on this");
+ object->setDenseInitializedLength(initLength);
+
+ for (size_t index = 0; index < numElements(); index++) {
+ Value val = iter.read();
+
+ if (index >= initLength) {
+ MOZ_ASSERT(val.isUndefined());
+ continue;
+ }
+
+ object->initDenseElement(index, val);
+ }
+
+ iter.storeInstructionResult(ObjectValue(*object));
+ return true;
+}
+
+bool MSetArrayLength::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+  // For simplicity, we capture the object directly instead of the elements
+  // pointer.
+ MOZ_ASSERT(elements()->type() != MIRType::Elements);
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_SetArrayLength));
+ return true;
+}
+
+bool MSetArrayLength::canRecoverOnBailout() const {
+ return isRecoveredOnBailout();
+}
+
+RSetArrayLength::RSetArrayLength(CompactBufferReader& reader) {}
+
+bool RSetArrayLength::recover(JSContext* cx, SnapshotIterator& iter) const {
+ Rooted<ArrayObject*> obj(cx, &iter.read().toObject().as<ArrayObject>());
+ RootedValue len(cx, iter.read());
+
+ RootedId id(cx, NameToId(cx->names().length));
+ Rooted<PropertyDescriptor> desc(
+ cx, PropertyDescriptor::Data(len, JS::PropertyAttribute::Writable));
+ ObjectOpResult error;
+ if (!ArraySetLength(cx, obj, id, desc, error)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*obj));
+ return true;
+}
+
+bool MAssertRecoveredOnBailout::writeRecoverData(
+ CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ MOZ_RELEASE_ASSERT(input()->isRecoveredOnBailout() == mustBeRecovered_,
+ "assertRecoveredOnBailout failed during compilation");
+ writer.writeUnsigned(
+ uint32_t(RInstruction::Recover_AssertRecoveredOnBailout));
+ return true;
+}
+
+RAssertRecoveredOnBailout::RAssertRecoveredOnBailout(
+ CompactBufferReader& reader) {}
+
+bool RAssertRecoveredOnBailout::recover(JSContext* cx,
+ SnapshotIterator& iter) const {
+ iter.read(); // skip the unused operand.
+ iter.storeInstructionResult(UndefinedValue());
+ return true;
+}
+
+bool MStringReplace::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_StringReplace));
+ writer.writeByte(isFlatReplacement_);
+ return true;
+}
+
+RStringReplace::RStringReplace(CompactBufferReader& reader) {
+ isFlatReplacement_ = reader.readByte();
+}
+
+bool RStringReplace::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedString string(cx, iter.read().toString());
+ RootedString pattern(cx, iter.read().toString());
+ RootedString replace(cx, iter.read().toString());
+
+ JSString* result =
+ isFlatReplacement_
+ ? js::StringFlatReplaceString(cx, string, pattern, replace)
+ : js::str_replace_string_raw(cx, string, pattern, replace);
+
+ if (!result) {
+ return false;
+ }
+
+ iter.storeInstructionResult(StringValue(result));
+ return true;
+}
+
+bool MSubstr::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Substr));
+ return true;
+}
+
+RSubstr::RSubstr(CompactBufferReader& reader) {}
+
+bool RSubstr::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedString str(cx, iter.read().toString());
+ int32_t begin = iter.read().toInt32();
+ int32_t length = iter.read().toInt32();
+
+ JSString* result = SubstringKernel(cx, str, begin, length);
+ if (!result) {
+ return false;
+ }
+
+ iter.storeInstructionResult(StringValue(result));
+ return true;
+}
+
+bool MAtomicIsLockFree::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_AtomicIsLockFree));
+ return true;
+}
+
+RAtomicIsLockFree::RAtomicIsLockFree(CompactBufferReader& reader) {}
+
+bool RAtomicIsLockFree::recover(JSContext* cx, SnapshotIterator& iter) const {
+ RootedValue operand(cx, iter.read());
+ MOZ_ASSERT(operand.isInt32());
+
+ int32_t result;
+ if (!js::AtomicIsLockFree(cx, operand, &result)) {
+ return false;
+ }
+
+ iter.storeInstructionResult(Int32Value(result));
+ return true;
+}
+
+bool MBigIntAsIntN::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntAsIntN));
+ return true;
+}
+
+RBigIntAsIntN::RBigIntAsIntN(CompactBufferReader& reader) {}
+
+bool RBigIntAsIntN::recover(JSContext* cx, SnapshotIterator& iter) const {
+ int32_t bits = iter.read().toInt32();
+ RootedBigInt input(cx, iter.read().toBigInt());
+
+ MOZ_ASSERT(bits >= 0);
+ BigInt* result = BigInt::asIntN(cx, input, bits);
+ if (!result) {
+ return false;
+ }
+
+ iter.storeInstructionResult(JS::BigIntValue(result));
+ return true;
+}
+
+bool MBigIntAsUintN::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_BigIntAsUintN));
+ return true;
+}
+
+RBigIntAsUintN::RBigIntAsUintN(CompactBufferReader& reader) {}
+
+bool RBigIntAsUintN::recover(JSContext* cx, SnapshotIterator& iter) const {
+ int32_t bits = iter.read().toInt32();
+ RootedBigInt input(cx, iter.read().toBigInt());
+
+ MOZ_ASSERT(bits >= 0);
+ BigInt* result = BigInt::asUintN(cx, input, bits);
+ if (!result) {
+ return false;
+ }
+
+ iter.storeInstructionResult(JS::BigIntValue(result));
+ return true;
+}
+
+bool MCreateArgumentsObject::writeRecoverData(
+ CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_CreateArgumentsObject));
+ return true;
+}
+
+RCreateArgumentsObject::RCreateArgumentsObject(CompactBufferReader& reader) {}
+
+bool RCreateArgumentsObject::recover(JSContext* cx,
+ SnapshotIterator& iter) const {
+ RootedObject callObject(cx, &iter.read().toObject());
+ RootedObject result(
+ cx, ArgumentsObject::createForIon(cx, iter.frame(), callObject));
+ if (!result) {
+ return false;
+ }
+
+ iter.storeInstructionResult(JS::ObjectValue(*result));
+ return true;
+}
+
+bool MCreateInlinedArgumentsObject::writeRecoverData(
+ CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(
+ uint32_t(RInstruction::Recover_CreateInlinedArgumentsObject));
+ writer.writeUnsigned(numActuals());
+ return true;
+}
+
+RCreateInlinedArgumentsObject::RCreateInlinedArgumentsObject(
+ CompactBufferReader& reader) {
+ numActuals_ = reader.readUnsigned();
+}
+
+bool RCreateInlinedArgumentsObject::recover(JSContext* cx,
+ SnapshotIterator& iter) const {
+ RootedObject callObject(cx, &iter.read().toObject());
+ RootedFunction callee(cx, &iter.read().toObject().as<JSFunction>());
+
+ JS::RootedValueArray<ArgumentsObject::MaxInlinedArgs> argsArray(cx);
+ for (uint32_t i = 0; i < numActuals_; i++) {
+ argsArray[i].set(iter.read());
+ }
+
+ ArgumentsObject* result = ArgumentsObject::createFromValueArray(
+ cx, argsArray, callee, callObject, numActuals_);
+ if (!result) {
+ return false;
+ }
+
+ iter.storeInstructionResult(JS::ObjectValue(*result));
+ return true;
+}
+
+bool MRest::writeRecoverData(CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_Rest));
+ writer.writeUnsigned(numFormals());
+ return true;
+}
+
+RRest::RRest(CompactBufferReader& reader) {
+ numFormals_ = reader.readUnsigned();
+}
+
+bool RRest::recover(JSContext* cx, SnapshotIterator& iter) const {
+ JitFrameLayout* frame = iter.frame();
+
+ uint32_t numActuals = iter.read().toInt32();
+ MOZ_ASSERT(numActuals == frame->numActualArgs());
+
+ uint32_t numFormals = numFormals_;
+
+ uint32_t length = std::max(numActuals, numFormals) - numFormals;
+ Value* src = frame->actualArgs() + numFormals;
+ JSObject* rest = jit::InitRestParameter(cx, length, src, nullptr);
+ if (!rest) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*rest));
+ return true;
+}
diff --git a/js/src/jit/Recover.h b/js/src/jit/Recover.h
new file mode 100644
index 0000000000..1db8f92d1a
--- /dev/null
+++ b/js/src/jit/Recover.h
@@ -0,0 +1,964 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Recover_h
+#define jit_Recover_h
+
+#include "mozilla/Attributes.h"
+
+#include "jit/MIR.h"
+#include "jit/Snapshots.h"
+
+namespace js {
+namespace jit {
+
+// [SMDOC] IonMonkey Recover Instructions
+//
+// This file contains all recover instructions.
+//
+// A recover instruction is the equivalent of a MIR instruction, executed
+// before the reconstruction of a baseline frame. Recover instructions are used
+// by resume points to fill in values which are not produced by the code
+// compiled by IonMonkey. For example, if a value is optimized away by
+// IonMonkey, but required by Baseline, then we should have a recover
+// instruction to fill the missing baseline frame slot.
+//
+// Recover instructions are executed either during a bailout, or under a call
+// when the stack frame is introspected. If the stack is introspected, then any
+// use of recover instructions must lead to an invalidation of the code.
+//
+// For each MIR instruction where |canRecoverOnBailout| might return true, we
+// have a RInstruction of the same name.
+//
+// Recover instructions are encoded by the code generator into a compact buffer
+// (RecoverWriter). The MIR instruction method |writeRecoverData| should write a
+// tag in the |CompactBufferWriter| which is used by
+// |RInstruction::readRecoverData| to dispatch to the right Recover
+// instruction. Then |writeRecoverData| writes any local fields which are
+// necessary for the execution of the |recover| method. These fields are decoded
+// by the Recover instruction constructor which has a |CompactBufferReader| as
+// argument. The constructor of the Recover instruction should follow the same
+// sequence as the |writeRecoverData| method of the MIR instruction.
+//
+// Recover instructions are decoded by the |SnapshotIterator| (RecoverReader),
+// which is passed as an argument to the |recover| methods in order to read the
+// operands. The number of operands read should be the same as the result of
+// |numOperands|, which corresponds to the number of operands of the MIR
+// instruction. Operands should be decoded in the same order as the operands of
+// the MIR instruction.
+//
+// The result of the |recover| method should either be a failure, or a value
+// stored on the |SnapshotIterator|, by using the |storeInstructionResult|
+// method.
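+//
+// As an illustration, a hypothetical unary instruction |MExample| (a
+// placeholder name, not an existing MIR node) that simply forwards its numeric
+// operand would be wired up roughly as follows, matching the pattern of the
+// instructions defined in Recover.cpp:
+//
+//   bool MExample::writeRecoverData(CompactBufferWriter& writer) const {
+//     MOZ_ASSERT(canRecoverOnBailout());
+//     writer.writeUnsigned(uint32_t(RInstruction::Recover_Example));
+//     return true;
+//   }
+//
+//   RExample::RExample(CompactBufferReader& reader) {}
+//
+//   bool RExample::recover(JSContext* cx, SnapshotIterator& iter) const {
+//     double num = iter.read().toNumber();
+//     iter.storeInstructionResult(NumberValue(num));
+//     return true;
+//   }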
+
+#define RECOVER_OPCODE_LIST(_) \
+ _(ResumePoint) \
+ _(BitNot) \
+ _(BitAnd) \
+ _(BitOr) \
+ _(BitXor) \
+ _(Lsh) \
+ _(Rsh) \
+ _(Ursh) \
+ _(SignExtendInt32) \
+ _(Add) \
+ _(Sub) \
+ _(Mul) \
+ _(Div) \
+ _(Mod) \
+ _(Not) \
+ _(BigIntAdd) \
+ _(BigIntSub) \
+ _(BigIntMul) \
+ _(BigIntDiv) \
+ _(BigIntMod) \
+ _(BigIntPow) \
+ _(BigIntBitAnd) \
+ _(BigIntBitOr) \
+ _(BigIntBitXor) \
+ _(BigIntLsh) \
+ _(BigIntRsh) \
+ _(BigIntIncrement) \
+ _(BigIntDecrement) \
+ _(BigIntNegate) \
+ _(BigIntBitNot) \
+ _(Compare) \
+ _(Concat) \
+ _(StringLength) \
+ _(ArgumentsLength) \
+ _(Floor) \
+ _(Ceil) \
+ _(Round) \
+ _(Trunc) \
+ _(CharCodeAt) \
+ _(FromCharCode) \
+ _(Pow) \
+ _(PowHalf) \
+ _(MinMax) \
+ _(Abs) \
+ _(Sqrt) \
+ _(Atan2) \
+ _(Hypot) \
+ _(NearbyInt) \
+ _(Sign) \
+ _(MathFunction) \
+ _(Random) \
+ _(StringSplit) \
+ _(NaNToZero) \
+ _(RegExpMatcher) \
+ _(RegExpSearcher) \
+ _(StringReplace) \
+ _(Substr) \
+ _(TypeOf) \
+ _(TypeOfName) \
+ _(ToDouble) \
+ _(ToFloat32) \
+ _(TruncateToInt32) \
+ _(NewObject) \
+ _(NewPlainObject) \
+ _(NewArrayObject) \
+ _(NewTypedArray) \
+ _(NewArray) \
+ _(NewIterator) \
+ _(NewCallObject) \
+ _(Lambda) \
+ _(FunctionWithProto) \
+ _(ObjectState) \
+ _(ArrayState) \
+ _(SetArrayLength) \
+ _(AtomicIsLockFree) \
+ _(BigIntAsIntN) \
+ _(BigIntAsUintN) \
+ _(CreateArgumentsObject) \
+ _(CreateInlinedArgumentsObject) \
+ _(Rest) \
+ _(AssertRecoveredOnBailout)
+
+class RResumePoint;
+class SnapshotIterator;
+
+class MOZ_NON_PARAM RInstruction {
+ public:
+ enum Opcode {
+#define DEFINE_OPCODES_(op) Recover_##op,
+ RECOVER_OPCODE_LIST(DEFINE_OPCODES_)
+#undef DEFINE_OPCODES_
+ Recover_Invalid
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // As opposed to the MIR, there is no need to add more methods as every
+ // other instruction is well abstracted under the "recover" method.
+ bool isResumePoint() const { return opcode() == Recover_ResumePoint; }
+ inline const RResumePoint* toResumePoint() const;
+
+  // Call the copy constructor of a specific RInstruction to copy its content
+  // into the reserved storage space.
+ virtual void cloneInto(RInstructionStorage* raw) const = 0;
+
+ // Number of allocations which are encoded in the Snapshot for recovering
+ // the current instruction.
+ virtual uint32_t numOperands() const = 0;
+
+ // Function used to recover the value computed by this instruction. This
+ // function reads its arguments from the allocations listed on the snapshot
+ // iterator and stores its returned value on the snapshot iterator too.
+ [[nodiscard]] virtual bool recover(JSContext* cx,
+ SnapshotIterator& iter) const = 0;
+
+ // Decode an RInstruction on top of the reserved storage space, based on the
+ // tag written by the writeRecoverData function of the corresponding MIR
+ // instruction.
+ static void readRecoverData(CompactBufferReader& reader,
+ RInstructionStorage* raw);
+};
+
+#define RINSTRUCTION_HEADER_(op) \
+ private: \
+ friend class RInstruction; \
+ explicit R##op(CompactBufferReader& reader); \
+ explicit R##op(const R##op& src) = default; \
+ \
+ public: \
+ Opcode opcode() const override { return RInstruction::Recover_##op; } \
+ void cloneInto(RInstructionStorage* raw) const override { \
+ new (raw->addr()) R##op(*this); \
+ }
+
+#define RINSTRUCTION_HEADER_NUM_OP_MAIN(op, numOp) \
+ RINSTRUCTION_HEADER_(op) \
+ uint32_t numOperands() const override { return numOp; }
+
+#ifdef DEBUG
+# define RINSTRUCTION_HEADER_NUM_OP_(op, numOp) \
+ RINSTRUCTION_HEADER_NUM_OP_MAIN(op, numOp) \
+ static_assert( \
+ M##op::staticNumOperands == numOp, \
+ "The recover instructions's numOperands should equal to the " \
+ "MIR's numOperands");
+#else
+# define RINSTRUCTION_HEADER_NUM_OP_(op, numOp) \
+ RINSTRUCTION_HEADER_NUM_OP_MAIN(op, numOp)
+#endif
+
+class RResumePoint final : public RInstruction {
+ private:
+ uint32_t pcOffsetAndMode_; // Offset from script->code and ResumeMode.
+ uint32_t numOperands_; // Number of slots.
+
+ public:
+ RINSTRUCTION_HEADER_(ResumePoint)
+
+ // Used to encode/decode pcOffsetAndMode_.
+ static constexpr uint32_t PCOffsetShift = 4;
+ static constexpr uint32_t ResumeModeMask = 0b1111;
+ static_assert(uint32_t(ResumeMode::Last) <= ResumeModeMask);
+
+ uint32_t pcOffset() const { return pcOffsetAndMode_ >> PCOffsetShift; }
+ ResumeMode mode() const {
+ return ResumeMode(pcOffsetAndMode_ & ResumeModeMask);
+ }
+
+ uint32_t numOperands() const override { return numOperands_; }
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBitNot final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BitNot, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBitAnd final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BitAnd, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBitOr final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BitOr, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBitXor final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BitXor, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RLsh final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Lsh, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RRsh final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Rsh, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RUrsh final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Ursh, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RSignExtendInt32 final : public RInstruction {
+ private:
+ uint8_t mode_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(SignExtendInt32, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RAdd final : public RInstruction {
+ private:
+ bool isFloatOperation_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Add, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RSub final : public RInstruction {
+ private:
+ bool isFloatOperation_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Sub, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RMul final : public RInstruction {
+ private:
+ bool isFloatOperation_;
+ uint8_t mode_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Mul, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RDiv final : public RInstruction {
+ private:
+ bool isFloatOperation_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Div, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RMod final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Mod, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNot final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Not, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntAdd final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntAdd, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntSub final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntSub, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntMul final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntMul, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntDiv final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntDiv, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntMod final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntMod, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntPow final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntPow, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntBitAnd final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntBitAnd, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntBitOr final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntBitOr, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntBitXor final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntBitXor, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntLsh final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntLsh, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntRsh final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntRsh, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntIncrement final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntIncrement, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntDecrement final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntDecrement, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntNegate final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntNegate, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntBitNot final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntBitNot, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RCompare final : public RInstruction {
+ JSOp jsop_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Compare, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RConcat final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Concat, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RStringLength final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(StringLength, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RArgumentsLength final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(ArgumentsLength, 0)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RFloor final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Floor, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RCeil final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Ceil, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RRound final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Round, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RTrunc final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Trunc, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RCharCodeAt final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(CharCodeAt, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RFromCharCode final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(FromCharCode, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RPow final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Pow, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RPowHalf final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(PowHalf, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RMinMax final : public RInstruction {
+ private:
+ bool isMax_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(MinMax, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RAbs final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Abs, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RSqrt final : public RInstruction {
+ private:
+ bool isFloatOperation_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Sqrt, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RAtan2 final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Atan2, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RHypot final : public RInstruction {
+ private:
+ uint32_t numOperands_;
+
+ public:
+ RINSTRUCTION_HEADER_(Hypot)
+
+ uint32_t numOperands() const override { return numOperands_; }
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNearbyInt final : public RInstruction {
+ private:
+ uint8_t roundingMode_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NearbyInt, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RSign final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Sign, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RMathFunction final : public RInstruction {
+ private:
+ UnaryMathFunction function_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(MathFunction, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RRandom final : public RInstruction {
+ RINSTRUCTION_HEADER_NUM_OP_(Random, 0)
+ public:
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RStringSplit final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(StringSplit, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNaNToZero final : public RInstruction {
+ public:
+  RINSTRUCTION_HEADER_NUM_OP_(NaNToZero, 1)
+
+  [[nodiscard]] bool recover(JSContext* cx,
+                             SnapshotIterator& iter) const override;
+};
+
+class RRegExpMatcher final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(RegExpMatcher, 3)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RRegExpSearcher final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(RegExpSearcher, 3)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RStringReplace final : public RInstruction {
+ private:
+ bool isFlatReplacement_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(StringReplace, 3)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RSubstr final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Substr, 3)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RTypeOf final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(TypeOf, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RTypeOfName final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(TypeOfName, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RToDouble final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(ToDouble, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RToFloat32 final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(ToFloat32, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RTruncateToInt32 final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(TruncateToInt32, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNewObject final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewObject, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNewPlainObject final : public RInstruction {
+ private:
+ gc::AllocKind allocKind_;
+ gc::Heap initialHeap_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewPlainObject, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNewArrayObject final : public RInstruction {
+ private:
+ uint32_t length_;
+ gc::Heap initialHeap_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewArrayObject, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNewTypedArray final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewTypedArray, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNewArray final : public RInstruction {
+ private:
+ uint32_t count_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewArray, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNewIterator final : public RInstruction {
+ private:
+ uint8_t type_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewIterator, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RLambda final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Lambda, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RFunctionWithProto final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(FunctionWithProto, 3)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RNewCallObject final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(NewCallObject, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RObjectState final : public RInstruction {
+ private:
+ uint32_t numSlots_; // Number of slots.
+
+ public:
+ RINSTRUCTION_HEADER_(ObjectState)
+
+ uint32_t numSlots() const { return numSlots_; }
+ uint32_t numOperands() const override {
+ // +1 for the object.
+ return numSlots() + 1;
+ }
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RArrayState final : public RInstruction {
+ private:
+ uint32_t numElements_;
+
+ public:
+ RINSTRUCTION_HEADER_(ArrayState)
+
+ uint32_t numElements() const { return numElements_; }
+ uint32_t numOperands() const override {
+ // +1 for the array.
+    // +1 for the initialized length.
+ return numElements() + 2;
+ }
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RSetArrayLength final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(SetArrayLength, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RAtomicIsLockFree final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(AtomicIsLockFree, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntAsIntN final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntAsIntN, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RBigIntAsUintN final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(BigIntAsUintN, 2)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RCreateArgumentsObject final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(CreateArgumentsObject, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RCreateInlinedArgumentsObject final : public RInstruction {
+ private:
+ uint32_t numActuals_;
+
+ public:
+ RINSTRUCTION_HEADER_(CreateInlinedArgumentsObject)
+
+ uint32_t numActuals() const { return numActuals_; }
+ uint32_t numOperands() const override {
+ // +1 for the callObj.
+ // +1 for the callee.
+ return numActuals() + 2;
+ }
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RRest final : public RInstruction {
+ uint32_t numFormals_;
+
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(Rest, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+class RAssertRecoveredOnBailout final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(AssertRecoveredOnBailout, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
+#undef RINSTRUCTION_HEADER_
+#undef RINSTRUCTION_HEADER_NUM_OP_
+#undef RINSTRUCTION_HEADER_NUM_OP_MAIN
+
+const RResumePoint* RInstruction::toResumePoint() const {
+ MOZ_ASSERT(isResumePoint());
+ return static_cast<const RResumePoint*>(this);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Recover_h */
diff --git a/js/src/jit/RegExpStubConstants.h b/js/src/jit/RegExpStubConstants.h
new file mode 100644
index 0000000000..86ad75c27c
--- /dev/null
+++ b/js/src/jit/RegExpStubConstants.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RegExpStubConstants_h
+#define jit_RegExpStubConstants_h
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "irregexp/RegExpTypes.h"
+#include "vm/MatchPairs.h"
+
+namespace js {
+namespace jit {
+
+static constexpr size_t InputOutputDataSize = sizeof(irregexp::InputOutputData);
+
+// Amount of space to reserve on the stack when executing RegExps inline.
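+// Presumably this area holds an irregexp::InputOutputData, a MatchPairs
+// header, and storage for up to RegExpObject::MaxPairCount MatchPair
+// entries, matching the three terms of the sum below.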
+static constexpr size_t RegExpReservedStack =
+ InputOutputDataSize + sizeof(MatchPairs) +
+ RegExpObject::MaxPairCount * sizeof(MatchPair);
+
+// RegExpExecTest return value to indicate failure.
+static constexpr int32_t RegExpExecTestResultFailed = -1;
+
+// RegExpSearcher return values to indicate not-found or failure.
+static constexpr int32_t RegExpSearcherResultNotFound = -1;
+static constexpr int32_t RegExpSearcherResultFailed = -2;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_RegExpStubConstants_h */
diff --git a/js/src/jit/RegisterAllocator.cpp b/js/src/jit/RegisterAllocator.cpp
new file mode 100644
index 0000000000..c572e4bc2a
--- /dev/null
+++ b/js/src/jit/RegisterAllocator.cpp
@@ -0,0 +1,669 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/RegisterAllocator.h"
+
+using namespace js;
+using namespace js::jit;
+
+#ifdef DEBUG
+bool AllocationIntegrityState::record() {
+ // Ignore repeated record() calls.
+ if (!instructions.empty()) {
+ return true;
+ }
+
+ if (!instructions.appendN(InstructionInfo(), graph.numInstructions())) {
+ return false;
+ }
+
+ if (!virtualRegisters.appendN((LDefinition*)nullptr,
+ graph.numVirtualRegisters())) {
+ return false;
+ }
+
+ if (!blocks.reserve(graph.numBlocks())) {
+ return false;
+ }
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ blocks.infallibleAppend(BlockInfo());
+ LBlock* block = graph.getBlock(i);
+ MOZ_ASSERT(block->mir()->id() == i);
+
+ BlockInfo& blockInfo = blocks[i];
+ if (!blockInfo.phis.reserve(block->numPhis())) {
+ return false;
+ }
+
+ for (size_t j = 0; j < block->numPhis(); j++) {
+ blockInfo.phis.infallibleAppend(InstructionInfo());
+ InstructionInfo& info = blockInfo.phis[j];
+ LPhi* phi = block->getPhi(j);
+ MOZ_ASSERT(phi->numDefs() == 1);
+ uint32_t vreg = phi->getDef(0)->virtualRegister();
+ virtualRegisters[vreg] = phi->getDef(0);
+ if (!info.outputs.append(*phi->getDef(0))) {
+ return false;
+ }
+ for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
+ if (!info.inputs.append(*phi->getOperand(k))) {
+ return false;
+ }
+ }
+ }
+
+ for (LInstructionIterator iter = block->begin(); iter != block->end();
+ iter++) {
+ LInstruction* ins = *iter;
+ InstructionInfo& info = instructions[ins->id()];
+
+ for (size_t k = 0; k < ins->numTemps(); k++) {
+ if (!ins->getTemp(k)->isBogusTemp()) {
+ uint32_t vreg = ins->getTemp(k)->virtualRegister();
+ virtualRegisters[vreg] = ins->getTemp(k);
+ }
+ if (!info.temps.append(*ins->getTemp(k))) {
+ return false;
+ }
+ }
+ for (size_t k = 0; k < ins->numDefs(); k++) {
+ if (!ins->getDef(k)->isBogusTemp()) {
+ uint32_t vreg = ins->getDef(k)->virtualRegister();
+ virtualRegisters[vreg] = ins->getDef(k);
+ }
+ if (!info.outputs.append(*ins->getDef(k))) {
+ return false;
+ }
+ }
+ for (LInstruction::InputIterator alloc(*ins); alloc.more();
+ alloc.next()) {
+ if (!info.inputs.append(**alloc)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+bool AllocationIntegrityState::check() {
+ MOZ_ASSERT(!instructions.empty());
+
+# ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_RegAlloc)) {
+ dump();
+ }
+# endif
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+ LBlock* block = graph.getBlock(blockIndex);
+
+ // Check that all instruction inputs and outputs have been assigned an
+ // allocation.
+ for (LInstructionIterator iter = block->begin(); iter != block->end();
+ iter++) {
+ LInstruction* ins = *iter;
+
+ for (LInstruction::InputIterator alloc(*ins); alloc.more();
+ alloc.next()) {
+ MOZ_ASSERT(!alloc->isUse());
+ }
+
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ MOZ_ASSERT(!def->output()->isUse());
+
+ LDefinition oldDef = instructions[ins->id()].outputs[i];
+ MOZ_ASSERT_IF(
+ oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
+ *def->output() == *ins->getOperand(oldDef.getReusedInput()));
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());
+
+ LDefinition oldTemp = instructions[ins->id()].temps[i];
+ MOZ_ASSERT_IF(
+ oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
+ *temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
+ }
+ }
+ }
+
+ // Check that the register assignment and move groups preserve the original
+ // semantics of the virtual registers. Each virtual register has a single
+ // write (owing to the SSA representation), but the allocation may move the
+ // written value around between registers and memory locations along
+ // different paths through the script.
+ //
+ // For each use of an allocation, follow the physical value which is read
+ // backward through the script, along all paths to the value's virtual
+ // register's definition.
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+ LBlock* block = graph.getBlock(blockIndex);
+ for (LInstructionIterator iter = block->begin(); iter != block->end();
+ iter++) {
+ LInstruction* ins = *iter;
+ const InstructionInfo& info = instructions[ins->id()];
+
+ LSafepoint* safepoint = ins->safepoint();
+ if (safepoint) {
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ if (ins->getTemp(i)->isBogusTemp()) {
+ continue;
+ }
+ uint32_t vreg = info.temps[i].virtualRegister();
+ LAllocation* alloc = ins->getTemp(i)->output();
+ checkSafepointAllocation(ins, vreg, *alloc);
+ }
+ MOZ_ASSERT_IF(ins->isCall(), safepoint->liveRegs().emptyFloat() &&
+ safepoint->liveRegs().emptyGeneral());
+ }
+
+ size_t inputIndex = 0;
+ for (LInstruction::InputIterator alloc(*ins); alloc.more();
+ inputIndex++, alloc.next()) {
+ LAllocation oldInput = info.inputs[inputIndex];
+ if (!oldInput.isUse()) {
+ continue;
+ }
+
+ uint32_t vreg = oldInput.toUse()->virtualRegister();
+
+ if (safepoint && !oldInput.toUse()->usedAtStart()) {
+ checkSafepointAllocation(ins, vreg, **alloc);
+ }
+
+ // Temps must never alias inputs (even at-start uses) unless explicitly
+ // requested.
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ if (ins->getTemp(i)->isBogusTemp()) {
+ continue;
+ }
+ LAllocation* tempAlloc = ins->getTemp(i)->output();
+
+ // Fixed uses and fixed temps are allowed to alias.
+ if (oldInput.toUse()->isFixedRegister() && info.temps[i].isFixed()) {
+ continue;
+ }
+
+ // MUST_REUSE_INPUT temps will alias their input.
+ if (info.temps[i].policy() == LDefinition::MUST_REUSE_INPUT &&
+ info.temps[i].getReusedInput() == inputIndex) {
+ continue;
+ }
+
+ MOZ_ASSERT(!tempAlloc->aliases(**alloc));
+ }
+
+ // Start checking at the previous instruction, in case this
+ // instruction reuses its input register for an output.
+ LInstructionReverseIterator riter = block->rbegin(ins);
+ riter++;
+ if (!checkIntegrity(block, *riter, vreg, **alloc)) {
+ return false;
+ }
+
+ while (!worklist.empty()) {
+ IntegrityItem item = worklist.popCopy();
+ if (!checkIntegrity(item.block, *item.block->rbegin(), item.vreg,
+ item.alloc)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+bool AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins,
+ uint32_t vreg,
+ LAllocation alloc) {
+ for (LInstructionReverseIterator iter(block->rbegin(ins));
+ iter != block->rend(); iter++) {
+ ins = *iter;
+
+ // Follow values through assignments in move groups. All assignments in
+ // a move group are considered to happen simultaneously, so stop after
+ // the first matching move is found.
+ if (ins->isMoveGroup()) {
+ LMoveGroup* group = ins->toMoveGroup();
+ for (int i = group->numMoves() - 1; i >= 0; i--) {
+ if (group->getMove(i).to() == alloc) {
+ alloc = group->getMove(i).from();
+ break;
+ }
+ }
+ }
+
+ const InstructionInfo& info = instructions[ins->id()];
+
+ // Make sure the physical location being tracked is not clobbered by
+    // another instruction, and that, if the originating vreg definition is
+    // found, it is writing to the tracked location.
+
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ LDefinition* def = ins->getDef(i);
+ if (def->isBogusTemp()) {
+ continue;
+ }
+ if (info.outputs[i].virtualRegister() == vreg) {
+# ifdef JS_JITSPEW
+ // If the following assertion is about to fail, print some useful info.
+ if (!(*def->output() == alloc) && JitSpewEnabled(JitSpew_RegAlloc)) {
+ CodePosition input(ins->id(), CodePosition::INPUT);
+ CodePosition output(ins->id(), CodePosition::OUTPUT);
+ JitSpew(JitSpew_RegAlloc,
+ "Instruction at %u-%u, output number %u:", input.bits(),
+ output.bits(), unsigned(i));
+ JitSpew(JitSpew_RegAlloc,
+ " Error: conflicting allocations: %s vs %s",
+ (*def->output()).toString().get(), alloc.toString().get());
+ }
+# endif
+ MOZ_ASSERT(*def->output() == alloc);
+
+ // Found the original definition, done scanning.
+ return true;
+ } else {
+ MOZ_ASSERT(*def->output() != alloc);
+ }
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ if (!temp->isBogusTemp()) {
+ MOZ_ASSERT(*temp->output() != alloc);
+ }
+ }
+
+ if (ins->safepoint()) {
+ checkSafepointAllocation(ins, vreg, alloc);
+ }
+ }
+
+ // Phis are effectless, but change the vreg we are tracking. Check if there
+ // is one which produced this vreg. We need to follow back through the phi
+ // inputs as it is not guaranteed the register allocator filled in physical
+ // allocations for the inputs and outputs of the phis.
+ for (size_t i = 0; i < block->numPhis(); i++) {
+ const InstructionInfo& info = blocks[block->mir()->id()].phis[i];
+ LPhi* phi = block->getPhi(i);
+ if (info.outputs[0].virtualRegister() == vreg) {
+ for (size_t j = 0, jend = phi->numOperands(); j < jend; j++) {
+ uint32_t newvreg = info.inputs[j].toUse()->virtualRegister();
+ LBlock* predecessor = block->mir()->getPredecessor(j)->lir();
+ if (!addPredecessor(predecessor, newvreg, alloc)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ }
+
+ // No phi which defined the vreg we are tracking, follow back through all
+ // predecessors with the existing vreg.
+ for (size_t i = 0, iend = block->mir()->numPredecessors(); i < iend; i++) {
+ LBlock* predecessor = block->mir()->getPredecessor(i)->lir();
+ if (!addPredecessor(predecessor, vreg, alloc)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void AllocationIntegrityState::checkSafepointAllocation(LInstruction* ins,
+ uint32_t vreg,
+ LAllocation alloc) {
+ LSafepoint* safepoint = ins->safepoint();
+ MOZ_ASSERT(safepoint);
+
+ if (ins->isCall() && alloc.isRegister()) {
+ return;
+ }
+
+ if (alloc.isRegister()) {
+ MOZ_ASSERT(safepoint->liveRegs().has(alloc.toRegister()));
+ }
+
+ // The |this| argument slot is implicitly included in all safepoints.
+ if (alloc.isArgument() &&
+ alloc.toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value)) {
+ return;
+ }
+
+ LDefinition::Type type = virtualRegisters[vreg]
+ ? virtualRegisters[vreg]->type()
+ : LDefinition::GENERAL;
+
+ switch (type) {
+ case LDefinition::OBJECT:
+ MOZ_ASSERT(safepoint->hasGcPointer(alloc));
+ break;
+ case LDefinition::STACKRESULTS:
+ MOZ_ASSERT(safepoint->hasAllGcPointersFromStackArea(alloc));
+ break;
+ case LDefinition::SLOTS:
+ MOZ_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc));
+ break;
+# ifdef JS_NUNBOX32
+ // Do not assert that safepoint information for nunbox types is complete,
+    // because if the vregs for a value's components are copied to multiple
+    // places, the safepoint information may not reflect all copies. All copies
+ // of payloads must be reflected, however, for generational GC.
+ case LDefinition::TYPE:
+ break;
+ case LDefinition::PAYLOAD:
+ MOZ_ASSERT(safepoint->hasNunboxPayload(alloc));
+ break;
+# else
+ case LDefinition::BOX:
+ MOZ_ASSERT(safepoint->hasBoxedValue(alloc));
+ break;
+# endif
+ default:
+ break;
+ }
+}
+
+bool AllocationIntegrityState::addPredecessor(LBlock* block, uint32_t vreg,
+ LAllocation alloc) {
+ // There is no need to reanalyze if we have already seen this predecessor.
+ // We share the seen allocations across analysis of each use, as there will
+ // likely be common ground between different uses of the same vreg.
+ IntegrityItem item;
+ item.block = block;
+ item.vreg = vreg;
+ item.alloc = alloc;
+ item.index = seen.count();
+
+ IntegrityItemSet::AddPtr p = seen.lookupForAdd(item);
+ if (p) {
+ return true;
+ }
+ if (!seen.add(p, item)) {
+ return false;
+ }
+
+ return worklist.append(item);
+}
+
+void AllocationIntegrityState::dump() {
+# ifdef JS_JITSPEW
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ JitSpew(JitSpew_RegAlloc, "Register Allocation Integrity State:");
+
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+ LBlock* block = graph.getBlock(blockIndex);
+ MBasicBlock* mir = block->mir();
+
+ JitSpewHeader(JitSpew_RegAlloc);
+ JitSpewCont(JitSpew_RegAlloc, " Block %lu",
+ static_cast<unsigned long>(blockIndex));
+ for (size_t i = 0; i < mir->numSuccessors(); i++) {
+ JitSpewCont(JitSpew_RegAlloc, " [successor %u]",
+ mir->getSuccessor(i)->id());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+
+ for (size_t i = 0; i < block->numPhis(); i++) {
+ const InstructionInfo& info = blocks[blockIndex].phis[i];
+ LPhi* phi = block->getPhi(i);
+ CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
+ CodePosition output(block->getPhi(block->numPhis() - 1)->id(),
+ CodePosition::OUTPUT);
+
+ JitSpewHeader(JitSpew_RegAlloc);
+ JitSpewCont(JitSpew_RegAlloc, " %u-%u Phi [def %s] ", input.bits(),
+ output.bits(), phi->getDef(0)->toString().get());
+ for (size_t j = 0; j < phi->numOperands(); j++) {
+ JitSpewCont(JitSpew_RegAlloc, " [use %s]",
+ info.inputs[j].toString().get());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ }
+
+ for (LInstructionIterator iter = block->begin(); iter != block->end();
+ iter++) {
+ LInstruction* ins = *iter;
+ const InstructionInfo& info = instructions[ins->id()];
+
+ CodePosition input(ins->id(), CodePosition::INPUT);
+ CodePosition output(ins->id(), CodePosition::OUTPUT);
+
+ JitSpewHeader(JitSpew_RegAlloc);
+ JitSpewCont(JitSpew_RegAlloc, " ");
+ if (input != CodePosition::MIN) {
+ JitSpewCont(JitSpew_RegAlloc, "%u-%u ", input.bits(), output.bits());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "%s", ins->opName());
+
+ if (ins->isMoveGroup()) {
+ LMoveGroup* group = ins->toMoveGroup();
+ for (int i = group->numMoves() - 1; i >= 0; i--) {
+ JitSpewCont(JitSpew_RegAlloc, " [%s <- %s]",
+ group->getMove(i).to().toString().get(),
+ group->getMove(i).from().toString().get());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ continue;
+ }
+
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ JitSpewCont(JitSpew_RegAlloc, " [def %s]",
+ ins->getDef(i)->toString().get());
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ if (!temp->isBogusTemp()) {
+ JitSpewCont(JitSpew_RegAlloc, " [temp v%u %s]",
+ info.temps[i].virtualRegister(), temp->toString().get());
+ }
+ }
+
+ size_t index = 0;
+ for (LInstruction::InputIterator alloc(*ins); alloc.more();
+ alloc.next()) {
+ JitSpewCont(JitSpew_RegAlloc, " [use %s",
+ info.inputs[index++].toString().get());
+ if (!alloc->isConstant()) {
+ JitSpewCont(JitSpew_RegAlloc, " %s", alloc->toString().get());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "]");
+ }
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ }
+ }
+
+ // Print discovered allocations at the ends of blocks, in the order they
+ // were discovered.
+
+ Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
+ if (!seenOrdered.appendN(IntegrityItem(), seen.count())) {
+ fprintf(stderr, "OOM while dumping allocations\n");
+ return;
+ }
+
+ for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
+ IntegrityItem item = iter.front();
+ seenOrdered[item.index] = item;
+ }
+
+ if (!seenOrdered.empty()) {
+ fprintf(stderr, "Intermediate Allocations:\n");
+
+ for (size_t i = 0; i < seenOrdered.length(); i++) {
+ IntegrityItem item = seenOrdered[i];
+ fprintf(stderr, " block %u reg v%u alloc %s\n", item.block->mir()->id(),
+ item.vreg, item.alloc.toString().get());
+ }
+ }
+
+ fprintf(stderr, "\n");
+# endif
+}
+#endif // DEBUG
+
+const CodePosition CodePosition::MAX(UINT_MAX);
+const CodePosition CodePosition::MIN(0);
+
+bool RegisterAllocator::init() {
+ if (!insData.init(mir, graph.numInstructions())) {
+ return false;
+ }
+
+ if (!entryPositions.reserve(graph.numBlocks()) ||
+ !exitPositions.reserve(graph.numBlocks())) {
+ return false;
+ }
+
+ for (size_t i = 0; i < graph.numBlocks(); i++) {
+ LBlock* block = graph.getBlock(i);
+ for (LInstructionIterator ins = block->begin(); ins != block->end();
+ ins++) {
+ insData[ins->id()] = *ins;
+ }
+ for (size_t j = 0; j < block->numPhis(); j++) {
+ LPhi* phi = block->getPhi(j);
+ insData[phi->id()] = phi;
+ }
+
+ CodePosition entry =
+ block->numPhis() != 0
+ ? CodePosition(block->getPhi(0)->id(), CodePosition::INPUT)
+ : inputOf(block->firstInstructionWithId());
+ CodePosition exit = outputOf(block->lastInstructionWithId());
+
+ MOZ_ASSERT(block->mir()->id() == i);
+ entryPositions.infallibleAppend(entry);
+ exitPositions.infallibleAppend(exit);
+ }
+
+ return true;
+}
+
+LMoveGroup* RegisterAllocator::getInputMoveGroup(LInstruction* ins) {
+ MOZ_ASSERT(!ins->fixReuseMoves());
+ if (ins->inputMoves()) {
+ return ins->inputMoves();
+ }
+
+ LMoveGroup* moves = LMoveGroup::New(alloc());
+ ins->setInputMoves(moves);
+ ins->block()->insertBefore(ins, moves);
+ return moves;
+}
+
+LMoveGroup* RegisterAllocator::getFixReuseMoveGroup(LInstruction* ins) {
+ if (ins->fixReuseMoves()) {
+ return ins->fixReuseMoves();
+ }
+
+ LMoveGroup* moves = LMoveGroup::New(alloc());
+ ins->setFixReuseMoves(moves);
+ ins->block()->insertBefore(ins, moves);
+ return moves;
+}
+
+LMoveGroup* RegisterAllocator::getMoveGroupAfter(LInstruction* ins) {
+ if (ins->movesAfter()) {
+ return ins->movesAfter();
+ }
+
+ LMoveGroup* moves = LMoveGroup::New(alloc());
+ ins->setMovesAfter(moves);
+ ins->block()->insertAfter(ins, moves);
+ return moves;
+}
+
+void RegisterAllocator::dumpInstructions(const char* who) {
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_RegAlloc, "LIR instructions %s", who);
+
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+ LBlock* block = graph.getBlock(blockIndex);
+ MBasicBlock* mir = block->mir();
+
+ JitSpewHeader(JitSpew_RegAlloc);
+ JitSpewCont(JitSpew_RegAlloc, " Block %lu",
+ static_cast<unsigned long>(blockIndex));
+ for (size_t i = 0; i < mir->numSuccessors(); i++) {
+ JitSpewCont(JitSpew_RegAlloc, " [successor %u]",
+ mir->getSuccessor(i)->id());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+
+ for (size_t i = 0; i < block->numPhis(); i++) {
+ LPhi* phi = block->getPhi(i);
+
+ JitSpewHeader(JitSpew_RegAlloc);
+ JitSpewCont(JitSpew_RegAlloc, " %u-%u Phi [def %s]",
+ inputOf(phi).bits(), outputOf(phi).bits(),
+ phi->getDef(0)->toString().get());
+ for (size_t j = 0; j < phi->numOperands(); j++) {
+ JitSpewCont(JitSpew_RegAlloc, " [use %s]",
+ phi->getOperand(j)->toString().get());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ }
+
+ for (LInstructionIterator iter = block->begin(); iter != block->end();
+ iter++) {
+ LInstruction* ins = *iter;
+
+ JitSpewHeader(JitSpew_RegAlloc);
+ JitSpewCont(JitSpew_RegAlloc, " ");
+ if (ins->id() != 0) {
+ JitSpewCont(JitSpew_RegAlloc, "%u-%u ", inputOf(ins).bits(),
+ outputOf(ins).bits());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "%s", ins->opName());
+
+ if (ins->isMoveGroup()) {
+ LMoveGroup* group = ins->toMoveGroup();
+ for (int i = group->numMoves() - 1; i >= 0; i--) {
+          // Use two printfs, as LAllocation::toString is not reentrant.
+ JitSpewCont(JitSpew_RegAlloc, " [%s",
+ group->getMove(i).to().toString().get());
+ JitSpewCont(JitSpew_RegAlloc, " <- %s]",
+ group->getMove(i).from().toString().get());
+ }
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ continue;
+ }
+
+ for (size_t i = 0; i < ins->numDefs(); i++) {
+ JitSpewCont(JitSpew_RegAlloc, " [def %s]",
+ ins->getDef(i)->toString().get());
+ }
+
+ for (size_t i = 0; i < ins->numTemps(); i++) {
+ LDefinition* temp = ins->getTemp(i);
+ if (!temp->isBogusTemp()) {
+ JitSpewCont(JitSpew_RegAlloc, " [temp %s]", temp->toString().get());
+ }
+ }
+
+ for (LInstruction::InputIterator alloc(*ins); alloc.more();
+ alloc.next()) {
+ if (!alloc->isBogus()) {
+ JitSpewCont(JitSpew_RegAlloc, " [use %s]", alloc->toString().get());
+ }
+ }
+
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+ }
+ }
+ JitSpewCont(JitSpew_RegAlloc, "\n");
+#endif // JS_JITSPEW
+}
diff --git a/js/src/jit/RegisterAllocator.h b/js/src/jit/RegisterAllocator.h
new file mode 100644
index 0000000000..845ef91ce9
--- /dev/null
+++ b/js/src/jit/RegisterAllocator.h
@@ -0,0 +1,314 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RegisterAllocator_h
+#define jit_RegisterAllocator_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/LIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+// Generic structures and functions for use by register allocators.
+
+namespace js {
+namespace jit {
+
+class LIRGenerator;
+
+#ifdef DEBUG
+// Structure for running a liveness analysis on a finished register allocation.
+// This analysis can be used for two purposes:
+//
+// - Check the integrity of the allocation, i.e. that the reads and writes of
+// physical values preserve the semantics of the original virtual registers.
+//
+// - Populate safepoints with live registers, GC thing and value data, to
+// streamline the process of prototyping new allocators.
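+//
+// Typical use, as a sketch (the driver code around the allocator below is
+// illustrative and not part of this header):
+//
+//   AllocationIntegrityState integrity(graph);
+//   if (!integrity.record()) {
+//     return false;  // call before running the register allocator
+//   }
+//   // ... run the register allocator on the graph ...
+//   if (!integrity.check()) {
+//     return false;  // call after allocation; asserts on bad allocations
+//   }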
+struct AllocationIntegrityState {
+ explicit AllocationIntegrityState(LIRGraph& graph) : graph(graph) {}
+
+ // Record all virtual registers in the graph. This must be called before
+ // register allocation, to pick up the original LUses.
+ [[nodiscard]] bool record();
+
+ // Perform the liveness analysis on the graph, and assert on an invalid
+ // allocation. This must be called after register allocation, to pick up
+ // all assigned physical values.
+ [[nodiscard]] bool check();
+
+ private:
+ LIRGraph& graph;
+
+ // For all instructions and phis in the graph, keep track of the virtual
+ // registers for all inputs and outputs of the nodes. These are overwritten
+ // in place during register allocation. This information is kept on the
+ // side rather than in the instructions and phis themselves to avoid
+ // debug-builds-only bloat in the size of the involved structures.
+
+ struct InstructionInfo {
+ Vector<LAllocation, 2, SystemAllocPolicy> inputs;
+ Vector<LDefinition, 0, SystemAllocPolicy> temps;
+ Vector<LDefinition, 1, SystemAllocPolicy> outputs;
+
+ InstructionInfo() = default;
+
+ InstructionInfo(const InstructionInfo& o) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!inputs.appendAll(o.inputs) || !temps.appendAll(o.temps) ||
+ !outputs.appendAll(o.outputs)) {
+ oomUnsafe.crash("InstructionInfo::InstructionInfo");
+ }
+ }
+ };
+ Vector<InstructionInfo, 0, SystemAllocPolicy> instructions;
+
+ struct BlockInfo {
+ Vector<InstructionInfo, 5, SystemAllocPolicy> phis;
+ BlockInfo() = default;
+ BlockInfo(const BlockInfo& o) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!phis.appendAll(o.phis)) {
+ oomUnsafe.crash("BlockInfo::BlockInfo");
+ }
+ }
+ };
+ Vector<BlockInfo, 0, SystemAllocPolicy> blocks;
+
+ Vector<LDefinition*, 20, SystemAllocPolicy> virtualRegisters;
+
+ // Describes a correspondence that should hold at the end of a block.
+ // The value which was written to vreg in the original LIR should be
+ // physically stored in alloc after the register allocation.
+ struct IntegrityItem {
+ LBlock* block;
+ uint32_t vreg;
+ LAllocation alloc;
+
+ // Order of insertion into seen, for sorting.
+ uint32_t index;
+
+ using Lookup = IntegrityItem;
+ static HashNumber hash(const IntegrityItem& item) {
+ HashNumber hash = item.alloc.hash();
+ hash = mozilla::RotateLeft(hash, 4) ^ item.vreg;
+ hash = mozilla::RotateLeft(hash, 4) ^ HashNumber(item.block->mir()->id());
+ return hash;
+ }
+ static bool match(const IntegrityItem& one, const IntegrityItem& two) {
+ return one.block == two.block && one.vreg == two.vreg &&
+ one.alloc == two.alloc;
+ }
+ };
+
+ // Items still to be processed.
+ Vector<IntegrityItem, 10, SystemAllocPolicy> worklist;
+
+ // Set of all items that have already been processed.
+ typedef HashSet<IntegrityItem, IntegrityItem, SystemAllocPolicy>
+ IntegrityItemSet;
+ IntegrityItemSet seen;
+
+ [[nodiscard]] bool checkIntegrity(LBlock* block, LInstruction* ins,
+ uint32_t vreg, LAllocation alloc);
+ void checkSafepointAllocation(LInstruction* ins, uint32_t vreg,
+ LAllocation alloc);
+ [[nodiscard]] bool addPredecessor(LBlock* block, uint32_t vreg,
+ LAllocation alloc);
+
+ void dump();
+};
+#endif // DEBUG
+
+// Represents with better-than-instruction precision a position in the
+// instruction stream.
+//
+// An issue comes up when performing register allocation as to how to represent
+// information such as "this register is only needed for the input of
+// this instruction, it can be clobbered in the output". Just having ranges
+// of instruction IDs is insufficiently expressive to denote all possibilities.
+// This class solves this issue by associating an extra bit with the instruction
+// ID which indicates whether the position is the input half or output half of
+// an instruction.
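+//
+// For example, with the encoding used below (instruction id shifted left by
+// one, plus a low bit for the sub-position), instruction 5 yields the
+// positions (5 << 1) | INPUT == 10 and (5 << 1) | OUTPUT == 11, so the input
+// half always compares before the output half. (Illustrative values only.)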
+class CodePosition {
+ private:
+ constexpr explicit CodePosition(uint32_t bits) : bits_(bits) {}
+
+ static const unsigned int INSTRUCTION_SHIFT = 1;
+ static const unsigned int SUBPOSITION_MASK = 1;
+ uint32_t bits_;
+
+ public:
+ static const CodePosition MAX;
+ static const CodePosition MIN;
+
+ // This is the half of the instruction this code position represents, as
+ // described in the huge comment above.
+ enum SubPosition { INPUT, OUTPUT };
+
+ constexpr CodePosition() : bits_(0) {}
+
+ CodePosition(uint32_t instruction, SubPosition where) {
+ MOZ_ASSERT(instruction < 0x80000000u);
+ MOZ_ASSERT(((uint32_t)where & SUBPOSITION_MASK) == (uint32_t)where);
+ bits_ = (instruction << INSTRUCTION_SHIFT) | (uint32_t)where;
+ }
+
+ uint32_t ins() const { return bits_ >> INSTRUCTION_SHIFT; }
+
+ uint32_t bits() const { return bits_; }
+
+ SubPosition subpos() const { return (SubPosition)(bits_ & SUBPOSITION_MASK); }
+
+ bool operator<(CodePosition other) const { return bits_ < other.bits_; }
+
+ bool operator<=(CodePosition other) const { return bits_ <= other.bits_; }
+
+ bool operator!=(CodePosition other) const { return bits_ != other.bits_; }
+
+ bool operator==(CodePosition other) const { return bits_ == other.bits_; }
+
+ bool operator>(CodePosition other) const { return bits_ > other.bits_; }
+
+ bool operator>=(CodePosition other) const { return bits_ >= other.bits_; }
+
+ uint32_t operator-(CodePosition other) const {
+ MOZ_ASSERT(bits_ >= other.bits_);
+ return bits_ - other.bits_;
+ }
+
+ CodePosition previous() const {
+ MOZ_ASSERT(*this != MIN);
+ return CodePosition(bits_ - 1);
+ }
+ CodePosition next() const {
+ MOZ_ASSERT(*this != MAX);
+ return CodePosition(bits_ + 1);
+ }
+};
+
+// Maps instruction ids (and code positions) to the corresponding LNode in
+// the graph.
+class InstructionDataMap {
+ FixedList<LNode*> insData_;
+
+ public:
+ InstructionDataMap() : insData_() {}
+
+ [[nodiscard]] bool init(MIRGenerator* gen, uint32_t numInstructions) {
+ if (!insData_.init(gen->alloc(), numInstructions)) {
+ return false;
+ }
+ memset(&insData_[0], 0, sizeof(LNode*) * numInstructions);
+ return true;
+ }
+
+ LNode*& operator[](CodePosition pos) { return operator[](pos.ins()); }
+ LNode* const& operator[](CodePosition pos) const {
+ return operator[](pos.ins());
+ }
+ LNode*& operator[](uint32_t ins) { return insData_[ins]; }
+ LNode* const& operator[](uint32_t ins) const { return insData_[ins]; }
+};
+
+// Common superclass for register allocators.
+class RegisterAllocator {
+ void operator=(const RegisterAllocator&) = delete;
+ RegisterAllocator(const RegisterAllocator&) = delete;
+
+ protected:
+ // Context
+ MIRGenerator* mir;
+ LIRGenerator* lir;
+ LIRGraph& graph;
+
+  // Pool of all registers that should be considered allocatable
+ AllocatableRegisterSet allRegisters_;
+
+ // Computed data
+ InstructionDataMap insData;
+ Vector<CodePosition, 12, SystemAllocPolicy> entryPositions;
+ Vector<CodePosition, 12, SystemAllocPolicy> exitPositions;
+
+ RegisterAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
+ : mir(mir), lir(lir), graph(graph), allRegisters_(RegisterSet::All()) {
+ MOZ_ASSERT(!allRegisters_.has(FramePointer));
+ if (mir->compilingWasm()) {
+ takeWasmRegisters(allRegisters_);
+ }
+ }
+
+ [[nodiscard]] bool init();
+
+ TempAllocator& alloc() const { return mir->alloc(); }
+
+ CodePosition outputOf(const LNode* ins) const {
+ return ins->isPhi() ? outputOf(ins->toPhi())
+ : outputOf(ins->toInstruction());
+ }
+ CodePosition outputOf(const LPhi* ins) const {
+ // All phis in a block write their outputs after all of them have
+ // read their inputs. Consequently, it doesn't make sense to talk
+ // about code positions in the middle of a series of phis.
+ LBlock* block = ins->block();
+ return CodePosition(block->getPhi(block->numPhis() - 1)->id(),
+ CodePosition::OUTPUT);
+ }
+ CodePosition outputOf(const LInstruction* ins) const {
+ return CodePosition(ins->id(), CodePosition::OUTPUT);
+ }
+ CodePosition inputOf(const LNode* ins) const {
+ return ins->isPhi() ? inputOf(ins->toPhi()) : inputOf(ins->toInstruction());
+ }
+ CodePosition inputOf(const LPhi* ins) const {
+ // All phis in a block read their inputs before any of them write their
+ // outputs. Consequently, it doesn't make sense to talk about code
+ // positions in the middle of a series of phis.
+ return CodePosition(ins->block()->getPhi(0)->id(), CodePosition::INPUT);
+ }
+ CodePosition inputOf(const LInstruction* ins) const {
+ return CodePosition(ins->id(), CodePosition::INPUT);
+ }
+ CodePosition entryOf(const LBlock* block) {
+ return entryPositions[block->mir()->id()];
+ }
+ CodePosition exitOf(const LBlock* block) {
+ return exitPositions[block->mir()->id()];
+ }
+
+ LMoveGroup* getInputMoveGroup(LInstruction* ins);
+ LMoveGroup* getFixReuseMoveGroup(LInstruction* ins);
+ LMoveGroup* getMoveGroupAfter(LInstruction* ins);
+
+ // Atomic group helper. See comments in BacktrackingAllocator.cpp.
+ CodePosition minimalDefEnd(LNode* ins) const;
+
+ void dumpInstructions(const char* who);
+
+ public:
+ template <typename TakeableSet>
+ static void takeWasmRegisters(TakeableSet& regs) {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
+ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64) || \
+ defined(JS_CODEGEN_RISCV64)
+ regs.take(HeapReg);
+#endif
+ MOZ_ASSERT(!regs.has(FramePointer));
+ }
+};
+
+static inline AnyRegister GetFixedRegister(const LDefinition* def,
+ const LUse* use) {
+ return def->isFloatReg()
+ ? AnyRegister(FloatRegister::FromCode(use->registerCode()))
+ : AnyRegister(Register::FromCode(use->registerCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_RegisterAllocator_h */
diff --git a/js/src/jit/RegisterSets.h b/js/src/jit/RegisterSets.h
new file mode 100644
index 0000000000..9ffc48f651
--- /dev/null
+++ b/js/src/jit/RegisterSets.h
@@ -0,0 +1,1332 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RegisterSets_h
+#define jit_RegisterSets_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Variant.h"
+
+#include <new>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/IonTypes.h"
+#include "jit/Registers.h"
+#include "js/Value.h"
+
+namespace js {
+namespace jit {
+
+struct AnyRegister {
+ using Code = uint8_t;
+
+ static const uint8_t Total = Registers::Total + FloatRegisters::Total;
+ static const uint8_t FirstFloatReg = Registers::Total;
+ static const uint8_t Invalid = UINT8_MAX;
+
+ static_assert(size_t(Registers::Total) + FloatRegisters::Total <= UINT8_MAX,
+ "Number of registers must fit in uint8_t");
+
+ private:
+ Code code_;
+
+ public:
+ AnyRegister() : code_(Invalid) {}
+
+ explicit AnyRegister(Register gpr) { code_ = gpr.code(); }
+ explicit AnyRegister(FloatRegister fpu) {
+ code_ = fpu.code() + Registers::Total;
+ }
+ static AnyRegister FromCode(uint8_t i) {
+ MOZ_ASSERT(i < Total);
+ AnyRegister r;
+ r.code_ = i;
+ return r;
+ }
+ bool isFloat() const {
+ MOZ_ASSERT(isValid());
+ return code_ >= Registers::Total;
+ }
+ Register gpr() const {
+ MOZ_ASSERT(!isFloat());
+ return Register::FromCode(code_);
+ }
+ FloatRegister fpu() const {
+ MOZ_ASSERT(isFloat());
+ return FloatRegister::FromCode(code_ - Registers::Total);
+ }
+ bool operator==(AnyRegister other) const {
+ // We don't need the operands to be valid to test for equality.
+ return code_ == other.code_;
+ }
+ bool operator!=(AnyRegister other) const {
+ // We don't need the operands to be valid to test for equality.
+ return code_ != other.code_;
+ }
+ const char* name() const { return isFloat() ? fpu().name() : gpr().name(); }
+ Code code() const {
+ MOZ_ASSERT(isValid());
+ return code_;
+ }
+ bool volatile_() const {
+ return isFloat() ? fpu().volatile_() : gpr().volatile_();
+ }
+ AnyRegister aliased(uint8_t aliasIdx) const {
+ AnyRegister ret;
+ if (isFloat()) {
+ ret = AnyRegister(fpu().aliased(aliasIdx));
+ } else {
+ ret = AnyRegister(gpr().aliased(aliasIdx));
+ }
+ MOZ_ASSERT_IF(aliasIdx == 0, ret == *this);
+ return ret;
+ }
+ uint8_t numAliased() const {
+ if (isFloat()) {
+ return fpu().numAliased();
+ }
+ return gpr().numAliased();
+ }
+ bool aliases(const AnyRegister& other) const {
+ if (isFloat() && other.isFloat()) {
+ return fpu().aliases(other.fpu());
+ }
+ if (!isFloat() && !other.isFloat()) {
+ return gpr().aliases(other.gpr());
+ }
+ return false;
+ }
+  // Do the two registers hold the same type of data (e.g. both float32, both
+  // gpr)?
+ bool isCompatibleReg(const AnyRegister other) const {
+ if (isFloat() && other.isFloat()) {
+ return fpu().equiv(other.fpu());
+ }
+ if (!isFloat() && !other.isFloat()) {
+ return true;
+ }
+ return false;
+ }
+ bool isValid() const { return code_ != Invalid; }
+};
+
+// Registers to hold a boxed value. Uses one register on 64 bit
+// platforms, two registers on 32 bit platforms.
+class ValueOperand {
+#if defined(JS_NUNBOX32)
+ Register type_;
+ Register payload_;
+
+ public:
+ constexpr ValueOperand(Register type, Register payload)
+ : type_(type), payload_(payload) {}
+
+ constexpr Register typeReg() const { return type_; }
+ constexpr Register payloadReg() const { return payload_; }
+ constexpr Register64 toRegister64() const {
+ return Register64(typeReg(), payloadReg());
+ }
+ constexpr bool aliases(Register reg) const {
+ return type_ == reg || payload_ == reg;
+ }
+ constexpr Register payloadOrValueReg() const { return payloadReg(); }
+ bool hasVolatileReg() const {
+ return type_.volatile_() || payload_.volatile_();
+ }
+ constexpr bool operator==(const ValueOperand& o) const {
+ return type_ == o.type_ && payload_ == o.payload_;
+ }
+ constexpr bool operator!=(const ValueOperand& o) const {
+ return !(*this == o);
+ }
+
+#elif defined(JS_PUNBOX64)
+ Register value_;
+
+ public:
+ explicit constexpr ValueOperand(Register value) : value_(value) {}
+
+ constexpr Register valueReg() const { return value_; }
+ constexpr Register64 toRegister64() const { return Register64(valueReg()); }
+ constexpr bool aliases(Register reg) const { return value_ == reg; }
+ constexpr Register payloadOrValueReg() const { return valueReg(); }
+ bool hasVolatileReg() const { return value_.volatile_(); }
+ constexpr bool operator==(const ValueOperand& o) const {
+ return value_ == o.value_;
+ }
+ constexpr bool operator!=(const ValueOperand& o) const {
+ return !(*this == o);
+ }
+#endif
+
+ constexpr Register scratchReg() const { return payloadOrValueReg(); }
+
+ ValueOperand() = default;
+};
+
+// Registers to hold either a typed or untyped value.
+class TypedOrValueRegister {
+ // Type of value being stored.
+ MIRType type_;
+
+ union U {
+ AnyRegister::Code typed;
+#if defined(JS_PUNBOX64)
+ Register::Code value;
+#elif defined(JS_NUNBOX32)
+ struct {
+ Register::Code valueType;
+ Register::Code valuePayload;
+ } s;
+#else
+# error "Bad architecture"
+#endif
+ } data;
+
+ public:
+ TypedOrValueRegister() = default;
+
+ TypedOrValueRegister(MIRType type, AnyRegister reg) : type_(type) {
+ data.typed = reg.code();
+ }
+
+ MOZ_IMPLICIT TypedOrValueRegister(ValueOperand value)
+ : type_(MIRType::Value) {
+#if defined(JS_PUNBOX64)
+ data.value = value.valueReg().code();
+#elif defined(JS_NUNBOX32)
+ data.s.valueType = value.typeReg().code();
+ data.s.valuePayload = value.payloadReg().code();
+#else
+# error "Bad architecture"
+#endif
+ }
+
+ MIRType type() const { return type_; }
+
+ bool hasTyped() const {
+ return type() != MIRType::None && type() != MIRType::Value;
+ }
+
+ bool hasValue() const { return type() == MIRType::Value; }
+
+ AnyRegister typedReg() const {
+ MOZ_ASSERT(hasTyped());
+ return AnyRegister::FromCode(data.typed);
+ }
+
+ ValueOperand valueReg() const {
+ MOZ_ASSERT(hasValue());
+#if defined(JS_PUNBOX64)
+ return ValueOperand(Register::FromCode(data.value));
+#elif defined(JS_NUNBOX32)
+ return ValueOperand(Register::FromCode(data.s.valueType),
+ Register::FromCode(data.s.valuePayload));
+#else
+# error "Bad architecture"
+#endif
+ }
+
+ AnyRegister scratchReg() {
+ if (hasValue()) {
+ return AnyRegister(valueReg().scratchReg());
+ }
+ return typedReg();
+ }
+};
+
+// A constant value, or registers to hold a typed/untyped value.
+class ConstantOrRegister {
+ // Whether a constant value is being stored.
+ bool constant_;
+
+ // Space to hold either a Value or a TypedOrValueRegister.
+ union U {
+ JS::Value constant;
+ TypedOrValueRegister reg;
+
+ // |constant| has a non-trivial constructor and therefore MUST be
+ // placement-new'd into existence.
+ MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ U() {}
+ MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ } data;
+
+ public:
+ ConstantOrRegister() = delete;
+
+ MOZ_IMPLICIT ConstantOrRegister(const JS::Value& value) : constant_(true) {
+ MOZ_ASSERT(constant());
+ new (&data.constant) JS::Value(value);
+ }
+
+ MOZ_IMPLICIT ConstantOrRegister(TypedOrValueRegister reg) : constant_(false) {
+ MOZ_ASSERT(!constant());
+ new (&data.reg) TypedOrValueRegister(reg);
+ }
+
+ bool constant() const { return constant_; }
+
+ JS::Value value() const {
+ MOZ_ASSERT(constant());
+ return data.constant;
+ }
+
+ const TypedOrValueRegister& reg() const {
+ MOZ_ASSERT(!constant());
+ return data.reg;
+ }
+};
+
+template <typename T>
+class TypedRegisterSet {
+ public:
+ using RegType = T;
+ using SetType = typename T::SetType;
+
+ private:
+ SetType bits_;
+
+ public:
+ explicit constexpr TypedRegisterSet(SetType bits) : bits_(bits) {}
+
+ constexpr TypedRegisterSet() : bits_(0) {}
+ constexpr TypedRegisterSet(const TypedRegisterSet<T>& set)
+ : bits_(set.bits_) {}
+
+ static inline TypedRegisterSet All() {
+ return TypedRegisterSet(T::Codes::AllocatableMask);
+ }
+ static inline TypedRegisterSet Intersect(const TypedRegisterSet& lhs,
+ const TypedRegisterSet& rhs) {
+ return TypedRegisterSet(lhs.bits_ & rhs.bits_);
+ }
+ static inline TypedRegisterSet Union(const TypedRegisterSet& lhs,
+ const TypedRegisterSet& rhs) {
+ return TypedRegisterSet(lhs.bits_ | rhs.bits_);
+ }
+ static inline TypedRegisterSet Not(const TypedRegisterSet& in) {
+ return TypedRegisterSet(~in.bits_ & T::Codes::AllocatableMask);
+ }
+ static inline TypedRegisterSet Subtract(const TypedRegisterSet& lhs,
+ const TypedRegisterSet& rhs) {
+ return TypedRegisterSet(lhs.bits_ & ~rhs.bits_);
+ }
+ static inline TypedRegisterSet VolatileNot(const TypedRegisterSet& in) {
+ const SetType allocatableVolatile =
+ T::Codes::AllocatableMask & T::Codes::VolatileMask;
+ return TypedRegisterSet(~in.bits_ & allocatableVolatile);
+ }
+ static inline TypedRegisterSet Volatile() {
+ return TypedRegisterSet(T::Codes::AllocatableMask & T::Codes::VolatileMask);
+ }
+ static inline TypedRegisterSet NonVolatile() {
+ return TypedRegisterSet(T::Codes::AllocatableMask &
+ T::Codes::NonVolatileMask);
+ }
+
+ bool empty() const { return !bits_; }
+ void clear() { bits_ = 0; }
+
+ bool hasRegisterIndex(T reg) const {
+ return !!(bits_ & (SetType(1) << reg.code()));
+ }
+ bool hasAllocatable(T reg) const {
+ return !(~bits_ & reg.alignedOrDominatedAliasedSet());
+ }
+
+ void addRegisterIndex(T reg) { bits_ |= (SetType(1) << reg.code()); }
+ void addAllocatable(T reg) { bits_ |= reg.alignedOrDominatedAliasedSet(); }
+
+ void takeRegisterIndex(T reg) { bits_ &= ~(SetType(1) << reg.code()); }
+ void takeAllocatable(T reg) { bits_ &= ~reg.alignedOrDominatedAliasedSet(); }
+
+ static constexpr RegTypeName DefaultType = RegType::DefaultType;
+
+ template <RegTypeName Name>
+ SetType allLive() const {
+ return T::template LiveAsIndexableSet<Name>(bits_);
+ }
+ template <RegTypeName Name>
+ SetType allAllocatable() const {
+ return T::template AllocatableAsIndexableSet<Name>(bits_);
+ }
+
+ static RegType FirstRegister(SetType set) {
+ return RegType::FromCode(RegType::FirstBit(set));
+ }
+ static RegType LastRegister(SetType set) {
+ return RegType::FromCode(RegType::LastBit(set));
+ }
+
+ SetType bits() const { return bits_; }
+ uint32_t size() const { return T::SetSize(bits_); }
+ bool operator==(const TypedRegisterSet<T>& other) const {
+ return other.bits_ == bits_;
+ }
+ TypedRegisterSet<T> reduceSetForPush() const {
+ return T::ReduceSetForPush(*this);
+ }
+ uint32_t getPushSizeInBytes() const { return T::GetPushSizeInBytes(*this); }
+
+ size_t offsetOfPushedRegister(RegType reg) const {
+ MOZ_ASSERT(hasRegisterIndex(reg));
+ return T::OffsetOfPushedRegister(bits(), reg);
+ }
+};
+
+using GeneralRegisterSet = TypedRegisterSet<Register>;
+using FloatRegisterSet = TypedRegisterSet<FloatRegister>;
+
+class AnyRegisterIterator;
+
+class RegisterSet {
+ GeneralRegisterSet gpr_;
+ FloatRegisterSet fpu_;
+
+ friend class AnyRegisterIterator;
+
+ public:
+ RegisterSet() = default;
+ constexpr RegisterSet(const GeneralRegisterSet& gpr,
+ const FloatRegisterSet& fpu)
+ : gpr_(gpr), fpu_(fpu) {}
+ static inline RegisterSet All() {
+ return RegisterSet(GeneralRegisterSet::All(), FloatRegisterSet::All());
+ }
+ static inline RegisterSet Intersect(const RegisterSet& lhs,
+ const RegisterSet& rhs) {
+ return RegisterSet(GeneralRegisterSet::Intersect(lhs.gpr_, rhs.gpr_),
+ FloatRegisterSet::Intersect(lhs.fpu_, rhs.fpu_));
+ }
+ static inline RegisterSet Union(const RegisterSet& lhs,
+ const RegisterSet& rhs) {
+ return RegisterSet(GeneralRegisterSet::Union(lhs.gpr_, rhs.gpr_),
+ FloatRegisterSet::Union(lhs.fpu_, rhs.fpu_));
+ }
+ static inline RegisterSet Not(const RegisterSet& in) {
+ return RegisterSet(GeneralRegisterSet::Not(in.gpr_),
+ FloatRegisterSet::Not(in.fpu_));
+ }
+ static inline RegisterSet VolatileNot(const RegisterSet& in) {
+ return RegisterSet(GeneralRegisterSet::VolatileNot(in.gpr_),
+ FloatRegisterSet::VolatileNot(in.fpu_));
+ }
+ static inline RegisterSet Volatile() {
+ return RegisterSet(GeneralRegisterSet::Volatile(),
+ FloatRegisterSet::Volatile());
+ }
+
+ bool empty() const { return fpu_.empty() && gpr_.empty(); }
+ void clear() {
+ fpu_.clear();
+ gpr_.clear();
+ }
+ bool emptyGeneral() const { return gpr_.empty(); }
+ bool emptyFloat() const { return fpu_.empty(); }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::GPR;
+
+ constexpr GeneralRegisterSet gprs() const { return gpr_; }
+ GeneralRegisterSet& gprs() { return gpr_; }
+ constexpr FloatRegisterSet fpus() const { return fpu_; }
+ FloatRegisterSet& fpus() { return fpu_; }
+ bool operator==(const RegisterSet& other) const {
+ return other.gpr_ == gpr_ && other.fpu_ == fpu_;
+ }
+};
+
+// [SMDOC] JIT Register-Set overview
+//
+// There are 2 use cases for register sets:
+//
+// 1. To serve as a pool of allocatable registers. This is useful for working
+//    on the code produced by some stub where free registers are available, or
+//    when we can release some registers.
+//
+// 2. To serve as a list of typed registers. This is useful for working with
+//    live registers and for manipulating them with the proper instructions.
+//    This is used by the register allocator to fill the Safepoints.
+//
+// These 2 use cases can be used on top of 3 different backend representations
+// of register sets, which are either GeneralRegisterSet, FloatRegisterSet, or
+// RegisterSet (for both). These classes are used to store the bit sets that
+// represent each register.
+//
+// Each use case defines an Accessors class, such as AllocatableSetAccessors
+// or LiveSetAccessors, which is parameterized with the type of the register
+// set. These accessors are in charge of manipulating the register set in a
+// consistent way.
+//
+// The RegSetCommonInterface class is used to wrap the accessors with convenient
+// shortcuts which are based on the accessors.
+//
+// Then, to avoid too many levels of complexity while using these interfaces,
+// shortcut templates are created to make it easy to distinguish between a
+// register set used for allocating registers and a register set used for
+// making a collection of allocated (live) registers.
+//
+// This separation exists to prevent mixing LiveSet and AllocatableSet
+// manipulations of the same register set, and to ensure safety while avoiding
+// false positives.
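+//
+// A minimal usage sketch (illustrative only; it assumes the
+// AllocatableGeneralRegisterSet and LiveGeneralRegisterSet shortcut aliases
+// that are expected to be defined further below in this header):
+//
+//   AllocatableGeneralRegisterSet pool(GeneralRegisterSet::All());
+//   Register scratch = pool.takeAny();   // take a free register
+//
+//   LiveGeneralRegisterSet live;
+//   live.add(scratch);                   // record it as live/allocated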
+
+template <typename RegisterSet>
+class AllocatableSet;
+
+template <typename RegisterSet>
+class LiveSet;
+
+// [SMDOC] JIT Register-Set (Allocatable)
+//
+// Base accessors classes have the minimal set of raw methods to manipulate the
+// register set given as parameter in a consistent manner. These methods are:
+//
+// - all<Type>: Returns a bit-set of all the registers of a specific type
+//   which are present.
+//
+// - has: Returns whether all the bits needed to take a register are present.
+//
+// - takeUnchecked: Subtracts the bits used to represent the register in the
+// register set.
+//
+// - addUnchecked: Adds the bits used to represent the register in the
+// register set.
+
+// The AllocatableSet accessors are used to make a pool of unused
+// registers. Taking or adding registers should consider the aliasing rules of
+// the architecture. For example, on ARM, the following piece of code should
+// work fine, knowing that the double register |d0| is composed of float
+// registers |s0| and |s1|:
+//
+// AllocatableFloatRegisterSet regs;
+// regs.add(s0);
+// regs.add(s1);
+// // d0 is now available.
+// regs.take(d0);
+//
+// These accessors are useful for allocating registers within the functions used
+// to generate stubs, trampolines, and inline caches (BaselineIC, IonCache).
+template <typename Set>
+class AllocatableSetAccessors {
+ public:
+ using RegSet = Set;
+ using RegType = typename RegSet::RegType;
+ using SetType = typename RegSet::SetType;
+
+ protected:
+ RegSet set_;
+
+ template <RegTypeName Name>
+ SetType all() const {
+ return set_.template allAllocatable<Name>();
+ }
+
+ public:
+ AllocatableSetAccessors() : set_() {}
+ explicit constexpr AllocatableSetAccessors(SetType set) : set_(set) {}
+ explicit constexpr AllocatableSetAccessors(RegSet set) : set_(set) {}
+
+ bool has(RegType reg) const { return set_.hasAllocatable(reg); }
+
+ template <RegTypeName Name>
+ bool hasAny(RegType reg) const {
+ return all<Name>() != 0;
+ }
+
+ void addUnchecked(RegType reg) { set_.addAllocatable(reg); }
+
+ void takeUnchecked(RegType reg) { set_.takeAllocatable(reg); }
+};
+
+// Specialization of the AllocatableSet accessors for the RegisterSet aggregate.
+template <>
+class AllocatableSetAccessors<RegisterSet> {
+ public:
+ using RegSet = RegisterSet;
+ using RegType = AnyRegister;
+ using SetType = char;
+
+ protected:
+ RegisterSet set_;
+
+ template <RegTypeName Name>
+ GeneralRegisterSet::SetType allGpr() const {
+ return set_.gprs().allAllocatable<Name>();
+ }
+ template <RegTypeName Name>
+ FloatRegisterSet::SetType allFpu() const {
+ return set_.fpus().allAllocatable<Name>();
+ }
+
+ public:
+ AllocatableSetAccessors() : set_() {}
+ explicit constexpr AllocatableSetAccessors(SetType) = delete;
+ explicit constexpr AllocatableSetAccessors(RegisterSet set) : set_(set) {}
+
+ bool has(Register reg) const { return set_.gprs().hasAllocatable(reg); }
+ bool has(FloatRegister reg) const { return set_.fpus().hasAllocatable(reg); }
+
+ void addUnchecked(Register reg) { set_.gprs().addAllocatable(reg); }
+ void addUnchecked(FloatRegister reg) { set_.fpus().addAllocatable(reg); }
+
+ void takeUnchecked(Register reg) { set_.gprs().takeAllocatable(reg); }
+ void takeUnchecked(FloatRegister reg) { set_.fpus().takeAllocatable(reg); }
+};
+
+// [SMDOC] JIT Register-Set (Live)
+//
+// The LiveSet accessors are used to collect a list of allocated
+// registers. Taking or adding a register should *not* consider the aliases, as
+// we care about interpreting the registers with the correct type. For example,
+// on x64, where one float register can be interpreted as a Simd128, a Double,
+// or a Float, adding xmm0 as a Simd128 does not make the register available
+// as a Double.
+//
+// LiveFloatRegisterSet regs;
+// regs.add(xmm0.asSimd128());
+// regs.take(xmm0); // Assert!
+//
+// These accessors are useful for recording the result of a register allocator,
+// such as what the Backtracking allocator does on the Safepoints.
+template <typename Set>
+class LiveSetAccessors {
+ public:
+ using RegSet = Set;
+ using RegType = typename RegSet::RegType;
+ using SetType = typename RegSet::SetType;
+
+ protected:
+ RegSet set_;
+
+ template <RegTypeName Name>
+ SetType all() const {
+ return set_.template allLive<Name>();
+ }
+
+ public:
+ LiveSetAccessors() : set_() {}
+ explicit constexpr LiveSetAccessors(SetType set) : set_(set) {}
+ explicit constexpr LiveSetAccessors(RegSet set) : set_(set) {}
+
+ bool has(RegType reg) const { return set_.hasRegisterIndex(reg); }
+
+ void addUnchecked(RegType reg) { set_.addRegisterIndex(reg); }
+
+ void takeUnchecked(RegType reg) { set_.takeRegisterIndex(reg); }
+};
+
+// Specialization of the LiveSet accessors for the RegisterSet aggregate.
+template <>
+class LiveSetAccessors<RegisterSet> {
+ public:
+ using RegSet = RegisterSet;
+ using RegType = AnyRegister;
+ using SetType = char;
+
+ protected:
+ RegisterSet set_;
+
+ template <RegTypeName Name>
+ GeneralRegisterSet::SetType allGpr() const {
+ return set_.gprs().allLive<Name>();
+ }
+ template <RegTypeName Name>
+ FloatRegisterSet::SetType allFpu() const {
+ return set_.fpus().allLive<Name>();
+ }
+
+ public:
+ LiveSetAccessors() : set_() {}
+ explicit constexpr LiveSetAccessors(SetType) = delete;
+ explicit constexpr LiveSetAccessors(RegisterSet set) : set_(set) {}
+
+ bool has(Register reg) const { return set_.gprs().hasRegisterIndex(reg); }
+ bool has(FloatRegister reg) const {
+ return set_.fpus().hasRegisterIndex(reg);
+ }
+
+ void addUnchecked(Register reg) { set_.gprs().addRegisterIndex(reg); }
+ void addUnchecked(FloatRegister reg) { set_.fpus().addRegisterIndex(reg); }
+
+ void takeUnchecked(Register reg) { set_.gprs().takeRegisterIndex(reg); }
+ void takeUnchecked(FloatRegister reg) { set_.fpus().takeRegisterIndex(reg); }
+};
+
+#define DEFINE_ACCESSOR_CONSTRUCTORS_(REGSET) \
+ typedef typename Parent::RegSet RegSet; \
+ typedef typename Parent::RegType RegType; \
+ typedef typename Parent::SetType SetType; \
+ \
+ constexpr REGSET() : Parent() {} \
+ explicit constexpr REGSET(SetType set) : Parent(set) {} \
+ explicit constexpr REGSET(RegSet set) : Parent(set) {}
+
+// This class adds checked accessors on top of the unchecked variants defined by
+// AllocatableSet and LiveSet accessors. It also defines interfaces which are
+// specialized to the register set implementation, such as the |getAny| and
+// |takeAny| variants.
+template <class Accessors, typename Set>
+class SpecializedRegSet : public Accessors {
+ using Parent = Accessors;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(SpecializedRegSet)
+
+ SetType bits() const { return this->Parent::set_.bits(); }
+
+ using Parent::has;
+
+ using Parent::addUnchecked;
+ void add(RegType reg) {
+ MOZ_ASSERT(!this->has(reg));
+ addUnchecked(reg);
+ }
+
+ using Parent::takeUnchecked;
+ void take(RegType reg) {
+ MOZ_ASSERT(this->has(reg));
+ takeUnchecked(reg);
+ }
+
+ template <RegTypeName Name>
+ bool hasAny() const {
+ return Parent::template all<Name>() != 0;
+ }
+
+ template <RegTypeName Name = RegSet::DefaultType>
+ RegType getFirst() const {
+ SetType set = Parent::template all<Name>();
+ MOZ_ASSERT(set);
+ return RegSet::FirstRegister(set);
+ }
+ template <RegTypeName Name = RegSet::DefaultType>
+ RegType getLast() const {
+ SetType set = Parent::template all<Name>();
+ MOZ_ASSERT(set);
+ return RegSet::LastRegister(set);
+ }
+ template <RegTypeName Name = RegSet::DefaultType>
+ RegType getAny() const {
+ // The choice of first or last here is mostly arbitrary, as they are
+ // about the same speed on popular architectures. We choose first, as
+ // it has the advantage of using the "lower" registers more often. These
+ // registers are sometimes more efficient (e.g. optimized encodings for
+ // EAX on x86).
+ return getFirst<Name>();
+ }
+
+ template <RegTypeName Name = RegSet::DefaultType>
+ RegType getAnyExcluding(RegType preclude) {
+ if (!this->has(preclude)) {
+ return getAny<Name>();
+ }
+
+ take(preclude);
+ RegType result = getAny<Name>();
+ add(preclude);
+ return result;
+ }
+
+ template <RegTypeName Name = RegSet::DefaultType>
+ RegType takeAny() {
+ RegType reg = getAny<Name>();
+ take(reg);
+ return reg;
+ }
+ template <RegTypeName Name = RegSet::DefaultType>
+ RegType takeFirst() {
+ RegType reg = getFirst<Name>();
+ take(reg);
+ return reg;
+ }
+ template <RegTypeName Name = RegSet::DefaultType>
+ RegType takeLast() {
+ RegType reg = getLast<Name>();
+ take(reg);
+ return reg;
+ }
+
+ ValueOperand takeAnyValue() {
+#if defined(JS_NUNBOX32)
+ return ValueOperand(takeAny<RegTypeName::GPR>(),
+ takeAny<RegTypeName::GPR>());
+#elif defined(JS_PUNBOX64)
+ return ValueOperand(takeAny<RegTypeName::GPR>());
+#else
+# error "Bad architecture"
+#endif
+ }
+
+ bool aliases(ValueOperand v) const {
+#ifdef JS_NUNBOX32
+ return this->has(v.typeReg()) || this->has(v.payloadReg());
+#else
+ return this->has(v.valueReg());
+#endif
+ }
+
+ template <RegTypeName Name = RegSet::DefaultType>
+ RegType takeAnyExcluding(RegType preclude) {
+ RegType reg = getAnyExcluding<Name>(preclude);
+ take(reg);
+ return reg;
+ }
+};
+
+// Specialization of the accessors for the RegisterSet aggregate.
+template <class Accessors>
+class SpecializedRegSet<Accessors, RegisterSet> : public Accessors {
+ using Parent = Accessors;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(SpecializedRegSet)
+
+ GeneralRegisterSet gprs() const { return this->Parent::set_.gprs(); }
+ GeneralRegisterSet& gprs() { return this->Parent::set_.gprs(); }
+ FloatRegisterSet fpus() const { return this->Parent::set_.fpus(); }
+ FloatRegisterSet& fpus() { return this->Parent::set_.fpus(); }
+
+ bool emptyGeneral() const { return this->Parent::set_.emptyGeneral(); }
+ bool emptyFloat() const { return this->Parent::set_.emptyFloat(); }
+
+ using Parent::has;
+ bool has(AnyRegister reg) const {
+ return reg.isFloat() ? this->has(reg.fpu()) : this->has(reg.gpr());
+ }
+
+ template <RegTypeName Name>
+ bool hasAny() const {
+ if (Name == RegTypeName::GPR) {
+ return Parent::template allGpr<RegTypeName::GPR>() != 0;
+ }
+ return Parent::template allFpu<Name>() != 0;
+ }
+
+ using Parent::addUnchecked;
+ void addUnchecked(AnyRegister reg) {
+ if (reg.isFloat()) {
+ addUnchecked(reg.fpu());
+ } else {
+ addUnchecked(reg.gpr());
+ }
+ }
+
+ void add(Register reg) {
+ MOZ_ASSERT(!this->has(reg));
+ addUnchecked(reg);
+ }
+ void add(FloatRegister reg) {
+ MOZ_ASSERT(!this->has(reg));
+ addUnchecked(reg);
+ }
+ void add(AnyRegister reg) {
+ if (reg.isFloat()) {
+ add(reg.fpu());
+ } else {
+ add(reg.gpr());
+ }
+ }
+
+ using Parent::takeUnchecked;
+ void takeUnchecked(AnyRegister reg) {
+ if (reg.isFloat()) {
+ takeUnchecked(reg.fpu());
+ } else {
+ takeUnchecked(reg.gpr());
+ }
+ }
+
+ void take(Register reg) {
+#ifdef DEBUG
+ bool hasReg = this->has(reg);
+ MOZ_ASSERT(hasReg);
+#endif
+ takeUnchecked(reg);
+ }
+ void take(FloatRegister reg) {
+ MOZ_ASSERT(this->has(reg));
+ takeUnchecked(reg);
+ }
+ void take(AnyRegister reg) {
+ if (reg.isFloat()) {
+ take(reg.fpu());
+ } else {
+ take(reg.gpr());
+ }
+ }
+
+ Register getAnyGeneral() const {
+ GeneralRegisterSet::SetType set =
+ Parent::template allGpr<RegTypeName::GPR>();
+ MOZ_ASSERT(set);
+ return GeneralRegisterSet::FirstRegister(set);
+ }
+ template <RegTypeName Name = RegTypeName::Float64>
+ FloatRegister getAnyFloat() const {
+ FloatRegisterSet::SetType set = Parent::template allFpu<Name>();
+ MOZ_ASSERT(set);
+ return FloatRegisterSet::FirstRegister(set);
+ }
+
+ Register takeAnyGeneral() {
+ Register reg = getAnyGeneral();
+ take(reg);
+ return reg;
+ }
+ template <RegTypeName Name = RegTypeName::Float64>
+ FloatRegister takeAnyFloat() {
+ FloatRegister reg = getAnyFloat<Name>();
+ take(reg);
+ return reg;
+ }
+ ValueOperand takeAnyValue() {
+#if defined(JS_NUNBOX32)
+ return ValueOperand(takeAnyGeneral(), takeAnyGeneral());
+#elif defined(JS_PUNBOX64)
+ return ValueOperand(takeAnyGeneral());
+#else
+# error "Bad architecture"
+#endif
+ }
+};
+
+// Interface which is common to all register set implementations. It overloads
+// |add|, |take| and |takeUnchecked| methods for types such as |ValueOperand|,
+// |TypedOrValueRegister|, and |Register64|.
+template <class Accessors, typename Set>
+class CommonRegSet : public SpecializedRegSet<Accessors, Set> {
+ typedef SpecializedRegSet<Accessors, Set> Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(CommonRegSet)
+
+ RegSet set() const { return this->Parent::set_; }
+ RegSet& set() { return this->Parent::set_; }
+
+ bool empty() const { return this->Parent::set_.empty(); }
+ void clear() { this->Parent::set_.clear(); }
+
+ using Parent::add;
+ void add(ValueOperand value) {
+#if defined(JS_NUNBOX32)
+ add(value.payloadReg());
+ add(value.typeReg());
+#elif defined(JS_PUNBOX64)
+ add(value.valueReg());
+#else
+# error "Bad architecture"
+#endif
+ }
+ void add(Register64 reg) {
+#if JS_BITS_PER_WORD == 32
+ add(reg.high);
+ add(reg.low);
+#else
+ add(reg.reg);
+#endif
+ }
+
+ using Parent::addUnchecked;
+ void addUnchecked(ValueOperand value) {
+#if defined(JS_NUNBOX32)
+ addUnchecked(value.payloadReg());
+ addUnchecked(value.typeReg());
+#elif defined(JS_PUNBOX64)
+ addUnchecked(value.valueReg());
+#else
+# error "Bad architecture"
+#endif
+ }
+ void addUnchecked(Register64 reg) {
+#if JS_BITS_PER_WORD == 32
+ addUnchecked(reg.high);
+ addUnchecked(reg.low);
+#else
+ addUnchecked(reg.reg);
+#endif
+ }
+
+ void add(TypedOrValueRegister reg) {
+ if (reg.hasValue()) {
+ add(reg.valueReg());
+ } else if (reg.hasTyped()) {
+ add(reg.typedReg());
+ }
+ }
+
+ using Parent::take;
+ void take(ValueOperand value) {
+#if defined(JS_NUNBOX32)
+ take(value.payloadReg());
+ take(value.typeReg());
+#elif defined(JS_PUNBOX64)
+ take(value.valueReg());
+#else
+# error "Bad architecture"
+#endif
+ }
+ void take(TypedOrValueRegister reg) {
+ if (reg.hasValue()) {
+ take(reg.valueReg());
+ } else if (reg.hasTyped()) {
+ take(reg.typedReg());
+ }
+ }
+ void take(Register64 reg) {
+#if JS_BITS_PER_WORD == 32
+ take(reg.high);
+ take(reg.low);
+#else
+ take(reg.reg);
+#endif
+ }
+
+ using Parent::takeUnchecked;
+ void takeUnchecked(ValueOperand value) {
+#if defined(JS_NUNBOX32)
+ takeUnchecked(value.payloadReg());
+ takeUnchecked(value.typeReg());
+#elif defined(JS_PUNBOX64)
+ takeUnchecked(value.valueReg());
+#else
+# error "Bad architecture"
+#endif
+ }
+ void takeUnchecked(TypedOrValueRegister reg) {
+ if (reg.hasValue()) {
+ takeUnchecked(reg.valueReg());
+ } else if (reg.hasTyped()) {
+ takeUnchecked(reg.typedReg());
+ }
+ }
+ void takeUnchecked(Register64 reg) {
+#if JS_BITS_PER_WORD == 32
+ takeUnchecked(reg.high);
+ takeUnchecked(reg.low);
+#else
+ takeUnchecked(reg.reg);
+#endif
+ }
+};
+
+// These classes do not provide any additional members; they only use their
+// constructors to forward to the common interface for all register sets. The
+// only benefit of these classes is to provide user friendly names.
+template <typename Set>
+class LiveSet : public CommonRegSet<LiveSetAccessors<Set>, Set> {
+ typedef CommonRegSet<LiveSetAccessors<Set>, Set> Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(LiveSet)
+};
+
+template <typename Set>
+class AllocatableSet : public CommonRegSet<AllocatableSetAccessors<Set>, Set> {
+ typedef CommonRegSet<AllocatableSetAccessors<Set>, Set> Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_(AllocatableSet)
+
+ LiveSet<Set> asLiveSet() const { return LiveSet<Set>(this->set()); }
+};
+
+#define DEFINE_ACCESSOR_CONSTRUCTORS_FOR_REGISTERSET_(REGSET) \
+ typedef Parent::RegSet RegSet; \
+ typedef Parent::RegType RegType; \
+ typedef Parent::SetType SetType; \
+ \
+ constexpr REGSET() : Parent() {} \
+ explicit constexpr REGSET(SetType) = delete; \
+ explicit constexpr REGSET(RegSet set) : Parent(set) {} \
+ constexpr REGSET(GeneralRegisterSet gpr, FloatRegisterSet fpu) \
+ : Parent(RegisterSet(gpr, fpu)) {} \
+ REGSET(REGSET<GeneralRegisterSet> gpr, REGSET<FloatRegisterSet> fpu) \
+ : Parent(RegisterSet(gpr.set(), fpu.set())) {}
+
+template <>
+class LiveSet<RegisterSet>
+ : public CommonRegSet<LiveSetAccessors<RegisterSet>, RegisterSet> {
+ // Note: We have to provide a qualified name for LiveSetAccessors, as it is
+ // interpreted as being the specialized class name inherited from the parent
+ // class specialization.
+ typedef CommonRegSet<jit::LiveSetAccessors<RegisterSet>, RegisterSet> Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_FOR_REGISTERSET_(LiveSet)
+};
+
+template <>
+class AllocatableSet<RegisterSet>
+ : public CommonRegSet<AllocatableSetAccessors<RegisterSet>, RegisterSet> {
+ // Note: We have to provide a qualified name for AllocatableSetAccessors, as
+ // it is interpreted as being the specialized class name inherited from the
+ // parent class specialization.
+ typedef CommonRegSet<jit::AllocatableSetAccessors<RegisterSet>, RegisterSet>
+ Parent;
+
+ public:
+ DEFINE_ACCESSOR_CONSTRUCTORS_FOR_REGISTERSET_(AllocatableSet)
+
+ LiveSet<RegisterSet> asLiveSet() const {
+ return LiveSet<RegisterSet>(this->set());
+ }
+};
+
+#undef DEFINE_ACCESSOR_CONSTRUCTORS_FOR_REGISTERSET_
+#undef DEFINE_ACCESSOR_CONSTRUCTORS_
+
+using AllocatableGeneralRegisterSet = AllocatableSet<GeneralRegisterSet>;
+using AllocatableFloatRegisterSet = AllocatableSet<FloatRegisterSet>;
+using AllocatableRegisterSet = AllocatableSet<RegisterSet>;
+
+using LiveGeneralRegisterSet = LiveSet<GeneralRegisterSet>;
+using LiveFloatRegisterSet = LiveSet<FloatRegisterSet>;
+using LiveRegisterSet = LiveSet<RegisterSet>;
+
+// Iterates in whatever order happens to be convenient.
+// Use TypedRegisterBackwardIterator or TypedRegisterForwardIterator if a
+// specific order is required.
+template <typename T>
+class TypedRegisterIterator {
+ LiveSet<TypedRegisterSet<T>> regset_;
+
+ public:
+ explicit TypedRegisterIterator(TypedRegisterSet<T> regset)
+ : regset_(regset) {}
+ explicit TypedRegisterIterator(LiveSet<TypedRegisterSet<T>> regset)
+ : regset_(regset) {}
+ TypedRegisterIterator(const TypedRegisterIterator& other)
+ : regset_(other.regset_) {}
+
+ bool more() const { return !regset_.empty(); }
+ TypedRegisterIterator<T>& operator++() {
+ regset_.template takeAny<RegTypeName::Any>();
+ return *this;
+ }
+ T operator*() const { return regset_.template getAny<RegTypeName::Any>(); }
+};
+
+// Iterates backwards, that is, from rn to r0.
+template <typename T>
+class TypedRegisterBackwardIterator {
+ LiveSet<TypedRegisterSet<T>> regset_;
+
+ public:
+ explicit TypedRegisterBackwardIterator(TypedRegisterSet<T> regset)
+ : regset_(regset) {}
+ explicit TypedRegisterBackwardIterator(LiveSet<TypedRegisterSet<T>> regset)
+ : regset_(regset) {}
+ TypedRegisterBackwardIterator(const TypedRegisterBackwardIterator& other)
+ : regset_(other.regset_) {}
+
+ bool more() const { return !regset_.empty(); }
+ TypedRegisterBackwardIterator<T>& operator++() {
+ regset_.template takeLast<RegTypeName::Any>();
+ return *this;
+ }
+ T operator*() const { return regset_.template getLast<RegTypeName::Any>(); }
+};
+
+// Iterates forwards, that is, from r0 to rn.
+template <typename T>
+class TypedRegisterForwardIterator {
+ LiveSet<TypedRegisterSet<T>> regset_;
+
+ public:
+ explicit TypedRegisterForwardIterator(TypedRegisterSet<T> regset)
+ : regset_(regset) {}
+ explicit TypedRegisterForwardIterator(LiveSet<TypedRegisterSet<T>> regset)
+ : regset_(regset) {}
+ TypedRegisterForwardIterator(const TypedRegisterForwardIterator& other)
+ : regset_(other.regset_) {}
+
+ bool more() const { return !regset_.empty(); }
+ TypedRegisterForwardIterator<T>& operator++() {
+ regset_.template takeFirst<RegTypeName::Any>();
+ return *this;
+ }
+ T operator*() const { return regset_.template getFirst<RegTypeName::Any>(); }
+};
+
+using GeneralRegisterIterator = TypedRegisterIterator<Register>;
+using FloatRegisterIterator = TypedRegisterIterator<FloatRegister>;
+using GeneralRegisterBackwardIterator = TypedRegisterBackwardIterator<Register>;
+using FloatRegisterBackwardIterator =
+ TypedRegisterBackwardIterator<FloatRegister>;
+using GeneralRegisterForwardIterator = TypedRegisterForwardIterator<Register>;
+using FloatRegisterForwardIterator =
+ TypedRegisterForwardIterator<FloatRegister>;
+
+class AnyRegisterIterator {
+ GeneralRegisterIterator geniter_;
+ FloatRegisterIterator floatiter_;
+
+ public:
+ AnyRegisterIterator()
+ : geniter_(GeneralRegisterSet::All()),
+ floatiter_(FloatRegisterSet::All()) {}
+ AnyRegisterIterator(GeneralRegisterSet genset, FloatRegisterSet floatset)
+ : geniter_(genset), floatiter_(floatset) {}
+ explicit AnyRegisterIterator(const RegisterSet& set)
+ : geniter_(set.gpr_), floatiter_(set.fpu_) {}
+ explicit AnyRegisterIterator(const LiveSet<RegisterSet>& set)
+ : geniter_(set.gprs()), floatiter_(set.fpus()) {}
+ AnyRegisterIterator(const AnyRegisterIterator& other) = default;
+ bool more() const { return geniter_.more() || floatiter_.more(); }
+ AnyRegisterIterator& operator++() {
+ if (geniter_.more()) {
+ ++geniter_;
+ } else {
+ ++floatiter_;
+ }
+ return *this;
+ }
+ AnyRegister operator*() const {
+ if (geniter_.more()) {
+ return AnyRegister(*geniter_);
+ }
+ return AnyRegister(*floatiter_);
+ }
+};
+
+class ABIArg {
+ public:
+ enum Kind {
+ GPR,
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ GPR_PAIR,
+#endif
+ FPU,
+ Stack,
+ Uninitialized = -1
+ };
+
+ private:
+ Kind kind_;
+ union {
+ Register::Code gpr_;
+ FloatRegister::Code fpu_;
+ uint32_t offset_;
+ } u;
+
+ public:
+ ABIArg() : kind_(Uninitialized) { u.offset_ = -1; }
+ explicit ABIArg(Register gpr) : kind_(GPR) { u.gpr_ = gpr.code(); }
+ explicit ABIArg(Register gprLow, Register gprHigh) {
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ kind_ = GPR_PAIR;
+#else
+ MOZ_CRASH("Unsupported type of ABI argument.");
+#endif
+ u.gpr_ = gprLow.code();
+ MOZ_ASSERT(u.gpr_ % 2 == 0);
+ MOZ_ASSERT(u.gpr_ + 1 == gprHigh.code());
+ }
+ explicit ABIArg(FloatRegister fpu) : kind_(FPU) { u.fpu_ = fpu.code(); }
+ explicit ABIArg(uint32_t offset) : kind_(Stack) { u.offset_ = offset; }
+
+ Kind kind() const {
+ MOZ_ASSERT(kind_ != Uninitialized);
+ return kind_;
+ }
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ bool isGeneralRegPair() const { return kind() == GPR_PAIR; }
+#else
+ bool isGeneralRegPair() const { return false; }
+#endif
+
+ Register gpr() const {
+ MOZ_ASSERT(kind() == GPR);
+ return Register::FromCode(u.gpr_);
+ }
+ Register64 gpr64() const {
+#ifdef JS_PUNBOX64
+ return Register64(gpr());
+#else
+ return Register64(oddGpr(), evenGpr());
+#endif
+ }
+ Register evenGpr() const {
+ MOZ_ASSERT(isGeneralRegPair());
+ return Register::FromCode(u.gpr_);
+ }
+ Register oddGpr() const {
+ MOZ_ASSERT(isGeneralRegPair());
+ return Register::FromCode(u.gpr_ + 1);
+ }
+ FloatRegister fpu() const {
+ MOZ_ASSERT(kind() == FPU);
+ return FloatRegister::FromCode(u.fpu_);
+ }
+ uint32_t offsetFromArgBase() const {
+ MOZ_ASSERT(kind() == Stack);
+ return u.offset_;
+ }
+
+ bool argInRegister() const { return kind() != Stack; }
+ AnyRegister reg() const {
+ return kind() == GPR ? AnyRegister(gpr()) : AnyRegister(fpu());
+ }
+
+ bool operator==(const ABIArg& rhs) const {
+ if (kind_ != rhs.kind_) {
+ return false;
+ }
+
+ switch (kind_) {
+ case GPR:
+ return u.gpr_ == rhs.u.gpr_;
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ case GPR_PAIR:
+ return u.gpr_ == rhs.u.gpr_;
+#endif
+ case FPU:
+ return u.fpu_ == rhs.u.fpu_;
+ case Stack:
+ return u.offset_ == rhs.u.offset_;
+ case Uninitialized:
+ return true;
+ }
+ MOZ_CRASH("Invalid value for ABIArg kind");
+ }
+
+ bool operator!=(const ABIArg& rhs) const { return !(*this == rhs); }
+};
+
+// Get the set of registers which should be saved by a block of code which
+// clobbers all registers besides |unused|, but does not clobber floating point
+// registers.
+inline LiveGeneralRegisterSet SavedNonVolatileRegisters(
+ const AllocatableGeneralRegisterSet& unused) {
+ LiveGeneralRegisterSet result;
+
+ for (GeneralRegisterIterator iter(GeneralRegisterSet::NonVolatile());
+ iter.more(); ++iter) {
+ Register reg = *iter;
+ if (!unused.has(reg)) {
+ result.add(reg);
+ }
+ }
+
+ // Some platforms require the link register to be saved if calls can be made.
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+ result.add(Register::FromCode(Registers::lr));
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ result.add(Register::FromCode(Registers::ra));
+#endif
+
+ return result;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_RegisterSets_h */
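For orientation, a minimal usage sketch of the allocatable/live split defined above. It assumes the js::jit namespace and #include "jit/RegisterSets.h"; the reserved register code 0 is an arbitrary illustration, not something this patch prescribes.

  static void ExampleRegisterUse() {
    // Start from every general-purpose register and carve pieces out of it.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());

    // Pretend one register is already spoken for (code 0 is arbitrary).
    Register reserved = Register::FromCode(0);
    regs.take(reserved);

    // Hand out scratch registers; takeAny() asserts the set is non-empty.
    Register scratch = regs.takeAny();
    Register other = regs.takeAnyExcluding(scratch);

    // What a register-clobbering block would have to preserve, given the
    // registers still unused in |regs| (see SavedNonVolatileRegisters above).
    LiveGeneralRegisterSet toSave = SavedNonVolatileRegisters(regs);

    (void)other;
    (void)toSave;
  }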
diff --git a/js/src/jit/Registers.h b/js/src/jit/Registers.h
new file mode 100644
index 0000000000..1ae9c1954c
--- /dev/null
+++ b/js/src/jit/Registers.h
@@ -0,0 +1,299 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Registers_h
+#define jit_Registers_h
+
+#include "mozilla/Array.h"
+
+#include "jit/IonTypes.h"
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+# include "jit/x86-shared/Architecture-x86-shared.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/Architecture-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/Architecture-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Architecture-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Architecture-mips64.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/Architecture-loong64.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/Architecture-riscv64.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/Architecture-wasm32.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/Architecture-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {
+
+struct Register {
+ using Codes = Registers;
+ using Encoding = Codes::Encoding;
+ using Code = Codes::Code;
+ using SetType = Codes::SetType;
+
+ Encoding reg_;
+ explicit constexpr Register(Encoding e) : reg_(e) {}
+ Register() : reg_(Encoding(Codes::Invalid)) {}
+
+ static Register FromCode(Code i) {
+ MOZ_ASSERT(i < Registers::Total);
+ Register r{Encoding(i)};
+ return r;
+ }
+ static Register FromName(const char* name) {
+ Code code = Registers::FromName(name);
+ Register r{Encoding(code)};
+ return r;
+ }
+ constexpr static Register Invalid() {
+ Register r{Encoding(Codes::Invalid)};
+ return r;
+ }
+ constexpr Code code() const { return Code(reg_); }
+ Encoding encoding() const {
+ MOZ_ASSERT(Code(reg_) < Registers::Total);
+ return reg_;
+ }
+ const char* name() const { return Registers::GetName(code()); }
+ constexpr bool operator==(Register other) const { return reg_ == other.reg_; }
+ constexpr bool operator!=(Register other) const { return reg_ != other.reg_; }
+ bool volatile_() const {
+ return !!((SetType(1) << code()) & Registers::VolatileMask);
+ }
+ bool aliases(const Register& other) const { return reg_ == other.reg_; }
+ uint32_t numAliased() const { return 1; }
+
+ Register aliased(uint32_t aliasIdx) const {
+ MOZ_ASSERT(aliasIdx == 0);
+ return *this;
+ }
+
+ SetType alignedOrDominatedAliasedSet() const { return SetType(1) << code(); }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::GPR;
+
+ template <RegTypeName = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable sets are not iterable");
+ return SetType(0);
+ }
+
+ static uint32_t SetSize(SetType x) { return Codes::SetSize(x); }
+ static uint32_t FirstBit(SetType x) { return Codes::FirstBit(x); }
+ static uint32_t LastBit(SetType x) { return Codes::LastBit(x); }
+
+ // Returns the offset of |reg| on the stack, assuming all registers in |set|
+ // were pushed in order (e.g. by |PushRegsInMask|). This is computed by
+ // clearing the lower bits (registers that were pushed later).
+ static size_t OffsetOfPushedRegister(SetType set, Register reg) {
+ return sizeof(Codes::RegisterContent) * Codes::SetSize(set >> reg.code());
+ }
+};
+
+// Architectures where the stack pointer is not a plain register with a standard
+// register encoding must define JS_HAS_HIDDEN_SP and HiddenSPEncoding.
+
+#ifdef JS_HAS_HIDDEN_SP
+struct RegisterOrSP {
+ // The register code -- but possibly one that cannot be represented as a bit
+ // position in a 32-bit vector.
+ uint32_t code;
+
+ explicit RegisterOrSP(uint32_t code) : code(code) {}
+ explicit RegisterOrSP(Register r) : code(r.code()) {}
+};
+
+static inline bool IsHiddenSP(RegisterOrSP r) {
+ return r.code == HiddenSPEncoding;
+}
+
+static inline Register AsRegister(RegisterOrSP r) {
+ MOZ_ASSERT(!IsHiddenSP(r));
+ return Register::FromCode(r.code);
+}
+
+static inline Register AsRegister(Register r) { return r; }
+
+inline bool operator==(Register r, RegisterOrSP e) {
+ return r.code() == e.code;
+}
+
+inline bool operator!=(Register r, RegisterOrSP e) { return !(r == e); }
+
+inline bool operator==(RegisterOrSP e, Register r) { return r == e; }
+
+inline bool operator!=(RegisterOrSP e, Register r) { return r != e; }
+
+inline bool operator==(RegisterOrSP lhs, RegisterOrSP rhs) {
+ return lhs.code == rhs.code;
+}
+
+inline bool operator!=(RegisterOrSP lhs, RegisterOrSP rhs) {
+ return !(lhs == rhs);
+}
+#else
+// On platforms where there's nothing special about SP, make RegisterOrSP be
+// just Register, and return false for IsHiddenSP(r) for any r so that we use
+// "normal" code for handling the SP. This reduces ifdeffery throughout the
+// jit.
+using RegisterOrSP = Register;
+
+static inline bool IsHiddenSP(RegisterOrSP r) { return false; }
+
+static inline Register AsRegister(RegisterOrSP r) { return r; }
+#endif
+
+template <>
+inline Register::SetType Register::LiveAsIndexableSet<RegTypeName::GPR>(
+ SetType set) {
+ return set;
+}
+
+template <>
+inline Register::SetType Register::LiveAsIndexableSet<RegTypeName::Any>(
+ SetType set) {
+ return set;
+}
+
+template <>
+inline Register::SetType Register::AllocatableAsIndexableSet<RegTypeName::GPR>(
+ SetType set) {
+ return set;
+}
+
+#if JS_BITS_PER_WORD == 32
+// Note, some platform code depends on INT64LOW_OFFSET being zero.
+static const uint32_t INT64LOW_OFFSET = 0 * sizeof(int32_t);
+static const uint32_t INT64HIGH_OFFSET = 1 * sizeof(int32_t);
+#endif
+
+struct Register64 {
+#ifdef JS_PUNBOX64
+ Register reg;
+#else
+ Register high;
+ Register low;
+#endif
+
+#ifdef JS_PUNBOX64
+ explicit constexpr Register64(Register r) : reg(r) {}
+ constexpr bool operator==(Register64 other) const { return reg == other.reg; }
+ constexpr bool operator!=(Register64 other) const { return reg != other.reg; }
+ Register scratchReg() { return reg; }
+ static Register64 Invalid() { return Register64(Register::Invalid()); }
+#else
+ constexpr Register64(Register h, Register l) : high(h), low(l) {}
+ constexpr bool operator==(Register64 other) const {
+ return high == other.high && low == other.low;
+ }
+ constexpr bool operator!=(Register64 other) const {
+ return high != other.high || low != other.low;
+ }
+ Register scratchReg() { return high; }
+ static Register64 Invalid() {
+ return Register64(Register::Invalid(), Register::Invalid());
+ }
+#endif
+};
+
+class RegisterDump {
+ public:
+ typedef mozilla::Array<Registers::RegisterContent, Registers::Total> GPRArray;
+ typedef mozilla::Array<FloatRegisters::RegisterContent,
+ FloatRegisters::TotalPhys>
+ FPUArray;
+
+ protected: // Silence Clang warning.
+ GPRArray regs_;
+ FPUArray fpregs_;
+
+ public:
+ static size_t offsetOfRegister(Register reg) {
+ return offsetof(RegisterDump, regs_) + reg.code() * sizeof(uintptr_t);
+ }
+ static size_t offsetOfRegister(FloatRegister reg) {
+ return offsetof(RegisterDump, fpregs_) + reg.getRegisterDumpOffsetInBytes();
+ }
+};
+
+// Class for mapping each register to an offset.
+class RegisterOffsets {
+ mozilla::Array<uint32_t, Registers::Total> offsets_;
+
+ // Sentinel value representing an uninitialized offset.
+ static constexpr uint32_t InvalidOffset = UINT32_MAX;
+
+ public:
+ RegisterOffsets() {
+ for (size_t i = 0; i < Registers::Total; i++) {
+ offsets_[i] = InvalidOffset;
+ }
+ }
+
+ RegisterOffsets(const RegisterOffsets&) = delete;
+ void operator=(const RegisterOffsets&) = delete;
+
+ bool hasOffset(Register reg) const {
+ return offsets_[reg.code()] != InvalidOffset;
+ }
+ uint32_t getOffset(Register reg) const {
+ MOZ_ASSERT(hasOffset(reg));
+ return offsets_[reg.code()];
+ }
+ void setOffset(Register reg, size_t offset) {
+ MOZ_ASSERT(offset < InvalidOffset);
+ offsets_[reg.code()] = uint32_t(offset);
+ }
+};
+
+class MacroAssembler;
+
+// Declares a register as owned within the scope of the object.
+// In debug mode, owned register state is tracked within the MacroAssembler,
+// and an assert will fire if ownership is conflicting.
+// In contrast to ARM64's UseScratchRegisterScope, this class has no overhead
+// in non-debug builds.
+template <class RegisterType>
+struct AutoGenericRegisterScope : public RegisterType {
+ // Prevent MacroAssembler templates from creating copies,
+ // which causes the destructor to fire more than once.
+ AutoGenericRegisterScope(const AutoGenericRegisterScope& other) = delete;
+
+#ifdef DEBUG
+ MacroAssembler& masm_;
+ bool released_;
+ explicit AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg);
+ ~AutoGenericRegisterScope();
+ void release();
+ void reacquire();
+#else
+ constexpr explicit AutoGenericRegisterScope(MacroAssembler& masm,
+ RegisterType reg)
+ : RegisterType(reg) {}
+ void release() {}
+ void reacquire() {}
+#endif
+};
+
+using AutoRegisterScope = AutoGenericRegisterScope<Register>;
+using AutoFloatRegisterScope = AutoGenericRegisterScope<FloatRegister>;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Registers_h */
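A worked instance of the OffsetOfPushedRegister() arithmetic above; the register codes 1, 3, 4 and 7 are assumptions chosen for illustration.

  static void ExamplePushedRegisterOffset() {
    // Registers with codes 1, 3, 4 and 7 are in |set|; ask about code 4.
    Register::SetType set = (1u << 1) | (1u << 3) | (1u << 4) | (1u << 7);
    Register reg = Register::FromCode(4);

    // set >> 4 == 0b1001: |reg| itself plus the register with code 7, i.e.
    // the registers that were pushed before |reg| (the higher codes).
    size_t offset = Register::OffsetOfPushedRegister(set, reg);
    MOZ_ASSERT(offset == 2 * sizeof(Registers::RegisterContent));
    (void)offset;
  }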
diff --git a/js/src/jit/RematerializedFrame-inl.h b/js/src/jit/RematerializedFrame-inl.h
new file mode 100644
index 0000000000..390aaa35d0
--- /dev/null
+++ b/js/src/jit/RematerializedFrame-inl.h
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RematerializedFrame_inl_h
+#define jit_RematerializedFrame_inl_h
+
+#include "jit/RematerializedFrame.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include "vm/JSScript.h" // JSScript
+
+#include "vm/JSScript-inl.h" // JSScript::isDebuggee
+
+inline void js::jit::RematerializedFrame::unsetIsDebuggee() {
+ MOZ_ASSERT(!script()->isDebuggee());
+ isDebuggee_ = false;
+}
+
+#endif // jit_RematerializedFrame_inl_h
diff --git a/js/src/jit/RematerializedFrame.cpp b/js/src/jit/RematerializedFrame.cpp
new file mode 100644
index 0000000000..c9544057f7
--- /dev/null
+++ b/js/src/jit/RematerializedFrame.cpp
@@ -0,0 +1,221 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/RematerializedFrame.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "jit/Bailouts.h"
+#include "jit/JSJitFrameIter.h"
+#include "js/friend/DumpFunctions.h" // js::DumpValue
+#include "vm/ArgumentsObject.h"
+
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace jit;
+
+struct CopyValueToRematerializedFrame {
+ Value* slots;
+
+ explicit CopyValueToRematerializedFrame(Value* slots) : slots(slots) {}
+
+ void operator()(const Value& v) { *slots++ = v; }
+};
+
+RematerializedFrame::RematerializedFrame(JSContext* cx, uint8_t* top,
+ unsigned numActualArgs,
+ InlineFrameIterator& iter,
+ MaybeReadFallback& fallback)
+ : prevUpToDate_(false),
+ isDebuggee_(iter.script()->isDebuggee()),
+ hasInitialEnv_(false),
+ isConstructing_(iter.isConstructing()),
+ hasCachedSavedFrame_(false),
+ top_(top),
+ pc_(iter.pc()),
+ frameNo_(iter.frameNo()),
+ numActualArgs_(numActualArgs),
+ script_(iter.script()),
+ envChain_(nullptr),
+ argsObj_(nullptr) {
+ if (iter.isFunctionFrame()) {
+ callee_ = iter.callee(fallback);
+ } else {
+ callee_ = nullptr;
+ }
+
+ CopyValueToRematerializedFrame op(slots_);
+ iter.readFrameArgsAndLocals(
+ cx, op, op, &envChain_, &hasInitialEnv_, &returnValue_, &argsObj_,
+ &thisArgument_, ReadFrameArgsBehavior::ActualsAndFormals, fallback);
+}
+
+/* static */
+RematerializedFrame* RematerializedFrame::New(JSContext* cx, uint8_t* top,
+ InlineFrameIterator& iter,
+ MaybeReadFallback& fallback) {
+ unsigned numFormals =
+ iter.isFunctionFrame() ? iter.calleeTemplate()->nargs() : 0;
+ unsigned argSlots = std::max(numFormals, iter.numActualArgs());
+ unsigned extraSlots = argSlots + iter.script()->nfixed();
+
+ // One Value slot is included in sizeof(RematerializedFrame), so we can
+ // reduce the extra slot count by one. However, if there are zero slot
+ // allocations total, then reducing the slots by one will lead to
+ // the memory allocation being smaller than sizeof(RematerializedFrame).
+ if (extraSlots > 0) {
+ extraSlots -= 1;
+ }
+
+ RematerializedFrame* buf =
+ cx->pod_calloc_with_extra<RematerializedFrame, Value>(extraSlots);
+ if (!buf) {
+ return nullptr;
+ }
+
+ return new (buf)
+ RematerializedFrame(cx, top, iter.numActualArgs(), iter, fallback);
+}
+
+/* static */
+bool RematerializedFrame::RematerializeInlineFrames(
+ JSContext* cx, uint8_t* top, InlineFrameIterator& iter,
+ MaybeReadFallback& fallback, RematerializedFrameVector& frames) {
+ Rooted<RematerializedFrameVector> tempFrames(cx,
+ RematerializedFrameVector(cx));
+ if (!tempFrames.resize(iter.frameCount())) {
+ return false;
+ }
+
+ while (true) {
+ size_t frameNo = iter.frameNo();
+ tempFrames[frameNo].reset(
+ RematerializedFrame::New(cx, top, iter, fallback));
+ if (!tempFrames[frameNo]) {
+ return false;
+ }
+ if (tempFrames[frameNo]->environmentChain()) {
+ if (!EnsureHasEnvironmentObjects(cx, tempFrames[frameNo].get().get())) {
+ return false;
+ }
+ }
+
+ if (!iter.more()) {
+ break;
+ }
+ ++iter;
+ }
+
+ frames = std::move(tempFrames.get());
+ return true;
+}
+
+CallObject& RematerializedFrame::callObj() const {
+ MOZ_ASSERT(hasInitialEnvironment());
+ MOZ_ASSERT(callee()->needsCallObject());
+
+ JSObject* env = environmentChain();
+ while (!env->is<CallObject>()) {
+ env = env->enclosingEnvironment();
+ }
+ return env->as<CallObject>();
+}
+
+bool RematerializedFrame::initFunctionEnvironmentObjects(JSContext* cx) {
+ return js::InitFunctionEnvironmentObjects(cx, this);
+}
+
+bool RematerializedFrame::pushVarEnvironment(JSContext* cx,
+ Handle<Scope*> scope) {
+ return js::PushVarEnvironmentObject(cx, scope, this);
+}
+
+void RematerializedFrame::trace(JSTracer* trc) {
+ TraceRoot(trc, &script_, "remat ion frame script");
+ TraceRoot(trc, &envChain_, "remat ion frame env chain");
+ if (callee_) {
+ TraceRoot(trc, &callee_, "remat ion frame callee");
+ }
+ if (argsObj_) {
+ TraceRoot(trc, &argsObj_, "remat ion frame argsobj");
+ }
+ TraceRoot(trc, &returnValue_, "remat ion frame return value");
+ TraceRoot(trc, &thisArgument_, "remat ion frame this");
+ TraceRootRange(trc, numArgSlots() + script_->nfixed(), slots_,
+ "remat ion frame stack");
+}
+
+void RematerializedFrame::dump() {
+ fprintf(stderr, " Rematerialized Ion Frame%s\n",
+ inlined() ? " (inlined)" : "");
+ if (isFunctionFrame()) {
+ fprintf(stderr, " callee fun: ");
+#ifdef DEBUG
+ DumpValue(ObjectValue(*callee()));
+#else
+ fprintf(stderr, "?\n");
+#endif
+ } else {
+ fprintf(stderr, " global frame, no callee\n");
+ }
+
+ fprintf(stderr, " file %s line %u offset %zu\n", script()->filename(),
+ script()->lineno(), script()->pcToOffset(pc()));
+
+ fprintf(stderr, " script = %p\n", (void*)script());
+
+ if (isFunctionFrame()) {
+ fprintf(stderr, " env chain: ");
+#ifdef DEBUG
+ DumpValue(ObjectValue(*environmentChain()));
+#else
+ fprintf(stderr, "?\n");
+#endif
+
+ if (hasArgsObj()) {
+ fprintf(stderr, " args obj: ");
+#ifdef DEBUG
+ DumpValue(ObjectValue(argsObj()));
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+
+ fprintf(stderr, " this: ");
+#ifdef DEBUG
+ DumpValue(thisArgument());
+#else
+ fprintf(stderr, "?\n");
+#endif
+
+ for (unsigned i = 0; i < numActualArgs(); i++) {
+ if (i < numFormalArgs()) {
+ fprintf(stderr, " formal (arg %u): ", i);
+ } else {
+ fprintf(stderr, " overflown (arg %u): ", i);
+ }
+#ifdef DEBUG
+ DumpValue(argv()[i]);
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+
+ for (unsigned i = 0; i < script()->nfixed(); i++) {
+ fprintf(stderr, " local %u: ", i);
+#ifdef DEBUG
+ DumpValue(locals()[i]);
+#else
+ fprintf(stderr, "?\n");
+#endif
+ }
+ }
+
+ fputc('\n', stderr);
+}
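To make the slot-sizing comment in RematerializedFrame::New concrete, here is the same arithmetic on assumed counts (3 formals, 5 actuals, 4 fixed slots); the numbers are illustrative only.

  unsigned numFormals = 3, numActuals = 5, nfixed = 4;
  unsigned argSlots = std::max(numFormals, numActuals);  // 5
  unsigned extraSlots = argSlots + nfixed;               // 9
  if (extraSlots > 0) {
    extraSlots -= 1;  // one Value is already part of sizeof(RematerializedFrame)
  }
  // pod_calloc_with_extra<RematerializedFrame, Value>(extraSlots) then
  // reserves sizeof(RematerializedFrame) + 8 * sizeof(Value) bytes, which
  // backs slots_[9]: the 5 argument Values followed by the 4 locals.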
diff --git a/js/src/jit/RematerializedFrame.h b/js/src/jit/RematerializedFrame.h
new file mode 100644
index 0000000000..06f0c475a2
--- /dev/null
+++ b/js/src/jit/RematerializedFrame.h
@@ -0,0 +1,222 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_RematerializedFrame_h
+#define jit_RematerializedFrame_h
+
+#include "mozilla/Assertions.h"
+
+#include <algorithm>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h"
+
+#include "jit/JitFrames.h"
+#include "jit/ScriptFromCalleeToken.h"
+#include "js/GCVector.h"
+#include "js/TypeDecls.h"
+#include "js/UniquePtr.h"
+#include "js/Value.h"
+#include "vm/JSFunction.h"
+#include "vm/JSScript.h"
+#include "vm/Stack.h"
+
+class JS_PUBLIC_API JSTracer;
+
+namespace js {
+
+class ArgumentsObject;
+class CallObject;
+
+namespace jit {
+
+class InlineFrameIterator;
+struct MaybeReadFallback;
+
+// RematerializedFrame: An optimized frame that has been rematerialized with
+// values read out of Snapshots.
+//
+// If the Debugger API tries to inspect or modify an IonMonkey frame, much of
+// the information it expects to find in a frame is missing: function calls may
+// have been inlined, variables may have been optimized out, and so on. So when
+// this happens, SpiderMonkey builds one or more Rematerialized frames from the
+// IonMonkey frame, using the snapshot metadata built by Ion to reconstruct the
+// missing parts. The Rematerialized frames are now the authority on the state
+// of those frames, and the Ion frame is ignored: stack iterators ignore the Ion
+// frame, producing the Rematerialized frames in their stead; and when control
+// returns to the Ion frame, we pop it, rebuild Baseline frames from the
+// Rematerialized frames, and resume execution in Baseline.
+class RematerializedFrame {
+ // See DebugScopes::updateLiveScopes.
+ bool prevUpToDate_;
+
+ // Propagated to the Baseline frame once this is popped.
+ bool isDebuggee_;
+
+ // Has an initial environment been pushed on the environment chain for
+ // function frames that need a CallObject or eval frames that need a
+ // VarEnvironmentObject?
+ bool hasInitialEnv_;
+
+ // Is this frame constructing?
+ bool isConstructing_;
+
+ // If true, this frame was on the stack when
+ // |js::SavedStacks::saveCurrentStack| was called, and so there is a
+ // |js::SavedFrame| object cached for this frame.
+ bool hasCachedSavedFrame_;
+
+ // The fp of the top frame associated with this possibly inlined frame.
+ uint8_t* top_;
+
+ // The bytecode at the time of rematerialization.
+ jsbytecode* pc_;
+
+ size_t frameNo_;
+ unsigned numActualArgs_;
+
+ JSScript* script_;
+ JSObject* envChain_;
+ JSFunction* callee_;
+ ArgumentsObject* argsObj_;
+
+ Value returnValue_;
+ Value thisArgument_;
+ Value slots_[1];
+
+ RematerializedFrame(JSContext* cx, uint8_t* top, unsigned numActualArgs,
+ InlineFrameIterator& iter, MaybeReadFallback& fallback);
+
+ public:
+ static RematerializedFrame* New(JSContext* cx, uint8_t* top,
+ InlineFrameIterator& iter,
+ MaybeReadFallback& fallback);
+
+ // RematerializedFrames are allocated on the non-GC heap, so use GCVector and
+ // UniquePtr to ensure they are traced and cleaned up correctly.
+ using RematerializedFrameVector = GCVector<UniquePtr<RematerializedFrame>>;
+
+ // Rematerialize all remaining frames pointed to by |iter| into |frames|
+ // in older-to-younger order, i.e., frames[0] is the oldest frame.
+ [[nodiscard]] static bool RematerializeInlineFrames(
+ JSContext* cx, uint8_t* top, InlineFrameIterator& iter,
+ MaybeReadFallback& fallback, RematerializedFrameVector& frames);
+
+ bool prevUpToDate() const { return prevUpToDate_; }
+ void setPrevUpToDate() { prevUpToDate_ = true; }
+ void unsetPrevUpToDate() { prevUpToDate_ = false; }
+
+ bool isDebuggee() const { return isDebuggee_; }
+ void setIsDebuggee() { isDebuggee_ = true; }
+ inline void unsetIsDebuggee();
+
+ uint8_t* top() const { return top_; }
+ JSScript* outerScript() const {
+ JitFrameLayout* jsFrame = (JitFrameLayout*)top_;
+ return ScriptFromCalleeToken(jsFrame->calleeToken());
+ }
+ jsbytecode* pc() const { return pc_; }
+ size_t frameNo() const { return frameNo_; }
+ bool inlined() const { return frameNo_ > 0; }
+
+ JSObject* environmentChain() const { return envChain_; }
+
+ template <typename SpecificEnvironment>
+ void pushOnEnvironmentChain(SpecificEnvironment& env) {
+ MOZ_ASSERT(*environmentChain() == env.enclosingEnvironment());
+ envChain_ = &env;
+ if (IsFrameInitialEnvironment(this, env)) {
+ hasInitialEnv_ = true;
+ }
+ }
+
+ template <typename SpecificEnvironment>
+ void popOffEnvironmentChain() {
+ MOZ_ASSERT(envChain_->is<SpecificEnvironment>());
+ envChain_ = &envChain_->as<SpecificEnvironment>().enclosingEnvironment();
+ }
+
+ [[nodiscard]] bool initFunctionEnvironmentObjects(JSContext* cx);
+ [[nodiscard]] bool pushVarEnvironment(JSContext* cx, Handle<Scope*> scope);
+
+ bool hasInitialEnvironment() const { return hasInitialEnv_; }
+ CallObject& callObj() const;
+
+ bool hasArgsObj() const { return !!argsObj_; }
+ ArgumentsObject& argsObj() const {
+ MOZ_ASSERT(hasArgsObj());
+ MOZ_ASSERT(script()->needsArgsObj());
+ return *argsObj_;
+ }
+
+ bool isFunctionFrame() const { return script_->isFunction(); }
+ bool isGlobalFrame() const { return script_->isGlobalCode(); }
+ bool isModuleFrame() const { return script_->isModule(); }
+
+ JSScript* script() const { return script_; }
+ JSFunction* callee() const {
+ MOZ_ASSERT(isFunctionFrame());
+ MOZ_ASSERT(callee_);
+ return callee_;
+ }
+ Value calleev() const { return ObjectValue(*callee()); }
+ Value& thisArgument() { return thisArgument_; }
+
+ bool isConstructing() const { return isConstructing_; }
+
+ bool hasCachedSavedFrame() const { return hasCachedSavedFrame_; }
+
+ void setHasCachedSavedFrame() { hasCachedSavedFrame_ = true; }
+
+ void clearHasCachedSavedFrame() { hasCachedSavedFrame_ = false; }
+
+ unsigned numFormalArgs() const {
+ return isFunctionFrame() ? callee()->nargs() : 0;
+ }
+ unsigned numActualArgs() const { return numActualArgs_; }
+ unsigned numArgSlots() const {
+ return (std::max)(numFormalArgs(), numActualArgs());
+ }
+
+ Value* argv() { return slots_; }
+ Value* locals() { return slots_ + numArgSlots(); }
+
+ Value& unaliasedLocal(unsigned i) {
+ MOZ_ASSERT(i < script()->nfixed());
+ return locals()[i];
+ }
+ Value& unaliasedFormal(unsigned i,
+ MaybeCheckAliasing checkAliasing = CHECK_ALIASING) {
+ MOZ_ASSERT(i < numFormalArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals() &&
+ !script()->formalIsAliased(i));
+ return argv()[i];
+ }
+ Value& unaliasedActual(unsigned i,
+ MaybeCheckAliasing checkAliasing = CHECK_ALIASING) {
+ MOZ_ASSERT(i < numActualArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals());
+ MOZ_ASSERT_IF(checkAliasing && i < numFormalArgs(),
+ !script()->formalIsAliased(i));
+ return argv()[i];
+ }
+
+ void setReturnValue(const Value& value) { returnValue_ = value; }
+
+ Value& returnValue() {
+ MOZ_ASSERT(!script()->noScriptRval());
+ return returnValue_;
+ }
+
+ void trace(JSTracer* trc);
+ void dump();
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_RematerializedFrame_h
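A layout sketch for the trailing slot storage, read off argv(), locals() and numArgSlots() above; the counts (2 formals, 4 actuals, 3 fixed slots) are assumptions for illustration.

  // slots_[0] .. slots_[3]   argv():   numArgSlots() == max(2, 4) == 4
  // slots_[4] .. slots_[6]   locals(): script()->nfixed() == 3
  //
  // RematerializedFrame::trace() therefore marks numArgSlots() + nfixed == 7
  // Values via TraceRootRange.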
diff --git a/js/src/jit/SafepointIndex-inl.h b/js/src/jit/SafepointIndex-inl.h
new file mode 100644
index 0000000000..45e54314a3
--- /dev/null
+++ b/js/src/jit/SafepointIndex-inl.h
@@ -0,0 +1,22 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_SafepointIndex_inl_h
+#define jit_SafepointIndex_inl_h
+
+#include "jit/SafepointIndex.h"
+
+#include "jit/LIR.h"
+
+namespace js::jit {
+
+inline SafepointIndex::SafepointIndex(const CodegenSafepointIndex& csi)
+ : displacement_(csi.displacement()),
+ safepointOffset_(csi.safepoint()->offset()) {}
+
+} // namespace js::jit
+
+#endif /* jit_SafepointIndex_inl_h */
diff --git a/js/src/jit/SafepointIndex.cpp b/js/src/jit/SafepointIndex.cpp
new file mode 100644
index 0000000000..4e486133cc
--- /dev/null
+++ b/js/src/jit/SafepointIndex.cpp
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/SafepointIndex-inl.h"
+
+#include "jit/MacroAssembler.h"
+
+namespace js::jit {
+
+uint32_t OsiIndex::returnPointDisplacement() const {
+ // In general, pointer arithmetic on code is bad, but in this case it is
+ // what we want: the return address is a fixed distance past the call
+ // point, and stepping over constant pools here would give the wrong
+ // answer.
+ return callPointDisplacement_ + Assembler::PatchWrite_NearCallSize();
+}
+
+} // namespace js::jit
diff --git a/js/src/jit/SafepointIndex.h b/js/src/jit/SafepointIndex.h
new file mode 100644
index 0000000000..ab8ebcb785
--- /dev/null
+++ b/js/src/jit/SafepointIndex.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_SafepointIndex_h
+#define jit_SafepointIndex_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/IonTypes.h"
+
+namespace js {
+namespace jit {
+
+class LSafepoint;
+class CodegenSafepointIndex;
+
+// Two-tuple that lets you look up the safepoint entry given the
+// displacement of a call instruction within the JIT code.
+class SafepointIndex {
+ // The displacement is the distance from the first byte of the JIT'd code
+ // to the return address (of the call that the safepoint was generated for).
+ uint32_t displacement_ = 0;
+
+ // Offset within the safepoint buffer.
+ uint32_t safepointOffset_ = 0;
+
+ public:
+ inline explicit SafepointIndex(const CodegenSafepointIndex& csi);
+
+ uint32_t displacement() const { return displacement_; }
+ uint32_t safepointOffset() const { return safepointOffset_; }
+};
+
+class CodegenSafepointIndex {
+ uint32_t displacement_ = 0;
+
+ LSafepoint* safepoint_ = nullptr;
+
+ public:
+ CodegenSafepointIndex(uint32_t displacement, LSafepoint* safepoint)
+ : displacement_(displacement), safepoint_(safepoint) {}
+
+ LSafepoint* safepoint() const { return safepoint_; }
+ uint32_t displacement() const { return displacement_; }
+
+ inline SnapshotOffset snapshotOffset() const;
+ inline bool hasSnapshotOffset() const;
+};
+
+// The OSI point is patched to a call instruction. Therefore, the
+// returnPoint for an OSI call is the address immediately following that
+// call instruction. The displacement of that point within the assembly
+// buffer is the |returnPointDisplacement|.
+class OsiIndex {
+ uint32_t callPointDisplacement_;
+ uint32_t snapshotOffset_;
+
+ public:
+ OsiIndex(uint32_t callPointDisplacement, uint32_t snapshotOffset)
+ : callPointDisplacement_(callPointDisplacement),
+ snapshotOffset_(snapshotOffset) {}
+
+ uint32_t returnPointDisplacement() const;
+ uint32_t callPointDisplacement() const { return callPointDisplacement_; }
+ uint32_t snapshotOffset() const { return snapshotOffset_; }
+};
+
+} /* namespace jit */
+} /* namespace js */
+
+#endif /* jit_SafepointIndex_h */
diff --git a/js/src/jit/Safepoints.cpp b/js/src/jit/Safepoints.cpp
new file mode 100644
index 0000000000..7dc06ca886
--- /dev/null
+++ b/js/src/jit/Safepoints.cpp
@@ -0,0 +1,559 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Safepoints.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/BitSet.h"
+#include "jit/IonScript.h"
+#include "jit/JitSpewer.h"
+#include "jit/LIR.h"
+#include "jit/SafepointIndex.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::FloorLog2;
+
+SafepointWriter::SafepointWriter(uint32_t localSlotsSize,
+ uint32_t argumentsSize)
+ : localSlots_((localSlotsSize / sizeof(intptr_t)) +
+ 1), // Stack slot counts are inclusive.
+ argumentSlots_(argumentsSize / sizeof(intptr_t)) {}
+
+bool SafepointWriter::init(TempAllocator& alloc) {
+ return localSlots_.init(alloc) && argumentSlots_.init(alloc);
+}
+
+uint32_t SafepointWriter::startEntry() {
+ JitSpew(JitSpew_Safepoints,
+ "Encoding safepoint (position %zu):", stream_.length());
+ return uint32_t(stream_.length());
+}
+
+void SafepointWriter::writeOsiCallPointOffset(uint32_t osiCallPointOffset) {
+ stream_.writeUnsigned(osiCallPointOffset);
+}
+
+static void WriteRegisterMask(CompactBufferWriter& stream,
+ PackedRegisterMask bits) {
+ if (sizeof(PackedRegisterMask) == 1) {
+ stream.writeByte(bits);
+ } else {
+ MOZ_ASSERT(sizeof(PackedRegisterMask) <= 4);
+ stream.writeUnsigned(bits);
+ }
+}
+
+static PackedRegisterMask ReadRegisterMask(CompactBufferReader& stream) {
+ if (sizeof(PackedRegisterMask) == 1) {
+ return stream.readByte();
+ }
+ MOZ_ASSERT(sizeof(PackedRegisterMask) <= 4);
+ return stream.readUnsigned();
+}
+
+static void WriteFloatRegisterMask(CompactBufferWriter& stream,
+ FloatRegisters::SetType bits) {
+ switch (sizeof(FloatRegisters::SetType)) {
+#ifdef JS_CODEGEN_ARM64
+ case 16:
+ stream.writeUnsigned64(bits.low());
+ stream.writeUnsigned64(bits.high());
+ break;
+#else
+ case 1:
+ stream.writeByte(bits);
+ break;
+ case 4:
+ stream.writeUnsigned(bits);
+ break;
+ case 8:
+ stream.writeUnsigned64(bits);
+ break;
+#endif
+ default:
+ MOZ_CRASH("WriteFloatRegisterMask: unexpected size");
+ }
+}
+
+static FloatRegisters::SetType ReadFloatRegisterMask(
+ CompactBufferReader& stream) {
+ switch (sizeof(FloatRegisters::SetType)) {
+#ifdef JS_CODEGEN_ARM64
+ case 16: {
+ uint64_t low = stream.readUnsigned64();
+ uint64_t high = stream.readUnsigned64();
+ return Bitset128(high, low);
+ }
+#else
+ case 1:
+ return stream.readByte();
+ case 2:
+ case 3:
+ case 4:
+ return stream.readUnsigned();
+ case 8:
+ return stream.readUnsigned64();
+#endif
+ default:
+ MOZ_CRASH("ReadFloatRegisterMask: unexpected size");
+ }
+}
+
+void SafepointWriter::writeGcRegs(LSafepoint* safepoint) {
+ LiveGeneralRegisterSet gc(safepoint->gcRegs());
+ LiveGeneralRegisterSet spilledGpr(safepoint->liveRegs().gprs());
+ LiveFloatRegisterSet spilledFloat(safepoint->liveRegs().fpus());
+ LiveGeneralRegisterSet slots(safepoint->slotsOrElementsRegs());
+ LiveGeneralRegisterSet valueRegs;
+
+ WriteRegisterMask(stream_, spilledGpr.bits());
+ if (!spilledGpr.empty()) {
+ WriteRegisterMask(stream_, gc.bits());
+ WriteRegisterMask(stream_, slots.bits());
+
+#ifdef JS_PUNBOX64
+ valueRegs = safepoint->valueRegs();
+ WriteRegisterMask(stream_, valueRegs.bits());
+#endif
+ }
+
+ // GC registers are a subset of the spilled registers.
+ MOZ_ASSERT((valueRegs.bits() & ~spilledGpr.bits()) == 0);
+ MOZ_ASSERT((gc.bits() & ~spilledGpr.bits()) == 0);
+
+ WriteFloatRegisterMask(stream_, spilledFloat.bits());
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Safepoints)) {
+ for (GeneralRegisterForwardIterator iter(spilledGpr); iter.more(); ++iter) {
+ const char* type = gc.has(*iter) ? "gc"
+ : slots.has(*iter) ? "slots"
+ : valueRegs.has(*iter) ? "value"
+ : "any";
+ JitSpew(JitSpew_Safepoints, " %s reg: %s", type, (*iter).name());
+ }
+ for (FloatRegisterForwardIterator iter(spilledFloat); iter.more(); ++iter) {
+ JitSpew(JitSpew_Safepoints, " float reg: %s", (*iter).name());
+ }
+ }
+#endif
+}
+
+static void WriteBitset(const BitSet& set, CompactBufferWriter& stream) {
+ size_t count = set.rawLength();
+ const uint32_t* words = set.raw();
+ for (size_t i = 0; i < count; i++) {
+ stream.writeUnsigned(words[i]);
+ }
+}
+
+static void MapSlotsToBitset(BitSet& stackSet, BitSet& argumentSet,
+ CompactBufferWriter& stream,
+ const LSafepoint::SlotList& slots) {
+ stackSet.clear();
+ argumentSet.clear();
+
+ for (uint32_t i = 0; i < slots.length(); i++) {
+ // Slots are represented at a distance from |fp|. We divide by the
+ // pointer size, since we only care about pointer-sized/aligned slots
+ // here.
+ MOZ_ASSERT(slots[i].slot % sizeof(intptr_t) == 0);
+ size_t index = slots[i].slot / sizeof(intptr_t);
+ (slots[i].stack ? stackSet : argumentSet).insert(index);
+ }
+
+ WriteBitset(stackSet, stream);
+ WriteBitset(argumentSet, stream);
+}
+
+void SafepointWriter::writeGcSlots(LSafepoint* safepoint) {
+ LSafepoint::SlotList& slots = safepoint->gcSlots();
+
+#ifdef JS_JITSPEW
+ for (uint32_t i = 0; i < slots.length(); i++) {
+ JitSpew(JitSpew_Safepoints, " gc slot: %u", slots[i].slot);
+ }
+#endif
+
+ MapSlotsToBitset(localSlots_, argumentSlots_, stream_, slots);
+}
+
+void SafepointWriter::writeSlotsOrElementsSlots(LSafepoint* safepoint) {
+ LSafepoint::SlotList& slots = safepoint->slotsOrElementsSlots();
+
+ stream_.writeUnsigned(slots.length());
+
+ for (uint32_t i = 0; i < slots.length(); i++) {
+ if (!slots[i].stack) {
+ MOZ_CRASH();
+ }
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_Safepoints, " slots/elements slot: %u", slots[i].slot);
+#endif
+ stream_.writeUnsigned(slots[i].slot);
+ }
+}
+
+#ifdef JS_PUNBOX64
+void SafepointWriter::writeValueSlots(LSafepoint* safepoint) {
+ LSafepoint::SlotList& slots = safepoint->valueSlots();
+
+# ifdef JS_JITSPEW
+ for (uint32_t i = 0; i < slots.length(); i++) {
+ JitSpew(JitSpew_Safepoints, " gc value: %u", slots[i].slot);
+ }
+# endif
+
+ MapSlotsToBitset(localSlots_, argumentSlots_, stream_, slots);
+}
+#endif
+
+#if defined(JS_JITSPEW) && defined(JS_NUNBOX32)
+static void DumpNunboxPart(const LAllocation& a) {
+ Fprinter& out = JitSpewPrinter();
+ if (a.isStackSlot()) {
+ out.printf("stack %d", a.toStackSlot()->slot());
+ } else if (a.isArgument()) {
+ out.printf("arg %d", a.toArgument()->index());
+ } else {
+ out.printf("reg %s", a.toGeneralReg()->reg().name());
+ }
+}
+#endif // JS_JITSPEW && JS_NUNBOX32
+
+// Nunbox part encoding:
+//
+// Reg = 000
+// Stack = 001
+// Arg = 010
+//
+// [vwu] nentries:
+// uint16_t: tttp ppXX XXXY YYYY
+//
+// If ttt = Reg, type is reg XXXXX
+// If ppp = Reg, payload is reg YYYYY
+//
+// If ttt != Reg, type is:
+// XXXXX if not 11111, otherwise followed by [vwu]
+// If ppp != Reg, payload is:
+// YYYYY if not 11111, otherwise followed by [vwu]
+//
+enum NunboxPartKind { Part_Reg, Part_Stack, Part_Arg };
+
+static const uint32_t PART_KIND_BITS = 3;
+static const uint32_t PART_KIND_MASK = (1 << PART_KIND_BITS) - 1;
+static const uint32_t PART_INFO_BITS = 5;
+static const uint32_t PART_INFO_MASK = (1 << PART_INFO_BITS) - 1;
+
+static const uint32_t MAX_INFO_VALUE = (1 << PART_INFO_BITS) - 1;
+static const uint32_t TYPE_KIND_SHIFT = 16 - PART_KIND_BITS;
+static const uint32_t PAYLOAD_KIND_SHIFT = TYPE_KIND_SHIFT - PART_KIND_BITS;
+static const uint32_t TYPE_INFO_SHIFT = PAYLOAD_KIND_SHIFT - PART_INFO_BITS;
+static const uint32_t PAYLOAD_INFO_SHIFT = TYPE_INFO_SHIFT - PART_INFO_BITS;
+
+static_assert(PAYLOAD_INFO_SHIFT == 0);
+
+#ifdef JS_NUNBOX32
+static inline NunboxPartKind AllocationToPartKind(const LAllocation& a) {
+ if (a.isRegister()) {
+ return Part_Reg;
+ }
+ if (a.isStackSlot()) {
+ return Part_Stack;
+ }
+ MOZ_ASSERT(a.isArgument());
+ return Part_Arg;
+}
+
+// gcc 4.5 doesn't actually inline CanEncodeInfoInHeader when only
+// using the "inline" keyword, and miscompiles the function as well
+// when doing block reordering with branch prediction information.
+// See bug 799295 comment 71.
+static MOZ_ALWAYS_INLINE bool CanEncodeInfoInHeader(const LAllocation& a,
+ uint32_t* out) {
+ if (a.isGeneralReg()) {
+ *out = a.toGeneralReg()->reg().code();
+ return true;
+ }
+
+ if (a.isStackSlot()) {
+ *out = a.toStackSlot()->slot();
+ } else {
+ *out = a.toArgument()->index();
+ }
+
+ return *out < MAX_INFO_VALUE;
+}
+
+void SafepointWriter::writeNunboxParts(LSafepoint* safepoint) {
+ LSafepoint::NunboxList& entries = safepoint->nunboxParts();
+
+# ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_Safepoints)) {
+ for (uint32_t i = 0; i < entries.length(); i++) {
+ SafepointNunboxEntry& entry = entries[i];
+ if (entry.type.isUse() || entry.payload.isUse()) {
+ continue;
+ }
+ JitSpewHeader(JitSpew_Safepoints);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" nunbox (type in ");
+ DumpNunboxPart(entry.type);
+ out.printf(", payload in ");
+ DumpNunboxPart(entry.payload);
+ out.printf(")\n");
+ }
+ }
+# endif
+
+ // Safepoints are permitted to have partially filled in entries for nunboxes,
+ // provided that only the type is live and not the payload. Omit these from
+ // the written safepoint.
+
+ size_t pos = stream_.length();
+ stream_.writeUnsigned(entries.length());
+
+ size_t count = 0;
+ for (size_t i = 0; i < entries.length(); i++) {
+ SafepointNunboxEntry& entry = entries[i];
+
+ if (entry.payload.isUse()) {
+ // No allocation associated with the payload.
+ continue;
+ }
+
+ if (entry.type.isUse()) {
+ // No allocation associated with the type. Look for another
+ // safepoint entry with an allocation for the type.
+ entry.type = safepoint->findTypeAllocation(entry.typeVreg);
+ if (entry.type.isUse()) {
+ continue;
+ }
+ }
+
+ count++;
+
+ uint16_t header = 0;
+
+ header |= (AllocationToPartKind(entry.type) << TYPE_KIND_SHIFT);
+ header |= (AllocationToPartKind(entry.payload) << PAYLOAD_KIND_SHIFT);
+
+ uint32_t typeVal;
+ bool typeExtra = !CanEncodeInfoInHeader(entry.type, &typeVal);
+ if (!typeExtra) {
+ header |= (typeVal << TYPE_INFO_SHIFT);
+ } else {
+ header |= (MAX_INFO_VALUE << TYPE_INFO_SHIFT);
+ }
+
+ uint32_t payloadVal;
+ bool payloadExtra = !CanEncodeInfoInHeader(entry.payload, &payloadVal);
+ if (!payloadExtra) {
+ header |= (payloadVal << PAYLOAD_INFO_SHIFT);
+ } else {
+ header |= (MAX_INFO_VALUE << PAYLOAD_INFO_SHIFT);
+ }
+
+ stream_.writeFixedUint16_t(header);
+ if (typeExtra) {
+ stream_.writeUnsigned(typeVal);
+ }
+ if (payloadExtra) {
+ stream_.writeUnsigned(payloadVal);
+ }
+ }
+
+ // Update the stream with the actual number of safepoint entries written.
+ stream_.writeUnsignedAt(pos, count, entries.length());
+}
+#endif
+
+void SafepointWriter::encode(LSafepoint* safepoint) {
+ uint32_t safepointOffset = startEntry();
+
+ MOZ_ASSERT(safepoint->osiCallPointOffset());
+
+ writeOsiCallPointOffset(safepoint->osiCallPointOffset());
+ writeGcRegs(safepoint);
+ writeGcSlots(safepoint);
+
+#ifdef JS_PUNBOX64
+ writeValueSlots(safepoint);
+#else
+ writeNunboxParts(safepoint);
+#endif
+
+ writeSlotsOrElementsSlots(safepoint);
+
+ endEntry();
+ safepoint->setOffset(safepointOffset);
+}
+
+void SafepointWriter::endEntry() {
+ JitSpew(JitSpew_Safepoints, " -- entry ended at %u",
+ uint32_t(stream_.length()));
+}
+
+SafepointReader::SafepointReader(IonScript* script, const SafepointIndex* si)
+ : stream_(script->safepoints() + si->safepointOffset(),
+ script->safepoints() + script->safepointsSize()),
+ localSlots_((script->localSlotsSize() / sizeof(intptr_t)) +
+ 1), // Stack slot counts are inclusive.
+ argumentSlots_(script->argumentSlotsSize() / sizeof(intptr_t)),
+ nunboxSlotsRemaining_(0),
+ slotsOrElementsSlotsRemaining_(0) {
+ osiCallPointOffset_ = stream_.readUnsigned();
+
+ // gcSpills is a subset of allGprSpills.
+ allGprSpills_ = GeneralRegisterSet(ReadRegisterMask(stream_));
+ if (allGprSpills_.empty()) {
+ gcSpills_ = allGprSpills_;
+ valueSpills_ = allGprSpills_;
+ slotsOrElementsSpills_ = allGprSpills_;
+ } else {
+ gcSpills_ = GeneralRegisterSet(ReadRegisterMask(stream_));
+ slotsOrElementsSpills_ = GeneralRegisterSet(ReadRegisterMask(stream_));
+#ifdef JS_PUNBOX64
+ valueSpills_ = GeneralRegisterSet(ReadRegisterMask(stream_));
+#endif
+ }
+
+ allFloatSpills_ = FloatRegisterSet(ReadFloatRegisterMask(stream_));
+
+ advanceFromGcRegs();
+}
+
+uint32_t SafepointReader::osiReturnPointOffset() const {
+ return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
+}
+
+CodeLocationLabel SafepointReader::InvalidationPatchPoint(
+ IonScript* script, const SafepointIndex* si) {
+ SafepointReader reader(script, si);
+
+ return CodeLocationLabel(script->method(),
+ CodeOffset(reader.osiCallPointOffset()));
+}
+
+void SafepointReader::advanceFromGcRegs() {
+ currentSlotChunk_ = 0;
+ nextSlotChunkNumber_ = 0;
+ currentSlotsAreStack_ = true;
+}
+
+bool SafepointReader::getSlotFromBitmap(SafepointSlotEntry* entry) {
+ while (currentSlotChunk_ == 0) {
+ // Are there any more chunks to read?
+ if (currentSlotsAreStack_) {
+ if (nextSlotChunkNumber_ == BitSet::RawLengthForBits(localSlots_)) {
+ nextSlotChunkNumber_ = 0;
+ currentSlotsAreStack_ = false;
+ continue;
+ }
+ } else if (nextSlotChunkNumber_ ==
+ BitSet::RawLengthForBits(argumentSlots_)) {
+ return false;
+ }
+
+ // Yes, read the next chunk.
+ currentSlotChunk_ = stream_.readUnsigned();
+ nextSlotChunkNumber_++;
+ }
+
+ // The current chunk still has bits in it, so get the next bit, then mask
+ // it out of the slot chunk.
+ uint32_t bit = FloorLog2(currentSlotChunk_);
+ currentSlotChunk_ &= ~(1 << bit);
+
+ // Return the slot, and re-scale it by the pointer size, reversing the
+ // transformation in MapSlotsToBitset.
+ entry->stack = currentSlotsAreStack_;
+ entry->slot = (((nextSlotChunkNumber_ - 1) * BitSet::BitsPerWord) + bit) *
+ sizeof(intptr_t);
+ return true;
+}
+
+bool SafepointReader::getGcSlot(SafepointSlotEntry* entry) {
+ if (getSlotFromBitmap(entry)) {
+ return true;
+ }
+ advanceFromGcSlots();
+ return false;
+}
+
+void SafepointReader::advanceFromGcSlots() {
+ // Reset the bitmap cursor before reading the next group of slots.
+ currentSlotChunk_ = 0;
+ nextSlotChunkNumber_ = 0;
+ currentSlotsAreStack_ = true;
+#ifdef JS_NUNBOX32
+ // Nunbox slots are next.
+ nunboxSlotsRemaining_ = stream_.readUnsigned();
+#else
+ // Value slots are next.
+#endif
+}
+
+bool SafepointReader::getValueSlot(SafepointSlotEntry* entry) {
+ if (getSlotFromBitmap(entry)) {
+ return true;
+ }
+ advanceFromNunboxOrValueSlots();
+ return false;
+}
+
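+// Rebuild the LAllocation for one half of a nunbox from its kind and inline
+// info bits; an info value of MAX_INFO_VALUE means the real value follows
+// in the stream.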
+static inline LAllocation PartFromStream(CompactBufferReader& stream,
+ NunboxPartKind kind, uint32_t info) {
+ if (kind == Part_Reg) {
+ return LGeneralReg(Register::FromCode(info));
+ }
+
+ if (info == MAX_INFO_VALUE) {
+ info = stream.readUnsigned();
+ }
+
+ if (kind == Part_Stack) {
+ return LStackSlot(info);
+ }
+
+ MOZ_ASSERT(kind == Part_Arg);
+ return LArgument(info);
+}
+
+bool SafepointReader::getNunboxSlot(LAllocation* type, LAllocation* payload) {
+ if (!nunboxSlotsRemaining_--) {
+ advanceFromNunboxOrValueSlots();
+ return false;
+ }
+
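+  // Each nunbox entry starts with a 16-bit header packing the kind and
+  // inline info of both the type and payload parts; see PartFromStream for
+  // how oversized info values are handled.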
+ uint16_t header = stream_.readFixedUint16_t();
+ NunboxPartKind typeKind =
+ (NunboxPartKind)((header >> TYPE_KIND_SHIFT) & PART_KIND_MASK);
+ NunboxPartKind payloadKind =
+ (NunboxPartKind)((header >> PAYLOAD_KIND_SHIFT) & PART_KIND_MASK);
+ uint32_t typeInfo = (header >> TYPE_INFO_SHIFT) & PART_INFO_MASK;
+ uint32_t payloadInfo = (header >> PAYLOAD_INFO_SHIFT) & PART_INFO_MASK;
+
+ *type = PartFromStream(stream_, typeKind, typeInfo);
+ *payload = PartFromStream(stream_, payloadKind, payloadInfo);
+ return true;
+}
+
+void SafepointReader::advanceFromNunboxOrValueSlots() {
+ slotsOrElementsSlotsRemaining_ = stream_.readUnsigned();
+}
+
+bool SafepointReader::getSlotsOrElementsSlot(SafepointSlotEntry* entry) {
+ if (!slotsOrElementsSlotsRemaining_--) {
+ return false;
+ }
+ entry->stack = true;
+ entry->slot = stream_.readUnsigned();
+ return true;
+}
diff --git a/js/src/jit/Safepoints.h b/js/src/jit/Safepoints.h
new file mode 100644
index 0000000000..90f827e738
--- /dev/null
+++ b/js/src/jit/Safepoints.h
@@ -0,0 +1,129 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Safepoints_h
+#define jit_Safepoints_h
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/BitSet.h"
+#include "jit/CompactBuffer.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+class CodeLocationLabel;
+class IonScript;
+class SafepointIndex;
+struct SafepointSlotEntry;
+class TempAllocator;
+
+class LAllocation;
+class LSafepoint;
+
+static const uint32_t INVALID_SAFEPOINT_OFFSET = uint32_t(-1);
+
+class SafepointWriter {
+ CompactBufferWriter stream_;
+ BitSet localSlots_;
+ BitSet argumentSlots_;
+
+ public:
+ explicit SafepointWriter(uint32_t localSlotsSize, uint32_t argumentsSize);
+ [[nodiscard]] bool init(TempAllocator& alloc);
+
+ private:
+ // A safepoint entry is written in the order these functions appear.
+ uint32_t startEntry();
+
+ void writeOsiCallPointOffset(uint32_t osiPointOffset);
+ void writeGcRegs(LSafepoint* safepoint);
+ void writeGcSlots(LSafepoint* safepoint);
+
+ void writeSlotsOrElementsSlots(LSafepoint* safepoint);
+
+#ifdef JS_PUNBOX64
+ void writeValueSlots(LSafepoint* safepoint);
+#else
+ void writeNunboxParts(LSafepoint* safepoint);
+#endif
+
+ void endEntry();
+
+ public:
+ void encode(LSafepoint* safepoint);
+
+ size_t size() const { return stream_.length(); }
+ const uint8_t* buffer() const { return stream_.buffer(); }
+ bool oom() const { return stream_.oom(); }
+};
+
+class SafepointReader {
+ CompactBufferReader stream_;
+ uint32_t localSlots_;
+ uint32_t argumentSlots_;
+ uint32_t currentSlotChunk_;
+ bool currentSlotsAreStack_;
+ uint32_t nextSlotChunkNumber_;
+ uint32_t osiCallPointOffset_;
+ GeneralRegisterSet gcSpills_;
+ GeneralRegisterSet valueSpills_;
+ GeneralRegisterSet slotsOrElementsSpills_;
+ GeneralRegisterSet allGprSpills_;
+ FloatRegisterSet allFloatSpills_;
+ uint32_t nunboxSlotsRemaining_;
+ uint32_t slotsOrElementsSlotsRemaining_;
+
+ private:
+ void advanceFromGcRegs();
+ void advanceFromGcSlots();
+ void advanceFromNunboxOrValueSlots();
+ [[nodiscard]] bool getSlotFromBitmap(SafepointSlotEntry* entry);
+
+ public:
+ SafepointReader(IonScript* script, const SafepointIndex* si);
+
+ static CodeLocationLabel InvalidationPatchPoint(IonScript* script,
+ const SafepointIndex* si);
+
+ uint32_t osiCallPointOffset() const { return osiCallPointOffset_; }
+ LiveGeneralRegisterSet gcSpills() const {
+ return LiveGeneralRegisterSet(gcSpills_);
+ }
+ LiveGeneralRegisterSet slotsOrElementsSpills() const {
+ return LiveGeneralRegisterSet(slotsOrElementsSpills_);
+ }
+ LiveGeneralRegisterSet valueSpills() const {
+ return LiveGeneralRegisterSet(valueSpills_);
+ }
+ LiveGeneralRegisterSet allGprSpills() const {
+ return LiveGeneralRegisterSet(allGprSpills_);
+ }
+ LiveFloatRegisterSet allFloatSpills() const {
+ return LiveFloatRegisterSet(allFloatSpills_);
+ }
+ uint32_t osiReturnPointOffset() const;
+
+ // Returns true if a slot was read, false if there are no more slots.
+ [[nodiscard]] bool getGcSlot(SafepointSlotEntry* entry);
+
+ // Returns true if a slot was read, false if there are no more value slots.
+ [[nodiscard]] bool getValueSlot(SafepointSlotEntry* entry);
+
+ // Returns true if a nunbox slot was read, false if there are no more
+ // nunbox slots.
+ [[nodiscard]] bool getNunboxSlot(LAllocation* type, LAllocation* payload);
+
+ // Returns true if a slot was read, false if there are no more slots.
+ [[nodiscard]] bool getSlotsOrElementsSlot(SafepointSlotEntry* entry);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Safepoints_h */
diff --git a/js/src/jit/ScalarReplacement.cpp b/js/src/jit/ScalarReplacement.cpp
new file mode 100644
index 0000000000..0ded3601aa
--- /dev/null
+++ b/js/src/jit/ScalarReplacement.cpp
@@ -0,0 +1,3086 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/ScalarReplacement.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/WarpBuilderShared.h"
+#include "js/Vector.h"
+#include "vm/ArgumentsObject.h"
+
+#include "gc/ObjectKind-inl.h"
+
+namespace js {
+namespace jit {
+
+template <typename MemoryView>
+class EmulateStateOf {
+ private:
+ using BlockState = typename MemoryView::BlockState;
+
+ MIRGenerator* mir_;
+ MIRGraph& graph_;
+
+  // Block state at the entry of each basic block.
+ Vector<BlockState*, 8, SystemAllocPolicy> states_;
+
+ public:
+ EmulateStateOf(MIRGenerator* mir, MIRGraph& graph)
+ : mir_(mir), graph_(graph) {}
+
+ bool run(MemoryView& view);
+};
+
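+// Drive the emulation: walk the graph in RPO, replay each block's nodes
+// through the MemoryView, and propagate the resulting block state to the
+// successors.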
+template <typename MemoryView>
+bool EmulateStateOf<MemoryView>::run(MemoryView& view) {
+ // Initialize the current block state of each block to an unknown state.
+ if (!states_.appendN(nullptr, graph_.numBlocks())) {
+ return false;
+ }
+
+ // Initialize the first block which needs to be traversed in RPO.
+ MBasicBlock* startBlock = view.startingBlock();
+ if (!view.initStartingState(&states_[startBlock->id()])) {
+ return false;
+ }
+
+  // Iterate over each basic block which has a valid entry state, and merge
+  // the state into the successor blocks.
+ for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
+ block != graph_.rpoEnd(); block++) {
+ if (mir_->shouldCancel(MemoryView::phaseName)) {
+ return false;
+ }
+
+ // Get the block state as the result of the merge of all predecessors
+ // which have already been visited in RPO. This means that backedges
+ // are not yet merged into the loop.
+ BlockState* state = states_[block->id()];
+ if (!state) {
+ continue;
+ }
+ view.setEntryBlockState(state);
+
+    // Iterate over resume points, phis, and instructions.
+ for (MNodeIterator iter(*block); iter;) {
+      // Increment the iterator before visiting the instruction, as the
+      // visit function might discard the instruction from the basic block.
+ MNode* ins = *iter++;
+ if (ins->isDefinition()) {
+ MDefinition* def = ins->toDefinition();
+ switch (def->op()) {
+#define MIR_OP(op) \
+ case MDefinition::Opcode::op: \
+ view.visit##op(def->to##op()); \
+ break;
+ MIR_OPCODE_LIST(MIR_OP)
+#undef MIR_OP
+ }
+ } else {
+ view.visitResumePoint(ins->toResumePoint());
+ }
+ if (!graph_.alloc().ensureBallast()) {
+ return false;
+ }
+ if (view.oom()) {
+ return false;
+ }
+ }
+
+ // For each successor, merge the current state into the state of the
+ // successors.
+ for (size_t s = 0; s < block->numSuccessors(); s++) {
+ MBasicBlock* succ = block->getSuccessor(s);
+ if (!view.mergeIntoSuccessorState(*block, succ, &states_[succ->id()])) {
+ return false;
+ }
+ }
+ }
+
+ states_.clear();
+ return true;
+}
+
+static inline bool IsOptimizableObjectInstruction(MInstruction* ins) {
+ return ins->isNewObject() || ins->isNewPlainObject() ||
+ ins->isNewCallObject() || ins->isNewIterator();
+}
+
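+// Return true if |operand| is |newObject|, possibly wrapped in shape/class
+// guards, object checks, or unboxes.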
+static bool PhiOperandEqualTo(MDefinition* operand, MInstruction* newObject) {
+ if (operand == newObject) {
+ return true;
+ }
+
+ switch (operand->op()) {
+ case MDefinition::Opcode::GuardShape:
+ return PhiOperandEqualTo(operand->toGuardShape()->input(), newObject);
+
+ case MDefinition::Opcode::GuardToClass:
+ return PhiOperandEqualTo(operand->toGuardToClass()->input(), newObject);
+
+ case MDefinition::Opcode::CheckIsObj:
+ return PhiOperandEqualTo(operand->toCheckIsObj()->input(), newObject);
+
+ case MDefinition::Opcode::Unbox:
+ return PhiOperandEqualTo(operand->toUnbox()->input(), newObject);
+
+ default:
+ return false;
+ }
+}
+
+// Return true if all phi operands are equal to |newObject|.
+static bool PhiOperandsEqualTo(MPhi* phi, MInstruction* newObject) {
+ MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
+
+ for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+ if (!PhiOperandEqualTo(phi->getOperand(i), newObject)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool IsObjectEscaped(MDefinition* ins, MInstruction* newObject,
+ const Shape* shapeDefault = nullptr);
+
+// Returns false if the lambda is not escaped and is optimizable by
+// ScalarReplacementOfObject.
+static bool IsLambdaEscaped(MInstruction* ins, MInstruction* lambda,
+ MInstruction* newObject, const Shape* shape) {
+ MOZ_ASSERT(lambda->isLambda() || lambda->isFunctionWithProto());
+ MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
+ JitSpewDef(JitSpew_Escape, "Check lambda\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ // The scope chain is not escaped if none of the Lambdas which are
+ // capturing it are escaped.
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ MNode* consumer = (*i)->consumer();
+ if (!consumer->isDefinition()) {
+ // Cannot optimize if it is observable from fun.arguments or others.
+ if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
+ JitSpew(JitSpew_Escape, "Observable lambda cannot be recovered");
+ return true;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ switch (def->op()) {
+ case MDefinition::Opcode::GuardToFunction: {
+ auto* guard = def->toGuardToFunction();
+ if (IsLambdaEscaped(guard, lambda, newObject, shape)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardFunctionScript: {
+ auto* guard = def->toGuardFunctionScript();
+ BaseScript* actual;
+ if (lambda->isLambda()) {
+ actual = lambda->toLambda()->templateFunction()->baseScript();
+ } else {
+ actual = lambda->toFunctionWithProto()->function()->baseScript();
+ }
+ if (actual != guard->expected()) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching script guard\n",
+ guard);
+ return true;
+ }
+ if (IsLambdaEscaped(guard, lambda, newObject, shape)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::FunctionEnvironment: {
+ if (IsObjectEscaped(def->toFunctionEnvironment(), newObject, shape)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+ JitSpew(JitSpew_Escape, "Lambda is not escaped");
+ return false;
+}
+
+static bool IsLambdaEscaped(MInstruction* lambda, MInstruction* newObject,
+ const Shape* shape) {
+ return IsLambdaEscaped(lambda, lambda, newObject, shape);
+}
+
+// Returns false if the object is not escaped and is optimizable by
+// ScalarReplacementOfObject.
+//
+// For the moment, this code is dumb as it only supports objects whose shape
+// does not change.
+static bool IsObjectEscaped(MDefinition* ins, MInstruction* newObject,
+ const Shape* shapeDefault) {
+ MOZ_ASSERT(ins->type() == MIRType::Object || ins->isPhi());
+ MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
+
+ JitSpewDef(JitSpew_Escape, "Check object\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ const Shape* shape = shapeDefault;
+ if (!shape) {
+ if (ins->isNewPlainObject()) {
+ shape = ins->toNewPlainObject()->shape();
+ } else if (JSObject* templateObj = MObjectState::templateObjectOf(ins)) {
+ shape = templateObj->shape();
+ }
+ }
+
+ if (!shape) {
+ JitSpew(JitSpew_Escape, "No shape defined.");
+ return true;
+ }
+
+  // Check if the object is escaped. If the object is not the first argument
+  // of a known Store / Load, then we consider it escaped. This is a cheap
+  // and conservative escape analysis.
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ MNode* consumer = (*i)->consumer();
+ if (!consumer->isDefinition()) {
+ // Cannot optimize if it is observable from fun.arguments or others.
+ if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
+ JitSpew(JitSpew_Escape, "Observable object cannot be recovered");
+ return true;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ switch (def->op()) {
+ case MDefinition::Opcode::StoreFixedSlot:
+ case MDefinition::Opcode::LoadFixedSlot:
+ // Not escaped if it is the first argument.
+ if (def->indexOf(*i) == 0) {
+ break;
+ }
+
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+
+ case MDefinition::Opcode::PostWriteBarrier:
+ break;
+
+ case MDefinition::Opcode::Slots: {
+#ifdef DEBUG
+ // Assert that MSlots are only used by MStoreDynamicSlot and
+ // MLoadDynamicSlot.
+ MSlots* ins = def->toSlots();
+ MOZ_ASSERT(ins->object() != 0);
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ // toDefinition should normally never fail, since they don't get
+ // captured by resume points.
+ MDefinition* def = (*i)->consumer()->toDefinition();
+ MOZ_ASSERT(def->op() == MDefinition::Opcode::StoreDynamicSlot ||
+ def->op() == MDefinition::Opcode::LoadDynamicSlot);
+ }
+#endif
+ break;
+ }
+
+ case MDefinition::Opcode::GuardShape: {
+ MGuardShape* guard = def->toGuardShape();
+ MOZ_ASSERT(!ins->isGuardShape());
+ if (shape != guard->shape()) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
+ return true;
+ }
+ if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardToClass: {
+ MGuardToClass* guard = def->toGuardToClass();
+ if (!shape || shape->getObjectClass() != guard->getClass()) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
+ return true;
+ }
+ if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::CheckIsObj: {
+ if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::Unbox: {
+ if (def->type() != MIRType::Object) {
+ JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
+ return true;
+ }
+ if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::Lambda:
+ case MDefinition::Opcode::FunctionWithProto: {
+ if (IsLambdaEscaped(def->toInstruction(), newObject, shape)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::Phi: {
+ auto* phi = def->toPhi();
+ if (!PhiOperandsEqualTo(phi, newObject)) {
+ JitSpewDef(JitSpew_Escape, "has different phi operands\n", def);
+ return true;
+ }
+ if (IsObjectEscaped(phi, newObject, shape)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::Compare: {
+ bool canFold;
+ if (!def->toCompare()->tryFold(&canFold)) {
+ JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
+ return true;
+ }
+ break;
+ }
+
+ // Doesn't escape the object.
+ case MDefinition::Opcode::IsObject:
+ break;
+
+ // This instruction is a no-op used to verify that scalar replacement
+ // is working as expected in jit-test.
+ case MDefinition::Opcode::AssertRecoveredOnBailout:
+ break;
+
+ // This is just a special flavor of constant which lets us optimize
+ // out some guards in certain circumstances. We'll turn this into a
+ // regular constant later.
+ case MDefinition::Opcode::ConstantProto:
+ break;
+
+ // We definitely don't need barriers for objects that don't exist.
+ case MDefinition::Opcode::AssertCanElidePostWriteBarrier:
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+
+ JitSpew(JitSpew_Escape, "Object is not escaped");
+ return false;
+}
+
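+// This class replaces every MStoreFixedSlot and MStoreDynamicSlot by an
+// MObjectState which emulates the content of the object's slots. All
+// MLoadFixedSlot and MLoadDynamicSlot are replaced by the corresponding slot
+// value.
+//
+// In order to restore the object correctly in case of bailouts, we replace
+// all references to the allocation with the MObjectState definition.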
+class ObjectMemoryView : public MDefinitionVisitorDefaultNoop {
+ public:
+ using BlockState = MObjectState;
+ static const char phaseName[];
+
+ private:
+ TempAllocator& alloc_;
+ MConstant* undefinedVal_;
+ MInstruction* obj_;
+ MBasicBlock* startBlock_;
+ BlockState* state_;
+
+  // Used to improve memory usage by sharing common modifications.
+ const MResumePoint* lastResumePoint_;
+
+ bool oom_;
+
+ public:
+ ObjectMemoryView(TempAllocator& alloc, MInstruction* obj);
+
+ MBasicBlock* startingBlock();
+ bool initStartingState(BlockState** pState);
+
+ void setEntryBlockState(BlockState* state);
+ bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
+ BlockState** pSuccState);
+
+#ifdef DEBUG
+ void assertSuccess();
+#else
+ void assertSuccess() {}
+#endif
+
+ bool oom() const { return oom_; }
+
+ private:
+ MDefinition* functionForCallObject(MDefinition* ins);
+
+ public:
+ void visitResumePoint(MResumePoint* rp);
+ void visitObjectState(MObjectState* ins);
+ void visitStoreFixedSlot(MStoreFixedSlot* ins);
+ void visitLoadFixedSlot(MLoadFixedSlot* ins);
+ void visitPostWriteBarrier(MPostWriteBarrier* ins);
+ void visitStoreDynamicSlot(MStoreDynamicSlot* ins);
+ void visitLoadDynamicSlot(MLoadDynamicSlot* ins);
+ void visitGuardShape(MGuardShape* ins);
+ void visitGuardToClass(MGuardToClass* ins);
+ void visitCheckIsObj(MCheckIsObj* ins);
+ void visitUnbox(MUnbox* ins);
+ void visitFunctionEnvironment(MFunctionEnvironment* ins);
+ void visitGuardToFunction(MGuardToFunction* ins);
+ void visitGuardFunctionScript(MGuardFunctionScript* ins);
+ void visitLambda(MLambda* ins);
+ void visitFunctionWithProto(MFunctionWithProto* ins);
+ void visitPhi(MPhi* ins);
+ void visitCompare(MCompare* ins);
+ void visitConstantProto(MConstantProto* ins);
+ void visitIsObject(MIsObject* ins);
+ void visitAssertCanElidePostWriteBarrier(
+ MAssertCanElidePostWriteBarrier* ins);
+};
+
+/* static */ const char ObjectMemoryView::phaseName[] =
+ "Scalar Replacement of Object";
+
+ObjectMemoryView::ObjectMemoryView(TempAllocator& alloc, MInstruction* obj)
+ : alloc_(alloc),
+ undefinedVal_(nullptr),
+ obj_(obj),
+ startBlock_(obj->block()),
+ state_(nullptr),
+ lastResumePoint_(nullptr),
+ oom_(false) {
+  // Annotate the snapshot RValues such that we recover the stores first.
+ obj_->setIncompleteObject();
+
+ // Annotate the instruction such that we do not replace it by a
+ // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
+ obj_->setImplicitlyUsedUnchecked();
+}
+
+MBasicBlock* ObjectMemoryView::startingBlock() { return startBlock_; }
+
+bool ObjectMemoryView::initStartingState(BlockState** pState) {
+ // Uninitialized slots have an "undefined" value.
+ undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
+ startBlock_->insertBefore(obj_, undefinedVal_);
+
+  // Create a new block state and insert it at the location of the new
+  // object.
+ BlockState* state = BlockState::New(alloc_, obj_);
+ if (!state) {
+ return false;
+ }
+
+ startBlock_->insertAfter(obj_, state);
+
+ // Initialize the properties of the object state.
+ state->initFromTemplateObject(alloc_, undefinedVal_);
+
+  // Keep the state out of resume points until it has been visited.
+ state->setInWorklist();
+
+ *pState = state;
+ return true;
+}
+
+void ObjectMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
+
+bool ObjectMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
+ MBasicBlock* succ,
+ BlockState** pSuccState) {
+ BlockState* succState = *pSuccState;
+
+  // When the successor has no state yet, create one for it.
+  if (!succState) {
+    // If the successor is not dominated then the object cannot flow
+    // into this basic block without a Phi. We know that no Phi exists
+    // in non-dominated successors, as the conservative escape
+    // analysis fails otherwise. Such a condition can occur if the
+    // successor is a join at the end of an if-block and the object
+    // only exists within the branch.
+ if (!startBlock_->dominates(succ)) {
+ return true;
+ }
+
+ // If there is only one predecessor, carry over the last state of the
+ // block to the successor. As the block state is immutable, if the
+ // current block has multiple successors, they will share the same entry
+ // state.
+ if (succ->numPredecessors() <= 1 || !state_->numSlots()) {
+ *pSuccState = state_;
+ return true;
+ }
+
+ // If we have multiple predecessors, then we allocate one Phi node for
+ // each predecessor, and create a new block state which only has phi
+ // nodes. These would later be removed by the removal of redundant phi
+ // nodes.
+ succState = BlockState::Copy(alloc_, state_);
+ if (!succState) {
+ return false;
+ }
+
+ size_t numPreds = succ->numPredecessors();
+ for (size_t slot = 0; slot < state_->numSlots(); slot++) {
+ MPhi* phi = MPhi::New(alloc_.fallible());
+ if (!phi || !phi->reserveLength(numPreds)) {
+ return false;
+ }
+
+      // Fill the inputs of the successor's Phi with undefined
+      // values; each predecessor block later fills in its own input.
+ for (size_t p = 0; p < numPreds; p++) {
+ phi->addInput(undefinedVal_);
+ }
+
+ // Add Phi in the list of Phis of the basic block.
+ succ->addPhi(phi);
+ succState->setSlot(slot, phi);
+ }
+
+ // Insert the newly created block state instruction at the beginning
+ // of the successor block, after all the phi nodes. Note that it
+ // would be captured by the entry resume point of the successor
+ // block.
+ succ->insertBefore(succ->safeInsertTop(), succState);
+ *pSuccState = succState;
+ }
+
+ MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
+ if (succ->numPredecessors() > 1 && succState->numSlots() &&
+ succ != startBlock_) {
+ // We need to re-compute successorWithPhis as the previous EliminatePhis
+ // phase might have removed all the Phis from the successor block.
+ size_t currIndex;
+ MOZ_ASSERT(!succ->phisEmpty());
+ if (curr->successorWithPhis()) {
+ MOZ_ASSERT(curr->successorWithPhis() == succ);
+ currIndex = curr->positionInPhiSuccessor();
+ } else {
+ currIndex = succ->indexForPredecessor(curr);
+ curr->setSuccessorWithPhis(succ, currIndex);
+ }
+ MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);
+
+    // Copy the current slot states to the index of the current block in all
+    // the Phis created during the first visit of the successor.
+ for (size_t slot = 0; slot < state_->numSlots(); slot++) {
+ MPhi* phi = succState->getSlot(slot)->toPhi();
+ phi->replaceOperand(currIndex, state_->getSlot(slot));
+ }
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
+void ObjectMemoryView::assertSuccess() {
+ for (MUseIterator i(obj_->usesBegin()); i != obj_->usesEnd(); i++) {
+ MNode* ins = (*i)->consumer();
+ MDefinition* def = nullptr;
+
+ // Resume points have been replaced by the object state.
+ if (ins->isResumePoint() ||
+ (def = ins->toDefinition())->isRecoveredOnBailout()) {
+ MOZ_ASSERT(obj_->isIncompleteObject());
+ continue;
+ }
+
+ // The only remaining uses would be removed by DCE, which will also
+ // recover the object on bailouts.
+ MOZ_ASSERT(def->isSlots() || def->isLambda() || def->isFunctionWithProto());
+ MOZ_ASSERT(!def->hasDefUses());
+ }
+}
+#endif
+
+void ObjectMemoryView::visitResumePoint(MResumePoint* rp) {
+ // As long as the MObjectState is not yet seen next to the allocation, we do
+ // not patch the resume point to recover the side effects.
+ if (!state_->isInWorklist()) {
+ rp->addStore(alloc_, state_, lastResumePoint_);
+ lastResumePoint_ = rp;
+ }
+}
+
+void ObjectMemoryView::visitObjectState(MObjectState* ins) {
+ if (ins->isInWorklist()) {
+ ins->setNotInWorklist();
+ }
+}
+
+void ObjectMemoryView::visitStoreFixedSlot(MStoreFixedSlot* ins) {
+ // Skip stores made on other objects.
+ if (ins->object() != obj_) {
+ return;
+ }
+
+ // Clone the state and update the slot value.
+ if (state_->hasFixedSlot(ins->slot())) {
+ state_ = BlockState::Copy(alloc_, state_);
+ if (!state_) {
+ oom_ = true;
+ return;
+ }
+
+ state_->setFixedSlot(ins->slot(), ins->value());
+ ins->block()->insertBefore(ins->toInstruction(), state_);
+ } else {
+    // UnsafeSetReservedSlot can access baked-in slots which are guarded by
+    // conditions that are not seen by the escape analysis.
+ MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
+ ins->block()->insertBefore(ins, bailout);
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitLoadFixedSlot(MLoadFixedSlot* ins) {
+ // Skip loads made on other objects.
+ if (ins->object() != obj_) {
+ return;
+ }
+
+ // Replace load by the slot value.
+ if (state_->hasFixedSlot(ins->slot())) {
+ ins->replaceAllUsesWith(state_->getFixedSlot(ins->slot()));
+ } else {
+    // UnsafeGetReservedSlot can access baked-in slots which are guarded by
+    // conditions that are not seen by the escape analysis.
+ MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
+ ins->block()->insertBefore(ins, bailout);
+ ins->replaceAllUsesWith(undefinedVal_);
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
+  // Skip barriers on other objects.
+ if (ins->object() != obj_) {
+ return;
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitStoreDynamicSlot(MStoreDynamicSlot* ins) {
+ // Skip stores made on other objects.
+ MSlots* slots = ins->slots()->toSlots();
+ if (slots->object() != obj_) {
+ // Guard objects are replaced when they are visited.
+ MOZ_ASSERT(!slots->object()->isGuardShape() ||
+ slots->object()->toGuardShape()->object() != obj_);
+ return;
+ }
+
+ // Clone the state and update the slot value.
+ if (state_->hasDynamicSlot(ins->slot())) {
+ state_ = BlockState::Copy(alloc_, state_);
+ if (!state_) {
+ oom_ = true;
+ return;
+ }
+
+ state_->setDynamicSlot(ins->slot(), ins->value());
+ ins->block()->insertBefore(ins->toInstruction(), state_);
+ } else {
+    // UnsafeSetReservedSlot can access baked-in slots which are guarded by
+    // conditions that are not seen by the escape analysis.
+ MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
+ ins->block()->insertBefore(ins, bailout);
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitLoadDynamicSlot(MLoadDynamicSlot* ins) {
+ // Skip loads made on other objects.
+ MSlots* slots = ins->slots()->toSlots();
+ if (slots->object() != obj_) {
+ // Guard objects are replaced when they are visited.
+ MOZ_ASSERT(!slots->object()->isGuardShape() ||
+ slots->object()->toGuardShape()->object() != obj_);
+ return;
+ }
+
+ // Replace load by the slot value.
+ if (state_->hasDynamicSlot(ins->slot())) {
+ ins->replaceAllUsesWith(state_->getDynamicSlot(ins->slot()));
+ } else {
+    // UnsafeGetReservedSlot can access baked-in slots which are guarded by
+    // conditions that are not seen by the escape analysis.
+ MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
+ ins->block()->insertBefore(ins, bailout);
+ ins->replaceAllUsesWith(undefinedVal_);
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitGuardShape(MGuardShape* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != obj_) {
+ return;
+ }
+
+ // Replace the guard by its object.
+ ins->replaceAllUsesWith(obj_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitGuardToClass(MGuardToClass* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != obj_) {
+ return;
+ }
+
+ // Replace the guard by its object.
+ ins->replaceAllUsesWith(obj_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitCheckIsObj(MCheckIsObj* ins) {
+ // Skip checks on other objects.
+ if (ins->input() != obj_) {
+ return;
+ }
+
+ // Replace the check by its object.
+ ins->replaceAllUsesWith(obj_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitUnbox(MUnbox* ins) {
+ // Skip unrelated unboxes.
+ if (ins->input() != obj_) {
+ return;
+ }
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ // Replace the unbox with the object.
+ ins->replaceAllUsesWith(obj_);
+
+ // Remove the unbox.
+ ins->block()->discard(ins);
+}
+
+MDefinition* ObjectMemoryView::functionForCallObject(MDefinition* ins) {
+ // Return early when we don't replace MNewCallObject.
+ if (!obj_->isNewCallObject()) {
+ return nullptr;
+ }
+
+  // Unwrap instructions until we find either MLambda or MFunctionWithProto.
+  // Return the function instruction if its environment chain matches the
+  // MNewCallObject we're about to replace.
+ while (true) {
+ switch (ins->op()) {
+ case MDefinition::Opcode::Lambda: {
+ if (ins->toLambda()->environmentChain() == obj_) {
+ return ins;
+ }
+ return nullptr;
+ }
+ case MDefinition::Opcode::FunctionWithProto: {
+ if (ins->toFunctionWithProto()->environmentChain() == obj_) {
+ return ins;
+ }
+ return nullptr;
+ }
+ case MDefinition::Opcode::FunctionEnvironment:
+ ins = ins->toFunctionEnvironment()->function();
+ break;
+ case MDefinition::Opcode::GuardToFunction:
+ ins = ins->toGuardToFunction()->object();
+ break;
+ case MDefinition::Opcode::GuardFunctionScript:
+ ins = ins->toGuardFunctionScript()->function();
+ break;
+ default:
+ return nullptr;
+ }
+ }
+}
+
+void ObjectMemoryView::visitFunctionEnvironment(MFunctionEnvironment* ins) {
+  // Skip function environments which are not aliases of the NewCallObject.
+ if (!functionForCallObject(ins)) {
+ return;
+ }
+
+ // Replace the function environment by the scope chain of the lambda.
+ ins->replaceAllUsesWith(obj_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitGuardToFunction(MGuardToFunction* ins) {
+ // Skip guards on other objects.
+ auto* function = functionForCallObject(ins);
+ if (!function) {
+ return;
+ }
+
+ // Replace the guard by its object.
+ ins->replaceAllUsesWith(function);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitGuardFunctionScript(MGuardFunctionScript* ins) {
+ // Skip guards on other objects.
+ auto* function = functionForCallObject(ins);
+ if (!function) {
+ return;
+ }
+
+ // Replace the guard by its object.
+ ins->replaceAllUsesWith(function);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitLambda(MLambda* ins) {
+ if (ins->environmentChain() != obj_) {
+ return;
+ }
+
+ // In order to recover the lambda we need to recover the scope chain, as the
+ // lambda is holding it.
+ ins->setIncompleteObject();
+}
+
+void ObjectMemoryView::visitFunctionWithProto(MFunctionWithProto* ins) {
+ if (ins->environmentChain() != obj_) {
+ return;
+ }
+
+ ins->setIncompleteObject();
+}
+
+void ObjectMemoryView::visitPhi(MPhi* ins) {
+ // Skip phis on other objects.
+ if (!PhiOperandsEqualTo(ins, obj_)) {
+ return;
+ }
+
+ // Replace the phi by its object.
+ ins->replaceAllUsesWith(obj_);
+
+ // Remove original instruction.
+ ins->block()->discardPhi(ins);
+}
+
+void ObjectMemoryView::visitCompare(MCompare* ins) {
+ // Skip unrelated comparisons.
+ if (ins->lhs() != obj_ && ins->rhs() != obj_) {
+ return;
+ }
+
+ bool folded;
+ MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
+
+ auto* cst = MConstant::New(alloc_, BooleanValue(folded));
+ ins->block()->insertBefore(ins, cst);
+
+ // Replace the comparison with a constant.
+ ins->replaceAllUsesWith(cst);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitConstantProto(MConstantProto* ins) {
+ if (ins->getReceiverObject() != obj_) {
+ return;
+ }
+
+ auto* cst = ins->protoObject();
+ ins->replaceAllUsesWith(cst);
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitIsObject(MIsObject* ins) {
+ // Skip unrelated tests.
+ if (ins->input() != obj_) {
+ return;
+ }
+
+ auto* cst = MConstant::New(alloc_, BooleanValue(true));
+ ins->block()->insertBefore(ins, cst);
+
+ // Replace the test with a constant.
+ ins->replaceAllUsesWith(cst);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ObjectMemoryView::visitAssertCanElidePostWriteBarrier(
+ MAssertCanElidePostWriteBarrier* ins) {
+ if (ins->object() != obj_) {
+ return;
+ }
+
+ ins->block()->discard(ins);
+}
+
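+// Extracts the constant index of a load/store element, looking through
+// SpectreMaskIndex, BoundsCheck, and ToNumberInt32 wrappers. Returns false
+// if the index is not a known Int32 constant.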
+static bool IndexOf(MDefinition* ins, int32_t* res) {
+ MOZ_ASSERT(ins->isLoadElement() || ins->isStoreElement());
+ MDefinition* indexDef = ins->getOperand(1); // ins->index();
+ if (indexDef->isSpectreMaskIndex()) {
+ indexDef = indexDef->toSpectreMaskIndex()->index();
+ }
+ if (indexDef->isBoundsCheck()) {
+ indexDef = indexDef->toBoundsCheck()->index();
+ }
+ if (indexDef->isToNumberInt32()) {
+ indexDef = indexDef->toToNumberInt32()->getOperand(0);
+ }
+ MConstant* indexDefConst = indexDef->maybeConstantValue();
+ if (!indexDefConst || indexDefConst->type() != MIRType::Int32) {
+ return false;
+ }
+ *res = indexDefConst->toInt32();
+ return true;
+}
+
+static inline bool IsOptimizableArrayInstruction(MInstruction* ins) {
+ return ins->isNewArray() || ins->isNewArrayObject();
+}
+
+// We don't support storing holes when doing scalar replacement, so any
+// optimizable MNewArrayObject instruction is guaranteed to be packed.
+static inline bool IsPackedArray(MInstruction* ins) {
+ return ins->isNewArrayObject();
+}
+
+// Returns false if the elements are not escaped and are optimizable by
+// ScalarReplacementOfArray.
+static bool IsElementEscaped(MDefinition* def, MInstruction* newArray,
+ uint32_t arraySize) {
+ MOZ_ASSERT(def->isElements());
+ MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));
+
+ JitSpewDef(JitSpew_Escape, "Check elements\n", def);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) {
+ // The MIRType::Elements cannot be captured in a resume point as
+ // it does not represent a value allocation.
+ MDefinition* access = (*i)->consumer()->toDefinition();
+
+ switch (access->op()) {
+ case MDefinition::Opcode::LoadElement: {
+ MOZ_ASSERT(access->toLoadElement()->elements() == def);
+
+ // If the index is not a constant then this index can alias
+ // all others. We do not handle this case.
+ int32_t index;
+ if (!IndexOf(access, &index)) {
+ JitSpewDef(JitSpew_Escape,
+ "has a load element with a non-trivial index\n", access);
+ return true;
+ }
+ if (index < 0 || arraySize <= uint32_t(index)) {
+ JitSpewDef(JitSpew_Escape,
+ "has a load element with an out-of-bound index\n", access);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::StoreElement: {
+ MStoreElement* storeElem = access->toStoreElement();
+ MOZ_ASSERT(storeElem->elements() == def);
+
+ // StoreElement must bail out if it stores to a hole, in case
+ // there is a setter on the prototype chain. If this StoreElement
+ // might store to a hole, we can't scalar-replace it.
+ if (storeElem->needsHoleCheck()) {
+ JitSpewDef(JitSpew_Escape, "has a store element with a hole check\n",
+ storeElem);
+ return true;
+ }
+
+ // If the index is not a constant then this index can alias
+ // all others. We do not handle this case.
+ int32_t index;
+ if (!IndexOf(storeElem, &index)) {
+ JitSpewDef(JitSpew_Escape,
+ "has a store element with a non-trivial index\n",
+ storeElem);
+ return true;
+ }
+ if (index < 0 || arraySize <= uint32_t(index)) {
+ JitSpewDef(JitSpew_Escape,
+ "has a store element with an out-of-bound index\n",
+ storeElem);
+ return true;
+ }
+
+ // Dense element holes are written using MStoreHoleValueElement instead
+ // of MStoreElement.
+ MOZ_ASSERT(storeElem->value()->type() != MIRType::MagicHole);
+ break;
+ }
+
+ case MDefinition::Opcode::SetInitializedLength:
+ MOZ_ASSERT(access->toSetInitializedLength()->elements() == def);
+ break;
+
+ case MDefinition::Opcode::InitializedLength:
+ MOZ_ASSERT(access->toInitializedLength()->elements() == def);
+ break;
+
+ case MDefinition::Opcode::ArrayLength:
+ MOZ_ASSERT(access->toArrayLength()->elements() == def);
+ break;
+
+ case MDefinition::Opcode::ApplyArray:
+ MOZ_ASSERT(access->toApplyArray()->getElements() == def);
+ if (!IsPackedArray(newArray)) {
+ JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n",
+ access);
+ return true;
+ }
+ break;
+
+ case MDefinition::Opcode::ConstructArray:
+ MOZ_ASSERT(access->toConstructArray()->getElements() == def);
+ if (!IsPackedArray(newArray)) {
+ JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n",
+ access);
+ return true;
+ }
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", access);
+ return true;
+ }
+ }
+ JitSpew(JitSpew_Escape, "Elements is not escaped");
+ return false;
+}
+
+// Returns false if the array is not escaped and is optimizable by
+// ScalarReplacementOfArray.
+//
+// For the moment, this code is dumb as it only supports arrays whose length
+// does not change and which are only accessed at known constant indices.
+static bool IsArrayEscaped(MInstruction* ins, MInstruction* newArray) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));
+
+ JitSpewDef(JitSpew_Escape, "Check array\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ const Shape* shape;
+ uint32_t length;
+ if (newArray->isNewArrayObject()) {
+ length = newArray->toNewArrayObject()->length();
+ shape = newArray->toNewArrayObject()->shape();
+ } else {
+ length = newArray->toNewArray()->length();
+ JSObject* templateObject = newArray->toNewArray()->templateObject();
+ if (!templateObject) {
+ JitSpew(JitSpew_Escape, "No template object defined.");
+ return true;
+ }
+ shape = templateObject->shape();
+ }
+
+ if (length >= 16) {
+ JitSpew(JitSpew_Escape, "Array has too many elements");
+ return true;
+ }
+
+  // Check if the object is escaped. If the object is not the first argument
+  // of a known Store / Load, then we consider it escaped. This is a cheap
+  // and conservative escape analysis.
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ MNode* consumer = (*i)->consumer();
+ if (!consumer->isDefinition()) {
+ // Cannot optimize if it is observable from fun.arguments or others.
+ if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
+ JitSpew(JitSpew_Escape, "Observable array cannot be recovered");
+ return true;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ switch (def->op()) {
+ case MDefinition::Opcode::Elements: {
+ MElements* elem = def->toElements();
+ MOZ_ASSERT(elem->object() == ins);
+ if (IsElementEscaped(elem, newArray, length)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", elem);
+ return true;
+ }
+
+ break;
+ }
+
+ case MDefinition::Opcode::GuardShape: {
+ MGuardShape* guard = def->toGuardShape();
+ if (shape != guard->shape()) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
+ return true;
+ }
+ if (IsArrayEscaped(guard, newArray)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+
+ break;
+ }
+
+ case MDefinition::Opcode::GuardToClass: {
+ MGuardToClass* guard = def->toGuardToClass();
+ if (shape->getObjectClass() != guard->getClass()) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
+ return true;
+ }
+ if (IsArrayEscaped(guard, newArray)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+
+ break;
+ }
+
+ case MDefinition::Opcode::GuardArrayIsPacked: {
+ auto* guard = def->toGuardArrayIsPacked();
+ if (!IsPackedArray(newArray)) {
+ JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n", def);
+ return true;
+ }
+ if (IsArrayEscaped(guard, newArray)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::Unbox: {
+ if (def->type() != MIRType::Object) {
+ JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
+ return true;
+ }
+ if (IsArrayEscaped(def->toInstruction(), newArray)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ // This instruction is supported for |JSOp::OptimizeSpreadCall|.
+ case MDefinition::Opcode::Compare: {
+ bool canFold;
+ if (!def->toCompare()->tryFold(&canFold)) {
+ JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::PostWriteBarrier:
+ case MDefinition::Opcode::PostWriteElementBarrier:
+ break;
+
+ // This instruction is a no-op used to verify that scalar replacement
+ // is working as expected in jit-test.
+ case MDefinition::Opcode::AssertRecoveredOnBailout:
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+
+ JitSpew(JitSpew_Escape, "Array is not escaped");
+ return false;
+}
+
+// This class replaces every MStoreElement and MSetInitializedLength by an
+// MArrayState which emulates the content of the array. All MLoadElement,
+// MInitializedLength and MArrayLength are replaced by the corresponding value.
+//
+// In order to restore the value of the array correctly in case of bailouts, we
+// replace all references to the allocation with the MArrayState definition.
+class ArrayMemoryView : public MDefinitionVisitorDefaultNoop {
+ public:
+ using BlockState = MArrayState;
+ static const char* phaseName;
+
+ private:
+ TempAllocator& alloc_;
+ MConstant* undefinedVal_;
+ MConstant* length_;
+ MInstruction* arr_;
+ MBasicBlock* startBlock_;
+ BlockState* state_;
+
+  // Used to improve memory usage by sharing common modifications.
+ const MResumePoint* lastResumePoint_;
+
+ bool oom_;
+
+ public:
+ ArrayMemoryView(TempAllocator& alloc, MInstruction* arr);
+
+ MBasicBlock* startingBlock();
+ bool initStartingState(BlockState** pState);
+
+ void setEntryBlockState(BlockState* state);
+ bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
+ BlockState** pSuccState);
+
+#ifdef DEBUG
+ void assertSuccess();
+#else
+ void assertSuccess() {}
+#endif
+
+ bool oom() const { return oom_; }
+
+ private:
+ bool isArrayStateElements(MDefinition* elements);
+ void discardInstruction(MInstruction* ins, MDefinition* elements);
+
+ public:
+ void visitResumePoint(MResumePoint* rp);
+ void visitArrayState(MArrayState* ins);
+ void visitStoreElement(MStoreElement* ins);
+ void visitLoadElement(MLoadElement* ins);
+ void visitSetInitializedLength(MSetInitializedLength* ins);
+ void visitInitializedLength(MInitializedLength* ins);
+ void visitArrayLength(MArrayLength* ins);
+ void visitPostWriteBarrier(MPostWriteBarrier* ins);
+ void visitPostWriteElementBarrier(MPostWriteElementBarrier* ins);
+ void visitGuardShape(MGuardShape* ins);
+ void visitGuardToClass(MGuardToClass* ins);
+ void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
+ void visitUnbox(MUnbox* ins);
+ void visitCompare(MCompare* ins);
+ void visitApplyArray(MApplyArray* ins);
+ void visitConstructArray(MConstructArray* ins);
+};
+
+const char* ArrayMemoryView::phaseName = "Scalar Replacement of Array";
+
+ArrayMemoryView::ArrayMemoryView(TempAllocator& alloc, MInstruction* arr)
+ : alloc_(alloc),
+ undefinedVal_(nullptr),
+ length_(nullptr),
+ arr_(arr),
+ startBlock_(arr->block()),
+ state_(nullptr),
+ lastResumePoint_(nullptr),
+ oom_(false) {
+  // Annotate the snapshot RValues such that we recover the stores first.
+ arr_->setIncompleteObject();
+
+ // Annotate the instruction such that we do not replace it by a
+ // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
+ arr_->setImplicitlyUsedUnchecked();
+}
+
+MBasicBlock* ArrayMemoryView::startingBlock() { return startBlock_; }
+
+bool ArrayMemoryView::initStartingState(BlockState** pState) {
+ // Uninitialized elements have an "undefined" value.
+ undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
+ MConstant* initLength = MConstant::New(alloc_, Int32Value(0));
+ arr_->block()->insertBefore(arr_, undefinedVal_);
+ arr_->block()->insertBefore(arr_, initLength);
+
+  // Create a new block state and insert it at the location of the new array.
+ BlockState* state = BlockState::New(alloc_, arr_, initLength);
+ if (!state) {
+ return false;
+ }
+
+ startBlock_->insertAfter(arr_, state);
+
+ // Initialize the elements of the array state.
+ state->initFromTemplateObject(alloc_, undefinedVal_);
+
+  // Keep the state out of resume points until it has been visited.
+ state->setInWorklist();
+
+ *pState = state;
+ return true;
+}
+
+void ArrayMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
+
+bool ArrayMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
+ MBasicBlock* succ,
+ BlockState** pSuccState) {
+ BlockState* succState = *pSuccState;
+
+  // When the successor has no state yet, create one for it.
+  if (!succState) {
+    // If the successor is not dominated then the array cannot flow
+    // into this basic block without a Phi. We know that no Phi exists
+    // in non-dominated successors, as the conservative escape
+    // analysis fails otherwise. Such a condition can occur if the
+    // successor is a join at the end of an if-block and the array
+    // only exists within the branch.
+ if (!startBlock_->dominates(succ)) {
+ return true;
+ }
+
+ // If there is only one predecessor, carry over the last state of the
+ // block to the successor. As the block state is immutable, if the
+ // current block has multiple successors, they will share the same entry
+ // state.
+ if (succ->numPredecessors() <= 1 || !state_->numElements()) {
+ *pSuccState = state_;
+ return true;
+ }
+
+ // If we have multiple predecessors, then we allocate one Phi node for
+ // each predecessor, and create a new block state which only has phi
+ // nodes. These would later be removed by the removal of redundant phi
+ // nodes.
+ succState = BlockState::Copy(alloc_, state_);
+ if (!succState) {
+ return false;
+ }
+
+ size_t numPreds = succ->numPredecessors();
+ for (size_t index = 0; index < state_->numElements(); index++) {
+ MPhi* phi = MPhi::New(alloc_.fallible());
+ if (!phi || !phi->reserveLength(numPreds)) {
+ return false;
+ }
+
+      // Fill the inputs of the successor's Phi with undefined
+      // values; each predecessor block later fills in its own input.
+ for (size_t p = 0; p < numPreds; p++) {
+ phi->addInput(undefinedVal_);
+ }
+
+ // Add Phi in the list of Phis of the basic block.
+ succ->addPhi(phi);
+ succState->setElement(index, phi);
+ }
+
+ // Insert the newly created block state instruction at the beginning
+ // of the successor block, after all the phi nodes. Note that it
+ // would be captured by the entry resume point of the successor
+ // block.
+ succ->insertBefore(succ->safeInsertTop(), succState);
+ *pSuccState = succState;
+ }
+
+ MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
+ if (succ->numPredecessors() > 1 && succState->numElements() &&
+ succ != startBlock_) {
+ // We need to re-compute successorWithPhis as the previous EliminatePhis
+ // phase might have removed all the Phis from the successor block.
+ size_t currIndex;
+ MOZ_ASSERT(!succ->phisEmpty());
+ if (curr->successorWithPhis()) {
+ MOZ_ASSERT(curr->successorWithPhis() == succ);
+ currIndex = curr->positionInPhiSuccessor();
+ } else {
+ currIndex = succ->indexForPredecessor(curr);
+ curr->setSuccessorWithPhis(succ, currIndex);
+ }
+ MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);
+
+    // Copy the current element states to the index of the current block in
+    // all the Phis created during the first visit of the successor.
+ for (size_t index = 0; index < state_->numElements(); index++) {
+ MPhi* phi = succState->getElement(index)->toPhi();
+ phi->replaceOperand(currIndex, state_->getElement(index));
+ }
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
+void ArrayMemoryView::assertSuccess() { MOZ_ASSERT(!arr_->hasLiveDefUses()); }
+#endif
+
+void ArrayMemoryView::visitResumePoint(MResumePoint* rp) {
+ // As long as the MArrayState is not yet seen next to the allocation, we do
+ // not patch the resume point to recover the side effects.
+ if (!state_->isInWorklist()) {
+ rp->addStore(alloc_, state_, lastResumePoint_);
+ lastResumePoint_ = rp;
+ }
+}
+
+void ArrayMemoryView::visitArrayState(MArrayState* ins) {
+ if (ins->isInWorklist()) {
+ ins->setNotInWorklist();
+ }
+}
+
+bool ArrayMemoryView::isArrayStateElements(MDefinition* elements) {
+ return elements->isElements() && elements->toElements()->object() == arr_;
+}
+
+void ArrayMemoryView::discardInstruction(MInstruction* ins,
+ MDefinition* elements) {
+ MOZ_ASSERT(elements->isElements());
+ ins->block()->discard(ins);
+ if (!elements->hasLiveDefUses()) {
+ elements->block()->discard(elements->toInstruction());
+ }
+}
+
+void ArrayMemoryView::visitStoreElement(MStoreElement* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements)) {
+ return;
+ }
+
+  // Record the stored value in the state.
+ int32_t index;
+ MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
+ state_ = BlockState::Copy(alloc_, state_);
+ if (!state_) {
+ oom_ = true;
+ return;
+ }
+
+ state_->setElement(index, ins->value());
+ ins->block()->insertBefore(ins, state_);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void ArrayMemoryView::visitLoadElement(MLoadElement* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements)) {
+ return;
+ }
+
+ // Replace by the value contained at the index.
+ int32_t index;
+ MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
+
+ // The only way to store a hole value in a new array is with
+ // StoreHoleValueElement, which IsElementEscaped does not allow.
+ // Therefore, we do not have to do a hole check.
+ MDefinition* element = state_->getElement(index);
+ MOZ_ASSERT(element->type() != MIRType::MagicHole);
+
+ ins->replaceAllUsesWith(element);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void ArrayMemoryView::visitSetInitializedLength(MSetInitializedLength* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements)) {
+ return;
+ }
+
+ // Replace by the new initialized length. Note that the argument of
+ // MSetInitializedLength is the last index and not the initialized length.
+ // To obtain the length, we need to add 1 to it, and thus we need to create
+ // a new constant that we register in the ArrayState.
+ state_ = BlockState::Copy(alloc_, state_);
+ if (!state_) {
+ oom_ = true;
+ return;
+ }
+
+ int32_t initLengthValue = ins->index()->maybeConstantValue()->toInt32() + 1;
+ MConstant* initLength = MConstant::New(alloc_, Int32Value(initLengthValue));
+ ins->block()->insertBefore(ins, initLength);
+ ins->block()->insertBefore(ins, state_);
+ state_->setInitializedLength(initLength);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void ArrayMemoryView::visitInitializedLength(MInitializedLength* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements)) {
+ return;
+ }
+
+ // Replace by the value of the length.
+ ins->replaceAllUsesWith(state_->initializedLength());
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void ArrayMemoryView::visitArrayLength(MArrayLength* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isArrayStateElements(elements)) {
+ return;
+ }
+
+ // Replace by the value of the length.
+ if (!length_) {
+ length_ = MConstant::New(alloc_, Int32Value(state_->numElements()));
+ arr_->block()->insertBefore(arr_, length_);
+ }
+ ins->replaceAllUsesWith(length_);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void ArrayMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
+ // Skip barriers on other objects.
+ if (ins->object() != arr_) {
+ return;
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArrayMemoryView::visitPostWriteElementBarrier(
+ MPostWriteElementBarrier* ins) {
+ // Skip barriers on other objects.
+ if (ins->object() != arr_) {
+ return;
+ }
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArrayMemoryView::visitGuardShape(MGuardShape* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != arr_) {
+ return;
+ }
+
+ // Replace the guard by its object.
+ ins->replaceAllUsesWith(arr_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArrayMemoryView::visitGuardToClass(MGuardToClass* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != arr_) {
+ return;
+ }
+
+ // Replace the guard by its object.
+ ins->replaceAllUsesWith(arr_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArrayMemoryView::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
+ // Skip guards on other objects.
+ if (ins->array() != arr_) {
+ return;
+ }
+
+ // Replace the guard by its object.
+ ins->replaceAllUsesWith(arr_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArrayMemoryView::visitUnbox(MUnbox* ins) {
+ // Skip unrelated unboxes.
+ if (ins->getOperand(0) != arr_) {
+ return;
+ }
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ // Replace the unbox with the array object.
+ ins->replaceAllUsesWith(arr_);
+
+ // Remove the unbox.
+ ins->block()->discard(ins);
+}
+
+void ArrayMemoryView::visitCompare(MCompare* ins) {
+ // Skip unrelated comparisons.
+ if (ins->lhs() != arr_ && ins->rhs() != arr_) {
+ return;
+ }
+
+ bool folded;
+ MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
+
+ auto* cst = MConstant::New(alloc_, BooleanValue(folded));
+ ins->block()->insertBefore(ins, cst);
+
+ // Replace the comparison with a constant.
+ ins->replaceAllUsesWith(cst);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArrayMemoryView::visitApplyArray(MApplyArray* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->getElements();
+ if (!isArrayStateElements(elements)) {
+ return;
+ }
+
+ uint32_t numElements = state_->numElements();
+
+ CallInfo callInfo(alloc_, /*constructing=*/false, ins->ignoresReturnValue());
+ if (!callInfo.initForApplyArray(ins->getFunction(), ins->getThis(),
+ numElements)) {
+ oom_ = true;
+ return;
+ }
+
+ for (uint32_t i = 0; i < numElements; i++) {
+ auto* element = state_->getElement(i);
+ MOZ_ASSERT(element->type() != MIRType::MagicHole);
+
+ callInfo.initArg(i, element);
+ }
+
+ auto addUndefined = [this]() { return undefinedVal_; };
+
+ bool needsThisCheck = false;
+ bool isDOMCall = false;
+ auto* call = MakeCall(alloc_, addUndefined, callInfo, needsThisCheck,
+ ins->getSingleTarget(), isDOMCall);
+ if (!call) {
+ oom_ = true;
+ return;
+ }
+ if (!ins->maybeCrossRealm()) {
+ call->setNotCrossRealm();
+ }
+
+ ins->block()->insertBefore(ins, call);
+ ins->replaceAllUsesWith(call);
+
+ call->stealResumePoint(ins);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void ArrayMemoryView::visitConstructArray(MConstructArray* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->getElements();
+ if (!isArrayStateElements(elements)) {
+ return;
+ }
+
+ uint32_t numElements = state_->numElements();
+
+ CallInfo callInfo(alloc_, /*constructing=*/true, ins->ignoresReturnValue());
+ if (!callInfo.initForConstructArray(ins->getFunction(), ins->getThis(),
+ ins->getNewTarget(), numElements)) {
+ oom_ = true;
+ return;
+ }
+
+ for (uint32_t i = 0; i < numElements; i++) {
+ auto* element = state_->getElement(i);
+ MOZ_ASSERT(element->type() != MIRType::MagicHole);
+
+ callInfo.initArg(i, element);
+ }
+
+ auto addUndefined = [this]() { return undefinedVal_; };
+
+ bool needsThisCheck = ins->needsThisCheck();
+ bool isDOMCall = false;
+ auto* call = MakeCall(alloc_, addUndefined, callInfo, needsThisCheck,
+ ins->getSingleTarget(), isDOMCall);
+ if (!call) {
+ oom_ = true;
+ return;
+ }
+ if (!ins->maybeCrossRealm()) {
+ call->setNotCrossRealm();
+ }
+
+ ins->block()->insertBefore(ins, call);
+ ins->replaceAllUsesWith(call);
+
+ call->stealResumePoint(ins);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+static inline bool IsOptimizableArgumentsInstruction(MInstruction* ins) {
+ return ins->isCreateArgumentsObject() ||
+ ins->isCreateInlinedArgumentsObject();
+}
+
+class ArgumentsReplacer : public MDefinitionVisitorDefaultNoop {
+ private:
+ MIRGenerator* mir_;
+ MIRGraph& graph_;
+ MInstruction* args_;
+
+ bool oom_ = false;
+
+ TempAllocator& alloc() { return graph_.alloc(); }
+
+ bool isInlinedArguments() const {
+ return args_->isCreateInlinedArgumentsObject();
+ }
+
+ MNewArrayObject* inlineArgsArray(MInstruction* ins, Shape* shape,
+ uint32_t begin, uint32_t count);
+
+ void visitGuardToClass(MGuardToClass* ins);
+ void visitGuardProto(MGuardProto* ins);
+ void visitGuardArgumentsObjectFlags(MGuardArgumentsObjectFlags* ins);
+ void visitUnbox(MUnbox* ins);
+ void visitGetArgumentsObjectArg(MGetArgumentsObjectArg* ins);
+ void visitLoadArgumentsObjectArg(MLoadArgumentsObjectArg* ins);
+ void visitLoadArgumentsObjectArgHole(MLoadArgumentsObjectArgHole* ins);
+ void visitInArgumentsObjectArg(MInArgumentsObjectArg* ins);
+ void visitArgumentsObjectLength(MArgumentsObjectLength* ins);
+ void visitApplyArgsObj(MApplyArgsObj* ins);
+ void visitArrayFromArgumentsObject(MArrayFromArgumentsObject* ins);
+ void visitArgumentsSlice(MArgumentsSlice* ins);
+ void visitLoadFixedSlot(MLoadFixedSlot* ins);
+
+ bool oom() const { return oom_; }
+
+ public:
+ ArgumentsReplacer(MIRGenerator* mir, MIRGraph& graph, MInstruction* args)
+ : mir_(mir), graph_(graph), args_(args) {
+ MOZ_ASSERT(IsOptimizableArgumentsInstruction(args_));
+ }
+
+ bool escapes(MInstruction* ins, bool guardedForMapped = false);
+ bool run();
+ void assertSuccess();
+};
+
+// Returns false if the arguments object does not escape.
+bool ArgumentsReplacer::escapes(MInstruction* ins, bool guardedForMapped) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ JitSpewDef(JitSpew_Escape, "Check arguments object\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ // We can replace inlined arguments in scripts with OSR entries, but
+ // the outermost arguments object has already been allocated before
+ // we enter via OSR and can't be replaced.
+ if (ins->isCreateArgumentsObject() && graph_.osrBlock()) {
+ JitSpew(JitSpew_Escape, "Can't replace outermost OSR arguments");
+ return true;
+ }
+
+ // Check all uses to see whether they can be supported without
+ // allocating an ArgumentsObject.
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ MNode* consumer = (*i)->consumer();
+
+ // If a resume point can observe this instruction, we can only optimize
+ // if it is recoverable.
+ if (consumer->isResumePoint()) {
+ if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
+ JitSpew(JitSpew_Escape, "Observable args object cannot be recovered");
+ return true;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ switch (def->op()) {
+ case MDefinition::Opcode::GuardToClass: {
+ MGuardToClass* guard = def->toGuardToClass();
+ if (!guard->isArgumentsObjectClass()) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
+ return true;
+ }
+ bool isMapped = guard->getClass() == &MappedArgumentsObject::class_;
+ if (escapes(guard, isMapped)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardProto: {
+ if (escapes(def->toInstruction(), guardedForMapped)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardArgumentsObjectFlags: {
+ if (escapes(def->toInstruction(), guardedForMapped)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::Unbox: {
+ if (def->type() != MIRType::Object) {
+ JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
+ return true;
+ }
+ if (escapes(def->toInstruction())) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::LoadFixedSlot: {
+ MLoadFixedSlot* load = def->toLoadFixedSlot();
+
+ // We can replace arguments.callee.
+ if (load->slot() == ArgumentsObject::CALLEE_SLOT) {
+ MOZ_ASSERT(guardedForMapped);
+ continue;
+ }
+ JitSpew(JitSpew_Escape, "is escaped by unsupported LoadFixedSlot\n");
+ return true;
+ }
+
+ case MDefinition::Opcode::ApplyArgsObj: {
+ if (ins == def->toApplyArgsObj()->getThis()) {
+ JitSpew(JitSpew_Escape, "is escaped as |this| arg of ApplyArgsObj\n");
+ return true;
+ }
+ MOZ_ASSERT(ins == def->toApplyArgsObj()->getArgsObj());
+ break;
+ }
+
+ // This is a replaceable consumer.
+ case MDefinition::Opcode::ArgumentsObjectLength:
+ case MDefinition::Opcode::GetArgumentsObjectArg:
+ case MDefinition::Opcode::LoadArgumentsObjectArg:
+ case MDefinition::Opcode::LoadArgumentsObjectArgHole:
+ case MDefinition::Opcode::InArgumentsObjectArg:
+ case MDefinition::Opcode::ArrayFromArgumentsObject:
+ case MDefinition::Opcode::ArgumentsSlice:
+ break;
+
+ // This instruction is a no-op used to test that scalar replacement
+ // is working as expected.
+ case MDefinition::Opcode::AssertRecoveredOnBailout:
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+
+ JitSpew(JitSpew_Escape, "ArgumentsObject is not escaped");
+ return false;
+}
+
+// Replacing the arguments object is simpler than replacing an object
+// or array, because the arguments object does not change state.
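+// In particular, unlike ObjectMemoryView and ArrayMemoryView, there is no
+// per-block state to maintain and no phis to insert; each supported use is
+// simply rewritten in place.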
+bool ArgumentsReplacer::run() {
+ MBasicBlock* startBlock = args_->block();
+
+ // Iterate over each basic block.
+ for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
+ block != graph_.rpoEnd(); block++) {
+ if (mir_->shouldCancel("Scalar replacement of Arguments Object")) {
+ return false;
+ }
+
+ // Iterates over phis and instructions.
+ // We do not have to visit resume points. Any resume points that capture
+    // the arguments object will be handled by the Sink pass.
+ for (MDefinitionIterator iter(*block); iter;) {
+ // Increment the iterator before visiting the instruction, as the
+ // visit function might discard itself from the basic block.
+ MDefinition* def = *iter++;
+ switch (def->op()) {
+#define MIR_OP(op) \
+ case MDefinition::Opcode::op: \
+ visit##op(def->to##op()); \
+ break;
+ MIR_OPCODE_LIST(MIR_OP)
+#undef MIR_OP
+ }
+ if (!graph_.alloc().ensureBallast()) {
+ return false;
+ }
+ if (oom()) {
+ return false;
+ }
+ }
+ }
+
+ assertSuccess();
+ return true;
+}
+
+void ArgumentsReplacer::assertSuccess() {
+ MOZ_ASSERT(args_->canRecoverOnBailout());
+ MOZ_ASSERT(!args_->hasLiveDefUses());
+}
+
+void ArgumentsReplacer::visitGuardToClass(MGuardToClass* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != args_) {
+ return;
+ }
+ MOZ_ASSERT(ins->isArgumentsObjectClass());
+
+ // Replace the guard with the args object.
+ ins->replaceAllUsesWith(args_);
+
+ // Remove the guard.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitGuardProto(MGuardProto* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != args_) {
+ return;
+ }
+
+  // The prototype can only be changed through explicit operations, for example
+  // by calling |Reflect.setPrototypeOf|. We have already determined that the
+  // args object doesn't escape, so its prototype can't be mutated.
+
+ // Replace the guard with the args object.
+ ins->replaceAllUsesWith(args_);
+
+ // Remove the guard.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitGuardArgumentsObjectFlags(
+ MGuardArgumentsObjectFlags* ins) {
+ // Skip other arguments objects.
+ if (ins->argsObject() != args_) {
+ return;
+ }
+
+#ifdef DEBUG
+ // Each *_OVERRIDDEN_BIT can only be set by setting or deleting a
+ // property of the args object. We have already determined that the
+ // args object doesn't escape, so its properties can't be mutated.
+ //
+ // FORWARDED_ARGUMENTS_BIT is set if any mapped argument is closed
+ // over, which is an immutable property of the script. Because we
+ // are replacing the args object for a known script, we can check
+ // the flag once, which is done when we first attach the CacheIR,
+ // and rely on it. (Note that this wouldn't be true if we didn't
+ // know the origin of args_, because it could be passed in from
+ // another function.)
+ uint32_t supportedBits = ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
+ ArgumentsObject::ITERATOR_OVERRIDDEN_BIT |
+ ArgumentsObject::ELEMENT_OVERRIDDEN_BIT |
+ ArgumentsObject::CALLEE_OVERRIDDEN_BIT |
+ ArgumentsObject::FORWARDED_ARGUMENTS_BIT;
+
+ MOZ_ASSERT((ins->flags() & ~supportedBits) == 0);
+ MOZ_ASSERT_IF(ins->flags() & ArgumentsObject::FORWARDED_ARGUMENTS_BIT,
+ !args_->block()->info().anyFormalIsForwarded());
+#endif
+
+ // Replace the guard with the args object.
+ ins->replaceAllUsesWith(args_);
+
+ // Remove the guard.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitUnbox(MUnbox* ins) {
+ // Skip unrelated unboxes.
+ if (ins->getOperand(0) != args_) {
+ return;
+ }
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ // Replace the unbox with the args object.
+ ins->replaceAllUsesWith(args_);
+
+ // Remove the unbox.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitGetArgumentsObjectArg(
+ MGetArgumentsObjectArg* ins) {
+ // Skip other arguments objects.
+ if (ins->argsObject() != args_) {
+ return;
+ }
+
+ // We don't support setting arguments in ArgumentsReplacer::escapes,
+ // so we can load the initial value of the argument without worrying
+ // about it being stale.
+ MDefinition* getArg;
+ if (isInlinedArguments()) {
+ // Inlined frames have direct access to the actual arguments.
+ auto* actualArgs = args_->toCreateInlinedArgumentsObject();
+ if (ins->argno() < actualArgs->numActuals()) {
+ getArg = actualArgs->getArg(ins->argno());
+ } else {
+ // Omitted arguments are not mapped to the arguments object, and
+ // will always be undefined.
+ auto* undef = MConstant::New(alloc(), UndefinedValue());
+ ins->block()->insertBefore(ins, undef);
+ getArg = undef;
+ }
+ } else {
+ // Load the argument from the frame.
+ auto* index = MConstant::New(alloc(), Int32Value(ins->argno()));
+ ins->block()->insertBefore(ins, index);
+
+ auto* loadArg = MGetFrameArgument::New(alloc(), index);
+ ins->block()->insertBefore(ins, loadArg);
+ getArg = loadArg;
+ }
+ ins->replaceAllUsesWith(getArg);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitLoadArgumentsObjectArg(
+ MLoadArgumentsObjectArg* ins) {
+ // Skip other arguments objects.
+ if (ins->argsObject() != args_) {
+ return;
+ }
+
+ MDefinition* index = ins->index();
+
+ MInstruction* loadArg;
+ if (isInlinedArguments()) {
+ auto* actualArgs = args_->toCreateInlinedArgumentsObject();
+
+ // Insert bounds check.
+ auto* length =
+ MConstant::New(alloc(), Int32Value(actualArgs->numActuals()));
+ ins->block()->insertBefore(ins, length);
+
+ MInstruction* check = MBoundsCheck::New(alloc(), index, length);
+ check->setBailoutKind(ins->bailoutKind());
+ ins->block()->insertBefore(ins, check);
+
+ if (mir_->outerInfo().hadBoundsCheckBailout()) {
+ check->setNotMovable();
+ }
+
+ loadArg = MGetInlinedArgument::New(alloc(), check, actualArgs);
+ if (!loadArg) {
+ oom_ = true;
+ return;
+ }
+ } else {
+ // Insert bounds check.
+ auto* length = MArgumentsLength::New(alloc());
+ ins->block()->insertBefore(ins, length);
+
+ MInstruction* check = MBoundsCheck::New(alloc(), index, length);
+ check->setBailoutKind(ins->bailoutKind());
+ ins->block()->insertBefore(ins, check);
+
+ if (mir_->outerInfo().hadBoundsCheckBailout()) {
+ check->setNotMovable();
+ }
+
+ if (JitOptions.spectreIndexMasking) {
+ check = MSpectreMaskIndex::New(alloc(), check, length);
+ ins->block()->insertBefore(ins, check);
+ }
+
+ loadArg = MGetFrameArgument::New(alloc(), check);
+ }
+ ins->block()->insertBefore(ins, loadArg);
+ ins->replaceAllUsesWith(loadArg);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitLoadArgumentsObjectArgHole(
+ MLoadArgumentsObjectArgHole* ins) {
+ // Skip other arguments objects.
+ if (ins->argsObject() != args_) {
+ return;
+ }
+
+ MDefinition* index = ins->index();
+
+ MInstruction* loadArg;
+ if (isInlinedArguments()) {
+ auto* actualArgs = args_->toCreateInlinedArgumentsObject();
+
+ loadArg = MGetInlinedArgumentHole::New(alloc(), index, actualArgs);
+ if (!loadArg) {
+ oom_ = true;
+ return;
+ }
+ } else {
+ auto* length = MArgumentsLength::New(alloc());
+ ins->block()->insertBefore(ins, length);
+
+ loadArg = MGetFrameArgumentHole::New(alloc(), index, length);
+ }
+ loadArg->setBailoutKind(ins->bailoutKind());
+ ins->block()->insertBefore(ins, loadArg);
+ ins->replaceAllUsesWith(loadArg);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitInArgumentsObjectArg(MInArgumentsObjectArg* ins) {
+ // Skip other arguments objects.
+ if (ins->argsObject() != args_) {
+ return;
+ }
+
+ MDefinition* index = ins->index();
+
+ // Ensure the index is non-negative.
+ auto* guardedIndex = MGuardInt32IsNonNegative::New(alloc(), index);
+ guardedIndex->setBailoutKind(ins->bailoutKind());
+ ins->block()->insertBefore(ins, guardedIndex);
+
+ MInstruction* length;
+ if (isInlinedArguments()) {
+ uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
+ length = MConstant::New(alloc(), Int32Value(argc));
+ } else {
+ length = MArgumentsLength::New(alloc());
+ }
+ ins->block()->insertBefore(ins, length);
+
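+  // The arguments object is known to be unmodified (it would otherwise have
+  // been treated as escaping), so |index in arguments| reduces to the bounds
+  // test |index < length|.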
+ auto* compare = MCompare::New(alloc(), guardedIndex, length, JSOp::Lt,
+ MCompare::Compare_Int32);
+ ins->block()->insertBefore(ins, compare);
+ ins->replaceAllUsesWith(compare);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitArgumentsObjectLength(
+ MArgumentsObjectLength* ins) {
+ // Skip other arguments objects.
+ if (ins->argsObject() != args_) {
+ return;
+ }
+
+ MInstruction* length;
+ if (isInlinedArguments()) {
+ uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
+ length = MConstant::New(alloc(), Int32Value(argc));
+ } else {
+ length = MArgumentsLength::New(alloc());
+ }
+ ins->block()->insertBefore(ins, length);
+ ins->replaceAllUsesWith(length);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitApplyArgsObj(MApplyArgsObj* ins) {
+ // Skip other arguments objects.
+ if (ins->getArgsObj() != args_) {
+ return;
+ }
+
+ MInstruction* newIns;
+ if (isInlinedArguments()) {
+ auto* actualArgs = args_->toCreateInlinedArgumentsObject();
+ CallInfo callInfo(alloc(), /*constructing=*/false,
+ ins->ignoresReturnValue());
+
+ callInfo.initForApplyInlinedArgs(ins->getFunction(), ins->getThis(),
+ actualArgs->numActuals());
+ for (uint32_t i = 0; i < actualArgs->numActuals(); i++) {
+ callInfo.initArg(i, actualArgs->getArg(i));
+ }
+
+ auto addUndefined = [this, &ins]() -> MConstant* {
+ MConstant* undef = MConstant::New(alloc(), UndefinedValue());
+ ins->block()->insertBefore(ins, undef);
+ return undef;
+ };
+
+ bool needsThisCheck = false;
+ bool isDOMCall = false;
+ auto* call = MakeCall(alloc(), addUndefined, callInfo, needsThisCheck,
+ ins->getSingleTarget(), isDOMCall);
+ if (!call) {
+ oom_ = true;
+ return;
+ }
+ if (!ins->maybeCrossRealm()) {
+ call->setNotCrossRealm();
+ }
+ newIns = call;
+ } else {
+ auto* numArgs = MArgumentsLength::New(alloc());
+ ins->block()->insertBefore(ins, numArgs);
+
+ // TODO: Should we rename MApplyArgs?
+ auto* apply = MApplyArgs::New(alloc(), ins->getSingleTarget(),
+ ins->getFunction(), numArgs, ins->getThis());
+ apply->setBailoutKind(ins->bailoutKind());
+ if (!ins->maybeCrossRealm()) {
+ apply->setNotCrossRealm();
+ }
+ if (ins->ignoresReturnValue()) {
+ apply->setIgnoresReturnValue();
+ }
+ newIns = apply;
+ }
+
+ ins->block()->insertBefore(ins, newIns);
+ ins->replaceAllUsesWith(newIns);
+
+ newIns->stealResumePoint(ins);
+ ins->block()->discard(ins);
+}
+
+MNewArrayObject* ArgumentsReplacer::inlineArgsArray(MInstruction* ins,
+ Shape* shape,
+ uint32_t begin,
+ uint32_t count) {
+ auto* actualArgs = args_->toCreateInlinedArgumentsObject();
+
+ // Contrary to |WarpBuilder::build_Rest()|, we can always create
+ // MNewArrayObject, because we're guaranteed to have a shape and all
+ // arguments can be stored into fixed elements.
+ static_assert(
+ gc::CanUseFixedElementsForArray(ArgumentsObject::MaxInlinedArgs));
+
+ gc::Heap heap = gc::Heap::Default;
+
+ // Allocate an array of the correct size.
+ auto* shapeConstant = MConstant::NewShape(alloc(), shape);
+ ins->block()->insertBefore(ins, shapeConstant);
+
+ auto* newArray = MNewArrayObject::New(alloc(), shapeConstant, count, heap);
+ ins->block()->insertBefore(ins, newArray);
+
+ if (count) {
+ auto* elements = MElements::New(alloc(), newArray);
+ ins->block()->insertBefore(ins, elements);
+
+ MConstant* index = nullptr;
+ for (uint32_t i = 0; i < count; i++) {
+ index = MConstant::New(alloc(), Int32Value(i));
+ ins->block()->insertBefore(ins, index);
+
+ MDefinition* arg = actualArgs->getArg(begin + i);
+ auto* store = MStoreElement::NewUnbarriered(alloc(), elements, index, arg,
+ /* needsHoleCheck = */ false);
+ ins->block()->insertBefore(ins, store);
+
+ auto* barrier = MPostWriteBarrier::New(alloc(), newArray, arg);
+ ins->block()->insertBefore(ins, barrier);
+ }
+
+ auto* initLength = MSetInitializedLength::New(alloc(), elements, index);
+ ins->block()->insertBefore(ins, initLength);
+ }
+
+ return newArray;
+}
+
+void ArgumentsReplacer::visitArrayFromArgumentsObject(
+ MArrayFromArgumentsObject* ins) {
+ // Skip other arguments objects.
+ if (ins->argsObject() != args_) {
+ return;
+ }
+
+  // We can replace `arguments` only because we've verified that the `arguments`
+  // object hasn't been modified in any way. This implies that the arguments
+  // stored in the stack frame haven't been changed either.
+  //
+  // The idea for replacing `arguments` in spread calls like `f(...arguments)`
+  // is as follows: we replace |MArrayFromArgumentsObject| with the same
+  // instructions we emit when building a rest-array object, cf.
+  // |WarpBuilder::build_Rest()|. In a subsequent step, scalar replacement will
+  // then replace these new instructions themselves.
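+  //
+  // For example, with two inlined actuals |x| and |y| this emits (roughly)
+  // MNewArrayObject(shape, 2) plus an MStoreElement for each of |x| and |y|;
+  // see |inlineArgsArray| above for the exact instruction sequence.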
+
+ Shape* shape = ins->shape();
+ MOZ_ASSERT(shape);
+
+ MDefinition* replacement;
+ if (isInlinedArguments()) {
+ auto* actualArgs = args_->toCreateInlinedArgumentsObject();
+ uint32_t numActuals = actualArgs->numActuals();
+ MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
+
+ replacement = inlineArgsArray(ins, shape, 0, numActuals);
+ } else {
+ // We can use |MRest| to read all arguments, because we've guaranteed that
+ // the arguments stored in the stack frame haven't changed; see the comment
+ // at the start of this method.
+
+ auto* numActuals = MArgumentsLength::New(alloc());
+ ins->block()->insertBefore(ins, numActuals);
+
+ // Set |numFormals| to zero to read all arguments, including any formals.
+ uint32_t numFormals = 0;
+
+ auto* rest = MRest::New(alloc(), numActuals, numFormals, shape);
+ ins->block()->insertBefore(ins, rest);
+
+ replacement = rest;
+ }
+
+ ins->replaceAllUsesWith(replacement);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
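+// Clamp a constant slice term to [0, length], following the
+// |Array.prototype.slice| convention that negative values count back from the
+// end. For example, with length == 4: -1 -> 3, -10 -> 0, 2 -> 2, 10 -> 4.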
+static uint32_t NormalizeSlice(MDefinition* def, uint32_t length) {
+ int32_t value = def->toConstant()->toInt32();
+ if (value < 0) {
+ return std::max(int32_t(uint32_t(value) + length), 0);
+ }
+ return std::min(uint32_t(value), length);
+}
+
+void ArgumentsReplacer::visitArgumentsSlice(MArgumentsSlice* ins) {
+ // Skip other arguments objects.
+ if (ins->object() != args_) {
+ return;
+ }
+
+  // Optimize the common pattern |Array.prototype.slice.call(arguments, begin)|,
+ // where |begin| is a non-negative, constant int32.
+ //
+ // An absent end-index is replaced by |arguments.length|, so we try to match
+ // |Array.prototype.slice.call(arguments, begin, arguments.length)|.
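+  //
+  // For example, in the non-inlined case |slice.call(arguments, 1)| is
+  // rewritten into |MRest(numActuals, /* numFormals = */ 1, shape)|, which can
+  // itself be scalar replaced.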
+ if (isInlinedArguments()) {
+    // When the arguments object is inlined, |arguments.length| has been
+    // replaced by a constant.
+ if (ins->begin()->isConstant() && ins->end()->isConstant()) {
+ auto* actualArgs = args_->toCreateInlinedArgumentsObject();
+ uint32_t numActuals = actualArgs->numActuals();
+ MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
+
+ uint32_t begin = NormalizeSlice(ins->begin(), numActuals);
+ uint32_t end = NormalizeSlice(ins->end(), numActuals);
+ uint32_t count = end > begin ? end - begin : 0;
+ MOZ_ASSERT(count <= numActuals);
+
+ Shape* shape = ins->templateObj()->shape();
+ auto* newArray = inlineArgsArray(ins, shape, begin, count);
+
+ ins->replaceAllUsesWith(newArray);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+ return;
+ }
+ } else {
+ // Otherwise |arguments.length| is emitted as MArgumentsLength.
+ if (ins->begin()->isConstant() && ins->end()->isArgumentsLength()) {
+ int32_t begin = ins->begin()->toConstant()->toInt32();
+ if (begin >= 0) {
+ auto* numActuals = MArgumentsLength::New(alloc());
+ ins->block()->insertBefore(ins, numActuals);
+
+ // Set |numFormals| to read all arguments starting at |begin|.
+ uint32_t numFormals = begin;
+
+ Shape* shape = ins->templateObj()->shape();
+
+ // Use MRest because it can be scalar replaced, which enables further
+ // optimizations.
+ auto* rest = MRest::New(alloc(), numActuals, numFormals, shape);
+ ins->block()->insertBefore(ins, rest);
+
+ ins->replaceAllUsesWith(rest);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+ return;
+ }
+ }
+ }
+
+ MInstruction* numArgs;
+ if (isInlinedArguments()) {
+ uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
+ numArgs = MConstant::New(alloc(), Int32Value(argc));
+ } else {
+ numArgs = MArgumentsLength::New(alloc());
+ }
+ ins->block()->insertBefore(ins, numArgs);
+
+ auto* begin = MNormalizeSliceTerm::New(alloc(), ins->begin(), numArgs);
+ ins->block()->insertBefore(ins, begin);
+
+ auto* end = MNormalizeSliceTerm::New(alloc(), ins->end(), numArgs);
+ ins->block()->insertBefore(ins, end);
+
+ bool isMax = false;
+ auto* beginMin = MMinMax::New(alloc(), begin, end, MIRType::Int32, isMax);
+ ins->block()->insertBefore(ins, beginMin);
+
+ // Safe to truncate because both operands are positive and end >= beginMin.
+ auto* count = MSub::New(alloc(), end, beginMin, MIRType::Int32);
+ count->setTruncateKind(TruncateKind::Truncate);
+ ins->block()->insertBefore(ins, count);
+
+ MInstruction* replacement;
+ if (isInlinedArguments()) {
+ auto* actualArgs = args_->toCreateInlinedArgumentsObject();
+ replacement =
+ MInlineArgumentsSlice::New(alloc(), beginMin, count, actualArgs,
+ ins->templateObj(), ins->initialHeap());
+ } else {
+ replacement = MFrameArgumentsSlice::New(
+ alloc(), beginMin, count, ins->templateObj(), ins->initialHeap());
+ }
+ ins->block()->insertBefore(ins, replacement);
+
+ ins->replaceAllUsesWith(replacement);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void ArgumentsReplacer::visitLoadFixedSlot(MLoadFixedSlot* ins) {
+ // Skip other arguments objects.
+ if (ins->object() != args_) {
+ return;
+ }
+
+ MOZ_ASSERT(ins->slot() == ArgumentsObject::CALLEE_SLOT);
+
+ MDefinition* replacement;
+ if (isInlinedArguments()) {
+ replacement = args_->toCreateInlinedArgumentsObject()->getCallee();
+ } else {
+ auto* callee = MCallee::New(alloc());
+ ins->block()->insertBefore(ins, callee);
+ replacement = callee;
+ }
+ ins->replaceAllUsesWith(replacement);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+static inline bool IsOptimizableRestInstruction(MInstruction* ins) {
+ return ins->isRest();
+}
+
+class RestReplacer : public MDefinitionVisitorDefaultNoop {
+ private:
+ MIRGenerator* mir_;
+ MIRGraph& graph_;
+ MInstruction* rest_;
+
+ TempAllocator& alloc() { return graph_.alloc(); }
+ MRest* rest() const { return rest_->toRest(); }
+
+ bool isRestElements(MDefinition* elements);
+ void discardInstruction(MInstruction* ins, MDefinition* elements);
+ MDefinition* restLength(MInstruction* ins);
+ void visitLength(MInstruction* ins, MDefinition* elements);
+
+ void visitGuardToClass(MGuardToClass* ins);
+ void visitGuardShape(MGuardShape* ins);
+ void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
+ void visitUnbox(MUnbox* ins);
+ void visitCompare(MCompare* ins);
+ void visitLoadElement(MLoadElement* ins);
+ void visitArrayLength(MArrayLength* ins);
+ void visitInitializedLength(MInitializedLength* ins);
+ void visitApplyArray(MApplyArray* ins);
+ void visitConstructArray(MConstructArray* ins);
+
+ bool escapes(MElements* ins);
+
+ public:
+ RestReplacer(MIRGenerator* mir, MIRGraph& graph, MInstruction* rest)
+ : mir_(mir), graph_(graph), rest_(rest) {
+ MOZ_ASSERT(IsOptimizableRestInstruction(rest_));
+ }
+
+ bool escapes(MInstruction* ins);
+ bool run();
+ void assertSuccess();
+};
+
+// Returns false if the rest array object does not escape.
+bool RestReplacer::escapes(MInstruction* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ JitSpewDef(JitSpew_Escape, "Check rest array\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ // We can replace rest arrays in scripts with OSR entries, but the outermost
+ // rest object has already been allocated before we enter via OSR and can't be
+ // replaced.
+ // See also the same restriction when replacing |arguments|.
+ if (graph_.osrBlock()) {
+ JitSpew(JitSpew_Escape, "Can't replace outermost OSR rest array");
+ return true;
+ }
+
+ // Check all uses to see whether they can be supported without allocating an
+ // ArrayObject for the rest parameter.
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ MNode* consumer = (*i)->consumer();
+
+ // If a resume point can observe this instruction, we can only optimize
+ // if it is recoverable.
+ if (consumer->isResumePoint()) {
+ if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
+ JitSpew(JitSpew_Escape, "Observable rest array cannot be recovered");
+ return true;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ switch (def->op()) {
+ case MDefinition::Opcode::Elements: {
+ auto* elem = def->toElements();
+ MOZ_ASSERT(elem->object() == ins);
+ if (escapes(elem)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardShape: {
+ const Shape* shape = rest()->shape();
+ if (!shape) {
+ JitSpew(JitSpew_Escape, "No shape defined.");
+ return true;
+ }
+
+ auto* guard = def->toGuardShape();
+ if (shape != guard->shape()) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", def);
+ return true;
+ }
+ if (escapes(guard)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardToClass: {
+ auto* guard = def->toGuardToClass();
+ if (guard->getClass() != &ArrayObject::class_) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", def);
+ return true;
+ }
+ if (escapes(guard)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardArrayIsPacked: {
+ // Rest arrays are always packed as long as they aren't modified.
+ auto* guard = def->toGuardArrayIsPacked();
+ if (escapes(guard)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::Unbox: {
+ if (def->type() != MIRType::Object) {
+ JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
+ return true;
+ }
+ if (escapes(def->toInstruction())) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ // This instruction is supported for |JSOp::OptimizeSpreadCall|.
+ case MDefinition::Opcode::Compare: {
+ bool canFold;
+ if (!def->toCompare()->tryFold(&canFold)) {
+ JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
+ return true;
+ }
+ break;
+ }
+
+ // This instruction is a no-op used to test that scalar replacement is
+ // working as expected.
+ case MDefinition::Opcode::AssertRecoveredOnBailout:
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+
+ JitSpew(JitSpew_Escape, "Rest array object is not escaped");
+ return false;
+}
+
+bool RestReplacer::escapes(MElements* ins) {
+ JitSpewDef(JitSpew_Escape, "Check rest array elements\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+    // A MIRType::Elements value cannot be captured in a resume point, as it
+    // does not represent a value allocation.
+ MDefinition* def = (*i)->consumer()->toDefinition();
+
+ switch (def->op()) {
+ case MDefinition::Opcode::LoadElement:
+ MOZ_ASSERT(def->toLoadElement()->elements() == ins);
+ break;
+
+ case MDefinition::Opcode::ArrayLength:
+ MOZ_ASSERT(def->toArrayLength()->elements() == ins);
+ break;
+
+ case MDefinition::Opcode::InitializedLength:
+ MOZ_ASSERT(def->toInitializedLength()->elements() == ins);
+ break;
+
+ case MDefinition::Opcode::ApplyArray:
+ MOZ_ASSERT(def->toApplyArray()->getElements() == ins);
+ break;
+
+ case MDefinition::Opcode::ConstructArray:
+ MOZ_ASSERT(def->toConstructArray()->getElements() == ins);
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+
+ JitSpew(JitSpew_Escape, "Rest array object is not escaped");
+ return false;
+}
+
+// Replacing the rest array object is simpler than replacing an object or array,
+// because the rest array object does not change state.
+bool RestReplacer::run() {
+ MBasicBlock* startBlock = rest_->block();
+
+ // Iterate over each basic block.
+ for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
+ block != graph_.rpoEnd(); block++) {
+ if (mir_->shouldCancel("Scalar replacement of rest array object")) {
+ return false;
+ }
+
+ // Iterates over phis and instructions.
+ // We do not have to visit resume points. Any resume points that capture the
+ // rest array object will be handled by the Sink pass.
+ for (MDefinitionIterator iter(*block); iter;) {
+ // Increment the iterator before visiting the instruction, as the visit
+ // function might discard itself from the basic block.
+ MDefinition* def = *iter++;
+ switch (def->op()) {
+#define MIR_OP(op) \
+ case MDefinition::Opcode::op: \
+ visit##op(def->to##op()); \
+ break;
+ MIR_OPCODE_LIST(MIR_OP)
+#undef MIR_OP
+ }
+ if (!graph_.alloc().ensureBallast()) {
+ return false;
+ }
+ }
+ }
+
+ assertSuccess();
+ return true;
+}
+
+void RestReplacer::assertSuccess() {
+ MOZ_ASSERT(rest_->canRecoverOnBailout());
+ MOZ_ASSERT(!rest_->hasLiveDefUses());
+}
+
+bool RestReplacer::isRestElements(MDefinition* elements) {
+ return elements->isElements() && elements->toElements()->object() == rest_;
+}
+
+void RestReplacer::discardInstruction(MInstruction* ins,
+ MDefinition* elements) {
+ MOZ_ASSERT(elements->isElements());
+ ins->block()->discard(ins);
+ if (!elements->hasLiveDefUses()) {
+ elements->block()->discard(elements->toInstruction());
+ }
+}
+
+void RestReplacer::visitGuardToClass(MGuardToClass* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != rest_) {
+ return;
+ }
+ MOZ_ASSERT(ins->getClass() == &ArrayObject::class_);
+
+ // Replace the guard with the array object.
+ ins->replaceAllUsesWith(rest_);
+
+ // Remove the guard.
+ ins->block()->discard(ins);
+}
+
+void RestReplacer::visitGuardShape(MGuardShape* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != rest_) {
+ return;
+ }
+
+ // Replace the guard with the array object.
+ ins->replaceAllUsesWith(rest_);
+
+ // Remove the guard.
+ ins->block()->discard(ins);
+}
+
+void RestReplacer::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
+ // Skip guards on other objects.
+ if (ins->array() != rest_) {
+ return;
+ }
+
+ // Replace the guard by its object.
+ ins->replaceAllUsesWith(rest_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void RestReplacer::visitUnbox(MUnbox* ins) {
+ // Skip unrelated unboxes.
+ if (ins->input() != rest_) {
+ return;
+ }
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ // Replace the unbox with the array object.
+ ins->replaceAllUsesWith(rest_);
+
+ // Remove the unbox.
+ ins->block()->discard(ins);
+}
+
+void RestReplacer::visitCompare(MCompare* ins) {
+ // Skip unrelated comparisons.
+ if (ins->lhs() != rest_ && ins->rhs() != rest_) {
+ return;
+ }
+
+ bool folded;
+ MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
+
+ auto* cst = MConstant::New(alloc(), BooleanValue(folded));
+ ins->block()->insertBefore(ins, cst);
+
+ // Replace the comparison with a constant.
+ ins->replaceAllUsesWith(cst);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void RestReplacer::visitLoadElement(MLoadElement* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isRestElements(elements)) {
+ return;
+ }
+
+ MDefinition* index = ins->index();
+
+ // Adjust the index to skip any extra formals.
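+  // For example, for |function f(a, b, ...rest)|, rest[i] corresponds to frame
+  // argument |i + 2|.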
+ if (uint32_t formals = rest()->numFormals()) {
+ auto* numFormals = MConstant::New(alloc(), Int32Value(formals));
+ ins->block()->insertBefore(ins, numFormals);
+
+ auto* add = MAdd::New(alloc(), index, numFormals, TruncateKind::Truncate);
+ ins->block()->insertBefore(ins, add);
+
+ index = add;
+ }
+
+ auto* loadArg = MGetFrameArgument::New(alloc(), index);
+
+ ins->block()->insertBefore(ins, loadArg);
+ ins->replaceAllUsesWith(loadArg);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+MDefinition* RestReplacer::restLength(MInstruction* ins) {
+ // Compute |Math.max(numActuals - numFormals, 0)| for the rest array length.
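+  // For example, for |function f(a, ...rest)| called with three actual
+  // arguments, numActuals is 3 and numFormals is 1, so the rest length is 2.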
+
+ auto* numActuals = rest()->numActuals();
+
+ if (uint32_t formals = rest()->numFormals()) {
+ auto* numFormals = MConstant::New(alloc(), Int32Value(formals));
+ ins->block()->insertBefore(ins, numFormals);
+
+ auto* length = MSub::New(alloc(), numActuals, numFormals, MIRType::Int32);
+ length->setTruncateKind(TruncateKind::Truncate);
+ ins->block()->insertBefore(ins, length);
+
+ auto* zero = MConstant::New(alloc(), Int32Value(0));
+ ins->block()->insertBefore(ins, zero);
+
+ bool isMax = true;
+ auto* minmax = MMinMax::New(alloc(), length, zero, MIRType::Int32, isMax);
+ ins->block()->insertBefore(ins, minmax);
+
+ return minmax;
+ }
+
+ return numActuals;
+}
+
+void RestReplacer::visitLength(MInstruction* ins, MDefinition* elements) {
+ MOZ_ASSERT(ins->isArrayLength() || ins->isInitializedLength());
+
+ // Skip other array objects.
+ if (!isRestElements(elements)) {
+ return;
+ }
+
+ MDefinition* replacement = restLength(ins);
+
+ ins->replaceAllUsesWith(replacement);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void RestReplacer::visitArrayLength(MArrayLength* ins) {
+ visitLength(ins, ins->elements());
+}
+
+void RestReplacer::visitInitializedLength(MInitializedLength* ins) {
+ // The initialized length of a rest array is equal to its length.
+ visitLength(ins, ins->elements());
+}
+
+void RestReplacer::visitApplyArray(MApplyArray* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->getElements();
+ if (!isRestElements(elements)) {
+ return;
+ }
+
+ auto* numActuals = restLength(ins);
+
+ auto* apply =
+ MApplyArgs::New(alloc(), ins->getSingleTarget(), ins->getFunction(),
+ numActuals, ins->getThis(), rest()->numFormals());
+ apply->setBailoutKind(ins->bailoutKind());
+ if (!ins->maybeCrossRealm()) {
+ apply->setNotCrossRealm();
+ }
+ if (ins->ignoresReturnValue()) {
+ apply->setIgnoresReturnValue();
+ }
+ ins->block()->insertBefore(ins, apply);
+
+ ins->replaceAllUsesWith(apply);
+
+ apply->stealResumePoint(ins);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+void RestReplacer::visitConstructArray(MConstructArray* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->getElements();
+ if (!isRestElements(elements)) {
+ return;
+ }
+
+ auto* numActuals = restLength(ins);
+
+ auto* construct = MConstructArgs::New(
+ alloc(), ins->getSingleTarget(), ins->getFunction(), numActuals,
+ ins->getThis(), ins->getNewTarget(), rest()->numFormals());
+ construct->setBailoutKind(ins->bailoutKind());
+ if (!ins->maybeCrossRealm()) {
+ construct->setNotCrossRealm();
+ }
+
+ ins->block()->insertBefore(ins, construct);
+ ins->replaceAllUsesWith(construct);
+
+ construct->stealResumePoint(ins);
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
+bool ScalarReplacement(MIRGenerator* mir, MIRGraph& graph) {
+ JitSpew(JitSpew_Escape, "Begin (ScalarReplacement)");
+
+ EmulateStateOf<ObjectMemoryView> replaceObject(mir, graph);
+ EmulateStateOf<ArrayMemoryView> replaceArray(mir, graph);
+ bool addedPhi = false;
+
+ for (ReversePostorderIterator block = graph.rpoBegin();
+ block != graph.rpoEnd(); block++) {
+ if (mir->shouldCancel("Scalar Replacement (main loop)")) {
+ return false;
+ }
+
+ for (MInstructionIterator ins = block->begin(); ins != block->end();
+ ins++) {
+ if (IsOptimizableObjectInstruction(*ins) &&
+ !IsObjectEscaped(*ins, *ins)) {
+ ObjectMemoryView view(graph.alloc(), *ins);
+ if (!replaceObject.run(view)) {
+ return false;
+ }
+ view.assertSuccess();
+ addedPhi = true;
+ continue;
+ }
+
+ if (IsOptimizableArrayInstruction(*ins) && !IsArrayEscaped(*ins, *ins)) {
+ ArrayMemoryView view(graph.alloc(), *ins);
+ if (!replaceArray.run(view)) {
+ return false;
+ }
+ view.assertSuccess();
+ addedPhi = true;
+ continue;
+ }
+
+ if (IsOptimizableArgumentsInstruction(*ins)) {
+ ArgumentsReplacer replacer(mir, graph, *ins);
+ if (replacer.escapes(*ins)) {
+ continue;
+ }
+ if (!replacer.run()) {
+ return false;
+ }
+ continue;
+ }
+
+ if (IsOptimizableRestInstruction(*ins)) {
+ RestReplacer replacer(mir, graph, *ins);
+ if (replacer.escapes(*ins)) {
+ continue;
+ }
+ if (!replacer.run()) {
+ return false;
+ }
+ continue;
+ }
+ }
+ }
+
+ if (addedPhi) {
+ // Phis added by Scalar Replacement are only redundant Phis which are
+ // not directly captured by any resume point but only by the MDefinition
+ // state. The conservative observability only focuses on Phis which are
+ // not used as resume points operands.
+ AssertExtendedGraphCoherency(graph);
+ if (!EliminatePhis(mir, graph, ConservativeObservability)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} /* namespace jit */
+} /* namespace js */
diff --git a/js/src/jit/ScalarReplacement.h b/js/src/jit/ScalarReplacement.h
new file mode 100644
index 0000000000..1b46604aa4
--- /dev/null
+++ b/js/src/jit/ScalarReplacement.h
@@ -0,0 +1,22 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file declares scalar replacement of objects transformation.
+#ifndef jit_ScalarReplacement_h
+#define jit_ScalarReplacement_h
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+[[nodiscard]] bool ScalarReplacement(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ScalarReplacement_h */
diff --git a/js/src/jit/ScalarTypeUtils.h b/js/src/jit/ScalarTypeUtils.h
new file mode 100644
index 0000000000..19e904d9b8
--- /dev/null
+++ b/js/src/jit/ScalarTypeUtils.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ScalarTypeUtils_h
+#define jit_ScalarTypeUtils_h
+
+#include "mozilla/CheckedInt.h"
+
+#include <stdint.h>
+
+#include "js/ScalarType.h"
+
+namespace js {
+namespace jit {
+
+// Compute |index * Scalar::byteSize(type) + offsetAdjustment|. If this doesn't
+// overflow and is non-negative, return true and store the result in *offset.
+// If the computation overflows or the result is negative, false is returned and
+// *offset is left unchanged.
+[[nodiscard]] inline bool ArrayOffsetFitsInInt32(int32_t index,
+ Scalar::Type type,
+ int32_t offsetAdjustment,
+ int32_t* offset) {
+ mozilla::CheckedInt<int32_t> val = index;
+ val *= Scalar::byteSize(type);
+ val += offsetAdjustment;
+ if (!val.isValid() || val.value() < 0) {
+ return false;
+ }
+
+ *offset = val.value();
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ScalarTypeUtils_h */
diff --git a/js/src/jit/ScriptFromCalleeToken.h b/js/src/jit/ScriptFromCalleeToken.h
new file mode 100644
index 0000000000..a4642893a6
--- /dev/null
+++ b/js/src/jit/ScriptFromCalleeToken.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ScriptFromCalleeToken_h
+#define jit_ScriptFromCalleeToken_h
+
+#include "mozilla/Assertions.h"
+
+#include "jit/CalleeToken.h"
+#include "js/TypeDecls.h"
+#include "vm/JSFunction.h"
+
+namespace js::jit {
+
+static inline JSScript* ScriptFromCalleeToken(CalleeToken token) {
+ switch (GetCalleeTokenTag(token)) {
+ case CalleeToken_Script:
+ return CalleeTokenToScript(token);
+ case CalleeToken_Function:
+ case CalleeToken_FunctionConstructing:
+ return CalleeTokenToFunction(token)->nonLazyScript();
+ }
+ MOZ_CRASH("invalid callee token tag");
+}
+
+JSScript* MaybeForwardedScriptFromCalleeToken(CalleeToken token);
+
+} /* namespace js::jit */
+
+#endif /* jit_ScriptFromCalleeToken_h */
diff --git a/js/src/jit/SharedICHelpers-inl.h b/js/src/jit/SharedICHelpers-inl.h
new file mode 100644
index 0000000000..346ae4a52d
--- /dev/null
+++ b/js/src/jit/SharedICHelpers-inl.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_SharedICHelpers_inl_h
+#define jit_SharedICHelpers_inl_h
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/SharedICHelpers-x86-inl.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/SharedICHelpers-x64-inl.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/SharedICHelpers-arm-inl.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/SharedICHelpers-arm64-inl.h"
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+# include "jit/mips-shared/SharedICHelpers-mips-shared-inl.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/SharedICHelpers-loong64-inl.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/SharedICHelpers-riscv64-inl.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/SharedICHelpers-wasm32-inl.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/SharedICHelpers-none-inl.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {} // namespace jit
+} // namespace js
+
+#endif /* jit_SharedICHelpers_inl_h */
diff --git a/js/src/jit/SharedICHelpers.h b/js/src/jit/SharedICHelpers.h
new file mode 100644
index 0000000000..682ea73b5f
--- /dev/null
+++ b/js/src/jit/SharedICHelpers.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_SharedICHelpers_h
+#define jit_SharedICHelpers_h
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/SharedICHelpers-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/SharedICHelpers-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/SharedICHelpers-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/SharedICHelpers-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+# include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/SharedICHelpers-loong64.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/SharedICHelpers-riscv64.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/SharedICHelpers-wasm32.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/SharedICHelpers-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {} // namespace jit
+} // namespace js
+
+#endif /* jit_SharedICHelpers_h */
diff --git a/js/src/jit/SharedICRegisters.h b/js/src/jit/SharedICRegisters.h
new file mode 100644
index 0000000000..122785ce75
--- /dev/null
+++ b/js/src/jit/SharedICRegisters.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_SharedICRegisters_h
+#define jit_SharedICRegisters_h
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/SharedICRegisters-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/SharedICRegisters-x64.h"
+#elif defined(JS_CODEGEN_ARM)
+# include "jit/arm/SharedICRegisters-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/SharedICRegisters-arm64.h"
+#elif defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/SharedICRegisters-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/SharedICRegisters-mips64.h"
+#elif defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/SharedICRegisters-loong64.h"
+#elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/SharedICRegisters-riscv64.h"
+#elif defined(JS_CODEGEN_WASM32)
+# include "jit/wasm32/SharedICRegisters-wasm32.h"
+#elif defined(JS_CODEGEN_NONE)
+# include "jit/none/SharedICRegisters-none.h"
+#else
+# error "Unknown architecture!"
+#endif
+
+namespace js {
+namespace jit {} // namespace jit
+} // namespace js
+
+#endif /* jit_SharedICRegisters_h */
diff --git a/js/src/jit/ShuffleAnalysis.cpp b/js/src/jit/ShuffleAnalysis.cpp
new file mode 100644
index 0000000000..20158b18d4
--- /dev/null
+++ b/js/src/jit/ShuffleAnalysis.cpp
@@ -0,0 +1,747 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/ShuffleAnalysis.h"
+#include "jit/MIR.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+#ifdef ENABLE_WASM_SIMD
+
+// Specialization analysis for SIMD operations. This is still x86-centric but
+// generalizes fairly easily to other architectures.
+
+// Optimization of v8x16.shuffle. The general byte shuffle+blend is very
+// expensive (equivalent to at least a dozen instructions), and we want to avoid
+// that if we can. So look for special cases - there are many.
+//
+// The strategy is to sort the operation into one of three buckets depending
+// on the shuffle pattern and inputs:
+//
+// - single operand; shuffles on these values are rotations, reversals,
+// transpositions, and general permutations
+// - single-operand-with-interesting-constant (especially zero); shuffles on
+// these values are often byte shift or scatter operations
+// - dual operand; shuffles on these operations are blends, catenated
+// shifts, and (in the worst case) general shuffle+blends
+//
+// We're not trying to solve the general problem, only to lower reasonably
+// expressed patterns for common operations. Producers that emit dense and
+// convoluted patterns will end up with the general byte shuffle; producers
+// that emit simpler patterns that map easily to hardware will get faster code.
+//
+// In particular, these matchers do not try to combine transformations, so a
+// shuffle that optimally is lowered to rotate + permute32x4 + rotate, say, is
+// usually going to end up as a general byte shuffle.
+
+// Reduce a 0..31 byte mask to a 0..15 word mask if possible and if so return
+// true, updating *control.
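+//
+// For example, the byte mask {2,3, 0,1, 6,7, 4,5, 10,11, 8,9, 14,15, 12,13}
+// reduces to the word mask {1, 0, 3, 2, 5, 4, 7, 6}. The dword and qword
+// reductions below follow the same pattern at wider granularity.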
+static bool ByteMaskToWordMask(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ int16_t controlWords[8];
+ for (int i = 0; i < 16; i += 2) {
+ if (!((lanes[i] & 1) == 0 && lanes[i + 1] == lanes[i] + 1)) {
+ return false;
+ }
+ controlWords[i / 2] = int16_t(lanes[i] / 2);
+ }
+ *control = SimdConstant::CreateX8(controlWords);
+ return true;
+}
+
+// Reduce a 0..31 byte mask to a 0..7 dword mask if possible and if so return
+// true, updating *control.
+static bool ByteMaskToDWordMask(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ int32_t controlDWords[4];
+ for (int i = 0; i < 16; i += 4) {
+ if (!((lanes[i] & 3) == 0 && lanes[i + 1] == lanes[i] + 1 &&
+ lanes[i + 2] == lanes[i] + 2 && lanes[i + 3] == lanes[i] + 3)) {
+ return false;
+ }
+ controlDWords[i / 4] = lanes[i] / 4;
+ }
+ *control = SimdConstant::CreateX4(controlDWords);
+ return true;
+}
+
+// Reduce a 0..31 byte mask to a 0..3 qword mask if possible and if so return
+// true, updating *control.
+static bool ByteMaskToQWordMask(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ int64_t controlQWords[2];
+ for (int i = 0; i < 16; i += 8) {
+ if (!((lanes[i] & 7) == 0 && lanes[i + 1] == lanes[i] + 1 &&
+ lanes[i + 2] == lanes[i] + 2 && lanes[i + 3] == lanes[i] + 3 &&
+ lanes[i + 4] == lanes[i] + 4 && lanes[i + 5] == lanes[i] + 5 &&
+ lanes[i + 6] == lanes[i] + 6 && lanes[i + 7] == lanes[i] + 7)) {
+ return false;
+ }
+ controlQWords[i / 8] = lanes[i] / 8;
+ }
+ *control = SimdConstant::CreateX2(controlQWords);
+ return true;
+}
+
+// Skip across consecutive values in lanes starting at i, returning the index
+// after the last element. Lane values must be <= len-1 ("masked").
+//
+// Since every element is a 1-element run, the return value is never the same as
+// the starting i.
+template <typename T>
+static int ScanIncreasingMasked(const T* lanes, int i) {
+ int len = int(16 / sizeof(T));
+ MOZ_ASSERT(i < len);
+ MOZ_ASSERT(lanes[i] <= len - 1);
+ i++;
+ while (i < len && lanes[i] == lanes[i - 1] + 1) {
+ MOZ_ASSERT(lanes[i] <= len - 1);
+ i++;
+ }
+ return i;
+}
+
+// Skip across consecutive values in lanes starting at i, returning the index
+// after the last element. Lane values must be <= len*2-1 ("unmasked"); the
+// values len-1 and len are not considered consecutive.
+//
+// Since every element is a 1-element run, the return value is never the same as
+// the starting i.
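+//
+// For example, with byte lanes {14, 15, 16, 17, ...} and i == 0, the scan
+// returns 2: 15 (the last lhs lane) and 16 (the first rhs lane) do not form a
+// run.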
+template <typename T>
+static int ScanIncreasingUnmasked(const T* lanes, int i) {
+ int len = int(16 / sizeof(T));
+ MOZ_ASSERT(i < len);
+ if (lanes[i] < len) {
+ i++;
+ while (i < len && lanes[i] < len && lanes[i - 1] == lanes[i] - 1) {
+ i++;
+ }
+ } else {
+ i++;
+ while (i < len && lanes[i] >= len && lanes[i - 1] == lanes[i] - 1) {
+ i++;
+ }
+ }
+ return i;
+}
+
+// Skip lanes that equal v starting at i, returning the index just beyond the
+// last of those. There is no requirement that the initial lanes[i] == v.
+template <typename T>
+static int ScanConstant(const T* lanes, int v, int i) {
+ int len = int(16 / sizeof(T));
+ MOZ_ASSERT(i <= len);
+ while (i < len && lanes[i] == v) {
+ i++;
+ }
+ return i;
+}
+
+// Mask lane values denoting rhs elements into lhs elements.
+template <typename T>
+static void MaskLanes(T* result, const T* input) {
+ int len = int(16 / sizeof(T));
+ for (int i = 0; i < len; i++) {
+ result[i] = input[i] & (len - 1);
+ }
+}
+
+// Apply a transformation to each lane value.
+template <typename T>
+static void MapLanes(T* result, const T* input, int (*f)(int)) {
+ // Hazard analysis trips on "IndirectCall: f" error.
+ // Suppress the check -- `f` is expected to be trivial here.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ int len = int(16 / sizeof(T));
+ for (int i = 0; i < len; i++) {
+ result[i] = f(input[i]);
+ }
+}
+
+// Recognize an identity permutation, assuming lanes is masked.
+template <typename T>
+static bool IsIdentity(const T* lanes) {
+ return ScanIncreasingMasked(lanes, 0) == int(16 / sizeof(T));
+}
+
+// Recognize part of an identity permutation starting at start, with
+// the first value of the permutation expected to be bias.
+template <typename T>
+static bool IsIdentity(const T* lanes, int start, int len, int bias) {
+ if (lanes[start] != bias) {
+ return false;
+ }
+ for (int i = start + 1; i < start + len; i++) {
+ if (lanes[i] != lanes[i - 1] + 1) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// We can permute by dwords if the mask is reducible to a dword mask, and in
+// this case a single PSHUFD is enough.
+static bool TryPermute32x4(SimdConstant* control) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToDWordMask(&tmp)) {
+ return false;
+ }
+ *control = tmp;
+ return true;
+}
+
+// Can we perform a byte rotate right? We can use PALIGNR. The shift count is
+// just lanes[0], and *control is unchanged.
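+//
+// For example, the mask {13,14,15, 0,1,2,...,12} is accepted, with shift count
+// lanes[0] == 13.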
+static bool TryRotateRight8x16(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ // Look for the end of the first run of consecutive bytes.
+ int i = ScanIncreasingMasked(lanes, 0);
+
+ // First run must start at a value s.t. we have a rotate if all remaining
+ // bytes are a run.
+ if (lanes[0] != 16 - i) {
+ return false;
+ }
+
+ // If we reached the end of the vector, we're done.
+ if (i == 16) {
+ return true;
+ }
+
+ // Second run must start at source lane zero.
+ if (lanes[i] != 0) {
+ return false;
+ }
+
+ // Second run must end at the end of the lane vector.
+ return ScanIncreasingMasked(lanes, i) == 16;
+}
+
+// We can permute by words if the mask is reducible to a word mask.
+static bool TryPermute16x8(SimdConstant* control) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToWordMask(&tmp)) {
+ return false;
+ }
+ *control = tmp;
+ return true;
+}
+
+// A single word lane is copied into all the other lanes: PSHUF*W + PSHUFD.
+static bool TryBroadcast16x8(SimdConstant* control) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToWordMask(&tmp)) {
+ return false;
+ }
+ const SimdConstant::I16x8& lanes = tmp.asInt16x8();
+ if (ScanConstant(lanes, lanes[0], 0) < 8) {
+ return false;
+ }
+ *control = tmp;
+ return true;
+}
+
+// A single byte lane is copied into all the other lanes: PUNPCK*BW + PSHUF*W +
+// PSHUFD.
+static bool TryBroadcast8x16(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ return ScanConstant(lanes, lanes[0], 0) >= 16;
+}
+
+template <int N>
+static bool TryReverse(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ for (int i = 0; i < 16; i++) {
+ if (lanes[i] != (i ^ (N - 1))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Look for permutations of a single operand.
+static SimdPermuteOp AnalyzePermute(SimdConstant* control) {
+ // Lane indices are input-agnostic for single-operand permutations.
+ SimdConstant::I8x16 controlBytes;
+ MaskLanes(controlBytes, control->asInt8x16());
+
+ // Get rid of no-ops immediately, so nobody else needs to check.
+ if (IsIdentity(controlBytes)) {
+ return SimdPermuteOp::MOVE;
+ }
+
+ // Default control is the masked bytes.
+ *control = SimdConstant::CreateX16(controlBytes);
+
+ // Analysis order matters here and is architecture-dependent or even
+ // microarchitecture-dependent: ideally the cheapest implementation first.
+ // The Intel manual says that the cost of a PSHUFB is about five other
+ // operations, so make that our cutoff.
+ //
+ // Word, dword, and qword reversals are handled optimally by general permutes.
+ //
+ // Byte reversals are probably best left to PSHUFB; no alternative rendition
+ // seems to reliably go below five instructions. (Discuss.)
+ //
+ // Word swaps within doublewords and dword swaps within quadwords are handled
+ // optimally by general permutes.
+ //
+ // Dword and qword broadcasts are handled by dword permute.
+
+ if (TryPermute32x4(control)) {
+ return SimdPermuteOp::PERMUTE_32x4;
+ }
+ if (TryRotateRight8x16(control)) {
+ return SimdPermuteOp::ROTATE_RIGHT_8x16;
+ }
+ if (TryBroadcast16x8(control)) {
+ return SimdPermuteOp::BROADCAST_16x8;
+ }
+ if (TryPermute16x8(control)) {
+ return SimdPermuteOp::PERMUTE_16x8;
+ }
+ if (TryBroadcast8x16(control)) {
+ return SimdPermuteOp::BROADCAST_8x16;
+ }
+ if (TryReverse<2>(control)) {
+ return SimdPermuteOp::REVERSE_16x8;
+ }
+ if (TryReverse<4>(control)) {
+ return SimdPermuteOp::REVERSE_32x4;
+ }
+ if (TryReverse<8>(control)) {
+ return SimdPermuteOp::REVERSE_64x2;
+ }
+
+ // TODO: (From v8) Unzip and transpose generally have renditions that slightly
+ // beat a general permute (three or four instructions)
+ //
+ // TODO: (From MacroAssemblerX86Shared::ShuffleX4): MOVLHPS and MOVHLPS can be
+ // used when merging two values.
+
+ // The default operation is to permute bytes with the default control.
+ return SimdPermuteOp::PERMUTE_8x16;
+}
+
+// Can we shift the bytes left or right by a constant? A shift is a run of
+// lanes from the rhs (which is zero) on one end and a run of values from the
+// lhs on the other end.
+static Maybe<SimdPermuteOp> TryShift8x16(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+
+ // Represent all zero lanes by 16
+ SimdConstant::I8x16 zeroesMasked;
+ MapLanes(zeroesMasked, lanes, [](int x) -> int { return x >= 16 ? 16 : x; });
+
+ int i = ScanConstant(zeroesMasked, 16, 0);
+ int shiftLeft = i;
+ if (shiftLeft > 0 && lanes[shiftLeft] != 0) {
+ return Nothing();
+ }
+
+ i = ScanIncreasingUnmasked(zeroesMasked, i);
+ int shiftRight = 16 - i;
+ if (shiftRight > 0 && lanes[i - 1] != 15) {
+ return Nothing();
+ }
+
+ i = ScanConstant(zeroesMasked, 16, i);
+ if (i < 16 || (shiftRight > 0 && shiftLeft > 0) ||
+ (shiftRight == 0 && shiftLeft == 0)) {
+ return Nothing();
+ }
+
+ if (shiftRight) {
+ *control = SimdConstant::SplatX16((int8_t)shiftRight);
+ return Some(SimdPermuteOp::SHIFT_RIGHT_8x16);
+ }
+ *control = SimdConstant::SplatX16((int8_t)shiftLeft);
+ return Some(SimdPermuteOp::SHIFT_LEFT_8x16);
+}
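+
+// For example, TryShift8x16 classifies {16,16,16,0,1,2,3,4,5,6,7,8,9,10,11,12}
+// (three lanes taken from the all-zeroes rhs in the low bytes, then an
+// increasing run of lhs lanes starting at 0) as a shift left by 3, and
+// {3,4,5,6,7,8,9,10,11,12,13,14,15,16,16,16} as a shift right by 3; in both
+// cases *control becomes a splat of the shift count.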
+
+static Maybe<SimdPermuteOp> AnalyzeShuffleWithZero(SimdConstant* control) {
+ Maybe<SimdPermuteOp> op;
+ op = TryShift8x16(control);
+ if (op) {
+ return op;
+ }
+
+ // TODO: Optimization opportunity? A byte-blend-with-zero is just a CONST;
+ // PAND. This may beat the general byte blend code below.
+ return Nothing();
+}
+
+// Concat: if the result is the suffix (high bytes) of the rhs in front of a
+// prefix (low bytes) of the lhs then this is PALIGNR; ditto if the operands are
+// swapped.
+static Maybe<SimdShuffleOp> TryConcatRightShift8x16(SimdConstant* control,
+ bool* swapOperands) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ int i = ScanIncreasingUnmasked(lanes, 0);
+ MOZ_ASSERT(i < 16, "Single-operand run should have been handled elsewhere");
+ // First run must end with 15 % 16
+ if ((lanes[i - 1] & 15) != 15) {
+ return Nothing();
+ }
+ // Second run must start with 0 % 16
+ if ((lanes[i] & 15) != 0) {
+ return Nothing();
+ }
+ // The two runs must come from different inputs
+ if ((lanes[i] & 16) == (lanes[i - 1] & 16)) {
+ return Nothing();
+ }
+ int suffixLength = i;
+
+ i = ScanIncreasingUnmasked(lanes, i);
+ // Must end at the left end
+ if (i != 16) {
+ return Nothing();
+ }
+
+ // If the suffix is from the lhs then swap the operands
+ if (lanes[0] < 16) {
+ *swapOperands = !*swapOperands;
+ }
+ *control = SimdConstant::SplatX16((int8_t)suffixLength);
+ return Some(SimdShuffleOp::CONCAT_RIGHT_SHIFT_8x16);
+}
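+
+// For example, TryConcatRightShift8x16 accepts the control
+// {28,29,30,31, 0,1,2,3,4,5,6,7,8,9,10,11}: the first run is the top four
+// bytes of the rhs and the second run is the low twelve bytes of the lhs, so
+// the suffix length is 4, no operand swap is needed (the suffix comes from the
+// rhs), and *control becomes a splat of 4.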
+
+// Blend words: if we pick words from both operands without a pattern but all
+// the input words stay in their position then this is PBLENDW (immediate mask);
+// this also handles all larger sizes on x64.
+static Maybe<SimdShuffleOp> TryBlendInt16x8(SimdConstant* control) {
+ SimdConstant tmp(*control);
+ if (!ByteMaskToWordMask(&tmp)) {
+ return Nothing();
+ }
+ SimdConstant::I16x8 masked;
+ MaskLanes(masked, tmp.asInt16x8());
+ if (!IsIdentity(masked)) {
+ return Nothing();
+ }
+ SimdConstant::I16x8 mapped;
+ MapLanes(mapped, tmp.asInt16x8(),
+ [](int x) -> int { return x < 8 ? 0 : -1; });
+ *control = SimdConstant::CreateX8(mapped);
+ return Some(SimdShuffleOp::BLEND_16x8);
+}
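+
+// For example, a byte control that reduces to the word selection
+// {0, 9, 2, 11, 4, 13, 6, 15} masks to the identity {0,1,2,3,4,5,6,7}, so
+// TryBlendInt16x8 accepts it and rewrites *control to the word blend mask
+// {0,-1,0,-1,0,-1,0,-1} (0 selects the lhs, -1 the rhs).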
+
+// Blend bytes: if we pick bytes from both operands but every byte stays in its
+// position then this is a byte blend, which can be handled with a CONST, PAND,
+// PANDNOT, and POR.
+//
+// TODO: Optimization opportunity? If we pick all but one lane from one operand
+// and at most one from the other then it could be a MOV + PEXTRB + PINSRB
+// (also if this element is not in its source location).
+static Maybe<SimdShuffleOp> TryBlendInt8x16(SimdConstant* control) {
+ SimdConstant::I8x16 masked;
+ MaskLanes(masked, control->asInt8x16());
+ if (!IsIdentity(masked)) {
+ return Nothing();
+ }
+ SimdConstant::I8x16 mapped;
+ MapLanes(mapped, control->asInt8x16(),
+ [](int x) -> int { return x < 16 ? 0 : -1; });
+ *control = SimdConstant::CreateX16(mapped);
+ return Some(SimdShuffleOp::BLEND_8x16);
+}
+
+template <typename T>
+static bool MatchInterleave(const T* lanes, int lhs, int rhs, int len) {
+ for (int i = 0; i < len; i++) {
+ if (lanes[i * 2] != lhs + i || lanes[i * 2 + 1] != rhs + i) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Unpack/interleave:
+// - if we interleave the low (bytes/words/doublewords) of the inputs into
+// the output then this is UNPCKL*W (possibly with a swap of operands).
+// - if we interleave the high ditto then it is UNPCKH*W (ditto)
+template <typename T>
+static Maybe<SimdShuffleOp> TryInterleave(const T* lanes, int lhs, int rhs,
+ bool* swapOperands,
+ SimdShuffleOp lowOp,
+ SimdShuffleOp highOp) {
+ int len = int(32 / (sizeof(T) * 4));
+ if (MatchInterleave(lanes, lhs, rhs, len)) {
+ return Some(lowOp);
+ }
+ if (MatchInterleave(lanes, rhs, lhs, len)) {
+ *swapOperands = !*swapOperands;
+ return Some(lowOp);
+ }
+ if (MatchInterleave(lanes, lhs + len, rhs + len, len)) {
+ return Some(highOp);
+ }
+ if (MatchInterleave(lanes, rhs + len, lhs + len, len)) {
+ *swapOperands = !*swapOperands;
+ return Some(highOp);
+ }
+ return Nothing();
+}
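+
+// For example, with dword lanes (lhs == 0, rhs == 4, len == 2) the control
+// {0, 4, 1, 5} matches the low interleave and {2, 6, 3, 7} matches the high
+// interleave; swapping the roles of lhs and rhs in either pattern flips
+// *swapOperands instead of failing the match.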
+
+static Maybe<SimdShuffleOp> TryInterleave64x2(SimdConstant* control,
+ bool* swapOperands) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToQWordMask(&tmp)) {
+ return Nothing();
+ }
+ const SimdConstant::I64x2& lanes = tmp.asInt64x2();
+ return TryInterleave(lanes, 0, 2, swapOperands,
+ SimdShuffleOp::INTERLEAVE_LOW_64x2,
+ SimdShuffleOp::INTERLEAVE_HIGH_64x2);
+}
+
+static Maybe<SimdShuffleOp> TryInterleave32x4(SimdConstant* control,
+ bool* swapOperands) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToDWordMask(&tmp)) {
+ return Nothing();
+ }
+ const SimdConstant::I32x4& lanes = tmp.asInt32x4();
+ return TryInterleave(lanes, 0, 4, swapOperands,
+ SimdShuffleOp::INTERLEAVE_LOW_32x4,
+ SimdShuffleOp::INTERLEAVE_HIGH_32x4);
+}
+
+static Maybe<SimdShuffleOp> TryInterleave16x8(SimdConstant* control,
+ bool* swapOperands) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToWordMask(&tmp)) {
+ return Nothing();
+ }
+ const SimdConstant::I16x8& lanes = tmp.asInt16x8();
+ return TryInterleave(lanes, 0, 8, swapOperands,
+ SimdShuffleOp::INTERLEAVE_LOW_16x8,
+ SimdShuffleOp::INTERLEAVE_HIGH_16x8);
+}
+
+static Maybe<SimdShuffleOp> TryInterleave8x16(SimdConstant* control,
+ bool* swapOperands) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ return TryInterleave(lanes, 0, 16, swapOperands,
+ SimdShuffleOp::INTERLEAVE_LOW_8x16,
+ SimdShuffleOp::INTERLEAVE_HIGH_8x16);
+}
+
+static SimdShuffleOp AnalyzeTwoArgShuffle(SimdConstant* control,
+ bool* swapOperands) {
+ Maybe<SimdShuffleOp> op;
+ op = TryConcatRightShift8x16(control, swapOperands);
+ if (!op) {
+ op = TryBlendInt16x8(control);
+ }
+ if (!op) {
+ op = TryBlendInt8x16(control);
+ }
+ if (!op) {
+ op = TryInterleave64x2(control, swapOperands);
+ }
+ if (!op) {
+ op = TryInterleave32x4(control, swapOperands);
+ }
+ if (!op) {
+ op = TryInterleave16x8(control, swapOperands);
+ }
+ if (!op) {
+ op = TryInterleave8x16(control, swapOperands);
+ }
+ if (!op) {
+ op = Some(SimdShuffleOp::SHUFFLE_BLEND_8x16);
+ }
+ return *op;
+}
+
+// Reorder the operands if that seems useful, notably by moving a constant to
+// the right-hand side. Rewrites the control to account for any move.
+static bool MaybeReorderShuffleOperands(MDefinition** lhs, MDefinition** rhs,
+ SimdConstant* control) {
+ if ((*lhs)->isWasmFloatConstant()) {
+ MDefinition* tmp = *lhs;
+ *lhs = *rhs;
+ *rhs = tmp;
+
+ int8_t controlBytes[16];
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ for (unsigned i = 0; i < 16; i++) {
+ controlBytes[i] = int8_t(lanes[i] ^ 16);
+ }
+ *control = SimdConstant::CreateX16(controlBytes);
+
+ return true;
+ }
+ return false;
+}
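+
+// For example, if the constant was on the lhs, a control byte 0 (lhs lane 0)
+// must become 16 (rhs lane 0) after the swap and a control byte 17 must become
+// 1, which is exactly what XORing each byte with 16 does.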
+
+# ifdef DEBUG
+static const SimdShuffle& ReportShuffleSpecialization(const SimdShuffle& s) {
+ switch (s.opd) {
+ case SimdShuffle::Operand::BOTH:
+ case SimdShuffle::Operand::BOTH_SWAPPED:
+ switch (*s.shuffleOp) {
+ case SimdShuffleOp::SHUFFLE_BLEND_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> shuffle+blend 8x16");
+ break;
+ case SimdShuffleOp::BLEND_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> blend 8x16");
+ break;
+ case SimdShuffleOp::BLEND_16x8:
+ js::wasm::ReportSimdAnalysis("shuffle -> blend 16x8");
+ break;
+ case SimdShuffleOp::CONCAT_RIGHT_SHIFT_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> concat+shift-right 8x16");
+ break;
+ case SimdShuffleOp::INTERLEAVE_HIGH_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-high 8x16");
+ break;
+ case SimdShuffleOp::INTERLEAVE_HIGH_16x8:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-high 16x8");
+ break;
+ case SimdShuffleOp::INTERLEAVE_HIGH_32x4:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-high 32x4");
+ break;
+ case SimdShuffleOp::INTERLEAVE_HIGH_64x2:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-high 64x2");
+ break;
+ case SimdShuffleOp::INTERLEAVE_LOW_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-low 8x16");
+ break;
+ case SimdShuffleOp::INTERLEAVE_LOW_16x8:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-low 16x8");
+ break;
+ case SimdShuffleOp::INTERLEAVE_LOW_32x4:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-low 32x4");
+ break;
+ case SimdShuffleOp::INTERLEAVE_LOW_64x2:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-low 64x2");
+ break;
+ default:
+ MOZ_CRASH("Unexpected shuffle op");
+ }
+ break;
+ case SimdShuffle::Operand::LEFT:
+ case SimdShuffle::Operand::RIGHT:
+ switch (*s.permuteOp) {
+ case SimdPermuteOp::BROADCAST_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> broadcast 8x16");
+ break;
+ case SimdPermuteOp::BROADCAST_16x8:
+ js::wasm::ReportSimdAnalysis("shuffle -> broadcast 16x8");
+ break;
+ case SimdPermuteOp::MOVE:
+ js::wasm::ReportSimdAnalysis("shuffle -> move");
+ break;
+ case SimdPermuteOp::REVERSE_16x8:
+ js::wasm::ReportSimdAnalysis(
+ "shuffle -> reverse bytes in 16-bit lanes");
+ break;
+ case SimdPermuteOp::REVERSE_32x4:
+ js::wasm::ReportSimdAnalysis(
+ "shuffle -> reverse bytes in 32-bit lanes");
+ break;
+ case SimdPermuteOp::REVERSE_64x2:
+ js::wasm::ReportSimdAnalysis(
+ "shuffle -> reverse bytes in 64-bit lanes");
+ break;
+ case SimdPermuteOp::PERMUTE_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> permute 8x16");
+ break;
+ case SimdPermuteOp::PERMUTE_16x8:
+ js::wasm::ReportSimdAnalysis("shuffle -> permute 16x8");
+ break;
+ case SimdPermuteOp::PERMUTE_32x4:
+ js::wasm::ReportSimdAnalysis("shuffle -> permute 32x4");
+ break;
+ case SimdPermuteOp::ROTATE_RIGHT_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> rotate-right 8x16");
+ break;
+ case SimdPermuteOp::SHIFT_LEFT_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> shift-left 8x16");
+ break;
+ case SimdPermuteOp::SHIFT_RIGHT_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> shift-right 8x16");
+ break;
+ default:
+ MOZ_CRASH("Unexpected permute op");
+ }
+ break;
+ }
+ return s;
+}
+# endif // DEBUG
+
+SimdShuffle jit::AnalyzeSimdShuffle(SimdConstant control, MDefinition* lhs,
+ MDefinition* rhs) {
+# ifdef DEBUG
+# define R(s) ReportShuffleSpecialization(s)
+# else
+# define R(s) (s)
+# endif
+
+ // If only one of the inputs is used, determine which.
+ bool useLeft = true;
+ bool useRight = true;
+ if (lhs == rhs) {
+ useRight = false;
+ } else {
+ bool allAbove = true;
+ bool allBelow = true;
+ const SimdConstant::I8x16& lanes = control.asInt8x16();
+ for (int8_t i : lanes) {
+ allAbove = allAbove && i >= 16;
+ allBelow = allBelow && i < 16;
+ }
+ if (allAbove) {
+ useLeft = false;
+ } else if (allBelow) {
+ useRight = false;
+ }
+ }
+
+ // Deal with one-ignored-input.
+ if (!(useLeft && useRight)) {
+ SimdPermuteOp op = AnalyzePermute(&control);
+ return R(SimdShuffle::permute(
+ useLeft ? SimdShuffle::Operand::LEFT : SimdShuffle::Operand::RIGHT,
+ control, op));
+ }
+
+ // Move constants to rhs.
+ bool swapOperands = MaybeReorderShuffleOperands(&lhs, &rhs, &control);
+
+ // Deal with constant rhs.
+ if (rhs->isWasmFloatConstant()) {
+ SimdConstant rhsConstant = rhs->toWasmFloatConstant()->toSimd128();
+ if (rhsConstant.isZeroBits()) {
+ Maybe<SimdPermuteOp> op = AnalyzeShuffleWithZero(&control);
+ if (op) {
+ return R(SimdShuffle::permute(swapOperands ? SimdShuffle::Operand::RIGHT
+ : SimdShuffle::Operand::LEFT,
+ control, *op));
+ }
+ }
+ }
+
+ // Two operands both of which are used. If there's one constant operand it is
+ // now on the rhs.
+ SimdShuffleOp op = AnalyzeTwoArgShuffle(&control, &swapOperands);
+ return R(SimdShuffle::shuffle(swapOperands
+ ? SimdShuffle::Operand::BOTH_SWAPPED
+ : SimdShuffle::Operand::BOTH,
+ control, op));
+# undef R
+}
+
+#endif // ENABLE_WASM_SIMD
diff --git a/js/src/jit/ShuffleAnalysis.h b/js/src/jit/ShuffleAnalysis.h
new file mode 100644
index 0000000000..84133ccf5a
--- /dev/null
+++ b/js/src/jit/ShuffleAnalysis.h
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ShuffleAnalysis_h
+#define jit_ShuffleAnalysis_h
+
+#include "jit/IonTypes.h"
+
+namespace js {
+namespace jit {
+
+class MDefinition;
+
+// Permutation operations. NOTE: these may still be x86-centric, but the set
+// can accommodate operations from other architectures.
+//
+// The "low-order" byte is in lane 0 of an 8x16 datum, the "high-order" byte
+// in lane 15. The low-order byte is also the "rightmost". In wasm, the
+// constant (v128.const i8x16 0 1 2 ... 15) has 0 in the low-order byte and 15
+// in the high-order byte.
+enum class SimdPermuteOp {
+ // A single byte lane is copied into all the other byte lanes. control_[0]
+ // has the source lane.
+ BROADCAST_8x16,
+
+ // A single word lane is copied into all the other word lanes. control_[0]
+ // has the source lane.
+ BROADCAST_16x8,
+
+ // Copy input to output.
+ MOVE,
+
+ // control_ has bytes in range 0..15 s.t. control_[i] holds the source lane
+ // for output lane i.
+ PERMUTE_8x16,
+
+ // control_ has int16s in range 0..7, as for 8x16. In addition, the high
+ // byte of control_[0] has flags detailing the operation, values taken
+ // from the Perm16x8Action enum below.
+ PERMUTE_16x8,
+
+ // control_ has int32s in range 0..3, as for 8x16.
+ PERMUTE_32x4,
+
+ // control_[0] has the number of places to rotate by.
+ ROTATE_RIGHT_8x16,
+
+ // Zeroes are shifted into high-order bytes and low-order bytes are lost.
+ // control_[0] has the number of places to shift by.
+ SHIFT_RIGHT_8x16,
+
+ // Zeroes are shifted into low-order bytes and high-order bytes are lost.
+ // control_[0] has the number of places to shift by.
+ SHIFT_LEFT_8x16,
+
+ // Reverse bytes of 16-bit lanes.
+ REVERSE_16x8,
+
+ // Reverse bytes of 32-bit lanes.
+ REVERSE_32x4,
+
+ // Reverse bytes of 64-bit lanes.
+ REVERSE_64x2,
+};
+
+// Shuffle operations. NOTE: these may still be x86-centric, but the set can
+// accommodate operations from other architectures.
+enum class SimdShuffleOp {
+ // Blend bytes. control_ has the blend mask as an I8x16: 0 to select from
+ // the lhs, -1 to select from the rhs.
+ BLEND_8x16,
+
+ // Blend words. control_ has the blend mask as an I16x8: 0 to select from
+ // the lhs, -1 to select from the rhs.
+ BLEND_16x8,
+
+ // Concat the lhs in front of the rhs and shift right by bytes, extracting
+ // the low 16 bytes; control_[0] has the shift count.
+ CONCAT_RIGHT_SHIFT_8x16,
+
+ // Interleave qwords/dwords/words/bytes from high/low halves of operands.
+ // The low-order item in the result comes from the lhs, then the next from
+ // the rhs, and so on. control_ is ignored.
+ INTERLEAVE_HIGH_8x16,
+ INTERLEAVE_HIGH_16x8,
+ INTERLEAVE_HIGH_32x4,
+ INTERLEAVE_HIGH_64x2,
+ INTERLEAVE_LOW_8x16,
+ INTERLEAVE_LOW_16x8,
+ INTERLEAVE_LOW_32x4,
+ INTERLEAVE_LOW_64x2,
+
+ // Fully general shuffle+blend. control_ has the shuffle mask.
+ SHUFFLE_BLEND_8x16,
+};
+
+// Representation of the result of the shuffle analysis.
+struct SimdShuffle {
+ enum class Operand {
+ // Both inputs, in the original lhs-rhs order
+ BOTH,
+ // Both inputs, but in rhs-lhs order
+ BOTH_SWAPPED,
+ // Only the lhs input
+ LEFT,
+ // Only the rhs input
+ RIGHT,
+ };
+
+ Operand opd;
+ SimdConstant control;
+ mozilla::Maybe<SimdPermuteOp> permuteOp; // Single operands
+ mozilla::Maybe<SimdShuffleOp> shuffleOp; // Double operands
+
+ static SimdShuffle permute(Operand opd, SimdConstant control,
+ SimdPermuteOp op) {
+ MOZ_ASSERT(opd == Operand::LEFT || opd == Operand::RIGHT);
+ SimdShuffle s{opd, control, mozilla::Some(op), mozilla::Nothing()};
+ return s;
+ }
+
+ static SimdShuffle shuffle(Operand opd, SimdConstant control,
+ SimdShuffleOp op) {
+ MOZ_ASSERT(opd == Operand::BOTH || opd == Operand::BOTH_SWAPPED);
+ SimdShuffle s{opd, control, mozilla::Nothing(), mozilla::Some(op)};
+ return s;
+ }
+
+ bool equals(const SimdShuffle* other) const {
+ return permuteOp == other->permuteOp && shuffleOp == other->shuffleOp &&
+ opd == other->opd && control.bitwiseEqual(other->control);
+ }
+};
+
+#ifdef ENABLE_WASM_SIMD
+
+SimdShuffle AnalyzeSimdShuffle(SimdConstant control, MDefinition* lhs,
+ MDefinition* rhs);
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_ShuffleAnalysis_h
diff --git a/js/src/jit/Simulator.h b/js/src/jit/Simulator.h
new file mode 100644
index 0000000000..18b98662f6
--- /dev/null
+++ b/js/src/jit/Simulator.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Simulator_h
+#define jit_Simulator_h
+
+#if defined(JS_SIMULATOR_ARM)
+# include "jit/arm/Simulator-arm.h"
+#elif defined(JS_SIMULATOR_ARM64)
+# include "jit/arm64/vixl/Simulator-vixl.h"
+#elif defined(JS_SIMULATOR_MIPS32)
+# include "jit/mips32/Simulator-mips32.h"
+#elif defined(JS_SIMULATOR_MIPS64)
+# include "jit/mips64/Simulator-mips64.h"
+#elif defined(JS_SIMULATOR_LOONG64)
+# include "jit/loong64/Simulator-loong64.h"
+#elif defined(JS_SIMULATOR_RISCV64)
+# include "jit/riscv64/Simulator-riscv64.h"
+#elif defined(JS_SIMULATOR)
+# error "Unexpected simulator platform"
+#endif
+
+#if defined(JS_SIMULATOR_ARM64)
+namespace js::jit {
+using Simulator = vixl::Simulator;
+}
+#endif
+
+#endif /* jit_Simulator_h */
diff --git a/js/src/jit/Sink.cpp b/js/src/jit/Sink.cpp
new file mode 100644
index 0000000000..36977fb93d
--- /dev/null
+++ b/js/src/jit/Sink.cpp
@@ -0,0 +1,255 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Sink.h"
+
+#include "jit/IonOptimizationLevels.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+// Given the last found common dominator and a new definition to dominate, the
+// CommonDominator function returns the basic block which dominates both the
+// last common dominator and the definition. If no such block exists, then this
+// function returns null.
+static MBasicBlock* CommonDominator(MBasicBlock* commonDominator,
+ MBasicBlock* defBlock) {
+ // This is the first instruction visited, record its basic block as being
+ // the only interesting one.
+ if (!commonDominator) {
+ return defBlock;
+ }
+
+ // Iterate on immediate dominators of the known common dominator to find a
+ // block which dominates all previous uses as well as this instruction.
+ while (!commonDominator->dominates(defBlock)) {
+ MBasicBlock* nextBlock = commonDominator->immediateDominator();
+ // All uses are dominated, so this cannot happen unless the graph
+ // coherency is not respected.
+ MOZ_ASSERT(commonDominator != nextBlock);
+ commonDominator = nextBlock;
+ }
+
+ return commonDominator;
+}
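+
+// Illustrative sketch (hypothetical MIR, for intuition only): if `x = add a, b`
+// is computed in a block but only used inside one of its successors, Sink moves
+// the add into that successor; resume points outside it that still mention x
+// are given a clone of the add flagged as recovered-on-bailout, so the value
+// can be rematerialized if we bail out.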
+
+bool Sink(MIRGenerator* mir, MIRGraph& graph) {
+ JitSpew(JitSpew_Sink, "Begin");
+ TempAllocator& alloc = graph.alloc();
+ bool sinkEnabled = mir->optimizationInfo().sinkEnabled();
+
+ for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
+ block++) {
+ if (mir->shouldCancel("Sink")) {
+ return false;
+ }
+
+ for (MInstructionReverseIterator iter = block->rbegin();
+ iter != block->rend();) {
+ MInstruction* ins = *iter++;
+
+ // Only instructions which can be recovered on bailout can be moved
+ // into the bailout paths.
+ if (ins->isGuard() || ins->isGuardRangeBailouts() ||
+ ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout()) {
+ continue;
+ }
+
+ // Compute a common dominator for all uses of the current
+ // instruction.
+ bool hasLiveUses = false;
+ bool hasUses = false;
+ MBasicBlock* usesDominator = nullptr;
+ for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
+ hasUses = true;
+ MNode* consumerNode = (*i)->consumer();
+ if (consumerNode->isResumePoint()) {
+ if (!consumerNode->toResumePoint()->isRecoverableOperand(*i)) {
+ hasLiveUses = true;
+ }
+ continue;
+ }
+
+ MDefinition* consumer = consumerNode->toDefinition();
+ if (consumer->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ hasLiveUses = true;
+
+ // If the consumer is a Phi, then we should dominate the
+ // predecessor from which the value is coming.
+ MBasicBlock* consumerBlock = consumer->block();
+ if (consumer->isPhi()) {
+ consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));
+ }
+
+ usesDominator = CommonDominator(usesDominator, consumerBlock);
+ if (usesDominator == *block) {
+ break;
+ }
+ }
+
+ // Leave this instruction for DCE.
+ if (!hasUses) {
+ continue;
+ }
+
+ // We have uses, but none of them are live, so sink this instruction
+ // into the bailout paths by marking it as recovered on bailout.
+ if (!hasLiveUses) {
+ MOZ_ASSERT(!usesDominator);
+ ins->setRecoveredOnBailout();
+ JitSpewDef(JitSpew_Sink,
+ " No live uses, recover the instruction on bailout\n", ins);
+ continue;
+ }
+
+ // This guard is temporarily placed here as the above code deals with
+ // Dead Code Elimination, which got moved into this Sink phase because
+ // Dead Code Elimination used to move instructions with no live uses
+ // to the bailout path.
+ if (!sinkEnabled) {
+ continue;
+ }
+
+ // To move an effectful instruction, we would have to verify that the
+ // side-effect is not observed. In the meantime, we just inhibit
+ // this optimization on effectful instructions.
+ if (ins->isEffectful()) {
+ continue;
+ }
+
+ // If all the uses are under a loop, we might not want to work
+ // against LICM by moving everything back into the loop, but if the
+ // loop is itself inside an if, then we still want to move the
+ // computation under this if statement.
+ while (block->loopDepth() < usesDominator->loopDepth()) {
+ MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
+ usesDominator = usesDominator->immediateDominator();
+ }
+
+ // Only move instructions if there is a branch between the dominator
+ // of the uses and the original instruction. This prevents moving the
+ // computation of the arguments into an inline function if there is
+ // no major win.
+ MBasicBlock* lastJoin = usesDominator;
+ while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
+ MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
+ MBasicBlock* next = lastJoin->immediateDominator();
+ if (next->numSuccessors() > 1) {
+ break;
+ }
+ lastJoin = next;
+ }
+ if (*block == lastJoin) {
+ continue;
+ }
+
+ // Skip to the next instruction if we cannot find a common dominator
+ // for all the uses of this instruction, or if the common dominator
+ // corresponds to the block of the current instruction.
+ if (!usesDominator || usesDominator == *block) {
+ continue;
+ }
+
+ // Only instructions which can be recovered on bailout and which are
+ // sinkable can be moved into blocks below, while the resume points are
+ // filled with a clone which is recovered on bailout.
+
+ // If the instruction has live uses and is clonable, then we can clone
+ // the instruction for all non-dominated uses and move the instruction
+ // into the block which dominates all live uses.
+ if (!ins->canClone()) {
+ continue;
+ }
+
+ // If the block is a split-edge block, which is created for folding
+ // test conditions, then the block has no resume point and has
+ // multiple predecessors. In such a case, we cannot safely move a
+ // bailing instruction into these blocks as we have no way to bail out.
+ if (!usesDominator->entryResumePoint() &&
+ usesDominator->numPredecessors() != 1) {
+ continue;
+ }
+
+ JitSpewDef(JitSpew_Sink, " Can Clone & Recover, sink instruction\n",
+ ins);
+ JitSpew(JitSpew_Sink, " into Block %u", usesDominator->id());
+
+ // Copy the arguments and clone the instruction.
+ MDefinitionVector operands(alloc);
+ for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
+ if (!operands.append(ins->getOperand(i))) {
+ return false;
+ }
+ }
+
+ MInstruction* clone = ins->clone(alloc, operands);
+ if (!clone) {
+ return false;
+ }
+ ins->block()->insertBefore(ins, clone);
+ clone->setRecoveredOnBailout();
+
+ // We should not update the producer of the entry resume point, as
+ // it cannot refer to any instruction within the basic block except
+ // for Phi nodes.
+ MResumePoint* entry = usesDominator->entryResumePoint();
+
+ // Replace the instruction by its clone in all the resume points /
+ // recovered-on-bailout instructions which are not in blocks which
+ // are dominated by the usesDominator block.
+ for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e;) {
+ MUse* use = *i++;
+ MNode* consumer = use->consumer();
+
+ // If the consumer is a Phi, then we look for the index of the
+ // use to find the corresponding predecessor block, which is
+ // then used as the consumer block.
+ MBasicBlock* consumerBlock = consumer->block();
+ if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
+ consumerBlock = consumerBlock->getPredecessor(
+ consumer->toDefinition()->toPhi()->indexOf(use));
+ }
+
+ // Keep the current instruction for all dominated uses, except
+ // for the entry resume point of the block into which the
+ // instruction is moved.
+ if (usesDominator->dominates(consumerBlock) &&
+ (!consumer->isResumePoint() ||
+ consumer->toResumePoint() != entry)) {
+ continue;
+ }
+
+ use->replaceProducer(clone);
+ }
+
+ // As we move this instruction into a different block, we should
+ // verify that we do not carry over a resume point which would refer
+ // to an outdated state of the control flow.
+ if (ins->resumePoint()) {
+ ins->clearResumePoint();
+ }
+
+ // Now that all uses which are not dominated by usesDominator are
+ // using the cloned instruction, we can safely move the instruction
+ // into the usesDominator block.
+ MInstruction* at =
+ usesDominator->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
+ block->moveBefore(at, ins);
+ }
+ }
+
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/Sink.h b/js/src/jit/Sink.h
new file mode 100644
index 0000000000..0e714ed3fb
--- /dev/null
+++ b/js/src/jit/Sink.h
@@ -0,0 +1,22 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file declares the sink transformation.
+#ifndef jit_Sink_h
+#define jit_Sink_h
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+[[nodiscard]] bool Sink(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Sink_h */
diff --git a/js/src/jit/Snapshots.cpp b/js/src/jit/Snapshots.cpp
new file mode 100644
index 0000000000..2b6c2a945b
--- /dev/null
+++ b/js/src/jit/Snapshots.cpp
@@ -0,0 +1,605 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Snapshots.h"
+
+#include "jit/JitSpewer.h"
+#ifdef TRACK_SNAPSHOTS
+# include "jit/LIR.h"
+#endif
+#include "jit/MIR.h"
+#include "jit/Recover.h"
+#include "js/Printer.h"
+
+using namespace js;
+using namespace js::jit;
+
+// [SMDOC] IonMonkey Snapshot encoding
+//
+// Encodings:
+// [ptr] A fixed-size pointer.
+// [vwu] A variable-width unsigned integer.
+// [vws] A variable-width signed integer.
+// [u8] An 8-bit unsigned integer.
+// [u8'] An 8-bit unsigned integer which is potentially extended with packed
+// data.
+// [u8"] Packed data which is stored and packed in the previous [u8'].
+// [vwu*] A list of variable-width unsigned integers.
+// [pld] Payload of Recover Value Allocation:
+// PAYLOAD_NONE:
+// There is no payload.
+//
+// PAYLOAD_INDEX:
+// [vwu] Index, such as the constant pool index.
+//
+// PAYLOAD_STACK_OFFSET:
+// [vws] Stack offset based on the base of the Ion frame.
+//
+// PAYLOAD_GPR:
+// [u8] Code of the general register.
+//
+// PAYLOAD_FPU:
+// [u8] Code of the FPU register.
+//
+// PAYLOAD_PACKED_TAG:
+// [u8"] Bits 5-7: JSValueType is encoded on the low bits of the Mode
+// of the RValueAllocation.
+//
+// Snapshot header:
+//
+// [vwu] bits [n, 32): recover instruction offset
+// bits [0, n): bailout kind (n = SNAPSHOT_BAILOUTKIND_BITS)
+//
+// Snapshot body, repeated "frame count" times, from oldest frame to newest
+// frame. Note that the first frame doesn't have the "parent PC" field.
+//
+// [ptr] Debug only: JSScript*
+// [vwu] pc offset
+// [vwu] # of RVA's indexes, including nargs
+// [vwu*] List of indexes to R(ecover)ValueAllocation table. Contains
+// nargs + nfixed + stackDepth items.
+//
+// Recover value allocations are encoded at the end of the Snapshot buffer, and
+// they are padded to ALLOCATION_TABLE_ALIGNMENT. The encoding of each
+// allocation is determined by the RValueAllocation::Layout, which can be
+// obtained from the RValueAllocation::Mode with the layoutFromMode function.
+// The layout structure lists the payload types which are used to serialize /
+// deserialize / dump the content of the allocations.
+//
+// R(ecover)ValueAllocation items:
+// [u8'] Mode, which defines the type of the payload as well as the
+// interpretation.
+// [pld] first payload (packed tag, index, stack offset, register, ...)
+// [pld] second payload (register, stack offset, none)
+//
+// Modes:
+// CONSTANT [INDEX]
+// Index into the constant pool.
+//
+// CST_UNDEFINED []
+// Constant value which correspond to the "undefined" JS value.
+//
+// CST_NULL []
+// Constant value which correspond to the "null" JS value.
+//
+// DOUBLE_REG [FPU_REG]
+// Double value stored in a FPU register.
+//
+// ANY_FLOAT_REG [FPU_REG]
+// Any Float value (float32, simd) stored in a FPU register.
+//
+// ANY_FLOAT_STACK [STACK_OFFSET]
+// Any Float value (float32, simd) stored on the stack.
+//
+// UNTYPED_REG [GPR_REG]
+// UNTYPED_STACK [STACK_OFFSET]
+// UNTYPED_REG_REG [GPR_REG, GPR_REG]
+// UNTYPED_REG_STACK [GPR_REG, STACK_OFFSET]
+// UNTYPED_STACK_REG [STACK_OFFSET, GPR_REG]
+// UNTYPED_STACK_STACK [STACK_OFFSET, STACK_OFFSET]
+// Value with dynamically known type. On 32-bit architectures, the
+// first register/stack-offset corresponds to the holder of the type,
+// and the second corresponds to the payload of the JS Value.
+//
+// RECOVER_INSTRUCTION [INDEX]
+// Index into the list of recovered instruction results.
+//
+// RI_WITH_DEFAULT_CST [INDEX] [INDEX]
+// The first payload is the index into the list of recovered
+// instruction results. The second payload is the index in the
+// constant pool.
+//
+// TYPED_REG [PACKED_TAG, GPR_REG]:
+// Value with statically known type, whose payload is stored in a
+// register.
+//
+// TYPED_STACK [PACKED_TAG, STACK_OFFSET]:
+// Value with statically known type, whose payload is stored at an
+// offset on the stack.
+//
+
+const RValueAllocation::Layout& RValueAllocation::layoutFromMode(Mode mode) {
+ switch (mode) {
+ case CONSTANT: {
+ static const RValueAllocation::Layout layout = {PAYLOAD_INDEX,
+ PAYLOAD_NONE, "constant"};
+ return layout;
+ }
+
+ case CST_UNDEFINED: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_NONE, PAYLOAD_NONE, "undefined"};
+ return layout;
+ }
+
+ case CST_NULL: {
+ static const RValueAllocation::Layout layout = {PAYLOAD_NONE,
+ PAYLOAD_NONE, "null"};
+ return layout;
+ }
+
+ case DOUBLE_REG: {
+ static const RValueAllocation::Layout layout = {PAYLOAD_FPU, PAYLOAD_NONE,
+ "double"};
+ return layout;
+ }
+ case ANY_FLOAT_REG: {
+ static const RValueAllocation::Layout layout = {PAYLOAD_FPU, PAYLOAD_NONE,
+ "float register content"};
+ return layout;
+ }
+ case ANY_FLOAT_STACK: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_STACK_OFFSET, PAYLOAD_NONE, "float register content"};
+ return layout;
+ }
+#if defined(JS_NUNBOX32)
+ case UNTYPED_REG_REG: {
+ static const RValueAllocation::Layout layout = {PAYLOAD_GPR, PAYLOAD_GPR,
+ "value"};
+ return layout;
+ }
+ case UNTYPED_REG_STACK: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_GPR, PAYLOAD_STACK_OFFSET, "value"};
+ return layout;
+ }
+ case UNTYPED_STACK_REG: {
+ static const RValueAllocation::Layout layout = {PAYLOAD_STACK_OFFSET,
+ PAYLOAD_GPR, "value"};
+ return layout;
+ }
+ case UNTYPED_STACK_STACK: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_STACK_OFFSET, PAYLOAD_STACK_OFFSET, "value"};
+ return layout;
+ }
+#elif defined(JS_PUNBOX64)
+ case UNTYPED_REG: {
+ static const RValueAllocation::Layout layout = {PAYLOAD_GPR, PAYLOAD_NONE,
+ "value"};
+ return layout;
+ }
+ case UNTYPED_STACK: {
+ static const RValueAllocation::Layout layout = {PAYLOAD_STACK_OFFSET,
+ PAYLOAD_NONE, "value"};
+ return layout;
+ }
+#endif
+ case RECOVER_INSTRUCTION: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_INDEX, PAYLOAD_NONE, "instruction"};
+ return layout;
+ }
+ case RI_WITH_DEFAULT_CST: {
+ static const RValueAllocation::Layout layout = {
+ PAYLOAD_INDEX, PAYLOAD_INDEX, "instruction with default"};
+ return layout;
+ }
+
+ default: {
+ static const RValueAllocation::Layout regLayout = {
+ PAYLOAD_PACKED_TAG, PAYLOAD_GPR, "typed value"};
+
+ static const RValueAllocation::Layout stackLayout = {
+ PAYLOAD_PACKED_TAG, PAYLOAD_STACK_OFFSET, "typed value"};
+
+ if (mode >= TYPED_REG_MIN && mode <= TYPED_REG_MAX) {
+ return regLayout;
+ }
+ if (mode >= TYPED_STACK_MIN && mode <= TYPED_STACK_MAX) {
+ return stackLayout;
+ }
+ }
+ }
+
+ MOZ_CRASH_UNSAFE_PRINTF("Unexpected mode: 0x%x", uint32_t(mode));
+}
+
+// Pad serialized RValueAllocations to a multiple of X bytes in the allocation
+// buffer. By padding serialized value allocations, we are building an
+// indexable table of elements of X bytes, and thus we can safely divide any
+// offset within the buffer by X to obtain an index.
+//
+// By padding, we lose some space within the allocation buffer, but we multiply
+// by X the number of indexes that we can encode in one byte in each snapshot.
+//
+// Some value allocations take more than X bytes to encode, in which case we
+// pad to a multiple of X and waste indexes. The choice of X should balance the
+// padding wasted on serialized value allocations against the savings made in
+// snapshot indexes.
+static const size_t ALLOCATION_TABLE_ALIGNMENT = 2; /* bytes */
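+
+// For example, with the current alignment of 2 bytes, an allocation encoded at
+// byte offset 6 in the table is referenced from a snapshot by the index
+// 6 / ALLOCATION_TABLE_ALIGNMENT == 3, and SnapshotReader::readAllocation
+// recovers the offset by multiplying the index back.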
+
+void RValueAllocation::readPayload(CompactBufferReader& reader,
+ PayloadType type, uint8_t* mode,
+ Payload* p) {
+ switch (type) {
+ case PAYLOAD_NONE:
+ break;
+ case PAYLOAD_INDEX:
+ p->index = reader.readUnsigned();
+ break;
+ case PAYLOAD_STACK_OFFSET:
+ p->stackOffset = reader.readSigned();
+ break;
+ case PAYLOAD_GPR:
+ p->gpr = Register::FromCode(reader.readByte());
+ break;
+ case PAYLOAD_FPU:
+ p->fpu.data = reader.readByte();
+ break;
+ case PAYLOAD_PACKED_TAG:
+ p->type = JSValueType(*mode & PACKED_TAG_MASK);
+ *mode = *mode & ~PACKED_TAG_MASK;
+ break;
+ }
+}
+
+RValueAllocation RValueAllocation::read(CompactBufferReader& reader) {
+ uint8_t mode = reader.readByte();
+ const Layout& layout = layoutFromMode(Mode(mode & MODE_BITS_MASK));
+ Payload arg1, arg2;
+
+ readPayload(reader, layout.type1, &mode, &arg1);
+ readPayload(reader, layout.type2, &mode, &arg2);
+ return RValueAllocation(Mode(mode), arg1, arg2);
+}
+
+void RValueAllocation::writePayload(CompactBufferWriter& writer,
+ PayloadType type, Payload p) {
+ switch (type) {
+ case PAYLOAD_NONE:
+ break;
+ case PAYLOAD_INDEX:
+ writer.writeUnsigned(p.index);
+ break;
+ case PAYLOAD_STACK_OFFSET:
+ writer.writeSigned(p.stackOffset);
+ break;
+ case PAYLOAD_GPR:
+ static_assert(Registers::Total <= 0x100,
+ "Not enough bytes to encode all registers.");
+ writer.writeByte(p.gpr.code());
+ break;
+ case PAYLOAD_FPU:
+ static_assert(FloatRegisters::Total <= 0x100,
+ "Not enough bytes to encode all float registers.");
+ writer.writeByte(p.fpu.code());
+ break;
+ case PAYLOAD_PACKED_TAG: {
+ // This code assumes that the PACKED_TAG payload is following the
+ // writeByte of the mode.
+ if (!writer.oom()) {
+ MOZ_ASSERT(writer.length());
+ uint8_t* mode = writer.buffer() + (writer.length() - 1);
+ MOZ_ASSERT((*mode & PACKED_TAG_MASK) == 0 &&
+ (p.type & ~PACKED_TAG_MASK) == 0);
+ *mode = *mode | p.type;
+ }
+ break;
+ }
+ }
+}
+
+void RValueAllocation::writePadding(CompactBufferWriter& writer) {
+ // Write 0x7f in all padding bytes.
+ while (writer.length() % ALLOCATION_TABLE_ALIGNMENT) {
+ writer.writeByte(0x7f);
+ }
+}
+
+void RValueAllocation::write(CompactBufferWriter& writer) const {
+ const Layout& layout = layoutFromMode(mode());
+ MOZ_ASSERT(layout.type2 != PAYLOAD_PACKED_TAG);
+ MOZ_ASSERT(writer.length() % ALLOCATION_TABLE_ALIGNMENT == 0);
+
+ writer.writeByte(mode_);
+ writePayload(writer, layout.type1, arg1_);
+ writePayload(writer, layout.type2, arg2_);
+ writePadding(writer);
+}
+
+HashNumber RValueAllocation::hash() const {
+ HashNumber res = 0;
+ res = HashNumber(mode_);
+ res = arg1_.index + (res << 6) + (res << 16) - res;
+ res = arg2_.index + (res << 6) + (res << 16) - res;
+ return res;
+}
+
+#ifdef JS_JITSPEW
+void RValueAllocation::dumpPayload(GenericPrinter& out, PayloadType type,
+ Payload p) {
+ switch (type) {
+ case PAYLOAD_NONE:
+ break;
+ case PAYLOAD_INDEX:
+ out.printf("index %u", p.index);
+ break;
+ case PAYLOAD_STACK_OFFSET:
+ out.printf("stack %d", p.stackOffset);
+ break;
+ case PAYLOAD_GPR:
+ out.printf("reg %s", p.gpr.name());
+ break;
+ case PAYLOAD_FPU:
+ out.printf("reg %s", p.fpu.name());
+ break;
+ case PAYLOAD_PACKED_TAG:
+ out.printf("%s", ValTypeToString(p.type));
+ break;
+ }
+}
+
+void RValueAllocation::dump(GenericPrinter& out) const {
+ const Layout& layout = layoutFromMode(mode());
+ out.printf("%s", layout.name);
+
+ if (layout.type1 != PAYLOAD_NONE) {
+ out.printf(" (");
+ }
+ dumpPayload(out, layout.type1, arg1_);
+ if (layout.type2 != PAYLOAD_NONE) {
+ out.printf(", ");
+ }
+ dumpPayload(out, layout.type2, arg2_);
+ if (layout.type1 != PAYLOAD_NONE) {
+ out.printf(")");
+ }
+}
+#endif // JS_JITSPEW
+
+SnapshotReader::SnapshotReader(const uint8_t* snapshots, uint32_t offset,
+ uint32_t RVATableSize, uint32_t listSize)
+ : reader_(snapshots + offset, snapshots + listSize),
+ allocReader_(snapshots + listSize, snapshots + listSize + RVATableSize),
+ allocTable_(snapshots + listSize),
+ allocRead_(0) {
+ if (!snapshots) {
+ return;
+ }
+ JitSpew(JitSpew_IonSnapshots, "Creating snapshot reader");
+ readSnapshotHeader();
+}
+
+#define COMPUTE_SHIFT_AFTER_(name) (name##_BITS + name##_SHIFT)
+#define COMPUTE_MASK_(name) ((uint32_t(1 << name##_BITS) - 1) << name##_SHIFT)
+
+// Details of snapshot header packing.
+static const uint32_t SNAPSHOT_BAILOUTKIND_SHIFT = 0;
+static const uint32_t SNAPSHOT_BAILOUTKIND_BITS = 6;
+static const uint32_t SNAPSHOT_BAILOUTKIND_MASK =
+ COMPUTE_MASK_(SNAPSHOT_BAILOUTKIND);
+
+static_assert((1 << SNAPSHOT_BAILOUTKIND_BITS) - 1 >=
+ uint8_t(BailoutKind::Limit),
+ "Not enough bits for BailoutKinds");
+
+static const uint32_t SNAPSHOT_ROFFSET_SHIFT =
+ COMPUTE_SHIFT_AFTER_(SNAPSHOT_BAILOUTKIND);
+static const uint32_t SNAPSHOT_ROFFSET_BITS = 32 - SNAPSHOT_ROFFSET_SHIFT;
+static const uint32_t SNAPSHOT_ROFFSET_MASK = COMPUTE_MASK_(SNAPSHOT_ROFFSET);
+
+#undef COMPUTE_MASK_
+#undef COMPUTE_SHIFT_AFTER_
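+
+// For example, with the current SNAPSHOT_BAILOUTKIND_BITS == 6, a snapshot
+// with BailoutKind 3 and recover offset 0x123 packs its header word as
+//   bits = (0x123 << SNAPSHOT_ROFFSET_SHIFT) | (3 << SNAPSHOT_BAILOUTKIND_SHIFT)
+//        = 0x48c3
+// and readSnapshotHeader() below undoes this with the masks and shifts above.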
+
+void SnapshotReader::readSnapshotHeader() {
+ uint32_t bits = reader_.readUnsigned();
+
+ bailoutKind_ = BailoutKind((bits & SNAPSHOT_BAILOUTKIND_MASK) >>
+ SNAPSHOT_BAILOUTKIND_SHIFT);
+ recoverOffset_ = (bits & SNAPSHOT_ROFFSET_MASK) >> SNAPSHOT_ROFFSET_SHIFT;
+
+ JitSpew(JitSpew_IonSnapshots, "Read snapshot header with bailout kind %u",
+ uint32_t(bailoutKind_));
+
+#ifdef TRACK_SNAPSHOTS
+ readTrackSnapshot();
+#endif
+}
+
+#ifdef TRACK_SNAPSHOTS
+void SnapshotReader::readTrackSnapshot() {
+ pcOpcode_ = reader_.readUnsigned();
+ mirOpcode_ = reader_.readUnsigned();
+ mirId_ = reader_.readUnsigned();
+ lirOpcode_ = reader_.readUnsigned();
+ lirId_ = reader_.readUnsigned();
+}
+
+void SnapshotReader::spewBailingFrom() const {
+# ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_IonBailouts)) {
+ JitSpewHeader(JitSpew_IonBailouts);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" bailing from bytecode: %s, MIR: ", CodeName(JSOp(pcOpcode_)));
+ MDefinition::PrintOpcodeName(out, MDefinition::Opcode(mirOpcode_));
+ out.printf(" [%u], LIR: ", mirId_);
+ LInstruction::printName(out, LInstruction::Opcode(lirOpcode_));
+ out.printf(" [%u]", lirId_);
+ out.printf("\n");
+ }
+# endif
+}
+#endif
+
+uint32_t SnapshotReader::readAllocationIndex() {
+ allocRead_++;
+ return reader_.readUnsigned();
+}
+
+RValueAllocation SnapshotReader::readAllocation() {
+ JitSpew(JitSpew_IonSnapshots, "Reading slot %u", allocRead_);
+ uint32_t offset = readAllocationIndex() * ALLOCATION_TABLE_ALIGNMENT;
+ allocReader_.seek(allocTable_, offset);
+ return RValueAllocation::read(allocReader_);
+}
+
+SnapshotWriter::SnapshotWriter()
+ // Based on the measurements made in Bug 962555 comment 20, this length
+ // should be enough to prevent the reallocation of the hash table for at
+ // least half of the compilations.
+ : allocMap_(32) {}
+
+RecoverReader::RecoverReader(SnapshotReader& snapshot, const uint8_t* recovers,
+ uint32_t size)
+ : reader_(nullptr, nullptr), numInstructions_(0), numInstructionsRead_(0) {
+ if (!recovers) {
+ return;
+ }
+ reader_ =
+ CompactBufferReader(recovers + snapshot.recoverOffset(), recovers + size);
+ readRecoverHeader();
+ readInstruction();
+}
+
+RecoverReader::RecoverReader(const RecoverReader& rr)
+ : reader_(rr.reader_),
+ numInstructions_(rr.numInstructions_),
+ numInstructionsRead_(rr.numInstructionsRead_) {
+ if (reader_.currentPosition()) {
+ rr.instruction()->cloneInto(&rawData_);
+ }
+}
+
+RecoverReader& RecoverReader::operator=(const RecoverReader& rr) {
+ reader_ = rr.reader_;
+ numInstructions_ = rr.numInstructions_;
+ numInstructionsRead_ = rr.numInstructionsRead_;
+ if (reader_.currentPosition()) {
+ rr.instruction()->cloneInto(&rawData_);
+ }
+ return *this;
+}
+
+void RecoverReader::readRecoverHeader() {
+ numInstructions_ = reader_.readUnsigned();
+ MOZ_ASSERT(numInstructions_);
+
+ JitSpew(JitSpew_IonSnapshots, "Read recover header with instructionCount %u",
+ numInstructions_);
+}
+
+void RecoverReader::readInstruction() {
+ MOZ_ASSERT(moreInstructions());
+ RInstruction::readRecoverData(reader_, &rawData_);
+ numInstructionsRead_++;
+}
+
+SnapshotOffset SnapshotWriter::startSnapshot(RecoverOffset recoverOffset,
+ BailoutKind kind) {
+ lastStart_ = writer_.length();
+ allocWritten_ = 0;
+
+ JitSpew(JitSpew_IonSnapshots,
+ "starting snapshot with recover offset %u, bailout kind %u",
+ recoverOffset, uint32_t(kind));
+
+ MOZ_ASSERT(uint32_t(kind) < (1 << SNAPSHOT_BAILOUTKIND_BITS));
+ MOZ_ASSERT(recoverOffset < (1 << SNAPSHOT_ROFFSET_BITS));
+ uint32_t bits = (uint32_t(kind) << SNAPSHOT_BAILOUTKIND_SHIFT) |
+ (recoverOffset << SNAPSHOT_ROFFSET_SHIFT);
+
+ writer_.writeUnsigned(bits);
+ return lastStart_;
+}
+
+#ifdef TRACK_SNAPSHOTS
+void SnapshotWriter::trackSnapshot(uint32_t pcOpcode, uint32_t mirOpcode,
+ uint32_t mirId, uint32_t lirOpcode,
+ uint32_t lirId) {
+ writer_.writeUnsigned(pcOpcode);
+ writer_.writeUnsigned(mirOpcode);
+ writer_.writeUnsigned(mirId);
+ writer_.writeUnsigned(lirOpcode);
+ writer_.writeUnsigned(lirId);
+}
+#endif
+
+bool SnapshotWriter::add(const RValueAllocation& alloc) {
+ uint32_t offset;
+ RValueAllocMap::AddPtr p = allocMap_.lookupForAdd(alloc);
+ if (!p) {
+ offset = allocWriter_.length();
+ alloc.write(allocWriter_);
+ if (!allocMap_.add(p, alloc, offset)) {
+ allocWriter_.setOOM();
+ return false;
+ }
+ } else {
+ offset = p->value();
+ }
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_IonSnapshots)) {
+ JitSpewHeader(JitSpew_IonSnapshots);
+ Fprinter& out = JitSpewPrinter();
+ out.printf(" slot %u (%u): ", allocWritten_, offset);
+ alloc.dump(out);
+ out.printf("\n");
+ }
+#endif
+
+ allocWritten_++;
+ writer_.writeUnsigned(offset / ALLOCATION_TABLE_ALIGNMENT);
+ return true;
+}
+
+void SnapshotWriter::endSnapshot() {
+ // Place a sentinel for asserting on the other end.
+#ifdef DEBUG
+ writer_.writeSigned(-1);
+#endif
+
+ JitSpew(JitSpew_IonSnapshots,
+ "ending snapshot total size: %u bytes (start %u)",
+ uint32_t(writer_.length() - lastStart_), lastStart_);
+}
+
+RecoverOffset RecoverWriter::startRecover(uint32_t instructionCount) {
+ MOZ_ASSERT(instructionCount);
+ instructionCount_ = instructionCount;
+ instructionsWritten_ = 0;
+
+ JitSpew(JitSpew_IonSnapshots, "starting recover with %u instruction(s)",
+ instructionCount);
+
+ RecoverOffset recoverOffset = writer_.length();
+ writer_.writeUnsigned(instructionCount);
+ return recoverOffset;
+}
+
+void RecoverWriter::writeInstruction(const MNode* rp) {
+ if (!rp->writeRecoverData(writer_)) {
+ writer_.setOOM();
+ }
+ instructionsWritten_++;
+}
+
+void RecoverWriter::endRecover() {
+ MOZ_ASSERT(instructionCount_ == instructionsWritten_);
+}
diff --git a/js/src/jit/Snapshots.h b/js/src/jit/Snapshots.h
new file mode 100644
index 0000000000..c0c332d926
--- /dev/null
+++ b/js/src/jit/Snapshots.h
@@ -0,0 +1,529 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_Snapshot_h
+#define jit_Snapshot_h
+
+#include "mozilla/Attributes.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/IonTypes.h"
+#include "jit/Registers.h"
+#include "js/AllocPolicy.h"
+#include "js/HashTable.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+class JS_PUBLIC_API GenericPrinter;
+
+namespace jit {
+
+class RValueAllocation;
+
+// A Recover Value Allocation mirrors what is known at compile time, namely the
+// MIRType and the LAllocation. This is read out of the snapshot to recover the
+// value which would be there if this frame were an interpreter frame instead
+// of an Ion frame.
+//
+// It is used with the SnapshotIterator to recover a Value from the stack,
+// spilled registers or the list of constants of the compiled script.
+//
+// Unit tests are located in jsapi-tests/testJitRValueAlloc.cpp.
+class RValueAllocation {
+ public:
+ // See RValueAllocation encoding in Snapshots.cpp
+ enum Mode {
+ CONSTANT = 0x00,
+ CST_UNDEFINED = 0x01,
+ CST_NULL = 0x02,
+ DOUBLE_REG = 0x03,
+ ANY_FLOAT_REG = 0x04,
+ ANY_FLOAT_STACK = 0x05,
+#if defined(JS_NUNBOX32)
+ UNTYPED_REG_REG = 0x06,
+ UNTYPED_REG_STACK = 0x07,
+ UNTYPED_STACK_REG = 0x08,
+ UNTYPED_STACK_STACK = 0x09,
+#elif defined(JS_PUNBOX64)
+ UNTYPED_REG = 0x06,
+ UNTYPED_STACK = 0x07,
+#endif
+
+ // Recover instructions.
+ RECOVER_INSTRUCTION = 0x0a,
+ RI_WITH_DEFAULT_CST = 0x0b,
+
+ // The JSValueType is packed in the Mode.
+ TYPED_REG_MIN = 0x10,
+ TYPED_REG_MAX = 0x1f,
+ TYPED_REG = TYPED_REG_MIN,
+
+ // The JSValueType is packed in the Mode.
+ TYPED_STACK_MIN = 0x20,
+ TYPED_STACK_MAX = 0x2f,
+ TYPED_STACK = TYPED_STACK_MIN,
+
+ // This mask can be used with any other valid mode. When this flag is
+ // set on the mode, it informs the snapshot iterator that even if the
+ // allocation is readable, its content might be incomplete unless all
+ // side-effects have been executed.
+ RECOVER_SIDE_EFFECT_MASK = 0x80,
+
+ // This mask represents the set of bits which can be used to encode a
+ // value in a snapshot. The mode is used to determine how to interpret
+ // the union of values and how to pack the value in memory.
+ MODE_BITS_MASK = 0x17f,
+
+ INVALID = 0x100,
+ };
+
+ enum { PACKED_TAG_MASK = 0x0f };
+
+ // See Payload encoding in Snapshots.cpp
+ enum PayloadType {
+ PAYLOAD_NONE,
+ PAYLOAD_INDEX,
+ PAYLOAD_STACK_OFFSET,
+ PAYLOAD_GPR,
+ PAYLOAD_FPU,
+ PAYLOAD_PACKED_TAG
+ };
+
+ struct Layout {
+ PayloadType type1;
+ PayloadType type2;
+ const char* name;
+ };
+
+ private:
+ Mode mode_;
+
+ // Additional information to recover the content of the allocation.
+ struct FloatRegisterBits {
+ uint32_t data;
+ bool operator==(const FloatRegisterBits& other) const {
+ return data == other.data;
+ }
+ uint32_t code() const { return data; }
+ const char* name() const {
+ FloatRegister tmp = FloatRegister::FromCode(data);
+ return tmp.name();
+ }
+ };
+
+ union Payload {
+ uint32_t index;
+ int32_t stackOffset;
+ Register gpr;
+ FloatRegisterBits fpu;
+ JSValueType type;
+
+ Payload() : index(0) {
+ static_assert(sizeof(index) == sizeof(Payload),
+ "All Payload bits are initialized.");
+ }
+ };
+
+ Payload arg1_;
+ Payload arg2_;
+
+ static Payload payloadOfIndex(uint32_t index) {
+ Payload p;
+ p.index = index;
+ return p;
+ }
+ static Payload payloadOfStackOffset(int32_t offset) {
+ Payload p;
+ p.stackOffset = offset;
+ return p;
+ }
+ static Payload payloadOfRegister(Register reg) {
+ Payload p;
+ p.gpr = reg;
+ return p;
+ }
+ static Payload payloadOfFloatRegister(FloatRegister reg) {
+ Payload p;
+ FloatRegisterBits b;
+ b.data = reg.code();
+ p.fpu = b;
+ return p;
+ }
+ static Payload payloadOfValueType(JSValueType type) {
+ Payload p;
+ p.type = type;
+ return p;
+ }
+
+ static const Layout& layoutFromMode(Mode mode);
+
+ static void readPayload(CompactBufferReader& reader, PayloadType t,
+ uint8_t* mode, Payload* p);
+ static void writePayload(CompactBufferWriter& writer, PayloadType t,
+ Payload p);
+ static void writePadding(CompactBufferWriter& writer);
+#ifdef JS_JITSPEW
+ static void dumpPayload(GenericPrinter& out, PayloadType t, Payload p);
+#endif
+ static bool equalPayloads(PayloadType t, Payload lhs, Payload rhs);
+
+ RValueAllocation(Mode mode, Payload a1, Payload a2)
+ : mode_(mode), arg1_(a1), arg2_(a2) {}
+
+ RValueAllocation(Mode mode, Payload a1) : mode_(mode), arg1_(a1) {
+ arg2_.index = 0;
+ }
+
+ explicit RValueAllocation(Mode mode) : mode_(mode) {
+ arg1_.index = 0;
+ arg2_.index = 0;
+ }
+
+ public:
+ RValueAllocation() : mode_(INVALID) {
+ arg1_.index = 0;
+ arg2_.index = 0;
+ }
+
+ // DOUBLE_REG
+ static RValueAllocation Double(FloatRegister reg) {
+ return RValueAllocation(DOUBLE_REG, payloadOfFloatRegister(reg));
+ }
+
+ // ANY_FLOAT_REG or ANY_FLOAT_STACK
+ static RValueAllocation AnyFloat(FloatRegister reg) {
+ return RValueAllocation(ANY_FLOAT_REG, payloadOfFloatRegister(reg));
+ }
+ static RValueAllocation AnyFloat(int32_t offset) {
+ return RValueAllocation(ANY_FLOAT_STACK, payloadOfStackOffset(offset));
+ }
+
+ // TYPED_REG or TYPED_STACK
+ static RValueAllocation Typed(JSValueType type, Register reg) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE && type != JSVAL_TYPE_MAGIC &&
+ type != JSVAL_TYPE_NULL && type != JSVAL_TYPE_UNDEFINED);
+ return RValueAllocation(TYPED_REG, payloadOfValueType(type),
+ payloadOfRegister(reg));
+ }
+ static RValueAllocation Typed(JSValueType type, int32_t offset) {
+ MOZ_ASSERT(type != JSVAL_TYPE_MAGIC && type != JSVAL_TYPE_NULL &&
+ type != JSVAL_TYPE_UNDEFINED);
+ return RValueAllocation(TYPED_STACK, payloadOfValueType(type),
+ payloadOfStackOffset(offset));
+ }
+
+ // UNTYPED
+#if defined(JS_NUNBOX32)
+ static RValueAllocation Untyped(Register type, Register payload) {
+ return RValueAllocation(UNTYPED_REG_REG, payloadOfRegister(type),
+ payloadOfRegister(payload));
+ }
+
+ static RValueAllocation Untyped(Register type, int32_t payloadStackOffset) {
+ return RValueAllocation(UNTYPED_REG_STACK, payloadOfRegister(type),
+ payloadOfStackOffset(payloadStackOffset));
+ }
+
+ static RValueAllocation Untyped(int32_t typeStackOffset, Register payload) {
+ return RValueAllocation(UNTYPED_STACK_REG,
+ payloadOfStackOffset(typeStackOffset),
+ payloadOfRegister(payload));
+ }
+
+ static RValueAllocation Untyped(int32_t typeStackOffset,
+ int32_t payloadStackOffset) {
+ return RValueAllocation(UNTYPED_STACK_STACK,
+ payloadOfStackOffset(typeStackOffset),
+ payloadOfStackOffset(payloadStackOffset));
+ }
+
+#elif defined(JS_PUNBOX64)
+ static RValueAllocation Untyped(Register reg) {
+ return RValueAllocation(UNTYPED_REG, payloadOfRegister(reg));
+ }
+
+ static RValueAllocation Untyped(int32_t stackOffset) {
+ return RValueAllocation(UNTYPED_STACK, payloadOfStackOffset(stackOffset));
+ }
+#endif
+
+ // common constants.
+ static RValueAllocation Undefined() {
+ return RValueAllocation(CST_UNDEFINED);
+ }
+ static RValueAllocation Null() { return RValueAllocation(CST_NULL); }
+
+ // CONSTANT's index
+ static RValueAllocation ConstantPool(uint32_t index) {
+ return RValueAllocation(CONSTANT, payloadOfIndex(index));
+ }
+
+ // Recover instruction's index
+ static RValueAllocation RecoverInstruction(uint32_t index) {
+ return RValueAllocation(RECOVER_INSTRUCTION, payloadOfIndex(index));
+ }
+ static RValueAllocation RecoverInstruction(uint32_t riIndex,
+ uint32_t cstIndex) {
+ return RValueAllocation(RI_WITH_DEFAULT_CST, payloadOfIndex(riIndex),
+ payloadOfIndex(cstIndex));
+ }
+
+ void setNeedSideEffect() {
+ MOZ_ASSERT(!needSideEffect() && mode_ != INVALID);
+ mode_ = Mode(mode_ | RECOVER_SIDE_EFFECT_MASK);
+ }
+
+ void writeHeader(CompactBufferWriter& writer, JSValueType type,
+ uint32_t regCode) const;
+
+ public:
+ static RValueAllocation read(CompactBufferReader& reader);
+ void write(CompactBufferWriter& writer) const;
+
+ public:
+ bool valid() const { return mode_ != INVALID; }
+ Mode mode() const { return Mode(mode_ & MODE_BITS_MASK); }
+ bool needSideEffect() const { return mode_ & RECOVER_SIDE_EFFECT_MASK; }
+
+ uint32_t index() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_INDEX);
+ return arg1_.index;
+ }
+ int32_t stackOffset() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_STACK_OFFSET);
+ return arg1_.stackOffset;
+ }
+ Register reg() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_GPR);
+ return arg1_.gpr;
+ }
+ FloatRegister fpuReg() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_FPU);
+ FloatRegisterBits b = arg1_.fpu;
+ return FloatRegister::FromCode(b.data);
+ }
+ JSValueType knownType() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type1 == PAYLOAD_PACKED_TAG);
+ return arg1_.type;
+ }
+
+ uint32_t index2() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type2 == PAYLOAD_INDEX);
+ return arg2_.index;
+ }
+ int32_t stackOffset2() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type2 == PAYLOAD_STACK_OFFSET);
+ return arg2_.stackOffset;
+ }
+ Register reg2() const {
+ MOZ_ASSERT(layoutFromMode(mode()).type2 == PAYLOAD_GPR);
+ return arg2_.gpr;
+ }
+
+ public:
+#ifdef JS_JITSPEW
+ void dump(GenericPrinter& out) const;
+#endif
+
+ bool operator==(const RValueAllocation& rhs) const {
+ // Note: this equality compares the verbatim content of the payload, which
+ // is possible because we ensure that the payload content is fully
+ // initialized during creation.
+ static_assert(sizeof(int32_t) == sizeof(Payload),
+ "All Payload bits are compared.");
+ return mode_ == rhs.mode_ && arg1_.index == rhs.arg1_.index &&
+ arg2_.index == rhs.arg2_.index;
+ }
+
+ HashNumber hash() const;
+
+ struct Hasher {
+ using Key = RValueAllocation;
+ using Lookup = Key;
+ static HashNumber hash(const Lookup& v) { return v.hash(); }
+ static bool match(const Key& k, const Lookup& l) { return k == l; }
+ };
+};
+
+class RecoverWriter;
+
+// Collects snapshots in a contiguous buffer, which is copied into IonScript
+// memory after code generation.
+class SnapshotWriter {
+ CompactBufferWriter writer_;
+ CompactBufferWriter allocWriter_;
+
+ // Map RValueAllocations to an offset in the allocWriter_ buffer. This is
+ // useful as value allocations are repeated frequently.
+ using RVA = RValueAllocation;
+ typedef HashMap<RVA, uint32_t, RVA::Hasher, SystemAllocPolicy> RValueAllocMap;
+ RValueAllocMap allocMap_;
+
+ // This is only used to assert sanity.
+ uint32_t allocWritten_;
+
+ // Used to report size of the snapshot in the spew messages.
+ SnapshotOffset lastStart_;
+
+ public:
+ SnapshotWriter();
+
+ SnapshotOffset startSnapshot(RecoverOffset recoverOffset, BailoutKind kind);
+#ifdef TRACK_SNAPSHOTS
+ void trackSnapshot(uint32_t pcOpcode, uint32_t mirOpcode, uint32_t mirId,
+ uint32_t lirOpcode, uint32_t lirId);
+#endif
+ [[nodiscard]] bool add(const RValueAllocation& slot);
+
+ uint32_t allocWritten() const { return allocWritten_; }
+ void endSnapshot();
+
+ bool oom() const {
+ return writer_.oom() || writer_.length() >= MAX_BUFFER_SIZE ||
+ allocWriter_.oom() || allocWriter_.length() >= MAX_BUFFER_SIZE;
+ }
+
+ size_t listSize() const { return writer_.length(); }
+ const uint8_t* listBuffer() const { return writer_.buffer(); }
+
+ size_t RVATableSize() const { return allocWriter_.length(); }
+ const uint8_t* RVATableBuffer() const { return allocWriter_.buffer(); }
+};
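
A minimal write-side sketch of this API, assuming the SnapshotWriter and RValueAllocation declarations above (and JSValueType) are in scope; the bailout kind, recover offset, register, and constant-pool index are illustrative parameters rather than values taken from this patch.

#include <stdint.h>

static bool EmitExampleSnapshot(js::jit::SnapshotWriter& snapshots,
                                js::jit::RecoverOffset recoverOffset,
                                js::jit::BailoutKind kind,
                                js::jit::Register intReg,
                                uint32_t constantIndex) {
  using js::jit::RValueAllocation;

  // One snapshot is a header plus a list of value allocations.
  snapshots.startSnapshot(recoverOffset, kind);

  // Slot 0: an int32 currently live in a general-purpose register.
  if (!snapshots.add(RValueAllocation::Typed(JSVAL_TYPE_INT32, intReg))) {
    return false;
  }
  // Slot 1: a value recovered from the constant pool.
  if (!snapshots.add(RValueAllocation::ConstantPool(constantIndex))) {
    return false;
  }
  // Slot 2: a statically known undefined value.
  if (!snapshots.add(RValueAllocation::Undefined())) {
    return false;
  }

  snapshots.endSnapshot();
  return !snapshots.oom();
}

Because RValueAllocation defines operator== and a Hasher, repeated allocations are deduplicated through allocMap_ and stored only once in the RVA table.
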
+
+class MNode;
+
+class RecoverWriter {
+ CompactBufferWriter writer_;
+
+ uint32_t instructionCount_;
+ uint32_t instructionsWritten_;
+
+ public:
+ SnapshotOffset startRecover(uint32_t instructionCount);
+
+ void writeInstruction(const MNode* rp);
+
+ void endRecover();
+
+ size_t size() const { return writer_.length(); }
+ const uint8_t* buffer() const { return writer_.buffer(); }
+
+ bool oom() const {
+ return writer_.oom() || writer_.length() >= MAX_BUFFER_SIZE;
+ }
+};
+
+class RecoverReader;
+
+// A snapshot reader reads the entries out of the compressed snapshot buffer in
+// a script. These entries describe the equivalent interpreter frames at a given
+// position in JIT code. Each entry is an Ion value allocation, used to
+// recover the corresponding Value from an Ion frame.
+class SnapshotReader {
+ CompactBufferReader reader_;
+ CompactBufferReader allocReader_;
+ const uint8_t* allocTable_;
+
+ BailoutKind bailoutKind_;
+ uint32_t allocRead_; // Number of slots that have been read.
+ RecoverOffset recoverOffset_; // Offset of the recover instructions.
+
+#ifdef TRACK_SNAPSHOTS
+ private:
+ uint32_t pcOpcode_;
+ uint32_t mirOpcode_;
+ uint32_t mirId_;
+ uint32_t lirOpcode_;
+ uint32_t lirId_;
+
+ public:
+ void readTrackSnapshot();
+ void spewBailingFrom() const;
+#endif
+
+ private:
+ void readSnapshotHeader();
+ uint32_t readAllocationIndex();
+
+ public:
+ SnapshotReader(const uint8_t* snapshots, uint32_t offset,
+ uint32_t RVATableSize, uint32_t listSize);
+
+ RValueAllocation readAllocation();
+ void skipAllocation() { readAllocationIndex(); }
+
+ BailoutKind bailoutKind() const { return bailoutKind_; }
+ RecoverOffset recoverOffset() const { return recoverOffset_; }
+
+ uint32_t numAllocationsRead() const { return allocRead_; }
+ void resetNumAllocationsRead() { allocRead_ = 0; }
+};
+
+class MOZ_NON_PARAM RInstructionStorage {
+ static constexpr size_t Size = 4 * sizeof(uint32_t);
+
+ // This presumes all RInstructionStorage are safely void*-alignable.
+ // RInstruction::readRecoverData asserts that no RInstruction subclass
+ // has stricter alignment requirements than RInstructionStorage.
+ static constexpr size_t Alignment = alignof(void*);
+
+ alignas(Alignment) unsigned char mem[Size];
+
+ public:
+ const void* addr() const { return mem; }
+ void* addr() { return mem; }
+
+ RInstructionStorage() = default;
+
+ // Making a copy of the raw bytes holding an RInstruction instance would be
+ // a strict aliasing violation: see bug 1269319 for a case where bytewise
+ // copying caused crashes.
+ RInstructionStorage(const RInstructionStorage&) = delete;
+ RInstructionStorage& operator=(const RInstructionStorage& other) = delete;
+};
+
+class RInstruction;
+
+class RecoverReader {
+ CompactBufferReader reader_;
+
+ // Number of encoded instructions.
+ uint32_t numInstructions_;
+
+ // Number of instructions read.
+ uint32_t numInstructionsRead_;
+
+ // Space is reserved as part of the RecoverReader to avoid allocating the
+ // data needed to decode the current instruction.
+ RInstructionStorage rawData_;
+
+ private:
+ void readRecoverHeader();
+ void readInstruction();
+
+ public:
+ RecoverReader(SnapshotReader& snapshot, const uint8_t* recovers,
+ uint32_t size);
+ explicit RecoverReader(const RecoverReader& rr);
+ RecoverReader& operator=(const RecoverReader& rr);
+
+ uint32_t numInstructions() const { return numInstructions_; }
+ uint32_t numInstructionsRead() const { return numInstructionsRead_; }
+
+ bool moreInstructions() const {
+ return numInstructionsRead_ < numInstructions_;
+ }
+ void nextInstruction() { readInstruction(); }
+
+ const RInstruction* instruction() const {
+ return reinterpret_cast<const RInstruction*>(rawData_.addr());
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_Snapshot_h */
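
On the read side, a SnapshotReader is positioned at one snapshot and a RecoverReader is layered on top of it to decode the recover instructions. A hedged sketch, with the buffer pointers and sizes assumed to come from the IonScript that owns them:

#include <stdint.h>

static uint32_t CountRecoverInstructions(const uint8_t* snapshotsBuf,
                                         uint32_t snapshotOffset,
                                         uint32_t rvaTableSize,
                                         uint32_t listSize,
                                         const uint8_t* recoversBuf,
                                         uint32_t recoversSize) {
  // Position the reader at one snapshot; the header supplies the bailout
  // kind and the offset of its recover instructions.
  js::jit::SnapshotReader snapshot(snapshotsBuf, snapshotOffset, rvaTableSize,
                                   listSize);
  js::jit::BailoutKind kind = snapshot.bailoutKind();
  (void)kind;

  // The recover reader decodes the instruction stream referenced by the
  // snapshot header.
  js::jit::RecoverReader recover(snapshot, recoversBuf, recoversSize);
  return recover.numInstructions();
}
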
diff --git a/js/src/jit/StackSlotAllocator.h b/js/src/jit/StackSlotAllocator.h
new file mode 100644
index 0000000000..74a2ab18bd
--- /dev/null
+++ b/js/src/jit/StackSlotAllocator.h
@@ -0,0 +1,133 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_StackSlotAllocator_h
+#define jit_StackSlotAllocator_h
+
+#include "jit/Registers.h"
+
+namespace js {
+namespace jit {
+
+class StackSlotAllocator {
+ js::Vector<uint32_t, 4, SystemAllocPolicy> normalSlots;
+ js::Vector<uint32_t, 4, SystemAllocPolicy> doubleSlots;
+ uint32_t height_;
+
+ void addAvailableSlot(uint32_t index) {
+ // Ignoring OOM here (and below) is fine; it just means the stack slot
+ // will be unused.
+ (void)normalSlots.append(index);
+ }
+ void addAvailableDoubleSlot(uint32_t index) {
+ (void)doubleSlots.append(index);
+ }
+
+ uint32_t allocateQuadSlot() {
+ // This relies on the fact that any architecture-specific alignment of the
+ // stack pointer is done up front.
+ if (height_ % 8 != 0) {
+ addAvailableSlot(height_ += 4);
+ }
+ if (height_ % 16 != 0) {
+ addAvailableDoubleSlot(height_ += 8);
+ }
+ return height_ += 16;
+ }
+ uint32_t allocateDoubleSlot() {
+ if (!doubleSlots.empty()) {
+ return doubleSlots.popCopy();
+ }
+ if (height_ % 8 != 0) {
+ addAvailableSlot(height_ += 4);
+ }
+ return height_ += 8;
+ }
+ uint32_t allocateSlot() {
+ if (!normalSlots.empty()) {
+ return normalSlots.popCopy();
+ }
+ if (!doubleSlots.empty()) {
+ uint32_t index = doubleSlots.popCopy();
+ addAvailableSlot(index - 4);
+ return index;
+ }
+ return height_ += 4;
+ }
+
+ public:
+ StackSlotAllocator() : height_(0) {}
+
+ void allocateStackArea(LStackArea* alloc) {
+ uint32_t size = alloc->size();
+
+ MOZ_ASSERT(size % 4 == 0);
+ switch (alloc->alignment()) {
+ case 8:
+ if ((height_ + size) % 8 != 0) {
+ addAvailableSlot(height_ += 4);
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected stack results area alignment");
+ }
+ MOZ_ASSERT((height_ + size) % alloc->alignment() == 0);
+
+ height_ += size;
+ alloc->setBase(height_);
+ }
+
+ static uint32_t width(LDefinition::Type type) {
+ switch (type) {
+#if JS_BITS_PER_WORD == 32
+ case LDefinition::GENERAL:
+ case LDefinition::OBJECT:
+ case LDefinition::SLOTS:
+#endif
+#ifdef JS_NUNBOX32
+ case LDefinition::TYPE:
+ case LDefinition::PAYLOAD:
+#endif
+ case LDefinition::INT32:
+ case LDefinition::FLOAT32:
+ return 4;
+#if JS_BITS_PER_WORD == 64
+ case LDefinition::GENERAL:
+ case LDefinition::OBJECT:
+ case LDefinition::SLOTS:
+#endif
+#ifdef JS_PUNBOX64
+ case LDefinition::BOX:
+#endif
+ case LDefinition::DOUBLE:
+ return 8;
+ case LDefinition::SIMD128:
+ return 16;
+ case LDefinition::STACKRESULTS:
+ MOZ_CRASH("Stack results area must be allocated manually");
+ }
+ MOZ_CRASH("Unknown slot type");
+ }
+
+ uint32_t allocateSlot(LDefinition::Type type) {
+ switch (width(type)) {
+ case 4:
+ return allocateSlot();
+ case 8:
+ return allocateDoubleSlot();
+ case 16:
+ return allocateQuadSlot();
+ }
+ MOZ_CRASH("Unknown slot width");
+ }
+
+ uint32_t stackHeight() const { return height_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_StackSlotAllocator_h */
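
A small usage sketch for the allocator, assuming LDefinition is visible at the include site (this header does not pull it in itself); the slot types are examples and the returned offsets depend on allocation order.

#include <stdint.h>

#include "jit/LIR.h"  // assumed provider of LDefinition
#include "jit/StackSlotAllocator.h"

static uint32_t AllocateExampleSlots() {
  js::jit::StackSlotAllocator allocator;

  // 4-byte slot for an int32 value.
  uint32_t intSlot = allocator.allocateSlot(js::jit::LDefinition::INT32);
  // 8-byte slot for a double; the allocator may first publish a 4-byte
  // padding slot to reach 8-byte alignment.
  uint32_t doubleSlot = allocator.allocateSlot(js::jit::LDefinition::DOUBLE);
  // 16-byte slot for a SIMD128 value, aligned to 16 bytes the same way.
  uint32_t simdSlot = allocator.allocateSlot(js::jit::LDefinition::SIMD128);
  (void)intSlot;
  (void)doubleSlot;
  (void)simdSlot;

  // Total stack height the frame must reserve for these slots.
  return allocator.stackHeight();
}

Padding slots created for alignment are pushed onto the free lists, so later 4-byte or 8-byte requests can reuse them.
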
diff --git a/js/src/jit/TemplateObject-inl.h b/js/src/jit/TemplateObject-inl.h
new file mode 100644
index 0000000000..3bfeb8e72d
--- /dev/null
+++ b/js/src/jit/TemplateObject-inl.h
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_TemplateObject_inl_h
+#define jit_TemplateObject_inl_h
+
+#include "jit/TemplateObject.h"
+
+#include "vm/EnvironmentObject.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/RegExpObject.h"
+
+namespace js {
+namespace jit {
+
+inline gc::AllocKind TemplateObject::getAllocKind() const {
+ return obj_->asTenured().getAllocKind();
+}
+
+inline bool TemplateObject::isNativeObject() const {
+ return obj_->is<NativeObject>();
+}
+
+inline bool TemplateObject::isArrayObject() const {
+ return obj_->is<ArrayObject>();
+}
+
+inline bool TemplateObject::isArgumentsObject() const {
+ return obj_->is<ArgumentsObject>();
+}
+
+inline bool TemplateObject::isTypedArrayObject() const {
+ return obj_->is<TypedArrayObject>();
+}
+
+inline bool TemplateObject::isRegExpObject() const {
+ return obj_->is<RegExpObject>();
+}
+
+inline bool TemplateObject::isCallObject() const {
+ return obj_->is<CallObject>();
+}
+
+inline bool TemplateObject::isBlockLexicalEnvironmentObject() const {
+ return obj_->is<BlockLexicalEnvironmentObject>();
+}
+
+inline bool TemplateObject::isPlainObject() const {
+ return obj_->is<PlainObject>();
+}
+
+inline gc::Cell* TemplateObject::shape() const {
+ Shape* shape = obj_->shape();
+ MOZ_ASSERT(!shape->isDictionary());
+ return shape;
+}
+
+inline const TemplateNativeObject& TemplateObject::asTemplateNativeObject()
+ const {
+ MOZ_ASSERT(isNativeObject());
+ return *static_cast<const TemplateNativeObject*>(this);
+}
+
+inline bool TemplateNativeObject::hasDynamicSlots() const {
+ return asNativeObject().hasDynamicSlots();
+}
+
+inline uint32_t TemplateNativeObject::numDynamicSlots() const {
+ return asNativeObject().numDynamicSlots();
+}
+
+inline uint32_t TemplateNativeObject::numUsedFixedSlots() const {
+ return asNativeObject().numUsedFixedSlots();
+}
+
+inline uint32_t TemplateNativeObject::numFixedSlots() const {
+ return asNativeObject().numFixedSlots();
+}
+
+inline uint32_t TemplateNativeObject::slotSpan() const {
+ return asNativeObject().sharedShape()->slotSpan();
+}
+
+inline Value TemplateNativeObject::getSlot(uint32_t i) const {
+ return asNativeObject().getSlot(i);
+}
+
+inline const Value* TemplateNativeObject::getDenseElements() const {
+ return asNativeObject().getDenseElements();
+}
+
+#ifdef DEBUG
+inline bool TemplateNativeObject::isSharedMemory() const {
+ return asNativeObject().isSharedMemory();
+}
+#endif
+
+inline uint32_t TemplateNativeObject::getDenseCapacity() const {
+ return asNativeObject().getDenseCapacity();
+}
+
+inline uint32_t TemplateNativeObject::getDenseInitializedLength() const {
+ return asNativeObject().getDenseInitializedLength();
+}
+
+inline uint32_t TemplateNativeObject::getArrayLength() const {
+ return obj_->as<ArrayObject>().length();
+}
+
+inline bool TemplateNativeObject::hasDynamicElements() const {
+ return asNativeObject().hasDynamicElements();
+}
+
+inline gc::Cell* TemplateNativeObject::regExpShared() const {
+ RegExpObject* regexp = &obj_->as<RegExpObject>();
+ MOZ_ASSERT(regexp->hasShared());
+ return regexp->getShared();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_TemplateObject_inl_h */
diff --git a/js/src/jit/TemplateObject.h b/js/src/jit/TemplateObject.h
new file mode 100644
index 0000000000..3620b94738
--- /dev/null
+++ b/js/src/jit/TemplateObject.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_TemplateObject_h
+#define jit_TemplateObject_h
+
+#include "vm/NativeObject.h"
+#include "vm/Shape.h"
+
+namespace js {
+namespace jit {
+
+class TemplateNativeObject;
+
+// Wrapper for template objects. This should only expose methods that can be
+// safely called off-thread without racing with the main thread.
+class TemplateObject {
+ protected:
+ JSObject* obj_;
+
+ public:
+ explicit TemplateObject(JSObject* obj) : obj_(obj) {}
+
+ inline gc::AllocKind getAllocKind() const;
+
+ // The following methods rely on the object's group->clasp. This is safe
+ // to read off-thread for template objects.
+ inline bool isNativeObject() const;
+ inline const TemplateNativeObject& asTemplateNativeObject() const;
+ inline bool isArrayObject() const;
+ inline bool isArgumentsObject() const;
+ inline bool isTypedArrayObject() const;
+ inline bool isRegExpObject() const;
+ inline bool isCallObject() const;
+ inline bool isBlockLexicalEnvironmentObject() const;
+ inline bool isPlainObject() const;
+
+ // The shape should not change. This is true for template objects because
+ // they're never exposed to arbitrary script.
+ inline gc::Cell* shape() const;
+};
+
+class TemplateNativeObject : public TemplateObject {
+ protected:
+ NativeObject& asNativeObject() const { return obj_->as<NativeObject>(); }
+
+ public:
+ // Reading slot counts and object slots is safe, as long as we don't touch
+ // the BaseShape (it can change when we create a ShapeTable for the shape).
+ inline bool hasDynamicSlots() const;
+ inline uint32_t numDynamicSlots() const;
+ inline uint32_t numUsedFixedSlots() const;
+ inline uint32_t numFixedSlots() const;
+ inline uint32_t slotSpan() const;
+ inline Value getSlot(uint32_t i) const;
+
+ // Reading ObjectElements fields is safe, except for the flags.
+ // isSharedMemory is an exception: it's debug-only and not called on arrays.
+#ifdef DEBUG
+ inline bool isSharedMemory() const;
+#endif
+ inline uint32_t getDenseCapacity() const;
+ inline uint32_t getDenseInitializedLength() const;
+ inline uint32_t getArrayLength() const;
+ inline bool hasDynamicElements() const;
+ inline const Value* getDenseElements() const;
+
+ inline gc::Cell* regExpShared() const;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_TemplateObject_h */
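
A hedged sketch of how an off-thread consumer might go through this wrapper instead of touching the JSObject directly; the object is assumed to be a template object recorded as compilation input.

#include <stdint.h>

#include "jit/TemplateObject-inl.h"

static uint32_t ExampleTemplateSlotSpan(JSObject* obj) {
  js::jit::TemplateObject templateObj(obj);
  if (!templateObj.isNativeObject()) {
    return 0;
  }
  const js::jit::TemplateNativeObject& nativeTemplate =
      templateObj.asTemplateNativeObject();
  // Slot counts are safe to read off-thread, per the comments above.
  return nativeTemplate.slotSpan();
}
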
diff --git a/js/src/jit/Trampoline.cpp b/js/src/jit/Trampoline.cpp
new file mode 100644
index 0000000000..bb2e81e183
--- /dev/null
+++ b/js/src/jit/Trampoline.cpp
@@ -0,0 +1,260 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <initializer_list>
+
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "vm/JitActivation.h"
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void JitRuntime::generateExceptionTailStub(MacroAssembler& masm,
+ Label* profilerExitTail,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateExceptionTailStub");
+
+ exceptionTailOffset_ = startTrampolineCode(masm);
+
+ masm.bind(masm.failureLabel());
+ masm.handleFailureWithHandlerTail(profilerExitTail, bailoutTail);
+}
+
+void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
+ Label* profilerExitTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateProfilerExitFrameTailStub");
+
+ profilerExitFrameTailOffset_ = startTrampolineCode(masm);
+ masm.bind(profilerExitTail);
+
+ static constexpr size_t CallerFPOffset =
+ CommonFrameLayout::offsetOfCallerFramePtr();
+
+ // Assert the caller frame's type is one of the types we expect.
+ auto emitAssertPrevFrameType = [&masm](
+ Register framePtr, Register scratch,
+ std::initializer_list<FrameType> types) {
+#ifdef DEBUG
+ masm.loadPtr(Address(framePtr, CommonFrameLayout::offsetOfDescriptor()),
+ scratch);
+ masm.and32(Imm32(FRAMETYPE_MASK), scratch);
+
+ Label checkOk;
+ for (FrameType type : types) {
+ masm.branch32(Assembler::Equal, scratch, Imm32(type), &checkOk);
+ }
+ masm.assumeUnreachable("Unexpected previous frame");
+ masm.bind(&checkOk);
+#else
+ (void)masm;
+#endif
+ };
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(JSReturnOperand);
+ Register scratch = regs.takeAny();
+
+ // The code generated below expects that the current frame pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately before
+ // the frame epilogue and ret(). Thus, after this stub's business is done, it
+ // restores the frame pointer and stack pointer, then executes a ret() and
+ // returns directly to the caller frame, on behalf of the callee script that
+ // jumped to this code.
+ //
+ // Thus the expected state is:
+ //
+ // [JitFrameLayout] <-- FramePointer
+ // [frame contents] <-- StackPointer
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame.
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a Baseline or Ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion (or Baseline JSOp::Resume)
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- IonICCall <---- Ion
+ // |
+ // ^--- Arguments Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // | |
+ // | ^--- Entry Frame (CppToJSJit or WasmToJSJit)
+ // |
+ // ^--- Entry Frame (CppToJSJit or WasmToJSJit)
+ // |
+ // ^--- Entry Frame (BaselineInterpreter)
+ //
+ // NOTE: Keep this in sync with JSJitProfilingFrameIterator::moveToNextFrame!
+
+ Register actReg = regs.takeAny();
+ masm.loadJSContext(actReg);
+ masm.loadPtr(Address(actReg, offsetof(JSContext, profilingActivation_)),
+ actReg);
+
+ Address lastProfilingFrame(actReg,
+ JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg,
+ JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+ // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch, ImmWord(0), &checkOk);
+ masm.branchPtr(Assembler::Equal, FramePointer, scratch, &checkOk);
+ masm.assumeUnreachable(
+ "Mismatch between stored lastProfilingFrame and current frame "
+ "pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Move FP into a scratch register and use that scratch register below, to
+ // allow unwrapping rectifier frames without clobbering FP.
+ Register fpScratch = regs.takeAny();
+ masm.mov(FramePointer, fpScratch);
+
+ Label again;
+ masm.bind(&again);
+
+ // Load the frame descriptor into |scratch|, figure out what to do depending
+ // on its type.
+ masm.loadPtr(Address(fpScratch, JitFrameLayout::offsetOfDescriptor()),
+ scratch);
+ masm.and32(Imm32(FRAMETYPE_MASK), scratch);
+
+ // Handling of each case depends on FrameDescriptor.type.
+ Label handle_BaselineOrIonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonICCall;
+ Label handle_Entry;
+
+ // We check for IonJS and BaselineStub first because these are the most common
+ // types. Calls from Baseline are usually from a BaselineStub frame.
+ masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::IonJS),
+ &handle_BaselineOrIonJS);
+ masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::BaselineStub),
+ &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::Rectifier),
+ &handle_Rectifier);
+ if (JitOptions.emitInterpreterEntryTrampoline) {
+ masm.branch32(Assembler::Equal, scratch,
+ Imm32(FrameType::BaselineInterpreterEntry),
+ &handle_Rectifier); // Handle this similarly to rectifier.
+ }
+ masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::CppToJSJit),
+ &handle_Entry);
+ masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::BaselineJS),
+ &handle_BaselineOrIonJS);
+ masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::IonICCall),
+ &handle_IonICCall);
+ masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::WasmToJSJit),
+ &handle_Entry);
+
+ masm.assumeUnreachable(
+ "Invalid caller frame type when returning from a JIT frame.");
+
+ masm.bind(&handle_BaselineOrIonJS);
+ {
+ // Returning directly to a Baseline or Ion frame.
+
+ // lastProfilingCallSite := ReturnAddress
+ masm.loadPtr(Address(fpScratch, JitFrameLayout::offsetOfReturnAddress()),
+ scratch);
+ masm.storePtr(scratch, lastProfilingCallSite);
+
+ // lastProfilingFrame := CallerFrame
+ masm.loadPtr(Address(fpScratch, CallerFPOffset), scratch);
+ masm.storePtr(scratch, lastProfilingFrame);
+
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.ret();
+ }
+
+ // Shared implementation for BaselineStub and IonICCall frames.
+ auto emitHandleStubFrame = [&](FrameType expectedPrevType) {
+ // Load pointer to stub frame and assert type of its caller frame.
+ masm.loadPtr(Address(fpScratch, CallerFPOffset), fpScratch);
+ emitAssertPrevFrameType(fpScratch, scratch, {expectedPrevType});
+
+ // lastProfilingCallSite := StubFrame.ReturnAddress
+ masm.loadPtr(Address(fpScratch, CommonFrameLayout::offsetOfReturnAddress()),
+ scratch);
+ masm.storePtr(scratch, lastProfilingCallSite);
+
+ // lastProfilingFrame := StubFrame.CallerFrame
+ masm.loadPtr(Address(fpScratch, CallerFPOffset), scratch);
+ masm.storePtr(scratch, lastProfilingFrame);
+
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.ret();
+ };
+
+ masm.bind(&handle_BaselineStub);
+ {
+ // BaselineJS => BaselineStub frame.
+ emitHandleStubFrame(FrameType::BaselineJS);
+ }
+
+ masm.bind(&handle_IonICCall);
+ {
+ // IonJS => IonICCall frame.
+ emitHandleStubFrame(FrameType::IonJS);
+ }
+
+ masm.bind(&handle_Rectifier);
+ {
+ // There can be multiple previous frame types, so just "unwrap" the
+ // arguments rectifier frame and try again.
+ masm.loadPtr(Address(fpScratch, CallerFPOffset), fpScratch);
+ emitAssertPrevFrameType(fpScratch, scratch,
+ {FrameType::IonJS, FrameType::BaselineStub,
+ FrameType::CppToJSJit, FrameType::WasmToJSJit});
+ masm.jump(&again);
+ }
+
+ masm.bind(&handle_Entry);
+ {
+ // FrameType::CppToJSJit / FrameType::WasmToJSJit
+ //
+ // A fast-path wasm->jit transition frame is an entry frame from the point
+ // of view of the JIT.
+ // Store null into both fields.
+ masm.movePtr(ImmPtr(nullptr), scratch);
+ masm.storePtr(scratch, lastProfilingCallSite);
+ masm.storePtr(scratch, lastProfilingFrame);
+
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.ret();
+ }
+}
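
For readers of the assembly above, here is a plain C++ model of the walk it emits. ModelFrame, ModelFrameType, and the helper are illustrative stand-ins for CommonFrameLayout, FrameType, and the generated code, not types from this patch; as in the trampoline, a frame's descriptor records the type of its caller.

enum class ModelFrameType {
  IonJS,
  BaselineJS,
  BaselineStub,
  IonICCall,
  Rectifier,
  Entry
};

struct ModelFrame {
  ModelFrame* callerFP;       // mirrors CommonFrameLayout::offsetOfCallerFramePtr()
  void* returnAddress;        // mirrors CommonFrameLayout::offsetOfReturnAddress()
  ModelFrameType callerType;  // mirrors the FRAMETYPE_MASK bits of the descriptor
};

static void ModelProfilerExitTail(ModelFrame* fp, void** lastProfilingFrame,
                                  void** lastProfilingCallSite) {
  for (;;) {
    switch (fp->callerType) {
      case ModelFrameType::IonJS:
      case ModelFrameType::BaselineJS:
        // handle_BaselineOrIonJS: the exiting frame's return address and
        // caller FP already describe the previous JS frame.
        *lastProfilingCallSite = fp->returnAddress;
        *lastProfilingFrame = fp->callerFP;
        return;
      case ModelFrameType::BaselineStub:
      case ModelFrameType::IonICCall: {
        // emitHandleStubFrame: step into the stub frame and record its
        // return address and its caller (the real JS frame).
        ModelFrame* stub = fp->callerFP;
        *lastProfilingCallSite = stub->returnAddress;
        *lastProfilingFrame = stub->callerFP;
        return;
      }
      case ModelFrameType::Rectifier:
        // handle_Rectifier (and BaselineInterpreterEntry): unwrap the frame
        // and classify its caller instead.
        fp = fp->callerFP;
        continue;
      case ModelFrameType::Entry:
        // handle_Entry (CppToJSJit / WasmToJSJit): no JS caller to record.
        *lastProfilingCallSite = nullptr;
        *lastProfilingFrame = nullptr;
        return;
    }
  }
}
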
diff --git a/js/src/jit/TrialInlining.cpp b/js/src/jit/TrialInlining.cpp
new file mode 100644
index 0000000000..f4befca1fa
--- /dev/null
+++ b/js/src/jit/TrialInlining.cpp
@@ -0,0 +1,928 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/TrialInlining.h"
+
+#include "jit/BaselineCacheIRCompiler.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/CacheIRCloner.h"
+#include "jit/CacheIRHealth.h"
+#include "jit/CacheIRWriter.h"
+#include "jit/Ion.h" // TooManyFormalArguments
+
+#include "vm/BytecodeLocation-inl.h"
+
+using mozilla::Maybe;
+
+namespace js {
+namespace jit {
+
+bool DoTrialInlining(JSContext* cx, BaselineFrame* frame) {
+ RootedScript script(cx, frame->script());
+ ICScript* icScript = frame->icScript();
+ bool isRecursive = icScript->depth() > 0;
+
+#ifdef JS_CACHEIR_SPEW
+ if (cx->spewer().enabled(cx, script, SpewChannel::CacheIRHealthReport)) {
+ for (uint32_t i = 0; i < icScript->numICEntries(); i++) {
+ ICEntry& entry = icScript->icEntry(i);
+ ICFallbackStub* fallbackStub = icScript->fallbackStub(i);
+
+ // If the IC is megamorphic or generic, then we have already
+ // spewed the IC report on transition.
+ if (!(uint8_t(fallbackStub->state().mode()) > 0)) {
+ jit::ICStub* stub = entry.firstStub();
+ bool sawNonZeroCount = false;
+ while (!stub->isFallback()) {
+ uint32_t count = stub->enteredCount();
+ if (count > 0 && sawNonZeroCount) {
+ CacheIRHealth cih;
+ cih.healthReportForIC(cx, &entry, fallbackStub, script,
+ SpewContext::TrialInlining);
+ break;
+ }
+
+ if (count > 0 && !sawNonZeroCount) {
+ sawNonZeroCount = true;
+ }
+
+ stub = stub->toCacheIRStub()->next();
+ }
+ }
+ }
+ }
+#endif
+
+ if (!script->canIonCompile()) {
+ return true;
+ }
+
+ // Baseline shouldn't attempt trial inlining in scripts that are too large.
+ MOZ_ASSERT_IF(JitOptions.limitScriptSize,
+ script->length() <= JitOptions.ionMaxScriptSize);
+
+ const uint32_t MAX_INLINING_DEPTH = 4;
+ if (icScript->depth() > MAX_INLINING_DEPTH) {
+ return true;
+ }
+
+ InliningRoot* root = isRecursive ? icScript->inliningRoot()
+ : script->jitScript()->inliningRoot();
+ if (JitSpewEnabled(JitSpew_WarpTrialInlining)) {
+ // Eagerly create the inlining root when it's used in the spew output.
+ if (!root) {
+ MOZ_ASSERT(!isRecursive);
+ root = script->jitScript()->getOrCreateInliningRoot(cx, script);
+ if (!root) {
+ return false;
+ }
+ }
+ UniqueChars funName;
+ if (script->function() && script->function()->displayAtom()) {
+ funName = AtomToPrintableString(cx, script->function()->displayAtom());
+ }
+
+ JitSpew(
+ JitSpew_WarpTrialInlining,
+ "Trial inlining for %s script '%s' (%s:%u:%u (%p)) (inliningRoot=%p)",
+ (isRecursive ? "inner" : "outer"),
+ funName ? funName.get() : "<unnamed>", script->filename(),
+ script->lineno(), script->column(), frame->script(), root);
+ JitSpewIndent spewIndent(JitSpew_WarpTrialInlining);
+ }
+
+ TrialInliner inliner(cx, script, icScript);
+ return inliner.tryInlining();
+}
+
+void TrialInliner::cloneSharedPrefix(ICCacheIRStub* stub,
+ const uint8_t* endOfPrefix,
+ CacheIRWriter& writer) {
+ CacheIRReader reader(stub->stubInfo());
+ CacheIRCloner cloner(stub);
+ while (reader.currentPosition() < endOfPrefix) {
+ CacheOp op = reader.readOp();
+ cloner.cloneOp(op, reader, writer);
+ }
+}
+
+bool TrialInliner::replaceICStub(ICEntry& entry, ICFallbackStub* fallback,
+ CacheIRWriter& writer, CacheKind kind) {
+ MOZ_ASSERT(fallback->trialInliningState() == TrialInliningState::Candidate);
+
+ fallback->discardStubs(cx(), &entry);
+
+ // Note: AttachBaselineCacheIRStub never throws an exception.
+ ICAttachResult result = AttachBaselineCacheIRStub(
+ cx(), writer, kind, script_, icScript_, fallback, "TrialInline");
+ if (result == ICAttachResult::Attached) {
+ MOZ_ASSERT(fallback->trialInliningState() == TrialInliningState::Inlined);
+ return true;
+ }
+
+ MOZ_ASSERT(fallback->trialInliningState() == TrialInliningState::Candidate);
+ icScript_->removeInlinedChild(fallback->pcOffset());
+
+ if (result == ICAttachResult::OOM) {
+ ReportOutOfMemory(cx());
+ return false;
+ }
+
+ // We failed to attach a new IC stub due to CacheIR size limits. Disable trial
+ // inlining for this location and return true.
+ MOZ_ASSERT(result == ICAttachResult::TooLarge);
+ fallback->setTrialInliningState(TrialInliningState::Failure);
+ return true;
+}
+
+ICCacheIRStub* TrialInliner::maybeSingleStub(const ICEntry& entry) {
+ // Look for a single non-fallback stub followed by stubs with entered-count 0.
+ // Allow one optimized stub before the fallback stub to support the
+ // CallIRGenerator::emitCalleeGuard optimization where we first try a
+ // GuardSpecificFunction guard before falling back to GuardFunctionHasScript.
+ ICStub* stub = entry.firstStub();
+ if (stub->isFallback()) {
+ return nullptr;
+ }
+ ICStub* next = stub->toCacheIRStub()->next();
+ if (next->enteredCount() != 0) {
+ return nullptr;
+ }
+
+ ICFallbackStub* fallback = nullptr;
+ if (next->isFallback()) {
+ fallback = next->toFallbackStub();
+ } else {
+ ICStub* nextNext = next->toCacheIRStub()->next();
+ if (!nextNext->isFallback() || nextNext->enteredCount() != 0) {
+ return nullptr;
+ }
+ fallback = nextNext->toFallbackStub();
+ }
+
+ if (fallback->trialInliningState() != TrialInliningState::Candidate) {
+ return nullptr;
+ }
+
+ return stub->toCacheIRStub();
+}
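
The chain shapes accepted above can be summarized with a small stand-alone model; ModelStub is a stand-in for ICStub, not a real type, and the real function additionally requires the fallback's trial-inlining state to be Candidate.

#include <stdint.h>

struct ModelStub {
  bool isFallback;
  uint32_t enteredCount;
  ModelStub* next;
};

// Shape A: [entered CacheIR stub] -> [fallback].
// Shape B: [entered CacheIR stub] -> [never-entered CacheIR stub] ->
//          [fallback], the GuardSpecificFunction / GuardFunctionHasScript
//          pattern mentioned above.
static bool ModelIsSingleStubChain(const ModelStub* first) {
  if (first->isFallback) {
    return false;  // Only the fallback stub is installed.
  }
  const ModelStub* next = first->next;
  if (next->enteredCount != 0) {
    return false;  // A second stub is being hit: effectively polymorphic.
  }
  if (next->isFallback) {
    return true;  // Shape A.
  }
  const ModelStub* last = next->next;
  return last->isFallback && last->enteredCount == 0;  // Shape B.
}
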
+
+Maybe<InlinableOpData> FindInlinableOpData(ICCacheIRStub* stub,
+ BytecodeLocation loc) {
+ if (loc.isInvokeOp()) {
+ Maybe<InlinableCallData> call = FindInlinableCallData(stub);
+ if (call.isSome()) {
+ return call;
+ }
+ }
+ if (loc.isGetPropOp() || loc.isGetElemOp()) {
+ Maybe<InlinableGetterData> getter = FindInlinableGetterData(stub);
+ if (getter.isSome()) {
+ return getter;
+ }
+ }
+ if (loc.isSetPropOp()) {
+ Maybe<InlinableSetterData> setter = FindInlinableSetterData(stub);
+ if (setter.isSome()) {
+ return setter;
+ }
+ }
+ return mozilla::Nothing();
+}
+
+Maybe<InlinableCallData> FindInlinableCallData(ICCacheIRStub* stub) {
+ Maybe<InlinableCallData> data;
+
+ const CacheIRStubInfo* stubInfo = stub->stubInfo();
+ const uint8_t* stubData = stub->stubDataStart();
+
+ ObjOperandId calleeGuardOperand;
+ CallFlags flags;
+ JSFunction* target = nullptr;
+
+ CacheIRReader reader(stubInfo);
+ while (reader.more()) {
+ const uint8_t* opStart = reader.currentPosition();
+
+ CacheOp op = reader.readOp();
+ CacheIROpInfo opInfo = CacheIROpInfos[size_t(op)];
+ uint32_t argLength = opInfo.argLength;
+ mozilla::DebugOnly<const uint8_t*> argStart = reader.currentPosition();
+
+ switch (op) {
+ case CacheOp::GuardSpecificFunction: {
+ // If we see a guard, remember which operand we are guarding.
+ MOZ_ASSERT(data.isNothing());
+ calleeGuardOperand = reader.objOperandId();
+ uint32_t targetOffset = reader.stubOffset();
+ (void)reader.stubOffset(); // nargsAndFlags
+ uintptr_t rawTarget = stubInfo->getStubRawWord(stubData, targetOffset);
+ target = reinterpret_cast<JSFunction*>(rawTarget);
+ break;
+ }
+ case CacheOp::GuardFunctionScript: {
+ MOZ_ASSERT(data.isNothing());
+ calleeGuardOperand = reader.objOperandId();
+ uint32_t targetOffset = reader.stubOffset();
+ uintptr_t rawTarget = stubInfo->getStubRawWord(stubData, targetOffset);
+ target = reinterpret_cast<BaseScript*>(rawTarget)->function();
+ (void)reader.stubOffset(); // nargsAndFlags
+ break;
+ }
+ case CacheOp::CallScriptedFunction: {
+ // If we see a call, check if `callee` is the previously guarded
+ // operand. If it is, we know the target and can inline.
+ ObjOperandId calleeOperand = reader.objOperandId();
+ mozilla::DebugOnly<Int32OperandId> argcId = reader.int32OperandId();
+ flags = reader.callFlags();
+ mozilla::DebugOnly<uint32_t> argcFixed = reader.uint32Immediate();
+ MOZ_ASSERT(argcFixed <= MaxUnrolledArgCopy);
+
+ if (calleeOperand == calleeGuardOperand) {
+ MOZ_ASSERT(static_cast<OperandId&>(argcId).id() == 0);
+ MOZ_ASSERT(data.isNothing());
+ data.emplace();
+ data->endOfSharedPrefix = opStart;
+ }
+ break;
+ }
+ case CacheOp::CallInlinedFunction: {
+ ObjOperandId calleeOperand = reader.objOperandId();
+ mozilla::DebugOnly<Int32OperandId> argcId = reader.int32OperandId();
+ uint32_t icScriptOffset = reader.stubOffset();
+ flags = reader.callFlags();
+ mozilla::DebugOnly<uint32_t> argcFixed = reader.uint32Immediate();
+ MOZ_ASSERT(argcFixed <= MaxUnrolledArgCopy);
+
+ if (calleeOperand == calleeGuardOperand) {
+ MOZ_ASSERT(static_cast<OperandId&>(argcId).id() == 0);
+ MOZ_ASSERT(data.isNothing());
+ data.emplace();
+ data->endOfSharedPrefix = opStart;
+ uintptr_t rawICScript =
+ stubInfo->getStubRawWord(stubData, icScriptOffset);
+ data->icScript = reinterpret_cast<ICScript*>(rawICScript);
+ }
+ break;
+ }
+ default:
+ if (!opInfo.transpile) {
+ return mozilla::Nothing();
+ }
+ if (data.isSome()) {
+ MOZ_ASSERT(op == CacheOp::ReturnFromIC);
+ }
+ reader.skip(argLength);
+ break;
+ }
+ MOZ_ASSERT(argStart + argLength == reader.currentPosition());
+ }
+
+ if (data.isSome()) {
+ // Warp only supports inlining Standard and FunCall calls.
+ if (flags.getArgFormat() != CallFlags::Standard &&
+ flags.getArgFormat() != CallFlags::FunCall) {
+ return mozilla::Nothing();
+ }
+ data->calleeOperand = calleeGuardOperand;
+ data->callFlags = flags;
+ data->target = target;
+ }
+ return data;
+}
+
+Maybe<InlinableGetterData> FindInlinableGetterData(ICCacheIRStub* stub) {
+ Maybe<InlinableGetterData> data;
+
+ const CacheIRStubInfo* stubInfo = stub->stubInfo();
+ const uint8_t* stubData = stub->stubDataStart();
+
+ CacheIRReader reader(stubInfo);
+ while (reader.more()) {
+ const uint8_t* opStart = reader.currentPosition();
+
+ CacheOp op = reader.readOp();
+ CacheIROpInfo opInfo = CacheIROpInfos[size_t(op)];
+ uint32_t argLength = opInfo.argLength;
+ mozilla::DebugOnly<const uint8_t*> argStart = reader.currentPosition();
+
+ switch (op) {
+ case CacheOp::CallScriptedGetterResult: {
+ data.emplace();
+ data->receiverOperand = reader.valOperandId();
+
+ uint32_t getterOffset = reader.stubOffset();
+ uintptr_t rawTarget = stubInfo->getStubRawWord(stubData, getterOffset);
+ data->target = reinterpret_cast<JSFunction*>(rawTarget);
+
+ data->sameRealm = reader.readBool();
+ (void)reader.stubOffset(); // nargsAndFlags
+
+ data->endOfSharedPrefix = opStart;
+ break;
+ }
+ case CacheOp::CallInlinedGetterResult: {
+ data.emplace();
+ data->receiverOperand = reader.valOperandId();
+
+ uint32_t getterOffset = reader.stubOffset();
+ uintptr_t rawTarget = stubInfo->getStubRawWord(stubData, getterOffset);
+ data->target = reinterpret_cast<JSFunction*>(rawTarget);
+
+ uint32_t icScriptOffset = reader.stubOffset();
+ uintptr_t rawICScript =
+ stubInfo->getStubRawWord(stubData, icScriptOffset);
+ data->icScript = reinterpret_cast<ICScript*>(rawICScript);
+
+ data->sameRealm = reader.readBool();
+ (void)reader.stubOffset(); // nargsAndFlags
+
+ data->endOfSharedPrefix = opStart;
+ break;
+ }
+ default:
+ if (!opInfo.transpile) {
+ return mozilla::Nothing();
+ }
+ if (data.isSome()) {
+ MOZ_ASSERT(op == CacheOp::ReturnFromIC);
+ }
+ reader.skip(argLength);
+ break;
+ }
+ MOZ_ASSERT(argStart + argLength == reader.currentPosition());
+ }
+
+ return data;
+}
+
+Maybe<InlinableSetterData> FindInlinableSetterData(ICCacheIRStub* stub) {
+ Maybe<InlinableSetterData> data;
+
+ const CacheIRStubInfo* stubInfo = stub->stubInfo();
+ const uint8_t* stubData = stub->stubDataStart();
+
+ CacheIRReader reader(stubInfo);
+ while (reader.more()) {
+ const uint8_t* opStart = reader.currentPosition();
+
+ CacheOp op = reader.readOp();
+ CacheIROpInfo opInfo = CacheIROpInfos[size_t(op)];
+ uint32_t argLength = opInfo.argLength;
+ mozilla::DebugOnly<const uint8_t*> argStart = reader.currentPosition();
+
+ switch (op) {
+ case CacheOp::CallScriptedSetter: {
+ data.emplace();
+ data->receiverOperand = reader.objOperandId();
+
+ uint32_t setterOffset = reader.stubOffset();
+ uintptr_t rawTarget = stubInfo->getStubRawWord(stubData, setterOffset);
+ data->target = reinterpret_cast<JSFunction*>(rawTarget);
+
+ data->rhsOperand = reader.valOperandId();
+ data->sameRealm = reader.readBool();
+ (void)reader.stubOffset(); // nargsAndFlags
+
+ data->endOfSharedPrefix = opStart;
+ break;
+ }
+ case CacheOp::CallInlinedSetter: {
+ data.emplace();
+ data->receiverOperand = reader.objOperandId();
+
+ uint32_t setterOffset = reader.stubOffset();
+ uintptr_t rawTarget = stubInfo->getStubRawWord(stubData, setterOffset);
+ data->target = reinterpret_cast<JSFunction*>(rawTarget);
+
+ data->rhsOperand = reader.valOperandId();
+
+ uint32_t icScriptOffset = reader.stubOffset();
+ uintptr_t rawICScript =
+ stubInfo->getStubRawWord(stubData, icScriptOffset);
+ data->icScript = reinterpret_cast<ICScript*>(rawICScript);
+
+ data->sameRealm = reader.readBool();
+ (void)reader.stubOffset(); // nargsAndFlags
+
+ data->endOfSharedPrefix = opStart;
+ break;
+ }
+ default:
+ if (!opInfo.transpile) {
+ return mozilla::Nothing();
+ }
+ if (data.isSome()) {
+ MOZ_ASSERT(op == CacheOp::ReturnFromIC);
+ }
+ reader.skip(argLength);
+ break;
+ }
+ MOZ_ASSERT(argStart + argLength == reader.currentPosition());
+ }
+
+ return data;
+}
+
+// Return the maximum number of actual arguments that will be passed to the
+// target function. This may be an overapproximation; for example, when
+// inlining js::fun_call we may omit an argument.
+static uint32_t GetMaxCalleeNumActuals(BytecodeLocation loc) {
+ switch (loc.getOp()) {
+ case JSOp::GetProp:
+ case JSOp::GetElem:
+ // Getters do not pass arguments.
+ return 0;
+
+ case JSOp::SetProp:
+ case JSOp::StrictSetProp:
+ // Setters pass 1 argument.
+ return 1;
+
+ case JSOp::Call:
+ case JSOp::CallContent:
+ case JSOp::CallIgnoresRv:
+ case JSOp::CallIter:
+ case JSOp::CallContentIter:
+ case JSOp::New:
+ case JSOp::NewContent:
+ case JSOp::SuperCall:
+ return loc.getCallArgc();
+
+ default:
+ MOZ_CRASH("Unsupported op");
+ }
+}
+
+/*static*/
+bool TrialInliner::canInline(JSFunction* target, HandleScript caller,
+ BytecodeLocation loc) {
+ if (!target->hasJitScript()) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: no JIT script");
+ return false;
+ }
+ JSScript* script = target->nonLazyScript();
+ if (!script->jitScript()->hasBaselineScript()) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: no BaselineScript");
+ return false;
+ }
+ if (script->uninlineable()) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: uninlineable flag");
+ return false;
+ }
+ if (!script->canIonCompile()) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: can't ion-compile");
+ return false;
+ }
+ if (script->isDebuggee()) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: is debuggee");
+ return false;
+ }
+ // Don't inline cross-realm calls.
+ if (target->realm() != caller->realm()) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: cross-realm call");
+ return false;
+ }
+ if (JitOptions.onlyInlineSelfHosted && !script->selfHosted()) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: only inlining self hosted");
+ return false;
+ }
+
+ uint32_t maxCalleeNumActuals = GetMaxCalleeNumActuals(loc);
+ if (maxCalleeNumActuals > ArgumentsObject::MaxInlinedArgs) {
+ if (script->needsArgsObj()) {
+ JitSpew(JitSpew_WarpTrialInlining,
+ "SKIP: needs arguments object with %u actual args (maximum %u)",
+ maxCalleeNumActuals, ArgumentsObject::MaxInlinedArgs);
+ return false;
+ }
+ // The GetArgument(n) intrinsic in self-hosted code uses MGetInlinedArgument
+ // too, so the same limit applies.
+ if (script->usesArgumentsIntrinsics()) {
+ JitSpew(JitSpew_WarpTrialInlining,
+ "SKIP: uses GetArgument(i) with %u actual args (maximum %u)",
+ maxCalleeNumActuals, ArgumentsObject::MaxInlinedArgs);
+ return false;
+ }
+ }
+
+ if (TooManyFormalArguments(target->nargs())) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: Too many formal arguments: %u",
+ unsigned(target->nargs()));
+ return false;
+ }
+
+ if (TooManyFormalArguments(maxCalleeNumActuals)) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: argc too large: %u",
+ unsigned(loc.getCallArgc()));
+ return false;
+ }
+
+ return true;
+}
+
+TrialInliningDecision TrialInliner::getInliningDecision(JSFunction* target,
+ ICCacheIRStub* stub,
+ BytecodeLocation loc) {
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_WarpTrialInlining)) {
+ BaseScript* baseScript =
+ target->hasBaseScript() ? target->baseScript() : nullptr;
+
+ UniqueChars funName;
+ if (target->displayAtom()) {
+ funName = AtomToPrintableString(cx(), target->displayAtom());
+ }
+
+ JitSpew(JitSpew_WarpTrialInlining,
+ "Inlining candidate JSOp::%s (offset=%u): callee script '%s' "
+ "(%s:%u:%u)",
+ CodeName(loc.getOp()), loc.bytecodeToOffset(script_),
+ funName ? funName.get() : "<unnamed>",
+ baseScript ? baseScript->filename() : "<not-scripted>",
+ baseScript ? baseScript->lineno() : 0,
+ baseScript ? baseScript->column() : 0);
+ JitSpewIndent spewIndent(JitSpew_WarpTrialInlining);
+ }
+#endif
+
+ if (!canInline(target, script_, loc)) {
+ return TrialInliningDecision::NoInline;
+ }
+
+ // Don't inline (direct) recursive calls. This still allows recursion if
+ // called through another function (f => g => f).
+ JSScript* targetScript = target->nonLazyScript();
+ if (script_ == targetScript) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: recursion");
+ return TrialInliningDecision::NoInline;
+ }
+
+ // Don't inline if the callee has a loop that was hot enough to enter Warp
+ // via OSR. This helps prevent getting stuck in Baseline code for a long time.
+ if (targetScript->jitScript()->hadIonOSR()) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: had OSR");
+ return TrialInliningDecision::NoInline;
+ }
+
+ // Ensure the total bytecode size does not exceed ionMaxScriptSize.
+ size_t newTotalSize =
+ inliningRootTotalBytecodeSize() + targetScript->length();
+ if (newTotalSize > JitOptions.ionMaxScriptSize) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: total size too big");
+ return TrialInliningDecision::NoInline;
+ }
+
+ uint32_t entryCount = stub->enteredCount();
+ if (entryCount < JitOptions.inliningEntryThreshold) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: Entry count is %u (minimum %u)",
+ unsigned(entryCount), unsigned(JitOptions.inliningEntryThreshold));
+ return TrialInliningDecision::NoInline;
+ }
+
+ if (!JitOptions.isSmallFunction(targetScript)) {
+ if (!targetScript->isInlinableLargeFunction()) {
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: Length is %u (maximum %u)",
+ unsigned(targetScript->length()),
+ unsigned(JitOptions.smallFunctionMaxBytecodeLength));
+ return TrialInliningDecision::NoInline;
+ }
+
+ JitSpew(JitSpew_WarpTrialInlining,
+ "INFO: Ignored length (%u) of InlinableLargeFunction",
+ unsigned(targetScript->length()));
+ }
+
+ JitScript* jitScript = targetScript->jitScript();
+ ICScript* icScript = jitScript->icScript();
+
+ // Check for any ICs which are not monomorphic. The observation here is that
+ // trial inlining can help us a lot in cases where it lets us further
+ // specialize a script. But if it's already monomorphic, it's unlikely that
+ // we will see significant specialization wins from trial inlining, so we
+ // can use a cheaper and simpler inlining strategy.
+ for (size_t i = 0; i < icScript->numICEntries(); i++) {
+ ICEntry& entry = icScript->icEntry(i);
+ ICFallbackStub* fallback = icScript->fallbackStub(i);
+ if (fallback->enteredCount() > 0 ||
+ fallback->state().mode() != ICState::Mode::Specialized) {
+ return TrialInliningDecision::Inline;
+ }
+
+ ICStub* firstStub = entry.firstStub();
+ if (firstStub != fallback) {
+ for (ICStub* next = firstStub->toCacheIRStub()->next(); next;
+ next = next->maybeNext()) {
+ if (next->enteredCount() != 0) {
+ return TrialInliningDecision::Inline;
+ }
+ }
+ }
+ }
+
+ JitSpewIndent spewIndent(JitSpew_WarpTrialInlining);
+ JitSpew(JitSpew_WarpTrialInlining, "SUCCESS: Inlined monomorphically");
+ return TrialInliningDecision::MonomorphicInline;
+}
+
+ICScript* TrialInliner::createInlinedICScript(JSFunction* target,
+ BytecodeLocation loc) {
+ MOZ_ASSERT(target->hasJitEntry());
+ MOZ_ASSERT(target->hasJitScript());
+
+ InliningRoot* root = getOrCreateInliningRoot();
+ if (!root) {
+ return nullptr;
+ }
+
+ JSScript* targetScript = target->baseScript()->asJSScript();
+
+ // We don't have to check for overflow here because we have already
+ // successfully allocated an ICScript with this number of entries
+ // when creating the JitScript for the target function, and we
+ // checked for overflow then.
+ uint32_t fallbackStubsOffset =
+ sizeof(ICScript) + targetScript->numICEntries() * sizeof(ICEntry);
+ uint32_t allocSize = fallbackStubsOffset +
+ targetScript->numICEntries() * sizeof(ICFallbackStub);
+
+ void* raw = cx()->pod_malloc<uint8_t>(allocSize);
+ MOZ_ASSERT(uintptr_t(raw) % alignof(ICScript) == 0);
+ if (!raw) {
+ return nullptr;
+ }
+
+ uint32_t initialWarmUpCount = JitOptions.trialInliningInitialWarmUpCount;
+
+ uint32_t depth = icScript_->depth() + 1;
+ UniquePtr<ICScript> inlinedICScript(new (raw) ICScript(
+ initialWarmUpCount, fallbackStubsOffset, allocSize, depth, root));
+
+ inlinedICScript->initICEntries(cx(), targetScript);
+
+ uint32_t pcOffset = loc.bytecodeToOffset(script_);
+ ICScript* result = inlinedICScript.get();
+ if (!icScript_->addInlinedChild(cx(), std::move(inlinedICScript), pcOffset)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(result->numICEntries() == targetScript->numICEntries());
+
+ root->addToTotalBytecodeSize(targetScript->length());
+
+ JitSpewIndent spewIndent(JitSpew_WarpTrialInlining);
+ JitSpew(JitSpew_WarpTrialInlining,
+ "SUCCESS: Outer ICScript: %p Inner ICScript: %p", icScript_, result);
+
+ return result;
+}
+
+bool TrialInliner::maybeInlineCall(ICEntry& entry, ICFallbackStub* fallback,
+ BytecodeLocation loc) {
+ ICCacheIRStub* stub = maybeSingleStub(entry);
+ if (!stub) {
+#ifdef JS_JITSPEW
+ if (fallback->numOptimizedStubs() > 1) {
+ JitSpew(JitSpew_WarpTrialInlining,
+ "Inlining candidate JSOp::%s (offset=%u):", CodeName(loc.getOp()),
+ fallback->pcOffset());
+ JitSpewIndent spewIndent(JitSpew_WarpTrialInlining);
+ JitSpew(JitSpew_WarpTrialInlining, "SKIP: Polymorphic (%u stubs)",
+ (unsigned)fallback->numOptimizedStubs());
+ }
+#endif
+ return true;
+ }
+
+ MOZ_ASSERT(!icScript_->hasInlinedChild(fallback->pcOffset()));
+
+ // Look for a CallScriptedFunction with a known target.
+ Maybe<InlinableCallData> data = FindInlinableCallData(stub);
+ if (data.isNothing()) {
+ return true;
+ }
+
+ MOZ_ASSERT(!data->icScript);
+
+ TrialInliningDecision inlining = getInliningDecision(data->target, stub, loc);
+ // Decide whether to inline the target.
+ if (inlining == TrialInliningDecision::NoInline) {
+ return true;
+ }
+
+ if (inlining == TrialInliningDecision::MonomorphicInline) {
+ fallback->setTrialInliningState(TrialInliningState::MonomorphicInlined);
+ return true;
+ }
+
+ ICScript* newICScript = createInlinedICScript(data->target, loc);
+ if (!newICScript) {
+ return false;
+ }
+
+ CacheIRWriter writer(cx());
+ Int32OperandId argcId(writer.setInputOperandId(0));
+ cloneSharedPrefix(stub, data->endOfSharedPrefix, writer);
+
+ writer.callInlinedFunction(data->calleeOperand, argcId, newICScript,
+ data->callFlags,
+ ClampFixedArgc(loc.getCallArgc()));
+ writer.returnFromIC();
+
+ return replaceICStub(entry, fallback, writer, CacheKind::Call);
+}
+
+bool TrialInliner::maybeInlineGetter(ICEntry& entry, ICFallbackStub* fallback,
+ BytecodeLocation loc, CacheKind kind) {
+ ICCacheIRStub* stub = maybeSingleStub(entry);
+ if (!stub) {
+ return true;
+ }
+
+ MOZ_ASSERT(!icScript_->hasInlinedChild(fallback->pcOffset()));
+
+ Maybe<InlinableGetterData> data = FindInlinableGetterData(stub);
+ if (data.isNothing()) {
+ return true;
+ }
+
+ MOZ_ASSERT(!data->icScript);
+
+ TrialInliningDecision inlining = getInliningDecision(data->target, stub, loc);
+ // Decide whether to inline the target.
+ if (inlining == TrialInliningDecision::NoInline) {
+ return true;
+ }
+
+ if (inlining == TrialInliningDecision::MonomorphicInline) {
+ fallback->setTrialInliningState(TrialInliningState::MonomorphicInlined);
+ return true;
+ }
+
+ ICScript* newICScript = createInlinedICScript(data->target, loc);
+ if (!newICScript) {
+ return false;
+ }
+
+ CacheIRWriter writer(cx());
+ ValOperandId valId(writer.setInputOperandId(0));
+ if (kind == CacheKind::GetElem) {
+ // Register the key operand.
+ writer.setInputOperandId(1);
+ }
+ cloneSharedPrefix(stub, data->endOfSharedPrefix, writer);
+
+ writer.callInlinedGetterResult(data->receiverOperand, data->target,
+ newICScript, data->sameRealm);
+ writer.returnFromIC();
+
+ return replaceICStub(entry, fallback, writer, kind);
+}
+
+bool TrialInliner::maybeInlineSetter(ICEntry& entry, ICFallbackStub* fallback,
+ BytecodeLocation loc, CacheKind kind) {
+ ICCacheIRStub* stub = maybeSingleStub(entry);
+ if (!stub) {
+ return true;
+ }
+
+ MOZ_ASSERT(!icScript_->hasInlinedChild(fallback->pcOffset()));
+
+ Maybe<InlinableSetterData> data = FindInlinableSetterData(stub);
+ if (data.isNothing()) {
+ return true;
+ }
+
+ MOZ_ASSERT(!data->icScript);
+
+ TrialInliningDecision inlining = getInliningDecision(data->target, stub, loc);
+ // Decide whether to inline the target.
+ if (inlining == TrialInliningDecision::NoInline) {
+ return true;
+ }
+
+ if (inlining == TrialInliningDecision::MonomorphicInline) {
+ fallback->setTrialInliningState(TrialInliningState::MonomorphicInlined);
+ return true;
+ }
+
+ ICScript* newICScript = createInlinedICScript(data->target, loc);
+ if (!newICScript) {
+ return false;
+ }
+
+ CacheIRWriter writer(cx());
+ ValOperandId objValId(writer.setInputOperandId(0));
+ ValOperandId rhsValId(writer.setInputOperandId(1));
+ cloneSharedPrefix(stub, data->endOfSharedPrefix, writer);
+
+ writer.callInlinedSetter(data->receiverOperand, data->target,
+ data->rhsOperand, newICScript, data->sameRealm);
+ writer.returnFromIC();
+
+ return replaceICStub(entry, fallback, writer, kind);
+}
+
+bool TrialInliner::tryInlining() {
+ uint32_t numICEntries = icScript_->numICEntries();
+ BytecodeLocation startLoc = script_->location();
+
+ for (uint32_t icIndex = 0; icIndex < numICEntries; icIndex++) {
+ ICEntry& entry = icScript_->icEntry(icIndex);
+ ICFallbackStub* fallback = icScript_->fallbackStub(icIndex);
+
+ if (!TryFoldingStubs(cx(), fallback, script_, icScript_)) {
+ return false;
+ }
+
+ BytecodeLocation loc =
+ startLoc + BytecodeLocationOffset(fallback->pcOffset());
+ JSOp op = loc.getOp();
+ switch (op) {
+ case JSOp::Call:
+ case JSOp::CallContent:
+ case JSOp::CallIgnoresRv:
+ case JSOp::CallIter:
+ case JSOp::CallContentIter:
+ case JSOp::New:
+ case JSOp::NewContent:
+ case JSOp::SuperCall:
+ if (!maybeInlineCall(entry, fallback, loc)) {
+ return false;
+ }
+ break;
+ case JSOp::GetProp:
+ if (!maybeInlineGetter(entry, fallback, loc, CacheKind::GetProp)) {
+ return false;
+ }
+ break;
+ case JSOp::GetElem:
+ if (!maybeInlineGetter(entry, fallback, loc, CacheKind::GetElem)) {
+ return false;
+ }
+ break;
+ case JSOp::SetProp:
+ case JSOp::StrictSetProp:
+ if (!maybeInlineSetter(entry, fallback, loc, CacheKind::SetProp)) {
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
+
+InliningRoot* TrialInliner::maybeGetInliningRoot() const {
+ if (auto* root = icScript_->inliningRoot()) {
+ return root;
+ }
+
+ MOZ_ASSERT(!icScript_->isInlined());
+ return script_->jitScript()->inliningRoot();
+}
+
+InliningRoot* TrialInliner::getOrCreateInliningRoot() {
+ if (auto* root = maybeGetInliningRoot()) {
+ return root;
+ }
+ return script_->jitScript()->getOrCreateInliningRoot(cx(), script_);
+}
+
+size_t TrialInliner::inliningRootTotalBytecodeSize() const {
+ if (auto* root = maybeGetInliningRoot()) {
+ return root->totalBytecodeSize();
+ }
+ return script_->length();
+}
+
+bool InliningRoot::addInlinedScript(UniquePtr<ICScript> icScript) {
+ return inlinedScripts_.append(std::move(icScript));
+}
+
+void InliningRoot::trace(JSTracer* trc) {
+ TraceEdge(trc, &owningScript_, "inlining-root-owning-script");
+ for (auto& inlinedScript : inlinedScripts_) {
+ inlinedScript->trace(trc);
+ }
+}
+
+void InliningRoot::purgeOptimizedStubs(Zone* zone) {
+ for (auto& inlinedScript : inlinedScripts_) {
+ inlinedScript->purgeOptimizedStubs(zone);
+ }
+}
+
+void InliningRoot::resetWarmUpCounts(uint32_t count) {
+ for (auto& inlinedScript : inlinedScripts_) {
+ inlinedScript->resetWarmUpCount(count);
+ }
+}
+
+} // namespace jit
+} // namespace js
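
A hedged sketch of the entry point in use: the surrounding warm-up hook and threshold are hypothetical, and DoTrialInlining is assumed to be declared in jit/TrialInlining.h; only the call itself comes from this file.

#include <stdint.h>

#include "jit/TrialInlining.h"

static bool MaybeDoTrialInliningExample(JSContext* cx,
                                        js::jit::BaselineFrame* frame,
                                        uint32_t warmUpCount,
                                        uint32_t threshold) {
  if (warmUpCount < threshold) {
    return true;  // Not hot enough yet; nothing to do.
  }
  // Walks the frame's ICScript and replaces eligible call/getter/setter ICs
  // with specialized inlining ICs; returns false on failure so the caller
  // can propagate the error.
  return js::jit::DoTrialInlining(cx, frame);
}
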
diff --git a/js/src/jit/TrialInlining.h b/js/src/jit/TrialInlining.h
new file mode 100644
index 0000000000..53613f4981
--- /dev/null
+++ b/js/src/jit/TrialInlining.h
@@ -0,0 +1,194 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_TrialInlining_h
+#define jit_TrialInlining_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "gc/Barrier.h"
+#include "jit/CacheIR.h"
+#include "jit/ICStubSpace.h"
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/UniquePtr.h"
+#include "js/Vector.h"
+#include "vm/JSScript.h"
+
+/*
+ * [SMDOC] Trial Inlining
+ *
+ * WarpBuilder relies on transpiling CacheIR. When inlining scripted
+ * functions in WarpBuilder, we want our ICs to be as monomorphic as
+ * possible. Functions with multiple callers complicate this. An IC in
+ * such a function might be monomorphic for any given caller, but
+ * polymorphic overall. This makes the input to WarpBuilder less precise.
+ *
+ * To solve this problem, we do trial inlining. During baseline
+ * execution, we identify call sites for which it would be useful to
+ * have more precise inlining data. For each such call site, we
+ * allocate a fresh ICScript and replace the existing call IC with a
+ * new specialized IC that invokes the callee using the new
+ * ICScript. Other callers of the callee will continue using the
+ * default ICScript. When we eventually Warp-compile the script, we
+ * can generate code for the callee using the IC information in our
+ * private ICScript, which is specialized for its caller.
+ *
+ * The same approach can be used to inline recursively.
+ */
+
+class JS_PUBLIC_API JSTracer;
+struct JS_PUBLIC_API JSContext;
+
+class JSFunction;
+
+namespace JS {
+class Zone;
+}
+
+namespace js {
+
+class BytecodeLocation;
+
+namespace jit {
+
+class BaselineFrame;
+class CacheIRWriter;
+class ICCacheIRStub;
+class ICEntry;
+class ICFallbackStub;
+class ICScript;
+
+/*
+ * An InliningRoot is owned by a JitScript. In turn, it owns the set
+ * of ICScripts that are candidates for being inlined in that JitScript.
+ */
+class InliningRoot {
+ public:
+ explicit InliningRoot(JSContext* cx, JSScript* owningScript)
+ : owningScript_(owningScript),
+ inlinedScripts_(cx),
+ totalBytecodeSize_(owningScript->length()) {}
+
+ JitScriptICStubSpace* jitScriptStubSpace() { return &jitScriptStubSpace_; }
+
+ void trace(JSTracer* trc);
+
+ bool addInlinedScript(js::UniquePtr<ICScript> icScript);
+
+ uint32_t numInlinedScripts() const { return inlinedScripts_.length(); }
+
+ void purgeOptimizedStubs(Zone* zone);
+ void resetWarmUpCounts(uint32_t count);
+
+ JSScript* owningScript() const { return owningScript_; }
+
+ size_t totalBytecodeSize() const { return totalBytecodeSize_; }
+
+ void addToTotalBytecodeSize(size_t size) { totalBytecodeSize_ += size; }
+
+ private:
+ JitScriptICStubSpace jitScriptStubSpace_ = {};
+ HeapPtr<JSScript*> owningScript_;
+ js::Vector<js::UniquePtr<ICScript>> inlinedScripts_;
+
+ // Bytecode size of outer script and all inlined scripts.
+ size_t totalBytecodeSize_;
+};
+
+class InlinableOpData {
+ public:
+ JSFunction* target = nullptr;
+ ICScript* icScript = nullptr;
+ const uint8_t* endOfSharedPrefix = nullptr;
+};
+
+class InlinableCallData : public InlinableOpData {
+ public:
+ ObjOperandId calleeOperand;
+ CallFlags callFlags;
+};
+
+class InlinableGetterData : public InlinableOpData {
+ public:
+ ValOperandId receiverOperand;
+ bool sameRealm = false;
+};
+
+class InlinableSetterData : public InlinableOpData {
+ public:
+ ObjOperandId receiverOperand;
+ ValOperandId rhsOperand;
+ bool sameRealm = false;
+};
+
+mozilla::Maybe<InlinableOpData> FindInlinableOpData(ICCacheIRStub* stub,
+ BytecodeLocation loc);
+
+mozilla::Maybe<InlinableCallData> FindInlinableCallData(ICCacheIRStub* stub);
+mozilla::Maybe<InlinableGetterData> FindInlinableGetterData(
+ ICCacheIRStub* stub);
+mozilla::Maybe<InlinableSetterData> FindInlinableSetterData(
+ ICCacheIRStub* stub);
+
+enum class TrialInliningDecision {
+ NoInline,
+ Inline,
+ MonomorphicInline,
+};
+
+class MOZ_RAII TrialInliner {
+ public:
+ TrialInliner(JSContext* cx, HandleScript script, ICScript* icScript)
+ : cx_(cx), script_(script), icScript_(icScript) {}
+
+ JSContext* cx() { return cx_; }
+
+ [[nodiscard]] bool tryInlining();
+ [[nodiscard]] bool maybeInlineCall(ICEntry& entry, ICFallbackStub* fallback,
+ BytecodeLocation loc);
+ [[nodiscard]] bool maybeInlineGetter(ICEntry& entry, ICFallbackStub* fallback,
+ BytecodeLocation loc, CacheKind kind);
+ [[nodiscard]] bool maybeInlineSetter(ICEntry& entry, ICFallbackStub* fallback,
+ BytecodeLocation loc, CacheKind kind);
+
+ static bool canInline(JSFunction* target, HandleScript caller,
+ BytecodeLocation loc);
+
+ private:
+ ICCacheIRStub* maybeSingleStub(const ICEntry& entry);
+ void cloneSharedPrefix(ICCacheIRStub* stub, const uint8_t* endOfPrefix,
+ CacheIRWriter& writer);
+ ICScript* createInlinedICScript(JSFunction* target, BytecodeLocation loc);
+ [[nodiscard]] bool replaceICStub(ICEntry& entry, ICFallbackStub* fallback,
+ CacheIRWriter& writer, CacheKind kind);
+
+ TrialInliningDecision getInliningDecision(JSFunction* target,
+ ICCacheIRStub* stub,
+ BytecodeLocation loc);
+
+ InliningRoot* getOrCreateInliningRoot();
+ InliningRoot* maybeGetInliningRoot() const;
+ size_t inliningRootTotalBytecodeSize() const;
+
+ JSContext* cx_;
+ HandleScript script_;
+ ICScript* icScript_;
+};
+
+bool DoTrialInlining(JSContext* cx, BaselineFrame* frame);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_TrialInlining_h */
diff --git a/js/src/jit/TypeData.h b/js/src/jit/TypeData.h
new file mode 100644
index 0000000000..4a05895e87
--- /dev/null
+++ b/js/src/jit/TypeData.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_TypeData_h
+#define jit_TypeData_h
+
+#include "js/Value.h"
+
+namespace js {
+namespace jit {
+
+class TypeData {
+ JSValueType type_;
+
+ public:
+ TypeData() : type_(JSVAL_TYPE_UNKNOWN) {}
+ explicit TypeData(JSValueType type) : type_(type) {}
+
+ JSValueType type() const { return type_; }
+ bool hasData() const { return type_ != JSVAL_TYPE_UNKNOWN; }
+};
+
+class TypeDataList {
+ const static size_t MaxLength = 6;
+
+ uint8_t count_ = 0;
+ TypeData typeData_[MaxLength];
+
+ public:
+ TypeDataList() {}
+
+ uint8_t count() const { return count_; }
+
+ void addTypeData(TypeData data) {
+ MOZ_ASSERT(count_ < MaxLength);
+ MOZ_ASSERT(!typeData_[count_].hasData());
+ typeData_[count_++] = data;
+ }
+ TypeData get(uint32_t idx) const {
+ MOZ_ASSERT(idx < count_);
+ return typeData_[idx];
+ }
+
+ const TypeData* begin() const { return &typeData_[0]; }
+ const TypeData* end() const { return begin() + count_; }
+};
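+
+// Illustrative usage sketch (the values below are assumptions, not part of
+// this header):
+//
+//   TypeDataList list;
+//   list.addTypeData(TypeData(JSVAL_TYPE_INT32));
+//   list.addTypeData(TypeData(JSVAL_TYPE_STRING));
+//   for (TypeData data : list) {  // begin()/end() support range-for.
+//     MOZ_ASSERT(data.hasData());
+//   }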
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_TypeData_h */
diff --git a/js/src/jit/TypePolicy.cpp b/js/src/jit/TypePolicy.cpp
new file mode 100644
index 0000000000..d28f0275e9
--- /dev/null
+++ b/js/src/jit/TypePolicy.cpp
@@ -0,0 +1,1152 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/TypePolicy.h"
+
+#include "jit/JitAllocPolicy.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+
+using namespace js;
+using namespace js::jit;
+
+static void EnsureOperandNotFloat32(TempAllocator& alloc, MInstruction* def,
+ unsigned op) {
+ MDefinition* in = def->getOperand(op);
+ if (in->type() == MIRType::Float32) {
+ MToDouble* replace = MToDouble::New(alloc, in);
+ def->block()->insertBefore(def, replace);
+ if (def->isRecoveredOnBailout()) {
+ replace->setRecoveredOnBailout();
+ }
+ def->replaceOperand(op, replace);
+ }
+}
+
+template <class T>
+[[nodiscard]] static bool ConvertOperand(TempAllocator& alloc,
+ MInstruction* def, unsigned op,
+ MIRType expected) {
+ MDefinition* in = def->getOperand(op);
+ if (in->type() == expected) {
+ return true;
+ }
+
+ auto* replace = T::New(alloc, in);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+[[nodiscard]] static bool UnboxOperand(TempAllocator& alloc, MInstruction* def,
+ unsigned op, MIRType expected) {
+ MDefinition* in = def->getOperand(op);
+ if (in->type() == expected) {
+ return true;
+ }
+
+ auto* replace = MUnbox::New(alloc, in, expected, MUnbox::Fallible);
+ replace->setBailoutKind(BailoutKind::TypePolicy);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(op, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+}
+
+MDefinition* js::jit::AlwaysBoxAt(TempAllocator& alloc, MInstruction* at,
+ MDefinition* operand) {
+ MDefinition* boxedOperand = operand;
+ // Replace Float32 by double
+ if (operand->type() == MIRType::Float32) {
+ MInstruction* replace = MToDouble::New(alloc, operand);
+ at->block()->insertBefore(at, replace);
+ boxedOperand = replace;
+ }
+ MBox* box = MBox::New(alloc, boxedOperand);
+ at->block()->insertBefore(at, box);
+ return box;
+}
+
+static MDefinition* BoxAt(TempAllocator& alloc, MInstruction* at,
+ MDefinition* operand) {
+ if (operand->isUnbox()) {
+ return operand->toUnbox()->input();
+ }
+ return AlwaysBoxAt(alloc, at, operand);
+}
+
+bool BoxInputsPolicy::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ MDefinition* in = ins->getOperand(i);
+ if (in->type() == MIRType::Value) {
+ continue;
+ }
+ ins->replaceOperand(i, BoxAt(alloc, ins, in));
+ }
+ return true;
+}
+
+bool ArithPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const {
+ MOZ_ASSERT(IsNumberType(ins->type()));
+ MOZ_ASSERT(ins->type() == MIRType::Double || ins->type() == MIRType::Int32 ||
+ ins->type() == MIRType::Float32);
+
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ MDefinition* in = ins->getOperand(i);
+ if (in->type() == ins->type()) {
+ continue;
+ }
+
+ MInstruction* replace;
+
+ if (ins->type() == MIRType::Double) {
+ replace = MToDouble::New(alloc, in);
+ } else if (ins->type() == MIRType::Float32) {
+ replace = MToFloat32::New(alloc, in);
+ } else {
+ replace = MToNumberInt32::New(alloc, in);
+ }
+
+ replace->setBailoutKind(BailoutKind::TypePolicy);
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(i, replace);
+
+ if (!replace->typePolicy()->adjustInputs(alloc, replace)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool BigIntArithPolicy::adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const {
+ MOZ_ASSERT(ins->type() == MIRType::BigInt);
+
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ if (!ConvertOperand<MToBigInt>(alloc, ins, i, MIRType::BigInt)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool AllDoublePolicy::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ MDefinition* in = ins->getOperand(i);
+ if (in->type() == MIRType::Double) {
+ continue;
+ }
+
+ if (!alloc.ensureBallast()) {
+ return false;
+ }
+ MInstruction* replace = MToDouble::New(alloc, in);
+
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(i, replace);
+
+ if (!replace->typePolicy()->adjustInputs(alloc, replace)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool ComparePolicy::adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const {
+ MOZ_ASSERT(def->isCompare());
+ MCompare* compare = def->toCompare();
+
+ // Convert Float32 operands to doubles
+ for (size_t i = 0; i < 2; i++) {
+ MDefinition* in = def->getOperand(i);
+ if (in->type() == MIRType::Float32) {
+ MInstruction* replace = MToDouble::New(alloc, in);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(i, replace);
+ }
+ }
+
+ auto replaceOperand = [&](size_t index, auto* replace) {
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(index, replace);
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+ };
+
+ if (compare->compareType() == MCompare::Compare_Undefined ||
+ compare->compareType() == MCompare::Compare_Null) {
+    // Nothing to do for undefined and null; lowering handles all types.
+ return true;
+ }
+
+ if (compare->compareType() == MCompare::Compare_UIntPtr) {
+ MOZ_ASSERT(compare->lhs()->type() == MIRType::IntPtr);
+ MOZ_ASSERT(compare->rhs()->type() == MIRType::IntPtr);
+ return true;
+ }
+
+ // Compare_BigInt_Int32 specialization is done for "BigInt <cmp> Int32".
+ // Compare_BigInt_Double specialization is done for "BigInt <cmp> Double".
+ // Compare_BigInt_String specialization is done for "BigInt <cmp> String".
+ if (compare->compareType() == MCompare::Compare_BigInt_Int32 ||
+ compare->compareType() == MCompare::Compare_BigInt_Double ||
+ compare->compareType() == MCompare::Compare_BigInt_String) {
+ if (MDefinition* in = def->getOperand(0); in->type() != MIRType::BigInt) {
+ auto* replace =
+ MUnbox::New(alloc, in, MIRType::BigInt, MUnbox::Infallible);
+ if (!replaceOperand(0, replace)) {
+ return false;
+ }
+ }
+
+ MDefinition* in = def->getOperand(1);
+
+ MInstruction* replace = nullptr;
+ if (compare->compareType() == MCompare::Compare_BigInt_Int32) {
+ if (in->type() != MIRType::Int32) {
+ replace = MToNumberInt32::New(
+ alloc, in, IntConversionInputKind::NumbersOrBoolsOnly);
+ }
+ } else if (compare->compareType() == MCompare::Compare_BigInt_Double) {
+ if (in->type() != MIRType::Double) {
+ replace = MToDouble::New(alloc, in, MToFPInstruction::NumbersOnly);
+ }
+ } else {
+ MOZ_ASSERT(compare->compareType() == MCompare::Compare_BigInt_String);
+ if (in->type() != MIRType::String) {
+ replace = MUnbox::New(alloc, in, MIRType::String, MUnbox::Infallible);
+ }
+ }
+
+ if (replace) {
+ if (!replaceOperand(1, replace)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Convert all inputs to the right input type
+ MIRType type = compare->inputType();
+ MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Double ||
+ type == MIRType::Float32 || type == MIRType::Object ||
+ type == MIRType::String || type == MIRType::Symbol ||
+ type == MIRType::BigInt);
+ for (size_t i = 0; i < 2; i++) {
+ MDefinition* in = def->getOperand(i);
+ if (in->type() == type) {
+ continue;
+ }
+
+ MInstruction* replace;
+
+ switch (type) {
+ case MIRType::Double:
+ replace = MToDouble::New(alloc, in, MToFPInstruction::NumbersOnly);
+ break;
+ case MIRType::Float32:
+ replace = MToFloat32::New(alloc, in, MToFPInstruction::NumbersOnly);
+ break;
+ case MIRType::Int32: {
+ IntConversionInputKind convert = IntConversionInputKind::NumbersOnly;
+ replace = MToNumberInt32::New(alloc, in, convert);
+ break;
+ }
+ case MIRType::Object:
+ replace = MUnbox::New(alloc, in, MIRType::Object, MUnbox::Infallible);
+ break;
+ case MIRType::String:
+ replace = MUnbox::New(alloc, in, MIRType::String, MUnbox::Infallible);
+ break;
+ case MIRType::Symbol:
+ replace = MUnbox::New(alloc, in, MIRType::Symbol, MUnbox::Infallible);
+ break;
+ case MIRType::BigInt:
+ replace = MUnbox::New(alloc, in, MIRType::BigInt, MUnbox::Infallible);
+ break;
+ default:
+ MOZ_CRASH("Unknown compare specialization");
+ }
+
+ if (!replaceOperand(i, replace)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool TestPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const {
+ MDefinition* op = ins->getOperand(0);
+ switch (op->type()) {
+ case MIRType::Value:
+ case MIRType::Null:
+ case MIRType::Undefined:
+ case MIRType::Boolean:
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ case MIRType::Object:
+ break;
+
+ case MIRType::String: {
+ MStringLength* length = MStringLength::New(alloc, op);
+ ins->block()->insertBefore(ins, length);
+ ins->replaceOperand(0, length);
+ break;
+ }
+
+ default:
+ MOZ_ASSERT(IsMagicType(op->type()));
+ ins->replaceOperand(0, BoxAt(alloc, ins, op));
+ break;
+ }
+ return true;
+}
+
+bool BitwisePolicy::adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const {
+ MOZ_ASSERT(ins->type() == MIRType::Int32 || ins->type() == MIRType::Double);
+
+ // This policy works for both unary and binary bitwise operations.
+ for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
+ if (!ConvertOperand<MTruncateToInt32>(alloc, ins, i, MIRType::Int32)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool PowPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const {
+ MOZ_ASSERT(ins->type() == MIRType::Int32 || ins->type() == MIRType::Double);
+
+ if (ins->type() == MIRType::Int32) {
+ // Both operands must be int32.
+ return UnboxedInt32Policy<0>::staticAdjustInputs(alloc, ins) &&
+ UnboxedInt32Policy<1>::staticAdjustInputs(alloc, ins);
+ }
+
+ // Otherwise, input must be a double.
+ if (!DoublePolicy<0>::staticAdjustInputs(alloc, ins)) {
+ return false;
+ }
+
+ // Power may be an int32 or a double. Integers receive a faster path.
+ MDefinition* power = ins->toPow()->power();
+ if (power->isToDouble()) {
+ MDefinition* input = power->toToDouble()->input();
+ if (input->type() == MIRType::Int32) {
+ ins->replaceOperand(1, input);
+ return true;
+ }
+ }
+ return DoublePolicy<1>::staticAdjustInputs(alloc, ins);
+}
+
+bool SignPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const {
+ MOZ_ASSERT(ins->isSign());
+ MIRType specialization = ins->typePolicySpecialization();
+
+ // MSign is specialized for int32 input types.
+ if (specialization == MIRType::Int32) {
+ return UnboxedInt32Policy<0>::staticAdjustInputs(alloc, ins);
+ }
+
+ // Otherwise convert input to double.
+ MOZ_ASSERT(IsFloatingPointType(specialization));
+ return DoublePolicy<0>::staticAdjustInputs(alloc, ins);
+}
+
+template <unsigned Op>
+bool SymbolPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ return UnboxOperand(alloc, ins, Op, MIRType::Symbol);
+}
+
+template bool SymbolPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+
+template <unsigned Op>
+bool BooleanPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ return UnboxOperand(alloc, ins, Op, MIRType::Boolean);
+}
+
+template bool BooleanPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+
+template <unsigned Op>
+bool StringPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ return UnboxOperand(alloc, ins, Op, MIRType::String);
+}
+
+template bool StringPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool StringPolicy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool StringPolicy<2>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+
+template <unsigned Op>
+bool ConvertToStringPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MDefinition* in = ins->getOperand(Op);
+ if (in->type() == MIRType::String) {
+ return true;
+ }
+
+ MToString* replace =
+ MToString::New(alloc, in, MToString::SideEffectHandling::Bailout);
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(Op, replace);
+
+ return ToStringPolicy::staticAdjustInputs(alloc, replace);
+}
+
+template bool ConvertToStringPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool ConvertToStringPolicy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool ConvertToStringPolicy<2>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+
+template <unsigned Op>
+bool BigIntPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ return UnboxOperand(alloc, ins, Op, MIRType::BigInt);
+}
+
+template bool BigIntPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool BigIntPolicy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+
+template <unsigned Op>
+bool UnboxedInt32Policy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def) {
+ return UnboxOperand(alloc, def, Op, MIRType::Int32);
+}
+
+template bool UnboxedInt32Policy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool UnboxedInt32Policy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool UnboxedInt32Policy<2>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool UnboxedInt32Policy<3>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+
+template <unsigned Op>
+bool Int32OrIntPtrPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def) {
+ MDefinition* in = def->getOperand(Op);
+ if (in->type() == MIRType::IntPtr) {
+ return true;
+ }
+
+ return UnboxedInt32Policy<Op>::staticAdjustInputs(alloc, def);
+}
+
+template bool Int32OrIntPtrPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool Int32OrIntPtrPolicy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+
+template <unsigned Op>
+bool ConvertToInt32Policy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def) {
+ return ConvertOperand<MToNumberInt32>(alloc, def, Op, MIRType::Int32);
+}
+
+template bool ConvertToInt32Policy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+
+template <unsigned Op>
+bool TruncateToInt32OrToBigIntPolicy<Op>::staticAdjustInputs(
+ TempAllocator& alloc, MInstruction* def) {
+ MOZ_ASSERT(def->isCompareExchangeTypedArrayElement() ||
+ def->isAtomicExchangeTypedArrayElement() ||
+ def->isAtomicTypedArrayElementBinop());
+
+ Scalar::Type type;
+ if (def->isCompareExchangeTypedArrayElement()) {
+ type = def->toCompareExchangeTypedArrayElement()->arrayType();
+ } else if (def->isAtomicExchangeTypedArrayElement()) {
+ type = def->toAtomicExchangeTypedArrayElement()->arrayType();
+ } else {
+ type = def->toAtomicTypedArrayElementBinop()->arrayType();
+ }
+
+ if (Scalar::isBigIntType(type)) {
+ return ConvertOperand<MToBigInt>(alloc, def, Op, MIRType::BigInt);
+ }
+ return ConvertOperand<MTruncateToInt32>(alloc, def, Op, MIRType::Int32);
+}
+
+template bool TruncateToInt32OrToBigIntPolicy<2>::staticAdjustInputs(
+ TempAllocator& alloc, MInstruction* def);
+template bool TruncateToInt32OrToBigIntPolicy<3>::staticAdjustInputs(
+ TempAllocator& alloc, MInstruction* def);
+
+template <unsigned Op>
+bool DoublePolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def) {
+ return ConvertOperand<MToDouble>(alloc, def, Op, MIRType::Double);
+}
+
+template bool DoublePolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool DoublePolicy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+
+template <unsigned Op>
+bool Float32Policy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def) {
+ return ConvertOperand<MToFloat32>(alloc, def, Op, MIRType::Float32);
+}
+
+template bool Float32Policy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool Float32Policy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool Float32Policy<2>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+
+template <unsigned Op>
+bool FloatingPointPolicy<Op>::adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const {
+ MIRType policyType = def->typePolicySpecialization();
+ if (policyType == MIRType::Double) {
+ return DoublePolicy<Op>::staticAdjustInputs(alloc, def);
+ }
+ return Float32Policy<Op>::staticAdjustInputs(alloc, def);
+}
+
+template bool FloatingPointPolicy<0>::adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const;
+
+template <unsigned Op>
+bool NoFloatPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def) {
+ EnsureOperandNotFloat32(alloc, def, Op);
+ return true;
+}
+
+template bool NoFloatPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool NoFloatPolicy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool NoFloatPolicy<2>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool NoFloatPolicy<3>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+
+template <unsigned FirstOp>
+bool NoFloatPolicyAfter<FirstOp>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def) {
+ for (size_t op = FirstOp, e = def->numOperands(); op < e; op++) {
+ EnsureOperandNotFloat32(alloc, def, op);
+ }
+ return true;
+}
+
+template bool NoFloatPolicyAfter<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool NoFloatPolicyAfter<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+template bool NoFloatPolicyAfter<2>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+
+template <unsigned Op>
+bool BoxPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MDefinition* in = ins->getOperand(Op);
+ if (in->type() == MIRType::Value) {
+ return true;
+ }
+
+ ins->replaceOperand(Op, BoxAt(alloc, ins, in));
+ return true;
+}
+
+template bool BoxPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool BoxPolicy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool BoxPolicy<2>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+
+template <unsigned Op, MIRType Type>
+bool BoxExceptPolicy<Op, Type>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MDefinition* in = ins->getOperand(Op);
+ if (in->type() == Type) {
+ return true;
+ }
+ return BoxPolicy<Op>::staticAdjustInputs(alloc, ins);
+}
+
+template bool BoxExceptPolicy<0, MIRType::Object>::staticAdjustInputs(
+ TempAllocator& alloc, MInstruction* ins);
+
+template <unsigned Op>
+bool CacheIdPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MDefinition* in = ins->getOperand(Op);
+ switch (in->type()) {
+ case MIRType::Int32:
+ case MIRType::String:
+ case MIRType::Symbol:
+ return true;
+ default:
+ return BoxPolicy<Op>::staticAdjustInputs(alloc, ins);
+ }
+}
+
+template bool CacheIdPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool CacheIdPolicy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+
+bool ToDoublePolicy::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MOZ_ASSERT(ins->isToDouble() || ins->isToFloat32());
+
+ MDefinition* in = ins->getOperand(0);
+ MToFPInstruction::ConversionKind conversion;
+ if (ins->isToDouble()) {
+ conversion = ins->toToDouble()->conversion();
+ } else {
+ conversion = ins->toToFloat32()->conversion();
+ }
+
+ switch (in->type()) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+ case MIRType::Double:
+ case MIRType::Value:
+ // No need for boxing for these types.
+ return true;
+ case MIRType::Null:
+      // No need for boxing, since we will convert.
+ if (conversion == MToFPInstruction::NonStringPrimitives) {
+ return true;
+ }
+ break;
+ case MIRType::Undefined:
+ case MIRType::Boolean:
+      // No need for boxing, since we will convert.
+ if (conversion == MToFPInstruction::NonStringPrimitives) {
+ return true;
+ }
+ break;
+ case MIRType::Object:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ // Objects might be effectful. Symbols and BigInts give TypeError.
+ break;
+ default:
+ break;
+ }
+
+ in = BoxAt(alloc, ins, in);
+ ins->replaceOperand(0, in);
+ return true;
+}
+
+bool ToInt32Policy::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MOZ_ASSERT(ins->isToNumberInt32() || ins->isTruncateToInt32());
+
+ IntConversionInputKind conversion = IntConversionInputKind::Any;
+ if (ins->isToNumberInt32()) {
+ conversion = ins->toToNumberInt32()->conversion();
+ }
+
+ MDefinition* in = ins->getOperand(0);
+ switch (in->type()) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+ case MIRType::Double:
+ case MIRType::Value:
+ // No need for boxing for these types.
+ return true;
+ case MIRType::Undefined:
+ // No need for boxing when truncating.
+ if (ins->isTruncateToInt32()) {
+ return true;
+ }
+ break;
+ case MIRType::Null:
+      // No need for boxing, since we will convert.
+ if (conversion == IntConversionInputKind::Any) {
+ return true;
+ }
+ break;
+ case MIRType::Boolean:
+      // No need for boxing, since we will convert.
+ if (conversion == IntConversionInputKind::Any) {
+ return true;
+ }
+ if (conversion == IntConversionInputKind::NumbersOrBoolsOnly) {
+ return true;
+ }
+ break;
+ case MIRType::Object:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ // Objects might be effectful. Symbols and BigInts give TypeError.
+ break;
+ default:
+ break;
+ }
+
+ in = BoxAt(alloc, ins, in);
+ ins->replaceOperand(0, in);
+ return true;
+}
+
+bool ToBigIntPolicy::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MOZ_ASSERT(ins->isToBigInt());
+
+ MDefinition* in = ins->getOperand(0);
+ switch (in->type()) {
+ case MIRType::BigInt:
+ case MIRType::Value:
+ // No need for boxing for these types.
+ return true;
+ default:
+ // Any other types need to be boxed.
+ break;
+ }
+
+ in = BoxAt(alloc, ins, in);
+ ins->replaceOperand(0, in);
+ return true;
+}
+
+bool ToStringPolicy::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MOZ_ASSERT(ins->isToString());
+
+ MIRType type = ins->getOperand(0)->type();
+ if (type == MIRType::Object || type == MIRType::Symbol ||
+ type == MIRType::BigInt) {
+ ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
+ return true;
+ }
+
+  // TODO: remove the following line once bug 966957 has landed.
+ EnsureOperandNotFloat32(alloc, ins, 0);
+
+ return true;
+}
+
+bool ToInt64Policy::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MOZ_ASSERT(ins->isToInt64());
+
+ MDefinition* input = ins->getOperand(0);
+ MIRType type = input->type();
+
+ switch (type) {
+ case MIRType::BigInt: {
+ auto* replace = MTruncateBigIntToInt64::New(alloc, input);
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(0, replace);
+ break;
+ }
+ // No need for boxing for these types, because they are handled specially
+ // when this instruction is lowered to LIR.
+ case MIRType::Boolean:
+ case MIRType::String:
+ case MIRType::Int64:
+ case MIRType::Value:
+ break;
+ default:
+ ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
+ break;
+ }
+
+ return true;
+}
+
+template <unsigned Op>
+bool ObjectPolicy<Op>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ MOZ_ASSERT(ins->getOperand(Op)->type() != MIRType::Slots);
+ MOZ_ASSERT(ins->getOperand(Op)->type() != MIRType::Elements);
+
+ return UnboxOperand(alloc, ins, Op, MIRType::Object);
+}
+
+template bool ObjectPolicy<0>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool ObjectPolicy<1>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool ObjectPolicy<2>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+template bool ObjectPolicy<3>::staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+
+bool CallPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const {
+ MCallBase* call;
+ if (ins->isCall()) {
+ call = ins->toCall();
+ } else {
+ call = ins->toCallClassHook();
+ }
+
+ MDefinition* func = call->getCallee();
+ if (func->type() != MIRType::Object) {
+ MInstruction* unbox =
+ MUnbox::New(alloc, func, MIRType::Object, MUnbox::Fallible);
+ unbox->setBailoutKind(BailoutKind::TypePolicy);
+ call->block()->insertBefore(call, unbox);
+ call->replaceCallee(unbox);
+
+ if (!unbox->typePolicy()->adjustInputs(alloc, unbox)) {
+ return false;
+ }
+ }
+
+ for (uint32_t i = 0; i < call->numStackArgs(); i++) {
+ if (!alloc.ensureBallast()) {
+ return false;
+ }
+ EnsureOperandNotFloat32(alloc, call, MCallBase::IndexOfStackArg(i));
+ }
+
+ return true;
+}
+
+bool MegamorphicSetElementPolicy::adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const {
+ // The first operand should be an object.
+ if (!SingleObjectPolicy::staticAdjustInputs(alloc, ins)) {
+ return false;
+ }
+
+ // Box the index and value operands.
+ for (size_t i = 1, e = ins->numOperands(); i < e; i++) {
+ MDefinition* in = ins->getOperand(i);
+ if (in->type() == MIRType::Value) {
+ continue;
+ }
+ ins->replaceOperand(i, BoxAt(alloc, ins, in));
+ }
+ return true;
+}
+
+bool StoreUnboxedScalarPolicy::adjustValueInput(TempAllocator& alloc,
+ MInstruction* ins,
+ Scalar::Type writeType,
+ MDefinition* value,
+ int valueOperand) {
+ if (Scalar::isBigIntType(writeType)) {
+ if (value->type() == MIRType::BigInt) {
+ return true;
+ }
+
+ auto* replace = MToBigInt::New(alloc, value);
+ ins->block()->insertBefore(ins, replace);
+ ins->replaceOperand(valueOperand, replace);
+
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+ }
+
+ MDefinition* curValue = value;
+  // First, ensure the value is an Int32, Boolean, Double, Float32, or Value.
+ // The conversion is based on TypedArrayObjectTemplate::setElementTail.
+ switch (value->type()) {
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::Boolean:
+ case MIRType::Value:
+ break;
+ case MIRType::Null:
+ value->setImplicitlyUsedUnchecked();
+ value = MConstant::New(alloc, Int32Value(0));
+ ins->block()->insertBefore(ins, value->toInstruction());
+ break;
+ case MIRType::Undefined:
+ value->setImplicitlyUsedUnchecked();
+ value = MConstant::New(alloc, JS::NaNValue());
+ ins->block()->insertBefore(ins, value->toInstruction());
+ break;
+ case MIRType::Object:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ value = BoxAt(alloc, ins, value);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+
+ if (value != curValue) {
+ ins->replaceOperand(valueOperand, value);
+ curValue = value;
+ }
+
+ MOZ_ASSERT(
+ value->type() == MIRType::Int32 || value->type() == MIRType::Boolean ||
+ value->type() == MIRType::Double || value->type() == MIRType::Float32 ||
+ value->type() == MIRType::Value);
+
+ switch (writeType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ if (value->type() != MIRType::Int32) {
+ value = MTruncateToInt32::New(alloc, value);
+ ins->block()->insertBefore(ins, value->toInstruction());
+ }
+ break;
+ case Scalar::Uint8Clamped:
+ // The transpiler should have inserted MClampToUint8.
+ MOZ_ASSERT(value->type() == MIRType::Int32);
+ break;
+ case Scalar::Float32:
+ if (value->type() != MIRType::Float32) {
+ value = MToFloat32::New(alloc, value);
+ ins->block()->insertBefore(ins, value->toInstruction());
+ }
+ break;
+ case Scalar::Float64:
+ if (value->type() != MIRType::Double) {
+ value = MToDouble::New(alloc, value);
+ ins->block()->insertBefore(ins, value->toInstruction());
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid array type");
+ }
+
+ if (value != curValue) {
+ ins->replaceOperand(valueOperand, value);
+ }
+
+ return true;
+}
+
+bool StoreUnboxedScalarPolicy::adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const {
+ MStoreUnboxedScalar* store = ins->toStoreUnboxedScalar();
+ MOZ_ASSERT(store->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(store->index()->type() == MIRType::IntPtr);
+
+ return adjustValueInput(alloc, store, store->writeType(), store->value(), 2);
+}
+
+bool StoreDataViewElementPolicy::adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const {
+ auto* store = ins->toStoreDataViewElement();
+ MOZ_ASSERT(store->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(store->index()->type() == MIRType::IntPtr);
+ MOZ_ASSERT(store->littleEndian()->type() == MIRType::Boolean);
+
+ return StoreUnboxedScalarPolicy::adjustValueInput(
+ alloc, ins, store->writeType(), store->value(), 2);
+}
+
+bool StoreTypedArrayHolePolicy::adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const {
+ MStoreTypedArrayElementHole* store = ins->toStoreTypedArrayElementHole();
+ MOZ_ASSERT(store->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(store->index()->type() == MIRType::IntPtr);
+ MOZ_ASSERT(store->length()->type() == MIRType::IntPtr);
+
+ return StoreUnboxedScalarPolicy::adjustValueInput(
+ alloc, ins, store->arrayType(), store->value(), 3);
+}
+
+bool ClampPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const {
+ MDefinition* in = ins->toClampToUint8()->input();
+
+ switch (in->type()) {
+ case MIRType::Int32:
+ case MIRType::Double:
+ case MIRType::Value:
+ break;
+ default:
+ ins->replaceOperand(0, BoxAt(alloc, ins, in));
+ break;
+ }
+
+ return true;
+}
+
+// Lists of all TypePolicy specializations which are used by MIR Instructions.
+#define TYPE_POLICY_LIST(_) \
+ _(AllDoublePolicy) \
+ _(ArithPolicy) \
+ _(BigIntArithPolicy) \
+ _(BitwisePolicy) \
+ _(BoxInputsPolicy) \
+ _(CallPolicy) \
+ _(MegamorphicSetElementPolicy) \
+ _(ClampPolicy) \
+ _(ComparePolicy) \
+ _(PowPolicy) \
+ _(SignPolicy) \
+ _(StoreDataViewElementPolicy) \
+ _(StoreTypedArrayHolePolicy) \
+ _(StoreUnboxedScalarPolicy) \
+ _(TestPolicy) \
+ _(ToDoublePolicy) \
+ _(ToInt32Policy) \
+ _(ToBigIntPolicy) \
+ _(ToStringPolicy) \
+ _(ToInt64Policy)
+
+#define TEMPLATE_TYPE_POLICY_LIST(_) \
+ _(BigIntPolicy<0>) \
+ _(BooleanPolicy<0>) \
+ _(BoxExceptPolicy<0, MIRType::Object>) \
+ _(BoxPolicy<0>) \
+ _(ConvertToInt32Policy<0>) \
+ _(ConvertToStringPolicy<0>) \
+ _(ConvertToStringPolicy<2>) \
+ _(DoublePolicy<0>) \
+ _(FloatingPointPolicy<0>) \
+ _(UnboxedInt32Policy<0>) \
+ _(UnboxedInt32Policy<1>) \
+ _(TruncateToInt32OrToBigIntPolicy<2>) \
+ _(MixPolicy<ObjectPolicy<0>, StringPolicy<1>, BoxPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, BoxPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, ObjectPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, UnboxedInt32Policy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>, BoxPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>, UnboxedInt32Policy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, BoxPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, UnboxedInt32Policy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, ObjectPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, BoxPolicy<2>>) \
+ _(MixPolicy<StringPolicy<0>, UnboxedInt32Policy<1>, UnboxedInt32Policy<2>>) \
+ _(MixPolicy<StringPolicy<0>, ObjectPolicy<1>, StringPolicy<2>>) \
+ _(MixPolicy<StringPolicy<0>, StringPolicy<1>, StringPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, StringPolicy<1>, UnboxedInt32Policy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>, BoxPolicy<2>, \
+ ObjectPolicy<3>>) \
+ _(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>, UnboxedInt32Policy<2>, \
+ UnboxedInt32Policy<3>>) \
+ _(MixPolicy<TruncateToInt32OrToBigIntPolicy<2>, \
+ TruncateToInt32OrToBigIntPolicy<3>>) \
+ _(MixPolicy<ObjectPolicy<0>, CacheIdPolicy<1>, NoFloatPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, BoxExceptPolicy<1, MIRType::Object>, \
+ CacheIdPolicy<2>>) \
+ _(MixPolicy<BoxPolicy<0>, ObjectPolicy<1>>) \
+ _(MixPolicy<ConvertToStringPolicy<0>, ConvertToStringPolicy<1>>) \
+ _(MixPolicy<ConvertToStringPolicy<0>, ObjectPolicy<1>>) \
+ _(MixPolicy<DoublePolicy<0>, DoublePolicy<1>>) \
+ _(MixPolicy<UnboxedInt32Policy<0>, UnboxedInt32Policy<1>>) \
+ _(MixPolicy<Int32OrIntPtrPolicy<0>, Int32OrIntPtrPolicy<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, BoxPolicy<1>>) \
+ _(MixPolicy<BoxExceptPolicy<0, MIRType::Object>, CacheIdPolicy<1>>) \
+ _(MixPolicy<CacheIdPolicy<0>, ObjectPolicy<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, NoFloatPolicy<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, NoFloatPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<0>, NoFloatPolicy<3>>) \
+ _(MixPolicy<ObjectPolicy<0>, NoFloatPolicyAfter<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, StringPolicy<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<2>>) \
+ _(MixPolicy<ObjectPolicy<1>, ConvertToStringPolicy<0>>) \
+ _(MixPolicy<StringPolicy<0>, UnboxedInt32Policy<1>>) \
+ _(MixPolicy<StringPolicy<0>, StringPolicy<1>>) \
+ _(MixPolicy<BoxPolicy<0>, BoxPolicy<1>>) \
+ _(MixPolicy<ObjectPolicy<0>, BoxPolicy<2>, ObjectPolicy<3>>) \
+ _(MixPolicy<BoxExceptPolicy<0, MIRType::Object>, ObjectPolicy<1>>) \
+ _(MixPolicy<UnboxedInt32Policy<0>, BigIntPolicy<1>>) \
+ _(MixPolicy<UnboxedInt32Policy<0>, NoFloatPolicyAfter<1>>) \
+ _(MixPolicy<UnboxedInt32Policy<0>, UnboxedInt32Policy<1>, \
+ NoFloatPolicyAfter<2>>) \
+ _(NoFloatPolicy<0>) \
+ _(NoFloatPolicy<1>) \
+ _(NoFloatPolicy<2>) \
+ _(NoFloatPolicyAfter<0>) \
+ _(NoFloatPolicyAfter<1>) \
+ _(NoFloatPolicyAfter<2>) \
+ _(ObjectPolicy<0>) \
+ _(ObjectPolicy<1>) \
+ _(ObjectPolicy<3>) \
+ _(StringPolicy<0>) \
+ _(SymbolPolicy<0>)
+
+namespace js {
+namespace jit {
+
+// For each TypePolicy specialization used above, define
+// |TypePolicy::Data::thisTypePolicy|. This function returns a single constant
+// instance of the TypePolicy, which is shared among all MIR instructions of
+// the same type.
+//
+// This macro uses __VA_ARGS__ to cope with the commas in template parameters.
+#define DEFINE_TYPE_POLICY_SINGLETON_INSTANCES_(...) \
+ const TypePolicy* __VA_ARGS__::Data::thisTypePolicy() { \
+ static constexpr __VA_ARGS__ singletonType; \
+ return &singletonType; \
+ }
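+
+// For example, the ArithPolicy entry of TYPE_POLICY_LIST below expands to
+// roughly:
+//
+//   const TypePolicy* ArithPolicy::Data::thisTypePolicy() {
+//     static constexpr ArithPolicy singletonType;
+//     return &singletonType;
+//   }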
+
+TYPE_POLICY_LIST(DEFINE_TYPE_POLICY_SINGLETON_INSTANCES_)
+TEMPLATE_TYPE_POLICY_LIST(template <> DEFINE_TYPE_POLICY_SINGLETON_INSTANCES_)
+#undef DEFINE_TYPE_POLICY_SINGLETON_INSTANCES_
+
+} // namespace jit
+} // namespace js
+
+namespace {
+
+// For extra-good measure in case an unqualified use is ever introduced. (The
+// main use in the macro below is explicitly qualified so as not to consult
+// this scope and find this function.)
+inline TypePolicy* thisTypePolicy() = delete;
+
+static MIRType thisTypeSpecialization() {
+ MOZ_CRASH("TypeSpecialization lacks definition of thisTypeSpecialization.");
+}
+
+} // namespace
+
+// For each MIR instruction, this macro defines the |typePolicy| method in
+// terms of the |thisTypePolicy| method. The |thisTypePolicy| method is either
+// a member of the MIR instruction itself, such as with MGetElementCache, a
+// member inherited from the TypePolicy::Data structure, or a member inherited
+// from NoTypePolicy if the MIR instruction has no type policy.
+#define DEFINE_MIR_TYPEPOLICY_MEMBERS_(op) \
+ const TypePolicy* js::jit::M##op::typePolicy() { \
+ return M##op::thisTypePolicy(); \
+ } \
+ \
+ MIRType js::jit::M##op::typePolicySpecialization() { \
+ return thisTypeSpecialization(); \
+ }
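+
+// For example, a hypothetical opcode Foo in MIR_OPCODE_LIST expands to
+// roughly:
+//
+//   const TypePolicy* js::jit::MFoo::typePolicy() {
+//     return MFoo::thisTypePolicy();
+//   }
+//
+//   MIRType js::jit::MFoo::typePolicySpecialization() {
+//     return thisTypeSpecialization();
+//   }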
+
+MIR_OPCODE_LIST(DEFINE_MIR_TYPEPOLICY_MEMBERS_)
+#undef DEFINE_MIR_TYPEPOLICY_MEMBERS_
diff --git a/js/src/jit/TypePolicy.h b/js/src/jit/TypePolicy.h
new file mode 100644
index 0000000000..fdc61eb950
--- /dev/null
+++ b/js/src/jit/TypePolicy.h
@@ -0,0 +1,557 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_TypePolicy_h
+#define jit_TypePolicy_h
+
+#include "jit/IonTypes.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+
+namespace js {
+namespace jit {
+
+class MInstruction;
+class MDefinition;
+class TempAllocator;
+
+extern MDefinition* AlwaysBoxAt(TempAllocator& alloc, MInstruction* at,
+ MDefinition* operand);
+
+// A type policy directs the type analysis phases, which insert conversion,
+// boxing, unboxing, and type changes as necessary.
+class TypePolicy {
+ public:
+ // Analyze the inputs of the instruction and perform one of the following
+ // actions for each input:
+ // * Nothing; the input already type-checks.
+ // * If untyped, optionally ask the input to try and specialize its value.
+ // * Replace the operand with a conversion instruction.
+ // * Insert an unconditional deoptimization (no conversion possible).
+ [[nodiscard]] virtual bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const = 0;
+};
+
+struct TypeSpecializationData {
+ protected:
+  // Specifies two levels of specialization:
+ // - < Value. This input is expected and required.
+ // - == None. This op should not be specialized.
+ MIRType specialization_;
+
+ MIRType thisTypeSpecialization() { return specialization_; }
+
+ public:
+ MIRType specialization() const { return specialization_; }
+};
+
+#define EMPTY_DATA_ \
+ struct Data { \
+ static const TypePolicy* thisTypePolicy(); \
+ }
+
+#define INHERIT_DATA_(DATA_TYPE) \
+ struct Data : public DATA_TYPE { \
+ static const TypePolicy* thisTypePolicy(); \
+ }
+
+#define SPECIALIZATION_DATA_ INHERIT_DATA_(TypeSpecializationData)
+
+class NoTypePolicy {
+ public:
+ struct Data {
+ static const TypePolicy* thisTypePolicy() { return nullptr; }
+ };
+};
+
+class BoxInputsPolicy final : public TypePolicy {
+ public:
+ constexpr BoxInputsPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+class ArithPolicy final : public TypePolicy {
+ public:
+ constexpr ArithPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override;
+};
+
+class BigIntArithPolicy final : public TypePolicy {
+ public:
+ constexpr BigIntArithPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override;
+};
+
+class AllDoublePolicy final : public TypePolicy {
+ public:
+ constexpr AllDoublePolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+class BitwisePolicy final : public TypePolicy {
+ public:
+ constexpr BitwisePolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override;
+};
+
+class ComparePolicy final : public TypePolicy {
+ public:
+ constexpr ComparePolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override;
+};
+
+// Policy for MTest instructions.
+class TestPolicy final : public TypePolicy {
+ public:
+ constexpr TestPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override;
+};
+
+class CallPolicy final : public TypePolicy {
+ public:
+ constexpr CallPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override;
+};
+
+// Policy for MPow:
+//
+// * If return type is MIRType::Double, we need (Double, Double) or
+// (Double, Int32) operands.
+// * If return type is MIRType::Int32, we need (Int32, Int32) operands.
+class PowPolicy final : public TypePolicy {
+ public:
+ constexpr PowPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override;
+};
+
+// Policy for MSign. Operand is either Double or Int32.
+class SignPolicy final : public TypePolicy {
+ public:
+ constexpr SignPolicy() = default;
+ SPECIALIZATION_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override;
+};
+
+// Expect a symbol for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class SymbolPolicy final : public TypePolicy {
+ public:
+ constexpr SymbolPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a boolean for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class BooleanPolicy final : public TypePolicy {
+ public:
+ constexpr BooleanPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a string for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class StringPolicy final : public TypePolicy {
+ public:
+ constexpr StringPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a string for operand Op. Else a ToString instruction is inserted.
+template <unsigned Op>
+class ConvertToStringPolicy final : public TypePolicy {
+ public:
+ constexpr ConvertToStringPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a BigInt for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class BigIntPolicy final : public TypePolicy {
+ public:
+ constexpr BigIntPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expects either an Int32 or a boxed Int32 for operand Op; may unbox if needed.
+template <unsigned Op>
+class UnboxedInt32Policy final : private TypePolicy {
+ public:
+ constexpr UnboxedInt32Policy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expects either an Int32 or IntPtr for operand Op.
+template <unsigned Op>
+class Int32OrIntPtrPolicy final : private TypePolicy {
+ public:
+ constexpr Int32OrIntPtrPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect an Int for operand Op. Else a ToInt32 instruction is inserted.
+template <unsigned Op>
+class ConvertToInt32Policy final : public TypePolicy {
+ public:
+ constexpr ConvertToInt32Policy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect either an Int or BigInt for operand Op. Else a TruncateToInt32 or
+// ToBigInt instruction is inserted.
+template <unsigned Op>
+class TruncateToInt32OrToBigIntPolicy final : public TypePolicy {
+ public:
+ constexpr TruncateToInt32OrToBigIntPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a double for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class DoublePolicy final : public TypePolicy {
+ public:
+ constexpr DoublePolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a float32 for operand Op. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class Float32Policy final : public TypePolicy {
+ public:
+ constexpr Float32Policy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Expect a float32 OR a double for operand Op, but will prioritize Float32
+// if the result type is set as such. If the input is a Value, it is unboxed.
+template <unsigned Op>
+class FloatingPointPolicy final : public TypePolicy {
+ public:
+ constexpr FloatingPointPolicy() = default;
+ SPECIALIZATION_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override;
+};
+
+template <unsigned Op>
+class NoFloatPolicy final : public TypePolicy {
+ public:
+ constexpr NoFloatPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Policy for guarding variadic instructions such as object / array state
+// instructions.
+template <unsigned FirstOp>
+class NoFloatPolicyAfter final : public TypePolicy {
+ public:
+ constexpr NoFloatPolicyAfter() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Box objects or strings as an input to a ToDouble instruction.
+class ToDoublePolicy final : public TypePolicy {
+ public:
+ constexpr ToDoublePolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Box objects, strings and undefined as input to a ToInt32 instruction.
+class ToInt32Policy final : public TypePolicy {
+ public:
+ constexpr ToInt32Policy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Box any non-BigInts as input to a ToBigInt instruction.
+class ToBigIntPolicy final : public TypePolicy {
+ public:
+ constexpr ToBigIntPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Box objects as input to a ToString instruction.
+class ToStringPolicy final : public TypePolicy {
+ public:
+ constexpr ToStringPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* def);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override {
+ return staticAdjustInputs(alloc, def);
+ }
+};
+
+// Box non-Boolean, non-String, non-BigInt as input to a ToInt64 instruction.
+class ToInt64Policy final : public TypePolicy {
+ public:
+ constexpr ToInt64Policy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+template <unsigned Op>
+class ObjectPolicy final : public TypePolicy {
+ public:
+ constexpr ObjectPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Single-object input. If the input is a Value, it is unboxed. If it is
+// a primitive, we use ValueToNonNullObject.
+using SingleObjectPolicy = ObjectPolicy<0>;
+
+template <unsigned Op>
+class BoxPolicy final : public TypePolicy {
+ public:
+ constexpr BoxPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Boxes everything except inputs of type Type.
+template <unsigned Op, MIRType Type>
+class BoxExceptPolicy final : public TypePolicy {
+ public:
+ constexpr BoxExceptPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Box if not a typical property id (string, symbol, int32).
+template <unsigned Op>
+class CacheIdPolicy final : public TypePolicy {
+ public:
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins);
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
+
+// Combine multiple policies.
+template <class... Policies>
+class MixPolicy final : public TypePolicy {
+ public:
+ constexpr MixPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] static bool staticAdjustInputs(TempAllocator& alloc,
+ MInstruction* ins) {
+ return (Policies::staticAdjustInputs(alloc, ins) && ...);
+ }
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override {
+ return staticAdjustInputs(alloc, ins);
+ }
+};
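+
+// For example, MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>> adjusts
+// operand 0 to an Object and operand 1 to an Int32, applying each policy in
+// order and failing as soon as one of them fails.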
+
+class MegamorphicSetElementPolicy final : public TypePolicy {
+ public:
+ constexpr MegamorphicSetElementPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* def) const override;
+};
+
+class StoreDataViewElementPolicy;
+class StoreTypedArrayHolePolicy;
+
+class StoreUnboxedScalarPolicy : public TypePolicy {
+ private:
+ constexpr StoreUnboxedScalarPolicy() = default;
+ [[nodiscard]] static bool adjustValueInput(TempAllocator& alloc,
+ MInstruction* ins,
+ Scalar::Type arrayType,
+ MDefinition* value,
+ int valueOperand);
+
+ friend class StoreDataViewElementPolicy;
+ friend class StoreTypedArrayHolePolicy;
+
+ public:
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override;
+};
+
+class StoreDataViewElementPolicy final : public StoreUnboxedScalarPolicy {
+ public:
+ constexpr StoreDataViewElementPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override;
+};
+
+class StoreTypedArrayHolePolicy final : public StoreUnboxedScalarPolicy {
+ public:
+ constexpr StoreTypedArrayHolePolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override;
+};
+
+// Accepts integers and doubles. Everything else is boxed.
+class ClampPolicy final : public TypePolicy {
+ public:
+ constexpr ClampPolicy() = default;
+ EMPTY_DATA_;
+ [[nodiscard]] bool adjustInputs(TempAllocator& alloc,
+ MInstruction* ins) const override;
+};
+
+#undef SPECIALIZATION_DATA_
+#undef INHERIT_DATA_
+#undef EMPTY_DATA_
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_TypePolicy_h */
diff --git a/js/src/jit/VMFunctionList-inl.h b/js/src/jit/VMFunctionList-inl.h
new file mode 100644
index 0000000000..0a5953817b
--- /dev/null
+++ b/js/src/jit/VMFunctionList-inl.h
@@ -0,0 +1,379 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_VMFunctionList_inl_h
+#define jit_VMFunctionList_inl_h
+
+#include "builtin/Eval.h"
+#include "builtin/ModuleObject.h" // js::GetOrCreateModuleMetaObject
+#include "builtin/Object.h" // js::ObjectCreateWithTemplate
+#include "builtin/Promise.h" // js::AsyncFunctionAwait
+#include "builtin/RegExp.h"
+#include "builtin/String.h"
+#include "builtin/TestingFunctions.h"
+#include "jit/BaselineIC.h"
+#include "jit/Ion.h"
+#include "jit/IonIC.h"
+#include "jit/TrialInlining.h"
+#include "jit/VMFunctions.h"
+#include "vm/AsyncFunction.h"
+#include "vm/AsyncIteration.h"
+#include "vm/BigIntType.h"
+#include "vm/BoundFunctionObject.h"
+#include "vm/EqualityOperations.h"
+#include "vm/Interpreter.h"
+#include "vm/Iteration.h"
+#include "vm/TypedArrayObject.h"
+
+#include "jit/BaselineFrame-inl.h"
+#include "vm/Interpreter-inl.h"
+
+namespace js {
+namespace jit {
+
+#ifdef FUZZING_JS_FUZZILLI
+# define VMFUNCTION_FUZZILLI_LIST(_) \
+ _(FuzzilliHashObject, js::FuzzilliHashObject) \
+ _(FuzzilliHashObjectInl, js::FuzzilliHashObjectInl)
+#else
+# define VMFUNCTION_FUZZILLI_LIST(_)
+#endif
+
+// List of all VM functions to be used with callVM. Each entry stores the name
+// (must be unique, used for the VMFunctionId enum and profiling) and the C++
+// function to be called. This list must be sorted on the name field.
+#define VMFUNCTION_LIST(_) \
+ _(AddOrUpdateSparseElementHelper, js::AddOrUpdateSparseElementHelper) \
+ _(AddSlotAndCallAddPropHook, js::AddSlotAndCallAddPropHook) \
+ _(ArgumentsObjectCreateForInlinedIon, \
+ js::ArgumentsObject::createForInlinedIon) \
+ _(ArgumentsObjectCreateForIon, js::ArgumentsObject::createForIon) \
+ _(ArgumentsSliceDense, js::ArgumentsSliceDense) \
+ _(ArrayConstructorOneArg, js::ArrayConstructorOneArg) \
+ _(ArrayFromArgumentsObject, js::ArrayFromArgumentsObject) \
+ _(ArrayJoin, js::jit::ArrayJoin) \
+ _(ArraySliceDense, js::ArraySliceDense) \
+ _(AsyncFunctionAwait, js::AsyncFunctionAwait) \
+ _(AsyncFunctionResolve, js::AsyncFunctionResolve) \
+ _(AtomicsAdd64, js::jit::AtomicsAdd64) \
+ _(AtomicsAnd64, js::jit::AtomicsAnd64) \
+ _(AtomicsCompareExchange64, js::jit::AtomicsCompareExchange64) \
+ _(AtomicsExchange64, js::jit::AtomicsExchange64) \
+ _(AtomicsLoad64, js::jit::AtomicsLoad64) \
+ _(AtomicsOr64, js::jit::AtomicsOr64) \
+ _(AtomicsSub64, js::jit::AtomicsSub64) \
+ _(AtomicsXor64, js::jit::AtomicsXor64) \
+ _(AtomizeString, js::AtomizeString) \
+ _(BaselineCompileFromBaselineInterpreter, \
+ js::jit::BaselineCompileFromBaselineInterpreter) \
+ _(BaselineDebugPrologue, js::jit::DebugPrologue) \
+ _(BaselineGetFunctionThis, js::jit::BaselineGetFunctionThis) \
+ _(BigIntAdd, JS::BigInt::add) \
+ _(BigIntAsIntN, js::jit::BigIntAsIntN) \
+ _(BigIntAsUintN, js::jit::BigIntAsUintN) \
+ _(BigIntBitAnd, JS::BigInt::bitAnd) \
+ _(BigIntBitNot, JS::BigInt::bitNot) \
+ _(BigIntBitOr, JS::BigInt::bitOr) \
+ _(BigIntBitXor, JS::BigInt::bitXor) \
+ _(BigIntDec, JS::BigInt::dec) \
+ _(BigIntDiv, JS::BigInt::div) \
+ _(BigIntInc, JS::BigInt::inc) \
+ _(BigIntLeftShift, JS::BigInt::lsh) \
+ _(BigIntMod, JS::BigInt::mod) \
+ _(BigIntMul, JS::BigInt::mul) \
+ _(BigIntNeg, JS::BigInt::neg) \
+ _(BigIntPow, JS::BigInt::pow) \
+ _(BigIntRightShift, JS::BigInt::rsh) \
+ _(BigIntStringEqual, \
+ js::jit::BigIntStringEqual<js::jit::EqualityKind::Equal>) \
+ _(BigIntStringGreaterThanOrEqual, \
+ js::jit::BigIntStringCompare<js::jit::ComparisonKind::GreaterThanOrEqual>) \
+ _(BigIntStringLessThan, \
+ js::jit::BigIntStringCompare<js::jit::ComparisonKind::LessThan>) \
+ _(BigIntStringNotEqual, \
+ js::jit::BigIntStringEqual<js::jit::EqualityKind::NotEqual>) \
+ _(BigIntSub, JS::BigInt::sub) \
+ _(BindVarOperation, js::BindVarOperation) \
+ _(BlockLexicalEnvironmentObjectCreateWithoutEnclosing, \
+ js::BlockLexicalEnvironmentObject::createWithoutEnclosing) \
+ _(BoxBoxableValue, js::wasm::BoxBoxableValue) \
+ _(BoxNonStrictThis, js::BoxNonStrictThis) \
+ _(BuiltinObjectOperation, js::BuiltinObjectOperation) \
+ _(CallDOMGetter, js::jit::CallDOMGetter) \
+ _(CallDOMSetter, js::jit::CallDOMSetter) \
+ _(CallNativeGetter, js::jit::CallNativeGetter) \
+ _(CallNativeSetter, js::jit::CallNativeSetter) \
+ _(CallObjectCreateWithShape, js::CallObject::createWithShape) \
+ _(CanSkipAwait, js::CanSkipAwait) \
+ _(CharCodeAt, js::jit::CharCodeAt) \
+ _(CheckClassHeritageOperation, js::CheckClassHeritageOperation) \
+ _(CheckOverRecursed, js::jit::CheckOverRecursed) \
+ _(CheckOverRecursedBaseline, js::jit::CheckOverRecursedBaseline) \
+ _(CheckPrivateFieldOperation, js::CheckPrivateFieldOperation) \
+ _(ClassBodyLexicalEnvironmentObjectCreateWithoutEnclosing, \
+ js::ClassBodyLexicalEnvironmentObject::createWithoutEnclosing) \
+ _(CloneRegExpObject, js::CloneRegExpObject) \
+ _(CloseIterOperation, js::CloseIterOperation) \
+ _(ConcatStrings, js::ConcatStrings<CanGC>) \
+ _(CreateAsyncFromSyncIterator, js::CreateAsyncFromSyncIterator) \
+ _(CreateBigIntFromInt64, js::jit::CreateBigIntFromInt64) \
+ _(CreateBigIntFromUint64, js::jit::CreateBigIntFromUint64) \
+ _(CreateGenerator, js::jit::CreateGenerator) \
+ _(CreateGeneratorFromFrame, js::jit::CreateGeneratorFromFrame) \
+ _(CreateThisFromIC, js::jit::CreateThisFromIC) \
+ _(CreateThisFromIon, js::jit::CreateThisFromIon) \
+ _(DebugAfterYield, js::jit::DebugAfterYield) \
+ _(DebugEpilogueOnBaselineReturn, js::jit::DebugEpilogueOnBaselineReturn) \
+ _(DebugLeaveLexicalEnv, js::jit::DebugLeaveLexicalEnv) \
+ _(DebugLeaveThenFreshenLexicalEnv, js::jit::DebugLeaveThenFreshenLexicalEnv) \
+ _(DebugLeaveThenPopLexicalEnv, js::jit::DebugLeaveThenPopLexicalEnv) \
+ _(DebugLeaveThenRecreateLexicalEnv, \
+ js::jit::DebugLeaveThenRecreateLexicalEnv) \
+ _(Debug_CheckSelfHosted, js::Debug_CheckSelfHosted) \
+ _(DelElemOperationNonStrict, js::DelElemOperation<false>) \
+ _(DelElemOperationStrict, js::DelElemOperation<true>) \
+ _(DelPropOperationNonStrict, js::DelPropOperation<false>) \
+ _(DelPropOperationStrict, js::DelPropOperation<true>) \
+ _(DeleteNameOperation, js::DeleteNameOperation) \
+ _(DoCallFallback, js::jit::DoCallFallback) \
+ _(DoConcatStringObject, js::jit::DoConcatStringObject) \
+ _(DoSpreadCallFallback, js::jit::DoSpreadCallFallback) \
+ _(DoStringToInt64, js::jit::DoStringToInt64) \
+ _(DoTrialInlining, js::jit::DoTrialInlining) \
+ _(EnterWith, js::jit::EnterWith) \
+ _(ExtractAwaitValue, js::ExtractAwaitValue) \
+ _(FinalSuspend, js::jit::FinalSuspend) \
+ _(FreshenLexicalEnv, js::jit::FreshenLexicalEnv) \
+ _(FunWithProtoOperation, js::FunWithProtoOperation) \
+ _(FunctionBindCreate, js::BoundFunctionObject::createWithTemplate) \
+ _(FunctionBindImpl, js::BoundFunctionObject::functionBindImpl) \
+ _(FunctionBindSpecializedBaseline, \
+ js::BoundFunctionObject::functionBindSpecializedBaseline) \
+ VMFUNCTION_FUZZILLI_LIST(_) \
+ _(GeneratorThrowOrReturn, js::jit::GeneratorThrowOrReturn) \
+ _(GetAndClearException, js::GetAndClearException) \
+ _(GetFirstDollarIndexRaw, js::GetFirstDollarIndexRaw) \
+ _(GetImportOperation, js::GetImportOperation) \
+ _(GetIntrinsicValue, js::jit::GetIntrinsicValue) \
+ _(GetIterator, js::GetIterator) \
+ _(GetIteratorWithIndices, js::GetIteratorWithIndices) \
+ _(GetNonSyntacticGlobalThis, js::GetNonSyntacticGlobalThis) \
+ _(GetOrCreateModuleMetaObject, js::GetOrCreateModuleMetaObject) \
+ _(GetPrototypeOf, js::jit::GetPrototypeOf) \
+ _(GetSparseElementHelper, js::GetSparseElementHelper) \
+ _(GlobalDeclInstantiationFromIon, js::jit::GlobalDeclInstantiationFromIon) \
+ _(GlobalOrEvalDeclInstantiation, js::GlobalOrEvalDeclInstantiation) \
+ _(HandleDebugTrap, js::jit::HandleDebugTrap) \
+ _(ImplicitThisOperation, js::ImplicitThisOperation) \
+ _(ImportMetaOperation, js::ImportMetaOperation) \
+ _(InitElemGetterSetterOperation, js::InitElemGetterSetterOperation) \
+ _(InitFunctionEnvironmentObjects, js::jit::InitFunctionEnvironmentObjects) \
+ _(InitPropGetterSetterOperation, js::InitPropGetterSetterOperation) \
+ _(InitRestParameter, js::jit::InitRestParameter) \
+ _(Int32ToString, js::Int32ToString<CanGC>) \
+ _(Int32ToStringWithBase, js::Int32ToStringWithBase) \
+ _(InterpretResume, js::jit::InterpretResume) \
+ _(InterruptCheck, js::jit::InterruptCheck) \
+ _(InvokeFunction, js::jit::InvokeFunction) \
+ _(IonBinaryArithICUpdate, js::jit::IonBinaryArithIC::update) \
+ _(IonBindNameICUpdate, js::jit::IonBindNameIC::update) \
+ _(IonCheckPrivateFieldICUpdate, js::jit::IonCheckPrivateFieldIC::update) \
+ _(IonCloseIterICUpdate, js::jit::IonCloseIterIC::update) \
+ _(IonCompareICUpdate, js::jit::IonCompareIC::update) \
+ _(IonCompileScriptForBaselineAtEntry, \
+ js::jit::IonCompileScriptForBaselineAtEntry) \
+ _(IonCompileScriptForBaselineOSR, js::jit::IonCompileScriptForBaselineOSR) \
+ _(IonGetIteratorICUpdate, js::jit::IonGetIteratorIC::update) \
+ _(IonGetNameICUpdate, js::jit::IonGetNameIC::update) \
+ _(IonGetPropSuperICUpdate, js::jit::IonGetPropSuperIC::update) \
+ _(IonGetPropertyICUpdate, js::jit::IonGetPropertyIC::update) \
+ _(IonHasOwnICUpdate, js::jit::IonHasOwnIC::update) \
+ _(IonInICUpdate, js::jit::IonInIC::update) \
+ _(IonInstanceOfICUpdate, js::jit::IonInstanceOfIC::update) \
+ _(IonOptimizeSpreadCallICUpdate, js::jit::IonOptimizeSpreadCallIC::update) \
+ _(IonSetPropertyICUpdate, js::jit::IonSetPropertyIC::update) \
+ _(IonToPropertyKeyICUpdate, js::jit::IonToPropertyKeyIC::update) \
+ _(IonUnaryArithICUpdate, js::jit::IonUnaryArithIC::update) \
+ _(IsArrayFromJit, js::IsArrayFromJit) \
+ _(IsPossiblyWrappedTypedArray, js::jit::IsPossiblyWrappedTypedArray) \
+ _(IsPrototypeOf, js::IsPrototypeOf) \
+ _(Lambda, js::Lambda) \
+ _(LeaveWith, js::jit::LeaveWith) \
+ _(LinearizeForCharAccess, js::jit::LinearizeForCharAccess) \
+ _(LoadAliasedDebugVar, js::LoadAliasedDebugVar) \
+ _(MapObjectGet, js::jit::MapObjectGet) \
+ _(MapObjectHas, js::jit::MapObjectHas) \
+ _(MutatePrototype, js::jit::MutatePrototype) \
+ _(NamedLambdaObjectCreateWithoutEnclosing, \
+ js::NamedLambdaObject::createWithoutEnclosing) \
+ _(NativeGetElement, js::NativeGetElement) \
+ _(NewArgumentsObject, js::jit::NewArgumentsObject) \
+ _(NewArrayIterator, js::NewArrayIterator) \
+ _(NewArrayObjectBaselineFallback, js::NewArrayObjectBaselineFallback) \
+ _(NewArrayObjectEnsureDenseInitLength, \
+ js::jit::NewArrayObjectEnsureDenseInitLength) \
+  _(NewArrayObjectOptimizedFallback, js::NewArrayObjectOptimizedFallback)     \
+ _(NewArrayOperation, js::NewArrayOperation) \
+ _(NewArrayWithShape, js::NewArrayWithShape) \
+ _(NewObjectOperation, js::NewObjectOperation) \
+ _(NewPlainObjectBaselineFallback, js::NewPlainObjectBaselineFallback) \
+ _(NewPlainObjectOptimizedFallback, js::NewPlainObjectOptimizedFallback) \
+ _(NewPrivateName, js::NewPrivateName) \
+ _(NewRegExpStringIterator, js::NewRegExpStringIterator) \
+ _(NewStringIterator, js::NewStringIterator) \
+ _(NewStringObject, js::jit::NewStringObject) \
+ _(NewTypedArrayWithTemplateAndArray, js::NewTypedArrayWithTemplateAndArray) \
+ _(NewTypedArrayWithTemplateAndBuffer, \
+ js::NewTypedArrayWithTemplateAndBuffer) \
+ _(NewTypedArrayWithTemplateAndLength, \
+ js::NewTypedArrayWithTemplateAndLength) \
+ _(NormalSuspend, js::jit::NormalSuspend) \
+ _(NumberParseInt, js::NumberParseInt) \
+ _(NumberToString, js::NumberToString<CanGC>) \
+ _(ObjectCreateWithTemplate, js::ObjectCreateWithTemplate) \
+ _(ObjectWithProtoOperation, js::ObjectWithProtoOperation) \
+ _(OnDebuggerStatement, js::jit::OnDebuggerStatement) \
+ _(ProxyGetProperty, js::ProxyGetProperty) \
+ _(ProxyGetPropertyByValue, js::ProxyGetPropertyByValue) \
+ _(ProxyHas, js::ProxyHas) \
+ _(ProxyHasOwn, js::ProxyHasOwn) \
+ _(ProxySetProperty, js::ProxySetProperty) \
+ _(ProxySetPropertyByValue, js::ProxySetPropertyByValue) \
+ _(PushClassBodyEnv, js::jit::PushClassBodyEnv) \
+ _(PushLexicalEnv, js::jit::PushLexicalEnv) \
+ _(PushVarEnv, js::jit::PushVarEnv) \
+ _(RecreateLexicalEnv, js::jit::RecreateLexicalEnv) \
+ _(RegExpBuiltinExecMatchFromJit, js::RegExpBuiltinExecMatchFromJit) \
+ _(RegExpBuiltinExecTestFromJit, js::RegExpBuiltinExecTestFromJit) \
+ _(RegExpMatcherRaw, js::RegExpMatcherRaw) \
+ _(RegExpSearcherRaw, js::RegExpSearcherRaw) \
+ _(SameValue, js::SameValue) \
+ _(SetArrayLength, js::jit::SetArrayLength) \
+ _(SetElementMegamorphicNoCache, js::jit::SetElementMegamorphic<false>) \
+ _(SetElementMegamorphicYesCache, js::jit::SetElementMegamorphic<true>) \
+ _(SetElementSuper, js::SetElementSuper) \
+ _(SetFunctionName, js::SetFunctionName) \
+ _(SetIntrinsicOperation, js::SetIntrinsicOperation) \
+ _(SetObjectHas, js::jit::SetObjectHas) \
+ _(SetPropertyMegamorphicNoCache, js::jit::SetPropertyMegamorphic<false>) \
+ _(SetPropertyMegamorphicYesCache, js::jit::SetPropertyMegamorphic<true>) \
+ _(SetPropertySuper, js::SetPropertySuper) \
+ _(StartDynamicModuleImport, js::StartDynamicModuleImport) \
+ _(StringBigIntGreaterThanOrEqual, \
+ js::jit::StringBigIntCompare<js::jit::ComparisonKind::GreaterThanOrEqual>) \
+ _(StringBigIntLessThan, \
+ js::jit::StringBigIntCompare<js::jit::ComparisonKind::LessThan>) \
+ _(StringEndsWith, js::StringEndsWith) \
+ _(StringFlatReplaceString, js::StringFlatReplaceString) \
+ _(StringFromCharCode, js::jit::StringFromCharCode) \
+ _(StringFromCodePoint, js::jit::StringFromCodePoint) \
+ _(StringIndexOf, js::StringIndexOf) \
+ _(StringReplace, js::jit::StringReplace) \
+ _(StringSplitString, js::StringSplitString) \
+ _(StringStartsWith, js::StringStartsWith) \
+ _(StringToLowerCase, js::StringToLowerCase) \
+ _(StringToNumber, js::StringToNumber) \
+ _(StringToUpperCase, js::StringToUpperCase) \
+ _(StringsCompareGreaterThanOrEquals, \
+ js::jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>) \
+ _(StringsCompareLessThan, js::jit::StringsCompare<ComparisonKind::LessThan>) \
+ _(StringsEqual, js::jit::StringsEqual<js::jit::EqualityKind::Equal>) \
+ _(StringsNotEqual, js::jit::StringsEqual<js::jit::EqualityKind::NotEqual>) \
+ _(SubstringKernel, js::SubstringKernel) \
+ _(ThrowBadDerivedReturnOrUninitializedThis, \
+ js::jit::ThrowBadDerivedReturnOrUninitializedThis) \
+ _(ThrowCheckIsObject, js::ThrowCheckIsObject) \
+ _(ThrowInitializedThis, js::ThrowInitializedThis) \
+ _(ThrowMsgOperation, js::ThrowMsgOperation) \
+ _(ThrowObjectCoercible, js::ThrowObjectCoercible) \
+ _(ThrowOperation, js::ThrowOperation) \
+ _(ThrowRuntimeLexicalError, js::jit::ThrowRuntimeLexicalError) \
+ _(ThrowUninitializedThis, js::ThrowUninitializedThis) \
+ _(ToBigInt, js::ToBigInt) \
+ _(ToStringSlow, js::ToStringSlow<CanGC>) \
+ _(ValueToIterator, js::ValueToIterator) \
+ _(VarEnvironmentObjectCreateWithoutEnclosing, \
+ js::VarEnvironmentObject::createWithoutEnclosing)
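+
+// Illustrative sketch (not part of this patch): codegen reaches an entry above
+// by pairing the C++ signature with the function pointer, roughly
+//
+//   using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
+//   callVM<Fn, js::jit::ArrayJoin>(lir);
+//
+// and VMFunctionToId (defined below) maps that pair back to its VMFunctionId.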
+
+// The list below is for tail calls. The third argument specifies the number of
+// non-argument Values the VM wrapper should pop from the stack. This is used
+// for Baseline ICs.
+//
+// This list is required to be alphabetized.
+#define TAIL_CALL_VMFUNCTION_LIST(_) \
+ _(DoBinaryArithFallback, js::jit::DoBinaryArithFallback, 2) \
+ _(DoBindNameFallback, js::jit::DoBindNameFallback, 0) \
+ _(DoCheckPrivateFieldFallback, js::jit::DoCheckPrivateFieldFallback, 2) \
+ _(DoCloseIterFallback, js::jit::DoCloseIterFallback, 0) \
+ _(DoCompareFallback, js::jit::DoCompareFallback, 2) \
+ _(DoGetElemFallback, js::jit::DoGetElemFallback, 2) \
+ _(DoGetElemSuperFallback, js::jit::DoGetElemSuperFallback, 3) \
+ _(DoGetIntrinsicFallback, js::jit::DoGetIntrinsicFallback, 0) \
+ _(DoGetIteratorFallback, js::jit::DoGetIteratorFallback, 1) \
+ _(DoGetNameFallback, js::jit::DoGetNameFallback, 0) \
+ _(DoGetPropFallback, js::jit::DoGetPropFallback, 1) \
+ _(DoGetPropSuperFallback, js::jit::DoGetPropSuperFallback, 0) \
+ _(DoHasOwnFallback, js::jit::DoHasOwnFallback, 2) \
+ _(DoInFallback, js::jit::DoInFallback, 2) \
+ _(DoInstanceOfFallback, js::jit::DoInstanceOfFallback, 2) \
+ _(DoNewArrayFallback, js::jit::DoNewArrayFallback, 0) \
+ _(DoNewObjectFallback, js::jit::DoNewObjectFallback, 0) \
+ _(DoOptimizeSpreadCallFallback, js::jit::DoOptimizeSpreadCallFallback, 0) \
+ _(DoRestFallback, js::jit::DoRestFallback, 0) \
+ _(DoSetElemFallback, js::jit::DoSetElemFallback, 2) \
+ _(DoSetPropFallback, js::jit::DoSetPropFallback, 1) \
+ _(DoToBoolFallback, js::jit::DoToBoolFallback, 0) \
+ _(DoToPropertyKeyFallback, js::jit::DoToPropertyKeyFallback, 0) \
+ _(DoTypeOfFallback, js::jit::DoTypeOfFallback, 0) \
+ _(DoUnaryArithFallback, js::jit::DoUnaryArithFallback, 1)
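+
+// Illustrative sketch (not part of this patch): Baseline IC fallback code uses
+// these entries in the same way through tailCallVM, roughly
+//
+//   tailCallVM<Fn, js::jit::DoToBoolFallback>(masm);
+//
+// with Fn matching the fallback's C++ signature and the third column above
+// telling the wrapper how many extra stack Values to pop on return.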
+
+#define DEF_ID(name, ...) name,
+enum class VMFunctionId { VMFUNCTION_LIST(DEF_ID) Count };
+enum class TailCallVMFunctionId { TAIL_CALL_VMFUNCTION_LIST(DEF_ID) Count };
+#undef DEF_ID
+
+// Define the VMFunctionToId template to map from signature + function to
+// the VMFunctionId. This lets us verify the consumer/codegen code matches
+// the C++ signature.
+template <typename Function, Function fun>
+struct VMFunctionToId; // Error here? Update VMFUNCTION_LIST?
+
+template <typename Function, Function fun>
+struct TailCallVMFunctionToId; // Error here? Update TAIL_CALL_VMFUNCTION_LIST?
+
+// GCC warns when the signature does not have matching attributes (for example
+// [[nodiscard]]). Squelch this warning to avoid a GCC-only footgun.
+#if MOZ_IS_GCC
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wignored-attributes"
+#endif
+
+// Note: the use of ::fp instead of fp is intentional to enforce use of
+// fully-qualified names in the list above.
+#define DEF_TEMPLATE(name, fp) \
+ template <> \
+ struct VMFunctionToId<decltype(&(::fp)), ::fp> { \
+ static constexpr VMFunctionId id = VMFunctionId::name; \
+ };
+VMFUNCTION_LIST(DEF_TEMPLATE)
+#undef DEF_TEMPLATE
+
+#define DEF_TEMPLATE(name, fp, valuesToPop) \
+ template <> \
+ struct TailCallVMFunctionToId<decltype(&(::fp)), ::fp> { \
+ static constexpr TailCallVMFunctionId id = TailCallVMFunctionId::name; \
+ };
+TAIL_CALL_VMFUNCTION_LIST(DEF_TEMPLATE)
+#undef DEF_TEMPLATE
+
+#if MOZ_IS_GCC
+# pragma GCC diagnostic pop
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_VMFunctionList_inl_h
diff --git a/js/src/jit/VMFunctions.cpp b/js/src/jit/VMFunctions.cpp
new file mode 100644
index 0000000000..23ef986641
--- /dev/null
+++ b/js/src/jit/VMFunctions.cpp
@@ -0,0 +1,2940 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/VMFunctions.h"
+
+#include "mozilla/FloatingPoint.h"
+
+#include "builtin/MapObject.h"
+#include "builtin/String.h"
+#include "ds/OrderedHashTable.h"
+#include "gc/Cell.h"
+#include "gc/GC.h"
+#include "jit/arm/Simulator-arm.h"
+#include "jit/AtomicOperations.h"
+#include "jit/BaselineIC.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "jit/Simulator.h"
+#include "js/experimental/JitInfo.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "js/friend/WindowProxy.h" // js::IsWindow
+#include "js/Printf.h"
+#include "js/TraceKind.h"
+#include "vm/ArrayObject.h"
+#include "vm/Compartment.h"
+#include "vm/Interpreter.h"
+#include "vm/JSAtom.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/SelfHosting.h"
+#include "vm/StaticStrings.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/Watchtower.h"
+#include "wasm/WasmGcObject.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "jit/BaselineFrame-inl.h"
+#include "jit/VMFunctionList-inl.h"
+#include "vm/Interpreter-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/PlainObject-inl.h" // js::CreateThis
+#include "vm/StringObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+
+class ArgumentsObject;
+class NamedLambdaObject;
+class AsyncFunctionGeneratorObject;
+class RegExpObject;
+
+namespace jit {
+
+struct IonOsrTempData;
+
+struct PopValues {
+ uint8_t numValues;
+
+ explicit constexpr PopValues(uint8_t numValues) : numValues(numValues) {}
+};
+
+template <class>
+struct ReturnTypeToDataType { /* Unexpected return type for a VMFunction. */
+};
+template <>
+struct ReturnTypeToDataType<void> {
+ static const DataType result = Type_Void;
+};
+template <>
+struct ReturnTypeToDataType<bool> {
+ static const DataType result = Type_Bool;
+};
+template <class T>
+struct ReturnTypeToDataType<T*> {
+ // Assume by default that any pointer return types are cells.
+ static_assert(std::is_base_of_v<gc::Cell, T>);
+
+ static const DataType result = Type_Cell;
+};
+
+// Convert argument types to properties of the argument known by the jit.
+template <class T>
+struct TypeToArgProperties {
+ static const uint32_t result =
+ (sizeof(T) <= sizeof(void*) ? VMFunctionData::Word
+ : VMFunctionData::Double);
+};
+template <>
+struct TypeToArgProperties<const Value&> {
+ static const uint32_t result =
+ TypeToArgProperties<Value>::result | VMFunctionData::ByRef;
+};
+template <>
+struct TypeToArgProperties<HandleValue> {
+ static const uint32_t result =
+ TypeToArgProperties<Value>::result | VMFunctionData::ByRef;
+};
+template <>
+struct TypeToArgProperties<MutableHandleValue> {
+ static const uint32_t result =
+ TypeToArgProperties<Value>::result | VMFunctionData::ByRef;
+};
+template <>
+struct TypeToArgProperties<HandleId> {
+ static const uint32_t result =
+ TypeToArgProperties<jsid>::result | VMFunctionData::ByRef;
+};
+template <class T>
+struct TypeToArgProperties<Handle<T*>> {
+ // Assume by default that any pointer handle types are cells.
+ static_assert(std::is_base_of_v<gc::Cell, T>);
+
+ static const uint32_t result =
+ TypeToArgProperties<T*>::result | VMFunctionData::ByRef;
+};
+template <class T>
+struct TypeToArgProperties<Handle<T>> {
+ // Fail for Handle types that aren't specialized above.
+};
+
+// Convert argument type to whether or not it should be passed in a float
+// register on platforms that have them, like x64.
+template <class T>
+struct TypeToPassInFloatReg {
+ static const uint32_t result = 0;
+};
+template <>
+struct TypeToPassInFloatReg<double> {
+ static const uint32_t result = 1;
+};
+
+// Convert argument types to root types used by the gc, see TraceJitExitFrame.
+template <class T>
+struct TypeToRootType {
+ static const uint32_t result = VMFunctionData::RootNone;
+};
+template <>
+struct TypeToRootType<HandleValue> {
+ static const uint32_t result = VMFunctionData::RootValue;
+};
+template <>
+struct TypeToRootType<MutableHandleValue> {
+ static const uint32_t result = VMFunctionData::RootValue;
+};
+template <>
+struct TypeToRootType<HandleId> {
+ static const uint32_t result = VMFunctionData::RootId;
+};
+template <class T>
+struct TypeToRootType<Handle<T*>> {
+ // Assume by default that any pointer types are cells.
+ static_assert(std::is_base_of_v<gc::Cell, T>);
+
+ static constexpr uint32_t rootType() {
+ using JS::TraceKind;
+
+ switch (JS::MapTypeToTraceKind<T>::kind) {
+ case TraceKind::Object:
+ return VMFunctionData::RootObject;
+ case TraceKind::BigInt:
+ return VMFunctionData::RootBigInt;
+ case TraceKind::String:
+ return VMFunctionData::RootString;
+ case TraceKind::Shape:
+ case TraceKind::Script:
+ case TraceKind::Scope:
+ return VMFunctionData::RootCell;
+ case TraceKind::Symbol:
+ case TraceKind::BaseShape:
+ case TraceKind::Null:
+ case TraceKind::JitCode:
+ case TraceKind::RegExpShared:
+ case TraceKind::GetterSetter:
+ case TraceKind::PropMap:
+ MOZ_CRASH("Unexpected trace kind");
+ }
+ }
+
+ static constexpr uint32_t result = rootType();
+};
+template <class T>
+struct TypeToRootType<Handle<T>> {
+ // Fail for Handle types that aren't specialized above.
+};
+
+template <class>
+struct OutParamToDataType {
+ static const DataType result = Type_Void;
+};
+template <class T>
+struct OutParamToDataType<const T*> {
+ // Const pointers can't be output parameters.
+ static const DataType result = Type_Void;
+};
+template <>
+struct OutParamToDataType<uint64_t*> {
+ // Already used as an input type, so it can't be used as an output param.
+ static const DataType result = Type_Void;
+};
+template <>
+struct OutParamToDataType<JSObject*> {
+ // Already used as an input type, so it can't be used as an output param.
+ static const DataType result = Type_Void;
+};
+template <>
+struct OutParamToDataType<JSString*> {
+ // Already used as an input type, so it can't be used as an output param.
+ static const DataType result = Type_Void;
+};
+template <>
+struct OutParamToDataType<BaselineFrame*> {
+ // Already used as an input type, so it can't be used as an output param.
+ static const DataType result = Type_Void;
+};
+template <>
+struct OutParamToDataType<gc::AllocSite*> {
+ // Already used as an input type, so it can't be used as an output param.
+ static const DataType result = Type_Void;
+};
+template <>
+struct OutParamToDataType<Value*> {
+ static const DataType result = Type_Value;
+};
+template <>
+struct OutParamToDataType<int*> {
+ static const DataType result = Type_Int32;
+};
+template <>
+struct OutParamToDataType<uint32_t*> {
+ static const DataType result = Type_Int32;
+};
+template <>
+struct OutParamToDataType<bool*> {
+ static const DataType result = Type_Bool;
+};
+template <>
+struct OutParamToDataType<double*> {
+ static const DataType result = Type_Double;
+};
+template <class T>
+struct OutParamToDataType<T*> {
+ // Fail for pointer types that aren't specialized above.
+};
+template <class T>
+struct OutParamToDataType<T**> {
+ static const DataType result = Type_Pointer;
+};
+template <class T>
+struct OutParamToDataType<MutableHandle<T>> {
+ static const DataType result = Type_Handle;
+};
+
+template <class>
+struct OutParamToRootType {
+ static const VMFunctionData::RootType result = VMFunctionData::RootNone;
+};
+template <>
+struct OutParamToRootType<MutableHandleValue> {
+ static const VMFunctionData::RootType result = VMFunctionData::RootValue;
+};
+template <>
+struct OutParamToRootType<MutableHandleObject> {
+ static const VMFunctionData::RootType result = VMFunctionData::RootObject;
+};
+template <>
+struct OutParamToRootType<MutableHandleString> {
+ static const VMFunctionData::RootType result = VMFunctionData::RootString;
+};
+template <>
+struct OutParamToRootType<MutableHandleBigInt> {
+ static const VMFunctionData::RootType result = VMFunctionData::RootBigInt;
+};
+
+// Construct a bit mask from a list of types. The mask is the OR of the mask
+// produced for each argument. The result of each argument is shifted left by
+// its index times Shift, so the result of the first argument occupies the low
+// bits of the mask and the result of the last argument the high bits.
+template <template <typename> class Each, typename ResultType, size_t Shift,
+ typename... Args>
+struct BitMask;
+
+template <template <typename> class Each, typename ResultType, size_t Shift>
+struct BitMask<Each, ResultType, Shift> {
+ static constexpr ResultType result = ResultType();
+};
+
+template <template <typename> class Each, typename ResultType, size_t Shift,
+ typename HeadType, typename... TailTypes>
+struct BitMask<Each, ResultType, Shift, HeadType, TailTypes...> {
+ static_assert(ResultType(Each<HeadType>::result) < (1 << Shift),
+ "not enough bits reserved by the shift for individual results");
+ static_assert(sizeof...(TailTypes) < (8 * sizeof(ResultType) / Shift),
+ "not enough bits in the result type to store all bit masks");
+
+ static constexpr ResultType result =
+ ResultType(Each<HeadType>::result) |
+ (BitMask<Each, ResultType, Shift, TailTypes...>::result << Shift);
+};
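+
+// For example, with Each = TypeToArgProperties, ResultType = uint32_t and
+// Shift = 2, the argument pack (HandleObject, HandleValue) expands to
+//
+//   TypeToArgProperties<HandleObject>::result |
+//       (TypeToArgProperties<HandleValue>::result << 2)
+//
+// so each argument owns its own disjoint 2-bit field of the mask.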
+
+// Helper template to build the VMFunctionData for a function.
+template <typename... Args>
+struct VMFunctionDataHelper;
+
+template <class R, typename... Args>
+struct VMFunctionDataHelper<R (*)(JSContext*, Args...)>
+ : public VMFunctionData {
+ using Fun = R (*)(JSContext*, Args...);
+
+ static constexpr DataType returnType() {
+ return ReturnTypeToDataType<R>::result;
+ }
+ static constexpr DataType outParam() {
+ return OutParamToDataType<typename LastArg<Args...>::Type>::result;
+ }
+ static constexpr RootType outParamRootType() {
+ return OutParamToRootType<typename LastArg<Args...>::Type>::result;
+ }
+ static constexpr size_t NbArgs() { return sizeof...(Args); }
+ static constexpr size_t explicitArgs() {
+ return NbArgs() - (outParam() != Type_Void ? 1 : 0);
+ }
+ static constexpr uint32_t argumentProperties() {
+ return BitMask<TypeToArgProperties, uint32_t, 2, Args...>::result;
+ }
+ static constexpr uint32_t argumentPassedInFloatRegs() {
+ return BitMask<TypeToPassInFloatReg, uint32_t, 2, Args...>::result;
+ }
+ static constexpr uint64_t argumentRootTypes() {
+ return BitMask<TypeToRootType, uint64_t, 3, Args...>::result;
+ }
+ constexpr explicit VMFunctionDataHelper(const char* name)
+ : VMFunctionData(name, explicitArgs(), argumentProperties(),
+ argumentPassedInFloatRegs(), argumentRootTypes(),
+ outParam(), outParamRootType(), returnType(),
+ /* extraValuesToPop = */ 0, NonTailCall) {}
+ constexpr explicit VMFunctionDataHelper(const char* name,
+ MaybeTailCall expectTailCall,
+ PopValues extraValuesToPop)
+ : VMFunctionData(name, explicitArgs(), argumentProperties(),
+ argumentPassedInFloatRegs(), argumentRootTypes(),
+ outParam(), outParamRootType(), returnType(),
+ extraValuesToPop.numValues, expectTailCall) {}
+};
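+
+// For example, js::jit::CharCodeAt below has the signature
+// bool (*)(JSContext*, HandleString, int32_t, uint32_t*), which yields
+// returnType() == Type_Bool, outParam() == Type_Int32 (from the trailing
+// uint32_t*), and explicitArgs() == 2, since the out parameter is not counted
+// as an explicit argument.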
+
+// GCC warns when the signature does not have matching attributes (for example
+// [[nodiscard]]). Squelch this warning to avoid a GCC-only footgun.
+#if MOZ_IS_GCC
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wignored-attributes"
+#endif
+
+// Generate VMFunctionData array.
+static constexpr VMFunctionData vmFunctions[] = {
+#define DEF_VMFUNCTION(name, fp) VMFunctionDataHelper<decltype(&(::fp))>(#name),
+ VMFUNCTION_LIST(DEF_VMFUNCTION)
+#undef DEF_VMFUNCTION
+};
+static constexpr VMFunctionData tailCallVMFunctions[] = {
+#define DEF_VMFUNCTION(name, fp, valuesToPop) \
+ VMFunctionDataHelper<decltype(&(::fp))>(#name, TailCall, \
+ PopValues(valuesToPop)),
+ TAIL_CALL_VMFUNCTION_LIST(DEF_VMFUNCTION)
+#undef DEF_VMFUNCTION
+};
+
+#if MOZ_IS_GCC
+# pragma GCC diagnostic pop
+#endif
+
+// Generate arrays storing C++ function pointers. These pointers are not stored
+// in VMFunctionData because there's no good way to cast them to void* in
+// constexpr code. Compilers are smart enough to treat the const array below as
+// constexpr.
+#define DEF_VMFUNCTION(name, fp, ...) (void*)(::fp),
+static void* const vmFunctionTargets[] = {VMFUNCTION_LIST(DEF_VMFUNCTION)};
+static void* const tailCallVMFunctionTargets[] = {
+ TAIL_CALL_VMFUNCTION_LIST(DEF_VMFUNCTION)};
+#undef DEF_VMFUNCTION
+
+const VMFunctionData& GetVMFunction(VMFunctionId id) {
+ return vmFunctions[size_t(id)];
+}
+const VMFunctionData& GetVMFunction(TailCallVMFunctionId id) {
+ return tailCallVMFunctions[size_t(id)];
+}
+
+static DynFn GetVMFunctionTarget(VMFunctionId id) {
+ return DynFn{vmFunctionTargets[size_t(id)]};
+}
+
+static DynFn GetVMFunctionTarget(TailCallVMFunctionId id) {
+ return DynFn{tailCallVMFunctionTargets[size_t(id)]};
+}
+
+template <typename IdT>
+bool JitRuntime::generateVMWrappers(JSContext* cx, MacroAssembler& masm,
+ VMWrapperOffsets& offsets) {
+ // Generate all VM function wrappers.
+
+ static constexpr size_t NumVMFunctions = size_t(IdT::Count);
+
+ if (!offsets.reserve(NumVMFunctions)) {
+ return false;
+ }
+
+#ifdef DEBUG
+ const char* lastName = nullptr;
+#endif
+
+ for (size_t i = 0; i < NumVMFunctions; i++) {
+ IdT id = IdT(i);
+ const VMFunctionData& fun = GetVMFunction(id);
+
+#ifdef DEBUG
+ // Assert the list is sorted by name.
+ if (lastName) {
+ MOZ_ASSERT(strcmp(lastName, fun.name()) < 0,
+ "VM function list must be sorted by name");
+ }
+ lastName = fun.name();
+#endif
+
+ JitSpew(JitSpew_Codegen, "# VM function wrapper (%s)", fun.name());
+
+ uint32_t offset;
+ if (!generateVMWrapper(cx, masm, fun, GetVMFunctionTarget(id), &offset)) {
+ return false;
+ }
+
+ MOZ_ASSERT(offsets.length() == size_t(id));
+ offsets.infallibleAppend(offset);
+ }
+
+ return true;
+}
+
+bool JitRuntime::generateVMWrappers(JSContext* cx, MacroAssembler& masm) {
+ if (!generateVMWrappers<VMFunctionId>(cx, masm, functionWrapperOffsets_)) {
+ return false;
+ }
+
+ if (!generateVMWrappers<TailCallVMFunctionId>(
+ cx, masm, tailCallFunctionWrapperOffsets_)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool InvokeFunction(JSContext* cx, HandleObject obj, bool constructing,
+ bool ignoresReturnValue, uint32_t argc, Value* argv,
+ MutableHandleValue rval) {
+ RootedExternalValueArray argvRoot(cx, argc + 1 + constructing, argv);
+
+ // Data in the argument vector is arranged for a JIT -> JIT call.
+ RootedValue thisv(cx, argv[0]);
+ Value* argvWithoutThis = argv + 1;
+
+ RootedValue fval(cx, ObjectValue(*obj));
+ if (constructing) {
+ if (!IsConstructor(fval)) {
+ ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK, fval,
+ nullptr);
+ return false;
+ }
+
+ ConstructArgs cargs(cx);
+ if (!cargs.init(cx, argc)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < argc; i++) {
+ cargs[i].set(argvWithoutThis[i]);
+ }
+
+ RootedValue newTarget(cx, argvWithoutThis[argc]);
+
+ // See CreateThisFromIon for why this can be NullValue.
+ if (thisv.isNull()) {
+ thisv.setMagic(JS_IS_CONSTRUCTING);
+ }
+
+ // If |this| hasn't been created, or is JS_UNINITIALIZED_LEXICAL,
+ // we can use normal construction code without creating an extraneous
+ // object.
+ if (thisv.isMagic()) {
+ MOZ_ASSERT(thisv.whyMagic() == JS_IS_CONSTRUCTING ||
+ thisv.whyMagic() == JS_UNINITIALIZED_LEXICAL);
+
+ RootedObject obj(cx);
+ if (!Construct(cx, fval, cargs, newTarget, &obj)) {
+ return false;
+ }
+
+ rval.setObject(*obj);
+ return true;
+ }
+
+ // Otherwise the default |this| has already been created. We could
+ // almost perform a *call* at this point, but we'd break |new.target|
+ // in the function. So in this one weird case we call a one-off
+ // construction path that *won't* set |this| to JS_IS_CONSTRUCTING.
+ return InternalConstructWithProvidedThis(cx, fval, thisv, cargs, newTarget,
+ rval);
+ }
+
+ InvokeArgsMaybeIgnoresReturnValue args(cx);
+ if (!args.init(cx, argc, ignoresReturnValue)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < argc; i++) {
+ args[i].set(argvWithoutThis[i]);
+ }
+
+ return Call(cx, fval, thisv, args, rval);
+}
+
+void* GetContextSensitiveInterpreterStub() {
+ return TlsContext.get()->runtime()->jitRuntime()->interpreterStub().value;
+}
+
+bool InvokeFromInterpreterStub(JSContext* cx,
+ InterpreterStubExitFrameLayout* frame) {
+ JitFrameLayout* jsFrame = frame->jsFrame();
+ CalleeToken token = jsFrame->calleeToken();
+
+ Value* argv = jsFrame->thisAndActualArgs();
+ uint32_t numActualArgs = jsFrame->numActualArgs();
+ bool constructing = CalleeTokenIsConstructing(token);
+ RootedFunction fun(cx, CalleeTokenToFunction(token));
+
+ // Ensure new.target immediately follows the actual arguments (the arguments
+ // rectifier added padding).
+ if (constructing && numActualArgs < fun->nargs()) {
+ argv[1 + numActualArgs] = argv[1 + fun->nargs()];
+ }
+
+ RootedValue rval(cx);
+ if (!InvokeFunction(cx, fun, constructing,
+ /* ignoresReturnValue = */ false, numActualArgs, argv,
+ &rval)) {
+ return false;
+ }
+
+ // Overwrite |this| with the return value.
+ argv[0] = rval;
+ return true;
+}
+
+static bool CheckOverRecursedImpl(JSContext* cx, size_t extra) {
+ // We just failed the jitStackLimit check. There are two possible reasons:
+ // 1) jitStackLimit was the real stack limit and we're over-recursed
+ // 2) jitStackLimit was set to JS::NativeStackLimitMin by
+ // JSContext::requestInterrupt and we need to call
+ // JSContext::handleInterrupt.
+
+ // This handles 1).
+#ifdef JS_SIMULATOR
+ if (cx->simulator()->overRecursedWithExtra(extra)) {
+ ReportOverRecursed(cx);
+ return false;
+ }
+#else
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.checkWithExtra(cx, extra)) {
+ return false;
+ }
+#endif
+
+ // This handles 2).
+ gc::MaybeVerifyBarriers(cx);
+ return cx->handleInterrupt();
+}
+
+bool CheckOverRecursed(JSContext* cx) { return CheckOverRecursedImpl(cx, 0); }
+
+bool CheckOverRecursedBaseline(JSContext* cx, BaselineFrame* frame) {
+ // The stack check in Baseline happens before pushing locals so we have to
+ // account for that by including script->nslots() in the C++ recursion check.
+ size_t extra = frame->script()->nslots() * sizeof(Value);
+ return CheckOverRecursedImpl(cx, extra);
+}
+
+bool MutatePrototype(JSContext* cx, Handle<PlainObject*> obj,
+ HandleValue value) {
+ if (!value.isObjectOrNull()) {
+ return true;
+ }
+
+ RootedObject newProto(cx, value.toObjectOrNull());
+ return SetPrototype(cx, obj, newProto);
+}
+
+template <EqualityKind Kind>
+bool StringsEqual(JSContext* cx, HandleString lhs, HandleString rhs,
+ bool* res) {
+ if (!js::EqualStrings(cx, lhs, rhs, res)) {
+ return false;
+ }
+ if (Kind != EqualityKind::Equal) {
+ *res = !*res;
+ }
+ return true;
+}
+
+template bool StringsEqual<EqualityKind::Equal>(JSContext* cx, HandleString lhs,
+ HandleString rhs, bool* res);
+template bool StringsEqual<EqualityKind::NotEqual>(JSContext* cx,
+ HandleString lhs,
+ HandleString rhs, bool* res);
+
+template <ComparisonKind Kind>
+bool StringsCompare(JSContext* cx, HandleString lhs, HandleString rhs,
+ bool* res) {
+ int32_t result;
+ if (!js::CompareStrings(cx, lhs, rhs, &result)) {
+ return false;
+ }
+ if (Kind == ComparisonKind::LessThan) {
+ *res = result < 0;
+ } else {
+ *res = result >= 0;
+ }
+ return true;
+}
+
+template bool StringsCompare<ComparisonKind::LessThan>(JSContext* cx,
+ HandleString lhs,
+ HandleString rhs,
+ bool* res);
+template bool StringsCompare<ComparisonKind::GreaterThanOrEqual>(
+ JSContext* cx, HandleString lhs, HandleString rhs, bool* res);
+
+bool ArrayPushDensePure(JSContext* cx, ArrayObject* arr, Value* v) {
+ AutoUnsafeCallWithABI unsafe;
+
+ // Shape guards guarantee that the input is an extensible ArrayObject, which
+ // has a writable "length" property and has no other indexed properties.
+ MOZ_ASSERT(arr->isExtensible());
+ MOZ_ASSERT(arr->lengthIsWritable());
+ MOZ_ASSERT(!arr->isIndexed());
+
+ // Length must fit in an int32 because we guard against overflow before
+ // calling this VM function.
+ uint32_t index = arr->length();
+ MOZ_ASSERT(index < uint32_t(INT32_MAX));
+
+ DenseElementResult result = arr->setOrExtendDenseElements(cx, index, v, 1);
+ if (result == DenseElementResult::Failure) {
+ cx->recoverFromOutOfMemory();
+ }
+ return result == DenseElementResult::Success;
+}
+
+JSString* ArrayJoin(JSContext* cx, HandleObject array, HandleString sep) {
+ JS::RootedValueArray<3> argv(cx);
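+  // array_join is a JSNative, so argv follows the native calling convention:
+  // argv[0] is the callee/return-value slot, argv[1] is |this| (the array),
+  // and argv[2] is the single argument (argc = 1). The result is read back
+  // from argv[0] below.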
+ argv[0].setUndefined();
+ argv[1].setObject(*array);
+ argv[2].setString(sep);
+ if (!js::array_join(cx, 1, argv.begin())) {
+ return nullptr;
+ }
+ return argv[0].toString();
+}
+
+bool SetArrayLength(JSContext* cx, HandleObject obj, HandleValue value,
+ bool strict) {
+ Handle<ArrayObject*> array = obj.as<ArrayObject>();
+
+ RootedId id(cx, NameToId(cx->names().length));
+ ObjectOpResult result;
+
+ // SetArrayLength is called by IC stubs for SetProp and SetElem on arrays'
+ // "length" property.
+ //
+ // ArraySetLength below coerces |value| before checking for length being
+ // writable, and in the case of illegal values, will throw RangeError even
+ // when "length" is not writable. This is incorrect observable behavior,
+ // as a regular [[Set]] operation will check for "length" being
+ // writable before attempting any assignment.
+ //
+ // So, perform ArraySetLength if and only if "length" is writable.
+ if (array->lengthIsWritable()) {
+ Rooted<PropertyDescriptor> desc(
+ cx, PropertyDescriptor::Data(value, JS::PropertyAttribute::Writable));
+ if (!ArraySetLength(cx, array, id, desc, result)) {
+ return false;
+ }
+ } else {
+ MOZ_ALWAYS_TRUE(result.fail(JSMSG_READ_ONLY));
+ }
+
+ return result.checkStrictModeError(cx, obj, id, strict);
+}
+
+bool CharCodeAt(JSContext* cx, HandleString str, int32_t index,
+ uint32_t* code) {
+ char16_t c;
+ if (!str->getChar(cx, index, &c)) {
+ return false;
+ }
+ *code = c;
+ return true;
+}
+
+JSLinearString* StringFromCharCode(JSContext* cx, int32_t code) {
+ char16_t c = char16_t(code);
+
+ if (StaticStrings::hasUnit(c)) {
+ return cx->staticStrings().getUnit(c);
+ }
+
+ return NewStringCopyNDontDeflate<CanGC>(cx, &c, 1);
+}
+
+JSLinearString* StringFromCharCodeNoGC(JSContext* cx, int32_t code) {
+ AutoUnsafeCallWithABI unsafe;
+
+ char16_t c = char16_t(code);
+
+ if (StaticStrings::hasUnit(c)) {
+ return cx->staticStrings().getUnit(c);
+ }
+
+ return NewStringCopyNDontDeflate<NoGC>(cx, &c, 1);
+}
+
+JSString* StringFromCodePoint(JSContext* cx, int32_t codePoint) {
+ RootedValue rval(cx, Int32Value(codePoint));
+ if (!str_fromCodePoint_one_arg(cx, rval, &rval)) {
+ return nullptr;
+ }
+
+ return rval.toString();
+}
+
+JSLinearString* LinearizeForCharAccessPure(JSString* str) {
+ AutoUnsafeCallWithABI unsafe;
+
+ // Should only be called on ropes.
+ MOZ_ASSERT(str->isRope());
+
+ // ensureLinear is intentionally called with a nullptr to avoid OOM reporting.
+ return str->ensureLinear(nullptr);
+}
+
+JSLinearString* LinearizeForCharAccess(JSContext* cx, JSString* str) {
+ // Should only be called on ropes.
+ MOZ_ASSERT(str->isRope());
+
+ return str->ensureLinear(cx);
+}
+
+bool SetProperty(JSContext* cx, HandleObject obj, Handle<PropertyName*> name,
+ HandleValue value, bool strict, jsbytecode* pc) {
+ RootedId id(cx, NameToId(name));
+
+ RootedValue receiver(cx, ObjectValue(*obj));
+ ObjectOpResult result;
+ if (MOZ_LIKELY(!obj->getOpsSetProperty())) {
+ JSOp op = JSOp(*pc);
+ if (op == JSOp::SetName || op == JSOp::StrictSetName ||
+ op == JSOp::SetGName || op == JSOp::StrictSetGName) {
+ if (!NativeSetProperty<Unqualified>(cx, obj.as<NativeObject>(), id, value,
+ receiver, result)) {
+ return false;
+ }
+ } else {
+ if (!NativeSetProperty<Qualified>(cx, obj.as<NativeObject>(), id, value,
+ receiver, result)) {
+ return false;
+ }
+ }
+ } else {
+ if (!SetProperty(cx, obj, id, value, receiver, result)) {
+ return false;
+ }
+ }
+ return result.checkStrictModeError(cx, obj, id, strict);
+}
+
+bool InterruptCheck(JSContext* cx) {
+ gc::MaybeVerifyBarriers(cx);
+
+ return CheckForInterrupt(cx);
+}
+
+JSObject* NewStringObject(JSContext* cx, HandleString str) {
+ return StringObject::create(cx, str);
+}
+
+bool OperatorIn(JSContext* cx, HandleValue key, HandleObject obj, bool* out) {
+ RootedId id(cx);
+ return ToPropertyKey(cx, key, &id) && HasProperty(cx, obj, id, out);
+}
+
+bool GetIntrinsicValue(JSContext* cx, Handle<PropertyName*> name,
+ MutableHandleValue rval) {
+ return GlobalObject::getIntrinsicValue(cx, cx->global(), name, rval);
+}
+
+bool CreateThisFromIC(JSContext* cx, HandleObject callee,
+ HandleObject newTarget, MutableHandleValue rval) {
+ HandleFunction fun = callee.as<JSFunction>();
+ MOZ_ASSERT(fun->isInterpreted());
+ MOZ_ASSERT(fun->isConstructor());
+ MOZ_ASSERT(cx->realm() == fun->realm(),
+ "Realm switching happens before creating this");
+
+ // CreateThis expects rval to be this magic value.
+ rval.set(MagicValue(JS_IS_CONSTRUCTING));
+
+ if (!js::CreateThis(cx, fun, newTarget, GenericObject, rval)) {
+ return false;
+ }
+
+ MOZ_ASSERT_IF(rval.isObject(), fun->realm() == rval.toObject().nonCCWRealm());
+ return true;
+}
+
+bool CreateThisFromIon(JSContext* cx, HandleObject callee,
+ HandleObject newTarget, MutableHandleValue rval) {
+ // Return JS_IS_CONSTRUCTING for cases not supported by the inline call path.
+ rval.set(MagicValue(JS_IS_CONSTRUCTING));
+
+ if (!callee->is<JSFunction>()) {
+ return true;
+ }
+
+ HandleFunction fun = callee.as<JSFunction>();
+ if (!fun->isInterpreted() || !fun->isConstructor()) {
+ return true;
+ }
+
+ // If newTarget is not a function or is a function with a possibly-getter
+ // .prototype property, return NullValue to signal to LCallGeneric that it has
+ // to take the slow path. Note that we return NullValue instead of a
+ // MagicValue only because it's easier and faster to check for in JIT code
+ // (if we returned a MagicValue, JIT code would have to check both the type
+ // tag and the JSWhyMagic payload).
+ if (!fun->constructorNeedsUninitializedThis()) {
+ if (!newTarget->is<JSFunction>()) {
+ rval.setNull();
+ return true;
+ }
+ JSFunction* newTargetFun = &newTarget->as<JSFunction>();
+ if (!newTargetFun->hasNonConfigurablePrototypeDataProperty()) {
+ rval.setNull();
+ return true;
+ }
+ }
+
+ AutoRealm ar(cx, fun);
+ if (!js::CreateThis(cx, fun, newTarget, GenericObject, rval)) {
+ return false;
+ }
+
+ MOZ_ASSERT_IF(rval.isObject(), fun->realm() == rval.toObject().nonCCWRealm());
+ return true;
+}
+
+void PostWriteBarrier(JSRuntime* rt, js::gc::Cell* cell) {
+ AutoUnsafeCallWithABI unsafe;
+ rt->gc.storeBuffer().putWholeCellDontCheckLast(cell);
+}
+
+static const size_t MAX_WHOLE_CELL_BUFFER_SIZE = 4096;
+
+template <IndexInBounds InBounds>
+void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj, int32_t index) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(!IsInsideNursery(obj));
+
+ if (InBounds == IndexInBounds::Yes) {
+ MOZ_ASSERT(uint32_t(index) <
+ obj->as<NativeObject>().getDenseInitializedLength());
+ } else {
+ if (MOZ_UNLIKELY(!obj->is<NativeObject>() || index < 0 ||
+ uint32_t(index) >=
+ NativeObject::MAX_DENSE_ELEMENTS_COUNT)) {
+ rt->gc.storeBuffer().putWholeCell(obj);
+ return;
+ }
+ }
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (nobj->isInWholeCellBuffer()) {
+ return;
+ }
+
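+  // For objects with many dense elements, buffering the whole cell would make
+  // the next minor GC trace every element; record just the written slot
+  // instead.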
+ if (nobj->getDenseInitializedLength() > MAX_WHOLE_CELL_BUFFER_SIZE
+#ifdef JS_GC_ZEAL
+ || rt->hasZealMode(gc::ZealMode::ElementsBarrier)
+#endif
+ ) {
+ rt->gc.storeBuffer().putSlot(nobj, HeapSlot::Element,
+ nobj->unshiftedIndex(index), 1);
+ return;
+ }
+
+ rt->gc.storeBuffer().putWholeCell(obj);
+}
+
+template void PostWriteElementBarrier<IndexInBounds::Yes>(JSRuntime* rt,
+ JSObject* obj,
+ int32_t index);
+
+template void PostWriteElementBarrier<IndexInBounds::Maybe>(JSRuntime* rt,
+ JSObject* obj,
+ int32_t index);
+
+void PostGlobalWriteBarrier(JSRuntime* rt, GlobalObject* obj) {
+ MOZ_ASSERT(obj->JSObject::is<GlobalObject>());
+
+ if (!obj->realm()->globalWriteBarriered) {
+ AutoUnsafeCallWithABI unsafe;
+ rt->gc.storeBuffer().putWholeCell(obj);
+ obj->realm()->globalWriteBarriered = 1;
+ }
+}
+
+bool GetInt32FromStringPure(JSContext* cx, JSString* str, int32_t* result) {
+ // We shouldn't GC here as this is called directly from IC code.
+ AutoUnsafeCallWithABI unsafe;
+
+ double d;
+ if (!StringToNumberPure(cx, str, &d)) {
+ return false;
+ }
+
+ return mozilla::NumberIsInt32(d, result);
+}
+
+int32_t GetIndexFromString(JSString* str) {
+ // We shouldn't GC here as this is called directly from IC code.
+ AutoUnsafeCallWithABI unsafe;
+
+ if (!str->isLinear()) {
+ return -1;
+ }
+
+ uint32_t index = UINT32_MAX; // Initialize this to appease Valgrind.
+ if (!str->asLinear().isIndex(&index) || index > INT32_MAX) {
+ return -1;
+ }
+
+ return int32_t(index);
+}
+
+JSObject* WrapObjectPure(JSContext* cx, JSObject* obj) {
+ // IC code calls this directly so we shouldn't GC.
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(obj);
+ MOZ_ASSERT(cx->compartment() != obj->compartment());
+
+ // From: Compartment::getNonWrapperObjectForCurrentCompartment
+ // Note that if the object is same-compartment, but has been wrapped into a
+ // different compartment, we need to unwrap it and return the bare same-
+ // compartment object. Note again that windows are always wrapped by a
+ // WindowProxy even when same-compartment so take care not to strip this
+ // particular wrapper.
+ obj = UncheckedUnwrap(obj, /* stopAtWindowProxy = */ true);
+ if (cx->compartment() == obj->compartment()) {
+ MOZ_ASSERT(!IsWindow(obj));
+ JS::ExposeObjectToActiveJS(obj);
+ return obj;
+ }
+
+  // Try to look up an existing wrapper for this object. We assume that if we
+  // can find such a wrapper, not calling preWrap is correct.
+ if (ObjectWrapperMap::Ptr p = cx->compartment()->lookupWrapper(obj)) {
+ JSObject* wrapped = p->value().get();
+
+ // Ensure the wrapper is still exposed.
+ JS::ExposeObjectToActiveJS(wrapped);
+ return wrapped;
+ }
+
+ return nullptr;
+}
+
+bool DebugPrologue(JSContext* cx, BaselineFrame* frame) {
+ return DebugAPI::onEnterFrame(cx, frame);
+}
+
+bool DebugEpilogueOnBaselineReturn(JSContext* cx, BaselineFrame* frame,
+ const jsbytecode* pc) {
+ if (!DebugEpilogue(cx, frame, pc, true)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool DebugEpilogue(JSContext* cx, BaselineFrame* frame, const jsbytecode* pc,
+ bool ok) {
+ // If DebugAPI::onLeaveFrame returns |true| we have to return the frame's
+ // return value. If it returns |false|, the debugger threw an exception.
+ // In both cases we have to pop debug scopes.
+ ok = DebugAPI::onLeaveFrame(cx, frame, pc, ok);
+
+ // Unwind to the outermost environment.
+ EnvironmentIter ei(cx, frame, pc);
+ UnwindAllEnvironmentsInFrame(cx, ei);
+
+ if (!ok) {
+ // Pop this frame by updating packedExitFP, so that the exception
+ // handling code will start at the previous frame.
+ JitFrameLayout* prefix = frame->framePrefix();
+ EnsureUnwoundJitExitFrame(cx->activation()->asJit(), prefix);
+ return false;
+ }
+
+ return true;
+}
+
+void FrameIsDebuggeeCheck(BaselineFrame* frame) {
+ AutoUnsafeCallWithABI unsafe;
+ if (frame->script()->isDebuggee()) {
+ frame->setIsDebuggee();
+ }
+}
+
+JSObject* CreateGeneratorFromFrame(JSContext* cx, BaselineFrame* frame) {
+ return AbstractGeneratorObject::createFromFrame(cx, frame);
+}
+
+JSObject* CreateGenerator(JSContext* cx, HandleFunction callee,
+ HandleScript script, HandleObject environmentChain,
+ HandleObject args) {
+ Rooted<ArgumentsObject*> argsObj(
+ cx, args ? &args->as<ArgumentsObject>() : nullptr);
+ return AbstractGeneratorObject::create(cx, callee, script, environmentChain,
+ argsObj);
+}
+
+bool NormalSuspend(JSContext* cx, HandleObject obj, BaselineFrame* frame,
+ uint32_t frameSize, const jsbytecode* pc) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::InitialYield || JSOp(*pc) == JSOp::Yield ||
+ JSOp(*pc) == JSOp::Await);
+
+ // Minus one because we don't want to include the return value.
+ uint32_t numSlots = frame->numValueSlots(frameSize) - 1;
+ MOZ_ASSERT(numSlots >= frame->script()->nfixed());
+ return AbstractGeneratorObject::suspend(cx, obj, frame, pc, numSlots);
+}
+
+bool FinalSuspend(JSContext* cx, HandleObject obj, const jsbytecode* pc) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::FinalYieldRval);
+ AbstractGeneratorObject::finalSuspend(obj);
+ return true;
+}
+
+bool InterpretResume(JSContext* cx, HandleObject obj, Value* stackValues,
+ MutableHandleValue rval) {
+ MOZ_ASSERT(obj->is<AbstractGeneratorObject>());
+
+ // The |stackValues| argument points to the JSOp::Resume operands on the
+ // native stack. Because the stack grows down, these values are:
+ //
+ // [resumeKind, argument, generator, ..]
+
+ MOZ_ASSERT(stackValues[2].toObject() == *obj);
+
+ GeneratorResumeKind resumeKind = IntToResumeKind(stackValues[0].toInt32());
+ JSAtom* kind = ResumeKindToAtom(cx, resumeKind);
+
+ FixedInvokeArgs<3> args(cx);
+
+ args[0].setObject(*obj);
+ args[1].set(stackValues[1]);
+ args[2].setString(kind);
+
+ return CallSelfHostedFunction(cx, cx->names().InterpretGeneratorResume,
+ UndefinedHandleValue, args, rval);
+}
+
+bool DebugAfterYield(JSContext* cx, BaselineFrame* frame) {
+ // The BaselineFrame has just been constructed by JSOp::Resume in the
+ // caller. We need to set its debuggee flag as necessary.
+ //
+ // If a breakpoint is set on JSOp::AfterYield, or stepping is enabled,
+ // we may already have done this work. Don't fire onEnterFrame again.
+ if (frame->script()->isDebuggee() && !frame->isDebuggee()) {
+ frame->setIsDebuggee();
+ return DebugAPI::onResumeFrame(cx, frame);
+ }
+
+ return true;
+}
+
+bool GeneratorThrowOrReturn(JSContext* cx, BaselineFrame* frame,
+ Handle<AbstractGeneratorObject*> genObj,
+ HandleValue arg, int32_t resumeKindArg) {
+ GeneratorResumeKind resumeKind = IntToResumeKind(resumeKindArg);
+ MOZ_ALWAYS_FALSE(
+ js::GeneratorThrowOrReturn(cx, frame, genObj, arg, resumeKind));
+ return false;
+}
+
+bool GlobalDeclInstantiationFromIon(JSContext* cx, HandleScript script,
+ const jsbytecode* pc) {
+ MOZ_ASSERT(!script->hasNonSyntacticScope());
+
+ RootedObject envChain(cx, &cx->global()->lexicalEnvironment());
+ GCThingIndex lastFun = GET_GCTHING_INDEX(pc);
+
+ return GlobalOrEvalDeclInstantiation(cx, envChain, script, lastFun);
+}
+
+bool InitFunctionEnvironmentObjects(JSContext* cx, BaselineFrame* frame) {
+ return frame->initFunctionEnvironmentObjects(cx);
+}
+
+bool NewArgumentsObject(JSContext* cx, BaselineFrame* frame,
+ MutableHandleValue res) {
+ ArgumentsObject* obj = ArgumentsObject::createExpected(cx, frame);
+ if (!obj) {
+ return false;
+ }
+ res.setObject(*obj);
+ return true;
+}
+
+ArrayObject* NewArrayObjectEnsureDenseInitLength(JSContext* cx, int32_t count) {
+ MOZ_ASSERT(count >= 0);
+
+ auto* array = NewDenseFullyAllocatedArray(cx, count);
+ if (!array) {
+ return nullptr;
+ }
+ array->ensureDenseInitializedLength(0, count);
+
+ return array;
+}
+
+JSObject* InitRestParameter(JSContext* cx, uint32_t length, Value* rest,
+ HandleObject objRes) {
+ if (objRes) {
+ Handle<ArrayObject*> arrRes = objRes.as<ArrayObject>();
+ MOZ_ASSERT(arrRes->getDenseInitializedLength() == 0);
+
+ // Fast path: we managed to allocate the array inline; initialize the
+ // slots.
+ if (length > 0) {
+ if (!arrRes->ensureElements(cx, length)) {
+ return nullptr;
+ }
+ arrRes->initDenseElements(rest, length);
+ arrRes->setLength(length);
+ }
+ return arrRes;
+ }
+
+ return NewDenseCopiedArray(cx, length, rest);
+}
+
+bool HandleDebugTrap(JSContext* cx, BaselineFrame* frame,
+ const uint8_t* retAddr) {
+ RootedScript script(cx, frame->script());
+ jsbytecode* pc;
+ if (frame->runningInInterpreter()) {
+ pc = frame->interpreterPC();
+ } else {
+ BaselineScript* blScript = script->baselineScript();
+ pc = blScript->retAddrEntryFromReturnAddress(retAddr).pc(script);
+ }
+
+ // The Baseline Interpreter calls HandleDebugTrap for every op when the script
+ // is in step mode or has breakpoints. The Baseline Compiler can toggle
+ // breakpoints more granularly for specific bytecode PCs.
+ if (frame->runningInInterpreter()) {
+ MOZ_ASSERT(DebugAPI::hasAnyBreakpointsOrStepMode(script));
+ } else {
+ MOZ_ASSERT(DebugAPI::stepModeEnabled(script) ||
+ DebugAPI::hasBreakpointsAt(script, pc));
+ }
+
+ if (JSOp(*pc) == JSOp::AfterYield) {
+ // JSOp::AfterYield will set the frame's debuggee flag and call the
+ // onEnterFrame handler, but if we set a breakpoint there we have to do
+ // it now.
+ MOZ_ASSERT(!frame->isDebuggee());
+
+ if (!DebugAfterYield(cx, frame)) {
+ return false;
+ }
+
+ // If the frame is not a debuggee we're done. This can happen, for instance,
+ // if the onEnterFrame hook called removeDebuggee.
+ if (!frame->isDebuggee()) {
+ return true;
+ }
+ }
+
+ MOZ_ASSERT(frame->isDebuggee());
+
+ if (DebugAPI::stepModeEnabled(script) && !DebugAPI::onSingleStep(cx)) {
+ return false;
+ }
+
+ if (DebugAPI::hasBreakpointsAt(script, pc) && !DebugAPI::onTrap(cx)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool OnDebuggerStatement(JSContext* cx, BaselineFrame* frame) {
+ return DebugAPI::onDebuggerStatement(cx, frame);
+}
+
+bool GlobalHasLiveOnDebuggerStatement(JSContext* cx) {
+ AutoUnsafeCallWithABI unsafe;
+ return cx->realm()->isDebuggee() &&
+ DebugAPI::hasDebuggerStatementHook(cx->global());
+}
+
+bool PushLexicalEnv(JSContext* cx, BaselineFrame* frame,
+ Handle<LexicalScope*> scope) {
+ return frame->pushLexicalEnvironment(cx, scope);
+}
+
+bool DebugLeaveThenPopLexicalEnv(JSContext* cx, BaselineFrame* frame,
+ const jsbytecode* pc) {
+ MOZ_ALWAYS_TRUE(DebugLeaveLexicalEnv(cx, frame, pc));
+ frame->popOffEnvironmentChain<ScopedLexicalEnvironmentObject>();
+ return true;
+}
+
+bool FreshenLexicalEnv(JSContext* cx, BaselineFrame* frame) {
+ return frame->freshenLexicalEnvironment(cx);
+}
+
+bool DebugLeaveThenFreshenLexicalEnv(JSContext* cx, BaselineFrame* frame,
+ const jsbytecode* pc) {
+ MOZ_ALWAYS_TRUE(DebugLeaveLexicalEnv(cx, frame, pc));
+ return frame->freshenLexicalEnvironment(cx);
+}
+
+bool RecreateLexicalEnv(JSContext* cx, BaselineFrame* frame) {
+ return frame->recreateLexicalEnvironment(cx);
+}
+
+bool DebugLeaveThenRecreateLexicalEnv(JSContext* cx, BaselineFrame* frame,
+ const jsbytecode* pc) {
+ MOZ_ALWAYS_TRUE(DebugLeaveLexicalEnv(cx, frame, pc));
+ return frame->recreateLexicalEnvironment(cx);
+}
+
+bool DebugLeaveLexicalEnv(JSContext* cx, BaselineFrame* frame,
+ const jsbytecode* pc) {
+ MOZ_ASSERT_IF(!frame->runningInInterpreter(),
+ frame->script()->baselineScript()->hasDebugInstrumentation());
+ if (cx->realm()->isDebuggee()) {
+ DebugEnvironments::onPopLexical(cx, frame, pc);
+ }
+ return true;
+}
+
+bool PushClassBodyEnv(JSContext* cx, BaselineFrame* frame,
+ Handle<ClassBodyScope*> scope) {
+ return frame->pushClassBodyEnvironment(cx, scope);
+}
+
+bool PushVarEnv(JSContext* cx, BaselineFrame* frame, Handle<Scope*> scope) {
+ return frame->pushVarEnvironment(cx, scope);
+}
+
+bool EnterWith(JSContext* cx, BaselineFrame* frame, HandleValue val,
+ Handle<WithScope*> templ) {
+ return EnterWithOperation(cx, frame, val, templ);
+}
+
+bool LeaveWith(JSContext* cx, BaselineFrame* frame) {
+ if (MOZ_UNLIKELY(frame->isDebuggee())) {
+ DebugEnvironments::onPopWith(frame);
+ }
+ frame->popOffEnvironmentChain<WithEnvironmentObject>();
+ return true;
+}
+
+bool InitBaselineFrameForOsr(BaselineFrame* frame,
+ InterpreterFrame* interpFrame,
+ uint32_t numStackValues) {
+ return frame->initForOsr(interpFrame, numStackValues);
+}
+
+JSString* StringReplace(JSContext* cx, HandleString string,
+ HandleString pattern, HandleString repl) {
+ MOZ_ASSERT(string);
+ MOZ_ASSERT(pattern);
+ MOZ_ASSERT(repl);
+
+ return str_replace_string_raw(cx, string, pattern, repl);
+}
+
+void AssertValidBigIntPtr(JSContext* cx, JS::BigInt* bi) {
+ AutoUnsafeCallWithABI unsafe;
+ // FIXME: check runtime?
+ MOZ_ASSERT(cx->zone() == bi->zone());
+ MOZ_ASSERT(bi->isAligned());
+ MOZ_ASSERT(bi->getAllocKind() == gc::AllocKind::BIGINT);
+}
+
+void AssertValidObjectPtr(JSContext* cx, JSObject* obj) {
+ AutoUnsafeCallWithABI unsafe;
+#ifdef DEBUG
+ // Check what we can, so that we'll hopefully assert/crash if we get a
+ // bogus object (pointer).
+ MOZ_ASSERT(obj->compartment() == cx->compartment());
+ MOZ_ASSERT(obj->zoneFromAnyThread() == cx->zone());
+ MOZ_ASSERT(obj->runtimeFromMainThread() == cx->runtime());
+
+ if (obj->isTenured()) {
+ MOZ_ASSERT(obj->isAligned());
+ gc::AllocKind kind = obj->asTenured().getAllocKind();
+ MOZ_ASSERT(gc::IsObjectAllocKind(kind));
+ }
+#endif
+}
+
+void AssertValidStringPtr(JSContext* cx, JSString* str) {
+ AutoUnsafeCallWithABI unsafe;
+#ifdef DEBUG
+ // We can't closely inspect strings from another runtime.
+ if (str->runtimeFromAnyThread() != cx->runtime()) {
+ MOZ_ASSERT(str->isPermanentAtom());
+ return;
+ }
+
+ if (str->isAtom()) {
+ MOZ_ASSERT(str->zone()->isAtomsZone());
+ } else {
+ MOZ_ASSERT(str->zone() == cx->zone());
+ }
+
+ MOZ_ASSERT(str->isAligned());
+ MOZ_ASSERT(str->length() <= JSString::MAX_LENGTH);
+
+ gc::AllocKind kind = str->getAllocKind();
+ if (str->isFatInline()) {
+ MOZ_ASSERT(kind == gc::AllocKind::FAT_INLINE_STRING ||
+ kind == gc::AllocKind::FAT_INLINE_ATOM);
+ } else if (str->isExternal()) {
+ MOZ_ASSERT(kind == gc::AllocKind::EXTERNAL_STRING);
+ } else if (str->isAtom()) {
+ MOZ_ASSERT(kind == gc::AllocKind::ATOM);
+ } else if (str->isLinear()) {
+ MOZ_ASSERT(kind == gc::AllocKind::STRING ||
+ kind == gc::AllocKind::FAT_INLINE_STRING);
+ } else {
+ MOZ_ASSERT(kind == gc::AllocKind::STRING);
+ }
+#endif
+}
+
+void AssertValidSymbolPtr(JSContext* cx, JS::Symbol* sym) {
+ AutoUnsafeCallWithABI unsafe;
+
+ // We can't closely inspect symbols from another runtime.
+ if (sym->runtimeFromAnyThread() != cx->runtime()) {
+ MOZ_ASSERT(sym->isWellKnownSymbol());
+ return;
+ }
+
+ MOZ_ASSERT(sym->zone()->isAtomsZone());
+ MOZ_ASSERT(sym->isAligned());
+ if (JSAtom* desc = sym->description()) {
+ AssertValidStringPtr(cx, desc);
+ }
+
+ MOZ_ASSERT(sym->getAllocKind() == gc::AllocKind::SYMBOL);
+}
+
+void AssertValidValue(JSContext* cx, Value* v) {
+ AutoUnsafeCallWithABI unsafe;
+ if (v->isObject()) {
+ AssertValidObjectPtr(cx, &v->toObject());
+ } else if (v->isString()) {
+ AssertValidStringPtr(cx, v->toString());
+ } else if (v->isSymbol()) {
+ AssertValidSymbolPtr(cx, v->toSymbol());
+ } else if (v->isBigInt()) {
+ AssertValidBigIntPtr(cx, v->toBigInt());
+ }
+}
+
+bool ObjectIsCallable(JSObject* obj) {
+ AutoUnsafeCallWithABI unsafe;
+ return obj->isCallable();
+}
+
+bool ObjectIsConstructor(JSObject* obj) {
+ AutoUnsafeCallWithABI unsafe;
+ return obj->isConstructor();
+}
+
+void JitValuePreWriteBarrier(JSRuntime* rt, Value* vp) {
+ AutoUnsafeCallWithABI unsafe;
+ MOZ_ASSERT(vp->isGCThing());
+ MOZ_ASSERT(!vp->toGCThing()->isMarkedBlack());
+ gc::ValuePreWriteBarrier(*vp);
+}
+
+void JitStringPreWriteBarrier(JSRuntime* rt, JSString** stringp) {
+ AutoUnsafeCallWithABI unsafe;
+ MOZ_ASSERT(*stringp);
+ MOZ_ASSERT(!(*stringp)->isMarkedBlack());
+ gc::PreWriteBarrier(*stringp);
+}
+
+void JitObjectPreWriteBarrier(JSRuntime* rt, JSObject** objp) {
+ AutoUnsafeCallWithABI unsafe;
+ MOZ_ASSERT(*objp);
+ MOZ_ASSERT(!(*objp)->isMarkedBlack());
+ gc::PreWriteBarrier(*objp);
+}
+
+void JitShapePreWriteBarrier(JSRuntime* rt, Shape** shapep) {
+ AutoUnsafeCallWithABI unsafe;
+ MOZ_ASSERT(!(*shapep)->isMarkedBlack());
+ gc::PreWriteBarrier(*shapep);
+}
+
+bool ThrowRuntimeLexicalError(JSContext* cx, unsigned errorNumber) {
+ ScriptFrameIter iter(cx);
+ RootedScript script(cx, iter.script());
+ ReportRuntimeLexicalError(cx, errorNumber, script, iter.pc());
+ return false;
+}
+
+bool ThrowBadDerivedReturnOrUninitializedThis(JSContext* cx, HandleValue v) {
+ MOZ_ASSERT(!v.isObject());
+ if (v.isUndefined()) {
+ return js::ThrowUninitializedThis(cx);
+ }
+
+ ReportValueError(cx, JSMSG_BAD_DERIVED_RETURN, JSDVG_IGNORE_STACK, v,
+ nullptr);
+ return false;
+}
+
+bool BaselineGetFunctionThis(JSContext* cx, BaselineFrame* frame,
+ MutableHandleValue res) {
+ return GetFunctionThis(cx, frame, res);
+}
+
+bool CallNativeGetter(JSContext* cx, HandleFunction callee,
+ HandleValue receiver, MutableHandleValue result) {
+ AutoRealm ar(cx, callee);
+
+ MOZ_ASSERT(callee->isNativeFun());
+ JSNative natfun = callee->native();
+
+ JS::RootedValueArray<2> vp(cx);
+ vp[0].setObject(*callee.get());
+ vp[1].set(receiver);
+
+ if (!natfun(cx, 0, vp.begin())) {
+ return false;
+ }
+
+ result.set(vp[0]);
+ return true;
+}
+
+bool CallDOMGetter(JSContext* cx, const JSJitInfo* info, HandleObject obj,
+ MutableHandleValue result) {
+ MOZ_ASSERT(info->type() == JSJitInfo::Getter);
+ MOZ_ASSERT(obj->is<NativeObject>());
+ MOZ_ASSERT(obj->getClass()->isDOMClass());
+ MOZ_ASSERT(obj->as<NativeObject>().numFixedSlots() > 0);
+
+#ifdef DEBUG
+ DOMInstanceClassHasProtoAtDepth instanceChecker =
+ cx->runtime()->DOMcallbacks->instanceClassMatchesProto;
+ MOZ_ASSERT(instanceChecker(obj->getClass(), info->protoID, info->depth));
+#endif
+
+ // Loading DOM_OBJECT_SLOT, which must be the first slot.
+ JS::Value val = JS::GetReservedSlot(obj, 0);
+ JSJitGetterOp getter = info->getter;
+ return getter(cx, obj, val.toPrivate(), JSJitGetterCallArgs(result));
+}
+
+bool CallNativeSetter(JSContext* cx, HandleFunction callee, HandleObject obj,
+ HandleValue rhs) {
+ AutoRealm ar(cx, callee);
+
+ MOZ_ASSERT(callee->isNativeFun());
+ JSNative natfun = callee->native();
+
+ JS::RootedValueArray<3> vp(cx);
+ vp[0].setObject(*callee.get());
+ vp[1].setObject(*obj.get());
+ vp[2].set(rhs);
+
+ return natfun(cx, 1, vp.begin());
+}
+
+bool CallDOMSetter(JSContext* cx, const JSJitInfo* info, HandleObject obj,
+ HandleValue value) {
+ MOZ_ASSERT(info->type() == JSJitInfo::Setter);
+ MOZ_ASSERT(obj->is<NativeObject>());
+ MOZ_ASSERT(obj->getClass()->isDOMClass());
+ MOZ_ASSERT(obj->as<NativeObject>().numFixedSlots() > 0);
+
+#ifdef DEBUG
+ DOMInstanceClassHasProtoAtDepth instanceChecker =
+ cx->runtime()->DOMcallbacks->instanceClassMatchesProto;
+ MOZ_ASSERT(instanceChecker(obj->getClass(), info->protoID, info->depth));
+#endif
+
+ // Loading DOM_OBJECT_SLOT, which must be the first slot.
+ JS::Value val = JS::GetReservedSlot(obj, 0);
+ JSJitSetterOp setter = info->setter;
+
+ RootedValue v(cx, value);
+ return setter(cx, obj, val.toPrivate(), JSJitSetterCallArgs(&v));
+}
+
+bool EqualStringsHelperPure(JSString* str1, JSString* str2) {
+ // IC code calls this directly so we shouldn't GC.
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(str1->isAtom());
+ MOZ_ASSERT(!str2->isAtom());
+ MOZ_ASSERT(str1->length() == str2->length());
+
+ // ensureLinear is intentionally called with a nullptr context to avoid OOM
+ // reporting; if it fails, we will continue to the next stub.
+ JSLinearString* str2Linear = str2->ensureLinear(nullptr);
+ if (!str2Linear) {
+ return false;
+ }
+
+ return EqualChars(&str1->asLinear(), str2Linear);
+}
+
+static bool MaybeTypedArrayIndexString(jsid id) {
+ MOZ_ASSERT(id.isAtom() || id.isSymbol());
+
+ if (MOZ_LIKELY(id.isAtom())) {
+ JSAtom* str = id.toAtom();
+ if (str->length() > 0) {
+ // Only check the first character because we want this function to be
+ // fast.
+ return CanStartTypedArrayIndex(str->latin1OrTwoByteChar(0));
+ }
+ }
+ return false;
+}
+
+static void VerifyCacheEntry(JSContext* cx, NativeObject* obj, PropertyKey key,
+ const MegamorphicCacheEntry& entry) {
+#ifdef DEBUG
+ if (entry.isMissingProperty()) {
+ NativeObject* pobj;
+ PropertyResult prop;
+ MOZ_ASSERT(LookupPropertyPure(cx, obj, key, &pobj, &prop));
+ MOZ_ASSERT(prop.isNotFound());
+ return;
+ }
+ if (entry.isMissingOwnProperty()) {
+ MOZ_ASSERT(!obj->containsPure(key));
+ return;
+ }
+ MOZ_ASSERT(entry.isDataProperty());
+ for (size_t i = 0, numHops = entry.numHops(); i < numHops; i++) {
+ MOZ_ASSERT(!obj->containsPure(key));
+ obj = &obj->staticPrototype()->as<NativeObject>();
+ }
+ mozilla::Maybe<PropertyInfo> prop = obj->lookupPure(key);
+ MOZ_ASSERT(prop.isSome());
+ MOZ_ASSERT(prop->isDataProperty());
+ MOZ_ASSERT(obj->getTaggedSlotOffset(prop->slot()) == entry.slotOffset());
+#endif
+}
+
+static MOZ_ALWAYS_INLINE bool GetNativeDataPropertyPureImpl(
+ JSContext* cx, JSObject* obj, jsid id, MegamorphicCacheEntry* entry,
+ Value* vp) {
+ MOZ_ASSERT(obj->is<NativeObject>());
+ NativeObject* nobj = &obj->as<NativeObject>();
+ Shape* receiverShape = obj->shape();
+ MegamorphicCache& cache = cx->caches().megamorphicCache;
+
+ MOZ_ASSERT_IF(JitOptions.enableWatchtowerMegamorphic, entry);
+
+ size_t numHops = 0;
+ while (true) {
+ MOZ_ASSERT(!nobj->getOpsLookupProperty());
+
+ uint32_t index;
+ if (PropMap* map = nobj->shape()->lookup(cx, id, &index)) {
+ PropertyInfo prop = map->getPropertyInfo(index);
+ if (!prop.isDataProperty()) {
+ return false;
+ }
+ if (entry) {
+ TaggedSlotOffset offset = nobj->getTaggedSlotOffset(prop.slot());
+ cache.initEntryForDataProperty(entry, receiverShape, id, numHops,
+ offset);
+ }
+ *vp = nobj->getSlot(prop.slot());
+ return true;
+ }
+
+ // Property not found. Watch out for Class hooks and TypedArrays.
+ if (MOZ_UNLIKELY(!nobj->is<PlainObject>())) {
+ if (ClassMayResolveId(cx->names(), nobj->getClass(), id, nobj)) {
+ return false;
+ }
+
+ // Don't skip past TypedArrayObjects if the id can be a TypedArray index.
+ if (nobj->is<TypedArrayObject>()) {
+ if (MaybeTypedArrayIndexString(id)) {
+ return false;
+ }
+ }
+ }
+
+ JSObject* proto = nobj->staticPrototype();
+ if (!proto) {
+ if (entry) {
+ cache.initEntryForMissingProperty(entry, receiverShape, id);
+ }
+ vp->setUndefined();
+ return true;
+ }
+
+ if (!proto->is<NativeObject>()) {
+ return false;
+ }
+ nobj = &proto->as<NativeObject>();
+ numHops++;
+ }
+}
+
+bool GetNativeDataPropertyPureWithCacheLookup(JSContext* cx, JSObject* obj,
+ PropertyKey id,
+ MegamorphicCacheEntry* entry,
+ Value* vp) {
+ AutoUnsafeCallWithABI unsafe;
+
+ // If we're on x86, we don't have enough registers to populate this
+ // directly in Baseline JITted code, so we do the lookup here.
+ if (JitOptions.enableWatchtowerMegamorphic) {
+ Shape* receiverShape = obj->shape();
+ MegamorphicCache& cache = cx->caches().megamorphicCache;
+
+ if (cache.lookup(receiverShape, id, &entry)) {
+ NativeObject* nobj = &obj->as<NativeObject>();
+ VerifyCacheEntry(cx, nobj, id, *entry);
+ if (entry->isDataProperty()) {
+ for (size_t i = 0, numHops = entry->numHops(); i < numHops; i++) {
+ nobj = &nobj->staticPrototype()->as<NativeObject>();
+ }
+ uint32_t offset = entry->slotOffset().offset();
+ if (entry->slotOffset().isFixedSlot()) {
+ size_t index = NativeObject::getFixedSlotIndexFromOffset(offset);
+ *vp = nobj->getFixedSlot(index);
+ } else {
+ size_t index = NativeObject::getDynamicSlotIndexFromOffset(offset);
+ *vp = nobj->getDynamicSlot(index);
+ }
+ return true;
+ }
+ if (entry->isMissingProperty()) {
+ vp->setUndefined();
+ return true;
+ }
+ MOZ_ASSERT(entry->isMissingOwnProperty());
+ }
+ }
+
+ return GetNativeDataPropertyPureImpl(cx, obj, id, entry, vp);
+}
+
+bool GetNativeDataPropertyPure(JSContext* cx, JSObject* obj, PropertyKey id,
+ MegamorphicCacheEntry* entry, Value* vp) {
+ AutoUnsafeCallWithABI unsafe;
+ MOZ_ASSERT_IF(JitOptions.enableWatchtowerMegamorphic, entry);
+ return GetNativeDataPropertyPureImpl(cx, obj, id, entry, vp);
+}
+
+static MOZ_ALWAYS_INLINE bool ValueToAtomOrSymbolPure(JSContext* cx,
+ const Value& idVal,
+ jsid* id) {
+ if (MOZ_LIKELY(idVal.isString())) {
+ JSString* s = idVal.toString();
+ JSAtom* atom;
+ if (s->isAtom()) {
+ atom = &s->asAtom();
+ } else {
+ atom = AtomizeString(cx, s);
+ if (!atom) {
+ cx->recoverFromOutOfMemory();
+ return false;
+ }
+ }
+
+ // Watch out for integer ids because they may be stored in dense elements.
+ static_assert(PropertyKey::IntMin == 0);
+ static_assert(NativeObject::MAX_DENSE_ELEMENTS_COUNT < PropertyKey::IntMax,
+ "All dense elements must have integer jsids");
+ uint32_t index;
+ if (MOZ_UNLIKELY(atom->isIndex(&index) && index <= PropertyKey::IntMax)) {
+ return false;
+ }
+
+ *id = PropertyKey::NonIntAtom(atom);
+ return true;
+ }
+
+ if (idVal.isSymbol()) {
+ *id = PropertyKey::Symbol(idVal.toSymbol());
+ return true;
+ }
+
+ if (idVal.isNull()) {
+ *id = PropertyKey::NonIntAtom(cx->names().null);
+ return true;
+ }
+
+ if (idVal.isUndefined()) {
+ *id = PropertyKey::NonIntAtom(cx->names().undefined);
+ return true;
+ }
+
+ return false;
+}
+
+bool GetNativeDataPropertyByValuePure(JSContext* cx, JSObject* obj,
+ MegamorphicCacheEntry* entry, Value* vp) {
+ AutoUnsafeCallWithABI unsafe;
+
+ // vp[0] contains the id; the result will be stored in vp[1].
+ Value idVal = vp[0];
+ jsid id;
+ if (!ValueToAtomOrSymbolPure(cx, idVal, &id)) {
+ return false;
+ }
+
+ Shape* receiverShape = obj->shape();
+ MegamorphicCache& cache = cx->caches().megamorphicCache;
+ if (!entry && JitOptions.enableWatchtowerMegamorphic) {
+ cache.lookup(receiverShape, id, &entry);
+ }
+
+ Value* res = vp + 1;
+ return GetNativeDataPropertyPureImpl(cx, obj, id, entry, res);
+}
+
+bool SetNativeDataPropertyPure(JSContext* cx, JSObject* obj, PropertyKey id,
+ Value* val) {
+ AutoUnsafeCallWithABI unsafe;
+
+ if (MOZ_UNLIKELY(!obj->is<NativeObject>())) {
+ return false;
+ }
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ uint32_t index;
+ PropMap* map = nobj->shape()->lookup(cx, id, &index);
+ if (!map) {
+ return false;
+ }
+
+ PropertyInfo prop = map->getPropertyInfo(index);
+ if (!prop.isDataProperty() || !prop.writable()) {
+ return false;
+ }
+
+ nobj->setSlot(prop.slot(), *val);
+ return true;
+}
+
+bool ObjectHasGetterSetterPure(JSContext* cx, JSObject* objArg, jsid id,
+ GetterSetter* getterSetter) {
+ AutoUnsafeCallWithABI unsafe;
+
+ // Window objects may require outerizing (passing the WindowProxy to the
+ // getter/setter), so we don't support them here.
+ if (MOZ_UNLIKELY(!objArg->is<NativeObject>() || IsWindow(objArg))) {
+ return false;
+ }
+
+ NativeObject* nobj = &objArg->as<NativeObject>();
+
+ while (true) {
+ uint32_t index;
+ if (PropMap* map = nobj->shape()->lookup(cx, id, &index)) {
+ PropertyInfo prop = map->getPropertyInfo(index);
+ if (!prop.isAccessorProperty()) {
+ return false;
+ }
+ GetterSetter* actualGetterSetter = nobj->getGetterSetter(prop);
+ if (actualGetterSetter == getterSetter) {
+ return true;
+ }
+ return (actualGetterSetter->getter() == getterSetter->getter() &&
+ actualGetterSetter->setter() == getterSetter->setter());
+ }
+
+ // Property not found. Watch out for Class hooks.
+ if (!nobj->is<PlainObject>()) {
+ if (ClassMayResolveId(cx->names(), nobj->getClass(), id, nobj)) {
+ return false;
+ }
+ }
+
+ JSObject* proto = nobj->staticPrototype();
+ if (!proto) {
+ return false;
+ }
+
+ if (!proto->is<NativeObject>()) {
+ return false;
+ }
+ nobj = &proto->as<NativeObject>();
+ }
+}
+
+template <bool HasOwn>
+bool HasNativeDataPropertyPure(JSContext* cx, JSObject* obj,
+ MegamorphicCacheEntry* entry, Value* vp) {
+ AutoUnsafeCallWithABI unsafe;
+
+ // vp[0] contains the id; the result will be stored in vp[1].
+ Value idVal = vp[0];
+ jsid id;
+ if (!ValueToAtomOrSymbolPure(cx, idVal, &id)) {
+ return false;
+ }
+
+ MegamorphicCache& cache = cx->caches().megamorphicCache;
+ Shape* receiverShape = obj->shape();
+ if (!entry && JitOptions.enableWatchtowerMegamorphic) {
+ if (cache.lookup(receiverShape, id, &entry)) {
+ VerifyCacheEntry(cx, &obj->as<NativeObject>(), id, *entry);
+ }
+ }
+
+ size_t numHops = 0;
+ do {
+ if (MOZ_UNLIKELY(!obj->is<NativeObject>())) {
+ return false;
+ }
+
+ MOZ_ASSERT(!obj->getOpsLookupProperty());
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ uint32_t index;
+ if (PropMap* map = nobj->shape()->lookup(cx, id, &index)) {
+ if (JitOptions.enableWatchtowerMegamorphic) {
+ PropertyInfo prop = map->getPropertyInfo(index);
+ if (prop.isDataProperty()) {
+ TaggedSlotOffset offset = nobj->getTaggedSlotOffset(prop.slot());
+ cache.initEntryForDataProperty(entry, receiverShape, id, numHops,
+ offset);
+ }
+ }
+ vp[1].setBoolean(true);
+ return true;
+ }
+
+ // Property not found. Watch out for Class hooks and TypedArrays.
+ if (MOZ_UNLIKELY(!obj->is<PlainObject>())) {
+ // Fail if there's a resolve hook, unless the mayResolve hook tells us
+ // the resolve hook won't define a property with this id.
+ if (ClassMayResolveId(cx->names(), obj->getClass(), id, obj)) {
+ return false;
+ }
+
+ // Don't skip past TypedArrayObjects if the id can be a TypedArray
+ // index.
+ if (obj->is<TypedArrayObject>()) {
+ if (MaybeTypedArrayIndexString(id)) {
+ return false;
+ }
+ }
+ }
+
+ // If implementing Object.hasOwnProperty, don't follow the prototype chain.
+ if constexpr (HasOwn) {
+ break;
+ }
+
+ // Get prototype. Objects that may allow dynamic prototypes are already
+ // filtered out above.
+ obj = obj->staticPrototype();
+ numHops++;
+ } while (obj);
+
+ // Missing property.
+ if (entry) {
+ if constexpr (HasOwn) {
+ cache.initEntryForMissingOwnProperty(entry, receiverShape, id);
+ } else {
+ cache.initEntryForMissingProperty(entry, receiverShape, id);
+ }
+ }
+ vp[1].setBoolean(false);
+ return true;
+}
+
+template bool HasNativeDataPropertyPure<true>(JSContext* cx, JSObject* obj,
+ MegamorphicCacheEntry* entry,
+ Value* vp);
+
+template bool HasNativeDataPropertyPure<false>(JSContext* cx, JSObject* obj,
+ MegamorphicCacheEntry* entry,
+ Value* vp);
+
+bool HasNativeElementPure(JSContext* cx, NativeObject* obj, int32_t index,
+ Value* vp) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(obj->is<NativeObject>());
+ MOZ_ASSERT(!obj->getOpsHasProperty());
+ MOZ_ASSERT(!obj->getOpsLookupProperty());
+ MOZ_ASSERT(!obj->getOpsGetOwnPropertyDescriptor());
+
+ if (MOZ_UNLIKELY(index < 0)) {
+ return false;
+ }
+
+ if (obj->containsDenseElement(index)) {
+ vp[0].setBoolean(true);
+ return true;
+ }
+
+ jsid id = PropertyKey::Int(index);
+ uint32_t unused;
+ if (obj->shape()->lookup(cx, id, &unused)) {
+ vp[0].setBoolean(true);
+ return true;
+ }
+
+ // Fail if there's a resolve hook, unless the mayResolve hook tells
+ // us the resolve hook won't define a property with this id.
+ if (MOZ_UNLIKELY(ClassMayResolveId(cx->names(), obj->getClass(), id, obj))) {
+ return false;
+ }
+ // TypedArrayObjects are also native and contain indexed properties.
+ if (MOZ_UNLIKELY(obj->is<TypedArrayObject>())) {
+ size_t length = obj->as<TypedArrayObject>().length();
+ vp[0].setBoolean(uint32_t(index) < length);
+ return true;
+ }
+
+ vp[0].setBoolean(false);
+ return true;
+}
+
+// Fast path for setting/adding a plain object property. This is the common case
+// for megamorphic SetProp/SetElem.
+template <bool UseCache>
+static bool TryAddOrSetPlainObjectProperty(JSContext* cx,
+ Handle<PlainObject*> obj,
+ PropertyKey key, HandleValue value,
+ bool* optimized) {
+ MOZ_ASSERT(!*optimized);
+
+ Shape* receiverShape = obj->shape();
+ MegamorphicSetPropCache& cache = *cx->caches().megamorphicSetPropCache;
+
+#ifdef DEBUG
+ if constexpr (UseCache) {
+ MegamorphicSetPropCache::Entry* entry;
+ if (cache.lookup(receiverShape, key, &entry)) {
+ if (entry->afterShape() != nullptr) { // AddProp
+ NativeObject* holder = nullptr;
+ PropertyResult prop;
+ MOZ_ASSERT(LookupPropertyPure(cx, obj, key, &holder, &prop));
+ MOZ_ASSERT(obj != holder);
+ MOZ_ASSERT_IF(prop.isFound(),
+ prop.isNativeProperty() &&
+ prop.propertyInfo().isDataProperty() &&
+ prop.propertyInfo().writable());
+ } else { // SetProp
+ mozilla::Maybe<PropertyInfo> prop = obj->lookupPure(key);
+ MOZ_ASSERT(prop.isSome());
+ MOZ_ASSERT(prop->isDataProperty());
+ MOZ_ASSERT(obj->getTaggedSlotOffset(prop->slot()) ==
+ entry->slotOffset());
+ }
+ }
+ }
+#endif
+
+ // Fast path for changing a data property.
+ uint32_t index;
+ if (PropMap* map = obj->shape()->lookup(cx, key, &index)) {
+ PropertyInfo prop = map->getPropertyInfo(index);
+ if (!prop.isDataProperty() || !prop.writable()) {
+ return true;
+ }
+ obj->setSlot(prop.slot(), value);
+ *optimized = true;
+
+ if constexpr (UseCache) {
+ TaggedSlotOffset offset = obj->getTaggedSlotOffset(prop.slot());
+ cache.set(receiverShape, nullptr, key, offset, 0);
+ }
+ return true;
+ }
+
+ // Don't support "__proto__". This lets us take advantage of the
+ // hasNonWritableOrAccessorPropExclProto optimization below.
+ if (MOZ_UNLIKELY(!obj->isExtensible() || key.isAtom(cx->names().proto))) {
+ return true;
+ }
+
+ // Ensure the proto chain contains only plain objects. Deoptimize for accessor
+ // properties and non-writable data properties (we can't shadow non-writable
+ // properties).
+ JSObject* proto = obj->staticPrototype();
+ while (proto) {
+ if (!proto->is<PlainObject>()) {
+ return true;
+ }
+ PlainObject* plainProto = &proto->as<PlainObject>();
+ if (plainProto->hasNonWritableOrAccessorPropExclProto()) {
+ uint32_t index;
+ if (PropMap* map = plainProto->shape()->lookup(cx, key, &index)) {
+ PropertyInfo prop = map->getPropertyInfo(index);
+ if (!prop.isDataProperty() || !prop.writable()) {
+ return true;
+ }
+ break;
+ }
+ }
+ proto = plainProto->staticPrototype();
+ }
+
+#ifdef DEBUG
+ // At this point either the property is missing or it's a writable data
+ // property on the proto chain that we can shadow.
+ {
+ NativeObject* holder = nullptr;
+ PropertyResult prop;
+ MOZ_ASSERT(LookupPropertyPure(cx, obj, key, &holder, &prop));
+ MOZ_ASSERT(obj != holder);
+ MOZ_ASSERT_IF(prop.isFound(), prop.isNativeProperty() &&
+ prop.propertyInfo().isDataProperty() &&
+ prop.propertyInfo().writable());
+ }
+#endif
+
+ *optimized = true;
+ Rooted<PropertyKey> keyRoot(cx, key);
+ Rooted<Shape*> receiverShapeRoot(cx, receiverShape);
+ uint32_t resultSlot = 0;
+ size_t numDynamic = obj->numDynamicSlots();
+ bool res = AddDataPropertyToPlainObject(cx, obj, keyRoot, value, &resultSlot);
+
+ if constexpr (UseCache) {
+ if (res && obj->shape()->isShared() &&
+ resultSlot < SharedPropMap::MaxPropsForNonDictionary &&
+ !Watchtower::watchesPropertyAdd(obj)) {
+ TaggedSlotOffset offset = obj->getTaggedSlotOffset(resultSlot);
+ uint32_t newCapacity = 0;
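+ // If the new slot did not fit in the slots the object had before the add
+ // (numDynamic was sampled above), the object grew its dynamic slots; record
+ // the new capacity in the cache entry (0 means no growth was needed).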
+ if (!(resultSlot < obj->numFixedSlots() ||
+ (resultSlot - obj->numFixedSlots()) < numDynamic)) {
+ newCapacity = obj->numDynamicSlots();
+ }
+ cache.set(receiverShapeRoot, obj->shape(), keyRoot, offset, newCapacity);
+ }
+ }
+
+ return res;
+}
+
+template <bool Cached>
+bool SetElementMegamorphic(JSContext* cx, HandleObject obj, HandleValue index,
+ HandleValue value, bool strict) {
+ if (obj->is<PlainObject>()) {
+ PropertyKey key;
+ if (ValueToAtomOrSymbolPure(cx, index, &key)) {
+ bool optimized = false;
+ if (!TryAddOrSetPlainObjectProperty<Cached>(cx, obj.as<PlainObject>(),
+ key, value, &optimized)) {
+ return false;
+ }
+ if (optimized) {
+ return true;
+ }
+ }
+ }
+ Rooted<Value> receiver(cx, ObjectValue(*obj));
+ return SetObjectElementWithReceiver(cx, obj, index, value, receiver, strict);
+}
+
+template bool SetElementMegamorphic<false>(JSContext* cx, HandleObject obj,
+ HandleValue index, HandleValue value,
+ bool strict);
+template bool SetElementMegamorphic<true>(JSContext* cx, HandleObject obj,
+ HandleValue index, HandleValue value,
+ bool strict);
+
+template <bool Cached>
+bool SetPropertyMegamorphic(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue value, bool strict) {
+ if (obj->is<PlainObject>()) {
+ bool optimized = false;
+ if (!TryAddOrSetPlainObjectProperty<Cached>(cx, obj.as<PlainObject>(), id,
+ value, &optimized)) {
+ return false;
+ }
+ if (optimized) {
+ return true;
+ }
+ }
+ Rooted<Value> receiver(cx, ObjectValue(*obj));
+ ObjectOpResult result;
+ return SetProperty(cx, obj, id, value, receiver, result) &&
+ result.checkStrictModeError(cx, obj, id, strict);
+}
+
+template bool SetPropertyMegamorphic<false>(JSContext* cx, HandleObject obj,
+ HandleId id, HandleValue value,
+ bool strict);
+template bool SetPropertyMegamorphic<true>(JSContext* cx, HandleObject obj,
+ HandleId id, HandleValue value,
+ bool strict);
+
+void HandleCodeCoverageAtPC(BaselineFrame* frame, jsbytecode* pc) {
+ AutoUnsafeCallWithABI unsafe(UnsafeABIStrictness::AllowPendingExceptions);
+
+ MOZ_ASSERT(frame->runningInInterpreter());
+
+ JSScript* script = frame->script();
+ MOZ_ASSERT(pc == script->main() || BytecodeIsJumpTarget(JSOp(*pc)));
+
+ if (!script->hasScriptCounts()) {
+ if (!script->realm()->collectCoverageForDebug()) {
+ return;
+ }
+ JSContext* cx = script->runtimeFromMainThread()->mainContextFromOwnThread();
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!script->initScriptCounts(cx)) {
+ oomUnsafe.crash("initScriptCounts");
+ }
+ }
+
+ PCCounts* counts = script->maybeGetPCCounts(pc);
+ MOZ_ASSERT(counts);
+ counts->numExec()++;
+}
+
+void HandleCodeCoverageAtPrologue(BaselineFrame* frame) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(frame->runningInInterpreter());
+
+ JSScript* script = frame->script();
+ jsbytecode* main = script->main();
+ if (!BytecodeIsJumpTarget(JSOp(*main))) {
+ HandleCodeCoverageAtPC(frame, main);
+ }
+}
+
+JSString* TypeOfNameObject(JSObject* obj, JSRuntime* rt) {
+ AutoUnsafeCallWithABI unsafe;
+ JSType type = js::TypeOfObject(obj);
+ return TypeName(type, *rt->commonNames);
+}
+
+bool GetPrototypeOf(JSContext* cx, HandleObject target,
+ MutableHandleValue rval) {
+ MOZ_ASSERT(target->hasDynamicPrototype());
+
+ RootedObject proto(cx);
+ if (!GetPrototype(cx, target, &proto)) {
+ return false;
+ }
+ rval.setObjectOrNull(proto);
+ return true;
+}
+
+static JSString* ConvertObjectToStringForConcat(JSContext* cx,
+ HandleValue obj) {
+ MOZ_ASSERT(obj.isObject());
+ RootedValue rootedObj(cx, obj);
+ if (!ToPrimitive(cx, &rootedObj)) {
+ return nullptr;
+ }
+ return ToString<CanGC>(cx, rootedObj);
+}
+
+bool DoConcatStringObject(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ JSString* lstr = nullptr;
+ JSString* rstr = nullptr;
+
+ if (lhs.isString()) {
+ // Convert rhs first.
+ MOZ_ASSERT(lhs.isString() && rhs.isObject());
+ rstr = ConvertObjectToStringForConcat(cx, rhs);
+ if (!rstr) {
+ return false;
+ }
+
+ // lhs is already string.
+ lstr = lhs.toString();
+ } else {
+ MOZ_ASSERT(rhs.isString() && lhs.isObject());
+ // Convert lhs first.
+ lstr = ConvertObjectToStringForConcat(cx, lhs);
+ if (!lstr) {
+ return false;
+ }
+
+ // rhs is already string.
+ rstr = rhs.toString();
+ }
+
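+ // Try the concatenation without allowing GC first; if that fails, root the
+ // operands and retry on the path that may GC.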
+ JSString* str = ConcatStrings<NoGC>(cx, lstr, rstr);
+ if (!str) {
+ RootedString nlstr(cx, lstr), nrstr(cx, rstr);
+ str = ConcatStrings<CanGC>(cx, nlstr, nrstr);
+ if (!str) {
+ return false;
+ }
+ }
+
+ res.setString(str);
+ return true;
+}
+
+bool IsPossiblyWrappedTypedArray(JSContext* cx, JSObject* obj, bool* result) {
+ JSObject* unwrapped = CheckedUnwrapDynamic(obj, cx);
+ if (!unwrapped) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ *result = unwrapped->is<TypedArrayObject>();
+ return true;
+}
+
+// Called from CreateDependentString::generateFallback.
+void* AllocateDependentString(JSContext* cx) {
+ AutoUnsafeCallWithABI unsafe;
+ return cx->newCell<JSDependentString, NoGC>(js::gc::Heap::Default);
+}
+void* AllocateFatInlineString(JSContext* cx) {
+ AutoUnsafeCallWithABI unsafe;
+ return cx->newCell<JSFatInlineString, NoGC>(js::gc::Heap::Default);
+}
+
+// Called to allocate a BigInt if inline allocation failed.
+void* AllocateBigIntNoGC(JSContext* cx, bool requestMinorGC) {
+ AutoUnsafeCallWithABI unsafe;
+
+ if (requestMinorGC) {
+ cx->nursery().requestMinorGC(JS::GCReason::OUT_OF_NURSERY);
+ }
+
+ return cx->newCell<JS::BigInt, NoGC>(js::gc::Heap::Tenured);
+}
+
+void AllocateAndInitTypedArrayBuffer(JSContext* cx, TypedArrayObject* obj,
+ int32_t count) {
+ AutoUnsafeCallWithABI unsafe;
+
+ // Initialize the data slot to UndefinedValue to signal to our JIT caller that
+ // the allocation failed if the slot isn't overwritten below.
+ obj->initFixedSlot(TypedArrayObject::DATA_SLOT, UndefinedValue());
+
+ // Negative numbers or zero will bail out to the slow path, which in turn will
+ // raise an invalid argument exception or create a correct object with zero
+ // elements.
+ constexpr size_t maxByteLength = TypedArrayObject::MaxByteLength;
+ if (count <= 0 || size_t(count) > maxByteLength / obj->bytesPerElement()) {
+ obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, PrivateValue(size_t(0)));
+ return;
+ }
+
+ obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, PrivateValue(count));
+
+ size_t nbytes = size_t(count) * obj->bytesPerElement();
+ MOZ_ASSERT(nbytes <= maxByteLength);
+ nbytes = RoundUp(nbytes, sizeof(Value));
+
+ void* buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
+ js::ArrayBufferContentsArena);
+ if (buf) {
+ InitReservedSlot(obj, TypedArrayObject::DATA_SLOT, buf, nbytes,
+ MemoryUse::TypedArrayElements);
+ }
+}
+
+void* CreateMatchResultFallbackFunc(JSContext* cx, gc::AllocKind kind,
+ size_t nDynamicSlots) {
+ MOZ_ASSERT(nDynamicSlots);
+
+ AutoUnsafeCallWithABI unsafe;
+ ArrayObject* array = cx->newCell<ArrayObject, NoGC>(kind, gc::Heap::Default,
+ &ArrayObject::class_);
+ if (!array || !array->allocateInitialSlots(cx, nDynamicSlots)) {
+ return nullptr;
+ }
+
+ return array;
+}
+
+#ifdef JS_GC_PROBES
+void TraceCreateObject(JSObject* obj) {
+ AutoUnsafeCallWithABI unsafe;
+ js::gc::gcprobes::CreateObject(obj);
+}
+#endif
+
+#if JS_BITS_PER_WORD == 32
+BigInt* CreateBigIntFromInt64(JSContext* cx, uint32_t low, uint32_t high) {
+ uint64_t n = (static_cast<uint64_t>(high) << 32) + low;
+ return js::BigInt::createFromInt64(cx, n);
+}
+
+BigInt* CreateBigIntFromUint64(JSContext* cx, uint32_t low, uint32_t high) {
+ uint64_t n = (static_cast<uint64_t>(high) << 32) + low;
+ return js::BigInt::createFromUint64(cx, n);
+}
+#else
+BigInt* CreateBigIntFromInt64(JSContext* cx, uint64_t i64) {
+ return js::BigInt::createFromInt64(cx, i64);
+}
+
+BigInt* CreateBigIntFromUint64(JSContext* cx, uint64_t i64) {
+ return js::BigInt::createFromUint64(cx, i64);
+}
+#endif
+
+bool DoStringToInt64(JSContext* cx, HandleString str, uint64_t* res) {
+ BigInt* bi;
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, bi, js::StringToBigInt(cx, str));
+
+ if (!bi) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_INVALID_SYNTAX);
+ return false;
+ }
+
+ *res = js::BigInt::toUint64(bi);
+ return true;
+}
+
+template <EqualityKind Kind>
+bool BigIntEqual(BigInt* x, BigInt* y) {
+ AutoUnsafeCallWithABI unsafe;
+ bool res = BigInt::equal(x, y);
+ if (Kind != EqualityKind::Equal) {
+ res = !res;
+ }
+ return res;
+}
+
+template bool BigIntEqual<EqualityKind::Equal>(BigInt* x, BigInt* y);
+template bool BigIntEqual<EqualityKind::NotEqual>(BigInt* x, BigInt* y);
+
+template <ComparisonKind Kind>
+bool BigIntCompare(BigInt* x, BigInt* y) {
+ AutoUnsafeCallWithABI unsafe;
+ bool res = BigInt::lessThan(x, y);
+ if (Kind != ComparisonKind::LessThan) {
+ res = !res;
+ }
+ return res;
+}
+
+template bool BigIntCompare<ComparisonKind::LessThan>(BigInt* x, BigInt* y);
+template bool BigIntCompare<ComparisonKind::GreaterThanOrEqual>(BigInt* x,
+ BigInt* y);
+
+template <EqualityKind Kind>
+bool BigIntNumberEqual(BigInt* x, double y) {
+ AutoUnsafeCallWithABI unsafe;
+ bool res = BigInt::equal(x, y);
+ if (Kind != EqualityKind::Equal) {
+ res = !res;
+ }
+ return res;
+}
+
+template bool BigIntNumberEqual<EqualityKind::Equal>(BigInt* x, double y);
+template bool BigIntNumberEqual<EqualityKind::NotEqual>(BigInt* x, double y);
+
+template <ComparisonKind Kind>
+bool BigIntNumberCompare(BigInt* x, double y) {
+ AutoUnsafeCallWithABI unsafe;
+ mozilla::Maybe<bool> res = BigInt::lessThan(x, y);
+ if (Kind == ComparisonKind::LessThan) {
+ return res.valueOr(false);
+ }
+ return !res.valueOr(true);
+}
+
+template bool BigIntNumberCompare<ComparisonKind::LessThan>(BigInt* x,
+ double y);
+template bool BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>(BigInt* x,
+ double y);
+
+template <ComparisonKind Kind>
+bool NumberBigIntCompare(double x, BigInt* y) {
+ AutoUnsafeCallWithABI unsafe;
+ mozilla::Maybe<bool> res = BigInt::lessThan(x, y);
+ if (Kind == ComparisonKind::LessThan) {
+ return res.valueOr(false);
+ }
+ return !res.valueOr(true);
+}
+
+template bool NumberBigIntCompare<ComparisonKind::LessThan>(double x,
+ BigInt* y);
+template bool NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>(
+ double x, BigInt* y);
+
+template <EqualityKind Kind>
+bool BigIntStringEqual(JSContext* cx, HandleBigInt x, HandleString y,
+ bool* res) {
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, *res, BigInt::equal(cx, x, y));
+ if (Kind != EqualityKind::Equal) {
+ *res = !*res;
+ }
+ return true;
+}
+
+template bool BigIntStringEqual<EqualityKind::Equal>(JSContext* cx,
+ HandleBigInt x,
+ HandleString y, bool* res);
+template bool BigIntStringEqual<EqualityKind::NotEqual>(JSContext* cx,
+ HandleBigInt x,
+ HandleString y,
+ bool* res);
+
+template <ComparisonKind Kind>
+bool BigIntStringCompare(JSContext* cx, HandleBigInt x, HandleString y,
+ bool* res) {
+ mozilla::Maybe<bool> result;
+ if (!BigInt::lessThan(cx, x, y, result)) {
+ return false;
+ }
+ if (Kind == ComparisonKind::LessThan) {
+ *res = result.valueOr(false);
+ } else {
+ *res = !result.valueOr(true);
+ }
+ return true;
+}
+
+template bool BigIntStringCompare<ComparisonKind::LessThan>(JSContext* cx,
+ HandleBigInt x,
+ HandleString y,
+ bool* res);
+template bool BigIntStringCompare<ComparisonKind::GreaterThanOrEqual>(
+ JSContext* cx, HandleBigInt x, HandleString y, bool* res);
+
+template <ComparisonKind Kind>
+bool StringBigIntCompare(JSContext* cx, HandleString x, HandleBigInt y,
+ bool* res) {
+ mozilla::Maybe<bool> result;
+ if (!BigInt::lessThan(cx, x, y, result)) {
+ return false;
+ }
+ if (Kind == ComparisonKind::LessThan) {
+ *res = result.valueOr(false);
+ } else {
+ *res = !result.valueOr(true);
+ }
+ return true;
+}
+
+template bool StringBigIntCompare<ComparisonKind::LessThan>(JSContext* cx,
+ HandleString x,
+ HandleBigInt y,
+ bool* res);
+template bool StringBigIntCompare<ComparisonKind::GreaterThanOrEqual>(
+ JSContext* cx, HandleString x, HandleBigInt y, bool* res);
+
+BigInt* BigIntAsIntN(JSContext* cx, HandleBigInt x, int32_t bits) {
+ MOZ_ASSERT(bits >= 0);
+ return BigInt::asIntN(cx, x, uint64_t(bits));
+}
+
+BigInt* BigIntAsUintN(JSContext* cx, HandleBigInt x, int32_t bits) {
+ MOZ_ASSERT(bits >= 0);
+ return BigInt::asUintN(cx, x, uint64_t(bits));
+}
+
+template <typename T>
+static int32_t AtomicsCompareExchange(TypedArrayObject* typedArray,
+ size_t index, int32_t expected,
+ int32_t replacement) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(!typedArray->hasDetachedBuffer());
+ MOZ_ASSERT(index < typedArray->length());
+
+ SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
+ return jit::AtomicOperations::compareExchangeSeqCst(addr + index, T(expected),
+ T(replacement));
+}
+
+AtomicsCompareExchangeFn AtomicsCompareExchange(Scalar::Type elementType) {
+ switch (elementType) {
+ case Scalar::Int8:
+ return AtomicsCompareExchange<int8_t>;
+ case Scalar::Uint8:
+ return AtomicsCompareExchange<uint8_t>;
+ case Scalar::Int16:
+ return AtomicsCompareExchange<int16_t>;
+ case Scalar::Uint16:
+ return AtomicsCompareExchange<uint16_t>;
+ case Scalar::Int32:
+ return AtomicsCompareExchange<int32_t>;
+ case Scalar::Uint32:
+ return AtomicsCompareExchange<uint32_t>;
+ default:
+ MOZ_CRASH("Unexpected TypedArray type");
+ }
+}
+
+template <typename T>
+static int32_t AtomicsExchange(TypedArrayObject* typedArray, size_t index,
+ int32_t value) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(!typedArray->hasDetachedBuffer());
+ MOZ_ASSERT(index < typedArray->length());
+
+ SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
+ return jit::AtomicOperations::exchangeSeqCst(addr + index, T(value));
+}
+
+AtomicsReadWriteModifyFn AtomicsExchange(Scalar::Type elementType) {
+ switch (elementType) {
+ case Scalar::Int8:
+ return AtomicsExchange<int8_t>;
+ case Scalar::Uint8:
+ return AtomicsExchange<uint8_t>;
+ case Scalar::Int16:
+ return AtomicsExchange<int16_t>;
+ case Scalar::Uint16:
+ return AtomicsExchange<uint16_t>;
+ case Scalar::Int32:
+ return AtomicsExchange<int32_t>;
+ case Scalar::Uint32:
+ return AtomicsExchange<uint32_t>;
+ default:
+ MOZ_CRASH("Unexpected TypedArray type");
+ }
+}
+
+template <typename T>
+static int32_t AtomicsAdd(TypedArrayObject* typedArray, size_t index,
+ int32_t value) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(!typedArray->hasDetachedBuffer());
+ MOZ_ASSERT(index < typedArray->length());
+
+ SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
+ return jit::AtomicOperations::fetchAddSeqCst(addr + index, T(value));
+}
+
+AtomicsReadWriteModifyFn AtomicsAdd(Scalar::Type elementType) {
+ switch (elementType) {
+ case Scalar::Int8:
+ return AtomicsAdd<int8_t>;
+ case Scalar::Uint8:
+ return AtomicsAdd<uint8_t>;
+ case Scalar::Int16:
+ return AtomicsAdd<int16_t>;
+ case Scalar::Uint16:
+ return AtomicsAdd<uint16_t>;
+ case Scalar::Int32:
+ return AtomicsAdd<int32_t>;
+ case Scalar::Uint32:
+ return AtomicsAdd<uint32_t>;
+ default:
+ MOZ_CRASH("Unexpected TypedArray type");
+ }
+}
+
+template <typename T>
+static int32_t AtomicsSub(TypedArrayObject* typedArray, size_t index,
+ int32_t value) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(!typedArray->hasDetachedBuffer());
+ MOZ_ASSERT(index < typedArray->length());
+
+ SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
+ return jit::AtomicOperations::fetchSubSeqCst(addr + index, T(value));
+}
+
+AtomicsReadWriteModifyFn AtomicsSub(Scalar::Type elementType) {
+ switch (elementType) {
+ case Scalar::Int8:
+ return AtomicsSub<int8_t>;
+ case Scalar::Uint8:
+ return AtomicsSub<uint8_t>;
+ case Scalar::Int16:
+ return AtomicsSub<int16_t>;
+ case Scalar::Uint16:
+ return AtomicsSub<uint16_t>;
+ case Scalar::Int32:
+ return AtomicsSub<int32_t>;
+ case Scalar::Uint32:
+ return AtomicsSub<uint32_t>;
+ default:
+ MOZ_CRASH("Unexpected TypedArray type");
+ }
+}
+
+template <typename T>
+static int32_t AtomicsAnd(TypedArrayObject* typedArray, size_t index,
+ int32_t value) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(!typedArray->hasDetachedBuffer());
+ MOZ_ASSERT(index < typedArray->length());
+
+ SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
+ return jit::AtomicOperations::fetchAndSeqCst(addr + index, T(value));
+}
+
+AtomicsReadWriteModifyFn AtomicsAnd(Scalar::Type elementType) {
+ switch (elementType) {
+ case Scalar::Int8:
+ return AtomicsAnd<int8_t>;
+ case Scalar::Uint8:
+ return AtomicsAnd<uint8_t>;
+ case Scalar::Int16:
+ return AtomicsAnd<int16_t>;
+ case Scalar::Uint16:
+ return AtomicsAnd<uint16_t>;
+ case Scalar::Int32:
+ return AtomicsAnd<int32_t>;
+ case Scalar::Uint32:
+ return AtomicsAnd<uint32_t>;
+ default:
+ MOZ_CRASH("Unexpected TypedArray type");
+ }
+}
+
+template <typename T>
+static int32_t AtomicsOr(TypedArrayObject* typedArray, size_t index,
+ int32_t value) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(!typedArray->hasDetachedBuffer());
+ MOZ_ASSERT(index < typedArray->length());
+
+ SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
+ return jit::AtomicOperations::fetchOrSeqCst(addr + index, T(value));
+}
+
+AtomicsReadWriteModifyFn AtomicsOr(Scalar::Type elementType) {
+ switch (elementType) {
+ case Scalar::Int8:
+ return AtomicsOr<int8_t>;
+ case Scalar::Uint8:
+ return AtomicsOr<uint8_t>;
+ case Scalar::Int16:
+ return AtomicsOr<int16_t>;
+ case Scalar::Uint16:
+ return AtomicsOr<uint16_t>;
+ case Scalar::Int32:
+ return AtomicsOr<int32_t>;
+ case Scalar::Uint32:
+ return AtomicsOr<uint32_t>;
+ default:
+ MOZ_CRASH("Unexpected TypedArray type");
+ }
+}
+
+template <typename T>
+static int32_t AtomicsXor(TypedArrayObject* typedArray, size_t index,
+ int32_t value) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(!typedArray->hasDetachedBuffer());
+ MOZ_ASSERT(index < typedArray->length());
+
+ SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
+ return jit::AtomicOperations::fetchXorSeqCst(addr + index, T(value));
+}
+
+AtomicsReadWriteModifyFn AtomicsXor(Scalar::Type elementType) {
+ switch (elementType) {
+ case Scalar::Int8:
+ return AtomicsXor<int8_t>;
+ case Scalar::Uint8:
+ return AtomicsXor<uint8_t>;
+ case Scalar::Int16:
+ return AtomicsXor<int16_t>;
+ case Scalar::Uint16:
+ return AtomicsXor<uint16_t>;
+ case Scalar::Int32:
+ return AtomicsXor<int32_t>;
+ case Scalar::Uint32:
+ return AtomicsXor<uint32_t>;
+ default:
+ MOZ_CRASH("Unexpected TypedArray type");
+ }
+}
+
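+// Shared helper for the 64-bit atomics below: dispatches on BigInt64 vs
+// BigUint64, applies |op| to the element address and, in this first overload,
+// boxes the result in a new BigInt. The second overload returns whatever |op|
+// returns and is used when no BigInt result is needed (e.g. for stores).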
+template <typename AtomicOp, typename... Args>
+static BigInt* AtomicAccess64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index, AtomicOp op, Args... args) {
+ MOZ_ASSERT(Scalar::isBigIntType(typedArray->type()));
+ MOZ_ASSERT(!typedArray->hasDetachedBuffer());
+ MOZ_ASSERT(index < typedArray->length());
+
+ if (typedArray->type() == Scalar::BigInt64) {
+ SharedMem<int64_t*> addr = typedArray->dataPointerEither().cast<int64_t*>();
+ int64_t v = op(addr + index, BigInt::toInt64(args)...);
+ return BigInt::createFromInt64(cx, v);
+ }
+
+ SharedMem<uint64_t*> addr = typedArray->dataPointerEither().cast<uint64_t*>();
+ uint64_t v = op(addr + index, BigInt::toUint64(args)...);
+ return BigInt::createFromUint64(cx, v);
+}
+
+template <typename AtomicOp, typename... Args>
+static auto AtomicAccess64(TypedArrayObject* typedArray, size_t index,
+ AtomicOp op, Args... args) {
+ MOZ_ASSERT(Scalar::isBigIntType(typedArray->type()));
+ MOZ_ASSERT(!typedArray->hasDetachedBuffer());
+ MOZ_ASSERT(index < typedArray->length());
+
+ if (typedArray->type() == Scalar::BigInt64) {
+ SharedMem<int64_t*> addr = typedArray->dataPointerEither().cast<int64_t*>();
+ return op(addr + index, BigInt::toInt64(args)...);
+ }
+
+ SharedMem<uint64_t*> addr = typedArray->dataPointerEither().cast<uint64_t*>();
+ return op(addr + index, BigInt::toUint64(args)...);
+}
+
+BigInt* AtomicsLoad64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index) {
+ return AtomicAccess64(cx, typedArray, index, [](auto addr) {
+ return jit::AtomicOperations::loadSeqCst(addr);
+ });
+}
+
+void AtomicsStore64(TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
+ AutoUnsafeCallWithABI unsafe;
+
+ AtomicAccess64(
+ typedArray, index,
+ [](auto addr, auto val) {
+ jit::AtomicOperations::storeSeqCst(addr, val);
+ },
+ value);
+}
+
+BigInt* AtomicsCompareExchange64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index, const BigInt* expected,
+ const BigInt* replacement) {
+ return AtomicAccess64(
+ cx, typedArray, index,
+ [](auto addr, auto oldval, auto newval) {
+ return jit::AtomicOperations::compareExchangeSeqCst(addr, oldval,
+ newval);
+ },
+ expected, replacement);
+}
+
+BigInt* AtomicsExchange64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index, const BigInt* value) {
+ return AtomicAccess64(
+ cx, typedArray, index,
+ [](auto addr, auto val) {
+ return jit::AtomicOperations::exchangeSeqCst(addr, val);
+ },
+ value);
+}
+
+BigInt* AtomicsAdd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
+ return AtomicAccess64(
+ cx, typedArray, index,
+ [](auto addr, auto val) {
+ return jit::AtomicOperations::fetchAddSeqCst(addr, val);
+ },
+ value);
+}
+
+BigInt* AtomicsAnd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
+ return AtomicAccess64(
+ cx, typedArray, index,
+ [](auto addr, auto val) {
+ return jit::AtomicOperations::fetchAndSeqCst(addr, val);
+ },
+ value);
+}
+
+BigInt* AtomicsOr64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
+ return AtomicAccess64(
+ cx, typedArray, index,
+ [](auto addr, auto val) {
+ return jit::AtomicOperations::fetchOrSeqCst(addr, val);
+ },
+ value);
+}
+
+BigInt* AtomicsSub64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
+ return AtomicAccess64(
+ cx, typedArray, index,
+ [](auto addr, auto val) {
+ return jit::AtomicOperations::fetchSubSeqCst(addr, val);
+ },
+ value);
+}
+
+BigInt* AtomicsXor64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
+ return AtomicAccess64(
+ cx, typedArray, index,
+ [](auto addr, auto val) {
+ return jit::AtomicOperations::fetchXorSeqCst(addr, val);
+ },
+ value);
+}
+
+JSAtom* AtomizeStringNoGC(JSContext* cx, JSString* str) {
+ // IC code calls this directly so we shouldn't GC.
+ AutoUnsafeCallWithABI unsafe;
+
+ JSAtom* atom = AtomizeString(cx, str);
+ if (!atom) {
+ cx->recoverFromOutOfMemory();
+ return nullptr;
+ }
+
+ return atom;
+}
+
+bool SetObjectHas(JSContext* cx, HandleObject obj, HandleValue key,
+ bool* rval) {
+ return SetObject::has(cx, obj, key, rval);
+}
+
+bool MapObjectHas(JSContext* cx, HandleObject obj, HandleValue key,
+ bool* rval) {
+ return MapObject::has(cx, obj, key, rval);
+}
+
+bool MapObjectGet(JSContext* cx, HandleObject obj, HandleValue key,
+ MutableHandleValue rval) {
+ return MapObject::get(cx, obj, key, rval);
+}
+
+#ifdef DEBUG
+template <class OrderedHashTable>
+static mozilla::HashNumber HashValue(JSContext* cx, OrderedHashTable* hashTable,
+ const Value* value) {
+ RootedValue rootedValue(cx, *value);
+ HashableValue hashable;
+ MOZ_ALWAYS_TRUE(hashable.setValue(cx, rootedValue));
+
+ return hashTable->hash(hashable);
+}
+#endif
+
+void AssertSetObjectHash(JSContext* cx, SetObject* obj, const Value* value,
+ mozilla::HashNumber actualHash) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(actualHash == HashValue(cx, obj->getData(), value));
+}
+
+void AssertMapObjectHash(JSContext* cx, MapObject* obj, const Value* value,
+ mozilla::HashNumber actualHash) {
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(actualHash == HashValue(cx, obj->getData(), value));
+}
+
+void AssertPropertyLookup(NativeObject* obj, PropertyKey id, uint32_t slot) {
+ AutoUnsafeCallWithABI unsafe;
+#ifdef DEBUG
+ mozilla::Maybe<PropertyInfo> prop = obj->lookupPure(id);
+ MOZ_ASSERT(prop.isSome());
+ MOZ_ASSERT(prop->slot() == slot);
+#else
+ MOZ_CRASH("This should only be called in debug builds.");
+#endif
+}
+
+void AssumeUnreachable(const char* output) {
+ MOZ_ReportAssertionFailure(output, __FILE__, __LINE__);
+}
+
+void Printf0(const char* output) {
+ AutoUnsafeCallWithABI unsafe;
+
+ // Use stderr instead of stdout because this is only used for debug
+ // output. stderr is less likely to interfere with the program's normal
+ // output, and it's always unbuffered.
+ fprintf(stderr, "%s", output);
+}
+
+void Printf1(const char* output, uintptr_t value) {
+ AutoUnsafeCallWithABI unsafe;
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ js::UniqueChars line = JS_sprintf_append(nullptr, output, value);
+ if (!line) {
+ oomUnsafe.crash("OOM at masm.printf");
+ }
+ fprintf(stderr, "%s", line.get());
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
new file mode 100644
index 0000000000..36f05ae157
--- /dev/null
+++ b/js/src/jit/VMFunctions.h
@@ -0,0 +1,713 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_VMFunctions_h
+#define jit_VMFunctions_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/HashFunctions.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "gc/AllocKind.h"
+#include "js/ScalarType.h"
+#include "js/TypeDecls.h"
+
+class JSJitInfo;
+class JSLinearString;
+
+namespace js {
+
+class AbstractGeneratorObject;
+class ArrayObject;
+class GlobalObject;
+class InterpreterFrame;
+class LexicalScope;
+class ClassBodyScope;
+class MapObject;
+class NativeObject;
+class PlainObject;
+class PropertyName;
+class SetObject;
+class Shape;
+class TypedArrayObject;
+class WithScope;
+class MegamorphicCacheEntry;
+
+namespace gc {
+
+struct Cell;
+
+}
+
+namespace jit {
+
+class BaselineFrame;
+class InterpreterStubExitFrameLayout;
+
+enum DataType : uint8_t {
+ Type_Void,
+ Type_Bool,
+ Type_Int32,
+ Type_Double,
+ Type_Pointer,
+ Type_Cell,
+ Type_Value,
+ Type_Handle
+};
+
+enum MaybeTailCall : bool { TailCall, NonTailCall };
+
+// [SMDOC] JIT-to-C++ Function Calls. (callVM)
+//
+// Sometimes it is easier to reuse C++ code by calling the VM's functions.
+// Calling a function in the VM can be achieved with callWithABI, but this is
+// discouraged when the called function might trigger exceptions and/or
+// garbage collections, both of which need to walk the stack. VMFunctions and
+// callVM are interfaces provided to handle the exception handling and to
+// register the stack end (JITActivation) so that walking the stack is
+// possible.
+//
+// VMFunctionData is a structure which contains the information needed to
+// generate a trampoline function that makes the call (with generateVMWrapper)
+// and to root the arguments of the function (in TraceJitExitFrame).
+// VMFunctionData is created with the VMFunctionDataHelper template, which
+// infers the VMFunctionData fields from the function signature. The rooting
+// and trampoline code is therefore determined by the arguments of a function
+// and their positions in its signature.
+//
+// VM functions all expect a JSContext* as their first argument. This argument
+// is implicitly provided by the trampoline code (in generateVMWrapper) and
+// used for creating new objects or reporting errors. If your function does
+// not make use of its JSContext* argument, you should probably use a
+// callWithABI call instead.
+//
+// Functions described using the VMFunction system must conform to a simple
+// protocol: the return type must have a special "failure" value (for example,
+// false for bool, or nullptr for Objects). If the function is designed to
+// return a value that does not meet this requirement (such as
+// object-or-nullptr, or an integer), an optional, final outParam can be
+// specified. In this case, the return type must be boolean to indicate
+// failure.
+//
+// JIT Code usage:
+//
+// Different JIT compilers in SpiderMonkey have their own implementations of
+// callVM to call VM functions. However, the general shape of them is that
+// arguments (excluding the JSContext or trailing out-param) are pushed on to
+// the stack from right to left (rightmost argument is pushed first).
+//
+// Regardless of return value protocol being used (final outParam, or return
+// value) the generated trampolines ensure the return value ends up in
+// JSReturnOperand, ReturnReg or ReturnDoubleReg.
+//
+// Example:
+//
+// The details will differ slightly between the different compilers in
+// SpiderMonkey, but the general shape of our usage looks like this:
+//
+// Suppose we have a function Foo:
+//
+// bool Foo(JSContext* cx, HandleObject x, HandleId y,
+// MutableHandleValue z);
+//
+// This function returns true on success, and z is the outparam return value.
+//
+// A VM function wrapper for this can be created by adding an entry to
+// VM_FUNCTION_LIST in VMFunctionList-inl.h:
+//
+// _(Foo, js::Foo)
+//
+// In the compiler code the call would then be issued like this:
+//
+// masm.Push(id);
+// masm.Push(obj);
+//
+// using Fn = bool (*)(JSContext*, HandleObject, HandleId,
+// MutableHandleValue);
+// if (!callVM<Fn, js::Foo>()) {
+// return false;
+// }
+//
+// After this, the result value is in the return value register.
+
+// Data for a VM function. All VMFunctionDatas are stored in a constexpr array.
+struct VMFunctionData {
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ // Informative name of the wrapped function. The name should not be present
+ // in release builds in order to save memory.
+ const char* name_;
+#endif
+
+ // Note: a maximum of seven root types is supported.
+ enum RootType : uint8_t {
+ RootNone = 0,
+ RootObject,
+ RootString,
+ RootId,
+ RootValue,
+ RootCell,
+ RootBigInt
+ };
+
+ // Contains a combination of enumerated types used by the gc for marking
+ // arguments of the VM wrapper.
+ uint64_t argumentRootTypes;
+
+ enum ArgProperties {
+ WordByValue = 0,
+ DoubleByValue = 1,
+ WordByRef = 2,
+ DoubleByRef = 3,
+ // BitMask version.
+ Word = 0,
+ Double = 1,
+ ByRef = 2
+ };
+
+ // Contains properties about the first 16 arguments.
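+ // Each argument gets a 2-bit group: bit 0 (Double) marks a double-sized
+ // value and bit 1 (ByRef) marks an argument passed by reference, so for
+ // example DoubleByValue is 0b01 and DoubleByRef is 0b11. argProperties()
+ // below extracts the group for a given argument index.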
+ uint32_t argumentProperties;
+
+ // Which arguments should be passed in float registers on platforms that
+ // have them.
+ uint32_t argumentPassedInFloatRegs;
+
+ // Number of arguments expected, excluding JSContext * as an implicit
+ // first argument and an outparam as a possible implicit final argument.
+ uint8_t explicitArgs;
+
+ // The root type of the out param if outParam == Type_Handle.
+ RootType outParamRootType;
+
+ // The outparam may be any Type_*, and must be the final argument to the
+ // function, if not Void. outParam != Void implies that the return type
+ // has a boolean failure mode.
+ DataType outParam;
+
+ // Type returned by the C function and used by the VMFunction wrapper to
+ // check for failures of the C function. Valid failure/return types are
+ // boolean and object pointers, which are asserted inside the VMFunction
+ // constructor. If the C function uses an outparam (!= Type_Void), then
+ // the only valid failure/return type is boolean -- object pointers are
+ // pointless because the wrapper will only compare the pointer against
+ // nullptr before discarding its value.
+ DataType returnType;
+
+ // Number of Values the VM wrapper should pop from the stack when it returns.
+ // Used by baseline IC stubs so that they can use tail calls to call the VM
+ // wrapper.
+ uint8_t extraValuesToPop;
+
+ // On some architectures, called functions need to explicitly push their
+ // return address. For a tail call there is nothing to push, so tail-callness
+ // needs to be known at compile time.
+ MaybeTailCall expectTailCall;
+
+ uint32_t argc() const {
+ // JSContext * + args + (OutParam? *)
+ return 1 + explicitArgc() + ((outParam == Type_Void) ? 0 : 1);
+ }
+
+ DataType failType() const { return returnType; }
+
+ // Whether this function returns anything more than a boolean flag for
+ // failures.
+ bool returnsData() const {
+ return returnType == Type_Cell || outParam != Type_Void;
+ }
+
+ ArgProperties argProperties(uint32_t explicitArg) const {
+ return ArgProperties((argumentProperties >> (2 * explicitArg)) & 3);
+ }
+
+ RootType argRootType(uint32_t explicitArg) const {
+ return RootType((argumentRootTypes >> (3 * explicitArg)) & 7);
+ }
+
+ bool argPassedInFloatReg(uint32_t explicitArg) const {
+ return ((argumentPassedInFloatRegs >> explicitArg) & 1) == 1;
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ const char* name() const { return name_; }
+#endif
+
+ // Return the stack size consumed by explicit arguments.
+ size_t explicitStackSlots() const {
+ size_t stackSlots = explicitArgs;
+
+ // Fetch all double-word flags of explicit arguments.
+ uint32_t n = ((1 << (explicitArgs * 2)) - 1) // = Explicit argument mask.
+ & 0x55555555 // = Mask double-size args.
+ & argumentProperties;
+
+ // Add the number of double-word flags. (Expect only a few loop
+ // iterations.)
+ while (n) {
+ stackSlots++;
+ n &= n - 1;
+ }
+ return stackSlots;
+ }
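+
+ // (The "n &= n - 1" loops here and below count set bits by clearing the
+ // lowest set bit on each iteration; e.g. for n == 0b0110 the loop runs
+ // twice: 0b0110 -> 0b0100 -> 0.)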
+
+ // Double-size arguments which are passed by value take the space of
+ // 2 C arguments. This function is used to compute the number of arguments
+ // expected by the C function. This is not the same as explicitStackSlots
+ // because references to stack slots may take one less register in the
+ // total count.
+ size_t explicitArgc() const {
+ size_t stackSlots = explicitArgs;
+
+ // Fetch all explicit arguments.
+ uint32_t n = ((1 << (explicitArgs * 2)) - 1) // = Explicit argument mask.
+ & argumentProperties;
+
+ // Filter double-size arguments (0x5 = 0b0101) and remove (& ~)
+ // arguments passed by reference (0b1010 >> 1 == 0b0101).
+ n = (n & 0x55555555) & ~(n >> 1);
+
+ // Add the number of double words transferred by value. (Expect only a
+ // few loop iterations.)
+ while (n) {
+ stackSlots++;
+ n &= n - 1;
+ }
+ return stackSlots;
+ }
+
+ size_t doubleByRefArgs() const {
+ size_t count = 0;
+
+ // Fetch all explicit arguments.
+ uint32_t n = ((1 << (explicitArgs * 2)) - 1) // = Explicit argument mask.
+ & argumentProperties;
+
+ // Filter double-size arguments (0x5 = 0b0101) and take (&) only
+ // arguments passed by reference (0b1010 >> 1 == 0b0101).
+ n = (n & 0x55555555) & (n >> 1);
+
+ // Add the number of double words transferred by reference. (Expect only
+ // a few loop iterations.)
+ while (n) {
+ count++;
+ n &= n - 1;
+ }
+ return count;
+ }
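+
+ // For illustration only (a hand-written sketch, not a generated encoding):
+ // a hypothetical wrapper whose explicit arguments are (int32_t, double)
+ // would be described roughly as
+ //
+ //   explicitArgs = 2
+ //   argumentProperties = WordByValue | (DoubleByValue << 2)
+ //   argumentRootTypes = RootNone | (RootNone << 3)
+ //   argumentPassedInFloatRegs = 0b10 // the double may use a float register
+ //
+ // so that explicitStackSlots() == 3 and explicitArgc() == 3 (the
+ // double-by-value argument occupies two word slots and two C argument
+ // slots), and doubleByRefArgs() == 0.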
+
+ constexpr VMFunctionData(const char* name, uint32_t explicitArgs,
+ uint32_t argumentProperties,
+ uint32_t argumentPassedInFloatRegs,
+ uint64_t argRootTypes, DataType outParam,
+ RootType outParamRootType, DataType returnType,
+ uint8_t extraValuesToPop = 0,
+ MaybeTailCall expectTailCall = NonTailCall)
+ :
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ name_(name),
+#endif
+ argumentRootTypes(argRootTypes),
+ argumentProperties(argumentProperties),
+ argumentPassedInFloatRegs(argumentPassedInFloatRegs),
+ explicitArgs(explicitArgs),
+ outParamRootType(outParamRootType),
+ outParam(outParam),
+ returnType(returnType),
+ extraValuesToPop(extraValuesToPop),
+ expectTailCall(expectTailCall) {
+ // Check for valid failure/return type.
+ MOZ_ASSERT_IF(outParam != Type_Void,
+ returnType == Type_Void || returnType == Type_Bool);
+ MOZ_ASSERT(returnType == Type_Void || returnType == Type_Bool ||
+ returnType == Type_Cell);
+ }
+
+ constexpr VMFunctionData(const VMFunctionData& o) = default;
+};
+
+// Extract the last element of a list of types.
+template <typename... ArgTypes>
+struct LastArg;
+
+template <>
+struct LastArg<> {
+ using Type = void;
+};
+
+template <typename HeadType>
+struct LastArg<HeadType> {
+ using Type = HeadType;
+};
+
+template <typename HeadType, typename... TailTypes>
+struct LastArg<HeadType, TailTypes...> {
+ using Type = typename LastArg<TailTypes...>::Type;
+};
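+
+// For example (illustrative only), LastArg<JSContext*, HandleObject,
+// MutableHandleValue>::Type is MutableHandleValue, and LastArg<>::Type is
+// void.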
+
+[[nodiscard]] bool InvokeFunction(JSContext* cx, HandleObject obj0,
+ bool constructing, bool ignoresReturnValue,
+ uint32_t argc, Value* argv,
+ MutableHandleValue rval);
+
+bool InvokeFromInterpreterStub(JSContext* cx,
+ InterpreterStubExitFrameLayout* frame);
+void* GetContextSensitiveInterpreterStub();
+
+bool CheckOverRecursed(JSContext* cx);
+bool CheckOverRecursedBaseline(JSContext* cx, BaselineFrame* frame);
+
+[[nodiscard]] bool MutatePrototype(JSContext* cx, Handle<PlainObject*> obj,
+ HandleValue value);
+
+enum class EqualityKind : bool { NotEqual, Equal };
+
+template <EqualityKind Kind>
+bool StringsEqual(JSContext* cx, HandleString lhs, HandleString rhs, bool* res);
+
+enum class ComparisonKind : bool { GreaterThanOrEqual, LessThan };
+
+template <ComparisonKind Kind>
+bool StringsCompare(JSContext* cx, HandleString lhs, HandleString rhs,
+ bool* res);
+
+[[nodiscard]] bool ArrayPushDensePure(JSContext* cx, ArrayObject* arr,
+ Value* v);
+JSString* ArrayJoin(JSContext* cx, HandleObject array, HandleString sep);
+[[nodiscard]] bool SetArrayLength(JSContext* cx, HandleObject obj,
+ HandleValue value, bool strict);
+
+[[nodiscard]] bool CharCodeAt(JSContext* cx, HandleString str, int32_t index,
+ uint32_t* code);
+JSLinearString* StringFromCharCode(JSContext* cx, int32_t code);
+JSLinearString* StringFromCharCodeNoGC(JSContext* cx, int32_t code);
+JSString* StringFromCodePoint(JSContext* cx, int32_t codePoint);
+JSLinearString* LinearizeForCharAccessPure(JSString* str);
+JSLinearString* LinearizeForCharAccess(JSContext* cx, JSString* str);
+
+[[nodiscard]] bool SetProperty(JSContext* cx, HandleObject obj,
+ Handle<PropertyName*> name, HandleValue value,
+ bool strict, jsbytecode* pc);
+
+[[nodiscard]] bool InterruptCheck(JSContext* cx);
+
+JSObject* NewStringObject(JSContext* cx, HandleString str);
+
+bool OperatorIn(JSContext* cx, HandleValue key, HandleObject obj, bool* out);
+
+[[nodiscard]] bool GetIntrinsicValue(JSContext* cx, Handle<PropertyName*> name,
+ MutableHandleValue rval);
+
+[[nodiscard]] bool CreateThisFromIC(JSContext* cx, HandleObject callee,
+ HandleObject newTarget,
+ MutableHandleValue rval);
+[[nodiscard]] bool CreateThisFromIon(JSContext* cx, HandleObject callee,
+ HandleObject newTarget,
+ MutableHandleValue rval);
+
+void PostWriteBarrier(JSRuntime* rt, js::gc::Cell* cell);
+void PostGlobalWriteBarrier(JSRuntime* rt, GlobalObject* obj);
+
+enum class IndexInBounds { Yes, Maybe };
+
+template <IndexInBounds InBounds>
+void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj, int32_t index);
+
+// If |str| represents an int32, assign it to |result| and return true.
+// Otherwise return false.
+bool GetInt32FromStringPure(JSContext* cx, JSString* str, int32_t* result);
+
+// If |str| is an index in the range [0, INT32_MAX], return it. If the string
+// is not an index in this range, return -1.
+int32_t GetIndexFromString(JSString* str);
+
+JSObject* WrapObjectPure(JSContext* cx, JSObject* obj);
+
+[[nodiscard]] bool DebugPrologue(JSContext* cx, BaselineFrame* frame);
+[[nodiscard]] bool DebugEpilogue(JSContext* cx, BaselineFrame* frame,
+ const jsbytecode* pc, bool ok);
+[[nodiscard]] bool DebugEpilogueOnBaselineReturn(JSContext* cx,
+ BaselineFrame* frame,
+ const jsbytecode* pc);
+void FrameIsDebuggeeCheck(BaselineFrame* frame);
+
+JSObject* CreateGeneratorFromFrame(JSContext* cx, BaselineFrame* frame);
+JSObject* CreateGenerator(JSContext* cx, HandleFunction, HandleScript,
+ HandleObject, HandleObject);
+
+[[nodiscard]] bool NormalSuspend(JSContext* cx, HandleObject obj,
+ BaselineFrame* frame, uint32_t frameSize,
+ const jsbytecode* pc);
+[[nodiscard]] bool FinalSuspend(JSContext* cx, HandleObject obj,
+ const jsbytecode* pc);
+[[nodiscard]] bool InterpretResume(JSContext* cx, HandleObject obj,
+ Value* stackValues, MutableHandleValue rval);
+[[nodiscard]] bool DebugAfterYield(JSContext* cx, BaselineFrame* frame);
+[[nodiscard]] bool GeneratorThrowOrReturn(
+ JSContext* cx, BaselineFrame* frame,
+ Handle<AbstractGeneratorObject*> genObj, HandleValue arg,
+ int32_t resumeKindArg);
+
+[[nodiscard]] bool GlobalDeclInstantiationFromIon(JSContext* cx,
+ HandleScript script,
+ const jsbytecode* pc);
+[[nodiscard]] bool InitFunctionEnvironmentObjects(JSContext* cx,
+ BaselineFrame* frame);
+
+[[nodiscard]] bool NewArgumentsObject(JSContext* cx, BaselineFrame* frame,
+ MutableHandleValue res);
+
+ArrayObject* NewArrayObjectEnsureDenseInitLength(JSContext* cx, int32_t count);
+
+JSObject* InitRestParameter(JSContext* cx, uint32_t length, Value* rest,
+ HandleObject res);
+
+[[nodiscard]] bool HandleDebugTrap(JSContext* cx, BaselineFrame* frame,
+ const uint8_t* retAddr);
+[[nodiscard]] bool OnDebuggerStatement(JSContext* cx, BaselineFrame* frame);
+[[nodiscard]] bool GlobalHasLiveOnDebuggerStatement(JSContext* cx);
+
+[[nodiscard]] bool EnterWith(JSContext* cx, BaselineFrame* frame,
+ HandleValue val, Handle<WithScope*> templ);
+[[nodiscard]] bool LeaveWith(JSContext* cx, BaselineFrame* frame);
+
+[[nodiscard]] bool PushLexicalEnv(JSContext* cx, BaselineFrame* frame,
+ Handle<LexicalScope*> scope);
+[[nodiscard]] bool PushClassBodyEnv(JSContext* cx, BaselineFrame* frame,
+ Handle<ClassBodyScope*> scope);
+[[nodiscard]] bool DebugLeaveThenPopLexicalEnv(JSContext* cx,
+ BaselineFrame* frame,
+ const jsbytecode* pc);
+[[nodiscard]] bool FreshenLexicalEnv(JSContext* cx, BaselineFrame* frame);
+[[nodiscard]] bool DebugLeaveThenFreshenLexicalEnv(JSContext* cx,
+ BaselineFrame* frame,
+ const jsbytecode* pc);
+[[nodiscard]] bool RecreateLexicalEnv(JSContext* cx, BaselineFrame* frame);
+[[nodiscard]] bool DebugLeaveThenRecreateLexicalEnv(JSContext* cx,
+ BaselineFrame* frame,
+ const jsbytecode* pc);
+[[nodiscard]] bool DebugLeaveLexicalEnv(JSContext* cx, BaselineFrame* frame,
+ const jsbytecode* pc);
+
+[[nodiscard]] bool PushVarEnv(JSContext* cx, BaselineFrame* frame,
+ Handle<Scope*> scope);
+
+[[nodiscard]] bool InitBaselineFrameForOsr(BaselineFrame* frame,
+ InterpreterFrame* interpFrame,
+ uint32_t numStackValues);
+
+JSString* StringReplace(JSContext* cx, HandleString string,
+ HandleString pattern, HandleString repl);
+
+void AssertValidBigIntPtr(JSContext* cx, JS::BigInt* bi);
+void AssertValidObjectPtr(JSContext* cx, JSObject* obj);
+void AssertValidStringPtr(JSContext* cx, JSString* str);
+void AssertValidSymbolPtr(JSContext* cx, JS::Symbol* sym);
+void AssertValidValue(JSContext* cx, Value* v);
+
+void JitValuePreWriteBarrier(JSRuntime* rt, Value* vp);
+void JitStringPreWriteBarrier(JSRuntime* rt, JSString** stringp);
+void JitObjectPreWriteBarrier(JSRuntime* rt, JSObject** objp);
+void JitShapePreWriteBarrier(JSRuntime* rt, Shape** shapep);
+
+bool ObjectIsCallable(JSObject* obj);
+bool ObjectIsConstructor(JSObject* obj);
+
+[[nodiscard]] bool ThrowRuntimeLexicalError(JSContext* cx,
+ unsigned errorNumber);
+
+[[nodiscard]] bool ThrowBadDerivedReturnOrUninitializedThis(JSContext* cx,
+ HandleValue v);
+
+[[nodiscard]] bool BaselineGetFunctionThis(JSContext* cx, BaselineFrame* frame,
+ MutableHandleValue res);
+
+[[nodiscard]] bool CallNativeGetter(JSContext* cx, HandleFunction callee,
+ HandleValue receiver,
+ MutableHandleValue result);
+
+bool CallDOMGetter(JSContext* cx, const JSJitInfo* jitInfo, HandleObject obj,
+ MutableHandleValue result);
+
+bool CallDOMSetter(JSContext* cx, const JSJitInfo* jitInfo, HandleObject obj,
+ HandleValue value);
+
+[[nodiscard]] bool CallNativeSetter(JSContext* cx, HandleFunction callee,
+ HandleObject obj, HandleValue rhs);
+
+[[nodiscard]] bool EqualStringsHelperPure(JSString* str1, JSString* str2);
+
+void HandleCodeCoverageAtPC(BaselineFrame* frame, jsbytecode* pc);
+void HandleCodeCoverageAtPrologue(BaselineFrame* frame);
+
+bool GetNativeDataPropertyPure(JSContext* cx, JSObject* obj, PropertyKey id,
+ MegamorphicCacheEntry* entry, Value* vp);
+
+bool GetNativeDataPropertyPureWithCacheLookup(JSContext* cx, JSObject* obj,
+ PropertyKey id,
+ MegamorphicCacheEntry* entry,
+ Value* vp);
+
+bool GetNativeDataPropertyByValuePure(JSContext* cx, JSObject* obj,
+ MegamorphicCacheEntry* cacheEntry,
+ Value* vp);
+
+template <bool HasOwn>
+bool HasNativeDataPropertyPure(JSContext* cx, JSObject* obj,
+ MegamorphicCacheEntry* cacheEntry, Value* vp);
+
+bool HasNativeElementPure(JSContext* cx, NativeObject* obj, int32_t index,
+ Value* vp);
+
+bool ObjectHasGetterSetterPure(JSContext* cx, JSObject* objArg, jsid id,
+ GetterSetter* getterSetter);
+
+template <bool Cached>
+bool SetElementMegamorphic(JSContext* cx, HandleObject obj, HandleValue index,
+ HandleValue value, bool strict);
+
+template <bool Cached>
+bool SetPropertyMegamorphic(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue value, bool strict);
+
+JSString* TypeOfNameObject(JSObject* obj, JSRuntime* rt);
+
+bool GetPrototypeOf(JSContext* cx, HandleObject target,
+ MutableHandleValue rval);
+
+bool DoConcatStringObject(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res);
+
+bool IsPossiblyWrappedTypedArray(JSContext* cx, JSObject* obj, bool* result);
+
+void* AllocateDependentString(JSContext* cx);
+void* AllocateFatInlineString(JSContext* cx);
+void* AllocateBigIntNoGC(JSContext* cx, bool requestMinorGC);
+void AllocateAndInitTypedArrayBuffer(JSContext* cx, TypedArrayObject* obj,
+ int32_t count);
+
+void* CreateMatchResultFallbackFunc(JSContext* cx, gc::AllocKind kind,
+ size_t nDynamicSlots);
+#ifdef JS_GC_PROBES
+void TraceCreateObject(JSObject* obj);
+#endif
+
+bool DoStringToInt64(JSContext* cx, HandleString str, uint64_t* res);
+
+#if JS_BITS_PER_WORD == 32
+BigInt* CreateBigIntFromInt64(JSContext* cx, uint32_t low, uint32_t high);
+BigInt* CreateBigIntFromUint64(JSContext* cx, uint32_t low, uint32_t high);
+#else
+BigInt* CreateBigIntFromInt64(JSContext* cx, uint64_t i64);
+BigInt* CreateBigIntFromUint64(JSContext* cx, uint64_t i64);
+#endif
+
+template <EqualityKind Kind>
+bool BigIntEqual(BigInt* x, BigInt* y);
+
+template <ComparisonKind Kind>
+bool BigIntCompare(BigInt* x, BigInt* y);
+
+template <EqualityKind Kind>
+bool BigIntNumberEqual(BigInt* x, double y);
+
+template <ComparisonKind Kind>
+bool BigIntNumberCompare(BigInt* x, double y);
+
+template <ComparisonKind Kind>
+bool NumberBigIntCompare(double x, BigInt* y);
+
+template <EqualityKind Kind>
+bool BigIntStringEqual(JSContext* cx, HandleBigInt x, HandleString y,
+ bool* res);
+
+template <ComparisonKind Kind>
+bool BigIntStringCompare(JSContext* cx, HandleBigInt x, HandleString y,
+ bool* res);
+
+template <ComparisonKind Kind>
+bool StringBigIntCompare(JSContext* cx, HandleString x, HandleBigInt y,
+ bool* res);
+
+BigInt* BigIntAsIntN(JSContext* cx, HandleBigInt x, int32_t bits);
+BigInt* BigIntAsUintN(JSContext* cx, HandleBigInt x, int32_t bits);
+
+using AtomicsCompareExchangeFn = int32_t (*)(TypedArrayObject*, size_t, int32_t,
+ int32_t);
+
+using AtomicsReadWriteModifyFn = int32_t (*)(TypedArrayObject*, size_t,
+ int32_t);
+
+AtomicsCompareExchangeFn AtomicsCompareExchange(Scalar::Type elementType);
+AtomicsReadWriteModifyFn AtomicsExchange(Scalar::Type elementType);
+AtomicsReadWriteModifyFn AtomicsAdd(Scalar::Type elementType);
+AtomicsReadWriteModifyFn AtomicsSub(Scalar::Type elementType);
+AtomicsReadWriteModifyFn AtomicsAnd(Scalar::Type elementType);
+AtomicsReadWriteModifyFn AtomicsOr(Scalar::Type elementType);
+AtomicsReadWriteModifyFn AtomicsXor(Scalar::Type elementType);
+
+BigInt* AtomicsLoad64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index);
+
+void AtomicsStore64(TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+
+BigInt* AtomicsCompareExchange64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index, const BigInt* expected,
+ const BigInt* replacement);
+
+BigInt* AtomicsExchange64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index, const BigInt* value);
+
+BigInt* AtomicsAdd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsAnd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsOr64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsSub64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsXor64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+
+JSAtom* AtomizeStringNoGC(JSContext* cx, JSString* str);
+
+bool SetObjectHas(JSContext* cx, HandleObject obj, HandleValue key, bool* rval);
+bool MapObjectHas(JSContext* cx, HandleObject obj, HandleValue key, bool* rval);
+bool MapObjectGet(JSContext* cx, HandleObject obj, HandleValue key,
+ MutableHandleValue rval);
+
+void AssertSetObjectHash(JSContext* cx, SetObject* obj, const Value* value,
+ mozilla::HashNumber actualHash);
+void AssertMapObjectHash(JSContext* cx, MapObject* obj, const Value* value,
+ mozilla::HashNumber actualHash);
+
+void AssertPropertyLookup(NativeObject* obj, PropertyKey id, uint32_t slot);
+
+// Functions used when JS_MASM_VERBOSE is enabled.
+void AssumeUnreachable(const char* output);
+void Printf0(const char* output);
+void Printf1(const char* output, uintptr_t value);
+
+enum class TailCallVMFunctionId;
+enum class VMFunctionId;
+
+extern const VMFunctionData& GetVMFunction(VMFunctionId id);
+extern const VMFunctionData& GetVMFunction(TailCallVMFunctionId id);
+
+} // namespace jit
+} // namespace js
+
+#if defined(JS_CODEGEN_ARM)
+extern "C" {
+extern MOZ_EXPORT int64_t __aeabi_idivmod(int, int);
+extern MOZ_EXPORT int64_t __aeabi_uidivmod(int, int);
+}
+#endif
+
+#endif /* jit_VMFunctions_h */
diff --git a/js/src/jit/ValueNumbering.cpp b/js/src/jit/ValueNumbering.cpp
new file mode 100644
index 0000000000..263fbce1a9
--- /dev/null
+++ b/js/src/jit/ValueNumbering.cpp
@@ -0,0 +1,1338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/ValueNumbering.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+/*
+ * [SMDOC] IonMonkey Value Numbering
+ *
+ * Some notes on the main algorithm here:
+ * - The SSA identifier id() is the value number. We do replaceAllUsesWith as
+ * we go, so there's always at most one visible value with a given number.
+ *
+ * - Consequently, the GVN algorithm is effectively pessimistic. This means it
+ * is not as powerful as an optimistic GVN would be, but it is simpler and
+ * faster.
+ *
+ * - We iterate in RPO, so that when visiting a block, we've already optimized
+ * and hashed all values in dominating blocks. With occasional exceptions,
+ * this allows us to do everything in a single pass.
+ *
+ * - When we do use multiple passes, we just re-run the algorithm on the whole
+ * graph instead of doing sparse propagation. This is a tradeoff to keep the
+ * algorithm simpler and lighter on inputs that don't have a lot of
+ * interesting unreachable blocks or degenerate loop induction variables, at
+ * the expense of being slower on inputs that do. The loop for this always
+ * terminates, because it only iterates when code is or will be removed, so
+ * eventually it must stop iterating.
+ *
+ * - Values are not immediately removed from the hash set when they go out of
+ * scope. Instead, we check for dominance after a lookup. If the dominance
+ * check fails, the value is removed.
+ */
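+
+// A small illustrative example (MIR pseudocode, not actual compiler output):
+// given
+//
+//   block1:
+//     t1 = add x, y
+//     t2 = add x, y // congruent to t1 and dominated by it
+//     foo(t2)
+//
+// visiting t1 records it in the value set; visiting t2 then finds t1 as its
+// dominating leader, so all uses of t2 are replaced with t1 and t2 is
+// discarded as dead.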
+
+HashNumber ValueNumberer::VisibleValues::ValueHasher::hash(Lookup ins) {
+ return ins->valueHash();
+}
+
+// Test whether two MDefinitions are congruent.
+bool ValueNumberer::VisibleValues::ValueHasher::match(Key k, Lookup l) {
+ // If one of the instructions depends on a store, and the other instruction
+ // does not depend on the same store, the instructions are not congruent.
+ if (k->dependency() != l->dependency()) {
+ return false;
+ }
+
+ bool congruent =
+ k->congruentTo(l); // Ask the values themselves what they think.
+#ifdef JS_JITSPEW
+ if (congruent != l->congruentTo(k)) {
+ JitSpew(
+ JitSpew_GVN,
+ " congruentTo relation is not symmetric between %s%u and %s%u!!",
+ k->opName(), k->id(), l->opName(), l->id());
+ }
+#endif
+ return congruent;
+}
+
+void ValueNumberer::VisibleValues::ValueHasher::rekey(Key& k, Key newKey) {
+ k = newKey;
+}
+
+ValueNumberer::VisibleValues::VisibleValues(TempAllocator& alloc)
+ : set_(alloc) {}
+
+// Look up the first entry for |def|.
+ValueNumberer::VisibleValues::Ptr ValueNumberer::VisibleValues::findLeader(
+ const MDefinition* def) const {
+ return set_.lookup(def);
+}
+
+// Look up the first entry for |def|, for possible insertion.
+ValueNumberer::VisibleValues::AddPtr
+ValueNumberer::VisibleValues::findLeaderForAdd(MDefinition* def) {
+ return set_.lookupForAdd(def);
+}
+
+// Insert a value into the set.
+bool ValueNumberer::VisibleValues::add(AddPtr p, MDefinition* def) {
+ return set_.add(p, def);
+}
+
+// Insert a value into the set, overwriting any existing entry.
+void ValueNumberer::VisibleValues::overwrite(AddPtr p, MDefinition* def) {
+ set_.replaceKey(p, def);
+}
+
+// |def| will be discarded, so remove it from any sets.
+void ValueNumberer::VisibleValues::forget(const MDefinition* def) {
+ Ptr p = set_.lookup(def);
+ if (p && *p == def) {
+ set_.remove(p);
+ }
+}
+
+// Clear all state.
+void ValueNumberer::VisibleValues::clear() { set_.clear(); }
+
+#ifdef DEBUG
+// Test whether |def| is in the set.
+bool ValueNumberer::VisibleValues::has(const MDefinition* def) const {
+ Ptr p = set_.lookup(def);
+ return p && *p == def;
+}
+#endif
+
+// Call MDefinition::justReplaceAllUsesWith, and add some GVN-specific asserts.
+static void ReplaceAllUsesWith(MDefinition* from, MDefinition* to) {
+ MOZ_ASSERT(from != to, "GVN shouldn't try to replace a value with itself");
+ MOZ_ASSERT(from->type() == to->type(), "Def replacement has different type");
+ MOZ_ASSERT(!to->isDiscarded(),
+ "GVN replaces an instruction by a removed instruction");
+
+ // We don't need the extra setting of ImplicitlyUsed flags that the regular
+ // replaceAllUsesWith does because we do it ourselves.
+ from->justReplaceAllUsesWith(to);
+}
+
+// Test whether |succ| is a successor of |block|.
+static bool HasSuccessor(const MControlInstruction* block,
+ const MBasicBlock* succ) {
+ for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
+ if (block->getSuccessor(i) == succ) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Given a block which has had predecessors removed but is still reachable,
+// compute its new immediate dominator: the nearest common dominator of its
+// remaining predecessors (returning the old dominator early if the walk
+// reaches it).
+static MBasicBlock* ComputeNewDominator(MBasicBlock* block, MBasicBlock* old) {
+ MBasicBlock* now = block->getPredecessor(0);
+ for (size_t i = 1, e = block->numPredecessors(); i < e; ++i) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ // Note that dominators haven't been recomputed yet, so we have to check
+ // whether now dominates pred, not block.
+ while (!now->dominates(pred)) {
+ MBasicBlock* next = now->immediateDominator();
+ if (next == old) {
+ return old;
+ }
+ if (next == now) {
+ MOZ_ASSERT(block == old,
+ "Non-self-dominating block became self-dominating");
+ return block;
+ }
+ now = next;
+ }
+ }
+ MOZ_ASSERT(old != block || old != now,
+ "Missed self-dominating block staying self-dominating");
+ return now;
+}
+
+// Test for any defs which look potentially interesting to GVN.
+static bool BlockHasInterestingDefs(MBasicBlock* block) {
+ return !block->phisEmpty() || *block->begin() != block->lastIns();
+}
+
+// Walk up the dominator tree from |block| to the root and test for any defs
+// which look potentially interesting to GVN.
+static bool ScanDominatorsForDefs(MBasicBlock* block) {
+ for (MBasicBlock* i = block;;) {
+ if (BlockHasInterestingDefs(i)) {
+ return true;
+ }
+
+ MBasicBlock* immediateDominator = i->immediateDominator();
+ if (immediateDominator == i) {
+ break;
+ }
+ i = immediateDominator;
+ }
+ return false;
+}
+
+// Walk up the dominator tree from |now| to |old| and test for any defs which
+// look potentially interesting to GVN.
+static bool ScanDominatorsForDefs(MBasicBlock* now, MBasicBlock* old) {
+ MOZ_ASSERT(old->dominates(now),
+ "Refined dominator not dominated by old dominator");
+
+ for (MBasicBlock* i = now; i != old; i = i->immediateDominator()) {
+ if (BlockHasInterestingDefs(i)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Given a block which has had predecessors removed but is still reachable, test
+// whether the block's new dominator will be closer than its old one and whether
+// it will expose potential optimization opportunities.
+static bool IsDominatorRefined(MBasicBlock* block) {
+ MBasicBlock* old = block->immediateDominator();
+ MBasicBlock* now = ComputeNewDominator(block, old);
+
+ // If this block is just a goto and it doesn't dominate its destination,
+ // removing its predecessors won't refine the dominators of anything
+ // interesting.
+ MControlInstruction* control = block->lastIns();
+ if (*block->begin() == control && block->phisEmpty() && control->isGoto() &&
+ !block->dominates(control->toGoto()->target())) {
+ return false;
+ }
+
+ // We've computed block's new dominator. Test whether there are any
+ // newly-dominating definitions which look interesting.
+ if (block == old) {
+ return block != now && ScanDominatorsForDefs(now);
+ }
+ MOZ_ASSERT(block != now, "Non-self-dominating block became self-dominating");
+ return ScanDominatorsForDefs(now, old);
+}
+
+// |def| has just had one of its users release it. If it's now dead, enqueue it
+// for discarding, otherwise just make note of it.
+bool ValueNumberer::handleUseReleased(MDefinition* def,
+ ImplicitUseOption implicitUseOption) {
+ if (IsDiscardable(def)) {
+ values_.forget(def);
+ if (!deadDefs_.append(def)) {
+ return false;
+ }
+ } else {
+ if (implicitUseOption == SetImplicitUse) {
+ def->setImplicitlyUsedUnchecked();
+ }
+ }
+ return true;
+}
+
+// Discard |def| and anything in its use-def subtree which is no longer needed.
+bool ValueNumberer::discardDefsRecursively(MDefinition* def,
+ AllowEffectful allowEffectful) {
+ MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared");
+
+ return discardDef(def, allowEffectful) && processDeadDefs();
+}
+
+// Assuming |resume| is unreachable, release its operands.
+// It might be nice to integrate this code with prepareForDiscard; however, GVN
+// needs it to call handleUseReleased so that it can observe when a definition
+// becomes unused, so it isn't trivial to do.
+bool ValueNumberer::releaseResumePointOperands(MResumePoint* resume) {
+ for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
+ if (!resume->hasOperand(i)) {
+ continue;
+ }
+ MDefinition* op = resume->getOperand(i);
+ resume->releaseOperand(i);
+
+ // We set the ImplicitlyUsed flag when removing resume point operands,
+ // because even though we may think we're certain that a particular
+ // branch might not be taken, the type information might be incomplete.
+ if (!handleUseReleased(op, SetImplicitUse)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Assuming |phi| is dead, release and remove its operands. If an operand
+// becomes dead, push it to the discard worklist.
+bool ValueNumberer::releaseAndRemovePhiOperands(MPhi* phi) {
+ // MPhi saves operands in a vector so we iterate in reverse.
+ for (int o = phi->numOperands() - 1; o >= 0; --o) {
+ MDefinition* op = phi->getOperand(o);
+ phi->removeOperand(o);
+ if (!handleUseReleased(op, DontSetImplicitUse)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Assuming |def| is dead, release its operands. If an operand becomes dead,
+// push it to the discard worklist.
+bool ValueNumberer::releaseOperands(MDefinition* def) {
+ for (size_t o = 0, e = def->numOperands(); o < e; ++o) {
+ MDefinition* op = def->getOperand(o);
+ def->releaseOperand(o);
+ if (!handleUseReleased(op, DontSetImplicitUse)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Discard |def| and mine its operands for any subsequently dead defs.
+bool ValueNumberer::discardDef(MDefinition* def,
+ AllowEffectful allowEffectful) {
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN, " Discarding %s %s%u",
+ def->block()->isMarked() ? "unreachable" : "dead", def->opName(),
+ def->id());
+#endif
+#ifdef DEBUG
+ MOZ_ASSERT(def != nextDef_, "Invalidating the MDefinition iterator");
+ if (def->block()->isMarked()) {
+ MOZ_ASSERT(!def->hasUses(), "Discarding def that still has uses");
+ } else {
+ MOZ_ASSERT(allowEffectful == AllowEffectful::Yes
+ ? IsDiscardableAllowEffectful(def)
+ : IsDiscardable(def),
+ "Discarding non-discardable definition");
+ MOZ_ASSERT(!values_.has(def), "Discarding a definition still in the set");
+ }
+#endif
+
+ MBasicBlock* block = def->block();
+ if (def->isPhi()) {
+ MPhi* phi = def->toPhi();
+ if (!releaseAndRemovePhiOperands(phi)) {
+ return false;
+ }
+ block->discardPhi(phi);
+ } else {
+ MInstruction* ins = def->toInstruction();
+ if (MResumePoint* resume = ins->resumePoint()) {
+ if (!releaseResumePointOperands(resume)) {
+ return false;
+ }
+ }
+ if (!releaseOperands(ins)) {
+ return false;
+ }
+ block->discardIgnoreOperands(ins);
+ }
+
+ // If that was the last definition in the block, it can be safely removed
+ // from the graph.
+ if (block->phisEmpty() && block->begin() == block->end()) {
+ MOZ_ASSERT(block->isMarked(),
+ "Reachable block lacks at least a control instruction");
+
+ // As a special case, don't remove a block which is a dominator tree
+ // root so that we don't invalidate the iterator in visitGraph. We'll
+ // check for this and remove it later.
+ if (block->immediateDominator() != block) {
+ JitSpew(JitSpew_GVN, " Block block%u is now empty; discarding",
+ block->id());
+ graph_.removeBlock(block);
+ blocksRemoved_ = true;
+ } else {
+ JitSpew(JitSpew_GVN,
+ " Dominator root block%u is now empty; will discard later",
+ block->id());
+ }
+ }
+
+ return true;
+}
+
+// Recursively discard all the defs on the deadDefs_ worklist.
+bool ValueNumberer::processDeadDefs() {
+ MDefinition* nextDef = nextDef_;
+ while (!deadDefs_.empty()) {
+ MDefinition* def = deadDefs_.popCopy();
+
+ // Don't invalidate the MDefinition iterator. This is what we're going
+ // to visit next, so we won't miss anything.
+ if (def == nextDef) {
+ continue;
+ }
+
+ if (!discardDef(def)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Test whether |block|, which is a loop header, has any predecessors other than
+// |loopPred|, the loop predecessor, which it doesn't dominate.
+static bool hasNonDominatingPredecessor(MBasicBlock* block,
+ MBasicBlock* loopPred) {
+ MOZ_ASSERT(block->isLoopHeader());
+ MOZ_ASSERT(block->loopPredecessor() == loopPred);
+
+ for (uint32_t i = 0, e = block->numPredecessors(); i < e; ++i) {
+ MBasicBlock* pred = block->getPredecessor(i);
+ if (pred != loopPred && !block->dominates(pred)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// A loop is about to be made reachable only through an OSR entry into one of
+// its nested loops. Fix everything up.
+bool ValueNumberer::fixupOSROnlyLoop(MBasicBlock* block) {
+ // Create an empty and unreachable(!) block which jumps to |block|. This
+ // allows |block| to remain marked as a loop header, so we don't have to
+ // worry about moving a different block into place as the new loop header,
+ // which is hard, especially if the OSR is into a nested loop. Doing all
+ // that would produce slightly more optimal code, but this is so
+ // extraordinarily rare that it isn't worth the complexity.
+ MBasicBlock* fake = MBasicBlock::NewFakeLoopPredecessor(graph_, block);
+ if (!fake) {
+ return false;
+ }
+ fake->setImmediateDominator(fake);
+ fake->addNumDominated(1);
+ fake->setDomIndex(fake->id());
+
+ JitSpew(JitSpew_GVN, " Created fake block%u", fake->id());
+ hasOSRFixups_ = true;
+ return true;
+}
+
+// Remove the CFG edge between |pred| and |block|, after releasing the phi
+// operands on that edge and discarding any definitions consequently made dead.
+bool ValueNumberer::removePredecessorAndDoDCE(MBasicBlock* block,
+ MBasicBlock* pred,
+ size_t predIndex) {
+ MOZ_ASSERT(
+ !block->isMarked(),
+ "Block marked unreachable should have predecessors removed already");
+
+ // Before removing the predecessor edge, scan the phi operands for that edge
+ // for dead code before they get removed.
+ MOZ_ASSERT(nextDef_ == nullptr);
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
+ iter != end;) {
+ MPhi* phi = *iter++;
+ MOZ_ASSERT(!values_.has(phi),
+ "Visited phi in block having predecessor removed");
+ MOZ_ASSERT(!phi->isGuard());
+
+ MDefinition* op = phi->getOperand(predIndex);
+ phi->removeOperand(predIndex);
+
+ nextDef_ = iter != end ? *iter : nullptr;
+ if (!handleUseReleased(op, DontSetImplicitUse) || !processDeadDefs()) {
+ return false;
+ }
+
+ // If |nextDef_| became dead while we had it pinned, advance the
+ // iterator and discard it now.
+ while (nextDef_ && !nextDef_->hasUses() &&
+ !nextDef_->isGuardRangeBailouts()) {
+ phi = nextDef_->toPhi();
+ iter++;
+ nextDef_ = iter != end ? *iter : nullptr;
+ if (!discardDefsRecursively(phi)) {
+ return false;
+ }
+ }
+ }
+ nextDef_ = nullptr;
+
+ block->removePredecessorWithoutPhiOperands(pred, predIndex);
+ return true;
+}
+
+// Remove the CFG edge between |pred| and |block|, and if this makes |block|
+// unreachable, mark it so, and remove the rest of its incoming edges too. And
+// discard any instructions made dead by the entailed release of any phi
+// operands.
+bool ValueNumberer::removePredecessorAndCleanUp(MBasicBlock* block,
+ MBasicBlock* pred) {
+ MOZ_ASSERT(!block->isMarked(),
+ "Removing predecessor on block already marked unreachable");
+
+ // We'll be removing a predecessor, so anything we know about phis in this
+ // block will be wrong.
+ for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
+ iter != end; ++iter) {
+ values_.forget(*iter);
+ }
+
+ // If this is a loop header, test whether it will become an unreachable
+ // loop, or whether it needs special OSR-related fixups.
+ bool isUnreachableLoop = false;
+ if (block->isLoopHeader()) {
+ if (block->loopPredecessor() == pred) {
+ if (MOZ_UNLIKELY(hasNonDominatingPredecessor(block, pred))) {
+ JitSpew(JitSpew_GVN,
+ " "
+ "Loop with header block%u is now only reachable through an "
+ "OSR entry into the middle of the loop!!",
+ block->id());
+ } else {
+ // Deleting the entry into the loop makes the loop unreachable.
+ isUnreachableLoop = true;
+ JitSpew(JitSpew_GVN,
+ " "
+ "Loop with header block%u is no longer reachable",
+ block->id());
+ }
+#ifdef JS_JITSPEW
+ } else if (block->hasUniqueBackedge() && block->backedge() == pred) {
+ JitSpew(JitSpew_GVN, " Loop with header block%u is no longer a loop",
+ block->id());
+#endif
+ }
+ }
+
+ // Actually remove the CFG edge.
+ if (!removePredecessorAndDoDCE(block, pred,
+ block->getPredecessorIndex(pred))) {
+ return false;
+ }
+
+ // We've now edited the CFG; check to see if |block| became unreachable.
+ if (block->numPredecessors() == 0 || isUnreachableLoop) {
+ JitSpew(JitSpew_GVN, " Disconnecting block%u", block->id());
+
+ // Remove |block| from its dominator parent's subtree. This is the only
+ // immediately-dominated-block information we need to update, because
+ // everything dominated by this block is about to be swept away.
+ MBasicBlock* parent = block->immediateDominator();
+ if (parent != block) {
+ parent->removeImmediatelyDominatedBlock(block);
+ }
+
+ // Completely disconnect it from the CFG. We do this now rather than
+ // just doing it later when we arrive there in visitUnreachableBlock
+ // so that we don't leave a partially broken loop sitting around. This
+ // also lets visitUnreachableBlock assert that numPredecessors() == 0,
+ // which is a nice invariant.
+ if (block->isLoopHeader()) {
+ block->clearLoopHeader();
+ }
+ for (size_t i = 0, e = block->numPredecessors(); i < e; ++i) {
+ if (!removePredecessorAndDoDCE(block, block->getPredecessor(i), i)) {
+ return false;
+ }
+ }
+
+ // Clear out the resume point operands, as they can keep things live that
+ // don't appear to dominate them.
+ if (MResumePoint* resume = block->entryResumePoint()) {
+ if (!releaseResumePointOperands(resume) || !processDeadDefs()) {
+ return false;
+ }
+ if (MResumePoint* outer = block->outerResumePoint()) {
+ if (!releaseResumePointOperands(outer) || !processDeadDefs()) {
+ return false;
+ }
+ }
+ MOZ_ASSERT(nextDef_ == nullptr);
+ for (MInstructionIterator iter(block->begin()), end(block->end());
+ iter != end;) {
+ MInstruction* ins = *iter++;
+ nextDef_ = iter != end ? *iter : nullptr;
+ if (MResumePoint* resume = ins->resumePoint()) {
+ if (!releaseResumePointOperands(resume) || !processDeadDefs()) {
+ return false;
+ }
+ }
+ }
+ nextDef_ = nullptr;
+ } else {
+#ifdef DEBUG
+ MOZ_ASSERT(block->outerResumePoint() == nullptr,
+ "Outer resume point in block without an entry resume point");
+ for (MInstructionIterator iter(block->begin()), end(block->end());
+ iter != end; ++iter) {
+ MOZ_ASSERT(iter->resumePoint() == nullptr,
+ "Instruction with resume point in block without entry "
+ "resume point");
+ }
+#endif
+ }
+
+ // Use the mark to note that we've already removed all its predecessors,
+ // and we know it's unreachable.
+ block->mark();
+ }
+
+ return true;
+}
+
+// Return a simplified form of |def|, if we can.
+MDefinition* ValueNumberer::simplified(MDefinition* def) const {
+ return def->foldsTo(graph_.alloc());
+}
+
+// If an equivalent and dominating value already exists in the set, return it.
+// Otherwise insert |def| into the set and return it.
+MDefinition* ValueNumberer::leader(MDefinition* def) {
+ // If the value isn't suitable for eliminating, don't bother hashing it. The
+ // convention is that congruentTo returns false for node kinds that wish to
+ // opt out of redundance elimination.
+ // TODO: It'd be nice to clean up that convention (bug 1031406).
+ if (!def->isEffectful() && def->congruentTo(def)) {
+ // Look for a match.
+ VisibleValues::AddPtr p = values_.findLeaderForAdd(def);
+ if (p) {
+ MDefinition* rep = *p;
+ if (!rep->isDiscarded() && rep->block()->dominates(def->block())) {
+ // We found a dominating congruent value.
+ return rep;
+ }
+
+ // The congruent value doesn't dominate. It never will again in this
+ // dominator tree, so overwrite it.
+ values_.overwrite(p, def);
+ } else {
+ // No match. Add a new entry.
+ if (!values_.add(p, def)) {
+ return nullptr;
+ }
+ }
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN, " Recording %s%u", def->opName(), def->id());
+#endif
+ }
+
+ return def;
+}
+
+// Test whether |phi| is dominated by a congruent phi.
+bool ValueNumberer::hasLeader(const MPhi* phi,
+ const MBasicBlock* phiBlock) const {
+ if (VisibleValues::Ptr p = values_.findLeader(phi)) {
+ const MDefinition* rep = *p;
+ return rep != phi && rep->block()->dominates(phiBlock);
+ }
+ return false;
+}
+
+// Test whether there are any phis in |header| which are newly optimizable, as a
+// result of optimizations done inside the loop. This is not a sparse approach,
+// but restarting is rare enough in practice. Termination is ensured by
+// discarding the phi triggering the iteration.
+bool ValueNumberer::loopHasOptimizablePhi(MBasicBlock* header) const {
+ // If the header is unreachable, don't bother re-optimizing it.
+ if (header->isMarked()) {
+ return false;
+ }
+
+ // Rescan the phis for any that can be simplified, since they may be reading
+ // values from backedges.
+ for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd());
+ iter != end; ++iter) {
+ MPhi* phi = *iter;
+ MOZ_ASSERT_IF(!phi->hasUses(), !DeadIfUnused(phi));
+
+ if (phi->operandIfRedundant() || hasLeader(phi, header)) {
+ return true; // Phi can be simplified.
+ }
+ }
+ return false;
+}
+
+// Visit |def|.
+bool ValueNumberer::visitDefinition(MDefinition* def) {
+ // Nop does not fit in any of the previous optimizations, as its only
+ // purpose is to reduce register pressure by keeping an additional resume
+ // point. Still, there is no need for a consecutive list of MNop
+ // instructions, as they would slow down every other iteration over the
+ // graph.
+ if (def->isNop()) {
+ MNop* nop = def->toNop();
+ MBasicBlock* block = nop->block();
+
+ // We look backward to know whether we can remove the previous Nop; we do
+ // not look forward, as we would not benefit from the folding made by GVN.
+ MInstructionReverseIterator iter = ++block->rbegin(nop);
+
+ // This Nop is at the beginning of the basic block; just replace the entry
+ // resume point of the basic block with the one from the Nop.
+ if (iter == block->rend()) {
+ JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
+ nop->moveResumePointAsEntry();
+ block->discard(nop);
+ return true;
+ }
+
+ // The previous instruction is also a Nop, no need to keep it anymore.
+ MInstruction* prev = *iter;
+ if (prev->isNop()) {
+ JitSpew(JitSpew_GVN, " Removing Nop%u", prev->id());
+ block->discard(prev);
+ return true;
+ }
+
+ // The Nop is introduced to capture the result and make sure the operands
+ // are not live anymore when there are no further uses. However, when all
+ // operands are still needed, the Nop doesn't decrease liveness and can be
+ // removed.
+ MResumePoint* rp = nop->resumePoint();
+ if (rp && rp->numOperands() > 0 &&
+ rp->getOperand(rp->numOperands() - 1) == prev &&
+ !nop->block()->lastIns()->isThrow() &&
+ !prev->isAssertRecoveredOnBailout()) {
+ size_t numOperandsLive = 0;
+ for (size_t j = 0; j < prev->numOperands(); j++) {
+ for (size_t i = 0; i < rp->numOperands(); i++) {
+ if (prev->getOperand(j) == rp->getOperand(i)) {
+ numOperandsLive++;
+ break;
+ }
+ }
+ }
+
+ if (numOperandsLive == prev->numOperands()) {
+ JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
+ block->discard(nop);
+ }
+ }
+
+ return true;
+ }
+
+ // Skip optimizations on instructions which are recovered on bailout, to
+ // avoid mixing instructions which are recovered on bailouts with
+ // instructions which are not.
+ if (def->isRecoveredOnBailout()) {
+ return true;
+ }
+
+ // If this instruction has a dependency() into an unreachable block, we'll
+ // need to update AliasAnalysis.
+ MDefinition* dep = def->dependency();
+ if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
+ JitSpew(JitSpew_GVN, " AliasAnalysis invalidated");
+ if (updateAliasAnalysis_ && !dependenciesBroken_) {
+ // TODO: Recomputing alias-analysis could theoretically expose more
+ // GVN opportunities.
+ JitSpew(JitSpew_GVN, " Will recompute!");
+ dependenciesBroken_ = true;
+ }
+ // Temporarily clear its dependency, to protect foldsTo, which may
+ // wish to use the dependency to do store-to-load forwarding.
+ def->setDependency(def->toInstruction());
+ } else {
+ dep = nullptr;
+ }
+
+ // Look for a simplified form of |def|.
+ MDefinition* sim = simplified(def);
+ if (sim != def) {
+ if (sim == nullptr) {
+ return false;
+ }
+
+ bool isNewInstruction = sim->block() == nullptr;
+
+ // If |sim| doesn't belong to a block, insert it next to |def|.
+ if (isNewInstruction) {
+ // A new |sim| node mustn't be effectful when |def| wasn't effectful.
+ MOZ_ASSERT((def->isEffectful() && sim->isEffectful()) ||
+ !sim->isEffectful());
+
+ // If both instructions are effectful, |sim| must have stolen the resume
+ // point of |def| when it's a new instruction.
+ MOZ_ASSERT_IF(def->isEffectful() && sim->isEffectful(),
+ !def->toInstruction()->resumePoint() &&
+ sim->toInstruction()->resumePoint());
+
+ def->block()->insertAfter(def->toInstruction(), sim->toInstruction());
+ }
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN, " Folded %s%u to %s%u", def->opName(), def->id(),
+ sim->opName(), sim->id());
+#endif
+ MOZ_ASSERT(!sim->isDiscarded());
+ ReplaceAllUsesWith(def, sim);
+
+ // The node's foldsTo said |def| can be replaced by |sim|. If |def| is a
+ // guard, then either |sim| is also a guard, or a guard isn't actually
+ // needed, so we can clear |def|'s guard flag and let it be discarded.
+ def->setNotGuardUnchecked();
+
+ if (def->isGuardRangeBailouts()) {
+ sim->setGuardRangeBailoutsUnchecked();
+ }
+
+ if (sim->bailoutKind() == BailoutKind::Unknown) {
+ sim->setBailoutKind(def->bailoutKind());
+ }
+
+ // Discard |def| if it's now unused. Similar to guards, we allow replacing
+ // effectful instructions when the node's foldsTo method said |def| can be
+ // replaced.
+ if (DeadIfUnusedAllowEffectful(def)) {
+ if (!discardDefsRecursively(def, AllowEffectful::Yes)) {
+ return false;
+ }
+
+ // If that ended up discarding |sim|, then we're done here.
+ if (sim->isDiscarded()) {
+ return true;
+ }
+ }
+
+ if (!rerun_ && def->isPhi() && !sim->isPhi()) {
+ rerun_ = true;
+ JitSpew(JitSpew_GVN,
+ " Replacing phi%u may have enabled cascading optimisations; "
+ "will re-run",
+ def->id());
+ }
+
+ // Otherwise, proceed to optimize with |sim| in place of |def|.
+ def = sim;
+
+ // If the simplified instruction was already part of the graph, then we
+ // probably already visited and optimized this instruction.
+ if (!isNewInstruction) {
+ return true;
+ }
+ }
+
+ // Now that foldsTo is done, re-enable the original dependency. Even though
+ // it may be pointing into a discarded block, it's still valid for the
+ // purposes of detecting congruent loads.
+ if (dep != nullptr) {
+ def->setDependency(dep);
+ }
+
+ // Look for a dominating def which makes |def| redundant.
+ MDefinition* rep = leader(def);
+ if (rep != def) {
+ if (rep == nullptr) {
+ return false;
+ }
+
+ if (rep->isPhi()) {
+ MOZ_ASSERT(def->isPhi());
+ rep->toPhi()->updateForReplacement(def->toPhi());
+ }
+
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN, " Replacing %s%u with %s%u", def->opName(),
+ def->id(), rep->opName(), rep->id());
+#endif
+ ReplaceAllUsesWith(def, rep);
+
+ // The node's congruentTo said |def| is congruent to |rep|, and it's
+ // dominated by |rep|. If |def| is a guard, it's covered by |rep|,
+ // so we can clear |def|'s guard flag and let it be discarded.
+ def->setNotGuardUnchecked();
+
+ if (DeadIfUnused(def)) {
+ // discardDef should not add anything to the deadDefs, as the
+ // redundant operation should have the same input operands.
+ mozilla::DebugOnly<bool> r = discardDef(def);
+ MOZ_ASSERT(
+ r,
+ "discardDef shouldn't have tried to add anything to the worklist, "
+ "so it shouldn't have failed");
+ MOZ_ASSERT(deadDefs_.empty(),
+ "discardDef shouldn't have added anything to the worklist");
+ }
+ }
+
+ return true;
+}
+
+// Visit the control instruction at the end of |block|.
+bool ValueNumberer::visitControlInstruction(MBasicBlock* block) {
+ // Look for a simplified form of the control instruction.
+ MControlInstruction* control = block->lastIns();
+ MDefinition* rep = simplified(control);
+ if (rep == control) {
+ return true;
+ }
+
+ if (rep == nullptr) {
+ return false;
+ }
+
+ MControlInstruction* newControl = rep->toControlInstruction();
+ MOZ_ASSERT(!newControl->block(),
+ "Control instruction replacement shouldn't already be in a block");
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_GVN, " Folded control instruction %s%u to %s%u",
+ control->opName(), control->id(), newControl->opName(),
+ graph_.getNumInstructionIds());
+#endif
+
+ // If the simplification removes any CFG edges, update the CFG and remove
+ // any blocks that become dead.
+ size_t oldNumSuccs = control->numSuccessors();
+ size_t newNumSuccs = newControl->numSuccessors();
+ if (newNumSuccs != oldNumSuccs) {
+ MOZ_ASSERT(newNumSuccs < oldNumSuccs,
+ "New control instruction has too many successors");
+ for (size_t i = 0; i != oldNumSuccs; ++i) {
+ MBasicBlock* succ = control->getSuccessor(i);
+ if (HasSuccessor(newControl, succ)) {
+ continue;
+ }
+ if (succ->isMarked()) {
+ continue;
+ }
+ if (!removePredecessorAndCleanUp(succ, block)) {
+ return false;
+ }
+ if (succ->isMarked()) {
+ continue;
+ }
+ if (!rerun_) {
+ if (!remainingBlocks_.append(succ)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ if (!releaseOperands(control)) {
+ return false;
+ }
+ block->discardIgnoreOperands(control);
+ block->end(newControl);
+ if (block->entryResumePoint() && newNumSuccs != oldNumSuccs) {
+ block->flagOperandsOfPrunedBranches(newControl);
+ }
+ return processDeadDefs();
+}
+
+// |block| is unreachable. Mine it for opportunities to delete more dead
+// code, and then discard it.
+bool ValueNumberer::visitUnreachableBlock(MBasicBlock* block) {
+ JitSpew(JitSpew_GVN, " Visiting unreachable block%u%s%s%s", block->id(),
+ block->isLoopHeader() ? " (loop header)" : "",
+ block->isSplitEdge() ? " (split edge)" : "",
+ block->immediateDominator() == block ? " (dominator root)" : "");
+
+ MOZ_ASSERT(block->isMarked(),
+ "Visiting unmarked (and therefore reachable?) block");
+ MOZ_ASSERT(block->numPredecessors() == 0,
+ "Block marked unreachable still has predecessors");
+ MOZ_ASSERT(block != graph_.entryBlock(), "Removing normal entry block");
+ MOZ_ASSERT(block != graph_.osrBlock(), "Removing OSR entry block");
+ MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared");
+
+ // Disconnect all outgoing CFG edges.
+ for (size_t i = 0, e = block->numSuccessors(); i < e; ++i) {
+ MBasicBlock* succ = block->getSuccessor(i);
+ if (succ->isDead() || succ->isMarked()) {
+ continue;
+ }
+ if (!removePredecessorAndCleanUp(succ, block)) {
+ return false;
+ }
+ if (succ->isMarked()) {
+ continue;
+ }
+ // |succ| is still reachable. Make a note of it so that we can scan
+ // it for interesting dominator tree changes later.
+ if (!rerun_) {
+ if (!remainingBlocks_.append(succ)) {
+ return false;
+ }
+ }
+ }
+
+ // Discard any instructions with no uses. The remaining instructions will be
+ // discarded when their last use is discarded.
+ MOZ_ASSERT(nextDef_ == nullptr);
+ for (MDefinitionIterator iter(block); iter;) {
+ MDefinition* def = *iter++;
+ if (def->hasUses()) {
+ continue;
+ }
+ nextDef_ = iter ? *iter : nullptr;
+ if (!discardDefsRecursively(def)) {
+ return false;
+ }
+ }
+
+ nextDef_ = nullptr;
+ MControlInstruction* control = block->lastIns();
+ return discardDefsRecursively(control);
+}
+
+// Visit all the phis and instructions in |block|.
+bool ValueNumberer::visitBlock(MBasicBlock* block) {
+ MOZ_ASSERT(!block->isMarked(), "Blocks marked unreachable during GVN");
+ MOZ_ASSERT(!block->isDead(), "Block to visit is already dead");
+
+ JitSpew(JitSpew_GVN, " Visiting block%u", block->id());
+
+ // Visit the definitions in the block top-down.
+ MOZ_ASSERT(nextDef_ == nullptr);
+ for (MDefinitionIterator iter(block); iter;) {
+ if (!graph_.alloc().ensureBallast()) {
+ return false;
+ }
+ MDefinition* def = *iter++;
+
+ // Remember where our iterator is so that we don't invalidate it.
+ nextDef_ = iter ? *iter : nullptr;
+
+ // If the definition is dead, discard it.
+ if (IsDiscardable(def)) {
+ if (!discardDefsRecursively(def)) {
+ return false;
+ }
+ continue;
+ }
+
+ if (!visitDefinition(def)) {
+ return false;
+ }
+ }
+ nextDef_ = nullptr;
+
+ if (!graph_.alloc().ensureBallast()) {
+ return false;
+ }
+
+ return visitControlInstruction(block);
+}
+
+// Visit all the blocks dominated by dominatorRoot.
+bool ValueNumberer::visitDominatorTree(MBasicBlock* dominatorRoot) {
+ JitSpew(JitSpew_GVN,
+ " Visiting dominator tree (with %" PRIu64
+ " blocks) rooted at block%u%s",
+ uint64_t(dominatorRoot->numDominated()), dominatorRoot->id(),
+ dominatorRoot == graph_.entryBlock() ? " (normal entry block)"
+ : dominatorRoot == graph_.osrBlock() ? " (OSR entry block)"
+ : dominatorRoot->numPredecessors() == 0
+ ? " (odd unreachable block)"
+ : " (merge point from normal entry and OSR entry)");
+ MOZ_ASSERT(dominatorRoot->immediateDominator() == dominatorRoot,
+ "root is not a dominator tree root");
+
+ // Visit all blocks dominated by dominatorRoot, in RPO. This has the nice
+ // property that we'll always visit a block before any block it dominates,
+ // so we can make a single pass through the list and see every full
+ // redundance.
+ size_t numVisited = 0;
+ size_t numDiscarded = 0;
+ for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot));;) {
+ MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
+ MBasicBlock* block = *iter++;
+ // We're only visiting blocks in dominatorRoot's tree right now.
+ if (!dominatorRoot->dominates(block)) {
+ continue;
+ }
+
+ // If this is a loop backedge, remember the header, as we may not be able
+ // to find it after we simplify the block.
+ MBasicBlock* header =
+ block->isLoopBackedge() ? block->loopHeaderOfBackedge() : nullptr;
+
+ if (block->isMarked()) {
+ // This block has become unreachable; handle it specially.
+ if (!visitUnreachableBlock(block)) {
+ return false;
+ }
+ ++numDiscarded;
+ } else {
+ // Visit the block!
+ if (!visitBlock(block)) {
+ return false;
+ }
+ ++numVisited;
+ }
+
+ // If the block is/was a loop backedge, check to see if the block that
+ // is/was its header has optimizable phis, which would want a re-run.
+ if (!rerun_ && header && loopHasOptimizablePhi(header)) {
+ JitSpew(JitSpew_GVN,
+ " Loop phi in block%u can now be optimized; will re-run GVN!",
+ header->id());
+ rerun_ = true;
+ remainingBlocks_.clear();
+ }
+
+ MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numDiscarded,
+ "Visited blocks too many times");
+ if (numVisited >= dominatorRoot->numDominated() - numDiscarded) {
+ break;
+ }
+ }
+
+ totalNumVisited_ += numVisited;
+ values_.clear();
+ return true;
+}
+
+// Visit all the blocks in the graph.
+bool ValueNumberer::visitGraph() {
+ // Due to OSR blocks, the set of blocks dominated by a block may not be
+ // contiguous in the RPO. Do a separate traversal for each dominator tree
+ // root. There's always the main entry, and sometimes there's an OSR entry,
+ // and then there are the roots formed where the OSR paths merge with the
+ // main entry paths.
+ for (ReversePostorderIterator iter(graph_.rpoBegin());;) {
+ MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
+ MBasicBlock* block = *iter;
+ if (block->immediateDominator() == block) {
+ if (!visitDominatorTree(block)) {
+ return false;
+ }
+
+ // Normally unreachable blocks would be removed by now, but if this
+ // block is a dominator tree root, it has been special-cased and left
+ // in place in order to avoid invalidating our iterator. Now that
+ // we've finished the tree, increment the iterator, and then if it's
+ // marked for removal, remove it.
+ ++iter;
+ if (block->isMarked()) {
+ JitSpew(JitSpew_GVN, " Discarding dominator root block%u",
+ block->id());
+ MOZ_ASSERT(
+ block->begin() == block->end(),
+ "Unreachable dominator tree root has instructions after tree walk");
+ MOZ_ASSERT(block->phisEmpty(),
+ "Unreachable dominator tree root has phis after tree walk");
+ graph_.removeBlock(block);
+ blocksRemoved_ = true;
+ }
+
+ MOZ_ASSERT(totalNumVisited_ <= graph_.numBlocks(),
+ "Visited blocks too many times");
+ if (totalNumVisited_ >= graph_.numBlocks()) {
+ break;
+ }
+ } else {
+ // This block is not a dominator tree root. Proceed to the next one.
+ ++iter;
+ }
+ }
+ totalNumVisited_ = 0;
+ return true;
+}
+
+bool ValueNumberer::insertOSRFixups() {
+ ReversePostorderIterator end(graph_.end());
+ for (ReversePostorderIterator iter(graph_.begin()); iter != end;) {
+ MBasicBlock* block = *iter++;
+
+ // Only add a fixup block above loops which can be reached from OSR.
+ if (!block->isLoopHeader()) {
+ continue;
+ }
+
+ // If the loop header is not self-dominated, then this loop does not
+ // have to deal with a second entry point, so there is no need to add a
+ // second entry point with a fixup block.
+ if (block->immediateDominator() != block) {
+ continue;
+ }
+
+ if (!fixupOSROnlyLoop(block)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// OSR fixups serve the purpose of representing the non-OSR entry into a loop
+// when the only real entry is an OSR entry into the middle. However, if the
+// entry into the middle is subsequently folded away, the loop may actually
+// have become unreachable. Mark-and-sweep all blocks to remove all such code.
+bool ValueNumberer::cleanupOSRFixups() {
+ // Mark.
+ Vector<MBasicBlock*, 0, JitAllocPolicy> worklist(graph_.alloc());
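+  // Start at 2 to account for the entry block and the OSR block, which are
+  // marked unconditionally just below.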
+ unsigned numMarked = 2;
+ graph_.entryBlock()->mark();
+ graph_.osrBlock()->mark();
+ if (!worklist.append(graph_.entryBlock()) ||
+ !worklist.append(graph_.osrBlock())) {
+ return false;
+ }
+ while (!worklist.empty()) {
+ MBasicBlock* block = worklist.popCopy();
+ for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
+ MBasicBlock* succ = block->getSuccessor(i);
+ if (!succ->isMarked()) {
+ ++numMarked;
+ succ->mark();
+ if (!worklist.append(succ)) {
+ return false;
+ }
+ } else if (succ->isLoopHeader() && succ->loopPredecessor() == block &&
+ succ->numPredecessors() == 3) {
+ // Unmark fixup blocks if the loop predecessor is marked after
+ // the loop header.
+ succ->getPredecessor(1)->unmarkUnchecked();
+ }
+ }
+
+ // OSR fixup blocks are needed if and only if the loop header is
+ // reachable from its backedge (via the OSR block) and not from its
+ // original loop predecessor.
+ //
+ // Thus OSR fixup blocks are removed if the loop header is not
+ // reachable, or if the loop header is reachable from both its backedge
+ // and its original loop predecessor.
+ if (block->isLoopHeader()) {
+ MBasicBlock* maybeFixupBlock = nullptr;
+ if (block->numPredecessors() == 2) {
+ maybeFixupBlock = block->getPredecessor(0);
+ } else {
+ MOZ_ASSERT(block->numPredecessors() == 3);
+ if (!block->loopPredecessor()->isMarked()) {
+ maybeFixupBlock = block->getPredecessor(1);
+ }
+ }
+
+ if (maybeFixupBlock && !maybeFixupBlock->isMarked() &&
+ maybeFixupBlock->numPredecessors() == 0) {
+ MOZ_ASSERT(maybeFixupBlock->numSuccessors() == 1,
+ "OSR fixup block should have exactly one successor");
+ MOZ_ASSERT(maybeFixupBlock != graph_.entryBlock(),
+ "OSR fixup block shouldn't be the entry block");
+ MOZ_ASSERT(maybeFixupBlock != graph_.osrBlock(),
+ "OSR fixup block shouldn't be the OSR entry block");
+ maybeFixupBlock->mark();
+ }
+ }
+ }
+
+ // And sweep.
+ return RemoveUnmarkedBlocks(mir_, graph_, numMarked);
+}
+
+ValueNumberer::ValueNumberer(MIRGenerator* mir, MIRGraph& graph)
+ : mir_(mir),
+ graph_(graph),
+ // Initialize the value set. It's tempting to pass in a length that is a
+ // function of graph_.getNumInstructionIds(). But if we start out with a
+ // large capacity, it will be far larger than the actual element count for
+ // most of the pass, so when we remove elements, it would often think it
+ // needs to compact itself. Empirically, just letting the HashTable grow
+ // as needed on its own seems to work pretty well.
+ values_(graph.alloc()),
+ deadDefs_(graph.alloc()),
+ remainingBlocks_(graph.alloc()),
+ nextDef_(nullptr),
+ totalNumVisited_(0),
+ rerun_(false),
+ blocksRemoved_(false),
+ updateAliasAnalysis_(false),
+ dependenciesBroken_(false),
+ hasOSRFixups_(false) {}
+
+bool ValueNumberer::run(UpdateAliasAnalysisFlag updateAliasAnalysis) {
+ updateAliasAnalysis_ = updateAliasAnalysis == UpdateAliasAnalysis;
+
+ JitSpew(JitSpew_GVN, "Running GVN on graph (with %" PRIu64 " blocks)",
+ uint64_t(graph_.numBlocks()));
+
+  // Adding fixup blocks only makes sense if we have a second entry point into
+  // the graph which cannot be reached from the main entry point.
+ if (graph_.osrBlock()) {
+ if (!insertOSRFixups()) {
+ return false;
+ }
+ }
+
+ // Top level non-sparse iteration loop. If an iteration performs a
+ // significant change, such as discarding a block which changes the
+ // dominator tree and may enable more optimization, this loop takes another
+ // iteration.
+ int runs = 0;
+ for (;;) {
+ if (!visitGraph()) {
+ return false;
+ }
+
+ // Test whether any block which was not removed but which had at least
+ // one predecessor removed will have a new dominator parent.
+ while (!remainingBlocks_.empty()) {
+ MBasicBlock* block = remainingBlocks_.popCopy();
+ if (!block->isDead() && IsDominatorRefined(block)) {
+ JitSpew(JitSpew_GVN,
+ " Dominator for block%u can now be refined; will re-run GVN!",
+ block->id());
+ rerun_ = true;
+ remainingBlocks_.clear();
+ break;
+ }
+ }
+
+ if (blocksRemoved_) {
+ if (!AccountForCFGChanges(mir_, graph_, dependenciesBroken_,
+ /* underValueNumberer = */ true)) {
+ return false;
+ }
+
+ blocksRemoved_ = false;
+ dependenciesBroken_ = false;
+ }
+
+ if (mir_->shouldCancel("GVN (outer loop)")) {
+ return false;
+ }
+
+ // If no further opportunities have been discovered, we're done.
+ if (!rerun_) {
+ break;
+ }
+
+ rerun_ = false;
+
+ // Enforce an arbitrary iteration limit. This is rarely reached, and
+ // isn't even strictly necessary, as the algorithm is guaranteed to
+ // terminate on its own in a finite amount of time (since every time we
+ // re-run we discard the construct which triggered the re-run), but it
+ // does help avoid slow compile times on pathological code.
+ ++runs;
+ if (runs == 6) {
+ JitSpew(JitSpew_GVN, "Re-run cutoff of %d reached. Terminating GVN!",
+ runs);
+ break;
+ }
+
+ JitSpew(JitSpew_GVN,
+ "Re-running GVN on graph (run %d, now with %" PRIu64 " blocks)",
+ runs, uint64_t(graph_.numBlocks()));
+ }
+
+ if (MOZ_UNLIKELY(hasOSRFixups_)) {
+ if (!cleanupOSRFixups()) {
+ return false;
+ }
+ hasOSRFixups_ = false;
+ }
+
+ return true;
+}
diff --git a/js/src/jit/ValueNumbering.h b/js/src/jit/ValueNumbering.h
new file mode 100644
index 0000000000..472d31ecce
--- /dev/null
+++ b/js/src/jit/ValueNumbering.h
@@ -0,0 +1,123 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_ValueNumbering_h
+#define jit_ValueNumbering_h
+
+#include "jit/JitAllocPolicy.h"
+#include "js/HashTable.h"
+#include "js/Vector.h"
+
+namespace js {
+namespace jit {
+
+class MDefinition;
+class MBasicBlock;
+class MIRGraph;
+class MPhi;
+class MIRGenerator;
+class MResumePoint;
+
+class ValueNumberer {
+ // Value numbering data.
+ class VisibleValues {
+ // Hash policy for ValueSet.
+ struct ValueHasher {
+ using Lookup = const MDefinition*;
+ using Key = MDefinition*;
+ static HashNumber hash(Lookup ins);
+ static bool match(Key k, Lookup l);
+ static void rekey(Key& k, Key newKey);
+ };
+
+ typedef HashSet<MDefinition*, ValueHasher, JitAllocPolicy> ValueSet;
+
+ ValueSet set_; // Set of visible values
+
+ public:
+ explicit VisibleValues(TempAllocator& alloc);
+
+ using Ptr = ValueSet::Ptr;
+ using AddPtr = ValueSet::AddPtr;
+
+ Ptr findLeader(const MDefinition* def) const;
+ AddPtr findLeaderForAdd(MDefinition* def);
+ [[nodiscard]] bool add(AddPtr p, MDefinition* def);
+ void overwrite(AddPtr p, MDefinition* def);
+ void forget(const MDefinition* def);
+ void clear();
+#ifdef DEBUG
+ bool has(const MDefinition* def) const;
+#endif
+ };
+
+ typedef Vector<MBasicBlock*, 4, JitAllocPolicy> BlockWorklist;
+ typedef Vector<MDefinition*, 4, JitAllocPolicy> DefWorklist;
+
+ MIRGenerator* const mir_;
+ MIRGraph& graph_;
+ VisibleValues values_; // Numbered values
+ DefWorklist deadDefs_; // Worklist for deleting values
+ BlockWorklist remainingBlocks_; // Blocks remaining with fewer preds
+ MDefinition* nextDef_; // The next definition; don't discard
+ size_t totalNumVisited_; // The number of blocks visited
+ bool rerun_; // Should we run another GVN iteration?
+ bool blocksRemoved_; // Have any blocks been removed?
+ bool updateAliasAnalysis_; // Do we care about AliasAnalysis?
+ bool dependenciesBroken_; // Have we broken AliasAnalysis?
+ bool hasOSRFixups_; // Have we created any OSR fixup blocks?
+
+ enum ImplicitUseOption { DontSetImplicitUse, SetImplicitUse };
+ enum class AllowEffectful : bool { No, Yes };
+
+ [[nodiscard]] bool handleUseReleased(MDefinition* def,
+ ImplicitUseOption implicitUseOption);
+ [[nodiscard]] bool discardDefsRecursively(
+ MDefinition* def, AllowEffectful allowEffectful = AllowEffectful::No);
+ [[nodiscard]] bool releaseResumePointOperands(MResumePoint* resume);
+ [[nodiscard]] bool releaseAndRemovePhiOperands(MPhi* phi);
+ [[nodiscard]] bool releaseOperands(MDefinition* def);
+ [[nodiscard]] bool discardDef(
+ MDefinition* def, AllowEffectful allowEffectful = AllowEffectful::No);
+ [[nodiscard]] bool processDeadDefs();
+
+ [[nodiscard]] bool fixupOSROnlyLoop(MBasicBlock* block);
+ [[nodiscard]] bool removePredecessorAndDoDCE(MBasicBlock* block,
+ MBasicBlock* pred,
+ size_t predIndex);
+ [[nodiscard]] bool removePredecessorAndCleanUp(MBasicBlock* block,
+ MBasicBlock* pred);
+
+ MDefinition* simplified(MDefinition* def) const;
+ MDefinition* leader(MDefinition* def);
+ bool hasLeader(const MPhi* phi, const MBasicBlock* phiBlock) const;
+ bool loopHasOptimizablePhi(MBasicBlock* header) const;
+
+ [[nodiscard]] bool visitDefinition(MDefinition* def);
+ [[nodiscard]] bool visitControlInstruction(MBasicBlock* block);
+ [[nodiscard]] bool visitUnreachableBlock(MBasicBlock* block);
+ [[nodiscard]] bool visitBlock(MBasicBlock* block);
+ [[nodiscard]] bool visitDominatorTree(MBasicBlock* root);
+ [[nodiscard]] bool visitGraph();
+
+ [[nodiscard]] bool insertOSRFixups();
+ [[nodiscard]] bool cleanupOSRFixups();
+
+ public:
+ ValueNumberer(MIRGenerator* mir, MIRGraph& graph);
+
+ enum UpdateAliasAnalysisFlag { DontUpdateAliasAnalysis, UpdateAliasAnalysis };
+
+ // Optimize the graph, performing expression simplification and
+ // canonicalization, eliminating statically fully-redundant expressions,
+ // deleting dead instructions, and removing unreachable blocks.
+ [[nodiscard]] bool run(UpdateAliasAnalysisFlag updateAliasAnalysis);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_ValueNumbering_h */
diff --git a/js/src/jit/WarpBuilder.cpp b/js/src/jit/WarpBuilder.cpp
new file mode 100644
index 0000000000..f005a6ea5e
--- /dev/null
+++ b/js/src/jit/WarpBuilder.cpp
@@ -0,0 +1,3576 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/WarpBuilder.h"
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/BaselineFrame.h"
+#include "jit/CacheIR.h"
+#include "jit/CompileInfo.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/WarpCacheIRTranspiler.h"
+#include "jit/WarpSnapshot.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_BAD_CONST_ASSIGN
+#include "vm/GeneratorObject.h"
+#include "vm/Interpreter.h"
+#include "vm/Opcodes.h"
+
+#include "gc/ObjectKind-inl.h"
+#include "vm/BytecodeIterator-inl.h"
+#include "vm/BytecodeLocation-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Used for building the outermost script.
+WarpBuilder::WarpBuilder(WarpSnapshot& snapshot, MIRGenerator& mirGen,
+ WarpCompilation* warpCompilation)
+ : WarpBuilderShared(snapshot, mirGen, nullptr),
+ warpCompilation_(warpCompilation),
+ graph_(mirGen.graph()),
+ info_(mirGen.outerInfo()),
+ scriptSnapshot_(snapshot.rootScript()),
+ script_(snapshot.rootScript()->script()),
+ loopStack_(mirGen.alloc()) {
+ opSnapshotIter_ = scriptSnapshot_->opSnapshots().getFirst();
+}
+
+// Used for building inlined scripts.
+WarpBuilder::WarpBuilder(WarpBuilder* caller, WarpScriptSnapshot* snapshot,
+ CompileInfo& compileInfo, CallInfo* inlineCallInfo,
+ MResumePoint* callerResumePoint)
+ : WarpBuilderShared(caller->snapshot(), caller->mirGen(), nullptr),
+ warpCompilation_(caller->warpCompilation()),
+ graph_(caller->mirGen().graph()),
+ info_(compileInfo),
+ scriptSnapshot_(snapshot),
+ script_(snapshot->script()),
+ loopStack_(caller->mirGen().alloc()),
+ callerBuilder_(caller),
+ callerResumePoint_(callerResumePoint),
+ inlineCallInfo_(inlineCallInfo) {
+ opSnapshotIter_ = snapshot->opSnapshots().getFirst();
+}
+
+BytecodeSite* WarpBuilder::newBytecodeSite(BytecodeLocation loc) {
+ jsbytecode* pc = loc.toRawBytecode();
+ MOZ_ASSERT(info().inlineScriptTree()->script()->containsPC(pc));
+ return new (alloc()) BytecodeSite(info().inlineScriptTree(), pc);
+}
+
+const WarpOpSnapshot* WarpBuilder::getOpSnapshotImpl(
+ BytecodeLocation loc, WarpOpSnapshot::Kind kind) {
+ uint32_t offset = loc.bytecodeToOffset(script_);
+
+  // Skip snapshots until we get to one at or past the current op's offset.
+  // This is a loop because WarpBuilder can skip unreachable bytecode ops.
+ while (opSnapshotIter_ && opSnapshotIter_->offset() < offset) {
+ opSnapshotIter_ = opSnapshotIter_->getNext();
+ }
+
+ if (!opSnapshotIter_ || opSnapshotIter_->offset() != offset ||
+ opSnapshotIter_->kind() != kind) {
+ return nullptr;
+ }
+
+ return opSnapshotIter_;
+}
+
+void WarpBuilder::initBlock(MBasicBlock* block) {
+ graph().addBlock(block);
+
+ block->setLoopDepth(loopDepth());
+
+ current = block;
+}
+
+bool WarpBuilder::startNewBlock(MBasicBlock* predecessor, BytecodeLocation loc,
+ size_t numToPop) {
+ MBasicBlock* block =
+ MBasicBlock::NewPopN(graph(), info(), predecessor, newBytecodeSite(loc),
+ MBasicBlock::NORMAL, numToPop);
+ if (!block) {
+ return false;
+ }
+
+ initBlock(block);
+ return true;
+}
+
+bool WarpBuilder::startNewEntryBlock(size_t stackDepth, BytecodeLocation loc) {
+ MBasicBlock* block =
+ MBasicBlock::New(graph(), stackDepth, info(), /* maybePred = */ nullptr,
+ newBytecodeSite(loc), MBasicBlock::NORMAL);
+ if (!block) {
+ return false;
+ }
+
+ initBlock(block);
+ return true;
+}
+
+bool WarpBuilder::startNewLoopHeaderBlock(BytecodeLocation loopHead) {
+ MBasicBlock* header = MBasicBlock::NewPendingLoopHeader(
+ graph(), info(), current, newBytecodeSite(loopHead));
+ if (!header) {
+ return false;
+ }
+
+ initBlock(header);
+ return loopStack_.emplaceBack(header);
+}
+
+bool WarpBuilder::startNewOsrPreHeaderBlock(BytecodeLocation loopHead) {
+ MOZ_ASSERT(loopHead.is(JSOp::LoopHead));
+ MOZ_ASSERT(loopHead.toRawBytecode() == info().osrPc());
+
+ // Create two blocks:
+ // * The OSR entry block. This is always the graph's second block and has no
+ // predecessors. This is the entry point for OSR from the Baseline JIT.
+ // * The OSR preheader block. This has two predecessors: the OSR entry block
+ // and the current block.
+
+ MBasicBlock* pred = current;
+
+ // Create the OSR entry block.
+ if (!startNewEntryBlock(pred->stackDepth(), loopHead)) {
+ return false;
+ }
+
+ MBasicBlock* osrBlock = current;
+ graph().setOsrBlock(osrBlock);
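+  // Keep the OSR entry as the graph's second block, right after the normal
+  // entry block.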
+ graph().moveBlockAfter(*graph().begin(), osrBlock);
+
+ MOsrEntry* entry = MOsrEntry::New(alloc());
+ osrBlock->add(entry);
+
+ // Initialize environment chain.
+ {
+ uint32_t slot = info().environmentChainSlot();
+ MInstruction* envv;
+ if (usesEnvironmentChain()) {
+ envv = MOsrEnvironmentChain::New(alloc(), entry);
+ } else {
+ // Use an undefined value if the script does not need its environment
+ // chain, to match the main entry point.
+ envv = MConstant::New(alloc(), UndefinedValue());
+ }
+ osrBlock->add(envv);
+ osrBlock->initSlot(slot, envv);
+ }
+
+ // Initialize return value.
+ {
+ MInstruction* returnValue;
+ if (!script_->noScriptRval()) {
+ returnValue = MOsrReturnValue::New(alloc(), entry);
+ } else {
+ returnValue = MConstant::New(alloc(), UndefinedValue());
+ }
+ osrBlock->add(returnValue);
+ osrBlock->initSlot(info().returnValueSlot(), returnValue);
+ }
+
+ // Initialize arguments object.
+ MInstruction* argsObj = nullptr;
+ if (info().needsArgsObj()) {
+ argsObj = MOsrArgumentsObject::New(alloc(), entry);
+ osrBlock->add(argsObj);
+ osrBlock->initSlot(info().argsObjSlot(), argsObj);
+ }
+
+ if (info().hasFunMaybeLazy()) {
+ // Initialize |this| parameter.
+ MParameter* thisv = MParameter::New(alloc(), MParameter::THIS_SLOT);
+ osrBlock->add(thisv);
+ osrBlock->initSlot(info().thisSlot(), thisv);
+
+ // Initialize arguments. There are three cases:
+ //
+ // 1) There's no ArgumentsObject or it doesn't alias formals. In this case
+ // we can just use the frame's argument slot.
+ // 2) The ArgumentsObject aliases formals and the argument is stored in the
+ // CallObject. Use |undefined| because we can't load from the arguments
+ // object and code will use the CallObject anyway.
+ // 3) The ArgumentsObject aliases formals and the argument isn't stored in
+ // the CallObject. We have to load it from the ArgumentsObject.
+ for (uint32_t i = 0; i < info().nargs(); i++) {
+ uint32_t slot = info().argSlotUnchecked(i);
+ MInstruction* osrv;
+ if (!info().argsObjAliasesFormals()) {
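+        // Case 1: read the argument directly from the frame's argument slot.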
+ osrv = MParameter::New(alloc().fallible(), i);
+ } else if (script_->formalIsAliased(i)) {
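+        // Case 2: the value lives in the CallObject, so use |undefined| here.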
+ osrv = MConstant::New(alloc().fallible(), UndefinedValue());
+ } else {
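+        // Case 3: load the value from the ArgumentsObject.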
+ osrv = MGetArgumentsObjectArg::New(alloc().fallible(), argsObj, i);
+ }
+ if (!osrv) {
+ return false;
+ }
+ current->add(osrv);
+ current->initSlot(slot, osrv);
+ }
+ }
+
+ // Initialize locals.
+ uint32_t nlocals = info().nlocals();
+ for (uint32_t i = 0; i < nlocals; i++) {
+ uint32_t slot = info().localSlot(i);
+ ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(i);
+ MOsrValue* osrv = MOsrValue::New(alloc().fallible(), entry, offset);
+ if (!osrv) {
+ return false;
+ }
+ current->add(osrv);
+ current->initSlot(slot, osrv);
+ }
+
+ // Initialize expression stack slots.
+ uint32_t numStackSlots = current->stackDepth() - info().firstStackSlot();
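+  // Expression stack values are stored in the BaselineFrame right after the
+  // locals, hence the |nlocals + i| offsets below.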
+ for (uint32_t i = 0; i < numStackSlots; i++) {
+ uint32_t slot = info().stackSlot(i);
+ ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(nlocals + i);
+ MOsrValue* osrv = MOsrValue::New(alloc().fallible(), entry, offset);
+ if (!osrv) {
+ return false;
+ }
+ current->add(osrv);
+ current->initSlot(slot, osrv);
+ }
+
+ MStart* start = MStart::New(alloc());
+ current->add(start);
+
+ // Note: phi specialization can add type guard instructions to the OSR entry
+ // block if needed. See TypeAnalyzer::shouldSpecializeOsrPhis.
+
+ // Create the preheader block, with the predecessor block and OSR block as
+ // predecessors.
+ if (!startNewBlock(pred, loopHead)) {
+ return false;
+ }
+
+ pred->end(MGoto::New(alloc(), current));
+ osrBlock->end(MGoto::New(alloc(), current));
+
+ if (!current->addPredecessor(alloc(), osrBlock)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool WarpBuilder::addPendingEdge(BytecodeLocation target, MBasicBlock* block,
+ uint32_t successor, uint32_t numToPop) {
+ MOZ_ASSERT(successor < block->lastIns()->numSuccessors());
+ MOZ_ASSERT(numToPop <= block->stackDepth());
+
+ jsbytecode* targetPC = target.toRawBytecode();
+ PendingEdgesMap::AddPtr p = pendingEdges_.lookupForAdd(targetPC);
+ if (p) {
+ return p->value().emplaceBack(block, successor, numToPop);
+ }
+
+ PendingEdges edges;
+ static_assert(PendingEdges::InlineLength >= 1,
+ "Appending one element should be infallible");
+ MOZ_ALWAYS_TRUE(edges.emplaceBack(block, successor, numToPop));
+
+ return pendingEdges_.add(p, targetPC, std::move(edges));
+}
+
+bool WarpBuilder::build() {
+ if (!buildPrologue()) {
+ return false;
+ }
+
+ if (!buildBody()) {
+ return false;
+ }
+
+ if (!MPhi::markIteratorPhis(*iterators())) {
+ return false;
+ }
+
+ MOZ_ASSERT_IF(info().osrPc(), graph().osrBlock());
+ MOZ_ASSERT(loopStack_.empty());
+ MOZ_ASSERT(loopDepth() == 0);
+
+ return true;
+}
+
+bool WarpBuilder::buildInline() {
+ if (!buildInlinePrologue()) {
+ return false;
+ }
+
+ if (!buildBody()) {
+ return false;
+ }
+
+ MOZ_ASSERT(loopStack_.empty());
+ return true;
+}
+
+MInstruction* WarpBuilder::buildNamedLambdaEnv(MDefinition* callee,
+ MDefinition* env,
+ NamedLambdaObject* templateObj) {
+ MOZ_ASSERT(templateObj->numDynamicSlots() == 0);
+
+ MInstruction* namedLambda = MNewNamedLambdaObject::New(alloc(), templateObj);
+ current->add(namedLambda);
+
+#ifdef DEBUG
+ // Assert in debug mode we can elide the post write barriers.
+ current->add(MAssertCanElidePostWriteBarrier::New(alloc(), namedLambda, env));
+ current->add(
+ MAssertCanElidePostWriteBarrier::New(alloc(), namedLambda, callee));
+#endif
+
+ // Initialize the object's reserved slots. No post barrier is needed here:
+ // the object will be allocated in the nursery if possible, and if the
+ // tenured heap is used instead, a minor collection will have been performed
+ // that moved env/callee to the tenured heap.
+ size_t enclosingSlot = NamedLambdaObject::enclosingEnvironmentSlot();
+ size_t lambdaSlot = NamedLambdaObject::lambdaSlot();
+ current->add(MStoreFixedSlot::NewUnbarriered(alloc(), namedLambda,
+ enclosingSlot, env));
+ current->add(MStoreFixedSlot::NewUnbarriered(alloc(), namedLambda, lambdaSlot,
+ callee));
+
+ return namedLambda;
+}
+
+MInstruction* WarpBuilder::buildCallObject(MDefinition* callee,
+ MDefinition* env,
+ CallObject* templateObj) {
+ MConstant* templateCst = constant(ObjectValue(*templateObj));
+
+ MNewCallObject* callObj = MNewCallObject::New(alloc(), templateCst);
+ current->add(callObj);
+
+#ifdef DEBUG
+ // Assert in debug mode we can elide the post write barriers.
+ current->add(MAssertCanElidePostWriteBarrier::New(alloc(), callObj, env));
+ current->add(MAssertCanElidePostWriteBarrier::New(alloc(), callObj, callee));
+#endif
+
+ // Initialize the object's reserved slots. No post barrier is needed here,
+ // for the same reason as in buildNamedLambdaEnv.
+ size_t enclosingSlot = CallObject::enclosingEnvironmentSlot();
+ size_t calleeSlot = CallObject::calleeSlot();
+ current->add(
+ MStoreFixedSlot::NewUnbarriered(alloc(), callObj, enclosingSlot, env));
+ current->add(
+ MStoreFixedSlot::NewUnbarriered(alloc(), callObj, calleeSlot, callee));
+
+ return callObj;
+}
+
+bool WarpBuilder::buildEnvironmentChain() {
+ const WarpEnvironment& env = scriptSnapshot()->environment();
+
+ if (env.is<NoEnvironment>()) {
+ return true;
+ }
+
+ MInstruction* envDef = env.match(
+ [](const NoEnvironment&) -> MInstruction* {
+ MOZ_CRASH("Already handled");
+ },
+ [this](JSObject* obj) -> MInstruction* {
+ return constant(ObjectValue(*obj));
+ },
+ [this](const FunctionEnvironment& env) -> MInstruction* {
+ MDefinition* callee = getCallee();
+ MInstruction* envDef = MFunctionEnvironment::New(alloc(), callee);
+ current->add(envDef);
+ if (NamedLambdaObject* obj = env.namedLambdaTemplate) {
+ envDef = buildNamedLambdaEnv(callee, envDef, obj);
+ }
+ if (CallObject* obj = env.callObjectTemplate) {
+ envDef = buildCallObject(callee, envDef, obj);
+ if (!envDef) {
+ return nullptr;
+ }
+ }
+ return envDef;
+ });
+ if (!envDef) {
+ return false;
+ }
+
+ // Update the environment slot from UndefinedValue only after the initial
+ // environment is created so that bailout doesn't see a partial environment.
+ // See: |BaselineStackBuilder::buildBaselineFrame|
+ current->setEnvironmentChain(envDef);
+ return true;
+}
+
+bool WarpBuilder::buildPrologue() {
+ BytecodeLocation startLoc(script_, script_->code());
+ if (!startNewEntryBlock(info().firstStackSlot(), startLoc)) {
+ return false;
+ }
+
+ if (info().hasFunMaybeLazy()) {
+ // Initialize |this|.
+ MParameter* param = MParameter::New(alloc(), MParameter::THIS_SLOT);
+ current->add(param);
+ current->initSlot(info().thisSlot(), param);
+
+ // Initialize arguments.
+ for (uint32_t i = 0; i < info().nargs(); i++) {
+ MParameter* param = MParameter::New(alloc().fallible(), i);
+ if (!param) {
+ return false;
+ }
+ current->add(param);
+ current->initSlot(info().argSlotUnchecked(i), param);
+ }
+ }
+
+ MConstant* undef = constant(UndefinedValue());
+
+ // Initialize local slots.
+ for (uint32_t i = 0; i < info().nlocals(); i++) {
+ current->initSlot(info().localSlot(i), undef);
+ }
+
+ // Initialize the environment chain, return value, and arguments object slots.
+ current->initSlot(info().environmentChainSlot(), undef);
+ current->initSlot(info().returnValueSlot(), undef);
+ if (info().needsArgsObj()) {
+ current->initSlot(info().argsObjSlot(), undef);
+ }
+
+ current->add(MStart::New(alloc()));
+
+ // Guard against over-recursion.
+ MCheckOverRecursed* check = MCheckOverRecursed::New(alloc());
+ current->add(check);
+
+ if (!buildEnvironmentChain()) {
+ return false;
+ }
+
+#ifdef JS_CACHEIR_SPEW
+ if (snapshot().needsFinalWarmUpCount()) {
+ MIncrementWarmUpCounter* ins =
+ MIncrementWarmUpCounter::New(alloc(), script_);
+ current->add(ins);
+ }
+#endif
+
+ return true;
+}
+
+bool WarpBuilder::buildInlinePrologue() {
+ // Generate entry block.
+ BytecodeLocation startLoc(script_, script_->code());
+ if (!startNewEntryBlock(info().firstStackSlot(), startLoc)) {
+ return false;
+ }
+ current->setCallerResumePoint(callerResumePoint());
+
+ // Connect the entry block to the last block in the caller's graph.
+ MBasicBlock* pred = callerBuilder()->current;
+ MOZ_ASSERT(pred == callerResumePoint()->block());
+
+ pred->end(MGoto::New(alloc(), current));
+ if (!current->addPredecessorWithoutPhis(pred)) {
+ return false;
+ }
+
+ MConstant* undef = constant(UndefinedValue());
+
+ // Initialize env chain slot to Undefined. It's set later by
+ // |buildEnvironmentChain|.
+ current->initSlot(info().environmentChainSlot(), undef);
+
+ // Initialize |return value| slot.
+ current->initSlot(info().returnValueSlot(), undef);
+
+ // Initialize |arguments| slot if needed.
+ if (info().needsArgsObj()) {
+ current->initSlot(info().argsObjSlot(), undef);
+ }
+
+ // Initialize |this| slot.
+ current->initSlot(info().thisSlot(), inlineCallInfo()->thisArg());
+
+ uint32_t callerArgs = inlineCallInfo()->argc();
+ uint32_t actualArgs = info().nargs();
+ uint32_t passedArgs = std::min<uint32_t>(callerArgs, actualArgs);
+
+ // Initialize actually set arguments.
+ for (uint32_t i = 0; i < passedArgs; i++) {
+ MDefinition* arg = inlineCallInfo()->getArg(i);
+ current->initSlot(info().argSlotUnchecked(i), arg);
+ }
+
+ // Pass undefined for missing arguments.
+ for (uint32_t i = passedArgs; i < actualArgs; i++) {
+ current->initSlot(info().argSlotUnchecked(i), undef);
+ }
+
+ // Initialize local slots.
+ for (uint32_t i = 0; i < info().nlocals(); i++) {
+ current->initSlot(info().localSlot(i), undef);
+ }
+
+ MOZ_ASSERT(current->entryResumePoint()->stackDepth() == info().totalSlots());
+
+ if (!buildEnvironmentChain()) {
+ return false;
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
+// In debug builds, after compiling a bytecode op, this class is used to check
+// that all values popped by this opcode either:
+//
+// (1) Have the ImplicitlyUsed flag set on them.
+// (2) Have more uses than before compiling this op (the value is
+// used as operand of a new MIR instruction).
+//
+// This is used to catch problems where WarpBuilder pops a value without
+// adding any SSA uses and doesn't call setImplicitlyUsedUnchecked on it.
+class MOZ_RAII WarpPoppedValueUseChecker {
+ Vector<MDefinition*, 4, SystemAllocPolicy> popped_;
+ Vector<size_t, 4, SystemAllocPolicy> poppedUses_;
+ MBasicBlock* current_;
+ BytecodeLocation loc_;
+
+ public:
+ WarpPoppedValueUseChecker(MBasicBlock* current, BytecodeLocation loc)
+ : current_(current), loc_(loc) {}
+
+ [[nodiscard]] bool init() {
+ // Don't require SSA uses for values popped by these ops.
+ switch (loc_.getOp()) {
+ case JSOp::Pop:
+ case JSOp::PopN:
+ case JSOp::DupAt:
+ case JSOp::Dup:
+ case JSOp::Dup2:
+ case JSOp::Pick:
+ case JSOp::Unpick:
+ case JSOp::Swap:
+ case JSOp::SetArg:
+ case JSOp::SetLocal:
+ case JSOp::InitLexical:
+ case JSOp::SetRval:
+ case JSOp::Void:
+ // Basic stack/local/argument management opcodes.
+ return true;
+
+ case JSOp::Case:
+ case JSOp::Default:
+ // These ops have to pop the switch value when branching but don't
+ // actually use it.
+ return true;
+
+ default:
+ break;
+ }
+
+ unsigned nuses = loc_.useCount();
+
+ for (unsigned i = 0; i < nuses; i++) {
+ MDefinition* def = current_->peek(-int32_t(i + 1));
+ if (!popped_.append(def) || !poppedUses_.append(def->defUseCount())) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void checkAfterOp() {
+ for (size_t i = 0; i < popped_.length(); i++) {
+      // The first value popped by JSOp::EndIter is not used at all; it's
+      // similar to JSOp::Pop above.
+ if (loc_.is(JSOp::EndIter) && i == 0) {
+ continue;
+ }
+ MOZ_ASSERT(popped_[i]->isImplicitlyUsed() ||
+ popped_[i]->defUseCount() > poppedUses_[i]);
+ }
+ }
+};
+#endif
+
+bool WarpBuilder::buildBody() {
+ for (BytecodeLocation loc : AllBytecodesIterable(script_)) {
+ if (mirGen().shouldCancel("WarpBuilder (opcode loop)")) {
+ return false;
+ }
+
+    // Skip unreachable ops (for example, code after a 'return' or 'throw')
+    // until we get to the next jump target.
+ if (hasTerminatedBlock()) {
+ // Finish any "broken" loops with an unreachable backedge. For example:
+ //
+ // do {
+ // ...
+ // return;
+ // ...
+ // } while (x);
+ //
+ // This loop never actually loops.
+ if (loc.isBackedge() && !loopStack_.empty()) {
+ BytecodeLocation loopHead(script_, loopStack_.back().header()->pc());
+ if (loc.isBackedgeForLoophead(loopHead)) {
+ decLoopDepth();
+ loopStack_.popBack();
+ }
+ }
+ if (!loc.isJumpTarget()) {
+ continue;
+ }
+ }
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+#ifdef DEBUG
+ WarpPoppedValueUseChecker useChecker(current, loc);
+ if (!useChecker.init()) {
+ return false;
+ }
+#endif
+
+ JSOp op = loc.getOp();
+
+#define BUILD_OP(OP, ...) \
+ case JSOp::OP: \
+ if (MOZ_UNLIKELY(!this->build_##OP(loc))) { \
+ return false; \
+ } \
+ break;
+ switch (op) { FOR_EACH_OPCODE(BUILD_OP) }
+#undef BUILD_OP
+
+#ifdef DEBUG
+ useChecker.checkAfterOp();
+#endif
+ }
+
+ return true;
+}
+
+#define DEF_OP(OP) \
+ bool WarpBuilder::build_##OP(BytecodeLocation) { \
+ MOZ_CRASH("Unsupported op"); \
+ }
+WARP_UNSUPPORTED_OPCODE_LIST(DEF_OP)
+#undef DEF_OP
+
+bool WarpBuilder::build_Nop(BytecodeLocation) { return true; }
+
+bool WarpBuilder::build_NopDestructuring(BytecodeLocation) { return true; }
+
+bool WarpBuilder::build_TryDestructuring(BytecodeLocation) {
+ // Set the hasTryBlock flag to turn off optimizations that eliminate dead
+ // resume points operands because the exception handler code for
+ // TryNoteKind::Destructuring is effectively a (specialized) catch-block.
+ graph().setHasTryBlock();
+ return true;
+}
+
+bool WarpBuilder::build_Lineno(BytecodeLocation) { return true; }
+
+bool WarpBuilder::build_DebugLeaveLexicalEnv(BytecodeLocation) { return true; }
+
+bool WarpBuilder::build_Undefined(BytecodeLocation) {
+ pushConstant(UndefinedValue());
+ return true;
+}
+
+bool WarpBuilder::build_Void(BytecodeLocation) {
+ current->pop();
+ pushConstant(UndefinedValue());
+ return true;
+}
+
+bool WarpBuilder::build_Null(BytecodeLocation) {
+ pushConstant(NullValue());
+ return true;
+}
+
+bool WarpBuilder::build_Hole(BytecodeLocation) {
+ pushConstant(MagicValue(JS_ELEMENTS_HOLE));
+ return true;
+}
+
+bool WarpBuilder::build_Uninitialized(BytecodeLocation) {
+ pushConstant(MagicValue(JS_UNINITIALIZED_LEXICAL));
+ return true;
+}
+
+bool WarpBuilder::build_IsConstructing(BytecodeLocation) {
+ pushConstant(MagicValue(JS_IS_CONSTRUCTING));
+ return true;
+}
+
+bool WarpBuilder::build_False(BytecodeLocation) {
+ pushConstant(BooleanValue(false));
+ return true;
+}
+
+bool WarpBuilder::build_True(BytecodeLocation) {
+ pushConstant(BooleanValue(true));
+ return true;
+}
+
+bool WarpBuilder::build_Pop(BytecodeLocation) {
+ current->pop();
+ return true;
+}
+
+bool WarpBuilder::build_PopN(BytecodeLocation loc) {
+ for (uint32_t i = 0, n = loc.getPopCount(); i < n; i++) {
+ current->pop();
+ }
+ return true;
+}
+
+bool WarpBuilder::build_Dup(BytecodeLocation) {
+ current->pushSlot(current->stackDepth() - 1);
+ return true;
+}
+
+bool WarpBuilder::build_Dup2(BytecodeLocation) {
+ uint32_t lhsSlot = current->stackDepth() - 2;
+ uint32_t rhsSlot = current->stackDepth() - 1;
+ current->pushSlot(lhsSlot);
+ current->pushSlot(rhsSlot);
+ return true;
+}
+
+bool WarpBuilder::build_DupAt(BytecodeLocation loc) {
+ current->pushSlot(current->stackDepth() - 1 - loc.getDupAtIndex());
+ return true;
+}
+
+bool WarpBuilder::build_Swap(BytecodeLocation) {
+ current->swapAt(-1);
+ return true;
+}
+
+bool WarpBuilder::build_Pick(BytecodeLocation loc) {
+ int32_t depth = -int32_t(loc.getPickDepth());
+ current->pick(depth);
+ return true;
+}
+
+bool WarpBuilder::build_Unpick(BytecodeLocation loc) {
+ int32_t depth = -int32_t(loc.getUnpickDepth());
+ current->unpick(depth);
+ return true;
+}
+
+bool WarpBuilder::build_Zero(BytecodeLocation) {
+ pushConstant(Int32Value(0));
+ return true;
+}
+
+bool WarpBuilder::build_One(BytecodeLocation) {
+ pushConstant(Int32Value(1));
+ return true;
+}
+
+bool WarpBuilder::build_Int8(BytecodeLocation loc) {
+ pushConstant(Int32Value(loc.getInt8()));
+ return true;
+}
+
+bool WarpBuilder::build_Uint16(BytecodeLocation loc) {
+ pushConstant(Int32Value(loc.getUint16()));
+ return true;
+}
+
+bool WarpBuilder::build_Uint24(BytecodeLocation loc) {
+ pushConstant(Int32Value(loc.getUint24()));
+ return true;
+}
+
+bool WarpBuilder::build_Int32(BytecodeLocation loc) {
+ pushConstant(Int32Value(loc.getInt32()));
+ return true;
+}
+
+bool WarpBuilder::build_Double(BytecodeLocation loc) {
+ pushConstant(loc.getInlineValue());
+ return true;
+}
+
+bool WarpBuilder::build_BigInt(BytecodeLocation loc) {
+ BigInt* bi = loc.getBigInt(script_);
+ pushConstant(BigIntValue(bi));
+ return true;
+}
+
+bool WarpBuilder::build_String(BytecodeLocation loc) {
+ JSString* str = loc.getString(script_);
+ pushConstant(StringValue(str));
+ return true;
+}
+
+bool WarpBuilder::build_Symbol(BytecodeLocation loc) {
+ uint32_t which = loc.getSymbolIndex();
+ JS::Symbol* sym = mirGen().runtime->wellKnownSymbols().get(which);
+ pushConstant(SymbolValue(sym));
+ return true;
+}
+
+bool WarpBuilder::build_RegExp(BytecodeLocation loc) {
+ RegExpObject* reObj = loc.getRegExp(script_);
+
+ auto* snapshot = getOpSnapshot<WarpRegExp>(loc);
+
+ MRegExp* regexp = MRegExp::New(alloc(), reObj, snapshot->hasShared());
+ current->add(regexp);
+ current->push(regexp);
+
+ return true;
+}
+
+bool WarpBuilder::build_Return(BytecodeLocation) {
+ MDefinition* def = current->pop();
+
+ MReturn* ret = MReturn::New(alloc(), def);
+ current->end(ret);
+
+ if (!graph().addReturn(current)) {
+ return false;
+ }
+
+ setTerminatedBlock();
+ return true;
+}
+
+bool WarpBuilder::build_RetRval(BytecodeLocation) {
+ MDefinition* rval;
+ if (script_->noScriptRval()) {
+ rval = constant(UndefinedValue());
+ } else {
+ rval = current->getSlot(info().returnValueSlot());
+ }
+
+ MReturn* ret = MReturn::New(alloc(), rval);
+ current->end(ret);
+
+ if (!graph().addReturn(current)) {
+ return false;
+ }
+
+ setTerminatedBlock();
+ return true;
+}
+
+bool WarpBuilder::build_SetRval(BytecodeLocation) {
+ MOZ_ASSERT(!script_->noScriptRval());
+ MDefinition* rval = current->pop();
+ current->setSlot(info().returnValueSlot(), rval);
+ return true;
+}
+
+bool WarpBuilder::build_GetRval(BytecodeLocation) {
+ MOZ_ASSERT(!script_->noScriptRval());
+ MDefinition* rval = current->getSlot(info().returnValueSlot());
+ current->push(rval);
+ return true;
+}
+
+bool WarpBuilder::build_GetLocal(BytecodeLocation loc) {
+ current->pushLocal(loc.local());
+ return true;
+}
+
+bool WarpBuilder::build_SetLocal(BytecodeLocation loc) {
+ current->setLocal(loc.local());
+ return true;
+}
+
+bool WarpBuilder::build_InitLexical(BytecodeLocation loc) {
+ current->setLocal(loc.local());
+ return true;
+}
+
+bool WarpBuilder::build_GetArg(BytecodeLocation loc) {
+ uint32_t arg = loc.arg();
+ if (info().argsObjAliasesFormals()) {
+ MDefinition* argsObj = current->argumentsObject();
+ auto* getArg = MGetArgumentsObjectArg::New(alloc(), argsObj, arg);
+ current->add(getArg);
+ current->push(getArg);
+ } else {
+ current->pushArg(arg);
+ }
+ return true;
+}
+
+bool WarpBuilder::build_GetFrameArg(BytecodeLocation loc) {
+ current->pushArgUnchecked(loc.arg());
+ return true;
+}
+
+bool WarpBuilder::build_SetArg(BytecodeLocation loc) {
+ uint32_t arg = loc.arg();
+ MDefinition* val = current->peek(-1);
+
+ if (!info().argsObjAliasesFormals()) {
+ // Either |arguments| is never referenced within this function, or
+    // it doesn't map to the actual argument values. Either way, we
+ // don't need to worry about synchronizing the argument values
+ // when writing to them.
+ current->setArg(arg);
+ return true;
+ }
+
+ // If an arguments object is in use, and it aliases formals, then all SetArgs
+ // must go through the arguments object.
+ MDefinition* argsObj = current->argumentsObject();
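+  // |argsObj| may be tenured while |val| is a nursery object, so a post write
+  // barrier is needed for the store below.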
+ current->add(MPostWriteBarrier::New(alloc(), argsObj, val));
+ auto* ins = MSetArgumentsObjectArg::New(alloc(), argsObj, val, arg);
+ current->add(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_ArgumentsLength(BytecodeLocation) {
+ if (inlineCallInfo()) {
+ pushConstant(Int32Value(inlineCallInfo()->argc()));
+ } else {
+ auto* argsLength = MArgumentsLength::New(alloc());
+ current->add(argsLength);
+ current->push(argsLength);
+ }
+ return true;
+}
+
+bool WarpBuilder::build_GetActualArg(BytecodeLocation) {
+ MDefinition* index = current->pop();
+ MInstruction* arg;
+ if (inlineCallInfo()) {
+ arg = MGetInlinedArgument::New(alloc(), index, *inlineCallInfo());
+ } else {
+ arg = MGetFrameArgument::New(alloc(), index);
+ }
+ current->add(arg);
+ current->push(arg);
+ return true;
+}
+
+bool WarpBuilder::build_ToNumeric(BytecodeLocation loc) {
+ return buildUnaryOp(loc);
+}
+
+bool WarpBuilder::buildUnaryOp(BytecodeLocation loc) {
+ MDefinition* value = current->pop();
+ return buildIC(loc, CacheKind::UnaryArith, {value});
+}
+
+bool WarpBuilder::build_Inc(BytecodeLocation loc) { return buildUnaryOp(loc); }
+
+bool WarpBuilder::build_Dec(BytecodeLocation loc) { return buildUnaryOp(loc); }
+
+bool WarpBuilder::build_Pos(BytecodeLocation loc) { return buildUnaryOp(loc); }
+
+bool WarpBuilder::build_Neg(BytecodeLocation loc) { return buildUnaryOp(loc); }
+
+bool WarpBuilder::build_BitNot(BytecodeLocation loc) {
+ return buildUnaryOp(loc);
+}
+
+bool WarpBuilder::buildBinaryOp(BytecodeLocation loc) {
+ MDefinition* right = current->pop();
+ MDefinition* left = current->pop();
+ return buildIC(loc, CacheKind::BinaryArith, {left, right});
+}
+
+bool WarpBuilder::build_Add(BytecodeLocation loc) { return buildBinaryOp(loc); }
+
+bool WarpBuilder::build_Sub(BytecodeLocation loc) { return buildBinaryOp(loc); }
+
+bool WarpBuilder::build_Mul(BytecodeLocation loc) { return buildBinaryOp(loc); }
+
+bool WarpBuilder::build_Div(BytecodeLocation loc) { return buildBinaryOp(loc); }
+
+bool WarpBuilder::build_Mod(BytecodeLocation loc) { return buildBinaryOp(loc); }
+
+bool WarpBuilder::build_Pow(BytecodeLocation loc) { return buildBinaryOp(loc); }
+
+bool WarpBuilder::build_BitAnd(BytecodeLocation loc) {
+ return buildBinaryOp(loc);
+}
+
+bool WarpBuilder::build_BitOr(BytecodeLocation loc) {
+ return buildBinaryOp(loc);
+}
+
+bool WarpBuilder::build_BitXor(BytecodeLocation loc) {
+ return buildBinaryOp(loc);
+}
+
+bool WarpBuilder::build_Lsh(BytecodeLocation loc) { return buildBinaryOp(loc); }
+
+bool WarpBuilder::build_Rsh(BytecodeLocation loc) { return buildBinaryOp(loc); }
+
+bool WarpBuilder::build_Ursh(BytecodeLocation loc) {
+ return buildBinaryOp(loc);
+}
+
+bool WarpBuilder::buildCompareOp(BytecodeLocation loc) {
+ MDefinition* right = current->pop();
+ MDefinition* left = current->pop();
+ return buildIC(loc, CacheKind::Compare, {left, right});
+}
+
+bool WarpBuilder::build_Eq(BytecodeLocation loc) { return buildCompareOp(loc); }
+
+bool WarpBuilder::build_Ne(BytecodeLocation loc) { return buildCompareOp(loc); }
+
+bool WarpBuilder::build_Lt(BytecodeLocation loc) { return buildCompareOp(loc); }
+
+bool WarpBuilder::build_Le(BytecodeLocation loc) { return buildCompareOp(loc); }
+
+bool WarpBuilder::build_Gt(BytecodeLocation loc) { return buildCompareOp(loc); }
+
+bool WarpBuilder::build_Ge(BytecodeLocation loc) { return buildCompareOp(loc); }
+
+bool WarpBuilder::build_StrictEq(BytecodeLocation loc) {
+ return buildCompareOp(loc);
+}
+
+bool WarpBuilder::build_StrictNe(BytecodeLocation loc) {
+ return buildCompareOp(loc);
+}
+
+// Returns true iff the MTest added for |op| has a true-target corresponding
+// to the join point in the bytecode.
+static bool TestTrueTargetIsJoinPoint(JSOp op) {
+ switch (op) {
+ case JSOp::JumpIfTrue:
+ case JSOp::Or:
+ case JSOp::Case:
+ return true;
+
+ case JSOp::JumpIfFalse:
+ case JSOp::And:
+ case JSOp::Coalesce:
+ return false;
+
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+}
+
+bool WarpBuilder::build_JumpTarget(BytecodeLocation loc) {
+ PendingEdgesMap::Ptr p = pendingEdges_.lookup(loc.toRawBytecode());
+ if (!p) {
+ // No (reachable) jumps so this is just a no-op.
+ return true;
+ }
+
+ PendingEdges edges(std::move(p->value()));
+ pendingEdges_.remove(p);
+
+ MOZ_ASSERT(!edges.empty());
+
+ // Create join block if there's fall-through from the previous bytecode op.
+ if (!hasTerminatedBlock()) {
+ MBasicBlock* pred = current;
+ if (!startNewBlock(pred, loc)) {
+ return false;
+ }
+ pred->end(MGoto::New(alloc(), current));
+ }
+
+ for (const PendingEdge& edge : edges) {
+ MBasicBlock* source = edge.block();
+ uint32_t numToPop = edge.numToPop();
+
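+    // If there was no fall-through above, the first edge starts the join
+    // block; every other edge just becomes an additional predecessor of it.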
+ if (hasTerminatedBlock()) {
+ if (!startNewBlock(source, loc, numToPop)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(source->stackDepth() - numToPop == current->stackDepth());
+ if (!current->addPredecessorPopN(alloc(), source, numToPop)) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(source->lastIns()->isTest() || source->lastIns()->isGoto() ||
+ source->lastIns()->isTableSwitch());
+ source->lastIns()->initSuccessor(edge.successor(), current);
+ }
+
+ MOZ_ASSERT(!hasTerminatedBlock());
+ return true;
+}
+
+bool WarpBuilder::addIteratorLoopPhis(BytecodeLocation loopHead) {
+ // When unwinding the stack for a thrown exception, the exception handler must
+ // close live iterators. For ForIn and Destructuring loops, the exception
+ // handler needs access to values on the stack. To prevent them from being
+ // optimized away (and replaced with the JS_OPTIMIZED_OUT MagicValue), we need
+ // to mark the phis (and phis they flow into) as having implicit uses.
+ // See ProcessTryNotes in vm/Interpreter.cpp and CloseLiveIteratorIon in
+ // jit/JitFrames.cpp
+
+ MOZ_ASSERT(current->stackDepth() >= info().firstStackSlot());
+
+ bool emptyStack = current->stackDepth() == info().firstStackSlot();
+ if (emptyStack) {
+ return true;
+ }
+
+ jsbytecode* loopHeadPC = loopHead.toRawBytecode();
+
+ for (TryNoteIterAllNoGC tni(script_, loopHeadPC); !tni.done(); ++tni) {
+ const TryNote& tn = **tni;
+
+ // Stop if we reach an outer loop because outer loops were already
+ // processed when we visited their loop headers.
+ if (tn.isLoop()) {
+ BytecodeLocation tnStart = script_->offsetToLocation(tn.start);
+ if (tnStart != loopHead) {
+ MOZ_ASSERT(tnStart.is(JSOp::LoopHead));
+ MOZ_ASSERT(tnStart < loopHead);
+ return true;
+ }
+ }
+
+ switch (tn.kind()) {
+ case TryNoteKind::Destructuring:
+ case TryNoteKind::ForIn: {
+ // For for-in loops we add the iterator object to iterators(). For
+ // destructuring loops we add the "done" value that's on top of the
+ // stack and used in the exception handler.
+ MOZ_ASSERT(tn.stackDepth >= 1);
+ uint32_t slot = info().stackSlot(tn.stackDepth - 1);
+ MPhi* phi = current->getSlot(slot)->toPhi();
+ if (!iterators()->append(phi)) {
+ return false;
+ }
+ break;
+ }
+ case TryNoteKind::Loop:
+ case TryNoteKind::ForOf:
+ // Regular loops do not have iterators to close. ForOf loops handle
+ // unwinding using catch blocks.
+ break;
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool WarpBuilder::build_LoopHead(BytecodeLocation loc) {
+ // All loops have the following bytecode structure:
+ //
+ // LoopHead
+ // ...
+ // JumpIfTrue/Goto to LoopHead
+
+ if (hasTerminatedBlock()) {
+ // The whole loop is unreachable.
+ return true;
+ }
+
+ // Handle OSR from Baseline JIT code.
+ if (loc.toRawBytecode() == info().osrPc()) {
+ if (!startNewOsrPreHeaderBlock(loc)) {
+ return false;
+ }
+ }
+
+ incLoopDepth();
+
+ MBasicBlock* pred = current;
+ if (!startNewLoopHeaderBlock(loc)) {
+ return false;
+ }
+
+ pred->end(MGoto::New(alloc(), current));
+
+ if (!addIteratorLoopPhis(loc)) {
+ return false;
+ }
+
+ MInterruptCheck* check = MInterruptCheck::New(alloc());
+ current->add(check);
+
+#ifdef JS_CACHEIR_SPEW
+ if (snapshot().needsFinalWarmUpCount()) {
+ MIncrementWarmUpCounter* ins =
+ MIncrementWarmUpCounter::New(alloc(), script_);
+ current->add(ins);
+ }
+#endif
+
+ return true;
+}
+
+bool WarpBuilder::buildTestOp(BytecodeLocation loc) {
+ MDefinition* originalValue = current->peek(-1);
+
+ if (auto* cacheIRSnapshot = getOpSnapshot<WarpCacheIR>(loc)) {
+ // If we have CacheIR, we can use it to refine the input. Note that
+ // the transpiler doesn't generate any control instructions. Instead,
+ // we fall through and generate them below.
+ MDefinition* value = current->pop();
+ if (!TranspileCacheIRToMIR(this, loc, cacheIRSnapshot, {value})) {
+ return false;
+ }
+ }
+
+ if (loc.isBackedge()) {
+ return buildTestBackedge(loc);
+ }
+
+ JSOp op = loc.getOp();
+ BytecodeLocation target1 = loc.next();
+ BytecodeLocation target2 = loc.getJumpTarget();
+
+ if (TestTrueTargetIsJoinPoint(op)) {
+ std::swap(target1, target2);
+ }
+
+ MDefinition* value = current->pop();
+
+ // JSOp::And and JSOp::Or leave the top stack value unchanged. The
+ // top stack value may have been converted to bool by a transpiled
+ // ToBool IC, so we push the original value.
+ bool mustKeepCondition = (op == JSOp::And || op == JSOp::Or);
+ if (mustKeepCondition) {
+ current->push(originalValue);
+ }
+
+ // If this op always branches to the same location we treat this as a
+ // JSOp::Goto.
+ if (target1 == target2) {
+ value->setImplicitlyUsedUnchecked();
+ return buildForwardGoto(target1);
+ }
+
+ MTest* test = MTest::New(alloc(), value, /* ifTrue = */ nullptr,
+ /* ifFalse = */ nullptr);
+ current->end(test);
+
+ // JSOp::Case must pop a second value on the true-branch (the input to the
+ // switch-statement).
+ uint32_t numToPop = (loc.getOp() == JSOp::Case) ? 1 : 0;
+
+ if (!addPendingEdge(target1, current, MTest::TrueBranchIndex, numToPop)) {
+ return false;
+ }
+ if (!addPendingEdge(target2, current, MTest::FalseBranchIndex)) {
+ return false;
+ }
+
+ if (const auto* typesSnapshot = getOpSnapshot<WarpPolymorphicTypes>(loc)) {
+ test->setObservedTypes(typesSnapshot->list());
+ }
+
+ setTerminatedBlock();
+ return true;
+}
+
+bool WarpBuilder::buildTestBackedge(BytecodeLocation loc) {
+ MOZ_ASSERT(loc.is(JSOp::JumpIfTrue));
+ MOZ_ASSERT(loopDepth() > 0);
+
+ MDefinition* value = current->pop();
+
+ BytecodeLocation loopHead = loc.getJumpTarget();
+ MOZ_ASSERT(loopHead.is(JSOp::LoopHead));
+
+ BytecodeLocation successor = loc.next();
+
+ // We can finish the loop now. Use the loophead pc instead of the current pc
+ // because the stack depth at the start of that op matches the current stack
+ // depth (after popping our operand).
+ MBasicBlock* pred = current;
+ if (!startNewBlock(current, loopHead)) {
+ return false;
+ }
+
+ MTest* test = MTest::New(alloc(), value, /* ifTrue = */ current,
+ /* ifFalse = */ nullptr);
+ pred->end(test);
+
+ if (const auto* typesSnapshot = getOpSnapshot<WarpPolymorphicTypes>(loc)) {
+ test->setObservedTypes(typesSnapshot->list());
+ }
+
+ if (!addPendingEdge(successor, pred, MTest::FalseBranchIndex)) {
+ return false;
+ }
+
+ return buildBackedge();
+}
+
+bool WarpBuilder::build_JumpIfFalse(BytecodeLocation loc) {
+ return buildTestOp(loc);
+}
+
+bool WarpBuilder::build_JumpIfTrue(BytecodeLocation loc) {
+ return buildTestOp(loc);
+}
+
+bool WarpBuilder::build_And(BytecodeLocation loc) { return buildTestOp(loc); }
+
+bool WarpBuilder::build_Or(BytecodeLocation loc) { return buildTestOp(loc); }
+
+bool WarpBuilder::build_Case(BytecodeLocation loc) { return buildTestOp(loc); }
+
+bool WarpBuilder::build_Default(BytecodeLocation loc) {
+ current->pop();
+ return buildForwardGoto(loc.getJumpTarget());
+}
+
+bool WarpBuilder::build_Coalesce(BytecodeLocation loc) {
+ BytecodeLocation target1 = loc.next();
+ BytecodeLocation target2 = loc.getJumpTarget();
+ MOZ_ASSERT(target2 > target1);
+
+ MDefinition* value = current->peek(-1);
+
+ MInstruction* isNullOrUndefined = MIsNullOrUndefined::New(alloc(), value);
+ current->add(isNullOrUndefined);
+
+ current->end(MTest::New(alloc(), isNullOrUndefined, /* ifTrue = */ nullptr,
+ /* ifFalse = */ nullptr));
+
+ if (!addPendingEdge(target1, current, MTest::TrueBranchIndex)) {
+ return false;
+ }
+ if (!addPendingEdge(target2, current, MTest::FalseBranchIndex)) {
+ return false;
+ }
+
+ setTerminatedBlock();
+ return true;
+}
+
+bool WarpBuilder::buildBackedge() {
+ decLoopDepth();
+
+ MBasicBlock* header = loopStack_.popCopy().header();
+ current->end(MGoto::New(alloc(), header));
+
+ if (!header->setBackedge(current)) {
+ return false;
+ }
+
+ setTerminatedBlock();
+ return true;
+}
+
+bool WarpBuilder::buildForwardGoto(BytecodeLocation target) {
+ current->end(MGoto::New(alloc(), nullptr));
+
+ if (!addPendingEdge(target, current, MGoto::TargetIndex)) {
+ return false;
+ }
+
+ setTerminatedBlock();
+ return true;
+}
+
+bool WarpBuilder::build_Goto(BytecodeLocation loc) {
+ if (loc.isBackedge()) {
+ return buildBackedge();
+ }
+
+ return buildForwardGoto(loc.getJumpTarget());
+}
+
+bool WarpBuilder::build_IsNullOrUndefined(BytecodeLocation loc) {
+ MDefinition* value = current->peek(-1);
+ auto* isNullOrUndef = MIsNullOrUndefined::New(alloc(), value);
+ current->add(isNullOrUndef);
+ current->push(isNullOrUndef);
+ return true;
+}
+
+bool WarpBuilder::build_DebugCheckSelfHosted(BytecodeLocation loc) {
+#ifdef DEBUG
+ MDefinition* val = current->pop();
+ MDebugCheckSelfHosted* check = MDebugCheckSelfHosted::New(alloc(), val);
+ current->add(check);
+ current->push(check);
+ if (!resumeAfter(check, loc)) {
+ return false;
+ }
+#endif
+ return true;
+}
+
+bool WarpBuilder::build_DynamicImport(BytecodeLocation loc) {
+ MDefinition* options = current->pop();
+ MDefinition* specifier = current->pop();
+ MDynamicImport* ins = MDynamicImport::New(alloc(), specifier, options);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_Not(BytecodeLocation loc) {
+ if (auto* cacheIRSnapshot = getOpSnapshot<WarpCacheIR>(loc)) {
+ // If we have CacheIR, we can use it to refine the input before
+ // emitting the MNot.
+ MDefinition* value = current->pop();
+ if (!TranspileCacheIRToMIR(this, loc, cacheIRSnapshot, {value})) {
+ return false;
+ }
+ }
+
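+  // If the CacheIR was transpiled, it pushed the refined value back on the
+  // stack, so this pop retrieves either the refined or the original input.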
+ MDefinition* value = current->pop();
+ MNot* ins = MNot::New(alloc(), value);
+ current->add(ins);
+ current->push(ins);
+
+ if (const auto* typesSnapshot = getOpSnapshot<WarpPolymorphicTypes>(loc)) {
+ ins->setObservedTypes(typesSnapshot->list());
+ }
+
+ return true;
+}
+
+bool WarpBuilder::build_ToString(BytecodeLocation loc) {
+ MDefinition* value = current->pop();
+
+ if (value->type() == MIRType::String) {
+ value->setImplicitlyUsedUnchecked();
+ current->push(value);
+ return true;
+ }
+
+ MToString* ins =
+ MToString::New(alloc(), value, MToString::SideEffectHandling::Supported);
+ current->add(ins);
+ current->push(ins);
+ if (ins->isEffectful()) {
+ return resumeAfter(ins, loc);
+ }
+ return true;
+}
+
+bool WarpBuilder::usesEnvironmentChain() const {
+ return script_->jitScript()->usesEnvironmentChain();
+}
+
+bool WarpBuilder::build_GlobalOrEvalDeclInstantiation(BytecodeLocation loc) {
+ MOZ_ASSERT(!script_->isForEval(), "Eval scripts not supported");
+ auto* redeclCheck = MGlobalDeclInstantiation::New(alloc());
+ current->add(redeclCheck);
+ return resumeAfter(redeclCheck, loc);
+}
+
+bool WarpBuilder::build_BindVar(BytecodeLocation) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ MDefinition* env = current->environmentChain();
+ MCallBindVar* ins = MCallBindVar::New(alloc(), env);
+ current->add(ins);
+ current->push(ins);
+ return true;
+}
+
+bool WarpBuilder::build_MutateProto(BytecodeLocation loc) {
+ MDefinition* value = current->pop();
+ MDefinition* obj = current->peek(-1);
+ MMutateProto* mutate = MMutateProto::New(alloc(), obj, value);
+ current->add(mutate);
+ return resumeAfter(mutate, loc);
+}
+
+MDefinition* WarpBuilder::getCallee() {
+ if (inlineCallInfo()) {
+ return inlineCallInfo()->callee();
+ }
+
+ MInstruction* callee = MCallee::New(alloc());
+ current->add(callee);
+ return callee;
+}
+
+bool WarpBuilder::build_Callee(BytecodeLocation) {
+ MDefinition* callee = getCallee();
+ current->push(callee);
+ return true;
+}
+
+bool WarpBuilder::build_ToAsyncIter(BytecodeLocation loc) {
+ MDefinition* nextMethod = current->pop();
+ MDefinition* iterator = current->pop();
+ MToAsyncIter* ins = MToAsyncIter::New(alloc(), iterator, nextMethod);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_ToPropertyKey(BytecodeLocation loc) {
+ MDefinition* value = current->pop();
+ return buildIC(loc, CacheKind::ToPropertyKey, {value});
+}
+
+bool WarpBuilder::build_Typeof(BytecodeLocation loc) {
+ MDefinition* input = current->pop();
+
+ if (const auto* typesSnapshot = getOpSnapshot<WarpPolymorphicTypes>(loc)) {
+ auto* typeOf = MTypeOf::New(alloc(), input);
+ typeOf->setObservedTypes(typesSnapshot->list());
+ current->add(typeOf);
+
+ auto* ins = MTypeOfName::New(alloc(), typeOf);
+ current->add(ins);
+ current->push(ins);
+ return true;
+ }
+
+ return buildIC(loc, CacheKind::TypeOf, {input});
+}
+
+bool WarpBuilder::build_TypeofExpr(BytecodeLocation loc) {
+ return build_Typeof(loc);
+}
+
+bool WarpBuilder::build_Arguments(BytecodeLocation loc) {
+ auto* snapshot = getOpSnapshot<WarpArguments>(loc);
+ MOZ_ASSERT(info().needsArgsObj());
+ MOZ_ASSERT(snapshot);
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ ArgumentsObject* templateObj = snapshot->templateObj();
+ MDefinition* env = current->environmentChain();
+
+ MInstruction* argsObj;
+ if (inlineCallInfo()) {
+ argsObj = MCreateInlinedArgumentsObject::New(
+ alloc(), env, getCallee(), inlineCallInfo()->argv(), templateObj);
+ if (!argsObj) {
+ return false;
+ }
+ } else {
+ argsObj = MCreateArgumentsObject::New(alloc(), env, templateObj);
+ }
+ current->add(argsObj);
+ current->setArgumentsObject(argsObj);
+ current->push(argsObj);
+
+ return true;
+}
+
+bool WarpBuilder::build_ObjWithProto(BytecodeLocation loc) {
+ MDefinition* proto = current->pop();
+ MInstruction* ins = MObjectWithProto::New(alloc(), proto);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+MDefinition* WarpBuilder::walkEnvironmentChain(uint32_t numHops) {
+ MDefinition* env = current->environmentChain();
+
+ for (uint32_t i = 0; i < numHops; i++) {
+ if (!alloc().ensureBallast()) {
+ return nullptr;
+ }
+
+ MInstruction* ins = MEnclosingEnvironment::New(alloc(), env);
+ current->add(ins);
+ env = ins;
+ }
+
+ return env;
+}
+
+bool WarpBuilder::build_GetAliasedVar(BytecodeLocation loc) {
+ EnvironmentCoordinate ec = loc.getEnvironmentCoordinate();
+ MDefinition* obj = walkEnvironmentChain(ec.hops());
+ if (!obj) {
+ return false;
+ }
+
+ MInstruction* load;
+ if (EnvironmentObject::nonExtensibleIsFixedSlot(ec)) {
+ load = MLoadFixedSlot::New(alloc(), obj, ec.slot());
+ } else {
+ MInstruction* slots = MSlots::New(alloc(), obj);
+ current->add(slots);
+
+ uint32_t slot = EnvironmentObject::nonExtensibleDynamicSlotIndex(ec);
+ load = MLoadDynamicSlot::New(alloc(), slots, slot);
+ }
+
+ current->add(load);
+ current->push(load);
+ return true;
+}
+
+bool WarpBuilder::build_SetAliasedVar(BytecodeLocation loc) {
+ EnvironmentCoordinate ec = loc.getEnvironmentCoordinate();
+ MDefinition* val = current->peek(-1);
+ MDefinition* obj = walkEnvironmentChain(ec.hops());
+ if (!obj) {
+ return false;
+ }
+
+ current->add(MPostWriteBarrier::New(alloc(), obj, val));
+
+ MInstruction* store;
+ if (EnvironmentObject::nonExtensibleIsFixedSlot(ec)) {
+ store = MStoreFixedSlot::NewBarriered(alloc(), obj, ec.slot(), val);
+ } else {
+ MInstruction* slots = MSlots::New(alloc(), obj);
+ current->add(slots);
+
+ uint32_t slot = EnvironmentObject::nonExtensibleDynamicSlotIndex(ec);
+ store = MStoreDynamicSlot::NewBarriered(alloc(), slots, slot, val);
+ }
+
+ current->add(store);
+ return resumeAfter(store, loc);
+}
+
+bool WarpBuilder::build_InitAliasedLexical(BytecodeLocation loc) {
+ return build_SetAliasedVar(loc);
+}
+
+bool WarpBuilder::build_EnvCallee(BytecodeLocation loc) {
+ uint32_t numHops = loc.getEnvCalleeNumHops();
+ MDefinition* env = walkEnvironmentChain(numHops);
+ if (!env) {
+ return false;
+ }
+
+ auto* callee = MLoadFixedSlot::New(alloc(), env, CallObject::calleeSlot());
+ current->add(callee);
+ current->push(callee);
+ return true;
+}
+
+bool WarpBuilder::build_Iter(BytecodeLocation loc) {
+ MDefinition* obj = current->pop();
+ return buildIC(loc, CacheKind::GetIterator, {obj});
+}
+
+bool WarpBuilder::build_MoreIter(BytecodeLocation loc) {
+ MDefinition* iter = current->peek(-1);
+ MInstruction* ins = MIteratorMore::New(alloc(), iter);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_EndIter(BytecodeLocation loc) {
+ current->pop(); // Iterator value is not used.
+ MDefinition* iter = current->pop();
+ MInstruction* ins = MIteratorEnd::New(alloc(), iter);
+ current->add(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_CloseIter(BytecodeLocation loc) {
+ MDefinition* iter = current->pop();
+ return buildIC(loc, CacheKind::CloseIter, {iter});
+}
+
+bool WarpBuilder::build_IsNoIter(BytecodeLocation) {
+ MDefinition* def = current->peek(-1);
+ MOZ_ASSERT(def->isIteratorMore());
+ MInstruction* ins = MIsNoIter::New(alloc(), def);
+ current->add(ins);
+ current->push(ins);
+ return true;
+}
+
+bool WarpBuilder::transpileCall(BytecodeLocation loc,
+ const WarpCacheIR* cacheIRSnapshot,
+ CallInfo* callInfo) {
+ // Synthesize the constant number of arguments for this call op.
+ auto* argc = MConstant::New(alloc(), Int32Value(callInfo->argc()));
+ current->add(argc);
+
+ return TranspileCacheIRToMIR(this, loc, cacheIRSnapshot, {argc}, callInfo);
+}
+
+void WarpBuilder::buildCreateThis(CallInfo& callInfo) {
+ MOZ_ASSERT(callInfo.constructing());
+
+ // Inline the this-object allocation on the caller-side.
+ MDefinition* callee = callInfo.callee();
+ MDefinition* newTarget = callInfo.getNewTarget();
+ auto* createThis = MCreateThis::New(alloc(), callee, newTarget);
+ current->add(createThis);
+ callInfo.thisArg()->setImplicitlyUsedUnchecked();
+ callInfo.setThis(createThis);
+}
+
+bool WarpBuilder::buildCallOp(BytecodeLocation loc) {
+ uint32_t argc = loc.getCallArgc();
+ JSOp op = loc.getOp();
+ bool constructing = IsConstructOp(op);
+ bool ignoresReturnValue = (op == JSOp::CallIgnoresRv || loc.resultIsPopped());
+
+ CallInfo callInfo(alloc(), constructing, ignoresReturnValue);
+ if (!callInfo.init(current, argc)) {
+ return false;
+ }
+
+ if (const auto* inliningSnapshot = getOpSnapshot<WarpInlinedCall>(loc)) {
+ // Transpile the CacheIR to generate the correct guards before
+ // inlining. In this case, CacheOp::CallInlinedFunction updates
+ // the CallInfo, but does not generate a call.
+ callInfo.markAsInlined();
+ if (!transpileCall(loc, inliningSnapshot->cacheIRSnapshot(), &callInfo)) {
+ return false;
+ }
+
+ // Generate the body of the inlined function.
+ return buildInlinedCall(loc, inliningSnapshot, callInfo);
+ }
+
+ if (auto* cacheIRSnapshot = getOpSnapshot<WarpCacheIR>(loc)) {
+ return transpileCall(loc, cacheIRSnapshot, &callInfo);
+ }
+
+ if (getOpSnapshot<WarpBailout>(loc)) {
+ callInfo.setImplicitlyUsedUnchecked();
+ return buildBailoutForColdIC(loc, CacheKind::Call);
+ }
+
+ bool needsThisCheck = false;
+ if (callInfo.constructing()) {
+ buildCreateThis(callInfo);
+ needsThisCheck = true;
+ }
+
+ MCall* call = makeCall(callInfo, needsThisCheck);
+ if (!call) {
+ return false;
+ }
+
+ current->add(call);
+ current->push(call);
+ return resumeAfter(call, loc);
+}
+
+bool WarpBuilder::build_Call(BytecodeLocation loc) { return buildCallOp(loc); }
+
+bool WarpBuilder::build_CallContent(BytecodeLocation loc) {
+ return buildCallOp(loc);
+}
+
+bool WarpBuilder::build_CallIgnoresRv(BytecodeLocation loc) {
+ return buildCallOp(loc);
+}
+
+bool WarpBuilder::build_CallIter(BytecodeLocation loc) {
+ return buildCallOp(loc);
+}
+
+bool WarpBuilder::build_CallContentIter(BytecodeLocation loc) {
+ return buildCallOp(loc);
+}
+
+bool WarpBuilder::build_New(BytecodeLocation loc) { return buildCallOp(loc); }
+
+bool WarpBuilder::build_NewContent(BytecodeLocation loc) {
+ return buildCallOp(loc);
+}
+
+bool WarpBuilder::build_SuperCall(BytecodeLocation loc) {
+ return buildCallOp(loc);
+}
+
+bool WarpBuilder::build_FunctionThis(BytecodeLocation loc) {
+ MOZ_ASSERT(info().hasFunMaybeLazy());
+
+ if (script_->strict()) {
+ // No need to wrap primitive |this| in strict mode.
+ current->pushSlot(info().thisSlot());
+ return true;
+ }
+
+ MOZ_ASSERT(!script_->hasNonSyntacticScope(),
+ "WarpOracle should have aborted compilation");
+
+ MDefinition* def = current->getSlot(info().thisSlot());
+ JSObject* globalThis = snapshot().globalLexicalEnvThis();
+
+ auto* thisObj = MBoxNonStrictThis::New(alloc(), def, globalThis);
+ current->add(thisObj);
+ current->push(thisObj);
+
+ return true;
+}
+
+bool WarpBuilder::build_GlobalThis(BytecodeLocation loc) {
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+ JSObject* obj = snapshot().globalLexicalEnvThis();
+ pushConstant(ObjectValue(*obj));
+ return true;
+}
+
+MConstant* WarpBuilder::globalLexicalEnvConstant() {
+ JSObject* globalLexical = snapshot().globalLexicalEnv();
+ return constant(ObjectValue(*globalLexical));
+}
+
+bool WarpBuilder::build_GetName(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ MDefinition* env = current->environmentChain();
+ return buildIC(loc, CacheKind::GetName, {env});
+}
+
+bool WarpBuilder::build_GetGName(BytecodeLocation loc) {
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+
+ MDefinition* env = globalLexicalEnvConstant();
+ return buildIC(loc, CacheKind::GetName, {env});
+}
+
+bool WarpBuilder::build_BindName(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ MDefinition* env = current->environmentChain();
+ return buildIC(loc, CacheKind::BindName, {env});
+}
+
+bool WarpBuilder::build_BindGName(BytecodeLocation loc) {
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+
+ if (const auto* snapshot = getOpSnapshot<WarpBindGName>(loc)) {
+ JSObject* globalEnv = snapshot->globalEnv();
+ pushConstant(ObjectValue(*globalEnv));
+ return true;
+ }
+
+ MDefinition* env = globalLexicalEnvConstant();
+ return buildIC(loc, CacheKind::BindName, {env});
+}
+
+bool WarpBuilder::build_GetProp(BytecodeLocation loc) {
+ MDefinition* val = current->pop();
+ return buildIC(loc, CacheKind::GetProp, {val});
+}
+
+bool WarpBuilder::build_GetElem(BytecodeLocation loc) {
+ MDefinition* id = current->pop();
+ MDefinition* val = current->pop();
+ return buildIC(loc, CacheKind::GetElem, {val, id});
+}
+
+bool WarpBuilder::build_SetProp(BytecodeLocation loc) {
+ MDefinition* val = current->pop();
+ MDefinition* obj = current->pop();
+ current->push(val);
+ return buildIC(loc, CacheKind::SetProp, {obj, val});
+}
+
+bool WarpBuilder::build_StrictSetProp(BytecodeLocation loc) {
+ return build_SetProp(loc);
+}
+
+bool WarpBuilder::build_SetName(BytecodeLocation loc) {
+ return build_SetProp(loc);
+}
+
+bool WarpBuilder::build_StrictSetName(BytecodeLocation loc) {
+ return build_SetProp(loc);
+}
+
+bool WarpBuilder::build_SetGName(BytecodeLocation loc) {
+ return build_SetProp(loc);
+}
+
+bool WarpBuilder::build_StrictSetGName(BytecodeLocation loc) {
+ return build_SetProp(loc);
+}
+
+bool WarpBuilder::build_InitGLexical(BytecodeLocation loc) {
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+
+ MDefinition* globalLexical = globalLexicalEnvConstant();
+ MDefinition* val = current->peek(-1);
+
+ return buildIC(loc, CacheKind::SetProp, {globalLexical, val});
+}
+
+bool WarpBuilder::build_SetElem(BytecodeLocation loc) {
+ MDefinition* val = current->pop();
+ MDefinition* id = current->pop();
+ MDefinition* obj = current->pop();
+ current->push(val);
+ return buildIC(loc, CacheKind::SetElem, {obj, id, val});
+}
+
+bool WarpBuilder::build_StrictSetElem(BytecodeLocation loc) {
+ return build_SetElem(loc);
+}
+
+bool WarpBuilder::build_DelProp(BytecodeLocation loc) {
+ PropertyName* name = loc.getPropertyName(script_);
+ MDefinition* obj = current->pop();
+ bool strict = loc.getOp() == JSOp::StrictDelProp;
+
+ MInstruction* ins = MDeleteProperty::New(alloc(), obj, name, strict);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_StrictDelProp(BytecodeLocation loc) {
+ return build_DelProp(loc);
+}
+
+bool WarpBuilder::build_DelElem(BytecodeLocation loc) {
+ MDefinition* id = current->pop();
+ MDefinition* obj = current->pop();
+ bool strict = loc.getOp() == JSOp::StrictDelElem;
+
+ MInstruction* ins = MDeleteElement::New(alloc(), obj, id, strict);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_StrictDelElem(BytecodeLocation loc) {
+ return build_DelElem(loc);
+}
+
+bool WarpBuilder::build_SetFunName(BytecodeLocation loc) {
+ FunctionPrefixKind prefixKind = loc.getFunctionPrefixKind();
+ MDefinition* name = current->pop();
+ MDefinition* fun = current->pop();
+
+ MSetFunName* ins = MSetFunName::New(alloc(), fun, name, uint8_t(prefixKind));
+ current->add(ins);
+ current->push(fun);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_PushLexicalEnv(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ const auto* snapshot = getOpSnapshot<WarpLexicalEnvironment>(loc);
+ MOZ_ASSERT(snapshot);
+
+ MDefinition* env = current->environmentChain();
+ MConstant* templateCst = constant(ObjectValue(*snapshot->templateObj()));
+
+ auto* ins = MNewLexicalEnvironmentObject::New(alloc(), templateCst);
+ current->add(ins);
+
+#ifdef DEBUG
+ // Assert in debug mode we can elide the post write barrier.
+ current->add(MAssertCanElidePostWriteBarrier::New(alloc(), ins, env));
+#endif
+
+ // Initialize the object's reserved slots. No post barrier is needed here,
+ // for the same reason as in buildNamedLambdaEnv.
+ current->add(MStoreFixedSlot::NewUnbarriered(
+ alloc(), ins, EnvironmentObject::enclosingEnvironmentSlot(), env));
+
+ current->setEnvironmentChain(ins);
+ return true;
+}
+
+bool WarpBuilder::build_PushClassBodyEnv(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ const auto* snapshot = getOpSnapshot<WarpClassBodyEnvironment>(loc);
+ MOZ_ASSERT(snapshot);
+
+ MDefinition* env = current->environmentChain();
+ MConstant* templateCst = constant(ObjectValue(*snapshot->templateObj()));
+
+ auto* ins = MNewClassBodyEnvironmentObject::New(alloc(), templateCst);
+ current->add(ins);
+
+#ifdef DEBUG
+ // Assert in debug mode we can elide the post write barrier.
+ current->add(MAssertCanElidePostWriteBarrier::New(alloc(), ins, env));
+#endif
+
+ // Initialize the object's reserved slots. No post barrier is needed here,
+ // for the same reason as in buildNamedLambdaEnv.
+ current->add(MStoreFixedSlot::NewUnbarriered(
+ alloc(), ins, EnvironmentObject::enclosingEnvironmentSlot(), env));
+
+ current->setEnvironmentChain(ins);
+ return true;
+}
+
+bool WarpBuilder::build_PopLexicalEnv(BytecodeLocation) {
+ MDefinition* enclosingEnv = walkEnvironmentChain(1);
+ if (!enclosingEnv) {
+ return false;
+ }
+ current->setEnvironmentChain(enclosingEnv);
+ return true;
+}
+
+bool WarpBuilder::build_FreshenLexicalEnv(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ const auto* snapshot = getOpSnapshot<WarpLexicalEnvironment>(loc);
+ MOZ_ASSERT(snapshot);
+
+ MDefinition* enclosingEnv = walkEnvironmentChain(1);
+ if (!enclosingEnv) {
+ return false;
+ }
+
+ MDefinition* env = current->environmentChain();
+ MConstant* templateCst = constant(ObjectValue(*snapshot->templateObj()));
+
+ auto* templateObj = snapshot->templateObj();
+ auto* scope = &templateObj->scope();
+ MOZ_ASSERT(scope->hasEnvironment());
+
+ auto* ins = MNewLexicalEnvironmentObject::New(alloc(), templateCst);
+ current->add(ins);
+
+#ifdef DEBUG
+ // Assert in debug mode we can elide the post write barrier.
+ current->add(
+ MAssertCanElidePostWriteBarrier::New(alloc(), ins, enclosingEnv));
+#endif
+
+ // Initialize the object's reserved slots. No post barrier is needed here,
+ // for the same reason as in buildNamedLambdaEnv.
+ current->add(MStoreFixedSlot::NewUnbarriered(
+ alloc(), ins, EnvironmentObject::enclosingEnvironmentSlot(),
+ enclosingEnv));
+
+ // Copy environment slots.
+ MSlots* envSlots = nullptr;
+ MSlots* slots = nullptr;
+ for (BindingIter iter(scope); iter; iter++) {
+ auto loc = iter.location();
+ if (loc.kind() != BindingLocation::Kind::Environment) {
+ MOZ_ASSERT(loc.kind() == BindingLocation::Kind::Frame);
+ continue;
+ }
+
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ uint32_t slot = loc.slot();
+ uint32_t numFixedSlots = templateObj->numFixedSlots();
+ if (slot >= numFixedSlots) {
+ if (!envSlots) {
+ envSlots = MSlots::New(alloc(), env);
+ current->add(envSlots);
+ }
+ if (!slots) {
+ slots = MSlots::New(alloc(), ins);
+ current->add(slots);
+ }
+
+ uint32_t dynamicSlot = slot - numFixedSlots;
+
+ auto* load = MLoadDynamicSlot::New(alloc(), envSlots, dynamicSlot);
+ current->add(load);
+
+#ifdef DEBUG
+ // Assert in debug mode we can elide the post write barrier.
+ current->add(MAssertCanElidePostWriteBarrier::New(alloc(), ins, load));
+#endif
+
+ current->add(
+ MStoreDynamicSlot::NewUnbarriered(alloc(), slots, dynamicSlot, load));
+ } else {
+ auto* load = MLoadFixedSlot::New(alloc(), env, slot);
+ current->add(load);
+
+#ifdef DEBUG
+ // Assert in debug mode we can elide the post write barrier.
+ current->add(MAssertCanElidePostWriteBarrier::New(alloc(), ins, load));
+#endif
+
+ current->add(MStoreFixedSlot::NewUnbarriered(alloc(), ins, slot, load));
+ }
+ }
+
+ current->setEnvironmentChain(ins);
+ return true;
+}
+
+bool WarpBuilder::build_RecreateLexicalEnv(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ const auto* snapshot = getOpSnapshot<WarpLexicalEnvironment>(loc);
+ MOZ_ASSERT(snapshot);
+
+ MDefinition* enclosingEnv = walkEnvironmentChain(1);
+ if (!enclosingEnv) {
+ return false;
+ }
+
+ MConstant* templateCst = constant(ObjectValue(*snapshot->templateObj()));
+
+ auto* ins = MNewLexicalEnvironmentObject::New(alloc(), templateCst);
+ current->add(ins);
+
+#ifdef DEBUG
+ // Assert in debug mode we can elide the post write barrier.
+ current->add(
+ MAssertCanElidePostWriteBarrier::New(alloc(), ins, enclosingEnv));
+#endif
+
+ // Initialize the object's reserved slots. No post barrier is needed here,
+ // for the same reason as in buildNamedLambdaEnv.
+ current->add(MStoreFixedSlot::NewUnbarriered(
+ alloc(), ins, EnvironmentObject::enclosingEnvironmentSlot(),
+ enclosingEnv));
+
+ current->setEnvironmentChain(ins);
+ return true;
+}
+
+bool WarpBuilder::build_PushVarEnv(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ const auto* snapshot = getOpSnapshot<WarpVarEnvironment>(loc);
+ MOZ_ASSERT(snapshot);
+
+ MDefinition* env = current->environmentChain();
+ MConstant* templateCst = constant(ObjectValue(*snapshot->templateObj()));
+
+ auto* ins = MNewVarEnvironmentObject::New(alloc(), templateCst);
+ current->add(ins);
+
+#ifdef DEBUG
+ // Assert in debug mode we can elide the post write barrier.
+ current->add(MAssertCanElidePostWriteBarrier::New(alloc(), ins, env));
+#endif
+
+ // Initialize the object's reserved slots. No post barrier is needed here,
+ // for the same reason as in buildNamedLambdaEnv.
+ current->add(MStoreFixedSlot::NewUnbarriered(
+ alloc(), ins, EnvironmentObject::enclosingEnvironmentSlot(), env));
+
+ current->setEnvironmentChain(ins);
+ return true;
+}
+
+bool WarpBuilder::build_ImplicitThis(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ PropertyName* name = loc.getPropertyName(script_);
+ MDefinition* env = current->environmentChain();
+
+ auto* ins = MImplicitThis::New(alloc(), env, name);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_CheckClassHeritage(BytecodeLocation loc) {
+ MDefinition* def = current->pop();
+ auto* ins = MCheckClassHeritage::New(alloc(), def);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_CheckThis(BytecodeLocation loc) {
+ MDefinition* def = current->pop();
+ auto* ins = MCheckThis::New(alloc(), def);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_CheckThisReinit(BytecodeLocation loc) {
+ MDefinition* def = current->pop();
+ auto* ins = MCheckThisReinit::New(alloc(), def);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_Generator(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ MDefinition* callee = getCallee();
+ MDefinition* environmentChain = current->environmentChain();
+ MDefinition* argsObj = info().needsArgsObj() ? current->argumentsObject()
+ : constant(Int32Value(0));
+
+ MGenerator* generator =
+ MGenerator::New(alloc(), callee, environmentChain, argsObj);
+
+ current->add(generator);
+ current->push(generator);
+ return resumeAfter(generator, loc);
+}
+
+bool WarpBuilder::build_AfterYield(BytecodeLocation loc) {
+ // Unreachable blocks don't need to generate a bail.
+ if (hasTerminatedBlock()) {
+ return true;
+ }
+
+ // This comes after a yield, which we generate as a return,
+ // so we know this should be unreachable code.
+ //
+ // We emit an unreachable bail for this, which will assert if we
+ // ever execute this.
+ //
+  // We use an Unreachable bail instead of MUnreachable because MUnreachable
+ // is a control instruction, and injecting it in the middle of a block
+ // causes various graph state assertions to fail.
+ MBail* bail = MBail::New(alloc(), BailoutKind::Unreachable);
+ current->add(bail);
+
+ return true;
+}
+
+bool WarpBuilder::build_FinalYieldRval(BytecodeLocation loc) {
+ MDefinition* gen = current->pop();
+
+ auto setSlotNull = [this, gen](size_t slot) {
+ auto* ins = MStoreFixedSlot::NewBarriered(alloc(), gen, slot,
+ constant(NullValue()));
+ current->add(ins);
+ };
+
+ // Close the generator
+ setSlotNull(AbstractGeneratorObject::calleeSlot());
+ setSlotNull(AbstractGeneratorObject::envChainSlot());
+ setSlotNull(AbstractGeneratorObject::argsObjectSlot());
+ setSlotNull(AbstractGeneratorObject::stackStorageSlot());
+ setSlotNull(AbstractGeneratorObject::resumeIndexSlot());
+
+ // Return
+ return build_RetRval(loc);
+}
+
+bool WarpBuilder::build_AsyncResolve(BytecodeLocation loc) {
+ MDefinition* generator = current->pop();
+ MDefinition* valueOrReason = current->pop();
+ auto resolveKind = loc.getAsyncFunctionResolveKind();
+
+ MAsyncResolve* resolve =
+ MAsyncResolve::New(alloc(), generator, valueOrReason, resolveKind);
+ current->add(resolve);
+ current->push(resolve);
+ return resumeAfter(resolve, loc);
+}
+
+bool WarpBuilder::build_ResumeKind(BytecodeLocation loc) {
+ GeneratorResumeKind resumeKind = loc.resumeKind();
+
+ current->push(constant(Int32Value(static_cast<int32_t>(resumeKind))));
+ return true;
+}
+
+bool WarpBuilder::build_CheckResumeKind(BytecodeLocation loc) {
+ // Outside of `yield*`, this is normally unreachable code in Warp,
+ // so we just manipulate the stack appropriately to ensure correct
+ // MIR generation.
+ //
+  // However, `yield*` emits a forced generator return that can be
+  // Warp-compiled. To handle these semantics correctly, we also generate
+  // a bailout so that the forced generator return runs in Baseline.
+ MDefinition* resumeKind = current->pop();
+ MDefinition* gen = current->pop();
+ MDefinition* rval = current->peek(-1);
+
+ // Mark operands as implicitly used.
+ resumeKind->setImplicitlyUsedUnchecked();
+ gen->setImplicitlyUsedUnchecked();
+ rval->setImplicitlyUsedUnchecked();
+
+ // Bail out if we encounter CheckResumeKind.
+ MBail* bail = MBail::New(alloc(), BailoutKind::Inevitable);
+ current->add(bail);
+ current->setAlwaysBails();
+
+ return true;
+}
+
+bool WarpBuilder::build_CanSkipAwait(BytecodeLocation loc) {
+ MDefinition* val = current->pop();
+
+ MCanSkipAwait* canSkip = MCanSkipAwait::New(alloc(), val);
+ current->add(canSkip);
+
+ current->push(val);
+ current->push(canSkip);
+
+ return resumeAfter(canSkip, loc);
+}
+
+bool WarpBuilder::build_MaybeExtractAwaitValue(BytecodeLocation loc) {
+ MDefinition* canSkip = current->pop();
+ MDefinition* value = current->pop();
+
+ MMaybeExtractAwaitValue* extracted =
+ MMaybeExtractAwaitValue::New(alloc(), value, canSkip);
+ current->add(extracted);
+
+ current->push(extracted);
+ current->push(canSkip);
+
+ return resumeAfter(extracted, loc);
+}
+
+bool WarpBuilder::build_InitialYield(BytecodeLocation loc) {
+ MDefinition* gen = current->pop();
+ return buildSuspend(loc, gen, gen);
+}
+
+bool WarpBuilder::build_Await(BytecodeLocation loc) {
+ MDefinition* gen = current->pop();
+ MDefinition* promiseOrGenerator = current->pop();
+
+ return buildSuspend(loc, gen, promiseOrGenerator);
+}
+bool WarpBuilder::build_Yield(BytecodeLocation loc) { return build_Await(loc); }
+
+bool WarpBuilder::buildSuspend(BytecodeLocation loc, MDefinition* gen,
+ MDefinition* retVal) {
+ // If required, unbox the generator object explicitly and infallibly.
+ //
+ // This is done to avoid fuzz-bugs where ApplyTypeInformation does the
+ // unboxing, and generates fallible unboxes which can lead to torn object
+ // state due to `bailAfter`.
+ MDefinition* genObj = gen;
+ if (genObj->type() != MIRType::Object) {
+ auto* unbox =
+ MUnbox::New(alloc(), gen, MIRType::Object, MUnbox::Mode::Infallible);
+ current->add(unbox);
+
+ genObj = unbox;
+ }
+
+ int32_t slotsToCopy = current->stackDepth() - info().firstLocalSlot();
+ MOZ_ASSERT(slotsToCopy >= 0);
+ if (slotsToCopy > 0) {
+ auto* arrayObj = MLoadFixedSlotAndUnbox::New(
+ alloc(), genObj, AbstractGeneratorObject::stackStorageSlot(),
+ MUnbox::Mode::Infallible, MIRType::Object);
+ current->add(arrayObj);
+
+ auto* stackStorage = MElements::New(alloc(), arrayObj);
+ current->add(stackStorage);
+
+ for (int32_t i = 0; i < slotsToCopy; i++) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+ // Use peekUnchecked because we're also writing out the argument slots
+ int32_t peek = -slotsToCopy + i;
+ MDefinition* stackElem = current->peekUnchecked(peek);
+ auto* store = MStoreElement::NewUnbarriered(
+ alloc(), stackStorage, constant(Int32Value(i)), stackElem,
+ /* needsHoleCheck = */ false);
+
+ current->add(store);
+ current->add(MPostWriteBarrier::New(alloc(), arrayObj, stackElem));
+ }
+
+ auto* len = constant(Int32Value(slotsToCopy - 1));
+
+ auto* setInitLength =
+ MSetInitializedLength::New(alloc(), stackStorage, len);
+ current->add(setInitLength);
+
+ auto* setLength = MSetArrayLength::New(alloc(), stackStorage, len);
+ current->add(setLength);
+ }
+
+  // Update the generator object state.
+ uint32_t resumeIndex = loc.getResumeIndex();
+
+  // This store is unbarriered: it only ever stores an integer, which the GC
+  // does not need to trace.
+ current->add(MStoreFixedSlot::NewUnbarriered(
+ alloc(), genObj, AbstractGeneratorObject::resumeIndexSlot(),
+ constant(Int32Value(resumeIndex))));
+
+ // This store is barriered because it stores an object value.
+ current->add(MStoreFixedSlot::NewBarriered(
+ alloc(), genObj, AbstractGeneratorObject::envChainSlot(),
+ current->environmentChain()));
+
+ current->add(
+ MPostWriteBarrier::New(alloc(), genObj, current->environmentChain()));
+
+  // GeneratorReturn will return from the method; however, to support MIR
+  // generation it isn't treated like the end of a block.
+ MGeneratorReturn* ret = MGeneratorReturn::New(alloc(), retVal);
+ current->add(ret);
+
+  // To ensure the rest of the MIR generation looks correct, fill the stack
+  // with appropriately typed MUnreachableResult instructions for the values
+  // this opcode pushes.
+ auto* unreachableResumeKind =
+ MUnreachableResult::New(alloc(), MIRType::Int32);
+ current->add(unreachableResumeKind);
+ current->push(unreachableResumeKind);
+
+ auto* unreachableGenerator =
+ MUnreachableResult::New(alloc(), MIRType::Object);
+ current->add(unreachableGenerator);
+ current->push(unreachableGenerator);
+
+ auto* unreachableRval = MUnreachableResult::New(alloc(), MIRType::Value);
+ current->add(unreachableRval);
+ current->push(unreachableRval);
+
+ return true;
+}
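As a rough standalone sketch (not part of the patch), the slot-copy loop above
maps interpreter stack values onto dense-array indices like this, assuming a
made-up stack depth and first-local slot:

#include <cstdio>

int main() {
  int stackDepth = 7;      // assumed current stack depth
  int firstLocalSlot = 4;  // assumed first local slot
  int slotsToCopy = stackDepth - firstLocalSlot;  // 3 values to save

  for (int i = 0; i < slotsToCopy; i++) {
    int peek = -slotsToCopy + i;  // peek(-3), peek(-2), peek(-1)
    printf("stack peek(%d) -> stackStorage[%d]\n", peek, i);
  }
  // The length constant in buildSuspend is slotsToCopy - 1, i.e. the index
  // of the last element written above.
  return 0;
}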
+
+bool WarpBuilder::build_AsyncAwait(BytecodeLocation loc) {
+ MDefinition* gen = current->pop();
+ MDefinition* value = current->pop();
+
+ MAsyncAwait* asyncAwait = MAsyncAwait::New(alloc(), value, gen);
+ current->add(asyncAwait);
+ current->push(asyncAwait);
+ return resumeAfter(asyncAwait, loc);
+}
+
+bool WarpBuilder::build_CheckReturn(BytecodeLocation loc) {
+ MOZ_ASSERT(!script_->noScriptRval());
+
+ MDefinition* returnValue = current->getSlot(info().returnValueSlot());
+ MDefinition* thisValue = current->pop();
+
+ auto* ins = MCheckReturn::New(alloc(), returnValue, thisValue);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+void WarpBuilder::buildCheckLexicalOp(BytecodeLocation loc) {
+ JSOp op = loc.getOp();
+ MOZ_ASSERT(op == JSOp::CheckLexical || op == JSOp::CheckAliasedLexical);
+
+ MDefinition* input = current->pop();
+ MInstruction* lexicalCheck = MLexicalCheck::New(alloc(), input);
+ current->add(lexicalCheck);
+ current->push(lexicalCheck);
+
+ if (snapshot().bailoutInfo().failedLexicalCheck()) {
+ // If we have previously had a failed lexical check in Ion, we want to avoid
+ // hoisting any lexical checks, which can cause spurious failures. In this
+ // case, we also have to be careful not to hoist any loads of this lexical
+ // past the check. For unaliased lexical variables, we can set the local
+ // slot to create a dependency (see below). For aliased lexicals, that
+ // doesn't work, so we disable LICM instead.
+ lexicalCheck->setNotMovable();
+ if (op == JSOp::CheckAliasedLexical) {
+ mirGen().disableLICM();
+ }
+ }
+
+ if (op == JSOp::CheckLexical) {
+ // Set the local slot so that a subsequent GetLocal without a CheckLexical
+ // (the frontend can elide lexical checks) doesn't let a definition with
+ // MIRType::MagicUninitializedLexical escape to arbitrary MIR instructions.
+ // Note that in this case the GetLocal would be unreachable because we throw
+ // an exception here, but we still generate MIR instructions for it.
+ uint32_t slot = info().localSlot(loc.local());
+ current->setSlot(slot, lexicalCheck);
+ }
+}
+
+bool WarpBuilder::build_CheckLexical(BytecodeLocation loc) {
+ buildCheckLexicalOp(loc);
+ return true;
+}
+
+bool WarpBuilder::build_CheckAliasedLexical(BytecodeLocation loc) {
+ buildCheckLexicalOp(loc);
+ return true;
+}
+
+bool WarpBuilder::build_InitHomeObject(BytecodeLocation loc) {
+ MDefinition* homeObject = current->pop();
+ MDefinition* function = current->pop();
+
+ current->add(MPostWriteBarrier::New(alloc(), function, homeObject));
+
+ auto* ins = MInitHomeObject::New(alloc(), function, homeObject);
+ current->add(ins);
+ current->push(ins);
+ return true;
+}
+
+bool WarpBuilder::build_SuperBase(BytecodeLocation) {
+ MDefinition* callee = current->pop();
+
+ auto* homeObject = MHomeObject::New(alloc(), callee);
+ current->add(homeObject);
+
+ auto* superBase = MHomeObjectSuperBase::New(alloc(), homeObject);
+ current->add(superBase);
+ current->push(superBase);
+ return true;
+}
+
+bool WarpBuilder::build_SuperFun(BytecodeLocation) {
+ MDefinition* callee = current->pop();
+ auto* ins = MSuperFunction::New(alloc(), callee);
+ current->add(ins);
+ current->push(ins);
+ return true;
+}
+
+bool WarpBuilder::build_BuiltinObject(BytecodeLocation loc) {
+ if (auto* snapshot = getOpSnapshot<WarpBuiltinObject>(loc)) {
+ JSObject* builtin = snapshot->builtin();
+ pushConstant(ObjectValue(*builtin));
+ return true;
+ }
+
+ auto kind = loc.getBuiltinObjectKind();
+ auto* ins = MBuiltinObject::New(alloc(), kind);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_GetIntrinsic(BytecodeLocation loc) {
+ if (auto* snapshot = getOpSnapshot<WarpGetIntrinsic>(loc)) {
+ Value intrinsic = snapshot->intrinsic();
+ pushConstant(intrinsic);
+ return true;
+ }
+
+ PropertyName* name = loc.getPropertyName(script_);
+ MCallGetIntrinsicValue* ins = MCallGetIntrinsicValue::New(alloc(), name);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_ImportMeta(BytecodeLocation loc) {
+ ModuleObject* moduleObj = scriptSnapshot()->moduleObject();
+ MOZ_ASSERT(moduleObj);
+
+ MModuleMetadata* ins = MModuleMetadata::New(alloc(), moduleObj);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_CallSiteObj(BytecodeLocation loc) {
+ return build_Object(loc);
+}
+
+bool WarpBuilder::build_NewArray(BytecodeLocation loc) {
+ return buildIC(loc, CacheKind::NewArray, {});
+}
+
+bool WarpBuilder::build_NewObject(BytecodeLocation loc) {
+ return buildIC(loc, CacheKind::NewObject, {});
+}
+
+bool WarpBuilder::build_NewInit(BytecodeLocation loc) {
+ return build_NewObject(loc);
+}
+
+bool WarpBuilder::build_Object(BytecodeLocation loc) {
+ JSObject* obj = loc.getObject(script_);
+ MConstant* objConst = constant(ObjectValue(*obj));
+
+ current->push(objConst);
+ return true;
+}
+
+bool WarpBuilder::buildInitPropGetterSetterOp(BytecodeLocation loc) {
+ PropertyName* name = loc.getPropertyName(script_);
+ MDefinition* value = current->pop();
+ MDefinition* obj = current->peek(-1);
+
+ auto* ins = MInitPropGetterSetter::New(alloc(), obj, value, name);
+ current->add(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_InitPropGetter(BytecodeLocation loc) {
+ return buildInitPropGetterSetterOp(loc);
+}
+
+bool WarpBuilder::build_InitPropSetter(BytecodeLocation loc) {
+ return buildInitPropGetterSetterOp(loc);
+}
+
+bool WarpBuilder::build_InitHiddenPropGetter(BytecodeLocation loc) {
+ return buildInitPropGetterSetterOp(loc);
+}
+
+bool WarpBuilder::build_InitHiddenPropSetter(BytecodeLocation loc) {
+ return buildInitPropGetterSetterOp(loc);
+}
+
+bool WarpBuilder::buildInitElemGetterSetterOp(BytecodeLocation loc) {
+ MDefinition* value = current->pop();
+ MDefinition* id = current->pop();
+ MDefinition* obj = current->peek(-1);
+
+ auto* ins = MInitElemGetterSetter::New(alloc(), obj, id, value);
+ current->add(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_InitElemGetter(BytecodeLocation loc) {
+ return buildInitElemGetterSetterOp(loc);
+}
+
+bool WarpBuilder::build_InitElemSetter(BytecodeLocation loc) {
+ return buildInitElemGetterSetterOp(loc);
+}
+
+bool WarpBuilder::build_InitHiddenElemGetter(BytecodeLocation loc) {
+ return buildInitElemGetterSetterOp(loc);
+}
+
+bool WarpBuilder::build_InitHiddenElemSetter(BytecodeLocation loc) {
+ return buildInitElemGetterSetterOp(loc);
+}
+
+bool WarpBuilder::build_In(BytecodeLocation loc) {
+ MDefinition* obj = current->pop();
+ MDefinition* id = current->pop();
+ return buildIC(loc, CacheKind::In, {id, obj});
+}
+
+bool WarpBuilder::build_HasOwn(BytecodeLocation loc) {
+ MDefinition* obj = current->pop();
+ MDefinition* id = current->pop();
+ return buildIC(loc, CacheKind::HasOwn, {id, obj});
+}
+
+bool WarpBuilder::build_CheckPrivateField(BytecodeLocation loc) {
+ MDefinition* id = current->peek(-1);
+ MDefinition* obj = current->peek(-2);
+ return buildIC(loc, CacheKind::CheckPrivateField, {obj, id});
+}
+
+bool WarpBuilder::build_NewPrivateName(BytecodeLocation loc) {
+ JSAtom* name = loc.getAtom(script_);
+
+ auto* ins = MNewPrivateName::New(alloc(), name);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_Instanceof(BytecodeLocation loc) {
+ MDefinition* rhs = current->pop();
+ MDefinition* obj = current->pop();
+ return buildIC(loc, CacheKind::InstanceOf, {obj, rhs});
+}
+
+bool WarpBuilder::build_NewTarget(BytecodeLocation loc) {
+ MOZ_ASSERT(script_->isFunction());
+ MOZ_ASSERT(info().hasFunMaybeLazy());
+ MOZ_ASSERT(!scriptSnapshot()->isArrowFunction());
+
+ if (inlineCallInfo()) {
+ if (inlineCallInfo()->constructing()) {
+ current->push(inlineCallInfo()->getNewTarget());
+ } else {
+ pushConstant(UndefinedValue());
+ }
+ return true;
+ }
+
+ MNewTarget* ins = MNewTarget::New(alloc());
+ current->add(ins);
+ current->push(ins);
+ return true;
+}
+
+bool WarpBuilder::build_CheckIsObj(BytecodeLocation loc) {
+ CheckIsObjectKind kind = loc.getCheckIsObjectKind();
+
+ MDefinition* toCheck = current->peek(-1);
+ if (toCheck->type() == MIRType::Object) {
+ toCheck->setImplicitlyUsedUnchecked();
+ return true;
+ }
+
+ MDefinition* val = current->pop();
+ MCheckIsObj* ins = MCheckIsObj::New(alloc(), val, uint8_t(kind));
+ current->add(ins);
+ current->push(ins);
+ return true;
+}
+
+bool WarpBuilder::build_CheckObjCoercible(BytecodeLocation loc) {
+ MDefinition* val = current->pop();
+ MCheckObjCoercible* ins = MCheckObjCoercible::New(alloc(), val);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+MInstruction* WarpBuilder::buildLoadSlot(MDefinition* obj,
+ uint32_t numFixedSlots,
+ uint32_t slot) {
+ if (slot < numFixedSlots) {
+ MLoadFixedSlot* load = MLoadFixedSlot::New(alloc(), obj, slot);
+ current->add(load);
+ return load;
+ }
+
+ MSlots* slots = MSlots::New(alloc(), obj);
+ current->add(slots);
+
+ MLoadDynamicSlot* load =
+ MLoadDynamicSlot::New(alloc(), slots, slot - numFixedSlots);
+ current->add(load);
+ return load;
+}
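A minimal standalone sketch of the fixed/dynamic slot split buildLoadSlot
relies on, with an assumed fixed-slot count; plain C++ rather than the
engine's object layout code:

#include <cassert>
#include <cstdint>
#include <cstdio>

struct SlotRef {
  bool isFixed;
  uint32_t index;  // fixed-slot index, or index into the dynamic slots
};

SlotRef resolveSlot(uint32_t slot, uint32_t numFixedSlots) {
  if (slot < numFixedSlots) {
    return {true, slot};
  }
  return {false, slot - numFixedSlots};
}

int main() {
  // With an assumed 4 fixed slots, slot 2 is fixed and slot 6 is dynamic[2].
  SlotRef a = resolveSlot(2, 4);
  SlotRef b = resolveSlot(6, 4);
  assert(a.isFixed && a.index == 2);
  assert(!b.isFixed && b.index == 2);
  printf("slot 2 -> fixed[%u], slot 6 -> dynamic[%u]\n", (unsigned)a.index,
         (unsigned)b.index);
  return 0;
}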
+
+bool WarpBuilder::build_GetImport(BytecodeLocation loc) {
+ auto* snapshot = getOpSnapshot<WarpGetImport>(loc);
+
+ ModuleEnvironmentObject* targetEnv = snapshot->targetEnv();
+
+ // Load the target environment slot.
+ MConstant* obj = constant(ObjectValue(*targetEnv));
+ auto* load = buildLoadSlot(obj, snapshot->numFixedSlots(), snapshot->slot());
+
+ if (snapshot->needsLexicalCheck()) {
+ // TODO: IonBuilder has code to mark non-movable. See buildCheckLexicalOp.
+ MInstruction* lexicalCheck = MLexicalCheck::New(alloc(), load);
+ current->add(lexicalCheck);
+ current->push(lexicalCheck);
+ } else {
+ current->push(load);
+ }
+
+ return true;
+}
+
+bool WarpBuilder::build_GetPropSuper(BytecodeLocation loc) {
+ MDefinition* obj = current->pop();
+ MDefinition* receiver = current->pop();
+ return buildIC(loc, CacheKind::GetPropSuper, {obj, receiver});
+}
+
+bool WarpBuilder::build_GetElemSuper(BytecodeLocation loc) {
+ MDefinition* obj = current->pop();
+ MDefinition* id = current->pop();
+ MDefinition* receiver = current->pop();
+ return buildIC(loc, CacheKind::GetElemSuper, {obj, id, receiver});
+}
+
+bool WarpBuilder::build_InitProp(BytecodeLocation loc) {
+ MDefinition* val = current->pop();
+ MDefinition* obj = current->peek(-1);
+ return buildIC(loc, CacheKind::SetProp, {obj, val});
+}
+
+bool WarpBuilder::build_InitLockedProp(BytecodeLocation loc) {
+ return build_InitProp(loc);
+}
+
+bool WarpBuilder::build_InitHiddenProp(BytecodeLocation loc) {
+ return build_InitProp(loc);
+}
+
+bool WarpBuilder::build_InitElem(BytecodeLocation loc) {
+ MDefinition* val = current->pop();
+ MDefinition* id = current->pop();
+ MDefinition* obj = current->peek(-1);
+ return buildIC(loc, CacheKind::SetElem, {obj, id, val});
+}
+
+bool WarpBuilder::build_InitLockedElem(BytecodeLocation loc) {
+ return build_InitElem(loc);
+}
+
+bool WarpBuilder::build_InitHiddenElem(BytecodeLocation loc) {
+ return build_InitElem(loc);
+}
+
+bool WarpBuilder::build_InitElemArray(BytecodeLocation loc) {
+ MDefinition* val = current->pop();
+ MDefinition* obj = current->peek(-1);
+
+ // Note: getInitElemArrayIndex asserts the index fits in int32_t.
+ uint32_t index = loc.getInitElemArrayIndex();
+ MConstant* indexConst = constant(Int32Value(index));
+
+ // Note: InitArrayElemOperation asserts the index does not exceed the array's
+ // dense element capacity.
+
+ auto* elements = MElements::New(alloc(), obj);
+ current->add(elements);
+
+ if (val->type() == MIRType::MagicHole) {
+ val->setImplicitlyUsedUnchecked();
+ auto* store = MStoreHoleValueElement::New(alloc(), elements, indexConst);
+ current->add(store);
+ } else {
+ current->add(MPostWriteBarrier::New(alloc(), obj, val));
+ auto* store =
+ MStoreElement::NewUnbarriered(alloc(), elements, indexConst, val,
+ /* needsHoleCheck = */ false);
+ current->add(store);
+ }
+
+ auto* setLength = MSetInitializedLength::New(alloc(), elements, indexConst);
+ current->add(setLength);
+
+ return resumeAfter(setLength, loc);
+}
+
+bool WarpBuilder::build_InitElemInc(BytecodeLocation loc) {
+ MDefinition* val = current->pop();
+ MDefinition* index = current->pop();
+ MDefinition* obj = current->peek(-1);
+
+ // Push index + 1.
+ MConstant* constOne = constant(Int32Value(1));
+ MAdd* nextIndex = MAdd::New(alloc(), index, constOne, TruncateKind::Truncate);
+ current->add(nextIndex);
+ current->push(nextIndex);
+
+ return buildIC(loc, CacheKind::SetElem, {obj, index, val});
+}
+
+bool WarpBuilder::build_Lambda(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ MDefinition* env = current->environmentChain();
+
+ JSFunction* fun = loc.getFunction(script_);
+ MConstant* funConst = constant(ObjectValue(*fun));
+
+ auto* ins = MLambda::New(alloc(), env, funConst);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_FunWithProto(BytecodeLocation loc) {
+ MOZ_ASSERT(usesEnvironmentChain());
+
+ MDefinition* proto = current->pop();
+ MDefinition* env = current->environmentChain();
+
+ JSFunction* fun = loc.getFunction(script_);
+ MConstant* funConst = constant(ObjectValue(*fun));
+
+ auto* ins = MFunctionWithProto::New(alloc(), env, proto, funConst);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+}
+
+bool WarpBuilder::build_SpreadCall(BytecodeLocation loc) {
+ bool constructing = false;
+ CallInfo callInfo(alloc(), constructing, loc.resultIsPopped());
+ callInfo.initForSpreadCall(current);
+
+ if (auto* cacheIRSnapshot = getOpSnapshot<WarpCacheIR>(loc)) {
+ return transpileCall(loc, cacheIRSnapshot, &callInfo);
+ }
+
+ bool needsThisCheck = false;
+ MInstruction* call = makeSpreadCall(callInfo, needsThisCheck);
+ if (!call) {
+ return false;
+ }
+ call->setBailoutKind(BailoutKind::TooManyArguments);
+ current->add(call);
+ current->push(call);
+ return resumeAfter(call, loc);
+}
+
+bool WarpBuilder::build_SpreadNew(BytecodeLocation loc) {
+ bool constructing = true;
+ CallInfo callInfo(alloc(), constructing, loc.resultIsPopped());
+ callInfo.initForSpreadCall(current);
+
+ if (auto* cacheIRSnapshot = getOpSnapshot<WarpCacheIR>(loc)) {
+ return transpileCall(loc, cacheIRSnapshot, &callInfo);
+ }
+
+ buildCreateThis(callInfo);
+
+ bool needsThisCheck = true;
+ MInstruction* call = makeSpreadCall(callInfo, needsThisCheck);
+ if (!call) {
+ return false;
+ }
+ call->setBailoutKind(BailoutKind::TooManyArguments);
+ current->add(call);
+ current->push(call);
+ return resumeAfter(call, loc);
+}
+
+bool WarpBuilder::build_SpreadSuperCall(BytecodeLocation loc) {
+ return build_SpreadNew(loc);
+}
+
+bool WarpBuilder::build_OptimizeSpreadCall(BytecodeLocation loc) {
+ MDefinition* value = current->pop();
+ return buildIC(loc, CacheKind::OptimizeSpreadCall, {value});
+}
+
+bool WarpBuilder::build_Debugger(BytecodeLocation loc) {
+ // The |debugger;| statement will bail out to Baseline if the realm is a
+ // debuggee realm with an onDebuggerStatement hook.
+ MDebugger* debugger = MDebugger::New(alloc());
+ current->add(debugger);
+ return resumeAfter(debugger, loc);
+}
+
+bool WarpBuilder::build_TableSwitch(BytecodeLocation loc) {
+ int32_t low = loc.getTableSwitchLow();
+ int32_t high = loc.getTableSwitchHigh();
+ size_t numCases = high - low + 1;
+
+ MDefinition* input = current->pop();
+ MTableSwitch* tableswitch = MTableSwitch::New(alloc(), input, low, high);
+ current->end(tableswitch);
+
+ // Table mapping from target bytecode offset to MTableSwitch successor index.
+ // This prevents adding multiple predecessor/successor edges to the same
+ // target block, which isn't valid in MIR.
+ using TargetToSuccessorMap =
+ InlineMap<uint32_t, uint32_t, 8, DefaultHasher<uint32_t>,
+ SystemAllocPolicy>;
+ TargetToSuccessorMap targetToSuccessor;
+
+ // Create |default| edge.
+ {
+ BytecodeLocation defaultLoc = loc.getTableSwitchDefaultTarget();
+ uint32_t defaultOffset = defaultLoc.bytecodeToOffset(script_);
+
+ size_t index;
+ if (!tableswitch->addDefault(nullptr, &index)) {
+ return false;
+ }
+ if (!addPendingEdge(defaultLoc, current, index)) {
+ return false;
+ }
+ if (!targetToSuccessor.put(defaultOffset, index)) {
+ return false;
+ }
+ }
+
+ // Add all cases.
+ for (size_t i = 0; i < numCases; i++) {
+ BytecodeLocation caseLoc = loc.getTableSwitchCaseTarget(script_, i);
+ uint32_t caseOffset = caseLoc.bytecodeToOffset(script_);
+
+ size_t index;
+ if (auto p = targetToSuccessor.lookupForAdd(caseOffset)) {
+ index = p->value();
+ } else {
+ if (!tableswitch->addSuccessor(nullptr, &index)) {
+ return false;
+ }
+ if (!addPendingEdge(caseLoc, current, index)) {
+ return false;
+ }
+ if (!targetToSuccessor.add(p, caseOffset, index)) {
+ return false;
+ }
+ }
+ if (!tableswitch->addCase(index)) {
+ return false;
+ }
+ }
+
+ setTerminatedBlock();
+ return true;
+}
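A minimal standalone sketch of the edge-deduplication pattern described in the
comment above, using standard containers and made-up bytecode offsets in place
of MTableSwitch and InlineMap:

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

int main() {
  // Made-up bytecode offsets for the default target and each case target.
  uint32_t defaultOffset = 100;
  std::vector<uint32_t> caseOffsets = {120, 140, 120, 100};

  std::vector<uint32_t> successors;                   // one entry per edge
  std::unordered_map<uint32_t, size_t> targetToSucc;  // offset -> successor

  auto getSuccessor = [&](uint32_t offset) -> size_t {
    auto it = targetToSucc.find(offset);
    if (it != targetToSucc.end()) {
      return it->second;  // reuse the edge already created for this target
    }
    size_t index = successors.size();
    successors.push_back(offset);
    targetToSucc.emplace(offset, index);
    return index;
  };

  printf("default -> successor %zu\n", getSuccessor(defaultOffset));
  for (uint32_t off : caseOffsets) {
    printf("case target %u -> successor %zu\n", (unsigned)off,
           getSuccessor(off));
  }
  // Only three successors exist (offsets 100, 120, 140); repeated targets
  // share a single edge, mirroring the TargetToSuccessorMap above.
  return 0;
}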
+
+bool WarpBuilder::build_Rest(BytecodeLocation loc) {
+ auto* snapshot = getOpSnapshot<WarpRest>(loc);
+ Shape* shape = snapshot ? snapshot->shape() : nullptr;
+
+ // NOTE: Keep this code in sync with |ArgumentsReplacer|.
+
+ if (inlineCallInfo()) {
+ // If we are inlining, we know the actual arguments.
+ unsigned numActuals = inlineCallInfo()->argc();
+ unsigned numFormals = info().nargs() - 1;
+ unsigned numRest = numActuals > numFormals ? numActuals - numFormals : 0;
+
+ // TODO: support pre-tenuring.
+ gc::Heap heap = gc::Heap::Default;
+
+ // Allocate an array of the correct size.
+ MInstruction* newArray;
+ if (shape && gc::CanUseFixedElementsForArray(numRest)) {
+ auto* shapeConstant = MConstant::NewShape(alloc(), shape);
+ current->add(shapeConstant);
+ newArray = MNewArrayObject::New(alloc(), shapeConstant, numRest, heap);
+ } else {
+ MConstant* templateConst = constant(NullValue());
+ newArray = MNewArray::NewVM(alloc(), numRest, templateConst, heap);
+ }
+ current->add(newArray);
+ current->push(newArray);
+
+ if (numRest == 0) {
+ // No more updating to do.
+ return true;
+ }
+
+ MElements* elements = MElements::New(alloc(), newArray);
+ current->add(elements);
+
+ // Unroll the argument copy loop. We don't need to do any bounds or hole
+ // checking here.
+ MConstant* index = nullptr;
+ for (uint32_t i = numFormals; i < numActuals; i++) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ index = MConstant::New(alloc(), Int32Value(i - numFormals));
+ current->add(index);
+
+ MDefinition* arg = inlineCallInfo()->argv()[i];
+ MStoreElement* store =
+ MStoreElement::NewUnbarriered(alloc(), elements, index, arg,
+ /* needsHoleCheck = */ false);
+ current->add(store);
+ current->add(MPostWriteBarrier::New(alloc(), newArray, arg));
+ }
+
+ // Update the initialized length for all the (necessarily non-hole)
+ // elements added.
+ MSetInitializedLength* initLength =
+ MSetInitializedLength::New(alloc(), elements, index);
+ current->add(initLength);
+
+ return true;
+ }
+
+ MArgumentsLength* numActuals = MArgumentsLength::New(alloc());
+ current->add(numActuals);
+
+ // Pass in the number of actual arguments, the number of formals (not
+ // including the rest parameter slot itself), and the shape.
+ unsigned numFormals = info().nargs() - 1;
+ MRest* rest = MRest::New(alloc(), numActuals, numFormals, shape);
+ current->add(rest);
+ current->push(rest);
+ return true;
+}
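A minimal standalone sketch of how the inlined path above decides which actual
arguments land in the rest array, assuming made-up argument counts:

#include <cstdio>

int main() {
  // Assumed counts: a call f(1, 2, 3, 4, 5) to function f(a, b, ...rest),
  // so nargs() is 3 and numFormals (excluding the rest slot) is 2.
  unsigned numActuals = 5;
  unsigned numFormals = 2;
  unsigned numRest = numActuals > numFormals ? numActuals - numFormals : 0;

  for (unsigned i = numFormals; i < numActuals; i++) {
    printf("argv[%u] -> rest[%u]\n", i, i - numFormals);
  }
  printf("rest.length == %u\n", numRest);
  return 0;
}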
+
+bool WarpBuilder::build_Try(BytecodeLocation loc) {
+ graph().setHasTryBlock();
+
+ MBasicBlock* pred = current;
+ if (!startNewBlock(pred, loc.next())) {
+ return false;
+ }
+
+ pred->end(MGoto::New(alloc(), current));
+ return true;
+}
+
+bool WarpBuilder::build_Finally(BytecodeLocation loc) {
+ MOZ_ASSERT(graph().hasTryBlock());
+ return true;
+}
+
+bool WarpBuilder::build_Exception(BytecodeLocation) {
+ MOZ_CRASH("Unreachable because we skip catch-blocks");
+}
+
+bool WarpBuilder::build_Throw(BytecodeLocation loc) {
+ MDefinition* def = current->pop();
+
+ MThrow* ins = MThrow::New(alloc(), def);
+ current->add(ins);
+ if (!resumeAfter(ins, loc)) {
+ return false;
+ }
+
+ // Terminate the block.
+ current->end(MUnreachable::New(alloc()));
+ setTerminatedBlock();
+ return true;
+}
+
+bool WarpBuilder::build_ThrowSetConst(BytecodeLocation loc) {
+ auto* ins = MThrowRuntimeLexicalError::New(alloc(), JSMSG_BAD_CONST_ASSIGN);
+ current->add(ins);
+ if (!resumeAfter(ins, loc)) {
+ return false;
+ }
+
+ // Terminate the block.
+ current->end(MUnreachable::New(alloc()));
+ setTerminatedBlock();
+ return true;
+}
+
+bool WarpBuilder::build_ThrowMsg(BytecodeLocation loc) {
+ auto* ins = MThrowMsg::New(alloc(), loc.throwMsgKind());
+ current->add(ins);
+ if (!resumeAfter(ins, loc)) {
+ return false;
+ }
+
+ // Terminate the block.
+ current->end(MUnreachable::New(alloc()));
+ setTerminatedBlock();
+ return true;
+}
+
+bool WarpBuilder::buildIC(BytecodeLocation loc, CacheKind kind,
+ std::initializer_list<MDefinition*> inputs) {
+ MOZ_ASSERT(loc.opHasIC());
+
+ mozilla::DebugOnly<size_t> numInputs = inputs.size();
+ MOZ_ASSERT(numInputs == NumInputsForCacheKind(kind));
+
+ if (auto* cacheIRSnapshot = getOpSnapshot<WarpCacheIR>(loc)) {
+ return TranspileCacheIRToMIR(this, loc, cacheIRSnapshot, inputs);
+ }
+
+ if (getOpSnapshot<WarpBailout>(loc)) {
+ for (MDefinition* input : inputs) {
+ input->setImplicitlyUsedUnchecked();
+ }
+ return buildBailoutForColdIC(loc, kind);
+ }
+
+ if (const auto* inliningSnapshot = getOpSnapshot<WarpInlinedCall>(loc)) {
+ // The CallInfo will be initialized by the transpiler.
+ bool ignoresRval = BytecodeIsPopped(loc.toRawBytecode());
+ CallInfo callInfo(alloc(), /*constructing =*/false, ignoresRval);
+ callInfo.markAsInlined();
+
+ if (!TranspileCacheIRToMIR(this, loc, inliningSnapshot->cacheIRSnapshot(),
+ inputs, &callInfo)) {
+ return false;
+ }
+ return buildInlinedCall(loc, inliningSnapshot, callInfo);
+ }
+
+ // Work around std::initializer_list not defining operator[].
+ auto getInput = [&](size_t index) -> MDefinition* {
+ MOZ_ASSERT(index < numInputs);
+ return inputs.begin()[index];
+ };
+
+ switch (kind) {
+ case CacheKind::UnaryArith: {
+ MOZ_ASSERT(numInputs == 1);
+ auto* ins = MUnaryCache::New(alloc(), getInput(0));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::ToPropertyKey: {
+ MOZ_ASSERT(numInputs == 1);
+ auto* ins = MToPropertyKeyCache::New(alloc(), getInput(0));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::BinaryArith: {
+ MOZ_ASSERT(numInputs == 2);
+ auto* ins =
+ MBinaryCache::New(alloc(), getInput(0), getInput(1), MIRType::Value);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::Compare: {
+ MOZ_ASSERT(numInputs == 2);
+ auto* ins = MBinaryCache::New(alloc(), getInput(0), getInput(1),
+ MIRType::Boolean);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::In: {
+ MOZ_ASSERT(numInputs == 2);
+ auto* ins = MInCache::New(alloc(), getInput(0), getInput(1));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::HasOwn: {
+ MOZ_ASSERT(numInputs == 2);
+ // Note: the MHasOwnCache constructor takes obj/id instead of id/obj.
+ auto* ins = MHasOwnCache::New(alloc(), getInput(1), getInput(0));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::CheckPrivateField: {
+ MOZ_ASSERT(numInputs == 2);
+ auto* ins =
+ MCheckPrivateFieldCache::New(alloc(), getInput(0), getInput(1));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::InstanceOf: {
+ MOZ_ASSERT(numInputs == 2);
+ auto* ins = MInstanceOfCache::New(alloc(), getInput(0), getInput(1));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::BindName: {
+ MOZ_ASSERT(numInputs == 1);
+ auto* ins = MBindNameCache::New(alloc(), getInput(0));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::GetIterator: {
+ MOZ_ASSERT(numInputs == 1);
+ auto* ins = MGetIteratorCache::New(alloc(), getInput(0));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::GetName: {
+ MOZ_ASSERT(numInputs == 1);
+ auto* ins = MGetNameCache::New(alloc(), getInput(0));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::GetProp: {
+ MOZ_ASSERT(numInputs == 1);
+ PropertyName* name = loc.getPropertyName(script_);
+ MConstant* id = constant(StringValue(name));
+ MDefinition* val = getInput(0);
+ auto* ins = MGetPropertyCache::New(alloc(), val, id);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::GetElem: {
+ MOZ_ASSERT(numInputs == 2);
+ MDefinition* val = getInput(0);
+ auto* ins = MGetPropertyCache::New(alloc(), val, getInput(1));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::SetProp: {
+ MOZ_ASSERT(numInputs == 2);
+ PropertyName* name = loc.getPropertyName(script_);
+ MConstant* id = constant(StringValue(name));
+ bool strict = loc.isStrictSetOp();
+ auto* ins =
+ MSetPropertyCache::New(alloc(), getInput(0), id, getInput(1), strict);
+ current->add(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::SetElem: {
+ MOZ_ASSERT(numInputs == 3);
+ bool strict = loc.isStrictSetOp();
+ auto* ins = MSetPropertyCache::New(alloc(), getInput(0), getInput(1),
+ getInput(2), strict);
+ current->add(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::GetPropSuper: {
+ MOZ_ASSERT(numInputs == 2);
+ PropertyName* name = loc.getPropertyName(script_);
+ MConstant* id = constant(StringValue(name));
+ auto* ins =
+ MGetPropSuperCache::New(alloc(), getInput(0), getInput(1), id);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::GetElemSuper: {
+ MOZ_ASSERT(numInputs == 3);
+ // Note: CacheIR expects obj/id/receiver but MGetPropSuperCache takes
+ // obj/receiver/id so swap the last two inputs.
+ auto* ins = MGetPropSuperCache::New(alloc(), getInput(0), getInput(2),
+ getInput(1));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::OptimizeSpreadCall: {
+ MOZ_ASSERT(numInputs == 1);
+ auto* ins = MOptimizeSpreadCallCache::New(alloc(), getInput(0));
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::TypeOf: {
+      // Note: Warp does not have a TypeOf IC; it just inlines the operation.
+ MOZ_ASSERT(numInputs == 1);
+ auto* typeOf = MTypeOf::New(alloc(), getInput(0));
+ current->add(typeOf);
+
+ auto* ins = MTypeOfName::New(alloc(), typeOf);
+ current->add(ins);
+ current->push(ins);
+ return true;
+ }
+ case CacheKind::NewObject: {
+ auto* templateConst = constant(NullValue());
+ MNewObject* ins = MNewObject::NewVM(
+ alloc(), templateConst, gc::Heap::Default, MNewObject::ObjectLiteral);
+ current->add(ins);
+ current->push(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::NewArray: {
+ uint32_t length = loc.getNewArrayLength();
+ MConstant* templateConst = constant(NullValue());
+ MNewArray* ins =
+ MNewArray::NewVM(alloc(), length, templateConst, gc::Heap::Default);
+ current->add(ins);
+ current->push(ins);
+ return true;
+ }
+ case CacheKind::CloseIter: {
+ MOZ_ASSERT(numInputs == 1);
+ static_assert(sizeof(CompletionKind) == sizeof(uint8_t));
+ CompletionKind kind = loc.getCompletionKind();
+ auto* ins = MCloseIterCache::New(alloc(), getInput(0), uint8_t(kind));
+ current->add(ins);
+ return resumeAfter(ins, loc);
+ }
+ case CacheKind::GetIntrinsic:
+ case CacheKind::ToBool:
+ case CacheKind::Call:
+ // We're currently not using an IC or transpiling CacheIR for these kinds.
+ MOZ_CRASH("Unexpected kind");
+ }
+
+ return true;
+}
+
+bool WarpBuilder::buildBailoutForColdIC(BytecodeLocation loc, CacheKind kind) {
+ MOZ_ASSERT(loc.opHasIC());
+
+ MBail* bail = MBail::New(alloc(), BailoutKind::FirstExecution);
+ current->add(bail);
+ current->setAlwaysBails();
+
+ MIRType resultType;
+ switch (kind) {
+ case CacheKind::UnaryArith:
+ case CacheKind::BinaryArith:
+ case CacheKind::GetName:
+ case CacheKind::GetProp:
+ case CacheKind::GetElem:
+ case CacheKind::GetPropSuper:
+ case CacheKind::GetElemSuper:
+ case CacheKind::GetIntrinsic:
+ case CacheKind::Call:
+ case CacheKind::ToPropertyKey:
+ case CacheKind::OptimizeSpreadCall:
+ resultType = MIRType::Value;
+ break;
+ case CacheKind::BindName:
+ case CacheKind::GetIterator:
+ case CacheKind::NewArray:
+ case CacheKind::NewObject:
+ resultType = MIRType::Object;
+ break;
+ case CacheKind::TypeOf:
+ resultType = MIRType::String;
+ break;
+ case CacheKind::ToBool:
+ case CacheKind::Compare:
+ case CacheKind::In:
+ case CacheKind::HasOwn:
+ case CacheKind::CheckPrivateField:
+ case CacheKind::InstanceOf:
+ resultType = MIRType::Boolean;
+ break;
+ case CacheKind::SetProp:
+ case CacheKind::SetElem:
+ case CacheKind::CloseIter:
+ return true; // No result.
+ }
+
+ auto* ins = MUnreachableResult::New(alloc(), resultType);
+ current->add(ins);
+ current->push(ins);
+
+ return true;
+}
+
+class MOZ_RAII AutoAccumulateReturns {
+ MIRGraph& graph_;
+ MIRGraphReturns* prev_;
+
+ public:
+ AutoAccumulateReturns(MIRGraph& graph, MIRGraphReturns& returns)
+ : graph_(graph) {
+ prev_ = graph_.returnAccumulator();
+ graph_.setReturnAccumulator(&returns);
+ }
+ ~AutoAccumulateReturns() { graph_.setReturnAccumulator(prev_); }
+};
+
+bool WarpBuilder::buildInlinedCall(BytecodeLocation loc,
+ const WarpInlinedCall* inlineSnapshot,
+ CallInfo& callInfo) {
+ jsbytecode* pc = loc.toRawBytecode();
+
+ if (callInfo.isSetter()) {
+ // build_SetProp pushes the rhs argument onto the stack. Remove it
+ // in preparation for pushCallStack.
+ current->pop();
+ }
+
+ callInfo.setImplicitlyUsedUnchecked();
+
+ // Capture formals in the outer resume point.
+ if (!callInfo.pushCallStack(current)) {
+ return false;
+ }
+ MResumePoint* outerResumePoint =
+ MResumePoint::New(alloc(), current, pc, callInfo.inliningResumeMode());
+ if (!outerResumePoint) {
+ return false;
+ }
+ current->setOuterResumePoint(outerResumePoint);
+
+ // Pop formals again, except leave |callee| on stack for duration of call.
+ callInfo.popCallStack(current);
+ current->push(callInfo.callee());
+
+ // Build the graph.
+ CompileInfo* calleeCompileInfo = inlineSnapshot->info();
+ MIRGraphReturns returns(alloc());
+ AutoAccumulateReturns aar(graph(), returns);
+ WarpBuilder inlineBuilder(this, inlineSnapshot->scriptSnapshot(),
+ *calleeCompileInfo, &callInfo, outerResumePoint);
+ if (!inlineBuilder.buildInline()) {
+ // Note: Inlining only aborts on OOM. If inlining would fail for
+ // any other reason, we detect it in advance and don't inline.
+ return false;
+ }
+
+ // We mark scripts as uninlineable in BytecodeAnalysis if we cannot
+ // reach a return statement (without going through a catch/finally).
+ MOZ_ASSERT(!returns.empty());
+
+ // Create return block
+ BytecodeLocation postCall = loc.next();
+ MBasicBlock* prev = current;
+ if (!startNewEntryBlock(prev->stackDepth(), postCall)) {
+ return false;
+ }
+ // Restore previous value of callerResumePoint.
+ current->setCallerResumePoint(callerResumePoint());
+ current->inheritSlots(prev);
+
+ // Pop |callee|.
+ current->pop();
+
+ // Accumulate return values.
+ MDefinition* returnValue =
+ patchInlinedReturns(calleeCompileInfo, callInfo, returns, current);
+ if (!returnValue) {
+ return false;
+ }
+ current->push(returnValue);
+
+ // Initialize entry slots
+ if (!current->initEntrySlots(alloc())) {
+ return false;
+ }
+
+ return true;
+}
+
+MDefinition* WarpBuilder::patchInlinedReturns(CompileInfo* calleeCompileInfo,
+ CallInfo& callInfo,
+ MIRGraphReturns& exits,
+ MBasicBlock* returnBlock) {
+ if (exits.length() == 1) {
+ return patchInlinedReturn(calleeCompileInfo, callInfo, exits[0],
+ returnBlock);
+ }
+
+ // Accumulate multiple returns with a phi.
+ MPhi* phi = MPhi::New(alloc());
+ if (!phi->reserveLength(exits.length())) {
+ return nullptr;
+ }
+
+ for (auto* exit : exits) {
+ MDefinition* rdef =
+ patchInlinedReturn(calleeCompileInfo, callInfo, exit, returnBlock);
+ if (!rdef) {
+ return nullptr;
+ }
+ phi->addInput(rdef);
+ }
+ returnBlock->addPhi(phi);
+ return phi;
+}
+
+MDefinition* WarpBuilder::patchInlinedReturn(CompileInfo* calleeCompileInfo,
+ CallInfo& callInfo,
+ MBasicBlock* exit,
+ MBasicBlock* returnBlock) {
+ // Replace the MReturn in the exit block with an MGoto branching to
+ // the return block.
+ MDefinition* rdef = exit->lastIns()->toReturn()->input();
+ exit->discardLastIns();
+
+ // Constructors must be patched by the caller to always return an object.
+ // Derived class constructors contain extra bytecode to ensure an object
+ // is always returned, so no additional patching is needed.
+ if (callInfo.constructing() &&
+ !calleeCompileInfo->isDerivedClassConstructor()) {
+ auto* filter = MReturnFromCtor::New(alloc(), rdef, callInfo.thisArg());
+ exit->add(filter);
+ rdef = filter;
+ } else if (callInfo.isSetter()) {
+ // Setters return the rhs argument, not whatever value is returned.
+ rdef = callInfo.getArg(0);
+ }
+
+ exit->end(MGoto::New(alloc(), returnBlock));
+ if (!returnBlock->addPredecessorWithoutPhis(exit)) {
+ return nullptr;
+ }
+
+ return rdef;
+}
diff --git a/js/src/jit/WarpBuilder.h b/js/src/jit/WarpBuilder.h
new file mode 100644
index 0000000000..90776b8351
--- /dev/null
+++ b/js/src/jit/WarpBuilder.h
@@ -0,0 +1,326 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_WarpBuilder_h
+#define jit_WarpBuilder_h
+
+#include <initializer_list>
+
+#include "ds/InlineTable.h"
+#include "jit/JitContext.h"
+#include "jit/MIR.h"
+#include "jit/WarpBuilderShared.h"
+#include "jit/WarpSnapshot.h"
+#include "vm/Opcodes.h"
+
+namespace js {
+namespace jit {
+
+// JSOps not yet supported by WarpBuilder. See warning at the end of the list.
+#define WARP_UNSUPPORTED_OPCODE_LIST(_) \
+ /* Intentionally not implemented */ \
+ _(ForceInterpreter) \
+ /* With */ \
+ _(EnterWith) \
+ _(LeaveWith) \
+ /* Eval */ \
+ _(Eval) \
+ _(StrictEval) \
+ _(SpreadEval) \
+ _(StrictSpreadEval) \
+ /* Super */ \
+ _(SetPropSuper) \
+ _(SetElemSuper) \
+ _(StrictSetPropSuper) \
+ _(StrictSetElemSuper) \
+ /* Compound assignment */ \
+ _(GetBoundName) \
+ /* Generators / Async (bug 1317690) */ \
+ _(IsGenClosing) \
+ _(Resume) \
+ /* Misc */ \
+ _(DelName) \
+ _(SetIntrinsic) \
+ /* Private Fields */ \
+ _(GetAliasedDebugVar) \
+ /* Non-syntactic scope */ \
+ _(NonSyntacticGlobalThis) \
+ /* Records and Tuples */ \
+ IF_RECORD_TUPLE(_(InitRecord)) \
+ IF_RECORD_TUPLE(_(AddRecordProperty)) \
+ IF_RECORD_TUPLE(_(AddRecordSpread)) \
+ IF_RECORD_TUPLE(_(FinishRecord)) \
+ IF_RECORD_TUPLE(_(InitTuple)) \
+ IF_RECORD_TUPLE(_(AddTupleElement)) \
+ IF_RECORD_TUPLE(_(FinishTuple)) \
+ // === !! WARNING WARNING WARNING !! ===
+ // Do you really want to sacrifice performance by not implementing this
+ // operation in the optimizing compiler?
+
+class MIRGenerator;
+class MIRGraph;
+class WarpSnapshot;
+
+enum class CacheKind : uint8_t;
+
+// [SMDOC] Control Flow handling in WarpBuilder.
+//
+// WarpBuilder traverses the script's bytecode and compiles each instruction to
+// corresponding MIR instructions. Handling control flow bytecode ops requires
+// some special machinery:
+//
+// Forward branches
+// ----------------
+// Most branches in the bytecode are forward branches to a JSOp::JumpTarget
+// instruction that we have not inspected yet. We compile them in two phases:
+//
+// 1) When compiling the source instruction: the MBasicBlock is terminated
+// with a control instruction that has a nullptr successor block. We also add
+// a PendingEdge instance to the PendingEdges list for the target bytecode
+// location.
+//
+// 2) When finally compiling the JSOp::JumpTarget: WarpBuilder::build_JumpTarget
+// creates the target block and uses the list of PendingEdges to 'link' the
+// blocks.
+//
+// Loops
+// -----
+// Loops may be nested within other loops, so each WarpBuilder has a LoopState
+// stack. This is used to link the backedge to the loop's header block.
+//
+// Unreachable/dead code
+// ---------------------
+// Some bytecode instructions never fall through to the next instruction, for
+// example JSOp::Return, JSOp::Goto, or JSOp::Throw. Code after such
+// instructions is guaranteed to be dead, so WarpBuilder skips it until it gets
+// to a jump target instruction with pending edges.
+//
+// Note: The frontend may generate unnecessary JSOp::JumpTarget instructions,
+// which we can ignore when they have no incoming pending edges.
+//
+// Try-catch
+// ---------
+// WarpBuilder supports scripts with try-catch by only compiling the try-block
+// and bailing out (to the Baseline Interpreter) from the exception handler
+// whenever we need to execute the catch-block.
+//
+// Because we don't compile the catch-block and the code after the try-catch may
+// only be reachable via the catch-block, Baseline's BytecodeAnalysis ensures
+// Baseline does not attempt OSR into Warp at loops that are only reachable via
+// catch/finally blocks.
+//
+// Finally-blocks are compiled by WarpBuilder, but when we have to enter a
+// finally-block from the exception handler, we bail out to the Baseline
+// Interpreter.
+
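+// Illustrative sketch of the two-phase scheme above. This is pseudocode, not
+// the actual builder code; the real logic lives in addPendingEdge and
+// build_JumpTarget (declared below), and |successorIndex| is a placeholder:
+//
+//   // Phase 1: at the branching op, terminate |current| with a control
+//   // instruction whose successor block is still nullptr, then record the
+//   // unresolved edge for the jump target's bytecode location.
+//   if (!addPendingEdge(target, current, successorIndex)) {
+//     return false;
+//   }
+//
+//   // Phase 2: at JSOp::JumpTarget, build_JumpTarget creates the target
+//   // MBasicBlock and patches the recorded nullptr successors of all
+//   // pending edges to point at it.
+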
+// PendingEdge is used whenever a block is terminated with a forward branch in
+// the bytecode. When we reach the jump target we use this information to link
+// the block to the jump target's block.
+class PendingEdge {
+ MBasicBlock* block_;
+ uint32_t successor_;
+ uint8_t numToPop_;
+
+ public:
+ PendingEdge(MBasicBlock* block, uint32_t successor, uint32_t numToPop)
+ : block_(block), successor_(successor), numToPop_(numToPop) {
+ MOZ_ASSERT(numToPop_ == numToPop, "value must fit in field");
+ }
+
+ MBasicBlock* block() const { return block_; }
+ uint32_t successor() const { return successor_; }
+ uint8_t numToPop() const { return numToPop_; }
+};
+
+// PendingEdgesMap maps a bytecode instruction to a Vector of PendingEdges
+// targeting it. We use InlineMap<> for this because most of the time there are
+// only a few pending edges but there can be many when switch-statements are
+// involved.
+using PendingEdges = Vector<PendingEdge, 2, SystemAllocPolicy>;
+using PendingEdgesMap =
+ InlineMap<jsbytecode*, PendingEdges, 8, PointerHasher<jsbytecode*>,
+ SystemAllocPolicy>;
+
+// LoopState stores information about a loop that's being compiled to MIR.
+class LoopState {
+ MBasicBlock* header_ = nullptr;
+
+ public:
+ explicit LoopState(MBasicBlock* header) : header_(header) {}
+
+ MBasicBlock* header() const { return header_; }
+};
+using LoopStateStack = Vector<LoopState, 4, JitAllocPolicy>;
+
+// Data that is shared across all WarpBuilders for a given compilation.
+class MOZ_STACK_CLASS WarpCompilation {
+ // The total loop depth, including loops in the caller while
+ // compiling inlined functions.
+ uint32_t loopDepth_ = 0;
+
+ // Loop phis for iterators that need to be kept alive.
+ PhiVector iterators_;
+
+ public:
+ explicit WarpCompilation(TempAllocator& alloc) : iterators_(alloc) {}
+
+ uint32_t loopDepth() const { return loopDepth_; }
+ void incLoopDepth() { loopDepth_++; }
+ void decLoopDepth() {
+ MOZ_ASSERT(loopDepth() > 0);
+ loopDepth_--;
+ }
+
+ PhiVector* iterators() { return &iterators_; }
+};
+
+// WarpBuilder builds a MIR graph from WarpSnapshot. Unlike WarpOracle,
+// WarpBuilder can run off-thread.
+class MOZ_STACK_CLASS WarpBuilder : public WarpBuilderShared {
+ WarpCompilation* warpCompilation_;
+ MIRGraph& graph_;
+ const CompileInfo& info_;
+ const WarpScriptSnapshot* scriptSnapshot_;
+ JSScript* script_;
+
+ // Pointer to a WarpOpSnapshot or nullptr if we reached the end of the list.
+ // Because bytecode is compiled from first to last instruction (and
+ // WarpOpSnapshot is sorted the same way), the iterator always moves forward.
+ const WarpOpSnapshot* opSnapshotIter_ = nullptr;
+
+ // Note: loopStack_ is builder-specific. loopStack_.length is the
+ // depth relative to the current script. The overall loop depth is
+ // stored in the WarpCompilation.
+ LoopStateStack loopStack_;
+ PendingEdgesMap pendingEdges_;
+
+ // These are only initialized when building an inlined script.
+ WarpBuilder* callerBuilder_ = nullptr;
+ MResumePoint* callerResumePoint_ = nullptr;
+ CallInfo* inlineCallInfo_ = nullptr;
+
+ WarpCompilation* warpCompilation() const { return warpCompilation_; }
+ MIRGraph& graph() { return graph_; }
+ const CompileInfo& info() const { return info_; }
+ const WarpScriptSnapshot* scriptSnapshot() const { return scriptSnapshot_; }
+
+ uint32_t loopDepth() const { return warpCompilation_->loopDepth(); }
+ void incLoopDepth() { warpCompilation_->incLoopDepth(); }
+ void decLoopDepth() { warpCompilation_->decLoopDepth(); }
+ PhiVector* iterators() { return warpCompilation_->iterators(); }
+
+ WarpBuilder* callerBuilder() const { return callerBuilder_; }
+ MResumePoint* callerResumePoint() const { return callerResumePoint_; }
+
+ BytecodeSite* newBytecodeSite(BytecodeLocation loc);
+
+ const WarpOpSnapshot* getOpSnapshotImpl(BytecodeLocation loc,
+ WarpOpSnapshot::Kind kind);
+
+ template <typename T>
+ const T* getOpSnapshot(BytecodeLocation loc) {
+ const WarpOpSnapshot* snapshot = getOpSnapshotImpl(loc, T::ThisKind);
+ return snapshot ? snapshot->as<T>() : nullptr;
+ }
+
+ void initBlock(MBasicBlock* block);
+ [[nodiscard]] bool startNewEntryBlock(size_t stackDepth,
+ BytecodeLocation loc);
+ [[nodiscard]] bool startNewBlock(MBasicBlock* predecessor,
+ BytecodeLocation loc, size_t numToPop = 0);
+ [[nodiscard]] bool startNewLoopHeaderBlock(BytecodeLocation loopHead);
+ [[nodiscard]] bool startNewOsrPreHeaderBlock(BytecodeLocation loopHead);
+
+ bool hasTerminatedBlock() const { return current == nullptr; }
+ void setTerminatedBlock() { current = nullptr; }
+
+ [[nodiscard]] bool addPendingEdge(BytecodeLocation target, MBasicBlock* block,
+ uint32_t successor, uint32_t numToPop = 0);
+ [[nodiscard]] bool buildForwardGoto(BytecodeLocation target);
+ [[nodiscard]] bool buildBackedge();
+ [[nodiscard]] bool buildTestBackedge(BytecodeLocation loc);
+
+ [[nodiscard]] bool addIteratorLoopPhis(BytecodeLocation loopHead);
+
+ [[nodiscard]] bool buildPrologue();
+ [[nodiscard]] bool buildBody();
+
+ [[nodiscard]] bool buildInlinePrologue();
+
+ [[nodiscard]] bool buildIC(BytecodeLocation loc, CacheKind kind,
+ std::initializer_list<MDefinition*> inputs);
+ [[nodiscard]] bool buildBailoutForColdIC(BytecodeLocation loc,
+ CacheKind kind);
+
+ [[nodiscard]] bool buildEnvironmentChain();
+ MInstruction* buildNamedLambdaEnv(MDefinition* callee, MDefinition* env,
+ NamedLambdaObject* templateObj);
+ MInstruction* buildCallObject(MDefinition* callee, MDefinition* env,
+ CallObject* templateObj);
+ MInstruction* buildLoadSlot(MDefinition* obj, uint32_t numFixedSlots,
+ uint32_t slot);
+
+ MConstant* globalLexicalEnvConstant();
+ MDefinition* getCallee();
+
+ [[nodiscard]] bool buildUnaryOp(BytecodeLocation loc);
+ [[nodiscard]] bool buildBinaryOp(BytecodeLocation loc);
+ [[nodiscard]] bool buildCompareOp(BytecodeLocation loc);
+ [[nodiscard]] bool buildTestOp(BytecodeLocation loc);
+ [[nodiscard]] bool buildCallOp(BytecodeLocation loc);
+
+ [[nodiscard]] bool buildInitPropGetterSetterOp(BytecodeLocation loc);
+ [[nodiscard]] bool buildInitElemGetterSetterOp(BytecodeLocation loc);
+
+ [[nodiscard]] bool buildSuspend(BytecodeLocation loc, MDefinition* gen,
+ MDefinition* retVal);
+
+ void buildCheckLexicalOp(BytecodeLocation loc);
+
+ bool usesEnvironmentChain() const;
+ MDefinition* walkEnvironmentChain(uint32_t numHops);
+
+ void buildCreateThis(CallInfo& callInfo);
+
+ [[nodiscard]] bool transpileCall(BytecodeLocation loc,
+ const WarpCacheIR* cacheIRSnapshot,
+ CallInfo* callInfo);
+
+ [[nodiscard]] bool buildInlinedCall(BytecodeLocation loc,
+ const WarpInlinedCall* snapshot,
+ CallInfo& callInfo);
+
+ MDefinition* patchInlinedReturns(CompileInfo* calleeCompileInfo,
+ CallInfo& callInfo, MIRGraphReturns& exits,
+ MBasicBlock* returnBlock);
+ MDefinition* patchInlinedReturn(CompileInfo* calleeCompileInfo,
+ CallInfo& callInfo, MBasicBlock* exit,
+ MBasicBlock* returnBlock);
+
+#define BUILD_OP(OP, ...) [[nodiscard]] bool build_##OP(BytecodeLocation loc);
+ FOR_EACH_OPCODE(BUILD_OP)
+#undef BUILD_OP
+
+ public:
+ WarpBuilder(WarpSnapshot& snapshot, MIRGenerator& mirGen,
+ WarpCompilation* warpCompilation);
+ WarpBuilder(WarpBuilder* caller, WarpScriptSnapshot* snapshot,
+ CompileInfo& compileInfo, CallInfo* inlineCallInfo,
+ MResumePoint* callerResumePoint);
+
+ [[nodiscard]] bool build();
+ [[nodiscard]] bool buildInline();
+
+ CallInfo* inlineCallInfo() const { return inlineCallInfo_; }
+ bool isMonomorphicInlined() const {
+ return scriptSnapshot_->isMonomorphicInlined();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_WarpBuilder_h */
diff --git a/js/src/jit/WarpBuilderShared.cpp b/js/src/jit/WarpBuilderShared.cpp
new file mode 100644
index 0000000000..50d25e628a
--- /dev/null
+++ b/js/src/jit/WarpBuilderShared.cpp
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/WarpBuilderShared.h"
+
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+WarpBuilderShared::WarpBuilderShared(WarpSnapshot& snapshot,
+ MIRGenerator& mirGen,
+ MBasicBlock* current_)
+ : snapshot_(snapshot),
+ mirGen_(mirGen),
+ alloc_(mirGen.alloc()),
+ current(current_) {}
+
+bool WarpBuilderShared::resumeAfter(MInstruction* ins, BytecodeLocation loc) {
+ // resumeAfter should only be used with effectful instructions. The only
+ // exception is MInt64ToBigInt: it is used to convert the result of a call
+ // into Wasm code, so we attach the resume point to it instead of to the call.
+ MOZ_ASSERT(ins->isEffectful() || ins->isInt64ToBigInt());
+ MOZ_ASSERT(!ins->isMovable());
+
+ MResumePoint* resumePoint = MResumePoint::New(
+ alloc(), ins->block(), loc.toRawBytecode(), ResumeMode::ResumeAfter);
+ if (!resumePoint) {
+ return false;
+ }
+
+ ins->setResumePoint(resumePoint);
+ return true;
+}
+
+MConstant* WarpBuilderShared::constant(const Value& v) {
+ MOZ_ASSERT_IF(v.isString(), v.toString()->isLinear());
+ MOZ_ASSERT_IF(v.isGCThing(), !IsInsideNursery(v.toGCThing()));
+
+ MConstant* cst = MConstant::New(alloc(), v);
+ current->add(cst);
+ return cst;
+}
+
+void WarpBuilderShared::pushConstant(const Value& v) {
+ MConstant* cst = constant(v);
+ current->push(cst);
+}
+
+MCall* WarpBuilderShared::makeCall(CallInfo& callInfo, bool needsThisCheck,
+ WrappedFunction* target, bool isDOMCall) {
+ auto addUndefined = [this]() -> MConstant* {
+ return constant(UndefinedValue());
+ };
+
+ return MakeCall(alloc(), addUndefined, callInfo, needsThisCheck, target,
+ isDOMCall);
+}
+
+MInstruction* WarpBuilderShared::makeSpreadCall(CallInfo& callInfo,
+ bool needsThisCheck,
+ bool isSameRealm,
+ WrappedFunction* target) {
+ MOZ_ASSERT(callInfo.argFormat() == CallInfo::ArgFormat::Array);
+ MOZ_ASSERT_IF(needsThisCheck, !target);
+
+ // Load dense elements of the argument array.
+ MElements* elements = MElements::New(alloc(), callInfo.arrayArg());
+ current->add(elements);
+
+ if (callInfo.constructing()) {
+ auto* construct =
+ MConstructArray::New(alloc(), target, callInfo.callee(), elements,
+ callInfo.thisArg(), callInfo.getNewTarget());
+ if (isSameRealm) {
+ construct->setNotCrossRealm();
+ }
+ if (needsThisCheck) {
+ construct->setNeedsThisCheck();
+ }
+ return construct;
+ }
+
+ auto* apply = MApplyArray::New(alloc(), target, callInfo.callee(), elements,
+ callInfo.thisArg());
+
+ if (callInfo.ignoresReturnValue()) {
+ apply->setIgnoresReturnValue();
+ }
+ if (isSameRealm) {
+ apply->setNotCrossRealm();
+ }
+ MOZ_ASSERT(!needsThisCheck);
+ return apply;
+}
diff --git a/js/src/jit/WarpBuilderShared.h b/js/src/jit/WarpBuilderShared.h
new file mode 100644
index 0000000000..fab9b133a7
--- /dev/null
+++ b/js/src/jit/WarpBuilderShared.h
@@ -0,0 +1,425 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_WarpBuilderShared_h
+#define jit_WarpBuilderShared_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+
+#include "jit/MIRGraph.h"
+#include "js/Value.h"
+
+namespace js {
+
+class BytecodeLocation;
+
+namespace jit {
+
+class MBasicBlock;
+class MCall;
+class MConstant;
+class MInstruction;
+class MIRGenerator;
+class TempAllocator;
+class WarpSnapshot;
+class WrappedFunction;
+
+// Helper class to manage call state.
+class MOZ_STACK_CLASS CallInfo {
+ MDefinition* callee_ = nullptr;
+ MDefinition* thisArg_ = nullptr;
+ MDefinition* newTargetArg_ = nullptr;
+ MDefinitionVector args_;
+
+ bool constructing_;
+
+ // True if the caller does not use the return value.
+ bool ignoresReturnValue_;
+
+ bool inlined_ = false;
+ bool setter_ = false;
+
+ public:
+ // For normal calls and FunCall we can shuffle around definitions in
+ // the CallInfo and use a normal MCall. For others, we need to use a
+ // specialized call.
+ enum class ArgFormat {
+ Standard,
+ Array,
+ FunApplyArgsObj,
+ };
+
+ private:
+ ArgFormat argFormat_ = ArgFormat::Standard;
+ mozilla::Maybe<ResumeMode> inliningMode_;
+
+ public:
+ CallInfo(TempAllocator& alloc, bool constructing, bool ignoresReturnValue,
+ jsbytecode* pc = nullptr)
+ : args_(alloc),
+ constructing_(constructing),
+ ignoresReturnValue_(ignoresReturnValue) {}
+
+ [[nodiscard]] bool init(MBasicBlock* current, uint32_t argc) {
+ MOZ_ASSERT(args_.empty());
+
+ // Get the arguments in the right order
+ if (!args_.reserve(argc)) {
+ return false;
+ }
+
+ if (constructing()) {
+ setNewTarget(current->pop());
+ }
+
+ for (int32_t i = argc; i > 0; i--) {
+ args_.infallibleAppend(current->peek(-i));
+ }
+ current->popn(argc);
+
+ // Get |this| and |callee|
+ setThis(current->pop());
+ setCallee(current->pop());
+
+ return true;
+ }
+
+ void initForSpreadCall(MBasicBlock* current) {
+ MOZ_ASSERT(args_.empty());
+
+ if (constructing()) {
+ setNewTarget(current->pop());
+ }
+
+ // Spread calls have one argument, an Array object containing the args.
+ static_assert(decltype(args_)::InlineLength >= 1,
+ "Appending one argument should be infallible");
+ MOZ_ALWAYS_TRUE(args_.append(current->pop()));
+
+ // Get |this| and |callee|
+ setThis(current->pop());
+ setCallee(current->pop());
+
+ argFormat_ = ArgFormat::Array;
+ }
+
+ void initForGetterCall(MDefinition* callee, MDefinition* thisVal) {
+ MOZ_ASSERT(args_.empty());
+ setCallee(callee);
+ setThis(thisVal);
+ }
+ void initForSetterCall(MDefinition* callee, MDefinition* thisVal,
+ MDefinition* rhs) {
+ MOZ_ASSERT(args_.empty());
+ markAsSetter();
+ setCallee(callee);
+ setThis(thisVal);
+ static_assert(decltype(args_)::InlineLength >= 1,
+ "Appending one argument should be infallible");
+ MOZ_ALWAYS_TRUE(args_.append(rhs));
+ }
+
+ void initForApplyInlinedArgs(MDefinition* callee, MDefinition* thisVal,
+ uint32_t numActuals) {
+ MOZ_ASSERT(args_.empty());
+ MOZ_ASSERT(!constructing_);
+
+ setCallee(callee);
+ setThis(thisVal);
+
+ MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
+ static_assert(
+ ArgumentsObject::MaxInlinedArgs <= decltype(args_)::InlineLength,
+ "Actual arguments can be infallibly stored inline");
+ MOZ_ALWAYS_TRUE(args_.reserve(numActuals));
+ }
+
+ [[nodiscard]] bool initForApplyArray(MDefinition* callee,
+ MDefinition* thisVal,
+ uint32_t numActuals) {
+ MOZ_ASSERT(args_.empty());
+ MOZ_ASSERT(!constructing_);
+
+ setCallee(callee);
+ setThis(thisVal);
+
+ return args_.reserve(numActuals);
+ }
+
+ [[nodiscard]] bool initForConstructArray(MDefinition* callee,
+ MDefinition* thisVal,
+ MDefinition* newTarget,
+ uint32_t numActuals) {
+ MOZ_ASSERT(args_.empty());
+ MOZ_ASSERT(constructing_);
+
+ setCallee(callee);
+ setThis(thisVal);
+ setNewTarget(newTarget);
+
+ return args_.reserve(numActuals);
+ }
+
+ void initForCloseIter(MDefinition* iter, MDefinition* callee) {
+ MOZ_ASSERT(args_.empty());
+ setCallee(callee);
+ setThis(iter);
+ }
+
+ void popCallStack(MBasicBlock* current) { current->popn(numFormals()); }
+
+ [[nodiscard]] bool pushCallStack(MBasicBlock* current) {
+ current->push(callee());
+ current->push(thisArg());
+
+ for (uint32_t i = 0; i < argc(); i++) {
+ current->push(getArg(i));
+ }
+
+ if (constructing()) {
+ current->push(getNewTarget());
+ }
+
+ return true;
+ }
+
+ uint32_t argc() const { return args_.length(); }
+ uint32_t numFormals() const { return argc() + 2 + constructing(); }
+
+ [[nodiscard]] bool setArgs(const MDefinitionVector& args) {
+ MOZ_ASSERT(args_.empty());
+ return args_.appendAll(args);
+ }
+
+ MDefinitionVector& argv() { return args_; }
+
+ const MDefinitionVector& argv() const { return args_; }
+
+ MDefinition* getArg(uint32_t i) const {
+ MOZ_ASSERT(i < argc());
+ return args_[i];
+ }
+
+ void initArg(uint32_t i, MDefinition* def) {
+ MOZ_ASSERT(i == argc());
+ args_.infallibleAppend(def);
+ }
+
+ void setArg(uint32_t i, MDefinition* def) {
+ MOZ_ASSERT(i < argc());
+ args_[i] = def;
+ }
+
+ void removeArg(uint32_t i) { args_.erase(&args_[i]); }
+
+ MDefinition* thisArg() const {
+ MOZ_ASSERT(thisArg_);
+ return thisArg_;
+ }
+
+ void setThis(MDefinition* thisArg) { thisArg_ = thisArg; }
+
+ bool constructing() const { return constructing_; }
+
+ bool ignoresReturnValue() const { return ignoresReturnValue_; }
+
+ void setNewTarget(MDefinition* newTarget) {
+ MOZ_ASSERT(constructing());
+ newTargetArg_ = newTarget;
+ }
+ MDefinition* getNewTarget() const {
+ MOZ_ASSERT(newTargetArg_);
+ return newTargetArg_;
+ }
+
+ bool isSetter() const { return setter_; }
+ void markAsSetter() { setter_ = true; }
+
+ bool isInlined() const { return inlined_; }
+ void markAsInlined() { inlined_ = true; }
+
+ ResumeMode inliningResumeMode() const {
+ MOZ_ASSERT(isInlined());
+ return *inliningMode_;
+ }
+
+ void setInliningResumeMode(ResumeMode mode) {
+ MOZ_ASSERT(isInlined());
+ MOZ_ASSERT(inliningMode_.isNothing());
+ inliningMode_.emplace(mode);
+ }
+
+ MDefinition* callee() const {
+ MOZ_ASSERT(callee_);
+ return callee_;
+ }
+
+ void setCallee(MDefinition* callee) { callee_ = callee; }
+
+ template <typename Fun>
+ void forEachCallOperand(Fun& f) {
+ f(callee_);
+ f(thisArg_);
+ if (newTargetArg_) {
+ f(newTargetArg_);
+ }
+ for (uint32_t i = 0; i < argc(); i++) {
+ f(getArg(i));
+ }
+ }
+
+ // Prepend `numArgs` arguments. Calls `f(i)` for each new argument.
+ template <typename Fun>
+ [[nodiscard]] bool prependArgs(size_t numArgs, const Fun& f) {
+ size_t numArgsBefore = args_.length();
+ if (!args_.growBy(numArgs)) {
+ return false;
+ }
+ for (size_t i = numArgsBefore; i > 0; i--) {
+ args_[numArgs + i - 1] = args_[i - 1];
+ }
+ for (size_t i = 0; i < numArgs; i++) {
+ args_[i] = f(i);
+ }
+ return true;
+ }
+
+ void setImplicitlyUsedUnchecked() {
+ auto setFlag = [](MDefinition* def) { def->setImplicitlyUsedUnchecked(); };
+ forEachCallOperand(setFlag);
+ }
+
+ ArgFormat argFormat() const { return argFormat_; }
+ void setArgFormat(ArgFormat argFormat) { argFormat_ = argFormat; }
+
+ MDefinition* arrayArg() const {
+ MOZ_ASSERT(argFormat_ == ArgFormat::Array);
+ // The array argument for a spread call or FunApply is always the last
+ // argument.
+ return getArg(argc() - 1);
+ }
+};
+
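+// Typical use of CallInfo for a standard (non-spread) call, as a hedged
+// sketch rather than an exact call site; |argc| is a placeholder and the
+// real callers live in WarpBuilder and the CacheIR transpiler:
+//
+//   CallInfo callInfo(alloc(), /* constructing = */ false,
+//                     /* ignoresReturnValue = */ false);
+//   if (!callInfo.init(current, argc)) {
+//     return false;  // init() pops the args, |this| and callee.
+//   }
+//   MCall* call = makeCall(callInfo, /* needsThisCheck = */ false);
+//   if (!call) {
+//     return false;
+//   }
+//
+// makeCall (declared on WarpBuilderShared below) forwards to the MakeCall
+// template that follows.
+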
+template <typename Undef>
+MCall* MakeCall(TempAllocator& alloc, Undef addUndefined, CallInfo& callInfo,
+ bool needsThisCheck, WrappedFunction* target, bool isDOMCall) {
+ MOZ_ASSERT(callInfo.argFormat() == CallInfo::ArgFormat::Standard);
+ MOZ_ASSERT_IF(needsThisCheck, !target);
+ MOZ_ASSERT_IF(isDOMCall, target->jitInfo()->type() == JSJitInfo::Method);
+
+ mozilla::Maybe<DOMObjectKind> objKind;
+ if (isDOMCall) {
+ const Shape* shape = callInfo.thisArg()->toGuardShape()->shape();
+ MOZ_ASSERT(shape->getObjectClass()->isDOMClass());
+ if (shape->isNative()) {
+ objKind.emplace(DOMObjectKind::Native);
+ } else {
+ MOZ_ASSERT(shape->isProxy());
+ objKind.emplace(DOMObjectKind::Proxy);
+ }
+ }
+
+ uint32_t targetArgs = callInfo.argc();
+
+ // If the target is scripted, pass at least its formal argument count;
+ // missing arguments are padded with |undefined| below. Native functions
+ // are passed an explicit 'argc' parameter.
+ if (target && target->hasJitEntry()) {
+ targetArgs = std::max<uint32_t>(target->nargs(), callInfo.argc());
+ }
+
+ MCall* call =
+ MCall::New(alloc, target, targetArgs + 1 + callInfo.constructing(),
+ callInfo.argc(), callInfo.constructing(),
+ callInfo.ignoresReturnValue(), isDOMCall, objKind);
+ if (!call) {
+ return nullptr;
+ }
+
+ if (callInfo.constructing()) {
+ // Note: setThis should have been done by the caller of makeCall.
+ if (needsThisCheck) {
+ call->setNeedsThisCheck();
+ }
+
+ // Pass |new.target|
+ call->addArg(targetArgs + 1, callInfo.getNewTarget());
+ }
+
+ // Explicitly pad any missing arguments with |undefined|.
+ // This permits skipping the argumentsRectifier.
+ MOZ_ASSERT_IF(target && targetArgs > callInfo.argc(), target->hasJitEntry());
+
+ MConstant* undef = nullptr;
+ for (uint32_t i = targetArgs; i > callInfo.argc(); i--) {
+ if (!undef) {
+ undef = addUndefined();
+ }
+ if (!alloc.ensureBallast()) {
+ return nullptr;
+ }
+ call->addArg(i, undef);
+ }
+
+ // Add explicit arguments.
+ // Skip addArg(0) because it is reserved for |this|.
+ for (int32_t i = callInfo.argc() - 1; i >= 0; i--) {
+ call->addArg(i + 1, callInfo.getArg(i));
+ }
+
+ if (isDOMCall) {
+ // Now that we've told it about all the args, compute whether it's movable.
+ call->computeMovable();
+ }
+
+ // Pass |this| and callee.
+ call->addArg(0, callInfo.thisArg());
+ call->initCallee(callInfo.callee());
+
+ if (target) {
+ // The callee must be a JSFunction so we don't need a Class check.
+ call->disableClassCheck();
+ }
+
+ return call;
+}
+
+// Base class for code sharing between WarpBuilder and WarpCacheIRTranspiler.
+// Because this code is used by WarpCacheIRTranspiler, we should
+// generally assume that we only have access to the current basic block.
+class WarpBuilderShared {
+ WarpSnapshot& snapshot_;
+ MIRGenerator& mirGen_;
+ TempAllocator& alloc_;
+
+ protected:
+ MBasicBlock* current;
+
+ WarpBuilderShared(WarpSnapshot& snapshot, MIRGenerator& mirGen,
+ MBasicBlock* current_);
+
+ [[nodiscard]] bool resumeAfter(MInstruction* ins, BytecodeLocation loc);
+
+ MConstant* constant(const JS::Value& v);
+ void pushConstant(const JS::Value& v);
+
+ MCall* makeCall(CallInfo& callInfo, bool needsThisCheck,
+ WrappedFunction* target = nullptr, bool isDOMCall = false);
+ MInstruction* makeSpreadCall(CallInfo& callInfo, bool needsThisCheck,
+ bool isSameRealm = false,
+ WrappedFunction* target = nullptr);
+
+ public:
+ MBasicBlock* currentBlock() const { return current; }
+ WarpSnapshot& snapshot() const { return snapshot_; }
+ MIRGenerator& mirGen() { return mirGen_; }
+ TempAllocator& alloc() { return alloc_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_WarpBuilderShared_h */
diff --git a/js/src/jit/WarpCacheIRTranspiler.cpp b/js/src/jit/WarpCacheIRTranspiler.cpp
new file mode 100644
index 0000000000..7be5f78526
--- /dev/null
+++ b/js/src/jit/WarpCacheIRTranspiler.cpp
@@ -0,0 +1,5809 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/WarpCacheIRTranspiler.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/Maybe.h"
+
+#include "jsmath.h"
+
+#include "builtin/DataViewObject.h"
+#include "builtin/MapObject.h"
+#include "jit/AtomicOp.h"
+#include "jit/CacheIR.h"
+#include "jit/CacheIRCompiler.h"
+#include "jit/CacheIROpsGenerated.h"
+#include "jit/CacheIRReader.h"
+#include "jit/LIR.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/WarpBuilder.h"
+#include "jit/WarpBuilderShared.h"
+#include "jit/WarpSnapshot.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "vm/ArgumentsObject.h"
+#include "vm/BytecodeLocation.h"
+#include "wasm/WasmCode.h"
+
+#include "gc/ObjectKind-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// The CacheIR transpiler generates MIR from Baseline CacheIR.
+class MOZ_RAII WarpCacheIRTranspiler : public WarpBuilderShared {
+ WarpBuilder* builder_;
+ BytecodeLocation loc_;
+ const CacheIRStubInfo* stubInfo_;
+ const uint8_t* stubData_;
+
+ // Vector mapping OperandId to corresponding MDefinition.
+ using MDefinitionStackVector = Vector<MDefinition*, 8, SystemAllocPolicy>;
+ MDefinitionStackVector operands_;
+
+ CallInfo* callInfo_;
+
+ // Array mapping call arguments to OperandId.
+ using ArgumentKindArray =
+ mozilla::EnumeratedArray<ArgumentKind, ArgumentKind::NumKinds, OperandId>;
+ ArgumentKindArray argumentOperandIds_;
+
+ void setArgumentId(ArgumentKind kind, OperandId id) {
+ MOZ_ASSERT(kind != ArgumentKind::Callee);
+ MOZ_ASSERT(!argumentOperandIds_[kind].valid());
+ argumentOperandIds_[kind] = id;
+ }
+
+ void updateArgumentsFromOperands();
+
+#ifdef DEBUG
+ // Used to assert that there is only one effectful instruction
+ // per stub, and that this instruction has a resume point.
+ MInstruction* effectful_ = nullptr;
+ bool pushedResult_ = false;
+#endif
+
+ inline void addUnchecked(MInstruction* ins) {
+ current->add(ins);
+
+ // If we have not set a more specific bailout kind, mark this instruction
+ // as transpiled CacheIR. If one of these instructions bails out, we
+ // expect to hit the baseline fallback stub and invalidate the Warp script
+ // in tryAttach.
+ if (ins->bailoutKind() == BailoutKind::Unknown) {
+ ins->setBailoutKind(BailoutKind::TranspiledCacheIR);
+ }
+ }
+
+ inline void add(MInstruction* ins) {
+ MOZ_ASSERT(!ins->isEffectful());
+ addUnchecked(ins);
+ }
+
+ inline void addEffectful(MInstruction* ins) {
+ MOZ_ASSERT(ins->isEffectful());
+ MOZ_ASSERT(!effectful_, "Can only have one effectful instruction");
+ addUnchecked(ins);
+#ifdef DEBUG
+ effectful_ = ins;
+#endif
+ }
+
+ // Bypasses all checks in addEffectful. Only used for testing functions.
+ inline void addEffectfulUnsafe(MInstruction* ins) {
+ MOZ_ASSERT(ins->isEffectful());
+ addUnchecked(ins);
+ }
+
+ [[nodiscard]] bool resumeAfterUnchecked(MInstruction* ins) {
+ return WarpBuilderShared::resumeAfter(ins, loc_);
+ }
+ [[nodiscard]] bool resumeAfter(MInstruction* ins) {
+ MOZ_ASSERT(effectful_ == ins);
+ return resumeAfterUnchecked(ins);
+ }
+
+ // CacheIR instructions writing to the IC's result register (the *Result
+ // instructions) must call this to push the result onto the virtual stack.
+ void pushResult(MDefinition* result) {
+ MOZ_ASSERT(!pushedResult_, "Can't have more than one result");
+ current->push(result);
+#ifdef DEBUG
+ pushedResult_ = true;
+#endif
+ }
+
+ MDefinition* getOperand(OperandId id) const { return operands_[id.id()]; }
+
+ void setOperand(OperandId id, MDefinition* def) { operands_[id.id()] = def; }
+
+ [[nodiscard]] bool defineOperand(OperandId id, MDefinition* def) {
+ MOZ_ASSERT(id.id() == operands_.length());
+ return operands_.append(def);
+ }
+
+ uintptr_t readStubWord(uint32_t offset) {
+ return stubInfo_->getStubRawWord(stubData_, offset);
+ }
+
+ Shape* shapeStubField(uint32_t offset) {
+ return reinterpret_cast<Shape*>(readStubWord(offset));
+ }
+ GetterSetter* getterSetterStubField(uint32_t offset) {
+ return reinterpret_cast<GetterSetter*>(readStubWord(offset));
+ }
+ const JSClass* classStubField(uint32_t offset) {
+ return reinterpret_cast<const JSClass*>(readStubWord(offset));
+ }
+ JSString* stringStubField(uint32_t offset) {
+ return reinterpret_cast<JSString*>(readStubWord(offset));
+ }
+ JS::Symbol* symbolStubField(uint32_t offset) {
+ return reinterpret_cast<JS::Symbol*>(readStubWord(offset));
+ }
+ BaseScript* baseScriptStubField(uint32_t offset) {
+ return reinterpret_cast<BaseScript*>(readStubWord(offset));
+ }
+ const JSJitInfo* jitInfoStubField(uint32_t offset) {
+ return reinterpret_cast<const JSJitInfo*>(readStubWord(offset));
+ }
+ JSNative jsnativeStubField(uint32_t offset) {
+ return reinterpret_cast<JSNative>(readStubWord(offset));
+ }
+ JS::ExpandoAndGeneration* expandoAndGenerationField(uint32_t offset) {
+ return reinterpret_cast<JS::ExpandoAndGeneration*>(readStubWord(offset));
+ }
+ const wasm::FuncExport* wasmFuncExportField(uint32_t offset) {
+ return reinterpret_cast<const wasm::FuncExport*>(readStubWord(offset));
+ }
+ NativeIteratorListHead* nativeIteratorListHeadStubField(uint32_t offset) {
+ return reinterpret_cast<NativeIteratorListHead*>(readStubWord(offset));
+ }
+ gc::Heap allocSiteInitialHeapField(uint32_t offset) {
+ uintptr_t word = readStubWord(offset);
+ MOZ_ASSERT(word == uintptr_t(gc::Heap::Default) ||
+ word == uintptr_t(gc::Heap::Tenured));
+ return gc::Heap(word);
+ }
+ const void* rawPointerField(uint32_t offset) {
+ return reinterpret_cast<const void*>(readStubWord(offset));
+ }
+ jsid idStubField(uint32_t offset) {
+ return jsid::fromRawBits(readStubWord(offset));
+ }
+ int32_t int32StubField(uint32_t offset) {
+ return static_cast<int32_t>(readStubWord(offset));
+ }
+ uint32_t uint32StubField(uint32_t offset) {
+ return static_cast<uint32_t>(readStubWord(offset));
+ }
+ uint64_t uint64StubField(uint32_t offset) {
+ return static_cast<uint64_t>(stubInfo_->getStubRawInt64(stubData_, offset));
+ }
+ Value valueStubField(uint32_t offset) {
+ uint64_t raw =
+ static_cast<uint64_t>(stubInfo_->getStubRawInt64(stubData_, offset));
+ Value val = Value::fromRawBits(raw);
+ MOZ_ASSERT_IF(val.isGCThing(), val.toGCThing()->isTenured());
+ return val;
+ }
+ double doubleStubField(uint32_t offset) {
+ uint64_t raw =
+ static_cast<uint64_t>(stubInfo_->getStubRawInt64(stubData_, offset));
+ return mozilla::BitwiseCast<double>(raw);
+ }
+
+ // This must only be called when the caller knows the object is tenured and
+ // not a nursery index.
+ JSObject* tenuredObjectStubField(uint32_t offset) {
+ WarpObjectField field = WarpObjectField::fromData(readStubWord(offset));
+ return field.toObject();
+ }
+
+ // Returns either MConstant or MNurseryObject. See WarpObjectField.
+ MInstruction* objectStubField(uint32_t offset);
+
+ const JSClass* classForGuardClassKind(GuardClassKind kind);
+
+ [[nodiscard]] bool emitGuardTo(ValOperandId inputId, MIRType type);
+
+ [[nodiscard]] bool emitToString(OperandId inputId, StringOperandId resultId);
+
+ template <typename T>
+ [[nodiscard]] bool emitDoubleBinaryArithResult(NumberOperandId lhsId,
+ NumberOperandId rhsId);
+
+ template <typename T>
+ [[nodiscard]] bool emitInt32BinaryArithResult(Int32OperandId lhsId,
+ Int32OperandId rhsId);
+
+ template <typename T>
+ [[nodiscard]] bool emitBigIntBinaryArithResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId);
+
+ template <typename T>
+ [[nodiscard]] bool emitBigIntBinaryArithEffectfulResult(
+ BigIntOperandId lhsId, BigIntOperandId rhsId);
+
+ template <typename T>
+ [[nodiscard]] bool emitBigIntUnaryArithResult(BigIntOperandId inputId);
+
+ [[nodiscard]] bool emitCompareResult(JSOp op, OperandId lhsId,
+ OperandId rhsId,
+ MCompare::CompareType compareType);
+
+ [[nodiscard]] bool emitTruthyResult(OperandId inputId);
+
+ [[nodiscard]] bool emitNewIteratorResult(MNewIterator::Type type,
+ uint32_t templateObjectOffset);
+
+ MInstruction* addBoundsCheck(MDefinition* index, MDefinition* length);
+
+ [[nodiscard]] MInstruction* convertToBoolean(MDefinition* input);
+
+ bool emitAddAndStoreSlotShared(MAddAndStoreSlot::Kind kind,
+ ObjOperandId objId, uint32_t offsetOffset,
+ ValOperandId rhsId, uint32_t newShapeOffset);
+
+ void addDataViewData(MDefinition* obj, Scalar::Type type,
+ MDefinition** offset, MInstruction** elements);
+
+ [[nodiscard]] bool emitAtomicsBinaryOp(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect, AtomicOp op);
+
+ [[nodiscard]] bool emitLoadArgumentSlot(ValOperandId resultId,
+ uint32_t slotIndex);
+
+ // Calls are either Native (native function without a JitEntry),
+ // a DOM Native (native function with a JitInfo OpType::Method),
+ // or Scripted (scripted function or native function with a JitEntry).
+ enum class CallKind { Native, DOM, Scripted };
+
+ [[nodiscard]] bool updateCallInfo(MDefinition* callee, CallFlags flags);
+
+ [[nodiscard]] bool emitCallFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ mozilla::Maybe<ObjOperandId> thisObjId,
+ CallFlags flags, CallKind kind);
+ [[nodiscard]] bool emitFunApplyArgsObj(WrappedFunction* wrappedTarget,
+ CallFlags flags);
+
+ MDefinition* convertWasmArg(MDefinition* arg, wasm::ValType::Kind kind);
+
+ WrappedFunction* maybeWrappedFunction(MDefinition* callee, CallKind kind,
+ uint16_t nargs, FunctionFlags flags);
+ WrappedFunction* maybeCallTarget(MDefinition* callee, CallKind kind);
+
+ bool maybeCreateThis(MDefinition* callee, CallFlags flags, CallKind kind);
+
+ [[nodiscard]] bool emitCallGetterResult(CallKind kind,
+ ValOperandId receiverId,
+ uint32_t getterOffset, bool sameRealm,
+ uint32_t nargsAndFlagsOffset);
+ [[nodiscard]] bool emitCallSetter(CallKind kind, ObjOperandId receiverId,
+ uint32_t setterOffset, ValOperandId rhsId,
+ bool sameRealm,
+ uint32_t nargsAndFlagsOffset);
+
+ CACHE_IR_TRANSPILER_GENERATED
+
+ public:
+ WarpCacheIRTranspiler(WarpBuilder* builder, BytecodeLocation loc,
+ CallInfo* callInfo, const WarpCacheIR* cacheIRSnapshot)
+ : WarpBuilderShared(builder->snapshot(), builder->mirGen(),
+ builder->currentBlock()),
+ builder_(builder),
+ loc_(loc),
+ stubInfo_(cacheIRSnapshot->stubInfo()),
+ stubData_(cacheIRSnapshot->stubData()),
+ callInfo_(callInfo) {}
+
+ [[nodiscard]] bool transpile(std::initializer_list<MDefinition*> inputs);
+};
+
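+// Rough sketch of how the transpiler is driven (placeholder operand names;
+// in practice WarpBuilder goes through its buildIC/transpileCall helpers
+// rather than constructing the transpiler directly):
+//
+//   WarpCacheIRTranspiler transpiler(builder, loc, /* callInfo = */ nullptr,
+//                                    cacheIRSnapshot);
+//   if (!transpiler.transpile({input})) {
+//     return false;
+//   }
+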
+bool WarpCacheIRTranspiler::transpile(
+ std::initializer_list<MDefinition*> inputs) {
+ if (!operands_.append(inputs.begin(), inputs.end())) {
+ return false;
+ }
+
+ CacheIRReader reader(stubInfo_);
+ do {
+ CacheOp op = reader.readOp();
+ switch (op) {
+#define DEFINE_OP(op, ...) \
+ case CacheOp::op: \
+ if (!emit##op(reader)) { \
+ return false; \
+ } \
+ break;
+ CACHE_IR_TRANSPILER_OPS(DEFINE_OP)
+#undef DEFINE_OP
+
+ default:
+ fprintf(stderr, "Unsupported op: %s\n", CacheIROpNames[size_t(op)]);
+ MOZ_CRASH("Unsupported op");
+ }
+ } while (reader.more());
+
+ // Effectful instructions should have a resume point. MIonToWasmCall is an
+ // exception: we can attach the resume point to the MInt64ToBigInt instruction
+ // instead.
+ MOZ_ASSERT_IF(effectful_,
+ effectful_->resumePoint() || effectful_->isIonToWasmCall());
+ return true;
+}
+
+MInstruction* WarpCacheIRTranspiler::objectStubField(uint32_t offset) {
+ WarpObjectField field = WarpObjectField::fromData(readStubWord(offset));
+
+ if (field.isNurseryIndex()) {
+ auto* ins = MNurseryObject::New(alloc(), field.toNurseryIndex());
+ add(ins);
+ return ins;
+ }
+
+ auto* ins = MConstant::NewObject(alloc(), field.toObject());
+ add(ins);
+ return ins;
+}
+
+bool WarpCacheIRTranspiler::emitGuardClass(ObjOperandId objId,
+ GuardClassKind kind) {
+ MDefinition* def = getOperand(objId);
+
+ MInstruction* ins;
+ if (kind == GuardClassKind::JSFunction) {
+ ins = MGuardToFunction::New(alloc(), def);
+ } else {
+ const JSClass* classp = classForGuardClassKind(kind);
+ ins = MGuardToClass::New(alloc(), def, classp);
+ }
+
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+const JSClass* WarpCacheIRTranspiler::classForGuardClassKind(
+ GuardClassKind kind) {
+ switch (kind) {
+ case GuardClassKind::Array:
+ return &ArrayObject::class_;
+ case GuardClassKind::PlainObject:
+ return &PlainObject::class_;
+ case GuardClassKind::ArrayBuffer:
+ return &ArrayBufferObject::class_;
+ case GuardClassKind::SharedArrayBuffer:
+ return &SharedArrayBufferObject::class_;
+ case GuardClassKind::DataView:
+ return &DataViewObject::class_;
+ case GuardClassKind::MappedArguments:
+ return &MappedArgumentsObject::class_;
+ case GuardClassKind::UnmappedArguments:
+ return &UnmappedArgumentsObject::class_;
+ case GuardClassKind::WindowProxy:
+ return mirGen().runtime->maybeWindowProxyClass();
+ case GuardClassKind::Set:
+ return &SetObject::class_;
+ case GuardClassKind::Map:
+ return &MapObject::class_;
+ case GuardClassKind::BoundFunction:
+ return &BoundFunctionObject::class_;
+ case GuardClassKind::JSFunction:
+ break;
+ }
+ MOZ_CRASH("unexpected kind");
+}
+
+bool WarpCacheIRTranspiler::emitGuardAnyClass(ObjOperandId objId,
+ uint32_t claspOffset) {
+ MDefinition* def = getOperand(objId);
+ const JSClass* classp = classStubField(claspOffset);
+
+ auto* ins = MGuardToClass::New(alloc(), def, classp);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardShape(ObjOperandId objId,
+ uint32_t shapeOffset) {
+ MDefinition* def = getOperand(objId);
+ Shape* shape = shapeStubField(shapeOffset);
+
+ auto* ins = MGuardShape::New(alloc(), def, shape);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardMultipleShapes(ObjOperandId objId,
+ uint32_t shapesOffset) {
+ MDefinition* def = getOperand(objId);
+ MInstruction* shapeList = objectStubField(shapesOffset);
+
+ auto* ins = MGuardMultipleShapes::New(alloc(), def, shapeList);
+ if (builder_->isMonomorphicInlined()) {
+ ins->setBailoutKind(BailoutKind::MonomorphicInlinedStubFolding);
+ }
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardNullProto(ObjOperandId objId) {
+ MDefinition* def = getOperand(objId);
+
+ auto* ins = MGuardNullProto::New(alloc(), def);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsNativeObject(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardIsNativeObject::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsProxy(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardIsProxy::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsNotProxy(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardIsNotProxy::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardIsNotDOMProxy::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardHasGetterSetter(
+ ObjOperandId objId, uint32_t idOffset, uint32_t getterSetterOffset) {
+ MDefinition* obj = getOperand(objId);
+ jsid id = idStubField(idOffset);
+ GetterSetter* gs = getterSetterStubField(getterSetterOffset);
+
+ auto* ins = MGuardHasGetterSetter::New(alloc(), obj, id, gs);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitProxyGetResult(ObjOperandId objId,
+ uint32_t idOffset) {
+ MDefinition* obj = getOperand(objId);
+ jsid id = idStubField(idOffset);
+
+ auto* ins = MProxyGet::New(alloc(), obj, id);
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitProxyGetByValueResult(ObjOperandId objId,
+ ValOperandId idId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* id = getOperand(idId);
+
+ auto* ins = MProxyGetByValue::New(alloc(), obj, id);
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitProxyHasPropResult(ObjOperandId objId,
+ ValOperandId idId,
+ bool hasOwn) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* id = getOperand(idId);
+
+ auto* ins = MProxyHasProp::New(alloc(), obj, id, hasOwn);
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitProxySet(ObjOperandId objId, uint32_t idOffset,
+ ValOperandId rhsId, bool strict) {
+ MDefinition* obj = getOperand(objId);
+ jsid id = idStubField(idOffset);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MProxySet::New(alloc(), obj, rhs, id, strict);
+ addEffectful(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitProxySetByValue(ObjOperandId objId,
+ ValOperandId idId,
+ ValOperandId rhsId,
+ bool strict) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* id = getOperand(idId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MProxySetByValue::New(alloc(), obj, id, rhs, strict);
+ addEffectful(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitCallSetArrayLength(ObjOperandId objId,
+ bool strict,
+ ValOperandId rhsId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MCallSetArrayLength::New(alloc(), obj, rhs, strict);
+ addEffectful(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitCallDOMGetterResult(ObjOperandId objId,
+ uint32_t jitInfoOffset) {
+ MDefinition* obj = getOperand(objId);
+ const JSJitInfo* jitInfo = jitInfoStubField(jitInfoOffset);
+
+ MInstruction* ins;
+ if (jitInfo->isAlwaysInSlot) {
+ ins = MGetDOMMember::New(alloc(), jitInfo, obj, nullptr, nullptr);
+ } else {
+ // TODO(post-Warp): realms, guard operands (movable?).
+ ins = MGetDOMProperty::New(alloc(), jitInfo, DOMObjectKind::Native,
+ (JS::Realm*)mirGen().realm->realmPtr(), obj,
+ nullptr, nullptr);
+ }
+
+ if (!ins) {
+ return false;
+ }
+
+ if (ins->isEffectful()) {
+ addEffectful(ins);
+ pushResult(ins);
+ return resumeAfter(ins);
+ }
+
+ add(ins);
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCallDOMSetter(ObjOperandId objId,
+ uint32_t jitInfoOffset,
+ ValOperandId rhsId) {
+ MDefinition* obj = getOperand(objId);
+ const JSJitInfo* jitInfo = jitInfoStubField(jitInfoOffset);
+ MDefinition* value = getOperand(rhsId);
+
+ MOZ_ASSERT(jitInfo->type() == JSJitInfo::Setter);
+ auto* set =
+ MSetDOMProperty::New(alloc(), jitInfo->setter, DOMObjectKind::Native,
+ (JS::Realm*)mirGen().realm->realmPtr(), obj, value);
+ addEffectful(set);
+ return resumeAfter(set);
+}
+
+bool WarpCacheIRTranspiler::emitLoadDOMExpandoValue(ObjOperandId objId,
+ ValOperandId resultId) {
+ MDefinition* proxy = getOperand(objId);
+
+ auto* ins = MLoadDOMExpandoValue::New(alloc(), proxy);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitLoadDOMExpandoValueGuardGeneration(
+ ObjOperandId objId, uint32_t expandoAndGenerationOffset,
+ uint32_t generationOffset, ValOperandId resultId) {
+ MDefinition* proxy = getOperand(objId);
+ JS::ExpandoAndGeneration* expandoAndGeneration =
+ expandoAndGenerationField(expandoAndGenerationOffset);
+ uint64_t generation = uint64StubField(generationOffset);
+
+ auto* ins = MLoadDOMExpandoValueGuardGeneration::New(
+ alloc(), proxy, expandoAndGeneration, generation);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitLoadDOMExpandoValueIgnoreGeneration(
+ ObjOperandId objId, ValOperandId resultId) {
+ MDefinition* proxy = getOperand(objId);
+
+ auto* ins = MLoadDOMExpandoValueIgnoreGeneration::New(alloc(), proxy);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardDOMExpandoMissingOrGuardShape(
+ ValOperandId expandoId, uint32_t shapeOffset) {
+ MDefinition* expando = getOperand(expandoId);
+ Shape* shape = shapeStubField(shapeOffset);
+
+ auto* ins = MGuardDOMExpandoMissingOrGuardShape::New(alloc(), expando, shape);
+ add(ins);
+
+ setOperand(expandoId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMegamorphicLoadSlotResult(ObjOperandId objId,
+ uint32_t nameOffset) {
+ MDefinition* obj = getOperand(objId);
+ PropertyName* name = stringStubField(nameOffset)->asAtom().asPropertyName();
+
+ auto* ins = MMegamorphicLoadSlot::New(alloc(), obj, NameToId(name));
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMegamorphicLoadSlotByValueResult(
+ ObjOperandId objId, ValOperandId idId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* id = getOperand(idId);
+
+ auto* ins = MMegamorphicLoadSlotByValue::New(alloc(), obj, id);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMegamorphicStoreSlot(ObjOperandId objId,
+ uint32_t idOffset,
+ ValOperandId rhsId,
+ bool strict) {
+ MDefinition* obj = getOperand(objId);
+ jsid id = idStubField(idOffset);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MMegamorphicStoreSlot::New(alloc(), obj, rhs, id, strict);
+ addEffectful(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitMegamorphicHasPropResult(ObjOperandId objId,
+ ValOperandId idId,
+ bool hasOwn) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* id = getOperand(idId);
+
+ auto* ins = MMegamorphicHasProp::New(alloc(), obj, id, hasOwn);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMegamorphicSetElement(ObjOperandId objId,
+ ValOperandId idId,
+ ValOperandId rhsId,
+ bool strict) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* id = getOperand(idId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MMegamorphicSetElement::New(alloc(), obj, id, rhs, strict);
+ addEffectful(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitObjectToIteratorResult(
+ ObjOperandId objId, uint32_t enumeratorsAddrOffset) {
+ MDefinition* obj = getOperand(objId);
+ NativeIteratorListHead* enumeratorsAddr =
+ nativeIteratorListHeadStubField(enumeratorsAddrOffset);
+
+ auto* ins = MObjectToIterator::New(alloc(), obj, enumeratorsAddr);
+ addEffectful(ins);
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitValueToIteratorResult(ValOperandId valId) {
+ MDefinition* val = getOperand(valId);
+
+ auto* ins = MValueToIterator::New(alloc(), val);
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsNotArrayBufferMaybeShared(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardIsNotArrayBufferMaybeShared::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsTypedArray(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardIsTypedArray::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardProto(ObjOperandId objId,
+ uint32_t protoOffset) {
+ MDefinition* def = getOperand(objId);
+ MDefinition* proto = objectStubField(protoOffset);
+
+ auto* ins = MGuardProto::New(alloc(), def, proto);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardDynamicSlotIsSpecificObject(
+ ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
+ size_t slotIndex = int32StubField(slotOffset);
+ MDefinition* obj = getOperand(objId);
+ MDefinition* expected = getOperand(expectedId);
+
+ auto* slots = MSlots::New(alloc(), obj);
+ add(slots);
+
+ auto* load = MLoadDynamicSlot::New(alloc(), slots, slotIndex);
+ add(load);
+
+ auto* unbox = MUnbox::New(alloc(), load, MIRType::Object, MUnbox::Fallible);
+ add(unbox);
+
+ auto* guard = MGuardObjectIdentity::New(alloc(), unbox, expected,
+ /* bailOnEquality = */ false);
+ add(guard);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadDynamicSlot(ValOperandId resultId,
+ ObjOperandId objId,
+ uint32_t slotOffset) {
+ size_t slotIndex = int32StubField(slotOffset);
+ MDefinition* obj = getOperand(objId);
+
+ auto* slots = MSlots::New(alloc(), obj);
+ add(slots);
+
+ auto* load = MLoadDynamicSlot::New(alloc(), slots, slotIndex);
+ add(load);
+
+ return defineOperand(resultId, load);
+}
+
+bool WarpCacheIRTranspiler::emitGuardDynamicSlotIsNotObject(
+ ObjOperandId objId, uint32_t slotOffset) {
+ size_t slotIndex = int32StubField(slotOffset);
+ MDefinition* obj = getOperand(objId);
+
+ auto* slots = MSlots::New(alloc(), obj);
+ add(slots);
+
+ auto* load = MLoadDynamicSlot::New(alloc(), slots, slotIndex);
+ add(load);
+
+ auto* guard = MGuardIsNotObject::New(alloc(), load);
+ add(guard);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardFixedSlotValue(ObjOperandId objId,
+ uint32_t offsetOffset,
+ uint32_t valOffset) {
+ MDefinition* obj = getOperand(objId);
+
+ size_t offset = int32StubField(offsetOffset);
+ Value val = valueStubField(valOffset);
+
+ uint32_t slotIndex = NativeObject::getFixedSlotIndexFromOffset(offset);
+
+ auto* load = MLoadFixedSlot::New(alloc(), obj, slotIndex);
+ add(load);
+
+ auto* guard = MGuardValue::New(alloc(), load, val);
+ add(guard);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardDynamicSlotValue(ObjOperandId objId,
+ uint32_t offsetOffset,
+ uint32_t valOffset) {
+ MDefinition* obj = getOperand(objId);
+
+ size_t offset = int32StubField(offsetOffset);
+ Value val = valueStubField(valOffset);
+
+ size_t slotIndex = NativeObject::getDynamicSlotIndexFromOffset(offset);
+
+ auto* slots = MSlots::New(alloc(), obj);
+ add(slots);
+
+ auto* load = MLoadDynamicSlot::New(alloc(), slots, slotIndex);
+ add(load);
+
+ auto* guard = MGuardValue::New(alloc(), load, val);
+ add(guard);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardSpecificAtom(StringOperandId strId,
+ uint32_t expectedOffset) {
+ MDefinition* str = getOperand(strId);
+ JSString* expected = stringStubField(expectedOffset);
+
+ auto* ins = MGuardSpecificAtom::New(alloc(), str, &expected->asAtom());
+ add(ins);
+
+ setOperand(strId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardSpecificSymbol(SymbolOperandId symId,
+ uint32_t expectedOffset) {
+ MDefinition* symbol = getOperand(symId);
+ JS::Symbol* expected = symbolStubField(expectedOffset);
+
+ auto* ins = MGuardSpecificSymbol::New(alloc(), symbol, expected);
+ add(ins);
+
+ setOperand(symId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardSpecificInt32(Int32OperandId numId,
+ int32_t expected) {
+ MDefinition* num = getOperand(numId);
+
+ auto* ins = MGuardSpecificInt32::New(alloc(), num, expected);
+ add(ins);
+
+ setOperand(numId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardSpecificObject(ObjOperandId objId,
+ uint32_t expectedOffset) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* expected = objectStubField(expectedOffset);
+
+ auto* ins = MGuardObjectIdentity::New(alloc(), obj, expected,
+ /* bailOnEquality = */ false);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardSpecificFunction(
+ ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* expected = objectStubField(expectedOffset);
+ uint32_t nargsAndFlags = uint32StubField(nargsAndFlagsOffset);
+
+ uint16_t nargs = nargsAndFlags >> 16;
+ FunctionFlags flags = FunctionFlags(uint16_t(nargsAndFlags));
+
+ auto* ins = MGuardSpecificFunction::New(alloc(), obj, expected, nargs, flags);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardFunctionScript(
+ ObjOperandId funId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
+ MDefinition* fun = getOperand(funId);
+ BaseScript* expected = baseScriptStubField(expectedOffset);
+ uint32_t nargsAndFlags = uint32StubField(nargsAndFlagsOffset);
+
+ uint16_t nargs = nargsAndFlags >> 16;
+ FunctionFlags flags = FunctionFlags(uint16_t(nargsAndFlags));
+
+ auto* ins = MGuardFunctionScript::New(alloc(), fun, expected, nargs, flags);
+ add(ins);
+
+ setOperand(funId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardStringToIndex(StringOperandId strId,
+ Int32OperandId resultId) {
+ MDefinition* str = getOperand(strId);
+
+ auto* ins = MGuardStringToIndex::New(alloc(), str);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardStringToInt32(StringOperandId strId,
+ Int32OperandId resultId) {
+ MDefinition* str = getOperand(strId);
+
+ auto* ins = MGuardStringToInt32::New(alloc(), str);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardStringToNumber(StringOperandId strId,
+ NumberOperandId resultId) {
+ MDefinition* str = getOperand(strId);
+
+ auto* ins = MGuardStringToDouble::New(alloc(), str);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardNoDenseElements(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardNoDenseElements::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
+ bool constructing) {
+ MDefinition* fun = getOperand(funId);
+ uint16_t expectedFlags = FunctionFlags::HasJitEntryFlags(constructing);
+ uint16_t unexpectedFlags = 0;
+
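+  // MGuardFunctionFlags bails out unless every expectedFlags bit is set and
+  // every unexpectedFlags bit is clear in the callee's flags.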
+ auto* ins =
+ MGuardFunctionFlags::New(alloc(), fun, expectedFlags, unexpectedFlags);
+ add(ins);
+
+ setOperand(funId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
+ MDefinition* fun = getOperand(funId);
+ uint16_t expectedFlags = 0;
+ uint16_t unexpectedFlags =
+ FunctionFlags::HasJitEntryFlags(/*isConstructing=*/false);
+
+ auto* ins =
+ MGuardFunctionFlags::New(alloc(), fun, expectedFlags, unexpectedFlags);
+ add(ins);
+
+ setOperand(funId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardFunctionIsNonBuiltinCtor(
+ ObjOperandId funId) {
+ MDefinition* fun = getOperand(funId);
+
+ auto* ins = MGuardFunctionIsNonBuiltinCtor::New(alloc(), fun);
+ add(ins);
+
+ setOperand(funId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
+ MDefinition* fun = getOperand(funId);
+ uint16_t expectedFlags = FunctionFlags::CONSTRUCTOR;
+ uint16_t unexpectedFlags = 0;
+
+ auto* ins =
+ MGuardFunctionFlags::New(alloc(), fun, expectedFlags, unexpectedFlags);
+ add(ins);
+
+ setOperand(funId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardNotClassConstructor(ObjOperandId funId) {
+ MDefinition* fun = getOperand(funId);
+
+ auto* ins =
+ MGuardFunctionKind::New(alloc(), fun, FunctionFlags::ClassConstructor,
+ /*bailOnEquality=*/true);
+ add(ins);
+
+ setOperand(funId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
+ MDefinition* array = getOperand(arrayId);
+
+ auto* ins = MGuardArrayIsPacked::New(alloc(), array);
+ add(ins);
+
+ setOperand(arrayId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
+ uint8_t flags) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardArgumentsObjectFlags::New(alloc(), obj, flags);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardNonDoubleType(ValOperandId inputId,
+ ValueType type) {
+ switch (type) {
+ case ValueType::String:
+ case ValueType::Symbol:
+ case ValueType::BigInt:
+ case ValueType::Int32:
+ case ValueType::Boolean:
+ return emitGuardTo(inputId, MIRTypeFromValueType(JSValueType(type)));
+ case ValueType::Undefined:
+ return emitGuardIsUndefined(inputId);
+ case ValueType::Null:
+ return emitGuardIsNull(inputId);
+ case ValueType::Double:
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ case ValueType::Object:
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+#endif
+ break;
+ }
+
+ MOZ_CRASH("unexpected type");
+}
+
+bool WarpCacheIRTranspiler::emitGuardTo(ValOperandId inputId, MIRType type) {
+ MDefinition* def = getOperand(inputId);
+ if (def->type() == type) {
+ return true;
+ }
+
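+  // A fallible unbox guards on the value's type and gives the operand the
+  // requested MIR type; it bails out if the type doesn't match.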
+ auto* ins = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
+ add(ins);
+
+ setOperand(inputId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardToObject(ValOperandId inputId) {
+ return emitGuardTo(inputId, MIRType::Object);
+}
+
+bool WarpCacheIRTranspiler::emitGuardToString(ValOperandId inputId) {
+ return emitGuardTo(inputId, MIRType::String);
+}
+
+bool WarpCacheIRTranspiler::emitGuardToSymbol(ValOperandId inputId) {
+ return emitGuardTo(inputId, MIRType::Symbol);
+}
+
+bool WarpCacheIRTranspiler::emitGuardToBigInt(ValOperandId inputId) {
+ return emitGuardTo(inputId, MIRType::BigInt);
+}
+
+bool WarpCacheIRTranspiler::emitGuardToBoolean(ValOperandId inputId) {
+ return emitGuardTo(inputId, MIRType::Boolean);
+}
+
+bool WarpCacheIRTranspiler::emitGuardToInt32(ValOperandId inputId) {
+ return emitGuardTo(inputId, MIRType::Int32);
+}
+
+bool WarpCacheIRTranspiler::emitGuardBooleanToInt32(ValOperandId inputId,
+ Int32OperandId resultId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MBooleanToInt32::New(alloc(), input);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsNumber(ValOperandId inputId) {
+ // Prefer MToDouble because it gets further optimizations downstream.
+ MDefinition* def = getOperand(inputId);
+ if (def->type() == MIRType::Int32) {
+ auto* ins = MToDouble::New(alloc(), def);
+ add(ins);
+
+ setOperand(inputId, ins);
+ return true;
+ }
+
+  // In Ion, unboxing to MIRType::Double also accepts Int32 values (converting
+  // them to double), so guarding to Double covers every number.
+ return emitGuardTo(inputId, MIRType::Double);
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+ if (input->type() == MIRType::Null || input->type() == MIRType::Undefined) {
+ return true;
+ }
+
+ auto* ins = MGuardNullOrUndefined::New(alloc(), input);
+ add(ins);
+
+ setOperand(inputId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsNull(ValOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+ if (input->type() == MIRType::Null) {
+ return true;
+ }
+
+ auto* ins = MGuardValue::New(alloc(), input, NullValue());
+ add(ins);
+ setOperand(inputId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsUndefined(ValOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+ if (input->type() == MIRType::Undefined) {
+ return true;
+ }
+
+ auto* ins = MGuardValue::New(alloc(), input, UndefinedValue());
+ add(ins);
+ setOperand(inputId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsExtensible(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardIsExtensible::New(alloc(), obj);
+ add(ins);
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardInt32IsNonNegative(
+ Int32OperandId indexId) {
+ MDefinition* index = getOperand(indexId);
+
+ auto* ins = MGuardInt32IsNonNegative::New(alloc(), index);
+ add(ins);
+ setOperand(indexId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIndexIsNotDenseElement(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* ins = MGuardIndexIsNotDenseElement::New(alloc(), obj, index);
+ add(ins);
+ setOperand(indexId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIndexIsValidUpdateOrAdd(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* ins = MGuardIndexIsValidUpdateOrAdd::New(alloc(), obj, index);
+ add(ins);
+ setOperand(indexId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCallAddOrUpdateSparseElementHelper(
+ ObjOperandId objId, Int32OperandId idId, ValOperandId rhsId, bool strict) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* id = getOperand(idId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MCallAddOrUpdateSparseElement::New(alloc(), obj, id, rhs, strict);
+ addEffectful(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
+ ValueTagOperandId rhsId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MGuardTagNotEqual::New(alloc(), lhs, rhs);
+ add(ins);
+
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardToInt32Index(ValOperandId inputId,
+ Int32OperandId resultId) {
+ MDefinition* input = getOperand(inputId);
+ auto* ins =
+ MToNumberInt32::New(alloc(), input, IntConversionInputKind::NumbersOnly);
+
+ // ToPropertyKey(-0) is "0", so we can silently convert -0 to 0 here.
+ ins->setNeedsNegativeZeroCheck(false);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitTruncateDoubleToUInt32(
+ NumberOperandId inputId, Int32OperandId resultId) {
+ MDefinition* input = getOperand(inputId);
+ auto* ins = MTruncateToInt32::New(alloc(), input);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardToInt32ModUint32(ValOperandId valId,
+ Int32OperandId resultId) {
+ MDefinition* input = getOperand(valId);
+ auto* ins = MTruncateToInt32::New(alloc(), input);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardToUint8Clamped(ValOperandId valId,
+ Int32OperandId resultId) {
+ MDefinition* input = getOperand(valId);
+ auto* ins = MClampToUint8::New(alloc(), input);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitToString(OperandId inputId,
+ StringOperandId resultId) {
+ MDefinition* input = getOperand(inputId);
+ auto* ins =
+ MToString::New(alloc(), input, MToString::SideEffectHandling::Bailout);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitInt32ToIntPtr(Int32OperandId inputId,
+ IntPtrOperandId resultId) {
+ MDefinition* input = getOperand(inputId);
+ auto* ins = MInt32ToIntPtr::New(alloc(), input);
+ add(ins);
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitGuardNumberToIntPtrIndex(
+ NumberOperandId inputId, bool supportOOB, IntPtrOperandId resultId) {
+ MDefinition* input = getOperand(inputId);
+ auto* ins = MGuardNumberToIntPtrIndex::New(alloc(), input, supportOOB);
+ add(ins);
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitCallInt32ToString(Int32OperandId inputId,
+ StringOperandId resultId) {
+ return emitToString(inputId, resultId);
+}
+
+bool WarpCacheIRTranspiler::emitCallNumberToString(NumberOperandId inputId,
+ StringOperandId resultId) {
+ return emitToString(inputId, resultId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32ToStringWithBaseResult(
+ Int32OperandId inputId, Int32OperandId baseId) {
+ MDefinition* input = getOperand(inputId);
+ MDefinition* base = getOperand(baseId);
+
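+  // Guard the base into [2, 36], the radix range Number.prototype.toString
+  // accepts.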
+ auto* guardedBase = MGuardInt32Range::New(alloc(), base, 2, 36);
+ add(guardedBase);
+
+ auto* ins = MInt32ToStringWithBase::New(alloc(), input, guardedBase);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitBooleanToString(BooleanOperandId inputId,
+ StringOperandId resultId) {
+ return emitToString(inputId, resultId);
+}
+
+bool WarpCacheIRTranspiler::emitBooleanToNumber(BooleanOperandId inputId,
+ NumberOperandId resultId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MToDouble::New(alloc(), input);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitLoadInt32Result(Int32OperandId valId) {
+ MDefinition* val = getOperand(valId);
+ MOZ_ASSERT(val->type() == MIRType::Int32);
+ pushResult(val);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadDoubleResult(NumberOperandId valId) {
+ MDefinition* val = getOperand(valId);
+ MOZ_ASSERT(val->type() == MIRType::Double);
+ pushResult(val);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadBigIntResult(BigIntOperandId valId) {
+ MDefinition* val = getOperand(valId);
+ MOZ_ASSERT(val->type() == MIRType::BigInt);
+ pushResult(val);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadObjectResult(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+ MOZ_ASSERT(obj->type() == MIRType::Object);
+ pushResult(obj);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadStringResult(StringOperandId strId) {
+ MDefinition* str = getOperand(strId);
+ MOZ_ASSERT(str->type() == MIRType::String);
+ pushResult(str);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadSymbolResult(SymbolOperandId symId) {
+ MDefinition* sym = getOperand(symId);
+ MOZ_ASSERT(sym->type() == MIRType::Symbol);
+ pushResult(sym);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadUndefinedResult() {
+ pushResult(constant(UndefinedValue()));
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadBooleanResult(bool val) {
+ pushResult(constant(BooleanValue(val)));
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadInt32Constant(uint32_t valOffset,
+ Int32OperandId resultId) {
+ int32_t val = int32StubField(valOffset);
+ auto* valConst = constant(Int32Value(val));
+ return defineOperand(resultId, valConst);
+}
+
+bool WarpCacheIRTranspiler::emitLoadDoubleConstant(uint32_t valOffset,
+ NumberOperandId resultId) {
+ double val = doubleStubField(valOffset);
+ auto* valConst = constant(DoubleValue(val));
+ return defineOperand(resultId, valConst);
+}
+
+bool WarpCacheIRTranspiler::emitLoadBooleanConstant(bool val,
+ BooleanOperandId resultId) {
+ auto* valConst = constant(BooleanValue(val));
+ return defineOperand(resultId, valConst);
+}
+
+bool WarpCacheIRTranspiler::emitLoadUndefined(ValOperandId resultId) {
+ auto* valConst = constant(UndefinedValue());
+ return defineOperand(resultId, valConst);
+}
+
+bool WarpCacheIRTranspiler::emitLoadConstantString(uint32_t strOffset,
+ StringOperandId resultId) {
+ JSString* val = stringStubField(strOffset);
+ auto* valConst = constant(StringValue(val));
+ return defineOperand(resultId, valConst);
+}
+
+bool WarpCacheIRTranspiler::emitLoadConstantStringResult(uint32_t strOffset) {
+ JSString* val = stringStubField(strOffset);
+ auto* valConst = constant(StringValue(val));
+ pushResult(valConst);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+ auto* typeOf = MTypeOf::New(alloc(), obj);
+ add(typeOf);
+
+ auto* ins = MTypeOfName::New(alloc(), typeOf);
+ add(ins);
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadEnclosingEnvironment(
+ ObjOperandId objId, ObjOperandId resultId) {
+ MDefinition* env = getOperand(objId);
+ auto* ins = MEnclosingEnvironment::New(alloc(), env);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitLoadObject(ObjOperandId resultId,
+ uint32_t objOffset) {
+ MInstruction* ins = objectStubField(objOffset);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitLoadProtoObject(ObjOperandId resultId,
+ uint32_t objOffset,
+ ObjOperandId receiverObjId) {
+ MInstruction* ins = objectStubField(objOffset);
+ if (ins->isConstant()) {
+ MDefinition* receiverObj = getOperand(receiverObjId);
+
+ ins = MConstantProto::New(alloc(), ins, receiverObj->skipObjectGuards());
+ add(ins);
+ }
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitLoadProto(ObjOperandId objId,
+ ObjOperandId resultId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MObjectStaticProto::New(alloc(), obj);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitLoadInstanceOfObjectResult(
+ ValOperandId lhsId, ObjOperandId protoId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* proto = getOperand(protoId);
+
+ auto* instanceOf = MInstanceOf::New(alloc(), lhs, proto);
+ addEffectful(instanceOf);
+
+ pushResult(instanceOf);
+ return resumeAfter(instanceOf);
+}
+
+bool WarpCacheIRTranspiler::emitLoadValueTag(ValOperandId valId,
+ ValueTagOperandId resultId) {
+ MDefinition* val = getOperand(valId);
+
+ auto* ins = MLoadValueTag::New(alloc(), val);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitLoadDynamicSlotResult(ObjOperandId objId,
+ uint32_t offsetOffset) {
+ int32_t offset = int32StubField(offsetOffset);
+
+ MDefinition* obj = getOperand(objId);
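+  // The stub field stores the slot's byte offset; convert it back to a slot
+  // index for the MIR load.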
+ size_t slotIndex = NativeObject::getDynamicSlotIndexFromOffset(offset);
+
+ auto* slots = MSlots::New(alloc(), obj);
+ add(slots);
+
+ auto* load = MLoadDynamicSlot::New(alloc(), slots, slotIndex);
+ add(load);
+
+ pushResult(load);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadFixedSlot(ValOperandId resultId,
+ ObjOperandId objId,
+ uint32_t offsetOffset) {
+ MDefinition* obj = getOperand(objId);
+
+ size_t offset = int32StubField(offsetOffset);
+ uint32_t slotIndex = NativeObject::getFixedSlotIndexFromOffset(offset);
+
+ auto* load = MLoadFixedSlot::New(alloc(), obj, slotIndex);
+ add(load);
+
+ return defineOperand(resultId, load);
+}
+
+bool WarpCacheIRTranspiler::emitLoadFixedSlotResult(ObjOperandId objId,
+ uint32_t offsetOffset) {
+ int32_t offset = int32StubField(offsetOffset);
+
+ MDefinition* obj = getOperand(objId);
+ uint32_t slotIndex = NativeObject::getFixedSlotIndexFromOffset(offset);
+
+ auto* load = MLoadFixedSlot::New(alloc(), obj, slotIndex);
+ add(load);
+
+ pushResult(load);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadFixedSlotTypedResult(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValueType type) {
+ int32_t offset = int32StubField(offsetOffset);
+
+ MDefinition* obj = getOperand(objId);
+ uint32_t slotIndex = NativeObject::getFixedSlotIndexFromOffset(offset);
+
+ auto* load = MLoadFixedSlot::New(alloc(), obj, slotIndex);
+ load->setResultType(MIRTypeFromValueType(JSValueType(type)));
+ add(load);
+
+ pushResult(load);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardIsNotUninitializedLexical(
+ ValOperandId valId) {
+ MDefinition* val = getOperand(valId);
+
+ auto* lexicalCheck = MLexicalCheck::New(alloc(), val);
+ add(lexicalCheck);
+
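+  // If an earlier compilation bailed out on a lexical check, keep this one
+  // from being hoisted so it doesn't trigger the same bailout again.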
+ if (snapshot().bailoutInfo().failedLexicalCheck()) {
+ lexicalCheck->setNotMovable();
+ }
+
+ setOperand(valId, lexicalCheck);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* elements = MElements::New(alloc(), obj);
+ add(elements);
+
+ auto* length = MArrayLength::New(alloc(), elements);
+ add(length);
+
+ pushResult(length);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadInt32ArrayLength(ObjOperandId objId,
+ Int32OperandId resultId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* elements = MElements::New(alloc(), obj);
+ add(elements);
+
+ auto* length = MArrayLength::New(alloc(), elements);
+ add(length);
+
+ return defineOperand(resultId, length);
+}
+
+bool WarpCacheIRTranspiler::emitLoadArgumentsObjectArgResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* load = MLoadArgumentsObjectArg::New(alloc(), obj, index);
+ add(load);
+
+ pushResult(load);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadArgumentsObjectArgHoleResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* load = MLoadArgumentsObjectArgHole::New(alloc(), obj, index);
+ add(load);
+
+ pushResult(load);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadArgumentsObjectArgExistsResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* ins = MInArgumentsObjectArg::New(alloc(), obj, index);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadArgumentsObjectLengthResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MArgumentsObjectLength::New(alloc(), obj);
+ add(length);
+
+ pushResult(length);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadArgumentsObjectLength(
+ ObjOperandId objId, Int32OperandId resultId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MArgumentsObjectLength::New(alloc(), obj);
+ add(length);
+
+ return defineOperand(resultId, length);
+}
+
+bool WarpCacheIRTranspiler::emitLoadBoundFunctionNumArgs(
+ ObjOperandId objId, Int32OperandId resultId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* numArgs = MBoundFunctionNumArgs::New(alloc(), obj);
+ add(numArgs);
+
+ return defineOperand(resultId, numArgs);
+}
+
+bool WarpCacheIRTranspiler::emitLoadBoundFunctionTarget(ObjOperandId objId,
+ ObjOperandId resultId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* target = MLoadFixedSlotAndUnbox::New(
+ alloc(), obj, BoundFunctionObject::targetSlot(), MUnbox::Mode::Infallible,
+ MIRType::Object);
+ add(target);
+
+ return defineOperand(resultId, target);
+}
+
+bool WarpCacheIRTranspiler::emitGuardBoundFunctionIsConstructor(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* guard = MGuardBoundFunctionIsConstructor::New(alloc(), obj);
+ add(guard);
+
+ setOperand(objId, guard);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardObjectIdentity(ObjOperandId obj1Id,
+ ObjOperandId obj2Id) {
+ MDefinition* obj1 = getOperand(obj1Id);
+ MDefinition* obj2 = getOperand(obj2Id);
+
+ auto* guard = MGuardObjectIdentity::New(alloc(), obj1, obj2,
+ /* bailOnEquality = */ false);
+ add(guard);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitArrayFromArgumentsObjectResult(
+ ObjOperandId objId, uint32_t shapeOffset) {
+ MDefinition* obj = getOperand(objId);
+ Shape* shape = shapeStubField(shapeOffset);
+ MOZ_ASSERT(shape);
+
+ auto* array = MArrayFromArgumentsObject::New(alloc(), obj, shape);
+ addEffectful(array);
+
+ pushResult(array);
+ return resumeAfter(array);
+}
+
+bool WarpCacheIRTranspiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MFunctionLength::New(alloc(), obj);
+ add(length);
+
+ pushResult(length);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadFunctionNameResult(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* name = MFunctionName::New(alloc(), obj);
+ add(name);
+
+ pushResult(name);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadArrayBufferByteLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MArrayBufferByteLength::New(alloc(), obj);
+ add(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ pushResult(lengthInt32);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadArrayBufferByteLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MArrayBufferByteLength::New(alloc(), obj);
+ add(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ pushResult(lengthDouble);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadArrayBufferViewLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Use a separate instruction for converting the length to Int32, so that we
+ // can fold the MArrayBufferViewLength instruction with length instructions
+ // added for bounds checks.
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ pushResult(lengthInt32);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadArrayBufferViewLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ pushResult(lengthDouble);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadStringLengthResult(StringOperandId strId) {
+ MDefinition* str = getOperand(strId);
+
+ auto* length = MStringLength::New(alloc(), str);
+ add(length);
+
+ pushResult(length);
+ return true;
+}
+
+MInstruction* WarpCacheIRTranspiler::addBoundsCheck(MDefinition* index,
+ MDefinition* length) {
+ MInstruction* check = MBoundsCheck::New(alloc(), index, length);
+ add(check);
+
+ if (snapshot().bailoutInfo().failedBoundsCheck()) {
+ check->setNotMovable();
+ }
+
+ if (JitOptions.spectreIndexMasking) {
+ // Use a separate MIR instruction for the index masking. Doing this as
+ // part of MBoundsCheck would be unsound because bounds checks can be
+ // optimized or eliminated completely. Consider this:
+ //
+ // for (var i = 0; i < x; i++)
+ // res = arr[i];
+ //
+ // If we can prove |x < arr.length|, we are able to eliminate the bounds
+ // check, but we should not get rid of the index masking because the
+ // |i < x| branch could still be mispredicted.
+ //
+ // Using a separate instruction lets us eliminate the bounds check
+ // without affecting the index masking.
+ check = MSpectreMaskIndex::New(alloc(), check, length);
+ add(check);
+ }
+
+ return check;
+}
+
+bool WarpCacheIRTranspiler::emitLoadDenseElementResult(ObjOperandId objId,
+ Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* elements = MElements::New(alloc(), obj);
+ add(elements);
+
+ auto* length = MInitializedLength::New(alloc(), elements);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* load = MLoadElement::New(alloc(), elements, index);
+ add(load);
+
+ pushResult(load);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadDenseElementHoleResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* elements = MElements::New(alloc(), obj);
+ add(elements);
+
+ auto* length = MInitializedLength::New(alloc(), elements);
+ add(length);
+
+ auto* load = MLoadElementHole::New(alloc(), elements, index, length);
+ add(load);
+
+ pushResult(load);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCallGetSparseElementResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* call = MCallGetSparseElement::New(alloc(), obj, index);
+ addEffectful(call);
+
+ pushResult(call);
+ return resumeAfter(call);
+}
+
+bool WarpCacheIRTranspiler::emitCallNativeGetElementResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* call = MCallNativeGetElement::New(alloc(), obj, index);
+ addEffectful(call);
+
+ pushResult(call);
+ return resumeAfter(call);
+}
+
+bool WarpCacheIRTranspiler::emitCallNativeGetElementSuperResult(
+ ObjOperandId objId, Int32OperandId indexId, ValOperandId receiverId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+ MDefinition* receiver = getOperand(receiverId);
+
+ auto* call = MCallNativeGetElementSuper::New(alloc(), obj, index, receiver);
+ addEffectful(call);
+
+ pushResult(call);
+ return resumeAfter(call);
+}
+
+bool WarpCacheIRTranspiler::emitLoadDenseElementExistsResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ // Get the elements vector.
+ auto* elements = MElements::New(alloc(), obj);
+ add(elements);
+
+ auto* length = MInitializedLength::New(alloc(), elements);
+ add(length);
+
+ // Check if id < initLength.
+ index = addBoundsCheck(index, length);
+
+ // And check elem[id] is not a hole.
+ auto* guard = MGuardElementNotHole::New(alloc(), elements, index);
+ add(guard);
+
+ pushResult(constant(BooleanValue(true)));
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadDenseElementHoleExistsResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ // Get the elements vector.
+ auto* elements = MElements::New(alloc(), obj);
+ add(elements);
+
+ auto* length = MInitializedLength::New(alloc(), elements);
+ add(length);
+
+  // Check that id < initLength and that elem[id] is not a hole.
+ auto* ins = MInArray::New(alloc(), elements, index, length, obj);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCallObjectHasSparseElementResult(
+ ObjOperandId objId, Int32OperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* ins = MCallObjectHasSparseElement::New(alloc(), obj, index);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadTypedArrayElementExistsResult(
+ ObjOperandId objId, IntPtrOperandId indexId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ // Unsigned comparison to catch negative indices.
+ auto* ins = MCompare::New(alloc(), index, length, JSOp::Lt,
+ MCompare::Compare_UIntPtr);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+static MIRType MIRTypeForArrayBufferViewRead(Scalar::Type arrayType,
+ bool forceDoubleForUint32) {
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ return MIRType::Int32;
+ case Scalar::Uint32:
+ return forceDoubleForUint32 ? MIRType::Double : MIRType::Int32;
+ case Scalar::Float32:
+ return MIRType::Float32;
+ case Scalar::Float64:
+ return MIRType::Double;
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ return MIRType::BigInt;
+ default:
+ break;
+ }
+ MOZ_CRASH("Unknown typed array type");
+}
+
+bool WarpCacheIRTranspiler::emitLoadTypedArrayElementResult(
+ ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
+ bool handleOOB, bool forceDoubleForUint32) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ if (handleOOB) {
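+    // MLoadTypedArrayElementHole performs its own bounds check and yields
+    // undefined for out-of-bounds reads.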
+ auto* load = MLoadTypedArrayElementHole::New(
+ alloc(), obj, index, elementType, forceDoubleForUint32);
+ add(load);
+
+ pushResult(load);
+ return true;
+ }
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(elements);
+
+ auto* load = MLoadUnboxedScalar::New(alloc(), elements, index, elementType);
+ load->setResultType(
+ MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32));
+ add(load);
+
+ pushResult(load);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLinearizeForCharAccess(
+ StringOperandId strId, Int32OperandId indexId, StringOperandId resultId) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* ins = MLinearizeForCharAccess::New(alloc(), str, index);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitLoadStringCharResult(StringOperandId strId,
+ Int32OperandId indexId,
+ bool handleOOB) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* index = getOperand(indexId);
+
+ if (handleOOB) {
+ auto* ins = MCharAtMaybeOutOfBounds::New(alloc(), str, index);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+ }
+
+ auto* length = MStringLength::New(alloc(), str);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* charCode = MCharCodeAt::New(alloc(), str, index);
+ add(charCode);
+
+ auto* fromCharCode = MFromCharCode::New(alloc(), charCode);
+ add(fromCharCode);
+
+ pushResult(fromCharCode);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadStringCharCodeResult(StringOperandId strId,
+ Int32OperandId indexId,
+ bool handleOOB) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* index = getOperand(indexId);
+
+ if (handleOOB) {
+ auto* ins = MCharCodeAtMaybeOutOfBounds::New(alloc(), str, index);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+ }
+
+ auto* length = MStringLength::New(alloc(), str);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* charCode = MCharCodeAt::New(alloc(), str, index);
+ add(charCode);
+
+ pushResult(charCode);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitNewStringObjectResult(
+ uint32_t templateObjectOffset, StringOperandId strId) {
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+ MDefinition* string = getOperand(strId);
+
+ auto* obj = MNewStringObject::New(alloc(), string, templateObj);
+ addEffectful(obj);
+
+ pushResult(obj);
+ return resumeAfter(obj);
+}
+
+bool WarpCacheIRTranspiler::emitStringFromCharCodeResult(
+ Int32OperandId codeId) {
+ MDefinition* code = getOperand(codeId);
+
+ auto* fromCharCode = MFromCharCode::New(alloc(), code);
+ add(fromCharCode);
+
+ pushResult(fromCharCode);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStringFromCodePointResult(
+ Int32OperandId codeId) {
+ MDefinition* code = getOperand(codeId);
+
+ auto* fromCodePoint = MFromCodePoint::New(alloc(), code);
+ add(fromCodePoint);
+
+ pushResult(fromCodePoint);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStringIndexOfResult(
+ StringOperandId strId, StringOperandId searchStrId) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* searchStr = getOperand(searchStrId);
+
+ auto* indexOf = MStringIndexOf::New(alloc(), str, searchStr);
+ add(indexOf);
+
+ pushResult(indexOf);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStringStartsWithResult(
+ StringOperandId strId, StringOperandId searchStrId) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* searchStr = getOperand(searchStrId);
+
+ auto* startsWith = MStringStartsWith::New(alloc(), str, searchStr);
+ add(startsWith);
+
+ pushResult(startsWith);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStringEndsWithResult(
+ StringOperandId strId, StringOperandId searchStrId) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* searchStr = getOperand(searchStrId);
+
+ auto* endsWith = MStringEndsWith::New(alloc(), str, searchStr);
+ add(endsWith);
+
+ pushResult(endsWith);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStringToLowerCaseResult(StringOperandId strId) {
+ MDefinition* str = getOperand(strId);
+
+ auto* convert =
+ MStringConvertCase::New(alloc(), str, MStringConvertCase::LowerCase);
+ add(convert);
+
+ pushResult(convert);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStringToUpperCaseResult(StringOperandId strId) {
+ MDefinition* str = getOperand(strId);
+
+ auto* convert =
+ MStringConvertCase::New(alloc(), str, MStringConvertCase::UpperCase);
+ add(convert);
+
+ pushResult(convert);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStoreDynamicSlot(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId) {
+ int32_t offset = int32StubField(offsetOffset);
+
+ MDefinition* obj = getOperand(objId);
+ size_t slotIndex = NativeObject::getDynamicSlotIndexFromOffset(offset);
+ MDefinition* rhs = getOperand(rhsId);
+
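+  // Post-write barrier: |rhs| may be a nursery-allocated GC thing being
+  // stored into a tenured object.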
+ auto* barrier = MPostWriteBarrier::New(alloc(), obj, rhs);
+ add(barrier);
+
+ auto* slots = MSlots::New(alloc(), obj);
+ add(slots);
+
+ auto* store = MStoreDynamicSlot::NewBarriered(alloc(), slots, slotIndex, rhs);
+ addEffectful(store);
+ return resumeAfter(store);
+}
+
+bool WarpCacheIRTranspiler::emitStoreFixedSlot(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId) {
+ int32_t offset = int32StubField(offsetOffset);
+
+ MDefinition* obj = getOperand(objId);
+ size_t slotIndex = NativeObject::getFixedSlotIndexFromOffset(offset);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* barrier = MPostWriteBarrier::New(alloc(), obj, rhs);
+ add(barrier);
+
+ auto* store = MStoreFixedSlot::NewBarriered(alloc(), obj, slotIndex, rhs);
+ addEffectful(store);
+ return resumeAfter(store);
+}
+
+bool WarpCacheIRTranspiler::emitStoreFixedSlotUndefinedResult(
+ ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId) {
+ int32_t offset = int32StubField(offsetOffset);
+
+ MDefinition* obj = getOperand(objId);
+ size_t slotIndex = NativeObject::getFixedSlotIndexFromOffset(offset);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* barrier = MPostWriteBarrier::New(alloc(), obj, rhs);
+ add(barrier);
+
+ auto* store = MStoreFixedSlot::NewBarriered(alloc(), obj, slotIndex, rhs);
+ addEffectful(store);
+
+ auto* undef = constant(UndefinedValue());
+ pushResult(undef);
+
+ return resumeAfter(store);
+}
+
+bool WarpCacheIRTranspiler::emitAddAndStoreSlotShared(
+ MAddAndStoreSlot::Kind kind, ObjOperandId objId, uint32_t offsetOffset,
+ ValOperandId rhsId, uint32_t newShapeOffset) {
+ int32_t offset = int32StubField(offsetOffset);
+ Shape* shape = shapeStubField(newShapeOffset);
+
+ MDefinition* obj = getOperand(objId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* barrier = MPostWriteBarrier::New(alloc(), obj, rhs);
+ add(barrier);
+
+ auto* addAndStore =
+ MAddAndStoreSlot::New(alloc(), obj, rhs, kind, offset, shape);
+ addEffectful(addAndStore);
+
+ return resumeAfter(addAndStore);
+}
+
+bool WarpCacheIRTranspiler::emitAddAndStoreFixedSlot(ObjOperandId objId,
+ uint32_t offsetOffset,
+ ValOperandId rhsId,
+ uint32_t newShapeOffset) {
+ return emitAddAndStoreSlotShared(MAddAndStoreSlot::Kind::FixedSlot, objId,
+ offsetOffset, rhsId, newShapeOffset);
+}
+
+bool WarpCacheIRTranspiler::emitAddAndStoreDynamicSlot(
+ ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset) {
+ return emitAddAndStoreSlotShared(MAddAndStoreSlot::Kind::DynamicSlot, objId,
+ offsetOffset, rhsId, newShapeOffset);
+}
+
+bool WarpCacheIRTranspiler::emitAllocateAndStoreDynamicSlot(
+ ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
+ uint32_t newShapeOffset, uint32_t numNewSlotsOffset) {
+ int32_t offset = int32StubField(offsetOffset);
+ Shape* shape = shapeStubField(newShapeOffset);
+ uint32_t numNewSlots = uint32StubField(numNewSlotsOffset);
+
+ MDefinition* obj = getOperand(objId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* barrier = MPostWriteBarrier::New(alloc(), obj, rhs);
+ add(barrier);
+
+ auto* allocateAndStore =
+ MAllocateAndStoreSlot::New(alloc(), obj, rhs, offset, shape, numNewSlots);
+ addEffectful(allocateAndStore);
+
+ return resumeAfter(allocateAndStore);
+}
+
+bool WarpCacheIRTranspiler::emitAddSlotAndCallAddPropHook(
+ ObjOperandId objId, ValOperandId rhsId, uint32_t newShapeOffset) {
+ Shape* shape = shapeStubField(newShapeOffset);
+ MDefinition* obj = getOperand(objId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* addProp = MAddSlotAndCallAddPropHook::New(alloc(), obj, rhs, shape);
+ addEffectful(addProp);
+
+ return resumeAfter(addProp);
+}
+
+bool WarpCacheIRTranspiler::emitStoreDenseElement(ObjOperandId objId,
+ Int32OperandId indexId,
+ ValOperandId rhsId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* elements = MElements::New(alloc(), obj);
+ add(elements);
+
+ auto* length = MInitializedLength::New(alloc(), elements);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* barrier = MPostWriteElementBarrier::New(alloc(), obj, rhs, index);
+ add(barrier);
+
+ bool needsHoleCheck = true;
+ auto* store = MStoreElement::NewBarriered(alloc(), elements, index, rhs,
+ needsHoleCheck);
+ addEffectful(store);
+ return resumeAfter(store);
+}
+
+bool WarpCacheIRTranspiler::emitStoreDenseElementHole(ObjOperandId objId,
+ Int32OperandId indexId,
+ ValOperandId rhsId,
+ bool handleAdd) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* elements = MElements::New(alloc(), obj);
+ add(elements);
+
+ MInstruction* store;
+ if (handleAdd) {
+ // TODO(post-Warp): Consider changing MStoreElementHole to match IC code.
+ store = MStoreElementHole::New(alloc(), obj, elements, index, rhs);
+ } else {
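+    // Without adds, the index must be within the initialized length, so an
+    // explicit bounds check plus a plain barriered store suffices.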
+ auto* length = MInitializedLength::New(alloc(), elements);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* barrier = MPostWriteElementBarrier::New(alloc(), obj, rhs, index);
+ add(barrier);
+
+ bool needsHoleCheck = false;
+ store = MStoreElement::NewBarriered(alloc(), elements, index, rhs,
+ needsHoleCheck);
+ }
+ addEffectful(store);
+
+ return resumeAfter(store);
+}
+
+bool WarpCacheIRTranspiler::emitStoreTypedArrayElement(ObjOperandId objId,
+ Scalar::Type elementType,
+ IntPtrOperandId indexId,
+ uint32_t rhsId,
+ bool handleOOB) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+ MDefinition* rhs = getOperand(ValOperandId(rhsId));
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ if (!handleOOB) {
+    // Only add an explicit bounds check here; in the handleOOB case
+    // MStoreTypedArrayElementHole performs its own bounds check and silently
+    // ignores out-of-bounds writes.
+ index = addBoundsCheck(index, length);
+ }
+
+ auto* elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(elements);
+
+ MInstruction* store;
+ if (handleOOB) {
+ store = MStoreTypedArrayElementHole::New(alloc(), elements, length, index,
+ rhs, elementType);
+ } else {
+ store =
+ MStoreUnboxedScalar::New(alloc(), elements, index, rhs, elementType);
+ }
+ addEffectful(store);
+ return resumeAfter(store);
+}
+
+void WarpCacheIRTranspiler::addDataViewData(MDefinition* obj, Scalar::Type type,
+ MDefinition** offset,
+ MInstruction** elements) {
+ MInstruction* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ // Adjust the length to account for accesses near the end of the dataview.
+ if (size_t byteSize = Scalar::byteSize(type); byteSize > 1) {
+ // To ensure |0 <= offset && offset + byteSize <= length|, first adjust the
+ // length by subtracting |byteSize - 1| (bailing out if that becomes
+ // negative).
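+    //
+    // For example, a Float64 access (byteSize 8) on a 16-byte DataView
+    // adjusts the length to 9, so the bounds check accepts offsets 0..8 and
+    // |offset + 8 <= 16| always holds.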
+ length = MAdjustDataViewLength::New(alloc(), length, byteSize);
+ add(length);
+ }
+
+ *offset = addBoundsCheck(*offset, length);
+
+ *elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(*elements);
+}
+
+bool WarpCacheIRTranspiler::emitLoadDataViewValueResult(
+ ObjOperandId objId, IntPtrOperandId offsetId,
+ BooleanOperandId littleEndianId, Scalar::Type elementType,
+ bool forceDoubleForUint32) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* offset = getOperand(offsetId);
+ MDefinition* littleEndian = getOperand(littleEndianId);
+
+ // Add bounds check and get the DataViewObject's elements.
+ MInstruction* elements;
+ addDataViewData(obj, elementType, &offset, &elements);
+
+ // Load the element.
+ MInstruction* load;
+ if (Scalar::byteSize(elementType) == 1) {
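+    // A single-byte access doesn't depend on endianness, so a plain scalar
+    // load suffices and |littleEndian| can be ignored.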
+ load = MLoadUnboxedScalar::New(alloc(), elements, offset, elementType);
+ } else {
+ load = MLoadDataViewElement::New(alloc(), elements, offset, littleEndian,
+ elementType);
+ }
+ add(load);
+
+ MIRType knownType =
+ MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32);
+ load->setResultType(knownType);
+
+ pushResult(load);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStoreDataViewValueResult(
+ ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
+ BooleanOperandId littleEndianId, Scalar::Type elementType) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* offset = getOperand(offsetId);
+ MDefinition* value = getOperand(ValOperandId(valueId));
+ MDefinition* littleEndian = getOperand(littleEndianId);
+
+ // Add bounds check and get the DataViewObject's elements.
+ MInstruction* elements;
+ addDataViewData(obj, elementType, &offset, &elements);
+
+ // Store the element.
+ MInstruction* store;
+ if (Scalar::byteSize(elementType) == 1) {
+ store =
+ MStoreUnboxedScalar::New(alloc(), elements, offset, value, elementType);
+ } else {
+ store = MStoreDataViewElement::New(alloc(), elements, offset, value,
+ littleEndian, elementType);
+ }
+ addEffectful(store);
+
+ pushResult(constant(UndefinedValue()));
+
+ return resumeAfter(store);
+}
+
+bool WarpCacheIRTranspiler::emitInt32IncResult(Int32OperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* constOne = MConstant::New(alloc(), Int32Value(1));
+ add(constOne);
+
+ auto* ins = MAdd::New(alloc(), input, constOne, MIRType::Int32);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitDoubleIncResult(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* constOne = MConstant::New(alloc(), DoubleValue(1.0));
+ add(constOne);
+
+ auto* ins = MAdd::New(alloc(), input, constOne, MIRType::Double);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitInt32DecResult(Int32OperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* constOne = MConstant::New(alloc(), Int32Value(1));
+ add(constOne);
+
+ auto* ins = MSub::New(alloc(), input, constOne, MIRType::Int32);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitDoubleDecResult(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* constOne = MConstant::New(alloc(), DoubleValue(1.0));
+ add(constOne);
+
+ auto* ins = MSub::New(alloc(), input, constOne, MIRType::Double);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitInt32NegationResult(Int32OperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
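+  // Negation is expressed as |input * -1| so that MMul's int32 overflow and
+  // negative-zero checks cover INT32_MIN and -0.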
+ auto* constNegOne = MConstant::New(alloc(), Int32Value(-1));
+ add(constNegOne);
+
+ auto* ins = MMul::New(alloc(), input, constNegOne, MIRType::Int32);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitDoubleNegationResult(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* constNegOne = MConstant::New(alloc(), DoubleValue(-1.0));
+ add(constNegOne);
+
+ auto* ins = MMul::New(alloc(), input, constNegOne, MIRType::Double);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitInt32NotResult(Int32OperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MBitNot::New(alloc(), input);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+template <typename T>
+bool WarpCacheIRTranspiler::emitDoubleBinaryArithResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = T::New(alloc(), lhs, rhs, MIRType::Double);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitDoubleAddResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ return emitDoubleBinaryArithResult<MAdd>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitDoubleSubResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ return emitDoubleBinaryArithResult<MSub>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitDoubleMulResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ return emitDoubleBinaryArithResult<MMul>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitDoubleDivResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ return emitDoubleBinaryArithResult<MDiv>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitDoubleModResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ return emitDoubleBinaryArithResult<MMod>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitDoublePowResult(NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ return emitDoubleBinaryArithResult<MPow>(lhsId, rhsId);
+}
+
+template <typename T>
+bool WarpCacheIRTranspiler::emitInt32BinaryArithResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = T::New(alloc(), lhs, rhs, MIRType::Int32);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitInt32AddResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MAdd>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32SubResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MSub>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32MulResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MMul>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32DivResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MDiv>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32ModResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MMod>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32PowResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MPow>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32BitOrResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MBitOr>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32BitXorResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MBitXor>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32BitAndResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MBitAnd>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MLsh>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32RightShiftResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitInt32BinaryArithResult<MRsh>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitInt32URightShiftResult(Int32OperandId lhsId,
+ Int32OperandId rhsId,
+ bool forceDouble) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ MIRType specialization = forceDouble ? MIRType::Double : MIRType::Int32;
+ auto* ins = MUrsh::New(alloc(), lhs, rhs, specialization);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+template <typename T>
+bool WarpCacheIRTranspiler::emitBigIntBinaryArithResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = T::New(alloc(), lhs, rhs);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitBigIntAddResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithResult<MBigIntAdd>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntSubResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithResult<MBigIntSub>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntMulResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithResult<MBigIntMul>(lhsId, rhsId);
+}
+
+template <typename T>
+bool WarpCacheIRTranspiler::emitBigIntBinaryArithEffectfulResult(
+ BigIntOperandId lhsId, BigIntOperandId rhsId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = T::New(alloc(), lhs, rhs);
+
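+  // BigInt division, modulo, and exponentiation can throw (e.g. on division
+  // by zero), in which case the node is effectful and needs a resume point.
+  // With operands known to be safe it is created as a pure instruction.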
+ if (ins->isEffectful()) {
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+ }
+
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitBigIntDivResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithEffectfulResult<MBigIntDiv>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntModResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithEffectfulResult<MBigIntMod>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntPowResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithEffectfulResult<MBigIntPow>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithResult<MBigIntBitAnd>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithResult<MBigIntBitOr>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithResult<MBigIntBitXor>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithResult<MBigIntLsh>(lhsId, rhsId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitBigIntBinaryArithResult<MBigIntRsh>(lhsId, rhsId);
+}
+
+template <typename T>
+bool WarpCacheIRTranspiler::emitBigIntUnaryArithResult(
+ BigIntOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = T::New(alloc(), input);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitBigIntIncResult(BigIntOperandId inputId) {
+ return emitBigIntUnaryArithResult<MBigIntIncrement>(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntDecResult(BigIntOperandId inputId) {
+ return emitBigIntUnaryArithResult<MBigIntDecrement>(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntNegationResult(BigIntOperandId inputId) {
+ return emitBigIntUnaryArithResult<MBigIntNegate>(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitBigIntNotResult(BigIntOperandId inputId) {
+ return emitBigIntUnaryArithResult<MBigIntBitNot>(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitCallStringConcatResult(StringOperandId lhsId,
+ StringOperandId rhsId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MConcat::New(alloc(), lhs, rhs);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCompareResult(
+ JSOp op, OperandId lhsId, OperandId rhsId,
+ MCompare::CompareType compareType) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MCompare::New(alloc(), lhs, rhs, op, compareType);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCompareInt32Result(JSOp op,
+ Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitCompareResult(op, lhsId, rhsId, MCompare::Compare_Int32);
+}
+
+bool WarpCacheIRTranspiler::emitCompareDoubleResult(JSOp op,
+ NumberOperandId lhsId,
+ NumberOperandId rhsId) {
+ return emitCompareResult(op, lhsId, rhsId, MCompare::Compare_Double);
+}
+
+bool WarpCacheIRTranspiler::emitCompareObjectResult(JSOp op, ObjOperandId lhsId,
+ ObjOperandId rhsId) {
+ MOZ_ASSERT(IsEqualityOp(op));
+ return emitCompareResult(op, lhsId, rhsId, MCompare::Compare_Object);
+}
+
+bool WarpCacheIRTranspiler::emitCompareStringResult(JSOp op,
+ StringOperandId lhsId,
+ StringOperandId rhsId) {
+ return emitCompareResult(op, lhsId, rhsId, MCompare::Compare_String);
+}
+
+bool WarpCacheIRTranspiler::emitCompareSymbolResult(JSOp op,
+ SymbolOperandId lhsId,
+ SymbolOperandId rhsId) {
+ MOZ_ASSERT(IsEqualityOp(op));
+ return emitCompareResult(op, lhsId, rhsId, MCompare::Compare_Symbol);
+}
+
+bool WarpCacheIRTranspiler::emitCompareBigIntResult(JSOp op,
+ BigIntOperandId lhsId,
+ BigIntOperandId rhsId) {
+ return emitCompareResult(op, lhsId, rhsId, MCompare::Compare_BigInt);
+}
+
+bool WarpCacheIRTranspiler::emitCompareBigIntInt32Result(JSOp op,
+ BigIntOperandId lhsId,
+ Int32OperandId rhsId) {
+ return emitCompareResult(op, lhsId, rhsId, MCompare::Compare_BigInt_Int32);
+}
+
+bool WarpCacheIRTranspiler::emitCompareBigIntNumberResult(
+ JSOp op, BigIntOperandId lhsId, NumberOperandId rhsId) {
+ return emitCompareResult(op, lhsId, rhsId, MCompare::Compare_BigInt_Double);
+}
+
+bool WarpCacheIRTranspiler::emitCompareBigIntStringResult(
+ JSOp op, BigIntOperandId lhsId, StringOperandId rhsId) {
+ return emitCompareResult(op, lhsId, rhsId, MCompare::Compare_BigInt_String);
+}
+
+bool WarpCacheIRTranspiler::emitCompareNullUndefinedResult(
+ JSOp op, bool isUndefined, ValOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ MOZ_ASSERT(IsEqualityOp(op));
+
+ // A previously emitted guard ensures that one side of the comparison
+ // is null or undefined.
+ MDefinition* cst =
+ isUndefined ? constant(UndefinedValue()) : constant(NullValue());
+ auto compareType =
+ isUndefined ? MCompare::Compare_Undefined : MCompare::Compare_Null;
+ auto* ins = MCompare::New(alloc(), input, cst, op, compareType);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCompareDoubleSameValueResult(
+ NumberOperandId lhsId, NumberOperandId rhsId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* sameValue = MSameValueDouble::New(alloc(), lhs, rhs);
+ add(sameValue);
+
+ pushResult(sameValue);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitSameValueResult(ValOperandId lhsId,
+ ValOperandId rhsId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* sameValue = MSameValue::New(alloc(), lhs, rhs);
+ add(sameValue);
+
+ pushResult(sameValue);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitIndirectTruncateInt32Result(
+ Int32OperandId valId) {
+ MDefinition* val = getOperand(valId);
+ MOZ_ASSERT(val->type() == MIRType::Int32);
+
+ auto* truncate =
+ MLimitedTruncate::New(alloc(), val, TruncateKind::IndirectTruncate);
+ add(truncate);
+
+ pushResult(truncate);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathHypot2NumberResult(
+ NumberOperandId firstId, NumberOperandId secondId) {
+ MDefinitionVector vector(alloc());
+ if (!vector.reserve(2)) {
+ return false;
+ }
+
+ vector.infallibleAppend(getOperand(firstId));
+ vector.infallibleAppend(getOperand(secondId));
+
+ auto* ins = MHypot::New(alloc(), vector);
+ if (!ins) {
+ return false;
+ }
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathHypot3NumberResult(
+ NumberOperandId firstId, NumberOperandId secondId,
+ NumberOperandId thirdId) {
+ MDefinitionVector vector(alloc());
+ if (!vector.reserve(3)) {
+ return false;
+ }
+
+ vector.infallibleAppend(getOperand(firstId));
+ vector.infallibleAppend(getOperand(secondId));
+ vector.infallibleAppend(getOperand(thirdId));
+
+ auto* ins = MHypot::New(alloc(), vector);
+ if (!ins) {
+ return false;
+ }
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathHypot4NumberResult(
+ NumberOperandId firstId, NumberOperandId secondId, NumberOperandId thirdId,
+ NumberOperandId fourthId) {
+ MDefinitionVector vector(alloc());
+ if (!vector.reserve(4)) {
+ return false;
+ }
+
+ vector.infallibleAppend(getOperand(firstId));
+ vector.infallibleAppend(getOperand(secondId));
+ vector.infallibleAppend(getOperand(thirdId));
+ vector.infallibleAppend(getOperand(fourthId));
+
+ auto* ins = MHypot::New(alloc(), vector);
+ if (!ins) {
+ return false;
+ }
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathRandomResult(uint32_t rngOffset) {
+#ifdef DEBUG
+ // CodeGenerator uses CompileRealm::addressOfRandomNumberGenerator. Assert it
+ // matches the RNG pointer stored in the stub field.
+ const void* rng = rawPointerField(rngOffset);
+ MOZ_ASSERT(rng == mirGen().realm->addressOfRandomNumberGenerator());
+#endif
+
+ auto* ins = MRandom::New(alloc());
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitInt32MinMax(bool isMax, Int32OperandId firstId,
+ Int32OperandId secondId,
+ Int32OperandId resultId) {
+ MDefinition* first = getOperand(firstId);
+ MDefinition* second = getOperand(secondId);
+
+ auto* ins = MMinMax::New(alloc(), first, second, MIRType::Int32, isMax);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitNumberMinMax(bool isMax,
+ NumberOperandId firstId,
+ NumberOperandId secondId,
+ NumberOperandId resultId) {
+ MDefinition* first = getOperand(firstId);
+ MDefinition* second = getOperand(secondId);
+
+ auto* ins = MMinMax::New(alloc(), first, second, MIRType::Double, isMax);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+bool WarpCacheIRTranspiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId,
+ bool isMax) {
+ MDefinition* array = getOperand(arrayId);
+
+ auto* ins = MMinMaxArray::New(alloc(), array, MIRType::Int32, isMax);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId,
+ bool isMax) {
+ MDefinition* array = getOperand(arrayId);
+
+ auto* ins = MMinMaxArray::New(alloc(), array, MIRType::Double, isMax);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathAbsInt32Result(Int32OperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MAbs::New(alloc(), input, MIRType::Int32);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathAbsNumberResult(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MAbs::New(alloc(), input, MIRType::Double);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathClz32Result(Int32OperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MClz::New(alloc(), input, MIRType::Int32);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathSignInt32Result(Int32OperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MSign::New(alloc(), input, MIRType::Int32);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathSignNumberResult(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MSign::New(alloc(), input, MIRType::Double);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathSignNumberToInt32Result(
+ NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MSign::New(alloc(), input, MIRType::Int32);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathImulResult(Int32OperandId lhsId,
+ Int32OperandId rhsId) {
+ MDefinition* lhs = getOperand(lhsId);
+ MDefinition* rhs = getOperand(rhsId);
+
+ auto* ins = MMul::New(alloc(), lhs, rhs, MIRType::Int32, MMul::Integer);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathFloorToInt32Result(
+ NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MFloor::New(alloc(), input);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathCeilToInt32Result(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MCeil::New(alloc(), input);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathTruncToInt32Result(
+ NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MTrunc::New(alloc(), input);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathRoundToInt32Result(
+ NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MRound::New(alloc(), input);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathSqrtNumberResult(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MSqrt::New(alloc(), input, MIRType::Double);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathFRoundNumberResult(
+ NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MToFloat32::New(alloc(), input);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathAtan2NumberResult(NumberOperandId yId,
+ NumberOperandId xId) {
+ MDefinition* y = getOperand(yId);
+ MDefinition* x = getOperand(xId);
+
+ auto* ins = MAtan2::New(alloc(), y, x);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathFunctionNumberResult(
+ NumberOperandId inputId, UnaryMathFunction fun) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MMathFunction::New(alloc(), input, fun);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathFloorNumberResult(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ MInstruction* ins;
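+ // Use a single rounding instruction when the target assembler supports this
+ // rounding mode; otherwise fall back to a generic math-function call.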
+ if (MNearbyInt::HasAssemblerSupport(RoundingMode::Down)) {
+ ins = MNearbyInt::New(alloc(), input, MIRType::Double, RoundingMode::Down);
+ } else {
+ ins = MMathFunction::New(alloc(), input, UnaryMathFunction::Floor);
+ }
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathCeilNumberResult(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ MInstruction* ins;
+ if (MNearbyInt::HasAssemblerSupport(RoundingMode::Up)) {
+ ins = MNearbyInt::New(alloc(), input, MIRType::Double, RoundingMode::Up);
+ } else {
+ ins = MMathFunction::New(alloc(), input, UnaryMathFunction::Ceil);
+ }
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMathTruncNumberResult(NumberOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ MInstruction* ins;
+ if (MNearbyInt::HasAssemblerSupport(RoundingMode::TowardsZero)) {
+ ins = MNearbyInt::New(alloc(), input, MIRType::Double,
+ RoundingMode::TowardsZero);
+ } else {
+ ins = MMathFunction::New(alloc(), input, UnaryMathFunction::Trunc);
+ }
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitNumberParseIntResult(StringOperandId strId,
+ Int32OperandId radixId) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* radix = getOperand(radixId);
+
+ auto* ins = MNumberParseInt::New(alloc(), str, radix);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitDoubleParseIntResult(NumberOperandId numId) {
+ MDefinition* num = getOperand(numId);
+
+ auto* ins = MDoubleParseInt::New(alloc(), num);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitObjectToStringResult(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MObjectClassToString::New(alloc(), obj);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitReflectGetPrototypeOfResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGetPrototypeOf::New(alloc(), obj);
+ addEffectful(ins);
+ pushResult(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitArrayPush(ObjOperandId objId,
+ ValOperandId rhsId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* value = getOperand(rhsId);
+
+ auto* elements = MElements::New(alloc(), obj);
+ add(elements);
+
+ auto* initLength = MInitializedLength::New(alloc(), elements);
+ add(initLength);
+
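+ // Post-write barrier for storing |value| into |obj|'s elements; the actual
+ // store and length update are done by MArrayPush below.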
+ auto* barrier =
+ MPostWriteElementBarrier::New(alloc(), obj, value, initLength);
+ add(barrier);
+
+ auto* ins = MArrayPush::New(alloc(), obj, value);
+ addEffectful(ins);
+ pushResult(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitArrayJoinResult(ObjOperandId objId,
+ StringOperandId sepId) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* sep = getOperand(sepId);
+
+ auto* join = MArrayJoin::New(alloc(), obj, sep);
+ addEffectful(join);
+
+ pushResult(join);
+ return resumeAfter(join);
+}
+
+bool WarpCacheIRTranspiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
+ MDefinition* array = getOperand(arrayId);
+
+ auto* ins = MArrayPopShift::New(alloc(), array, MArrayPopShift::Pop);
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
+ MDefinition* array = getOperand(arrayId);
+
+ auto* ins = MArrayPopShift::New(alloc(), array, MArrayPopShift::Shift);
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitPackedArraySliceResult(
+ uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
+ Int32OperandId endId) {
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+
+ MDefinition* array = getOperand(arrayId);
+ MDefinition* begin = getOperand(beginId);
+ MDefinition* end = getOperand(endId);
+
+ // TODO: support pre-tenuring.
+ gc::Heap heap = gc::Heap::Default;
+
+ auto* ins = MArraySlice::New(alloc(), array, begin, end, templateObj, heap);
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitArgumentsSliceResult(
+ uint32_t templateObjectOffset, ObjOperandId argsId, Int32OperandId beginId,
+ Int32OperandId endId) {
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+
+ MDefinition* args = getOperand(argsId);
+ MDefinition* begin = getOperand(beginId);
+ MDefinition* end = getOperand(endId);
+
+ // TODO: support pre-tenuring.
+ gc::Heap heap = gc::Heap::Default;
+
+ auto* ins =
+ MArgumentsSlice::New(alloc(), args, begin, end, templateObj, heap);
+ addEffectful(ins);
+
+ pushResult(ins);
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitHasClassResult(ObjOperandId objId,
+ uint32_t claspOffset) {
+ MDefinition* obj = getOperand(objId);
+ const JSClass* clasp = classStubField(claspOffset);
+
+ auto* hasClass = MHasClass::New(alloc(), obj, clasp);
+ add(hasClass);
+
+ pushResult(hasClass);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCallRegExpMatcherResult(
+ ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
+ uint32_t stubOffset) {
+ MDefinition* regexp = getOperand(regexpId);
+ MDefinition* input = getOperand(inputId);
+ MDefinition* lastIndex = getOperand(lastIndexId);
+
+ auto* matcher = MRegExpMatcher::New(alloc(), regexp, input, lastIndex);
+ addEffectful(matcher);
+ pushResult(matcher);
+
+ return resumeAfter(matcher);
+}
+
+bool WarpCacheIRTranspiler::emitCallRegExpSearcherResult(
+ ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
+ uint32_t stubOffset) {
+ MDefinition* regexp = getOperand(regexpId);
+ MDefinition* input = getOperand(inputId);
+ MDefinition* lastIndex = getOperand(lastIndexId);
+
+ auto* searcher = MRegExpSearcher::New(alloc(), regexp, input, lastIndex);
+ addEffectful(searcher);
+ pushResult(searcher);
+
+ return resumeAfter(searcher);
+}
+
+bool WarpCacheIRTranspiler::emitRegExpBuiltinExecMatchResult(
+ ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
+ MDefinition* regexp = getOperand(regexpId);
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MRegExpExecMatch::New(alloc(), regexp, input);
+ addEffectful(ins);
+ pushResult(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitRegExpBuiltinExecTestResult(
+ ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
+ MDefinition* regexp = getOperand(regexpId);
+ MDefinition* input = getOperand(inputId);
+
+ auto* ins = MRegExpExecTest::New(alloc(), regexp, input);
+ addEffectful(ins);
+ pushResult(ins);
+
+ return resumeAfter(ins);
+}
+
+MInstruction* WarpCacheIRTranspiler::convertToBoolean(MDefinition* input) {
+ // Convert to bool with the '!!' idiom.
+ //
+ // The FoldTests and GVN passes both specifically handle this pattern. If you
+ // change this code, make sure to update FoldTests and GVN, too.
+
+ auto* resultInverted = MNot::New(alloc(), input);
+ add(resultInverted);
+ auto* result = MNot::New(alloc(), resultInverted);
+ add(result);
+
+ return result;
+}
+
+bool WarpCacheIRTranspiler::emitRegExpFlagResult(ObjOperandId regexpId,
+ int32_t flagsMask) {
+ MDefinition* regexp = getOperand(regexpId);
+
+ auto* flags = MLoadFixedSlot::New(alloc(), regexp, RegExpObject::flagsSlot());
+ flags->setResultType(MIRType::Int32);
+ add(flags);
+
+ auto* mask = MConstant::New(alloc(), Int32Value(flagsMask));
+ add(mask);
+
+ auto* maskedFlag = MBitAnd::New(alloc(), flags, mask, MIRType::Int32);
+ add(maskedFlag);
+
+ auto* result = convertToBoolean(maskedFlag);
+
+ pushResult(result);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCallSubstringKernelResult(
+ StringOperandId strId, Int32OperandId beginId, Int32OperandId lengthId) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* begin = getOperand(beginId);
+ MDefinition* length = getOperand(lengthId);
+
+ auto* substr = MSubstr::New(alloc(), str, begin, length);
+ add(substr);
+
+ pushResult(substr);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStringReplaceStringResult(
+ StringOperandId strId, StringOperandId patternId,
+ StringOperandId replacementId) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* pattern = getOperand(patternId);
+ MDefinition* replacement = getOperand(replacementId);
+
+ auto* replace = MStringReplace::New(alloc(), str, pattern, replacement);
+ add(replace);
+
+ pushResult(replace);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitStringSplitStringResult(
+ StringOperandId strId, StringOperandId separatorId) {
+ MDefinition* str = getOperand(strId);
+ MDefinition* separator = getOperand(separatorId);
+
+ auto* split = MStringSplit::New(alloc(), str, separator);
+ add(split);
+
+ pushResult(split);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitRegExpPrototypeOptimizableResult(
+ ObjOperandId protoId) {
+ MDefinition* proto = getOperand(protoId);
+
+ auto* optimizable = MRegExpPrototypeOptimizable::New(alloc(), proto);
+ add(optimizable);
+
+ pushResult(optimizable);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitRegExpInstanceOptimizableResult(
+ ObjOperandId regexpId, ObjOperandId protoId) {
+ MDefinition* regexp = getOperand(regexpId);
+ MDefinition* proto = getOperand(protoId);
+
+ auto* optimizable = MRegExpInstanceOptimizable::New(alloc(), regexp, proto);
+ add(optimizable);
+
+ pushResult(optimizable);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGetFirstDollarIndexResult(
+ StringOperandId strId) {
+ MDefinition* str = getOperand(strId);
+
+ auto* firstDollarIndex = MGetFirstDollarIndex::New(alloc(), str);
+ add(firstDollarIndex);
+
+ pushResult(firstDollarIndex);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitIsArrayResult(ValOperandId inputId) {
+ MDefinition* value = getOperand(inputId);
+
+ auto* isArray = MIsArray::New(alloc(), value);
+ addEffectful(isArray);
+ pushResult(isArray);
+
+ return resumeAfter(isArray);
+}
+
+bool WarpCacheIRTranspiler::emitIsObjectResult(ValOperandId inputId) {
+ MDefinition* value = getOperand(inputId);
+
+ if (value->type() == MIRType::Object) {
+ pushResult(constant(BooleanValue(true)));
+ } else {
+ auto* isObject = MIsObject::New(alloc(), value);
+ add(isObject);
+ pushResult(isObject);
+ }
+
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitIsPackedArrayResult(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* isPackedArray = MIsPackedArray::New(alloc(), obj);
+ add(isPackedArray);
+
+ pushResult(isPackedArray);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitIsCallableResult(ValOperandId inputId) {
+ MDefinition* value = getOperand(inputId);
+
+ auto* isCallable = MIsCallable::New(alloc(), value);
+ add(isCallable);
+
+ pushResult(isCallable);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitIsConstructorResult(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* isConstructor = MIsConstructor::New(alloc(), obj);
+ add(isConstructor);
+
+ pushResult(isConstructor);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitIsCrossRealmArrayConstructorResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MIsCrossRealmArrayConstructor::New(alloc(), obj);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitIsTypedArrayResult(ObjOperandId objId,
+ bool isPossiblyWrapped) {
+ MDefinition* obj = getOperand(objId);
+
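+ // The possibly-wrapped case may have to call into the VM to unwrap the
+ // object, so the instruction is marked effectful and gets a resume point.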
+ auto* ins = MIsTypedArray::New(alloc(), obj, isPossiblyWrapped);
+ if (isPossiblyWrapped) {
+ addEffectful(ins);
+ } else {
+ add(ins);
+ }
+
+ pushResult(ins);
+
+ if (isPossiblyWrapped) {
+ if (!resumeAfter(ins)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitArrayBufferViewByteOffsetInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* byteOffset = MArrayBufferViewByteOffset::New(alloc(), obj);
+ add(byteOffset);
+
+ auto* byteOffsetInt32 = MNonNegativeIntPtrToInt32::New(alloc(), byteOffset);
+ add(byteOffsetInt32);
+
+ pushResult(byteOffsetInt32);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitArrayBufferViewByteOffsetDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* byteOffset = MArrayBufferViewByteOffset::New(alloc(), obj);
+ add(byteOffset);
+
+ auto* byteOffsetDouble = MIntPtrToDouble::New(alloc(), byteOffset);
+ add(byteOffsetDouble);
+
+ pushResult(byteOffsetDouble);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitTypedArrayByteLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ auto* size = MTypedArrayElementSize::New(alloc(), obj);
+ add(size);
+
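+ // byteLength = length * elementSize. Both factors are non-negative, so the
+ // multiplication can never produce negative zero.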
+ auto* mul = MMul::New(alloc(), lengthInt32, size, MIRType::Int32);
+ mul->setCanBeNegativeZero(false);
+ add(mul);
+
+ pushResult(mul);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitTypedArrayByteLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ auto* size = MTypedArrayElementSize::New(alloc(), obj);
+ add(size);
+
+ auto* mul = MMul::New(alloc(), lengthDouble, size, MIRType::Double);
+ mul->setCanBeNegativeZero(false);
+ add(mul);
+
+ pushResult(mul);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitTypedArrayElementSizeResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MTypedArrayElementSize::New(alloc(), obj);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardHasAttachedArrayBuffer(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardHasAttachedArrayBuffer::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitIsTypedArrayConstructorResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MIsTypedArrayConstructor::New(alloc(), obj);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGetNextMapSetEntryForIteratorResult(
+ ObjOperandId iterId, ObjOperandId resultArrId, bool isMap) {
+ MDefinition* iter = getOperand(iterId);
+ MDefinition* resultArr = getOperand(resultArrId);
+
+ MGetNextEntryForIterator::Mode mode =
+ isMap ? MGetNextEntryForIterator::Map : MGetNextEntryForIterator::Set;
+ auto* ins = MGetNextEntryForIterator::New(alloc(), iter, resultArr, mode);
+ addEffectful(ins);
+ pushResult(ins);
+
+ return resumeAfter(ins);
+}
+
+bool WarpCacheIRTranspiler::emitFrameIsConstructingResult() {
+ if (const CallInfo* callInfo = builder_->inlineCallInfo()) {
+ auto* ins = constant(BooleanValue(callInfo->constructing()));
+ pushResult(ins);
+ return true;
+ }
+
+ auto* ins = MIsConstructing::New(alloc());
+ add(ins);
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitNewIteratorResult(
+ MNewIterator::Type type, uint32_t templateObjectOffset) {
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+
+ auto* templateConst = constant(ObjectValue(*templateObj));
+ auto* iter = MNewIterator::New(alloc(), templateConst, type);
+ add(iter);
+
+ pushResult(iter);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitNewArrayIteratorResult(
+ uint32_t templateObjectOffset) {
+ return emitNewIteratorResult(MNewIterator::ArrayIterator,
+ templateObjectOffset);
+}
+
+bool WarpCacheIRTranspiler::emitNewStringIteratorResult(
+ uint32_t templateObjectOffset) {
+ return emitNewIteratorResult(MNewIterator::StringIterator,
+ templateObjectOffset);
+}
+
+bool WarpCacheIRTranspiler::emitNewRegExpStringIteratorResult(
+ uint32_t templateObjectOffset) {
+ return emitNewIteratorResult(MNewIterator::RegExpStringIterator,
+ templateObjectOffset);
+}
+
+bool WarpCacheIRTranspiler::emitObjectCreateResult(
+ uint32_t templateObjectOffset) {
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+
+ auto* templateConst = constant(ObjectValue(*templateObj));
+
+ // TODO: support pre-tenuring.
+ gc::Heap heap = gc::Heap::Default;
+ auto* obj =
+ MNewObject::New(alloc(), templateConst, heap, MNewObject::ObjectCreate);
+ addEffectful(obj);
+
+ pushResult(obj);
+ return resumeAfter(obj);
+}
+
+bool WarpCacheIRTranspiler::emitNewArrayFromLengthResult(
+ uint32_t templateObjectOffset, Int32OperandId lengthId) {
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+ MDefinition* length = getOperand(lengthId);
+
+ // TODO: support pre-tenuring.
+ gc::Heap heap = gc::Heap::Default;
+
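+ // Fast path: if the length is a non-negative constant matching the template
+ // object's length, allocate the array directly (using the VM helper when the
+ // elements don't fit inline). Otherwise emit the effectful dynamic-length
+ // allocation below.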
+ if (length->isConstant()) {
+ int32_t lenInt32 = length->toConstant()->toInt32();
+ if (lenInt32 >= 0 &&
+ uint32_t(lenInt32) == templateObj->as<ArrayObject>().length()) {
+ uint32_t len = uint32_t(lenInt32);
+ auto* templateConst = constant(ObjectValue(*templateObj));
+
+ size_t inlineLength =
+ gc::GetGCKindSlots(templateObj->asTenured().getAllocKind()) -
+ ObjectElements::VALUES_PER_HEADER;
+
+ MNewArray* obj;
+ if (len > inlineLength) {
+ obj = MNewArray::NewVM(alloc(), len, templateConst, heap);
+ } else {
+ obj = MNewArray::New(alloc(), len, templateConst, heap);
+ }
+ add(obj);
+ pushResult(obj);
+ return true;
+ }
+ }
+
+ auto* obj = MNewArrayDynamicLength::New(alloc(), length, templateObj, heap);
+ addEffectful(obj);
+ pushResult(obj);
+ return resumeAfter(obj);
+}
+
+bool WarpCacheIRTranspiler::emitNewTypedArrayFromLengthResult(
+ uint32_t templateObjectOffset, Int32OperandId lengthId) {
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+ MDefinition* length = getOperand(lengthId);
+
+ // TODO: support pre-tenuring.
+ gc::Heap heap = gc::Heap::Default;
+
+ if (length->isConstant()) {
+ int32_t len = length->toConstant()->toInt32();
+ if (len > 0 &&
+ uint32_t(len) == templateObj->as<TypedArrayObject>().length()) {
+ auto* templateConst = constant(ObjectValue(*templateObj));
+ auto* obj = MNewTypedArray::New(alloc(), templateConst, heap);
+ add(obj);
+ pushResult(obj);
+ return true;
+ }
+ }
+
+ auto* obj =
+ MNewTypedArrayDynamicLength::New(alloc(), length, templateObj, heap);
+ addEffectful(obj);
+ pushResult(obj);
+ return resumeAfter(obj);
+}
+
+bool WarpCacheIRTranspiler::emitNewTypedArrayFromArrayBufferResult(
+ uint32_t templateObjectOffset, ObjOperandId bufferId,
+ ValOperandId byteOffsetId, ValOperandId lengthId) {
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+ MDefinition* buffer = getOperand(bufferId);
+ MDefinition* byteOffset = getOperand(byteOffsetId);
+ MDefinition* length = getOperand(lengthId);
+
+ // TODO: support pre-tenuring.
+ gc::Heap heap = gc::Heap::Default;
+
+ auto* obj = MNewTypedArrayFromArrayBuffer::New(alloc(), buffer, byteOffset,
+ length, templateObj, heap);
+ addEffectful(obj);
+
+ pushResult(obj);
+ return resumeAfter(obj);
+}
+
+bool WarpCacheIRTranspiler::emitNewTypedArrayFromArrayResult(
+ uint32_t templateObjectOffset, ObjOperandId arrayId) {
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+ MDefinition* array = getOperand(arrayId);
+
+ // TODO: support pre-tenuring.
+ gc::Heap heap = gc::Heap::Default;
+
+ auto* obj = MNewTypedArrayFromArray::New(alloc(), array, templateObj, heap);
+ addEffectful(obj);
+
+ pushResult(obj);
+ return resumeAfter(obj);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsCompareExchangeResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
+ uint32_t replacementId, Scalar::Type elementType) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+ MDefinition* expected = getOperand(ValOperandId(expectedId));
+ MDefinition* replacement = getOperand(ValOperandId(replacementId));
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(elements);
+
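+ // Uint32 values can exceed INT32_MAX, so for Uint32 element types the result
+ // is read back as a Double. The same applies to the other atomics below.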
+ bool forceDoubleForUint32 = true;
+ MIRType knownType =
+ MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32);
+
+ auto* cas = MCompareExchangeTypedArrayElement::New(
+ alloc(), elements, index, elementType, expected, replacement);
+ cas->setResultType(knownType);
+ addEffectful(cas);
+
+ pushResult(cas);
+ return resumeAfter(cas);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsExchangeResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+ MDefinition* value = getOperand(ValOperandId(valueId));
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(elements);
+
+ bool forceDoubleForUint32 = true;
+ MIRType knownType =
+ MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32);
+
+ auto* exchange = MAtomicExchangeTypedArrayElement::New(
+ alloc(), elements, index, value, elementType);
+ exchange->setResultType(knownType);
+ addEffectful(exchange);
+
+ pushResult(exchange);
+ return resumeAfter(exchange);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsBinaryOp(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect, AtomicOp op) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+ MDefinition* value = getOperand(ValOperandId(valueId));
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(elements);
+
+ bool forceDoubleForUint32 = true;
+ MIRType knownType =
+ MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32);
+
+ auto* binop = MAtomicTypedArrayElementBinop::New(
+ alloc(), op, elements, index, elementType, value, forEffect);
+ if (!forEffect) {
+ binop->setResultType(knownType);
+ }
+ addEffectful(binop);
+
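+ // When the operation is used only for its effect, no value is read back and
+ // the expression result is simply |undefined|.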
+ if (!forEffect) {
+ pushResult(binop);
+ } else {
+ pushResult(constant(UndefinedValue()));
+ }
+ return resumeAfter(binop);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsAddResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
+ AtomicFetchAddOp);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsSubResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
+ AtomicFetchSubOp);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsAndResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
+ AtomicFetchAndOp);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsOrResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
+ AtomicFetchOrOp);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsXorResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType,
+ bool forEffect) {
+ return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
+ AtomicFetchXorOp);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsLoadResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ Scalar::Type elementType) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(elements);
+
+ bool forceDoubleForUint32 = true;
+ MIRType knownType =
+ MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32);
+
+ auto* load = MLoadUnboxedScalar::New(alloc(), elements, index, elementType,
+ DoesRequireMemoryBarrier);
+ load->setResultType(knownType);
+ addEffectful(load);
+
+ pushResult(load);
+ return resumeAfter(load);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsStoreResult(ObjOperandId objId,
+ IntPtrOperandId indexId,
+ uint32_t valueId,
+ Scalar::Type elementType) {
+ MDefinition* obj = getOperand(objId);
+ MDefinition* index = getOperand(indexId);
+ MDefinition* value = getOperand(ValOperandId(valueId));
+
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ index = addBoundsCheck(index, length);
+
+ auto* elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(elements);
+
+ auto* store = MStoreUnboxedScalar::New(alloc(), elements, index, value,
+ elementType, DoesRequireMemoryBarrier);
+ addEffectful(store);
+
+ pushResult(value);
+ return resumeAfter(store);
+}
+
+bool WarpCacheIRTranspiler::emitAtomicsIsLockFreeResult(
+ Int32OperandId valueId) {
+ MDefinition* value = getOperand(valueId);
+
+ auto* ilf = MAtomicIsLockFree::New(alloc(), value);
+ add(ilf);
+
+ pushResult(ilf);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitBigIntAsIntNResult(Int32OperandId bitsId,
+ BigIntOperandId bigIntId) {
+ MDefinition* bits = getOperand(bitsId);
+ MDefinition* bigInt = getOperand(bigIntId);
+
+ auto* ins = MBigIntAsIntN::New(alloc(), bits, bigInt);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitBigIntAsUintNResult(Int32OperandId bitsId,
+ BigIntOperandId bigIntId) {
+ MDefinition* bits = getOperand(bitsId);
+ MDefinition* bigInt = getOperand(bigIntId);
+
+ auto* ins = MBigIntAsUintN::New(alloc(), bits, bigInt);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardToNonGCThing(ValOperandId inputId) {
+ MDefinition* def = getOperand(inputId);
+ if (IsNonGCThing(def->type())) {
+ return true;
+ }
+
+ auto* ins = MGuardNonGCThing::New(alloc(), def);
+ add(ins);
+
+ setOperand(inputId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitSetHasNonGCThingResult(ObjOperandId setId,
+ ValOperandId valId) {
+ MDefinition* set = getOperand(setId);
+ MDefinition* val = getOperand(valId);
+
+ auto* hashValue = MToHashableNonGCThing::New(alloc(), val);
+ add(hashValue);
+
+ auto* hash = MHashNonGCThing::New(alloc(), hashValue);
+ add(hash);
+
+ auto* ins = MSetObjectHasNonBigInt::New(alloc(), set, hashValue, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitSetHasStringResult(ObjOperandId setId,
+ StringOperandId strId) {
+ MDefinition* set = getOperand(setId);
+ MDefinition* str = getOperand(strId);
+
+ auto* hashValue = MToHashableString::New(alloc(), str);
+ add(hashValue);
+
+ auto* hash = MHashString::New(alloc(), hashValue);
+ add(hash);
+
+ auto* ins = MSetObjectHasNonBigInt::New(alloc(), set, hashValue, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitSetHasSymbolResult(ObjOperandId setId,
+ SymbolOperandId symId) {
+ MDefinition* set = getOperand(setId);
+ MDefinition* sym = getOperand(symId);
+
+ auto* hash = MHashSymbol::New(alloc(), sym);
+ add(hash);
+
+ auto* ins = MSetObjectHasNonBigInt::New(alloc(), set, sym, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitSetHasBigIntResult(ObjOperandId setId,
+ BigIntOperandId bigIntId) {
+ MDefinition* set = getOperand(setId);
+ MDefinition* bigInt = getOperand(bigIntId);
+
+ auto* hash = MHashBigInt::New(alloc(), bigInt);
+ add(hash);
+
+ auto* ins = MSetObjectHasBigInt::New(alloc(), set, bigInt, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitSetHasObjectResult(ObjOperandId setId,
+ ObjOperandId objId) {
+ MDefinition* set = getOperand(setId);
+ MDefinition* obj = getOperand(objId);
+
+ auto* hash = MHashObject::New(alloc(), set, obj);
+ add(hash);
+
+ auto* ins = MSetObjectHasNonBigInt::New(alloc(), set, obj, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitSetHasResult(ObjOperandId setId,
+ ValOperandId valId) {
+ MDefinition* set = getOperand(setId);
+ MDefinition* val = getOperand(valId);
+
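+ // On 64-bit (punboxed Value) platforms the hashing and table lookup are
+ // inlined; 32-bit platforms fall back to a VM call. The Map variants below
+ // follow the same pattern.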
+#ifdef JS_PUNBOX64
+ auto* hashValue = MToHashableValue::New(alloc(), val);
+ add(hashValue);
+
+ auto* hash = MHashValue::New(alloc(), set, hashValue);
+ add(hash);
+
+ auto* ins = MSetObjectHasValue::New(alloc(), set, hashValue, hash);
+ add(ins);
+#else
+ auto* ins = MSetObjectHasValueVMCall::New(alloc(), set, val);
+ add(ins);
+#endif
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitSetSizeResult(ObjOperandId setId) {
+ MDefinition* set = getOperand(setId);
+
+ auto* ins = MSetObjectSize::New(alloc(), set);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapHasNonGCThingResult(ObjOperandId mapId,
+ ValOperandId valId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* val = getOperand(valId);
+
+ auto* hashValue = MToHashableNonGCThing::New(alloc(), val);
+ add(hashValue);
+
+ auto* hash = MHashNonGCThing::New(alloc(), hashValue);
+ add(hash);
+
+ auto* ins = MMapObjectHasNonBigInt::New(alloc(), map, hashValue, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapHasStringResult(ObjOperandId mapId,
+ StringOperandId strId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* str = getOperand(strId);
+
+ auto* hashValue = MToHashableString::New(alloc(), str);
+ add(hashValue);
+
+ auto* hash = MHashString::New(alloc(), hashValue);
+ add(hash);
+
+ auto* ins = MMapObjectHasNonBigInt::New(alloc(), map, hashValue, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapHasSymbolResult(ObjOperandId mapId,
+ SymbolOperandId symId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* sym = getOperand(symId);
+
+ auto* hash = MHashSymbol::New(alloc(), sym);
+ add(hash);
+
+ auto* ins = MMapObjectHasNonBigInt::New(alloc(), map, sym, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapHasBigIntResult(ObjOperandId mapId,
+ BigIntOperandId bigIntId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* bigInt = getOperand(bigIntId);
+
+ auto* hash = MHashBigInt::New(alloc(), bigInt);
+ add(hash);
+
+ auto* ins = MMapObjectHasBigInt::New(alloc(), map, bigInt, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapHasObjectResult(ObjOperandId mapId,
+ ObjOperandId objId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* obj = getOperand(objId);
+
+ auto* hash = MHashObject::New(alloc(), map, obj);
+ add(hash);
+
+ auto* ins = MMapObjectHasNonBigInt::New(alloc(), map, obj, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapHasResult(ObjOperandId mapId,
+ ValOperandId valId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* val = getOperand(valId);
+
+#ifdef JS_PUNBOX64
+ auto* hashValue = MToHashableValue::New(alloc(), val);
+ add(hashValue);
+
+ auto* hash = MHashValue::New(alloc(), map, hashValue);
+ add(hash);
+
+ auto* ins = MMapObjectHasValue::New(alloc(), map, hashValue, hash);
+ add(ins);
+#else
+ auto* ins = MMapObjectHasValueVMCall::New(alloc(), map, val);
+ add(ins);
+#endif
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapGetNonGCThingResult(ObjOperandId mapId,
+ ValOperandId valId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* val = getOperand(valId);
+
+ auto* hashValue = MToHashableNonGCThing::New(alloc(), val);
+ add(hashValue);
+
+ auto* hash = MHashNonGCThing::New(alloc(), hashValue);
+ add(hash);
+
+ auto* ins = MMapObjectGetNonBigInt::New(alloc(), map, hashValue, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapGetStringResult(ObjOperandId mapId,
+ StringOperandId strId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* str = getOperand(strId);
+
+ auto* hashValue = MToHashableString::New(alloc(), str);
+ add(hashValue);
+
+ auto* hash = MHashString::New(alloc(), hashValue);
+ add(hash);
+
+ auto* ins = MMapObjectGetNonBigInt::New(alloc(), map, hashValue, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapGetSymbolResult(ObjOperandId mapId,
+ SymbolOperandId symId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* sym = getOperand(symId);
+
+ auto* hash = MHashSymbol::New(alloc(), sym);
+ add(hash);
+
+ auto* ins = MMapObjectGetNonBigInt::New(alloc(), map, sym, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapGetBigIntResult(ObjOperandId mapId,
+ BigIntOperandId bigIntId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* bigInt = getOperand(bigIntId);
+
+ auto* hash = MHashBigInt::New(alloc(), bigInt);
+ add(hash);
+
+ auto* ins = MMapObjectGetBigInt::New(alloc(), map, bigInt, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapGetObjectResult(ObjOperandId mapId,
+ ObjOperandId objId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* obj = getOperand(objId);
+
+ auto* hash = MHashObject::New(alloc(), map, obj);
+ add(hash);
+
+ auto* ins = MMapObjectGetNonBigInt::New(alloc(), map, obj, hash);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapGetResult(ObjOperandId mapId,
+ ValOperandId valId) {
+ MDefinition* map = getOperand(mapId);
+ MDefinition* val = getOperand(valId);
+
+#ifdef JS_PUNBOX64
+ auto* hashValue = MToHashableValue::New(alloc(), val);
+ add(hashValue);
+
+ auto* hash = MHashValue::New(alloc(), map, hashValue);
+ add(hash);
+
+ auto* ins = MMapObjectGetValue::New(alloc(), map, hashValue, hash);
+ add(ins);
+#else
+ auto* ins = MMapObjectGetValueVMCall::New(alloc(), map, val);
+ add(ins);
+#endif
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitMapSizeResult(ObjOperandId mapId) {
+ MDefinition* map = getOperand(mapId);
+
+ auto* ins = MMapObjectSize::New(alloc(), map);
+ add(ins);
+
+ pushResult(ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitTruthyResult(OperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+
+ auto* result = convertToBoolean(input);
+
+ pushResult(result);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
+ return emitTruthyResult(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitLoadDoubleTruthyResult(
+ NumberOperandId inputId) {
+ return emitTruthyResult(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitLoadStringTruthyResult(
+ StringOperandId inputId) {
+ return emitTruthyResult(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitLoadObjectTruthyResult(ObjOperandId inputId) {
+ return emitTruthyResult(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitLoadBigIntTruthyResult(
+ BigIntOperandId inputId) {
+ return emitTruthyResult(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitLoadValueTruthyResult(ValOperandId inputId) {
+ return emitTruthyResult(inputId);
+}
+
+bool WarpCacheIRTranspiler::emitLoadOperandResult(ValOperandId inputId) {
+ MDefinition* input = getOperand(inputId);
+ pushResult(input);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitLoadWrapperTarget(ObjOperandId objId,
+ ObjOperandId resultId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MLoadWrapperTarget::New(alloc(), obj);
+ add(ins);
+
+ return defineOperand(resultId, ins);
+}
+
+// When we transpile a call, we may generate guards for some
+// arguments. To make sure the call instruction depends on those
+// guards, when the transpiler creates an operand for an argument, we
+// register the OperandId of that argument in argumentOperandIds_. (See
+// emitLoadArgumentSlot.) Before generating the call, we update the
+// CallInfo to use the appropriate value from operands_.
+// Note: The callee is an explicit argument to the call op, and is
+// tracked separately.
+void WarpCacheIRTranspiler::updateArgumentsFromOperands() {
+ for (uint32_t i = 0; i < uint32_t(ArgumentKind::NumKinds); i++) {
+ ArgumentKind kind = ArgumentKind(i);
+ OperandId id = argumentOperandIds_[kind];
+ if (id.valid()) {
+ switch (kind) {
+ case ArgumentKind::This:
+ callInfo_->setThis(getOperand(id));
+ break;
+ case ArgumentKind::NewTarget:
+ callInfo_->setNewTarget(getOperand(id));
+ break;
+ case ArgumentKind::Arg0:
+ callInfo_->setArg(0, getOperand(id));
+ break;
+ case ArgumentKind::Arg1:
+ callInfo_->setArg(1, getOperand(id));
+ break;
+ case ArgumentKind::Arg2:
+ callInfo_->setArg(2, getOperand(id));
+ break;
+ case ArgumentKind::Arg3:
+ callInfo_->setArg(3, getOperand(id));
+ break;
+ case ArgumentKind::Arg4:
+ callInfo_->setArg(4, getOperand(id));
+ break;
+ case ArgumentKind::Arg5:
+ callInfo_->setArg(5, getOperand(id));
+ break;
+ case ArgumentKind::Arg6:
+ callInfo_->setArg(6, getOperand(id));
+ break;
+ case ArgumentKind::Arg7:
+ callInfo_->setArg(7, getOperand(id));
+ break;
+ case ArgumentKind::Callee:
+ case ArgumentKind::NumKinds:
+ MOZ_CRASH("Unexpected argument kind");
+ }
+ }
+ }
+}
+
+bool WarpCacheIRTranspiler::emitLoadArgumentSlot(ValOperandId resultId,
+ uint32_t slotIndex) {
+ // Reverse of GetIndexOfArgument.
+
+ // Layout:
+ // NewTarget | Args.. (reversed)      | ThisValue | Callee
+ // 0         | ArgC .. Arg1 Arg0 (+1) | argc (+1) | argc + 1 (+ 1)
+ //  ^ (if constructing)
+
+ // NewTarget (optional)
+ if (callInfo_->constructing()) {
+ if (slotIndex == 0) {
+ setArgumentId(ArgumentKind::NewTarget, resultId);
+ return defineOperand(resultId, callInfo_->getNewTarget());
+ }
+
+ slotIndex -= 1; // Adjust slot index to match non-constructing calls.
+ }
+
+ // Args..
+ if (slotIndex < callInfo_->argc()) {
+ uint32_t arg = callInfo_->argc() - 1 - slotIndex;
+ ArgumentKind kind = ArgumentKindForArgIndex(arg);
+ MOZ_ASSERT(kind < ArgumentKind::NumKinds);
+ setArgumentId(kind, resultId);
+ return defineOperand(resultId, callInfo_->getArg(arg));
+ }
+
+ // ThisValue
+ if (slotIndex == callInfo_->argc()) {
+ setArgumentId(ArgumentKind::This, resultId);
+ return defineOperand(resultId, callInfo_->thisArg());
+ }
+
+ // Callee
+ MOZ_ASSERT(slotIndex == callInfo_->argc() + 1);
+ return defineOperand(resultId, callInfo_->callee());
+}
+
+bool WarpCacheIRTranspiler::emitLoadArgumentFixedSlot(ValOperandId resultId,
+ uint8_t slotIndex) {
+ return emitLoadArgumentSlot(resultId, slotIndex);
+}
+
+bool WarpCacheIRTranspiler::emitLoadArgumentDynamicSlot(ValOperandId resultId,
+ Int32OperandId argcId,
+ uint8_t slotIndex) {
+#ifdef DEBUG
+ MDefinition* argc = getOperand(argcId);
+ MOZ_ASSERT(argc->toConstant()->toInt32() ==
+ static_cast<int32_t>(callInfo_->argc()));
+#endif
+
+ return emitLoadArgumentSlot(resultId, callInfo_->argc() + slotIndex);
+}
+
+WrappedFunction* WarpCacheIRTranspiler::maybeWrappedFunction(
+ MDefinition* callee, CallKind kind, uint16_t nargs, FunctionFlags flags) {
+ MOZ_ASSERT(callee->isConstant() || callee->isNurseryObject());
+
+ // If this is a native without a JitEntry, WrappedFunction needs to know the
+ // target JSFunction.
+ // TODO: support nursery-allocated natives with WrappedFunction, maybe by
+ // storing the JSNative in the Baseline stub like flags/nargs.
+ bool isNative = flags.isNativeWithoutJitEntry();
+ if (isNative && !callee->isConstant()) {
+ return nullptr;
+ }
+
+ JSFunction* nativeTarget = nullptr;
+ if (isNative) {
+ nativeTarget = &callee->toConstant()->toObject().as<JSFunction>();
+ }
+
+ WrappedFunction* wrappedTarget =
+ new (alloc()) WrappedFunction(nativeTarget, nargs, flags);
+ MOZ_ASSERT_IF(kind == CallKind::Native || kind == CallKind::DOM,
+ wrappedTarget->isNativeWithoutJitEntry());
+ MOZ_ASSERT_IF(kind == CallKind::Scripted, wrappedTarget->hasJitEntry());
+ return wrappedTarget;
+}
+
+WrappedFunction* WarpCacheIRTranspiler::maybeCallTarget(MDefinition* callee,
+ CallKind kind) {
+ // CacheIR emits the following for specialized calls:
+ // GuardSpecificFunction <callee> <func> ..
+ // Call(Native|Scripted)Function <callee> ..
+ // or:
+ // GuardClass <callee> ..
+ // GuardFunctionScript <callee> <script> ..
+ // CallScriptedFunction <callee> ..
+ //
+ // We can use the <func> JSFunction or <script> BaseScript to specialize this
+ // call.
+ if (callee->isGuardSpecificFunction()) {
+ auto* guard = callee->toGuardSpecificFunction();
+ return maybeWrappedFunction(guard->expected(), kind, guard->nargs(),
+ guard->flags());
+ }
+ if (callee->isGuardFunctionScript()) {
+ MOZ_ASSERT(kind == CallKind::Scripted);
+ auto* guard = callee->toGuardFunctionScript();
+ WrappedFunction* wrappedTarget = new (alloc()) WrappedFunction(
+ /* nativeFun = */ nullptr, guard->nargs(), guard->flags());
+ MOZ_ASSERT(wrappedTarget->hasJitEntry());
+ return wrappedTarget;
+ }
+ return nullptr;
+}
+
+// If it is possible to use MCall for this call, update callInfo_ to use
+// the correct arguments. Otherwise, update the ArgFormat of callInfo_.
+bool WarpCacheIRTranspiler::updateCallInfo(MDefinition* callee,
+ CallFlags flags) {
+ // The transpilation will add various guards to the callee.
+ // We replace the callee referenced by the CallInfo, so that
+ // the resulting call instruction depends on these guards.
+ callInfo_->setCallee(callee);
+
+ // The transpilation may also add guards to other arguments.
+ // We replace those arguments in the CallInfo here.
+ updateArgumentsFromOperands();
+
+ switch (flags.getArgFormat()) {
+ case CallFlags::Standard:
+ MOZ_ASSERT(callInfo_->argFormat() == CallInfo::ArgFormat::Standard);
+ break;
+ case CallFlags::Spread:
+ MOZ_ASSERT(callInfo_->argFormat() == CallInfo::ArgFormat::Array);
+ break;
+ case CallFlags::FunCall:
+ // Note: We already changed the callee to the target
+ // function instead of the |call| function.
+ MOZ_ASSERT(!callInfo_->constructing());
+ MOZ_ASSERT(callInfo_->argFormat() == CallInfo::ArgFormat::Standard);
+
+ if (callInfo_->argc() == 0) {
+ // Special case for fun.call() with no arguments.
+ auto* undef = constant(UndefinedValue());
+ callInfo_->setThis(undef);
+ } else {
+ // The first argument for |call| is the new this value.
+ callInfo_->setThis(callInfo_->getArg(0));
+
+ // Shift down all other arguments by removing the first.
+ callInfo_->removeArg(0);
+ }
+ break;
+ case CallFlags::FunApplyArgsObj:
+ MOZ_ASSERT(!callInfo_->constructing());
+ MOZ_ASSERT(callInfo_->argFormat() == CallInfo::ArgFormat::Standard);
+
+ callInfo_->setArgFormat(CallInfo::ArgFormat::FunApplyArgsObj);
+ break;
+ case CallFlags::FunApplyArray: {
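+ // For f.apply(thisv, array): the callee |f| is the this-value of the
+ // |apply| call, the first argument becomes |this|, and the array supplies
+ // the arguments.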
+ MDefinition* argFunc = callInfo_->thisArg();
+ MDefinition* argThis = callInfo_->getArg(0);
+ callInfo_->setCallee(argFunc);
+ callInfo_->setThis(argThis);
+ callInfo_->setArgFormat(CallInfo::ArgFormat::Array);
+ break;
+ }
+ case CallFlags::FunApplyNullUndefined:
+ // Note: We already changed the callee to the target
+ // function instead of the |apply| function.
+ MOZ_ASSERT(callInfo_->argc() == 2);
+ MOZ_ASSERT(!callInfo_->constructing());
+ MOZ_ASSERT(callInfo_->argFormat() == CallInfo::ArgFormat::Standard);
+ callInfo_->setThis(callInfo_->getArg(0));
+ callInfo_->getArg(1)->setImplicitlyUsedUnchecked();
+ callInfo_->removeArg(1);
+ callInfo_->removeArg(0);
+ break;
+ default:
+ MOZ_CRASH("Unsupported arg format");
+ }
+ return true;
+}
+
+// Returns true if we are generating a call to CreateThisFromIon and
+// must check its return value.
+bool WarpCacheIRTranspiler::maybeCreateThis(MDefinition* callee,
+ CallFlags flags, CallKind kind) {
+ MOZ_ASSERT(kind != CallKind::DOM, "DOM functions are not constructors");
+ MDefinition* thisArg = callInfo_->thisArg();
+
+ if (kind == CallKind::Native) {
+ // Native functions keep the is-constructing MagicValue as |this|.
+ // If one of the arguments uses spread syntax this can be a loop phi with
+ // MIRType::Value.
+ MOZ_ASSERT(thisArg->type() == MIRType::MagicIsConstructing ||
+ thisArg->isPhi());
+ return false;
+ }
+ MOZ_ASSERT(kind == CallKind::Scripted);
+
+ if (thisArg->isNewPlainObject()) {
+ // We have already updated |this| based on MetaScriptedThisShape. We do
+ // not need to generate a check.
+ return false;
+ }
+ if (flags.needsUninitializedThis()) {
+ MConstant* uninit = constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
+ thisArg->setImplicitlyUsedUnchecked();
+ callInfo_->setThis(uninit);
+ return false;
+ }
+ // See the Native case above.
+ MOZ_ASSERT(thisArg->type() == MIRType::MagicIsConstructing ||
+ thisArg->isPhi());
+
+ MDefinition* newTarget = callInfo_->getNewTarget();
+ auto* createThis = MCreateThis::New(alloc(), callee, newTarget);
+ add(createThis);
+
+ thisArg->setImplicitlyUsedUnchecked();
+ callInfo_->setThis(createThis);
+
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCallFunction(
+ ObjOperandId calleeId, Int32OperandId argcId,
+ mozilla::Maybe<ObjOperandId> thisObjId, CallFlags flags, CallKind kind) {
+ MDefinition* callee = getOperand(calleeId);
+ if (kind == CallKind::Scripted && callInfo_ && callInfo_->isInlined()) {
+ // We are transpiling to generate the correct guards. We also
+ // update the CallInfo to use the correct arguments. Code for the
+ // inlined function itself will be generated in
+ // WarpBuilder::buildInlinedCall.
+ if (!updateCallInfo(callee, flags)) {
+ return false;
+ }
+ if (callInfo_->constructing()) {
+ MOZ_ASSERT(flags.isConstructing());
+
+ // We call maybeCreateThis to update |this|, but inlined constructors
+ // never need a VM call. CallIRGenerator::getThisForScripted ensures that
+ // we don't attach a specialized stub unless we have a template object or
+ // know that the constructor needs uninitialized this.
+ MOZ_ALWAYS_FALSE(maybeCreateThis(callee, flags, CallKind::Scripted));
+ mozilla::DebugOnly<MDefinition*> thisArg = callInfo_->thisArg();
+ MOZ_ASSERT(thisArg->isNewPlainObject() ||
+ thisArg->type() == MIRType::MagicUninitializedLexical);
+ }
+
+ if (flags.getArgFormat() == CallFlags::FunCall) {
+ callInfo_->setInliningResumeMode(ResumeMode::InlinedFunCall);
+ } else {
+ MOZ_ASSERT(flags.getArgFormat() == CallFlags::Standard);
+ callInfo_->setInliningResumeMode(ResumeMode::InlinedStandardCall);
+ }
+
+ switch (callInfo_->argFormat()) {
+ case CallInfo::ArgFormat::Standard:
+ break;
+ default:
+ MOZ_CRASH("Unsupported arg format");
+ }
+ return true;
+ }
+
+#ifdef DEBUG
+ MDefinition* argc = getOperand(argcId);
+ MOZ_ASSERT(argc->toConstant()->toInt32() ==
+ static_cast<int32_t>(callInfo_->argc()));
+#endif
+
+ if (!updateCallInfo(callee, flags)) {
+ return false;
+ }
+
+ if (kind == CallKind::DOM) {
+ MOZ_ASSERT(flags.getArgFormat() == CallFlags::Standard);
+ // For DOM calls |this| has a class guard.
+ MDefinition* thisObj = getOperand(*thisObjId);
+ callInfo_->setThis(thisObj);
+ }
+
+ WrappedFunction* wrappedTarget = maybeCallTarget(callee, kind);
+
+ bool needsThisCheck = false;
+ if (callInfo_->constructing()) {
+ MOZ_ASSERT(flags.isConstructing());
+ needsThisCheck = maybeCreateThis(callee, flags, kind);
+ if (needsThisCheck) {
+ wrappedTarget = nullptr;
+ }
+ }
+
+ switch (callInfo_->argFormat()) {
+ case CallInfo::ArgFormat::Standard: {
+ MCall* call = makeCall(*callInfo_, needsThisCheck, wrappedTarget,
+ kind == CallKind::DOM);
+ if (!call) {
+ return false;
+ }
+
+ if (flags.isSameRealm()) {
+ call->setNotCrossRealm();
+ }
+
+ if (call->isEffectful()) {
+ addEffectful(call);
+ pushResult(call);
+ return resumeAfter(call);
+ }
+
+ MOZ_ASSERT(kind == CallKind::DOM);
+ add(call);
+ pushResult(call);
+ return true;
+ }
+ case CallInfo::ArgFormat::Array: {
+ MInstruction* call = makeSpreadCall(*callInfo_, needsThisCheck,
+ flags.isSameRealm(), wrappedTarget);
+ if (!call) {
+ return false;
+ }
+ addEffectful(call);
+ pushResult(call);
+
+ return resumeAfter(call);
+ }
+ case CallInfo::ArgFormat::FunApplyArgsObj: {
+ return emitFunApplyArgsObj(wrappedTarget, flags);
+ }
+ }
+ MOZ_CRASH("unreachable");
+}
+
+bool WarpCacheIRTranspiler::emitFunApplyArgsObj(WrappedFunction* wrappedTarget,
+ CallFlags flags) {
+ MOZ_ASSERT(!callInfo_->constructing());
+
+ MDefinition* callee = callInfo_->thisArg();
+ MDefinition* thisArg = callInfo_->getArg(0);
+ MDefinition* argsObj = callInfo_->getArg(1);
+
+ MApplyArgsObj* apply =
+ MApplyArgsObj::New(alloc(), wrappedTarget, callee, argsObj, thisArg);
+
+ if (flags.isSameRealm()) {
+ apply->setNotCrossRealm();
+ }
+ if (callInfo_->ignoresReturnValue()) {
+ apply->setIgnoresReturnValue();
+ }
+
+ addEffectful(apply);
+ pushResult(apply);
+
+ return resumeAfter(apply);
+}
+
+#ifndef JS_SIMULATOR
+bool WarpCacheIRTranspiler::emitCallNativeFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed,
+ bool ignoresReturnValue) {
+ // Instead of ignoresReturnValue we use CallInfo::ignoresReturnValue.
+ return emitCallFunction(calleeId, argcId, mozilla::Nothing(), flags,
+ CallKind::Native);
+}
+
+bool WarpCacheIRTranspiler::emitCallDOMFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ ObjOperandId thisObjId,
+ CallFlags flags,
+ uint32_t argcFixed) {
+ return emitCallFunction(calleeId, argcId, mozilla::Some(thisObjId), flags,
+ CallKind::DOM);
+}
+#else
+bool WarpCacheIRTranspiler::emitCallNativeFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed,
+ uint32_t targetOffset) {
+ return emitCallFunction(calleeId, argcId, mozilla::Nothing(), flags,
+ CallKind::Native);
+}
+
+bool WarpCacheIRTranspiler::emitCallDOMFunction(
+ ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
+ CallFlags flags, uint32_t argcFixed, uint32_t targetOffset) {
+ return emitCallFunction(calleeId, argcId, mozilla::Some(thisObjId), flags,
+ CallKind::DOM);
+}
+#endif
+
+bool WarpCacheIRTranspiler::emitCallScriptedFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed) {
+ return emitCallFunction(calleeId, argcId, mozilla::Nothing(), flags,
+ CallKind::Scripted);
+}
+
+bool WarpCacheIRTranspiler::emitCallInlinedFunction(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ uint32_t icScriptOffset,
+ CallFlags flags,
+ uint32_t argcFixed) {
+ return emitCallFunction(calleeId, argcId, mozilla::Nothing(), flags,
+ CallKind::Scripted);
+}
+
+bool WarpCacheIRTranspiler::emitCallClassHook(ObjOperandId calleeId,
+ Int32OperandId argcId,
+ CallFlags flags,
+ uint32_t argcFixed,
+ uint32_t targetOffset) {
+ MDefinition* callee = getOperand(calleeId);
+ JSNative target = jsnativeStubField(targetOffset);
+
+#ifdef DEBUG
+ MDefinition* argc = getOperand(argcId);
+ MOZ_ASSERT(argc->toConstant()->toInt32() ==
+ static_cast<int32_t>(callInfo_->argc()));
+#endif
+
+ if (!updateCallInfo(callee, flags)) {
+ return false;
+ }
+
+ MOZ_ASSERT(callInfo_->argFormat() == CallInfo::ArgFormat::Standard);
+ MOZ_ASSERT(flags.getArgFormat() == CallFlags::ArgFormat::Standard);
+
+ // Callees can be from any realm. If this changes, we should update
+ // MCallClassHook::maybeCrossRealm.
+ MOZ_ASSERT(!flags.isSameRealm());
+
+ auto* call = MCallClassHook::New(alloc(), target, callInfo_->argc(),
+ callInfo_->constructing());
+ if (!call) {
+ return false;
+ }
+
+ if (callInfo_->ignoresReturnValue()) {
+ call->setIgnoresReturnValue();
+ }
+
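+  // MCallClassHook operands are laid out as: callee, |this|, the arguments,
+  // and (when constructing) the new.target.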
+ call->initCallee(callInfo_->callee());
+ call->addArg(0, callInfo_->thisArg());
+
+ for (uint32_t i = 0; i < callInfo_->argc(); i++) {
+ call->addArg(i + 1, callInfo_->getArg(i));
+ }
+
+ if (callInfo_->constructing()) {
+ call->addArg(1 + callInfo_->argc(), callInfo_->getNewTarget());
+ }
+
+ addEffectful(call);
+ pushResult(call);
+
+ return resumeAfter(call);
+}
+
+bool WarpCacheIRTranspiler::emitCallBoundScriptedFunction(
+ ObjOperandId calleeId, ObjOperandId targetId, Int32OperandId argcId,
+ CallFlags flags, uint32_t numBoundArgs) {
+ MDefinition* callee = getOperand(calleeId);
+ MDefinition* target = getOperand(targetId);
+
+ MOZ_ASSERT(callInfo_->argFormat() == CallInfo::ArgFormat::Standard);
+ MOZ_ASSERT(callInfo_->constructing() == flags.isConstructing());
+
+ callInfo_->setCallee(target);
+ updateArgumentsFromOperands();
+
+ WrappedFunction* wrappedTarget = maybeCallTarget(target, CallKind::Scripted);
+
+ bool needsThisCheck = false;
+ if (callInfo_->constructing()) {
+ callInfo_->setNewTarget(target);
+ needsThisCheck = maybeCreateThis(target, flags, CallKind::Scripted);
+ if (needsThisCheck) {
+ wrappedTarget = nullptr;
+ }
+ } else {
+ auto* thisv = MLoadFixedSlot::New(alloc(), callee,
+ BoundFunctionObject::boundThisSlot());
+ add(thisv);
+ callInfo_->thisArg()->setImplicitlyUsedUnchecked();
+ callInfo_->setThis(thisv);
+ }
+
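+  // Bound arguments are stored in the bound function's fixed slots when
+  // there are at most MaxInlineBoundArgs of them; otherwise they live in an
+  // array object held in the first bound-argument slot and are loaded
+  // through its elements below.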
+ bool usingInlineBoundArgs =
+ numBoundArgs <= BoundFunctionObject::MaxInlineBoundArgs;
+
+ MElements* elements = nullptr;
+ if (!usingInlineBoundArgs) {
+ auto* boundArgs = MLoadFixedSlot::New(
+ alloc(), callee, BoundFunctionObject::firstInlineBoundArgSlot());
+ add(boundArgs);
+ elements = MElements::New(alloc(), boundArgs);
+ add(elements);
+ }
+
+ auto loadBoundArg = [&](size_t index) {
+ MInstruction* arg;
+ if (usingInlineBoundArgs) {
+ size_t slot = BoundFunctionObject::firstInlineBoundArgSlot() + index;
+ arg = MLoadFixedSlot::New(alloc(), callee, slot);
+ } else {
+ arg = MLoadElement::New(alloc(), elements, constant(Int32Value(index)));
+ }
+ add(arg);
+ return arg;
+ };
+ if (!callInfo_->prependArgs(numBoundArgs, loadBoundArg)) {
+ return false;
+ }
+
+ MCall* call = makeCall(*callInfo_, needsThisCheck, wrappedTarget);
+ if (!call) {
+ return false;
+ }
+
+ if (flags.isSameRealm()) {
+ call->setNotCrossRealm();
+ }
+
+ addEffectful(call);
+ pushResult(call);
+ return resumeAfter(call);
+}
+
+bool WarpCacheIRTranspiler::emitBindFunctionResult(
+ ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
+ MDefinition* target = getOperand(targetId);
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+
+ MOZ_ASSERT(callInfo_->argc() == argc);
+
+ auto* bound = MBindFunction::New(alloc(), target, argc, templateObj);
+ if (!bound) {
+ return false;
+ }
+ addEffectful(bound);
+
+ for (uint32_t i = 0; i < argc; i++) {
+ bound->initArg(i, callInfo_->getArg(i));
+ }
+
+ pushResult(bound);
+ return resumeAfter(bound);
+}
+
+bool WarpCacheIRTranspiler::emitSpecializedBindFunctionResult(
+ ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
+ MDefinition* target = getOperand(targetId);
+ JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+
+ MOZ_ASSERT(callInfo_->argc() == argc);
+
+ auto* bound = MNewBoundFunction::New(alloc(), templateObj);
+ add(bound);
+
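+  // The first call argument becomes the bound |this|; any remaining
+  // arguments are stored as inline bound arguments in the slots initialized
+  // below.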
+ size_t numBoundArgs = argc > 0 ? argc - 1 : 0;
+ MOZ_ASSERT(numBoundArgs <= BoundFunctionObject::MaxInlineBoundArgs);
+
+ auto initSlot = [&](size_t slot, MDefinition* value) {
+#ifdef DEBUG
+ // Assert we can elide the post write barrier. See also the comment in
+ // WarpBuilder::buildNamedLambdaEnv.
+ add(MAssertCanElidePostWriteBarrier::New(alloc(), bound, value));
+#endif
+ addUnchecked(MStoreFixedSlot::NewUnbarriered(alloc(), bound, slot, value));
+ };
+
+ initSlot(BoundFunctionObject::targetSlot(), target);
+ if (argc > 0) {
+ initSlot(BoundFunctionObject::boundThisSlot(), callInfo_->getArg(0));
+ }
+ for (size_t i = 0; i < numBoundArgs; i++) {
+ size_t slot = BoundFunctionObject::firstInlineBoundArgSlot() + i;
+ initSlot(slot, callInfo_->getArg(1 + i));
+ }
+
+ pushResult(bound);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCallWasmFunction(
+ ObjOperandId calleeId, Int32OperandId argcId, CallFlags flags,
+ uint32_t argcFixed, uint32_t funcExportOffset, uint32_t instanceOffset) {
+ MDefinition* callee = getOperand(calleeId);
+#ifdef DEBUG
+ MDefinition* argc = getOperand(argcId);
+ MOZ_ASSERT(argc->toConstant()->toInt32() ==
+ static_cast<int32_t>(callInfo_->argc()));
+#endif
+ JSObject* instanceObject = tenuredObjectStubField(instanceOffset);
+ auto* wasmInstanceObj = &instanceObject->as<WasmInstanceObject>();
+ const wasm::FuncExport* funcExport = wasmFuncExportField(funcExportOffset);
+ const wasm::FuncType& sig =
+ wasmInstanceObj->instance().metadata().getFuncExportType(*funcExport);
+
+ if (!updateCallInfo(callee, flags)) {
+ return false;
+ }
+
+ static_assert(wasm::MaxArgsForJitInlineCall <= MaxNumLInstructionOperands,
+ "arguments must fit in LIR operands");
+ MOZ_ASSERT(sig.args().length() <= wasm::MaxArgsForJitInlineCall);
+
+ MOZ_ASSERT(callInfo_->argFormat() == CallInfo::ArgFormat::Standard);
+
+ auto* call = MIonToWasmCall::New(alloc(), wasmInstanceObj, *funcExport);
+ if (!call) {
+ return false;
+ }
+
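+  // Arguments the JS caller did not supply are padded with |undefined| and
+  // converted to the wasm parameter type.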
+ mozilla::Maybe<MDefinition*> undefined;
+ for (size_t i = 0; i < sig.args().length(); i++) {
+ if (!alloc().ensureBallast()) {
+ return false;
+ }
+
+ MDefinition* arg;
+ if (i < callInfo_->argc()) {
+ arg = callInfo_->getArg(i);
+ } else {
+ if (!undefined) {
+ undefined.emplace(constant(UndefinedValue()));
+ }
+ arg = convertWasmArg(*undefined, sig.args()[i].kind());
+ }
+ call->initArg(i, arg);
+ }
+
+ addEffectful(call);
+
+ // Add any post-function call conversions that are necessary.
+ MInstruction* postConversion = call;
+ const wasm::ValTypeVector& results = sig.results();
+ MOZ_ASSERT(results.length() <= 1, "Multi-value returns not supported.");
+ if (results.length() == 0) {
+ // No results to convert.
+ } else {
+ switch (results[0].kind()) {
+ case wasm::ValType::I64:
+ // JS expects a BigInt from I64 types.
+ postConversion = MInt64ToBigInt::New(alloc(), call);
+
+ // Make non-movable so we can attach a resume point.
+ postConversion->setNotMovable();
+
+ add(postConversion);
+ break;
+ default:
+ // No spectre.index_masking of i32 results required, as the generated
+ // stub takes care of that.
+ break;
+ }
+ }
+
+ // The resume point has to be attached to the post-conversion instruction
+ // (if present) instead of to the call. This way, if the call triggers an
+ // invalidation bailout, we will have the BigInt value on the Baseline stack.
+ // Potential alternative solution: attach the resume point to the call and
+ // have bailouts turn the Int64 value into a BigInt, maybe with a recover
+ // instruction.
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+MDefinition* WarpCacheIRTranspiler::convertWasmArg(MDefinition* arg,
+ wasm::ValType::Kind kind) {
+ // An invariant in this code is that any type conversion operation that has
+ // externally visible effects, such as invoking valueOf on an object argument,
+  // must bail out so that we don't have to worry about replaying effects
+  // during argument conversion.
+ MInstruction* conversion = nullptr;
+ switch (kind) {
+ case wasm::ValType::I32:
+ conversion = MTruncateToInt32::New(alloc(), arg);
+ break;
+ case wasm::ValType::I64:
+ conversion = MToInt64::New(alloc(), arg);
+ break;
+ case wasm::ValType::F32:
+ conversion = MToFloat32::New(alloc(), arg);
+ break;
+ case wasm::ValType::F64:
+ conversion = MToDouble::New(alloc(), arg);
+ break;
+ case wasm::ValType::V128:
+ MOZ_CRASH("Unexpected type for Wasm JitEntry");
+ case wasm::ValType::Ref:
+ // Transform the JS representation into an AnyRef representation.
+ // The resulting type is MIRType::RefOrNull. These cases are all
+ // effect-free.
+ switch (arg->type()) {
+ case MIRType::Object:
+ conversion = MWasmAnyRefFromJSObject::New(alloc(), arg);
+ break;
+ case MIRType::Null:
+ arg->setImplicitlyUsedUnchecked();
+ conversion = MWasmNullConstant::New(alloc());
+ break;
+ default:
+ conversion = MWasmBoxValue::New(alloc(), arg);
+ break;
+ }
+ break;
+ }
+
+ add(conversion);
+ return conversion;
+}
+
+bool WarpCacheIRTranspiler::emitGuardWasmArg(ValOperandId argId,
+ wasm::ValType::Kind kind) {
+ MDefinition* arg = getOperand(argId);
+ MDefinition* conversion = convertWasmArg(arg, kind);
+
+ setOperand(argId, conversion);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCallGetterResult(CallKind kind,
+ ValOperandId receiverId,
+ uint32_t getterOffset,
+ bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ MDefinition* receiver = getOperand(receiverId);
+ MDefinition* getter = objectStubField(getterOffset);
+ if (kind == CallKind::Scripted && callInfo_ && callInfo_->isInlined()) {
+ // We are transpiling to generate the correct guards. We also update the
+ // CallInfo to use the correct arguments. Code for the inlined getter
+ // itself will be generated in WarpBuilder::buildInlinedCall.
+ callInfo_->initForGetterCall(getter, receiver);
+ callInfo_->setInliningResumeMode(ResumeMode::InlinedAccessor);
+
+ // Make sure there's enough room to push the arguments on the stack.
+ if (!current->ensureHasSlots(2)) {
+ return false;
+ }
+
+ return true;
+ }
+
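+  // nargsAndFlags packs the getter's nargs in the upper 16 bits and its
+  // FunctionFlags in the lower 16 bits.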
+ uint32_t nargsAndFlags = uint32StubField(nargsAndFlagsOffset);
+ uint16_t nargs = nargsAndFlags >> 16;
+ FunctionFlags flags = FunctionFlags(uint16_t(nargsAndFlags));
+ WrappedFunction* wrappedTarget =
+ maybeWrappedFunction(getter, kind, nargs, flags);
+
+ bool ignoresRval = loc_.resultIsPopped();
+ CallInfo callInfo(alloc(), /* constructing = */ false, ignoresRval);
+ callInfo.initForGetterCall(getter, receiver);
+
+ MCall* call = makeCall(callInfo, /* needsThisCheck = */ false, wrappedTarget);
+ if (!call) {
+ return false;
+ }
+
+ if (sameRealm) {
+ call->setNotCrossRealm();
+ }
+
+ addEffectful(call);
+ pushResult(call);
+
+ return resumeAfter(call);
+}
+
+bool WarpCacheIRTranspiler::emitCallScriptedGetterResult(
+ ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ return emitCallGetterResult(CallKind::Scripted, receiverId, getterOffset,
+ sameRealm, nargsAndFlagsOffset);
+}
+
+bool WarpCacheIRTranspiler::emitCallInlinedGetterResult(
+ ValOperandId receiverId, uint32_t getterOffset, uint32_t icScriptOffset,
+ bool sameRealm, uint32_t nargsAndFlagsOffset) {
+ return emitCallGetterResult(CallKind::Scripted, receiverId, getterOffset,
+ sameRealm, nargsAndFlagsOffset);
+}
+
+bool WarpCacheIRTranspiler::emitCallNativeGetterResult(
+ ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ return emitCallGetterResult(CallKind::Native, receiverId, getterOffset,
+ sameRealm, nargsAndFlagsOffset);
+}
+
+bool WarpCacheIRTranspiler::emitCallSetter(CallKind kind,
+ ObjOperandId receiverId,
+ uint32_t setterOffset,
+ ValOperandId rhsId, bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ MDefinition* receiver = getOperand(receiverId);
+ MDefinition* setter = objectStubField(setterOffset);
+ MDefinition* rhs = getOperand(rhsId);
+ if (kind == CallKind::Scripted && callInfo_ && callInfo_->isInlined()) {
+ // We are transpiling to generate the correct guards. We also update the
+ // CallInfo to use the correct arguments. Code for the inlined setter
+ // itself will be generated in WarpBuilder::buildInlinedCall.
+ callInfo_->initForSetterCall(setter, receiver, rhs);
+ callInfo_->setInliningResumeMode(ResumeMode::InlinedAccessor);
+
+ // Make sure there's enough room to push the arguments on the stack.
+ if (!current->ensureHasSlots(3)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ uint32_t nargsAndFlags = uint32StubField(nargsAndFlagsOffset);
+ uint16_t nargs = nargsAndFlags >> 16;
+ FunctionFlags flags = FunctionFlags(uint16_t(nargsAndFlags));
+ WrappedFunction* wrappedTarget =
+ maybeWrappedFunction(setter, kind, nargs, flags);
+
+ CallInfo callInfo(alloc(), /* constructing = */ false,
+ /* ignoresReturnValue = */ true);
+ callInfo.initForSetterCall(setter, receiver, rhs);
+
+ MCall* call = makeCall(callInfo, /* needsThisCheck = */ false, wrappedTarget);
+ if (!call) {
+ return false;
+ }
+
+ if (sameRealm) {
+ call->setNotCrossRealm();
+ }
+
+ addEffectful(call);
+ return resumeAfter(call);
+}
+
+bool WarpCacheIRTranspiler::emitCallScriptedSetter(
+ ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
+ bool sameRealm, uint32_t nargsAndFlagsOffset) {
+ return emitCallSetter(CallKind::Scripted, receiverId, setterOffset, rhsId,
+ sameRealm, nargsAndFlagsOffset);
+}
+
+bool WarpCacheIRTranspiler::emitCallInlinedSetter(
+ ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
+ uint32_t icScriptOffset, bool sameRealm, uint32_t nargsAndFlagsOffset) {
+ return emitCallSetter(CallKind::Scripted, receiverId, setterOffset, rhsId,
+ sameRealm, nargsAndFlagsOffset);
+}
+
+bool WarpCacheIRTranspiler::emitCallNativeSetter(ObjOperandId receiverId,
+ uint32_t setterOffset,
+ ValOperandId rhsId,
+ bool sameRealm,
+ uint32_t nargsAndFlagsOffset) {
+ return emitCallSetter(CallKind::Native, receiverId, setterOffset, rhsId,
+ sameRealm, nargsAndFlagsOffset);
+}
+
+bool WarpCacheIRTranspiler::emitMetaScriptedThisShape(
+ uint32_t thisShapeOffset) {
+ SharedShape* shape = &shapeStubField(thisShapeOffset)->asShared();
+ MOZ_ASSERT(shape->getObjectClass() == &PlainObject::class_);
+
+ MConstant* shapeConst = MConstant::NewShape(alloc(), shape);
+ add(shapeConst);
+
+ // TODO: support pre-tenuring.
+ gc::Heap heap = gc::Heap::Default;
+
+ uint32_t numFixedSlots = shape->numFixedSlots();
+ uint32_t numDynamicSlots = NativeObject::calculateDynamicSlots(shape);
+ gc::AllocKind kind = gc::GetGCObjectKind(numFixedSlots);
+ MOZ_ASSERT(gc::CanChangeToBackgroundAllocKind(kind, &PlainObject::class_));
+ kind = gc::ForegroundToBackgroundAllocKind(kind);
+
+ auto* createThis = MNewPlainObject::New(alloc(), shapeConst, numFixedSlots,
+ numDynamicSlots, kind, heap);
+ add(createThis);
+
+ callInfo_->thisArg()->setImplicitlyUsedUnchecked();
+ callInfo_->setThis(createThis);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitReturnFromIC() { return true; }
+
+bool WarpCacheIRTranspiler::emitBailout() {
+ auto* bail = MBail::New(alloc());
+ add(bail);
+
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitAssertPropertyLookup(ObjOperandId objId,
+ uint32_t idOffset,
+ uint32_t slotOffset) {
+ // We currently only emit checks in baseline.
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitAssertRecoveredOnBailoutResult(
+ ValOperandId valId, bool mustBeRecovered) {
+ MDefinition* val = getOperand(valId);
+
+ // Don't assert for recovered instructions when recovering is disabled.
+ if (JitOptions.disableRecoverIns) {
+ pushResult(constant(UndefinedValue()));
+ return true;
+ }
+
+ if (JitOptions.checkRangeAnalysis) {
+    // If we are checking the range of all instructions, then the guards
+    // inserted by Range Analysis prevent the use of recover instructions.
+    // Thus, we just disable these checks.
+ pushResult(constant(UndefinedValue()));
+ return true;
+ }
+
+ auto* assert = MAssertRecoveredOnBailout::New(alloc(), val, mustBeRecovered);
+ addEffectfulUnsafe(assert);
+ current->push(assert);
+
+  // Create an instruction sequence which ensures that the argument of the
+  // assertRecoveredOnBailout function is encoded in at least one snapshot.
+ auto* nop = MNop::New(alloc());
+ add(nop);
+
+ auto* resumePoint = MResumePoint::New(
+ alloc(), nop->block(), loc_.toRawBytecode(), ResumeMode::ResumeAfter);
+ if (!resumePoint) {
+ return false;
+ }
+ nop->setResumePoint(resumePoint);
+
+ auto* encode = MEncodeSnapshot::New(alloc());
+ addEffectfulUnsafe(encode);
+
+ current->pop();
+
+ pushResult(constant(UndefinedValue()));
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardNoAllocationMetadataBuilder(
+ uint32_t builderAddrOffset) {
+  // This is a no-op because we discard all JIT code when setting an
+  // allocation metadata callback.
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitNewPlainObjectResult(uint32_t numFixedSlots,
+ uint32_t numDynamicSlots,
+ gc::AllocKind allocKind,
+ uint32_t shapeOffset,
+ uint32_t siteOffset) {
+ Shape* shape = shapeStubField(shapeOffset);
+ gc::Heap heap = allocSiteInitialHeapField(siteOffset);
+
+ auto* shapeConstant = MConstant::NewShape(alloc(), shape);
+ add(shapeConstant);
+
+ auto* obj = MNewPlainObject::New(alloc(), shapeConstant, numFixedSlots,
+ numDynamicSlots, allocKind, heap);
+ add(obj);
+
+ pushResult(obj);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitNewArrayObjectResult(uint32_t length,
+ uint32_t shapeOffset,
+ uint32_t siteOffset) {
+ Shape* shape = shapeStubField(shapeOffset);
+ gc::Heap heap = allocSiteInitialHeapField(siteOffset);
+
+ auto* shapeConstant = MConstant::NewShape(alloc(), shape);
+ add(shapeConstant);
+
+ auto* obj = MNewArrayObject::New(alloc(), shapeConstant, length, heap);
+ add(obj);
+
+ pushResult(obj);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitCloseIterScriptedResult(ObjOperandId iterId,
+ ObjOperandId calleeId,
+ CompletionKind kind,
+ uint32_t calleeNargs) {
+ MDefinition* iter = getOperand(iterId);
+ MDefinition* callee = getOperand(calleeId);
+
+ WrappedFunction* wrappedTarget = maybeCallTarget(callee, CallKind::Scripted);
+ MOZ_ASSERT(wrappedTarget);
+ MOZ_ASSERT(wrappedTarget->nargs() == calleeNargs);
+ MOZ_ASSERT(wrappedTarget->hasJitEntry());
+
+ bool constructing = false;
+ bool ignoresRval = false;
+ bool needsThisCheck = false;
+ bool isDOMCall = false;
+ CallInfo callInfo(alloc(), constructing, ignoresRval);
+ callInfo.initForCloseIter(iter, callee);
+ MCall* call = makeCall(callInfo, needsThisCheck, wrappedTarget, isDOMCall);
+ if (!call) {
+ return false;
+ }
+ addEffectful(call);
+ if (kind == CompletionKind::Throw) {
+ return resumeAfter(call);
+ }
+
+ // If we bail out here, after the call but before the CheckIsObj, we
+ // can't simply resume in the baseline interpreter. If we resume
+ // after the CloseIter, we won't check the return value. If we
+ // resume at the CloseIter, we will call the |return| method twice.
+ // Instead, we use a special resume mode that captures the
+ // intermediate value, and then checks that it's an object while
+ // bailing out.
+ current->push(call);
+ MResumePoint* resumePoint =
+ MResumePoint::New(alloc(), current, loc_.toRawBytecode(),
+ ResumeMode::ResumeAfterCheckIsObject);
+ if (!resumePoint) {
+ return false;
+ }
+ call->setResumePoint(resumePoint);
+ current->pop();
+
+ MCheckIsObj* check = MCheckIsObj::New(
+ alloc(), call, uint8_t(CheckIsObjectKind::IteratorReturn));
+ add(check);
+
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardGlobalGeneration(
+ uint32_t expectedOffset, uint32_t generationAddrOffset) {
+ uint32_t expected = uint32StubField(expectedOffset);
+ const void* generationAddr = rawPointerField(generationAddrOffset);
+
+ auto guard = MGuardGlobalGeneration::New(alloc(), expected, generationAddr);
+ add(guard);
+
+ return true;
+}
+
+#ifdef FUZZING_JS_FUZZILLI
+bool WarpCacheIRTranspiler::emitFuzzilliHashResult(ValOperandId valId) {
+ MDefinition* input = getOperand(valId);
+
+ auto* hash = MFuzzilliHash::New(alloc(), input);
+ add(hash);
+
+ auto* store = MFuzzilliHashStore::New(alloc(), hash);
+ addEffectful(store);
+ pushResult(constant(UndefinedValue()));
+
+ return resumeAfter(store);
+}
+#endif
+
+static void MaybeSetImplicitlyUsed(uint32_t numInstructionIdsBefore,
+ MDefinition* input) {
+ // When building MIR from bytecode, for each MDefinition that's an operand to
+ // a bytecode instruction, we must either add an SSA use or set the
+ // ImplicitlyUsed flag on that definition. The ImplicitlyUsed flag prevents
+  // the backend from optimizing out values that will be used by Baseline after
+ // a bailout.
+ //
+ // WarpBuilder uses WarpPoppedValueUseChecker to assert this invariant in
+ // debug builds.
+ //
+ // This function is responsible for setting the ImplicitlyUsed flag for an
+ // input when using the transpiler. It looks at the input's most recent use
+ // and if that's an instruction that was added while transpiling this JSOp
+ // (based on the MIR instruction id) we don't set the ImplicitlyUsed flag.
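+  //
+  // For example, a transpiled stub that pushes a constant result without
+  // inspecting one of its inputs adds no SSA use for that input, so the flag
+  // is set here.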
+
+ if (input->isImplicitlyUsed()) {
+ // Nothing to do.
+ return;
+ }
+
+ // If the most recent use of 'input' is an instruction we just added, there is
+ // nothing to do.
+ MDefinition* inputUse = input->maybeMostRecentlyAddedDefUse();
+ if (inputUse && inputUse->id() >= numInstructionIdsBefore) {
+ return;
+ }
+
+ // The transpiler didn't add a use for 'input'.
+ input->setImplicitlyUsed();
+}
+
+bool jit::TranspileCacheIRToMIR(WarpBuilder* builder, BytecodeLocation loc,
+ const WarpCacheIR* cacheIRSnapshot,
+ std::initializer_list<MDefinition*> inputs,
+ CallInfo* maybeCallInfo) {
+ uint32_t numInstructionIdsBefore =
+ builder->mirGen().graph().getNumInstructionIds();
+
+ WarpCacheIRTranspiler transpiler(builder, loc, maybeCallInfo,
+ cacheIRSnapshot);
+ if (!transpiler.transpile(inputs)) {
+ return false;
+ }
+
+ for (MDefinition* input : inputs) {
+ MaybeSetImplicitlyUsed(numInstructionIdsBefore, input);
+ }
+
+ if (maybeCallInfo) {
+ auto maybeSetFlag = [numInstructionIdsBefore](MDefinition* def) {
+ MaybeSetImplicitlyUsed(numInstructionIdsBefore, def);
+ };
+ maybeCallInfo->forEachCallOperand(maybeSetFlag);
+ }
+
+ return true;
+}
diff --git a/js/src/jit/WarpCacheIRTranspiler.h b/js/src/jit/WarpCacheIRTranspiler.h
new file mode 100644
index 0000000000..19022b9e1a
--- /dev/null
+++ b/js/src/jit/WarpCacheIRTranspiler.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_WarpCacheIRTranspiler_h
+#define jit_WarpCacheIRTranspiler_h
+
+#include <initializer_list>
+
+namespace js {
+
+class BytecodeLocation;
+
+namespace jit {
+
+class CallInfo;
+class MDefinition;
+class WarpBuilder;
+class WarpCacheIR;
+
+// Generate MIR from a Baseline ICStub's CacheIR.
+[[nodiscard]] bool TranspileCacheIRToMIR(
+ WarpBuilder* builder, BytecodeLocation loc,
+ const WarpCacheIR* cacheIRSnapshot,
+ std::initializer_list<MDefinition*> inputs,
+ CallInfo* maybeCallInfo = nullptr);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_WarpCacheIRTranspiler_h */
diff --git a/js/src/jit/WarpOracle.cpp b/js/src/jit/WarpOracle.cpp
new file mode 100644
index 0000000000..f4b4228c8f
--- /dev/null
+++ b/js/src/jit/WarpOracle.cpp
@@ -0,0 +1,1226 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/WarpOracle.h"
+
+#include "mozilla/ScopeExit.h"
+
+#include <algorithm>
+
+#include "jit/CacheIR.h"
+#include "jit/CacheIRCompiler.h"
+#include "jit/CacheIRReader.h"
+#include "jit/CompileInfo.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRealm.h"
+#include "jit/JitScript.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIRGenerator.h"
+#include "jit/TrialInlining.h"
+#include "jit/TypeData.h"
+#include "jit/WarpBuilder.h"
+#include "util/DifferentialTesting.h"
+#include "vm/BuiltinObjectKind.h"
+#include "vm/BytecodeIterator.h"
+#include "vm/BytecodeLocation.h"
+
+#include "jit/InlineScriptTree-inl.h"
+#include "vm/BytecodeIterator-inl.h"
+#include "vm/BytecodeLocation-inl.h"
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/Interpreter-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Maybe;
+
+// WarpScriptOracle creates a WarpScriptSnapshot for a single JSScript. Note
+// that a single WarpOracle can use multiple WarpScriptOracles when scripts are
+// inlined.
+class MOZ_STACK_CLASS WarpScriptOracle {
+ JSContext* cx_;
+ WarpOracle* oracle_;
+ MIRGenerator& mirGen_;
+ TempAllocator& alloc_;
+ HandleScript script_;
+ const CompileInfo* info_;
+ ICScript* icScript_;
+
+  // Index of the next ICEntry for getICEntryAndFallback. This assumes the
+  // script's bytecode is processed from first to last instruction.
+ uint32_t icEntryIndex_ = 0;
+
+ template <typename... Args>
+ mozilla::GenericErrorResult<AbortReason> abort(Args&&... args) {
+ return oracle_->abort(script_, args...);
+ }
+
+ WarpEnvironment createEnvironment();
+ AbortReasonOr<Ok> maybeInlineIC(WarpOpSnapshotList& snapshots,
+ BytecodeLocation loc);
+ AbortReasonOr<bool> maybeInlineCall(WarpOpSnapshotList& snapshots,
+ BytecodeLocation loc, ICCacheIRStub* stub,
+ ICFallbackStub* fallbackStub,
+ uint8_t* stubDataCopy);
+ AbortReasonOr<bool> maybeInlinePolymorphicTypes(WarpOpSnapshotList& snapshots,
+ BytecodeLocation loc,
+ ICCacheIRStub* firstStub,
+ ICFallbackStub* fallbackStub);
+ [[nodiscard]] bool replaceNurseryAndAllocSitePointers(
+ ICCacheIRStub* stub, const CacheIRStubInfo* stubInfo,
+ uint8_t* stubDataCopy);
+
+ public:
+ WarpScriptOracle(JSContext* cx, WarpOracle* oracle, HandleScript script,
+ const CompileInfo* info, ICScript* icScript)
+ : cx_(cx),
+ oracle_(oracle),
+ mirGen_(oracle->mirGen()),
+ alloc_(mirGen_.alloc()),
+ script_(script),
+ info_(info),
+ icScript_(icScript) {}
+
+ AbortReasonOr<WarpScriptSnapshot*> createScriptSnapshot();
+
+ ICEntry& getICEntryAndFallback(BytecodeLocation loc,
+ ICFallbackStub** fallback);
+};
+
+WarpOracle::WarpOracle(JSContext* cx, MIRGenerator& mirGen,
+ HandleScript outerScript)
+ : cx_(cx),
+ mirGen_(mirGen),
+ alloc_(mirGen.alloc()),
+ outerScript_(outerScript) {}
+
+mozilla::GenericErrorResult<AbortReason> WarpOracle::abort(HandleScript script,
+ AbortReason r) {
+ auto res = mirGen_.abort(r);
+ JitSpew(JitSpew_IonAbort, "aborted @ %s", script->filename());
+ return res;
+}
+
+mozilla::GenericErrorResult<AbortReason> WarpOracle::abort(HandleScript script,
+ AbortReason r,
+ const char* message,
+ ...) {
+ va_list ap;
+ va_start(ap, message);
+ auto res = mirGen_.abortFmt(r, message, ap);
+ va_end(ap);
+ JitSpew(JitSpew_IonAbort, "aborted @ %s", script->filename());
+ return res;
+}
+
+void WarpOracle::addScriptSnapshot(WarpScriptSnapshot* scriptSnapshot,
+ ICScript* icScript, size_t bytecodeLength) {
+ scriptSnapshots_.insertBack(scriptSnapshot);
+ accumulatedBytecodeSize_ += bytecodeLength;
+#ifdef DEBUG
+ runningScriptHash_ = mozilla::AddToHash(runningScriptHash_, icScript->hash());
+#endif
+}
+
+AbortReasonOr<WarpSnapshot*> WarpOracle::createSnapshot() {
+#ifdef JS_JITSPEW
+ const char* mode;
+ if (outerScript_->hasIonScript()) {
+ mode = "Recompiling";
+ } else {
+ mode = "Compiling";
+ }
+ JitSpew(JitSpew_IonScripts,
+ "Warp %s script %s:%u:%u (%p) (warmup-counter=%" PRIu32 ",%s%s)",
+ mode, outerScript_->filename(), outerScript_->lineno(),
+ outerScript_->column(), static_cast<JSScript*>(outerScript_),
+ outerScript_->getWarmUpCount(),
+ outerScript_->isGenerator() ? " isGenerator" : "",
+ outerScript_->isAsync() ? " isAsync" : "");
+#endif
+
+ accumulatedBytecodeSize_ = outerScript_->length();
+
+ MOZ_ASSERT(outerScript_->hasJitScript());
+ ICScript* icScript = outerScript_->jitScript()->icScript();
+ WarpScriptOracle scriptOracle(cx_, this, outerScript_, &mirGen_.outerInfo(),
+ icScript);
+
+ WarpScriptSnapshot* scriptSnapshot;
+ MOZ_TRY_VAR(scriptSnapshot, scriptOracle.createScriptSnapshot());
+
+ // Insert the outermost scriptSnapshot at the front of the list.
+ scriptSnapshots_.insertFront(scriptSnapshot);
+
+ bool recordFinalWarmUpCount = false;
+#ifdef JS_CACHEIR_SPEW
+ recordFinalWarmUpCount = outerScript_->needsFinalWarmUpCount();
+#endif
+
+ auto* snapshot = new (alloc_.fallible())
+ WarpSnapshot(cx_, alloc_, std::move(scriptSnapshots_), bailoutInfo_,
+ recordFinalWarmUpCount);
+ if (!snapshot) {
+ return abort(outerScript_, AbortReason::Alloc);
+ }
+
+ if (!snapshot->nurseryObjects().appendAll(nurseryObjects_)) {
+ return abort(outerScript_, AbortReason::Alloc);
+ }
+
+#ifdef JS_JITSPEW
+ if (JitSpewEnabled(JitSpew_WarpSnapshots)) {
+ Fprinter& out = JitSpewPrinter();
+ snapshot->dump(out);
+ }
+#endif
+
+#ifdef DEBUG
+ // When transpiled CacheIR bails out, we do not want to recompile
+ // with the exact same data and get caught in an invalidation loop.
+ //
+ // To avoid this, we store a hash of the stub pointers and entry
+ // counts in this snapshot, save that hash in the JitScript if we
+ // have a TranspiledCacheIR or MonomorphicInlinedStubFolding bailout,
+ // and assert that the hash has changed when we recompile.
+ //
+ // Note: this assertion catches potential performance issues.
+ // Failing this assertion is not a correctness/security problem.
+ // We therefore ignore cases involving resource exhaustion (OOM,
+ // stack overflow, etc), or stubs purged by GC.
+ HashNumber hash = mozilla::AddToHash(icScript->hash(), runningScriptHash_);
+ if (outerScript_->jitScript()->hasFailedICHash()) {
+ HashNumber oldHash = outerScript_->jitScript()->getFailedICHash();
+ MOZ_ASSERT_IF(hash == oldHash && !js::SupportDifferentialTesting(),
+ cx_->hadResourceExhaustion());
+ }
+ snapshot->setICHash(hash);
+#endif
+
+ return snapshot;
+}
+
+template <typename T, typename... Args>
+[[nodiscard]] static bool AddOpSnapshot(TempAllocator& alloc,
+ WarpOpSnapshotList& snapshots,
+ uint32_t offset, Args&&... args) {
+ T* snapshot = new (alloc.fallible()) T(offset, std::forward<Args>(args)...);
+ if (!snapshot) {
+ return false;
+ }
+
+ snapshots.insertBack(snapshot);
+ return true;
+}
+
+[[nodiscard]] static bool AddWarpGetImport(TempAllocator& alloc,
+ WarpOpSnapshotList& snapshots,
+ uint32_t offset, JSScript* script,
+ PropertyName* name) {
+ ModuleEnvironmentObject* env = GetModuleEnvironmentForScript(script);
+ MOZ_ASSERT(env);
+
+ mozilla::Maybe<PropertyInfo> prop;
+ ModuleEnvironmentObject* targetEnv;
+ MOZ_ALWAYS_TRUE(env->lookupImport(NameToId(name), &targetEnv, &prop));
+
+ uint32_t numFixedSlots = targetEnv->numFixedSlots();
+ uint32_t slot = prop->slot();
+
+ // In the rare case where this import hasn't been initialized already (we have
+ // an import cycle where modules reference each other's imports), we need a
+ // check.
+ bool needsLexicalCheck =
+ targetEnv->getSlot(slot).isMagic(JS_UNINITIALIZED_LEXICAL);
+
+ return AddOpSnapshot<WarpGetImport>(alloc, snapshots, offset, targetEnv,
+ numFixedSlots, slot, needsLexicalCheck);
+}
+
+ICEntry& WarpScriptOracle::getICEntryAndFallback(BytecodeLocation loc,
+ ICFallbackStub** fallback) {
+ const uint32_t offset = loc.bytecodeToOffset(script_);
+
+ do {
+ *fallback = icScript_->fallbackStub(icEntryIndex_);
+ icEntryIndex_++;
+ } while ((*fallback)->pcOffset() < offset);
+
+ MOZ_ASSERT((*fallback)->pcOffset() == offset);
+ return icScript_->icEntry(icEntryIndex_ - 1);
+}
+
+WarpEnvironment WarpScriptOracle::createEnvironment() {
+ // Don't do anything if the script doesn't use the environment chain.
+ if (!script_->jitScript()->usesEnvironmentChain()) {
+ return WarpEnvironment(NoEnvironment());
+ }
+
+ if (script_->isModule()) {
+ ModuleObject* module = script_->module();
+ JSObject* obj = &module->initialEnvironment();
+ return WarpEnvironment(ConstantObjectEnvironment(obj));
+ }
+
+ JSFunction* fun = script_->function();
+ if (!fun) {
+ // For global scripts without a non-syntactic global scope, the environment
+ // chain is the global lexical environment.
+ MOZ_ASSERT(!script_->isForEval());
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+ JSObject* obj = &script_->global().lexicalEnvironment();
+ return WarpEnvironment(ConstantObjectEnvironment(obj));
+ }
+
+ JSObject* templateEnv = script_->jitScript()->templateEnvironment();
+
+ CallObject* callObjectTemplate = nullptr;
+ if (fun->needsCallObject()) {
+ callObjectTemplate = &templateEnv->as<CallObject>();
+ }
+
+ NamedLambdaObject* namedLambdaTemplate = nullptr;
+ if (fun->needsNamedLambdaEnvironment()) {
+ if (callObjectTemplate) {
+ templateEnv = templateEnv->enclosingEnvironment();
+ }
+ namedLambdaTemplate = &templateEnv->as<NamedLambdaObject>();
+ }
+
+ return WarpEnvironment(
+ FunctionEnvironment(callObjectTemplate, namedLambdaTemplate));
+}
+
+AbortReasonOr<WarpScriptSnapshot*> WarpScriptOracle::createScriptSnapshot() {
+ MOZ_ASSERT(script_->hasJitScript());
+
+ if (!script_->jitScript()->ensureHasCachedIonData(cx_, script_)) {
+ return abort(AbortReason::Error);
+ }
+
+ if (script_->failedBoundsCheck()) {
+ oracle_->bailoutInfo().setFailedBoundsCheck();
+ }
+ if (script_->failedLexicalCheck()) {
+ oracle_->bailoutInfo().setFailedLexicalCheck();
+ }
+
+ WarpEnvironment environment = createEnvironment();
+
+ // Unfortunately LinkedList<> asserts the list is empty in its destructor.
+ // Clear the list if we abort compilation.
+ WarpOpSnapshotList opSnapshots;
+ auto autoClearOpSnapshots =
+ mozilla::MakeScopeExit([&] { opSnapshots.clear(); });
+
+ ModuleObject* moduleObject = nullptr;
+
+ // Analyze the bytecode. Abort compilation for unsupported ops and create
+ // WarpOpSnapshots.
+ for (BytecodeLocation loc : AllBytecodesIterable(script_)) {
+ JSOp op = loc.getOp();
+ uint32_t offset = loc.bytecodeToOffset(script_);
+ switch (op) {
+ case JSOp::Arguments: {
+ MOZ_ASSERT(script_->needsArgsObj());
+ bool mapped = script_->hasMappedArgsObj();
+ ArgumentsObject* templateObj =
+ script_->global().maybeArgumentsTemplateObject(mapped);
+ if (!AddOpSnapshot<WarpArguments>(alloc_, opSnapshots, offset,
+ templateObj)) {
+ return abort(AbortReason::Alloc);
+ }
+ break;
+ }
+ case JSOp::RegExp: {
+ bool hasShared = loc.getRegExp(script_)->hasShared();
+ if (!AddOpSnapshot<WarpRegExp>(alloc_, opSnapshots, offset,
+ hasShared)) {
+ return abort(AbortReason::Alloc);
+ }
+ break;
+ }
+
+ case JSOp::FunctionThis:
+ if (!script_->strict() && script_->hasNonSyntacticScope()) {
+ // Abort because MBoxNonStrictThis doesn't support non-syntactic
+ // scopes (a deprecated SpiderMonkey mechanism). If this becomes an
+ // issue we could support it by refactoring GetFunctionThis to not
+ // take a frame pointer and then call that.
+ return abort(AbortReason::Disable,
+ "JSOp::FunctionThis with non-syntactic scope");
+ }
+ break;
+
+ case JSOp::GlobalThis:
+ MOZ_ASSERT(!script_->hasNonSyntacticScope());
+ break;
+
+ case JSOp::BuiltinObject: {
+ // If we already resolved this built-in we can bake it in.
+ auto kind = loc.getBuiltinObjectKind();
+ if (JSObject* proto = MaybeGetBuiltinObject(cx_->global(), kind)) {
+ if (!AddOpSnapshot<WarpBuiltinObject>(alloc_, opSnapshots, offset,
+ proto)) {
+ return abort(AbortReason::Alloc);
+ }
+ }
+ break;
+ }
+
+ case JSOp::GetIntrinsic: {
+ // If we already cloned this intrinsic we can bake it in.
+ // NOTE: When the initializer runs in a content global, we also have to
+ // worry about nursery objects. These quickly tenure and stay that
+ // way so this is only a temporary problem.
+ PropertyName* name = loc.getPropertyName(script_);
+ Value val;
+ if (cx_->global()->maybeGetIntrinsicValue(name, &val, cx_) &&
+ JS::GCPolicy<Value>::isTenured(val)) {
+ if (!AddOpSnapshot<WarpGetIntrinsic>(alloc_, opSnapshots, offset,
+ val)) {
+ return abort(AbortReason::Alloc);
+ }
+ }
+ break;
+ }
+
+ case JSOp::ImportMeta: {
+ if (!moduleObject) {
+ moduleObject = GetModuleObjectForScript(script_);
+ MOZ_ASSERT(moduleObject->isTenured());
+ }
+ break;
+ }
+
+ case JSOp::GetImport: {
+ PropertyName* name = loc.getPropertyName(script_);
+ if (!AddWarpGetImport(alloc_, opSnapshots, offset, script_, name)) {
+ return abort(AbortReason::Alloc);
+ }
+ break;
+ }
+
+ case JSOp::Lambda: {
+ JSFunction* fun = loc.getFunction(script_);
+ if (IsAsmJSModule(fun)) {
+ return abort(AbortReason::Disable, "asm.js module function lambda");
+ }
+ break;
+ }
+
+ case JSOp::GetElemSuper: {
+#if defined(JS_CODEGEN_X86)
+ // x86 does not have enough registers.
+ return abort(AbortReason::Disable,
+ "GetElemSuper is not supported on x86");
+#else
+ MOZ_TRY(maybeInlineIC(opSnapshots, loc));
+ break;
+#endif
+ }
+
+ case JSOp::Rest: {
+ if (Shape* shape =
+ script_->global().maybeArrayShapeWithDefaultProto()) {
+ if (!AddOpSnapshot<WarpRest>(alloc_, opSnapshots, offset, shape)) {
+ return abort(AbortReason::Alloc);
+ }
+ }
+ break;
+ }
+
+ case JSOp::BindGName: {
+ Rooted<GlobalObject*> global(cx_, &script_->global());
+ Rooted<PropertyName*> name(cx_, loc.getPropertyName(script_));
+ if (JSObject* env = MaybeOptimizeBindGlobalName(cx_, global, name)) {
+ MOZ_ASSERT(env->isTenured());
+ if (!AddOpSnapshot<WarpBindGName>(alloc_, opSnapshots, offset, env)) {
+ return abort(AbortReason::Alloc);
+ }
+ } else {
+ MOZ_TRY(maybeInlineIC(opSnapshots, loc));
+ }
+ break;
+ }
+
+ case JSOp::PushVarEnv: {
+ Rooted<VarScope*> scope(cx_, &loc.getScope(script_)->as<VarScope>());
+
+ auto* templateObj =
+ VarEnvironmentObject::createTemplateObject(cx_, scope);
+ if (!templateObj) {
+ return abort(AbortReason::Alloc);
+ }
+ MOZ_ASSERT(templateObj->isTenured());
+
+ if (!AddOpSnapshot<WarpVarEnvironment>(alloc_, opSnapshots, offset,
+ templateObj)) {
+ return abort(AbortReason::Alloc);
+ }
+ break;
+ }
+
+ case JSOp::PushLexicalEnv:
+ case JSOp::FreshenLexicalEnv:
+ case JSOp::RecreateLexicalEnv: {
+ Rooted<LexicalScope*> scope(cx_,
+ &loc.getScope(script_)->as<LexicalScope>());
+
+ auto* templateObj =
+ BlockLexicalEnvironmentObject::createTemplateObject(cx_, scope);
+ if (!templateObj) {
+ return abort(AbortReason::Alloc);
+ }
+ MOZ_ASSERT(templateObj->isTenured());
+
+ if (!AddOpSnapshot<WarpLexicalEnvironment>(alloc_, opSnapshots, offset,
+ templateObj)) {
+ return abort(AbortReason::Alloc);
+ }
+ break;
+ }
+
+ case JSOp::PushClassBodyEnv: {
+ Rooted<ClassBodyScope*> scope(
+ cx_, &loc.getScope(script_)->as<ClassBodyScope>());
+
+ auto* templateObj =
+ ClassBodyLexicalEnvironmentObject::createTemplateObject(cx_, scope);
+ if (!templateObj) {
+ return abort(AbortReason::Alloc);
+ }
+ MOZ_ASSERT(templateObj->isTenured());
+
+ if (!AddOpSnapshot<WarpClassBodyEnvironment>(alloc_, opSnapshots,
+ offset, templateObj)) {
+ return abort(AbortReason::Alloc);
+ }
+ break;
+ }
+
+ case JSOp::GetName:
+ case JSOp::GetGName:
+ case JSOp::GetProp:
+ case JSOp::GetElem:
+ case JSOp::SetProp:
+ case JSOp::StrictSetProp:
+ case JSOp::Call:
+ case JSOp::CallContent:
+ case JSOp::CallIgnoresRv:
+ case JSOp::CallIter:
+ case JSOp::CallContentIter:
+ case JSOp::New:
+ case JSOp::NewContent:
+ case JSOp::SuperCall:
+ case JSOp::SpreadCall:
+ case JSOp::SpreadNew:
+ case JSOp::SpreadSuperCall:
+ case JSOp::ToNumeric:
+ case JSOp::Pos:
+ case JSOp::Inc:
+ case JSOp::Dec:
+ case JSOp::Neg:
+ case JSOp::BitNot:
+ case JSOp::Iter:
+ case JSOp::Eq:
+ case JSOp::Ne:
+ case JSOp::Lt:
+ case JSOp::Le:
+ case JSOp::Gt:
+ case JSOp::Ge:
+ case JSOp::StrictEq:
+ case JSOp::StrictNe:
+ case JSOp::BindName:
+ case JSOp::Add:
+ case JSOp::Sub:
+ case JSOp::Mul:
+ case JSOp::Div:
+ case JSOp::Mod:
+ case JSOp::Pow:
+ case JSOp::BitAnd:
+ case JSOp::BitOr:
+ case JSOp::BitXor:
+ case JSOp::Lsh:
+ case JSOp::Rsh:
+ case JSOp::Ursh:
+ case JSOp::In:
+ case JSOp::HasOwn:
+ case JSOp::CheckPrivateField:
+ case JSOp::Instanceof:
+ case JSOp::GetPropSuper:
+ case JSOp::InitProp:
+ case JSOp::InitLockedProp:
+ case JSOp::InitHiddenProp:
+ case JSOp::InitElem:
+ case JSOp::InitHiddenElem:
+ case JSOp::InitLockedElem:
+ case JSOp::InitElemInc:
+ case JSOp::SetName:
+ case JSOp::StrictSetName:
+ case JSOp::SetGName:
+ case JSOp::StrictSetGName:
+ case JSOp::InitGLexical:
+ case JSOp::SetElem:
+ case JSOp::StrictSetElem:
+ case JSOp::ToPropertyKey:
+ case JSOp::OptimizeSpreadCall:
+ case JSOp::Typeof:
+ case JSOp::TypeofExpr:
+ case JSOp::NewObject:
+ case JSOp::NewInit:
+ case JSOp::NewArray:
+ case JSOp::JumpIfFalse:
+ case JSOp::JumpIfTrue:
+ case JSOp::And:
+ case JSOp::Or:
+ case JSOp::Not:
+ case JSOp::CloseIter:
+ MOZ_TRY(maybeInlineIC(opSnapshots, loc));
+ break;
+
+ case JSOp::Nop:
+ case JSOp::NopDestructuring:
+ case JSOp::TryDestructuring:
+ case JSOp::Lineno:
+ case JSOp::DebugLeaveLexicalEnv:
+ case JSOp::Undefined:
+ case JSOp::Void:
+ case JSOp::Null:
+ case JSOp::Hole:
+ case JSOp::Uninitialized:
+ case JSOp::IsConstructing:
+ case JSOp::False:
+ case JSOp::True:
+ case JSOp::Zero:
+ case JSOp::One:
+ case JSOp::Int8:
+ case JSOp::Uint16:
+ case JSOp::Uint24:
+ case JSOp::Int32:
+ case JSOp::Double:
+ case JSOp::BigInt:
+ case JSOp::String:
+ case JSOp::Symbol:
+ case JSOp::Pop:
+ case JSOp::PopN:
+ case JSOp::Dup:
+ case JSOp::Dup2:
+ case JSOp::DupAt:
+ case JSOp::Swap:
+ case JSOp::Pick:
+ case JSOp::Unpick:
+ case JSOp::GetLocal:
+ case JSOp::SetLocal:
+ case JSOp::InitLexical:
+ case JSOp::GetArg:
+ case JSOp::GetFrameArg:
+ case JSOp::SetArg:
+ case JSOp::ArgumentsLength:
+ case JSOp::GetActualArg:
+ case JSOp::JumpTarget:
+ case JSOp::LoopHead:
+ case JSOp::Case:
+ case JSOp::Default:
+ case JSOp::Coalesce:
+ case JSOp::Goto:
+ case JSOp::DebugCheckSelfHosted:
+ case JSOp::DynamicImport:
+ case JSOp::ToString:
+ case JSOp::GlobalOrEvalDeclInstantiation:
+ case JSOp::BindVar:
+ case JSOp::MutateProto:
+ case JSOp::Callee:
+ case JSOp::ToAsyncIter:
+ case JSOp::ObjWithProto:
+ case JSOp::GetAliasedVar:
+ case JSOp::SetAliasedVar:
+ case JSOp::InitAliasedLexical:
+ case JSOp::EnvCallee:
+ case JSOp::MoreIter:
+ case JSOp::EndIter:
+ case JSOp::IsNoIter:
+ case JSOp::IsNullOrUndefined:
+ case JSOp::DelProp:
+ case JSOp::StrictDelProp:
+ case JSOp::DelElem:
+ case JSOp::StrictDelElem:
+ case JSOp::SetFunName:
+ case JSOp::PopLexicalEnv:
+ case JSOp::ImplicitThis:
+ case JSOp::CheckClassHeritage:
+ case JSOp::CheckThis:
+ case JSOp::CheckThisReinit:
+ case JSOp::Generator:
+ case JSOp::AfterYield:
+ case JSOp::FinalYieldRval:
+ case JSOp::AsyncResolve:
+ case JSOp::CheckResumeKind:
+ case JSOp::CanSkipAwait:
+ case JSOp::MaybeExtractAwaitValue:
+ case JSOp::AsyncAwait:
+ case JSOp::Await:
+ case JSOp::CheckReturn:
+ case JSOp::CheckLexical:
+ case JSOp::CheckAliasedLexical:
+ case JSOp::InitHomeObject:
+ case JSOp::SuperBase:
+ case JSOp::SuperFun:
+ case JSOp::InitElemArray:
+ case JSOp::InitPropGetter:
+ case JSOp::InitPropSetter:
+ case JSOp::InitHiddenPropGetter:
+ case JSOp::InitHiddenPropSetter:
+ case JSOp::InitElemGetter:
+ case JSOp::InitElemSetter:
+ case JSOp::InitHiddenElemGetter:
+ case JSOp::InitHiddenElemSetter:
+ case JSOp::NewTarget:
+ case JSOp::Object:
+ case JSOp::CallSiteObj:
+ case JSOp::CheckIsObj:
+ case JSOp::CheckObjCoercible:
+ case JSOp::FunWithProto:
+ case JSOp::Debugger:
+ case JSOp::TableSwitch:
+ case JSOp::Exception:
+ case JSOp::Throw:
+ case JSOp::ThrowSetConst:
+ case JSOp::SetRval:
+ case JSOp::GetRval:
+ case JSOp::Return:
+ case JSOp::RetRval:
+ case JSOp::InitialYield:
+ case JSOp::Yield:
+ case JSOp::ResumeKind:
+ case JSOp::ThrowMsg:
+ case JSOp::Try:
+ case JSOp::Finally:
+ case JSOp::NewPrivateName:
+ // Supported by WarpBuilder. Nothing to do.
+ break;
+
+      // Unsupported ops. Don't use a 'default' here; we want to trigger a
+      // compiler warning when adding a new JSOp.
+#define DEF_CASE(OP) case JSOp::OP:
+ WARP_UNSUPPORTED_OPCODE_LIST(DEF_CASE)
+#undef DEF_CASE
+#ifdef DEBUG
+ return abort(AbortReason::Disable, "Unsupported opcode: %s",
+ CodeName(op));
+#else
+ return abort(AbortReason::Disable, "Unsupported opcode: %u",
+ uint8_t(op));
+#endif
+ }
+ }
+
+ auto* scriptSnapshot = new (alloc_.fallible()) WarpScriptSnapshot(
+ script_, environment, std::move(opSnapshots), moduleObject);
+ if (!scriptSnapshot) {
+ return abort(AbortReason::Alloc);
+ }
+
+ autoClearOpSnapshots.release();
+ return scriptSnapshot;
+}
+
+static void LineNumberAndColumn(HandleScript script, BytecodeLocation loc,
+ unsigned* line, unsigned* column) {
+#ifdef DEBUG
+ *line = PCToLineNumber(script, loc.toRawBytecode(), column);
+#else
+ *line = script->lineno();
+ *column = script->column();
+#endif
+}
+
+AbortReasonOr<Ok> WarpScriptOracle::maybeInlineIC(WarpOpSnapshotList& snapshots,
+ BytecodeLocation loc) {
+ // Do one of the following:
+ //
+ // * If the Baseline IC has a single ICStub we can inline, add a WarpCacheIR
+ // snapshot to transpile it to MIR.
+ //
+ // * If that single ICStub is a call IC with a known target, instead add a
+ // WarpInline snapshot to transpile the guards to MIR and inline the target.
+ //
+ // * If the Baseline IC is cold (never executed), add a WarpBailout snapshot
+ // so that we can collect information in Baseline.
+ //
+ // * Else, don't add a snapshot and rely on WarpBuilder adding an Ion IC.
+
+ MOZ_ASSERT(loc.opHasIC());
+
+ // Don't create snapshots when testing ICs.
+ if (JitOptions.forceInlineCaches) {
+ return Ok();
+ }
+
+ ICFallbackStub* fallbackStub;
+ const ICEntry& entry = getICEntryAndFallback(loc, &fallbackStub);
+ ICStub* firstStub = entry.firstStub();
+
+ uint32_t offset = loc.bytecodeToOffset(script_);
+
+ // Clear the used-by-transpiler flag on the IC. It can still be set from a
+ // previous compilation because we don't clear the flag on every IC when
+ // invalidating.
+ fallbackStub->clearUsedByTranspiler();
+
+ if (firstStub == fallbackStub) {
+ [[maybe_unused]] unsigned line, column;
+ LineNumberAndColumn(script_, loc, &line, &column);
+
+ // No optimized stubs.
+ JitSpew(JitSpew_WarpTranspiler,
+ "fallback stub (entered-count: %" PRIu32
+ ") for JSOp::%s @ %s:%u:%u",
+ fallbackStub->enteredCount(), CodeName(loc.getOp()),
+ script_->filename(), line, column);
+
+ // If the fallback stub was used but there's no optimized stub, use an IC.
+ if (fallbackStub->enteredCount() != 0) {
+ return Ok();
+ }
+
+ // Cold IC. Bailout to collect information.
+ if (!AddOpSnapshot<WarpBailout>(alloc_, snapshots, offset)) {
+ return abort(AbortReason::Alloc);
+ }
+ return Ok();
+ }
+
+ ICCacheIRStub* stub = firstStub->toCacheIRStub();
+
+ // Don't transpile if there are other stubs with entered-count > 0. Counters
+ // are reset when a new stub is attached so this means the stub that was added
+ // most recently didn't handle all cases.
+ // If this code is changed, ICScript::hash may also need changing.
+ bool firstStubHandlesAllCases = true;
+ for (ICStub* next = stub->next(); next; next = next->maybeNext()) {
+ if (next->enteredCount() != 0) {
+ firstStubHandlesAllCases = false;
+ break;
+ }
+ }
+
+ if (!firstStubHandlesAllCases) {
+ // In some polymorphic cases, we can generate better code than the
+ // default fallback if we know the observed types of the operands
+ // and their relative frequency.
+ if (ICSupportsPolymorphicTypeData(loc.getOp()) &&
+ fallbackStub->enteredCount() == 0) {
+ bool inlinedPolymorphicTypes = false;
+ MOZ_TRY_VAR(
+ inlinedPolymorphicTypes,
+ maybeInlinePolymorphicTypes(snapshots, loc, stub, fallbackStub));
+ if (inlinedPolymorphicTypes) {
+ return Ok();
+ }
+ }
+
+ [[maybe_unused]] unsigned line, column;
+ LineNumberAndColumn(script_, loc, &line, &column);
+
+ JitSpew(JitSpew_WarpTranspiler,
+ "multiple active stubs for JSOp::%s @ %s:%u:%u",
+ CodeName(loc.getOp()), script_->filename(), line, column);
+ return Ok();
+ }
+
+ const CacheIRStubInfo* stubInfo = stub->stubInfo();
+ const uint8_t* stubData = stub->stubDataStart();
+
+ // Only create a snapshot if all opcodes are supported by the transpiler.
+ CacheIRReader reader(stubInfo);
+ while (reader.more()) {
+ CacheOp op = reader.readOp();
+ CacheIROpInfo opInfo = CacheIROpInfos[size_t(op)];
+ reader.skip(opInfo.argLength);
+
+ if (!opInfo.transpile) {
+ [[maybe_unused]] unsigned line, column;
+ LineNumberAndColumn(script_, loc, &line, &column);
+
+ MOZ_ASSERT(
+ fallbackStub->trialInliningState() != TrialInliningState::Inlined,
+ "Trial-inlined stub not supported by transpiler");
+
+ // Unsupported CacheIR opcode.
+ JitSpew(JitSpew_WarpTranspiler,
+ "unsupported CacheIR opcode %s for JSOp::%s @ %s:%u:%u",
+ CacheIROpNames[size_t(op)], CodeName(loc.getOp()),
+ script_->filename(), line, column);
+ return Ok();
+ }
+
+ // While on the main thread, ensure code stubs exist for ops that require
+ // them.
+ switch (op) {
+ case CacheOp::CallRegExpMatcherResult:
+ if (!cx_->realm()->jitRealm()->ensureRegExpMatcherStubExists(cx_)) {
+ return abort(AbortReason::Error);
+ }
+ break;
+ case CacheOp::CallRegExpSearcherResult:
+ if (!cx_->realm()->jitRealm()->ensureRegExpSearcherStubExists(cx_)) {
+ return abort(AbortReason::Error);
+ }
+ break;
+ case CacheOp::RegExpBuiltinExecMatchResult:
+ if (!cx_->realm()->jitRealm()->ensureRegExpExecMatchStubExists(cx_)) {
+ return abort(AbortReason::Error);
+ }
+ break;
+ case CacheOp::RegExpBuiltinExecTestResult:
+ if (!cx_->realm()->jitRealm()->ensureRegExpExecTestStubExists(cx_)) {
+ return abort(AbortReason::Error);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Copy the ICStub data to protect against the stub being unlinked or mutated.
+ // We don't need to copy the CacheIRStubInfo: because we store and trace the
+ // stub's JitCode*, the baselineCacheIRStubCodes_ map in JitZone will keep it
+ // alive.
+ uint8_t* stubDataCopy = nullptr;
+ size_t bytesNeeded = stubInfo->stubDataSize();
+ if (bytesNeeded > 0) {
+ stubDataCopy = alloc_.allocateArray<uint8_t>(bytesNeeded);
+ if (!stubDataCopy) {
+ return abort(AbortReason::Alloc);
+ }
+
+ // Note: nursery pointers are handled below so we don't need to trigger any
+ // GC barriers and can do a bitwise copy.
+ std::copy_n(stubData, bytesNeeded, stubDataCopy);
+
+ if (!replaceNurseryAndAllocSitePointers(stub, stubInfo, stubDataCopy)) {
+ return abort(AbortReason::Alloc);
+ }
+ }
+
+ JitCode* jitCode = stub->jitCode();
+
+ if (fallbackStub->trialInliningState() == TrialInliningState::Inlined ||
+ fallbackStub->trialInliningState() ==
+ TrialInliningState::MonomorphicInlined) {
+ bool inlinedCall;
+ MOZ_TRY_VAR(inlinedCall, maybeInlineCall(snapshots, loc, stub, fallbackStub,
+ stubDataCopy));
+ if (inlinedCall) {
+ return Ok();
+ }
+ }
+
+ if (!AddOpSnapshot<WarpCacheIR>(alloc_, snapshots, offset, jitCode, stubInfo,
+ stubDataCopy)) {
+ return abort(AbortReason::Alloc);
+ }
+
+ fallbackStub->setUsedByTranspiler();
+
+ return Ok();
+}
+
+AbortReasonOr<bool> WarpScriptOracle::maybeInlineCall(
+ WarpOpSnapshotList& snapshots, BytecodeLocation loc, ICCacheIRStub* stub,
+ ICFallbackStub* fallbackStub, uint8_t* stubDataCopy) {
+ Maybe<InlinableOpData> inlineData = FindInlinableOpData(stub, loc);
+ if (inlineData.isNothing()) {
+ return false;
+ }
+
+ RootedFunction targetFunction(cx_, inlineData->target);
+ if (!TrialInliner::canInline(targetFunction, script_, loc)) {
+ return false;
+ }
+
+ bool isTrialInlined =
+ fallbackStub->trialInliningState() == TrialInliningState::Inlined;
+ MOZ_ASSERT_IF(!isTrialInlined, fallbackStub->trialInliningState() ==
+ TrialInliningState::MonomorphicInlined);
+
+ RootedScript targetScript(cx_, targetFunction->nonLazyScript());
+ ICScript* icScript = nullptr;
+ if (isTrialInlined) {
+ icScript = inlineData->icScript;
+ } else {
+ JitScript* jitScript = targetScript->jitScript();
+ icScript = jitScript->icScript();
+ }
+
+ if (!icScript) {
+ return false;
+ }
+
+ // This is just a cheap check to limit the damage we can do to ourselves if
+ // we try to monomorphically inline an indirectly recursive call.
+ const uint32_t maxInliningDepth = 8;
+ if (!isTrialInlined &&
+ info_->inlineScriptTree()->depth() > maxInliningDepth) {
+ return false;
+ }
+
+ // And this is a second cheap check to ensure monomorphic inlining doesn't
+ // cause us to blow past our script size budget.
+ if (oracle_->accumulatedBytecodeSize() + targetScript->length() >
+ JitOptions.ionMaxScriptSize) {
+ return false;
+ }
+
+ // Add the inlined script to the inline script tree.
+ LifoAlloc* lifoAlloc = alloc_.lifoAlloc();
+ InlineScriptTree* inlineScriptTree = info_->inlineScriptTree()->addCallee(
+ &alloc_, loc.toRawBytecode(), targetScript);
+ if (!inlineScriptTree) {
+ return abort(AbortReason::Alloc);
+ }
+
+ // Create a CompileInfo for the inlined script.
+ jsbytecode* osrPc = nullptr;
+ bool needsArgsObj = targetScript->needsArgsObj();
+ CompileInfo* info = lifoAlloc->new_<CompileInfo>(
+ mirGen_.runtime, targetScript, targetFunction, osrPc, needsArgsObj,
+ inlineScriptTree);
+ if (!info) {
+ return abort(AbortReason::Alloc);
+ }
+
+ // Take a snapshot of the CacheIR.
+ uint32_t offset = loc.bytecodeToOffset(script_);
+ JitCode* jitCode = stub->jitCode();
+ const CacheIRStubInfo* stubInfo = stub->stubInfo();
+ WarpCacheIR* cacheIRSnapshot = new (alloc_.fallible())
+ WarpCacheIR(offset, jitCode, stubInfo, stubDataCopy);
+ if (!cacheIRSnapshot) {
+ return abort(AbortReason::Alloc);
+ }
+
+ // Take a snapshot of the inlined script (which may do more
+ // inlining recursively).
+ WarpScriptOracle scriptOracle(cx_, oracle_, targetScript, info, icScript);
+
+ AbortReasonOr<WarpScriptSnapshot*> maybeScriptSnapshot =
+ scriptOracle.createScriptSnapshot();
+
+ if (maybeScriptSnapshot.isErr()) {
+ JitSpew(JitSpew_WarpTranspiler, "Can't create snapshot for JSOp::%s",
+ CodeName(loc.getOp()));
+
+ switch (maybeScriptSnapshot.unwrapErr()) {
+ case AbortReason::Disable: {
+ // If the target script can't be warp-compiled, mark it as
+ // uninlineable, clean up, and fall through to the non-inlined path.
+ ICEntry* entry = icScript_->icEntryForStub(fallbackStub);
+ if (entry->firstStub() == stub) {
+ fallbackStub->unlinkStub(cx_->zone(), entry, /*prev=*/nullptr, stub);
+ }
+ targetScript->setUninlineable();
+ info_->inlineScriptTree()->removeCallee(inlineScriptTree);
+ if (isTrialInlined) {
+ icScript_->removeInlinedChild(loc.bytecodeToOffset(script_));
+ }
+ fallbackStub->setTrialInliningState(TrialInliningState::Failure);
+ return false;
+ }
+ case AbortReason::Error:
+ case AbortReason::Alloc:
+ return Err(maybeScriptSnapshot.unwrapErr());
+ default:
+ MOZ_CRASH("Unexpected abort reason");
+ }
+ }
+
+ WarpScriptSnapshot* scriptSnapshot = maybeScriptSnapshot.unwrap();
+ if (!isTrialInlined) {
+ scriptSnapshot->markIsMonomorphicInlined();
+ }
+
+ oracle_->addScriptSnapshot(scriptSnapshot, icScript, targetScript->length());
+
+ if (!AddOpSnapshot<WarpInlinedCall>(alloc_, snapshots, offset,
+ cacheIRSnapshot, scriptSnapshot, info)) {
+ return abort(AbortReason::Alloc);
+ }
+ fallbackStub->setUsedByTranspiler();
+ return true;
+}
+
+struct TypeFrequency {
+ TypeData typeData_;
+ uint32_t successCount_;
+ TypeFrequency(TypeData typeData, uint32_t successCount)
+ : typeData_(typeData), successCount_(successCount) {}
+
+ // Sort highest frequency first.
+ bool operator<(const TypeFrequency& other) const {
+ return other.successCount_ < successCount_;
+ }
+};
+
+AbortReasonOr<bool> WarpScriptOracle::maybeInlinePolymorphicTypes(
+ WarpOpSnapshotList& snapshots, BytecodeLocation loc,
+ ICCacheIRStub* firstStub, ICFallbackStub* fallbackStub) {
+ MOZ_ASSERT(ICSupportsPolymorphicTypeData(loc.getOp()));
+
+ // We use polymorphic type data if there are multiple active stubs,
+ // all of which have type data available.
+ Vector<TypeFrequency, 6, SystemAllocPolicy> candidates;
+ for (ICStub* stub = firstStub; !stub->isFallback();
+ stub = stub->maybeNext()) {
+ ICCacheIRStub* cacheIRStub = stub->toCacheIRStub();
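+    // A stub's enteredCount includes executions that fell through to later
+    // stubs, so subtracting the next stub's count gives the number of times
+    // this stub handled the operation.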
+ uint32_t successCount =
+ cacheIRStub->enteredCount() - cacheIRStub->next()->enteredCount();
+ if (successCount == 0) {
+ continue;
+ }
+ TypeData types = cacheIRStub->typeData();
+ if (!types.hasData()) {
+ return false;
+ }
+ if (!candidates.append(TypeFrequency(types, successCount))) {
+ return abort(AbortReason::Alloc);
+ }
+ }
+ if (candidates.length() < 2) {
+ return false;
+ }
+
+ // Sort candidates by success frequency.
+ std::sort(candidates.begin(), candidates.end());
+
+ TypeDataList list;
+ for (auto& candidate : candidates) {
+ list.addTypeData(candidate.typeData_);
+ }
+
+ uint32_t offset = loc.bytecodeToOffset(script_);
+ if (!AddOpSnapshot<WarpPolymorphicTypes>(alloc_, snapshots, offset, list)) {
+ return abort(AbortReason::Alloc);
+ }
+
+ return true;
+}
+
+bool WarpScriptOracle::replaceNurseryAndAllocSitePointers(
+ ICCacheIRStub* stub, const CacheIRStubInfo* stubInfo,
+ uint8_t* stubDataCopy) {
+ // If the stub data contains nursery object pointers, replace them with the
+ // corresponding nursery index. See WarpObjectField.
+ //
+  // If the stub data contains allocation site pointers, replace them with the
+ // initial heap to use, because the site's state may be mutated by the main
+ // thread while we are compiling.
+ //
+ // Also asserts non-object fields don't contain nursery pointers.
+
+ uint32_t field = 0;
+ size_t offset = 0;
+ while (true) {
+ StubField::Type fieldType = stubInfo->fieldType(field);
+ switch (fieldType) {
+ case StubField::Type::RawInt32:
+ case StubField::Type::RawPointer:
+ case StubField::Type::RawInt64:
+ case StubField::Type::Double:
+ break;
+ case StubField::Type::Shape:
+ static_assert(std::is_convertible_v<Shape*, gc::TenuredCell*>,
+ "Code assumes shapes are tenured");
+ break;
+ case StubField::Type::GetterSetter:
+ static_assert(std::is_convertible_v<GetterSetter*, gc::TenuredCell*>,
+ "Code assumes GetterSetters are tenured");
+ break;
+ case StubField::Type::Symbol:
+ static_assert(std::is_convertible_v<JS::Symbol*, gc::TenuredCell*>,
+ "Code assumes symbols are tenured");
+ break;
+ case StubField::Type::BaseScript:
+ static_assert(std::is_convertible_v<BaseScript*, gc::TenuredCell*>,
+ "Code assumes scripts are tenured");
+ break;
+ case StubField::Type::JitCode:
+ static_assert(std::is_convertible_v<JitCode*, gc::TenuredCell*>,
+ "Code assumes JitCodes are tenured");
+ break;
+ case StubField::Type::JSObject: {
+ JSObject* obj =
+ stubInfo->getStubField<ICCacheIRStub, JSObject*>(stub, offset);
+ if (IsInsideNursery(obj)) {
+ uint32_t nurseryIndex;
+ if (!oracle_->registerNurseryObject(obj, &nurseryIndex)) {
+ return false;
+ }
+ uintptr_t oldWord = WarpObjectField::fromObject(obj).rawData();
+ uintptr_t newWord =
+ WarpObjectField::fromNurseryIndex(nurseryIndex).rawData();
+ stubInfo->replaceStubRawWord(stubDataCopy, offset, oldWord, newWord);
+ }
+ break;
+ }
+ case StubField::Type::String: {
+#ifdef DEBUG
+ JSString* str =
+ stubInfo->getStubField<ICCacheIRStub, JSString*>(stub, offset);
+ MOZ_ASSERT(!IsInsideNursery(str));
+#endif
+ break;
+ }
+ case StubField::Type::Id: {
+#ifdef DEBUG
+ // jsid never contains nursery-allocated things.
+ jsid id = stubInfo->getStubField<ICCacheIRStub, jsid>(stub, offset);
+ MOZ_ASSERT_IF(id.isGCThing(),
+ !IsInsideNursery(id.toGCCellPtr().asCell()));
+#endif
+ break;
+ }
+ case StubField::Type::Value: {
+#ifdef DEBUG
+ Value v =
+ stubInfo->getStubField<ICCacheIRStub, JS::Value>(stub, offset);
+ MOZ_ASSERT_IF(v.isGCThing(), !IsInsideNursery(v.toGCThing()));
+#endif
+ break;
+ }
+ case StubField::Type::AllocSite: {
+ uintptr_t oldWord = stubInfo->getStubRawWord(stub, offset);
+ auto* site = reinterpret_cast<gc::AllocSite*>(oldWord);
+ gc::Heap initialHeap = site->initialHeap();
+ uintptr_t newWord = uintptr_t(initialHeap);
+ stubInfo->replaceStubRawWord(stubDataCopy, offset, oldWord, newWord);
+ break;
+ }
+ case StubField::Type::Limit:
+ return true; // Done.
+ }
+ field++;
+ offset += StubField::sizeInBytes(fieldType);
+ }
+}
+
+bool WarpOracle::registerNurseryObject(JSObject* obj, uint32_t* nurseryIndex) {
+ MOZ_ASSERT(IsInsideNursery(obj));
+
+ auto p = nurseryObjectsMap_.lookupForAdd(obj);
+ if (p) {
+ *nurseryIndex = p->value();
+ return true;
+ }
+
+ if (!nurseryObjects_.append(obj)) {
+ return false;
+ }
+ *nurseryIndex = nurseryObjects_.length() - 1;
+ return nurseryObjectsMap_.add(p, obj, *nurseryIndex);
+}
diff --git a/js/src/jit/WarpOracle.h b/js/src/jit/WarpOracle.h
new file mode 100644
index 0000000000..f8db42d15b
--- /dev/null
+++ b/js/src/jit/WarpOracle.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_WarpOracle_h
+#define jit_WarpOracle_h
+
+#include "jit/JitAllocPolicy.h"
+#include "jit/JitContext.h"
+#include "jit/WarpSnapshot.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+
+// WarpOracle creates a WarpSnapshot data structure that's used by WarpBuilder
+// to generate the MIR graph off-thread.
+class MOZ_STACK_CLASS WarpOracle {
+ JSContext* cx_;
+ MIRGenerator& mirGen_;
+ TempAllocator& alloc_;
+ HandleScript outerScript_;
+ WarpBailoutInfo bailoutInfo_;
+ WarpScriptSnapshotList scriptSnapshots_;
+ size_t accumulatedBytecodeSize_ = 0;
+#ifdef DEBUG
+ mozilla::HashNumber runningScriptHash_ = 0;
+#endif
+
+ // List of nursery objects to copy to the snapshot. See WarpObjectField.
+ // The HashMap is used to de-duplicate the Vector. It maps each object to the
+ // corresponding nursery index (index into the Vector).
+ // Note: this stores raw object pointers because WarpOracle can't GC.
+ Vector<JSObject*, 8, SystemAllocPolicy> nurseryObjects_;
+ using NurseryObjectsMap =
+ HashMap<JSObject*, uint32_t, DefaultHasher<JSObject*>, SystemAllocPolicy>;
+ NurseryObjectsMap nurseryObjectsMap_;
+
+ public:
+ WarpOracle(JSContext* cx, MIRGenerator& mirGen, HandleScript outerScript);
+ ~WarpOracle() { scriptSnapshots_.clear(); }
+
+ MIRGenerator& mirGen() { return mirGen_; }
+ WarpBailoutInfo& bailoutInfo() { return bailoutInfo_; }
+
+ [[nodiscard]] bool registerNurseryObject(JSObject* obj,
+ uint32_t* nurseryIndex);
+
+ AbortReasonOr<WarpSnapshot*> createSnapshot();
+
+ mozilla::GenericErrorResult<AbortReason> abort(HandleScript script,
+ AbortReason r);
+ mozilla::GenericErrorResult<AbortReason> abort(HandleScript script,
+ AbortReason r,
+ const char* message, ...);
+ void addScriptSnapshot(WarpScriptSnapshot* scriptSnapshot, ICScript* icScript,
+ size_t bytecodeLength);
+
+ size_t accumulatedBytecodeSize() { return accumulatedBytecodeSize_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_WarpOracle_h */
diff --git a/js/src/jit/WarpSnapshot.cpp b/js/src/jit/WarpSnapshot.cpp
new file mode 100644
index 0000000000..830429a984
--- /dev/null
+++ b/js/src/jit/WarpSnapshot.cpp
@@ -0,0 +1,408 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/WarpSnapshot.h"
+
+#include "mozilla/DebugOnly.h"
+
+#include <type_traits>
+
+#include "jit/CacheIRCompiler.h"
+#include "jit/CacheIRSpewer.h"
+#include "js/Printer.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/GetterSetter.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSContext.h"
+
+using namespace js;
+using namespace js::jit;
+
+static_assert(!std::is_polymorphic_v<WarpOpSnapshot>,
+ "WarpOpSnapshot should not have any virtual methods");
+
+WarpSnapshot::WarpSnapshot(JSContext* cx, TempAllocator& alloc,
+ WarpScriptSnapshotList&& scriptSnapshots,
+ const WarpBailoutInfo& bailoutInfo,
+ bool needsFinalWarmUpCount)
+ : scriptSnapshots_(std::move(scriptSnapshots)),
+ globalLexicalEnv_(&cx->global()->lexicalEnvironment()),
+ globalLexicalEnvThis_(globalLexicalEnv_->thisObject()),
+ bailoutInfo_(bailoutInfo),
+ nurseryObjects_(alloc) {
+#ifdef JS_CACHEIR_SPEW
+ needsFinalWarmUpCount_ = needsFinalWarmUpCount;
+#endif
+}
+
+WarpScriptSnapshot::WarpScriptSnapshot(JSScript* script,
+ const WarpEnvironment& env,
+ WarpOpSnapshotList&& opSnapshots,
+ ModuleObject* moduleObject)
+ : script_(script),
+ environment_(env),
+ opSnapshots_(std::move(opSnapshots)),
+ moduleObject_(moduleObject),
+ isArrowFunction_(script->isFunction() && script->function()->isArrow()),
+ isMonomorphicInlined_(false) {}
+
+#ifdef JS_JITSPEW
+void WarpSnapshot::dump() const {
+ Fprinter out(stderr);
+ dump(out);
+}
+
+void WarpSnapshot::dump(GenericPrinter& out) const {
+ out.printf("WarpSnapshot (0x%p)\n", this);
+ out.printf("------------------------------\n");
+ out.printf("globalLexicalEnv: 0x%p\n", globalLexicalEnv());
+ out.printf("globalLexicalEnvThis: 0x%p\n", globalLexicalEnvThis());
+ out.printf("failedBoundsCheck: %u\n", bailoutInfo().failedBoundsCheck());
+ out.printf("failedLexicalCheck: %u\n", bailoutInfo().failedLexicalCheck());
+ out.printf("\n");
+
+ out.printf("Nursery objects (%u):\n", unsigned(nurseryObjects_.length()));
+ for (size_t i = 0; i < nurseryObjects_.length(); i++) {
+ out.printf(" %u: 0x%p\n", unsigned(i), nurseryObjects_[i]);
+ }
+ out.printf("\n");
+
+ for (auto* scriptSnapshot : scriptSnapshots_) {
+ scriptSnapshot->dump(out);
+ }
+}
+
+void WarpScriptSnapshot::dump(GenericPrinter& out) const {
+ out.printf("WarpScriptSnapshot (0x%p)\n", this);
+ out.printf("------------------------------\n");
+ out.printf("Script: %s:%u:%u (0x%p)\n", script_->filename(),
+ script_->lineno(), script_->column(),
+ static_cast<JSScript*>(script_));
+ out.printf(" moduleObject: 0x%p\n", moduleObject());
+ out.printf(" isArrowFunction: %u\n", isArrowFunction());
+
+ out.printf(" environment: ");
+ environment_.match(
+ [&](const NoEnvironment&) { out.printf("None\n"); },
+ [&](JSObject* obj) { out.printf("Object: 0x%p\n", obj); },
+ [&](const FunctionEnvironment& env) {
+ out.printf(
+ "Function: callobject template 0x%p, named lambda template: 0x%p\n",
+ static_cast<JSObject*>(env.callObjectTemplate),
+ static_cast<JSObject*>(env.namedLambdaTemplate));
+ });
+
+ out.printf("\n");
+ for (const WarpOpSnapshot* snapshot : opSnapshots()) {
+ snapshot->dump(out, script_);
+ out.printf("\n");
+ }
+}
+
+static const char* OpSnapshotKindString(WarpOpSnapshot::Kind kind) {
+ static const char* const names[] = {
+# define NAME(x) #x,
+ WARP_OP_SNAPSHOT_LIST(NAME)
+# undef NAME
+ };
+ return names[unsigned(kind)];
+}
+
+void WarpOpSnapshot::dump(GenericPrinter& out, JSScript* script) const {
+ jsbytecode* pc = script->offsetToPC(offset_);
+ out.printf(" %s (offset %u, JSOp::%s)\n", OpSnapshotKindString(kind_),
+ offset_, CodeName(JSOp(*pc)));
+
+ // Dispatch to dumpData() methods.
+ switch (kind_) {
+# define DUMP(kind) \
+ case Kind::kind: \
+ as<kind>()->dumpData(out); \
+ break;
+ WARP_OP_SNAPSHOT_LIST(DUMP)
+# undef DUMP
+ }
+}
+
+void WarpArguments::dumpData(GenericPrinter& out) const {
+ out.printf(" template: 0x%p\n", templateObj());
+}
+
+void WarpRegExp::dumpData(GenericPrinter& out) const {
+ out.printf(" hasShared: %u\n", hasShared());
+}
+
+void WarpBuiltinObject::dumpData(GenericPrinter& out) const {
+ out.printf(" builtin: 0x%p\n", builtin());
+}
+
+void WarpGetIntrinsic::dumpData(GenericPrinter& out) const {
+ out.printf(" intrinsic: 0x%016" PRIx64 "\n", intrinsic().asRawBits());
+}
+
+void WarpGetImport::dumpData(GenericPrinter& out) const {
+ out.printf(" targetEnv: 0x%p\n", targetEnv());
+ out.printf(" numFixedSlots: %u\n", numFixedSlots());
+ out.printf(" slot: %u\n", slot());
+ out.printf(" needsLexicalCheck: %u\n", needsLexicalCheck());
+}
+
+void WarpRest::dumpData(GenericPrinter& out) const {
+ out.printf(" shape: 0x%p\n", shape());
+}
+
+void WarpBindGName::dumpData(GenericPrinter& out) const {
+ out.printf(" globalEnv: 0x%p\n", globalEnv());
+}
+
+void WarpVarEnvironment::dumpData(GenericPrinter& out) const {
+ out.printf(" template: 0x%p\n", templateObj());
+}
+
+void WarpLexicalEnvironment::dumpData(GenericPrinter& out) const {
+ out.printf(" template: 0x%p\n", templateObj());
+}
+
+void WarpClassBodyEnvironment::dumpData(GenericPrinter& out) const {
+ out.printf(" template: 0x%p\n", templateObj());
+}
+
+void WarpBailout::dumpData(GenericPrinter& out) const {
+ // No fields.
+}
+
+void WarpCacheIR::dumpData(GenericPrinter& out) const {
+ out.printf(" stubCode: 0x%p\n", static_cast<JitCode*>(stubCode_));
+ out.printf(" stubInfo: 0x%p\n", stubInfo_);
+ out.printf(" stubData: 0x%p\n", stubData_);
+# ifdef JS_CACHEIR_SPEW
+ out.printf(" IR:\n");
+ SpewCacheIROps(out, " ", stubInfo_);
+# else
+ out.printf("(CacheIR spew unavailable)\n");
+# endif
+}
+
+void WarpInlinedCall::dumpData(GenericPrinter& out) const {
+ out.printf(" scriptSnapshot: 0x%p\n", scriptSnapshot_);
+ out.printf(" info: 0x%p\n", info_);
+ cacheIRSnapshot_->dumpData(out);
+}
+
+void WarpPolymorphicTypes::dumpData(GenericPrinter& out) const {
+ out.printf(" types:\n");
+ for (auto& typeData : list_) {
+ out.printf(" %s\n", ValTypeToString(typeData.type()));
+ }
+}
+
+#endif // JS_JITSPEW
+
+template <typename T>
+static void TraceWarpGCPtr(JSTracer* trc, const WarpGCPtr<T>& thing,
+ const char* name) {
+ T thingRaw = thing;
+ TraceManuallyBarrieredEdge(trc, &thingRaw, name);
+ MOZ_ASSERT(static_cast<T>(thing) == thingRaw, "Unexpected moving GC!");
+}
+
+void WarpSnapshot::trace(JSTracer* trc) {
+ // Nursery objects can be tenured in parallel with Warp compilation.
+ // Note: don't use TraceWarpGCPtr here as that asserts non-moving.
+ for (size_t i = 0; i < nurseryObjects_.length(); i++) {
+ TraceManuallyBarrieredEdge(trc, &nurseryObjects_[i], "warp-nursery-object");
+ }
+
+ // Other GC things are not in the nursery.
+ if (trc->runtime()->heapState() == JS::HeapState::MinorCollecting) {
+ return;
+ }
+
+ for (auto* script : scriptSnapshots_) {
+ script->trace(trc);
+ }
+ TraceWarpGCPtr(trc, globalLexicalEnv_, "warp-lexical");
+ TraceWarpGCPtr(trc, globalLexicalEnvThis_, "warp-lexicalthis");
+}
+
+void WarpScriptSnapshot::trace(JSTracer* trc) {
+ TraceWarpGCPtr(trc, script_, "warp-script");
+
+ environment_.match(
+ [](const NoEnvironment&) {},
+ [trc](WarpGCPtr<JSObject*>& obj) {
+ TraceWarpGCPtr(trc, obj, "warp-env-object");
+ },
+ [trc](FunctionEnvironment& env) {
+ if (env.callObjectTemplate) {
+ TraceWarpGCPtr(trc, env.callObjectTemplate, "warp-env-callobject");
+ }
+ if (env.namedLambdaTemplate) {
+ TraceWarpGCPtr(trc, env.namedLambdaTemplate, "warp-env-namedlambda");
+ }
+ });
+
+ for (WarpOpSnapshot* snapshot : opSnapshots_) {
+ snapshot->trace(trc);
+ }
+
+ if (moduleObject_) {
+ TraceWarpGCPtr(trc, moduleObject_, "warp-module-obj");
+ }
+}
+
+void WarpOpSnapshot::trace(JSTracer* trc) {
+ // Dispatch to traceData() methods.
+ switch (kind_) {
+#define TRACE(kind) \
+ case Kind::kind: \
+ as<kind>()->traceData(trc); \
+ break;
+ WARP_OP_SNAPSHOT_LIST(TRACE)
+#undef TRACE
+ }
+}
+
+void WarpArguments::traceData(JSTracer* trc) {
+ if (templateObj_) {
+ TraceWarpGCPtr(trc, templateObj_, "warp-args-template");
+ }
+}
+
+void WarpRegExp::traceData(JSTracer* trc) {
+ // No GC pointers.
+}
+
+void WarpBuiltinObject::traceData(JSTracer* trc) {
+ TraceWarpGCPtr(trc, builtin_, "warp-builtin-object");
+}
+
+void WarpGetIntrinsic::traceData(JSTracer* trc) {
+ TraceWarpGCPtr(trc, intrinsic_, "warp-intrinsic");
+}
+
+void WarpGetImport::traceData(JSTracer* trc) {
+ TraceWarpGCPtr(trc, targetEnv_, "warp-import-env");
+}
+
+void WarpRest::traceData(JSTracer* trc) {
+ TraceWarpGCPtr(trc, shape_, "warp-rest-shape");
+}
+
+void WarpBindGName::traceData(JSTracer* trc) {
+ TraceWarpGCPtr(trc, globalEnv_, "warp-bindgname-globalenv");
+}
+
+void WarpVarEnvironment::traceData(JSTracer* trc) {
+ TraceWarpGCPtr(trc, templateObj_, "warp-varenv-template");
+}
+
+void WarpLexicalEnvironment::traceData(JSTracer* trc) {
+ TraceWarpGCPtr(trc, templateObj_, "warp-lexenv-template");
+}
+
+void WarpClassBodyEnvironment::traceData(JSTracer* trc) {
+ TraceWarpGCPtr(trc, templateObj_, "warp-classbodyenv-template");
+}
+
+void WarpBailout::traceData(JSTracer* trc) {
+ // No GC pointers.
+}
+
+void WarpPolymorphicTypes::traceData(JSTracer* trc) {
+ // No GC pointers.
+}
+
+template <typename T>
+static void TraceWarpStubPtr(JSTracer* trc, uintptr_t word, const char* name) {
+ T* ptr = reinterpret_cast<T*>(word);
+ TraceWarpGCPtr(trc, WarpGCPtr<T*>(ptr), name);
+}
+
+void WarpCacheIR::traceData(JSTracer* trc) {
+ TraceWarpGCPtr(trc, stubCode_, "warp-stub-code");
+ if (stubData_) {
+ uint32_t field = 0;
+ size_t offset = 0;
+ while (true) {
+ StubField::Type fieldType = stubInfo_->fieldType(field);
+ switch (fieldType) {
+ case StubField::Type::RawInt32:
+ case StubField::Type::RawPointer:
+ case StubField::Type::RawInt64:
+ case StubField::Type::Double:
+ break;
+ case StubField::Type::Shape: {
+ uintptr_t word = stubInfo_->getStubRawWord(stubData_, offset);
+ TraceWarpStubPtr<Shape>(trc, word, "warp-cacheir-shape");
+ break;
+ }
+ case StubField::Type::GetterSetter: {
+ uintptr_t word = stubInfo_->getStubRawWord(stubData_, offset);
+ TraceWarpStubPtr<GetterSetter>(trc, word,
+ "warp-cacheir-getter-setter");
+ break;
+ }
+ case StubField::Type::JSObject: {
+ uintptr_t word = stubInfo_->getStubRawWord(stubData_, offset);
+ WarpObjectField field = WarpObjectField::fromData(word);
+ if (!field.isNurseryIndex()) {
+ TraceWarpStubPtr<JSObject>(trc, word, "warp-cacheir-object");
+ }
+ break;
+ }
+ case StubField::Type::Symbol: {
+ uintptr_t word = stubInfo_->getStubRawWord(stubData_, offset);
+ TraceWarpStubPtr<JS::Symbol>(trc, word, "warp-cacheir-symbol");
+ break;
+ }
+ case StubField::Type::String: {
+ uintptr_t word = stubInfo_->getStubRawWord(stubData_, offset);
+ TraceWarpStubPtr<JSString>(trc, word, "warp-cacheir-string");
+ break;
+ }
+ case StubField::Type::BaseScript: {
+ uintptr_t word = stubInfo_->getStubRawWord(stubData_, offset);
+ TraceWarpStubPtr<BaseScript>(trc, word, "warp-cacheir-script");
+ break;
+ }
+ case StubField::Type::JitCode: {
+ uintptr_t word = stubInfo_->getStubRawWord(stubData_, offset);
+ TraceWarpStubPtr<JitCode>(trc, word, "warp-cacheir-jitcode");
+ break;
+ }
+ case StubField::Type::Id: {
+ uintptr_t word = stubInfo_->getStubRawWord(stubData_, offset);
+ jsid id = jsid::fromRawBits(word);
+ TraceWarpGCPtr(trc, WarpGCPtr<jsid>(id), "warp-cacheir-jsid");
+ break;
+ }
+ case StubField::Type::Value: {
+ uint64_t data = stubInfo_->getStubRawInt64(stubData_, offset);
+ Value val = Value::fromRawBits(data);
+ TraceWarpGCPtr(trc, WarpGCPtr<Value>(val), "warp-cacheir-value");
+ break;
+ }
+ case StubField::Type::AllocSite: {
+ mozilla::DebugOnly<uintptr_t> word =
+ stubInfo_->getStubRawWord(stubData_, offset);
+ MOZ_ASSERT(word == uintptr_t(gc::Heap::Default) ||
+ word == uintptr_t(gc::Heap::Tenured));
+ break;
+ }
+ case StubField::Type::Limit:
+ return; // Done.
+ }
+ field++;
+ offset += StubField::sizeInBytes(fieldType);
+ }
+ }
+}
+
+void WarpInlinedCall::traceData(JSTracer* trc) {
+ // Note: scriptSnapshot_ is traced through WarpSnapshot.
+ cacheIRSnapshot_->trace(trc);
+}
diff --git a/js/src/jit/WarpSnapshot.h b/js/src/jit/WarpSnapshot.h
new file mode 100644
index 0000000000..05359905f6
--- /dev/null
+++ b/js/src/jit/WarpSnapshot.h
@@ -0,0 +1,627 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_WarpSnapshot_h
+#define jit_WarpSnapshot_h
+
+#include "mozilla/LinkedList.h"
+#include "mozilla/Variant.h"
+
+#include "builtin/ModuleObject.h"
+#include "gc/Policy.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/JitContext.h"
+#include "jit/TypeData.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+
+namespace js {
+
+class ArgumentsObject;
+class CallObject;
+class GlobalLexicalEnvironmentObject;
+class LexicalEnvironmentObject;
+class ModuleEnvironmentObject;
+class NamedLambdaObject;
+
+namespace jit {
+
+class CacheIRStubInfo;
+class CompileInfo;
+class WarpScriptSnapshot;
+
+#define WARP_OP_SNAPSHOT_LIST(_) \
+ _(WarpArguments) \
+ _(WarpRegExp) \
+ _(WarpBuiltinObject) \
+ _(WarpGetIntrinsic) \
+ _(WarpGetImport) \
+ _(WarpRest) \
+ _(WarpBindGName) \
+ _(WarpVarEnvironment) \
+ _(WarpLexicalEnvironment) \
+ _(WarpClassBodyEnvironment) \
+ _(WarpBailout) \
+ _(WarpCacheIR) \
+ _(WarpInlinedCall) \
+ _(WarpPolymorphicTypes)
+
+// Wrapper for GC things stored in WarpSnapshot. Asserts the GC pointer is not
+// nursery-allocated. These pointers must be traced using TraceWarpGCPtr.
+template <typename T>
+class WarpGCPtr {
+ // Note: no pre-barrier is needed because this is a constant. No post-barrier
+ // is needed because the value is always tenured.
+ const T ptr_;
+
+ public:
+ explicit WarpGCPtr(const T& ptr) : ptr_(ptr) {
+ MOZ_ASSERT(JS::GCPolicy<T>::isTenured(ptr),
+ "WarpSnapshot pointers must be tenured");
+ }
+ WarpGCPtr(const WarpGCPtr<T>& other) = default;
+
+ operator T() const { return ptr_; }
+ T operator->() const { return ptr_; }
+
+ private:
+ WarpGCPtr() = delete;
+ void operator=(WarpGCPtr<T>& other) = delete;
+};
+
+// WarpOpSnapshot is the base class for data attached to a single bytecode op by
+// WarpOracle. This is typically data that WarpBuilder can't read off-thread
+// without racing.
+class WarpOpSnapshot : public TempObject,
+ public mozilla::LinkedListElement<WarpOpSnapshot> {
+ public:
+ enum class Kind : uint16_t {
+#define DEF_KIND(KIND) KIND,
+ WARP_OP_SNAPSHOT_LIST(DEF_KIND)
+#undef DEF_KIND
+ };
+
+ private:
+ // Bytecode offset.
+ uint32_t offset_ = 0;
+
+ Kind kind_;
+
+ protected:
+ WarpOpSnapshot(Kind kind, uint32_t offset) : offset_(offset), kind_(kind) {}
+
+ public:
+ uint32_t offset() const { return offset_; }
+ Kind kind() const { return kind_; }
+
+ template <typename T>
+ const T* as() const {
+ MOZ_ASSERT(kind_ == T::ThisKind);
+ return static_cast<const T*>(this);
+ }
+
+ template <typename T>
+ T* as() {
+ MOZ_ASSERT(kind_ == T::ThisKind);
+ return static_cast<T*>(this);
+ }
+
+ void trace(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dump(GenericPrinter& out, JSScript* script) const;
+#endif
+};
+
+using WarpOpSnapshotList = mozilla::LinkedList<WarpOpSnapshot>;
+
+// Template object for JSOp::Arguments.
+class WarpArguments : public WarpOpSnapshot {
+ // Note: this can be nullptr if the realm has no template object yet.
+ WarpGCPtr<ArgumentsObject*> templateObj_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpArguments;
+
+ WarpArguments(uint32_t offset, ArgumentsObject* templateObj)
+ : WarpOpSnapshot(ThisKind, offset), templateObj_(templateObj) {}
+ ArgumentsObject* templateObj() const { return templateObj_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// The "has RegExpShared" state for JSOp::RegExp's template object.
+class WarpRegExp : public WarpOpSnapshot {
+ bool hasShared_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpRegExp;
+
+ WarpRegExp(uint32_t offset, bool hasShared)
+ : WarpOpSnapshot(ThisKind, offset), hasShared_(hasShared) {}
+ bool hasShared() const { return hasShared_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// The object for JSOp::BuiltinObject if it exists at compile-time.
+class WarpBuiltinObject : public WarpOpSnapshot {
+ WarpGCPtr<JSObject*> builtin_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpBuiltinObject;
+
+ WarpBuiltinObject(uint32_t offset, JSObject* builtin)
+ : WarpOpSnapshot(ThisKind, offset), builtin_(builtin) {
+ MOZ_ASSERT(builtin);
+ }
+ JSObject* builtin() const { return builtin_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// The intrinsic for JSOp::GetIntrinsic if it exists at compile-time.
+class WarpGetIntrinsic : public WarpOpSnapshot {
+ WarpGCPtr<Value> intrinsic_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpGetIntrinsic;
+
+ WarpGetIntrinsic(uint32_t offset, const Value& intrinsic)
+ : WarpOpSnapshot(ThisKind, offset), intrinsic_(intrinsic) {}
+ Value intrinsic() const { return intrinsic_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// Target module environment and slot information for JSOp::GetImport.
+class WarpGetImport : public WarpOpSnapshot {
+ WarpGCPtr<ModuleEnvironmentObject*> targetEnv_;
+ uint32_t numFixedSlots_;
+ uint32_t slot_;
+ bool needsLexicalCheck_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpGetImport;
+
+ WarpGetImport(uint32_t offset, ModuleEnvironmentObject* targetEnv,
+ uint32_t numFixedSlots, uint32_t slot, bool needsLexicalCheck)
+ : WarpOpSnapshot(ThisKind, offset),
+ targetEnv_(targetEnv),
+ numFixedSlots_(numFixedSlots),
+ slot_(slot),
+ needsLexicalCheck_(needsLexicalCheck) {}
+ ModuleEnvironmentObject* targetEnv() const { return targetEnv_; }
+ uint32_t numFixedSlots() const { return numFixedSlots_; }
+ uint32_t slot() const { return slot_; }
+ bool needsLexicalCheck() const { return needsLexicalCheck_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// Informs WarpBuilder that an IC site is cold and execution should bail out.
+class WarpBailout : public WarpOpSnapshot {
+ public:
+ static constexpr Kind ThisKind = Kind::WarpBailout;
+
+ explicit WarpBailout(uint32_t offset) : WarpOpSnapshot(ThisKind, offset) {}
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// Information from a Baseline IC stub.
+class WarpCacheIR : public WarpOpSnapshot {
+ // Baseline stub code. Stored here to keep the CacheIRStubInfo alive.
+ WarpGCPtr<JitCode*> stubCode_;
+ const CacheIRStubInfo* stubInfo_;
+
+ // Copied Baseline stub data. Allocated in the same LifoAlloc.
+ const uint8_t* stubData_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpCacheIR;
+
+ WarpCacheIR(uint32_t offset, JitCode* stubCode,
+ const CacheIRStubInfo* stubInfo, const uint8_t* stubData)
+ : WarpOpSnapshot(ThisKind, offset),
+ stubCode_(stubCode),
+ stubInfo_(stubInfo),
+ stubData_(stubData) {}
+
+ const CacheIRStubInfo* stubInfo() const { return stubInfo_; }
+ const uint8_t* stubData() const { return stubData_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// [SMDOC] Warp Nursery Object support
+//
+// CacheIR stub data can contain nursery-allocated objects. This can happen,
+// for example, for GuardSpecificObject/GuardSpecificFunction or GuardProto.
+//
+// To support nursery GCs in parallel with off-thread compilation, we use the
+// following mechanism:
+//
+// * When WarpOracle copies stub data, it builds a Vector of nursery objects.
+// The nursery object pointers in the stub data are replaced with the
+// corresponding index into this Vector.
+//   See WarpScriptOracle::replaceNurseryAndAllocSitePointers.
+//
+// * The Vector is copied to the snapshot and, at the end of compilation, to
+// the IonScript. The Vector is only accessed on the main thread.
+//
+// * The MIR backend never accesses the raw JSObject*. Instead, it uses
+// MNurseryObject which will load the object at runtime from the IonScript.
+//
+// WarpObjectField is a helper class to encode/decode a stub data field that
+// either stores an object or a nursery index.
+class WarpObjectField {
+ // This is a nursery index if the low bit is set. Else it's a JSObject*.
+ static constexpr uintptr_t NurseryIndexTag = 0x1;
+ static constexpr uintptr_t NurseryIndexShift = 1;
+
+ uintptr_t data_;
+
+ explicit WarpObjectField(uintptr_t data) : data_(data) {}
+
+ public:
+ static WarpObjectField fromData(uintptr_t data) {
+ return WarpObjectField(data);
+ }
+ static WarpObjectField fromObject(JSObject* obj) {
+ return WarpObjectField(uintptr_t(obj));
+ }
+ static WarpObjectField fromNurseryIndex(uint32_t index) {
+ uintptr_t data = (uintptr_t(index) << NurseryIndexShift) | NurseryIndexTag;
+ return WarpObjectField(data);
+ }
+
+ uintptr_t rawData() const { return data_; }
+
+ bool isNurseryIndex() const { return (data_ & NurseryIndexTag) != 0; }
+
+ uint32_t toNurseryIndex() const {
+ MOZ_ASSERT(isNurseryIndex());
+ return data_ >> NurseryIndexShift;
+ }
+
+ JSObject* toObject() const {
+ MOZ_ASSERT(!isNurseryIndex());
+ return reinterpret_cast<JSObject*>(data_);
+ }
+};
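+
+// Illustrative usage only (|obj| is a hypothetical JSObject*; these calls are
+// not part of this patch):
+//
+//   WarpObjectField f = WarpObjectField::fromNurseryIndex(3);
+//   MOZ_ASSERT(f.isNurseryIndex());
+//   MOZ_ASSERT(f.toNurseryIndex() == 3);
+//
+//   // JSObject* values are at least word-aligned, so the tag bit is clear.
+//   WarpObjectField g = WarpObjectField::fromObject(obj);
+//   MOZ_ASSERT(!g.isNurseryIndex());
+//   MOZ_ASSERT(g.toObject() == obj);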
+
+// Information for inlining a scripted call IC.
+class WarpInlinedCall : public WarpOpSnapshot {
+ // Used for generating the correct guards.
+ WarpCacheIR* cacheIRSnapshot_;
+
+ // Used for generating the inlined code.
+ WarpScriptSnapshot* scriptSnapshot_;
+ CompileInfo* info_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpInlinedCall;
+
+ WarpInlinedCall(uint32_t offset, WarpCacheIR* cacheIRSnapshot,
+ WarpScriptSnapshot* scriptSnapshot, CompileInfo* info)
+ : WarpOpSnapshot(ThisKind, offset),
+ cacheIRSnapshot_(cacheIRSnapshot),
+ scriptSnapshot_(scriptSnapshot),
+ info_(info) {}
+
+ WarpCacheIR* cacheIRSnapshot() const { return cacheIRSnapshot_; }
+ WarpScriptSnapshot* scriptSnapshot() const { return scriptSnapshot_; }
+ CompileInfo* info() const { return info_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// Information for inlining an ordered set of types
+class WarpPolymorphicTypes : public WarpOpSnapshot {
+ TypeDataList list_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpPolymorphicTypes;
+
+ WarpPolymorphicTypes(uint32_t offset, TypeDataList list)
+ : WarpOpSnapshot(ThisKind, offset), list_(list) {}
+
+ const TypeDataList& list() const { return list_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// Shape for JSOp::Rest.
+class WarpRest : public WarpOpSnapshot {
+ WarpGCPtr<Shape*> shape_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpRest;
+
+ WarpRest(uint32_t offset, Shape* shape)
+ : WarpOpSnapshot(ThisKind, offset), shape_(shape) {}
+
+ Shape* shape() const { return shape_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// Global environment for BindGName
+class WarpBindGName : public WarpOpSnapshot {
+ WarpGCPtr<JSObject*> globalEnv_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpBindGName;
+
+ WarpBindGName(uint32_t offset, JSObject* globalEnv)
+ : WarpOpSnapshot(ThisKind, offset), globalEnv_(globalEnv) {}
+
+ JSObject* globalEnv() const { return globalEnv_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// Block environment for PushVarEnv
+class WarpVarEnvironment : public WarpOpSnapshot {
+ WarpGCPtr<VarEnvironmentObject*> templateObj_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpVarEnvironment;
+
+ WarpVarEnvironment(uint32_t offset, VarEnvironmentObject* templateObj)
+ : WarpOpSnapshot(ThisKind, offset), templateObj_(templateObj) {}
+
+ VarEnvironmentObject* templateObj() const { return templateObj_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// Block environment for PushLexicalEnv, FreshenLexicalEnv, RecreateLexicalEnv
+class WarpLexicalEnvironment : public WarpOpSnapshot {
+ WarpGCPtr<BlockLexicalEnvironmentObject*> templateObj_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpLexicalEnvironment;
+
+ WarpLexicalEnvironment(uint32_t offset,
+ BlockLexicalEnvironmentObject* templateObj)
+ : WarpOpSnapshot(ThisKind, offset), templateObj_(templateObj) {}
+
+ BlockLexicalEnvironmentObject* templateObj() const { return templateObj_; }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+// Class body lexical environment for PushClassBodyEnv
+class WarpClassBodyEnvironment : public WarpOpSnapshot {
+ WarpGCPtr<ClassBodyLexicalEnvironmentObject*> templateObj_;
+
+ public:
+ static constexpr Kind ThisKind = Kind::WarpClassBodyEnvironment;
+
+ WarpClassBodyEnvironment(uint32_t offset,
+ ClassBodyLexicalEnvironmentObject* templateObj)
+ : WarpOpSnapshot(ThisKind, offset), templateObj_(templateObj) {}
+
+ ClassBodyLexicalEnvironmentObject* templateObj() const {
+ return templateObj_;
+ }
+
+ void traceData(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dumpData(GenericPrinter& out) const;
+#endif
+};
+
+struct NoEnvironment {};
+using ConstantObjectEnvironment = WarpGCPtr<JSObject*>;
+struct FunctionEnvironment {
+ WarpGCPtr<CallObject*> callObjectTemplate;
+ WarpGCPtr<NamedLambdaObject*> namedLambdaTemplate;
+
+ public:
+ FunctionEnvironment(CallObject* callObjectTemplate,
+ NamedLambdaObject* namedLambdaTemplate)
+ : callObjectTemplate(callObjectTemplate),
+ namedLambdaTemplate(namedLambdaTemplate) {}
+};
+
+// Snapshot data for the environment object(s) created in the script's
+// prologue.
+//
+// One of:
+//
+// * NoEnvironment: No environment object should be set. Leave the slot
+// initialized to |undefined|.
+//
+// * ConstantObjectEnvironment: Use this JSObject* as environment object.
+//
+// * FunctionEnvironment: Use the callee's environment chain. Optionally
+// allocate a new NamedLambdaObject and/or CallObject based on
+// namedLambdaTemplate and callObjectTemplate.
+using WarpEnvironment =
+ mozilla::Variant<NoEnvironment, ConstantObjectEnvironment,
+ FunctionEnvironment>;
+
+// Snapshot data for a single JSScript.
+class WarpScriptSnapshot
+ : public TempObject,
+ public mozilla::LinkedListElement<WarpScriptSnapshot> {
+ WarpGCPtr<JSScript*> script_;
+ WarpEnvironment environment_;
+ WarpOpSnapshotList opSnapshots_;
+
+ // If the script has a JSOp::ImportMeta op, this is the module to bake in.
+ WarpGCPtr<ModuleObject*> moduleObject_;
+
+ // Whether this script is for an arrow function.
+ bool isArrowFunction_;
+ bool isMonomorphicInlined_;
+
+ public:
+ WarpScriptSnapshot(JSScript* script, const WarpEnvironment& env,
+ WarpOpSnapshotList&& opSnapshots,
+ ModuleObject* moduleObject);
+
+ JSScript* script() const { return script_; }
+ const WarpEnvironment& environment() const { return environment_; }
+ const WarpOpSnapshotList& opSnapshots() const { return opSnapshots_; }
+ ModuleObject* moduleObject() const { return moduleObject_; }
+
+ bool isArrowFunction() const { return isArrowFunction_; }
+ bool isMonomorphicInlined() const { return isMonomorphicInlined_; }
+ void markIsMonomorphicInlined() { isMonomorphicInlined_ = true; }
+
+ void trace(JSTracer* trc);
+
+#ifdef JS_JITSPEW
+ void dump(GenericPrinter& out) const;
+#endif
+};
+
+// Captures information from previous bailouts to prevent bailout/recompile
+// loops.
+class WarpBailoutInfo {
+ // True if any script in the compilation has the failedBoundsCheck flag. In
+ // this case mark bounds checks as non-movable to prevent hoisting them in
+ // TryEliminateBoundsCheck.
+ bool failedBoundsCheck_ = false;
+
+ // True if any script in the compilation has the failedLexicalCheck flag. In
+ // this case mark lexical checks as non-movable.
+ bool failedLexicalCheck_ = false;
+
+ public:
+ bool failedBoundsCheck() const { return failedBoundsCheck_; }
+ void setFailedBoundsCheck() { failedBoundsCheck_ = true; }
+
+ bool failedLexicalCheck() const { return failedLexicalCheck_; }
+ void setFailedLexicalCheck() { failedLexicalCheck_ = true; }
+};
+
+using WarpScriptSnapshotList = mozilla::LinkedList<WarpScriptSnapshot>;
+
+// Data allocated by WarpOracle on the main thread that's used off-thread by
+// WarpBuilder to build the MIR graph.
+class WarpSnapshot : public TempObject {
+ // The scripts being compiled.
+ WarpScriptSnapshotList scriptSnapshots_;
+
+ // The global lexical environment and its thisObject(). We don't inline
+ // cross-realm calls so this can be stored once per snapshot.
+ WarpGCPtr<GlobalLexicalEnvironmentObject*> globalLexicalEnv_;
+ WarpGCPtr<JSObject*> globalLexicalEnvThis_;
+
+ const WarpBailoutInfo bailoutInfo_;
+
+ // List of (originally) nursery-allocated objects. Must only be accessed on
+ // the main thread. See also WarpObjectField.
+ using NurseryObjectVector = Vector<JSObject*, 0, JitAllocPolicy>;
+ NurseryObjectVector nurseryObjects_;
+
+#ifdef JS_CACHEIR_SPEW
+ bool needsFinalWarmUpCount_ = false;
+#endif
+
+#ifdef DEBUG
+ // A hash of the stub pointers and entry counts for each of the ICs
+ // in this snapshot.
+ mozilla::HashNumber icHash_ = 0;
+#endif
+
+ public:
+ explicit WarpSnapshot(JSContext* cx, TempAllocator& alloc,
+ WarpScriptSnapshotList&& scriptSnapshots,
+ const WarpBailoutInfo& bailoutInfo,
+ bool recordWarmUpCount);
+
+ WarpScriptSnapshot* rootScript() { return scriptSnapshots_.getFirst(); }
+ const WarpScriptSnapshotList& scripts() const { return scriptSnapshots_; }
+
+ GlobalLexicalEnvironmentObject* globalLexicalEnv() const {
+ return globalLexicalEnv_;
+ }
+ JSObject* globalLexicalEnvThis() const { return globalLexicalEnvThis_; }
+
+ void trace(JSTracer* trc);
+
+ const WarpBailoutInfo& bailoutInfo() const { return bailoutInfo_; }
+
+ NurseryObjectVector& nurseryObjects() { return nurseryObjects_; }
+ const NurseryObjectVector& nurseryObjects() const { return nurseryObjects_; }
+
+#ifdef DEBUG
+ mozilla::HashNumber icHash() const { return icHash_; }
+ void setICHash(mozilla::HashNumber hash) { icHash_ = hash; }
+#endif
+
+#ifdef JS_JITSPEW
+ void dump() const;
+ void dump(GenericPrinter& out) const;
+#endif
+
+#ifdef JS_CACHEIR_SPEW
+ bool needsFinalWarmUpCount() const { return needsFinalWarmUpCount_; }
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_WarpSnapshot_h */
diff --git a/js/src/jit/WasmBCE.cpp b/js/src/jit/WasmBCE.cpp
new file mode 100644
index 0000000000..f855aab41d
--- /dev/null
+++ b/js/src/jit/WasmBCE.cpp
@@ -0,0 +1,139 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/WasmBCE.h"
+
+#include "jit/JitSpewer.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+typedef js::HashMap<uint32_t, MDefinition*, DefaultHasher<uint32_t>,
+ SystemAllocPolicy>
+ LastSeenMap;
+
+// The Wasm Bounds Check Elimination (BCE) pass looks for bounds checks on SSA
+// values that have already been checked (in the same block or in a dominating
+// block). These redundant bounds checks are then eliminated.
+//
+// Note: This is safe in the presence of dynamic memory sizes as long as they
+// can ONLY GROW. If we allow SHRINKING the heap, this pass should be
+// RECONSIDERED.
+//
+// TODO (dbounov): Are there a lot of cases where there is no single dominating
+// check, but a set of checks that together dominate a redundant check?
+//
+// TODO (dbounov): Generalize to constant additions relative to one base
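+//
+// As a simple illustration of the basic case (pseudocode, not actual MIR):
+//
+//   block A:                  check1 = WasmBoundsCheck(i); load(check1)
+//   block B (dominated by A): check2 = WasmBoundsCheck(i); load(check2)
+//
+// check2 is marked redundant. With Spectre index masking enabled its uses are
+// redirected to check1; otherwise it must already have no uses and is simply
+// skipped by codegen.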
+bool jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph) {
+ JitSpew(JitSpew_WasmBCE, "Begin");
+ // Map for dominating block where a given definition was checked
+ LastSeenMap lastSeen;
+
+ for (ReversePostorderIterator bIter(graph.rpoBegin());
+ bIter != graph.rpoEnd(); bIter++) {
+ MBasicBlock* block = *bIter;
+ for (MDefinitionIterator dIter(block); dIter;) {
+ MDefinition* def = *dIter++;
+
+ switch (def->op()) {
+ case MDefinition::Opcode::WasmBoundsCheck: {
+ MWasmBoundsCheck* bc = def->toWasmBoundsCheck();
+ MDefinition* addr = bc->index();
+
+ // We only support bounds check elimination on wasm memory, not
+ // tables. See bug 1625891.
+ if (!bc->isMemory()) {
+ continue;
+ }
+
+ // Eliminate constant-address memory bounds checks to addresses below
+ // the heap minimum.
+ //
+ // The payload of the MConstant will be Double if the constant
+ // result is above 2^31-1, but we don't care about that for BCE.
+
+ if (addr->isConstant() &&
+ ((addr->toConstant()->type() == MIRType::Int32 &&
+ uint64_t(addr->toConstant()->toInt32()) <
+ mir->minWasmHeapLength()) ||
+ (addr->toConstant()->type() == MIRType::Int64 &&
+ uint64_t(addr->toConstant()->toInt64()) <
+ mir->minWasmHeapLength()))) {
+ bc->setRedundant();
+ if (JitOptions.spectreIndexMasking) {
+ bc->replaceAllUsesWith(addr);
+ } else {
+ MOZ_ASSERT(!bc->hasUses());
+ }
+ } else {
+ LastSeenMap::AddPtr ptr = lastSeen.lookupForAdd(addr->id());
+ if (ptr) {
+ MDefinition* prevCheckOrPhi = ptr->value();
+ if (prevCheckOrPhi->block()->dominates(block)) {
+ bc->setRedundant();
+ if (JitOptions.spectreIndexMasking) {
+ bc->replaceAllUsesWith(prevCheckOrPhi);
+ } else {
+ MOZ_ASSERT(!bc->hasUses());
+ }
+ }
+ } else {
+ if (!lastSeen.add(ptr, addr->id(), def)) {
+ return false;
+ }
+ }
+ }
+ break;
+ }
+ case MDefinition::Opcode::Phi: {
+ MPhi* phi = def->toPhi();
+ bool phiChecked = true;
+
+ MOZ_ASSERT(phi->numOperands() > 0);
+
+ // If all incoming values to a phi node are safe (i.e. have a
+ // check that dominates this block) then we can consider this
+ // phi node checked.
+ //
+ // Note that any phi that is part of a cycle
+ // will not be "safe" since the value coming on the backedge
+ // cannot be in lastSeen because its block hasn't been traversed yet.
+ for (int i = 0, nOps = phi->numOperands(); i < nOps; i++) {
+ MDefinition* src = phi->getOperand(i);
+
+ if (JitOptions.spectreIndexMasking) {
+ if (src->isWasmBoundsCheck()) {
+ src = src->toWasmBoundsCheck()->index();
+ }
+ } else {
+ MOZ_ASSERT(!src->isWasmBoundsCheck());
+ }
+
+ LastSeenMap::Ptr checkPtr = lastSeen.lookup(src->id());
+ if (!checkPtr || !checkPtr->value()->block()->dominates(block)) {
+ phiChecked = false;
+ break;
+ }
+ }
+
+ if (phiChecked) {
+ if (!lastSeen.put(def->id(), def)) {
+ return false;
+ }
+ }
+
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+
+ return true;
+}
diff --git a/js/src/jit/WasmBCE.h b/js/src/jit/WasmBCE.h
new file mode 100644
index 0000000000..8f8a15f067
--- /dev/null
+++ b/js/src/jit/WasmBCE.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef jit_wasmbce_h
+#define jit_wasmbce_h
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+bool EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_wasmbce_h */
diff --git a/js/src/jit/XrayJitInfo.cpp b/js/src/jit/XrayJitInfo.cpp
new file mode 100644
index 0000000000..70e00f4296
--- /dev/null
+++ b/js/src/jit/XrayJitInfo.cpp
@@ -0,0 +1,17 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/friend/XrayJitInfo.h"
+
+#include "jit/CacheIRGenerator.h" // js::jit::GetXrayJitInfo
+
+using JS::XrayJitInfo;
+
+static XrayJitInfo* gXrayJitInfo = nullptr;
+
+void JS::SetXrayJitInfo(XrayJitInfo* info) { gXrayJitInfo = info; }
+
+XrayJitInfo* js::jit::GetXrayJitInfo() { return gXrayJitInfo; }
diff --git a/js/src/jit/arm/Architecture-arm.cpp b/js/src/jit/arm/Architecture-arm.cpp
new file mode 100644
index 0000000000..d4c5026705
--- /dev/null
+++ b/js/src/jit/arm/Architecture-arm.cpp
@@ -0,0 +1,540 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/Architecture-arm.h"
+
+#if !defined(JS_SIMULATOR_ARM) && !defined(__APPLE__)
+# include <elf.h>
+#endif
+
+#include <fcntl.h>
+#ifdef XP_UNIX
+# include <unistd.h>
+#endif
+
+#if defined(XP_IOS)
+# include <libkern/OSCacheControl.h>
+#endif
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/arm/Simulator-arm.h"
+#include "jit/FlushICache.h" // js::jit::FlushICache
+#include "jit/RegisterSets.h"
+
+#if !defined(__linux__) || defined(ANDROID) || defined(JS_SIMULATOR_ARM)
+// The Android NDK and B2G do not include the hwcap.h kernel header, and it is
+// not defined when building the simulator, so inline the header defines we
+// need.
+# define HWCAP_VFP (1 << 6)
+# define HWCAP_NEON (1 << 12)
+# define HWCAP_VFPv3 (1 << 13)
+# define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
+# define HWCAP_VFPv4 (1 << 16)
+# define HWCAP_IDIVA (1 << 17)
+# define HWCAP_IDIVT (1 << 18)
+# define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+# define AT_HWCAP 16
+#else
+# include <asm/hwcap.h>
+# if !defined(HWCAP_IDIVA)
+# define HWCAP_IDIVA (1 << 17)
+# endif
+# if !defined(HWCAP_VFPD32)
+# define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+# endif
+#endif
+
+namespace js {
+namespace jit {
+
+// Parse the Linux kernel cpuinfo features. This is also used to parse the
+// override features, which have some extensions: 'armv7', 'align' and
+// 'hardfp'.
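+//
+// For example (illustrative), a features string of "vfpv3,neon idiva" yields
+// HWCAP_VFPv3 | HWCAP_NEON | HWCAP_IDIVA; spaces and commas both act as
+// separators.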
+static uint32_t ParseARMCpuFeatures(const char* features,
+ bool override = false) {
+ uint32_t flags = 0;
+
+  // For ease of running tests, we make fixing up alignment faults the default.
+ bool fixupAlignmentFault = true;
+
+ for (;;) {
+ char ch = *features;
+ if (!ch) {
+ // End of string.
+ break;
+ }
+ if (ch == ' ' || ch == ',') {
+ // Skip separator characters.
+ features++;
+ continue;
+ }
+ // Find the end of the token.
+ const char* end = features + 1;
+ for (;; end++) {
+ ch = *end;
+ if (!ch || ch == ' ' || ch == ',') {
+ break;
+ }
+ }
+ size_t count = end - features;
+ if (count == 3 && strncmp(features, "vfp", 3) == 0) {
+ flags |= HWCAP_VFP;
+ } else if (count == 5 && strncmp(features, "vfpv2", 5) == 0) {
+ flags |= HWCAP_VFP; // vfpv2 is the same as vfp
+ } else if (count == 4 && strncmp(features, "neon", 4) == 0) {
+ flags |= HWCAP_NEON;
+ } else if (count == 5 && strncmp(features, "vfpv3", 5) == 0) {
+ flags |= HWCAP_VFPv3;
+ } else if (count == 8 && strncmp(features, "vfpv3d16", 8) == 0) {
+ flags |= HWCAP_VFPv3D16;
+ } else if (count == 5 && strncmp(features, "vfpv4", 5) == 0) {
+ flags |= HWCAP_VFPv4;
+ } else if (count == 5 && strncmp(features, "idiva", 5) == 0) {
+ flags |= HWCAP_IDIVA;
+ } else if (count == 5 && strncmp(features, "idivt", 5) == 0) {
+ flags |= HWCAP_IDIVT;
+ } else if (count == 6 && strncmp(features, "vfpd32", 6) == 0) {
+ flags |= HWCAP_VFPD32;
+ } else if (count == 5 && strncmp(features, "armv7", 5) == 0) {
+ flags |= HWCAP_ARMv7;
+ } else if (count == 5 && strncmp(features, "align", 5) == 0) {
+ flags |= HWCAP_ALIGNMENT_FAULT | HWCAP_FIXUP_FAULT;
+#if defined(JS_SIMULATOR_ARM)
+ } else if (count == 7 && strncmp(features, "nofixup", 7) == 0) {
+ fixupAlignmentFault = false;
+ } else if (count == 6 && strncmp(features, "hardfp", 6) == 0) {
+ flags |= HWCAP_USE_HARDFP_ABI;
+#endif
+ } else if (override) {
+ fprintf(stderr, "Warning: unexpected ARM feature at: %s\n", features);
+ }
+ features = end;
+ }
+
+ if (!fixupAlignmentFault) {
+ flags &= ~HWCAP_FIXUP_FAULT;
+ }
+
+ return flags;
+}
+
+static uint32_t CanonicalizeARMHwCapFlags(uint32_t flags) {
+ // Canonicalize the flags. These rules are also applied to the features
+ // supplied for simulation.
+
+  // VFPv3 is a subset of VFPv4; force it if the input string omits it.
+ if (flags & HWCAP_VFPv4) {
+ flags |= HWCAP_VFPv3;
+ }
+
+ // The VFPv3 feature is expected when the VFPv3D16 is reported, but add it
+ // just in case of a kernel difference in feature reporting.
+ if (flags & HWCAP_VFPv3D16) {
+ flags |= HWCAP_VFPv3;
+ }
+
+  // VFPv2 is a subset of VFPv3; force it if the input string omits it. VFPv2
+  // is just an alias for VFP.
+ if (flags & HWCAP_VFPv3) {
+ flags |= HWCAP_VFP;
+ }
+
+ // If we have Neon we have floating point.
+ if (flags & HWCAP_NEON) {
+ flags |= HWCAP_VFP;
+ }
+
+ // If VFPv3 or Neon is supported then this must be an ARMv7.
+ if (flags & (HWCAP_VFPv3 | HWCAP_NEON)) {
+ flags |= HWCAP_ARMv7;
+ }
+
+ // Some old kernels report VFP and not VFPv3, but if ARMv7 then it must be
+ // VFPv3.
+ if ((flags & HWCAP_VFP) && (flags & HWCAP_ARMv7)) {
+ flags |= HWCAP_VFPv3;
+ }
+
+ // Older kernels do not implement the HWCAP_VFPD32 flag.
+ if ((flags & HWCAP_VFPv3) && !(flags & HWCAP_VFPv3D16)) {
+ flags |= HWCAP_VFPD32;
+ }
+
+ return flags;
+}
+
+#if !defined(JS_SIMULATOR_ARM) && (defined(__linux__) || defined(ANDROID))
+static bool forceDoubleCacheFlush = false;
+#endif
+
+// The override flags parsed from the ARMHWCAP environment variable or from the
+// --arm-hwcap js shell argument. They are stable after startup: there is no
+// longer a programmatic way of setting these from JS.
+volatile uint32_t armHwCapFlags = HWCAP_UNINITIALIZED;
+
+bool CPUFlagsHaveBeenComputed() { return armHwCapFlags != HWCAP_UNINITIALIZED; }
+
+static const char* gArmHwCapString = nullptr;
+
+void SetARMHwCapFlagsString(const char* armHwCap) {
+ MOZ_ASSERT(!CPUFlagsHaveBeenComputed());
+ gArmHwCapString = armHwCap;
+}
+
+static void ParseARMHwCapFlags(const char* armHwCap) {
+ MOZ_ASSERT(armHwCap);
+
+ if (strstr(armHwCap, "help")) {
+ fflush(NULL);
+ printf(
+ "\n"
+ "usage: ARMHWCAP=option,option,option,... where options can be:\n"
+ "\n"
+ " vfp \n"
+ " neon \n"
+ " vfpv3 \n"
+ " vfpv3d16 \n"
+ " vfpv4 \n"
+ " idiva \n"
+ " idivt \n"
+ " vfpd32 \n"
+ " armv7 \n"
+ " align - unaligned accesses will trap and be emulated\n"
+#ifdef JS_SIMULATOR_ARM
+ " nofixup - disable emulation of unaligned accesses\n"
+ " hardfp \n"
+#endif
+ "\n");
+ exit(0);
+ /*NOTREACHED*/
+ }
+
+ uint32_t flags = ParseARMCpuFeatures(armHwCap, /* override = */ true);
+
+#ifdef JS_CODEGEN_ARM_HARDFP
+ flags |= HWCAP_USE_HARDFP_ABI;
+#endif
+
+ armHwCapFlags = CanonicalizeARMHwCapFlags(flags);
+ JitSpew(JitSpew_Codegen, "ARM HWCAP: 0x%x\n", armHwCapFlags);
+}
+
+void InitARMFlags() {
+ MOZ_RELEASE_ASSERT(armHwCapFlags == HWCAP_UNINITIALIZED);
+
+ if (const char* env = getenv("ARMHWCAP")) {
+ ParseARMHwCapFlags(env);
+ return;
+ }
+
+ if (gArmHwCapString) {
+ ParseARMHwCapFlags(gArmHwCapString);
+ return;
+ }
+
+ uint32_t flags = 0;
+#ifdef JS_SIMULATOR_ARM
+ // HWCAP_FIXUP_FAULT is on by default even if HWCAP_ALIGNMENT_FAULT is
+ // not on by default, because some memory access instructions always fault.
+ // Notably, this is true for floating point accesses.
+ flags = HWCAP_ARMv7 | HWCAP_VFP | HWCAP_VFPv3 | HWCAP_VFPv4 | HWCAP_NEON |
+ HWCAP_IDIVA | HWCAP_FIXUP_FAULT;
+#else
+
+# if defined(__linux__) || defined(ANDROID)
+ // This includes Android and B2G.
+ bool readAuxv = false;
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd > 0) {
+ struct {
+ uint32_t a_type;
+ uint32_t a_val;
+ } aux;
+ while (read(fd, &aux, sizeof(aux))) {
+ if (aux.a_type == AT_HWCAP) {
+ flags = aux.a_val;
+ readAuxv = true;
+ break;
+ }
+ }
+ close(fd);
+ }
+
+ FILE* fp = fopen("/proc/cpuinfo", "r");
+ if (fp) {
+ char buf[1024] = {};
+ size_t len = fread(buf, sizeof(char), sizeof(buf) - 1, fp);
+ fclose(fp);
+ buf[len] = '\0';
+
+ // Read the cpuinfo Features if the auxv is not available.
+ if (!readAuxv) {
+ char* featureList = strstr(buf, "Features");
+ if (featureList) {
+ if (char* featuresEnd = strstr(featureList, "\n")) {
+ *featuresEnd = '\0';
+ }
+ flags = ParseARMCpuFeatures(featureList + 8);
+ }
+ if (strstr(buf, "ARMv7")) {
+ flags |= HWCAP_ARMv7;
+ }
+ }
+
+    // The Exynos 7420 CPU (EU Galaxy S6 (Note)) has a bug where flushing
+    // sometimes doesn't invalidate the instruction cache. As a workaround we
+    // flush the cache twice, using different start addresses.
+ char* exynos7420 = strstr(buf, "Exynos7420");
+ if (exynos7420) {
+ forceDoubleCacheFlush = true;
+ }
+ }
+# endif
+
+  // If compiled to use specialized features, then these features can be
+  // assumed to be present; otherwise the compiled code would fail to run.
+
+# ifdef JS_CODEGEN_ARM_HARDFP
+ // Compiled to use the hardfp ABI.
+ flags |= HWCAP_USE_HARDFP_ABI;
+# endif
+
+# if defined(__VFP_FP__) && !defined(__SOFTFP__)
+ // Compiled to use VFP instructions so assume VFP support.
+ flags |= HWCAP_VFP;
+# endif
+
+# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+ // Compiled to use ARMv7 instructions so assume the ARMv7 arch.
+ flags |= HWCAP_ARMv7;
+# endif
+
+# if defined(__APPLE__)
+# if defined(__ARM_NEON__)
+ flags |= HWCAP_NEON;
+# endif
+# if defined(__ARMVFPV3__)
+  flags |= HWCAP_VFPv3 | HWCAP_VFPD32;
+# endif
+# endif
+
+#endif // JS_SIMULATOR_ARM
+
+ armHwCapFlags = CanonicalizeARMHwCapFlags(flags);
+
+ JitSpew(JitSpew_Codegen, "ARM HWCAP: 0x%x\n", armHwCapFlags);
+ return;
+}
+
+uint32_t GetARMFlags() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags;
+}
+
+bool HasNEON() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_NEON;
+}
+
+bool HasARMv7() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ARMv7;
+}
+
+bool HasMOVWT() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ARMv7;
+}
+
+bool HasLDSTREXBHD() {
+ // These are really available from ARMv6K and later, but why bother?
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ARMv7;
+}
+
+bool HasDMBDSBISB() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ARMv7;
+}
+
+bool HasVFPv3() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_VFPv3;
+}
+
+bool HasVFP() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_VFP;
+}
+
+bool Has32DP() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_VFPD32;
+}
+
+bool HasIDIV() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_IDIVA;
+}
+
+// This is defined in the header and inlined when not using the simulator.
+#ifdef JS_SIMULATOR_ARM
+bool UseHardFpABI() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_USE_HARDFP_ABI;
+}
+#endif
+
+Registers::Code Registers::FromName(const char* name) {
+ // Check for some register aliases first.
+ if (strcmp(name, "ip") == 0) {
+ return ip;
+ }
+ if (strcmp(name, "r13") == 0) {
+ return r13;
+ }
+ if (strcmp(name, "lr") == 0) {
+ return lr;
+ }
+ if (strcmp(name, "r15") == 0) {
+ return r15;
+ }
+
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisters::Code FloatRegisters::FromName(const char* name) {
+ for (size_t i = 0; i < TotalSingle; ++i) {
+ if (strcmp(GetSingleName(Encoding(i)), name) == 0) {
+ return VFPRegister(i, VFPRegister::Single).code();
+ }
+ }
+ for (size_t i = 0; i < TotalDouble; ++i) {
+ if (strcmp(GetDoubleName(Encoding(i)), name) == 0) {
+ return VFPRegister(i, VFPRegister::Double).code();
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisterSet VFPRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ if ((*iter).isSingle()) {
+ // Add in just this float.
+ mod.addUnchecked(*iter);
+ } else if ((*iter).id() < 16) {
+ // A double with an overlay, add in both floats.
+ mod.addUnchecked((*iter).singleOverlay(0));
+ mod.addUnchecked((*iter).singleOverlay(1));
+ } else {
+ // Add in the lone double in the range 16-31.
+ mod.addUnchecked(*iter);
+ }
+ }
+ return mod.set();
+}
+
+uint32_t VFPRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ FloatRegisterSet ss = s.reduceSetForPush();
+ uint64_t bits = ss.bits();
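+  // The set encodes single registers in the low 32 bits and double registers
+  // in the high bits (see ReduceSetForPush above), so the push size is the
+  // popcount of each half scaled by the register width.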
+ uint32_t ret = mozilla::CountPopulation32(bits & 0xffffffff) * sizeof(float);
+ ret += mozilla::CountPopulation32(bits >> 32) * sizeof(double);
+ return ret;
+}
+uint32_t VFPRegister::getRegisterDumpOffsetInBytes() {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ if (isSingle()) {
+ return id() * sizeof(float);
+ }
+ if (isDouble()) {
+ return id() * sizeof(double);
+ }
+ MOZ_CRASH("not Single or Double");
+}
+
+uint32_t FloatRegisters::ActualTotalPhys() {
+ if (Has32DP()) {
+ return 32;
+ }
+ return 16;
+}
+
+void FlushICache(void* code, size_t size) {
+#if defined(JS_SIMULATOR_ARM)
+ js::jit::SimulatorProcess::FlushICache(code, size);
+
+#elif (defined(__linux__) || defined(ANDROID)) && defined(__GNUC__)
+ void* end = (void*)(reinterpret_cast<char*>(code) + size);
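+  // Note: this is the private ARM Linux cacheflush syscall
+  // (__ARM_NR_cacheflush, number 0x0f0002): r0 = start address, r1 = end
+  // address, r2 = flags (0).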
+ asm volatile(
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r"(code), "r"(end)
+ : "r0", "r1", "r2");
+
+ if (forceDoubleCacheFlush) {
+ void* start = (void*)((uintptr_t)code + 1);
+ asm volatile(
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r"(start), "r"(end)
+ : "r0", "r1", "r2");
+ }
+
+#elif defined(__FreeBSD__) || defined(__NetBSD__)
+ __clear_cache(code, reinterpret_cast<char*>(code) + size);
+
+#elif defined(XP_IOS)
+ sys_icache_invalidate(code, size);
+
+#else
+# error "Unexpected platform"
+#endif
+}
+
+void FlushExecutionContext() {
+#ifndef JS_SIMULATOR_ARM
+ // Ensure that any instructions already in the pipeline are discarded and
+ // reloaded from the icache.
+ asm volatile("isb\n" : : : "memory");
+#else
+  // Under the simulator, we assume the host platform's icache-flushing
+  // routines take care of this.
+#endif
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm/Architecture-arm.h b/js/src/jit/arm/Architecture-arm.h
new file mode 100644
index 0000000000..fa2ae8e0ed
--- /dev/null
+++ b/js/src/jit/arm/Architecture-arm.h
@@ -0,0 +1,733 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_Architecture_arm_h
+#define jit_arm_Architecture_arm_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/shared/Architecture-shared.h"
+
+#include "js/Utility.h"
+
+// GCC versions 4.6 and above define __ARM_PCS_VFP to denote a hard-float
+// ABI target. The iOS toolchain doesn't define anything specific here,
+// but iOS always supports VFP.
+#if defined(__ARM_PCS_VFP) || defined(XP_IOS)
+# define JS_CODEGEN_ARM_HARDFP
+#endif
+
+namespace js {
+namespace jit {
+
+// These offsets are specific to nunboxing, and capture offsets into the
+// components of a js::Value.
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+
+static const uint32_t ShadowStackSpace = 0;
+
+// How far forward/back can a jump go? Provide a generous buffer for thunks.
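+// (For reference: ARM B/BL encode a signed 24-bit word offset, i.e. a reach
+// of roughly +/- 32 MiB, so 20 MiB stays comfortably within range.)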
+static const uint32_t JumpImmediateRange = 20 * 1024 * 1024;
+
+class Registers {
+ public:
+ enum RegisterID {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ fp = r11,
+ r12,
+ ip = r12,
+ r13,
+ sp = r13,
+ r14,
+ lr = r14,
+ r15,
+ pc = r15,
+ invalid_reg
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ static const char* GetName(Code code) {
+ MOZ_ASSERT(code < Total);
+ static const char* const Names[] = {"r0", "r1", "r2", "r3", "r4", "r5",
+ "r6", "r7", "r8", "r9", "r10", "r11",
+ "r12", "sp", "r14", "pc"};
+ return Names[code];
+ }
+ static const char* GetName(Encoding i) { return GetName(Code(i)); }
+
+ static Code FromName(const char* name);
+
+ static const Encoding StackPointer = sp;
+ static const Encoding Invalid = invalid_reg;
+
+ static const uint32_t Total = 16;
+ static const uint32_t Allocatable = 13;
+
+ typedef uint32_t SetType;
+
+ static const SetType AllMask = (1 << Total) - 1;
+ static const SetType ArgRegMask =
+ (1 << r0) | (1 << r1) | (1 << r2) | (1 << r3);
+
+ static const SetType VolatileMask =
+ (1 << r0) | (1 << r1) | (1 << Registers::r2) |
+ (1 << Registers::r3)
+#if defined(XP_IOS)
+ // per
+ // https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html#//apple_ref/doc/uid/TP40009021-SW4
+ | (1 << Registers::r9)
+#endif
+ ;
+
+ static const SetType NonVolatileMask =
+ (1 << Registers::r4) | (1 << Registers::r5) | (1 << Registers::r6) |
+ (1 << Registers::r7) | (1 << Registers::r8) |
+#if !defined(XP_IOS)
+ (1 << Registers::r9) |
+#endif
+ (1 << Registers::r10) | (1 << Registers::r11) | (1 << Registers::r12) |
+ (1 << Registers::r14);
+
+ static const SetType WrapperMask = VolatileMask | // = arguments
+ (1 << Registers::r4) | // = outReg
+ (1 << Registers::r5); // = argBase
+
+ static const SetType NonAllocatableMask =
+ (1 << Registers::sp) | (1 << Registers::r12) | // r12 = ip = scratch
+ (1 << Registers::lr) | (1 << Registers::pc) | (1 << Registers::fp);
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask = (1 << Registers::r2) | (1 << Registers::r3);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask =
+ (1 << Registers::r0) |
+ (1 << Registers::r1); // Used for double-size returns.
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint16_t PackedRegisterMask;
+
+class FloatRegisters {
+ public:
+ enum FPRegisterID {
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ s9,
+ s10,
+ s11,
+ s12,
+ s13,
+ s14,
+ s15,
+ s16,
+ s17,
+ s18,
+ s19,
+ s20,
+ s21,
+ s22,
+ s23,
+ s24,
+ s25,
+ s26,
+ s27,
+ s28,
+ s29,
+ s30,
+ s31,
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31,
+ invalid_freg
+ };
+
+ typedef uint32_t Code;
+ typedef FPRegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ double d;
+ };
+
+ static const char* GetDoubleName(Encoding code) {
+ static const char* const Names[] = {
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+ return Names[code];
+ }
+ static const char* GetSingleName(Encoding code) {
+ static const char* const Names[] = {
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+ return Names[code];
+ }
+
+ static Code FromName(const char* name);
+
+ static const Encoding Invalid = invalid_freg;
+ static const uint32_t Total = 48;
+ static const uint32_t TotalDouble = 16;
+ static const uint32_t TotalSingle = 32;
+ static const uint32_t Allocatable = 45;
+ // There are only 32 places that we can put values.
+ static const uint32_t TotalPhys = 32;
+ static uint32_t ActualTotalPhys();
+
+ /* clang-format off */
+  // ARM float registers overlap: for each double register in the range
+  // d0-d15, we have 2 single registers in the range s0-s31. d16-d31 have no
+  // single register aliases. The aliasing rule states that d{n} aliases s{2n}
+  // and s{2n+1}, for n in [0 .. 15].
+  //
+  // The register set is used to represent either allocatable registers or
+  // live registers. The set maps d0-d15 and s0-s31 to a single bit each. The
+  // registers d16-d31 are not used at the moment.
+ //
+ // uuuu uuuu uuuu uuuu dddd dddd dddd dddd ssss ssss ssss ssss ssss ssss ssss ssss
+ // ^ ^ ^ ^
+ // '-- d15 d0 --' '-- s31 s0 --'
+ //
+  // LiveSets are handled by adding the bit of each register without
+  // considering the aliases.
+  //
+  // AllocatableSets are handled by adding and removing the bit of each
+  // aligned-or-dominated-aliased register.
+ //
+ // ...0...00... : s{2n}, s{2n+1} and d{n} are not available
+ // ...1...01... : s{2n} is available (*)
+ // ...0...10... : s{2n+1} is available
+ // ...1...11... : s{2n}, s{2n+1} and d{n} are available
+ //
+ // (*) Note that d{n} bit is set, but is not available because s{2n+1} bit
+ // is not set, which is required as d{n} dominates s{2n+1}. The d{n} bit is
+ // set, because s{2n} is aligned.
+ //
+ // | d{n} |
+ // | s{2n+1} | s{2n} |
+ //
+ /* clang-format on */
+ typedef uint64_t SetType;
+ static const SetType AllSingleMask = (1ull << TotalSingle) - 1;
+ static const SetType AllDoubleMask = ((1ull << TotalDouble) - 1)
+ << TotalSingle;
+ static const SetType AllMask = AllDoubleMask | AllSingleMask;
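+  // For example, in this SetType s3 is bit 3 and d3 is bit 32 + 3 = 35,
+  // matching the FPRegisterID enum values above.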
+
+ // d15 is the ScratchFloatReg.
+ static const SetType NonVolatileDoubleMask =
+ ((1ULL << d8) | (1ULL << d9) | (1ULL << d10) | (1ULL << d11) |
+ (1ULL << d12) | (1ULL << d13) | (1ULL << d14));
+ // s30 and s31 alias d15.
+ static const SetType NonVolatileMask =
+ (NonVolatileDoubleMask |
+ ((1 << s16) | (1 << s17) | (1 << s18) | (1 << s19) | (1 << s20) |
+ (1 << s21) | (1 << s22) | (1 << s23) | (1 << s24) | (1 << s25) |
+ (1 << s26) | (1 << s27) | (1 << s28) | (1 << s29) | (1 << s30)));
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+ static const SetType VolatileDoubleMask =
+ AllDoubleMask & ~NonVolatileDoubleMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ // d15 is the ARM scratch float register.
+ // s30 and s31 alias d15.
+ static const SetType NonAllocatableMask =
+ ((1ULL << d15)) | (1ULL << s30) | (1ULL << s31);
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+static const uint32_t SpillSlotSize =
+ std::max(sizeof(Registers::RegisterContent),
+ sizeof(FloatRegisters::RegisterContent));
+
+template <typename T>
+class TypedRegisterSet;
+
+class VFPRegister {
+ public:
+ // What type of data is being stored in this register? UInt / Int are
+ // specifically for vcvt, where we need to know how the data is supposed to
+ // be converted.
+ enum RegType : uint8_t { Single = 0x0, Double = 0x1, UInt = 0x2, Int = 0x3 };
+
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+
+ // Bitfields below are all uint32_t to make sure MSVC packs them correctly.
+ public:
+ // ARM doesn't have more than 32 registers of each type, so 5 bits should
+ // suffice.
+ uint32_t code_ : 5;
+
+ protected:
+ uint32_t kind : 2;
+ uint32_t _isInvalid : 1;
+ uint32_t _isMissing : 1;
+
+ public:
+ constexpr VFPRegister(uint32_t r, RegType k)
+ : code_(Code(r)), kind(k), _isInvalid(false), _isMissing(false) {}
+ constexpr VFPRegister()
+ : code_(Code(0)), kind(Double), _isInvalid(true), _isMissing(false) {}
+
+ constexpr VFPRegister(RegType k, uint32_t id, bool invalid, bool missing)
+ : code_(Code(id)), kind(k), _isInvalid(invalid), _isMissing(missing) {}
+
+ explicit constexpr VFPRegister(Code id)
+ : code_(id), kind(Double), _isInvalid(false), _isMissing(false) {}
+ bool operator==(const VFPRegister& other) const {
+ return kind == other.kind && code_ == other.code_ &&
+ isInvalid() == other.isInvalid();
+ }
+ bool operator!=(const VFPRegister& other) const { return !operator==(other); }
+
+ bool isSingle() const { return kind == Single; }
+ bool isDouble() const { return kind == Double; }
+ bool isSimd128() const { return false; }
+ bool isFloat() const { return (kind == Double) || (kind == Single); }
+ bool isInt() const { return (kind == UInt) || (kind == Int); }
+ bool isSInt() const { return kind == Int; }
+ bool isUInt() const { return kind == UInt; }
+ bool equiv(const VFPRegister& other) const { return other.kind == kind; }
+ size_t size() const { return (kind == Double) ? 8 : 4; }
+ bool isInvalid() const { return _isInvalid; }
+ bool isMissing() const {
+ MOZ_ASSERT(!_isInvalid);
+ return _isMissing;
+ }
+
+ VFPRegister doubleOverlay(unsigned int which = 0) const;
+ VFPRegister singleOverlay(unsigned int which = 0) const;
+ VFPRegister sintOverlay(unsigned int which = 0) const;
+ VFPRegister uintOverlay(unsigned int which = 0) const;
+
+ VFPRegister asSingle() const { return singleOverlay(); }
+ VFPRegister asDouble() const { return doubleOverlay(); }
+ VFPRegister asSimd128() const { MOZ_CRASH("NYI"); }
+
+ struct VFPRegIndexSplit;
+ VFPRegIndexSplit encode();
+
+ // For serializing values.
+ struct VFPRegIndexSplit {
+ const uint32_t block : 4;
+ const uint32_t bit : 1;
+
+ private:
+ friend VFPRegIndexSplit js::jit::VFPRegister::encode();
+
+ VFPRegIndexSplit(uint32_t block_, uint32_t bit_)
+ : block(block_), bit(bit_) {
+ MOZ_ASSERT(block == block_);
+ MOZ_ASSERT(bit == bit_);
+ }
+ };
+
+ Code code() const {
+ MOZ_ASSERT(!_isInvalid && !_isMissing);
+ // This should only be used in areas where we only have doubles and
+ // singles.
+ MOZ_ASSERT(isFloat());
+ return Code(code_ | (kind << 5));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!_isInvalid && !_isMissing);
+ return Encoding(code_);
+ }
+ uint32_t id() const { return code_; }
+ static VFPRegister FromCode(uint32_t i) {
+ uint32_t code = i & 31;
+ uint32_t kind = i >> 5;
+ return VFPRegister(code, RegType(kind));
+ }
+ bool volatile_() const {
+ if (isDouble()) {
+ return !!((1ULL << (code_ >> 1)) & FloatRegisters::VolatileMask);
+ }
+ return !!((1ULL << code_) & FloatRegisters::VolatileMask);
+ }
+ const char* name() const {
+ if (isDouble()) {
+ return FloatRegisters::GetDoubleName(Encoding(code_));
+ }
+ return FloatRegisters::GetSingleName(Encoding(code_));
+ }
+ bool aliases(const VFPRegister& other) {
+ if (kind == other.kind) {
+ return code_ == other.code_;
+ }
+ return doubleOverlay() == other.doubleOverlay();
+ }
+ static const int NumAliasedDoubles = 16;
+ uint32_t numAliased() const {
+ if (isDouble()) {
+ if (code_ < NumAliasedDoubles) {
+ return 3;
+ }
+ return 1;
+ }
+ return 2;
+ }
+
+ VFPRegister aliased(uint32_t aliasIdx) {
+ if (aliasIdx == 0) {
+ return *this;
+ }
+ if (isDouble()) {
+ MOZ_ASSERT(code_ < NumAliasedDoubles);
+ MOZ_ASSERT(aliasIdx <= 2);
+ return singleOverlay(aliasIdx - 1);
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ return doubleOverlay(aliasIdx - 1);
+ }
+ uint32_t numAlignedAliased() const {
+ if (isDouble()) {
+ if (code_ < NumAliasedDoubles) {
+ return 2;
+ }
+ return 1;
+ }
+ // s1 has 0 other aligned aliases, 1 total.
+    // s0 has 1 other aligned alias, 2 total.
+ return 2 - (code_ & 1);
+ }
+ // | d0 |
+ // | s0 | s1 |
+ // If we've stored s0 and s1 in memory, we also want to say that d0 is
+ // stored there, but it is only stored at the location where it is aligned
+ // e.g. at s0, not s1.
+ VFPRegister alignedAliased(uint32_t aliasIdx) {
+ if (aliasIdx == 0) {
+ return *this;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ if (isDouble()) {
+ MOZ_ASSERT(code_ < NumAliasedDoubles);
+ return singleOverlay(aliasIdx - 1);
+ }
+ MOZ_ASSERT((code_ & 1) == 0);
+ return doubleOverlay(aliasIdx - 1);
+ }
+
+ typedef FloatRegisters::SetType SetType;
+
+  // This function is used to ensure that the register set can represent all
+  // Single registers, even when we are handling a mix of double and single
+  // registers.
+ //
+ // s0.alignedOrDominatedAliasedSet() == s0 | d0.
+ // s1.alignedOrDominatedAliasedSet() == s1.
+ // d0.alignedOrDominatedAliasedSet() == s0 | s1 | d0.
+ //
+  // This way the Allocatable register set does not have to do any arithmetic
+ // to know if a register is available or not, as we have the following
+ // relations:
+ //
+ // d0.alignedOrDominatedAliasedSet() ==
+ // s0.alignedOrDominatedAliasedSet() | s1.alignedOrDominatedAliasedSet()
+ //
+ // s0.alignedOrDominatedAliasedSet() & s1.alignedOrDominatedAliasedSet() == 0
+ //
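+  // In terms of SetType bit positions (see FloatRegisters above), a sketch:
+  // s0 maps to bit 0 plus bit 32 (d0), s1 maps to bit 1 only, and d1 maps to
+  // bits 2 and 3 (s2, s3) plus bit 33 (d1).
+  //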
+ SetType alignedOrDominatedAliasedSet() const {
+ if (isSingle()) {
+ if (code_ % 2 != 0) {
+ return SetType(1) << code_;
+ }
+ return (SetType(1) << code_) | (SetType(1) << (32 + code_ / 2));
+ }
+
+ MOZ_ASSERT(isDouble());
+ return (SetType(0b11) << (code_ * 2)) | (SetType(1) << (32 + code_));
+ }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ return SetType(0);
+ }
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static Code FromName(const char* name) {
+ return FloatRegisters::FromName(name);
+ }
+ static TypedRegisterSet<VFPRegister> ReduceSetForPush(
+ const TypedRegisterSet<VFPRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<VFPRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+};
+
+template <>
+inline VFPRegister::SetType
+VFPRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline VFPRegister::SetType
+VFPRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline VFPRegister::SetType VFPRegister::LiveAsIndexableSet<RegTypeName::Any>(
+ SetType set) {
+ return set;
+}
+
+template <>
+inline VFPRegister::SetType
+VFPRegister::AllocatableAsIndexableSet<RegTypeName::Float32>(SetType set) {
+  // Single registers do not dominate any smaller registers, thus masking is
+  // enough to convert an allocatable set into the set of available single
+  // registers.
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline VFPRegister::SetType
+VFPRegister::AllocatableAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ /* clang-format off */
+  // An allocatable float register set is represented as follows:
+ //
+ // uuuu uuuu uuuu uuuu dddd dddd dddd dddd ssss ssss ssss ssss ssss ssss ssss ssss
+ // ^ ^ ^ ^
+ // '-- d15 d0 --' '-- s31 s0 --'
+ //
+ // ...0...00... : s{2n}, s{2n+1} and d{n} are not available
+ // ...1...01... : s{2n} is available
+ // ...0...10... : s{2n+1} is available
+ // ...1...11... : s{2n}, s{2n+1} and d{n} are available
+ //
+  // The goal of this function is to return the set of double registers which
+  // are available, as an indexable bit set. That is, a double register's bit
+  // is set in the returned set iff that register is available.
+  //
+  // To do so, this function converts the 32-bit set of single registers into
+  // a 16-bit set of equivalent double registers. Then, we mask out double
+  // registers which do not have all the single registers that compose them.
+  // As the d{n} bit is set when s{2n} is available, we only need to take
+  // s{2n+1} into account.
+ /* clang-format on */
+
+ // Convert s7s6s5s4 s3s2s1s0 into s7s5s3s1, for all s0-s31.
+ SetType s2d = AllocatableAsIndexableSet<RegTypeName::Float32>(set);
+ static_assert(FloatRegisters::TotalSingle == 32, "Wrong mask");
+ s2d = (0xaaaaaaaa & s2d) >> 1; // Filter s{2n+1} registers.
+ // Group adjacent bits as follow:
+ // 0.0.s3.s1 == ((0.s3.0.s1) >> 1 | (0.s3.0.s1)) & 0b0011;
+ s2d = ((s2d >> 1) | s2d) & 0x33333333; // 0a0b --> 00ab
+ s2d = ((s2d >> 2) | s2d) & 0x0f0f0f0f; // 00ab00cd --> 0000abcd
+ s2d = ((s2d >> 4) | s2d) & 0x00ff00ff;
+ s2d = ((s2d >> 8) | s2d) & 0x0000ffff;
+ // Move the s7s5s3s1 to the aliased double positions.
+ s2d = s2d << FloatRegisters::TotalSingle;
+
+ // Note: We currently do not use any representation for d16-d31.
+ static_assert(FloatRegisters::TotalDouble == 16,
+ "d16-d31 do not have a single register mapping");
+
+ // Filter out any double register which are not allocatable due to
+ // non-aligned dominated single registers.
+ return set & s2d;
+}
+
+// The only floating point register set that we work with are the VFP Registers.
+typedef VFPRegister FloatRegister;
+
+uint32_t GetARMFlags();
+bool HasARMv7();
+bool HasMOVWT();
+bool HasLDSTREXBHD(); // {LD,ST}REX{B,H,D}
+bool HasDMBDSBISB(); // DMB, DSB, and ISB
+bool HasVFPv3();
+bool HasVFP();
+bool Has32DP();
+bool HasIDIV();
+bool HasNEON();
+
+extern volatile uint32_t armHwCapFlags;
+
+// These are not part of the HWCAP flags, but we need to know them, and these
+// bits are otherwise unused. Define them here so that their use can be
+// inlined by the simulator.
+
+// A bit to flag when signaled alignment faults are to be fixed up.
+#define HWCAP_FIXUP_FAULT (1 << 24)
+
+// A bit to flag when the flags are uninitialized, so they can be atomically
+// set.
+#define HWCAP_UNINITIALIZED (1 << 25)
+
+// A bit to flag when alignment faults are enabled and signaled.
+#define HWCAP_ALIGNMENT_FAULT (1 << 26)
+
+// A bit to flag the use of the hardfp ABI.
+#define HWCAP_USE_HARDFP_ABI (1 << 27)
+
+// A bit to flag the use of the ARMv7 arch, otherwise ARMv6.
+#define HWCAP_ARMv7 (1 << 28)
+
+// Top three bits are reserved, do not use them.
+
+// Returns true when cpu alignment faults are enabled and signaled, and thus we
+// should ensure loads and stores are aligned.
+inline bool HasAlignmentFault() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_ALIGNMENT_FAULT;
+}
+
+#ifdef JS_SIMULATOR_ARM
+// Returns true when cpu alignment faults will be fixed up by the
+// "operating system", which functionality we will emulate.
+inline bool FixupFault() {
+ MOZ_ASSERT(armHwCapFlags != HWCAP_UNINITIALIZED);
+ return armHwCapFlags & HWCAP_FIXUP_FAULT;
+}
+#endif
+
+// ARM/D32 has double registers that can NOT be treated as float32, and this
+// requires some dances in lowering.
+inline bool hasUnaliasedDouble() { return Has32DP(); }
+
+// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32 to
+// a double as a temporary, you need a temporary double register.
+inline bool hasMultiAlias() { return true; }
+
+// InitARMFlags is called from the JitContext constructor to read the hardware
+// flags. The call is a no-op after the first call, or if the JS shell has
+// already set the flags (it has a command line switch for this, see
+// ParseARMHwCapFlags).
+//
+// If the environment variable ARMHWCAP is set then the flags are read from it
+// instead; see ParseARMHwCapFlags.
+void InitARMFlags();
+
+// Register a string denoting ARM hardware flags. During engine initialization,
+// these flags will then be used instead of the actual hardware capabilities.
+// This must be called before JS_Init and the passed string's buffer must
+// outlive the JS_Init call.
+void SetARMHwCapFlagsString(const char* armHwCap);
+
+// Retrieve the ARM hardware flags as a bitmask. They must have been set.
+uint32_t GetARMFlags();
+
+// If the simulator is used then the ABI choice is dynamic. Otherwise the ABI is
+// static and useHardFpABI is inlined so that unused branches can be optimized
+// away.
+#ifdef JS_SIMULATOR_ARM
+bool UseHardFpABI();
+#else
+static inline bool UseHardFpABI() {
+# if defined(JS_CODEGEN_ARM_HARDFP)
+ return true;
+# else
+ return false;
+# endif
+}
+#endif
+
+// In order to handle SoftFp ABI calls, we need to be able to express that we
+// have ABIArg which are represented by pair of general purpose registers.
+#define JS_CODEGEN_REGISTER_PAIR 1
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_Architecture_arm_h */
diff --git a/js/src/jit/arm/Assembler-arm.cpp b/js/src/jit/arm/Assembler-arm.cpp
new file mode 100644
index 0000000000..a1213b6f21
--- /dev/null
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -0,0 +1,2832 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/Assembler-arm.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Sprintf.h"
+
+#include <type_traits>
+
+#include "gc/Marking.h"
+#include "jit/arm/disasm/Disasm-arm.h"
+#include "jit/arm/MacroAssembler-arm.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/MacroAssembler.h"
+#include "vm/Realm.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::CountLeadingZeroes32;
+using mozilla::DebugOnly;
+
+using LabelDoc = DisassemblerSpew::LabelDoc;
+using LiteralDoc = DisassemblerSpew::LiteralDoc;
+
+void dbg_break() {}
+
+// The ABIArgGenerator is used for making system ABI calls and for inter-wasm
+// calls. The system ABI can either be SoftFp or HardFp, and inter-wasm calls
+// are always HardFp calls. The initialization defaults to HardFp, and the ABI
+// choice is made before any system ABI calls with the method "setUseHardFp".
+ABIArgGenerator::ABIArgGenerator()
+ : intRegIndex_(0),
+ floatRegIndex_(0),
+ stackOffset_(0),
+ current_(),
+ useHardFp_(true) {}
+
+// See the "Parameter Passing" section of the "Procedure Call Standard for the
+// ARM Architecture" documentation.
+ABIArg ABIArgGenerator::softNext(MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_));
+ intRegIndex_++;
+ break;
+ case MIRType::Int64:
+ // Make sure to use an even register index. Increase to next even number
+ // when odd.
+ intRegIndex_ = (intRegIndex_ + 1) & ~1;
+ if (intRegIndex_ == NumIntArgRegs) {
+ // Align the stack on 8 bytes.
+ static const uint32_t align = sizeof(uint64_t) - 1;
+ stackOffset_ = (stackOffset_ + align) & ~align;
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_),
+ Register::FromCode(intRegIndex_ + 1));
+ intRegIndex_ += 2;
+ break;
+ case MIRType::Float32:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_));
+ intRegIndex_++;
+ break;
+ case MIRType::Double:
+ // Make sure to use an even register index. Increase to next even number
+ // when odd.
+ intRegIndex_ = (intRegIndex_ + 1) & ~1;
+ if (intRegIndex_ == NumIntArgRegs) {
+ // Align the stack on 8 bytes.
+ static const uint32_t align = sizeof(double) - 1;
+ stackOffset_ = (stackOffset_ + align) & ~align;
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(double);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_),
+ Register::FromCode(intRegIndex_ + 1));
+ intRegIndex_ += 2;
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+
+ return current_;
+}
+
+ABIArg ABIArgGenerator::hardNext(MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_));
+ intRegIndex_++;
+ break;
+ case MIRType::Int64:
+ // Make sure to use an even register index. Increase to next even number
+ // when odd.
+ intRegIndex_ = (intRegIndex_ + 1) & ~1;
+ if (intRegIndex_ == NumIntArgRegs) {
+ // Align the stack on 8 bytes.
+ static const uint32_t align = sizeof(uint64_t) - 1;
+ stackOffset_ = (stackOffset_ + align) & ~align;
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_),
+ Register::FromCode(intRegIndex_ + 1));
+ intRegIndex_ += 2;
+ break;
+ case MIRType::Float32:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ }
+ current_ = ABIArg(VFPRegister(floatRegIndex_, VFPRegister::Single));
+ floatRegIndex_++;
+ break;
+ case MIRType::Double:
+      // Double registers are composed of two single registers, so we have to
+      // skip to an even float-register index so that the pair of float
+      // registers can hold a double value.
+ floatRegIndex_ = (floatRegIndex_ + 1) & ~1;
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ static const uint32_t align = sizeof(double) - 1;
+ stackOffset_ = (stackOffset_ + align) & ~align;
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ current_ = ABIArg(VFPRegister(floatRegIndex_ >> 1, VFPRegister::Double));
+ floatRegIndex_ += 2;
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+
+ return current_;
+}
+
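+// A sketch of the resulting assignments (illustrative, not exhaustive): with
+// the soft-float ABI, the sequence (Int32, Double, Int32) is assigned r0, the
+// pair r2:r3, and then the stack at offset 0; with the hard-float ABI,
+// (Float32, Double) is assigned s0 and d1, since a double must start at an
+// even single-register index.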
+ABIArg ABIArgGenerator::next(MIRType type) {
+ if (useHardFp_) {
+ return hardNext(type);
+ }
+ return softNext(type);
+}
+
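+// Sketched cases: an access with align() == 0 is never reported as unaligned,
+// a Float64 access with align() >= 4 is not reported as unaligned, and an
+// Int32 access with align() == 2 is.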
+bool js::jit::IsUnaligned(const wasm::MemoryAccessDesc& access) {
+ if (!access.align()) {
+ return false;
+ }
+
+ if (access.type() == Scalar::Float64 && access.align() >= 4) {
+ return false;
+ }
+
+ return access.align() < access.byteSize();
+}
+
+// Encode a standard register when it is being used as src1, the dest, and an
+// extra register. These should never be called with an InvalidReg.
+uint32_t js::jit::RT(Register r) {
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 12;
+}
+
+uint32_t js::jit::RN(Register r) {
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 16;
+}
+
+uint32_t js::jit::RD(Register r) {
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 12;
+}
+
+uint32_t js::jit::RM(Register r) {
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 8;
+}
+
+// Encode a standard register when it is being used as src1, the dest, and an
+// extra register. For these, an InvalidReg is used to indicate an optional
+// register that has been omitted.
+uint32_t js::jit::maybeRT(Register r) {
+ if (r == InvalidReg) {
+ return 0;
+ }
+
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 12;
+}
+
+uint32_t js::jit::maybeRN(Register r) {
+ if (r == InvalidReg) {
+ return 0;
+ }
+
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 16;
+}
+
+uint32_t js::jit::maybeRD(Register r) {
+ if (r == InvalidReg) {
+ return 0;
+ }
+
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return r.code() << 12;
+}
+
+Register js::jit::toRD(Instruction i) {
+ return Register::FromCode((i.encode() >> 12) & 0xf);
+}
+Register js::jit::toR(Instruction i) {
+ return Register::FromCode(i.encode() & 0xf);
+}
+
+Register js::jit::toRM(Instruction i) {
+ return Register::FromCode((i.encode() >> 8) & 0xf);
+}
+
+Register js::jit::toRN(Instruction i) {
+ return Register::FromCode((i.encode() >> 16) & 0xf);
+}
+
+uint32_t js::jit::VD(VFPRegister vr) {
+ if (vr.isMissing()) {
+ return 0;
+ }
+
+ // Bits 15,14,13,12, 22.
+ VFPRegister::VFPRegIndexSplit s = vr.encode();
+ return s.bit << 22 | s.block << 12;
+}
+uint32_t js::jit::VN(VFPRegister vr) {
+ if (vr.isMissing()) {
+ return 0;
+ }
+
+ // Bits 19,18,17,16, 7.
+ VFPRegister::VFPRegIndexSplit s = vr.encode();
+ return s.bit << 7 | s.block << 16;
+}
+uint32_t js::jit::VM(VFPRegister vr) {
+ if (vr.isMissing()) {
+ return 0;
+ }
+
+ // Bits 5, 3,2,1,0.
+ VFPRegister::VFPRegIndexSplit s = vr.encode();
+ return s.bit << 5 | s.block;
+}
+
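+// A sketch of the split below: a double register dN yields block = N & 0xf
+// and bit = N >> 4 (the "D" bit), while a single register sN yields
+// block = N >> 1 and bit = N & 1; e.g. d18 -> (block 2, bit 1) and
+// s5 -> (block 2, bit 1).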
+VFPRegister::VFPRegIndexSplit jit::VFPRegister::encode() {
+ MOZ_ASSERT(!_isInvalid);
+
+ switch (kind) {
+ case Double:
+ return VFPRegIndexSplit(code_ & 0xf, code_ >> 4);
+ case Single:
+ return VFPRegIndexSplit(code_ >> 1, code_ & 1);
+ default:
+ // VFP register treated as an integer, NOT a gpr.
+ return VFPRegIndexSplit(code_ >> 1, code_ & 1);
+ }
+}
+
+bool InstDTR::IsTHIS(const Instruction& i) {
+ return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
+}
+
+InstDTR* InstDTR::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstDTR*)&i;
+ }
+ return nullptr;
+}
+
+bool InstLDR::IsTHIS(const Instruction& i) {
+ return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
+}
+
+InstLDR* InstLDR::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstLDR*)&i;
+ }
+ return nullptr;
+}
+
+InstNOP* InstNOP::AsTHIS(Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstNOP*)&i;
+ }
+ return nullptr;
+}
+
+bool InstNOP::IsTHIS(const Instruction& i) {
+ return (i.encode() & 0x0fffffff) == NopInst;
+}
+
+bool InstBranchReg::IsTHIS(const Instruction& i) {
+ return InstBXReg::IsTHIS(i) || InstBLXReg::IsTHIS(i);
+}
+
+InstBranchReg* InstBranchReg::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstBranchReg*)&i;
+ }
+ return nullptr;
+}
+void InstBranchReg::extractDest(Register* dest) { *dest = toR(*this); }
+bool InstBranchReg::checkDest(Register dest) { return dest == toR(*this); }
+
+bool InstBranchImm::IsTHIS(const Instruction& i) {
+ return InstBImm::IsTHIS(i) || InstBLImm::IsTHIS(i);
+}
+
+InstBranchImm* InstBranchImm::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstBranchImm*)&i;
+ }
+ return nullptr;
+}
+
+void InstBranchImm::extractImm(BOffImm* dest) { *dest = BOffImm(*this); }
+
+bool InstBXReg::IsTHIS(const Instruction& i) {
+ return (i.encode() & IsBRegMask) == IsBX;
+}
+
+InstBXReg* InstBXReg::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstBXReg*)&i;
+ }
+ return nullptr;
+}
+
+bool InstBLXReg::IsTHIS(const Instruction& i) {
+ return (i.encode() & IsBRegMask) == IsBLX;
+}
+InstBLXReg* InstBLXReg::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstBLXReg*)&i;
+ }
+ return nullptr;
+}
+
+bool InstBImm::IsTHIS(const Instruction& i) {
+ return (i.encode() & IsBImmMask) == IsB;
+}
+InstBImm* InstBImm::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstBImm*)&i;
+ }
+ return nullptr;
+}
+
+bool InstBLImm::IsTHIS(const Instruction& i) {
+ return (i.encode() & IsBImmMask) == IsBL;
+}
+InstBLImm* InstBLImm::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstBLImm*)&i;
+ }
+ return nullptr;
+}
+
+bool InstMovWT::IsTHIS(Instruction& i) {
+ return InstMovW::IsTHIS(i) || InstMovT::IsTHIS(i);
+}
+InstMovWT* InstMovWT::AsTHIS(Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstMovWT*)&i;
+ }
+ return nullptr;
+}
+
+void InstMovWT::extractImm(Imm16* imm) { *imm = Imm16(*this); }
+bool InstMovWT::checkImm(Imm16 imm) {
+ return imm.decode() == Imm16(*this).decode();
+}
+
+void InstMovWT::extractDest(Register* dest) { *dest = toRD(*this); }
+bool InstMovWT::checkDest(Register dest) { return dest == toRD(*this); }
+
+bool InstMovW::IsTHIS(const Instruction& i) {
+ return (i.encode() & IsWTMask) == IsW;
+}
+
+InstMovW* InstMovW::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstMovW*)&i;
+ }
+ return nullptr;
+}
+InstMovT* InstMovT::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstMovT*)&i;
+ }
+ return nullptr;
+}
+
+bool InstMovT::IsTHIS(const Instruction& i) {
+ return (i.encode() & IsWTMask) == IsT;
+}
+
+InstALU* InstALU::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstALU*)&i;
+ }
+ return nullptr;
+}
+bool InstALU::IsTHIS(const Instruction& i) {
+ return (i.encode() & ALUMask) == 0;
+}
+void InstALU::extractOp(ALUOp* ret) { *ret = ALUOp(encode() & (0xf << 21)); }
+bool InstALU::checkOp(ALUOp op) {
+ ALUOp mine;
+ extractOp(&mine);
+ return mine == op;
+}
+void InstALU::extractDest(Register* ret) { *ret = toRD(*this); }
+bool InstALU::checkDest(Register rd) { return rd == toRD(*this); }
+void InstALU::extractOp1(Register* ret) { *ret = toRN(*this); }
+bool InstALU::checkOp1(Register rn) { return rn == toRN(*this); }
+Operand2 InstALU::extractOp2() { return Operand2(encode()); }
+
+InstCMP* InstCMP::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstCMP*)&i;
+ }
+ return nullptr;
+}
+
+bool InstCMP::IsTHIS(const Instruction& i) {
+ return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkDest(r0) &&
+ InstALU::AsTHIS(i)->checkOp(OpCmp);
+}
+
+InstMOV* InstMOV::AsTHIS(const Instruction& i) {
+ if (IsTHIS(i)) {
+ return (InstMOV*)&i;
+ }
+ return nullptr;
+}
+
+bool InstMOV::IsTHIS(const Instruction& i) {
+ return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) &&
+ InstALU::AsTHIS(i)->checkOp(OpMov);
+}
+
+Op2Reg Operand2::toOp2Reg() const { return *(Op2Reg*)this; }
+
+Imm16::Imm16(Instruction& inst)
+ : lower_(inst.encode() & 0xfff),
+ upper_(inst.encode() >> 16),
+ invalid_(0xfff) {}
+
+Imm16::Imm16(uint32_t imm)
+ : lower_(imm & 0xfff), pad_(0), upper_((imm >> 12) & 0xf), invalid_(0) {
+ MOZ_ASSERT(decode() == imm);
+}
+
+Imm16::Imm16() : invalid_(0xfff) {}
+
+void Assembler::finish() {
+ flush();
+ MOZ_ASSERT(!isFinished);
+ isFinished = true;
+}
+
+bool Assembler::appendRawCode(const uint8_t* code, size_t numBytes) {
+ flush();
+ return m_buffer.appendRawCode(code, numBytes);
+}
+
+bool Assembler::reserve(size_t size) {
+ // This buffer uses fixed-size chunks so there's no point in reserving
+ // now vs. on-demand.
+ return !oom();
+}
+
+bool Assembler::swapBuffer(wasm::Bytes& bytes) {
+ // For now, specialize to the one use case. As long as wasm::Bytes is a
+ // Vector, not a linked-list of chunks, there's not much we can do other
+ // than copy.
+ MOZ_ASSERT(bytes.empty());
+ if (!bytes.resize(bytesNeeded())) {
+ return false;
+ }
+ m_buffer.executableCopy(bytes.begin());
+ return true;
+}
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+}
+
+class RelocationIterator {
+ CompactBufferReader reader_;
+ // Offset in bytes.
+ uint32_t offset_;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}
+
+ bool read() {
+ if (!reader_.more()) {
+ return false;
+ }
+ offset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const { return offset_; }
+};
+
+template <class Iter>
+const uint32_t* Assembler::GetCF32Target(Iter* iter) {
+ Instruction* inst1 = iter->cur();
+
+ if (inst1->is<InstBranchImm>()) {
+ // See if we have a simple case, b #offset.
+ BOffImm imm;
+ InstBranchImm* jumpB = inst1->as<InstBranchImm>();
+ jumpB->extractImm(&imm);
+ return imm.getDest(inst1)->raw();
+ }
+
+ if (inst1->is<InstMovW>()) {
+ // See if we have the complex case:
+ // movw r_temp, #imm1
+ // movt r_temp, #imm2
+ // bx r_temp
+ // OR
+ // movw r_temp, #imm1
+ // movt r_temp, #imm2
+ // str pc, [sp]
+ // bx r_temp
+
+ Imm16 targ_bot;
+ Imm16 targ_top;
+ Register temp;
+
+ // Extract both the temp register and the bottom immediate.
+ InstMovW* bottom = inst1->as<InstMovW>();
+ bottom->extractImm(&targ_bot);
+ bottom->extractDest(&temp);
+
+ // Extract the top part of the immediate.
+ Instruction* inst2 = iter->next();
+ MOZ_ASSERT(inst2->is<InstMovT>());
+ InstMovT* top = inst2->as<InstMovT>();
+ top->extractImm(&targ_top);
+
+ // Make sure they are being loaded into the same register.
+ MOZ_ASSERT(top->checkDest(temp));
+
+ // Make sure we're branching to the same register.
+#ifdef DEBUG
+ // A toggled call sometimes has a NOP instead of a branch for the third
+ // instruction. No way to assert that it's valid in that situation.
+ Instruction* inst3 = iter->next();
+ if (!inst3->is<InstNOP>()) {
+ InstBranchReg* realBranch = nullptr;
+ if (inst3->is<InstBranchReg>()) {
+ realBranch = inst3->as<InstBranchReg>();
+ } else {
+ Instruction* inst4 = iter->next();
+ realBranch = inst4->as<InstBranchReg>();
+ }
+ MOZ_ASSERT(realBranch->checkDest(temp));
+ }
+#endif
+
+ uint32_t* dest = (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
+ return dest;
+ }
+
+ if (inst1->is<InstLDR>()) {
+ return *(uint32_t**)inst1->as<InstLDR>()->dest();
+ }
+
+ MOZ_CRASH("unsupported branch relocation");
+}
+
+uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
+ InstructionIterator iter((Instruction*)instPtr);
+ uintptr_t ret = (uintptr_t)GetPtr32Target(iter, nullptr, nullptr);
+ return ret;
+}
+
+const uint32_t* Assembler::GetPtr32Target(InstructionIterator start,
+ Register* dest, RelocStyle* style) {
+ Instruction* load1 = start.cur();
+ Instruction* load2 = start.next();
+
+ if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
+ if (style) {
+ *style = L_MOVWT;
+ }
+
+ // See if we have the complex case:
+ // movw r_temp, #imm1
+ // movt r_temp, #imm2
+
+ Imm16 targ_bot;
+ Imm16 targ_top;
+ Register temp;
+
+ // Extract both the temp register and the bottom immediate.
+ InstMovW* bottom = load1->as<InstMovW>();
+ bottom->extractImm(&targ_bot);
+ bottom->extractDest(&temp);
+
+ // Extract the top part of the immediate.
+ InstMovT* top = load2->as<InstMovT>();
+ top->extractImm(&targ_top);
+
+ // Make sure they are being loaded into the same register.
+ MOZ_ASSERT(top->checkDest(temp));
+
+ if (dest) {
+ *dest = temp;
+ }
+
+ uint32_t* value =
+ (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
+ return value;
+ }
+
+ if (load1->is<InstLDR>()) {
+ if (style) {
+ *style = L_LDR;
+ }
+ if (dest) {
+ *dest = toRD(*load1);
+ }
+ return *(uint32_t**)load1->as<InstLDR>()->dest();
+ }
+
+ MOZ_CRASH("unsupported relocation");
+}
+
+static JitCode* CodeFromJump(InstructionIterator* jump) {
+ uint8_t* target = (uint8_t*)Assembler::GetCF32Target(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ InstructionIterator institer((Instruction*)(code->raw() + iter.offset()));
+ JitCode* child = CodeFromJump(&institer);
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void TraceOneDataRelocation(JSTracer* trc,
+ mozilla::Maybe<AutoWritableJitCode>& awjc,
+ JitCode* code, InstructionIterator iter) {
+ Register dest;
+ Assembler::RelocStyle rs;
+ const void* prior = Assembler::GetPtr32Target(iter, &dest, &rs);
+ void* ptr = const_cast<void*>(prior);
+
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(
+ trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
+
+ if (ptr != prior) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+
+ MacroAssemblerARM::ma_mov_patch(Imm32(int32_t(ptr)), dest,
+ Assembler::Always, rs, iter);
+ }
+}
+
+/* static */
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ InstructionIterator iter((Instruction*)(code->raw() + offset));
+ TraceOneDataRelocation(trc, awjc, code, iter);
+ }
+}
+
+void Assembler::copyJumpRelocationTable(uint8_t* dest) {
+ if (jumpRelocations_.length()) {
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+ }
+}
+
+void Assembler::copyDataRelocationTable(uint8_t* dest) {
+ if (dataRelocations_.length()) {
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+ }
+}
+
+void Assembler::processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+}
+
+void Assembler::writeCodePointer(CodeLabel* label) {
+ m_buffer.assertNoPoolAndNoNops();
+ BufferOffset off = writeInst(-1);
+ label->patchAt()->bind(off.getOffset());
+}
+
+void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
+ size_t offset = label.patchAt().offset();
+ size_t target = label.target().offset();
+ *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
+}
+
+Assembler::Condition Assembler::InvertCondition(Condition cond) {
+ const uint32_t ConditionInversionBit = 0x10000000;
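+  // ARM condition codes occupy the top four bits of the encoding and come in
+  // complementary pairs differing only in their lowest bit, so flipping bit
+  // 28 inverts the condition (e.g. Equal <-> NotEqual).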
+ return Condition(ConditionInversionBit ^ cond);
+}
+
+Assembler::Condition Assembler::UnsignedCondition(Condition cond) {
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return cond;
+ case LessThan:
+ case Below:
+ return Below;
+ case LessThanOrEqual:
+ case BelowOrEqual:
+ return BelowOrEqual;
+ case GreaterThan:
+ case Above:
+ return Above;
+ case AboveOrEqual:
+ case GreaterThanOrEqual:
+ return AboveOrEqual;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+Assembler::Condition Assembler::ConditionWithoutEqual(Condition cond) {
+ switch (cond) {
+ case LessThan:
+ case LessThanOrEqual:
+ return LessThan;
+ case Below:
+ case BelowOrEqual:
+ return Below;
+ case GreaterThan:
+ case GreaterThanOrEqual:
+ return GreaterThan;
+ case Above:
+ case AboveOrEqual:
+ return Above;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+Assembler::DoubleCondition Assembler::InvertCondition(DoubleCondition cond) {
+ const uint32_t ConditionInversionBit = 0x10000000;
+ return DoubleCondition(ConditionInversionBit ^ cond);
+}
+
+Imm8::TwoImm8mData Imm8::EncodeTwoImms(uint32_t imm) {
+ // In the ideal case, we are looking for a number that (in binary) looks
+ // like:
+ // 0b((00)*)n_1((00)*)n_2((00)*)
+ // left n1 mid n2
+ // where both n_1 and n_2 fit into 8 bits.
+ // Since this is being done with rotates, we also need to handle the case
+ // that one of these numbers is in fact split between the left and right
+ // sides, in which case the constant will look like:
+ // 0bn_1a((00)*)n_2((00)*)n_1b
+ // n1a mid n2 rgh n1b
+ // Also remember, values are rotated by multiples of two, and left, mid or
+ // right can have length zero.
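+  // A worked sketch: for imm = 0x00ff00ff the easy (non-wrapping) case below
+  // applies and we return Imm8mData(0xff, 8) and Imm8mData(0xff, 0), i.e.
+  // 0xff rotated right by 16 (0x00ff0000) plus 0xff rotated right by 0
+  // (0x000000ff).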
+ uint32_t imm1, imm2;
+ int left = CountLeadingZeroes32(imm) & 0x1E;
+ uint32_t no_n1 = imm & ~(0xff << (24 - left));
+
+ // Not technically needed: this case only happens if we can encode as a
+ // single imm8m. There is a perfectly reasonable encoding in this case, but
+ // we shouldn't encourage people to do things like this.
+ if (no_n1 == 0) {
+ return TwoImm8mData();
+ }
+
+ int mid = CountLeadingZeroes32(no_n1) & 0x1E;
+ uint32_t no_n2 =
+ no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));
+
+ if (no_n2 == 0) {
+ // We hit the easy case, no wraparound.
+ // Note: a single constant *may* look like this.
+ int imm1shift = left + 8;
+ int imm2shift = mid + 8;
+ imm1 = (imm >> (32 - imm1shift)) & 0xff;
+ if (imm2shift >= 32) {
+ imm2shift = 0;
+      // This assert does not always hold; in fact, it would lead to
+      // some incredibly subtle bugs.
+ // assert((imm & 0xff) == no_n1);
+ imm2 = no_n1;
+ } else {
+ imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
+ MOZ_ASSERT(((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) == imm2);
+ }
+ MOZ_ASSERT((imm1shift & 0x1) == 0);
+ MOZ_ASSERT((imm2shift & 0x1) == 0);
+ return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
+ datastore::Imm8mData(imm2, imm2shift >> 1));
+ }
+
+ // Either it wraps, or it does not fit. If we initially chopped off more
+ // than 8 bits, then it won't fit.
+ if (left >= 8) {
+ return TwoImm8mData();
+ }
+
+ int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
+ // All remaining set bits *must* fit into the lower 8 bits.
+ // The right == 8 case should be handled by the previous case.
+ if (right > 8) {
+ return TwoImm8mData();
+ }
+
+ // Make sure the initial bits that we removed for no_n1 fit into the
+ // 8-(32-right) leftmost bits.
+ if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
+    // BUT we may have removed more bits than we needed to for no_n1.
+    // E.g. for 0x04104001 we can encode 0x104 (suitably shifted) with a
+    // single op, then 0x04000001 with a second, but here we first try to
+    // encode 0x04100000 and find that we need a second op for 0x4000, and
+    // the 0x1 cannot be included in the encoding of 0x04100000.
+ no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
+ mid = CountLeadingZeroes32(no_n1) & 30;
+ no_n2 = no_n1 & ~((0xff << ((24 - mid) & 31)) | 0xff >> ((8 + mid) & 31));
+ if (no_n2 != 0) {
+ return TwoImm8mData();
+ }
+ }
+
+  // Now assemble all of this information into two coherent constants; each is
+  // a rotate right from the lower 8 bits.
+ int imm1shift = 8 - right;
+ imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
+ MOZ_ASSERT((imm1shift & ~0x1e) == 0);
+ // left + 8 + mid is the position of the leftmost bit of n_2.
+ // We needed to rotate 0x000000ab right by 8 in order to get 0xab000000,
+ // then shift again by the leftmost bit in order to get the constant that we
+ // care about.
+ int imm2shift = mid + 8;
+ imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
+ MOZ_ASSERT((imm1shift & 0x1) == 0);
+ MOZ_ASSERT((imm2shift & 0x1) == 0);
+ return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
+ datastore::Imm8mData(imm2, imm2shift >> 1));
+}
+
+ALUOp jit::ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm,
+ Register* negDest) {
+ // Find an alternate ALUOp to get the job done, and use a different imm.
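+  // For example, an add with #-8 becomes a sub with #8, and a cmp against
+  // #-1 becomes a cmn against #1.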
+ *negDest = dest;
+ switch (op) {
+ case OpMov:
+ *imm = Imm32(~imm->value);
+ return OpMvn;
+ case OpMvn:
+ *imm = Imm32(~imm->value);
+ return OpMov;
+ case OpAnd:
+ *imm = Imm32(~imm->value);
+ return OpBic;
+ case OpBic:
+ *imm = Imm32(~imm->value);
+ return OpAnd;
+ case OpAdd:
+ *imm = Imm32(-imm->value);
+ return OpSub;
+ case OpSub:
+ *imm = Imm32(-imm->value);
+ return OpAdd;
+ case OpCmp:
+ *imm = Imm32(-imm->value);
+ return OpCmn;
+ case OpCmn:
+ *imm = Imm32(-imm->value);
+ return OpCmp;
+ case OpTst:
+ MOZ_ASSERT(dest == InvalidReg);
+ *imm = Imm32(~imm->value);
+ *negDest = scratch;
+ return OpBic;
+ // orr has orn on thumb2 only.
+ default:
+ return OpInvalid;
+ }
+}
+
+bool jit::can_dbl(ALUOp op) {
+ // Some instructions can't be processed as two separate instructions such as
+ // and, and possibly add (when we're setting ccodes). There is also some
+ // hilarity with *reading* condition codes. For example, adc dest, src1,
+ // 0xfff; (add with carry) can be split up into adc dest, src1, 0xf00; add
+  // dest, dest, 0xff. Since "reading" the condition code conditionally
+  // increments the result by one, that only needs to be done on one of the
+  // two instructions.
+ switch (op) {
+ case OpBic:
+ case OpAdd:
+ case OpSub:
+ case OpEor:
+ case OpOrr:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool jit::condsAreSafe(ALUOp op) {
+ // Even when we are setting condition codes, sometimes we can get away with
+ // splitting an operation into two. For example, if our immediate is
+ // 0x00ff00ff, and the operation is eors we can split this in half, since x
+ // ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly
+ // the same as x ^ 0x00ff00ff. However, if the operation were adds, we
+ // cannot split this in half. If the source on the add is 0xfff00ff0, the
+  // result should be 0xef10ef, but do we set the overflow bit or not?
+ // Depending on which half is performed first (0x00ff0000 or 0x000000ff) the
+ // V bit will be set differently, and *not* updating the V bit would be
+ // wrong. Theoretically, the following should work:
+ // adds r0, r1, 0x00ff0000;
+ // addsvs r0, r1, 0x000000ff;
+ // addvc r0, r1, 0x000000ff;
+ // But this is 3 instructions, and at that point, we might as well use
+ // something else.
+ switch (op) {
+ case OpBic:
+ case OpOrr:
+ case OpEor:
+ return true;
+ default:
+ return false;
+ }
+}
+
+ALUOp jit::getDestVariant(ALUOp op) {
+ // All of the compare operations are dest-less variants of a standard
+ // operation. Given the dest-less variant, return the dest-ful variant.
+ switch (op) {
+ case OpCmp:
+ return OpSub;
+ case OpCmn:
+ return OpAdd;
+ case OpTst:
+ return OpAnd;
+ case OpTeq:
+ return OpEor;
+ default:
+ return op;
+ }
+}
+
+O2RegImmShift jit::O2Reg(Register r) { return O2RegImmShift(r, LSL, 0); }
+
+O2RegImmShift jit::lsl(Register r, int amt) {
+ MOZ_ASSERT(0 <= amt && amt <= 31);
+ return O2RegImmShift(r, LSL, amt);
+}
+
+O2RegImmShift jit::lsr(Register r, int amt) {
+ MOZ_ASSERT(1 <= amt && amt <= 32);
+ return O2RegImmShift(r, LSR, amt);
+}
+
+O2RegImmShift jit::ror(Register r, int amt) {
+ MOZ_ASSERT(1 <= amt && amt <= 31);
+ return O2RegImmShift(r, ROR, amt);
+}
+O2RegImmShift jit::rol(Register r, int amt) {
+ MOZ_ASSERT(1 <= amt && amt <= 31);
+ return O2RegImmShift(r, ROR, 32 - amt);
+}
+
+O2RegImmShift jit::asr(Register r, int amt) {
+ MOZ_ASSERT(1 <= amt && amt <= 32);
+ return O2RegImmShift(r, ASR, amt);
+}
+
+O2RegRegShift jit::lsl(Register r, Register amt) {
+ return O2RegRegShift(r, LSL, amt);
+}
+
+O2RegRegShift jit::lsr(Register r, Register amt) {
+ return O2RegRegShift(r, LSR, amt);
+}
+
+O2RegRegShift jit::ror(Register r, Register amt) {
+ return O2RegRegShift(r, ROR, amt);
+}
+
+O2RegRegShift jit::asr(Register r, Register amt) {
+ return O2RegRegShift(r, ASR, amt);
+}
+
+static js::jit::DoubleEncoder doubleEncoder;
+
+/* static */
+const js::jit::VFPImm js::jit::VFPImm::One(0x3FF00000);
+
+js::jit::VFPImm::VFPImm(uint32_t top) {
+ data_ = -1;
+ datastore::Imm8VFPImmData tmp;
+ if (doubleEncoder.lookup(top, &tmp)) {
+ data_ = tmp.encode();
+ }
+}
+
+BOffImm::BOffImm(const Instruction& inst) : data_(inst.encode() & 0x00ffffff) {}
+
+Instruction* BOffImm::getDest(Instruction* src) const {
+ // TODO: It is probably worthwhile to verify that src is actually a branch.
+ // NOTE: This does not explicitly shift the offset of the destination left by
+ // 2, since it is indexing into an array of instruction sized objects.
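+  // The "+ 2" accounts for the ARM PC reading as the address of the current
+  // instruction plus 8 bytes, i.e. two instructions ahead.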
+ return &src[((int32_t(data_) << 8) >> 8) + 2];
+}
+
+const js::jit::DoubleEncoder::DoubleEntry js::jit::DoubleEncoder::table[256] = {
+#include "jit/arm/DoubleEntryTable.tbl"
+};
+
+// VFPRegister implementation
+VFPRegister VFPRegister::doubleOverlay(unsigned int which) const {
+ MOZ_ASSERT(!_isInvalid);
+ MOZ_ASSERT(which == 0);
+ if (kind != Double) {
+ return VFPRegister(code_ >> 1, Double);
+ }
+ return *this;
+}
+VFPRegister VFPRegister::singleOverlay(unsigned int which) const {
+ MOZ_ASSERT(!_isInvalid);
+ if (kind == Double) {
+ // There are no corresponding float registers for d16-d31.
+ MOZ_ASSERT(code_ < 16);
+ MOZ_ASSERT(which < 2);
+ return VFPRegister((code_ << 1) + which, Single);
+ }
+ MOZ_ASSERT(which == 0);
+ return VFPRegister(code_, Single);
+}
+
+static_assert(
+ FloatRegisters::TotalDouble <= 16,
+ "We assume that every Double register also has an Integer personality");
+
+VFPRegister VFPRegister::sintOverlay(unsigned int which) const {
+ MOZ_ASSERT(!_isInvalid);
+ if (kind == Double) {
+ // There are no corresponding float registers for d16-d31.
+ MOZ_ASSERT(code_ < 16);
+ MOZ_ASSERT(which < 2);
+ return VFPRegister((code_ << 1) + which, Int);
+ }
+ MOZ_ASSERT(which == 0);
+ return VFPRegister(code_, Int);
+}
+VFPRegister VFPRegister::uintOverlay(unsigned int which) const {
+ MOZ_ASSERT(!_isInvalid);
+ if (kind == Double) {
+ // There are no corresponding float registers for d16-d31.
+ MOZ_ASSERT(code_ < 16);
+ MOZ_ASSERT(which < 2);
+ return VFPRegister((code_ << 1) + which, UInt);
+ }
+ MOZ_ASSERT(which == 0);
+ return VFPRegister(code_, UInt);
+}
+
+bool Assembler::oom() const {
+ return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
+ dataRelocations_.oom();
+}
+
+// Size of the instruction stream, in bytes, including pools. This function
+// expects that all pools that need to be placed have been placed. If they
+// haven't, then we need to go and flush the pools :(
+size_t Assembler::size() const { return m_buffer.size(); }
+// Size of the relocation table, in bytes.
+size_t Assembler::jumpRelocationTableBytes() const {
+ return jumpRelocations_.length();
+}
+size_t Assembler::dataRelocationTableBytes() const {
+ return dataRelocations_.length();
+}
+
+// Total size needed for the assembled code and relocation tables, in bytes.
+size_t Assembler::bytesNeeded() const {
+ return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
+}
+
+// Allocate memory for a branch instruction; it will be overwritten
+// subsequently and should not be disassembled.
+
+BufferOffset Assembler::allocBranchInst() {
+ return m_buffer.putInt(Always | InstNOP::NopInst);
+}
+
+void Assembler::WriteInstStatic(uint32_t x, uint32_t* dest) {
+ MOZ_ASSERT(dest != nullptr);
+ *dest = x;
+}
+
+void Assembler::haltingAlign(int alignment) {
+ // HLT with payload 0xBAAD
+ m_buffer.align(alignment, 0xE1000070 | (0xBAA << 8) | 0xD);
+}
+
+void Assembler::nopAlign(int alignment) { m_buffer.align(alignment); }
+
+BufferOffset Assembler::as_nop() { return writeInst(0xe320f000); }
+
+static uint32_t EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op,
+ SBit s, Assembler::Condition c) {
+ return (int)op | (int)s | (int)c | op2.encode() |
+ ((dest == InvalidReg) ? 0 : RD(dest)) |
+ ((src1 == InvalidReg) ? 0 : RN(src1));
+}
+
+BufferOffset Assembler::as_alu(Register dest, Register src1, Operand2 op2,
+ ALUOp op, SBit s, Condition c) {
+ return writeInst(EncodeAlu(dest, src1, op2, op, s, c));
+}
+
+BufferOffset Assembler::as_mov(Register dest, Operand2 op2, SBit s,
+ Condition c) {
+ return as_alu(dest, InvalidReg, op2, OpMov, s, c);
+}
+
+/* static */
+void Assembler::as_alu_patch(Register dest, Register src1, Operand2 op2,
+ ALUOp op, SBit s, Condition c, uint32_t* pos) {
+ WriteInstStatic(EncodeAlu(dest, src1, op2, op, s, c), pos);
+}
+
+/* static */
+void Assembler::as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c,
+ uint32_t* pos) {
+ as_alu_patch(dest, InvalidReg, op2, OpMov, s, c, pos);
+}
+
+BufferOffset Assembler::as_mvn(Register dest, Operand2 op2, SBit s,
+ Condition c) {
+ return as_alu(dest, InvalidReg, op2, OpMvn, s, c);
+}
+
+// Logical operations.
+BufferOffset Assembler::as_and(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpAnd, s, c);
+}
+BufferOffset Assembler::as_bic(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpBic, s, c);
+}
+BufferOffset Assembler::as_eor(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpEor, s, c);
+}
+BufferOffset Assembler::as_orr(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpOrr, s, c);
+}
+
+// Reverse byte operations.
+BufferOffset Assembler::as_rev(Register dest, Register src, Condition c) {
+ return writeInst((int)c | 0b0000'0110'1011'1111'0000'1111'0011'0000 |
+ RD(dest) | src.code());
+}
+BufferOffset Assembler::as_rev16(Register dest, Register src, Condition c) {
+ return writeInst((int)c | 0b0000'0110'1011'1111'0000'1111'1011'0000 |
+ RD(dest) | src.code());
+}
+BufferOffset Assembler::as_revsh(Register dest, Register src, Condition c) {
+ return writeInst((int)c | 0b0000'0110'1111'1111'0000'1111'1011'0000 |
+ RD(dest) | src.code());
+}
+
+// Mathematical operations.
+BufferOffset Assembler::as_adc(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpAdc, s, c);
+}
+BufferOffset Assembler::as_add(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpAdd, s, c);
+}
+BufferOffset Assembler::as_sbc(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpSbc, s, c);
+}
+BufferOffset Assembler::as_sub(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpSub, s, c);
+}
+BufferOffset Assembler::as_rsb(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpRsb, s, c);
+}
+BufferOffset Assembler::as_rsc(Register dest, Register src1, Operand2 op2,
+ SBit s, Condition c) {
+ return as_alu(dest, src1, op2, OpRsc, s, c);
+}
+
+// Test operations.
+BufferOffset Assembler::as_cmn(Register src1, Operand2 op2, Condition c) {
+ return as_alu(InvalidReg, src1, op2, OpCmn, SetCC, c);
+}
+BufferOffset Assembler::as_cmp(Register src1, Operand2 op2, Condition c) {
+ return as_alu(InvalidReg, src1, op2, OpCmp, SetCC, c);
+}
+BufferOffset Assembler::as_teq(Register src1, Operand2 op2, Condition c) {
+ return as_alu(InvalidReg, src1, op2, OpTeq, SetCC, c);
+}
+BufferOffset Assembler::as_tst(Register src1, Operand2 op2, Condition c) {
+ return as_alu(InvalidReg, src1, op2, OpTst, SetCC, c);
+}
+
+static constexpr Register NoAddend{Registers::pc};
+
+static const int SignExtend = 0x06000070;
+
+enum SignExtend {
+ SxSxtb = 10 << 20,
+ SxSxth = 11 << 20,
+ SxUxtb = 14 << 20,
+ SxUxth = 15 << 20
+};
+
+// Sign extension operations.
+BufferOffset Assembler::as_sxtb(Register dest, Register src, int rotate,
+ Condition c) {
+ return writeInst((int)c | SignExtend | SxSxtb | RN(NoAddend) | RD(dest) |
+ ((rotate & 3) << 10) | src.code());
+}
+BufferOffset Assembler::as_sxth(Register dest, Register src, int rotate,
+ Condition c) {
+ return writeInst((int)c | SignExtend | SxSxth | RN(NoAddend) | RD(dest) |
+ ((rotate & 3) << 10) | src.code());
+}
+BufferOffset Assembler::as_uxtb(Register dest, Register src, int rotate,
+ Condition c) {
+ return writeInst((int)c | SignExtend | SxUxtb | RN(NoAddend) | RD(dest) |
+ ((rotate & 3) << 10) | src.code());
+}
+BufferOffset Assembler::as_uxth(Register dest, Register src, int rotate,
+ Condition c) {
+ return writeInst((int)c | SignExtend | SxUxth | RN(NoAddend) | RD(dest) |
+ ((rotate & 3) << 10) | src.code());
+}
+
+static uint32_t EncodeMovW(Register dest, Imm16 imm, Assembler::Condition c) {
+ MOZ_ASSERT(HasMOVWT());
+ return 0x03000000 | c | imm.encode() | RD(dest);
+}
+
+static uint32_t EncodeMovT(Register dest, Imm16 imm, Assembler::Condition c) {
+ MOZ_ASSERT(HasMOVWT());
+ return 0x03400000 | c | imm.encode() | RD(dest);
+}
+
+// Not quite ALU worthy, but these are useful nonetheless. These also have the
+// issue of being formatted completely differently from the standard ALU
+// operations.
+BufferOffset Assembler::as_movw(Register dest, Imm16 imm, Condition c) {
+ return writeInst(EncodeMovW(dest, imm, c));
+}
+
+/* static */
+void Assembler::as_movw_patch(Register dest, Imm16 imm, Condition c,
+ Instruction* pos) {
+ WriteInstStatic(EncodeMovW(dest, imm, c), (uint32_t*)pos);
+}
+
+BufferOffset Assembler::as_movt(Register dest, Imm16 imm, Condition c) {
+ return writeInst(EncodeMovT(dest, imm, c));
+}
+
+/* static */
+void Assembler::as_movt_patch(Register dest, Imm16 imm, Condition c,
+ Instruction* pos) {
+ WriteInstStatic(EncodeMovT(dest, imm, c), (uint32_t*)pos);
+}
+
+static const int mull_tag = 0x90;
+
+BufferOffset Assembler::as_genmul(Register dhi, Register dlo, Register rm,
+ Register rn, MULOp op, SBit s, Condition c) {
+ return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | s | c |
+ mull_tag);
+}
+BufferOffset Assembler::as_mul(Register dest, Register src1, Register src2,
+ SBit s, Condition c) {
+ return as_genmul(dest, InvalidReg, src1, src2, OpmMul, s, c);
+}
+BufferOffset Assembler::as_mla(Register dest, Register acc, Register src1,
+ Register src2, SBit s, Condition c) {
+ return as_genmul(dest, acc, src1, src2, OpmMla, s, c);
+}
+BufferOffset Assembler::as_umaal(Register destHI, Register destLO,
+ Register src1, Register src2, Condition c) {
+ return as_genmul(destHI, destLO, src1, src2, OpmUmaal, LeaveCC, c);
+}
+BufferOffset Assembler::as_mls(Register dest, Register acc, Register src1,
+ Register src2, Condition c) {
+ return as_genmul(dest, acc, src1, src2, OpmMls, LeaveCC, c);
+}
+
+BufferOffset Assembler::as_umull(Register destHI, Register destLO,
+ Register src1, Register src2, SBit s,
+ Condition c) {
+ return as_genmul(destHI, destLO, src1, src2, OpmUmull, s, c);
+}
+
+BufferOffset Assembler::as_umlal(Register destHI, Register destLO,
+ Register src1, Register src2, SBit s,
+ Condition c) {
+ return as_genmul(destHI, destLO, src1, src2, OpmUmlal, s, c);
+}
+
+BufferOffset Assembler::as_smull(Register destHI, Register destLO,
+ Register src1, Register src2, SBit s,
+ Condition c) {
+ return as_genmul(destHI, destLO, src1, src2, OpmSmull, s, c);
+}
+
+BufferOffset Assembler::as_smlal(Register destHI, Register destLO,
+ Register src1, Register src2, SBit s,
+ Condition c) {
+ return as_genmul(destHI, destLO, src1, src2, OpmSmlal, s, c);
+}
+
+BufferOffset Assembler::as_sdiv(Register rd, Register rn, Register rm,
+ Condition c) {
+ return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
+}
+
+BufferOffset Assembler::as_udiv(Register rd, Register rn, Register rm,
+ Condition c) {
+ return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
+}
+
+BufferOffset Assembler::as_clz(Register dest, Register src, Condition c) {
+ MOZ_ASSERT(src != pc && dest != pc);
+ return writeInst(RD(dest) | src.code() | c | 0x016f0f10);
+}
+
+// Data transfer instructions: ldr, str, ldrb, strb. Using an int to
+// differentiate between 8 bits and 32 bits is overkill, but meh.
+
+static uint32_t EncodeDtr(LoadStore ls, int size, Index mode, Register rt,
+ DTRAddr addr, Assembler::Condition c) {
+ MOZ_ASSERT(mode == Offset || (rt != addr.getBase() && pc != addr.getBase()));
+ MOZ_ASSERT(size == 32 || size == 8);
+ return 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c | RT(rt) |
+ addr.encode();
+}
+
+BufferOffset Assembler::as_dtr(LoadStore ls, int size, Index mode, Register rt,
+ DTRAddr addr, Condition c) {
+ return writeInst(EncodeDtr(ls, size, mode, rt, addr, c));
+}
+
+/* static */
+void Assembler::as_dtr_patch(LoadStore ls, int size, Index mode, Register rt,
+ DTRAddr addr, Condition c, uint32_t* dest) {
+ WriteInstStatic(EncodeDtr(ls, size, mode, rt, addr, c), dest);
+}
+
+class PoolHintData {
+ public:
+ enum LoadType {
+ // Set 0 to bogus, since that is the value most likely to be
+ // accidentally left somewhere.
+ PoolBOGUS = 0,
+ PoolDTR = 1,
+ PoolBranch = 2,
+ PoolVDTR = 3
+ };
+
+ private:
+ uint32_t index_ : 16;
+ uint32_t cond_ : 4;
+ uint32_t loadType_ : 2;
+ uint32_t destReg_ : 5;
+ uint32_t destType_ : 1;
+ uint32_t ONES : 4;
+
+ static const uint32_t ExpectedOnes = 0xfu;
+
+ public:
+ void init(uint32_t index, Assembler::Condition cond, LoadType lt,
+ Register destReg) {
+ index_ = index;
+ MOZ_ASSERT(index_ == index);
+ cond_ = cond >> 28;
+ MOZ_ASSERT(cond_ == cond >> 28);
+ loadType_ = lt;
+ ONES = ExpectedOnes;
+ destReg_ = destReg.code();
+ destType_ = 0;
+ }
+ void init(uint32_t index, Assembler::Condition cond, LoadType lt,
+ const VFPRegister& destReg) {
+ MOZ_ASSERT(destReg.isFloat());
+ index_ = index;
+ MOZ_ASSERT(index_ == index);
+ cond_ = cond >> 28;
+ MOZ_ASSERT(cond_ == cond >> 28);
+ loadType_ = lt;
+ ONES = ExpectedOnes;
+ destReg_ = destReg.id();
+ destType_ = destReg.isDouble();
+ }
+ Assembler::Condition getCond() const {
+ return Assembler::Condition(cond_ << 28);
+ }
+
+ Register getReg() const { return Register::FromCode(destReg_); }
+ VFPRegister getVFPReg() const {
+ VFPRegister r = VFPRegister(
+ destReg_, destType_ ? VFPRegister::Double : VFPRegister::Single);
+ return r;
+ }
+
+ int32_t getIndex() const { return index_; }
+ void setIndex(uint32_t index) {
+ MOZ_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS);
+ index_ = index;
+ MOZ_ASSERT(index_ == index);
+ }
+
+ LoadType getLoadType() const {
+    // If this *was* a PoolBranch but the branch has already been bound,
+    // then this isn't going to look like real PoolHintData, but we still
+    // want to lie about it so everyone knows it *used* to be a branch.
+ if (ONES != ExpectedOnes) {
+ return PoolHintData::PoolBranch;
+ }
+ return static_cast<LoadType>(loadType_);
+ }
+
+ bool isValidPoolHint() const {
+ // Most instructions cannot have a condition that is 0xf. Notable
+ // exceptions are blx and the entire NEON instruction set. For the
+ // purposes of pool loads, and possibly patched branches, the possible
+ // instructions are ldr and b, neither of which can have a condition
+ // code of 0xf.
+ return ONES == ExpectedOnes;
+ }
+};
+
+union PoolHintPun {
+ PoolHintData phd;
+ uint32_t raw;
+};
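+
+// Rough usage sketch (illustrative): a literal load is emitted by punning a
+// PoolHintData through PoolHintPun and handing the raw word to
+// allocLiteralLoadEntry, along the lines of:
+//
+//   PoolHintPun php;
+//   php.phd.init(0, Always, PoolHintData::PoolDTR, r0);
+//   // php.raw is now a placeholder word that PatchConstantPoolLoad later
+//   // rewrites into a real pc-relative ldr once the pool has been placed.
+//
+// as_Imm32Pool below follows exactly this pattern.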
+
+// Handles all of the other integral data transferring functions: ldrsb, ldrsh,
+// ldrd, etc. The size is given in bits.
+BufferOffset Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned,
+ Index mode, Register rt, EDtrAddr addr,
+ Condition c) {
+ int extra_bits2 = 0;
+ int extra_bits1 = 0;
+ switch (size) {
+ case 8:
+ MOZ_ASSERT(IsSigned);
+ MOZ_ASSERT(ls != IsStore);
+ extra_bits1 = 0x1;
+ extra_bits2 = 0x2;
+ break;
+ case 16:
+      // 'case 32' doesn't need to be handled; it is handled by the default
+      // ldr/str.
+ extra_bits2 = 0x01;
+ extra_bits1 = (ls == IsStore) ? 0 : 1;
+ if (IsSigned) {
+ MOZ_ASSERT(ls != IsStore);
+ extra_bits2 |= 0x2;
+ }
+ break;
+ case 64:
+ extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
+ extra_bits1 = 0;
+ break;
+ default:
+ MOZ_CRASH("unexpected size in as_extdtr");
+ }
+ return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 | addr.encode() |
+ RT(rt) | mode | c);
+}
+
+BufferOffset Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
+ DTMMode mode, DTMWriteBack wb, Condition c) {
+ return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
+}
+
+BufferOffset Assembler::allocLiteralLoadEntry(
+ size_t numInst, unsigned numPoolEntries, PoolHintPun& php, uint8_t* data,
+ const LiteralDoc& doc, ARMBuffer::PoolEntry* pe, bool loadToPC) {
+ uint8_t* inst = (uint8_t*)&php.raw;
+
+ MOZ_ASSERT(inst);
+ MOZ_ASSERT(numInst == 1); // Or fix the disassembly
+
+ BufferOffset offs =
+ m_buffer.allocEntry(numInst, numPoolEntries, inst, data, pe);
+ propagateOOM(offs.assigned());
+#ifdef JS_DISASM_ARM
+ Instruction* instruction = m_buffer.getInstOrNull(offs);
+ if (instruction) {
+ spewLiteralLoad(php, loadToPC, instruction, doc);
+ }
+#endif
+ return offs;
+}
+
+// This is also used for instructions that might be resolved into branches,
+// or might not. If dest==pc then it is effectively a branch.
+
+BufferOffset Assembler::as_Imm32Pool(Register dest, uint32_t value,
+ Condition c) {
+ PoolHintPun php;
+ php.phd.init(0, c, PoolHintData::PoolDTR, dest);
+ BufferOffset offs = allocLiteralLoadEntry(
+ 1, 1, php, (uint8_t*)&value, LiteralDoc(value), nullptr, dest == pc);
+ return offs;
+}
+
+/* static */
+void Assembler::WritePoolEntry(Instruction* addr, Condition c, uint32_t data) {
+ MOZ_ASSERT(addr->is<InstLDR>());
+ *addr->as<InstLDR>()->dest() = data;
+ MOZ_ASSERT(addr->extractCond() == c);
+}
+
+BufferOffset Assembler::as_FImm64Pool(VFPRegister dest, double d, Condition c) {
+ MOZ_ASSERT(dest.isDouble());
+ PoolHintPun php;
+ php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
+ return allocLiteralLoadEntry(1, 2, php, (uint8_t*)&d, LiteralDoc(d));
+}
+
+BufferOffset Assembler::as_FImm32Pool(VFPRegister dest, float f, Condition c) {
+ // Insert floats into the double pool as they have the same limitations on
+  // immediate offset. This wastes 4 bytes of padding per float. An alternative
+ // would be to have a separate pool for floats.
+ MOZ_ASSERT(dest.isSingle());
+ PoolHintPun php;
+ php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
+ return allocLiteralLoadEntry(1, 1, php, (uint8_t*)&f, LiteralDoc(f));
+}
+
+// Pool callbacks stuff:
+void Assembler::InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
+ uint32_t* load = (uint32_t*)load_;
+ PoolHintPun php;
+ php.raw = *load;
+ php.phd.setIndex(index);
+ *load = php.raw;
+}
+
+// PatchConstantPoolLoad takes the address of the instruction that wants to be
+// patched, and the address of the start of the constant pool, and figures
+// things out from there.
+void Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
+ PoolHintData data = *(PoolHintData*)loadAddr;
+ uint32_t* instAddr = (uint32_t*)loadAddr;
+ int offset = (char*)constPoolAddr - (char*)loadAddr;
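+  // Note: the "- 8" in the offsets below accounts for A32 pc-relative
+  // addressing, where reading pc yields the address of the current
+  // instruction plus 8.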
+ switch (data.getLoadType()) {
+ case PoolHintData::PoolBOGUS:
+ MOZ_CRASH("bogus load type!");
+ case PoolHintData::PoolDTR:
+ Assembler::as_dtr_patch(
+ IsLoad, 32, Offset, data.getReg(),
+ DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
+ data.getCond(), instAddr);
+ break;
+ case PoolHintData::PoolBranch:
+ // Either this used to be a poolBranch, and the label was already bound,
+ // so it was replaced with a real branch, or this may happen in the
+ // future. If this is going to happen in the future, then the actual
+ // bits that are written here don't matter (except the condition code,
+ // since that is always preserved across patchings) but if it does not
+ // get bound later, then we want to make sure this is a load from the
+ // pool entry (and the pool entry should be nullptr so it will crash).
+ if (data.isValidPoolHint()) {
+ Assembler::as_dtr_patch(
+ IsLoad, 32, Offset, pc,
+ DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
+ data.getCond(), instAddr);
+ }
+ break;
+ case PoolHintData::PoolVDTR: {
+ VFPRegister dest = data.getVFPReg();
+ int32_t imm = offset + (data.getIndex() * 4) - 8;
+ MOZ_ASSERT(-1024 < imm && imm < 1024);
+ Assembler::as_vdtr_patch(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)),
+ data.getCond(), instAddr);
+ break;
+ }
+ }
+}
+
+// Atomic instruction stuff:
+
+BufferOffset Assembler::as_ldrexd(Register rt, Register rt2, Register rn,
+ Condition c) {
+ MOZ_ASSERT(!(rt.code() & 1) && rt2.code() == rt.code() + 1);
+ MOZ_ASSERT(rt.code() != 14 && rn.code() != 15);
+ return writeInst(0x01b00f9f | (int)c | RT(rt) | RN(rn));
+}
+
+BufferOffset Assembler::as_ldrex(Register rt, Register rn, Condition c) {
+ MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
+ return writeInst(0x01900f9f | (int)c | RT(rt) | RN(rn));
+}
+
+BufferOffset Assembler::as_ldrexh(Register rt, Register rn, Condition c) {
+ MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
+ return writeInst(0x01f00f9f | (int)c | RT(rt) | RN(rn));
+}
+
+BufferOffset Assembler::as_ldrexb(Register rt, Register rn, Condition c) {
+ MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
+ return writeInst(0x01d00f9f | (int)c | RT(rt) | RN(rn));
+}
+
+BufferOffset Assembler::as_strexd(Register rd, Register rt, Register rt2,
+ Register rn, Condition c) {
+ MOZ_ASSERT(!(rt.code() & 1) && rt2.code() == rt.code() + 1);
+ MOZ_ASSERT(rt.code() != 14 && rn.code() != 15 && rd.code() != 15);
+ MOZ_ASSERT(rd != rn && rd != rt && rd != rt2);
+ return writeInst(0x01a00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
+}
+
+BufferOffset Assembler::as_strex(Register rd, Register rt, Register rn,
+ Condition c) {
+ MOZ_ASSERT(rd != rn && rd != rt); // True restriction on Cortex-A7 (RPi2)
+ return writeInst(0x01800f90 | (int)c | RD(rd) | RN(rn) | rt.code());
+}
+
+BufferOffset Assembler::as_strexh(Register rd, Register rt, Register rn,
+ Condition c) {
+ MOZ_ASSERT(rd != rn && rd != rt); // True restriction on Cortex-A7 (RPi2)
+ return writeInst(0x01e00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
+}
+
+BufferOffset Assembler::as_strexb(Register rd, Register rt, Register rn,
+ Condition c) {
+ MOZ_ASSERT(rd != rn && rd != rt); // True restriction on Cortex-A7 (RPi2)
+ return writeInst(0x01c00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
+}
+
+BufferOffset Assembler::as_clrex() { return writeInst(0xf57ff01f); }
+
+// Memory barrier stuff:
+
+BufferOffset Assembler::as_dmb(BarrierOption option) {
+ return writeInst(0xf57ff050U | (int)option);
+}
+BufferOffset Assembler::as_dsb(BarrierOption option) {
+ return writeInst(0xf57ff040U | (int)option);
+}
+BufferOffset Assembler::as_isb() {
+ return writeInst(0xf57ff06fU); // option == SY
+}
+BufferOffset Assembler::as_dsb_trap() {
+ // DSB is "mcr 15, 0, r0, c7, c10, 4".
+ // See eg https://bugs.kde.org/show_bug.cgi?id=228060.
+ // ARMv7 manual, "VMSA CP15 c7 register summary".
+ // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
+ // ARMv8 manual E2.7.3 and G3.18.16.
+ return writeInst(0xee070f9a);
+}
+BufferOffset Assembler::as_dmb_trap() {
+ // DMB is "mcr 15, 0, r0, c7, c10, 5".
+ // ARMv7 manual, "VMSA CP15 c7 register summary".
+ // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
+ // ARMv8 manual E2.7.3 and G3.18.16.
+ return writeInst(0xee070fba);
+}
+BufferOffset Assembler::as_isb_trap() {
+ // ISB is "mcr 15, 0, r0, c7, c5, 4".
+ // ARMv7 manual, "VMSA CP15 c7 register summary".
+ // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
+ // ARMv8 manual E2.7.3 and G3.18.16.
+ return writeInst(0xee070f94);
+}
+
+BufferOffset Assembler::as_csdb() {
+ // NOP (see as_nop) on architectures where this instruction is not defined.
+ //
+ // https://developer.arm.com/-/media/developer/pdf/Cache_Speculation_Side-channels_22Feb18.pdf
+ // CSDB A32: 1110_0011_0010_0000_1111_0000_0001_0100
+ return writeInst(0xe320f000 | 0x14);
+}
+
+// Control flow stuff:
+
+// bx can *only* branch to a register, never to an immediate.
+BufferOffset Assembler::as_bx(Register r, Condition c) {
+ BufferOffset ret = writeInst(((int)c) | OpBx | r.code());
+ return ret;
+}
+
+void Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest,
+ BufferOffset afterPool) {
+ BOffImm off = afterPool.diffB<BOffImm>(branch);
+ if (off.isInvalid()) {
+ MOZ_CRASH("BOffImm invalid");
+ }
+ *dest = InstBImm(off, Always);
+}
+
+// Branch can branch to an immediate *or* to a register.
+// Branches to immediates are pc relative; branches to registers are absolute.
+BufferOffset Assembler::as_b(BOffImm off, Condition c, Label* documentation) {
+ return writeBranchInst(((int)c) | OpB | off.encode(),
+ refLabel(documentation));
+}
+
+BufferOffset Assembler::as_b(Label* l, Condition c) {
+ if (l->bound()) {
+ // Note only one instruction is emitted here, the NOP is overwritten.
+ BufferOffset ret = allocBranchInst();
+ if (oom()) {
+ return BufferOffset();
+ }
+
+ BOffImm offset = BufferOffset(l).diffB<BOffImm>(ret);
+ MOZ_RELEASE_ASSERT(!offset.isInvalid(),
+ "Buffer size limit should prevent this");
+ as_b(offset, c, ret);
+#ifdef JS_DISASM_ARM
+ spewBranch(m_buffer.getInstOrNull(ret), refLabel(l));
+#endif
+ return ret;
+ }
+
+ if (oom()) {
+ return BufferOffset();
+ }
+
+ BufferOffset ret;
+ if (l->used()) {
+ int32_t old = l->offset();
+ MOZ_RELEASE_ASSERT(BOffImm::IsInRange(old),
+ "Buffer size limit should prevent this");
+ ret = as_b(BOffImm(old), c, l);
+ } else {
+ BOffImm inv;
+ ret = as_b(inv, c, l);
+ }
+
+ if (oom()) {
+ return BufferOffset();
+ }
+
+ l->use(ret.getOffset());
+ return ret;
+}
+
+BufferOffset Assembler::as_b(BOffImm off, Condition c, BufferOffset inst) {
+ // JS_DISASM_ARM NOTE: Can't disassemble here, because numerous callers use
+ // this to patchup old code. Must disassemble in caller where it makes sense.
+ // Not many callers.
+ *editSrc(inst) = InstBImm(off, c);
+ return inst;
+}
+
+// blx can go to either an immediate or a register.
+// When blx'ing to a register, we change processor state depending on the low
+// bit of the register; when blx'ing to an immediate, we *always* change
+// processor state.
+
+BufferOffset Assembler::as_blx(Register r, Condition c) {
+ return writeInst(((int)c) | OpBlx | r.code());
+}
+
+// bl can only branch to a pc-relative immediate offset.
+// It cannot change the processor state.
+BufferOffset Assembler::as_bl(BOffImm off, Condition c, Label* documentation) {
+ return writeBranchInst(((int)c) | OpBl | off.encode(),
+ refLabel(documentation));
+}
+
+BufferOffset Assembler::as_bl(Label* l, Condition c) {
+ if (l->bound()) {
+ // Note only one instruction is emitted here, the NOP is overwritten.
+ BufferOffset ret = allocBranchInst();
+ if (oom()) {
+ return BufferOffset();
+ }
+
+ BOffImm offset = BufferOffset(l).diffB<BOffImm>(ret);
+ MOZ_RELEASE_ASSERT(!offset.isInvalid(),
+ "Buffer size limit should prevent this");
+
+ as_bl(offset, c, ret);
+#ifdef JS_DISASM_ARM
+ spewBranch(m_buffer.getInstOrNull(ret), refLabel(l));
+#endif
+ return ret;
+ }
+
+ if (oom()) {
+ return BufferOffset();
+ }
+
+ BufferOffset ret;
+ // See if the list was empty.
+ if (l->used()) {
+ int32_t old = l->offset();
+ MOZ_RELEASE_ASSERT(BOffImm::IsInRange(old),
+ "Buffer size limit should prevent this");
+ ret = as_bl(BOffImm(old), c, l);
+ } else {
+ BOffImm inv;
+ ret = as_bl(inv, c, l);
+ }
+
+ if (oom()) {
+ return BufferOffset();
+ }
+
+ l->use(ret.getOffset());
+ return ret;
+}
+
+BufferOffset Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst) {
+ *editSrc(inst) = InstBLImm(off, c);
+ return inst;
+}
+
+BufferOffset Assembler::as_mrs(Register r, Condition c) {
+ return writeInst(0x010f0000 | int(c) | RD(r));
+}
+
+BufferOffset Assembler::as_msr(Register r, Condition c) {
+ // Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
+ // are the two high bits of the 'c' in this constant.
+ MOZ_ASSERT((r.code() & ~0xf) == 0);
+ return writeInst(0x012cf000 | int(c) | r.code());
+}
+
+// VFP instructions!
+enum vfp_tags { VfpTag = 0x0C000A00, VfpArith = 0x02000000 };
+
+BufferOffset Assembler::writeVFPInst(vfp_size sz, uint32_t blob) {
+ MOZ_ASSERT((sz & blob) == 0);
+ MOZ_ASSERT((VfpTag & blob) == 0);
+ return writeInst(VfpTag | std::underlying_type_t<vfp_size>(sz) | blob);
+}
+
+/* static */
+void Assembler::WriteVFPInstStatic(vfp_size sz, uint32_t blob, uint32_t* dest) {
+ MOZ_ASSERT((sz & blob) == 0);
+ MOZ_ASSERT((VfpTag & blob) == 0);
+ WriteInstStatic(VfpTag | std::underlying_type_t<vfp_size>(sz) | blob, dest);
+}
+
+// Unityped variants: all registers hold the same type (IEEE 754 single or
+// double). Notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
+BufferOffset Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn,
+ VFPRegister vm, VFPOp op, Condition c) {
+ // Make sure we believe that all of our operands are the same kind.
+ MOZ_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
+ MOZ_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+ return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c);
+}
+
+BufferOffset Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c) {
+ return as_vfp_float(vd, vn, vm, OpvAdd, c);
+}
+
+BufferOffset Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c) {
+ return as_vfp_float(vd, vn, vm, OpvDiv, c);
+}
+
+BufferOffset Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c) {
+ return as_vfp_float(vd, vn, vm, OpvMul, c);
+}
+
+BufferOffset Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c) {
+ return as_vfp_float(vd, vn, vm, OpvMul, c);
+}
+
+BufferOffset Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c) {
+ MOZ_CRASH("Feature NYI");
+}
+
+BufferOffset Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c) {
+ MOZ_CRASH("Feature NYI");
+}
+
+BufferOffset Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c) {
+ return as_vfp_float(vd, NoVFPRegister, vm, OpvNeg, c);
+}
+
+BufferOffset Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c) {
+ return as_vfp_float(vd, NoVFPRegister, vm, OpvSqrt, c);
+}
+
+BufferOffset Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c) {
+ return as_vfp_float(vd, NoVFPRegister, vm, OpvAbs, c);
+}
+
+BufferOffset Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c) {
+ return as_vfp_float(vd, vn, vm, OpvSub, c);
+}
+
+BufferOffset Assembler::as_vcmp(VFPRegister vd, VFPRegister vm, Condition c) {
+ return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c);
+}
+
+BufferOffset Assembler::as_vcmpz(VFPRegister vd, Condition c) {
+ return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, OpvCmpz, c);
+}
+
+// Specifically, a move between two same sized-registers.
+BufferOffset Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c) {
+ return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
+}
+
+// Transfer between Core and VFP.
+
+// Unlike the next function, moving between the core registers and VFP
+// registers can't be typed quite as precisely, since we don't want to munge
+// the VFPRegister type to also include core registers. Thus, the core and VFP
+// registers are passed in based on their type, and src/dest is determined by
+// the float2core argument.
+
+BufferOffset Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm,
+ FloatToCore_ f2c, Condition c, int idx) {
+ vfp_size sz = IsSingle;
+ if (vm.isDouble()) {
+    // Technically, this can be done with a vmov (as described in the ARM ARM
+    // under vmov); however, that requires at least an extra bit saying whether
+    // the operation should be performed on the lower or upper half of the
+    // double. Moving a single to/from 2N/2N+1 isn't equivalent either: the 32
+    // single registers only overlay the first 16 of the 32 double registers,
+    // so there is no way to encode the last 16 double registers that way.
+ sz = IsDouble;
+ MOZ_ASSERT(idx == 0 || idx == 1);
+ // If we are transferring a single half of the double then it must be
+ // moving a VFP reg to a core reg.
+ MOZ_ASSERT_IF(vt2 == InvalidReg, f2c == FloatToCore);
+ idx = idx << 21;
+ } else {
+ MOZ_ASSERT(idx == 0);
+ }
+
+ if (vt2 == InvalidReg) {
+ return writeVFPInst(sz, WordTransfer |
+ std::underlying_type_t<FloatToCore_>(f2c) |
+ std::underlying_type_t<Condition>(c) | RT(vt1) |
+ maybeRN(vt2) | VN(vm) | idx);
+ }
+
+ // We are doing a 64 bit transfer.
+ return writeVFPInst(sz, DoubleTransfer |
+ std::underlying_type_t<FloatToCore_>(f2c) |
+ std::underlying_type_t<Condition>(c) | RT(vt1) |
+ maybeRN(vt2) | VM(vm) | idx);
+}
+
+enum vcvt_destFloatness { VcvtToInteger = 1 << 18, VcvtToFloat = 0 << 18 };
+enum vcvt_toZero {
+  VcvtToZero = 1 << 7,   // Use the default rounding mode, which truncates
+                         // (rounds towards zero).
+  VcvtToFPSCR = 0 << 7   // Use whatever rounding mode the FPSCR specifies.
+};
+enum vcvt_Signedness {
+ VcvtToSigned = 1 << 16,
+ VcvtToUnsigned = 0 << 16,
+ VcvtFromSigned = 1 << 7,
+ VcvtFromUnsigned = 0 << 7
+};
+
+// Our encoding actually allows just the src and the dest (and their types) to
+// uniquely specify the encoding that we are going to use.
+BufferOffset Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
+ Condition c) {
+ // Unlike other cases, the source and dest types cannot be the same.
+ MOZ_ASSERT(!vd.equiv(vm));
+ vfp_size sz = IsDouble;
+ if (vd.isFloat() && vm.isFloat()) {
+ // Doing a float -> float conversion.
+ if (vm.isSingle()) {
+ sz = IsSingle;
+ }
+ return writeVFPInst(sz, c | 0x02B700C0 | VM(vm) | VD(vd));
+ }
+
+ // At least one of the registers should be a float.
+ vcvt_destFloatness destFloat;
+ vcvt_Signedness opSign;
+ vcvt_toZero doToZero = VcvtToFPSCR;
+ MOZ_ASSERT(vd.isFloat() || vm.isFloat());
+ if (vd.isSingle() || vm.isSingle()) {
+ sz = IsSingle;
+ }
+
+ if (vd.isFloat()) {
+ destFloat = VcvtToFloat;
+ opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned;
+ } else {
+ destFloat = VcvtToInteger;
+ opSign = (vd.isSInt()) ? VcvtToSigned : VcvtToUnsigned;
+ doToZero = useFPSCR ? VcvtToFPSCR : VcvtToZero;
+ }
+ return writeVFPInst(
+ sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
+}
+
+BufferOffset Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned,
+ uint32_t fixedPoint, bool toFixed,
+ Condition c) {
+ MOZ_ASSERT(vd.isFloat());
+ uint32_t sx = 0x1;
+ vfp_size sf = vd.isDouble() ? IsDouble : IsSingle;
+ int32_t imm5 = fixedPoint;
+ imm5 = (sx ? 32 : 16) - imm5;
+ MOZ_ASSERT(imm5 >= 0);
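+  // Split the 5-bit value for the encoding: its upper four bits land in the
+  // low nibble of the instruction word and its lowest bit in bit 5 (the
+  // imm4:i split used by vcvt's fixed-point encoding).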
+ imm5 = imm5 >> 1 | (imm5 & 1) << 5;
+ return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
+ (!isSigned) << 16 | imm5 | c);
+}
+
+// Transfer between VFP and memory.
+static uint32_t EncodeVdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
+ Assembler::Condition c) {
+ return ls | 0x01000000 | addr.encode() | VD(vd) | c;
+}
+
+BufferOffset Assembler::as_vdtr(
+ LoadStore ls, VFPRegister vd, VFPAddr addr,
+ Condition c /* vfp doesn't have a wb option */) {
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+ return writeVFPInst(sz, EncodeVdtr(ls, vd, addr, c));
+}
+
+/* static */
+void Assembler::as_vdtr_patch(LoadStore ls, VFPRegister vd, VFPAddr addr,
+ Condition c, uint32_t* dest) {
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+ WriteVFPInstStatic(sz, EncodeVdtr(ls, vd, addr, c), dest);
+}
+
+// VFP's ldm/stm work differently from the standard arm ones. You can only
+// transfer a range.
+
+BufferOffset Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd,
+ int length,
+ /* also has update conditions */ Condition c) {
+ MOZ_ASSERT(length <= 16 && length >= 0);
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+
+ if (vd.isDouble()) {
+ length *= 2;
+ }
+
+ return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | length | dtmMode |
+ dtmUpdate | dtmCond);
+}
+
+BufferOffset Assembler::as_vldr_unaligned(VFPRegister vd, Register rn) {
+ MOZ_ASSERT(HasNEON());
+ if (vd.isDouble()) {
+ // vld1 (multiple single elements) with align=0, size=3, numregs=1
+ return writeInst(0xF42007CF | RN(rn) | VD(vd));
+ }
+ // vld1 (single element to single lane) with index=0, size=2
+ MOZ_ASSERT(vd.isFloat());
+ MOZ_ASSERT((vd.code() & 1) == 0);
+ return writeInst(0xF4A0080F | RN(rn) | VD(vd.asDouble()));
+}
+
+BufferOffset Assembler::as_vstr_unaligned(VFPRegister vd, Register rn) {
+ MOZ_ASSERT(HasNEON());
+ if (vd.isDouble()) {
+ // vst1 (multiple single elements) with align=0, size=3, numregs=1
+ return writeInst(0xF40007CF | RN(rn) | VD(vd));
+ }
+ // vst1 (single element from one lane) with index=0, size=2
+ MOZ_ASSERT(vd.isFloat());
+ MOZ_ASSERT((vd.code() & 1) == 0);
+ return writeInst(0xF480080F | RN(rn) | VD(vd.asDouble()));
+}
+
+BufferOffset Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c) {
+ MOZ_ASSERT(imm.isValid());
+ vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+ return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000);
+}
+
+BufferOffset Assembler::as_vmrs(Register r, Condition c) {
+ return writeInst(c | 0x0ef10a10 | RT(r));
+}
+
+BufferOffset Assembler::as_vmsr(Register r, Condition c) {
+ return writeInst(c | 0x0ee10a10 | RT(r));
+}
+
+bool Assembler::nextLink(BufferOffset b, BufferOffset* next) {
+ Instruction branch = *editSrc(b);
+ MOZ_ASSERT(branch.is<InstBranchImm>());
+
+ BOffImm destOff;
+ branch.as<InstBranchImm>()->extractImm(&destOff);
+ if (destOff.isInvalid()) {
+ return false;
+ }
+
+ // Propagate the next link back to the caller, by constructing a new
+ // BufferOffset into the space they provided.
+ new (next) BufferOffset(destOff.decode());
+ return true;
+}
+
+void Assembler::bind(Label* label, BufferOffset boff) {
+#ifdef JS_DISASM_ARM
+ spew_.spewBind(label);
+#endif
+ if (oom()) {
+ // Ensure we always bind the label. This matches what we do on
+ // x86/x64 and silences the assert in ~Label.
+ label->bind(0);
+ return;
+ }
+
+ if (label->used()) {
+ bool more;
+ // If our caller didn't give us an explicit target to bind to then we
+ // want to bind to the location of the next instruction.
+ BufferOffset dest = boff.assigned() ? boff : nextOffset();
+ BufferOffset b(label);
+ do {
+ BufferOffset next;
+ more = nextLink(b, &next);
+ Instruction branch = *editSrc(b);
+ Condition c = branch.extractCond();
+ BOffImm offset = dest.diffB<BOffImm>(b);
+ MOZ_RELEASE_ASSERT(!offset.isInvalid(),
+ "Buffer size limit should prevent this");
+ if (branch.is<InstBImm>()) {
+ as_b(offset, c, b);
+ } else if (branch.is<InstBLImm>()) {
+ as_bl(offset, c, b);
+ } else {
+ MOZ_CRASH("crazy fixup!");
+ }
+ b = next;
+ } while (more);
+ }
+ label->bind(nextOffset().getOffset());
+ MOZ_ASSERT(!oom());
+}
+
+void Assembler::retarget(Label* label, Label* target) {
+#ifdef JS_DISASM_ARM
+ spew_.spewRetarget(label, target);
+#endif
+ if (label->used() && !oom()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ BufferOffset labelBranchOffset(label);
+ BufferOffset next;
+
+ // Find the head of the use chain for label.
+ while (nextLink(labelBranchOffset, &next)) {
+ labelBranchOffset = next;
+ }
+
+ // Then patch the head of label's use chain to the tail of target's
+ // use chain, prepending the entire use chain of target.
+ Instruction branch = *editSrc(labelBranchOffset);
+ Condition c = branch.extractCond();
+ int32_t prev = target->offset();
+ target->use(label->offset());
+ if (branch.is<InstBImm>()) {
+ as_b(BOffImm(prev), c, labelBranchOffset);
+ } else if (branch.is<InstBLImm>()) {
+ as_bl(BOffImm(prev), c, labelBranchOffset);
+ } else {
+ MOZ_CRASH("crazy fixup!");
+ }
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ target->use(label->offset());
+ }
+ }
+ label->reset();
+}
+
+static int stopBKPT = -1;
+void Assembler::as_bkpt() {
+ // This is a count of how many times a breakpoint instruction has been
+ // generated. It is embedded into the instruction for debugging
+  // purposes. Gdb will print "bkpt xxx" when you attempt to disassemble a
+ // breakpoint with the number xxx embedded into it. If this breakpoint is
+ // being hit, then you can run (in gdb):
+ // >b dbg_break
+ // >b main
+ // >commands
+ // >set stopBKPT = xxx
+ // >c
+ // >end
+  // which will set a breakpoint on the function dbg_break above and a
+  // scripted breakpoint on main that sets the (otherwise unmodified)
+  // stopBKPT value to the number of the breakpoint, so that dbg_break will
+  // actually be called; finally, when you run the executable, execution will
+  // halt when that breakpoint is generated.
+ static int hit = 0;
+ if (stopBKPT == hit) {
+ dbg_break();
+ }
+ writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0) << 4));
+ hit++;
+}
+
+BufferOffset Assembler::as_illegal_trap() {
+ // Encoding of the permanently-undefined 'udf' instruction, with the imm16
+ // set to 0.
+ return writeInst(0xe7f000f0);
+}
+
+void Assembler::flushBuffer() { m_buffer.flushPool(); }
+
+void Assembler::enterNoPool(size_t maxInst) { m_buffer.enterNoPool(maxInst); }
+
+void Assembler::leaveNoPool() { m_buffer.leaveNoPool(); }
+
+void Assembler::enterNoNops() { m_buffer.enterNoNops(); }
+
+void Assembler::leaveNoNops() { m_buffer.leaveNoNops(); }
+
+struct PoolHeader : Instruction {
+ struct Header {
+ // The size should take into account the pool header.
+    // The size is in units of Instruction (4 bytes), not bytes.
+ uint32_t size : 15;
+ uint32_t isNatural : 1;
+ uint32_t ONES : 16;
+
+ Header(int size_, bool isNatural_)
+ : size(size_), isNatural(isNatural_), ONES(0xffff) {}
+
+ explicit Header(const Instruction* i) {
+ static_assert(sizeof(Header) == sizeof(uint32_t));
+ memcpy(this, i, sizeof(Header));
+ MOZ_ASSERT(ONES == 0xffff);
+ }
+
+ uint32_t raw() const {
+ static_assert(sizeof(Header) == sizeof(uint32_t));
+ uint32_t dest;
+ memcpy(&dest, this, sizeof(Header));
+ return dest;
+ }
+ };
+
+ PoolHeader(int size_, bool isNatural_)
+ : Instruction(Header(size_, isNatural_).raw(), true) {}
+
+ uint32_t size() const {
+ Header tmp(this);
+ return tmp.size;
+ }
+ uint32_t isNatural() const {
+ Header tmp(this);
+ return tmp.isNatural;
+ }
+
+ static bool IsTHIS(const Instruction& i) {
+ return (*i.raw() & 0xffff0000) == 0xffff0000;
+ }
+ static const PoolHeader* AsTHIS(const Instruction& i) {
+ if (!IsTHIS(i)) {
+ return nullptr;
+ }
+ return static_cast<const PoolHeader*>(&i);
+ }
+};
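+
+// A pool header word therefore looks like 0xffff0000 | (isNatural << 15) |
+// size, e.g. 0xffff8002 for a natural pool with one entry plus the header;
+// these are the values used in the worked examples before
+// InstructionIterator::next() below.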
+
+void Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural) {
+  static_assert(sizeof(PoolHeader) == 4,
+                "PoolHeader must have the correct size.");
+ uint8_t* pool = start + 4;
+ // Go through the usual rigmarole to get the size of the pool.
+ pool += p->getPoolSize();
+ uint32_t size = pool - start;
+ MOZ_ASSERT((size & 3) == 0);
+ size = size >> 2;
+ MOZ_ASSERT(size < (1 << 15));
+ PoolHeader header(size, isNatural);
+ *(PoolHeader*)start = header;
+}
+
+// The size of an arbitrary 32-bit call in the instruction stream. On ARM this
+// sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32.
+uint32_t Assembler::PatchWrite_NearCallSize() { return sizeof(uint32_t); }
+
+void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall) {
+ Instruction* inst = (Instruction*)start.raw();
+ // Overwrite whatever instruction used to be here with a call. Since the
+ // destination is in the same function, it will be within range of the
+ // 24 << 2 byte bl instruction.
+ uint8_t* dest = toCall.raw();
+ new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst), Always);
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue) {
+ Instruction* ptr = reinterpret_cast<Instruction*>(label.raw());
+
+ Register dest;
+ Assembler::RelocStyle rs;
+
+ {
+ InstructionIterator iter(ptr);
+ DebugOnly<const uint32_t*> val = GetPtr32Target(iter, &dest, &rs);
+ MOZ_ASSERT(uint32_t((const uint32_t*)val) == uint32_t(expectedValue.value));
+ }
+
+ // Patch over actual instructions.
+ {
+ InstructionIterator iter(ptr);
+ MacroAssembler::ma_mov_patch(Imm32(int32_t(newValue.value)), dest, Always,
+ rs, iter);
+ }
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ ImmPtr newValue, ImmPtr expectedValue) {
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. It is
+// only meant to function on code that has been invalidated, so it should be
+// totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary.
+void Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will end up being
+ // the call instruction.
+ *(raw - 1) = imm.value;
+}
+
+uint8_t* Assembler::NextInstruction(uint8_t* inst_, uint32_t* count) {
+ if (count != nullptr) {
+ *count += sizeof(Instruction);
+ }
+
+ InstructionIterator iter(reinterpret_cast<Instruction*>(inst_));
+ return reinterpret_cast<uint8_t*>(iter.next());
+}
+
+static bool InstIsGuard(Instruction* inst, const PoolHeader** ph) {
+ Assembler::Condition c = inst->extractCond();
+ if (c != Assembler::Always) {
+ return false;
+ }
+ if (!(inst->is<InstBXReg>() || inst->is<InstBImm>())) {
+ return false;
+ }
+ // See if the next instruction is a pool header.
+ *ph = (inst + 1)->as<const PoolHeader>();
+ return *ph != nullptr;
+}
+
+static bool InstIsGuard(BufferInstructionIterator& iter,
+ const PoolHeader** ph) {
+ Instruction* inst = iter.cur();
+ Assembler::Condition c = inst->extractCond();
+ if (c != Assembler::Always) {
+ return false;
+ }
+ if (!(inst->is<InstBXReg>() || inst->is<InstBImm>())) {
+ return false;
+ }
+ // See if the next instruction is a pool header.
+ *ph = iter.peek()->as<const PoolHeader>();
+ return *ph != nullptr;
+}
+
+template <class T>
+static bool InstIsBNop(const T& iter) {
+  // In some special situations, it is necessary to insert a NOP into the
+  // instruction stream that nobody knows about; since nobody should know
+  // about it, make sure it gets skipped when Instruction::next() is called.
+  // This is done with a very specific nop, namely a branch to the next
+  // instruction.
+ const Instruction* cur = iter.cur();
+ Assembler::Condition c = cur->extractCond();
+ if (c != Assembler::Always) {
+ return false;
+ }
+ if (!cur->is<InstBImm>()) {
+ return false;
+ }
+ InstBImm* b = cur->as<InstBImm>();
+ BOffImm offset;
+ b->extractImm(&offset);
+ return offset.decode() == 4;
+}
+
+Instruction* InstructionIterator::maybeSkipAutomaticInstructions() {
+ // If the current instruction was automatically-inserted, skip past it.
+ const PoolHeader* ph;
+
+ // Loop until an intentionally-placed instruction is found.
+ while (true) {
+ if (InstIsGuard(cur(), &ph)) {
+ // Don't skip a natural guard.
+ if (ph->isNatural()) {
+ return cur();
+ }
+ advanceRaw(1 + ph->size());
+ } else if (InstIsBNop<InstructionIterator>(*this)) {
+ advanceRaw(1);
+ } else {
+ return cur();
+ }
+ }
+}
+
+Instruction* BufferInstructionIterator::maybeSkipAutomaticInstructions() {
+ const PoolHeader* ph;
+ // If this is a guard, and the next instruction is a header, always work
+ // around the pool. If it isn't a guard, then start looking ahead.
+ if (InstIsGuard(*this, &ph)) {
+ // Don't skip a natural guard.
+ if (ph->isNatural()) {
+ return cur();
+ }
+ advance(sizeof(Instruction) * ph->size());
+ return next();
+ }
+ if (InstIsBNop<BufferInstructionIterator>(*this)) {
+ return next();
+ }
+ return cur();
+}
+
+// Cases to be handled:
+// 1) no pools or branches in sight => return this+1
+// 2) branch to next instruction => return this+2, because a nop needed to be
+// inserted into the stream.
+// 3) this+1 is an artificial guard for a pool => return first instruction
+// after the pool
+// 4) this+1 is a natural guard => return the branch
+// 5) this is a branch, right before a pool => return first instruction after
+// the pool
+// in assembly form:
+// 1) add r0, r0, r0 <= this
+// add r1, r1, r1 <= returned value
+// add r2, r2, r2
+//
+// 2) add r0, r0, r0 <= this
+// b foo
+// foo:
+// add r2, r2, r2 <= returned value
+//
+// 3) add r0, r0, r0 <= this
+// b after_pool;
+// .word 0xffff0002 # bit 15 being 0 indicates that the branch was not
+// # requested by the assembler
+// 0xdeadbeef # the 2 indicates that there is 1 pool entry, and the
+// # pool header
+// add r4, r4, r4 <= returned value
+// 4) add r0, r0, r0 <= this
+// b after_pool <= returned value
+// .word 0xffff8002 # bit 15 being 1 indicates that the branch was
+// # requested by the assembler
+// 0xdeadbeef
+// add r4, r4, r4
+// 5) b after_pool <= this
+// .word 0xffff8002 # bit 15 has no bearing on the returned value
+// 0xdeadbeef
+// add r4, r4, r4 <= returned value
+
+Instruction* InstructionIterator::next() {
+ const PoolHeader* ph;
+
+ // If the current instruction is followed by a pool header,
+ // move past the current instruction and the pool.
+ if (InstIsGuard(cur(), &ph)) {
+ advanceRaw(1 + ph->size());
+ return maybeSkipAutomaticInstructions();
+ }
+
+ // The next instruction is then known to not be a PoolHeader.
+ advanceRaw(1);
+ return maybeSkipAutomaticInstructions();
+}
+
+void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
+ uint32_t* ptr = (uint32_t*)inst_.raw();
+
+ DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
+ MOZ_ASSERT(inst->is<InstCMP>());
+
+ // Zero bits 20-27, then set 24-27 to be correct for a branch.
+  // 20-23 will be part of the B's immediate, and should be 0.
+ *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
+}
+
+void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
+ uint32_t* ptr = (uint32_t*)inst_.raw();
+
+ DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
+ MOZ_ASSERT(inst->is<InstBImm>());
+
+ // Ensure that this masking operation doesn't affect the offset of the
+ // branch instruction when it gets toggled back.
+ MOZ_ASSERT((*ptr & (0xf << 20)) == 0);
+
+  // Also make sure that the CMP is valid. Part of having a valid CMP is that
+  // all of the bits describing the destination in most ALU instructions are
+  // unset (it looks like it is encoding r0).
+ MOZ_ASSERT(toRD(*inst) == r0);
+
+ // Zero out bits 20-27, then set them to be correct for a compare.
+ *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);
+}
+
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+ InstructionIterator iter(reinterpret_cast<Instruction*>(inst_.raw()));
+ MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());
+
+ if (iter.cur()->is<InstMovW>()) {
+ // If it looks like the start of a movw/movt sequence, then make sure we
+ // have all of it (and advance the iterator past the full sequence).
+ iter.next();
+ MOZ_ASSERT(iter.cur()->is<InstMovT>());
+ }
+
+ iter.next();
+ MOZ_ASSERT(iter.cur()->is<InstNOP>() || iter.cur()->is<InstBLXReg>());
+
+ if (enabled == iter.cur()->is<InstBLXReg>()) {
+ // Nothing to do.
+ return;
+ }
+
+ Instruction* inst = iter.cur();
+
+ if (enabled) {
+ *inst = InstBLXReg(ScratchRegister, Always);
+ } else {
+ *inst = InstNOP();
+ }
+}
+
+size_t Assembler::ToggledCallSize(uint8_t* code) {
+ InstructionIterator iter(reinterpret_cast<Instruction*>(code));
+ MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());
+
+ if (iter.cur()->is<InstMovW>()) {
+ // If it looks like the start of a movw/movt sequence, then make sure we
+ // have all of it (and advance the iterator past the full sequence).
+ iter.next();
+ MOZ_ASSERT(iter.cur()->is<InstMovT>());
+ }
+
+ iter.next();
+ MOZ_ASSERT(iter.cur()->is<InstNOP>() || iter.cur()->is<InstBLXReg>());
+ return uintptr_t(iter.cur()) + 4 - uintptr_t(code);
+}
+
+uint32_t Assembler::NopFill = 0;
+
+uint32_t Assembler::GetNopFill() {
+ static bool isSet = false;
+ if (!isSet) {
+ char* fillStr = getenv("ARM_ASM_NOP_FILL");
+ uint32_t fill;
+ if (fillStr && sscanf(fillStr, "%u", &fill) == 1) {
+ NopFill = fill;
+ }
+ if (NopFill > 8) {
+ MOZ_CRASH("Nop fill > 8 is not supported");
+ }
+ isSet = true;
+ }
+ return NopFill;
+}
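+
+// For example, running with ARM_ASM_NOP_FILL=2 in the environment makes
+// GetNopFill() return 2; the value is read once and cached for the lifetime
+// of the process, and values above 8 are rejected.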
+
+uint32_t Assembler::AsmPoolMaxOffset = 1024;
+
+uint32_t Assembler::GetPoolMaxOffset() {
+ static bool isSet = false;
+ if (!isSet) {
+ char* poolMaxOffsetStr = getenv("ASM_POOL_MAX_OFFSET");
+ uint32_t poolMaxOffset;
+ if (poolMaxOffsetStr &&
+ sscanf(poolMaxOffsetStr, "%u", &poolMaxOffset) == 1) {
+ AsmPoolMaxOffset = poolMaxOffset;
+ }
+ isSet = true;
+ }
+ return AsmPoolMaxOffset;
+}
+
+SecondScratchRegisterScope::SecondScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, masm.getSecondScratchReg()) {}
+
+#ifdef JS_DISASM_ARM
+
+/* static */
+void Assembler::disassembleInstruction(const Instruction* i,
+ DisasmBuffer& buffer) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ uint8_t* loc = reinterpret_cast<uint8_t*>(const_cast<uint32_t*>(i->raw()));
+ dasm.InstructionDecode(buffer, loc);
+}
+
+void Assembler::initDisassembler() {
+ // The line is normally laid out like this:
+ //
+ // xxxxxxxx ldr r, op ; comment
+ //
+ // where xx...x is the instruction bit pattern.
+ //
+ // Labels are laid out by themselves to line up with the instructions above
+ // and below:
+ //
+ // nnnn:
+ //
+ // Branch targets are normally on the same line as the branch instruction,
+ // but when they cannot be they will be on a line by themselves, indented
+ // significantly:
+ //
+ // -> label
+
+ spew_.setLabelIndent(" "); // 10
+ spew_.setTargetIndent(" "); // 20
+}
+
+void Assembler::finishDisassembler() { spew_.spewOrphans(); }
+
+// Labels are named as they are encountered by adding names to a
+// table, using the Label address as the key. This is made tricky by
+// the (memory for) Label objects being reused, but reused label
+// objects are recognizable from being marked as not used or not
+// bound. See spew_.refLabel().
+//
+// In a number of cases there is no information about the target, and
+// we just end up printing "patchable constant load to PC". This is
+// true especially for jumps to bailout handlers (which have no
+// names). See allocLiteralLoadEntry() and its callers. In some cases
+// (loop back edges) some information about the intended target may be
+// propagated from higher levels, and if so it's printed here.
+
+void Assembler::spew(Instruction* i) {
+ if (spew_.isDisabled() || !i) {
+ return;
+ }
+
+ DisasmBuffer buffer;
+ disassembleInstruction(i, buffer);
+ spew_.spew("%s", buffer.start());
+}
+
+// If a target label is known, always print that and do not attempt to
+// disassemble the branch operands, as they will often be encoding
+// metainformation (pointers for a chain of jump instructions), and
+// not actual branch targets.
+
+void Assembler::spewBranch(Instruction* i, const LabelDoc& target) {
+ if (spew_.isDisabled() || !i) {
+ return;
+ }
+
+ DisasmBuffer buffer;
+ disassembleInstruction(i, buffer);
+
+ char labelBuf[128];
+ labelBuf[0] = 0;
+
+ bool haveTarget = target.valid;
+ if (!haveTarget) {
+ SprintfLiteral(labelBuf, " -> (link-time target)");
+ }
+
+ if (InstBranchImm::IsTHIS(*i)) {
+ InstBranchImm* bimm = InstBranchImm::AsTHIS(*i);
+ BOffImm destOff;
+ bimm->extractImm(&destOff);
+ if (destOff.isInvalid() || haveTarget) {
+ // The target information in the instruction is likely garbage, so remove
+ // it. The target label will in any case be printed if we have it.
+ //
+ // The format of the instruction disassembly is [0-9a-f]{8}\s+\S+\s+.*,
+ // where the \S+ string is the opcode. Strip everything after the opcode,
+ // and attach the label if we have it.
+ int i;
+ for (i = 8; i < buffer.length() && buffer[i] == ' '; i++) {
+ }
+ for (; i < buffer.length() && buffer[i] != ' '; i++) {
+ }
+ buffer[i] = 0;
+ if (haveTarget) {
+ SprintfLiteral(labelBuf, " -> %d%s", target.doc,
+ !target.bound ? "f" : "");
+ haveTarget = false;
+ }
+ }
+ }
+ spew_.spew("%s%s", buffer.start(), labelBuf);
+
+ if (haveTarget) {
+ spew_.spewRef(target);
+ }
+}
+
+void Assembler::spewLiteralLoad(PoolHintPun& php, bool loadToPC,
+ const Instruction* i, const LiteralDoc& doc) {
+ if (spew_.isDisabled()) {
+ return;
+ }
+
+ char litbuf[2048];
+ spew_.formatLiteral(doc, litbuf, sizeof(litbuf));
+
+  // See PatchConstantPoolLoad, above. We assemble the instruction into a
+ // buffer with a zero offset, as documentation, but the offset will be
+ // patched later.
+
+ uint32_t inst;
+ PoolHintData& data = php.phd;
+ switch (php.phd.getLoadType()) {
+ case PoolHintData::PoolDTR:
+ Assembler::as_dtr_patch(IsLoad, 32, Offset, data.getReg(),
+ DTRAddr(pc, DtrOffImm(0)), data.getCond(), &inst);
+ break;
+ case PoolHintData::PoolBranch:
+ if (data.isValidPoolHint()) {
+ Assembler::as_dtr_patch(IsLoad, 32, Offset, pc,
+ DTRAddr(pc, DtrOffImm(0)), data.getCond(),
+ &inst);
+ }
+ break;
+ case PoolHintData::PoolVDTR:
+ Assembler::as_vdtr_patch(IsLoad, data.getVFPReg(),
+ VFPAddr(pc, VFPOffImm(0)), data.getCond(),
+ &inst);
+ break;
+
+ default:
+ MOZ_CRASH();
+ }
+
+ DisasmBuffer buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(&inst));
+ spew_.spew("%s ; .const %s", buffer.start(), litbuf);
+}
+
+#endif // JS_DISASM_ARM
diff --git a/js/src/jit/arm/Assembler-arm.h b/js/src/jit/arm/Assembler-arm.h
new file mode 100644
index 0000000000..fdbac15a80
--- /dev/null
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -0,0 +1,2296 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_Assembler_arm_h
+#define jit_arm_Assembler_arm_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+
+#include "jit/arm/Architecture-arm.h"
+#include "jit/arm/disasm/Disasm-arm.h"
+#include "jit/CompactBuffer.h"
+#include "jit/JitCode.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/Disassembler-shared.h"
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
+#include "wasm/WasmTypeDecls.h"
+
+union PoolHintPun;
+
+namespace js {
+namespace jit {
+
+using LiteralDoc = DisassemblerSpew::LiteralDoc;
+using LabelDoc = DisassemblerSpew::LabelDoc;
+
+// NOTE: there are duplicates in this list! Sometimes we want to specifically
+// refer to the link register as a link register (bl lr is much clearer than bl
+// r14). HOWEVER, this register can easily be a gpr when it is not busy holding
+// the return address.
+static constexpr Register r0{Registers::r0};
+static constexpr Register r1{Registers::r1};
+static constexpr Register r2{Registers::r2};
+static constexpr Register r3{Registers::r3};
+static constexpr Register r4{Registers::r4};
+static constexpr Register r5{Registers::r5};
+static constexpr Register r6{Registers::r6};
+static constexpr Register r7{Registers::r7};
+static constexpr Register r8{Registers::r8};
+static constexpr Register r9{Registers::r9};
+static constexpr Register r10{Registers::r10};
+static constexpr Register r11{Registers::r11};
+static constexpr Register r12{Registers::ip};
+static constexpr Register ip{Registers::ip};
+static constexpr Register sp{Registers::sp};
+static constexpr Register r14{Registers::lr};
+static constexpr Register lr{Registers::lr};
+static constexpr Register pc{Registers::pc};
+
+static constexpr Register ScratchRegister{Registers::ip};
+
+// Helper class for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of the scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope {
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchRegister) {}
+};
+
+struct SecondScratchRegisterScope : public AutoRegisterScope {
+ explicit SecondScratchRegisterScope(MacroAssembler& masm);
+};
+
+static constexpr Register OsrFrameReg = r3;
+static constexpr Register CallTempReg0 = r5;
+static constexpr Register CallTempReg1 = r6;
+static constexpr Register CallTempReg2 = r7;
+static constexpr Register CallTempReg3 = r8;
+static constexpr Register CallTempReg4 = r0;
+static constexpr Register CallTempReg5 = r1;
+
+static constexpr Register IntArgReg0 = r0;
+static constexpr Register IntArgReg1 = r1;
+static constexpr Register IntArgReg2 = r2;
+static constexpr Register IntArgReg3 = r3;
+static constexpr Register HeapReg = r10;
+static constexpr Register CallTempNonArgRegs[] = {r5, r6, r7, r8};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+// These register assignments for the 64-bit atomic ops are frequently too
+// constraining, but we have no way of expressing looser constraints to the
+// register allocator.
+
+// CompareExchange: Any two odd/even pairs would do for `new` and `out`, and any
+// pair would do for `old`, so long as none of them overlap.
+
+static constexpr Register CmpXchgOldLo = r4;
+static constexpr Register CmpXchgOldHi = r5;
+static constexpr Register64 CmpXchgOld64 =
+ Register64(CmpXchgOldHi, CmpXchgOldLo);
+static constexpr Register CmpXchgNewLo = IntArgReg2;
+static constexpr Register CmpXchgNewHi = IntArgReg3;
+static constexpr Register64 CmpXchgNew64 =
+ Register64(CmpXchgNewHi, CmpXchgNewLo);
+static constexpr Register CmpXchgOutLo = IntArgReg0;
+static constexpr Register CmpXchgOutHi = IntArgReg1;
+static constexpr Register64 CmpXchgOut64 =
+ Register64(CmpXchgOutHi, CmpXchgOutLo);
+
+// Exchange: Any two non-equal odd/even pairs would do for `new` and `out`.
+
+static constexpr Register XchgNewLo = IntArgReg2;
+static constexpr Register XchgNewHi = IntArgReg3;
+static constexpr Register64 XchgNew64 = Register64(XchgNewHi, XchgNewLo);
+static constexpr Register XchgOutLo = IntArgReg0;
+static constexpr Register XchgOutHi = IntArgReg1;
+
+// Atomic rmw operations: Any two odd/even pairs would do for `tmp` and `out`,
+// and any pair would do for `val`, so long as none of them overlap.
+
+static constexpr Register FetchOpValLo = r4;
+static constexpr Register FetchOpValHi = r5;
+static constexpr Register64 FetchOpVal64 =
+ Register64(FetchOpValHi, FetchOpValLo);
+static constexpr Register FetchOpTmpLo = IntArgReg2;
+static constexpr Register FetchOpTmpHi = IntArgReg3;
+static constexpr Register64 FetchOpTmp64 =
+ Register64(FetchOpTmpHi, FetchOpTmpLo);
+static constexpr Register FetchOpOutLo = IntArgReg0;
+static constexpr Register FetchOpOutHi = IntArgReg1;
+static constexpr Register64 FetchOpOut64 =
+ Register64(FetchOpOutHi, FetchOpOutLo);
+
+class ABIArgGenerator {
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ // ARM can use either the HardFp ABI (float registers carry float arguments)
+ // or the SoftFp ABI (general registers carry float arguments). We keep this
+ // as a runtime switch because wasm always uses the HardFp back-end, while
+ // calls to native functions have to use whichever ABI the system provides.
+ // A brief usage sketch follows this class.
+ bool useHardFp_;
+
+ ABIArg softNext(MIRType argType);
+ ABIArg hardNext(MIRType argType);
+
+ public:
+ ABIArgGenerator();
+
+ void setUseHardFp(bool useHardFp) {
+ MOZ_ASSERT(intRegIndex_ == 0 && floatRegIndex_ == 0);
+ useHardFp_ = useHardFp;
+ }
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+};
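+
+// Usage sketch (illustrative only; exact registers follow the AAPCS rules and
+// the hard/soft float setting):
+//
+//   ABIArgGenerator abi;
+//   ABIArg a0 = abi.next(MIRType::Int32);   // r0 under either ABI
+//   ABIArg a1 = abi.next(MIRType::Double);  // d0 with HardFp, r2:r3 with SoftFp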
+
+bool IsUnaligned(const wasm::MemoryAccessDesc& access);
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = r4;
+static constexpr Register ABINonArgReg1 = r5;
+static constexpr Register ABINonArgReg2 = r6;
+static constexpr Register ABINonArgReg3 = r7;
+
+// This register may be volatile or nonvolatile. Avoid d15 which is the
+// ScratchDoubleReg_.
+static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::d8,
+ VFPRegister::Double};
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different.
+static constexpr Register ABINonArgReturnReg0 = r4;
+static constexpr Register ABINonArgReturnReg1 = r5;
+static constexpr Register ABINonVolatileReg = r6;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call that must preserve the ABI argument, return, and
+// non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = lr;
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions.
+static constexpr Register InstanceReg = r9;
+
+// Registers used for wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = r5;
+
+static constexpr Register PreBarrierReg = r1;
+
+static constexpr Register InterpreterPCReg = r9;
+
+static constexpr Register InvalidReg{Registers::invalid_reg};
+static constexpr FloatRegister InvalidFloatReg;
+
+static constexpr Register JSReturnReg_Type = r3;
+static constexpr Register JSReturnReg_Data = r2;
+static constexpr Register StackPointer = sp;
+static constexpr Register FramePointer = r11;
+static constexpr Register ReturnReg = r0;
+static constexpr Register64 ReturnReg64(r1, r0);
+
+// The attribute '__value_in_regs' alters the calling convention of a function
+// so that a structure of up to four elements can be returned via the argument
+// registers rather than being written to memory.
+static constexpr Register ReturnRegVal0 = IntArgReg0;
+static constexpr Register ReturnRegVal1 = IntArgReg1;
+static constexpr Register ReturnRegVal2 = IntArgReg2;
+static constexpr Register ReturnRegVal3 = IntArgReg3;
+
+static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::d0,
+ VFPRegister::Single};
+static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::d0,
+ VFPRegister::Double};
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchFloat32Reg_ = {FloatRegisters::s30,
+ VFPRegister::Single};
+static constexpr FloatRegister ScratchDoubleReg_ = {FloatRegisters::d15,
+ VFPRegister::Double};
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchUIntReg = {FloatRegisters::d15,
+ VFPRegister::UInt};
+static constexpr FloatRegister ScratchIntReg = {FloatRegisters::d15,
+ VFPRegister::Int};
+
+// Do not reference ScratchFloat32Reg_ directly, use ScratchFloat32Scope
+// instead.
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg_) {}
+};
+
+// Do not reference ScratchDoubleReg_ directly, use ScratchDoubleScope instead.
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg_) {}
+};
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg1;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg1;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
+
+static constexpr FloatRegister d0 = {FloatRegisters::d0, VFPRegister::Double};
+static constexpr FloatRegister d1 = {FloatRegisters::d1, VFPRegister::Double};
+static constexpr FloatRegister d2 = {FloatRegisters::d2, VFPRegister::Double};
+static constexpr FloatRegister d3 = {FloatRegisters::d3, VFPRegister::Double};
+static constexpr FloatRegister d4 = {FloatRegisters::d4, VFPRegister::Double};
+static constexpr FloatRegister d5 = {FloatRegisters::d5, VFPRegister::Double};
+static constexpr FloatRegister d6 = {FloatRegisters::d6, VFPRegister::Double};
+static constexpr FloatRegister d7 = {FloatRegisters::d7, VFPRegister::Double};
+static constexpr FloatRegister d8 = {FloatRegisters::d8, VFPRegister::Double};
+static constexpr FloatRegister d9 = {FloatRegisters::d9, VFPRegister::Double};
+static constexpr FloatRegister d10 = {FloatRegisters::d10, VFPRegister::Double};
+static constexpr FloatRegister d11 = {FloatRegisters::d11, VFPRegister::Double};
+static constexpr FloatRegister d12 = {FloatRegisters::d12, VFPRegister::Double};
+static constexpr FloatRegister d13 = {FloatRegisters::d13, VFPRegister::Double};
+static constexpr FloatRegister d14 = {FloatRegisters::d14, VFPRegister::Double};
+static constexpr FloatRegister d15 = {FloatRegisters::d15, VFPRegister::Double};
+
+// For maximal awesomeness, 8 should be sufficient. ldrd/strd (dual-register
+// load/store) operate in a single cycle when the address they are dealing with
+// is 8 byte aligned. Also, the ARM ABI wants the stack to be 8 byte aligned at
+// function boundaries. I'm trying to make sure this is always true.
+static constexpr uint32_t ABIStackAlignment = 8;
+static constexpr uint32_t CodeAlignment = 8;
+static constexpr uint32_t JitStackAlignment = 8;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+static constexpr uint32_t SimdMemoryAlignment = 8;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments "
+ "which are used for "
+ "the constant sections of the code buffer. Thus it should be "
+ "larger than the "
+ "alignment for SIMD constants.");
+
+static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+ "Stack alignment should be larger than any of the alignments "
+ "which are used for "
+ "spilled values. Thus it should be larger than the alignment "
+ "for SIMD accesses.");
+
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmTrapInstructionLength = 4;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+static const Scale ScalePointer = TimesFour;
+
+class Instruction;
+class InstBranchImm;
+uint32_t RM(Register r);
+uint32_t RS(Register r);
+uint32_t RD(Register r);
+uint32_t RT(Register r);
+uint32_t RN(Register r);
+
+uint32_t maybeRD(Register r);
+uint32_t maybeRT(Register r);
+uint32_t maybeRN(Register r);
+
+Register toRN(Instruction i);
+Register toRM(Instruction i);
+Register toRD(Instruction i);
+Register toR(Instruction i);
+
+class VFPRegister;
+uint32_t VD(VFPRegister vr);
+uint32_t VN(VFPRegister vr);
+uint32_t VM(VFPRegister vr);
+
+// For being passed into the generic vfp instruction generator when there is an
+// instruction that only takes two registers.
+static constexpr VFPRegister NoVFPRegister(VFPRegister::Double, 0, false, true);
+
+struct ImmTag : public Imm32 {
+ explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+struct ImmType : public ImmTag {
+ explicit ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
+};
+
+enum Index {
+ Offset = 0 << 21 | 1 << 24,
+ PreIndex = 1 << 21 | 1 << 24,
+ PostIndex = 0 << 21 | 0 << 24
+ // The docs were rather unclear on this. It sounds like
+ // 1 << 21 | 0 << 24 encodes dtrt.
+};
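+
+// Illustrative mapping to assembler syntax (comment-only aid): Offset leaves
+// the base untouched ("ldr r0, [r1, #4]"), PreIndex writes the updated address
+// back before the access ("ldr r0, [r1, #4]!"), and PostIndex accesses at the
+// base and then updates it ("ldr r0, [r1], #4").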
+
+enum IsImmOp2_ { IsImmOp2 = 1 << 25, IsNotImmOp2 = 0 << 25 };
+enum IsImmDTR_ { IsImmDTR = 0 << 25, IsNotImmDTR = 1 << 25 };
+// For the extra memory operations, ldrd, ldrsb, ldrh.
+enum IsImmEDTR_ { IsImmEDTR = 1 << 22, IsNotImmEDTR = 0 << 22 };
+
+enum ShiftType {
+ LSL = 0, // << 5
+ LSR = 1, // << 5
+ ASR = 2, // << 5
+ ROR = 3, // << 5
+ RRX = ROR // RRX is encoded as ROR with a 0 offset.
+};
+
+// Modes for STM/LDM. Names are the suffixes applied to the instruction.
+enum DTMMode {
+ A = 0 << 24, // empty / after
+ B = 1 << 24, // full / before
+ D = 0 << 23, // decrement
+ I = 1 << 23, // increment
+ DA = D | A,
+ DB = D | B,
+ IA = I | A,
+ IB = I | B
+};
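+
+// Illustrative mapping (comment-only aid): DB with writeback on sp is a push
+// ("stmdb sp!, {r4-r7}"), and IA with writeback is the matching pop
+// ("ldmia sp!, {r4-r7}").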
+
+enum DTMWriteBack { WriteBack = 1 << 21, NoWriteBack = 0 << 21 };
+
+// Condition code updating mode.
+enum SBit {
+ SetCC = 1 << 20, // Set condition code.
+ LeaveCC = 0 << 20 // Leave condition code unchanged.
+};
+
+enum LoadStore { IsLoad = 1 << 20, IsStore = 0 << 20 };
+
+// You almost never want to use this directly. Instead, you want to pass in a
+// signed constant and let this bit be set implicitly for you. It is, however,
+// necessary if we want a negative index.
+enum IsUp_ { IsUp = 1 << 23, IsDown = 0 << 23 };
+enum ALUOp {
+ OpMov = 0xd << 21,
+ OpMvn = 0xf << 21,
+ OpAnd = 0x0 << 21,
+ OpBic = 0xe << 21,
+ OpEor = 0x1 << 21,
+ OpOrr = 0xc << 21,
+ OpAdc = 0x5 << 21,
+ OpAdd = 0x4 << 21,
+ OpSbc = 0x6 << 21,
+ OpSub = 0x2 << 21,
+ OpRsb = 0x3 << 21,
+ OpRsc = 0x7 << 21,
+ OpCmn = 0xb << 21,
+ OpCmp = 0xa << 21,
+ OpTeq = 0x9 << 21,
+ OpTst = 0x8 << 21,
+ OpInvalid = -1
+};
+
+enum MULOp {
+ OpmMul = 0 << 21,
+ OpmMla = 1 << 21,
+ OpmUmaal = 2 << 21,
+ OpmMls = 3 << 21,
+ OpmUmull = 4 << 21,
+ OpmUmlal = 5 << 21,
+ OpmSmull = 6 << 21,
+ OpmSmlal = 7 << 21
+};
+enum BranchTag {
+ OpB = 0x0a000000,
+ OpBMask = 0x0f000000,
+ OpBDestMask = 0x00ffffff,
+ OpBl = 0x0b000000,
+ OpBlx = 0x012fff30,
+ OpBx = 0x012fff10
+};
+
+// Just like ALUOp, but for the vfp instruction set.
+enum VFPOp {
+ OpvMul = 0x2 << 20,
+ OpvAdd = 0x3 << 20,
+ OpvSub = 0x3 << 20 | 0x1 << 6,
+ OpvDiv = 0x8 << 20,
+ OpvMov = 0xB << 20 | 0x1 << 6,
+ OpvAbs = 0xB << 20 | 0x3 << 6,
+ OpvNeg = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
+ OpvSqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
+ OpvCmp = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
+ OpvCmpz = 0xB << 20 | 0x1 << 6 | 0x5 << 16
+};
+
+// Negate the operation, AND negate the immediate that we were passed in.
+ALUOp ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm,
+ Register* negDest);
+bool can_dbl(ALUOp op);
+bool condsAreSafe(ALUOp op);
+
+// If there is a variant of op that has a dest (think cmp/sub) return that
+// variant of it.
+ALUOp getDestVariant(ALUOp op);
+
+static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type,
+ JSReturnReg_Data};
+static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0);
+
+// All of these classes exist solely to shuffle data into the various operands.
+// For example Operand2 can be an imm8, a register-shifted-by-a-constant or a
+// register-shifted-by-a-register. We represent this in C++ by having a base
+// class Operand2, which just stores the 32 bits of data as they will be encoded
+// in the instruction. You cannot directly create an Operand2 since it is
+// tricky, and not entirely sane to do so. Instead, you create one of its child
+// classes, e.g. Imm8. Imm8's constructor takes a single integer argument. Imm8
+// will verify that its argument can be encoded as an ARM imm8m (an 8-bit value
+// rotated right by an even amount), encode it as an Imm8mData, and finally call
+// its parent's (Operand2) constructor with the Imm8mData. The Operand2
+// constructor will then call the Imm8mData's encode() function to extract the
+// raw bits from it.
+//
+// In the future, we should be able to extract data from the Operand2 by asking
+// it for its component Imm8mData structures. The reason this is so horribly
+// round-about is we wanted to have Imm8 and RegisterShiftedRegister inherit
+// directly from Operand2 but have all of them take up only a single word of
+// storage. We also wanted to avoid passing around raw integers at all since
+// they are error prone.
+class Op2Reg;
+class O2RegImmShift;
+class O2RegRegShift;
+
+namespace datastore {
+
+class Reg {
+ // The "second register".
+ uint32_t rm_ : 4;
+ // Do we get another register for shifting?
+ uint32_t rrs_ : 1;
+ uint32_t type_ : 2;
+ // We'd like this to be a more sensible encoding, but that would need to be
+ // a struct and that would not pack :(
+ uint32_t shiftAmount_ : 5;
+
+ protected:
+ // Mark as a protected field to avoid unused private field warnings.
+ uint32_t pad_ : 20;
+
+ public:
+ Reg(uint32_t rm, ShiftType type, uint32_t rsr, uint32_t shiftAmount)
+ : rm_(rm), rrs_(rsr), type_(type), shiftAmount_(shiftAmount), pad_(0) {}
+ explicit Reg(const Op2Reg& op) { memcpy(this, &op, sizeof(*this)); }
+
+ uint32_t shiftAmount() const { return shiftAmount_; }
+
+ uint32_t encode() const {
+ return rm_ | (rrs_ << 4) | (type_ << 5) | (shiftAmount_ << 7);
+ }
+};
+
+// Op2 has a mode labelled "<imm8m>", which is ARM's magical immediate encoding.
+// Some instructions actually take a plain 8 bits of data, which is called
+// Imm8Data below. These two names should have an edit distance > 1, but this is
+// how it is for now.
+class Imm8mData {
+ uint32_t data_ : 8;
+ uint32_t rot_ : 4;
+
+ protected:
+ // Mark as a protected field to avoid unused private field warnings.
+ uint32_t buff_ : 19;
+
+ private:
+ // Throw in an extra bit that will be 1 if we can't encode this properly.
+ // If we can encode it properly, a simple "|" will still suffice to meld it
+ // into the instruction.
+ uint32_t invalid_ : 1;
+
+ public:
+ // Default constructor makes an invalid immediate.
+ Imm8mData() : data_(0xff), rot_(0xf), buff_(0), invalid_(true) {}
+
+ Imm8mData(uint32_t data, uint32_t rot)
+ : data_(data), rot_(rot), buff_(0), invalid_(false) {
+ MOZ_ASSERT(data == data_);
+ MOZ_ASSERT(rot == rot_);
+ }
+
+ bool invalid() const { return invalid_; }
+
+ uint32_t encode() const {
+ MOZ_ASSERT(!invalid_);
+ return data_ | (rot_ << 8);
+ };
+};
+
+class Imm8Data {
+ uint32_t imm4L_ : 4;
+
+ protected:
+ // Mark as a protected field to avoid unused private field warnings.
+ uint32_t pad_ : 4;
+
+ private:
+ uint32_t imm4H_ : 4;
+
+ public:
+ explicit Imm8Data(uint32_t imm) : imm4L_(imm & 0xf), imm4H_(imm >> 4) {
+ MOZ_ASSERT(imm <= 0xff);
+ }
+
+ uint32_t encode() const { return imm4L_ | (imm4H_ << 8); };
+};
+
+// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted by 2.
+class Imm8VFPOffData {
+ uint32_t data_;
+
+ public:
+ explicit Imm8VFPOffData(uint32_t imm) : data_(imm) {
+ MOZ_ASSERT((imm & ~(0xff)) == 0);
+ }
+ uint32_t encode() const { return data_; };
+};
+
+// ARM can magically encode 256 very special immediates to be moved into a
+// register.
+struct Imm8VFPImmData {
+ // This structure's members are public and it has no constructor to
+ // initialize them, for a very special reason. Were this structure to
+ // have a constructor, the initialization for DoubleEncoder's internal
+ // table (see below) would require a rather large static constructor on
+ // some of our supported compilers. The known solution to this is to mark
+ // the constructor constexpr, but, again, some of our supported
+ // compilers don't support constexpr! So we are reduced to public
+ // members and eschewing a constructor in hopes that the initialization
+ // of DoubleEncoder's table is correct.
+ uint32_t imm4L : 4;
+ uint32_t imm4H : 4;
+ int32_t isInvalid : 24;
+
+ uint32_t encode() const {
+ // This assert is an attempt at ensuring that we don't create random
+ // instances of this structure and then ask to encode() them.
+ MOZ_ASSERT(isInvalid == 0);
+ return imm4L | (imm4H << 16);
+ };
+};
+
+class Imm12Data {
+ uint32_t data_ : 12;
+
+ public:
+ explicit Imm12Data(uint32_t imm) : data_(imm) { MOZ_ASSERT(data_ == imm); }
+
+ uint32_t encode() const { return data_; }
+};
+
+class RIS {
+ uint32_t shiftAmount_ : 5;
+
+ public:
+ explicit RIS(uint32_t imm) : shiftAmount_(imm) {
+ MOZ_ASSERT(shiftAmount_ == imm);
+ }
+
+ explicit RIS(Reg r) : shiftAmount_(r.shiftAmount()) {}
+
+ uint32_t encode() const { return shiftAmount_; }
+};
+
+class RRS {
+ protected:
+ // Mark as a protected field to avoid unused private field warnings.
+ uint32_t mustZero_ : 1;
+
+ private:
+ // The register that holds the shift amount.
+ uint32_t rs_ : 4;
+
+ public:
+ explicit RRS(uint32_t rs) : rs_(rs) { MOZ_ASSERT(rs_ == rs); }
+
+ uint32_t encode() const { return rs_ << 1; }
+};
+
+} // namespace datastore
+
+class MacroAssemblerARM;
+class Operand;
+
+class Operand2 {
+ friend class Operand;
+ friend class MacroAssemblerARM;
+ friend class InstALU;
+
+ uint32_t oper_ : 31;
+ uint32_t invalid_ : 1;
+
+ protected:
+ explicit Operand2(datastore::Imm8mData base)
+ : oper_(base.invalid() ? -1 : (base.encode() | uint32_t(IsImmOp2))),
+ invalid_(base.invalid()) {}
+
+ explicit Operand2(datastore::Reg base)
+ : oper_(base.encode() | uint32_t(IsNotImmOp2)), invalid_(false) {}
+
+ private:
+ explicit Operand2(uint32_t blob) : oper_(blob), invalid_(false) {}
+
+ public:
+ bool isO2Reg() const { return !(oper_ & IsImmOp2); }
+
+ Op2Reg toOp2Reg() const;
+
+ bool isImm8() const { return oper_ & IsImmOp2; }
+
+ bool invalid() const { return invalid_; }
+
+ uint32_t encode() const { return oper_; }
+};
+
+class Imm8 : public Operand2 {
+ public:
+ explicit Imm8(uint32_t imm) : Operand2(EncodeImm(imm)) {}
+
+ static datastore::Imm8mData EncodeImm(uint32_t imm) {
+ // RotateLeft below may not be called with a shift of zero.
+ if (imm <= 0xFF) {
+ return datastore::Imm8mData(imm, 0);
+ }
+
+ // An encodable integer has a maximum of 8 contiguous set bits,
+ // with an optional wrapped left rotation to even bit positions.
+ for (int rot = 1; rot < 16; rot++) {
+ uint32_t rotimm = mozilla::RotateLeft(imm, rot * 2);
+ if (rotimm <= 0xFF) {
+ return datastore::Imm8mData(rotimm, rot);
+ }
+ }
+ return datastore::Imm8mData();
+ }
+
+ // Pair template?
+ struct TwoImm8mData {
+ datastore::Imm8mData fst_, snd_;
+
+ TwoImm8mData() = default;
+
+ TwoImm8mData(datastore::Imm8mData fst, datastore::Imm8mData snd)
+ : fst_(fst), snd_(snd) {}
+
+ datastore::Imm8mData fst() const { return fst_; }
+ datastore::Imm8mData snd() const { return snd_; }
+ };
+
+ static TwoImm8mData EncodeTwoImms(uint32_t);
+};
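+
+// Worked example of the encoder above (comment only): 0xff000000 rotates left
+// by 8 bits (rot = 4) down to 0xff, so Imm8(0xff000000) encodes as data = 0xff,
+// rot = 4. A value such as 0x101 spans nine bit positions and fits under no
+// even rotation, so Imm8(0x101).invalid() is true.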
+
+class Op2Reg : public Operand2 {
+ public:
+ explicit Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
+ : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode())) {}
+
+ explicit Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg)
+ : Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.encode())) {}
+};
+
+static_assert(sizeof(Op2Reg) == sizeof(datastore::Reg),
+ "datastore::Reg(const Op2Reg&) constructor relies on Reg/Op2Reg "
+ "having same size");
+
+class O2RegImmShift : public Op2Reg {
+ public:
+ explicit O2RegImmShift(Register rn, ShiftType type, uint32_t shift)
+ : Op2Reg(rn, type, datastore::RIS(shift)) {}
+};
+
+class O2RegRegShift : public Op2Reg {
+ public:
+ explicit O2RegRegShift(Register rn, ShiftType type, Register rs)
+ : Op2Reg(rn, type, datastore::RRS(rs.code())) {}
+};
+
+O2RegImmShift O2Reg(Register r);
+O2RegImmShift lsl(Register r, int amt);
+O2RegImmShift lsr(Register r, int amt);
+O2RegImmShift asr(Register r, int amt);
+O2RegImmShift rol(Register r, int amt);
+O2RegImmShift ror(Register r, int amt);
+
+O2RegRegShift lsl(Register r, Register amt);
+O2RegRegShift lsr(Register r, Register amt);
+O2RegRegShift asr(Register r, Register amt);
+O2RegRegShift ror(Register r, Register amt);
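+
+// Sketch of how these helpers are meant to be used (illustrative; "masm" is a
+// MacroAssembler assumed to be in scope):
+//
+//   masm.as_mov(r0, lsl(r1, 2));   // mov r0, r1, lsl #2
+//   masm.as_mov(r0, asr(r1, r2));  // mov r0, r1, asr r2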
+
+// An offset from a register to be used for ldr/str. This should include the
+// sign bit, since ARM has "signed-magnitude" offsets. That is, it encodes an
+// unsigned offset, and the instruction specifies whether the offset is positive
+// or negative. The +/- bit is necessary if the instruction set wants to be able
+// to have a negative register offset, e.g. ldr pc, [r1,-r2];
+class DtrOff {
+ uint32_t data_;
+
+ protected:
+ explicit DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
+ : data_(immdata.encode() | uint32_t(IsImmDTR) | uint32_t(iu)) {}
+
+ explicit DtrOff(datastore::Reg reg, IsUp_ iu = IsUp)
+ : data_(reg.encode() | uint32_t(IsNotImmDTR) | iu) {}
+
+ public:
+ uint32_t encode() const { return data_; }
+};
+
+class DtrOffImm : public DtrOff {
+ public:
+ explicit DtrOffImm(int32_t imm)
+ : DtrOff(datastore::Imm12Data(mozilla::Abs(imm)),
+ imm >= 0 ? IsUp : IsDown) {
+ MOZ_ASSERT(mozilla::Abs(imm) < 4096);
+ }
+};
+
+class DtrOffReg : public DtrOff {
+ // These are designed to be called by a constructor of a subclass.
+ // Constructing the necessary RIS/RRS structures is annoying.
+
+ protected:
+ explicit DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm,
+ IsUp_ iu = IsUp)
+ : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu) {}
+
+ explicit DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg,
+ IsUp_ iu = IsUp)
+ : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode()), iu) {}
+};
+
+class DtrRegImmShift : public DtrOffReg {
+ public:
+ explicit DtrRegImmShift(Register rn, ShiftType type, uint32_t shift,
+ IsUp_ iu = IsUp)
+ : DtrOffReg(rn, type, datastore::RIS(shift), iu) {}
+};
+
+class DtrRegRegShift : public DtrOffReg {
+ public:
+ explicit DtrRegRegShift(Register rn, ShiftType type, Register rs,
+ IsUp_ iu = IsUp)
+ : DtrOffReg(rn, type, datastore::RRS(rs.code()), iu) {}
+};
+
+// We will frequently want to bundle a register with its offset so that we have
+// an "operand" to a load instruction.
+class DTRAddr {
+ friend class Operand;
+
+ uint32_t data_;
+
+ public:
+ explicit DTRAddr(Register reg, DtrOff dtr)
+ : data_(dtr.encode() | (reg.code() << 16)) {}
+
+ uint32_t encode() const { return data_; }
+
+ Register getBase() const { return Register::FromCode((data_ >> 16) & 0xf); }
+};
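+
+// Sketch of the intended use (illustrative; as_dtr is declared further down):
+// "ldr r0, [r1, #-4]" becomes
+//
+//   masm.as_dtr(IsLoad, 32, Offset, r0, DTRAddr(r1, DtrOffImm(-4)));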
+
+// Offsets for the extended data transfer instructions:
+// ldrsh, ldrd, ldrsb, etc.
+class EDtrOff {
+ uint32_t data_;
+
+ protected:
+ explicit EDtrOff(datastore::Imm8Data imm8, IsUp_ iu = IsUp)
+ : data_(imm8.encode() | IsImmEDTR | uint32_t(iu)) {}
+
+ explicit EDtrOff(Register rm, IsUp_ iu = IsUp)
+ : data_(rm.code() | IsNotImmEDTR | iu) {}
+
+ public:
+ uint32_t encode() const { return data_; }
+};
+
+class EDtrOffImm : public EDtrOff {
+ public:
+ explicit EDtrOffImm(int32_t imm)
+ : EDtrOff(datastore::Imm8Data(mozilla::Abs(imm)),
+ (imm >= 0) ? IsUp : IsDown) {
+ MOZ_ASSERT(mozilla::Abs(imm) < 256);
+ }
+};
+
+// This is the most-derived class, since the extended data transfer instructions
+// don't support any sort of modifying the "index" operand.
+class EDtrOffReg : public EDtrOff {
+ public:
+ explicit EDtrOffReg(Register rm) : EDtrOff(rm) {}
+};
+
+class EDtrAddr {
+ uint32_t data_;
+
+ public:
+ explicit EDtrAddr(Register r, EDtrOff off) : data_(RN(r) | off.encode()) {}
+
+ uint32_t encode() const { return data_; }
+#ifdef DEBUG
+ Register maybeOffsetRegister() const {
+ if (data_ & IsImmEDTR) {
+ return InvalidReg;
+ }
+ return Register::FromCode(data_ & 0xf);
+ }
+#endif
+};
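+
+// Sketch (illustrative; as_extdtr is declared further down): a signed halfword
+// load, "ldrsh r0, [r1, #6]", would be emitted as
+//
+//   masm.as_extdtr(IsLoad, 16, /* IsSigned = */ true, Offset, r0,
+//                  EDtrAddr(r1, EDtrOffImm(6)));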
+
+class VFPOff {
+ uint32_t data_;
+
+ protected:
+ explicit VFPOff(datastore::Imm8VFPOffData imm, IsUp_ isup)
+ : data_(imm.encode() | uint32_t(isup)) {}
+
+ public:
+ uint32_t encode() const { return data_; }
+};
+
+class VFPOffImm : public VFPOff {
+ public:
+ explicit VFPOffImm(int32_t imm)
+ : VFPOff(datastore::Imm8VFPOffData(mozilla::Abs(imm) / 4),
+ imm < 0 ? IsDown : IsUp) {
+ MOZ_ASSERT(mozilla::Abs(imm) <= 255 * 4);
+ }
+};
+
+class VFPAddr {
+ friend class Operand;
+
+ uint32_t data_;
+
+ public:
+ explicit VFPAddr(Register base, VFPOff off)
+ : data_(RN(base) | off.encode()) {}
+
+ uint32_t encode() const { return data_; }
+};
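+
+// Sketch (illustrative; as_vdtr is declared further down): "vldr d0, [r1, #8]"
+// would be emitted as
+//
+//   masm.as_vdtr(IsLoad, d0, VFPAddr(r1, VFPOffImm(8)));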
+
+class VFPImm {
+ uint32_t data_;
+
+ public:
+ explicit VFPImm(uint32_t topWordOfDouble);
+
+ static const VFPImm One;
+
+ uint32_t encode() const { return data_; }
+ bool isValid() const { return data_ != (~0U); }
+};
+
+// A BOffImm is an immediate that is used for branches. Namely, it is the offset
+// that will be encoded in the branch instruction. This is the only sane way of
+// constructing a branch.
+class BOffImm {
+ friend class InstBranchImm;
+
+ uint32_t data_;
+
+ public:
+ explicit BOffImm(int offset) : data_((offset - 8) >> 2 & 0x00ffffff) {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ if (!IsInRange(offset)) {
+ MOZ_CRASH("BOffImm offset out of range");
+ }
+ }
+
+ explicit BOffImm() : data_(INVALID) {}
+
+ private:
+ explicit BOffImm(const Instruction& inst);
+
+ public:
+ static const uint32_t INVALID = 0x00800000;
+
+ uint32_t encode() const { return data_; }
+ int32_t decode() const { return ((int32_t(data_) << 8) >> 6) + 8; }
+
+ static bool IsInRange(int offset) {
+ if ((offset - 8) < -33554432) {
+ return false;
+ }
+ if ((offset - 8) > 33554428) {
+ return false;
+ }
+ return true;
+ }
+
+ bool isInvalid() const { return data_ == INVALID; }
+ Instruction* getDest(Instruction* src) const;
+};
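+
+// Worked example (comment only): BOffImm stores (offset - 8) >> 2, matching the
+// ARM convention that pc reads as the branch address plus 8. So
+// BOffImm(8).encode() is 0 and BOffImm(8).decode() returns 8, while BOffImm(4)
+// encodes the 24-bit value 0x00ffffff (-1).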
+
+class Imm16 {
+ uint32_t lower_ : 12;
+
+ protected:
+ // Mark as a protected field to avoid unused private field warnings.
+ uint32_t pad_ : 4;
+
+ private:
+ uint32_t upper_ : 4;
+ uint32_t invalid_ : 12;
+
+ public:
+ explicit Imm16();
+ explicit Imm16(uint32_t imm);
+ explicit Imm16(Instruction& inst);
+
+ uint32_t encode() const { return lower_ | (upper_ << 16); }
+ uint32_t decode() const { return lower_ | (upper_ << 12); }
+
+ bool isInvalid() const { return invalid_; }
+};
+
+// I would prefer that these did not exist, since there are essentially no
+// instructions that would ever take more than one of these; however, the MIR
+// wants to have only one type of argument to functions, so bugger.
+class Operand {
+ // The encoding of registers is the same for OP2, DTR and EDTR yet the type
+ // system doesn't let us express this, so choices must be made.
+ public:
+ enum class Tag : uint8_t { OP2, MEM, FOP };
+
+ private:
+ uint32_t tag_ : 8;
+ uint32_t reg_ : 5;
+ int32_t offset_;
+
+ protected:
+ Operand(Tag tag, uint32_t regCode, int32_t offset)
+ : tag_(static_cast<uint32_t>(tag)), reg_(regCode), offset_(offset) {}
+
+ public:
+ explicit Operand(Register reg) : Operand(Tag::OP2, reg.code(), 0) {}
+
+ explicit Operand(FloatRegister freg) : Operand(Tag::FOP, freg.code(), 0) {}
+
+ explicit Operand(Register base, Imm32 off)
+ : Operand(Tag::MEM, base.code(), off.value) {}
+
+ explicit Operand(Register base, int32_t off)
+ : Operand(Tag::MEM, base.code(), off) {}
+
+ explicit Operand(const Address& addr)
+ : Operand(Tag::MEM, addr.base.code(), addr.offset) {}
+
+ public:
+ Tag tag() const { return static_cast<Tag>(tag_); }
+
+ Operand2 toOp2() const {
+ MOZ_ASSERT(tag() == Tag::OP2);
+ return O2Reg(Register::FromCode(reg_));
+ }
+
+ Register toReg() const {
+ MOZ_ASSERT(tag() == Tag::OP2);
+ return Register::FromCode(reg_);
+ }
+
+ Address toAddress() const {
+ MOZ_ASSERT(tag() == Tag::MEM);
+ return Address(Register::FromCode(reg_), offset_);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(tag() == Tag::MEM);
+ return offset_;
+ }
+
+ int32_t base() const {
+ MOZ_ASSERT(tag() == Tag::MEM);
+ return reg_;
+ }
+ Register baseReg() const {
+ MOZ_ASSERT(tag() == Tag::MEM);
+ return Register::FromCode(reg_);
+ }
+ DTRAddr toDTRAddr() const {
+ MOZ_ASSERT(tag() == Tag::MEM);
+ return DTRAddr(baseReg(), DtrOffImm(offset_));
+ }
+ VFPAddr toVFPAddr() const {
+ MOZ_ASSERT(tag() == Tag::MEM);
+ return VFPAddr(baseReg(), VFPOffImm(offset_));
+ }
+};
+
+inline Imm32 Imm64::firstHalf() const { return low(); }
+
+inline Imm32 Imm64::secondHalf() const { return hi(); }
+
+class InstructionIterator {
+ private:
+ Instruction* inst_;
+
+ public:
+ explicit InstructionIterator(Instruction* inst) : inst_(inst) {
+ maybeSkipAutomaticInstructions();
+ }
+
+ // Advances to the next intentionally-inserted instruction.
+ Instruction* next();
+
+ // Advances past any automatically-inserted instructions.
+ Instruction* maybeSkipAutomaticInstructions();
+
+ Instruction* cur() const { return inst_; }
+
+ protected:
+ // Advances past the given number of instruction-length bytes.
+ inline void advanceRaw(ptrdiff_t instructions = 1);
+};
+
+class Assembler;
+typedef js::jit::AssemblerBufferWithConstantPools<1024, 4, Instruction,
+ Assembler>
+ ARMBuffer;
+
+class Assembler : public AssemblerShared {
+ public:
+ // ARM conditional constants:
+ enum ARMCondition : uint32_t {
+ EQ = 0x00000000, // Zero
+ NE = 0x10000000, // Non-zero
+ CS = 0x20000000,
+ CC = 0x30000000,
+ MI = 0x40000000,
+ PL = 0x50000000,
+ VS = 0x60000000,
+ VC = 0x70000000,
+ HI = 0x80000000,
+ LS = 0x90000000,
+ GE = 0xa0000000,
+ LT = 0xb0000000,
+ GT = 0xc0000000,
+ LE = 0xd0000000,
+ AL = 0xe0000000
+ };
+
+ enum Condition : uint32_t {
+ Equal = EQ,
+ NotEqual = NE,
+ Above = HI,
+ AboveOrEqual = CS,
+ Below = CC,
+ BelowOrEqual = LS,
+ GreaterThan = GT,
+ GreaterThanOrEqual = GE,
+ LessThan = LT,
+ LessThanOrEqual = LE,
+ Overflow = VS,
+ CarrySet = CS,
+ CarryClear = CC,
+ Signed = MI,
+ NotSigned = PL,
+ Zero = EQ,
+ NonZero = NE,
+ Always = AL,
+
+ VFP_NotEqualOrUnordered = NE,
+ VFP_Equal = EQ,
+ VFP_Unordered = VS,
+ VFP_NotUnordered = VC,
+ VFP_GreaterThanOrEqualOrUnordered = CS,
+ VFP_GreaterThanOrEqual = GE,
+ VFP_GreaterThanOrUnordered = HI,
+ VFP_GreaterThan = GT,
+ VFP_LessThanOrEqualOrUnordered = LE,
+ VFP_LessThanOrEqual = LS,
+ VFP_LessThanOrUnordered = LT,
+ VFP_LessThan = CC // MI is valid too.
+ };
+
+ // Bit set when a DoubleCondition does not map to a single ARM condition.
+ // The macro assembler has to special-case these conditions, or else
+ // ConditionFromDoubleCondition will complain.
+ static const int DoubleConditionBitSpecial = 0x1;
+
+ enum DoubleCondition : uint32_t {
+ // These conditions will only evaluate to true if the comparison is
+ // ordered - i.e. neither operand is NaN.
+ DoubleOrdered = VFP_NotUnordered,
+ DoubleEqual = VFP_Equal,
+ DoubleNotEqual = VFP_NotEqualOrUnordered | DoubleConditionBitSpecial,
+ DoubleGreaterThan = VFP_GreaterThan,
+ DoubleGreaterThanOrEqual = VFP_GreaterThanOrEqual,
+ DoubleLessThan = VFP_LessThan,
+ DoubleLessThanOrEqual = VFP_LessThanOrEqual,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered = VFP_Unordered,
+ DoubleEqualOrUnordered = VFP_Equal | DoubleConditionBitSpecial,
+ DoubleNotEqualOrUnordered = VFP_NotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered = VFP_GreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered = VFP_GreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered = VFP_LessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered = VFP_LessThanOrEqualOrUnordered
+ };
+
+ Condition getCondition(uint32_t inst) {
+ return (Condition)(0xf0000000 & inst);
+ }
+ static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
+ MOZ_ASSERT(!(cond & DoubleConditionBitSpecial));
+ return static_cast<Condition>(cond);
+ }
+
+ enum BarrierOption {
+ BarrierSY = 15, // Full system barrier
+ BarrierST = 14 // StoreStore barrier
+ };
+
+ // This should be protected, but since CodeGenerator wants to use it, it
+ // needs to go out here :(
+
+ BufferOffset nextOffset() { return m_buffer.nextOffset(); }
+
+ protected:
+ // Shim around AssemblerBufferWithConstantPools::allocEntry.
+ BufferOffset allocLiteralLoadEntry(size_t numInst, unsigned numPoolEntries,
+ PoolHintPun& php, uint8_t* data,
+ const LiteralDoc& doc = LiteralDoc(),
+ ARMBuffer::PoolEntry* pe = nullptr,
+ bool loadToPC = false);
+
+ Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }
+
+#ifdef JS_DISASM_ARM
+ typedef disasm::EmbeddedVector<char, disasm::ReasonableBufferSize>
+ DisasmBuffer;
+
+ static void disassembleInstruction(const Instruction* i,
+ DisasmBuffer& buffer);
+
+ void initDisassembler();
+ void finishDisassembler();
+ void spew(Instruction* i);
+ void spewBranch(Instruction* i, const LabelDoc& target);
+ void spewLiteralLoad(PoolHintPun& php, bool loadToPC, const Instruction* offs,
+ const LiteralDoc& doc);
+#endif
+
+ public:
+ void resetCounter();
+ static uint32_t NopFill;
+ static uint32_t GetNopFill();
+ static uint32_t AsmPoolMaxOffset;
+ static uint32_t GetPoolMaxOffset();
+
+ protected:
+ // Structure for fixing up pc-relative loads/jumps when the machine code
+ // gets moved (executable copy, gc, etc.).
+ class RelativePatch {
+ void* target_;
+ RelocationKind kind_;
+
+ public:
+ RelativePatch(void* target, RelocationKind kind)
+ : target_(target), kind_(kind) {}
+ void* target() const { return target_; }
+ RelocationKind kind() const { return kind_; }
+ };
+
+ // TODO: this should actually be a pool-like object. It is currently a big
+ // hack, and probably shouldn't exist.
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+
+ ARMBuffer m_buffer;
+
+#ifdef JS_DISASM_ARM
+ DisassemblerSpew spew_;
+#endif
+
+ public:
+ // For the alignment fill use a NOP, 0xe320f000, i.e. (Always |
+ // InstNOP::NopInst). For the nopFill use a branch to the next instruction:
+ // 0xeaffffff.
+ Assembler()
+ : m_buffer(1, 1, 8, GetPoolMaxOffset(), 8, 0xe320f000, 0xeaffffff,
+ GetNopFill()),
+ isFinished(false),
+ dtmActive(false),
+ dtmCond(Always) {
+#ifdef JS_DISASM_ARM
+ initDisassembler();
+#endif
+ }
+
+ ~Assembler() {
+#ifdef JS_DISASM_ARM
+ finishDisassembler();
+#endif
+ }
+
+ void setUnlimitedBuffer() { m_buffer.setUnlimited(); }
+
+ static Condition InvertCondition(Condition cond);
+ static Condition UnsignedCondition(Condition cond);
+ static Condition ConditionWithoutEqual(Condition cond);
+
+ static DoubleCondition InvertCondition(DoubleCondition cond);
+
+ void writeDataRelocation(BufferOffset offset, ImmGCPtr ptr) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // Assembler::TraceDataRelocations.
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(offset.getOffset());
+ }
+ }
+
+ enum RelocBranchStyle { B_MOVWT, B_LDR_BX, B_LDR, B_MOVW_ADD };
+
+ enum RelocStyle { L_MOVWT, L_LDR };
+
+ public:
+ // Given the start of a control-flow sequence, grab the value that is
+ // finally branched to. Given the start of a sequence that loads an address
+ // into a register, get the address that ends up in the register.
+ template <class Iter>
+ static const uint32_t* GetCF32Target(Iter* iter);
+
+ static uintptr_t GetPointer(uint8_t*);
+ static const uint32_t* GetPtr32Target(InstructionIterator iter,
+ Register* dest = nullptr,
+ RelocStyle* rs = nullptr);
+
+ bool oom() const;
+
+ void setPrinter(Sprinter* sp) {
+#ifdef JS_DISASM_ARM
+ spew_.setPrinter(sp);
+#endif
+ }
+
+ Register getStackPointer() const { return StackPointer; }
+
+ private:
+ bool isFinished;
+
+ protected:
+ LabelDoc refLabel(const Label* label) {
+#ifdef JS_DISASM_ARM
+ return spew_.refLabel(label);
+#else
+ return LabelDoc();
+#endif
+ }
+
+ public:
+ void finish();
+ bool appendRawCode(const uint8_t* code, size_t numBytes);
+ bool reserve(size_t size);
+ bool swapBuffer(wasm::Bytes& bytes);
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+
+ // Size of the instruction stream, in bytes, after pools are flushed.
+ size_t size() const;
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const;
+ size_t dataRelocationTableBytes() const;
+
+ // Size of the data table, in bytes.
+ size_t bytesNeeded() const;
+
+ // Write a single instruction into the instruction stream. Very hot,
+ // inlined for performance.
+ MOZ_ALWAYS_INLINE BufferOffset writeInst(uint32_t x) {
+ MOZ_ASSERT(hasCreator());
+ BufferOffset offs = m_buffer.putInt(x);
+#ifdef JS_DISASM_ARM
+ spew(m_buffer.getInstOrNull(offs));
+#endif
+ return offs;
+ }
+
+ // As above, but also mark the instruction as a branch. Very hot, inlined
+ // for performance.
+ MOZ_ALWAYS_INLINE BufferOffset
+ writeBranchInst(uint32_t x, const LabelDoc& documentation) {
+ BufferOffset offs = m_buffer.putInt(x);
+#ifdef JS_DISASM_ARM
+ spewBranch(m_buffer.getInstOrNull(offs), documentation);
+#endif
+ return offs;
+ }
+
+ // Write a placeholder NOP for a branch into the instruction stream (in
+ // order to adjust assembler addresses and mark it as a branch); it will be
+ // overwritten subsequently.
+ BufferOffset allocBranchInst();
+
+ // A static variant for the cases where we don't want to have an assembler
+ // object.
+ static void WriteInstStatic(uint32_t x, uint32_t* dest);
+
+ public:
+ void writeCodePointer(CodeLabel* label);
+
+ void haltingAlign(int alignment);
+ void nopAlign(int alignment);
+ BufferOffset as_nop();
+ BufferOffset as_alu(Register dest, Register src1, Operand2 op2, ALUOp op,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_mov(Register dest, Operand2 op2, SBit s = LeaveCC,
+ Condition c = Always);
+ BufferOffset as_mvn(Register dest, Operand2 op2, SBit s = LeaveCC,
+ Condition c = Always);
+
+ static void as_alu_patch(Register dest, Register src1, Operand2 op2, ALUOp op,
+ SBit s, Condition c, uint32_t* pos);
+ static void as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c,
+ uint32_t* pos);
+
+ // Logical operations:
+ BufferOffset as_and(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_bic(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_eor(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_orr(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ // Reverse byte operations:
+ BufferOffset as_rev(Register dest, Register src, Condition c = Always);
+ BufferOffset as_rev16(Register dest, Register src, Condition c = Always);
+ BufferOffset as_revsh(Register dest, Register src, Condition c = Always);
+ // Mathematical operations:
+ BufferOffset as_adc(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_add(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_sbc(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_sub(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_rsb(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_rsc(Register dest, Register src1, Operand2 op2,
+ SBit s = LeaveCC, Condition c = Always);
+ // Test operations:
+ BufferOffset as_cmn(Register src1, Operand2 op2, Condition c = Always);
+ BufferOffset as_cmp(Register src1, Operand2 op2, Condition c = Always);
+ BufferOffset as_teq(Register src1, Operand2 op2, Condition c = Always);
+ BufferOffset as_tst(Register src1, Operand2 op2, Condition c = Always);
+
+ // Sign extension operations:
+ BufferOffset as_sxtb(Register dest, Register src, int rotate,
+ Condition c = Always);
+ BufferOffset as_sxth(Register dest, Register src, int rotate,
+ Condition c = Always);
+ BufferOffset as_uxtb(Register dest, Register src, int rotate,
+ Condition c = Always);
+ BufferOffset as_uxth(Register dest, Register src, int rotate,
+ Condition c = Always);
+
+ // Not quite ALU worthy, but useful nonetheless: these also have the issue
+ // of being formatted completely differently from the standard ALU
+ // operations.
+ BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always);
+ BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always);
+
+ static void as_movw_patch(Register dest, Imm16 imm, Condition c,
+ Instruction* pos);
+ static void as_movt_patch(Register dest, Imm16 imm, Condition c,
+ Instruction* pos);
+
+ BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
+ MULOp op, SBit s, Condition c = Always);
+ BufferOffset as_mul(Register dest, Register src1, Register src2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2,
+ SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_umaal(Register dest1, Register dest2, Register src1,
+ Register src2, Condition c = Always);
+ BufferOffset as_mls(Register dest, Register acc, Register src1, Register src2,
+ Condition c = Always);
+ BufferOffset as_umull(Register dest1, Register dest2, Register src1,
+ Register src2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_umlal(Register dest1, Register dest2, Register src1,
+ Register src2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_smull(Register dest1, Register dest2, Register src1,
+ Register src2, SBit s = LeaveCC, Condition c = Always);
+ BufferOffset as_smlal(Register dest1, Register dest2, Register src1,
+ Register src2, SBit s = LeaveCC, Condition c = Always);
+
+ BufferOffset as_sdiv(Register dest, Register num, Register div,
+ Condition c = Always);
+ BufferOffset as_udiv(Register dest, Register num, Register div,
+ Condition c = Always);
+ BufferOffset as_clz(Register dest, Register src, Condition c = Always);
+
+ // Data transfer instructions: ldr, str, ldrb, strb.
+ // Using an int to differentiate between 8 bits and 32 bits is overkill.
+ BufferOffset as_dtr(LoadStore ls, int size, Index mode, Register rt,
+ DTRAddr addr, Condition c = Always);
+
+ static void as_dtr_patch(LoadStore ls, int size, Index mode, Register rt,
+ DTRAddr addr, Condition c, uint32_t* dest);
+
+ // Handles all of the other integral data transferring functions:
+ // ldrsb, ldrsh, ldrd, etc. The size is given in bits.
+ BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
+ Register rt, EDtrAddr addr, Condition c = Always);
+
+ BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask, DTMMode mode,
+ DTMWriteBack wb, Condition c = Always);
+
+ // Overwrite a pool entry with new data.
+ static void WritePoolEntry(Instruction* addr, Condition c, uint32_t data);
+
+ // Load a 32 bit immediate from a pool into a register.
+ BufferOffset as_Imm32Pool(Register dest, uint32_t value,
+ Condition c = Always);
+
+ // Load a 64 bit floating point immediate from a pool into a register.
+ BufferOffset as_FImm64Pool(VFPRegister dest, double value,
+ Condition c = Always);
+ // Load a 32 bit floating point immediate from a pool into a register.
+ BufferOffset as_FImm32Pool(VFPRegister dest, float value,
+ Condition c = Always);
+
+ // Atomic instructions: ldrexd, ldrex, ldrexh, ldrexb, strexd, strex, strexh,
+ // strexb.
+ //
+ // The doubleword, halfword, and byte versions are available from ARMv6K
+ // forward.
+ //
+ // The word versions are available from ARMv6 forward and can be used to
+ // implement the halfword and byte versions on older systems.
+
+ // LDREXD rt, rt2, [rn]. Constraint: rt even register, rt2=rt+1.
+ BufferOffset as_ldrexd(Register rt, Register rt2, Register rn,
+ Condition c = Always);
+
+ // LDREX rt, [rn]
+ BufferOffset as_ldrex(Register rt, Register rn, Condition c = Always);
+ BufferOffset as_ldrexh(Register rt, Register rn, Condition c = Always);
+ BufferOffset as_ldrexb(Register rt, Register rn, Condition c = Always);
+
+ // STREXD rd, rt, rt2, [rn]. Constraint: rt even register, rt2=rt+1.
+ BufferOffset as_strexd(Register rd, Register rt, Register rt2, Register rn,
+ Condition c = Always);
+
+ // STREX rd, rt, [rn]. Constraint: rd != rn, rd != rt.
+ BufferOffset as_strex(Register rd, Register rt, Register rn,
+ Condition c = Always);
+ BufferOffset as_strexh(Register rd, Register rt, Register rn,
+ Condition c = Always);
+ BufferOffset as_strexb(Register rd, Register rt, Register rn,
+ Condition c = Always);
+
+ // CLREX
+ BufferOffset as_clrex();
+
+ // Memory synchronization.
+ // These are available from ARMv7 forward.
+ BufferOffset as_dmb(BarrierOption option = BarrierSY);
+ BufferOffset as_dsb(BarrierOption option = BarrierSY);
+ BufferOffset as_isb();
+
+ // Memory synchronization for architectures before ARMv7.
+ BufferOffset as_dsb_trap();
+ BufferOffset as_dmb_trap();
+ BufferOffset as_isb_trap();
+
+ // Speculation barrier
+ BufferOffset as_csdb();
+
+ // Control flow stuff:
+
+ // bx can *only* branch to a register, never to an immediate.
+ BufferOffset as_bx(Register r, Condition c = Always);
+
+ // Branch can branch to an immediate *or* to a register. Branches to
+ // immediates are pc relative, branches to registers are absolute.
+ BufferOffset as_b(BOffImm off, Condition c, Label* documentation = nullptr);
+
+ BufferOffset as_b(Label* l, Condition c = Always);
+ BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);
+
+ // blx can go to either an immediate or a register. When blx'ing to a
+ // register, we change processor mode depending on the low bit of the
+ // register; when blx'ing to an immediate, we *always* change processor
+ // state.
+ BufferOffset as_blx(Label* l);
+
+ BufferOffset as_blx(Register r, Condition c = Always);
+ BufferOffset as_bl(BOffImm off, Condition c, Label* documentation = nullptr);
+ // bl can only branch+link to an immediate, never to a register, and it
+ // never changes processor state.
+ BufferOffset as_bl();
+ // bl #imm can have a condition code, blx #imm cannot.
+ // blx reg can be conditional.
+ BufferOffset as_bl(Label* l, Condition c);
+ BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst);
+
+ BufferOffset as_mrs(Register r, Condition c = Always);
+ BufferOffset as_msr(Register r, Condition c = Always);
+
+ // VFP instructions!
+ private:
+ enum vfp_size { IsDouble = 1 << 8, IsSingle = 0 << 8 };
+
+ BufferOffset writeVFPInst(vfp_size sz, uint32_t blob);
+
+ static void WriteVFPInstStatic(vfp_size sz, uint32_t blob, uint32_t* dest);
+
+ // Unityped variants: all registers hold the same type (IEEE 754 single or
+ // double). Notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
+ BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ VFPOp op, Condition c = Always);
+
+ public:
+ BufferOffset as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c = Always);
+ BufferOffset as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c = Always);
+ BufferOffset as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c = Always);
+ BufferOffset as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c = Always);
+ BufferOffset as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c = Always);
+ BufferOffset as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c = Always);
+ BufferOffset as_vneg(VFPRegister vd, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vabs(VFPRegister vd, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+ Condition c = Always);
+ BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm, Condition c = Always);
+ BufferOffset as_vcmpz(VFPRegister vd, Condition c = Always);
+
+ // Specifically, a move between two same sized-registers.
+ BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);
+
+ // Transfer between Core and VFP.
+ enum FloatToCore_ { FloatToCore = 1 << 20, CoreToFloat = 0 << 20 };
+
+ private:
+ enum VFPXferSize { WordTransfer = 0x02000010, DoubleTransfer = 0x00400010 };
+
+ public:
+ // Unlike the next function, moving between the core registers and vfp
+ // registers can't be *that* properly typed, since I don't want to munge the
+ // type VFPRegister to also include core registers. Thus, the core and vfp
+ // registers are passed in based on their type, and src/dest is determined
+ // by the FloatToCore_ argument.
+
+ BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm,
+ FloatToCore_ f2c, Condition c = Always, int idx = 0);
+
+ // Our encoding actually allows just the src and the dest (and their types)
+ // to uniquely specify the encoding that we are going to use.
+ BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
+ Condition c = Always);
+
+ // Hard coded to a 32 bit fixed width result for now.
+ BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint,
+ bool toFixed, Condition c = Always);
+
+ // Transfer between VFP and memory.
+ BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
+ Condition c = Always /* vfp doesn't have a wb option*/);
+
+ static void as_vdtr_patch(LoadStore ls, VFPRegister vd, VFPAddr addr,
+ Condition c /* vfp doesn't have a wb option */,
+ uint32_t* dest);
+
+ // VFP's ldm/stm work differently from the standard arm ones. You can only
+ // transfer a range.
+
+ BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
+ /* also has update conditions */ Condition c = Always);
+
+ // vldr/vstr variants that handle unaligned accesses. These encode as NEON
+ // single-element instructions and can only be used if NEON is available.
+ // Here, vd must be tagged as a float or double register.
+ BufferOffset as_vldr_unaligned(VFPRegister vd, Register rn);
+ BufferOffset as_vstr_unaligned(VFPRegister vd, Register rn);
+
+ BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);
+
+ BufferOffset as_vmrs(Register r, Condition c = Always);
+ BufferOffset as_vmsr(Register r, Condition c = Always);
+
+ // Label operations.
+ bool nextLink(BufferOffset b, BufferOffset* next);
+ void bind(Label* label, BufferOffset boff = BufferOffset());
+ uint32_t currentOffset() { return nextOffset().getOffset(); }
+ void retarget(Label* label, Label* target);
+ // I'm going to pretend this doesn't exist for now.
+ void retarget(Label* label, void* target, RelocationKind reloc);
+
+ static void Bind(uint8_t* rawCode, const CodeLabel& label);
+
+ void as_bkpt();
+ BufferOffset as_illegal_trap();
+
+ public:
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ void assertNoGCThings() const {
+#ifdef DEBUG
+ MOZ_ASSERT(dataRelocations_.length() == 0);
+ for (auto& j : jumps_) {
+ MOZ_ASSERT(j.kind() == RelocationKind::HARDCODED);
+ }
+#endif
+ }
+
+ static bool SupportsFloatingPoint() { return HasVFP(); }
+ static bool SupportsUnalignedAccesses() { return HasARMv7(); }
+ // Note, returning false here is technically wrong, but one has to go via the
+ // as_vldr_unaligned and as_vstr_unaligned instructions to get proper behavior
+ // and those are NEON-specific and have to be asked for specifically.
+ static bool SupportsFastUnalignedFPAccesses() { return false; }
+
+ static bool HasRoundInstruction(RoundingMode mode) { return false; }
+
+ protected:
+ void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(target.value, kind));
+ if (kind == RelocationKind::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+ }
+
+ public:
+ // The buffer is about to be linked; make sure any constant pools or excess
+ // bookkeeping have been flushed to the instruction stream.
+ void flush() {
+ MOZ_ASSERT(!isFinished);
+ m_buffer.flushPool();
+ return;
+ }
+
+ void comment(const char* msg) {
+#ifdef JS_DISASM_ARM
+ spew_.spew("; %s", msg);
+#endif
+ }
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ // Actual assembly emitting functions.
+
+ // Since I can't think of a reasonable default for the mode, I'm going to
+ // leave it as a required argument.
+ void startDataTransferM(LoadStore ls, Register rm, DTMMode mode,
+ DTMWriteBack update = NoWriteBack,
+ Condition c = Always) {
+ MOZ_ASSERT(!dtmActive);
+ dtmUpdate = update;
+ dtmBase = rm;
+ dtmLoadStore = ls;
+ dtmLastReg = -1;
+ dtmRegBitField = 0;
+ dtmActive = 1;
+ dtmCond = c;
+ dtmMode = mode;
+ }
+
+ void transferReg(Register rn) {
+ MOZ_ASSERT(dtmActive);
+ MOZ_ASSERT(rn.code() > dtmLastReg);
+ dtmRegBitField |= 1 << rn.code();
+ if (dtmLoadStore == IsLoad && rn.code() == 13 && dtmBase.code() == 13) {
+ MOZ_CRASH("ARM Spec says this is invalid");
+ }
+ }
+ void finishDataTransfer() {
+ dtmActive = false;
+ as_dtm(dtmLoadStore, dtmBase, dtmRegBitField, dtmMode, dtmUpdate, dtmCond);
+ }
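+
+ // Illustrative use of the three calls above (a sketch): emitting
+ // "stmdb sp!, {r4, r5, lr}" would look like
+ //
+ //   masm.startDataTransferM(IsStore, sp, DB, WriteBack);
+ //   masm.transferReg(r4);
+ //   masm.transferReg(r5);
+ //   masm.transferReg(lr);
+ //   masm.finishDataTransfer();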
+
+ void startFloatTransferM(LoadStore ls, Register rm, DTMMode mode,
+ DTMWriteBack update = NoWriteBack,
+ Condition c = Always) {
+ MOZ_ASSERT(!dtmActive);
+ dtmActive = true;
+ dtmUpdate = update;
+ dtmLoadStore = ls;
+ dtmBase = rm;
+ dtmCond = c;
+ dtmLastReg = -1;
+ dtmMode = mode;
+ dtmDelta = 0;
+ }
+ void transferFloatReg(VFPRegister rn) {
+ if (dtmLastReg == -1) {
+ vdtmFirstReg = rn.code();
+ } else {
+ if (dtmDelta == 0) {
+ dtmDelta = rn.code() - dtmLastReg;
+ MOZ_ASSERT(dtmDelta == 1 || dtmDelta == -1);
+ }
+ MOZ_ASSERT(dtmLastReg >= 0);
+ MOZ_ASSERT(rn.code() == unsigned(dtmLastReg) + dtmDelta);
+ }
+
+ dtmLastReg = rn.code();
+ }
+ void finishFloatTransfer() {
+ MOZ_ASSERT(dtmActive);
+ dtmActive = false;
+ MOZ_ASSERT(dtmLastReg != -1);
+ dtmDelta = dtmDelta ? dtmDelta : 1;
+ // The operand for the vstr/vldr instruction is the lowest register in the
+ // range.
+ int low = std::min(dtmLastReg, vdtmFirstReg);
+ int high = std::max(dtmLastReg, vdtmFirstReg);
+ // Fencepost problem.
+ int len = high - low + 1;
+    // vdtm can only transfer 16 registers at once. If we need to transfer
+    // more, the transfer has to be split into several instructions, which in
+    // turn requires write-back so the base register advances between them.
+ MOZ_ASSERT_IF(len > 16, dtmUpdate == WriteBack);
+
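+    // Worked example (illustrative): storing d0..d19 takes two passes. The
+    // first pass has curLen == 16 and curStart == 19 - 16 + 1 == 4, storing
+    // d4-d19; then high drops to 3 and len to 4, so the second pass stores
+    // d0-d3. Loads instead walk upward from the low end of the range.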
+ int adjustLow = dtmLoadStore == IsStore ? 0 : 1;
+ int adjustHigh = dtmLoadStore == IsStore ? -1 : 0;
+ while (len > 0) {
+ // Limit the instruction to 16 registers.
+ int curLen = std::min(len, 16);
+ // If it is a store, we want to start at the high end and move down
+ // (e.g. vpush d16-d31; vpush d0-d15).
+ int curStart = (dtmLoadStore == IsStore) ? high - curLen + 1 : low;
+ as_vdtm(dtmLoadStore, dtmBase,
+ VFPRegister(FloatRegister::FromCode(curStart)), curLen, dtmCond);
+ // Update the bounds.
+ low += adjustLow * curLen;
+ high += adjustHigh * curLen;
+ // Update the length parameter.
+ len -= curLen;
+ }
+ }
+
+ private:
+ int dtmRegBitField;
+ int vdtmFirstReg;
+ int dtmLastReg;
+ int dtmDelta;
+ Register dtmBase;
+ DTMWriteBack dtmUpdate;
+ DTMMode dtmMode;
+ LoadStore dtmLoadStore;
+ bool dtmActive;
+ Condition dtmCond;
+
+ public:
+ enum {
+ PadForAlign8 = (int)0x00,
+ PadForAlign16 = (int)0x0000,
+ PadForAlign32 = (int)0xe12fff7f // 'bkpt 0xffff'
+ };
+
+  // API for speaking with the IonAssemblerBufferWithConstantPools: generate an
+  // initial placeholder instruction that we want to fix up later.
+ static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+
+ // Take the stub value that was written in before, and write in an actual
+ // load using the index we'd computed previously as well as the address of
+ // the pool start.
+ static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+ // We're not tracking short-range branches for ARM for now.
+ static void PatchShortRangeBranchToVeneer(ARMBuffer*, unsigned rangeIdx,
+ BufferOffset deadline,
+ BufferOffset veneer) {
+ MOZ_CRASH();
+ }
+ // END API
+
+  // Move our entire pool into the instruction stream. This forces an
+  // opportunistic dump of the pool, preferably when it is more convenient to
+  // do a dump.
+ void flushBuffer();
+ void enterNoPool(size_t maxInst);
+ void leaveNoPool();
+ void enterNoNops();
+ void leaveNoNops();
+
+ static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
+ static void WritePoolGuard(BufferOffset branch, Instruction* inst,
+ BufferOffset dest);
+
+ static uint32_t PatchWrite_NearCallSize();
+ static uint32_t NopSize() { return 4; }
+ static void PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+
+ static uint32_t AlignDoubleArg(uint32_t offset) { return (offset + 1) & ~1; }
+ static uint8_t* NextInstruction(uint8_t* instruction,
+ uint32_t* count = nullptr);
+
+ // Toggle a jmp or cmp emitted by toggledJump().
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+
+ static size_t ToggledCallSize(uint8_t* code);
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+
+ void processCodeLabels(uint8_t* rawCode);
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess) {
+ // Implement this if we implement a disassembler.
+ }
+}; // Assembler
+
+// An Instruction is a structure for both encoding and decoding any and all ARM
+// instructions. Many classes have not been implemented thus far.
+class Instruction {
+ uint32_t data;
+
+ protected:
+  // This is not for defaulting to Always; it is for instructions that cannot
+  // be made conditional and use the normally invalid 0b1111 cond field.
+ explicit Instruction(uint32_t data_, bool fake = false)
+ : data(data_ | 0xf0000000) {
+ MOZ_ASSERT(fake || ((data_ & 0xf0000000) == 0));
+ }
+ // Standard constructor.
+ Instruction(uint32_t data_, Assembler::Condition c)
+ : data(data_ | (uint32_t)c) {
+ MOZ_ASSERT((data_ & 0xf0000000) == 0);
+ }
+ // You should never create an instruction directly. You should create a more
+ // specific instruction which will eventually call one of these constructors
+ // for you.
+ public:
+ uint32_t encode() const { return data; }
+ // Check if this instruction is really a particular case.
+ template <class C>
+ bool is() const {
+ return C::IsTHIS(*this);
+ }
+
+ // Safely get a more specific variant of this pointer.
+ template <class C>
+ C* as() const {
+ return C::AsTHIS(*this);
+ }
+
+ const Instruction& operator=(Instruction src) {
+ data = src.data;
+ return *this;
+ }
+ // Since almost all instructions have condition codes, the condition code
+ // extractor resides in the base class.
+ Assembler::Condition extractCond() const {
+ MOZ_ASSERT(data >> 28 != 0xf,
+ "The instruction does not have condition code");
+ return (Assembler::Condition)(data & 0xf0000000);
+ }
+
+  // Sometimes, an API wants a uint32_t (or a pointer to it) rather than an
+  // instruction. raw() just coerces this into a pointer to a uint32_t.
+ const uint32_t* raw() const { return &data; }
+ uint32_t size() const { return 4; }
+}; // Instruction
+
+// Make sure that it is the right size.
+static_assert(sizeof(Instruction) == 4);
+
+inline void InstructionIterator::advanceRaw(ptrdiff_t instructions) {
+ inst_ = inst_ + instructions;
+}
+
+// Data Transfer Instructions.
+class InstDTR : public Instruction {
+ public:
+ enum IsByte_ { IsByte = 0x00400000, IsWord = 0x00000000 };
+ static const int IsDTR = 0x04000000;
+ static const int IsDTRMask = 0x0c000000;
+
+ // TODO: Replace the initialization with something that is safer.
+ InstDTR(LoadStore ls, IsByte_ ib, Index mode, Register rt, DTRAddr addr,
+ Assembler::Condition c)
+ : Instruction(std::underlying_type_t<LoadStore>(ls) |
+ std::underlying_type_t<IsByte_>(ib) |
+ std::underlying_type_t<Index>(mode) | RT(rt) |
+ addr.encode() | IsDTR,
+ c) {}
+
+ static bool IsTHIS(const Instruction& i);
+ static InstDTR* AsTHIS(const Instruction& i);
+};
+static_assert(sizeof(InstDTR) == sizeof(Instruction));
+
+class InstLDR : public InstDTR {
+ public:
+ InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
+ : InstDTR(IsLoad, IsWord, mode, rt, addr, c) {}
+
+ static bool IsTHIS(const Instruction& i);
+ static InstLDR* AsTHIS(const Instruction& i);
+
+ int32_t signedOffset() const {
+ int32_t offset = encode() & 0xfff;
+ if (IsUp_(encode() & IsUp) != IsUp) {
+ return -offset;
+ }
+ return offset;
+ }
+ uint32_t* dest() const {
+ int32_t offset = signedOffset();
+    // When patching the load in PatchConstantPoolLoad, we ensure that the
+    // offset is a multiple of 4 and is relative to a point 8 bytes past the
+    // actual location. Indeed, when the base register is PC, ARM's three-stage
+    // pipeline design means PC reads as the instruction's address plus 8 bytes
+    // (= 2 * sizeof(uint32_t*)) by the time the instruction executes.
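+    // For example (illustrative): an ldr at address A with an encoded offset
+    // of 16 reads from A + 8 + 16, since raw() points at A and we add
+    // offset / 4 + 2 words below.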
+ MOZ_ASSERT(offset % 4 == 0);
+ offset >>= 2;
+ return (uint32_t*)raw() + offset + 2;
+ }
+};
+static_assert(sizeof(InstDTR) == sizeof(InstLDR));
+
+class InstNOP : public Instruction {
+ public:
+ static const uint32_t NopInst = 0x0320f000;
+
+ InstNOP() : Instruction(NopInst, Assembler::Always) {}
+
+ static bool IsTHIS(const Instruction& i);
+ static InstNOP* AsTHIS(Instruction& i);
+};
+
+// Branching to a register, or calling a register
+class InstBranchReg : public Instruction {
+ protected:
+ // Don't use BranchTag yourself, use a derived instruction.
+ enum BranchTag { IsBX = 0x012fff10, IsBLX = 0x012fff30 };
+
+ static const uint32_t IsBRegMask = 0x0ffffff0;
+
+ InstBranchReg(BranchTag tag, Register rm, Assembler::Condition c)
+ : Instruction(tag | rm.code(), c) {}
+
+ public:
+ static bool IsTHIS(const Instruction& i);
+ static InstBranchReg* AsTHIS(const Instruction& i);
+
+ // Get the register that is being branched to
+ void extractDest(Register* dest);
+ // Make sure we are branching to a pre-known register
+ bool checkDest(Register dest);
+};
+static_assert(sizeof(InstBranchReg) == sizeof(Instruction));
+
+// Branching to an immediate offset, or calling an immediate offset
+class InstBranchImm : public Instruction {
+ protected:
+ enum BranchTag { IsB = 0x0a000000, IsBL = 0x0b000000 };
+
+ static const uint32_t IsBImmMask = 0x0f000000;
+
+ InstBranchImm(BranchTag tag, BOffImm off, Assembler::Condition c)
+ : Instruction(tag | off.encode(), c) {}
+
+ public:
+ static bool IsTHIS(const Instruction& i);
+ static InstBranchImm* AsTHIS(const Instruction& i);
+
+ void extractImm(BOffImm* dest);
+};
+static_assert(sizeof(InstBranchImm) == sizeof(Instruction));
+
+// Very specific branching instructions.
+class InstBXReg : public InstBranchReg {
+ public:
+ static bool IsTHIS(const Instruction& i);
+ static InstBXReg* AsTHIS(const Instruction& i);
+};
+
+class InstBLXReg : public InstBranchReg {
+ public:
+ InstBLXReg(Register reg, Assembler::Condition c)
+ : InstBranchReg(IsBLX, reg, c) {}
+
+ static bool IsTHIS(const Instruction& i);
+ static InstBLXReg* AsTHIS(const Instruction& i);
+};
+
+class InstBImm : public InstBranchImm {
+ public:
+ InstBImm(BOffImm off, Assembler::Condition c) : InstBranchImm(IsB, off, c) {}
+
+ static bool IsTHIS(const Instruction& i);
+ static InstBImm* AsTHIS(const Instruction& i);
+};
+
+class InstBLImm : public InstBranchImm {
+ public:
+ InstBLImm(BOffImm off, Assembler::Condition c)
+ : InstBranchImm(IsBL, off, c) {}
+
+ static bool IsTHIS(const Instruction& i);
+ static InstBLImm* AsTHIS(const Instruction& i);
+};
+
+// Both movw and movt. The layout of both the immediate and the destination
+// register is the same so the code is being shared.
+class InstMovWT : public Instruction {
+ protected:
+ enum WT { IsW = 0x03000000, IsT = 0x03400000 };
+ static const uint32_t IsWTMask = 0x0ff00000;
+
+ InstMovWT(Register rd, Imm16 imm, WT wt, Assembler::Condition c)
+ : Instruction(RD(rd) | imm.encode() | wt, c) {}
+
+ public:
+ void extractImm(Imm16* dest);
+ void extractDest(Register* dest);
+ bool checkImm(Imm16 dest);
+ bool checkDest(Register dest);
+
+ static bool IsTHIS(Instruction& i);
+ static InstMovWT* AsTHIS(Instruction& i);
+};
+static_assert(sizeof(InstMovWT) == sizeof(Instruction));
+
+class InstMovW : public InstMovWT {
+ public:
+ InstMovW(Register rd, Imm16 imm, Assembler::Condition c)
+ : InstMovWT(rd, imm, IsW, c) {}
+
+ static bool IsTHIS(const Instruction& i);
+ static InstMovW* AsTHIS(const Instruction& i);
+};
+
+class InstMovT : public InstMovWT {
+ public:
+ InstMovT(Register rd, Imm16 imm, Assembler::Condition c)
+ : InstMovWT(rd, imm, IsT, c) {}
+
+ static bool IsTHIS(const Instruction& i);
+ static InstMovT* AsTHIS(const Instruction& i);
+};
+
+class InstALU : public Instruction {
+ static const int32_t ALUMask = 0xc << 24;
+
+ public:
+ InstALU(Register rd, Register rn, Operand2 op2, ALUOp op, SBit s,
+ Assembler::Condition c)
+ : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | s, c) {}
+
+ static bool IsTHIS(const Instruction& i);
+ static InstALU* AsTHIS(const Instruction& i);
+
+ void extractOp(ALUOp* ret);
+ bool checkOp(ALUOp op);
+ void extractDest(Register* ret);
+ bool checkDest(Register rd);
+ void extractOp1(Register* ret);
+ bool checkOp1(Register rn);
+ Operand2 extractOp2();
+};
+
+class InstCMP : public InstALU {
+ public:
+ static bool IsTHIS(const Instruction& i);
+ static InstCMP* AsTHIS(const Instruction& i);
+};
+
+class InstMOV : public InstALU {
+ public:
+ static bool IsTHIS(const Instruction& i);
+ static InstMOV* AsTHIS(const Instruction& i);
+};
+
+// Compile-time iterator over instructions, with a safe interface that
+// references not-necessarily-linear Instructions by linear BufferOffset.
+class BufferInstructionIterator
+ : public ARMBuffer::AssemblerBufferInstIterator {
+ public:
+ BufferInstructionIterator(BufferOffset bo, ARMBuffer* buffer)
+ : ARMBuffer::AssemblerBufferInstIterator(bo, buffer) {}
+
+ // Advances the buffer to the next intentionally-inserted instruction.
+ Instruction* next() {
+ advance(cur()->size());
+ maybeSkipAutomaticInstructions();
+ return cur();
+ }
+
+ // Advances the BufferOffset past any automatically-inserted instructions.
+ Instruction* maybeSkipAutomaticInstructions();
+};
+
+static const uint32_t NumIntArgRegs = 4;
+
+// There are 16 *float* registers available for arguments.
+// If doubles are used, only half that number of registers is available.
+static const uint32_t NumFloatArgRegs = 16;
+
+static inline bool GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs,
+ Register* out) {
+ if (usedIntArgs >= NumIntArgRegs) {
+ return false;
+ }
+
+ *out = Register::FromCode(usedIntArgs);
+ return true;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, once we run out of
+// actual argument registers, we fall back on whichever CallTempReg* registers
+// do not overlap the argument registers, and only fail once those run out too.
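+// For example (illustrative): with usedIntArgs == 5, GetIntArgReg fails since
+// the four ARM argument registers are exhausted, so the function below returns
+// CallTempNonArgRegs[1].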
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) {
+ return true;
+ }
+
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_SIMULATOR_ARM)
+
+static inline bool GetFloat32ArgReg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs,
+ FloatRegister* out) {
+ MOZ_ASSERT(UseHardFpABI());
+ if (usedFloatArgs >= NumFloatArgRegs) {
+ return false;
+ }
+ *out = VFPRegister(usedFloatArgs, VFPRegister::Single);
+ return true;
+}
+static inline bool GetDoubleArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs,
+ FloatRegister* out) {
+ MOZ_ASSERT(UseHardFpABI());
+ MOZ_ASSERT((usedFloatArgs % 2) == 0);
+ if (usedFloatArgs >= NumFloatArgRegs) {
+ return false;
+ }
+ *out = VFPRegister(usedFloatArgs >> 1, VFPRegister::Double);
+ return true;
+}
+
+#endif
+
+class DoubleEncoder {
+ struct DoubleEntry {
+ uint32_t dblTop;
+ datastore::Imm8VFPImmData data;
+ };
+
+ static const DoubleEntry table[256];
+
+ public:
+ bool lookup(uint32_t top, datastore::Imm8VFPImmData* ret) const {
+ for (int i = 0; i < 256; i++) {
+ if (table[i].dblTop == top) {
+ *ret = table[i].data;
+ return true;
+ }
+ }
+ return false;
+ }
+};
+
+// Forbids nop filling for testing purposes. Not nestable.
+class AutoForbidNops {
+ protected:
+ Assembler* masm_;
+
+ public:
+ explicit AutoForbidNops(Assembler* masm) : masm_(masm) {
+ masm_->enterNoNops();
+ }
+ ~AutoForbidNops() { masm_->leaveNoNops(); }
+};
+
+class AutoForbidPoolsAndNops : public AutoForbidNops {
+ public:
+  // The maxInst argument is the maximum number of word-sized instructions
+  // that will be allocated within this context. It is used to determine if
+  // the pool needs to be dumped before entering this context. The debug code
+  // checks that no more than maxInst instructions are actually allocated.
+  //
+  // Allocation of pool entries is not supported within this context, so the
+  // code cannot use large integers, float constants, etc.
+ AutoForbidPoolsAndNops(Assembler* masm, size_t maxInst)
+ : AutoForbidNops(masm) {
+ masm_->enterNoPool(maxInst);
+ }
+
+ ~AutoForbidPoolsAndNops() { masm_->leaveNoPool(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_Assembler_arm_h */
diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp
new file mode 100644
index 0000000000..1526be81c9
--- /dev/null
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -0,0 +1,3154 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/CodeGenerator-arm.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+
+#include <iterator>
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using JS::ToInt32;
+using mozilla::DebugOnly;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+
+// shared
+CodeGeneratorARM::CodeGeneratorARM(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {}
+
+Register64 CodeGeneratorARM::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToRegister64(input);
+}
+
+void CodeGeneratorARM::emitBranch(Assembler::Condition cond,
+ MBasicBlock* mirTrue, MBasicBlock* mirFalse) {
+ if (isNextBlock(mirFalse->lir())) {
+ jumpToBlock(mirTrue, cond);
+ } else {
+ jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+}
+
+void OutOfLineBailout::accept(CodeGeneratorARM* codegen) {
+ codegen->visitOutOfLineBailout(this);
+}
+
+void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
+ const LAllocation* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ // Test the operand
+ masm.as_cmp(ToRegister(opd), Imm8(0));
+
+ if (isNextBlock(ifFalse->lir())) {
+ jumpToBlock(ifTrue, Assembler::NonZero);
+ } else if (isNextBlock(ifTrue->lir())) {
+ jumpToBlock(ifFalse, Assembler::Zero);
+ } else {
+ jumpToBlock(ifFalse, Assembler::Zero);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitCompare(LCompare* comp) {
+ Assembler::Condition cond =
+ JSOpToCondition(comp->mir()->compareType(), comp->jsop());
+ const LAllocation* left = comp->getOperand(0);
+ const LAllocation* right = comp->getOperand(1);
+ const LDefinition* def = comp->getDef(0);
+
+ ScratchRegisterScope scratch(masm);
+
+ if (right->isConstant()) {
+ masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)), scratch);
+ } else if (right->isRegister()) {
+ masm.ma_cmp(ToRegister(left), ToRegister(right));
+ } else {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)), scratch, scratch2);
+ }
+ masm.ma_mov(Imm32(0), ToRegister(def));
+ masm.ma_mov(Imm32(1), ToRegister(def), cond);
+}
+
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+ Assembler::Condition cond =
+ JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop());
+ const LAllocation* left = comp->left();
+ const LAllocation* right = comp->right();
+
+ ScratchRegisterScope scratch(masm);
+
+ if (right->isConstant()) {
+ masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)), scratch);
+ } else if (right->isRegister()) {
+ masm.ma_cmp(ToRegister(left), ToRegister(right));
+ } else {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)), scratch, scratch2);
+ }
+ emitBranch(cond, comp->ifTrue(), comp->ifFalse());
+}
+
+bool CodeGeneratorARM::generateOutOfLineCode() {
+ if (!CodeGeneratorShared::generateOutOfLineCode()) {
+ return false;
+ }
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+ masm.push(Imm32(frameSize()));
+
+ TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.jump(handler);
+ }
+
+ return !masm.oom();
+}
+
+void CodeGeneratorARM::bailoutIf(Assembler::Condition condition,
+ LSnapshot* snapshot) {
+ encode(snapshot);
+
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool =
+ new (alloc()) OutOfLineBailout(snapshot, masm.framePushed());
+
+ // All bailout code is associated with the bytecodeSite of the block we are
+ // bailing out from.
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.ma_b(ool->entry(), condition);
+}
+
+void CodeGeneratorARM::bailoutFrom(Label* label, LSnapshot* snapshot) {
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool =
+ new (alloc()) OutOfLineBailout(snapshot, masm.framePushed());
+
+ // All bailout code is associated with the bytecodeSite of the block we are
+ // bailing out from.
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void CodeGeneratorARM::bailout(LSnapshot* snapshot) {
+ Label label;
+ masm.ma_b(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+void CodeGeneratorARM::visitOutOfLineBailout(OutOfLineBailout* ool) {
+ masm.push(Imm32(ool->snapshot()->snapshotOffset()));
+ masm.ma_b(&deoptLabel_);
+}
+
+void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxDouble(second, first, true);
+ } else {
+ masm.minDouble(second, first, true);
+ }
+}
+
+void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxFloat32(second, first, true);
+ } else {
+ masm.minFloat32(second, first, true);
+ }
+}
+
+void CodeGenerator::visitAddI(LAddI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ ScratchRegisterScope scratch(masm);
+
+ if (rhs->isConstant()) {
+ masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch,
+ SetCC);
+ } else if (rhs->isRegister()) {
+ masm.ma_add(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
+ } else {
+ masm.ma_add(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest),
+ SetCC);
+ }
+
+ if (ins->snapshot()) {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ }
+}
+
+void CodeGenerator::visitAddI64(LAddI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitSubI(LSubI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ ScratchRegisterScope scratch(masm);
+
+ if (rhs->isConstant()) {
+ masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch,
+ SetCC);
+ } else if (rhs->isRegister()) {
+ masm.ma_sub(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
+ } else {
+ masm.ma_sub(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest),
+ SetCC);
+ }
+
+ if (ins->snapshot()) {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ }
+}
+
+void CodeGenerator::visitSubI64(LSubI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitMulI(LMulI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+ MMul* mul = ins->mir();
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
+ !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ // Bailout when this condition is met.
+ Assembler::Condition c = Assembler::Overflow;
+ // Bailout on -0.0
+ int32_t constant = ToInt32(rhs);
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition bailoutCond =
+ (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+ masm.as_cmp(ToRegister(lhs), Imm8(0));
+ bailoutIf(bailoutCond, ins->snapshot());
+ }
+ // TODO: move these to ma_mul.
+ switch (constant) {
+ case -1:
+ masm.as_rsb(ToRegister(dest), ToRegister(lhs), Imm8(0), SetCC);
+ break;
+ case 0:
+ masm.ma_mov(Imm32(0), ToRegister(dest));
+ return; // Escape overflow check;
+ case 1:
+ // Nop
+ masm.ma_mov(ToRegister(lhs), ToRegister(dest));
+ return; // Escape overflow check;
+ case 2:
+ masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCC);
+ // Overflow is handled later.
+ break;
+ default: {
+ bool handled = false;
+ if (constant > 0) {
+ // Try shift and add sequences for a positive constant.
+ if (!mul->canOverflow()) {
+ // If it cannot overflow, we can do lots of optimizations.
+ Register src = ToRegister(lhs);
+ uint32_t shift = FloorLog2(constant);
+ uint32_t rest = constant - (1 << shift);
+ // See if the constant has one bit set, meaning it can be
+ // encoded as a bitshift.
+ if ((1 << shift) == constant) {
+ masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
+ handled = true;
+ } else {
+ // If the constant cannot be encoded as (1 << C1), see
+ // if it can be encoded as (1 << C1) | (1 << C2), which
+ // can be computed using an add and a shift.
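+              // For example (illustrative): constant == 10 gives shift == 3
+              // and rest == 2, so shift_rest == 1 and we emit
+              // dest = src + (src << 2) (i.e. src * 5) followed by a left
+              // shift by 1, yielding src * 10.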
+ uint32_t shift_rest = FloorLog2(rest);
+ if ((1u << shift_rest) == rest) {
+ masm.as_add(ToRegister(dest), src,
+ lsl(src, shift - shift_rest));
+ if (shift_rest != 0) {
+ masm.ma_lsl(Imm32(shift_rest), ToRegister(dest),
+ ToRegister(dest));
+ }
+ handled = true;
+ }
+ }
+ } else if (ToRegister(lhs) != ToRegister(dest)) {
+ // To stay on the safe side, only optimize things that are a
+ // power of 2.
+
+ uint32_t shift = FloorLog2(constant);
+ if ((1 << shift) == constant) {
+ // dest = lhs * pow(2,shift)
+ masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
+ // At runtime, check (lhs == dest >> shift), if this
+ // does not hold, some bits were lost due to overflow,
+ // and the computation should be resumed as a double.
+ masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
+ c = Assembler::NotEqual;
+ handled = true;
+ }
+ }
+ }
+
+ if (!handled) {
+ ScratchRegisterScope scratch(masm);
+ if (mul->canOverflow()) {
+ c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)),
+ ToRegister(dest), scratch, c);
+ } else {
+ masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest),
+ scratch);
+ }
+ }
+ }
+ }
+ // Bailout on overflow.
+ if (mul->canOverflow()) {
+ bailoutIf(c, ins->snapshot());
+ }
+ } else {
+ Assembler::Condition c = Assembler::Overflow;
+
+ if (mul->canOverflow()) {
+ ScratchRegisterScope scratch(masm);
+ c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest),
+ scratch, c);
+ } else {
+ masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
+ }
+
+ // Bailout on overflow.
+ if (mul->canOverflow()) {
+ bailoutIf(c, ins->snapshot());
+ }
+
+ if (mul->canBeNegativeZero()) {
+ Label done;
+ masm.as_cmp(ToRegister(dest), Imm8(0));
+ masm.ma_b(&done, Assembler::NotEqual);
+
+ // Result is -0 if lhs or rhs is negative.
+ masm.ma_cmn(ToRegister(lhs), ToRegister(rhs));
+ bailoutIf(Assembler::Signed, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
+
+void CodeGenerator::visitMulI64(LMulI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+
+ MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ case 2:
+ masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ default:
+ if (constant > 0) {
+ // Use shift if constant is power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
+
+void CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs,
+ Register output, LSnapshot* snapshot,
+ Label& done) {
+ ScratchRegisterScope scratch(masm);
+
+ if (mir->canBeNegativeOverflow()) {
+ // Handle INT32_MIN / -1;
+ // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
+
+ // Sets EQ if lhs == INT32_MIN.
+ masm.ma_cmp(lhs, Imm32(INT32_MIN), scratch);
+ // If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
+ masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
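+    // Note that the second compare only executes when the first one set EQ,
+    // so EQ being set afterwards means lhs == INT32_MIN and rhs == -1.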
+ if (mir->canTruncateOverflow()) {
+ if (mir->trapOnError()) {
+ Label ok;
+ masm.ma_b(&ok, Assembler::NotEqual);
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+ masm.bind(&ok);
+ } else {
+ // (-INT32_MIN)|0 = INT32_MIN
+ Label skip;
+ masm.ma_b(&skip, Assembler::NotEqual);
+ masm.ma_mov(Imm32(INT32_MIN), output);
+ masm.ma_b(&done);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, snapshot);
+ }
+ }
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ masm.as_cmp(rhs, Imm8(0));
+ if (mir->canTruncateInfinities()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(&nonZero, Assembler::NotEqual);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ // Infinity|0 == 0
+ Label skip;
+ masm.ma_b(&skip, Assembler::NotEqual);
+ masm.ma_mov(Imm32(0), output);
+ masm.ma_b(&done);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, snapshot);
+ }
+ }
+
+ // Handle negative 0.
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.as_cmp(lhs, Imm8(0));
+ masm.ma_b(&nonzero, Assembler::NotEqual);
+ masm.as_cmp(rhs, Imm8(0));
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::LessThan, snapshot);
+ masm.bind(&nonzero);
+ }
+}
+
+void CodeGenerator::visitDivI(LDivI* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register temp = ToRegister(ins->getTemp(0));
+ Register output = ToRegister(ins->output());
+ MDiv* mir = ins->mir();
+
+ Label done;
+ divICommon(mir, lhs, rhs, output, ins->snapshot(), done);
+
+ if (mir->canTruncateRemainder()) {
+ masm.ma_sdiv(lhs, rhs, output);
+ } else {
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_sdiv(lhs, rhs, temp);
+ masm.ma_mul(temp, rhs, scratch);
+ masm.ma_cmp(lhs, scratch);
+ }
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+ masm.ma_mov(temp, output);
+ }
+
+ masm.bind(&done);
+}
+
+extern "C" {
+extern MOZ_EXPORT int64_t __aeabi_idivmod(int, int);
+extern MOZ_EXPORT int64_t __aeabi_uidivmod(int, int);
+}
+
+void CodeGenerator::visitSoftDivI(LSoftDivI* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ MDiv* mir = ins->mir();
+
+ Label done;
+ divICommon(mir, lhs, rhs, output, ins->snapshot(), done);
+
+ if (gen->compilingWasm()) {
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::aeabi_idivmod,
+ mozilla::Some(instanceOffset));
+ masm.Pop(InstanceReg);
+ } else {
+ using Fn = int64_t (*)(int, int);
+ masm.setupAlignedABICall();
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ masm.callWithABI<Fn, __aeabi_idivmod>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+ }
+
+ // idivmod returns the quotient in r0, and the remainder in r1.
+ if (!mir->canTruncateRemainder()) {
+ MOZ_ASSERT(mir->fallible());
+ masm.as_cmp(r1, Imm8(0));
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
+ MDiv* mir = ins->mir();
+ Register lhs = ToRegister(ins->numerator());
+ Register output = ToRegister(ins->output());
+ int32_t shift = ins->shift();
+
+ if (shift == 0) {
+ masm.ma_mov(lhs, output);
+ return;
+ }
+
+ if (!mir->isTruncated()) {
+ // If the remainder is != 0, bailout since this must be a double.
+ {
+ // The bailout code also needs the scratch register.
+ // Here it is only used as a dummy target to set CC flags.
+ ScratchRegisterScope scratch(masm);
+ masm.as_mov(scratch, lsl(lhs, 32 - shift), SetCC);
+ }
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ if (!mir->canBeNegativeDividend()) {
+    // The numerator is known to be non-negative, so it needs no adjusting.
+    // Do the shift.
+ masm.as_mov(output, asr(lhs, shift));
+ return;
+ }
+
+ // Adjust the value so that shifting produces a correctly rounded result
+ // when the numerator is negative. See 10-1 "Signed Division by a Known
+ // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
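+  // Worked example (illustrative): for lhs == -7 and shift == 2, the
+  // adjustment computes -7 + ((-7 >> 31) >>> 30) == -7 + 3 == -4, and
+  // -4 >> 2 == -1, matching the truncated quotient of -7 / 4.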
+ ScratchRegisterScope scratch(masm);
+
+ if (shift > 1) {
+ masm.as_mov(scratch, asr(lhs, 31));
+ masm.as_add(scratch, lhs, lsr(scratch, 32 - shift));
+ } else {
+ masm.as_add(scratch, lhs, lsr(lhs, 32 - shift));
+ }
+
+ // Do the shift.
+ masm.as_mov(output, asr(scratch, shift));
+}
+
+void CodeGeneratorARM::modICommon(MMod* mir, Register lhs, Register rhs,
+ Register output, LSnapshot* snapshot,
+ Label& done) {
+ // X % 0 is bad because it will give garbage (or abort), when it should give
+ // NaN.
+
+ if (mir->canBeDivideByZero()) {
+ masm.as_cmp(rhs, Imm8(0));
+ if (mir->isTruncated()) {
+ Label nonZero;
+ masm.ma_b(&nonZero, Assembler::NotEqual);
+ if (mir->trapOnError()) {
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ } else {
+ // NaN|0 == 0
+ masm.ma_mov(Imm32(0), output);
+ masm.ma_b(&done);
+ }
+ masm.bind(&nonZero);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, snapshot);
+ }
+ }
+}
+
+void CodeGenerator::visitModI(LModI* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ MMod* mir = ins->mir();
+
+ // Contrary to other architectures (notably x86) INT_MIN % -1 doesn't need to
+ // be handled separately. |ma_smod| computes the remainder using the |SDIV|
+ // and the |MLS| instructions. On overflow, |SDIV| truncates the result to
+ // 32-bit and returns INT_MIN, see ARM Architecture Reference Manual, SDIV
+ // instruction.
+ //
+ // mls(INT_MIN, sdiv(INT_MIN, -1), -1)
+ // = INT_MIN - (sdiv(INT_MIN, -1) * -1)
+ // = INT_MIN - (INT_MIN * -1)
+ // = INT_MIN - INT_MIN
+ // = 0
+ //
+ // And a zero remainder with a negative dividend is already handled below.
+
+ Label done;
+ modICommon(mir, lhs, rhs, output, ins->snapshot(), done);
+
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_smod(lhs, rhs, output, scratch);
+ }
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ // See if X < 0
+ masm.as_cmp(output, Imm8(0));
+ masm.ma_b(&done, Assembler::NotEqual);
+ masm.as_cmp(lhs, Imm8(0));
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitSoftModI(LSoftModI* ins) {
+ // Extract the registers from this instruction.
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Register callTemp = ToRegister(ins->callTemp());
+ MMod* mir = ins->mir();
+ Label done;
+
+ // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs <
+ // 0.
+ MOZ_ASSERT(callTemp != lhs);
+ MOZ_ASSERT(callTemp != rhs);
+ masm.ma_mov(lhs, callTemp);
+
+ // Prevent INT_MIN % -1.
+ //
+ // |aeabi_idivmod| is allowed to return any arbitrary value when called with
+ // |(INT_MIN, -1)|, see "Run-time ABI for the ARM architecture manual". Most
+ // implementations perform a non-trapping signed integer division and
+ // return the expected result, i.e. INT_MIN. But since we can't rely on this
+ // behavior, handle this case separately here.
+ if (mir->canBeNegativeDividend()) {
+ {
+ ScratchRegisterScope scratch(masm);
+ // Sets EQ if lhs == INT_MIN
+ masm.ma_cmp(lhs, Imm32(INT_MIN), scratch);
+ // If EQ (LHS == INT_MIN), sets EQ if rhs == -1
+ masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
+ }
+ if (mir->isTruncated()) {
+ // (INT_MIN % -1)|0 == 0
+ Label skip;
+ masm.ma_b(&skip, Assembler::NotEqual);
+ masm.ma_mov(Imm32(0), output);
+ masm.ma_b(&done);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, ins->snapshot());
+ }
+ }
+
+ modICommon(mir, lhs, rhs, output, ins->snapshot(), done);
+
+ if (gen->compilingWasm()) {
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::aeabi_idivmod,
+ mozilla::Some(instanceOffset));
+ masm.Pop(InstanceReg);
+ } else {
+ using Fn = int64_t (*)(int, int);
+ masm.setupAlignedABICall();
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ masm.callWithABI<Fn, __aeabi_idivmod>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+ }
+
+ MOZ_ASSERT(r1 != output);
+ masm.move32(r1, output);
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ // See if X < 0
+ masm.as_cmp(output, Imm8(0));
+ masm.ma_b(&done, Assembler::NotEqual);
+ masm.as_cmp(callTemp, Imm8(0));
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
+ Register in = ToRegister(ins->getOperand(0));
+ Register out = ToRegister(ins->getDef(0));
+ MMod* mir = ins->mir();
+ Label fin;
+ // bug 739870, jbramley has a different sequence that may help with speed
+ // here.
+
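+  // Worked example (illustrative): for in == -5 and shift == 2, the value is
+  // negated to 5, masked to 1, and negated back to -1, matching JS -5 % 4.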
+ masm.ma_mov(in, out, SetCC);
+ masm.ma_b(&fin, Assembler::Zero);
+ masm.as_rsb(out, out, Imm8(0), LeaveCC, Assembler::Signed);
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_and(Imm32((1 << ins->shift()) - 1), out, scratch);
+ }
+ masm.as_rsb(out, out, Imm8(0), SetCC, Assembler::Signed);
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+ masm.bind(&fin);
+}
+
+void CodeGenerator::visitModMaskI(LModMaskI* ins) {
+ Register src = ToRegister(ins->getOperand(0));
+ Register dest = ToRegister(ins->getDef(0));
+ Register tmp1 = ToRegister(ins->getTemp(0));
+ Register tmp2 = ToRegister(ins->getTemp(1));
+ MMod* mir = ins->mir();
+
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+
+ masm.ma_mod_mask(src, dest, tmp1, tmp2, scratch, scratch2, ins->shift());
+
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+}
+
+void CodeGeneratorARM::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ if (HasIDIV()) {
+ masm.ma_sdiv(dividend, divisor, /* result= */ dividend);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+
+ return;
+ }
+
+ // idivmod returns the quotient in r0, and the remainder in r1.
+ MOZ_ASSERT(dividend == r0);
+ MOZ_ASSERT(divisor == r1);
+
+ LiveRegisterSet volatileRegs = liveVolatileRegs(ins);
+ volatileRegs.takeUnchecked(dividend);
+ volatileRegs.takeUnchecked(divisor);
+ volatileRegs.takeUnchecked(output);
+
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = int64_t (*)(int, int);
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(dividend);
+ masm.passABIArg(divisor);
+ masm.callWithABI<Fn, __aeabi_idivmod>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.PopRegsInMask(volatileRegs);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorARM::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ if (HasIDIV()) {
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_smod(dividend, divisor, /* result= */ dividend, scratch);
+ }
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+
+ return;
+ }
+
+ // idivmod returns the quotient in r0, and the remainder in r1.
+ MOZ_ASSERT(dividend == r0);
+ MOZ_ASSERT(divisor == r1);
+
+ LiveRegisterSet volatileRegs = liveVolatileRegs(ins);
+ volatileRegs.takeUnchecked(dividend);
+ volatileRegs.takeUnchecked(divisor);
+ volatileRegs.takeUnchecked(output);
+
+ masm.PushRegsInMask(volatileRegs);
+
+ using Fn = int64_t (*)(int, int);
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(dividend);
+ masm.passABIArg(divisor);
+ masm.callWithABI<Fn, __aeabi_idivmod>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.PopRegsInMask(volatileRegs);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, dividend, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, divisor);
+}
+
+void CodeGenerator::visitBitNotI(LBitNotI* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ const LDefinition* dest = ins->getDef(0);
+  // This will not actually be true on ARM. We cannot use an imm8m here in
+  // order to get a wider range of numbers.
+ MOZ_ASSERT(!input->isConstant());
+
+ masm.ma_mvn(ToRegister(input), ToRegister(dest));
+}
+
+void CodeGenerator::visitBitOpI(LBitOpI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ ScratchRegisterScope scratch(masm);
+
+ // All of these bitops should be either imm32's, or integer registers.
+ switch (ins->bitop()) {
+ case JSOp::BitOr:
+ if (rhs->isConstant()) {
+ masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest),
+ scratch);
+ } else {
+ masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
+ }
+ break;
+ case JSOp::BitXor:
+ if (rhs->isConstant()) {
+ masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest),
+ scratch);
+ } else {
+ masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
+ }
+ break;
+ case JSOp::BitAnd:
+ if (rhs->isConstant()) {
+ masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest),
+ scratch);
+ } else {
+ masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitShiftI(LShiftI* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.ma_lsl(Imm32(shift), lhs, dest);
+ } else {
+ masm.ma_mov(lhs, dest);
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.ma_asr(Imm32(shift), lhs, dest);
+ } else {
+ masm.ma_mov(lhs, dest);
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.ma_lsr(Imm32(shift), lhs, dest);
+ } else {
+ // x >>> 0 can overflow.
+ masm.ma_mov(lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+ masm.as_cmp(dest, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+ // The shift amounts should be AND'ed into the 0-31 range since arm
+ // shifts by the lower byte of the register (it will attempt to shift by
+ // 250 if you ask it to).
+ masm.as_and(dest, ToRegister(rhs), Imm8(0x1F));
+
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ masm.ma_lsl(dest, lhs, dest);
+ break;
+ case JSOp::Rsh:
+ masm.ma_asr(dest, lhs, dest);
+ break;
+ case JSOp::Ursh:
+ masm.ma_lsr(dest, lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ masm.as_cmp(dest, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void CodeGenerator::visitUrshD(LUrshD* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp());
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ if (shift) {
+ masm.ma_lsr(Imm32(shift), lhs, temp);
+ } else {
+ masm.ma_mov(lhs, temp);
+ }
+ } else {
+ masm.as_and(temp, ToRegister(rhs), Imm8(0x1F));
+ masm.ma_lsr(temp, lhs, temp);
+ }
+
+ masm.convertUInt32ToDouble(temp, out);
+}
+
+void CodeGenerator::visitClzI(LClzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.clz32(input, output, /* knownNotZero = */ false);
+}
+
+void CodeGenerator::visitCtzI(LCtzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ctz32(input, output, /* knownNotZero = */ false);
+}
+
+void CodeGenerator::visitPopcntI(LPopcntI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ Register tmp = ToRegister(ins->temp0());
+
+ masm.popcnt32(input, output, tmp);
+}
+
+void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ ScratchDoubleScope scratch(masm);
+
+ Label done;
+
+  // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
+ masm.compareDouble(input, scratch);
+ masm.ma_vneg(scratch, output, Assembler::Equal);
+ masm.ma_b(&done, Assembler::Equal);
+
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
+ masm.loadConstantDouble(0.0, scratch);
+ masm.ma_vadd(scratch, input, output);
+ masm.ma_vsqrt(output, output);
+
+ masm.bind(&done);
+}
+
+MoveOperand CodeGeneratorARM::toMoveOperand(LAllocation a) const {
+ if (a.isGeneralReg()) {
+ return MoveOperand(ToRegister(a));
+ }
+ if (a.isFloatReg()) {
+ return MoveOperand(ToFloatRegister(a));
+ }
+ MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
+ : MoveOperand::Kind::Memory;
+ Address addr = ToAddress(a);
+ MOZ_ASSERT((addr.offset & 3) == 0);
+ return MoveOperand(addr, kind);
+}
+
+class js::jit::OutOfLineTableSwitch
+ : public OutOfLineCodeBase<CodeGeneratorARM> {
+ MTableSwitch* mir_;
+ Vector<CodeLabel, 8, JitAllocPolicy> codeLabels_;
+
+ void accept(CodeGeneratorARM* codegen) override {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(TempAllocator& alloc, MTableSwitch* mir)
+ : mir_(mir), codeLabels_(alloc) {}
+
+ MTableSwitch* mir() const { return mir_; }
+
+ bool addCodeLabel(CodeLabel label) { return codeLabels_.append(label); }
+ CodeLabel codeLabel(unsigned i) { return codeLabels_[i]; }
+};
+
+void CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) {
+ MTableSwitch* mir = ool->mir();
+
+ size_t numCases = mir->numCases();
+ for (size_t i = 0; i < numCases; i++) {
+ LBlock* caseblock =
+ skipTrivialBlocks(mir->getCase(numCases - 1 - i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl = ool->codeLabel(i);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch* mir,
+ Register index, Register base) {
+ // The code generated by this is utter hax.
+ // The end result looks something like:
+ // SUBS index, input, #base
+ // RSBSPL index, index, #max
+ // LDRPL pc, pc, index lsl 2
+ // B default
+
+  // If the range of targets is N through M, we first subtract off the lowest
+  // case (N), which both shifts the argument into the range 0 to (M - N)
+  // and sets the minus flag if the argument was out of range on the low
+  // end.
+
+  // Then we do a reverse subtract with the size of the jump table, which
+  // reverses the order of the range (it becomes size through 0 rather than 0
+  // through size). The main purpose of this is that it sets the same flag for
+  // the upper bound check as the lower bound check did. Lastly, we do this
+  // conditionally on the previous check succeeding.
+
+ // Then we conditionally load the pc offset by the (reversed) index (times
+ // the address size) into the pc, which branches to the correct case. NOTE:
+ // when we go to read the pc, the value that we get back is the pc of the
+ // current instruction *PLUS 8*. This means that ldr foo, [pc, +0] reads
+ // $pc+8. In other words, there is an empty word after the branch into the
+  // switch table before the table actually starts. Since the only other
+  // unhandled case is the default case (both out of range high and out of
+  // range low), I then insert a branch to the default case into the extra
+  // slot, which ensures we don't attempt to execute the address table.
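+  // Worked example (illustrative): with mir->low() == 10 and four cases, an
+  // input of 12 becomes 2 after the subtract and 1 after the reverse subtract;
+  // since the out-of-line table is emitted in reverse case order (see
+  // visitOutOfLineTableSwitch above), entry 1 is exactly case 12's block.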
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+ ScratchRegisterScope scratch(masm);
+
+ int32_t cases = mir->numCases();
+  // Subtract off the lowest case value.
+ masm.ma_sub(index, Imm32(mir->low()), index, scratch, SetCC);
+ masm.ma_rsb(index, Imm32(cases - 1), index, scratch, SetCC,
+ Assembler::NotSigned);
+ // Inhibit pools within the following sequence because we are indexing into
+ // a pc relative table. The region will have one instruction for ma_ldr, one
+ // for ma_b, and each table case takes one word.
+ AutoForbidPoolsAndNops afp(&masm, 1 + 1 + cases);
+ masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset,
+ Assembler::NotSigned);
+ masm.ma_b(defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first generate
+ // the case entries (we don't yet know their offsets in the instruction
+ // stream).
+ OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(alloc(), mir);
+ for (int32_t i = 0; i < cases; i++) {
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ masm.propagateOOM(ool->addCodeLabel(cl));
+ }
+ addOutOfLineCode(ool, mir);
+}
+
+void CodeGenerator::visitMathD(LMathD* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.ma_vadd(src1, src2, output);
+ break;
+ case JSOp::Sub:
+ masm.ma_vsub(src1, src2, output);
+ break;
+ case JSOp::Mul:
+ masm.ma_vmul(src1, src2, output);
+ break;
+ case JSOp::Div:
+ masm.ma_vdiv(src1, src2, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitMathF(LMathF* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.ma_vadd_f32(src1, src2, output);
+ break;
+ case JSOp::Sub:
+ masm.ma_vsub_f32(src1, src2, output);
+ break;
+ case JSOp::Mul:
+ masm.ma_vmul_f32(src1, src2, output);
+ break;
+ case JSOp::Div:
+ masm.ma_vdiv_f32(src1, src2, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* ins) {
+ emitTruncateDouble(ToFloatRegister(ins->getOperand(0)),
+ ToRegister(ins->getDef(0)), ins->mir());
+}
+
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* ins) {
+ emitTruncateFloat32(ToFloatRegister(ins->getOperand(0)),
+ ToRegister(ins->getDef(0)), ins->mir());
+}
+
+ValueOperand CodeGeneratorARM::ToValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand CodeGeneratorARM::ToTempValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ const ValueOperand out = ToOutValue(value);
+
+ masm.moveValue(value->value(), out);
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ MOZ_ASSERT(!box->getOperand(0)->isConstant());
+
+ // On arm, the input operand and the output payload have the same virtual
+ // register. All that needs to be written is the type tag for the type
+ // definition.
+ masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
+}
+
+void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
+ const AnyRegister in = ToAnyRegister(box->getOperand(0));
+ const ValueOperand out = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), in), out);
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ MUnbox* mir = unbox->mir();
+ Register type = ToRegister(unbox->type());
+ Register payload = ToRegister(unbox->payload());
+ Register output = ToRegister(unbox->output());
+
+ mozilla::Maybe<ScratchRegisterScope> scratch;
+ scratch.emplace(masm);
+
+ JSValueTag tag = MIRTypeToTag(mir->type());
+ if (mir->fallible()) {
+ masm.ma_cmp(type, Imm32(tag), *scratch);
+ bailoutIf(Assembler::NotEqual, unbox->snapshot());
+ } else {
+#ifdef DEBUG
+ Label ok;
+ masm.ma_cmp(type, Imm32(tag), *scratch);
+ masm.ma_b(&ok, Assembler::Equal);
+ scratch.reset();
+ masm.assumeUnreachable("Infallible unbox type mismatch");
+ masm.bind(&ok);
+#endif
+ }
+
+  // Note: If spectreValueMasking is disabled, then this instruction will
+  // default to a no-op as long as the lowering allocates the same register for
+  // the output and the payload.
+ masm.unboxNonDouble(ValueOperand(type, payload), output,
+ ValueTypeFromMIRType(mir->type()));
+}
+
+void CodeGenerator::visitDouble(LDouble* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitFloat32(LFloat32* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGeneratorARM::splitTagForTest(const ValueOperand& value,
+ ScratchTagScope& tag) {
+ MOZ_ASSERT(value.typeReg() == tag);
+}
+
+void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
+ const LAllocation* opd = test->input();
+ masm.ma_vcmpz(ToFloatRegister(opd));
+ masm.as_vmrs(pc);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+  // If the compare set the Z (zero) flag, then the result is definitely false.
+ jumpToBlock(ifFalse, Assembler::Zero);
+  // It is also false if one of the operands is NaN, which is reported as
+  // Overflow.
+ jumpToBlock(ifFalse, Assembler::Overflow);
+ jumpToBlock(ifTrue);
+}
+
+void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
+ const LAllocation* opd = test->input();
+ masm.ma_vcmpz_f32(ToFloatRegister(opd));
+ masm.as_vmrs(pc);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+  // If the compare set the Z (zero) flag, then the result is definitely false.
+ jumpToBlock(ifFalse, Assembler::Zero);
+  // It is also false if one of the operands is NaN, which is reported as
+  // Overflow.
+ jumpToBlock(ifFalse, Assembler::Overflow);
+ jumpToBlock(ifTrue);
+}
+
+void CodeGenerator::visitCompareD(LCompareD* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.compareDouble(lhs, rhs);
+ masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
+ ToRegister(comp->output()));
+}
+
+void CodeGenerator::visitCompareF(LCompareF* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.compareFloat(lhs, rhs);
+ masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
+ ToRegister(comp->output()));
+}
+
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ masm.compareDouble(lhs, rhs);
+ emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
+ comp->ifFalse());
+}
+
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ masm.compareFloat(lhs, rhs);
+ emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
+ comp->ifFalse());
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
+ // LBitAndAndBranch only represents single-word ANDs, hence it can't be
+ // 64-bit here.
+ MOZ_ASSERT(!baab->is64());
+ Register regL = ToRegister(baab->left());
+ if (baab->right()->isConstant()) {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_tst(regL, Imm32(ToInt32(baab->right())), scratch);
+ } else {
+ masm.ma_tst(regL, ToRegister(baab->right()));
+ }
+ emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ masm.convertUInt32ToDouble(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitNotI(LNotI* ins) {
+ // It is hard to optimize !x, so just do it the basic way for now.
+ masm.as_cmp(ToRegister(ins->input()), Imm8(0));
+ masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
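+  // A 64-bit value is zero exactly when the OR of its two halves is zero.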
+ masm.ma_orr(input.low, input.high, output);
+ masm.as_cmp(output, Imm8(0));
+ masm.emitSet(Assembler::Equal, output);
+}
+
+void CodeGenerator::visitNotD(LNotD* ins) {
+  // Since this operation is a logical not, we want to set a bit if the double
+  // is falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input
+  // of 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
+ FloatRegister opd = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ // Do the compare.
+ masm.ma_vcmpz(opd);
+ // TODO There are three variations here to compare performance-wise.
+ bool nocond = true;
+ if (nocond) {
+ // Load the value into the dest register.
+ masm.as_vmrs(dest);
+ masm.ma_lsr(Imm32(28), dest, dest);
+    // 28 + 2 = 30: the extra shift by 2 brings the Z bit (bit 30) down to
+    // bit 0 so it can be ORed with the V bit.
+ masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
+ masm.as_and(dest, dest, Imm8(1));
+ } else {
+ masm.as_vmrs(pc);
+ masm.ma_mov(Imm32(0), dest);
+ masm.ma_mov(Imm32(1), dest, Assembler::Equal);
+ masm.ma_mov(Imm32(1), dest, Assembler::Overflow);
+ }
+}
+
+void CodeGenerator::visitNotF(LNotF* ins) {
+  // Since this operation is a logical not, we want to set a bit if the float
+  // is falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input
+  // of 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
+ FloatRegister opd = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ // Do the compare.
+ masm.ma_vcmpz_f32(opd);
+ // TODO There are three variations here to compare performance-wise.
+ bool nocond = true;
+ if (nocond) {
+ // Load the value into the dest register.
+ masm.as_vmrs(dest);
+ masm.ma_lsr(Imm32(28), dest, dest);
+    // 28 + 2 = 30: the extra shift by 2 brings the Z bit (bit 30) down to
+    // bit 0 so it can be ORed with the V bit.
+ masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
+ masm.as_and(dest, dest, Imm8(1));
+ } else {
+ masm.as_vmrs(pc);
+ masm.ma_mov(Imm32(0), dest);
+ masm.ma_mov(Imm32(1), dest, Assembler::Equal);
+ masm.ma_mov(Imm32(1), dest, Assembler::Overflow);
+ }
+}
+
+void CodeGeneratorARM::generateInvalidateEpilogue() {
+ // Ensure that there is enough space in the buffer for the OsiPoint patching
+ // to occur. Otherwise, we could overwrite the invalidation epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
+ masm.nop();
+ }
+
+ masm.bind(&invalidate_);
+
+ // Push the return address of the point that we bailed out at onto the stack.
+ masm.Push(lr);
+
+ // Push the Ion script onto the stack (when we determine what that pointer
+ // is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+ // Jump to the invalidator which will replace the current frame.
+ TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+ masm.jump(thunk);
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+ LCompareExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp =
+ lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, temp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, temp, output);
+ }
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+ LAtomicExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp =
+ lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
+ output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
+ output);
+ }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+ LAtomicTypedArrayElementBinop* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->temp1());
+ Register outTemp =
+ lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, flagTemp, outTemp,
+ output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, flagTemp, outTemp,
+ output);
+ }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+ LAtomicTypedArrayElementBinopForEffect* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->flagTemp());
+ Register value = ToRegister(lir->value());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, flagTemp);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, flagTemp);
+ }
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.atomicLoad64(Synchronization::Load(), source, temp64);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.atomicLoad64(Synchronization::Load(), source, temp64);
+ }
+
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
+ }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register64 temp3 = ToRegister64(lir->temp3());
+ Register out = ToRegister(lir->output());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(oldval, temp1);
+ masm.loadBigInt64(newval, temp2);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, temp2, temp3);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, temp2, temp3);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp3, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+ Register out = ToRegister(lir->output());
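+  // Reuse the output register as the low half of a temporary 64-bit pair.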
+ Register64 temp64 = Register64(temp2, out);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(value, temp64);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchange64(Synchronization::Full(), dest, temp64, temp1);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchange64(Synchronization::Full(), dest, temp64, temp1);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp1, out, temp2);
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register64 temp3 = ToRegister64(lir->temp3());
+ Register out = ToRegister(lir->output());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest, temp2,
+ temp3);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest, temp2,
+ temp3);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp3, out, temp2.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ }
+}
+
+void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ masm.as_cmp(cond, Imm8(0));
+
+ if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
+ Register falseExpr = ToRegister(ins->falseExpr());
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+ masm.ma_mov(falseExpr, out, LeaveCC, Assembler::Zero);
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+
+ FloatRegister falseExpr = ToFloatRegister(ins->falseExpr());
+
+ if (mirType == MIRType::Double) {
+ masm.moveDouble(falseExpr, out, Assembler::Zero);
+ } else if (mirType == MIRType::Float32) {
+ masm.moveFloat32(falseExpr, out, Assembler::Zero);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+}
+
+// We expect to handle only the case where compare is {U,}Int32 and select is
+// {U,}Int32, and the "true" input is reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+ ins->compareType() == MCompare::Compare_UInt32;
+ bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+ MOZ_RELEASE_ASSERT(
+ cmpIs32bit && selIs32bit,
+ "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+ Register trueExprAndDest = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+ "true expr input is reused for output");
+
+ Assembler::Condition cond = Assembler::InvertCondition(
+ JSOpToCondition(ins->compareType(), ins->jsop()));
+ const LAllocation* rhs = ins->rightExpr();
+ const LAllocation* falseExpr = ins->ifFalseExpr();
+ Register lhs = ToRegister(ins->leftExpr());
+
+ masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+}
+
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+ DebugOnly<MIRType> from = ins->input()->type();
+
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(static_cast<MIRType>(from) == MIRType::Float32);
+ masm.ma_vxfer(ToFloatRegister(lir->input()), ToRegister(lir->output()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(static_cast<MIRType>(from) == MIRType::Int32);
+ masm.ma_vxfer(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
+ const MAsmJSLoadHeap* mir = ins->mir();
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->accessType()) {
+ case Scalar::Int8:
+ isSigned = true;
+ size = 8;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ size = 8;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ size = 16;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ size = 16;
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ isSigned = true;
+ size = 32;
+ break;
+ case Scalar::Float64:
+ isFloat = true;
+ size = 64;
+ break;
+ case Scalar::Float32:
+ isFloat = true;
+ size = 32;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+ if (isFloat) {
+ ScratchRegisterScope scratch(masm);
+ VFPRegister vd(ToFloatRegister(ins->output()));
+ if (size == 32) {
+ masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), scratch,
+ Assembler::Always);
+ } else {
+ masm.ma_vldr(Address(HeapReg, ptrImm), vd, scratch, Assembler::Always);
+ }
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
+ ToRegister(ins->output()), scratch, Offset,
+ Assembler::Always);
+ }
+ } else {
+ Register ptrReg = ToRegister(ptr);
+ if (isFloat) {
+ FloatRegister output = ToFloatRegister(ins->output());
+ if (size == 32) {
+ output = output.singleOverlay();
+ }
+
+ Assembler::Condition cond = Assembler::Always;
+ if (mir->needsBoundsCheck()) {
+ Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
+ masm.as_cmp(ptrReg, O2Reg(boundsCheckLimitReg));
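+        // Out-of-bounds asm.js float loads yield NaN instead of trapping.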
+ if (size == 32) {
+ masm.ma_vimm_f32(GenericNaN(), output, Assembler::AboveOrEqual);
+ } else {
+ masm.ma_vimm(GenericNaN(), output, Assembler::AboveOrEqual);
+ }
+ cond = Assembler::Below;
+ }
+
+ ScratchRegisterScope scratch(masm);
+ masm.ma_vldr(output, HeapReg, ptrReg, scratch, 0, cond);
+ } else {
+ Register output = ToRegister(ins->output());
+
+ Assembler::Condition cond = Assembler::Always;
+ if (mir->needsBoundsCheck()) {
+ Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
+ masm.as_cmp(ptrReg, O2Reg(boundsCheckLimitReg));
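+        // Out-of-bounds asm.js integer loads yield 0 instead of trapping.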
+ masm.ma_mov(Imm32(0), output, Assembler::AboveOrEqual);
+ cond = Assembler::Below;
+ }
+
+ ScratchRegisterScope scratch(masm);
+ masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output,
+ scratch, Offset, cond);
+ }
+ }
+}
+
+void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
+ MOZ_ASSERT(ins->instance()->isBogus());
+ masm.movePtr(HeapReg, ToRegister(ins->output()));
+}
+
+template <typename T>
+void CodeGeneratorARM::emitWasmLoad(T* lir) {
+ const MWasmLoad* mir = lir->mir();
+ MIRType resultType = mir->type();
+ Register ptr;
+
+ if (mir->access().offset() || mir->access().type() == Scalar::Int64) {
+ ptr = ToRegister(lir->ptrCopy());
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ ptr = ToRegister(lir->ptr());
+ }
+
+ if (resultType == MIRType::Int64) {
+ masm.wasmLoadI64(mir->access(), HeapReg, ptr, ptr, ToOutRegister64(lir));
+ } else {
+ masm.wasmLoad(mir->access(), HeapReg, ptr, ptr,
+ ToAnyRegister(lir->output()));
+ }
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) { emitWasmLoad(lir); }
+
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
+ ScratchRegisterScope scratch(masm);
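+  // Add the constant offset. A carry out means the effective address
+  // overflowed; the branch below then traps as out-of-bounds.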
+ masm.ma_add(base, Imm32(mir->offset()), out, scratch, SetCC);
+ OutOfLineAbortingWasmTrap* ool = new (alloc())
+ OutOfLineAbortingWasmTrap(mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
+ addOutOfLineCode(ool, mir);
+ masm.ma_b(ool->entry(), Assembler::CarrySet);
+}
+
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register64 base = ToRegister64(lir->base());
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(base.low != out.high && base.high != out.low);
+
+ ScratchRegisterScope scratch(masm);
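+  // Add the 64-bit offset with carry. A carry out of the high word means
+  // the address overflowed; the branch below then traps as out-of-bounds.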
+ masm.ma_add(base.low, Imm32(mir->offset()), out.low, scratch, SetCC);
+ masm.ma_adc(base.high, Imm32(mir->offset() >> 32), out.high, scratch, SetCC);
+ OutOfLineAbortingWasmTrap* ool = new (alloc())
+ OutOfLineAbortingWasmTrap(mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
+ addOutOfLineCode(ool, mir);
+ masm.ma_b(ool->entry(), Assembler::CarrySet);
+}
+
+template <typename T>
+void CodeGeneratorARM::emitWasmStore(T* lir) {
+ const MWasmStore* mir = lir->mir();
+ Scalar::Type accessType = mir->access().type();
+ Register ptr;
+
+ // Maybe add the offset.
+ if (mir->access().offset() || accessType == Scalar::Int64) {
+ ptr = ToRegister(lir->ptrCopy());
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ ptr = ToRegister(lir->ptr());
+ }
+
+ if (accessType == Scalar::Int64) {
+ masm.wasmStoreI64(mir->access(),
+ ToRegister64(lir->getInt64Operand(lir->ValueIndex)),
+ HeapReg, ptr, ptr);
+ } else {
+ masm.wasmStore(mir->access(),
+ ToAnyRegister(lir->getOperand(lir->ValueIndex)), HeapReg,
+ ptr, ptr);
+ }
+}
+
+void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+ emitWasmStore(lir);
+}
+
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
+ const MAsmJSStoreHeap* mir = ins->mir();
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->accessType()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ isSigned = false;
+ size = 8;
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ isSigned = false;
+ size = 16;
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ isSigned = true;
+ size = 32;
+ break;
+ case Scalar::Float64:
+ isFloat = true;
+ size = 64;
+ break;
+ case Scalar::Float32:
+ isFloat = true;
+ size = 32;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+ if (isFloat) {
+ VFPRegister vd(ToFloatRegister(ins->value()));
+ Address addr(HeapReg, ptrImm);
+ if (size == 32) {
+ masm.storeFloat32(vd, addr);
+ } else {
+ masm.storeDouble(vd, addr);
+ }
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
+ ToRegister(ins->value()), scratch, Offset,
+ Assembler::Always);
+ }
+ } else {
+ Register ptrReg = ToRegister(ptr);
+
+ Assembler::Condition cond = Assembler::Always;
+ if (mir->needsBoundsCheck()) {
+ Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
+ masm.as_cmp(ptrReg, O2Reg(boundsCheckLimitReg));
+ cond = Assembler::Below;
+ }
+
+ if (isFloat) {
+ ScratchRegisterScope scratch(masm);
+ FloatRegister value = ToFloatRegister(ins->value());
+ if (size == 32) {
+ value = value.singleOverlay();
+ }
+
+ masm.ma_vstr(value, HeapReg, ptrReg, scratch, 0, Assembler::Below);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ Register value = ToRegister(ins->value());
+ masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value,
+ scratch, Offset, cond);
+ }
+ }
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MWasmCompareExchangeHeap* mir = ins->mir();
+
+ const LAllocation* ptr = ins->ptr();
+ Register ptrReg = ToRegister(ptr);
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register out = ToRegister(ins->output());
+
+ masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, out);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MWasmAtomicExchangeHeap* mir = ins->mir();
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ Register output = ToRegister(ins->output());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ masm.wasmAtomicExchange(mir->access(), srcAddr, value, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->hasUses());
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ Register output = ToRegister(ins->output());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ masm.wasmAtomicFetchOp(mir->access(), op, ToRegister(value), srcAddr,
+ flagTemp, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasUses());
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), srcAddr,
+ flagTemp);
+}
+
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+
+ if (ins->arg()->isConstant()) {
+ masm.ma_mov(Imm32(ToInt32(ins->arg())), scratch);
+ masm.ma_str(scratch, dst, scratch2);
+ } else {
+ if (ins->arg()->isGeneralReg()) {
+ masm.ma_str(ToRegister(ins->arg()), dst, scratch);
+ } else {
+ masm.ma_vstr(ToFloatRegister(ins->arg()), dst, scratch);
+ }
+ }
+}
+
+void CodeGenerator::visitUDiv(LUDiv* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ Label done;
+ generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());
+
+ masm.ma_udiv(lhs, rhs, output);
+
+ // Check for large unsigned result - represent as double.
+ if (!ins->mir()->isTruncated()) {
+ MOZ_ASSERT(ins->mir()->fallible());
+ masm.as_cmp(output, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+
+ // Check for non-zero remainder if not truncating to int.
+ if (!ins->mir()->canTruncateRemainder()) {
+ MOZ_ASSERT(ins->mir()->fallible());
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_mul(rhs, output, scratch);
+ masm.ma_cmp(scratch, lhs);
+ }
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+ }
+
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitUMod(LUMod* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ Label done;
+ generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());
+
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_umod(lhs, rhs, output, scratch);
+ }
+
+ // Check for large unsigned result - represent as double.
+ if (!ins->mir()->isTruncated()) {
+ MOZ_ASSERT(ins->mir()->fallible());
+ masm.as_cmp(output, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+template <class T>
+void CodeGeneratorARM::generateUDivModZeroCheck(Register rhs, Register output,
+ Label* done,
+ LSnapshot* snapshot, T* mir) {
+ if (!mir) {
+ return;
+ }
+ if (mir->canBeDivideByZero()) {
+ masm.as_cmp(rhs, Imm8(0));
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(&nonZero, Assembler::NotEqual);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ Label skip;
+ masm.ma_b(&skip, Assembler::NotEqual);
+ // Infinity|0 == 0
+ masm.ma_mov(Imm32(0), output);
+ masm.ma_b(done);
+ masm.bind(&skip);
+ }
+ } else {
+ // Bailout for divide by zero
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, snapshot);
+ }
+ }
+}
+
+void CodeGenerator::visitSoftUDivOrMod(LSoftUDivOrMod* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ MOZ_ASSERT(lhs == r0);
+ MOZ_ASSERT(rhs == r1);
+ MOZ_ASSERT(output == r0);
+
+ Label done;
+ MDiv* div = ins->mir()->isDiv() ? ins->mir()->toDiv() : nullptr;
+ MMod* mod = !div ? ins->mir()->toMod() : nullptr;
+
+ generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), div);
+ generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), mod);
+
+ if (gen->compilingWasm()) {
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ wasm::BytecodeOffset bytecodeOffset =
+ (div ? div->bytecodeOffset() : mod->bytecodeOffset());
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::aeabi_uidivmod,
+ mozilla::Some(instanceOffset));
+ masm.Pop(InstanceReg);
+ } else {
+ using Fn = int64_t (*)(int, int);
+ masm.setupAlignedABICall();
+ masm.passABIArg(lhs);
+ masm.passABIArg(rhs);
+ masm.callWithABI<Fn, __aeabi_uidivmod>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+ }
+
+ if (mod) {
+ MOZ_ASSERT(output == r0, "output should not be r1 for mod");
+ masm.move32(r1, output);
+ }
+
+ // uidivmod returns the quotient in r0, and the remainder in r1.
+ if (div && !div->canTruncateRemainder()) {
+ MOZ_ASSERT(div->fallible());
+ masm.as_cmp(r1, Imm8(0));
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ // Bailout for big unsigned results
+ if ((div && !div->isTruncated()) || (mod && !mod->isTruncated())) {
+ DebugOnly<bool> isFallible =
+ (div && div->fallible()) || (mod && mod->fallible());
+ MOZ_ASSERT(isFallible);
+ masm.as_cmp(output, Imm8(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+
+ ScratchRegisterScope scratch(masm);
+
+ masm.as_add(output, base, lsl(index, mir->scale()));
+ masm.ma_add(Imm32(mir->displacement()), output, scratch);
+}
+
+void CodeGenerator::visitNegI(LNegI* ins) {
+ Register input = ToRegister(ins->input());
+ masm.ma_neg(input, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitNegI64(LNegI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ MOZ_ASSERT(input == ToOutRegister64(ins));
+ masm.neg64(input);
+}
+
+void CodeGenerator::visitNegD(LNegD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ masm.ma_vneg(input, ToFloatRegister(ins->output()));
+}
+
+void CodeGenerator::visitNegF(LNegF* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
+}
+
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+ masm.memoryBarrier(ins->type());
+}
+
+void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
+ auto input = ToFloatRegister(lir->input());
+ auto output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ OutOfLineWasmTruncateCheck* ool = nullptr;
+ Label* oolEntry = nullptr;
+ if (!lir->mir()->isSaturating()) {
+ ool = new (alloc())
+ OutOfLineWasmTruncateCheck(mir, input, Register::Invalid());
+ addOutOfLineCode(ool, mir);
+ oolEntry = ool->entry();
+ }
+
+ masm.wasmTruncateToInt32(input, output, fromType, mir->isUnsigned(),
+ mir->isSaturating(), oolEntry);
+
+ if (!lir->mir()->isSaturating()) {
+ masm.bind(ool->rejoin());
+ }
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister inputDouble = input;
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmBuiltinTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ OutOfLineWasmTruncateCheck* ool = nullptr;
+ if (!lir->mir()->isSaturating()) {
+ ool = new (alloc())
+ OutOfLineWasmTruncateCheck(mir, input, Register64::Invalid());
+ addOutOfLineCode(ool, mir);
+ }
+
+ ScratchDoubleScope fpscratch(masm);
+ if (fromType == MIRType::Float32) {
+ inputDouble = fpscratch;
+ masm.convertFloat32ToDouble(input, inputDouble);
+ }
+
+ masm.Push(input);
+
+ masm.setupWasmABICall();
+ masm.passABIArg(inputDouble, MoveOp::DOUBLE);
+
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ if (lir->mir()->isSaturating()) {
+ if (lir->mir()->isUnsigned()) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::SaturatingTruncateDoubleToUint64,
+ mozilla::Some(instanceOffset));
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::SaturatingTruncateDoubleToInt64,
+ mozilla::Some(instanceOffset));
+ }
+ } else {
+ if (lir->mir()->isUnsigned()) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::TruncateDoubleToUint64,
+ mozilla::Some(instanceOffset));
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::TruncateDoubleToInt64,
+ mozilla::Some(instanceOffset));
+ }
+ }
+
+ masm.Pop(input);
+ masm.Pop(InstanceReg);
+
+ // TruncateDoubleTo{UI,I}nt64 returns 0x8000000000000000 to indicate
+ // exceptional results, so check for that and produce the appropriate
+ // traps. The Saturating form always returns a normal value and never
+ // needs traps.
+ if (!lir->mir()->isSaturating()) {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_cmp(output.high, Imm32(0x80000000), scratch);
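+    // The low-word compare is predicated on Equal, so it only executes when
+    // the high word already matched 0x80000000.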
+ masm.as_cmp(output.low, Imm8(0x00000000), Assembler::Equal);
+ masm.ma_b(ool->entry(), Assembler::Equal);
+
+ masm.bind(ool->rejoin());
+ }
+
+ MOZ_ASSERT(ReturnReg64 == output);
+}
+
+void CodeGeneratorARM::visitOutOfLineWasmTruncateCheck(
+ OutOfLineWasmTruncateCheck* ool) {
+ // On ARM, saturating truncation codegen handles saturating itself rather than
+ // relying on out-of-line fixup code.
+ if (ool->isSaturating()) {
+ return;
+ }
+
+ masm.outOfLineWasmTruncateToIntCheck(ool->input(), ool->fromType(),
+ ool->toType(), ool->isUnsigned(),
+ ool->rejoin(), ool->bytecodeOffset());
+}
+
+void CodeGenerator::visitInt64ToFloatingPointCall(
+ LInt64ToFloatingPointCall* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MOZ_ASSERT(ToRegister(lir->getOperand(LInt64ToFloatingPointCall::Instance)) ==
+ InstanceReg);
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
+ MBuiltinInt64ToFloatingPoint* mir = lir->mir();
+ MIRType toType = mir->type();
+
+ masm.setupWasmABICall();
+ masm.passABIArg(input.high);
+ masm.passABIArg(input.low);
+
+ bool isUnsigned = mir->isUnsigned();
+ wasm::SymbolicAddress callee =
+ toType == MIRType::Float32
+ ? (isUnsigned ? wasm::SymbolicAddress::Uint64ToFloat32
+ : wasm::SymbolicAddress::Int64ToFloat32)
+ : (isUnsigned ? wasm::SymbolicAddress::Uint64ToDouble
+ : wasm::SymbolicAddress::Int64ToDouble);
+
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ MoveOp::Type result =
+ toType == MIRType::Float32 ? MoveOp::FLOAT32 : MoveOp::DOUBLE;
+ masm.callWithABI(mir->bytecodeOffset(), callee, mozilla::Some(instanceOffset),
+ result);
+
+ DebugOnly<FloatRegister> output(ToFloatRegister(lir->output()));
+ MOZ_ASSERT_IF(toType == MIRType::Double, output.value == ReturnDoubleReg);
+ MOZ_ASSERT_IF(toType == MIRType::Float32, output.value == ReturnFloat32Reg);
+
+ masm.Pop(InstanceReg);
+}
+
+void CodeGenerator::visitCopySignF(LCopySignF* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ masm.ma_vxfer(lhs, lhsi);
+ masm.ma_vxfer(rhs, rhsi);
+
+ ScratchRegisterScope scratch(masm);
+
+ // Clear lhs's sign.
+ masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi, scratch);
+
+ // Keep rhs's sign.
+ masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi, scratch);
+
+ // Combine.
+ masm.ma_orr(lhsi, rhsi, rhsi);
+
+ masm.ma_vxfer(rhsi, output);
+}
+
+void CodeGenerator::visitCopySignD(LCopySignD* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ // Manipulate high words of double inputs.
+ masm.as_vxfer(lhsi, InvalidReg, lhs, Assembler::FloatToCore,
+ Assembler::Always, 1);
+ masm.as_vxfer(rhsi, InvalidReg, rhs, Assembler::FloatToCore,
+ Assembler::Always, 1);
+
+ ScratchRegisterScope scratch(masm);
+
+ // Clear lhs's sign.
+ masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi, scratch);
+
+ // Keep rhs's sign.
+ masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi, scratch);
+
+ // Combine.
+ masm.ma_orr(lhsi, rhsi, rhsi);
+
+ // Reconstruct the output.
+ masm.as_vxfer(lhsi, InvalidReg, lhs, Assembler::FloatToCore,
+ Assembler::Always, 0);
+ masm.ma_vxfer(lhsi, rhsi, output);
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ masm.move32(ToRegister(input.low()), output);
+ } else {
+ masm.move32(ToRegister(input.high()), output);
+ }
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ Register64 output = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister(lir->input()) == output.low);
+
+ if (lir->mir()->isUnsigned()) {
+ masm.ma_mov(Imm32(0), output.high);
+ } else {
+ masm.ma_asr(Imm32(31), output.low, output.high);
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move8SignExtend(input.low, output.low);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move16SignExtend(input.low, output.low);
+ break;
+ case MSignExtendInt64::Word:
+ masm.move32(input.low, output.low);
+ break;
+ }
+ masm.ma_asr(Imm32(31), output.low, output.high);
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index*) {
+ MOZ_CRASH("64-bit only");
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ // Generates no code on this platform because we just return the low part of
+ // the input register pair.
+ MOZ_ASSERT(ToRegister(lir->input()) == ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Instance)) ==
+ InstanceReg);
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+    // We can use InstanceReg as a temp register because we preserved it
+    // before.
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ auto* mir = lir->mir();
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notmin);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
+ if (mir->isWasmBuiltinModI64()) {
+ masm.xor64(output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notmin);
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ if (mir->isWasmBuiltinModI64()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64,
+ mozilla::Some(instanceOffset));
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64,
+ mozilla::Some(instanceOffset));
+ }
+
+ MOZ_ASSERT(ReturnReg64 == output);
+
+ masm.bind(&done);
+ masm.Pop(InstanceReg);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Instance)) ==
+ InstanceReg);
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+    // We can use InstanceReg as a temp register because we preserved it
+    // before.
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MDefinition* mir = lir->mir();
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ if (mir->isWasmBuiltinModI64()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64,
+ mozilla::Some(instanceOffset));
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64,
+ mozilla::Some(instanceOffset));
+ }
+ masm.Pop(InstanceReg);
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+ Label done;
+
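+  // Optimistically set the output to 1; fall through and clear it below if
+  // the comparison does not hold.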
+ masm.move32(Imm32(1), output);
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, &done);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, &done);
+ }
+
+ masm.move32(Imm32(0), output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
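+  // The condition is inverted: the false expression is moved into the output
+  // only when the original comparison fails.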
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Rsh:
+ masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Ursh:
+ masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOp::BitOr:
+ if (IsConstant(rhs)) {
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitXor:
+ if (IsConstant(rhs)) {
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitAnd:
+ if (IsConstant(rhs)) {
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitRotateI64(LRotateI64* lir) {
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c) {
+ masm.move64(input, output);
+ return;
+ }
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ } else {
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ }
+ } else {
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ } else {
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+ }
+}
+
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg())) {
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ } else {
+ masm.store64(ToRegister64(ins->arg()), dst);
+ }
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 out = ToOutRegister64(lir);
+  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+             "true expr input is reused for output");
+
+ masm.as_cmp(cond, Imm8(0));
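+  // The output already holds the true expression; overwrite it with the
+  // false expression when the condition is zero.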
+ if (falseExpr.low().isRegister()) {
+ masm.ma_mov(ToRegister(falseExpr.low()), out.low, LeaveCC,
+ Assembler::Equal);
+ masm.ma_mov(ToRegister(falseExpr.high()), out.high, LeaveCC,
+ Assembler::Equal);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_ldr(ToAddress(falseExpr.low()), out.low, scratch, Offset,
+ Assembler::Equal);
+ masm.ma_ldr(ToAddress(falseExpr.high()), out.high, scratch, Offset,
+ Assembler::Equal);
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ masm.ma_vxfer(input.low, input.high, output);
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.ma_vxfer(input, output.low, output.high);
+}
+
+void CodeGenerator::visitPopcntI64(LPopcntI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToRegister(lir->getTemp(0));
+
+ masm.popcnt64(input, output, temp);
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
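+  // The count is at most 64, so it always fits in the low word and the high
+  // word is zero.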
+ masm.clz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.ctz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ MOZ_ASSERT(input == ToOutRegister64(lir));
+ masm.ma_mvn(input.high, input.high);
+ masm.ma_mvn(input.low, input.low);
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
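+  // The 64-bit value is truthy if either half is non-zero.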
+ masm.as_cmp(input.high, Imm8(0));
+ jumpToBlock(lir->ifTrue(), Assembler::NonZero);
+ masm.as_cmp(input.low, Imm8(0));
+ emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
+
+void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 output = ToOutRegister64(lir);
+ Register64 tmp(InvalidReg, InvalidReg);
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, lir->mir()->access().offset());
+ masm.wasmAtomicLoad64(lir->mir()->access(), addr, tmp, output);
+}
+
+void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 tmp(ToRegister(lir->tmpHigh()), ToRegister(lir->tmpLow()));
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, lir->mir()->access().offset());
+ masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, tmp);
+}
+
+void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 expected = ToRegister64(lir->expected());
+ Register64 replacement = ToRegister64(lir->replacement());
+ Register64 out = ToOutRegister64(lir);
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, lir->mir()->access().offset());
+ masm.wasmCompareExchange64(lir->mir()->access(), addr, expected, replacement,
+ out);
+}
+
+void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 out = ToOutRegister64(lir);
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, lir->access().offset());
+ Register64 tmp(ToRegister(lir->tmpHigh()), ToRegister(lir->tmpLow()));
+ masm.wasmAtomicFetchOp64(lir->access(), lir->operation(), value, addr, tmp,
+ out);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 out = ToOutRegister64(lir);
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, lir->access().offset());
+ masm.wasmAtomicExchange64(lir->access(), addr, value, out);
+}
+
+void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }
+
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+ LWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmVariableShiftSimd128(
+ LWasmVariableShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmConstantShiftSimd128(
+ LWasmConstantShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmSignReplicationSimd128(
+ LWasmSignReplicationSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+ LWasmReplaceInt64LaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+ LWasmReduceAndBranchSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+ LWasmReduceSimd128ToInt64* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
diff --git a/js/src/jit/arm/CodeGenerator-arm.h b/js/src/jit/arm/CodeGenerator-arm.h
new file mode 100644
index 0000000000..f7cf2b263e
--- /dev/null
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -0,0 +1,172 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_CodeGenerator_arm_h
+#define jit_arm_CodeGenerator_arm_h
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/shared/CodeGenerator-shared.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorARM;
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+using OutOfLineWasmTruncateCheck =
+ OutOfLineWasmTruncateCheckBase<CodeGeneratorARM>;
+
+class CodeGeneratorARM : public CodeGeneratorShared {
+ friend class MoveResolverARM;
+
+ protected:
+ CodeGeneratorARM(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ NonAssertingLabel deoptLabel_;
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ masm.cmpPtr(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs,
+ LSnapshot* snapshot) {
+ masm.testPtr(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ masm.cmp32(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ masm.test32(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+ masm.test32(reg, Imm32(0xFF));
+ bailoutIf(Assembler::Zero, snapshot);
+ }
+
+ template <class T>
+ void generateUDivModZeroCheck(Register rhs, Register output, Label* done,
+ LSnapshot* snapshot, T* mir);
+
+ bool generateOutOfLineCode();
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ void emitBranch(Assembler::Condition cond, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse);
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ cond = masm.testNull(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ cond = masm.testUndefined(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ cond = masm.testObject(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ masm.cmpPtr(reg, ImmWord(0));
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register base);
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmUnalignedLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+ template <typename T>
+ void emitWasmUnalignedStore(T* ins);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+
+ // Functions for LTestVAndBranch.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+
+ void divICommon(MDiv* mir, Register lhs, Register rhs, Register output,
+ LSnapshot* snapshot, Label& done);
+ void modICommon(MMod* mir, Register lhs, Register rhs, Register output,
+ LSnapshot* snapshot, Label& done);
+
+ void generateInvalidateEpilogue();
+
+ // Generating a result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register outTemp,
+ AnyRegister output);
+
+ // Generating no result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp);
+
+ public:
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+};
+
+typedef CodeGeneratorARM CodeGeneratorSpecific;
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorARM> {
+ protected: // Silence Clang warning.
+ LSnapshot* snapshot_;
+ uint32_t frameSize_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot, uint32_t frameSize)
+ : snapshot_(snapshot), frameSize_(frameSize) {}
+
+ void accept(CodeGeneratorARM* codegen) override;
+
+ LSnapshot* snapshot() const { return snapshot_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_CodeGenerator_arm_h */
diff --git a/js/src/jit/arm/DoubleEntryTable.tbl b/js/src/jit/arm/DoubleEntryTable.tbl
new file mode 100644
index 0000000000..2e9e8c4a34
--- /dev/null
+++ b/js/src/jit/arm/DoubleEntryTable.tbl
@@ -0,0 +1,257 @@
+/* THIS FILE IS AUTOMATICALLY GENERATED BY gen-double-encode-table.py. */
+ { 0x40000000, { 0, 0, 0 } },
+ { 0x40010000, { 1, 0, 0 } },
+ { 0x40020000, { 2, 0, 0 } },
+ { 0x40030000, { 3, 0, 0 } },
+ { 0x40040000, { 4, 0, 0 } },
+ { 0x40050000, { 5, 0, 0 } },
+ { 0x40060000, { 6, 0, 0 } },
+ { 0x40070000, { 7, 0, 0 } },
+ { 0x40080000, { 8, 0, 0 } },
+ { 0x40090000, { 9, 0, 0 } },
+ { 0x400a0000, { 10, 0, 0 } },
+ { 0x400b0000, { 11, 0, 0 } },
+ { 0x400c0000, { 12, 0, 0 } },
+ { 0x400d0000, { 13, 0, 0 } },
+ { 0x400e0000, { 14, 0, 0 } },
+ { 0x400f0000, { 15, 0, 0 } },
+ { 0x40100000, { 0, 1, 0 } },
+ { 0x40110000, { 1, 1, 0 } },
+ { 0x40120000, { 2, 1, 0 } },
+ { 0x40130000, { 3, 1, 0 } },
+ { 0x40140000, { 4, 1, 0 } },
+ { 0x40150000, { 5, 1, 0 } },
+ { 0x40160000, { 6, 1, 0 } },
+ { 0x40170000, { 7, 1, 0 } },
+ { 0x40180000, { 8, 1, 0 } },
+ { 0x40190000, { 9, 1, 0 } },
+ { 0x401a0000, { 10, 1, 0 } },
+ { 0x401b0000, { 11, 1, 0 } },
+ { 0x401c0000, { 12, 1, 0 } },
+ { 0x401d0000, { 13, 1, 0 } },
+ { 0x401e0000, { 14, 1, 0 } },
+ { 0x401f0000, { 15, 1, 0 } },
+ { 0x40200000, { 0, 2, 0 } },
+ { 0x40210000, { 1, 2, 0 } },
+ { 0x40220000, { 2, 2, 0 } },
+ { 0x40230000, { 3, 2, 0 } },
+ { 0x40240000, { 4, 2, 0 } },
+ { 0x40250000, { 5, 2, 0 } },
+ { 0x40260000, { 6, 2, 0 } },
+ { 0x40270000, { 7, 2, 0 } },
+ { 0x40280000, { 8, 2, 0 } },
+ { 0x40290000, { 9, 2, 0 } },
+ { 0x402a0000, { 10, 2, 0 } },
+ { 0x402b0000, { 11, 2, 0 } },
+ { 0x402c0000, { 12, 2, 0 } },
+ { 0x402d0000, { 13, 2, 0 } },
+ { 0x402e0000, { 14, 2, 0 } },
+ { 0x402f0000, { 15, 2, 0 } },
+ { 0x40300000, { 0, 3, 0 } },
+ { 0x40310000, { 1, 3, 0 } },
+ { 0x40320000, { 2, 3, 0 } },
+ { 0x40330000, { 3, 3, 0 } },
+ { 0x40340000, { 4, 3, 0 } },
+ { 0x40350000, { 5, 3, 0 } },
+ { 0x40360000, { 6, 3, 0 } },
+ { 0x40370000, { 7, 3, 0 } },
+ { 0x40380000, { 8, 3, 0 } },
+ { 0x40390000, { 9, 3, 0 } },
+ { 0x403a0000, { 10, 3, 0 } },
+ { 0x403b0000, { 11, 3, 0 } },
+ { 0x403c0000, { 12, 3, 0 } },
+ { 0x403d0000, { 13, 3, 0 } },
+ { 0x403e0000, { 14, 3, 0 } },
+ { 0x403f0000, { 15, 3, 0 } },
+ { 0x3fc00000, { 0, 4, 0 } },
+ { 0x3fc10000, { 1, 4, 0 } },
+ { 0x3fc20000, { 2, 4, 0 } },
+ { 0x3fc30000, { 3, 4, 0 } },
+ { 0x3fc40000, { 4, 4, 0 } },
+ { 0x3fc50000, { 5, 4, 0 } },
+ { 0x3fc60000, { 6, 4, 0 } },
+ { 0x3fc70000, { 7, 4, 0 } },
+ { 0x3fc80000, { 8, 4, 0 } },
+ { 0x3fc90000, { 9, 4, 0 } },
+ { 0x3fca0000, { 10, 4, 0 } },
+ { 0x3fcb0000, { 11, 4, 0 } },
+ { 0x3fcc0000, { 12, 4, 0 } },
+ { 0x3fcd0000, { 13, 4, 0 } },
+ { 0x3fce0000, { 14, 4, 0 } },
+ { 0x3fcf0000, { 15, 4, 0 } },
+ { 0x3fd00000, { 0, 5, 0 } },
+ { 0x3fd10000, { 1, 5, 0 } },
+ { 0x3fd20000, { 2, 5, 0 } },
+ { 0x3fd30000, { 3, 5, 0 } },
+ { 0x3fd40000, { 4, 5, 0 } },
+ { 0x3fd50000, { 5, 5, 0 } },
+ { 0x3fd60000, { 6, 5, 0 } },
+ { 0x3fd70000, { 7, 5, 0 } },
+ { 0x3fd80000, { 8, 5, 0 } },
+ { 0x3fd90000, { 9, 5, 0 } },
+ { 0x3fda0000, { 10, 5, 0 } },
+ { 0x3fdb0000, { 11, 5, 0 } },
+ { 0x3fdc0000, { 12, 5, 0 } },
+ { 0x3fdd0000, { 13, 5, 0 } },
+ { 0x3fde0000, { 14, 5, 0 } },
+ { 0x3fdf0000, { 15, 5, 0 } },
+ { 0x3fe00000, { 0, 6, 0 } },
+ { 0x3fe10000, { 1, 6, 0 } },
+ { 0x3fe20000, { 2, 6, 0 } },
+ { 0x3fe30000, { 3, 6, 0 } },
+ { 0x3fe40000, { 4, 6, 0 } },
+ { 0x3fe50000, { 5, 6, 0 } },
+ { 0x3fe60000, { 6, 6, 0 } },
+ { 0x3fe70000, { 7, 6, 0 } },
+ { 0x3fe80000, { 8, 6, 0 } },
+ { 0x3fe90000, { 9, 6, 0 } },
+ { 0x3fea0000, { 10, 6, 0 } },
+ { 0x3feb0000, { 11, 6, 0 } },
+ { 0x3fec0000, { 12, 6, 0 } },
+ { 0x3fed0000, { 13, 6, 0 } },
+ { 0x3fee0000, { 14, 6, 0 } },
+ { 0x3fef0000, { 15, 6, 0 } },
+ { 0x3ff00000, { 0, 7, 0 } },
+ { 0x3ff10000, { 1, 7, 0 } },
+ { 0x3ff20000, { 2, 7, 0 } },
+ { 0x3ff30000, { 3, 7, 0 } },
+ { 0x3ff40000, { 4, 7, 0 } },
+ { 0x3ff50000, { 5, 7, 0 } },
+ { 0x3ff60000, { 6, 7, 0 } },
+ { 0x3ff70000, { 7, 7, 0 } },
+ { 0x3ff80000, { 8, 7, 0 } },
+ { 0x3ff90000, { 9, 7, 0 } },
+ { 0x3ffa0000, { 10, 7, 0 } },
+ { 0x3ffb0000, { 11, 7, 0 } },
+ { 0x3ffc0000, { 12, 7, 0 } },
+ { 0x3ffd0000, { 13, 7, 0 } },
+ { 0x3ffe0000, { 14, 7, 0 } },
+ { 0x3fff0000, { 15, 7, 0 } },
+ { 0xc0000000, { 0, 8, 0 } },
+ { 0xc0010000, { 1, 8, 0 } },
+ { 0xc0020000, { 2, 8, 0 } },
+ { 0xc0030000, { 3, 8, 0 } },
+ { 0xc0040000, { 4, 8, 0 } },
+ { 0xc0050000, { 5, 8, 0 } },
+ { 0xc0060000, { 6, 8, 0 } },
+ { 0xc0070000, { 7, 8, 0 } },
+ { 0xc0080000, { 8, 8, 0 } },
+ { 0xc0090000, { 9, 8, 0 } },
+ { 0xc00a0000, { 10, 8, 0 } },
+ { 0xc00b0000, { 11, 8, 0 } },
+ { 0xc00c0000, { 12, 8, 0 } },
+ { 0xc00d0000, { 13, 8, 0 } },
+ { 0xc00e0000, { 14, 8, 0 } },
+ { 0xc00f0000, { 15, 8, 0 } },
+ { 0xc0100000, { 0, 9, 0 } },
+ { 0xc0110000, { 1, 9, 0 } },
+ { 0xc0120000, { 2, 9, 0 } },
+ { 0xc0130000, { 3, 9, 0 } },
+ { 0xc0140000, { 4, 9, 0 } },
+ { 0xc0150000, { 5, 9, 0 } },
+ { 0xc0160000, { 6, 9, 0 } },
+ { 0xc0170000, { 7, 9, 0 } },
+ { 0xc0180000, { 8, 9, 0 } },
+ { 0xc0190000, { 9, 9, 0 } },
+ { 0xc01a0000, { 10, 9, 0 } },
+ { 0xc01b0000, { 11, 9, 0 } },
+ { 0xc01c0000, { 12, 9, 0 } },
+ { 0xc01d0000, { 13, 9, 0 } },
+ { 0xc01e0000, { 14, 9, 0 } },
+ { 0xc01f0000, { 15, 9, 0 } },
+ { 0xc0200000, { 0, 10, 0 } },
+ { 0xc0210000, { 1, 10, 0 } },
+ { 0xc0220000, { 2, 10, 0 } },
+ { 0xc0230000, { 3, 10, 0 } },
+ { 0xc0240000, { 4, 10, 0 } },
+ { 0xc0250000, { 5, 10, 0 } },
+ { 0xc0260000, { 6, 10, 0 } },
+ { 0xc0270000, { 7, 10, 0 } },
+ { 0xc0280000, { 8, 10, 0 } },
+ { 0xc0290000, { 9, 10, 0 } },
+ { 0xc02a0000, { 10, 10, 0 } },
+ { 0xc02b0000, { 11, 10, 0 } },
+ { 0xc02c0000, { 12, 10, 0 } },
+ { 0xc02d0000, { 13, 10, 0 } },
+ { 0xc02e0000, { 14, 10, 0 } },
+ { 0xc02f0000, { 15, 10, 0 } },
+ { 0xc0300000, { 0, 11, 0 } },
+ { 0xc0310000, { 1, 11, 0 } },
+ { 0xc0320000, { 2, 11, 0 } },
+ { 0xc0330000, { 3, 11, 0 } },
+ { 0xc0340000, { 4, 11, 0 } },
+ { 0xc0350000, { 5, 11, 0 } },
+ { 0xc0360000, { 6, 11, 0 } },
+ { 0xc0370000, { 7, 11, 0 } },
+ { 0xc0380000, { 8, 11, 0 } },
+ { 0xc0390000, { 9, 11, 0 } },
+ { 0xc03a0000, { 10, 11, 0 } },
+ { 0xc03b0000, { 11, 11, 0 } },
+ { 0xc03c0000, { 12, 11, 0 } },
+ { 0xc03d0000, { 13, 11, 0 } },
+ { 0xc03e0000, { 14, 11, 0 } },
+ { 0xc03f0000, { 15, 11, 0 } },
+ { 0xbfc00000, { 0, 12, 0 } },
+ { 0xbfc10000, { 1, 12, 0 } },
+ { 0xbfc20000, { 2, 12, 0 } },
+ { 0xbfc30000, { 3, 12, 0 } },
+ { 0xbfc40000, { 4, 12, 0 } },
+ { 0xbfc50000, { 5, 12, 0 } },
+ { 0xbfc60000, { 6, 12, 0 } },
+ { 0xbfc70000, { 7, 12, 0 } },
+ { 0xbfc80000, { 8, 12, 0 } },
+ { 0xbfc90000, { 9, 12, 0 } },
+ { 0xbfca0000, { 10, 12, 0 } },
+ { 0xbfcb0000, { 11, 12, 0 } },
+ { 0xbfcc0000, { 12, 12, 0 } },
+ { 0xbfcd0000, { 13, 12, 0 } },
+ { 0xbfce0000, { 14, 12, 0 } },
+ { 0xbfcf0000, { 15, 12, 0 } },
+ { 0xbfd00000, { 0, 13, 0 } },
+ { 0xbfd10000, { 1, 13, 0 } },
+ { 0xbfd20000, { 2, 13, 0 } },
+ { 0xbfd30000, { 3, 13, 0 } },
+ { 0xbfd40000, { 4, 13, 0 } },
+ { 0xbfd50000, { 5, 13, 0 } },
+ { 0xbfd60000, { 6, 13, 0 } },
+ { 0xbfd70000, { 7, 13, 0 } },
+ { 0xbfd80000, { 8, 13, 0 } },
+ { 0xbfd90000, { 9, 13, 0 } },
+ { 0xbfda0000, { 10, 13, 0 } },
+ { 0xbfdb0000, { 11, 13, 0 } },
+ { 0xbfdc0000, { 12, 13, 0 } },
+ { 0xbfdd0000, { 13, 13, 0 } },
+ { 0xbfde0000, { 14, 13, 0 } },
+ { 0xbfdf0000, { 15, 13, 0 } },
+ { 0xbfe00000, { 0, 14, 0 } },
+ { 0xbfe10000, { 1, 14, 0 } },
+ { 0xbfe20000, { 2, 14, 0 } },
+ { 0xbfe30000, { 3, 14, 0 } },
+ { 0xbfe40000, { 4, 14, 0 } },
+ { 0xbfe50000, { 5, 14, 0 } },
+ { 0xbfe60000, { 6, 14, 0 } },
+ { 0xbfe70000, { 7, 14, 0 } },
+ { 0xbfe80000, { 8, 14, 0 } },
+ { 0xbfe90000, { 9, 14, 0 } },
+ { 0xbfea0000, { 10, 14, 0 } },
+ { 0xbfeb0000, { 11, 14, 0 } },
+ { 0xbfec0000, { 12, 14, 0 } },
+ { 0xbfed0000, { 13, 14, 0 } },
+ { 0xbfee0000, { 14, 14, 0 } },
+ { 0xbfef0000, { 15, 14, 0 } },
+ { 0xbff00000, { 0, 15, 0 } },
+ { 0xbff10000, { 1, 15, 0 } },
+ { 0xbff20000, { 2, 15, 0 } },
+ { 0xbff30000, { 3, 15, 0 } },
+ { 0xbff40000, { 4, 15, 0 } },
+ { 0xbff50000, { 5, 15, 0 } },
+ { 0xbff60000, { 6, 15, 0 } },
+ { 0xbff70000, { 7, 15, 0 } },
+ { 0xbff80000, { 8, 15, 0 } },
+ { 0xbff90000, { 9, 15, 0 } },
+ { 0xbffa0000, { 10, 15, 0 } },
+ { 0xbffb0000, { 11, 15, 0 } },
+ { 0xbffc0000, { 12, 15, 0 } },
+ { 0xbffd0000, { 13, 15, 0 } },
+ { 0xbffe0000, { 14, 15, 0 } },
+ { 0xbfff0000, { 15, 15, 0 } },
diff --git a/js/src/jit/arm/LIR-arm.h b/js/src/jit/arm/LIR-arm.h
new file mode 100644
index 0000000000..395b285c93
--- /dev/null
+++ b/js/src/jit/arm/LIR-arm.h
@@ -0,0 +1,511 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_LIR_arm_h
+#define jit_arm_LIR_arm_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp,
+ MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(Unbox);
+
+ LUnbox() : LInstructionHelper(classOpcode) {}
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const LAllocation* payload() { return getOperand(0); }
+ const LAllocation* type() { return getOperand(1); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ explicit LWasmUint32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ explicit LWasmUint32ToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LDivI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2 + 1, 0> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+ static const size_t Instance = 2 * INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs,
+ const LAllocation& instance)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ setOperand(Instance, instance);
+ }
+
+ MDefinition* mir() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ return mir_;
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeDivideByZero();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeNegativeDividend();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->bytecodeOffset();
+ }
+ return mir_->toWasmBuiltinDivI64()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2 + 1, 0> {
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+ static const size_t Instance = 2 * INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs,
+ const LAllocation& instance)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ setOperand(Instance, instance);
+ }
+
+ MDefinition* mir() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ return mir_;
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeDivideByZero();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeNegativeDividend();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->bytecodeOffset();
+ }
+ return mir_->toWasmBuiltinDivI64()->bytecodeOffset();
+ }
+};
+
+// LSoftDivI is a software divide for ARM cores that don't support a hardware
+// divide instruction, implemented as a C++ native call.
+class LSoftDivI : public LBinaryCallInstructionHelper<1, 0> {
+ public:
+ LIR_HEADER(SoftDivI);
+
+ LSoftDivI(const LAllocation& lhs, const LAllocation& rhs)
+ : LBinaryCallInstructionHelper(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
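
A minimal sketch (not part of the patch itself) of what a software divide does: on ARM cores without the hardware SDIV/UDIV instructions, division is performed by an out-of-line routine, which is why LSoftDivI above is modelled as a call. The shift-and-subtract loop below is only an illustration; the routine SpiderMonkey actually calls is a VM builtin and is not reproduced here.

#include <cstdint>

// Classic restoring (shift-and-subtract) unsigned division. The caller is
// expected to have rejected division by zero already.
static uint32_t SoftwareUDiv32(uint32_t n, uint32_t d) {
  uint32_t quotient = 0;
  uint32_t remainder = 0;
  for (int bit = 31; bit >= 0; bit--) {
    remainder = (remainder << 1) | ((n >> bit) & 1);  // bring down next bit
    if (remainder >= d) {
      remainder -= d;
      quotient |= uint32_t(1) << bit;
    }
  }
  return quotient;
}
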
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+
+ int32_t shift() { return shift_; }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LModI : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ }
+
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LSoftModI : public LBinaryCallInstructionHelper<1, 1> {
+ public:
+ LIR_HEADER(SoftModI);
+
+ LSoftModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryCallInstructionHelper(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* callTemp() { return getTemp(0); }
+
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+ int32_t shift() { return shift_; }
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ }
+
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp1,
+ const LDefinition& temp2, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ int32_t shift() const { return shift_; }
+
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+// A table switch driven by an integer index.
+class LTableSwitch : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ const LAllocation* index() { return getOperand(0); }
+ const LDefinition* tempInt() { return getTemp(0); }
+ // Present only so shared CodeGenerator code can use the same accessors;
+ // ARM has no pointer temp, so this returns nullptr.
+ const LDefinition* tempPointer() { return nullptr; }
+};
+
+// A table switch driven by a boxed Value that must first be unboxed to an
+// integer index.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() { return getTemp(0); }
+ const LDefinition* tempFloat() { return getTemp(1); }
+ const LDefinition* tempPointer() { return nullptr; }
+};
+
+class LMulI : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(MulI);
+
+ LMulI() : LBinaryMath(classOpcode) {}
+
+ MMul* mir() { return mir_->toMul(); }
+};
+
+class LUDiv : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(UDiv);
+
+ LUDiv() : LBinaryMath(classOpcode) {}
+
+ MDiv* mir() { return mir_->toDiv(); }
+};
+
+class LUMod : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(UMod);
+
+ LUMod() : LBinaryMath(classOpcode) {}
+
+ MMod* mir() { return mir_->toMod(); }
+};
+
+class LSoftUDivOrMod : public LBinaryCallInstructionHelper<1, 0> {
+ public:
+ LIR_HEADER(SoftUDivOrMod);
+
+ LSoftUDivOrMod(const LAllocation& lhs, const LAllocation& rhs)
+ : LBinaryCallInstructionHelper(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ }
+
+ MInstruction* mir() { return mir_->toInstruction(); }
+};
+
+class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 2, 0> {
+ static const size_t Input = 0;
+ static const size_t Instance = 1;
+
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ LWasmTruncateToInt64(const LAllocation& in, const LAllocation& instance)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(Input, in);
+ setOperand(Instance, instance);
+ }
+
+ LAllocation* input() { return getOperand(Input); }
+ LAllocation* instance() { return getOperand(Instance); }
+
+ MWasmBuiltinTruncateToInt64* mir() const {
+ return mir_->toWasmBuiltinTruncateToInt64();
+ }
+};
+
+class LInt64ToFloatingPointCall
+ : public LCallInstructionHelper<1, INT64_PIECES + 1, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPointCall);
+
+ static const size_t Input = 0;
+ static const size_t Instance = INT64_PIECES;
+
+ LInt64ToFloatingPointCall(const LInt64Allocation& in,
+ const LAllocation& instance)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Input, in);
+ setOperand(Instance, instance);
+ }
+
+ LAllocation* input() { return getOperand(Input); }
+ LAllocation* instance() { return getOperand(Instance); }
+
+ MBuiltinInt64ToFloatingPoint* mir() const {
+ return mir_->toBuiltinInt64ToFloatingPoint();
+ }
+};
+
+class LWasmAtomicLoadI64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(WasmAtomicLoadI64);
+
+ explicit LWasmAtomicLoadI64(const LAllocation& ptr)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ }
+
+ MWasmLoad* mir() const { return mir_->toWasmLoad(); }
+ const LAllocation* ptr() { return getOperand(0); }
+};
+
+class LWasmAtomicStoreI64 : public LInstructionHelper<0, 1 + INT64_PIECES, 2> {
+ public:
+ LIR_HEADER(WasmAtomicStoreI64);
+
+ LWasmAtomicStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LDefinition& tmpLow, const LDefinition& tmpHigh)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ setTemp(0, tmpLow);
+ setTemp(1, tmpHigh);
+ }
+
+ MWasmStore* mir() const { return mir_->toWasmStore(); }
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const LDefinition* tmpLow() { return getTemp(0); }
+ const LDefinition* tmpHigh() { return getTemp(1); }
+};
+
+class LWasmCompareExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + 2 * INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmCompareExchangeI64);
+
+ LWasmCompareExchangeI64(const LAllocation& ptr,
+ const LInt64Allocation& expected,
+ const LInt64Allocation& replacement)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, expected);
+ setInt64Operand(1 + INT64_PIECES, replacement);
+ }
+
+ MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation expected() { return getInt64Operand(1); }
+ const LInt64Allocation replacement() {
+ return getInt64Operand(1 + INT64_PIECES);
+ }
+};
+
+class LWasmAtomicBinopI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 2> {
+ const wasm::MemoryAccessDesc& access_;
+ AtomicOp op_;
+
+ public:
+ LIR_HEADER(WasmAtomicBinopI64);
+
+ LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LDefinition& tmpLow, const LDefinition& tmpHigh,
+ const wasm::MemoryAccessDesc& access, AtomicOp op)
+ : LInstructionHelper(classOpcode), access_(access), op_(op) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ setTemp(0, tmpLow);
+ setTemp(1, tmpHigh);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const wasm::MemoryAccessDesc& access() { return access_; }
+ AtomicOp operation() const { return op_; }
+ const LDefinition* tmpLow() { return getTemp(0); }
+ const LDefinition* tmpHigh() { return getTemp(1); }
+};
+
+class LWasmAtomicExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 0> {
+ const wasm::MemoryAccessDesc& access_;
+
+ public:
+ LIR_HEADER(WasmAtomicExchangeI64);
+
+ LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const wasm::MemoryAccessDesc& access)
+ : LInstructionHelper(classOpcode), access_(access) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const wasm::MemoryAccessDesc& access() { return access_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_LIR_arm_h */
diff --git a/js/src/jit/arm/Lowering-arm.cpp b/js/src/jit/arm/Lowering-arm.cpp
new file mode 100644
index 0000000000..e384ee7911
--- /dev/null
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -0,0 +1,1223 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/Lowering-arm.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+LBoxAllocation LIRGeneratorARM::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2, bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+ LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
+LAllocation LIRGeneratorARM::useByteOpRegister(MDefinition* mir) {
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorARM::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorARM::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition LIRGeneratorARM::tempByteOpRegister() { return temp(); }
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* inner = box->getOperand(0);
+
+ // If the box wraps a floating-point value, it needs a new register.
+ if (IsFloatingPointType(inner->type())) {
+ defineBox(new (alloc()) LBoxFloatingPoint(
+ useRegisterAtStart(inner), tempCopy(inner, 0), inner->type()),
+ box);
+ return;
+ }
+
+ if (box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (inner->isConstant()) {
+ defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
+ return;
+ }
+
+ LBox* lir = new (alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+
+ // Note that because we're using BogusTemp(), we do not change the type of
+ // the definition. We also do not define the first output as "TYPE",
+ // because it has no corresponding payload at (vreg + 1). Also note that
+ // although we copy the input's original type for the payload half of the
+ // definition, this is only for clarity. BogusTemp() definitions are
+ // ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition::BogusTemp());
+ box->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* inner = unbox->getOperand(0);
+
+ // An unbox on ARM reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MOZ_ASSERT(inner->type() == MIRType::Value);
+
+ ensureDefined(inner);
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint* lir =
+ new (alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+ define(lir, unbox);
+ return;
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload register.
+ LUnbox* lir = new (alloc()) LUnbox;
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::REGISTER));
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ // Types and payloads form two separate intervals. If the type becomes dead
+ // before the payload, it could be used as a Value without the type being
+ // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+ // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+ // Instead, we create a new virtual register.
+ defineReuseInput(lir, unbox, 0);
+}
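
A minimal sketch (not part of the patch itself) of the value layout that visitBox/visitUnbox juggle: on 32-bit targets a JS Value is split ("nunboxed") into a 32-bit type tag and a 32-bit payload, which is why box and unbox manipulate a register pair. The tag constants below are made up for illustration; the real encodings live in JS::Value.

#include <cstdint>

struct NunboxValue {
  uint32_t tag;      // which kind of value the payload holds (hypothetical codes)
  uint32_t payload;  // int32 bits, a boolean, or an object/string pointer
};

enum HypotheticalTag : uint32_t { TagInt32 = 1, TagBoolean = 2, TagObject = 3 };

static NunboxValue BoxInt32(int32_t i) {
  return NunboxValue{TagInt32, uint32_t(i)};
}

static bool UnboxInt32(const NunboxValue& v, int32_t* out) {
  if (v.tag != TagInt32) {
    return false;  // a fallible unbox: the JIT would bail out here
  }
  *out = int32_t(v.payload);
  return true;
}
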
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ fillBoxUses(ins, 0, opd);
+ add(ins);
+}
+
+void LIRGeneratorARM::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+ uint32_t lowVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(lowVreg);
+
+ uint32_t highVreg = getVirtualRegister();
+ MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+ low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+ high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+ annotate(high);
+ annotate(low);
+}
+
+void LIRGeneratorARM::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+ low->setOperand(inputPosition,
+ LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+ high->setOperand(
+ inputPosition,
+ LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+// x = !y
+void LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(
+ 0, ins->snapshot() ? useRegister(input) : useRegisterAtStart(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x+y
+void LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ // Some operations (e.g. MulI) check their inputs after writing the result,
+ // but only on bailout paths, so useAtStart is safe only when there is no
+ // snapshot.
+ ins->setOperand(0,
+ ins->snapshot() ? useRegister(lhs) : useRegisterAtStart(lhs));
+ ins->setOperand(1, ins->snapshot() ? useRegisterOrConstant(rhs)
+ : useRegisterOrConstantAtStart(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void LIRGeneratorARM::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorARM::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorARM::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ bool needsTemp = true;
+
+ if (rhs->isConstant()) {
+ int64_t constant = rhs->toConstant()->toInt64();
+ int32_t shift = mozilla::FloorLog2(constant);
+ // See special cases in CodeGeneratorARM::visitMulI64
+ if (constant >= -1 && constant <= 2) {
+ needsTemp = false;
+ }
+ if (constant > 0 && int64_t(1) << shift == constant) {
+ needsTemp = false;
+ }
+ }
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ if (needsTemp) {
+ ins->setTemp(0, temp());
+ }
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
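
A minimal sketch (not part of the patch itself) of why no temp is needed for the constants above: the special cases referenced in CodeGeneratorARM::visitMulI64 (not in this hunk) reduce them to pair-wise moves, shifts and add-with-carry. The model below shows the power-of-two case on a (low, high) pair, assuming 0 < k < 32.

#include <cstdint>

// Multiply a 64-bit value held as a (low, high) pair of 32-bit words by 2^k.
static void MulPairByPowerOfTwo(uint32_t* lo, uint32_t* hi, unsigned k) {
  *hi = (*hi << k) | (*lo >> (32 - k));  // carry the top bits of 'lo' into 'hi'
  *lo <<= k;
}
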
+
+void LIRGeneratorARM::lowerForCompareI64AndBranch(MTest* mir, MCompare* comp,
+ JSOp op, MDefinition* left,
+ MDefinition* right,
+ MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ LCompareI64AndBranch* lir = new (alloc())
+ LCompareI64AndBranch(comp, op, useInt64Register(left),
+ useInt64OrConstant(right), ifTrue, ifFalse);
+ add(lir, mir);
+}
+
+void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegisterAtStart(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template <size_t Temps>
+void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegisterAtStart(lhs));
+ ins->setOperand(1, useRegisterAtStart(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+template void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 1>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+void LIRGeneratorARM::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
+ MInstruction* mir,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+void LIRGeneratorARM::lowerWasmBuiltinTruncateToInt32(
+ MWasmBuiltinTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ if (opd->type() == MIRType::Double) {
+ define(new (alloc()) LWasmBuiltinTruncateDToInt32(
+ useRegister(opd), useFixedAtStart(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+ return;
+ }
+
+ define(new (alloc()) LWasmBuiltinTruncateFToInt32(
+ useRegister(opd), useFixedAtStart(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorARM::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(
+ inputPosition,
+ LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
+ payload->setOperand(inputPosition,
+ LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+void LIRGeneratorARM::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+template <size_t Temps>
+void LIRGeneratorARM::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ if (mir->isRotate() && !rhs->isConstant()) {
+ ins->setTemp(0, temp());
+ }
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorARM::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorARM::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+void LIRGeneratorARM::lowerDivI(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+ // possible; division by negative powers of two can be optimized in a
+ // similar manner as positive powers of two, and division by other
+ // constants can be optimized by a reciprocal multiplication technique.
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI* lir =
+ new (alloc()) LDivPowTwoI(useRegisterAtStart(div->lhs()), shift);
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+ }
+
+ if (HasIDIV()) {
+ LDivI* lir = new (alloc())
+ LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+
+ LSoftDivI* lir = new (alloc()) LSoftDivI(useFixedAtStart(div->lhs(), r0),
+ useFixedAtStart(div->rhs(), r1));
+
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+
+ defineReturn(lir, div);
+}
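
A minimal sketch (not part of the patch itself) of the power-of-two fast path above, valid for non-negative numerators only: a bare arithmetic shift rounds toward negative infinity rather than toward zero, so negative numerators need a fixup that the code generator supplies separately and that is not modelled here.

#include <cstdint>

// Returns true and writes lhs / rhs if rhs is a positive power of two and
// lhs is non-negative; otherwise the caller falls back to a real division.
static bool TryDivByPowerOfTwo(int32_t lhs, int32_t rhs, int32_t* out) {
  if (rhs <= 0 || (rhs & (rhs - 1)) != 0 || lhs < 0) {
    return false;
  }
  int shift = 0;
  while ((1 << shift) != rhs) {
    shift++;
  }
  *out = lhs >> shift;  // equal to lhs / rhs when lhs >= 0
  return true;
}
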
+
+void LIRGeneratorARM::lowerNegI(MInstruction* ins, MDefinition* input) {
+ define(new (alloc()) LNegI(useRegisterAtStart(input)), ins);
+}
+
+void LIRGeneratorARM::lowerNegI64(MInstruction* ins, MDefinition* input) {
+ // Reuse the input. Define + use-at-start would risk the output using the
+ // same register pair as the input but in reverse order. Reusing the input
+ // probably also spills less than the alternative, define + use.
+ defineInt64ReuseInput(new (alloc()) LNegI64(useInt64RegisterAtStart(input)),
+ ins, 0);
+}
+
+void LIRGenerator::visitAbs(MAbs* ins) {
+ define(allocateAbs(ins, useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGeneratorARM::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs) {
+ LMulI* lir = new (alloc()) LMulI;
+ if (mul->fallible()) {
+ assignSnapshot(lir, mul->bailoutKind());
+ }
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void LIRGeneratorARM::lowerModI(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir =
+ new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ }
+ if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
+ MOZ_ASSERT(rhs);
+ LModMaskI* lir = new (alloc())
+ LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift + 1);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ }
+ }
+
+ if (HasIDIV()) {
+ LModI* lir =
+ new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ }
+
+ LSoftModI* lir =
+ new (alloc()) LSoftModI(useFixedAtStart(mod->lhs(), r0),
+ useFixedAtStart(mod->rhs(), r1), tempFixed(r2));
+
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+
+ defineReturn(lir, mod);
+}
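
A minimal sketch (not part of the patch itself) of the two strength reductions above, for non-negative, unsigned inputs and 0 < k < 32 only: when rhs == 2^k the remainder is a mask, and when rhs == 2^k - 1 the fact that 2^k is congruent to 1 modulo 2^k - 1 lets the remainder be computed by folding k-bit chunks, which is the kind of identity LModMaskI relies on. Negative dividends are left to the code generator and are not shown.

#include <cstdint>

// x % (2^k), as a mask (the LModPowTwoI case).
static uint32_t ModPowTwo(uint32_t x, unsigned k) {
  return x & ((uint32_t(1) << k) - 1);
}

// x % (2^k - 1), by repeatedly folding k-bit chunks.
static uint32_t ModMask(uint32_t x, unsigned k) {
  uint32_t mask = (uint32_t(1) << k) - 1;  // the modulus is 2^k - 1
  while (x > mask) {
    x = (x & mask) + (x >> k);  // preserves the value modulo 2^k - 1
  }
  return x == mask ? 0 : x;
}
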
+
+void LIRGeneratorARM::lowerDivI64(MDiv* div) {
+ MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
+}
+
+void LIRGeneratorARM::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ if (div->isUnsigned()) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()),
+ useFixedAtStart(div->instance(), InstanceReg));
+ defineReturn(lir, div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc()) LDivOrModI64(
+ useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()),
+ useFixedAtStart(div->instance(), InstanceReg));
+ defineReturn(lir, div);
+}
+
+void LIRGeneratorARM::lowerModI64(MMod* mod) {
+ MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
+
+void LIRGeneratorARM::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ if (mod->isUnsigned()) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()),
+ useFixedAtStart(mod->instance(), InstanceReg));
+ defineReturn(lir, mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc()) LDivOrModI64(
+ useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()),
+ useFixedAtStart(mod->instance(), InstanceReg));
+ defineReturn(lir, mod);
+}
+
+void LIRGeneratorARM::lowerUDivI64(MDiv* div) {
+ MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
+}
+
+void LIRGeneratorARM::lowerUModI64(MMod* mod) {
+ MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
+
+void LIRGenerator::visitPowHalf(MPowHalf* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGeneratorARM::lowerWasmSelectI(MWasmSelect* select) {
+ auto* lir = new (alloc())
+ LWasmSelect(useRegisterAtStart(select->trueExpr()),
+ useAny(select->falseExpr()), useRegister(select->condExpr()));
+ defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
+}
+
+void LIRGeneratorARM::lowerWasmSelectI64(MWasmSelect* select) {
+ auto* lir = new (alloc()) LWasmSelectI64(
+ useInt64RegisterAtStart(select->trueExpr()),
+ useInt64(select->falseExpr()), useRegister(select->condExpr()));
+ defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
+}
+
+LTableSwitch* LIRGeneratorARM::newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitch(in, inputCopy, tableswitch);
+}
+
+LTableSwitchV* LIRGeneratorARM::newLTableSwitchV(MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
+ tempDouble(), tableswitch);
+}
+
+void LIRGeneratorARM::lowerUrshD(MUrsh* mir) {
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new (alloc())
+ LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void LIRGeneratorARM::lowerPowOfTwoI(MPow* mir) {
+ int32_t base = mir->input()->toConstant()->toInt32();
+ MDefinition* power = mir->power();
+
+ auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
+ assignSnapshot(lir, mir->bailoutKind());
+ define(lir, mir);
+}
+
+void LIRGeneratorARM::lowerBigIntLsh(MBigIntLsh* ins) {
+ auto* lir = new (alloc()) LBigIntLsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorARM::lowerBigIntRsh(MBigIntRsh* ins) {
+ auto* lir = new (alloc()) LBigIntRsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorARM::lowerBigIntDiv(MBigIntDiv* ins) {
+ LDefinition temp1, temp2;
+ if (HasIDIV()) {
+ temp1 = temp();
+ temp2 = temp();
+ } else {
+ temp1 = tempFixed(r0);
+ temp2 = tempFixed(r1);
+ }
+ auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorARM::lowerBigIntMod(MBigIntMod* ins) {
+ LDefinition temp1, temp2;
+ if (HasIDIV()) {
+ temp1 = temp();
+ temp2 = temp();
+ } else {
+ temp1 = tempFixed(r0);
+ temp2 = tempFixed(r1);
+ }
+ auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
+ if (ins->type() == MIRType::Int32) {
+ define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ } else if (ins->type() == MIRType::Float32) {
+ define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ }
+}
+
+void LIRGeneratorARM::lowerUDiv(MDiv* div) {
+ MDefinition* lhs = div->getOperand(0);
+ MDefinition* rhs = div->getOperand(1);
+
+ if (HasIDIV()) {
+ LUDiv* lir = new (alloc()) LUDiv;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+
+ LSoftUDivOrMod* lir = new (alloc())
+ LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1));
+
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+
+ defineReturn(lir, div);
+}
+
+void LIRGeneratorARM::lowerUMod(MMod* mod) {
+ MDefinition* lhs = mod->getOperand(0);
+ MDefinition* rhs = mod->getOperand(1);
+
+ if (HasIDIV()) {
+ LUMod* lir = new (alloc()) LUMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ }
+
+ LSoftUDivOrMod* lir = new (alloc())
+ LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1));
+
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+
+ defineReturn(lir, mod);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir =
+ new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir =
+ new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
+ auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
+ auto* lir = new (alloc()) LWasmAtomicLoadI64(useRegisterAtStart(base));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
+ LAllocation(AnyRegister(IntArgReg0))));
+ return;
+ }
+
+ LAllocation ptr = useRegisterAtStart(base);
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc()) LWasmLoadI64(ptr);
+ if (ins->access().offset() || ins->access().type() == Scalar::Int64) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LWasmLoad(ptr);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
+ auto* lir = new (alloc()) LWasmAtomicStoreI64(
+ useRegister(base),
+ useInt64Fixed(ins->value(), Register64(IntArgReg1, IntArgReg0)),
+ tempFixed(IntArgReg2), tempFixed(IntArgReg3));
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation ptr = useRegisterAtStart(base);
+
+ if (ins->value()->type() == MIRType::Int64) {
+ LInt64Allocation value = useInt64RegisterAtStart(ins->value());
+ auto* lir = new (alloc()) LWasmStoreI64(ptr, value);
+ if (ins->access().offset() || ins->access().type() == Scalar::Int64) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation value = useRegisterAtStart(ins->value());
+ auto* lir = new (alloc()) LWasmStore(ptr, value);
+
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+}
+
+void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // On ARM it is best to keep 'base' in a register if a bounds check is
+ // needed.
+ LAllocation baseAlloc;
+ LAllocation limitAlloc;
+
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ // The bounds check is only elided for a known non-negative index.
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else {
+ baseAlloc = useRegisterAtStart(base);
+ if (ins->needsBoundsCheck()) {
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
+ limitAlloc = useRegisterAtStart(boundsCheckLimit);
+ }
+ }
+
+ define(new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation()),
+ ins);
+}
+
+void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAllocation baseAlloc;
+ LAllocation limitAlloc;
+
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else {
+ baseAlloc = useRegisterAtStart(base);
+ if (ins->needsBoundsCheck()) {
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
+ limitAlloc = useRegisterAtStart(boundsCheckLimit);
+ }
+ }
+
+ add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+ limitAlloc, LAllocation()),
+ ins);
+}
+
+void LIRGeneratorARM::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new (alloc())
+ LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorARM::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new (alloc())
+ LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(HasLDSTREXBHD());
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ // The two register pairs must be distinct.
+ LInt64Definition temp1 = tempInt64Fixed(Register64(IntArgReg3, IntArgReg2));
+ LDefinition temp2 = tempFixed(IntArgReg1);
+
+ auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
+ elements, index, value, temp1, temp2);
+ defineFixed(lir, ins, LAllocation(AnyRegister(IntArgReg0)));
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ // If the target is a floating-point register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ tempDef = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir = new (alloc())
+ LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ // Wasm additionally pins the value register to `FetchOpVal64`, but it's
+ // unclear why this was deemed necessary.
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64Fixed(FetchOpTmp64);
+
+ if (ins->isForEffect()) {
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp1, temp2);
+ add(lir, ins);
+ return;
+ }
+
+ LInt64Definition temp3 = tempInt64Fixed(FetchOpOut64);
+
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinop64(
+ elements, index, value, temp1, temp2, temp3);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ if (ins->isForEffect()) {
+ LAtomicTypedArrayElementBinopForEffect* lir = new (alloc())
+ LAtomicTypedArrayElementBinopForEffect(elements, index, value,
+ /* flagTemp= */ temp());
+ add(lir, ins);
+ return;
+ }
+
+ // For a Uint32Array with a known double result we need a temp for
+ // the intermediate output.
+ //
+ // Optimization opportunity (bug 1077317): We can do better by
+ // allowing 'value' to remain as an imm32 if it is small enough to
+ // fit in an instruction.
+
+ LDefinition flagTemp = temp();
+ LDefinition outTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
+ // On ARM, map flagTemp to temp1 and outTemp to temp2, at least for now.
+
+ LAtomicTypedArrayElementBinop* lir = new (alloc())
+ LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ // The three register pairs must be distinct.
+ LInt64Definition temp1 = tempInt64Fixed(CmpXchgOld64);
+ LInt64Definition temp2 = tempInt64Fixed(CmpXchgNew64);
+ LInt64Definition temp3 = tempInt64Fixed(CmpXchgOut64);
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp1, temp2, temp3);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+  // If the target is a floating-point register then we need a temp at the
+  // CodeGenerator level for creating the result.
+ //
+ // Optimization opportunity (bug 1077317): We could do better by
+ // allowing oldval to remain an immediate, if it is small enough
+ // to fit in an instruction.
+
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ tempDef = temp();
+ }
+
+ LCompareExchangeTypedArrayElement* lir =
+ new (alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval,
+ newval, tempDef);
+
+ define(lir, ins);
+}
+
+void LIRGeneratorARM::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc())
+ LAtomicLoad64(elements, index, temp(),
+ tempInt64Fixed(Register64(IntArgReg1, IntArgReg0)));
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorARM::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useRegister(ins->value());
+ LInt64Definition temp1 = tempInt64Fixed(Register64(IntArgReg1, IntArgReg0));
+ LInt64Definition temp2 = tempInt64Fixed(Register64(IntArgReg3, IntArgReg2));
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, temp1, temp2), ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (ins->access().type() == Scalar::Int64) {
+ // The three register pairs must be distinct.
+ auto* lir = new (alloc()) LWasmCompareExchangeI64(
+ useRegister(base), useInt64Fixed(ins->oldValue(), CmpXchgOld64),
+ useInt64Fixed(ins->newValue(), CmpXchgNew64));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(CmpXchgOutHi)),
+ LAllocation(AnyRegister(CmpXchgOutLo))));
+ return;
+ }
+
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(HasLDSTREXBHD(), "by HasPlatformSupport() constraints");
+
+ LWasmCompareExchangeHeap* lir = new (alloc())
+ LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
+ useRegister(ins->newValue()));
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmAtomicExchangeI64(
+ useRegister(ins->base()), useInt64Fixed(ins->value(), XchgNew64),
+ ins->access());
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(XchgOutHi)),
+ LAllocation(AnyRegister(XchgOutLo))));
+ return;
+ }
+
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(HasLDSTREXBHD(), "by HasPlatformSupport() constraints");
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+ define(new (alloc()) LWasmAtomicExchangeHeap(base, value), ins);
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmAtomicBinopI64(
+ useRegister(ins->base()), useInt64Fixed(ins->value(), FetchOpVal64),
+ tempFixed(FetchOpTmpLo), tempFixed(FetchOpTmpHi), ins->access(),
+ ins->operation());
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(FetchOpOutHi)),
+ LAllocation(AnyRegister(FetchOpOutLo))));
+ return;
+ }
+
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(HasLDSTREXBHD(), "by HasPlatformSupport() constraints");
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (!ins->hasUses()) {
+ LWasmAtomicBinopHeapForEffect* lir =
+ new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()),
+ /* flagTemp= */ temp());
+ add(lir, ins);
+ return;
+ }
+
+ LWasmAtomicBinopHeap* lir = new (alloc())
+ LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
+ /* temp = */ LDefinition::BogusTemp(),
+ /* flagTemp= */ temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use MWasmTruncateToInt64 for arm");
+}
+
+void LIRGeneratorARM::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MDefinition* instance = ins->instance();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineReturn(new (alloc())
+ LWasmTruncateToInt64(useRegisterAtStart(opd),
+ useFixedAtStart(instance, InstanceReg)),
+ ins);
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We use BuiltinInt64ToFloatingPoint instead.");
+}
+
+void LIRGeneratorARM::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Double || ins->type() == MIRType::Float32);
+
+ auto* lir = new (alloc())
+ LInt64ToFloatingPointCall(useInt64RegisterAtStart(ins->input()),
+ useFixedAtStart(ins->instance(), InstanceReg));
+ defineReturn(lir, ins);
+}
+
+void LIRGenerator::visitCopySign(MCopySign* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double) {
+ lir = new (alloc()) LCopySignD();
+ } else {
+ lir = new (alloc()) LCopySignF();
+ }
+
+ lir->setTemp(0, temp());
+ lir->setTemp(1, temp());
+
+ lowerForFPU(lir, ins, lhs, rhs);
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ auto* lir =
+ new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input()));
+ defineInt64(lir, ins);
+
+ LDefinition def(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def.setReusedInput(0);
+ def.setVirtualRegister(ins->virtualRegister());
+
+ lir->setDef(0, def);
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ defineInt64(new (alloc())
+ LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
+ ins);
+}
+
+// On arm we only specialize the cases where the compare is {U,}Int32 and the
+// select is {U,}Int32.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32);
+}
+
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ auto* lir = new (alloc()) LWasmCompareAndSelect(
+ useRegister(lhs), useRegister(rhs), compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()), useRegister(ins->falseExpr()));
+ defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
+
+void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
+ MOZ_CRASH("ternary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
+ MOZ_CRASH("binary SIMD NYI");
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
+ int8_t shuffle[16]) {
+ return false;
+}
+bool MWasmTernarySimd128::canRelaxBitselect() { return false; }
+
+bool MWasmBinarySimd128::canPmaddubsw() { return false; }
+#endif
+
+bool MWasmBinarySimd128::specializeForConstantRhs() {
+  // There are probably many cases we want to handle here.
+ return false;
+}
+
+void LIRGenerator::visitWasmBinarySimd128WithConstant(
+ MWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("binary SIMD with constant NYI");
+}
+
+void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
+ MOZ_CRASH("shift SIMD NYI");
+}
+
+void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
+ MOZ_CRASH("shuffle SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("replace-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
+ MOZ_CRASH("scalar-to-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
+ MOZ_CRASH("unary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
+ MOZ_CRASH("reduce-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("load-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("store-lane SIMD NYI");
+}
diff --git a/js/src/jit/arm/Lowering-arm.h b/js/src/jit/arm/Lowering-arm.h
new file mode 100644
index 0000000000..3f03d22941
--- /dev/null
+++ b/js/src/jit/arm/Lowering-arm.h
@@ -0,0 +1,118 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_Lowering_arm_h
+#define jit_arm_Lowering_arm_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorARM : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorARM(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {}
+
+ // Returns a box allocation with type set to reg1 and payload set to reg2.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ // x86 has constraints on what registers can be formatted for 1-byte
+ // stores and loads; on ARM all registers are okay.
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ inline LDefinition tempToUnbox() { return LDefinition::BogusTemp(); }
+
+ bool needTempForPostBarrier() { return false; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerUrshD(MUrsh* mir);
+
+ void lowerPowOfTwoI(MPow* mir);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+ template <size_t Temps>
+ void lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompareI64AndBranch(MTest* mir, MCompare* comp, JSOp op,
+ MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* src);
+ template <size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void lowerDivI(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerDivI64(MDiv* div);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerModI64(MMod* mod);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+ void lowerNegI(MInstruction* ins, MDefinition* input);
+ void lowerNegI64(MInstruction* ins, MDefinition* input);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerUDiv(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void lowerWasmSelectI(MWasmSelect* select);
+ void lowerWasmSelectI64(MWasmSelect* select);
+
+ void lowerBigIntLsh(MBigIntLsh* ins);
+ void lowerBigIntRsh(MBigIntRsh* ins);
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ void lowerPhi(MPhi* phi);
+};
+
+typedef LIRGeneratorARM LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_Lowering_arm_h */
diff --git a/js/src/jit/arm/MacroAssembler-arm-inl.h b/js/src/jit/arm/MacroAssembler-arm-inl.h
new file mode 100644
index 0000000000..94d323207e
--- /dev/null
+++ b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -0,0 +1,2582 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_MacroAssembler_arm_inl_h
+#define jit_arm_MacroAssembler_arm_inl_h
+
+#include "jit/arm/MacroAssembler-arm.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ move32(src.low, dest.low);
+ move32(src.high, dest.high);
+}
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+ move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+ ma_vxfer(src, dest);
+}
+
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+ ma_vxfer(src, dest);
+}
+
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+ as_sxtb(dest, src, 0);
+}
+
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+ as_sxth(dest, src, 0);
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ ma_vxfer(src, dest.low, dest.high);
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ ma_vxfer(src.low, src.high, dest);
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+ if (src.low != dest) {
+ move32(src.low, dest);
+ }
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ if (src != dest.low) {
+ move32(src, dest.low);
+ }
+ move32(Imm32(0), dest.high);
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ as_sxtb(dest.low, src, 0);
+ ma_asr(Imm32(31), dest.low, dest.high);
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ as_sxth(dest.low, src, 0);
+ ma_asr(Imm32(31), dest.low, dest.high);
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ if (src != dest.low) {
+ move32(src, dest.low);
+ }
+ ma_asr(Imm32(31), dest.low, dest.high);
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ move32(src, dest);
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ move32(src, dest);
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ load32(src, dest);
+}
+
+void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(lr, dest); }
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::not32(Register reg) { ma_mvn(reg, reg); }
+
+void MacroAssembler::notPtr(Register reg) { ma_mvn(reg, reg); }
+
+void MacroAssembler::and32(Register src, Register dest) {
+ ma_and(src, dest, SetCC);
+}
+
+void MacroAssembler::and32(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_and(imm, dest, scratch, SetCC);
+}
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_and(imm, scratch, scratch2);
+ ma_str(scratch, dest, scratch2);
+}
+
+void MacroAssembler::and32(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(src, scratch, scratch2);
+ ma_and(scratch, dest, SetCC);
+}
+
+void MacroAssembler::andPtr(Register src, Register dest) { ma_and(src, dest); }
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_and(imm, dest, scratch);
+}
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ if (imm.low().value != int32_t(0xFFFFFFFF)) {
+ and32(imm.low(), dest.low);
+ }
+ if (imm.hi().value != int32_t(0xFFFFFFFF)) {
+ and32(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ if (imm.low().value) {
+ or32(imm.low(), dest.low);
+ }
+ if (imm.hi().value) {
+ or32(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ if (imm.low().value) {
+ xor32(imm.low(), dest.low);
+ }
+ if (imm.hi().value) {
+ xor32(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::or32(Register src, Register dest) { ma_orr(src, dest); }
+
+void MacroAssembler::or32(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_orr(imm, dest, scratch);
+}
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_orr(imm, scratch, scratch2);
+ ma_str(scratch, dest, scratch2);
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) { ma_orr(src, dest); }
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_orr(imm, dest, scratch);
+}
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ and32(src.low, dest.low);
+ and32(src.high, dest.high);
+}
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ or32(src.low, dest.low);
+ or32(src.high, dest.high);
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ ma_eor(src.low, dest.low);
+ ma_eor(src.high, dest.high);
+}
+
+void MacroAssembler::xor32(Register src, Register dest) {
+ ma_eor(src, dest, SetCC);
+}
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_eor(imm, dest, scratch, SetCC);
+}
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_eor(imm, scratch, scratch2);
+ ma_str(scratch, dest, scratch2);
+}
+
+void MacroAssembler::xor32(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(src, scratch, scratch2);
+ ma_eor(scratch, dest, SetCC);
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) { ma_eor(src, dest); }
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_eor(imm, dest, scratch);
+}
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap16SignExtend(Register reg) { as_revsh(reg, reg); }
+
+void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
+ as_rev16(reg, reg);
+ as_uxth(reg, reg, 0);
+}
+
+void MacroAssembler::byteSwap32(Register reg) { as_rev(reg, reg); }
+
+void MacroAssembler::byteSwap64(Register64 reg) {
+ as_rev(reg.high, reg.high);
+ as_rev(reg.low, reg.low);
+
+ ScratchRegisterScope scratch(*this);
+ ma_mov(reg.high, scratch);
+ ma_mov(reg.low, reg.high);
+ ma_mov(scratch, reg.low);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::add32(Register src, Register dest) {
+ ma_add(src, dest, SetCC);
+}
+
+void MacroAssembler::add32(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_add(imm, dest, scratch, SetCC);
+}
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_add(imm, scratch, scratch2, SetCC);
+ ma_str(scratch, dest, scratch2);
+}
+
+void MacroAssembler::addPtr(Register src, Register dest) { ma_add(src, dest); }
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_add(imm, dest, scratch);
+}
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ addPtr(Imm32(imm.value), dest);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_add(imm, scratch, scratch2);
+ ma_str(scratch, dest, scratch2);
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(src, scratch, scratch2);
+ ma_add(scratch, dest, SetCC);
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ ma_add(src.low, dest.low, SetCC);
+ ma_adc(src.high, dest.high);
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_add(imm, dest.low, scratch, SetCC);
+ as_adc(dest.high, dest.high, Imm8(0), LeaveCC);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_add(imm.low(), dest.low, scratch, SetCC);
+ ma_adc(imm.hi(), dest.high, scratch, LeaveCC);
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ ScratchRegisterScope scratch(*this);
+ CodeOffset offs = CodeOffset(currentOffset());
+ ma_movPatchable(Imm32(0), scratch, Always);
+ ma_sub(getStackPointer(), scratch, dest);
+ return offs;
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ ScratchRegisterScope scratch(*this);
+ BufferInstructionIterator iter(BufferOffset(offset.offset()), &m_buffer);
+ iter.maybeSkipAutomaticInstructions();
+ ma_mov_patch(imm, scratch, Always, HasMOVWT() ? L_MOVWT : L_LDR, iter);
+}
+
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+ ma_vadd(dest, src, dest);
+}
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+ ma_vadd_f32(dest, src, dest);
+}
+
+void MacroAssembler::sub32(Register src, Register dest) {
+ ma_sub(src, dest, SetCC);
+}
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_sub(imm, dest, scratch, SetCC);
+}
+
+void MacroAssembler::sub32(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(src, scratch, scratch2);
+ ma_sub(scratch, dest, SetCC);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) { ma_sub(src, dest); }
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(dest, scratch, scratch2);
+ ma_sub(src, scratch);
+ ma_str(scratch, dest, scratch2);
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_sub(imm, dest, scratch);
+}
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(addr, scratch, scratch2);
+ ma_sub(scratch, dest);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ ma_sub(src.low, dest.low, SetCC);
+ ma_sbc(src.high, dest.high, LeaveCC);
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_sub(imm.low(), dest.low, scratch, SetCC);
+ ma_sbc(imm.hi(), dest.high, scratch, LeaveCC);
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+ ma_vsub(dest, src, dest);
+}
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+ ma_vsub_f32(dest, src, dest);
+}
+
+void MacroAssembler::mul32(Register rhs, Register srcDest) {
+ as_mul(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
+ ScratchRegisterScope scratch(*this);
+ move32(imm, scratch);
+ mul32(scratch, srcDest);
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_umull(src, imm, dest, scratch, scratch);
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+ as_mul(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+ ma_mov(Imm32(imm.value & 0xFFFFFFFFL), scratch);
+ as_mul(dest.high, dest.high, scratch);
+
+ // high:low = LOW(dest) * LOW(imm);
+ as_umull(scratch2, scratch, dest.low, scratch);
+
+ // HIGH(dest) += high;
+ as_add(dest.high, dest.high, O2Reg(scratch2));
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
+ if (((imm.value >> 32) & 0xFFFFFFFFL) == 5) {
+ as_add(scratch2, dest.low, lsl(dest.low, 2));
+ } else {
+ MOZ_CRASH("Not supported imm");
+ }
+ as_add(dest.high, dest.high, O2Reg(scratch2));
+
+ // LOW(dest) = low;
+ ma_mov(scratch, dest.low);
+}
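The comment block above decomposes a 64x64->64 multiply into 32-bit halves. A minimal sketch of the same decomposition in plain C++, checked against a native 64-bit multiply (the helper name mul64ByHalves is illustrative; the register sequence above additionally special-cases the immediate's high word):

#include <cstdint>
#include <cassert>

// Model of the decomposition used above:
//   low  = LOW(lo(a) * lo(b))
//   high = LOW(hi(a) * lo(b)) + LOW(lo(a) * hi(b)) + HIGH(lo(a) * lo(b))
static uint64_t mul64ByHalves(uint64_t a, uint64_t b) {
  uint32_t aLo = uint32_t(a), aHi = uint32_t(a >> 32);
  uint32_t bLo = uint32_t(b), bHi = uint32_t(b >> 32);
  uint64_t cross = uint64_t(aLo) * bLo;  // full 64-bit low partial product
  uint32_t low = uint32_t(cross);
  uint32_t high = aHi * bLo + aLo * bHi + uint32_t(cross >> 32);
  return (uint64_t(high) << 32) | low;
}

int main() {
  assert(mul64ByHalves(0x0123456789ABCDEFull, 0x0000000500000003ull) ==
         0x0123456789ABCDEFull * 0x0000000500000003ull);
  assert(mul64ByHalves(0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull) ==
         0xFFFFFFFFFFFFFFFFull * 0xFFFFFFFFFFFFFFFFull);
  return 0;
}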
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(temp != dest.high && temp != dest.low);
+
+ // Compute mul64
+ ScratchRegisterScope scratch(*this);
+ ma_mul(dest.high, imm.low(), dest.high, scratch); // (2)
+ ma_mul(dest.low, imm.hi(), temp, scratch); // (3)
+ ma_add(dest.high, temp, temp);
+ ma_umull(dest.low, imm.low(), dest.high, dest.low, scratch); // (4) + (1)
+ ma_add(temp, dest.high, dest.high);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest != src);
+ MOZ_ASSERT(dest.low != src.high && dest.high != src.low);
+
+ // Compute mul64
+ ma_mul(dest.high, src.low, dest.high); // (2)
+ ma_mul(src.high, dest.low, temp); // (3)
+ ma_add(dest.high, temp, temp);
+ ma_umull(dest.low, src.low, dest.high, dest.low); // (4) + (1)
+ ma_add(temp, dest.high, dest.high);
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ as_add(dest, src, lsl(src, 1));
+}
+
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+ ma_vmul_f32(dest, src, dest);
+}
+
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+ ma_vmul(dest, src, dest);
+}
+
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(*this);
+ ScratchDoubleScope scratchDouble(*this);
+
+ movePtr(imm, scratch);
+ ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), scratchDouble);
+ mulDouble(scratchDouble, dest);
+}
+
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ MOZ_ASSERT(HasIDIV());
+ if (isUnsigned) {
+ ma_udiv(srcDest, rhs, srcDest);
+ } else {
+ ma_sdiv(srcDest, rhs, srcDest);
+ }
+}
+
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ MOZ_ASSERT(HasIDIV());
+
+ ScratchRegisterScope scratch(*this);
+ if (isUnsigned) {
+ ma_umod(srcDest, rhs, srcDest, scratch);
+ } else {
+ ma_smod(srcDest, rhs, srcDest, scratch);
+ }
+}
+
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+ ma_vdiv_f32(dest, src, dest);
+}
+
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+ ma_vdiv(dest, src, dest);
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+ ScratchRegisterScope scratch(*this);
+
+ ma_strd(r0, r1, EDtrAddr(sp, EDtrOffImm(-8)), PreIndex);
+
+ ma_mov(Imm32((int32_t)dest.addr), scratch);
+ ma_ldrd(EDtrAddr(scratch, EDtrOffImm(0)), r0, r1);
+
+ as_add(r0, r0, Imm8(1), SetCC);
+ as_adc(r1, r1, Imm8(0), LeaveCC);
+
+ ma_strd(r0, r1, EDtrAddr(scratch, EDtrOffImm(0)));
+ ma_ldrd(EDtrAddr(sp, EDtrOffImm(8)), r0, r1, PostIndex);
+}
+
+void MacroAssembler::neg32(Register reg) { ma_neg(reg, reg, SetCC); }
+
+void MacroAssembler::neg64(Register64 reg) {
+ as_rsb(reg.low, reg.low, Imm8(0), SetCC);
+ as_rsc(reg.high, reg.high, Imm8(0));
+}
+
+void MacroAssembler::negPtr(Register reg) { neg32(reg); }
+
+void MacroAssembler::negateDouble(FloatRegister reg) { ma_vneg(reg, reg); }
+
+void MacroAssembler::negateFloat(FloatRegister reg) { ma_vneg_f32(reg, reg); }
+
+void MacroAssembler::abs32(Register src, Register dest) {
+ as_cmp(src, Imm8(0));
+ as_rsb(dest, src, Imm8(0), LeaveCC, LessThan);
+ if (dest != src) {
+ as_mov(dest, O2Reg(src), LeaveCC, GreaterThanOrEqual);
+ }
+}
+
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+ ma_vabs_f32(src, dest);
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+ ma_vabs(src, dest);
+}
+
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+ ma_vsqrt_f32(src, dest);
+}
+
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+ ma_vsqrt(src, dest);
+}
+
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_lsl(imm, dest, dest);
+}
+
+void MacroAssembler::lshiftPtr(Register src, Register dest) {
+ ma_lsl(src, dest, dest);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value == 0) {
+ return;
+ }
+
+ if (imm.value < 32) {
+ as_mov(dest.high, lsl(dest.high, imm.value));
+ as_orr(dest.high, dest.high, lsr(dest.low, 32 - imm.value));
+ as_mov(dest.low, lsl(dest.low, imm.value));
+ } else {
+ as_mov(dest.high, lsl(dest.low, imm.value - 32));
+ ma_mov(Imm32(0), dest.low);
+ }
+}
+
+void MacroAssembler::lshift64(Register unmaskedShift, Register64 dest) {
+  // dest.high = dest.high << shift | dest.low << (shift - 32)
+  //           | dest.low >> (32 - shift)
+  // Note: one of the two dest.low shifts always yields zero (negative shift).
+
+ ScratchRegisterScope shift(*this);
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.high, lsl(dest.high, shift));
+ as_sub(shift, shift, Imm8(32));
+ as_orr(dest.high, dest.high, lsl(dest.low, shift));
+ ma_neg(shift, shift);
+ as_orr(dest.high, dest.high, lsr(dest.low, shift));
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.low, lsl(dest.low, shift));
+}
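The shift-by-register sequences here lean on the fact that ARM register-specified LSL/LSR take the shift amount from the low byte of the register and yield zero for any amount of 32 or more, which is exactly what a "negative" amount becomes. A minimal scalar model of the composition under that zero-for-out-of-range assumption (helper names are illustrative; rshift64 below mirrors it with logical right shifts):

#include <cstdint>
#include <cassert>

// ARM LSL/LSR by a register amount in [32, 255] produce 0; model that rule.
static uint32_t lslOrZero(uint32_t v, int32_t amount) {
  return (amount >= 0 && amount < 32) ? (v << amount) : 0;
}
static uint32_t lsrOrZero(uint32_t v, int32_t amount) {
  return (amount >= 0 && amount < 32) ? (v >> amount) : 0;
}

// high = high << s | low << (s - 32) | low >> (32 - s); low = low << s.
static uint64_t lshift64Model(uint64_t value, uint32_t unmaskedShift) {
  int32_t s = int32_t(unmaskedShift & 0x3f);
  uint32_t lo = uint32_t(value), hi = uint32_t(value >> 32);
  uint32_t newHi =
      lslOrZero(hi, s) | lslOrZero(lo, s - 32) | lsrOrZero(lo, 32 - s);
  uint32_t newLo = lslOrZero(lo, s);
  return (uint64_t(newHi) << 32) | newLo;
}

int main() {
  uint64_t v = 0x0123456789ABCDEFull;
  for (uint32_t s = 0; s < 64; s++) {
    assert(lshift64Model(v, s) == (v << s));
  }
  return 0;
}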
+
+void MacroAssembler::lshift32(Register src, Register dest) {
+ ma_lsl(src, dest, dest);
+}
+
+void MacroAssembler::flexibleLshift32(Register src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ as_and(scratch, src, Imm8(0x1F));
+ lshift32(scratch, dest);
+}
+
+void MacroAssembler::lshift32(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ lshiftPtr(imm, dest);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ if (imm.value) {
+ ma_lsr(imm, dest, dest);
+ }
+}
+
+void MacroAssembler::rshiftPtr(Register src, Register dest) {
+ ma_lsr(src, dest, dest);
+}
+
+void MacroAssembler::rshift32(Register src, Register dest) {
+ ma_lsr(src, dest, dest);
+}
+
+void MacroAssembler::flexibleRshift32(Register src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ as_and(scratch, src, Imm8(0x1F));
+ rshift32(scratch, dest);
+}
+
+void MacroAssembler::rshift32(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ rshiftPtr(imm, dest);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ if (imm.value) {
+ ma_asr(imm, dest, dest);
+ }
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (!imm.value) {
+ return;
+ }
+
+ if (imm.value < 32) {
+ as_mov(dest.low, lsr(dest.low, imm.value));
+ as_orr(dest.low, dest.low, lsl(dest.high, 32 - imm.value));
+ as_mov(dest.high, asr(dest.high, imm.value));
+ } else if (imm.value == 32) {
+ as_mov(dest.low, O2Reg(dest.high));
+ as_mov(dest.high, asr(dest.high, 31));
+ } else {
+ as_mov(dest.low, asr(dest.high, imm.value - 32));
+ as_mov(dest.high, asr(dest.high, 31));
+ }
+}
+
+void MacroAssembler::rshift64Arithmetic(Register unmaskedShift,
+ Register64 dest) {
+ Label proceed;
+
+  // dest.low = (dest.low >>> shift) | (dest.high << (32 - shift))
+  // if (shift - 32 >= 0)
+  //   dest.low |= dest.high >> (shift - 32)   [arithmetic shift]
+  // Note: negative shift amounts yield zero, except for the arithmetic
+  //       (signed) right shift. Therefore we test for that case and only
+  //       apply it when the amount isn't negative.
+ ScratchRegisterScope shift(*this);
+
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.low, lsr(dest.low, shift));
+ as_rsb(shift, shift, Imm8(32));
+ as_orr(dest.low, dest.low, lsl(dest.high, shift));
+ ma_neg(shift, shift, SetCC);
+ ma_b(&proceed, Signed);
+
+ as_orr(dest.low, dest.low, asr(dest.high, shift));
+
+ bind(&proceed);
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.high, asr(dest.high, shift));
+}
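The branch above exists because ASR does not share the zero-for-out-of-range behaviour: an ARM arithmetic shift right by a register amount of 32 or more fills the result with the sign bit, so OR-ing the dest.high >> (shift - 32) term unconditionally would corrupt dest.low whenever the shift is below 32. A small sketch of that rule (the asrArm helper and the values are illustrative):

#include <cstdint>
#include <cassert>

// ARM ASR by a register amount of 32..255 produces all-sign-bits, not zero.
static uint32_t asrArm(uint32_t v, uint32_t amount) {
  bool negative = (v & 0x80000000u) != 0;
  if (amount >= 32) return negative ? 0xFFFFFFFFu : 0u;
  uint32_t shifted = v >> amount;
  if (negative && amount > 0) shifted |= ~0u << (32 - amount);
  return shifted;
}

int main() {
  uint32_t high = 0x80000000u;  // negative high word
  // shift = 1: (shift - 32) has low byte 225, so an unguarded ASR would OR
  // 0xFFFFFFFF into dest.low and corrupt the result.
  assert(asrArm(high, 225) == 0xFFFFFFFFu);
  // shift = 33: the guarded path runs and ASR by 1 is exactly what we want.
  assert(asrArm(high, 1) == 0xC0000000u);
  return 0;
}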
+
+void MacroAssembler::rshift32Arithmetic(Register src, Register dest) {
+ ma_asr(src, dest, dest);
+}
+
+void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ rshiftPtrArithmetic(imm, dest);
+}
+
+void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ as_and(scratch, src, Imm8(0x1F));
+ rshift32Arithmetic(scratch, dest);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (!imm.value) {
+ return;
+ }
+
+ if (imm.value < 32) {
+ as_mov(dest.low, lsr(dest.low, imm.value));
+ as_orr(dest.low, dest.low, lsl(dest.high, 32 - imm.value));
+ as_mov(dest.high, lsr(dest.high, imm.value));
+ } else if (imm.value == 32) {
+ ma_mov(dest.high, dest.low);
+ ma_mov(Imm32(0), dest.high);
+ } else {
+ ma_lsr(Imm32(imm.value - 32), dest.high, dest.low);
+ ma_mov(Imm32(0), dest.high);
+ }
+}
+
+void MacroAssembler::rshift64(Register unmaskedShift, Register64 dest) {
+  // dest.low = dest.low >> shift | dest.high >> (shift - 32)
+  //          | dest.high << (32 - shift)
+  // Note: one of the two dest.high shifts always yields zero (negative shift).
+
+ ScratchRegisterScope shift(*this);
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.low, lsr(dest.low, shift));
+ as_sub(shift, shift, Imm8(32));
+ as_orr(dest.low, dest.low, lsr(dest.high, shift));
+ ma_neg(shift, shift);
+ as_orr(dest.low, dest.low, lsl(dest.high, shift));
+ as_and(shift, unmaskedShift, Imm8(0x3f));
+ as_mov(dest.high, lsr(dest.high, shift));
+}
+
+// ===============================================================
+// Rotate functions
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
+ if (count.value) {
+ ma_rol(count, input, dest);
+ } else {
+ ma_mov(input, dest);
+ }
+}
+
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ ma_rol(count, input, dest, scratch);
+}
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateRight64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_mov(input.low, dest.low);
+ ma_mov(input.high, dest.high);
+ } else if (amount == 32) {
+ ma_mov(input.low, scratch);
+ ma_mov(input.high, dest.low);
+ ma_mov(scratch, dest.high);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_mov(dest.high, scratch);
+ as_mov(dest.high, lsl(dest.high, amount));
+ as_orr(dest.high, dest.high, lsr(dest.low, 32 - amount));
+ as_mov(dest.low, lsl(dest.low, amount));
+ as_orr(dest.low, dest.low, lsr(scratch, 32 - amount));
+ }
+ }
+}
+
+void MacroAssembler::rotateLeft64(Register shift, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(shift != temp);
+ MOZ_ASSERT(src == dest);
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope shift_value(*this);
+ Label high, done;
+
+ ma_mov(src.high, temp);
+ as_and(shift_value, shift, Imm8(0x3f));
+ as_cmp(shift_value, Imm8(32));
+ ma_b(&high, GreaterThanOrEqual);
+
+ // high = high << shift | low >> 32 - shift
+ // low = low << shift | high >> 32 - shift
+ as_mov(dest.high, lsl(src.high, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.high, dest.high, lsr(src.low, shift_value));
+
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_mov(dest.low, lsl(src.low, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.low, dest.low, lsr(temp, shift_value));
+
+ ma_b(&done);
+
+  // A rotate by 32..63 is a rotate by (64 - amount) in the other direction.
+ bind(&high);
+ as_rsb(shift_value, shift_value, Imm8(64));
+
+ as_mov(dest.high, lsr(src.high, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.high, dest.high, lsl(src.low, shift_value));
+
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_mov(dest.low, lsr(src.low, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.low, dest.low, lsl(temp, shift_value));
+
+ bind(&done);
+}
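Both register-count rotate helpers split on whether the masked amount is below 32, using the identity that a left rotate by 33..63 equals a right rotate by 64 - amount (and vice versa for rotateRight64 below). A minimal scalar check of that identity (helper names are illustrative):

#include <cstdint>
#include <cassert>

static uint64_t rotl64(uint64_t v, uint32_t count) {
  uint32_t amount = count & 0x3f;
  if (amount == 0) return v;
  return (v << amount) | (v >> (64 - amount));
}

static uint64_t rotr64(uint64_t v, uint32_t count) {
  uint32_t amount = count & 0x3f;
  if (amount == 0) return v;
  return (v >> amount) | (v << (64 - amount));
}

int main() {
  uint64_t v = 0x0123456789ABCDEFull;
  // A left rotate in the upper half of the range is a right rotate by the
  // complementary amount, which is what the `high` path above exploits.
  for (uint32_t amount = 33; amount < 64; amount++) {
    assert(rotl64(v, amount) == rotr64(v, 64 - amount));
  }
  return 0;
}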
+
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+ if (count.value) {
+ ma_ror(count, input, dest);
+ } else {
+ ma_mov(input, dest);
+ }
+}
+
+void MacroAssembler::rotateRight(Register count, Register input,
+ Register dest) {
+ ma_ror(count, input, dest);
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateLeft64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_mov(input.low, dest.low);
+ ma_mov(input.high, dest.high);
+ } else if (amount == 32) {
+ ma_mov(input.low, scratch);
+ ma_mov(input.high, dest.low);
+ ma_mov(scratch, dest.high);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_mov(dest.high, scratch);
+ as_mov(dest.high, lsr(dest.high, amount));
+ as_orr(dest.high, dest.high, lsl(dest.low, 32 - amount));
+ as_mov(dest.low, lsr(dest.low, amount));
+ as_orr(dest.low, dest.low, lsl(scratch, 32 - amount));
+ }
+ }
+}
+
+void MacroAssembler::rotateRight64(Register shift, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(shift != temp);
+ MOZ_ASSERT(src == dest);
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope shift_value(*this);
+ Label high, done;
+
+ ma_mov(src.high, temp);
+ as_and(shift_value, shift, Imm8(0x3f));
+ as_cmp(shift_value, Imm8(32));
+ ma_b(&high, GreaterThanOrEqual);
+
+ // high = high >> shift | low << 32 - shift
+ // low = low >> shift | high << 32 - shift
+ as_mov(dest.high, lsr(src.high, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.high, dest.high, lsl(src.low, shift_value));
+
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_mov(dest.low, lsr(src.low, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.low, dest.low, lsl(temp, shift_value));
+
+ ma_b(&done);
+
+  // A rotate by 32..63 is a rotate by (64 - amount) in the other direction.
+ bind(&high);
+ as_rsb(shift_value, shift_value, Imm8(64));
+
+ as_mov(dest.high, lsl(src.high, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.high, dest.high, lsr(src.low, shift_value));
+
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_mov(dest.low, lsl(src.low, shift_value));
+ as_rsb(shift_value, shift_value, Imm8(32));
+ as_orr(dest.low, dest.low, lsr(temp, shift_value));
+
+ bind(&done);
+}
+
+// ===============================================================
+// Condition functions
+
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ // Inlined calls to load8{Zero,Sign}Extend() and cmp32Set() to acquire
+ // exclusive access to scratch registers.
+
+ bool isSigned;
+ Imm32 imm(0);
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ isSigned = false;
+ imm = Imm32(uint8_t(rhs.value));
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ isSigned = true;
+ imm = Imm32(int8_t(rhs.value));
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+
+ ma_dataTransferN(IsLoad, 8, isSigned, lhs.base, Imm32(lhs.offset), scratch,
+ scratch2);
+ ma_cmp(scratch, imm, scratch2);
+ emitSet(cond, dest);
+}
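The switch above chooses zero- or sign-extension for both the loaded byte and the immediate based on the signedness of the condition; with the wrong extension, any byte whose top bit is set compares the wrong way. A small plain C++ illustration (the values are hypothetical):

#include <cstdint>
#include <cassert>

int main() {
  uint8_t byteInMemory = 0xFF;

  // Unsigned conditions (Above/Below/...): compare as zero-extended values.
  uint32_t u = byteInMemory;         // 255
  assert(u > 1u);                    // Above: 0xFF is a large unsigned value

  // Signed conditions (GreaterThan/LessThan/...): compare as sign-extended.
  int32_t s = int8_t(byteInMemory);  // -1
  assert(s < 1);                     // LessThan: 0xFF is -1 as a signed byte

  return 0;
}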
+
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ // Inlined calls to load16{Zero,Sign}Extend() and cmp32Set() to acquire
+ // exclusive access to scratch registers.
+
+ bool isSigned;
+ Imm32 imm(0);
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ isSigned = false;
+ imm = Imm32(uint16_t(rhs.value));
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ isSigned = true;
+ imm = Imm32(int16_t(rhs.value));
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+
+ ma_dataTransferN(IsLoad, 16, isSigned, lhs.base, Imm32(lhs.offset), scratch,
+ scratch2);
+ ma_cmp(scratch, imm, scratch2);
+ emitSet(cond, dest);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ cmp32(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ Label success, done;
+
+ branch64(cond, lhs, rhs, &success);
+ move32(Imm32(0), dest);
+ jump(&done);
+ bind(&success);
+ move32(Imm32(1), dest);
+ bind(&done);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+ ma_clz(src, dest);
+}
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+
+ ma_clz(src.high, scratch);
+ as_cmp(scratch, Imm8(32));
+ ma_mov(scratch, dest, LeaveCC, NotEqual);
+ ma_clz(src.low, dest, Equal);
+ as_add(dest, dest, Imm8(32), LeaveCC, Equal);
+}
+
+void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
+ ScratchRegisterScope scratch(*this);
+ ma_ctz(src, dest, scratch);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ Label done, high;
+ as_cmp(src.low, Imm8(0));
+ ma_b(&high, Equal);
+
+ ctz32(src.low, dest, /* knownNotZero = */ true);
+ ma_b(&done);
+
+ bind(&high);
+ ctz32(src.high, dest, /* knownNotZero = */ false);
+ as_add(dest, dest, Imm8(32));
+
+ bind(&done);
+}
+
+void MacroAssembler::popcnt32(Register input, Register output, Register tmp) {
+ // Equivalent to GCC output of mozilla::CountPopulation32()
+
+ ScratchRegisterScope scratch(*this);
+
+ if (input != output) {
+ ma_mov(input, output);
+ }
+ as_mov(tmp, asr(output, 1));
+ ma_and(Imm32(0x55555555), tmp, scratch);
+ ma_sub(output, tmp, output);
+ as_mov(tmp, asr(output, 2));
+ ma_mov(Imm32(0x33333333), scratch);
+ ma_and(scratch, output);
+ ma_and(scratch, tmp);
+ ma_add(output, tmp, output);
+ as_add(output, output, lsr(output, 4));
+ ma_and(Imm32(0xF0F0F0F), output, scratch);
+ as_add(output, output, lsl(output, 8));
+ as_add(output, output, lsl(output, 16));
+ as_mov(output, asr(output, 24));
+}
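The sequence above is the usual SWAR population count: fold to 2-bit sums, then 4-bit, then 8-bit, then accumulate the four byte counts into the top byte and shift it down. A plain C++ rendering of the same steps, checked against a naive bit loop (it mirrors the constants used above but is not taken from the tree):

#include <cstdint>
#include <cassert>

// Same bit-twiddling steps as the assembler sequence above.
static uint32_t popcount32(uint32_t x) {
  x = x - ((x >> 1) & 0x55555555u);                  // 2-bit counts
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  // 4-bit counts
  x = (x + (x >> 4)) & 0x0F0F0F0Fu;                  // 8-bit counts
  x = x + (x << 8);                                  // accumulate bytes...
  x = x + (x << 16);                                 // ...into the top byte
  return x >> 24;
}

int main() {
  const uint32_t samples[] = {0u, 1u, 0xFFFFFFFFu, 0x12345678u, 0x80000001u};
  for (uint32_t v : samples) {
    uint32_t ref = 0;
    for (uint32_t t = v; t; t >>= 1) ref += t & 1;
    assert(popcount32(v) == ref);
  }
  return 0;
}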
+
+void MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp) {
+ MOZ_ASSERT(dest.low != tmp);
+ MOZ_ASSERT(dest.high != tmp);
+ MOZ_ASSERT(dest.low != dest.high);
+ // The source and destination can overlap. Therefore make sure we don't
+ // clobber the source before we have the data.
+ if (dest.low != src.high) {
+ popcnt32(src.low, dest.low, tmp);
+ popcnt32(src.high, dest.high, tmp);
+ } else {
+ MOZ_ASSERT(dest.high != src.high);
+ popcnt32(src.low, dest.high, tmp);
+ popcnt32(src.high, dest.low, tmp);
+ }
+ ma_add(dest.high, dest.low);
+ ma_mov(Imm32(0), dest.high);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ // Inlined calls to load8{Zero,Sign}Extend() and branch32() to acquire
+ // exclusive access to scratch registers.
+
+ bool isSigned;
+ Imm32 imm(0);
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ isSigned = false;
+ imm = Imm32(uint8_t(rhs.value));
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ isSigned = true;
+ imm = Imm32(int8_t(rhs.value));
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+
+ ma_dataTransferN(IsLoad, 8, isSigned, lhs.base, Imm32(lhs.offset), scratch,
+ scratch2);
+ ma_cmp(scratch, imm, scratch2);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ // Inlined calls to load8{Zero,Sign}Extend() and branch32() to acquire
+ // exclusive access to scratch registers.
+
+ bool isSigned;
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ isSigned = false;
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ isSigned = true;
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+
+ if (isSigned) {
+ Register index = lhs.index;
+
+ // ARMv7 does not have LSL on an index register with an extended load.
+ if (lhs.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(lhs.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (lhs.offset != 0) {
+ if (index != scratch) {
+ ma_mov(index, scratch);
+ index = scratch;
+ }
+ ma_add(Imm32(lhs.offset), index, scratch2);
+ }
+ ma_ldrsb(EDtrAddr(lhs.base, EDtrOffReg(index)), scratch);
+ } else {
+ Register base = lhs.base;
+ uint32_t scale = Imm32::ShiftOf(lhs.scale).value;
+
+ if (lhs.offset == 0) {
+ ma_ldrb(DTRAddr(base, DtrRegImmShift(lhs.index, LSL, scale)), scratch);
+ } else {
+ ma_add(base, Imm32(lhs.offset), scratch, scratch2);
+ ma_ldrb(DTRAddr(scratch, DtrRegImmShift(lhs.index, LSL, scale)), scratch);
+ }
+ }
+
+ ma_cmp(scratch, rhs);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ // Inlined calls to load16{Zero,Sign}Extend() and branch32() to acquire
+ // exclusive access to scratch registers.
+
+ bool isSigned;
+ Imm32 imm(0);
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ isSigned = false;
+ imm = Imm32(uint16_t(rhs.value));
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ isSigned = true;
+ imm = Imm32(int16_t(rhs.value));
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+
+ ma_dataTransferN(IsLoad, 16, isSigned, lhs.base, Imm32(lhs.offset), scratch,
+ scratch2);
+ ma_cmp(scratch, imm, scratch2);
+ ma_b(label, cond);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ ma_cmp(lhs, rhs);
+ ma_b(label, cond);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ ScratchRegisterScope scratch(*this);
+
+ ma_cmp(lhs, rhs, scratch);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs, scratch2);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ ScratchRegisterScope scratch(*this);
+
+ // Load into scratch.
+ movePtr(ImmWord(uintptr_t(lhs.addr)), scratch);
+ ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);
+
+ ma_cmp(scratch, rhs);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ // Load into scratch.
+ movePtr(ImmWord(uintptr_t(lhs.addr)), scratch);
+ ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);
+
+ ma_cmp(scratch, rhs, scratch2);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ {
+ ScratchRegisterScope scratch(*this);
+
+ Register base = lhs.base;
+ uint32_t scale = Imm32::ShiftOf(lhs.scale).value;
+
+ // Load lhs into scratch2.
+ if (lhs.offset != 0) {
+ ma_add(base, Imm32(lhs.offset), scratch, scratch2);
+ ma_ldr(DTRAddr(scratch, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
+ } else {
+ ma_ldr(DTRAddr(base, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
+ }
+ }
+ branch32(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ {
+ ScratchRegisterScope scratch(*this);
+
+ Register base = lhs.base;
+ uint32_t scale = Imm32::ShiftOf(lhs.scale).value;
+
+ // Load lhs into scratch2.
+ if (lhs.offset != 0) {
+ ma_add(base, Imm32(lhs.offset), scratch, scratch2);
+ ma_ldr(DTRAddr(scratch, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
+ } else {
+ ma_ldr(DTRAddr(base, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
+ }
+ }
+ branch32(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
+ Imm32 rhs, Label* label) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ movePtr(lhs, scratch);
+ ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);
+
+ ma_cmp(scratch, rhs, scratch2);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), label);
+ }
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)),
+ val.secondHalf(), label);
+
+ bind(&done);
+}
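Only (Not)Equal are supported here because those conditions decompose cleanly over 32-bit halves: equal means both halves match, not-equal means either half differs, and the done label lets the Equal case bail out after the first mismatch. Stated as a scalar check (illustrative only):

#include <cstdint>
#include <cassert>

static bool equal64ViaHalves(uint64_t a, uint64_t b) {
  // Equal requires both halves to match; the lowering above branches straight
  // to `done` as soon as the first halves differ.  NotEqual is the
  // disjunction of the same two half-word tests.
  return uint32_t(a) == uint32_t(b) && uint32_t(a >> 32) == uint32_t(b >> 32);
}

int main() {
  assert(equal64ViaHalves(0x1122334455667788ull, 0x1122334455667788ull));
  assert(!equal64ViaHalves(0x1122334455667788ull, 0x1122334455667789ull));
  assert(!equal64ViaHalves(0x1122334455667788ull, 0xFF22334455667788ull));
  return 0;
}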
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, rhs.low, &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, rhs.low, label);
+ }
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), rhs.high,
+ label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ Label done;
+
+ load32(rhs, scratch);
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, scratch, &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, scratch, label);
+ }
+
+ load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch,
+ label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch (cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), fail);
+ branch32(Assembler::Equal, lhs.high, val.hi(), success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), success);
+ branch32(Assembler::NotEqual, lhs.high, val.hi(), success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
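+      // Compare the high words first: a strict inequality already decides
+      // the result (cond1 jumps to success, cond2 to fail). Only when the
+      // high words are equal do we compare the low words, and those are
+      // compared as unsigned (cond3) regardless of the signedness of the
+      // original condition.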
+ cmp32(lhs.high, val.hi());
+ ma_b(success, cond1);
+ ma_b(fail, cond2);
+ cmp32(lhs.low, val.low());
+ ma_b(success, cond3);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough) {
+ bind(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch (cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, fail);
+ branch32(Assembler::Equal, lhs.high, rhs.high, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, success);
+ branch32(Assembler::NotEqual, lhs.high, rhs.high, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+ cmp32(lhs.high, rhs.high);
+ ma_b(success, cond1);
+ ma_b(fail, cond2);
+ cmp32(lhs.low, rhs.low);
+ ma_b(success, cond3);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough) {
+ bind(fail);
+ }
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ branch32(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ branch32(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+ Label* label) {
+ branchPtr(cond, lhs, ImmWord(uintptr_t(rhs.value)), label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ movePtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+ Label* label) {
+ branch32(cond, lhs, Imm32(rhs.value), label);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+ L label) {
+ branch32(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+ Label* label) {
+ branchPtr(cond, lhs, ImmWord(uintptr_t(rhs.value)), label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ ImmWord rhs, Label* label) {
+ branch32(cond, lhs, Imm32(rhs.value), label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ branch32(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ compareFloat(lhs, rhs);
+
+ if (cond == DoubleNotEqual) {
+ // Force the unordered cases not to jump.
+ Label unordered;
+ ma_b(&unordered, VFP_Unordered);
+ ma_b(label, VFP_NotEqualOrUnordered);
+ bind(&unordered);
+ return;
+ }
+
+ if (cond == DoubleEqualOrUnordered) {
+ ma_b(label, VFP_Unordered);
+ ma_b(label, VFP_Equal);
+ return;
+ }
+
+ ma_b(label, ConditionFromDoubleCondition(cond));
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ ScratchFloat32Scope scratchFloat32(*this);
+ ScratchRegisterScope scratch(*this);
+
+ ma_vcvt_F32_I32(src, scratchFloat32.sintOverlay());
+ ma_vxfer(scratchFloat32, dest);
+ ma_cmp(dest, Imm32(0x7fffffff), scratch);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
+ ma_b(fail, Assembler::Equal);
+}
+
+void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ compareDouble(lhs, rhs);
+
+ if (cond == DoubleNotEqual) {
+ // Force the unordered cases not to jump.
+ Label unordered;
+ ma_b(&unordered, VFP_Unordered);
+ ma_b(label, VFP_NotEqualOrUnordered);
+ bind(&unordered);
+ return;
+ }
+
+ if (cond == DoubleEqualOrUnordered) {
+ ma_b(label, VFP_Unordered);
+ ma_b(label, VFP_Equal);
+ return;
+ }
+
+ ma_b(label, ConditionFromDoubleCondition(cond));
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+// There are two options for implementing branchTruncateDoubleToInt32:
+//
+// 1. Convert the floating point value to an integer; if it did not fit, it
+// was clamped to INT_MIN/INT_MAX, and we can test for that. NOTE: if the
+// value really was supposed to be INT_MAX / INT_MIN then it will be wrong.
+//
+// 2. Convert the floating point value to an integer; if it did not fit, one
+// or two bits were set in the FPSCR. Check those.
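+//
+// The code below uses option 1. For illustration: the saturating VFP
+// conversion turns a double such as 5e9 into 0x7fffffff and -5e9 into
+// 0x80000000, so comparing the result against those two constants catches
+// the out-of-range cases, at the cost of also bailing when the input really
+// was INT32_MAX or INT32_MIN.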
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ ScratchDoubleScope scratchDouble(*this);
+ FloatRegister scratchSIntReg = scratchDouble.sintOverlay();
+ ScratchRegisterScope scratch(*this);
+
+ ma_vcvt_F64_I32(src, scratchSIntReg);
+ ma_vxfer(scratchSIntReg, dest);
+ ma_cmp(dest, Imm32(0x7fffffff), scratch);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
+ ma_b(fail, Assembler::Equal);
+}
+
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+ Label* label) {
+ add32(src, dest);
+ as_b(label, cond);
+}
+
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+ Label* label) {
+ sub32(src, dest);
+ j(cond, label);
+}
+
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ScratchRegisterScope scratch(*this);
+ Assembler::Condition overflow_cond =
+ ma_check_mul(src, dest, dest, scratch, cond);
+ j(overflow_cond, label);
+}
+
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ rshift32(src, dest);
+ branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
+}
+
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+ MOZ_ASSERT(cond == Overflow);
+ neg32(reg);
+ j(cond, label);
+}
+
+void MacroAssembler::branchAdd64(Condition cond, Imm64 imm, Register64 dest,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ ma_add(imm.low(), dest.low, scratch, SetCC);
+ ma_adc(imm.hi(), dest.high, scratch, SetCC);
+ j(cond, label);
+}
+
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ branchAdd32(cond, src, dest, label);
+}
+
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ branchSub32(cond, src, dest, label);
+}
+
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+ Label* label) {
+ branchMul32(cond, src, dest, label);
+}
+
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ ma_sub(rhs, lhs, scratch, SetCC);
+ as_b(label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ // x86 likes test foo, foo rather than cmp foo, #0.
+ // Convert the former into the latter.
+ if (lhs == rhs && (cond == Zero || cond == NonZero)) {
+ as_cmp(lhs, Imm8(0));
+ } else {
+ ma_tst(lhs, rhs);
+ }
+ ma_b(label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ ScratchRegisterScope scratch(*this);
+ ma_tst(lhs, rhs, scratch);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ load32(lhs, scratch2);
+ branchTest32(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ load32(lhs, scratch2);
+ branchTest32(cond, scratch2, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ branchTest32(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ branchTest32(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+ Imm32 rhs, Label* label) {
+ branchTest32(cond, lhs, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ if (cond == Assembler::Zero || cond == Assembler::NonZero) {
+ ScratchRegisterScope scratch(*this);
+
+ MOZ_ASSERT(lhs.low == rhs.low);
+ MOZ_ASSERT(lhs.high == rhs.high);
+ ma_orr(lhs.low, lhs.high, scratch);
+ branchTestPtr(cond, scratch, scratch, label);
+ } else if (cond == Assembler::Signed || cond == Assembler::NotSigned) {
+ branchTest32(cond, lhs.high, rhs.high, label);
+ } else {
+ MOZ_CRASH("Unsupported condition");
+ }
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+ Label* label) {
+ branchTestUndefinedImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+ Label* label) {
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const BaseIndex& address,
+ Label* label) {
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestUndefinedImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testUndefined(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+ Label* label) {
+ branchTestInt32Impl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+ Label* label) {
+ branchTestInt32Impl(cond, address, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestInt32Impl(cond, address, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestInt32Impl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestInt32Impl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testInt32(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestInt32Truthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition c = testInt32Truthy(truthy, value);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ branchTestDoubleImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+ Label* label) {
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestDoubleImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testDouble(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg,
+ Label* label) {
+ Condition c = testDoubleTruthy(truthy, reg);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+ Label* label) {
+ branchTestNumberImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestNumberImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestNumberImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testNumber(cond, t);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+ Label* label) {
+ branchTestBooleanImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+ Label* label) {
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestBooleanImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestBooleanImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testBoolean(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition c = testBooleanTruthy(truthy, value);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+ Label* label) {
+ branchTestStringImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+ Label* label) {
+ branchTestStringImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestStringImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestStringImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestStringImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testString(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestStringTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition c = testStringTruthy(truthy, value);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+ Label* label) {
+ branchTestSymbolImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+ Label* label) {
+ branchTestSymbolImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestSymbolImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestSymbolImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testSymbol(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+ Label* label) {
+ branchTestBigIntImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+ Label* label) {
+ branchTestBigIntImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestBigIntImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestBigIntImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestBigIntImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testBigInt(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestBigIntTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition c = testBigIntTruthy(truthy, value);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+ Label* label) {
+ branchTestNullImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+ Label* label) {
+ branchTestNullImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestNullImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestNullImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestNullImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testNull(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+ Label* label) {
+ branchTestObjectImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+ Label* label) {
+ branchTestObjectImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestObjectImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestObjectImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestObjectImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testObject(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestGCThingImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testGCThing(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+ Label* label) {
+ branchTestPrimitiveImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestPrimitiveImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testPrimitive(cond, t);
+ ma_b(label, c);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+ Label* label) {
+ branchTestMagicImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+ Label* label) {
+ branchTestMagicImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestMagicImpl(cond, address, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ branchTestMagicImpl(cond, value, label);
+}
+
+template <typename T, class L>
+void MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label) {
+ cond = testMagic(cond, t);
+ ma_b(label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label notMagic;
+ if (cond == Assembler::Equal) {
+ branchTestMagic(Assembler::NotEqual, valaddr, &notMagic);
+ } else {
+ branchTestMagic(Assembler::NotEqual, valaddr, label);
+ }
+
+ branch32(cond, ToPayload(valaddr), Imm32(why), label);
+ bind(&notMagic);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label notSameValue;
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), &notSameValue);
+ } else {
+ branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), label);
+ }
+
+ branch32(cond, ToPayload(lhs), rhs.payloadReg(), label);
+ bind(&notSameValue);
+}
+
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testNumber(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testBoolean(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testString(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testSymbol(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testBigInt(cond, src);
+ emitSet(cond, dest);
+}
+
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+ MOZ_ASSERT(
+ addr.offset == 0,
+ "NYI: offsets from pc should be shifted by the number of instructions.");
+
+ Register base = addr.base;
+ uint32_t scale = Imm32::ShiftOf(addr.scale).value;
+
+ ma_ldr(DTRAddr(base, DtrRegImmShift(addr.index, LSL, scale)), pc);
+
+ if (base == pc) {
+    // When loading from pc, the pc is shifted to the next instruction, so we
+    // add one extra instruction to accommodate this shifted offset.
+ breakpoint();
+ }
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ cmp32(lhs, rhs);
+ ma_mov(src, dest, LeaveCC, cond);
+}
+
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ cmp32(lhs, rhs);
+ ma_mov(src, dest, LeaveCC, cond);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+ ma_ldr(rhs, scratch, scratch2);
+ cmp32Move32(cond, lhs, scratch, src, dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ cmp32Move32(cond, lhs, rhs, src, dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ cmp32Move32(cond, lhs, rhs, src, dest);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+ const Address& rhs, const Address& src,
+ Register dest) {
+ // This is never used, but must be present to facilitate linking on arm.
+ MOZ_CRASH("No known use cases");
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
+ const Address& src, Register dest) {
+ // This is never used, but must be present to facilitate linking on arm.
+ MOZ_CRASH("No known use cases");
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ cmp32(lhs, rhs);
+ ScratchRegisterScope scratch(*this);
+ ma_ldr(src, dest, scratch, Offset, cond);
+}
+
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+ test32(addr, mask);
+ ScratchRegisterScope scratch(*this);
+ ma_ldr(src, dest, scratch, Offset, cond);
+}
+
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+ Imm32 mask, Register src, Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+ test32(addr, mask);
+ ma_mov(src, dest, LeaveCC, cond);
+}
+
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+ Register dest) {
+ ma_mov(src, dest, LeaveCC, cond);
+}
+
+void MacroAssembler::spectreZeroRegister(Condition cond, Register,
+ Register dest) {
+ ma_mov(Imm32(0), dest, cond);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(length != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ ma_mov(Imm32(0), index, Assembler::BelowOrEqual);
+ }
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(index != length.base);
+ MOZ_ASSERT(length.base != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ ma_mov(Imm32(0), index, Assembler::BelowOrEqual);
+ }
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ spectreBoundsCheck32(index, length, maybeScratch, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ spectreBoundsCheck32(index, length, maybeScratch, failure);
+}
+
+// ========================================================================
+// Memory access primitives.
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const Address& addr) {
+ ScratchRegisterScope scratch(*this);
+ ma_vstr(src, addr, scratch);
+}
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const BaseIndex& addr) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+ uint32_t scale = Imm32::ShiftOf(addr.scale).value;
+ ma_vstr(src, addr.base, addr.index, scratch, scratch2, scale, addr.offset);
+}
+
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const Address& addr) {
+ ScratchRegisterScope scratch(*this);
+ ma_vstr(src.asSingle(), addr, scratch);
+}
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const BaseIndex& addr) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+ uint32_t scale = Imm32::ShiftOf(addr.scale).value;
+ ma_vstr(src.asSingle(), addr.base, addr.index, scratch, scratch2, scale,
+ addr.offset);
+}
+
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
+ // On ARMv6 the optional argument (BarrierST, etc) is ignored.
+ if (barrier == (MembarStoreStore | MembarSynchronizing)) {
+ ma_dsb(BarrierST);
+ } else if (barrier & MembarSynchronizing) {
+ ma_dsb();
+ } else if (barrier == MembarStoreStore) {
+ ma_dmb(BarrierST);
+ } else if (barrier) {
+ ma_dmb();
+ }
+}
+
+// ===============================================================
+// Clamping functions.
+
+void MacroAssembler::clampIntToUint8(Register reg) {
+  // Look at (reg >> 8): if it is 0, then reg shouldn't be clamped; if it is
+  // < 0, then we want to clamp to 0; otherwise, we wish to clamp to 255.
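+  // For example, 300 (0x12c) has (reg >> 8) == 1, so it is clamped to 255;
+  // -5 has (reg >> 8) == -1 with the sign bit set, so it is clamped to 0;
+  // 100 has (reg >> 8) == 0 and is left unchanged.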
+ ScratchRegisterScope scratch(*this);
+ as_mov(scratch, asr(reg, 8), SetCC);
+ ma_mov(Imm32(0xff), reg, NotEqual);
+ ma_mov(Imm32(0), reg, Signed);
+}
+
+template <typename T>
+void MacroAssemblerARMCompat::fallibleUnboxPtrImpl(const T& src, Register dest,
+ JSValueType type,
+ Label* fail) {
+ switch (type) {
+ case JSVAL_TYPE_OBJECT:
+ asMasm().branchTestObject(Assembler::NotEqual, src, fail);
+ break;
+ case JSVAL_TYPE_STRING:
+ asMasm().branchTestString(Assembler::NotEqual, src, fail);
+ break;
+ case JSVAL_TYPE_SYMBOL:
+ asMasm().branchTestSymbol(Assembler::NotEqual, src, fail);
+ break;
+ case JSVAL_TYPE_BIGINT:
+ asMasm().branchTestBigInt(Assembler::NotEqual, src, fail);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ unboxNonDouble(src, dest, type);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(src, dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(src, dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(src, dest, type, fail);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+void MacroAssemblerARMCompat::incrementInt32Value(const Address& addr) {
+ asMasm().add32(Imm32(1), ToPayload(addr));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_MacroAssembler_arm_inl_h */
diff --git a/js/src/jit/arm/MacroAssembler-arm.cpp b/js/src/jit/arm/MacroAssembler-arm.cpp
new file mode 100644
index 0000000000..da358c5ec9
--- /dev/null
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -0,0 +1,6382 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/MacroAssembler-arm.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+
+#include "jsmath.h"
+
+#include "jit/arm/Simulator-arm.h"
+#include "jit/AtomicOp.h"
+#include "jit/AtomicOperations.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "util/Memory.h"
+#include "vm/BigIntType.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/StringType.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+using mozilla::IsPositiveZero;
+using mozilla::Maybe;
+
+bool isValueDTRDCandidate(ValueOperand& val) {
+  // In order to be used for a DTRD memory function, the two target registers
+  // need to be a) adjacent, with the tag register one above the payload
+  // register, and b) aligned, with the payload register's code a multiple of
+  // two.
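+  // For example, a value in (r2 payload, r3 tag) qualifies; (r1, r2) fails
+  // the alignment check, and (r3, r2) fails the adjacency check.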
+ if ((val.typeReg().code() != (val.payloadReg().code() + 1))) {
+ return false;
+ }
+ if ((val.payloadReg().code() & 1) != 0) {
+ return false;
+ }
+ return true;
+}
+
+void MacroAssemblerARM::convertBoolToInt32(Register source, Register dest) {
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ as_and(dest, source, Imm8(0xff));
+}
+
+void MacroAssemblerARM::convertInt32ToDouble(Register src,
+ FloatRegister dest_) {
+ // Direct conversions aren't possible.
+ VFPRegister dest = VFPRegister(dest_);
+ as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat);
+ as_vcvt(dest, dest.sintOverlay());
+}
+
+void MacroAssemblerARM::convertInt32ToDouble(const Address& src,
+ FloatRegister dest) {
+ ScratchDoubleScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_vldr(src, scratch, scratch2);
+ as_vcvt(dest, VFPRegister(scratch).sintOverlay());
+}
+
+void MacroAssemblerARM::convertInt32ToDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ Register base = src.base;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (src.offset != 0) {
+ ma_add(base, Imm32(src.offset), scratch, scratch2);
+ base = scratch;
+ }
+ ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), scratch);
+ convertInt32ToDouble(scratch, dest);
+}
+
+void MacroAssemblerARM::convertUInt32ToDouble(Register src,
+ FloatRegister dest_) {
+ // Direct conversions aren't possible.
+ VFPRegister dest = VFPRegister(dest_);
+ as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
+ as_vcvt(dest, dest.uintOverlay());
+}
+
+static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
+
+void MacroAssemblerARM::convertUInt32ToFloat32(Register src,
+ FloatRegister dest_) {
+ // Direct conversions aren't possible.
+ VFPRegister dest = VFPRegister(dest_);
+ as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
+ as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay());
+}
+
+void MacroAssemblerARM::convertDoubleToFloat32(FloatRegister src,
+ FloatRegister dest,
+ Condition c) {
+ as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c);
+}
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void MacroAssemblerARM::convertDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+  // Convert the floating point value to an integer; if it did not fit, then
+  // when we convert it *back* to a float, it will have a different value,
+  // which we can test.
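+  // For example, 3.5 truncates to 3; converting 3 back gives 3.0, which
+  // compares unequal to 3.5, so we branch to the failure path.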
+ ScratchDoubleScope scratchDouble(asMasm());
+ ScratchRegisterScope scratch(asMasm());
+
+ FloatRegister scratchSIntReg = scratchDouble.sintOverlay();
+
+ ma_vcvt_F64_I32(src, scratchSIntReg);
+ // Move the value into the dest register.
+ ma_vxfer(scratchSIntReg, dest);
+ ma_vcvt_I32_F64(scratchSIntReg, scratchDouble);
+ ma_vcmp(src, scratchDouble);
+ as_vmrs(pc);
+ ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
+
+ if (negativeZeroCheck) {
+ as_cmp(dest, Imm8(0));
+    // Test and bail for -0.0, when the integer result is 0. Move the top word
+    // of the double into the output reg; if it is non-zero, then the
+    // original value was -0.0.
+ as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::Equal);
+ ma_b(fail, Assembler::Equal);
+ }
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void MacroAssemblerARM::convertFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+ // Converting the floating point value to an integer and then converting it
+ // back to a float32 would not work, as float to int32 conversions are
+ // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX
+ // and then back to float(INT32_MAX + 1)). If this ever happens, we just
+ // bail out.
+ ScratchFloat32Scope scratchFloat(asMasm());
+ ScratchRegisterScope scratch(asMasm());
+
+ FloatRegister ScratchSIntReg = scratchFloat.sintOverlay();
+ ma_vcvt_F32_I32(src, ScratchSIntReg);
+
+ // Store the result
+ ma_vxfer(ScratchSIntReg, dest);
+
+ ma_vcvt_I32_F32(ScratchSIntReg, scratchFloat);
+ ma_vcmp(src, scratchFloat);
+ as_vmrs(pc);
+ ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
+
+ // Bail out in the clamped cases.
+ ma_cmp(dest, Imm32(0x7fffffff), scratch);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
+ ma_b(fail, Assembler::Equal);
+
+ if (negativeZeroCheck) {
+ as_cmp(dest, Imm8(0));
+ // Test and bail for -0.0, when integer result is 0. Move the float into
+ // the output reg, and if it is non-zero then the original value was
+ // -0.0
+ as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore,
+ Assembler::Equal, 0);
+ ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::Equal);
+ ma_b(fail, Assembler::Equal);
+ }
+}
+
+void MacroAssemblerARM::convertFloat32ToDouble(FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(dest.isDouble());
+ MOZ_ASSERT(src.isSingle());
+ as_vcvt(VFPRegister(dest), VFPRegister(src).singleOverlay());
+}
+
+void MacroAssemblerARM::convertInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ // Direct conversions aren't possible.
+ as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat);
+ as_vcvt(dest.singleOverlay(), dest.sintOverlay());
+}
+
+void MacroAssemblerARM::convertInt32ToFloat32(const Address& src,
+ FloatRegister dest) {
+ ScratchFloat32Scope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_vldr(src, scratch, scratch2);
+ as_vcvt(dest, VFPRegister(scratch).sintOverlay());
+}
+
+bool MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest,
+ ALUOp op, SBit s, Condition c) {
+ if ((s == SetCC && !condsAreSafe(op)) || !can_dbl(op)) {
+ return false;
+ }
+
+ ALUOp interop = getDestVariant(op);
+ Imm8::TwoImm8mData both = Imm8::EncodeTwoImms(imm.value);
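+  // EncodeTwoImms attempts to split the immediate into two pieces that are
+  // each encodable as an imm8m (an 8-bit value rotated right by an even
+  // amount). For example, 0x12300 can be built as 0x12000 followed by 0x300,
+  // both of which are valid imm8m operands.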
+ if (both.fst().invalid()) {
+ return false;
+ }
+
+  // For the most part, there is no good reason to set the condition codes for
+  // the first instruction. We can do better things if the second instruction
+  // doesn't have a dest, such as checking for overflow: do the first
+  // operation and don't do the second operation if the first one overflowed.
+  // This preserves the overflow condition code. Unfortunately, it is horribly
+  // brittle.
+ as_alu(dest, src1, Operand2(both.fst()), interop, LeaveCC, c);
+ as_alu(dest, dest, Operand2(both.snd()), op, s, c);
+ return true;
+}
+
+void MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, ALUOp op, SBit s,
+ Condition c) {
+ // ma_mov should be used for moves.
+ MOZ_ASSERT(op != OpMov);
+ MOZ_ASSERT(op != OpMvn);
+ MOZ_ASSERT(src1 != scratch);
+
+ // As it turns out, if you ask for a compare-like instruction you *probably*
+ // want it to set condition codes.
+ MOZ_ASSERT_IF(dest == InvalidReg, s == SetCC);
+
+ // The operator gives us the ability to determine how this can be used.
+ Imm8 imm8 = Imm8(imm.value);
+ // One instruction: If we can encode it using an imm8m, then do so.
+ if (!imm8.invalid()) {
+ as_alu(dest, src1, imm8, op, s, c);
+ return;
+ }
+
+ // One instruction, negated:
+ Imm32 negImm = imm;
+ Register negDest;
+ ALUOp negOp = ALUNeg(op, dest, scratch, &negImm, &negDest);
+ Imm8 negImm8 = Imm8(negImm.value);
+ // 'add r1, r2, -15' can be replaced with 'sub r1, r2, 15'.
+ // The dest can be replaced (InvalidReg => scratch).
+ // This is useful if we wish to negate tst. tst has an invalid (aka not
+ // used) dest, but its negation bic requires a dest.
+ if (negOp != OpInvalid && !negImm8.invalid()) {
+ as_alu(negDest, src1, negImm8, negOp, s, c);
+ return;
+ }
+
+  // Start by attempting to generate a two instruction form. Some things
+  // cannot be made into two-inst forms correctly: namely, adds dest, src,
+  // 0xffff. Since we want the condition codes (and don't know which ones
+  // will be checked), we need to assume that the overflow flag will be
+  // checked, and add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff is not
+  // guaranteed to set the overflow flag the same way as the (theoretical)
+  // one-instruction variant.
+ if (alu_dbl(src1, imm, dest, op, s, c)) {
+ return;
+ }
+
+ // And try with its negative.
+ if (negOp != OpInvalid && alu_dbl(src1, negImm, negDest, negOp, s, c)) {
+ return;
+ }
+
+ ma_mov(imm, scratch, c);
+ as_alu(dest, src1, O2Reg(scratch), op, s, c);
+}
+
+void MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest,
+ ALUOp op, SBit s, Assembler::Condition c) {
+ MOZ_ASSERT(op2.tag() == Operand::Tag::OP2);
+ as_alu(dest, src1, op2.toOp2(), op, s, c);
+}
+
+void MacroAssemblerARM::ma_alu(Register src1, Operand2 op2, Register dest,
+ ALUOp op, SBit s, Condition c) {
+ as_alu(dest, src1, op2, op, s, c);
+}
+
+void MacroAssemblerARM::ma_nop() { as_nop(); }
+
+BufferOffset MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest,
+ Assembler::Condition c) {
+ int32_t imm = imm_.value;
+ if (HasMOVWT()) {
+ BufferOffset offset = as_movw(dest, Imm16(imm & 0xffff), c);
+ as_movt(dest, Imm16(imm >> 16 & 0xffff), c);
+ return offset;
+ } else {
+ return as_Imm32Pool(dest, imm, c);
+ }
+}
+
+BufferOffset MacroAssemblerARM::ma_movPatchable(ImmPtr imm, Register dest,
+ Assembler::Condition c) {
+ return ma_movPatchable(Imm32(int32_t(imm.value)), dest, c);
+}
+
+/* static */
+template <class Iter>
+void MacroAssemblerARM::ma_mov_patch(Imm32 imm32, Register dest,
+ Assembler::Condition c, RelocStyle rs,
+ Iter iter) {
+ // The current instruction must be an actual instruction,
+ // not automatically-inserted boilerplate.
+ MOZ_ASSERT(iter.cur());
+ MOZ_ASSERT(iter.cur() == iter.maybeSkipAutomaticInstructions());
+
+ int32_t imm = imm32.value;
+ switch (rs) {
+ case L_MOVWT:
+ Assembler::as_movw_patch(dest, Imm16(imm & 0xffff), c, iter.cur());
+ Assembler::as_movt_patch(dest, Imm16(imm >> 16 & 0xffff), c, iter.next());
+ break;
+ case L_LDR:
+ Assembler::WritePoolEntry(iter.cur(), c, imm);
+ break;
+ }
+}
+
+template void MacroAssemblerARM::ma_mov_patch(Imm32 imm32, Register dest,
+ Assembler::Condition c,
+ RelocStyle rs,
+ InstructionIterator iter);
+template void MacroAssemblerARM::ma_mov_patch(Imm32 imm32, Register dest,
+ Assembler::Condition c,
+ RelocStyle rs,
+ BufferInstructionIterator iter);
+
+void MacroAssemblerARM::ma_mov(Register src, Register dest, SBit s,
+ Assembler::Condition c) {
+ if (s == SetCC || dest != src) {
+ as_mov(dest, O2Reg(src), s, c);
+ }
+}
+
+void MacroAssemblerARM::ma_mov(Imm32 imm, Register dest,
+ Assembler::Condition c) {
+ // Try mov with Imm8 operand.
+ Imm8 imm8 = Imm8(imm.value);
+ if (!imm8.invalid()) {
+ as_alu(dest, InvalidReg, imm8, OpMov, LeaveCC, c);
+ return;
+ }
+
+ // Try mvn with Imm8 operand.
+ Imm8 negImm8 = Imm8(~imm.value);
+ if (!negImm8.invalid()) {
+ as_alu(dest, InvalidReg, negImm8, OpMvn, LeaveCC, c);
+ return;
+ }
+
+ // Try movw/movt.
+ if (HasMOVWT()) {
+    // ARMv7 supports movw/movt. movw zero-extends its 16-bit argument,
+    // so we can set the register this way. movt only writes the top 16
+    // bits and leaves the bottom 16 bits intact, so we always need the
+    // movw first.
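+    // For example, loading 0x12345678 emits movw dest, #0x5678 followed by
+    // movt dest, #0x1234.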
+ as_movw(dest, Imm16(imm.value & 0xffff), c);
+ if (uint32_t(imm.value) >> 16) {
+ as_movt(dest, Imm16(uint32_t(imm.value) >> 16), c);
+ }
+ return;
+ }
+
+ // If we don't have movw/movt, we need a load.
+ as_Imm32Pool(dest, imm.value, c);
+}
+
+void MacroAssemblerARM::ma_mov(ImmWord imm, Register dest,
+ Assembler::Condition c) {
+ ma_mov(Imm32(imm.value), dest, c);
+}
+
+void MacroAssemblerARM::ma_mov(ImmGCPtr ptr, Register dest) {
+ BufferOffset offset =
+ ma_movPatchable(Imm32(uintptr_t(ptr.value)), dest, Always);
+ writeDataRelocation(offset, ptr);
+}
+
+// Shifts (just a move with a shifting op2)
+void MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst) {
+ as_mov(dst, lsl(src, shift.value));
+}
+
+void MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst) {
+ as_mov(dst, lsr(src, shift.value));
+}
+
+void MacroAssemblerARM::ma_asr(Imm32 shift, Register src, Register dst) {
+ as_mov(dst, asr(src, shift.value));
+}
+
+void MacroAssemblerARM::ma_ror(Imm32 shift, Register src, Register dst) {
+ as_mov(dst, ror(src, shift.value));
+}
+
+void MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst) {
+ as_mov(dst, rol(src, shift.value));
+}
+
+// Shifts (just a move with a shifting op2)
+void MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst) {
+ as_mov(dst, lsl(src, shift));
+}
+
+void MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst) {
+ as_mov(dst, lsr(src, shift));
+}
+
+void MacroAssemblerARM::ma_asr(Register shift, Register src, Register dst) {
+ as_mov(dst, asr(src, shift));
+}
+
+void MacroAssemblerARM::ma_ror(Register shift, Register src, Register dst) {
+ as_mov(dst, ror(src, shift));
+}
+
+void MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst,
+ AutoRegisterScope& scratch) {
+ as_rsb(scratch, shift, Imm8(32));
+ as_mov(dst, ror(src, scratch));
+}
+
+// Move not (dest <- ~src)
+void MacroAssemblerARM::ma_mvn(Register src1, Register dest, SBit s,
+ Assembler::Condition c) {
+ as_alu(dest, InvalidReg, O2Reg(src1), OpMvn, s, c);
+}
+
+// Negate (dest <- -src), src is a register, rather than a general op2.
+void MacroAssemblerARM::ma_neg(Register src1, Register dest, SBit s,
+ Assembler::Condition c) {
+ as_rsb(dest, src1, Imm8(0), s, c);
+}
+
+void MacroAssemblerARM::ma_neg(Register64 src, Register64 dest) {
+ as_rsb(dest.low, src.low, Imm8(0), SetCC);
+ as_rsc(dest.high, src.high, Imm8(0));
+}
+
+// And.
+void MacroAssemblerARM::ma_and(Register src, Register dest, SBit s,
+ Assembler::Condition c) {
+ ma_and(dest, src, dest);
+}
+
+void MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
+ SBit s, Assembler::Condition c) {
+ as_and(dest, src1, O2Reg(src2), s, c);
+}
+
+void MacroAssemblerARM::ma_and(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Assembler::Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpAnd, s, c);
+}
+
+void MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Assembler::Condition c) {
+ ma_alu(src1, imm, dest, scratch, OpAnd, s, c);
+}
+
+// Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
+void MacroAssemblerARM::ma_bic(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Assembler::Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpBic, s, c);
+}
+
+// Exclusive or.
+void MacroAssemblerARM::ma_eor(Register src, Register dest, SBit s,
+ Assembler::Condition c) {
+ ma_eor(dest, src, dest, s, c);
+}
+
+void MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
+ SBit s, Assembler::Condition c) {
+ as_eor(dest, src1, O2Reg(src2), s, c);
+}
+
+void MacroAssemblerARM::ma_eor(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Assembler::Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpEor, s, c);
+}
+
+void MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Assembler::Condition c) {
+ ma_alu(src1, imm, dest, scratch, OpEor, s, c);
+}
+
+// Or.
+void MacroAssemblerARM::ma_orr(Register src, Register dest, SBit s,
+ Assembler::Condition c) {
+ ma_orr(dest, src, dest, s, c);
+}
+
+void MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
+ SBit s, Assembler::Condition c) {
+ as_orr(dest, src1, O2Reg(src2), s, c);
+}
+
+void MacroAssemblerARM::ma_orr(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Assembler::Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpOrr, s, c);
+}
+
+void MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Assembler::Condition c) {
+ ma_alu(src1, imm, dest, scratch, OpOrr, s, c);
+}
+
+// Arithmetic-based ops.
+// Add with carry.
+void MacroAssemblerARM::ma_adc(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpAdc, s, c);
+}
+
+void MacroAssemblerARM::ma_adc(Register src, Register dest, SBit s,
+ Condition c) {
+ as_alu(dest, dest, O2Reg(src), OpAdc, s, c);
+}
+
+void MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest,
+ SBit s, Condition c) {
+ as_alu(dest, src1, O2Reg(src2), OpAdc, s, c);
+}
+
+void MacroAssemblerARM::ma_adc(Register src1, Imm32 op, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(src1, op, dest, scratch, OpAdc, s, c);
+}
+
+// Add.
+void MacroAssemblerARM::ma_add(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpAdd, s, c);
+}
+
+void MacroAssemblerARM::ma_add(Register src1, Register dest, SBit s,
+ Condition c) {
+ ma_alu(dest, O2Reg(src1), dest, OpAdd, s, c);
+}
+
+void MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest,
+ SBit s, Condition c) {
+ as_alu(dest, src1, O2Reg(src2), OpAdd, s, c);
+}
+
+void MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SBit s,
+ Condition c) {
+ ma_alu(src1, op, dest, OpAdd, s, c);
+}
+
+void MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(src1, op, dest, scratch, OpAdd, s, c);
+}
+
+// Subtract with carry.
+void MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpSbc, s, c);
+}
+
+void MacroAssemblerARM::ma_sbc(Register src1, Register dest, SBit s,
+ Condition c) {
+ as_alu(dest, dest, O2Reg(src1), OpSbc, s, c);
+}
+
+void MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest,
+ SBit s, Condition c) {
+ as_alu(dest, src1, O2Reg(src2), OpSbc, s, c);
+}
+
+// Subtract.
+void MacroAssemblerARM::ma_sub(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpSub, s, c);
+}
+
+void MacroAssemblerARM::ma_sub(Register src1, Register dest, SBit s,
+ Condition c) {
+ ma_alu(dest, Operand(src1), dest, OpSub, s, c);
+}
+
+void MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest,
+ SBit s, Condition c) {
+ ma_alu(src1, Operand(src2), dest, OpSub, s, c);
+}
+
+void MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SBit s,
+ Condition c) {
+ ma_alu(src1, op, dest, OpSub, s, c);
+}
+
+void MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(src1, op, dest, scratch, OpSub, s, c);
+}
+
+// Reverse subtract.
+void MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpRsb, s, c);
+}
+
+void MacroAssemblerARM::ma_rsb(Register src1, Register dest, SBit s,
+ Condition c) {
+ as_alu(dest, src1, O2Reg(dest), OpRsb, s, c);
+}
+
+void MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest,
+ SBit s, Condition c) {
+ as_alu(dest, src1, O2Reg(src2), OpRsb, s, c);
+}
+
+void MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(src1, op2, dest, scratch, OpRsb, s, c);
+}
+
+// Reverse subtract with carry.
+void MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, SBit s,
+ Condition c) {
+ ma_alu(dest, imm, dest, scratch, OpRsc, s, c);
+}
+
+void MacroAssemblerARM::ma_rsc(Register src1, Register dest, SBit s,
+ Condition c) {
+ as_alu(dest, dest, O2Reg(src1), OpRsc, s, c);
+}
+
+void MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest,
+ SBit s, Condition c) {
+ as_alu(dest, src1, O2Reg(src2), OpRsc, s, c);
+}
+
+// Compares/tests.
+// Compare negative (sets condition codes as src1 + src2 would).
+void MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm,
+ AutoRegisterScope& scratch, Condition c) {
+ ma_alu(src1, imm, InvalidReg, scratch, OpCmn, SetCC, c);
+}
+
+void MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c) {
+ as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCC, c);
+}
+
+void MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c) {
+ MOZ_CRASH("Feature NYI");
+}
+
+// Compare (src - src2).
+void MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm,
+ AutoRegisterScope& scratch, Condition c) {
+ ma_alu(src1, imm, InvalidReg, scratch, OpCmp, SetCC, c);
+}
+
+void MacroAssemblerARM::ma_cmp(Register src1, ImmTag tag, Condition c) {
+ // ImmTag comparisons can always be done without use of a scratch register.
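+  // Negating the tag turns the compare into a compare-negative: e.g. a tag
+  // like 0xffffff88 negates to 0x78, which fits in an Imm8, and
+  // cmn src1, #0x78 sets the same flags as cmp src1, #0xffffff88 would.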
+ Imm8 negtag = Imm8(-tag.value);
+ MOZ_ASSERT(!negtag.invalid());
+ as_cmn(src1, negtag, c);
+}
+
+void MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr,
+ AutoRegisterScope& scratch, Condition c) {
+ ma_cmp(src1, Imm32(ptr.value), scratch, c);
+}
+
+void MacroAssemblerARM::ma_cmp(Register src1, ImmGCPtr ptr,
+ AutoRegisterScope& scratch, Condition c) {
+ ma_mov(ptr, scratch);
+ ma_cmp(src1, scratch, c);
+}
+
+void MacroAssemblerARM::ma_cmp(Register src1, Operand op,
+ AutoRegisterScope& scratch,
+ AutoRegisterScope& scratch2, Condition c) {
+ switch (op.tag()) {
+ case Operand::Tag::OP2:
+ as_cmp(src1, op.toOp2(), c);
+ break;
+ case Operand::Tag::MEM:
+ ma_ldr(op.toAddress(), scratch, scratch2);
+ as_cmp(src1, O2Reg(scratch), c);
+ break;
+ default:
+ MOZ_CRASH("trying to compare FP and integer registers");
+ }
+}
+
+void MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c) {
+ as_cmp(src1, O2Reg(src2), c);
+}
+
+// Test for equality, (src1 ^ src2).
+void MacroAssemblerARM::ma_teq(Register src1, Imm32 imm,
+ AutoRegisterScope& scratch, Condition c) {
+ ma_alu(src1, imm, InvalidReg, scratch, OpTeq, SetCC, c);
+}
+
+void MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c) {
+ as_tst(src1, O2Reg(src2), c);
+}
+
+void MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c) {
+ as_teq(src1, op.toOp2(), c);
+}
+
+// Test (src1 & src2).
+void MacroAssemblerARM::ma_tst(Register src1, Imm32 imm,
+ AutoRegisterScope& scratch, Condition c) {
+ ma_alu(src1, imm, InvalidReg, scratch, OpTst, SetCC, c);
+}
+
+void MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c) {
+ as_tst(src1, O2Reg(src2), c);
+}
+
+void MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c) {
+ as_tst(src1, op.toOp2(), c);
+}
+
+void MacroAssemblerARM::ma_mul(Register src1, Register src2, Register dest) {
+ as_mul(dest, src1, src2);
+}
+
+void MacroAssemblerARM::ma_mul(Register src1, Imm32 imm, Register dest,
+ AutoRegisterScope& scratch) {
+ ma_mov(imm, scratch);
+ as_mul(dest, src1, scratch);
+}
+
+Assembler::Condition MacroAssemblerARM::ma_check_mul(Register src1,
+ Register src2,
+ Register dest,
+ AutoRegisterScope& scratch,
+ Condition cond) {
+ // TODO: this operation is illegal on armv6 and earlier
+ // if src2 == scratch or src2 == dest.
+ if (cond == Equal || cond == NotEqual) {
+ as_smull(scratch, dest, src1, src2, SetCC);
+ return cond;
+ }
+
+ if (cond == Overflow) {
+ as_smull(scratch, dest, src1, src2);
+ as_cmp(scratch, asr(dest, 31));
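+    // The 64-bit product fits in 32 bits exactly when its high word
+    // (scratch) equals the sign extension of its low word (dest >> 31),
+    // so NotEqual signals overflow.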
+ return NotEqual;
+ }
+
+ MOZ_CRASH("Condition NYI");
+}
+
+Assembler::Condition MacroAssemblerARM::ma_check_mul(Register src1, Imm32 imm,
+ Register dest,
+ AutoRegisterScope& scratch,
+ Condition cond) {
+ ma_mov(imm, scratch);
+
+ if (cond == Equal || cond == NotEqual) {
+ as_smull(scratch, dest, scratch, src1, SetCC);
+ return cond;
+ }
+
+ if (cond == Overflow) {
+ as_smull(scratch, dest, scratch, src1);
+ as_cmp(scratch, asr(dest, 31));
+ return NotEqual;
+ }
+
+ MOZ_CRASH("Condition NYI");
+}
+
+void MacroAssemblerARM::ma_umull(Register src1, Imm32 imm, Register destHigh,
+ Register destLow, AutoRegisterScope& scratch) {
+ ma_mov(imm, scratch);
+ as_umull(destHigh, destLow, src1, scratch);
+}
+
+void MacroAssemblerARM::ma_umull(Register src1, Register src2,
+ Register destHigh, Register destLow) {
+ as_umull(destHigh, destLow, src1, src2);
+}
+
+void MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold,
+ Register tmp, AutoRegisterScope& scratch,
+ AutoRegisterScope& scratch2,
+ int32_t shift) {
+  // We wish to compute x % ((1 << y) - 1) for a known constant, y.
+  //
+  // 1. Let b = (1 << y) and C = (1 << y) - 1; think of the 32-bit dividend as
+  // a number in base b, namely c_0*1 + c_1*b + c_2*b^2 + ... + c_n*b^n.
+  //
+  // 2. Since both addition and multiplication commute with modulus:
+  // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+  // (c_0 % C) + (c_1 % C)*(b % C) + (c_2 % C)*(b^2 % C) + ...
+  //
+  // 3. Since b == C + 1, b % C == 1 and b^n % C == 1, the whole thing
+  // simplifies to: (c_0 + c_1 + c_2 + ... + c_n) % C.
+ //
+ // Each c_n can easily be computed by a shift/bitextract, and the modulus
+ // can be maintained by simply subtracting by C whenever the number gets
+ // over C.
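+  //
+  // For example, with shift == 4 (so C == 15) and x == 0x1234: the base-16
+  // digits are 1, 2, 3 and 4, their sum is 10, and indeed 0x1234 % 15 == 10.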
+ int32_t mask = (1 << shift) - 1;
+ Label head;
+
+  // Register 'hold' holds -1 if the value was negative, 1 otherwise.
+  // 'tmp' holds the remaining bits that have not been processed yet.
+  // The scratch register serves as a temporary location to store extracted
+  // bits into, as well as holding the trial subtraction as a temp value.
+  // 'dest' is the accumulator (and holds the final result).
+ //
+  // Move the whole value into tmp, setting the condition codes so we can muck
+ // with them later.
+ as_mov(tmp, O2Reg(src), SetCC);
+ // Zero out the dest.
+ ma_mov(Imm32(0), dest);
+ // Set the hold appropriately.
+ ma_mov(Imm32(1), hold);
+ ma_mov(Imm32(-1), hold, Signed);
+ as_rsb(tmp, tmp, Imm8(0), SetCC, Signed);
+
+ // Begin the main loop.
+ bind(&head);
+ {
+ // Extract the bottom bits.
+ ma_and(Imm32(mask), tmp, scratch, scratch2);
+ // Add those bits to the accumulator.
+ ma_add(scratch, dest, dest);
+ // Do a trial subtraction, this is the same operation as cmp, but we store
+ // the dest.
+ ma_sub(dest, Imm32(mask), scratch, scratch2, SetCC);
+ // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
+ ma_mov(scratch, dest, LeaveCC, NotSigned);
+ // Get rid of the bits that we extracted before, and set the condition
+ // codes.
+ as_mov(tmp, lsr(tmp, shift), SetCC);
+ // If the shift produced zero, finish, otherwise, continue in the loop.
+ ma_b(&head, NonZero);
+ }
+
+ // Check the hold to see if we need to negate the result. Hold can only be
+ // 1 or -1, so this will never set the 0 flag.
+ as_cmp(hold, Imm8(0));
+  // If the hold was non-zero, negate the result to be in line with what JS
+  // wants; this will set the condition codes if we try to negate.
+ as_rsb(dest, dest, Imm8(0), SetCC, Signed);
+ // Since the Zero flag is not set by the compare, we can *only* set the Zero
+ // flag in the rsb, so Zero is set iff we negated zero (e.g. the result of
+ // the computation was -0.0).
+}
+
+void MacroAssemblerARM::ma_smod(Register num, Register div, Register dest,
+ AutoRegisterScope& scratch) {
+ as_sdiv(scratch, num, div);
+ as_mls(dest, num, scratch, div);
+}
+
+void MacroAssemblerARM::ma_umod(Register num, Register div, Register dest,
+ AutoRegisterScope& scratch) {
+ as_udiv(scratch, num, div);
+ as_mls(dest, num, scratch, div);
+}
+
+// Division
+void MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest,
+ Condition cond) {
+ as_sdiv(dest, num, div, cond);
+}
+
+void MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest,
+ Condition cond) {
+ as_udiv(dest, num, div, cond);
+}
+
+// Miscellaneous instructions.
+void MacroAssemblerARM::ma_clz(Register src, Register dest, Condition cond) {
+ as_clz(dest, src, cond);
+}
+
+void MacroAssemblerARM::ma_ctz(Register src, Register dest,
+ AutoRegisterScope& scratch) {
+ // int c = __clz(a & -a);
+ // return a ? 31 - c : c;
+ as_rsb(scratch, src, Imm8(0), SetCC);
+ as_and(dest, src, O2Reg(scratch), LeaveCC);
+ as_clz(dest, dest);
+ as_rsb(dest, dest, Imm8(0x1F), LeaveCC, Assembler::NotEqual);
+}
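+
+// A rough C sketch of the same count-trailing-zeros trick (illustrative
+// only; __builtin_clz stands in for the clz instruction, and clz(0) is
+// taken to be 32 as it is on ARM):
+//
+//   uint32_t Ctz32Sketch(uint32_t a) {
+//     uint32_t lowest = a & (0u - a);              // isolate lowest set bit
+//     uint32_t c = lowest ? __builtin_clz(lowest) : 32;
+//     return a ? 31 - c : c;                       // a == 0 maps to 32
+//   }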
+
+// Memory.
+// Shortcut for when we know we're transferring 32 bits of data.
+void MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset,
+ Register rt, AutoRegisterScope& scratch,
+ Index mode, Assembler::Condition cc) {
+ ma_dataTransferN(ls, 32, true, rn, offset, rt, scratch, mode, cc);
+}
+
+void MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Address& addr,
+ AutoRegisterScope& scratch, Index mode,
+ Condition cc) {
+ ma_dataTransferN(ls, 32, true, addr.base, Imm32(addr.offset), rt, scratch,
+ mode, cc);
+}
+
+void MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode,
+ Condition cc) {
+ as_dtr(IsStore, 32, mode, rt, addr, cc);
+}
+
+void MacroAssemblerARM::ma_str(Register rt, const Address& addr,
+ AutoRegisterScope& scratch, Index mode,
+ Condition cc) {
+ ma_dtr(IsStore, rt, addr, scratch, mode, cc);
+}
+
+void MacroAssemblerARM::ma_strd(Register rt, DebugOnly<Register> rt2,
+ EDtrAddr addr, Index mode, Condition cc) {
+ MOZ_ASSERT((rt.code() & 1) == 0);
+ MOZ_ASSERT(rt2.value.code() == rt.code() + 1);
+ as_extdtr(IsStore, 64, true, mode, rt, addr, cc);
+}
+
+void MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode,
+ Condition cc) {
+ as_dtr(IsLoad, 32, mode, rt, addr, cc);
+}
+
+void MacroAssemblerARM::ma_ldr(const Address& addr, Register rt,
+ AutoRegisterScope& scratch, Index mode,
+ Condition cc) {
+ ma_dtr(IsLoad, rt, addr, scratch, mode, cc);
+}
+
+void MacroAssemblerARM::ma_ldrb(DTRAddr addr, Register rt, Index mode,
+ Condition cc) {
+ as_dtr(IsLoad, 8, mode, rt, addr, cc);
+}
+
+void MacroAssemblerARM::ma_ldrsh(EDtrAddr addr, Register rt, Index mode,
+ Condition cc) {
+ as_extdtr(IsLoad, 16, true, mode, rt, addr, cc);
+}
+
+void MacroAssemblerARM::ma_ldrh(EDtrAddr addr, Register rt, Index mode,
+ Condition cc) {
+ as_extdtr(IsLoad, 16, false, mode, rt, addr, cc);
+}
+
+void MacroAssemblerARM::ma_ldrsb(EDtrAddr addr, Register rt, Index mode,
+ Condition cc) {
+ as_extdtr(IsLoad, 8, true, mode, rt, addr, cc);
+}
+
+void MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt,
+ DebugOnly<Register> rt2, Index mode,
+ Condition cc) {
+ MOZ_ASSERT((rt.code() & 1) == 0);
+ MOZ_ASSERT(rt2.value.code() == rt.code() + 1);
+ MOZ_ASSERT(addr.maybeOffsetRegister() !=
+ rt); // Undefined behavior if rm == rt/rt2.
+ MOZ_ASSERT(addr.maybeOffsetRegister() != rt2);
+ as_extdtr(IsLoad, 64, true, mode, rt, addr, cc);
+}
+
+void MacroAssemblerARM::ma_strh(Register rt, EDtrAddr addr, Index mode,
+ Condition cc) {
+ as_extdtr(IsStore, 16, false, mode, rt, addr, cc);
+}
+
+void MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode,
+ Condition cc) {
+ as_dtr(IsStore, 8, mode, rt, addr, cc);
+}
+
+// Specialized helper for moving N bits of data, where N == 8, 16, 32, or 64.
+BufferOffset MacroAssemblerARM::ma_dataTransferN(
+ LoadStore ls, int size, bool IsSigned, Register rn, Register rm,
+ Register rt, AutoRegisterScope& scratch, Index mode,
+ Assembler::Condition cc, Scale scale) {
+ MOZ_ASSERT(size == 8 || size == 16 || size == 32 || size == 64);
+
+ if (size == 32 || (size == 8 && !IsSigned)) {
+ return as_dtr(ls, size, mode, rt,
+ DTRAddr(rn, DtrRegImmShift(rm, LSL, scale)), cc);
+ }
+
+ if (scale != TimesOne) {
+ ma_lsl(Imm32(scale), rm, scratch);
+ rm = scratch;
+ }
+
+ return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)),
+ cc);
+}
+
+// No scratch register is required if scale is TimesOne.
+BufferOffset MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size,
+ bool IsSigned, Register rn,
+ Register rm, Register rt,
+ Index mode,
+ Assembler::Condition cc) {
+ MOZ_ASSERT(size == 8 || size == 16 || size == 32 || size == 64);
+ if (size == 32 || (size == 8 && !IsSigned)) {
+ return as_dtr(ls, size, mode, rt,
+ DTRAddr(rn, DtrRegImmShift(rm, LSL, TimesOne)), cc);
+ }
+ return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)),
+ cc);
+}
+
+BufferOffset MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size,
+ bool IsSigned, Register rn,
+ Imm32 offset, Register rt,
+ AutoRegisterScope& scratch,
+ Index mode,
+ Assembler::Condition cc) {
+ MOZ_ASSERT(!(ls == IsLoad && mode == PostIndex && rt == pc),
+ "Large-offset PostIndex loading into PC requires special logic: "
+ "see ma_popn_pc().");
+
+ int off = offset.value;
+
+ // We can encode this as a standard ldr.
+ if (size == 32 || (size == 8 && !IsSigned)) {
+ if (off < 4096 && off > -4096) {
+      // This encodes as a single instruction; emulating mode's behavior
+      // in a multi-instruction sequence is not necessary.
+ return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
+ }
+
+    // We cannot encode this offset in a single ldr. For mode == Offset,
+    // try to encode it as |add scratch, base, imm; ldr dest, [scratch,
+    // +offset]|. This does not work for mode == PreIndex or mode == PostIndex.
+ // PreIndex is simple, just do the add into the base register first,
+ // then do a PreIndex'ed load. PostIndexed loads can be tricky.
+ // Normally, doing the load with an index of 0, then doing an add would
+ // work, but if the destination is the PC, you don't get to execute the
+ // instruction after the branch, which will lead to the base register
+ // not being updated correctly. Explicitly handle this case, without
+ // doing anything fancy, then handle all of the other cases.
+
+ // mode == Offset
+ // add scratch, base, offset_hi
+ // ldr dest, [scratch, +offset_lo]
+ //
+ // mode == PreIndex
+ // add base, base, offset_hi
+ // ldr dest, [base, +offset_lo]!
+
+ int bottom = off & 0xfff;
+ int neg_bottom = 0x1000 - bottom;
+
+ MOZ_ASSERT(rn != scratch);
+ MOZ_ASSERT(mode != PostIndex);
+
+ // At this point, both off - bottom and off + neg_bottom will be
+ // reasonable-ish quantities.
+ //
+ // Note a neg_bottom of 0x1000 can not be encoded as an immediate
+ // negative offset in the instruction and this occurs when bottom is
+ // zero, so this case is guarded against below.
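+    //
+    // For example, off == 0x12345 splits into bottom == 0x345 and
+    // off - bottom == 0x12000 (a valid rotated 8-bit immediate), so we emit
+    // |add scratch, base, #0x12000| then |ldr dest, [scratch, #0x345]|.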
+ if (off < 0) {
+ Operand2 sub_off = Imm8(-(off - bottom)); // sub_off = bottom - off
+ if (!sub_off.invalid()) {
+ // - sub_off = off - bottom
+ as_sub(scratch, rn, sub_off, LeaveCC, cc);
+ return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(bottom)),
+ cc);
+ }
+
+ // sub_off = -neg_bottom - off
+ sub_off = Imm8(-(off + neg_bottom));
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x1000);
+ // - sub_off = neg_bottom + off
+ as_sub(scratch, rn, sub_off, LeaveCC, cc);
+ return as_dtr(ls, size, Offset, rt,
+ DTRAddr(scratch, DtrOffImm(-neg_bottom)), cc);
+ }
+ } else {
+ // sub_off = off - bottom
+ Operand2 sub_off = Imm8(off - bottom);
+ if (!sub_off.invalid()) {
+ // sub_off = off - bottom
+ as_add(scratch, rn, sub_off, LeaveCC, cc);
+ return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(bottom)),
+ cc);
+ }
+
+ // sub_off = neg_bottom + off
+ sub_off = Imm8(off + neg_bottom);
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x1000);
+ // sub_off = neg_bottom + off
+ as_add(scratch, rn, sub_off, LeaveCC, cc);
+ return as_dtr(ls, size, Offset, rt,
+ DTRAddr(scratch, DtrOffImm(-neg_bottom)), cc);
+ }
+ }
+
+ ma_mov(offset, scratch);
+ return as_dtr(ls, size, mode, rt,
+ DTRAddr(rn, DtrRegImmShift(scratch, LSL, 0)));
+ } else {
+ // Should attempt to use the extended load/store instructions.
+ if (off < 256 && off > -256) {
+ return as_extdtr(ls, size, IsSigned, mode, rt,
+ EDtrAddr(rn, EDtrOffImm(off)), cc);
+ }
+
+ // We cannot encode this offset in a single extldr. Try to encode it as
+ // an add scratch, base, imm; extldr dest, [scratch, +offset].
+ int bottom = off & 0xff;
+ int neg_bottom = 0x100 - bottom;
+ // At this point, both off - bottom and off + neg_bottom will be
+ // reasonable-ish quantities.
+ //
+ // Note a neg_bottom of 0x100 can not be encoded as an immediate
+ // negative offset in the instruction and this occurs when bottom is
+ // zero, so this case is guarded against below.
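+    //
+    // For example, off == 0x1234 splits into bottom == 0x34 and
+    // off - bottom == 0x1200 (a valid rotated 8-bit immediate), giving
+    // |add scratch, base, #0x1200| followed by an extended load/store with
+    // immediate offset #0x34.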
+ if (off < 0) {
+ // sub_off = bottom - off
+ Operand2 sub_off = Imm8(-(off - bottom));
+ if (!sub_off.invalid()) {
+ // - sub_off = off - bottom
+ as_sub(scratch, rn, sub_off, LeaveCC, cc);
+ return as_extdtr(ls, size, IsSigned, Offset, rt,
+ EDtrAddr(scratch, EDtrOffImm(bottom)), cc);
+ }
+ // sub_off = -neg_bottom - off
+ sub_off = Imm8(-(off + neg_bottom));
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x100);
+ // - sub_off = neg_bottom + off
+ as_sub(scratch, rn, sub_off, LeaveCC, cc);
+ return as_extdtr(ls, size, IsSigned, Offset, rt,
+ EDtrAddr(scratch, EDtrOffImm(-neg_bottom)), cc);
+ }
+ } else {
+ // sub_off = off - bottom
+ Operand2 sub_off = Imm8(off - bottom);
+ if (!sub_off.invalid()) {
+ // sub_off = off - bottom
+ as_add(scratch, rn, sub_off, LeaveCC, cc);
+ return as_extdtr(ls, size, IsSigned, Offset, rt,
+ EDtrAddr(scratch, EDtrOffImm(bottom)), cc);
+ }
+ // sub_off = neg_bottom + off
+ sub_off = Imm8(off + neg_bottom);
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x100);
+ // sub_off = neg_bottom + off
+ as_add(scratch, rn, sub_off, LeaveCC, cc);
+ return as_extdtr(ls, size, IsSigned, Offset, rt,
+ EDtrAddr(scratch, EDtrOffImm(-neg_bottom)), cc);
+ }
+ }
+ ma_mov(offset, scratch);
+ return as_extdtr(ls, size, IsSigned, mode, rt,
+ EDtrAddr(rn, EDtrOffReg(scratch)), cc);
+ }
+}
+
+void MacroAssemblerARM::ma_pop(Register r) {
+ as_dtr(IsLoad, 32, PostIndex, r, DTRAddr(sp, DtrOffImm(4)));
+}
+
+void MacroAssemblerARM::ma_popn_pc(Imm32 n, AutoRegisterScope& scratch,
+ AutoRegisterScope& scratch2) {
+ // pc <- [sp]; sp += n
+ int32_t nv = n.value;
+
+ if (nv < 4096 && nv >= -4096) {
+ as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(nv)));
+ } else {
+ ma_mov(sp, scratch);
+ ma_add(Imm32(n), sp, scratch2);
+ as_dtr(IsLoad, 32, Offset, pc, DTRAddr(scratch, DtrOffImm(0)));
+ }
+}
+
+void MacroAssemblerARM::ma_push(Register r) {
+ MOZ_ASSERT(r != sp, "Use ma_push_sp().");
+ as_dtr(IsStore, 32, PreIndex, r, DTRAddr(sp, DtrOffImm(-4)));
+}
+
+void MacroAssemblerARM::ma_push_sp(Register r, AutoRegisterScope& scratch) {
+ // Pushing sp is not well-defined: use two instructions.
+ MOZ_ASSERT(r == sp);
+ ma_mov(sp, scratch);
+ as_dtr(IsStore, 32, PreIndex, scratch, DTRAddr(sp, DtrOffImm(-4)));
+}
+
+void MacroAssemblerARM::ma_vpop(VFPRegister r) {
+ startFloatTransferM(IsLoad, sp, IA, WriteBack);
+ transferFloatReg(r);
+ finishFloatTransfer();
+}
+
+void MacroAssemblerARM::ma_vpush(VFPRegister r) {
+ startFloatTransferM(IsStore, sp, DB, WriteBack);
+ transferFloatReg(r);
+ finishFloatTransfer();
+}
+
+// Barriers
+void MacroAssemblerARM::ma_dmb(BarrierOption option) {
+ if (HasDMBDSBISB()) {
+ as_dmb(option);
+ } else {
+ as_dmb_trap();
+ }
+}
+
+void MacroAssemblerARM::ma_dsb(BarrierOption option) {
+ if (HasDMBDSBISB()) {
+ as_dsb(option);
+ } else {
+ as_dsb_trap();
+ }
+}
+
+// Branches when done from within arm-specific code.
+BufferOffset MacroAssemblerARM::ma_b(Label* dest, Assembler::Condition c) {
+ return as_b(dest, c);
+}
+
+void MacroAssemblerARM::ma_bx(Register dest, Assembler::Condition c) {
+ as_bx(dest, c);
+}
+
+void MacroAssemblerARM::ma_b(void* target, Assembler::Condition c) {
+ // An immediate pool is used for easier patching.
+ as_Imm32Pool(pc, uint32_t(target), c);
+}
+
+// This is almost NEVER necessary: we'll basically never be calling a label,
+// except possibly in the crazy bailout-table case.
+void MacroAssemblerARM::ma_bl(Label* dest, Assembler::Condition c) {
+ as_bl(dest, c);
+}
+
+void MacroAssemblerARM::ma_blx(Register reg, Assembler::Condition c) {
+ as_blx(reg, c);
+}
+
+// VFP/ALU
+void MacroAssemblerARM::ma_vadd(FloatRegister src1, FloatRegister src2,
+ FloatRegister dst) {
+ as_vadd(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+
+void MacroAssemblerARM::ma_vadd_f32(FloatRegister src1, FloatRegister src2,
+ FloatRegister dst) {
+ as_vadd(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
+ VFPRegister(src2).singleOverlay());
+}
+
+void MacroAssemblerARM::ma_vsub(FloatRegister src1, FloatRegister src2,
+ FloatRegister dst) {
+ as_vsub(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+
+void MacroAssemblerARM::ma_vsub_f32(FloatRegister src1, FloatRegister src2,
+ FloatRegister dst) {
+ as_vsub(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
+ VFPRegister(src2).singleOverlay());
+}
+
+void MacroAssemblerARM::ma_vmul(FloatRegister src1, FloatRegister src2,
+ FloatRegister dst) {
+ as_vmul(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+
+void MacroAssemblerARM::ma_vmul_f32(FloatRegister src1, FloatRegister src2,
+ FloatRegister dst) {
+ as_vmul(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
+ VFPRegister(src2).singleOverlay());
+}
+
+void MacroAssemblerARM::ma_vdiv(FloatRegister src1, FloatRegister src2,
+ FloatRegister dst) {
+ as_vdiv(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+
+void MacroAssemblerARM::ma_vdiv_f32(FloatRegister src1, FloatRegister src2,
+ FloatRegister dst) {
+ as_vdiv(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
+ VFPRegister(src2).singleOverlay());
+}
+
+void MacroAssemblerARM::ma_vmov(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ as_vmov(dest, src, cc);
+}
+
+void MacroAssemblerARM::ma_vmov_f32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(),
+ cc);
+}
+
+void MacroAssemblerARM::ma_vneg(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ as_vneg(dest, src, cc);
+}
+
+void MacroAssemblerARM::ma_vneg_f32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ as_vneg(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(),
+ cc);
+}
+
+void MacroAssemblerARM::ma_vabs(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ as_vabs(dest, src, cc);
+}
+
+void MacroAssemblerARM::ma_vabs_f32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ as_vabs(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(),
+ cc);
+}
+
+void MacroAssemblerARM::ma_vsqrt(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ as_vsqrt(dest, src, cc);
+}
+
+void MacroAssemblerARM::ma_vsqrt_f32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ as_vsqrt(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(),
+ cc);
+}
+
+static inline uint32_t DoubleHighWord(double d) {
+ return static_cast<uint32_t>(BitwiseCast<uint64_t>(d) >> 32);
+}
+
+static inline uint32_t DoubleLowWord(double d) {
+ return static_cast<uint32_t>(BitwiseCast<uint64_t>(d)) & uint32_t(0xffffffff);
+}
+
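+// Example of a VFPv3-encodable immediate: 1.0 has the bit pattern
+// 0x3ff0000000000000, so DoubleLowWord(1.0) == 0 and its high word
+// 0x3ff00000 is accepted by VFPImm. Any double whose low word is non-zero
+// cannot use the short vimm encoding and must fall back to the pool.
+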
+void MacroAssemblerARM::ma_vimm(double value, FloatRegister dest,
+ Condition cc) {
+ if (HasVFPv3()) {
+ if (DoubleLowWord(value) == 0) {
+ if (DoubleHighWord(value) == 0) {
+ // To zero a register, load 1.0, then execute dN <- dN - dN
+ as_vimm(dest, VFPImm::One, cc);
+ as_vsub(dest, dest, dest, cc);
+ return;
+ }
+
+ VFPImm enc(DoubleHighWord(value));
+ if (enc.isValid()) {
+ as_vimm(dest, enc, cc);
+ return;
+ }
+ }
+ }
+ // Fall back to putting the value in a pool.
+ as_FImm64Pool(dest, value, cc);
+}
+
+void MacroAssemblerARM::ma_vimm_f32(float value, FloatRegister dest,
+ Condition cc) {
+ VFPRegister vd = VFPRegister(dest).singleOverlay();
+ if (HasVFPv3()) {
+ if (IsPositiveZero(value)) {
+ // To zero a register, load 1.0, then execute sN <- sN - sN.
+ as_vimm(vd, VFPImm::One, cc);
+ as_vsub(vd, vd, vd, cc);
+ return;
+ }
+
+ // Note that the vimm immediate float32 instruction encoding differs
+ // from the vimm immediate double encoding, but this difference matches
+ // the difference in the floating point formats, so it is possible to
+ // convert the float32 to a double and then use the double encoding
+  // paths. It is still necessary to first check that the double's low
+  // word is zero, because some float32 values set these bits and this
+  // cannot be ignored.
+ double doubleValue(value);
+ if (DoubleLowWord(doubleValue) == 0) {
+ VFPImm enc(DoubleHighWord(doubleValue));
+ if (enc.isValid()) {
+ as_vimm(vd, enc, cc);
+ return;
+ }
+ }
+ }
+
+ // Fall back to putting the value in a pool.
+ as_FImm32Pool(vd, value, cc);
+}
+
+void MacroAssemblerARM::ma_vcmp(FloatRegister src1, FloatRegister src2,
+ Condition cc) {
+ as_vcmp(VFPRegister(src1), VFPRegister(src2), cc);
+}
+
+void MacroAssemblerARM::ma_vcmp_f32(FloatRegister src1, FloatRegister src2,
+ Condition cc) {
+ as_vcmp(VFPRegister(src1).singleOverlay(), VFPRegister(src2).singleOverlay(),
+ cc);
+}
+
+void MacroAssemblerARM::ma_vcmpz(FloatRegister src1, Condition cc) {
+ as_vcmpz(VFPRegister(src1), cc);
+}
+
+void MacroAssemblerARM::ma_vcmpz_f32(FloatRegister src1, Condition cc) {
+ as_vcmpz(VFPRegister(src1).singleOverlay(), cc);
+}
+
+void MacroAssemblerARM::ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ MOZ_ASSERT(src.isDouble());
+ MOZ_ASSERT(dest.isSInt());
+ as_vcvt(dest, src, false, cc);
+}
+
+void MacroAssemblerARM::ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ MOZ_ASSERT(src.isDouble());
+ MOZ_ASSERT(dest.isUInt());
+ as_vcvt(dest, src, false, cc);
+}
+
+void MacroAssemblerARM::ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ MOZ_ASSERT(src.isSInt());
+ MOZ_ASSERT(dest.isDouble());
+ as_vcvt(dest, src, false, cc);
+}
+
+void MacroAssemblerARM::ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ MOZ_ASSERT(src.isUInt());
+ MOZ_ASSERT(dest.isDouble());
+ as_vcvt(dest, src, false, cc);
+}
+
+void MacroAssemblerARM::ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ MOZ_ASSERT(src.isSingle());
+ MOZ_ASSERT(dest.isSInt());
+ as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src).singleOverlay(),
+ false, cc);
+}
+
+void MacroAssemblerARM::ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ MOZ_ASSERT(src.isSingle());
+ MOZ_ASSERT(dest.isUInt());
+ as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src).singleOverlay(),
+ false, cc);
+}
+
+void MacroAssemblerARM::ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ MOZ_ASSERT(src.isSInt());
+ MOZ_ASSERT(dest.isSingle());
+ as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).sintOverlay(),
+ false, cc);
+}
+
+void MacroAssemblerARM::ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest,
+ Condition cc) {
+ MOZ_ASSERT(src.isUInt());
+ MOZ_ASSERT(dest.isSingle());
+ as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).uintOverlay(),
+ false, cc);
+}
+
+void MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest,
+ Condition cc) {
+ as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, cc);
+}
+
+void MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest1,
+ Register dest2, Condition cc) {
+ as_vxfer(dest1, dest2, VFPRegister(src), FloatToCore, cc);
+}
+
+void MacroAssemblerARM::ma_vxfer(Register src, FloatRegister dest,
+ Condition cc) {
+ as_vxfer(src, InvalidReg, VFPRegister(dest).singleOverlay(), CoreToFloat, cc);
+}
+
+void MacroAssemblerARM::ma_vxfer(Register src1, Register src2,
+ FloatRegister dest, Condition cc) {
+ as_vxfer(src1, src2, VFPRegister(dest), CoreToFloat, cc);
+}
+
+BufferOffset MacroAssemblerARM::ma_vdtr(LoadStore ls, const Address& addr,
+ VFPRegister rt,
+ AutoRegisterScope& scratch,
+ Condition cc) {
+ int off = addr.offset;
+ MOZ_ASSERT((off & 3) == 0);
+ Register base = addr.base;
+ if (off > -1024 && off < 1024) {
+ return as_vdtr(ls, rt, Operand(addr).toVFPAddr(), cc);
+ }
+
+  // We cannot encode this offset in a single ldr. Try to encode it as an
+ // add scratch, base, imm; ldr dest, [scratch, +offset].
+ int bottom = off & (0xff << 2);
+ int neg_bottom = (0x100 << 2) - bottom;
+ // At this point, both off - bottom and off + neg_bottom will be
+ // reasonable-ish quantities.
+ //
+ // Note a neg_bottom of 0x400 can not be encoded as an immediate negative
+ // offset in the instruction and this occurs when bottom is zero, so this
+ // case is guarded against below.
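+  //
+  // For example, off == 0x1404 splits into bottom == 0x4 and
+  // off - bottom == 0x1400 (a valid rotated 8-bit immediate), so we emit
+  // |add scratch, base, #0x1400| and address the slot as [scratch, #0x4].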
+ if (off < 0) {
+ // sub_off = bottom - off
+ Operand2 sub_off = Imm8(-(off - bottom));
+ if (!sub_off.invalid()) {
+ // - sub_off = off - bottom
+ as_sub(scratch, base, sub_off, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(bottom)), cc);
+ }
+ // sub_off = -neg_bottom - off
+ sub_off = Imm8(-(off + neg_bottom));
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x400);
+ // - sub_off = neg_bottom + off
+ as_sub(scratch, base, sub_off, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(-neg_bottom)), cc);
+ }
+ } else {
+ // sub_off = off - bottom
+ Operand2 sub_off = Imm8(off - bottom);
+ if (!sub_off.invalid()) {
+ // sub_off = off - bottom
+ as_add(scratch, base, sub_off, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(bottom)), cc);
+ }
+ // sub_off = neg_bottom + off
+ sub_off = Imm8(off + neg_bottom);
+ if (!sub_off.invalid() && bottom != 0) {
+ // Guarded against by: bottom != 0
+ MOZ_ASSERT(neg_bottom < 0x400);
+ // sub_off = neg_bottom + off
+ as_add(scratch, base, sub_off, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(-neg_bottom)), cc);
+ }
+ }
+
+  // Safe to use scratch as dest, since ma_add() overwrites dest only at the
+  // end and cannot use dest as an internal scratch, because dest may also
+  // == base.
+ ma_add(base, Imm32(off), scratch, scratch, LeaveCC, cc);
+ return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(0)), cc);
+}
+
+BufferOffset MacroAssemblerARM::ma_vldr(VFPAddr addr, VFPRegister dest,
+ Condition cc) {
+ return as_vdtr(IsLoad, dest, addr, cc);
+}
+
+BufferOffset MacroAssemblerARM::ma_vldr(const Address& addr, VFPRegister dest,
+ AutoRegisterScope& scratch,
+ Condition cc) {
+ return ma_vdtr(IsLoad, addr, dest, scratch, cc);
+}
+
+BufferOffset MacroAssemblerARM::ma_vldr(VFPRegister src, Register base,
+ Register index,
+ AutoRegisterScope& scratch,
+ int32_t shift, Condition cc) {
+ as_add(scratch, base, lsl(index, shift), LeaveCC, cc);
+ return as_vdtr(IsLoad, src, Operand(Address(scratch, 0)).toVFPAddr(), cc);
+}
+
+BufferOffset MacroAssemblerARM::ma_vstr(VFPRegister src, VFPAddr addr,
+ Condition cc) {
+ return as_vdtr(IsStore, src, addr, cc);
+}
+
+BufferOffset MacroAssemblerARM::ma_vstr(VFPRegister src, const Address& addr,
+ AutoRegisterScope& scratch,
+ Condition cc) {
+ return ma_vdtr(IsStore, addr, src, scratch, cc);
+}
+
+BufferOffset MacroAssemblerARM::ma_vstr(
+ VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
+ AutoRegisterScope& scratch2, int32_t shift, int32_t offset, Condition cc) {
+ as_add(scratch, base, lsl(index, shift), LeaveCC, cc);
+ return ma_vstr(src, Address(scratch, offset), scratch2, cc);
+}
+
+// Without an offset, no second scratch register is necessary.
+BufferOffset MacroAssemblerARM::ma_vstr(VFPRegister src, Register base,
+ Register index,
+ AutoRegisterScope& scratch,
+ int32_t shift, Condition cc) {
+ as_add(scratch, base, lsl(index, shift), LeaveCC, cc);
+ return as_vdtr(IsStore, src, Operand(Address(scratch, 0)).toVFPAddr(), cc);
+}
+
+bool MacroAssemblerARMCompat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ asMasm().PushFrameDescriptor(FrameType::IonJS); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+ asMasm().Push(FramePointer);
+ return true;
+}
+
+void MacroAssemblerARMCompat::move32(Imm32 imm, Register dest) {
+ ma_mov(imm, dest);
+}
+
+void MacroAssemblerARMCompat::move32(Register src, Register dest) {
+ ma_mov(src, dest);
+}
+
+void MacroAssemblerARMCompat::movePtr(Register src, Register dest) {
+ ma_mov(src, dest);
+}
+
+void MacroAssemblerARMCompat::movePtr(ImmWord imm, Register dest) {
+ ma_mov(Imm32(imm.value), dest);
+}
+
+void MacroAssemblerARMCompat::movePtr(ImmGCPtr imm, Register dest) {
+ ma_mov(imm, dest);
+}
+
+void MacroAssemblerARMCompat::movePtr(ImmPtr imm, Register dest) {
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+
+void MacroAssemblerARMCompat::movePtr(wasm::SymbolicAddress imm,
+ Register dest) {
+ append(wasm::SymbolicAccess(CodeOffset(currentOffset()), imm));
+ ma_movPatchable(Imm32(-1), dest, Always);
+}
+
+void MacroAssemblerARMCompat::load8ZeroExtend(const Address& address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest,
+ scratch);
+}
+
+void MacroAssemblerARMCompat::load8ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ Register base = src.base;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (src.offset == 0) {
+ ma_ldrb(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest);
+ } else {
+ ma_add(base, Imm32(src.offset), scratch, scratch2);
+ ma_ldrb(DTRAddr(scratch, DtrRegImmShift(src.index, LSL, scale)), dest);
+ }
+}
+
+void MacroAssemblerARMCompat::load8SignExtend(const Address& address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsLoad, 8, true, address.base, Imm32(address.offset), dest,
+ scratch);
+}
+
+void MacroAssemblerARMCompat::load8SignExtend(const BaseIndex& src,
+ Register dest) {
+ Register index = src.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ // ARMv7 does not have LSL on an index register with an extended load.
+ if (src.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(src.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (src.offset != 0) {
+ if (index != scratch) {
+ ma_mov(index, scratch);
+ index = scratch;
+ }
+ ma_add(Imm32(src.offset), index, scratch2);
+ }
+ ma_ldrsb(EDtrAddr(src.base, EDtrOffReg(index)), dest);
+}
+
+void MacroAssemblerARMCompat::load16ZeroExtend(const Address& address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsLoad, 16, false, address.base, Imm32(address.offset), dest,
+ scratch);
+}
+
+void MacroAssemblerARMCompat::load16ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ Register index = src.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ // ARMv7 does not have LSL on an index register with an extended load.
+ if (src.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(src.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (src.offset != 0) {
+ if (index != scratch) {
+ ma_mov(index, scratch);
+ index = scratch;
+ }
+ ma_add(Imm32(src.offset), index, scratch2);
+ }
+ ma_ldrh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
+}
+
+void MacroAssemblerARMCompat::load16SignExtend(const Address& address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsLoad, 16, true, address.base, Imm32(address.offset), dest,
+ scratch);
+}
+
+void MacroAssemblerARMCompat::load16SignExtend(const BaseIndex& src,
+ Register dest) {
+ Register index = src.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+  // ARMv7 does not have LSL on an index register with an extended load.
+ if (src.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(src.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (src.offset != 0) {
+ if (index != scratch) {
+ ma_mov(index, scratch);
+ index = scratch;
+ }
+ ma_add(Imm32(src.offset), index, scratch2);
+ }
+ ma_ldrsh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
+}
+
+void MacroAssemblerARMCompat::load32(const Address& address, Register dest) {
+ loadPtr(address, dest);
+}
+
+void MacroAssemblerARMCompat::load32(const BaseIndex& address, Register dest) {
+ loadPtr(address, dest);
+}
+
+void MacroAssemblerARMCompat::load32(AbsoluteAddress address, Register dest) {
+ loadPtr(address, dest);
+}
+
+void MacroAssemblerARMCompat::loadPtr(const Address& address, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(address, dest, scratch);
+}
+
+void MacroAssemblerARMCompat::loadPtr(const BaseIndex& src, Register dest) {
+ Register base = src.base;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (src.offset != 0) {
+ ma_add(base, Imm32(src.offset), scratch, scratch2);
+ ma_ldr(DTRAddr(scratch, DtrRegImmShift(src.index, LSL, scale)), dest);
+ } else {
+ ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest);
+ }
+}
+
+void MacroAssemblerARMCompat::loadPtr(AbsoluteAddress address, Register dest) {
+ MOZ_ASSERT(dest != pc); // Use dest as a scratch register.
+ movePtr(ImmWord(uintptr_t(address.addr)), dest);
+ loadPtr(Address(dest, 0), dest);
+}
+
+void MacroAssemblerARMCompat::loadPtr(wasm::SymbolicAddress address,
+ Register dest) {
+ MOZ_ASSERT(dest != pc); // Use dest as a scratch register.
+ movePtr(address, dest);
+ loadPtr(Address(dest, 0), dest);
+}
+
+void MacroAssemblerARMCompat::loadPrivate(const Address& address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(ToPayload(address), dest, scratch);
+}
+
+void MacroAssemblerARMCompat::loadDouble(const Address& address,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_vldr(address, dest, scratch);
+}
+
+void MacroAssemblerARMCompat::loadDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ // VFP instructions don't even support register Base + register Index modes,
+ // so just add the index, then handle the offset like normal.
+ Register base = src.base;
+ Register index = src.index;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ int32_t offset = src.offset;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ as_add(scratch, base, lsl(index, scale));
+ ma_vldr(Address(scratch, offset), dest, scratch2);
+}
+
+void MacroAssemblerARMCompat::loadFloatAsDouble(const Address& address,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+
+ VFPRegister rt = dest;
+ ma_vldr(address, rt.singleOverlay(), scratch);
+ as_vcvt(rt, rt.singleOverlay());
+}
+
+void MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ // VFP instructions don't even support register Base + register Index modes,
+ // so just add the index, then handle the offset like normal.
+ Register base = src.base;
+ Register index = src.index;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ int32_t offset = src.offset;
+ VFPRegister rt = dest;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ as_add(scratch, base, lsl(index, scale));
+ ma_vldr(Address(scratch, offset), rt.singleOverlay(), scratch2);
+ as_vcvt(rt, rt.singleOverlay());
+}
+
+void MacroAssemblerARMCompat::loadFloat32(const Address& address,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_vldr(address, VFPRegister(dest).singleOverlay(), scratch);
+}
+
+void MacroAssemblerARMCompat::loadFloat32(const BaseIndex& src,
+ FloatRegister dest) {
+ // VFP instructions don't even support register Base + register Index modes,
+ // so just add the index, then handle the offset like normal.
+ Register base = src.base;
+ Register index = src.index;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ int32_t offset = src.offset;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ as_add(scratch, base, lsl(index, scale));
+ ma_vldr(Address(scratch, offset), VFPRegister(dest).singleOverlay(),
+ scratch2);
+}
+
+void MacroAssemblerARMCompat::store8(Imm32 imm, const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_mov(imm, scratch2);
+ store8(scratch2, address);
+}
+
+void MacroAssemblerARMCompat::store8(Register src, const Address& address) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsStore, 8, false, address.base, Imm32(address.offset), src,
+ scratch);
+}
+
+void MacroAssemblerARMCompat::store8(Imm32 imm, const BaseIndex& dest) {
+ Register base = dest.base;
+ uint32_t scale = Imm32::ShiftOf(dest.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (dest.offset != 0) {
+ ma_add(base, Imm32(dest.offset), scratch, scratch2);
+ ma_mov(imm, scratch2);
+ ma_strb(scratch2, DTRAddr(scratch, DtrRegImmShift(dest.index, LSL, scale)));
+ } else {
+ ma_mov(imm, scratch2);
+ ma_strb(scratch2, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
+ }
+}
+
+void MacroAssemblerARMCompat::store8(Register src, const BaseIndex& dest) {
+ Register base = dest.base;
+ uint32_t scale = Imm32::ShiftOf(dest.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (dest.offset != 0) {
+ ma_add(base, Imm32(dest.offset), scratch, scratch2);
+ ma_strb(src, DTRAddr(scratch, DtrRegImmShift(dest.index, LSL, scale)));
+ } else {
+ ma_strb(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
+ }
+}
+
+void MacroAssemblerARMCompat::store16(Imm32 imm, const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_mov(imm, scratch2);
+ store16(scratch2, address);
+}
+
+void MacroAssemblerARMCompat::store16(Register src, const Address& address) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsStore, 16, false, address.base, Imm32(address.offset), src,
+ scratch);
+}
+
+void MacroAssemblerARMCompat::store16(Imm32 imm, const BaseIndex& dest) {
+ Register index = dest.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+  // ARMv7 does not have LSL on an index register with an extended store.
+ if (dest.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(dest.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (dest.offset != 0) {
+ ma_add(index, Imm32(dest.offset), scratch, scratch2);
+ index = scratch;
+ }
+
+ ma_mov(imm, scratch2);
+ ma_strh(scratch2, EDtrAddr(dest.base, EDtrOffReg(index)));
+}
+
+void MacroAssemblerARMCompat::store16(Register src, const BaseIndex& address) {
+ Register index = address.index;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+  // ARMv7 does not have LSL on an index register with an extended store.
+ if (address.scale != TimesOne) {
+ ma_lsl(Imm32::ShiftOf(address.scale), index, scratch);
+ index = scratch;
+ }
+
+ if (address.offset != 0) {
+ ma_add(index, Imm32(address.offset), scratch, scratch2);
+ index = scratch;
+ }
+ ma_strh(src, EDtrAddr(address.base, EDtrOffReg(index)));
+}
+
+void MacroAssemblerARMCompat::store32(Register src, AbsoluteAddress address) {
+ storePtr(src, address);
+}
+
+void MacroAssemblerARMCompat::store32(Register src, const Address& address) {
+ storePtr(src, address);
+}
+
+void MacroAssemblerARMCompat::store32(Imm32 src, const Address& address) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ move32(src, scratch);
+ ma_str(scratch, address, scratch2);
+}
+
+void MacroAssemblerARMCompat::store32(Imm32 imm, const BaseIndex& dest) {
+ Register base = dest.base;
+ uint32_t scale = Imm32::ShiftOf(dest.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (dest.offset != 0) {
+ ma_add(base, Imm32(dest.offset), scratch, scratch2);
+ ma_mov(imm, scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrRegImmShift(dest.index, LSL, scale)));
+ } else {
+ ma_mov(imm, scratch);
+ ma_str(scratch, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
+ }
+}
+
+void MacroAssemblerARMCompat::store32(Register src, const BaseIndex& dest) {
+ Register base = dest.base;
+ uint32_t scale = Imm32::ShiftOf(dest.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (dest.offset != 0) {
+ ma_add(base, Imm32(dest.offset), scratch, scratch2);
+ ma_str(src, DTRAddr(scratch, DtrRegImmShift(dest.index, LSL, scale)));
+ } else {
+ ma_str(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
+ }
+}
+
+void MacroAssemblerARMCompat::storePtr(ImmWord imm, const Address& address) {
+ store32(Imm32(imm.value), address);
+}
+
+void MacroAssemblerARMCompat::storePtr(ImmWord imm, const BaseIndex& address) {
+ store32(Imm32(imm.value), address);
+}
+
+void MacroAssemblerARMCompat::storePtr(ImmPtr imm, const Address& address) {
+ store32(Imm32(uintptr_t(imm.value)), address);
+}
+
+void MacroAssemblerARMCompat::storePtr(ImmPtr imm, const BaseIndex& address) {
+ store32(Imm32(uintptr_t(imm.value)), address);
+}
+
+void MacroAssemblerARMCompat::storePtr(ImmGCPtr imm, const Address& address) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_mov(imm, scratch);
+ ma_str(scratch, address, scratch2);
+}
+
+void MacroAssemblerARMCompat::storePtr(ImmGCPtr imm, const BaseIndex& address) {
+ Register base = address.base;
+ uint32_t scale = Imm32::ShiftOf(address.scale).value;
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (address.offset != 0) {
+ ma_add(base, Imm32(address.offset), scratch, scratch2);
+ ma_mov(imm, scratch2);
+ ma_str(scratch2,
+ DTRAddr(scratch, DtrRegImmShift(address.index, LSL, scale)));
+ } else {
+ ma_mov(imm, scratch);
+ ma_str(scratch, DTRAddr(base, DtrRegImmShift(address.index, LSL, scale)));
+ }
+}
+
+void MacroAssemblerARMCompat::storePtr(Register src, const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_str(src, address, scratch2);
+}
+
+void MacroAssemblerARMCompat::storePtr(Register src, const BaseIndex& address) {
+ store32(src, address);
+}
+
+void MacroAssemblerARMCompat::storePtr(Register src, AbsoluteAddress dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmWord(uintptr_t(dest.addr)), scratch);
+ ma_str(src, DTRAddr(scratch, DtrOffImm(0)));
+}
+
+// Note: this function clobbers the input register.
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+ if (HasVFPv3()) {
+ Label notSplit;
+ {
+ ScratchDoubleScope scratchDouble(*this);
+ MOZ_ASSERT(input != scratchDouble);
+ loadConstantDouble(0.5, scratchDouble);
+
+ ma_vadd(input, scratchDouble, scratchDouble);
+ // Convert the double into an unsigned fixed point value with 24 bits of
+ // precision. The resulting number will look like 0xII.DDDDDD
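+      // For example, an input of 2.5 becomes 3.0 after the add and converts
+      // to the fixed-point value 0x03000000; its 24 fraction bits are zero
+      // and 2.5's low mantissa word is zero (it is exact), so the fixup
+      // below clears the low bit, rounding 2.5 to 2 (round half to even).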
+ as_vcvtFixed(scratchDouble, false, 24, true);
+ }
+
+ // Move the fixed point value into an integer register.
+ {
+ ScratchFloat32Scope scratchFloat(*this);
+ as_vxfer(output, InvalidReg, scratchFloat.uintOverlay(), FloatToCore);
+ }
+
+ ScratchRegisterScope scratch(*this);
+
+ // See if this value *might* have been an exact integer after adding
+ // 0.5. This tests the 1/2 through 1/16,777,216th places, but 0.5 needs
+ // to be tested out to the 1/140,737,488,355,328th place.
+ ma_tst(output, Imm32(0x00ffffff), scratch);
+ // Convert to a uint8 by shifting out all of the fraction bits.
+ ma_lsr(Imm32(24), output, output);
+ // If any of the bottom 24 bits were non-zero, then we're good, since
+ // this number can't be exactly XX.0
+ ma_b(&notSplit, NonZero);
+ as_vxfer(scratch, InvalidReg, input, FloatToCore);
+ as_cmp(scratch, Imm8(0));
+ // If the lower 32 bits of the double were 0, then this was an exact number,
+ // and it should be even.
+ as_bic(output, output, Imm8(1), LeaveCC, Zero);
+ bind(&notSplit);
+ } else {
+ ScratchDoubleScope scratchDouble(*this);
+ MOZ_ASSERT(input != scratchDouble);
+ loadConstantDouble(0.5, scratchDouble);
+
+ Label outOfRange;
+ ma_vcmpz(input);
+ // Do the add, in place so we can reference it later.
+ ma_vadd(input, scratchDouble, input);
+ // Do the conversion to an integer.
+ as_vcvt(VFPRegister(scratchDouble).uintOverlay(), VFPRegister(input));
+ // Copy the converted value out.
+ as_vxfer(output, InvalidReg, scratchDouble, FloatToCore);
+ as_vmrs(pc);
+ ma_mov(Imm32(0), output, Overflow); // NaN => 0
+ ma_b(&outOfRange, Overflow); // NaN
+ as_cmp(output, Imm8(0xff));
+ ma_mov(Imm32(0xff), output, Above);
+ ma_b(&outOfRange, Above);
+ // Convert it back to see if we got the same value back.
+ as_vcvt(scratchDouble, VFPRegister(scratchDouble).uintOverlay());
+ // Do the check.
+ as_vcmp(scratchDouble, input);
+ as_vmrs(pc);
+ as_bic(output, output, Imm8(1), LeaveCC, Zero);
+ bind(&outOfRange);
+ }
+}
+
+void MacroAssemblerARMCompat::cmp32(Register lhs, Imm32 rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_cmp(lhs, rhs, scratch);
+}
+
+void MacroAssemblerARMCompat::cmp32(Register lhs, Register rhs) {
+ ma_cmp(lhs, rhs);
+}
+
+void MacroAssemblerARMCompat::cmp32(const Address& lhs, Imm32 rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs, scratch2);
+}
+
+void MacroAssemblerARMCompat::cmp32(const Address& lhs, Register rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs);
+}
+
+void MacroAssemblerARMCompat::cmpPtr(Register lhs, ImmWord rhs) {
+ cmp32(lhs, Imm32(rhs.value));
+}
+
+void MacroAssemblerARMCompat::cmpPtr(Register lhs, ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+}
+
+void MacroAssemblerARMCompat::cmpPtr(Register lhs, Register rhs) {
+ ma_cmp(lhs, rhs);
+}
+
+void MacroAssemblerARMCompat::cmpPtr(Register lhs, ImmGCPtr rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_cmp(lhs, rhs, scratch);
+}
+
+void MacroAssemblerARMCompat::cmpPtr(Register lhs, Imm32 rhs) {
+ cmp32(lhs, rhs);
+}
+
+void MacroAssemblerARMCompat::cmpPtr(const Address& lhs, Register rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs);
+}
+
+void MacroAssemblerARMCompat::cmpPtr(const Address& lhs, ImmWord rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, Imm32(rhs.value), scratch2);
+}
+
+void MacroAssemblerARMCompat::cmpPtr(const Address& lhs, ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+}
+
+void MacroAssemblerARMCompat::cmpPtr(const Address& lhs, ImmGCPtr rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs, scratch2);
+}
+
+void MacroAssemblerARMCompat::cmpPtr(const Address& lhs, Imm32 rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(lhs, scratch, scratch2);
+ ma_cmp(scratch, rhs, scratch2);
+}
+
+void MacroAssemblerARMCompat::setStackArg(Register reg, uint32_t arg) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_dataTransferN(IsStore, 32, true, sp, Imm32(arg * sizeof(intptr_t)), reg,
+ scratch);
+}
+
+void MacroAssemblerARMCompat::minMaxDouble(FloatRegister srcDest,
+ FloatRegister second, bool canBeNaN,
+ bool isMax) {
+ FloatRegister first = srcDest;
+
+ Label nan, equal, returnSecond, done;
+
+ Assembler::Condition cond = isMax ? Assembler::VFP_LessThanOrEqual
+ : Assembler::VFP_GreaterThanOrEqual;
+
+ compareDouble(first, second);
+ // First or second is NaN, result is NaN.
+ ma_b(&nan, Assembler::VFP_Unordered);
+ // Make sure we handle -0 and 0 right.
+ ma_b(&equal, Assembler::VFP_Equal);
+ ma_b(&returnSecond, cond);
+ ma_b(&done);
+
+ // Check for zero.
+ bind(&equal);
+ compareDouble(first, NoVFPRegister);
+ // First wasn't 0 or -0, so just return it.
+ ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ ma_vadd(second, first, first);
+ } else {
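+    // This computes -((-first) - second), which yields -0 whenever either
+    // operand is -0 and +0 only when both are +0, matching IEEE min for
+    // signed zeros.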
+ ma_vneg(first, first);
+ ma_vsub(first, second, first);
+ ma_vneg(first, first);
+ }
+ ma_b(&done);
+
+ bind(&nan);
+ // If the first argument is the NaN, return it; otherwise return the second
+ // operand.
+ compareDouble(first, first);
+ ma_vmov(first, srcDest, Assembler::VFP_Unordered);
+ ma_b(&done, Assembler::VFP_Unordered);
+
+ bind(&returnSecond);
+ ma_vmov(second, srcDest);
+
+ bind(&done);
+}
+
+void MacroAssemblerARMCompat::minMaxFloat32(FloatRegister srcDest,
+ FloatRegister second, bool canBeNaN,
+ bool isMax) {
+ FloatRegister first = srcDest;
+
+ Label nan, equal, returnSecond, done;
+
+ Assembler::Condition cond = isMax ? Assembler::VFP_LessThanOrEqual
+ : Assembler::VFP_GreaterThanOrEqual;
+
+ compareFloat(first, second);
+ // First or second is NaN, result is NaN.
+ ma_b(&nan, Assembler::VFP_Unordered);
+ // Make sure we handle -0 and 0 right.
+ ma_b(&equal, Assembler::VFP_Equal);
+ ma_b(&returnSecond, cond);
+ ma_b(&done);
+
+ // Check for zero.
+ bind(&equal);
+ compareFloat(first, NoVFPRegister);
+ // First wasn't 0 or -0, so just return it.
+ ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ ma_vadd_f32(second, first, first);
+ } else {
+ ma_vneg_f32(first, first);
+ ma_vsub_f32(first, second, first);
+ ma_vneg_f32(first, first);
+ }
+ ma_b(&done);
+
+ bind(&nan);
+ // See comment in minMaxDouble.
+ compareFloat(first, first);
+ ma_vmov_f32(first, srcDest, Assembler::VFP_Unordered);
+ ma_b(&done, Assembler::VFP_Unordered);
+
+ bind(&returnSecond);
+ ma_vmov_f32(second, srcDest);
+
+ bind(&done);
+}
+
+void MacroAssemblerARMCompat::compareDouble(FloatRegister lhs,
+ FloatRegister rhs) {
+ // Compare the doubles, setting vector status flags.
+ if (rhs.isMissing()) {
+ ma_vcmpz(lhs);
+ } else {
+ ma_vcmp(lhs, rhs);
+ }
+
+ // Move vector status bits to normal status flags.
+ as_vmrs(pc);
+}
+
+void MacroAssemblerARMCompat::compareFloat(FloatRegister lhs,
+ FloatRegister rhs) {
+  // Compare the floats, setting vector status flags.
+ if (rhs.isMissing()) {
+ as_vcmpz(VFPRegister(lhs).singleOverlay());
+ } else {
+ as_vcmp(VFPRegister(lhs).singleOverlay(), VFPRegister(rhs).singleOverlay());
+ }
+
+ // Move vector status bits to normal status flags.
+ as_vmrs(pc);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testInt32(
+ Assembler::Condition cond, const ValueOperand& value) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_INT32));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBoolean(
+ Assembler::Condition cond, const ValueOperand& value) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testDouble(
+ Assembler::Condition cond, const ValueOperand& value) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ ScratchRegisterScope scratch(asMasm());
+ ma_cmp(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR), scratch);
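+  // With 32-bit nunboxing, every double's high word is (unsigned) below
+  // JSVAL_TAG_CLEAR while every non-double tag is at or above it, so an
+  // unsigned Below/AboveOrEqual comparison answers the type test.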
+ return actual;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testNull(
+ Assembler::Condition cond, const ValueOperand& value) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_NULL));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testUndefined(
+ Assembler::Condition cond, const ValueOperand& value) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testString(
+ Assembler::Condition cond, const ValueOperand& value) {
+ return testString(cond, value.typeReg());
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testSymbol(
+ Assembler::Condition cond, const ValueOperand& value) {
+ return testSymbol(cond, value.typeReg());
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBigInt(
+ Assembler::Condition cond, const ValueOperand& value) {
+ return testBigInt(cond, value.typeReg());
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testObject(
+ Assembler::Condition cond, const ValueOperand& value) {
+ return testObject(cond, value.typeReg());
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testNumber(
+ Assembler::Condition cond, const ValueOperand& value) {
+ return testNumber(cond, value.typeReg());
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testMagic(
+ Assembler::Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value.typeReg());
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testPrimitive(
+ Assembler::Condition cond, const ValueOperand& value) {
+ return testPrimitive(cond, value.typeReg());
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testGCThing(
+ Assembler::Condition cond, const ValueOperand& value) {
+ return testGCThing(cond, value.typeReg());
+}
+
+// Register-based tests.
+Assembler::Condition MacroAssemblerARMCompat::testInt32(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBoolean(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testNull(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testUndefined(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testString(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testSymbol(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBigInt(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testObject(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testMagic(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testPrimitive(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag));
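+  // Primitive type tags occupy the range strictly below
+  // ValueUpperExclPrimitiveTag, so an unsigned Below comparison on the tag
+  // answers "is this a primitive?".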
+ return cond == Equal ? Below : AboveOrEqual;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testGCThing(
+ Assembler::Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testGCThing(
+ Assembler::Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ ma_cmp(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testMagic(
+ Assembler::Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testInt32(
+ Assembler::Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testDouble(
+ Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ return testDouble(cond, tag);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBoolean(
+ Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ return testBoolean(cond, tag);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testNull(Condition cond,
+ const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ return testNull(cond, tag);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testUndefined(
+ Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ return testUndefined(cond, tag);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testString(
+ Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ return testString(cond, tag);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testSymbol(
+ Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ return testSymbol(cond, tag);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBigInt(
+ Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ return testBigInt(cond, tag);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testObject(
+ Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ return testObject(cond, tag);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testNumber(
+ Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ return testNumber(cond, tag);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testDouble(Condition cond,
+ Register tag) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ ma_cmp(tag, ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testNumber(Condition cond,
+ Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp(tag, ImmTag(JS::detail::ValueUpperInclNumberTag));
+ return cond == Equal ? BelowOrEqual : Above;
+}
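+
+// Sketch of the double/number tests above (assuming the nunbox32 encoding,
+// where any type word strictly below JSVAL_TAG_CLEAR is the high half of a
+// double and JS::detail::ValueUpperInclNumberTag is the largest numeric tag):
+//   cmp tag, #JSVAL_TAG_CLEAR            ; Below        <=> double
+//   cmp tag, #ValueUpperInclNumberTag    ; BelowOrEqual <=> int32 or double
+// so "is a double" / "is a number" again become unsigned range checks rather
+// than equality tests.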
+
+Assembler::Condition MacroAssemblerARMCompat::testUndefined(
+ Condition cond, const BaseIndex& src) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(src, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testNull(Condition cond,
+ const BaseIndex& src) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(src, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBoolean(
+ Condition cond, const BaseIndex& src) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(src, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testString(Condition cond,
+ const BaseIndex& src) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(src, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testSymbol(Condition cond,
+ const BaseIndex& src) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(src, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBigInt(Condition cond,
+ const BaseIndex& src) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(src, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testInt32(Condition cond,
+ const BaseIndex& src) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(src, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testObject(Condition cond,
+ const BaseIndex& src) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(src, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testDouble(Condition cond,
+ const BaseIndex& src) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(src, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testMagic(
+ Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ ma_cmp(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testGCThing(
+ Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ Register tag = extractTag(address, scratch);
+ ma_cmp(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+}
+
+// Unboxing code.
+void MacroAssemblerARMCompat::unboxNonDouble(const ValueOperand& operand,
+ Register dest, JSValueType type) {
+ auto movPayloadToDest = [&]() {
+ if (operand.payloadReg() != dest) {
+ ma_mov(operand.payloadReg(), dest, LeaveCC);
+ }
+ };
+ if (!JitOptions.spectreValueMasking) {
+ movPayloadToDest();
+ return;
+ }
+
+ // Spectre mitigation: We zero the payload if the tag does not match the
+ // expected type and if this is a pointer type.
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ movPayloadToDest();
+ return;
+ }
+
+  // Move the payload into the destination register, then zero it if the tag
+  // does not correspond to the given type.
+ ma_cmp(operand.typeReg(), ImmType(type));
+ movPayloadToDest();
+ ma_mov(Imm32(0), dest, NotEqual);
+}
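+
+// A minimal sketch of the Spectre masking emitted above for a pointer-typed
+// unbox (register names are illustrative only):
+//   cmp   rType, #ImmType(type)     ; does the tag match?
+//   mov   rDest, rPayload           ; unconditionally move the payload
+//   movne rDest, #0                 ; wrong tag => zero the speculated pointer
+// For INT32/BOOLEAN the payload is not a pointer, so no masking is needed and
+// the plain move suffices.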
+
+void MacroAssemblerARMCompat::unboxNonDouble(const Address& src, Register dest,
+ JSValueType type) {
+ ScratchRegisterScope scratch(asMasm());
+ if (!JitOptions.spectreValueMasking) {
+ ma_ldr(ToPayload(src), dest, scratch);
+ return;
+ }
+
+ // Spectre mitigation: We zero the payload if the tag does not match the
+ // expected type and if this is a pointer type.
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_ldr(ToPayload(src), dest, scratch);
+ return;
+ }
+
+  // Load the payload into the destination register if the tag corresponds to
+  // the given type; otherwise zero the destination.
+ ma_ldr(ToType(src), scratch, scratch);
+ ma_cmp(scratch, ImmType(type));
+ ma_ldr(ToPayload(src), dest, scratch, Offset, Equal);
+ ma_mov(Imm32(0), dest, NotEqual);
+}
+
+void MacroAssemblerARMCompat::unboxNonDouble(const BaseIndex& src,
+ Register dest, JSValueType type) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_alu(src.base, lsl(src.index, src.scale), scratch2, OpAdd);
+ Address value(scratch2, src.offset);
+ unboxNonDouble(value, dest, type);
+}
+
+void MacroAssemblerARMCompat::unboxDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ MOZ_ASSERT(dest.isDouble());
+ as_vxfer(operand.payloadReg(), operand.typeReg(), VFPRegister(dest),
+ CoreToFloat);
+}
+
+void MacroAssemblerARMCompat::unboxDouble(const Address& src,
+ FloatRegister dest) {
+ MOZ_ASSERT(dest.isDouble());
+ loadDouble(src, dest);
+}
+
+void MacroAssemblerARMCompat::unboxDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ MOZ_ASSERT(dest.isDouble());
+ loadDouble(src, dest);
+}
+
+void MacroAssemblerARMCompat::unboxValue(const ValueOperand& src,
+ AnyRegister dest, JSValueType type) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.payloadReg(), dest.fpu());
+ ma_b(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr(), type);
+ }
+}
+
+void MacroAssemblerARMCompat::boxDouble(FloatRegister src,
+ const ValueOperand& dest,
+ FloatRegister) {
+ as_vxfer(dest.payloadReg(), dest.typeReg(), VFPRegister(src), FloatToCore);
+}
+
+void MacroAssemblerARMCompat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest) {
+ if (src != dest.payloadReg()) {
+ ma_mov(src, dest.payloadReg());
+ }
+ ma_mov(ImmType(type), dest.typeReg());
+}
+
+void MacroAssemblerARMCompat::boolValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ VFPRegister d = VFPRegister(dest);
+ loadConstantDouble(1.0, dest);
+ as_cmp(operand.payloadReg(), Imm8(0));
+ // If the source is 0, then subtract the dest from itself, producing 0.
+ as_vsub(d, d, d, Equal);
+}
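+
+// The boolean-to-double conversion above is branchless: the constant 1.0 is
+// loaded first, and if the boolean payload compares equal to zero a
+// conditional vsub turns it into 0.0. Roughly (register names illustrative,
+// and the 1.0 may come from a constant pool rather than a vmov immediate):
+//   <load 1.0 into d>
+//   cmp    rPayload, #0
+//   vsubeq d, d, d        ; d - d == 0.0 when the boolean was false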
+
+void MacroAssemblerARMCompat::int32ValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ // Transfer the integral value to a floating point register.
+ VFPRegister vfpdest = VFPRegister(dest);
+ as_vxfer(operand.payloadReg(), InvalidReg, vfpdest.sintOverlay(),
+ CoreToFloat);
+ // Convert the value to a double.
+ as_vcvt(vfpdest, vfpdest.sintOverlay());
+}
+
+void MacroAssemblerARMCompat::boolValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest) {
+ VFPRegister d = VFPRegister(dest).singleOverlay();
+ loadConstantFloat32(1.0, dest);
+ as_cmp(operand.payloadReg(), Imm8(0));
+ // If the source is 0, then subtract the dest from itself, producing 0.
+ as_vsub(d, d, d, Equal);
+}
+
+void MacroAssemblerARMCompat::int32ValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest) {
+ // Transfer the integral value to a floating point register.
+ VFPRegister vfpdest = VFPRegister(dest).singleOverlay();
+ as_vxfer(operand.payloadReg(), InvalidReg, vfpdest.sintOverlay(),
+ CoreToFloat);
+ // Convert the value to a float.
+ as_vcvt(vfpdest, vfpdest.sintOverlay());
+}
+
+void MacroAssemblerARMCompat::loadConstantFloat32(float f, FloatRegister dest) {
+ ma_vimm_f32(f, dest);
+}
+
+void MacroAssemblerARMCompat::loadInt32OrDouble(const Address& src,
+ FloatRegister dest) {
+ Label notInt32, end;
+
+ // If it's an int, convert to a double.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_ldr(ToType(src), scratch, scratch2);
+ asMasm().branchTestInt32(Assembler::NotEqual, scratch, &notInt32);
+ ma_ldr(ToPayload(src), scratch, scratch2);
+ convertInt32ToDouble(scratch, dest);
+ ma_b(&end);
+ }
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_vldr(src, dest, scratch);
+ }
+ bind(&end);
+}
+
+void MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index,
+ FloatRegister dest,
+ int32_t shift) {
+ Label notInt32, end;
+
+ static_assert(NUNBOX32_PAYLOAD_OFFSET == 0);
+
+ ScratchRegisterScope scratch(asMasm());
+
+ // If it's an int, convert it to double.
+ ma_alu(base, lsl(index, shift), scratch, OpAdd);
+
+ // Since we only have one scratch register, we need to stomp over it with
+ // the tag.
+ ma_ldr(DTRAddr(scratch, DtrOffImm(NUNBOX32_TYPE_OFFSET)), scratch);
+ asMasm().branchTestInt32(Assembler::NotEqual, scratch, &notInt32);
+
+ // Implicitly requires NUNBOX32_PAYLOAD_OFFSET == 0: no offset provided
+ ma_ldr(DTRAddr(base, DtrRegImmShift(index, LSL, shift)), scratch);
+ convertInt32ToDouble(scratch, dest);
+ ma_b(&end);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+  // First, recompute the effective address that had been stored in the
+  // scratch register, since the scratch register was overwritten when loading
+  // the type.
+ ma_alu(base, lsl(index, shift), scratch, OpAdd);
+ ma_vldr(VFPAddr(scratch, VFPOffImm(0)), dest);
+ bind(&end);
+}
+
+void MacroAssemblerARMCompat::loadConstantDouble(double dp,
+ FloatRegister dest) {
+ ma_vimm(dp, dest);
+}
+
+// Treat the value as a boolean, and set condition codes accordingly.
+Assembler::Condition MacroAssemblerARMCompat::testInt32Truthy(
+ bool truthy, const ValueOperand& operand) {
+ ma_tst(operand.payloadReg(), operand.payloadReg());
+ return truthy ? NonZero : Zero;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBooleanTruthy(
+ bool truthy, const ValueOperand& operand) {
+ ma_tst(operand.payloadReg(), operand.payloadReg());
+ return truthy ? NonZero : Zero;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testDoubleTruthy(
+ bool truthy, FloatRegister reg) {
+ as_vcmpz(VFPRegister(reg));
+ as_vmrs(pc);
+ as_cmp(r0, O2Reg(r0), Overflow);
+ return truthy ? NonZero : Zero;
+}
+
+Register MacroAssemblerARMCompat::extractObject(const Address& address,
+ Register scratch) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(ToPayload(address), scratch, scratch2);
+ return scratch;
+}
+
+Register MacroAssemblerARMCompat::extractTag(const Address& address,
+ Register scratch) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(ToType(address), scratch, scratch2);
+ return scratch;
+}
+
+Register MacroAssemblerARMCompat::extractTag(const BaseIndex& address,
+ Register scratch) {
+ ma_alu(address.base, lsl(address.index, address.scale), scratch, OpAdd,
+ LeaveCC);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common (ARM too now) interface.
+/////////////////////////////////////////////////////////////////
+void MacroAssemblerARMCompat::storeValue(ValueOperand val, const Address& dst) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_str(val.payloadReg(), ToPayload(dst), scratch2);
+ ma_str(val.typeReg(), ToType(dst), scratch2);
+}
+
+void MacroAssemblerARMCompat::storeValue(ValueOperand val,
+ const BaseIndex& dest) {
+ ScratchRegisterScope scratch(asMasm());
+
+ if (isValueDTRDCandidate(val) && Abs(dest.offset) <= 255) {
+ Register tmpIdx;
+ if (dest.offset == 0) {
+ if (dest.scale == TimesOne) {
+ tmpIdx = dest.index;
+ } else {
+ ma_lsl(Imm32(dest.scale), dest.index, scratch);
+ tmpIdx = scratch;
+ }
+ ma_strd(val.payloadReg(), val.typeReg(),
+ EDtrAddr(dest.base, EDtrOffReg(tmpIdx)));
+ } else {
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+ ma_strd(val.payloadReg(), val.typeReg(),
+ EDtrAddr(scratch, EDtrOffImm(dest.offset)));
+ }
+ } else {
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+ storeValue(val, Address(scratch, dest.offset));
+ }
+}
+
+void MacroAssemblerARMCompat::loadValue(const BaseIndex& addr,
+ ValueOperand val) {
+ ScratchRegisterScope scratch(asMasm());
+
+ if (isValueDTRDCandidate(val) && Abs(addr.offset) <= 255) {
+ Register tmpIdx;
+ if (addr.offset == 0) {
+ if (addr.scale == TimesOne) {
+ // If the offset register is the same as one of the destination
+ // registers, LDRD's behavior is undefined. Use the scratch
+ // register to avoid this.
+ if (val.aliases(addr.index)) {
+ ma_mov(addr.index, scratch);
+ tmpIdx = scratch;
+ } else {
+ tmpIdx = addr.index;
+ }
+ } else {
+ ma_lsl(Imm32(addr.scale), addr.index, scratch);
+ tmpIdx = scratch;
+ }
+ ma_ldrd(EDtrAddr(addr.base, EDtrOffReg(tmpIdx)), val.payloadReg(),
+ val.typeReg());
+ } else {
+ ma_alu(addr.base, lsl(addr.index, addr.scale), scratch, OpAdd);
+ ma_ldrd(EDtrAddr(scratch, EDtrOffImm(addr.offset)), val.payloadReg(),
+ val.typeReg());
+ }
+ } else {
+ ma_alu(addr.base, lsl(addr.index, addr.scale), scratch, OpAdd);
+ loadValue(Address(scratch, addr.offset), val);
+ }
+}
+
+void MacroAssemblerARMCompat::loadValue(Address src, ValueOperand val) {
+ // TODO: copy this code into a generic function that acts on all sequences
+ // of memory accesses
+ if (isValueDTRDCandidate(val)) {
+ // If the value we want is in two consecutive registers starting with an
+ // even register, they can be combined as a single ldrd.
+ int offset = src.offset;
+ if (offset < 256 && offset > -256) {
+ ma_ldrd(EDtrAddr(src.base, EDtrOffImm(src.offset)), val.payloadReg(),
+ val.typeReg());
+ return;
+ }
+ }
+  // If the payload register number is lower than the type register number,
+  // then we may be able to use an ldm instruction.
+
+ if (val.payloadReg().code() < val.typeReg().code()) {
+ if (src.offset <= 4 && src.offset >= -8 && (src.offset & 3) == 0) {
+      // Each of the four offsets -8, -4, 0, 4 corresponds exactly to one of
+      // LDM{DB, DA, IA, IB}.
+ DTMMode mode;
+ switch (src.offset) {
+ case -8:
+ mode = DB;
+ break;
+ case -4:
+ mode = DA;
+ break;
+ case 0:
+ mode = IA;
+ break;
+ case 4:
+ mode = IB;
+ break;
+ default:
+ MOZ_CRASH("Bogus Offset for LoadValue as DTM");
+ }
+ startDataTransferM(IsLoad, src.base, mode);
+ transferReg(val.payloadReg());
+ transferReg(val.typeReg());
+ finishDataTransfer();
+ return;
+ }
+ }
+
+ loadUnalignedValue(src, val);
+}
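+
+// Worked example for the fast paths above (register choices illustrative):
+//   payload=r0, type=r1, offset 0   -> ldrd r0, r1, [base]
+//     (consecutive even/odd pair, small offset: a single LDRD)
+//   payload=r1, type=r3, offset 4   -> ldmib base, {r1, r3}
+//     (not an LDRD pair, but payload < type and offset 4 maps to IB)
+// Anything else falls back to loadUnalignedValue(), i.e. two plain ldr
+// instructions.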
+
+void MacroAssemblerARMCompat::loadUnalignedValue(const Address& src,
+ ValueOperand dest) {
+ Address payload = ToPayload(src);
+ Address type = ToType(src);
+
+ // Ensure that loading the payload does not erase the pointer to the Value
+ // in memory.
+ if (type.base != dest.payloadReg()) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(payload, dest.payloadReg(), scratch2);
+ ma_ldr(type, dest.typeReg(), scratch2);
+ } else {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(type, dest.typeReg(), scratch2);
+ ma_ldr(payload, dest.payloadReg(), scratch2);
+ }
+}
+
+void MacroAssemblerARMCompat::tagValue(JSValueType type, Register payload,
+ ValueOperand dest) {
+ MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
+ if (payload != dest.payloadReg()) {
+ ma_mov(payload, dest.payloadReg());
+ }
+ ma_mov(ImmType(type), dest.typeReg());
+}
+
+void MacroAssemblerARMCompat::pushValue(ValueOperand val) {
+ ma_push(val.typeReg());
+ ma_push(val.payloadReg());
+}
+
+void MacroAssemblerARMCompat::pushValue(const Address& addr) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_ldr(ToType(addr), scratch, scratch2);
+ ma_push(scratch);
+ ma_ldr(ToPayloadAfterStackPush(addr), scratch, scratch2);
+ ma_push(scratch);
+}
+
+void MacroAssemblerARMCompat::pushValue(const BaseIndex& addr,
+ Register scratch) {
+ computeEffectiveAddress(addr, scratch);
+ pushValue(Address(scratch, 0));
+}
+
+void MacroAssemblerARMCompat::popValue(ValueOperand val) {
+ ma_pop(val.payloadReg());
+ ma_pop(val.typeReg());
+}
+
+void MacroAssemblerARMCompat::storePayload(const Value& val,
+ const Address& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (val.isGCThing()) {
+ ma_mov(ImmGCPtr(val.toGCThing()), scratch);
+ } else {
+ ma_mov(Imm32(val.toNunboxPayload()), scratch);
+ }
+ ma_str(scratch, ToPayload(dest), scratch2);
+}
+
+void MacroAssemblerARMCompat::storePayload(Register src, const Address& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_str(src, ToPayload(dest), scratch);
+}
+
+void MacroAssemblerARMCompat::storePayload(const Value& val,
+ const BaseIndex& dest) {
+ unsigned shift = ScaleToShift(dest.scale);
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (val.isGCThing()) {
+ ma_mov(ImmGCPtr(val.toGCThing()), scratch);
+ } else {
+ ma_mov(Imm32(val.toNunboxPayload()), scratch);
+ }
+
+ // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index
+ // << shift + imm] cannot be encoded into a single instruction, and cannot
+ // be integrated into the as_dtr call.
+ static_assert(NUNBOX32_PAYLOAD_OFFSET == 0);
+
+ // If an offset is used, modify the base so that a [base + index << shift]
+ // instruction format can be used.
+ if (dest.offset != 0) {
+ ma_add(dest.base, Imm32(dest.offset), dest.base, scratch2);
+ }
+
+ as_dtr(IsStore, 32, Offset, scratch,
+ DTRAddr(dest.base, DtrRegImmShift(dest.index, LSL, shift)));
+
+ // Restore the original value of the base, if necessary.
+ if (dest.offset != 0) {
+ ma_sub(dest.base, Imm32(dest.offset), dest.base, scratch);
+ }
+}
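+
+// Rough shape of what the BaseIndex payload store above emits when the
+// BaseIndex carries a non-zero offset (registers illustrative):
+//   add  base, base, #offset          ; fold the offset into the base...
+//   str  scratch, [base, index, LSL #shift]
+//   sub  base, base, #offset          ; ...and undo it afterwards
+// Mutating and restoring the base keeps the store encodable as a single
+// register-offset str, since [base + index << shift + imm] has no encoding.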
+
+void MacroAssemblerARMCompat::storePayload(Register src,
+ const BaseIndex& dest) {
+ unsigned shift = ScaleToShift(dest.scale);
+ MOZ_ASSERT(shift < 32);
+
+ ScratchRegisterScope scratch(asMasm());
+
+ // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index
+ // << shift + imm] cannot be encoded into a single instruction, and cannot
+ // be integrated into the as_dtr call.
+ static_assert(NUNBOX32_PAYLOAD_OFFSET == 0);
+
+ // Save/restore the base if the BaseIndex has an offset, as above.
+ if (dest.offset != 0) {
+ ma_add(dest.base, Imm32(dest.offset), dest.base, scratch);
+ }
+
+  // Technically, shift > -32 can be handled by changing LSL to ASR, but that
+  // should never come up, and this is one less code path to get wrong.
+ as_dtr(IsStore, 32, Offset, src,
+ DTRAddr(dest.base, DtrRegImmShift(dest.index, LSL, shift)));
+
+ if (dest.offset != 0) {
+ ma_sub(dest.base, Imm32(dest.offset), dest.base, scratch);
+ }
+}
+
+void MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, const Address& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_mov(tag, scratch);
+ ma_str(scratch, ToType(dest), scratch2);
+}
+
+void MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest) {
+ Register base = dest.base;
+ Register index = dest.index;
+ unsigned shift = ScaleToShift(dest.scale);
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ MOZ_ASSERT(base != scratch && base != scratch2);
+ MOZ_ASSERT(index != scratch && index != scratch2);
+
+ ma_add(base, Imm32(dest.offset + NUNBOX32_TYPE_OFFSET), scratch2, scratch);
+ ma_mov(tag, scratch);
+ ma_str(scratch, DTRAddr(scratch2, DtrRegImmShift(index, LSL, shift)));
+}
+
+void MacroAssemblerARM::ma_call(ImmPtr dest) {
+ ma_movPatchable(dest, CallReg, Always);
+ as_blx(CallReg);
+}
+
+void MacroAssemblerARMCompat::breakpoint() { as_bkpt(); }
+
+void MacroAssemblerARMCompat::simulatorStop(const char* msg) {
+#ifdef JS_SIMULATOR_ARM
+ MOZ_ASSERT(sizeof(char*) == 4);
+ writeInst(0xefffffff);
+ writeInst((int)msg);
+#endif
+}
+
+void MacroAssemblerARMCompat::ensureDouble(const ValueOperand& source,
+ FloatRegister dest, Label* failure) {
+ Label isDouble, done;
+ asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerARMCompat::breakpoint(Condition cc) {
+ ma_ldr(DTRAddr(r12, DtrRegImmShift(r12, LSL, 0, IsDown)), r12, Offset, cc);
+}
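+
+// Presumably (an inference, not documented here): the conditional breakpoint
+// above emits "ldr r12, [r12, -r12]", whose effective address is always zero,
+// so it faults only when the condition cc holds.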
+
+void MacroAssemblerARMCompat::checkStackAlignment() {
+ asMasm().assertStackAlignment(ABIStackAlignment);
+}
+
+void MacroAssemblerARMCompat::handleFailureWithHandlerTail(
+ Label* profilerExitTail, Label* bailoutTail) {
+ // Reserve space for exception information.
+ int size = (sizeof(ResumeFromException) + 7) & ~7;
+
+ Imm8 size8(size);
+ as_sub(sp, sp, size8);
+ ma_mov(sp, r0);
+
+ // Call the handler.
+ using Fn = void (*)(ResumeFromException * rfe);
+ asMasm().setupUnalignedABICall(r1);
+ asMasm().passABIArg(r0);
+ asMasm().callWithABI<Fn, HandleException>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, ResumeFromException::offsetOfKind()), r0, scratch);
+ }
+
+ asMasm().branch32(Assembler::Equal, r0,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, r0,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, r0,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::WasmCatch),
+ &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, ResumeFromException::offsetOfFramePointer()), r11,
+ scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp,
+ scratch);
+ }
+
+  // Return via the Ion calling convention: pop the return address from the
+  // stack directly into pc (ldr pc, [sp], #4).
+ as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(4)));
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, ResumeFromException::offsetOfTarget()), r0, scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfFramePointer()), r11,
+ scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp,
+ scratch);
+ }
+ jump(r0);
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by the finally block: the exception and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(r1, r2);
+ loadValue(Operand(sp, ResumeFromException::offsetOfException()), exception);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, ResumeFromException::offsetOfTarget()), r0, scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfFramePointer()), r11,
+ scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp,
+ scratch);
+ }
+
+ pushValue(exception);
+ pushValue(BooleanValue(true));
+ jump(r0);
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, ResumeFromException::offsetOfFramePointer()), r11,
+ scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp,
+ scratch);
+ }
+ loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(sp, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, ResumeFromException::offsetOfFramePointer()), r11,
+ scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp,
+ scratch);
+ }
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to
+ // caller frame before returning. This code is shared by ForcedReturnIon
+ // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ma_mov(r11, sp);
+ pop(r11);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to the
+ // bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), r2,
+ scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp,
+ scratch);
+ ma_mov(Imm32(1), ReturnReg);
+ }
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, ResumeFromException::offsetOfFramePointer()), r11,
+ scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp,
+ scratch);
+ ma_mov(Imm32(int32_t(wasm::FailInstanceReg)), InstanceReg);
+ }
+ as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(4)));
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(Address(sp, ResumeFromException::offsetOfTarget()), r1, scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfFramePointer()), r11,
+ scratch);
+ ma_ldr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp,
+ scratch);
+ }
+ jump(r1);
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testStringTruthy(
+ bool truthy, const ValueOperand& value) {
+ Register string = value.payloadReg();
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_dtr(IsLoad, string, Imm32(JSString::offsetOfLength()), scratch, scratch2);
+ as_cmp(scratch, Imm8(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+}
+
+Assembler::Condition MacroAssemblerARMCompat::testBigIntTruthy(
+ bool truthy, const ValueOperand& value) {
+ Register bi = value.payloadReg();
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_dtr(IsLoad, bi, Imm32(BigInt::offsetOfDigitLength()), scratch, scratch2);
+ as_cmp(scratch, Imm8(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+}
+
+void MacroAssemblerARMCompat::floor(FloatRegister input, Register output,
+ Label* bail) {
+ Label handleZero;
+ Label handleNeg;
+ Label fin;
+
+ ScratchDoubleScope scratchDouble(asMasm());
+
+ compareDouble(input, NoVFPRegister);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handleNeg, Assembler::Signed);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+
+  // The argument is a positive number; truncation is the path to glory. Since
+  // it is known to be > 0.0, explicitly convert via the larger unsigned range,
+  // so that a value that rounds to INT_MAX is explicitly different from an
+  // argument that clamps to INT_MAX.
+ ma_vcvt_F64_U32(input, scratchDouble.uintOverlay());
+ ma_vxfer(scratchDouble.uintOverlay(), output);
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(&fin);
+
+ bind(&handleZero);
+ // Move the top word of the double into the output reg, if it is non-zero,
+ // then the original value was -0.0.
+ as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ bind(&handleNeg);
+ // Negative case, negate, then start dancing.
+ ma_vneg(input, input);
+ ma_vcvt_F64_U32(input, scratchDouble.uintOverlay());
+ ma_vxfer(scratchDouble.uintOverlay(), output);
+ ma_vcvt_U32_F64(scratchDouble.uintOverlay(), scratchDouble);
+ compareDouble(scratchDouble, input);
+ as_add(output, output, Imm8(1), LeaveCC, NotEqual);
+ // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+ // result will still be a negative number.
+ as_rsb(output, output, Imm8(0), SetCC);
+ // Flip the negated input back to its original value.
+ ma_vneg(input, input);
+ // If the result looks non-negative, then this value didn't actually fit
+ // into the int range, and special handling is required. Zero is also caught
+ // by this case, but floor of a negative number should never be zero.
+ ma_b(bail, NotSigned);
+
+ bind(&fin);
+}
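+
+// Worked example of the negative path above (illustrative): floor(-3.5)
+//   negate            -> 3.5
+//   convert F64->U32  -> 3       (truncation)
+//   3.0 != 3.5        -> add 1   -> 4
+//   rsb (negate)      -> -4      == floor(-3.5)
+// For an exact input such as -3.0 the compare is equal, no 1 is added, and
+// the result is -3.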
+
+void MacroAssemblerARMCompat::floorf(FloatRegister input, Register output,
+ Label* bail) {
+ Label handleZero;
+ Label handleNeg;
+ Label fin;
+ compareFloat(input, NoVFPRegister);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handleNeg, Assembler::Signed);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+
+  // The argument is a positive number; truncation is the path to glory. Since
+  // it is known to be > 0.0, explicitly convert via the larger unsigned range,
+  // so that a value that rounds to INT_MAX is explicitly different from an
+  // argument that clamps to INT_MAX.
+ {
+ ScratchFloat32Scope scratch(asMasm());
+ ma_vcvt_F32_U32(input, scratch.uintOverlay());
+ ma_vxfer(VFPRegister(scratch).uintOverlay(), output);
+ }
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(&fin);
+
+ bind(&handleZero);
+  // Move the whole float32 into the output reg; if it is non-zero, then the
+  // original value was -0.0.
+ as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore,
+ Always, 0);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ bind(&handleNeg);
+ // Negative case, negate, then start dancing.
+ {
+ ScratchFloat32Scope scratch(asMasm());
+ ma_vneg_f32(input, input);
+ ma_vcvt_F32_U32(input, scratch.uintOverlay());
+ ma_vxfer(VFPRegister(scratch).uintOverlay(), output);
+ ma_vcvt_U32_F32(scratch.uintOverlay(), scratch);
+ compareFloat(scratch, input);
+ as_add(output, output, Imm8(1), LeaveCC, NotEqual);
+ }
+ // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+ // result will still be a negative number.
+ as_rsb(output, output, Imm8(0), SetCC);
+ // Flip the negated input back to its original value.
+ ma_vneg_f32(input, input);
+ // If the result looks non-negative, then this value didn't actually fit
+ // into the int range, and special handling is required. Zero is also caught
+ // by this case, but floor of a negative number should never be zero.
+ ma_b(bail, NotSigned);
+
+ bind(&fin);
+}
+
+void MacroAssemblerARMCompat::ceil(FloatRegister input, Register output,
+ Label* bail) {
+ Label handleZero;
+ Label handlePos;
+ Label fin;
+
+ compareDouble(input, NoVFPRegister);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handlePos, Assembler::NotSigned);
+
+ ScratchDoubleScope scratchDouble(asMasm());
+
+ // We are in the ]-Inf; 0[ range
+ // If we are in the ]-1; 0[ range => bailout
+ loadConstantDouble(-1.0, scratchDouble);
+ compareDouble(input, scratchDouble);
+ ma_b(bail, Assembler::GreaterThan);
+
+ // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
+ // computed with direct truncation here (x > 0).
+ ma_vneg(input, scratchDouble);
+ FloatRegister ScratchUIntReg = scratchDouble.uintOverlay();
+ ma_vcvt_F64_U32(scratchDouble, ScratchUIntReg);
+ ma_vxfer(ScratchUIntReg, output);
+ ma_neg(output, output, SetCC);
+ ma_b(bail, NotSigned);
+ ma_b(&fin);
+
+ // Test for 0.0 / -0.0: if the top word of the input double is not zero,
+ // then it was -0 and we need to bail out.
+ bind(&handleZero);
+ as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ // We are in the ]0; +inf] range: truncate integer values, maybe add 1 for
+ // non integer values, maybe bail if overflow.
+ bind(&handlePos);
+ ma_vcvt_F64_U32(input, ScratchUIntReg);
+ ma_vxfer(ScratchUIntReg, output);
+ ma_vcvt_U32_F64(ScratchUIntReg, scratchDouble);
+ compareDouble(scratchDouble, input);
+ as_add(output, output, Imm8(1), LeaveCC, NotEqual);
+ // Bail out if the add overflowed or the result is non positive.
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(bail, Zero);
+
+ bind(&fin);
+}
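+
+// Two notes on the negative range above, stated as a sketch rather than a
+// spec: for x in ]-1; 0[ the mathematical result is -0.0, which cannot be
+// represented as an Int32, hence the bailout; for x <= -1 the identity
+// ceil(x) == -floor(-x) lets truncation do the work, e.g. ceil(-2.5):
+//   negate -> 2.5, truncate -> 2, negate -> -2.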
+
+void MacroAssemblerARMCompat::ceilf(FloatRegister input, Register output,
+ Label* bail) {
+ Label handleZero;
+ Label handlePos;
+ Label fin;
+
+ compareFloat(input, NoVFPRegister);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handlePos, Assembler::NotSigned);
+
+ // We are in the ]-Inf; 0[ range
+ // If we are in the ]-1; 0[ range => bailout
+ {
+ ScratchFloat32Scope scratch(asMasm());
+ loadConstantFloat32(-1.f, scratch);
+ compareFloat(input, scratch);
+ ma_b(bail, Assembler::GreaterThan);
+ }
+
+ // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
+ // computed with direct truncation here (x > 0).
+ {
+ ScratchDoubleScope scratchDouble(asMasm());
+ FloatRegister scratchFloat = scratchDouble.asSingle();
+ FloatRegister scratchUInt = scratchDouble.uintOverlay();
+
+ ma_vneg_f32(input, scratchFloat);
+ ma_vcvt_F32_U32(scratchFloat, scratchUInt);
+ ma_vxfer(scratchUInt, output);
+ ma_neg(output, output, SetCC);
+ ma_b(bail, NotSigned);
+ ma_b(&fin);
+ }
+
+  // Test for 0.0 / -0.0: if the input float32's bits are not zero, then it
+  // was -0 and we need to bail out.
+ bind(&handleZero);
+ as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore,
+ Always, 0);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ // We are in the ]0; +inf] range: truncate integer values, maybe add 1 for
+ // non integer values, maybe bail if overflow.
+ bind(&handlePos);
+ {
+ ScratchDoubleScope scratchDouble(asMasm());
+ FloatRegister scratchFloat = scratchDouble.asSingle();
+ FloatRegister scratchUInt = scratchDouble.uintOverlay();
+
+ ma_vcvt_F32_U32(input, scratchUInt);
+ ma_vxfer(scratchUInt, output);
+ ma_vcvt_U32_F32(scratchUInt, scratchFloat);
+ compareFloat(scratchFloat, input);
+ as_add(output, output, Imm8(1), LeaveCC, NotEqual);
+
+ // Bail on overflow or non-positive result.
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(bail, Zero);
+ }
+
+ bind(&fin);
+}
+
+CodeOffset MacroAssemblerARMCompat::toggledJump(Label* label) {
+ // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
+ BufferOffset b = ma_b(label, Always);
+ CodeOffset ret(b.getOffset());
+ return ret;
+}
+
+CodeOffset MacroAssemblerARMCompat::toggledCall(JitCode* target, bool enabled) {
+ BufferOffset bo = nextOffset();
+ addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ ScratchRegisterScope scratch(asMasm());
+ ma_movPatchable(ImmPtr(target->raw()), scratch, Always);
+ if (enabled) {
+ ma_blx(scratch);
+ } else {
+ ma_nop();
+ }
+ return CodeOffset(bo.getOffset());
+}
+
+void MacroAssemblerARMCompat::round(FloatRegister input, Register output,
+ Label* bail, FloatRegister tmp) {
+ Label handleZero;
+ Label handleNeg;
+ Label fin;
+
+ ScratchDoubleScope scratchDouble(asMasm());
+
+  // Do a compare based on the original value, then do most other things based
+  // on the adjusted value.
+ ma_vcmpz(input);
+ // Since we already know the sign bit, flip all numbers to be positive,
+ // stored in tmp.
+ ma_vabs(input, tmp);
+ as_vmrs(pc);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handleNeg, Assembler::Signed);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+
+  // The argument is a positive number; truncation is the path to glory. Since
+  // it is known to be > 0.0, explicitly convert via the larger unsigned range,
+  // so that a value that rounds to INT_MAX is explicitly different from an
+  // argument that clamps to INT_MAX.
+
+ // Add the biggest number less than 0.5 (not 0.5, because adding that to
+ // the biggest number less than 0.5 would undesirably round up to 1), and
+ // store the result into tmp.
+ loadConstantDouble(GetBiggestNumberLessThan(0.5), scratchDouble);
+ ma_vadd(scratchDouble, tmp, tmp);
+
+ ma_vcvt_F64_U32(tmp, scratchDouble.uintOverlay());
+ ma_vxfer(VFPRegister(scratchDouble).uintOverlay(), output);
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(&fin);
+
+ bind(&handleZero);
+ // Move the top word of the double into the output reg, if it is non-zero,
+ // then the original value was -0.0
+ as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ bind(&handleNeg);
+ // Negative case, negate, then start dancing. This number may be positive,
+ // since we added 0.5.
+
+ // Add 0.5 to negative numbers, store the result into tmp
+ loadConstantDouble(0.5, scratchDouble);
+ ma_vadd(scratchDouble, tmp, tmp);
+
+ ma_vcvt_F64_U32(tmp, scratchDouble.uintOverlay());
+ ma_vxfer(VFPRegister(scratchDouble).uintOverlay(), output);
+
+ // -output is now a correctly rounded value, unless the original value was
+ // exactly halfway between two integers, at which point, it has been rounded
+ // away from zero, when it should be rounded towards \infty.
+ ma_vcvt_U32_F64(scratchDouble.uintOverlay(), scratchDouble);
+ compareDouble(scratchDouble, tmp);
+ as_sub(output, output, Imm8(1), LeaveCC, Equal);
+ // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+ // result will still be a negative number.
+ as_rsb(output, output, Imm8(0), SetCC);
+
+ // If the result looks non-negative, then this value didn't actually fit
+ // into the int range, and special handling is required, or it was zero,
+ // which means the result is actually -0.0 which also requires special
+ // handling.
+ ma_b(bail, NotSigned);
+
+ bind(&fin);
+}
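+
+// Worked example of the halfway correction above (illustrative):
+// round(-2.5) must be -2 (halfway cases round toward +Infinity), so
+//   |x| + 0.5 = 3.0, truncate -> 3, conversion was exact -> subtract 1 -> 2,
+//   negate -> -2.
+// For round(-2.3): 2.8 truncates to 2 (not exact, no adjustment), giving -2.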
+
+void MacroAssemblerARMCompat::roundf(FloatRegister input, Register output,
+ Label* bail, FloatRegister tmp) {
+ Label handleZero;
+ Label handleNeg;
+ Label fin;
+
+ ScratchFloat32Scope scratchFloat(asMasm());
+
+  // Do a compare based on the original value, then do most other things based
+  // on the adjusted value.
+ compareFloat(input, NoVFPRegister);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handleNeg, Assembler::Signed);
+
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+
+  // The argument is a positive number; truncation is the path to glory. Since
+  // it is known to be > 0.0, explicitly convert via the larger unsigned range,
+  // so that a value that rounds to INT_MAX is explicitly different from an
+  // argument that clamps to INT_MAX.
+
+ // Add the biggest number less than 0.5f (not 0.5f, because adding that to
+ // the biggest number less than 0.5f would undesirably round up to 1), and
+ // store the result into tmp.
+ loadConstantFloat32(GetBiggestNumberLessThan(0.5f), scratchFloat);
+ ma_vadd_f32(scratchFloat, input, tmp);
+
+ // Note: it doesn't matter whether x + .5 === x or not here, as it doesn't
+ // affect the semantics of the float to unsigned conversion (in particular,
+ // we are not applying any fixup after the operation).
+ ma_vcvt_F32_U32(tmp, scratchFloat.uintOverlay());
+ ma_vxfer(VFPRegister(scratchFloat).uintOverlay(), output);
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ ma_b(&fin);
+
+ bind(&handleZero);
+
+ // Move the whole float32 into the output reg, if it is non-zero, then the
+ // original value was -0.0.
+ as_vxfer(output, InvalidReg, input, FloatToCore, Always, 0);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+ bind(&handleNeg);
+
+ // Add 0.5 to negative numbers, storing the result into tmp.
+ ma_vneg_f32(input, tmp);
+ loadConstantFloat32(0.5f, scratchFloat);
+ ma_vadd_f32(tmp, scratchFloat, scratchFloat);
+
+ // Adding 0.5 to a float input has chances to yield the wrong result, if
+ // the input is too large. In this case, skip the -1 adjustment made below.
+ compareFloat(scratchFloat, tmp);
+
+ // Negative case, negate, then start dancing. This number may be positive,
+ // since we added 0.5.
+  // /!\ The conditional jump afterwards depends on these two instructions
+  // *not* setting the status flags: the flags must stay unchanged after the
+  // comparison above.
+ ma_vcvt_F32_U32(scratchFloat, tmp.uintOverlay());
+ ma_vxfer(VFPRegister(tmp).uintOverlay(), output);
+
+ Label flipSign;
+ ma_b(&flipSign, Equal);
+
+ // -output is now a correctly rounded value, unless the original value was
+ // exactly halfway between two integers, at which point, it has been rounded
+ // away from zero, when it should be rounded towards \infty.
+ ma_vcvt_U32_F32(tmp.uintOverlay(), tmp);
+ compareFloat(tmp, scratchFloat);
+ as_sub(output, output, Imm8(1), LeaveCC, Equal);
+
+ // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+ // result will still be a negative number.
+ bind(&flipSign);
+ as_rsb(output, output, Imm8(0), SetCC);
+
+ // If the result looks non-negative, then this value didn't actually fit
+ // into the int range, and special handling is required, or it was zero,
+ // which means the result is actually -0.0 which also requires special
+ // handling.
+ ma_b(bail, NotSigned);
+
+ bind(&fin);
+}
+
+void MacroAssemblerARMCompat::trunc(FloatRegister input, Register output,
+ Label* bail) {
+ Label handleZero;
+ Label handlePos;
+ Label fin;
+
+ compareDouble(input, NoVFPRegister);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handlePos, Assembler::NotSigned);
+
+ ScratchDoubleScope scratchDouble(asMasm());
+
+ // We are in the ]-Inf; 0[ range
+ // If we are in the ]-1; 0[ range => bailout
+ loadConstantDouble(-1.0, scratchDouble);
+ compareDouble(input, scratchDouble);
+ ma_b(bail, Assembler::GreaterThan);
+
+ // We are in the ]-Inf; -1] range: trunc(x) == -floor(-x) and floor can be
+ // computed with direct truncation here (x > 0).
+ ma_vneg(input, scratchDouble);
+ ma_vcvt_F64_U32(scratchDouble, scratchDouble.uintOverlay());
+ ma_vxfer(scratchDouble.uintOverlay(), output);
+ ma_neg(output, output, SetCC);
+ ma_b(bail, NotSigned);
+ ma_b(&fin);
+
+ // Test for 0.0 / -0.0: if the top word of the input double is not zero,
+ // then it was -0 and we need to bail out.
+ bind(&handleZero);
+ as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+  // We are in the ]0; +inf] range: truncation is the path to glory. Since
+  // it is known to be > 0.0, explicitly convert via the larger unsigned range,
+  // so that a value that rounds to INT_MAX is explicitly different from an
+  // argument that clamps to INT_MAX.
+ bind(&handlePos);
+ ma_vcvt_F64_U32(input, scratchDouble.uintOverlay());
+ ma_vxfer(scratchDouble.uintOverlay(), output);
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+
+ bind(&fin);
+}
+
+void MacroAssemblerARMCompat::truncf(FloatRegister input, Register output,
+ Label* bail) {
+ Label handleZero;
+ Label handlePos;
+ Label fin;
+
+ compareFloat(input, NoVFPRegister);
+ // NaN is always a bail condition, just bail directly.
+ ma_b(bail, Assembler::Overflow);
+ ma_b(&handleZero, Assembler::Equal);
+ ma_b(&handlePos, Assembler::NotSigned);
+
+ // We are in the ]-Inf; 0[ range
+ // If we are in the ]-1; 0[ range => bailout
+ {
+ ScratchFloat32Scope scratch(asMasm());
+ loadConstantFloat32(-1.f, scratch);
+ compareFloat(input, scratch);
+ ma_b(bail, Assembler::GreaterThan);
+ }
+
+ // We are in the ]-Inf; -1] range: trunc(x) == -floor(-x) and floor can be
+ // computed with direct truncation here (x > 0).
+ {
+ ScratchDoubleScope scratchDouble(asMasm());
+ FloatRegister scratchFloat = scratchDouble.asSingle();
+ FloatRegister scratchUInt = scratchDouble.uintOverlay();
+
+ ma_vneg_f32(input, scratchFloat);
+ ma_vcvt_F32_U32(scratchFloat, scratchUInt);
+ ma_vxfer(scratchUInt, output);
+ ma_neg(output, output, SetCC);
+ ma_b(bail, NotSigned);
+ ma_b(&fin);
+ }
+
+  // Test for 0.0 / -0.0: if the input float32's bits are not zero, then it
+  // was -0 and we need to bail out.
+ bind(&handleZero);
+ as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore,
+ Always, 0);
+ as_cmp(output, Imm8(0));
+ ma_b(bail, NonZero);
+ ma_b(&fin);
+
+  // We are in the ]0; +inf] range: the argument is a positive number and
+  // truncation is the path to glory. Since it is known to be > 0.0,
+  // explicitly convert via the larger unsigned range, so that a value that
+  // rounds to INT_MAX is explicitly different from an argument that clamps
+  // to INT_MAX.
+  bind(&handlePos);
+  {
+ {
+ ScratchFloat32Scope scratch(asMasm());
+ ma_vcvt_F32_U32(input, scratch.uintOverlay());
+ ma_vxfer(VFPRegister(scratch).uintOverlay(), output);
+ }
+ ma_mov(output, output, SetCC);
+ ma_b(bail, Signed);
+ }
+
+ bind(&fin);
+}
+
+void MacroAssemblerARMCompat::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerARMCompat::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+MacroAssembler& MacroAssemblerARM::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerARM::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+MacroAssembler& MacroAssemblerARMCompat::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerARMCompat::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ ScratchRegisterScope scratch(*this);
+ if (imm32.value) {
+ ma_sub(imm32, sp, scratch);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void MacroAssembler::flush() { Assembler::flush(); }
+
+void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
+
+// ===============================================================
+// Stack manipulation functions.
+
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+ mozilla::DebugOnly<size_t> framePushedInitial = framePushed();
+
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ if (set.gprs().size() > 1) {
+ adjustFrame(diffG);
+ startDataTransferM(IsStore, StackPointer, DB, WriteBack);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more();
+ ++iter) {
+ diffG -= sizeof(intptr_t);
+ transferReg(*iter);
+ }
+ finishDataTransfer();
+ } else {
+ reserveStack(diffG);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more();
+ ++iter) {
+ diffG -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diffG));
+ }
+ }
+ MOZ_ASSERT(diffG == 0);
+
+ // It's possible that the logic is just fine as it is if the reduced set
+ // maps SIMD pairs to plain doubles and transferMultipleByRuns() stores
+ // and loads doubles.
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ adjustFrame(diffF);
+ diffF += transferMultipleByRuns(set.fpus(), IsStore, StackPointer, DB);
+ MOZ_ASSERT(diffF == 0);
+
+ MOZ_ASSERT(framePushed() - framePushedInitial ==
+ PushRegsInMaskSizeInBytes(set));
+}
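+
+// Shape of the fast path above, as a sketch (register set illustrative):
+// pushing {r4, r5, r11, lr} plus {d8, d9} comes out roughly as
+//   stmdb  sp!, {r4, r5, r11, lr}
+//   vstmdb sp!, {d8, d9}          ; emitted by transferMultipleByRuns()
+// i.e. one store-multiple per contiguous run instead of one store per
+// register.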
+
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register scratch) {
+ mozilla::DebugOnly<size_t> offsetInitial = dest.offset;
+
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ MOZ_ASSERT(dest.offset >= diffF + diffG);
+
+ if (set.gprs().size() > 1) {
+ computeEffectiveAddress(dest, scratch);
+
+ startDataTransferM(IsStore, scratch, DB, WriteBack);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more();
+ ++iter) {
+ diffG -= sizeof(intptr_t);
+ dest.offset -= sizeof(intptr_t);
+ transferReg(*iter);
+ }
+ finishDataTransfer();
+ } else {
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more();
+ ++iter) {
+ diffG -= sizeof(intptr_t);
+ dest.offset -= sizeof(intptr_t);
+ storePtr(*iter, dest);
+ }
+ }
+ MOZ_ASSERT(diffG == 0);
+ (void)diffG;
+
+ // See above.
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ MOZ_ASSERT(diffF >= 0);
+ if (diffF > 0) {
+ computeEffectiveAddress(dest, scratch);
+ diffF += transferMultipleByRuns(set.fpus(), IsStore, scratch, DB);
+ }
+
+ MOZ_ASSERT(diffF == 0);
+
+ // "The amount of space actually used does not exceed what
+ // `PushRegsInMaskSizeInBytes` claims will be used."
+ MOZ_ASSERT(offsetInitial - dest.offset <= PushRegsInMaskSizeInBytes(set));
+}
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ mozilla::DebugOnly<size_t> framePushedInitial = framePushed();
+
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ const int32_t reservedG = diffG;
+ const int32_t reservedF = diffF;
+
+ // See above.
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ // ARM can load multiple registers at once, but only if we want back all
+ // the registers we previously saved to the stack.
+ if (ignore.emptyFloat()) {
+ diffF -= transferMultipleByRuns(set.fpus(), IsLoad, StackPointer, IA);
+ adjustFrame(-reservedF);
+ } else {
+ LiveFloatRegisterSet fpset(set.fpus().reduceSetForPush());
+ LiveFloatRegisterSet fpignore(ignore.fpus().reduceSetForPush());
+ for (FloatRegisterBackwardIterator iter(fpset); iter.more(); ++iter) {
+ diffF -= (*iter).size();
+ if (!fpignore.has(*iter)) {
+ loadDouble(Address(StackPointer, diffF), *iter);
+ }
+ }
+ freeStack(reservedF);
+ }
+ MOZ_ASSERT(diffF == 0);
+
+ if (set.gprs().size() > 1 && ignore.emptyGeneral()) {
+ startDataTransferM(IsLoad, StackPointer, IA, WriteBack);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more();
+ ++iter) {
+ diffG -= sizeof(intptr_t);
+ transferReg(*iter);
+ }
+ finishDataTransfer();
+ adjustFrame(-reservedG);
+ } else {
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more();
+ ++iter) {
+ diffG -= sizeof(intptr_t);
+ if (!ignore.has(*iter)) {
+ loadPtr(Address(StackPointer, diffG), *iter);
+ }
+ }
+ freeStack(reservedG);
+ }
+ MOZ_ASSERT(diffG == 0);
+
+ MOZ_ASSERT(framePushedInitial - framePushed() ==
+ PushRegsInMaskSizeInBytes(set));
+}
+
+void MacroAssembler::Push(Register reg) {
+ push(reg);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const Imm32 imm) {
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const ImmWord imm) {
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const ImmPtr imm) {
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssembler::Push(const ImmGCPtr ptr) {
+ push(ptr);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(FloatRegister reg) {
+ VFPRegister r = VFPRegister(reg);
+ ma_vpush(VFPRegister(reg));
+ adjustFrame(r.size());
+}
+
+void MacroAssembler::PushBoxed(FloatRegister reg) {
+ MOZ_ASSERT(reg.isDouble());
+ Push(reg);
+}
+
+void MacroAssembler::Pop(Register reg) {
+ ma_pop(reg);
+ adjustFrame(-sizeof(intptr_t));
+}
+
+void MacroAssembler::Pop(FloatRegister reg) {
+ ma_vpop(reg);
+ adjustFrame(-reg.size());
+}
+
+void MacroAssembler::Pop(const ValueOperand& val) {
+ popValue(val);
+ adjustFrame(-sizeof(Value));
+}
+
+void MacroAssembler::PopStackPtr() {
+ as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
+ adjustFrame(-sizeof(intptr_t));
+}
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset MacroAssembler::call(Register reg) {
+ as_blx(reg);
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset MacroAssembler::call(Label* label) {
+ // For now, assume that it'll be nearby.
+ as_bl(label, Always);
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::call(ImmWord imm) { call(ImmPtr((void*)imm.value)); }
+
+void MacroAssembler::call(ImmPtr imm) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, imm, RelocationKind::HARDCODED);
+ ma_call(imm);
+}
+
+CodeOffset MacroAssembler::call(wasm::SymbolicAddress imm) {
+ movePtr(imm, CallReg);
+ return call(CallReg);
+}
+
+void MacroAssembler::call(const Address& addr) {
+ loadPtr(addr, CallReg);
+ call(CallReg);
+}
+
+void MacroAssembler::call(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ScratchRegisterScope scratch(*this);
+ ma_movPatchable(ImmPtr(c->raw()), scratch, Always);
+ callJitNoProfiler(scratch);
+}
+
+CodeOffset MacroAssembler::callWithPatch() {
+ // The caller ensures that the call is always in range using thunks (below)
+ // as necessary.
+ as_bl(BOffImm(), Always, /* documentation */ nullptr);
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ BufferOffset inst(callerOffset - 4);
+ BOffImm off = BufferOffset(calleeOffset).diffB<BOffImm>(inst);
+ MOZ_RELEASE_ASSERT(!off.isInvalid(),
+ "Failed to insert necessary far jump islands");
+ as_bl(off, Always, inst);
+}
+
+CodeOffset MacroAssembler::farJumpWithPatch() {
+ static_assert(32 * 1024 * 1024 - JumpImmediateRange >
+ wasm::MaxFuncs * 3 * sizeof(Instruction),
+ "always enough space for thunks");
+
+ // The goal of the thunk is to be able to jump to any address without the
+ // usual 32MiB branch range limitation. Additionally, to make the thunk
+ // simple to use, the thunk does not use the constant pool or require
+ // patching an absolute address. Instead, a relative offset is used which
+ // can be patched during compilation.
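+ //
+ // Schematically, the emitted three-word sequence is (the mnemonics are
+ // illustrative, not literal assembler output):
+ //
+ //   ldr scratch, [pc, #0]   ; pc reads as ldr+8, the address of the word
+ //   add pc, pc, scratch     ; pc reads as add+8; branches to add+8+<rel>
+ //   .word <rel>             ; patched by patchFarJump() to target-(add+8)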
+
+ // Inhibit pools since these three words must be contiguous so that the offset
+ // calculations below are valid.
+ AutoForbidPoolsAndNops afp(this, 3);
+
+ // When pc is used, the read value is the address of the instruction + 8.
+ // This is exactly the address of the uint32 word we want to load.
+ ScratchRegisterScope scratch(*this);
+ ma_ldr(DTRAddr(pc, DtrOffImm(0)), scratch);
+
+ // Branch by making pc the destination register.
+ ma_add(pc, scratch, pc, LeaveCC, Always);
+
+ // Allocate space which will be patched by patchFarJump().
+ CodeOffset farJump(currentOffset());
+ writeInst(UINT32_MAX);
+
+ return farJump;
+}
+
+void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ uint32_t* u32 =
+ reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
+ MOZ_ASSERT(*u32 == UINT32_MAX);
+
+ uint32_t addOffset = farJump.offset() - 4;
+ MOZ_ASSERT(editSrc(BufferOffset(addOffset))->is<InstALU>());
+
+ // When pc is read as the operand of the add, its value is the address of
+ // the add instruction + 8.
+ *u32 = (targetOffset - addOffset) - 8;
+}
+
+CodeOffset MacroAssembler::nopPatchableToCall() {
+ AutoForbidPoolsAndNops afp(this,
+ /* max number of instructions in scope = */ 1);
+ ma_nop();
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
+ uint8_t* inst = call - 4;
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(inst)->is<InstBLImm>() ||
+ reinterpret_cast<Instruction*>(inst)->is<InstNOP>());
+
+ new (inst) InstBLImm(BOffImm(target - inst), Assembler::Always);
+}
+
+void MacroAssembler::patchCallToNop(uint8_t* call) {
+ uint8_t* inst = call - 4;
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(inst)->is<InstBLImm>() ||
+ reinterpret_cast<Instruction*>(inst)->is<InstNOP>());
+ new (inst) InstNOP();
+}
+
+void MacroAssembler::pushReturnAddress() { push(lr); }
+
+void MacroAssembler::popReturnAddress() { pop(lr); }
+
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ ma_mov(sp, scratch);
+ // Force sp to be aligned.
+ as_bic(sp, sp, Imm8(ABIStackAlignment - 1));
+ ma_push(scratch);
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ if (dynamicAlignment_) {
+ // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+ // setupUnalignedABICall.
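+ // For example, with ABIStackAlignment == 8 and 8 bytes of outgoing
+ // arguments, 4 bytes of padding are added: the 4-byte saved sp plus the 12
+ // reserved bytes total 16, keeping sp 8-aligned at the call (the concrete
+ // numbers here are illustrative).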
+ stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
+ ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+
+ // Save the lr register if we need to preserve it.
+ if (secondScratchReg_ != lr) {
+ ma_mov(lr, secondScratchReg_);
+ }
+}
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ if (secondScratchReg_ != lr) {
+ ma_mov(secondScratchReg_, lr);
+ }
+
+ // Calls to native functions in wasm pass through a thunk which already
+ // fixes up the return value for us.
+ if (!callFromWasm && !UseHardFpABI()) {
+ switch (result) {
+ case MoveOp::DOUBLE:
+ // Move double from r0/r1 to ReturnFloatReg.
+ ma_vxfer(r0, r1, ReturnDoubleReg);
+ break;
+ case MoveOp::FLOAT32:
+ // Move float32 from r0 to ReturnFloatReg.
+ ma_vxfer(r0, ReturnFloat32Reg);
+ break;
+ case MoveOp::GENERAL:
+ break;
+ default:
+ MOZ_CRASH("unexpected callWithABI result");
+ }
+ }
+
+ freeStack(stackAdjust);
+
+ if (dynamicAlignment_) {
+ // While the x86 supports pop esp, on ARM that isn't well defined, so
+ // just do it manually.
+ as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+ // Load the callee in r12, as above.
+ ma_mov(fun, r12);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(r12);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ // Load the callee in r12; no instruction between the ldr and the call
+ // should clobber it. Note that we can't use fun.base because it may be
+ // one of the IntArg registers clobbered before the call.
+ {
+ ScratchRegisterScope scratch(*this);
+ ma_ldr(fun, r12, scratch);
+ }
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(r12);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
+ // On ARM, any read of the pc yields the address of the instruction plus 8,
+ // which corresponds to two 4-byte instructions. Thus we use an additional
+ // nop to pad until we reach the pushed pc.
+ //
+ // Note: In practice this should not be necessary, as this fake return
+ // address is never used for resuming any execution. Thus theoretically we
+ // could just do a Push(pc) and ignore the nop as well as the pool.
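+ //
+ // Concretely: if the push below is at offset X, the value pushed is X+8,
+ // and the following nop makes currentOffset() equal X+8 as well, which is
+ // exactly what the assertion at the end checks.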
+ enterNoPool(2);
+ DebugOnly<uint32_t> offsetBeforePush = currentOffset();
+ Push(pc); // actually pushes $pc + 8.
+ ma_nop();
+ uint32_t pseudoReturnOffset = currentOffset();
+ leaveNoPool();
+
+ MOZ_ASSERT_IF(!oom(), pseudoReturnOffset - offsetBeforePush == 8);
+ return pseudoReturnOffset;
+}
+
+void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ enterFakeExitFrame(cxreg, scratch, type);
+}
+
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ if (reg.gpr() != dest.payloadReg()) {
+ mov(reg.gpr(), dest.payloadReg());
+ }
+ mov(ImmWord(MIRTypeToTag(type)), dest.typeReg());
+ return;
+ }
+
+ ScratchFloat32Scope scratch(*this);
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ ma_vxfer(freg, dest.payloadReg(), dest.typeReg());
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ Register s0 = src.typeReg();
+ Register s1 = src.payloadReg();
+ Register d0 = dest.typeReg();
+ Register d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(d1 != scratch);
+ MOZ_ASSERT(d0 != scratch);
+ ma_mov(d1, scratch);
+ ma_mov(d0, d1);
+ ma_mov(scratch, d0);
+ return;
+ }
+ // If only one is, copy that source first.
+ std::swap(s0, s1);
+ std::swap(d0, d1);
+ }
+
+ if (s0 != d0) {
+ ma_mov(s0, d0);
+ }
+ if (s1 != d1) {
+ ma_mov(s1, d1);
+ }
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ ma_mov(Imm32(src.toNunboxTag()), dest.typeReg());
+ if (src.isGCThing()) {
+ ma_mov(ImmGCPtr(src.toGCThing()), dest.payloadReg());
+ } else {
+ ma_mov(Imm32(src.toNunboxPayload()), dest.payloadReg());
+ }
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
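+ // Shifting ptr right and then left by ChunkShift clears its low bits,
+ // leaving the base address of the containing chunk (ptr & ~(ChunkSize - 1)),
+ // from which the chunk's store buffer pointer is then loaded.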
+ ma_lsr(Imm32(gc::ChunkShift), ptr, buffer);
+ ma_lsl(Imm32(gc::ChunkShift), buffer, buffer);
+ load32(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ Maybe<SecondScratchRegisterScope> scratch2;
+ if (temp == Register::Invalid()) {
+ scratch2.emplace(*this);
+ temp = scratch2.ref();
+ }
+
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+
+ ma_lsr(Imm32(gc::ChunkShift), ptr, temp);
+ ma_lsl(Imm32(gc::ChunkShift), temp, temp);
+ loadPtr(Address(temp, gc::ChunkStoreBufferOffset), temp);
+ branchPtr(InvertCondition(cond), temp, ImmWord(0), label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestGCThing(Assembler::NotEqual, address,
+ cond == Assembler::Equal ? &done : label);
+
+ loadPtr(ToPayload(address), temp);
+ SecondScratchRegisterScope scratch2(*this);
+ branchPtrInNurseryChunk(cond, temp, scratch2, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ // If cond == NotEqual, branch when a.payload != b.payload || a.tag !=
+ // b.tag. If the payloads are equal, compare the tags. If the payloads are
+ // not equal, short circuit true (NotEqual).
+ //
+ // If cond == Equal, branch when a.payload == b.payload && a.tag == b.tag.
+ // If the payloads are equal, compare the tags. If the payloads are not
+ // equal, short circuit false (NotEqual).
+ ScratchRegisterScope scratch(*this);
+
+ if (rhs.isGCThing()) {
+ ma_cmp(lhs.payloadReg(), ImmGCPtr(rhs.toGCThing()), scratch);
+ } else {
+ ma_cmp(lhs.payloadReg(), Imm32(rhs.toNunboxPayload()), scratch);
+ }
+ ma_cmp(lhs.typeReg(), Imm32(rhs.toNunboxTag()), scratch, Equal);
+ ma_b(label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // Store the type tag.
+ storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
+
+ // Store the payload.
+ if (value.constant()) {
+ storePayload(value.value(), dest);
+ } else {
+ storePayload(value.reg().typedReg().gpr(), dest);
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+CodeOffset MacroAssembler::wasmTrapInstruction() {
+ return CodeOffset(as_illegal_trap().getOffset());
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ as_cmp(index, O2Reg(boundsCheckLimit));
+ as_b(ok, cond);
+ if (JitOptions.spectreIndexMasking) {
+ ma_mov(boundsCheckLimit, index, LeaveCC, cond);
+ }
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ ScratchRegisterScope scratch(*this);
+ ma_ldr(DTRAddr(boundsCheckLimit.base, DtrOffImm(boundsCheckLimit.offset)),
+ scratch);
+ as_cmp(index, O2Reg(scratch));
+ as_b(ok, cond);
+ if (JitOptions.spectreIndexMasking) {
+ ma_mov(scratch, index, LeaveCC, cond);
+ }
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
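+ // Only branch to |ok| when the high word of the index is zero and the low
+ // word passes the 32-bit check against the limit.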
+ Label notOk;
+ cmp32(index.high, Imm32(0));
+ j(Assembler::NonZero, &notOk);
+ wasmBoundsCheck32(cond, index.low, boundsCheckLimit.low, ok);
+ bind(&notOk);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ Label notOk;
+ cmp32(index.high, Imm32(0));
+ j(Assembler::NonZero, &notOk);
+ wasmBoundsCheck32(cond, index.low, boundsCheckLimit, ok);
+ bind(&notOk);
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ wasmTruncateToInt32(input, output, MIRType::Double, /* isUnsigned= */ true,
+ isSaturating, oolEntry);
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ wasmTruncateToInt32(input, output, MIRType::Double, /* isUnsigned= */ false,
+ isSaturating, oolEntry);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ wasmTruncateToInt32(input, output, MIRType::Float32, /* isUnsigned= */ true,
+ isSaturating, oolEntry);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ wasmTruncateToInt32(input, output, MIRType::Float32, /* isUnsigned= */ false,
+ isSaturating, oolEntry);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToIntCheck(input, MIRType::Float32, MIRType::Int32,
+ flags, rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToIntCheck(input, MIRType::Double, MIRType::Int32, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToIntCheck(input, MIRType::Float32, MIRType::Int64,
+ flags, rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToIntCheck(input, MIRType::Double, MIRType::Int64, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, AnyRegister output) {
+ wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output,
+ Register64::Invalid());
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output) {
+ MOZ_ASSERT_IF(access.isAtomic(), access.byteSize() <= 4);
+ wasmLoadImpl(access, memoryBase, ptr, ptrScratch, AnyRegister(), output);
+}
+
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreImpl(access, value, Register64::Invalid(), memoryBase, ptr,
+ ptrScratch);
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ MOZ_ASSERT(!access.isAtomic());
+ wasmStoreImpl(access, AnyRegister(), value, memoryBase, ptr, ptrScratch);
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
+static Register ComputePointerForAtomic(MacroAssembler& masm,
+ const BaseIndex& src, Register r) {
+ Register base = src.base;
+ Register index = src.index;
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ int32_t offset = src.offset;
+
+ ScratchRegisterScope scratch(masm);
+
+ masm.as_add(r, base, lsl(index, scale));
+ if (offset != 0) {
+ masm.ma_add(r, Imm32(offset), r, scratch);
+ }
+ return r;
+}
+
+static Register ComputePointerForAtomic(MacroAssembler& masm,
+ const Address& src, Register r) {
+ ScratchRegisterScope scratch(masm);
+ if (src.offset == 0) {
+ return src.base;
+ }
+ masm.ma_add(src.base, Imm32(src.offset), r, scratch);
+ return r;
+}
+
+// General algorithm:
+//
+// ... ptr, <addr> ; compute address of item
+// dmb
+// L0 ldrex* output, [ptr]
+// sxt* output, output, 0 ; sign-extend if applicable
+// *xt* tmp, oldval, 0 ; sign-extend or zero-extend if applicable
+// cmp output, tmp
+// bne L1 ; failed - values are different
+// strex* tmp, newval, [ptr]
+// cmp tmp, 1
+// beq L0 ; failed - location is dirty, retry
+// L1 dmb
+//
+// Discussion here: http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html.
+// However note that that discussion uses 'isb' as the trailing fence.
+// I've not quite figured out why, and I've gone with dmb here which
+// is safe. Also see the LLVM source, which uses 'dmb ish' generally.
+// (Apple's Swift CPU apparently handles ish in a non-default, faster
+// way.)
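+//
+// In C++ terms, the loop below has the same observable behavior as a strong
+// 32-bit compare-exchange; roughly (an illustrative sketch using std::atomic,
+// not the actual implementation):
+//
+//   std::atomic<uint32_t>* cell = ...;   // the memory location
+//   uint32_t expected = oldval;
+//   cell->compare_exchange_strong(expected, newval);
+//   output = expected;                   // the value observed in memory
+//
+// with the sub-word cases additionally sign- or zero-extending the observed
+// value according to 'type'.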
+
+template <typename T>
+static void CompareExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register oldval, Register newval,
+ Register output) {
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ MOZ_ASSERT(nbytes <= 4);
+
+ Label again;
+ Label done;
+
+ SecondScratchRegisterScope scratch2(masm);
+ Register ptr = ComputePointerForAtomic(masm, mem, scratch2);
+
+ ScratchRegisterScope scratch(masm);
+
+ // NOTE: the generated code must match the assembly code in gen_cmpxchg in
+ // GenerateAtomicOperations.py
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ BufferOffset firstAccess;
+ switch (nbytes) {
+ case 1:
+ firstAccess = masm.as_ldrexb(output, ptr);
+ if (signExtend) {
+ masm.as_sxtb(output, output, 0);
+ masm.as_sxtb(scratch, oldval, 0);
+ } else {
+ masm.as_uxtb(scratch, oldval, 0);
+ }
+ break;
+ case 2:
+ firstAccess = masm.as_ldrexh(output, ptr);
+ if (signExtend) {
+ masm.as_sxth(output, output, 0);
+ masm.as_sxth(scratch, oldval, 0);
+ } else {
+ masm.as_uxth(scratch, oldval, 0);
+ }
+ break;
+ case 4:
+ firstAccess = masm.as_ldrex(output, ptr);
+ break;
+ }
+ if (access) {
+ masm.append(*access, firstAccess.getOffset());
+ }
+
+ if (nbytes < 4) {
+ masm.as_cmp(output, O2Reg(scratch));
+ } else {
+ masm.as_cmp(output, O2Reg(oldval));
+ }
+ masm.as_b(&done, MacroAssembler::NotEqual);
+ switch (nbytes) {
+ case 1:
+ masm.as_strexb(scratch, newval, ptr);
+ break;
+ case 2:
+ masm.as_strexh(scratch, newval, ptr);
+ break;
+ case 4:
+ masm.as_strex(scratch, newval, ptr);
+ break;
+ }
+ masm.as_cmp(scratch, Imm8(1));
+ masm.as_b(&again, MacroAssembler::Equal);
+ masm.bind(&done);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& address, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, nullptr, type, sync, address, oldval, newval, output);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& address, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, nullptr, type, sync, address, oldval, newval, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, output);
+}
+
+template <typename T>
+static void AtomicExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register value, Register output) {
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ MOZ_ASSERT(nbytes <= 4);
+
+ // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
+ Label again;
+ Label done;
+
+ SecondScratchRegisterScope scratch2(masm);
+ Register ptr = ComputePointerForAtomic(masm, mem, scratch2);
+
+ ScratchRegisterScope scratch(masm);
+
+ // NOTE: the generated code must match the assembly code in gen_exchange in
+ // GenerateAtomicOperations.py
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ BufferOffset firstAccess;
+ switch (nbytes) {
+ case 1:
+ firstAccess = masm.as_ldrexb(output, ptr);
+ if (signExtend) {
+ masm.as_sxtb(output, output, 0);
+ }
+ masm.as_strexb(scratch, value, ptr);
+ break;
+ case 2:
+ firstAccess = masm.as_ldrexh(output, ptr);
+ if (signExtend) {
+ masm.as_sxth(output, output, 0);
+ }
+ masm.as_strexh(scratch, value, ptr);
+ break;
+ case 4:
+ firstAccess = masm.as_ldrex(output, ptr);
+ masm.as_strex(scratch, value, ptr);
+ break;
+ }
+ if (access) {
+ masm.append(*access, firstAccess.getOffset());
+ }
+
+ masm.as_cmp(scratch, Imm8(1));
+ masm.as_b(&again, MacroAssembler::Equal);
+ masm.bind(&done);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& address, Register value,
+ Register output) {
+ AtomicExchange(*this, nullptr, type, sync, address, value, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& address, Register value,
+ Register output) {
+ AtomicExchange(*this, nullptr, type, sync, address, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ output);
+}
+
+// General algorithm:
+//
+// ... ptr, <addr> ; compute address of item
+// dmb
+// L0 ldrex* output, [ptr]
+// sxt* output, output, 0 ; sign-extend if applicable
+// OP tmp, output, value ; compute value to store
+// strex* tmp2, tmp, [ptr] ; tmp2 required by strex
+// cmp tmp2, 1
+// beq L0 ; failed - location is dirty, retry
+// dmb ; ordering barrier required
+//
+// Also see notes above at compareExchange re the barrier strategy.
+//
+// Observe that the value being combined into the memory element need
+// not be sign-extended, because no OP will make use of bits to the
+// left of the bits indicated by the width of the element, and neither
+// the output nor the bits stored are affected by OP.
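+//
+// In C++ terms this is the loop behind std::atomic<T>::fetch_add and friends:
+// roughly output = cell->fetch_add(value) (or fetch_sub, fetch_and, fetch_or,
+// fetch_xor), i.e. 'output' receives the value from before the operation and
+// the combined value is what gets stored.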
+
+template <typename T>
+static void AtomicFetchOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const Register& value, const T& mem,
+ Register flagTemp, Register output) {
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ MOZ_ASSERT(nbytes <= 4);
+ MOZ_ASSERT(flagTemp != InvalidReg);
+ MOZ_ASSERT(output != value);
+
+ Label again;
+
+ SecondScratchRegisterScope scratch2(masm);
+ Register ptr = ComputePointerForAtomic(masm, mem, scratch2);
+
+ // NOTE: the generated code must match the assembly code in gen_fetchop in
+ // GenerateAtomicOperations.py
+ masm.memoryBarrierBefore(sync);
+
+ ScratchRegisterScope scratch(masm);
+
+ masm.bind(&again);
+
+ BufferOffset firstAccess;
+ switch (nbytes) {
+ case 1:
+ firstAccess = masm.as_ldrexb(output, ptr);
+ if (signExtend) {
+ masm.as_sxtb(output, output, 0);
+ }
+ break;
+ case 2:
+ firstAccess = masm.as_ldrexh(output, ptr);
+ if (signExtend) {
+ masm.as_sxth(output, output, 0);
+ }
+ break;
+ case 4:
+ firstAccess = masm.as_ldrex(output, ptr);
+ break;
+ }
+ if (access) {
+ masm.append(*access, firstAccess.getOffset());
+ }
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add(scratch, output, O2Reg(value));
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub(scratch, output, O2Reg(value));
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(scratch, output, O2Reg(value));
+ break;
+ case AtomicFetchOrOp:
+ masm.as_orr(scratch, output, O2Reg(value));
+ break;
+ case AtomicFetchXorOp:
+ masm.as_eor(scratch, output, O2Reg(value));
+ break;
+ }
+ // Rd must differ from the two other arguments to strex.
+ switch (nbytes) {
+ case 1:
+ masm.as_strexb(flagTemp, scratch, ptr);
+ break;
+ case 2:
+ masm.as_strexh(flagTemp, scratch, ptr);
+ break;
+ case 4:
+ masm.as_strex(flagTemp, scratch, ptr);
+ break;
+ }
+ masm.as_cmp(flagTemp, Imm8(1));
+ masm.as_b(&again, MacroAssembler::Equal);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register temp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register temp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, value, mem,
+ temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, value, mem,
+ temp, output);
+}
+
+// Uses both scratch registers, one for the address and one for a temp,
+// but needs two temps for strex:
+//
+// ... ptr, <addr> ; compute address of item
+// dmb
+// L0 ldrex* temp, [ptr]
+// OP temp, temp, value ; compute value to store
+// strex* temp2, temp, [ptr]
+// cmp temp2, 1
+// beq L0 ; failed - location is dirty, retry
+// dmb ; ordering barrier required
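+//
+// This is the same loop as in AtomicFetchOp above, except that the
+// pre-operation value is not produced: the combined value is computed into a
+// scratch register and stored, and nothing is returned to the caller.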
+
+template <typename T>
+static void AtomicEffectOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const Register& value, const T& mem,
+ Register flagTemp) {
+ unsigned nbytes = Scalar::byteSize(type);
+
+ MOZ_ASSERT(nbytes <= 4);
+ MOZ_ASSERT(flagTemp != InvalidReg);
+
+ Label again;
+
+ SecondScratchRegisterScope scratch2(masm);
+ Register ptr = ComputePointerForAtomic(masm, mem, scratch2);
+
+ masm.memoryBarrierBefore(sync);
+
+ ScratchRegisterScope scratch(masm);
+
+ masm.bind(&again);
+
+ BufferOffset firstAccess;
+ switch (nbytes) {
+ case 1:
+ firstAccess = masm.as_ldrexb(scratch, ptr);
+ break;
+ case 2:
+ firstAccess = masm.as_ldrexh(scratch, ptr);
+ break;
+ case 4:
+ firstAccess = masm.as_ldrex(scratch, ptr);
+ break;
+ }
+ if (access) {
+ masm.append(*access, firstAccess.getOffset());
+ }
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add(scratch, scratch, O2Reg(value));
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub(scratch, scratch, O2Reg(value));
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(scratch, scratch, O2Reg(value));
+ break;
+ case AtomicFetchOrOp:
+ masm.as_orr(scratch, scratch, O2Reg(value));
+ break;
+ case AtomicFetchXorOp:
+ masm.as_eor(scratch, scratch, O2Reg(value));
+ break;
+ }
+ // Rd must differ from the two other arguments to strex.
+ switch (nbytes) {
+ case 1:
+ masm.as_strexb(flagTemp, scratch, ptr);
+ break;
+ case 2:
+ masm.as_strexh(flagTemp, scratch, ptr);
+ break;
+ case 4:
+ masm.as_strex(flagTemp, scratch, ptr);
+ break;
+ }
+ masm.as_cmp(flagTemp, Imm8(1));
+ masm.as_b(&again, MacroAssembler::Equal);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, value, mem,
+ temp);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, value, mem,
+ temp);
+}
+
+template <typename T>
+static void AtomicLoad64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 output) {
+ MOZ_ASSERT((output.low.code() & 1) == 0);
+ MOZ_ASSERT(output.low.code() + 1 == output.high.code());
+
+ masm.memoryBarrierBefore(sync);
+
+ SecondScratchRegisterScope scratch2(masm);
+ Register ptr = ComputePointerForAtomic(masm, mem, scratch2);
+
+ BufferOffset load = masm.as_ldrexd(output.low, output.high, ptr);
+ if (access) {
+ masm.append(*access, load.getOffset());
+ }
+ masm.as_clrex();
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void WasmAtomicLoad64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access, const T& mem,
+ Register64 temp, Register64 output) {
+ MOZ_ASSERT(temp.low == InvalidReg && temp.high == InvalidReg);
+
+ AtomicLoad64(masm, &access, access.sync(), mem, output);
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ WasmAtomicLoad64(*this, access, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ WasmAtomicLoad64(*this, access, mem, temp, output);
+}
+
+template <typename T>
+static void CompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
+ MOZ_ASSERT(expect != replace && replace != output && output != expect);
+
+ MOZ_ASSERT((replace.low.code() & 1) == 0);
+ MOZ_ASSERT(replace.low.code() + 1 == replace.high.code());
+
+ MOZ_ASSERT((output.low.code() & 1) == 0);
+ MOZ_ASSERT(output.low.code() + 1 == output.high.code());
+
+ Label again;
+ Label done;
+
+ SecondScratchRegisterScope scratch2(masm);
+ Register ptr = ComputePointerForAtomic(masm, mem, scratch2);
+
+ // NOTE: the generated code must match the assembly code in gen_cmpxchg in
+ // GenerateAtomicOperations.py
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+ BufferOffset load = masm.as_ldrexd(output.low, output.high, ptr);
+ if (access) {
+ masm.append(*access, load.getOffset());
+ }
+
+ masm.as_cmp(output.low, O2Reg(expect.low));
+ masm.as_cmp(output.high, O2Reg(expect.high), MacroAssembler::Equal);
+ masm.as_b(&done, MacroAssembler::NotEqual);
+
+ ScratchRegisterScope scratch(masm);
+
+ // Rd (temp) must differ from the two other arguments to strex.
+ masm.as_strexd(scratch, replace.low, replace.high, ptr);
+ masm.as_cmp(scratch, Imm8(1));
+ masm.as_b(&again, MacroAssembler::Equal);
+ masm.bind(&done);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const Address& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+template <typename T>
+static void AtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 value, Register64 output) {
+ MOZ_ASSERT(output != value);
+
+ MOZ_ASSERT((value.low.code() & 1) == 0);
+ MOZ_ASSERT(value.low.code() + 1 == value.high.code());
+
+ MOZ_ASSERT((output.low.code() & 1) == 0);
+ MOZ_ASSERT(output.low.code() + 1 == output.high.code());
+
+ Label again;
+
+ SecondScratchRegisterScope scratch2(masm);
+ Register ptr = ComputePointerForAtomic(masm, mem, scratch2);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+ BufferOffset load = masm.as_ldrexd(output.low, output.high, ptr);
+ if (access) {
+ masm.append(*access, load.getOffset());
+ }
+
+ ScratchRegisterScope scratch(masm);
+
+ masm.as_strexd(scratch, value.low, value.high, ptr);
+ masm.as_cmp(scratch, Imm8(1));
+ masm.as_b(&again, MacroAssembler::Equal);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void WasmAtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ const T& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(masm, &access, access.sync(), mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 value, Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, AtomicOp op,
+ Register64 value, const T& mem, Register64 temp,
+ Register64 output) {
+ MOZ_ASSERT(temp.low != InvalidReg && temp.high != InvalidReg);
+ MOZ_ASSERT(output != value);
+ MOZ_ASSERT(temp != value);
+
+ MOZ_ASSERT((temp.low.code() & 1) == 0);
+ MOZ_ASSERT(temp.low.code() + 1 == temp.high.code());
+
+ // We could avoid this pair requirement but in that case we would end up
+ // with two moves in the loop to preserve the loaded value in output. The
+ // prize would be less register spilling around this op since the pair
+ // requirement will tend to force more spilling.
+
+ MOZ_ASSERT((output.low.code() & 1) == 0);
+ MOZ_ASSERT(output.low.code() + 1 == output.high.code());
+
+ Label again;
+
+ SecondScratchRegisterScope scratch2(masm);
+ Register ptr = ComputePointerForAtomic(masm, mem, scratch2);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+ BufferOffset load = masm.as_ldrexd(output.low, output.high, ptr);
+ if (access) {
+ masm.append(*access, load.getOffset());
+ }
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add(temp.low, output.low, O2Reg(value.low), SetCC);
+ masm.as_adc(temp.high, output.high, O2Reg(value.high));
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub(temp.low, output.low, O2Reg(value.low), SetCC);
+ masm.as_sbc(temp.high, output.high, O2Reg(value.high));
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(temp.low, output.low, O2Reg(value.low));
+ masm.as_and(temp.high, output.high, O2Reg(value.high));
+ break;
+ case AtomicFetchOrOp:
+ masm.as_orr(temp.low, output.low, O2Reg(value.low));
+ masm.as_orr(temp.high, output.high, O2Reg(value.high));
+ break;
+ case AtomicFetchXorOp:
+ masm.as_eor(temp.low, output.low, O2Reg(value.low));
+ masm.as_eor(temp.high, output.high, O2Reg(value.high));
+ break;
+ }
+
+ ScratchRegisterScope scratch(masm);
+
+ // Rd (temp) must differ from the two other arguments to strex.
+ masm.as_strexd(scratch, temp.low, temp.high, ptr);
+ masm.as_cmp(scratch, Imm8(1));
+ masm.as_b(&again, MacroAssembler::Equal);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void WasmAtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value, const T& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(masm, &access, access.sync(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ WasmAtomicFetchOp64(*this, access, op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ WasmAtomicFetchOp64(*this, access, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+// ========================================================================
+// JS atomic operations.
+
+template <typename T>
+static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register oldval, Register newval, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
+ }
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register temp,
+ AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register temp,
+ AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+template <typename T>
+static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register value, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicExchange(arrayType, sync, mem, value, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register temp, AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register temp, AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const T& mem, Register temp1,
+ Register temp2, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
+ masm.convertUInt32ToDouble(temp1, output.fpu());
+ } else {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register temp1, Register temp2,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register temp1, Register temp2,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register temp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, value, mem, temp);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register temp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, value, mem, temp);
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
+void MacroAssembler::atomicLoad64(const Synchronization& sync,
+ const Address& mem, Register64 output) {
+ AtomicLoad64(*this, nullptr, sync, mem, output);
+}
+
+void MacroAssembler::atomicLoad64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 output) {
+ AtomicLoad64(*this, nullptr, sync, mem, output);
+}
+
+void MacroAssembler::atomicStore64(const Synchronization& sync,
+ const Address& mem, Register64 value,
+ Register64 temp) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, temp);
+}
+
+void MacroAssembler::atomicStore64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 value,
+ Register64 temp) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, temp);
+}
+
+// ========================================================================
+// Convert floating point.
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ ScratchDoubleScope scratchDouble(*this);
+
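+ // Computes dest = double(src.high) * 2^32 + double(src.low);
+ // TO_DOUBLE_HIGH_SCALE below is assumed to hold the 2^32 scale factor.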
+ convertUInt32ToDouble(src.high, dest);
+ {
+ ScratchRegisterScope scratch(*this);
+ movePtr(ImmPtr(&TO_DOUBLE_HIGH_SCALE), scratch);
+ ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), scratchDouble);
+ }
+ mulDouble(scratchDouble, dest);
+ convertUInt32ToDouble(src.low, scratchDouble);
+ addDouble(scratchDouble, dest);
+}
+
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ ScratchDoubleScope scratchDouble(*this);
+
+ convertInt32ToDouble(src.high, dest);
+ {
+ ScratchRegisterScope scratch(*this);
+ movePtr(ImmPtr(&TO_DOUBLE_HIGH_SCALE), scratch);
+ ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), scratchDouble);
+ }
+ mulDouble(scratchDouble, dest);
+ convertUInt32ToDouble(src.low, scratchDouble);
+ addDouble(scratchDouble, dest);
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt32ToDouble(src, dest);
+}
+
+extern "C" {
+extern MOZ_EXPORT int64_t __aeabi_idivmod(int, int);
+extern MOZ_EXPORT int64_t __aeabi_uidivmod(int, int);
+}
+
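+// Per the ARM run-time ABI, __aeabi_idivmod and __aeabi_uidivmod return the
+// quotient in r0 and the remainder in r1 (hence the int64_t return type in
+// the declarations above); the helpers below read the results via
+// ReturnRegVal0 and ReturnRegVal1.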
+inline void EmitRemainderOrQuotient(bool isRemainder, MacroAssembler& masm,
+ Register rhs, Register lhsOutput,
+ bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs) {
+ // Currently this helper can't handle this situation.
+ MOZ_ASSERT(lhsOutput != rhs);
+
+ if (HasIDIV()) {
+ if (isRemainder) {
+ masm.remainder32(rhs, lhsOutput, isUnsigned);
+ } else {
+ masm.quotient32(rhs, lhsOutput, isUnsigned);
+ }
+ } else {
+ // Ensure that the output registers are saved and restored properly.
+ MOZ_ASSERT(volatileLiveRegs.has(ReturnRegVal0));
+ MOZ_ASSERT(volatileLiveRegs.has(ReturnRegVal1));
+
+ masm.PushRegsInMask(volatileLiveRegs);
+ using Fn = int64_t (*)(int, int);
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.setupUnalignedABICall(scratch);
+ }
+ masm.passABIArg(lhsOutput);
+ masm.passABIArg(rhs);
+ if (isUnsigned) {
+ masm.callWithABI<Fn, __aeabi_uidivmod>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+ } else {
+ masm.callWithABI<Fn, __aeabi_idivmod>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ if (isRemainder) {
+ masm.mov(ReturnRegVal1, lhsOutput);
+ } else {
+ masm.mov(ReturnRegVal0, lhsOutput);
+ }
+
+ LiveRegisterSet ignore;
+ ignore.add(lhsOutput);
+ masm.PopRegsInMaskIgnore(volatileLiveRegs, ignore);
+ }
+}
+
+void MacroAssembler::flexibleQuotient32(
+ Register rhs, Register srcDest, bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs) {
+ EmitRemainderOrQuotient(false, *this, rhs, srcDest, isUnsigned,
+ volatileLiveRegs);
+}
+
+void MacroAssembler::flexibleRemainder32(
+ Register rhs, Register srcDest, bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs) {
+ EmitRemainderOrQuotient(true, *this, rhs, srcDest, isUnsigned,
+ volatileLiveRegs);
+}
+
+void MacroAssembler::flexibleDivMod32(Register rhs, Register lhsOutput,
+ Register remOutput, bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs) {
+ // Currently this helper can't handle this situation.
+ MOZ_ASSERT(lhsOutput != rhs);
+
+ if (HasIDIV()) {
+ mov(lhsOutput, remOutput);
+ remainder32(rhs, remOutput, isUnsigned);
+ quotient32(rhs, lhsOutput, isUnsigned);
+ } else {
+ // Ensure that the output registers are saved and restored properly.
+ MOZ_ASSERT(volatileLiveRegs.has(ReturnRegVal0));
+ MOZ_ASSERT(volatileLiveRegs.has(ReturnRegVal1));
+ PushRegsInMask(volatileLiveRegs);
+
+ using Fn = int64_t (*)(int, int);
+ {
+ ScratchRegisterScope scratch(*this);
+ setupUnalignedABICall(scratch);
+ }
+ passABIArg(lhsOutput);
+ passABIArg(rhs);
+ if (isUnsigned) {
+ callWithABI<Fn, __aeabi_uidivmod>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ } else {
+ callWithABI<Fn, __aeabi_idivmod>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ moveRegPair(ReturnRegVal0, ReturnRegVal1, lhsOutput, remOutput);
+
+ LiveRegisterSet ignore;
+ ignore.add(remOutput);
+ ignore.add(lhsOutput);
+ PopRegsInMaskIgnore(volatileLiveRegs, ignore);
+ }
+}
+
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ return movWithPatch(ImmPtr(nullptr), dest);
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
+}
+
+// ========================================================================
+// Spectre Mitigations.
+
+void MacroAssembler::speculationBarrier() {
+ // Spectre mitigation recommended by ARM for cases where csel/cmov cannot be
+ // used.
+ as_csdb();
+}
+
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ floorf(src, dest, fail);
+}
+
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ floor(src, dest, fail);
+}
+
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ceilf(src, dest, fail);
+}
+
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ceil(src, dest, fail);
+}
+
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ roundf(src, dest, fail, temp);
+}
+
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ round(src, dest, fail, temp);
+}
+
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ truncf(src, dest, fail);
+}
+
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ trunc(src, dest, fail);
+}
+
+void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+ Register pointer) {
+ if (IsShiftInScaleRange(shift)) {
+ computeEffectiveAddress(
+ BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
+ return;
+ }
+ lshift32(Imm32(shift), indexTemp32);
+ addPtr(indexTemp32, pointer);
+}
+
+//}}} check_macroassembler_style
+
+void MacroAssemblerARM::wasmTruncateToInt32(FloatRegister input,
+ Register output, MIRType fromType,
+ bool isUnsigned, bool isSaturating,
+ Label* oolEntry) {
+ ScratchDoubleScope scratchScope(asMasm());
+ ScratchRegisterScope scratchReg(asMasm());
+ FloatRegister scratch = scratchScope.uintOverlay();
+
+ // ARM conversion instructions clamp the value to ensure it fits within the
+ // target's type bounds, so every time we see those, we need to check the
+ // input. A NaN check is not necessary because NaN is converted to zero and
+ // on a zero result we branch out of line to do further processing anyway.
+ if (isUnsigned) {
+ if (fromType == MIRType::Double) {
+ ma_vcvt_F64_U32(input, scratch);
+ } else if (fromType == MIRType::Float32) {
+ ma_vcvt_F32_U32(input, scratch);
+ } else {
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+ }
+
+ ma_vxfer(scratch, output);
+
+ if (!isSaturating) {
+ // int32_t(UINT32_MAX) == -1.
+ ma_cmp(output, Imm32(-1), scratchReg);
+ as_cmp(output, Imm8(0), Assembler::NotEqual);
+ ma_b(oolEntry, Assembler::Equal);
+ }
+
+ return;
+ }
+
+ // vcvt* converts NaN into 0, so check for NaNs here.
+ if (!isSaturating) {
+ if (fromType == MIRType::Double) {
+ asMasm().compareDouble(input, input);
+ } else if (fromType == MIRType::Float32) {
+ asMasm().compareFloat(input, input);
+ } else {
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+ }
+
+ ma_b(oolEntry, Assembler::VFP_Unordered);
+ }
+
+ scratch = scratchScope.sintOverlay();
+
+ if (fromType == MIRType::Double) {
+ ma_vcvt_F64_I32(input, scratch);
+ } else if (fromType == MIRType::Float32) {
+ ma_vcvt_F32_I32(input, scratch);
+ } else {
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+ }
+
+ ma_vxfer(scratch, output);
+
+ if (!isSaturating) {
+ ma_cmp(output, Imm32(INT32_MAX), scratchReg);
+ ma_cmp(output, Imm32(INT32_MIN), scratchReg, Assembler::NotEqual);
+ ma_b(oolEntry, Assembler::Equal);
+ }
+}
+
+void MacroAssemblerARM::outOfLineWasmTruncateToIntCheck(
+ FloatRegister input, MIRType fromType, MIRType toType, TruncFlags flags,
+ Label* rejoin, wasm::BytecodeOffset trapOffset) {
+ // On ARM, saturating truncation codegen handles saturating itself rather
+ // than relying on out-of-line fixup code.
+ if (flags & TRUNC_SATURATING) {
+ return;
+ }
+
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ ScratchDoubleScope scratchScope(asMasm());
+ FloatRegister scratch;
+
+ // Eagerly take care of NaNs.
+ Label inputIsNaN;
+ if (fromType == MIRType::Double) {
+ asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
+ &inputIsNaN);
+ } else if (fromType == MIRType::Float32) {
+ asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ } else {
+ MOZ_CRASH("unexpected type in visitOutOfLineWasmTruncateCheck");
+ }
+
+ // Handle special values.
+ Label fail;
+
+ // By default test for the following inputs and bail:
+ // signed: ] -Inf, INTXX_MIN - 1.0 ] and [ INTXX_MAX + 1.0 : +Inf [
+ // unsigned: ] -Inf, -1.0 ] and [ UINTXX_MAX + 1.0 : +Inf [
+ // Note: we cannot always represent those exact values. As a result
+ // this changes the actual comparison a bit.
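+ //
+ // For example, for an unsigned 32-bit target, minValue is -1.0 and maxValue
+ // is 4294967296.0 (UINT32_MAX + 1); both are exactly representable, and the
+ // comparisons below reject everything outside the open interval
+ // (-1.0, 4294967296.0), i.e. everything that does not truncate into
+ // [0, UINT32_MAX].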
+ double minValue, maxValue;
+ Assembler::DoubleCondition minCond = Assembler::DoubleLessThanOrEqual;
+ Assembler::DoubleCondition maxCond = Assembler::DoubleGreaterThanOrEqual;
+ if (toType == MIRType::Int64) {
+ if (isUnsigned) {
+ minValue = -1;
+ maxValue = double(UINT64_MAX) + 1.0;
+ } else {
+ // In the float32/double range there exists no value between
+ // INT64_MIN and INT64_MIN - 1.0, making INT64_MIN the lower bound.
+ minValue = double(INT64_MIN);
+ minCond = Assembler::DoubleLessThan;
+ maxValue = double(INT64_MAX) + 1.0;
+ }
+ } else {
+ if (isUnsigned) {
+ minValue = -1;
+ maxValue = double(UINT32_MAX) + 1.0;
+ } else {
+ if (fromType == MIRType::Float32) {
+ // In the float32 range there exists no value between
+ // INT32_MIN and INT32_MIN - 1.0, making INT32_MIN the lower bound.
+ minValue = double(INT32_MIN);
+ minCond = Assembler::DoubleLessThan;
+ } else {
+ minValue = double(INT32_MIN) - 1.0;
+ }
+ maxValue = double(INT32_MAX) + 1.0;
+ }
+ }
+
+ if (fromType == MIRType::Double) {
+ scratch = scratchScope.doubleOverlay();
+ asMasm().loadConstantDouble(minValue, scratch);
+ asMasm().branchDouble(minCond, input, scratch, &fail);
+
+ asMasm().loadConstantDouble(maxValue, scratch);
+ asMasm().branchDouble(maxCond, input, scratch, &fail);
+ } else {
+ MOZ_ASSERT(fromType == MIRType::Float32);
+ scratch = scratchScope.singleOverlay();
+ asMasm().loadConstantFloat32(float(minValue), scratch);
+ asMasm().branchFloat(minCond, input, scratch, &fail);
+
+ asMasm().loadConstantFloat32(float(maxValue), scratch);
+ asMasm().branchFloat(maxCond, input, scratch, &fail);
+ }
+
+ // We had an actual correct value; get back to where we were.
+ ma_b(rejoin);
+
+ // Handle errors.
+ bind(&fail);
+ asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
+
+ bind(&inputIsNaN);
+ asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
+}
+
+void MacroAssemblerARM::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, AnyRegister output,
+ Register64 out64) {
+ MOZ_ASSERT(ptr == ptrScratch);
+ MOZ_ASSERT(!access.isZeroExtendSimd128Load());
+ MOZ_ASSERT(!access.isSplatSimd128Load());
+ MOZ_ASSERT(!access.isWidenSimd128Load());
+
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+
+ Scalar::Type type = access.type();
+
+ // Maybe add the offset.
+ if (offset || type == Scalar::Int64) {
+ ScratchRegisterScope scratch(asMasm());
+ if (offset) {
+ ma_add(Imm32(offset), ptr, scratch);
+ }
+ }
+
+ bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 ||
+ type == Scalar::Int32 || type == Scalar::Int64;
+ unsigned byteSize = access.byteSize();
+
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ asMasm().memoryBarrierBefore(access.sync());
+
+ BufferOffset load;
+ if (out64 != Register64::Invalid()) {
+ if (type == Scalar::Int64) {
+ static_assert(INT64LOW_OFFSET == 0);
+
+ load = ma_dataTransferN(IsLoad, 32, /* signed = */ false, memoryBase, ptr,
+ out64.low);
+ append(access, load.getOffset());
+
+ as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
+
+ load =
+ ma_dataTransferN(IsLoad, 32, isSigned, memoryBase, ptr, out64.high);
+ append(access, load.getOffset());
+ } else {
+ load = ma_dataTransferN(IsLoad, byteSize * 8, isSigned, memoryBase, ptr,
+ out64.low);
+ append(access, load.getOffset());
+
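+ // Widen the sub-64-bit result: sign-extend by replicating the sign bit of
+ // the low word, or zero-extend by clearing the high word.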
+ if (isSigned) {
+ ma_asr(Imm32(31), out64.low, out64.high);
+ } else {
+ ma_mov(Imm32(0), out64.high);
+ }
+ }
+ } else {
+ bool isFloat = output.isFloat();
+ if (isFloat) {
+ MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
+ ScratchRegisterScope scratch(asMasm());
+ FloatRegister dest = output.fpu();
+ ma_add(memoryBase, ptr, scratch);
+
+ // FP loads can't use VLDR as that has stringent alignment checks and will
+ // SIGBUS on unaligned accesses. Choose a different strategy depending on
+ // the available hardware. We don't gate Wasm on the presence of NEON.
+ if (HasNEON()) {
+ // NEON available: The VLD1 multiple-single-elements variant will only
+ // trap if SCTLR.A==1, but we already assume (for integer accesses) that
+ // the hardware/OS handles that transparently.
+ //
+ // An additional complication is that if we're targeting the high single
+ // then an unaligned load is not possible, and we may need to go via the
+ // FPR scratch.
+ if (byteSize == 4 && dest.code() & 1) {
+ ScratchFloat32Scope fscratch(asMasm());
+ load = as_vldr_unaligned(fscratch, scratch);
+ as_vmov(dest, fscratch);
+ } else {
+ load = as_vldr_unaligned(dest, scratch);
+ }
+ } else {
+ // NEON not available: Load to GPR scratch, move to FPR destination. We
+ // don't have adjacent scratches for the f64, so use individual LDRs,
+ // not LDRD.
+ SecondScratchRegisterScope scratch2(asMasm());
+ if (byteSize == 4) {
+ load = as_dtr(IsLoad, 32, Offset, scratch2,
+ DTRAddr(scratch, DtrOffImm(0)), Always);
+ as_vxfer(scratch2, InvalidReg, VFPRegister(dest), CoreToFloat,
+ Always);
+ } else {
+ // The trap information is associated with the load of the high word,
+ // which must be done first.
+ load = as_dtr(IsLoad, 32, Offset, scratch2,
+ DTRAddr(scratch, DtrOffImm(4)), Always);
+ as_dtr(IsLoad, 32, Offset, scratch, DTRAddr(scratch, DtrOffImm(0)),
+ Always);
+ as_vxfer(scratch, scratch2, VFPRegister(dest), CoreToFloat, Always);
+ }
+ }
+ append(access, load.getOffset());
+ } else {
+ load = ma_dataTransferN(IsLoad, byteSize * 8, isSigned, memoryBase, ptr,
+ output.gpr());
+ append(access, load.getOffset());
+ }
+ }
+
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerARM::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Register64 val64,
+ Register memoryBase, Register ptr,
+ Register ptrScratch) {
+ static_assert(INT64LOW_OFFSET == 0);
+ static_assert(INT64HIGH_OFFSET == 4);
+
+ MOZ_ASSERT(ptr == ptrScratch);
+
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+
+ unsigned byteSize = access.byteSize();
+ Scalar::Type type = access.type();
+
+ // Maybe add the offset.
+ if (offset || type == Scalar::Int64) {
+ ScratchRegisterScope scratch(asMasm());
+ // We need to store the high word of an Int64 first, so always adjust the
+ // pointer to point to the high word in this case. The adjustment is always
+ // OK because wasmMaxOffsetGuardLimit is computed so that we can add up to
+ // sizeof(LargestValue)-1 without skipping past the guard page, and we
+ // assert above that offset < wasmMaxOffsetGuardLimit.
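+ // For example (illustrative), an i64 store at offset 8 first advances ptr
+ // to offset 12 (the high word), stores the high word, then steps ptr back
+ // by 4 to store the low word.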
+ if (type == Scalar::Int64) {
+ offset += INT64HIGH_OFFSET;
+ }
+ if (offset) {
+ ma_add(Imm32(offset), ptr, scratch);
+ }
+ }
+
+ // NOTE: the generated code must match the assembly code in gen_store in
+ // GenerateAtomicOperations.py
+ asMasm().memoryBarrierBefore(access.sync());
+
+ BufferOffset store;
+ if (type == Scalar::Int64) {
+ store = ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false,
+ memoryBase, ptr, val64.high);
+ append(access, store.getOffset());
+
+ as_sub(ptr, ptr, Imm8(INT64HIGH_OFFSET));
+
+ store = ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true,
+ memoryBase, ptr, val64.low);
+ append(access, store.getOffset());
+ } else {
+ if (value.isFloat()) {
+ ScratchRegisterScope scratch(asMasm());
+ FloatRegister val = value.fpu();
+ MOZ_ASSERT((byteSize == 4) == val.isSingle());
+ ma_add(memoryBase, ptr, scratch);
+
+ // See comments above at wasmLoadImpl for more about this logic.
+ if (HasNEON()) {
+ if (byteSize == 4 && (val.code() & 1)) {
+ ScratchFloat32Scope fscratch(asMasm());
+ as_vmov(fscratch, val);
+ store = as_vstr_unaligned(fscratch, scratch);
+ } else {
+ store = as_vstr_unaligned(val, scratch);
+ }
+ } else {
+ // NEON not available: Move FPR to GPR scratch, store GPR. We have only
+ // one scratch to hold the value, so for f64 we must do two separate
+ // moves. That's OK - this is really a corner case. If we really cared
+ // we would pass in a temp to avoid the second move.
+ SecondScratchRegisterScope scratch2(asMasm());
+ if (byteSize == 4) {
+ as_vxfer(scratch2, InvalidReg, VFPRegister(val), FloatToCore, Always);
+ store = as_dtr(IsStore, 32, Offset, scratch2,
+ DTRAddr(scratch, DtrOffImm(0)), Always);
+ } else {
+ // The trap information is associated with the store of the high word,
+ // which must be done first.
+ as_vxfer(scratch2, InvalidReg, VFPRegister(val).singleOverlay(1),
+ FloatToCore, Always);
+ store = as_dtr(IsStore, 32, Offset, scratch2,
+ DTRAddr(scratch, DtrOffImm(4)), Always);
+ as_vxfer(scratch2, InvalidReg, VFPRegister(val).singleOverlay(0),
+ FloatToCore, Always);
+ as_dtr(IsStore, 32, Offset, scratch2, DTRAddr(scratch, DtrOffImm(0)),
+ Always);
+ }
+ }
+ append(access, store.getOffset());
+ } else {
+ bool isSigned = type == Scalar::Uint32 ||
+ type == Scalar::Int32; // see AsmJSStoreHeap;
+ Register val = value.gpr();
+
+ store = ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned,
+ memoryBase, ptr, val);
+ append(access, store.getOffset());
+ }
+ }
+
+ asMasm().memoryBarrierAfter(access.sync());
+}
diff --git a/js/src/jit/arm/MacroAssembler-arm.h b/js/src/jit/arm/MacroAssembler-arm.h
new file mode 100644
index 0000000000..958cdf4718
--- /dev/null
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -0,0 +1,1392 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_MacroAssembler_arm_h
+#define jit_arm_MacroAssembler_arm_h
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/MoveResolver.h"
+#include "vm/BytecodeUtil.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCodegenTypes.h"
+
+namespace js {
+namespace jit {
+
+static Register CallReg = ip;
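+// defaultShift scales a Value index into a byte offset: with the 8-byte
+// nunbox32 JS::Value, index << defaultShift == index * sizeof(JS::Value),
+// as the static_assert below checks.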
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value));
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
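+// On ARM's nunbox32 layout the tag already lives in the value's type
+// register, so no scratch register is needed here and release()/reacquire()
+// are no-ops.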
+
+class ScratchTagScope {
+ const ValueOperand& v_;
+
+ public:
+ ScratchTagScope(MacroAssembler&, const ValueOperand& v) : v_(v) {}
+ operator Register() { return v_.typeReg(); }
+ void release() {}
+ void reacquire() {}
+};
+
+class ScratchTagScopeRelease {
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope*) {}
+};
+
+// MacroAssemblerARM inherits from Assembler, defined in
+// Assembler-arm.{h,cpp}.
+class MacroAssemblerARM : public Assembler {
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ protected:
+ // On ARM, some instructions require a second scratch register. This
+ // register defaults to lr, since it's non-allocatable (as it can be
+ // clobbered by some instructions). Allow the baseline compiler to override
+ // this though, since baseline IC stubs rely on lr holding the return
+ // address.
+ Register secondScratchReg_;
+
+ public:
+ Register getSecondScratchReg() const { return secondScratchReg_; }
+
+ public:
+ // Higher level tag testing code.
+ // TODO: Can probably remove the Operand versions.
+ Operand ToPayload(Operand base) const {
+ return Operand(Register::FromCode(base.base()), base.disp());
+ }
+ Address ToPayload(const Address& base) const { return base; }
+ BaseIndex ToPayload(const BaseIndex& base) const { return base; }
+
+ protected:
+ Operand ToType(Operand base) const {
+ return Operand(Register::FromCode(base.base()),
+ base.disp() + sizeof(void*));
+ }
+ Address ToType(const Address& base) const {
+ return ToType(Operand(base)).toAddress();
+ }
+ BaseIndex ToType(const BaseIndex& base) const {
+ return BaseIndex(base.base, base.index, base.scale,
+ base.offset + sizeof(void*));
+ }
+
+ Address ToPayloadAfterStackPush(const Address& base) const {
+ // If we are based on StackPointer, pass over the type tag just pushed.
+ if (base.base == StackPointer) {
+ return Address(base.base, base.offset + sizeof(void*));
+ }
+ return ToPayload(base);
+ }
+
+ public:
+ MacroAssemblerARM() : secondScratchReg_(lr) {}
+
+ void setSecondScratchReg(Register reg) {
+ MOZ_ASSERT(reg != ScratchRegister);
+ secondScratchReg_ = reg;
+ }
+
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(Register src, FloatRegister dest);
+ void convertInt32ToDouble(const Address& src, FloatRegister dest);
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest,
+ Condition c = Always);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true) {
+ convertDoubleToInt32(src, dest, fail, negativeZeroCheck);
+ }
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void wasmTruncateToInt32(FloatRegister input, Register output,
+ MIRType fromType, bool isUnsigned, bool isSaturating,
+ Label* oolEntry);
+ void outOfLineWasmTruncateToIntCheck(FloatRegister input, MIRType fromType,
+ MIRType toType, TruncFlags flags,
+ Label* rejoin,
+ wasm::BytecodeOffset trapOffset);
+
+ // Somewhat direct wrappers for the low-level assembler functions (bitops).
+ // Attempt to encode a virtual ALU instruction using two real instructions.
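+ // For example (illustrative), an immediate such as 0x00ff00ff has no single
+ // ARM modified-immediate encoding but splits into two encodable halves,
+ // e.g. add r0, r1, #0x00ff0000 followed by add r0, r0, #0x000000ff.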
+ private:
+ bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op, SBit s,
+ Condition c);
+
+ public:
+ void ma_alu(Register src1, Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, ALUOp op, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_nop();
+
+ BufferOffset ma_movPatchable(Imm32 imm, Register dest,
+ Assembler::Condition c);
+ BufferOffset ma_movPatchable(ImmPtr imm, Register dest,
+ Assembler::Condition c);
+
+ // To be used with Iter := InstructionIterator or BufferInstructionIterator.
+ template <class Iter>
+ static void ma_mov_patch(Imm32 imm, Register dest, Assembler::Condition c,
+ RelocStyle rs, Iter iter);
+
+ // ALU based ops
+ // mov
+ void ma_mov(Register src, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ void ma_mov(Imm32 imm, Register dest, Condition c = Always);
+ void ma_mov(ImmWord imm, Register dest, Condition c = Always);
+
+ void ma_mov(ImmGCPtr ptr, Register dest);
+
+ // Shifts (just a move with a shifting op2)
+ void ma_lsl(Imm32 shift, Register src, Register dst);
+ void ma_lsr(Imm32 shift, Register src, Register dst);
+ void ma_asr(Imm32 shift, Register src, Register dst);
+ void ma_ror(Imm32 shift, Register src, Register dst);
+ void ma_rol(Imm32 shift, Register src, Register dst);
+
+ void ma_lsl(Register shift, Register src, Register dst);
+ void ma_lsr(Register shift, Register src, Register dst);
+ void ma_asr(Register shift, Register src, Register dst);
+ void ma_ror(Register shift, Register src, Register dst);
+ void ma_rol(Register shift, Register src, Register dst,
+ AutoRegisterScope& scratch);
+
+ // Move not (dest <- ~src)
+ void ma_mvn(Register src1, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Negate (dest <- -src) implemented as rsb dest, src, 0
+ void ma_neg(Register src, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ void ma_neg(Register64 src, Register64 dest);
+
+ // And
+ void ma_and(Register src, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ void ma_and(Register src1, Register src2, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ void ma_and(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_and(Imm32 imm, Register src1, Register dest,
+ AutoRegisterScope& scratch, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
+ void ma_bic(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ // Exclusive or
+ void ma_eor(Register src, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ void ma_eor(Register src1, Register src2, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ void ma_eor(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_eor(Imm32 imm, Register src1, Register dest,
+ AutoRegisterScope& scratch, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Or
+ void ma_orr(Register src, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ void ma_orr(Register src1, Register src2, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ void ma_orr(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+
+ void ma_orr(Imm32 imm, Register src1, Register dest,
+ AutoRegisterScope& scratch, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Arithmetic based ops.
+ // Add with carry:
+ void ma_adc(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_adc(Register src, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_adc(Register src1, Register src2, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_adc(Register src1, Imm32 op, Register dest,
+ AutoRegisterScope& scratch, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Add:
+ void ma_add(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_add(Register src1, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_add(Register src1, Register src2, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_add(Register src1, Operand op, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_add(Register src1, Imm32 op, Register dest,
+ AutoRegisterScope& scratch, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Subtract with carry:
+ void ma_sbc(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_sbc(Register src1, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_sbc(Register src1, Register src2, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Subtract:
+ void ma_sub(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_sub(Register src1, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_sub(Register src1, Register src2, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_sub(Register src1, Operand op, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_sub(Register src1, Imm32 op, Register dest,
+ AutoRegisterScope& scratch, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Reverse subtract:
+ void ma_rsb(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_rsb(Register src1, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_rsb(Register src1, Register src2, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_rsb(Register src1, Imm32 op2, Register dest,
+ AutoRegisterScope& scratch, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Reverse subtract with carry:
+ void ma_rsc(Imm32 imm, Register dest, AutoRegisterScope& scratch,
+ SBit s = LeaveCC, Condition c = Always);
+ void ma_rsc(Register src1, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+ void ma_rsc(Register src1, Register src2, Register dest, SBit s = LeaveCC,
+ Condition c = Always);
+
+ // Compares/tests.
+ // Compare negative (sets condition codes as src1 + src2 would):
+ void ma_cmn(Register src1, Imm32 imm, AutoRegisterScope& scratch,
+ Condition c = Always);
+ void ma_cmn(Register src1, Register src2, Condition c = Always);
+ void ma_cmn(Register src1, Operand op, Condition c = Always);
+
+ // Compare (src1 - src2):
+ void ma_cmp(Register src1, Imm32 imm, AutoRegisterScope& scratch,
+ Condition c = Always);
+ void ma_cmp(Register src1, ImmTag tag, Condition c = Always);
+ void ma_cmp(Register src1, ImmWord ptr, AutoRegisterScope& scratch,
+ Condition c = Always);
+ void ma_cmp(Register src1, ImmGCPtr ptr, AutoRegisterScope& scratch,
+ Condition c = Always);
+ void ma_cmp(Register src1, Operand op, AutoRegisterScope& scratch,
+ AutoRegisterScope& scratch2, Condition c = Always);
+ void ma_cmp(Register src1, Register src2, Condition c = Always);
+
+ // Test for equality, (src1 ^ src2):
+ void ma_teq(Register src1, Imm32 imm, AutoRegisterScope& scratch,
+ Condition c = Always);
+ void ma_teq(Register src1, Register src2, Condition c = Always);
+ void ma_teq(Register src1, Operand op, Condition c = Always);
+
+ // Test (src1 & src2):
+ void ma_tst(Register src1, Imm32 imm, AutoRegisterScope& scratch,
+ Condition c = Always);
+ void ma_tst(Register src1, Register src2, Condition c = Always);
+ void ma_tst(Register src1, Operand op, Condition c = Always);
+
+ // Multiplies. For now, there are only two that we care about.
+ void ma_mul(Register src1, Register src2, Register dest);
+ void ma_mul(Register src1, Imm32 imm, Register dest,
+ AutoRegisterScope& scratch);
+ Condition ma_check_mul(Register src1, Register src2, Register dest,
+ AutoRegisterScope& scratch, Condition cond);
+ Condition ma_check_mul(Register src1, Imm32 imm, Register dest,
+ AutoRegisterScope& scratch, Condition cond);
+
+ void ma_umull(Register src1, Imm32 imm, Register destHigh, Register destLow,
+ AutoRegisterScope& scratch);
+ void ma_umull(Register src1, Register src2, Register destHigh,
+ Register destLow);
+
+ // Fast mod. Uses scratch registers and thus needs to be in the assembler;
+ // implicitly assumes that we can overwrite dest at the beginning of the
+ // sequence.
+ void ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
+ AutoRegisterScope& scratch, AutoRegisterScope& scratch2,
+ int32_t shift);
+
+ // Mod - depends on integer divide instructions being supported.
+ void ma_smod(Register num, Register div, Register dest,
+ AutoRegisterScope& scratch);
+ void ma_umod(Register num, Register div, Register dest,
+ AutoRegisterScope& scratch);
+
+ // Division - depends on integer divide instructions being supported.
+ void ma_sdiv(Register num, Register div, Register dest,
+ Condition cond = Always);
+ void ma_udiv(Register num, Register div, Register dest,
+ Condition cond = Always);
+ // Misc operations
+ void ma_clz(Register src, Register dest, Condition cond = Always);
+ void ma_ctz(Register src, Register dest, AutoRegisterScope& scratch);
+ // Memory:
+ // Shortcut for when we know we're transferring 32 bits of data.
+ void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
+ AutoRegisterScope& scratch, Index mode = Offset,
+ Condition cc = Always);
+ void ma_dtr(LoadStore ls, Register rt, const Address& addr,
+ AutoRegisterScope& scratch, Index mode, Condition cc);
+
+ void ma_str(Register rt, DTRAddr addr, Index mode = Offset,
+ Condition cc = Always);
+ void ma_str(Register rt, const Address& addr, AutoRegisterScope& scratch,
+ Index mode = Offset, Condition cc = Always);
+
+ void ma_ldr(DTRAddr addr, Register rt, Index mode = Offset,
+ Condition cc = Always);
+ void ma_ldr(const Address& addr, Register rt, AutoRegisterScope& scratch,
+ Index mode = Offset, Condition cc = Always);
+
+ void ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset,
+ Condition cc = Always);
+ void ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset,
+ Condition cc = Always);
+ void ma_ldrsh(EDtrAddr addr, Register rt, Index mode = Offset,
+ Condition cc = Always);
+ void ma_ldrsb(EDtrAddr addr, Register rt, Index mode = Offset,
+ Condition cc = Always);
+ void ma_ldrd(EDtrAddr addr, Register rt, mozilla::DebugOnly<Register> rt2,
+ Index mode = Offset, Condition cc = Always);
+ void ma_strb(Register rt, DTRAddr addr, Index mode = Offset,
+ Condition cc = Always);
+ void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset,
+ Condition cc = Always);
+ void ma_strd(Register rt, mozilla::DebugOnly<Register> rt2, EDtrAddr addr,
+ Index mode = Offset, Condition cc = Always);
+
+ // Specialty for moving N bits of data, where N == 8, 16, 32, or 64.
+ BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
+ Register rn, Register rm, Register rt,
+ AutoRegisterScope& scratch, Index mode = Offset,
+ Condition cc = Always, Scale scale = TimesOne);
+
+ BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
+ Register rn, Register rm, Register rt,
+ Index mode = Offset, Condition cc = Always);
+
+ BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
+ Register rn, Imm32 offset, Register rt,
+ AutoRegisterScope& scratch, Index mode = Offset,
+ Condition cc = Always);
+
+ void ma_pop(Register r);
+ void ma_popn_pc(Imm32 n, AutoRegisterScope& scratch,
+ AutoRegisterScope& scratch2);
+ void ma_push(Register r);
+ void ma_push_sp(Register r, AutoRegisterScope& scratch);
+
+ void ma_vpop(VFPRegister r);
+ void ma_vpush(VFPRegister r);
+
+ // Barriers.
+ void ma_dmb(BarrierOption option = BarrierSY);
+ void ma_dsb(BarrierOption option = BarrierSY);
+
+ // Branches when done from within arm-specific code.
+ BufferOffset ma_b(Label* dest, Condition c = Always);
+ void ma_b(void* target, Condition c = Always);
+ void ma_bx(Register dest, Condition c = Always);
+
+ // This is almost NEVER necessary; we'll basically never be calling a label
+ // except, possibly, in the crazy bailout-table case.
+ void ma_bl(Label* dest, Condition c = Always);
+
+ void ma_blx(Register dest, Condition c = Always);
+
+ // VFP/ALU:
+ void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+ void ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+ void ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+ void ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+ void ma_vneg(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vmov(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vmov_f32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+ void ma_vabs(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vabs_f32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+
+ void ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc = Always);
+ void ma_vsqrt_f32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+
+ void ma_vimm(double value, FloatRegister dest, Condition cc = Always);
+ void ma_vimm_f32(float value, FloatRegister dest, Condition cc = Always);
+
+ void ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc = Always);
+ void ma_vcmp_f32(FloatRegister src1, FloatRegister src2,
+ Condition cc = Always);
+ void ma_vcmpz(FloatRegister src1, Condition cc = Always);
+ void ma_vcmpz_f32(FloatRegister src1, Condition cc = Always);
+
+ void ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+ void ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+ void ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+ void ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+ void ma_vneg_f32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+
+ // Source is F64, dest is I32:
+ void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+ void ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+
+ // Source is I32, dest is F64:
+ void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+ void ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+
+ // Source is F32, dest is I32:
+ void ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+ void ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+
+ // Source is I32, dest is F32:
+ void ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+ void ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always);
+
+ // Transfer (do not coerce) a float into a gpr.
+ void ma_vxfer(VFPRegister src, Register dest, Condition cc = Always);
+ // Transfer (do not coerce) a double into a pair of gprs.
+ void ma_vxfer(VFPRegister src, Register dest1, Register dest2,
+ Condition cc = Always);
+
+ // Transfer (do not coerce) a gpr into a float
+ void ma_vxfer(Register src, FloatRegister dest, Condition cc = Always);
+ // Transfer (do not coerce) a pair of gprs into a double.
+ void ma_vxfer(Register src1, Register src2, FloatRegister dest,
+ Condition cc = Always);
+
+ BufferOffset ma_vdtr(LoadStore ls, const Address& addr, VFPRegister dest,
+ AutoRegisterScope& scratch, Condition cc = Always);
+
+ BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
+ BufferOffset ma_vldr(const Address& addr, VFPRegister dest,
+ AutoRegisterScope& scratch, Condition cc = Always);
+ BufferOffset ma_vldr(VFPRegister src, Register base, Register index,
+ AutoRegisterScope& scratch, int32_t shift = defaultShift,
+ Condition cc = Always);
+
+ BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
+ BufferOffset ma_vstr(VFPRegister src, const Address& addr,
+ AutoRegisterScope& scratch, Condition cc = Always);
+ BufferOffset ma_vstr(VFPRegister src, Register base, Register index,
+ AutoRegisterScope& scratch, AutoRegisterScope& scratch2,
+ int32_t shift, int32_t offset, Condition cc = Always);
+ BufferOffset ma_vstr(VFPRegister src, Register base, Register index,
+ AutoRegisterScope& scratch, int32_t shift,
+ Condition cc = Always);
+
+ void ma_call(ImmPtr dest);
+
+ // Float registers can only be loaded/stored in continuous runs when using
+ // vstm/vldm. This function breaks the set into continuous runs and
+ // loads/stores them at [rm]. rm will be modified and left in a state
+ // logically suitable for the next load/store. Returns the offset from [rm]
+ // for the logical next load/store.
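+ // For example (illustrative), the set {d0, d1, d2, d5} splits into the runs
+ // d0-d2 and d5, each transferred with a single vldm/vstm.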
+ int32_t transferMultipleByRuns(FloatRegisterSet set, LoadStore ls,
+ Register rm, DTMMode mode) {
+ if (mode == IA) {
+ return transferMultipleByRunsImpl<FloatRegisterForwardIterator>(
+ set, ls, rm, mode, 1);
+ }
+ if (mode == DB) {
+ return transferMultipleByRunsImpl<FloatRegisterBackwardIterator>(
+ set, ls, rm, mode, -1);
+ }
+ MOZ_CRASH("Invalid data transfer addressing mode");
+ }
+
+ // `outAny` is valid if and only if `out64` == Register64::Invalid().
+ void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register ptrScratch, AnyRegister outAny,
+ Register64 out64);
+
+ // `valAny` is valid if and only if `val64` == Register64::Invalid().
+ void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valAny,
+ Register64 val64, Register memoryBase, Register ptr,
+ Register ptrScratch);
+
+ private:
+ // Implementation for transferMultipleByRuns so we can use different
+ // iterators for forward/backward traversals. The sign argument should be 1
+ // if we traverse forwards, -1 if we traverse backwards.
+ template <typename RegisterIterator>
+ int32_t transferMultipleByRunsImpl(FloatRegisterSet set, LoadStore ls,
+ Register rm, DTMMode mode, int32_t sign) {
+ MOZ_ASSERT(sign == 1 || sign == -1);
+
+ int32_t delta = sign * sizeof(float);
+ int32_t offset = 0;
+ // Build up a new set, which is the sum of all of the single and double
+ // registers. This set can have up to 48 registers in total:
+ // s0-s31 and d16-d31.
+ FloatRegisterSet mod = set.reduceSetForPush();
+
+ RegisterIterator iter(mod);
+ while (iter.more()) {
+ startFloatTransferM(ls, rm, mode, WriteBack);
+ int32_t reg = (*iter).code();
+ do {
+ offset += delta;
+ if ((*iter).isDouble()) {
+ offset += delta;
+ }
+ transferFloatReg(*iter);
+ } while ((++iter).more() && int32_t((*iter).code()) == (reg += sign));
+ finishFloatTransfer();
+ }
+ return offset;
+ }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerARMCompat : public MacroAssemblerARM {
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ public:
+ MacroAssemblerARMCompat() {}
+
+ public:
+ // Jumps + other functions that should be called from non-ARM-specific
+ // code. Basically, an x86 front end on top of the ARM code.
+ void j(Condition code, Label* dest) { as_b(dest, code); }
+ void j(Label* dest) { as_b(dest, Always); }
+
+ void mov(Register src, Register dest) { ma_mov(src, dest); }
+ void mov(ImmWord imm, Register dest) { ma_mov(Imm32(imm.value), dest); }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void branch(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ScratchRegisterScope scratch(asMasm());
+ ma_movPatchable(ImmPtr(c->raw()), scratch, Always);
+ ma_bx(scratch);
+ }
+ void branch(const Register reg) { ma_bx(reg); }
+ void nop() { ma_nop(); }
+ void shortJumpSizedNop() { ma_nop(); }
+ void ret() { ma_pop(pc); }
+ void retn(Imm32 n) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_popn_pc(n, scratch, scratch2);
+ }
+ void push(Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_mov(imm, scratch);
+ ma_push(scratch);
+ }
+ void push(ImmWord imm) { push(Imm32(imm.value)); }
+ void push(ImmGCPtr imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_mov(imm, scratch);
+ ma_push(scratch);
+ }
+ void push(const Address& addr) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(addr, scratch, scratch2);
+ ma_push(scratch);
+ }
+ void push(Register reg) {
+ if (reg == sp) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_push_sp(reg, scratch);
+ } else {
+ ma_push(reg);
+ }
+ }
+ void push(FloatRegister reg) { ma_vpush(VFPRegister(reg)); }
+ void pushWithPadding(Register reg, const Imm32 extraSpace) {
+ ScratchRegisterScope scratch(asMasm());
+ Imm32 totSpace = Imm32(extraSpace.value + 4);
+ ma_dtr(IsStore, sp, totSpace, reg, scratch, PreIndex);
+ }
+ void pushWithPadding(Imm32 imm, const Imm32 extraSpace) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ Imm32 totSpace = Imm32(extraSpace.value + 4);
+ ma_mov(imm, scratch);
+ ma_dtr(IsStore, sp, totSpace, scratch, scratch2, PreIndex);
+ }
+
+ void pop(Register reg) { ma_pop(reg); }
+ void pop(FloatRegister reg) { ma_vpop(VFPRegister(reg)); }
+
+ void popN(Register reg, Imm32 extraSpace) {
+ ScratchRegisterScope scratch(asMasm());
+ Imm32 totSpace = Imm32(extraSpace.value + 4);
+ ma_dtr(IsLoad, sp, totSpace, reg, scratch, PostIndex);
+ }
+
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a BLX or NOP instruction. ToggleCall can be used to patch this
+ // instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ ScratchRegisterScope scratch(asMasm());
+ CodeOffset label = movWithPatch(imm, scratch);
+ ma_push(scratch);
+ return label;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset label = CodeOffset(currentOffset());
+ ma_movPatchable(Imm32(imm.value), dest, Always);
+ return label;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void jump(Label* label) { as_b(label); }
+ void jump(JitCode* code) { branch(code); }
+ void jump(ImmPtr ptr) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ptr, scratch);
+ ma_bx(scratch);
+ }
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+ void jump(Register reg) { ma_bx(reg); }
+ void jump(const Address& addr) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(addr, scratch, scratch2);
+ ma_bx(scratch);
+ }
+
+ void negl(Register reg) { ma_neg(reg, reg, SetCC); }
+ void test32(Register lhs, Register rhs) { ma_tst(lhs, rhs); }
+ void test32(Register lhs, Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_tst(lhs, imm, scratch);
+ }
+ void test32(const Address& addr, Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ldr(addr, scratch, scratch2);
+ ma_tst(scratch, imm, scratch2);
+ }
+ void testPtr(Register lhs, Register rhs) { test32(lhs, rhs); }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ MOZ_ASSERT(value.typeReg() == tag);
+ }
+
+ // Higher level tag testing code.
+ Condition testInt32(Condition cond, const ValueOperand& value);
+ Condition testBoolean(Condition cond, const ValueOperand& value);
+ Condition testDouble(Condition cond, const ValueOperand& value);
+ Condition testNull(Condition cond, const ValueOperand& value);
+ Condition testUndefined(Condition cond, const ValueOperand& value);
+ Condition testString(Condition cond, const ValueOperand& value);
+ Condition testSymbol(Condition cond, const ValueOperand& value);
+ Condition testBigInt(Condition cond, const ValueOperand& value);
+ Condition testObject(Condition cond, const ValueOperand& value);
+ Condition testNumber(Condition cond, const ValueOperand& value);
+ Condition testMagic(Condition cond, const ValueOperand& value);
+
+ Condition testPrimitive(Condition cond, const ValueOperand& value);
+ Condition testGCThing(Condition cond, const ValueOperand& value);
+
+ // Register-based tests.
+ Condition testInt32(Condition cond, Register tag);
+ Condition testBoolean(Condition cond, Register tag);
+ Condition testNull(Condition cond, Register tag);
+ Condition testUndefined(Condition cond, Register tag);
+ Condition testString(Condition cond, Register tag);
+ Condition testSymbol(Condition cond, Register tag);
+ Condition testBigInt(Condition cond, Register tag);
+ Condition testObject(Condition cond, Register tag);
+ Condition testDouble(Condition cond, Register tag);
+ Condition testNumber(Condition cond, Register tag);
+ Condition testMagic(Condition cond, Register tag);
+ Condition testPrimitive(Condition cond, Register tag);
+ Condition testGCThing(Condition cond, Register tag);
+
+ Condition testGCThing(Condition cond, const Address& address);
+ Condition testMagic(Condition cond, const Address& address);
+ Condition testInt32(Condition cond, const Address& address);
+ Condition testDouble(Condition cond, const Address& address);
+ Condition testBoolean(Condition cond, const Address& address);
+ Condition testNull(Condition cond, const Address& address);
+ Condition testUndefined(Condition cond, const Address& address);
+ Condition testString(Condition cond, const Address& address);
+ Condition testSymbol(Condition cond, const Address& address);
+ Condition testBigInt(Condition cond, const Address& address);
+ Condition testObject(Condition cond, const Address& address);
+ Condition testNumber(Condition cond, const Address& address);
+
+ Condition testUndefined(Condition cond, const BaseIndex& src);
+ Condition testNull(Condition cond, const BaseIndex& src);
+ Condition testBoolean(Condition cond, const BaseIndex& src);
+ Condition testString(Condition cond, const BaseIndex& src);
+ Condition testSymbol(Condition cond, const BaseIndex& src);
+ Condition testBigInt(Condition cond, const BaseIndex& src);
+ Condition testInt32(Condition cond, const BaseIndex& src);
+ Condition testObject(Condition cond, const BaseIndex& src);
+ Condition testDouble(Condition cond, const BaseIndex& src);
+ Condition testMagic(Condition cond, const BaseIndex& src);
+ Condition testGCThing(Condition cond, const BaseIndex& src);
+
+ // Unboxing code.
+ void unboxNonDouble(const ValueOperand& operand, Register dest,
+ JSValueType type);
+ void unboxNonDouble(const Address& src, Register dest, JSValueType type);
+ void unboxNonDouble(const BaseIndex& src, Register dest, JSValueType type);
+ void unboxInt32(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
+ }
+ void unboxInt32(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
+ }
+ void unboxInt32(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
+ }
+ void unboxBoolean(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
+ }
+ void unboxBoolean(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
+ }
+ void unboxBoolean(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
+ }
+ void unboxString(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxString(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxSymbol(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+ }
+ void unboxSymbol(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+ }
+ void unboxBigInt(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+ void unboxBigInt(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+ void unboxObject(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObjectOrNull(const ValueOperand& src, Register dest) {
+ // Due to Spectre mitigation logic (see Value.h), if the value is an Object
+ // then this yields the object; otherwise it yields zero (null), as desired.
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObjectOrNull(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObjectOrNull(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxDouble(const ValueOperand& src, FloatRegister dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxDouble(const BaseIndex& src, FloatRegister dest);
+
+ void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type);
+
+ // See comment in MacroAssembler-x64.h.
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ load32(ToPayload(src), dest);
+ }
+
+ void notBoolean(const ValueOperand& val) {
+ as_eor(val.payloadReg(), val.payloadReg(), Imm8(1));
+ }
+
+ template <typename T>
+ void fallibleUnboxPtrImpl(const T& src, Register dest, JSValueType type,
+ Label* fail);
+
+ // Boxing code.
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch);
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxNonDouble(value, value.payloadReg(), JSVAL_TYPE_OBJECT);
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxNonDouble(value, value.payloadReg(), JSVAL_TYPE_SYMBOL);
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch);
+ [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ return value.typeReg();
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& src, FloatRegister dest);
+ void loadInt32OrDouble(Register base, Register index, FloatRegister dest,
+ int32_t shift = defaultShift);
+ void loadConstantDouble(double dp, FloatRegister dest);
+
+ // Treat the value as a boolean, and set condition codes accordingly.
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand);
+ Condition testBooleanTruthy(bool truthy, const ValueOperand& operand);
+ Condition testDoubleTruthy(bool truthy, FloatRegister reg);
+ Condition testStringTruthy(bool truthy, const ValueOperand& value);
+ Condition testBigIntTruthy(bool truthy, const ValueOperand& value);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_ldr(address, dest.gpr(), scratch);
+ }
+ }
+
+ void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
+ } else {
+ load32(address, dest.gpr());
+ }
+ }
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes,
+ JSValueType) {
+ switch (nbytes) {
+ case 4:
+ storePtr(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void storeValue(ValueOperand val, const Address& dst);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ int32_t payloadoffset = dest.offset + NUNBOX32_PAYLOAD_OFFSET;
+ int32_t typeoffset = dest.offset + NUNBOX32_TYPE_OFFSET;
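+ // A DTR immediate offset is limited to 12 bits (|offset| <= 4095), hence
+ // the range checks on the offsets below.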
+
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+
+ // Store the payload.
+ if (payloadoffset < 4096 && payloadoffset > -4096) {
+ ma_str(reg, DTRAddr(scratch, DtrOffImm(payloadoffset)));
+ } else {
+ ma_str(reg, Address(scratch, payloadoffset), scratch2);
+ }
+
+ // Store the type.
+ if (typeoffset < 4096 && typeoffset > -4096) {
+ // Encodable as DTRAddr, so only two instructions needed.
+ ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(typeoffset)));
+ } else {
+ // Since there are only two scratch registers, the offset must be
+ // applied early using a third instruction to be safe.
+ ma_add(Imm32(typeoffset), scratch, scratch2);
+ ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
+ }
+ }
+ void storeValue(JSValueType type, Register reg, Address dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_str(reg, dest, scratch2);
+ ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch);
+ ma_str(scratch, Address(dest.base, dest.offset + NUNBOX32_TYPE_OFFSET),
+ scratch2);
+ }
+ void storeValue(const Value& val, const Address& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ ma_mov(Imm32(val.toNunboxTag()), scratch);
+ ma_str(scratch, ToType(dest), scratch2);
+ if (val.isGCThing()) {
+ ma_mov(ImmGCPtr(val.toGCThing()), scratch);
+ } else {
+ ma_mov(Imm32(val.toNunboxPayload()), scratch);
+ }
+ ma_str(scratch, ToPayload(dest), scratch2);
+ }
+ void storeValue(const Value& val, BaseIndex dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ int32_t typeoffset = dest.offset + NUNBOX32_TYPE_OFFSET;
+ int32_t payloadoffset = dest.offset + NUNBOX32_PAYLOAD_OFFSET;
+
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+
+ // Store the type.
+ if (typeoffset < 4096 && typeoffset > -4096) {
+ ma_mov(Imm32(val.toNunboxTag()), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(typeoffset)));
+ } else {
+ ma_add(Imm32(typeoffset), scratch, scratch2);
+ ma_mov(Imm32(val.toNunboxTag()), scratch2);
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
+ // Restore scratch for the payload store.
+ ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
+ }
+
+ // Store the payload, marking if necessary.
+ if (payloadoffset < 4096 && payloadoffset > -4096) {
+ if (val.isGCThing()) {
+ ma_mov(ImmGCPtr(val.toGCThing()), scratch2);
+ } else {
+ ma_mov(Imm32(val.toNunboxPayload()), scratch2);
+ }
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(payloadoffset)));
+ } else {
+ ma_add(Imm32(payloadoffset), scratch, scratch2);
+ if (val.isGCThing()) {
+ ma_mov(ImmGCPtr(val.toGCThing()), scratch2);
+ } else {
+ ma_mov(Imm32(val.toNunboxPayload()), scratch2);
+ }
+ ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
+ }
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
+
+ void storePrivateValue(Register src, const Address& dest) {
+ store32(Imm32(0), ToType(dest));
+ store32(src, ToPayload(dest));
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ store32(Imm32(0), ToType(dest));
+ storePtr(imm, ToPayload(dest));
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(Operand dest, ValueOperand val) {
+ loadValue(dest.toAddress(), val);
+ }
+ void loadValue(const BaseIndex& addr, ValueOperand val);
+
+ // Like loadValue but guaranteed to not use LDRD or LDM instructions (these
+ // don't support unaligned accesses).
+ void loadUnalignedValue(const Address& src, ValueOperand dest);
+
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isGCThing()) {
+ push(ImmGCPtr(val.toGCThing()));
+ } else {
+ push(Imm32(val.toNunboxPayload()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_push(reg);
+ }
+ void pushValue(const Address& addr);
+ void pushValue(const BaseIndex& addr, Register scratch);
+
+ void storePayload(const Value& val, const Address& dest);
+ void storePayload(Register src, const Address& dest);
+ void storePayload(const Value& val, const BaseIndex& dest);
+ void storePayload(Register src, const BaseIndex& dest);
+ void storeTypeTag(ImmTag tag, const Address& dest);
+ void storeTypeTag(ImmTag tag, const BaseIndex& dest);
+
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ void not32(Register reg);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ // load16SignExtend uses |ldrsh|, which supports unaligned access.
+ load16SignExtend(src, dest);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ // load16ZeroExtend uses |ldrh|, which supports unaligned access.
+ load16ZeroExtend(src, dest);
+ }
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ // load32 uses |ldr|, which supports unaligned access.
+ load32(src, dest);
+ }
+
+ void load64(const Address& address, Register64 dest) {
+ bool highBeforeLow = address.base == dest.low;
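+ // If the base register doubles as dest.low, loading the low word first
+ // would clobber the base before the high word is read, so load high first.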
+ if (highBeforeLow) {
+ load32(HighWord(address), dest.high);
+ load32(LowWord(address), dest.low);
+ } else {
+ load32(LowWord(address), dest.low);
+ load32(HighWord(address), dest.high);
+ }
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ // If you run into this, relax your register allocation constraints.
+ MOZ_RELEASE_ASSERT(
+ !((address.base == dest.low || address.base == dest.high) &&
+ (address.index == dest.low || address.index == dest.high)));
+ bool highBeforeLow = address.base == dest.low || address.index == dest.low;
+ if (highBeforeLow) {
+ load32(HighWord(address), dest.high);
+ load32(LowWord(address), dest.low);
+ } else {
+ load32(LowWord(address), dest.low);
+ load32(HighWord(address), dest.high);
+ }
+ }
+
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ // load64 calls load32, which supports unaligned accesses.
+ load64(src, dest);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void loadDouble(const Address& addr, FloatRegister dest);
+ void loadDouble(const BaseIndex& src, FloatRegister dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+ void loadFloat32(const Address& addr, FloatRegister dest);
+ void loadFloat32(const BaseIndex& src, FloatRegister dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ template <typename S, typename T>
+ void store16Unaligned(const S& src, const T& dest) {
+ // store16 uses |strh|, which supports unaligned access.
+ store16(src, dest);
+ }
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ template <typename S, typename T>
+ void store32Unaligned(const S& src, const T& dest) {
+ // store32 uses |str|, which supports unaligned access.
+ store32(src, dest);
+ }
+
+ void store64(Register64 src, Address address) {
+ store32(src.low, LowWord(address));
+ store32(src.high, HighWord(address));
+ }
+
+ void store64(Register64 src, const BaseIndex& address) {
+ store32(src.low, LowWord(address));
+ store32(src.high, HighWord(address));
+ }
+
+ void store64(Imm64 imm, Address address) {
+ store32(imm.low(), LowWord(address));
+ store32(imm.hi(), HighWord(address));
+ }
+
+ void store64(Imm64 imm, const BaseIndex& address) {
+ store32(imm.low(), LowWord(address));
+ store32(imm.hi(), HighWord(address));
+ }
+
+ template <typename S, typename T>
+ void store64Unaligned(const S& src, const T& dest) {
+ // store64 calls store32, which supports unaligned access.
+ store64(src, dest);
+ }
+
+ void storePtr(ImmWord imm, const Address& address);
+ void storePtr(ImmWord imm, const BaseIndex& address);
+ void storePtr(ImmPtr imm, const Address& address);
+ void storePtr(ImmPtr imm, const BaseIndex& address);
+ void storePtr(ImmGCPtr imm, const Address& address);
+ void storePtr(ImmGCPtr imm, const BaseIndex& address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest,
+ Condition cc = Always) {
+ ma_vmov(src, dest, cc);
+ }
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void cmp32(Register lhs, Imm32 rhs);
+ void cmp32(Register lhs, Register rhs);
+ void cmp32(const Address& lhs, Imm32 rhs);
+ void cmp32(const Address& lhs, Register rhs);
+
+ void cmpPtr(Register lhs, Register rhs);
+ void cmpPtr(Register lhs, ImmWord rhs);
+ void cmpPtr(Register lhs, ImmPtr rhs);
+ void cmpPtr(Register lhs, ImmGCPtr rhs);
+ void cmpPtr(Register lhs, Imm32 rhs);
+ void cmpPtr(const Address& lhs, Register rhs);
+ void cmpPtr(const Address& lhs, ImmWord rhs);
+ void cmpPtr(const Address& lhs, ImmPtr rhs);
+ void cmpPtr(const Address& lhs, ImmGCPtr rhs);
+ void cmpPtr(const Address& lhs, Imm32 rhs);
+
+ void setStackArg(Register reg, uint32_t arg);
+
+ void breakpoint();
+ // Conditional breakpoint.
+ void breakpoint(Condition cc);
+
+ // Trigger the simulator's interactive read-eval-print loop.
+ // The message will be printed at the stopping point.
+ // (On non-simulator builds, does nothing.)
+ void simulatorStop(const char* msg);
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
+ // Checks for NaN if canBeNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool canBeNaN,
+ bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool canBeNaN,
+ bool isMax);
+
+ void compareDouble(FloatRegister lhs, FloatRegister rhs);
+
+ void compareFloat(FloatRegister lhs, FloatRegister rhs);
+
+ void checkStackAlignment();
+
+ // If source is a double, load it into dest. If source is int32, convert it
+ // to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ void emitSet(Assembler::Condition cond, Register dest) {
+ ma_mov(Imm32(0), dest);
+ ma_mov(Imm32(1), dest, cond);
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ public:
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_add(address.base, Imm32(address.offset), dest, scratch, LeaveCC);
+ }
+ void computeEffectiveAddress(const BaseIndex& address, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd,
+ LeaveCC);
+ if (address.offset) {
+ ma_add(dest, Imm32(address.offset), dest, scratch, LeaveCC);
+ }
+ }
+ void floor(FloatRegister input, Register output, Label* handleNotAnInt);
+ void floorf(FloatRegister input, Register output, Label* handleNotAnInt);
+ void ceil(FloatRegister input, Register output, Label* handleNotAnInt);
+ void ceilf(FloatRegister input, Register output, Label* handleNotAnInt);
+ void round(FloatRegister input, Register output, Label* handleNotAnInt,
+ FloatRegister tmp);
+ void roundf(FloatRegister input, Register output, Label* handleNotAnInt,
+ FloatRegister tmp);
+ void trunc(FloatRegister input, Register output, Label* handleNotAnInt);
+ void truncf(FloatRegister input, Register output, Label* handleNotAnInt);
+
+ void lea(Operand addr, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_add(addr.baseReg(), Imm32(addr.disp()), dest, scratch);
+ }
+
+ void abiret() { as_bx(lr); }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest,
+ Condition cc = Always) {
+ as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(),
+ cc);
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerARMCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_MacroAssembler_arm_h */
diff --git a/js/src/jit/arm/MoveEmitter-arm.cpp b/js/src/jit/arm/MoveEmitter-arm.cpp
new file mode 100644
index 0000000000..1807c41b50
--- /dev/null
+++ b/js/src/jit/arm/MoveEmitter-arm.cpp
@@ -0,0 +1,413 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/MoveEmitter-arm.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+MoveEmitterARM::MoveEmitterARM(MacroAssembler& masm)
+ : inCycle_(0),
+ masm(masm),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg) {
+ pushedAtStart_ = masm.framePushed();
+}
+
+void MoveEmitterARM::emit(const MoveResolver& moves) {
+ if (moves.numCycles()) {
+ // Reserve stack for cycle resolution
+ static_assert(SpillSlotSize == 8);
+ masm.reserveStack(moves.numCycles() * SpillSlotSize);
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++) {
+ emit(moves.getMove(i));
+ }
+}
+
+MoveEmitterARM::~MoveEmitterARM() { assertDone(); }
+
+Address MoveEmitterARM::cycleSlot(uint32_t slot, uint32_t subslot) const {
+ int32_t offset = masm.framePushed() - pushedAtCycle_;
+ MOZ_ASSERT(offset < 4096 && offset > -4096);
+ return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+Address MoveEmitterARM::spillSlot() const {
+ int32_t offset = masm.framePushed() - pushedAtSpill_;
+ MOZ_ASSERT(offset < 4096 && offset > -4096);
+ return Address(StackPointer, offset);
+}
+
+Address MoveEmitterARM::toAddress(const MoveOperand& operand) const {
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+
+ if (operand.base() != StackPointer) {
+ return Address(operand.base(), operand.disp());
+ }
+
+ MOZ_ASSERT(operand.disp() >= 0);
+
+ // Otherwise, the stack offset may need to be adjusted.
+ return Address(StackPointer,
+ operand.disp() + (masm.framePushed() - pushedAtStart_));
+}
+
+Register MoveEmitterARM::tempReg() {
+ if (spilledReg_ != InvalidReg) {
+ return spilledReg_;
+ }
+
+  // We need a register to evict to. r12/ip would be the obvious pick, but it
+  // is the scratch register, which is frequently used for address
+  // computations, such as those needed to access values more than 4096 bytes
+  // off of the stack pointer. Instead, use lr, the link register.
+ spilledReg_ = r14;
+ if (pushedAtSpill_ == -1) {
+ masm.Push(spilledReg_);
+ pushedAtSpill_ = masm.framePushed();
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_str(spilledReg_, spillSlot(), scratch);
+ }
+ return spilledReg_;
+}
+
+void MoveEmitterARM::breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId) {
+  // A move cycle has the pattern:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
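+  //
+  // Illustrative example (not from the original comment): swapping r0 and r1
+  // yields the moves (r0 -> r1) and (r1 -> r0). breakCycle first saves the
+  // original value of r1 into the cycle slot, the ordinary move then writes
+  // r0 into r1, and completeCycle finally loads the saved value into r0.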
+
+ ScratchRegisterScope scratch(masm);
+
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope scratchFloat32(masm);
+ masm.ma_vldr(toAddress(to), scratchFloat32, scratch);
+        // Since it is uncertain whether the load will be aligned or not,
+        // just fill both of them with the same value.
+ masm.ma_vstr(scratchFloat32, cycleSlot(slotId, 0), scratch);
+ masm.ma_vstr(scratchFloat32, cycleSlot(slotId, 4), scratch);
+ } else if (to.isGeneralReg()) {
+        // Since it is uncertain whether the load will be aligned or not,
+        // just fill both of them with the same value.
+ masm.ma_str(to.reg(), cycleSlot(slotId, 0), scratch);
+ masm.ma_str(to.reg(), cycleSlot(slotId, 4), scratch);
+ } else {
+ FloatRegister src = to.floatReg();
+ // Just always store the largest possible size. Currently, this is
+ // a double. When SIMD is added, two doubles will need to be stored.
+ masm.ma_vstr(src.doubleOverlay(), cycleSlot(slotId, 0), scratch);
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vldr(toAddress(to), scratchDouble, scratch);
+ masm.ma_vstr(scratchDouble, cycleSlot(slotId, 0), scratch);
+ } else if (to.isGeneralRegPair()) {
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vxfer(to.evenReg(), to.oddReg(), scratchDouble);
+ masm.ma_vstr(scratchDouble, cycleSlot(slotId, 0), scratch);
+ } else {
+ masm.ma_vstr(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0),
+ scratch);
+ }
+ break;
+ case MoveOp::INT32:
+ case MoveOp::GENERAL:
+      // A non-VFP value.
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.ma_ldr(toAddress(to), temp, scratch);
+ masm.ma_str(temp, cycleSlot(0, 0), scratch);
+ } else {
+ if (to.reg() == spilledReg_) {
+ // If the destination was spilled, restore it first.
+ masm.ma_ldr(spillSlot(), spilledReg_, scratch);
+ spilledReg_ = InvalidReg;
+ }
+ masm.ma_str(to.reg(), cycleSlot(0, 0), scratch);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterARM::completeCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // A move cycle has the pattern:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+
+ ScratchRegisterScope scratch(masm);
+
+ switch (type) {
+ case MoveOp::FLOAT32:
+ MOZ_ASSERT(!to.isGeneralRegPair());
+ if (to.isMemory()) {
+ ScratchFloat32Scope scratchFloat32(masm);
+ masm.ma_vldr(cycleSlot(slotId, 0), scratchFloat32, scratch);
+ masm.ma_vstr(scratchFloat32, toAddress(to), scratch);
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(type == MoveOp::FLOAT32);
+ masm.ma_ldr(toAddress(from), to.reg(), scratch);
+ } else {
+ uint32_t offset = 0;
+ if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1) {
+ offset = sizeof(float);
+ }
+ masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg(), scratch);
+ }
+ break;
+ case MoveOp::DOUBLE:
+ MOZ_ASSERT(!to.isGeneralReg());
+ if (to.isMemory()) {
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vldr(cycleSlot(slotId, 0), scratchDouble, scratch);
+ masm.ma_vstr(scratchDouble, toAddress(to), scratch);
+ } else if (to.isGeneralRegPair()) {
+ MOZ_ASSERT(type == MoveOp::DOUBLE);
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vldr(toAddress(from), scratchDouble, scratch);
+ masm.ma_vxfer(scratchDouble, to.evenReg(), to.oddReg());
+ } else {
+ uint32_t offset = 0;
+ if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1) {
+ offset = sizeof(float);
+ }
+ masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg(), scratch);
+ }
+ break;
+ case MoveOp::INT32:
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.ma_ldr(cycleSlot(slotId, 0), temp, scratch);
+ masm.ma_str(temp, toAddress(to), scratch);
+ } else {
+ if (to.reg() == spilledReg_) {
+ // Make sure we don't re-clobber the spilled register later.
+ spilledReg_ = InvalidReg;
+ }
+ masm.ma_ldr(cycleSlot(slotId, 0), to.reg(), scratch);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterARM::emitMove(const MoveOperand& from, const MoveOperand& to) {
+ // Register pairs are used to store Double values during calls.
+ MOZ_ASSERT(!from.isGeneralRegPair());
+ MOZ_ASSERT(!to.isGeneralRegPair());
+
+ ScratchRegisterScope scratch(masm);
+
+ if (to.isGeneralReg() && to.reg() == spilledReg_) {
+ // If the destination is the spilled register, make sure we
+ // don't re-clobber its value.
+ spilledReg_ = InvalidReg;
+ }
+
+ if (from.isGeneralReg()) {
+ if (from.reg() == spilledReg_) {
+ // If the source is a register that has been spilled, make sure
+ // to load the source back into that register.
+ masm.ma_ldr(spillSlot(), spilledReg_, scratch);
+ spilledReg_ = InvalidReg;
+ }
+ if (to.isMemoryOrEffectiveAddress()) {
+ masm.ma_str(from.reg(), toAddress(to), scratch);
+ } else {
+ masm.ma_mov(from.reg(), to.reg());
+ }
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
+ if (from.isMemory()) {
+ masm.ma_ldr(toAddress(from), to.reg(), scratch);
+ } else {
+ masm.ma_add(from.base(), Imm32(from.disp()), to.reg(), scratch);
+ }
+ } else {
+ // Memory to memory gpr move.
+ Register reg = tempReg();
+
+ MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
+ if (from.isMemory()) {
+ masm.ma_ldr(toAddress(from), reg, scratch);
+ } else {
+ masm.ma_add(from.base(), Imm32(from.disp()), reg, scratch);
+ }
+ MOZ_ASSERT(to.base() != reg);
+ masm.ma_str(reg, toAddress(to), scratch);
+ }
+}
+
+void MoveEmitterARM::emitFloat32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ // Register pairs are used to store Double values during calls.
+ MOZ_ASSERT(!from.isGeneralRegPair());
+ MOZ_ASSERT(!to.isGeneralRegPair());
+
+ ScratchRegisterScope scratch(masm);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.ma_vmov_f32(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ masm.ma_vxfer(from.floatReg(), to.reg());
+ } else {
+ masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to),
+ scratch);
+ }
+ } else if (from.isGeneralReg()) {
+ if (to.isFloatReg()) {
+ masm.ma_vxfer(from.reg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ masm.ma_mov(from.reg(), to.reg());
+ } else {
+ masm.ma_str(from.reg(), toAddress(to), scratch);
+ }
+ } else if (to.isFloatReg()) {
+ masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay(),
+ scratch);
+ } else if (to.isGeneralReg()) {
+ masm.ma_ldr(toAddress(from), to.reg(), scratch);
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchFloat32Scope scratchFloat32(masm);
+ masm.ma_vldr(toAddress(from), scratchFloat32, scratch);
+ masm.ma_vstr(scratchFloat32, toAddress(to), scratch);
+ }
+}
+
+void MoveEmitterARM::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ // Registers are used to store pointers / int32 / float32 values.
+ MOZ_ASSERT(!from.isGeneralReg());
+ MOZ_ASSERT(!to.isGeneralReg());
+
+ ScratchRegisterScope scratch(masm);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.ma_vmov(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+ masm.ma_vxfer(from.floatReg(), to.evenReg(), to.oddReg());
+ } else {
+ masm.ma_vstr(from.floatReg(), toAddress(to), scratch);
+ }
+ } else if (from.isGeneralRegPair()) {
+ if (to.isFloatReg()) {
+ masm.ma_vxfer(from.evenReg(), from.oddReg(), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+ MOZ_ASSERT(!from.aliases(to));
+ masm.ma_mov(from.evenReg(), to.evenReg());
+ masm.ma_mov(from.oddReg(), to.oddReg());
+ } else {
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vxfer(from.evenReg(), from.oddReg(), scratchDouble);
+ masm.ma_vstr(scratchDouble, toAddress(to), scratch);
+ }
+ } else if (to.isFloatReg()) {
+ masm.ma_vldr(toAddress(from), to.floatReg(), scratch);
+ } else if (to.isGeneralRegPair()) {
+ MOZ_ASSERT(from.isMemory());
+ Address src = toAddress(from);
+ // Note: We can safely use the MoveOperand's displacement here,
+ // even if the base is SP: MoveEmitter::toOperand adjusts
+ // SP-relative operands by the difference between the current
+ // stack usage and stackAdjust, which emitter.finish() resets to
+ // 0.
+ //
+ // Warning: if the offset isn't within [-255,+255] then this
+ // will assert-fail (or, if non-debug, load the wrong words).
+ // Nothing uses such an offset at the time of this writing.
+ masm.ma_ldrd(EDtrAddr(src.base, EDtrOffImm(src.offset)), to.evenReg(),
+ to.oddReg());
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchDoubleScope scratchDouble(masm);
+ masm.ma_vldr(toAddress(from), scratchDouble, scratch);
+ masm.ma_vstr(scratchDouble, toAddress(to), scratch);
+ }
+}
+
+void MoveEmitterARM::emit(const MoveOp& move) {
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd() && move.isCycleBegin()) {
+    // A fun consequence of aliased registers is that you can have multiple
+ // cycles at once, and one can end exactly where another begins.
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ return;
+ }
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ MOZ_ASSERT(inCycle_ > 0);
+ inCycle_--;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ inCycle_++;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterARM::assertDone() { MOZ_ASSERT(inCycle_ == 0); }
+
+void MoveEmitterARM::finish() {
+ assertDone();
+
+ if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg) {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_ldr(spillSlot(), spilledReg_, scratch);
+ }
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
diff --git a/js/src/jit/arm/MoveEmitter-arm.h b/js/src/jit/arm/MoveEmitter-arm.h
new file mode 100644
index 0000000000..26a84fdbcc
--- /dev/null
+++ b/js/src/jit/arm/MoveEmitter-arm.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_MoveEmitter_arm_h
+#define jit_arm_MoveEmitter_arm_h
+
+#include <stdint.h>
+
+#include "jit/MoveResolver.h"
+#include "jit/Registers.h"
+
+namespace js {
+namespace jit {
+
+struct Address;
+class MacroAssembler;
+
+class MoveEmitterARM {
+ uint32_t inCycle_;
+ MacroAssembler& masm;
+
+  // The value of masm.framePushed() at the time the emitter was constructed.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ void assertDone();
+ Register tempReg();
+ FloatRegister tempFloatReg();
+ Address cycleSlot(uint32_t slot, uint32_t subslot) const;
+ Address spillSlot() const;
+ Address toAddress(const MoveOperand& operand) const;
+
+ void emitMove(const MoveOperand& from, const MoveOperand& to);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void emit(const MoveOp& move);
+
+ public:
+ explicit MoveEmitterARM(MacroAssembler& masm);
+ ~MoveEmitterARM();
+ void emit(const MoveResolver& moves);
+ void finish();
+
+ void setScratchRegister(Register reg) {}
+};
+
+typedef MoveEmitterARM MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_MoveEmitter_arm_h */
diff --git a/js/src/jit/arm/SharedICHelpers-arm-inl.h b/js/src/jit/arm/SharedICHelpers-arm-inl.h
new file mode 100644
index 0000000000..2943bafbd8
--- /dev/null
+++ b/js/src/jit/arm/SharedICHelpers-arm-inl.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_SharedICHelpers_arm_inl_h
+#define jit_arm_SharedICHelpers_arm_inl_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+  // We assume here that R0 and R1 have been pushed, and that R2 is unused.
+ static_assert(R2 == ValueOperand(r1, r0));
+
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.movePtr(FramePointer, r0);
+ masm.ma_sub(StackPointer, r0);
+ masm.sub32(Imm32(argSize), r0);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(r0, frameSizeAddr);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ // ICTailCallReg (lr) already contains the return address (as we keep
+ // it there through the stub calls), but the VMWrapper code being called
+ // expects the return address to also be pushed on the stack.
+ static_assert(ICTailCallReg == lr);
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(lr);
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+#ifdef DEBUG
+ // Compute frame size.
+ masm.mov(FramePointer, scratch);
+ masm.ma_sub(StackPointer, scratch);
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Push frame descriptor and return address.
+ masm.PushFrameDescriptor(FrameType::BaselineJS);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ masm.Push(ICStubReg);
+
+ // We pushed 4 words, so the stack is still aligned to 8 bytes.
+ masm.checkStackAlignment();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_SharedICHelpers_arm_inl_h */
diff --git a/js/src/jit/arm/SharedICHelpers-arm.h b/js/src/jit/arm/SharedICHelpers-arm.h
new file mode 100644
index 0000000000..93475abc62
--- /dev/null
+++ b/js/src/jit/arm/SharedICHelpers-arm.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_SharedICHelpers_arm_h
+#define jit_arm_SharedICHelpers_arm_h
+
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on the
+// stack on ARM).
+static const size_t ICStackValueOffset = 0;
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+ // No-op on ARM because link register is always holding the return address.
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+ // No-op on ARM because link register is always holding the return address.
+}
+
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Load stubcode pointer from the ICStub.
+ // R2 won't be active when we call ICs, so we can use r0.
+ static_assert(R2 == ValueOperand(r1, r0));
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
+
+ // Call the stubcode via a direct branch-and-link.
+ masm.ma_blx(r0);
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.ma_mov(lr, pc); }
+
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ Address stubAddr(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP);
+ masm.loadPtr(stubAddr, ICStubReg);
+
+ masm.mov(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // Load the return address.
+ masm.Pop(ICTailCallReg);
+
+ // Discard the frame descriptor.
+ ScratchRegisterScope scratch(masm);
+ masm.Pop(scratch);
+}
+
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ // On ARM, lr is clobbered by guardedCallPreBarrier. Save it first.
+ masm.push(lr);
+ masm.guardedCallPreBarrier(addr, type);
+ masm.pop(lr);
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg.
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ static_assert(ICTailCallReg == lr);
+ masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_SharedICHelpers_arm_h */
diff --git a/js/src/jit/arm/SharedICRegisters-arm.h b/js/src/jit/arm/SharedICRegisters-arm.h
new file mode 100644
index 0000000000..16aabbf0b3
--- /dev/null
+++ b/js/src/jit/arm/SharedICRegisters-arm.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm_SharedICRegisters_arm_h
+#define jit_arm_SharedICRegisters_arm_h
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+// r15 = program-counter
+// r14 = link-register
+// r13 = stack-pointer
+// r11 = frame-pointer
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers that are not preserved across
+// calls. R1's value should be preserved across calls.
+static constexpr ValueOperand R0(r3, r2);
+static constexpr ValueOperand R1(r5, r4);
+static constexpr ValueOperand R2(r1, r0);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = r14;
+static constexpr Register ICStubReg = r9;
+
+// Register used internally by MacroAssemblerARM.
+static constexpr Register BaselineSecondScratchReg = r6;
+
+// R7 - R9 are generally available for use within stubcode.
+
+// Note that ICTailCallReg is actually just the link register. In ARM code
+// emission, we do not clobber ICTailCallReg since we keep the return
+// address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = d0;
+static constexpr FloatRegister FloatReg1 = d1;
+static constexpr FloatRegister FloatReg2 = d2;
+static constexpr FloatRegister FloatReg3 = d3;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm_SharedICRegisters_arm_h */
diff --git a/js/src/jit/arm/Simulator-arm.cpp b/js/src/jit/arm/Simulator-arm.cpp
new file mode 100644
index 0000000000..2afd6cb0de
--- /dev/null
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -0,0 +1,5472 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm/Simulator-arm.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm/Assembler-arm.h"
+#include "jit/arm/disasm/Constants-arm.h"
+#include "jit/AtomicOperations.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "threading/LockGuard.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+#include "vm/SharedMem.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmSignalHandlers.h"
+
+extern "C" {
+
+MOZ_EXPORT int64_t __aeabi_idivmod(int x, int y) {
+ // Run-time ABI for the ARM architecture specifies that for |INT_MIN / -1|
+ // "an implementation is (sic) may return any convenient value, possibly the
+ // original numerator."
+ //
+ // |INT_MIN / -1| traps on x86, which isn't listed as an allowed behavior in
+ // the ARM docs, so instead follow LLVM and return the numerator. (And zero
+ // for the remainder.)
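+  //
+  // Illustrative note (not from the original comment): the quotient is
+  // returned in the low word and the remainder in the high word, so
+  // __aeabi_idivmod(7, 3) yields (int64_t(1) << 32) | 2, i.e. quotient 2
+  // and remainder 1.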
+
+ if (x == INT32_MIN && y == -1) {
+ return uint32_t(x);
+ }
+
+ uint32_t lo = uint32_t(x / y);
+ uint32_t hi = uint32_t(x % y);
+ return (int64_t(hi) << 32) | lo;
+}
+
+MOZ_EXPORT int64_t __aeabi_uidivmod(int x, int y) {
+ uint32_t lo = uint32_t(x) / uint32_t(y);
+ uint32_t hi = uint32_t(x) % uint32_t(y);
+ return (int64_t(hi) << 32) | lo;
+}
+}
+
+namespace js {
+namespace jit {
+
+// For decoding load-exclusive and store-exclusive instructions.
+namespace excl {
+
+// Bit positions.
+enum {
+ ExclusiveOpHi = 24, // Hi bit of opcode field
+ ExclusiveOpLo = 23, // Lo bit of opcode field
+ ExclusiveSizeHi = 22, // Hi bit of operand size field
+ ExclusiveSizeLo = 21, // Lo bit of operand size field
+ ExclusiveLoad = 20 // Bit indicating load
+};
+
+// Opcode bits for exclusive instructions.
+enum { ExclusiveOpcode = 3 };
+
+// Operand size, Bits(ExclusiveSizeHi,ExclusiveSizeLo).
+enum {
+ ExclusiveWord = 0,
+ ExclusiveDouble = 1,
+ ExclusiveByte = 2,
+ ExclusiveHalf = 3
+};
+
+} // namespace excl
+
+// Load/store multiple addressing mode.
+enum BlockAddrMode {
+ // Alias modes for comparison when writeback does not matter.
+ da_x = (0 | 0 | 0) << 21, // Decrement after.
+ ia_x = (0 | 4 | 0) << 21, // Increment after.
+ db_x = (8 | 0 | 0) << 21, // Decrement before.
+ ib_x = (8 | 4 | 0) << 21, // Increment before.
+};
+
+// Type of VFP register. Determines register encoding.
+enum VFPRegPrecision { kSinglePrecision = 0, kDoublePrecision = 1 };
+
+enum NeonListType { nlt_1 = 0x7, nlt_2 = 0xA, nlt_3 = 0x6, nlt_4 = 0x2 };
+
+// Supervisor Call (svc) specific support.
+
+// Special Software Interrupt codes when used in the presence of the ARM
+// simulator.
+// svc (formerly swi) provides a 24-bit immediate value. Use bits 22:0 for
+// standard SoftwareInterruptCodes. Bit 23 is reserved for the stop feature.
+enum SoftwareInterruptCodes {
+ kCallRtRedirected = 0x10, // Transition to C code.
+ kBreakpoint = 0x20, // Breakpoint.
+ kStopCode = 1 << 23 // Stop.
+};
+
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the ARM
+// architecture instruction set encoding as described in figure A3-1.
+// Note that the Assembler uses typedef int32_t Instr.
+//
+// Example: Test whether the instruction at ptr does set the condition code
+// bits.
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+// Instruction* instr = Instruction::At(ptr);
+// int type = instr->TypeValue();
+// return ((type == 0) || (type == 1)) && instr->hasS();
+// }
+//
+class SimInstruction {
+ public:
+ enum { kInstrSize = 4, kPCReadOffset = 8 };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const { return (instructionBits() >> nr) & 1; }
+
+ // Read a bit field's value out of the instruction bits.
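+  // (Illustrative note, not from the original comment: bits(27, 25) extracts
+  // the 3-bit type field, using the mask (2 << 2) - 1 == 7.)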
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int bitField(int hi, int lo) const {
+ return instructionBits() & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Accessors for the different named fields used in the ARM encoding.
+  // The naming of these accessors corresponds to figure A3-1.
+  //
+  // Two kinds of accessors are declared:
+ // - <Name>Field() will return the raw field, i.e. the field's bits at their
+ // original place in the instruction encoding.
+  //     e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //     0xC0810002, conditionField(instr) will return 0xC0000000.
+  // - <Name>Value() will return the field value, shifted back to bit 0.
+  //   e.g. for the same instruction, the corresponding Value() accessor
+  //   would return 0xC.
+
+ // Generally applicable fields
+ inline Assembler::ARMCondition conditionField() const {
+ return static_cast<Assembler::ARMCondition>(bitField(31, 28));
+ }
+ inline int typeValue() const { return bits(27, 25); }
+ inline int specialValue() const { return bits(27, 23); }
+
+ inline int rnValue() const { return bits(19, 16); }
+ inline int rdValue() const { return bits(15, 12); }
+
+ inline int coprocessorValue() const { return bits(11, 8); }
+
+ // Support for VFP.
+ // Vn(19-16) | Vd(15-12) | Vm(3-0)
+ inline int vnValue() const { return bits(19, 16); }
+ inline int vmValue() const { return bits(3, 0); }
+ inline int vdValue() const { return bits(15, 12); }
+ inline int nValue() const { return bit(7); }
+ inline int mValue() const { return bit(5); }
+ inline int dValue() const { return bit(22); }
+ inline int rtValue() const { return bits(15, 12); }
+ inline int pValue() const { return bit(24); }
+ inline int uValue() const { return bit(23); }
+ inline int opc1Value() const { return (bit(23) << 2) | bits(21, 20); }
+ inline int opc2Value() const { return bits(19, 16); }
+ inline int opc3Value() const { return bits(7, 6); }
+ inline int szValue() const { return bit(8); }
+ inline int VLValue() const { return bit(20); }
+ inline int VCValue() const { return bit(8); }
+ inline int VAValue() const { return bits(23, 21); }
+ inline int VBValue() const { return bits(6, 5); }
+ inline int VFPNRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 16, 7);
+ }
+ inline int VFPMRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 0, 5);
+ }
+ inline int VFPDRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 12, 22);
+ }
+
+ // Fields used in Data processing instructions.
+ inline int opcodeValue() const { return static_cast<ALUOp>(bits(24, 21)); }
+ inline ALUOp opcodeField() const {
+ return static_cast<ALUOp>(bitField(24, 21));
+ }
+ inline int sValue() const { return bit(20); }
+
+ // With register.
+ inline int rmValue() const { return bits(3, 0); }
+ inline ShiftType shifttypeValue() const {
+ return static_cast<ShiftType>(bits(6, 5));
+ }
+ inline int rsValue() const { return bits(11, 8); }
+ inline int shiftAmountValue() const { return bits(11, 7); }
+
+ // With immediate.
+ inline int rotateValue() const { return bits(11, 8); }
+ inline int immed8Value() const { return bits(7, 0); }
+ inline int immed4Value() const { return bits(19, 16); }
+ inline int immedMovwMovtValue() const {
+ return immed4Value() << 12 | offset12Value();
+ }
+
+ // Fields used in Load/Store instructions.
+ inline int PUValue() const { return bits(24, 23); }
+ inline int PUField() const { return bitField(24, 23); }
+ inline int bValue() const { return bit(22); }
+ inline int wValue() const { return bit(21); }
+ inline int lValue() const { return bit(20); }
+
+  // With-register addressing uses the same fields as the Data processing
+  // instructions above; with-immediate addressing:
+ inline int offset12Value() const { return bits(11, 0); }
+
+ // Multiple.
+ inline int rlistValue() const { return bits(15, 0); }
+
+ // Extra loads and stores.
+ inline int signValue() const { return bit(6); }
+ inline int hValue() const { return bit(5); }
+ inline int immedHValue() const { return bits(11, 8); }
+ inline int immedLValue() const { return bits(3, 0); }
+
+ // Fields used in Branch instructions.
+ inline int linkValue() const { return bit(24); }
+ inline int sImmed24Value() const { return ((instructionBits() << 8) >> 8); }
+
+ // Fields used in Software interrupt instructions.
+ inline SoftwareInterruptCodes svcValue() const {
+ return static_cast<SoftwareInterruptCodes>(bits(23, 0));
+ }
+
+ // Test for special encodings of type 0 instructions (extra loads and
+ // stores, as well as multiplications).
+ inline bool isSpecialType0() const { return (bit(7) == 1) && (bit(4) == 1); }
+
+  // Test for miscellaneous instruction encodings of type 0 instructions.
+ inline bool isMiscType0() const {
+ return bit(24) == 1 && bit(23) == 0 && bit(20) == 0 && (bit(7) == 0);
+ }
+
+ // Test for a nop instruction, which falls under type 1.
+ inline bool isNopType1() const { return bits(24, 0) == 0x0120F000; }
+
+  // Test for a csdb instruction, which falls under type 1.
+ inline bool isCsdbType1() const { return bits(24, 0) == 0x0120F014; }
+
+ // Test for a stop instruction.
+ inline bool isStop() const {
+ return typeValue() == 7 && bit(24) == 1 && svcValue() >= kStopCode;
+ }
+
+ // Test for a udf instruction, which falls under type 3.
+ inline bool isUDF() const {
+ return (instructionBits() & 0xfff000f0) == 0xe7f000f0;
+ }
+
+ // Special accessors that test for existence of a value.
+ inline bool hasS() const { return sValue() == 1; }
+ inline bool hasB() const { return bValue() == 1; }
+ inline bool hasW() const { return wValue() == 1; }
+ inline bool hasL() const { return lValue() == 1; }
+ inline bool hasU() const { return uValue() == 1; }
+ inline bool hasSign() const { return signValue() == 1; }
+ inline bool hasH() const { return hValue() == 1; }
+ inline bool hasLink() const { return linkValue() == 1; }
+
+ // Decoding the double immediate in the vmov instruction.
+ double doubleImmedVmov() const;
+ // Decoding the float32 immediate in the vmov.f32 instruction.
+ float float32ImmedVmov() const;
+
+ private:
+ // Join split register codes, depending on single or double precision.
+ // four_bit is the position of the least-significant bit of the four
+ // bit specifier. one_bit is the position of the additional single bit
+ // specifier.
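+  //
+  // Illustrative example (not from the original comment): VFPMRegValue passes
+  // four_bit = 0 (Vm) and one_bit = 5 (M). With Vm = 0b0001 and M = 1 this
+  // yields s3 ((0b0001 << 1) | 1) for single precision and d17
+  // ((1 << 4) | 0b0001) for double precision.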
+ inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
+ if (pre == kSinglePrecision) {
+ return (bits(four_bit + 3, four_bit) << 1) | bit(one_bit);
+ }
+ return (bit(one_bit) << 4) | bits(four_bit + 3, four_bit);
+ }
+
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+double SimInstruction::doubleImmedVmov() const {
+ // Reconstruct a double from the immediate encoded in the vmov instruction.
+ //
+ // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
+ // double: [aBbbbbbb,bbcdefgh,00000000,00000000,
+ // 00000000,00000000,00000000,00000000]
+ //
+ // where B = ~b. Only the high 16 bits are affected.
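+  //
+  // Worked example (illustrative, not from the original comment): the
+  // immediate abcdefgh = 0b01110000 produces high16 = 0x3FF0, the encoding
+  // of 1.0.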
+ uint64_t high16;
+ high16 = (bits(17, 16) << 4) | bits(3, 0); // xxxxxxxx,xxcdefgh.
+ high16 |= (0xff * bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
+ high16 |= (bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
+ high16 |= bit(19) << 15; // axxxxxxx,xxxxxxxx.
+
+ uint64_t imm = high16 << 48;
+ return mozilla::BitwiseCast<double>(imm);
+}
+
+float SimInstruction::float32ImmedVmov() const {
+ // Reconstruct a float32 from the immediate encoded in the vmov instruction.
+ //
+ // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
+ // float32: [aBbbbbbc, defgh000, 00000000, 00000000]
+ //
+ // where B = ~b. Only the high 16 bits are affected.
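+  //
+  // Worked example (illustrative, not from the original comment): the same
+  // immediate abcdefgh = 0b01110000 produces imm = 0x3F800000, the encoding
+  // of 1.0f.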
+ uint32_t imm;
+ imm = (bits(17, 16) << 23) | (bits(3, 0) << 19); // xxxxxxxc,defgh000.0.0
+ imm |= (0x1f * bit(18)) << 25; // xxbbbbbx,xxxxxxxx.0.0
+ imm |= (bit(18) ^ 1) << 30; // xBxxxxxx,xxxxxxxx.0.0
+ imm |= bit(19) << 31; // axxxxxxx,xxxxxxxx.0.0
+
+ return mozilla::BitwiseCast<float>(imm);
+}
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+ char* cachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache()
+ : Base(SimulatorProcess::singleton_->cacheLock_) {}
+};
+
+mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ SimulatorProcess::ICacheCheckingDisableCount(
+ 1); // Checking is disabled by default.
+SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
+
+int64_t Simulator::StopSimAt = -1L;
+
+Simulator* Simulator::Create() {
+ auto sim = MakeUnique<Simulator>();
+ if (!sim) {
+ return nullptr;
+ }
+
+ if (!sim->init()) {
+ return nullptr;
+ }
+
+ char* stopAtStr = getenv("ARM_SIM_STOP_AT");
+ int64_t stopAt;
+ if (stopAtStr && sscanf(stopAtStr, "%lld", &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %lld\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim.release();
+}
+
+void Simulator::Destroy(Simulator* sim) { js_delete(sim); }
+
+void Simulator::disassemble(SimInstruction* instr, size_t n) {
+#ifdef JS_DISASM_ARM
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+ while (n-- > 0) {
+ dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(instr));
+ fprintf(stderr, " 0x%08x %s\n", uint32_t(instr), buffer.start());
+ instr = reinterpret_cast<SimInstruction*>(
+ reinterpret_cast<uint8_t*>(instr) + 4);
+ }
+#endif
+}
+
+void Simulator::disasm(SimInstruction* instr) { disassemble(instr, 1); }
+
+void Simulator::disasm(SimInstruction* instr, size_t n) {
+ disassemble(instr, n);
+}
+
+void Simulator::disasm(SimInstruction* instr, size_t m, size_t n) {
+ disassemble(reinterpret_cast<SimInstruction*>(
+ reinterpret_cast<uint8_t*>(instr) - m * 4),
+ n);
+}
+
+// The ArmDebugger class is used by the simulator while debugging simulated ARM
+// code.
+class ArmDebugger {
+ public:
+ explicit ArmDebugger(Simulator* sim) : sim_(sim) {}
+
+ void stop(SimInstruction* instr);
+ void debug();
+
+ private:
+ static const Instr kBreakpointInstr =
+ (Assembler::AL | (7 * (1 << 25)) | (1 * (1 << 24)) | kBreakpoint);
+ static const Instr kNopInstr = (Assembler::AL | (13 * (1 << 21)));
+
+ Simulator* sim_;
+
+ int32_t getRegisterValue(int regnum);
+ double getRegisterPairDoubleValue(int regnum);
+ void getVFPDoubleRegisterValue(int regnum, double* value);
+ bool getValue(const char* desc, int32_t* value);
+ bool getVFPDoubleValue(const char* desc, double* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+void ArmDebugger::stop(SimInstruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->svcValue() & kStopCodeMask;
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg =
+ *reinterpret_cast<char**>(sim_->get_pc() + SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+ sim_->watched_stops_[code].desc = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int32_t ArmDebugger::getRegisterValue(int regnum) {
+ if (regnum == Registers::pc) {
+ return sim_->get_pc();
+ }
+ return sim_->get_register(regnum);
+}
+
+double ArmDebugger::getRegisterPairDoubleValue(int regnum) {
+ return sim_->get_double_from_register_pair(regnum);
+}
+
+void ArmDebugger::getVFPDoubleRegisterValue(int regnum, double* out) {
+ sim_->get_double_from_d_register(regnum, out);
+}
+
+bool ArmDebugger::getValue(const char* desc, int32_t* value) {
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+ if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+ }
+ return sscanf(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
+}
+
+bool ArmDebugger::getVFPDoubleValue(const char* desc, double* value) {
+ FloatRegister reg = FloatRegister::FromCode(FloatRegister::FromName(desc));
+ if (reg.isInvalid()) {
+ return false;
+ }
+
+ if (reg.isSingle()) {
+ float fval;
+ sim_->get_float_from_s_register(reg.id(), &fval);
+ *value = fval;
+ return true;
+ }
+
+ sim_->get_double_from_d_register(reg.id(), value);
+ return true;
+}
+
+bool ArmDebugger::setBreakpoint(SimInstruction* breakpc) {
+  // Check if a breakpoint can be set. If not, return without any side effects.
+ if (sim_->break_pc_) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+  // The breakpoint instruction is not written into the code here; it will be
+  // set when the debugger shell continues.
+ return true;
+}
+
+bool ArmDebugger::deleteBreakpoint(SimInstruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void ArmDebugger::undoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+}
+
+void ArmDebugger::redoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+ }
+}
+
+static char* ReadLine(const char* prompt) {
+ UniqueChars result;
+ char line_buf[256];
+ int offset = 0;
+ bool keep_going = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keep_going) {
+ if (fgets(line_buf, sizeof(line_buf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ return nullptr;
+ }
+ int len = strlen(line_buf);
+ if (len > 0 && line_buf[len - 1] == '\n') {
+      // Since we read a newline, we are done reading the line. This will
+      // exit the loop after copying this buffer into the result.
+ keep_going = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating
+ // '\0'.
+ result.reset(js_pod_malloc<char>(len + 1));
+ if (!result) {
+ return nullptr;
+ }
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = js_pod_malloc<char>(new_len);
+ if (!new_result) {
+ return nullptr;
+ }
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result.get(), offset * sizeof(char));
+ result.reset(new_result);
+ }
+ // Copy the newly read line into the result.
+ memcpy(result.get() + offset, line_buf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result.release();
+}
+
+void ArmDebugger::debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+#ifndef JS_DISASM_ARM
+ static bool disasm_warning_printed = false;
+ if (!disasm_warning_printed) {
+ printf(
+ " No ARM disassembler present. Enable JS_DISASM_ARM in "
+ "configure.in.");
+ disasm_warning_printed = true;
+ }
+#endif
+
+ while (!done && !sim_->has_bad_pc()) {
+ if (last_pc != sim_->get_pc()) {
+#ifdef JS_DISASM_ARM
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<uint8_t*>(sim_->get_pc()));
+ printf(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
+#endif
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_;
+ sim_->setLastDebuggerInput(line);
+ }
+
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if (argc < 0) {
+ continue;
+ } else if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ sim_->icount_++;
+ } else if ((strcmp(cmd, "skip") == 0)) {
+ sim_->set_pc(sim_->get_pc() + 4);
+ sim_->icount_++;
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints
+ // disabled.
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ sim_->icount_++;
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
+ int32_t value;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%08x %10d", Registers::GetName(i), value, value);
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
+ (i % 2) == 0) {
+ dvalue = getRegisterPairDoubleValue(i);
+ printf(" (%.16g)\n", dvalue);
+ } else {
+ printf("\n");
+ }
+ }
+ for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i++) {
+ getVFPDoubleRegisterValue(i, &dvalue);
+ uint64_t as_words = mozilla::BitwiseCast<uint64_t>(dvalue);
+ printf("%3s: %.16g 0x%08x %08x\n",
+ FloatRegister::FromCode(i).name(), dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
+ }
+ } else {
+ if (getValue(arg1, &value)) {
+ printf("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (getVFPDoubleValue(arg1, &dvalue)) {
+ uint64_t as_words = mozilla::BitwiseCast<uint64_t>(dvalue);
+ printf("%s: %.16g 0x%08x %08x\n", arg1, dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register>\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int32_t* cur = nullptr;
+ int32_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
+ } else { // "mem"
+ int32_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+ int32_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%08x %10d", cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+ } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
+#ifdef JS_DISASM_ARM
+ uint8_t* prev = nullptr;
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+ while (cur < end) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+
+ prev = cur;
+ cur += dasm.InstructionDecode(buffer, cur);
+ printf(" 0x%08x %s\n", reinterpret_cast<uint32_t>(prev),
+ buffer.start());
+ }
+#endif
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+#ifdef _MSC_VER
+ __debugbreak();
+#else
+ asm("int $3");
+#endif
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) {
+ printf("setting breakpoint failed\n");
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("N flag: %d; ", sim_->n_flag_);
+ printf("Z flag: %d; ", sim_->z_flag_);
+ printf("C flag: %d; ", sim_->c_flag_);
+ printf("V flag: %d\n", sim_->v_flag_);
+ printf("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
+ printf("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
+ printf("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
+ printf("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
+ printf("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address = reinterpret_cast<SimInstruction*>(
+ stop_pc + SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("skip\n");
+ printf(" skip one instruction (set pc to next instruction)\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf(" add argument 'fp' to print register pair double values\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("stack [<words>]\n");
+ printf(" dump stack content, default dump 10 words)\n");
+ printf("mem <address> [<words>]\n");
+ printf(" dump memory content, default dump 10 words)\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and and give control to the ArmDebugger.\n");
+ printf(" The first %d stop codes are watched:\n",
+ Simulator::kNumOfWatchedStops);
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print infos about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
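+// Returns true if start and start + size lie on the same cache page.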
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* page) {
+ SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p) {
+ return p->value();
+ }
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page)) {
+ oomUnsafe.crash("Simulator CachePage");
+ }
+
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ intptr_t start, int size) {
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
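+  // Mark every cache line covered by [start, start + size) as invalid.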
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+static void FlushICacheLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* start_addr, size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
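+  // Align the start address down to a cache-line boundary and round the size
+  // up to a whole number of cache lines.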
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePageLocked(i_cache, start, size);
+ }
+}
+
+/* static */
+void SimulatorProcess::checkICacheLocked(SimInstruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(icache(), page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ mozilla::DebugOnly<int> cmpret =
+ memcmp(reinterpret_cast<void*>(instr), cache_page->cachedData(offset),
+ SimInstruction::kInstrSize);
+ MOZ_ASSERT(cmpret == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber SimulatorProcess::ICacheHasher::hash(const Lookup& l) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool SimulatorProcess::ICacheHasher::match(const Key& k, const Lookup& l) {
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+void Simulator::setLastDebuggerInput(char* input) {
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+/* static */
+void SimulatorProcess::FlushICache(void* start_addr, size_t size) {
+ JitSpewCont(JitSpew_CacheFlush, "[%p %zx]", start_addr, size);
+ if (!ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ js::jit::FlushICacheLocked(icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator() {
+ // Set up simulator support first. Some of this information is needed to
+ // setup the architecture state.
+
+  // Note: allocation, and anything that depends on allocated memory, is
+  // deferred until init() in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0L;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+ skipCalleeSavedRegsCheck = false;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < num_registers; i++) {
+ registers_[i] = 0;
+ }
+
+ n_flag_ = false;
+ z_flag_ = false;
+ c_flag_ = false;
+ v_flag_ = false;
+
+ for (int i = 0; i < num_d_registers * 2; i++) {
+ vfp_registers_[i] = 0;
+ }
+
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+ FPSCR_rounding_mode_ = SimRZ;
+ FPSCR_default_NaN_mode_ = true;
+
+ inv_op_vfp_flag_ = false;
+ div_zero_vfp_flag_ = false;
+ overflow_vfp_flag_ = false;
+ underflow_vfp_flag_ = false;
+ inexact_vfp_flag_ = false;
+
+ // The lr and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_lr;
+ registers_[lr] = bad_lr;
+
+ lastDebuggerInput_ = nullptr;
+
+ exclusiveMonitorHeld_ = false;
+ exclusiveMonitor_ = 0;
+}
+
+bool Simulator::init() {
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = js_pod_malloc<char>(stackSize);
+ if (!stack_) {
+ return false;
+ }
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To guard against potential stack underflows we leave
+  // some buffer below.
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls a VM function (masm.callWithABI) we need to
+// call that function instead of trying to execute it with the simulator
+// (because it's x86 code instead of arm code). We do that by redirecting the VM
+// call to a svc (Supervisor Call) instruction that is handled by the
+// simulator. We write the original destination of the jump at a known
+// offset from the svc instruction so the simulator knows what to call.
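+//
+// Layout note: each Redirection embeds the svc instruction word as a member,
+// so FromSwiInstruction() can map the svc address back to its Redirection via
+// offsetof().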
+class Redirection {
+ friend class SimulatorProcess;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(Assembler::AL | (0xf * (1 << 24)) | kCallRtRedirected),
+ type_(type),
+ next_(nullptr) {
+ next_ = SimulatorProcess::redirection();
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ FlushICacheLocked(SimulatorProcess::icache(), addressOfSwiInstruction(),
+ SimInstruction::kInstrSize);
+ }
+ SimulatorProcess::setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ AutoLockSimulatorCache als;
+
+ Redirection* current = SimulatorProcess::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ // Note: we can't use js_new here because the constructor is private.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = js_pod_malloc<Redirection>(1);
+ if (!redir) {
+ oomUnsafe.crash("Simulator redirection");
+ }
+ new (redir) Redirection(nativeFunction, type);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection =
+ addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+Simulator::~Simulator() { js_free(stack_); }
+
+SimulatorProcess::SimulatorProcess()
+ : cacheLock_(mutexid::SimulatorCacheLock), redirection_(nullptr) {
+ if (getenv("ARM_SIM_ICACHE_CHECKS")) {
+ ICacheCheckingDisableCount = 0;
+ }
+}
+
+SimulatorProcess::~SimulatorProcess() {
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */
+void* Simulator::RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int32_t value) {
+ MOZ_ASSERT(reg >= 0 && reg < num_registers);
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+ registers_[reg] = value;
+}
+
+// Get the register from the architecture state. This function does handle the
+// special case of accessing the PC register.
+int32_t Simulator::get_register(int reg) const {
+ MOZ_ASSERT(reg >= 0 && reg < num_registers);
+ // Work around GCC bug: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+  if (reg >= num_registers) {
+    return 0;
+  }
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+double Simulator::get_double_from_register_pair(int reg) {
+ MOZ_ASSERT(reg >= 0 && reg < num_registers && (reg % 2) == 0);
+
+ // Read the bits from the unsigned integer register_[] array into the double
+ // precision floating point value and return it.
+ double dm_val = 0.0;
+ char buffer[2 * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ return dm_val;
+}
+
+void Simulator::set_register_pair_from_double(int reg, double* value) {
+ MOZ_ASSERT(reg >= 0 && reg < num_registers && (reg % 2) == 0);
+ memcpy(registers_ + reg, value, sizeof(*value));
+}
+
+void Simulator::set_dw_register(int dreg, const int* dbl) {
+ MOZ_ASSERT(dreg >= 0 && dreg < num_d_registers);
+ registers_[dreg] = dbl[0];
+ registers_[dreg + 1] = dbl[1];
+}
+
+void Simulator::get_d_register(int dreg, uint64_t* value) {
+ MOZ_ASSERT(dreg >= 0 && dreg < int(FloatRegisters::TotalPhys));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
+}
+
+void Simulator::set_d_register(int dreg, const uint64_t* value) {
+ MOZ_ASSERT(dreg >= 0 && dreg < int(FloatRegisters::TotalPhys));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
+}
+
+void Simulator::get_d_register(int dreg, uint32_t* value) {
+ MOZ_ASSERT(dreg >= 0 && dreg < int(FloatRegisters::TotalPhys));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
+}
+
+void Simulator::set_d_register(int dreg, const uint32_t* value) {
+ MOZ_ASSERT(dreg >= 0 && dreg < int(FloatRegisters::TotalPhys));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
+}
+
+void Simulator::get_q_register(int qreg, uint64_t* value) {
+ MOZ_ASSERT(qreg >= 0 && qreg < num_q_registers);
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
+}
+
+void Simulator::set_q_register(int qreg, const uint64_t* value) {
+ MOZ_ASSERT(qreg >= 0 && qreg < num_q_registers);
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
+}
+
+void Simulator::get_q_register(int qreg, uint32_t* value) {
+ MOZ_ASSERT(qreg >= 0 && qreg < num_q_registers);
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
+}
+
+void Simulator::set_q_register(int qreg, const uint32_t* value) {
+ MOZ_ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
+}
+
+void Simulator::set_pc(int32_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return registers_[pc] == bad_lr || registers_[pc] == end_sim_pc;
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t Simulator::get_pc() const { return registers_[pc]; }
+
+void Simulator::set_s_register(int sreg, unsigned int value) {
+ MOZ_ASSERT(sreg >= 0 && sreg < num_s_registers);
+ vfp_registers_[sreg] = value;
+}
+
+unsigned Simulator::get_s_register(int sreg) const {
+ MOZ_ASSERT(sreg >= 0 && sreg < num_s_registers);
+ return vfp_registers_[sreg];
+}
+
+template <class InputType, int register_size>
+void Simulator::setVFPRegister(int reg_index, const InputType& value) {
+ MOZ_ASSERT(reg_index >= 0);
+ MOZ_ASSERT_IF(register_size == 1, reg_index < num_s_registers);
+ MOZ_ASSERT_IF(register_size == 2, reg_index < int(FloatRegisters::TotalPhys));
+
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
+ memcpy(&vfp_registers_[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_registers_[0]));
+}
+
+template <class ReturnType, int register_size>
+void Simulator::getFromVFPRegister(int reg_index, ReturnType* out) {
+ MOZ_ASSERT(reg_index >= 0);
+ MOZ_ASSERT_IF(register_size == 1, reg_index < num_s_registers);
+ MOZ_ASSERT_IF(register_size == 2, reg_index < int(FloatRegisters::TotalPhys));
+
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &vfp_registers_[register_size * reg_index],
+ register_size * sizeof(vfp_registers_[0]));
+ memcpy(out, buffer, register_size * sizeof(vfp_registers_[0]));
+}
+
+// These forced instantiations are for jsapi-tests. Evidently, nothing else
+// requires them to be instantiated.
+template void Simulator::getFromVFPRegister<double, 2>(int reg_index,
+ double* out);
+template void Simulator::getFromVFPRegister<float, 1>(int reg_index,
+ float* out);
+template void Simulator::setVFPRegister<double, 2>(int reg_index,
+ const double& value);
+template void Simulator::setVFPRegister<float, 1>(int reg_index,
+ const float& value);
+
+void Simulator::getFpArgs(double* x, double* y, int32_t* z) {
+ if (UseHardFpABI()) {
+ get_double_from_d_register(0, x);
+ get_double_from_d_register(1, y);
+ *z = get_register(0);
+ } else {
+ *x = get_double_from_register_pair(0);
+ *y = get_double_from_register_pair(2);
+ *z = get_register(2);
+ }
+}
+
+void Simulator::getFpFromStack(int32_t* stack, double* x) {
+ MOZ_ASSERT(stack && x);
+ char buffer[2 * sizeof(stack[0])];
+ memcpy(buffer, stack, 2 * sizeof(stack[0]));
+ memcpy(x, buffer, 2 * sizeof(stack[0]));
+}
+
+void Simulator::setCallResultDouble(double result) {
+ // The return value is either in r0/r1 or d0.
+ if (UseHardFpABI()) {
+ char buffer[2 * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to d0.
+ memcpy(vfp_registers_, buffer, sizeof(buffer));
+ } else {
+ char buffer[2 * sizeof(registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to r0 and r1.
+ memcpy(registers_, buffer, sizeof(buffer));
+ }
+}
+
+void Simulator::setCallResultFloat(float result) {
+ if (UseHardFpABI()) {
+ char buffer[sizeof(registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to s0.
+ memcpy(vfp_registers_, buffer, sizeof(buffer));
+ } else {
+ char buffer[sizeof(registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to r0.
+ memcpy(registers_, buffer, sizeof(buffer));
+ }
+}
+
+void Simulator::setCallResult(int64_t res) {
+ set_register(r0, static_cast<int32_t>(res));
+ set_register(r1, static_cast<int32_t>(res >> 32));
+}
+
+void Simulator::exclusiveMonitorSet(uint64_t value) {
+ exclusiveMonitor_ = value;
+ exclusiveMonitorHeld_ = true;
+}
+
+uint64_t Simulator::exclusiveMonitorGetAndClear(bool* held) {
+ *held = exclusiveMonitorHeld_;
+ exclusiveMonitorHeld_ = false;
+ return *held ? exclusiveMonitor_ : 0;
+}
+
+void Simulator::exclusiveMonitorClear() { exclusiveMonitorHeld_ = false; }
+
+JS::ProfilingFrameIterator::RegisterState Simulator::registerState() {
+ wasm::RegisterState state;
+ state.pc = (void*)get_pc();
+ state.fp = (void*)get_register(fp);
+ state.sp = (void*)get_register(sp);
+ state.lr = (void*)get_register(lr);
+ return state;
+}
+
+uint64_t Simulator::readQ(int32_t addr, SimInstruction* instr,
+ UnalignedPolicy f) {
+ if (handleWasmSegFault(addr, 8)) {
+ return UINT64_MAX;
+ }
+
+ if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
+ uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
+ return *ptr;
+ }
+
+ // See the comments below in readW.
+ if (FixupFault() && wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ uint64_t value;
+ memcpy(&value, ptr, sizeof(value));
+ return value;
+ }
+
+ printf("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+void Simulator::writeQ(int32_t addr, uint64_t value, SimInstruction* instr,
+ UnalignedPolicy f) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
+ uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
+ *ptr = value;
+ return;
+ }
+
+ // See the comments below in readW.
+ if (FixupFault() && wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ memcpy(ptr, &value, sizeof(value));
+ return;
+ }
+
+ printf("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+int Simulator::readW(int32_t addr, SimInstruction* instr, UnalignedPolicy f) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+
+ // In WebAssembly, we want unaligned accesses to either raise a signal or
+ // do the right thing. Making this simulator properly emulate the behavior
+  // of raising a signal is complex, so as a special case, when in wasm code,
+ // we just do the right thing.
+ if (FixupFault() && wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ int value;
+ memcpy(&value, ptr, sizeof(value));
+ return value;
+ }
+
+ printf("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+void Simulator::writeW(int32_t addr, int value, SimInstruction* instr,
+ UnalignedPolicy f) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ *ptr = value;
+ return;
+ }
+
+ // See the comments above in readW.
+ if (FixupFault() && wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ memcpy(ptr, &value, sizeof(value));
+ return;
+ }
+
+ printf("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+// For the time being, define Relaxed operations in terms of SeqCst
+// operations - we don't yet need Relaxed operations anywhere else in
+// the system, and the distinction is not important to the simulation
+// at the level where we're operating.
+
+template <typename T>
+static T loadRelaxed(SharedMem<T*> addr) {
+ return AtomicOperations::loadSeqCst(addr);
+}
+
+template <typename T>
+static T compareExchangeRelaxed(SharedMem<T*> addr, T oldval, T newval) {
+ return AtomicOperations::compareExchangeSeqCst(addr, oldval, newval);
+}
+
+int Simulator::readExW(int32_t addr, SimInstruction* instr) {
+ if (addr & 3) {
+ MOZ_CRASH("Unaligned exclusive read");
+ }
+
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ SharedMem<int32_t*> ptr =
+ SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+ int32_t value = loadRelaxed(ptr);
+ exclusiveMonitorSet(value);
+ return value;
+}
+
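+// Returns 0 if the exclusive store succeeded and a nonzero value otherwise,
+// mirroring the strex result convention.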
+int32_t Simulator::writeExW(int32_t addr, int value, SimInstruction* instr) {
+ if (addr & 3) {
+ MOZ_CRASH("Unaligned exclusive write");
+ }
+
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ SharedMem<int32_t*> ptr =
+ SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+ bool held;
+ int32_t expected = int32_t(exclusiveMonitorGetAndClear(&held));
+ if (!held) {
+ return 1;
+ }
+ int32_t old = compareExchangeRelaxed(ptr, expected, int32_t(value));
+ return old != expected;
+}
+
+uint16_t Simulator::readHU(int32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return UINT16_MAX;
+ }
+
+ // The regexp engine emits unaligned loads, so we don't check for them here
+ // like most of the other methods do.
+ if ((addr & 1) == 0 || !HasAlignmentFault()) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+
+ // See comments above in readW.
+ if (FixupFault() && wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ uint16_t value;
+ memcpy(&value, ptr, sizeof(value));
+ return value;
+ }
+
+ printf("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+ return 0;
+}
+
+int16_t Simulator::readH(int32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return -1;
+ }
+
+ if ((addr & 1) == 0 || !HasAlignmentFault()) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+
+ // See comments above in readW.
+ if (FixupFault() && wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ int16_t value;
+ memcpy(&value, ptr, sizeof(value));
+ return value;
+ }
+
+ printf("Unaligned signed halfword read at 0x%08x\n", addr);
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeH(int32_t addr, uint16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if ((addr & 1) == 0 || !HasAlignmentFault()) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+
+ // See the comments above in readW.
+ if (FixupFault() && wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ memcpy(ptr, &value, sizeof(value));
+ return;
+ }
+
+ printf("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+void Simulator::writeH(int32_t addr, int16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if ((addr & 1) == 0 || !HasAlignmentFault()) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+
+ // See the comments above in readW.
+ if (FixupFault() && wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ char* ptr = reinterpret_cast<char*>(addr);
+ memcpy(ptr, &value, sizeof(value));
+ return;
+ }
+
+ printf("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ MOZ_CRASH();
+}
+
+uint16_t Simulator::readExHU(int32_t addr, SimInstruction* instr) {
+ if (addr & 1) {
+ MOZ_CRASH("Unaligned exclusive read");
+ }
+
+ if (handleWasmSegFault(addr, 2)) {
+ return UINT16_MAX;
+ }
+
+ SharedMem<uint16_t*> ptr =
+ SharedMem<uint16_t*>::shared(reinterpret_cast<uint16_t*>(addr));
+ uint16_t value = loadRelaxed(ptr);
+ exclusiveMonitorSet(value);
+ return value;
+}
+
+int32_t Simulator::writeExH(int32_t addr, uint16_t value,
+ SimInstruction* instr) {
+ if (addr & 1) {
+ MOZ_CRASH("Unaligned exclusive write");
+ }
+
+ if (handleWasmSegFault(addr, 2)) {
+ return -1;
+ }
+
+ SharedMem<uint16_t*> ptr =
+ SharedMem<uint16_t*>::shared(reinterpret_cast<uint16_t*>(addr));
+ bool held;
+ uint16_t expected = uint16_t(exclusiveMonitorGetAndClear(&held));
+ if (!held) {
+ return 1;
+ }
+ uint16_t old = compareExchangeRelaxed(ptr, expected, value);
+ return old != expected;
+}
+
+uint8_t Simulator::readBU(int32_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return UINT8_MAX;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+uint8_t Simulator::readExBU(int32_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return UINT8_MAX;
+ }
+
+ SharedMem<uint8_t*> ptr =
+ SharedMem<uint8_t*>::shared(reinterpret_cast<uint8_t*>(addr));
+ uint8_t value = loadRelaxed(ptr);
+ exclusiveMonitorSet(value);
+ return value;
+}
+
+int32_t Simulator::writeExB(int32_t addr, uint8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ SharedMem<uint8_t*> ptr =
+ SharedMem<uint8_t*>::shared(reinterpret_cast<uint8_t*>(addr));
+ bool held;
+ uint8_t expected = uint8_t(exclusiveMonitorGetAndClear(&held));
+ if (!held) {
+ return 1;
+ }
+ uint8_t old = compareExchangeRelaxed(ptr, expected, value);
+ return old != expected;
+}
+
+int8_t Simulator::readB(int32_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeB(int32_t addr, uint8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void Simulator::writeB(int32_t addr, int8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+int32_t* Simulator::readDW(int32_t addr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return nullptr;
+ }
+
+ if ((addr & 3) == 0) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return ptr;
+ }
+
+ printf("Unaligned read at 0x%08x\n", addr);
+ MOZ_CRASH();
+}
+
+void Simulator::writeDW(int32_t addr, int32_t value1, int32_t value2) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ if ((addr & 3) == 0) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ *ptr++ = value1;
+ *ptr = value2;
+ return;
+ }
+
+ printf("Unaligned write at 0x%08x\n", addr);
+ MOZ_CRASH();
+}
+
+int32_t Simulator::readExDW(int32_t addr, int32_t* hibits) {
+ if (addr & 3) {
+ MOZ_CRASH("Unaligned exclusive read");
+ }
+
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ SharedMem<uint64_t*> ptr =
+ SharedMem<uint64_t*>::shared(reinterpret_cast<uint64_t*>(addr));
+ // The spec says that the low part of value shall be read from addr and
+ // the high part shall be read from addr+4. On a little-endian system
+ // where we read a 64-bit quadword the low part of the value will be in
+ // the low part of the quadword, and the high part of the value in the
+ // high part of the quadword.
+ uint64_t value = loadRelaxed(ptr);
+ exclusiveMonitorSet(value);
+ *hibits = int32_t(value >> 32);
+ return int32_t(value);
+}
+
+int32_t Simulator::writeExDW(int32_t addr, int32_t value1, int32_t value2) {
+ if (addr & 3) {
+ MOZ_CRASH("Unaligned exclusive write");
+ }
+
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ SharedMem<uint64_t*> ptr =
+ SharedMem<uint64_t*>::shared(reinterpret_cast<uint64_t*>(addr));
+ // The spec says that value1 shall be stored at addr and value2 at
+ // addr+4. On a little-endian system that means constructing a 64-bit
+ // value where value1 is in the low half of a 64-bit quadword and value2
+ // is in the high half of the quadword.
+ uint64_t value = (uint64_t(value2) << 32) | uint32_t(value1);
+ bool held;
+ uint64_t expected = exclusiveMonitorGetAndClear(&held);
+ if (!held) {
+ return 1;
+ }
+ uint64_t old = compareExchangeRelaxed(ptr, expected, value);
+ return old != expected;
+}
+
+uintptr_t Simulator::stackLimit() const { return stackLimit_; }
+
+uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; }
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = get_register(sp);
+ }
+ return newsp <= stackLimit();
+}
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = get_register(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Checks if the current instruction should be executed based on its condition
+// bits.
+bool Simulator::conditionallyExecute(SimInstruction* instr) {
+ switch (instr->conditionField()) {
+ case Assembler::EQ:
+ return z_flag_;
+ case Assembler::NE:
+ return !z_flag_;
+ case Assembler::CS:
+ return c_flag_;
+ case Assembler::CC:
+ return !c_flag_;
+ case Assembler::MI:
+ return n_flag_;
+ case Assembler::PL:
+ return !n_flag_;
+ case Assembler::VS:
+ return v_flag_;
+ case Assembler::VC:
+ return !v_flag_;
+ case Assembler::HI:
+ return c_flag_ && !z_flag_;
+ case Assembler::LS:
+ return !c_flag_ || z_flag_;
+ case Assembler::GE:
+ return n_flag_ == v_flag_;
+ case Assembler::LT:
+ return n_flag_ != v_flag_;
+ case Assembler::GT:
+ return !z_flag_ && (n_flag_ == v_flag_);
+ case Assembler::LE:
+ return z_flag_ || (n_flag_ != v_flag_);
+ case Assembler::AL:
+ return true;
+ default:
+ MOZ_CRASH();
+ }
+ return false;
+}
+
+// Calculate and set the Negative and Zero flags.
+void Simulator::setNZFlags(int32_t val) {
+ n_flag_ = (val < 0);
+ z_flag_ = (val == 0);
+}
+
+// Set the Carry flag.
+void Simulator::setCFlag(bool val) { c_flag_ = val; }
+
+// Set the oVerflow flag.
+void Simulator::setVFlag(bool val) { v_flag_ = val; }
+
+// Calculate C flag value for additions.
+bool Simulator::carryFrom(int32_t left, int32_t right, int32_t carry) {
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+ uint32_t urest = 0xffffffffU - uleft;
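+  // A carry out occurs when the unsigned sum uleft + uright (+ carry) would
+  // exceed 0xffffffff, i.e. when uright is greater than urest (or at least
+  // urest when a carry comes in).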
+ return (uright > urest) ||
+ (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
+}
+
+// Calculate C flag value for subtractions.
+bool Simulator::borrowFrom(int32_t left, int32_t right) {
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+ return (uright > uleft);
+}
+
+// Calculate V flag value for additions and subtractions.
+bool Simulator::overflowFrom(int32_t alu_out, int32_t left, int32_t right,
+ bool addition) {
+ bool overflow;
+ if (addition) {
+ // Operands have the same sign.
+ overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
+ // And operands and result have different sign.
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ } else {
+ // Operands have different signs.
+ overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
+ // And first operand and result have different signs.
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ }
+ return overflow;
+}
+
+// Support for VFP comparisons.
+void Simulator::compute_FPSCR_Flags(double val1, double val2) {
+ if (std::isnan(val1) || std::isnan(val2)) {
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = true;
+ // All non-NaN cases.
+ } else if (val1 == val2) {
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = true;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ } else if (val1 < val2) {
+ n_flag_FPSCR_ = true;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+ } else {
+ // Case when (val1 > val2).
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ }
+}
+
+void Simulator::copy_FPSCR_to_APSR() {
+ n_flag_ = n_flag_FPSCR_;
+ z_flag_ = z_flag_FPSCR_;
+ c_flag_ = c_flag_FPSCR_;
+ v_flag_ = v_flag_FPSCR_;
+}
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with register.
+int32_t Simulator::getShiftRm(SimInstruction* instr, bool* carry_out) {
+ ShiftType shift = instr->shifttypeValue();
+ int shift_amount = instr->shiftAmountValue();
+ int32_t result = get_register(instr->rmValue());
+ if (instr->bit(4) == 0) {
+ // By immediate.
+ if (shift == ROR && shift_amount == 0) {
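+      // In the immediate form, ROR with a zero shift amount encodes RRX
+      // (rotate right with extend), which this simulator does not implement.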
+ MOZ_CRASH("NYI");
+ return result;
+ }
+ if ((shift == LSR || shift == ASR) && shift_amount == 0) {
+ shift_amount = 32;
+ }
+ switch (shift) {
+ case ASR: {
+ if (shift_amount == 0) {
+ if (result < 0) {
+ result = 0xffffffff;
+ *carry_out = true;
+ } else {
+ result = 0;
+ *carry_out = false;
+ }
+ } else {
+ result >>= (shift_amount - 1);
+ *carry_out = (result & 1) == 1;
+ result >>= 1;
+ }
+ break;
+ }
+
+ case LSL: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ result <<= (shift_amount - 1);
+ *carry_out = (result < 0);
+ result <<= 1;
+ }
+ break;
+ }
+
+ case LSR: {
+ if (shift_amount == 0) {
+ result = 0;
+ *carry_out = c_flag_;
+ } else {
+ uint32_t uresult = static_cast<uint32_t>(result);
+ uresult >>= (shift_amount - 1);
+ *carry_out = (uresult & 1) == 1;
+ uresult >>= 1;
+ result = static_cast<int32_t>(uresult);
+ }
+ break;
+ }
+
+ case ROR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
+ break;
+ }
+
+ default:
+ MOZ_CRASH();
+ }
+ } else {
+ // By register.
+ int rs = instr->rsValue();
+ shift_amount = get_register(rs) & 0xff;
+ switch (shift) {
+ case ASR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else if (shift_amount < 32) {
+ result >>= (shift_amount - 1);
+ *carry_out = (result & 1) == 1;
+ result >>= 1;
+ } else {
+ MOZ_ASSERT(shift_amount >= 32);
+ if (result < 0) {
+ *carry_out = true;
+ result = 0xffffffff;
+ } else {
+ *carry_out = false;
+ result = 0;
+ }
+ }
+ break;
+ }
+
+ case LSL: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else if (shift_amount < 32) {
+ result <<= (shift_amount - 1);
+ *carry_out = (result < 0);
+ result <<= 1;
+ } else if (shift_amount == 32) {
+ *carry_out = (result & 1) == 1;
+ result = 0;
+ } else {
+ MOZ_ASSERT(shift_amount > 32);
+ *carry_out = false;
+ result = 0;
+ }
+ break;
+ }
+
+ case LSR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else if (shift_amount < 32) {
+ uint32_t uresult = static_cast<uint32_t>(result);
+ uresult >>= (shift_amount - 1);
+ *carry_out = (uresult & 1) == 1;
+ uresult >>= 1;
+ result = static_cast<int32_t>(uresult);
+ } else if (shift_amount == 32) {
+ *carry_out = (result < 0);
+ result = 0;
+ } else {
+ *carry_out = false;
+ result = 0;
+ }
+ break;
+ }
+
+ case ROR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
+ break;
+ }
+
+ default:
+ MOZ_CRASH();
+ }
+ }
+ return result;
+}
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with immediate.
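+// For example, an 8-bit immediate of 0xff with a rotate field of 4 is rotated
+// right by 2 * 4 = 8 bits, yielding 0xff000000.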
+int32_t Simulator::getImm(SimInstruction* instr, bool* carry_out) {
+ int rotate = instr->rotateValue() * 2;
+ int immed8 = instr->immed8Value();
+ int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
+ *carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
+ return imm;
+}
+
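+// Compute the start and end addresses, and the writeback value for Rn, of a
+// multiple-register transfer based on the P and U bits: da = decrement after,
+// ia = increment after, db = decrement before, ib = increment before.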
+int32_t Simulator::processPU(SimInstruction* instr, int num_regs, int reg_size,
+ intptr_t* start_address, intptr_t* end_address) {
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_CRASH();
+ break;
+ case ia_x:
+ *start_address = rn_val;
+ *end_address = rn_val + (num_regs * reg_size) - reg_size;
+ rn_val = rn_val + (num_regs * reg_size);
+ break;
+ case db_x:
+ *start_address = rn_val - (num_regs * reg_size);
+ *end_address = rn_val - reg_size;
+ rn_val = *start_address;
+ break;
+ case ib_x:
+ *start_address = rn_val + reg_size;
+ *end_address = rn_val + (num_regs * reg_size);
+ rn_val = *end_address;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ return rn_val;
+}
+
+// Addressing Mode 4 - Load and Store Multiple
+void Simulator::handleRList(SimInstruction* instr, bool load) {
+ int rlist = instr->rlistValue();
+ int num_regs = mozilla::CountPopulation32(rlist);
+
+ intptr_t start_address = 0;
+ intptr_t end_address = 0;
+ int32_t rn_val =
+ processPU(instr, num_regs, sizeof(void*), &start_address, &end_address);
+ intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+
+ // Catch null pointers a little earlier.
+ MOZ_ASSERT(start_address > 8191 || start_address < 0);
+
+ int reg = 0;
+ while (rlist != 0) {
+ if ((rlist & 1) != 0) {
+ if (load) {
+ set_register(reg, *address);
+ } else {
+ *address = get_register(reg);
+ }
+ address += 1;
+ }
+ reg++;
+ rlist >>= 1;
+ }
+ MOZ_ASSERT(end_address == ((intptr_t)address) - 4);
+ if (instr->hasW()) {
+ set_register(instr->rnValue(), rn_val);
+ }
+}
+
+// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
+void Simulator::handleVList(SimInstruction* instr) {
+ VFPRegPrecision precision =
+ (instr->szValue() == 0) ? kSinglePrecision : kDoublePrecision;
+ int operand_size = (precision == kSinglePrecision) ? 4 : 8;
+ bool load = (instr->VLValue() == 0x1);
+
+ int vd;
+ int num_regs;
+ vd = instr->VFPDRegValue(precision);
+ if (precision == kSinglePrecision) {
+ num_regs = instr->immed8Value();
+ } else {
+ num_regs = instr->immed8Value() / 2;
+ }
+
+ intptr_t start_address = 0;
+ intptr_t end_address = 0;
+ int32_t rn_val =
+ processPU(instr, num_regs, operand_size, &start_address, &end_address);
+
+ intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+ for (int reg = vd; reg < vd + num_regs; reg++) {
+ if (precision == kSinglePrecision) {
+ if (load) {
+ set_s_register_from_sinteger(
+ reg, readW(reinterpret_cast<int32_t>(address), instr));
+ } else {
+ writeW(reinterpret_cast<int32_t>(address),
+ get_sinteger_from_s_register(reg), instr);
+ }
+ address += 1;
+ } else {
+ if (load) {
+ int32_t data[] = {readW(reinterpret_cast<int32_t>(address), instr),
+ readW(reinterpret_cast<int32_t>(address + 1), instr)};
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(reg, d);
+ } else {
+ int32_t data[2];
+ double d;
+ get_double_from_d_register(reg, &d);
+ memcpy(data, &d, 8);
+ writeW(reinterpret_cast<int32_t>(address), data[0], instr);
+ writeW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
+ }
+ address += 2;
+ }
+ }
+ MOZ_ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+ if (instr->hasW()) {
+ set_register(instr->rnValue(), rn_val);
+ }
+}
+
+// Note: With the code below we assume that all runtime calls return a 64-bit
+// result. If they don't, the r1 result register contains a bogus value, which
+// is fine because it is caller-saved.
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int32_t arg0);
+typedef int64_t (*Prototype_General2)(int32_t arg0, int32_t arg1);
+typedef int64_t (*Prototype_General3)(int32_t arg0, int32_t arg1, int32_t arg2);
+typedef int64_t (*Prototype_General4)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3);
+typedef int64_t (*Prototype_General5)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3, int32_t arg4);
+typedef int64_t (*Prototype_General6)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3, int32_t arg4, int32_t arg5);
+typedef int64_t (*Prototype_General7)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3, int32_t arg4, int32_t arg5,
+ int32_t arg6);
+typedef int64_t (*Prototype_General8)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3, int32_t arg4, int32_t arg5,
+ int32_t arg6, int32_t arg7);
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int32_t arg0,
+ int32_t arg1,
+ int32_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int32_t arg0,
+ int32_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int32_t arg0);
+typedef double (*Prototype_Double_IntInt)(int32_t arg0, int32_t arg1);
+typedef int32_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int64_Double)(double arg0);
+typedef int32_t (*Prototype_Int_DoubleIntInt)(double arg0, int32_t arg1,
+ int32_t arg2);
+typedef int32_t (*Prototype_Int_IntDoubleIntInt)(int32_t arg0, double arg1,
+ int32_t arg2, int32_t arg3);
+
+typedef int32_t (*Prototype_Int_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
+typedef float (*Prototype_Float32_IntInt)(int arg0, int arg1);
+
+typedef double (*Prototype_Double_DoubleInt)(double arg0, int32_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int32_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef int32_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);
+typedef int32_t (*Prototype_Int_DoubleInt)(double arg0, int32_t arg1);
+
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1,
+ double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0,
+ double arg1,
+ double arg2,
+ double arg3);
+
+typedef int32_t (*Prototype_Int32_General)(int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32)(int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32)(int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32)(int32_t, int32_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32)(
+ int32_t, int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32General)(
+ int32_t, int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32Int32General)(
+ int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (
+ *Prototype_Int32_GeneralInt32Float32Float32Int32Int32Int32General)(
+ int32_t, int32_t, float, float, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (
+ *Prototype_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General)(
+ int32_t, int32_t, float, float, float, float, int32_t, int32_t, int32_t,
+ int32_t, int32_t);
+typedef int32_t (
+ *Prototype_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General)(
+ int32_t, int32_t, float, float, int32_t, float, float, int32_t, float,
+ int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32General)(
+ int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int64)(int32_t, int32_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32General)(int32_t, int32_t,
+ int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int64Int64)(int32_t, int32_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32)(int32_t, int32_t,
+ int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32Int32)(
+ int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneral)(int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralGeneral)(int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32Int32)(int32_t, int32_t,
+ int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int32Int32)(int32_t, int64_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32)(int32_t, int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64)(int32_t, int64_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64General)(
+ int32_t, int64_t, int32_t, int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64)(int32_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64General)(int32_t, int64_t,
+ int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64General)(
+ int32_t, int64_t, int64_t, int64_t, int32_t);
+typedef int32_t (*Prototype_General_GeneralInt32)(int32_t, int32_t);
+typedef int32_t (*Prototype_General_GeneralInt32Int32)(int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_General_GeneralInt32General)(int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_General_GeneralInt32Int32GeneralInt32)(
+ int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32General)(int32_t, int32_t,
+ int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32)(
+ int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int64_t (*Prototype_Int64_General)(int32_t);
+typedef int64_t (*Prototype_Int64_GeneralInt64)(int32_t, int64_t);
+
+// Fill the volatile registers with scratch values.
+//
+// Some of the ABI calls assume that the float registers are not scratched,
+// even though the ABI defines them as volatile - a performance
+// optimization. These are all calls passing operands in integer registers,
+// so for now the simulator does not scratch any float registers for these
+// calls. We should try to narrow this further in the future.
+//
+void Simulator::scratchVolatileRegisters(bool scratchFloat) {
+ int32_t scratch_value = 0xa5a5a5a5 ^ uint32_t(icount_);
+ set_register(r0, scratch_value);
+ set_register(r1, scratch_value);
+ set_register(r2, scratch_value);
+ set_register(r3, scratch_value);
+ set_register(r12, scratch_value); // Intra-Procedure-call scratch register.
+ set_register(r14, scratch_value); // Link register.
+
+ if (scratchFloat) {
+ uint64_t scratch_value_d =
+ 0x5a5a5a5a5a5a5a5aLU ^ uint64_t(icount_) ^ (uint64_t(icount_) << 30);
+ for (uint32_t i = d0; i < d8; i++) {
+ set_d_register(i, &scratch_value_d);
+ }
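+    // d8-d15 are callee-saved under the ARM ABI, so they are deliberately
+    // left untouched.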
+ for (uint32_t i = d16; i < FloatRegisters::TotalPhys; i++) {
+ set_d_register(i, &scratch_value_d);
+ }
+ }
+}
+
+static int64_t MakeInt64(int32_t first, int32_t second) {
+ // Little-endian order.
+ return ((int64_t)second << 32) | (uint32_t)first;
+}
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void Simulator::softwareInterrupt(SimInstruction* instr) {
+ int svc = instr->svcValue();
+ switch (svc) {
+ case kCallRtRedirected: {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int32_t arg0 = get_register(r0);
+ int32_t arg1 = get_register(r1);
+ int32_t arg2 = get_register(r2);
+ int32_t arg3 = get_register(r3);
+ int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
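+      // Only r0-r3 carry integer arguments under the ARM ABI; the remaining
+      // arguments are read from the stack.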
+ int32_t arg4 = stack_pointer[0];
+ int32_t arg5 = stack_pointer[1];
+ int32_t arg6 = stack_pointer[2];
+ int32_t arg7 = stack_pointer[3];
+ int32_t arg8 = stack_pointer[4];
+ int32_t arg9 = stack_pointer[5];
+ int32_t arg10 = stack_pointer[6];
+ int32_t arg11 = stack_pointer[7];
+ int32_t arg12 = stack_pointer[8];
+ int32_t arg13 = stack_pointer[9];
+
+ int32_t saved_lr = get_register(lr);
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (get_register(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target =
+ reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target =
+ reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target =
+ reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ // The ARM backend makes calls to __aeabi_idivmod and
+ // __aeabi_uidivmod assuming that the float registers are
+ // non-volatile as a performance optimization, so the float
+ // registers must not be scratch when calling these.
+ bool scratchFloat =
+ target != __aeabi_idivmod && target != __aeabi_uidivmod;
+ scratchVolatileRegisters(/* scratchFloat = */ scratchFloat);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target =
+ reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+          scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target =
+ reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+          scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target =
+ reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target =
+ reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target =
+ reinterpret_cast<Prototype_General7>(external);
+ int32_t arg6 = stack_pointer[2];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target =
+ reinterpret_cast<Prototype_General8>(external);
+ int32_t arg6 = stack_pointer[2];
+ int32_t arg7 = stack_pointer[3];
+ int64_t result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int_GeneralGeneralGeneralInt64: {
+ Prototype_GeneralGeneralGeneralInt64 target =
+ reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+          // The int64 argument is not split between registers and the stack;
+          // it is passed entirely on the stack (arg4/arg5).
+ int64_t result = target(arg0, arg1, arg2, MakeInt64(arg4, arg5));
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int_GeneralGeneralInt64Int64: {
+ Prototype_GeneralGeneralInt64Int64 target =
+ reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+ int64_t result =
+ target(arg0, arg1, MakeInt64(arg2, arg3), MakeInt64(arg4, arg5));
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int64_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int64_Double target =
+ reinterpret_cast<Prototype_Int64_Double>(external);
+ int64_t result = target(dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target =
+ reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int_Double target =
+ reinterpret_cast<Prototype_Int_Double>(external);
+ int32_t res = target(dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, res);
+ break;
+ }
+ case Args_Int_Float32: {
+ float fval0;
+ if (UseHardFpABI()) {
+ get_float_from_s_register(0, &fval0);
+ } else {
+ fval0 = mozilla::BitwiseCast<float>(arg0);
+ }
+ auto target = reinterpret_cast<Prototype_Int_Float32>(external);
+ int32_t res = target(fval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, res);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_Double target =
+ reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ if (UseHardFpABI()) {
+ get_float_from_s_register(0, &fval0);
+ } else {
+ fval0 = mozilla::BitwiseCast<float>(arg0);
+ }
+ Prototype_Float32_Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Float32_Float32Float32: {
+ float fval0, fval1;
+ if (UseHardFpABI()) {
+ get_float_from_s_register(0, &fval0);
+ get_float_from_s_register(1, &fval1);
+ } else {
+ fval0 = mozilla::BitwiseCast<float>(arg0);
+ fval1 = mozilla::BitwiseCast<float>(arg1);
+ }
+ Prototype_Float32_Float32Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32Float32>(external);
+ float fresult = target(fval0, fval1);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Float32_IntInt: {
+ Prototype_Float32_IntInt target =
+ reinterpret_cast<Prototype_Float32_IntInt>(external);
+ float fresult = target(arg0, arg1);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target =
+ reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntInt: {
+ Prototype_Double_IntInt target =
+ reinterpret_cast<Prototype_Double_IntInt>(external);
+ double dresult = target(arg0, arg1);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_DoubleInt target =
+ reinterpret_cast<Prototype_Double_DoubleInt>(external);
+ double dresult = target(dval0, ival);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_DoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ int32_t ival = get_register(0);
+ double dval0;
+ if (UseHardFpABI()) {
+ get_double_from_d_register(0, &dval0);
+ } else {
+ dval0 = get_double_from_register_pair(2);
+ }
+ Prototype_Double_IntDouble target =
+ reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(ival, dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ int32_t ival = get_register(0);
+ double dval0;
+ if (UseHardFpABI()) {
+ get_double_from_d_register(0, &dval0);
+ } else {
+ dval0 = get_double_from_register_pair(2);
+ }
+ Prototype_Int_IntDouble target =
+ reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int32_t result = target(ival, dval0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, result);
+ break;
+ }
+ case Args_Int_DoubleInt: {
+ double dval;
+ int32_t result;
+ Prototype_Int_DoubleInt target =
+ reinterpret_cast<Prototype_Int_DoubleInt>(external);
+ if (UseHardFpABI()) {
+ get_double_from_d_register(0, &dval);
+ result = target(dval, arg0);
+ } else {
+ dval = get_double_from_register_pair(0);
+ result = target(dval, arg2);
+ }
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval;
+ int32_t result;
+ Prototype_Int_DoubleIntInt target =
+ reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ if (UseHardFpABI()) {
+ get_double_from_d_register(0, &dval);
+ result = target(dval, arg0, arg1);
+ } else {
+ dval = get_double_from_register_pair(0);
+ result = target(dval, arg2, arg3);
+ }
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, result);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval;
+ int32_t result;
+ Prototype_Int_IntDoubleIntInt target =
+ reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ if (UseHardFpABI()) {
+ get_double_from_d_register(0, &dval);
+ result = target(arg0, dval, arg1, arg2);
+ } else {
+ dval = get_double_from_register_pair(2);
+ result = target(arg0, dval, arg4, arg5);
+ }
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ set_register(r0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0, dval1, dval2;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+        // The last argument is passed on the stack.
+ getFpFromStack(stack_pointer, &dval2);
+ Prototype_Double_DoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0, dval1, dval2, dval3;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+        // The last two arguments are passed on the stack.
+ getFpFromStack(stack_pointer, &dval2);
+ getFpFromStack(stack_pointer + 2, &dval3);
+ Prototype_Double_DoubleDoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(
+ external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResultDouble(dresult);
+ break;
+ }
+
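+      // The remaining signatures take their integer and pointer arguments
+      // from the captured argument slots (arg0, arg1, ...). 64-bit arguments
+      // occupy an even-aligned pair of slots and are reassembled with
+      // MakeInt64; results are written back via setCallResult.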
+ case Args_Int32_General: {
+ Prototype_Int32_General target =
+ reinterpret_cast<Prototype_Int32_General>(external);
+ int64_t result = target(arg0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32: {
+ Prototype_Int32_GeneralInt32 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt32>(external);
+ int64_t result = target(arg0, arg1);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32: {
+ Prototype_Int32_GeneralInt32Int32 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32: {
+ Prototype_Int32_GeneralInt32Int32Int32Int32 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32Int32: {
+ Prototype_Int32_GeneralInt32Int32Int32Int32Int32 target =
+ reinterpret_cast<
+ Prototype_Int32_GeneralInt32Int32Int32Int32Int32>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32General: {
+ Prototype_Int32_GeneralInt32Int32Int32Int32General target =
+ reinterpret_cast<
+ Prototype_Int32_GeneralInt32Int32Int32Int32General>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32Int32Int32General: {
+ Prototype_Int32_GeneralInt32Int32Int32Int32Int32Int32General target =
+ reinterpret_cast<
+ Prototype_Int32_GeneralInt32Int32Int32Int32Int32Int32General>(
+ external);
+ int64_t result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General: {
+ float fval0, fval1;
+ if (UseHardFpABI()) {
+ get_float_from_s_register(2, &fval0);
+ get_float_from_s_register(3, &fval1);
+ } else {
+ fval0 = mozilla::BitwiseCast<float>(arg2);
+ fval1 = mozilla::BitwiseCast<float>(arg3);
+ }
+ Prototype_Int32_GeneralInt32Float32Float32Int32Int32Int32General
+ target = reinterpret_cast<
+ Prototype_Int32_GeneralInt32Float32Float32Int32Int32Int32General>(
+ external);
+ int64_t result =
+ target(arg0, arg1, fval0, fval1, arg4, arg5, arg6, arg7);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General: {
+ float fval0, fval1, fval2, fval3;
+ if (UseHardFpABI()) {
+ get_float_from_s_register(2, &fval0);
+ get_float_from_s_register(3, &fval1);
+ get_float_from_s_register(4, &fval2);
+ get_float_from_s_register(5, &fval3);
+ } else {
+ fval0 = mozilla::BitwiseCast<float>(arg2);
+ fval1 = mozilla::BitwiseCast<float>(arg3);
+ fval2 = mozilla::BitwiseCast<float>(arg4);
+ fval3 = mozilla::BitwiseCast<float>(arg5);
+ }
+ Prototype_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General
+ target = reinterpret_cast<
+ Prototype_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General>(
+ external);
+ int64_t result = target(arg0, arg1, fval0, fval1, fval2, fval3, arg6,
+ arg7, arg8, arg9, arg10);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General: {
+ float fval0, fval1, fval2, fval3, fval4;
+ if (UseHardFpABI()) {
+ get_float_from_s_register(2, &fval0);
+ get_float_from_s_register(3, &fval1);
+ get_float_from_s_register(5, &fval2);
+ get_float_from_s_register(6, &fval3);
+ get_float_from_s_register(8, &fval4);
+ } else {
+ fval0 = mozilla::BitwiseCast<float>(arg2);
+ fval1 = mozilla::BitwiseCast<float>(arg3);
+ fval2 = mozilla::BitwiseCast<float>(arg5);
+ fval3 = mozilla::BitwiseCast<float>(arg6);
+ fval4 = mozilla::BitwiseCast<float>(arg8);
+ }
+ Prototype_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General
+ target = reinterpret_cast<
+ Prototype_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General>(
+ external);
+ int64_t result =
+ target(arg0, arg1, fval0, fval1, arg4, fval2, fval3, arg7, fval4,
+ arg9, arg10, arg11, arg12, arg13);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32General: {
+ Prototype_Int32_GeneralInt32Int32Int32General target =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32General>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int64: {
+ Prototype_Int32_GeneralInt32Int32Int64 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int64>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, MakeInt64(arg3, arg4));
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32General: {
+ Prototype_Int32_GeneralInt32Int32General target =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32General>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32Int64Int64: {
+ Prototype_Int32_GeneralInt32Int64Int64 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int64Int64>(
+ external);
+ int64_t result =
+ target(arg0, arg1, MakeInt64(arg2, arg3), MakeInt64(arg4, arg5));
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32: {
+ Prototype_Int32_GeneralInt32GeneralInt32 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32Int32: {
+ Prototype_Int32_GeneralInt32GeneralInt32Int32 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32Int32>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralGeneral: {
+ Prototype_Int32_GeneralGeneral target =
+ reinterpret_cast<Prototype_Int32_GeneralGeneral>(external);
+ int64_t result = target(arg0, arg1);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralGeneralGeneral: {
+ Prototype_Int32_GeneralGeneralGeneral target =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralGeneral>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralGeneralInt32Int32: {
+ Prototype_Int32_GeneralGeneralInt32Int32 target =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32Int32>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt64Int32Int32Int32: {
+ Prototype_Int32_GeneralInt64Int32Int32Int32 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int32Int32>(
+ external);
+ int64_t result =
+ target(arg0, MakeInt64(arg2, arg3), arg4, arg5, arg6);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt64Int32: {
+ Prototype_Int32_GeneralInt64Int32 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32>(external);
+ int64_t result = target(arg0, MakeInt64(arg2, arg3), arg4);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt64Int32Int64: {
+ Prototype_Int32_GeneralInt64Int32Int64 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64>(
+ external);
+ int64_t result =
+ target(arg0, MakeInt64(arg2, arg3), arg4, MakeInt64(arg6, arg7));
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt64Int32Int64General: {
+ Prototype_Int32_GeneralInt64Int32Int64General target =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64General>(
+ external);
+ int64_t result = target(arg0, MakeInt64(arg2, arg3), arg4,
+ MakeInt64(arg6, arg7), arg8);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt64Int64Int64: {
+ Prototype_Int32_GeneralInt64Int64Int64 target =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64>(
+ external);
+ int64_t result = target(arg0, MakeInt64(arg2, arg3),
+ MakeInt64(arg4, arg5), MakeInt64(arg6, arg7));
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt64Int64General: {
+ Prototype_Int32_GeneralInt64Int64General target =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64General>(
+ external);
+ int64_t result =
+ target(arg0, MakeInt64(arg2, arg3), MakeInt64(arg4, arg5), arg6);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int32_GeneralInt64Int64Int64General: {
+ Prototype_Int32_GeneralInt64Int64Int64General target =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64General>(
+ external);
+ int64_t result =
+ target(arg0, MakeInt64(arg2, arg3), MakeInt64(arg4, arg5),
+ MakeInt64(arg6, arg7), arg8);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General_GeneralInt32: {
+ Prototype_General_GeneralInt32 target =
+ reinterpret_cast<Prototype_General_GeneralInt32>(external);
+ int64_t result = target(arg0, arg1);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General_GeneralInt32Int32: {
+ Prototype_General_GeneralInt32Int32 target =
+ reinterpret_cast<Prototype_General_GeneralInt32Int32>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_General_GeneralInt32General: {
+ Prototype_General_GeneralInt32General target =
+ reinterpret_cast<Prototype_General_GeneralInt32General>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32GeneralInt32: {
+ Prototype_General_GeneralInt32Int32GeneralInt32 target =
+ reinterpret_cast<Prototype_General_GeneralInt32Int32GeneralInt32>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32General: {
+ Prototype_Int32_GeneralGeneralInt32General target =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32General>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32: {
+ Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32 target =
+ reinterpret_cast<
+ Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32>(
+ external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int64_General: {
+ Prototype_Int64_General target =
+ reinterpret_cast<Prototype_Int64_General>(external);
+ int64_t result = target(arg0);
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int64_GeneralInt64: {
+ Prototype_Int64_GeneralInt64 target =
+ reinterpret_cast<Prototype_Int64_GeneralInt64>(external);
+ int64_t result = target(arg0, MakeInt64(arg2, arg3));
+ scratchVolatileRegisters(/* scratchFloat = true */);
+ setCallResult(result);
+ break;
+ }
+
+ default:
+ MOZ_CRASH("call");
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ set_register(lr, saved_lr);
+ set_pc(get_register(lr));
+ break;
+ }
+ case kBreakpoint: {
+ ArmDebugger dbg(this);
+ dbg.debug();
+ break;
+ }
+    default: {  // Stop uses all codes at or above 1 << 23.
+ if (svc >= (1 << 23)) {
+ uint32_t code = svc & kStopCodeMask;
+ if (isWatchedStop(code)) {
+ increaseStopCounter(code);
+ }
+
+        // Stop if it is enabled; otherwise continue by jumping over the
+        // stop instruction and the message address.
+ if (isEnabledStop(code)) {
+ ArmDebugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // This is not a valid svc code.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ }
+}
+
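+// Canonicalize the result of a floating-point operation when the FPSCR
+// default-NaN mode is set. Canonicalization is skipped whenever wasm code is
+// involved, since wasm relies on NaN bit patterns being preserved.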
+void Simulator::canonicalizeNaN(double* value) {
+ if (!wasm::CodeExists && !wasm::LookupCodeSegment(get_pc_as<void*>()) &&
+ FPSCR_default_NaN_mode_) {
+ *value = JS::CanonicalizeNaN(*value);
+ }
+}
+
+void Simulator::canonicalizeNaN(float* value) {
+ if (!wasm::CodeExists && !wasm::LookupCodeSegment(get_pc_as<void*>()) &&
+ FPSCR_default_NaN_mode_) {
+ *value = JS::CanonicalizeNaN(*value);
+ }
+}
+
+// Stop helper functions.
+bool Simulator::isStopInstruction(SimInstruction* instr) {
+ return (instr->bits(27, 24) == 0xF) && (instr->svcValue() >= kStopCode);
+}
+
+bool Simulator::isWatchedStop(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ return code < kNumOfWatchedStops;
+}
+
+bool Simulator::isEnabledStop(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ // Unwatched stops are always enabled.
+ return !isWatchedStop(code) ||
+ !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+void Simulator::enableStop(uint32_t code) {
+ MOZ_ASSERT(isWatchedStop(code));
+ if (!isEnabledStop(code)) {
+ watched_stops_[code].count &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::disableStop(uint32_t code) {
+ MOZ_ASSERT(isWatchedStop(code));
+ if (isEnabledStop(code)) {
+ watched_stops_[code].count |= kStopDisabledBit;
+ }
+}
+
+void Simulator::increaseStopCounter(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(isWatchedStop(code));
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ printf(
+ "Stop counter for code %i has overflowed.\n"
+        "Enabling this code and resetting the counter to 0.\n",
+ code);
+ watched_stops_[code].count = 0;
+ enableStop(code);
+ } else {
+ watched_stops_[code].count++;
+ }
+}
+
+// Print a stop status.
+void Simulator::printStopInfo(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if (!isWatchedStop(code)) {
+ printf("Stop not watched.");
+ } else {
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops_[code].desc) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code,
+ state, count, watched_stops_[code].desc);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
+ }
+ }
+ }
+}
+
+// Instruction types 0 and 1 are both rolled into one function because they only
+// differ in the handling of the shifter_operand.
+void Simulator::decodeType01(SimInstruction* instr) {
+ int type = instr->typeValue();
+ if (type == 0 && instr->isSpecialType0()) {
+ // Multiply instruction or extra loads and stores.
+ if (instr->bits(7, 4) == 9) {
+ if (instr->bit(24) == 0) {
+ // Raw field decoding here. Multiply instructions have their Rd
+ // in funny places.
+ int rn = instr->rnValue();
+ int rm = instr->rmValue();
+ int rs = instr->rsValue();
+ int32_t rs_val = get_register(rs);
+ int32_t rm_val = get_register(rm);
+ if (instr->bit(23) == 0) {
+ if (instr->bit(21) == 0) {
+ // The MUL instruction description (A 4.1.33) refers to
+ // Rd as being the destination for the operation, but it
+ // confusingly uses the Rn field to encode it.
+ int rd = rn; // Remap the rn field to the Rd register.
+ int32_t alu_out = rm_val * rs_val;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ }
+ } else {
+ int rd = instr->rdValue();
+ int32_t acc_value = get_register(rd);
+ if (instr->bit(22) == 0) {
+ // The MLA instruction description (A 4.1.28) refers
+ // to the order of registers as "Rd, Rm, Rs,
+ // Rn". But confusingly it uses the Rn field to
+ // encode the Rd register and the Rd field to encode
+ // the Rn register.
+ int32_t mul_out = rm_val * rs_val;
+ int32_t result = acc_value + mul_out;
+ set_register(rn, result);
+ } else {
+ int32_t mul_out = rm_val * rs_val;
+ int32_t result = acc_value - mul_out;
+ set_register(rn, result);
+ }
+ }
+ } else {
+ // The signed/long multiply instructions use the terms RdHi
+ // and RdLo when referring to the target registers. They are
+ // mapped to the Rn and Rd fields as follows:
+ // RdLo == Rd
+ // RdHi == Rn (This is confusingly stored in variable rd here
+ // because the mul instruction from above uses the
+ // Rn field to encode the Rd register. Good luck figuring
+ // this out without reading the ARM instruction manual
+ // at a very detailed level.)
+ int rd_hi = rn; // Remap the rn field to the RdHi register.
+ int rd_lo = instr->rdValue();
+ int32_t hi_res = 0;
+ int32_t lo_res = 0;
+ if (instr->bit(22) == 1) {
+ int64_t left_op = static_cast<int32_t>(rm_val);
+ int64_t right_op = static_cast<int32_t>(rs_val);
+ uint64_t result = left_op * right_op;
+ hi_res = static_cast<int32_t>(result >> 32);
+ lo_res = static_cast<int32_t>(result & 0xffffffff);
+ } else {
+ // Unsigned multiply.
+ uint64_t left_op = static_cast<uint32_t>(rm_val);
+ uint64_t right_op = static_cast<uint32_t>(rs_val);
+ uint64_t result = left_op * right_op;
+ hi_res = static_cast<int32_t>(result >> 32);
+ lo_res = static_cast<int32_t>(result & 0xffffffff);
+ }
+ set_register(rd_lo, lo_res);
+ set_register(rd_hi, hi_res);
+ if (instr->hasS()) {
+ MOZ_CRASH();
+ }
+ }
+ } else {
+ if (instr->bits(excl::ExclusiveOpHi, excl::ExclusiveOpLo) ==
+ excl::ExclusiveOpcode) {
+ // Load-exclusive / store-exclusive.
+ if (instr->bit(excl::ExclusiveLoad)) {
+ int rn = instr->rnValue();
+ int rt = instr->rtValue();
+ int32_t address = get_register(rn);
+ switch (instr->bits(excl::ExclusiveSizeHi, excl::ExclusiveSizeLo)) {
+ case excl::ExclusiveWord:
+ set_register(rt, readExW(address, instr));
+ break;
+ case excl::ExclusiveDouble: {
+ MOZ_ASSERT((rt % 2) == 0);
+ int32_t hibits;
+ int32_t lobits = readExDW(address, &hibits);
+ set_register(rt, lobits);
+ set_register(rt + 1, hibits);
+ break;
+ }
+ case excl::ExclusiveByte:
+ set_register(rt, readExBU(address));
+ break;
+ case excl::ExclusiveHalf:
+ set_register(rt, readExHU(address, instr));
+ break;
+ }
+ } else {
+ int rn = instr->rnValue();
+ int rd = instr->rdValue();
+ int rt = instr->bits(3, 0);
+ int32_t address = get_register(rn);
+ int32_t value = get_register(rt);
+ int32_t result = 0;
+ switch (instr->bits(excl::ExclusiveSizeHi, excl::ExclusiveSizeLo)) {
+ case excl::ExclusiveWord:
+ result = writeExW(address, value, instr);
+ break;
+ case excl::ExclusiveDouble: {
+ MOZ_ASSERT((rt % 2) == 0);
+ int32_t value2 = get_register(rt + 1);
+ result = writeExDW(address, value, value2);
+ break;
+ }
+ case excl::ExclusiveByte:
+ result = writeExB(address, (uint8_t)value);
+ break;
+ case excl::ExclusiveHalf:
+ result = writeExH(address, (uint16_t)value, instr);
+ break;
+ }
+ set_register(rd, result);
+ }
+ } else {
+        MOZ_CRASH();  // Not currently used.
+ }
+ }
+ } else {
+ // Extra load/store instructions.
+ int rd = instr->rdValue();
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
+ int32_t addr = 0;
+ if (instr->bit(22) == 0) {
+ int rm = instr->rmValue();
+ int32_t rm_val = get_register(rm);
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val -= rm_val;
+ set_register(rn, rn_val);
+ break;
+ case ia_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val += rm_val;
+ set_register(rn, rn_val);
+ break;
+ case db_x:
+ rn_val -= rm_val;
+ addr = rn_val;
+ if (instr->hasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ case ib_x:
+ rn_val += rm_val;
+ addr = rn_val;
+ if (instr->hasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ default:
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ } else {
+ int32_t imm_val = (instr->immedHValue() << 4) | instr->immedLValue();
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val -= imm_val;
+ set_register(rn, rn_val);
+ break;
+ case ia_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val += imm_val;
+ set_register(rn, rn_val);
+ break;
+ case db_x:
+ rn_val -= imm_val;
+ addr = rn_val;
+ if (instr->hasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ case ib_x:
+ rn_val += imm_val;
+ addr = rn_val;
+ if (instr->hasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ default:
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ if ((instr->bits(7, 4) & 0xd) == 0xd && instr->bit(20) == 0) {
+ MOZ_ASSERT((rd % 2) == 0);
+ if (instr->hasH()) {
+ // The strd instruction.
+ int32_t value1 = get_register(rd);
+ int32_t value2 = get_register(rd + 1);
+ writeDW(addr, value1, value2);
+ } else {
+ // The ldrd instruction.
+ int* rn_data = readDW(addr);
+ if (rn_data) {
+ set_dw_register(rd, rn_data);
+ }
+ }
+ } else if (instr->hasH()) {
+ if (instr->hasSign()) {
+ if (instr->hasL()) {
+ int16_t val = readH(addr, instr);
+ set_register(rd, val);
+ } else {
+ int16_t val = get_register(rd);
+ writeH(addr, val, instr);
+ }
+ } else {
+ if (instr->hasL()) {
+ uint16_t val = readHU(addr, instr);
+ set_register(rd, val);
+ } else {
+ uint16_t val = get_register(rd);
+ writeH(addr, val, instr);
+ }
+ }
+ } else {
+ // Signed byte loads.
+ MOZ_ASSERT(instr->hasSign());
+ MOZ_ASSERT(instr->hasL());
+ int8_t val = readB(addr);
+ set_register(rd, val);
+ }
+ return;
+ }
+ } else if ((type == 0) && instr->isMiscType0()) {
+ if (instr->bits(7, 4) == 0) {
+ if (instr->bit(21) == 0) {
+ // mrs
+ int rd = instr->rdValue();
+ uint32_t flags;
+ if (instr->bit(22) == 0) {
+ // CPSR. Note: The Q flag is not yet implemented!
+ flags = (n_flag_ << 31) | (z_flag_ << 30) | (c_flag_ << 29) |
+ (v_flag_ << 28);
+ } else {
+ // SPSR
+ MOZ_CRASH();
+ }
+ set_register(rd, flags);
+ } else {
+ // msr
+ if (instr->bits(27, 23) == 2) {
+ // Register operand. For now we only emit mask 0b1100.
+ int rm = instr->rmValue();
+ mozilla::DebugOnly<uint32_t> mask = instr->bits(19, 16);
+ MOZ_ASSERT(mask == (3 << 2));
+
+ uint32_t flags = get_register(rm);
+ n_flag_ = (flags >> 31) & 1;
+ z_flag_ = (flags >> 30) & 1;
+ c_flag_ = (flags >> 29) & 1;
+ v_flag_ = (flags >> 28) & 1;
+ } else {
+ MOZ_CRASH();
+ }
+ }
+ } else if (instr->bits(22, 21) == 1) {
+ int rm = instr->rmValue();
+ switch (instr->bits(7, 4)) {
+ case 1: // BX
+ set_pc(get_register(rm));
+ break;
+ case 3: { // BLX
+ uint32_t old_pc = get_pc();
+ set_pc(get_register(rm));
+ set_register(lr, old_pc + SimInstruction::kInstrSize);
+ break;
+ }
+ case 7: { // BKPT
+ fprintf(stderr, "Simulator hit BKPT.\n");
+ if (getenv("ARM_SIM_DEBUGGER")) {
+ ArmDebugger dbg(this);
+ dbg.debug();
+ } else {
+ fprintf(stderr,
+ "Use ARM_SIM_DEBUGGER=1 to enter the builtin debugger.\n");
+ MOZ_CRASH("ARM simulator breakpoint");
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ }
+ } else if (instr->bits(22, 21) == 3) {
+ int rm = instr->rmValue();
+ int rd = instr->rdValue();
+ switch (instr->bits(7, 4)) {
+ case 1: { // CLZ
+ uint32_t bits = get_register(rm);
+ int leading_zeros = 0;
+ if (bits == 0) {
+ leading_zeros = 32;
+ } else {
+ leading_zeros = mozilla::CountLeadingZeroes32(bits);
+ }
+ set_register(rd, leading_zeros);
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ } else {
+ printf("%08x\n", instr->instructionBits());
+ MOZ_CRASH();
+ }
+ } else if ((type == 1) && instr->isNopType1()) {
+ // NOP.
+ } else if ((type == 1) && instr->isCsdbType1()) {
+ // Speculation barrier. (No-op for the simulator)
+ } else {
+ int rd = instr->rdValue();
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
+ int32_t shifter_operand = 0;
+    bool shifter_carry_out = false;
+ if (type == 0) {
+ shifter_operand = getShiftRm(instr, &shifter_carry_out);
+ } else {
+ MOZ_ASSERT(instr->typeValue() == 1);
+ shifter_operand = getImm(instr, &shifter_carry_out);
+ }
+ int32_t alu_out;
+ switch (instr->opcodeField()) {
+ case OpAnd:
+ alu_out = rn_val & shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpEor:
+ alu_out = rn_val ^ shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpSub:
+ alu_out = rn_val - shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(!borrowFrom(rn_val, shifter_operand));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, false));
+ }
+ break;
+ case OpRsb:
+ alu_out = shifter_operand - rn_val;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(!borrowFrom(shifter_operand, rn_val));
+ setVFlag(overflowFrom(alu_out, shifter_operand, rn_val, false));
+ }
+ break;
+ case OpAdd:
+ alu_out = rn_val + shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(carryFrom(rn_val, shifter_operand));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true));
+ }
+ break;
+ case OpAdc:
+ alu_out = rn_val + shifter_operand + getCarry();
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(carryFrom(rn_val, shifter_operand, getCarry()));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true));
+ }
+ break;
+ case OpSbc:
+ alu_out = rn_val - shifter_operand - (getCarry() == 0 ? 1 : 0);
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ MOZ_CRASH();
+ }
+ break;
+ case OpRsc:
+ alu_out = shifter_operand - rn_val - (getCarry() == 0 ? 1 : 0);
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ MOZ_CRASH();
+ }
+ break;
+ case OpTst:
+ if (instr->hasS()) {
+ alu_out = rn_val & shifter_operand;
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ } else {
+ alu_out = instr->immedMovwMovtValue();
+ set_register(rd, alu_out);
+ }
+ break;
+ case OpTeq:
+ if (instr->hasS()) {
+ alu_out = rn_val ^ shifter_operand;
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ } else {
+ // Other instructions matching this pattern are handled in the
+ // miscellaneous instructions part above.
+ MOZ_CRASH();
+ }
+ break;
+ case OpCmp:
+ if (instr->hasS()) {
+ alu_out = rn_val - shifter_operand;
+ setNZFlags(alu_out);
+ setCFlag(!borrowFrom(rn_val, shifter_operand));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, false));
+ } else {
+ alu_out =
+ (get_register(rd) & 0xffff) | (instr->immedMovwMovtValue() << 16);
+ set_register(rd, alu_out);
+ }
+ break;
+ case OpCmn:
+ if (instr->hasS()) {
+ alu_out = rn_val + shifter_operand;
+ setNZFlags(alu_out);
+ setCFlag(carryFrom(rn_val, shifter_operand));
+ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true));
+ } else {
+ // Other instructions matching this pattern are handled in the
+ // miscellaneous instructions part above.
+ MOZ_CRASH();
+ }
+ break;
+ case OpOrr:
+ alu_out = rn_val | shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpMov:
+ alu_out = shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpBic:
+ alu_out = rn_val & ~shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ case OpMvn:
+ alu_out = ~shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->hasS()) {
+ setNZFlags(alu_out);
+ setCFlag(shifter_carry_out);
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ }
+}
+
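+// Type 2 instructions: single data transfers (LDR/STR/LDRB/STRB) with an
+// immediate offset, covering the pre/post-indexed addressing modes selected
+// by the P, U and W bits.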
+void Simulator::decodeType2(SimInstruction* instr) {
+ int rd = instr->rdValue();
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
+ int32_t im_val = instr->offset12Value();
+ int32_t addr = 0;
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val -= im_val;
+ set_register(rn, rn_val);
+ break;
+ case ia_x:
+ MOZ_ASSERT(!instr->hasW());
+ addr = rn_val;
+ rn_val += im_val;
+ set_register(rn, rn_val);
+ break;
+ case db_x:
+ rn_val -= im_val;
+ addr = rn_val;
+ if (instr->hasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ case ib_x:
+ rn_val += im_val;
+ addr = rn_val;
+ if (instr->hasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ if (instr->hasB()) {
+ if (instr->hasL()) {
+ uint8_t val = readBU(addr);
+ set_register(rd, val);
+ } else {
+ uint8_t val = get_register(rd);
+ writeB(addr, val);
+ }
+ } else {
+ if (instr->hasL()) {
+ set_register(rd, readW(addr, instr, AllowUnaligned));
+ } else {
+ writeW(addr, get_register(rd), instr, AllowUnaligned);
+ }
+ }
+}
+
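+// Rotate a 32-bit value right by 8 * rotate bits; implements the rotation
+// field of the byte/halfword extend instructions (SXTB, SXTH, UXTB, UXTH,
+// UXTAB, UXTAH, UXTB16).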
+static uint32_t rotateBytes(uint32_t val, int32_t rotate) {
+ switch (rotate) {
+ default:
+ return val;
+ case 1:
+ return (val >> 8) | (val << 24);
+ case 2:
+ return (val >> 16) | (val << 16);
+ case 3:
+ return (val >> 24) | (val << 8);
+ }
+}
+
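+// Type 3 instructions: register-offset loads and stores plus the media
+// instructions (PKHBT/PKHTB, USAT, the sign/zero extends, REV/REV16/REVSH,
+// bitfield extract/insert, SDIV/UDIV). A UDF encoding is first offered to the
+// wasm trap handler.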
+void Simulator::decodeType3(SimInstruction* instr) {
+ if (MOZ_UNLIKELY(instr->isUDF())) {
+ uint8_t* newPC;
+ if (wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc((int32_t)newPC);
+ return;
+ }
+ MOZ_CRASH("illegal instruction encountered");
+ }
+
+ int rd = instr->rdValue();
+ int rn = instr->rnValue();
+ int32_t rn_val = get_register(rn);
+  bool shifter_carry_out = false;
+ int32_t shifter_operand = getShiftRm(instr, &shifter_carry_out);
+ int32_t addr = 0;
+ switch (instr->PUField()) {
+ case da_x:
+ MOZ_ASSERT(!instr->hasW());
+ MOZ_CRASH();
+ break;
+ case ia_x: {
+ if (instr->bit(4) == 0) {
+ // Memop.
+ } else {
+ if (instr->bit(5) == 0) {
+ switch (instr->bits(22, 21)) {
+ case 0:
+ if (instr->bit(20) == 0) {
+ if (instr->bit(6) == 0) {
+ // Pkhbt.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->rmValue());
+ int32_t shift = instr->bits(11, 7);
+ rm_val <<= shift;
+ set_register(rd, (rn_val & 0xFFFF) | (rm_val & 0xFFFF0000U));
+ } else {
+ // Pkhtb.
+ uint32_t rn_val = get_register(rn);
+ int32_t rm_val = get_register(instr->rmValue());
+ int32_t shift = instr->bits(11, 7);
+ if (shift == 0) {
+ shift = 32;
+ }
+ rm_val >>= shift;
+ set_register(rd, (rn_val & 0xFFFF0000U) | (rm_val & 0xFFFF));
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 1:
+ MOZ_CRASH();
+ break;
+ case 2:
+ MOZ_CRASH();
+ break;
+ case 3: {
+ // Usat.
+ int32_t sat_pos = instr->bits(20, 16);
+ int32_t sat_val = (1 << sat_pos) - 1;
+ int32_t shift = instr->bits(11, 7);
+ int32_t shift_type = instr->bit(6);
+ int32_t rm_val = get_register(instr->rmValue());
+ if (shift_type == 0) { // LSL
+ rm_val <<= shift;
+ } else { // ASR
+ rm_val >>= shift;
+ }
+
+ // If saturation occurs, the Q flag should be set in the
+ // CPSR. There is no Q flag yet, and no instruction (MRS)
+ // to read the CPSR directly.
+ if (rm_val > sat_val) {
+ rm_val = sat_val;
+ } else if (rm_val < 0) {
+ rm_val = 0;
+ }
+ set_register(rd, rm_val);
+ break;
+ }
+ }
+ } else {
+ switch (instr->bits(22, 21)) {
+ case 0:
+ MOZ_CRASH();
+ break;
+ case 1:
+ if (instr->bits(7, 4) == 7 && instr->bits(19, 16) == 15) {
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ if (instr->bit(20)) {
+ // Sxth.
+ set_register(rd, (int32_t)(int16_t)(rm_val & 0xFFFF));
+ } else {
+ // Sxtb.
+ set_register(rd, (int32_t)(int8_t)(rm_val & 0xFF));
+ }
+ } else if (instr->bits(20, 16) == 0b1'1111 &&
+ instr->bits(11, 4) == 0b1111'0011) {
+ // Rev
+ uint32_t rm_val = get_register(instr->rmValue());
+
+ static_assert(MOZ_LITTLE_ENDIAN());
+ set_register(rd,
+ mozilla::NativeEndian::swapToBigEndian(rm_val));
+ } else if (instr->bits(20, 16) == 0b1'1111 &&
+ instr->bits(11, 4) == 0b1111'1011) {
+ // Rev16
+ uint32_t rm_val = get_register(instr->rmValue());
+
+ static_assert(MOZ_LITTLE_ENDIAN());
+ uint32_t hi = mozilla::NativeEndian::swapToBigEndian(
+ uint16_t(rm_val >> 16));
+ uint32_t lo =
+ mozilla::NativeEndian::swapToBigEndian(uint16_t(rm_val));
+ set_register(rd, (hi << 16) | lo);
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 2:
+ if ((instr->bit(20) == 0) && (instr->bits(9, 6) == 1)) {
+ if (instr->bits(19, 16) == 0xF) {
+ // Uxtb16.
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, (rm_val & 0xFF) | (rm_val & 0xFF0000));
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 3:
+ if ((instr->bit(20) == 0) && (instr->bits(9, 6) == 1)) {
+ if (instr->bits(19, 16) == 0xF) {
+ // Uxtb.
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, (rm_val & 0xFF));
+ } else {
+ // Uxtab.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, rn_val + (rm_val & 0xFF));
+ }
+ } else if ((instr->bit(20) == 1) && (instr->bits(9, 6) == 1)) {
+ if (instr->bits(19, 16) == 0xF) {
+ // Uxth.
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, (rm_val & 0xFFFF));
+ } else {
+ // Uxtah.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = rotateBytes(get_register(instr->rmValue()),
+ instr->bits(11, 10));
+ set_register(rd, rn_val + (rm_val & 0xFFFF));
+ }
+ } else if (instr->bits(20, 16) == 0b1'1111 &&
+ instr->bits(11, 4) == 0b1111'1011) {
+ // Revsh
+ uint32_t rm_val = get_register(instr->rmValue());
+
+ static_assert(MOZ_LITTLE_ENDIAN());
+ set_register(
+ rd, int32_t(int16_t(mozilla::NativeEndian::swapToBigEndian(
+ uint16_t(rm_val)))));
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ }
+ }
+ return;
+ }
+ break;
+ }
+ case db_x: { // sudiv
+ if (instr->bit(22) == 0x0 && instr->bit(20) == 0x1 &&
+ instr->bits(15, 12) == 0x0f && instr->bits(7, 4) == 0x1) {
+ if (!instr->hasW()) {
+ // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs.
+ int rm = instr->rmValue();
+ int32_t rm_val = get_register(rm);
+ int rs = instr->rsValue();
+ int32_t rs_val = get_register(rs);
+ int32_t ret_val = 0;
+ MOZ_ASSERT(rs_val != 0);
+ if ((rm_val == INT32_MIN) && (rs_val == -1)) {
+ ret_val = INT32_MIN;
+ } else {
+ ret_val = rm_val / rs_val;
+ }
+ set_register(rn, ret_val);
+ return;
+ } else {
+ // udiv (in V8 notation matching ARM ISA format) rn = rm/rs.
+ int rm = instr->rmValue();
+ uint32_t rm_val = get_register(rm);
+ int rs = instr->rsValue();
+ uint32_t rs_val = get_register(rs);
+ uint32_t ret_val = 0;
+ MOZ_ASSERT(rs_val != 0);
+ ret_val = rm_val / rs_val;
+ set_register(rn, ret_val);
+ return;
+ }
+ }
+
+ addr = rn_val - shifter_operand;
+ if (instr->hasW()) {
+ set_register(rn, addr);
+ }
+ break;
+ }
+ case ib_x: {
+ if (instr->hasW() && (instr->bits(6, 4) == 0x5)) {
+ uint32_t widthminus1 = static_cast<uint32_t>(instr->bits(20, 16));
+ uint32_t lsbit = static_cast<uint32_t>(instr->bits(11, 7));
+ uint32_t msbit = widthminus1 + lsbit;
+ if (msbit <= 31) {
+ if (instr->bit(22)) {
+ // ubfx - unsigned bitfield extract.
+ uint32_t rm_val =
+ static_cast<uint32_t>(get_register(instr->rmValue()));
+ uint32_t extr_val = rm_val << (31 - msbit);
+ extr_val = extr_val >> (31 - widthminus1);
+ set_register(instr->rdValue(), extr_val);
+ } else {
+ // sbfx - signed bitfield extract.
+ int32_t rm_val = get_register(instr->rmValue());
+ int32_t extr_val = rm_val << (31 - msbit);
+ extr_val = extr_val >> (31 - widthminus1);
+ set_register(instr->rdValue(), extr_val);
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ return;
+ } else if (!instr->hasW() && (instr->bits(6, 4) == 0x1)) {
+ uint32_t lsbit = static_cast<uint32_t>(instr->bits(11, 7));
+ uint32_t msbit = static_cast<uint32_t>(instr->bits(20, 16));
+ if (msbit >= lsbit) {
+ // bfc or bfi - bitfield clear/insert.
+ uint32_t rd_val =
+ static_cast<uint32_t>(get_register(instr->rdValue()));
+ uint32_t bitcount = msbit - lsbit + 1;
+ uint32_t mask = (1 << bitcount) - 1;
+ rd_val &= ~(mask << lsbit);
+ if (instr->rmValue() != 15) {
+ // bfi - bitfield insert.
+ uint32_t rm_val =
+ static_cast<uint32_t>(get_register(instr->rmValue()));
+ rm_val &= mask;
+ rd_val |= rm_val << lsbit;
+ }
+ set_register(instr->rdValue(), rd_val);
+ } else {
+ MOZ_CRASH();
+ }
+ return;
+ } else {
+ addr = rn_val + shifter_operand;
+ if (instr->hasW()) {
+ set_register(rn, addr);
+ }
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ if (instr->hasB()) {
+ if (instr->hasL()) {
+ uint8_t byte = readB(addr);
+ set_register(rd, byte);
+ } else {
+ uint8_t byte = get_register(rd);
+ writeB(addr, byte);
+ }
+ } else {
+ if (instr->hasL()) {
+ set_register(rd, readW(addr, instr, AllowUnaligned));
+ } else {
+ writeW(addr, get_register(rd), instr, AllowUnaligned);
+ }
+ }
+}
+
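+// Type 4 instructions: block data transfers (LDM/STM), handled by
+// handleRList.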
+void Simulator::decodeType4(SimInstruction* instr) {
+ // Only allowed to be set in privileged mode.
+ MOZ_ASSERT(instr->bit(22) == 0);
+ bool load = instr->hasL();
+ handleRList(instr, load);
+}
+
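+// Type 5 instructions: branch and branch-with-link (B/BL) with a 24-bit
+// signed word offset.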
+void Simulator::decodeType5(SimInstruction* instr) {
+ int off = instr->sImmed24Value() << 2;
+ intptr_t pc_address = get_pc();
+ if (instr->hasLink()) {
+ set_register(lr, pc_address + SimInstruction::kInstrSize);
+ }
+ int pc_reg = get_register(pc);
+ set_pc(pc_reg + off);
+}
+
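+// Type 6 instructions: coprocessor loads and stores, forwarded to
+// decodeType6CoprocessorIns.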
+void Simulator::decodeType6(SimInstruction* instr) {
+ decodeType6CoprocessorIns(instr);
+}
+
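+// Type 7 instructions: a software interrupt (SVC) when bit 24 is set,
+// otherwise either a non-VFP coprocessor instruction or a VFP instruction,
+// distinguished by bit 4 and bits 11..9.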
+void Simulator::decodeType7(SimInstruction* instr) {
+ if (instr->bit(24) == 1) {
+ softwareInterrupt(instr);
+ } else if (instr->bit(4) == 1 && instr->bits(11, 9) != 5) {
+ decodeType7CoprocessorIns(instr);
+ } else {
+ decodeTypeVFP(instr);
+ }
+}
+
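+// MCR/MRC coprocessor transfers. Only the coprocessor 15 DMB encoding is
+// implemented (as a sequentially consistent fence); DSB, ISB, MRC and all
+// other encodings crash.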
+void Simulator::decodeType7CoprocessorIns(SimInstruction* instr) {
+ if (instr->bit(20) == 0) {
+ // MCR, MCR2
+ if (instr->coprocessorValue() == 15) {
+ int opc1 = instr->bits(23, 21);
+ int opc2 = instr->bits(7, 5);
+ int CRn = instr->bits(19, 16);
+ int CRm = instr->bits(3, 0);
+ if (opc1 == 0 && opc2 == 4 && CRn == 7 && CRm == 10) {
+ // ARMv6 DSB instruction. We do not use DSB.
+ MOZ_CRASH("DSB not implemented");
+ } else if (opc1 == 0 && opc2 == 5 && CRn == 7 && CRm == 10) {
+ // ARMv6 DMB instruction.
+ AtomicOperations::fenceSeqCst();
+ } else if (opc1 == 0 && opc2 == 4 && CRn == 7 && CRm == 5) {
+ // ARMv6 ISB instruction. We do not use ISB.
+ MOZ_CRASH("ISB not implemented");
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ // MRC, MRC2
+ MOZ_CRASH();
+ }
+}
+
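+// VFP data-processing and register-transfer instructions: vmov, vabs, vneg,
+// vsqrt, vadd, vsub, vmul, vmla/vmls, vdiv, vcmp, the vcvt family, and
+// vmrs/vmsr.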
+void Simulator::decodeTypeVFP(SimInstruction* instr) {
+ MOZ_ASSERT(instr->typeValue() == 7 && instr->bit(24) == 0);
+ MOZ_ASSERT(instr->bits(11, 9) == 0x5);
+
+  // Obtain the register codes at the precision selected by the sz bit.
+ VFPRegPrecision precision =
+ (instr->szValue() == 1) ? kDoublePrecision : kSinglePrecision;
+ int vm = instr->VFPMRegValue(precision);
+ int vd = instr->VFPDRegValue(precision);
+ int vn = instr->VFPNRegValue(precision);
+
+ if (instr->bit(4) == 0) {
+ if (instr->opc1Value() == 0x7) {
+ // Other data processing instructions.
+ if ((instr->opc2Value() == 0x0) && (instr->opc3Value() == 0x1)) {
+ // vmov register to register.
+ if (instr->szValue() == 0x1) {
+ int m = instr->VFPMRegValue(kDoublePrecision);
+ int d = instr->VFPDRegValue(kDoublePrecision);
+ double temp;
+ get_double_from_d_register(m, &temp);
+ set_d_register_from_double(d, temp);
+ } else {
+ int m = instr->VFPMRegValue(kSinglePrecision);
+ int d = instr->VFPDRegValue(kSinglePrecision);
+ float temp;
+ get_float_from_s_register(m, &temp);
+ set_s_register_from_float(d, temp);
+ }
+ } else if ((instr->opc2Value() == 0x0) && (instr->opc3Value() == 0x3)) {
+ // vabs
+ if (instr->szValue() == 0x1) {
+ union {
+ double f64;
+ uint64_t u64;
+ } u;
+ get_double_from_d_register(vm, &u.f64);
+ u.u64 &= 0x7fffffffffffffffu;
+ double dd_value = u.f64;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ union {
+ float f32;
+ uint32_t u32;
+ } u;
+ get_float_from_s_register(vm, &u.f32);
+ u.u32 &= 0x7fffffffu;
+ float fd_value = u.f32;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ }
+ } else if ((instr->opc2Value() == 0x1) && (instr->opc3Value() == 0x1)) {
+ // vneg
+ if (instr->szValue() == 0x1) {
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = -dm_value;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = -fm_value;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ }
+ } else if ((instr->opc2Value() == 0x7) && (instr->opc3Value() == 0x3)) {
+ decodeVCVTBetweenDoubleAndSingle(instr);
+ } else if ((instr->opc2Value() == 0x8) && (instr->opc3Value() & 0x1)) {
+ decodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->opc2Value() == 0xA) && (instr->opc3Value() == 0x3) &&
+ (instr->bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>.
+ int fraction_bits = 32 - ((instr->bits(3, 0) << 1) | instr->bit(5));
+ int fixed_value = get_sinteger_from_s_register(vd * 2);
+ double divide = 1 << fraction_bits;
+ set_d_register_from_double(vd, fixed_value / divide);
+ } else if (((instr->opc2Value() >> 1) == 0x6) &&
+ (instr->opc3Value() & 0x1)) {
+ decodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if (((instr->opc2Value() == 0x4) || (instr->opc2Value() == 0x5)) &&
+ (instr->opc3Value() & 0x1)) {
+ decodeVCMP(instr);
+ } else if (((instr->opc2Value() == 0x1)) && (instr->opc3Value() == 0x3)) {
+ // vsqrt
+ if (instr->szValue() == 0x1) {
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = std::sqrt(dm_value);
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = std::sqrt(fm_value);
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ }
+ } else if (instr->opc3Value() == 0x0) {
+ // vmov immediate.
+ if (instr->szValue() == 0x1) {
+ set_d_register_from_double(vd, instr->doubleImmedVmov());
+ } else {
+ // vmov.f32 immediate.
+ set_s_register_from_float(vd, instr->float32ImmedVmov());
+ }
+ } else {
+ decodeVCVTBetweenFloatingPointAndIntegerFrac(instr);
+ }
+ } else if (instr->opc1Value() == 0x3) {
+ if (instr->szValue() != 0x1) {
+ if (instr->opc3Value() & 0x1) {
+ // vsub
+ float fn_value;
+ get_float_from_s_register(vn, &fn_value);
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = fn_value - fm_value;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ } else {
+ // vadd
+ float fn_value;
+ get_float_from_s_register(vn, &fn_value);
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = fn_value + fm_value;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ }
+ } else {
+ if (instr->opc3Value() & 0x1) {
+ // vsub
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = dn_value - dm_value;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ // vadd
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = dn_value + dm_value;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ }
+ }
+ } else if ((instr->opc1Value() == 0x2) && !(instr->opc3Value() & 0x1)) {
+ // vmul
+ if (instr->szValue() != 0x1) {
+ float fn_value;
+ get_float_from_s_register(vn, &fn_value);
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = fn_value * fm_value;
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ } else {
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = dn_value * dm_value;
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ }
+ } else if ((instr->opc1Value() == 0x0)) {
+ // vmla, vmls
+ const bool is_vmls = (instr->opc3Value() & 0x1);
+
+ if (instr->szValue() != 0x1) {
+ MOZ_CRASH("Not used by V8.");
+ }
+
+ double dd_val;
+ get_double_from_d_register(vd, &dd_val);
+ double dn_val;
+ get_double_from_d_register(vn, &dn_val);
+ double dm_val;
+ get_double_from_d_register(vm, &dm_val);
+
+ // Note: we do the mul and add/sub in separate steps to avoid
+ // getting a result with too high precision.
+ set_d_register_from_double(vd, dn_val * dm_val);
+ double temp;
+ get_double_from_d_register(vd, &temp);
+ if (is_vmls) {
+ temp = dd_val - temp;
+ } else {
+ temp = dd_val + temp;
+ }
+ canonicalizeNaN(&temp);
+ set_d_register_from_double(vd, temp);
+ } else if ((instr->opc1Value() == 0x4) && !(instr->opc3Value() & 0x1)) {
+ // vdiv
+ if (instr->szValue() != 0x1) {
+ float fn_value;
+ get_float_from_s_register(vn, &fn_value);
+ float fm_value;
+ get_float_from_s_register(vm, &fm_value);
+ float fd_value = fn_value / fm_value;
+ div_zero_vfp_flag_ = (fm_value == 0);
+ canonicalizeNaN(&fd_value);
+ set_s_register_from_float(vd, fd_value);
+ } else {
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ double dm_value;
+ get_double_from_d_register(vm, &dm_value);
+ double dd_value = dn_value / dm_value;
+ div_zero_vfp_flag_ = (dm_value == 0);
+ canonicalizeNaN(&dd_value);
+ set_d_register_from_double(vd, dd_value);
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ if (instr->VCValue() == 0x0 && instr->VAValue() == 0x0) {
+ decodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1) &&
+ (instr->bit(23) == 0x0)) {
+ // vmov (ARM core register to scalar).
+ int vd = instr->bits(19, 16) | (instr->bit(7) << 4);
+ double dd_value;
+ get_double_from_d_register(vd, &dd_value);
+ int32_t data[2];
+ memcpy(data, &dd_value, 8);
+ data[instr->bit(21)] = get_register(instr->rtValue());
+ memcpy(&dd_value, data, 8);
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1) &&
+ (instr->bit(23) == 0x0)) {
+ // vmov (scalar to ARM core register).
+ int vn = instr->bits(19, 16) | (instr->bit(7) << 4);
+ double dn_value;
+ get_double_from_d_register(vn, &dn_value);
+ int32_t data[2];
+ memcpy(data, &dn_value, 8);
+ set_register(instr->rtValue(), data[instr->bit(21)]);
+ } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) && (instr->bits(19, 16) == 0x1)) {
+ // vmrs
+ uint32_t rt = instr->rtValue();
+ if (rt == 0xF) {
+ copy_FPSCR_to_APSR();
+ } else {
+ // Emulate FPSCR from the Simulator flags.
+ uint32_t fpscr = (n_flag_FPSCR_ << 31) | (z_flag_FPSCR_ << 30) |
+ (c_flag_FPSCR_ << 29) | (v_flag_FPSCR_ << 28) |
+ (FPSCR_default_NaN_mode_ << 25) |
+ (inexact_vfp_flag_ << 4) | (underflow_vfp_flag_ << 3) |
+ (overflow_vfp_flag_ << 2) | (div_zero_vfp_flag_ << 1) |
+ (inv_op_vfp_flag_ << 0) | (FPSCR_rounding_mode_);
+ set_register(rt, fpscr);
+ }
+ } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) && (instr->bits(19, 16) == 0x1)) {
+ // vmsr
+ uint32_t rt = instr->rtValue();
+ if (rt == pc) {
+ MOZ_CRASH();
+ } else {
+ uint32_t rt_value = get_register(rt);
+ n_flag_FPSCR_ = (rt_value >> 31) & 1;
+ z_flag_FPSCR_ = (rt_value >> 30) & 1;
+ c_flag_FPSCR_ = (rt_value >> 29) & 1;
+ v_flag_FPSCR_ = (rt_value >> 28) & 1;
+ FPSCR_default_NaN_mode_ = (rt_value >> 25) & 1;
+ inexact_vfp_flag_ = (rt_value >> 4) & 1;
+ underflow_vfp_flag_ = (rt_value >> 3) & 1;
+ overflow_vfp_flag_ = (rt_value >> 2) & 1;
+ div_zero_vfp_flag_ = (rt_value >> 1) & 1;
+ inv_op_vfp_flag_ = (rt_value >> 0) & 1;
+ FPSCR_rounding_mode_ =
+ static_cast<VFPRoundingMode>((rt_value)&kVFPRoundingModeMask);
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ }
+}
+
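+// vmov between an ARM core register and a single-precision VFP register;
+// the VL bit selects the direction of the transfer.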
+void Simulator::decodeVMOVBetweenCoreAndSinglePrecisionRegisters(
+ SimInstruction* instr) {
+ MOZ_ASSERT(instr->bit(4) == 1 && instr->VCValue() == 0x0 &&
+ instr->VAValue() == 0x0);
+
+ int t = instr->rtValue();
+ int n = instr->VFPNRegValue(kSinglePrecision);
+ bool to_arm_register = (instr->VLValue() == 0x1);
+ if (to_arm_register) {
+ int32_t int_value = get_sinteger_from_s_register(n);
+ set_register(t, int_value);
+ } else {
+ int32_t rs_val = get_register(t);
+ set_s_register_from_sinteger(n, rs_val);
+ }
+}
+
+void Simulator::decodeVCMP(SimInstruction* instr) {
+ MOZ_ASSERT((instr->bit(4) == 0) && (instr->opc1Value() == 0x7));
+ MOZ_ASSERT(((instr->opc2Value() == 0x4) || (instr->opc2Value() == 0x5)) &&
+ (instr->opc3Value() & 0x1));
+ // Comparison.
+
+ VFPRegPrecision precision = kSinglePrecision;
+ if (instr->szValue() == 1) {
+ precision = kDoublePrecision;
+ }
+
+ int d = instr->VFPDRegValue(precision);
+ int m = 0;
+ if (instr->opc2Value() == 0x4) {
+ m = instr->VFPMRegValue(precision);
+ }
+
+ if (precision == kDoublePrecision) {
+ double dd_value;
+ get_double_from_d_register(d, &dd_value);
+ double dm_value = 0.0;
+ if (instr->opc2Value() == 0x4) {
+ get_double_from_d_register(m, &dm_value);
+ }
+
+ // Raise exceptions for quiet NaNs if necessary.
+ if (instr->bit(7) == 1) {
+ if (std::isnan(dd_value)) {
+ inv_op_vfp_flag_ = true;
+ }
+ }
+ compute_FPSCR_Flags(dd_value, dm_value);
+ } else {
+ float fd_value;
+ get_float_from_s_register(d, &fd_value);
+ float fm_value = 0.0;
+ if (instr->opc2Value() == 0x4) {
+ get_float_from_s_register(m, &fm_value);
+ }
+
+ // Raise exceptions for quiet NaNs if necessary.
+ if (instr->bit(7) == 1) {
+ if (std::isnan(fd_value)) {
+ inv_op_vfp_flag_ = true;
+ }
+ }
+ compute_FPSCR_Flags(fd_value, fm_value);
+ }
+}
+
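+// vcvt between double and single precision; the sz bit encodes the source
+// precision.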
+void Simulator::decodeVCVTBetweenDoubleAndSingle(SimInstruction* instr) {
+ MOZ_ASSERT(instr->bit(4) == 0 && instr->opc1Value() == 0x7);
+ MOZ_ASSERT(instr->opc2Value() == 0x7 && instr->opc3Value() == 0x3);
+
+ VFPRegPrecision dst_precision = kDoublePrecision;
+ VFPRegPrecision src_precision = kSinglePrecision;
+ if (instr->szValue() == 1) {
+ dst_precision = kSinglePrecision;
+ src_precision = kDoublePrecision;
+ }
+
+ int dst = instr->VFPDRegValue(dst_precision);
+ int src = instr->VFPMRegValue(src_precision);
+
+ if (dst_precision == kSinglePrecision) {
+ double val;
+ get_double_from_d_register(src, &val);
+ set_s_register_from_float(dst, static_cast<float>(val));
+ } else {
+ float val;
+ get_float_from_s_register(src, &val);
+ set_d_register_from_double(dst, static_cast<double>(val));
+ }
+}
+
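+// Returns true when converting 'val' to a 32-bit integer (unsigned if
+// 'unsigned_' is set) under the given rounding mode would be invalid, that
+// is, when the value is a NaN or out of range, so the VFP invalid-operation
+// flag must be raised.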
+static bool get_inv_op_vfp_flag(VFPRoundingMode mode, double val,
+ bool unsigned_) {
+ MOZ_ASSERT(mode == SimRN || mode == SimRM || mode == SimRZ);
+ double max_uint = static_cast<double>(0xffffffffu);
+ double max_int = static_cast<double>(INT32_MAX);
+ double min_int = static_cast<double>(INT32_MIN);
+
+ // Check for NaN.
+ if (val != val) {
+ return true;
+ }
+
+  // Check for overflow. This code works because 32-bit integers can be
+  // exactly represented by IEEE-754 64-bit floating-point values.
+ switch (mode) {
+ case SimRN:
+ return unsigned_ ? (val >= (max_uint + 0.5)) || (val < -0.5)
+ : (val >= (max_int + 0.5)) || (val < (min_int - 0.5));
+ case SimRM:
+ return unsigned_ ? (val >= (max_uint + 1.0)) || (val < 0)
+ : (val >= (max_int + 1.0)) || (val < min_int);
+ case SimRZ:
+ return unsigned_ ? (val >= (max_uint + 1.0)) || (val <= -1)
+ : (val >= (max_int + 1.0)) || (val <= (min_int - 1.0));
+ default:
+ MOZ_CRASH();
+ return true;
+ }
+}
+
+// We call this function only if we had a vfp invalid exception.
+// It returns the correct saturated value.
+static int VFPConversionSaturate(double val, bool unsigned_res) {
+ if (val != val) { // NaN.
+ return 0;
+ }
+ if (unsigned_res) {
+ return (val < 0) ? 0 : 0xffffffffu;
+ }
+ return (val < 0) ? INT32_MIN : INT32_MAX;
+}
+
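+// vcvt between floating point and a 32-bit integer. In the to-integer
+// direction the rounding mode comes from the FPSCR unless bit 7 requests
+// round-to-zero, and invalid conversions saturate with the invalid-operation
+// flag set.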
+void Simulator::decodeVCVTBetweenFloatingPointAndInteger(
+ SimInstruction* instr) {
+ MOZ_ASSERT((instr->bit(4) == 0) && (instr->opc1Value() == 0x7) &&
+ (instr->bits(27, 23) == 0x1D));
+ MOZ_ASSERT(
+ ((instr->opc2Value() == 0x8) && (instr->opc3Value() & 0x1)) ||
+ (((instr->opc2Value() >> 1) == 0x6) && (instr->opc3Value() & 0x1)));
+
+ // Conversion between floating-point and integer.
+ bool to_integer = (instr->bit(18) == 1);
+
+ VFPRegPrecision src_precision =
+ (instr->szValue() == 1) ? kDoublePrecision : kSinglePrecision;
+
+ if (to_integer) {
+ // We are playing with code close to the C++ standard's limits below,
+ // hence the very simple code and heavy checks.
+ //
+ // Note: C++ defines default type casting from floating point to integer
+ // as (close to) rounding toward zero ("fractional part discarded").
+
+ int dst = instr->VFPDRegValue(kSinglePrecision);
+ int src = instr->VFPMRegValue(src_precision);
+
+ // Bit 7 in vcvt instructions indicates if we should use the FPSCR
+ // rounding mode or the default Round to Zero mode.
+ VFPRoundingMode mode = (instr->bit(7) != 1) ? FPSCR_rounding_mode_ : SimRZ;
+ MOZ_ASSERT(mode == SimRM || mode == SimRZ || mode == SimRN);
+
+ bool unsigned_integer = (instr->bit(16) == 0);
+ bool double_precision = (src_precision == kDoublePrecision);
+
+ double val;
+ if (double_precision) {
+ get_double_from_d_register(src, &val);
+ } else {
+ float fval;
+ get_float_from_s_register(src, &fval);
+ val = double(fval);
+ }
+
+ int temp = unsigned_integer ? static_cast<uint32_t>(val)
+ : static_cast<int32_t>(val);
+
+ inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
+
+ double abs_diff = unsigned_integer
+ ? std::fabs(val - static_cast<uint32_t>(temp))
+ : std::fabs(val - temp);
+
+ inexact_vfp_flag_ = (abs_diff != 0);
+
+ if (inv_op_vfp_flag_) {
+ temp = VFPConversionSaturate(val, unsigned_integer);
+ } else {
+ switch (mode) {
+ case SimRN: {
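+          // Round to nearest; e.g. 2.5 was truncated to 2 above and stays 2
+          // (ties go to even), while 3.5 was truncated to 3 and is bumped to
+          // 4 here.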
+ int val_sign = (val > 0) ? 1 : -1;
+ if (abs_diff > 0.5) {
+ temp += val_sign;
+ } else if (abs_diff == 0.5) {
+ // Round to even if exactly halfway.
+ temp = ((temp % 2) == 0) ? temp : temp + val_sign;
+ }
+ break;
+ }
+
+ case SimRM:
+ temp = temp > val ? temp - 1 : temp;
+ break;
+
+ case SimRZ:
+ // Nothing to do.
+ break;
+
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ // Update the destination register.
+ set_s_register_from_sinteger(dst, temp);
+ } else {
+ bool unsigned_integer = (instr->bit(7) == 0);
+ int dst = instr->VFPDRegValue(src_precision);
+ int src = instr->VFPMRegValue(kSinglePrecision);
+
+ int val = get_sinteger_from_s_register(src);
+
+ if (src_precision == kDoublePrecision) {
+ if (unsigned_integer) {
+ set_d_register_from_double(
+ dst, static_cast<double>(static_cast<uint32_t>(val)));
+ } else {
+ set_d_register_from_double(dst, static_cast<double>(val));
+ }
+ } else {
+ if (unsigned_integer) {
+ set_s_register_from_float(
+ dst, static_cast<float>(static_cast<uint32_t>(val)));
+ } else {
+ set_s_register_from_float(dst, static_cast<float>(val));
+ }
+ }
+ }
+}
+
+// A VFPv3 specific instruction.
+void Simulator::decodeVCVTBetweenFloatingPointAndIntegerFrac(
+ SimInstruction* instr) {
+ MOZ_ASSERT(instr->bits(27, 24) == 0xE && instr->opc1Value() == 0x7 &&
+ instr->bit(19) == 1 && instr->bit(17) == 1 &&
+ instr->bits(11, 9) == 0x5 && instr->bit(6) == 1 &&
+ instr->bit(4) == 0);
+
+ int size = (instr->bit(7) == 1) ? 32 : 16;
+
+ int fraction_bits = size - ((instr->bits(3, 0) << 1) | instr->bit(5));
+ double mult = 1 << fraction_bits;
+
+ MOZ_ASSERT(size == 32); // Only handling size == 32 for now.
+
+ // Conversion between floating-point and integer.
+ bool to_fixed = (instr->bit(18) == 1);
+
+ VFPRegPrecision precision =
+ (instr->szValue() == 1) ? kDoublePrecision : kSinglePrecision;
+
+ if (to_fixed) {
+ // We are playing with code close to the C++ standard's limits below,
+ // hence the very simple code and heavy checks.
+ //
+ // Note: C++ defines default type casting from floating point to integer
+ // as (close to) rounding toward zero ("fractional part discarded").
+
+ int dst = instr->VFPDRegValue(precision);
+
+ bool unsigned_integer = (instr->bit(16) == 1);
+ bool double_precision = (precision == kDoublePrecision);
+
+ double val;
+ if (double_precision) {
+ get_double_from_d_register(dst, &val);
+ } else {
+ float fval;
+ get_float_from_s_register(dst, &fval);
+ val = double(fval);
+ }
+
+ // Scale value by specified number of fraction bits.
+ val *= mult;
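+    // For example, with 16 fraction bits the multiplier is 65536, so an
+    // input of 1.5 becomes 98304 (0x18000) before the integer conversion.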
+
+ // Rounding down towards zero. No need to account for the rounding error
+ // as this instruction always rounds down towards zero. See SimRZ below.
+ int temp = unsigned_integer ? static_cast<uint32_t>(val)
+ : static_cast<int32_t>(val);
+
+ inv_op_vfp_flag_ = get_inv_op_vfp_flag(SimRZ, val, unsigned_integer);
+
+ double abs_diff = unsigned_integer
+ ? std::fabs(val - static_cast<uint32_t>(temp))
+ : std::fabs(val - temp);
+
+ inexact_vfp_flag_ = (abs_diff != 0);
+
+ if (inv_op_vfp_flag_) {
+ temp = VFPConversionSaturate(val, unsigned_integer);
+ }
+
+ // Update the destination register.
+ if (double_precision) {
+ uint32_t dbl[2];
+ dbl[0] = temp;
+ dbl[1] = 0;
+ set_d_register(dst, dbl);
+ } else {
+ set_s_register_from_sinteger(dst, temp);
+ }
+ } else {
+ MOZ_CRASH(); // Not implemented, fixed to float.
+ }
+}
+
+void Simulator::decodeType6CoprocessorIns(SimInstruction* instr) {
+ MOZ_ASSERT(instr->typeValue() == 6);
+
+ if (instr->coprocessorValue() == 0xA) {
+ switch (instr->opcodeValue()) {
+ case 0x8:
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store single precision float to memory.
+ int rn = instr->rnValue();
+ int vd = instr->VFPDRegValue(kSinglePrecision);
+ int offset = instr->immed8Value();
+ if (!instr->hasU()) {
+ offset = -offset;
+ }
+
+ int32_t address = get_register(rn) + 4 * offset;
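+        // The immediate is scaled by 4; e.g. "vldr s0, [r1, #8]" encodes
+        // immed8 == 2 with U set, giving address == r1 + 8.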
+ if (instr->hasL()) {
+          // Load single from memory: vldr.
+ set_s_register_from_sinteger(vd, readW(address, instr));
+ } else {
+          // Store single to memory: vstr.
+ writeW(address, get_sinteger_from_s_register(vd), instr);
+ }
+ break;
+ }
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB:
+ // Load/store multiple single from memory: vldm/vstm.
+ handleVList(instr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ } else if (instr->coprocessorValue() == 0xB) {
+ switch (instr->opcodeValue()) {
+ case 0x2:
+        // Move a double between two GP registers and a VFP double register:
+        // vmov.
+ if (instr->bits(7, 6) != 0 || instr->bit(4) != 1) {
+ MOZ_CRASH(); // Not used atm.
+ } else {
+ int rt = instr->rtValue();
+ int rn = instr->rnValue();
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ if (instr->hasL()) {
+ int32_t data[2];
+ double d;
+ get_double_from_d_register(vm, &d);
+ memcpy(data, &d, 8);
+ set_register(rt, data[0]);
+ set_register(rn, data[1]);
+ } else {
+ int32_t data[] = {get_register(rt), get_register(rn)};
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(vm, d);
+ }
+ }
+ break;
+ case 0x8:
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store double to memory.
+ int rn = instr->rnValue();
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int offset = instr->immed8Value();
+ if (!instr->hasU()) {
+ offset = -offset;
+ }
+ int32_t address = get_register(rn) + 4 * offset;
+ if (instr->hasL()) {
+ // Load double from memory: vldr.
+ uint64_t data = readQ(address, instr);
+ double val;
+ memcpy(&val, &data, 8);
+ set_d_register_from_double(vd, val);
+ } else {
+ // Store double to memory: vstr.
+ uint64_t data;
+ double val;
+ get_double_from_d_register(vd, &val);
+ memcpy(&data, &val, 8);
+ writeQ(address, data, instr);
+ }
+ break;
+ }
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB:
+ // Load/store multiple double from memory: vldm/vstm.
+ handleVList(instr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+}
+
+void Simulator::decodeSpecialCondition(SimInstruction* instr) {
+ switch (instr->specialValue()) {
+ case 5:
+ if (instr->bits(18, 16) == 0 && instr->bits(11, 6) == 0x28 &&
+ instr->bit(4) == 1) {
+ // vmovl signed
+ if ((instr->vdValue() & 1) != 0) {
+ MOZ_CRASH("Undefined behavior");
+ }
+ int Vd = (instr->bit(22) << 3) | (instr->vdValue() >> 1);
+ int Vm = (instr->bit(5) << 4) | instr->vmValue();
+ int imm3 = instr->bits(21, 19);
+ if (imm3 != 1 && imm3 != 2 && imm3 != 4) {
+ MOZ_CRASH();
+ }
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ int8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ int16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 7:
+ if (instr->bits(18, 16) == 0 && instr->bits(11, 6) == 0x28 &&
+ instr->bit(4) == 1) {
+ // vmovl unsigned.
+ if ((instr->vdValue() & 1) != 0) {
+ MOZ_CRASH("Undefined behavior");
+ }
+ int Vd = (instr->bit(22) << 3) | (instr->vdValue() >> 1);
+ int Vm = (instr->bit(5) << 4) | instr->vmValue();
+ int imm3 = instr->bits(21, 19);
+ if (imm3 != 1 && imm3 != 2 && imm3 != 4) {
+ MOZ_CRASH();
+ }
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ uint8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ uint16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 8:
+ if (instr->bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->bit(22) << 4) | instr->vdValue();
+ int Rn = instr->vnValue();
+ int type = instr->bits(11, 8);
+ int Rm = instr->vmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ get_d_register(Vd + r, data);
+ // TODO: We should AllowUnaligned here only if the alignment attribute
+ // of the instruction calls for default alignment.
+ //
+ // Use writeQ to get handling of traps right. (The spec says to
+ // perform two individual word writes, but let's not worry about
+ // that.)
+ writeQ(address, (uint64_t(data[1]) << 32) | uint64_t(data[0]), instr,
+ AllowUnaligned);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
+ } else if (instr->bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->bit(22) << 4) | instr->vdValue();
+ int Rn = instr->vnValue();
+ int type = instr->bits(11, 8);
+ int Rm = instr->vmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ // TODO: We should AllowUnaligned here only if the alignment attribute
+ // of the instruction calls for default alignment.
+ //
+ // Use readQ to get handling of traps right. (The spec says to
+ // perform two individual word reads, but let's not worry about that.)
+ uint64_t tmp = readQ(address, instr, AllowUnaligned);
+ data[0] = tmp;
+ data[1] = tmp >> 32;
+ set_d_register(Vd + r, data);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 9:
+ if (instr->bits(9, 8) == 0) {
+ int Vd = (instr->bit(22) << 4) | instr->vdValue();
+ int Rn = instr->vnValue();
+ int size = instr->bits(11, 10);
+ int Rm = instr->vmValue();
+ int index = instr->bits(7, 5);
+ int align = instr->bit(4);
+ int32_t address = get_register(Rn);
+ if (size != 2 || align) {
+ MOZ_CRASH("NYI");
+ }
+ int a = instr->bits(5, 4);
+ if (a != 0 && a != 3) {
+ MOZ_CRASH("Unspecified");
+ }
+ if (index > 1) {
+ Vd++;
+ index -= 2;
+ }
+ uint32_t data[2];
+ get_d_register(Vd, data);
+ switch (instr->bits(21, 20)) {
+ case 0:
+ // vst1 single element from one lane
+ writeW(address, data[index], instr, AllowUnaligned);
+ break;
+ case 2:
+ // vld1 single element to one lane
+ data[index] = readW(address, instr, AllowUnaligned);
+ set_d_register(Vd, data);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+ address += 4;
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 0xA:
+ if (instr->bits(31, 20) == 0xf57) {
+ switch (instr->bits(7, 4)) {
+ case 1: // CLREX
+ exclusiveMonitorClear();
+ break;
+ case 5: // DMB
+ AtomicOperations::fenceSeqCst();
+ break;
+ case 4: // DSB
+ // We do not use DSB.
+ MOZ_CRASH("DSB unimplemented");
+ case 6: // ISB
+ // We do not use ISB.
+ MOZ_CRASH("ISB unimplemented");
+ default:
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 0xB:
+ if (instr->bits(22, 20) == 5 && instr->bits(15, 12) == 0xf) {
+ // pld: ignore instruction.
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 0x1C:
+ case 0x1D:
+ if (instr->bit(4) == 1 && instr->bits(11, 9) != 5) {
+ // MCR, MCR2, MRC, MRC2 with cond == 15
+ decodeType7CoprocessorIns(instr);
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+// Executes the current instruction.
+void Simulator::instructionDecode(SimInstruction* instr) {
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ SimulatorProcess::checkICacheLocked(instr);
+ }
+
+ pc_modified_ = false;
+
+ static const uint32_t kSpecialCondition = 15 << 28;
+ if (instr->conditionField() == kSpecialCondition) {
+ decodeSpecialCondition(instr);
+ } else if (conditionallyExecute(instr)) {
+ switch (instr->typeValue()) {
+ case 0:
+ case 1:
+ decodeType01(instr);
+ break;
+ case 2:
+ decodeType2(instr);
+ break;
+ case 3:
+ decodeType3(instr);
+ break;
+ case 4:
+ decodeType4(instr);
+ break;
+ case 5:
+ decodeType5(instr);
+ break;
+ case 6:
+ decodeType6(instr);
+ break;
+ case 7:
+ decodeType7(instr);
+ break;
+ default:
+ MOZ_CRASH();
+ break;
+ }
+    // If the instruction is a conditional stop that is not taken, we need to
+    // skip the inlined message address.
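+    // (kInstrSize is 4, so the 2 * kInstrSize skip covers the stop instruction
+    // itself plus the message pointer word that follows it.)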
+ } else if (instr->isStop()) {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+ if (!pc_modified_) {
+ set_register(pc,
+ reinterpret_cast<int32_t>(instr) + SimInstruction::kInstrSize);
+ }
+}
+
+void Simulator::enable_single_stepping(SingleStepCallback cb, void* arg) {
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void Simulator::disable_single_stepping() {
+ if (!single_stepping_) {
+ return;
+ }
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
+template <bool EnableStopSimAt>
+void Simulator::execute() {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the raw
+ // PC value and not the one used as input to arithmetic instructions.
+ int program_counter = get_pc();
+
+ while (program_counter != end_sim_pc) {
+ if (EnableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ fprintf(stderr, "\nStopped simulation at icount %lld\n", icount_);
+ ArmDebugger dbg(this);
+ dbg.debug();
+ } else {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this,
+ (void*)program_counter);
+ }
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+ }
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+}
+
+void Simulator::callInternal(uint8_t* entry) {
+ // Prepare to execute the code at entry.
+ set_register(pc, reinterpret_cast<int32_t>(entry));
+
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ set_register(lr, end_sim_pc);
+
+ // Remember the values of callee-saved registers. The code below assumes
+ // that r9 is not used as sb (static base) in simulator code and therefore
+ // is regarded as a callee-saved register.
+ int32_t r4_val = get_register(r4);
+ int32_t r5_val = get_register(r5);
+ int32_t r6_val = get_register(r6);
+ int32_t r7_val = get_register(r7);
+ int32_t r8_val = get_register(r8);
+ int32_t r9_val = get_register(r9);
+ int32_t r10_val = get_register(r10);
+ int32_t r11_val = get_register(r11);
+
+ // Remember d8 to d15 which are callee-saved.
+ uint64_t d8_val;
+ get_d_register(d8, &d8_val);
+ uint64_t d9_val;
+ get_d_register(d9, &d9_val);
+ uint64_t d10_val;
+ get_d_register(d10, &d10_val);
+ uint64_t d11_val;
+ get_d_register(d11, &d11_val);
+ uint64_t d12_val;
+ get_d_register(d12, &d12_val);
+ uint64_t d13_val;
+ get_d_register(d13, &d13_val);
+ uint64_t d14_val;
+ get_d_register(d14, &d14_val);
+ uint64_t d15_val;
+ get_d_register(d15, &d15_val);
+
+  // Set up the callee-saved registers with a known value, so that we can
+  // check that they are preserved properly across JS execution.
+ int32_t callee_saved_value = uint32_t(icount_);
+ uint64_t callee_saved_value_d = uint64_t(icount_);
+
+ if (!skipCalleeSavedRegsCheck) {
+ set_register(r4, callee_saved_value);
+ set_register(r5, callee_saved_value);
+ set_register(r6, callee_saved_value);
+ set_register(r7, callee_saved_value);
+ set_register(r8, callee_saved_value);
+ set_register(r9, callee_saved_value);
+ set_register(r10, callee_saved_value);
+ set_register(r11, callee_saved_value);
+
+ set_d_register(d8, &callee_saved_value_d);
+ set_d_register(d9, &callee_saved_value_d);
+ set_d_register(d10, &callee_saved_value_d);
+ set_d_register(d11, &callee_saved_value_d);
+ set_d_register(d12, &callee_saved_value_d);
+ set_d_register(d13, &callee_saved_value_d);
+ set_d_register(d14, &callee_saved_value_d);
+ set_d_register(d15, &callee_saved_value_d);
+ }
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1L) {
+ execute<true>();
+ } else {
+ execute<false>();
+ }
+
+ if (!skipCalleeSavedRegsCheck) {
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == get_register(r4));
+ MOZ_ASSERT(callee_saved_value == get_register(r5));
+ MOZ_ASSERT(callee_saved_value == get_register(r6));
+ MOZ_ASSERT(callee_saved_value == get_register(r7));
+ MOZ_ASSERT(callee_saved_value == get_register(r8));
+ MOZ_ASSERT(callee_saved_value == get_register(r9));
+ MOZ_ASSERT(callee_saved_value == get_register(r10));
+ MOZ_ASSERT(callee_saved_value == get_register(r11));
+
+ uint64_t value;
+ get_d_register(d8, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d9, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d10, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d11, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d12, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d13, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d14, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+ get_d_register(d15, &value);
+ MOZ_ASSERT(callee_saved_value_d == value);
+
+ // Restore callee-saved registers with the original value.
+ set_register(r4, r4_val);
+ set_register(r5, r5_val);
+ set_register(r6, r6_val);
+ set_register(r7, r7_val);
+ set_register(r8, r8_val);
+ set_register(r9, r9_val);
+ set_register(r10, r10_val);
+ set_register(r11, r11_val);
+
+ set_d_register(d8, &d8_val);
+ set_d_register(d9, &d9_val);
+ set_d_register(d10, &d10_val);
+ set_d_register(d11, &d11_val);
+ set_d_register(d12, &d12_val);
+ set_d_register(d13, &d13_val);
+ set_d_register(d14, &d14_val);
+ set_d_register(d15, &d15_val);
+ }
+}
+
+int32_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ // First four arguments passed in registers.
+ if (argument_count >= 1) {
+ set_register(r0, va_arg(parameters, int32_t));
+ }
+ if (argument_count >= 2) {
+ set_register(r1, va_arg(parameters, int32_t));
+ }
+ if (argument_count >= 3) {
+ set_register(r2, va_arg(parameters, int32_t));
+ }
+ if (argument_count >= 4) {
+ set_register(r3, va_arg(parameters, int32_t));
+ }
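+  // For example, a hypothetical six-argument call
+  //   sim->call(code, 6, a, b, c, d, e, f)
+  // places a-d in r0-r3 and e, f at [sp] and [sp + 4] below.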
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ int entry_stack = original_stack;
+ if (argument_count >= 4) {
+ entry_stack -= (argument_count - 4) * sizeof(int32_t);
+ }
+
+ entry_stack &= ~ABIStackAlignment;
+
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == get_register(sp));
+ set_register(sp, original_stack);
+
+ int32_t result = get_register(r0);
+ return result;
+}
+
+Simulator* Simulator::Current() {
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ return cx->simulator();
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator* JSContext::simulator() const { return simulator_; }
diff --git a/js/src/jit/arm/Simulator-arm.h b/js/src/jit/arm/Simulator-arm.h
new file mode 100644
index 0000000000..fba0f8ce5e
--- /dev/null
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -0,0 +1,632 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_arm_Simulator_arm_h
+#define jit_arm_Simulator_arm_h
+
+#ifdef JS_SIMULATOR_ARM
+
+# include "mozilla/Atomics.h"
+
+# include "jit/arm/Architecture-arm.h"
+# include "jit/arm/disasm/Disasm-arm.h"
+# include "jit/IonTypes.h"
+# include "js/AllocPolicy.h"
+# include "js/ProfilingFrameIterator.h"
+# include "threading/Thread.h"
+# include "vm/MutexIDs.h"
+# include "wasm/WasmSignalHandlers.h"
+
+namespace js {
+namespace jit {
+
+class JitActivation;
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
+
+// VFP rounding modes. See ARM DDI 0406B Page A2-29.
+enum VFPRoundingMode {
+ SimRN = 0 << 22, // Round to Nearest.
+ SimRP = 1 << 22, // Round towards Plus Infinity.
+ SimRM = 2 << 22, // Round towards Minus Infinity.
+ SimRZ = 3 << 22, // Round towards zero.
+
+ // Aliases.
+ kRoundToNearest = SimRN,
+ kRoundToPlusInf = SimRP,
+ kRoundToMinusInf = SimRM,
+ kRoundToZero = SimRZ
+};
+
+const uint32_t kVFPRoundingModeMask = 3 << 22;
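+// For example, masking a raw FPSCR value with kVFPRoundingModeMask and
+// comparing against SimRM tests for round-towards-minus-infinity.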
+
+typedef int32_t Instr;
+class SimInstruction;
+
+// Per thread simulator state.
+class Simulator {
+ public:
+ friend class ArmDebugger;
+ enum Register {
+ no_reg = -1,
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ num_registers,
+ fp = 11,
+ ip = 12,
+ sp = 13,
+ lr = 14,
+ pc = 15,
+ s0 = 0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ s9,
+ s10,
+ s11,
+ s12,
+ s13,
+ s14,
+ s15,
+ s16,
+ s17,
+ s18,
+ s19,
+ s20,
+ s21,
+ s22,
+ s23,
+ s24,
+ s25,
+ s26,
+ s27,
+ s28,
+ s29,
+ s30,
+ s31,
+ num_s_registers = 32,
+ d0 = 0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31,
+ num_d_registers = 32,
+ q0 = 0,
+ q1,
+ q2,
+ q3,
+ q4,
+ q5,
+ q6,
+ q7,
+ q8,
+ q9,
+ q10,
+ q11,
+ q12,
+ q13,
+ q14,
+ q15,
+ num_q_registers = 16
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create();
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods
+ // above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static uintptr_t StackLimit() { return Simulator::Current()->stackLimit(); }
+
+ // Disassemble some instructions starting at instr and print them
+ // on stdout. Useful for working within GDB after a MOZ_CRASH(),
+ // among other things.
+ //
+ // Typical use within a crashed instruction decoding method is simply:
+ //
+ // call Simulator::disassemble(instr, 1)
+ //
+ // or use one of the more convenient inline methods below.
+ static void disassemble(SimInstruction* instr, size_t n);
+
+ // Disassemble one instruction.
+ // "call disasm(instr)"
+ void disasm(SimInstruction* instr);
+
+ // Disassemble n instructions starting at instr.
+ // "call disasm(instr, 3)"
+ void disasm(SimInstruction* instr, size_t n);
+
+ // Skip backwards m instructions before starting, then disassemble n
+ // instructions.
+ // "call disasm(instr, 3, 7)"
+ void disasm(SimInstruction* instr, size_t m, size_t n);
+
+ uintptr_t* addressOfStackLimit();
+
+  // Accessors for register state. Reading the pc value adheres to the ARM
+  // architecture specification and is off by 8 from the currently executing
+  // instruction.
+ void set_register(int reg, int32_t value);
+ int32_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
+ void set_register_pair_from_double(int reg, double* value);
+ void set_dw_register(int dreg, const int* dbl);
+
+ // Support for VFP.
+ void get_d_register(int dreg, uint64_t* value);
+ void set_d_register(int dreg, const uint64_t* value);
+ void get_d_register(int dreg, uint32_t* value);
+ void set_d_register(int dreg, const uint32_t* value);
+ void get_q_register(int qreg, uint64_t* value);
+ void set_q_register(int qreg, const uint64_t* value);
+ void get_q_register(int qreg, uint32_t* value);
+ void set_q_register(int qreg, const uint32_t* value);
+ void set_s_register(int reg, unsigned int value);
+ unsigned int get_s_register(int reg) const;
+
+ void set_d_register_from_double(int dreg, const double& dbl) {
+ setVFPRegister<double, 2>(dreg, dbl);
+ }
+ void get_double_from_d_register(int dreg, double* out) {
+ getFromVFPRegister<double, 2>(dreg, out);
+ }
+ void set_s_register_from_float(int sreg, const float flt) {
+ setVFPRegister<float, 1>(sreg, flt);
+ }
+ void get_float_from_s_register(int sreg, float* out) {
+ getFromVFPRegister<float, 1>(sreg, out);
+ }
+ void set_s_register_from_sinteger(int sreg, const int sint) {
+ setVFPRegister<int, 1>(sreg, sint);
+ }
+ int get_sinteger_from_s_register(int sreg) {
+ int ret;
+ getFromVFPRegister<int, 1>(sreg, &ret);
+ return ret;
+ }
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int32_t value);
+ int32_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const {
+ return reinterpret_cast<T>(get_pc());
+ }
+
+ void enable_single_stepping(SingleStepCallback cb, void* arg);
+ void disable_single_stepping();
+
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes ARM instructions until the PC reaches end_sim_pc.
+ template <bool EnableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int32_t call(uint8_t* entry, int argument_count, ...);
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_lr, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_lr = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the lr is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2
+ };
+
+ // ForbidUnaligned means "always fault on unaligned access".
+ //
+ // AllowUnaligned means "allow the unaligned access if other conditions are
+ // met". The "other conditions" vary with the instruction: For all
+ // instructions the base condition is !HasAlignmentFault(), ie, the chip is
+ // configured to allow unaligned accesses. For instructions like VLD1
+ // there is an additional constraint that the alignment attribute in the
+ // instruction must be set to "default alignment".
+
+ enum UnalignedPolicy { ForbidUnaligned, AllowUnaligned };
+
+ bool init();
+
+ // Checks if the current instruction should be executed based on its
+ // condition bits.
+ inline bool conditionallyExecute(SimInstruction* instr);
+
+ // Helper functions to set the conditional flags in the architecture state.
+ void setNZFlags(int32_t val);
+ void setCFlag(bool val);
+ void setVFlag(bool val);
+ bool carryFrom(int32_t left, int32_t right, int32_t carry = 0);
+ bool borrowFrom(int32_t left, int32_t right);
+ bool overflowFrom(int32_t alu_out, int32_t left, int32_t right,
+ bool addition);
+
+ inline int getCarry() { return c_flag_ ? 1 : 0; };
+
+ // Support for VFP.
+ void compute_FPSCR_Flags(double val1, double val2);
+ void copy_FPSCR_to_APSR();
+ inline void canonicalizeNaN(double* value);
+ inline void canonicalizeNaN(float* value);
+
+ // Helper functions to decode common "addressing" modes
+ int32_t getShiftRm(SimInstruction* instr, bool* carry_out);
+ int32_t getImm(SimInstruction* instr, bool* carry_out);
+ int32_t processPU(SimInstruction* instr, int num_regs, int operand_size,
+ intptr_t* start_address, intptr_t* end_address);
+ void handleRList(SimInstruction* instr, bool load);
+ void handleVList(SimInstruction* inst);
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ inline bool isStopInstruction(SimInstruction* instr);
+ inline bool isWatchedStop(uint32_t bkpt_code);
+ inline bool isEnabledStop(uint32_t bkpt_code);
+ inline void enableStop(uint32_t bkpt_code);
+ inline void disableStop(uint32_t bkpt_code);
+ inline void increaseStopCounter(uint32_t bkpt_code);
+ void printStopInfo(uint32_t code);
+
+ // Handle a wasm interrupt triggered by an async signal handler.
+ JS::ProfilingFrameIterator::RegisterState registerState();
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ // This method is rather hot so inline the normal (no-wasm) case.
+ bool MOZ_ALWAYS_INLINE handleWasmSegFault(int32_t addr, unsigned numBytes) {
+ if (MOZ_LIKELY(!wasm::CodeExists)) {
+ return false;
+ }
+
+ uint8_t* newPC;
+ if (!wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes,
+ &newPC)) {
+ return false;
+ }
+
+ set_pc(int32_t(newPC));
+ return true;
+ }
+
+ // Read and write memory.
+ inline uint8_t readBU(int32_t addr);
+ inline int8_t readB(int32_t addr);
+ inline void writeB(int32_t addr, uint8_t value);
+ inline void writeB(int32_t addr, int8_t value);
+
+ inline uint8_t readExBU(int32_t addr);
+ inline int32_t writeExB(int32_t addr, uint8_t value);
+
+ inline uint16_t readHU(int32_t addr, SimInstruction* instr);
+ inline int16_t readH(int32_t addr, SimInstruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void writeH(int32_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(int32_t addr, int16_t value, SimInstruction* instr);
+
+ inline uint16_t readExHU(int32_t addr, SimInstruction* instr);
+ inline int32_t writeExH(int32_t addr, uint16_t value, SimInstruction* instr);
+
+ inline int readW(int32_t addr, SimInstruction* instr,
+ UnalignedPolicy f = ForbidUnaligned);
+ inline void writeW(int32_t addr, int value, SimInstruction* instr,
+ UnalignedPolicy f = ForbidUnaligned);
+
+ inline uint64_t readQ(int32_t addr, SimInstruction* instr,
+ UnalignedPolicy f = ForbidUnaligned);
+ inline void writeQ(int32_t addr, uint64_t value, SimInstruction* instr,
+ UnalignedPolicy f = ForbidUnaligned);
+
+ inline int readExW(int32_t addr, SimInstruction* instr);
+ inline int writeExW(int32_t addr, int value, SimInstruction* instr);
+
+ int32_t* readDW(int32_t addr);
+ void writeDW(int32_t addr, int32_t value1, int32_t value2);
+
+ int32_t readExDW(int32_t addr, int32_t* hibits);
+ int32_t writeExDW(int32_t addr, int32_t value1, int32_t value2);
+
+ // Executing is handled based on the instruction type.
+ // Both type 0 and type 1 rolled into one.
+ void decodeType01(SimInstruction* instr);
+ void decodeType2(SimInstruction* instr);
+ void decodeType3(SimInstruction* instr);
+ void decodeType4(SimInstruction* instr);
+ void decodeType5(SimInstruction* instr);
+ void decodeType6(SimInstruction* instr);
+ void decodeType7(SimInstruction* instr);
+
+ // Support for VFP.
+ void decodeTypeVFP(SimInstruction* instr);
+ void decodeType6CoprocessorIns(SimInstruction* instr);
+ void decodeSpecialCondition(SimInstruction* instr);
+
+ void decodeVMOVBetweenCoreAndSinglePrecisionRegisters(SimInstruction* instr);
+ void decodeVCMP(SimInstruction* instr);
+ void decodeVCVTBetweenDoubleAndSingle(SimInstruction* instr);
+ void decodeVCVTBetweenFloatingPointAndInteger(SimInstruction* instr);
+ void decodeVCVTBetweenFloatingPointAndIntegerFrac(SimInstruction* instr);
+
+ // Support for some system functions.
+ void decodeType7CoprocessorIns(SimInstruction* instr);
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+
+ public:
+ static int64_t StopSimAt;
+
+  // For testing the MoveResolver code, a MoveResolver is set up, the VFP
+  // registers are loaded with pre-determined values, and the resulting code
+  // sequence is simulated. To test this with the simulator, the callee-saved
+  // registers can't be trashed, so this flag disables the callee-saved
+  // register check.
+ bool skipCalleeSavedRegsCheck;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type);
+
+ private:
+ // Handle arguments and return value for runtime FP functions.
+ void getFpArgs(double* x, double* y, int32_t* z);
+ void getFpFromStack(int32_t* stack, double* x1);
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+ void scratchVolatileRegisters(bool scratchFloat = true);
+
+ template <class ReturnType, int register_size>
+ void getFromVFPRegister(int reg_index, ReturnType* out);
+
+ template <class InputType, int register_size>
+ void setVFPRegister(int reg_index, const InputType& value);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Saturating instructions require a Q flag to indicate saturation.
+ // There is currently no way to read the CPSR directly, and thus read the Q
+ // flag, so this is left unimplemented.
+ int32_t registers_[16];
+ bool n_flag_;
+ bool z_flag_;
+ bool c_flag_;
+ bool v_flag_;
+
+ // VFP architecture state.
+ uint32_t vfp_registers_[num_d_registers * 2];
+ bool n_flag_FPSCR_;
+ bool z_flag_FPSCR_;
+ bool c_flag_FPSCR_;
+ bool v_flag_FPSCR_;
+
+ // VFP rounding mode. See ARM DDI 0406B Page A2-29.
+ VFPRoundingMode FPSCR_rounding_mode_;
+ bool FPSCR_default_NaN_mode_;
+
+ // VFP FP exception flags architecture state.
+ bool inv_op_vfp_flag_;
+ bool div_zero_vfp_flag_;
+ bool overflow_vfp_flag_;
+ bool underflow_vfp_flag_;
+ bool inexact_vfp_flag_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int64_t icount_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // Single-stepping support
+ bool single_stepping_;
+ SingleStepCallback single_step_callback_;
+ void* single_step_callback_arg_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Breakpoint is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+  // A stop is enabled (the simulator will stop when it reaches the
+  // instruction) if bit 31 of watched_stops_[code].count is unset.
+  // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+  // the breakpoint was hit or passed through.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops_[kNumOfWatchedStops];
+
+ public:
+ int64_t icount() { return icount_; }
+
+ private:
+ // Exclusive access monitor
+ void exclusiveMonitorSet(uint64_t value);
+ uint64_t exclusiveMonitorGetAndClear(bool* held);
+ void exclusiveMonitorClear();
+
+ bool exclusiveMonitorHeld_;
+ uint64_t exclusiveMonitor_;
+};
+
+// Process wide simulator state.
+class SimulatorProcess {
+ friend class Redirection;
+ friend class AutoLockSimulatorCache;
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ static mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ ICacheCheckingDisableCount;
+ static void FlushICache(void* start, size_t size);
+
+ static void checkICacheLocked(SimInstruction* instr);
+
+ static bool initialize() {
+ singleton_ = js_new<SimulatorProcess>();
+ return singleton_;
+ }
+ static void destroy() {
+ js_delete(singleton_);
+ singleton_ = nullptr;
+ }
+
+ SimulatorProcess();
+ ~SimulatorProcess();
+
+ private:
+ static SimulatorProcess* singleton_;
+
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_ MOZ_UNANNOTATED;
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ static ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->icache_;
+ }
+
+ static Redirection* redirection() {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->redirection_;
+ }
+
+ static void setRedirection(js::jit::Redirection* redirection) {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ singleton_->redirection_ = redirection;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_ARM */
+
+#endif /* jit_arm_Simulator_arm_h */
diff --git a/js/src/jit/arm/Trampoline-arm.cpp b/js/src/jit/arm/Trampoline-arm.cpp
new file mode 100644
index 0000000000..551f243bd3
--- /dev/null
+++ b/js/src/jit/arm/Trampoline-arm.cpp
@@ -0,0 +1,831 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm/SharedICHelpers-arm.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+static const FloatRegisterSet NonVolatileFloatRegs = FloatRegisterSet(
+ (1ULL << FloatRegisters::d8) | (1ULL << FloatRegisters::d9) |
+ (1ULL << FloatRegisters::d10) | (1ULL << FloatRegisters::d11) |
+ (1ULL << FloatRegisters::d12) | (1ULL << FloatRegisters::d13) |
+ (1ULL << FloatRegisters::d14) | (1ULL << FloatRegisters::d15));
+
+static void GenerateReturn(MacroAssembler& masm, int returnCode) {
+ // Restore non-volatile floating point registers.
+ masm.transferMultipleByRuns(NonVolatileFloatRegs, IsLoad, StackPointer, IA);
+
+ // Get rid of padding word.
+ masm.addPtr(Imm32(sizeof(void*)), sp);
+
+ // Set up return value
+ masm.ma_mov(Imm32(returnCode), r0);
+
+ // Pop and return
+ masm.startDataTransferM(IsLoad, sp, IA, WriteBack);
+ masm.transferReg(r4);
+ masm.transferReg(r5);
+ masm.transferReg(r6);
+ masm.transferReg(r7);
+ masm.transferReg(r8);
+ masm.transferReg(r9);
+ masm.transferReg(r10);
+ masm.transferReg(r11);
+ // r12 isn't saved, so it shouldn't be restored.
+ masm.transferReg(pc);
+ masm.finishDataTransfer();
+ masm.flushBuffer();
+}
+
+struct EnterJITStack {
+ double d8;
+ double d9;
+ double d10;
+ double d11;
+ double d12;
+ double d13;
+ double d14;
+ double d15;
+
+ // Padding.
+ void* padding;
+
+ // Non-volatile registers.
+ void* r4;
+ void* r5;
+ void* r6;
+ void* r7;
+ void* r8;
+ void* r9;
+ void* r10;
+ void* r11;
+  // The ABI does not expect r12 (ip) to be preserved.
+ void* lr;
+
+ // Arguments.
+ // code == r0
+ // argc == r1
+ // argv == r2
+ // frame == r3
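+  // The remaining members are the stack-passed C++ arguments; generateEnterJIT
+  // reads them via offsetof(EnterJITStack, ...) relative to the entry sp
+  // (also copied into r11).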
+ CalleeToken token;
+ JSObject* scopeChain;
+ size_t numStackValues;
+ Value* vp;
+};
+
+/*
+ * This method generates a trampoline for a c++ function with the following
+ * signature:
+ * void enter(void* code, int argc, Value* argv, InterpreterFrame* fp,
+ * CalleeToken calleeToken, JSObject* scopeChain, Value* vp)
+ * ...using standard EABI calling convention
+ */
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ const Address slot_token(sp, offsetof(EnterJITStack, token));
+ const Address slot_vp(sp, offsetof(EnterJITStack, vp));
+
+ static_assert(OsrFrameReg == r3);
+
+ Assembler* aasm = &masm;
+
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.startDataTransferM(IsStore, sp, DB, WriteBack);
+ masm.transferReg(r4); // [sp,0]
+ masm.transferReg(r5); // [sp,4]
+ masm.transferReg(r6); // [sp,8]
+ masm.transferReg(r7); // [sp,12]
+ masm.transferReg(r8); // [sp,16]
+ masm.transferReg(r9); // [sp,20]
+ masm.transferReg(r10); // [sp,24]
+ masm.transferReg(r11); // [sp,28]
+  // The ABI does not expect r12 (ip) to be preserved.
+ masm.transferReg(lr); // [sp,32]
+ // The 5th argument is located at [sp, 36]
+ masm.finishDataTransfer();
+
+ // Add padding word.
+ masm.subPtr(Imm32(sizeof(void*)), sp);
+
+ // Push the float registers.
+ masm.transferMultipleByRuns(NonVolatileFloatRegs, IsStore, sp, DB);
+
+ // Load calleeToken into r9.
+ masm.loadPtr(slot_token, r9);
+
+ // Save stack pointer.
+ masm.movePtr(sp, r11);
+
+ // Load the number of actual arguments into r10.
+ masm.loadPtr(slot_vp, r10);
+ masm.unboxInt32(Address(r10, 0), r10);
+
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, r9,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.add32(Imm32(1), r1);
+
+ masm.bind(&noNewTarget);
+ }
+
+ // Guarantee stack alignment of Jit frames.
+ //
+ // This code moves the stack pointer to the location where it should be when
+ // we enter the Jit frame. It moves the stack pointer such that we have
+ // enough space reserved for pushing the arguments, and the JitFrameLayout.
+ // The stack pointer is also aligned on the alignment expected by the Jit
+ // frames.
+ //
+  // At the end, the register r4 is a pointer to the stack location where the
+  // first argument is expected by the Jit frame.
+ //
+ aasm->as_sub(r4, sp, O2RegImmShift(r1, LSL, 3)); // r4 = sp - argc*8
+ aasm->as_bic(r4, r4, Imm8(JitStackAlignment - 1));
+  // r4 is now aligned to the bottom of the list of arguments.
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ // sp' = ~(JitStackAlignment - 1) & (sp - argc * sizeof(Value))
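+  // For example, with argc == 3 and the 8-byte JitStackAlignment this is
+  // (sp - 24) rounded down to a multiple of 8.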
+ masm.movePtr(r4, sp);
+
+ // Get a copy of the number of args to use as a decrement counter, also set
+ // the zero condition code.
+ aasm->as_mov(r5, O2Reg(r1), SetCC);
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ {
+ Label header, footer;
+ // If there aren't any arguments, don't do anything.
+ aasm->as_b(&footer, Assembler::Zero);
+ // Get the top of the loop.
+ masm.bind(&header);
+ aasm->as_sub(r5, r5, Imm8(1), SetCC);
+    // We could be more clever and unroll this using a loadm (particularly
+    // since the offset is effectively 0), but that seems more error-prone and
+    // complex.
+ // BIG FAT WARNING: this loads both r6 and r7.
+ aasm->as_extdtr(IsLoad, 64, true, PostIndex, r6,
+ EDtrAddr(r2, EDtrOffImm(8)));
+ aasm->as_extdtr(IsStore, 64, true, PostIndex, r6,
+ EDtrAddr(r4, EDtrOffImm(8)));
+ aasm->as_b(&header, Assembler::NonZero);
+ masm.bind(&footer);
+ }
+
+ // Push the callee token.
+ masm.push(r9);
+
+ // Push the frame descriptor.
+ masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, r10, r10);
+
+ Label returnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(r11));
+ regs.take(OsrFrameReg);
+ regs.take(r0); // jitcode
+ MOZ_ASSERT(!regs.has(ReturnReg), "ReturnReg matches r0");
+
+ const Address slot_numStackValues(r11,
+ offsetof(EnterJITStack, numStackValues));
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register scratch = regs.takeAny();
+
+ Register numStackValues = regs.takeAny();
+ masm.load32(slot_numStackValues, numStackValues);
+
+ // Write return address. On ARM, CodeLabel is only used for tableswitch,
+ // so we can't use it here to get the return address. Instead, we use pc
+ // + a fixed offset to a jump to returnLabel. The pc register holds pc +
+ // 8, so we add the size of 2 instructions to skip the instructions
+ // emitted by push and jump(&skipJump).
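+    // With the mov below at address A, reading pc yields A + 8 (the push),
+    // and adding 2 * sizeof(uint32_t) gives A + 16, the jump(&returnLabel),
+    // which is where the callee eventually returns.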
+ {
+ AutoForbidPoolsAndNops afp(&masm, 5);
+ Label skipJump;
+ masm.mov(pc, scratch);
+ masm.addPtr(Imm32(2 * sizeof(uint32_t)), scratch);
+ masm.push(scratch);
+ masm.jump(&skipJump);
+ masm.jump(&returnLabel);
+ masm.bind(&skipJump);
+ }
+
+ // Frame prologue.
+ masm.push(FramePointer);
+ masm.mov(sp, FramePointer);
+
+ // Reserve frame.
+ masm.subPtr(Imm32(BaselineFrame::Size()), sp);
+
+ Register framePtrScratch = regs.takeAny();
+ masm.touchFrameValues(numStackValues, scratch, framePtrScratch);
+ masm.mov(sp, framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ masm.ma_lsl(Imm32(3), numStackValues, scratch);
+ masm.ma_sub(sp, scratch, sp);
+
+ // Enter exit frame.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(Imm32(0)); // Fake return address.
+ masm.push(FramePointer);
+ // No GC things to mark on the stack, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.push(r0); // jitcode
+
+ using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtrScratch); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Register jitcode = regs.takeAny();
+ masm.pop(jitcode);
+
+ MOZ_ASSERT(jitcode != ReturnReg);
+
+ Label error;
+ masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), sp);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(FramePointer, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: frame epilogue, load error value, discard return address and return.
+ masm.bind(&error);
+ masm.mov(FramePointer, sp);
+ masm.pop(FramePointer);
+ masm.addPtr(Imm32(sizeof(uintptr_t)), sp); // Return address.
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&returnLabel);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != r0);
+ masm.loadPtr(Address(r11, offsetof(EnterJITStack, scopeChain)),
+ R1.scratchReg());
+ }
+
+ // The callee will push the return address and frame pointer on the stack,
+ // thus we check that the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+ // Call the function.
+ masm.callJitNoProfiler(r0);
+
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+
+ // Discard arguments and padding. Set sp to the address of the EnterJITStack
+ // on the stack.
+ masm.mov(r11, sp);
+
+ // Store the returned value into the slot_vp
+ masm.loadPtr(slot_vp, r5);
+ masm.storeValue(JSReturnOperand, Address(r5, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, true);
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ // Not supported, or not implemented yet.
+ // TODO: Implement along with the corresponding stack-walker changes, in
+ // coordination with the Gecko Profiler, see bug 1635987 and follow-ups.
+ return mozilla::Nothing{};
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ // See large comment in x86's JitRuntime::generateInvalidator.
+
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+ // At this point, one of two things has happened:
+ // 1) Execution has just returned from C code, which left the stack aligned
+ // 2) Execution has just returned from Ion code, which left the stack
+ // unaligned. The old return address should not matter, but we still want the
+ // stack to be aligned, and there is no good reason to automatically align it
+ // with a call to setupUnalignedABICall.
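+  // Clearing the low three bits below aligns sp down to an 8-byte boundary.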
+ masm.as_bic(sp, sp, Imm8(7));
+ masm.startDataTransferM(IsStore, sp, DB, WriteBack);
+ // We don't have to push everything, but this is likely easier.
+ // Setting regs_.
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ masm.transferReg(Register::FromCode(i));
+ }
+ masm.finishDataTransfer();
+
+  // Since our data structures for stack inspection are compile-time fixed,
+  // if there are only 16 double registers, then we need to reserve
+  // space on the stack for the missing 16.
+ if (FloatRegisters::ActualTotalPhys() != FloatRegisters::TotalPhys) {
+ ScratchRegisterScope scratch(masm);
+ int missingRegs =
+ FloatRegisters::TotalPhys - FloatRegisters::ActualTotalPhys();
+ masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp, scratch);
+ }
+
+ masm.startFloatTransferM(IsStore, sp, DB, WriteBack);
+ for (uint32_t i = 0; i < FloatRegisters::ActualTotalPhys(); i++) {
+ masm.transferFloatReg(FloatRegister(i, FloatRegister::Double));
+ }
+ masm.finishFloatTransfer();
+
+ masm.ma_mov(sp, r0);
+ // Reserve 8 bytes for the outparam to ensure alignment for
+ // setupAlignedABICall.
+ masm.reserveStack(sizeof(void*) * 2);
+ masm.mov(sp, r1);
+ using Fn =
+ bool (*)(InvalidationBailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(r2); // Get bailoutInfo outparam.
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+ masm.pushReturnAddress();
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ static_assert(JitStackAlignment == sizeof(Value));
+
+ // Copy number of actual arguments into r0 and r8.
+ masm.loadNumActualArgs(FramePointer, r0);
+ masm.mov(r0, r8);
+
+ // Load the number of |undefined|s to push into r6.
+ masm.loadPtr(
+ Address(FramePointer, RectifierFrameLayout::offsetOfCalleeToken()), r1);
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_and(Imm32(CalleeTokenMask), r1, r6, scratch);
+ }
+ masm.loadFunctionArgCount(r6, r6);
+
+ masm.ma_sub(r6, r8, r2);
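+  // r2 now holds the number of missing formals; e.g. a one-argument call into
+  // a function declared with three formals pushes two |undefined| values in
+  // the loop below.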
+
+ // Get the topmost argument.
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_alu(sp, lsl(r8, 3), r3, OpAdd); // r3 <- sp + nargs * 8
+ masm.ma_add(r3, Imm32(sizeof(RectifierFrameLayout)), r3, scratch);
+ }
+
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, r1,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+    // Add sizeof(Value) to skip over |this|.
+ masm.as_extdtr(IsLoad, 64, true, Offset, r4, EDtrAddr(r3, EDtrOffImm(8)));
+ masm.as_extdtr(IsStore, 64, true, PreIndex, r4,
+ EDtrAddr(sp, EDtrOffImm(-8)));
+
+ masm.bind(&notConstructing);
+ }
+
+ // Push undefined.
+ masm.moveValue(UndefinedValue(), ValueOperand(r5, r4));
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+ masm.as_extdtr(IsStore, 64, true, PreIndex, r4,
+ EDtrAddr(sp, EDtrOffImm(-8)));
+ masm.as_sub(r2, r2, Imm8(1), SetCC);
+
+ masm.ma_b(&undefLoopTop, Assembler::NonZero);
+ }
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ {
+ Label copyLoopTop;
+ masm.bind(&copyLoopTop);
+ masm.as_extdtr(IsLoad, 64, true, PostIndex, r4,
+ EDtrAddr(r3, EDtrOffImm(-8)));
+ masm.as_extdtr(IsStore, 64, true, PreIndex, r4,
+ EDtrAddr(sp, EDtrOffImm(-8)));
+
+ masm.as_sub(r8, r8, Imm8(1), SetCC);
+ masm.ma_b(&copyLoopTop, Assembler::NotSigned);
+ }
+
+ // Construct JitFrameLayout.
+ masm.ma_push(r1); // callee token
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, r0, r0);
+
+ // Call the target function.
+ masm.andPtr(Imm32(CalleeTokenMask), r1);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(r1, r3);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(r3);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(r1, r3, &noBaselineScript);
+ masm.callJitNoProfiler(r3);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(r1, r3);
+ masm.callJitNoProfiler(r3);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ // STEP 1a: Save our register sets to the stack so Bailout() can read
+ // everything.
+ // sp % 8 == 0
+
+ masm.startDataTransferM(IsStore, sp, DB, WriteBack);
+ // We don't have to push everything, but this is likely easier.
+ // Setting regs_.
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ masm.transferReg(Register::FromCode(i));
+ }
+ masm.finishDataTransfer();
+
+ ScratchRegisterScope scratch(masm);
+
+ // Since our data structures for stack inspection are fixed at compile
+ // time, if the hardware exposes only 16 double registers we still need to
+ // reserve stack space for the missing 16.
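+ // (E.g. on a VFPv3-D16 core ActualTotalPhys() is 16 while TotalPhys is 32,
+ // so 16 * sizeof(double) = 128 bytes of padding are reserved here.)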
+ if (FloatRegisters::ActualTotalPhys() != FloatRegisters::TotalPhys) {
+ int missingRegs =
+ FloatRegisters::TotalPhys - FloatRegisters::ActualTotalPhys();
+ masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp, scratch);
+ }
+ masm.startFloatTransferM(IsStore, sp, DB, WriteBack);
+ for (uint32_t i = 0; i < FloatRegisters::ActualTotalPhys(); i++) {
+ masm.transferFloatReg(FloatRegister(i, FloatRegister::Double));
+ }
+ masm.finishFloatTransfer();
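+
+ // Ascending from sp, the stack now holds the stored doubles (plus the
+ // reserved slots standing in for any missing d16-d31), followed by r0-r15;
+ // this is the register dump that Bailout() reads via the BailoutStack*.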
+
+ // The current stack pointer is the first argument to jit::Bailout.
+ masm.ma_mov(sp, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, r0);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.mov(sp, r1);
+ using Fn = bool (*)(BailoutStack* sp, BaselineBailoutInfo** info);
+ masm.setupAlignedABICall();
+
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+
+ masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ masm.pop(r2); // Get the bailoutInfo outparam.
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set must be a superset of Volatile register set.");
+
+ // The context is the first argument; r0 is the first argument register.
+ Register cxreg = r0;
+ regs.take(cxreg);
+
+ // Stack is:
+ // ... frame ...
+ // +8 [args] + argPadding
+ // +0 ExitFrame
+ //
+ // If it isn't a tail call, then the return address needs to be saved.
+ // Push the frame pointer to finish the exit frame, then link it up.
+ if (f.expectTailCall == NonTailCall) {
+ masm.pushReturnAddress();
+ }
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = r5;
+ regs.take(argsBase);
+ ScratchRegisterScope scratch(masm);
+ masm.ma_add(sp, Imm32(ExitFrameLayout::SizeWithFooter()), argsBase,
+ scratch);
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = r4;
+ regs.take(outReg);
+ masm.reserveStack(sizeof(Value));
+ masm.ma_mov(sp, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = r4;
+ regs.take(outReg);
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.ma_mov(sp, outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ case Type_Bool:
+ outReg = r4;
+ regs.take(outReg);
+ masm.reserveStack(sizeof(int32_t));
+ masm.ma_mov(sp, outReg);
+ break;
+
+ case Type_Double:
+ outReg = r4;
+ regs.take(outReg);
+ masm.reserveStack(sizeof(double));
+ masm.ma_mov(sp, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ // Values should be passed by reference, not by value, so we assert
+ // that the argument is a double-precision float.
+ MOZ_ASSERT(f.argPassedInFloatReg(explicitArg));
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ argDisp += sizeof(double);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += 2 * sizeof(void*);
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (outReg != InvalidReg) {
+ masm.passABIArg(outReg);
+ }
+
+ masm.callWithABI(nativeFun, MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, r0, r0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.branchIfFalseBool(r0, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(sp, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ masm.load32(Address(sp, 0), ReturnReg);
+ masm.freeStack(sizeof(int32_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(sp, 0), ReturnReg);
+ masm.freeStack(sizeof(int32_t));
+ break;
+
+ case Type_Double:
+ masm.loadDouble(Address(sp, 0), ReturnDoubleReg);
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ // Until C++ code is instrumented against Spectre, prevent speculative
+ // execution from returning any private data.
+ if (f.returnsData() && JitOptions.spectreJitToCxxCalls) {
+ masm.speculationBarrier();
+ }
+
+ // Pop ExitFooterFrame and the frame pointer.
+ masm.leaveExitFrame(0);
+ masm.pop(FramePointer);
+
+ // Return. Subtract sizeof(void*) for the frame pointer.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ masm.pushReturnAddress();
+
+ static_assert(PreBarrierReg == r1);
+ Register temp1 = r2;
+ Register temp2 = r3;
+ Register temp3 = r4;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet save;
+ save.set() =
+ RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileDoubleMask));
+ masm.PushRegsInMask(save);
+
+ masm.movePtr(ImmPtr(cx->runtime()), r0);
+
+ masm.setupUnalignedABICall(r2);
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+ masm.callWithABI(JitPreWriteBarrier(type));
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.ret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(r1, r2);
+}
diff --git a/js/src/jit/arm/disasm/Constants-arm.cpp b/js/src/jit/arm/disasm/Constants-arm.cpp
new file mode 100644
index 0000000000..408e2df686
--- /dev/null
+++ b/js/src/jit/arm/disasm/Constants-arm.cpp
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ */
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "jit/arm/disasm/Constants-arm.h"
+
+#ifdef JS_DISASM_ARM
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+double Instruction::DoubleImmedVmov() const {
+ // Reconstruct a double from the immediate encoded in the vmov instruction.
+ //
+ // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
+ // double: [aBbbbbbb,bbcdefgh,00000000,00000000,
+ // 00000000,00000000,00000000,00000000]
+ //
+ // where B = ~b. Only the high 16 bits are affected.
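+ //
+ // Worked example: the canonical encoding of 1.0 uses abcdefgh == 01110000
+ // (a = 0, b = 1, cdefgh = 110000), which gives high16 == 0x3ff0 and hence
+ // the bit pattern 0x3ff0000000000000, i.e. 1.0.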
+ uint64_t high16;
+ high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
+ high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
+ high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
+ high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
+
+ uint64_t imm = high16 << 48;
+ double d;
+ memcpy(&d, &imm, 8);
+ return d;
+}
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumRegisters] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
+};
+
+// List of alias names which can be used when referring to ARM registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+ {10, "sl"}, {11, "r11"}, {12, "r12"}, {13, "r13"},
+ {14, "r14"}, {15, "r15"}, {kNoRegister, NULL}};
+
+const char* Registers::Name(int reg) {
+ const char* result;
+ if ((0 <= reg) && (reg < kNumRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
+// Note that "sN:sM" is the same as "dN/2" up to d15.
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
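+// E.g. Name(3, true) returns "d3": the double names follow the 32 single
+// names in this table, so the lookup lands at index 32 + 3.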
+const char* VFPRegisters::names_[kNumVFPRegisters] = {
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10",
+ "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21",
+ "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", "d0",
+ "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
+ "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
+ "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+const char* VFPRegisters::Name(int reg, bool is_double) {
+ MOZ_ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
+ return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
+}
+
+int VFPRegisters::Number(const char* name, bool* is_double) {
+ for (int i = 0; i < kNumVFPRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ if (i < kNumVFPSingleRegisters) {
+ *is_double = false;
+ return i;
+ } else {
+ *is_double = true;
+ return i - kNumVFPSingleRegisters;
+ }
+ }
+ }
+
+ // No register with the requested name found.
+ return kNoRegister;
+}
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kNoRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+ // No register with the requested name found.
+ return kNoRegister;
+}
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // JS_DISASM_ARM
diff --git a/js/src/jit/arm/disasm/Constants-arm.h b/js/src/jit/arm/disasm/Constants-arm.h
new file mode 100644
index 0000000000..0128062b3f
--- /dev/null
+++ b/js/src/jit/arm/disasm/Constants-arm.h
@@ -0,0 +1,684 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_arm_disasm_Constants_arm_h
+#define jit_arm_disasm_Constants_arm_h
+
+#ifdef JS_DISASM_ARM
+
+# include "mozilla/Assertions.h"
+# include "mozilla/Types.h"
+
+# include <string.h>
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+// Constant pool marker.
+// Use UDF, the permanently undefined instruction.
+const int kConstantPoolMarkerMask = 0xfff000f0;
+const int kConstantPoolMarker = 0xe7f000f0;
+const int kConstantPoolLengthMaxMask = 0xffff;
+
+inline int EncodeConstantPoolLength(int length) {
+ MOZ_ASSERT((length & kConstantPoolLengthMaxMask) == length);
+ return ((length & 0xfff0) << 4) | (length & 0xf);
+}
+
+inline int DecodeConstantPoolLength(int instr) {
+ MOZ_ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
+ return ((instr >> 4) & 0xfff0) | (instr & 0xf);
+}
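+
+// For example, EncodeConstantPoolLength(0x1234) yields 0x12304: the 16-bit
+// length is split across bits 19:8 and 3:0 so that it never touches bits 7:4,
+// which the marker fixes to 0xf; DecodeConstantPoolLength(0x12304) recovers
+// 0x1234.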
+
+// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
+const int kCodeAgeJumpInstruction = 0xe51ff004;
+
+// Number of registers in normal ARM mode.
+const int kNumRegisters = 16;
+
+// VFP support.
+const int kNumVFPSingleRegisters = 32;
+const int kNumVFPDoubleRegisters = 32;
+const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
+
+// PC is register 15.
+const int kPCRegister = 15;
+const int kNoRegister = -1;
+
+// -----------------------------------------------------------------------------
+// Conditions.
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate ARM instructions.
+//
+// Section references in the code refer to the "ARM Architecture Reference
+// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
+//
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+
+// Values for the condition field as defined in section A3.2
+enum Condition {
+ kNoCondition = -1,
+
+ eq = 0 << 28, // Z set Equal.
+ ne = 1 << 28, // Z clear Not equal.
+ cs = 2 << 28, // C set Unsigned higher or same.
+ cc = 3 << 28, // C clear Unsigned lower.
+ mi = 4 << 28, // N set Negative.
+ pl = 5 << 28, // N clear Positive or zero.
+ vs = 6 << 28, // V set Overflow.
+ vc = 7 << 28, // V clear No overflow.
+ hi = 8 << 28, // C set, Z clear Unsigned higher.
+ ls = 9 << 28, // C clear or Z set Unsigned lower or same.
+ ge = 10 << 28, // N == V Greater or equal.
+ lt = 11 << 28, // N != V Less than.
+ gt = 12 << 28, // Z clear, N == V Greater than.
+ le = 13 << 28, // Z set or N != V Less than or equal.
+ al = 14 << 28, // Always.
+
+ kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
+ kNumberOfConditions = 16,
+
+ // Aliases.
+ hs = cs, // C set Unsigned higher or same.
+ lo = cc // C clear Unsigned lower.
+};
+
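+// Conditions come in complementary pairs (eq/ne, cs/cc, mi/pl, ...) that
+// differ only in bit 28, so negating a condition just XORs that bit;
+// e.g. NegateCondition(lt) == ge. (al has no counterpart here, hence the
+// assertion.)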
+inline Condition NegateCondition(Condition cond) {
+ MOZ_ASSERT(cond != al);
+ return static_cast<Condition>(cond ^ ne);
+}
+
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cond) {
+ switch (cond) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ default:
+ return cond;
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Instructions encoding.
+
+// Instr is merely used by the Assembler to distinguish 32-bit integers
+// representing instructions from ordinary 32-bit values.
+// Instruction objects are pointers to 32-bit values, and provide methods to
+// access the various ISA fields.
+typedef int32_t Instr;
+
+// Opcodes for Data-processing instructions (instructions with type 0 or 1)
+// as defined in section A3.4.
+enum Opcode {
+ AND = 0 << 21, // Logical AND.
+ EOR = 1 << 21, // Logical Exclusive OR.
+ SUB = 2 << 21, // Subtract.
+ RSB = 3 << 21, // Reverse Subtract.
+ ADD = 4 << 21, // Add.
+ ADC = 5 << 21, // Add with Carry.
+ SBC = 6 << 21, // Subtract with Carry.
+ RSC = 7 << 21, // Reverse Subtract with Carry.
+ TST = 8 << 21, // Test.
+ TEQ = 9 << 21, // Test Equivalence.
+ CMP = 10 << 21, // Compare.
+ CMN = 11 << 21, // Compare Negated.
+ ORR = 12 << 21, // Logical (inclusive) OR.
+ MOV = 13 << 21, // Move.
+ BIC = 14 << 21, // Bit Clear.
+ MVN = 15 << 21 // Move Not.
+};
+
+// The values of bits 7-4 for some type 0 miscellaneous instructions.
+enum MiscInstructionsBits74 {
+ // With bits 22-21 01.
+ BX = 1 << 4,
+ BXJ = 2 << 4,
+ BLX = 3 << 4,
+ BKPT = 7 << 4,
+
+ // With bits 22-21 11.
+ CLZ = 1 << 4
+};
+
+// Load and store exclusive instructions.
+
+// Bit positions.
+enum {
+ ExclusiveOpHi = 24, // Hi bit of opcode field
+ ExclusiveOpLo = 23, // Lo bit of opcode field
+ ExclusiveSizeHi = 22, // Hi bit of operand size field
+ ExclusiveSizeLo = 21, // Lo bit of operand size field
+ ExclusiveLoad = 20 // Bit indicating load
+};
+
+// Opcode bits for exclusive instructions.
+enum { ExclusiveOpcode = 3 };
+
+// Operand size, Bits(ExclusiveSizeHi,ExclusiveSizeLo).
+enum {
+ ExclusiveWord = 0,
+ ExclusiveDouble = 1,
+ ExclusiveByte = 2,
+ ExclusiveHalf = 3
+};
+
+// Instruction encoding bits and masks.
+enum {
+ H = 1 << 5, // Halfword (or byte).
+ S6 = 1 << 6, // Signed (or unsigned).
+ L = 1 << 20, // Load (or store).
+ S = 1 << 20, // Set condition code (or leave unchanged).
+ W = 1 << 21, // Writeback base register (or leave unchanged).
+ A = 1 << 21, // Accumulate in multiply instruction (or not).
+ B = 1 << 22, // Unsigned byte (or word).
+ N = 1 << 22, // Long (or short).
+ U = 1 << 23, // Positive (or negative) offset/index.
+ P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
+ I = 1 << 25, // Immediate shifter operand (or not).
+ B0 = 1 << 0,
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B6 = 1 << 6,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B17 = 1 << 17,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+ B28 = 1 << 28,
+
+ // Instruction bit masks.
+ kCondMask = 15 << 28,
+ kALUMask = 0x6f << 21,
+ kRdMask = 15 << 12, // In str instruction.
+ kCoprocessorMask = 15 << 8,
+ kOpCodeMask = 15 << 21, // In data-processing instructions.
+ kImm24Mask = (1 << 24) - 1,
+ kImm16Mask = (1 << 16) - 1,
+ kImm8Mask = (1 << 8) - 1,
+ kOff12Mask = (1 << 12) - 1,
+ kOff8Mask = (1 << 8) - 1
+};
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants.
+
+// Condition code updating mode.
+enum SBit {
+ SetCC = 1 << 20, // Set condition code.
+ LeaveCC = 0 << 20 // Leave condition code unchanged.
+};
+
+// Status register selection.
+enum SRegister { CPSR = 0 << 22, SPSR = 1 << 22 };
+
+// Shifter types for Data-processing operands as defined in section A5.1.2.
+enum ShiftOp {
+ LSL = 0 << 5, // Logical shift left.
+ LSR = 1 << 5, // Logical shift right.
+ ASR = 2 << 5, // Arithmetic shift right.
+ ROR = 3 << 5, // Rotate right.
+
+ // RRX is encoded as ROR with shift_imm == 0.
+ // Use a special code to make the distinction. The RRX ShiftOp is only used
+ // as an argument, and will never actually be encoded. The Assembler will
+ // detect it and emit the correct ROR shift operand with shift_imm == 0.
+ RRX = -1,
+ kNumberOfShifts = 4
+};
+
+// Status register fields.
+enum SRegisterField {
+ CPSR_c = CPSR | 1 << 16,
+ CPSR_x = CPSR | 1 << 17,
+ CPSR_s = CPSR | 1 << 18,
+ CPSR_f = CPSR | 1 << 19,
+ SPSR_c = SPSR | 1 << 16,
+ SPSR_x = SPSR | 1 << 17,
+ SPSR_s = SPSR | 1 << 18,
+ SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values).
+typedef uint32_t SRegisterFieldMask;
+
+// Memory operand addressing mode.
+enum AddrMode {
+ // Bit encoding P U W.
+ Offset = (8 | 4 | 0) << 21, // Offset (without writeback to base).
+ PreIndex = (8 | 4 | 1) << 21, // Pre-indexed addressing with writeback.
+ PostIndex = (0 | 4 | 0) << 21, // Post-indexed addressing with writeback.
+ NegOffset =
+ (8 | 0 | 0) << 21, // Negative offset (without writeback to base).
+ NegPreIndex = (8 | 0 | 1) << 21, // Negative pre-indexed with writeback.
+ NegPostIndex = (0 | 0 | 0) << 21 // Negative post-indexed with writeback.
+};
+
+// Load/store multiple addressing mode.
+enum BlockAddrMode {
+ // Bit encoding P U W .
+ da = (0 | 0 | 0) << 21, // Decrement after.
+ ia = (0 | 4 | 0) << 21, // Increment after.
+ db = (8 | 0 | 0) << 21, // Decrement before.
+ ib = (8 | 4 | 0) << 21, // Increment before.
+ da_w = (0 | 0 | 1) << 21, // Decrement after with writeback to base.
+ ia_w = (0 | 4 | 1) << 21, // Increment after with writeback to base.
+ db_w = (8 | 0 | 1) << 21, // Decrement before with writeback to base.
+ ib_w = (8 | 4 | 1) << 21, // Increment before with writeback to base.
+
+ // Alias modes for comparison when writeback does not matter.
+ da_x = (0 | 0 | 0) << 21, // Decrement after.
+ ia_x = (0 | 4 | 0) << 21, // Increment after.
+ db_x = (8 | 0 | 0) << 21, // Decrement before.
+ ib_x = (8 | 4 | 0) << 21, // Increment before.
+
+ kBlockAddrModeMask = (8 | 4 | 1) << 21
+};
+
+// Coprocessor load/store operand size.
+enum LFlag {
+ Long = 1 << 22, // Long load/store coprocessor.
+ Short = 0 << 22 // Short load/store coprocessor.
+};
+
+// NEON data type
+enum NeonDataType {
+ NeonS8 = 0x1, // U = 0, imm3 = 0b001
+ NeonS16 = 0x2, // U = 0, imm3 = 0b010
+ NeonS32 = 0x4, // U = 0, imm3 = 0b100
+ NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
+ NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
+ NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
+ NeonDataTypeSizeMask = 0x7,
+ NeonDataTypeUMask = 1 << 24
+};
+
+enum NeonListType { nlt_1 = 0x7, nlt_2 = 0xA, nlt_3 = 0x6, nlt_4 = 0x2 };
+
+enum NeonSize { Neon8 = 0x0, Neon16 = 0x1, Neon32 = 0x2, Neon64 = 0x3 };
+
+// -----------------------------------------------------------------------------
+// Supervisor Call (svc) specific support.
+
+// Special Software Interrupt codes when used in the presence of the ARM
+// simulator.
+// svc (formerly swi) provides a 24-bit immediate value. Use bits 22:0 for
+// standard SoftwareInterruptCodes. Bit 23 is reserved for the stop feature.
+enum SoftwareInterruptCodes {
+ // transition to C code
+ kCallRtRedirected = 0x10,
+ // break point
+ kBreakpoint = 0x20,
+ // stop
+ kStopCode = 1 << 23
+};
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t kDefaultStopCode = -1;
+
+// Type of VFP register. Determines register encoding.
+enum VFPRegPrecision { kSinglePrecision = 0, kDoublePrecision = 1 };
+
+// VFP FPSCR constants.
+enum VFPConversionMode { kFPSCRRounding = 0, kDefaultRoundToZero = 1 };
+
+// This mask does not include the "inexact" or "input denormal" cumulative
+// exceptions flags, because we usually don't want to check for it.
+const uint32_t kVFPExceptionMask = 0xf;
+const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
+const uint32_t kVFPOverflowExceptionBit = 1 << 2;
+const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
+const uint32_t kVFPInexactExceptionBit = 1 << 4;
+const uint32_t kVFPFlushToZeroMask = 1 << 24;
+const uint32_t kVFPDefaultNaNModeControlBit = 1 << 25;
+
+const uint32_t kVFPNConditionFlagBit = 1 << 31;
+const uint32_t kVFPZConditionFlagBit = 1 << 30;
+const uint32_t kVFPCConditionFlagBit = 1 << 29;
+const uint32_t kVFPVConditionFlagBit = 1 << 28;
+
+// VFP rounding modes. See ARM DDI 0406B Page A2-29.
+enum VFPRoundingMode {
+ RN = 0 << 22, // Round to Nearest.
+ RP = 1 << 22, // Round towards Plus Infinity.
+ RM = 2 << 22, // Round towards Minus Infinity.
+ RZ = 3 << 22, // Round towards zero.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+ kRoundToZero = RZ
+};
+
+const uint32_t kVFPRoundingModeMask = 3 << 22;
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the ARM. They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM. Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the ARM
+// architecture instruction set encoding as described in figure A3-1.
+// Note that the Assembler uses typedef int32_t Instr.
+//
+// Example: Test whether the instruction at ptr does set the condition code
+// bits.
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+// Instruction* instr = Instruction::At(ptr);
+// int type = instr->TypeValue();
+// return ((type == 0) || (type == 1)) && instr->HasS();
+// }
+//
+class Instruction {
+ public:
+ enum { kInstrSize = 4, kInstrSizeLog2 = 2, kPCReadOffset = 8 };
+
+ // Helper macro to define static accessors.
+ // We use the cast to char* trick to bypass the strict aliasing rules.
+# define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
+ static inline return_type Name(Instr instr) { \
+ char* temp = reinterpret_cast<char*>(&instr); \
+ return reinterpret_cast<Instruction*>(temp)->Name(); \
+ }
+
+# define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+ // Read a bit field's value out of the instruction bits.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int BitField(int hi, int lo) const {
+ return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Static support.
+
+ // Read one particular bit out of the instruction bits.
+ static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
+
+ // Read the value of a bit field out of the instruction bits.
+ static inline int Bits(Instr instr, int hi, int lo) {
+ return (instr >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Read a bit field out of the instruction bits.
+ static inline int BitField(Instr instr, int hi, int lo) {
+ return instr & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Accessors for the different named fields used in the ARM encoding.
+ // The naming of these accessors corresponds to figure A3-1.
+ //
+ // Two kinds of accessors are declared:
+ // - <Name>Field() will return the raw field, i.e. the field's bits at their
+ //   original place in the instruction encoding.
+ //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+ //   0xC0810002 ConditionField(instr) will return 0xC0000000.
+ // - <Name>Value() will return the field value, shifted back to bit 0.
+ //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+ //   0xC0810002 ConditionValue(instr) will return 0xC.
+
+ // Generally applicable fields
+ inline Condition ConditionValue() const {
+ return static_cast<Condition>(Bits(31, 28));
+ }
+ inline Condition ConditionField() const {
+ return static_cast<Condition>(BitField(31, 28));
+ }
+ DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
+ DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
+
+ inline int TypeValue() const { return Bits(27, 25); }
+ inline int SpecialValue() const { return Bits(27, 23); }
+
+ inline int RnValue() const { return Bits(19, 16); }
+ DECLARE_STATIC_ACCESSOR(RnValue);
+ inline int RdValue() const { return Bits(15, 12); }
+ DECLARE_STATIC_ACCESSOR(RdValue);
+
+ inline int CoprocessorValue() const { return Bits(11, 8); }
+ // Support for VFP.
+ // Vn(19-16) | Vd(15-12) | Vm(3-0)
+ inline int VnValue() const { return Bits(19, 16); }
+ inline int VmValue() const { return Bits(3, 0); }
+ inline int VdValue() const { return Bits(15, 12); }
+ inline int NValue() const { return Bit(7); }
+ inline int MValue() const { return Bit(5); }
+ inline int DValue() const { return Bit(22); }
+ inline int RtValue() const { return Bits(15, 12); }
+ inline int PValue() const { return Bit(24); }
+ inline int UValue() const { return Bit(23); }
+ inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
+ inline int Opc2Value() const { return Bits(19, 16); }
+ inline int Opc3Value() const { return Bits(7, 6); }
+ inline int SzValue() const { return Bit(8); }
+ inline int VLValue() const { return Bit(20); }
+ inline int VCValue() const { return Bit(8); }
+ inline int VAValue() const { return Bits(23, 21); }
+ inline int VBValue() const { return Bits(6, 5); }
+ inline int VFPNRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 16, 7);
+ }
+ inline int VFPMRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 0, 5);
+ }
+ inline int VFPDRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 12, 22);
+ }
+
+ // Fields used in Data processing instructions
+ inline int OpcodeValue() const { return static_cast<Opcode>(Bits(24, 21)); }
+ inline Opcode OpcodeField() const {
+ return static_cast<Opcode>(BitField(24, 21));
+ }
+ inline int SValue() const { return Bit(20); }
+ // with register
+ inline int RmValue() const { return Bits(3, 0); }
+ DECLARE_STATIC_ACCESSOR(RmValue);
+ inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
+ inline ShiftOp ShiftField() const {
+ return static_cast<ShiftOp>(BitField(6, 5));
+ }
+ inline int RegShiftValue() const { return Bit(4); }
+ inline int RsValue() const { return Bits(11, 8); }
+ inline int ShiftAmountValue() const { return Bits(11, 7); }
+ // with immediate
+ inline int RotateValue() const { return Bits(11, 8); }
+ DECLARE_STATIC_ACCESSOR(RotateValue);
+ inline int Immed8Value() const { return Bits(7, 0); }
+ DECLARE_STATIC_ACCESSOR(Immed8Value);
+ inline int Immed4Value() const { return Bits(19, 16); }
+ inline int ImmedMovwMovtValue() const {
+ return Immed4Value() << 12 | Offset12Value();
+ }
+ DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue);
+
+ // Fields used in Load/Store instructions
+ inline int PUValue() const { return Bits(24, 23); }
+ inline int PUField() const { return BitField(24, 23); }
+ inline int BValue() const { return Bit(22); }
+ inline int WValue() const { return Bit(21); }
+ inline int LValue() const { return Bit(20); }
+ // with register uses same fields as Data processing instructions above
+ // with immediate
+ inline int Offset12Value() const { return Bits(11, 0); }
+ // multiple
+ inline int RlistValue() const { return Bits(15, 0); }
+ // extra loads and stores
+ inline int SignValue() const { return Bit(6); }
+ inline int HValue() const { return Bit(5); }
+ inline int ImmedHValue() const { return Bits(11, 8); }
+ inline int ImmedLValue() const { return Bits(3, 0); }
+
+ // Fields used in Branch instructions
+ inline int LinkValue() const { return Bit(24); }
+ inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
+
+ // Fields used in Software interrupt instructions
+ inline SoftwareInterruptCodes SvcValue() const {
+ return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
+ }
+
+ // Test for special encodings of type 0 instructions (extra loads and stores,
+ // as well as multiplications).
+ inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
+
+ // Test for miscellaneous instructions encodings of type 0 instructions.
+ inline bool IsMiscType0() const {
+ return (Bit(24) == 1) && (Bit(23) == 0) && (Bit(20) == 0) &&
+ ((Bit(7) == 0));
+ }
+
+ // Test for a nop instruction, which falls under type 1.
+ inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
+
+ // Test for a csdb instruction, which falls under type 1.
+ inline bool IsCsdbType1() const { return Bits(24, 0) == 0x0120F014; }
+
+ // Test for a stop instruction.
+ inline bool IsStop() const {
+ return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
+ }
+
+ // Special accessors that test for existence of a value.
+ inline bool HasS() const { return SValue() == 1; }
+ inline bool HasB() const { return BValue() == 1; }
+ inline bool HasW() const { return WValue() == 1; }
+ inline bool HasL() const { return LValue() == 1; }
+ inline bool HasU() const { return UValue() == 1; }
+ inline bool HasSign() const { return SignValue() == 1; }
+ inline bool HasH() const { return HValue() == 1; }
+ inline bool HasLink() const { return LinkValue() == 1; }
+
+ // Decoding the double immediate in the vmov instruction.
+ double DoubleImmedVmov() const;
+
+ // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(uint8_t* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // Join split register codes, depending on single or double precision.
+ // four_bit is the position of the least-significant bit of the four-bit
+ // specifier. one_bit is the position of the additional single-bit
+ // specifier.
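+ // E.g. s17 (binary 10001) keeps its top four bits in the four-bit field and
+ // its low bit in the one-bit field, so (8 << 1) | 1 == 17, whereas d17 keeps
+ // its low four bits in the four-bit field, so (1 << 4) | 1 == 17.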
+ inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
+ if (pre == kSinglePrecision) {
+ return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
+ }
+ return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
+ }
+
+ // We need to prevent the creation of instances of class Instruction.
+ Instruction() = delete;
+ Instruction(const Instruction&) = delete;
+ void operator=(const Instruction&) = delete;
+};
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Look up the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumRegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between VFP register numbers and names.
+class VFPRegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg, bool is_double);
+
+ // Look up the register number for the name provided.
+ // Set the flag pointed to by is_double to true if the register
+ // is double-precision.
+ static int Number(const char* name, bool* is_double);
+
+ private:
+ static const char* names_[kNumVFPRegisters];
+};
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // JS_DISASM_ARM
+
+#endif // jit_arm_disasm_Constants_arm_h
diff --git a/js/src/jit/arm/disasm/Disasm-arm.cpp b/js/src/jit/arm/disasm/Disasm-arm.cpp
new file mode 100644
index 0000000000..97f39e1331
--- /dev/null
+++ b/js/src/jit/arm/disasm/Disasm-arm.cpp
@@ -0,0 +1,2031 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// disasm::NameConverter converter;
+// disasm::Disassembler d(converter);
+// for (uint8_t* pc = begin; pc < end;) {
+// disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+// uint8_t* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+#include "jit/arm/disasm/Disasm-arm.h"
+
+#ifdef JS_DISASM_ARM
+
+# include <stdarg.h>
+# include <stdio.h>
+# include <string.h>
+
+# include "jit/arm/disasm/Constants-arm.h"
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+// Helper function for printing to a Vector.
+static int MOZ_FORMAT_PRINTF(2, 3)
+ SNPrintF(V8Vector<char> str, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ int result = vsnprintf(str.start(), str.length(), format, args);
+ va_end(args);
+ return result;
+}
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// a more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter, V8Vector<char> out_buffer)
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+ // Writes one disassembled instruction into the output buffer (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(uint8_t* instruction);
+
+ static bool IsConstantPoolAt(uint8_t* instr_ptr);
+ static int ConstantPoolSizeAt(uint8_t* instr_ptr);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintSRegister(int reg);
+ void PrintDRegister(int reg);
+ int FormatVFPRegister(Instruction* instr, const char* format);
+ void PrintMovwMovt(Instruction* instr);
+ int FormatVFPinstruction(Instruction* instr, const char* format);
+ void PrintCondition(Instruction* instr);
+ void PrintShiftRm(Instruction* instr);
+ void PrintShiftImm(Instruction* instr);
+ void PrintShiftSat(Instruction* instr);
+ void PrintPU(Instruction* instr);
+ void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ void FormatNeonList(int Vd, int type);
+ void FormatNeonMemory(int Rn, int align, int Rm);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+
+ // Each of these functions decodes one particular instruction type, a 3-bit
+ // field in the instruction encoding.
+ // Types 0 and 1 are combined as they are largely the same except for the way
+ // they interpret the shifter operand.
+ void DecodeType01(Instruction* instr);
+ void DecodeType2(Instruction* instr);
+ void DecodeType3(Instruction* instr);
+ void DecodeType4(Instruction* instr);
+ void DecodeType5(Instruction* instr);
+ void DecodeType6(Instruction* instr);
+ // Type 7 includes special Debugger instructions.
+ int DecodeType7(Instruction* instr);
+ // For VFP support.
+ void DecodeTypeVFP(Instruction* instr);
+ void DecodeType6CoprocessorIns(Instruction* instr);
+
+ void DecodeSpecialCondition(Instruction* instr);
+
+ void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
+ void DecodeVCMP(Instruction* instr);
+ void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+ void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ V8Vector<char> out_buffer_;
+ int out_buffer_pos_;
+
+ // Disallow copy and assign.
+ Decoder(const Decoder&) = delete;
+ void operator=(const Decoder&) = delete;
+};
+
+// Support for assertions in the Decoder formatting functions.
+# define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+// Append the character ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+// Append the string str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < int(out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+// These condition names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+static const char* const cond_names[kNumberOfConditions] = {
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
+};
+
+// Print the condition guarding the instruction.
+void Decoder::PrintCondition(Instruction* instr) {
+ Print(cond_names[instr->ConditionValue()]);
+}
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+// Print the VFP S register name according to the active name converter.
+void Decoder::PrintSRegister(int reg) { Print(VFPRegisters::Name(reg, false)); }
+
+// Print the VFP D register name according to the active name converter.
+void Decoder::PrintDRegister(int reg) { Print(VFPRegisters::Name(reg, true)); }
+
+// These shift names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+static const char* const shift_names[kNumberOfShifts] = {"lsl", "lsr", "asr",
+ "ror"};
+
+// Print the register shift operands for the instruction. Generally used for
+// data processing instructions.
+void Decoder::PrintShiftRm(Instruction* instr) {
+ ShiftOp shift = instr->ShiftField();
+ int shift_index = instr->ShiftValue();
+ int shift_amount = instr->ShiftAmountValue();
+ int rm = instr->RmValue();
+
+ PrintRegister(rm);
+
+ if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
+ // Special case for using rm only.
+ return;
+ }
+ if (instr->RegShiftValue() == 0) {
+ // by immediate
+ if ((shift == ROR) && (shift_amount == 0)) {
+ Print(", RRX");
+ return;
+ } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
+ shift_amount = 32;
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", %s #%d",
+ shift_names[shift_index], shift_amount);
+ } else {
+ // by register
+ int rs = instr->RsValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", %s ",
+ shift_names[shift_index]);
+ PrintRegister(rs);
+ }
+}
+
+static inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
+ if (shift == 0) return value;
+ return (value >> shift) | (value << (32 - shift));
+}
+
+// Print the immediate operand for the instruction. Generally used for data
+// processing instructions.
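+// An ARM data-processing immediate is an 8-bit value rotated right by twice
+// the 4-bit rotate field; e.g. immed8 == 1 with rotate == 10 denotes
+// 1 ROR 20 == 0x1000, which is printed as "#4096".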
+void Decoder::PrintShiftImm(Instruction* instr) {
+ int rotate = instr->RotateValue() * 2;
+ int immed8 = instr->Immed8Value();
+ int imm = RotateRight32(immed8, rotate);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%d", imm);
+}
+
+// Print the optional shift and immediate used by saturating instructions.
+void Decoder::PrintShiftSat(Instruction* instr) {
+ int shift = instr->Bits(11, 7);
+ if (shift > 0) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, ", %s #%d",
+ shift_names[instr->Bit(6) * 2], instr->Bits(11, 7));
+ }
+}
+
+// Print PU formatting to reduce complexity of FormatOption.
+void Decoder::PrintPU(Instruction* instr) {
+ switch (instr->PUField()) {
+ case da_x: {
+ Print("da");
+ break;
+ }
+ case ia_x: {
+ Print("ia");
+ break;
+ }
+ case db_x: {
+ Print("db");
+ break;
+ }
+ case ib_x: {
+ Print("ib");
+ break;
+ }
+ default: {
+ MOZ_CRASH();
+ break;
+ }
+ }
+}
+
+// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
+// the FormatOption method.
+void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
+ switch (svc) {
+ case kCallRtRedirected:
+ Print("call rt redirected");
+ return;
+ case kBreakpoint:
+ Print("breakpoint");
+ return;
+ default:
+ if (svc >= kStopCode) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d - 0x%x",
+ svc & kStopCodeMask, svc & kStopCodeMask);
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", svc);
+ }
+ return;
+ }
+}
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ MOZ_ASSERT(format[0] == 'r');
+ if (format[1] == 'n') { // 'rn: Rn register
+ int reg = instr->RnValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'rd: Rd register
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 's') { // 'rs: Rs register
+ int reg = instr->RsValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'm') { // 'rm: Rm register
+ int reg = instr->RmValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'rt: Rt register
+ int reg = instr->RtValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'l') {
+ // 'rlist: register list for load and store multiple instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "rlist"));
+ int rlist = instr->RlistValue();
+ int reg = 0;
+ Print("{");
+ // Print register list in ascending order, by scanning the bit mask.
+ while (rlist != 0) {
+ if ((rlist & 1) != 0) {
+ PrintRegister(reg);
+ if ((rlist >> 1) != 0) {
+ Print(", ");
+ }
+ }
+ reg++;
+ rlist >>= 1;
+ }
+ Print("}");
+ return 5;
+ }
+ MOZ_CRASH();
+ return -1;
+}
+
+// Handle all VFP register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
+ MOZ_ASSERT((format[0] == 'S') || (format[0] == 'D'));
+
+ VFPRegPrecision precision =
+ format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
+
+ int retval = 2;
+ int reg = -1;
+ if (format[1] == 'n') {
+ reg = instr->VFPNRegValue(precision);
+ } else if (format[1] == 'm') {
+ reg = instr->VFPMRegValue(precision);
+ } else if (format[1] == 'd') {
+ if ((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) && (instr->Bit(4) == 0x1)) {
+ // vmov.32 has Vd in a different place.
+ reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ } else {
+ reg = instr->VFPDRegValue(precision);
+ }
+
+ if (format[2] == '+') {
+ int immed8 = instr->Immed8Value();
+ if (format[0] == 'S') reg += immed8 - 1;
+ if (format[0] == 'D') reg += (immed8 / 2 - 1);
+ }
+ if (format[2] == '+') retval = 3;
+ } else {
+ MOZ_CRASH();
+ }
+
+ if (precision == kSinglePrecision) {
+ PrintSRegister(reg);
+ } else {
+ PrintDRegister(reg);
+ }
+
+ return retval;
+}
+
+int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
+ Print(format);
+ return 0;
+}
+
+void Decoder::FormatNeonList(int Vd, int type) {
+ if (type == nlt_1) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "{d%d}", Vd);
+ } else if (type == nlt_2) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "{d%d, d%d}", Vd, Vd + 1);
+ } else if (type == nlt_3) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
+ } else if (type == nlt_4) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "{d%d, d%d, d%d, d%d}", Vd,
+ Vd + 1, Vd + 2, Vd + 3);
+ }
+}
+
+void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "[r%d", Rn);
+ if (align != 0) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, ":%d", (1 << align) << 6);
+ }
+ if (Rm == 15) {
+ Print("]");
+ } else if (Rm == 13) {
+ Print("]!");
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "], r%d", Rm);
+ }
+}
+
+// Print the movw or movt instruction.
+void Decoder::PrintMovwMovt(Instruction* instr) {
+ int imm = instr->ImmedMovwMovtValue();
+ int rd = instr->RdValue();
+ PrintRegister(rd);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", imm);
+}
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
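+// For instance, with the format string "mul'cond's 'rn, 'rm, 'rs" used by
+// DecodeType01 below, 'cond prints the condition suffix, 's the S bit and
+// 'rn/'rm/'rs the registers, so an unconditional muls might come out as
+// "muls r0, r1, r2" (the register names here are only illustrative).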
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'a': { // 'a: accumulate multiplies
+ if (instr->Bit(21) == 0) {
+ Print("ul");
+ } else {
+ Print("la");
+ }
+ return 1;
+ }
+ case 'b': { // 'b: byte loads or stores
+ if (instr->HasB()) {
+ Print("b");
+ }
+ return 1;
+ }
+ case 'c': { // 'cond: conditional execution
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "cond"));
+ PrintCondition(instr);
+ return 4;
+ }
+ case 'd': { // 'd: vmov double immediate.
+ double d = instr->DoubleImmedVmov();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%g", d);
+ return 1;
+ }
+ case 'f': { // 'f: bitfield instructions - v7 and above.
+ uint32_t lsbit = instr->Bits(11, 7);
+ uint32_t width = instr->Bits(20, 16) + 1;
+ if (instr->Bit(21) == 0) {
+ // BFC/BFI:
+ // Bits 20-16 represent the most-significant bit. Convert to width.
+ width -= lsbit;
+ MOZ_ASSERT(width > 0);
+ }
+ MOZ_ASSERT((width + lsbit) <= 32);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "#%d, #%d", lsbit, width);
+ return 1;
+ }
+ case 'h': { // 'h: halfword operation for extra loads and stores
+ if (instr->HasH()) {
+ Print("h");
+ } else {
+ Print("b");
+ }
+ return 1;
+ }
+ case 'i': { // 'i: immediate value from adjacent bits.
+ // Expects tokens in the form imm%02d@%02d, i.e. imm05@07, imm10@16
+ int width = (format[3] - '0') * 10 + (format[4] - '0');
+ int lsb = (format[6] - '0') * 10 + (format[7] - '0');
+
+ MOZ_ASSERT((width >= 1) && (width <= 32));
+ MOZ_ASSERT((lsb >= 0) && (lsb <= 31));
+ MOZ_ASSERT((width + lsb) <= 32);
+
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d",
+ instr->Bits(width + lsb - 1, lsb));
+ return 8;
+ }
+ case 'l': { // 'l: branch and link
+ if (instr->HasLink()) {
+ Print("l");
+ }
+ return 1;
+ }
+ case 'm': {
+ if (format[1] == 'w') {
+ // 'mw: movt/movw instructions.
+ PrintMovwMovt(instr);
+ return 2;
+ }
+ if (format[1] == 'e') { // 'memop: load/store instructions.
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "memop"));
+ if (instr->HasL()) {
+ Print("ldr");
+ } else {
+ if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
+ (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
+ if (instr->Bit(5) == 1) {
+ Print("strd");
+ } else {
+ Print("ldrd");
+ }
+ return 5;
+ }
+ Print("str");
+ }
+ return 5;
+ }
+ // 'msg: for simulator break instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "msg"));
+ uint8_t* str =
+ reinterpret_cast<uint8_t*>(instr->InstructionBits() & 0x0fffffff);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameInCode(str));
+ return 3;
+ }
+ case 'o': {
+ if ((format[3] == '1') && (format[4] == '2')) {
+ // 'off12: 12-bit offset for load and store instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "off12"));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d",
+ instr->Offset12Value());
+ return 5;
+ } else if (format[3] == '0') {
+ // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d",
+ (instr->Bits(19, 8) << 4) + instr->Bits(3, 0));
+ return 15;
+ }
+ // 'off8: 8-bit offset for extra load and store instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "off8"));
+ int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs8);
+ return 4;
+ }
+ case 'p': { // 'pu: P and U bits for load and store instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "pu"));
+ PrintPU(instr);
+ return 2;
+ }
+ case 'r': {
+ return FormatRegister(instr, format);
+ }
+ case 's': {
+ if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
+ if (format[6] == 'o') { // 'shift_op
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "shift_op"));
+ if (instr->TypeValue() == 0) {
+ PrintShiftRm(instr);
+ } else {
+ MOZ_ASSERT(instr->TypeValue() == 1);
+ PrintShiftImm(instr);
+ }
+ return 8;
+ } else if (format[6] == 's') { // 'shift_sat.
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
+ PrintShiftSat(instr);
+ return 9;
+ } else { // 'shift_rm
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
+ PrintShiftRm(instr);
+ return 8;
+ }
+ } else if (format[1] == 'v') { // 'svc
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "svc"));
+ PrintSoftwareInterrupt(instr->SvcValue());
+ return 3;
+ } else if (format[1] == 'i') { // 'sign: signed extra loads and stores
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "sign"));
+ if (instr->HasSign()) {
+ Print("s");
+ }
+ return 4;
+ }
+ // 's: S field of data processing instructions
+ if (instr->HasS()) {
+ Print("s");
+ }
+ return 1;
+ }
+ case 't': { // 'target: target of branch instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "target"));
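+ // The offset is the sign-extended 24-bit immediate times 4, plus 8 to
+ // account for the ARM PC read offset (kPCReadOffset).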
+ int off = (instr->SImmed24Value() << 2) + 8;
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+ converter_.NameOfAddress(reinterpret_cast<uint8_t*>(instr) + off));
+ return 6;
+ }
+ case 'u': { // 'u: signed or unsigned multiplies
+ // The manual gets the meaning of bit 22 backwards in the multiply
+ // instruction overview on page A3.16.2. The instructions that
+ // exist in u and s variants are the following:
+ // smull A4.1.87
+ // umull A4.1.129
+ // umlal A4.1.128
+ // smlal A4.1.76
+ // For these, 0 means u and 1 means s, as can be seen on their individual
+ // pages. The other 18 mul instructions have the bit set or unset in
+ // arbitrary ways that are unrelated to the signedness of the instruction.
+ // None of these 18 instructions exist in both a 'u' and an 's' variant.
+
+ if (instr->Bit(22) == 0) {
+ Print("u");
+ } else {
+ Print("s");
+ }
+ return 1;
+ }
+ case 'v': {
+ return FormatVFPinstruction(instr, format);
+ }
+ case 'S':
+ case 'D': {
+ return FormatVFPRegister(instr, format);
+ }
+ case 'w': { // 'w: W field of load and store instructions
+ if (instr->HasW()) {
+ Print("!");
+ }
+ return 1;
+ }
+ default: {
+ MOZ_CRASH();
+ break;
+ }
+ }
+ MOZ_CRASH();
+ return -1;
+}
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+// The disassembler may end up decoding data inlined in the code. We do not want
+// it to crash if the data does not resemble any known instruction.
+# define VERIFY(condition) \
+ if (!(condition)) { \
+ Unknown(instr); \
+ return; \
+ }
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" in place of the instruction bits.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
+void Decoder::DecodeType01(Instruction* instr) {
+ int type = instr->TypeValue();
+ if ((type == 0) && instr->IsSpecialType0()) {
+ // multiply instruction or extra loads and stores
+ if (instr->Bits(7, 4) == 9) {
+ if (instr->Bit(24) == 0) {
+ // multiply instructions
+ if (instr->Bit(23) == 0) {
+ if (instr->Bit(21) == 0) {
+ // The MUL instruction description (A 4.1.33) refers to Rd as being
+ // the destination for the operation, but it confusingly uses the
+ // Rn field to encode it.
+ Format(instr, "mul'cond's 'rn, 'rm, 'rs");
+ } else {
+ if (instr->Bit(22) == 0) {
+ // The MLA instruction description (A 4.1.28) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ } else {
+ // The MLS instruction description (A 4.1.29) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
+ }
+ }
+ } else {
+ // The signed/long multiply instructions use the terms RdHi and RdLo
+ // when referring to the target registers. They are mapped to the Rn
+ // and Rd fields as follows:
+ // RdLo == Rd field
+ // RdHi == Rn field
+ // The order of registers is: <RdLo>, <RdHi>, <Rm>, <Rs>
+ Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
+ }
+ } else {
+ if (instr->Bits(ExclusiveOpHi, ExclusiveOpLo) == ExclusiveOpcode) {
+ if (instr->Bit(ExclusiveLoad) == 1) {
+ switch (instr->Bits(ExclusiveSizeHi, ExclusiveSizeLo)) {
+ case ExclusiveWord:
+ Format(instr, "ldrex'cond 'rt, ['rn]");
+ break;
+ case ExclusiveDouble:
+ Format(instr, "ldrexd'cond 'rt, ['rn]");
+ break;
+ case ExclusiveByte:
+ Format(instr, "ldrexb'cond 'rt, ['rn]");
+ break;
+ case ExclusiveHalf:
+ Format(instr, "ldrexh'cond 'rt, ['rn]");
+ break;
+ }
+ } else {
+ // The documentation names the low four bits of the
+ // store-exclusive instructions "Rt" but canonically
+ // for disassembly they are really "Rm".
+ switch (instr->Bits(ExclusiveSizeHi, ExclusiveSizeLo)) {
+ case ExclusiveWord:
+ Format(instr, "strex'cond 'rd, 'rm, ['rn]");
+ break;
+ case ExclusiveDouble:
+ Format(instr, "strexd'cond 'rd, 'rm, ['rn]");
+ break;
+ case ExclusiveByte:
+ Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
+ break;
+ case ExclusiveHalf:
+ Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
+ break;
+ }
+ }
+ } else {
+ Unknown(instr);
+ }
+ }
+ } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
+ // ldrd, strd
+ switch (instr->PUField()) {
+ case da_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
+ } else {
+ Format(instr, "'memop'cond's 'rd, ['rn], #-'off8");
+ }
+ break;
+ }
+ case ia_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
+ } else {
+ Format(instr, "'memop'cond's 'rd, ['rn], #+'off8");
+ }
+ break;
+ }
+ case db_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
+ } else {
+ Format(instr, "'memop'cond's 'rd, ['rn, #-'off8]'w");
+ }
+ break;
+ }
+ case ib_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
+ } else {
+ Format(instr, "'memop'cond's 'rd, ['rn, #+'off8]'w");
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ } else {
+ // extra load/store instructions
+ switch (instr->PUField()) {
+ case da_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
+ }
+ break;
+ }
+ case ia_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
+ }
+ break;
+ }
+ case db_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
+ }
+ break;
+ }
+ case ib_x: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ return;
+ }
+ } else if ((type == 0) && instr->IsMiscType0()) {
+ if (instr->Bits(22, 21) == 1) {
+ switch (instr->BitField(7, 4)) {
+ case BX:
+ Format(instr, "bx'cond 'rm");
+ break;
+ case BLX:
+ Format(instr, "blx'cond 'rm");
+ break;
+ case BKPT:
+ Format(instr, "bkpt 'off0to3and8to19");
+ break;
+ default:
+ Unknown(instr); // not used by V8
+ break;
+ }
+ } else if (instr->Bits(22, 21) == 3) {
+ switch (instr->BitField(7, 4)) {
+ case CLZ:
+ Format(instr, "clz'cond 'rd, 'rm");
+ break;
+ default:
+ Unknown(instr); // not used by V8
+ break;
+ }
+ } else {
+ Unknown(instr); // not used by V8
+ }
+ } else if ((type == 1) && instr->IsNopType1()) {
+ Format(instr, "nop'cond");
+ } else if ((type == 1) && instr->IsCsdbType1()) {
+ Format(instr, "csdb'cond");
+ } else {
+ switch (instr->OpcodeField()) {
+ case AND: {
+ Format(instr, "and'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case EOR: {
+ Format(instr, "eor'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case SUB: {
+ Format(instr, "sub'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case RSB: {
+ Format(instr, "rsb'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case ADD: {
+ Format(instr, "add'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case ADC: {
+ Format(instr, "adc'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case SBC: {
+ Format(instr, "sbc'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case RSC: {
+ Format(instr, "rsc'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case TST: {
+ if (instr->HasS()) {
+ Format(instr, "tst'cond 'rn, 'shift_op");
+ } else {
+ Format(instr, "movw'cond 'mw");
+ }
+ break;
+ }
+ case TEQ: {
+ if (instr->HasS()) {
+ Format(instr, "teq'cond 'rn, 'shift_op");
+ } else {
+ // Other instructions matching this pattern are handled in the
+ // miscellaneous instructions part above.
+ MOZ_CRASH();
+ }
+ break;
+ }
+ case CMP: {
+ if (instr->HasS()) {
+ Format(instr, "cmp'cond 'rn, 'shift_op");
+ } else {
+ Format(instr, "movt'cond 'mw");
+ }
+ break;
+ }
+ case CMN: {
+ if (instr->HasS()) {
+ Format(instr, "cmn'cond 'rn, 'shift_op");
+ } else {
+ // Other instructions matching this pattern are handled in the
+ // miscellaneous instructions part above.
+ MOZ_CRASH();
+ }
+ break;
+ }
+ case ORR: {
+ Format(instr, "orr'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case MOV: {
+ Format(instr, "mov'cond's 'rd, 'shift_op");
+ break;
+ }
+ case BIC: {
+ Format(instr, "bic'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case MVN: {
+ Format(instr, "mvn'cond's 'rd, 'shift_op");
+ break;
+ }
+ default: {
+ // The Opcode field is a 4-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ }
+}
+
+void Decoder::DecodeType2(Instruction* instr) {
+ switch (instr->PUField()) {
+ case da_x: {
+ if (instr->HasW()) {
+ Unknown(instr); // not used in V8
+ return;
+ }
+ Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
+ break;
+ }
+ case ia_x: {
+ if (instr->HasW()) {
+ Unknown(instr); // not used in V8
+ return;
+ }
+ Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
+ break;
+ }
+ case db_x: {
+ Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
+ break;
+ }
+ case ib_x: {
+ Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+}
+
+void Decoder::DecodeType3(Instruction* instr) {
+ switch (instr->PUField()) {
+ case da_x: {
+ VERIFY(!instr->HasW());
+ Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
+ break;
+ }
+ case ia_x: {
+ if (instr->Bit(4) == 0) {
+ Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ } else {
+ if (instr->Bit(5) == 0) {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(6) == 0) {
+ Format(instr, "pkhbt'cond 'rd, 'rn, 'rm, lsl #'imm05@07");
+ } else {
+ if (instr->Bits(11, 7) == 0) {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #32");
+ } else {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #'imm05@07");
+ }
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 1:
+ MOZ_CRASH();
+ break;
+ case 2:
+ MOZ_CRASH();
+ break;
+ case 3:
+ Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
+ break;
+ }
+ } else {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ MOZ_CRASH();
+ break;
+ case 1:
+ if (instr->Bits(9, 6) == 1) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxtb'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxtb'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxtb'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxtb'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxth'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxth'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxth'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxth'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 2:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb16'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ case 3:
+ if ((instr->Bits(9, 6) == 1)) {
+ if ((instr->Bit(20) == 0)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxth'cond 'rd, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxth'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxth'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxth'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm");
+ break;
+ case 1:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case db_x: {
+ if (instr->Bits(22, 20) == 0x5) {
+ if (instr->Bits(7, 4) == 0x1) {
+ if (instr->Bits(15, 12) == 0xF) {
+ Format(instr, "smmul'cond 'rn, 'rm, 'rs");
+ } else {
+ // SMMLA (in V8 notation matching ARM ISA format)
+ Format(instr, "smmla'cond 'rn, 'rm, 'rs, 'rd");
+ }
+ break;
+ }
+ }
+ bool FLAG_enable_sudiv = true; // Flag doesn't exist in our engine.
+ if (FLAG_enable_sudiv) {
+ if (instr->Bits(5, 4) == 0x1) {
+ if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+ if (instr->Bit(21) == 0x1) {
+ // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+ Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
+ } else {
+ // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+ Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
+ }
+ break;
+ }
+ }
+ }
+ Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
+ break;
+ }
+ case ib_x: {
+ if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
+ uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
+ uint32_t msbit = widthminus1 + lsbit;
+ if (msbit <= 31) {
+ if (instr->Bit(22)) {
+ Format(instr, "ubfx'cond 'rd, 'rm, 'f");
+ } else {
+ Format(instr, "sbfx'cond 'rd, 'rm, 'f");
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
+ uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
+ if (msbit >= lsbit) {
+ if (instr->RmValue() == 15) {
+ Format(instr, "bfc'cond 'rd, 'f");
+ } else {
+ Format(instr, "bfi'cond 'rd, 'rm, 'f");
+ }
+ } else {
+ MOZ_CRASH();
+ }
+ } else {
+ Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ MOZ_CRASH();
+ break;
+ }
+ }
+}
+
+void Decoder::DecodeType4(Instruction* instr) {
+ if (instr->Bit(22) != 0) {
+ // Privileged mode currently not supported.
+ Unknown(instr);
+ } else {
+ if (instr->HasL()) {
+ Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
+ } else {
+ Format(instr, "stm'cond'pu 'rn'w, 'rlist");
+ }
+ }
+}
+
+void Decoder::DecodeType5(Instruction* instr) {
+ Format(instr, "b'l'cond 'target");
+}
+
+void Decoder::DecodeType6(Instruction* instr) {
+ DecodeType6CoprocessorIns(instr);
+}
+
+int Decoder::DecodeType7(Instruction* instr) {
+ if (instr->Bit(24) == 1) {
+ if (instr->SvcValue() >= kStopCode) {
+ Format(instr, "stop'cond 'svc");
+ // Also print the stop message. Its address is encoded
+ // in the following 4 bytes.
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "\n %p %08x stop message: %s",
+ reinterpret_cast<void*>(instr + Instruction::kInstrSize),
+ *reinterpret_cast<uint32_t*>(instr + Instruction::kInstrSize),
+ *reinterpret_cast<char**>(instr + Instruction::kInstrSize));
+ // We have decoded 2 * Instruction::kInstrSize bytes.
+ return 2 * Instruction::kInstrSize;
+ } else {
+ Format(instr, "svc'cond 'svc");
+ }
+ } else {
+ DecodeTypeVFP(instr);
+ }
+ return Instruction::kInstrSize;
+}
+
+// void Decoder::DecodeTypeVFP(Instruction* instr)
+// vmov: Sn = Rt
+// vmov: Rt = Sn
+// vcvt: Dd = Sm
+// vcvt: Sd = Dm
+// vcvt.f64.s32 Dd, Dd, #<fbits>
+// Dd = vabs(Dm)
+// Sd = vabs(Sm)
+// Dd = vneg(Dm)
+// Sd = vneg(Sm)
+// Dd = vadd(Dn, Dm)
+// Sd = vadd(Sn, Sm)
+// Dd = vsub(Dn, Dm)
+// Sd = vsub(Sn, Sm)
+// Dd = vmul(Dn, Dm)
+// Sd = vmul(Sn, Sm)
+// Dd = vmla(Dn, Dm)
+// Sd = vmla(Sn, Sm)
+// Dd = vmls(Dn, Dm)
+// Sd = vmls(Sn, Sm)
+// Dd = vdiv(Dn, Dm)
+// Sd = vdiv(Sn, Sm)
+// vcmp(Dd, Dm)
+// vcmp(Sd, Sm)
+// Dd = vsqrt(Dm)
+// Sd = vsqrt(Sm)
+// vmrs
+// vmsr
+void Decoder::DecodeTypeVFP(Instruction* instr) {
+ VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
+ VERIFY(instr->Bits(11, 9) == 0x5);
+
+ if (instr->Bit(4) == 0) {
+ if (instr->Opc1Value() == 0x7) {
+ // Other data processing instructions
+ if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
+ // vmov register to register.
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmov'cond.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vmov'cond.f32 'Sd, 'Sm");
+ }
+ } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
+ // vabs
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vabs'cond.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vabs'cond.f32 'Sd, 'Sm");
+ }
+ } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
+ // vneg
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vneg'cond.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vneg'cond.f32 'Sd, 'Sm");
+ }
+ } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
+ DecodeVCVTBetweenDoubleAndSingle(instr);
+ } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
+ DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
+ (instr->Bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>
+ int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", fraction_bits);
+ } else if (((instr->Opc2Value() >> 1) == 0x6) &&
+ (instr->Opc3Value() & 0x1)) {
+ DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1)) {
+ DecodeVCMP(instr);
+ } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vsqrt'cond.f32 'Sd, 'Sm");
+ }
+ } else if (instr->Opc3Value() == 0x0) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmov'cond.f64 'Dd, 'd");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (((instr->Opc2Value() == 0x6)) && instr->Opc3Value() == 0x3) {
+ // vrintz - round towards zero (truncate)
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vrintz'cond.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintz'cond.f32.f32 'Sd, 'Sm");
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->Opc1Value() == 0x3) {
+ if (instr->SzValue() == 0x1) {
+ if (instr->Opc3Value() & 0x1) {
+ Format(instr, "vsub'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm");
+ }
+ } else {
+ if (instr->Opc3Value() & 0x1) {
+ Format(instr, "vsub'cond.f32 'Sd, 'Sn, 'Sm");
+ } else {
+ Format(instr, "vadd'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ }
+ } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vmul'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vmla'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ } else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vmls'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vdiv'cond.f32 'Sd, 'Sn, 'Sm");
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else {
+ if ((instr->VCValue() == 0x0) && (instr->VAValue() == 0x0)) {
+ DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'Dd[0], 'rt");
+ } else {
+ Format(instr, "vmov'cond.32 'Dd[1], 'rt");
+ }
+ } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
+ } else {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+ }
+ } else if ((instr->VCValue() == 0x0) && (instr->VAValue() == 0x7) &&
+ (instr->Bits(19, 16) == 0x1)) {
+ if (instr->VLValue() == 0) {
+ if (instr->Bits(15, 12) == 0xF) {
+ Format(instr, "vmsr'cond FPSCR, APSR");
+ } else {
+ Format(instr, "vmsr'cond FPSCR, 'rt");
+ }
+ } else {
+ if (instr->Bits(15, 12) == 0xF) {
+ Format(instr, "vmrs'cond APSR, FPSCR");
+ } else {
+ Format(instr, "vmrs'cond 'rt, FPSCR");
+ }
+ }
+ }
+ }
+}
+
+void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
+ Instruction* instr) {
+ VERIFY((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0));
+
+ bool to_arm_register = (instr->VLValue() == 0x1);
+
+ if (to_arm_register) {
+ Format(instr, "vmov'cond 'rt, 'Sn");
+ } else {
+ Format(instr, "vmov'cond 'Sn, 'rt");
+ }
+}
+
+void Decoder::DecodeVCMP(Instruction* instr) {
+ VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1));
+
+ // Comparison.
+ bool dp_operation = (instr->SzValue() == 1);
+ bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
+
+ if (dp_operation && !raise_exception_for_qnan) {
+ if (instr->Opc2Value() == 0x4) {
+ Format(instr, "vcmp'cond.f64 'Dd, 'Dm");
+ } else if (instr->Opc2Value() == 0x5) {
+ Format(instr, "vcmp'cond.f64 'Dd, #0.0");
+ } else {
+ Unknown(instr); // invalid
+ }
+ } else if (!raise_exception_for_qnan) {
+ if (instr->Opc2Value() == 0x4) {
+ Format(instr, "vcmp'cond.f32 'Sd, 'Sm");
+ } else if (instr->Opc2Value() == 0x5) {
+ Format(instr, "vcmp'cond.f32 'Sd, #0.0");
+ } else {
+ Unknown(instr); // invalid
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+}
+
+void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
+ VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
+
+ bool double_to_single = (instr->SzValue() == 1);
+
+ if (double_to_single) {
+ Format(instr, "vcvt'cond.f32.f64 'Sd, 'Dm");
+ } else {
+ Format(instr, "vcvt'cond.f64.f32 'Dd, 'Sm");
+ }
+}
+
+void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
+ VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+ (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
+
+ bool to_integer = (instr->Bit(18) == 1);
+ bool dp_operation = (instr->SzValue() == 1);
+ if (to_integer) {
+ bool unsigned_integer = (instr->Bit(16) == 0);
+
+ if (dp_operation) {
+ if (unsigned_integer) {
+ Format(instr, "vcvt'cond.u32.f64 'Sd, 'Dm");
+ } else {
+ Format(instr, "vcvt'cond.s32.f64 'Sd, 'Dm");
+ }
+ } else {
+ if (unsigned_integer) {
+ Format(instr, "vcvt'cond.u32.f32 'Sd, 'Sm");
+ } else {
+ Format(instr, "vcvt'cond.s32.f32 'Sd, 'Sm");
+ }
+ }
+ } else {
+ bool unsigned_integer = (instr->Bit(7) == 0);
+
+ if (dp_operation) {
+ if (unsigned_integer) {
+ Format(instr, "vcvt'cond.f64.u32 'Dd, 'Sm");
+ } else {
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Sm");
+ }
+ } else {
+ if (unsigned_integer) {
+ Format(instr, "vcvt'cond.f32.u32 'Sd, 'Sm");
+ } else {
+ Format(instr, "vcvt'cond.f32.s32 'Sd, 'Sm");
+ }
+ }
+ }
+}
+
+// Decode Type 6 coprocessor instructions.
+// Dm = vmov(Rt, Rt2)
+// <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
+void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
+ VERIFY(instr->TypeValue() == 6);
+
+ if (instr->CoprocessorValue() == 0xA) {
+ switch (instr->OpcodeValue()) {
+ case 0x8:
+ case 0xA:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Sd, ['rn - 4*'imm08@00]");
+ } else {
+ Format(instr, "vstr'cond 'Sd, ['rn - 4*'imm08@00]");
+ }
+ break;
+ case 0xC:
+ case 0xE:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Sd, ['rn + 4*'imm08@00]");
+ } else {
+ Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
+ }
+ break;
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
+ bool to_vfp_register = (instr->VLValue() == 0x1);
+ if (to_vfp_register) {
+ Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}");
+ } else {
+ Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}");
+ }
+ break;
+ }
+ default:
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->CoprocessorValue() == 0xB) {
+ switch (instr->OpcodeValue()) {
+ case 0x2:
+ // Load and store double to two GP registers
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
+ Unknown(instr); // Not used by V8.
+ } else if (instr->HasL()) {
+ Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+ } else {
+ Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+ }
+ break;
+ case 0x8:
+ case 0xA:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn - 4*'imm08@00]");
+ }
+ break;
+ case 0xC:
+ case 0xE:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
+ }
+ break;
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
+ bool to_vfp_register = (instr->VLValue() == 0x1);
+ if (to_vfp_register) {
+ Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
+ } else {
+ Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}");
+ }
+ break;
+ }
+ default:
+ Unknown(instr); // Not used by V8.
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+}
+
+void Decoder::DecodeSpecialCondition(Instruction* instr) {
+ switch (instr->SpecialValue()) {
+ case 5:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl signed
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.s%d q%d, d%d", imm3 * 8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 7:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl unsigned
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.u%d q%d, d%d", imm3 * 8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 8:
+ if (instr->Bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vst1.%d ",
+ (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else if (instr->Bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d ",
+ (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 9:
+ if (instr->Bits(21, 20) == 0 && instr->Bits(9, 8) == 0) {
+ // vst1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int size = instr->Bits(11, 10);
+ int index = instr->Bits(7, 5);
+ int align = instr->Bit(4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vst1.%d {d%d[%d]}, ",
+ (1 << size) << 3, Vd, index);
+ FormatNeonMemory(Rn, align, Rm);
+ } else if (instr->Bits(21, 20) == 2 && instr->Bits(9, 8) == 0) {
+ // vld1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int size = instr->Bits(11, 10);
+ int index = instr->Bits(7, 5);
+ int align = instr->Bit(4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d {d%d[%d]}, ",
+ (1 << size) << 3, Vd, index);
+ FormatNeonMemory(Rn, align, Rm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0xA:
+ if (instr->Bits(22, 20) == 7) {
+ const char* option = "?";
+ switch (instr->Bits(3, 0)) {
+ case 2:
+ option = "oshst";
+ break;
+ case 3:
+ option = "osh";
+ break;
+ case 6:
+ option = "nshst";
+ break;
+ case 7:
+ option = "nsh";
+ break;
+ case 10:
+ option = "ishst";
+ break;
+ case 11:
+ option = "ish";
+ break;
+ case 14:
+ option = "st";
+ break;
+ case 15:
+ option = "sy";
+ break;
+ }
+ switch (instr->Bits(7, 4)) {
+ case 1:
+ Print("clrex");
+ break;
+ case 4:
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "dsb %s", option);
+ break;
+ case 5:
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "dmb %s", option);
+ break;
+ default:
+ Unknown(instr);
+ }
+ break;
+ }
+ [[fallthrough]];
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ int Rn = instr->Bits(19, 16);
+ int offset = instr->Bits(11, 0);
+ if (offset == 0) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "pld [r%d]", Rn);
+ } else if (instr->Bit(23) == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #-%d]", Rn, offset);
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #+%d]", Rn, offset);
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0x1D:
+ if (instr->Opc1Value() == 0x7 && instr->Bits(19, 18) == 0x2 &&
+ instr->Bits(11, 9) == 0x5 && instr->Bits(7, 6) == 0x1 &&
+ instr->Bit(4) == 0x0) {
+ // VRINTA, VRINTN, VRINTP, VRINTM (floating-point)
+ bool dp_operation = (instr->SzValue() == 1);
+ int rounding_mode = instr->Bits(17, 16);
+ switch (rounding_mode) {
+ case 0x0:
+ if (dp_operation) {
+ Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0x1:
+ if (dp_operation) {
+ Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0x2:
+ if (dp_operation) {
+ Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0x3:
+ if (dp_operation) {
+ Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ default:
+ MOZ_CRASH(); // Case analysis is exhaustive.
+ break;
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
+ default:
+ Unknown(instr);
+ break;
+ }
+}
+
+# undef VERIFY
+
+bool Decoder::IsConstantPoolAt(uint8_t* instr_ptr) {
+ int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
+ return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
+}
+
+int Decoder::ConstantPoolSizeAt(uint8_t* instr_ptr) {
+ if (IsConstantPoolAt(instr_ptr)) {
+ int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
+ return DecodeConstantPoolLength(instruction_bits);
+ } else {
+ return -1;
+ }
+}
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
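+// Returns the number of bytes consumed, normally Instruction::kInstrSize; a
+// stop message or the code age prologue consumes two instruction slots.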
+int Decoder::InstructionDecode(uint8_t* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ // Print raw instruction bytes.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
+ instr->InstructionBits());
+ if (instr->ConditionField() == kSpecialCondition) {
+ DecodeSpecialCondition(instr);
+ return Instruction::kInstrSize;
+ }
+ int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
+ if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "constant pool begin (length %d)",
+ DecodeConstantPoolLength(instruction_bits));
+ return Instruction::kInstrSize;
+ } else if (instruction_bits == kCodeAgeJumpInstruction) {
+    // The code age prologue has a constant immediately following the jump
+ // instruction.
+ Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
+ DecodeType2(instr);
+ SNPrintF(out_buffer_ + out_buffer_pos_, " (0x%08x)",
+ target->InstructionBits());
+ return 2 * Instruction::kInstrSize;
+ }
+ switch (instr->TypeValue()) {
+ case 0:
+ case 1: {
+ DecodeType01(instr);
+ break;
+ }
+ case 2: {
+ DecodeType2(instr);
+ break;
+ }
+ case 3: {
+ DecodeType3(instr);
+ break;
+ }
+ case 4: {
+ DecodeType4(instr);
+ break;
+ }
+ case 5: {
+ DecodeType5(instr);
+ break;
+ }
+ case 6: {
+ DecodeType6(instr);
+ break;
+ }
+ case 7: {
+ return DecodeType7(instr);
+ }
+ default: {
+ // The type field is 3-bits in the ARM encoding.
+ MOZ_CRASH();
+ break;
+ }
+ }
+ return Instruction::kInstrSize;
+}
+
+} // namespace disasm
+
+# undef STRING_STARTS_WITH
+# undef VERIFY
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(uint8_t* addr) const {
+ SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+const char* NameConverter::NameOfConstant(uint8_t* addr) const {
+ return NameOfAddress(addr);
+}
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return disasm::Registers::Name(reg);
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ MOZ_CRASH(); // ARM does not have the concept of a byte register
+ return "nobytereg";
+}
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ MOZ_CRASH(); // ARM does not have any XMM registers
+ return "noxmmreg";
+}
+
+const char* NameConverter::NameInCode(uint8_t* addr) const {
+  // The default name converter is called for unknown code, so we will not
+  // try to access any memory.
+ return "";
+}
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+Disassembler::~Disassembler() {}
+
+int Disassembler::InstructionDecode(V8Vector<char> buffer,
+ uint8_t* instruction) {
+ Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+int Disassembler::ConstantPoolSizeAt(uint8_t* instruction) {
+ return Decoder::ConstantPoolSizeAt(instruction);
+}
+
+void Disassembler::Disassemble(FILE* f, uint8_t* begin, uint8_t* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (uint8_t* pc = begin; pc < end;) {
+ EmbeddedVector<char, ReasonableBufferSize> buffer;
+ buffer[0] = '\0';
+ uint8_t* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ fprintf(f, "%p %08x %s\n", prev_pc,
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ }
+}
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // JS_DISASM_ARM
diff --git a/js/src/jit/arm/disasm/Disasm-arm.h b/js/src/jit/arm/disasm/Disasm-arm.h
new file mode 100644
index 0000000000..8a0dd97c32
--- /dev/null
+++ b/js/src/jit/arm/disasm/Disasm-arm.h
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ */
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_arm_disasm_Disasm_arm_h
+#define jit_arm_disasm_Disasm_arm_h
+
+#ifdef JS_DISASM_ARM
+
+# include "mozilla/Assertions.h"
+# include "mozilla/Types.h"
+
+# include <stdio.h>
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+typedef unsigned char byte;
+
+// A reasonable (i.e., safe) buffer size for the disassembly of a single
+// instruction.
+const int ReasonableBufferSize = 256;
+
+// Vector as used by the original V8 code, kept to allow for minimal
+// modification. It functions exactly like a character array with helper
+// methods.
+template <typename T>
+class V8Vector {
+ public:
+ V8Vector() : start_(nullptr), length_(0) {}
+ V8Vector(T* data, int length) : start_(data), length_(length) {
+ MOZ_ASSERT(length == 0 || (length > 0 && data != nullptr));
+ }
+
+ // Returns the length of the vector.
+ int length() const { return length_; }
+
+ // Returns the pointer to the start of the data in the vector.
+ T* start() const { return start_; }
+
+ // Access individual vector elements - checks bounds in debug mode.
+ T& operator[](int index) const {
+ MOZ_ASSERT(0 <= index && index < length_);
+ return start_[index];
+ }
+
+ V8Vector<T> operator+(int offset) const {
+ MOZ_ASSERT(offset < length_);
+ return V8Vector<T>(start_ + offset, length_ - offset);
+ }
+
+ private:
+ T* start_;
+ int length_;
+};
+
+template <typename T, int kSize>
+class EmbeddedVector : public V8Vector<T> {
+ public:
+ EmbeddedVector() : V8Vector<T>(buffer_, kSize) {}
+
+ explicit EmbeddedVector(T initial_value) : V8Vector<T>(buffer_, kSize) {
+ for (int i = 0; i < kSize; ++i) {
+ buffer_[i] = initial_value;
+ }
+ }
+
+  // When copying, make the underlying Vector reference our buffer.
+ EmbeddedVector(const EmbeddedVector& rhs) : V8Vector<T>(rhs) {
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ this->set_start(buffer_);
+ }
+
+ EmbeddedVector& operator=(const EmbeddedVector& rhs) {
+ if (this == &rhs) return *this;
+ V8Vector<T>::operator=(rhs);
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ this->set_start(buffer_);
+ return *this;
+ }
+
+ private:
+ T buffer_[kSize];
+};
+
+// Interface and default implementation for converting addresses and
+// register-numbers to text. The default implementation is machine
+// specific.
+class NameConverter {
+ public:
+ virtual ~NameConverter() {}
+ virtual const char* NameOfCPURegister(int reg) const;
+ virtual const char* NameOfByteCPURegister(int reg) const;
+ virtual const char* NameOfXMMRegister(int reg) const;
+ virtual const char* NameOfAddress(byte* addr) const;
+ virtual const char* NameOfConstant(byte* addr) const;
+ virtual const char* NameInCode(byte* addr) const;
+
+ protected:
+ EmbeddedVector<char, 128> tmp_buffer_;
+};
+
+// A generic Disassembler interface
+class Disassembler {
+ public:
+ // Caller deallocates converter.
+ explicit Disassembler(const NameConverter& converter);
+
+ virtual ~Disassembler();
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
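+  //
+  // Typical use, mirroring Disassembler::Disassemble:
+  //   NameConverter converter;
+  //   Disassembler d(converter);
+  //   EmbeddedVector<char, ReasonableBufferSize> buffer;
+  //   int length = d.InstructionDecode(buffer, pc);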
+ int InstructionDecode(V8Vector<char> buffer, uint8_t* instruction);
+
+ // Returns -1 if instruction does not mark the beginning of a constant pool,
+ // or the number of entries in the constant pool beginning here.
+ int ConstantPoolSizeAt(byte* instruction);
+
+ // Write disassembly into specified file 'f' using specified NameConverter
+ // (see constructor).
+ static void Disassemble(FILE* f, uint8_t* begin, uint8_t* end);
+
+ private:
+ const NameConverter& converter_;
+
+ // Disallow implicit constructors.
+ Disassembler() = delete;
+ Disassembler(const Disassembler&) = delete;
+ void operator=(const Disassembler&) = delete;
+};
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // JS_DISASM_ARM
+
+#endif // jit_arm_disasm_Disasm_arm_h
diff --git a/js/src/jit/arm/gen-double-encoder-table.py b/js/src/jit/arm/gen-double-encoder-table.py
new file mode 100644
index 0000000000..fd622da82e
--- /dev/null
+++ b/js/src/jit/arm/gen-double-encoder-table.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""Generate tables of immediately-encodable VFP doubles.
+
+DOES NOT get automatically run during the build process. If you need to
+modify this file (which is unlikely), you must re-run this script:
+
+python gen-double-encoder-table.py > $(topsrcdir)/path/to/DoubleEntryTable.tbl
+"""
+
+import operator
+from functools import reduce  # reduce is not a builtin in Python 3.
+
+
+def rep(bit, count):
+ return reduce(operator.ior, [bit << c for c in range(count)])
+
+
+def encodeDouble(value):
+ """Generate an ARM ARM 'VFP modified immediate constant' with format:
+ aBbbbbbb bbcdefgh 000...
+
+ We will return the top 32 bits of the double; the rest are 0."""
+ assert (0 <= value) and (value <= 255)
+ a = value >> 7
+ b = (value >> 6) & 1
+ B = int(b == 0)
+ cdefgh = value & 0x3F
+ return (a << 31) | (B << 30) | (rep(b, 8) << 22) | cdefgh << 16
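+# For reference, encodeDouble(0x70) returns 0x3ff00000, the high word of the
+# double 1.0.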
+
+
+print("/* THIS FILE IS AUTOMATICALLY GENERATED BY gen-double-encode-table.py. */")
+for i in range(256):
+ print(" { 0x%08x, { %d, %d, 0 } }," % (encodeDouble(i), i & 0xF, i >> 4))
diff --git a/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S b/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S
new file mode 100644
index 0000000000..0237f2221d
--- /dev/null
+++ b/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S
@@ -0,0 +1,27 @@
+//===-- aeabi_idivmod.S - EABI idivmod implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { int quot, int rem} __aeabi_idivmod(int numerator, int denominator) {
+// int rem, quot;
+// quot = __divmodsi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
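+//
+// Per the ARM EABI, the quotient is returned in r0 and the remainder in r1:
+// __divmodsi4 returns the quotient in r0 and writes the remainder to the stack
+// slot that is reloaded into r1 below.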
+
+ .syntax unified
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
+ push { lr }
+ sub sp, sp, #4
+ mov r2, sp
+ bl SYMBOL_NAME(__divmodsi4)
+ ldr r1, [sp]
+ add sp, sp, #4
+ pop { pc }
diff --git a/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S b/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S
new file mode 100644
index 0000000000..f7e1d2ebed
--- /dev/null
+++ b/js/src/jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S
@@ -0,0 +1,28 @@
+//===-- aeabi_uidivmod.S - EABI uidivmod implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { unsigned quot, unsigned rem}
+// __aeabi_uidivmod(unsigned numerator, unsigned denominator) {
+// unsigned rem, quot;
+// quot = __udivmodsi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
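+//
+// As with __aeabi_idivmod, the quotient comes back from __udivmodsi4 in r0 and
+// the remainder is reloaded from the stack slot into r1.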
+
+ .syntax unified
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
+ push { lr }
+ sub sp, sp, #4
+ mov r2, sp
+ bl SYMBOL_NAME(__udivmodsi4)
+ ldr r1, [sp]
+ add sp, sp, #4
+ pop { pc }
diff --git a/js/src/jit/arm/llvm-compiler-rt/assembly.h b/js/src/jit/arm/llvm-compiler-rt/assembly.h
new file mode 100644
index 0000000000..802d1e2870
--- /dev/null
+++ b/js/src/jit/arm/llvm-compiler-rt/assembly.h
@@ -0,0 +1,67 @@
+/* ===-- assembly.h - compiler-rt assembler support macros -----------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file defines macros for use in compiler-rt assembler source.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef COMPILERRT_ASSEMBLY_H
+#define COMPILERRT_ASSEMBLY_H
+
+#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
+# define SEPARATOR @
+#else
+# define SEPARATOR ;
+#endif
+
+#if defined(__APPLE__)
+# define HIDDEN_DIRECTIVE .private_extern
+# define LOCAL_LABEL(name) L_##name
+#else
+# define HIDDEN_DIRECTIVE .hidden
+# define LOCAL_LABEL(name) .L_##name
+#endif
+
+#define GLUE2(a, b) a##b
+#define GLUE(a, b) GLUE2(a, b)
+#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
+
+#ifdef VISIBILITY_HIDDEN
+# define DECLARE_SYMBOL_VISIBILITY(name) \
+ HIDDEN_DIRECTIVE SYMBOL_NAME(name) SEPARATOR
+#else
+# define DECLARE_SYMBOL_VISIBILITY(name)
+#endif
+
+#define DEFINE_COMPILERRT_FUNCTION(name) \
+ .globl SYMBOL_NAME(name) \
+ SEPARATOR DECLARE_SYMBOL_VISIBILITY(name) SYMBOL_NAME(name) :
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
+ .globl SYMBOL_NAME(name) \
+ SEPARATOR HIDDEN_DIRECTIVE SYMBOL_NAME(name) \
+ SEPARATOR SYMBOL_NAME(name) :
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
+ .globl name SEPARATOR HIDDEN_DIRECTIVE name SEPARATOR name:
+
+#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
+ .globl SYMBOL_NAME(name) SEPARATOR.set SYMBOL_NAME(name), \
+ SYMBOL_NAME(target) SEPARATOR
+
+#if defined(__ARM_EABI__)
+# define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
+ DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
+#else
+# define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
+#endif
+
+#endif /* COMPILERRT_ASSEMBLY_H */
diff --git a/js/src/jit/arm64/Architecture-arm64.cpp b/js/src/jit/arm64/Architecture-arm64.cpp
new file mode 100644
index 0000000000..eb3dd67b1a
--- /dev/null
+++ b/js/src/jit/arm64/Architecture-arm64.cpp
@@ -0,0 +1,129 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/Architecture-arm64.h"
+
+#include <cstring>
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+#include "jit/FlushICache.h" // js::jit::FlushICache
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+Registers::Code Registers::FromName(const char* name) {
+ // Check for some register aliases first.
+ if (strcmp(name, "ip0") == 0) {
+ return ip0;
+ }
+ if (strcmp(name, "ip1") == 0) {
+ return ip1;
+ }
+ if (strcmp(name, "fp") == 0) {
+ return fp;
+ }
+
+ for (uint32_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisters::Code FloatRegisters::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+// This must sync with GetPushSizeInBytes just below and also with
+// MacroAssembler::PushRegsInMask.
+FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+ SetType all = s.bits();
+ SetType set128b =
+ (all & FloatRegisters::AllSimd128Mask) >> FloatRegisters::ShiftSimd128;
+ SetType doubleSet =
+ (all & FloatRegisters::AllDoubleMask) >> FloatRegisters::ShiftDouble;
+ SetType singleSet =
+ (all & FloatRegisters::AllSingleMask) >> FloatRegisters::ShiftSingle;
+
+ // See GetPushSizeInBytes.
+ SetType set64b = (singleSet | doubleSet) & ~set128b;
+
+ SetType reduced = (set128b << FloatRegisters::ShiftSimd128) |
+ (set64b << FloatRegisters::ShiftDouble);
+ return FloatRegisterSet(reduced);
+}
+
+// Compute the size of the dump area for |s.ReduceSetForPush()|, as defined by
+// MacroAssembler::PushRegsInMask for this target.
+uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
+ SetType all = s.bits();
+ SetType set128b =
+ (all & FloatRegisters::AllSimd128Mask) >> FloatRegisters::ShiftSimd128;
+ SetType doubleSet =
+ (all & FloatRegisters::AllDoubleMask) >> FloatRegisters::ShiftDouble;
+ SetType singleSet =
+ (all & FloatRegisters::AllSingleMask) >> FloatRegisters::ShiftSingle;
+
+  // PushRegsInMask pushes singles as if they were doubles. We also need to
+  // remove singles or doubles that are already pushed as part of a vector
+  // register.
+ SetType set64b = (singleSet | doubleSet) & ~set128b;
+
+ // The "+ 1) & ~1" is to take into account the alignment hole below the
+ // double-reg dump area. See MacroAssembler::PushRegsInMaskSizeInBytes.
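+  // For example, pushing {s0, d1, v2} gives set64b = {0, 1} and set128b = {2},
+  // so the result is ((2 + 1) & ~1) * 8 + 1 * 16 (SizeOfSimd128) = 32 bytes.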
+ return ((set64b.size() + 1) & ~1) * sizeof(double) +
+ set128b.size() * SizeOfSimd128;
+}
+
+uint32_t FloatRegister::getRegisterDumpOffsetInBytes() {
+ // See block comment in MacroAssembler.h for further required invariants.
+ static_assert(sizeof(jit::FloatRegisters::RegisterContent) == 16);
+ return encoding() * sizeof(jit::FloatRegisters::RegisterContent);
+}
+
+// For N in 0..31, if any of sN, dN or qN is a member of `s`, the returned set
+// will contain all of sN, dN and qN.
+FloatRegisterSet FloatRegister::BroadcastToAllSizes(const FloatRegisterSet& s) {
+ SetType all = s.bits();
+ SetType set128b =
+ (all & FloatRegisters::AllSimd128Mask) >> FloatRegisters::ShiftSimd128;
+ SetType doubleSet =
+ (all & FloatRegisters::AllDoubleMask) >> FloatRegisters::ShiftDouble;
+ SetType singleSet =
+ (all & FloatRegisters::AllSingleMask) >> FloatRegisters::ShiftSingle;
+
+ SetType merged = set128b | doubleSet | singleSet;
+ SetType broadcasted = (merged << FloatRegisters::ShiftSimd128) |
+ (merged << FloatRegisters::ShiftDouble) |
+ (merged << FloatRegisters::ShiftSingle);
+
+ return FloatRegisterSet(broadcasted);
+}
+
+uint32_t GetARM64Flags() { return 0; }
+
+// CPU flags handling on ARM64 is currently different from other platforms:
+// the flags are computed and stored per-assembler and are thus "always
+// computed".
+bool CPUFlagsHaveBeenComputed() { return true; }
+
+void FlushICache(void* code, size_t size) {
+ vixl::CPU::EnsureIAndDCacheCoherency(code, size);
+}
+
+void FlushExecutionContext() { vixl::CPU::FlushExecutionContext(); }
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm64/Architecture-arm64.h b/js/src/jit/arm64/Architecture-arm64.h
new file mode 100644
index 0000000000..96bbc63848
--- /dev/null
+++ b/js/src/jit/arm64/Architecture-arm64.h
@@ -0,0 +1,773 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_Architecture_arm64_h
+#define jit_arm64_Architecture_arm64_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/shared/Architecture-shared.h"
+
+#include "js/Utility.h"
+
+#define JS_HAS_HIDDEN_SP
+static const uint32_t HiddenSPEncoding = vixl::kSPRegInternalCode;
+
+namespace js {
+namespace jit {
+
+// AArch64 has 32 64-bit integer registers, x0 through x31.
+//
+// x31 (or, more accurately, the integer register with encoding 31, since
+// there is no x31 per se) is special and functions as both the stack pointer
+// and a zero register.
+//
+// The bottom 32 bits of each of the X registers are accessible as w0 through
+// w31. The program counter is not accessible as a register.
+//
+// SIMD and scalar floating-point registers share a register bank.
+// 32-bit float registers are s0 through s31.
+// 64-bit double registers are d0 through d31.
+// 128-bit SIMD registers are v0 through v31.
+// e.g., s0 is the bottom 32 bits of d0, which is the bottom 64 bits of v0.
+
+// AArch64 Calling Convention:
+// x0 - x7: arguments and return value
+// x8: indirect result (struct) location
+// x9 - x15: temporary registers
+// x16 - x17: intra-call-use registers (PLT, linker)
+// x18: platform specific use (TLS)
+// x19 - x28: callee-saved registers
+// x29: frame pointer
+// x30: link register
+
+// AArch64 Calling Convention for Floats:
+// d0 - d7: arguments and return value
+// d8 - d15: callee-saved registers
+//             Only the low 64 bits of v8-v15 are preserved; bits 64-127 are
+//             not saved.
+// d16 - d31: temporary registers
+
+// AArch64 does not have soft float.
+
+class Registers {
+ public:
+ enum RegisterID {
+ w0 = 0,
+ x0 = 0,
+ w1 = 1,
+ x1 = 1,
+ w2 = 2,
+ x2 = 2,
+ w3 = 3,
+ x3 = 3,
+ w4 = 4,
+ x4 = 4,
+ w5 = 5,
+ x5 = 5,
+ w6 = 6,
+ x6 = 6,
+ w7 = 7,
+ x7 = 7,
+ w8 = 8,
+ x8 = 8,
+ w9 = 9,
+ x9 = 9,
+ w10 = 10,
+ x10 = 10,
+ w11 = 11,
+ x11 = 11,
+ w12 = 12,
+ x12 = 12,
+ w13 = 13,
+ x13 = 13,
+ w14 = 14,
+ x14 = 14,
+ w15 = 15,
+ x15 = 15,
+ w16 = 16,
+ x16 = 16,
+ ip0 = 16, // MacroAssembler scratch register 1.
+ w17 = 17,
+ x17 = 17,
+ ip1 = 17, // MacroAssembler scratch register 2.
+ w18 = 18,
+ x18 = 18,
+ tls = 18, // Platform-specific use (TLS).
+ w19 = 19,
+ x19 = 19,
+ w20 = 20,
+ x20 = 20,
+ w21 = 21,
+ x21 = 21,
+ w22 = 22,
+ x22 = 22,
+ w23 = 23,
+ x23 = 23,
+ w24 = 24,
+ x24 = 24,
+ w25 = 25,
+ x25 = 25,
+ w26 = 26,
+ x26 = 26,
+ w27 = 27,
+ x27 = 27,
+ w28 = 28,
+ x28 = 28,
+ w29 = 29,
+ x29 = 29,
+ fp = 29,
+ w30 = 30,
+ x30 = 30,
+ lr = 30,
+ w31 = 31,
+ x31 = 31,
+ wzr = 31,
+ xzr = 31,
+ sp = 31, // Special: both stack pointer and a zero register.
+ };
+ typedef uint8_t Code;
+ typedef uint32_t Encoding;
+ typedef uint32_t SetType;
+
+ static const Code Invalid = 0xFF;
+
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+
+ static const char* GetName(uint32_t code) {
+ static const char* const Names[] = {
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+ "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"};
+ static_assert(Total == std::size(Names), "Table is the correct size");
+ if (code >= Total) {
+ return "invalid";
+ }
+ return Names[code];
+ }
+
+ static Code FromName(const char* name);
+
+ static const uint32_t Total = 32;
+ static const uint32_t TotalPhys = 32;
+ static const uint32_t Allocatable =
+ 27; // No named special-function registers.
+
+ static const SetType AllMask = 0xFFFFFFFF;
+ static const SetType NoneMask = 0x0;
+
+ static const SetType ArgRegMask =
+ (1 << Registers::x0) | (1 << Registers::x1) | (1 << Registers::x2) |
+ (1 << Registers::x3) | (1 << Registers::x4) | (1 << Registers::x5) |
+ (1 << Registers::x6) | (1 << Registers::x7) | (1 << Registers::x8);
+
+ static const SetType VolatileMask =
+ (1 << Registers::x0) | (1 << Registers::x1) | (1 << Registers::x2) |
+ (1 << Registers::x3) | (1 << Registers::x4) | (1 << Registers::x5) |
+ (1 << Registers::x6) | (1 << Registers::x7) | (1 << Registers::x8) |
+ (1 << Registers::x9) | (1 << Registers::x10) | (1 << Registers::x11) |
+ (1 << Registers::x12) | (1 << Registers::x13) | (1 << Registers::x14) |
+ (1 << Registers::x15) | (1 << Registers::x16) | (1 << Registers::x17) |
+ (1 << Registers::x18);
+
+ static const SetType NonVolatileMask =
+ (1 << Registers::x19) | (1 << Registers::x20) | (1 << Registers::x21) |
+ (1 << Registers::x22) | (1 << Registers::x23) | (1 << Registers::x24) |
+ (1 << Registers::x25) | (1 << Registers::x26) | (1 << Registers::x27) |
+ (1 << Registers::x28) | (1 << Registers::x29) | (1 << Registers::x30);
+
+ static const SetType NonAllocatableMask =
+ (1 << Registers::x28) | // PseudoStackPointer.
+ (1 << Registers::ip0) | // First scratch register.
+ (1 << Registers::ip1) | // Second scratch register.
+ (1 << Registers::tls) | (1 << Registers::lr) | (1 << Registers::sp) |
+ (1 << Registers::fp);
+
+ static const SetType WrapperMask = VolatileMask;
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask = (1 << Registers::x2);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask = (1 << Registers::x0);
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
+template <typename T>
+class TypedRegisterSet;
+
+// 128-bit bitset for FloatRegisters::SetType.
+
+class Bitset128 {
+ // The order (hi, lo) looks best in the debugger.
+ uint64_t hi, lo;
+
+ public:
+ MOZ_IMPLICIT constexpr Bitset128(uint64_t initial) : hi(0), lo(initial) {}
+ MOZ_IMPLICIT constexpr Bitset128(const Bitset128& that)
+ : hi(that.hi), lo(that.lo) {}
+
+ constexpr Bitset128(uint64_t hi, uint64_t lo) : hi(hi), lo(lo) {}
+
+ constexpr uint64_t high() const { return hi; }
+
+ constexpr uint64_t low() const { return lo; }
+
+ constexpr Bitset128 operator|(Bitset128 that) const {
+ return Bitset128(hi | that.hi, lo | that.lo);
+ }
+
+ constexpr Bitset128 operator&(Bitset128 that) const {
+ return Bitset128(hi & that.hi, lo & that.lo);
+ }
+
+ constexpr Bitset128 operator^(Bitset128 that) const {
+ return Bitset128(hi ^ that.hi, lo ^ that.lo);
+ }
+
+ constexpr Bitset128 operator~() const { return Bitset128(~hi, ~lo); }
+
+ // We must avoid shifting by the word width, which is complex. Inlining plus
+ // shift-by-constant will remove a lot of code in the normal case.
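+  //
+  // For example, Bitset128(1) << 64 takes the shift == 64 branch and yields
+  // hi == 1, lo == 0, instead of relying on an undefined 64-bit shift.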
+
+ constexpr Bitset128 operator<<(size_t shift) const {
+ if (shift == 0) {
+ return *this;
+ }
+ if (shift < 64) {
+ return Bitset128((hi << shift) | (lo >> (64 - shift)), lo << shift);
+ }
+ if (shift == 64) {
+ return Bitset128(lo, 0);
+ }
+ return Bitset128(lo << (shift - 64), 0);
+ }
+
+ constexpr Bitset128 operator>>(size_t shift) const {
+ if (shift == 0) {
+ return *this;
+ }
+ if (shift < 64) {
+ return Bitset128(hi >> shift, (lo >> shift) | (hi << (64 - shift)));
+ }
+ if (shift == 64) {
+ return Bitset128(0, hi);
+ }
+ return Bitset128(0, hi >> (shift - 64));
+ }
+
+ constexpr bool operator==(Bitset128 that) const {
+ return lo == that.lo && hi == that.hi;
+ }
+
+ constexpr bool operator!=(Bitset128 that) const {
+ return lo != that.lo || hi != that.hi;
+ }
+
+ constexpr bool operator!() const { return (hi | lo) == 0; }
+
+ Bitset128& operator|=(const Bitset128& that) {
+ hi |= that.hi;
+ lo |= that.lo;
+ return *this;
+ }
+
+ Bitset128& operator&=(const Bitset128& that) {
+ hi &= that.hi;
+ lo &= that.lo;
+ return *this;
+ }
+
+ uint32_t size() const {
+ return mozilla::CountPopulation64(hi) + mozilla::CountPopulation64(lo);
+ }
+
+ uint32_t countTrailingZeroes() const {
+ if (lo) {
+ return mozilla::CountTrailingZeroes64(lo);
+ }
+ return mozilla::CountTrailingZeroes64(hi) + 64;
+ }
+
+ uint32_t countLeadingZeroes() const {
+ if (hi) {
+ return mozilla::CountLeadingZeroes64(hi);
+ }
+ return mozilla::CountLeadingZeroes64(lo) + 64;
+ }
+};
+
+class FloatRegisters {
+ public:
+ enum FPRegisterID {
+ s0 = 0,
+ d0 = 0,
+ v0 = 0,
+ s1 = 1,
+ d1 = 1,
+ v1 = 1,
+ s2 = 2,
+ d2 = 2,
+ v2 = 2,
+ s3 = 3,
+ d3 = 3,
+ v3 = 3,
+ s4 = 4,
+ d4 = 4,
+ v4 = 4,
+ s5 = 5,
+ d5 = 5,
+ v5 = 5,
+ s6 = 6,
+ d6 = 6,
+ v6 = 6,
+ s7 = 7,
+ d7 = 7,
+ v7 = 7,
+ s8 = 8,
+ d8 = 8,
+ v8 = 8,
+ s9 = 9,
+ d9 = 9,
+ v9 = 9,
+ s10 = 10,
+ d10 = 10,
+ v10 = 10,
+ s11 = 11,
+ d11 = 11,
+ v11 = 11,
+ s12 = 12,
+ d12 = 12,
+ v12 = 12,
+ s13 = 13,
+ d13 = 13,
+ v13 = 13,
+ s14 = 14,
+ d14 = 14,
+ v14 = 14,
+ s15 = 15,
+ d15 = 15,
+ v15 = 15,
+ s16 = 16,
+ d16 = 16,
+ v16 = 16,
+ s17 = 17,
+ d17 = 17,
+ v17 = 17,
+ s18 = 18,
+ d18 = 18,
+ v18 = 18,
+ s19 = 19,
+ d19 = 19,
+ v19 = 19,
+ s20 = 20,
+ d20 = 20,
+ v20 = 20,
+ s21 = 21,
+ d21 = 21,
+ v21 = 21,
+ s22 = 22,
+ d22 = 22,
+ v22 = 22,
+ s23 = 23,
+ d23 = 23,
+ v23 = 23,
+ s24 = 24,
+ d24 = 24,
+ v24 = 24,
+ s25 = 25,
+ d25 = 25,
+ v25 = 25,
+ s26 = 26,
+ d26 = 26,
+ v26 = 26,
+ s27 = 27,
+ d27 = 27,
+ v27 = 27,
+ s28 = 28,
+ d28 = 28,
+ v28 = 28,
+ s29 = 29,
+ d29 = 29,
+ v29 = 29,
+ s30 = 30,
+ d30 = 30,
+ v30 = 30,
+ s31 = 31,
+ d31 = 31,
+ v31 = 31, // Scratch register.
+ };
+
+ // Eight bits: (invalid << 7) | (kind << 5) | encoding
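+  // For example, d3 is (Double << 5) | 3 == 0x23 and v17 is
+  // (Simd128 << 5) | 17 == 0x51; an invalid register has bit 7 set.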
+ typedef uint8_t Code;
+ typedef FPRegisterID Encoding;
+ typedef Bitset128 SetType;
+
+ enum Kind : uint8_t { Single, Double, Simd128, NumTypes };
+
+ static constexpr Code Invalid = 0x80;
+
+ static const char* GetName(uint32_t code) {
+ // clang-format off
+ static const char* const Names[] = {
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9",
+ "s10", "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19",
+ "s20", "s21", "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29",
+ "s30", "s31",
+
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9",
+ "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19",
+ "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
+ "d30", "d31",
+
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
+ "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
+ "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
+ "v30", "v31",
+ };
+ // clang-format on
+ static_assert(Total == std::size(Names), "Table is the correct size");
+ if (code >= Total) {
+ return "invalid";
+ }
+ return Names[code];
+ }
+
+ static Code FromName(const char* name);
+
+ static const uint32_t TotalPhys = 32;
+ static const uint32_t Total = TotalPhys * NumTypes;
+ static const uint32_t Allocatable = 31; // Without d31, the scratch register.
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+ static constexpr unsigned ShiftSingle = uint32_t(Single) * TotalPhys;
+ static constexpr unsigned ShiftDouble = uint32_t(Double) * TotalPhys;
+ static constexpr unsigned ShiftSimd128 = uint32_t(Simd128) * TotalPhys;
+
+ static constexpr SetType NoneMask = SetType(0);
+ static constexpr SetType AllPhysMask = ~(~SetType(0) << TotalPhys);
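+  // AllPhysMask covers the low TotalPhys (32) bits, i.e. a single bank of
+  // physical registers.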
+ static constexpr SetType AllSingleMask = AllPhysMask << ShiftSingle;
+ static constexpr SetType AllDoubleMask = AllPhysMask << ShiftDouble;
+ static constexpr SetType AllSimd128Mask = AllPhysMask << ShiftSimd128;
+ static constexpr SetType AllMask =
+ AllDoubleMask | AllSingleMask | AllSimd128Mask;
+ static constexpr SetType AliasMask = (SetType(1) << ShiftSingle) |
+ (SetType(1) << ShiftDouble) |
+ (SetType(1) << ShiftSimd128);
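+  // AliasMask has bit 0 of each bank set, so shifting it left by a physical
+  // encoding N yields the set {sN, dN, vN}; see
+  // FloatRegister::alignedOrDominatedAliasedSet().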
+
+ static_assert(ShiftSingle == 0,
+ "Or the NonVolatileMask must be computed differently");
+
+ // s31 is the ScratchFloatReg.
+ static constexpr SetType NonVolatileSingleMask =
+ SetType((1 << FloatRegisters::s8) | (1 << FloatRegisters::s9) |
+ (1 << FloatRegisters::s10) | (1 << FloatRegisters::s11) |
+ (1 << FloatRegisters::s12) | (1 << FloatRegisters::s13) |
+ (1 << FloatRegisters::s14) | (1 << FloatRegisters::s15) |
+ (1 << FloatRegisters::s16) | (1 << FloatRegisters::s17) |
+ (1 << FloatRegisters::s18) | (1 << FloatRegisters::s19) |
+ (1 << FloatRegisters::s20) | (1 << FloatRegisters::s21) |
+ (1 << FloatRegisters::s22) | (1 << FloatRegisters::s23) |
+ (1 << FloatRegisters::s24) | (1 << FloatRegisters::s25) |
+ (1 << FloatRegisters::s26) | (1 << FloatRegisters::s27) |
+ (1 << FloatRegisters::s28) | (1 << FloatRegisters::s29) |
+ (1 << FloatRegisters::s30));
+
+ static constexpr SetType NonVolatileMask =
+ (NonVolatileSingleMask << ShiftSingle) |
+ (NonVolatileSingleMask << ShiftDouble) |
+ (NonVolatileSingleMask << ShiftSimd128);
+
+ static constexpr SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+ static constexpr SetType WrapperMask = VolatileMask;
+
+ static_assert(ShiftSingle == 0,
+ "Or the NonAllocatableMask must be computed differently");
+
+ // d31 is the ScratchFloatReg.
+ static constexpr SetType NonAllocatableSingleMask =
+ (SetType(1) << FloatRegisters::s31);
+
+ static constexpr SetType NonAllocatableMask =
+ NonAllocatableSingleMask | (NonAllocatableSingleMask << ShiftDouble) |
+ (NonAllocatableSingleMask << ShiftSimd128);
+
+ static constexpr SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ float s;
+ double d;
+ uint8_t v128[16];
+ };
+
+ static constexpr Encoding encoding(Code c) {
+ // assert() not available in constexpr function.
+ // assert(c < Total);
+ return Encoding(c & 31);
+ }
+
+ static constexpr Kind kind(Code c) {
+ // assert() not available in constexpr function.
+ // assert(c < Total && ((c >> 5) & 3) < NumTypes);
+ return Kind((c >> 5) & 3);
+ }
+
+ static constexpr Code fromParts(uint32_t encoding, uint32_t kind,
+ uint32_t invalid) {
+ return Code((invalid << 7) | (kind << 5) | encoding);
+ }
+};
+
+static const uint32_t SpillSlotSize =
+ std::max(sizeof(Registers::RegisterContent),
+ sizeof(FloatRegisters::RegisterContent));
+
+static const uint32_t ShadowStackSpace = 0;
+
+// When our only strategy for far jumps is to encode the offset directly, and
+// not insert any jump islands during assembly for even further jumps, then the
+// architecture restricts us to -2^27 .. 2^27-4, to fit into a signed 28-bit
+// value. We further reduce this range to allow the far-jump inserting code to
+// have some breathing room.
+static const uint32_t JumpImmediateRange = ((1 << 27) - (20 * 1024 * 1024));
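+// (1 << 27 is 128 MiB; reserving 20 MiB of headroom leaves roughly 108 MiB of
+// directly encodable branch range.)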
+
+static const uint32_t ABIStackAlignment = 16;
+static const uint32_t CodeAlignment = 16;
+static const bool StackKeptAligned = false;
+
+// Although sp is only usable if 16-byte alignment is kept,
+// the Pseudo-StackPointer enables use of 8-byte alignment.
+static const uint32_t StackAlignment = 8;
+static const uint32_t NativeFrameSize = 8;
+
+struct FloatRegister {
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 16, "SetType must be 128 bits");
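+    // Fold the Single, Double, and Simd128 banks together so that each
+    // physical register is counted only once.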
+ x |= x >> FloatRegisters::TotalPhys;
+ x |= x >> FloatRegisters::TotalPhys;
+ x &= FloatRegisters::AllPhysMask;
+ MOZ_ASSERT(x.high() == 0);
+ MOZ_ASSERT((x.low() >> 32) == 0);
+ return mozilla::CountPopulation32(x.low());
+ }
+
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 16, "SetType");
+ return x.countTrailingZeroes();
+ }
+ static uint32_t LastBit(SetType x) {
+ static_assert(sizeof(SetType) == 16, "SetType");
+ return 127 - x.countLeadingZeroes();
+ }
+
+ static constexpr size_t SizeOfSimd128 = 16;
+
+ private:
+ // These fields only hold valid values: an invalid register is always
+ // represented as a valid encoding and kind with the invalid_ bit set.
+ uint8_t encoding_; // 32 encodings
+ uint8_t kind_; // Double, Single, Simd128
+ bool invalid_;
+
+ typedef Codes::Kind Kind;
+
+ public:
+ constexpr FloatRegister(Encoding encoding, Kind kind)
+ : encoding_(encoding), kind_(kind), invalid_(false) {
+ // assert(uint32_t(encoding) < Codes::TotalPhys);
+ }
+
+ constexpr FloatRegister()
+ : encoding_(0), kind_(FloatRegisters::Double), invalid_(true) {}
+
+ static FloatRegister FromCode(uint32_t i) {
+ MOZ_ASSERT(i < Codes::Total);
+ return FloatRegister(FloatRegisters::encoding(i), FloatRegisters::kind(i));
+ }
+
+ bool isSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Single;
+ }
+ bool isDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Double;
+ }
+ bool isSimd128() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Simd128;
+ }
+ bool isInvalid() const { return invalid_; }
+
+ FloatRegister asSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Single);
+ }
+ FloatRegister asDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Double);
+ }
+ FloatRegister asSimd128() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Simd128);
+ }
+
+ constexpr uint32_t size() const {
+ MOZ_ASSERT(!invalid_);
+ if (kind_ == FloatRegisters::Double) {
+ return sizeof(double);
+ }
+ if (kind_ == FloatRegisters::Single) {
+ return sizeof(float);
+ }
+ MOZ_ASSERT(kind_ == FloatRegisters::Simd128);
+ return SizeOfSimd128;
+ }
+
+ constexpr Code code() const {
+ // assert(!invalid_);
+ return Codes::fromParts(encoding_, kind_, invalid_);
+ }
+
+ constexpr Encoding encoding() const {
+ MOZ_ASSERT(!invalid_);
+ return Encoding(encoding_);
+ }
+
+ const char* name() const { return FloatRegisters::GetName(code()); }
+ bool volatile_() const {
+ MOZ_ASSERT(!invalid_);
+ return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+ }
+ constexpr bool operator!=(FloatRegister other) const {
+ return code() != other.code();
+ }
+ constexpr bool operator==(FloatRegister other) const {
+ return code() == other.code();
+ }
+
+ bool aliases(FloatRegister other) const {
+ return other.encoding_ == encoding_;
+ }
+  // This function mostly exists for the ARM backend. It ensures that two
+  // floating point registers' types are equivalent. E.g. S0 is not equivalent
+  // to D16, since S0 holds a float32 and D16 holds a double.
+  // Since all floating point registers on x86 and x64 are equivalent, it is
+  // reasonable for this function to do the same.
+ bool equiv(FloatRegister other) const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == other.kind_;
+ }
+
+ uint32_t numAliased() const { return Codes::NumTypes; }
+ uint32_t numAlignedAliased() { return numAliased(); }
+
+ FloatRegister aliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(!invalid_);
+ MOZ_ASSERT(aliasIdx < numAliased());
+ return FloatRegister(Encoding(encoding_),
+ Kind((aliasIdx + kind_) % Codes::NumTypes));
+ }
+ FloatRegister alignedAliased(uint32_t aliasIdx) { return aliased(aliasIdx); }
+ SetType alignedOrDominatedAliasedSet() const {
+ return Codes::AliasMask << encoding_;
+ }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName Name = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ return LiveAsIndexableSet<Name>(s);
+ }
+
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+ const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+
+ // For N in 0..31, if any of sN, dN or qN is a member of `s`, the
+ // returned set will contain all of sN, dN and qN.
+ static TypedRegisterSet<FloatRegister> BroadcastToAllSizes(
+ const TypedRegisterSet<FloatRegister>& s);
+};
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Vector128>(SetType set) {
+ return set & FloatRegisters::AllSimd128Mask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
+ return set;
+}
+
+// ARM/D32 has double registers that cannot be treated as float32.
+// Luckily, ARMv8 doesn't have the same misfortune.
+inline bool hasUnaliasedDouble() { return false; }
+
+// ARM prior to ARMv8 also has doubles that alias multiple floats.
+// Again, ARMv8 is in the clear.
+inline bool hasMultiAlias() { return false; }
+
+uint32_t GetARM64Flags();
+
+bool CanFlushICacheFromBackgroundThreads();
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_arm64_Architecture_arm64_h
diff --git a/js/src/jit/arm64/Assembler-arm64.cpp b/js/src/jit/arm64/Assembler-arm64.cpp
new file mode 100644
index 0000000000..1e441ae635
--- /dev/null
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -0,0 +1,609 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/Assembler-arm64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/Marking.h"
+#include "jit/arm64/Architecture-arm64.h"
+#include "jit/arm64/MacroAssembler-arm64.h"
+#include "jit/arm64/vixl/Disasm-vixl.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/ExecutableAllocator.h"
+#include "vm/Realm.h"
+
+#include "gc/StoreBuffer-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::CountLeadingZeroes32;
+using mozilla::DebugOnly;
+
+// Note this is used for inter-wasm calls and may pass arguments and results
+// in floating point registers even if the system ABI does not.
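+//
+// For example, a signature (Int32, Double, Int64, Float32) is assigned x0, d0,
+// x1, s1 in turn: integer and floating-point argument registers are counted
+// independently, and arguments that do not fit in registers are passed on the
+// stack.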
+
+ABIArg ABIArgGenerator::next(MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uintptr_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_));
+ intRegIndex_++;
+ break;
+
+ case MIRType::Float32:
+ case MIRType::Double:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(double);
+ break;
+ }
+ current_ = ABIArg(FloatRegister(FloatRegisters::Encoding(floatRegIndex_),
+ type == MIRType::Double
+ ? FloatRegisters::Double
+ : FloatRegisters::Single));
+ floatRegIndex_++;
+ break;
+
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += FloatRegister::SizeOfSimd128;
+ break;
+ }
+ current_ = ABIArg(FloatRegister(FloatRegisters::Encoding(floatRegIndex_),
+ FloatRegisters::Simd128));
+ floatRegIndex_++;
+ break;
+#endif
+
+ default:
+      // Note that in Assembler-x64.cpp there's a special case for Win64,
+      // which does not allow passing SIMD by value. Since Win64 also exists
+      // on ARM64, we may need to duplicate that logic here.
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
+namespace js {
+namespace jit {
+
+void Assembler::finish() {
+ armbuffer_.flushPool();
+
+ // The extended jump table is part of the code buffer.
+ ExtendedJumpTable_ = emitExtendedJumpTable();
+ Assembler::FinalizeCode();
+}
+
+bool Assembler::appendRawCode(const uint8_t* code, size_t numBytes) {
+ flush();
+ return armbuffer_.appendRawCode(code, numBytes);
+}
+
+bool Assembler::reserve(size_t size) {
+ // This buffer uses fixed-size chunks so there's no point in reserving
+ // now vs. on-demand.
+ return !oom();
+}
+
+bool Assembler::swapBuffer(wasm::Bytes& bytes) {
+ // For now, specialize to the one use case. As long as wasm::Bytes is a
+ // Vector, not a linked-list of chunks, there's not much we can do other
+ // than copy.
+ MOZ_ASSERT(bytes.empty());
+ if (!bytes.resize(bytesNeeded())) {
+ return false;
+ }
+ armbuffer_.executableCopy(bytes.begin());
+ return true;
+}
+
+BufferOffset Assembler::emitExtendedJumpTable() {
+ if (!pendingJumps_.length() || oom()) {
+ return BufferOffset();
+ }
+
+ armbuffer_.flushPool();
+ armbuffer_.align(SizeOfJumpTableEntry);
+
+ BufferOffset tableOffset = armbuffer_.nextOffset();
+
+ for (size_t i = 0; i < pendingJumps_.length(); i++) {
+ // Each JumpTableEntry is of the form:
+ // LDR ip0 [PC, 8]
+ // BR ip0
+ // [Patchable 8-byte constant low bits]
+ // [Patchable 8-byte constant high bits]
+ DebugOnly<size_t> preOffset = size_t(armbuffer_.nextOffset().getOffset());
+
+ // The unguarded use of ScratchReg64 here is OK:
+ //
+ // - The present function is called from code that does not claim any
+    //   scratch registers; we're done compiling user code and are emitting jump
+ // tables. Hence the scratch registers are available when we enter.
+ //
+ // - The pendingJumps_ represent jumps to other code sections that are not
+ // known to this MacroAssembler instance, and we're generating code to
+ // jump there. It is safe to assume that any code using such a generated
+ // branch to an unknown location did not store any valuable value in any
+ // scratch register. Hence the scratch registers can definitely be
+ // clobbered here.
+ //
+ // - Scratch register usage is restricted to sequential control flow within
+ // MacroAssembler functions. Hence the scratch registers will not be
+ // clobbered by ldr and br as they are Assembler primitives, not
+ // MacroAssembler functions.
+
+ ldr(ScratchReg64, ptrdiff_t(8 / vixl::kInstructionSize));
+ br(ScratchReg64);
+
+ DebugOnly<size_t> prePointer = size_t(armbuffer_.nextOffset().getOffset());
+ MOZ_ASSERT_IF(!oom(),
+ prePointer - preOffset == OffsetOfJumpTableEntryPointer);
+
+ brk(0x0);
+ brk(0x0);
+
+ DebugOnly<size_t> postOffset = size_t(armbuffer_.nextOffset().getOffset());
+
+ MOZ_ASSERT_IF(!oom(), postOffset - preOffset == SizeOfJumpTableEntry);
+ }
+
+ if (oom()) {
+ return BufferOffset();
+ }
+
+ return tableOffset;
+}
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ // Copy the code and all constant pools into the output buffer.
+ armbuffer_.executableCopy(buffer);
+
+ // Patch any relative jumps that target code outside the buffer.
+ // The extended jump table may be used for distant jumps.
+ for (size_t i = 0; i < pendingJumps_.length(); i++) {
+ RelativePatch& rp = pendingJumps_[i];
+ MOZ_ASSERT(rp.target);
+
+ Instruction* target = (Instruction*)rp.target;
+ Instruction* branch = (Instruction*)(buffer + rp.offset.getOffset());
+ JumpTableEntry* extendedJumpTable = reinterpret_cast<JumpTableEntry*>(
+ buffer + ExtendedJumpTable_.getOffset());
+ if (branch->BranchType() != vixl::UnknownBranchType) {
+ if (branch->IsTargetReachable(target)) {
+ branch->SetImmPCOffsetTarget(target);
+ } else {
+ JumpTableEntry* entry = &extendedJumpTable[i];
+ branch->SetImmPCOffsetTarget(entry->getLdr());
+ entry->data = target;
+ }
+ } else {
+      // This is currently a two-instruction call. It should be possible to
+      // optimize it into a single-instruction call plus a nop in some
+      // instances, but this works.
+ }
+ }
+}
+
+BufferOffset Assembler::immPool(ARMRegister dest, uint8_t* value,
+ vixl::LoadLiteralOp op, const LiteralDoc& doc,
+ ARMBuffer::PoolEntry* pe) {
+ uint32_t inst = op | Rt(dest);
+ const size_t numInst = 1;
+ const unsigned sizeOfPoolEntryInBytes = 4;
+ const unsigned numPoolEntries = sizeof(value) / sizeOfPoolEntryInBytes;
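+  // Note that sizeof(value) is the size of the pointer argument itself
+  // (8 bytes), so this allocates two 4-byte pool entries, matching the 64-bit
+  // literal loaded by immPool64's LDR_x_lit.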
+ return allocLiteralLoadEntry(numInst, numPoolEntries, (uint8_t*)&inst, value,
+ doc, pe);
+}
+
+BufferOffset Assembler::immPool64(ARMRegister dest, uint64_t value,
+ ARMBuffer::PoolEntry* pe) {
+ return immPool(dest, (uint8_t*)&value, vixl::LDR_x_lit, LiteralDoc(value),
+ pe);
+}
+
+BufferOffset Assembler::fImmPool(ARMFPRegister dest, uint8_t* value,
+ vixl::LoadLiteralOp op,
+ const LiteralDoc& doc) {
+ uint32_t inst = op | Rt(dest);
+ const size_t numInst = 1;
+ const unsigned sizeOfPoolEntryInBits = 32;
+ const unsigned numPoolEntries = dest.size() / sizeOfPoolEntryInBits;
+ return allocLiteralLoadEntry(numInst, numPoolEntries, (uint8_t*)&inst, value,
+ doc);
+}
+
+BufferOffset Assembler::fImmPool64(ARMFPRegister dest, double value) {
+ return fImmPool(dest, (uint8_t*)&value, vixl::LDR_d_lit, LiteralDoc(value));
+}
+
+BufferOffset Assembler::fImmPool32(ARMFPRegister dest, float value) {
+ return fImmPool(dest, (uint8_t*)&value, vixl::LDR_s_lit, LiteralDoc(value));
+}
+
+void Assembler::bind(Label* label, BufferOffset targetOffset) {
+#ifdef JS_DISASM_ARM64
+ spew_.spewBind(label);
+#endif
+ // Nothing has seen the label yet: just mark the location.
+ // If we've run out of memory, don't attempt to modify the buffer which may
+ // not be there. Just mark the label as bound to the (possibly bogus)
+ // targetOffset.
+ if (!label->used() || oom()) {
+ label->bind(targetOffset.getOffset());
+ return;
+ }
+
+ // Get the most recent instruction that used the label, as stored in the
+ // label. This instruction is the head of an implicit linked list of label
+ // uses.
+ BufferOffset branchOffset(label);
+
+ while (branchOffset.assigned()) {
+ // Before overwriting the offset in this instruction, get the offset of
+ // the next link in the implicit branch list.
+ BufferOffset nextOffset = NextLink(branchOffset);
+
+ // Linking against the actual (Instruction*) would be invalid,
+ // since that Instruction could be anywhere in memory.
+ // Instead, just link against the correct relative offset, assuming
+ // no constant pools, which will be taken into consideration
+ // during finalization.
+ ptrdiff_t relativeByteOffset =
+ targetOffset.getOffset() - branchOffset.getOffset();
+ Instruction* link = getInstructionAt(branchOffset);
+
+ // This branch may still be registered for callbacks. Stop tracking it.
+ vixl::ImmBranchType branchType = link->BranchType();
+ vixl::ImmBranchRangeType branchRange =
+ Instruction::ImmBranchTypeToRange(branchType);
+ if (branchRange < vixl::NumShortBranchRangeTypes) {
+ BufferOffset deadline(
+ branchOffset.getOffset() +
+ Instruction::ImmBranchMaxForwardOffset(branchRange));
+ armbuffer_.unregisterBranchDeadline(branchRange, deadline);
+ }
+
+ // Is link able to reach the label?
+ if (link->IsPCRelAddressing() ||
+ link->IsTargetReachable(link + relativeByteOffset)) {
+ // Write a new relative offset into the instruction.
+ link->SetImmPCOffsetTarget(link + relativeByteOffset);
+ } else {
+ // This is a short-range branch, and it can't reach the label directly.
+ // Verify that it branches to a veneer: an unconditional branch.
+ MOZ_ASSERT(getInstructionAt(nextOffset)->BranchType() ==
+ vixl::UncondBranchType);
+ }
+
+ branchOffset = nextOffset;
+ }
+
+ // Bind the label, so that future uses may encode the offset immediately.
+ label->bind(targetOffset.getOffset());
+}
+
+void Assembler::addPendingJump(BufferOffset src, ImmPtr target,
+ RelocationKind reloc) {
+ MOZ_ASSERT(target.value != nullptr);
+
+ if (reloc == RelocationKind::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+
+  // This jump is not patchable at runtime. Extended jump table entry
+  // requirements cannot be known until finalization, so to be safe, give each
+  // jump an entry. This also causes GC tracing of the target.
+ enoughMemory_ &=
+ pendingJumps_.append(RelativePatch(src, target.value, reloc));
+}
+
+void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall) {
+ Instruction* dest = (Instruction*)start.raw();
+ ptrdiff_t relTarget = (Instruction*)toCall.raw() - dest;
+ ptrdiff_t relTarget00 = relTarget >> 2;
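+  // A bl instruction encodes a signed 26-bit offset counted in 4-byte
+  // instructions, hence the shift by 2 above and the checks below.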
+ MOZ_RELEASE_ASSERT((relTarget & 0x3) == 0);
+ MOZ_RELEASE_ASSERT(vixl::IsInt26(relTarget00));
+
+ bl(dest, relTarget00);
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expected) {
+ Instruction* i = (Instruction*)label.raw();
+ void** pValue = i->LiteralAddress<void**>();
+ MOZ_ASSERT(*pValue == expected.value);
+ *pValue = newValue.value;
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ ImmPtr newValue, ImmPtr expected) {
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expected.value));
+}
+
+void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
+ Instruction* i = (Instruction*)inst_.raw();
+ MOZ_ASSERT(i->IsAddSubImmediate());
+
+ // Refer to instruction layout in ToggleToCmp().
+ int imm19 = (int)i->Bits(23, 5);
+ MOZ_ASSERT(vixl::IsInt19(imm19));
+
+ b(i, imm19, Always);
+}
+
+void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
+ Instruction* i = (Instruction*)inst_.raw();
+ MOZ_ASSERT(i->IsCondB());
+
+ int imm19 = i->ImmCondBranch();
+  // Bit 23 is reserved, and the simulator throws an assertion when it is set.
+  // It would be messy to decode, but we could steal bit 30 or bit 31 if
+  // needed.
+ MOZ_ASSERT(vixl::IsInt18(imm19));
+
+ // 31 - 64-bit if set, 32-bit if unset. (OK!)
+ // 30 - sub if set, add if unset. (OK!)
+ // 29 - SetFlagsBit. Must be set.
+ // 22:23 - ShiftAddSub. (OK!)
+ // 10:21 - ImmAddSub. (OK!)
+ // 5:9 - First source register (Rn). (OK!)
+ // 0:4 - Destination Register. Must be xzr.
+
+ // From the above, there is a safe 19-bit contiguous region from 5:23.
+ Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB |
+ Flags(vixl::SetFlags) | Rd(vixl::xzr) |
+ (imm19 << vixl::Rn_offset));
+}
+
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+ const Instruction* first = reinterpret_cast<Instruction*>(inst_.raw());
+ Instruction* load;
+ Instruction* call;
+
+ // There might be a constant pool at the very first instruction.
+ first = first->skipPool();
+
+ // Skip the stack pointer restore instruction.
+ if (first->IsStackPtrSync()) {
+ first = first->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
+ }
+
+ load = const_cast<Instruction*>(first);
+
+ // The call instruction follows the load, but there may be an injected
+ // constant pool.
+ call = const_cast<Instruction*>(
+ load->InstructionAtOffset(vixl::kInstructionSize)->skipPool());
+
+ if (call->IsBLR() == enabled) {
+ return;
+ }
+
+ if (call->IsBLR()) {
+ // If the second instruction is blr(), then we have:
+ // ldr x17, [pc, offset]
+ // blr x17
+ MOZ_ASSERT(load->IsLDR());
+ // We want to transform this to:
+ // adr xzr, [pc, offset]
+ // nop
+ int32_t offset = load->ImmLLiteral();
+ adr(load, xzr, int32_t(offset));
+ nop(call);
+ } else {
+ // We have:
+ // adr xzr, [pc, offset] (or ldr x17, [pc, offset])
+ // nop
+ MOZ_ASSERT(load->IsADR() || load->IsLDR());
+ MOZ_ASSERT(call->IsNOP());
+ // Transform this to:
+ // ldr x17, [pc, offset]
+ // blr x17
+ int32_t offset = (int)load->ImmPCRawOffset();
+ MOZ_ASSERT(vixl::IsInt19(offset));
+ ldr(load, ScratchReg2_64, int32_t(offset));
+ blr(call, ScratchReg2_64);
+ }
+}
+
+// Patches loads generated by MacroAssemblerCompat::mov(CodeLabel*, Register).
+// The loading code is implemented in movePatchablePtr().
+void Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value) {
+ MOZ_ASSERT(inst0->IsLDR());
+ uint64_t* literal = inst0->LiteralAddress<uint64_t*>();
+ *literal = value;
+}
+
+class RelocationIterator {
+ CompactBufferReader reader_;
+ uint32_t offset_ = 0;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}
+
+ bool read() {
+ if (!reader_.more()) {
+ return false;
+ }
+ offset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const { return offset_; }
+};
+
+static JitCode* CodeFromJump(JitCode* code, uint8_t* jump) {
+ const Instruction* inst = (const Instruction*)jump;
+ uint8_t* target;
+
+ // We're expecting a call created by MacroAssembler::call(JitCode*).
+ // It looks like:
+ //
+ // ldr scratch, [pc, offset]
+ // blr scratch
+ //
+ // If the call has been toggled by ToggleCall(), it looks like:
+ //
+ // adr xzr, [pc, offset]
+ // nop
+ //
+ // There might be a constant pool at the very first instruction.
+ // See also ToggleCall().
+ inst = inst->skipPool();
+
+ // Skip the stack pointer restore instruction.
+ if (inst->IsStackPtrSync()) {
+ inst = inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
+ }
+
+ if (inst->BranchType() != vixl::UnknownBranchType) {
+ // This is an immediate branch.
+ target = (uint8_t*)inst->ImmPCOffsetTarget();
+ } else if (inst->IsLDR()) {
+ // This is an ldr+blr call that is enabled. See ToggleCall().
+ mozilla::DebugOnly<const Instruction*> nextInst =
+ inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
+ MOZ_ASSERT(nextInst->IsNOP() || nextInst->IsBLR());
+ target = (uint8_t*)inst->Literal64();
+ } else if (inst->IsADR()) {
+ // This is a disabled call: adr+nop. See ToggleCall().
+ mozilla::DebugOnly<const Instruction*> nextInst =
+ inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
+ MOZ_ASSERT(nextInst->IsNOP());
+ ptrdiff_t offset = inst->ImmPCRawOffset() << vixl::kLiteralEntrySizeLog2;
+ // This is what Literal64 would do with the corresponding ldr.
+ memcpy(&target, inst + offset, sizeof(target));
+ } else {
+ MOZ_CRASH("Unrecognized jump instruction.");
+ }
+
+ // If the jump is within the code buffer, it uses the extended jump table.
+ if (target >= code->raw() &&
+ target < code->raw() + code->instructionsSize()) {
+ MOZ_ASSERT(target + Assembler::SizeOfJumpTableEntry <=
+ code->raw() + code->instructionsSize());
+
+ uint8_t** patchablePtr =
+ (uint8_t**)(target + Assembler::OffsetOfJumpTableEntryPointer);
+ target = *patchablePtr;
+ }
+
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode* child = CodeFromJump(code, code->raw() + iter.offset());
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
+ }
+}
+
+/* static */
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+
+ uint8_t* buffer = code->raw();
+
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* load = (Instruction*)&buffer[offset];
+
+ // The only valid traceable operation is a 64-bit load to an ARMRegister.
+ // Refer to movePatchablePtr() for generation.
+ MOZ_ASSERT(load->Mask(vixl::LoadLiteralMask) == vixl::LDR_x_lit);
+
+ uintptr_t* literalAddr = load->LiteralAddress<uintptr_t*>();
+ uintptr_t literal = *literalAddr;
+
+ // Data relocations can be for Values or for raw pointers. If a Value is
+ // zero-tagged, we can trace it as if it were a raw pointer. If a Value
+ // is not zero-tagged, we have to interpret it as a Value to ensure that the
+ // tag bits are masked off to recover the actual pointer.
+
+ if (literal >> JSVAL_TAG_SHIFT) {
+ // This relocation is a Value with a non-zero tag.
+ Value v = Value::fromRawBits(literal);
+ TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
+ if (*literalAddr != v.asRawBits()) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ *literalAddr = v.asRawBits();
+ }
+ continue;
+ }
+
+ // This relocation is a raw pointer or a Value with a zero tag.
+ // No barriers needed since the pointers are constants.
+ gc::Cell* cell = reinterpret_cast<gc::Cell*>(literal);
+ MOZ_ASSERT(gc::IsCellPointerValid(cell));
+ TraceManuallyBarrieredGenericPointerEdge(trc, &cell, "jit-masm-ptr");
+ if (uintptr_t(cell) != literal) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ *literalAddr = uintptr_t(cell);
+ }
+ }
+}
+
+void Assembler::retarget(Label* label, Label* target) {
+#ifdef JS_DISASM_ARM64
+ spew_.spewRetarget(label, target);
+#endif
+ if (label->used()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ BufferOffset labelBranchOffset(label);
+
+ // Find the head of the use chain for label.
+ BufferOffset next = NextLink(labelBranchOffset);
+ while (next.assigned()) {
+ labelBranchOffset = next;
+ next = NextLink(next);
+ }
+
+ // Then patch the head of label's use chain to the tail of target's
+ // use chain, prepending the entire use chain of target.
+ SetNextLink(labelBranchOffset, BufferOffset(target));
+ target->use(label->offset());
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ target->use(label->offset());
+ }
+ }
+ label->reset();
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm64/Assembler-arm64.h b/js/src/jit/arm64/Assembler-arm64.h
new file mode 100644
index 0000000000..9745e9d262
--- /dev/null
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -0,0 +1,793 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef A64_ASSEMBLER_A64_H_
+#define A64_ASSEMBLER_A64_H_
+
+#include <iterator>
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/shared/Disassembler-shared.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+// VIXL imports.
+typedef vixl::Register ARMRegister;
+typedef vixl::FPRegister ARMFPRegister;
+using vixl::ARMBuffer;
+using vixl::Instruction;
+
+using LabelDoc = DisassemblerSpew::LabelDoc;
+using LiteralDoc = DisassemblerSpew::LiteralDoc;
+
+static const uint32_t AlignmentAtPrologue = 0;
+static const uint32_t AlignmentMidPrologue = 8;
+static const Scale ScalePointer = TimesEight;
+
+// The MacroAssembler uses scratch registers extensively and unexpectedly.
+// For safety, scratch registers should always be acquired using
+// vixl::UseScratchRegisterScope.
+static constexpr Register ScratchReg{Registers::ip0};
+static constexpr ARMRegister ScratchReg64 = {ScratchReg, 64};
+
+static constexpr Register ScratchReg2{Registers::ip1};
+static constexpr ARMRegister ScratchReg2_64 = {ScratchReg2, 64};
+
+static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::d0,
+ FloatRegisters::Double};
+static constexpr FloatRegister ScratchDoubleReg_ = {FloatRegisters::d31,
+ FloatRegisters::Double};
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg_) {}
+};
+
+static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::s0,
+ FloatRegisters::Single};
+static constexpr FloatRegister ScratchFloat32Reg_ = {FloatRegisters::s31,
+ FloatRegisters::Single};
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg_) {}
+};
+
+#ifdef ENABLE_WASM_SIMD
+static constexpr FloatRegister ReturnSimd128Reg = {FloatRegisters::v0,
+ FloatRegisters::Simd128};
+static constexpr FloatRegister ScratchSimd128Reg = {FloatRegisters::v31,
+ FloatRegisters::Simd128};
+struct ScratchSimd128Scope : public AutoFloatRegisterScope {
+ explicit ScratchSimd128Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchSimd128Reg) {}
+};
+#else
+struct ScratchSimd128Scope : public AutoFloatRegisterScope {
+ explicit ScratchSimd128Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg_) {
+ MOZ_CRASH("SIMD not enabled");
+ }
+};
+#endif
+
+static constexpr Register InvalidReg{Registers::Invalid};
+static constexpr FloatRegister InvalidFloatReg = {};
+
+static constexpr Register OsrFrameReg{Registers::x3};
+static constexpr Register CallTempReg0{Registers::x9};
+static constexpr Register CallTempReg1{Registers::x10};
+static constexpr Register CallTempReg2{Registers::x11};
+static constexpr Register CallTempReg3{Registers::x12};
+static constexpr Register CallTempReg4{Registers::x13};
+static constexpr Register CallTempReg5{Registers::x14};
+
+static constexpr Register PreBarrierReg{Registers::x1};
+
+static constexpr Register InterpreterPCReg{Registers::x9};
+
+static constexpr Register ReturnReg{Registers::x0};
+static constexpr Register64 ReturnReg64(ReturnReg);
+static constexpr Register JSReturnReg{Registers::x2};
+static constexpr Register FramePointer{Registers::fp};
+static constexpr ARMRegister FramePointer64{FramePointer, 64};
+static constexpr Register ZeroRegister{Registers::sp};
+static constexpr ARMRegister ZeroRegister64{Registers::sp, 64};
+static constexpr ARMRegister ZeroRegister32{Registers::sp, 32};
+
+// [SMDOC] AArch64 Stack Pointer and Pseudo Stack Pointer conventions
+//
+// ================
+//
+// Stack pointer (SP), PseudoStackPointer (PSP), and RealStackPointer:
+//
+// The ARM64 real SP has a constraint: it must be 16-byte aligned whenever it
+// is used as the base pointer for a memory access. (SP+offset need not be
+// 16-byte aligned, but the SP value itself must be.) The SP register may
+// take on unaligned values but may not be used for a memory access while it
+// is unaligned.
+//
+// Stack-alignment checking can be enabled or disabled by a control register;
+// however that register cannot be modified by user space. We have to assume
+// stack alignment checking is enabled, and that does usually appear to be the
+// case. See the ARM Architecture Reference Manual, "D1.8.2 SP alignment
+// checking", for further details.
+//
+// A second constraint is forced upon us by the ARM64 ABI. This requires that
+// all accesses to the stack must be at or above SP. Accesses below SP are
+// strictly forbidden, presumably because the kernel might use that area of
+// memory for its own purposes -- in particular, signal delivery -- and hence
+// it may get trashed at any time.
+//
+// Note this doesn't mean that accesses to the stack must be based off
+// register SP. Only that the effective addresses must be >= SP, regardless
+// of how the address is formed.
+//
+// In order to allow word-wise pushes and pops, some of our ARM64 jits
+// (JS-Baseline, JS-Ion, and Wasm-Ion, but not Wasm-Baseline) dedicate x28 to
+// be used as a PseudoStackPointer (PSP).
+//
+// Initially the PSP will have the same value as the SP. Code can, if it
+// wants, push a single word by subtracting 8 from the PSP, doing SP := PSP,
+// then storing the value at PSP+0. Given other constraints on the alignment
+// of the SP at function call boundaries, this works out OK, at the cost of
+// the two extra instructions per push / pop.
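+//
+// Concretely, such a single-word push of (say) x0 is roughly:
+//
+//   sub x28, x28, #8    (PSP -= 8)
+//   mov sp, x28         (SP := PSP)
+//   str x0, [x28]       (store at PSP+0)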
+//
+// This is all a bit messy, and is probably not robustly adhered to. However,
+// the following appear to be the intended, and mostly implemented, current
+// invariants:
+//
+// (1) PSP is "primary", SP is "secondary". Most stack refs are
+// PSP-relative. SP-relative is rare and (obviously) only done when we
+// know that SP is aligned.
+//
+// (2) At all times, the relationship SP <= PSP is maintained. The fact that
+// SP may validly be less than PSP means that pushes on the stack force
+// the two values to become equal, by copying PSP into SP. However, pops
+// behave differently: PSP moves back up and SP stays the same, since that
+// doesn't break the SP <= PSP invariant.
+//
+// (3) However, immediately before a call instruction, SP and PSP must be the
+// same. To enforce this, PSP is copied into SP by the arm64-specific
+// MacroAssembler::call routines.
+//
+// (4) Also, after a function has returned, it is expected that SP holds the
+// "primary" value. How exactly this is implemented remains not entirely
+// clear and merits further investigation. The following points are
+// believed to be relevant:
+//
+// - For calls to functions observing the system AArch64 ABI, PSP (x28) is
+// callee-saved. That, combined with (3) above, implies SP == PSP
+// immediately after the call returns.
+//
+// - JIT-generated routines return using MacroAssemblerCompat::retn, and
+// that copies PSP into SP (bizarrely; this would make more sense if it
+// copied SP into PSP); but in any case, the point is that they are the
+// same at the point that the return instruction executes.
+//
+// - MacroAssembler::callWithABIPost copies PSP into SP after the return
+// of a call requiring dynamic alignment.
+//
+// Given the above, it is unclear exactly where in the return sequence it
+// is expected that SP == PSP, and also whether it is the callee or caller
+// that is expected to enforce it.
+//
+// In general it would be nice to be able to move (at some time in the future,
+// not now) to a world where *every* assignment to PSP or SP is followed
+// immediately by a copy into the other register. That would make all
+// required correctness proofs trivial in the sense that it would require only
+// local inspection of code immediately following (dominated by) any such
+// assignment. For the moment, however, this is a guideline, not a hard
+// requirement.
+//
+// ================
+//
+// Mechanics of keeping the stack pointers in sync:
+//
+// The following two methods require that the masm's SP has been set to the PSP
+// with MacroAssembler::SetStackPointer64(PseudoStackPointer64), or they will be
+// no-ops. The setup is performed manually by the jits after creating the masm.
+//
+// * MacroAssembler::syncStackPtr() performs SP := PSP, presumably after PSP has
+// been updated, so SP needs to move too. This is used pretty liberally
+// throughout the code base.
+//
+// * MacroAssembler::initPseudoStackPtr() performs PSP := SP. This can be used
+// after calls to non-ABI compliant code; it's not used much.
+//
+// In the ARM64 assembler there is a function Instruction::IsStackPtrSync() that
+// recognizes the instruction emitted by syncStackPtr(), and this is used to
+// skip that instruction a few places, should it be present, in the JS JIT where
+// code is generated to deal with toggled calls.
+//
+// In various places there are calls to MacroAssembler::syncStackPtr() which
+// appear to be redundant. Investigation shows that they often are redundant,
+// but not always. Finding and removing such redundancies would be quite some
+// work, so we live for now with the occasional redundant update. Perusal of
+// the Cortex-A55 and -A72 optimization guides shows no evidence that such
+// assignments are any more expensive than assignments between vanilla integer
+// registers, so the costs of such redundant updates are assumed to be small.
+//
+// Invariants on the PSP at function call boundaries:
+//
+// It *appears* that the following invariants exist:
+//
+// * On entry to JIT code, PSP == SP, ie the stack pointer is transmitted via
+// both registers.
+//
+// * On entry to C++ code, PSP == SP. Certainly it appears that all calls
+// created by the MacroAssembler::call(..) routines perform 'syncStackPtr'
+// immediately before the call, and all ABI calls are routed through the
+// MacroAssembler::call layer.
+//
+// * The stubs generated by WasmStubs.cpp assume that, on entry, SP is the
+// active stack pointer and that PSP is dead.
+//
+// * The PSP is non-volatile (callee-saved). Along a normal return path from
+// JIT code, simply having PSP == SP on exit is correct, since the exit SP is
+// the same as the entry SP by the JIT ABI.
+//
+// * Call-outs to non-JIT C++ code do not need to set up the PSP (it won't be
+// used), and will not need to restore the PSP on return because x28 is
+// non-volatile in the ARM64 ABI.
+//
+// ================
+//
+// Future cleanups to the SP-vs-PSP machinery:
+//
+// Currently we have somewhat unclear invariants, which are not obviously
+// always enforced, and which may require complex non-local reasoning.
+// Auditing the code to ensure that the invariants always hold, whilst not
+// generating duplicate syncs, is close to impossible. A future rework to
+// tidy this might be as follows. (This suggestion pertains to the entire
+// JIT complex: all of the JS compilers, wasm compilers, stub generators,
+// regexp compilers, etc.)
+//
+// Currently we have that, in JIT-generated code, PSP is "primary" and SP is
+// "secondary", meaning that PSP has the "real" stack pointer value and SP is
+// updated whenever PSP acquires a lower value, so as to ensure that SP <= PSP.
+// An exception to this scheme is the stubs code generated by WasmStubs.cpp,
+// which assumes that SP is "primary" and PSP is dead.
+//
+// It might give us an easier incremental path to eventually removing PSP
+// entirely if we switched to having SP always be the primary. That is:
+//
+// (1) SP is primary, PSP is secondary
+// (2) After any assignment to SP, it is copied into PSP
+// (3) All (non-frame-pointer-based) stack accesses are PSP-relative
+// (as at present)
+//
+// This would have the effect that:
+//
+// * It would reinstate the invariant that on all targets, the "real" SP value
+// is in the ABI-and-or-hardware-mandated stack pointer register.
+//
+// * It would give us a simple story about calls and returns:
+// - for calls to non-JIT generated code (viz, C++ etc), we need no extra
+// copies, because PSP (x28) is callee-saved
+// - for calls to JIT-generated code, we need no extra copies, because of (2)
+// above
+//
+// * We could incrementally migrate those parts of the code generator where we
+// know that SP is 16-aligned, to use SP- rather than PSP-relative accesses
+//
+// * The consistent use of (2) would remove the requirement to have to perform
+// path-dependent reasoning (for paths in the generated code, not in the
+// compiler) when reading/understanding the code.
+//
+// * x28 would become free for use by stubs and the baseline compiler without
+// having to worry about interoperating with code that expects x28 to hold a
+// valid PSP.
+//
+// One might ask what mechanical checks we can add to ensure correctness, rather
+// than having to verify these invariants by hand indefinitely. Maybe some
+// combination of:
+//
+// * In debug builds, compiling-in assert(SP == PSP) at critical places. This
+// can be done using the existing `assertStackPtrsSynced` function.
+//
+// * In debug builds, scanning sections of generated code to ensure no
+// SP-relative stack accesses have been created -- for some sections, at
+// least every assignment to SP is immediately followed by a copy to x28.
+// This would also facilitate detection of duplicate syncs.
+//
+// ================
+//
+// Other investigative notes, for the code base at present:
+//
+// * Some disassembly dumps suggest that we sync the stack pointer too often.
+// This could be the result of various pieces of code working at cross
+// purposes when syncing the stack pointer, or of not paying attention to the
+// precise invariants.
+//
+// * As documented in RegExpNativeMacroAssembler.cpp, function
+// SMRegExpMacroAssembler::createStackFrame:
+//
+// // ARM64 communicates stack address via SP, but uses a pseudo-sp (PSP) for
+// // addressing. The register we use for PSP may however also be used by
+// // calling code, and it is nonvolatile, so save it. Do this as a special
+// // case first because the generic save/restore code needs the PSP to be
+// // initialized already.
+//
+// and also in function SMRegExpMacroAssembler::exitHandler:
+//
+// // Restore the saved value of the PSP register, this value is whatever the
+// // caller had saved in it, not any actual SP value, and it must not be
+// // overwritten subsequently.
+//
+// The original source for these comments was a patch for bug 1445907.
+//
+// * MacroAssembler-arm64.h has an interesting comment in the retn()
+// function:
+//
+// syncStackPtr(); // SP is always used to transmit the stack between calls.
+//
+// Same comment at abiret() in that file, and in MacroAssembler-arm64.cpp,
+// at callWithABIPre and callWithABIPost.
+//
+// * In Trampoline-arm64.cpp function JitRuntime::generateVMWrapper we find
+//
+// // SP is used to transfer stack across call boundaries.
+// masm.initPseudoStackPtr();
+//
+// after the return point of a callWithVMWrapper. The only reasonable
+// conclusion from all those (assuming they are right) is that SP == PSP.
+//
+// * Wasm-Baseline does not use the PSP, but as Wasm-Ion code requires SP==PSP
+// and tiered code can have Baseline->Ion calls, Baseline will set PSP=SP
+// before a call to wasm code.
+//
+// ================
+
+// StackPointer is intentionally undefined on ARM64 to prevent misuse: using
+// sp as a base register is only valid if sp % 16 == 0.
+static constexpr Register RealStackPointer{Registers::sp};
+
+static constexpr Register PseudoStackPointer{Registers::x28};
+static constexpr ARMRegister PseudoStackPointer64 = {Registers::x28, 64};
+static constexpr ARMRegister PseudoStackPointer32 = {Registers::x28, 32};
+
+static constexpr Register IntArgReg0{Registers::x0};
+static constexpr Register IntArgReg1{Registers::x1};
+static constexpr Register IntArgReg2{Registers::x2};
+static constexpr Register IntArgReg3{Registers::x3};
+static constexpr Register IntArgReg4{Registers::x4};
+static constexpr Register IntArgReg5{Registers::x5};
+static constexpr Register IntArgReg6{Registers::x6};
+static constexpr Register IntArgReg7{Registers::x7};
+static constexpr Register HeapReg{Registers::x21};
+
+// Define unsized Registers.
+#define DEFINE_UNSIZED_REGISTERS(N) \
+ static constexpr Register r##N{Registers::x##N};
+REGISTER_CODE_LIST(DEFINE_UNSIZED_REGISTERS)
+#undef DEFINE_UNSIZED_REGISTERS
+static constexpr Register ip0{Registers::x16};
+static constexpr Register ip1{Registers::x17};
+static constexpr Register fp{Registers::x29};
+static constexpr Register lr{Registers::x30};
+static constexpr Register rzr{Registers::xzr};
+
+// Import VIXL registers into the js::jit namespace.
+#define IMPORT_VIXL_REGISTERS(N) \
+ static constexpr ARMRegister w##N = vixl::w##N; \
+ static constexpr ARMRegister x##N = vixl::x##N;
+REGISTER_CODE_LIST(IMPORT_VIXL_REGISTERS)
+#undef IMPORT_VIXL_REGISTERS
+static constexpr ARMRegister wzr = vixl::wzr;
+static constexpr ARMRegister xzr = vixl::xzr;
+static constexpr ARMRegister wsp = vixl::wsp;
+static constexpr ARMRegister sp = vixl::sp;
+
+// Import VIXL VRegisters into the js::jit namespace.
+#define IMPORT_VIXL_VREGISTERS(N) \
+ static constexpr ARMFPRegister s##N = vixl::s##N; \
+ static constexpr ARMFPRegister d##N = vixl::d##N;
+REGISTER_CODE_LIST(IMPORT_VIXL_VREGISTERS)
+#undef IMPORT_VIXL_VREGISTERS
+
+static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg1;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg1;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
+
+static constexpr Register JSReturnReg_Type = r3;
+static constexpr Register JSReturnReg_Data = r2;
+
+static constexpr FloatRegister NANReg = {FloatRegisters::d14,
+ FloatRegisters::Single};
+// N.B. r8 isn't listed as an AAPCS temp register, but we can use it as such
+// because we never use return-structs.
+static constexpr Register CallTempNonArgRegs[] = {r8, r9, r10, r11,
+ r12, r13, r14, r15};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments "
+ "which are used for "
+ "the constant sections of the code buffer. Thus it should be "
+ "larger than the "
+ "alignment for SIMD constants.");
+
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmTrapInstructionLength = 4;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+class Assembler : public vixl::Assembler {
+ public:
+ Assembler() : vixl::Assembler() {}
+
+ typedef vixl::Condition Condition;
+
+ void finish();
+ bool appendRawCode(const uint8_t* code, size_t numBytes);
+ bool reserve(size_t size);
+ bool swapBuffer(wasm::Bytes& bytes);
+
+ // Emit the jump table, returning the BufferOffset to the first entry in the
+ // table.
+ BufferOffset emitExtendedJumpTable();
+ BufferOffset ExtendedJumpTable_;
+ void executableCopy(uint8_t* buffer);
+
+ BufferOffset immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op,
+ const LiteralDoc& doc,
+ ARMBuffer::PoolEntry* pe = nullptr);
+ BufferOffset immPool64(ARMRegister dest, uint64_t value,
+ ARMBuffer::PoolEntry* pe = nullptr);
+ BufferOffset fImmPool(ARMFPRegister dest, uint8_t* value,
+ vixl::LoadLiteralOp op, const LiteralDoc& doc);
+ BufferOffset fImmPool64(ARMFPRegister dest, double value);
+ BufferOffset fImmPool32(ARMFPRegister dest, float value);
+
+ uint32_t currentOffset() const { return nextOffset().getOffset(); }
+
+ void bind(Label* label) { bind(label, nextOffset()); }
+ void bind(Label* label, BufferOffset boff);
+ void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
+
+ void setUnlimitedBuffer() { armbuffer_.setUnlimited(); }
+ bool oom() const {
+ return AssemblerShared::oom() || armbuffer_.oom() ||
+ jumpRelocations_.oom() || dataRelocations_.oom();
+ }
+
+ void copyJumpRelocationTable(uint8_t* dest) const {
+ if (jumpRelocations_.length()) {
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+ }
+ }
+ void copyDataRelocationTable(uint8_t* dest) const {
+ if (dataRelocations_.length()) {
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+ }
+ }
+
+ size_t jumpRelocationTableBytes() const { return jumpRelocations_.length(); }
+ size_t dataRelocationTableBytes() const { return dataRelocations_.length(); }
+ size_t bytesNeeded() const {
+ return SizeOfCodeGenerated() + jumpRelocationTableBytes() +
+ dataRelocationTableBytes();
+ }
+
+ void processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+ }
+
+ static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
+
+ static void Bind(uint8_t* rawCode, const CodeLabel& label) {
+ auto mode = label.linkMode();
+ size_t patchAtOffset = label.patchAt().offset();
+ size_t targetOffset = label.target().offset();
+
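+    // MoveImmediate links patch the 64-bit literal of a pc-relative load;
+    // otherwise the absolute address of the target is written directly at the
+    // patch location.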
+ if (mode == CodeLabel::MoveImmediate) {
+ Instruction* inst = (Instruction*)(rawCode + patchAtOffset);
+ Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + targetOffset));
+ } else {
+ *reinterpret_cast<const void**>(rawCode + patchAtOffset) =
+ rawCode + targetOffset;
+ }
+ }
+
+ void retarget(Label* cur, Label* next);
+
+ // The buffer is about to be linked. Ensure any constant pools or
+ // excess bookkeeping has been flushed to the instruction stream.
+ void flush() { armbuffer_.flushPool(); }
+
+ void comment(const char* msg) {
+#ifdef JS_DISASM_ARM64
+ spew_.spew("; %s", msg);
+#endif
+ }
+
+ void setPrinter(Sprinter* sp) {
+#ifdef JS_DISASM_ARM64
+ spew_.setPrinter(sp);
+#endif
+ }
+
+ static bool SupportsFloatingPoint() { return true; }
+ static bool SupportsUnalignedAccesses() { return true; }
+ static bool SupportsFastUnalignedFPAccesses() { return true; }
+ static bool SupportsWasmSimd() { return true; }
+
+ static bool HasRoundInstruction(RoundingMode mode) {
+ switch (mode) {
+ case RoundingMode::Up:
+ case RoundingMode::Down:
+ case RoundingMode::NearestTiesToEven:
+ case RoundingMode::TowardsZero:
+ return true;
+ }
+ MOZ_CRASH("unexpected mode");
+ }
+
+ protected:
+ // Add a jump whose target is unknown until finalization.
+ // The jump may not be patched at runtime.
+ void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind);
+
+ public:
+ static uint32_t PatchWrite_NearCallSize() { return 4; }
+
+ static uint32_t NopSize() { return 4; }
+
+ static void PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expected);
+
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expected);
+
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will end up being
+ // the call instruction.
+ *(raw - 1) = imm.value;
+ }
+ static uint32_t AlignDoubleArg(uint32_t offset) {
+ MOZ_CRASH("AlignDoubleArg()");
+ }
+ static uintptr_t GetPointer(uint8_t* ptr) {
+ Instruction* i = reinterpret_cast<Instruction*>(ptr);
+ uint64_t ret = i->Literal64();
+ return ret;
+ }
+
+ // Toggle a jmp or cmp emitted by toggledJump().
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ void assertNoGCThings() const {
+#ifdef DEBUG
+ MOZ_ASSERT(dataRelocations_.length() == 0);
+ for (auto& j : pendingJumps_) {
+ MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+ }
+#endif
+ }
+
+ public:
+ // A jump table entry is 2 instructions (8 bytes) followed by 8 bytes of
+ // raw data, 16 bytes in total.
+ static const size_t SizeOfJumpTableEntry = 16;
+
+ struct JumpTableEntry {
+ uint32_t ldr;
+ uint32_t br;
+ void* data;
+
+ Instruction* getLdr() { return reinterpret_cast<Instruction*>(&ldr); }
+ };
+
+ // Offset of the patchable target for the given entry.
+ static const size_t OffsetOfJumpTableEntryPointer = 8;
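+ // With natural alignment, |ldr| sits at offset 0, |br| at offset 4, and the
+ // |data| pointer at offset 8, which is the offset this constant refers to.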
+
+ public:
+ void writeCodePointer(CodeLabel* label) {
+ armbuffer_.assertNoPoolAndNoNops();
+ uintptr_t x = uintptr_t(-1);
+ BufferOffset off = EmitData(&x, sizeof(uintptr_t));
+ label->patchAt()->bind(off.getOffset());
+ }
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess) {
+ MOZ_CRASH("verifyHeapAccessDisassembly");
+ }
+
+ protected:
+ // Structure for fixing up pc-relative loads/jumps when the machine
+ // code gets moved (executable copy, gc, etc.).
+ struct RelativePatch {
+ BufferOffset offset;
+ void* target;
+ RelocationKind kind;
+
+ RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
+ : offset(offset), target(target), kind(kind) {}
+ };
+
+ // List of jumps for which the target is either unknown until finalization,
+ // or cannot be known due to GC. Each entry here requires a unique entry
+ // in the extended jump table, and is patched at finalization.
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> pendingJumps_;
+
+ // Final output formatters.
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+};
+
+static const uint32_t NumIntArgRegs = 8;
+static const uint32_t NumFloatArgRegs = 8;
+
+class ABIArgGenerator {
+ public:
+ ABIArgGenerator()
+ : intRegIndex_(0), floatRegIndex_(0), stackOffset_(0), current_() {}
+
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+
+ protected:
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+ uint32_t stackOffset_;
+ ABIArg current_;
+};
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = r8;
+static constexpr Register ABINonArgReg1 = r9;
+static constexpr Register ABINonArgReg2 = r10;
+static constexpr Register ABINonArgReg3 = r11;
+
+// This register may be volatile or nonvolatile. Avoid d31 which is the
+// ScratchDoubleReg_.
+static constexpr FloatRegister ABINonArgDoubleReg = {FloatRegisters::s16,
+ FloatRegisters::Single};
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different.
+static constexpr Register ABINonArgReturnReg0 = r8;
+static constexpr Register ABINonArgReturnReg1 = r9;
+static constexpr Register ABINonVolatileReg{Registers::x19};
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call, which must preserve the ABI argument, return,
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = lr;
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions. Must be nonvolatile.
+static constexpr Register InstanceReg{Registers::x23};
+
+// Registers used for wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = r9;
+
+static inline bool GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs,
+ Register* out) {
+ if (usedIntArgs >= NumIntArgRegs) {
+ return false;
+ }
+ *out = Register::FromCode(usedIntArgs);
+ return true;
+}
+
+static inline bool GetFloatArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs,
+ FloatRegister* out) {
+ if (usedFloatArgs >= NumFloatArgRegs) {
+ return false;
+ }
+ *out = FloatRegister::FromCode(usedFloatArgs);
+ return true;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, once the actual
+// argument registers are exhausted, we fall back on the CallTempReg* registers
+// that do not overlap the argument registers, and only fail once those run out
+// too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) {
+ return true;
+ }
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+inline Imm32 Imm64::firstHalf() const { return low(); }
+
+inline Imm32 Imm64::secondHalf() const { return hi(); }
+
+// Forbids nop filling for testing purposes. Not nestable.
+class AutoForbidNops {
+ protected:
+ Assembler* asm_;
+
+ public:
+ explicit AutoForbidNops(Assembler* asm_) : asm_(asm_) { asm_->enterNoNops(); }
+ ~AutoForbidNops() { asm_->leaveNoNops(); }
+};
+
+// Forbids pool generation during a specified interval. Not nestable.
+class AutoForbidPoolsAndNops : public AutoForbidNops {
+ public:
+ AutoForbidPoolsAndNops(Assembler* asm_, size_t maxInst)
+ : AutoForbidNops(asm_) {
+ asm_->enterNoPool(maxInst);
+ }
+ ~AutoForbidPoolsAndNops() { asm_->leaveNoPool(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // A64_ASSEMBLER_A64_H_
diff --git a/js/src/jit/arm64/CodeGenerator-arm64.cpp b/js/src/jit/arm64/CodeGenerator-arm64.cpp
new file mode 100644
index 0000000000..d738ea548e
--- /dev/null
+++ b/js/src/jit/arm64/CodeGenerator-arm64.cpp
@@ -0,0 +1,4245 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/CodeGenerator-arm64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "jit/ReciprocalMulConstants.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using mozilla::FloorLog2;
+using mozilla::Maybe;
+using mozilla::NegativeInfinity;
+using mozilla::Nothing;
+using mozilla::Some;
+
+// shared
+CodeGeneratorARM64::CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {}
+
+bool CodeGeneratorARM64::generateOutOfLineCode() {
+ AutoCreatedBy acb(masm, "CodeGeneratorARM64::generateOutOfLineCode");
+
+ if (!CodeGeneratorShared::generateOutOfLineCode()) {
+ return false;
+ }
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Store the frame size, so the handler can recover the IonScript.
+ masm.push(Imm32(frameSize()));
+
+ TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.jump(handler);
+ }
+
+ return !masm.oom();
+}
+
+void CodeGeneratorARM64::emitBranch(Assembler::Condition cond,
+ MBasicBlock* mirTrue,
+ MBasicBlock* mirFalse) {
+ if (isNextBlock(mirFalse->lir())) {
+ jumpToBlock(mirTrue, cond);
+ } else {
+ jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+}
+
+void OutOfLineBailout::accept(CodeGeneratorARM64* codegen) {
+ codegen->visitOutOfLineBailout(this);
+}
+
+void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
+ Register input = ToRegister(test->input());
+ MBasicBlock* mirTrue = test->ifTrue();
+ MBasicBlock* mirFalse = test->ifFalse();
+
+ // Jump to the True block if NonZero.
+ // Jump to the False block if Zero.
+ if (isNextBlock(mirFalse->lir())) {
+ masm.branch32(Assembler::NonZero, input, Imm32(0),
+ getJumpLabelForBranch(mirTrue));
+ } else {
+ masm.branch32(Assembler::Zero, input, Imm32(0),
+ getJumpLabelForBranch(mirFalse));
+ if (!isNextBlock(mirTrue->lir())) {
+ jumpToBlock(mirTrue);
+ }
+ }
+}
+
+void CodeGenerator::visitCompare(LCompare* comp) {
+ const MCompare* mir = comp->mir();
+ const MCompare::CompareType type = mir->compareType();
+ const Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
+ const Register leftreg = ToRegister(comp->getOperand(0));
+ const LAllocation* right = comp->getOperand(1);
+ const Register defreg = ToRegister(comp->getDef(0));
+
+ if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol ||
+ type == MCompare::Compare_UIntPtr ||
+ type == MCompare::Compare_RefOrNull) {
+ if (right->isConstant()) {
+ MOZ_ASSERT(type == MCompare::Compare_UIntPtr);
+ masm.cmpPtrSet(cond, leftreg, Imm32(ToInt32(right)), defreg);
+ } else {
+ masm.cmpPtrSet(cond, leftreg, ToRegister(right), defreg);
+ }
+ return;
+ }
+
+ if (right->isConstant()) {
+ masm.cmp32Set(cond, leftreg, Imm32(ToInt32(right)), defreg);
+ } else {
+ masm.cmp32Set(cond, leftreg, ToRegister(right), defreg);
+ }
+}
+
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+ const MCompare* mir = comp->cmpMir();
+ const MCompare::CompareType type = mir->compareType();
+ const LAllocation* left = comp->left();
+ const LAllocation* right = comp->right();
+
+ if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol ||
+ type == MCompare::Compare_UIntPtr ||
+ type == MCompare::Compare_RefOrNull) {
+ if (right->isConstant()) {
+ MOZ_ASSERT(type == MCompare::Compare_UIntPtr);
+ masm.cmpPtr(ToRegister(left), Imm32(ToInt32(right)));
+ } else {
+ masm.cmpPtr(ToRegister(left), ToRegister(right));
+ }
+ } else if (right->isConstant()) {
+ masm.cmp32(ToRegister(left), Imm32(ToInt32(right)));
+ } else {
+ masm.cmp32(ToRegister(left), ToRegister(right));
+ }
+
+ Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
+ emitBranch(cond, comp->ifTrue(), comp->ifFalse());
+}
+
+void CodeGeneratorARM64::bailoutIf(Assembler::Condition condition,
+ LSnapshot* snapshot) {
+ encode(snapshot);
+
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.B(ool->entry(), condition);
+}
+
+void CodeGeneratorARM64::bailoutFrom(Label* label, LSnapshot* snapshot) {
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void CodeGeneratorARM64::bailout(LSnapshot* snapshot) {
+ Label label;
+ masm.b(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+void CodeGeneratorARM64::visitOutOfLineBailout(OutOfLineBailout* ool) {
+ masm.push(Imm32(ool->snapshot()->snapshotOffset()));
+ masm.B(&deoptLabel_);
+}
+
+void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
+ ARMFPRegister lhs(ToFloatRegister(ins->first()), 64);
+ ARMFPRegister rhs(ToFloatRegister(ins->second()), 64);
+ ARMFPRegister output(ToFloatRegister(ins->output()), 64);
+ if (ins->mir()->isMax()) {
+ masm.Fmax(output, lhs, rhs);
+ } else {
+ masm.Fmin(output, lhs, rhs);
+ }
+}
+
+void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
+ ARMFPRegister lhs(ToFloatRegister(ins->first()), 32);
+ ARMFPRegister rhs(ToFloatRegister(ins->second()), 32);
+ ARMFPRegister output(ToFloatRegister(ins->output()), 32);
+ if (ins->mir()->isMax()) {
+ masm.Fmax(output, lhs, rhs);
+ } else {
+ masm.Fmin(output, lhs, rhs);
+ }
+}
+
+template <typename T>
+static ARMRegister toWRegister(const T* a) {
+ return ARMRegister(ToRegister(a), 32);
+}
+
+template <typename T>
+static ARMRegister toXRegister(const T* a) {
+ return ARMRegister(ToRegister(a), 64);
+}
+
+Operand toWOperand(const LAllocation* a) {
+ if (a->isConstant()) {
+ return Operand(ToInt32(a));
+ }
+ return Operand(toWRegister(a));
+}
+
+vixl::CPURegister ToCPURegister(const LAllocation* a, Scalar::Type type) {
+ if (a->isFloatReg() && type == Scalar::Float64) {
+ return ARMFPRegister(ToFloatRegister(a), 64);
+ }
+ if (a->isFloatReg() && type == Scalar::Float32) {
+ return ARMFPRegister(ToFloatRegister(a), 32);
+ }
+ if (a->isGeneralReg()) {
+ return ARMRegister(ToRegister(a), 32);
+ }
+ MOZ_CRASH("Unknown LAllocation");
+}
+
+vixl::CPURegister ToCPURegister(const LDefinition* d, Scalar::Type type) {
+ return ToCPURegister(d->output(), type);
+}
+
+// Let |cond| be an ARM64 condition code that we could reasonably use in a
+// conditional branch or select following a comparison instruction. This
+// function returns the condition to use in the case where we swap the two
+// operands of the comparison instruction.
+Assembler::Condition GetCondForSwappedOperands(Assembler::Condition cond) {
+ // EQ and NE map to themselves
+ // Of the remaining 14 cases, 4 other pairings can meaningfully swap:
+ // HS -- LS
+ // LO -- HI
+ // GE -- LE
+ // GT -- LT
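+ // For example, "cmp a, b; b.lt target" branches under exactly the same
+ // conditions as "cmp b, a; b.gt target".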
+ switch (cond) {
+ case vixl::eq:
+ case vixl::ne:
+ return cond;
+ case vixl::hs:
+ return vixl::ls;
+ case vixl::ls:
+ return vixl::hs;
+ case vixl::lo:
+ return vixl::hi;
+ case vixl::hi:
+ return vixl::lo;
+ case vixl::ge:
+ return vixl::le;
+ case vixl::le:
+ return vixl::ge;
+ case vixl::gt:
+ return vixl::lt;
+ case vixl::lt:
+ return vixl::gt;
+ default:
+ MOZ_CRASH("no meaningful swapped-operand condition");
+ }
+}
+
+void CodeGenerator::visitAddI(LAddI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ // Platforms with three-operand arithmetic ops don't need recovery.
+ MOZ_ASSERT(!ins->recoversInput());
+
+ if (ins->snapshot()) {
+ masm.Adds(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ } else {
+ masm.Add(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
+ }
+}
+
+void CodeGenerator::visitSubI(LSubI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ // Platforms with three-operand arithmetic ops don't need recovery.
+ MOZ_ASSERT(!ins->recoversInput());
+
+ if (ins->snapshot()) {
+ masm.Subs(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ } else {
+ masm.Sub(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
+ }
+}
+
+void CodeGenerator::visitMulI(LMulI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+ MMul* mul = ins->mir();
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
+ !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ Register lhsreg = ToRegister(lhs);
+ const ARMRegister lhsreg32 = ARMRegister(lhsreg, 32);
+ Register destreg = ToRegister(dest);
+ const ARMRegister destreg32 = ARMRegister(destreg, 32);
+
+ if (rhs->isConstant()) {
+ // Bailout on -0.0.
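+ // The product is -0 only if one operand is 0 and the other is negative:
+ // with constant == 0 that means lhs < 0, and with constant < 0 it means
+ // lhs == 0, which is what the condition below checks.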
+ int32_t constant = ToInt32(rhs);
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition bailoutCond =
+ (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+ masm.Cmp(toWRegister(lhs), Operand(0));
+ bailoutIf(bailoutCond, ins->snapshot());
+ }
+
+ switch (constant) {
+ case -1:
+ masm.Negs(destreg32, Operand(lhsreg32));
+ break; // Go to overflow check.
+ case 0:
+ masm.Mov(destreg32, wzr);
+ return; // Avoid overflow check.
+ case 1:
+ if (destreg != lhsreg) {
+ masm.Mov(destreg32, lhsreg32);
+ }
+ return; // Avoid overflow check.
+ case 2:
+ if (!mul->canOverflow()) {
+ masm.Add(destreg32, lhsreg32, Operand(lhsreg32));
+ return; // Avoid overflow check.
+ }
+ masm.Adds(destreg32, lhsreg32, Operand(lhsreg32));
+ break; // Go to overflow check.
+ default:
+ // Use shift if cannot overflow and constant is a power of 2
+ if (!mul->canOverflow() && constant > 0) {
+ int32_t shift = FloorLog2(constant);
+ if ((1 << shift) == constant) {
+ masm.Lsl(destreg32, lhsreg32, shift);
+ return;
+ }
+ }
+
+ // Otherwise, just multiply. We have to check for overflow.
+ // Negative zero was handled above.
+ Label bailout;
+ Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const Register scratch = temps.AcquireW().asUnsized();
+
+ masm.move32(Imm32(constant), scratch);
+ masm.mul32(lhsreg, scratch, destreg, onOverflow);
+
+ if (onOverflow) {
+ MOZ_ASSERT(lhsreg != destreg);
+ bailoutFrom(&bailout, ins->snapshot());
+ }
+ return;
+ }
+
+ // Overflow check.
+ if (mul->canOverflow()) {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ }
+ } else {
+ Register rhsreg = ToRegister(rhs);
+ const ARMRegister rhsreg32 = ARMRegister(rhsreg, 32);
+
+ Label bailout;
+ Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;
+
+ if (mul->canBeNegativeZero()) {
+ // The product of two integer operands is negative zero iff one
+ // operand is zero, and the other is negative. Therefore, the
+ // sum of the two operands will also be negative (specifically,
+ // it will be the non-zero operand). If the result of the
+ // multiplication is 0, we can check the sign of the sum to
+ // determine whether we should bail out.
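+ // For example, lhs = -3, rhs = 0 gives a product of 0 and a sum of -3,
+ // so we bail (the true result is -0); lhs = 3, rhs = 0 also gives 0, but
+ // the sum is 3, so +0 is correct and no bailout is taken.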
+
+ // This code can bailout, so lowering guarantees that the input
+ // operands are not overwritten.
+ MOZ_ASSERT(destreg != lhsreg);
+ MOZ_ASSERT(destreg != rhsreg);
+
+ // Do the multiplication.
+ masm.mul32(lhsreg, rhsreg, destreg, onOverflow);
+
+ // Set Zero flag if destreg is 0.
+ masm.test32(destreg, destreg);
+
+ // ccmn is 'conditional compare negative'.
+ // If the Zero flag is set:
+ // perform a compare negative (compute lhs+rhs and set flags)
+ // else:
+ // clear flags
+ masm.Ccmn(lhsreg32, rhsreg32, vixl::NoFlag, Assembler::Zero);
+
+ // Bails out if (lhs * rhs == 0) && (lhs + rhs < 0):
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+
+ } else {
+ masm.mul32(lhsreg, rhsreg, destreg, onOverflow);
+ }
+ if (onOverflow) {
+ bailoutFrom(&bailout, ins->snapshot());
+ }
+ }
+}
+
+void CodeGenerator::visitDivI(LDivI* ins) {
+ const Register lhs = ToRegister(ins->lhs());
+ const Register rhs = ToRegister(ins->rhs());
+ const Register output = ToRegister(ins->output());
+
+ const ARMRegister lhs32 = toWRegister(ins->lhs());
+ const ARMRegister rhs32 = toWRegister(ins->rhs());
+ const ARMRegister temp32 = toWRegister(ins->getTemp(0));
+ const ARMRegister output32 = toWRegister(ins->output());
+
+ MDiv* mir = ins->mir();
+
+ Label done;
+
+ // Handle division by zero.
+ if (mir->canBeDivideByZero()) {
+ masm.test32(rhs, rhs);
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.j(Assembler::NonZero, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else if (mir->canTruncateInfinities()) {
+ // Truncated division by zero is zero: (Infinity|0 = 0).
+ Label nonZero;
+ masm.j(Assembler::NonZero, &nonZero);
+ masm.Mov(output32, wzr);
+ masm.jump(&done);
+ masm.bind(&nonZero);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ }
+
+ // Handle an integer overflow from (INT32_MIN / -1).
+ // The integer division gives INT32_MIN, but should be -(double)INT32_MIN.
+ if (mir->canBeNegativeOverflow()) {
+ Label notOverflow;
+
+ // Branch to handle the non-overflow cases.
+ masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
+ masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notOverflow);
+
+ // Handle overflow.
+ if (mir->trapOnError()) {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+ } else if (mir->canTruncateOverflow()) {
+ // (-INT32_MIN)|0 == INT32_MIN, which is already in lhs.
+ masm.move32(lhs, output);
+ masm.jump(&done);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailout(ins->snapshot());
+ }
+ masm.bind(&notOverflow);
+ }
+
+ // Handle negative zero: lhs == 0 && rhs < 0.
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonZero;
+ masm.branch32(Assembler::NotEqual, lhs, Imm32(0), &nonZero);
+ masm.cmp32(rhs, Imm32(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ masm.bind(&nonZero);
+ }
+
+ // Perform integer division.
+ if (mir->canTruncateRemainder()) {
+ masm.Sdiv(output32, lhs32, rhs32);
+ } else {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ ARMRegister scratch32 = temps.AcquireW();
+
+ // ARM does not automatically calculate the remainder.
+ // The instruction set reference suggests using multiplication to determine
+ // whether a remainder exists.
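+ // The remainder is zero exactly when lhs == (lhs / rhs) * rhs, which is
+ // what the Sdiv/Mul/Cmp sequence below checks.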
+ masm.Sdiv(scratch32, lhs32, rhs32);
+ masm.Mul(temp32, scratch32, rhs32);
+ masm.Cmp(lhs32, temp32);
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+ masm.Mov(output32, scratch32);
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
+ const Register numerator = ToRegister(ins->numerator());
+ const ARMRegister numerator32 = toWRegister(ins->numerator());
+ const ARMRegister output32 = toWRegister(ins->output());
+
+ int32_t shift = ins->shift();
+ bool negativeDivisor = ins->negativeDivisor();
+ MDiv* mir = ins->mir();
+
+ if (!mir->isTruncated() && negativeDivisor) {
+ // 0 divided by a negative number returns a -0 double.
+ bailoutTest32(Assembler::Zero, numerator, numerator, ins->snapshot());
+ }
+
+ if (shift) {
+ if (!mir->isTruncated()) {
+ // If the remainder is != 0, bailout since this must be a double.
+ bailoutTest32(Assembler::NonZero, numerator,
+ Imm32(UINT32_MAX >> (32 - shift)), ins->snapshot());
+ }
+
+ if (mir->isUnsigned()) {
+ // shift right
+ masm.Lsr(output32, numerator32, shift);
+ } else {
+ ARMRegister temp32 = numerator32;
+ // Adjust the value so that shifting produces a correctly
+ // rounded result when the numerator is negative. See 10-1
+ // "Signed Division by a Known Power of 2" in Henry
+ // S. Warren, Jr.'s Hacker's Delight.
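+ // For example, with numerator = -7 and shift = 1 we add (2^1 - 1) = 1 to
+ // get -6, and -6 >> 1 = -3 = trunc(-7 / 2); without the adjustment,
+ // -7 >> 1 would yield -4.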
+ if (mir->canBeNegativeDividend() && mir->isTruncated()) {
+ if (shift > 1) {
+ // Copy the sign bit of the numerator. (= (2^32 - 1) or 0)
+ masm.Asr(output32, numerator32, 31);
+ temp32 = output32;
+ }
+ // Divide by 2^(32 - shift)
+ // i.e. (= (2^32 - 1) / 2^(32 - shift) or 0)
+ // i.e. (= (2^shift - 1) or 0)
+ masm.Lsr(output32, temp32, 32 - shift);
+ // If signed, let any 1 bits below the shift point bubble up, so that
+ // once shifted the value is rounded towards 0.
+ masm.Add(output32, output32, numerator32);
+ temp32 = output32;
+ }
+ masm.Asr(output32, temp32, shift);
+
+ if (negativeDivisor) {
+ masm.Neg(output32, output32);
+ }
+ }
+ return;
+ }
+
+ if (negativeDivisor) {
+ // INT32_MIN / -1 overflows.
+ if (!mir->isTruncated()) {
+ masm.Negs(output32, numerator32);
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ } else if (mir->trapOnError()) {
+ Label ok;
+ masm.Negs(output32, numerator32);
+ masm.branch(Assembler::NoOverflow, &ok);
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+ masm.bind(&ok);
+ } else {
+ // Do not set condition flags.
+ masm.Neg(output32, numerator32);
+ }
+ } else {
+ if (mir->isUnsigned() && !mir->isTruncated()) {
+ // Copy and set flags.
+ masm.Adds(output32, numerator32, 0);
+ // Unsigned division by 1 can overflow if output is not truncated, as we
+ // do not have an Unsigned type for MIR instructions.
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ } else {
+ // Copy the result.
+ masm.Mov(output32, numerator32);
+ }
+ }
+}
+
+void CodeGenerator::visitDivConstantI(LDivConstantI* ins) {
+ const ARMRegister lhs32 = toWRegister(ins->numerator());
+ const ARMRegister lhs64 = toXRegister(ins->numerator());
+ const ARMRegister const32 = toWRegister(ins->temp());
+ const ARMRegister output32 = toWRegister(ins->output());
+ const ARMRegister output64 = toXRegister(ins->output());
+ int32_t d = ins->denominator();
+
+ // The absolute value of the denominator isn't a power of 2.
+ using mozilla::Abs;
+ MOZ_ASSERT((Abs(d) & (Abs(d) - 1)) != 0);
+
+ // We will first divide by Abs(d), and negate the answer if d is negative.
+ // If desired, this can be avoided by generalizing computeDivisionConstants.
+ auto rmc = ReciprocalMulConstants::computeSignedDivisionConstants(Abs(d));
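+ // Roughly, rmc.multiplier ~= 2^(32 + rmc.shiftAmount) / Abs(d), so that
+ // (multiplier * n) >> (32 + shiftAmount) reproduces the truncated quotient;
+ // see ReciprocalMulConstants for the precise bounds.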
+
+ // We first compute (M * n) >> 32, where M = rmc.multiplier.
+ masm.Mov(const32, int32_t(rmc.multiplier));
+ if (rmc.multiplier > INT32_MAX) {
+ MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 32));
+
+ // We actually compute (int32_t(M) * n) instead, without the upper bit.
+ // Thus, (M * n) = (int32_t(M) * n) + n << 32.
+ //
+ // ((int32_t(M) * n) + n << 32) can't overflow, as both operands have
+ // opposite signs because int32_t(M) is negative.
+ masm.Lsl(output64, lhs64, 32);
+
+ // Store (M * n) in output64.
+ masm.Smaddl(output64, const32, lhs32, output64);
+ } else {
+ // Store (M * n) in output64.
+ masm.Smull(output64, const32, lhs32);
+ }
+
+ // (M * n) >> (32 + shift) is the truncated division answer if n is
+ // non-negative, as proved in the comments of computeDivisionConstants. We
+ // must add 1 later if n is negative to get the right answer in all cases.
+ masm.Asr(output64, output64, 32 + rmc.shiftAmount);
+
+ // We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
+ // computed with just a sign-extending shift of 31 bits.
+ if (ins->canBeNegativeDividend()) {
+ masm.Asr(const32, lhs32, 31);
+ masm.Sub(output32, output32, const32);
+ }
+
+ // After this, output32 contains the correct truncated division result.
+ if (d < 0) {
+ masm.Neg(output32, output32);
+ }
+
+ if (!ins->mir()->isTruncated()) {
+ // This is a division op. Multiply the obtained value by d to check if
+ // the correct answer is an integer. This cannot overflow, since |d| > 1.
+ masm.Mov(const32, d);
+ masm.Msub(const32, output32, const32, lhs32);
+ // bailout if (lhs - output * d != 0)
+ masm.Cmp(const32, wzr);
+ auto bailoutCond = Assembler::NonZero;
+
+ // If lhs is zero and the divisor is negative, the answer should have
+ // been -0.
+ if (d < 0) {
+ // or bailout if (lhs == 0).
+ // ^ ^
+ // | '-- masm.Ccmp(lhs32, lhs32, .., ..)
+ // '-- masm.Ccmp(.., .., vixl::ZFlag, ! bailoutCond)
+ masm.Ccmp(lhs32, wzr, vixl::ZFlag, Assembler::Zero);
+ bailoutCond = Assembler::Zero;
+ }
+
+ // bailout if (lhs - output * d != 0) or (d < 0 && lhs == 0)
+ bailoutIf(bailoutCond, ins->snapshot());
+ }
+}
+
+void CodeGenerator::visitUDivConstantI(LUDivConstantI* ins) {
+ const ARMRegister lhs32 = toWRegister(ins->numerator());
+ const ARMRegister lhs64 = toXRegister(ins->numerator());
+ const ARMRegister const32 = toWRegister(ins->temp());
+ const ARMRegister output32 = toWRegister(ins->output());
+ const ARMRegister output64 = toXRegister(ins->output());
+ uint32_t d = ins->denominator();
+
+ if (d == 0) {
+ if (ins->mir()->isTruncated()) {
+ if (ins->mir()->trapOnError()) {
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero,
+ ins->mir()->bytecodeOffset());
+ } else {
+ masm.Mov(output32, wzr);
+ }
+ } else {
+ bailout(ins->snapshot());
+ }
+ return;
+ }
+
+ // The denominator isn't a power of 2 (see LDivPowTwoI).
+ MOZ_ASSERT((d & (d - 1)) != 0);
+
+ auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(d);
+
+ // We first compute (M * n), where M = rmc.multiplier.
+ masm.Mov(const32, int32_t(rmc.multiplier));
+ masm.Umull(output64, const32, lhs32);
+ if (rmc.multiplier > UINT32_MAX) {
+ // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
+ // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
+ // contradicting the proof of correctness in computeDivisionConstants.
+ MOZ_ASSERT(rmc.shiftAmount > 0);
+ MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));
+
+ // We actually compute (uint32_t(M) * n) instead, without the upper bit.
+ // Thus, (M * n) = (uint32_t(M) * n) + n << 32.
+ //
+ // ((uint32_t(M) * n) + n << 32) can overflow. Hacker's Delight explains a
+ // trick to avoid this overflow case, but we can avoid it by computing the
+ // addition on 64 bits registers.
+ //
+ // Compute ((uint32_t(M) * n) >> 32 + n)
+ masm.Add(output64, lhs64, Operand(output64, vixl::LSR, 32));
+
+ // (M * n) >> (32 + shift) is the truncated division answer.
+ masm.Lsr(output64, output64, rmc.shiftAmount);
+ } else {
+ // (M * n) >> (32 + shift) is the truncated division answer.
+ masm.Lsr(output64, output64, 32 + rmc.shiftAmount);
+ }
+
+ // We now have the truncated division value. To check whether the division
+ // resulted in an integer, we multiply the obtained value by d and check
+ // the remainder.
+ if (!ins->mir()->isTruncated()) {
+ masm.Mov(const32, d);
+ masm.Msub(const32, output32, const32, lhs32);
+ // bailout if (lhs - output * d != 0)
+ masm.Cmp(const32, wzr);
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+}
+
+void CodeGenerator::visitModI(LModI* ins) {
+ ARMRegister lhs = toWRegister(ins->lhs());
+ ARMRegister rhs = toWRegister(ins->rhs());
+ ARMRegister output = toWRegister(ins->output());
+ Label done;
+
+ MMod* mir = ins->mir();
+
+ // Prevent divide by zero.
+ if (mir->canBeDivideByZero()) {
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.Cbnz(rhs, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ // Truncated division by zero yields integer zero.
+ masm.Mov(output, rhs);
+ masm.Cbz(rhs, &done);
+ }
+ } else {
+ // Non-truncated division by zero produces a non-integer.
+ MOZ_ASSERT(!gen->compilingWasm());
+ masm.Cmp(rhs, Operand(0));
+ bailoutIf(Assembler::Equal, ins->snapshot());
+ }
+ }
+
+ // Signed division.
+ masm.Sdiv(output, lhs, rhs);
+
+ // Compute the remainder: output = lhs - (output * rhs).
+ masm.Msub(output, output, rhs, lhs);
+
+ if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
+ // If output == 0 and lhs < 0, then the result should be double -0.0.
+ // Note that this guard handles lhs == INT_MIN and rhs == -1:
+ // output = INT_MIN - (INT_MIN / -1) * -1
+ // = INT_MIN - INT_MIN
+ // = 0
+ masm.Cbnz(output, &done);
+ bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+ }
+
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
+ Register lhs = ToRegister(ins->getOperand(0));
+ ARMRegister lhsw = toWRegister(ins->getOperand(0));
+ ARMRegister outw = toWRegister(ins->output());
+
+ int32_t shift = ins->shift();
+ bool canBeNegative =
+ !ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend();
+
+ Label negative;
+ if (canBeNegative) {
+ // Switch based on sign of the lhs.
+ // Positive numbers are just a bitmask.
+ masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
+ }
+
+ masm.And(outw, lhsw, Operand((uint32_t(1) << shift) - 1));
+
+ if (canBeNegative) {
+ Label done;
+ masm.jump(&done);
+
+ // Negative numbers need a negate, bitmask, negate.
+ masm.bind(&negative);
+ masm.Neg(outw, Operand(lhsw));
+ masm.And(outw, outw, Operand((uint32_t(1) << shift) - 1));
+
+ // Since a%b has the same sign as b, and a is negative in this branch,
+ // an answer of 0 means the correct result is actually -0. Bail out.
+ if (!ins->mir()->isTruncated()) {
+ masm.Negs(outw, Operand(outw));
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ } else {
+ masm.Neg(outw, Operand(outw));
+ }
+
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitModMaskI(LModMaskI* ins) {
+ MMod* mir = ins->mir();
+ int32_t shift = ins->shift();
+
+ const Register src = ToRegister(ins->getOperand(0));
+ const Register dest = ToRegister(ins->getDef(0));
+ const Register hold = ToRegister(ins->getTemp(0));
+ const Register remain = ToRegister(ins->getTemp(1));
+
+ const ARMRegister src32 = ARMRegister(src, 32);
+ const ARMRegister dest32 = ARMRegister(dest, 32);
+ const ARMRegister remain32 = ARMRegister(remain, 32);
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch32 = temps.AcquireW();
+ const Register scratch = scratch32.asUnsized();
+
+ // We wish to compute x % (1<<y) - 1 for a known constant, y.
+ //
+ // 1. Let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as
+ // a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
+ //
+ // 2. Since both addition and multiplication commute with modulus:
+ // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+ // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
+ //
+ // 3. Since b == C + 1, b % C == 1, and b^n % C == 1 the whole thing
+ // simplifies to: c_0 + c_1 + c_2 ... c_n % C
+ //
+ // Each c_n can easily be computed by a shift/bitextract, and the modulus
+ // can be maintained by simply subtracting by C whenever the number gets
+ // over C.
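+ //
+ // For example, x = 19 and shift = 2 gives C = 3: in base 4, 19 has the
+ // digits 1, 0, 3, whose sum is 4; reducing by C once leaves 1, which is
+ // indeed 19 % 3.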
+ int32_t mask = (1 << shift) - 1;
+ Label loop;
+
+ // Register 'hold' holds -1 if the value was negative, 1 otherwise.
+ // The remain reg holds the remaining bits that have not been processed.
+ // The scratch reg serves as a temporary location to store extracted bits.
+ // The dest reg is the accumulator, which becomes the final result.
+ //
+ // Move the whole value into the remain.
+ masm.Mov(remain32, src32);
+ // Zero out the dest.
+ masm.Mov(dest32, wzr);
+ // Set the hold appropriately.
+ {
+ Label negative;
+ masm.branch32(Assembler::Signed, remain, Imm32(0), &negative);
+ masm.move32(Imm32(1), hold);
+ masm.jump(&loop);
+
+ masm.bind(&negative);
+ masm.move32(Imm32(-1), hold);
+ masm.neg32(remain);
+ }
+
+ // Begin the main loop.
+ masm.bind(&loop);
+ {
+ // Extract the bottom bits into scratch.
+ masm.And(scratch32, remain32, Operand(mask));
+ // Add those bits to the accumulator.
+ masm.Add(dest32, dest32, scratch32);
+ // Do a trial subtraction. This functions as a cmp but remembers the result.
+ masm.Subs(scratch32, dest32, Operand(mask));
+ // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
+ {
+ Label sumSigned;
+ masm.branchTest32(Assembler::Signed, scratch, scratch, &sumSigned);
+ masm.Mov(dest32, scratch32);
+ masm.bind(&sumSigned);
+ }
+ // Get rid of the bits that we extracted before.
+ masm.Lsr(remain32, remain32, shift);
+ // If the shift produced zero, finish, otherwise, continue in the loop.
+ masm.branchTest32(Assembler::NonZero, remain, remain, &loop);
+ }
+
+ // Check the hold to see if we need to negate the result.
+ {
+ Label done;
+
+ // If the hold is negative, negate the result to match JS expectations.
+ masm.branchTest32(Assembler::NotSigned, hold, hold, &done);
+ if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
+ // The dividend was negative, so if the result is zero here the true
+ // result is -0: bail out.
+ bailoutTest32(Assembler::Zero, dest, dest, ins->snapshot());
+ }
+
+ masm.neg32(dest);
+ masm.bind(&done);
+ }
+}
+
+void CodeGeneratorARM64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ const ARMRegister dividend64(dividend, 64);
+ const ARMRegister divisor64(divisor, 64);
+
+ masm.Sdiv(/* result= */ dividend64, dividend64, divisor64);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorARM64::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ const ARMRegister dividend64(dividend, 64);
+ const ARMRegister divisor64(divisor, 64);
+ const ARMRegister output64(output, 64);
+
+ // Signed division.
+ masm.Sdiv(output64, dividend64, divisor64);
+
+ // Compute the remainder: output = dividend - (output * divisor).
+ masm.Msub(/* result= */ dividend64, output64, divisor64, dividend64);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGenerator::visitBitNotI(LBitNotI* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ const LDefinition* output = ins->getDef(0);
+ masm.Mvn(toWRegister(output), toWOperand(input));
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ masm.Mvn(vixl::Register(output, 64), vixl::Register(input, 64));
+}
+
+void CodeGenerator::visitBitOpI(LBitOpI* ins) {
+ const ARMRegister lhs = toWRegister(ins->getOperand(0));
+ const Operand rhs = toWOperand(ins->getOperand(1));
+ const ARMRegister dest = toWRegister(ins->getDef(0));
+
+ switch (ins->bitop()) {
+ case JSOp::BitOr:
+ masm.Orr(dest, lhs, rhs);
+ break;
+ case JSOp::BitXor:
+ masm.Eor(dest, lhs, rhs);
+ break;
+ case JSOp::BitAnd:
+ masm.And(dest, lhs, rhs);
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitShiftI(LShiftI* ins) {
+ const ARMRegister lhs = toWRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ const ARMRegister dest = toWRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ masm.Lsl(dest, lhs, shift);
+ break;
+ case JSOp::Rsh:
+ masm.Asr(dest, lhs, shift);
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.Lsr(dest, lhs, shift);
+ } else if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ masm.Ands(dest, lhs, Operand(0xFFFFFFFF));
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ } else {
+ masm.Mov(dest, lhs);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+ const ARMRegister rhsreg = toWRegister(rhs);
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ masm.Lsl(dest, lhs, rhsreg);
+ break;
+ case JSOp::Rsh:
+ masm.Asr(dest, lhs, rhsreg);
+ break;
+ case JSOp::Ursh:
+ masm.Lsr(dest, lhs, rhsreg);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ masm.Cmp(dest, Operand(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void CodeGenerator::visitUrshD(LUrshD* ins) {
+ const ARMRegister lhs = toWRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ const FloatRegister out = ToFloatRegister(ins->output());
+
+ const Register temp = ToRegister(ins->temp());
+ const ARMRegister temp32 = toWRegister(ins->temp());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ if (shift) {
+ masm.Lsr(temp32, lhs, shift);
+ masm.convertUInt32ToDouble(temp, out);
+ } else {
+ masm.convertUInt32ToDouble(ToRegister(ins->lhs()), out);
+ }
+ } else {
+ masm.And(temp32, toWRegister(rhs), Operand(0x1F));
+ masm.Lsr(temp32, lhs, temp32);
+ masm.convertUInt32ToDouble(temp, out);
+ }
+}
+
+void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ ScratchDoubleScope scratch(masm);
+
+ Label done, sqrt;
+
+ if (!ins->mir()->operandIsNeverNegativeInfinity()) {
+ // Branch if not -Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
+
+ Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
+ if (ins->mir()->operandIsNeverNaN()) {
+ cond = Assembler::DoubleNotEqual;
+ }
+ masm.branchDouble(cond, input, scratch, &sqrt);
+
+ // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.zeroDouble(output);
+ masm.subDouble(scratch, output);
+ masm.jump(&done);
+
+ masm.bind(&sqrt);
+ }
+
+ if (!ins->mir()->operandIsNeverNegativeZero()) {
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
+ masm.zeroDouble(scratch);
+ masm.addDouble(input, scratch);
+ masm.sqrtDouble(scratch, output);
+ } else {
+ masm.sqrtDouble(input, output);
+ }
+
+ masm.bind(&done);
+}
+
+MoveOperand CodeGeneratorARM64::toMoveOperand(const LAllocation a) const {
+ if (a.isGeneralReg()) {
+ return MoveOperand(ToRegister(a));
+ }
+ if (a.isFloatReg()) {
+ return MoveOperand(ToFloatRegister(a));
+ }
+ MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
+ : MoveOperand::Kind::Memory;
+ return MoveOperand(ToAddress(a), kind);
+}
+
+class js::jit::OutOfLineTableSwitch
+ : public OutOfLineCodeBase<CodeGeneratorARM64> {
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorARM64* codegen) override {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}
+
+ MTableSwitch* mir() const { return mir_; }
+
+ CodeLabel* jumpLabel() { return &jumpLabel_; }
+};
+
+void CodeGeneratorARM64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) {
+ MTableSwitch* mir = ool->mir();
+
+ // Prevent nop and pool sequences from appearing in the jump table.
+ AutoForbidPoolsAndNops afp(
+ &masm, (mir->numCases() + 1) * (sizeof(void*) / vixl::kInstructionSize));
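+ // The size is given in instruction slots: each pointer-sized entry takes
+ // sizeof(void*) / kInstructionSize slots, and the extra "+ 1" entry leaves
+ // room for the haltingAlign padding emitted just below.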
+ masm.haltingAlign(sizeof(void*));
+ masm.bind(ool->jumpLabel());
+ masm.addCodeLabel(*ool->jumpLabel());
+
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses,
+ // and thus must be patched after codegen is finished.
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void CodeGeneratorARM64::emitTableSwitchDispatch(MTableSwitch* mir,
+ Register index,
+ Register base) {
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+ // Let the lowest table entry be indexed at 0.
+ if (mir->low() != 0) {
+ masm.sub32(Imm32(mir->low()), index);
+ }
+
+ // Jump to the default case if input is out of range.
+ int32_t cases = mir->numCases();
+ masm.branch32(Assembler::AboveOrEqual, index, Imm32(cases), defaultcase);
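+ // AboveOrEqual is an unsigned comparison, so inputs below mir->low() (which
+ // wrapped around when the bias was subtracted) are also sent to the default
+ // case.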
+
+ // Because the target code has not yet been generated, we cannot know the
+ // instruction offsets for use as jump targets. Therefore we construct
+ // an OutOfLineTableSwitch that winds up holding the jump table.
+ //
+ // Because the jump table is generated as part of out-of-line code,
+ // it is generated after all the regular codegen, so the jump targets
+ // are guaranteed to exist when generating the jump table.
+ OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+ // Use the index to get the address of the jump target from the table.
+ masm.mov(ool->jumpLabel(), base);
+ BaseIndex pointer(base, index, ScalePointer);
+
+ // Load the target from the jump table and branch to it.
+ masm.branchToComputedAddress(pointer);
+}
+
+void CodeGenerator::visitMathD(LMathD* math) {
+ ARMFPRegister lhs(ToFloatRegister(math->lhs()), 64);
+ ARMFPRegister rhs(ToFloatRegister(math->rhs()), 64);
+ ARMFPRegister output(ToFloatRegister(math->output()), 64);
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.Fadd(output, lhs, rhs);
+ break;
+ case JSOp::Sub:
+ masm.Fsub(output, lhs, rhs);
+ break;
+ case JSOp::Mul:
+ masm.Fmul(output, lhs, rhs);
+ break;
+ case JSOp::Div:
+ masm.Fdiv(output, lhs, rhs);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitMathF(LMathF* math) {
+ ARMFPRegister lhs(ToFloatRegister(math->lhs()), 32);
+ ARMFPRegister rhs(ToFloatRegister(math->rhs()), 32);
+ ARMFPRegister output(ToFloatRegister(math->output()), 32);
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.Fadd(output, lhs, rhs);
+ break;
+ case JSOp::Sub:
+ masm.Fsub(output, lhs, rhs);
+ break;
+ case JSOp::Mul:
+ masm.Fmul(output, lhs, rhs);
+ break;
+ case JSOp::Div:
+ masm.Fdiv(output, lhs, rhs);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitClzI(LClzI* lir) {
+ ARMRegister input = toWRegister(lir->input());
+ ARMRegister output = toWRegister(lir->output());
+ masm.Clz(output, input);
+}
+
+void CodeGenerator::visitCtzI(LCtzI* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ masm.ctz32(input, output, /* knownNotZero = */ false);
+}
+
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitNearbyInt(LNearbyInt* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ RoundingMode roundingMode = lir->mir()->roundingMode();
+ masm.nearbyIntDouble(roundingMode, input, output);
+}
+
+void CodeGenerator::visitNearbyIntF(LNearbyIntF* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ RoundingMode roundingMode = lir->mir()->roundingMode();
+ masm.nearbyIntFloat32(roundingMode, input, output);
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* lir) {
+ emitTruncateDouble(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* lir) {
+ emitTruncateFloat32(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+ValueOperand CodeGeneratorARM64::ToValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand CodeGeneratorARM64::ToTempValue(LInstruction* ins, size_t pos) {
+ MOZ_CRASH("CodeGeneratorARM64::ToTempValue");
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ ValueOperand result = ToOutValue(value);
+ masm.moveValue(value->value(), result);
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LAllocation* in = box->getOperand(0);
+ ValueOperand result = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ MUnbox* mir = unbox->mir();
+
+ Register result = ToRegister(unbox->output());
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ Label bail;
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.fallibleUnboxInt32(value, result, &bail);
+ break;
+ case MIRType::Boolean:
+ masm.fallibleUnboxBoolean(value, result, &bail);
+ break;
+ case MIRType::Object:
+ masm.fallibleUnboxObject(value, result, &bail);
+ break;
+ case MIRType::String:
+ masm.fallibleUnboxString(value, result, &bail);
+ break;
+ case MIRType::Symbol:
+ masm.fallibleUnboxSymbol(value, result, &bail);
+ break;
+ case MIRType::BigInt:
+ masm.fallibleUnboxBigInt(value, result, &bail);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutFrom(&bail, unbox->snapshot());
+ return;
+ }
+
+ // Infallible unbox.
+
+ ValueOperand input = ToValue(unbox, LUnbox::Input);
+
+#ifdef DEBUG
+ // Assert the types match.
+ JSValueTag tag = MIRTypeToTag(mir->type());
+ Label ok;
+ {
+ ScratchTagScope scratch(masm, input);
+ masm.splitTagForTest(input, scratch);
+ masm.cmpTag(scratch, ImmTag(tag));
+ }
+ masm.B(&ok, Assembler::Condition::Equal);
+ masm.assumeUnreachable("Infallible unbox type mismatch");
+ masm.bind(&ok);
+#endif
+
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(input, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(input, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(input, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(input, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(input, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(input, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+void CodeGenerator::visitDouble(LDouble* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitFloat32(LFloat32* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
+ const LAllocation* opd = test->input();
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 64), 0.0);
+
+ // If the compare set the Z (zero) flag, then the result is definitely false.
+ jumpToBlock(ifFalse, Assembler::Zero);
+
+ // Overflow means one of the operands was NaN, which is also false.
+ jumpToBlock(ifFalse, Assembler::Overflow);
+ jumpToBlock(ifTrue);
+}
+
+void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
+ const LAllocation* opd = test->input();
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 32), 0.0);
+
+ // If the compare set the Z (zero) flag, then the result is definitely false.
+ jumpToBlock(ifFalse, Assembler::Zero);
+
+ // Overflow means one of the operands was NaN, which is also false.
+ jumpToBlock(ifFalse, Assembler::Overflow);
+ jumpToBlock(ifTrue);
+}
+
+void CodeGenerator::visitCompareD(LCompareD* comp) {
+ const FloatRegister left = ToFloatRegister(comp->left());
+ const FloatRegister right = ToFloatRegister(comp->right());
+ ARMRegister output = toWRegister(comp->output());
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+
+ masm.compareDouble(cond, left, right);
+ masm.cset(output, Assembler::ConditionFromDoubleCondition(cond));
+}
+
+void CodeGenerator::visitCompareF(LCompareF* comp) {
+ const FloatRegister left = ToFloatRegister(comp->left());
+ const FloatRegister right = ToFloatRegister(comp->right());
+ ARMRegister output = toWRegister(comp->output());
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+
+ masm.compareFloat(cond, left, right);
+ masm.cset(output, Assembler::ConditionFromDoubleCondition(cond));
+}
+
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+ const FloatRegister left = ToFloatRegister(comp->left());
+ const FloatRegister right = ToFloatRegister(comp->right());
+ Assembler::DoubleCondition doubleCond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ Assembler::Condition cond =
+ Assembler::ConditionFromDoubleCondition(doubleCond);
+
+ masm.compareDouble(doubleCond, left, right);
+ emitBranch(cond, comp->ifTrue(), comp->ifFalse());
+}
+
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+ const FloatRegister left = ToFloatRegister(comp->left());
+ const FloatRegister right = ToFloatRegister(comp->right());
+ Assembler::DoubleCondition doubleCond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ Assembler::Condition cond =
+ Assembler::ConditionFromDoubleCondition(doubleCond);
+
+ masm.compareFloat(doubleCond, left, right);
+ emitBranch(cond, comp->ifTrue(), comp->ifFalse());
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
+ if (baab->is64()) {
+ ARMRegister regL = toXRegister(baab->left());
+ if (baab->right()->isConstant()) {
+ masm.Tst(regL, Operand(ToInt64(baab->right())));
+ } else {
+ masm.Tst(regL, toXRegister(baab->right()));
+ }
+ } else {
+ ARMRegister regL = toWRegister(baab->left());
+ if (baab->right()->isConstant()) {
+ masm.Tst(regL, Operand(ToInt32(baab->right())));
+ } else {
+ masm.Tst(regL, toWRegister(baab->right()));
+ }
+ }
+ emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ masm.convertUInt32ToDouble(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitNotI(LNotI* ins) {
+ ARMRegister input = toWRegister(ins->input());
+ ARMRegister output = toWRegister(ins->output());
+
+ masm.Cmp(input, ZeroRegister32);
+ masm.Cset(output, Assembler::Zero);
+}
+
+// NZCV
+// NAN -> 0011
+// == -> 0110
+// < -> 1000
+// > -> 0010
+void CodeGenerator::visitNotD(LNotD* ins) {
+ ARMFPRegister input(ToFloatRegister(ins->input()), 64);
+ ARMRegister output = toWRegister(ins->output());
+
+ // Set output to 1 if input compares equal to 0.0, else 0.
+ masm.Fcmp(input, 0.0);
+ masm.Cset(output, Assembler::Equal);
+
+ // Comparison with NaN sets V in the NZCV register.
+ // If the input was NaN, output must now be zero, so it can be incremented.
+ // The instruction is read: "output = if NoOverflow then output else 0+1".
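+ // Hence NotD(NaN) correctly yields 1, matching !NaN being true in JS.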
+ masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
+}
+
+void CodeGenerator::visitNotF(LNotF* ins) {
+ ARMFPRegister input(ToFloatRegister(ins->input()), 32);
+ ARMRegister output = toWRegister(ins->output());
+
+ // Set output to 1 if input compares equal to 0.0, else 0.
+ masm.Fcmp(input, 0.0);
+ masm.Cset(output, Assembler::Equal);
+
+ // Comparison with NaN sets V in the NZCV register.
+ // If the input was NaN, output must now be zero, so it can be incremented.
+ // The instruction is read: "output = if NoOverflow then output else 0+1".
+ masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
+}
+
+void CodeGeneratorARM64::generateInvalidateEpilogue() {
+ // Ensure that there is enough space in the buffer for the OsiPoint patching
+ // to occur. Otherwise, we could overwrite the invalidation epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
+ masm.nop();
+ }
+
+ masm.bind(&invalidate_);
+
+  // Push the return address of the point at which we bailed out onto the
+  // stack.
+ masm.push(lr);
+
+  // Push the Ion script onto the stack (a placeholder that is patched once
+  // the pointer is known).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+ // Jump to the invalidator which will replace the current frame.
+ TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+ masm.jump(thunk);
+}
+
+template <class U>
+Register getBase(U* mir) {
+ switch (mir->base()) {
+ case U::Heap:
+ return HeapReg;
+ }
+ return InvalidReg;
+}
+
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
+ const MAsmJSLoadHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasMemoryBase());
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ Register ptrReg = ToRegister(ptr);
+ Scalar::Type accessType = mir->accessType();
+ bool isFloat = accessType == Scalar::Float32 || accessType == Scalar::Float64;
+ Label done;
+
+ if (mir->needsBoundsCheck()) {
+ Label boundsCheckPassed;
+ Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
+ masm.wasmBoundsCheck32(Assembler::Below, ptrReg, boundsCheckLimitReg,
+ &boundsCheckPassed);
+ // Return a default value in case of a bounds-check failure.
+ if (isFloat) {
+ if (accessType == Scalar::Float32) {
+ masm.loadConstantFloat32(GenericNaN(), ToFloatRegister(ins->output()));
+ } else {
+ masm.loadConstantDouble(GenericNaN(), ToFloatRegister(ins->output()));
+ }
+ } else {
+ masm.Mov(ARMRegister(ToRegister(ins->output()), 64), 0);
+ }
+ masm.jump(&done);
+ masm.bind(&boundsCheckPassed);
+ }
+
+ MemOperand addr(ARMRegister(HeapReg, 64), ARMRegister(ptrReg, 64));
+ switch (accessType) {
+ case Scalar::Int8:
+ masm.Ldrb(toWRegister(ins->output()), addr);
+ masm.Sxtb(toWRegister(ins->output()), toWRegister(ins->output()));
+ break;
+ case Scalar::Uint8:
+ masm.Ldrb(toWRegister(ins->output()), addr);
+ break;
+ case Scalar::Int16:
+ masm.Ldrh(toWRegister(ins->output()), addr);
+ masm.Sxth(toWRegister(ins->output()), toWRegister(ins->output()));
+ break;
+ case Scalar::Uint16:
+ masm.Ldrh(toWRegister(ins->output()), addr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.Ldr(toWRegister(ins->output()), addr);
+ break;
+ case Scalar::Float64:
+ masm.Ldr(ARMFPRegister(ToFloatRegister(ins->output()), 64), addr);
+ break;
+ case Scalar::Float32:
+ masm.Ldr(ARMFPRegister(ToFloatRegister(ins->output()), 32), addr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
+ const MAsmJSStoreHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasMemoryBase());
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ Register ptrReg = ToRegister(ptr);
+
+ Label done;
+ if (mir->needsBoundsCheck()) {
+ Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg, boundsCheckLimitReg,
+ &done);
+ }
+
+ MemOperand addr(ARMRegister(HeapReg, 64), ARMRegister(ptrReg, 64));
+ switch (mir->accessType()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ masm.Strb(toWRegister(ins->value()), addr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.Strh(toWRegister(ins->value()), addr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.Str(toWRegister(ins->value()), addr);
+ break;
+ case Scalar::Float64:
+ masm.Str(ARMFPRegister(ToFloatRegister(ins->value()), 64), addr);
+ break;
+ case Scalar::Float32:
+ masm.Str(ARMFPRegister(ToFloatRegister(ins->value()), 32), addr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MWasmCompareExchangeHeap* mir = ins->mir();
+
+ Register ptr = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());
+
+ if (mir->access().type() == Scalar::Int64) {
+ masm.wasmCompareExchange64(mir->access(), srcAddr, Register64(oldval),
+ Register64(newval), Register64(out));
+ } else {
+ masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, out);
+ }
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MWasmAtomicExchangeHeap* mir = ins->mir();
+
+ Register ptr = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->value());
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());
+
+ if (mir->access().type() == Scalar::Int64) {
+ masm.wasmAtomicExchange64(mir->access(), srcAddr, Register64(oldval),
+ Register64(out));
+ } else {
+ masm.wasmAtomicExchange(mir->access(), srcAddr, oldval, out);
+ }
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+
+ MOZ_ASSERT(mir->hasUses());
+
+ Register ptr = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());
+ AtomicOp op = mir->operation();
+
+ if (mir->access().type() == Scalar::Int64) {
+ masm.wasmAtomicFetchOp64(mir->access(), op, Register64(value), srcAddr,
+ Register64(flagTemp), Register64(out));
+ } else {
+ masm.wasmAtomicFetchOp(mir->access(), op, value, srcAddr, flagTemp, out);
+ }
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+
+ MOZ_ASSERT(!mir->hasUses());
+
+ Register ptr = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());
+ AtomicOp op = mir->operation();
+
+ if (mir->access().type() == Scalar::Int64) {
+ masm.wasmAtomicEffectOp64(mir->access(), op, Register64(value), srcAddr,
+ Register64(flagTemp));
+ } else {
+ masm.wasmAtomicEffectOp(mir->access(), op, value, srcAddr, flagTemp);
+ }
+}
+
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(masm.getStackPointer(), mir->spOffset());
+ if (ins->arg()->isConstant()) {
+ masm.storePtr(ImmWord(ToInt32(ins->arg())), dst);
+ } else if (ins->arg()->isGeneralReg()) {
+ masm.storePtr(ToRegister(ins->arg()), dst);
+ } else {
+ switch (mir->input()->type()) {
+ case MIRType::Double:
+ masm.storeDouble(ToFloatRegister(ins->arg()), dst);
+ return;
+ case MIRType::Float32:
+ masm.storeFloat32(ToFloatRegister(ins->arg()), dst);
+ return;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ masm.storeUnalignedSimd128(ToFloatRegister(ins->arg()), dst);
+ return;
+#endif
+ default:
+ break;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE(
+ "unexpected mir type in WasmStackArg");
+ }
+}
+
+void CodeGenerator::visitUDiv(LUDiv* ins) {
+ MDiv* mir = ins->mir();
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ ARMRegister lhs32 = ARMRegister(lhs, 32);
+ ARMRegister rhs32 = ARMRegister(rhs, 32);
+ ARMRegister output32 = ARMRegister(output, 32);
+
+ // Prevent divide by zero.
+ if (mir->canBeDivideByZero()) {
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.branchTest32(Assembler::NonZero, rhs, rhs, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+        // The ARM64 UDIV instruction returns 0 when dividing by 0, so no
+        // extra test is needed.
+ }
+ } else {
+ bailoutTest32(Assembler::Zero, rhs, rhs, ins->snapshot());
+ }
+ }
+
+ // Unsigned division.
+ masm.Udiv(output32, lhs32, rhs32);
+
+  // If the remainder is > 0, bail out since the result must be a double.
+ if (!mir->canTruncateRemainder()) {
+ Register remainder = ToRegister(ins->remainder());
+ ARMRegister remainder32 = ARMRegister(remainder, 32);
+
+ // Compute the remainder: remainder = lhs - (output * rhs).
+ masm.Msub(remainder32, output32, rhs32, lhs32);
+
+ bailoutTest32(Assembler::NonZero, remainder, remainder, ins->snapshot());
+ }
+
+ // Unsigned div can return a value that's not a signed int32.
+ // If our users aren't expecting that, bail.
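+  // (For example, 0xFFFFFFFF / 1 = 0xFFFFFFFF, which is negative when
+  // reinterpreted as a signed int32.)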
+ if (!mir->isTruncated()) {
+ bailoutTest32(Assembler::Signed, output, output, ins->snapshot());
+ }
+}
+
+void CodeGenerator::visitUMod(LUMod* ins) {
+ MMod* mir = ins->mir();
+ ARMRegister lhs = toWRegister(ins->lhs());
+ ARMRegister rhs = toWRegister(ins->rhs());
+ ARMRegister output = toWRegister(ins->output());
+ Label done;
+
+ if (mir->canBeDivideByZero()) {
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.Cbnz(rhs, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ // Truncated division by zero yields integer zero.
+ masm.Mov(output, rhs);
+ masm.Cbz(rhs, &done);
+ }
+ } else {
+ // Non-truncated division by zero produces a non-integer.
+ masm.Cmp(rhs, Operand(0));
+ bailoutIf(Assembler::Equal, ins->snapshot());
+ }
+ }
+
+ // Unsigned division.
+ masm.Udiv(output, lhs, rhs);
+
+ // Compute the remainder: output = lhs - (output * rhs).
+ masm.Msub(output, output, rhs, lhs);
+
+ if (!mir->isTruncated()) {
+ // Bail if the output would be negative.
+ //
+ // LUMod inputs may be Uint32, so care is taken to ensure the result
+ // is not unexpectedly signed.
+ bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+ }
+
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+ const MEffectiveAddress* mir = ins->mir();
+ const ARMRegister base = toWRegister(ins->base());
+ const ARMRegister index = toWRegister(ins->index());
+ const ARMRegister output = toWRegister(ins->output());
+
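+  // output = base + (index << scale) + displacement, computed in 32 bits.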
+ masm.Add(output, base, Operand(index, vixl::LSL, mir->scale()));
+ masm.Add(output, output, Operand(mir->displacement()));
+}
+
+void CodeGenerator::visitNegI(LNegI* ins) {
+ const ARMRegister input = toWRegister(ins->input());
+ const ARMRegister output = toWRegister(ins->output());
+ masm.Neg(output, input);
+}
+
+void CodeGenerator::visitNegI64(LNegI64* ins) {
+ const ARMRegister input = toXRegister(ins->input());
+ const ARMRegister output = toXRegister(ins->output());
+ masm.Neg(output, input);
+}
+
+void CodeGenerator::visitNegD(LNegD* ins) {
+ const ARMFPRegister input(ToFloatRegister(ins->input()), 64);
+ const ARMFPRegister output(ToFloatRegister(ins->output()), 64);
+ masm.Fneg(output, input);
+}
+
+void CodeGenerator::visitNegF(LNegF* ins) {
+ const ARMFPRegister input(ToFloatRegister(ins->input()), 32);
+ const ARMFPRegister output(ToFloatRegister(ins->output()), 32);
+ masm.Fneg(output, input);
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+ LCompareExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp =
+ lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, temp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, temp, output);
+ }
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+ LAtomicExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp =
+ lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
+ output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
+ output);
+ }
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ auto sync = Synchronization::Load();
+
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ }
+ masm.memoryBarrierAfter(sync);
+
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+
+ // NOTE: the generated code must match the assembly code in gen_store in
+ // GenerateAtomicOperations.py
+ auto sync = Synchronization::Store();
+
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.store64(temp1, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.store64(temp1, dest);
+ }
+ masm.memoryBarrierAfter(sync);
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut(out);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(oldval, temp1);
+ masm.loadBigInt64(newval, tempOut);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = Register64(ToRegister(lir->temp2()));
+ Register out = ToRegister(lir->output());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut = Register64(out);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ }
+}
+
+void CodeGeneratorARM64::emitSimpleBinaryI64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* lir, JSOp op) {
+ const ARMRegister dest = ARMRegister(ToOutRegister64(lir).reg, 64);
+ const ARMRegister lhs =
+ ARMRegister(ToRegister64(lir->getInt64Operand(0)).reg, 64);
+ const LInt64Allocation rhsAlloc = lir->getInt64Operand(INT64_PIECES);
+ Operand rhs;
+
+ if (IsConstant(rhsAlloc)) {
+ rhs = Operand(ToInt64(rhsAlloc));
+ } else {
+ rhs = Operand(ARMRegister(ToRegister64(rhsAlloc).reg, 64));
+ }
+ switch (op) {
+ case JSOp::Add:
+ masm.Add(dest, lhs, rhs);
+ break;
+ case JSOp::Sub:
+ masm.Sub(dest, lhs, rhs);
+ break;
+ case JSOp::BitOr:
+ masm.Orr(dest, lhs, rhs);
+ break;
+ case JSOp::BitXor:
+ masm.Eor(dest, lhs, rhs);
+ break;
+ case JSOp::BitAnd:
+ masm.And(dest, lhs, rhs);
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitAddI64(LAddI64* lir) {
+ emitSimpleBinaryI64(lir, JSOp::Add);
+}
+
+void CodeGenerator::visitClzI64(LClzI64* ins) {
+ masm.clz64(ToRegister64(ins->getInt64Operand(0)), ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* ins) {
+ masm.ctz64(ToRegister64(ins->getInt64Operand(0)), ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitMulI64(LMulI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+ const Register64 output = ToOutRegister64(lir);
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+    // Ad-hoc strength reduction, cf. the x64 code as well as the 32-bit code
+    // higher up in this file. Bug 1712298 will lift this code to the MIR
+    // constant-folding pass or to lowering.
+ //
+ // This is for wasm integers only, so no input guards or overflow checking
+ // are needed.
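+    //
+    // Concretely, for x * c the cases below emit: c == -1 -> neg; c == 0 ->
+    // mov xzr; c == 1 -> move (if needed); c == 2 -> add x, x; c == 2^k with
+    // k > 0 -> lsl #k; otherwise a general 64-bit multiply by the constant.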
+ switch (constant) {
+ case -1:
+ masm.Neg(ARMRegister(output.reg, 64),
+ ARMRegister(ToRegister64(lhs).reg, 64));
+ break;
+ case 0:
+ masm.Mov(ARMRegister(output.reg, 64), xzr);
+ break;
+ case 1:
+ if (ToRegister64(lhs) != output) {
+ masm.move64(ToRegister64(lhs), output);
+ }
+ break;
+ case 2:
+ masm.Add(ARMRegister(output.reg, 64),
+ ARMRegister(ToRegister64(lhs).reg, 64),
+ ARMRegister(ToRegister64(lhs).reg, 64));
+ break;
+ default:
+        // Use a shift if the constant is a positive power of 2.
+ if (constant > 0) {
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.Lsl(ARMRegister(output.reg, 64),
+ ARMRegister(ToRegister64(lhs).reg, 64), shift);
+ break;
+ }
+ }
+ masm.mul64(Imm64(constant), ToRegister64(lhs), output);
+ break;
+ }
+ } else {
+ masm.mul64(ToRegister64(lhs), ToRegister64(rhs), output);
+ }
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ const Register64 input = ToRegister64(lir->getInt64Operand(0));
+ const Register64 output = ToOutRegister64(lir);
+ masm.Cmp(ARMRegister(input.reg, 64), ZeroRegister64);
+ masm.Cset(ARMRegister(output.reg, 64), Assembler::Zero);
+}
+
+void CodeGenerator::visitSubI64(LSubI64* lir) {
+ emitSimpleBinaryI64(lir, JSOp::Sub);
+}
+
+void CodeGenerator::visitPopcntI(LPopcntI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ Register temp = ToRegister(ins->temp0());
+ masm.popcnt32(input, output, temp);
+}
+
+void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
+ emitSimpleBinaryI64(lir, lir->bitop());
+}
+
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+ ARMRegister lhs(ToRegister64(lir->getInt64Operand(LShiftI64::Lhs)).reg, 64);
+ LAllocation* rhsAlloc = lir->getOperand(LShiftI64::Rhs);
+ ARMRegister dest(ToOutRegister64(lir).reg, 64);
+
+ if (rhsAlloc->isConstant()) {
+ int32_t shift = int32_t(rhsAlloc->toConstant()->toInt64() & 0x3F);
+ if (shift == 0) {
+ if (lhs.code() != dest.code()) {
+ masm.Mov(dest, lhs);
+ }
+ } else {
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ masm.Lsl(dest, lhs, shift);
+ break;
+ case JSOp::Rsh:
+ masm.Asr(dest, lhs, shift);
+ break;
+ case JSOp::Ursh:
+ masm.Lsr(dest, lhs, shift);
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+ } else {
+ ARMRegister rhs(ToRegister(rhsAlloc), 64);
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ masm.Lsl(dest, lhs, rhs);
+ break;
+ case JSOp::Rsh:
+ masm.Asr(dest, lhs, rhs);
+ break;
+ case JSOp::Ursh:
+ masm.Lsr(dest, lhs, rhs);
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
+ MOZ_ASSERT(ins->instance()->isBogus());
+ masm.movePtr(HeapReg, ToRegister(ins->output()));
+}
+
+// If we have a constant base ptr, try to add the offset to it, to generate
+// better code when the full address is known. The addition may overflow past
+// 32 bits because the front end does nothing special if the base is a large
+// constant and base+offset overflows; sidestep this by performing the addition
+// anyway, overflowing to 64-bit.
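+//
+// For example, a 32-bit constant base of 0xFFFFFFF0 with an access offset of
+// 0x20 folds to the 64-bit absolute address 0x100000010.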
+
+static Maybe<uint64_t> IsAbsoluteAddress(const LAllocation* ptr,
+ const wasm::MemoryAccessDesc& access) {
+ if (ptr->isConstantValue()) {
+ const MConstant* c = ptr->toConstant();
+ uint64_t base_address = c->type() == MIRType::Int32
+ ? uint64_t(uint32_t(c->toInt32()))
+ : uint64_t(c->toInt64());
+ uint64_t offset = access.offset();
+ return Some(base_address + offset);
+ }
+ return Nothing();
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* lir) {
+ const MWasmLoad* mir = lir->mir();
+
+ if (Maybe<uint64_t> absAddr = IsAbsoluteAddress(lir->ptr(), mir->access())) {
+ masm.wasmLoadAbsolute(mir->access(), HeapReg, absAddr.value(),
+ ToAnyRegister(lir->output()), Register64::Invalid());
+ return;
+ }
+
+ // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+ // true 64-bit value.
+ masm.wasmLoad(mir->access(), HeapReg, ToRegister(lir->ptr()),
+ ToAnyRegister(lir->output()));
+}
+
+void CodeGenerator::visitCopySignD(LCopySignD* ins) {
+ MOZ_ASSERT(ins->getTemp(0)->isBogusTemp());
+ MOZ_ASSERT(ins->getTemp(1)->isBogusTemp());
+ masm.copySignDouble(ToFloatRegister(ins->getOperand(0)),
+ ToFloatRegister(ins->getOperand(1)),
+ ToFloatRegister(ins->getDef(0)));
+}
+
+void CodeGenerator::visitCopySignF(LCopySignF* ins) {
+ MOZ_ASSERT(ins->getTemp(0)->isBogusTemp());
+ MOZ_ASSERT(ins->getTemp(1)->isBogusTemp());
+ masm.copySignFloat32(ToFloatRegister(ins->getOperand(0)),
+ ToFloatRegister(ins->getOperand(1)),
+ ToFloatRegister(ins->getDef(0)));
+}
+
+void CodeGenerator::visitPopcntI64(LPopcntI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToRegister(lir->getTemp(0));
+ masm.popcnt64(input, output, temp);
+}
+
+void CodeGenerator::visitRotateI64(LRotateI64* lir) {
+ bool rotateLeft = lir->mir()->isLeftRotate();
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ const LAllocation* count = lir->count();
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (c == 0) {
+ if (input != output) {
+ masm.move64(input, output);
+ return;
+ }
+ }
+ if (rotateLeft) {
+ masm.rotateLeft64(Imm32(c), input, output, InvalidReg);
+ } else {
+ masm.rotateRight64(Imm32(c), input, output, InvalidReg);
+ }
+ } else {
+ Register c = ToRegister(count);
+ if (rotateLeft) {
+ masm.rotateLeft64(c, input, output, InvalidReg);
+ } else {
+ masm.rotateRight64(c, input, output, InvalidReg);
+ }
+ }
+}
+
+void CodeGenerator::visitWasmStore(LWasmStore* lir) {
+ const MWasmStore* mir = lir->mir();
+
+ if (Maybe<uint64_t> absAddr = IsAbsoluteAddress(lir->ptr(), mir->access())) {
+ masm.wasmStoreAbsolute(mir->access(), ToAnyRegister(lir->value()),
+ Register64::Invalid(), HeapReg, absAddr.value());
+ return;
+ }
+
+ masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), HeapReg,
+ ToRegister(lir->ptr()));
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+
+ if (IsConstant(rhs)) {
+ masm.cmpPtrSet(JSOpToCondition(lir->jsop(), isSigned), lhsReg,
+ ImmWord(ToInt64(rhs)), output);
+ } else if (rhs.value().isGeneralReg()) {
+ masm.cmpPtrSet(JSOpToCondition(lir->jsop(), isSigned), lhsReg,
+ ToRegister64(rhs).reg, output);
+ } else {
+ masm.cmpPtrSet(
+ GetCondForSwappedOperands(JSOpToCondition(lir->jsop(), isSigned)),
+ ToAddress(rhs.value()), lhsReg, output);
+ }
+}
+
+void CodeGenerator::visitWasmSelect(LWasmSelect* lir) {
+ MIRType mirType = lir->mir()->type();
+ Register condReg = ToRegister(lir->condExpr());
+
+ masm.test32(condReg, condReg);
+
+ switch (mirType) {
+ case MIRType::Int32:
+ case MIRType::RefOrNull: {
+ Register outReg = ToRegister(lir->output());
+ Register trueReg = ToRegister(lir->trueExpr());
+ Register falseReg = ToRegister(lir->falseExpr());
+
+ if (mirType == MIRType::Int32) {
+ masm.Csel(ARMRegister(outReg, 32), ARMRegister(trueReg, 32),
+ ARMRegister(falseReg, 32), Assembler::NonZero);
+ } else {
+ masm.Csel(ARMRegister(outReg, 64), ARMRegister(trueReg, 64),
+ ARMRegister(falseReg, 64), Assembler::NonZero);
+ }
+ break;
+ }
+
+ case MIRType::Float32:
+ case MIRType::Double:
+ case MIRType::Simd128: {
+ FloatRegister outReg = ToFloatRegister(lir->output());
+ FloatRegister trueReg = ToFloatRegister(lir->trueExpr());
+ FloatRegister falseReg = ToFloatRegister(lir->falseExpr());
+
+ switch (mirType) {
+ case MIRType::Float32:
+ masm.Fcsel(ARMFPRegister(outReg, 32), ARMFPRegister(trueReg, 32),
+ ARMFPRegister(falseReg, 32), Assembler::NonZero);
+ break;
+ case MIRType::Double:
+ masm.Fcsel(ARMFPRegister(outReg, 64), ARMFPRegister(trueReg, 64),
+ ARMFPRegister(falseReg, 64), Assembler::NonZero);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128: {
+ MOZ_ASSERT(outReg == trueReg);
+ Label done;
+ masm.j(Assembler::NonZero, &done);
+ masm.moveSimd128(falseReg, outReg);
+ masm.bind(&done);
+ break;
+ }
+#endif
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ }
+
+ default: {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+ }
+}
+
+// We expect to handle the cases: the compare type is one of {U,}Int32,
+// {U,}Int64, Float32, or Double, and the select type is one of {U,}Int32,
+// {U,}Int64, Float32, or Double, chosen independently.
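+//
+// The comparison sets the condition flags based on the compare type, and the
+// Csel/Fcsel below is keyed off the select type alone, so the two may differ
+// (e.g. an Int64 comparison selecting between Float32 values).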
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ MCompare::CompareType compTy = ins->compareType();
+
+ // Set flag.
+ if (compTy == MCompare::Compare_Int32 || compTy == MCompare::Compare_UInt32) {
+ Register lhs = ToRegister(ins->leftExpr());
+ if (ins->rightExpr()->isConstant()) {
+ masm.cmp32(lhs, Imm32(ins->rightExpr()->toConstant()->toInt32()));
+ } else {
+ masm.cmp32(lhs, ToRegister(ins->rightExpr()));
+ }
+ } else if (compTy == MCompare::Compare_Int64 ||
+ compTy == MCompare::Compare_UInt64) {
+ Register lhs = ToRegister(ins->leftExpr());
+ if (ins->rightExpr()->isConstant()) {
+ masm.cmpPtr(lhs, Imm64(ins->rightExpr()->toConstant()->toInt64()));
+ } else {
+ masm.cmpPtr(lhs, ToRegister(ins->rightExpr()));
+ }
+ } else if (compTy == MCompare::Compare_Float32) {
+ masm.compareFloat(JSOpToDoubleCondition(ins->jsop()),
+ ToFloatRegister(ins->leftExpr()),
+ ToFloatRegister(ins->rightExpr()));
+ } else if (compTy == MCompare::Compare_Double) {
+ masm.compareDouble(JSOpToDoubleCondition(ins->jsop()),
+ ToFloatRegister(ins->leftExpr()),
+ ToFloatRegister(ins->rightExpr()));
+ } else {
+ // Ref types not supported yet; v128 is not yet observed to be worth
+ // optimizing.
+ MOZ_CRASH("CodeGenerator::visitWasmCompareAndSelect: unexpected type (1)");
+ }
+
+ // Act on flag.
+ Assembler::Condition cond;
+ if (compTy == MCompare::Compare_Float32 ||
+ compTy == MCompare::Compare_Double) {
+ cond = Assembler::ConditionFromDoubleCondition(
+ JSOpToDoubleCondition(ins->jsop()));
+ } else {
+ cond = JSOpToCondition(compTy, ins->jsop());
+ }
+ MIRType insTy = ins->mir()->type();
+ if (insTy == MIRType::Int32 || insTy == MIRType::Int64) {
+ Register destReg = ToRegister(ins->output());
+ Register trueReg = ToRegister(ins->ifTrueExpr());
+ Register falseReg = ToRegister(ins->ifFalseExpr());
+ size_t size = insTy == MIRType::Int32 ? 32 : 64;
+ masm.Csel(ARMRegister(destReg, size), ARMRegister(trueReg, size),
+ ARMRegister(falseReg, size), cond);
+ } else if (insTy == MIRType::Float32 || insTy == MIRType::Double) {
+ FloatRegister destReg = ToFloatRegister(ins->output());
+ FloatRegister trueReg = ToFloatRegister(ins->ifTrueExpr());
+ FloatRegister falseReg = ToFloatRegister(ins->ifFalseExpr());
+ size_t size = MIRTypeToSize(insTy) * 8;
+ masm.Fcsel(ARMFPRegister(destReg, size), ARMFPRegister(trueReg, size),
+ ARMFPRegister(falseReg, size), cond);
+ } else {
+ // See above.
+ MOZ_CRASH("CodeGenerator::visitWasmCompareAndSelect: unexpected type (2)");
+ }
+}
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
+ const MWasmLoad* mir = lir->mir();
+
+ if (Maybe<uint64_t> absAddr = IsAbsoluteAddress(lir->ptr(), mir->access())) {
+ masm.wasmLoadAbsolute(mir->access(), HeapReg, absAddr.value(),
+ AnyRegister(), ToOutRegister64(lir));
+ return;
+ }
+
+ masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()),
+ ToOutRegister64(lir));
+}
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+ const MWasmStore* mir = lir->mir();
+
+ if (Maybe<uint64_t> absAddr = IsAbsoluteAddress(lir->ptr(), mir->access())) {
+ masm.wasmStoreAbsolute(mir->access(), AnyRegister(),
+ ToRegister64(lir->value()), HeapReg,
+ absAddr.value());
+ return;
+ }
+
+ masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+ ToRegister(lir->ptr()));
+}
+
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+ masm.memoryBarrier(ins->type());
+}
+
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
+ masm.Adds(ARMRegister(out, 32), ARMRegister(base, 32),
+ Operand(mir->offset()));
+ OutOfLineAbortingWasmTrap* ool = new (alloc())
+ OutOfLineAbortingWasmTrap(mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
+ addOutOfLineCode(ool, mir);
+ masm.j(Assembler::CarrySet, ool->entry());
+}
+
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register64 base = ToRegister64(lir->base());
+ Register64 out = ToOutRegister64(lir);
+
+ masm.Adds(ARMRegister(out.reg, 64), ARMRegister(base.reg, 64),
+ Operand(mir->offset()));
+ OutOfLineAbortingWasmTrap* ool = new (alloc())
+ OutOfLineAbortingWasmTrap(mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
+ addOutOfLineCode(ool, mir);
+ masm.j(Assembler::CarrySet, ool->entry());
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ Register condReg = ToRegister(lir->condExpr());
+ Register64 trueReg = ToRegister64(lir->trueExpr());
+ Register64 falseReg = ToRegister64(lir->falseExpr());
+ Register64 outReg = ToOutRegister64(lir);
+
+ masm.test32(condReg, condReg);
+ masm.Csel(ARMRegister(outReg.reg, 64), ARMRegister(trueReg.reg, 64),
+ ARMRegister(falseReg.reg, 64), Assembler::NonZero);
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ Register64 output = ToOutRegister64(ins);
+ switch (ins->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move8To64SignExtend(input.reg, output);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move16To64SignExtend(input.reg, output);
+ break;
+ case MSignExtendInt64::Word:
+ masm.move32To64SignExtend(input.reg, output);
+ break;
+ }
+}
+
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+ mozilla::DebugOnly<MIRType> from = ins->input()->type();
+
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.moveFloat32ToGPR(ToFloatRegister(lir->input()),
+ ToRegister(lir->output()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.moveGPRToFloat32(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(masm.getStackPointer(), mir->spOffset());
+ if (IsConstant(ins->arg())) {
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ } else {
+ masm.store64(ToRegister64(ins->arg()), dst);
+ }
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ MBasicBlock* mirTrue = lir->ifTrue();
+ MBasicBlock* mirFalse = lir->ifFalse();
+
+ // Jump to the True block if NonZero.
+ // Jump to the False block if Zero.
+ if (isNextBlock(mirFalse->lir())) {
+ masm.Cbnz(ARMRegister(input.reg, 64), getJumpLabelForBranch(mirTrue));
+ } else {
+ masm.Cbz(ARMRegister(input.reg, 64), getJumpLabelForBranch(mirFalse));
+ if (!isNextBlock(mirTrue->lir())) {
+ jumpToBlock(mirTrue);
+ }
+ }
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ if (input->isMemory()) {
+ masm.load32(ToAddress(input), output);
+ } else {
+ // Really this is a 64-bit input register and we could use move64To32.
+ masm.Mov(ARMRegister(output, 32), ARMRegister(ToRegister(input), 32));
+ }
+ } else {
+ MOZ_CRASH("Not implemented.");
+ }
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ Register input = ToRegister(lir->getOperand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ if (lir->mir()->isUnsigned()) {
+ masm.move32To64ZeroExtend(input, output);
+ } else {
+ masm.move32To64SignExtend(input, output);
+ }
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
+ // Generates no code on this platform because the input is assumed to have
+ // canonical form.
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(ToRegister(lir->input()) == output);
+ masm.debugAssertCanonicalInt32(output);
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ // Generates no code on this platform because the input is assumed to have
+ // canonical form.
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(ToRegister(lir->input()) == output);
+ masm.debugAssertCanonicalInt32(output);
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* comp) {
+ const MCompare* mir = comp->cmpMir();
+ const mozilla::DebugOnly<MCompare::CompareType> type = mir->compareType();
+ const LInt64Allocation left =
+ comp->getInt64Operand(LCompareI64AndBranch::Lhs);
+ const LInt64Allocation right =
+ comp->getInt64Operand(LCompareI64AndBranch::Rhs);
+
+ MOZ_ASSERT(type == MCompare::Compare_Int64 ||
+ type == MCompare::Compare_UInt64);
+ if (IsConstant(right)) {
+ masm.Cmp(ARMRegister(ToRegister64(left).reg, 64), ToInt64(right));
+ } else {
+ masm.Cmp(ARMRegister(ToRegister64(left).reg, 64),
+ ARMRegister(ToRegister64(right).reg, 64));
+ }
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(comp->jsop(), isSigned);
+ emitBranch(cond, comp->ifTrue(), comp->ifFalse());
+}
+
+void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
+ auto input = ToFloatRegister(lir->input());
+ auto output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ if (mir->isUnsigned()) {
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+ return;
+ }
+
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ Label* oolRejoin = ool->rejoin();
+ bool isSaturating = mir->isSaturating();
+
+ if (fromType == MIRType::Double) {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ } else {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ }
+}
+
+void CodeGeneratorARM64::visitOutOfLineWasmTruncateCheck(
+ OutOfLineWasmTruncateCheck* ool) {
+ FloatRegister input = ool->input();
+ Register output = ool->output();
+ Register64 output64 = ool->output64();
+ MIRType fromType = ool->fromType();
+ MIRType toType = ool->toType();
+ Label* oolRejoin = ool->rejoin();
+ TruncFlags flags = ool->flags();
+ wasm::BytecodeOffset off = ool->bytecodeOffset();
+
+ if (fromType == MIRType::Float32) {
+ if (toType == MIRType::Int32) {
+ masm.oolWasmTruncateCheckF32ToI32(input, output, flags, off, oolRejoin);
+ } else if (toType == MIRType::Int64) {
+ masm.oolWasmTruncateCheckF32ToI64(input, output64, flags, off, oolRejoin);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ } else if (fromType == MIRType::Double) {
+ if (toType == MIRType::Int32) {
+ masm.oolWasmTruncateCheckF64ToI32(input, output, flags, off, oolRejoin);
+ } else if (toType == MIRType::Int64) {
+ masm.oolWasmTruncateCheckF64ToI64(input, output64, flags, off, oolRejoin);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.moveDoubleToGPR64(ToFloatRegister(lir->input()), ToOutRegister64(lir));
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.moveGPR64ToDouble(
+ ToRegister64(lir->getInt64Operand(LWasmReinterpretFromI64::Input)),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+ LAtomicTypedArrayElementBinop* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->temp1());
+ Register outTemp =
+ lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, flagTemp, outTemp,
+ output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, flagTemp, outTemp,
+ output);
+ }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+ LAtomicTypedArrayElementBinopForEffect* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->flagTemp());
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, flagTemp);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, flagTemp);
+ }
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToDouble(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToDouble(input, output);
+ }
+ } else {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToFloat32(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToFloat32(input, output);
+ }
+ }
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label isNotDivByZero;
+ masm.Cbnz(ARMRegister(rhs, 64), &isNotDivByZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&isNotDivByZero);
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label noOverflow;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &noOverflow);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &noOverflow);
+ if (lir->mir()->isMod()) {
+ masm.movePtr(ImmWord(0), output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&noOverflow);
+ }
+
+ masm.Sdiv(ARMRegister(output, 64), ARMRegister(lhs, 64),
+ ARMRegister(rhs, 64));
+ if (lir->mir()->isMod()) {
+ masm.Msub(ARMRegister(output, 64), ARMRegister(output, 64),
+ ARMRegister(rhs, 64), ARMRegister(lhs, 64));
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label isNotDivByZero;
+ masm.Cbnz(ARMRegister(rhs, 64), &isNotDivByZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&isNotDivByZero);
+ }
+
+ masm.Udiv(ARMRegister(output, 64), ARMRegister(lhs, 64),
+ ARMRegister(rhs, 64));
+ if (lir->mir()->isMod()) {
+ masm.Msub(ARMRegister(output, 64), ARMRegister(output, 64),
+ ARMRegister(rhs, 64), ARMRegister(lhs, 64));
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitSimd128(LSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantSimd128(ins->simd128(), ToFloatRegister(out));
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128Bitselect: {
+ FloatRegister lhs = ToFloatRegister(ins->v0());
+ FloatRegister rhs = ToFloatRegister(ins->v1());
+ FloatRegister controlDest = ToFloatRegister(ins->v2());
+ masm.bitwiseSelectSimd128(lhs, rhs, controlDest);
+ break;
+ }
+ case wasm::SimdOp::F32x4RelaxedFma:
+ masm.fmaFloat32x4(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()));
+ break;
+ case wasm::SimdOp::F32x4RelaxedFnma:
+ masm.fnmaFloat32x4(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()));
+ break;
+ case wasm::SimdOp::F64x2RelaxedFma:
+ masm.fmaFloat64x2(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()));
+ break;
+ case wasm::SimdOp::F64x2RelaxedFnma:
+ masm.fnmaFloat64x2(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()));
+ break;
+ case wasm::SimdOp::I8x16RelaxedLaneSelect:
+ case wasm::SimdOp::I16x8RelaxedLaneSelect:
+ case wasm::SimdOp::I32x4RelaxedLaneSelect:
+ case wasm::SimdOp::I64x2RelaxedLaneSelect: {
+ FloatRegister lhs = ToFloatRegister(ins->v0());
+ FloatRegister rhs = ToFloatRegister(ins->v1());
+ FloatRegister maskDest = ToFloatRegister(ins->v2());
+ masm.laneSelectSimd128(maskDest, lhs, rhs, maskDest);
+ break;
+ }
+ case wasm::SimdOp::I32x4DotI8x16I7x16AddS:
+ masm.dotInt8x16Int7x16ThenAdd(
+ ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()), ToFloatRegister(ins->temp()));
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ FloatRegister rhs = ToFloatRegister(ins->rhs());
+ FloatRegister dest = ToFloatRegister(ins->output());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128And:
+ masm.bitwiseAndSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::V128Or:
+ masm.bitwiseOrSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::V128Xor:
+ masm.bitwiseXorSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::V128AndNot:
+ masm.bitwiseAndNotSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16AvgrU:
+ masm.unsignedAverageInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8AvgrU:
+ masm.unsignedAverageInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Add:
+ masm.addInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16AddSatS:
+ masm.addSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16AddSatU:
+ masm.unsignedAddSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Sub:
+ masm.subInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16SubSatS:
+ masm.subSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16SubSatU:
+ masm.unsignedSubSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MinS:
+ masm.minInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MinU:
+ masm.unsignedMinInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MaxS:
+ masm.maxInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MaxU:
+ masm.unsignedMaxInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Add:
+ masm.addInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8AddSatS:
+ masm.addSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8AddSatU:
+ masm.unsignedAddSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Sub:
+ masm.subInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8SubSatS:
+ masm.subSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8SubSatU:
+ masm.unsignedSubSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Mul:
+ masm.mulInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MinS:
+ masm.minInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MinU:
+ masm.unsignedMinInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MaxS:
+ masm.maxInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MaxU:
+ masm.unsignedMaxInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Add:
+ masm.addInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Sub:
+ masm.subInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Mul:
+ masm.mulInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MinS:
+ masm.minInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MinU:
+ masm.unsignedMinInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MaxS:
+ masm.maxInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MaxU:
+ masm.unsignedMaxInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Add:
+ masm.addInt64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Sub:
+ masm.subInt64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Mul: {
+ auto temp1 = ToFloatRegister(ins->getTemp(0));
+ auto temp2 = ToFloatRegister(ins->getTemp(1));
+ masm.mulInt64x2(lhs, rhs, dest, temp1, temp2);
+ break;
+ }
+ case wasm::SimdOp::F32x4Add:
+ masm.addFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Sub:
+ masm.subFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Mul:
+ masm.mulFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Div:
+ masm.divFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Min:
+ masm.minFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Max:
+ masm.maxFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Add:
+ masm.addFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Sub:
+ masm.subFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Mul:
+ masm.mulFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Div:
+ masm.divFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Min:
+ masm.minFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Max:
+ masm.maxFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Swizzle:
+ masm.swizzleInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16RelaxedSwizzle:
+ masm.swizzleInt8x16Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16NarrowI16x8S:
+ masm.narrowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16NarrowI16x8U:
+ masm.unsignedNarrowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8NarrowI32x4S:
+ masm.narrowInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8NarrowI32x4U:
+ masm.unsignedNarrowInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Eq:
+ masm.compareInt8x16(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Ne:
+ masm.compareInt8x16(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16LtS:
+ masm.compareInt8x16(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16GtS:
+ masm.compareInt8x16(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16LeS:
+ masm.compareInt8x16(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16GeS:
+ masm.compareInt8x16(Assembler::GreaterThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16LtU:
+ masm.compareInt8x16(Assembler::Below, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16GtU:
+ masm.compareInt8x16(Assembler::Above, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16LeU:
+ masm.compareInt8x16(Assembler::BelowOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16GeU:
+ masm.compareInt8x16(Assembler::AboveOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Eq:
+ masm.compareInt16x8(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Ne:
+ masm.compareInt16x8(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8LtS:
+ masm.compareInt16x8(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8GtS:
+ masm.compareInt16x8(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8LeS:
+ masm.compareInt16x8(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8GeS:
+ masm.compareInt16x8(Assembler::GreaterThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8LtU:
+ masm.compareInt16x8(Assembler::Below, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8GtU:
+ masm.compareInt16x8(Assembler::Above, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8LeU:
+ masm.compareInt16x8(Assembler::BelowOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8GeU:
+ masm.compareInt16x8(Assembler::AboveOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Eq:
+ masm.compareInt32x4(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Ne:
+ masm.compareInt32x4(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4LtS:
+ masm.compareInt32x4(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4GtS:
+ masm.compareInt32x4(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4LeS:
+ masm.compareInt32x4(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4GeS:
+ masm.compareInt32x4(Assembler::GreaterThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4LtU:
+ masm.compareInt32x4(Assembler::Below, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4GtU:
+ masm.compareInt32x4(Assembler::Above, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4LeU:
+ masm.compareInt32x4(Assembler::BelowOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4GeU:
+ masm.compareInt32x4(Assembler::AboveOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Eq:
+ masm.compareInt64x2(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2LtS:
+ masm.compareInt64x2(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2GtS:
+ masm.compareInt64x2(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2LeS:
+ masm.compareInt64x2(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2GeS:
+ masm.compareInt64x2(Assembler::GreaterThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Ne:
+ masm.compareInt64x2(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Eq:
+ masm.compareFloat32x4(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Ne:
+ masm.compareFloat32x4(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Lt:
+ masm.compareFloat32x4(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Gt:
+ masm.compareFloat32x4(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Le:
+ masm.compareFloat32x4(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Ge:
+ masm.compareFloat32x4(Assembler::GreaterThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Eq:
+ masm.compareFloat64x2(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Ne:
+ masm.compareFloat64x2(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Lt:
+ masm.compareFloat64x2(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Gt:
+ masm.compareFloat64x2(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Le:
+ masm.compareFloat64x2(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Ge:
+ masm.compareFloat64x2(Assembler::GreaterThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4PMax:
+ masm.pseudoMaxFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4PMin:
+ masm.pseudoMinFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2PMax:
+ masm.pseudoMaxFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2PMin:
+ masm.pseudoMinFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4DotI16x8S:
+ masm.widenDotInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtmulLowI8x16S:
+ masm.extMulLowInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtmulHighI8x16S:
+ masm.extMulHighInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtmulLowI8x16U:
+ masm.unsignedExtMulLowInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtmulHighI8x16U:
+ masm.unsignedExtMulHighInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtmulLowI16x8S:
+ masm.extMulLowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtmulHighI16x8S:
+ masm.extMulHighInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtmulLowI16x8U:
+ masm.unsignedExtMulLowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtmulHighI16x8U:
+ masm.unsignedExtMulHighInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtmulLowI32x4S:
+ masm.extMulLowInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtmulHighI32x4S:
+ masm.extMulHighInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtmulLowI32x4U:
+ masm.unsignedExtMulLowInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtmulHighI32x4U:
+ masm.unsignedExtMulHighInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Q15MulrSatS:
+ masm.q15MulrSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4RelaxedMin:
+ masm.minFloat32x4Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4RelaxedMax:
+ masm.maxFloat32x4Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2RelaxedMin:
+ masm.minFloat64x2Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2RelaxedMax:
+ masm.maxFloat64x2Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8RelaxedQ15MulrS:
+ masm.q15MulrInt16x8Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8DotI8x16I7x16S:
+ masm.dotInt8x16Int7x16(lhs, rhs, dest);
+ break;
+ default:
+ MOZ_CRASH("Binary SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+ LWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmVariableShiftSimd128(
+ LWasmVariableShiftSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ FloatRegister dest = ToFloatRegister(ins->output());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Shl:
+ masm.leftShiftInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16ShrS:
+ masm.rightShiftInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16ShrU:
+ masm.unsignedRightShiftInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Shl:
+ masm.leftShiftInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ShrS:
+ masm.rightShiftInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ShrU:
+ masm.unsignedRightShiftInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Shl:
+ masm.leftShiftInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ShrS:
+ masm.rightShiftInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ShrU:
+ masm.unsignedRightShiftInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Shl:
+ masm.leftShiftInt64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ShrS:
+ masm.rightShiftInt64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ShrU:
+ masm.unsignedRightShiftInt64x2(lhs, rhs, dest);
+ break;
+ default:
+ MOZ_CRASH("Shift SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmConstantShiftSimd128(
+ LWasmConstantShiftSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+ int32_t shift = ins->shift();
+
+ if (shift == 0) {
+ if (src != dest) {
+ masm.moveSimd128(src, dest);
+ }
+ return;
+ }
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Shl:
+ masm.leftShiftInt8x16(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I8x16ShrS:
+ masm.rightShiftInt8x16(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I8x16ShrU:
+ masm.unsignedRightShiftInt8x16(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I16x8Shl:
+ masm.leftShiftInt16x8(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I16x8ShrS:
+ masm.rightShiftInt16x8(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I16x8ShrU:
+ masm.unsignedRightShiftInt16x8(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I32x4Shl:
+ masm.leftShiftInt32x4(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I32x4ShrS:
+ masm.rightShiftInt32x4(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I32x4ShrU:
+ masm.unsignedRightShiftInt32x4(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I64x2Shl:
+ masm.leftShiftInt64x2(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I64x2ShrS:
+ masm.rightShiftInt64x2(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I64x2ShrU:
+ masm.unsignedRightShiftInt64x2(Imm32(shift), src, dest);
+ break;
+ default:
+ MOZ_CRASH("Shift SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmSignReplicationSimd128(
+ LWasmSignReplicationSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister lhs = ToFloatRegister(ins->lhs());
+ FloatRegister rhs = ToFloatRegister(ins->rhs());
+ FloatRegister dest = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ SimdConstant control = ins->control();
+ switch (ins->op()) {
+ case SimdShuffleOp::BLEND_8x16: {
+ masm.blendInt8x16(reinterpret_cast<const uint8_t*>(control.asInt8x16()),
+ lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::BLEND_16x8: {
+ masm.blendInt16x8(reinterpret_cast<const uint16_t*>(control.asInt16x8()),
+ lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::CONCAT_RIGHT_SHIFT_8x16: {
+ int8_t count = 16 - control.asInt8x16()[0];
+ MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
+ masm.concatAndRightShiftSimd128(lhs, rhs, dest, count);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_HIGH_8x16: {
+ masm.interleaveHighInt8x16(lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_HIGH_16x8: {
+ masm.interleaveHighInt16x8(lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_HIGH_32x4: {
+ masm.interleaveHighInt32x4(lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_HIGH_64x2: {
+ masm.interleaveHighInt64x2(lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_LOW_8x16: {
+ masm.interleaveLowInt8x16(lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_LOW_16x8: {
+ masm.interleaveLowInt16x8(lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_LOW_32x4: {
+ masm.interleaveLowInt32x4(lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_LOW_64x2: {
+ masm.interleaveLowInt64x2(lhs, rhs, dest);
+ break;
+ }
+ case SimdShuffleOp::SHUFFLE_BLEND_8x16: {
+ masm.shuffleInt8x16(reinterpret_cast<const uint8_t*>(control.asInt8x16()),
+ lhs, rhs, dest);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unsupported SIMD shuffle operation");
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+ SimdConstant control = ins->control();
+ switch (ins->op()) {
+ case SimdPermuteOp::BROADCAST_8x16: {
+ const SimdConstant::I8x16& mask = control.asInt8x16();
+ int8_t source = mask[0];
+ masm.splatX16(source, src, dest);
+ break;
+ }
+ case SimdPermuteOp::BROADCAST_16x8: {
+ const SimdConstant::I16x8& mask = control.asInt16x8();
+ int16_t source = mask[0];
+ masm.splatX8(source, src, dest);
+ break;
+ }
+ case SimdPermuteOp::MOVE: {
+ masm.moveSimd128(src, dest);
+ break;
+ }
+ case SimdPermuteOp::PERMUTE_8x16: {
+ const SimdConstant::I8x16& mask = control.asInt8x16();
+# ifdef DEBUG
+ mozilla::DebugOnly<int> i;
+ for (i = 0; i < 16 && mask[i] == i; i++) {
+ }
+ MOZ_ASSERT(i < 16, "Should have been a MOVE operation");
+# endif
+ masm.permuteInt8x16(reinterpret_cast<const uint8_t*>(mask), src, dest);
+ break;
+ }
+ case SimdPermuteOp::PERMUTE_16x8: {
+ const SimdConstant::I16x8& mask = control.asInt16x8();
+# ifdef DEBUG
+ mozilla::DebugOnly<int> i;
+ for (i = 0; i < 8 && mask[i] == i; i++) {
+ }
+ MOZ_ASSERT(i < 8, "Should have been a MOVE operation");
+# endif
+ masm.permuteInt16x8(reinterpret_cast<const uint16_t*>(mask), src, dest);
+ break;
+ }
+ case SimdPermuteOp::PERMUTE_32x4: {
+ const SimdConstant::I32x4& mask = control.asInt32x4();
+# ifdef DEBUG
+ mozilla::DebugOnly<int> i;
+ for (i = 0; i < 4 && mask[i] == i; i++) {
+ }
+ MOZ_ASSERT(i < 4, "Should have been a MOVE operation");
+# endif
+ masm.permuteInt32x4(reinterpret_cast<const uint32_t*>(mask), src, dest);
+ break;
+ }
+ case SimdPermuteOp::ROTATE_RIGHT_8x16: {
+ int8_t count = control.asInt8x16()[0];
+ MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
+ masm.rotateRightSimd128(src, dest, count);
+ break;
+ }
+ case SimdPermuteOp::SHIFT_LEFT_8x16: {
+ int8_t count = control.asInt8x16()[0];
+ MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
+ masm.leftShiftSimd128(Imm32(count), src, dest);
+ break;
+ }
+ case SimdPermuteOp::SHIFT_RIGHT_8x16: {
+ int8_t count = control.asInt8x16()[0];
+ MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
+ masm.rightShiftSimd128(Imm32(count), src, dest);
+ break;
+ }
+ case SimdPermuteOp::REVERSE_16x8:
+ masm.reverseInt16x8(src, dest);
+ break;
+ case SimdPermuteOp::REVERSE_32x4:
+ masm.reverseInt32x4(src, dest);
+ break;
+ case SimdPermuteOp::REVERSE_64x2:
+ masm.reverseInt64x2(src, dest);
+ break;
+ default: {
+ MOZ_CRASH("Unsupported SIMD permutation operation");
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ToFloatRegister(ins->lhs()) == ToFloatRegister(ins->output()));
+ FloatRegister lhsDest = ToFloatRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ uint32_t laneIndex = ins->laneIndex();
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16ReplaceLane:
+ masm.replaceLaneInt8x16(laneIndex, ToRegister(rhs), lhsDest);
+ break;
+ case wasm::SimdOp::I16x8ReplaceLane:
+ masm.replaceLaneInt16x8(laneIndex, ToRegister(rhs), lhsDest);
+ break;
+ case wasm::SimdOp::I32x4ReplaceLane:
+ masm.replaceLaneInt32x4(laneIndex, ToRegister(rhs), lhsDest);
+ break;
+ case wasm::SimdOp::F32x4ReplaceLane:
+ masm.replaceLaneFloat32x4(laneIndex, ToFloatRegister(rhs), lhsDest);
+ break;
+ case wasm::SimdOp::F64x2ReplaceLane:
+ masm.replaceLaneFloat64x2(laneIndex, ToFloatRegister(rhs), lhsDest);
+ break;
+ default:
+ MOZ_CRASH("ReplaceLane SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+ LWasmReplaceInt64LaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_RELEASE_ASSERT(ins->simdOp() == wasm::SimdOp::I64x2ReplaceLane);
+ MOZ_ASSERT(ToFloatRegister(ins->lhs()) == ToFloatRegister(ins->output()));
+ masm.replaceLaneInt64x2(ins->laneIndex(), ToRegister64(ins->rhs()),
+ ToFloatRegister(ins->lhs()));
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister dest = ToFloatRegister(ins->output());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Splat:
+ masm.splatX16(ToRegister(ins->src()), dest);
+ break;
+ case wasm::SimdOp::I16x8Splat:
+ masm.splatX8(ToRegister(ins->src()), dest);
+ break;
+ case wasm::SimdOp::I32x4Splat:
+ masm.splatX4(ToRegister(ins->src()), dest);
+ break;
+ case wasm::SimdOp::F32x4Splat:
+ masm.splatX4(ToFloatRegister(ins->src()), dest);
+ break;
+ case wasm::SimdOp::F64x2Splat:
+ masm.splatX2(ToFloatRegister(ins->src()), dest);
+ break;
+ default:
+ MOZ_CRASH("ScalarToSimd128 SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ Register64 src = ToRegister64(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I64x2Splat:
+ masm.splatX2(src, dest);
+ break;
+ case wasm::SimdOp::V128Load8x8S:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.widenLowInt8x16(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load8x8U:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.unsignedWidenLowInt8x16(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load16x4S:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.widenLowInt16x8(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load16x4U:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.unsignedWidenLowInt16x8(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load32x2S:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.widenLowInt32x4(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load32x2U:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.unsignedWidenLowInt32x4(dest, dest);
+ break;
+ default:
+ MOZ_CRASH("Int64ToSimd128 SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Neg:
+ masm.negInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8Neg:
+ masm.negInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtendLowI8x16S:
+ masm.widenLowInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtendHighI8x16S:
+ masm.widenHighInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtendLowI8x16U:
+ masm.unsignedWidenLowInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtendHighI8x16U:
+ masm.unsignedWidenHighInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I32x4Neg:
+ masm.negInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtendLowI16x8S:
+ masm.widenLowInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtendHighI16x8S:
+ masm.widenHighInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtendLowI16x8U:
+ masm.unsignedWidenLowInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtendHighI16x8U:
+ masm.unsignedWidenHighInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4TruncSatF32x4S:
+ masm.truncSatFloat32x4ToInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I32x4TruncSatF32x4U:
+ masm.unsignedTruncSatFloat32x4ToInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2Neg:
+ masm.negInt64x2(src, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtendLowI32x4S:
+ masm.widenLowInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtendHighI32x4S:
+ masm.widenHighInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtendLowI32x4U:
+ masm.unsignedWidenLowInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtendHighI32x4U:
+ masm.unsignedWidenHighInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Abs:
+ masm.absFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Neg:
+ masm.negFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Sqrt:
+ masm.sqrtFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4ConvertI32x4S:
+ masm.convertInt32x4ToFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4ConvertI32x4U:
+ masm.unsignedConvertInt32x4ToFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Abs:
+ masm.absFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Neg:
+ masm.negFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Sqrt:
+ masm.sqrtFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::V128Not:
+ masm.bitwiseNotSimd128(src, dest);
+ break;
+ case wasm::SimdOp::I8x16Abs:
+ masm.absInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8Abs:
+ masm.absInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4Abs:
+ masm.absInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2Abs:
+ masm.absInt64x2(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Ceil:
+ masm.ceilFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Floor:
+ masm.floorFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Trunc:
+ masm.truncFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Nearest:
+ masm.nearestFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Ceil:
+ masm.ceilFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Floor:
+ masm.floorFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Trunc:
+ masm.truncFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Nearest:
+ masm.nearestFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F32x4DemoteF64x2Zero:
+ masm.convertFloat64x2ToFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F64x2PromoteLowF32x4:
+ masm.convertFloat32x4ToFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2ConvertLowI32x4S:
+ masm.convertInt32x4ToFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2ConvertLowI32x4U:
+ masm.unsignedConvertInt32x4ToFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::I32x4TruncSatF64x2SZero:
+ masm.truncSatFloat64x2ToInt32x4(src, dest, ToFloatRegister(ins->temp()));
+ break;
+ case wasm::SimdOp::I32x4TruncSatF64x2UZero:
+ masm.unsignedTruncSatFloat64x2ToInt32x4(src, dest,
+ ToFloatRegister(ins->temp()));
+ break;
+ case wasm::SimdOp::I16x8ExtaddPairwiseI8x16S:
+ masm.extAddPairwiseInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtaddPairwiseI8x16U:
+ masm.unsignedExtAddPairwiseInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtaddPairwiseI16x8S:
+ masm.extAddPairwiseInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtaddPairwiseI16x8U:
+ masm.unsignedExtAddPairwiseInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I8x16Popcnt:
+ masm.popcntInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I32x4RelaxedTruncF32x4S:
+ masm.truncFloat32x4ToInt32x4Relaxed(src, dest);
+ break;
+ case wasm::SimdOp::I32x4RelaxedTruncF32x4U:
+ masm.unsignedTruncFloat32x4ToInt32x4Relaxed(src, dest);
+ break;
+ case wasm::SimdOp::I32x4RelaxedTruncF64x2SZero:
+ masm.truncFloat64x2ToInt32x4Relaxed(src, dest);
+ break;
+ case wasm::SimdOp::I32x4RelaxedTruncF64x2UZero:
+ masm.unsignedTruncFloat64x2ToInt32x4Relaxed(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Unary SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ const LDefinition* dest = ins->output();
+ uint32_t imm = ins->imm();
+ FloatRegister temp = ToTempFloatRegisterOrInvalid(ins->getTemp(0));
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128AnyTrue:
+ masm.anyTrueSimd128(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I8x16AllTrue:
+ masm.allTrueInt8x16(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I16x8AllTrue:
+ masm.allTrueInt16x8(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I32x4AllTrue:
+ masm.allTrueInt32x4(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I64x2AllTrue:
+ masm.allTrueInt64x2(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I8x16Bitmask:
+ masm.bitmaskInt8x16(src, ToRegister(dest), temp);
+ break;
+ case wasm::SimdOp::I16x8Bitmask:
+ masm.bitmaskInt16x8(src, ToRegister(dest), temp);
+ break;
+ case wasm::SimdOp::I32x4Bitmask:
+ masm.bitmaskInt32x4(src, ToRegister(dest), temp);
+ break;
+ case wasm::SimdOp::I64x2Bitmask:
+ masm.bitmaskInt64x2(src, ToRegister(dest), temp);
+ break;
+ case wasm::SimdOp::I8x16ExtractLaneS:
+ masm.extractLaneInt8x16(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I8x16ExtractLaneU:
+ masm.unsignedExtractLaneInt8x16(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I16x8ExtractLaneS:
+ masm.extractLaneInt16x8(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I16x8ExtractLaneU:
+ masm.unsignedExtractLaneInt16x8(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I32x4ExtractLane:
+ masm.extractLaneInt32x4(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::F32x4ExtractLane:
+ masm.extractLaneFloat32x4(imm, src, ToFloatRegister(dest));
+ break;
+ case wasm::SimdOp::F64x2ExtractLane:
+ masm.extractLaneFloat64x2(imm, src, ToFloatRegister(dest));
+ break;
+ default:
+ MOZ_CRASH("Reduce SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+ LWasmReduceAndBranchSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+
+ ScratchSimd128Scope scratch(masm);
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const Register test = temps.AcquireX().asUnsized();
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128AnyTrue:
+ masm.Addp(Simd1D(scratch), Simd2D(src));
+ masm.Umov(ARMRegister(test, 64), Simd1D(scratch), 0);
+ masm.branch64(Assembler::Equal, Register64(test), Imm64(0),
+ getJumpLabelForBranch(ins->ifFalse()));
+ jumpToBlock(ins->ifTrue());
+ break;
+ case wasm::SimdOp::I8x16AllTrue:
+ case wasm::SimdOp::I16x8AllTrue:
+ case wasm::SimdOp::I32x4AllTrue:
+ case wasm::SimdOp::I64x2AllTrue: {
+ // Compare all lanes to zero.
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16AllTrue:
+ masm.Cmeq(Simd16B(scratch), Simd16B(src), 0);
+ break;
+ case wasm::SimdOp::I16x8AllTrue:
+ masm.Cmeq(Simd8H(scratch), Simd8H(src), 0);
+ break;
+ case wasm::SimdOp::I32x4AllTrue:
+ masm.Cmeq(Simd4S(scratch), Simd4S(src), 0);
+ break;
+ case wasm::SimdOp::I64x2AllTrue:
+ masm.Cmeq(Simd2D(scratch), Simd2D(src), 0);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ masm.Addp(Simd1D(scratch), Simd2D(scratch));
+ masm.Umov(ARMRegister(test, 64), Simd1D(scratch), 0);
+ masm.branch64(Assembler::NotEqual, Register64(test), Imm64(0),
+ getJumpLabelForBranch(ins->ifFalse()));
+ jumpToBlock(ins->ifTrue());
+ break;
+ }
+ default:
+ MOZ_CRASH("Reduce-and-branch SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+ LWasmReduceSimd128ToInt64* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ Register64 dest = ToOutRegister64(ins);
+ uint32_t imm = ins->imm();
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I64x2ExtractLane:
+ masm.extractLaneInt64x2(imm, src, dest);
+ break;
+ default:
+ MOZ_CRASH("Reduce SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
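+// Copy a memory access descriptor, narrowing its type to the scalar type
+// that is actually loaded or stored for a single lane.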
+static inline wasm::MemoryAccessDesc DeriveMemoryAccessDesc(
+ const wasm::MemoryAccessDesc& access, Scalar::Type type) {
+ return wasm::MemoryAccessDesc(type, access.align(), access.offset(),
+ access.trapOffset());
+}
+
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ // Forward loading to wasmLoad, and use replaceLane after that.
+ const MWasmLoadLaneSimd128* mir = ins->mir();
+ Register temp = ToRegister(ins->temp());
+ FloatRegister src = ToFloatRegister(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+ // replaceLane takes an lhsDest argument.
+ masm.moveSimd128(src, dest);
+ switch (ins->laneSize()) {
+ case 1: {
+ masm.wasmLoad(DeriveMemoryAccessDesc(mir->access(), Scalar::Int8),
+ HeapReg, ToRegister(ins->ptr()), AnyRegister(temp));
+ masm.replaceLaneInt8x16(ins->laneIndex(), temp, dest);
+ break;
+ }
+ case 2: {
+ masm.wasmLoad(DeriveMemoryAccessDesc(mir->access(), Scalar::Int16),
+ HeapReg, ToRegister(ins->ptr()), AnyRegister(temp));
+ masm.replaceLaneInt16x8(ins->laneIndex(), temp, dest);
+ break;
+ }
+ case 4: {
+ masm.wasmLoad(DeriveMemoryAccessDesc(mir->access(), Scalar::Int32),
+ HeapReg, ToRegister(ins->ptr()), AnyRegister(temp));
+ masm.replaceLaneInt32x4(ins->laneIndex(), temp, dest);
+ break;
+ }
+ case 8: {
+ masm.wasmLoadI64(DeriveMemoryAccessDesc(mir->access(), Scalar::Int64),
+ HeapReg, ToRegister(ins->ptr()), Register64(temp));
+ masm.replaceLaneInt64x2(ins->laneIndex(), Register64(temp), dest);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unsupported load lane size");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ // Forward storing to wasmStore for the result of extractLane.
+ const MWasmStoreLaneSimd128* mir = ins->mir();
+ Register temp = ToRegister(ins->temp());
+ FloatRegister src = ToFloatRegister(ins->src());
+ switch (ins->laneSize()) {
+ case 1: {
+ masm.extractLaneInt8x16(ins->laneIndex(), src, temp);
+ masm.wasmStore(DeriveMemoryAccessDesc(mir->access(), Scalar::Int8),
+ AnyRegister(temp), HeapReg, ToRegister(ins->ptr()));
+ break;
+ }
+ case 2: {
+ masm.extractLaneInt16x8(ins->laneIndex(), src, temp);
+ masm.wasmStore(DeriveMemoryAccessDesc(mir->access(), Scalar::Int16),
+ AnyRegister(temp), HeapReg, ToRegister(ins->ptr()));
+ break;
+ }
+ case 4: {
+ masm.extractLaneInt32x4(ins->laneIndex(), src, temp);
+ masm.wasmStore(DeriveMemoryAccessDesc(mir->access(), Scalar::Int32),
+ AnyRegister(temp), HeapReg, ToRegister(ins->ptr()));
+ break;
+ }
+ case 8: {
+ masm.extractLaneInt64x2(ins->laneIndex(), src, Register64(temp));
+ masm.wasmStoreI64(DeriveMemoryAccessDesc(mir->access(), Scalar::Int64),
+ Register64(temp), HeapReg, ToRegister(ins->ptr()));
+ break;
+ }
+ default:
+ MOZ_CRASH("Unsupported store lane size");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
diff --git a/js/src/jit/arm64/CodeGenerator-arm64.h b/js/src/jit/arm64/CodeGenerator-arm64.h
new file mode 100644
index 0000000000..43cd24fddf
--- /dev/null
+++ b/js/src/jit/arm64/CodeGenerator-arm64.h
@@ -0,0 +1,135 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_CodeGenerator_arm64_h
+#define jit_arm64_CodeGenerator_arm64_h
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorARM64;
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+using OutOfLineWasmTruncateCheck =
+ OutOfLineWasmTruncateCheckBase<CodeGeneratorARM64>;
+
+class CodeGeneratorARM64 : public CodeGeneratorShared {
+ friend class MoveResolverARM64;
+
+ protected:
+ CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ NonAssertingLabel deoptLabel_;
+
+ MoveOperand toMoveOperand(const LAllocation a) const;
+
+ void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ masm.cmpPtr(lhs, rhs);
+ return bailoutIf(c, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs,
+ LSnapshot* snapshot) {
+ masm.testPtr(lhs, rhs);
+ return bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ masm.cmp32(lhs, rhs);
+ return bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ masm.test32(lhs, rhs);
+ return bailoutIf(c, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+ masm.test32(reg, Imm32(0xFF));
+ return bailoutIf(Assembler::Zero, snapshot);
+ }
+
+ bool generateOutOfLineCode();
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ void emitBranch(Assembler::Condition cond, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse);
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ cond = masm.testNull(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ cond = masm.testUndefined(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ cond = masm.testObject(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ masm.cmpPtr(reg, ImmWord(0));
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register base);
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitSimpleBinaryI64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* lir, JSOp op);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ void generateInvalidateEpilogue();
+
+ public:
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+};
+
+typedef CodeGeneratorARM64 CodeGeneratorSpecific;
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorARM64> {
+ protected: // Silence Clang warning.
+ LSnapshot* snapshot_;
+
+ public:
+ explicit OutOfLineBailout(LSnapshot* snapshot) : snapshot_(snapshot) {}
+
+ void accept(CodeGeneratorARM64* codegen) override;
+
+ LSnapshot* snapshot() const { return snapshot_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_CodeGenerator_arm64_h */
diff --git a/js/src/jit/arm64/LIR-arm64.h b/js/src/jit/arm64/LIR-arm64.h
new file mode 100644
index 0000000000..d825209b1e
--- /dev/null
+++ b/js/src/jit/arm64/LIR-arm64.h
@@ -0,0 +1,373 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_LIR_arm64_h
+#define jit_arm64_LIR_arm64_h
+
+namespace js {
+namespace jit {
+
+class LUnboxBase : public LInstructionHelper<1, 1, 0> {
+ public:
+ LUnboxBase(LNode::Opcode opcode, const LAllocation& input)
+ : LInstructionHelper(opcode) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+};
+
+class LUnbox : public LUnboxBase {
+ public:
+ LIR_HEADER(Unbox);
+
+ explicit LUnbox(const LAllocation& input) : LUnboxBase(classOpcode, input) {}
+
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LUnboxBase {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnboxBase(classOpcode, input), type_(type) {}
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ explicit LWasmUint32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ explicit LWasmUint32ToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LDivI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+ const bool negativeDivisor_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift, bool negativeDivisor)
+ : LInstructionHelper(classOpcode),
+ shift_(shift),
+ negativeDivisor_(negativeDivisor) {
+ setOperand(0, lhs);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+
+ int32_t shift() { return shift_; }
+ bool negativeDivisor() { return negativeDivisor_; }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LDivConstantI : public LInstructionHelper<1, 1, 1> {
+ const int32_t denominator_;
+
+ public:
+ LIR_HEADER(DivConstantI)
+
+ LDivConstantI(const LAllocation& lhs, int32_t denominator,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode), denominator_(denominator) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ int32_t denominator() const { return denominator_; }
+ MDiv* mir() const { return mir_->toDiv(); }
+ bool canBeNegativeDividend() const { return mir()->canBeNegativeDividend(); }
+};
+
+class LUDivConstantI : public LInstructionHelper<1, 1, 1> {
+ const int32_t denominator_;
+
+ public:
+ LIR_HEADER(UDivConstantI)
+
+ LUDivConstantI(const LAllocation& lhs, int32_t denominator,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode), denominator_(denominator) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ int32_t denominator() const { return denominator_; }
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LModI : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ }
+
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+ int32_t shift() { return shift_; }
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ }
+
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp1,
+ const LDefinition& temp2, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ int32_t shift() const { return shift_; }
+
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+// Takes a tableswitch with an integer to decide
+class LTableSwitch : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ const LAllocation* index() { return getOperand(0); }
+ const LDefinition* tempInt() { return getTemp(0); }
+  // Added so the CodeGenerator can use the same accessor names everywhere.
+ const LDefinition* tempPointer() { return getTemp(1); }
+};
+
+// Takes a tableswitch with a value to decide
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() { return getTemp(0); }
+ const LDefinition* tempFloat() { return getTemp(1); }
+ const LDefinition* tempPointer() { return getTemp(2); }
+};
+
+class LMulI : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(MulI);
+
+ LMulI() : LBinaryMath(classOpcode) {}
+
+ MMul* mir() { return mir_->toMul(); }
+};
+
+class LUDiv : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UDiv);
+
+ LUDiv(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& remainder)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, remainder);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+
+ MDiv* mir() { return mir_->toDiv(); }
+};
+
+class LUMod : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(UMod);
+
+ LUMod(const LAllocation& lhs, const LAllocation& rhs)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ }
+
+ MMod* mir() { return mir_->toMod(); }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+};
+
+class LDivOrModI64 : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64 : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ }
+
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_LIR_arm64_h */
diff --git a/js/src/jit/arm64/Lowering-arm64.cpp b/js/src/jit/arm64/Lowering-arm64.cpp
new file mode 100644
index 0000000000..d71f22089d
--- /dev/null
+++ b/js/src/jit/arm64/Lowering-arm64.cpp
@@ -0,0 +1,1438 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/Lowering-arm64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+LBoxAllocation LIRGeneratorARM64::useBoxFixed(MDefinition* mir, Register reg1,
+ Register, bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+LAllocation LIRGeneratorARM64::useByteOpRegister(MDefinition* mir) {
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorARM64::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorARM64::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition LIRGeneratorARM64::tempByteOpRegister() { return temp(); }
+
+LDefinition LIRGeneratorARM64::tempToUnbox() { return temp(); }
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
+ LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* box = unbox->getOperand(0);
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnboxBase* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new (alloc())
+ LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new (alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ // FIXME: It should be possible to useAtStart() here, but the DEBUG
+ // code in CodeGenerator::visitUnbox() needs to handle non-Register
+ // cases. ARM64 doesn't have an Operand type.
+ lir = new (alloc()) LUnbox(useRegisterAtStart(box));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ define(lir, unbox);
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+// x = !y
+void LIRGeneratorARM64::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(
+ 0, ins->snapshot() ? useRegister(input) : useRegisterAtStart(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x+y
+void LIRGeneratorARM64::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0,
+ ins->snapshot() ? useRegister(lhs) : useRegisterAtStart(lhs));
+ ins->setOperand(1, ins->snapshot() ? useRegisterOrConstant(rhs)
+ : useRegisterOrConstantAtStart(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegisterAtStart(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template <size_t Temps>
+void LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegisterAtStart(lhs));
+ ins->setOperand(1, useRegisterAtStart(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+template void LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 2, 1>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+void LIRGeneratorARM64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64(ins, mir);
+}
+
+// These all currently have codegen that depends on reuse but only because the
+// masm API depends on that. We need new three-address masm APIs, for both
+// constant and variable rhs.
+//
+// MAdd => LAddI64
+// MSub => LSubI64
+// MBitAnd, MBitOr, MBitXor => LBitOpI64
+void LIRGeneratorARM64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, useInt64RegisterOrConstantAtStart(rhs));
+ defineInt64(ins, mir);
+}
+
+void LIRGeneratorARM64::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(LMulI64::Lhs, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(LMulI64::Rhs, useInt64RegisterOrConstantAtStart(rhs));
+ defineInt64(ins, mir);
+}
+
+template <size_t Temps>
+void LIRGeneratorARM64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES,
+ "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES,
+ "Assume Count is located at INT64_PIECES.");
+
+ ins->setOperand(INT64_PIECES, useRegisterOrConstantAtStart(rhs));
+ defineInt64(ins, mir);
+}
+
+template void LIRGeneratorARM64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorARM64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+void LIRGeneratorARM64::lowerForCompareI64AndBranch(MTest* mir, MCompare* comp,
+ JSOp op, MDefinition* left,
+ MDefinition* right,
+ MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ auto* lir = new (alloc())
+ LCompareI64AndBranch(comp, op, useInt64Register(left),
+ useInt64RegisterOrConstant(right), ifTrue, ifFalse);
+ add(lir, mir);
+}
+
+void LIRGeneratorARM64::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
+ MInstruction* mir,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+void LIRGeneratorARM64::lowerWasmBuiltinTruncateToInt32(
+ MWasmBuiltinTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ if (opd->type() == MIRType::Double) {
+ define(new (alloc()) LWasmBuiltinTruncateDToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+ return;
+ }
+
+ define(new (alloc()) LWasmBuiltinTruncateFToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorARM64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void LIRGeneratorARM64::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+void LIRGeneratorARM64::lowerDivI(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ if (div->rhs()->isConstant()) {
+ LAllocation lhs = useRegister(div->lhs());
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ int32_t shift = mozilla::FloorLog2(mozilla::Abs(rhs));
+
+ if (rhs != 0 && uint32_t(1) << shift == mozilla::Abs(rhs)) {
+ LDivPowTwoI* lir = new (alloc()) LDivPowTwoI(lhs, shift, rhs < 0);
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+ if (rhs != 0) {
+ LDivConstantI* lir = new (alloc()) LDivConstantI(lhs, rhs, temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+ }
+
+ LDivI* lir = new (alloc())
+ LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+}
+
+void LIRGeneratorARM64::lowerNegI(MInstruction* ins, MDefinition* input) {
+ define(new (alloc()) LNegI(useRegisterAtStart(input)), ins);
+}
+
+void LIRGeneratorARM64::lowerNegI64(MInstruction* ins, MDefinition* input) {
+ defineInt64(new (alloc()) LNegI64(useInt64RegisterAtStart(input)), ins);
+}
+
+void LIRGeneratorARM64::lowerMulI(MMul* mul, MDefinition* lhs,
+ MDefinition* rhs) {
+ LMulI* lir = new (alloc()) LMulI;
+ if (mul->fallible()) {
+ assignSnapshot(lir, mul->bailoutKind());
+ }
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void LIRGeneratorARM64::lowerModI(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir =
+ new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
+ LModMaskI* lir = new (alloc())
+ LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift + 1);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+      define(lir, mod);
+      return;
+ }
+ }
+
+ LModI* lir =
+ new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+}
+
+void LIRGeneratorARM64::lowerDivI64(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()));
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorARM64::lowerUDivI64(MDiv* div) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()));
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorARM64::lowerUModI64(MMod* mod) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()));
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorARM64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+void LIRGeneratorARM64::lowerModI64(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()));
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorARM64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+void LIRGenerator::visitPowHalf(MPowHalf* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new (alloc()) LPowHalfD(useRegister(input));
+ define(lir, ins);
+}
+
+void LIRGeneratorARM64::lowerWasmSelectI(MWasmSelect* select) {
+ if (select->type() == MIRType::Simd128) {
+ LAllocation t = useRegisterAtStart(select->trueExpr());
+ LAllocation f = useRegister(select->falseExpr());
+ LAllocation c = useRegister(select->condExpr());
+ auto* lir = new (alloc()) LWasmSelect(t, f, c);
+ defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
+ } else {
+ LAllocation t = useRegisterAtStart(select->trueExpr());
+ LAllocation f = useRegisterAtStart(select->falseExpr());
+ LAllocation c = useRegisterAtStart(select->condExpr());
+ define(new (alloc()) LWasmSelect(t, f, c), select);
+ }
+}
+
+void LIRGeneratorARM64::lowerWasmSelectI64(MWasmSelect* select) {
+ LInt64Allocation t = useInt64RegisterAtStart(select->trueExpr());
+ LInt64Allocation f = useInt64RegisterAtStart(select->falseExpr());
+ LAllocation c = useRegisterAtStart(select->condExpr());
+ defineInt64(new (alloc()) LWasmSelectI64(t, f, c), select);
+}
+
+// On arm64 we specialize the cases: compare is {{U,}Int32, {U,}Int64,
+// Float32, Double}, and select is {{U,}Int32, {U,}Int64, Float32, Double},
+// independently.
+bool LIRGeneratorARM64::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return (insTy == MIRType::Int32 || insTy == MIRType::Int64 ||
+ insTy == MIRType::Float32 || insTy == MIRType::Double) &&
+ (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32 ||
+ compTy == MCompare::Compare_Int64 ||
+ compTy == MCompare::Compare_UInt64 ||
+ compTy == MCompare::Compare_Float32 ||
+ compTy == MCompare::Compare_Double);
+}
+
+void LIRGeneratorARM64::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ LAllocation rhsAlloc;
+ if (compTy == MCompare::Compare_Float32 ||
+ compTy == MCompare::Compare_Double) {
+ rhsAlloc = useRegisterAtStart(rhs);
+ } else if (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32 ||
+ compTy == MCompare::Compare_Int64 ||
+ compTy == MCompare::Compare_UInt64) {
+ rhsAlloc = useRegisterOrConstantAtStart(rhs);
+ } else {
+ MOZ_CRASH("Unexpected type");
+ }
+ auto* lir = new (alloc())
+ LWasmCompareAndSelect(useRegisterAtStart(lhs), rhsAlloc, compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()),
+ useRegisterAtStart(ins->falseExpr()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAbs(MAbs* ins) {
+ define(allocateAbs(ins, useRegisterAtStart(ins->input())), ins);
+}
+
+LTableSwitch* LIRGeneratorARM64::newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV* LIRGeneratorARM64::newLTableSwitchV(MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
+ tempDouble(), temp(), tableswitch);
+}
+
+void LIRGeneratorARM64::lowerUrshD(MUrsh* mir) {
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new (alloc())
+ LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void LIRGeneratorARM64::lowerPowOfTwoI(MPow* mir) {
+ int32_t base = mir->input()->toConstant()->toInt32();
+ MDefinition* power = mir->power();
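+  // The base is a constant power of two, so the power can be computed with a
+  // shift; the snapshot allows bailing out when the result is not
+  // representable as an Int32 (e.g. for a negative exponent).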
+
+ auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
+ assignSnapshot(lir, mir->bailoutKind());
+ define(lir, mir);
+}
+
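+// The BigInt lowerings below allocate a result BigInt and can therefore GC,
+// so each one gets a safepoint in addition to its temps.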
+void LIRGeneratorARM64::lowerBigIntLsh(MBigIntLsh* ins) {
+ auto* lir = new (alloc()) LBigIntLsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorARM64::lowerBigIntRsh(MBigIntRsh* ins) {
+ auto* lir = new (alloc()) LBigIntRsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorARM64::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorARM64::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+#ifdef ENABLE_WASM_SIMD
+
+bool LIRGeneratorARM64::canFoldReduceSimd128AndBranch(wasm::SimdOp op) {
+ switch (op) {
+ case wasm::SimdOp::V128AnyTrue:
+ case wasm::SimdOp::I8x16AllTrue:
+ case wasm::SimdOp::I16x8AllTrue:
+ case wasm::SimdOp::I32x4AllTrue:
+ case wasm::SimdOp::I64x2AllTrue:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool LIRGeneratorARM64::canEmitWasmReduceSimd128AtUses(
+ MWasmReduceSimd128* ins) {
+ if (!ins->canEmitAtUses()) {
+ return false;
+ }
+ // Only specific ops generating int32.
+ if (ins->type() != MIRType::Int32) {
+ return false;
+ }
+ if (!canFoldReduceSimd128AndBranch(ins->simdOp())) {
+ return false;
+ }
+ // If never used then defer (it will be removed).
+ MUseIterator iter(ins->usesBegin());
+ if (iter == ins->usesEnd()) {
+ return true;
+ }
+ // We require an MTest consumer.
+ MNode* node = iter->consumer();
+ if (!node->isDefinition() || !node->toDefinition()->isTest()) {
+ return false;
+ }
+ // Defer only if there's only one use.
+ iter++;
+ return iter == ins->usesEnd();
+}
+
+#endif
+
+void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
+ switch (ins->type()) {
+ case MIRType::Int32:
+ define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ break;
+ case MIRType::Float32:
+ define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ break;
+ case MIRType::Double:
+ define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ break;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void LIRGeneratorARM64::lowerUDiv(MDiv* div) {
+ LAllocation lhs = useRegister(div->lhs());
+ if (div->rhs()->isConstant()) {
+ // NOTE: the result of toInt32 is coerced to uint32_t.
+ uint32_t rhs = div->rhs()->toConstant()->toInt32();
+ int32_t shift = mozilla::FloorLog2(rhs);
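+    // A power-of-two divisor reduces to a logical right shift; any other
+    // non-zero constant divisor takes the LUDivConstantI path below.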
+
+ if (rhs != 0 && uint32_t(1) << shift == rhs) {
+ LDivPowTwoI* lir = new (alloc()) LDivPowTwoI(lhs, shift, false);
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+
+ LUDivConstantI* lir = new (alloc()) LUDivConstantI(lhs, rhs, temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+
+ // Generate UDiv
+ LAllocation rhs = useRegister(div->rhs());
+ LDefinition remainder = LDefinition::BogusTemp();
+ if (!div->canTruncateRemainder()) {
+ remainder = temp();
+ }
+
+ LUDiv* lir = new (alloc()) LUDiv(lhs, rhs, remainder);
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+}
+
+void LIRGeneratorARM64::lowerUMod(MMod* mod) {
+ LUMod* lir = new (alloc())
+ LUMod(useRegister(mod->getOperand(0)), useRegister(mod->getOperand(1)));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir =
+ new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir =
+ new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+ // We have no memory-base value, meaning that HeapReg is to be used as the
+ // memory base. This follows from the definition of
+ // FunctionCompiler::maybeLoadMemoryBase() in WasmIonCompile.cpp.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ auto* lir =
+ new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+  // See comment in LIRGenerator::visitAsmJSLoadHeap just above.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+ limitAlloc, LAllocation()),
+ ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ // Note, the access type may be Int64 here.
+
+ LWasmCompareExchangeHeap* lir = new (alloc())
+ LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
+ useRegister(ins->newValue()));
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ // Note, the access type may be Int64 here.
+
+ LWasmAtomicExchangeHeap* lir = new (alloc())
+ LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ // Note, the access type may be Int64 here.
+
+ if (!ins->hasUses()) {
+ LWasmAtomicBinopHeapForEffect* lir =
+ new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()),
+ /* flagTemp= */ temp());
+ add(lir, ins);
+ return;
+ }
+
+ LWasmAtomicBinopHeap* lir = new (alloc())
+ LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
+ /* temp= */ LDefinition::BogusTemp(),
+ /* flagTemp= */ temp());
+ define(lir, ins);
+}
+
+void LIRGeneratorARM64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+ define(new (alloc())
+ LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorARM64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+ define(new (alloc())
+ LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We can omit allocating the result BigInt.
+
+ if (ins->isForEffect()) {
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp1, temp2);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ if (ins->isForEffect()) {
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinopForEffect(elements, index, value, temp());
+ add(lir, ins);
+ return;
+ }
+
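+  // A Uint32 result may not fit in an Int32 and is returned as a Double,
+  // which needs an extra temporary.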
+ LDefinition tempDef1 = temp();
+ LDefinition tempDef2 = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32) {
+ tempDef2 = temp();
+ }
+
+ LAtomicTypedArrayElementBinop* lir = new (alloc())
+ LAtomicTypedArrayElementBinop(elements, index, value, tempDef1, tempDef2);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ // If the target is an FPReg then we need a temporary at the CodeGenerator
+ // level for creating the result.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32) {
+ outTemp = temp();
+ }
+
+ LCompareExchangeTypedArrayElement* lir =
+ new (alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval,
+ newval, outTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LDefinition temp2 = temp();
+
+ auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
+ elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32) {
+ tempDef = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir = new (alloc())
+ LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);
+
+ define(lir, ins);
+}
+
+void LIRGeneratorARM64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
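+  // The loaded 64-bit value is boxed into a freshly allocated BigInt, which
+  // can trigger GC, hence the safepoint.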
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorARM64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useRegister(ins->value());
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
+}
+
+void LIRGeneratorARM64::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use WasmBuiltinTruncateToInt64 for arm64");
+}
+
+void LIRGeneratorARM64::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
+ auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
+  // zero-extended and can act as a 64-bit value.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ LAllocation ptr = useRegisterOrConstantAtStart(base);
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc()) LWasmLoadI64(ptr);
+ defineInt64(lir, ins);
+ } else {
+ auto* lir = new (alloc()) LWasmLoad(ptr);
+ define(lir, ins);
+ }
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ MDefinition* value = ins->value();
+
+ if (ins->access().type() == Scalar::Int64) {
+ LAllocation baseAlloc = useRegisterOrConstantAtStart(base);
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation baseAlloc = useRegisterOrConstantAtStart(base);
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
+ add(lir, ins);
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
+}
+
+void LIRGenerator::visitCopySign(MCopySign* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double) {
+ lir = new (alloc()) LCopySignD();
+ } else {
+ lir = new (alloc()) LCopySignF();
+ }
+
+ lir->setOperand(0, useRegisterAtStart(lhs));
+ lir->setOperand(1, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useRegister(rhs)
+ : useRegisterAtStart(rhs));
+  // copySignDouble and copySignFloat32 are optimized for lhs == output.
+  // The allocation above also prevents rhs == output when lhs != output,
+  // which avoids clobbering rhs.
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ defineInt64(
+ new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ defineInt64(new (alloc())
+ LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
+ ins);
+}
+
+void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->v0()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->v1()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->v2()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128Bitselect: {
+ auto* lir = new (alloc()) LWasmTernarySimd128(
+ ins->simdOp(), useRegister(ins->v0()), useRegister(ins->v1()),
+ useRegisterAtStart(ins->v2()));
+      // On ARM64, the control register (v2) is used as the output register
+      // of the machine instruction.
+ defineReuseInput(lir, ins, LWasmTernarySimd128::V2);
+ break;
+ }
+ case wasm::SimdOp::F32x4RelaxedFma:
+ case wasm::SimdOp::F32x4RelaxedFnma:
+ case wasm::SimdOp::F64x2RelaxedFma:
+ case wasm::SimdOp::F64x2RelaxedFnma: {
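+      // The accumulator (v2) is both an input and the output of the fused
+      // multiply-add, so its register is reused for the result.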
+ auto* lir = new (alloc()) LWasmTernarySimd128(
+ ins->simdOp(), useRegister(ins->v0()), useRegister(ins->v1()),
+ useRegisterAtStart(ins->v2()));
+ defineReuseInput(lir, ins, LWasmTernarySimd128::V2);
+ break;
+ }
+ case wasm::SimdOp::I32x4DotI8x16I7x16AddS: {
+ auto* lir = new (alloc()) LWasmTernarySimd128(
+ ins->simdOp(), useRegister(ins->v0()), useRegister(ins->v1()),
+ useRegisterAtStart(ins->v2()), tempSimd128());
+ defineReuseInput(lir, ins, LWasmTernarySimd128::V2);
+ break;
+ }
+ case wasm::SimdOp::I8x16RelaxedLaneSelect:
+ case wasm::SimdOp::I16x8RelaxedLaneSelect:
+ case wasm::SimdOp::I32x4RelaxedLaneSelect:
+ case wasm::SimdOp::I64x2RelaxedLaneSelect: {
+ auto* lir = new (alloc()) LWasmTernarySimd128(
+ ins->simdOp(), useRegister(ins->v0()), useRegister(ins->v1()),
+ useRegisterAtStart(ins->v2()));
+ defineReuseInput(lir, ins, LWasmTernarySimd128::V2);
+ break;
+ }
+ default:
+ MOZ_CRASH("NYI");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+ wasm::SimdOp op = ins->simdOp();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128);
+ MOZ_ASSERT(rhs->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ LAllocation lhsAlloc = useRegisterAtStart(lhs);
+ LAllocation rhsAlloc = useRegisterAtStart(rhs);
+ LDefinition tempReg0 = LDefinition::BogusTemp();
+ LDefinition tempReg1 = LDefinition::BogusTemp();
+ if (op == wasm::SimdOp::I64x2Mul) {
+ tempReg0 = tempSimd128();
+ tempReg1 = tempSimd128();
+ }
+ auto* lir = new (alloc())
+ LWasmBinarySimd128(op, lhsAlloc, rhsAlloc, tempReg0, tempReg1);
+ define(lir, ins);
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
+ int8_t shuffle[16]) {
+ return false;
+}
+bool MWasmTernarySimd128::canRelaxBitselect() { return false; }
+
+bool MWasmBinarySimd128::canPmaddubsw() { return false; }
+#endif
+
+bool MWasmBinarySimd128::specializeForConstantRhs() {
+  // There are probably many cases we would want to specialize here.
+ return false;
+}
+
+void LIRGenerator::visitWasmBinarySimd128WithConstant(
+ MWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("binary SIMD with constant NYI");
+}
+
+void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ if (rhs->isConstant()) {
+ int32_t shiftCount = rhs->toConstant()->toInt32();
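+    // Wasm defines SIMD shift counts modulo the lane width, so mask the
+    // constant here before emitting a constant-shift instruction.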
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Shl:
+ case wasm::SimdOp::I8x16ShrU:
+ case wasm::SimdOp::I8x16ShrS:
+ shiftCount &= 7;
+ break;
+ case wasm::SimdOp::I16x8Shl:
+ case wasm::SimdOp::I16x8ShrU:
+ case wasm::SimdOp::I16x8ShrS:
+ shiftCount &= 15;
+ break;
+ case wasm::SimdOp::I32x4Shl:
+ case wasm::SimdOp::I32x4ShrU:
+ case wasm::SimdOp::I32x4ShrS:
+ shiftCount &= 31;
+ break;
+ case wasm::SimdOp::I64x2Shl:
+ case wasm::SimdOp::I64x2ShrU:
+ case wasm::SimdOp::I64x2ShrS:
+ shiftCount &= 63;
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift operation");
+ }
+# ifdef DEBUG
+ js::wasm::ReportSimdAnalysis("shift -> constant shift");
+# endif
+ auto* lir = new (alloc())
+ LWasmConstantShiftSimd128(useRegisterAtStart(lhs), shiftCount);
+ define(lir, ins);
+ return;
+ }
+
+# ifdef DEBUG
+ js::wasm::ReportSimdAnalysis("shift -> variable shift");
+# endif
+
+ LAllocation lhsDestAlloc = useRegisterAtStart(lhs);
+ LAllocation rhsAlloc = useRegisterAtStart(rhs);
+ auto* lir = new (alloc()) LWasmVariableShiftSimd128(lhsDestAlloc, rhsAlloc,
+ LDefinition::BogusTemp());
+ define(lir, ins);
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ SimdShuffle s = ins->shuffle();
+ switch (s.opd) {
+ case SimdShuffle::Operand::LEFT:
+ case SimdShuffle::Operand::RIGHT: {
+ LAllocation src;
+ switch (*s.permuteOp) {
+ case SimdPermuteOp::MOVE:
+ case SimdPermuteOp::BROADCAST_8x16:
+ case SimdPermuteOp::BROADCAST_16x8:
+ case SimdPermuteOp::PERMUTE_8x16:
+ case SimdPermuteOp::PERMUTE_16x8:
+ case SimdPermuteOp::PERMUTE_32x4:
+ case SimdPermuteOp::ROTATE_RIGHT_8x16:
+ case SimdPermuteOp::SHIFT_LEFT_8x16:
+ case SimdPermuteOp::SHIFT_RIGHT_8x16:
+ case SimdPermuteOp::REVERSE_16x8:
+ case SimdPermuteOp::REVERSE_32x4:
+ case SimdPermuteOp::REVERSE_64x2:
+ break;
+ default:
+ MOZ_CRASH("Unexpected operator");
+ }
+ if (s.opd == SimdShuffle::Operand::LEFT) {
+ src = useRegisterAtStart(ins->lhs());
+ } else {
+ src = useRegisterAtStart(ins->rhs());
+ }
+ auto* lir =
+ new (alloc()) LWasmPermuteSimd128(src, *s.permuteOp, s.control);
+ define(lir, ins);
+ break;
+ }
+ case SimdShuffle::Operand::BOTH:
+ case SimdShuffle::Operand::BOTH_SWAPPED: {
+ LDefinition temp = LDefinition::BogusTemp();
+ LAllocation lhs;
+ LAllocation rhs;
+ if (s.opd == SimdShuffle::Operand::BOTH) {
+ lhs = useRegisterAtStart(ins->lhs());
+ rhs = useRegisterAtStart(ins->rhs());
+ } else {
+ lhs = useRegisterAtStart(ins->rhs());
+ rhs = useRegisterAtStart(ins->lhs());
+ }
+ auto* lir = new (alloc())
+ LWasmShuffleSimd128(lhs, rhs, temp, *s.shuffleOp, s.control);
+ define(lir, ins);
+ break;
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ // Optimal code generation reuses the lhs register because the rhs scalar is
+ // merged into a vector lhs.
+ LAllocation lhs = useRegisterAtStart(ins->lhs());
+ if (ins->rhs()->type() == MIRType::Int64) {
+ auto* lir = new (alloc())
+ LWasmReplaceInt64LaneSimd128(lhs, useInt64Register(ins->rhs()));
+ defineReuseInput(lir, ins, 0);
+ } else {
+ auto* lir =
+ new (alloc()) LWasmReplaceLaneSimd128(lhs, useRegister(ins->rhs()));
+ defineReuseInput(lir, ins, 0);
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ switch (ins->input()->type()) {
+ case MIRType::Int64: {
+ // 64-bit integer splats.
+ // Load-and-(sign|zero)extend.
+ auto* lir = new (alloc())
+ LWasmInt64ToSimd128(useInt64RegisterAtStart(ins->input()));
+ define(lir, ins);
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Double: {
+ // Floating-point splats.
+ auto* lir =
+ new (alloc()) LWasmScalarToSimd128(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+ break;
+ }
+ default: {
+ // 32-bit integer splats.
+ auto* lir =
+ new (alloc()) LWasmScalarToSimd128(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+ break;
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->input()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ LDefinition tempReg = LDefinition::BogusTemp();
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Neg:
+ case wasm::SimdOp::I16x8Neg:
+ case wasm::SimdOp::I32x4Neg:
+ case wasm::SimdOp::I64x2Neg:
+ case wasm::SimdOp::F32x4Neg:
+ case wasm::SimdOp::F64x2Neg:
+ case wasm::SimdOp::F32x4Abs:
+ case wasm::SimdOp::F64x2Abs:
+ case wasm::SimdOp::V128Not:
+ case wasm::SimdOp::F32x4Sqrt:
+ case wasm::SimdOp::F64x2Sqrt:
+ case wasm::SimdOp::I8x16Abs:
+ case wasm::SimdOp::I16x8Abs:
+ case wasm::SimdOp::I32x4Abs:
+ case wasm::SimdOp::I64x2Abs:
+ case wasm::SimdOp::I32x4TruncSatF32x4S:
+ case wasm::SimdOp::F32x4ConvertI32x4U:
+ case wasm::SimdOp::I32x4TruncSatF32x4U:
+ case wasm::SimdOp::I16x8ExtendLowI8x16S:
+ case wasm::SimdOp::I16x8ExtendHighI8x16S:
+ case wasm::SimdOp::I16x8ExtendLowI8x16U:
+ case wasm::SimdOp::I16x8ExtendHighI8x16U:
+ case wasm::SimdOp::I32x4ExtendLowI16x8S:
+ case wasm::SimdOp::I32x4ExtendHighI16x8S:
+ case wasm::SimdOp::I32x4ExtendLowI16x8U:
+ case wasm::SimdOp::I32x4ExtendHighI16x8U:
+ case wasm::SimdOp::I64x2ExtendLowI32x4S:
+ case wasm::SimdOp::I64x2ExtendHighI32x4S:
+ case wasm::SimdOp::I64x2ExtendLowI32x4U:
+ case wasm::SimdOp::I64x2ExtendHighI32x4U:
+ case wasm::SimdOp::F32x4ConvertI32x4S:
+ case wasm::SimdOp::F32x4Ceil:
+ case wasm::SimdOp::F32x4Floor:
+ case wasm::SimdOp::F32x4Trunc:
+ case wasm::SimdOp::F32x4Nearest:
+ case wasm::SimdOp::F64x2Ceil:
+ case wasm::SimdOp::F64x2Floor:
+ case wasm::SimdOp::F64x2Trunc:
+ case wasm::SimdOp::F64x2Nearest:
+ case wasm::SimdOp::F32x4DemoteF64x2Zero:
+ case wasm::SimdOp::F64x2PromoteLowF32x4:
+ case wasm::SimdOp::F64x2ConvertLowI32x4S:
+ case wasm::SimdOp::F64x2ConvertLowI32x4U:
+ case wasm::SimdOp::I16x8ExtaddPairwiseI8x16S:
+ case wasm::SimdOp::I16x8ExtaddPairwiseI8x16U:
+ case wasm::SimdOp::I32x4ExtaddPairwiseI16x8S:
+ case wasm::SimdOp::I32x4ExtaddPairwiseI16x8U:
+ case wasm::SimdOp::I8x16Popcnt:
+ case wasm::SimdOp::I32x4RelaxedTruncF32x4S:
+ case wasm::SimdOp::I32x4RelaxedTruncF32x4U:
+ case wasm::SimdOp::I32x4RelaxedTruncF64x2SZero:
+ case wasm::SimdOp::I32x4RelaxedTruncF64x2UZero:
+ break;
+ case wasm::SimdOp::I32x4TruncSatF64x2SZero:
+ case wasm::SimdOp::I32x4TruncSatF64x2UZero:
+ tempReg = tempSimd128();
+ break;
+ default:
+ MOZ_CRASH("Unary SimdOp not implemented");
+ }
+
+ LUse input = useRegisterAtStart(ins->input());
+ LWasmUnarySimd128* lir = new (alloc()) LWasmUnarySimd128(input, tempReg);
+ define(lir, ins);
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ if (canEmitWasmReduceSimd128AtUses(ins)) {
+ emitAtUses(ins);
+ return;
+ }
+
+ // Reductions (any_true, all_true, bitmask, extract_lane) uniformly prefer
+ // useRegisterAtStart:
+ //
+ // - In most cases, the input type differs from the output type, so there's no
+ // conflict and it doesn't really matter.
+ //
+ // - For extract_lane(0) on F32x4 and F64x2, input == output results in zero
+ // code being generated.
+ //
+ // - For extract_lane(k > 0) on F32x4 and F64x2, allowing the input register
+ // to be targeted lowers register pressure if it's the last use of the
+ // input.
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc())
+ LWasmReduceSimd128ToInt64(useRegisterAtStart(ins->input()));
+ defineInt64(lir, ins);
+ } else {
+ LDefinition tempReg = LDefinition::BogusTemp();
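+    // Bitmask reductions are synthesized from several vector operations on
+    // ARM64 (there is no single movemask-style instruction), so they need a
+    // SIMD temp.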
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Bitmask:
+ case wasm::SimdOp::I16x8Bitmask:
+ case wasm::SimdOp::I32x4Bitmask:
+ case wasm::SimdOp::I64x2Bitmask:
+ tempReg = tempSimd128();
+ break;
+ default:
+ break;
+ }
+
+ // Ideally we would reuse the input register for floating extract_lane if
+ // the lane is zero, but constraints in the register allocator require the
+ // input and output register types to be the same.
+ auto* lir = new (alloc())
+ LWasmReduceSimd128(useRegisterAtStart(ins->input()), tempReg);
+ define(lir, ins);
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ // On 64-bit systems, the base pointer can be 32 bits or 64 bits. Either way,
+ // it fits in a GPR so we can ignore the Register/Register64 distinction here.
+
+ // Optimal allocation here reuses the value input for the output register
+ // because codegen otherwise has to copy the input to the output; this is
+ // because load-lane is implemented as load + replace-lane. Bug 1706106 may
+ // change all of that, so leave it alone for now.
+ LUse base = useRegisterAtStart(ins->base());
+ LUse inputUse = useRegisterAtStart(ins->value());
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ LWasmLoadLaneSimd128* lir =
+ new (alloc()) LWasmLoadLaneSimd128(base, inputUse, temp(), LAllocation());
+ define(lir, ins);
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ // See comment above about the base pointer.
+
+ LUse base = useRegisterAtStart(ins->base());
+ LUse input = useRegisterAtStart(ins->value());
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ LWasmStoreLaneSimd128* lir =
+ new (alloc()) LWasmStoreLaneSimd128(base, input, temp(), LAllocation());
+ add(lir, ins);
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
diff --git a/js/src/jit/arm64/Lowering-arm64.h b/js/src/jit/arm64/Lowering-arm64.h
new file mode 100644
index 0000000000..4ab52dd464
--- /dev/null
+++ b/js/src/jit/arm64/Lowering-arm64.h
@@ -0,0 +1,135 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_Lowering_arm64_h
+#define jit_arm64_Lowering_arm64_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorARM64 : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorARM64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {}
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ LDefinition tempToUnbox();
+
+ bool needTempForPostBarrier() { return true; }
+
+ // ARM64 has a scratch register, so no need for another temp for dispatch ICs.
+ LDefinition tempForDispatchCache(MIRType outputType = MIRType::None) {
+ return LDefinition::BogusTemp();
+ }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+ }
+ void defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ defineTypedPhi(phi, lirIndex);
+ }
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerUrshD(MUrsh* mir);
+
+ void lowerPowOfTwoI(MPow* mir);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+ template <size_t Temps>
+ void lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompareI64AndBranch(MTest* mir, MCompare* comp, JSOp op,
+ MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+
+ template <size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void lowerDivI(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerDivI64(MDiv* div);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerModI64(MMod* mod);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+ void lowerNegI(MInstruction* ins, MDefinition* input);
+ void lowerNegI64(MInstruction* ins, MDefinition* input);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerUDiv(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void lowerWasmSelectI(MWasmSelect* select);
+ void lowerWasmSelectI64(MWasmSelect* select);
+ bool canSpecializeWasmCompareAndSelect(MCompare::CompareType compTy,
+ MIRType insTy);
+ void lowerWasmCompareAndSelect(MWasmSelect* ins, MDefinition* lhs,
+ MDefinition* rhs, MCompare::CompareType compTy,
+ JSOp jsop);
+
+ void lowerBigIntLsh(MBigIntLsh* ins);
+ void lowerBigIntRsh(MBigIntRsh* ins);
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+
+#ifdef ENABLE_WASM_SIMD
+ bool canFoldReduceSimd128AndBranch(wasm::SimdOp op);
+ bool canEmitWasmReduceSimd128AtUses(MWasmReduceSimd128* ins);
+#endif
+
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+ LTableSwitch* newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* ins);
+
+ void lowerPhi(MPhi* phi);
+};
+
+typedef LIRGeneratorARM64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_Lowering_arm64_h */
diff --git a/js/src/jit/arm64/MacroAssembler-arm64-inl.h b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
new file mode 100644
index 0000000000..283867a29a
--- /dev/null
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -0,0 +1,4079 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_MacroAssembler_arm64_inl_h
+#define jit_arm64_MacroAssembler_arm64_inl_h
+
+#include "jit/arm64/MacroAssembler-arm64.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ Mov(ARMRegister(dest.reg, 64), ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ Mov(ARMRegister(dest.reg, 64), imm.value);
+}
+
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+ Fmov(ARMRegister(dest, 32), ARMFPRegister(src, 32));
+}
+
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+ Fmov(ARMFPRegister(dest, 32), ARMRegister(src, 32));
+}
+
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+ Sxtb(ARMRegister(dest, 32), ARMRegister(src, 32));
+}
+
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+ Sxth(ARMRegister(dest, 32), ARMRegister(src, 32));
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ Fmov(ARMRegister(dest.reg, 64), ARMFPRegister(src, 64));
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ Fmov(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+ Mov(ARMRegister(dest, 32), ARMRegister(src.reg, 32));
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ Uxtw(ARMRegister(dest.reg, 64), ARMRegister(src, 64));
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ Sxtb(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ Sxth(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ Sxtw(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ Sxtw(ARMRegister(dest, 64), ARMRegister(src, 32));
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ Uxtw(ARMRegister(dest, 64), ARMRegister(src, 64));
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ load32(src, dest);
+ move32To64SignExtend(dest, Register64(dest));
+}
+
+void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(lr, dest); }
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::not32(Register reg) {
+ Orn(ARMRegister(reg, 32), vixl::wzr, ARMRegister(reg, 32));
+}
+
+void MacroAssembler::notPtr(Register reg) {
+ Orn(ARMRegister(reg, 64), vixl::xzr, ARMRegister(reg, 64));
+}
+
+void MacroAssembler::and32(Register src, Register dest) {
+ And(ARMRegister(dest, 32), ARMRegister(dest, 32),
+ Operand(ARMRegister(src, 32)));
+}
+
+void MacroAssembler::and32(Imm32 imm, Register dest) {
+ And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void MacroAssembler::and32(Imm32 imm, Register src, Register dest) {
+ And(ARMRegister(dest, 32), ARMRegister(src, 32), Operand(imm.value));
+}
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != dest.base);
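+  // Read-modify-write through the scratch register; the scratch must not
+  // alias the base register, which is still needed to address the load and
+  // the store.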
+ load32(dest, scratch32.asUnsized());
+ And(scratch32, scratch32, Operand(imm.value));
+ store32(scratch32.asUnsized(), dest);
+}
+
+void MacroAssembler::and32(const Address& src, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != src.base);
+ load32(src, scratch32.asUnsized());
+ And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
+}
+
+void MacroAssembler::andPtr(Register src, Register dest) {
+ And(ARMRegister(dest, 64), ARMRegister(dest, 64),
+ Operand(ARMRegister(src, 64)));
+}
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) {
+ And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ And(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
+}
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ And(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
+ ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ Orr(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
+}
+
+void MacroAssembler::or32(Imm32 imm, Register dest) {
+ Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void MacroAssembler::or32(Register src, Register dest) {
+ Orr(ARMRegister(dest, 32), ARMRegister(dest, 32),
+ Operand(ARMRegister(src, 32)));
+}
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != dest.base);
+ load32(dest, scratch32.asUnsized());
+ Orr(scratch32, scratch32, Operand(imm.value));
+ store32(scratch32.asUnsized(), dest);
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) {
+ Orr(ARMRegister(dest, 64), ARMRegister(dest, 64),
+ Operand(ARMRegister(src, 64)));
+}
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) {
+ Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ orPtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ xorPtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::xor32(Register src, Register dest) {
+ Eor(ARMRegister(dest, 32), ARMRegister(dest, 32),
+ Operand(ARMRegister(src, 32)));
+}
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) {
+ Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != dest.base);
+ load32(dest, scratch32.asUnsized());
+ Eor(scratch32, scratch32, Operand(imm.value));
+ store32(scratch32.asUnsized(), dest);
+}
+
+void MacroAssembler::xor32(const Address& src, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != src.base);
+ load32(src, scratch32.asUnsized());
+ Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) {
+ Eor(ARMRegister(dest, 64), ARMRegister(dest, 64),
+ Operand(ARMRegister(src, 64)));
+}
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
+ Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ Eor(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
+}
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap16SignExtend(Register reg) {
+ rev16(ARMRegister(reg, 32), ARMRegister(reg, 32));
+ sxth(ARMRegister(reg, 32), ARMRegister(reg, 32));
+}
+
+void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
+ rev16(ARMRegister(reg, 32), ARMRegister(reg, 32));
+ uxth(ARMRegister(reg, 32), ARMRegister(reg, 32));
+}
+
+void MacroAssembler::byteSwap32(Register reg) {
+ rev(ARMRegister(reg, 32), ARMRegister(reg, 32));
+}
+
+void MacroAssembler::byteSwap64(Register64 reg) {
+ rev(ARMRegister(reg.reg, 64), ARMRegister(reg.reg, 64));
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::add32(Register src, Register dest) {
+ Add(ARMRegister(dest, 32), ARMRegister(dest, 32),
+ Operand(ARMRegister(src, 32)));
+}
+
+void MacroAssembler::add32(Imm32 imm, Register dest) {
+ Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != dest.base);
+
+ Ldr(scratch32, toMemOperand(dest));
+ Add(scratch32, scratch32, Operand(imm.value));
+ Str(scratch32, toMemOperand(dest));
+}
+
+void MacroAssembler::addPtr(Register src, Register dest) {
+ addPtr(src, dest, dest);
+}
+
+void MacroAssembler::addPtr(Register src1, Register src2, Register dest) {
+ Add(ARMRegister(dest, 64), ARMRegister(src1, 64),
+ Operand(ARMRegister(src2, 64)));
+}
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) {
+ addPtr(imm, dest, dest);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, Register src, Register dest) {
+ Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value));
+}
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != dest.base);
+
+ Ldr(scratch64, toMemOperand(dest));
+ Add(scratch64, scratch64, Operand(imm.value));
+ Str(scratch64, toMemOperand(dest));
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != src.base);
+
+ Ldr(scratch64, toMemOperand(src));
+ Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ addPtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ AutoForbidPoolsAndNops afp(this,
+ /* max number of instructions in scope = */ 3);
+ CodeOffset offs = CodeOffset(currentOffset());
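+  // Emit a movz/movk pair with zero payloads; patchSub32FromStackPtr later
+  // fills in their 16-bit immediate fields with the low and high halves of
+  // the final value. The pool/nop guard above keeps the three instructions
+  // contiguous so the patch offsets remain valid.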
+ movz(scratch, 0, 0);
+ movk(scratch, 0, 16);
+ Sub(ARMRegister(dest, 64), sp, scratch);
+ return offs;
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ Instruction* i1 = getInstructionAt(BufferOffset(offset.offset()));
+ MOZ_ASSERT(i1->IsMovz());
+ i1->SetInstructionBits(i1->InstructionBits() |
+ ImmMoveWide(uint16_t(imm.value)));
+
+ Instruction* i2 = getInstructionAt(BufferOffset(offset.offset() + 4));
+ MOZ_ASSERT(i2->IsMovk());
+ i2->SetInstructionBits(i2->InstructionBits() |
+ ImmMoveWide(uint16_t(imm.value >> 16)));
+}
+
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+ fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
+ ARMFPRegister(src, 64));
+}
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+ fadd(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
+ ARMFPRegister(src, 32));
+}
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) {
+ Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+}
+
+void MacroAssembler::sub32(Register src, Register dest) {
+ Sub(ARMRegister(dest, 32), ARMRegister(dest, 32),
+ Operand(ARMRegister(src, 32)));
+}
+
+void MacroAssembler::sub32(const Address& src, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != src.base);
+ load32(src, scratch32.asUnsized());
+ Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) {
+ Sub(ARMRegister(dest, 64), ARMRegister(dest, 64),
+ Operand(ARMRegister(src, 64)));
+}
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != dest.base);
+
+ Ldr(scratch64, toMemOperand(dest));
+ Sub(scratch64, scratch64, Operand(ARMRegister(src, 64)));
+ Str(scratch64, toMemOperand(dest));
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+ Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+}
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != addr.base);
+
+ Ldr(scratch64, toMemOperand(addr));
+ Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
+ ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+ fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
+ ARMFPRegister(src, 64));
+}
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+ fsub(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
+ ARMFPRegister(src, 32));
+}
+
+void MacroAssembler::mul32(Register rhs, Register srcDest) {
+ mul32(srcDest, rhs, srcDest, nullptr);
+}
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+
+ move32(imm, scratch32.asUnsized());
+ mul32(scratch32.asUnsized(), srcDest);
+}
+
+void MacroAssembler::mul32(Register src1, Register src2, Register dest,
+ Label* onOver) {
+ if (onOver) {
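+    // Compute the full 64-bit product and compare it against its own
+    // sign-extended low 32 bits; they differ exactly when the 32-bit
+    // multiply overflowed.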
+ Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32));
+ Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW));
+ B(onOver, NotEqual);
+
+ // Clear upper 32 bits.
+ Uxtw(ARMRegister(dest, 64), ARMRegister(dest, 64));
+ } else {
+ Mul(ARMRegister(dest, 32), ARMRegister(src1, 32), ARMRegister(src2, 32));
+ }
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+
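+  // Umull produces the full 64-bit unsigned product; shifting it right by 32
+  // leaves the high half in dest.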
+ Mov(scratch32, int32_t(imm.value));
+ Umull(ARMRegister(dest, 64), scratch32, ARMRegister(src, 32));
+
+ Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), 32);
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+ Mul(ARMRegister(srcDest, 64), ARMRegister(srcDest, 64), ARMRegister(rhs, 64));
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(dest.reg != scratch64.asUnsized());
+ mov(ImmWord(imm.value), scratch64.asUnsized());
+ Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), scratch64);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
+ ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::mul64(const Register64& src1, const Register64& src2,
+ const Register64& dest) {
+ Mul(ARMRegister(dest.reg, 64), ARMRegister(src1.reg, 64),
+ ARMRegister(src2.reg, 64));
+}
+
+void MacroAssembler::mul64(Imm64 src1, const Register64& src2,
+ const Register64& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(dest.reg != scratch64.asUnsized());
+ mov(ImmWord(src1.value), scratch64.asUnsized());
+ Mul(ARMRegister(dest.reg, 64), ARMRegister(src2.reg, 64), scratch64);
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ ARMRegister xdest(dest, 64);
+ ARMRegister xsrc(src, 64);
+ Add(xdest, xsrc, Operand(xsrc, vixl::LSL, 1));
+}
+
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+ fmul(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
+ ARMFPRegister(src, 32));
+}
+
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+ fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
+ ARMFPRegister(src, 64));
+}
+
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(temp != scratch);
+ movePtr(imm, scratch);
+ const ARMFPRegister scratchDouble = temps.AcquireD();
+ Ldr(scratchDouble, MemOperand(Address(scratch, 0)));
+ fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), scratchDouble);
+}
+
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ if (isUnsigned) {
+ Udiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
+ ARMRegister(rhs, 32));
+ } else {
+ Sdiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
+ ARMRegister(rhs, 32));
+ }
+}
+
+// This does not deal with x % 0 or INT_MIN % -1; the caller needs to filter
+// out those cases when they may occur.
+
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ vixl::UseScratchRegisterScope temps(this);
+ ARMRegister scratch = temps.AcquireW();
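+  // ARM64 has no integer remainder instruction: compute the quotient and
+  // then srcDest - quotient * rhs.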
+ if (isUnsigned) {
+ Udiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
+ } else {
+ Sdiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
+ }
+ Mul(scratch, scratch, ARMRegister(rhs, 32));
+ Sub(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32), scratch);
+}
+
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+ fdiv(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
+ ARMFPRegister(src, 32));
+}
+
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+ fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
+ ARMFPRegister(src, 64));
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratchAddr64 = temps.AcquireX();
+ const ARMRegister scratch64 = temps.AcquireX();
+
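+  // Plain load/add/store: this increment is not atomic.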
+ Mov(scratchAddr64, uint64_t(dest.addr));
+ Ldr(scratch64, MemOperand(scratchAddr64, 0));
+ Add(scratch64, scratch64, Operand(1));
+ Str(scratch64, MemOperand(scratchAddr64, 0));
+}
+
+void MacroAssembler::neg32(Register reg) {
+ Neg(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
+}
+
+void MacroAssembler::neg64(Register64 reg) { negPtr(reg.reg); }
+
+void MacroAssembler::negPtr(Register reg) {
+ Neg(ARMRegister(reg, 64), Operand(ARMRegister(reg, 64)));
+}
+
+void MacroAssembler::negateFloat(FloatRegister reg) {
+ fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32));
+}
+
+void MacroAssembler::negateDouble(FloatRegister reg) {
+ fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64));
+}
+
+void MacroAssembler::abs32(Register src, Register dest) {
+ Cmp(ARMRegister(src, 32), wzr);
+ Cneg(ARMRegister(dest, 32), ARMRegister(src, 32), Assembler::LessThan);
+}
+
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+ fabs(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+ fabs(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+}
+
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+ fsqrt(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+}
+
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+ fsqrt(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+}
+
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ MOZ_ASSERT(handleNaN); // Always true for wasm
+ fmin(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
+ ARMFPRegister(other, 32));
+}
+
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ MOZ_ASSERT(handleNaN); // Always true for wasm
+ fmin(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
+ ARMFPRegister(other, 64));
+}
+
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ MOZ_ASSERT(handleNaN); // Always true for wasm
+ fmax(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
+ ARMFPRegister(other, 32));
+}
+
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ MOZ_ASSERT(handleNaN); // Always true for wasm
+ fmax(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
+ ARMFPRegister(other, 64));
+}
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
+}
+
+void MacroAssembler::lshiftPtr(Register shift, Register dest) {
+ Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), ARMRegister(shift, 64));
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ lshiftPtr(imm, dest.reg);
+}
+
+void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
+ Lsl(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
+ ARMRegister(shift, 64));
+}
+
+void MacroAssembler::lshift32(Register shift, Register dest) {
+ Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
+}
+
+void MacroAssembler::flexibleLshift32(Register src, Register dest) {
+ lshift32(src, dest);
+}
+
+void MacroAssembler::lshift32(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register src, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ Lsr(ARMRegister(dest, 64), ARMRegister(src, 64), imm.value);
+}
+
+void MacroAssembler::rshiftPtr(Register shift, Register dest) {
+ Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), ARMRegister(shift, 64));
+}
+
+void MacroAssembler::rshift32(Register shift, Register dest) {
+ Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
+}
+
+void MacroAssembler::flexibleRshift32(Register src, Register dest) {
+ rshift32(src, dest);
+}
+
+void MacroAssembler::rshift32(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ Asr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
+}
+
+void MacroAssembler::rshift32Arithmetic(Register shift, Register dest) {
+ Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
+}
+
+void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
+}
+
+void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
+ rshift32Arithmetic(src, dest);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ rshiftPtr(imm, dest.reg);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
+ Lsr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
+ ARMRegister(shift, 64));
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ Asr(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), imm.value);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
+ Asr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
+ ARMRegister(shift, 64));
+}
+
+// ===============================================================
+// Condition functions
+
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+
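+ // Unsigned conditions compare the zero-extended byte against the low eight
+ // bits of the immediate; signed conditions sign-extend both sides so the
+ // 32-bit compare orders the values like an 8-bit compare would.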
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch);
+ cmp32Set(cond, scratch, Imm32(uint8_t(rhs.value)), dest);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch);
+ cmp32Set(cond, scratch, Imm32(int8_t(rhs.value)), dest);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch);
+ cmp32Set(cond, scratch, Imm32(uint16_t(rhs.value)), dest);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch);
+ cmp32Set(cond, scratch, Imm32(int16_t(rhs.value)), dest);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ cmp32(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ cmpPtrSet(cond, lhs, ImmWord(static_cast<uintptr_t>(rhs.value)), dest);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Rotation functions
+
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
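+ // AArch64 has no rotate-left instruction: rol(x, n) == ror(x, (32 - n) & 31).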
+ Ror(ARMRegister(dest, 32), ARMRegister(input, 32), (32 - count.value) & 31);
+}
+
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireW();
+ // Really 32 - count, but the upper bits of the result are ignored.
+ Neg(scratch, ARMRegister(count, 32));
+ Ror(ARMRegister(dest, 32), ARMRegister(input, 32), scratch);
+}
+
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+ Ror(ARMRegister(dest, 32), ARMRegister(input, 32), count.value & 31);
+}
+
+void MacroAssembler::rotateRight(Register count, Register input,
+ Register dest) {
+ Ror(ARMRegister(dest, 32), ARMRegister(input, 32), ARMRegister(count, 32));
+}
+
+void MacroAssembler::rotateLeft64(Register count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ // Really 64 - count, but the upper bits of the result are ignored.
+ Neg(scratch, ARMRegister(count, 64));
+ Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), scratch);
+}
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
+ (64 - count.value) & 63);
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
+ ARMRegister(count, 64));
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), count.value & 63);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+ Clz(ARMRegister(dest, 32), ARMRegister(src, 32));
+}
+
+void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
+ Rbit(ARMRegister(dest, 32), ARMRegister(src, 32));
+ Clz(ARMRegister(dest, 32), ARMRegister(dest, 32));
+}
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ Clz(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ Rbit(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
+ Clz(ARMRegister(dest, 64), ARMRegister(dest, 64));
+}
+
+void MacroAssembler::popcnt32(Register src_, Register dest_, Register tmp_) {
+ MOZ_ASSERT(tmp_ != Register::Invalid());
+
+ // Equivalent to mozilla::CountPopulation32().
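+ // This is the usual SWAR sequence: form 2-bit partial sums with
+ // x - ((x >> 1) & 0x55555555), then 4-bit and 8-bit sums, and finally
+ // accumulate all byte counts into the top byte with shifted adds before
+ // shifting it down by 24.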
+
+ ARMRegister src(src_, 32);
+ ARMRegister dest(dest_, 32);
+ ARMRegister tmp(tmp_, 32);
+
+ Mov(tmp, src);
+ if (src_ != dest_) {
+ Mov(dest, src);
+ }
+ Lsr(dest, dest, 1);
+ And(dest, dest, 0x55555555);
+ Sub(dest, tmp, dest);
+ Lsr(tmp, dest, 2);
+ And(tmp, tmp, 0x33333333);
+ And(dest, dest, 0x33333333);
+ Add(dest, tmp, dest);
+ Add(dest, dest, Operand(dest, vixl::LSR, 4));
+ And(dest, dest, 0x0F0F0F0F);
+ Add(dest, dest, Operand(dest, vixl::LSL, 8));
+ Add(dest, dest, Operand(dest, vixl::LSL, 16));
+ Lsr(dest, dest, 24);
+}
+
+void MacroAssembler::popcnt64(Register64 src_, Register64 dest_,
+ Register tmp_) {
+ MOZ_ASSERT(tmp_ != Register::Invalid());
+
+ // Equivalent to mozilla::CountPopulation64(), though likely more efficient.
+
+ ARMRegister src(src_.reg, 64);
+ ARMRegister dest(dest_.reg, 64);
+ ARMRegister tmp(tmp_, 64);
+
+ Mov(tmp, src);
+ if (src_ != dest_) {
+ Mov(dest, src);
+ }
+ Lsr(dest, dest, 1);
+ And(dest, dest, 0x5555555555555555);
+ Sub(dest, tmp, dest);
+ Lsr(tmp, dest, 2);
+ And(tmp, tmp, 0x3333333333333333);
+ And(dest, dest, 0x3333333333333333);
+ Add(dest, tmp, dest);
+ Add(dest, dest, Operand(dest, vixl::LSR, 4));
+ And(dest, dest, 0x0F0F0F0F0F0F0F0F);
+ Add(dest, dest, Operand(dest, vixl::LSL, 8));
+ Add(dest, dest, Operand(dest, vixl::LSL, 16));
+ Add(dest, dest, Operand(dest, vixl::LSL, 32));
+ Lsr(dest, dest, 56);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch);
+ branch32(cond, scratch, Imm32(uint8_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch);
+ branch32(cond, scratch, Imm32(int8_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch);
+ branch32(cond, scratch, rhs, label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch);
+ branch32(cond, scratch, rhs, label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch);
+ branch32(cond, scratch, Imm32(uint16_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch);
+ branch32(cond, scratch, Imm32(int16_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ cmp32(lhs, rhs);
+ B(label, cond);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
+ L label) {
+ if (imm.value == 0 && cond == Assembler::Equal) {
+ Cbz(ARMRegister(lhs, 32), label);
+ } else if (imm.value == 0 && cond == Assembler::NotEqual) {
+ Cbnz(ARMRegister(lhs, 32), label);
+ } else {
+ cmp32(lhs, imm);
+ B(label, cond);
+ }
+}
+
+void MacroAssembler::branch32(Condition cond, Register lhs, const Address& rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs);
+ MOZ_ASSERT(scratch != rhs.base);
+ load32(rhs, scratch);
+ branch32(cond, lhs, scratch, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ MOZ_ASSERT(scratch != rhs);
+ load32(lhs, scratch);
+ branch32(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 imm,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ load32(lhs, scratch);
+ branch32(cond, scratch, imm, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ movePtr(ImmPtr(lhs.addr), scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ load32(lhs, scratch);
+ branch32(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
+ MOZ_ASSERT(scratch32.asUnsized() != lhs.index);
+ doBaseIndex(scratch32, lhs, vixl::LDR_w);
+ branch32(cond, scratch32.asUnsized(), rhs, label);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
+ Imm32 rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ movePtr(lhs, scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ if (val.value == 0 && cond == Assembler::Equal) {
+ Cbz(ARMRegister(lhs.reg, 64), success);
+ } else if (val.value == 0 && cond == Assembler::NotEqual) {
+ Cbnz(ARMRegister(lhs.reg, 64), success);
+ } else {
+ Cmp(ARMRegister(lhs.reg, 64), val.value);
+ B(success, cond);
+ }
+ if (fail) {
+ B(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ Cmp(ARMRegister(lhs.reg, 64), ARMRegister(rhs.reg, 64));
+ B(success, cond);
+ if (fail) {
+ B(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, rhs.reg, label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
+ B(label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ if (rhs.value == 0 && cond == Assembler::Equal) {
+ Cbz(ARMRegister(lhs, 64), label);
+ } else if (rhs.value == 0 && cond == Assembler::NotEqual) {
+ Cbnz(ARMRegister(lhs, 64), label);
+ } else {
+ cmpPtr(lhs, rhs);
+ B(label, cond);
+ }
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+ Label* label) {
+ if (rhs.value == 0 && cond == Assembler::Equal) {
+ Cbz(ARMRegister(lhs, 64), label);
+ } else if (rhs.value == 0 && cond == Assembler::NotEqual) {
+ Cbnz(ARMRegister(lhs, 64), label);
+ } else {
+ cmpPtr(lhs, rhs);
+ B(label, cond);
+ }
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs);
+ movePtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+ Label* label) {
+ if (rhs.value == 0 && cond == Assembler::Equal) {
+ Cbz(ARMRegister(lhs, 64), label);
+ } else if (rhs.value == 0 && cond == Assembler::NotEqual) {
+ Cbnz(ARMRegister(lhs, 64), label);
+ } else {
+ cmpPtr(lhs, rhs);
+ B(label, cond);
+ }
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+ L label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ MOZ_ASSERT(scratch != rhs);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch1_64 = temps.AcquireX();
+ const ARMRegister scratch2_64 = temps.AcquireX();
+ MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base);
+ MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base);
+
+ movePtr(rhs, scratch1_64.asUnsized());
+ loadPtr(lhs, scratch2_64.asUnsized());
+ branchPtr(cond, scratch2_64.asUnsized(), scratch1_64.asUnsized(), label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != rhs);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != rhs);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ ImmWord rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ MOZ_ASSERT(scratch != lhs.index);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ MOZ_ASSERT(scratch != lhs.index);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ compareFloat(cond, lhs, rhs);
+ switch (cond) {
+ case DoubleNotEqual: {
+ Label unordered;
+ // not equal *and* ordered
+ branch(Overflow, &unordered);
+ branch(NotEqual, label);
+ bind(&unordered);
+ break;
+ }
+ case DoubleEqualOrUnordered:
+ branch(Overflow, label);
+ branch(Equal, label);
+ break;
+ default:
+ branch(Condition(cond), label);
+ }
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ ARMFPRegister src32(src, 32);
+ ARMRegister dest64(dest, 64);
+
+ MOZ_ASSERT(!scratch64.Is(dest64));
+
+ // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ Fcvtzs(dest64, src32);
+
+ // Fail if the result is saturated, i.e. it's either INT64_MIN or INT64_MAX.
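+ // Adding INT64_MAX wraps INT64_MIN to UINT64_MAX and INT64_MAX to
+ // UINT64_MAX - 1; the Cmn against 3 (a compare against -3) then makes the
+ // unsigned Above condition hold for exactly those two values.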
+ Add(scratch64, dest64, Operand(0x7fff'ffff'ffff'ffff));
+ Cmn(scratch64, 3);
+ B(fail, Assembler::Above);
+
+ // Clear upper 32 bits.
+ Uxtw(dest64, dest64);
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ convertFloat32ToInt32(src, dest, fail, false);
+}
+
+void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ compareDouble(cond, lhs, rhs);
+ switch (cond) {
+ case DoubleNotEqual: {
+ Label unordered;
+ // not equal *and* ordered
+ branch(Overflow, &unordered);
+ branch(NotEqual, label);
+ bind(&unordered);
+ break;
+ }
+ case DoubleEqualOrUnordered:
+ branch(Overflow, label);
+ branch(Equal, label);
+ break;
+ default:
+ branch(Condition(cond), label);
+ }
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ // ARMv8.3 chips support the FJCVTZS instruction, which handles exactly this
+ // logic. But the simulator does not implement it, and when the simulator runs
+ // on ARM64 hardware we want to override vixl's detection of it.
+#if defined(JS_SIMULATOR_ARM64) && (defined(__aarch64__) || defined(_M_ARM64))
+ const bool fjscvt = false;
+#else
+ const bool fjscvt = CPUHas(vixl::CPUFeatures::kFP, vixl::CPUFeatures::kJSCVT);
+#endif
+ if (fjscvt) {
+ Fjcvtzs(ARMRegister(dest, 32), ARMFPRegister(src, 64));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ // An out of range integer will be saturated to the destination size.
+ ARMFPRegister src64(src, 64);
+ ARMRegister dest64(dest, 64);
+
+ MOZ_ASSERT(!scratch64.Is(dest64));
+
+ // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ Fcvtzs(dest64, src64);
+
+ // Fail if the result is saturated, i.e. it's either INT64_MIN or INT64_MAX.
+ Add(scratch64, dest64, Operand(0x7fff'ffff'ffff'ffff));
+ Cmn(scratch64, 3);
+ B(fail, Assembler::Above);
+
+ // Clear upper 32 bits.
+ Uxtw(dest64, dest64);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ ARMFPRegister src64(src, 64);
+ ARMRegister dest64(dest, 64);
+ ARMRegister dest32(dest, 32);
+
+ // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ Fcvtzs(dest64, src64);
+
+ // Fail on overflow cases.
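+ // The truncation fits in int32 exactly when the 64-bit result equals the
+ // sign extension of its own low 32 bits.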
+ Cmp(dest64, Operand(dest32, vixl::SXTW));
+ B(fail, Assembler::NotEqual);
+
+ // Clear upper 32 bits.
+ Uxtw(dest64, dest64);
+}
+
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+ Label* label) {
+ adds32(src, dest);
+ B(label, cond);
+}
+
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+ Label* label) {
+ subs32(src, dest);
+ branch(cond, label);
+}
+
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ vixl::UseScratchRegisterScope temps(this);
+ mul32(src, dest, dest, label);
+}
+
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ rshift32(src, dest);
+ branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
+}
+
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+ MOZ_ASSERT(cond == Overflow);
+ negs32(reg);
+ B(label, cond);
+}
+
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ adds64(src, dest);
+ B(label, cond);
+}
+
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ subs64(src, dest);
+ B(label, cond);
+}
+
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ const ARMRegister src64(src, 64);
+ const ARMRegister dest64(dest, 64);
+
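+ // The signed product overflows iff its high 64 bits (Smulh) differ from the
+ // sign extension of the low 64 bits, i.e. from dest64 >> 63.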
+ Smulh(scratch64, dest64, src64);
+ Mul(dest64, dest64, src64);
+ Cmp(scratch64, Operand(dest64, vixl::ASR, 63));
+ B(label, NotEqual);
+}
+
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ Subs(ARMRegister(lhs, 64), ARMRegister(lhs, 64), Operand(rhs.value));
+ B(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ // The x86-biased front end prefers |test foo, foo| to |cmp foo, #0|. We look
+ // for the former pattern and expand as Cbz/Cbnz when possible.
+ if (lhs == rhs && cond == Zero) {
+ Cbz(ARMRegister(lhs, 32), label);
+ } else if (lhs == rhs && cond == NonZero) {
+ Cbnz(ARMRegister(lhs, 32), label);
+ } else {
+ test32(lhs, rhs);
+ B(label, cond);
+ }
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ test32(lhs, rhs);
+ B(label, cond);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ load32(lhs, scratch);
+ branchTest32(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ load32(lhs, scratch);
+ branchTest32(cond, scratch, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ // See branchTest32.
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs && cond == Zero) {
+ Cbz(ARMRegister(lhs, 64), label);
+ } else if (lhs == rhs && cond == NonZero) {
+ Cbnz(ARMRegister(lhs, 64), label);
+ } else {
+ Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
+ B(label, cond);
+ }
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ Tst(ARMRegister(lhs, 64), Operand(rhs.value));
+ B(label, cond);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+ Imm32 rhs, Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ loadPtr(lhs, scratch);
+ branchTestPtr(cond, scratch, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+ Label* label) {
+ branchTestUndefinedImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+ Label* label) {
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const BaseIndex& address,
+ Label* label) {
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestUndefinedImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testUndefined(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+ Label* label) {
+ branchTestInt32Impl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+ Label* label) {
+ branchTestInt32Impl(cond, address, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestInt32Impl(cond, address, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestInt32Impl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestInt32Impl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testInt32(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestInt32Truthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition c = testInt32Truthy(truthy, value);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ branchTestDoubleImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+ Label* label) {
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestDoubleImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testDouble(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg,
+ Label* label) {
+ Fcmp(ARMFPRegister(reg, 64), 0.0);
+ if (!truthy) {
+ // Falsy values are zero and NaN.
+ branch(Zero, label);
+ branch(Overflow, label);
+ } else {
+ // Truthy values are non-zero and not NaN. An unordered compare (NaN) sets
+ // the Overflow flag, so treat that case as falsy as well.
+ Label onFalse;
+ branch(Zero, &onFalse);
+ branch(Overflow, &onFalse);
+ B(label);
+ bind(&onFalse);
+ }
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+ Label* label) {
+ branchTestNumberImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestNumberImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestNumberImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testNumber(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+ Label* label) {
+ branchTestBooleanImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+ Label* label) {
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestBooleanImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestBooleanImpl(Condition cond, const T& tag,
+ Label* label) {
+ Condition c = testBoolean(cond, tag);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition c = testBooleanTruthy(truthy, value);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+ Label* label) {
+ branchTestStringImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+ Label* label) {
+ branchTestStringImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestStringImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestStringImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestStringImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testString(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestStringTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition c = testStringTruthy(truthy, value);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+ Label* label) {
+ branchTestSymbolImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+ Label* label) {
+ branchTestSymbolImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestSymbolImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestSymbolImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testSymbol(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+ Label* label) {
+ branchTestBigIntImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+ Label* label) {
+ branchTestBigIntImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestBigIntImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestBigIntImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestBigIntImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testBigInt(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestBigIntTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition c = testBigIntTruthy(truthy, value);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+ Label* label) {
+ branchTestNullImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+ Label* label) {
+ branchTestNullImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestNullImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestNullImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestNullImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testNull(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+ Label* label) {
+ branchTestObjectImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+ Label* label) {
+ branchTestObjectImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestObjectImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestObjectImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestObjectImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testObject(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestGCThingImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& src,
+ Label* label) {
+ Condition c = testGCThing(cond, src);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+ Label* label) {
+ branchTestPrimitiveImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestPrimitiveImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t,
+ Label* label) {
+ Condition c = testPrimitive(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+ Label* label) {
+ branchTestMagicImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+ Label* label) {
+ branchTestMagicImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestMagicImpl(cond, address, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ branchTestMagicImpl(cond, value, label);
+}
+
+template <typename T, class L>
+void MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label) {
+ Condition c = testMagic(cond, t);
+ B(label, c);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ cmpPtr(valaddr, ImmWord(magic));
+ B(label, cond);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ branchPtr(cond, lhs, rhs.valueReg(), label);
+}
+
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testNumber(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testBoolean(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testString(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testSymbol(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testBigInt(cond, src);
+ emitSet(cond, dest);
+}
+
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+ vixl::UseScratchRegisterScope temps(&this->asVIXL());
+ const ARMRegister scratch64 = temps.AcquireX();
+ loadPtr(addr, scratch64.asUnsized());
+ Br(scratch64);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ cmp32(lhs, rhs);
+ Csel(ARMRegister(dest, 32), ARMRegister(src, 32), ARMRegister(dest, 32),
+ cond);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ cmpPtr(lhs, rhs);
+ Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
+ cond);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+ const Address& rhs, const Address& src,
+ Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
+ const Address& src, Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ cmp32(lhs, rhs);
+ Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
+ cond);
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ // ARM64 does not support conditional loads, so we use a branch with a CSel
+ // (to prevent Spectre attacks).
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ // Can't use branch32() here, because it may select Cbz/Cbnz which don't
+ // affect condition flags.
+ Label done;
+ cmp32(lhs, rhs);
+ B(&done, Assembler::InvertCondition(cond));
+
+ loadPtr(src, scratch64.asUnsized());
+ Csel(ARMRegister(dest, 64), scratch64, ARMRegister(dest, 64), cond);
+ bind(&done);
+}
+
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+
+ // ARM64 does not support conditional loads, so we use a branch with a CSel
+ // (to prevent Spectre attacks).
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ Label done;
+ branchTest32(Assembler::InvertCondition(cond), addr, mask, &done);
+ loadPtr(src, scratch64.asUnsized());
+ Csel(ARMRegister(dest, 64), scratch64, ARMRegister(dest, 64), cond);
+ bind(&done);
+}
+
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+ Imm32 mask, Register src, Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+ test32(addr, mask);
+ Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
+ cond);
+}
+
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+ Register dest) {
+ Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
+ cond);
+}
+
+void MacroAssembler::spectreZeroRegister(Condition cond, Register,
+ Register dest) {
+ Csel(ARMRegister(dest, 64), ARMRegister(dest, 64), vixl::xzr,
+ Assembler::InvertCondition(cond));
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(length != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+
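+ // The Csel below consumes the flags from the bounds-check compare: if the
+ // branch above is mispredicted for an out-of-bounds index, the index is
+ // forced to zero so speculative accesses stay in bounds.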
+ if (JitOptions.spectreIndexMasking) {
+ Csel(ARMRegister(index, 32), ARMRegister(index, 32), vixl::wzr,
+ Assembler::Above);
+ }
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(index != length.base);
+ MOZ_ASSERT(length.base != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ Csel(ARMRegister(index, 32), ARMRegister(index, 32), vixl::wzr,
+ Assembler::Above);
+ }
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(length != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ Csel(ARMRegister(index, 64), ARMRegister(index, 64), vixl::xzr,
+ Assembler::Above);
+ }
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(index != length.base);
+ MOZ_ASSERT(length.base != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ Csel(ARMRegister(index, 64), ARMRegister(index, 64), vixl::xzr,
+ Assembler::Above);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const Address& dest) {
+ Str(ARMFPRegister(src, 64), toMemOperand(dest));
+}
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const BaseIndex& dest) {
+ doBaseIndex(ARMFPRegister(src, 64), dest, vixl::STR_d);
+}
+
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const Address& addr) {
+ Str(ARMFPRegister(src, 32), toMemOperand(addr));
+}
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const BaseIndex& addr) {
+ doBaseIndex(ARMFPRegister(src, 32), addr, vixl::STR_s);
+}
+
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
+ // Bug 1715494: Discriminating barriers such as StoreStore are hard to reason
+ // about. Execute the full barrier for everything that requires a barrier.
+ if (barrier) {
+ Dmb(vixl::InnerShareable, vixl::BarrierAll);
+ }
+}
+
+// ===============================================================
+// Clamping functions.
+
+void MacroAssembler::clampIntToUint8(Register reg) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ const ARMRegister reg32(reg, 32);
+ MOZ_ASSERT(!scratch32.Is(reg32));
+
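+ // Comparing the value against its own zero-extended low byte distinguishes
+ // all three cases with one set of flags: negative values clamp to 0, values
+ // in [0, 255] are kept by both Csels, and values above 255 clamp to 0xff.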
+ Cmp(reg32, Operand(reg32, vixl::UXTB));
+ Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual);
+ Mov(scratch32, Operand(0xff));
+ Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
+ type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
+ // dest := src XOR mask
+ // fail if dest >> JSVAL_TAG_SHIFT != 0
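+ // XORing with the expected shifted tag clears the tag bits only if the value
+ // really has that type, leaving just the payload; any bits remaining above
+ // JSVAL_TAG_SHIFT indicate a different tag.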
+ const ARMRegister src64(src.valueReg(), 64);
+ const ARMRegister dest64(dest, 64);
+ Eor(dest64, src64, Operand(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
+ Cmp(vixl::xzr, Operand(dest64, vixl::LSR, JSVAL_TAG_SHIFT));
+ j(Assembler::NotEqual, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+//}}} check_macroassembler_style
+
+// Wasm SIMD
+
+static inline ARMFPRegister SimdReg(FloatRegister r) {
+ MOZ_ASSERT(r.isSimd128());
+ return ARMFPRegister(r, 128);
+}
+
+static inline ARMFPRegister Simd16B(FloatRegister r) {
+ return SimdReg(r).V16B();
+}
+
+static inline ARMFPRegister Simd8B(FloatRegister r) { return SimdReg(r).V8B(); }
+
+static inline ARMFPRegister Simd8H(FloatRegister r) { return SimdReg(r).V8H(); }
+
+static inline ARMFPRegister Simd4H(FloatRegister r) { return SimdReg(r).V4H(); }
+
+static inline ARMFPRegister Simd4S(FloatRegister r) { return SimdReg(r).V4S(); }
+
+static inline ARMFPRegister Simd2S(FloatRegister r) { return SimdReg(r).V2S(); }
+
+static inline ARMFPRegister Simd2D(FloatRegister r) { return SimdReg(r).V2D(); }
+
+static inline ARMFPRegister Simd1D(FloatRegister r) { return SimdReg(r).V1D(); }
+
+static inline ARMFPRegister SimdQ(FloatRegister r) { return SimdReg(r).Q(); }
+
+//{{{ check_macroassembler_style
+
+// Moves
+
+void MacroAssembler::moveSimd128(FloatRegister src, FloatRegister dest) {
+ if (src != dest) {
+ Mov(SimdReg(dest), SimdReg(src));
+ }
+}
+
+void MacroAssembler::loadConstantSimd128(const SimdConstant& v,
+ FloatRegister dest) {
+ // Movi does not yet generate good code for many cases, bug 1664397.
+ SimdConstant c = SimdConstant::CreateX2((const int64_t*)v.bytes());
+ Movi(SimdReg(dest), c.asInt64x2()[1], c.asInt64x2()[0]);
+}
+
+// Splat
+
+void MacroAssembler::splatX16(Register src, FloatRegister dest) {
+ Dup(Simd16B(dest), ARMRegister(src, 32));
+}
+
+void MacroAssembler::splatX16(uint32_t srcLane, FloatRegister src,
+ FloatRegister dest) {
+ Dup(Simd16B(dest), Simd16B(src), srcLane);
+}
+
+void MacroAssembler::splatX8(Register src, FloatRegister dest) {
+ Dup(Simd8H(dest), ARMRegister(src, 32));
+}
+
+void MacroAssembler::splatX8(uint32_t srcLane, FloatRegister src,
+ FloatRegister dest) {
+ Dup(Simd8H(dest), Simd8H(src), srcLane);
+}
+
+void MacroAssembler::splatX4(Register src, FloatRegister dest) {
+ Dup(Simd4S(dest), ARMRegister(src, 32));
+}
+
+void MacroAssembler::splatX4(FloatRegister src, FloatRegister dest) {
+ Dup(Simd4S(dest), ARMFPRegister(src), 0);
+}
+
+void MacroAssembler::splatX2(Register64 src, FloatRegister dest) {
+ Dup(Simd2D(dest), ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::splatX2(FloatRegister src, FloatRegister dest) {
+ Dup(Simd2D(dest), ARMFPRegister(src), 0);
+}
+
+// Extract lane as scalar. Float extraction does not canonicalize the value.
+
+void MacroAssembler::extractLaneInt8x16(uint32_t lane, FloatRegister src,
+ Register dest_) {
+ MOZ_ASSERT(lane < 16);
+ ARMRegister dest(dest_, 32);
+ Umov(dest, Simd4S(src), lane / 4);
+ Sbfx(dest, dest, (lane % 4) * 8, 8);
+}
+
+void MacroAssembler::unsignedExtractLaneInt8x16(uint32_t lane,
+ FloatRegister src,
+ Register dest_) {
+ MOZ_ASSERT(lane < 16);
+ ARMRegister dest(dest_, 32);
+ Umov(dest, Simd4S(src), lane / 4);
+ Ubfx(dest, dest, (lane % 4) * 8, 8);
+}
+
+void MacroAssembler::extractLaneInt16x8(uint32_t lane, FloatRegister src,
+ Register dest_) {
+ MOZ_ASSERT(lane < 8);
+ ARMRegister dest(dest_, 32);
+ Umov(dest, Simd4S(src), lane / 2);
+ Sbfx(dest, dest, (lane % 2) * 16, 16);
+}
+
+void MacroAssembler::unsignedExtractLaneInt16x8(uint32_t lane,
+ FloatRegister src,
+ Register dest_) {
+ MOZ_ASSERT(lane < 8);
+ ARMRegister dest(dest_, 32);
+ Umov(dest, Simd4S(src), lane / 2);
+ Ubfx(dest, dest, (lane % 2) * 16, 16);
+}
+
+void MacroAssembler::extractLaneInt32x4(uint32_t lane, FloatRegister src,
+ Register dest_) {
+ MOZ_ASSERT(lane < 4);
+ ARMRegister dest(dest_, 32);
+ Umov(dest, Simd4S(src), lane);
+}
+
+void MacroAssembler::extractLaneInt64x2(uint32_t lane, FloatRegister src,
+ Register64 dest_) {
+ MOZ_ASSERT(lane < 2);
+ ARMRegister dest(dest_.reg, 64);
+ Umov(dest, Simd2D(src), lane);
+}
+
+void MacroAssembler::extractLaneFloat32x4(uint32_t lane, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(lane < 4);
+ Mov(ARMFPRegister(dest).V4S(), 0, Simd4S(src), lane);
+}
+
+void MacroAssembler::extractLaneFloat64x2(uint32_t lane, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(lane < 2);
+ Mov(ARMFPRegister(dest).V2D(), 0, Simd2D(src), lane);
+}
+
+// Replace lane value
+
+void MacroAssembler::replaceLaneInt8x16(unsigned lane, Register rhs,
+ FloatRegister lhsDest) {
+ MOZ_ASSERT(lane < 16);
+ Mov(Simd16B(lhsDest), lane, ARMRegister(rhs, 32));
+}
+
+void MacroAssembler::replaceLaneInt16x8(unsigned lane, Register rhs,
+ FloatRegister lhsDest) {
+ MOZ_ASSERT(lane < 8);
+ Mov(Simd8H(lhsDest), lane, ARMRegister(rhs, 32));
+}
+
+void MacroAssembler::replaceLaneInt32x4(unsigned lane, Register rhs,
+ FloatRegister lhsDest) {
+ MOZ_ASSERT(lane < 4);
+ Mov(Simd4S(lhsDest), lane, ARMRegister(rhs, 32));
+}
+
+void MacroAssembler::replaceLaneInt64x2(unsigned lane, Register64 rhs,
+ FloatRegister lhsDest) {
+ MOZ_ASSERT(lane < 2);
+ Mov(Simd2D(lhsDest), lane, ARMRegister(rhs.reg, 64));
+}
+
+void MacroAssembler::replaceLaneFloat32x4(unsigned lane, FloatRegister rhs,
+ FloatRegister lhsDest) {
+ MOZ_ASSERT(lane < 4);
+ Mov(Simd4S(lhsDest), lane, ARMFPRegister(rhs).V4S(), 0);
+}
+
+void MacroAssembler::replaceLaneFloat64x2(unsigned lane, FloatRegister rhs,
+ FloatRegister lhsDest) {
+ MOZ_ASSERT(lane < 2);
+ Mov(Simd2D(lhsDest), lane, ARMFPRegister(rhs).V2D(), 0);
+}
+
+// Shuffle - blend and permute with immediate indices, and its many
+// specializations. Lane values other than those mentioned are illegal.
+
+// lane values 0..31
+void MacroAssembler::shuffleInt8x16(const uint8_t lanes[16], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest) {
+ // The general solution generates ho-hum code. Realistic programs will use
+ // patterns that can be specialized, and this will be much better. That will
+ // be handled by bug 1656834, so don't worry about it here.
+
+ // Set scratch to the lane value when it selects from lhs, or to the
+ // bitwise-complemented lane value when it selects from rhs.
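+ // Tbl writes zero for an out-of-range index while Tbx leaves the destination
+ // byte unchanged. Inverting an index takes it out of range, so the first
+ // lookup fills only the lanes taken from the first operand; after Not() flips
+ // which indices are valid, the second lookup fills the remaining lanes
+ // without disturbing the first set.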
+ ScratchSimd128Scope scratch(*this);
+ int8_t idx[16];
+
+ if (lhs == rhs) {
+ for (unsigned i = 0; i < 16; i++) {
+ idx[i] = lanes[i] < 16 ? lanes[i] : (lanes[i] - 16);
+ }
+ loadConstantSimd128(SimdConstant::CreateX16(idx), scratch);
+ Tbl(Simd16B(dest), Simd16B(lhs), Simd16B(scratch));
+ return;
+ }
+
+ if (rhs != dest) {
+ for (unsigned i = 0; i < 16; i++) {
+ idx[i] = lanes[i] < 16 ? lanes[i] : ~(lanes[i] - 16);
+ }
+ } else {
+ MOZ_ASSERT(lhs != dest);
+ for (unsigned i = 0; i < 16; i++) {
+ idx[i] = lanes[i] < 16 ? ~lanes[i] : (lanes[i] - 16);
+ }
+ std::swap(lhs, rhs);
+ }
+ loadConstantSimd128(SimdConstant::CreateX16(idx), scratch);
+ Tbl(Simd16B(dest), Simd16B(lhs), Simd16B(scratch));
+ Not(Simd16B(scratch), Simd16B(scratch));
+ Tbx(Simd16B(dest), Simd16B(rhs), Simd16B(scratch));
+}
+
+void MacroAssembler::shuffleInt8x16(const uint8_t lanes[16], FloatRegister rhs,
+ FloatRegister lhsDest) {
+ shuffleInt8x16(lanes, lhsDest, rhs, lhsDest);
+}
+
+void MacroAssembler::blendInt8x16(const uint8_t lanes[16], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ int8_t lanes_[16];
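+ // Build a Tbx index vector: lane i takes byte i of the table operand when the
+ // index is in range and keeps the existing destination byte when the index is
+ // out of range (16 + i).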
+
+ if (rhs == dest) {
+ for (unsigned i = 0; i < 16; i++) {
+ lanes_[i] = lanes[i] == 0 ? i : 16 + i;
+ }
+ loadConstantSimd128(SimdConstant::CreateX16(lanes_), scratch);
+ Tbx(Simd16B(dest), Simd16B(lhs), Simd16B(scratch));
+ return;
+ }
+
+ moveSimd128(lhs, dest);
+ for (unsigned i = 0; i < 16; i++) {
+ lanes_[i] = lanes[i] != 0 ? i : 16 + i;
+ }
+ loadConstantSimd128(SimdConstant::CreateX16(lanes_), scratch);
+ Tbx(Simd16B(dest), Simd16B(rhs), Simd16B(scratch));
+}
+
+void MacroAssembler::blendInt16x8(const uint16_t lanes[8], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest) {
+ static_assert(sizeof(const uint16_t /*lanes*/[8]) == sizeof(uint8_t[16]));
+ blendInt8x16(reinterpret_cast<const uint8_t*>(lanes), lhs, rhs, dest);
+}
+
+void MacroAssembler::laneSelectSimd128(FloatRegister mask, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest) {
+ MOZ_ASSERT(mask == dest);
+ Bsl(Simd16B(mask), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::interleaveHighInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Zip2(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::interleaveHighInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Zip2(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::interleaveHighInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Zip2(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+void MacroAssembler::interleaveHighInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Zip2(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::interleaveLowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Zip1(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::interleaveLowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Zip1(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::interleaveLowInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Zip1(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+void MacroAssembler::interleaveLowInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Zip1(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::permuteInt8x16(const uint8_t lanes[16], FloatRegister src,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ loadConstantSimd128(SimdConstant::CreateX16((const int8_t*)lanes), scratch);
+ Tbl(Simd16B(dest), Simd16B(src), Simd16B(scratch));
+}
+
+void MacroAssembler::permuteInt16x8(const uint16_t lanes[8], FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(lanes[0] < 8 && lanes[1] < 8 && lanes[2] < 8 && lanes[3] < 8 &&
+ lanes[4] < 8 && lanes[5] < 8 && lanes[6] < 8 && lanes[7] < 8);
+ const int8_t lanes_[16] = {
+ (int8_t)(lanes[0] << 1), (int8_t)((lanes[0] << 1) + 1),
+ (int8_t)(lanes[1] << 1), (int8_t)((lanes[1] << 1) + 1),
+ (int8_t)(lanes[2] << 1), (int8_t)((lanes[2] << 1) + 1),
+ (int8_t)(lanes[3] << 1), (int8_t)((lanes[3] << 1) + 1),
+ (int8_t)(lanes[4] << 1), (int8_t)((lanes[4] << 1) + 1),
+ (int8_t)(lanes[5] << 1), (int8_t)((lanes[5] << 1) + 1),
+ (int8_t)(lanes[6] << 1), (int8_t)((lanes[6] << 1) + 1),
+ (int8_t)(lanes[7] << 1), (int8_t)((lanes[7] << 1) + 1),
+ };
+ ScratchSimd128Scope scratch(*this);
+ loadConstantSimd128(SimdConstant::CreateX16(lanes_), scratch);
+ Tbl(Simd16B(dest), Simd16B(src), Simd16B(scratch));
+}
+
+void MacroAssembler::permuteInt32x4(const uint32_t lanes[4], FloatRegister src,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ const int8_t lanes_[16] = {
+ (int8_t)(lanes[0] << 2), (int8_t)((lanes[0] << 2) + 1),
+ (int8_t)((lanes[0] << 2) + 2), (int8_t)((lanes[0] << 2) + 3),
+ (int8_t)(lanes[1] << 2), (int8_t)((lanes[1] << 2) + 1),
+ (int8_t)((lanes[1] << 2) + 2), (int8_t)((lanes[1] << 2) + 3),
+ (int8_t)(lanes[2] << 2), (int8_t)((lanes[2] << 2) + 1),
+ (int8_t)((lanes[2] << 2) + 2), (int8_t)((lanes[2] << 2) + 3),
+ (int8_t)(lanes[3] << 2), (int8_t)((lanes[3] << 2) + 1),
+ (int8_t)((lanes[3] << 2) + 2), (int8_t)((lanes[3] << 2) + 3),
+ };
+ loadConstantSimd128(SimdConstant::CreateX16(lanes_), scratch);
+ Tbl(Simd16B(dest), Simd16B(src), Simd16B(scratch));
+}
+
+void MacroAssembler::rotateRightSimd128(FloatRegister src, FloatRegister dest,
+ uint32_t shift) {
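+ // Ext over the concatenation src:src selects bytes shift..shift+15, i.e. a
+ // byte-wise rotate right by `shift`.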
+ Ext(Simd16B(dest), Simd16B(src), Simd16B(src), shift);
+}
+
+void MacroAssembler::leftShiftSimd128(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(count.value < 16);
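+ // With a zeroed scratch as the low half of the concatenation, extracting
+ // from offset 16 - count moves src up by `count` bytes and shifts in zero
+ // bytes at the bottom.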
+ ScratchSimd128Scope scratch(*this);
+ Movi(Simd16B(scratch), 0);
+ Ext(Simd16B(dest), Simd16B(scratch), Simd16B(src), 16 - count.value);
+}
+
+void MacroAssembler::rightShiftSimd128(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(count.value < 16);
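+ // Symmetrically, extracting from offset `count` of zero:src moves src down
+ // by `count` bytes and shifts in zero bytes at the top.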
+ ScratchSimd128Scope scratch(*this);
+ Movi(Simd16B(scratch), 0);
+ Ext(Simd16B(dest), Simd16B(src), Simd16B(scratch), count.value);
+}
+
+void MacroAssembler::concatAndRightShiftSimd128(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest,
+ uint32_t shift) {
+ MOZ_ASSERT(shift < 16);
+ Ext(Simd16B(dest), Simd16B(rhs), Simd16B(lhs), shift);
+}
+
+// Reverse bytes in lanes.
+
+void MacroAssembler::reverseInt16x8(FloatRegister src, FloatRegister dest) {
+ Rev16(Simd16B(dest), Simd16B(src));
+}
+
+void MacroAssembler::reverseInt32x4(FloatRegister src, FloatRegister dest) {
+ Rev32(Simd16B(dest), Simd16B(src));
+}
+
+void MacroAssembler::reverseInt64x2(FloatRegister src, FloatRegister dest) {
+ Rev64(Simd16B(dest), Simd16B(src));
+}
+
+// Swizzle - permute with variable indices. `rhs` holds the lanes parameter.
+
+void MacroAssembler::swizzleInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Tbl(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::swizzleInt8x16Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Tbl(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+// Integer Add
+
+void MacroAssembler::addInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Add(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::addInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Add(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::addInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Add(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::addInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Add(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+// Integer Subtract
+
+void MacroAssembler::subInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sub(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::subInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sub(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::subInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sub(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::subInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sub(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+// Integer Multiply
+
+void MacroAssembler::mulInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Mul(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::mulInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Mul(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::mulInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) {
+ // As documented at https://chromium-review.googlesource.com/c/v8/v8/+/1781696
+ // lhs = <D C> <B A>
+ // rhs = <H G> <F E>
+ // result = <(DG+CH)_low+CG_high CG_low> <(BE+AF)_low+AE_high AE_low>
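+ // Writing lane 0 of lhs as B*2^32 + A and lane 0 of rhs as F*2^32 + E, the
+ // 64-bit product modulo 2^64 is A*E + 2^32*(A*F + B*E); the B*F term
+ // overflows and disappears. Shll forms (A*F + B*E)_low << 32 and Umlal then
+ // accumulates the full widening product A*E (likewise C*G for lane 1).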
+ ScratchSimd128Scope scratch(*this);
+ Rev64(Simd4S(temp2), Simd4S(lhs)); // temp2 = <C D> <A B>
+ Mul(Simd4S(temp2), Simd4S(temp2), Simd4S(rhs)); // temp2 = <CH DG> <AF BE>
+ Xtn(Simd2S(temp1), Simd2D(rhs)); // temp1 = <0 0> <G E>
+ Addp(Simd4S(temp2), Simd4S(temp2), Simd4S(temp2)); // temp2 = <CH+DG AF+BE>..
+ Xtn(Simd2S(scratch), Simd2D(lhs)); // scratch = <0 0> <C A>
+ Shll(Simd2D(dest), Simd2S(temp2), 32); // dest = <(DG+CH)_low 0>
+ // <(BE+AF)_low 0>
+ Umlal(Simd2D(dest), Simd2S(scratch), Simd2S(temp1));
+}
+
+void MacroAssembler::extMulLowInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smull(Simd8H(dest), Simd8B(lhs), Simd8B(rhs));
+}
+
+void MacroAssembler::extMulHighInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smull2(Simd8H(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::unsignedExtMulLowInt8x16(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ Umull(Simd8H(dest), Simd8B(lhs), Simd8B(rhs));
+}
+
+void MacroAssembler::unsignedExtMulHighInt8x16(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ Umull2(Simd8H(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::extMulLowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smull(Simd4S(dest), Simd4H(lhs), Simd4H(rhs));
+}
+
+void MacroAssembler::extMulHighInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smull2(Simd4S(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::unsignedExtMulLowInt16x8(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ Umull(Simd4S(dest), Simd4H(lhs), Simd4H(rhs));
+}
+
+void MacroAssembler::unsignedExtMulHighInt16x8(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ Umull2(Simd4S(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::extMulLowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smull(Simd2D(dest), Simd2S(lhs), Simd2S(rhs));
+}
+
+void MacroAssembler::extMulHighInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smull2(Simd2D(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::unsignedExtMulLowInt32x4(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ Umull(Simd2D(dest), Simd2S(lhs), Simd2S(rhs));
+}
+
+void MacroAssembler::unsignedExtMulHighInt32x4(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ Umull2(Simd2D(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::q15MulrSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sqrdmulh(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::q15MulrInt16x8Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sqrdmulh(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+// Integer Negate
+
+void MacroAssembler::negInt8x16(FloatRegister src, FloatRegister dest) {
+ Neg(Simd16B(dest), Simd16B(src));
+}
+
+void MacroAssembler::negInt16x8(FloatRegister src, FloatRegister dest) {
+ Neg(Simd8H(dest), Simd8H(src));
+}
+
+void MacroAssembler::negInt32x4(FloatRegister src, FloatRegister dest) {
+ Neg(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::negInt64x2(FloatRegister src, FloatRegister dest) {
+ Neg(Simd2D(dest), Simd2D(src));
+}
+
+// Saturating integer add
+
+void MacroAssembler::addSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sqadd(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::unsignedAddSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Uqadd(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::addSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sqadd(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::unsignedAddSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Uqadd(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+// Saturating integer subtract
+
+void MacroAssembler::subSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sqsub(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::unsignedSubSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Uqsub(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::subSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Sqsub(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::unsignedSubSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Uqsub(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+// Lane-wise integer minimum
+
+void MacroAssembler::minInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smin(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::unsignedMinInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Umin(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::minInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smin(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::unsignedMinInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Umin(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::minInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smin(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::unsignedMinInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Umin(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+// Lane-wise integer maximum
+
+void MacroAssembler::maxInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smax(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::unsignedMaxInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Umax(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::maxInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smax(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::unsignedMaxInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Umax(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::maxInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Smax(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::unsignedMaxInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Umax(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+// Lane-wise integer rounding average
+
+void MacroAssembler::unsignedAverageInt8x16(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ Urhadd(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::unsignedAverageInt16x8(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ Urhadd(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+// Lane-wise integer absolute value
+
+void MacroAssembler::absInt8x16(FloatRegister src, FloatRegister dest) {
+ Abs(Simd16B(dest), Simd16B(src));
+}
+
+void MacroAssembler::absInt16x8(FloatRegister src, FloatRegister dest) {
+ Abs(Simd8H(dest), Simd8H(src));
+}
+
+void MacroAssembler::absInt32x4(FloatRegister src, FloatRegister dest) {
+ Abs(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::absInt64x2(FloatRegister src, FloatRegister dest) {
+ Abs(Simd2D(dest), Simd2D(src));
+}
+
+// Left shift by variable scalar
+
+void MacroAssembler::leftShiftInt8x16(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope vscratch(*this);
+ Dup(Simd16B(vscratch), ARMRegister(rhs, 32));
+ Sshl(Simd16B(dest), Simd16B(lhs), Simd16B(vscratch));
+}
+
+void MacroAssembler::leftShiftInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Shl(Simd16B(dest), Simd16B(src), count.value);
+}
+
+void MacroAssembler::leftShiftInt16x8(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope vscratch(*this);
+ Dup(Simd8H(vscratch), ARMRegister(rhs, 32));
+ Sshl(Simd8H(dest), Simd8H(lhs), Simd8H(vscratch));
+}
+
+void MacroAssembler::leftShiftInt16x8(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Shl(Simd8H(dest), Simd8H(src), count.value);
+}
+
+void MacroAssembler::leftShiftInt32x4(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope vscratch(*this);
+ Dup(Simd4S(vscratch), ARMRegister(rhs, 32));
+ Sshl(Simd4S(dest), Simd4S(lhs), Simd4S(vscratch));
+}
+
+void MacroAssembler::leftShiftInt32x4(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Shl(Simd4S(dest), Simd4S(src), count.value);
+}
+
+void MacroAssembler::leftShiftInt64x2(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope vscratch(*this);
+ Dup(Simd2D(vscratch), ARMRegister(rhs, 64));
+ Sshl(Simd2D(dest), Simd2D(lhs), Simd2D(vscratch));
+}
+
+void MacroAssembler::leftShiftInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Shl(Simd2D(dest), Simd2D(src), count.value);
+}
+
+// Right shift by variable scalar
+
+void MacroAssembler::rightShiftInt8x16(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ MacroAssemblerCompat::rightShiftInt8x16(lhs, rhs, dest,
+ /* isUnsigned */ false);
+}
+
+void MacroAssembler::rightShiftInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Sshr(Simd16B(dest), Simd16B(src), count.value);
+}
+
+void MacroAssembler::unsignedRightShiftInt8x16(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ MacroAssemblerCompat::rightShiftInt8x16(lhs, rhs, dest,
+ /* isUnsigned */ true);
+}
+
+void MacroAssembler::unsignedRightShiftInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Ushr(Simd16B(dest), Simd16B(src), count.value);
+}
+
+void MacroAssembler::rightShiftInt16x8(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ MacroAssemblerCompat::rightShiftInt16x8(lhs, rhs, dest,
+ /* isUnsigned */ false);
+}
+
+void MacroAssembler::rightShiftInt16x8(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Sshr(Simd8H(dest), Simd8H(src), count.value);
+}
+
+void MacroAssembler::unsignedRightShiftInt16x8(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ MacroAssemblerCompat::rightShiftInt16x8(lhs, rhs, dest,
+ /* isUnsigned */ true);
+}
+
+void MacroAssembler::unsignedRightShiftInt16x8(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Ushr(Simd8H(dest), Simd8H(src), count.value);
+}
+
+void MacroAssembler::rightShiftInt32x4(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ MacroAssemblerCompat::rightShiftInt32x4(lhs, rhs, dest,
+ /* isUnsigned */ false);
+}
+
+void MacroAssembler::rightShiftInt32x4(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Sshr(Simd4S(dest), Simd4S(src), count.value);
+}
+
+void MacroAssembler::unsignedRightShiftInt32x4(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ MacroAssemblerCompat::rightShiftInt32x4(lhs, rhs, dest,
+ /* isUnsigned */ true);
+}
+
+void MacroAssembler::unsignedRightShiftInt32x4(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Ushr(Simd4S(dest), Simd4S(src), count.value);
+}
+
+void MacroAssembler::rightShiftInt64x2(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ MacroAssemblerCompat::rightShiftInt64x2(lhs, rhs, dest,
+ /* isUnsigned */ false);
+}
+
+void MacroAssembler::rightShiftInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Sshr(Simd2D(dest), Simd2D(src), count.value);
+}
+
+void MacroAssembler::unsignedRightShiftInt64x2(FloatRegister lhs, Register rhs,
+ FloatRegister dest) {
+ MacroAssemblerCompat::rightShiftInt64x2(lhs, rhs, dest,
+ /* isUnsigned */ true);
+}
+
+void MacroAssembler::unsignedRightShiftInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ Ushr(Simd2D(dest), Simd2D(src), count.value);
+}
+
+// Bitwise and, or, xor, not
+
+void MacroAssembler::bitwiseAndSimd128(FloatRegister rhs,
+ FloatRegister lhsDest) {
+ And(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
+}
+
+void MacroAssembler::bitwiseAndSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ And(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::bitwiseOrSimd128(FloatRegister rhs,
+ FloatRegister lhsDest) {
+ Orr(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
+}
+
+void MacroAssembler::bitwiseOrSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Orr(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::bitwiseXorSimd128(FloatRegister rhs,
+ FloatRegister lhsDest) {
+ Eor(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
+}
+
+void MacroAssembler::bitwiseXorSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Eor(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::bitwiseNotSimd128(FloatRegister src, FloatRegister dest) {
+ Not(Simd16B(dest), Simd16B(src));
+}
+
+void MacroAssembler::bitwiseAndNotSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Bic(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+// Bitwise AND with complement: dest = ~lhs & rhs. Note this is not what Wasm
+// wants but what the x86 hardware offers, hence the name. Since arm64 has
+// dest = lhs & ~rhs, we just swap the operands.
+
+void MacroAssembler::bitwiseNotAndSimd128(FloatRegister rhs,
+ FloatRegister lhsDest) {
+ Bic(Simd16B(lhsDest), Simd16B(rhs), Simd16B(lhsDest));
+}
+
+// Bitwise select
+
+void MacroAssembler::bitwiseSelectSimd128(FloatRegister onTrue,
+ FloatRegister onFalse,
+ FloatRegister maskDest) {
+ Bsl(Simd16B(maskDest), Simd16B(onTrue), Simd16B(onFalse));
+}
+
+// Population count
+
+void MacroAssembler::popcntInt8x16(FloatRegister src, FloatRegister dest) {
+ Cnt(Simd16B(dest), Simd16B(src));
+}
+
+// Any lane true, i.e., any bit set
+
+void MacroAssembler::anyTrueSimd128(FloatRegister src, Register dest_) {
+ ScratchSimd128Scope scratch_(*this);
+ ARMFPRegister scratch(Simd1D(scratch_));
+ ARMRegister dest(dest_, 64);
+ Addp(scratch, Simd2D(src));
+ Umov(dest, scratch, 0);
+ Cmp(dest, Operand(0));
+ Cset(dest, Assembler::NonZero);
+}
+
+// All lanes true
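+//
+// Cmeq sets a lane to all-ones when the corresponding input lane is zero.
+// After Addp folds the two 64-bit halves, the sum is zero exactly when every
+// Cmeq lane was zero (the halves are patterns of all-ones/all-zero elements,
+// so the sum cannot wrap around to zero), and Cset with the Zero condition
+// turns that into the boolean result.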
+
+void MacroAssembler::allTrueInt8x16(FloatRegister src, Register dest_) {
+ ScratchSimd128Scope scratch(*this);
+ ARMRegister dest(dest_, 64);
+ Cmeq(Simd16B(scratch), Simd16B(src), 0);
+ Addp(Simd1D(scratch), Simd2D(scratch));
+ Umov(dest, Simd1D(scratch), 0);
+ Cmp(dest, Operand(0));
+ Cset(dest, Assembler::Zero);
+}
+
+void MacroAssembler::allTrueInt16x8(FloatRegister src, Register dest_) {
+ ScratchSimd128Scope scratch(*this);
+ ARMRegister dest(dest_, 64);
+ Cmeq(Simd8H(scratch), Simd8H(src), 0);
+ Addp(Simd1D(scratch), Simd2D(scratch));
+ Umov(dest, Simd1D(scratch), 0);
+ Cmp(dest, Operand(0));
+ Cset(dest, Assembler::Zero);
+}
+
+void MacroAssembler::allTrueInt32x4(FloatRegister src, Register dest_) {
+ ScratchSimd128Scope scratch(*this);
+ ARMRegister dest(dest_, 64);
+ Cmeq(Simd4S(scratch), Simd4S(src), 0);
+ Addp(Simd1D(scratch), Simd2D(scratch));
+ Umov(dest, Simd1D(scratch), 0);
+ Cmp(dest, Operand(0));
+ Cset(dest, Assembler::Zero);
+}
+
+void MacroAssembler::allTrueInt64x2(FloatRegister src, Register dest_) {
+ ScratchSimd128Scope scratch(*this);
+ ARMRegister dest(dest_, 64);
+ Cmeq(Simd2D(scratch), Simd2D(src), 0);
+ Addp(Simd1D(scratch), Simd2D(scratch));
+ Umov(dest, Simd1D(scratch), 0);
+ Cmp(dest, Operand(0));
+ Cset(dest, Assembler::Zero);
+}
+
+// Bitmask, i.e., extract and compress the high bits of all lanes
+//
+// There's no direct support for this on the chip. These implementations come
+// from the writeup that added the instruction to the SIMD instruction set.
+// Generally, shifting and masking are used to isolate the sign bit of each
+// element in the right position, then a horizontal add creates the result. For
+// 8-bit elements an intermediate step is needed to assemble the bits of the
+// upper and lower 8 bytes into 8 halfwords.
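+//
+// For i8x16: after Sshr every byte is 0xFF or 0x00; And with the constant
+// {1,2,4,...,0x80, 1,2,...,0x80} keeps one distinct bit per byte within each
+// 8-byte half; Ext and Zip1 pair byte i with byte i+8 into halfword i, and
+// Addv sums the eight halfwords into the 16-bit mask.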
+
+void MacroAssembler::bitmaskInt8x16(FloatRegister src, Register dest,
+ FloatRegister temp) {
+ ScratchSimd128Scope scratch(*this);
+ int8_t values[] = {1, 2, 4, 8, 16, 32, 64, -128,
+ 1, 2, 4, 8, 16, 32, 64, -128};
+ loadConstantSimd128(SimdConstant::CreateX16(values), temp);
+ Sshr(Simd16B(scratch), Simd16B(src), 7);
+ And(Simd16B(scratch), Simd16B(scratch), Simd16B(temp));
+ Ext(Simd16B(temp), Simd16B(scratch), Simd16B(scratch), 8);
+ Zip1(Simd16B(temp), Simd16B(scratch), Simd16B(temp));
+ Addv(ARMFPRegister(temp, 16), Simd8H(temp));
+ Mov(ARMRegister(dest, 32), Simd8H(temp), 0);
+}
+
+void MacroAssembler::bitmaskInt16x8(FloatRegister src, Register dest,
+ FloatRegister temp) {
+ ScratchSimd128Scope scratch(*this);
+ int16_t values[] = {1, 2, 4, 8, 16, 32, 64, 128};
+ loadConstantSimd128(SimdConstant::CreateX8(values), temp);
+ Sshr(Simd8H(scratch), Simd8H(src), 15);
+ And(Simd16B(scratch), Simd16B(scratch), Simd16B(temp));
+ Addv(ARMFPRegister(scratch, 16), Simd8H(scratch));
+ Mov(ARMRegister(dest, 32), Simd8H(scratch), 0);
+}
+
+void MacroAssembler::bitmaskInt32x4(FloatRegister src, Register dest,
+ FloatRegister temp) {
+ ScratchSimd128Scope scratch(*this);
+ int32_t values[] = {1, 2, 4, 8};
+ loadConstantSimd128(SimdConstant::CreateX4(values), temp);
+ Sshr(Simd4S(scratch), Simd4S(src), 31);
+ And(Simd16B(scratch), Simd16B(scratch), Simd16B(temp));
+ Addv(ARMFPRegister(scratch, 32), Simd4S(scratch));
+ Mov(ARMRegister(dest, 32), Simd4S(scratch), 0);
+}
+
+void MacroAssembler::bitmaskInt64x2(FloatRegister src, Register dest,
+ FloatRegister temp) {
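+ // Sqxtn narrows each 64-bit lane to 32 bits while preserving the sign, Ushr
+ // leaves only the sign bit in bit 0 of each 32-bit lane, and the 64-bit Usra
+ // folds lane 1's bit into bit 1 before Fmov extracts the 2-bit mask.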
+ Sqxtn(Simd2S(temp), Simd2D(src));
+ Ushr(Simd2S(temp), Simd2S(temp), 31);
+ Usra(ARMFPRegister(temp, 64), ARMFPRegister(temp, 64), 31);
+ Fmov(ARMRegister(dest, 32), ARMFPRegister(temp, 32));
+}
+
+// Comparisons (integer and floating-point)
+
+void MacroAssembler::compareInt8x16(Assembler::Condition cond,
+ FloatRegister rhs, FloatRegister lhsDest) {
+ compareSimd128Int(cond, Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
+}
+
+void MacroAssembler::compareInt8x16(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ compareSimd128Int(cond, Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
+}
+
+void MacroAssembler::compareInt16x8(Assembler::Condition cond,
+ FloatRegister rhs, FloatRegister lhsDest) {
+ compareSimd128Int(cond, Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
+}
+
+void MacroAssembler::compareInt16x8(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ compareSimd128Int(cond, Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
+}
+
+void MacroAssembler::compareInt32x4(Assembler::Condition cond,
+ FloatRegister rhs, FloatRegister lhsDest) {
+ compareSimd128Int(cond, Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
+}
+
+void MacroAssembler::compareInt32x4(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ compareSimd128Int(cond, Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::compareInt64x2(Assembler::Condition cond,
+ FloatRegister rhs, FloatRegister lhsDest) {
+ compareSimd128Int(cond, Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
+}
+
+void MacroAssembler::compareInt64x2(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ compareSimd128Int(cond, Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+void MacroAssembler::compareFloat32x4(Assembler::Condition cond,
+ FloatRegister rhs,
+ FloatRegister lhsDest) {
+ compareSimd128Float(cond, Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
+}
+
+void MacroAssembler::compareFloat32x4(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ compareSimd128Float(cond, Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::compareFloat64x2(Assembler::Condition cond,
+ FloatRegister rhs,
+ FloatRegister lhsDest) {
+ compareSimd128Float(cond, Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
+}
+
+void MacroAssembler::compareFloat64x2(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ compareSimd128Float(cond, Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+// Load
+
+void MacroAssembler::loadUnalignedSimd128(const Address& src,
+ FloatRegister dest) {
+ Ldr(ARMFPRegister(dest, 128), toMemOperand(src));
+}
+
+void MacroAssembler::loadUnalignedSimd128(const BaseIndex& address,
+ FloatRegister dest) {
+ doBaseIndex(ARMFPRegister(dest, 128), address, vixl::LDR_q);
+}
+
+// Store
+
+void MacroAssembler::storeUnalignedSimd128(FloatRegister src,
+ const Address& dest) {
+ Str(ARMFPRegister(src, 128), toMemOperand(dest));
+}
+
+void MacroAssembler::storeUnalignedSimd128(FloatRegister src,
+ const BaseIndex& dest) {
+ doBaseIndex(ARMFPRegister(src, 128), dest, vixl::STR_q);
+}
+
+// Floating point negation
+
+void MacroAssembler::negFloat32x4(FloatRegister src, FloatRegister dest) {
+ Fneg(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::negFloat64x2(FloatRegister src, FloatRegister dest) {
+ Fneg(Simd2D(dest), Simd2D(src));
+}
+
+// Floating point absolute value
+
+void MacroAssembler::absFloat32x4(FloatRegister src, FloatRegister dest) {
+ Fabs(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::absFloat64x2(FloatRegister src, FloatRegister dest) {
+ Fabs(Simd2D(dest), Simd2D(src));
+}
+
+// NaN-propagating minimum
+
+void MacroAssembler::minFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmin(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::minFloat32x4(FloatRegister rhs, FloatRegister lhsDest) {
+ Fmin(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
+}
+
+void MacroAssembler::minFloat64x2(FloatRegister rhs, FloatRegister lhsDest) {
+ Fmin(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
+}
+
+void MacroAssembler::minFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmin(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+// NaN-propagating maximum
+
+void MacroAssembler::maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmax(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::maxFloat32x4(FloatRegister rhs, FloatRegister lhsDest) {
+ Fmax(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
+}
+
+void MacroAssembler::maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmax(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+void MacroAssembler::maxFloat64x2(FloatRegister rhs, FloatRegister lhsDest) {
+ Fmax(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
+}
+
+// Floating add
+
+void MacroAssembler::addFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fadd(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::addFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fadd(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+// Floating subtract
+
+void MacroAssembler::subFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fsub(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::subFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fsub(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+// Floating division
+
+void MacroAssembler::divFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fdiv(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::divFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fdiv(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+// Floating Multiply
+
+void MacroAssembler::mulFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmul(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
+}
+
+void MacroAssembler::mulFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmul(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
+}
+
+// Pairwise add
+
+void MacroAssembler::extAddPairwiseInt8x16(FloatRegister src,
+ FloatRegister dest) {
+ Saddlp(Simd8H(dest), Simd16B(src));
+}
+
+void MacroAssembler::unsignedExtAddPairwiseInt8x16(FloatRegister src,
+ FloatRegister dest) {
+ Uaddlp(Simd8H(dest), Simd16B(src));
+}
+
+void MacroAssembler::extAddPairwiseInt16x8(FloatRegister src,
+ FloatRegister dest) {
+ Saddlp(Simd4S(dest), Simd8H(src));
+}
+
+void MacroAssembler::unsignedExtAddPairwiseInt16x8(FloatRegister src,
+ FloatRegister dest) {
+ Uaddlp(Simd4S(dest), Simd8H(src));
+}
+
+// Floating square root
+
+void MacroAssembler::sqrtFloat32x4(FloatRegister src, FloatRegister dest) {
+ Fsqrt(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::sqrtFloat64x2(FloatRegister src, FloatRegister dest) {
+ Fsqrt(Simd2D(dest), Simd2D(src));
+}
+
+// Integer to floating point with rounding
+
+void MacroAssembler::convertInt32x4ToFloat32x4(FloatRegister src,
+ FloatRegister dest) {
+ Scvtf(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::unsignedConvertInt32x4ToFloat32x4(FloatRegister src,
+ FloatRegister dest) {
+ Ucvtf(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::convertInt32x4ToFloat64x2(FloatRegister src,
+ FloatRegister dest) {
+ Sshll(Simd2D(dest), Simd2S(src), 0);
+ Scvtf(Simd2D(dest), Simd2D(dest));
+}
+
+void MacroAssembler::unsignedConvertInt32x4ToFloat64x2(FloatRegister src,
+ FloatRegister dest) {
+ Ushll(Simd2D(dest), Simd2S(src), 0);
+ Ucvtf(Simd2D(dest), Simd2D(dest));
+}
+
+// Floating point to integer with saturation
+
+void MacroAssembler::truncSatFloat32x4ToInt32x4(FloatRegister src,
+ FloatRegister dest) {
+ Fcvtzs(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
+ FloatRegister dest) {
+ Fcvtzu(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::truncSatFloat64x2ToInt32x4(FloatRegister src,
+ FloatRegister dest,
+ FloatRegister temp) {
+ Fcvtzs(Simd2D(dest), Simd2D(src));
+ Sqxtn(Simd2S(dest), Simd2D(dest));
+}
+
+void MacroAssembler::unsignedTruncSatFloat64x2ToInt32x4(FloatRegister src,
+ FloatRegister dest,
+ FloatRegister temp) {
+ Fcvtzu(Simd2D(dest), Simd2D(src));
+ Uqxtn(Simd2S(dest), Simd2D(dest));
+}
+
+void MacroAssembler::truncFloat32x4ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest) {
+ Fcvtzs(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::unsignedTruncFloat32x4ToInt32x4Relaxed(
+ FloatRegister src, FloatRegister dest) {
+ Fcvtzu(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::truncFloat64x2ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest) {
+ Fcvtzs(Simd2D(dest), Simd2D(src));
+ Sqxtn(Simd2S(dest), Simd2D(dest));
+}
+
+void MacroAssembler::unsignedTruncFloat64x2ToInt32x4Relaxed(
+ FloatRegister src, FloatRegister dest) {
+ Fcvtzu(Simd2D(dest), Simd2D(src));
+ Uqxtn(Simd2S(dest), Simd2D(dest));
+}
+
+// Floating point narrowing
+
+void MacroAssembler::convertFloat64x2ToFloat32x4(FloatRegister src,
+ FloatRegister dest) {
+ Fcvtn(Simd2S(dest), Simd2D(src));
+}
+
+// Floating point widening
+
+void MacroAssembler::convertFloat32x4ToFloat64x2(FloatRegister src,
+ FloatRegister dest) {
+ Fcvtl(Simd2D(dest), Simd2S(src));
+}
+
+// Integer to integer narrowing
+
+void MacroAssembler::narrowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
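+ // The first Sqxtn writes dest, so if rhs aliases dest it must be preserved
+ // in the scratch register first (the same pattern is used in the narrowing
+ // functions below).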
+ if (rhs == dest) {
+ Mov(scratch, SimdReg(rhs));
+ rhs = scratch;
+ }
+ Sqxtn(Simd8B(dest), Simd8H(lhs));
+ Sqxtn2(Simd16B(dest), Simd8H(rhs));
+}
+
+void MacroAssembler::unsignedNarrowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ if (rhs == dest) {
+ Mov(scratch, SimdReg(rhs));
+ rhs = scratch;
+ }
+ Sqxtun(Simd8B(dest), Simd8H(lhs));
+ Sqxtun2(Simd16B(dest), Simd8H(rhs));
+}
+
+void MacroAssembler::narrowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ if (rhs == dest) {
+ Mov(scratch, SimdReg(rhs));
+ rhs = scratch;
+ }
+ Sqxtn(Simd4H(dest), Simd4S(lhs));
+ Sqxtn2(Simd8H(dest), Simd4S(rhs));
+}
+
+void MacroAssembler::unsignedNarrowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ if (rhs == dest) {
+ Mov(scratch, SimdReg(rhs));
+ rhs = scratch;
+ }
+ Sqxtun(Simd4H(dest), Simd4S(lhs));
+ Sqxtun2(Simd8H(dest), Simd4S(rhs));
+}
+
+// Integer to integer widening
+
+void MacroAssembler::widenLowInt8x16(FloatRegister src, FloatRegister dest) {
+ Sshll(Simd8H(dest), Simd8B(src), 0);
+}
+
+void MacroAssembler::widenHighInt8x16(FloatRegister src, FloatRegister dest) {
+ Sshll2(Simd8H(dest), Simd16B(src), 0);
+}
+
+void MacroAssembler::unsignedWidenLowInt8x16(FloatRegister src,
+ FloatRegister dest) {
+ Ushll(Simd8H(dest), Simd8B(src), 0);
+}
+
+void MacroAssembler::unsignedWidenHighInt8x16(FloatRegister src,
+ FloatRegister dest) {
+ Ushll2(Simd8H(dest), Simd16B(src), 0);
+}
+
+void MacroAssembler::widenLowInt16x8(FloatRegister src, FloatRegister dest) {
+ Sshll(Simd4S(dest), Simd4H(src), 0);
+}
+
+void MacroAssembler::widenHighInt16x8(FloatRegister src, FloatRegister dest) {
+ Sshll2(Simd4S(dest), Simd8H(src), 0);
+}
+
+void MacroAssembler::unsignedWidenLowInt16x8(FloatRegister src,
+ FloatRegister dest) {
+ Ushll(Simd4S(dest), Simd4H(src), 0);
+}
+
+void MacroAssembler::unsignedWidenHighInt16x8(FloatRegister src,
+ FloatRegister dest) {
+ Ushll2(Simd4S(dest), Simd8H(src), 0);
+}
+
+void MacroAssembler::widenLowInt32x4(FloatRegister src, FloatRegister dest) {
+ Sshll(Simd2D(dest), Simd2S(src), 0);
+}
+
+void MacroAssembler::unsignedWidenLowInt32x4(FloatRegister src,
+ FloatRegister dest) {
+ Ushll(Simd2D(dest), Simd2S(src), 0);
+}
+
+void MacroAssembler::widenHighInt32x4(FloatRegister src, FloatRegister dest) {
+ Sshll2(Simd2D(dest), Simd4S(src), 0);
+}
+
+void MacroAssembler::unsignedWidenHighInt32x4(FloatRegister src,
+ FloatRegister dest) {
+ Ushll2(Simd2D(dest), Simd4S(src), 0);
+}
+
+// Compare-based minimum/maximum (experimental as of August, 2020)
+// https://github.com/WebAssembly/simd/pull/122
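+//
+// pmin(a, b) = (b < a) ? b : a and pmax(a, b) = (a < b) ? b : a. The Fcmgt
+// result is used as a Bsl mask that selects rhs where the comparison holds
+// and lhs (or lhsDest) elsewhere.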
+
+void MacroAssembler::pseudoMinFloat32x4(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest) {
+ // Silence the linter by using the same names as in the declaration, then
+ // alias them here.
+ FloatRegister rhs = rhsOrRhsDest;
+ FloatRegister lhsDest = lhsOrLhsDest;
+ ScratchSimd128Scope scratch(*this);
+ Fcmgt(Simd4S(scratch), Simd4S(lhsDest), Simd4S(rhs));
+ Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhsDest));
+ Mov(SimdReg(lhsDest), scratch);
+}
+
+void MacroAssembler::pseudoMinFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ Fcmgt(Simd4S(scratch), Simd4S(lhs), Simd4S(rhs));
+ Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhs));
+ Mov(SimdReg(dest), scratch);
+}
+
+void MacroAssembler::pseudoMinFloat64x2(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest) {
+ FloatRegister rhs = rhsOrRhsDest;
+ FloatRegister lhsDest = lhsOrLhsDest;
+ ScratchSimd128Scope scratch(*this);
+ Fcmgt(Simd2D(scratch), Simd2D(lhsDest), Simd2D(rhs));
+ Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhsDest));
+ Mov(SimdReg(lhsDest), scratch);
+}
+
+void MacroAssembler::pseudoMinFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ Fcmgt(Simd2D(scratch), Simd2D(lhs), Simd2D(rhs));
+ Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhs));
+ Mov(SimdReg(dest), scratch);
+}
+
+void MacroAssembler::pseudoMaxFloat32x4(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest) {
+ FloatRegister rhs = rhsOrRhsDest;
+ FloatRegister lhsDest = lhsOrLhsDest;
+ ScratchSimd128Scope scratch(*this);
+ Fcmgt(Simd4S(scratch), Simd4S(rhs), Simd4S(lhsDest));
+ Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhsDest));
+ Mov(SimdReg(lhsDest), scratch);
+}
+
+void MacroAssembler::pseudoMaxFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ Fcmgt(Simd4S(scratch), Simd4S(rhs), Simd4S(lhs));
+ Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhs));
+ Mov(SimdReg(dest), scratch);
+}
+
+void MacroAssembler::pseudoMaxFloat64x2(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest) {
+ FloatRegister rhs = rhsOrRhsDest;
+ FloatRegister lhsDest = lhsOrLhsDest;
+ ScratchSimd128Scope scratch(*this);
+ Fcmgt(Simd2D(scratch), Simd2D(rhs), Simd2D(lhsDest));
+ Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhsDest));
+ Mov(SimdReg(lhsDest), scratch);
+}
+
+void MacroAssembler::pseudoMaxFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ Fcmgt(Simd2D(scratch), Simd2D(rhs), Simd2D(lhs));
+ Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhs));
+ Mov(SimdReg(dest), scratch);
+}
+
+// Widening/pairwise integer dot product (experimental as of August, 2020)
+// https://github.com/WebAssembly/simd/pull/127
+
+void MacroAssembler::widenDotInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
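+ // Widening multiplies of the low and high halves, then Addp adds adjacent
+ // 32-bit products, giving the pairwise dot product in each lane of dest.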
+ Smull(Simd4S(scratch), Simd4H(lhs), Simd4H(rhs));
+ Smull2(Simd4S(dest), Simd8H(lhs), Simd8H(rhs));
+ Addp(Simd4S(dest), Simd4S(scratch), Simd4S(dest));
+}
+
+void MacroAssembler::dotInt8x16Int7x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ Smull(Simd8H(scratch), Simd8B(lhs), Simd8B(rhs));
+ Smull2(Simd8H(dest), Simd16B(lhs), Simd16B(rhs));
+ Addp(Simd8H(dest), Simd8H(scratch), Simd8H(dest));
+}
+
+void MacroAssembler::dotInt8x16Int7x16ThenAdd(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest,
+ FloatRegister temp) {
+ ScratchSimd128Scope scratch(*this);
+ Smull(Simd8H(scratch), Simd8B(lhs), Simd8B(rhs));
+ Smull2(Simd8H(temp), Simd16B(lhs), Simd16B(rhs));
+ Addp(Simd8H(temp), Simd8H(scratch), Simd8H(temp));
+ Sadalp(Simd4S(dest), Simd8H(temp));
+}
+
+// Floating point rounding (experimental as of August, 2020)
+// https://github.com/WebAssembly/simd/pull/232
+
+void MacroAssembler::ceilFloat32x4(FloatRegister src, FloatRegister dest) {
+ Frintp(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::ceilFloat64x2(FloatRegister src, FloatRegister dest) {
+ Frintp(Simd2D(dest), Simd2D(src));
+}
+
+void MacroAssembler::floorFloat32x4(FloatRegister src, FloatRegister dest) {
+ Frintm(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::floorFloat64x2(FloatRegister src, FloatRegister dest) {
+ Frintm(Simd2D(dest), Simd2D(src));
+}
+
+void MacroAssembler::truncFloat32x4(FloatRegister src, FloatRegister dest) {
+ Frintz(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::truncFloat64x2(FloatRegister src, FloatRegister dest) {
+ Frintz(Simd2D(dest), Simd2D(src));
+}
+
+void MacroAssembler::nearestFloat32x4(FloatRegister src, FloatRegister dest) {
+ Frintn(Simd4S(dest), Simd4S(src));
+}
+
+void MacroAssembler::nearestFloat64x2(FloatRegister src, FloatRegister dest) {
+ Frintn(Simd2D(dest), Simd2D(src));
+}
+
+// Floating multiply-accumulate: srcDest [+-]= src1 * src2
+
+void MacroAssembler::fmaFloat32x4(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) {
+ Fmla(Simd4S(srcDest), Simd4S(src1), Simd4S(src2));
+}
+
+void MacroAssembler::fnmaFloat32x4(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) {
+ Fmls(Simd4S(srcDest), Simd4S(src1), Simd4S(src2));
+}
+
+void MacroAssembler::fmaFloat64x2(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) {
+ Fmla(Simd2D(srcDest), Simd2D(src1), Simd2D(src2));
+}
+
+void MacroAssembler::fnmaFloat64x2(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) {
+ Fmls(Simd2D(srcDest), Simd2D(src1), Simd2D(src2));
+}
+
+void MacroAssembler::minFloat32x4Relaxed(FloatRegister src,
+ FloatRegister srcDest) {
+ Fmin(Simd4S(srcDest), Simd4S(src), Simd4S(srcDest));
+}
+
+void MacroAssembler::minFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmin(Simd4S(dest), Simd4S(rhs), Simd4S(lhs));
+}
+
+void MacroAssembler::maxFloat32x4Relaxed(FloatRegister src,
+ FloatRegister srcDest) {
+ Fmax(Simd4S(srcDest), Simd4S(src), Simd4S(srcDest));
+}
+
+void MacroAssembler::maxFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmax(Simd4S(dest), Simd4S(rhs), Simd4S(lhs));
+}
+
+void MacroAssembler::minFloat64x2Relaxed(FloatRegister src,
+ FloatRegister srcDest) {
+ Fmin(Simd2D(srcDest), Simd2D(src), Simd2D(srcDest));
+}
+
+void MacroAssembler::minFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmin(Simd2D(dest), Simd2D(rhs), Simd2D(lhs));
+}
+
+void MacroAssembler::maxFloat64x2Relaxed(FloatRegister src,
+ FloatRegister srcDest) {
+ Fmax(Simd2D(srcDest), Simd2D(src), Simd2D(srcDest));
+}
+
+void MacroAssembler::maxFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ Fmax(Simd2D(dest), Simd2D(rhs), Simd2D(lhs));
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+void MacroAssemblerCompat::addToStackPtr(Register src) {
+ Add(GetStackPointer64(), GetStackPointer64(), ARMRegister(src, 64));
+ // Given the required invariant SP <= PSP, this is probably pointless,
+ // since it gives PSP a larger value.
+ syncStackPtr();
+}
+
+void MacroAssemblerCompat::addToStackPtr(Imm32 imm) {
+ Add(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
+ // As above, probably pointless.
+ syncStackPtr();
+}
+
+void MacroAssemblerCompat::addToStackPtr(const Address& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ Ldr(scratch, toMemOperand(src));
+ Add(GetStackPointer64(), GetStackPointer64(), scratch);
+ // As above, probably pointless.
+ syncStackPtr();
+}
+
+void MacroAssemblerCompat::addStackPtrTo(Register dest) {
+ Add(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
+}
+
+void MacroAssemblerCompat::subFromStackPtr(Register src) {
+ Sub(GetStackPointer64(), GetStackPointer64(), ARMRegister(src, 64));
+ syncStackPtr();
+}
+
+void MacroAssemblerCompat::subFromStackPtr(Imm32 imm) {
+ Sub(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
+ syncStackPtr();
+}
+
+void MacroAssemblerCompat::subStackPtrFrom(Register dest) {
+ Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
+}
+
+void MacroAssemblerCompat::andToStackPtr(Imm32 imm) {
+ if (sp.Is(GetStackPointer64())) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ Mov(scratch, sp);
+ And(sp, scratch, Operand(imm.value));
+ // syncStackPtr() not needed since our SP is the real SP.
+ } else {
+ And(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
+ syncStackPtr();
+ }
+}
+
+void MacroAssemblerCompat::moveToStackPtr(Register src) {
+ Mov(GetStackPointer64(), ARMRegister(src, 64));
+ syncStackPtr();
+}
+
+void MacroAssemblerCompat::moveStackPtrTo(Register dest) {
+ Mov(ARMRegister(dest, 64), GetStackPointer64());
+}
+
+void MacroAssemblerCompat::loadStackPtr(const Address& src) {
+ if (sp.Is(GetStackPointer64())) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ Ldr(scratch, toMemOperand(src));
+ Mov(sp, scratch);
+ // syncStackPtr() not needed since our SP is the real SP.
+ } else {
+ Ldr(GetStackPointer64(), toMemOperand(src));
+ syncStackPtr();
+ }
+}
+
+void MacroAssemblerCompat::storeStackPtr(const Address& dest) {
+ if (sp.Is(GetStackPointer64())) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ Mov(scratch, sp);
+ Str(scratch, toMemOperand(dest));
+ } else {
+ Str(GetStackPointer64(), toMemOperand(dest));
+ }
+}
+
+void MacroAssemblerCompat::branchTestStackPtr(Condition cond, Imm32 rhs,
+ Label* label) {
+ if (sp.Is(GetStackPointer64())) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ Mov(scratch, sp);
+ Tst(scratch, Operand(rhs.value));
+ } else {
+ Tst(GetStackPointer64(), Operand(rhs.value));
+ }
+ B(label, cond);
+}
+
+void MacroAssemblerCompat::branchStackPtr(Condition cond, Register rhs_,
+ Label* label) {
+ ARMRegister rhs(rhs_, 64);
+ if (sp.Is(GetStackPointer64())) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ Mov(scratch, sp);
+ Cmp(scratch, rhs);
+ } else {
+ Cmp(GetStackPointer64(), rhs);
+ }
+ B(label, cond);
+}
+
+void MacroAssemblerCompat::branchStackPtrRhs(Condition cond, Address lhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ Ldr(scratch, toMemOperand(lhs));
+ // Cmp disallows SP as the rhs, so flip the operands and invert the
+ // condition.
+ Cmp(GetStackPointer64(), scratch);
+ B(label, Assembler::InvertCondition(cond));
+}
+
+void MacroAssemblerCompat::branchStackPtrRhs(Condition cond,
+ AbsoluteAddress lhs,
+ Label* label) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ loadPtr(lhs, scratch.asUnsized());
+ // Cmp disallows SP as the rhs, so flip the operands and invert the
+ // condition.
+ Cmp(GetStackPointer64(), scratch);
+ B(label, Assembler::InvertCondition(cond));
+}
+
+// If source is a double, load into dest.
+// If source is int32, convert to double and store in dest.
+// Else, branch to failure.
+void MacroAssemblerCompat::ensureDouble(const ValueOperand& source,
+ FloatRegister dest, Label* failure) {
+ Label isDouble, done;
+
+ {
+ ScratchTagScope tag(asMasm(), source);
+ splitTagForTest(source, tag);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+ }
+
+ convertInt32ToDouble(source.valueReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerCompat::unboxValue(const ValueOperand& src, AnyRegister dest,
+ JSValueType type) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr(), type);
+ }
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_MacroAssembler_arm64_inl_h */
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp
new file mode 100644
index 0000000000..a4aff730e6
--- /dev/null
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -0,0 +1,3416 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/MacroAssembler-arm64.h"
+
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+
+#include "jsmath.h"
+
+#include "jit/arm64/MoveEmitter-arm64.h"
+#include "jit/arm64/SharedICRegisters-arm64.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "util/Memory.h"
+#include "vm/BigIntType.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/StringType.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+enum class Width { _32 = 32, _64 = 64 };
+
+static inline ARMRegister X(Register r) { return ARMRegister(r, 64); }
+
+static inline ARMRegister X(MacroAssembler& masm, RegisterOrSP r) {
+ return masm.toARMRegister(r, 64);
+}
+
+static inline ARMRegister W(Register r) { return ARMRegister(r, 32); }
+
+static inline ARMRegister R(Register r, Width w) {
+ return ARMRegister(r, unsigned(w));
+}
+
+void MacroAssemblerCompat::boxValue(JSValueType type, Register src,
+ Register dest) {
+#ifdef DEBUG
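+ // Check that the upper 32 bits of an INT32 or BOOLEAN payload are already
+ // zero; a set upper bit would corrupt the tag OR'ed in below.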
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ Label upper32BitsZeroed;
+ movePtr(ImmWord(UINT32_MAX), dest);
+ asMasm().branchPtr(Assembler::BelowOrEqual, src, dest, &upper32BitsZeroed);
+ breakpoint();
+ bind(&upper32BitsZeroed);
+ }
+#endif
+ Orr(ARMRegister(dest, 64), ARMRegister(src, 64),
+ Operand(ImmShiftedTag(type).value));
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MacroAssembler::MustMaskShiftCountSimd128(wasm::SimdOp op, int32_t* mask) {
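+ // Always mask on arm64: report a (lane width - 1) mask so the caller reduces
+ // the shift count modulo the lane width before emitting the shift.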
+ switch (op) {
+ case wasm::SimdOp::I8x16Shl:
+ case wasm::SimdOp::I8x16ShrU:
+ case wasm::SimdOp::I8x16ShrS:
+ *mask = 7;
+ break;
+ case wasm::SimdOp::I16x8Shl:
+ case wasm::SimdOp::I16x8ShrU:
+ case wasm::SimdOp::I16x8ShrS:
+ *mask = 15;
+ break;
+ case wasm::SimdOp::I32x4Shl:
+ case wasm::SimdOp::I32x4ShrU:
+ case wasm::SimdOp::I32x4ShrS:
+ *mask = 31;
+ break;
+ case wasm::SimdOp::I64x2Shl:
+ case wasm::SimdOp::I64x2ShrU:
+ case wasm::SimdOp::I64x2ShrS:
+ *mask = 63;
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift operation");
+ }
+ return true;
+}
+#endif
+
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
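+ // Convert to the nearest signed integer, then clamp to [0, 255]: the first
+ // Csel caps the value at 255, the second raises negative results to 0.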
+ ARMRegister dest(output, 32);
+ Fcvtns(dest, ARMFPRegister(input, 64));
+
+ {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+
+ Mov(scratch32, Operand(0xff));
+ Cmp(dest, scratch32);
+ Csel(dest, dest, scratch32, LessThan);
+ }
+
+ Cmp(dest, Operand(0));
+ Csel(dest, dest, wzr, GreaterThan);
+}
+
+js::jit::MacroAssembler& MacroAssemblerCompat::asMasm() {
+ return *static_cast<js::jit::MacroAssembler*>(this);
+}
+
+const js::jit::MacroAssembler& MacroAssemblerCompat::asMasm() const {
+ return *static_cast<const js::jit::MacroAssembler*>(this);
+}
+
+vixl::MacroAssembler& MacroAssemblerCompat::asVIXL() {
+ return *static_cast<vixl::MacroAssembler*>(this);
+}
+
+const vixl::MacroAssembler& MacroAssemblerCompat::asVIXL() const {
+ return *static_cast<const vixl::MacroAssembler*>(this);
+}
+
+void MacroAssemblerCompat::mov(CodeLabel* label, Register dest) {
+ BufferOffset bo = movePatchablePtr(ImmWord(/* placeholder */ 0), dest);
+ label->patchAt()->bind(bo.getOffset());
+ label->setLinkMode(CodeLabel::MoveImmediate);
+}
+
+BufferOffset MacroAssemblerCompat::movePatchablePtr(ImmPtr ptr, Register dest) {
+ const size_t numInst = 1; // Inserting one load instruction.
+ const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
+ uint8_t* literalAddr = (uint8_t*)(&ptr.value); // TODO: Should be const.
+
+ // Scratch space for generating the load instruction.
+ //
+ // allocLiteralLoadEntry() will use InsertIndexIntoTag() to store a temporary
+ // index to the corresponding PoolEntry in the instruction itself.
+ //
+ // That index will be fixed up later when finishPool()
+ // walks over all marked loads and calls PatchConstantPoolLoad().
+ uint32_t instructionScratch = 0;
+
+ // Emit the instruction mask in the scratch space.
+ // The offset doesn't matter: it will be fixed up later.
+ vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64),
+ 0);
+
+ // Add the entry to the pool, fix up the LDR imm19 offset,
+ // and add the completed instruction to the buffer.
+ return allocLiteralLoadEntry(numInst, numPoolEntries,
+ (uint8_t*)&instructionScratch, literalAddr);
+}
+
+BufferOffset MacroAssemblerCompat::movePatchablePtr(ImmWord ptr,
+ Register dest) {
+ const size_t numInst = 1; // Inserting one load instruction.
+ const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
+ uint8_t* literalAddr = (uint8_t*)(&ptr.value);
+
+ // Scratch space for generating the load instruction.
+ //
+ // allocLiteralLoadEntry() will use InsertIndexIntoTag() to store a temporary
+ // index to the corresponding PoolEntry in the instruction itself.
+ //
+ // That index will be fixed up later when finishPool()
+ // walks over all marked loads and calls PatchConstantPoolLoad().
+ uint32_t instructionScratch = 0;
+
+ // Emit the instruction mask in the scratch space.
+ // The offset doesn't matter: it will be fixed up later.
+ vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64),
+ 0);
+
+ // Add the entry to the pool, fix up the LDR imm19 offset,
+ // and add the completed instruction to the buffer.
+ return allocLiteralLoadEntry(numInst, numPoolEntries,
+ (uint8_t*)&instructionScratch, literalAddr);
+}
+
+void MacroAssemblerCompat::loadPrivate(const Address& src, Register dest) {
+ loadPtr(src, dest);
+}
+
+void MacroAssemblerCompat::handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail) {
+ // Fail rather than silently create wrong code.
+ MOZ_RELEASE_ASSERT(GetStackPointer64().Is(PseudoStackPointer64));
+
+ // Reserve space for exception information.
+ int64_t size = (sizeof(ResumeFromException) + 7) & ~7;
+ Sub(PseudoStackPointer64, PseudoStackPointer64, Operand(size));
+ syncStackPtr();
+
+ MOZ_ASSERT(!x0.Is(PseudoStackPointer64));
+ Mov(x0, PseudoStackPointer64);
+
+ // Call the handler.
+ using Fn = void (*)(ResumeFromException* rfe);
+ asMasm().setupUnalignedABICall(r1);
+ asMasm().passABIArg(r0);
+ asMasm().callWithABI<Fn, HandleException>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ // Check the `asMasm` calls above didn't mess with the StackPointer identity.
+ MOZ_ASSERT(GetStackPointer64().Is(PseudoStackPointer64));
+
+ loadPtr(Address(PseudoStackPointer, ResumeFromException::offsetOfKind()), r0);
+ asMasm().branch32(Assembler::Equal, r0,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, r0,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, r0,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, r0, Imm32(ExceptionResumeKind::WasmCatch),
+ &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(
+ Address(PseudoStackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(
+ Address(PseudoStackPointer, ResumeFromException::offsetOfStackPointer()),
+ PseudoStackPointer);
+
+ // `retn` does indeed sync the stack pointer, but before doing that it reads
+  // from the stack. Consequently, if we remove this call to syncStackPtr()
+ // then we take on the requirement to prove that the immediately preceding
+ // loadPtr produces a value for PSP which maintains the SP <= PSP invariant.
+ // That's a proof burden we don't want to take on. In general it would be
+ // good to move (at some time in the future, not now) to a world where
+ // *every* assignment to PSP or SP is followed immediately by a copy into
+ // the other register. That would make all required correctness proofs
+ // trivial in the sense that it requires only local inspection of code
+ // immediately following (dominated by) any such assignment.
+ syncStackPtr();
+ retn(Imm32(1 * sizeof(void*))); // Pop from stack and return.
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(PseudoStackPointer, ResumeFromException::offsetOfTarget()),
+ r0);
+ loadPtr(
+ Address(PseudoStackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(
+ Address(PseudoStackPointer, ResumeFromException::offsetOfStackPointer()),
+ PseudoStackPointer);
+ syncStackPtr();
+ Br(x0);
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by the finally block: the exception and BooleanValue(true).
+ bind(&finally);
+ ARMRegister exception = x1;
+ Ldr(exception, MemOperand(PseudoStackPointer64,
+ ResumeFromException::offsetOfException()));
+ Ldr(x0,
+ MemOperand(PseudoStackPointer64, ResumeFromException::offsetOfTarget()));
+ Ldr(ARMRegister(FramePointer, 64),
+ MemOperand(PseudoStackPointer64,
+ ResumeFromException::offsetOfFramePointer()));
+ Ldr(PseudoStackPointer64,
+ MemOperand(PseudoStackPointer64,
+ ResumeFromException::offsetOfStackPointer()));
+ syncStackPtr();
+ push(exception);
+ pushValue(BooleanValue(true));
+ Br(x0);
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(
+ Address(PseudoStackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(
+ Address(PseudoStackPointer, ResumeFromException::offsetOfStackPointer()),
+ PseudoStackPointer);
+ // See comment further up beginning "`retn` does indeed sync the stack
+ // pointer". That comment applies here too.
+ syncStackPtr();
+ loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(
+ Address(PseudoStackPointer, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(
+ Address(PseudoStackPointer, offsetof(ResumeFromException, framePointer)),
+ FramePointer);
+ loadPtr(
+ Address(PseudoStackPointer, offsetof(ResumeFromException, stackPointer)),
+ PseudoStackPointer);
+ syncStackPtr();
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to
+ // caller frame before returning. This code is shared by ForcedReturnIon
+ // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ movePtr(FramePointer, PseudoStackPointer);
+ syncStackPtr();
+ vixl::MacroAssembler::Pop(ARMRegister(FramePointer, 64));
+
+ vixl::MacroAssembler::Pop(vixl::lr);
+ syncStackPtr();
+ vixl::MacroAssembler::Ret(vixl::lr);
+
+ // If we are bailing out to baseline to handle an exception, jump to the
+ // bailout tail stub. Load 1 (true) in x0 (ReturnReg) to indicate success.
+ bind(&bailout);
+ Ldr(x2, MemOperand(PseudoStackPointer64,
+ ResumeFromException::offsetOfBailoutInfo()));
+ Ldr(PseudoStackPointer64,
+ MemOperand(PseudoStackPointer64,
+ ResumeFromException::offsetOfStackPointer()));
+ syncStackPtr();
+ Mov(x0, 1);
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ Ldr(x29, MemOperand(PseudoStackPointer64,
+ ResumeFromException::offsetOfFramePointer()));
+ Ldr(PseudoStackPointer64,
+ MemOperand(PseudoStackPointer64,
+ ResumeFromException::offsetOfStackPointer()));
+ syncStackPtr();
+ Mov(x23, int64_t(wasm::FailInstanceReg));
+ ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(PseudoStackPointer, ResumeFromException::offsetOfTarget()),
+ r0);
+ loadPtr(
+ Address(PseudoStackPointer, ResumeFromException::offsetOfFramePointer()),
+ r29);
+ loadPtr(
+ Address(PseudoStackPointer, ResumeFromException::offsetOfStackPointer()),
+ PseudoStackPointer);
+ syncStackPtr();
+ Br(x0);
+
+ MOZ_ASSERT(GetStackPointer64().Is(PseudoStackPointer64));
+}
+
+void MacroAssemblerCompat::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerCompat::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+Assembler::Condition MacroAssemblerCompat::testStringTruthy(
+ bool truthy, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ const ARMRegister scratch32(scratch, 32);
+ const ARMRegister scratch64(scratch, 64);
+
+ MOZ_ASSERT(value.valueReg() != scratch);
+
+ unboxString(value, scratch);
+ Ldr(scratch32, MemOperand(scratch64, JSString::offsetOfLength()));
+ Cmp(scratch32, Operand(0));
+ return truthy ? Condition::NonZero : Condition::Zero;
+}
+
+Assembler::Condition MacroAssemblerCompat::testBigIntTruthy(
+ bool truthy, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+
+ MOZ_ASSERT(value.valueReg() != scratch);
+
+ unboxBigInt(value, scratch);
+ load32(Address(scratch, BigInt::offsetOfDigitLength()), scratch);
+ cmp32(scratch, Imm32(0));
+ return truthy ? Condition::NonZero : Condition::Zero;
+}
+
+void MacroAssemblerCompat::breakpoint() {
+  // Note: other payloads are possible, but GDB is known to misinterpret them
+  // sometimes and loop indefinitely on the breakpoint instead of stopping
+  // properly.
+ Brk(0);
+}
+
+// Either `any` is valid or `sixtyfour` is valid. Return a 32-bit ARMRegister
+// in the first case and an ARMRegister of the desired size in the latter case.
+
+static inline ARMRegister SelectGPReg(AnyRegister any, Register64 sixtyfour,
+ unsigned size = 64) {
+ MOZ_ASSERT(any.isValid() != (sixtyfour != Register64::Invalid()));
+
+ if (sixtyfour == Register64::Invalid()) {
+ return ARMRegister(any.gpr(), 32);
+ }
+
+ return ARMRegister(sixtyfour.reg, size);
+}
+
+// Assert that `sixtyfour` is invalid and then return an FP register from `any`
+// of the desired size.
+
+static inline ARMFPRegister SelectFPReg(AnyRegister any, Register64 sixtyfour,
+ unsigned size) {
+ MOZ_ASSERT(sixtyfour == Register64::Invalid());
+ return ARMFPRegister(any.fpu(), size);
+}
+
+void MacroAssemblerCompat::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase_, Register ptr_,
+ AnyRegister outany, Register64 out64) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+
+ ARMRegister memoryBase(memoryBase_, 64);
+ ARMRegister ptr(ptr_, 64);
+ if (offset) {
+ vixl::UseScratchRegisterScope temps(this);
+ ARMRegister scratch = temps.AcquireX();
+ Add(scratch, ptr, Operand(offset));
+ MemOperand srcAddr(memoryBase, scratch);
+ wasmLoadImpl(access, srcAddr, outany, out64);
+ } else {
+ MemOperand srcAddr(memoryBase, ptr);
+ wasmLoadImpl(access, srcAddr, outany, out64);
+ }
+}
+
+void MacroAssemblerCompat::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
+ MemOperand srcAddr, AnyRegister outany,
+ Register64 out64) {
+ // Reg+Reg and Reg+SmallImm addressing is directly encodable in one Load
+ // instruction, hence we expect exactly one instruction to be emitted in the
+ // window.
+ int32_t instructionsExpected = 1;
+
+ // Splat and widen however require an additional instruction to be emitted
+ // after the load, so allow one more instruction in the window.
+ if (access.isSplatSimd128Load() || access.isWidenSimd128Load()) {
+ MOZ_ASSERT(access.type() == Scalar::Float64);
+ instructionsExpected++;
+ }
+
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ asMasm().memoryBarrierBefore(access.sync());
+
+ {
+ // The AutoForbidPoolsAndNops asserts if we emit more than the expected
+ // number of instructions and thus ensures that the access metadata is
+ // emitted at the address of the Load.
+ AutoForbidPoolsAndNops afp(this, instructionsExpected);
+
+ append(access, asMasm().currentOffset());
+ switch (access.type()) {
+ case Scalar::Int8:
+ Ldrsb(SelectGPReg(outany, out64), srcAddr);
+ break;
+ case Scalar::Uint8:
+ Ldrb(SelectGPReg(outany, out64), srcAddr);
+ break;
+ case Scalar::Int16:
+ Ldrsh(SelectGPReg(outany, out64), srcAddr);
+ break;
+ case Scalar::Uint16:
+ Ldrh(SelectGPReg(outany, out64), srcAddr);
+ break;
+ case Scalar::Int32:
+ if (out64 != Register64::Invalid()) {
+ Ldrsw(SelectGPReg(outany, out64), srcAddr);
+ } else {
+ Ldr(SelectGPReg(outany, out64, 32), srcAddr);
+ }
+ break;
+ case Scalar::Uint32:
+ Ldr(SelectGPReg(outany, out64, 32), srcAddr);
+ break;
+ case Scalar::Int64:
+ Ldr(SelectGPReg(outany, out64), srcAddr);
+ break;
+ case Scalar::Float32:
+ // LDR does the right thing also for access.isZeroExtendSimd128Load()
+ Ldr(SelectFPReg(outany, out64, 32), srcAddr);
+ break;
+ case Scalar::Float64:
+ if (access.isSplatSimd128Load() || access.isWidenSimd128Load()) {
+ ScratchSimd128Scope scratch_(asMasm());
+ ARMFPRegister scratch = Simd1D(scratch_);
+ Ldr(scratch, srcAddr);
+ if (access.isSplatSimd128Load()) {
+ Dup(SelectFPReg(outany, out64, 128).V2D(), scratch, 0);
+ } else {
+ MOZ_ASSERT(access.isWidenSimd128Load());
+ switch (access.widenSimdOp()) {
+ case wasm::SimdOp::V128Load8x8S:
+ Sshll(SelectFPReg(outany, out64, 128).V8H(), scratch.V8B(), 0);
+ break;
+ case wasm::SimdOp::V128Load8x8U:
+ Ushll(SelectFPReg(outany, out64, 128).V8H(), scratch.V8B(), 0);
+ break;
+ case wasm::SimdOp::V128Load16x4S:
+ Sshll(SelectFPReg(outany, out64, 128).V4S(), scratch.V4H(), 0);
+ break;
+ case wasm::SimdOp::V128Load16x4U:
+ Ushll(SelectFPReg(outany, out64, 128).V4S(), scratch.V4H(), 0);
+ break;
+ case wasm::SimdOp::V128Load32x2S:
+ Sshll(SelectFPReg(outany, out64, 128).V2D(), scratch.V2S(), 0);
+ break;
+ case wasm::SimdOp::V128Load32x2U:
+ Ushll(SelectFPReg(outany, out64, 128).V2D(), scratch.V2S(), 0);
+ break;
+ default:
+ MOZ_CRASH("Unexpected widening op for wasmLoad");
+ }
+ }
+ } else {
+ // LDR does the right thing also for access.isZeroExtendSimd128Load()
+ Ldr(SelectFPReg(outany, out64, 64), srcAddr);
+ }
+ break;
+ case Scalar::Simd128:
+ Ldr(SelectFPReg(outany, out64, 128), srcAddr);
+ break;
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ }
+
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+// Return true if `address` can be represented as an immediate (possibly scaled
+// by the access size) in an LDR/STR type instruction.
+//
+// For more about the logic here, see vixl::MacroAssembler::LoadStoreMacro().
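+//
+// For example (standard AArch64 encodings), a 4-byte access can use either an
+// unscaled signed 9-bit byte offset (-256..255) or a scaled unsigned 12-bit
+// offset (0..16380 in steps of 4).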
+static bool IsLSImmediateOffset(uint64_t address, size_t accessByteSize) {
+ // The predicates below operate on signed values only.
+ if (address > INT64_MAX) {
+ return false;
+ }
+
+ // The access size is always a power of 2, so computing the log amounts to
+ // counting trailing zeroes.
+ unsigned logAccessSize = mozilla::CountTrailingZeroes32(accessByteSize);
+ return (MacroAssemblerCompat::IsImmLSUnscaled(int64_t(address)) ||
+ MacroAssemblerCompat::IsImmLSScaled(int64_t(address), logAccessSize));
+}
+
+void MacroAssemblerCompat::wasmLoadAbsolute(
+ const wasm::MemoryAccessDesc& access, Register memoryBase, uint64_t address,
+ AnyRegister output, Register64 out64) {
+ if (!IsLSImmediateOffset(address, access.byteSize())) {
+ // The access will require the constant to be loaded into a temp register.
+ // Do so here, to keep the logic in wasmLoadImpl() tractable wrt emitting
+ // trap information.
+ //
+    // Almost all constant addresses will in practice be handled by a single
+    // MOV, so do not worry about additional optimizations here.
+ vixl::UseScratchRegisterScope temps(this);
+ ARMRegister scratch = temps.AcquireX();
+ Mov(scratch, address);
+ MemOperand srcAddr(X(memoryBase), scratch);
+ wasmLoadImpl(access, srcAddr, output, out64);
+ } else {
+ MemOperand srcAddr(X(memoryBase), address);
+ wasmLoadImpl(access, srcAddr, output, out64);
+ }
+}
+
+void MacroAssemblerCompat::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
+ AnyRegister valany, Register64 val64,
+ Register memoryBase_, Register ptr_) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+
+ ARMRegister memoryBase(memoryBase_, 64);
+ ARMRegister ptr(ptr_, 64);
+ if (offset) {
+ vixl::UseScratchRegisterScope temps(this);
+ ARMRegister scratch = temps.AcquireX();
+ Add(scratch, ptr, Operand(offset));
+ MemOperand destAddr(memoryBase, scratch);
+ wasmStoreImpl(access, destAddr, valany, val64);
+ } else {
+ MemOperand destAddr(memoryBase, ptr);
+ wasmStoreImpl(access, destAddr, valany, val64);
+ }
+}
+
+void MacroAssemblerCompat::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
+ MemOperand dstAddr, AnyRegister valany,
+ Register64 val64) {
+ // NOTE: the generated code must match the assembly code in gen_store in
+ // GenerateAtomicOperations.py
+ asMasm().memoryBarrierBefore(access.sync());
+
+ {
+    // Reg+Reg addressing is directly encodable in one Store instruction, so
+    // the AutoForbidPoolsAndNops scope ensures that the access metadata is
+    // emitted at the address of the Store; it asserts if we emit more than
+    // one instruction.
+
+ AutoForbidPoolsAndNops afp(this,
+ /* max number of instructions in scope = */ 1);
+
+ append(access, asMasm().currentOffset());
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ Strb(SelectGPReg(valany, val64), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ Strh(SelectGPReg(valany, val64), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ Str(SelectGPReg(valany, val64), dstAddr);
+ break;
+ case Scalar::Int64:
+ Str(SelectGPReg(valany, val64), dstAddr);
+ break;
+ case Scalar::Float32:
+ Str(SelectFPReg(valany, val64, 32), dstAddr);
+ break;
+ case Scalar::Float64:
+ Str(SelectFPReg(valany, val64, 64), dstAddr);
+ break;
+ case Scalar::Simd128:
+ Str(SelectFPReg(valany, val64, 128), dstAddr);
+ break;
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ }
+
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerCompat::wasmStoreAbsolute(
+ const wasm::MemoryAccessDesc& access, AnyRegister value, Register64 value64,
+ Register memoryBase, uint64_t address) {
+ // See comments in wasmLoadAbsolute.
+ unsigned logAccessSize = mozilla::CountTrailingZeroes32(access.byteSize());
+ if (address > INT64_MAX || !(IsImmLSScaled(int64_t(address), logAccessSize) ||
+ IsImmLSUnscaled(int64_t(address)))) {
+ vixl::UseScratchRegisterScope temps(this);
+ ARMRegister scratch = temps.AcquireX();
+ Mov(scratch, address);
+ MemOperand destAddr(X(memoryBase), scratch);
+ wasmStoreImpl(access, destAddr, value, value64);
+ } else {
+ MemOperand destAddr(X(memoryBase), address);
+ wasmStoreImpl(access, destAddr, value, value64);
+ }
+}
+
+void MacroAssemblerCompat::compareSimd128Int(Assembler::Condition cond,
+ ARMFPRegister dest,
+ ARMFPRegister lhs,
+ ARMFPRegister rhs) {
+ switch (cond) {
+ case Assembler::Equal:
+ Cmeq(dest, lhs, rhs);
+ break;
+ case Assembler::NotEqual:
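+      // There is no SIMD compare-not-equal instruction; compare for equality
+      // and then invert the result.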
+ Cmeq(dest, lhs, rhs);
+ Mvn(dest, dest);
+ break;
+ case Assembler::GreaterThan:
+ Cmgt(dest, lhs, rhs);
+ break;
+ case Assembler::GreaterThanOrEqual:
+ Cmge(dest, lhs, rhs);
+ break;
+ case Assembler::LessThan:
+ Cmgt(dest, rhs, lhs);
+ break;
+ case Assembler::LessThanOrEqual:
+ Cmge(dest, rhs, lhs);
+ break;
+ case Assembler::Above:
+ Cmhi(dest, lhs, rhs);
+ break;
+ case Assembler::AboveOrEqual:
+ Cmhs(dest, lhs, rhs);
+ break;
+ case Assembler::Below:
+ Cmhi(dest, rhs, lhs);
+ break;
+ case Assembler::BelowOrEqual:
+ Cmhs(dest, rhs, lhs);
+ break;
+ default:
+ MOZ_CRASH("Unexpected SIMD integer condition");
+ }
+}
+
+void MacroAssemblerCompat::compareSimd128Float(Assembler::Condition cond,
+ ARMFPRegister dest,
+ ARMFPRegister lhs,
+ ARMFPRegister rhs) {
+ switch (cond) {
+ case Assembler::Equal:
+ Fcmeq(dest, lhs, rhs);
+ break;
+ case Assembler::NotEqual:
+ Fcmeq(dest, lhs, rhs);
+ Mvn(dest, dest);
+ break;
+ case Assembler::GreaterThan:
+ Fcmgt(dest, lhs, rhs);
+ break;
+ case Assembler::GreaterThanOrEqual:
+ Fcmge(dest, lhs, rhs);
+ break;
+ case Assembler::LessThan:
+ Fcmgt(dest, rhs, lhs);
+ break;
+ case Assembler::LessThanOrEqual:
+ Fcmge(dest, rhs, lhs);
+ break;
+ default:
+ MOZ_CRASH("Unexpected SIMD integer condition");
+ }
+}
+
+void MacroAssemblerCompat::rightShiftInt8x16(FloatRegister lhs, Register rhs,
+ FloatRegister dest,
+ bool isUnsigned) {
+ ScratchSimd128Scope scratch_(asMasm());
+ ARMFPRegister shift = Simd16B(scratch_);
+
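+  // AArch64 has no variable right-shift instruction: broadcast the shift
+  // count, negate it, and use SSHL/USHL, which shift right for negative
+  // counts.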
+ Dup(shift, ARMRegister(rhs, 32));
+ Neg(shift, shift);
+
+ if (isUnsigned) {
+ Ushl(Simd16B(dest), Simd16B(lhs), shift);
+ } else {
+ Sshl(Simd16B(dest), Simd16B(lhs), shift);
+ }
+}
+
+void MacroAssemblerCompat::rightShiftInt16x8(FloatRegister lhs, Register rhs,
+ FloatRegister dest,
+ bool isUnsigned) {
+ ScratchSimd128Scope scratch_(asMasm());
+ ARMFPRegister shift = Simd8H(scratch_);
+
+ Dup(shift, ARMRegister(rhs, 32));
+ Neg(shift, shift);
+
+ if (isUnsigned) {
+ Ushl(Simd8H(dest), Simd8H(lhs), shift);
+ } else {
+ Sshl(Simd8H(dest), Simd8H(lhs), shift);
+ }
+}
+
+void MacroAssemblerCompat::rightShiftInt32x4(FloatRegister lhs, Register rhs,
+ FloatRegister dest,
+ bool isUnsigned) {
+ ScratchSimd128Scope scratch_(asMasm());
+ ARMFPRegister shift = Simd4S(scratch_);
+
+ Dup(shift, ARMRegister(rhs, 32));
+ Neg(shift, shift);
+
+ if (isUnsigned) {
+ Ushl(Simd4S(dest), Simd4S(lhs), shift);
+ } else {
+ Sshl(Simd4S(dest), Simd4S(lhs), shift);
+ }
+}
+
+void MacroAssemblerCompat::rightShiftInt64x2(FloatRegister lhs, Register rhs,
+ FloatRegister dest,
+ bool isUnsigned) {
+ ScratchSimd128Scope scratch_(asMasm());
+ ARMFPRegister shift = Simd2D(scratch_);
+
+ Dup(shift, ARMRegister(rhs, 64));
+ Neg(shift, shift);
+
+ if (isUnsigned) {
+ Ushl(Simd2D(dest), Simd2D(lhs), shift);
+ } else {
+ Sshl(Simd2D(dest), Simd2D(lhs), shift);
+ }
+}
+
+void MacroAssembler::reserveStack(uint32_t amount) {
+ // TODO: This bumps |sp| every time we reserve using a second register.
+ // It would save some instructions if we had a fixed frame size.
+ vixl::MacroAssembler::Claim(Operand(amount));
+ adjustFrame(amount);
+}
+
+void MacroAssembler::Push(RegisterOrSP reg) {
+ if (IsHiddenSP(reg)) {
+ push(sp);
+ } else {
+ push(AsRegister(reg));
+ }
+ adjustFrame(sizeof(intptr_t));
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void MacroAssembler::flush() { Assembler::flush(); }
+
+// ===============================================================
+// Stack manipulation functions.
+
+// Routines for saving/restoring registers on the stack. The format is:
+//
+// (highest address)
+//
+// integer (X) regs in any order size: 8 * # int regs
+//
+// if # int regs is odd,
+// then an 8 byte alignment hole size: 0 or 8
+//
+// double (D) regs in any order size: 8 * # double regs
+//
+// if # double regs is odd,
+// then an 8 byte alignment hole size: 0 or 8
+//
+// vector (Q) regs in any order size: 16 * # vector regs
+//
+// (lowest address)
+//
+// Hence the size of the save area is a multiple of 16. And, provided that the
+// base (highest) address is 16-aligned, the vector reg save/restore accesses
+// will also be 16-aligned, as will pairwise operations for the double regs.
+//
+// Implied by this is that the format of the double and vector dump area
+// corresponds with what FloatRegister::GetPushSizeInBytes computes.
+// See block comment in MacroAssembler.h for more details.
+
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ size_t numIntRegs = set.gprs().size();
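+  // Round the GPR count up to an even number so that the GPR area stays
+  // 16-byte aligned; the FP/SIMD area size comes from GetPushSizeInBytes.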
+ return ((numIntRegs + 1) & ~1) * sizeof(intptr_t) +
+ FloatRegister::GetPushSizeInBytes(set.fpus());
+}
+
+// Generate code to dump the values in `set`, either on the stack if `dest` is
+// `Nothing` or working backwards from the address denoted by `dest` if it is
+// `Some`. These two cases are combined so as to minimise the chance of
+// mistakenly generating different formats for the same `set`, given that the
+// `Some` `dest` case is used extremely rarely.
+static void PushOrStoreRegsInMask(MacroAssembler* masm, LiveRegisterSet set,
+ mozilla::Maybe<Address> dest) {
+ static_assert(sizeof(FloatRegisters::RegisterContent) == 16);
+
+ // If we're saving to arbitrary memory, check the destination is big enough.
+ if (dest) {
+ mozilla::DebugOnly<size_t> bytesRequired =
+ masm->PushRegsInMaskSizeInBytes(set);
+ MOZ_ASSERT(dest->offset >= 0);
+ MOZ_ASSERT(((size_t)dest->offset) >= bytesRequired);
+ }
+
+ // Note the high limit point; we'll check it again later.
+ mozilla::DebugOnly<size_t> maxExtentInitial =
+ dest ? dest->offset : masm->framePushed();
+
+ // Gather up the integer registers in groups of four, and either push each
+ // group as a single transfer so as to minimise the number of stack pointer
+ // changes, or write them individually to memory. Take care to ensure the
+ // space used remains 16-aligned.
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more();) {
+ vixl::CPURegister src[4] = {vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg,
+ vixl::NoCPUReg};
+ size_t i;
+ for (i = 0; i < 4 && iter.more(); i++) {
+ src[i] = ARMRegister(*iter, 64);
+ ++iter;
+ }
+ MOZ_ASSERT(i > 0);
+
+ if (i == 1 || i == 3) {
+ // Ensure the stack remains 16-aligned
+ MOZ_ASSERT(!iter.more());
+ src[i] = vixl::xzr;
+ i++;
+ }
+ MOZ_ASSERT(i == 2 || i == 4);
+
+ if (dest) {
+ for (size_t j = 0; j < i; j++) {
+ Register ireg = Register::FromCode(src[j].IsZero() ? Registers::xzr
+ : src[j].code());
+ dest->offset -= sizeof(intptr_t);
+ masm->storePtr(ireg, *dest);
+ }
+ } else {
+ masm->adjustFrame(i * 8);
+ masm->vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
+ }
+ }
+
+ // Now the same for the FP double registers. Note that because of how
+ // ReduceSetForPush works, an underlying AArch64 SIMD/FP register can either
+ // be present as a double register, or as a V128 register, but not both.
+  // Firstly, gather up the registers to be pushed.
+
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ vixl::CPURegister allSrcs[FloatRegisters::TotalPhys];
+ size_t numAllSrcs = 0;
+
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ if (reg.isDouble()) {
+ MOZ_RELEASE_ASSERT(numAllSrcs < FloatRegisters::TotalPhys);
+ allSrcs[numAllSrcs] = ARMFPRegister(reg, 64);
+ numAllSrcs++;
+ } else {
+ MOZ_ASSERT(reg.isSimd128());
+ }
+ }
+ MOZ_RELEASE_ASSERT(numAllSrcs <= FloatRegisters::TotalPhys);
+
+ if ((numAllSrcs & 1) == 1) {
+ // We've got an odd number of doubles. In order to maintain 16-alignment,
+ // push the last register twice. We'll skip over the duplicate in
+ // PopRegsInMaskIgnore.
+ allSrcs[numAllSrcs] = allSrcs[numAllSrcs - 1];
+ numAllSrcs++;
+ }
+ MOZ_RELEASE_ASSERT(numAllSrcs <= FloatRegisters::TotalPhys);
+ MOZ_RELEASE_ASSERT((numAllSrcs & 1) == 0);
+
+ // And now generate the transfers.
+ size_t i;
+ if (dest) {
+ for (i = 0; i < numAllSrcs; i++) {
+ FloatRegister freg =
+ FloatRegister(FloatRegisters::FPRegisterID(allSrcs[i].code()),
+ FloatRegisters::Kind::Double);
+ dest->offset -= sizeof(double);
+ masm->storeDouble(freg, *dest);
+ }
+ } else {
+ i = 0;
+ while (i < numAllSrcs) {
+ vixl::CPURegister src[4] = {vixl::NoCPUReg, vixl::NoCPUReg,
+ vixl::NoCPUReg, vixl::NoCPUReg};
+ size_t j;
+ for (j = 0; j < 4 && j + i < numAllSrcs; j++) {
+ src[j] = allSrcs[j + i];
+ }
+ masm->adjustFrame(8 * j);
+ masm->vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
+ i += j;
+ }
+ }
+ MOZ_ASSERT(i == numAllSrcs);
+
+ // Finally, deal with the SIMD (V128) registers. This is a bit simpler
+ // as there's no need for special-casing to maintain 16-alignment.
+
+ numAllSrcs = 0;
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ if (reg.isSimd128()) {
+ MOZ_RELEASE_ASSERT(numAllSrcs < FloatRegisters::TotalPhys);
+ allSrcs[numAllSrcs] = ARMFPRegister(reg, 128);
+ numAllSrcs++;
+ }
+ }
+ MOZ_RELEASE_ASSERT(numAllSrcs <= FloatRegisters::TotalPhys);
+
+ // Generate the transfers.
+ if (dest) {
+ for (i = 0; i < numAllSrcs; i++) {
+ FloatRegister freg =
+ FloatRegister(FloatRegisters::FPRegisterID(allSrcs[i].code()),
+ FloatRegisters::Kind::Simd128);
+ dest->offset -= FloatRegister::SizeOfSimd128;
+ masm->storeUnalignedSimd128(freg, *dest);
+ }
+ } else {
+ i = 0;
+ while (i < numAllSrcs) {
+ vixl::CPURegister src[4] = {vixl::NoCPUReg, vixl::NoCPUReg,
+ vixl::NoCPUReg, vixl::NoCPUReg};
+ size_t j;
+ for (j = 0; j < 4 && j + i < numAllSrcs; j++) {
+ src[j] = allSrcs[j + i];
+ }
+ masm->adjustFrame(16 * j);
+ masm->vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
+ i += j;
+ }
+ }
+ MOZ_ASSERT(i == numAllSrcs);
+
+ // Final overrun check.
+ if (dest) {
+ MOZ_ASSERT(maxExtentInitial - dest->offset ==
+ masm->PushRegsInMaskSizeInBytes(set));
+ } else {
+ MOZ_ASSERT(masm->framePushed() - maxExtentInitial ==
+ masm->PushRegsInMaskSizeInBytes(set));
+ }
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+ PushOrStoreRegsInMask(this, set, mozilla::Nothing());
+}
+
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register scratch) {
+ PushOrStoreRegsInMask(this, set, mozilla::Some(dest));
+}
+
+// This is a helper function for PopRegsInMaskIgnore below. It emits the
+// loads described by dests[0] and [1] and offsets[0] and [1], generating a
+// load-pair if it can.
+static void GeneratePendingLoadsThenFlush(MacroAssembler* masm,
+ vixl::CPURegister* dests,
+ uint32_t* offsets,
+ uint32_t transactionSize) {
+ // Generate the loads ..
+ if (!dests[0].IsNone()) {
+ if (!dests[1].IsNone()) {
+ // [0] and [1] both present.
+ if (offsets[0] + transactionSize == offsets[1]) {
+ masm->Ldp(dests[0], dests[1],
+ MemOperand(masm->GetStackPointer64(), offsets[0]));
+ } else {
+ // Theoretically we could check for a load-pair with the destinations
+ // switched, but our callers will never generate that. Hence there's
+ // no loss in giving up at this point and generating two loads.
+ masm->Ldr(dests[0], MemOperand(masm->GetStackPointer64(), offsets[0]));
+ masm->Ldr(dests[1], MemOperand(masm->GetStackPointer64(), offsets[1]));
+ }
+ } else {
+ // [0] only.
+ masm->Ldr(dests[0], MemOperand(masm->GetStackPointer64(), offsets[0]));
+ }
+ } else {
+ if (!dests[1].IsNone()) {
+ // [1] only. Can't happen because callers always fill [0] before [1].
+ MOZ_CRASH("GenerateLoadsThenFlush");
+ } else {
+ // Neither entry valid. This can happen.
+ }
+ }
+
+ // .. and flush.
+ dests[0] = dests[1] = vixl::NoCPUReg;
+ offsets[0] = offsets[1] = 0;
+}
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ mozilla::DebugOnly<size_t> framePushedInitial = framePushed();
+
+ // The offset of the data from the stack pointer.
+ uint32_t offset = 0;
+
+ // The set of FP/SIMD registers we need to restore.
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+
+ // The set of registers to ignore. BroadcastToAllSizes() is used to avoid
+ // any ambiguities arising from (eg) `fpuSet` containing q17 but `ignore`
+ // containing d17.
+ FloatRegisterSet ignoreFpusBroadcasted(
+ FloatRegister::BroadcastToAllSizes(ignore.fpus()));
+
+ // First recover the SIMD (V128) registers. This is straightforward in that
+ // we don't need to think about alignment holes.
+
+ // These three form a two-entry queue that holds loads that we know we
+ // need, but which we haven't yet emitted.
+ vixl::CPURegister pendingDests[2] = {vixl::NoCPUReg, vixl::NoCPUReg};
+ uint32_t pendingOffsets[2] = {0, 0};
+ size_t nPending = 0;
+
+ for (FloatRegisterIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ if (reg.isDouble()) {
+ continue;
+ }
+ MOZ_RELEASE_ASSERT(reg.isSimd128());
+
+ uint32_t offsetForReg = offset;
+ offset += FloatRegister::SizeOfSimd128;
+
+ if (ignoreFpusBroadcasted.hasRegisterIndex(reg)) {
+ continue;
+ }
+
+ MOZ_ASSERT(nPending <= 2);
+ if (nPending == 2) {
+ GeneratePendingLoadsThenFlush(this, pendingDests, pendingOffsets, 16);
+ nPending = 0;
+ }
+ pendingDests[nPending] = ARMFPRegister(reg, 128);
+ pendingOffsets[nPending] = offsetForReg;
+ nPending++;
+ }
+ GeneratePendingLoadsThenFlush(this, pendingDests, pendingOffsets, 16);
+ nPending = 0;
+
+ MOZ_ASSERT((offset % 16) == 0);
+
+ // Now recover the FP double registers. This is more tricky in that we need
+ // to skip over the lowest-addressed of them if the number of them was odd.
+
+ if ((((fpuSet.bits() & FloatRegisters::AllDoubleMask).size()) & 1) == 1) {
+ offset += sizeof(double);
+ }
+
+ for (FloatRegisterIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ if (reg.isSimd128()) {
+ continue;
+ }
+ /* true but redundant, per loop above: MOZ_RELEASE_ASSERT(reg.isDouble()) */
+
+ uint32_t offsetForReg = offset;
+ offset += sizeof(double);
+
+ if (ignoreFpusBroadcasted.hasRegisterIndex(reg)) {
+ continue;
+ }
+
+ MOZ_ASSERT(nPending <= 2);
+ if (nPending == 2) {
+ GeneratePendingLoadsThenFlush(this, pendingDests, pendingOffsets, 8);
+ nPending = 0;
+ }
+ pendingDests[nPending] = ARMFPRegister(reg, 64);
+ pendingOffsets[nPending] = offsetForReg;
+ nPending++;
+ }
+ GeneratePendingLoadsThenFlush(this, pendingDests, pendingOffsets, 8);
+ nPending = 0;
+
+ MOZ_ASSERT((offset % 16) == 0);
+ MOZ_ASSERT(offset == set.fpus().getPushSizeInBytes());
+
+ // And finally recover the integer registers, again skipping an alignment
+ // hole if it exists.
+
+ if ((set.gprs().size() & 1) == 1) {
+ offset += sizeof(uint64_t);
+ }
+
+ for (GeneralRegisterIterator iter(set.gprs()); iter.more(); ++iter) {
+ Register reg = *iter;
+
+ uint32_t offsetForReg = offset;
+ offset += sizeof(uint64_t);
+
+ if (ignore.has(reg)) {
+ continue;
+ }
+
+ MOZ_ASSERT(nPending <= 2);
+ if (nPending == 2) {
+ GeneratePendingLoadsThenFlush(this, pendingDests, pendingOffsets, 8);
+ nPending = 0;
+ }
+ pendingDests[nPending] = ARMRegister(reg, 64);
+ pendingOffsets[nPending] = offsetForReg;
+ nPending++;
+ }
+ GeneratePendingLoadsThenFlush(this, pendingDests, pendingOffsets, 8);
+
+ MOZ_ASSERT((offset % 16) == 0);
+
+ size_t bytesPushed = PushRegsInMaskSizeInBytes(set);
+ MOZ_ASSERT(offset == bytesPushed);
+ freeStack(bytesPushed);
+}
+
+void MacroAssembler::Push(Register reg) {
+ push(reg);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(Register reg1, Register reg2, Register reg3,
+ Register reg4) {
+ push(reg1, reg2, reg3, reg4);
+ adjustFrame(4 * sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const Imm32 imm) {
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const ImmWord imm) {
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const ImmPtr imm) {
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const ImmGCPtr ptr) {
+ push(ptr);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(FloatRegister f) {
+ push(f);
+ adjustFrame(sizeof(double));
+}
+
+void MacroAssembler::PushBoxed(FloatRegister reg) {
+ subFromStackPtr(Imm32(sizeof(double)));
+ boxDouble(reg, Address(getStackPointer(), 0));
+ adjustFrame(sizeof(double));
+}
+
+void MacroAssembler::Pop(Register reg) {
+ pop(reg);
+ adjustFrame(-1 * int64_t(sizeof(int64_t)));
+}
+
+void MacroAssembler::Pop(FloatRegister f) {
+ loadDouble(Address(getStackPointer(), 0), f);
+ freeStack(sizeof(double));
+}
+
+void MacroAssembler::Pop(const ValueOperand& val) {
+ pop(val);
+ adjustFrame(-1 * int64_t(sizeof(int64_t)));
+}
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset MacroAssembler::call(Register reg) {
+ // This sync has been observed (and is expected) to be necessary.
+ // eg testcase: tests/debug/bug1107525.js
+ syncStackPtr();
+ Blr(ARMRegister(reg, 64));
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset MacroAssembler::call(Label* label) {
+ // This sync has been observed (and is expected) to be necessary.
+ // eg testcase: tests/basic/testBug504520Harder.js
+ syncStackPtr();
+ Bl(label);
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::call(ImmPtr imm) {
+ // This sync has been observed (and is expected) to be necessary.
+ // eg testcase: asm.js/testTimeout5.js
+ syncStackPtr();
+ vixl::UseScratchRegisterScope temps(this);
+ MOZ_ASSERT(temps.IsAvailable(ScratchReg64)); // ip0
+ temps.Exclude(ScratchReg64);
+ movePtr(imm, ScratchReg64.asUnsized());
+ Blr(ScratchReg64);
+}
+
+void MacroAssembler::call(ImmWord imm) { call(ImmPtr((void*)imm.value)); }
+
+CodeOffset MacroAssembler::call(wasm::SymbolicAddress imm) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ // This sync is believed to be necessary, although no case in jit-test/tests
+ // has been observed to cause SP != PSP here.
+ syncStackPtr();
+ movePtr(imm, scratch);
+ Blr(ARMRegister(scratch, 64));
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::call(const Address& addr) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ // This sync has been observed (and is expected) to be necessary.
+ // eg testcase: tests/backup-point-bug1315634.js
+ syncStackPtr();
+ loadPtr(addr, scratch);
+ Blr(ARMRegister(scratch, 64));
+}
+
+void MacroAssembler::call(JitCode* c) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ // This sync has been observed (and is expected) to be necessary.
+ // eg testcase: arrays/new-array-undefined-undefined-more-args-2.js
+ syncStackPtr();
+ BufferOffset off = immPool64(scratch64, uint64_t(c->raw()));
+ addPendingJump(off, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ blr(scratch64);
+}
+
+CodeOffset MacroAssembler::callWithPatch() {
+ // This needs to sync. Wasm goes through this one for intramodule calls.
+ //
+ // In other cases, wasm goes through masm.wasmCallImport(),
+ // masm.wasmCallBuiltinInstanceMethod, masm.wasmCallIndirect, all of which
+ // sync.
+ //
+ // This sync is believed to be necessary, although no case in jit-test/tests
+ // has been observed to cause SP != PSP here.
+ syncStackPtr();
+ bl(0, LabelDoc());
+ return CodeOffset(currentOffset());
+}
+void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ Instruction* inst = getInstructionAt(BufferOffset(callerOffset - 4));
+ MOZ_ASSERT(inst->IsBL());
+ ptrdiff_t relTarget = (int)calleeOffset - ((int)callerOffset - 4);
+ ptrdiff_t relTarget00 = relTarget >> 2;
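+  // BL encodes a signed 26-bit offset in units of instructions (4 bytes),
+  // hence the shift by 2 and the range checks below.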
+ MOZ_RELEASE_ASSERT((relTarget & 0x3) == 0);
+ MOZ_RELEASE_ASSERT(vixl::IsInt26(relTarget00));
+ bl(inst, relTarget00);
+}
+
+CodeOffset MacroAssembler::farJumpWithPatch() {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ const ARMRegister scratch2 = temps.AcquireX();
+
+ AutoForbidPoolsAndNops afp(this,
+ /* max number of instructions in scope = */ 7);
+
+ mozilla::DebugOnly<uint32_t> before = currentOffset();
+
+ align(8); // At most one nop
+
+ Label branch;
+ adr(scratch2, &branch);
+ ldr(scratch, vixl::MemOperand(scratch2, 4));
+ add(scratch2, scratch2, scratch);
+ CodeOffset offs(currentOffset());
+ bind(&branch);
+ br(scratch2);
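+  // Two placeholder data words that will hold the 64-bit branch distance;
+  // patchFarJump() fills them in.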
+ Emit(UINT32_MAX);
+ Emit(UINT32_MAX);
+
+ mozilla::DebugOnly<uint32_t> after = currentOffset();
+
+ MOZ_ASSERT(after - before == 24 || after - before == 28);
+
+ return offs;
+}
+
+void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ Instruction* inst1 = getInstructionAt(BufferOffset(farJump.offset() + 4));
+ Instruction* inst2 = getInstructionAt(BufferOffset(farJump.offset() + 8));
+
+ int64_t distance = (int64_t)targetOffset - (int64_t)farJump.offset();
+
+ MOZ_ASSERT(inst1->InstructionBits() == UINT32_MAX);
+ MOZ_ASSERT(inst2->InstructionBits() == UINT32_MAX);
+
+ inst1->SetInstructionBits((uint32_t)distance);
+ inst2->SetInstructionBits((uint32_t)(distance >> 32));
+}
+
+CodeOffset MacroAssembler::nopPatchableToCall() {
+ AutoForbidPoolsAndNops afp(this,
+ /* max number of instructions in scope = */ 1);
+ Nop();
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
+ uint8_t* inst = call - 4;
+ Instruction* instr = reinterpret_cast<Instruction*>(inst);
+ MOZ_ASSERT(instr->IsBL() || instr->IsNOP());
+ bl(instr, (target - inst) >> 2);
+}
+
+void MacroAssembler::patchCallToNop(uint8_t* call) {
+ uint8_t* inst = call - 4;
+ Instruction* instr = reinterpret_cast<Instruction*>(inst);
+ MOZ_ASSERT(instr->IsBL() || instr->IsNOP());
+ nop(instr);
+}
+
+void MacroAssembler::pushReturnAddress() {
+ MOZ_RELEASE_ASSERT(!sp.Is(GetStackPointer64()), "Not valid");
+ push(lr);
+}
+
+void MacroAssembler::popReturnAddress() {
+ MOZ_RELEASE_ASSERT(!sp.Is(GetStackPointer64()), "Not valid");
+ pop(lr);
+}
+
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+  // Because wasm does not require dynamic alignment of SP, this routine
+  // should never be called when generating wasm code.
+ MOZ_ASSERT(!IsCompilingWasm());
+
+ // The following won't work for SP -- needs slightly different logic.
+ MOZ_RELEASE_ASSERT(GetStackPointer64().Is(PseudoStackPointer64));
+
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ int64_t alignment = ~(int64_t(ABIStackAlignment) - 1);
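+  // ~(ABIStackAlignment - 1) is the mask that, when ANDed in below, rounds
+  // the stack pointer down to an ABIStackAlignment boundary.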
+ ARMRegister scratch64(scratch, 64);
+ MOZ_ASSERT(!scratch64.Is(PseudoStackPointer64));
+
+ // Always save LR -- Baseline ICs assume that LR isn't modified.
+ push(lr);
+
+ // Remember the stack address on entry. This is reloaded in callWithABIPost
+ // below.
+ Mov(scratch64, PseudoStackPointer64);
+
+  // Perform the alignment, which includes the effective push of the previous sp.
+ Sub(PseudoStackPointer64, PseudoStackPointer64, Operand(8));
+ And(PseudoStackPointer64, PseudoStackPointer64, Operand(alignment));
+ syncStackPtr();
+
+ // Store previous sp to the top of the stack, aligned. This is also
+ // reloaded in callWithABIPost.
+ Str(scratch64, MemOperand(PseudoStackPointer64, 0));
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ // wasm operates without the need for dynamic alignment of SP.
+ MOZ_ASSERT(!(dynamicAlignment_ && callFromWasm));
+
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ // ARM64 *really* wants SP to always be 16-aligned, so ensure this now.
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, StackAlignment);
+ } else {
+ // This can happen when we attach out-of-line stubs for rare cases. For
+ // example CodeGenerator::visitWasmTruncateToInt32 adds an out-of-line
+ // chunk.
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(*stackAdjust);
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ // Call boundaries communicate stack via SP.
+ // (jseward, 2021Mar03) This sync may well be redundant, given that all of
+ // the MacroAssembler::call methods generate a sync before the call.
+  // Removing it does not cause any failures when running all of jit-tests.
+ syncStackPtr();
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ // wasm operates without the need for dynamic alignment of SP.
+ MOZ_ASSERT(!(dynamicAlignment_ && callFromWasm));
+
+ // Call boundaries communicate stack via SP, so we must resync PSP now.
+ initPseudoStackPtr();
+
+ freeStack(stackAdjust);
+
+ if (dynamicAlignment_) {
+ // This then-clause makes more sense if you first read
+ // setupUnalignedABICall above.
+ //
+ // Restore the stack pointer from entry. The stack pointer will have been
+ // saved by setupUnalignedABICall. This is fragile in that it assumes
+ // that uses of this routine (callWithABIPost) with `dynamicAlignment_ ==
+ // true` are preceded by matching calls to setupUnalignedABICall. But
+    // there's nothing that enforces that mechanically. If we really want to
+ // enforce this, we could add a debug-only CallWithABIState enum to the
+ // MacroAssembler and assert that setupUnalignedABICall updates it before
+ // we get here, then reset it to its initial state.
+ Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), 0));
+ syncStackPtr();
+
+ // Restore LR. This restores LR to the value stored by
+ // setupUnalignedABICall, which should have been called just before
+ // callWithABIPre. This is, per the above comment, also fragile.
+ pop(lr);
+
+ // SP may be < PSP now. That is expected from the behaviour of `pop`. It
+ // is not clear why the following `syncStackPtr` is necessary, but it is:
+ // without it, the following test segfaults:
+ // tests/backup-point-bug1315634.js
+ syncStackPtr();
+ }
+
+ // If the ABI's return regs are where ION is expecting them, then
+ // no other work needs to be done.
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ movePtr(fun, scratch);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(scratch);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ loadPtr(fun, scratch);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(scratch);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
+ enterNoPool(3);
+ Label fakeCallsite;
+
+ Adr(ARMRegister(scratch, 64), &fakeCallsite);
+ Push(scratch);
+ bind(&fakeCallsite);
+ uint32_t pseudoReturnOffset = currentOffset();
+
+ leaveNoPool();
+ return pseudoReturnOffset;
+}
+
+bool MacroAssemblerCompat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ asMasm().PushFrameDescriptor(FrameType::IonJS);
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+ asMasm().Push(FramePointer);
+ return true;
+}
+
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
+ return;
+ }
+
+ ScratchDoubleScope scratch(*this);
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ if (src == dest) {
+ return;
+ }
+ movePtr(src.valueReg(), dest.valueReg());
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ if (!src.isGCThing()) {
+ movePtr(ImmWord(src.asRawBits()), dest.valueReg());
+ return;
+ }
+
+ BufferOffset load =
+ movePatchablePtr(ImmPtr(src.bitsAsPunboxPointer()), dest.valueReg());
+ writeDataRelocation(src, load);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ And(ARMRegister(buffer, 64), ARMRegister(ptr, 64),
+ Operand(int32_t(~gc::ChunkMask)));
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != ScratchReg &&
+ ptr != ScratchReg2); // Both may be used internally.
+ MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2);
+
+ And(ARMRegister(temp, 64), ARMRegister(ptr, 64),
+ Operand(int32_t(~gc::ChunkMask)));
+ branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
+ ImmWord(0), label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ branchValueIsNurseryCellImpl(cond, address, temp, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ branchValueIsNurseryCellImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
+ const T& value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(temp != ScratchReg &&
+ temp != ScratchReg2); // Both may be used internally.
+
+ Label done;
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+
+ getGCThingValueChunk(value, temp);
+ branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
+ ImmWord(0), label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != lhs.valueReg());
+ moveValue(rhs, ValueOperand(scratch64.asUnsized()));
+ Cmp(ARMRegister(lhs.valueReg(), 64), scratch64);
+ B(label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ boxDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ if (value.constant()) {
+ storeValue(value.value(), dest);
+ } else {
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
+ dest);
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
+
+// ========================================================================
+// wasm support
+
+CodeOffset MacroAssembler::wasmTrapInstruction() {
+ AutoForbidPoolsAndNops afp(this,
+ /* max number of instructions in scope = */ 1);
+ CodeOffset offs(currentOffset());
+ Unreachable();
+ return offs;
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ branch32(cond, index, boundsCheckLimit, ok);
+ if (JitOptions.spectreIndexMasking) {
+ csel(ARMRegister(index, 32), vixl::wzr, ARMRegister(index, 32), cond);
+ }
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ branch32(cond, index, boundsCheckLimit, ok);
+ if (JitOptions.spectreIndexMasking) {
+ csel(ARMRegister(index, 32), vixl::wzr, ARMRegister(index, 32), cond);
+ }
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ branchPtr(cond, index.reg, boundsCheckLimit.reg, ok);
+ if (JitOptions.spectreIndexMasking) {
+ csel(ARMRegister(index.reg, 64), vixl::xzr, ARMRegister(index.reg, 64),
+ cond);
+ }
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ branchPtr(InvertCondition(cond), boundsCheckLimit, index.reg, ok);
+ if (JitOptions.spectreIndexMasking) {
+ csel(ARMRegister(index.reg, 64), vixl::xzr, ARMRegister(index.reg, 64),
+ cond);
+ }
+}
+
+// FCVTZU behaves as follows:
+//
+// on NaN it produces zero
+// on too large it produces UINT_MAX (for appropriate type)
+// on too small it produces zero
+//
+// FCVTZS behaves as follows:
+//
+// on NaN it produces zero
+// on too large it produces INT_MAX (for appropriate type)
+// on too small it produces INT_MIN (ditto)
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input_,
+ Register output_,
+ bool isSaturating,
+ Label* oolEntry) {
+ ARMRegister output(output_, 32);
+ ARMFPRegister input(input_, 64);
+ Fcvtzu(output, input);
+ if (!isSaturating) {
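+    // FCVTZU clamps out-of-range inputs: a result of 0 or UINT32_MAX may mean
+    // NaN or overflow, so branch to the OOL path to classify the input.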
+ Cmp(output, 0);
+ Ccmp(output, -1, vixl::ZFlag, Assembler::NotEqual);
+ B(oolEntry, Assembler::Equal);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input_,
+ Register output_,
+ bool isSaturating,
+ Label* oolEntry) {
+ ARMRegister output(output_, 32);
+ ARMFPRegister input(input_, 32);
+ Fcvtzu(output, input);
+ if (!isSaturating) {
+ Cmp(output, 0);
+ Ccmp(output, -1, vixl::ZFlag, Assembler::NotEqual);
+ B(oolEntry, Assembler::Equal);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input_,
+ Register output_,
+ bool isSaturating,
+ Label* oolEntry) {
+ ARMRegister output(output_, 32);
+ ARMFPRegister input(input_, 64);
+ Fcvtzs(output, input);
+ if (!isSaturating) {
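+    // FCVTZS clamps out-of-range inputs: a result of 0, INT32_MAX or INT32_MIN
+    // may mean NaN or overflow, so branch to the OOL path to classify the input.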
+ Cmp(output, 0);
+ Ccmp(output, INT32_MAX, vixl::ZFlag, Assembler::NotEqual);
+ Ccmp(output, INT32_MIN, vixl::ZFlag, Assembler::NotEqual);
+ B(oolEntry, Assembler::Equal);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input_,
+ Register output_,
+ bool isSaturating,
+ Label* oolEntry) {
+ ARMRegister output(output_, 32);
+ ARMFPRegister input(input_, 32);
+ Fcvtzs(output, input);
+ if (!isSaturating) {
+ Cmp(output, 0);
+ Ccmp(output, INT32_MAX, vixl::ZFlag, Assembler::NotEqual);
+ Ccmp(output, INT32_MIN, vixl::ZFlag, Assembler::NotEqual);
+ B(oolEntry, Assembler::Equal);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input_, Register64 output_, bool isSaturating,
+ Label* oolEntry, Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+
+ ARMRegister output(output_.reg, 64);
+ ARMFPRegister input(input_, 64);
+ Fcvtzu(output, input);
+ if (!isSaturating) {
+ Cmp(output, 0);
+ Ccmp(output, -1, vixl::ZFlag, Assembler::NotEqual);
+ B(oolEntry, Assembler::Equal);
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input_, Register64 output_, bool isSaturating,
+ Label* oolEntry, Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+
+ ARMRegister output(output_.reg, 64);
+ ARMFPRegister input(input_, 32);
+ Fcvtzu(output, input);
+ if (!isSaturating) {
+ Cmp(output, 0);
+ Ccmp(output, -1, vixl::ZFlag, Assembler::NotEqual);
+ B(oolEntry, Assembler::Equal);
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input_, Register64 output_, bool isSaturating,
+ Label* oolEntry, Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+
+ ARMRegister output(output_.reg, 64);
+ ARMFPRegister input(input_, 64);
+ Fcvtzs(output, input);
+ if (!isSaturating) {
+ Cmp(output, 0);
+ Ccmp(output, INT64_MAX, vixl::ZFlag, Assembler::NotEqual);
+ Ccmp(output, INT64_MIN, vixl::ZFlag, Assembler::NotEqual);
+ B(oolEntry, Assembler::Equal);
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input_, Register64 output_, bool isSaturating,
+ Label* oolEntry, Label* oolRejoin, FloatRegister tempDouble) {
+ ARMRegister output(output_.reg, 64);
+ ARMFPRegister input(input_, 32);
+ Fcvtzs(output, input);
+ if (!isSaturating) {
+ Cmp(output, 0);
+ Ccmp(output, INT64_MAX, vixl::ZFlag, Assembler::NotEqual);
+ Ccmp(output, INT64_MIN, vixl::ZFlag, Assembler::NotEqual);
+ B(oolEntry, Assembler::Equal);
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ branchFloat(Assembler::DoubleOrdered, input, input, &notNaN);
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const float two_31 = -float(INT32_MIN);
+ ScratchFloat32Scope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantFloat32(two_31 * 2, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &isOverflow);
+ loadConstantFloat32(-1.0f, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThan, input, fpscratch, rejoin);
+ } else {
+ loadConstantFloat32(two_31, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &isOverflow);
+ loadConstantFloat32(-two_31, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, input, fpscratch, rejoin);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ branchDouble(Assembler::DoubleOrdered, input, input, &notNaN);
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const double two_31 = -double(INT32_MIN);
+ ScratchDoubleScope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantDouble(two_31 * 2, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &isOverflow);
+ loadConstantDouble(-1.0, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThan, input, fpscratch, rejoin);
+ } else {
+ loadConstantDouble(two_31, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &isOverflow);
+ loadConstantDouble(-two_31 - 1, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThan, input, fpscratch, rejoin);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ branchFloat(Assembler::DoubleOrdered, input, input, &notNaN);
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const float two_63 = -float(INT64_MIN);
+ ScratchFloat32Scope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantFloat32(two_63 * 2, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &isOverflow);
+ loadConstantFloat32(-1.0f, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThan, input, fpscratch, rejoin);
+ } else {
+ loadConstantFloat32(two_63, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &isOverflow);
+ loadConstantFloat32(-two_63, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, input, fpscratch, rejoin);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ branchDouble(Assembler::DoubleOrdered, input, input, &notNaN);
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const double two_63 = -double(INT64_MIN);
+ ScratchDoubleScope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantDouble(two_63 * 2, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &isOverflow);
+ loadConstantDouble(-1.0, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThan, input, fpscratch, rejoin);
+ } else {
+ loadConstantDouble(two_63, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &isOverflow);
+ loadConstantDouble(-two_63, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, input, fpscratch, rejoin);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ AnyRegister output) {
+ wasmLoadImpl(access, memoryBase, ptr, output, Register64::Invalid());
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register64 output) {
+ wasmLoadImpl(access, memoryBase, ptr, AnyRegister(), output);
+}
+
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Register memoryBase,
+ Register ptr) {
+ wasmStoreImpl(access, value, Register64::Invalid(), memoryBase, ptr);
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Register memoryBase,
+ Register ptr) {
+ wasmStoreImpl(access, AnyRegister(), value, memoryBase, ptr);
+}
+
+void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ // Wasm stubs use the native SP, not the PSP.
+
+ linkExitFrame(cxreg, scratch);
+
+ MOZ_RELEASE_ASSERT(sp.Is(GetStackPointer64()));
+
+  // SP has to be 16-byte aligned when we do a load/store, so push |type| twice
+  // and then add 8 bytes back to SP. This leaves a single copy of |type| on the
+  // stack with SP no longer 16-byte aligned.
+ move32(Imm32(int32_t(type)), scratch);
+ push(scratch, scratch);
+ Add(sp, sp, 8);
+
+  // Despite the above assertion, it is possible for control to flow from here
+  // to the code generated by
+  // MacroAssemblerCompat::handleFailureWithHandlerTail without any
+  // intervening assignment to PSP. But handleFailureWithHandlerTail assumes
+  // that PSP is the active stack pointer. Hence the following is necessary
+  // for safety. Note we can't use initPseudoStackPtr here: because SP is the
+  // active stack pointer, it would generate no instructions.
+ Mov(PseudoStackPointer64, sp);
+}
+
+void MacroAssembler::widenInt32(Register r) {
+ move32To64ZeroExtend(r, Register64(r));
+}
+
+// ========================================================================
+// Convert floating point.
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ Ucvtf(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ Scvtf(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::convertUInt64ToFloat32(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ Ucvtf(ARMFPRegister(dest, 32), ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
+ Scvtf(ARMFPRegister(dest, 32), ARMRegister(src.reg, 64));
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt64ToDouble(Register64(src), dest);
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
+// The computed MemOperand must be Reg+0 because the load/store exclusive
+// instructions only take a single pointer register.
+
+static MemOperand ComputePointerForAtomic(MacroAssembler& masm,
+ const Address& address,
+ Register scratch) {
+ if (address.offset == 0) {
+ return MemOperand(X(masm, address.base), 0);
+ }
+
+ masm.Add(X(scratch), X(masm, address.base), address.offset);
+ return MemOperand(X(scratch), 0);
+}
+
+static MemOperand ComputePointerForAtomic(MacroAssembler& masm,
+ const BaseIndex& address,
+ Register scratch) {
+ masm.Add(X(scratch), X(masm, address.base),
+ Operand(X(address.index), vixl::LSL, address.scale));
+ if (address.offset) {
+ masm.Add(X(scratch), X(scratch), address.offset);
+ }
+ return MemOperand(X(scratch), 0);
+}
+
+// This sign- or zero-extends src to targetWidth, depending on whether srcType
+// is signed, and leaves any bits above targetWidth zero.
+
+static void SignOrZeroExtend(MacroAssembler& masm, Scalar::Type srcType,
+ Width targetWidth, Register src, Register dest) {
+ bool signExtend = Scalar::isSignedIntType(srcType);
+
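+  // Sbfm/Ubfm with lsb 0 and width 8/16/32 (imms of 7/15/31) are the
+  // SXTB/SXTH/SXTW and UXTB/UXTH/UXTW aliases, respectively.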
+ switch (Scalar::byteSize(srcType)) {
+ case 1:
+ if (signExtend) {
+ masm.Sbfm(R(dest, targetWidth), R(src, targetWidth), 0, 7);
+ } else {
+ masm.Ubfm(R(dest, targetWidth), R(src, targetWidth), 0, 7);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.Sbfm(R(dest, targetWidth), R(src, targetWidth), 0, 15);
+ } else {
+ masm.Ubfm(R(dest, targetWidth), R(src, targetWidth), 0, 15);
+ }
+ break;
+ case 4:
+ if (targetWidth == Width::_64) {
+ if (signExtend) {
+ masm.Sbfm(X(dest), X(src), 0, 31);
+ } else {
+ masm.Ubfm(X(dest), X(src), 0, 31);
+ }
+ } else if (src != dest) {
+ masm.Mov(R(dest, targetWidth), R(src, targetWidth));
+ }
+ break;
+ case 8:
+ if (src != dest) {
+ masm.Mov(R(dest, targetWidth), R(src, targetWidth));
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+// Exclusive-loads zero-extend their values to the full width of the X register.
+//
+// Note that we've promised to leave the high bits of the 64-bit register clear
+// if the targetWidth is 32.
+
+static void LoadExclusive(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type srcType, Width targetWidth,
+ MemOperand ptr, Register dest) {
+ bool signExtend = Scalar::isSignedIntType(srcType);
+
+ // With this address form, a single native ldxr* will be emitted, and the
+ // AutoForbidPoolsAndNops ensures that the metadata is emitted at the address
+ // of the ldxr*.
+ MOZ_ASSERT(ptr.IsImmediateOffset() && ptr.offset() == 0);
+
+ switch (Scalar::byteSize(srcType)) {
+ case 1: {
+ {
+ AutoForbidPoolsAndNops afp(
+ &masm,
+ /* max number of instructions in scope = */ 1);
+ if (access) {
+ masm.append(*access, masm.currentOffset());
+ }
+ masm.Ldxrb(W(dest), ptr);
+ }
+ if (signExtend) {
+ masm.Sbfm(R(dest, targetWidth), R(dest, targetWidth), 0, 7);
+ }
+ break;
+ }
+ case 2: {
+ {
+ AutoForbidPoolsAndNops afp(
+ &masm,
+ /* max number of instructions in scope = */ 1);
+ if (access) {
+ masm.append(*access, masm.currentOffset());
+ }
+ masm.Ldxrh(W(dest), ptr);
+ }
+ if (signExtend) {
+ masm.Sbfm(R(dest, targetWidth), R(dest, targetWidth), 0, 15);
+ }
+ break;
+ }
+ case 4: {
+ {
+ AutoForbidPoolsAndNops afp(
+ &masm,
+ /* max number of instructions in scope = */ 1);
+ if (access) {
+ masm.append(*access, masm.currentOffset());
+ }
+ masm.Ldxr(W(dest), ptr);
+ }
+ if (targetWidth == Width::_64 && signExtend) {
+ masm.Sbfm(X(dest), X(dest), 0, 31);
+ }
+ break;
+ }
+ case 8: {
+ {
+ AutoForbidPoolsAndNops afp(
+ &masm,
+ /* max number of instructions in scope = */ 1);
+ if (access) {
+ masm.append(*access, masm.currentOffset());
+ }
+ masm.Ldxr(X(dest), ptr);
+ }
+ break;
+ }
+ default: {
+ MOZ_CRASH();
+ }
+ }
+}
+
+static void StoreExclusive(MacroAssembler& masm, Scalar::Type type,
+ Register status, Register src, MemOperand ptr) {
+ switch (Scalar::byteSize(type)) {
+ case 1:
+ masm.Stxrb(W(status), W(src), ptr);
+ break;
+ case 2:
+ masm.Stxrh(W(status), W(src), ptr);
+ break;
+ case 4:
+ masm.Stxr(W(status), W(src), ptr);
+ break;
+ case 8:
+ masm.Stxr(W(status), X(src), ptr);
+ break;
+ }
+}
+
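+// Whether the CPU implements the single-instruction (ARMv8.1 LSE) atomics:
+// Cas*, Swp* and the Ld<op>/St<op> family used below.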
+static bool HasAtomicInstructions(MacroAssembler& masm) {
+ return masm.asVIXL().GetCPUFeatures()->Has(vixl::CPUFeatures::kAtomics);
+}
+
+static inline bool SupportedAtomicInstructionOperands(Scalar::Type type,
+ Width targetWidth) {
+ if (targetWidth == Width::_32) {
+ return byteSize(type) <= 4;
+ }
+ if (targetWidth == Width::_64) {
+ return byteSize(type) == 8;
+ }
+ return false;
+}
+
+template <typename T>
+static void CompareExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, Width targetWidth,
+ const Synchronization& sync, const T& mem,
+ Register oldval, Register newval, Register output) {
+ MOZ_ASSERT(oldval != output && newval != output);
+
+ vixl::UseScratchRegisterScope temps(&masm);
+
+ Register ptrScratch = temps.AcquireX().asUnsized();
+ MemOperand ptr = ComputePointerForAtomic(masm, mem, ptrScratch);
+
+ MOZ_ASSERT(ptr.base().asUnsized() != output);
+
+ if (HasAtomicInstructions(masm) &&
+ SupportedAtomicInstructionOperands(type, targetWidth)) {
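+    // Casal compares against and writes the loaded memory value back into the
+    // same register, so seed the output register with oldval first.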
+ masm.Mov(X(output), X(oldval));
+    // Casal uses the same atomic mechanism as Ldxr/Stxr and is assumed to
+    // behave identically within the "Inner Shareable" domain.
+    // gen_cmpxchg in GenerateAtomicOperations.py has not been updated to
+    // match this path.
+ masm.memoryBarrierBefore(sync);
+ if (access) {
+ masm.append(*access, masm.currentOffset());
+ }
+ switch (byteSize(type)) {
+ case 1:
+ masm.Casalb(R(output, targetWidth), R(newval, targetWidth), ptr);
+ break;
+ case 2:
+ masm.Casalh(R(output, targetWidth), R(newval, targetWidth), ptr);
+ break;
+ case 4:
+ case 8:
+ masm.Casal(R(output, targetWidth), R(newval, targetWidth), ptr);
+ break;
+ default:
+ MOZ_CRASH("CompareExchange unsupported type");
+ }
+ masm.memoryBarrierAfter(sync);
+ SignOrZeroExtend(masm, type, targetWidth, output, output);
+ return;
+ }
+
+  // The target doesn't support the single-instruction atomics, so generate an
+  // LL-SC loop. This requires only AArch64 v8.0.
+ Label again;
+ Label done;
+
+ // NOTE: the generated code must match the assembly code in gen_cmpxchg in
+ // GenerateAtomicOperations.py
+ masm.memoryBarrierBefore(sync);
+
+ Register scratch = temps.AcquireX().asUnsized();
+
+ masm.bind(&again);
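+  // scratch doubles as the store-exclusive status register below, so the
+  // extended copy of oldval must be recomputed on every iteration.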
+ SignOrZeroExtend(masm, type, targetWidth, oldval, scratch);
+ LoadExclusive(masm, access, type, targetWidth, ptr, output);
+ masm.Cmp(R(output, targetWidth), R(scratch, targetWidth));
+ masm.B(&done, MacroAssembler::NotEqual);
+ StoreExclusive(masm, type, scratch, newval, ptr);
+ masm.Cbnz(W(scratch), &again);
+ masm.bind(&done);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void AtomicExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, Width targetWidth,
+ const Synchronization& sync, const T& mem,
+ Register value, Register output) {
+ MOZ_ASSERT(value != output);
+
+ vixl::UseScratchRegisterScope temps(&masm);
+
+ Register ptrScratch = temps.AcquireX().asUnsized();
+ MemOperand ptr = ComputePointerForAtomic(masm, mem, ptrScratch);
+
+ if (HasAtomicInstructions(masm) &&
+ SupportedAtomicInstructionOperands(type, targetWidth)) {
+    // Swpal uses the same atomic mechanism as Ldxr/Stxr and is assumed to
+    // behave identically within the "Inner Shareable" domain.
+    // gen_exchange in GenerateAtomicOperations.py has not been updated to
+    // match this path.
+ masm.memoryBarrierBefore(sync);
+ if (access) {
+ masm.append(*access, masm.currentOffset());
+ }
+ switch (byteSize(type)) {
+ case 1:
+ masm.Swpalb(R(value, targetWidth), R(output, targetWidth), ptr);
+ break;
+ case 2:
+ masm.Swpalh(R(value, targetWidth), R(output, targetWidth), ptr);
+ break;
+ case 4:
+ case 8:
+ masm.Swpal(R(value, targetWidth), R(output, targetWidth), ptr);
+ break;
+ default:
+ MOZ_CRASH("AtomicExchange unsupported type");
+ }
+ masm.memoryBarrierAfter(sync);
+ SignOrZeroExtend(masm, type, targetWidth, output, output);
+ return;
+ }
+
+  // The target doesn't support the single-instruction atomics, so generate an
+  // LL-SC loop. This requires only AArch64 v8.0.
+ Label again;
+
+ // NOTE: the generated code must match the assembly code in gen_exchange in
+ // GenerateAtomicOperations.py
+ masm.memoryBarrierBefore(sync);
+
+ Register scratch = temps.AcquireX().asUnsized();
+
+ masm.bind(&again);
+ LoadExclusive(masm, access, type, targetWidth, ptr, output);
+ StoreExclusive(masm, type, scratch, value, ptr);
+ masm.Cbnz(W(scratch), &again);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <bool wantResult, typename T>
+static void AtomicFetchOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, Width targetWidth,
+ const Synchronization& sync, AtomicOp op,
+ const T& mem, Register value, Register temp,
+ Register output) {
+ MOZ_ASSERT(value != output);
+ MOZ_ASSERT(value != temp);
+ MOZ_ASSERT_IF(wantResult, output != temp);
+
+ vixl::UseScratchRegisterScope temps(&masm);
+
+ Register ptrScratch = temps.AcquireX().asUnsized();
+ MemOperand ptr = ComputePointerForAtomic(masm, mem, ptrScratch);
+
+ if (HasAtomicInstructions(masm) &&
+ SupportedAtomicInstructionOperands(type, targetWidth) &&
+ !isFloatingType(type)) {
+    // LdXXXal/StXXXl use the same atomic mechanism as Ldxr/Stxr and are
+    // assumed to behave identically within the "Inner Shareable" domain.
+    // gen_fetchop in GenerateAtomicOperations.py has not been updated to
+    // match this path.
+ masm.memoryBarrierBefore(sync);
+
+#define FETCH_OP_CASE(op, arg) \
+ if (access) { \
+ masm.append(*access, masm.currentOffset()); \
+ } \
+ switch (byteSize(type)) { \
+ case 1: \
+ if (wantResult) { \
+ masm.Ld##op##alb(R(arg, targetWidth), R(output, targetWidth), ptr); \
+ } else { \
+ masm.St##op##lb(R(arg, targetWidth), ptr); \
+ } \
+ break; \
+ case 2: \
+ if (wantResult) { \
+ masm.Ld##op##alh(R(arg, targetWidth), R(output, targetWidth), ptr); \
+ } else { \
+ masm.St##op##lh(R(arg, targetWidth), ptr); \
+ } \
+ break; \
+ case 4: \
+ case 8: \
+ if (wantResult) { \
+ masm.Ld##op##al(R(arg, targetWidth), R(output, targetWidth), ptr); \
+ } else { \
+ masm.St##op##l(R(arg, targetWidth), ptr); \
+ } \
+ break; \
+ default: \
+ MOZ_CRASH("AtomicFetchOp unsupported type"); \
+ }
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ FETCH_OP_CASE(add, value);
+ break;
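+      // There is no atomic subtract or and: subtraction is performed by
+      // atomically adding the negated value, and AND by Ldclr/Stclr (which
+      // clear the bits set in the operand) applied to the complemented value.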
+ case AtomicFetchSubOp: {
+ Register scratch = temps.AcquireX().asUnsized();
+ masm.Neg(X(scratch), X(value));
+ FETCH_OP_CASE(add, scratch);
+ break;
+ }
+ case AtomicFetchAndOp: {
+ Register scratch = temps.AcquireX().asUnsized();
+ masm.Eor(X(scratch), X(value), Operand(~0));
+ FETCH_OP_CASE(clr, scratch);
+ break;
+ }
+ case AtomicFetchOrOp:
+ FETCH_OP_CASE(set, value);
+ break;
+ case AtomicFetchXorOp:
+ FETCH_OP_CASE(eor, value);
+ break;
+ }
+ masm.memoryBarrierAfter(sync);
+ if (wantResult) {
+ SignOrZeroExtend(masm, type, targetWidth, output, output);
+ }
+ return;
+ }
+
+#undef FETCH_OP_CASE
+
+  // The target doesn't support the single-instruction atomics, so generate an
+  // LL-SC loop. This requires only AArch64 v8.0.
+ Label again;
+
+ // NOTE: the generated code must match the assembly code in gen_fetchop in
+ // GenerateAtomicOperations.py
+ masm.memoryBarrierBefore(sync);
+
+ Register scratch = temps.AcquireX().asUnsized();
+
+ masm.bind(&again);
+ LoadExclusive(masm, access, type, targetWidth, ptr, output);
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.Add(X(temp), X(output), X(value));
+ break;
+ case AtomicFetchSubOp:
+ masm.Sub(X(temp), X(output), X(value));
+ break;
+ case AtomicFetchAndOp:
+ masm.And(X(temp), X(output), X(value));
+ break;
+ case AtomicFetchOrOp:
+ masm.Orr(X(temp), X(output), X(value));
+ break;
+ case AtomicFetchXorOp:
+ masm.Eor(X(temp), X(output), X(value));
+ break;
+ }
+ StoreExclusive(masm, type, scratch, temp, ptr);
+ masm.Cbnz(W(scratch), &again);
+ if (wantResult) {
+ SignOrZeroExtend(masm, type, targetWidth, output, output);
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, nullptr, type, Width::_32, sync, mem, oldval, newval,
+ output);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, nullptr, type, Width::_32, sync, mem, oldval, newval,
+ output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const Address& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
+ expect.reg, replace.reg, output.reg);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
+ expect.reg, replace.reg, output.reg);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
+ value.reg, output.reg);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
+ value.reg, output.reg);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp<true>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
+ value.reg, temp.reg, output.reg);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp<true>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
+ value.reg, temp.reg, output.reg);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp) {
+ AtomicFetchOp<false>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
+ value.reg, temp.reg, temp.reg);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp) {
+ AtomicFetchOp<false>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
+ value.reg, temp.reg, temp.reg);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, &access, access.type(), Width::_32, access.sync(), mem,
+ oldval, newval, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, &access, access.type(), Width::_32, access.sync(), mem,
+ oldval, newval, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, nullptr, type, Width::_32, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, nullptr, type, Width::_32, sync, mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, &access, access.type(), Width::_32, access.sync(), mem,
+ value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, &access, access.type(), Width::_32, access.sync(), mem,
+ value, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register temp, Register output) {
+ AtomicFetchOp<true>(*this, nullptr, type, Width::_32, sync, op, mem, value,
+ temp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register temp, Register output) {
+ AtomicFetchOp<true>(*this, nullptr, type, Width::_32, sync, op, mem, value,
+ temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
+ AtomicFetchOp<true>(*this, &access, access.type(), Width::_32, access.sync(),
+ op, mem, value, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
+ AtomicFetchOp<true>(*this, &access, access.type(), Width::_32, access.sync(),
+ op, mem, value, temp, output);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp) {
+ AtomicFetchOp<false>(*this, &access, access.type(), Width::_32, access.sync(),
+ op, mem, value, temp, temp);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp) {
+ AtomicFetchOp<false>(*this, &access, access.type(), Width::_32, access.sync(),
+ op, mem, value, temp, temp);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange(*this, &access, Scalar::Int64, Width::_64, access.sync(), mem,
+ expect.reg, replace.reg, output.reg);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange(*this, &access, Scalar::Int64, Width::_64, access.sync(), mem,
+ expect.reg, replace.reg, output.reg);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange(*this, &access, Scalar::Int64, Width::_64, access.sync(), mem,
+ value.reg, output.reg);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 value, Register64 output) {
+ AtomicExchange(*this, &access, Scalar::Int64, Width::_64, access.sync(), mem,
+ value.reg, output.reg);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp<true>(*this, &access, Scalar::Int64, Width::_64, access.sync(),
+ op, mem, value.reg, temp.reg, output.reg);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp<true>(*this, &access, Scalar::Int64, Width::_64, access.sync(),
+ op, mem, value.reg, temp.reg, output.reg);
+}
+
+void MacroAssembler::wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem,
+ Register64 temp) {
+ AtomicFetchOp<false>(*this, &access, Scalar::Int64, Width::_64, access.sync(),
+ op, mem, value.reg, temp.reg, temp.reg);
+}
+
+// ========================================================================
+// JS atomic operations.
+
+template <typename T>
+static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register oldval, Register newval, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
+ }
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register temp,
+ AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register temp,
+ AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+template <typename T>
+static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register value, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicExchange(arrayType, sync, mem, value, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register temp, AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register temp, AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const T& mem, Register temp1,
+ Register temp2, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
+ masm.convertUInt32ToDouble(temp1, output.fpu());
+ } else {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register temp1, Register temp2,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register temp1, Register temp2,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register temp) {
+ AtomicFetchOp<false>(*this, nullptr, arrayType, Width::_32, sync, op, mem,
+ value, temp, temp);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register temp) {
+ AtomicFetchOp<false>(*this, nullptr, arrayType, Width::_32, sync, op, mem,
+ value, temp, temp);
+}
+
+void MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ quotient32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ remainder32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
+ Register remOutput, bool isUnsigned,
+ const LiveRegisterSet&) {
+ vixl::UseScratchRegisterScope temps(this);
+ ARMRegister scratch = temps.AcquireW();
+ ARMRegister src = temps.AcquireW();
+
+ // Preserve src for remainder computation
+ Mov(src, ARMRegister(srcDest, 32));
+
+ if (isUnsigned) {
+ Udiv(ARMRegister(srcDest, 32), src, ARMRegister(rhs, 32));
+ } else {
+ Sdiv(ARMRegister(srcDest, 32), src, ARMRegister(rhs, 32));
+ }
+  // Compute the remainder: remainder = src - quotient * rhs.
+ Mul(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
+ Sub(ARMRegister(remOutput, 32), src, scratch);
+}
+
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ AutoForbidPoolsAndNops afp(this,
+ /* max number of instructions in scope = */ 1);
+ CodeOffset offset(currentOffset());
+ adr(ARMRegister(dest, 64), 0, LabelDoc());
+ return offset;
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ ptrdiff_t off = target - loc;
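+  // Adr encodes a 21-bit signed byte offset (about +/-1 MiB), so the target
+  // must be within range of the instruction being patched.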
+ MOZ_RELEASE_ASSERT(vixl::IsInt21(off));
+
+ Instruction* cur = reinterpret_cast<Instruction*>(loc.raw());
+ MOZ_ASSERT(cur->IsADR());
+
+ vixl::Register rd = vixl::Register::XRegFromCode(cur->Rd());
+ adr(cur, rd, off);
+}
+
+// ========================================================================
+// Spectre Mitigations.
+
+void MacroAssembler::speculationBarrier() {
+ // Conditional speculation barrier.
+ csdb();
+}
+
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ARMFPRegister iFlt(src, 32);
+ ARMRegister o64(dest, 64);
+ ARMRegister o32(dest, 32);
+
+ Label handleZero;
+ Label fin;
+
+ // Handle ±0 and NaN first.
+ Fcmp(iFlt, 0.0);
+ B(Assembler::Equal, &handleZero);
+  // NaN is always a bail condition, so bail directly.
+ B(Assembler::Overflow, fail);
+
+ // Round towards negative infinity.
+ Fcvtms(o64, iFlt);
+
+ // Sign extend lower 32 bits to test if the result isn't an Int32.
+ Cmp(o64, Operand(o64, vixl::SXTW));
+ B(NotEqual, fail);
+
+ // Clear upper 32 bits.
+ Uxtw(o64, o64);
+ B(&fin);
+
+ bind(&handleZero);
+  // Move the raw bits of the float into the output reg; if they are non-zero,
+  // then the original value was -0.0.
+ Fmov(o32, iFlt);
+ Cbnz(o32, fail);
+ bind(&fin);
+}
+
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ARMFPRegister iDbl(src, 64);
+ ARMRegister o64(dest, 64);
+ ARMRegister o32(dest, 32);
+
+ Label handleZero;
+ Label fin;
+
+ // Handle ±0 and NaN first.
+ Fcmp(iDbl, 0.0);
+ B(Assembler::Equal, &handleZero);
+  // NaN is always a bail condition, so bail directly.
+ B(Assembler::Overflow, fail);
+
+ // Round towards negative infinity.
+ Fcvtms(o64, iDbl);
+
+ // Sign extend lower 32 bits to test if the result isn't an Int32.
+ Cmp(o64, Operand(o64, vixl::SXTW));
+ B(NotEqual, fail);
+
+ // Clear upper 32 bits.
+ Uxtw(o64, o64);
+ B(&fin);
+
+ bind(&handleZero);
+  // Move the raw bits of the double into the output reg; if they are non-zero,
+  // then the original value was -0.0.
+ Fmov(o64, iDbl);
+ Cbnz(o64, fail);
+ bind(&fin);
+}
+
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ARMFPRegister iFlt(src, 32);
+ ARMRegister o64(dest, 64);
+ ARMRegister o32(dest, 32);
+
+ Label handleZero;
+ Label fin;
+
+ // Round towards positive infinity.
+ Fcvtps(o64, iFlt);
+
+ // Sign extend lower 32 bits to test if the result isn't an Int32.
+ Cmp(o64, Operand(o64, vixl::SXTW));
+ B(NotEqual, fail);
+
+ // We have to check for (-1, -0] and NaN when the result is zero.
+ Cbz(o64, &handleZero);
+
+ // Clear upper 32 bits.
+ Uxtw(o64, o64);
+ B(&fin);
+
+ // Bail if the input is in (-1, -0] or NaN.
+ bind(&handleZero);
+  // Move the raw bits of the float into the output reg; if they are non-zero,
+  // then the original value wasn't +0.0.
+ Fmov(o32, iFlt);
+ Cbnz(o32, fail);
+ bind(&fin);
+}
+
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ARMFPRegister iDbl(src, 64);
+ ARMRegister o64(dest, 64);
+ ARMRegister o32(dest, 32);
+
+ Label handleZero;
+ Label fin;
+
+ // Round towards positive infinity.
+ Fcvtps(o64, iDbl);
+
+ // Sign extend lower 32 bits to test if the result isn't an Int32.
+ Cmp(o64, Operand(o64, vixl::SXTW));
+ B(NotEqual, fail);
+
+ // We have to check for (-1, -0] and NaN when the result is zero.
+ Cbz(o64, &handleZero);
+
+ // Clear upper 32 bits.
+ Uxtw(o64, o64);
+ B(&fin);
+
+ // Bail if the input is in (-1, -0] or NaN.
+ bind(&handleZero);
+  // Move the raw bits of the double into the output reg; if they are non-zero,
+  // then the original value wasn't +0.0.
+ Fmov(o64, iDbl);
+ Cbnz(o64, fail);
+ bind(&fin);
+}
+
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ARMFPRegister src32(src, 32);
+ ARMRegister dest32(dest, 32);
+ ARMRegister dest64(dest, 64);
+
+ Label done, zeroCase;
+
+ // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ Fcvtzs(dest64, src32);
+
+ // If the output was zero, worry about special cases.
+ Cbz(dest64, &zeroCase);
+
+ // Sign extend lower 32 bits to test if the result isn't an Int32.
+ Cmp(dest64, Operand(dest64, vixl::SXTW));
+ B(NotEqual, fail);
+
+ // Clear upper 32 bits.
+ Uxtw(dest64, dest64);
+
+ // If the output was non-zero and wasn't saturated, just return it.
+ B(&done);
+
+ // Handle the case of a zero output:
+ // 1. The input may have been NaN, requiring a failure.
+ // 2. The input may have been in (-1,-0], requiring a failure.
+ {
+ bind(&zeroCase);
+
+ // Combine test for negative and NaN values using a single bitwise
+ // operation.
+ //
+ // | Decimal number | Bitwise representation |
+ // |----------------|------------------------|
+ // | -0 | 8000'0000 |
+ // | +0 | 0000'0000 |
+ // | +1 | 3f80'0000 |
+ // | NaN (or +Inf) | 7fyx'xxxx, y >= 8 |
+ // | -NaN (or -Inf) | ffyx'xxxx, y >= 8 |
+ //
+    // If either of the two most significant bits is set, the number isn't in
+    // [0, 1).
+ // (Recall that floating point numbers, except for NaN, are strictly ordered
+ // when comparing their bitwise representation as signed integers.)
+
+ Fmov(dest32, src32);
+ Lsr(dest32, dest32, 30);
+ Cbnz(dest32, fail);
+ }
+
+ bind(&done);
+}
+
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ARMFPRegister src64(src, 64);
+ ARMRegister dest64(dest, 64);
+ ARMRegister dest32(dest, 32);
+
+ Label done, zeroCase;
+
+ // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ Fcvtzs(dest64, src64);
+
+ // If the output was zero, worry about special cases.
+ Cbz(dest64, &zeroCase);
+
+ // Sign extend lower 32 bits to test if the result isn't an Int32.
+ Cmp(dest64, Operand(dest64, vixl::SXTW));
+ B(NotEqual, fail);
+
+ // Clear upper 32 bits.
+ Uxtw(dest64, dest64);
+
+ // If the output was non-zero and wasn't saturated, just return it.
+ B(&done);
+
+ // Handle the case of a zero output:
+ // 1. The input may have been NaN, requiring a failure.
+ // 2. The input may have been in (-1,-0], requiring a failure.
+ {
+ bind(&zeroCase);
+
+ // Combine test for negative and NaN values using a single bitwise
+ // operation.
+ //
+ // | Decimal number | Bitwise representation |
+ // |----------------|------------------------|
+ // | -0 | 8000'0000'0000'0000 |
+ // | +0 | 0000'0000'0000'0000 |
+ // | +1 | 3ff0'0000'0000'0000 |
+ // | NaN (or +Inf) | 7ffx'xxxx'xxxx'xxxx |
+ // | -NaN (or -Inf) | fffx'xxxx'xxxx'xxxx |
+ //
+    // If either of the two most significant bits is set, the number isn't in
+    // [0, 1).
+ // (Recall that floating point numbers, except for NaN, are strictly ordered
+ // when comparing their bitwise representation as signed integers.)
+
+ Fmov(dest64, src64);
+ Lsr(dest64, dest64, 62);
+ Cbnz(dest64, fail);
+ }
+
+ bind(&done);
+}
+
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ARMFPRegister src32(src, 32);
+ ARMRegister dest32(dest, 32);
+ ARMRegister dest64(dest, 64);
+
+ Label negative, saturated, done;
+
+ // Branch to a slow path if input < 0.0 due to complicated rounding rules.
+ // Note that Fcmp with NaN unsets the negative flag.
+ Fcmp(src32, 0.0);
+ B(&negative, Assembler::Condition::lo);
+
+ // Handle the simple case of a positive input, and also -0 and NaN.
+ // Rounding proceeds with consideration of the fractional part of the input:
+ // 1. If > 0.5, round to integer with higher absolute value (so, up).
+ // 2. If < 0.5, round to integer with lower absolute value (so, down).
+ // 3. If = 0.5, round to +Infinity (so, up).
+ {
+ // Convert to signed 64-bit integer, rounding halfway cases away from zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ Fcvtas(dest64, src32);
+
+ // In the case of zero, the input may have been NaN or -0, which must bail.
+ Cbnz(dest64, &saturated);
+
+ // Combine test for -0 and NaN values using a single bitwise operation.
+ // See truncFloat32ToInt32 for an explanation.
+ Fmov(dest32, src32);
+ Lsr(dest32, dest32, 30);
+ Cbnz(dest32, fail);
+
+ B(&done);
+ }
+
+ // Handle the complicated case of a negative input.
+ // Rounding proceeds with consideration of the fractional part of the input:
+ // 1. If > 0.5, round to integer with higher absolute value (so, down).
+ // 2. If < 0.5, round to integer with lower absolute value (so, up).
+ // 3. If = 0.5, round to +Infinity (so, up).
+ bind(&negative);
+ {
+ // Inputs in [-0.5, 0) are rounded to -0. Fail.
+ loadConstantFloat32(-0.5f, temp);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, src, temp, fail);
+
+    // Other negative inputs need the biggest float less than 0.5 added.
+ loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
+ addFloat32(src, temp);
+
+ // Round all values toward -Infinity.
+ // In the case of overflow, the output is saturated.
+ // NaN and -0 are already handled by the "positive number" path above.
+ Fcvtms(dest64, temp);
+ }
+
+ bind(&saturated);
+
+ // Sign extend lower 32 bits to test if the result isn't an Int32.
+ Cmp(dest64, Operand(dest64, vixl::SXTW));
+ B(NotEqual, fail);
+
+ // Clear upper 32 bits.
+ Uxtw(dest64, dest64);
+
+ bind(&done);
+}
+
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ARMFPRegister src64(src, 64);
+ ARMRegister dest64(dest, 64);
+ ARMRegister dest32(dest, 32);
+
+ Label negative, saturated, done;
+
+ // Branch to a slow path if input < 0.0 due to complicated rounding rules.
+ // Note that Fcmp with NaN unsets the negative flag.
+ Fcmp(src64, 0.0);
+ B(&negative, Assembler::Condition::lo);
+
+ // Handle the simple case of a positive input, and also -0 and NaN.
+ // Rounding proceeds with consideration of the fractional part of the input:
+ // 1. If > 0.5, round to integer with higher absolute value (so, up).
+ // 2. If < 0.5, round to integer with lower absolute value (so, down).
+ // 3. If = 0.5, round to +Infinity (so, up).
+ {
+ // Convert to signed 64-bit integer, rounding halfway cases away from zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ Fcvtas(dest64, src64);
+
+ // In the case of zero, the input may have been NaN or -0, which must bail.
+ Cbnz(dest64, &saturated);
+
+ // Combine test for -0 and NaN values using a single bitwise operation.
+ // See truncDoubleToInt32 for an explanation.
+ Fmov(dest64, src64);
+ Lsr(dest64, dest64, 62);
+ Cbnz(dest64, fail);
+
+ B(&done);
+ }
+
+ // Handle the complicated case of a negative input.
+ // Rounding proceeds with consideration of the fractional part of the input:
+ // 1. If > 0.5, round to integer with higher absolute value (so, down).
+ // 2. If < 0.5, round to integer with lower absolute value (so, up).
+ // 3. If = 0.5, round to +Infinity (so, up).
+ bind(&negative);
+ {
+ // Inputs in [-0.5, 0) are rounded to -0. Fail.
+ loadConstantDouble(-0.5, temp);
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, src, temp, fail);
+
+ // Other negative inputs need the biggest double less than 0.5 added.
+ loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+ addDouble(src, temp);
+
+ // Round all values toward -Infinity.
+ // In the case of overflow, the output is saturated.
+ // NaN and -0 are already handled by the "positive number" path above.
+ Fcvtms(dest64, temp);
+ }
+
+ bind(&saturated);
+
+ // Sign extend lower 32 bits to test if the result isn't an Int32.
+ Cmp(dest64, Operand(dest64, vixl::SXTW));
+ B(NotEqual, fail);
+
+ // Clear upper 32 bits.
+ Uxtw(dest64, dest64);
+
+ bind(&done);
+}
+
+void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ switch (mode) {
+ case RoundingMode::Up:
+ frintp(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+ return;
+ case RoundingMode::Down:
+ frintm(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+ return;
+ case RoundingMode::NearestTiesToEven:
+ frintn(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+ return;
+ case RoundingMode::TowardsZero:
+ frintz(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+ return;
+ }
+ MOZ_CRASH("unexpected mode");
+}
+
+void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ switch (mode) {
+ case RoundingMode::Up:
+ frintp(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+ return;
+ case RoundingMode::Down:
+ frintm(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+ return;
+ case RoundingMode::NearestTiesToEven:
+ frintn(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+ return;
+ case RoundingMode::TowardsZero:
+ frintz(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+ return;
+ }
+ MOZ_CRASH("unexpected mode");
+}
+
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ ScratchDoubleScope scratch(*this);
+
+ // Double with only the sign bit set
+ loadConstantDouble(-0.0, scratch);
+
+ if (lhs != output) {
+ moveDouble(lhs, output);
+ }
+
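+  // Bit (bitwise insert if true) copies bits from rhs into output wherever
+  // the corresponding bit of scratch is set; since scratch holds only the
+  // sign bit, this copies the sign of rhs onto output.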
+ bit(ARMFPRegister(output.encoding(), vixl::VectorFormat::kFormat8B),
+ ARMFPRegister(rhs.encoding(), vixl::VectorFormat::kFormat8B),
+ ARMFPRegister(scratch.encoding(), vixl::VectorFormat::kFormat8B));
+}
+
+void MacroAssembler::copySignFloat32(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ ScratchFloat32Scope scratch(*this);
+
+ // Float with only the sign bit set
+ loadConstantFloat32(-0.0f, scratch);
+
+ if (lhs != output) {
+ moveFloat32(lhs, output);
+ }
+
+ bit(ARMFPRegister(output.encoding(), vixl::VectorFormat::kFormat8B),
+ ARMFPRegister(rhs.encoding(), vixl::VectorFormat::kFormat8B),
+ ARMFPRegister(scratch.encoding(), vixl::VectorFormat::kFormat8B));
+}
+
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+ Register pointer) {
+ Add(ARMRegister(pointer, 64), ARMRegister(pointer, 64),
+ Operand(ARMRegister(indexTemp32, 64), vixl::LSL, shift));
+}
+
+//}}} check_macroassembler_style
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.h b/js/src/jit/arm64/MacroAssembler-arm64.h
new file mode 100644
index 0000000000..edfd8c9d3e
--- /dev/null
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -0,0 +1,2206 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_MacroAssembler_arm64_h
+#define jit_arm64_MacroAssembler_arm64_h
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#include "jit/arm64/vixl/MacroAssembler-vixl.h"
+#include "jit/AtomicOp.h"
+#include "jit/MoveResolver.h"
+#include "vm/BigIntType.h" // JS::BigInt
+#include "wasm/WasmBuiltins.h"
+
+#ifdef _M_ARM64
+# ifdef move32
+# undef move32
+# endif
+# ifdef move64
+# undef move64
+# endif
+#endif
+
+namespace js {
+namespace jit {
+
+// Import VIXL operands directly into the jit namespace for shared code.
+using vixl::MemOperand;
+using vixl::Operand;
+
+struct ImmShiftedTag : public ImmWord {
+ explicit ImmShiftedTag(JSValueShiftedTag shtag) : ImmWord((uintptr_t)shtag) {}
+
+ explicit ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) {
+ }
+};
+
+struct ImmTag : public Imm32 {
+ explicit ImmTag(JSValueTag tag) : Imm32(tag) {}
+};
+
+class ScratchTagScope;
+
+class MacroAssemblerCompat : public vixl::MacroAssembler {
+ public:
+ typedef vixl::Condition Condition;
+
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ js::jit::MacroAssembler& asMasm();
+ const js::jit::MacroAssembler& asMasm() const;
+
+ public:
+ // Restrict to only VIXL-internal functions.
+ vixl::MacroAssembler& asVIXL();
+ const MacroAssembler& asVIXL() const;
+
+ protected:
+ bool enoughMemory_;
+ uint32_t framePushed_;
+
+ MacroAssemblerCompat()
+ : vixl::MacroAssembler(), enoughMemory_(true), framePushed_(0) {}
+
+ protected:
+ MoveResolver moveResolver_;
+
+ public:
+ bool oom() const { return Assembler::oom() || !enoughMemory_; }
+ static ARMRegister toARMRegister(RegisterOrSP r, size_t size) {
+ if (IsHiddenSP(r)) {
+ MOZ_ASSERT(size == 64);
+ return sp;
+ }
+ return ARMRegister(AsRegister(r), size);
+ }
+ static MemOperand toMemOperand(const Address& a) {
+ return MemOperand(toARMRegister(a.base, 64), a.offset);
+ }
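+  // Emit a load or store to a BaseIndex address, falling back to a scratch
+  // register when the offset or scale can't be encoded directly.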
+ void doBaseIndex(const vixl::CPURegister& rt, const BaseIndex& addr,
+ vixl::LoadStoreOp op) {
+ const ARMRegister base = toARMRegister(addr.base, 64);
+ const ARMRegister index = ARMRegister(addr.index, 64);
+ const unsigned scale = addr.scale;
+
+ if (!addr.offset &&
+ (!scale || scale == static_cast<unsigned>(CalcLSDataSize(op)))) {
+ LoadStoreMacro(rt, MemOperand(base, index, vixl::LSL, scale), op);
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(this);
+ ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(!scratch64.Is(rt));
+ MOZ_ASSERT(!scratch64.Is(base));
+ MOZ_ASSERT(!scratch64.Is(index));
+
+ Add(scratch64, base, Operand(index, vixl::LSL, scale));
+ LoadStoreMacro(rt, MemOperand(scratch64, addr.offset), op);
+ }
+ void Push(ARMRegister reg) {
+ push(reg);
+ adjustFrame(reg.size() / 8);
+ }
+ void Push(Register reg) {
+ vixl::MacroAssembler::Push(ARMRegister(reg, 64));
+ adjustFrame(8);
+ }
+ void Push(Imm32 imm) {
+ push(imm);
+ adjustFrame(8);
+ }
+ void Push(FloatRegister f) {
+ push(ARMFPRegister(f, 64));
+ adjustFrame(8);
+ }
+ void Push(ImmPtr imm) {
+ push(imm);
+ adjustFrame(sizeof(void*));
+ }
+ void push(FloatRegister f) {
+ vixl::MacroAssembler::Push(ARMFPRegister(f, 64));
+ }
+ void push(ARMFPRegister f) { vixl::MacroAssembler::Push(f); }
+ void push(Imm32 imm) {
+ if (imm.value == 0) {
+ vixl::MacroAssembler::Push(vixl::xzr);
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ move32(imm, scratch64.asUnsized());
+ vixl::MacroAssembler::Push(scratch64);
+ }
+ }
+ void push(ImmWord imm) {
+ if (imm.value == 0) {
+ vixl::MacroAssembler::Push(vixl::xzr);
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ Mov(scratch64, imm.value);
+ vixl::MacroAssembler::Push(scratch64);
+ }
+ }
+ void push(ImmPtr imm) {
+ if (imm.value == nullptr) {
+ vixl::MacroAssembler::Push(vixl::xzr);
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ movePtr(imm, scratch64.asUnsized());
+ vixl::MacroAssembler::Push(scratch64);
+ }
+ }
+ void push(ImmGCPtr imm) {
+ if (imm.value == nullptr) {
+ vixl::MacroAssembler::Push(vixl::xzr);
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ movePtr(imm, scratch64.asUnsized());
+ vixl::MacroAssembler::Push(scratch64);
+ }
+ }
+ void push(ARMRegister reg) { vixl::MacroAssembler::Push(reg); }
+ void push(Address a) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(a.base != scratch64.asUnsized());
+ loadPtr(a, scratch64.asUnsized());
+ vixl::MacroAssembler::Push(scratch64);
+ }
+
+ // Push registers.
+ void push(Register reg) { vixl::MacroAssembler::Push(ARMRegister(reg, 64)); }
+ void push(RegisterOrSP reg) {
+    if (IsHiddenSP(reg)) {
+      vixl::MacroAssembler::Push(sp);
+    } else {
+      vixl::MacroAssembler::Push(toARMRegister(reg, 64));
+    }
+  }
+ void push(Register r0, Register r1) {
+ vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64));
+ }
+ void push(Register r0, Register r1, Register r2) {
+ vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
+ ARMRegister(r2, 64));
+ }
+ void push(Register r0, Register r1, Register r2, Register r3) {
+ vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
+ ARMRegister(r2, 64), ARMRegister(r3, 64));
+ }
+ void push(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2,
+ ARMFPRegister r3) {
+ vixl::MacroAssembler::Push(r0, r1, r2, r3);
+ }
+
+ // Pop registers.
+ void pop(Register reg) { vixl::MacroAssembler::Pop(ARMRegister(reg, 64)); }
+ void pop(Register r0, Register r1) {
+ vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64));
+ }
+ void pop(Register r0, Register r1, Register r2) {
+ vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
+ ARMRegister(r2, 64));
+ }
+ void pop(Register r0, Register r1, Register r2, Register r3) {
+ vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
+ ARMRegister(r2, 64), ARMRegister(r3, 64));
+ }
+ void pop(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2,
+ ARMFPRegister r3) {
+ vixl::MacroAssembler::Pop(r0, r1, r2, r3);
+ }
+
+ void pop(const ValueOperand& v) { pop(v.valueReg()); }
+ void pop(const FloatRegister& f) {
+ vixl::MacroAssembler::Pop(ARMFPRegister(f, 64));
+ }
+
+ void implicitPop(uint32_t args) {
+ MOZ_ASSERT(args % sizeof(intptr_t) == 0);
+ adjustFrame(0 - args);
+ }
+ void Pop(ARMRegister r) {
+ vixl::MacroAssembler::Pop(r);
+ adjustFrame(0 - r.size() / 8);
+ }
+ // FIXME: This is the same on every arch.
+ // FIXME: If we can share framePushed_, we can share this.
+ // FIXME: Or just make it at the highest level.
+ CodeOffset PushWithPatch(ImmWord word) {
+ framePushed_ += sizeof(word.value);
+ return pushWithPatch(word);
+ }
+ CodeOffset PushWithPatch(ImmPtr ptr) {
+ return PushWithPatch(ImmWord(uintptr_t(ptr.value)));
+ }
+
+ uint32_t framePushed() const { return framePushed_; }
+ void adjustFrame(int32_t diff) { setFramePushed(framePushed_ + diff); }
+
+ void setFramePushed(uint32_t framePushed) { framePushed_ = framePushed; }
+
+ void freeStack(Register amount) {
+ vixl::MacroAssembler::Drop(Operand(ARMRegister(amount, 64)));
+ }
+
+ // Update sp with the value of the current active stack pointer, if necessary.
+ void syncStackPtr() {
+ if (!GetStackPointer64().Is(vixl::sp)) {
+ Mov(vixl::sp, GetStackPointer64());
+ }
+ }
+ void initPseudoStackPtr() {
+ if (!GetStackPointer64().Is(vixl::sp)) {
+ Mov(GetStackPointer64(), vixl::sp);
+ }
+ }
+ // In debug builds only, cause a trap if PSP is active and PSP != SP
+ void assertStackPtrsSynced(uint32_t id) {
+#ifdef DEBUG
+ // The add and sub instructions below will only take a 12-bit immediate.
+ MOZ_ASSERT(id <= 0xFFF);
+ if (!GetStackPointer64().Is(vixl::sp)) {
+ Label ok;
+ // Add a marker, so we can figure out who requested the check when
+ // inspecting the generated code. Note, a more concise way to encode
+ // the marker would be to use it as an immediate for the `brk`
+ // instruction as generated by `Unreachable()`, and removing the add/sub.
+ Add(GetStackPointer64(), GetStackPointer64(), Operand(id));
+ Sub(GetStackPointer64(), GetStackPointer64(), Operand(id));
+ Cmp(vixl::sp, GetStackPointer64());
+ B(Equal, &ok);
+ Unreachable();
+ bind(&ok);
+ }
+#endif
+ }
+ // In debug builds only, add a marker that doesn't change the machine's
+ // state. Note these markers are x16-based, as opposed to the x28-based
+ // ones made by `assertStackPtrsSynced`.
+ void addMarker(uint32_t id) {
+#ifdef DEBUG
+ // Only 12 bits of immediate are allowed.
+ MOZ_ASSERT(id <= 0xFFF);
+ ARMRegister x16 = ARMRegister(r16, 64);
+ Add(x16, x16, Operand(id));
+ Sub(x16, x16, Operand(id));
+#endif
+ }
+
+ void storeValue(ValueOperand val, const Address& dest) {
+ storePtr(val.valueReg(), dest);
+ }
+
+ template <typename T>
+ void storeValue(JSValueType type, Register reg, const T& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != reg);
+ tagValue(type, reg, ValueOperand(scratch));
+ storeValue(ValueOperand(scratch), dest);
+ }
+ template <typename T>
+ void storeValue(const Value& val, const T& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ moveValue(val, ValueOperand(scratch));
+ storeValue(ValueOperand(scratch), dest);
+ }
+ void storeValue(ValueOperand val, BaseIndex dest) {
+ storePtr(val.valueReg(), dest);
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+
+ void storePrivateValue(Register src, const Address& dest) {
+ storePtr(src, dest);
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ storePtr(imm, dest);
+ }
+
+ void loadValue(Address src, Register val) {
+ Ldr(ARMRegister(val, 64), MemOperand(src));
+ }
+ void loadValue(Address src, ValueOperand val) {
+ Ldr(ARMRegister(val.valueReg(), 64), MemOperand(src));
+ }
+ void loadValue(const BaseIndex& src, ValueOperand val) {
+ doBaseIndex(ARMRegister(val.valueReg(), 64), src, vixl::LDR_x);
+ }
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+ void tagValue(JSValueType type, Register payload, ValueOperand dest) {
+ // This could be cleverer, but the first attempt had bugs.
+ Orr(ARMRegister(dest.valueReg(), 64), ARMRegister(payload, 64),
+ Operand(ImmShiftedTag(type).value));
+ }
+ void pushValue(ValueOperand val) {
+ vixl::MacroAssembler::Push(ARMRegister(val.valueReg(), 64));
+ }
+ void popValue(ValueOperand val) {
+ vixl::MacroAssembler::Pop(ARMRegister(val.valueReg(), 64));
+ // SP may be < PSP now (that's OK).
+ // eg testcase: tests/backup-point-bug1315634.js
+ }
+ void pushValue(const Value& val) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ if (val.isGCThing()) {
+ BufferOffset load =
+ movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), scratch);
+ writeDataRelocation(val, load);
+ push(scratch);
+ } else {
+ moveValue(val, scratch);
+ push(scratch);
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != reg);
+ tagValue(type, reg, ValueOperand(scratch));
+ push(scratch);
+ }
+ void pushValue(const Address& addr) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != addr.base);
+ loadValue(addr, scratch);
+ push(scratch);
+ }
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ loadValue(addr, ValueOperand(scratch));
+ pushValue(ValueOperand(scratch));
+ }
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8: {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, scratch);
+ } else {
+ unboxNonDouble(value, scratch, type);
+ }
+ storePtr(scratch, address);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+ void moveValue(const Value& val, Register dest) {
+ if (val.isGCThing()) {
+ BufferOffset load =
+ movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), dest);
+ writeDataRelocation(val, load);
+ } else {
+ movePtr(ImmWord(val.asRawBits()), dest);
+ }
+ }
+ void moveValue(const Value& src, const ValueOperand& dest) {
+ moveValue(src, dest.valueReg());
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ CodeOffset label = movWithPatch(imm, scratch);
+ push(scratch);
+ return label;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value);
+ return CodeOffset(off.getOffset());
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value));
+ return CodeOffset(off.getOffset());
+ }
+
+ void boxValue(JSValueType type, Register src, Register dest);
+
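+  // Extract the tag as a sign-extended bitfield (bits JSVAL_TAG_SHIFT..63 of
+  // the boxed value).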
+ void splitSignExtTag(Register src, Register dest) {
+ sbfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT,
+ (64 - JSVAL_TAG_SHIFT));
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch) {
+ loadPtr(address, scratch);
+ splitSignExtTag(scratch, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ splitSignExtTag(value.valueReg(), scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch) {
+ loadPtr(address, scratch);
+ unboxObject(scratch, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxSymbol(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
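+ // Materializes the given condition flags as 0 or 1 in |dest|.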
+ void emitSet(Condition cond, Register dest) {
+ Cset(ARMRegister(dest, 64), cond);
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void convertBoolToInt32(Register source, Register dest) {
+ Uxtb(ARMRegister(dest, 64), ARMRegister(source, 64));
+ }
+
+ void convertInt32ToDouble(Register src, FloatRegister dest) {
+ Scvtf(ARMFPRegister(dest, 64),
+ ARMRegister(src, 32)); // Uses FPCR rounding mode.
+ }
+ void convertInt32ToDouble(const Address& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ load32(src, scratch);
+ convertInt32ToDouble(scratch, dest);
+ }
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ MOZ_ASSERT(scratch != src.index);
+ load32(src, scratch);
+ convertInt32ToDouble(scratch, dest);
+ }
+
+ void convertInt32ToFloat32(Register src, FloatRegister dest) {
+ Scvtf(ARMFPRegister(dest, 32),
+ ARMRegister(src, 32)); // Uses FPCR rounding mode.
+ }
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ load32(src, scratch);
+ convertInt32ToFloat32(scratch, dest);
+ }
+
+ void convertUInt32ToDouble(Register src, FloatRegister dest) {
+ Ucvtf(ARMFPRegister(dest, 64),
+ ARMRegister(src, 32)); // Uses FPCR rounding mode.
+ }
+ void convertUInt32ToDouble(const Address& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ load32(src, scratch);
+ convertUInt32ToDouble(scratch, dest);
+ }
+
+ void convertUInt32ToFloat32(Register src, FloatRegister dest) {
+ Ucvtf(ARMFPRegister(dest, 32),
+ ARMRegister(src, 32)); // Uses FPCR rounding mode.
+ }
+ void convertUInt32ToFloat32(const Address& src, FloatRegister dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != src.base);
+ load32(src, scratch);
+ convertUInt32ToFloat32(scratch, dest);
+ }
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
+ Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 32));
+ }
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) {
+ Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64));
+ }
+
+ using vixl::MacroAssembler::B;
+
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true) {
+ ARMFPRegister fsrc64(src, 64);
+ ARMRegister dest32(dest, 32);
+
+ // ARMv8.3 chips support the FJCVTZS instruction, which handles exactly this
+ // logic. But the simulator does not implement it, and when the simulator
+ // runs on ARM64 hardware we want to override vixl's detection of it.
+#if defined(JS_SIMULATOR_ARM64) && (defined(__aarch64__) || defined(_M_ARM64))
+ const bool fjscvt = false;
+#else
+ const bool fjscvt =
+ CPUHas(vixl::CPUFeatures::kFP, vixl::CPUFeatures::kJSCVT);
+#endif
+ if (fjscvt) {
+ // Convert double to integer, rounding toward zero.
+ // The Z-flag is set iff the conversion is exact. -0 unsets the Z-flag.
+ Fjcvtzs(dest32, fsrc64);
+
+ if (negativeZeroCheck) {
+ B(fail, Assembler::NonZero);
+ } else {
+ Label done;
+ B(&done, Assembler::Zero); // If conversion was exact, go to end.
+
+ // The conversion was inexact, but the caller intends to allow -0.
+
+ // Compare fsrc64 to 0.
+ // If fsrc64 == 0 and FJCVTZS conversion was inexact, then fsrc64 is -0.
+ Fcmp(fsrc64, 0.0);
+ B(fail, Assembler::NotEqual); // Pass through -0; fail otherwise.
+
+ bind(&done);
+ }
+ } else {
+ // Older processors use a significantly slower path.
+ ARMRegister dest64(dest, 64);
+
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMFPRegister scratch64 = temps.AcquireD();
+ MOZ_ASSERT(!scratch64.Is(fsrc64));
+
+ Fcvtzs(dest32, fsrc64); // Convert, rounding toward zero.
+ Scvtf(scratch64, dest32); // Convert back, using FPCR rounding mode.
+ Fcmp(scratch64, fsrc64);
+ B(fail, Assembler::NotEqual);
+
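+ // A zero result may still have come from -0.0. Move the input's raw bits
+ // into the GPR and fail if they are nonzero, which identifies -0.0.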
+ if (negativeZeroCheck) {
+ Label nonzero;
+ Cbnz(dest32, &nonzero);
+ Fmov(dest64, fsrc64);
+ Cbnz(dest64, fail);
+ bind(&nonzero);
+ }
+ }
+ }
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMFPRegister scratch32 = temps.AcquireS();
+
+ ARMFPRegister fsrc(src, 32);
+ ARMRegister dest32(dest, 32);
+ ARMRegister dest64(dest, 64);
+
+ MOZ_ASSERT(!scratch32.Is(fsrc));
+
+ Fcvtzs(dest64, fsrc); // Convert, rounding toward zero.
+ Scvtf(scratch32, dest32); // Convert back, using FPCR rounding mode.
+ Fcmp(scratch32, fsrc);
+ B(fail, Assembler::NotEqual);
+
+ if (negativeZeroCheck) {
+ Label nonzero;
+ Cbnz(dest32, &nonzero);
+ Fmov(dest32, fsrc);
+ Cbnz(dest32, fail);
+ bind(&nonzero);
+ }
+ Uxtw(dest64, dest64);
+ }
+
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true) {
+ ARMFPRegister fsrc64(src, 64);
+ ARMRegister dest64(dest, 64);
+
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMFPRegister scratch64 = temps.AcquireD();
+ MOZ_ASSERT(!scratch64.Is(fsrc64));
+
+ // Note: we can't use the FJCVTZS instruction here because that only works
+ // for 32-bit values.
+
+ Fcvtzs(dest64, fsrc64); // Convert, rounding toward zero.
+ Scvtf(scratch64, dest64); // Convert back, using FPCR rounding mode.
+ Fcmp(scratch64, fsrc64);
+ B(fail, Assembler::NotEqual);
+
+ if (negativeZeroCheck) {
+ Label nonzero;
+ Cbnz(dest64, &nonzero);
+ Fmov(dest64, fsrc64);
+ Cbnz(dest64, fail);
+ bind(&nonzero);
+ }
+ }
+
+ void jump(Label* label) { B(label); }
+ void jump(JitCode* code) { branch(code); }
+ void jump(ImmPtr ptr) {
+ // It is unclear why this sync is necessary:
+ // * PSP and SP have been observed to be different in testcase
+ // tests/asm.js/testBug1046688.js.
+ // * Removing the sync causes no failures in all of jit-tests.
+ //
+ // Also see branch(JitCode*) below. This version of jump() is called only
+ // from jump(TrampolinePtr) which is called on various very slow paths,
+ // probably only in JS.
+ syncStackPtr();
+ BufferOffset loc =
+ b(-1,
+ LabelDoc()); // The jump target will be patched by executableCopy().
+ addPendingJump(loc, ptr, RelocationKind::HARDCODED);
+ }
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+ void jump(Register reg) { Br(ARMRegister(reg, 64)); }
+ void jump(const Address& addr) {
+ vixl::UseScratchRegisterScope temps(this);
+ MOZ_ASSERT(temps.IsAvailable(ScratchReg64)); // ip0
+ temps.Exclude(ScratchReg64);
+ MOZ_ASSERT(addr.base != ScratchReg64.asUnsized());
+ loadPtr(addr, ScratchReg64.asUnsized());
+ br(ScratchReg64);
+ }
+
+ void align(int alignment) { armbuffer_.align(alignment); }
+
+ void haltingAlign(int alignment) {
+ armbuffer_.align(alignment, vixl::HLT | ImmException(0xBAAD));
+ }
+ void nopAlign(int alignment) { armbuffer_.align(alignment); }
+
+ void movePtr(Register src, Register dest) {
+ Mov(ARMRegister(dest, 64), ARMRegister(src, 64));
+ }
+ void movePtr(ImmWord imm, Register dest) {
+ Mov(ARMRegister(dest, 64), int64_t(imm.value));
+ }
+ void movePtr(ImmPtr imm, Register dest) {
+ Mov(ARMRegister(dest, 64), int64_t(imm.value));
+ }
+ void movePtr(wasm::SymbolicAddress imm, Register dest) {
+ BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
+ append(wasm::SymbolicAccess(CodeOffset(off.getOffset()), imm));
+ }
+ void movePtr(ImmGCPtr imm, Register dest) {
+ BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
+ writeDataRelocation(imm, load);
+ }
+
+ void mov(ImmWord imm, Register dest) { movePtr(imm, dest); }
+ void mov(ImmPtr imm, Register dest) { movePtr(imm, dest); }
+ void mov(wasm::SymbolicAddress imm, Register dest) { movePtr(imm, dest); }
+ void mov(Register src, Register dest) { movePtr(src, dest); }
+ void mov(CodeLabel* label, Register dest);
+
+ void move32(Imm32 imm, Register dest) {
+ Mov(ARMRegister(dest, 32), (int64_t)imm.value);
+ }
+ void move32(Register src, Register dest) {
+ Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
+ }
+
+ // Move a pointer using a literal pool, so that the pointer
+ // may be easily patched or traced.
+ // Returns the BufferOffset of the load instruction emitted.
+ BufferOffset movePatchablePtr(ImmWord ptr, Register dest);
+ BufferOffset movePatchablePtr(ImmPtr ptr, Register dest);
+
+ void loadPtr(wasm::SymbolicAddress address, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ movePtr(address, scratch.asUnsized());
+ Ldr(ARMRegister(dest, 64), MemOperand(scratch));
+ }
+ void loadPtr(AbsoluteAddress address, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized());
+ Ldr(ARMRegister(dest, 64), MemOperand(scratch));
+ }
+ void loadPtr(const Address& address, Register dest) {
+ Ldr(ARMRegister(dest, 64), MemOperand(address));
+ }
+ void loadPtr(const BaseIndex& src, Register dest) {
+ ARMRegister base = toARMRegister(src.base, 64);
+ uint32_t scale = Imm32::ShiftOf(src.scale).value;
+ ARMRegister dest64(dest, 64);
+ ARMRegister index64(src.index, 64);
+
+ if (src.offset) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch = temps.AcquireX();
+ MOZ_ASSERT(!scratch.Is(base));
+ MOZ_ASSERT(!scratch.Is(dest64));
+ MOZ_ASSERT(!scratch.Is(index64));
+
+ Add(scratch, base, Operand(int64_t(src.offset)));
+ Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale));
+ return;
+ }
+
+ Ldr(dest64, MemOperand(base, index64, vixl::LSL, scale));
+ }
+ void loadPrivate(const Address& src, Register dest);
+
+ void store8(Register src, const Address& address) {
+ Strb(ARMRegister(src, 32), toMemOperand(address));
+ }
+ void store8(Imm32 imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ move32(imm, scratch32.asUnsized());
+ Strb(scratch32, toMemOperand(address));
+ }
+ void store8(Register src, const BaseIndex& address) {
+ doBaseIndex(ARMRegister(src, 32), address, vixl::STRB_w);
+ }
+ void store8(Imm32 imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ MOZ_ASSERT(scratch32.asUnsized() != address.index);
+ Mov(scratch32, Operand(imm.value));
+ doBaseIndex(scratch32, address, vixl::STRB_w);
+ }
+
+ void store16(Register src, const Address& address) {
+ Strh(ARMRegister(src, 32), toMemOperand(address));
+ }
+ void store16(Imm32 imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ move32(imm, scratch32.asUnsized());
+ Strh(scratch32, toMemOperand(address));
+ }
+ void store16(Register src, const BaseIndex& address) {
+ doBaseIndex(ARMRegister(src, 32), address, vixl::STRH_w);
+ }
+ void store16(Imm32 imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ MOZ_ASSERT(scratch32.asUnsized() != address.index);
+ Mov(scratch32, Operand(imm.value));
+ doBaseIndex(scratch32, address, vixl::STRH_w);
+ }
+ template <typename S, typename T>
+ void store16Unaligned(const S& src, const T& dest) {
+ store16(src, dest);
+ }
+
+ void storePtr(ImmWord imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != address.base);
+ movePtr(imm, scratch);
+ storePtr(scratch, address);
+ }
+ void storePtr(ImmPtr imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != address.base);
+ Mov(scratch64, uint64_t(imm.value));
+ Str(scratch64, toMemOperand(address));
+ }
+ void storePtr(ImmGCPtr imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != address.base);
+ movePtr(imm, scratch);
+ storePtr(scratch, address);
+ }
+ void storePtr(Register src, const Address& address) {
+ Str(ARMRegister(src, 64), toMemOperand(address));
+ }
+
+ void storePtr(ImmWord imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != address.base);
+ MOZ_ASSERT(scratch64.asUnsized() != address.index);
+ Mov(scratch64, Operand(imm.value));
+ doBaseIndex(scratch64, address, vixl::STR_x);
+ }
+ void storePtr(ImmGCPtr imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != address.base);
+ MOZ_ASSERT(scratch != address.index);
+ movePtr(imm, scratch);
+ doBaseIndex(ARMRegister(scratch, 64), address, vixl::STR_x);
+ }
+ void storePtr(Register src, const BaseIndex& address) {
+ doBaseIndex(ARMRegister(src, 64), address, vixl::STR_x);
+ }
+
+ void storePtr(Register src, AbsoluteAddress address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ Mov(scratch64, uint64_t(address.addr));
+ Str(ARMRegister(src, 64), MemOperand(scratch64));
+ }
+
+ void store32(Register src, AbsoluteAddress address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ Mov(scratch64, uint64_t(address.addr));
+ Str(ARMRegister(src, 32), MemOperand(scratch64));
+ }
+ void store32(Imm32 imm, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ Mov(scratch32, uint64_t(imm.value));
+ Str(scratch32, toMemOperand(address));
+ }
+ void store32(Register r, const Address& address) {
+ Str(ARMRegister(r, 32), toMemOperand(address));
+ }
+ void store32(Imm32 imm, const BaseIndex& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != address.base);
+ MOZ_ASSERT(scratch32.asUnsized() != address.index);
+ Mov(scratch32, imm.value);
+ doBaseIndex(scratch32, address, vixl::STR_w);
+ }
+ void store32(Register r, const BaseIndex& address) {
+ doBaseIndex(ARMRegister(r, 32), address, vixl::STR_w);
+ }
+
+ template <typename S, typename T>
+ void store32Unaligned(const S& src, const T& dest) {
+ store32(src, dest);
+ }
+
+ void store64(Register64 src, Address address) { storePtr(src.reg, address); }
+
+ void store64(Register64 src, const BaseIndex& address) {
+ storePtr(src.reg, address);
+ }
+
+ void store64(Imm64 imm, const BaseIndex& address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ void store64(Imm64 imm, const Address& address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ template <typename S, typename T>
+ void store64Unaligned(const S& src, const T& dest) {
+ store64(src, dest);
+ }
+
+ // StackPointer manipulation.
+ inline void addToStackPtr(Register src);
+ inline void addToStackPtr(Imm32 imm);
+ inline void addToStackPtr(const Address& src);
+ inline void addStackPtrTo(Register dest);
+
+ inline void subFromStackPtr(Register src);
+ inline void subFromStackPtr(Imm32 imm);
+ inline void subStackPtrFrom(Register dest);
+
+ inline void andToStackPtr(Imm32 t);
+
+ inline void moveToStackPtr(Register src);
+ inline void moveStackPtrTo(Register dest);
+
+ inline void loadStackPtr(const Address& src);
+ inline void storeStackPtr(const Address& dest);
+
+ // StackPointer testing functions.
+ inline void branchTestStackPtr(Condition cond, Imm32 rhs, Label* label);
+ inline void branchStackPtr(Condition cond, Register rhs, Label* label);
+ inline void branchStackPtrRhs(Condition cond, Address lhs, Label* label);
+ inline void branchStackPtrRhs(Condition cond, AbsoluteAddress lhs,
+ Label* label);
+
+ void testPtr(Register lhs, Register rhs) {
+ Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
+ }
+ void test32(Register lhs, Register rhs) {
+ Tst(ARMRegister(lhs, 32), Operand(ARMRegister(rhs, 32)));
+ }
+ void test32(const Address& addr, Imm32 imm) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != addr.base);
+ load32(addr, scratch32.asUnsized());
+ Tst(scratch32, Operand(imm.value));
+ }
+ void test32(Register lhs, Imm32 rhs) {
+ Tst(ARMRegister(lhs, 32), Operand(rhs.value));
+ }
+ void cmp32(Register lhs, Imm32 rhs) {
+ Cmp(ARMRegister(lhs, 32), Operand(rhs.value));
+ }
+ void cmp32(Register a, Register b) {
+ Cmp(ARMRegister(a, 32), Operand(ARMRegister(b, 32)));
+ }
+ void cmp32(const Address& lhs, Imm32 rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
+ Ldr(scratch32, toMemOperand(lhs));
+ Cmp(scratch32, Operand(rhs.value));
+ }
+ void cmp32(const Address& lhs, Register rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
+ MOZ_ASSERT(scratch32.asUnsized() != rhs);
+ Ldr(scratch32, toMemOperand(lhs));
+ Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
+ }
+ void cmp32(const vixl::Operand& lhs, Imm32 rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ Mov(scratch32, lhs);
+ Cmp(scratch32, Operand(rhs.value));
+ }
+ void cmp32(const vixl::Operand& lhs, Register rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ Mov(scratch32, lhs);
+ Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
+ }
+
+ void cmn32(Register lhs, Imm32 rhs) {
+ Cmn(ARMRegister(lhs, 32), Operand(rhs.value));
+ }
+
+ void cmpPtr(Register lhs, Imm32 rhs) {
+ Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
+ }
+ void cmpPtr(Register lhs, ImmWord rhs) {
+ Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
+ }
+ void cmpPtr(Register lhs, ImmPtr rhs) {
+ Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
+ }
+ void cmpPtr(Register lhs, Imm64 rhs) {
+ Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
+ }
+ void cmpPtr(Register lhs, Register rhs) {
+ Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
+ }
+ void cmpPtr(Register lhs, ImmGCPtr rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs);
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+
+ void cmpPtr(const Address& lhs, Register rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
+ MOZ_ASSERT(scratch64.asUnsized() != rhs);
+ Ldr(scratch64, toMemOperand(lhs));
+ Cmp(scratch64, Operand(ARMRegister(rhs, 64)));
+ }
+ void cmpPtr(const Address& lhs, ImmWord rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
+ Ldr(scratch64, toMemOperand(lhs));
+ Cmp(scratch64, Operand(rhs.value));
+ }
+ void cmpPtr(const Address& lhs, ImmPtr rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
+ Ldr(scratch64, toMemOperand(lhs));
+ Cmp(scratch64, Operand(uint64_t(rhs.value)));
+ }
+ void cmpPtr(const Address& lhs, ImmGCPtr rhs) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != lhs.base);
+ loadPtr(lhs, scratch);
+ cmpPtr(scratch, rhs);
+ }
+
+ void loadDouble(const Address& src, FloatRegister dest) {
+ Ldr(ARMFPRegister(dest, 64), MemOperand(src));
+ }
+ void loadDouble(const BaseIndex& src, FloatRegister dest) {
+ ARMRegister base = toARMRegister(src.base, 64);
+ ARMRegister index(src.index, 64);
+
+ if (src.offset == 0) {
+ Ldr(ARMFPRegister(dest, 64),
+ MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != src.base);
+ MOZ_ASSERT(scratch64.asUnsized() != src.index);
+
+ Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
+ Ldr(ARMFPRegister(dest, 64), MemOperand(scratch64, src.offset));
+ }
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest) {
+ Ldr(ARMFPRegister(dest, 32), toMemOperand(addr));
+ fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
+ }
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) {
+ ARMRegister base = toARMRegister(src.base, 64);
+ ARMRegister index(src.index, 64);
+ if (src.offset == 0) {
+ Ldr(ARMFPRegister(dest, 32),
+ MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != src.base);
+ MOZ_ASSERT(scratch64.asUnsized() != src.index);
+
+ Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
+ Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
+ }
+ fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
+ }
+
+ void loadFloat32(const Address& addr, FloatRegister dest) {
+ Ldr(ARMFPRegister(dest, 32), toMemOperand(addr));
+ }
+ void loadFloat32(const BaseIndex& src, FloatRegister dest) {
+ ARMRegister base = toARMRegister(src.base, 64);
+ ARMRegister index(src.index, 64);
+ if (src.offset == 0) {
+ Ldr(ARMFPRegister(dest, 32),
+ MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
+ } else {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != src.base);
+ MOZ_ASSERT(scratch64.asUnsized() != src.index);
+
+ Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
+ Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
+ }
+ }
+
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ fmov(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
+ }
+ void zeroDouble(FloatRegister reg) {
+ fmov(ARMFPRegister(reg, 64), vixl::xzr);
+ }
+ void zeroFloat32(FloatRegister reg) {
+ fmov(ARMFPRegister(reg, 32), vixl::wzr);
+ }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ fmov(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
+ }
+ void moveFloatAsDouble(Register src, FloatRegister dest) {
+ MOZ_CRASH("moveFloatAsDouble");
+ }
+
+ void moveSimd128(FloatRegister src, FloatRegister dest) {
+ fmov(ARMFPRegister(dest, 128), ARMFPRegister(src, 128));
+ }
+
+ void splitSignExtTag(const ValueOperand& operand, Register dest) {
+ splitSignExtTag(operand.valueReg(), dest);
+ }
+ void splitSignExtTag(const Address& operand, Register dest) {
+ loadPtr(operand, dest);
+ splitSignExtTag(dest, dest);
+ }
+ void splitSignExtTag(const BaseIndex& operand, Register dest) {
+ loadPtr(operand, dest);
+ splitSignExtTag(dest, dest);
+ }
+
+ // Extracts the tag of a value and places it in |tag|.
+ inline void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+ void cmpTag(const ValueOperand& operand, ImmTag tag) { MOZ_CRASH("cmpTag"); }
+
+ void load32(const Address& address, Register dest) {
+ Ldr(ARMRegister(dest, 32), toMemOperand(address));
+ }
+ void load32(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDR_w);
+ }
+ void load32(AbsoluteAddress address, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ movePtr(ImmWord((uintptr_t)address.addr), scratch64.asUnsized());
+ ldr(ARMRegister(dest, 32), MemOperand(scratch64));
+ }
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ load32(src, dest);
+ }
+ void load64(const Address& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ load64(src, dest);
+ }
+
+ void load8SignExtend(const Address& address, Register dest) {
+ Ldrsb(ARMRegister(dest, 32), toMemOperand(address));
+ }
+ void load8SignExtend(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSB_w);
+ }
+
+ void load8ZeroExtend(const Address& address, Register dest) {
+ Ldrb(ARMRegister(dest, 32), toMemOperand(address));
+ }
+ void load8ZeroExtend(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRB_w);
+ }
+
+ void load16SignExtend(const Address& address, Register dest) {
+ Ldrsh(ARMRegister(dest, 32), toMemOperand(address));
+ }
+ void load16SignExtend(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSH_w);
+ }
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ load16SignExtend(src, dest);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest) {
+ Ldrh(ARMRegister(dest, 32), toMemOperand(address));
+ }
+ void load16ZeroExtend(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRH_w);
+ }
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ load16ZeroExtend(src, dest);
+ }
+
+ void adds32(Register src, Register dest) {
+ Adds(ARMRegister(dest, 32), ARMRegister(dest, 32),
+ Operand(ARMRegister(src, 32)));
+ }
+ void adds32(Imm32 imm, Register dest) {
+ Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+ }
+ void adds32(Imm32 imm, const Address& dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != dest.base);
+
+ Ldr(scratch32, toMemOperand(dest));
+ Adds(scratch32, scratch32, Operand(imm.value));
+ Str(scratch32, toMemOperand(dest));
+ }
+ void adds64(Imm32 imm, Register dest) {
+ Adds(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+ }
+ void adds64(ImmWord imm, Register dest) {
+ Adds(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+ }
+ void adds64(Register src, Register dest) {
+ Adds(ARMRegister(dest, 64), ARMRegister(dest, 64),
+ Operand(ARMRegister(src, 64)));
+ }
+
+ void subs32(Imm32 imm, Register dest) {
+ Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
+ }
+ void subs32(Register src, Register dest) {
+ Subs(ARMRegister(dest, 32), ARMRegister(dest, 32),
+ Operand(ARMRegister(src, 32)));
+ }
+ void subs64(Imm32 imm, Register dest) {
+ Subs(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
+ }
+ void subs64(Register src, Register dest) {
+ Subs(ARMRegister(dest, 64), ARMRegister(dest, 64),
+ Operand(ARMRegister(src, 64)));
+ }
+
+ void negs32(Register reg) {
+ Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
+ }
+
+ void ret() {
+ pop(lr);
+ abiret();
+ }
+
+ void retn(Imm32 n) {
+ vixl::UseScratchRegisterScope temps(this);
+ MOZ_ASSERT(temps.IsAvailable(ScratchReg64)); // ip0
+ temps.Exclude(ScratchReg64);
+ // ip0 <- [sp]; sp += n; ret ip0
+ Ldr(ScratchReg64,
+ MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex));
+ syncStackPtr(); // SP is always used to transmit the stack between calls.
+ Ret(ScratchReg64);
+ }
+
+ void j(Condition cond, Label* dest) { B(dest, cond); }
+
+ void branch(Condition cond, Label* label) { B(label, cond); }
+ void branch(JitCode* target) {
+ // It is unclear why this sync is necessary:
+ // * PSP and SP have been observed to be different in testcase
+ // tests/async/debugger-reject-after-fulfill.js
+ // * Removing the sync causes no failures in all of jit-tests.
+ //
+ // Also see jump() above. This is used only to implement jump(JitCode*)
+ // and only for JS, it appears.
+ syncStackPtr();
+ BufferOffset loc =
+ b(-1,
+ LabelDoc()); // The jump target will be patched by executableCopy().
+ addPendingJump(loc, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+
+ void compareDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs) {
+ Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64));
+ }
+
+ void compareFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs) {
+ Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32));
+ }
+
+ void compareSimd128Int(Assembler::Condition cond, ARMFPRegister dest,
+ ARMFPRegister lhs, ARMFPRegister rhs);
+ void compareSimd128Float(Assembler::Condition cond, ARMFPRegister dest,
+ ARMFPRegister lhs, ARMFPRegister rhs);
+ void rightShiftInt8x16(FloatRegister lhs, Register rhs, FloatRegister dest,
+ bool isUnsigned);
+ void rightShiftInt16x8(FloatRegister lhs, Register rhs, FloatRegister dest,
+ bool isUnsigned);
+ void rightShiftInt32x4(FloatRegister lhs, Register rhs, FloatRegister dest,
+ bool isUnsigned);
+ void rightShiftInt64x2(FloatRegister lhs, Register rhs, FloatRegister dest,
+ bool isUnsigned);
+
+ void branchNegativeZero(FloatRegister reg, Register scratch, Label* label) {
+ MOZ_CRASH("branchNegativeZero");
+ }
+ void branchNegativeZeroFloat32(FloatRegister reg, Register scratch,
+ Label* label) {
+ MOZ_CRASH("branchNegativeZeroFloat32");
+ }
+
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister) {
+ Fmov(ARMRegister(dest.valueReg(), 64), ARMFPRegister(src, 64));
+ }
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ boxValue(type, src, dest.valueReg());
+ }
+
+ // Note that the |dest| register here may be ScratchReg, so we must not
+ // acquire ScratchReg as a temporary in these unboxing functions.
+ void unboxInt32(const ValueOperand& src, Register dest) {
+ move32(src.valueReg(), dest);
+ }
+ void unboxInt32(const Address& src, Register dest) { load32(src, dest); }
+ void unboxInt32(const BaseIndex& src, Register dest) { load32(src, dest); }
+
+ template <typename T>
+ void unboxDouble(const T& src, FloatRegister dest) {
+ loadDouble(src, dest);
+ }
+ void unboxDouble(const ValueOperand& src, FloatRegister dest) {
+ Fmov(ARMFPRegister(dest, 64), ARMRegister(src.valueReg(), 64));
+ }
+
+ void unboxArgObjMagic(const ValueOperand& src, Register dest) {
+ MOZ_CRASH("unboxArgObjMagic");
+ }
+ void unboxArgObjMagic(const Address& src, Register dest) {
+ MOZ_CRASH("unboxArgObjMagic");
+ }
+
+ void unboxBoolean(const ValueOperand& src, Register dest) {
+ move32(src.valueReg(), dest);
+ }
+ void unboxBoolean(const Address& src, Register dest) { load32(src, dest); }
+ void unboxBoolean(const BaseIndex& src, Register dest) { load32(src, dest); }
+
+ void unboxMagic(const ValueOperand& src, Register dest) {
+ move32(src.valueReg(), dest);
+ }
+ void unboxNonDouble(const ValueOperand& src, Register dest,
+ JSValueType type) {
+ unboxNonDouble(src.valueReg(), dest, type);
+ }
+
+ template <typename T>
+ void unboxNonDouble(T src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ load32(src, dest);
+ return;
+ }
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest, type);
+ }
+
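+ // Int32 and boolean payloads live in the low 32 bits; for the remaining
+ // (pointer-like) types the payload is recovered by XOR-ing away the known
+ // shifted tag.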
+ void unboxNonDouble(Register src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ move32(src, dest);
+ return;
+ }
+ Eor(ARMRegister(dest, 64), ARMRegister(src, 64),
+ Operand(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
+ }
+
+ void notBoolean(const ValueOperand& val) {
+ ARMRegister r(val.valueReg(), 64);
+ eor(r, r, Operand(1));
+ }
+ void unboxObject(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src.valueReg(), dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const BaseIndex& src, Register dest) {
+ doBaseIndex(ARMRegister(dest, 64), src, vixl::LDR_x);
+ unboxNonDouble(dest, dest, JSVAL_TYPE_OBJECT);
+ }
+
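+ // The object and null tags differ only in ValueObjectOrNullBit, so after
+ // unboxing as an object, clearing that bit maps a boxed null to nullptr
+ // while leaving genuine object pointers unchanged.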
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ And(ARMRegister(dest, 64), ARMRegister(dest, 64),
+ Operand(~JS::detail::ValueObjectOrNullBit));
+ }
+
+ // See comment in MacroAssembler-x64.h.
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ And(ARMRegister(dest, 64), ARMRegister(dest, 64),
+ Operand(JS::detail::ValueGCThingPayloadMask));
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ And(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64),
+ Operand(JS::detail::ValueGCThingPayloadMask));
+ }
+
+ // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
+ void getGCThingValueChunk(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ And(ARMRegister(dest, 64), ARMRegister(dest, 64),
+ Operand(JS::detail::ValueGCThingPayloadChunkMask));
+ }
+ void getGCThingValueChunk(const ValueOperand& src, Register dest) {
+ And(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64),
+ Operand(JS::detail::ValueGCThingPayloadChunkMask));
+ }
+
+ inline void unboxValue(const ValueOperand& src, AnyRegister dest,
+ JSValueType type);
+
+ void unboxString(const ValueOperand& operand, Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxString(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxSymbol(const ValueOperand& operand, Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
+ }
+ void unboxSymbol(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+ }
+ void unboxBigInt(const ValueOperand& operand, Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
+ }
+ void unboxBigInt(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+ // These two functions use the low 32 bits of the full value register.
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+
+ void loadConstantDouble(double d, FloatRegister dest) {
+ ARMFPRegister r(dest, 64);
+ if (d == 0.0) {
+ // Clang11 does movi for 0 and movi+fneg for -0, and this seems like a
+ // good implementation-independent strategy as it avoids any gpr->fpr
+ // moves or memory traffic.
+ Movi(r, 0);
+ if (std::signbit(d)) {
+ Fneg(r, r);
+ }
+ } else {
+ Fmov(r, d);
+ }
+ }
+ void loadConstantFloat32(float f, FloatRegister dest) {
+ ARMFPRegister r(dest, 32);
+ if (f == 0.0) {
+ // See comments above. There is no Movi variant for a single-precision
+ // register, so clear the whole double register instead.
+ Movi(ARMFPRegister(dest, 64), 0);
+ if (std::signbit(f)) {
+ Fneg(r, r);
+ }
+ } else {
+ Fmov(r, f);
+ }
+ }
+
+ void cmpTag(Register tag, ImmTag ref) {
+ // Unlike on other architectures, splitTag is replaced by splitSignExtTag,
+ // which extracts the tag with a sign extension. The reason is that a cmp32
+ // against a tag value would not fit in a 12-bit immediate, which would
+ // require the VIXL macro assembler to emit an extra instruction and use an
+ // extra scratch register to load the tag value.
+ //
+ // Instead, we compare against the negation of the sign-extended tag using
+ // the CMN instruction. The sign-extended tag is expected to be negative,
+ // so its negation is expected to be close to 0 and to fit in 12 bits.
+ //
+ // Ignoring the sign extension, the logic is the following:
+ //
+ // CMP32(Reg, Tag) = Reg - Tag
+ // = Reg + (-Tag)
+ // = CMN32(Reg, -Tag)
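+ //
+ // For example, assuming the usual punbox64 constants (JSVAL_TAG_SHIFT == 47
+ // and JSVAL_TAG_INT32 == 0x1FFF1), the sign-extended Int32 tag is -15, so
+ // testInt32 effectively becomes CMN32(Reg, 15).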
+ //
+ // Note: testGCThing, testPrimitive and testNumber, which check for
+ // inequalities, must use unsigned comparisons (as they do by default) in
+ // order to preserve the relative order after the sign extension, i.e.
+ // Above or Below, which are based on the carry flag.
+ uint32_t hiShift = JSVAL_TAG_SHIFT - 32;
+ int32_t seTag = int32_t(ref.value);
+ seTag = (seTag << hiShift) >> hiShift;
+ MOZ_ASSERT(seTag < 0);
+ int32_t negTag = -seTag;
+ // Check that negTag can be encoded as a 12-bit immediate value.
+ MOZ_ASSERT((negTag & ~0xFFF) == 0);
+ cmn32(tag, Imm32(negTag));
+ }
+
+ // Register-based tests.
+ Condition testUndefined(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testInt32(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testNull(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testString(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testBigInt(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+ }
+ Condition testObject(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testDouble(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE));
+ // Requires unsigned comparison due to cmpTag internals.
+ return (cond == Equal) ? BelowOrEqual : Above;
+ }
+ Condition testNumber(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JS::detail::ValueUpperInclNumberTag));
+ // Requires unsigned comparison due to cmpTag internals.
+ return (cond == Equal) ? BelowOrEqual : Above;
+ }
+ Condition testGCThing(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ // Requires unsigned comparison due to cmpTag internals.
+ return (cond == Equal) ? AboveOrEqual : Below;
+ }
+ Condition testMagic(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testPrimitive(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmpTag(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag));
+ // Requires unsigned comparison due to cmpTag internals.
+ return (cond == Equal) ? Below : AboveOrEqual;
+ }
+ Condition testError(Condition cond, Register tag) {
+ return testMagic(cond, tag);
+ }
+
+ // ValueOperand-based tests.
+ Condition testInt32(Condition cond, const ValueOperand& value) {
+ // The incoming ValueOperand may use scratch registers.
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(scratch != value.valueReg());
+
+ splitSignExtTag(value, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testNull(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testUndefined(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testString(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testBigInt(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testBigInt(cond, scratch);
+ }
+ Condition testObject(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testNumber(cond, scratch);
+ }
+ Condition testPrimitive(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testPrimitive(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const ValueOperand& value) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(value.valueReg() != scratch);
+ splitSignExtTag(value, scratch);
+ return testGCThing(cond, scratch);
+ }
+ Condition testError(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value);
+ }
+
+ // Address-based tests.
+ Condition testGCThing(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testGCThing(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testNull(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testUndefined(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testString(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testBigInt(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testBigInt(cond, scratch);
+ }
+ Condition testObject(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const Address& address) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(address.base != scratch);
+ splitSignExtTag(address, scratch);
+ return testNumber(cond, scratch);
+ }
+
+ // BaseIndex-based tests.
+ Condition testUndefined(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testNull(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testString(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testBigInt(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testBigInt(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testObject(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const BaseIndex& src) {
+ vixl::UseScratchRegisterScope temps(this);
+ const Register scratch = temps.AcquireX().asUnsized();
+ MOZ_ASSERT(src.base != scratch);
+ MOZ_ASSERT(src.index != scratch);
+ splitSignExtTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+
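+ // For int32 and boolean Values the truthiness depends only on the low 32
+ // payload bits, so a 32-bit TST against itself suffices.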
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
+ ARMRegister payload32(operand.valueReg(), 32);
+ Tst(payload32, payload32);
+ return truthy ? NonZero : Zero;
+ }
+
+ Condition testBooleanTruthy(bool truthy, const ValueOperand& operand) {
+ ARMRegister payload32(operand.valueReg(), 32);
+ Tst(payload32, payload32);
+ return truthy ? NonZero : Zero;
+ }
+
+ Condition testBigIntTruthy(bool truthy, const ValueOperand& value);
+ Condition testStringTruthy(bool truthy, const ValueOperand& value);
+
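+ // Materializes a Value that is known to be a number as a double: if it is
+ // already a double, the bits are reinterpreted directly; otherwise the
+ // int32 payload is converted.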
+ void int32OrDouble(Register src, ARMFPRegister dest) {
+ Label isInt32;
+ Label join;
+ testInt32(Equal, ValueOperand(src));
+ B(&isInt32, Equal);
+ // is double, move the bits as is
+ Fmov(dest, ARMRegister(src, 64));
+ B(&join);
+ bind(&isInt32);
+ // is int32, do a conversion while moving
+ Scvtf(dest, ARMRegister(src, 32));
+ bind(&join);
+ }
+ void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != address.base);
+ Ldr(scratch64, toMemOperand(address));
+ int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
+ } else {
+ unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
+ }
+ }
+
+ void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT(scratch64.asUnsized() != address.base);
+ MOZ_ASSERT(scratch64.asUnsized() != address.index);
+ doBaseIndex(scratch64, address, vixl::LDR_x);
+ int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
+ } else {
+ unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
+ }
+ }
+
+ // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label) {
+ BufferOffset offset = b(label, Always);
+ CodeOffset ret(offset.getOffset());
+ return ret;
+ }
+
+ // load: offset to the load instruction obtained by movePatchablePtr().
+ void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // Assembler::TraceDataRelocations.
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(load.getOffset());
+ }
+ }
+ void writeDataRelocation(const Value& val, BufferOffset load) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // Assembler::TraceDataRelocations.
+ if (val.isGCThing()) {
+ gc::Cell* cell = val.toGCThing();
+ if (cell && gc::IsInsideNursery(cell)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(load.getOffset());
+ }
+ }
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ Add(ARMRegister(dest, 64), toARMRegister(address.base, 64),
+ Operand(address.offset));
+ }
+ void computeEffectiveAddress(const Address& address, RegisterOrSP dest) {
+ Add(toARMRegister(dest, 64), toARMRegister(address.base, 64),
+ Operand(address.offset));
+ }
+ void computeEffectiveAddress(const BaseIndex& address, Register dest) {
+ ARMRegister dest64(dest, 64);
+ ARMRegister base64 = toARMRegister(address.base, 64);
+ ARMRegister index64(address.index, 64);
+
+ Add(dest64, base64, Operand(index64, vixl::LSL, address.scale));
+ if (address.offset) {
+ Add(dest64, dest64, Operand(address.offset));
+ }
+ }
+
+ public:
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+
+ void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, AnyRegister outany, Register64 out64);
+ void wasmLoadImpl(const wasm::MemoryAccessDesc& access, MemOperand srcAddr,
+ AnyRegister outany, Register64 out64);
+ void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valany,
+ Register64 val64, Register memoryBase, Register ptr);
+ void wasmStoreImpl(const wasm::MemoryAccessDesc& access, MemOperand destAddr,
+ AnyRegister valany, Register64 val64);
+ // The complete address is in `address`, and `access` is used for its type
+ // attributes only; its `offset` is ignored.
+ void wasmLoadAbsolute(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, uint64_t address, AnyRegister out,
+ Register64 out64);
+ void wasmStoreAbsolute(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Register64 value64,
+ Register memoryBase, uint64_t address);
+
+ // Emit a BLR or NOP instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled) {
+ // The returned offset must be to the first instruction generated,
+ // for the debugger to match offset with Baseline's pcMappingEntries_.
+ BufferOffset offset = nextOffset();
+
+ // It is unclear why this sync is necessary:
+ // * PSP and SP have been observed to be different in testcase
+ // tests/cacheir/bug1448136.js
+ // * Removing the sync causes no failures in all of jit-tests.
+ syncStackPtr();
+
+ BufferOffset loadOffset;
+ {
+ vixl::UseScratchRegisterScope temps(this);
+
+ // The register used for the load is hardcoded, so that ToggleCall
+ // can patch in the branch instruction easily. This could be changed,
+ // but then ToggleCall must read the target register from the load.
+ MOZ_ASSERT(temps.IsAvailable(ScratchReg2_64));
+ temps.Exclude(ScratchReg2_64);
+
+ loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw()));
+
+ if (enabled) {
+ blr(ScratchReg2_64);
+ } else {
+ nop();
+ }
+ }
+
+ addPendingJump(loadOffset, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ CodeOffset ret(offset.getOffset());
+ return ret;
+ }
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // The call site is a sequence of two or three instructions:
+ //
+ // syncStack (optional)
+ // ldr/adr
+ // nop/blr
+ //
+ // Flushed constant pools can appear before any of the instructions.
+
+ const Instruction* cur = (const Instruction*)code;
+ cur = cur->skipPool();
+    if (cur->IsStackPtrSync()) {
+      cur = cur->NextInstruction();
+    }
+ cur = cur->skipPool();
+ cur = cur->NextInstruction(); // LDR/ADR
+ cur = cur->skipPool();
+ cur = cur->NextInstruction(); // NOP/BLR
+ return (uint8_t*)cur - code;
+ }
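+  // A toggledCall site is later flipped between its BLR and NOP forms by
+  // Assembler::ToggleCall(), and ToggledCallSize() tells callers how many
+  // bytes such a site occupies, flushed constant pools included.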
+
+ void checkARMRegAlignment(const ARMRegister& reg) {
+#ifdef DEBUG
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch64 = temps.AcquireX();
+ MOZ_ASSERT_IF(!reg.IsSP(), scratch64.asUnsized() != reg.asUnsized());
+ Label aligned;
+ Mov(scratch64, reg);
+ Tst(scratch64, Operand(StackAlignment - 1));
+ B(Zero, &aligned);
+ breakpoint();
+ bind(&aligned);
+ Mov(scratch64, vixl::xzr); // Clear the scratch register for sanity.
+#endif
+ }
+
+ void checkStackAlignment() {
+#ifdef DEBUG
+ checkARMRegAlignment(GetStackPointer64());
+
+ // If another register is being used to track pushes, check sp explicitly.
+ if (!GetStackPointer64().Is(vixl::sp)) {
+ checkARMRegAlignment(vixl::sp);
+ }
+#endif
+ }
+
+ void abiret() {
+ syncStackPtr(); // SP is always used to transmit the stack between calls.
+ vixl::MacroAssembler::Ret(vixl::lr);
+ }
+
+ void incrementInt32Value(const Address& addr) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != addr.base);
+
+ load32(addr, scratch32.asUnsized());
+ Add(scratch32, scratch32, Operand(1));
+ store32(scratch32.asUnsized(), addr);
+ }
+
+ void breakpoint();
+
+ // Emits a simulator directive to save the current sp on an internal stack.
+ void simulatorMarkSP() {
+#ifdef JS_SIMULATOR_ARM64
+ svc(vixl::kMarkStackPointer);
+#endif
+ }
+
+ // Emits a simulator directive to pop from its internal stack
+ // and assert that the value is equal to the current sp.
+ void simulatorCheckSP() {
+#ifdef JS_SIMULATOR_ARM64
+ svc(vixl::kCheckStackPointer);
+#endif
+ }
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+};
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
+
+class ScratchTagScope {
+ vixl::UseScratchRegisterScope temps_;
+ ARMRegister scratch64_;
+ bool owned_;
+ mozilla::DebugOnly<bool> released_;
+
+ public:
+ ScratchTagScope(MacroAssemblerCompat& masm, const ValueOperand&)
+ : temps_(&masm), owned_(true), released_(false) {
+ scratch64_ = temps_.AcquireX();
+ }
+
+ operator Register() {
+ MOZ_ASSERT(!released_);
+ return scratch64_.asUnsized();
+ }
+
+ void release() {
+ MOZ_ASSERT(!released_);
+ released_ = true;
+ if (owned_) {
+ temps_.Release(scratch64_);
+ owned_ = false;
+ }
+ }
+
+ void reacquire() {
+ MOZ_ASSERT(released_);
+ released_ = false;
+ }
+};
+
+class ScratchTagScopeRelease {
+ ScratchTagScope* ts_;
+
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
+ ts_->release();
+ }
+ ~ScratchTagScopeRelease() { ts_->reacquire(); }
+};
+
+inline void MacroAssemblerCompat::splitTagForTest(const ValueOperand& value,
+ ScratchTagScope& tag) {
+ splitSignExtTag(value, tag);
+}
+
+typedef MacroAssemblerCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_arm64_MacroAssembler_arm64_h
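A note on ScratchTagScope and ScratchTagScopeRelease above: the scope hands out the single 64-bit scratch register for holding a Value's tag, and the release object temporarily gives it back so nested code can acquire the same scratch. A minimal usage sketch, assuming a MacroAssembler `masm`, a ValueOperand `val`, and a Label `isObject` (these names are not part of this patch):

    ScratchTagScope tag(masm, val);
    masm.splitTagForTest(val, tag);  // tag now holds the sign-extended tag bits
    masm.branchTestObject(Assembler::Equal, tag, &isObject);
    {
      ScratchTagScopeRelease release(&tag);
      // Code emitted here may acquire the scratch register itself, but must
      // not assume the tag value survives this block.
    }
    // The scratch is reacquired here; re-split before using the tag again.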
diff --git a/js/src/jit/arm64/MoveEmitter-arm64.cpp b/js/src/jit/arm64/MoveEmitter-arm64.cpp
new file mode 100644
index 0000000000..fa1bb1209e
--- /dev/null
+++ b/js/src/jit/arm64/MoveEmitter-arm64.cpp
@@ -0,0 +1,329 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/MoveEmitter-arm64.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+MemOperand MoveEmitterARM64::toMemOperand(const MoveOperand& operand) const {
+ MOZ_ASSERT(operand.isMemory());
+ ARMRegister base(operand.base(), 64);
+ if (operand.base() == masm.getStackPointer()) {
+ return MemOperand(base,
+ operand.disp() + (masm.framePushed() - pushedAtStart_));
+ }
+ return MemOperand(base, operand.disp());
+}
+
+void MoveEmitterARM64::emit(const MoveResolver& moves) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ // We have two scratch general registers, so use one as temporary storage for
+ // breaking cycles and leave the other available for memory to memory moves.
+ //
+ // This register is used when breaking GENERAL, INT32, FLOAT32, and DOUBLE
+ // move cycles. For FLOAT32/DOUBLE, this involves a fmov between float and
+ // general registers. We could avoid this if we had an extra scratch float
+ // register, otherwise we need the scratch float register for memory to
+ // memory moves that may happen in the cycle. We cannot use the scratch
+ // general register for SIMD128 cycles as it is not large enough.
+ cycleGeneralReg_ = temps.AcquireX();
+
+ for (size_t i = 0; i < moves.numMoves(); i++) {
+ emitMove(moves.getMove(i));
+ }
+
+ cycleGeneralReg_ = ARMRegister();
+}
+
+void MoveEmitterARM64::finish() {
+ assertDone();
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+ MOZ_ASSERT(masm.framePushed() == pushedAtStart_);
+}
+
+void MoveEmitterARM64::emitMove(const MoveOp& move) {
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleBegin()) {
+ MOZ_ASSERT(!inCycle_ && !move.isCycleEnd());
+ breakCycle(from, to, move.endCycleType());
+ inCycle_ = true;
+ } else if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type());
+ inCycle_ = false;
+ return;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::SIMD128:
+ emitSimd128Move(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to);
+ break;
+ case MoveOp::GENERAL:
+ emitGeneralMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterARM64::emitFloat32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.Fmov(toFPReg(to, MoveOp::FLOAT32), toFPReg(from, MoveOp::FLOAT32));
+ } else {
+ masm.Str(toFPReg(from, MoveOp::FLOAT32), toMemOperand(to));
+ }
+ return;
+ }
+
+ if (to.isFloatReg()) {
+ masm.Ldr(toFPReg(to, MoveOp::FLOAT32), toMemOperand(from));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch32 = temps.AcquireS();
+ masm.Ldr(scratch32, toMemOperand(from));
+ masm.Str(scratch32, toMemOperand(to));
+}
+
+void MoveEmitterARM64::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.Fmov(toFPReg(to, MoveOp::DOUBLE), toFPReg(from, MoveOp::DOUBLE));
+ } else {
+ masm.Str(toFPReg(from, MoveOp::DOUBLE), toMemOperand(to));
+ }
+ return;
+ }
+
+ if (to.isFloatReg()) {
+ masm.Ldr(toFPReg(to, MoveOp::DOUBLE), toMemOperand(from));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch = temps.AcquireD();
+ masm.Ldr(scratch, toMemOperand(from));
+ masm.Str(scratch, toMemOperand(to));
+}
+
+void MoveEmitterARM64::emitSimd128Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.Mov(toFPReg(to, MoveOp::SIMD128), toFPReg(from, MoveOp::SIMD128));
+ } else {
+ masm.Str(toFPReg(from, MoveOp::SIMD128), toMemOperand(to));
+ }
+ return;
+ }
+
+ if (to.isFloatReg()) {
+ masm.Ldr(toFPReg(to, MoveOp::SIMD128), toMemOperand(from));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch = temps.AcquireQ();
+ masm.Ldr(scratch, toMemOperand(from));
+ masm.Str(scratch, toMemOperand(to));
+}
+
+void MoveEmitterARM64::emitInt32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ if (to.isGeneralReg()) {
+ masm.Mov(toARMReg32(to), toARMReg32(from));
+ } else {
+ masm.Str(toARMReg32(from), toMemOperand(to));
+ }
+ return;
+ }
+
+ if (to.isGeneralReg()) {
+ masm.Ldr(toARMReg32(to), toMemOperand(from));
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch32 = temps.AcquireW();
+ masm.Ldr(scratch32, toMemOperand(from));
+ masm.Str(scratch32, toMemOperand(to));
+}
+
+void MoveEmitterARM64::emitGeneralMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ MOZ_ASSERT(to.isGeneralReg() || to.isMemory());
+ if (to.isGeneralReg()) {
+ masm.Mov(toARMReg64(to), toARMReg64(from));
+ } else {
+ masm.Str(toARMReg64(from), toMemOperand(to));
+ }
+ return;
+ }
+
+ // {Memory OR EffectiveAddress} -> Register move.
+ if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
+ if (from.isMemory()) {
+ masm.Ldr(toARMReg64(to), toMemOperand(from));
+ } else {
+ masm.Add(toARMReg64(to), toARMReg64(from), Operand(from.disp()));
+ }
+ return;
+ }
+
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ // Memory -> Memory move.
+ if (from.isMemory()) {
+ MOZ_ASSERT(to.isMemory());
+ masm.Ldr(scratch64, toMemOperand(from));
+ masm.Str(scratch64, toMemOperand(to));
+ return;
+ }
+
+ // EffectiveAddress -> Memory move.
+ MOZ_ASSERT(from.isEffectiveAddress());
+ MOZ_ASSERT(to.isMemory());
+ masm.Add(scratch64, toARMReg64(from), Operand(from.disp()));
+ masm.Str(scratch64, toMemOperand(to));
+}
+
+MemOperand MoveEmitterARM64::cycleSlot() {
+ // Using SP as stack pointer requires alignment preservation below.
+ MOZ_ASSERT(!masm.GetStackPointer64().Is(sp));
+
+  // Allocate a slot for breaking cycles if we have not already done so.
+ if (pushedAtCycle_ == -1) {
+ static_assert(SpillSlotSize == 16);
+ masm.reserveStack(SpillSlotSize);
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ return MemOperand(masm.GetStackPointer64(),
+ masm.framePushed() - pushedAtCycle_);
+}
+
+void MoveEmitterARM64::breakCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type) {
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ masm.Ldr(cycleGeneralReg_.W(), toMemOperand(to));
+ } else {
+ masm.Fmov(cycleGeneralReg_.W(), toFPReg(to, type));
+ }
+ break;
+
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ masm.Ldr(cycleGeneralReg_.X(), toMemOperand(to));
+ } else {
+ masm.Fmov(cycleGeneralReg_.X(), toFPReg(to, type));
+ }
+ break;
+
+ case MoveOp::SIMD128:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch128 = temps.AcquireQ();
+ masm.Ldr(scratch128, toMemOperand(to));
+ masm.Str(scratch128, cycleSlot());
+ } else {
+ masm.Str(toFPReg(to, type), cycleSlot());
+ }
+ break;
+
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ masm.Ldr(cycleGeneralReg_.W(), toMemOperand(to));
+ } else {
+ masm.Mov(cycleGeneralReg_.W(), toARMReg32(to));
+ }
+ break;
+
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ masm.Ldr(cycleGeneralReg_.X(), toMemOperand(to));
+ } else {
+ masm.Mov(cycleGeneralReg_.X(), toARMReg64(to));
+ }
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterARM64::completeCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type) {
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ masm.Str(cycleGeneralReg_.W(), toMemOperand(to));
+ } else {
+ masm.Fmov(toFPReg(to, type), cycleGeneralReg_.W());
+ }
+ break;
+
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ masm.Str(cycleGeneralReg_.X(), toMemOperand(to));
+ } else {
+ masm.Fmov(toFPReg(to, type), cycleGeneralReg_.X());
+ }
+ break;
+
+ case MoveOp::SIMD128:
+ if (to.isMemory()) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMFPRegister scratch = temps.AcquireQ();
+ masm.Ldr(scratch, cycleSlot());
+ masm.Str(scratch, toMemOperand(to));
+ } else {
+ masm.Ldr(toFPReg(to, type), cycleSlot());
+ }
+ break;
+
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ masm.Str(cycleGeneralReg_.W(), toMemOperand(to));
+ } else {
+ masm.Mov(toARMReg32(to), cycleGeneralReg_.W());
+ }
+ break;
+
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ masm.Str(cycleGeneralReg_.X(), toMemOperand(to));
+ } else {
+ masm.Mov(toARMReg64(to), cycleGeneralReg_.X());
+ }
+ break;
+
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
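To make the cycle path above concrete: for a parallel swap of x0 and x1 resolved as a GENERAL-type cycle, the emitter produces roughly the following (a sketch; the exact ordering of moves is decided by the MoveResolver, not by this file):

    // cycle begin, move x0 -> x1:
    //   breakCycle saves the destination:   Mov(cycleGeneralReg_.X(), x1)
    //   the move itself is then emitted:    Mov(x1, x0)
    // cycle end, move x1 -> x0:
    //   completeCycle restores the value:   Mov(x0, cycleGeneralReg_.X())

Afterwards x0 holds the old x1 and x1 holds the old x0, using only the reserved scratch register as temporary storage.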
diff --git a/js/src/jit/arm64/MoveEmitter-arm64.h b/js/src/jit/arm64/MoveEmitter-arm64.h
new file mode 100644
index 0000000000..fec2e3e012
--- /dev/null
+++ b/js/src/jit/arm64/MoveEmitter-arm64.h
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_MoveEmitter_arm64_h
+#define jit_arm64_MoveEmitter_arm64_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+#include "jit/Registers.h"
+
+namespace js {
+namespace jit {
+
+class CodeGenerator;
+
+class MoveEmitterARM64 {
+ bool inCycle_;
+ MacroAssembler& masm;
+
+ // A scratch general register used to break cycles.
+ ARMRegister cycleGeneralReg_;
+
+ // Original stack push value.
+ uint32_t pushedAtStart_;
+
+ // This stores a stack offset to a spill location, snapshotting
+ // codegen->framePushed_ at the time it was allocated. It is -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+
+ void assertDone() { MOZ_ASSERT(!inCycle_); }
+
+ MemOperand cycleSlot();
+ MemOperand toMemOperand(const MoveOperand& operand) const;
+ ARMRegister toARMReg32(const MoveOperand& operand) const {
+ MOZ_ASSERT(operand.isGeneralReg());
+ return ARMRegister(operand.reg(), 32);
+ }
+ ARMRegister toARMReg64(const MoveOperand& operand) const {
+ if (operand.isGeneralReg()) {
+ return ARMRegister(operand.reg(), 64);
+ } else {
+ return ARMRegister(operand.base(), 64);
+ }
+ }
+ ARMFPRegister toFPReg(const MoveOperand& operand, MoveOp::Type t) const {
+ MOZ_ASSERT(operand.isFloatReg());
+ switch (t) {
+ case MoveOp::FLOAT32:
+ return ARMFPRegister(operand.floatReg().encoding(), 32);
+ case MoveOp::DOUBLE:
+ return ARMFPRegister(operand.floatReg().encoding(), 64);
+ case MoveOp::SIMD128:
+ return ARMFPRegister(operand.floatReg().encoding(), 128);
+ default:
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad register type");
+ }
+ }
+
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void emitSimd128Move(const MoveOperand& from, const MoveOperand& to);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitGeneralMove(const MoveOperand& from, const MoveOperand& to);
+
+ void emitMove(const MoveOp& move);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type);
+
+ public:
+ explicit MoveEmitterARM64(MacroAssembler& masm)
+ : inCycle_(false),
+ masm(masm),
+ pushedAtStart_(masm.framePushed()),
+ pushedAtCycle_(-1) {}
+
+ ~MoveEmitterARM64() { assertDone(); }
+
+ void emit(const MoveResolver& moves);
+ void finish();
+ void setScratchRegister(Register reg) {}
+};
+
+typedef MoveEmitterARM64 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_arm64_MoveEmitter_arm64_h */
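Callers normally do not construct MoveOps by hand; they feed a MoveResolver and hand the resolved schedule to this emitter. A minimal sketch, assuming a MacroAssembler `masm` (the registers and move type are arbitrary examples):

    MoveResolver resolver;
    masm.propagateOOM(
        resolver.addMove(MoveOperand(r0), MoveOperand(r1), MoveOp::GENERAL));
    masm.propagateOOM(
        resolver.addMove(MoveOperand(r1), MoveOperand(r0), MoveOp::GENERAL));
    masm.propagateOOM(resolver.resolve());

    MoveEmitterARM64 emitter(masm);
    emitter.emit(resolver);  // cycles are broken through cycleGeneralReg_
    emitter.finish();        // asserts no open cycle and frees any spill slot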
diff --git a/js/src/jit/arm64/SharedICHelpers-arm64-inl.h b/js/src/jit/arm64/SharedICHelpers-arm64-inl.h
new file mode 100644
index 0000000000..8261a8b94f
--- /dev/null
+++ b/js/src/jit/arm64/SharedICHelpers-arm64-inl.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_SharedICHelpers_arm64_inl_h
+#define jit_arm64_SharedICHelpers_arm64_inl_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ // We assume that R0 has been pushed, and R2 is unused.
+ static_assert(R2 == ValueOperand(r0));
+
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.Sub(x0, FramePointer64, masm.GetStackPointer64());
+ masm.Sub(w0, w0, Operand(argSize));
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(w0.asUnsized(), frameSizeAddr);
+#endif
+
+ // Push frame descriptor (minus the return address) and perform the tail call.
+ static_assert(ICTailCallReg == lr);
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+
+ // The return address will be pushed by the VM wrapper, for compatibility
+ // with direct calls. Refer to the top of generateVMWrapper().
+ // ICTailCallReg (lr) already contains the return address (as we keep
+ // it there through the stub calls).
+
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+#ifdef DEBUG
+ // Compute frame size.
+ masm.Sub(ARMRegister(scratch, 64), FramePointer64, masm.GetStackPointer64());
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Push frame descriptor and return address.
+ // Save old frame pointer, stack pointer, and stub reg.
+ masm.PushFrameDescriptor(FrameType::BaselineJS);
+ masm.Push(ICTailCallReg);
+ masm.Push(FramePointer);
+
+ // Update the frame register.
+ masm.Mov(FramePointer64, masm.GetStackPointer64());
+
+ masm.Push(ICStubReg);
+
+ // Stack should remain 16-byte aligned.
+ masm.checkStackAlignment();
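+  // The resulting stub frame, from higher to lower addresses, is:
+  //   frame descriptor
+  //   return address (ICTailCallReg)
+  //   saved FramePointer    <- the new FramePointer points here
+  //   ICStubReg             <- reloaded from here by EmitBaselineLeaveStubFrame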
+}
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_arm64_SharedICHelpers_arm64_inl_h
diff --git a/js/src/jit/arm64/SharedICHelpers-arm64.h b/js/src/jit/arm64/SharedICHelpers-arm64.h
new file mode 100644
index 0000000000..2ea45c80fb
--- /dev/null
+++ b/js/src/jit/arm64/SharedICHelpers-arm64.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_SharedICHelpers_arm64_h
+#define jit_arm64_SharedICHelpers_arm64_h
+
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on the
+// stack on ARM).
+static const size_t ICStackValueOffset = 0;
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+ // No-op on ARM because link register is always holding the return address.
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+ // No-op on ARM because link register is always holding the return address.
+}
+
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Load stubcode pointer from the ICStub.
+ // R2 won't be active when we call ICs, so we can use r0.
+ static_assert(R2 == ValueOperand(r0));
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
+
+ // Call the stubcode via a direct branch-and-link.
+ masm.Blr(x0);
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+
+inline void EmitReturnFromIC(MacroAssembler& masm) {
+ masm.abiret(); // Defaults to lr.
+}
+
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ const ARMRegister scratch64 = temps.AcquireX();
+
+ Address stubAddr(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP);
+ masm.loadPtr(stubAddr, ICStubReg);
+
+ masm.moveToStackPtr(FramePointer);
+
+ // Pop values, discarding the frame descriptor.
+ masm.pop(FramePointer, ICTailCallReg, scratch64.asUnsized());
+
+ // Stack should remain 16-byte aligned.
+ masm.checkStackAlignment();
+}
+
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ // On AArch64, lr is clobbered by guardedCallPreBarrier. Save it first.
+ masm.push(lr);
+ masm.guardedCallPreBarrier(addr, type);
+ masm.pop(lr);
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg.
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_arm64_SharedICHelpers_arm64_h
diff --git a/js/src/jit/arm64/SharedICRegisters-arm64.h b/js/src/jit/arm64/SharedICRegisters-arm64.h
new file mode 100644
index 0000000000..1aa49d651c
--- /dev/null
+++ b/js/src/jit/arm64/SharedICRegisters-arm64.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_arm64_SharedICRegisters_arm64_h
+#define jit_arm64_SharedICRegisters_arm64_h
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls.
+// R1 value should be preserved across calls.
+static constexpr Register R0_ = r2;
+static constexpr Register R1_ = r19;
+static constexpr Register R2_ = r0;
+
+static constexpr ValueOperand R0(R0_);
+static constexpr ValueOperand R1(R1_);
+static constexpr ValueOperand R2(R2_);
+
+// ICTailCallReg and ICStubReg use registers that are not preserved across
+// calls.
+static constexpr Register ICTailCallReg = r30;
+static constexpr Register ICStubReg = r9;
+
+// R7 - R9 are generally available for use within stubcode.
+
+// Note that ICTailCallReg is actually just the link register. In ARM64 code
+// emission, we do not clobber ICTailCallReg since we keep the return address
+// for calls there.
+
+static constexpr FloatRegister FloatReg0 = {FloatRegisters::d0,
+ FloatRegisters::Double};
+static constexpr FloatRegister FloatReg1 = {FloatRegisters::d1,
+ FloatRegisters::Double};
+static constexpr FloatRegister FloatReg2 = {FloatRegisters::d2,
+ FloatRegisters::Double};
+static constexpr FloatRegister FloatReg3 = {FloatRegisters::d3,
+ FloatRegisters::Double};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_arm64_SharedICRegisters_arm64_h
diff --git a/js/src/jit/arm64/Trampoline-arm64.cpp b/js/src/jit/arm64/Trampoline-arm64.cpp
new file mode 100644
index 0000000000..36f7f24d02
--- /dev/null
+++ b/js/src/jit/arm64/Trampoline-arm64.cpp
@@ -0,0 +1,840 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/arm64/SharedICHelpers-arm64.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+/* This method generates a trampoline on ARM64 for a C++ function with
+ * the following signature:
+ * bool blah(void* code, int argc, Value* argv,
+ * JSObject* scopeChain, Value* vp)
+ * ...using standard AArch64 calling convention
+ */
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ const Register reg_code = IntArgReg0; // EnterJitData::jitcode.
+ const Register reg_argc = IntArgReg1; // EnterJitData::maxArgc.
+ const Register reg_argv = IntArgReg2; // EnterJitData::maxArgv.
+ const Register reg_osrFrame = IntArgReg3; // EnterJitData::osrFrame.
+ const Register reg_callee = IntArgReg4; // EnterJitData::calleeToken.
+ const Register reg_scope = IntArgReg5; // EnterJitData::scopeChain.
+ const Register reg_osrNStack =
+ IntArgReg6; // EnterJitData::osrNumStackValues.
+ const Register reg_vp = IntArgReg7; // Address of EnterJitData::result.
+
+ static_assert(OsrFrameReg == IntArgReg3);
+
+ // During the pushes below, use the normal stack pointer.
+ masm.SetStackPointer64(sp);
+
+ // Save return address and old frame pointer; set new frame pointer.
+ masm.push(r30, r29);
+ masm.moveStackPtrTo(r29);
+
+ // Save callee-save integer registers.
+ // Also save x7 (reg_vp) and x30 (lr), for use later.
+ masm.push(r19, r20, r21, r22);
+ masm.push(r23, r24, r25, r26);
+ masm.push(r27, r28, r7, r30);
+
+ // Save callee-save floating-point registers.
+ // AArch64 ABI specifies that only the lower 64 bits must be saved.
+ masm.push(d8, d9, d10, d11);
+ masm.push(d12, d13, d14, d15);
+
+#ifdef DEBUG
+ // Emit stack canaries.
+ masm.movePtr(ImmWord(0xdeadd00d), r23);
+ masm.movePtr(ImmWord(0xdeadd11d), r24);
+ masm.push(r23, r24);
+#endif
+
+ // Common code below attempts to push single registers at a time,
+ // which breaks the stack pointer's 16-byte alignment requirement.
+ // Note that movePtr() is invalid because StackPointer is treated as xzr.
+ //
+ // FIXME: After testing, this entire function should be rewritten to not
+ // use the PseudoStackPointer: since the amount of data pushed is
+ // precalculated, we can just allocate the whole frame header at once and
+ // index off sp. This will save a significant number of instructions where
+ // Push() updates sp.
+ masm.Mov(PseudoStackPointer64, sp);
+ masm.SetStackPointer64(PseudoStackPointer64);
+
+ // Remember stack depth without padding and arguments.
+ masm.moveStackPtrTo(r19);
+
+ // If constructing, include newTarget in argument vector.
+ {
+ Label noNewTarget;
+ Imm32 constructingToken(CalleeToken_FunctionConstructing);
+ masm.branchTest32(Assembler::Zero, reg_callee, constructingToken,
+ &noNewTarget);
+ masm.add32(Imm32(1), reg_argc);
+ masm.bind(&noNewTarget);
+ }
+
+ // JitFrameLayout is as follows (higher is higher in memory):
+ // N*8 - [ JS argument vector ] (base 16-byte aligned)
+ // 8 - calleeToken
+ // 8 - frameDescriptor (16-byte aligned)
+ // 8 - returnAddress
+ // 8 - frame pointer (16-byte aligned, pushed by callee)
+
+ // Touch frame incrementally (a requirement for Windows).
+ //
+ // Use already saved callee-save registers r20 and r21 as temps.
+ //
+ // This has to be done outside the ScratchRegisterScope, as the temps are
+ // under demand inside the touchFrameValues call.
+
+ // Give sp 16-byte alignment and sync stack pointers.
+ masm.andToStackPtr(Imm32(~0xf));
+ // We needn't worry about the Gecko Profiler mark because touchFrameValues
+ // touches in large increments.
+ masm.touchFrameValues(reg_argc, r20, r21);
+ // Restore stack pointer, preserved above.
+ masm.moveToStackPtr(r19);
+
+ // Push the argument vector onto the stack.
+ // WARNING: destructively modifies reg_argv
+ {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+
+ const ARMRegister tmp_argc = temps.AcquireX();
+ const ARMRegister tmp_sp = temps.AcquireX();
+
+ Label noArguments;
+ Label loopHead;
+
+ masm.movePtr(reg_argc, tmp_argc.asUnsized());
+
+ // sp -= 8
+ // Since we're using PostIndex Str below, this is necessary to avoid
+ // overwriting the Gecko Profiler mark pushed above.
+ masm.subFromStackPtr(Imm32(8));
+
+ // sp -= 8 * argc
+ masm.Sub(PseudoStackPointer64, PseudoStackPointer64,
+ Operand(tmp_argc, vixl::SXTX, 3));
+
+ // Give sp 16-byte alignment and sync stack pointers.
+ masm.andToStackPtr(Imm32(~0xf));
+ masm.moveStackPtrTo(tmp_sp.asUnsized());
+
+ masm.branchTestPtr(Assembler::Zero, reg_argc, reg_argc, &noArguments);
+
+ // Begin argument-pushing loop.
+ // This could be optimized using Ldp and Stp.
+ {
+ masm.bind(&loopHead);
+
+ // Load an argument from argv, then increment argv by 8.
+ masm.Ldr(x24, MemOperand(ARMRegister(reg_argv, 64), Operand(8),
+ vixl::PostIndex));
+
+ // Store the argument to tmp_sp, then increment tmp_sp by 8.
+ masm.Str(x24, MemOperand(tmp_sp, Operand(8), vixl::PostIndex));
+
+ // Decrement tmp_argc and set the condition codes for the new value.
+ masm.Subs(tmp_argc, tmp_argc, Operand(1));
+
+ // Branch if arguments remain.
+ masm.B(&loopHead, vixl::Condition::NonZero);
+ }
+
+ masm.bind(&noArguments);
+ }
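+  // Note on the copy loop above: the Ldp/Stp optimization mentioned there
+  // would move two arguments per iteration, e.g. (sketch only, not emitted):
+  //   Ldp(x24, x25, MemOperand(ARMRegister(reg_argv, 64), 16, vixl::PostIndex));
+  //   Stp(x24, x25, MemOperand(tmp_sp, 16, vixl::PostIndex));
+  // with a scalar Ldr/Str tail when the argument count is odd.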
+ masm.checkStackAlignment();
+
+ // Push the calleeToken and the frame descriptor.
+ // The result address is used to store the actual number of arguments
+ // without adding an argument to EnterJIT.
+ {
+ vixl::UseScratchRegisterScope temps(&masm.asVIXL());
+ MOZ_ASSERT(temps.IsAvailable(ScratchReg64)); // ip0
+ temps.Exclude(ScratchReg64);
+ Register scratch = ScratchReg64.asUnsized();
+ masm.push(reg_callee);
+
+ // Push the descriptor.
+ masm.unboxInt32(Address(reg_vp, 0x0), scratch);
+ masm.PushFrameDescriptorForJitCall(FrameType::CppToJSJit, scratch, scratch);
+ }
+ masm.checkStackAlignment();
+
+ Label osrReturnPoint;
+ {
+ // Check for Interpreter -> Baseline OSR.
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(OsrFrameReg);
+ regs.take(reg_code);
+ regs.take(reg_osrNStack);
+ MOZ_ASSERT(!regs.has(ReturnReg), "ReturnReg matches reg_code");
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register scratch = regs.takeAny();
+
+ // Frame prologue.
+ masm.Adr(ARMRegister(scratch, 64), &osrReturnPoint);
+ masm.push(scratch, FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ // Reserve frame.
+ masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
+
+ Register framePtrScratch = regs.takeAny();
+ masm.touchFrameValues(reg_osrNStack, scratch, framePtrScratch);
+ masm.moveStackPtrTo(framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ // scratch = num_stack_values * sizeof(Value).
+ masm.Lsl(ARMRegister(scratch, 32), ARMRegister(reg_osrNStack, 32), 3);
+ masm.subFromStackPtr(scratch);
+
+ // Enter exit frame.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(xzr); // Push xzr for a fake return address.
+ masm.push(FramePointer);
+ // No GC things to mark: push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.push(reg_code);
+
+ // Initialize the frame, including filling in the slots.
+ using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(r19);
+ masm.passABIArg(framePtrScratch); // BaselineFrame.
+ masm.passABIArg(reg_osrFrame); // InterpreterFrame.
+ masm.passABIArg(reg_osrNStack);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ masm.pop(scratch);
+ MOZ_ASSERT(scratch != ReturnReg);
+
+ masm.addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
+
+ Label error;
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(FramePointer, regs.getAny());
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(scratch);
+
+ // OOM: frame epilogue, load error value, discard return address and return.
+ masm.bind(&error);
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.addToStackPtr(Imm32(sizeof(uintptr_t))); // Return address.
+ masm.syncStackPtr();
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.B(&osrReturnPoint);
+
+ masm.bind(&notOsr);
+ masm.movePtr(reg_scope, R1_);
+ }
+
+ // The callee will push the return address and frame pointer on the stack,
+ // thus we check that the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+ // Call function.
+ // Since AArch64 doesn't have the pc register available, the callee must push
+ // lr.
+ masm.callJitNoProfiler(reg_code);
+
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&osrReturnPoint);
+
+ // Discard arguments and padding. Set sp to the address of the saved
+ // registers. In debug builds we have to include the two stack canaries
+ // checked below.
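+  // The 20 slots below are the 12 integer callee-saves (r19-r28, r7, r30) and
+  // the 8 floating-point callee-saves (d8-d15) pushed in the prologue; debug
+  // builds add two more slots for the canaries.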
+#ifdef DEBUG
+ static constexpr size_t SavedRegSize = 22 * sizeof(void*);
+#else
+ static constexpr size_t SavedRegSize = 20 * sizeof(void*);
+#endif
+ masm.computeEffectiveAddress(Address(FramePointer, -int32_t(SavedRegSize)),
+ masm.getStackPointer());
+
+ masm.syncStackPtr();
+ masm.SetStackPointer64(sp);
+
+#ifdef DEBUG
+ // Check that canaries placed on function entry are still present.
+ masm.pop(r24, r23);
+ Label x23OK, x24OK;
+
+ masm.branchPtr(Assembler::Equal, r23, ImmWord(0xdeadd00d), &x23OK);
+ masm.breakpoint();
+ masm.bind(&x23OK);
+
+ masm.branchPtr(Assembler::Equal, r24, ImmWord(0xdeadd11d), &x24OK);
+ masm.breakpoint();
+ masm.bind(&x24OK);
+#endif
+
+ // Restore callee-save floating-point registers.
+ masm.pop(d15, d14, d13, d12);
+ masm.pop(d11, d10, d9, d8);
+
+ // Restore callee-save integer registers.
+ // Also restore x7 (reg_vp) and x30 (lr).
+ masm.pop(r30, r7, r28, r27);
+ masm.pop(r26, r25, r24, r23);
+ masm.pop(r22, r21, r20, r19);
+
+ // Store return value (in JSReturnReg = x2 to just-popped reg_vp).
+ masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
+
+ // Restore old frame pointer.
+ masm.pop(r29, r30);
+
+ // Return using the value popped into x30.
+ masm.abiret();
+
+ // Reset stack pointer.
+ masm.SetStackPointer64(PseudoStackPointer64);
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ // Not supported, or not implemented yet.
+ // TODO: Implement along with the corresponding stack-walker changes, in
+ // coordination with the Gecko Profiler, see bug 1635987 and follow-ups.
+ return mozilla::Nothing{};
+}
+
+static void PushRegisterDump(MacroAssembler& masm) {
+ const LiveRegisterSet First28GeneralRegisters = LiveRegisterSet(
+ GeneralRegisterSet(Registers::AllMask &
+ ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28)),
+ FloatRegisterSet(FloatRegisters::NoneMask));
+
+ const LiveRegisterSet AllFloatRegisters =
+ LiveRegisterSet(GeneralRegisterSet(Registers::NoneMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+ // Push all general-purpose registers.
+ //
+ // The ARM64 ABI does not treat SP as a normal register that can
+ // be pushed. So pushing happens in two phases.
+ //
+  // Registers are pushed in reverse order of their register codes.
+ //
+ // See block comment in MacroAssembler.h for further required invariants.
+
+ // First, push the last four registers, passing zero for sp.
+ // Zero is pushed for x28 and x31: the pseudo-SP and SP, respectively.
+ masm.asVIXL().Push(xzr, x30, x29, xzr);
+
+ // Second, push the first 28 registers that serve no special purpose.
+ masm.PushRegsInMask(First28GeneralRegisters);
+
+ // Finally, push all floating-point registers, completing the RegisterDump.
+ masm.PushRegsInMask(AllFloatRegisters);
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+ // The InvalidationBailoutStack saved in r0 must be:
+ // - osiPointReturnAddress_
+ // - ionScript_ (pushed by CodeGeneratorARM64::generateInvalidateEpilogue())
+ // - regs_ (pushed here)
+ // - fpregs_ (pushed here) [=r0]
+ PushRegisterDump(masm);
+ masm.moveStackPtrTo(r0);
+
+ // Reserve space for InvalidationBailout's bailoutInfo outparam.
+ masm.Sub(x1, masm.GetStackPointer64(), Operand(sizeof(void*)));
+ masm.moveToStackPtr(r1);
+
+ using Fn =
+ bool (*)(InvalidationBailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupUnalignedABICall(r10);
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+
+ masm.callWithABI<Fn, InvalidationBailout>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(r2); // Get the bailoutInfo outparam.
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+
+ // Save the return address for later.
+ masm.push(lr);
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ // Load the information that the rectifier needs from the stack.
+ masm.loadNumActualArgs(FramePointer, r0);
+ masm.loadPtr(
+ Address(FramePointer, RectifierFrameLayout::offsetOfCalleeToken()), r1);
+
+ // Extract a JSFunction pointer from the callee token and keep the
+ // intermediary to avoid later recalculation.
+ masm.And(x5, x1, Operand(CalleeTokenMask));
+
+ // Get the arguments from the function object.
+ masm.loadFunctionArgCount(x5.asUnsized(), x6.asUnsized());
+
+ static_assert(CalleeToken_FunctionConstructing == 0x1,
+ "Constructing must be low-order bit");
+ masm.And(x4, x1, Operand(CalleeToken_FunctionConstructing));
+ masm.Add(x7, x6, x4);
+
+ // Copy the number of actual arguments into r8.
+ masm.mov(r0, r8);
+
+ // Calculate the position that our arguments are at before sp gets modified.
+ masm.Add(x3, masm.GetStackPointer64(), Operand(x8, vixl::LSL, 3));
+ masm.Add(x3, x3, Operand(sizeof(RectifierFrameLayout)));
+
+ // If the number of Values without |this| is even, push 8 padding bytes to
+ // ensure the stack is 16-byte aligned.
+ Label noPadding;
+ masm.Tbnz(x7, 0, &noPadding);
+ masm.asVIXL().Push(xzr);
+ masm.bind(&noPadding);
+
+ {
+ Label notConstructing;
+ masm.Cbz(x4, &notConstructing);
+
+    // |new.target| lives at the end of the pushed args.
+    // NB: The arg vector holder starts at the beginning of the last arg;
+    // add one Value to get to argv[argc].
+ masm.loadPtr(Address(r3, sizeof(Value)), r4);
+ masm.Push(r4);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Calculate the number of undefineds that need to be pushed.
+ masm.Sub(w2, w6, w8);
+
+ // Put an undefined in a register so it can be pushed.
+ masm.moveValue(UndefinedValue(), ValueOperand(r4));
+
+ // Push undefined N times.
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+ masm.Push(r4);
+ masm.Subs(w2, w2, Operand(1));
+ masm.B(&undefLoopTop, Assembler::NonZero);
+ }
+
+ // Arguments copy loop. Copy for x8 >= 0 to include |this|.
+ {
+ Label copyLoopTop;
+ masm.bind(&copyLoopTop);
+ masm.Ldr(x4, MemOperand(x3, -sizeof(Value), vixl::PostIndex));
+ masm.Push(r4);
+ masm.Subs(x8, x8, Operand(1));
+ masm.B(&copyLoopTop, Assembler::NotSigned);
+ }
+
+ masm.push(r1); // Callee token.
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, r0, r0);
+
+ // Call the target function.
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(r5, r3);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(r3);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(r5, r3, &noBaselineScript);
+ masm.callJitNoProfiler(r3);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(r5, r3);
+ masm.callJitNoProfiler(r3);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // This assumes no SIMD registers, as JS does not support SIMD.
+
+ // The stack saved in spArg must be (higher entries have higher memory
+ // addresses):
+ // - snapshotOffset_
+ // - frameSize_
+ // - regs_
+ // - fpregs_ (spArg + 0)
+ PushRegisterDump(masm);
+ masm.moveStackPtrTo(spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, r0);
+
+  // Call the bailout function, giving a pointer to the structure we just
+  // blitted onto the stack.
+ // Make space for the BaselineBailoutInfo* outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.moveStackPtrTo(r1);
+
+ using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupUnalignedABICall(r2);
+ masm.passABIArg(r0);
+ masm.passABIArg(r1);
+ masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ // Get the bailoutInfo outparam.
+ masm.pop(r2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set must be a superset of the Volatile register set.");
+
+ // Unlike on other platforms, it is the responsibility of the VM *callee* to
+ // push the return address, while the caller must ensure that the address
+ // is stored in lr on entry. This allows the VM wrapper to work with both
+ // direct calls and tail calls.
+ masm.push(lr);
+
+ // First argument is the JSContext.
+ Register reg_cx = IntArgReg0;
+ regs.take(reg_cx);
+
+ // Stack is:
+ // ... frame ...
+  //  +16 [args]
+ // +8 descriptor
+ // +0 returnAddress (pushed by this function, caller sets as lr)
+ //
+ // Push the frame pointer to finish the exit frame, then link it up.
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+ masm.loadJSContext(reg_cx);
+ masm.enterExitFrame(reg_cx, regs.getAny(), &f);
+
+ // Save the current stack pointer as the base for copying arguments.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ // argsBase can't be an argument register. Bad things would happen if
+ // the MoveResolver didn't throw an assertion failure first.
+ argsBase = r8;
+ regs.take(argsBase);
+ masm.Add(ARMRegister(argsBase, 64), masm.GetStackPointer64(),
+ Operand(ExitFrameLayout::SizeWithFooter()));
+ }
+
+ // Reserve space for any outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Bool:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(int64_t));
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.moveStackPtrTo(outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(reg_cx);
+
+ size_t argDisp = 0;
+
+ // Copy arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp),
+ (f.argPassedInFloatReg(explicitArg) ? MoveOp::DOUBLE
+ : MoveOp::GENERAL));
+ argDisp += sizeof(void*);
+ break;
+
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+
+ case VMFunctionData::DoubleByValue:
+ case VMFunctionData::DoubleByRef:
+ MOZ_CRASH("NYI: AArch64 callVM should not be used with 128bit values.");
+ }
+ }
+
+ // Copy the semi-implicit outparam, if any.
+ // It is not a C++-abi outparam, which would get passed in the
+ // outparam register, but a real parameter to the function, which
+ // was stack-allocated above.
+ if (outReg != InvalidReg) {
+ masm.passABIArg(outReg);
+ }
+
+ masm.callWithABI(nativeFun, MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // SP is used to transfer stack across call boundaries.
+ masm.initPseudoStackPtr();
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, r0, r0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.branchIfFalseBool(r0, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Value:
+ masm.Ldr(ARMRegister(JSReturnReg, 64),
+ MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Int32:
+ masm.Ldr(ARMRegister(ReturnReg, 32),
+ MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(int64_t));
+ break;
+
+ case Type_Bool:
+ masm.Ldrb(ARMRegister(ReturnReg, 32),
+ MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(int64_t));
+ break;
+
+ case Type_Double:
+ masm.Ldr(ARMFPRegister(ReturnDoubleReg, 64),
+ MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(double));
+ break;
+
+ case Type_Pointer:
+ masm.Ldr(ARMRegister(ReturnReg, 64),
+ MemOperand(masm.GetStackPointer64()));
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ // Until C++ code is instrumented against Spectre, prevent speculative
+ // execution from returning any private data.
+ if (f.returnsData() && JitOptions.spectreJitToCxxCalls) {
+ masm.speculationBarrier();
+ }
+
+ // Pop ExitFooterFrame and the frame pointer.
+ masm.leaveExitFrame(0);
+ masm.pop(FramePointer);
+
+ // Return. Subtract sizeof(void*) for the frame pointer.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ static_assert(PreBarrierReg == r1);
+ Register temp1 = r2;
+ Register temp2 = r3;
+ Register temp3 = r4;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet regs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+
+ // Also preserve the return address.
+ regs.add(lr);
+
+ masm.PushRegsInMask(regs);
+
+ masm.movePtr(ImmPtr(cx->runtime()), r3);
+
+ masm.setupUnalignedABICall(r0);
+ masm.passABIArg(r3);
+ masm.passABIArg(PreBarrierReg);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ // Pop the volatile regs and restore LR.
+ masm.PopRegsInMask(regs);
+ masm.abiret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.abiret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(r1, r2);
+}
diff --git a/js/src/jit/arm64/vixl/.clang-format b/js/src/jit/arm64/vixl/.clang-format
new file mode 100644
index 0000000000..122a79540d
--- /dev/null
+++ b/js/src/jit/arm64/vixl/.clang-format
@@ -0,0 +1,4 @@
+BasedOnStyle: Chromium
+
+# Ignore all comments because they aren't reflowed properly.
+CommentPragmas: "^"
diff --git a/js/src/jit/arm64/vixl/AUTHORS b/js/src/jit/arm64/vixl/AUTHORS
new file mode 100644
index 0000000000..257ec9d32b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/AUTHORS
@@ -0,0 +1,8 @@
+# Below is a list of people and organisations that have contributed to the VIXL
+# project. Entries should be added to the list as:
+#
+# Name/Organization <email address>
+
+ARM Ltd. <*@arm.com>
+Google Inc. <*@google.com>
+Linaro <*@linaro.org>
diff --git a/js/src/jit/arm64/vixl/Assembler-vixl.cpp b/js/src/jit/arm64/vixl/Assembler-vixl.cpp
new file mode 100644
index 0000000000..6ed31cef78
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.cpp
@@ -0,0 +1,5318 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+
+#include <cmath>
+
+#include "jit/arm64/vixl/MacroAssembler-vixl.h"
+
+namespace vixl {
+
+// CPURegList utilities.
+CPURegister CPURegList::PopLowestIndex() {
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountTrailingZeros(list_);
+ VIXL_ASSERT((1ULL << index) & list_);
+ Remove(index);
+ return CPURegister(index, size_, type_);
+}
+
+
+CPURegister CPURegList::PopHighestIndex() {
+ VIXL_ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountLeadingZeros(list_);
+ index = kRegListSizeInBits - 1 - index;
+ VIXL_ASSERT((1ULL << index) & list_);
+ Remove(index);
+ return CPURegister(index, size_, type_);
+}
+
+
+bool CPURegList::IsValid() const {
+ if ((type_ == CPURegister::kRegister) ||
+ (type_ == CPURegister::kVRegister)) {
+ bool is_valid = true;
+ // Try to create a CPURegister for each element in the list.
+ for (int i = 0; i < kRegListSizeInBits; i++) {
+ if (((list_ >> i) & 1) != 0) {
+ is_valid &= CPURegister(i, size_, type_).IsValid();
+ }
+ }
+ return is_valid;
+ } else if (type_ == CPURegister::kNoRegister) {
+ // We can't use IsEmpty here because that asserts IsValid().
+ return list_ == 0;
+ } else {
+ return false;
+ }
+}
+
+
+void CPURegList::RemoveCalleeSaved() {
+ if (type() == CPURegister::kRegister) {
+ Remove(GetCalleeSaved(RegisterSizeInBits()));
+ } else if (type() == CPURegister::kVRegister) {
+ Remove(GetCalleeSavedV(RegisterSizeInBits()));
+ } else {
+ VIXL_ASSERT(type() == CPURegister::kNoRegister);
+ VIXL_ASSERT(IsEmpty());
+ // The list must already be empty, so do nothing.
+ }
+}
+
+
+CPURegList CPURegList::Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3) {
+ return Union(list_1, Union(list_2, list_3));
+}
+
+
+CPURegList CPURegList::Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4) {
+ return Union(Union(list_1, list_2), Union(list_3, list_4));
+}
+
+
+CPURegList CPURegList::Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3) {
+ return Intersection(list_1, Intersection(list_2, list_3));
+}
+
+
+CPURegList CPURegList::Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4) {
+ return Intersection(Intersection(list_1, list_2),
+ Intersection(list_3, list_4));
+}
+
+
+CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+ return CPURegList(CPURegister::kRegister, size, 19, 29);
+}
+
+
+CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
+ return CPURegList(CPURegister::kVRegister, size, 8, 15);
+}
+
+
+CPURegList CPURegList::GetCallerSaved(unsigned size) {
+ // Registers x0-x18 and lr (x30) are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+ // Do not use lr directly to avoid initialisation order fiasco bugs for users.
+ list.Combine(Register(30, kXRegSize));
+ return list;
+}
+
+
+CPURegList CPURegList::GetCallerSavedV(unsigned size) {
+ // Registers d0-d7 and d16-d31 are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
+ list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
+ return list;
+}
+
+
+const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
+const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
+const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
+const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
+
+
+// Registers.
+#define WREG(n) w##n,
+const Register Register::wregisters[] = {
+REGISTER_CODE_LIST(WREG)
+};
+#undef WREG
+
+#define XREG(n) x##n,
+const Register Register::xregisters[] = {
+REGISTER_CODE_LIST(XREG)
+};
+#undef XREG
+
+#define BREG(n) b##n,
+const VRegister VRegister::bregisters[] = {
+REGISTER_CODE_LIST(BREG)
+};
+#undef BREG
+
+#define HREG(n) h##n,
+const VRegister VRegister::hregisters[] = {
+REGISTER_CODE_LIST(HREG)
+};
+#undef HREG
+
+#define SREG(n) s##n,
+const VRegister VRegister::sregisters[] = {
+REGISTER_CODE_LIST(SREG)
+};
+#undef SREG
+
+#define DREG(n) d##n,
+const VRegister VRegister::dregisters[] = {
+REGISTER_CODE_LIST(DREG)
+};
+#undef DREG
+
+#define QREG(n) q##n,
+const VRegister VRegister::qregisters[] = {
+REGISTER_CODE_LIST(QREG)
+};
+#undef QREG
+
+#define VREG(n) v##n,
+const VRegister VRegister::vregisters[] = {
+REGISTER_CODE_LIST(VREG)
+};
+#undef VREG
+
+
+const Register& Register::WRegFromCode(unsigned code) {
+ if (code == kSPRegInternalCode) {
+ return wsp;
+ } else {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ return wregisters[code];
+ }
+}
+
+
+const Register& Register::XRegFromCode(unsigned code) {
+ if (code == kSPRegInternalCode) {
+ return sp;
+ } else {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ return xregisters[code];
+ }
+}
+
+
+const VRegister& VRegister::BRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return bregisters[code];
+}
+
+
+const VRegister& VRegister::HRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return hregisters[code];
+}
+
+
+const VRegister& VRegister::SRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return sregisters[code];
+}
+
+
+const VRegister& VRegister::DRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return dregisters[code];
+}
+
+
+const VRegister& VRegister::QRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return qregisters[code];
+}
+
+
+const VRegister& VRegister::VRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return vregisters[code];
+}
+
+
+const Register& CPURegister::W() const {
+ VIXL_ASSERT(IsValidRegister());
+ return Register::WRegFromCode(code_);
+}
+
+
+const Register& CPURegister::X() const {
+ VIXL_ASSERT(IsValidRegister());
+ return Register::XRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::B() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::BRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::H() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::HRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::S() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::SRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::D() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::DRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::Q() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::QRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::V() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::VRegFromCode(code_);
+}
+
+
+// Operand.
+Operand::Operand(int64_t immediate)
+ : immediate_(immediate),
+ reg_(NoReg),
+ shift_(NO_SHIFT),
+ extend_(NO_EXTEND),
+ shift_amount_(0) {}
+
+
+Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
+ : reg_(reg),
+ shift_(shift),
+ extend_(NO_EXTEND),
+ shift_amount_(shift_amount) {
+ VIXL_ASSERT(shift != MSL);
+ VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
+ VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
+ VIXL_ASSERT(!reg.IsSP());
+}
+
+
+Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
+ : reg_(reg),
+ shift_(NO_SHIFT),
+ extend_(extend),
+ shift_amount_(shift_amount) {
+ VIXL_ASSERT(reg.IsValid());
+ VIXL_ASSERT(shift_amount <= 4);
+ VIXL_ASSERT(!reg.IsSP());
+
+ // Extend modes SXTX and UXTX require a 64-bit register.
+ VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+}
+
+
+bool Operand::IsImmediate() const {
+ return reg_.Is(NoReg);
+}
+
+
+bool Operand::IsShiftedRegister() const {
+ return reg_.IsValid() && (shift_ != NO_SHIFT);
+}
+
+
+bool Operand::IsExtendedRegister() const {
+ return reg_.IsValid() && (extend_ != NO_EXTEND);
+}
+
+
+bool Operand::IsZero() const {
+ if (IsImmediate()) {
+ return immediate() == 0;
+ } else {
+ return reg().IsZero();
+ }
+}
+
+
+Operand Operand::ToExtendedRegister() const {
+ VIXL_ASSERT(IsShiftedRegister());
+ VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+ return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
+}
+
+
+// MemOperand
+MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+ VIXL_ASSERT(!regoffset.IsSP());
+ VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+
+ // SXTX extend mode requires a 64-bit offset register.
+ VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Shift shift,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+ VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+ VIXL_ASSERT(shift == LSL);
+}
+
+
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+
+ if (offset.IsImmediate()) {
+ offset_ = offset.immediate();
+ } else if (offset.IsShiftedRegister()) {
+ VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));
+
+ regoffset_ = offset.reg();
+ shift_ = offset.shift();
+ shift_amount_ = offset.shift_amount();
+
+ extend_ = NO_EXTEND;
+ offset_ = 0;
+
+ // These assertions match those in the shifted-register constructor.
+ VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+ VIXL_ASSERT(shift_ == LSL);
+ } else {
+ VIXL_ASSERT(offset.IsExtendedRegister());
+ VIXL_ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ extend_ = offset.extend();
+ shift_amount_ = offset.shift_amount();
+
+ shift_ = NO_SHIFT;
+ offset_ = 0;
+
+ // These assertions match those in the extended-register constructor.
+ VIXL_ASSERT(!regoffset_.IsSP());
+ VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+ VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+ }
+}
+
+
+bool MemOperand::IsImmediateOffset() const {
+ return (addrmode_ == Offset) && regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsRegisterOffset() const {
+ return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsPreIndex() const {
+ return addrmode_ == PreIndex;
+}
+
+
+bool MemOperand::IsPostIndex() const {
+ return addrmode_ == PostIndex;
+}
+
+
+void MemOperand::AddOffset(int64_t offset) {
+ VIXL_ASSERT(IsImmediateOffset());
+ offset_ += offset;
+}
+
+
+// Assembler
+Assembler::Assembler(PositionIndependentCodeOption pic)
+ : pic_(pic),
+ cpu_features_(CPUFeatures::AArch64LegacyBaseline())
+{
+ // Mozilla change: always enable all CPU features detected on the host.
+ cpu_features_.Combine(CPUFeatures::InferFromOS());
+
+ // Mozilla change: compile-time hard-coded value from js-config.mozbuild.
+#ifndef MOZ_AARCH64_JSCVT
+# error "MOZ_AARCH64_JSCVT must be defined."
+#elif MOZ_AARCH64_JSCVT >= 1
+ // Note that the vixl backend implements the JSCVT flag as a boolean, even
+ // though the ARMv8 documentation reserves 3 extra bits in this field for
+ // forward compatibility.
+ cpu_features_.Combine(CPUFeatures::kJSCVT);
+#endif
+}
+
+
+// Code generation.
+void Assembler::br(const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ Emit(BR | Rn(xn));
+}
+
+
+void Assembler::blr(const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ Emit(BLR | Rn(xn));
+}
+
+
+void Assembler::ret(const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ Emit(RET | Rn(xn));
+}
+
+
+void Assembler::NEONTable(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONTableOp op) {
+ VIXL_ASSERT(vd.Is16B() || vd.Is8B());
+ VIXL_ASSERT(vn.Is16B());
+ VIXL_ASSERT(AreSameFormat(vd, vm));
+ Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONTable(vd, vn, vm, NEON_TBL_1v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ USE(vn2);
+ VIXL_ASSERT(AreSameFormat(vn, vn2));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBL_2v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ USE(vn2, vn3);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBL_3v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ USE(vn2, vn3, vn4);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBL_4v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONTable(vd, vn, vm, NEON_TBX_1v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ USE(vn2);
+ VIXL_ASSERT(AreSameFormat(vn, vn2));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBX_2v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ USE(vn2, vn3);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBX_3v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ USE(vn2, vn3, vn4);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBX_4v);
+}
+
+
+void Assembler::add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, ADD);
+}
+
+
+void Assembler::adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, ADD);
+}
+
+
+void Assembler::cmn(const Register& rn,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ adds(zr, rn, operand);
+}
+
+
+void Assembler::sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, SUB);
+}
+
+
+void Assembler::subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, SUB);
+}
+
+
+void Assembler::cmp(const Register& rn, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ subs(zr, rn, operand);
+}
+
+
+void Assembler::neg(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sub(rd, zr, operand);
+}
+
+
+void Assembler::negs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ subs(rd, zr, operand);
+}
+
+
+void Assembler::adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void Assembler::adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void Assembler::sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void Assembler::sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void Assembler::ngc(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbc(rd, zr, operand);
+}
+
+
+void Assembler::ngcs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbcs(rd, zr, operand);
+}
+
+
+// Logical instructions.
+void Assembler::and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, AND);
+}
+
+
+void Assembler::bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BIC);
+}
+
+
+void Assembler::bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BICS);
+}
+
+
+void Assembler::orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORR);
+}
+
+
+void Assembler::orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORN);
+}
+
+
+void Assembler::eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EOR);
+}
+
+
+void Assembler::eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EON);
+}
+
+
+void Assembler::lslv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::lsrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::asrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rorv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// Bitfield operations.
+void Assembler::bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | BFM | N |
+ ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | SBFM | N |
+ ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | UBFM | N |
+ ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSEL);
+}
+
+
+void Assembler::csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINC);
+}
+
+
+void Assembler::csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINV);
+}
+
+
+void Assembler::csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSNEG);
+}
+
+
+void Assembler::cset(const Register& rd, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinc(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::csetm(const Register& rd, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinv(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMN);
+}
+
+
+void Assembler::ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMP);
+}
+
+
+void Assembler::DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op) {
+ Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32b(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32B | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32h(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32H | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32w(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32W | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32x(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32X | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32cb(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CB | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32ch(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CH | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32cw(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CW | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32cx(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CX | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
+}
+
+
+void Assembler::madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ DataProcessing3Source(rd, rn, rm, ra, MADD);
+}
+
+
+void Assembler::mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
+}
+
+
+void Assembler::msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ DataProcessing3Source(rd, rn, rm, ra, MSUB);
+}
+
+
+void Assembler::umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
+}
+
+
+void Assembler::smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
+}
+
+
+void Assembler::umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
+}
+
+
+void Assembler::smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
+}
+
+
+void Assembler::smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
+}
+
+
+void Assembler::sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::smulh(const Register& xd,
+ const Register& xn,
+ const Register& xm) {
+ VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
+ DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
+}
+
+
+void Assembler::umulh(const Register& xd,
+ const Register& xn,
+ const Register& xm) {
+ VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
+ DataProcessing3Source(xd, xn, xm, xzr, UMULH_x);
+}
+
+
+void Assembler::udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rbit(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, RBIT);
+}
+
+
+void Assembler::rev16(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, REV16);
+}
+
+
+void Assembler::rev32(const Register& rd,
+ const Register& rn) {
+ VIXL_ASSERT(rd.Is64Bits());
+ DataProcessing1Source(rd, rn, REV);
+}
+
+
+void Assembler::rev(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
+}
+
+
+void Assembler::clz(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLZ);
+}
+
+
+void Assembler::cls(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLS);
+}
+
+
+void Assembler::ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
+}
+
+
+void Assembler::stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
+}
+
+
+void Assembler::ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ VIXL_ASSERT(rt.Is64Bits());
+ LoadStorePair(rt, rt2, src, LDPSW_x);
+}
+
+
+void Assembler::LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // 'rt' and 'rt2' can only be aliased for stores.
+ VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ VIXL_ASSERT(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
+
+ int offset = static_cast<int>(addr.offset());
+ Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(offset, CalcLSPairDataSize(op));
+
+ Instr addrmodeop;
+ if (addr.IsImmediateOffset()) {
+ addrmodeop = LoadStorePairOffsetFixed;
+ } else {
+ VIXL_ASSERT(addr.offset() != 0);
+ if (addr.IsPreIndex()) {
+ addrmodeop = LoadStorePairPreIndexFixed;
+ } else {
+ VIXL_ASSERT(addr.IsPostIndex());
+ addrmodeop = LoadStorePairPostIndexFixed;
+ }
+ }
+ Emit(addrmodeop | memop);
+}
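+
+// For illustration, the three pair addressing modes handled above (a sketch
+// using the usual x0/x1/sp aliases):
+//
+//   ldp(x0, x1, MemOperand(sp, 16));             // immediate offset
+//   ldp(x0, x1, MemOperand(sp, 16, PreIndex));   // pre-index with writeback
+//   ldp(x0, x1, MemOperand(sp, 16, PostIndex));  // post-index with writeback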
+
+
+void Assembler::ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePairNonTemporal(rt, rt2, src,
+ LoadPairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePairNonTemporal(rt, rt2, dst,
+ StorePairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op) {
+ VIXL_ASSERT(!rt.Is(rt2));
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ VIXL_ASSERT(addr.IsImmediateOffset());
+
+ unsigned size = CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(op & LoadStorePairMask));
+ VIXL_ASSERT(IsImmLSPair(addr.offset(), size));
+ int offset = static_cast<int>(addr.offset());
+ Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
+}
+
+
+// Memory instructions.
+void Assembler::ldrb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LDRB_w, option);
+}
+
+
+void Assembler::strb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, dst, STRB_w, option);
+}
+
+
+void Assembler::ldrsb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
+}
+
+
+void Assembler::ldrh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LDRH_w, option);
+}
+
+
+void Assembler::strh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, dst, STRH_w, option);
+}
+
+
+void Assembler::ldrsh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
+}
+
+
+void Assembler::ldr(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LoadOpFor(rt), option);
+}
+
+
+void Assembler::str(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, dst, StoreOpFor(rt), option);
+}
+
+
+void Assembler::ldrsw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(rt.Is64Bits());
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LDRSW_x, option);
+}
+
+
+void Assembler::ldurb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LDRB_w, option);
+}
+
+
+void Assembler::sturb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, dst, STRB_w, option);
+}
+
+
+void Assembler::ldursb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
+}
+
+
+void Assembler::ldurh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LDRH_w, option);
+}
+
+
+void Assembler::sturh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, dst, STRH_w, option);
+}
+
+
+void Assembler::ldursh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
+}
+
+
+void Assembler::ldur(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LoadOpFor(rt), option);
+}
+
+
+void Assembler::stur(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, dst, StoreOpFor(rt), option);
+}
+
+
+void Assembler::ldursw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(rt.Is64Bits());
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LDRSW_x, option);
+}
+
+
+void Assembler::ldrsw(const Register& rt, int imm19) {
+ Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+void Assembler::ldr(const CPURegister& rt, int imm19) {
+ LoadLiteralOp op = LoadLiteralOpFor(rt);
+ Emit(op | ImmLLiteral(imm19) | Rt(rt));
+}
+
+// clang-format off
+#define COMPARE_AND_SWAP_W_X_LIST(V) \
+ V(cas, CAS) \
+ V(casa, CASA) \
+ V(casl, CASL) \
+ V(casal, CASAL)
+// clang-format on
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+ void Assembler::FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
+ LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w; \
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base())); \
+ }
+COMPARE_AND_SWAP_W_X_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
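+
+// For illustration, each entry in the list above expands (roughly) into a
+// method that picks the W or X encoding from the size of rt; e.g. V(cas, CAS)
+// yields:
+//
+//   void Assembler::cas(const Register& rs, const Register& rt,
+//                       const MemOperand& src) {
+//     VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+//     LoadStoreExclusive op = rt.Is64Bits() ? CAS_x : CAS_w;
+//     Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base()));
+//   }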
+
+// clang-format off
+#define COMPARE_AND_SWAP_W_LIST(V) \
+ V(casb, CASB) \
+ V(casab, CASAB) \
+ V(caslb, CASLB) \
+ V(casalb, CASALB) \
+ V(cash, CASH) \
+ V(casah, CASAH) \
+ V(caslh, CASLH) \
+ V(casalh, CASALH)
+// clang-format on
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+ void Assembler::FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
+ Emit(OP | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base())); \
+ }
+COMPARE_AND_SWAP_W_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+// clang-format off
+#define COMPARE_AND_SWAP_PAIR_LIST(V) \
+ V(casp, CASP) \
+ V(caspa, CASPA) \
+ V(caspl, CASPL) \
+ V(caspal, CASPAL)
+// clang-format on
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+ void Assembler::FN(const Register& rs, const Register& rs1, \
+ const Register& rt, const Register& rt1, \
+ const MemOperand& src) { \
+ USE(rs1, rt1); \
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
+ VIXL_ASSERT(AreEven(rs, rt)); \
+ VIXL_ASSERT(AreConsecutive(rs, rs1)); \
+ VIXL_ASSERT(AreConsecutive(rt, rt1)); \
+ LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w; \
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base())); \
+ }
+COMPARE_AND_SWAP_PAIR_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+void Assembler::prfm(PrefetchOperation op, int imm19) {
+ Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19));
+}
+
+
+// Exclusive-access instructions.
+void Assembler::stxrb(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stxrh(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stxr(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::ldxrb(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldxrh(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldxr(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::stxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base()));
+}
+
+
+void Assembler::ldxp(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base()));
+}
+
+
+void Assembler::stlxrb(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlxrh(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlxr(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::ldaxrb(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldaxrh(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldaxr(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::stlxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base()));
+}
+
+
+void Assembler::ldaxp(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base()));
+}
+
+
+void Assembler::stlrb(const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlrh(const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlr(const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STLR_x : STLR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::ldarb(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldarh(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldar(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+// These macros generate all the variations of the atomic memory operations,
+// such as ldadd, ldadda, ldaddb and staddl; an illustrative expansion follows
+// the macro definitions below.
+// For a full list of the methods with comments, see the assembler header file.
+
+// clang-format off
+#define ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(V, DEF) \
+ V(DEF, add, LDADD) \
+ V(DEF, clr, LDCLR) \
+ V(DEF, eor, LDEOR) \
+ V(DEF, set, LDSET) \
+ V(DEF, smax, LDSMAX) \
+ V(DEF, smin, LDSMIN) \
+ V(DEF, umax, LDUMAX) \
+ V(DEF, umin, LDUMIN)
+
+#define ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \
+ V(NAME, OP##_x, OP##_w) \
+ V(NAME##l, OP##L_x, OP##L_w) \
+ V(NAME##b, OP##B, OP##B) \
+ V(NAME##lb, OP##LB, OP##LB) \
+ V(NAME##h, OP##H, OP##H) \
+ V(NAME##lh, OP##LH, OP##LH)
+
+#define ATOMIC_MEMORY_LOAD_MODES(V, NAME, OP) \
+ ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \
+ V(NAME##a, OP##A_x, OP##A_w) \
+ V(NAME##al, OP##AL_x, OP##AL_w) \
+ V(NAME##ab, OP##AB, OP##AB) \
+ V(NAME##alb, OP##ALB, OP##ALB) \
+ V(NAME##ah, OP##AH, OP##AH) \
+ V(NAME##alh, OP##ALH, OP##ALH)
+// clang-format on
+
+#define DEFINE_ASM_LOAD_FUNC(FN, OP_X, OP_W) \
+ void Assembler::ld##FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
+ AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W; \
+ Emit(op | Rs(rs) | Rt(rt) | RnSP(src.base())); \
+ }
+#define DEFINE_ASM_STORE_FUNC(FN, OP_X, OP_W) \
+ void Assembler::st##FN(const Register& rs, const MemOperand& src) { \
+ VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \
+ ld##FN(rs, AppropriateZeroRegFor(rs), src); \
+ }
+
+ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_LOAD_MODES,
+ DEFINE_ASM_LOAD_FUNC)
+ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_STORE_MODES,
+ DEFINE_ASM_STORE_FUNC)
+
+#define DEFINE_ASM_SWP_FUNC(FN, OP_X, OP_W) \
+ void Assembler::FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
+ AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W; \
+ Emit(op | Rs(rs) | Rt(rt) | RnSP(src.base())); \
+ }
+
+ATOMIC_MEMORY_LOAD_MODES(DEFINE_ASM_SWP_FUNC, swp, SWP)
+
+#undef DEFINE_ASM_LOAD_FUNC
+#undef DEFINE_ASM_STORE_FUNC
+#undef DEFINE_ASM_SWP_FUNC
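+
+// As noted above, an illustrative (rough) expansion: the add entry yields
+// ldadd, ldaddl, ldadda and ldaddal plus the b/h variants, and every st<op>
+// form simply forwards to the matching ld<op> with the zero register as the
+// destination, e.g.
+//
+//   void Assembler::staddl(const Register& rs, const MemOperand& src) {
+//     VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));
+//     ldaddl(rs, AppropriateZeroRegFor(rs), src);
+//   }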
+
+void Assembler::prfm(PrefetchOperation op, const MemOperand& address,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ Prefetch(op, address, option);
+}
+
+
+void Assembler::prfum(PrefetchOperation op, const MemOperand& address,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ Prefetch(op, address, option);
+}
+
+
+void Assembler::sys(int op1, int crn, int crm, int op2, const Register& rt) {
+ Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(rt));
+}
+
+
+void Assembler::sys(int op, const Register& rt) {
+ Emit(SYS | SysOp(op) | Rt(rt));
+}
+
+
+void Assembler::dc(DataCacheOp op, const Register& rt) {
+ VIXL_ASSERT((op == CVAC) || (op == CVAU) || (op == CIVAC) || (op == ZVA));
+ sys(op, rt);
+}
+
+
+void Assembler::ic(InstructionCacheOp op, const Register& rt) {
+ VIXL_ASSERT(op == IVAU);
+ sys(op, rt);
+}
+
+
+// NEON structure loads and stores.
+Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
+ Instr addr_field = RnSP(addr.base());
+
+ if (addr.IsPostIndex()) {
+ VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex ==
+ static_cast<NEONLoadStoreMultiStructPostIndexOp>(
+ NEONLoadStoreSingleStructPostIndex));
+
+ addr_field |= NEONLoadStoreMultiStructPostIndex;
+ if (addr.offset() == 0) {
+ addr_field |= RmNot31(addr.regoffset());
+ } else {
+ // The immediate post index addressing mode is indicated by rm = 31.
+ // The immediate is implied by the number of vector registers used.
+ addr_field |= (0x1f << Rm_offset);
+ }
+ } else {
+ VIXL_ASSERT(addr.IsImmediateOffset() && (addr.offset() == 0));
+ }
+ return addr_field;
+}
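+
+// For illustration, the addressing forms accepted above (a sketch; the
+// register post-index form goes through the Operand-based MemOperand
+// constructor):
+//
+//   ld1(v0.V16B(), MemOperand(x0));                          // offset, imm 0
+//   ld1(v0.V16B(), MemOperand(x0, 16, PostIndex));           // rm = 31
+//   ld1(v0.V16B(), MemOperand(x0, Operand(x1), PostIndex));  // rm = x1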
+
+void Assembler::LoadStoreStructVerify(const VRegister& vt,
+ const MemOperand& addr,
+ Instr op) {
+#ifdef DEBUG
+ // Assert that the addressing mode is either offset (with immediate 0),
+ // post-index by an immediate equal to the size of the register list, or
+ // post-index by a value in a core register.
+ if (addr.IsImmediateOffset()) {
+ VIXL_ASSERT(addr.offset() == 0);
+ } else {
+ int offset = vt.SizeInBytes();
+ switch (op) {
+ case NEON_LD1_1v:
+ case NEON_ST1_1v:
+ offset *= 1; break;
+ case NEONLoadStoreSingleStructLoad1:
+ case NEONLoadStoreSingleStructStore1:
+ case NEON_LD1R:
+ offset = (offset / vt.lanes()) * 1; break;
+
+ case NEON_LD1_2v:
+ case NEON_ST1_2v:
+ case NEON_LD2:
+ case NEON_ST2:
+ offset *= 2; break;
+ case NEONLoadStoreSingleStructLoad2:
+ case NEONLoadStoreSingleStructStore2:
+ case NEON_LD2R:
+ offset = (offset / vt.lanes()) * 2; break;
+
+ case NEON_LD1_3v:
+ case NEON_ST1_3v:
+ case NEON_LD3:
+ case NEON_ST3:
+ offset *= 3; break;
+ case NEONLoadStoreSingleStructLoad3:
+ case NEONLoadStoreSingleStructStore3:
+ case NEON_LD3R:
+ offset = (offset / vt.lanes()) * 3; break;
+
+ case NEON_LD1_4v:
+ case NEON_ST1_4v:
+ case NEON_LD4:
+ case NEON_ST4:
+ offset *= 4; break;
+ case NEONLoadStoreSingleStructLoad4:
+ case NEONLoadStoreSingleStructStore4:
+ case NEON_LD4R:
+ offset = (offset / vt.lanes()) * 4; break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ VIXL_ASSERT(!addr.regoffset().Is(NoReg) ||
+ addr.offset() == offset);
+ }
+#else
+ USE(vt, addr, op);
+#endif
+}
+
+void Assembler::LoadStoreStruct(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreMultiStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+ VIXL_ASSERT(vt.IsVector() || vt.Is1D());
+ Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
+}
+
+
+void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+ Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const MemOperand& src) {
+ LoadStoreStruct(vt, src, NEON_LD1_1v);
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_LD1_2v);
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_LD1_3v);
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_LD1_4v);
+}
+
+
+void Assembler::ld2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_LD2);
+}
+
+
+void Assembler::ld2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
+}
+
+
+void Assembler::ld2r(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
+}
+
+
+void Assembler::ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_LD3);
+}
+
+
+void Assembler::ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
+}
+
+
+void Assembler::ld3r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
+}
+
+
+void Assembler::ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_LD4);
+}
+
+
+void Assembler::ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
+}
+
+
+void Assembler::ld4r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const MemOperand& src) {
+ LoadStoreStruct(vt, src, NEON_ST1_1v);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_ST1_2v);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_ST1_3v);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_ST1_4v);
+}
+
+
+void Assembler::st2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& dst) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, dst, NEON_ST2);
+}
+
+
+void Assembler::st2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& dst) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
+}
+
+
+void Assembler::st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& dst) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, dst, NEON_ST3);
+}
+
+
+void Assembler::st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& dst) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
+}
+
+
+void Assembler::st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& dst) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, dst, NEON_ST4);
+}
+
+
+void Assembler::st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& dst) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
+}
+
+
+void Assembler::LoadStoreStructSingle(const VRegister& vt,
+ uint32_t lane,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+
+ // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ unsigned lane_size = vt.LaneSizeInBytes();
+ VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size));
+
+ // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
+ // S and size fields.
+ lane *= lane_size;
+ if (lane_size == 8) lane++;
+
+ Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
+ Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
+ Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;
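+ // Worked example of the arithmetic above: lane 1 of a D-sized-lane register
+ // gives lane = 1 * 8 + 1 = 9 (0b1001), hence size = 0b01, S = 0 and Q = 1;
+ // lane 2 of an S-sized-lane register gives lane = 8 (0b1000), hence
+ // size = 0b00, S = 0 and Q = 1.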
+
+ Instr instr = op;
+ switch (lane_size) {
+ case 1: instr |= NEONLoadStoreSingle_b; break;
+ case 2: instr |= NEONLoadStoreSingle_h; break;
+ case 4: instr |= NEONLoadStoreSingle_s; break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ instr |= NEONLoadStoreSingle_d;
+ }
+
+ Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ int lane,
+ const MemOperand& src) {
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
+}
+
+
+void Assembler::ld1r(const VRegister& vt,
+ const MemOperand& src) {
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ int lane,
+ const MemOperand& dst) {
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
+}
+
+
+void Assembler::NEON3DifferentL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop) {
+ VIXL_ASSERT(AreSameFormat(vn, vm));
+ VIXL_ASSERT((vn.Is1H() && vd.Is1S()) ||
+ (vn.Is1S() && vd.Is1D()) ||
+ (vn.Is8B() && vd.Is8H()) ||
+ (vn.Is4H() && vd.Is4S()) ||
+ (vn.Is2S() && vd.Is2D()) ||
+ (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) ||
+ (vn.Is4S() && vd.Is2D()));
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEON3DifferentW(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT((vm.Is8B() && vd.Is8H()) ||
+ (vm.Is4H() && vd.Is4S()) ||
+ (vm.Is2S() && vd.Is2D()) ||
+ (vm.Is16B() && vd.Is8H()) ||
+ (vm.Is8H() && vd.Is4S()) ||
+ (vm.Is4S() && vd.Is2D()));
+ Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEON3DifferentHN(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop) {
+ VIXL_ASSERT(AreSameFormat(vm, vn));
+ VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+ (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) ||
+ (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) ||
+ (vd.Is4S() && vn.Is2D()));
+ Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_3DIFF_LONG_LIST(V) \
+ V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \
+ V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \
+ V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \
+ V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \
+ V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \
+ V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \
+ V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \
+ V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \
+ V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \
+ V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \
+ V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \
+ V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \
+ V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \
+ V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \
+ V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \
+ V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \
+ V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \
+ V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \
+ V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \
+ V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \
+ V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \
+ V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \
+ V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \
+ V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \
+ V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \
+ V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \
+ V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \
+ V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \
+ V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \
+ V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+ V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+ V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(AS); \
+ NEON3DifferentL(vd, vn, vm, OP); \
+}
+NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+#define NEON_3DIFF_HN_LIST(V) \
+ V(addhn, NEON_ADDHN, vd.IsD()) \
+ V(addhn2, NEON_ADDHN2, vd.IsQ()) \
+ V(raddhn, NEON_RADDHN, vd.IsD()) \
+ V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
+ V(subhn, NEON_SUBHN, vd.IsD()) \
+ V(subhn2, NEON_SUBHN2, vd.IsQ()) \
+ V(rsubhn, NEON_RSUBHN, vd.IsD()) \
+ V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(AS); \
+ NEON3DifferentHN(vd, vn, vm, OP); \
+}
+NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+void Assembler::uaddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_UADDW);
+}
+
+
+void Assembler::uaddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
+}
+
+
+void Assembler::saddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_SADDW);
+}
+
+
+void Assembler::saddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
+}
+
+
+void Assembler::usubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_USUBW);
+}
+
+
+void Assembler::usubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
+}
+
+
+void Assembler::ssubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
+}
+
+
+void Assembler::ssubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
+}
+
+
+void Assembler::mov(const Register& rd, const Register& rm) {
+ // Moves involving the stack pointer are encoded as add immediate with
+ // second operand of zero. Otherwise, orr with first operand zr is
+ // used.
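+  // For example, mov(x0, sp) is emitted as add x0, sp, #0, while mov(x0, x1)
+  // becomes orr x0, xzr, x1.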
+ if (rd.IsSP() || rm.IsSP()) {
+ add(rd, rm, 0);
+ } else {
+ orr(rd, AppropriateZeroRegFor(rd), rm);
+ }
+}
+
+
+void Assembler::mvn(const Register& rd, const Operand& operand) {
+ orn(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
+ VIXL_ASSERT(rt.Is64Bits());
+ Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
+}
+
+
+void Assembler::msr(SystemRegister sysreg, const Register& rt) {
+ VIXL_ASSERT(rt.Is64Bits());
+ Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+}
+
+
+void Assembler::clrex(int imm4) {
+ Emit(CLREX | CRm(imm4));
+}
+
+
+void Assembler::dmb(BarrierDomain domain, BarrierType type) {
+ Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::dsb(BarrierDomain domain, BarrierType type) {
+ Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::isb() {
+ Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+}
+
+
+void Assembler::fmov(const VRegister& vd, double imm) {
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1D());
+ Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm));
+ } else {
+ VIXL_ASSERT(vd.Is2D());
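+    // cmode 0b1111 with the op bit set selects FMOV (vector, immediate) for
+    // double-precision elements.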
+ Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
+ Instr q = NEON_Q;
+ uint32_t encoded_imm = FP64ToImm8(imm);
+ Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
+ }
+}
+
+
+void Assembler::fmov(const VRegister& vd, float imm) {
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1S());
+ Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm));
+ } else {
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ Instr op = NEONModifiedImmediate_MOVI;
+ Instr q = vd.Is4S() ? NEON_Q : 0;
+ uint32_t encoded_imm = FP32ToImm8(imm);
+ Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
+ }
+}
+
+
+void Assembler::fmov(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT(rd.size() == vn.size());
+ FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
+ Emit(op | Rd(rd) | Rn(vn));
+}
+
+
+void Assembler::fmov(const VRegister& vd, const Register& rn) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(vd.size() == rn.size());
+ FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
+ Emit(op | Rd(vd) | Rn(rn));
+}
+
+
+void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(vd.IsSameFormat(vn));
+ Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
+}
+
+
+void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
+ VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX());
+ USE(index);
+ Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
+}
+
+
+void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
+ VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX());
+ USE(index);
+ Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
+}
+
+
+void Assembler::fmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMADD_s : FMADD_d);
+}
+
+
+void Assembler::fmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMSUB_s : FMSUB_d);
+}
+
+
+void Assembler::fnmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMADD_s : FNMADD_d);
+}
+
+
+void Assembler::fnmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMSUB_s : FNMSUB_d);
+}
+
+
+void Assembler::fnmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm));
+ Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
+ Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::FPCompareMacro(const VRegister& vn,
+ double value,
+ FPTrapFlags trap) {
+ USE(value);
+ // Although the fcmp{e} instructions can strictly only take an immediate
+ // value of +0.0, we don't need to check for -0.0 because the sign of 0.0
+ // doesn't affect the result of the comparison.
+ VIXL_ASSERT(value == 0.0);
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero;
+ Emit(FPType(vn) | op | Rn(vn));
+}
+
+
+void Assembler::FPCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ FPTrapFlags trap) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT(vn.IsSameSizeAndType(vm));
+ Instr op = (trap == EnableTrap) ? FCMPE : FCMP;
+ Emit(FPType(vn) | op | Rm(vm) | Rn(vn));
+}
+
+
+void Assembler::fcmp(const VRegister& vn,
+ const VRegister& vm) {
+ FPCompareMacro(vn, vm, DisableTrap);
+}
+
+
+void Assembler::fcmpe(const VRegister& vn,
+ const VRegister& vm) {
+ FPCompareMacro(vn, vm, EnableTrap);
+}
+
+
+void Assembler::fcmp(const VRegister& vn,
+ double value) {
+ FPCompareMacro(vn, value, DisableTrap);
+}
+
+
+void Assembler::fcmpe(const VRegister& vn,
+ double value) {
+ FPCompareMacro(vn, value, EnableTrap);
+}
+
+
+void Assembler::FPCCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond,
+ FPTrapFlags trap) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT(vn.IsSameSizeAndType(vm));
+ Instr op = (trap == EnableTrap) ? FCCMPE : FCCMP;
+ Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv));
+}
+
+void Assembler::fccmp(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond) {
+ FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap);
+}
+
+
+void Assembler::fccmpe(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond) {
+ FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap);
+}
+
+
+void Assembler::fcsel(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Condition cond) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kJSCVT));
+ VIXL_ASSERT(rd.IsW() && vn.Is1D());
+ Emit(FJCVTZS | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::NEONFPConvertToInt(const Register& rd,
+ const VRegister& vn,
+ Instr op) {
+ Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::NEONFPConvertToInt(const VRegister& vd,
+ const VRegister& vn,
+ Instr op) {
+ if (vn.IsScalar()) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
+ op |= NEON_Q | NEONScalar;
+ }
+ Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvt(const VRegister& vd,
+ const VRegister& vn) {
+ FPDataProcessing1SourceOp op;
+ if (vd.Is1D()) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1H());
+ op = vn.Is1S() ? FCVT_ds : FCVT_dh;
+ } else if (vd.Is1S()) {
+ VIXL_ASSERT(vn.Is1D() || vn.Is1H());
+ op = vn.Is1D() ? FCVT_sd : FCVT_sh;
+ } else {
+ VIXL_ASSERT(vd.Is1H());
+ VIXL_ASSERT(vn.Is1D() || vn.Is1S());
+ op = vn.Is1D() ? FCVT_hd : FCVT_hs;
+ }
+ FPDataProcessing1Source(vd, vn, op);
+}
+
+
+void Assembler::fcvtl(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is4S() && vn.Is4H()) ||
+ (vd.Is2D() && vn.Is2S()));
+ Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtl2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is4S() && vn.Is8H()) ||
+ (vd.Is2D() && vn.Is4S()));
+ Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vn.Is4S() && vd.Is4H()) ||
+ (vn.Is2D() && vd.Is2S()));
+ Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vn.Is4S() && vd.Is8H()) ||
+ (vn.Is2D() && vd.Is4S()));
+ Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtxn(const VRegister& vd,
+ const VRegister& vn) {
+ Instr format = 1 << NEONSize_offset;
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1S() && vn.Is1D());
+ Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
+ } else {
+ VIXL_ASSERT(vd.Is2S() && vn.Is2D());
+ Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
+ }
+}
+
+
+void Assembler::fcvtxn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.Is4S() && vn.Is2D());
+ Instr format = 1 << NEONSize_offset;
+ Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_FP2REGMISC_FCVT_LIST(V) \
+ V(fcvtnu, NEON_FCVTNU, FCVTNU) \
+ V(fcvtns, NEON_FCVTNS, FCVTNS) \
+ V(fcvtpu, NEON_FCVTPU, FCVTPU) \
+ V(fcvtps, NEON_FCVTPS, FCVTPS) \
+ V(fcvtmu, NEON_FCVTMU, FCVTMU) \
+ V(fcvtms, NEON_FCVTMS, FCVTMS) \
+ V(fcvtau, NEON_FCVTAU, FCVTAU) \
+ V(fcvtas, NEON_FCVTAS, FCVTAS)
+
+#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \
+void Assembler::FN(const Register& rd, \
+ const VRegister& vn) { \
+ NEONFPConvertToInt(rd, vn, SCA_OP); \
+} \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn) { \
+ NEONFPConvertToInt(vd, vn, VEC_OP); \
+}
+NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
+#undef DEFINE_ASM_FUNCS
+
+
+void Assembler::fcvtzs(const Register& rd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits()));
+ if (fbits == 0) {
+ Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
+ } else {
+ Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
+ Rd(rd));
+ }
+}
+
+
+void Assembler::fcvtzs(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
+ }
+}
+
+
+void Assembler::fcvtzu(const Register& rd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits()));
+ if (fbits == 0) {
+ Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
+ } else {
+ Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
+ Rd(rd));
+ }
+}
+
+
+void Assembler::fcvtzu(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
+ }
+}
+
+void Assembler::ucvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_UCVTF);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
+ }
+}
+
+void Assembler::scvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_SCVTF);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
+ }
+}
+
+
+void Assembler::scvtf(const VRegister& vd,
+ const Register& rn,
+ int fbits) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
+ } else {
+ Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(vd));
+ }
+}
+
+
+void Assembler::ucvtf(const VRegister& vd,
+ const Register& rn,
+ int fbits) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd));
+ } else {
+ Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(vd));
+ }
+}
+
+
+void Assembler::NEON3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3SameOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
+
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ format = VFormat(vd);
+ }
+
+ Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONFP3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Instr op) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_FP2REGMISC_LIST(V) \
+ V(fabs, NEON_FABS, FABS) \
+ V(fneg, NEON_FNEG, FNEG) \
+ V(fsqrt, NEON_FSQRT, FSQRT) \
+ V(frintn, NEON_FRINTN, FRINTN) \
+ V(frinta, NEON_FRINTA, FRINTA) \
+ V(frintp, NEON_FRINTP, FRINTP) \
+ V(frintm, NEON_FRINTM, FRINTM) \
+ V(frintx, NEON_FRINTX, FRINTX) \
+ V(frintz, NEON_FRINTZ, FRINTZ) \
+ V(frinti, NEON_FRINTI, FRINTI) \
+ V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
+  V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar)
+
+
+#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn) { \
+ Instr op; \
+ if (vd.IsScalar()) { \
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \
+ op = SCA_OP; \
+ } else { \
+ VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
+ op = VEC_OP; \
+ } \
+ NEONFP2RegMisc(vd, vn, op); \
+}
+NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ Instr op) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEON2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ int value) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(value == 0);
+ USE(value);
+
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ format = VFormat(vd);
+ }
+
+ Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::cmeq(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
+}
+
+
+void Assembler::cmge(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
+}
+
+
+void Assembler::cmgt(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
+}
+
+
+void Assembler::cmle(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
+}
+
+
+void Assembler::cmlt(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
+}
+
+
+void Assembler::shll(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) ||
+ (vd.Is4S() && vn.Is4H() && shift == 16) ||
+ (vd.Is2D() && vn.Is2S() && shift == 32));
+ USE(shift);
+ Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::shll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ USE(shift);
+ VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) ||
+ (vd.Is4S() && vn.Is8H() && shift == 16) ||
+ (vd.Is2D() && vn.Is4S() && shift == 32));
+ Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ double value) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(value == 0.0);
+ USE(value);
+
+ Instr op = vop;
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ op |= NEON_Q | NEONScalar;
+ } else {
+ VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());
+ }
+
+ Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcmeq(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
+}
+
+
+void Assembler::fcmge(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
+}
+
+
+void Assembler::fcmgt(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
+}
+
+
+void Assembler::fcmle(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
+}
+
+
+void Assembler::fcmlt(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
+}
+
+
+void Assembler::frecpx(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar());
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_3SAME_LIST(V) \
+ V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \
+ V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \
+ V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \
+ V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \
+ V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \
+ V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \
+ V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \
+ V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \
+ V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \
+ V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \
+ V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \
+ V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \
+ V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \
+ V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
+ V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
+ V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \
+ V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \
+ V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \
+ V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \
+ V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \
+ V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \
+ V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \
+ V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \
+ V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \
+ V(uqadd, NEON_UQADD, true) \
+ V(sqadd, NEON_SQADD, true) \
+ V(uqsub, NEON_UQSUB, true) \
+ V(sqsub, NEON_SQSUB, true) \
+ V(sqshl, NEON_SQSHL, true) \
+ V(uqshl, NEON_UQSHL, true) \
+ V(sqrshl, NEON_SQRSHL, true) \
+ V(uqrshl, NEON_UQRSHL, true)
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(AS); \
+ NEON3Same(vd, vn, vm, OP); \
+}
+NEON_3SAME_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+#define NEON_FP3SAME_OP_LIST(V) \
+ V(fadd, NEON_FADD, FADD) \
+ V(fsub, NEON_FSUB, FSUB) \
+ V(fmul, NEON_FMUL, FMUL) \
+ V(fdiv, NEON_FDIV, FDIV) \
+ V(fmax, NEON_FMAX, FMAX) \
+ V(fmaxnm, NEON_FMAXNM, FMAXNM) \
+ V(fmin, NEON_FMIN, FMIN) \
+ V(fminnm, NEON_FMINNM, FMINNM) \
+ V(fmulx, NEON_FMULX, NEON_FMULX_scalar) \
+ V(frecps, NEON_FRECPS, NEON_FRECPS_scalar) \
+ V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \
+ V(fabd, NEON_FABD, NEON_FABD_scalar) \
+ V(fmla, NEON_FMLA, 0) \
+ V(fmls, NEON_FMLS, 0) \
+ V(facge, NEON_FACGE, NEON_FACGE_scalar) \
+ V(facgt, NEON_FACGT, NEON_FACGT_scalar) \
+ V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar) \
+ V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar) \
+ V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar) \
+ V(faddp, NEON_FADDP, 0) \
+ V(fmaxp, NEON_FMAXP, 0) \
+ V(fminp, NEON_FMINP, 0) \
+ V(fmaxnmp, NEON_FMAXNMP, 0) \
+ V(fminnmp, NEON_FMINNMP, 0)
+
+#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ Instr op; \
+ if ((SCA_OP != 0) && vd.IsScalar()) { \
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \
+ op = SCA_OP; \
+ } else { \
+ VIXL_ASSERT(vd.IsVector()); \
+ VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
+ op = VEC_OP; \
+ } \
+ NEONFP3Same(vd, vn, vm, op); \
+}
+NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::addp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1D() && vn.Is2D()));
+ Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::faddp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fmaxp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fminp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fmaxnmp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fminnmp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::orr(const VRegister& vd,
+ const int imm8,
+ const int left_shift) {
+ NEONModifiedImmShiftLsl(vd, imm8, left_shift,
+ NEONModifiedImmediate_ORR);
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ if (vd.IsD()) {
+ orr(vd.V8B(), vn.V8B(), vn.V8B());
+ } else {
+ VIXL_ASSERT(vd.IsQ());
+ orr(vd.V16B(), vn.V16B(), vn.V16B());
+ }
+}
+
+
+void Assembler::bic(const VRegister& vd,
+ const int imm8,
+ const int left_shift) {
+ NEONModifiedImmShiftLsl(vd, imm8, left_shift,
+ NEONModifiedImmediate_BIC);
+}
+
+
+void Assembler::movi(const VRegister& vd,
+ const uint64_t imm,
+ Shift shift,
+ const int shift_amount) {
+ VIXL_ASSERT((shift == LSL) || (shift == MSL));
+ if (vd.Is2D() || vd.Is1D()) {
+ VIXL_ASSERT(shift_amount == 0);
+ int imm8 = 0;
+ for (int i = 0; i < 8; ++i) {
+ int byte = (imm >> (i * 8)) & 0xff;
+ VIXL_ASSERT((byte == 0) || (byte == 0xff));
+ if (byte == 0xff) {
+ imm8 |= (1 << i);
+ }
+ }
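+    // For example, imm == 0x00ff00ff00ff00ff yields imm8 == 0b01010101.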
+ int q = vd.Is2D() ? NEON_Q : 0;
+ Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
+ ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
+ } else if (shift == LSL) {
+ VIXL_ASSERT(IsUint8(imm));
+ NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
+ NEONModifiedImmediate_MOVI);
+ } else {
+ VIXL_ASSERT(IsUint8(imm));
+ NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
+ NEONModifiedImmediate_MOVI);
+ }
+}
+
+
+void Assembler::mvn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ if (vd.IsD()) {
+ not_(vd.V8B(), vn.V8B());
+ } else {
+ VIXL_ASSERT(vd.IsQ());
+ not_(vd.V16B(), vn.V16B());
+ }
+}
+
+
+void Assembler::mvni(const VRegister& vd,
+ const int imm8,
+ Shift shift,
+ const int shift_amount) {
+ VIXL_ASSERT((shift == LSL) || (shift == MSL));
+ if (shift == LSL) {
+ NEONModifiedImmShiftLsl(vd, imm8, shift_amount,
+ NEONModifiedImmediate_MVNI);
+ } else {
+ NEONModifiedImmShiftMsl(vd, imm8, shift_amount,
+ NEONModifiedImmediate_MVNI);
+ }
+}
+
+
+void Assembler::NEONFPByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT((vd.Is2S() && vm.Is1S()) ||
+ (vd.Is4S() && vm.Is1S()) ||
+ (vd.Is1S() && vm.Is1S()) ||
+ (vd.Is2D() && vm.Is1D()) ||
+ (vd.Is1D() && vm.Is1D()));
+ VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) ||
+ (vm.Is1D() && (vm_index < 2)));
+
+ Instr op = vop;
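+  // The element index is encoded in the H:L bits: two bits for an S-sized
+  // vm, one bit (H only) for a D-sized vm.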
+ int index_num_bits = vm.Is1S() ? 2 : 1;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ }
+
+ Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) |
+ Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT((vd.Is4H() && vm.Is1H()) ||
+ (vd.Is8H() && vm.Is1H()) ||
+ (vd.Is1H() && vm.Is1H()) ||
+ (vd.Is2S() && vm.Is1S()) ||
+ (vd.Is4S() && vm.Is1S()) ||
+ (vd.Is1S() && vm.Is1S()));
+ VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
+ (vm.Is1S() && (vm_index < 4)));
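+  // For H-sized elements the index needs three bits (H:L:M); the M bit
+  // occupies what would be the top bit of the Rm field, so vm is restricted
+  // to v0-v15.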
+
+ Instr format, op = vop;
+ int index_num_bits = vm.Is1H() ? 3 : 2;
+ if (vd.IsScalar()) {
+ op |= NEONScalar | NEON_Q;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) |
+ Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONByElementL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp vop) {
+ VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
+ (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
+ (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
+ (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
+ (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
+ (vd.Is1D() && vn.Is1S() && vm.Is1S()));
+
+ VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
+ (vm.Is1S() && (vm_index < 4)));
+
+ Instr format, op = vop;
+ int index_num_bits = vm.Is1H() ? 3 : 2;
+ if (vd.IsScalar()) {
+ op |= NEONScalar | NEON_Q;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) |
+ Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_BYELEMENT_LIST(V) \
+ V(mul, NEON_MUL_byelement, vn.IsVector()) \
+ V(mla, NEON_MLA_byelement, vn.IsVector()) \
+ V(mls, NEON_MLS_byelement, vn.IsVector()) \
+ V(sqdmulh, NEON_SQDMULH_byelement, true) \
+ V(sqrdmulh, NEON_SQRDMULH_byelement, true)
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ VIXL_ASSERT(AS); \
+ NEONByElement(vd, vn, vm, vm_index, OP); \
+}
+NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+#define NEON_FPBYELEMENT_LIST(V) \
+ V(fmul, NEON_FMUL_byelement) \
+ V(fmla, NEON_FMLA_byelement) \
+ V(fmls, NEON_FMLS_byelement) \
+ V(fmulx, NEON_FMULX_byelement)
+
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ NEONFPByElement(vd, vn, vm, vm_index, OP); \
+}
+NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+#define NEON_BYELEMENT_LONG_LIST(V) \
+ V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ())
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ VIXL_ASSERT(AS); \
+ NEONByElementL(vd, vn, vm, vm_index, OP); \
+}
+NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::suqadd(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SUQADD);
+}
+
+
+void Assembler::usqadd(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_USQADD);
+}
+
+
+void Assembler::abs(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_ABS);
+}
+
+
+void Assembler::sqabs(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SQABS);
+}
+
+
+void Assembler::neg(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_NEG);
+}
+
+
+void Assembler::sqneg(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SQNEG);
+}
+
+
+void Assembler::NEONXtn(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop) {
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ VIXL_ASSERT((vd.Is1B() && vn.Is1H()) ||
+ (vd.Is1H() && vn.Is1S()) ||
+ (vd.Is1S() && vn.Is1D()));
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+ (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) ||
+ (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) ||
+ (vd.Is4S() && vn.Is2D()));
+ format = VFormat(vd);
+ }
+ Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::xtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsD());
+ NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::xtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::sqxtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtun(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::sqxtun2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::uqxtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+void Assembler::uqxtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+// NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
+void Assembler::not_(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rbit(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::ext(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int index) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ VIXL_ASSERT((0 <= index) && (index < vd.lanes()));
+ Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::dup(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index) {
+ Instr q, scalar;
+
+ // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1: format = NEON_16B; break;
+ case 2: format = NEON_8H; break;
+ case 4: format = NEON_4S; break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ format = NEON_2D;
+ break;
+ }
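+  // ImmNEON5 packs the lane size and index into the imm5 field: the position
+  // of the lowest set bit encodes the size and the bits above it hold
+  // vn_index.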
+
+ if (vd.IsScalar()) {
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ VIXL_ASSERT(!vd.Is1D());
+ q = vd.IsD() ? 0 : NEON_Q;
+ scalar = 0;
+ }
+ Emit(q | scalar | NEON_DUP_ELEMENT |
+ ImmNEON5(format, vn_index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index) {
+ VIXL_ASSERT(vn.IsScalar());
+ dup(vd, vn, vn_index);
+}
+
+
+void Assembler::dup(const VRegister& vd, const Register& rn) {
+ VIXL_ASSERT(!vd.Is1D());
+ VIXL_ASSERT(vd.Is2D() == rn.IsX());
+ int q = vd.IsD() ? 0 : NEON_Q;
+ Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
+}
+
+
+void Assembler::ins(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vd.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1: format = NEON_16B; break;
+ case 2: format = NEON_8H; break;
+ case 4: format = NEON_4S; break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ format = NEON_2D;
+ break;
+ }
+
+ VIXL_ASSERT((0 <= vd_index) &&
+ (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ VIXL_ASSERT((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
+ ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ ins(vd, vd_index, vn, vn_index);
+}
+
+
+void Assembler::ins(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vd.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1: format = NEON_16B; VIXL_ASSERT(rn.IsW()); break;
+ case 2: format = NEON_8H; VIXL_ASSERT(rn.IsW()); break;
+ case 4: format = NEON_4S; VIXL_ASSERT(rn.IsW()); break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ VIXL_ASSERT(rn.IsX());
+ format = NEON_2D;
+ break;
+ }
+
+ VIXL_ASSERT((0 <= vd_index) &&
+ (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ ins(vd, vd_index, rn);
+}
+
+
+void Assembler::umov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ Instr q = 0;
+ switch (lane_size) {
+ case 1: format = NEON_16B; VIXL_ASSERT(rd.IsW()); break;
+ case 2: format = NEON_8H; VIXL_ASSERT(rd.IsW()); break;
+ case 4: format = NEON_4S; VIXL_ASSERT(rd.IsW()); break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ VIXL_ASSERT(rd.IsX());
+ format = NEON_2D;
+ q = NEON_Q;
+ break;
+ }
+
+ VIXL_ASSERT((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::mov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
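+  // The MOV (element to general register) form is an alias of UMOV and
+  // exists only for S- and D-sized lanes; use umov directly for B and H
+  // lanes.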
+ VIXL_ASSERT(vn.SizeInBytes() >= 4);
+ umov(rd, vn, vn_index);
+}
+
+
+void Assembler::smov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+  // number of lanes, and T is b, h or s.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ Instr q = 0;
+ VIXL_ASSERT(lane_size != 8);
+ switch (lane_size) {
+ case 1: format = NEON_16B; break;
+ case 2: format = NEON_8H; break;
+ default:
+ VIXL_ASSERT(lane_size == 4);
+ VIXL_ASSERT(rd.IsX());
+ format = NEON_4S;
+ break;
+ }
+ q = rd.IsW() ? 0 : NEON_Q;
+ VIXL_ASSERT((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::cls(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::clz(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::cnt(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev16(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev32(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
+ Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev64(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::ursqrte(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::urecpe(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONAddlp(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp op) {
+ VIXL_ASSERT((op == NEON_SADDLP) ||
+ (op == NEON_UADDLP) ||
+ (op == NEON_SADALP) ||
+ (op == NEON_UADALP));
+
+ VIXL_ASSERT((vn.Is8B() && vd.Is4H()) ||
+ (vn.Is4H() && vd.Is2S()) ||
+ (vn.Is2S() && vd.Is1D()) ||
+              (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) ||
+ (vn.Is4S() && vd.Is2D()));
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::saddlp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_SADDLP);
+}
+
+
+void Assembler::uaddlp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_UADDLP);
+}
+
+
+void Assembler::sadalp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_SADALP);
+}
+
+
+void Assembler::uadalp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_UADALP);
+}
+
+
+void Assembler::NEONAcrossLanesL(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op) {
+ VIXL_ASSERT((vn.Is8B() && vd.Is1H()) ||
+ (vn.Is16B() && vd.Is1H()) ||
+ (vn.Is4H() && vd.Is1S()) ||
+ (vn.Is8H() && vd.Is1S()) ||
+ (vn.Is4S() && vd.Is1D()));
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::saddlv(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAcrossLanesL(vd, vn, NEON_SADDLV);
+}
+
+
+void Assembler::uaddlv(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAcrossLanesL(vd, vn, NEON_UADDLV);
+}
+
+
+void Assembler::NEONAcrossLanes(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op) {
+ VIXL_ASSERT((vn.Is8B() && vd.Is1B()) ||
+ (vn.Is16B() && vd.Is1B()) ||
+ (vn.Is4H() && vd.Is1H()) ||
+ (vn.Is8H() && vd.Is1H()) ||
+ (vn.Is4S() && vd.Is1S()));
+ if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
+ } else {
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+ }
+}
+
+
+#define NEON_ACROSSLANES_LIST(V) \
+ V(fmaxv, NEON_FMAXV, vd.Is1S()) \
+ V(fminv, NEON_FMINV, vd.Is1S()) \
+ V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
+ V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
+ V(addv, NEON_ADDV, true) \
+ V(smaxv, NEON_SMAXV, true) \
+ V(sminv, NEON_SMINV, true) \
+ V(umaxv, NEON_UMAXV, true) \
+ V(uminv, NEON_UMINV, true)
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn) { \
+ VIXL_ASSERT(AS); \
+ NEONAcrossLanes(vd, vn, OP); \
+}
+NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::NEONPerm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONPermOp op) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ VIXL_ASSERT(!vd.Is1D());
+ Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::trn1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_TRN1);
+}
+
+
+void Assembler::trn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_TRN2);
+}
+
+
+void Assembler::uzp1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_UZP1);
+}
+
+
+void Assembler::uzp2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_UZP2);
+}
+
+
+void Assembler::zip1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_ZIP1);
+}
+
+
+void Assembler::zip2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_ZIP2);
+}
+
+
+void Assembler::NEONShiftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ NEONShiftImmediateOp op,
+ int immh_immb) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ Instr q, scalar;
+ if (vn.IsScalar()) {
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ q = vd.IsD() ? 0 : NEON_Q;
+ scalar = 0;
+ }
+ Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONShiftLeftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
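+  // The shift is encoded in immh:immb as (lane size in bits) + shift; for
+  // example, a B-sized lane shifted left by 3 gives immh:immb = 0b0001011.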
+ NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
+}
+
+
+void Assembler::NEONShiftRightImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
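+  // Right shifts encode immh:immb as (2 * lane size in bits) - shift; for
+  // example, a B-sized lane shifted right by 3 gives immh:immb = 0b0001101.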
+ NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
+}
+
+
+void Assembler::NEONShiftImmediateL(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
+ int immh_immb = (laneSizeInBits + shift) << 16;
+
+ VIXL_ASSERT((vn.Is8B() && vd.Is8H()) ||
+ (vn.Is4H() && vd.Is4S()) ||
+ (vn.Is2S() && vd.Is2D()) ||
+              (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) ||
+ (vn.Is4S() && vd.Is2D()));
+ Instr q;
+ q = vn.IsD() ? 0 : NEON_Q;
+ Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONShiftImmediateN(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ Instr q, scalar;
+ int laneSizeInBits = vd.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
+ int immh_immb = (2 * laneSizeInBits - shift) << 16;
+
+ if (vn.IsScalar()) {
+ VIXL_ASSERT((vd.Is1B() && vn.Is1H()) ||
+ (vd.Is1H() && vn.Is1S()) ||
+ (vd.Is1S() && vn.Is1D()));
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+ (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) ||
+                (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) ||
+ (vd.Is4S() && vn.Is2D()));
+ scalar = 0;
+ q = vd.IsD() ? 0 : NEON_Q;
+ }
+ Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::shl(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
+}
+
+
+void Assembler::sli(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
+}
+
+
+void Assembler::sqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
+}
+
+
+void Assembler::sqshlu(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
+}
+
+
+void Assembler::uqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
+}
+
+
+void Assembler::sshll(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsD());
+ NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+
+void Assembler::sshll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsQ());
+ NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+
+void Assembler::sxtl(const VRegister& vd,
+ const VRegister& vn) {
+ sshll(vd, vn, 0);
+}
+
+
+void Assembler::sxtl2(const VRegister& vd,
+ const VRegister& vn) {
+ sshll2(vd, vn, 0);
+}
+
+
+void Assembler::ushll(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsD());
+ NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+
+void Assembler::ushll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsQ());
+ NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+
+void Assembler::uxtl(const VRegister& vd,
+ const VRegister& vn) {
+ ushll(vd, vn, 0);
+}
+
+
+void Assembler::uxtl2(const VRegister& vd,
+ const VRegister& vn) {
+ ushll2(vd, vn, 0);
+}
+
+
+void Assembler::sri(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
+}
+
+
+void Assembler::sshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
+}
+
+
+void Assembler::ushr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
+}
+
+
+void Assembler::srshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
+}
+
+
+void Assembler::urshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
+}
+
+
+void Assembler::ssra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
+}
+
+
+void Assembler::usra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
+}
+
+
+void Assembler::srsra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
+}
+
+
+void Assembler::ursra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
+}
+
+
+void Assembler::shrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsD());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+
+void Assembler::shrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+
+void Assembler::rshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsD());
+ NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+
+void Assembler::rshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+
+void Assembler::sqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+
+void Assembler::sqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+
+void Assembler::sqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+
+void Assembler::sqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+
+void Assembler::sqshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+
+void Assembler::sqshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+
+void Assembler::sqrshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+
+void Assembler::sqrshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+
+void Assembler::uqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+
+void Assembler::uqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+
+void Assembler::uqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+
+void Assembler::uqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+
+// Note:
+// Below, a difference in case for the same letter indicates a
+// negated bit.
+// If b is 1, then B is 0.
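+// For example, 1.0f (0x3f800000) encodes as imm8 = 0x70 and 2.0f (0x40000000)
+// encodes as imm8 = 0x00.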
+uint32_t Assembler::FP32ToImm8(float imm) {
+ VIXL_ASSERT(IsImmFP32(imm));
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = FloatToRawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+
+ return bit7 | bit6 | bit5_to_0;
+}
+
+
+Instr Assembler::ImmFP32(float imm) {
+ return FP32ToImm8(imm) << ImmFP_offset;
+}
+
+
+uint32_t Assembler::FP64ToImm8(double imm) {
+ VIXL_ASSERT(IsImmFP64(imm));
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = DoubleToRawbits(imm);
+ // bit7: a000.0000
+ uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+ return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+}
+
+
+Instr Assembler::ImmFP64(double imm) {
+ return FP64ToImm8(imm) << ImmFP_offset;
+}
+
+
+// Code generation helpers.
+void Assembler::MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op) {
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+    // Check that the top 32 bits are zero (a positive 32-bit number) or the
+    // top 33 bits are one (a negative 32-bit number, sign-extended to 64 bits).
+ VIXL_ASSERT(((imm >> kWRegSize) == 0) ||
+ ((imm >> (kWRegSize - 1)) == 0x1ffffffff));
+ imm &= kWRegMask;
+ }
+
+ if (shift >= 0) {
+ // Explicit shift specified.
+ VIXL_ASSERT((shift == 0) || (shift == 16) ||
+ (shift == 32) || (shift == 48));
+ VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+ shift /= 16;
+ } else {
+ // Calculate a new immediate and shift combination to encode the immediate
+ // argument.
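+    // For example, 0x0000cafe00000000 is encoded as imm16 = 0xcafe with
+    // shift = 2 (a left shift of 32 bits).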
+ shift = 0;
+ if ((imm & 0xffffffffffff0000) == 0) {
+ // Nothing to do.
+ } else if ((imm & 0xffffffff0000ffff) == 0) {
+ imm >>= 16;
+ shift = 1;
+ } else if ((imm & 0xffff0000ffffffff) == 0) {
+ VIXL_ASSERT(rd.Is64Bits());
+ imm >>= 32;
+ shift = 2;
+ } else if ((imm & 0x0000ffffffffffff) == 0) {
+ VIXL_ASSERT(rd.Is64Bits());
+ imm >>= 48;
+ shift = 3;
+ }
+ }
+
+ VIXL_ASSERT(IsUint16(imm));
+
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
+ Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+}
+
+
+void Assembler::AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ VIXL_ASSERT(IsImmAddSub(immediate));
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
+ ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
+ } else if (operand.IsShiftedRegister()) {
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ VIXL_ASSERT(operand.shift() != ROR);
+
+ // For instructions of the form:
+ // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
+ // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
+ // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // or their 64-bit register equivalents, convert the operand from shifted to
+ // extended register mode, and emit an add/sub extended instruction.
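+    // For example, an add with rd == sp and a shifted-register operand
+    // (x0, LSL #1) is emitted in the extended-register form, with x0
+    // extended as UXTX #1.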
+ if (rn.IsSP() || rd.IsSP()) {
+ VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
+ DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
+ AddSubExtendedFixed | op);
+ } else {
+ DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
+ }
+ } else {
+ VIXL_ASSERT(operand.IsExtendedRegister());
+ DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
+ }
+}
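+// For example (illustrative), add(sp, x0, Operand(x1, LSL, 2)) cannot be
+// encoded as a shifted-register add (that form cannot name sp), so the operand
+// is converted with ToExtendedRegister() and the instruction is emitted in the
+// extended-register form, "add sp, x0, x1, uxtx #2", which performs the same
+// operation.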
+
+
+void Assembler::AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == operand.reg().size());
+ VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::hlt(int code) {
+ VIXL_ASSERT(IsUint16(code));
+ Emit(HLT | ImmException(code));
+}
+
+
+void Assembler::brk(int code) {
+ VIXL_ASSERT(IsUint16(code));
+ Emit(BRK | ImmException(code));
+}
+
+
+void Assembler::svc(int code) {
+ Emit(SVC | ImmException(code));
+}
+
+
+void Assembler::ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ Instr ccmpop;
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ VIXL_ASSERT(IsImmConditionalCompare(immediate));
+ ccmpop = ConditionalCompareImmediateFixed | op |
+ ImmCondCmp(static_cast<unsigned>(immediate));
+ } else {
+ VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
+ }
+ Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
+}
+
+
+void Assembler::DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ Emit(SF(rn) | op | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::FPDataProcessing1Source(const VRegister& vd,
+ const VRegister& vn,
+ FPDataProcessing1SourceOp op) {
+ VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
+ Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::FPDataProcessing3Source(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va,
+ FPDataProcessing3SourceOp op) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va));
+ Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va));
+}
+
+
+void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd,
+ const int imm8,
+ const int left_shift,
+ NEONModifiedImmediateOp op) {
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() ||
+ vd.Is2S() || vd.Is4S());
+ VIXL_ASSERT((left_shift == 0) || (left_shift == 8) ||
+ (left_shift == 16) || (left_shift == 24));
+ VIXL_ASSERT(IsUint8(imm8));
+
+ int cmode_1, cmode_2, cmode_3;
+ if (vd.Is8B() || vd.Is16B()) {
+ VIXL_ASSERT(op == NEONModifiedImmediate_MOVI);
+ cmode_1 = 1;
+ cmode_2 = 1;
+ cmode_3 = 1;
+ } else {
+ cmode_1 = (left_shift >> 3) & 1;
+ cmode_2 = left_shift >> 4;
+ cmode_3 = 0;
+ if (vd.Is4H() || vd.Is8H()) {
+ VIXL_ASSERT((left_shift == 0) || (left_shift == 8));
+ cmode_3 = 1;
+ }
+ }
+ int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
+
+ int q = vd.IsQ() ? NEON_Q : 0;
+
+ Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
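+// For example (values derived from the rules above), a MOVI on a 4H or 8H
+// destination with left_shift == 8 yields cmode == 0b1010, while a 2S or 4S
+// destination with left_shift == 16 yields cmode == 0b0100.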
+
+
+void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd,
+ const int imm8,
+ const int shift_amount,
+ NEONModifiedImmediateOp op) {
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16));
+ VIXL_ASSERT(IsUint8(imm8));
+
+ int cmode_0 = (shift_amount >> 4) & 1;
+ int cmode = 0xc | cmode_0;
+
+ int q = vd.IsQ() ? NEON_Q : 0;
+
+ Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
+
+
+void Assembler::EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned shift_amount) {
+ switch (shift) {
+ case LSL:
+ lsl(rd, rn, shift_amount);
+ break;
+ case LSR:
+ lsr(rd, rn, shift_amount);
+ break;
+ case ASR:
+ asr(rd, rn, shift_amount);
+ break;
+ case ROR:
+ ror(rd, rn, shift_amount);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void Assembler::EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift) {
+ VIXL_ASSERT(rd.size() >= rn.size());
+ unsigned reg_size = rd.size();
+ // Use the correct size of register.
+ Register rn_ = Register(rn.code(), rd.size());
+ // Bits extracted are high_bit:0.
+ unsigned high_bit = (8 << (extend & 0x3)) - 1;
+ // Number of bits left in the result that are not introduced by the shift.
+ unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
+
+ if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
+ switch (extend) {
+ case UXTB:
+ case UXTH:
+ case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
+ case SXTB:
+ case SXTH:
+ case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
+ case UXTX:
+ case SXTX: {
+ VIXL_ASSERT(rn.size() == kXRegSize);
+ // Nothing to extend. Just shift.
+ lsl(rd, rn_, left_shift);
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ } else {
+ // No need to extend as the extended bits would be shifted away.
+ lsl(rd, rn_, left_shift);
+ }
+}
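+// For example (illustrative), EmitExtendShift(w0, w1, UXTB, 2) takes the
+// bitfield path: high_bit == 7 and non_shift_bits == 30, so it emits
+// "ubfm w0, w1, #30, #7" (i.e. "ubfiz w0, w1, #2, #8"), which zero-extends
+// the low byte of w1 and shifts it left by two.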
+
+
+void Assembler::DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
+ ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
+ dest_reg | RnSP(rn));
+}
+
+
+Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
+ unsigned access_size,
+ LoadStoreScalingOption option) {
+ Instr base = RnSP(addr.base());
+ int64_t offset = addr.offset();
+
+ if (addr.IsImmediateOffset()) {
+ bool prefer_unscaled = (option == PreferUnscaledOffset) ||
+ (option == RequireUnscaledOffset);
+ if (prefer_unscaled && IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ return base | LoadStoreUnscaledOffsetFixed |
+ ImmLS(static_cast<int>(offset));
+ }
+
+ if ((option != RequireUnscaledOffset) &&
+ IsImmLSScaled(offset, access_size)) {
+ // Use the scaled addressing mode.
+ return base | LoadStoreUnsignedOffsetFixed |
+ ImmLSUnsigned(static_cast<int>(offset) >> access_size);
+ }
+
+ if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ return base | LoadStoreUnscaledOffsetFixed |
+ ImmLS(static_cast<int>(offset));
+ }
+ }
+
+ // All remaining addressing modes are register-offset, pre-indexed or
+ // post-indexed modes.
+ VIXL_ASSERT((option != RequireUnscaledOffset) &&
+ (option != RequireScaledOffset));
+
+ if (addr.IsRegisterOffset()) {
+ Extend ext = addr.extend();
+ Shift shift = addr.shift();
+ unsigned shift_amount = addr.shift_amount();
+
+ // LSL is encoded in the option field as UXTX.
+ if (shift == LSL) {
+ ext = UXTX;
+ }
+
+ // Shifts are encoded in one bit, indicating a left shift by the memory
+ // access size.
+ VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size));
+ return base | LoadStoreRegisterOffsetFixed | Rm(addr.regoffset()) |
+ ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0);
+ }
+
+ if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) {
+ return base | LoadStorePreIndexFixed | ImmLS(static_cast<int>(offset));
+ }
+
+ if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) {
+ return base | LoadStorePostIndexFixed | ImmLS(static_cast<int>(offset));
+ }
+
+ // If this point is reached, the MemOperand (addr) cannot be encoded.
+ VIXL_UNREACHABLE();
+ return 0;
+}
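+// For example, with a word-sized access (access_size == 2) and the default
+// PreferScaledOffset option, an immediate offset of 8 is encoded in scaled
+// form (imm12 == 2), while an offset of 3 is not a multiple of 4 and falls
+// back to the unscaled form (imm9 == 3, i.e. an LDUR/STUR encoding).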
+
+
+void Assembler::LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op,
+ LoadStoreScalingOption option) {
+ Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option));
+}
+
+
+void Assembler::Prefetch(PrefetchOperation op,
+ const MemOperand& addr,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset());
+
+ Instr prfop = ImmPrefetchOperation(op);
+ Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option));
+}
+
+
+bool Assembler::IsImmAddSub(int64_t immediate) {
+ return IsUint12(immediate) ||
+ (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+}
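+// For example, 0xabc and 0xabc000 are encodable (a 12-bit immediate,
+// optionally shifted left by 12), but 0xabc123 is not.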
+
+
+bool Assembler::IsImmConditionalCompare(int64_t immediate) {
+ return IsUint5(immediate);
+}
+
+
+bool Assembler::IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = FloatToRawbits(imm);
+ // bits[19..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool Assembler::IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = DoubleToRawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0x0000ffffffffffff) != 0) {
+ return false;
+ }
+
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
+ return false;
+ }
+
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
+ return false;
+ }
+
+ return true;
+}
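+// For example, 0.5f (0x3f000000) and -31.0 satisfy these constraints, while
+// 0.1f (0x3dcccccd) has non-zero low mantissa bits and is rejected.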
+
+
+bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) {
+ VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
+ bool offset_is_size_multiple =
+ (((offset >> access_size) << access_size) == offset);
+ return offset_is_size_multiple && IsInt7(offset >> access_size);
+}
+
+
+bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) {
+ VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
+ bool offset_is_size_multiple =
+ (((offset >> access_size) << access_size) == offset);
+ return offset_is_size_multiple && IsUint12(offset >> access_size);
+}
+
+
+bool Assembler::IsImmLSUnscaled(int64_t offset) {
+ return IsInt9(offset);
+}
+
+
+// The movn instruction can generate immediates containing an arbitrary 16-bit
+// value, with the remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
+bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+ return IsImmMovz(~imm, reg_size);
+}
+
+
+// The movz instruction can generate immediates containing an arbitrary 16-bit
+// value, with the remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
+bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+ return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+}
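+// For example, IsImmMovz(0x0000123400000000, kXRegSize) is true (three of the
+// four halfwords are clear), and IsImmMovn(0xffffffffffff1234, kXRegSize) is
+// true because the bitwise NOT, 0x000000000000edcb, is movz-encodable.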
+
+
+// Test if a given value can be encoded in the immediate field of a logical
+// instruction.
+// If it can be encoded, the function returns true, and values pointed to by n,
+// imm_s and imm_r are updated with immediates encoded in the format required
+// by the corresponding fields in the logical instruction.
+// If it cannot be encoded, the function returns false, and the values pointed
+// to by n, imm_s and imm_r are undefined.
+bool Assembler::IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r) {
+ VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize));
+
+ bool negate = false;
+
+ // Logical immediates are encoded using parameters n, imm_s and imm_r using
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1 bits
+ // are set. The pattern is rotated right by R, and repeated across a 32 or
+ // 64-bit value, depending on destination register width.
+ //
+ // Put another way: the basic format of a logical immediate is a single
+ // contiguous stretch of 1 bits, repeated across the whole word at intervals
+ // given by a power of 2. To identify them quickly, we first locate the
+ // lowest stretch of 1 bits, then the next 1 bit above that; that combination
+ // is different for every logical immediate, so it gives us all the
+ // information we need to identify the only logical immediate that our input
+ // could be, and then we simply check if that's the value we actually have.
+ //
+ // (The rotation parameter does give the possibility of the stretch of 1 bits
+ // going 'round the end' of the word. To deal with that, we observe that in
+ // any situation where that happens the bitwise NOT of the value is also a
+ // valid logical immediate. So we simply invert the input whenever its low bit
+ // is set, and then we know that the rotated case can't arise.)
+
+ if (value & 1) {
+ // If the low bit is 1, negate the value, and set a flag to remember that we
+ // did (so that we can adjust the return values appropriately).
+ negate = true;
+ value = ~value;
+ }
+
+ if (width == kWRegSize) {
+ // To handle 32-bit logical immediates, the very easiest thing is to repeat
+ // the input value twice to make a 64-bit word. The correct encoding of that
+ // as a logical immediate will also be the correct encoding of the 32-bit
+ // value.
+
+ // Avoid making the assumption that the most-significant 32 bits are zero by
+ // shifting the value left and duplicating it.
+ value <<= kWRegSize;
+ value |= value >> kWRegSize;
+ }
+
+ // The basic analysis idea: imagine our input word looks like this.
+ //
+ // 0011111000111110001111100011111000111110001111100011111000111110
+ // c b a
+ // |<--d-->|
+ //
+ // We find the lowest set bit (as an actual power-of-2 value, not its index)
+ // and call it a. Then we add a to our original number, which wipes out the
+ // bottommost stretch of set bits and replaces it with a 1 carried into the
+ // next zero bit. Then we look for the new lowest set bit, which is in
+ // position b, and subtract it, so now our number is just like the original
+ // but with the lowest stretch of set bits completely gone. Now we find the
+ // lowest set bit again, which is position c in the diagram above. Then we'll
+ // measure the distance d between bit positions a and c (using CLZ), and that
+ // tells us that the only valid logical immediate that could possibly be equal
+ // to this number is the one in which a stretch of bits running from a to just
+ // below b is replicated every d bits.
+ uint64_t a = LowestSetBit(value);
+ uint64_t value_plus_a = value + a;
+ uint64_t b = LowestSetBit(value_plus_a);
+ uint64_t value_plus_a_minus_b = value_plus_a - b;
+ uint64_t c = LowestSetBit(value_plus_a_minus_b);
+
+ int d, clz_a, out_n;
+ uint64_t mask;
+
+ if (c != 0) {
+ // The general case, in which there is more than one stretch of set bits.
+ // Compute the repeat distance d, and set up a bitmask covering the basic
+ // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
+ // of these cases the N bit of the output will be zero.
+ clz_a = CountLeadingZeros(a, kXRegSize);
+ int clz_c = CountLeadingZeros(c, kXRegSize);
+ d = clz_a - clz_c;
+ mask = ((UINT64_C(1) << d) - 1);
+ out_n = 0;
+ } else {
+ // Handle degenerate cases.
+ //
+ // If any of those 'find lowest set bit' operations didn't find a set bit at
+ // all, then the word will have been zero thereafter, so in particular the
+ // last lowest_set_bit operation will have returned zero. So we can test for
+ // all the special case conditions in one go by seeing if c is zero.
+ if (a == 0) {
+ // The input was zero (or all 1 bits, which will come to here too after we
+ // inverted it at the start of the function), for which we just return
+ // false.
+ return false;
+ } else {
+ // Otherwise, if c was zero but a was not, then there's just one stretch
+ // of set bits in our word, meaning that we have the trivial case of
+ // d == 64 and only one 'repetition'. Set up all the same variables as in
+ // the general case above, and set the N bit in the output.
+ clz_a = CountLeadingZeros(a, kXRegSize);
+ d = 64;
+ mask = ~UINT64_C(0);
+ out_n = 1;
+ }
+ }
+
+ // If the repeat period d is not a power of two, it can't be encoded.
+ if (!IsPowerOf2(d)) {
+ return false;
+ }
+
+ if (((b - a) & ~mask) != 0) {
+ // If the bit stretch (b - a) does not fit within the mask derived from the
+ // repeat period, then fail.
+ return false;
+ }
+
+ // The only possible option is b - a repeated every d bits. Now we're going to
+ // actually construct the valid logical immediate derived from that
+ // specification, and see if it equals our original input.
+ //
+ // To repeat a value every d bits, we multiply it by a number of the form
+ // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
+ // be derived using a table lookup on CLZ(d).
+ static const uint64_t multipliers[] = {
+ 0x0000000000000001UL,
+ 0x0000000100000001UL,
+ 0x0001000100010001UL,
+ 0x0101010101010101UL,
+ 0x1111111111111111UL,
+ 0x5555555555555555UL,
+ };
+ uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57];
+ uint64_t candidate = (b - a) * multiplier;
+
+ if (value != candidate) {
+ // The candidate pattern doesn't match our input value, so fail.
+ return false;
+ }
+
+ // We have a match! This is a valid logical immediate, so now we have to
+ // construct the bits and pieces of the instruction encoding that generates
+ // it.
+
+ // Count the set bits in our basic stretch. The special case of clz(0) == -1
+ // makes the answer come out right for stretches that reach the very top of
+ // the word (e.g. numbers like 0xffffc00000000000).
+ int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSize);
+ int s = clz_a - clz_b;
+
+ // Decide how many bits to rotate right by, to put the low bit of that basic
+ // stretch in position a.
+ int r;
+ if (negate) {
+ // If we inverted the input right at the start of this function, here's
+ // where we compensate: the number of set bits becomes the number of clear
+ // bits, and the rotation count is based on position b rather than position
+ // a (since b is the location of the 'lowest' 1 bit after inversion).
+ s = d - s;
+ r = (clz_b + 1) & (d - 1);
+ } else {
+ r = (clz_a + 1) & (d - 1);
+ }
+
+ // Now we're done, except for having to encode the S output in such a way that
+ // it gives both the number of set bits and the length of the repeated
+ // segment. The s field is encoded like this:
+ //
+ // imms size S
+ // ssssss 64 UInt(ssssss)
+ // 0sssss 32 UInt(sssss)
+ // 10ssss 16 UInt(ssss)
+ // 110sss 8 UInt(sss)
+ // 1110ss 4 UInt(ss)
+ // 11110s 2 UInt(s)
+ //
+ // So we 'or' (-d << 1) with our computed s to form imms.
+ if ((n != NULL) || (imm_s != NULL) || (imm_r != NULL)) {
+ *n = out_n;
+ *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
+ *imm_r = r;
+ }
+
+ return true;
+}
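+// Worked example (illustrative): value == 0x0f0f0f0f0f0f0f0f, width == 64.
+// The low bit is set, so the value is first inverted to 0xf0f0f0f0f0f0f0f0.
+// Then a == 1<<4, b == 1<<8 and c == 1<<12, giving a repeat distance d == 8
+// and candidate (b - a) * 0x0101010101010101 == 0xf0f0f0f0f0f0f0f0, which
+// matches. After compensating for the inversion, s == 4 and r == 0, so the
+// outputs are n == 0, imm_s == 0b110011 and imm_r == 0: a 4-bit run of ones
+// repeated every 8 bits.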
+
+
+LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
+ VIXL_ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x : LDR_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBits()) {
+ case kBRegSize: return LDR_b;
+ case kHRegSize: return LDR_h;
+ case kSRegSize: return LDR_s;
+ case kDRegSize: return LDR_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return LDR_q;
+ }
+ }
+}
+
+
+LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
+ VIXL_ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STR_x : STR_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBits()) {
+ case kBRegSize: return STR_b;
+ case kHRegSize: return STR_h;
+ case kSRegSize: return STR_s;
+ case kDRegSize: return STR_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return STR_q;
+ }
+ }
+}
+
+
+LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STP_x : STP_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBytes()) {
+ case kSRegSizeInBytes: return STP_s;
+ case kDRegSizeInBytes: return STP_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return STP_q;
+ }
+ }
+}
+
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w);
+ return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
+ LoadStorePairLBit);
+}
+
+
+LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STNP_x : STNP_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBytes()) {
+ case kSRegSizeInBytes: return STNP_s;
+ case kDRegSizeInBytes: return STNP_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return STNP_q;
+ }
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w);
+ return static_cast<LoadStorePairNonTemporalOp>(
+ StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit);
+}
+
+
+LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
+ if (rt.IsRegister()) {
+ return rt.IsX() ? LDR_x_lit : LDR_w_lit;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBytes()) {
+ case kSRegSizeInBytes: return LDR_s_lit;
+ case kDRegSizeInBytes: return LDR_d_lit;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return LDR_q_lit;
+ }
+ }
+}
+
+
+bool Assembler::CPUHas(const CPURegister& rt) const {
+ // Core registers are available without any particular CPU features.
+ if (rt.IsRegister()) return true;
+ VIXL_ASSERT(rt.IsVRegister());
+ // The architecture does not allow FP and NEON to be implemented separately,
+ // but we can crudely categorise them based on register size, since FP only
+ // uses D, S and (occasionally) H registers.
+ if (rt.IsH() || rt.IsS() || rt.IsD()) {
+ return CPUHas(CPUFeatures::kFP) || CPUHas(CPUFeatures::kNEON);
+ }
+ VIXL_ASSERT(rt.IsB() || rt.IsQ());
+ return CPUHas(CPUFeatures::kNEON);
+}
+
+
+bool Assembler::CPUHas(const CPURegister& rt, const CPURegister& rt2) const {
+ // This is currently only used for loads and stores, where rt and rt2 must
+ // have the same size and type. We could extend this to cover other cases if
+ // necessary, but for now we can avoid checking both registers.
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ return CPUHas(rt);
+}
+
+
+bool Assembler::CPUHas(SystemRegister sysreg) const {
+ switch (sysreg) {
+ case RNDR:
+ case RNDRRS:
+ return CPUHas(CPUFeatures::kRNG);
+ case FPCR:
+ case NZCV:
+ break;
+ }
+ return true;
+}
+
+
+bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ int number_of_valid_regs = 0;
+ int number_of_valid_fpregs = 0;
+
+ RegList unique_regs = 0;
+ RegList unique_fpregs = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
+
+ for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+ if (regs[i].IsRegister()) {
+ number_of_valid_regs++;
+ unique_regs |= regs[i].Bit();
+ } else if (regs[i].IsVRegister()) {
+ number_of_valid_fpregs++;
+ unique_fpregs |= regs[i].Bit();
+ } else {
+ VIXL_ASSERT(!regs[i].IsValid());
+ }
+ }
+
+ int number_of_unique_regs = CountSetBits(unique_regs);
+ int number_of_unique_fpregs = CountSetBits(unique_fpregs);
+
+ VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs);
+ VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+
+ return (number_of_valid_regs != number_of_unique_regs) ||
+ (number_of_valid_fpregs != number_of_unique_fpregs);
+}
+
+
+bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
+ match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
+ match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
+ match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
+ match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
+ return match;
+}
+
+bool AreEven(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3,
+ const CPURegister& reg4,
+ const CPURegister& reg5,
+ const CPURegister& reg6,
+ const CPURegister& reg7,
+ const CPURegister& reg8) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool even = (reg1.code() % 2) == 0;
+ even &= !reg2.IsValid() || ((reg2.code() % 2) == 0);
+ even &= !reg3.IsValid() || ((reg3.code() % 2) == 0);
+ even &= !reg4.IsValid() || ((reg4.code() % 2) == 0);
+ even &= !reg5.IsValid() || ((reg5.code() % 2) == 0);
+ even &= !reg6.IsValid() || ((reg6.code() % 2) == 0);
+ even &= !reg7.IsValid() || ((reg7.code() % 2) == 0);
+ even &= !reg8.IsValid() || ((reg8.code() % 2) == 0);
+ return even;
+}
+
+bool AreConsecutive(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3,
+ const CPURegister& reg4) {
+ VIXL_ASSERT(reg1.IsValid());
+
+ if (!reg2.IsValid()) {
+ return true;
+ } else if (reg2.code() != ((reg1.code() + 1) % kNumberOfRegisters)) {
+ return false;
+ }
+
+ if (!reg3.IsValid()) {
+ return true;
+ } else if (reg3.code() != ((reg2.code() + 1) % kNumberOfRegisters)) {
+ return false;
+ }
+
+ if (!reg4.IsValid()) {
+ return true;
+ } else if (reg4.code() != ((reg3.code() + 1) % kNumberOfRegisters)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3, const VRegister& reg4) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameFormat(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameFormat(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameFormat(reg1);
+ return match;
+}
+
+
+bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3, const VRegister& reg4) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() ||
+ (reg2.code() == ((reg1.code() + 1) % kNumberOfVRegisters));
+ match &= !reg3.IsValid() ||
+ (reg3.code() == ((reg1.code() + 2) % kNumberOfVRegisters));
+ match &= !reg4.IsValid() ||
+ (reg4.code() == ((reg1.code() + 3) % kNumberOfVRegisters));
+ return match;
+}
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Assembler-vixl.h b/js/src/jit/arm64/vixl/Assembler-vixl.h
new file mode 100644
index 0000000000..462b359eea
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.h
@@ -0,0 +1,4974 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_ASSEMBLER_A64_H_
+#define VIXL_A64_ASSEMBLER_A64_H_
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/arm64/vixl/MozBaseAssembler-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+#include "jit/JitSpewer.h"
+
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/Disassembler-shared.h"
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
+
+#if defined(_M_ARM64)
+#ifdef mvn
+#undef mvn
+#endif
+#endif
+
+namespace vixl {
+
+using js::jit::BufferOffset;
+using js::jit::Label;
+using js::jit::Address;
+using js::jit::BaseIndex;
+using js::jit::DisassemblerSpew;
+
+using LabelDoc = DisassemblerSpew::LabelDoc;
+
+typedef uint64_t RegList;
+static const int kRegListSizeInBits = sizeof(RegList) * 8;
+
+
+// Registers.
+
+// Some CPURegister methods can return Register or VRegister types, so we need
+// to declare them in advance.
+class Register;
+class VRegister;
+
+class CPURegister {
+ public:
+ enum RegisterType {
+ // The kInvalid value is used to detect uninitialized static instances,
+ // which are always zero-initialized before any constructors are called.
+ kInvalid = 0,
+ kRegister,
+ kVRegister,
+ kFPRegister = kVRegister,
+ kNoRegister
+ };
+
+ constexpr CPURegister() : code_(0), size_(0), type_(kNoRegister) {
+ }
+
+ constexpr CPURegister(unsigned code, unsigned size, RegisterType type)
+ : code_(code), size_(size), type_(type) {
+ }
+
+ unsigned code() const {
+ VIXL_ASSERT(IsValid());
+ return code_;
+ }
+
+ RegisterType type() const {
+ VIXL_ASSERT(IsValidOrNone());
+ return type_;
+ }
+
+ RegList Bit() const {
+ VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
+ return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
+ }
+
+ unsigned size() const {
+ VIXL_ASSERT(IsValid());
+ return size_;
+ }
+
+ int SizeInBytes() const {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(size() % 8 == 0);
+ return size_ / 8;
+ }
+
+ int SizeInBits() const {
+ VIXL_ASSERT(IsValid());
+ return size_;
+ }
+
+ bool Is8Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 8;
+ }
+
+ bool Is16Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 16;
+ }
+
+ bool Is32Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 32;
+ }
+
+ bool Is64Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 64;
+ }
+
+ bool Is128Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 128;
+ }
+
+ bool IsValid() const {
+ if (IsValidRegister() || IsValidVRegister()) {
+ VIXL_ASSERT(!IsNone());
+ return true;
+ } else {
+ // This assert is hit when the register has not been properly initialized.
+ // One cause for this can be an initialisation order fiasco. See
+ // https://isocpp.org/wiki/faq/ctors#static-init-order for some details.
+ VIXL_ASSERT(IsNone());
+ return false;
+ }
+ }
+
+ bool IsValidRegister() const {
+ return IsRegister() &&
+ ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
+ ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
+ }
+
+ bool IsValidVRegister() const {
+ return IsVRegister() &&
+ ((size_ == kBRegSize) || (size_ == kHRegSize) ||
+ (size_ == kSRegSize) || (size_ == kDRegSize) ||
+ (size_ == kQRegSize)) &&
+ (code_ < kNumberOfVRegisters);
+ }
+
+ bool IsValidFPRegister() const {
+ return IsFPRegister() && (code_ < kNumberOfVRegisters);
+ }
+
+ bool IsNone() const {
+ // kNoRegister types should always have size 0 and code 0.
+ VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
+ VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
+
+ return type_ == kNoRegister;
+ }
+
+ bool Aliases(const CPURegister& other) const {
+ VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (code_ == other.code_) && (type_ == other.type_);
+ }
+
+ bool Is(const CPURegister& other) const {
+ VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return Aliases(other) && (size_ == other.size_);
+ }
+
+ bool IsZero() const {
+ VIXL_ASSERT(IsValid());
+ return IsRegister() && (code_ == kZeroRegCode);
+ }
+
+ bool IsSP() const {
+ VIXL_ASSERT(IsValid());
+ return IsRegister() && (code_ == kSPRegInternalCode);
+ }
+
+ bool IsRegister() const {
+ return type_ == kRegister;
+ }
+
+ bool IsVRegister() const {
+ return type_ == kVRegister;
+ }
+
+ bool IsFPRegister() const {
+ return IsS() || IsD();
+ }
+
+ bool IsW() const { return IsValidRegister() && Is32Bits(); }
+ bool IsX() const { return IsValidRegister() && Is64Bits(); }
+
+ // These assertions ensure that the size and type of the register are as
+ // described. They do not consider the number of lanes that make up a vector.
+  // So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but IsD()
+  // does not imply Is1D() or Is8B().
+  // Check the number of lanes, i.e. the format of the vector, using methods such
+ // as Is8B(), Is1D(), etc. in the VRegister class.
+ bool IsV() const { return IsVRegister(); }
+ bool IsB() const { return IsV() && Is8Bits(); }
+ bool IsH() const { return IsV() && Is16Bits(); }
+ bool IsS() const { return IsV() && Is32Bits(); }
+ bool IsD() const { return IsV() && Is64Bits(); }
+ bool IsQ() const { return IsV() && Is128Bits(); }
+
+ const Register& W() const;
+ const Register& X() const;
+ const VRegister& V() const;
+ const VRegister& B() const;
+ const VRegister& H() const;
+ const VRegister& S() const;
+ const VRegister& D() const;
+ const VRegister& Q() const;
+
+ bool IsSameSizeAndType(const CPURegister& other) const {
+ return (size_ == other.size_) && (type_ == other.type_);
+ }
+
+ protected:
+ unsigned code_;
+ unsigned size_;
+ RegisterType type_;
+
+ private:
+ bool IsValidOrNone() const {
+ return IsValid() || IsNone();
+ }
+};
+
+
+class Register : public CPURegister {
+ public:
+ Register() : CPURegister() {}
+ explicit Register(const CPURegister& other)
+ : CPURegister(other.code(), other.size(), other.type()) {
+ VIXL_ASSERT(IsValidRegister());
+ }
+ constexpr Register(unsigned code, unsigned size)
+ : CPURegister(code, size, kRegister) {}
+
+ constexpr Register(js::jit::Register r, unsigned size)
+ : CPURegister(r.code(), size, kRegister) {}
+
+ bool IsValid() const {
+ VIXL_ASSERT(IsRegister() || IsNone());
+ return IsValidRegister();
+ }
+
+ js::jit::Register asUnsized() const {
+ // asUnsized() is only ever used on temp registers or on registers that
+ // are known not to be SP, and there should be no risk of it being
+ // applied to SP. Check anyway.
+ VIXL_ASSERT(code_ != kSPRegInternalCode);
+ return js::jit::Register::FromCode((js::jit::Register::Code)code_);
+ }
+
+
+ static const Register& WRegFromCode(unsigned code);
+ static const Register& XRegFromCode(unsigned code);
+
+ private:
+ static const Register wregisters[];
+ static const Register xregisters[];
+};
+
+
+class VRegister : public CPURegister {
+ public:
+ VRegister() : CPURegister(), lanes_(1) {}
+ explicit VRegister(const CPURegister& other)
+ : CPURegister(other.code(), other.size(), other.type()), lanes_(1) {
+ VIXL_ASSERT(IsValidVRegister());
+ VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+ }
+ constexpr VRegister(unsigned code, unsigned size, unsigned lanes = 1)
+ : CPURegister(code, size, kVRegister), lanes_(lanes) {
+ // VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+ }
+ constexpr VRegister(js::jit::FloatRegister r)
+ : CPURegister(r.encoding(), r.size() * 8, kVRegister), lanes_(1) {
+ }
+ constexpr VRegister(js::jit::FloatRegister r, unsigned size)
+ : CPURegister(r.encoding(), size, kVRegister), lanes_(1) {
+ }
+ VRegister(unsigned code, VectorFormat format)
+ : CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
+ lanes_(IsVectorFormat(format) ? LaneCountFromFormat(format) : 1) {
+ VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+ }
+
+ bool IsValid() const {
+ VIXL_ASSERT(IsVRegister() || IsNone());
+ return IsValidVRegister();
+ }
+
+ static const VRegister& BRegFromCode(unsigned code);
+ static const VRegister& HRegFromCode(unsigned code);
+ static const VRegister& SRegFromCode(unsigned code);
+ static const VRegister& DRegFromCode(unsigned code);
+ static const VRegister& QRegFromCode(unsigned code);
+ static const VRegister& VRegFromCode(unsigned code);
+
+ VRegister V8B() const { return VRegister(code_, kDRegSize, 8); }
+ VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }
+ VRegister V4H() const { return VRegister(code_, kDRegSize, 4); }
+ VRegister V8H() const { return VRegister(code_, kQRegSize, 8); }
+ VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }
+ VRegister V4S() const { return VRegister(code_, kQRegSize, 4); }
+ VRegister V2D() const { return VRegister(code_, kQRegSize, 2); }
+ VRegister V1D() const { return VRegister(code_, kDRegSize, 1); }
+
+ bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); }
+ bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); }
+ bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); }
+ bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); }
+ bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); }
+ bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); }
+ bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); }
+ bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); }
+
+ // For consistency, we assert the number of lanes of these scalar registers,
+ // even though there are no vectors of equivalent total size with which they
+ // could alias.
+ bool Is1B() const {
+ VIXL_ASSERT(!(Is8Bits() && IsVector()));
+ return Is8Bits();
+ }
+ bool Is1H() const {
+ VIXL_ASSERT(!(Is16Bits() && IsVector()));
+ return Is16Bits();
+ }
+ bool Is1S() const {
+ VIXL_ASSERT(!(Is32Bits() && IsVector()));
+ return Is32Bits();
+ }
+
+ bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSize; }
+ bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSize; }
+ bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSize; }
+ bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSize; }
+
+ int lanes() const {
+ return lanes_;
+ }
+
+ bool IsScalar() const {
+ return lanes_ == 1;
+ }
+
+ bool IsVector() const {
+ return lanes_ > 1;
+ }
+
+ bool IsSameFormat(const VRegister& other) const {
+ return (size_ == other.size_) && (lanes_ == other.lanes_);
+ }
+
+ unsigned LaneSizeInBytes() const {
+ return SizeInBytes() / lanes_;
+ }
+
+ unsigned LaneSizeInBits() const {
+ return LaneSizeInBytes() * 8;
+ }
+
+ private:
+ static const VRegister bregisters[];
+ static const VRegister hregisters[];
+ static const VRegister sregisters[];
+ static const VRegister dregisters[];
+ static const VRegister qregisters[];
+ static const VRegister vregisters[];
+ int lanes_;
+};
+
+
+// Backward compatibility for FPRegisters.
+typedef VRegister FPRegister;
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and VRegister
+// variants are provided for convenience.
+const Register NoReg;
+const VRegister NoVReg;
+const FPRegister NoFPReg; // For backward compatibility.
+const CPURegister NoCPUReg;
+
+
+#define DEFINE_REGISTERS(N) \
+constexpr Register w##N(N, kWRegSize); \
+constexpr Register x##N(N, kXRegSize);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+constexpr Register wsp(kSPRegInternalCode, kWRegSize);
+constexpr Register sp(kSPRegInternalCode, kXRegSize);
+
+
+#define DEFINE_VREGISTERS(N) \
+constexpr VRegister b##N(N, kBRegSize); \
+constexpr VRegister h##N(N, kHRegSize); \
+constexpr VRegister s##N(N, kSRegSize); \
+constexpr VRegister d##N(N, kDRegSize); \
+constexpr VRegister q##N(N, kQRegSize); \
+constexpr VRegister v##N(N, kQRegSize);
+REGISTER_CODE_LIST(DEFINE_VREGISTERS)
+#undef DEFINE_VREGISTERS
+
+
+// Register aliases.
+constexpr Register ip0 = x16;
+constexpr Register ip1 = x17;
+constexpr Register lr = x30;
+constexpr Register xzr = x31;
+constexpr Register wzr = w31;
+
+
+// AreAliased returns true if any of the named registers overlap. Arguments
+// set to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg,
+ const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg,
+ const CPURegister& reg8 = NoCPUReg);
+
+// AreEven returns true if all of the specified registers have even register
+// indices. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreEven(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+// AreConsecutive returns true if all of the specified registers are
+// consecutive in the register file. Arguments set to NoReg are ignored, as are
+// any subsequent arguments. At least one argument (reg1) must be valid
+// (not NoCPUReg).
+bool AreConsecutive(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg);
+
+// AreSameFormat returns true if all of the specified VRegisters have the same
+// vector format. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoVReg).
+bool AreSameFormat(const VRegister& reg1,
+ const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+
+// AreConsecutive returns true if all of the specified VRegisters are
+// consecutive in the register file. Arguments set to NoReg are ignored, as are
+// any subsequent arguments. At least one argument (reg1) must be valid
+// (not NoVReg).
+bool AreConsecutive(const VRegister& reg1,
+ const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+
+// Lists of registers.
+class CPURegList {
+ public:
+ explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
+ : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+ size_(reg1.size()), type_(reg1.type()) {
+ VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ VIXL_ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ VIXL_ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
+ : size_(size), type_(type) {
+ VIXL_ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kVRegister) &&
+ (last_reg < kNumberOfVRegisters)));
+ VIXL_ASSERT(last_reg >= first_reg);
+ list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
+ list_ &= ~((UINT64_C(1) << first_reg) - 1);
+ VIXL_ASSERT(IsValid());
+ }
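+  // For example, first_reg == 19 and last_reg == 28 sets list_ to
+  // ((1 << 29) - 1) & ~((1 << 19) - 1), i.e. a mask with bits 19..28 set.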
+
+ CPURegister::RegisterType type() const {
+ VIXL_ASSERT(IsValid());
+ return type_;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.RegisterSizeInBits() == size_);
+ list_ |= other.list();
+ }
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type and size of the registers
+ // in the 'other' list must match those in this list.
+ void Remove(const CPURegList& other) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.RegisterSizeInBits() == size_);
+ list_ &= ~other.list();
+ }
+
+ // Variants of Combine and Remove which take a single register.
+ void Combine(const CPURegister& other) {
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.size() == size_);
+ Combine(other.code());
+ }
+
+ void Remove(const CPURegister& other) {
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.size() == size_);
+ Remove(other.code());
+ }
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register is inferred from this list.
+ void Combine(int code) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ |= (UINT64_C(1) << code);
+ }
+
+ void Remove(int code) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ &= ~(UINT64_C(1) << code);
+ }
+
+ static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
+ VIXL_ASSERT(list_1.type_ == list_2.type_);
+ VIXL_ASSERT(list_1.size_ == list_2.size_);
+ return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
+ }
+ static CPURegList Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3);
+ static CPURegList Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4);
+
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2) {
+ VIXL_ASSERT(list_1.type_ == list_2.type_);
+ VIXL_ASSERT(list_1.size_ == list_2.size_);
+ return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
+ }
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3);
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4);
+
+ bool Overlaps(const CPURegList& other) const {
+ return (type_ == other.type_) && ((list_ & other.list_) != 0);
+ }
+
+ RegList list() const {
+ VIXL_ASSERT(IsValid());
+ return list_;
+ }
+
+ void set_list(RegList new_list) {
+ VIXL_ASSERT(IsValid());
+ list_ = new_list;
+ }
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
+ static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
+  // 64 bits being caller-saved.
+ static CPURegList GetCallerSaved(unsigned size = kXRegSize);
+ static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
+
+ bool IsEmpty() const {
+ VIXL_ASSERT(IsValid());
+ return list_ == 0;
+ }
+
+ bool IncludesAliasOf(const CPURegister& other) const {
+ VIXL_ASSERT(IsValid());
+ return (type_ == other.type()) && ((other.Bit() & list_) != 0);
+ }
+
+ bool IncludesAliasOf(int code) const {
+ VIXL_ASSERT(IsValid());
+    return (((static_cast<RegList>(1) << code) & list_) != 0);
+ }
+
+ int Count() const {
+ VIXL_ASSERT(IsValid());
+ return CountSetBits(list_);
+ }
+
+ unsigned RegisterSizeInBits() const {
+ VIXL_ASSERT(IsValid());
+ return size_;
+ }
+
+ unsigned RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ VIXL_ASSERT((size_in_bits % 8) == 0);
+ return size_in_bits / 8;
+ }
+
+ unsigned TotalSizeInBytes() const {
+ VIXL_ASSERT(IsValid());
+ return RegisterSizeInBytes() * Count();
+ }
+
+ private:
+ RegList list_;
+ unsigned size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const;
+};
+
+
+// AAPCS64 callee-saved registers.
+extern const CPURegList kCalleeSaved;
+extern const CPURegList kCalleeSavedV;
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+extern const CPURegList kCallerSaved;
+extern const CPURegList kCallerSavedV;
+
+
+// Operand.
+class Operand {
+ public:
+ // #<immediate>
+ // where <immediate> is int64_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ Operand(int64_t immediate = 0); // NOLINT(runtime/explicit)
+
+ // rm, {<shift> #<shift_amount>}
+ // where <shift> is one of {LSL, LSR, ASR, ROR}.
+ // <shift_amount> is uint6_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ Operand(Register reg,
+ Shift shift = LSL,
+ unsigned shift_amount = 0); // NOLINT(runtime/explicit)
+
+ // rm, {<extend> {#<shift_amount>}}
+ // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+ // <shift_amount> is uint2_t.
+ explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
+
+ bool IsImmediate() const;
+ bool IsShiftedRegister() const;
+ bool IsExtendedRegister() const;
+ bool IsZero() const;
+
+ // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+ // which helps in the encoding of instructions that use the stack pointer.
+ Operand ToExtendedRegister() const;
+
+ int64_t immediate() const {
+ VIXL_ASSERT(IsImmediate());
+ return immediate_;
+ }
+
+ Register reg() const {
+ VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return reg_;
+ }
+
+ CPURegister maybeReg() const {
+ if (IsShiftedRegister() || IsExtendedRegister())
+ return reg_;
+ return NoCPUReg;
+ }
+
+ Shift shift() const {
+ VIXL_ASSERT(IsShiftedRegister());
+ return shift_;
+ }
+
+ Extend extend() const {
+ VIXL_ASSERT(IsExtendedRegister());
+ return extend_;
+ }
+
+ unsigned shift_amount() const {
+ VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return shift_amount_;
+ }
+
+ private:
+ int64_t immediate_;
+ Register reg_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
+
+
+// MemOperand represents the addressing mode of a load or store instruction.
+class MemOperand {
+ public:
+ explicit MemOperand(Register base,
+ int64_t offset = 0,
+ AddrMode addrmode = Offset);
+ MemOperand(Register base,
+ Register regoffset,
+ Shift shift = LSL,
+ unsigned shift_amount = 0);
+ MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount = 0);
+ MemOperand(Register base,
+ const Operand& offset,
+ AddrMode addrmode = Offset);
+
+ // Adapter constructors using C++11 delegating.
+ // TODO: If sp == kSPRegInternalCode, the xzr check isn't necessary.
+ explicit MemOperand(js::jit::Address addr)
+ : MemOperand(IsHiddenSP(addr.base) ? sp : Register(AsRegister(addr.base), 64),
+ (ptrdiff_t)addr.offset) {
+ }
+
+ const Register& base() const { return base_; }
+ const Register& regoffset() const { return regoffset_; }
+ int64_t offset() const { return offset_; }
+ AddrMode addrmode() const { return addrmode_; }
+ Shift shift() const { return shift_; }
+ Extend extend() const { return extend_; }
+ unsigned shift_amount() const { return shift_amount_; }
+ bool IsImmediateOffset() const;
+ bool IsRegisterOffset() const;
+ bool IsPreIndex() const;
+ bool IsPostIndex() const;
+
+ void AddOffset(int64_t offset);
+
+ private:
+ Register base_;
+ Register regoffset_;
+ int64_t offset_;
+ AddrMode addrmode_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
+
+
+// Control whether or not position-independent code should be emitted.
+enum PositionIndependentCodeOption {
+ // All code generated will be position-independent; all branches and
+ // references to labels generated with the Label class will use PC-relative
+ // addressing.
+ PositionIndependentCode,
+
+ // Allow VIXL to generate code that refers to absolute addresses. With this
+ // option, it will not be possible to copy the code buffer and run it from a
+ // different address; code must be generated in its final location.
+ PositionDependentCode,
+
+ // Allow VIXL to assume that the bottom 12 bits of the address will be
+ // constant, but that the top 48 bits may change. This allows `adrp` to
+ // function in systems which copy code between pages, but otherwise maintain
+ // 4KB page alignment.
+ PageOffsetDependentCode
+};
+
+
+// Control how scaled- and unscaled-offset loads and stores are generated.
+enum LoadStoreScalingOption {
+ // Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
+ // register-offset, pre-index or post-index instructions if necessary.
+ PreferScaledOffset,
+
+ // Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
+ // register-offset, pre-index or post-index instructions if necessary.
+ PreferUnscaledOffset,
+
+ // Require scaled-immediate-offset instructions.
+ RequireScaledOffset,
+
+ // Require unscaled-immediate-offset instructions.
+ RequireUnscaledOffset
+};
+
+
+// Assembler.
+class Assembler : public MozBaseAssembler {
+ public:
+ Assembler(PositionIndependentCodeOption pic = PositionIndependentCode);
+
+ // System functions.
+
+ // Finalize a code buffer of generated instructions. This function must be
+ // called before executing or copying code from the buffer.
+ void FinalizeCode();
+
+#define COPYENUM(v) static const Condition v = vixl::v
+#define COPYENUM_(v) static const Condition v = vixl::v##_
+ COPYENUM(Equal);
+ COPYENUM(Zero);
+ COPYENUM(NotEqual);
+ COPYENUM(NonZero);
+ COPYENUM(AboveOrEqual);
+ COPYENUM(CarrySet);
+ COPYENUM(Below);
+ COPYENUM(CarryClear);
+ COPYENUM(Signed);
+ COPYENUM(NotSigned);
+ COPYENUM(Overflow);
+ COPYENUM(NoOverflow);
+ COPYENUM(Above);
+ COPYENUM(BelowOrEqual);
+ COPYENUM_(GreaterThanOrEqual);
+ COPYENUM_(LessThan);
+ COPYENUM_(GreaterThan);
+ COPYENUM_(LessThanOrEqual);
+ COPYENUM(Always);
+ COPYENUM(Never);
+#undef COPYENUM
+#undef COPYENUM_
+
+ // Bit set when a DoubleCondition does not map to a single ARM condition.
+ // The MacroAssembler must special-case these conditions, or else
+ // ConditionFromDoubleCondition will complain.
+ static const int DoubleConditionBitSpecial = 0x100;
+
+ enum DoubleCondition {
+ DoubleOrdered = Condition::vc,
+ DoubleEqual = Condition::eq,
+ DoubleNotEqual = Condition::ne | DoubleConditionBitSpecial,
+ DoubleGreaterThan = Condition::gt,
+ DoubleGreaterThanOrEqual = Condition::ge,
+ DoubleLessThan = Condition::lo, // Could also use Condition::mi.
+ DoubleLessThanOrEqual = Condition::ls,
+
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered = Condition::vs,
+ DoubleEqualOrUnordered = Condition::eq | DoubleConditionBitSpecial,
+ DoubleNotEqualOrUnordered = Condition::ne,
+ DoubleGreaterThanOrUnordered = Condition::hi,
+ DoubleGreaterThanOrEqualOrUnordered = Condition::hs,
+ DoubleLessThanOrUnordered = Condition::lt,
+ DoubleLessThanOrEqualOrUnordered = Condition::le
+ };
+
+ static inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no "always false" condition.
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+ }
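+  // For example, eq (0b0000) inverts to ne (0b0001) and ge (0b1010) inverts
+  // to lt (0b1011): the condition codes are arranged in pairs that differ
+  // only in their lowest bit and test opposite predicates.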
+
+  // This changes the condition codes for cmp a, b to the same codes for cmp b, a.
+ static inline Condition InvertCmpCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no "always false" condition.
+ switch (cond) {
+ case eq:
+ case ne:
+ return cond;
+ case gt:
+ return le;
+ case le:
+ return gt;
+ case ge:
+ return lt;
+ case lt:
+ return ge;
+ case hi:
+ return lo;
+ case lo:
+ return hi;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case mi:
+ return pl;
+ case pl:
+ return mi;
+ default:
+ MOZ_CRASH("TODO: figure this case out.");
+ }
+ return static_cast<Condition>(cond ^ 1);
+ }
+
+ static inline DoubleCondition InvertCondition(DoubleCondition cond) {
+ switch (cond) {
+ case DoubleOrdered:
+ return DoubleUnordered;
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleUnordered:
+ return DoubleOrdered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ MOZ_CRASH("Bad condition");
+ }
+ }
+
+ static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
+ VIXL_ASSERT(!(cond & DoubleConditionBitSpecial));
+ return static_cast<Condition>(cond);
+ }
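+
+  // Illustrative mapping (a sketch, not part of this patch):
+  //   ConditionFromDoubleCondition(DoubleGreaterThan) == gt
+  //   InvertCondition(DoubleEqual) == DoubleNotEqualOrUnordered
+  // DoubleNotEqual and DoubleEqualOrUnordered carry DoubleConditionBitSpecial,
+  // so callers must lower them with extra code (for example an additional
+  // branch on vs) rather than passing them to ConditionFromDoubleCondition.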
+
+ // Instruction set functions.
+
+ // Branch / Jump instructions.
+ // Branch to register.
+ void br(const Register& xn);
+ static void br(Instruction* at, const Register& xn);
+
+ // Branch with link to register.
+ void blr(const Register& xn);
+  static void blr(Instruction* at, const Register& xn);
+
+ // Branch to register with return hint.
+ void ret(const Register& xn = lr);
+
+ // Unconditional branch to label.
+ BufferOffset b(Label* label);
+
+ // Conditional branch to label.
+ BufferOffset b(Label* label, Condition cond);
+
+ // Unconditional branch to PC offset.
+ BufferOffset b(int imm26, const LabelDoc& doc);
+ static void b(Instruction* at, int imm26);
+
+ // Conditional branch to PC offset.
+ BufferOffset b(int imm19, Condition cond, const LabelDoc& doc);
+  static void b(Instruction* at, int imm19, Condition cond);
+
+ // Branch with link to label.
+ void bl(Label* label);
+
+ // Branch with link to PC offset.
+ void bl(int imm26, const LabelDoc& doc);
+ static void bl(Instruction* at, int imm26);
+
+ // Compare and branch to label if zero.
+ void cbz(const Register& rt, Label* label);
+
+ // Compare and branch to PC offset if zero.
+ void cbz(const Register& rt, int imm19, const LabelDoc& doc);
+ static void cbz(Instruction* at, const Register& rt, int imm19);
+
+ // Compare and branch to label if not zero.
+ void cbnz(const Register& rt, Label* label);
+
+ // Compare and branch to PC offset if not zero.
+ void cbnz(const Register& rt, int imm19, const LabelDoc& doc);
+ static void cbnz(Instruction* at, const Register& rt, int imm19);
+
+ // Table lookup from one register.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Table lookup from two registers.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm);
+
+ // Table lookup from three registers.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm);
+
+ // Table lookup from four registers.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm);
+
+ // Table lookup extension from one register.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Table lookup extension from two registers.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm);
+
+ // Table lookup extension from three registers.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm);
+
+ // Table lookup extension from four registers.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm);
+
+ // Test bit and branch to label if zero.
+ void tbz(const Register& rt, unsigned bit_pos, Label* label);
+
+ // Test bit and branch to PC offset if zero.
+ void tbz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc);
+ static void tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14);
+
+ // Test bit and branch to label if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+
+ // Test bit and branch to PC offset if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc);
+ static void tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14);
+
+ // Address calculation instructions.
+  // Calculate a PC-relative address. Unlike for branches, the offset in adr
+  // is unscaled (i.e. the result can be unaligned).
+
+ // Calculate the address of a label.
+ void adr(const Register& rd, Label* label);
+
+ // Calculate the address of a PC offset.
+ void adr(const Register& rd, int imm21, const LabelDoc& doc);
+ static void adr(Instruction* at, const Register& rd, int imm21);
+
+ // Calculate the page address of a label.
+ void adrp(const Register& rd, Label* label);
+
+ // Calculate the page address of a PC offset.
+ void adrp(const Register& rd, int imm21, const LabelDoc& doc);
+ static void adrp(Instruction* at, const Register& rd, int imm21);
+
+ // Data Processing instructions.
+ // Add.
+ void add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add and update status flags.
+ void adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare negative.
+ void cmn(const Register& rn, const Operand& operand);
+
+ // Subtract.
+ void sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract and update status flags.
+ void subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare.
+ void cmp(const Register& rn, const Operand& operand);
+
+ // Negate.
+ void neg(const Register& rd,
+ const Operand& operand);
+
+ // Negate and update status flags.
+ void negs(const Register& rd,
+ const Operand& operand);
+
+ // Add with carry bit.
+ void adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add with carry bit and update status flags.
+ void adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit.
+ void sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit and update status flags.
+ void sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Negate with carry bit.
+ void ngc(const Register& rd,
+ const Operand& operand);
+
+ // Negate with carry bit and update status flags.
+ void ngcs(const Register& rd,
+ const Operand& operand);
+
+ // Logical instructions.
+ // Bitwise and (A & B).
+ void and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise and (A & B) and update status flags.
+ BufferOffset ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit test and set flags.
+ BufferOffset tst(const Register& rn, const Operand& operand);
+
+ // Bit clear (A & ~B).
+ void bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit clear (A & ~B) and update status flags.
+ void bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise or (A | B).
+ void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise or-not (A | ~B).
+ void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise eor/xor (A ^ B).
+ void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise exclusive nor / xnor (A ^ ~B).
+ void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Logical shift left by variable.
+ void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Logical shift right by variable.
+ void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Arithmetic shift right by variable.
+ void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Rotate right by variable.
+ void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bitfield instructions.
+ // Bitfield move.
+ void bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Signed bitfield move.
+ void sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Unsigned bitfield move.
+ void ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Bfm aliases.
+ // Bitfield insert.
+ void bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Bitfield extract and insert low.
+ void bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ bfm(rd, rn, lsb, lsb + width - 1);
+ }
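+
+  // Worked example (illustrative, for 64-bit registers):
+  //   bfi(x0, x1, 8, 16)   emits bfm(x0, x1, /*immr=*/56, /*imms=*/15),
+  //   bfxil(x0, x1, 8, 16) emits bfm(x0, x1, /*immr=*/8,  /*imms=*/23),
+  // i.e. bfi inserts x1<15:0> into x0<23:8>, while bfxil copies x1<23:8>
+  // into x0<15:0>; the remaining destination bits are left unchanged.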
+
+ // Sbfm aliases.
+ // Arithmetic shift right.
+ void asr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(shift < rd.size());
+ sbfm(rd, rn, shift, rd.size() - 1);
+ }
+
+ // Signed bitfield insert with zero at right.
+ void sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Signed bitfield extract.
+ void sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ sbfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Signed extend byte.
+ void sxtb(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 7);
+ }
+
+ // Signed extend halfword.
+ void sxth(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 15);
+ }
+
+ // Signed extend word.
+ void sxtw(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 31);
+ }
+
+ // Ubfm aliases.
+ // Logical shift left.
+ void lsl(const Register& rd, const Register& rn, unsigned shift) {
+ unsigned reg_size = rd.size();
+ VIXL_ASSERT(shift < reg_size);
+ ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+ }
+
+ // Logical shift right.
+ void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(shift < rd.size());
+ ubfm(rd, rn, shift, rd.size() - 1);
+ }
+
+ // Unsigned bitfield insert with zero at right.
+ void ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Unsigned bitfield extract.
+ void ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ ubfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Unsigned extend byte.
+ void uxtb(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 7);
+ }
+
+ // Unsigned extend halfword.
+ void uxth(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 15);
+ }
+
+ // Unsigned extend word.
+ void uxtw(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 31);
+ }
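+
+  // Worked example (illustrative, for 64-bit registers):
+  //   lsl(x0, x1, 4)     emits ubfm(x0, x1, /*immr=*/60, /*imms=*/59),
+  //   ubfx(x0, x1, 8, 4) emits ubfm(x0, x1, /*immr=*/8,  /*imms=*/11),
+  //   uxtb(x0, x1)       emits ubfm(x0, x1, 0, 7), clearing bits 63:8.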
+
+ // Extract.
+ void extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+
+ // Conditional select: rd = cond ? rn : rm.
+ void csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select increment: rd = cond ? rn : rm + 1.
+ void csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select inversion: rd = cond ? rn : ~rm.
+ void csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select negation: rd = cond ? rn : -rm.
+ void csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional set: rd = cond ? 1 : 0.
+ void cset(const Register& rd, Condition cond);
+
+ // Conditional set mask: rd = cond ? -1 : 0.
+ void csetm(const Register& rd, Condition cond);
+
+ // Conditional increment: rd = cond ? rn + 1 : rn.
+ void cinc(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional invert: rd = cond ? ~rn : rn.
+ void cinv(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional negate: rd = cond ? -rn : rn.
+ void cneg(const Register& rd, const Register& rn, Condition cond);
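+
+  // Illustrative use (a sketch of caller code, not part of this patch):
+  // after cmp(x0, x1),
+  //   cset(x2, lo);          // x2 = (x0 < x1, unsigned) ? 1 : 0
+  //   csel(x3, x0, x1, lo);  // x3 = unsigned minimum of x0 and x1
+  //   cinc(x4, x4, eq);      // increment x4 only when x0 == x1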
+
+ // Rotate right.
+ void ror(const Register& rd, const Register& rs, unsigned shift) {
+ extr(rd, rs, rs, shift);
+ }
+
+ // Conditional comparison.
+ // Conditional compare negative.
+ void ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Conditional compare.
+ void ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // CRC-32 checksum from byte.
+ void crc32b(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 checksum from half-word.
+ void crc32h(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 checksum from word.
+ void crc32w(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 checksum from double word.
+ void crc32x(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 C checksum from byte.
+ void crc32cb(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 C checksum from half-word.
+ void crc32ch(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 C checksum from word.
+ void crc32cw(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32C checksum from double word.
+ void crc32cx(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // Multiply.
+ void mul(const Register& rd, const Register& rn, const Register& rm);
+
+ // Negated multiply.
+ void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed long multiply: 32 x 32 -> 64-bit.
+ void smull(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed multiply high: 64 x 64 -> 64-bit <127:64>.
+ void smulh(const Register& xd, const Register& xn, const Register& xm);
+
+ // Multiply and accumulate.
+ void madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Multiply and subtract.
+ void msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+ void smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+ void umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply: 32 x 32 -> 64-bit.
+ void umull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ umaddl(rd, rn, rm, xzr);
+ }
+
+ // Unsigned multiply high: 64 x 64 -> 64-bit <127:64>.
+ void umulh(const Register& xd,
+ const Register& xn,
+ const Register& xm);
+
+ // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+ void smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+ void umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed integer divide.
+ void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Unsigned integer divide.
+ void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bit reverse.
+ void rbit(const Register& rd, const Register& rn);
+
+ // Reverse bytes in 16-bit half words.
+ void rev16(const Register& rd, const Register& rn);
+
+ // Reverse bytes in 32-bit words.
+ void rev32(const Register& rd, const Register& rn);
+
+ // Reverse bytes.
+ void rev(const Register& rd, const Register& rn);
+
+ // Count leading zeroes.
+ void clz(const Register& rd, const Register& rn);
+
+ // Count leading sign bits.
+ void cls(const Register& rd, const Register& rn);
+
+ // Memory instructions.
+ // Load integer or FP register.
+ void ldr(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Store integer or FP register.
+ void str(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load word with sign extension.
+ void ldrsw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load byte.
+ void ldrb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Store byte.
+ void strb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load byte with sign extension.
+ void ldrsb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load half-word.
+ void ldrh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Store half-word.
+ void strh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load half-word with sign extension.
+ void ldrsh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load integer or FP register (with unscaled offset).
+ void ldur(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Store integer or FP register (with unscaled offset).
+ void stur(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load word with sign extension (with unscaled offset).
+ void ldursw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load byte (with unscaled offset).
+ void ldurb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Store byte (with unscaled offset).
+ void sturb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load byte with sign extension (and unscaled offset).
+ void ldursb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load half-word (with unscaled offset).
+ void ldurh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Store half-word (with unscaled offset).
+ void sturh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load half-word with sign extension (and unscaled offset).
+ void ldursh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load integer or FP register pair.
+ void ldp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair.
+ void stp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load word pair with sign extension.
+ void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Load integer or FP register pair, non-temporal.
+ void ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair, non-temporal.
+ void stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
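+
+  // Illustrative use (a sketch of caller code, assuming VIXL's PreIndex and
+  // PostIndex addressing modes and the sp register alias):
+  //   stp(x19, x20, MemOperand(sp, -16, PreIndex));  // push a register pair
+  //   ldp(x19, x20, MemOperand(sp, 16, PostIndex));  // pop it again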
+
+ // Load integer or FP register from pc + imm19 << 2.
+ void ldr(const CPURegister& rt, int imm19);
+ static void ldr(Instruction* at, const CPURegister& rt, int imm19);
+
+ // Load word with sign extension from pc + imm19 << 2.
+ void ldrsw(const Register& rt, int imm19);
+
+ // Store exclusive byte.
+ void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store exclusive half-word.
+ void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store exclusive register.
+ void stxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Load exclusive byte.
+ void ldxrb(const Register& rt, const MemOperand& src);
+
+ // Load exclusive half-word.
+ void ldxrh(const Register& rt, const MemOperand& src);
+
+ // Load exclusive register.
+ void ldxr(const Register& rt, const MemOperand& src);
+
+ // Store exclusive register pair.
+ void stxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst);
+
+ // Load exclusive register pair.
+ void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Store-release exclusive byte.
+ void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store-release exclusive half-word.
+ void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store-release exclusive register.
+ void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Load-acquire exclusive byte.
+ void ldaxrb(const Register& rt, const MemOperand& src);
+
+ // Load-acquire exclusive half-word.
+ void ldaxrh(const Register& rt, const MemOperand& src);
+
+ // Load-acquire exclusive register.
+ void ldaxr(const Register& rt, const MemOperand& src);
+
+ // Store-release exclusive register pair.
+ void stlxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst);
+
+ // Load-acquire exclusive register pair.
+ void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Store-release byte.
+ void stlrb(const Register& rt, const MemOperand& dst);
+
+ // Store-release half-word.
+ void stlrh(const Register& rt, const MemOperand& dst);
+
+ // Store-release register.
+ void stlr(const Register& rt, const MemOperand& dst);
+
+ // Load-acquire byte.
+ void ldarb(const Register& rt, const MemOperand& src);
+
+ // Load-acquire half-word.
+ void ldarh(const Register& rt, const MemOperand& src);
+
+ // Load-acquire register.
+ void ldar(const Register& rt, const MemOperand& src);
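+
+  // Illustrative sketch (caller code, not part of this patch): a
+  // sequentially-consistent atomic increment built from the exclusive pair,
+  // assuming a Label `again` bound at the first instruction:
+  //   ldaxr(x0, MemOperand(x1));      // load-acquire exclusive old value
+  //   add(x0, x0, Operand(1));
+  //   stlxr(w2, x0, MemOperand(x1));  // store-release exclusive; w2 = status
+  //   cbnz(w2, &again);               // 0 = success, non-zero = retry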
+
+ // Compare and Swap word or doubleword in memory [Armv8.1].
+ void cas(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap word or doubleword in memory [Armv8.1].
+ void casa(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap word or doubleword in memory [Armv8.1].
+ void casl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap word or doubleword in memory [Armv8.1].
+ void casal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap byte in memory [Armv8.1].
+ void casb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap byte in memory [Armv8.1].
+ void casab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap byte in memory [Armv8.1].
+ void caslb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap byte in memory [Armv8.1].
+ void casalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap halfword in memory [Armv8.1].
+ void cash(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap halfword in memory [Armv8.1].
+ void casah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap halfword in memory [Armv8.1].
+ void caslh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap halfword in memory [Armv8.1].
+ void casalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap Pair of words or doublewords in memory [Armv8.1].
+ void casp(const Register& rs,
+ const Register& rs2,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& src);
+
+ // Compare and Swap Pair of words or doublewords in memory [Armv8.1].
+ void caspa(const Register& rs,
+ const Register& rs2,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& src);
+
+ // Compare and Swap Pair of words or doublewords in memory [Armv8.1].
+ void caspl(const Register& rs,
+ const Register& rs2,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& src);
+
+ // Compare and Swap Pair of words or doublewords in memory [Armv8.1].
+ void caspal(const Register& rs,
+ const Register& rs2,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& src);
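+
+  // Illustrative semantics (Armv8.1 CAS family): casal(x0, x1, MemOperand(x2))
+  // atomically compares the value at [x2] with x0 and, if they are equal,
+  // stores x1; in either case the value read from memory is returned in x0,
+  // so a caller detects success by comparing x0 with its original expected
+  // value. The a/l suffixes select Load-acquire and/or Store-release ordering.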
+
+ // Atomic add on byte in memory [Armv8.1]
+ void ldaddb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on byte in memory, with Load-acquire semantics [Armv8.1]
+ void ldaddab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on byte in memory, with Store-release semantics [Armv8.1]
+ void ldaddlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on byte in memory, with Load-acquire and Store-release semantics
+ // [Armv8.1]
+ void ldaddalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on halfword in memory [Armv8.1]
+ void ldaddh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on halfword in memory, with Load-acquire semantics [Armv8.1]
+ void ldaddah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on halfword in memory, with Store-release semantics [Armv8.1]
+ void ldaddlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on halfword in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldaddalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory [Armv8.1]
+ void ldadd(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldadda(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldaddl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldaddal(const Register& rs, const Register& rt, const MemOperand& src);
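+
+  // Illustrative use (a sketch of caller code, not part of this patch): a
+  // sequentially-consistent fetch-and-add of 1 on the doubleword at [x2]:
+  //   movz(x0, 1);
+  //   ldaddal(x0, x1, MemOperand(x2));  // x1 = old value; [x2] = old + x0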
+
+ // Atomic bit clear on byte in memory [Armv8.1]
+ void ldclrb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory, with Load-acquire semantics [Armv8.1]
+ void ldclrab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory, with Store-release semantics [Armv8.1]
+ void ldclrlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldclralb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory [Armv8.1]
+ void ldclrh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldclrah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldclrlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldclralh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory [Armv8.1]
+ void ldclr(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldclra(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldclrl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldclral(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory [Armv8.1]
+ void ldeorb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldeorab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldeorlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldeoralb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory [Armv8.1]
+ void ldeorh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldeorah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldeorlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldeoralh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory [Armv8.1]
+ void ldeor(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldeora(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldeorl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldeoral(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on byte in memory [Armv8.1]
+ void ldsetb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, with Load-acquire semantics [Armv8.1]
+ void ldsetab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, with Store-release semantics [Armv8.1]
+ void ldsetlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldsetalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory [Armv8.1]
+ void ldseth(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, with Load-acquire semantics [Armv8.1]
+ void ldsetah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsetlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldsetalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory [Armv8.1]
+ void ldset(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldseta(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldsetl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsetal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory [Armv8.1]
+ void ldsmaxb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldsmaxab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsmaxlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsmaxalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory [Armv8.1]
+ void ldsmaxh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldsmaxah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsmaxlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsmaxalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory [Armv8.1]
+ void ldsmax(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldsmaxa(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldsmaxl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, with Load-acquire
+ // and Store-release semantics [Armv8.1]
+ void ldsmaxal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory [Armv8.1]
+ void ldsminb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldsminab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsminlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsminalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory [Armv8.1]
+ void ldsminh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldsminah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsminlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsminalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory [Armv8.1]
+ void ldsmin(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldsmina(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldsminl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, with Load-acquire
+ // and Store-release semantics [Armv8.1]
+ void ldsminal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory [Armv8.1]
+ void ldumaxb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldumaxab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldumaxlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldumaxalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory [Armv8.1]
+ void ldumaxh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldumaxah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldumaxlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldumaxalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory [Armv8.1]
+ void ldumax(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldumaxa(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldumaxl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire
+ // and Store-release semantics [Armv8.1]
+ void ldumaxal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory [Armv8.1]
+ void lduminb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void lduminab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void lduminlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void lduminalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory [Armv8.1]
+ void lduminh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void lduminah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void lduminlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void lduminalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory [Armv8.1]
+ void ldumin(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldumina(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void lduminl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire
+ // and Store-release semantics [Armv8.1]
+ void lduminal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on byte in memory, without return. [Armv8.1]
+ void staddb(const Register& rs, const MemOperand& src);
+
+ // Atomic add on byte in memory, with Store-release semantics and without
+ // return. [Armv8.1]
+ void staddlb(const Register& rs, const MemOperand& src);
+
+ // Atomic add on halfword in memory, without return. [Armv8.1]
+ void staddh(const Register& rs, const MemOperand& src);
+
+ // Atomic add on halfword in memory, with Store-release semantics and without
+ // return. [Armv8.1]
+ void staddlh(const Register& rs, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, without return. [Armv8.1]
+ void stadd(const Register& rs, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void staddl(const Register& rs, const MemOperand& src);
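+
+  // Illustrative note: staddl(x0, MemOperand(x1)) performs the same atomic
+  // add as ldaddl(x0, xzr, MemOperand(x1)) but discards the loaded value;
+  // the other ST* forms below relate to their LD* counterparts the same way.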
+
+ // Atomic bit clear on byte in memory, without return. [Armv8.1]
+ void stclrb(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stclrlb(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, without return. [Armv8.1]
+ void stclrh(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stclrlh(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, without return. [Armv8.1]
+ void stclr(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stclrl(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, without return. [Armv8.1]
+ void steorb(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void steorlb(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, without return. [Armv8.1]
+ void steorh(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void steorlh(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void steor(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void steorl(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, without return. [Armv8.1]
+ void stsetb(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, with Store-release semantics and without
+ // return. [Armv8.1]
+ void stsetlb(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, without return. [Armv8.1]
+ void stseth(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stsetlh(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, without return. [Armv8.1]
+ void stset(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stsetl(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, without return. [Armv8.1]
+ void stsmaxb(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stsmaxlb(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, without return. [Armv8.1]
+ void stsmaxh(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void stsmaxlh(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void stsmax(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stsmaxl(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, without return. [Armv8.1]
+ void stsminb(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stsminlb(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, without return. [Armv8.1]
+ void stsminh(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void stsminlh(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void stsmin(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, with Store-release
+  // semantics and without return. [Armv8.1]
+ void stsminl(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, without return. [Armv8.1]
+ void stumaxb(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stumaxlb(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, without return. [Armv8.1]
+ void stumaxh(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void stumaxlh(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void stumax(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stumaxl(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, without return. [Armv8.1]
+ void stuminb(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stuminlb(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, without return. [Armv8.1]
+ void stuminh(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void stuminlh(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void stumin(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stuminl(const Register& rs, const MemOperand& src);
+
+ // Swap byte in memory [Armv8.1]
+ void swpb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap byte in memory, with Load-acquire semantics [Armv8.1]
+ void swpab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap byte in memory, with Store-release semantics [Armv8.1]
+ void swplb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap byte in memory, with Load-acquire and Store-release semantics
+ // [Armv8.1]
+ void swpalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap halfword in memory [Armv8.1]
+ void swph(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap halfword in memory, with Load-acquire semantics [Armv8.1]
+ void swpah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap halfword in memory, with Store-release semantics [Armv8.1]
+ void swplh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap halfword in memory, with Load-acquire and Store-release semantics
+ // [Armv8.1]
+ void swpalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap word or doubleword in memory [Armv8.1]
+ void swp(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap word or doubleword in memory, with Load-acquire semantics [Armv8.1]
+ void swpa(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap word or doubleword in memory, with Store-release semantics [Armv8.1]
+ void swpl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap word or doubleword in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void swpal(const Register& rs, const Register& rt, const MemOperand& src);
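+
+  // Illustrative semantics: swpal(x0, x1, MemOperand(x2)) atomically exchanges
+  // x0 with the word or doubleword at [x2], returning the previous memory
+  // value in x1, with both Load-acquire and Store-release ordering.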
+
+ // Prefetch memory.
+ void prfm(PrefetchOperation op, const MemOperand& addr,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Prefetch memory (with unscaled offset).
+ void prfum(PrefetchOperation op, const MemOperand& addr,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Prefetch from pc + imm19 << 2.
+ void prfm(PrefetchOperation op, int imm19);
+
+  // Move instructions. The default shift of -1 indicates that the move
+  // instruction will calculate an appropriate 16-bit immediate and left shift
+  // so that the result equals the 64-bit immediate argument. If an explicit
+  // left shift is specified (0, 16, 32 or 48), the immediate must be a 16-bit
+  // value.
+ //
+ // For movk, an explicit shift can be used to indicate which half word should
+  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
+ // half word with zero, whereas movk(x0, 0, 48) will overwrite the
+ // most-significant.
+
+ // Move immediate and keep.
+ void movk(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVK);
+ }
+
+ // Move inverted immediate.
+ void movn(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVN);
+ }
+
+ // Move immediate.
+ void movz(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVZ);
+ }
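+
+  // Illustrative use (a sketch of caller code, not part of this patch):
+  // materializing the 64-bit constant 0x123456789abcdef0 in four steps:
+  //   movz(x0, 0xdef0);       // x0 = 0x000000000000def0
+  //   movk(x0, 0x9abc, 16);   // x0 = 0x000000009abcdef0
+  //   movk(x0, 0x5678, 32);   // x0 = 0x000056789abcdef0
+  //   movk(x0, 0x1234, 48);   // x0 = 0x123456789abcdef0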
+
+ // Misc instructions.
+ // Monitor debug-mode breakpoint.
+ void brk(int code);
+
+ // Halting debug-mode breakpoint.
+ void hlt(int code);
+
+ // Generate exception targeting EL1.
+ void svc(int code);
+ static void svc(Instruction* at, int code);
+
+ // Move register to register.
+ void mov(const Register& rd, const Register& rn);
+
+ // Move inverted operand to register.
+ void mvn(const Register& rd, const Operand& operand);
+
+ // System instructions.
+ // Move to register from system register.
+ void mrs(const Register& rt, SystemRegister sysreg);
+
+ // Move from register to system register.
+ void msr(SystemRegister sysreg, const Register& rt);
+
+ // System instruction.
+ void sys(int op1, int crn, int crm, int op2, const Register& rt = xzr);
+
+ // System instruction with pre-encoded op (op1:crn:crm:op2).
+ void sys(int op, const Register& rt = xzr);
+
+ // System data cache operation.
+ void dc(DataCacheOp op, const Register& rt);
+
+ // System instruction cache operation.
+ void ic(InstructionCacheOp op, const Register& rt);
+
+ // System hint.
+ BufferOffset hint(SystemHint code);
+ static void hint(Instruction* at, SystemHint code);
+
+ // Clear exclusive monitor.
+ void clrex(int imm4 = 0xf);
+
+ // Data memory barrier.
+ void dmb(BarrierDomain domain, BarrierType type);
+
+ // Data synchronization barrier.
+ void dsb(BarrierDomain domain, BarrierType type);
+
+ // Instruction synchronization barrier.
+ void isb();
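+
+  // Illustrative use (assuming VIXL's usual BarrierDomain/BarrierType
+  // enumerators, e.g. InnerShareable and BarrierAll):
+  //   dmb(InnerShareable, BarrierAll);  // full data memory barrier
+  //   isb();                            // then resynchronize the pipeline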
+
+ // Alias for system instructions.
+ // No-op.
+ BufferOffset nop() {
+ return hint(NOP);
+ }
+ static void nop(Instruction* at);
+
+ // Alias for system instructions.
+ // Conditional speculation barrier.
+ BufferOffset csdb() {
+ return hint(CSDB);
+ }
+ static void csdb(Instruction* at);
+
+ // FP and NEON instructions.
+ // Move double precision immediate to FP register.
+ void fmov(const VRegister& vd, double imm);
+
+ // Move single precision immediate to FP register.
+ void fmov(const VRegister& vd, float imm);
+
+ // Move FP register to register.
+ void fmov(const Register& rd, const VRegister& fn);
+
+ // Move register to FP register.
+ void fmov(const VRegister& vd, const Register& rn);
+
+ // Move FP register to FP register.
+ void fmov(const VRegister& vd, const VRegister& fn);
+
+ // Move 64-bit register to top half of 128-bit FP register.
+ void fmov(const VRegister& vd, int index, const Register& rn);
+
+ // Move top half of 128-bit FP register to 64-bit register.
+ void fmov(const Register& rd, const VRegister& vn, int index);
+
+ // FP add.
+ void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP subtract.
+ void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP multiply.
+ void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP fused multiply-add.
+ void fmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-subtract.
+ void fmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-add and negate.
+ void fnmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-subtract and negate.
+ void fnmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP multiply-negate scalar.
+ void fnmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP reciprocal exponent scalar.
+ void frecpx(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP divide.
+ void fdiv(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP maximum.
+ void fmax(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP minimum.
+ void fmin(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP maximum number.
+ void fmaxnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP minimum number.
+ void fminnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP absolute.
+ void fabs(const VRegister& vd, const VRegister& vn);
+
+ // FP negate.
+ void fneg(const VRegister& vd, const VRegister& vn);
+
+ // FP square root.
+ void fsqrt(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, nearest with ties to away.
+ void frinta(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, implicit rounding.
+ void frinti(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, toward minus infinity.
+ void frintm(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, nearest with ties to even.
+ void frintn(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, toward plus infinity.
+ void frintp(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, exact, implicit rounding.
+ void frintx(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, towards zero.
+ void frintz(const VRegister& vd, const VRegister& vn);
+
+ void FPCompareMacro(const VRegister& vn,
+ double value,
+ FPTrapFlags trap);
+
+ void FPCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ FPTrapFlags trap);
+
+ // FP compare registers.
+ void fcmp(const VRegister& vn, const VRegister& vm);
+
+ // FP compare immediate.
+ void fcmp(const VRegister& vn, double value);
+
+ void FPCCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond,
+ FPTrapFlags trap);
+
+ // FP conditional compare.
+ void fccmp(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP signaling compare registers.
+ void fcmpe(const VRegister& vn, const VRegister& vm);
+
+ // FP signaling compare immediate.
+ void fcmpe(const VRegister& vn, double value);
+
+ // FP conditional signaling compare.
+ void fccmpe(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP conditional select.
+ void fcsel(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Condition cond);
+
+ // Common FP Convert functions.
+ void NEONFPConvertToInt(const Register& rd,
+ const VRegister& vn,
+ Instr op);
+ void NEONFPConvertToInt(const VRegister& vd,
+ const VRegister& vn,
+ Instr op);
+
+ // FP convert between precisions.
+ void fcvt(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to higher precision.
+ void fcvtl(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to higher precision (second part).
+ void fcvtl2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision.
+ void fcvtn(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision (second part).
+ void fcvtn2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision, rounding to odd.
+ void fcvtxn(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision, rounding to odd (second part).
+ void fcvtxn2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to away.
+ void fcvtas(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to away.
+ void fcvtau(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to away.
+ void fcvtas(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to away.
+ void fcvtau(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards -infinity.
+ void fcvtms(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards -infinity.
+ void fcvtmu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards -infinity.
+ void fcvtms(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards -infinity.
+ void fcvtmu(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to even.
+ void fcvtns(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to even.
+ void fcvtnu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to even.
+ void fcvtns(const VRegister& rd, const VRegister& vn);
+
+ // FP JavaScript convert to signed integer, rounding toward zero [Armv8.3].
+ void fjcvtzs(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to even.
+ void fcvtnu(const VRegister& rd, const VRegister& vn);
+
+ // FP convert to signed integer or fixed-point, round towards zero.
+ void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to unsigned integer or fixed-point, round towards zero.
+ void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to signed integer or fixed-point, round towards zero.
+ void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to unsigned integer or fixed-point, round towards zero.
+ void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to signed integer, round towards +infinity.
+ void fcvtps(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards +infinity.
+ void fcvtpu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards +infinity.
+ void fcvtps(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards +infinity.
+ void fcvtpu(const VRegister& vd, const VRegister& vn);
+
+ // Convert signed integer or fixed point to FP.
+ void scvtf(const VRegister& fd, const Register& rn, int fbits = 0);
+
+ // Convert unsigned integer or fixed point to FP.
+ void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0);
+
+ // Convert signed integer or fixed-point to FP.
+ void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
+
+ // Convert unsigned integer or fixed-point to FP.
+ void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
+
+ // Unsigned absolute difference.
+ void uabd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference.
+ void sabd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate.
+ void uaba(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference and accumulate.
+ void saba(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add.
+ void add(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Subtract.
+ void sub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned halving add.
+ void uhadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed halving add.
+ void shadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned rounding halving add.
+ void urhadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed rounding halving add.
+ void srhadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned halving sub.
+ void uhsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed halving sub.
+ void shsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating add.
+ void uqadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating add.
+ void sqadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating subtract.
+ void uqsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating subtract.
+ void sqsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add pairwise.
+ void addp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add pair of elements scalar.
+ void addp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Multiply-add to accumulator.
+ void mla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Multiply-subtract to accumulator.
+ void mls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Multiply.
+ void mul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Multiply by scalar element.
+ void mul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Multiply-add by scalar element.
+ void mla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Multiply-subtract by scalar element.
+ void mls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-add by scalar element.
+ void smlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-add by scalar element (second part).
+ void smlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-add by scalar element.
+ void umlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-add by scalar element (second part).
+ void umlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-sub by scalar element.
+ void smlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-sub by scalar element (second part).
+ void smlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-sub by scalar element.
+ void umlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-sub by scalar element (second part).
+ void umlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply by scalar element.
+ void smull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply by scalar element (second part).
+ void smull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply by scalar element.
+ void umull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply by scalar element (second part).
+ void umull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply by element.
+ void sqdmull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply by element (second part).
+ void sqdmull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-add by element.
+ void sqdmlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-add by element (second part).
+ void sqdmlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-sub by element.
+ void sqdmlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-sub by element (second part).
+ void sqdmlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Compare equal.
+ void cmeq(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare signed greater than or equal.
+ void cmge(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare signed greater than.
+ void cmgt(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare unsigned higher.
+ void cmhi(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare unsigned higher or same.
+ void cmhs(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare bitwise test bits nonzero.
+ void cmtst(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare equal to zero.
+ void cmeq(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed greater than or equal to zero.
+ void cmge(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed greater than zero.
+ void cmgt(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed less than or equal to zero.
+ void cmle(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed less than zero.
+ void cmlt(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Signed shift left by register.
+ void sshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned shift left by register.
+ void ushl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating shift left by register.
+ void sqshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating shift left by register.
+ void uqshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed rounding shift left by register.
+ void srshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned rounding shift left by register.
+ void urshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating rounding shift left by register.
+ void sqrshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating rounding shift left by register.
+ void uqrshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise and.
+ void and_(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise or.
+ void orr(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise or immediate.
+ void orr(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0);
+
+ // Move register to register.
+ void mov(const VRegister& vd,
+ const VRegister& vn);
+
+ // Bitwise orn.
+ void orn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise eor.
+ void eor(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bit clear immediate.
+ void bic(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0);
+
+ // Bit clear.
+ void bic(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise insert if false.
+ void bif(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise insert if true.
+ void bit(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise select.
+ void bsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Polynomial multiply.
+ void pmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Vector move immediate.
+ void movi(const VRegister& vd,
+ const uint64_t imm,
+ Shift shift = LSL,
+ const int shift_amount = 0);
+
+ // Bitwise not.
+ void mvn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Vector move inverted immediate.
+ void mvni(const VRegister& vd,
+ const int imm8,
+ Shift shift = LSL,
+ const int shift_amount = 0);
+
+ // Signed saturating accumulate of unsigned value.
+ void suqadd(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned saturating accumulate of signed value.
+ void usqadd(const VRegister& vd,
+ const VRegister& vn);
+
+ // Absolute value.
+ void abs(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating absolute value.
+ void sqabs(const VRegister& vd,
+ const VRegister& vn);
+
+ // Negate.
+ void neg(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating negate.
+ void sqneg(const VRegister& vd,
+ const VRegister& vn);
+
+ // Bitwise not.
+ void not_(const VRegister& vd,
+ const VRegister& vn);
+
+ // Extract narrow.
+ void xtn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Extract narrow (second part).
+ void xtn2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract narrow.
+ void sqxtn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract narrow (second part).
+ void sqxtn2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned saturating extract narrow.
+ void uqxtn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned saturating extract narrow (second part).
+ void uqxtn2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract unsigned narrow.
+ void sqxtun(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract unsigned narrow (second part).
+ void sqxtun2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Extract vector from pair of vectors.
+ void ext(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int index);
+
+ // Duplicate vector element to vector or scalar.
+ void dup(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to scalar.
+ void mov(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Duplicate general-purpose register to vector.
+ void dup(const VRegister& vd,
+ const Register& rn);
+
+ // Insert vector element from another vector element.
+ void ins(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to another vector element.
+ void mov(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index);
+
+ // Insert vector element from general-purpose register.
+ void ins(const VRegister& vd,
+ int vd_index,
+ const Register& rn);
+
+ // Move general-purpose register to a vector element.
+ void mov(const VRegister& vd,
+ int vd_index,
+ const Register& rn);
+
+ // Unsigned move vector element to general-purpose register.
+ void umov(const Register& rd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to general-purpose register.
+ void mov(const Register& rd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Signed move vector element to general-purpose register.
+ void smov(const Register& rd,
+ const VRegister& vn,
+ int vn_index);
+
+ // One-element structure load to one register.
+ void ld1(const VRegister& vt,
+ const MemOperand& src);
+
+ // One-element structure load to two registers.
+ void ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // One-element structure load to three registers.
+ void ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // One-element structure load to four registers.
+ void ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // One-element single structure load to one lane.
+ void ld1(const VRegister& vt,
+ int lane,
+ const MemOperand& src);
+
+ // One-element single structure load to all lanes.
+ void ld1r(const VRegister& vt,
+ const MemOperand& src);
+
+ // Two-element structure load.
+ void ld2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // Two-element single structure load to one lane.
+ void ld2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src);
+
+ // Two-element single structure load to all lanes.
+ void ld2r(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // Three-element structure load.
+ void ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // Three-element single structure load to one lane.
+ void ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src);
+
+ // Three-element single structure load to all lanes.
+ void ld3r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // Four-element structure load.
+ void ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // Four-element single structure load to one lane.
+ void ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src);
+
+ // Four-element single structure load to all lanes.
+ void ld4r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // Count leading sign bits.
+ void cls(const VRegister& vd,
+ const VRegister& vn);
+
+ // Count leading zero bits (vector).
+ void clz(const VRegister& vd,
+ const VRegister& vn);
+
+ // Population count per byte.
+ void cnt(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse bit order.
+ void rbit(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse elements in 16-bit halfwords.
+ void rev16(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse elements in 32-bit words.
+ void rev32(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse elements in 64-bit doublewords.
+ void rev64(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned reciprocal square root estimate.
+ void ursqrte(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned reciprocal estimate.
+ void urecpe(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed pairwise long add.
+ void saddlp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned pairwise long add.
+ void uaddlp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed pairwise long add and accumulate.
+ void sadalp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned pairwise long add and accumulate.
+ void uadalp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Shift left by immediate.
+ void shl(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift left by immediate.
+ void sqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift left unsigned by immediate.
+ void sqshlu(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating shift left by immediate.
+ void uqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed shift left long by immediate.
+ void sshll(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed shift left long by immediate (second part).
+ void sshll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed extend long.
+ void sxtl(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed extend long (second part).
+ void sxtl2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned shift left long by immediate.
+ void ushll(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned shift left long by immediate (second part).
+ void ushll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift left long by element size.
+ void shll(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift left long by element size (second part).
+ void shll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned extend long.
+ void uxtl(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned extend long (second part).
+ void uxtl2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Shift left by immediate and insert.
+ void sli(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift right by immediate and insert.
+ void sri(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed maximum.
+ void smax(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed pairwise maximum.
+ void smaxp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add across vector.
+ void addv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed add long across vector.
+ void saddlv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned add long across vector.
+ void uaddlv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP maximum number across vector.
+ void fmaxnmv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP maximum across vector.
+ void fmaxv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP minimum number across vector.
+ void fminnmv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP minimum across vector.
+ void fminv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed maximum across vector.
+ void smaxv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed minimum.
+ void smin(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed minimum pairwise.
+ void sminp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed minimum across vector.
+ void sminv(const VRegister& vd,
+ const VRegister& vn);
+
+ // One-element structure store from one register.
+ void st1(const VRegister& vt,
+ const MemOperand& src);
+
+ // One-element structure store from two registers.
+ void st1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // One-element structure store from three registers.
+ void st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // One-element structure store from four registers.
+ void st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // One-element single structure store from one lane.
+ void st1(const VRegister& vt,
+ int lane,
+ const MemOperand& src);
+
+ // Two-element structure store from two registers.
+ void st2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // Two-element single structure store from two lanes.
+ void st2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src);
+
+ // Three-element structure store from three registers.
+ void st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // Three-element single structure store from three lanes.
+ void st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src);
+
+ // Four-element structure store from four registers.
+ void st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // Four-element single structure store from four lanes.
+ void st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src);
+
+ // Unsigned add long.
+ void uaddl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned add long (second part).
+ void uaddl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned add wide.
+ void uaddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned add wide (second part).
+ void uaddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add long.
+ void saddl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add long (second part).
+ void saddl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add wide.
+ void saddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add wide (second part).
+ void saddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract long.
+ void usubl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract long (second part).
+ void usubl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract wide.
+ void usubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract wide (second part).
+ void usubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed subtract long.
+ void ssubl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed subtract long (second part).
+ void ssubl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed integer subtract wide.
+ void ssubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed integer subtract wide (second part).
+ void ssubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned maximum.
+ void umax(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned pairwise maximum.
+ void umaxp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned maximum across vector.
+ void umaxv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned minimum.
+ void umin(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned pairwise minimum.
+ void uminp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned minimum across vector.
+ void uminv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Transpose vectors (primary).
+ void trn1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Transpose vectors (secondary).
+ void trn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unzip vectors (primary).
+ void uzp1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unzip vectors (secondary).
+ void uzp2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Zip vectors (primary).
+ void zip1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Zip vectors (secondary).
+ void zip2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed shift right by immediate.
+ void sshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned shift right by immediate.
+ void ushr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed rounding shift right by immediate.
+ void srshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned rounding shift right by immediate.
+ void urshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed shift right by immediate and accumulate.
+ void ssra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned shift right by immediate and accumulate.
+ void usra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed rounding shift right by immediate and accumulate.
+ void srsra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned rounding shift right by immediate and accumulate.
+ void ursra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift right narrow by immediate.
+ void shrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift right narrow by immediate (second part).
+ void shrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Rounding shift right narrow by immediate.
+ void rshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Rounding shift right narrow by immediate (second part).
+ void rshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating shift right narrow by immediate.
+ void uqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating shift right narrow by immediate (second part).
+ void uqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating rounding shift right narrow by immediate.
+ void uqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating rounding shift right narrow by immediate (second part).
+ void uqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right narrow by immediate.
+ void sqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right narrow by immediate (second part).
+ void sqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating rounded shift right narrow by immediate.
+ void sqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating rounded shift right narrow by immediate (second part).
+ void sqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right unsigned narrow by immediate.
+ void sqshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right unsigned narrow by immediate (second part).
+ void sqshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating rounded shift right unsigned narrow by immediate.
+ void sqrshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating rounded shift right unsigned narrow by immediate (second part).
+ void sqrshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // FP reciprocal step.
+ void frecps(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP reciprocal estimate.
+ void frecpe(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP reciprocal square root estimate.
+ void frsqrte(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP reciprocal square root step.
+ void frsqrts(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference and accumulate long.
+ void sabal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference and accumulate long (second part).
+ void sabal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate long.
+ void uabal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate long (second part).
+ void uabal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference long.
+ void sabdl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference long (second part).
+ void sabdl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference long.
+ void uabdl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference long (second part).
+ void uabdl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Polynomial multiply long.
+ void pmull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Polynomial multiply long (second part).
+ void pmull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-add.
+ void smlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-add (second part).
+ void smlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-add.
+ void umlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-add (second part).
+ void umlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-sub.
+ void smlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-sub (second part).
+ void smlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-sub.
+ void umlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-sub (second part).
+ void umlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply.
+ void smull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply (second part).
+ void smull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-add.
+ void sqdmlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-add (second part).
+ void sqdmlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-subtract.
+ void sqdmlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-subtract (second part).
+ void sqdmlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply.
+ void sqdmull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply (second part).
+ void sqdmull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling multiply returning high half.
+ void sqdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating rounding doubling multiply returning high half.
+ void sqrdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling multiply element returning high half.
+ void sqdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating rounding doubling multiply element returning high half.
+ void sqrdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply.
+ void umull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply (second part).
+ void umull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add narrow returning high half.
+ void addhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add narrow returning high half (second part).
+ void addhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding add narrow returning high half.
+ void raddhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding add narrow returning high half (second part).
+ void raddhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Subtract narrow returning high half.
+ void subhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Subtract narrow returning high half (second part).
+ void subhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding subtract narrow returning high half.
+ void rsubhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding subtract narrow returning high half (second part).
+ void rsubhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP vector multiply accumulate.
+ void fmla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP vector multiply subtract.
+ void fmls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP vector multiply extended.
+ void fmulx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP absolute greater than or equal.
+ void facge(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP absolute greater than.
+ void facgt(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP multiply by element.
+ void fmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-add to accumulator by element.
+ void fmla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-sub from accumulator by element.
+ void fmls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP multiply extended by element.
+ void fmulx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP compare equal.
+ void fcmeq(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP greater than.
+ void fcmgt(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP greater than or equal.
+ void fcmge(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP compare equal to zero.
+ void fcmeq(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP greater than zero.
+ void fcmgt(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP greater than or equal to zero.
+ void fcmge(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP less than or equal to zero.
+ void fcmle(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP less than zero.
+ void fcmlt(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP absolute difference.
+ void fabd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise add vector.
+ void faddp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise add scalar.
+ void faddp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise maximum vector.
+ void fmaxp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise maximum scalar.
+ void fmaxp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise minimum vector.
+ void fminp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise minimum scalar.
+ void fminp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise maximum number vector.
+ void fmaxnmp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise maximum number scalar.
+ void fmaxnmp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise minimum number vector.
+ void fminnmp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise minimum number scalar.
+ void fminnmp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Emit generic instructions.
+ // Emit raw instructions into the instruction stream.
+ void dci(Instr raw_inst) { Emit(raw_inst); }
+
+ // Emit 32 bits of data into the instruction stream.
+ void dc32(uint32_t data) {
+ EmitData(&data, sizeof(data));
+ }
+
+ // Emit 64 bits of data into the instruction stream.
+ void dc64(uint64_t data) {
+ EmitData(&data, sizeof(data));
+ }
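+
+ // Note: dci() and dc32()/dc64() bypass the usual encoding helpers. For
+ // example, dci(0xd503201f) emits an AArch64 NOP, while dc64() can place a
+ // raw 64-bit literal (such as a pointer) directly into the code stream.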
+
+ // Code generation helpers.
+
+ // Register encoding.
+ static Instr Rd(CPURegister rd) {
+ VIXL_ASSERT(rd.code() != kSPRegInternalCode);
+ return rd.code() << Rd_offset;
+ }
+
+ static Instr Rn(CPURegister rn) {
+ VIXL_ASSERT(rn.code() != kSPRegInternalCode);
+ return rn.code() << Rn_offset;
+ }
+
+ static Instr Rm(CPURegister rm) {
+ VIXL_ASSERT(rm.code() != kSPRegInternalCode);
+ return rm.code() << Rm_offset;
+ }
+
+ static Instr RmNot31(CPURegister rm) {
+ VIXL_ASSERT(rm.code() != kSPRegInternalCode);
+ VIXL_ASSERT(!rm.IsZero());
+ return Rm(rm);
+ }
+
+ static Instr Ra(CPURegister ra) {
+ VIXL_ASSERT(ra.code() != kSPRegInternalCode);
+ return ra.code() << Ra_offset;
+ }
+
+ static Instr Rt(CPURegister rt) {
+ VIXL_ASSERT(rt.code() != kSPRegInternalCode);
+ return rt.code() << Rt_offset;
+ }
+
+ static Instr Rt2(CPURegister rt2) {
+ VIXL_ASSERT(rt2.code() != kSPRegInternalCode);
+ return rt2.code() << Rt2_offset;
+ }
+
+ static Instr Rs(CPURegister rs) {
+ VIXL_ASSERT(rs.code() != kSPRegInternalCode);
+ return rs.code() << Rs_offset;
+ }
+
+ // These encoding functions allow the stack pointer to be encoded, and
+ // disallow the zero register.
+ static Instr RdSP(Register rd) {
+ VIXL_ASSERT(!rd.IsZero());
+ return (rd.code() & kRegCodeMask) << Rd_offset;
+ }
+
+ static Instr RnSP(Register rn) {
+ VIXL_ASSERT(!rn.IsZero());
+ return (rn.code() & kRegCodeMask) << Rn_offset;
+ }
+
+ // Flags encoding.
+ static Instr Flags(FlagsUpdate S) {
+ if (S == SetFlags) {
+ return 1 << FlagsUpdate_offset;
+ } else if (S == LeaveFlags) {
+ return 0 << FlagsUpdate_offset;
+ }
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+
+ static Instr Cond(Condition cond) {
+ return cond << Condition_offset;
+ }
+
+ // PC-relative address encoding.
+ static Instr ImmPCRelAddress(int imm21) {
+ VIXL_ASSERT(IsInt21(imm21));
+ Instr imm = static_cast<Instr>(TruncateToUint21(imm21));
+ Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+ Instr immlo = imm << ImmPCRelLo_offset;
+ return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+ }
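+
+ // The signed 21-bit ADR/ADRP offset is split across the instruction: immlo
+ // holds bits <1:0> and immhi holds bits <20:2>, which is why the value is
+ // masked into two separate fields above.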
+
+ // Branch encoding.
+ static Instr ImmUncondBranch(int imm26) {
+ VIXL_ASSERT(IsInt26(imm26));
+ return TruncateToUint26(imm26) << ImmUncondBranch_offset;
+ }
+
+ static Instr ImmCondBranch(int imm19) {
+ VIXL_ASSERT(IsInt19(imm19));
+ return TruncateToUint19(imm19) << ImmCondBranch_offset;
+ }
+
+ static Instr ImmCmpBranch(int imm19) {
+ VIXL_ASSERT(IsInt19(imm19));
+ return TruncateToUint19(imm19) << ImmCmpBranch_offset;
+ }
+
+ static Instr ImmTestBranch(int imm14) {
+ VIXL_ASSERT(IsInt14(imm14));
+ return TruncateToUint14(imm14) << ImmTestBranch_offset;
+ }
+
+ static Instr ImmTestBranchBit(unsigned bit_pos) {
+ VIXL_ASSERT(IsUint6(bit_pos));
+ // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+ unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+ unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+ b5 &= ImmTestBranchBit5_mask;
+ b40 &= ImmTestBranchBit40_mask;
+ return b5 | b40;
+ }
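+
+ // TBZ/TBNZ encode the tested bit position in two pieces: bit 5 of bit_pos
+ // goes to the b5 field (which in effect selects the W/X form) and bits <4:0>
+ // go to b40.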
+
+ // Data Processing encoding.
+ static Instr SF(Register rd) {
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+ }
+
+ static Instr ImmAddSub(int imm) {
+ VIXL_ASSERT(IsImmAddSub(imm));
+ if (IsUint12(imm)) { // No shift required.
+ imm <<= ImmAddSub_offset;
+ } else {
+ imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ }
+ return imm;
+ }
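+
+ // Immediates wider than 12 bits take the shifted form: for example,
+ // ImmAddSub(0x1000) encodes an immediate field of 1 with ShiftAddSub set,
+ // i.e. "#1, LSL #12".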
+
+ static Instr ImmS(unsigned imms, unsigned reg_size) {
+ VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(imms)) ||
+ ((reg_size == kWRegSize) && IsUint5(imms)));
+ USE(reg_size);
+ return imms << ImmS_offset;
+ }
+
+ static Instr ImmR(unsigned immr, unsigned reg_size) {
+ VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) ||
+ ((reg_size == kWRegSize) && IsUint5(immr)));
+ USE(reg_size);
+ VIXL_ASSERT(IsUint6(immr));
+ return immr << ImmR_offset;
+ }
+
+ static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT(IsUint6(imms));
+ VIXL_ASSERT((reg_size == kXRegSize) || IsUint6(imms + 3));
+ USE(reg_size);
+ return imms << ImmSetBits_offset;
+ }
+
+ static Instr ImmRotate(unsigned immr, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) ||
+ ((reg_size == kWRegSize) && IsUint5(immr)));
+ USE(reg_size);
+ return immr << ImmRotate_offset;
+ }
+
+ static Instr ImmLLiteral(int imm19) {
+ VIXL_ASSERT(IsInt19(imm19));
+ return TruncateToUint19(imm19) << ImmLLiteral_offset;
+ }
+
+ static Instr BitN(unsigned bitn, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
+ USE(reg_size);
+ return bitn << BitN_offset;
+ }
+
+ static Instr ShiftDP(Shift shift) {
+ VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ return shift << ShiftDP_offset;
+ }
+
+ static Instr ImmDPShift(unsigned amount) {
+ VIXL_ASSERT(IsUint6(amount));
+ return amount << ImmDPShift_offset;
+ }
+
+ static Instr ExtendMode(Extend extend) {
+ return extend << ExtendMode_offset;
+ }
+
+ static Instr ImmExtendShift(unsigned left_shift) {
+ VIXL_ASSERT(left_shift <= 4);
+ return left_shift << ImmExtendShift_offset;
+ }
+
+ static Instr ImmCondCmp(unsigned imm) {
+ VIXL_ASSERT(IsUint5(imm));
+ return imm << ImmCondCmp_offset;
+ }
+
+ static Instr Nzcv(StatusFlags nzcv) {
+ return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+ }
+
+ // MemOperand offset encoding.
+ static Instr ImmLSUnsigned(int imm12) {
+ VIXL_ASSERT(IsUint12(imm12));
+ return imm12 << ImmLSUnsigned_offset;
+ }
+
+ static Instr ImmLS(int imm9) {
+ VIXL_ASSERT(IsInt9(imm9));
+ return TruncateToUint9(imm9) << ImmLS_offset;
+ }
+
+ static Instr ImmLSPair(int imm7, unsigned access_size) {
+ VIXL_ASSERT(((imm7 >> access_size) << access_size) == imm7);
+ int scaled_imm7 = imm7 >> access_size;
+ VIXL_ASSERT(IsInt7(scaled_imm7));
+ return TruncateToUint7(scaled_imm7) << ImmLSPair_offset;
+ }
+
+ static Instr ImmShiftLS(unsigned shift_amount) {
+ VIXL_ASSERT(IsUint1(shift_amount));
+ return shift_amount << ImmShiftLS_offset;
+ }
+
+ static Instr ImmPrefetchOperation(int imm5) {
+ VIXL_ASSERT(IsUint5(imm5));
+ return imm5 << ImmPrefetchOperation_offset;
+ }
+
+ static Instr ImmException(int imm16) {
+ VIXL_ASSERT(IsUint16(imm16));
+ return imm16 << ImmException_offset;
+ }
+
+ static Instr ImmSystemRegister(int imm15) {
+ VIXL_ASSERT(IsUint15(imm15));
+ return imm15 << ImmSystemRegister_offset;
+ }
+
+ static Instr ImmHint(int imm7) {
+ VIXL_ASSERT(IsUint7(imm7));
+ return imm7 << ImmHint_offset;
+ }
+
+ static Instr CRm(int imm4) {
+ VIXL_ASSERT(IsUint4(imm4));
+ return imm4 << CRm_offset;
+ }
+
+ static Instr CRn(int imm4) {
+ VIXL_ASSERT(IsUint4(imm4));
+ return imm4 << CRn_offset;
+ }
+
+ static Instr SysOp(int imm14) {
+ VIXL_ASSERT(IsUint14(imm14));
+ return imm14 << SysOp_offset;
+ }
+
+ static Instr ImmSysOp1(int imm3) {
+ VIXL_ASSERT(IsUint3(imm3));
+ return imm3 << SysOp1_offset;
+ }
+
+ static Instr ImmSysOp2(int imm3) {
+ VIXL_ASSERT(IsUint3(imm3));
+ return imm3 << SysOp2_offset;
+ }
+
+ static Instr ImmBarrierDomain(int imm2) {
+ VIXL_ASSERT(IsUint2(imm2));
+ return imm2 << ImmBarrierDomain_offset;
+ }
+
+ static Instr ImmBarrierType(int imm2) {
+ VIXL_ASSERT(IsUint2(imm2));
+ return imm2 << ImmBarrierType_offset;
+ }
+
+ // Move immediates encoding.
+ static Instr ImmMoveWide(uint64_t imm) {
+ VIXL_ASSERT(IsUint16(imm));
+ return static_cast<Instr>(imm << ImmMoveWide_offset);
+ }
+
+ static Instr ShiftMoveWide(int64_t shift) {
+ VIXL_ASSERT(IsUint2(shift));
+ return static_cast<Instr>(shift << ShiftMoveWide_offset);
+ }
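+
+ // Callers are expected to pass the halfword index (shift / 16) rather than
+ // the raw shift amount, hence the 2-bit range check above.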
+
+ // FP Immediates.
+ static Instr ImmFP32(float imm);
+ static Instr ImmFP64(double imm);
+
+ // FP register type.
+ static Instr FPType(FPRegister fd) {
+ return fd.Is64Bits() ? FP64 : FP32;
+ }
+
+ static Instr FPScale(unsigned scale) {
+ VIXL_ASSERT(IsUint6(scale));
+ return scale << FPScale_offset;
+ }
+
+ // Immediate field checking helpers.
+ static bool IsImmAddSub(int64_t immediate);
+ static bool IsImmConditionalCompare(int64_t immediate);
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+ static bool IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n = NULL,
+ unsigned* imm_s = NULL,
+ unsigned* imm_r = NULL);
+ static bool IsImmLSPair(int64_t offset, unsigned access_size);
+ static bool IsImmLSScaled(int64_t offset, unsigned access_size);
+ static bool IsImmLSUnscaled(int64_t offset);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+
+ // Instruction bits for vector format in data processing operations.
+ static Instr VFormat(VRegister vd) {
+ if (vd.Is64Bits()) {
+ switch (vd.lanes()) {
+ case 2: return NEON_2S;
+ case 4: return NEON_4H;
+ case 8: return NEON_8B;
+ default: return 0xffffffff;
+ }
+ } else {
+ VIXL_ASSERT(vd.Is128Bits());
+ switch (vd.lanes()) {
+ case 2: return NEON_2D;
+ case 4: return NEON_4S;
+ case 8: return NEON_8H;
+ case 16: return NEON_16B;
+ default: return 0xffffffff;
+ }
+ }
+ }
+
+ // Instruction bits for vector format in floating point data processing
+ // operations.
+ static Instr FPFormat(VRegister vd) {
+ if (vd.lanes() == 1) {
+ // Floating point scalar formats.
+ VIXL_ASSERT(vd.Is32Bits() || vd.Is64Bits());
+ return vd.Is64Bits() ? FP64 : FP32;
+ }
+
+ // Two lane floating point vector formats.
+ if (vd.lanes() == 2) {
+ VIXL_ASSERT(vd.Is64Bits() || vd.Is128Bits());
+ return vd.Is128Bits() ? NEON_FP_2D : NEON_FP_2S;
+ }
+
+ // Four lane floating point vector format.
+ VIXL_ASSERT((vd.lanes() == 4) && vd.Is128Bits());
+ return NEON_FP_4S;
+ }
+
+ // Instruction bits for vector format in load and store operations.
+ static Instr LSVFormat(VRegister vd) {
+ if (vd.Is64Bits()) {
+ switch (vd.lanes()) {
+ case 1: return LS_NEON_1D;
+ case 2: return LS_NEON_2S;
+ case 4: return LS_NEON_4H;
+ case 8: return LS_NEON_8B;
+ default: return 0xffffffff;
+ }
+ } else {
+ VIXL_ASSERT(vd.Is128Bits());
+ switch (vd.lanes()) {
+ case 2: return LS_NEON_2D;
+ case 4: return LS_NEON_4S;
+ case 8: return LS_NEON_8H;
+ case 16: return LS_NEON_16B;
+ default: return 0xffffffff;
+ }
+ }
+ }
+
+ // Instruction bits for scalar format in data processing operations.
+ static Instr SFormat(VRegister vd) {
+ VIXL_ASSERT(vd.lanes() == 1);
+ switch (vd.SizeInBytes()) {
+ case 1: return NEON_B;
+ case 2: return NEON_H;
+ case 4: return NEON_S;
+ case 8: return NEON_D;
+ default: return 0xffffffff;
+ }
+ }
+
+ static Instr ImmNEONHLM(int index, int num_bits) {
+ int h, l, m;
+ if (num_bits == 3) {
+ VIXL_ASSERT(IsUint3(index));
+ h = (index >> 2) & 1;
+ l = (index >> 1) & 1;
+ m = (index >> 0) & 1;
+ } else if (num_bits == 2) {
+ VIXL_ASSERT(IsUint2(index));
+ h = (index >> 1) & 1;
+ l = (index >> 0) & 1;
+ m = 0;
+ } else {
+ VIXL_ASSERT(IsUint1(index) && (num_bits == 1));
+ h = (index >> 0) & 1;
+ l = 0;
+ m = 0;
+ }
+ return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset);
+ }
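+
+ // H:L:M select the lane for by-element forms; how many of the three bits
+ // are meaningful depends on the lane size (3 bits for H lanes, 2 for S,
+ // 1 for D), which is why the caller passes num_bits.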
+
+ static Instr ImmNEONExt(int imm4) {
+ VIXL_ASSERT(IsUint4(imm4));
+ return imm4 << ImmNEONExt_offset;
+ }
+
+ static Instr ImmNEON5(Instr format, int index) {
+ VIXL_ASSERT(IsUint4(index));
+ int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+ int imm5 = (index << (s + 1)) | (1 << s);
+ return imm5 << ImmNEON5_offset;
+ }
+
+ static Instr ImmNEON4(Instr format, int index) {
+ VIXL_ASSERT(IsUint4(index));
+ int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+ int imm4 = index << s;
+ return imm4 << ImmNEON4_offset;
+ }
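+
+ // In the imm5 encoding a single set bit marks the element size and the lane
+ // index is packed above it; for example, lane 3 of S-sized elements gives
+ // 0b11100. imm4 carries just the index, shifted by the element-size log2.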
+
+ static Instr ImmNEONabcdefgh(int imm8) {
+ VIXL_ASSERT(IsUint8(imm8));
+ Instr instr;
+ instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset;
+ instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset;
+ return instr;
+ }
+
+ static Instr NEONCmode(int cmode) {
+ VIXL_ASSERT(IsUint4(cmode));
+ return cmode << NEONCmode_offset;
+ }
+
+ static Instr NEONModImmOp(int op) {
+ VIXL_ASSERT(IsUint1(op));
+ return op << NEONModImmOp_offset;
+ }
+
+ size_t size() const {
+ return SizeOfCodeGenerated();
+ }
+
+ size_t SizeOfCodeGenerated() const {
+ return armbuffer_.size();
+ }
+
+ PositionIndependentCodeOption pic() const {
+ return pic_;
+ }
+
+ CPUFeatures* GetCPUFeatures() { return &cpu_features_; }
+
+ void SetCPUFeatures(const CPUFeatures& cpu_features) {
+ cpu_features_ = cpu_features;
+ }
+
+ bool AllowPageOffsetDependentCode() const {
+ return (pic() == PageOffsetDependentCode) ||
+ (pic() == PositionDependentCode);
+ }
+
+ static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
+ return reg.Is64Bits() ? xzr : wzr;
+ }
+
+
+ protected:
+ void LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ void LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+ void LoadStoreStruct(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreMultiStructOp op);
+ void LoadStoreStruct1(const VRegister& vt,
+ int reg_count,
+ const MemOperand& addr);
+ void LoadStoreStructSingle(const VRegister& vt,
+ uint32_t lane,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op);
+ void LoadStoreStructSingleAllLanes(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op);
+ void LoadStoreStructVerify(const VRegister& vt,
+ const MemOperand& addr,
+ Instr op);
+
+ void Prefetch(PrefetchOperation op,
+ const MemOperand& addr,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ BufferOffset Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+ BufferOffset LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op);
+
+ void ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+
+ void AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+
+ // Functions for emulating operands not directly supported by the instruction
+ // set.
+ void EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned amount);
+ void EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift);
+
+ void AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ void NEONTable(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONTableOp op);
+
+ // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+ // registers. Only simple loads are supported; sign- and zero-extension (such
+ // as in LDPSW_x or LDRB_w) are not supported.
+ static LoadStoreOp LoadOpFor(const CPURegister& rt);
+ static LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static LoadStoreOp StoreOpFor(const CPURegister& rt);
+ static LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
+
+ // Convenience pass-through for CPU feature checks.
+ bool CPUHas(CPUFeatures::Feature feature0,
+ CPUFeatures::Feature feature1 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature2 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature3 = CPUFeatures::kNone) const {
+ return cpu_features_.Has(feature0, feature1, feature2, feature3);
+ }
+
+ // Determine whether the target CPU has the specified registers, based on the
+ // currently-enabled CPU features. Presence of a register does not imply
+ // support for arbitrary operations on it. For example, CPUs with FP have H
+ // registers, but most half-precision operations require the FPHalf feature.
+ //
+ // These are used to check CPU features in loads and stores that have the same
+ // entry point for both integer and FP registers.
+ bool CPUHas(const CPURegister& rt) const;
+ bool CPUHas(const CPURegister& rt, const CPURegister& rt2) const;
+
+ bool CPUHas(SystemRegister sysreg) const;
+
+ private:
+ static uint32_t FP32ToImm8(float imm);
+ static uint32_t FP64ToImm8(double imm);
+
+ // Instruction helpers.
+ void MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op);
+ BufferOffset DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op);
+ void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op);
+ void ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op);
+ void DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op);
+ void DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op);
+ void FPDataProcessing1Source(const VRegister& fd,
+ const VRegister& fn,
+ FPDataProcessing1SourceOp op);
+ void FPDataProcessing3Source(const VRegister& fd,
+ const VRegister& fn,
+ const VRegister& fm,
+ const VRegister& fa,
+ FPDataProcessing3SourceOp op);
+ void NEONAcrossLanesL(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op);
+ void NEONAcrossLanes(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op);
+ void NEONModifiedImmShiftLsl(const VRegister& vd,
+ const int imm8,
+ const int left_shift,
+ NEONModifiedImmediateOp op);
+ void NEONModifiedImmShiftMsl(const VRegister& vd,
+ const int imm8,
+ const int shift_amount,
+ NEONModifiedImmediateOp op);
+ void NEONFP2Same(const VRegister& vd,
+ const VRegister& vn,
+ Instr vop);
+ void NEON3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3SameOp vop);
+ void NEONFP3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Instr op);
+ void NEON3DifferentL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop);
+ void NEON3DifferentW(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop);
+ void NEON3DifferentHN(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop);
+ void NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ double value = 0.0);
+ void NEON2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ int value = 0);
+ void NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ Instr op);
+ void NEONAddlp(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp op);
+ void NEONPerm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONPermOp op);
+ void NEONFPByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONByElementL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONShiftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ NEONShiftImmediateOp op,
+ int immh_immb);
+ void NEONShiftLeftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftRightImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftImmediateL(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftImmediateN(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONXtn(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop);
+
+ Instr LoadStoreStructAddrModeField(const MemOperand& addr);
+
+ // Encode the specified MemOperand for the specified access size and scaling
+ // preference.
+ Instr LoadStoreMemOperand(const MemOperand& addr,
+ unsigned access_size,
+ LoadStoreScalingOption option);
+
+ protected:
+ // Prevent generation of a literal pool for the next |maxInst| instructions.
+ // Guarantees instruction linearity.
+ class AutoBlockLiteralPool {
+ ARMBuffer* armbuffer_;
+
+ public:
+ AutoBlockLiteralPool(Assembler* assembler, size_t maxInst)
+ : armbuffer_(&assembler->armbuffer_) {
+ armbuffer_->enterNoPool(maxInst);
+ }
+ ~AutoBlockLiteralPool() {
+ armbuffer_->leaveNoPool();
+ }
+ };
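+  // Illustrative use only (not part of the original header): inside an
+  // Assembler member function, a scoped blocker keeps the next instructions
+  // contiguous, e.g.
+  //   {
+  //     AutoBlockLiteralPool no_pool(this, /* maxInst = */ 2);
+  //     // ... emit the two instructions that must stay adjacent ...
+  //   }  // leaveNoPool() runs when no_pool is destroyed.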
+
+ protected:
+  // How the generated code may depend on its own position; see pic() and
+  // AllowPageOffsetDependentCode() above.
+  PositionIndependentCodeOption pic_;
+
+ CPUFeatures cpu_features_;
+
+#ifdef DEBUG
+ bool finalized_;
+#endif
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_ASSEMBLER_A64_H_
diff --git a/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h b/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h
new file mode 100644
index 0000000000..e13eef6135
--- /dev/null
+++ b/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h
@@ -0,0 +1,179 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef VIXL_COMPILER_INTRINSICS_H
+#define VIXL_COMPILER_INTRINSICS_H
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+namespace vixl {
+
+// Helper to check whether the version of GCC in use is at least the specified
+// version.
+#define MAJOR 1000000
+#define MINOR 1000
+#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
+ ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR + __GNUC_PATCHLEVEL__) >= \
+ ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
+ ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR) >= \
+ ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
+#else
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0
+#endif
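+// Worked example (illustrative): with GCC 9.3.0 the left-hand side above is
+// 9 * 1000000 + 3 * 1000 + 0 = 9003000, so GCC_VERSION_OR_NEWER(4, 7, 0)
+// compares 9003000 >= 4007000 and evaluates to true.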
+
+
+#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS)
+
+#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb))
+#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
+#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
+#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs))
+#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
+
+#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS)
+// The documentation for these builtins is available at:
+// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html
+
+# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0))
+# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0))
+
+#else
+// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually
+// implemented C++ methods.
+
+#define COMPILER_HAS_BUILTIN_BSWAP false
+#define COMPILER_HAS_BUILTIN_CLRSB false
+#define COMPILER_HAS_BUILTIN_CLZ false
+#define COMPILER_HAS_BUILTIN_CTZ false
+#define COMPILER_HAS_BUILTIN_FFS false
+#define COMPILER_HAS_BUILTIN_POPCOUNT false
+
+#endif
+
+
+template<typename V>
+inline bool IsPowerOf2(V value) {
+ return (value != 0) && ((value & (value - 1)) == 0);
+}
+
+
+// Implementations of the intrinsic functions.
+// TODO: The implementations could be improved for sizes other than 32-bit and
+// 64-bit: we could mask the values and call the appropriate builtin.
+
+
+template<typename V>
+inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLZ
+ if (width == 32) {
+ return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value));
+ } else if (width == 64) {
+ return (value == 0) ? 64 : __builtin_clzll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ if (width == 32) {
+ return mozilla::CountLeadingZeroes32(value);
+ } else if (width == 64) {
+ return mozilla::CountLeadingZeroes64(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#endif
+}
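+// For example, CountLeadingZeros(uint32_t(1), 32) == 31 and
+// CountLeadingZeros(uint64_t(1) << 40, 64) == 63 - 40 == 23.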
+
+
+template<typename V>
+inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLRSB
+ if (width == 32) {
+ return __builtin_clrsb(value);
+ } else if (width == 64) {
+ return __builtin_clrsbll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
+ if (value >= 0) {
+ return CountLeadingZeros(value, width) - 1;
+ } else {
+ return CountLeadingZeros(~value, width) - 1;
+ }
+#endif
+}
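+// For example, CountLeadingSignBits(int32_t(1), 32) == 30: the sign bit (0) is
+// followed by 30 more zero bits before the trailing 1. Likewise
+// CountLeadingSignBits(int32_t(-2), 32) == 30.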
+
+
+template<typename V>
+inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_POPCOUNT
+ if (width == 32) {
+ return __builtin_popcount(static_cast<unsigned>(value));
+ } else if (width == 64) {
+ return __builtin_popcountll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ if (width == 32) {
+ return mozilla::CountPopulation32(value);
+ } else if (width == 64) {
+ return mozilla::CountPopulation64(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#endif
+}
+
+
+template<typename V>
+inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CTZ
+ if (width == 32) {
+ return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value));
+ } else if (width == 64) {
+ return (value == 0) ? 64 : __builtin_ctzll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ if (width == 32) {
+ return mozilla::CountTrailingZeroes32(value);
+ } else if (width == 64) {
+ return mozilla::CountTrailingZeroes64(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#endif
+}
+
+} // namespace vixl
+
+#endif // VIXL_COMPILER_INTRINSICS_H
+
diff --git a/js/src/jit/arm64/vixl/Constants-vixl.h b/js/src/jit/arm64/vixl/Constants-vixl.h
new file mode 100644
index 0000000000..2c174e61a5
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Constants-vixl.h
@@ -0,0 +1,2694 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_CONSTANTS_A64_H_
+#define VIXL_A64_CONSTANTS_A64_H_
+
+#include <stdint.h>
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+namespace vixl {
+
+// Supervisor Call (svc) specific support.
+//
+// The SVC instruction encodes an optional 16-bit immediate value.
+// The simulator understands the codes below.
+enum SVCSimulatorCodes {
+ kCallRtRedirected = 0x10, // Transition to x86_64 C code.
+ kMarkStackPointer = 0x11, // Push the current SP on a special Simulator stack.
+ kCheckStackPointer = 0x12 // Pop from the special Simulator stack and compare to SP.
+};
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfVRegisters = 32;
+const unsigned kNumberOfFPRegisters = kNumberOfVRegisters;
+// Callee saved registers are x21-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 10;
+const int kFirstCalleeSavedRegisterIndex = 21;
+// Callee saved FP registers are d8-d15. Note that the high parts of v8-v15 are
+// still caller-saved.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define INSTRUCTION_FIELDS_LIST(V_) \
+/* Register fields */ \
+V_(Rd, 4, 0, Bits) /* Destination register. */ \
+V_(Rn, 9, 5, Bits) /* First source register. */ \
+V_(Rm, 20, 16, Bits) /* Second source register. */ \
+V_(Ra, 14, 10, Bits) /* Third source register. */ \
+V_(Rt, 4, 0, Bits) /* Load/store register. */ \
+V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \
+V_(Rs, 20, 16, Bits) /* Exclusive access status. */ \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, Bits) \
+V_(FlagsUpdate, 29, 29, Bits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, SignedBits) \
+V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, Bits) \
+V_(ImmDPShift, 15, 10, Bits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, Bits) \
+V_(ShiftAddSub, 23, 22, Bits) \
+ \
+/* Add/subtract extend */ \
+V_(ImmExtendShift, 12, 10, Bits) \
+V_(ExtendMode, 15, 13, Bits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, Bits) \
+V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, Bits) \
+V_(ImmRotate, 21, 16, Bits) \
+V_(ImmSetBits, 15, 10, Bits) \
+V_(ImmR, 21, 16, Bits) \
+V_(ImmS, 15, 10, Bits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, SignedBits) \
+V_(ImmTestBranchBit40, 23, 19, Bits) \
+V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, Bits) \
+V_(ConditionBranch, 3, 0, Bits) \
+V_(Nzcv, 3, 0, Bits) \
+V_(ImmCondCmp, 20, 16, Bits) \
+V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, Bits) \
+V_(ImmFP, 20, 13, Bits) \
+V_(FPScale, 15, 10, Bits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, SignedBits) \
+V_(ImmLSUnsigned, 21, 10, Bits) \
+V_(ImmLSPair, 21, 15, SignedBits) \
+V_(ImmShiftLS, 12, 12, Bits) \
+V_(LSOpc, 23, 22, Bits) \
+V_(LSVector, 26, 26, Bits) \
+V_(LSSize, 31, 30, Bits) \
+V_(ImmPrefetchOperation, 4, 0, Bits) \
+V_(PrefetchHint, 4, 3, Bits) \
+V_(PrefetchTarget, 2, 1, Bits) \
+V_(PrefetchStream, 0, 0, Bits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, SignedBits) \
+V_(ImmCmpBranch, 23, 5, SignedBits) \
+V_(ImmLLiteral, 23, 5, SignedBits) \
+V_(ImmException, 20, 5, Bits) \
+V_(ImmHint, 11, 5, Bits) \
+V_(ImmBarrierDomain, 11, 10, Bits) \
+V_(ImmBarrierType, 9, 8, Bits) \
+ \
+/* System (MRS, MSR, SYS) */ \
+V_(ImmSystemRegister, 19, 5, Bits) \
+V_(SysO0, 19, 19, Bits) \
+V_(SysOp, 18, 5, Bits) \
+V_(SysOp1, 18, 16, Bits) \
+V_(SysOp2, 7, 5, Bits) \
+V_(CRn, 15, 12, Bits) \
+V_(CRm, 11, 8, Bits) \
+ \
+/* Load-/store-exclusive */ \
+V_(LdStXLoad, 22, 22, Bits) \
+V_(LdStXNotExclusive, 23, 23, Bits) \
+V_(LdStXAcquireRelease, 15, 15, Bits) \
+V_(LdStXSizeLog2, 31, 30, Bits) \
+V_(LdStXPair, 21, 21, Bits) \
+ \
+/* NEON generic fields */ \
+V_(NEONQ, 30, 30, Bits) \
+V_(NEONSize, 23, 22, Bits) \
+V_(NEONLSSize, 11, 10, Bits) \
+V_(NEONS, 12, 12, Bits) \
+V_(NEONL, 21, 21, Bits) \
+V_(NEONM, 20, 20, Bits) \
+V_(NEONH, 11, 11, Bits) \
+V_(ImmNEONExt, 14, 11, Bits) \
+V_(ImmNEON5, 20, 16, Bits) \
+V_(ImmNEON4, 14, 11, Bits) \
+ \
+/* NEON Modified Immediate fields */ \
+V_(ImmNEONabc, 18, 16, Bits) \
+V_(ImmNEONdefgh, 9, 5, Bits) \
+V_(NEONModImmOp, 29, 29, Bits) \
+V_(NEONCmode, 15, 12, Bits) \
+ \
+/* NEON Shift Immediate fields */ \
+V_(ImmNEONImmhImmb, 22, 16, Bits) \
+V_(ImmNEONImmh, 22, 19, Bits) \
+V_(ImmNEONImmb, 18, 16, Bits)
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+/* NZCV */ \
+V_(Flags, 31, 28, Bits) \
+V_(N, 31, 31, Bits) \
+V_(Z, 30, 30, Bits) \
+V_(C, 29, 29, Bits) \
+V_(V, 28, 28, Bits) \
+M_(NZCV, Flags_mask) \
+/* FPCR */ \
+V_(AHP, 26, 26, Bits) \
+V_(DN, 25, 25, Bits) \
+V_(FZ, 24, 24, Bits) \
+V_(RMode, 23, 22, Bits) \
+M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+// Field offsets.
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \
+const int Name##_offset = LowBit; \
+const int Name##_width = HighBit - LowBit + 1; \
+const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
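+// As an illustration of the expansion above, the Rd entry (bits 4..0) yields
+// Rd_offset = 0, Rd_width = 5 and Rd_mask = 0x0000001F.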
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Condition codes.
+enum Condition {
+ eq = 0, // Z set Equal.
+ ne = 1, // Z clear Not equal.
+ cs = 2, // C set Carry set.
+ cc = 3, // C clear Carry clear.
+ mi = 4, // N set Negative.
+ pl = 5, // N clear Positive or zero.
+ vs = 6, // V set Overflow.
+ vc = 7, // V clear No overflow.
+ hi = 8, // C set, Z clear Unsigned higher.
+ ls = 9, // C clear or Z set Unsigned lower or same.
+ ge = 10, // N == V Greater or equal.
+ lt = 11, // N != V Less than.
+ gt = 12, // Z clear, N == V Greater than.
+  le = 13,  // Z set or N != V       Less than or equal.
+ al = 14, // Always.
+ nv = 15, // Behaves as always/al.
+
+ // Aliases.
+ hs = cs, // C set Unsigned higher or same.
+ lo = cc, // C clear Unsigned lower.
+
+ // Mozilla expanded aliases.
+ Equal = 0, Zero = 0,
+ NotEqual = 1, NonZero = 1,
+ AboveOrEqual = 2, CarrySet = 2,
+ Below = 3, CarryClear = 3,
+ Signed = 4,
+ NotSigned = 5,
+ Overflow = 6,
+ NoOverflow = 7,
+ Above = 8,
+ BelowOrEqual = 9,
+ GreaterThanOrEqual_ = 10,
+ LessThan_ = 11,
+ GreaterThan_ = 12,
+ LessThanOrEqual_ = 13,
+ Always = 14,
+ Never = 15
+};
+
+inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no "always false" condition.
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+}
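+// For example, InvertCondition(eq) == ne (0 ^ 1 == 1) and
+// InvertCondition(ge) == lt (10 ^ 1 == 11).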
+
+enum FPTrapFlags {
+ EnableTrap = 1,
+ DisableTrap = 0
+};
+
+enum FlagsUpdate {
+ SetFlags = 1,
+ LeaveFlags = 0
+};
+
+enum StatusFlags {
+ NoFlag = 0,
+
+ // Derive the flag combinations from the system register bit descriptions.
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+ // Floating-point comparison results.
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
+ FPGreaterThanFlag = CFlag,
+ FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+ NO_SHIFT = -1,
+ LSL = 0x0,
+ LSR = 0x1,
+ ASR = 0x2,
+ ROR = 0x3,
+ MSL = 0x4
+};
+
+enum Extend {
+ NO_EXTEND = -1,
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
+};
+
+enum SystemHint {
+ NOP = 0,
+ YIELD = 1,
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5,
+ ESB = 16,
+ CSDB = 20,
+ BTI = 32,
+ BTI_c = 34,
+ BTI_j = 36,
+ BTI_jc = 38
+};
+
+enum BranchTargetIdentifier {
+ EmitBTI_none = NOP,
+ EmitBTI = BTI,
+ EmitBTI_c = BTI_c,
+ EmitBTI_j = BTI_j,
+ EmitBTI_jc = BTI_jc,
+
+ // These correspond to the values of the CRm:op2 fields in the equivalent HINT
+ // instruction.
+ EmitPACIASP = 25,
+ EmitPACIBSP = 27
+};
+
+enum BarrierDomain {
+ OuterShareable = 0,
+ NonShareable = 1,
+ InnerShareable = 2,
+ FullSystem = 3
+};
+
+enum BarrierType {
+ BarrierOther = 0,
+ BarrierReads = 1,
+ BarrierWrites = 2,
+ BarrierAll = 3
+};
+
+enum PrefetchOperation {
+ PLDL1KEEP = 0x00,
+ PLDL1STRM = 0x01,
+ PLDL2KEEP = 0x02,
+ PLDL2STRM = 0x03,
+ PLDL3KEEP = 0x04,
+ PLDL3STRM = 0x05,
+
+ PLIL1KEEP = 0x08,
+ PLIL1STRM = 0x09,
+ PLIL2KEEP = 0x0a,
+ PLIL2STRM = 0x0b,
+ PLIL3KEEP = 0x0c,
+ PLIL3STRM = 0x0d,
+
+ PSTL1KEEP = 0x10,
+ PSTL1STRM = 0x11,
+ PSTL2KEEP = 0x12,
+ PSTL2STRM = 0x13,
+ PSTL3KEEP = 0x14,
+ PSTL3STRM = 0x15
+};
+
+enum BType {
+ // Set when executing any instruction on a guarded page, except those cases
+ // listed below.
+ DefaultBType = 0,
+
+ // Set when an indirect branch is taken from an unguarded page to a guarded
+  // page, or from a guarded page to ip0 or ip1 (x16 or x17), e.g. "br ip0".
+ BranchFromUnguardedOrToIP = 1,
+
+  // Set when an indirect branch and link (call) is taken, e.g. "blr x0".
+ BranchAndLink = 2,
+
+ // Set when an indirect branch is taken from a guarded page to a register
+  // that is not ip0 or ip1 (x16 or x17), e.g. "br x0".
+ BranchFromGuardedNotToIP = 3
+};
+
+template<int op0, int op1, int crn, int crm, int op2>
+class SystemRegisterEncoder {
+ public:
+ static const uint32_t value =
+ ((op0 << SysO0_offset) |
+ (op1 << SysOp1_offset) |
+ (crn << CRn_offset) |
+ (crm << CRm_offset) |
+ (op2 << SysOp2_offset)) >> ImmSystemRegister_offset;
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+ NZCV = SystemRegisterEncoder<3, 3, 4, 2, 0>::value,
+ FPCR = SystemRegisterEncoder<3, 3, 4, 4, 0>::value,
+ RNDR = SystemRegisterEncoder<3, 3, 2, 4, 0>::value, // Random number.
+ RNDRRS = SystemRegisterEncoder<3, 3, 2, 4, 1>::value // Reseeded random number.
+};
+
+template<int op1, int crn, int crm, int op2>
+class CacheOpEncoder {
+ public:
+ static const uint32_t value =
+ ((op1 << SysOp1_offset) |
+ (crn << CRn_offset) |
+ (crm << CRm_offset) |
+ (op2 << SysOp2_offset)) >> SysOp_offset;
+};
+
+enum InstructionCacheOp : uint32_t {
+ IVAU = CacheOpEncoder<3, 7, 5, 1>::value
+};
+
+enum DataCacheOp : uint32_t {
+ CVAC = CacheOpEncoder<3, 7, 10, 1>::value,
+ CVAU = CacheOpEncoder<3, 7, 11, 1>::value,
+ CVAP = CacheOpEncoder<3, 7, 12, 1>::value,
+ CVADP = CacheOpEncoder<3, 7, 13, 1>::value,
+ CIVAC = CacheOpEncoder<3, 7, 14, 1>::value,
+ ZVA = CacheOpEncoder<3, 7, 4, 1>::value
+};
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField : uint32_t {
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+
+ FPTypeMask = 0x00C00000,
+ FP16 = 0x00C00000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
+};
+
+enum NEONFormatField : uint32_t {
+ NEONFormatFieldMask = 0x40C00000,
+ NEON_Q = 0x40000000,
+ NEON_8B = 0x00000000,
+ NEON_16B = NEON_8B | NEON_Q,
+ NEON_4H = 0x00400000,
+ NEON_8H = NEON_4H | NEON_Q,
+ NEON_2S = 0x00800000,
+ NEON_4S = NEON_2S | NEON_Q,
+ NEON_1D = 0x00C00000,
+ NEON_2D = 0x00C00000 | NEON_Q
+};
+
+enum NEONFPFormatField : uint32_t {
+ NEONFPFormatFieldMask = 0x40400000,
+ NEON_FP_4H = FP16,
+ NEON_FP_2S = FP32,
+ NEON_FP_8H = FP16 | NEON_Q,
+ NEON_FP_4S = FP32 | NEON_Q,
+ NEON_FP_2D = FP64 | NEON_Q
+};
+
+enum NEONLSFormatField : uint32_t {
+ NEONLSFormatFieldMask = 0x40000C00,
+ LS_NEON_8B = 0x00000000,
+ LS_NEON_16B = LS_NEON_8B | NEON_Q,
+ LS_NEON_4H = 0x00000400,
+ LS_NEON_8H = LS_NEON_4H | NEON_Q,
+ LS_NEON_2S = 0x00000800,
+ LS_NEON_4S = LS_NEON_2S | NEON_Q,
+ LS_NEON_1D = 0x00000C00,
+ LS_NEON_2D = LS_NEON_1D | NEON_Q
+};
+
+enum NEONScalarFormatField : uint32_t {
+ NEONScalarFormatFieldMask = 0x00C00000,
+ NEONScalar = 0x10000000,
+ NEON_B = 0x00000000,
+ NEON_H = 0x00400000,
+ NEON_S = 0x00800000,
+ NEON_D = 0x00C00000
+};
+
+// PC relative addressing.
+enum PCRelAddressingOp : uint32_t {
+ PCRelAddressingFixed = 0x10000000,
+ PCRelAddressingFMask = 0x1F000000,
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
+};
+
+// Add/sub (immediate, shifted and extended.)
+const int kSFOffset = 31;
+enum AddSubOp : uint32_t {
+ AddSubOpMask = 0x60000000,
+ AddSubSetFlagsBit = 0x20000000,
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
+};
+
+#define ADD_SUB_OP_LIST(V) \
+ V(ADD), \
+ V(ADDS), \
+ V(SUB), \
+ V(SUBS)
+
+enum AddSubImmediateOp : uint32_t {
+ AddSubImmediateFixed = 0x11000000,
+ AddSubImmediateFMask = 0x1F000000,
+ AddSubImmediateMask = 0xFF000000,
+ #define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
+ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
+ #undef ADD_SUB_IMMEDIATE
+};
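+// As an example of the expansion above, the ADD entry yields
+// ADD_w_imm = 0x11000000 and ADD_x_imm = 0x91000000
+// (SixtyFourBits is the sf bit, bit 31).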
+
+enum AddSubShiftedOp : uint32_t {
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+ #define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
+ A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
+ #undef ADD_SUB_SHIFTED
+};
+
+enum AddSubExtendedOp : uint32_t {
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+ #define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
+ A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
+ #undef ADD_SUB_EXTENDED
+};
+
+// Add/sub with carry.
+enum AddSubWithCarryOp : uint32_t {
+ AddSubWithCarryFixed = 0x1A000000,
+ AddSubWithCarryFMask = 0x1FE00000,
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+};
+
+// Rotate right into flags.
+enum RotateRightIntoFlagsOp : uint32_t {
+ RotateRightIntoFlagsFixed = 0x1A000400,
+ RotateRightIntoFlagsFMask = 0x1FE07C00,
+ RotateRightIntoFlagsMask = 0xFFE07C10,
+ RMIF = RotateRightIntoFlagsFixed | 0xA0000000
+};
+
+// Evaluate into flags.
+enum EvaluateIntoFlagsOp : uint32_t {
+ EvaluateIntoFlagsFixed = 0x1A000800,
+ EvaluateIntoFlagsFMask = 0x1FE03C00,
+ EvaluateIntoFlagsMask = 0xFFE07C1F,
+ SETF8 = EvaluateIntoFlagsFixed | 0x2000000D,
+ SETF16 = EvaluateIntoFlagsFixed | 0x2000400D
+};
+
+// Logical (immediate and shifted register).
+enum LogicalOp : uint32_t {
+ LogicalOpMask = 0x60200000,
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
+};
+
+// Logical immediate.
+enum LogicalImmediateOp : uint32_t {
+ LogicalImmediateFixed = 0x12000000,
+ LogicalImmediateFMask = 0x1F800000,
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+};
+
+// Logical shifted register.
+enum LogicalShiftedOp : uint32_t {
+ LogicalShiftedFixed = 0x0A000000,
+ LogicalShiftedFMask = 0x1F000000,
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
+};
+
+// Move wide immediate.
+enum MoveWideImmediateOp : uint32_t {
+ MoveWideImmediateFixed = 0x12800000,
+ MoveWideImmediateFMask = 0x1F800000,
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+};
+
+// Bitfield.
+const int kBitfieldNOffset = 22;
+enum BitfieldOp : uint32_t {
+ BitfieldFixed = 0x13000000,
+ BitfieldFMask = 0x1F800000,
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
+ // Bitfield N field.
+};
+
+// Extract.
+enum ExtractOp : uint32_t {
+ ExtractFixed = 0x13800000,
+ ExtractFMask = 0x1F800000,
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
+};
+
+// Unconditional branch.
+enum UnconditionalBranchOp : uint32_t {
+ UnconditionalBranchFixed = 0x14000000,
+ UnconditionalBranchFMask = 0x7C000000,
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
+};
+
+// Unconditional branch to register.
+enum UnconditionalBranchToRegisterOp : uint32_t {
+ UnconditionalBranchToRegisterFixed = 0xD6000000,
+ UnconditionalBranchToRegisterFMask = 0xFE000000,
+ UnconditionalBranchToRegisterMask = 0xFFFFFC00,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000,
+
+ BRAAZ = UnconditionalBranchToRegisterFixed | 0x001F0800,
+ BRABZ = UnconditionalBranchToRegisterFixed | 0x001F0C00,
+ BLRAAZ = UnconditionalBranchToRegisterFixed | 0x003F0800,
+ BLRABZ = UnconditionalBranchToRegisterFixed | 0x003F0C00,
+ RETAA = UnconditionalBranchToRegisterFixed | 0x005F0800,
+ RETAB = UnconditionalBranchToRegisterFixed | 0x005F0C00,
+ BRAA = UnconditionalBranchToRegisterFixed | 0x011F0800,
+ BRAB = UnconditionalBranchToRegisterFixed | 0x011F0C00,
+ BLRAA = UnconditionalBranchToRegisterFixed | 0x013F0800,
+ BLRAB = UnconditionalBranchToRegisterFixed | 0x013F0C00
+};
+
+// Compare and branch.
+enum CompareBranchOp : uint32_t {
+ CompareBranchFixed = 0x34000000,
+ CompareBranchFMask = 0x7E000000,
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
+};
+
+// Test and branch.
+enum TestBranchOp : uint32_t {
+ TestBranchFixed = 0x36000000,
+ TestBranchFMask = 0x7E000000,
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
+};
+
+// Conditional branch.
+enum ConditionalBranchOp : uint32_t {
+ ConditionalBranchFixed = 0x54000000,
+ ConditionalBranchFMask = 0xFE000000,
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
+};
+
+// System.
+// System instruction encoding is complicated because some instructions use op
+// and CR fields to encode parameters. To handle this cleanly, the system
+// instructions are split into more than one enum.
+
+enum SystemOp : uint32_t {
+ SystemFixed = 0xD5000000,
+ SystemFMask = 0xFFC00000
+};
+
+enum SystemSysRegOp : uint32_t {
+ SystemSysRegFixed = 0xD5100000,
+ SystemSysRegFMask = 0xFFD00000,
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
+};
+
+enum SystemPStateOp : uint32_t {
+ SystemPStateFixed = 0xD5004000,
+ SystemPStateFMask = 0xFFF8F000,
+ SystemPStateMask = 0xFFFFF0FF,
+ CFINV = SystemPStateFixed | 0x0000001F,
+ XAFLAG = SystemPStateFixed | 0x0000003F,
+ AXFLAG = SystemPStateFixed | 0x0000005F
+};
+
+enum SystemHintOp : uint32_t {
+ SystemHintFixed = 0xD503201F,
+ SystemHintFMask = 0xFFFFF01F,
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
+};
+
+enum SystemSysOp : uint32_t {
+ SystemSysFixed = 0xD5080000,
+ SystemSysFMask = 0xFFF80000,
+ SystemSysMask = 0xFFF80000,
+ SYS = SystemSysFixed | 0x00000000
+};
+
+// Exception.
+enum ExceptionOp : uint32_t {
+ ExceptionFixed = 0xD4000000,
+ ExceptionFMask = 0xFF000000,
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
+};
+
+enum MemBarrierOp : uint32_t {
+ MemBarrierFixed = 0xD503309F,
+ MemBarrierFMask = 0xFFFFF09F,
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
+};
+
+enum SystemExclusiveMonitorOp : uint32_t {
+ SystemExclusiveMonitorFixed = 0xD503305F,
+ SystemExclusiveMonitorFMask = 0xFFFFF0FF,
+ SystemExclusiveMonitorMask = 0xFFFFF0FF,
+ CLREX = SystemExclusiveMonitorFixed
+};
+
+enum SystemPAuthOp : uint32_t {
+ SystemPAuthFixed = 0xD503211F,
+ SystemPAuthFMask = 0xFFFFFD1F,
+ SystemPAuthMask = 0xFFFFFFFF,
+ PACIA1716 = SystemPAuthFixed | 0x00000100,
+ PACIB1716 = SystemPAuthFixed | 0x00000140,
+ AUTIA1716 = SystemPAuthFixed | 0x00000180,
+ AUTIB1716 = SystemPAuthFixed | 0x000001C0,
+ PACIAZ = SystemPAuthFixed | 0x00000300,
+ PACIASP = SystemPAuthFixed | 0x00000320,
+ PACIBZ = SystemPAuthFixed | 0x00000340,
+ PACIBSP = SystemPAuthFixed | 0x00000360,
+ AUTIAZ = SystemPAuthFixed | 0x00000380,
+ AUTIASP = SystemPAuthFixed | 0x000003A0,
+ AUTIBZ = SystemPAuthFixed | 0x000003C0,
+ AUTIBSP = SystemPAuthFixed | 0x000003E0,
+
+ // XPACLRI has the same fixed mask as System Hints and needs to be handled
+ // differently.
+ XPACLRI = 0xD50320FF
+};
+
+// Any load or store.
+enum LoadStoreAnyOp : uint32_t {
+ LoadStoreAnyFMask = 0x0a000000,
+ LoadStoreAnyFixed = 0x08000000
+};
+
+// Any load pair or store pair.
+enum LoadStorePairAnyOp : uint32_t {
+ LoadStorePairAnyFMask = 0x3a000000,
+ LoadStorePairAnyFixed = 0x28000000
+};
+
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000), \
+ V(LDP, w, 0x00400000), \
+ V(LDPSW, x, 0x40400000), \
+ V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), \
+ V(STP, s, 0x04000000), \
+ V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), \
+ V(LDP, d, 0x44400000), \
+ V(STP, q, 0x84000000), \
+ V(LDP, q, 0x84400000)
+
+// Load/store pair (post, pre and offset.)
+enum LoadStorePairOp : uint32_t {
+ LoadStorePairMask = 0xC4400000,
+ LoadStorePairLBit = 1 << 22,
+ #define LOAD_STORE_PAIR(A, B, C) \
+ A##_##B = C
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
+ #undef LOAD_STORE_PAIR
+};
+
+enum LoadStorePairPostIndexOp : uint32_t {
+ LoadStorePairPostIndexFixed = 0x28800000,
+ LoadStorePairPostIndexFMask = 0x3B800000,
+ LoadStorePairPostIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
+ #undef LOAD_STORE_PAIR_POST_INDEX
+};
+
+enum LoadStorePairPreIndexOp : uint32_t {
+ LoadStorePairPreIndexFixed = 0x29800000,
+ LoadStorePairPreIndexFMask = 0x3B800000,
+ LoadStorePairPreIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
+ #undef LOAD_STORE_PAIR_PRE_INDEX
+};
+
+enum LoadStorePairOffsetOp : uint32_t {
+ LoadStorePairOffsetFixed = 0x29000000,
+ LoadStorePairOffsetFMask = 0x3B800000,
+ LoadStorePairOffsetMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
+ #undef LOAD_STORE_PAIR_OFFSET
+};
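+// As an example of the expansions above, STP_x = 0x80000000, so
+// STP_x_off = LoadStorePairOffsetFixed | STP_x = 0xA9000000 and
+// LDP_x_off = 0xA9400000.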
+
+enum LoadStorePairNonTemporalOp : uint32_t {
+ LoadStorePairNonTemporalFixed = 0x28000000,
+ LoadStorePairNonTemporalFMask = 0x3B800000,
+ LoadStorePairNonTemporalMask = 0xFFC00000,
+ LoadStorePairNonTemporalLBit = 1 << 22,
+ STNP_w = LoadStorePairNonTemporalFixed | STP_w,
+ LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
+ STNP_x = LoadStorePairNonTemporalFixed | STP_x,
+ LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
+ STNP_s = LoadStorePairNonTemporalFixed | STP_s,
+ LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
+ STNP_d = LoadStorePairNonTemporalFixed | STP_d,
+ LDNP_d = LoadStorePairNonTemporalFixed | LDP_d,
+ STNP_q = LoadStorePairNonTemporalFixed | STP_q,
+ LDNP_q = LoadStorePairNonTemporalFixed | LDP_q
+};
+
+// Load with pointer authentication.
+enum LoadStorePACOp : uint32_t {
+ LoadStorePACFixed = 0xF8200400,
+ LoadStorePACFMask = 0xFF200400,
+ LoadStorePACMask = 0xFFA00C00,
+ LoadStorePACPreBit = 0x00000800,
+ LDRAA = LoadStorePACFixed | 0x00000000,
+ LDRAA_pre = LoadStorePACPreBit | LDRAA,
+ LDRAB = LoadStorePACFixed | 0x00800000,
+ LDRAB_pre = LoadStorePACPreBit | LDRAB
+};
+
+// Load literal.
+enum LoadLiteralOp : uint32_t {
+ LoadLiteralFixed = 0x18000000,
+ LoadLiteralFMask = 0x3B000000,
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000,
+ LDR_q_lit = LoadLiteralFixed | 0x84000000
+};
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, b, 0x04000000), \
+ V(ST, R, h, 0x44000000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(ST, R, q, 0x04800000), \
+ V(LD, R, b, 0x04400000), \
+ V(LD, R, h, 0x44400000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000), \
+ V(LD, R, q, 0x04C00000)
+
+// Load/store (post, pre, offset and unsigned.)
+enum LoadStoreOp : uint32_t {
+ LoadStoreMask = 0xC4C00000,
+ LoadStoreVMask = 0x04000000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
+
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp : uint32_t {
+ LoadStoreUnscaledOffsetFixed = 0x38000000,
+ LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ PRFUM = LoadStoreUnscaledOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+ #undef LOAD_STORE_UNSCALED
+};
+
+// Load/store post index.
+enum LoadStorePostIndex : uint32_t {
+ LoadStorePostIndexFixed = 0x38000400,
+ LoadStorePostIndexFMask = 0x3B200C00,
+ LoadStorePostIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ A##B##_##C##_post = LoadStorePostIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
+ #undef LOAD_STORE_POST_INDEX
+};
+
+// Load/store pre index.
+enum LoadStorePreIndex : uint32_t {
+ LoadStorePreIndexFixed = 0x38000C00,
+ LoadStorePreIndexFMask = 0x3B200C00,
+ LoadStorePreIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ A##B##_##C##_pre = LoadStorePreIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
+ #undef LOAD_STORE_PRE_INDEX
+};
+
+// Load/store unsigned offset.
+enum LoadStoreUnsignedOffset : uint32_t {
+ LoadStoreUnsignedOffsetFixed = 0x39000000,
+ LoadStoreUnsignedOffsetFMask = 0x3B000000,
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
+ #undef LOAD_STORE_UNSIGNED_OFFSET
+};
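+// For example, LDR_x = 0xC0400000, so
+// LDR_x_unsigned = LoadStoreUnsignedOffsetFixed | LDR_x = 0xF9400000 and
+// STR_x_unsigned = 0xF9000000.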
+
+// Load/store register offset.
+enum LoadStoreRegisterOffset : uint32_t {
+ LoadStoreRegisterOffsetFixed = 0x38200800,
+ LoadStoreRegisterOffsetFMask = 0x3B200C00,
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+ #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
+ #undef LOAD_STORE_REGISTER_OFFSET
+};
+
+enum LoadStoreExclusive : uint32_t {
+ LoadStoreExclusiveFixed = 0x08000000,
+ LoadStoreExclusiveFMask = 0x3F000000,
+ LoadStoreExclusiveMask = 0xFFE08000,
+ STXRB_w = LoadStoreExclusiveFixed | 0x00000000,
+ STXRH_w = LoadStoreExclusiveFixed | 0x40000000,
+ STXR_w = LoadStoreExclusiveFixed | 0x80000000,
+ STXR_x = LoadStoreExclusiveFixed | 0xC0000000,
+ LDXRB_w = LoadStoreExclusiveFixed | 0x00400000,
+ LDXRH_w = LoadStoreExclusiveFixed | 0x40400000,
+ LDXR_w = LoadStoreExclusiveFixed | 0x80400000,
+ LDXR_x = LoadStoreExclusiveFixed | 0xC0400000,
+ STXP_w = LoadStoreExclusiveFixed | 0x80200000,
+ STXP_x = LoadStoreExclusiveFixed | 0xC0200000,
+ LDXP_w = LoadStoreExclusiveFixed | 0x80600000,
+ LDXP_x = LoadStoreExclusiveFixed | 0xC0600000,
+ STLXRB_w = LoadStoreExclusiveFixed | 0x00008000,
+ STLXRH_w = LoadStoreExclusiveFixed | 0x40008000,
+ STLXR_w = LoadStoreExclusiveFixed | 0x80008000,
+ STLXR_x = LoadStoreExclusiveFixed | 0xC0008000,
+ LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000,
+ LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000,
+ LDAXR_w = LoadStoreExclusiveFixed | 0x80408000,
+ LDAXR_x = LoadStoreExclusiveFixed | 0xC0408000,
+ STLXP_w = LoadStoreExclusiveFixed | 0x80208000,
+ STLXP_x = LoadStoreExclusiveFixed | 0xC0208000,
+ LDAXP_w = LoadStoreExclusiveFixed | 0x80608000,
+ LDAXP_x = LoadStoreExclusiveFixed | 0xC0608000,
+ STLRB_w = LoadStoreExclusiveFixed | 0x00808000,
+ STLRH_w = LoadStoreExclusiveFixed | 0x40808000,
+ STLR_w = LoadStoreExclusiveFixed | 0x80808000,
+ STLR_x = LoadStoreExclusiveFixed | 0xC0808000,
+ LDARB_w = LoadStoreExclusiveFixed | 0x00C08000,
+ LDARH_w = LoadStoreExclusiveFixed | 0x40C08000,
+ LDAR_w = LoadStoreExclusiveFixed | 0x80C08000,
+ LDAR_x = LoadStoreExclusiveFixed | 0xC0C08000,
+
+ // v8.1 Load/store LORegion ops
+ STLLRB = LoadStoreExclusiveFixed | 0x00800000,
+ LDLARB = LoadStoreExclusiveFixed | 0x00C00000,
+ STLLRH = LoadStoreExclusiveFixed | 0x40800000,
+ LDLARH = LoadStoreExclusiveFixed | 0x40C00000,
+ STLLR_w = LoadStoreExclusiveFixed | 0x80800000,
+ LDLAR_w = LoadStoreExclusiveFixed | 0x80C00000,
+ STLLR_x = LoadStoreExclusiveFixed | 0xC0800000,
+ LDLAR_x = LoadStoreExclusiveFixed | 0xC0C00000,
+
+ // v8.1 Load/store exclusive ops
+ LSEBit_l = 0x00400000,
+ LSEBit_o0 = 0x00008000,
+ LSEBit_sz = 0x40000000,
+ CASFixed = LoadStoreExclusiveFixed | 0x80A00000,
+ CASBFixed = LoadStoreExclusiveFixed | 0x00A00000,
+ CASHFixed = LoadStoreExclusiveFixed | 0x40A00000,
+ CASPFixed = LoadStoreExclusiveFixed | 0x00200000,
+ CAS_w = CASFixed,
+ CAS_x = CASFixed | LSEBit_sz,
+ CASA_w = CASFixed | LSEBit_l,
+ CASA_x = CASFixed | LSEBit_l | LSEBit_sz,
+ CASL_w = CASFixed | LSEBit_o0,
+ CASL_x = CASFixed | LSEBit_o0 | LSEBit_sz,
+ CASAL_w = CASFixed | LSEBit_l | LSEBit_o0,
+ CASAL_x = CASFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz,
+ CASB = CASBFixed,
+ CASAB = CASBFixed | LSEBit_l,
+ CASLB = CASBFixed | LSEBit_o0,
+ CASALB = CASBFixed | LSEBit_l | LSEBit_o0,
+ CASH = CASHFixed,
+ CASAH = CASHFixed | LSEBit_l,
+ CASLH = CASHFixed | LSEBit_o0,
+ CASALH = CASHFixed | LSEBit_l | LSEBit_o0,
+ CASP_w = CASPFixed,
+ CASP_x = CASPFixed | LSEBit_sz,
+ CASPA_w = CASPFixed | LSEBit_l,
+ CASPA_x = CASPFixed | LSEBit_l | LSEBit_sz,
+ CASPL_w = CASPFixed | LSEBit_o0,
+ CASPL_x = CASPFixed | LSEBit_o0 | LSEBit_sz,
+ CASPAL_w = CASPFixed | LSEBit_l | LSEBit_o0,
+ CASPAL_x = CASPFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz
+};
+
+// Load/store RCpc unscaled offset.
+enum LoadStoreRCpcUnscaledOffsetOp : uint32_t {
+ LoadStoreRCpcUnscaledOffsetFixed = 0x19000000,
+ LoadStoreRCpcUnscaledOffsetFMask = 0x3F200C00,
+ LoadStoreRCpcUnscaledOffsetMask = 0xFFE00C00,
+ STLURB = LoadStoreRCpcUnscaledOffsetFixed | 0x00000000,
+ LDAPURB = LoadStoreRCpcUnscaledOffsetFixed | 0x00400000,
+ LDAPURSB_x = LoadStoreRCpcUnscaledOffsetFixed | 0x00800000,
+ LDAPURSB_w = LoadStoreRCpcUnscaledOffsetFixed | 0x00C00000,
+ STLURH = LoadStoreRCpcUnscaledOffsetFixed | 0x40000000,
+ LDAPURH = LoadStoreRCpcUnscaledOffsetFixed | 0x40400000,
+ LDAPURSH_x = LoadStoreRCpcUnscaledOffsetFixed | 0x40800000,
+ LDAPURSH_w = LoadStoreRCpcUnscaledOffsetFixed | 0x40C00000,
+ STLUR_w = LoadStoreRCpcUnscaledOffsetFixed | 0x80000000,
+ LDAPUR_w = LoadStoreRCpcUnscaledOffsetFixed | 0x80400000,
+ LDAPURSW = LoadStoreRCpcUnscaledOffsetFixed | 0x80800000,
+ STLUR_x = LoadStoreRCpcUnscaledOffsetFixed | 0xC0000000,
+ LDAPUR_x = LoadStoreRCpcUnscaledOffsetFixed | 0xC0400000
+};
+
+#define ATOMIC_MEMORY_SIMPLE_OPC_LIST(V) \
+ V(LDADD, 0x00000000), \
+ V(LDCLR, 0x00001000), \
+ V(LDEOR, 0x00002000), \
+ V(LDSET, 0x00003000), \
+ V(LDSMAX, 0x00004000), \
+ V(LDSMIN, 0x00005000), \
+ V(LDUMAX, 0x00006000), \
+ V(LDUMIN, 0x00007000)
+
+// Atomic memory.
+enum AtomicMemoryOp : uint32_t {
+ AtomicMemoryFixed = 0x38200000,
+ AtomicMemoryFMask = 0x3B200C00,
+ AtomicMemoryMask = 0xFFE0FC00,
+ SWPB = AtomicMemoryFixed | 0x00008000,
+ SWPAB = AtomicMemoryFixed | 0x00808000,
+ SWPLB = AtomicMemoryFixed | 0x00408000,
+ SWPALB = AtomicMemoryFixed | 0x00C08000,
+ SWPH = AtomicMemoryFixed | 0x40008000,
+ SWPAH = AtomicMemoryFixed | 0x40808000,
+ SWPLH = AtomicMemoryFixed | 0x40408000,
+ SWPALH = AtomicMemoryFixed | 0x40C08000,
+ SWP_w = AtomicMemoryFixed | 0x80008000,
+ SWPA_w = AtomicMemoryFixed | 0x80808000,
+ SWPL_w = AtomicMemoryFixed | 0x80408000,
+ SWPAL_w = AtomicMemoryFixed | 0x80C08000,
+ SWP_x = AtomicMemoryFixed | 0xC0008000,
+ SWPA_x = AtomicMemoryFixed | 0xC0808000,
+ SWPL_x = AtomicMemoryFixed | 0xC0408000,
+ SWPAL_x = AtomicMemoryFixed | 0xC0C08000,
+ LDAPRB = AtomicMemoryFixed | 0x0080C000,
+ LDAPRH = AtomicMemoryFixed | 0x4080C000,
+ LDAPR_w = AtomicMemoryFixed | 0x8080C000,
+ LDAPR_x = AtomicMemoryFixed | 0xC080C000,
+
+ AtomicMemorySimpleFMask = 0x3B208C00,
+ AtomicMemorySimpleOpMask = 0x00007000,
+#define ATOMIC_MEMORY_SIMPLE(N, OP) \
+ N##Op = OP, \
+ N##B = AtomicMemoryFixed | OP, \
+ N##AB = AtomicMemoryFixed | OP | 0x00800000, \
+ N##LB = AtomicMemoryFixed | OP | 0x00400000, \
+ N##ALB = AtomicMemoryFixed | OP | 0x00C00000, \
+ N##H = AtomicMemoryFixed | OP | 0x40000000, \
+ N##AH = AtomicMemoryFixed | OP | 0x40800000, \
+ N##LH = AtomicMemoryFixed | OP | 0x40400000, \
+ N##ALH = AtomicMemoryFixed | OP | 0x40C00000, \
+ N##_w = AtomicMemoryFixed | OP | 0x80000000, \
+ N##A_w = AtomicMemoryFixed | OP | 0x80800000, \
+ N##L_w = AtomicMemoryFixed | OP | 0x80400000, \
+ N##AL_w = AtomicMemoryFixed | OP | 0x80C00000, \
+ N##_x = AtomicMemoryFixed | OP | 0xC0000000, \
+ N##A_x = AtomicMemoryFixed | OP | 0xC0800000, \
+ N##L_x = AtomicMemoryFixed | OP | 0xC0400000, \
+ N##AL_x = AtomicMemoryFixed | OP | 0xC0C00000
+
+ ATOMIC_MEMORY_SIMPLE_OPC_LIST(ATOMIC_MEMORY_SIMPLE)
+#undef ATOMIC_MEMORY_SIMPLE
+};
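+// For example, the LDADD row (opc 0x00000000) expands to LDADDB = 0x38200000,
+// LDADD_w = 0xB8200000 and LDADDAL_x = 0xF8E00000.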
+
+// Conditional compare.
+enum ConditionalCompareOp : uint32_t {
+ ConditionalCompareMask = 0x60000000,
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
+};
+
+// Conditional compare register.
+enum ConditionalCompareRegisterOp : uint32_t {
+ ConditionalCompareRegisterFixed = 0x1A400000,
+ ConditionalCompareRegisterFMask = 0x1FE00800,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
+ CCMN_w = ConditionalCompareRegisterFixed | CCMN,
+ CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
+ CCMP_w = ConditionalCompareRegisterFixed | CCMP,
+ CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
+};
+
+// Conditional compare immediate.
+enum ConditionalCompareImmediateOp : uint32_t {
+ ConditionalCompareImmediateFixed = 0x1A400800,
+ ConditionalCompareImmediateFMask = 0x1FE00800,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
+ CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
+ CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
+ CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
+ CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
+};
+
+// Conditional select.
+enum ConditionalSelectOp : uint32_t {
+ ConditionalSelectFixed = 0x1A800000,
+ ConditionalSelectFMask = 0x1FE00000,
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
+};
+
+// Data processing 1 source.
+enum DataProcessing1SourceOp : uint32_t {
+ DataProcessing1SourceFixed = 0x5AC00000,
+ DataProcessing1SourceFMask = 0x5FE00000,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
+ REV16_w = REV16,
+ REV16_x = REV16 | SixtyFourBits,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
+ REV32_x = REV | SixtyFourBits,
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits,
+
+ // Pointer authentication instructions in Armv8.3.
+ PACIA = DataProcessing1SourceFixed | 0x80010000,
+ PACIB = DataProcessing1SourceFixed | 0x80010400,
+ PACDA = DataProcessing1SourceFixed | 0x80010800,
+ PACDB = DataProcessing1SourceFixed | 0x80010C00,
+ AUTIA = DataProcessing1SourceFixed | 0x80011000,
+ AUTIB = DataProcessing1SourceFixed | 0x80011400,
+ AUTDA = DataProcessing1SourceFixed | 0x80011800,
+ AUTDB = DataProcessing1SourceFixed | 0x80011C00,
+ PACIZA = DataProcessing1SourceFixed | 0x80012000,
+ PACIZB = DataProcessing1SourceFixed | 0x80012400,
+ PACDZA = DataProcessing1SourceFixed | 0x80012800,
+ PACDZB = DataProcessing1SourceFixed | 0x80012C00,
+ AUTIZA = DataProcessing1SourceFixed | 0x80013000,
+ AUTIZB = DataProcessing1SourceFixed | 0x80013400,
+ AUTDZA = DataProcessing1SourceFixed | 0x80013800,
+ AUTDZB = DataProcessing1SourceFixed | 0x80013C00,
+ XPACI = DataProcessing1SourceFixed | 0x80014000,
+ XPACD = DataProcessing1SourceFixed | 0x80014400
+};
+
+// Data processing 2 source.
+enum DataProcessing2SourceOp : uint32_t {
+ DataProcessing2SourceFixed = 0x1AC00000,
+ DataProcessing2SourceFMask = 0x5FE00000,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ PACGA = DataProcessing2SourceFixed | SixtyFourBits | 0x00003000,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ CRC32CB = DataProcessing2SourceFixed | 0x00005000,
+ CRC32CH = DataProcessing2SourceFixed | 0x00005400,
+ CRC32CW = DataProcessing2SourceFixed | 0x00005800,
+ CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
+};
+
+// Data processing 3 source.
+enum DataProcessing3SourceOp : uint32_t {
+ DataProcessing3SourceFixed = 0x1B000000,
+ DataProcessing3SourceFMask = 0x1F000000,
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+};
+
+// Floating point compare.
+enum FPCompareOp : uint32_t {
+ FPCompareFixed = 0x1E202000,
+ FPCompareFMask = 0x5F203C00,
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_h = FPCompareFixed | FP16 | 0x00000000,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_h_zero = FPCompareFixed | FP16 | 0x00000008,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_h = FPCompareFixed | FP16 | 0x00000010,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE = FCMPE_s,
+ FCMPE_h_zero = FPCompareFixed | FP16 | 0x00000018,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018,
+ FCMPE_zero = FCMPE_s_zero
+};
+
+// Floating point conditional compare.
+enum FPConditionalCompareOp : uint32_t {
+ FPConditionalCompareFixed = 0x1E200400,
+ FPConditionalCompareFMask = 0x5F200C00,
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_h = FPConditionalCompareFixed | FP16 | 0x00000000,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_h = FPConditionalCompareFixed | FP16 | 0x00000010,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
+};
+
+// Floating point conditional select.
+enum FPConditionalSelectOp : uint32_t {
+ FPConditionalSelectFixed = 0x1E200C00,
+ FPConditionalSelectFMask = 0x5F200C00,
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_h = FPConditionalSelectFixed | FP16 | 0x00000000,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
+};
+
+// Floating point immediate.
+enum FPImmediateOp : uint32_t {
+ FPImmediateFixed = 0x1E201000,
+ FPImmediateFMask = 0x5F201C00,
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_h_imm = FPImmediateFixed | FP16 | 0x00000000,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+};
+
+// Floating point data processing 1 source.
+enum FPDataProcessing1SourceOp : uint32_t {
+ FPDataProcessing1SourceFixed = 0x1E204000,
+ FPDataProcessing1SourceFMask = 0x5F207C00,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_h = FPDataProcessing1SourceFixed | FP16 | 0x00000000,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_h = FPDataProcessing1SourceFixed | FP16 | 0x00008000,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_h = FPDataProcessing1SourceFixed | FP16 | 0x00010000,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_h = FPDataProcessing1SourceFixed | FP16 | 0x00018000,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000,
+ FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000,
+ FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000,
+ FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000,
+ FRINT32X_s = FPDataProcessing1SourceFixed | 0x00088000,
+ FRINT32X_d = FPDataProcessing1SourceFixed | FP64 | 0x00088000,
+ FRINT32X = FRINT32X_s,
+ FRINT32Z_s = FPDataProcessing1SourceFixed | 0x00080000,
+ FRINT32Z_d = FPDataProcessing1SourceFixed | FP64 | 0x00080000,
+ FRINT32Z = FRINT32Z_s,
+ FRINT64X_s = FPDataProcessing1SourceFixed | 0x00098000,
+ FRINT64X_d = FPDataProcessing1SourceFixed | FP64 | 0x00098000,
+ FRINT64X = FRINT64X_s,
+ FRINT64Z_s = FPDataProcessing1SourceFixed | 0x00090000,
+ FRINT64Z_d = FPDataProcessing1SourceFixed | FP64 | 0x00090000,
+ FRINT64Z = FRINT64Z_s,
+ FRINTN_h = FPDataProcessing1SourceFixed | FP16 | 0x00040000,
+ FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
+ FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
+ FRINTN = FRINTN_s,
+ FRINTP_h = FPDataProcessing1SourceFixed | FP16 | 0x00048000,
+ FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
+ FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTP = FRINTP_s,
+ FRINTM_h = FPDataProcessing1SourceFixed | FP16 | 0x00050000,
+ FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
+ FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTM = FRINTM_s,
+ FRINTZ_h = FPDataProcessing1SourceFixed | FP16 | 0x00058000,
+ FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
+ FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
+ FRINTZ = FRINTZ_s,
+ FRINTA_h = FPDataProcessing1SourceFixed | FP16 | 0x00060000,
+ FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
+ FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTA = FRINTA_s,
+ FRINTX_h = FPDataProcessing1SourceFixed | FP16 | 0x00070000,
+ FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
+ FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTX = FRINTX_s,
+ FRINTI_h = FPDataProcessing1SourceFixed | FP16 | 0x00078000,
+ FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
+ FRINTI = FRINTI_s
+};
+
+// Floating point data processing 2 source.
+enum FPDataProcessing2SourceOp : uint32_t {
+ FPDataProcessing2SourceFixed = 0x1E200800,
+ FPDataProcessing2SourceFMask = 0x5F200C00,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_h = FMUL | FP16,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_h = FDIV | FP16,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_h = FADD | FP16,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_h = FSUB | FP16,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_h = FMAX | FP16,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_h = FMIN | FP16,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FMAXNM_h = FMAXNM | FP16,
+ FMAXNM_s = FMAXNM,
+ FMAXNM_d = FMAXNM | FP64,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM_h = FMINNM | FP16,
+ FMINNM_s = FMINNM,
+ FMINNM_d = FMINNM | FP64,
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_h = FNMUL | FP16,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
+};
+
+// Floating point data processing 3 source.
+enum FPDataProcessing3SourceOp : uint32_t {
+ FPDataProcessing3SourceFixed = 0x1F000000,
+ FPDataProcessing3SourceFMask = 0x5F000000,
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_h = FPDataProcessing3SourceFixed | 0x00C00000,
+ FMSUB_h = FPDataProcessing3SourceFixed | 0x00C08000,
+ FNMADD_h = FPDataProcessing3SourceFixed | 0x00E00000,
+ FNMSUB_h = FPDataProcessing3SourceFixed | 0x00E08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+};
+
+// Conversion between floating point and integer.
+enum FPIntegerConvertOp : uint32_t {
+ FPIntegerConvertFixed = 0x1E200000,
+ FPIntegerConvertFMask = 0x5F20FC00,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FCVTNS_wh = FCVTNS | FP16,
+ FCVTNS_xh = FCVTNS | SixtyFourBits | FP16,
+ FCVTNS_ws = FCVTNS,
+ FCVTNS_xs = FCVTNS | SixtyFourBits,
+ FCVTNS_wd = FCVTNS | FP64,
+ FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU_wh = FCVTNU | FP16,
+ FCVTNU_xh = FCVTNU | SixtyFourBits | FP16,
+ FCVTNU_ws = FCVTNU,
+ FCVTNU_xs = FCVTNU | SixtyFourBits,
+ FCVTNU_wd = FCVTNU | FP64,
+ FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS_wh = FCVTPS | FP16,
+ FCVTPS_xh = FCVTPS | SixtyFourBits | FP16,
+ FCVTPS_ws = FCVTPS,
+ FCVTPS_xs = FCVTPS | SixtyFourBits,
+ FCVTPS_wd = FCVTPS | FP64,
+ FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU_wh = FCVTPU | FP16,
+ FCVTPU_xh = FCVTPU | SixtyFourBits | FP16,
+ FCVTPU_ws = FCVTPU,
+ FCVTPU_xs = FCVTPU | SixtyFourBits,
+ FCVTPU_wd = FCVTPU | FP64,
+ FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS_wh = FCVTMS | FP16,
+ FCVTMS_xh = FCVTMS | SixtyFourBits | FP16,
+ FCVTMS_ws = FCVTMS,
+ FCVTMS_xs = FCVTMS | SixtyFourBits,
+ FCVTMS_wd = FCVTMS | FP64,
+ FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU_wh = FCVTMU | FP16,
+ FCVTMU_xh = FCVTMU | SixtyFourBits | FP16,
+ FCVTMU_ws = FCVTMU,
+ FCVTMU_xs = FCVTMU | SixtyFourBits,
+ FCVTMU_wd = FCVTMU | FP64,
+ FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS_wh = FCVTZS | FP16,
+ FCVTZS_xh = FCVTZS | SixtyFourBits | FP16,
+ FCVTZS_ws = FCVTZS,
+ FCVTZS_xs = FCVTZS | SixtyFourBits,
+ FCVTZS_wd = FCVTZS | FP64,
+ FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU_wh = FCVTZU | FP16,
+ FCVTZU_xh = FCVTZU | SixtyFourBits | FP16,
+ FCVTZU_ws = FCVTZU,
+ FCVTZU_xs = FCVTZU | SixtyFourBits,
+ FCVTZU_wd = FCVTZU | FP64,
+ FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_hw = SCVTF | FP16,
+ SCVTF_hx = SCVTF | SixtyFourBits | FP16,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_hw = UCVTF | FP16,
+ UCVTF_hx = UCVTF | SixtyFourBits | FP16,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ FCVTAS_wh = FCVTAS | FP16,
+ FCVTAS_xh = FCVTAS | SixtyFourBits | FP16,
+ FCVTAS_ws = FCVTAS,
+ FCVTAS_xs = FCVTAS | SixtyFourBits,
+ FCVTAS_wd = FCVTAS | FP64,
+ FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU_wh = FCVTAU | FP16,
+ FCVTAU_xh = FCVTAU | SixtyFourBits | FP16,
+ FCVTAU_ws = FCVTAU,
+ FCVTAU_xs = FCVTAU | SixtyFourBits,
+ FCVTAU_wd = FCVTAU | FP64,
+ FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
+ FMOV_wh = FPIntegerConvertFixed | 0x00060000 | FP16,
+ FMOV_hw = FPIntegerConvertFixed | 0x00070000 | FP16,
+ FMOV_xh = FMOV_wh | SixtyFourBits,
+ FMOV_hx = FMOV_hw | SixtyFourBits,
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64,
+ FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000,
+ FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000,
+ FJCVTZS = FPIntegerConvertFixed | FP64 | 0x001E0000
+};
+
+// Conversion between fixed point and floating point.
+enum FPFixedPointConvertOp : uint32_t {
+ FPFixedPointConvertFixed = 0x1E000000,
+ FPFixedPointConvertFMask = 0x5F200000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FCVTZS_wh_fixed = FCVTZS_fixed | FP16,
+ FCVTZS_xh_fixed = FCVTZS_fixed | SixtyFourBits | FP16,
+ FCVTZS_ws_fixed = FCVTZS_fixed,
+ FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
+ FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
+ FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_wh_fixed = FCVTZU_fixed | FP16,
+ FCVTZU_xh_fixed = FCVTZU_fixed | SixtyFourBits | FP16,
+ FCVTZU_ws_fixed = FCVTZU_fixed,
+ FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
+ FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
+ FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_hw_fixed = SCVTF_fixed | FP16,
+ SCVTF_hx_fixed = SCVTF_fixed | SixtyFourBits | FP16,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_hw_fixed = UCVTF_fixed | FP16,
+ UCVTF_hx_fixed = UCVTF_fixed | SixtyFourBits | FP16,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+};
+
+// Crypto - two register SHA.
+enum Crypto2RegSHAOp : uint32_t {
+ Crypto2RegSHAFixed = 0x5E280800,
+ Crypto2RegSHAFMask = 0xFF3E0C00
+};
+
+// Crypto - three register SHA.
+enum Crypto3RegSHAOp : uint32_t {
+ Crypto3RegSHAFixed = 0x5E000000,
+ Crypto3RegSHAFMask = 0xFF208C00
+};
+
+// Crypto - AES.
+enum CryptoAESOp : uint32_t {
+ CryptoAESFixed = 0x4E280800,
+ CryptoAESFMask = 0xFF3E0C00
+};
+
+// NEON instructions with two register operands.
+enum NEON2RegMiscOp : uint32_t {
+ NEON2RegMiscFixed = 0x0E200800,
+ NEON2RegMiscFMask = 0x9F3E0C00,
+ NEON2RegMiscMask = 0xBF3FFC00,
+ NEON2RegMiscUBit = 0x20000000,
+ NEON_REV64 = NEON2RegMiscFixed | 0x00000000,
+ NEON_REV32 = NEON2RegMiscFixed | 0x20000000,
+ NEON_REV16 = NEON2RegMiscFixed | 0x00001000,
+ NEON_SADDLP = NEON2RegMiscFixed | 0x00002000,
+ NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit,
+ NEON_SUQADD = NEON2RegMiscFixed | 0x00003000,
+ NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit,
+ NEON_CLS = NEON2RegMiscFixed | 0x00004000,
+ NEON_CLZ = NEON2RegMiscFixed | 0x20004000,
+ NEON_CNT = NEON2RegMiscFixed | 0x00005000,
+ NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000,
+ NEON_SADALP = NEON2RegMiscFixed | 0x00006000,
+ NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit,
+ NEON_SQABS = NEON2RegMiscFixed | 0x00007000,
+ NEON_SQNEG = NEON2RegMiscFixed | 0x20007000,
+ NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000,
+ NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000,
+ NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000,
+ NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000,
+ NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000,
+ NEON_ABS = NEON2RegMiscFixed | 0x0000B000,
+ NEON_NEG = NEON2RegMiscFixed | 0x2000B000,
+ NEON_XTN = NEON2RegMiscFixed | 0x00012000,
+ NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000,
+ NEON_SHLL = NEON2RegMiscFixed | 0x20013000,
+ NEON_SQXTN = NEON2RegMiscFixed | 0x00014000,
+ NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit,
+
+ NEON2RegMiscOpcode = 0x0001F000,
+ NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode,
+ NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode,
+ NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode,
+ NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode,
+
+ // These instructions use only one bit of the size field. The other bit is
+ // used to distinguish between instructions.
+ NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000,
+ NEON_FABS = NEON2RegMiscFixed | 0x0080F000,
+ NEON_FNEG = NEON2RegMiscFixed | 0x2080F000,
+ NEON_FCVTN = NEON2RegMiscFixed | 0x00016000,
+ NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000,
+ NEON_FCVTL = NEON2RegMiscFixed | 0x00017000,
+ NEON_FRINT32X = NEON2RegMiscFixed | 0x2001E000,
+ NEON_FRINT32Z = NEON2RegMiscFixed | 0x0001E000,
+ NEON_FRINT64X = NEON2RegMiscFixed | 0x2001F000,
+ NEON_FRINT64Z = NEON2RegMiscFixed | 0x0001F000,
+ NEON_FRINTN = NEON2RegMiscFixed | 0x00018000,
+ NEON_FRINTA = NEON2RegMiscFixed | 0x20018000,
+ NEON_FRINTP = NEON2RegMiscFixed | 0x00818000,
+ NEON_FRINTM = NEON2RegMiscFixed | 0x00019000,
+ NEON_FRINTX = NEON2RegMiscFixed | 0x20019000,
+ NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000,
+ NEON_FRINTI = NEON2RegMiscFixed | 0x20819000,
+ NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000,
+ NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit,
+ NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000,
+ NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit,
+ NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000,
+ NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit,
+ NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000,
+ NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit,
+ NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000,
+ NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit,
+ NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000,
+ NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000,
+ NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit,
+ NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000,
+ NEON_URECPE = NEON2RegMiscFixed | 0x0081C000,
+ NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000,
+ NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000,
+ NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000,
+ NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000,
+ NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000,
+ NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000,
+ NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000,
+
+ NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode,
+ NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode
+};
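The comment above about the size field can be made concrete: for the FP ops in this group, one size bit (bit 23) is part of the opcode, so only the FP mask separates some pairs. A small sketch, assuming the enumerators above are in scope:

// FRINTN and FRINTP differ only in bit 23.
static_assert((NEON_FRINTN & NEON2RegMiscMask) ==
                  (NEON_FRINTP & NEON2RegMiscMask),
              "bit 23 is not covered by the integer mask");
static_assert((NEON_FRINTN & NEON2RegMiscFPMask) !=
                  (NEON_FRINTP & NEON2RegMiscFPMask),
              "the FP mask includes bit 23 and tells them apart");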
+
+// NEON instructions with two register operands (FP16).
+enum NEON2RegMiscFP16Op : uint32_t {
+ NEON2RegMiscFP16Fixed = 0x0E780800,
+ NEON2RegMiscFP16FMask = 0x9F7E0C00,
+ NEON2RegMiscFP16Mask = 0xBFFFFC00,
+ NEON_FRINTN_H = NEON2RegMiscFP16Fixed | 0x00018000,
+ NEON_FRINTM_H = NEON2RegMiscFP16Fixed | 0x00019000,
+ NEON_FCVTNS_H = NEON2RegMiscFP16Fixed | 0x0001A000,
+ NEON_FCVTMS_H = NEON2RegMiscFP16Fixed | 0x0001B000,
+ NEON_FCVTAS_H = NEON2RegMiscFP16Fixed | 0x0001C000,
+ NEON_SCVTF_H = NEON2RegMiscFP16Fixed | 0x0001D000,
+ NEON_FCMGT_H_zero = NEON2RegMiscFP16Fixed | 0x0080C000,
+ NEON_FCMEQ_H_zero = NEON2RegMiscFP16Fixed | 0x0080D000,
+ NEON_FCMLT_H_zero = NEON2RegMiscFP16Fixed | 0x0080E000,
+ NEON_FABS_H = NEON2RegMiscFP16Fixed | 0x0080F000,
+ NEON_FRINTP_H = NEON2RegMiscFP16Fixed | 0x00818000,
+ NEON_FRINTZ_H = NEON2RegMiscFP16Fixed | 0x00819000,
+ NEON_FCVTPS_H = NEON2RegMiscFP16Fixed | 0x0081A000,
+ NEON_FCVTZS_H = NEON2RegMiscFP16Fixed | 0x0081B000,
+ NEON_FRECPE_H = NEON2RegMiscFP16Fixed | 0x0081D000,
+ NEON_FRINTA_H = NEON2RegMiscFP16Fixed | 0x20018000,
+ NEON_FRINTX_H = NEON2RegMiscFP16Fixed | 0x20019000,
+ NEON_FCVTNU_H = NEON2RegMiscFP16Fixed | 0x2001A000,
+ NEON_FCVTMU_H = NEON2RegMiscFP16Fixed | 0x2001B000,
+ NEON_FCVTAU_H = NEON2RegMiscFP16Fixed | 0x2001C000,
+ NEON_UCVTF_H = NEON2RegMiscFP16Fixed | 0x2001D000,
+ NEON_FCMGE_H_zero = NEON2RegMiscFP16Fixed | 0x2080C000,
+ NEON_FCMLE_H_zero = NEON2RegMiscFP16Fixed | 0x2080D000,
+ NEON_FNEG_H = NEON2RegMiscFP16Fixed | 0x2080F000,
+ NEON_FRINTI_H = NEON2RegMiscFP16Fixed | 0x20819000,
+ NEON_FCVTPU_H = NEON2RegMiscFP16Fixed | 0x2081A000,
+ NEON_FCVTZU_H = NEON2RegMiscFP16Fixed | 0x2081B000,
+ NEON_FRSQRTE_H = NEON2RegMiscFP16Fixed | 0x2081D000,
+ NEON_FSQRT_H = NEON2RegMiscFP16Fixed | 0x2081F000
+};
+
+// NEON instructions with three same-type operands.
+enum NEON3SameOp : uint32_t {
+ NEON3SameFixed = 0x0E200400,
+ NEON3SameFMask = 0x9F200400,
+ NEON3SameMask = 0xBF20FC00,
+ NEON3SameUBit = 0x20000000,
+ NEON_ADD = NEON3SameFixed | 0x00008000,
+ NEON_ADDP = NEON3SameFixed | 0x0000B800,
+ NEON_SHADD = NEON3SameFixed | 0x00000000,
+ NEON_SHSUB = NEON3SameFixed | 0x00002000,
+ NEON_SRHADD = NEON3SameFixed | 0x00001000,
+ NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800,
+ NEON_CMGE = NEON3SameFixed | 0x00003800,
+ NEON_CMGT = NEON3SameFixed | 0x00003000,
+ NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT,
+ NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE,
+ NEON_CMTST = NEON3SameFixed | 0x00008800,
+ NEON_MLA = NEON3SameFixed | 0x00009000,
+ NEON_MLS = NEON3SameFixed | 0x20009000,
+ NEON_MUL = NEON3SameFixed | 0x00009800,
+ NEON_PMUL = NEON3SameFixed | 0x20009800,
+ NEON_SRSHL = NEON3SameFixed | 0x00005000,
+ NEON_SQSHL = NEON3SameFixed | 0x00004800,
+ NEON_SQRSHL = NEON3SameFixed | 0x00005800,
+ NEON_SSHL = NEON3SameFixed | 0x00004000,
+ NEON_SMAX = NEON3SameFixed | 0x00006000,
+ NEON_SMAXP = NEON3SameFixed | 0x0000A000,
+ NEON_SMIN = NEON3SameFixed | 0x00006800,
+ NEON_SMINP = NEON3SameFixed | 0x0000A800,
+ NEON_SABD = NEON3SameFixed | 0x00007000,
+ NEON_SABA = NEON3SameFixed | 0x00007800,
+ NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD,
+ NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA,
+ NEON_SQADD = NEON3SameFixed | 0x00000800,
+ NEON_SQSUB = NEON3SameFixed | 0x00002800,
+ NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000,
+ NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD,
+ NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB,
+ NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD,
+ NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX,
+ NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP,
+ NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN,
+ NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP,
+ NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL,
+ NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD,
+ NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL,
+ NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL,
+ NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB,
+ NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL,
+ NEON_SQDMULH = NEON3SameFixed | 0x0000B000,
+ NEON_SQRDMULH = NEON3SameFixed | 0x2000B000,
+
+ // NEON floating point instructions with three same-type operands.
+ NEON3SameFPFixed = NEON3SameFixed | 0x0000C000,
+ NEON3SameFPFMask = NEON3SameFMask | 0x0000C000,
+ NEON3SameFPMask = NEON3SameMask | 0x00800000,
+ NEON_FADD = NEON3SameFixed | 0x0000D000,
+ NEON_FSUB = NEON3SameFixed | 0x0080D000,
+ NEON_FMUL = NEON3SameFixed | 0x2000D800,
+ NEON_FDIV = NEON3SameFixed | 0x2000F800,
+ NEON_FMAX = NEON3SameFixed | 0x0000F000,
+ NEON_FMAXNM = NEON3SameFixed | 0x0000C000,
+ NEON_FMAXP = NEON3SameFixed | 0x2000F000,
+ NEON_FMAXNMP = NEON3SameFixed | 0x2000C000,
+ NEON_FMIN = NEON3SameFixed | 0x0080F000,
+ NEON_FMINNM = NEON3SameFixed | 0x0080C000,
+ NEON_FMINP = NEON3SameFixed | 0x2080F000,
+ NEON_FMINNMP = NEON3SameFixed | 0x2080C000,
+ NEON_FMLA = NEON3SameFixed | 0x0000C800,
+ NEON_FMLS = NEON3SameFixed | 0x0080C800,
+ NEON_FMULX = NEON3SameFixed | 0x0000D800,
+ NEON_FRECPS = NEON3SameFixed | 0x0000F800,
+ NEON_FRSQRTS = NEON3SameFixed | 0x0080F800,
+ NEON_FABD = NEON3SameFixed | 0x2080D000,
+ NEON_FADDP = NEON3SameFixed | 0x2000D000,
+ NEON_FCMEQ = NEON3SameFixed | 0x0000E000,
+ NEON_FCMGE = NEON3SameFixed | 0x2000E000,
+ NEON_FCMGT = NEON3SameFixed | 0x2080E000,
+ NEON_FACGE = NEON3SameFixed | 0x2000E800,
+ NEON_FACGT = NEON3SameFixed | 0x2080E800,
+
+ // NEON logical instructions with three same-type operands.
+ NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800,
+ NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800,
+ NEON3SameLogicalMask = 0xBFE0FC00,
+ NEON3SameLogicalFormatMask = NEON_Q,
+ NEON_AND = NEON3SameLogicalFixed | 0x00000000,
+ NEON_ORR = NEON3SameLogicalFixed | 0x00A00000,
+ NEON_ORN = NEON3SameLogicalFixed | 0x00C00000,
+ NEON_EOR = NEON3SameLogicalFixed | 0x20000000,
+ NEON_BIC = NEON3SameLogicalFixed | 0x00400000,
+ NEON_BIF = NEON3SameLogicalFixed | 0x20C00000,
+ NEON_BIT = NEON3SameLogicalFixed | 0x20800000,
+ NEON_BSL = NEON3SameLogicalFixed | 0x20400000,
+
+ // FHM (FMLAL-like) instructions have an oddball encoding scheme under 3Same.
+ NEON3SameFHMMask = 0xBFE0FC00, // U size opcode
+ NEON_FMLAL = NEON3SameFixed | 0x0000E800, // 0 00 11101
+ NEON_FMLAL2 = NEON3SameFixed | 0x2000C800, // 1 00 11001
+ NEON_FMLSL = NEON3SameFixed | 0x0080E800, // 0 10 11101
+ NEON_FMLSL2 = NEON3SameFixed | 0x2080C800 // 1 10 11001
+};
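Because the FHM ops reuse the size field as opcode bits (per the bit comments above), a decoder needs the dedicated NEON3SameFHMMask for them. A sketch of such a membership check, with an illustrative helper name and assuming the enumerators above are in scope:

constexpr bool IsNEON3SameFHM(uint32_t instr) {
  // Match with the wider FHM mask, which also covers the size field,
  // before falling back to the regular NEON3Same decoding.
  const uint32_t op = instr & NEON3SameFHMMask;
  return op == NEON_FMLAL || op == NEON_FMLAL2 ||
         op == NEON_FMLSL || op == NEON_FMLSL2;
}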
+
+enum NEON3SameFP16 : uint32_t {
+ NEON3SameFP16Fixed = 0x0E400400,
+ NEON3SameFP16FMask = 0x9F60C400,
+ NEON3SameFP16Mask = 0xBFE0FC00,
+ NEON_FMAXNM_H = NEON3SameFP16Fixed | 0x00000000,
+ NEON_FMLA_H = NEON3SameFP16Fixed | 0x00000800,
+ NEON_FADD_H = NEON3SameFP16Fixed | 0x00001000,
+ NEON_FMULX_H = NEON3SameFP16Fixed | 0x00001800,
+ NEON_FCMEQ_H = NEON3SameFP16Fixed | 0x00002000,
+ NEON_FMAX_H = NEON3SameFP16Fixed | 0x00003000,
+ NEON_FRECPS_H = NEON3SameFP16Fixed | 0x00003800,
+ NEON_FMINNM_H = NEON3SameFP16Fixed | 0x00800000,
+ NEON_FMLS_H = NEON3SameFP16Fixed | 0x00800800,
+ NEON_FSUB_H = NEON3SameFP16Fixed | 0x00801000,
+ NEON_FMIN_H = NEON3SameFP16Fixed | 0x00803000,
+ NEON_FRSQRTS_H = NEON3SameFP16Fixed | 0x00803800,
+ NEON_FMAXNMP_H = NEON3SameFP16Fixed | 0x20000000,
+ NEON_FADDP_H = NEON3SameFP16Fixed | 0x20001000,
+ NEON_FMUL_H = NEON3SameFP16Fixed | 0x20001800,
+ NEON_FCMGE_H = NEON3SameFP16Fixed | 0x20002000,
+ NEON_FACGE_H = NEON3SameFP16Fixed | 0x20002800,
+ NEON_FMAXP_H = NEON3SameFP16Fixed | 0x20003000,
+ NEON_FDIV_H = NEON3SameFP16Fixed | 0x20003800,
+ NEON_FMINNMP_H = NEON3SameFP16Fixed | 0x20800000,
+ NEON_FABD_H = NEON3SameFP16Fixed | 0x20801000,
+ NEON_FCMGT_H = NEON3SameFP16Fixed | 0x20802000,
+ NEON_FACGT_H = NEON3SameFP16Fixed | 0x20802800,
+ NEON_FMINP_H = NEON3SameFP16Fixed | 0x20803000
+};
+
+// 'Extra' NEON instructions with three same-type operands.
+enum NEON3SameExtraOp : uint32_t {
+ NEON3SameExtraFixed = 0x0E008400,
+ NEON3SameExtraUBit = 0x20000000,
+ NEON3SameExtraFMask = 0x9E208400,
+ NEON3SameExtraMask = 0xBE20FC00,
+ NEON_SQRDMLAH = NEON3SameExtraFixed | NEON3SameExtraUBit,
+ NEON_SQRDMLSH = NEON3SameExtraFixed | NEON3SameExtraUBit | 0x00000800,
+ NEON_SDOT = NEON3SameExtraFixed | 0x00001000,
+ NEON_UDOT = NEON3SameExtraFixed | NEON3SameExtraUBit | 0x00001000,
+
+  // v8.3 complex number instructions.
+ NEON3SameExtraFCFixed = 0x2E00C400,
+ NEON3SameExtraFCFMask = 0xBF20C400,
+ // FCMLA fixes opcode<3:2>, and uses opcode<1:0> to encode <rotate>.
+ NEON3SameExtraFCMLAMask = NEON3SameExtraFCFMask | 0x00006000,
+ NEON_FCMLA = NEON3SameExtraFCFixed,
+ // FCADD fixes opcode<3:2, 0>, and uses opcode<1> to encode <rotate>.
+ NEON3SameExtraFCADDMask = NEON3SameExtraFCFMask | 0x00006800,
+ NEON_FCADD = NEON3SameExtraFCFixed | 0x00002000
+ // Other encodings under NEON3SameExtraFCFMask are UNALLOCATED.
+};
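A sketch of how the <rotate> operand might be folded into these encodings; the field positions (bits 12:11 for FCMLA, bit 12 for FCADD) are inferred from the opcode numbering in the comments above and are an assumption of this sketch:

constexpr uint32_t EncodeFCMLARotate(uint32_t fcmla_base, int rot_deg) {
  // rot_deg in {0, 90, 180, 270} maps to opcode<1:0>, i.e. bits 12:11.
  return fcmla_base | (static_cast<uint32_t>(rot_deg / 90) << 11);
}

constexpr uint32_t EncodeFCADDRotate(uint32_t fcadd_base, int rot_deg) {
  // rot_deg in {90, 270} maps to opcode<1>, i.e. bit 12.
  return fcadd_base | ((rot_deg == 270 ? 1u : 0u) << 12);
}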
+
+// NEON instructions with three different-type operands.
+enum NEON3DifferentOp : uint32_t {
+ NEON3DifferentFixed = 0x0E200000,
+ NEON3DifferentFMask = 0x9F200C00,
+ NEON3DifferentMask = 0xFF20FC00,
+ NEON_ADDHN = NEON3DifferentFixed | 0x00004000,
+ NEON_ADDHN2 = NEON_ADDHN | NEON_Q,
+ NEON_PMULL = NEON3DifferentFixed | 0x0000E000,
+ NEON_PMULL2 = NEON_PMULL | NEON_Q,
+ NEON_RADDHN = NEON3DifferentFixed | 0x20004000,
+ NEON_RADDHN2 = NEON_RADDHN | NEON_Q,
+ NEON_RSUBHN = NEON3DifferentFixed | 0x20006000,
+ NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q,
+ NEON_SABAL = NEON3DifferentFixed | 0x00005000,
+ NEON_SABAL2 = NEON_SABAL | NEON_Q,
+ NEON_SABDL = NEON3DifferentFixed | 0x00007000,
+ NEON_SABDL2 = NEON_SABDL | NEON_Q,
+ NEON_SADDL = NEON3DifferentFixed | 0x00000000,
+ NEON_SADDL2 = NEON_SADDL | NEON_Q,
+ NEON_SADDW = NEON3DifferentFixed | 0x00001000,
+ NEON_SADDW2 = NEON_SADDW | NEON_Q,
+ NEON_SMLAL = NEON3DifferentFixed | 0x00008000,
+ NEON_SMLAL2 = NEON_SMLAL | NEON_Q,
+ NEON_SMLSL = NEON3DifferentFixed | 0x0000A000,
+ NEON_SMLSL2 = NEON_SMLSL | NEON_Q,
+ NEON_SMULL = NEON3DifferentFixed | 0x0000C000,
+ NEON_SMULL2 = NEON_SMULL | NEON_Q,
+ NEON_SSUBL = NEON3DifferentFixed | 0x00002000,
+ NEON_SSUBL2 = NEON_SSUBL | NEON_Q,
+ NEON_SSUBW = NEON3DifferentFixed | 0x00003000,
+ NEON_SSUBW2 = NEON_SSUBW | NEON_Q,
+ NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000,
+ NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q,
+ NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000,
+ NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q,
+ NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000,
+ NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q,
+ NEON_SUBHN = NEON3DifferentFixed | 0x00006000,
+ NEON_SUBHN2 = NEON_SUBHN | NEON_Q,
+ NEON_UABAL = NEON_SABAL | NEON3SameUBit,
+ NEON_UABAL2 = NEON_UABAL | NEON_Q,
+ NEON_UABDL = NEON_SABDL | NEON3SameUBit,
+ NEON_UABDL2 = NEON_UABDL | NEON_Q,
+ NEON_UADDL = NEON_SADDL | NEON3SameUBit,
+ NEON_UADDL2 = NEON_UADDL | NEON_Q,
+ NEON_UADDW = NEON_SADDW | NEON3SameUBit,
+ NEON_UADDW2 = NEON_UADDW | NEON_Q,
+ NEON_UMLAL = NEON_SMLAL | NEON3SameUBit,
+ NEON_UMLAL2 = NEON_UMLAL | NEON_Q,
+ NEON_UMLSL = NEON_SMLSL | NEON3SameUBit,
+ NEON_UMLSL2 = NEON_UMLSL | NEON_Q,
+ NEON_UMULL = NEON_SMULL | NEON3SameUBit,
+ NEON_UMULL2 = NEON_UMULL | NEON_Q,
+ NEON_USUBL = NEON_SSUBL | NEON3SameUBit,
+ NEON_USUBL2 = NEON_USUBL | NEON_Q,
+ NEON_USUBW = NEON_SSUBW | NEON3SameUBit,
+ NEON_USUBW2 = NEON_USUBW | NEON_Q
+};
+
+// NEON instructions operating across vectors.
+enum NEONAcrossLanesOp : uint32_t {
+ NEONAcrossLanesFixed = 0x0E300800,
+ NEONAcrossLanesFMask = 0x9F3E0C00,
+ NEONAcrossLanesMask = 0xBF3FFC00,
+ NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000,
+ NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000,
+ NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000,
+ NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000,
+ NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000,
+ NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000,
+ NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000,
+
+ NEONAcrossLanesFP16Fixed = NEONAcrossLanesFixed | 0x0000C000,
+ NEONAcrossLanesFP16FMask = NEONAcrossLanesFMask | 0x2000C000,
+ NEONAcrossLanesFP16Mask = NEONAcrossLanesMask | 0x20800000,
+ NEON_FMAXNMV_H = NEONAcrossLanesFP16Fixed | 0x00000000,
+ NEON_FMAXV_H = NEONAcrossLanesFP16Fixed | 0x00003000,
+ NEON_FMINNMV_H = NEONAcrossLanesFP16Fixed | 0x00800000,
+ NEON_FMINV_H = NEONAcrossLanesFP16Fixed | 0x00803000,
+
+  // NEON floating point across-lanes instructions.
+ NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x2000C000,
+ NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x2000C000,
+ NEONAcrossLanesFPMask = NEONAcrossLanesMask | 0x20800000,
+
+ NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000,
+ NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000,
+ NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000,
+ NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000
+};
+
+// NEON instructions with indexed element operand.
+enum NEONByIndexedElementOp : uint32_t {
+ NEONByIndexedElementFixed = 0x0F000000,
+ NEONByIndexedElementFMask = 0x9F000400,
+ NEONByIndexedElementMask = 0xBF00F400,
+ NEON_MUL_byelement = NEONByIndexedElementFixed | 0x00008000,
+ NEON_MLA_byelement = NEONByIndexedElementFixed | 0x20000000,
+ NEON_MLS_byelement = NEONByIndexedElementFixed | 0x20004000,
+ NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000,
+ NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000,
+ NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000,
+ NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000,
+ NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000,
+ NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000,
+ NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000,
+ NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000,
+ NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000,
+ NEON_SQDMULH_byelement = NEONByIndexedElementFixed | 0x0000C000,
+ NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000,
+ NEON_SDOT_byelement = NEONByIndexedElementFixed | 0x0000E000,
+ NEON_SQRDMLAH_byelement = NEONByIndexedElementFixed | 0x2000D000,
+ NEON_UDOT_byelement = NEONByIndexedElementFixed | 0x2000E000,
+ NEON_SQRDMLSH_byelement = NEONByIndexedElementFixed | 0x2000F000,
+
+ NEON_FMLA_H_byelement = NEONByIndexedElementFixed | 0x00001000,
+ NEON_FMLS_H_byelement = NEONByIndexedElementFixed | 0x00005000,
+ NEON_FMUL_H_byelement = NEONByIndexedElementFixed | 0x00009000,
+ NEON_FMULX_H_byelement = NEONByIndexedElementFixed | 0x20009000,
+
+ // Floating point instructions.
+ NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000,
+ NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000,
+ NEON_FMLA_byelement = NEONByIndexedElementFPFixed | 0x00001000,
+ NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000,
+ NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000,
+ NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000,
+
+ // FMLAL-like instructions.
+ // For all cases: U = x, size = 10, opcode = xx00
+ NEONByIndexedElementFPLongFixed = NEONByIndexedElementFixed | 0x00800000,
+ NEONByIndexedElementFPLongFMask = NEONByIndexedElementFMask | 0x00C03000,
+ NEONByIndexedElementFPLongMask = 0xBFC0F400,
+ NEON_FMLAL_H_byelement = NEONByIndexedElementFixed | 0x00800000,
+ NEON_FMLAL2_H_byelement = NEONByIndexedElementFixed | 0x20808000,
+ NEON_FMLSL_H_byelement = NEONByIndexedElementFixed | 0x00804000,
+ NEON_FMLSL2_H_byelement = NEONByIndexedElementFixed | 0x2080C000,
+
+  // Complex number instructions (FCMLA by element).
+  // A separate mask is necessary because the 'rot' field overlaps bits that
+  // are otherwise covered by NEONByIndexedElementMask.
+ NEONByIndexedElementFPComplexMask = 0xBF009400,
+ NEON_FCMLA_byelement = NEONByIndexedElementFixed | 0x20001000
+};
+
+// NEON register copy.
+enum NEONCopyOp : uint32_t {
+ NEONCopyFixed = 0x0E000400,
+ NEONCopyFMask = 0x9FE08400,
+ NEONCopyMask = 0x3FE08400,
+ NEONCopyInsElementMask = NEONCopyMask | 0x40000000,
+ NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800,
+ NEONCopyDupElementMask = NEONCopyMask | 0x20007800,
+ NEONCopyDupGeneralMask = NEONCopyDupElementMask,
+ NEONCopyUmovMask = NEONCopyMask | 0x20007800,
+ NEONCopySmovMask = NEONCopyMask | 0x20007800,
+ NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000,
+ NEON_INS_GENERAL = NEONCopyFixed | 0x40001800,
+ NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000,
+ NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800,
+ NEON_SMOV = NEONCopyFixed | 0x00002800,
+ NEON_UMOV = NEONCopyFixed | 0x00003800
+};
+
+// NEON extract.
+enum NEONExtractOp : uint32_t {
+ NEONExtractFixed = 0x2E000000,
+ NEONExtractFMask = 0xBF208400,
+ NEONExtractMask = 0xBFE08400,
+ NEON_EXT = NEONExtractFixed | 0x00000000
+};
+
+enum NEONLoadStoreMultiOp : uint32_t {
+ NEONLoadStoreMultiL = 0x00400000,
+ NEONLoadStoreMulti1_1v = 0x00007000,
+ NEONLoadStoreMulti1_2v = 0x0000A000,
+ NEONLoadStoreMulti1_3v = 0x00006000,
+ NEONLoadStoreMulti1_4v = 0x00002000,
+ NEONLoadStoreMulti2 = 0x00008000,
+ NEONLoadStoreMulti3 = 0x00004000,
+ NEONLoadStoreMulti4 = 0x00000000
+};
+
+// NEON load/store multiple structures.
+enum NEONLoadStoreMultiStructOp : uint32_t {
+ NEONLoadStoreMultiStructFixed = 0x0C000000,
+ NEONLoadStoreMultiStructFMask = 0xBFBF0000,
+ NEONLoadStoreMultiStructMask = 0xBFFFF000,
+ NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed,
+ NEONLoadStoreMultiStructLoad = NEONLoadStoreMultiStructFixed |
+ NEONLoadStoreMultiL,
+ NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v,
+ NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v,
+ NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v,
+ NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v,
+ NEON_LD2 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2,
+ NEON_LD3 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3,
+ NEON_LD4 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4,
+ NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v,
+ NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v,
+ NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v,
+ NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v,
+ NEON_ST2 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2,
+ NEON_ST3 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3,
+ NEON_ST4 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4
+};
+
+// NEON load/store multiple structures with post-index addressing.
+enum NEONLoadStoreMultiStructPostIndexOp : uint32_t {
+ NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000,
+ NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000,
+ NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000,
+ NEONLoadStoreMultiStructPostIndex = 0x00800000,
+ NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex
+};
+
+enum NEONLoadStoreSingleOp : uint32_t {
+ NEONLoadStoreSingle1 = 0x00000000,
+ NEONLoadStoreSingle2 = 0x00200000,
+ NEONLoadStoreSingle3 = 0x00002000,
+ NEONLoadStoreSingle4 = 0x00202000,
+ NEONLoadStoreSingleL = 0x00400000,
+ NEONLoadStoreSingle_b = 0x00000000,
+ NEONLoadStoreSingle_h = 0x00004000,
+ NEONLoadStoreSingle_s = 0x00008000,
+ NEONLoadStoreSingle_d = 0x00008400,
+ NEONLoadStoreSingleAllLanes = 0x0000C000,
+ NEONLoadStoreSingleLenMask = 0x00202000
+};
+
+// NEON load/store single structure.
+enum NEONLoadStoreSingleStructOp : uint32_t {
+ NEONLoadStoreSingleStructFixed = 0x0D000000,
+ NEONLoadStoreSingleStructFMask = 0xBF9F0000,
+ NEONLoadStoreSingleStructMask = 0xBFFFE000,
+ NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructLoad = NEONLoadStoreSingleStructFixed |
+ NEONLoadStoreSingleL,
+ NEONLoadStoreSingleStructLoad1 = NEONLoadStoreSingle1 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad2 = NEONLoadStoreSingle2 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad3 = NEONLoadStoreSingle3 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad4 = NEONLoadStoreSingle4 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructStore1 = NEONLoadStoreSingle1 |
+ NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore2 = NEONLoadStoreSingle2 |
+ NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore3 = NEONLoadStoreSingle3 |
+ NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore4 = NEONLoadStoreSingle4 |
+ NEONLoadStoreSingleStructFixed,
+ NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b,
+ NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h,
+ NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s,
+ NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d,
+ NEON_LD1R = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes,
+ NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b,
+ NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h,
+ NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s,
+ NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d,
+
+ NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b,
+ NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_h,
+ NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s,
+ NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_d,
+ NEON_LD2R = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes,
+ NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b,
+ NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h,
+ NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s,
+ NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d,
+
+ NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b,
+ NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h,
+ NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s,
+ NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d,
+ NEON_LD3R = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes,
+ NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b,
+ NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h,
+ NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s,
+ NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d,
+
+ NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b,
+ NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h,
+ NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s,
+ NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d,
+ NEON_LD4R = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes,
+ NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b,
+ NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h,
+ NEON_ST4_s = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s,
+ NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d
+};
+
+// NEON load/store single structure with post-index addressing.
+enum NEONLoadStoreSingleStructPostIndexOp : uint32_t {
+ NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000,
+ NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000,
+ NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000,
+ NEONLoadStoreSingleStructPostIndex = 0x00800000,
+ NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1R_post = NEON_LD1R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2R_post = NEON_LD2R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3R_post = NEON_LD3R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4R_post = NEON_LD4R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex
+};
+
+// NEON modified immediate.
+enum NEONModifiedImmediateOp : uint32_t {
+ NEONModifiedImmediateFixed = 0x0F000400,
+ NEONModifiedImmediateFMask = 0x9FF80400,
+ NEONModifiedImmediateOpBit = 0x20000000,
+ NEONModifiedImmediate_FMOV = NEONModifiedImmediateFixed | 0x00000800,
+ NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000,
+ NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000,
+ NEONModifiedImmediate_ORR = NEONModifiedImmediateFixed | 0x00001000,
+ NEONModifiedImmediate_BIC = NEONModifiedImmediateFixed | 0x20001000
+};
+
+// NEON shift immediate.
+enum NEONShiftImmediateOp : uint32_t {
+ NEONShiftImmediateFixed = 0x0F000400,
+ NEONShiftImmediateFMask = 0x9F800400,
+ NEONShiftImmediateMask = 0xBF80FC00,
+ NEONShiftImmediateUBit = 0x20000000,
+ NEON_SHL = NEONShiftImmediateFixed | 0x00005000,
+ NEON_SSHLL = NEONShiftImmediateFixed | 0x0000A000,
+ NEON_USHLL = NEONShiftImmediateFixed | 0x2000A000,
+ NEON_SLI = NEONShiftImmediateFixed | 0x20005000,
+ NEON_SRI = NEONShiftImmediateFixed | 0x20004000,
+ NEON_SHRN = NEONShiftImmediateFixed | 0x00008000,
+ NEON_RSHRN = NEONShiftImmediateFixed | 0x00008800,
+ NEON_UQSHRN = NEONShiftImmediateFixed | 0x20009000,
+ NEON_UQRSHRN = NEONShiftImmediateFixed | 0x20009800,
+ NEON_SQSHRN = NEONShiftImmediateFixed | 0x00009000,
+ NEON_SQRSHRN = NEONShiftImmediateFixed | 0x00009800,
+ NEON_SQSHRUN = NEONShiftImmediateFixed | 0x20008000,
+ NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800,
+ NEON_SSHR = NEONShiftImmediateFixed | 0x00000000,
+ NEON_SRSHR = NEONShiftImmediateFixed | 0x00002000,
+ NEON_USHR = NEONShiftImmediateFixed | 0x20000000,
+ NEON_URSHR = NEONShiftImmediateFixed | 0x20002000,
+ NEON_SSRA = NEONShiftImmediateFixed | 0x00001000,
+ NEON_SRSRA = NEONShiftImmediateFixed | 0x00003000,
+ NEON_USRA = NEONShiftImmediateFixed | 0x20001000,
+ NEON_URSRA = NEONShiftImmediateFixed | 0x20003000,
+ NEON_SQSHLU = NEONShiftImmediateFixed | 0x20006000,
+ NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000,
+ NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000,
+ NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800,
+ NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800,
+ NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000,
+ NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000
+};
+
+// NEON table.
+enum NEONTableOp : uint32_t {
+ NEONTableFixed = 0x0E000000,
+ NEONTableFMask = 0xBF208C00,
+ NEONTableExt = 0x00001000,
+ NEONTableMask = 0xBF20FC00,
+ NEON_TBL_1v = NEONTableFixed | 0x00000000,
+ NEON_TBL_2v = NEONTableFixed | 0x00002000,
+ NEON_TBL_3v = NEONTableFixed | 0x00004000,
+ NEON_TBL_4v = NEONTableFixed | 0x00006000,
+ NEON_TBX_1v = NEON_TBL_1v | NEONTableExt,
+ NEON_TBX_2v = NEON_TBL_2v | NEONTableExt,
+ NEON_TBX_3v = NEON_TBL_3v | NEONTableExt,
+ NEON_TBX_4v = NEON_TBL_4v | NEONTableExt
+};
+
+// NEON perm.
+enum NEONPermOp : uint32_t {
+ NEONPermFixed = 0x0E000800,
+ NEONPermFMask = 0xBF208C00,
+ NEONPermMask = 0x3F20FC00,
+ NEON_UZP1 = NEONPermFixed | 0x00001000,
+ NEON_TRN1 = NEONPermFixed | 0x00002000,
+ NEON_ZIP1 = NEONPermFixed | 0x00003000,
+ NEON_UZP2 = NEONPermFixed | 0x00005000,
+ NEON_TRN2 = NEONPermFixed | 0x00006000,
+ NEON_ZIP2 = NEONPermFixed | 0x00007000
+};
+
+// NEON scalar instructions with two register operands.
+enum NEONScalar2RegMiscOp : uint32_t {
+ NEONScalar2RegMiscFixed = 0x5E200800,
+ NEONScalar2RegMiscFMask = 0xDF3E0C00,
+ NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask,
+ NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero,
+ NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero,
+ NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero,
+ NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero,
+ NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero,
+ NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS,
+ NEON_SQABS_scalar = NEON_Q | NEONScalar | NEON_SQABS,
+ NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG,
+ NEON_SQNEG_scalar = NEON_Q | NEONScalar | NEON_SQNEG,
+ NEON_SQXTN_scalar = NEON_Q | NEONScalar | NEON_SQXTN,
+ NEON_UQXTN_scalar = NEON_Q | NEONScalar | NEON_UQXTN,
+ NEON_SQXTUN_scalar = NEON_Q | NEONScalar | NEON_SQXTUN,
+ NEON_SUQADD_scalar = NEON_Q | NEONScalar | NEON_SUQADD,
+ NEON_USQADD_scalar = NEON_Q | NEONScalar | NEON_USQADD,
+
+ NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode,
+ NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode,
+
+ NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000,
+ NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE,
+ NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE,
+ NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF,
+ NEON_UCVTF_scalar = NEON_Q | NEONScalar | NEON_UCVTF,
+ NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero,
+ NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero,
+ NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero,
+ NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero,
+ NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero,
+ NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000,
+ NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS,
+ NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU,
+ NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS,
+ NEON_FCVTPU_scalar = NEON_Q | NEONScalar | NEON_FCVTPU,
+ NEON_FCVTMS_scalar = NEON_Q | NEONScalar | NEON_FCVTMS,
+ NEON_FCVTMU_scalar = NEON_Q | NEONScalar | NEON_FCVTMU,
+ NEON_FCVTZS_scalar = NEON_Q | NEONScalar | NEON_FCVTZS,
+ NEON_FCVTZU_scalar = NEON_Q | NEONScalar | NEON_FCVTZU,
+ NEON_FCVTAS_scalar = NEON_Q | NEONScalar | NEON_FCVTAS,
+ NEON_FCVTAU_scalar = NEON_Q | NEONScalar | NEON_FCVTAU,
+ NEON_FCVTXN_scalar = NEON_Q | NEONScalar | NEON_FCVTXN
+};
+
+// NEON scalar instructions with two register operands (FP16).
+enum NEONScalar2RegMiscFP16Op : uint32_t {
+ NEONScalar2RegMiscFP16Fixed = 0x5E780800,
+ NEONScalar2RegMiscFP16FMask = 0xDF7E0C00,
+ NEONScalar2RegMiscFP16Mask = 0xFFFFFC00,
+ NEON_FCVTNS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTNS_H,
+ NEON_FCVTMS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTMS_H,
+ NEON_FCVTAS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTAS_H,
+ NEON_SCVTF_H_scalar = NEON_Q | NEONScalar | NEON_SCVTF_H,
+ NEON_FCMGT_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_H_zero,
+ NEON_FCMEQ_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_H_zero,
+ NEON_FCMLT_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_H_zero,
+ NEON_FCVTPS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTPS_H,
+ NEON_FCVTZS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_H,
+ NEON_FRECPE_H_scalar = NEON_Q | NEONScalar | NEON_FRECPE_H,
+ NEON_FRECPX_H_scalar = NEONScalar2RegMiscFP16Fixed | 0x0081F000,
+ NEON_FCVTNU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTNU_H,
+ NEON_FCVTMU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTMU_H,
+ NEON_FCVTAU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTAU_H,
+ NEON_UCVTF_H_scalar = NEON_Q | NEONScalar | NEON_UCVTF_H,
+ NEON_FCMGE_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_H_zero,
+ NEON_FCMLE_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_H_zero,
+ NEON_FCVTPU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTPU_H,
+ NEON_FCVTZU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_H,
+ NEON_FRSQRTE_H_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE_H
+};
+
+// NEON scalar instructions with three same-type operands.
+enum NEONScalar3SameOp : uint32_t {
+ NEONScalar3SameFixed = 0x5E200400,
+ NEONScalar3SameFMask = 0xDF200400,
+ NEONScalar3SameMask = 0xFF20FC00,
+ NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD,
+ NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ,
+ NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE,
+ NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT,
+ NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI,
+ NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS,
+ NEON_CMTST_scalar = NEON_Q | NEONScalar | NEON_CMTST,
+ NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB,
+ NEON_UQADD_scalar = NEON_Q | NEONScalar | NEON_UQADD,
+ NEON_SQADD_scalar = NEON_Q | NEONScalar | NEON_SQADD,
+ NEON_UQSUB_scalar = NEON_Q | NEONScalar | NEON_UQSUB,
+ NEON_SQSUB_scalar = NEON_Q | NEONScalar | NEON_SQSUB,
+ NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL,
+ NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL,
+ NEON_UQSHL_scalar = NEON_Q | NEONScalar | NEON_UQSHL,
+ NEON_SQSHL_scalar = NEON_Q | NEONScalar | NEON_SQSHL,
+ NEON_URSHL_scalar = NEON_Q | NEONScalar | NEON_URSHL,
+ NEON_SRSHL_scalar = NEON_Q | NEONScalar | NEON_SRSHL,
+ NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL,
+ NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL,
+ NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH,
+ NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH,
+
+ // NEON floating point scalar instructions with three same-type operands.
+ NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000,
+ NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000,
+ NEONScalar3SameFPMask = NEONScalar3SameMask | 0x00800000,
+ NEON_FACGE_scalar = NEON_Q | NEONScalar | NEON_FACGE,
+ NEON_FACGT_scalar = NEON_Q | NEONScalar | NEON_FACGT,
+ NEON_FCMEQ_scalar = NEON_Q | NEONScalar | NEON_FCMEQ,
+ NEON_FCMGE_scalar = NEON_Q | NEONScalar | NEON_FCMGE,
+ NEON_FCMGT_scalar = NEON_Q | NEONScalar | NEON_FCMGT,
+ NEON_FMULX_scalar = NEON_Q | NEONScalar | NEON_FMULX,
+ NEON_FRECPS_scalar = NEON_Q | NEONScalar | NEON_FRECPS,
+ NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS,
+ NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD
+};
+
+// NEON scalar instructions with three different-type operands.
+enum NEONScalar3DiffOp : uint32_t {
+ NEONScalar3DiffFixed = 0x5E200000,
+ NEONScalar3DiffFMask = 0xDF200C00,
+ NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask,
+ NEON_SQDMLAL_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL,
+ NEON_SQDMLSL_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL,
+ NEON_SQDMULL_scalar = NEON_Q | NEONScalar | NEON_SQDMULL
+};
+
+// NEON scalar instructions with indexed element operand.
+enum NEONScalarByIndexedElementOp : uint32_t {
+ NEONScalarByIndexedElementFixed = 0x5F000000,
+ NEONScalarByIndexedElementFMask = 0xDF000400,
+ NEONScalarByIndexedElementMask = 0xFF00F400,
+ NEON_SQDMLAL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement,
+ NEON_SQDMLSL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement,
+ NEON_SQDMULL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULL_byelement,
+ NEON_SQDMULH_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULH_byelement,
+ NEON_SQRDMULH_byelement_scalar
+ = NEON_Q | NEONScalar | NEON_SQRDMULH_byelement,
+ NEON_SQRDMLAH_byelement_scalar
+ = NEON_Q | NEONScalar | NEON_SQRDMLAH_byelement,
+ NEON_SQRDMLSH_byelement_scalar
+ = NEON_Q | NEONScalar | NEON_SQRDMLSH_byelement,
+ NEON_FMLA_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_H_byelement,
+ NEON_FMLS_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_H_byelement,
+ NEON_FMUL_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_H_byelement,
+ NEON_FMULX_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_H_byelement,
+
+ // Floating point instructions.
+ NEONScalarByIndexedElementFPFixed
+ = NEONScalarByIndexedElementFixed | 0x00800000,
+ NEONScalarByIndexedElementFPMask
+ = NEONScalarByIndexedElementMask | 0x00800000,
+ NEON_FMLA_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_byelement,
+ NEON_FMLS_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_byelement,
+ NEON_FMUL_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_byelement,
+ NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement
+};
+
+// NEON scalar register copy.
+enum NEONScalarCopyOp : uint32_t {
+ NEONScalarCopyFixed = 0x5E000400,
+ NEONScalarCopyFMask = 0xDFE08400,
+ NEONScalarCopyMask = 0xFFE0FC00,
+ NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT
+};
+
+// NEON scalar pairwise instructions.
+enum NEONScalarPairwiseOp : uint32_t {
+ NEONScalarPairwiseFixed = 0x5E300800,
+ NEONScalarPairwiseFMask = 0xDF3E0C00,
+ NEONScalarPairwiseMask = 0xFFB1F800,
+ NEON_ADDP_scalar = NEONScalarPairwiseFixed | 0x0081B000,
+ NEON_FMAXNMP_h_scalar = NEONScalarPairwiseFixed | 0x0000C000,
+ NEON_FADDP_h_scalar = NEONScalarPairwiseFixed | 0x0000D000,
+ NEON_FMAXP_h_scalar = NEONScalarPairwiseFixed | 0x0000F000,
+ NEON_FMINNMP_h_scalar = NEONScalarPairwiseFixed | 0x0080C000,
+ NEON_FMINP_h_scalar = NEONScalarPairwiseFixed | 0x0080F000,
+ NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000,
+ NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000,
+ NEON_FADDP_scalar = NEONScalarPairwiseFixed | 0x2000D000,
+ NEON_FMAXP_scalar = NEONScalarPairwiseFixed | 0x2000F000,
+ NEON_FMINP_scalar = NEONScalarPairwiseFixed | 0x2080F000
+};
+
+// NEON scalar shift immediate.
+enum NEONScalarShiftImmediateOp : uint32_t {
+ NEONScalarShiftImmediateFixed = 0x5F000400,
+ NEONScalarShiftImmediateFMask = 0xDF800400,
+ NEONScalarShiftImmediateMask = 0xFF80FC00,
+ NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL,
+ NEON_SLI_scalar = NEON_Q | NEONScalar | NEON_SLI,
+ NEON_SRI_scalar = NEON_Q | NEONScalar | NEON_SRI,
+ NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR,
+ NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR,
+ NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR,
+ NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR,
+ NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA,
+ NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA,
+ NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA,
+ NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA,
+ NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN,
+ NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN,
+ NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN,
+ NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN,
+ NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN,
+ NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN,
+ NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU,
+ NEON_SQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_SQSHL_imm,
+ NEON_UQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_UQSHL_imm,
+ NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm,
+ NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm,
+ NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm,
+ NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm
+};
+
+enum ReservedOp : uint32_t {
+ ReservedFixed = 0x00000000,
+ ReservedFMask = 0x1E000000,
+ ReservedMask = 0xFFFF0000,
+
+ UDF = ReservedFixed | 0x00000000
+};
+
+// Unimplemented and unallocated instructions. These are defined to make
+// fixed-bit assertions easier.
+enum UnimplementedOp : uint32_t {
+ UnimplementedFixed = 0x00000000,
+ UnimplementedFMask = 0x00000000
+};
+
+enum UnallocatedOp : uint32_t {
+ UnallocatedFixed = 0x00000000,
+ UnallocatedFMask = 0x00000000
+};
+
+// Instruction bit pattern for an undefined instruction, that will trigger a
+// SIGILL at runtime.
+//
+// There are a couple of strategies we can use here. No unencoded instruction
+// in the instruction set is guaranteed to remain unencoded, but some
+// currently (as of 2018) unencoded instructions are good candidates.
+//
+// Ideally, unencoded instructions should be non-destructive to the register
+// state, and should be unencoded at all exception levels.
+//
+// At the trap the pc will hold the address of the offending instruction.
+//
+// Some candidates for unencoded instructions:
+//
+// 0xd4a00000 (essentially dcps0, a good one since it is nonsensical and may
+// remain unencoded in the future for that reason)
+// 0x33000000 (bfm variant)
+// 0xd67f0000 (br variant)
+// 0x5ac00c00 (rbit variant)
+//
+// The pattern below encodes "dcps0"; it also carries a 16-bit immediate
+// payload if one is needed.
+static constexpr uint32_t UNDEFINED_INST_PATTERN = 0xd4a00000;
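+
+// A minimal illustrative sketch (not part of VIXL; `buffer` is hypothetical):
+// writing this pattern into an executable buffer produces an instruction
+// that faults when executed.
+//
+//   void EmitUndefinedInstruction(uint32_t* buffer) {
+//     *buffer = UNDEFINED_INST_PATTERN;  // Executing this word raises SIGILL.
+//   }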
+
+} // namespace vixl
+
+#endif // VIXL_A64_CONSTANTS_A64_H_
diff --git a/js/src/jit/arm64/vixl/Cpu-Features-vixl.cpp b/js/src/jit/arm64/vixl/Cpu-Features-vixl.cpp
new file mode 100644
index 0000000000..f31c22fbf5
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-Features-vixl.cpp
@@ -0,0 +1,231 @@
+// Copyright 2018, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "jit/arm64/vixl/Cpu-Features-vixl.h"
+
+#include <ostream>
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+#define VIXL_USE_AARCH64_CPU_HELPERS
+
+namespace vixl {
+
+static uint64_t MakeFeatureMask(CPUFeatures::Feature feature) {
+ if (feature == CPUFeatures::kNone) {
+ return 0;
+ } else {
+ // Check that the shift is well-defined, and that the feature is valid.
+ VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures <=
+ (sizeof(uint64_t) * 8));
+ VIXL_ASSERT(feature < CPUFeatures::kNumberOfFeatures);
+ return UINT64_C(1) << feature;
+ }
+}
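+
+// For example (illustrative only), CPUFeatures(CPUFeatures::kFP) sets just
+// the kFP bit: the three defaulted kNone arguments each contribute
+// MakeFeatureMask(kNone) == 0, so they are harmless no-ops.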
+
+CPUFeatures::CPUFeatures(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3)
+ : features_(0) {
+ Combine(feature0, feature1, feature2, feature3);
+}
+
+CPUFeatures CPUFeatures::All() {
+ CPUFeatures all;
+ // Check that the shift is well-defined.
+ VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures < (sizeof(uint64_t) * 8));
+ all.features_ = (UINT64_C(1) << kNumberOfFeatures) - 1;
+ return all;
+}
+
+CPUFeatures CPUFeatures::InferFromIDRegisters() {
+ // This function assumes that kIDRegisterEmulation is available.
+ CPUFeatures features(CPUFeatures::kIDRegisterEmulation);
+#ifdef VIXL_USE_AARCH64_CPU_HELPERS
+ // Note that the Linux kernel filters these values during emulation, so the
+ // results may not exactly match the expected hardware support.
+ features.Combine(CPU::InferCPUFeaturesFromIDRegisters());
+#endif
+ return features;
+}
+
+CPUFeatures CPUFeatures::InferFromOS(QueryIDRegistersOption option) {
+#ifdef VIXL_USE_AARCH64_CPU_HELPERS
+ return CPU::InferCPUFeaturesFromOS(option);
+#else
+ USE(option);
+ return CPUFeatures();
+#endif
+}
+
+void CPUFeatures::Combine(const CPUFeatures& other) {
+ features_ |= other.features_;
+}
+
+void CPUFeatures::Combine(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) {
+ features_ |= MakeFeatureMask(feature0);
+ features_ |= MakeFeatureMask(feature1);
+ features_ |= MakeFeatureMask(feature2);
+ features_ |= MakeFeatureMask(feature3);
+}
+
+void CPUFeatures::Remove(const CPUFeatures& other) {
+ features_ &= ~other.features_;
+}
+
+void CPUFeatures::Remove(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) {
+ features_ &= ~MakeFeatureMask(feature0);
+ features_ &= ~MakeFeatureMask(feature1);
+ features_ &= ~MakeFeatureMask(feature2);
+ features_ &= ~MakeFeatureMask(feature3);
+}
+
+CPUFeatures CPUFeatures::With(const CPUFeatures& other) const {
+ CPUFeatures f(*this);
+ f.Combine(other);
+ return f;
+}
+
+CPUFeatures CPUFeatures::With(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) const {
+ CPUFeatures f(*this);
+ f.Combine(feature0, feature1, feature2, feature3);
+ return f;
+}
+
+CPUFeatures CPUFeatures::Without(const CPUFeatures& other) const {
+ CPUFeatures f(*this);
+ f.Remove(other);
+ return f;
+}
+
+CPUFeatures CPUFeatures::Without(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) const {
+ CPUFeatures f(*this);
+ f.Remove(feature0, feature1, feature2, feature3);
+ return f;
+}
+
+bool CPUFeatures::Has(const CPUFeatures& other) const {
+ return (features_ & other.features_) == other.features_;
+}
+
+bool CPUFeatures::Has(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) const {
+ uint64_t mask = MakeFeatureMask(feature0) | MakeFeatureMask(feature1) |
+ MakeFeatureMask(feature2) | MakeFeatureMask(feature3);
+ return (features_ & mask) == mask;
+}
+
+size_t CPUFeatures::Count() const { return CountSetBits(features_); }
+
+std::ostream& operator<<(std::ostream& os, CPUFeatures::Feature feature) {
+ // clang-format off
+ switch (feature) {
+#define VIXL_FORMAT_FEATURE(SYMBOL, NAME, CPUINFO) \
+ case CPUFeatures::SYMBOL: \
+ return os << NAME;
+VIXL_CPU_FEATURE_LIST(VIXL_FORMAT_FEATURE)
+#undef VIXL_FORMAT_FEATURE
+ case CPUFeatures::kNone:
+ return os << "none";
+ case CPUFeatures::kNumberOfFeatures:
+ VIXL_UNREACHABLE();
+ }
+ // clang-format on
+ VIXL_UNREACHABLE();
+ return os;
+}
+
+CPUFeatures::const_iterator CPUFeatures::begin() const {
+ if (features_ == 0) return const_iterator(this, kNone);
+
+ int feature_number = CountTrailingZeros(features_);
+ vixl::CPUFeatures::Feature feature =
+ static_cast<CPUFeatures::Feature>(feature_number);
+ return const_iterator(this, feature);
+}
+
+CPUFeatures::const_iterator CPUFeatures::end() const {
+ return const_iterator(this, kNone);
+}
+
+std::ostream& operator<<(std::ostream& os, const CPUFeatures& features) {
+ CPUFeatures::const_iterator it = features.begin();
+ while (it != features.end()) {
+ os << *it;
+ ++it;
+ if (it != features.end()) os << ", ";
+ }
+ return os;
+}
+
+bool CPUFeaturesConstIterator::operator==(
+ const CPUFeaturesConstIterator& other) const {
+ VIXL_ASSERT(IsValid());
+ return (cpu_features_ == other.cpu_features_) && (feature_ == other.feature_);
+}
+
+CPUFeatures::Feature CPUFeaturesConstIterator::operator++() { // Prefix
+ VIXL_ASSERT(IsValid());
+ do {
+ // Find the next feature. The order is unspecified.
+ feature_ = static_cast<CPUFeatures::Feature>(feature_ + 1);
+ if (feature_ == CPUFeatures::kNumberOfFeatures) {
+ feature_ = CPUFeatures::kNone;
+ VIXL_STATIC_ASSERT(CPUFeatures::kNone == -1);
+ }
+ VIXL_ASSERT(CPUFeatures::kNone <= feature_);
+ VIXL_ASSERT(feature_ < CPUFeatures::kNumberOfFeatures);
+ // cpu_features_->Has(kNone) is always true, so this will terminate even if
+ // the features list is empty.
+ } while (!cpu_features_->Has(feature_));
+ return feature_;
+}
+
+CPUFeatures::Feature CPUFeaturesConstIterator::operator++(int) { // Postfix
+ CPUFeatures::Feature result = feature_;
+ ++(*this);
+ return result;
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Cpu-Features-vixl.h b/js/src/jit/arm64/vixl/Cpu-Features-vixl.h
new file mode 100644
index 0000000000..b980233bf2
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-Features-vixl.h
@@ -0,0 +1,397 @@
+// Copyright 2018, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_CPU_FEATURES_H
+#define VIXL_CPU_FEATURES_H
+
+#include <ostream>
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+
+namespace vixl {
+
+
+// clang-format off
+#define VIXL_CPU_FEATURE_LIST(V) \
+ /* If set, the OS traps and emulates MRS accesses to relevant (EL1) ID_* */ \
+ /* registers, so that the detailed feature registers can be read */ \
+ /* directly. */ \
+ V(kIDRegisterEmulation, "ID register emulation", "cpuid") \
+ \
+ V(kFP, "FP", "fp") \
+ V(kNEON, "NEON", "asimd") \
+ V(kCRC32, "CRC32", "crc32") \
+ /* Cryptographic support instructions. */ \
+ V(kAES, "AES", "aes") \
+ V(kSHA1, "SHA1", "sha1") \
+ V(kSHA2, "SHA2", "sha2") \
+ /* A form of PMULL{2} with a 128-bit (1Q) result. */ \
+ V(kPmull1Q, "Pmull1Q", "pmull") \
+ /* Atomic operations on memory: CAS, LDADD, STADD, SWP, etc. */ \
+ V(kAtomics, "Atomics", "atomics") \
+ /* Limited ordering regions: LDLAR, STLLR and their variants. */ \
+ V(kLORegions, "LORegions", NULL) \
+ /* Rounding doubling multiply add/subtract: SQRDMLAH and SQRDMLSH. */ \
+ V(kRDM, "RDM", "asimdrdm") \
+ /* Scalable Vector Extension. */ \
+ V(kSVE, "SVE", "sve") \
+ /* SDOT and UDOT support (in NEON). */ \
+ V(kDotProduct, "DotProduct", "asimddp") \
+ /* Half-precision (FP16) support for FP and NEON, respectively. */ \
+ V(kFPHalf, "FPHalf", "fphp") \
+ V(kNEONHalf, "NEONHalf", "asimdhp") \
+ /* The RAS extension, including the ESB instruction. */ \
+ V(kRAS, "RAS", NULL) \
+ /* Data cache clean to the point of persistence: DC CVAP. */ \
+ V(kDCPoP, "DCPoP", "dcpop") \
+ /* Data cache clean to the point of deep persistence: DC CVADP. */ \
+ V(kDCCVADP, "DCCVADP", NULL) \
+ /* Cryptographic support instructions. */ \
+ V(kSHA3, "SHA3", "sha3") \
+ V(kSHA512, "SHA512", "sha512") \
+ V(kSM3, "SM3", "sm3") \
+ V(kSM4, "SM4", "sm4") \
+ /* Pointer authentication for addresses. */ \
+ V(kPAuth, "PAuth", NULL) \
+ /* Pointer authentication for addresses uses QARMA. */ \
+ V(kPAuthQARMA, "PAuthQARMA", NULL) \
+ /* Generic authentication (using the PACGA instruction). */ \
+ V(kPAuthGeneric, "PAuthGeneric", NULL) \
+ /* Generic authentication uses QARMA. */ \
+ V(kPAuthGenericQARMA, "PAuthGenericQARMA", NULL) \
+ /* JavaScript-style FP -> integer conversion instruction: FJCVTZS. */ \
+ V(kJSCVT, "JSCVT", "jscvt") \
+ /* Complex number support for NEON: FCMLA and FCADD. */ \
+ V(kFcma, "Fcma", "fcma") \
+ /* RCpc-based model (for weaker release consistency): LDAPR and variants. */ \
+ V(kRCpc, "RCpc", "lrcpc") \
+ V(kRCpcImm, "RCpc (imm)", "ilrcpc") \
+ /* Flag manipulation instructions: SETF{8,16}, CFINV, RMIF. */ \
+ V(kFlagM, "FlagM", "flagm") \
+ /* Unaligned single-copy atomicity. */ \
+ V(kUSCAT, "USCAT", "uscat") \
+ /* FP16 fused multiply-add or -subtract long: FMLAL{2}, FMLSL{2}. */ \
+ V(kFHM, "FHM", "asimdfhm") \
+ /* Data-independent timing (for selected instructions). */ \
+ V(kDIT, "DIT", "dit") \
+ /* Branch target identification. */ \
+ V(kBTI, "BTI", NULL) \
+ /* Flag manipulation instructions: {AX,XA}FLAG */ \
+ V(kAXFlag, "AXFlag", NULL) \
+  /* Random number generation extension. */                                   \
+ V(kRNG, "RNG", NULL) \
+ /* Floating-point round to {32,64}-bit integer. */ \
+ V(kFrintToFixedSizedInt,"Frint (bounded)", NULL)
+// clang-format on
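+
+// The list above is an X-macro: each client supplies a V(SYMBOL, NAME,
+// CPUINFO) expansion, as the declarations further down do. A minimal
+// illustrative sketch (the macro name here is hypothetical):
+//
+//   #define VIXL_COUNT_FEATURE(SYMBOL, NAME, CPUINFO) +1
+//   const int kFeatureCount = 0 VIXL_CPU_FEATURE_LIST(VIXL_COUNT_FEATURE);
+//   #undef VIXL_COUNT_FEATURE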
+
+
+class CPUFeaturesConstIterator;
+
+// A representation of the set of features known to be supported by the target
+// device. Each feature is represented by a simple boolean flag.
+//
+// - When the Assembler is asked to assemble an instruction, it asserts (in
+// debug mode) that the necessary features are available.
+//
+// - TODO: The MacroAssembler relies on the Assembler's assertions, but in
+// some cases it may be useful for macros to generate a fall-back sequence
+// in case features are not available.
+//
+// - The Simulator assumes by default that all features are available, but it
+// is possible to configure it to fail if the simulated code uses features
+// that are not enabled.
+//
+// The Simulator also offers pseudo-instructions to allow features to be
+// enabled and disabled dynamically. This is useful when you want to ensure
+// that some features are constrained to certain areas of code.
+//
+// - The base Disassembler knows nothing about CPU features, but the
+// PrintDisassembler can be configured to annotate its output with warnings
+// about unavailable features. The Simulator uses this feature when
+// instruction trace is enabled.
+//
+// - The Decoder-based components -- the Simulator and PrintDisassembler --
+// rely on a CPUFeaturesAuditor visitor. This visitor keeps a list of
+// features actually encountered so that a large block of code can be
+// examined (either directly or through simulation), and the required
+// features analysed later.
+//
+// Expected usage:
+//
+// // By default, VIXL uses CPUFeatures::AArch64LegacyBaseline(), for
+//   // compatibility with older versions of VIXL.
+// MacroAssembler masm;
+//
+// // Generate code only for the current CPU.
+// masm.SetCPUFeatures(CPUFeatures::InferFromOS());
+//
+// // Turn off feature checking entirely.
+// masm.SetCPUFeatures(CPUFeatures::All());
+//
+// Feature set manipulation:
+//
+// CPUFeatures f; // The default constructor gives an empty set.
+// // Individual features can be added (or removed).
+//   f.Combine(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kAES);
+// f.Remove(CPUFeatures::kNEON);
+//
+// // Some helpers exist for extensions that provide several features.
+// f.Remove(CPUFeatures::All());
+// f.Combine(CPUFeatures::AArch64LegacyBaseline());
+//
+// // Chained construction is also possible.
+// CPUFeatures g =
+// f.With(CPUFeatures::kPmull1Q).Without(CPUFeatures::kCRC32);
+//
+// // Features can be queried. Where multiple features are given, they are
+// // combined with logical AND.
+//   if (f.Has(CPUFeatures::kNEON)) { ... }
+//   if (f.Has(CPUFeatures::kFP, CPUFeatures::kNEON)) { ... }
+//   if (f.Has(g)) { ... }
+//   // If the empty set is requested, the result is always 'true'.
+//   VIXL_ASSERT(f.Has(CPUFeatures()));
+//
+// // For debug and reporting purposes, features can be enumerated (or
+// // printed directly):
+// std::cout << CPUFeatures::kNEON; // Prints something like "NEON".
+// std::cout << f; // Prints something like "FP, NEON, CRC32".
+class CPUFeatures {
+ public:
+ // clang-format off
+ // Individual features.
+ // These should be treated as opaque tokens. User code should not rely on
+ // specific numeric values or ordering.
+ enum Feature {
+ // Refer to VIXL_CPU_FEATURE_LIST (above) for the list of feature names that
+ // this class supports.
+
+ kNone = -1,
+#define VIXL_DECLARE_FEATURE(SYMBOL, NAME, CPUINFO) SYMBOL,
+ VIXL_CPU_FEATURE_LIST(VIXL_DECLARE_FEATURE)
+#undef VIXL_DECLARE_FEATURE
+ kNumberOfFeatures
+ };
+ // clang-format on
+
+ // By default, construct with no features enabled.
+ CPUFeatures() : features_(0) {}
+
+ // Construct with some features already enabled.
+ CPUFeatures(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone);
+
+ // Construct with all features enabled. This can be used to disable feature
+ // checking: `Has(...)` returns true regardless of the argument.
+ static CPUFeatures All();
+
+ // Construct an empty CPUFeatures. This is equivalent to the default
+ // constructor, but is provided for symmetry and convenience.
+ static CPUFeatures None() { return CPUFeatures(); }
+
+  // The presence of these features was assumed by versions of VIXL before
+  // this API was added, so using this set by default ensures API
+  // compatibility.
+ static CPUFeatures AArch64LegacyBaseline() {
+ return CPUFeatures(kFP, kNEON, kCRC32);
+ }
+
+ // Construct a new CPUFeatures object using ID registers. This assumes that
+ // kIDRegisterEmulation is present.
+ static CPUFeatures InferFromIDRegisters();
+
+ enum QueryIDRegistersOption {
+ kDontQueryIDRegisters,
+ kQueryIDRegistersIfAvailable
+ };
+
+ // Construct a new CPUFeatures object based on what the OS reports.
+ static CPUFeatures InferFromOS(
+ QueryIDRegistersOption option = kQueryIDRegistersIfAvailable);
+
+ // Combine another CPUFeatures object into this one. Features that already
+ // exist in this set are left unchanged.
+ void Combine(const CPUFeatures& other);
+
+ // Combine specific features into this set. Features that already exist in
+ // this set are left unchanged.
+ void Combine(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone);
+
+ // Remove features in another CPUFeatures object from this one.
+ void Remove(const CPUFeatures& other);
+
+ // Remove specific features from this set.
+ void Remove(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone);
+
+ // Chaining helpers for convenient construction.
+ CPUFeatures With(const CPUFeatures& other) const;
+ CPUFeatures With(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone) const;
+ CPUFeatures Without(const CPUFeatures& other) const;
+ CPUFeatures Without(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone) const;
+
+ // Query features.
+ // Note that an empty query (like `Has(kNone)`) always returns true.
+ bool Has(const CPUFeatures& other) const;
+ bool Has(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone) const;
+
+ // Return the number of enabled features.
+ size_t Count() const;
+ bool HasNoFeatures() const { return Count() == 0; }
+
+ // Check for equivalence.
+ bool operator==(const CPUFeatures& other) const {
+ return Has(other) && other.Has(*this);
+ }
+ bool operator!=(const CPUFeatures& other) const { return !(*this == other); }
+
+ typedef CPUFeaturesConstIterator const_iterator;
+
+ const_iterator begin() const;
+ const_iterator end() const;
+
+ private:
+ // Each bit represents a feature. This field will be replaced as needed if
+ // features are added.
+ uint64_t features_;
+
+ friend std::ostream& operator<<(std::ostream& os,
+ const vixl::CPUFeatures& features);
+};
+
+std::ostream& operator<<(std::ostream& os, vixl::CPUFeatures::Feature feature);
+std::ostream& operator<<(std::ostream& os, const vixl::CPUFeatures& features);
+
+// This is not a proper C++ iterator type, but it simulates enough of
+// ForwardIterator that simple loops can be written.
+class CPUFeaturesConstIterator {
+ public:
+ CPUFeaturesConstIterator(const CPUFeatures* cpu_features = NULL,
+ CPUFeatures::Feature start = CPUFeatures::kNone)
+ : cpu_features_(cpu_features), feature_(start) {
+ VIXL_ASSERT(IsValid());
+ }
+
+ bool operator==(const CPUFeaturesConstIterator& other) const;
+ bool operator!=(const CPUFeaturesConstIterator& other) const {
+ return !(*this == other);
+ }
+ CPUFeatures::Feature operator++();
+ CPUFeatures::Feature operator++(int);
+
+ CPUFeatures::Feature operator*() const {
+ VIXL_ASSERT(IsValid());
+ return feature_;
+ }
+
+ // For proper support of C++'s simplest "Iterator" concept, this class would
+ // have to define member types (such as CPUFeaturesIterator::pointer) to make
+ // it appear as if it iterates over Feature objects in memory. That is, we'd
+ // need CPUFeatures::iterator to behave like std::vector<Feature>::iterator.
+ // This is at least partially possible -- the std::vector<bool> specialisation
+ // does something similar -- but it doesn't seem worthwhile for a
+ // special-purpose debug helper, so they are omitted here.
+ private:
+ const CPUFeatures* cpu_features_;
+ CPUFeatures::Feature feature_;
+
+ bool IsValid() const {
+ return ((cpu_features_ == NULL) && (feature_ == CPUFeatures::kNone)) ||
+ cpu_features_->Has(feature_);
+ }
+};
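+
+// A minimal iteration sketch (illustrative only):
+//
+//   CPUFeatures f(CPUFeatures::kFP, CPUFeatures::kNEON);
+//   for (CPUFeatures::const_iterator it = f.begin(); it != f.end(); ++it) {
+//     std::cout << *it << "\n";  // Prints each enabled feature by name.
+//   }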
+
+// A convenience scope for temporarily modifying a CPU features object. This
+// allows features to be enabled for short sequences.
+//
+// Expected usage:
+//
+// {
+// CPUFeaturesScope cpu(&masm, CPUFeatures::kCRC32);
+// // This scope can now use CRC32, as well as anything else that was enabled
+// // before the scope.
+//
+// ...
+//
+// // At the end of the scope, the original CPU features are restored.
+// }
+class CPUFeaturesScope {
+ public:
+ // Start a CPUFeaturesScope on any object that implements
+ // `CPUFeatures* GetCPUFeatures()`.
+ template <typename T>
+ explicit CPUFeaturesScope(T* cpu_features_wrapper,
+ CPUFeatures::Feature feature0 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature1 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature2 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature3 = CPUFeatures::kNone)
+ : cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
+ old_features_(*cpu_features_) {
+ cpu_features_->Combine(feature0, feature1, feature2, feature3);
+ }
+
+ template <typename T>
+ CPUFeaturesScope(T* cpu_features_wrapper, const CPUFeatures& other)
+ : cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
+ old_features_(*cpu_features_) {
+ cpu_features_->Combine(other);
+ }
+
+ ~CPUFeaturesScope() { *cpu_features_ = old_features_; }
+
+ // For advanced usage, the CPUFeatures object can be accessed directly.
+ // The scope will restore the original state when it ends.
+
+ CPUFeatures* GetCPUFeatures() const { return cpu_features_; }
+
+ void SetCPUFeatures(const CPUFeatures& cpu_features) {
+ *cpu_features_ = cpu_features;
+ }
+
+ private:
+ CPUFeatures* const cpu_features_;
+ const CPUFeatures old_features_;
+};
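+
+// Any type exposing `CPUFeatures* GetCPUFeatures()` can be passed to the
+// scope above. A minimal illustrative sketch (FeatureHolder is hypothetical):
+//
+//   struct FeatureHolder {
+//     CPUFeatures features;
+//     CPUFeatures* GetCPUFeatures() { return &features; }
+//   };
+//
+//   FeatureHolder holder;
+//   {
+//     CPUFeaturesScope scope(&holder, CPUFeatures::kCRC32);
+//     // holder.features now includes kCRC32.
+//   }  // The original (empty) feature set is restored here.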
+
+
+} // namespace vixl
+
+#endif // VIXL_CPU_FEATURES_H
diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.cpp b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
new file mode 100644
index 0000000000..12244e73e4
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
@@ -0,0 +1,256 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+
+#include "jstypes.h"
+
+#if defined(__aarch64__) && (defined(__ANDROID__) || defined(__linux__))
+#include <sys/auxv.h>
+#define VIXL_USE_LINUX_HWCAP 1
+#endif
+
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+
+namespace vixl {
+
+
+const IDRegister::Field AA64PFR0::kFP(16, Field::kSigned);
+const IDRegister::Field AA64PFR0::kAdvSIMD(20, Field::kSigned);
+const IDRegister::Field AA64PFR0::kSVE(32);
+const IDRegister::Field AA64PFR0::kDIT(48);
+
+const IDRegister::Field AA64PFR1::kBT(0);
+
+const IDRegister::Field AA64ISAR0::kAES(4);
+const IDRegister::Field AA64ISAR0::kSHA1(8);
+const IDRegister::Field AA64ISAR0::kSHA2(12);
+const IDRegister::Field AA64ISAR0::kCRC32(16);
+const IDRegister::Field AA64ISAR0::kAtomic(20);
+const IDRegister::Field AA64ISAR0::kRDM(28);
+const IDRegister::Field AA64ISAR0::kSHA3(32);
+const IDRegister::Field AA64ISAR0::kSM3(36);
+const IDRegister::Field AA64ISAR0::kSM4(40);
+const IDRegister::Field AA64ISAR0::kDP(44);
+const IDRegister::Field AA64ISAR0::kFHM(48);
+const IDRegister::Field AA64ISAR0::kTS(52);
+
+const IDRegister::Field AA64ISAR1::kDPB(0);
+const IDRegister::Field AA64ISAR1::kAPA(4);
+const IDRegister::Field AA64ISAR1::kAPI(8);
+const IDRegister::Field AA64ISAR1::kJSCVT(12);
+const IDRegister::Field AA64ISAR1::kFCMA(16);
+const IDRegister::Field AA64ISAR1::kLRCPC(20);
+const IDRegister::Field AA64ISAR1::kGPA(24);
+const IDRegister::Field AA64ISAR1::kGPI(28);
+const IDRegister::Field AA64ISAR1::kFRINTTS(32);
+const IDRegister::Field AA64ISAR1::kSB(36);
+const IDRegister::Field AA64ISAR1::kSPECRES(40);
+
+const IDRegister::Field AA64MMFR1::kLO(16);
+
+CPUFeatures AA64PFR0::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kFP) >= 0) f.Combine(CPUFeatures::kFP);
+ if (Get(kFP) >= 1) f.Combine(CPUFeatures::kFPHalf);
+ if (Get(kAdvSIMD) >= 0) f.Combine(CPUFeatures::kNEON);
+ if (Get(kAdvSIMD) >= 1) f.Combine(CPUFeatures::kNEONHalf);
+ if (Get(kSVE) >= 1) f.Combine(CPUFeatures::kSVE);
+ if (Get(kDIT) >= 1) f.Combine(CPUFeatures::kDIT);
+ return f;
+}
+
+CPUFeatures AA64PFR1::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kBT) >= 1) f.Combine(CPUFeatures::kBTI);
+ return f;
+}
+
+CPUFeatures AA64ISAR0::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kAES) >= 1) f.Combine(CPUFeatures::kAES);
+ if (Get(kAES) >= 2) f.Combine(CPUFeatures::kPmull1Q);
+ if (Get(kSHA1) >= 1) f.Combine(CPUFeatures::kSHA1);
+ if (Get(kSHA2) >= 1) f.Combine(CPUFeatures::kSHA2);
+ if (Get(kSHA2) >= 2) f.Combine(CPUFeatures::kSHA512);
+ if (Get(kCRC32) >= 1) f.Combine(CPUFeatures::kCRC32);
+ if (Get(kAtomic) >= 1) f.Combine(CPUFeatures::kAtomics);
+ if (Get(kRDM) >= 1) f.Combine(CPUFeatures::kRDM);
+ if (Get(kSHA3) >= 1) f.Combine(CPUFeatures::kSHA3);
+ if (Get(kSM3) >= 1) f.Combine(CPUFeatures::kSM3);
+ if (Get(kSM4) >= 1) f.Combine(CPUFeatures::kSM4);
+ if (Get(kDP) >= 1) f.Combine(CPUFeatures::kDotProduct);
+ if (Get(kFHM) >= 1) f.Combine(CPUFeatures::kFHM);
+ if (Get(kTS) >= 1) f.Combine(CPUFeatures::kFlagM);
+ if (Get(kTS) >= 2) f.Combine(CPUFeatures::kAXFlag);
+ return f;
+}
+
+CPUFeatures AA64ISAR1::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kDPB) >= 1) f.Combine(CPUFeatures::kDCPoP);
+ if (Get(kJSCVT) >= 1) f.Combine(CPUFeatures::kJSCVT);
+ if (Get(kFCMA) >= 1) f.Combine(CPUFeatures::kFcma);
+ if (Get(kLRCPC) >= 1) f.Combine(CPUFeatures::kRCpc);
+ if (Get(kLRCPC) >= 2) f.Combine(CPUFeatures::kRCpcImm);
+ if (Get(kFRINTTS) >= 1) f.Combine(CPUFeatures::kFrintToFixedSizedInt);
+
+ if (Get(kAPI) >= 1) f.Combine(CPUFeatures::kPAuth);
+ if (Get(kAPA) >= 1) f.Combine(CPUFeatures::kPAuth, CPUFeatures::kPAuthQARMA);
+ if (Get(kGPI) >= 1) f.Combine(CPUFeatures::kPAuthGeneric);
+ if (Get(kGPA) >= 1) {
+ f.Combine(CPUFeatures::kPAuthGeneric, CPUFeatures::kPAuthGenericQARMA);
+ }
+ return f;
+}
+
+CPUFeatures AA64MMFR1::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kLO) >= 1) f.Combine(CPUFeatures::kLORegions);
+ return f;
+}
+
+int IDRegister::Get(IDRegister::Field field) const {
+ int msb = field.GetMsb();
+ int lsb = field.GetLsb();
+ VIXL_STATIC_ASSERT(static_cast<size_t>(Field::kMaxWidthInBits) <
+ (sizeof(int) * kBitsPerByte));
+ switch (field.GetType()) {
+ case Field::kSigned:
+ return static_cast<int>(ExtractSignedBitfield64(msb, lsb, value_));
+ case Field::kUnsigned:
+ return static_cast<int>(ExtractUnsignedBitfield64(msb, lsb, value_));
+ }
+ VIXL_UNREACHABLE();
+ return 0;
+}
+
+CPUFeatures CPU::InferCPUFeaturesFromIDRegisters() {
+ CPUFeatures f;
+#define VIXL_COMBINE_ID_REG(NAME) f.Combine(Read##NAME().GetCPUFeatures());
+ VIXL_AARCH64_ID_REG_LIST(VIXL_COMBINE_ID_REG)
+#undef VIXL_COMBINE_ID_REG
+ return f;
+}
+
+CPUFeatures CPU::InferCPUFeaturesFromOS(
+ CPUFeatures::QueryIDRegistersOption option) {
+ CPUFeatures features;
+
+#if VIXL_USE_LINUX_HWCAP
+ // Map each set bit onto a feature. Ideally, we'd use HWCAP_* macros rather
+ // than explicit bits, but explicit bits allow us to identify features that
+ // the toolchain doesn't know about.
+ static const CPUFeatures::Feature kFeatureBits[] = {
+ // Bits 0-7
+ CPUFeatures::kFP,
+ CPUFeatures::kNEON,
+ CPUFeatures::kNone, // "EVTSTRM", which VIXL doesn't track.
+ CPUFeatures::kAES,
+ CPUFeatures::kPmull1Q,
+ CPUFeatures::kSHA1,
+ CPUFeatures::kSHA2,
+ CPUFeatures::kCRC32,
+ // Bits 8-15
+ CPUFeatures::kAtomics,
+ CPUFeatures::kFPHalf,
+ CPUFeatures::kNEONHalf,
+ CPUFeatures::kIDRegisterEmulation,
+ CPUFeatures::kRDM,
+ CPUFeatures::kJSCVT,
+ CPUFeatures::kFcma,
+ CPUFeatures::kRCpc,
+ // Bits 16-23
+ CPUFeatures::kDCPoP,
+ CPUFeatures::kSHA3,
+ CPUFeatures::kSM3,
+ CPUFeatures::kSM4,
+ CPUFeatures::kDotProduct,
+ CPUFeatures::kSHA512,
+ CPUFeatures::kSVE,
+ CPUFeatures::kFHM,
+ // Bits 24-27
+ CPUFeatures::kDIT,
+ CPUFeatures::kUSCAT,
+ CPUFeatures::kRCpcImm,
+ CPUFeatures::kFlagM
+ // Bits 28-31 are unassigned.
+ };
+ static const size_t kFeatureBitCount =
+ sizeof(kFeatureBits) / sizeof(kFeatureBits[0]);
+
+ // Mozilla change: Set the default for the simulator.
+#ifdef JS_SIMULATOR_ARM64
+ unsigned long auxv = ~(0UL); // Enable all features for the Simulator.
+#else
+ unsigned long auxv = getauxval(AT_HWCAP); // NOLINT(runtime/int)
+#endif
+
+ VIXL_STATIC_ASSERT(kFeatureBitCount < (sizeof(auxv) * kBitsPerByte));
+ for (size_t i = 0; i < kFeatureBitCount; i++) {
+ if (auxv & (1UL << i)) features.Combine(kFeatureBits[i]);
+ }
+#elif defined(XP_MACOSX)
+ // Apple processors have kJSCVT, kDotProduct, and kAtomics features.
+ features.Combine(CPUFeatures::kJSCVT, CPUFeatures::kDotProduct,
+ CPUFeatures::kAtomics);
+#endif // VIXL_USE_LINUX_HWCAP
+
+ if ((option == CPUFeatures::kQueryIDRegistersIfAvailable) &&
+ (features.Has(CPUFeatures::kIDRegisterEmulation))) {
+ features.Combine(InferCPUFeaturesFromIDRegisters());
+ }
+ return features;
+}
+
+
+#ifdef __aarch64__
+#define VIXL_READ_ID_REG(NAME) \
+ NAME CPU::Read##NAME() { \
+ uint64_t value = 0; \
+ __asm__("mrs %0, ID_" #NAME "_EL1" : "=r"(value)); \
+ return NAME(value); \
+ }
+#else // __aarch64__
+#define VIXL_READ_ID_REG(NAME) \
+ NAME CPU::Read##NAME() { \
+ /* TODO: Use VIXL_UNREACHABLE once it works in release builds. */ \
+ VIXL_ABORT(); \
+ }
+#endif // __aarch64__
+
+VIXL_AARCH64_ID_REG_LIST(VIXL_READ_ID_REG)
+
+#undef VIXL_READ_ID_REG
+
+
+// Initialise to smallest possible cache size.
+unsigned CPU::dcache_line_size_ = 1;
+unsigned CPU::icache_line_size_ = 1;
+
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.h b/js/src/jit/arm64/vixl/Cpu-vixl.h
new file mode 100644
index 0000000000..4db51aad6b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-vixl.h
@@ -0,0 +1,241 @@
+// Copyright 2014, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_CPU_AARCH64_H
+#define VIXL_CPU_AARCH64_H
+
+#include "jit/arm64/vixl/Cpu-Features-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+#ifndef VIXL_INCLUDE_TARGET_AARCH64
+// The supporting .cc file is only compiled when the A64 target is selected.
+// Throw an explicit error now to avoid a harder-to-debug linker error later.
+//
+// These helpers _could_ work on any AArch64 host, even when generating AArch32
+// code, but we don't support this because the available features may differ
+// between AArch32 and AArch64 on the same platform, so basing AArch32 code
+// generation on aarch64::CPU features is probably broken.
+#error cpu-aarch64.h requires VIXL_INCLUDE_TARGET_AARCH64 (scons target=a64).
+#endif
+
+namespace vixl {
+
+// A CPU ID register, for use with CPUFeatures::kIDRegisterEmulation. Fields
+// specific to each register are described in relevant subclasses.
+class IDRegister {
+ protected:
+ explicit IDRegister(uint64_t value = 0) : value_(value) {}
+
+ class Field {
+ public:
+ enum Type { kUnsigned, kSigned };
+
+ explicit Field(int lsb, Type type = kUnsigned) : lsb_(lsb), type_(type) {}
+
+ static const int kMaxWidthInBits = 4;
+
+ int GetWidthInBits() const {
+ // All current ID fields have four bits.
+ return kMaxWidthInBits;
+ }
+ int GetLsb() const { return lsb_; }
+ int GetMsb() const { return lsb_ + GetWidthInBits() - 1; }
+ Type GetType() const { return type_; }
+
+ private:
+ int lsb_;
+ Type type_;
+ };
+
+ public:
+ // Extract the specified field, performing sign-extension for signed fields.
+ // This allows us to implement the 'value >= number' detection mechanism
+ // recommended by the Arm ARM, for both signed and unsigned fields.
+ int Get(Field field) const;
+
+ private:
+ uint64_t value_;
+};
+
+class AA64PFR0 : public IDRegister {
+ public:
+ explicit AA64PFR0(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kFP;
+ static const Field kAdvSIMD;
+ static const Field kSVE;
+ static const Field kDIT;
+};
+
+class AA64PFR1 : public IDRegister {
+ public:
+ explicit AA64PFR1(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kBT;
+};
+
+class AA64ISAR0 : public IDRegister {
+ public:
+ explicit AA64ISAR0(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kAES;
+ static const Field kSHA1;
+ static const Field kSHA2;
+ static const Field kCRC32;
+ static const Field kAtomic;
+ static const Field kRDM;
+ static const Field kSHA3;
+ static const Field kSM3;
+ static const Field kSM4;
+ static const Field kDP;
+ static const Field kFHM;
+ static const Field kTS;
+};
+
+class AA64ISAR1 : public IDRegister {
+ public:
+ explicit AA64ISAR1(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kDPB;
+ static const Field kAPA;
+ static const Field kAPI;
+ static const Field kJSCVT;
+ static const Field kFCMA;
+ static const Field kLRCPC;
+ static const Field kGPA;
+ static const Field kGPI;
+ static const Field kFRINTTS;
+ static const Field kSB;
+ static const Field kSPECRES;
+};
+
+class AA64MMFR1 : public IDRegister {
+ public:
+ explicit AA64MMFR1(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kLO;
+};
+
+class CPU {
+ public:
+ // Initialise CPU support.
+ static void SetUp();
+
+ // Ensures the data at a given address and with a given size is the same for
+ // the I and D caches. I and D caches are not automatically coherent on ARM
+ // so this operation is required before any dynamically generated code can
+ // safely run.
+ static void EnsureIAndDCacheCoherency(void* address, size_t length);
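+
+  // Typical use after writing generated code into an executable buffer
+  // (illustrative sketch; `code` and `size` are hypothetical):
+  //
+  //   memcpy(code, generated_instructions, size);
+  //   CPU::EnsureIAndDCacheCoherency(code, size);
+  //   // The buffer can now be executed safely.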
+
+ // Flush the local instruction pipeline, forcing a reload of any instructions
+ // beyond this barrier from the icache.
+ static void FlushExecutionContext();
+
+ // Read and interpret the ID registers. This requires
+ // CPUFeatures::kIDRegisterEmulation, and therefore cannot be called on
+ // non-AArch64 platforms.
+ static CPUFeatures InferCPUFeaturesFromIDRegisters();
+
+ // Read and interpret CPUFeatures reported by the OS. Failed queries (or
+ // unsupported platforms) return an empty list. Note that this is
+ // indistinguishable from a successful query on a platform that advertises no
+ // features.
+ //
+ // Non-AArch64 hosts are considered to be unsupported platforms, and this
+ // function returns an empty list.
+ static CPUFeatures InferCPUFeaturesFromOS(
+ CPUFeatures::QueryIDRegistersOption option =
+ CPUFeatures::kQueryIDRegistersIfAvailable);
+
+ // Handle tagged pointers.
+ template <typename T>
+ static T SetPointerTag(T pointer, uint64_t tag) {
+ VIXL_ASSERT(IsUintN(kAddressTagWidth, tag));
+
+ // Use C-style casts to get static_cast behaviour for integral types (T),
+ // and reinterpret_cast behaviour for other types.
+
+ uint64_t raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+ raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
+ return (T)raw;
+ }
+
+ template <typename T>
+ static uint64_t GetPointerTag(T pointer) {
+ // Use C-style casts to get static_cast behaviour for integral types (T),
+ // and reinterpret_cast behaviour for other types.
+
+ uint64_t raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+ return (raw & kAddressTagMask) >> kAddressTagOffset;
+ }
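+
+  // Illustrative round-trip (sketch only; `ptr` is hypothetical):
+  //
+  //   void* tagged = CPU::SetPointerTag(ptr, 0x2a);
+  //   VIXL_ASSERT(CPU::GetPointerTag(tagged) == 0x2a);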
+
+ private:
+#define VIXL_AARCH64_ID_REG_LIST(V) \
+ V(AA64PFR0) \
+ V(AA64PFR1) \
+ V(AA64ISAR0) \
+ V(AA64ISAR1) \
+ V(AA64MMFR1)
+
+#define VIXL_READ_ID_REG(NAME) static NAME Read##NAME();
+ // On native AArch64 platforms, read the named CPU ID registers. These require
+ // CPUFeatures::kIDRegisterEmulation, and should not be called on non-AArch64
+ // platforms.
+ VIXL_AARCH64_ID_REG_LIST(VIXL_READ_ID_REG)
+#undef VIXL_READ_ID_REG
+
+ // Return the content of the cache type register.
+ static uint32_t GetCacheType();
+
+ // I and D cache line size in bytes.
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+};
+
+} // namespace vixl
+
+#endif // VIXL_CPU_AARCH64_H
diff --git a/js/src/jit/arm64/vixl/Debugger-vixl.cpp b/js/src/jit/arm64/vixl/Debugger-vixl.cpp
new file mode 100644
index 0000000000..fa3e15601e
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Debugger-vixl.cpp
@@ -0,0 +1,1535 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL ARM LIMITED BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jstypes.h"
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include "jit/arm64/vixl/Debugger-vixl.h"
+
+#include "mozilla/Vector.h"
+
+#include "js/AllocPolicy.h"
+
+namespace vixl {
+
+// List of commands supported by the debugger.
+#define DEBUG_COMMAND_LIST(C) \
+C(HelpCommand) \
+C(ContinueCommand) \
+C(StepCommand) \
+C(DisasmCommand) \
+C(PrintCommand) \
+C(ExamineCommand)
+
+// Debugger command lines are broken up into tokens of different types to make
+// later processing easier.
+class Token {
+ public:
+ virtual ~Token() {}
+
+ // Token type.
+ virtual bool IsRegister() const { return false; }
+ virtual bool IsFPRegister() const { return false; }
+ virtual bool IsIdentifier() const { return false; }
+ virtual bool IsAddress() const { return false; }
+ virtual bool IsInteger() const { return false; }
+ virtual bool IsFormat() const { return false; }
+ virtual bool IsUnknown() const { return false; }
+ // Token properties.
+ virtual bool CanAddressMemory() const { return false; }
+ virtual uint8_t* ToAddress(Debugger* debugger) const = 0;
+ virtual void Print(FILE* out = stdout) const = 0;
+
+ static Token* Tokenize(const char* arg);
+};
+
+typedef mozilla::Vector<Token*, 0, js::SystemAllocPolicy> TokenVector;
+
+// Tokens often hold one value.
+template<typename T> class ValueToken : public Token {
+ public:
+ explicit ValueToken(T value) : value_(value) {}
+ ValueToken() {}
+
+ T value() const { return value_; }
+
+ virtual uint8_t* ToAddress(Debugger* debugger) const override {
+ USE(debugger);
+ VIXL_ABORT();
+ }
+
+ protected:
+ T value_;
+};
+
+// Integer registers (X or W) and their aliases.
+// Format: wn or xn with 0 <= n < 32 or a name in the aliases list.
+class RegisterToken : public ValueToken<const Register> {
+ public:
+ explicit RegisterToken(const Register reg)
+ : ValueToken<const Register>(reg) {}
+
+ virtual bool IsRegister() const override { return true; }
+ virtual bool CanAddressMemory() const override { return value().Is64Bits(); }
+ virtual uint8_t* ToAddress(Debugger* debugger) const override;
+ virtual void Print(FILE* out = stdout) const override;
+ const char* Name() const;
+
+ static Token* Tokenize(const char* arg);
+ static RegisterToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsRegister());
+ return reinterpret_cast<RegisterToken*>(tok);
+ }
+
+ private:
+ static const int kMaxAliasNumber = 4;
+ static const char* kXAliases[kNumberOfRegisters][kMaxAliasNumber];
+ static const char* kWAliases[kNumberOfRegisters][kMaxAliasNumber];
+};
+
+// Floating point registers (D or S).
+// Format: sn or dn with 0 <= n < 32.
+class FPRegisterToken : public ValueToken<const FPRegister> {
+ public:
+ explicit FPRegisterToken(const FPRegister fpreg)
+ : ValueToken<const FPRegister>(fpreg) {}
+
+ virtual bool IsFPRegister() const override { return true; }
+ virtual void Print(FILE* out = stdout) const override;
+
+ static Token* Tokenize(const char* arg);
+ static FPRegisterToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsFPRegister());
+ return reinterpret_cast<FPRegisterToken*>(tok);
+ }
+};
+
+
+// Non-register identifiers.
+// Format: Alphanumeric string starting with a letter.
+class IdentifierToken : public ValueToken<char*> {
+ public:
+ explicit IdentifierToken(const char* name) {
+ size_t size = strlen(name) + 1;
+ value_ = js_pod_malloc<char>(size);
+ strncpy(value_, name, size);
+ }
+ virtual ~IdentifierToken() { js_free(value_); }
+
+ virtual bool IsIdentifier() const override { return true; }
+ virtual bool CanAddressMemory() const override { return strcmp(value(), "pc") == 0; }
+ virtual uint8_t* ToAddress(Debugger* debugger) const override;
+ virtual void Print(FILE* out = stdout) const override;
+
+ static Token* Tokenize(const char* arg);
+ static IdentifierToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsIdentifier());
+ return reinterpret_cast<IdentifierToken*>(tok);
+ }
+};
+
+// 64-bit address literal.
+// Format: 0x... with up to 16 hexadecimal digits.
+class AddressToken : public ValueToken<uint8_t*> {
+ public:
+ explicit AddressToken(uint8_t* address) : ValueToken<uint8_t*>(address) {}
+
+ virtual bool IsAddress() const override { return true; }
+ virtual bool CanAddressMemory() const override { return true; }
+ virtual uint8_t* ToAddress(Debugger* debugger) const override;
+ virtual void Print(FILE* out = stdout) const override;
+
+ static Token* Tokenize(const char* arg);
+ static AddressToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsAddress());
+ return reinterpret_cast<AddressToken*>(tok);
+ }
+};
+
+
+// 64-bit decimal integer literal.
+// Format: n.
+class IntegerToken : public ValueToken<int64_t> {
+ public:
+ explicit IntegerToken(int64_t value) : ValueToken<int64_t>(value) {}
+
+ virtual bool IsInteger() const override { return true; }
+ virtual void Print(FILE* out = stdout) const override;
+
+ static Token* Tokenize(const char* arg);
+ static IntegerToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsInteger());
+ return reinterpret_cast<IntegerToken*>(tok);
+ }
+};
+
+// Literal describing how to print a chunk of data (up to 64 bits).
+// Format: .ln
+// where l (letter) is one of
+// * x: hexadecimal
+// * s: signed integer
+// * u: unsigned integer
+// * f: floating point
+// * i: instruction
+// and n (size) is one of 8, 16, 32 and 64. n should be omitted for
+// instructions.
+class FormatToken : public Token {
+ public:
+ FormatToken() {}
+
+ virtual bool IsFormat() const override { return true; }
+ virtual int SizeOf() const = 0;
+ virtual char type_code() const = 0;
+ virtual void PrintData(void* data, FILE* out = stdout) const = 0;
+ virtual void Print(FILE* out = stdout) const override = 0;
+
+ virtual uint8_t* ToAddress(Debugger* debugger) const override {
+ USE(debugger);
+ VIXL_ABORT();
+ }
+
+ static Token* Tokenize(const char* arg);
+ static FormatToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsFormat());
+ return reinterpret_cast<FormatToken*>(tok);
+ }
+};
+
+
+template<typename T> class Format : public FormatToken {
+ public:
+ Format(const char* fmt, char type_code) : fmt_(fmt), type_code_(type_code) {}
+
+ virtual int SizeOf() const override { return sizeof(T); }
+ virtual char type_code() const override { return type_code_; }
+ virtual void PrintData(void* data, FILE* out = stdout) const override {
+ T value;
+ memcpy(&value, data, sizeof(value));
+ fprintf(out, fmt_, value);
+ }
+ virtual void Print(FILE* out = stdout) const override;
+
+ private:
+ const char* fmt_;
+ char type_code_;
+};
+
+// Tokens which don't fit any of the above.
+class UnknownToken : public Token {
+ public:
+ explicit UnknownToken(const char* arg) {
+ size_t size = strlen(arg) + 1;
+ unknown_ = js_pod_malloc<char>(size);
+ strncpy(unknown_, arg, size);
+ }
+ virtual ~UnknownToken() { js_free(unknown_); }
+ virtual uint8_t* ToAddress(Debugger* debugger) const override {
+ USE(debugger);
+ VIXL_ABORT();
+ }
+
+ virtual bool IsUnknown() const override { return true; }
+ virtual void Print(FILE* out = stdout) const override;
+
+ private:
+ char* unknown_;
+};
+
+
+// All debugger commands must subclass DebugCommand and implement Run, Print
+// and Build. Commands must also define kHelp and kAliases.
+class DebugCommand {
+ public:
+ explicit DebugCommand(Token* name) : name_(IdentifierToken::Cast(name)) {}
+ DebugCommand() : name_(NULL) {}
+ virtual ~DebugCommand() { js_delete(name_); }
+
+ const char* name() { return name_->value(); }
+ // Run the command on the given debugger. The command returns true if
+ // execution should move to the next instruction.
+ virtual bool Run(Debugger * debugger) = 0;
+ virtual void Print(FILE* out = stdout);
+
+ static bool Match(const char* name, const char** aliases);
+ static DebugCommand* Parse(char* line);
+ static void PrintHelp(const char** aliases,
+ const char* args,
+ const char* help);
+
+ private:
+ IdentifierToken* name_;
+};
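+
+// Illustrative sketch of what a new command declaration would look like
+// (hypothetical; not part of the debugger):
+//
+//   class ResetCommand : public DebugCommand {
+//    public:
+//     explicit ResetCommand(Token* name) : DebugCommand(name) {}
+//
+//     virtual bool Run(Debugger* debugger) override;
+//
+//     static DebugCommand* Build(TokenVector&& args);
+//
+//     static const char* kHelp;
+//     static const char* kAliases[];
+//     static const char* kArguments;
+//   };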
+
+// For all commands below, see their respective kHelp and kAliases
+// definitions later in this file.
+class HelpCommand : public DebugCommand {
+ public:
+ explicit HelpCommand(Token* name) : DebugCommand(name) {}
+
+ virtual bool Run(Debugger* debugger) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+};
+
+
+class ContinueCommand : public DebugCommand {
+ public:
+ explicit ContinueCommand(Token* name) : DebugCommand(name) {}
+
+ virtual bool Run(Debugger* debugger) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+};
+
+
+class StepCommand : public DebugCommand {
+ public:
+ StepCommand(Token* name, IntegerToken* count)
+ : DebugCommand(name), count_(count) {}
+ virtual ~StepCommand() { js_delete(count_); }
+
+ int64_t count() { return count_->value(); }
+ virtual bool Run(Debugger* debugger) override;
+ virtual void Print(FILE* out = stdout) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+
+ private:
+ IntegerToken* count_;
+};
+
+class DisasmCommand : public DebugCommand {
+ public:
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+};
+
+
+class PrintCommand : public DebugCommand {
+ public:
+ PrintCommand(Token* name, Token* target, FormatToken* format)
+ : DebugCommand(name), target_(target), format_(format) {}
+ virtual ~PrintCommand() {
+ js_delete(target_);
+ js_delete(format_);
+ }
+
+ Token* target() { return target_; }
+ FormatToken* format() { return format_; }
+ virtual bool Run(Debugger* debugger) override;
+ virtual void Print(FILE* out = stdout) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+
+ private:
+ Token* target_;
+ FormatToken* format_;
+};
+
+class ExamineCommand : public DebugCommand {
+ public:
+ ExamineCommand(Token* name,
+ Token* target,
+ FormatToken* format,
+ IntegerToken* count)
+ : DebugCommand(name), target_(target), format_(format), count_(count) {}
+ virtual ~ExamineCommand() {
+ js_delete(target_);
+ js_delete(format_);
+ js_delete(count_);
+ }
+
+ Token* target() { return target_; }
+ FormatToken* format() { return format_; }
+ IntegerToken* count() { return count_; }
+ virtual bool Run(Debugger* debugger) override;
+ virtual void Print(FILE* out = stdout) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+
+ private:
+ Token* target_;
+ FormatToken* format_;
+ IntegerToken* count_;
+};
+
+// Commands whose name does not match any of the known commands.
+class UnknownCommand : public DebugCommand {
+ public:
+ explicit UnknownCommand(TokenVector&& args) : args_(std::move(args)) {}
+ virtual ~UnknownCommand();
+
+ virtual bool Run(Debugger* debugger) override;
+
+ private:
+ TokenVector args_;
+};
+
+// Commands whose name matches a known command but whose syntax is invalid.
+class InvalidCommand : public DebugCommand {
+ public:
+ InvalidCommand(TokenVector&& args, int index, const char* cause)
+ : args_(std::move(args)), index_(index), cause_(cause) {}
+ virtual ~InvalidCommand();
+
+ virtual bool Run(Debugger* debugger) override;
+
+ private:
+ TokenVector args_;
+ int index_;
+ const char* cause_;
+};
+
+const char* HelpCommand::kAliases[] = { "help", NULL };
+const char* HelpCommand::kArguments = NULL;
+const char* HelpCommand::kHelp = " Print this help.";
+
+const char* ContinueCommand::kAliases[] = { "continue", "c", NULL };
+const char* ContinueCommand::kArguments = NULL;
+const char* ContinueCommand::kHelp = " Resume execution.";
+
+const char* StepCommand::kAliases[] = { "stepi", "si", NULL };
+const char* StepCommand::kArguments = "[n = 1]";
+const char* StepCommand::kHelp = " Execute n next instruction(s).";
+
+const char* DisasmCommand::kAliases[] = { "disasm", "di", NULL };
+const char* DisasmCommand::kArguments = "[n = 10]";
+const char* DisasmCommand::kHelp =
+ " Disassemble n instruction(s) at pc.\n"
+ " This command is equivalent to x pc.i [n = 10]."
+;
+
+const char* PrintCommand::kAliases[] = { "print", "p", NULL };
+const char* PrintCommand::kArguments = "<entity>[.format]";
+const char* PrintCommand::kHelp =
+ " Print the given entity according to the given format.\n"
+ " The format parameter only affects individual registers; it is ignored\n"
+ " for other entities.\n"
+ " <entity> can be one of the following:\n"
+ " * A register name (such as x0, s1, ...).\n"
+ " * 'regs', to print all integer (W and X) registers.\n"
+ " * 'fpregs' to print all floating-point (S and D) registers.\n"
+ " * 'sysregs' to print all system registers (including NZCV).\n"
+ " * 'pc' to print the current program counter.\n"
+;
+
+const char* ExamineCommand::kAliases[] = { "m", "mem", "x", NULL };
+const char* ExamineCommand::kArguments = "<addr>[.format] [n = 10]";
+const char* ExamineCommand::kHelp =
+ " Examine memory. Print n items of memory at address <addr> according to\n"
+ " the given [.format].\n"
+ " Addr can be an immediate address, a register name or pc.\n"
+ " Format is made of a type letter: 'x' (hexadecimal), 's' (signed), 'u'\n"
+ " (unsigned), 'f' (floating point), i (instruction) and a size in bits\n"
+ " when appropriate (8, 16, 32, 64)\n"
+ " E.g 'x sp.x64' will print 10 64-bit words from the stack in\n"
+ " hexadecimal format."
+;
+
+const char* RegisterToken::kXAliases[kNumberOfRegisters][kMaxAliasNumber] = {
+ { "x0", NULL },
+ { "x1", NULL },
+ { "x2", NULL },
+ { "x3", NULL },
+ { "x4", NULL },
+ { "x5", NULL },
+ { "x6", NULL },
+ { "x7", NULL },
+ { "x8", NULL },
+ { "x9", NULL },
+ { "x10", NULL },
+ { "x11", NULL },
+ { "x12", NULL },
+ { "x13", NULL },
+ { "x14", NULL },
+ { "x15", NULL },
+ { "ip0", "x16", NULL },
+ { "ip1", "x17", NULL },
+ { "x18", "pr", NULL },
+ { "x19", NULL },
+ { "x20", NULL },
+ { "x21", NULL },
+ { "x22", NULL },
+ { "x23", NULL },
+ { "x24", NULL },
+ { "x25", NULL },
+ { "x26", NULL },
+ { "x27", NULL },
+ { "x28", NULL },
+ { "fp", "x29", NULL },
+ { "lr", "x30", NULL },
+ { "sp", NULL}
+};
+
+const char* RegisterToken::kWAliases[kNumberOfRegisters][kMaxAliasNumber] = {
+ { "w0", NULL },
+ { "w1", NULL },
+ { "w2", NULL },
+ { "w3", NULL },
+ { "w4", NULL },
+ { "w5", NULL },
+ { "w6", NULL },
+ { "w7", NULL },
+ { "w8", NULL },
+ { "w9", NULL },
+ { "w10", NULL },
+ { "w11", NULL },
+ { "w12", NULL },
+ { "w13", NULL },
+ { "w14", NULL },
+ { "w15", NULL },
+ { "w16", NULL },
+ { "w17", NULL },
+ { "w18", NULL },
+ { "w19", NULL },
+ { "w20", NULL },
+ { "w21", NULL },
+ { "w22", NULL },
+ { "w23", NULL },
+ { "w24", NULL },
+ { "w25", NULL },
+ { "w26", NULL },
+ { "w27", NULL },
+ { "w28", NULL },
+ { "w29", NULL },
+ { "w30", NULL },
+ { "wsp", NULL }
+};
+
+
+Debugger::Debugger(Decoder* decoder, FILE* stream)
+ : Simulator(decoder, stream),
+ debug_parameters_(DBG_INACTIVE),
+ pending_request_(false),
+ steps_(0),
+ last_command_(NULL) {
+ disasm_ = js_new<PrintDisassembler>(stdout);
+ printer_ = js_new<Decoder>();
+ printer_->AppendVisitor(disasm_);
+}
+
+
+Debugger::~Debugger() {
+ js_delete(disasm_);
+ js_delete(printer_);
+}
+
+
+void Debugger::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ if (pending_request()) RunDebuggerShell();
+ ExecuteInstruction();
+ LogAllWrittenRegisters();
+ }
+}
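+
+// Illustrative driver sketch (hypothetical code, not part of this patch): a
+// host embeds the debugger by wrapping a Decoder and calling Run(). Setting
+// DBG_ACTIVE drops into the shell before the next instruction executes.
+//
+//   Decoder decoder;
+//   Debugger debugger(&decoder);                // output stream defaults to stdout
+//   debugger.set_debug_parameters(DBG_ACTIVE);  // request the shell
+//   debugger.Run();                             // runs until kEndOfSimAddress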
+
+
+void Debugger::PrintInstructions(const void* address, int64_t count) {
+ if (count == 0) {
+ return;
+ }
+
+ const Instruction* from = Instruction::CastConst(address);
+ if (count < 0) {
+ count = -count;
+ from -= (count - 1) * kInstructionSize;
+ }
+ const Instruction* to = from + count * kInstructionSize;
+
+ for (const Instruction* current = from;
+ current < to;
+ current = current->NextInstruction()) {
+ printer_->Decode(current);
+ }
+}
+
+
+void Debugger::PrintMemory(const uint8_t* address,
+ const FormatToken* format,
+ int64_t count) {
+ if (count == 0) {
+ return;
+ }
+
+ const uint8_t* from = address;
+ int size = format->SizeOf();
+ if (count < 0) {
+ count = -count;
+ from -= (count - 1) * size;
+ }
+ const uint8_t* to = from + count * size;
+
+ for (const uint8_t* current = from; current < to; current += size) {
+ if (((current - from) % 8) == 0) {
+ printf("\n%p: ", current);
+ }
+
+ uint64_t data = Memory::Read<uint64_t>(current);
+ format->PrintData(&data);
+ printf(" ");
+ }
+ printf("\n\n");
+}
+
+
+void Debugger::PrintRegister(const Register& target_reg,
+ const char* name,
+ const FormatToken* format) {
+ const uint64_t reg_size = target_reg.size();
+ const uint64_t format_size = format->SizeOf() * 8;
+ const uint64_t count = reg_size / format_size;
+ const uint64_t mask = 0xffffffffffffffff >> (64 - format_size);
+ const uint64_t reg_value = reg<uint64_t>(target_reg.code(),
+ Reg31IsStackPointer);
+ VIXL_ASSERT(count > 0);
+
+ printf("%s = ", name);
+ for (uint64_t i = 1; i <= count; i++) {
+ uint64_t data = reg_value >> (reg_size - (i * format_size));
+ data &= mask;
+ format->PrintData(&data);
+ printf(" ");
+ }
+ printf("\n");
+}
+
+
+// TODO(all): fix this for vector registers.
+void Debugger::PrintFPRegister(const FPRegister& target_fpreg,
+ const FormatToken* format) {
+ const unsigned fpreg_size = target_fpreg.size();
+ const uint64_t format_size = format->SizeOf() * 8;
+ const uint64_t count = fpreg_size / format_size;
+ const uint64_t mask = 0xffffffffffffffff >> (64 - format_size);
+ const uint64_t fpreg_value = vreg<uint64_t>(fpreg_size, target_fpreg.code());
+ VIXL_ASSERT(count > 0);
+
+ if (target_fpreg.Is32Bits()) {
+ printf("s%u = ", target_fpreg.code());
+ } else {
+ printf("d%u = ", target_fpreg.code());
+ }
+ for (uint64_t i = 1; i <= count; i++) {
+ uint64_t data = fpreg_value >> (fpreg_size - (i * format_size));
+ data &= mask;
+ format->PrintData(&data);
+ printf(" ");
+ }
+ printf("\n");
+}
+
+
+void Debugger::VisitException(const Instruction* instr) {
+ switch (instr->Mask(ExceptionMask)) {
+ case BRK:
+ DoBreakpoint(instr);
+ return;
+ case HLT:
+ VIXL_FALLTHROUGH();
+ default: Simulator::VisitException(instr);
+ }
+}
+
+
+// Read a command. A command will be at most kMaxDebugShellLine characters
+// long and will end with '\n\0'.
+// TODO: Should this be a utility function?
+char* Debugger::ReadCommandLine(const char* prompt, char* buffer, int length) {
+ int fgets_calls = 0;
+ char* end = NULL;
+
+ printf("%s", prompt);
+ fflush(stdout);
+
+ do {
+ if (fgets(buffer, length, stdin) == NULL) {
+ printf(" ** Error while reading command. **\n");
+ return NULL;
+ }
+
+ fgets_calls++;
+ end = strchr(buffer, '\n');
+ } while (end == NULL);
+
+ if (fgets_calls != 1) {
+ printf(" ** Command too long. **\n");
+ return NULL;
+ }
+
+ // Remove the newline from the end of the command.
+ VIXL_ASSERT(end[1] == '\0');
+ VIXL_ASSERT((end - buffer) < (length - 1));
+ end[0] = '\0';
+
+ return buffer;
+}
+
+
+void Debugger::RunDebuggerShell() {
+ if (IsDebuggerRunning()) {
+ if (steps_ > 0) {
+ // Finish stepping first.
+ --steps_;
+ return;
+ }
+
+ printf("Next: ");
+ PrintInstructions(pc());
+ bool done = false;
+ while (!done) {
+ char buffer[kMaxDebugShellLine];
+ char* line = ReadCommandLine("vixl> ", buffer, kMaxDebugShellLine);
+
+ if (line == NULL) continue; // An error occurred.
+
+ DebugCommand* command = DebugCommand::Parse(line);
+ if (command != NULL) {
+ last_command_ = command;
+ }
+
+ if (last_command_ != NULL) {
+ done = last_command_->Run(this);
+ } else {
+ printf("No previous command to run!\n");
+ }
+ }
+
+ if ((debug_parameters_ & DBG_BREAK) != 0) {
+ // The break request has now been handled, move to next instruction.
+ debug_parameters_ &= ~DBG_BREAK;
+ increment_pc();
+ }
+ }
+}
+
+
+void Debugger::DoBreakpoint(const Instruction* instr) {
+ VIXL_ASSERT(instr->Mask(ExceptionMask) == BRK);
+
+ printf("Hit breakpoint at pc=%p.\n", reinterpret_cast<const void*>(instr));
+ set_debug_parameters(debug_parameters() | DBG_BREAK | DBG_ACTIVE);
+ // Make the shell point to the brk instruction.
+ set_pc(instr);
+}
+
+
+static bool StringToUInt64(uint64_t* value, const char* line, int base = 10) {
+ char* endptr = NULL;
+ errno = 0; // Reset errors.
+ uint64_t parsed = strtoul(line, &endptr, base);
+
+ if (errno == ERANGE) {
+ // Overflow.
+ return false;
+ }
+
+ if (endptr == line) {
+ // No digits were parsed.
+ return false;
+ }
+
+ if (*endptr != '\0') {
+ // Non-digit characters present at the end.
+ return false;
+ }
+
+ *value = parsed;
+ return true;
+}
+
+
+static bool StringToInt64(int64_t* value, const char* line, int base = 10) {
+ char* endptr = NULL;
+ errno = 0; // Reset errors.
+ int64_t parsed = strtol(line, &endptr, base);
+
+ if (errno == ERANGE) {
+    // Overflow or underflow.
+ return false;
+ }
+
+ if (endptr == line) {
+ // No digits were parsed.
+ return false;
+ }
+
+ if (*endptr != '\0') {
+ // Non-digit characters present at the end.
+ return false;
+ }
+
+ *value = parsed;
+ return true;
+}
+
+
+Token* Token::Tokenize(const char* arg) {
+ if ((arg == NULL) || (*arg == '\0')) {
+ return NULL;
+ }
+
+  // The order is important. For example, IdentifierToken::Tokenize would
+  // consider any register name to be a valid identifier.
+
+ Token* token = RegisterToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = FPRegisterToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = IdentifierToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = AddressToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = IntegerToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ return js_new<UnknownToken>(arg);
+}
+
+
+uint8_t* RegisterToken::ToAddress(Debugger* debugger) const {
+ VIXL_ASSERT(CanAddressMemory());
+ uint64_t reg_value = debugger->xreg(value().code(), Reg31IsStackPointer);
+ uint8_t* address = NULL;
+ memcpy(&address, &reg_value, sizeof(address));
+ return address;
+}
+
+
+void RegisterToken::Print(FILE* out) const {
+ VIXL_ASSERT(value().IsValid());
+ fprintf(out, "[Register %s]", Name());
+}
+
+
+const char* RegisterToken::Name() const {
+ if (value().Is32Bits()) {
+ return kWAliases[value().code()][0];
+ } else {
+ return kXAliases[value().code()][0];
+ }
+}
+
+
+Token* RegisterToken::Tokenize(const char* arg) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ // Is it a X register or alias?
+ for (const char** current = kXAliases[i]; *current != NULL; current++) {
+ if (strcmp(arg, *current) == 0) {
+ return js_new<RegisterToken>(Register::XRegFromCode(i));
+ }
+ }
+
+ // Is it a W register or alias?
+ for (const char** current = kWAliases[i]; *current != NULL; current++) {
+ if (strcmp(arg, *current) == 0) {
+ return js_new<RegisterToken>(Register::WRegFromCode(i));
+ }
+ }
+ }
+
+ return NULL;
+}
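+
+// For illustration, given the alias tables above: "fp" and "x29" both yield
+// the X register with code 29, "w29" yields its 32-bit W view, and a name
+// that matches no alias (e.g. "x31") falls through and returns NULL.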
+
+
+void FPRegisterToken::Print(FILE* out) const {
+ VIXL_ASSERT(value().IsValid());
+ char prefix = value().Is32Bits() ? 's' : 'd';
+ fprintf(out, "[FPRegister %c%" PRIu32 "]", prefix, value().code());
+}
+
+
+Token* FPRegisterToken::Tokenize(const char* arg) {
+ if (strlen(arg) < 2) {
+ return NULL;
+ }
+
+ switch (*arg) {
+ case 's':
+ case 'd':
+ const char* cursor = arg + 1;
+ uint64_t code = 0;
+ if (!StringToUInt64(&code, cursor)) {
+ return NULL;
+ }
+
+      if (code >= kNumberOfFPRegisters) {
+ return NULL;
+ }
+
+ VRegister fpreg = NoVReg;
+ switch (*arg) {
+ case 's':
+ fpreg = VRegister::SRegFromCode(static_cast<unsigned>(code));
+ break;
+ case 'd':
+ fpreg = VRegister::DRegFromCode(static_cast<unsigned>(code));
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+
+ return js_new<FPRegisterToken>(fpreg);
+ }
+
+ return NULL;
+}
+
+
+uint8_t* IdentifierToken::ToAddress(Debugger* debugger) const {
+ VIXL_ASSERT(CanAddressMemory());
+ const Instruction* pc_value = debugger->pc();
+ uint8_t* address = NULL;
+ memcpy(&address, &pc_value, sizeof(address));
+ return address;
+}
+
+void IdentifierToken::Print(FILE* out) const {
+ fprintf(out, "[Identifier %s]", value());
+}
+
+
+Token* IdentifierToken::Tokenize(const char* arg) {
+ if (!isalpha(arg[0])) {
+ return NULL;
+ }
+
+ const char* cursor = arg + 1;
+ while ((*cursor != '\0') && isalnum(*cursor)) {
+ ++cursor;
+ }
+
+ if (*cursor == '\0') {
+ return js_new<IdentifierToken>(arg);
+ }
+
+ return NULL;
+}
+
+
+uint8_t* AddressToken::ToAddress(Debugger* debugger) const {
+ USE(debugger);
+ return value();
+}
+
+
+void AddressToken::Print(FILE* out) const {
+ fprintf(out, "[Address %p]", value());
+}
+
+
+Token* AddressToken::Tokenize(const char* arg) {
+ if ((strlen(arg) < 3) || (arg[0] != '0') || (arg[1] != 'x')) {
+ return NULL;
+ }
+
+ uint64_t ptr = 0;
+ if (!StringToUInt64(&ptr, arg, 16)) {
+ return NULL;
+ }
+
+ uint8_t* address = reinterpret_cast<uint8_t*>(ptr);
+ return js_new<AddressToken>(address);
+}
+
+
+void IntegerToken::Print(FILE* out) const {
+ fprintf(out, "[Integer %" PRId64 "]", value());
+}
+
+
+Token* IntegerToken::Tokenize(const char* arg) {
+ int64_t value = 0;
+ if (!StringToInt64(&value, arg)) {
+ return NULL;
+ }
+
+ return js_new<IntegerToken>(value);
+}
+
+
+Token* FormatToken::Tokenize(const char* arg) {
+ size_t length = strlen(arg);
+ switch (arg[0]) {
+ case 'x':
+ case 's':
+ case 'u':
+ case 'f':
+ if (length == 1) return NULL;
+ break;
+ case 'i':
+ if (length == 1) return js_new<Format<uint32_t>>("%08" PRIx32, 'i');
+ VIXL_FALLTHROUGH();
+ default: return NULL;
+ }
+
+ char* endptr = NULL;
+ errno = 0; // Reset errors.
+ uint64_t count = strtoul(arg + 1, &endptr, 10);
+
+ if (errno != 0) {
+ // Overflow, etc.
+ return NULL;
+ }
+
+ if (endptr == arg) {
+ // No digits were parsed.
+ return NULL;
+ }
+
+ if (*endptr != '\0') {
+ // There are unexpected (non-digit) characters after the number.
+ return NULL;
+ }
+
+ switch (arg[0]) {
+ case 'x':
+ switch (count) {
+ case 8: return js_new<Format<uint8_t>>("%02" PRIx8, 'x');
+ case 16: return js_new<Format<uint16_t>>("%04" PRIx16, 'x');
+ case 32: return js_new<Format<uint32_t>>("%08" PRIx32, 'x');
+ case 64: return js_new<Format<uint64_t>>("%016" PRIx64, 'x');
+ default: return NULL;
+ }
+ case 's':
+ switch (count) {
+ case 8: return js_new<Format<int8_t>>("%4" PRId8, 's');
+ case 16: return js_new<Format<int16_t>>("%6" PRId16, 's');
+ case 32: return js_new<Format<int32_t>>("%11" PRId32, 's');
+ case 64: return js_new<Format<int64_t>>("%20" PRId64, 's');
+ default: return NULL;
+ }
+ case 'u':
+ switch (count) {
+ case 8: return js_new<Format<uint8_t>>("%3" PRIu8, 'u');
+ case 16: return js_new<Format<uint16_t>>("%5" PRIu16, 'u');
+ case 32: return js_new<Format<uint32_t>>("%10" PRIu32, 'u');
+ case 64: return js_new<Format<uint64_t>>("%20" PRIu64, 'u');
+ default: return NULL;
+ }
+ case 'f':
+ switch (count) {
+ case 32: return js_new<Format<float>>("%13g", 'f');
+ case 64: return js_new<Format<double>>("%13g", 'f');
+ default: return NULL;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ return NULL;
+ }
+}
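+
+// For illustration, some format strings accepted above and the tokens they
+// produce:
+//   "x64" -> Format<uint64_t>("%016" PRIx64, 'x')  64-bit hexadecimal
+//   "s32" -> Format<int32_t>("%11" PRId32, 's')    32-bit signed decimal
+//   "f64" -> Format<double>("%13g", 'f')           double-precision float
+//   "i"   -> Format<uint32_t>("%08" PRIx32, 'i')   instruction word
+// A bare type letter other than 'i' (e.g. "x"), an unsupported width (e.g.
+// "u7") or a sized 'i' (e.g. "i32") is rejected and NULL is returned.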
+
+
+template<typename T>
+void Format<T>::Print(FILE* out) const {
+ unsigned size = sizeof(T) * 8;
+ fprintf(out, "[Format %c%u - %s]", type_code_, size, fmt_);
+}
+
+
+void UnknownToken::Print(FILE* out) const {
+ fprintf(out, "[Unknown %s]", unknown_);
+}
+
+
+void DebugCommand::Print(FILE* out) {
+ fprintf(out, "%s", name());
+}
+
+
+bool DebugCommand::Match(const char* name, const char** aliases) {
+ for (const char** current = aliases; *current != NULL; current++) {
+ if (strcmp(name, *current) == 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+DebugCommand* DebugCommand::Parse(char* line) {
+ TokenVector args;
+
+ for (char* chunk = strtok(line, " \t");
+ chunk != NULL;
+ chunk = strtok(NULL, " \t")) {
+ char* dot = strchr(chunk, '.');
+ if (dot != NULL) {
+ // 'Token.format'.
+ Token* format = FormatToken::Tokenize(dot + 1);
+ if (format != NULL) {
+ *dot = '\0';
+ (void)args.append(Token::Tokenize(chunk));
+ (void)args.append(format);
+ } else {
+ // Error while parsing the format, push the UnknownToken so an error
+ // can be accurately reported.
+ (void)args.append(Token::Tokenize(chunk));
+ }
+ } else {
+ (void)args.append(Token::Tokenize(chunk));
+ }
+ }
+
+ if (args.empty()) {
+ return NULL;
+ }
+
+ if (!args[0]->IsIdentifier()) {
+ return js_new<InvalidCommand>(std::move(args), 0, "command name is not valid");
+ }
+
+ const char* name = IdentifierToken::Cast(args[0])->value();
+ #define RETURN_IF_MATCH(Command) \
+ if (Match(name, Command::kAliases)) { \
+ return Command::Build(std::move(args)); \
+ }
+ DEBUG_COMMAND_LIST(RETURN_IF_MATCH);
+ #undef RETURN_IF_MATCH
+
+ return js_new<UnknownCommand>(std::move(args));
+}
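+
+// Parsing walkthrough (illustrative): the line "x sp.x64 4" is split on
+// whitespace into "x", "sp.x64" and "4". The '.' in "sp.x64" produces a
+// RegisterToken for sp followed by a FormatToken, so the argument vector is
+//   [Identifier x] [Register sp] [Format x64] [Integer 4]
+// "x" matches ExamineCommand::kAliases, so ExamineCommand::Build() is called
+// with that vector.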
+
+
+void DebugCommand::PrintHelp(const char** aliases,
+ const char* args,
+ const char* help) {
+ VIXL_ASSERT(aliases[0] != NULL);
+ VIXL_ASSERT(help != NULL);
+
+ printf("\n----\n\n");
+ for (const char** current = aliases; *current != NULL; current++) {
+ if (args != NULL) {
+ printf("%s %s\n", *current, args);
+ } else {
+ printf("%s\n", *current);
+ }
+ }
+ printf("\n%s\n", help);
+}
+
+
+bool HelpCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+ USE(debugger);
+
+ #define PRINT_HELP(Command) \
+ DebugCommand::PrintHelp(Command::kAliases, \
+ Command::kArguments, \
+ Command::kHelp);
+ DEBUG_COMMAND_LIST(PRINT_HELP);
+ #undef PRINT_HELP
+ printf("\n----\n\n");
+
+ return false;
+}
+
+
+DebugCommand* HelpCommand::Build(TokenVector&& args) {
+ if (args.length() != 1) {
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<HelpCommand>(args[0]);
+}
+
+
+bool ContinueCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ debugger->set_debug_parameters(debugger->debug_parameters() & ~DBG_ACTIVE);
+ return true;
+}
+
+
+DebugCommand* ContinueCommand::Build(TokenVector&& args) {
+ if (args.length() != 1) {
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<ContinueCommand>(args[0]);
+}
+
+
+bool StepCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ int64_t steps = count();
+ if (steps < 0) {
+ printf(" ** invalid value for steps: %" PRId64 " (<0) **\n", steps);
+ } else if (steps > 1) {
+ debugger->set_steps(steps - 1);
+ }
+
+ return true;
+}
+
+
+void StepCommand::Print(FILE* out) {
+ fprintf(out, "%s %" PRId64 "", name(), count());
+}
+
+
+DebugCommand* StepCommand::Build(TokenVector&& args) {
+ IntegerToken* count = NULL;
+ switch (args.length()) {
+ case 1: { // step [1]
+ count = js_new<IntegerToken>(1);
+ break;
+ }
+ case 2: { // step n
+ Token* first = args[1];
+ if (!first->IsInteger()) {
+ return js_new<InvalidCommand>(std::move(args), 1, "expects int");
+ }
+ count = IntegerToken::Cast(first);
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<StepCommand>(args[0], count);
+}
+
+
+DebugCommand* DisasmCommand::Build(TokenVector&& args) {
+ IntegerToken* count = NULL;
+ switch (args.length()) {
+ case 1: { // disasm [10]
+ count = js_new<IntegerToken>(10);
+ break;
+ }
+ case 2: { // disasm n
+ Token* first = args[1];
+ if (!first->IsInteger()) {
+ return js_new<InvalidCommand>(std::move(args), 1, "expects int");
+ }
+
+ count = IntegerToken::Cast(first);
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ Token* target = js_new<IdentifierToken>("pc");
+ FormatToken* format = js_new<Format<uint32_t>>("%08" PRIx32, 'i');
+ return js_new<ExamineCommand>(args[0], target, format, count);
+}
+
+
+void PrintCommand::Print(FILE* out) {
+ fprintf(out, "%s ", name());
+ target()->Print(out);
+ if (format() != NULL) format()->Print(out);
+}
+
+
+bool PrintCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ Token* tok = target();
+ if (tok->IsIdentifier()) {
+ char* identifier = IdentifierToken::Cast(tok)->value();
+ if (strcmp(identifier, "regs") == 0) {
+ debugger->PrintRegisters();
+ } else if (strcmp(identifier, "fpregs") == 0) {
+ debugger->PrintVRegisters();
+ } else if (strcmp(identifier, "sysregs") == 0) {
+ debugger->PrintSystemRegisters();
+ } else if (strcmp(identifier, "pc") == 0) {
+ printf("pc = %16p\n", reinterpret_cast<const void*>(debugger->pc()));
+ } else {
+ printf(" ** Unknown identifier to print: %s **\n", identifier);
+ }
+
+ return false;
+ }
+
+ FormatToken* format_tok = format();
+ VIXL_ASSERT(format_tok != NULL);
+ if (format_tok->type_code() == 'i') {
+ // TODO(all): Add support for instruction disassembly.
+ printf(" ** unsupported format: instructions **\n");
+ return false;
+ }
+
+ if (tok->IsRegister()) {
+ RegisterToken* reg_tok = RegisterToken::Cast(tok);
+ Register reg = reg_tok->value();
+ debugger->PrintRegister(reg, reg_tok->Name(), format_tok);
+ return false;
+ }
+
+ if (tok->IsFPRegister()) {
+ FPRegister fpreg = FPRegisterToken::Cast(tok)->value();
+ debugger->PrintFPRegister(fpreg, format_tok);
+ return false;
+ }
+
+ VIXL_UNREACHABLE();
+ return false;
+}
+
+
+DebugCommand* PrintCommand::Build(TokenVector&& args) {
+ if (args.length() < 2) {
+ return js_new<InvalidCommand>(std::move(args), -1, "too few arguments");
+ }
+
+ Token* target = args[1];
+ if (!target->IsRegister() &&
+ !target->IsFPRegister() &&
+ !target->IsIdentifier()) {
+ return js_new<InvalidCommand>(std::move(args), 1, "expects reg or identifier");
+ }
+
+ FormatToken* format = NULL;
+ int target_size = 0;
+ if (target->IsRegister()) {
+ Register reg = RegisterToken::Cast(target)->value();
+ target_size = reg.SizeInBytes();
+ } else if (target->IsFPRegister()) {
+ FPRegister fpreg = FPRegisterToken::Cast(target)->value();
+ target_size = fpreg.SizeInBytes();
+ }
+ // If the target is an identifier there must be no format. This is checked
+ // in the switch statement below.
+
+ switch (args.length()) {
+ case 2: {
+ if (target->IsRegister()) {
+ switch (target_size) {
+ case 4: format = js_new<Format<uint32_t>>("%08" PRIx32, 'x'); break;
+ case 8: format = js_new<Format<uint64_t>>("%016" PRIx64, 'x'); break;
+ default: VIXL_UNREACHABLE();
+ }
+ } else if (target->IsFPRegister()) {
+ switch (target_size) {
+ case 4: format = js_new<Format<float>>("%8g", 'f'); break;
+ case 8: format = js_new<Format<double>>("%8g", 'f'); break;
+ default: VIXL_UNREACHABLE();
+ }
+ }
+ break;
+ }
+ case 3: {
+ if (target->IsIdentifier()) {
+ return js_new<InvalidCommand>(std::move(args), 2,
+ "format is only allowed with registers");
+ }
+
+ Token* second = args[2];
+ if (!second->IsFormat()) {
+ return js_new<InvalidCommand>(std::move(args), 2, "expects format");
+ }
+ format = FormatToken::Cast(second);
+
+ if (format->SizeOf() > target_size) {
+ return js_new<InvalidCommand>(std::move(args), 2, "format too wide");
+ }
+
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<PrintCommand>(args[0], target, format);
+}
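+
+// For illustration, with the defaults chosen above:
+//   "print x0"     shows x0 as one 64-bit hexadecimal value,
+//   "print s1"     shows s1 as a single-precision float, and
+//   "print x0.s32" overrides the default and shows both 32-bit halves of x0
+//                  as signed decimals (see PrintRegister above).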
+
+
+bool ExamineCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ uint8_t* address = target()->ToAddress(debugger);
+ int64_t amount = count()->value();
+ if (format()->type_code() == 'i') {
+ debugger->PrintInstructions(address, amount);
+ } else {
+ debugger->PrintMemory(address, format(), amount);
+ }
+
+ return false;
+}
+
+
+void ExamineCommand::Print(FILE* out) {
+ fprintf(out, "%s ", name());
+ format()->Print(out);
+ target()->Print(out);
+}
+
+
+DebugCommand* ExamineCommand::Build(TokenVector&& args) {
+ if (args.length() < 2) {
+ return js_new<InvalidCommand>(std::move(args), -1, "too few arguments");
+ }
+
+ Token* target = args[1];
+ if (!target->CanAddressMemory()) {
+ return js_new<InvalidCommand>(std::move(args), 1, "expects address");
+ }
+
+ FormatToken* format = NULL;
+ IntegerToken* count = NULL;
+
+ switch (args.length()) {
+ case 2: { // mem addr[.x64] [10]
+ format = js_new<Format<uint64_t>>("%016" PRIx64, 'x');
+ count = js_new<IntegerToken>(10);
+ break;
+ }
+ case 3: { // mem addr.format [10]
+ // mem addr[.x64] n
+ Token* second = args[2];
+ if (second->IsFormat()) {
+ format = FormatToken::Cast(second);
+ count = js_new<IntegerToken>(10);
+ break;
+      } else if (second->IsInteger()) {
+        format = js_new<Format<uint64_t>>("%016" PRIx64, 'x');
+        count = IntegerToken::Cast(second);
+        break;
+      } else {
+        return js_new<InvalidCommand>(std::move(args), 2, "expects format or integer");
+      }
+      VIXL_UNREACHABLE();
+      break;
+ }
+ case 4: { // mem addr.format n
+ Token* second = args[2];
+ Token* third = args[3];
+ if (!second->IsFormat() || !third->IsInteger()) {
+ return js_new<InvalidCommand>(std::move(args), -1, "expects addr[.format] [n]");
+ }
+ format = FormatToken::Cast(second);
+ count = IntegerToken::Cast(third);
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<ExamineCommand>(args[0], target, format, count);
+}
+
+
+UnknownCommand::~UnknownCommand() {
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ js_delete(args_[i]);
+ }
+}
+
+
+bool UnknownCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+ USE(debugger);
+
+ printf(" ** Unknown Command:");
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ printf(" ");
+ args_[i]->Print(stdout);
+ }
+ printf(" **\n");
+
+ return false;
+}
+
+
+InvalidCommand::~InvalidCommand() {
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ js_delete(args_[i]);
+ }
+}
+
+
+bool InvalidCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+ USE(debugger);
+
+ printf(" ** Invalid Command:");
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ printf(" ");
+ if (i == static_cast<size_t>(index_)) {
+ printf(">>");
+ args_[i]->Print(stdout);
+ printf("<<");
+ } else {
+ args_[i]->Print(stdout);
+ }
+ }
+ printf(" **\n");
+ printf(" ** %s\n", cause_);
+
+ return false;
+}
+
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/Debugger-vixl.h b/js/src/jit/arm64/vixl/Debugger-vixl.h
new file mode 100644
index 0000000000..7236bf1e5e
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Debugger-vixl.h
@@ -0,0 +1,117 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef JS_SIMULATOR_ARM64
+
+#ifndef VIXL_A64_DEBUGGER_A64_H_
+#define VIXL_A64_DEBUGGER_A64_H_
+
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Simulator-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+// Flags that represent the debugger state.
+enum DebugParameters {
+ DBG_INACTIVE = 0,
+ DBG_ACTIVE = 1 << 0, // The debugger is active.
+ DBG_BREAK = 1 << 1 // The debugger is at a breakpoint.
+};
+
+// Forward declarations.
+class DebugCommand;
+class Token;
+class FormatToken;
+
+class Debugger : public Simulator {
+ public:
+ explicit Debugger(Decoder* decoder, FILE* stream = stdout);
+ ~Debugger();
+
+ virtual void Run() override;
+ virtual void VisitException(const Instruction* instr) override;
+
+ int debug_parameters() const { return debug_parameters_; }
+ void set_debug_parameters(int parameters) {
+ debug_parameters_ = parameters;
+
+ update_pending_request();
+ }
+
+  // Number of instructions to execute before control is given back to the
+  // debugger shell.
+ int64_t steps() const { return steps_; }
+ void set_steps(int64_t value) {
+ VIXL_ASSERT(value > 1);
+ steps_ = value;
+ }
+
+ bool IsDebuggerRunning() const {
+ return (debug_parameters_ & DBG_ACTIVE) != 0;
+ }
+
+ bool pending_request() const { return pending_request_; }
+ void update_pending_request() {
+ pending_request_ = IsDebuggerRunning();
+ }
+
+ void PrintInstructions(const void* address, int64_t count = 1);
+ void PrintMemory(const uint8_t* address,
+ const FormatToken* format,
+ int64_t count = 1);
+ void PrintRegister(const Register& target_reg,
+ const char* name,
+ const FormatToken* format);
+ void PrintFPRegister(const FPRegister& target_fpreg,
+ const FormatToken* format);
+
+ private:
+ char* ReadCommandLine(const char* prompt, char* buffer, int length);
+ void RunDebuggerShell();
+ void DoBreakpoint(const Instruction* instr);
+
+ int debug_parameters_;
+ bool pending_request_;
+ int64_t steps_;
+ DebugCommand* last_command_;
+ PrintDisassembler* disasm_;
+ Decoder* printer_;
+
+  // Maximum length of a command line accepted by the debugger shell.
+ static const int kMaxDebugShellLine = 256;
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_DEBUGGER_A64_H_
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/Decoder-vixl.cpp b/js/src/jit/arm64/vixl/Decoder-vixl.cpp
new file mode 100644
index 0000000000..884654ec8e
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Decoder-vixl.cpp
@@ -0,0 +1,899 @@
+// Copyright 2014, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Decoder-vixl.h"
+
+#include <algorithm>
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+void Decoder::DecodeInstruction(const Instruction *instr) {
+ if (instr->Bits(28, 27) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(27, 24)) {
+ // 0: PC relative addressing.
+ case 0x0: DecodePCRelAddressing(instr); break;
+
+ // 1: Add/sub immediate.
+ case 0x1: DecodeAddSubImmediate(instr); break;
+
+ // A: Logical shifted register.
+ // Add/sub with carry.
+ // Conditional compare register.
+ // Conditional compare immediate.
+ // Conditional select.
+ // Data processing 1 source.
+ // Data processing 2 source.
+ // B: Add/sub shifted register.
+ // Add/sub extended register.
+ // Data processing 3 source.
+ case 0xA:
+ case 0xB: DecodeDataProcessing(instr); break;
+
+ // 2: Logical immediate.
+ // Move wide immediate.
+ case 0x2: DecodeLogical(instr); break;
+
+ // 3: Bitfield.
+ // Extract.
+ case 0x3: DecodeBitfieldExtract(instr); break;
+
+ // 4: Unconditional branch immediate.
+ // Exception generation.
+ // Compare and branch immediate.
+ // 5: Compare and branch immediate.
+ // Conditional branch.
+ // System.
+ // 6,7: Unconditional branch.
+ // Test and branch immediate.
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7: DecodeBranchSystemException(instr); break;
+
+ // 8,9: Load/store register pair post-index.
+ // Load register literal.
+ // Load/store register unscaled immediate.
+ // Load/store register immediate post-index.
+ // Load/store register immediate pre-index.
+ // Load/store register offset.
+ // Load/store exclusive.
+ // C,D: Load/store register pair offset.
+ // Load/store register pair pre-index.
+ // Load/store register unsigned immediate.
+ // Advanced SIMD.
+ case 0x8:
+ case 0x9:
+ case 0xC:
+ case 0xD: DecodeLoadStore(instr); break;
+
+ // E: FP fixed point conversion.
+ // FP integer conversion.
+ // FP data processing 1 source.
+ // FP compare.
+ // FP immediate.
+ // FP data processing 2 source.
+ // FP conditional compare.
+ // FP conditional select.
+ // Advanced SIMD.
+ // F: FP data processing 3 source.
+ // Advanced SIMD.
+ case 0xE:
+ case 0xF: DecodeFP(instr); break;
+ }
+ }
+}
+
+void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
+ MOZ_ALWAYS_TRUE(visitors_.append(new_visitor));
+}
+
+
+void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
+ MOZ_ALWAYS_TRUE(visitors_.insert(visitors_.begin(), new_visitor));
+}
+
+
+void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ for (auto it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ MOZ_ALWAYS_TRUE(visitors_.insert(it, new_visitor));
+ return;
+ }
+ }
+ // We reached the end of the list without finding registered_visitor.
+ MOZ_ALWAYS_TRUE(visitors_.append(new_visitor));
+}
+
+
+void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ for (auto it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ it++;
+ MOZ_ALWAYS_TRUE(visitors_.insert(it, new_visitor));
+ return;
+ }
+ }
+ // We reached the end of the list without finding registered_visitor.
+ MOZ_ALWAYS_TRUE(visitors_.append(new_visitor));
+}
+
+
+void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
+ visitors_.erase(std::remove(visitors_.begin(), visitors_.end(), visitor),
+ visitors_.end());
+}
+
+
+void Decoder::DecodePCRelAddressing(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x0);
+ // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+ // decode.
+ VIXL_ASSERT(instr->Bit(28) == 0x1);
+ VisitPCRelAddressing(instr);
+}
+
+
+void Decoder::DecodeBranchSystemException(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0x4) ||
+ (instr->Bits(27, 24) == 0x5) ||
+ (instr->Bits(27, 24) == 0x6) ||
+ (instr->Bits(27, 24) == 0x7) );
+
+ switch (instr->Bits(31, 29)) {
+ case 0:
+ case 4: {
+ VisitUnconditionalBranch(instr);
+ break;
+ }
+ case 1:
+ case 5: {
+ if (instr->Bit(25) == 0) {
+ VisitCompareBranch(instr);
+ } else {
+ VisitTestBranch(instr);
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(25) == 0) {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Mask(0x01000010) == 0x00000010)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalBranch(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(24) == 0) {
+ if ((instr->Bits(4, 2) != 0) ||
+ (instr->Mask(0x00E0001D) == 0x00200001) ||
+ (instr->Mask(0x00E0001D) == 0x00400001) ||
+ (instr->Mask(0x00E0001E) == 0x00200002) ||
+ (instr->Mask(0x00E0001E) == 0x00400002) ||
+ (instr->Mask(0x00E0001C) == 0x00600000) ||
+ (instr->Mask(0x00E0001C) == 0x00800000) ||
+ (instr->Mask(0x00E0001F) == 0x00A00000) ||
+ (instr->Mask(0x00C0001C) == 0x00C00000)) {
+ if (instr->InstructionBits() == UNDEFINED_INST_PATTERN) {
+ VisitException(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ VisitException(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+ if ((instr->Bits(21, 19) == 0x4) ||
+ (masked_003FF0E0 == 0x00033000) ||
+ (masked_003FF0E0 == 0x003FF020) ||
+ (masked_003FF0E0 == 0x003FF060) ||
+ (masked_003FF0E0 == 0x003FF0E0) ||
+ (instr->Mask(0x00388000) == 0x00008000) ||
+ (instr->Mask(0x0038E000) == 0x00000000) ||
+ (instr->Mask(0x0039E000) == 0x00002000) ||
+ (instr->Mask(0x003AE000) == 0x00002000) ||
+ (instr->Mask(0x003CE000) == 0x00042000) ||
+ (instr->Mask(0x003FFFC0) == 0x000320C0) ||
+ (instr->Mask(0x003FF100) == 0x00032100) ||
+ // (instr->Mask(0x003FF200) == 0x00032200) || // match CSDB
+ (instr->Mask(0x003FF400) == 0x00032400) ||
+ (instr->Mask(0x003FF800) == 0x00032800) ||
+ (instr->Mask(0x0038F000) == 0x00005000) ||
+ (instr->Mask(0x0038E000) == 0x00006000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitSystem(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) ||
+ (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) ||
+ (instr->Bits(24, 22) == 0x3)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitUnconditionalBranchToRegister(instr);
+ }
+ }
+ break;
+ }
+ case 3:
+ case 7: {
+ VisitUnallocated(instr);
+ break;
+ }
+ }
+}
+
+
+void Decoder::DecodeLoadStore(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0x8) ||
+ (instr->Bits(27, 24) == 0x9) ||
+ (instr->Bits(27, 24) == 0xC) ||
+ (instr->Bits(27, 24) == 0xD) );
+ // TODO(all): rearrange the tree to integrate this branch.
+ if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
+ DecodeNEONLoadStore(instr);
+ return;
+ }
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(26) == 0) {
+ VisitLoadStoreExclusive(instr);
+ } else {
+ VIXL_UNREACHABLE();
+ }
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Mask(0xC4400000) == 0xC0400000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePairNonTemporal(instr);
+ }
+ } else {
+ VisitLoadStorePairPostIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Mask(0xC4000000) == 0xC4000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadLiteral(instr);
+ }
+ } else {
+ if ((instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(21) == 0) {
+ switch (instr->Bits(11, 10)) {
+ case 0: {
+ VisitLoadStoreUnscaledOffset(instr);
+ break;
+ }
+ case 1: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePostIndex(instr);
+ }
+ break;
+ }
+ case 2: {
+ // TODO: VisitLoadStoreRegisterOffsetUnpriv.
+ VisitUnimplemented(instr);
+ break;
+ }
+ case 3: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePreIndex(instr);
+ }
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x2) {
+ if (instr->Bit(14) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreRegisterOffset(instr);
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x0) {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(26) == 0) {
+ if ((instr->Bit(15) == 1) &&
+ ((instr->Bits(14, 12) == 0x1) ||
+ (instr->Bit(13) == 1) ||
+ (instr->Bits(14, 12) == 0x5) ||
+ ((instr->Bits(14, 12) == 0x4) &&
+ ((instr->Bit(23) == 0) ||
+ (instr->Bits(23, 22) == 0x3))))) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAtomicMemory(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLoadStorePairOffset(instr);
+ } else {
+ VisitLoadStorePairPreIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreUnsignedOffset(instr);
+ }
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeLogical(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x2);
+
+ if (instr->Mask(0x80400000) == 0x00400000) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLogicalImmediate(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitMoveWideImmediate(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeBitfieldExtract(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x3);
+
+ if ((instr->Mask(0x80400000) == 0x80000000) ||
+ (instr->Mask(0x80400000) == 0x00400000) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else if (instr->Bit(23) == 0) {
+ if ((instr->Mask(0x80200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) == 0x60000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitBitfield(instr);
+ }
+ } else {
+ if ((instr->Mask(0x60200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitExtract(instr);
+ }
+ }
+}
+
+
+void Decoder::DecodeAddSubImmediate(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x1);
+ if (instr->Bit(23) == 1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubImmediate(instr);
+ }
+}
+
+
+void Decoder::DecodeDataProcessing(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB));
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Mask(0x80008000) == 0x00008000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLogicalShifted(instr);
+ }
+ } else {
+ switch (instr->Bits(23, 21)) {
+ case 0: {
+ if (instr->Mask(0x0000FC00) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubWithCarry(instr);
+ }
+ break;
+ }
+ case 2: {
+ if ((instr->Bit(29) == 0) ||
+ (instr->Mask(0x00000410) != 0)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(11) == 0) {
+ VisitConditionalCompareRegister(instr);
+ } else {
+ VisitConditionalCompareImmediate(instr);
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (instr->Mask(0x20000800) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalSelect(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(29) == 0x1) {
+ VisitUnallocated(instr);
+ VIXL_FALLTHROUGH();
+ } else {
+ if (instr->Bit(30) == 0) {
+ if ((instr->Bit(15) == 0x1) ||
+ (instr->Bits(15, 11) == 0) ||
+ (instr->Bits(15, 12) == 0x1) ||
+ (instr->Bits(15, 12) == 0x3) ||
+ (instr->Bits(15, 13) == 0x3) ||
+ (instr->Mask(0x8000EC00) == 0x00004C00) ||
+ (instr->Mask(0x8000E800) == 0x80004000) ||
+ (instr->Mask(0x8000E400) == 0x80004000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing2Source(instr);
+ }
+ } else {
+ if ((instr->Bit(13) == 1) ||
+ (instr->Bits(20, 16) != 0) ||
+ (instr->Bits(15, 14) != 0) ||
+ (instr->Mask(0xA01FFC00) == 0x00000C00) ||
+ (instr->Mask(0x201FF800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing1Source(instr);
+ }
+ }
+ break;
+ }
+ }
+ case 1:
+ case 3:
+ case 5:
+ case 7: VisitUnallocated(instr); break;
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubShifted(instr);
+ }
+ } else {
+ if ((instr->Mask(0x00C00000) != 0x00000000) ||
+ (instr->Mask(0x00001400) == 0x00001400) ||
+ (instr->Mask(0x00001800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubExtended(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(30) == 0x1) ||
+ (instr->Bits(30, 29) == 0x1) ||
+ (instr->Mask(0xE0600000) == 0x00200000) ||
+ (instr->Mask(0xE0608000) == 0x00400000) ||
+ (instr->Mask(0x60608000) == 0x00408000) ||
+ (instr->Mask(0x60E00000) == 0x00E00000) ||
+ (instr->Mask(0x60E00000) == 0x00800000) ||
+ (instr->Mask(0x60E00000) == 0x00600000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing3Source(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeFP(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0xE) ||
+ (instr->Bits(27, 24) == 0xF));
+ if (instr->Bit(28) == 0) {
+ DecodeNEONVectorDataProcessing(instr);
+ } else {
+ if (instr->Bits(31, 30) == 0x3) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(31, 30) == 0x1) {
+ DecodeNEONScalarDataProcessing(instr);
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bit(23) == 1) ||
+ (instr->Bit(18) == 1) ||
+ (instr->Mask(0x80008000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x00160000) == 0x00000000) ||
+ (instr->Mask(0x00160000) == 0x00120000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPFixedPointConvert(instr);
+ }
+ } else {
+ if (instr->Bits(15, 10) == 32) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(15, 10) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x000E0000) == 0x000C0000) ||
+ (instr->Mask(0x00160000) == 0x00120000) ||
+ (instr->Mask(0x00160000) == 0x00140000) ||
+ (instr->Mask(0x20C40000) == 0x00800000) ||
+ (instr->Mask(0x20C60000) == 0x00840000) ||
+ (instr->Mask(0xA0C60000) == 0x80060000) ||
+ (instr->Mask(0xA0C60000) == 0x00860000) ||
+ (instr->Mask(0xA0C60000) == 0x00460000) ||
+ (instr->Mask(0xA0CE0000) == 0x80860000) ||
+ (instr->Mask(0xA0CE0000) == 0x804E0000) ||
+ (instr->Mask(0xA0CE0000) == 0x000E0000) ||
+ (instr->Mask(0xA0D60000) == 0x00160000) ||
+ (instr->Mask(0xA0D60000) == 0x80560000) ||
+ (instr->Mask(0xA0D60000) == 0x80960000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPIntegerConvert(instr);
+ }
+ } else if (instr->Bits(14, 10) == 16) {
+ const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
+ if ((instr->Mask(0x80180000) != 0) ||
+ (masked_A0DF8000 == 0x00020000) ||
+ (masked_A0DF8000 == 0x00030000) ||
+ (masked_A0DF8000 == 0x00068000) ||
+ (masked_A0DF8000 == 0x00428000) ||
+ (masked_A0DF8000 == 0x00430000) ||
+ (masked_A0DF8000 == 0x00468000) ||
+ (instr->Mask(0xA0D80000) == 0x00800000) ||
+ (instr->Mask(0xA0DE0000) == 0x00C00000) ||
+ (instr->Mask(0xA0DF0000) == 0x00C30000) ||
+ (instr->Mask(0xA0DC0000) == 0x00C40000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing1Source(instr);
+ }
+ } else if (instr->Bits(13, 10) == 8) {
+ if ((instr->Bits(15, 14) != 0) ||
+ (instr->Bits(2, 0) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPCompare(instr);
+ }
+ } else if (instr->Bits(12, 10) == 4) {
+ if ((instr->Bits(9, 5) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPImmediate(instr);
+ }
+ } else {
+ if (instr->Mask(0x80800000) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 1: {
+ VisitFPConditionalCompare(instr);
+ break;
+ }
+ case 2: {
+ if ((instr->Bits(15, 14) == 0x3) ||
+ (instr->Mask(0x00009000) == 0x00009000) ||
+ (instr->Mask(0x0000A000) == 0x0000A000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing2Source(instr);
+ }
+ break;
+ }
+ case 3: {
+ VisitFPConditionalSelect(instr);
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ }
+ }
+ }
+ } else {
+ // Bit 30 == 1 has been handled earlier.
+ VIXL_ASSERT(instr->Bit(30) == 0);
+ if (instr->Mask(0xA0800000) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing3Source(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeNEONLoadStore(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(29, 25) == 0x6);
+ if (instr->Bit(31) == 0) {
+ if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
+ VisitUnallocated(instr);
+ return;
+ }
+
+ if (instr->Bit(23) == 0) {
+ if (instr->Bits(20, 16) == 0) {
+ if (instr->Bit(24) == 0) {
+ VisitNEONLoadStoreMultiStruct(instr);
+ } else {
+ VisitNEONLoadStoreSingleStruct(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(24) == 0) {
+ VisitNEONLoadStoreMultiStructPostIndex(instr);
+ } else {
+ VisitNEONLoadStoreSingleStructPostIndex(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+}
+
+
+void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(28, 25) == 0x7);
+ if (instr->Bit(31) == 0) {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if (instr->Bit(15) == 0) {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitNEONTable(instr);
+ } else {
+ VisitNEONPerm(instr);
+ }
+ } else {
+ VisitNEONExtract(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ VisitNEONCopy(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitNEON3Different(instr);
+ } else {
+ if (instr->Bits(18, 17) == 0) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(19) == 0) {
+ VisitNEON2RegMisc(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x2) {
+ VisitCryptoAES(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(19) == 0) {
+ VisitNEONAcrossLanes(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitNEON3Same(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ VisitNEONByIndexedElement(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Bits(22, 19) == 0) {
+ VisitNEONModifiedImmediate(instr);
+ } else {
+ VisitNEONShiftImmediate(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+}
+
+
+void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(28, 25) == 0xF);
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if (instr->Bit(15) == 0) {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitCrypto3RegSHA(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ VisitNEONScalarCopy(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitNEONScalar3Diff(instr);
+ } else {
+ if (instr->Bits(18, 17) == 0) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(19) == 0) {
+ VisitNEONScalar2RegMisc(instr);
+ } else {
+ if (instr->Bit(29) == 0) {
+ VisitCrypto2RegSHA(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(19) == 0) {
+ VisitNEONScalarPairwise(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitNEONScalar3Same(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ VisitNEONScalarByIndexedElement(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitNEONScalarShiftImmediate(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+}
+
+
+#define DEFINE_VISITOR_CALLERS(A) \
+ void Decoder::Visit##A(const Instruction *instr) { \
+ VIXL_ASSERT(instr->Mask(A##FMask) == A##Fixed); \
+ for (auto visitor : visitors_) { \
+ visitor->Visit##A(instr); \
+ } \
+ }
+VISITOR_LIST(DEFINE_VISITOR_CALLERS)
+#undef DEFINE_VISITOR_CALLERS
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Decoder-vixl.h b/js/src/jit/arm64/vixl/Decoder-vixl.h
new file mode 100644
index 0000000000..1b3cf172ac
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Decoder-vixl.h
@@ -0,0 +1,276 @@
+// Copyright 2014, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_DECODER_A64_H_
+#define VIXL_A64_DECODER_A64_H_
+
+#include "mozilla/Vector.h"
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "js/AllocPolicy.h"
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST_THAT_RETURN(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(AtomicMemory) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadStorePairNonTemporal) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LoadStoreExclusive) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(Crypto2RegSHA) \
+ V(Crypto3RegSHA) \
+ V(CryptoAES) \
+ V(NEON2RegMisc) \
+ V(NEON3Different) \
+ V(NEON3Same) \
+ V(NEONAcrossLanes) \
+ V(NEONByIndexedElement) \
+ V(NEONCopy) \
+ V(NEONExtract) \
+ V(NEONLoadStoreMultiStruct) \
+ V(NEONLoadStoreMultiStructPostIndex) \
+ V(NEONLoadStoreSingleStruct) \
+ V(NEONLoadStoreSingleStructPostIndex) \
+ V(NEONModifiedImmediate) \
+ V(NEONScalar2RegMisc) \
+ V(NEONScalar3Diff) \
+ V(NEONScalar3Same) \
+ V(NEONScalarByIndexedElement) \
+ V(NEONScalarCopy) \
+ V(NEONScalarPairwise) \
+ V(NEONScalarShiftImmediate) \
+ V(NEONShiftImmediate) \
+ V(NEONTable) \
+ V(NEONPerm)
+
+#define VISITOR_LIST_THAT_DONT_RETURN(V) \
+ V(Unallocated) \
+ V(Unimplemented) \
+
+#define VISITOR_LIST(V) \
+ VISITOR_LIST_THAT_RETURN(V) \
+ VISITOR_LIST_THAT_DONT_RETURN(V) \
+
+namespace vixl {
+
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+ enum VisitorConstness {
+ kConstVisitor,
+ kNonConstVisitor
+ };
+ explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
+ : constness_(constness) {}
+
+ virtual ~DecoderVisitor() {}
+
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ bool IsConstVisitor() const { return constness_ == kConstVisitor; }
+ Instruction* MutableInstruction(const Instruction* instr) {
+ VIXL_ASSERT(!IsConstVisitor());
+ return const_cast<Instruction*>(instr);
+ }
+
+ private:
+ const VisitorConstness constness_;
+};
+
+
+class Decoder {
+ public:
+ Decoder() {}
+
+ // Top-level wrappers around the actual decoding function.
+ void Decode(const Instruction* instr) {
+#ifdef DEBUG
+ for (auto visitor : visitors_) {
+ VIXL_ASSERT(visitor->IsConstVisitor());
+ }
+#endif
+ DecodeInstruction(instr);
+ }
+ void Decode(Instruction* instr) {
+ DecodeInstruction(const_cast<const Instruction*>(instr));
+ }
+
+ // Register a new visitor class with the decoder.
+ // Decode() will call the corresponding visitor method from all registered
+ // visitor classes when decoding reaches the leaf node of the instruction
+ // decode tree.
+ // Visitors are called in order.
+ // A visitor can be registered multiple times.
+ //
+ // d.AppendVisitor(V1);
+ // d.AppendVisitor(V2);
+ // d.PrependVisitor(V2);
+ // d.AppendVisitor(V3);
+ //
+ // d.Decode(i);
+ //
+ // will call in order visitor methods in V2, V1, V2, V3.
+ void AppendVisitor(DecoderVisitor* visitor);
+ void PrependVisitor(DecoderVisitor* visitor);
+ // These helpers register `new_visitor` before or after the first instance of
+ // `registered_visitor` in the list.
+ // So if
+ // V1, V2, V1, V2
+ // are registered in this order in the decoder, calls to
+ // d.InsertVisitorAfter(V3, V1);
+ // d.InsertVisitorBefore(V4, V2);
+ // will yield the order
+ // V1, V3, V4, V2, V1, V2
+ //
+ // For more complex modifications of the order of registered visitors, one can
+ // directly access and modify the list of visitors via the `visitors()'
+ // accessor.
+ void InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+ void InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+
+ // Remove all instances of a previously registered visitor class from the list
+ // of visitors stored by the decoder.
+ void RemoveVisitor(DecoderVisitor* visitor);
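+
+ // Illustrative usage sketch (not part of the original VIXL comments). The
+ // Disassembler declared in Disasm-vixl.h is one DecoderVisitor
+ // implementation:
+ //
+ //   vixl::Decoder decoder;
+ //   vixl::Disassembler disasm;
+ //   decoder.AppendVisitor(&disasm);
+ //   decoder.Decode(instr);  // `instr` is a const Instruction*; the matching
+ //                           // disasm.Visit<Class>() override runs for each
+ //                           // decoded instruction.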
+
+ #define DECLARE(A) void Visit##A(const Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+
+ private:
+ // Decodes an instruction and calls the visitor functions registered with the
+ // Decoder class.
+ void DecodeInstruction(const Instruction* instr);
+
+ // Decode the PC relative addressing instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x0.
+ void DecodePCRelAddressing(const Instruction* instr);
+
+ // Decode the add/subtract immediate instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x1.
+ void DecodeAddSubImmediate(const Instruction* instr);
+
+ // Decode the branch, system command, and exception generation parts of
+ // the instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+ void DecodeBranchSystemException(const Instruction* instr);
+
+ // Decode the load and store parts of the instruction tree, and call
+ // the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+ void DecodeLoadStore(const Instruction* instr);
+
+ // Decode the logical immediate and move wide immediate parts of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x2.
+ void DecodeLogical(const Instruction* instr);
+
+ // Decode the bitfield and extraction parts of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x3.
+ void DecodeBitfieldExtract(const Instruction* instr);
+
+ // Decode the data processing parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
+ void DecodeDataProcessing(const Instruction* instr);
+
+ // Decode the floating point parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0xE, 0xF}.
+ void DecodeFP(const Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 29:25 = 0x6.
+ void DecodeNEONLoadStore(const Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) vector data processing part of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 28:25 = 0x7.
+ void DecodeNEONVectorDataProcessing(const Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) scalar data processing part of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 28:25 = 0xF.
+ void DecodeNEONScalarDataProcessing(const Instruction* instr);
+
+ private:
+ // Visitors are registered in a list.
+ mozilla::Vector<DecoderVisitor*, 8, js::SystemAllocPolicy> visitors_;
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_DECODER_A64_H_
diff --git a/js/src/jit/arm64/vixl/Disasm-vixl.cpp b/js/src/jit/arm64/vixl/Disasm-vixl.cpp
new file mode 100644
index 0000000000..1116ebb67b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Disasm-vixl.cpp
@@ -0,0 +1,3741 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Disasm-vixl.h"
+
+#include "mozilla/Sprintf.h"
+#include <cstdlib>
+
+namespace vixl {
+
+Disassembler::Disassembler() {
+ buffer_size_ = 256;
+ buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+ buffer_pos_ = 0;
+ own_buffer_ = true;
+ code_address_offset_ = 0;
+}
+
+
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+ buffer_size_ = buffer_size;
+ buffer_ = text_buffer;
+ buffer_pos_ = 0;
+ own_buffer_ = false;
+ code_address_offset_ = 0;
+}
+
+
+Disassembler::~Disassembler() {
+ if (own_buffer_) {
+ free(buffer_);
+ }
+}
+
+
+char* Disassembler::GetOutput() {
+ return buffer_;
+}
+
+
+void Disassembler::VisitAddSubImmediate(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+                 (instr->ImmAddSub() == 0);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rns, 'IAddSub";
+ const char *form_cmp = "'Rns, 'IAddSub";
+ const char *form_mov = "'Rds, 'Rns";
+
+ switch (instr->Mask(AddSubImmediateMask)) {
+ case ADD_w_imm:
+ case ADD_x_imm: {
+ mnemonic = "add";
+ if (stack_op) {
+ mnemonic = "mov";
+ form = form_mov;
+ }
+ break;
+ }
+ case ADDS_w_imm:
+ case ADDS_x_imm: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_imm:
+ case SUB_x_imm: mnemonic = "sub"; break;
+ case SUBS_w_imm:
+ case SUBS_x_imm: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubShifted(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'NDP";
+ const char *form_cmp = "'Rn, 'Rm'NDP";
+ const char *form_neg = "'Rd, 'Rm'NDP";
+
+ switch (instr->Mask(AddSubShiftedMask)) {
+ case ADD_w_shift:
+ case ADD_x_shift: mnemonic = "add"; break;
+ case ADDS_w_shift:
+ case ADDS_x_shift: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_shift:
+ case SUB_x_shift: {
+ mnemonic = "sub";
+ if (rn_is_zr) {
+ mnemonic = "neg";
+ form = form_neg;
+ }
+ break;
+ }
+ case SUBS_w_shift:
+ case SUBS_x_shift: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ } else if (rn_is_zr) {
+ mnemonic = "negs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubExtended(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ const char *mnemonic = "";
+ Extend mode = static_cast<Extend>(instr->ExtendMode());
+ const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+ const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+ switch (instr->Mask(AddSubExtendedMask)) {
+ case ADD_w_ext:
+ case ADD_x_ext: mnemonic = "add"; break;
+ case ADDS_w_ext:
+ case ADDS_x_ext: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_ext:
+ case SUB_x_ext: mnemonic = "sub"; break;
+ case SUBS_w_ext:
+ case SUBS_x_ext: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubWithCarry(const Instruction* instr) {
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_neg = "'Rd, 'Rm";
+
+ switch (instr->Mask(AddSubWithCarryMask)) {
+ case ADC_w:
+ case ADC_x: mnemonic = "adc"; break;
+ case ADCS_w:
+ case ADCS_x: mnemonic = "adcs"; break;
+ case SBC_w:
+ case SBC_x: {
+ mnemonic = "sbc";
+ if (rn_is_zr) {
+ mnemonic = "ngc";
+ form = form_neg;
+ }
+ break;
+ }
+ case SBCS_w:
+ case SBCS_x: {
+ mnemonic = "sbcs";
+ if (rn_is_zr) {
+ mnemonic = "ngcs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLogicalImmediate(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rn, 'ITri";
+
+ if (instr->ImmLogical() == 0) {
+ // The immediate encoded in the instruction is not in the expected format.
+ Format(instr, "unallocated", "(LogicalImmediate)");
+ return;
+ }
+
+ switch (instr->Mask(LogicalImmediateMask)) {
+ case AND_w_imm:
+ case AND_x_imm: mnemonic = "and"; break;
+ case ORR_w_imm:
+ case ORR_x_imm: {
+ mnemonic = "orr";
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+ mnemonic = "mov";
+ form = "'Rds, 'ITri";
+ }
+ break;
+ }
+ case EOR_w_imm:
+ case EOR_x_imm: mnemonic = "eor"; break;
+ case ANDS_w_imm:
+ case ANDS_x_imm: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'ITri";
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+ VIXL_ASSERT((reg_size == kXRegSize) ||
+ ((reg_size == kWRegSize) && (value <= 0xffffffff)));
+
+ // Test for movz: 16 bits set at positions 0, 16, 32 or 48.
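+ // (Worked example, added for illustration: 0x00000000cafe0000 has its only
+ // non-zero half-word at position 16, so it passes this test and the "mov"
+ // alias is used; 0x00001234cafe0000 has two non-zero half-words and fails.)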
+ if (((value & UINT64_C(0xffffffffffff0000)) == 0) ||
+ ((value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+ ((value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+ ((value & UINT64_C(0x0000ffffffffffff)) == 0)) {
+ return true;
+ }
+
+ // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48).
+ if ((reg_size == kXRegSize) &&
+ (((~value & UINT64_C(0xffffffffffff0000)) == 0) ||
+ ((~value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+ ((~value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+ ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) {
+ return true;
+ }
+ if ((reg_size == kWRegSize) &&
+ (((value & 0xffff0000) == 0xffff0000) ||
+ ((value & 0x0000ffff) == 0x0000ffff))) {
+ return true;
+ }
+ return false;
+}
+
+
+void Disassembler::VisitLogicalShifted(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'NLo";
+
+ switch (instr->Mask(LogicalShiftedMask)) {
+ case AND_w:
+ case AND_x: mnemonic = "and"; break;
+ case BIC_w:
+ case BIC_x: mnemonic = "bic"; break;
+ case EOR_w:
+ case EOR_x: mnemonic = "eor"; break;
+ case EON_w:
+ case EON_x: mnemonic = "eon"; break;
+ case BICS_w:
+ case BICS_x: mnemonic = "bics"; break;
+ case ANDS_w:
+ case ANDS_x: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'Rm'NLo";
+ }
+ break;
+ }
+ case ORR_w:
+ case ORR_x: {
+ mnemonic = "orr";
+ if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+ mnemonic = "mov";
+ form = "'Rd, 'Rm";
+ }
+ break;
+ }
+ case ORN_w:
+ case ORN_x: {
+ mnemonic = "orn";
+ if (rn_is_zr) {
+ mnemonic = "mvn";
+ form = "'Rd, 'Rm'NLo";
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareRegister(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareRegisterMask)) {
+ case CCMN_w:
+ case CCMN_x: mnemonic = "ccmn"; break;
+ case CCMP_w:
+ case CCMP_x: mnemonic = "ccmp"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareImmediate(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareImmediateMask)) {
+ case CCMN_w_imm:
+ case CCMN_x_imm: mnemonic = "ccmn"; break;
+ case CCMP_w_imm:
+ case CCMP_x_imm: mnemonic = "ccmp"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalSelect(const Instruction* instr) {
+ bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+ bool rn_is_rm = (instr->Rn() == instr->Rm());
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+ const char *form_test = "'Rd, 'CInv";
+ const char *form_update = "'Rd, 'Rn, 'CInv";
+
+ Condition cond = static_cast<Condition>(instr->Condition());
+ bool invertible_cond = (cond != al) && (cond != nv);
+
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: mnemonic = "csel"; break;
+ case CSINC_w:
+ case CSINC_x: {
+ mnemonic = "csinc";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "cset";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinc";
+ form = form_update;
+ }
+ break;
+ }
+ case CSINV_w:
+ case CSINV_x: {
+ mnemonic = "csinv";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "csetm";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinv";
+ form = form_update;
+ }
+ break;
+ }
+ case CSNEG_w:
+ case CSNEG_x: {
+ mnemonic = "csneg";
+ if (rn_is_rm && invertible_cond) {
+ mnemonic = "cneg";
+ form = form_update;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitBitfield(const Instruction* instr) {
+ unsigned s = instr->ImmS();
+ unsigned r = instr->ImmR();
+ unsigned rd_size_minus_1 =
+ ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
+ const char *mnemonic = "";
+ const char *form = "";
+ const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+ const char *form_extend = "'Rd, 'Wn";
+ const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+ const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+ const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
+ switch (instr->Mask(BitfieldMask)) {
+ case SBFM_w:
+ case SBFM_x: {
+ mnemonic = "sbfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "sxtb";
+ } else if (s == 15) {
+ mnemonic = "sxth";
+ } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+ mnemonic = "sxtw";
+ } else {
+ form = form_bfx;
+ }
+ } else if (s == rd_size_minus_1) {
+ mnemonic = "asr";
+ form = form_shift_right;
+ } else if (s < r) {
+ mnemonic = "sbfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case UBFM_w:
+ case UBFM_x: {
+ mnemonic = "ubfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "uxtb";
+ } else if (s == 15) {
+ mnemonic = "uxth";
+ } else {
+ form = form_bfx;
+ }
+ }
+ if (s == rd_size_minus_1) {
+ mnemonic = "lsr";
+ form = form_shift_right;
+ } else if (r == s + 1) {
+ mnemonic = "lsl";
+ form = form_lsl;
+ } else if (s < r) {
+ mnemonic = "ubfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case BFM_w:
+ case BFM_x: {
+ mnemonic = "bfxil";
+ form = form_bfx;
+ if (s < r) {
+ mnemonic = "bfi";
+ form = form_bfiz;
+ }
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitExtract(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+ switch (instr->Mask(ExtractMask)) {
+ case EXTR_w:
+ case EXTR_x: {
+ if (instr->Rn() == instr->Rm()) {
+ mnemonic = "ror";
+ form = "'Rd, 'Rn, 'IExtract";
+ } else {
+ mnemonic = "extr";
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitPCRelAddressing(const Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+ case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break;
+ default: Format(instr, "unimplemented", "(PCRelAddressing)");
+ }
+}
+
+
+void Disassembler::VisitConditionalBranch(const Instruction* instr) {
+ switch (instr->Mask(ConditionalBranchMask)) {
+ case B_cond: Format(instr, "b.'CBrn", "'TImmCond"); break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Disassembler::VisitUnconditionalBranchToRegister(
+ const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Xn";
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BR: mnemonic = "br"; break;
+ case BLR: mnemonic = "blr"; break;
+ case RET: {
+ mnemonic = "ret";
+ if (instr->Rn() == kLinkRegCode) {
+ form = NULL;
+ }
+ break;
+ }
+ default: form = "(UnconditionalBranchToRegister)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnconditionalBranch(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'TImmUncn";
+
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case B: mnemonic = "b"; break;
+ case BL: mnemonic = "bl"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing1Source(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn";
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(RBIT, "rbit");
+ FORMAT(REV16, "rev16");
+ FORMAT(REV, "rev");
+ FORMAT(CLZ, "clz");
+ FORMAT(CLS, "cls");
+ #undef FORMAT
+ case REV32_x: mnemonic = "rev32"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing2Source(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_wwx = "'Wd, 'Wn, 'Xm";
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(UDIV, "udiv");
+ FORMAT(SDIV, "sdiv");
+ FORMAT(LSLV, "lsl");
+ FORMAT(LSRV, "lsr");
+ FORMAT(ASRV, "asr");
+ FORMAT(RORV, "ror");
+ #undef FORMAT
+ case CRC32B: mnemonic = "crc32b"; break;
+ case CRC32H: mnemonic = "crc32h"; break;
+ case CRC32W: mnemonic = "crc32w"; break;
+ case CRC32X: mnemonic = "crc32x"; form = form_wwx; break;
+ case CRC32CB: mnemonic = "crc32cb"; break;
+ case CRC32CH: mnemonic = "crc32ch"; break;
+ case CRC32CW: mnemonic = "crc32cw"; break;
+ case CRC32CX: mnemonic = "crc32cx"; form = form_wwx; break;
+ default: form = "(DataProcessing2Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing3Source(const Instruction* instr) {
+ bool ra_is_zr = RaIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+ const char *form_rrr = "'Rd, 'Rn, 'Rm";
+ const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+ const char *form_xww = "'Xd, 'Wn, 'Wm";
+ const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x: {
+ mnemonic = "madd";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mul";
+ form = form_rrr;
+ }
+ break;
+ }
+ case MSUB_w:
+ case MSUB_x: {
+ mnemonic = "msub";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mneg";
+ form = form_rrr;
+ }
+ break;
+ }
+ case SMADDL_x: {
+ mnemonic = "smaddl";
+ if (ra_is_zr) {
+ mnemonic = "smull";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMSUBL_x: {
+ mnemonic = "smsubl";
+ if (ra_is_zr) {
+ mnemonic = "smnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMADDL_x: {
+ mnemonic = "umaddl";
+ if (ra_is_zr) {
+ mnemonic = "umull";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMSUBL_x: {
+ mnemonic = "umsubl";
+ if (ra_is_zr) {
+ mnemonic = "umnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMULH_x: {
+ mnemonic = "smulh";
+ form = form_xxx;
+ break;
+ }
+ case UMULH_x: {
+ mnemonic = "umulh";
+ form = form_xxx;
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCompareBranch(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rt, 'TImmCmpa";
+
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w:
+ case CBZ_x: mnemonic = "cbz"; break;
+ case CBNZ_w:
+ case CBNZ_x: mnemonic = "cbnz"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitTestBranch(const Instruction* instr) {
+ const char *mnemonic = "";
+ // If the top bit of the immediate is clear, the tested register is
+ // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+ // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+ // uses bit 31 (normally "sf") to choose the register size.
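+ // (Example for illustration: "tbz w3, #5, <label>" tests a bit below 32,
+ // while "tbz x3, #45, <label>" has the immediate's top bit set and so is
+ // printed with an X register.)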
+ const char *form = "'Rt, 'IS, 'TImmTest";
+
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: mnemonic = "tbz"; break;
+ case TBNZ: mnemonic = "tbnz"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitMoveWideImmediate(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'IMoveImm";
+
+ // Print the shift separately for movk, to make it clear which half word will
+ // be overwritten. Movn and movz print the computed immediate, which includes
+ // shift calculation.
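+ // (Illustrative example: "movk x0, #0xcafe, lsl #16" keeps the shift
+ // visible, whereas the movz encoding of the same fields is printed as
+ // "mov x0, #0xcafe0000".)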
+ switch (instr->Mask(MoveWideImmediateMask)) {
+ case MOVN_w:
+ case MOVN_x:
+ if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
+ if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) {
+ mnemonic = "movn";
+ } else {
+ mnemonic = "mov";
+ form = "'Rd, 'IMoveNeg";
+ }
+ } else {
+ mnemonic = "movn";
+ }
+ break;
+ case MOVZ_w:
+ case MOVZ_x:
+ if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0))
+ mnemonic = "mov";
+ else
+ mnemonic = "movz";
+ break;
+ case MOVK_w:
+ case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_LIST(V) \
+ V(STRB_w, "strb", "'Wt") \
+ V(STRH_w, "strh", "'Wt") \
+ V(STR_w, "str", "'Wt") \
+ V(STR_x, "str", "'Xt") \
+ V(LDRB_w, "ldrb", "'Wt") \
+ V(LDRH_w, "ldrh", "'Wt") \
+ V(LDR_w, "ldr", "'Wt") \
+ V(LDR_x, "ldr", "'Xt") \
+ V(LDRSB_x, "ldrsb", "'Xt") \
+ V(LDRSH_x, "ldrsh", "'Xt") \
+ V(LDRSW_x, "ldrsw", "'Xt") \
+ V(LDRSB_w, "ldrsb", "'Wt") \
+ V(LDRSH_w, "ldrsh", "'Wt") \
+ V(STR_b, "str", "'Bt") \
+ V(STR_h, "str", "'Ht") \
+ V(STR_s, "str", "'St") \
+ V(STR_d, "str", "'Dt") \
+ V(LDR_b, "ldr", "'Bt") \
+ V(LDR_h, "ldr", "'Ht") \
+ V(LDR_s, "ldr", "'St") \
+ V(LDR_d, "ldr", "'Dt") \
+ V(STR_q, "str", "'Qt") \
+ V(LDR_q, "ldr", "'Qt")
+
+void Disassembler::VisitLoadStorePreIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePreIndex)";
+
+ switch (instr->Mask(LoadStorePreIndexMask)) {
+ #define LS_PREINDEX(A, B, C) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+ LOAD_STORE_LIST(LS_PREINDEX)
+ #undef LS_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePostIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePostIndex)";
+
+ switch (instr->Mask(LoadStorePostIndexMask)) {
+ #define LS_POSTINDEX(A, B, C) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+ LOAD_STORE_LIST(LS_POSTINDEX)
+ #undef LS_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreUnsignedOffset)";
+
+ switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+ #define LS_UNSIGNEDOFFSET(A, B, C) \
+ case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+ LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+ #undef LS_UNSIGNEDOFFSET
+ case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreRegisterOffset)";
+
+ switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+ #define LS_REGISTEROFFSET(A, B, C) \
+ case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+ LOAD_STORE_LIST(LS_REGISTEROFFSET)
+ #undef LS_REGISTEROFFSET
+ case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Wt, ['Xns'ILS]";
+ const char *form_x = "'Xt, ['Xns'ILS]";
+ const char *form_b = "'Bt, ['Xns'ILS]";
+ const char *form_h = "'Ht, ['Xns'ILS]";
+ const char *form_s = "'St, ['Xns'ILS]";
+ const char *form_d = "'Dt, ['Xns'ILS]";
+ const char *form_q = "'Qt, ['Xns'ILS]";
+ const char *form_prefetch = "'PrefOp, ['Xns'ILS]";
+
+ switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+ case STURB_w: mnemonic = "sturb"; break;
+ case STURH_w: mnemonic = "sturh"; break;
+ case STUR_w: mnemonic = "stur"; break;
+ case STUR_x: mnemonic = "stur"; form = form_x; break;
+ case STUR_b: mnemonic = "stur"; form = form_b; break;
+ case STUR_h: mnemonic = "stur"; form = form_h; break;
+ case STUR_s: mnemonic = "stur"; form = form_s; break;
+ case STUR_d: mnemonic = "stur"; form = form_d; break;
+ case STUR_q: mnemonic = "stur"; form = form_q; break;
+ case LDURB_w: mnemonic = "ldurb"; break;
+ case LDURH_w: mnemonic = "ldurh"; break;
+ case LDUR_w: mnemonic = "ldur"; break;
+ case LDUR_x: mnemonic = "ldur"; form = form_x; break;
+ case LDUR_b: mnemonic = "ldur"; form = form_b; break;
+ case LDUR_h: mnemonic = "ldur"; form = form_h; break;
+ case LDUR_s: mnemonic = "ldur"; form = form_s; break;
+ case LDUR_d: mnemonic = "ldur"; form = form_d; break;
+ case LDUR_q: mnemonic = "ldur"; form = form_q; break;
+ case LDURSB_x: form = form_x; VIXL_FALLTHROUGH();
+ case LDURSB_w: mnemonic = "ldursb"; break;
+ case LDURSH_x: form = form_x; VIXL_FALLTHROUGH();
+ case LDURSH_w: mnemonic = "ldursh"; break;
+ case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+ case PRFUM: mnemonic = "prfum"; form = form_prefetch; break;
+ default: form = "(LoadStoreUnscaledOffset)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadLiteral(const Instruction* instr) {
+ const char *mnemonic = "ldr";
+ const char *form = "(LoadLiteral)";
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+ case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+ case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+ case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+ case LDR_q_lit: form = "'Qt, 'ILLiteral 'LValue"; break;
+ case LDRSW_x_lit: {
+ mnemonic = "ldrsw";
+ form = "'Xt, 'ILLiteral 'LValue";
+ break;
+ }
+ case PRFM_lit: {
+ mnemonic = "prfm";
+ form = "'PrefOp, 'ILLiteral 'LValue";
+ break;
+ }
+ default: mnemonic = "unimplemented";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_PAIR_LIST(V) \
+ V(STP_w, "stp", "'Wt, 'Wt2", "2") \
+ V(LDP_w, "ldp", "'Wt, 'Wt2", "2") \
+ V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \
+ V(STP_x, "stp", "'Xt, 'Xt2", "3") \
+ V(LDP_x, "ldp", "'Xt, 'Xt2", "3") \
+ V(STP_s, "stp", "'St, 'St2", "2") \
+ V(LDP_s, "ldp", "'St, 'St2", "2") \
+ V(STP_d, "stp", "'Dt, 'Dt2", "3") \
+ V(LDP_d, "ldp", "'Dt, 'Dt2", "3") \
+ V(LDP_q, "ldp", "'Qt, 'Qt2", "4") \
+ V(STP_q, "stp", "'Qt, 'Qt2", "4")
+
+void Disassembler::VisitLoadStorePairPostIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPostIndex)";
+
+ switch (instr->Mask(LoadStorePairPostIndexMask)) {
+ #define LSP_POSTINDEX(A, B, C, D) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+ LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+ #undef LSP_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairPreIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPreIndex)";
+
+ switch (instr->Mask(LoadStorePairPreIndexMask)) {
+ #define LSP_PREINDEX(A, B, C, D) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+ LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+ #undef LSP_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairOffset)";
+
+ switch (instr->Mask(LoadStorePairOffsetMask)) {
+ #define LSP_OFFSET(A, B, C, D) \
+ case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+ LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+ #undef LSP_OFFSET
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+ case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break;
+ case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break;
+ case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break;
+ case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break;
+ case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP2]"; break;
+ case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP2]"; break;
+ case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break;
+ case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break;
+ case STNP_q: mnemonic = "stnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break;
+ case LDNP_q: mnemonic = "ldnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break;
+ default: form = "(LoadStorePairNonTemporal)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+// clang-format off
+#define LOAD_STORE_EXCLUSIVE_LIST(V) \
+ V(STXRB_w, "stxrb", "'Ws, 'Wt") \
+ V(STXRH_w, "stxrh", "'Ws, 'Wt") \
+ V(STXR_w, "stxr", "'Ws, 'Wt") \
+ V(STXR_x, "stxr", "'Ws, 'Xt") \
+ V(LDXRB_w, "ldxrb", "'Wt") \
+ V(LDXRH_w, "ldxrh", "'Wt") \
+ V(LDXR_w, "ldxr", "'Wt") \
+ V(LDXR_x, "ldxr", "'Xt") \
+ V(STXP_w, "stxp", "'Ws, 'Wt, 'Wt2") \
+ V(STXP_x, "stxp", "'Ws, 'Xt, 'Xt2") \
+ V(LDXP_w, "ldxp", "'Wt, 'Wt2") \
+ V(LDXP_x, "ldxp", "'Xt, 'Xt2") \
+ V(STLXRB_w, "stlxrb", "'Ws, 'Wt") \
+ V(STLXRH_w, "stlxrh", "'Ws, 'Wt") \
+ V(STLXR_w, "stlxr", "'Ws, 'Wt") \
+ V(STLXR_x, "stlxr", "'Ws, 'Xt") \
+ V(LDAXRB_w, "ldaxrb", "'Wt") \
+ V(LDAXRH_w, "ldaxrh", "'Wt") \
+ V(LDAXR_w, "ldaxr", "'Wt") \
+ V(LDAXR_x, "ldaxr", "'Xt") \
+ V(STLXP_w, "stlxp", "'Ws, 'Wt, 'Wt2") \
+ V(STLXP_x, "stlxp", "'Ws, 'Xt, 'Xt2") \
+ V(LDAXP_w, "ldaxp", "'Wt, 'Wt2") \
+ V(LDAXP_x, "ldaxp", "'Xt, 'Xt2") \
+ V(STLRB_w, "stlrb", "'Wt") \
+ V(STLRH_w, "stlrh", "'Wt") \
+ V(STLR_w, "stlr", "'Wt") \
+ V(STLR_x, "stlr", "'Xt") \
+ V(LDARB_w, "ldarb", "'Wt") \
+ V(LDARH_w, "ldarh", "'Wt") \
+ V(LDAR_w, "ldar", "'Wt") \
+ V(LDAR_x, "ldar", "'Xt") \
+ V(CAS_w, "cas", "'Ws, 'Wt") \
+ V(CAS_x, "cas", "'Xs, 'Xt") \
+ V(CASA_w, "casa", "'Ws, 'Wt") \
+ V(CASA_x, "casa", "'Xs, 'Xt") \
+ V(CASL_w, "casl", "'Ws, 'Wt") \
+ V(CASL_x, "casl", "'Xs, 'Xt") \
+ V(CASAL_w, "casal", "'Ws, 'Wt") \
+ V(CASAL_x, "casal", "'Xs, 'Xt") \
+ V(CASB, "casb", "'Ws, 'Wt") \
+ V(CASAB, "casab", "'Ws, 'Wt") \
+ V(CASLB, "caslb", "'Ws, 'Wt") \
+ V(CASALB, "casalb", "'Ws, 'Wt") \
+ V(CASH, "cash", "'Ws, 'Wt") \
+ V(CASAH, "casah", "'Ws, 'Wt") \
+ V(CASLH, "caslh", "'Ws, 'Wt") \
+ V(CASALH, "casalh", "'Ws, 'Wt") \
+ V(CASP_w, "casp", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \
+ V(CASP_x, "casp", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") \
+ V(CASPA_w, "caspa", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \
+ V(CASPA_x, "caspa", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") \
+ V(CASPL_w, "caspl", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \
+ V(CASPL_x, "caspl", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") \
+ V(CASPAL_w, "caspal", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \
+ V(CASPAL_x, "caspal", "'Xs, 'X(s+1), 'Xt, 'X(t+1)")
+// clang-format on
+
+void Disassembler::VisitLoadStoreExclusive(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStoreExclusiveMask)) {
+#define LSX(A, B, C) \
+ case A: \
+ mnemonic = B; \
+ form = C ", ['Xns]"; \
+ break;
+ LOAD_STORE_EXCLUSIVE_LIST(LSX)
+#undef LSX
+ default:
+ form = "(LoadStoreExclusive)";
+ }
+
+ switch (instr->Mask(LoadStoreExclusiveMask)) {
+ case CASP_w:
+ case CASP_x:
+ case CASPA_w:
+ case CASPA_x:
+ case CASPL_w:
+ case CASPL_x:
+ case CASPAL_w:
+ case CASPAL_x:
+ if ((instr->Rs() % 2 == 1) || (instr->Rt() % 2 == 1)) {
+ mnemonic = "unallocated";
+ form = "(LoadStoreExclusive)";
+ }
+ break;
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+#define ATOMIC_MEMORY_SIMPLE_LIST(V) \
+ V(LDADD, "add") \
+ V(LDCLR, "clr") \
+ V(LDEOR, "eor") \
+ V(LDSET, "set") \
+ V(LDSMAX, "smax") \
+ V(LDSMIN, "smin") \
+ V(LDUMAX, "umax") \
+ V(LDUMIN, "umin")
+
+void Disassembler::VisitAtomicMemory(const Instruction* instr) {
+ const int kMaxAtomicOpMnemonicLength = 16;
+ const char* mnemonic;
+ const char* form = "'Ws, 'Wt, ['Xns]";
+
+ switch (instr->Mask(AtomicMemoryMask)) {
+#define AMS(A, MN) \
+ case A##B: \
+ mnemonic = MN "b"; \
+ break; \
+ case A##AB: \
+ mnemonic = MN "ab"; \
+ break; \
+ case A##LB: \
+ mnemonic = MN "lb"; \
+ break; \
+ case A##ALB: \
+ mnemonic = MN "alb"; \
+ break; \
+ case A##H: \
+ mnemonic = MN "h"; \
+ break; \
+ case A##AH: \
+ mnemonic = MN "ah"; \
+ break; \
+ case A##LH: \
+ mnemonic = MN "lh"; \
+ break; \
+ case A##ALH: \
+ mnemonic = MN "alh"; \
+ break; \
+ case A##_w: \
+ mnemonic = MN; \
+ break; \
+ case A##A_w: \
+ mnemonic = MN "a"; \
+ break; \
+ case A##L_w: \
+ mnemonic = MN "l"; \
+ break; \
+ case A##AL_w: \
+ mnemonic = MN "al"; \
+ break; \
+ case A##_x: \
+ mnemonic = MN; \
+ form = "'Xs, 'Xt, ['Xns]"; \
+ break; \
+ case A##A_x: \
+ mnemonic = MN "a"; \
+ form = "'Xs, 'Xt, ['Xns]"; \
+ break; \
+ case A##L_x: \
+ mnemonic = MN "l"; \
+ form = "'Xs, 'Xt, ['Xns]"; \
+ break; \
+ case A##AL_x: \
+ mnemonic = MN "al"; \
+ form = "'Xs, 'Xt, ['Xns]"; \
+ break;
+ ATOMIC_MEMORY_SIMPLE_LIST(AMS)
+
+ // SWP has the same semantics as ldadd etc but without the store aliases.
+ AMS(SWP, "swp")
+#undef AMS
+
+ case LDAPRB:
+ mnemonic = "ldaprb";
+ form = "'Wt, ['Xns]";
+ break;
+ case LDAPRH:
+ mnemonic = "ldaprh";
+ form = "'Wt, ['Xns]";
+ break;
+ case LDAPR_w:
+ mnemonic = "ldapr";
+ form = "'Wt, ['Xns]";
+ break;
+ case LDAPR_x:
+ mnemonic = "ldapr";
+ form = "'Xt, ['Xns]";
+ break;
+ default:
+ mnemonic = "unimplemented";
+ form = "(AtomicMemory)";
+ }
+
+ const char* prefix = "";
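+ // Illustrative note (added; not in the original source): the switch below
+ // picks an "ld" or "st" prefix. When Rt is the zero register the loaded
+ // result is discarded, so e.g. "ldadd w0, wzr, [x1]" is printed with its
+ // store alias, "stadd w0, [x1]".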
+ switch (instr->Mask(AtomicMemoryMask)) {
+#define AMS(A, MN) \
+ case A##AB: \
+ case A##ALB: \
+ case A##AH: \
+ case A##ALH: \
+ case A##A_w: \
+ case A##AL_w: \
+ case A##A_x: \
+ case A##AL_x: \
+ prefix = "ld"; \
+ break; \
+ case A##B: \
+ case A##LB: \
+ case A##H: \
+ case A##LH: \
+ case A##_w: \
+ case A##L_w: { \
+ prefix = "ld"; \
+ unsigned rt = instr->Rt(); \
+ if (Register(rt, 32).IsZero()) { \
+ prefix = "st"; \
+ form = "'Ws, ['Xns]"; \
+ } \
+ break; \
+ } \
+ case A##_x: \
+ case A##L_x: { \
+ prefix = "ld"; \
+ unsigned rt = instr->Rt(); \
+ if (Register(rt, 64).IsZero()) { \
+ prefix = "st"; \
+ form = "'Xs, ['Xns]"; \
+ } \
+ break; \
+ }
+ ATOMIC_MEMORY_SIMPLE_LIST(AMS)
+#undef AMS
+ }
+
+ char buffer[kMaxAtomicOpMnemonicLength];
+ if (strlen(prefix) > 0) {
+ snprintf(buffer, kMaxAtomicOpMnemonicLength, "%s%s", prefix, mnemonic);
+ mnemonic = buffer;
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+void Disassembler::VisitFPCompare(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm";
+ const char *form_zero = "'Fn, #0.0";
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s_zero:
+ case FCMP_d_zero: form = form_zero; VIXL_FALLTHROUGH();
+ case FCMP_s:
+ case FCMP_d: mnemonic = "fcmp"; break;
+ case FCMPE_s_zero:
+ case FCMPE_d_zero: form = form_zero; VIXL_FALLTHROUGH();
+ case FCMPE_s:
+ case FCMPE_d: mnemonic = "fcmpe"; break;
+ default: form = "(FPCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalCompare(const Instruction* instr) {
+ const char *mnemonic = "unmplemented";
+ const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: mnemonic = "fccmp"; break;
+ case FCCMPE_s:
+ case FCCMPE_d: mnemonic = "fccmpe"; break;
+ default: form = "(FPConditionalCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalSelect(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s:
+ case FCSEL_d: mnemonic = "fcsel"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing1Source(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fd, 'Fn";
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMOV, "fmov");
+ FORMAT(FABS, "fabs");
+ FORMAT(FNEG, "fneg");
+ FORMAT(FSQRT, "fsqrt");
+ FORMAT(FRINTN, "frintn");
+ FORMAT(FRINTP, "frintp");
+ FORMAT(FRINTM, "frintm");
+ FORMAT(FRINTZ, "frintz");
+ FORMAT(FRINTA, "frinta");
+ FORMAT(FRINTX, "frintx");
+ FORMAT(FRINTI, "frinti");
+ #undef FORMAT
+ case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+ case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+ case FCVT_hs: mnemonic = "fcvt"; form = "'Hd, 'Sn"; break;
+ case FCVT_sh: mnemonic = "fcvt"; form = "'Sd, 'Hn"; break;
+ case FCVT_dh: mnemonic = "fcvt"; form = "'Dd, 'Hn"; break;
+ case FCVT_hd: mnemonic = "fcvt"; form = "'Hd, 'Dn"; break;
+ default: form = "(FPDataProcessing1Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing2Source(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm";
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMUL, "fmul");
+ FORMAT(FDIV, "fdiv");
+ FORMAT(FADD, "fadd");
+ FORMAT(FSUB, "fsub");
+ FORMAT(FMAX, "fmax");
+ FORMAT(FMIN, "fmin");
+ FORMAT(FMAXNM, "fmaxnm");
+ FORMAT(FMINNM, "fminnm");
+ FORMAT(FNMUL, "fnmul");
+ #undef FORMAT
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing3Source(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMADD, "fmadd");
+ FORMAT(FMSUB, "fmsub");
+ FORMAT(FNMADD, "fnmadd");
+ FORMAT(FNMSUB, "fnmsub");
+ #undef FORMAT
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPImmediate(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "(FPImmediate)";
+
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
+ case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPIntegerConvert(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(FPIntegerConvert)";
+ const char *form_rf = "'Rd, 'Fn";
+ const char *form_fr = "'Fd, 'Rn";
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FMOV_ws:
+ case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+ case FMOV_sw:
+ case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FMOV_d1_x: mnemonic = "fmov"; form = "'Vd.D[1], 'Rn"; break;
+ case FMOV_x_d1: mnemonic = "fmov"; form = "'Rd, 'Vn.D[1]"; break;
+ case FCVTAS_ws:
+ case FCVTAS_xs:
+ case FCVTAS_wd:
+ case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+ case FCVTAU_ws:
+ case FCVTAU_xs:
+ case FCVTAU_wd:
+ case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+ case FCVTMS_ws:
+ case FCVTMS_xs:
+ case FCVTMS_wd:
+ case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+ case FCVTMU_ws:
+ case FCVTMU_xs:
+ case FCVTMU_wd:
+ case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+ case FCVTNS_ws:
+ case FCVTNS_xs:
+ case FCVTNS_wd:
+ case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+ case FCVTNU_ws:
+ case FCVTNU_xs:
+ case FCVTNU_wd:
+ case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+ case FCVTZU_xd:
+ case FCVTZU_ws:
+ case FCVTZU_wd:
+ case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+ case FCVTZS_xd:
+ case FCVTZS_wd:
+ case FCVTZS_xs:
+ case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case FCVTPU_xd:
+ case FCVTPU_ws:
+ case FCVTPU_wd:
+ case FCVTPU_xs: mnemonic = "fcvtpu"; form = form_rf; break;
+ case FCVTPS_xd:
+ case FCVTPS_wd:
+ case FCVTPS_xs:
+ case FCVTPS_ws: mnemonic = "fcvtps"; form = form_rf; break;
+ case SCVTF_sw:
+ case SCVTF_sx:
+ case SCVTF_dw:
+ case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw:
+ case UCVTF_sx:
+ case UCVTF_dw:
+ case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+ case FJCVTZS: mnemonic = "fjcvtzs"; form = form_rf; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPFixedPointConvert(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Fn, 'IFPFBits";
+ const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ case FCVTZS_ws_fixed:
+ case FCVTZS_xs_fixed:
+ case FCVTZS_wd_fixed:
+ case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+ case FCVTZU_ws_fixed:
+ case FCVTZU_xs_fixed:
+ case FCVTZU_wd_fixed:
+ case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+ case SCVTF_sw_fixed:
+ case SCVTF_sx_fixed:
+ case SCVTF_dw_fixed:
+ case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw_fixed:
+ case UCVTF_sx_fixed:
+ case UCVTF_dw_fixed:
+ case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitSystem(const Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ const char *mnemonic = "unimplemented";
+ const char *form = "(System)";
+
+ if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
+ switch (instr->Mask(SystemExclusiveMonitorMask)) {
+ case CLREX: {
+ mnemonic = "clrex";
+ form = (instr->CRm() == 0xf) ? NULL : "'IX";
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ mnemonic = "mrs";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "'Xt, nzcv"; break;
+ case FPCR: form = "'Xt, fpcr"; break;
+ default: form = "'Xt, (unknown)"; break;
+ }
+ break;
+ }
+ case MSR: {
+ mnemonic = "msr";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "nzcv, 'Xt"; break;
+ case FPCR: form = "fpcr, 'Xt"; break;
+ default: form = "(unknown), 'Xt"; break;
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ switch (instr->ImmHint()) {
+ case NOP: {
+ mnemonic = "nop";
+ form = NULL;
+ break;
+ }
+ case CSDB: {
+ mnemonic = "csdb";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ switch (instr->Mask(MemBarrierMask)) {
+ case DMB: {
+ mnemonic = "dmb";
+ form = "'M";
+ break;
+ }
+ case DSB: {
+ mnemonic = "dsb";
+ form = "'M";
+ break;
+ }
+ case ISB: {
+ mnemonic = "isb";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemSysFMask) == SystemSysFixed) {
+ switch (instr->SysOp()) {
+ case IVAU:
+ mnemonic = "ic";
+ form = "ivau, 'Xt";
+ break;
+ case CVAC:
+ mnemonic = "dc";
+ form = "cvac, 'Xt";
+ break;
+ case CVAU:
+ mnemonic = "dc";
+ form = "cvau, 'Xt";
+ break;
+ case CIVAC:
+ mnemonic = "dc";
+ form = "civac, 'Xt";
+ break;
+ case ZVA:
+ mnemonic = "dc";
+ form = "zva, 'Xt";
+ break;
+ default:
+ mnemonic = "sys";
+ if (instr->Rt() == 31) {
+ form = "'G1, 'Kn, 'Km, 'G2";
+ } else {
+ form = "'G1, 'Kn, 'Km, 'G2, 'Xt";
+ }
+ break;
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitException(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'IDebug";
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: mnemonic = "hlt"; break;
+ case BRK: mnemonic = "brk"; break;
+ case SVC: mnemonic = "svc"; break;
+ case HVC: mnemonic = "hvc"; break;
+ case SMC: mnemonic = "smc"; break;
+ case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+ case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+ case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+ default: form = "(Exception)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCrypto2RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitCrypto3RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitCryptoAES(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitNEON2RegMisc(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s";
+ const char *form_cmp_zero = "'Vd.%s, 'Vn.%s, #0";
+ const char *form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0";
+ NEONFormatDecoder nfd(instr);
+
+ static const NEONFormatMap map_lp_ta = {
+ {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+ };
+
+ static const NEONFormatMap map_cvt_ta = {
+ {22}, {NF_4S, NF_2D}
+ };
+
+ static const NEONFormatMap map_cvt_tb = {
+ {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S}
+ };
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_REV64: mnemonic = "rev64"; break;
+ case NEON_REV32: mnemonic = "rev32"; break;
+ case NEON_REV16: mnemonic = "rev16"; break;
+ case NEON_SADDLP:
+ mnemonic = "saddlp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_UADDLP:
+ mnemonic = "uaddlp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_SUQADD: mnemonic = "suqadd"; break;
+ case NEON_USQADD: mnemonic = "usqadd"; break;
+ case NEON_CLS: mnemonic = "cls"; break;
+ case NEON_CLZ: mnemonic = "clz"; break;
+ case NEON_CNT: mnemonic = "cnt"; break;
+ case NEON_SADALP:
+ mnemonic = "sadalp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_UADALP:
+ mnemonic = "uadalp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_SQABS: mnemonic = "sqabs"; break;
+ case NEON_SQNEG: mnemonic = "sqneg"; break;
+ case NEON_CMGT_zero: mnemonic = "cmgt"; form = form_cmp_zero; break;
+ case NEON_CMGE_zero: mnemonic = "cmge"; form = form_cmp_zero; break;
+ case NEON_CMEQ_zero: mnemonic = "cmeq"; form = form_cmp_zero; break;
+ case NEON_CMLE_zero: mnemonic = "cmle"; form = form_cmp_zero; break;
+ case NEON_CMLT_zero: mnemonic = "cmlt"; form = form_cmp_zero; break;
+ case NEON_ABS: mnemonic = "abs"; break;
+ case NEON_NEG: mnemonic = "neg"; break;
+ case NEON_RBIT_NOT:
+ switch (instr->FPType()) {
+ case 0: mnemonic = "mvn"; break;
+ case 1: mnemonic = "rbit"; break;
+ default: form = "(NEON2RegMisc)";
+ }
+ nfd.SetFormatMaps(nfd.LogicalFormatMap());
+ break;
+ }
+ } else {
+ // These instructions all use a one bit size field, except XTN, SQXTUN,
+ // SHLL, SQXTN and UQXTN, which use a two bit size field.
+ nfd.SetFormatMaps(nfd.FPFormatMap());
+ switch (instr->Mask(NEON2RegMiscFPMask)) {
+ case NEON_FABS: mnemonic = "fabs"; break;
+ case NEON_FNEG: mnemonic = "fneg"; break;
+ case NEON_FCVTN:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtn2" : "fcvtn";
+ nfd.SetFormatMap(0, &map_cvt_tb);
+ nfd.SetFormatMap(1, &map_cvt_ta);
+ break;
+ case NEON_FCVTXN:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn";
+ nfd.SetFormatMap(0, &map_cvt_tb);
+ nfd.SetFormatMap(1, &map_cvt_ta);
+ break;
+ case NEON_FCVTL:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtl2" : "fcvtl";
+ nfd.SetFormatMap(0, &map_cvt_ta);
+ nfd.SetFormatMap(1, &map_cvt_tb);
+ break;
+ case NEON_FRINTN: mnemonic = "frintn"; break;
+ case NEON_FRINTA: mnemonic = "frinta"; break;
+ case NEON_FRINTP: mnemonic = "frintp"; break;
+ case NEON_FRINTM: mnemonic = "frintm"; break;
+ case NEON_FRINTX: mnemonic = "frintx"; break;
+ case NEON_FRINTZ: mnemonic = "frintz"; break;
+ case NEON_FRINTI: mnemonic = "frinti"; break;
+ case NEON_FCVTNS: mnemonic = "fcvtns"; break;
+ case NEON_FCVTNU: mnemonic = "fcvtnu"; break;
+ case NEON_FCVTPS: mnemonic = "fcvtps"; break;
+ case NEON_FCVTPU: mnemonic = "fcvtpu"; break;
+ case NEON_FCVTMS: mnemonic = "fcvtms"; break;
+ case NEON_FCVTMU: mnemonic = "fcvtmu"; break;
+ case NEON_FCVTZS: mnemonic = "fcvtzs"; break;
+ case NEON_FCVTZU: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTAS: mnemonic = "fcvtas"; break;
+ case NEON_FCVTAU: mnemonic = "fcvtau"; break;
+ case NEON_FSQRT: mnemonic = "fsqrt"; break;
+ case NEON_SCVTF: mnemonic = "scvtf"; break;
+ case NEON_UCVTF: mnemonic = "ucvtf"; break;
+ case NEON_URSQRTE: mnemonic = "ursqrte"; break;
+ case NEON_URECPE: mnemonic = "urecpe"; break;
+ case NEON_FRSQRTE: mnemonic = "frsqrte"; break;
+ case NEON_FRECPE: mnemonic = "frecpe"; break;
+ case NEON_FCMGT_zero: mnemonic = "fcmgt"; form = form_fcmp_zero; break;
+ case NEON_FCMGE_zero: mnemonic = "fcmge"; form = form_fcmp_zero; break;
+ case NEON_FCMEQ_zero: mnemonic = "fcmeq"; form = form_fcmp_zero; break;
+ case NEON_FCMLE_zero: mnemonic = "fcmle"; form = form_fcmp_zero; break;
+ case NEON_FCMLT_zero: mnemonic = "fcmlt"; form = form_fcmp_zero; break;
+ default:
+ if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
+ (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_XTN: mnemonic = "xtn"; break;
+ case NEON_SQXTN: mnemonic = "sqxtn"; break;
+ case NEON_UQXTN: mnemonic = "uqxtn"; break;
+ case NEON_SQXTUN: mnemonic = "sqxtun"; break;
+ case NEON_SHLL:
+ mnemonic = "shll";
+ nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(1, nfd.IntegerFormatMap());
+ switch (instr->NEONSize()) {
+ case 0: form = "'Vd.%s, 'Vn.%s, #8"; break;
+ case 1: form = "'Vd.%s, 'Vn.%s, #16"; break;
+ case 2: form = "'Vd.%s, 'Vn.%s, #32"; break;
+ default: form = "(NEON2RegMisc)";
+ }
+ }
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+ return;
+ } else {
+ form = "(NEON2RegMisc)";
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEON3Same(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+ NEONFormatDecoder nfd(instr);
+
+ if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+ switch (instr->Mask(NEON3SameLogicalMask)) {
+ case NEON_AND: mnemonic = "and"; break;
+ case NEON_ORR:
+ mnemonic = "orr";
+ if (instr->Rm() == instr->Rn()) {
+ mnemonic = "mov";
+ form = "'Vd.%s, 'Vn.%s";
+ }
+ break;
+ case NEON_ORN: mnemonic = "orn"; break;
+ case NEON_EOR: mnemonic = "eor"; break;
+ case NEON_BIC: mnemonic = "bic"; break;
+ case NEON_BIF: mnemonic = "bif"; break;
+ case NEON_BIT: mnemonic = "bit"; break;
+ case NEON_BSL: mnemonic = "bsl"; break;
+ default: form = "(NEON3Same)";
+ }
+ nfd.SetFormatMaps(nfd.LogicalFormatMap());
+ } else {
+ static const char *mnemonics[] = {
+ "shadd", "uhadd", "shadd", "uhadd",
+ "sqadd", "uqadd", "sqadd", "uqadd",
+ "srhadd", "urhadd", "srhadd", "urhadd",
+ NULL, NULL, NULL, NULL, // Handled by logical cases above.
+ "shsub", "uhsub", "shsub", "uhsub",
+ "sqsub", "uqsub", "sqsub", "uqsub",
+ "cmgt", "cmhi", "cmgt", "cmhi",
+ "cmge", "cmhs", "cmge", "cmhs",
+ "sshl", "ushl", "sshl", "ushl",
+ "sqshl", "uqshl", "sqshl", "uqshl",
+ "srshl", "urshl", "srshl", "urshl",
+ "sqrshl", "uqrshl", "sqrshl", "uqrshl",
+ "smax", "umax", "smax", "umax",
+ "smin", "umin", "smin", "umin",
+ "sabd", "uabd", "sabd", "uabd",
+ "saba", "uaba", "saba", "uaba",
+ "add", "sub", "add", "sub",
+ "cmtst", "cmeq", "cmtst", "cmeq",
+ "mla", "mls", "mla", "mls",
+ "mul", "pmul", "mul", "pmul",
+ "smaxp", "umaxp", "smaxp", "umaxp",
+ "sminp", "uminp", "sminp", "uminp",
+ "sqdmulh", "sqrdmulh", "sqdmulh", "sqrdmulh",
+ "addp", "unallocated", "addp", "unallocated",
+ "fmaxnm", "fmaxnmp", "fminnm", "fminnmp",
+ "fmla", "unallocated", "fmls", "unallocated",
+ "fadd", "faddp", "fsub", "fabd",
+ "fmulx", "fmul", "unallocated", "unallocated",
+ "fcmeq", "fcmge", "unallocated", "fcmgt",
+ "unallocated", "facge", "unallocated", "facgt",
+ "fmax", "fmaxp", "fmin", "fminp",
+ "frecps", "fdiv", "frsqrts", "unallocated"};
+
+ // Operation is determined by the opcode bits (15-11), the top bit of
+ // size (23) and the U bit (29).
+ unsigned index = (instr->Bits(15, 11) << 2) | (instr->Bit(23) << 1) |
+ instr->Bit(29);
+ VIXL_ASSERT(index < (sizeof(mnemonics) / sizeof(mnemonics[0])));
+ mnemonic = mnemonics[index];
+ // Assert that index is not one of the previously handled logical
+ // instructions.
+ VIXL_ASSERT(mnemonic != NULL);
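+
+ // Worked example (illustrative, not from the original comments): FADD
+ // (vector, single precision) has opcode 0b11010 with size<1> == 0 and
+ // U == 0, giving index (0b11010 << 2) == 104, which selects "fadd"; the
+ // U == 1 encoding at the same opcode selects "faddp".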
+
+ if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+ nfd.SetFormatMaps(nfd.FPFormatMap());
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEON3Different(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+
+ NEONFormatDecoder nfd(instr);
+ nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
+
+ // Ignore the Q bit. Appending a "2" suffix is handled later.
+ switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) {
+ case NEON_PMULL: mnemonic = "pmull"; break;
+ case NEON_SABAL: mnemonic = "sabal"; break;
+ case NEON_SABDL: mnemonic = "sabdl"; break;
+ case NEON_SADDL: mnemonic = "saddl"; break;
+ case NEON_SMLAL: mnemonic = "smlal"; break;
+ case NEON_SMLSL: mnemonic = "smlsl"; break;
+ case NEON_SMULL: mnemonic = "smull"; break;
+ case NEON_SSUBL: mnemonic = "ssubl"; break;
+ case NEON_SQDMLAL: mnemonic = "sqdmlal"; break;
+ case NEON_SQDMLSL: mnemonic = "sqdmlsl"; break;
+ case NEON_SQDMULL: mnemonic = "sqdmull"; break;
+ case NEON_UABAL: mnemonic = "uabal"; break;
+ case NEON_UABDL: mnemonic = "uabdl"; break;
+ case NEON_UADDL: mnemonic = "uaddl"; break;
+ case NEON_UMLAL: mnemonic = "umlal"; break;
+ case NEON_UMLSL: mnemonic = "umlsl"; break;
+ case NEON_UMULL: mnemonic = "umull"; break;
+ case NEON_USUBL: mnemonic = "usubl"; break;
+ case NEON_SADDW:
+ mnemonic = "saddw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_SSUBW:
+ mnemonic = "ssubw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_UADDW:
+ mnemonic = "uaddw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_USUBW:
+ mnemonic = "usubw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_ADDHN:
+ mnemonic = "addhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_RADDHN:
+ mnemonic = "raddhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_RSUBHN:
+ mnemonic = "rsubhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_SUBHN:
+ mnemonic = "subhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ default: form = "(NEON3Different)";
+ }
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONAcrossLanes(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, 'Vn.%s";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap(),
+ NEONFormatDecoder::IntegerFormatMap());
+
+ if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+ nfd.SetFormatMap(1, nfd.FPFormatMap());
+ switch (instr->Mask(NEONAcrossLanesFPMask)) {
+ case NEON_FMAXV: mnemonic = "fmaxv"; break;
+ case NEON_FMINV: mnemonic = "fminv"; break;
+ case NEON_FMAXNMV: mnemonic = "fmaxnmv"; break;
+ case NEON_FMINNMV: mnemonic = "fminnmv"; break;
+ default: form = "(NEONAcrossLanes)"; break;
+ }
+ } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) {
+ switch (instr->Mask(NEONAcrossLanesMask)) {
+ case NEON_ADDV: mnemonic = "addv"; break;
+ case NEON_SMAXV: mnemonic = "smaxv"; break;
+ case NEON_SMINV: mnemonic = "sminv"; break;
+ case NEON_UMAXV: mnemonic = "umaxv"; break;
+ case NEON_UMINV: mnemonic = "uminv"; break;
+ case NEON_SADDLV:
+ mnemonic = "saddlv";
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ break;
+ case NEON_UADDLV:
+ mnemonic = "uaddlv";
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ break;
+ default: form = "(NEONAcrossLanes)"; break;
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form,
+ NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat));
+}
+
+
+void Disassembler::VisitNEONByIndexedElement(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ bool l_instr = false;
+ bool fp_instr = false;
+
+ const char *form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]";
+
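+ // Destination ('Ta') arrangement for the long forms, selected by the
+ // size field (bits 23:22): 4S for halfword and 2D for word elements.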
+ static const NEONFormatMap map_ta = {
+ {23, 22}, {NF_UNDEF, NF_4S, NF_2D}
+ };
+ NEONFormatDecoder nfd(instr, &map_ta,
+ NEONFormatDecoder::IntegerFormatMap(),
+ NEONFormatDecoder::ScalarFormatMap());
+
+ switch (instr->Mask(NEONByIndexedElementMask)) {
+ case NEON_SMULL_byelement: mnemonic = "smull"; l_instr = true; break;
+ case NEON_UMULL_byelement: mnemonic = "umull"; l_instr = true; break;
+ case NEON_SMLAL_byelement: mnemonic = "smlal"; l_instr = true; break;
+ case NEON_UMLAL_byelement: mnemonic = "umlal"; l_instr = true; break;
+ case NEON_SMLSL_byelement: mnemonic = "smlsl"; l_instr = true; break;
+ case NEON_UMLSL_byelement: mnemonic = "umlsl"; l_instr = true; break;
+ case NEON_SQDMULL_byelement: mnemonic = "sqdmull"; l_instr = true; break;
+ case NEON_SQDMLAL_byelement: mnemonic = "sqdmlal"; l_instr = true; break;
+ case NEON_SQDMLSL_byelement: mnemonic = "sqdmlsl"; l_instr = true; break;
+ case NEON_MUL_byelement: mnemonic = "mul"; break;
+ case NEON_MLA_byelement: mnemonic = "mla"; break;
+ case NEON_MLS_byelement: mnemonic = "mls"; break;
+ case NEON_SQDMULH_byelement: mnemonic = "sqdmulh"; break;
+ case NEON_SQRDMULH_byelement: mnemonic = "sqrdmulh"; break;
+ default:
+ switch (instr->Mask(NEONByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement: mnemonic = "fmul"; fp_instr = true; break;
+ case NEON_FMLA_byelement: mnemonic = "fmla"; fp_instr = true; break;
+ case NEON_FMLS_byelement: mnemonic = "fmls"; fp_instr = true; break;
+ case NEON_FMULX_byelement: mnemonic = "fmulx"; fp_instr = true; break;
+ }
+ }
+
+ if (l_instr) {
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+ } else if (fp_instr) {
+ nfd.SetFormatMap(0, nfd.FPFormatMap());
+ Format(instr, mnemonic, nfd.Substitute(form));
+ } else {
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ Format(instr, mnemonic, nfd.Substitute(form));
+ }
+}
+
+
+void Disassembler::VisitNEONCopy(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONCopy)";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap(),
+ NEONFormatDecoder::TriangularScalarFormatMap());
+
+ if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
+ mnemonic = "mov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]";
+ } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
+ mnemonic = "mov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ if (nfd.GetVectorFormat() == kFormatD) {
+ form = "'Vd.%s['IVInsIndex1], 'Xn";
+ } else {
+ form = "'Vd.%s['IVInsIndex1], 'Wn";
+ }
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
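+ // For word (imm5<2:0> = 0b100) and doubleword (Q = 1) elements, umov
+ // is printed using its preferred alias, mov.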
+ if (instr->Mask(NEON_Q) || ((instr->ImmNEON5() & 7) == 4)) {
+ mnemonic = "mov";
+ } else {
+ mnemonic = "umov";
+ }
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ if (nfd.GetVectorFormat() == kFormatD) {
+ form = "'Xd, 'Vn.%s['IVInsIndex1]";
+ } else {
+ form = "'Wd, 'Vn.%s['IVInsIndex1]";
+ }
+ } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) {
+ mnemonic = "smov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ form = "'Rdq, 'Vn.%s['IVInsIndex1]";
+ } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
+ mnemonic = "dup";
+ form = "'Vd.%s, 'Vn.%s['IVInsIndex1]";
+ } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
+ mnemonic = "dup";
+ if (nfd.GetVectorFormat() == kFormat2D) {
+ form = "'Vd.%s, 'Xn";
+ } else {
+ form = "'Vd.%s, 'Wn";
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONExtract(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONExtract)";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ if (instr->Mask(NEONExtractMask) == NEON_EXT) {
+ mnemonic = "ext";
+ form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreMultiStruct)";
+ const char *form_1v = "{'Vt.%1$s}, ['Xns]";
+ const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]";
+ const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]";
+ const char *form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreMultiStructMask)) {
+ case NEON_LD1_1v: mnemonic = "ld1"; form = form_1v; break;
+ case NEON_LD1_2v: mnemonic = "ld1"; form = form_2v; break;
+ case NEON_LD1_3v: mnemonic = "ld1"; form = form_3v; break;
+ case NEON_LD1_4v: mnemonic = "ld1"; form = form_4v; break;
+ case NEON_LD2: mnemonic = "ld2"; form = form_2v; break;
+ case NEON_LD3: mnemonic = "ld3"; form = form_3v; break;
+ case NEON_LD4: mnemonic = "ld4"; form = form_4v; break;
+ case NEON_ST1_1v: mnemonic = "st1"; form = form_1v; break;
+ case NEON_ST1_2v: mnemonic = "st1"; form = form_2v; break;
+ case NEON_ST1_3v: mnemonic = "st1"; form = form_3v; break;
+ case NEON_ST1_4v: mnemonic = "st1"; form = form_4v; break;
+ case NEON_ST2: mnemonic = "st2"; form = form_2v; break;
+ case NEON_ST3: mnemonic = "st3"; form = form_3v; break;
+ case NEON_ST4: mnemonic = "st4"; form = form_4v; break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreMultiStructPostIndex(
+ const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreMultiStructPostIndex)";
+ const char *form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1";
+ const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2";
+ const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3";
+ const char *form_4v =
+ "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD1_1v_post: mnemonic = "ld1"; form = form_1v; break;
+ case NEON_LD1_2v_post: mnemonic = "ld1"; form = form_2v; break;
+ case NEON_LD1_3v_post: mnemonic = "ld1"; form = form_3v; break;
+ case NEON_LD1_4v_post: mnemonic = "ld1"; form = form_4v; break;
+ case NEON_LD2_post: mnemonic = "ld2"; form = form_2v; break;
+ case NEON_LD3_post: mnemonic = "ld3"; form = form_3v; break;
+ case NEON_LD4_post: mnemonic = "ld4"; form = form_4v; break;
+ case NEON_ST1_1v_post: mnemonic = "st1"; form = form_1v; break;
+ case NEON_ST1_2v_post: mnemonic = "st1"; form = form_2v; break;
+ case NEON_ST1_3v_post: mnemonic = "st1"; form = form_3v; break;
+ case NEON_ST1_4v_post: mnemonic = "st1"; form = form_4v; break;
+ case NEON_ST2_post: mnemonic = "st2"; form = form_2v; break;
+ case NEON_ST3_post: mnemonic = "st3"; form = form_3v; break;
+ case NEON_ST4_post: mnemonic = "st4"; form = form_4v; break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreSingleStruct)";
+
+ const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns]";
+ const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns]";
+ const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns]";
+ const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreSingleStructMask)) {
+ case NEON_LD1_b: mnemonic = "ld1"; form = form_1b; break;
+ case NEON_LD1_h: mnemonic = "ld1"; form = form_1h; break;
+ case NEON_LD1_s:
+ mnemonic = "ld1";
+ VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_ST1_b: mnemonic = "st1"; form = form_1b; break;
+ case NEON_ST1_h: mnemonic = "st1"; form = form_1h; break;
+ case NEON_ST1_s:
+ mnemonic = "st1";
+ VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_LD1R:
+ mnemonic = "ld1r";
+ form = "{'Vt.%s}, ['Xns]";
+ break;
+ case NEON_LD2_b:
+ case NEON_ST2_b:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD2_h:
+ case NEON_ST2_h:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD2_s:
+ case NEON_ST2_s:
+ VIXL_STATIC_ASSERT((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d);
+ VIXL_STATIC_ASSERT((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d);
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]";
+ else
+ form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]";
+ break;
+ case NEON_LD2R:
+ mnemonic = "ld2r";
+ form = "{'Vt.%s, 'Vt2.%s}, ['Xns]";
+ break;
+ case NEON_LD3_b:
+ case NEON_ST3_b:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD3_h:
+ case NEON_ST3_h:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD3_s:
+ case NEON_ST3_s:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]";
+ break;
+ case NEON_LD3R:
+ mnemonic = "ld3r";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]";
+ break;
+ case NEON_LD4_b:
+ case NEON_ST4_b:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD4_h:
+ case NEON_ST4_h:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD4_s:
+ case NEON_ST4_s:
+ VIXL_STATIC_ASSERT((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d);
+ VIXL_STATIC_ASSERT((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d);
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]";
+ break;
+ case NEON_LD4R:
+ mnemonic = "ld4r";
+ form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreSingleStructPostIndex(
+ const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreSingleStructPostIndex)";
+
+ const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1";
+ const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2";
+ const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4";
+ const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_b_post: mnemonic = "ld1"; form = form_1b; break;
+ case NEON_LD1_h_post: mnemonic = "ld1"; form = form_1h; break;
+ case NEON_LD1_s_post:
+ mnemonic = "ld1";
+ VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_ST1_b_post: mnemonic = "st1"; form = form_1b; break;
+ case NEON_ST1_h_post: mnemonic = "st1"; form = form_1h; break;
+ case NEON_ST1_s_post:
+ mnemonic = "st1";
+ VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_LD1R_post:
+ mnemonic = "ld1r";
+ form = "{'Vt.%s}, ['Xns], 'Xmz1";
+ break;
+ case NEON_LD2_b_post:
+ case NEON_ST2_b_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2";
+ break;
+ case NEON_ST2_h_post:
+ case NEON_LD2_h_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4";
+ break;
+ case NEON_LD2_s_post:
+ case NEON_ST2_s_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8";
+ else
+ form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16";
+ break;
+ case NEON_LD2R_post:
+ mnemonic = "ld2r";
+ form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2";
+ break;
+ case NEON_LD3_b_post:
+ case NEON_ST3_b_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3";
+ break;
+ case NEON_LD3_h_post:
+ case NEON_ST3_h_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6";
+ break;
+ case NEON_LD3_s_post:
+ case NEON_ST3_s_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmr3";
+ break;
+ case NEON_LD3R_post:
+ mnemonic = "ld3r";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3";
+ break;
+ case NEON_LD4_b_post:
+ case NEON_ST4_b_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4";
+ break;
+ case NEON_LD4_h_post:
+ case NEON_ST4_h_post:
+ mnemonic = (instr->LdStXLoad()) == 1 ? "ld4" : "st4";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8";
+ break;
+ case NEON_LD4_s_post:
+ case NEON_ST4_s_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32";
+ break;
+ case NEON_LD4R_post:
+ mnemonic = "ld4r";
+ form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4";
+ break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONModifiedImmediate(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1";
+
+ int cmode = instr->NEONCmode();
+ int cmode_3 = (cmode >> 3) & 1;
+ int cmode_2 = (cmode >> 2) & 1;
+ int cmode_1 = (cmode >> 1) & 1;
+ int cmode_0 = cmode & 1;
+ int q = instr->NEONQ();
+ int op = instr->NEONModImmOp();
+
+ static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} };
+ static const NEONFormatMap map_h = { {30}, {NF_4H, NF_8H} };
+ static const NEONFormatMap map_s = { {30}, {NF_2S, NF_4S} };
+ NEONFormatDecoder nfd(instr, &map_b);
+
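+ // cmode and op together select the operation (movi, mvni, orr, bic or
+ // fmov), the element size, and whether the shift is lsl or msl.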
+ if (cmode_3 == 0) {
+ if (cmode_0 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ } else { // cmode<0> == '1'.
+ mnemonic = (op == 1) ? "bic" : "orr";
+ }
+ nfd.SetFormatMap(0, &map_s);
+ } else { // cmode<3> == '1'.
+ if (cmode_2 == 0) {
+ if (cmode_0 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ } else { // cmode<0> == '1'.
+ mnemonic = (op == 1) ? "bic" : "orr";
+ }
+ nfd.SetFormatMap(0, &map_h);
+ } else { // cmode<2> == '1'.
+ if (cmode_1 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2";
+ nfd.SetFormatMap(0, &map_s);
+ } else { // cmode<1> == '1'.
+ if (cmode_0 == 0) {
+ mnemonic = "movi";
+ if (op == 0) {
+ form = "'Vt.%s, 'IVMIImm8";
+ } else {
+ form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm";
+ }
+ } else { // cmode<0> == '1'
+ mnemonic = "fmov";
+ if (op == 0) {
+ form = "'Vt.%s, 'IVMIImmFPSingle";
+ nfd.SetFormatMap(0, &map_s);
+ } else {
+ if (q == 1) {
+ form = "'Vt.2d, 'IVMIImmFPDouble";
+ }
+ }
+ }
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONScalar2RegMisc(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn";
+ const char *form_0 = "%sd, %sn, #0";
+ const char *form_fp0 = "%sd, %sn, #0.0";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+ // These instructions all use a two bit size field. (NOT and RBIT, which
+ // use the field to encode the operation, have no scalar forms.)
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_CMGT_zero_scalar: mnemonic = "cmgt"; form = form_0; break;
+ case NEON_CMGE_zero_scalar: mnemonic = "cmge"; form = form_0; break;
+ case NEON_CMLE_zero_scalar: mnemonic = "cmle"; form = form_0; break;
+ case NEON_CMLT_zero_scalar: mnemonic = "cmlt"; form = form_0; break;
+ case NEON_CMEQ_zero_scalar: mnemonic = "cmeq"; form = form_0; break;
+ case NEON_NEG_scalar: mnemonic = "neg"; break;
+ case NEON_SQNEG_scalar: mnemonic = "sqneg"; break;
+ case NEON_ABS_scalar: mnemonic = "abs"; break;
+ case NEON_SQABS_scalar: mnemonic = "sqabs"; break;
+ case NEON_SUQADD_scalar: mnemonic = "suqadd"; break;
+ case NEON_USQADD_scalar: mnemonic = "usqadd"; break;
+ default: form = "(NEONScalar2RegMisc)";
+ }
+ } else {
+ // These instructions all use a one bit size field, except SQXTUN, SQXTN
+ // and UQXTN, which use a two bit size field.
+ nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+ case NEON_FRSQRTE_scalar: mnemonic = "frsqrte"; break;
+ case NEON_FRECPE_scalar: mnemonic = "frecpe"; break;
+ case NEON_SCVTF_scalar: mnemonic = "scvtf"; break;
+ case NEON_UCVTF_scalar: mnemonic = "ucvtf"; break;
+ case NEON_FCMGT_zero_scalar: mnemonic = "fcmgt"; form = form_fp0; break;
+ case NEON_FCMGE_zero_scalar: mnemonic = "fcmge"; form = form_fp0; break;
+ case NEON_FCMLE_zero_scalar: mnemonic = "fcmle"; form = form_fp0; break;
+ case NEON_FCMLT_zero_scalar: mnemonic = "fcmlt"; form = form_fp0; break;
+ case NEON_FCMEQ_zero_scalar: mnemonic = "fcmeq"; form = form_fp0; break;
+ case NEON_FRECPX_scalar: mnemonic = "frecpx"; break;
+ case NEON_FCVTNS_scalar: mnemonic = "fcvtns"; break;
+ case NEON_FCVTNU_scalar: mnemonic = "fcvtnu"; break;
+ case NEON_FCVTPS_scalar: mnemonic = "fcvtps"; break;
+ case NEON_FCVTPU_scalar: mnemonic = "fcvtpu"; break;
+ case NEON_FCVTMS_scalar: mnemonic = "fcvtms"; break;
+ case NEON_FCVTMU_scalar: mnemonic = "fcvtmu"; break;
+ case NEON_FCVTZS_scalar: mnemonic = "fcvtzs"; break;
+ case NEON_FCVTZU_scalar: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTAS_scalar: mnemonic = "fcvtas"; break;
+ case NEON_FCVTAU_scalar: mnemonic = "fcvtau"; break;
+ case NEON_FCVTXN_scalar:
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ mnemonic = "fcvtxn";
+ break;
+ default:
+ nfd.SetFormatMap(0, nfd.ScalarFormatMap());
+ nfd.SetFormatMap(1, nfd.LongScalarFormatMap());
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_SQXTN_scalar: mnemonic = "sqxtn"; break;
+ case NEON_UQXTN_scalar: mnemonic = "uqxtn"; break;
+ case NEON_SQXTUN_scalar: mnemonic = "sqxtun"; break;
+ default: form = "(NEONScalar2RegMisc)";
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalar3Diff(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, %sm";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap(),
+ NEONFormatDecoder::ScalarFormatMap());
+
+ switch (instr->Mask(NEONScalar3DiffMask)) {
+ case NEON_SQDMLAL_scalar : mnemonic = "sqdmlal"; break;
+ case NEON_SQDMLSL_scalar : mnemonic = "sqdmlsl"; break;
+ case NEON_SQDMULL_scalar : mnemonic = "sqdmull"; break;
+ default: form = "(NEONScalar3Diff)";
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalar3Same(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, %sm";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+ if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
+ nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar3SameFPMask)) {
+ case NEON_FACGE_scalar: mnemonic = "facge"; break;
+ case NEON_FACGT_scalar: mnemonic = "facgt"; break;
+ case NEON_FCMEQ_scalar: mnemonic = "fcmeq"; break;
+ case NEON_FCMGE_scalar: mnemonic = "fcmge"; break;
+ case NEON_FCMGT_scalar: mnemonic = "fcmgt"; break;
+ case NEON_FMULX_scalar: mnemonic = "fmulx"; break;
+ case NEON_FRECPS_scalar: mnemonic = "frecps"; break;
+ case NEON_FRSQRTS_scalar: mnemonic = "frsqrts"; break;
+ case NEON_FABD_scalar: mnemonic = "fabd"; break;
+ default: form = "(NEONScalar3Same)";
+ }
+ } else {
+ switch (instr->Mask(NEONScalar3SameMask)) {
+ case NEON_ADD_scalar: mnemonic = "add"; break;
+ case NEON_SUB_scalar: mnemonic = "sub"; break;
+ case NEON_CMEQ_scalar: mnemonic = "cmeq"; break;
+ case NEON_CMGE_scalar: mnemonic = "cmge"; break;
+ case NEON_CMGT_scalar: mnemonic = "cmgt"; break;
+ case NEON_CMHI_scalar: mnemonic = "cmhi"; break;
+ case NEON_CMHS_scalar: mnemonic = "cmhs"; break;
+ case NEON_CMTST_scalar: mnemonic = "cmtst"; break;
+ case NEON_UQADD_scalar: mnemonic = "uqadd"; break;
+ case NEON_SQADD_scalar: mnemonic = "sqadd"; break;
+ case NEON_UQSUB_scalar: mnemonic = "uqsub"; break;
+ case NEON_SQSUB_scalar: mnemonic = "sqsub"; break;
+ case NEON_USHL_scalar: mnemonic = "ushl"; break;
+ case NEON_SSHL_scalar: mnemonic = "sshl"; break;
+ case NEON_UQSHL_scalar: mnemonic = "uqshl"; break;
+ case NEON_SQSHL_scalar: mnemonic = "sqshl"; break;
+ case NEON_URSHL_scalar: mnemonic = "urshl"; break;
+ case NEON_SRSHL_scalar: mnemonic = "srshl"; break;
+ case NEON_UQRSHL_scalar: mnemonic = "uqrshl"; break;
+ case NEON_SQRSHL_scalar: mnemonic = "sqrshl"; break;
+ case NEON_SQDMULH_scalar: mnemonic = "sqdmulh"; break;
+ case NEON_SQRDMULH_scalar: mnemonic = "sqrdmulh"; break;
+ default: form = "(NEONScalar3Same)";
+ }
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, 'Ve.%s['IVByElemIndex]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ bool long_instr = false;
+
+ switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+ case NEON_SQDMULL_byelement_scalar:
+ mnemonic = "sqdmull";
+ long_instr = true;
+ break;
+ case NEON_SQDMLAL_byelement_scalar:
+ mnemonic = "sqdmlal";
+ long_instr = true;
+ break;
+ case NEON_SQDMLSL_byelement_scalar:
+ mnemonic = "sqdmlsl";
+ long_instr = true;
+ break;
+ case NEON_SQDMULH_byelement_scalar:
+ mnemonic = "sqdmulh";
+ break;
+ case NEON_SQRDMULH_byelement_scalar:
+ mnemonic = "sqrdmulh";
+ break;
+ default:
+ nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement_scalar: mnemonic = "fmul"; break;
+ case NEON_FMLA_byelement_scalar: mnemonic = "fmla"; break;
+ case NEON_FMLS_byelement_scalar: mnemonic = "fmls"; break;
+ case NEON_FMULX_byelement_scalar: mnemonic = "fmulx"; break;
+ default: form = "(NEONScalarByIndexedElement)";
+ }
+ }
+
+ if (long_instr) {
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(
+ form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarCopy(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONScalarCopy)";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+
+ if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+ mnemonic = "mov";
+ form = "%sd, 'Vn.%s['IVInsIndex1]";
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarPairwise(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, 'Vn.%s";
+ NEONFormatMap map = { {22}, {NF_2S, NF_2D} };
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap(), &map);
+
+ switch (instr->Mask(NEONScalarPairwiseMask)) {
+ case NEON_ADDP_scalar: mnemonic = "addp"; break;
+ case NEON_FADDP_scalar: mnemonic = "faddp"; break;
+ case NEON_FMAXP_scalar: mnemonic = "fmaxp"; break;
+ case NEON_FMAXNMP_scalar: mnemonic = "fmaxnmp"; break;
+ case NEON_FMINP_scalar: mnemonic = "fminp"; break;
+ case NEON_FMINNMP_scalar: mnemonic = "fminnmp"; break;
+ default: form = "(NEONScalarPairwise)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form,
+ NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, 'Is1";
+ const char *form_2 = "%sd, %sn, 'Is2";
+
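+ // The scalar register width is encoded in immh (bits 22:19):
+ // 0001->B, 001x->H, 01xx->S, 1xxx->D. The narrowing shifts take a
+ // source register one size wider (map_shift_narrow).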
+ static const NEONFormatMap map_shift = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S,
+ NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D}
+ };
+ static const NEONFormatMap map_shift_narrow = {
+ {21, 20, 19},
+ {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D}
+ };
+ NEONFormatDecoder nfd(instr, &map_shift);
+
+ if (instr->ImmNEONImmh()) { // immh has to be non-zero.
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+ case NEON_FCVTZU_imm_scalar: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTZS_imm_scalar: mnemonic = "fcvtzs"; break;
+ case NEON_SCVTF_imm_scalar: mnemonic = "scvtf"; break;
+ case NEON_UCVTF_imm_scalar: mnemonic = "ucvtf"; break;
+ case NEON_SRI_scalar: mnemonic = "sri"; break;
+ case NEON_SSHR_scalar: mnemonic = "sshr"; break;
+ case NEON_USHR_scalar: mnemonic = "ushr"; break;
+ case NEON_SRSHR_scalar: mnemonic = "srshr"; break;
+ case NEON_URSHR_scalar: mnemonic = "urshr"; break;
+ case NEON_SSRA_scalar: mnemonic = "ssra"; break;
+ case NEON_USRA_scalar: mnemonic = "usra"; break;
+ case NEON_SRSRA_scalar: mnemonic = "srsra"; break;
+ case NEON_URSRA_scalar: mnemonic = "ursra"; break;
+ case NEON_SHL_scalar: mnemonic = "shl"; form = form_2; break;
+ case NEON_SLI_scalar: mnemonic = "sli"; form = form_2; break;
+ case NEON_SQSHLU_scalar: mnemonic = "sqshlu"; form = form_2; break;
+ case NEON_SQSHL_imm_scalar: mnemonic = "sqshl"; form = form_2; break;
+ case NEON_UQSHL_imm_scalar: mnemonic = "uqshl"; form = form_2; break;
+ case NEON_UQSHRN_scalar:
+ mnemonic = "uqshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_UQRSHRN_scalar:
+ mnemonic = "uqrshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQSHRN_scalar:
+ mnemonic = "sqshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQRSHRN_scalar:
+ mnemonic = "sqrshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQSHRUN_scalar:
+ mnemonic = "sqshrun";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQRSHRUN_scalar:
+ mnemonic = "sqrshrun";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ default:
+ form = "(NEONScalarShiftImmediate)";
+ }
+ } else {
+ form = "(NEONScalarShiftImmediate)";
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONShiftImmediate(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Is1";
+ const char *form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2";
+ const char *form_xtl = "'Vd.%s, 'Vn.%s";
+
+ // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+ static const NEONFormatMap map_shift_ta = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}
+ };
+
+ // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+ // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+ static const NEONFormatMap map_shift_tb = {
+ {22, 21, 20, 19, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
+ NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}
+ };
+
+ NEONFormatDecoder nfd(instr, &map_shift_tb);
+
+ if (instr->ImmNEONImmh()) { // immh has to be non-zero.
+ switch (instr->Mask(NEONShiftImmediateMask)) {
+ case NEON_SQSHLU: mnemonic = "sqshlu"; form = form_shift_2; break;
+ case NEON_SQSHL_imm: mnemonic = "sqshl"; form = form_shift_2; break;
+ case NEON_UQSHL_imm: mnemonic = "uqshl"; form = form_shift_2; break;
+ case NEON_SHL: mnemonic = "shl"; form = form_shift_2; break;
+ case NEON_SLI: mnemonic = "sli"; form = form_shift_2; break;
+ case NEON_SCVTF_imm: mnemonic = "scvtf"; break;
+ case NEON_UCVTF_imm: mnemonic = "ucvtf"; break;
+ case NEON_FCVTZU_imm: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTZS_imm: mnemonic = "fcvtzs"; break;
+ case NEON_SRI: mnemonic = "sri"; break;
+ case NEON_SSHR: mnemonic = "sshr"; break;
+ case NEON_USHR: mnemonic = "ushr"; break;
+ case NEON_SRSHR: mnemonic = "srshr"; break;
+ case NEON_URSHR: mnemonic = "urshr"; break;
+ case NEON_SSRA: mnemonic = "ssra"; break;
+ case NEON_USRA: mnemonic = "usra"; break;
+ case NEON_SRSRA: mnemonic = "srsra"; break;
+ case NEON_URSRA: mnemonic = "ursra"; break;
+ case NEON_SHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_RSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_UQSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_UQRSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQRSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQSHRUN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQRSHRUN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SSHLL:
+ nfd.SetFormatMap(0, &map_shift_ta);
+ if (instr->ImmNEONImmb() == 0 &&
+ CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // sxtl variant.
+ form = form_xtl;
+ mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl";
+ } else { // sshll variant.
+ form = form_shift_2;
+ mnemonic = instr->Mask(NEON_Q) ? "sshll2" : "sshll";
+ }
+ break;
+ case NEON_USHLL:
+ nfd.SetFormatMap(0, &map_shift_ta);
+ if (instr->ImmNEONImmb() == 0 &&
+ CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // uxtl variant.
+ form = form_xtl;
+ mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl";
+ } else { // ushll variant.
+ form = form_shift_2;
+ mnemonic = instr->Mask(NEON_Q) ? "ushll2" : "ushll";
+ }
+ break;
+ default: form = "(NEONShiftImmediate)";
+ }
+ } else {
+ form = "(NEONShiftImmediate)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONTable(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONTable)";
+ const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s";
+ const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s";
+ const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+ const char form_4v[] =
+ "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+ static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} };
+ NEONFormatDecoder nfd(instr, &map_b);
+
+ switch (instr->Mask(NEONTableMask)) {
+ case NEON_TBL_1v: mnemonic = "tbl"; form = form_1v; break;
+ case NEON_TBL_2v: mnemonic = "tbl"; form = form_2v; break;
+ case NEON_TBL_3v: mnemonic = "tbl"; form = form_3v; break;
+ case NEON_TBL_4v: mnemonic = "tbl"; form = form_4v; break;
+ case NEON_TBX_1v: mnemonic = "tbx"; form = form_1v; break;
+ case NEON_TBX_2v: mnemonic = "tbx"; form = form_2v; break;
+ case NEON_TBX_3v: mnemonic = "tbx"; form = form_3v; break;
+ case NEON_TBX_4v: mnemonic = "tbx"; form = form_4v; break;
+ default: break;
+ }
+
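+ // Splice the consecutive table register numbers (wrapping at v31) into
+ // the chosen form; the shorter forms ignore the surplus arguments.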
+ char re_form[sizeof(form_4v) + 6];
+ int reg_num = instr->Rn();
+ SprintfLiteral(re_form, form,
+ (reg_num + 1) % kNumberOfVRegisters,
+ (reg_num + 2) % kNumberOfVRegisters,
+ (reg_num + 3) % kNumberOfVRegisters);
+
+ Format(instr, mnemonic, nfd.Substitute(re_form));
+}
+
+
+void Disassembler::VisitNEONPerm(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+ NEONFormatDecoder nfd(instr);
+
+ switch (instr->Mask(NEONPermMask)) {
+ case NEON_TRN1: mnemonic = "trn1"; break;
+ case NEON_TRN2: mnemonic = "trn2"; break;
+ case NEON_UZP1: mnemonic = "uzp1"; break;
+ case NEON_UZP2: mnemonic = "uzp2"; break;
+ case NEON_ZIP1: mnemonic = "zip1"; break;
+ case NEON_ZIP2: mnemonic = "zip2"; break;
+ default: form = "(NEONPerm)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitUnimplemented(const Instruction* instr) {
+ Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+void Disassembler::VisitUnallocated(const Instruction* instr) {
+ Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+void Disassembler::ProcessOutput(const Instruction* /*instr*/) {
+ // The base disasm does nothing more than disassembling into a buffer.
+}
+
+
+void Disassembler::AppendRegisterNameToOutput(const Instruction* instr,
+ const CPURegister& reg) {
+ USE(instr);
+ VIXL_ASSERT(reg.IsValid());
+ char reg_char;
+
+ if (reg.IsRegister()) {
+ reg_char = reg.Is64Bits() ? 'x' : 'w';
+ } else {
+ VIXL_ASSERT(reg.IsVRegister());
+ switch (reg.SizeInBits()) {
+ case kBRegSize: reg_char = 'b'; break;
+ case kHRegSize: reg_char = 'h'; break;
+ case kSRegSize: reg_char = 's'; break;
+ case kDRegSize: reg_char = 'd'; break;
+ default:
+ VIXL_ASSERT(reg.Is128Bits());
+ reg_char = 'q';
+ }
+ }
+
+ if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
+ // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
+ AppendToOutput("%c%d", reg_char, reg.code());
+ } else if (reg.Aliases(sp)) {
+ // Disassemble w31/x31 as stack pointer wsp/sp.
+ AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
+ } else {
+ // Disassemble w31/x31 as zero register wzr/xzr.
+ AppendToOutput("%czr", reg_char);
+ }
+}
+
+
+void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr,
+ int64_t offset) {
+ USE(instr);
+ char sign = (offset < 0) ? '-' : '+';
+ AppendToOutput("#%c0x%" PRIx64, sign, std::abs(offset));
+}
+
+
+void Disassembler::AppendAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ USE(instr);
+ AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
+}
+
+
+void Disassembler::AppendCodeAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ USE(instr);
+ int64_t rel_addr = CodeRelativeAddress(addr);
+ if (rel_addr >= 0) {
+ AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
+ } else {
+ AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
+ }
+}
+
+
+void Disassembler::AppendCodeRelativeCodeAddressToOutput(
+ const Instruction* instr, const void* addr) {
+ AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeDataAddressToOutput(
+ const Instruction* instr, const void* addr) {
+ AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::MapCodeAddress(int64_t base_address,
+ const Instruction* instr_address) {
+ set_code_address_offset(
+ base_address - reinterpret_cast<intptr_t>(instr_address));
+}
+int64_t Disassembler::CodeRelativeAddress(const void* addr) {
+ return reinterpret_cast<intptr_t>(addr) + code_address_offset();
+}
+
+
+void Disassembler::Format(const Instruction* instr, const char* mnemonic,
+ const char* format) {
+ VIXL_ASSERT(mnemonic != NULL);
+ ResetOutput();
+ uint32_t pos = buffer_pos_;
+ Substitute(instr, mnemonic);
+ if (format != NULL) {
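+ // Pad the mnemonic to an 8-character column (at least one space)
+ // before appending the operands.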
+ uint32_t spaces = buffer_pos_ - pos < 8 ? 8 - (buffer_pos_ - pos) : 1;
+ while (spaces--) {
+ VIXL_ASSERT(buffer_pos_ < buffer_size_);
+ buffer_[buffer_pos_++] = ' ';
+ }
+ Substitute(instr, format);
+ }
+ VIXL_ASSERT(buffer_pos_ < buffer_size_);
+ buffer_[buffer_pos_] = 0;
+ ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(const Instruction* instr, const char* string) {
+ char chr = *string++;
+ while (chr != '\0') {
+ if (chr == '\'') {
+ string += SubstituteField(instr, string);
+ } else {
+ VIXL_ASSERT(buffer_pos_ < buffer_size_);
+ buffer_[buffer_pos_++] = chr;
+ }
+ chr = *string++;
+ }
+}
+
+
+int Disassembler::SubstituteField(const Instruction* instr,
+ const char* format) {
+ switch (format[0]) {
+ // NB. The remaining substitution prefix characters are: JUZ.
+ case 'R': // Register. X or W, selected by sf bit.
+ case 'F': // FP register. S or D, selected by type field.
+ case 'V': // Vector register, V, vector format.
+ case 'W':
+ case 'X':
+ case 'B':
+ case 'H':
+ case 'S':
+ case 'D':
+ case 'Q': return SubstituteRegisterField(instr, format);
+ case 'I': return SubstituteImmediateField(instr, format);
+ case 'L': return SubstituteLiteralField(instr, format);
+ case 'N': return SubstituteShiftField(instr, format);
+ case 'P': return SubstitutePrefetchField(instr, format);
+ case 'C': return SubstituteConditionField(instr, format);
+ case 'E': return SubstituteExtendField(instr, format);
+ case 'A': return SubstitutePCRelAddressField(instr, format);
+ case 'T': return SubstituteBranchTargetField(instr, format);
+ case 'O': return SubstituteLSRegOffsetField(instr, format);
+ case 'M': return SubstituteBarrierField(instr, format);
+ case 'K': return SubstituteCrField(instr, format);
+ case 'G': return SubstituteSysOpField(instr, format);
+ default: {
+ VIXL_UNREACHABLE();
+ return 1;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteRegisterField(const Instruction* instr,
+ const char* format) {
+ char reg_prefix = format[0];
+ unsigned reg_num = 0;
+ unsigned field_len = 2;
+
+ switch (format[1]) {
+ case 'd':
+ reg_num = instr->Rd();
+ if (format[2] == 'q') {
+ reg_prefix = instr->NEONQ() ? 'X' : 'W';
+ field_len = 3;
+ }
+ break;
+ case 'n': reg_num = instr->Rn(); break;
+ case 'm':
+ reg_num = instr->Rm();
+ switch (format[2]) {
+ // Handle registers tagged with b, z or r, used for the post-index
+ // address updates of NEON load/store instructions. The decimal count
+ // following the tag is a byte count for 'b', and is scaled by the
+ // element size for 'z' or by the register size for 'r'.
+ case 'r':
+ case 'b':
+ case 'z': {
+ field_len = 3;
+ char* eimm;
+ int imm = static_cast<int>(strtol(&format[3], &eimm, 10));
+ field_len += eimm - &format[3];
+ if (reg_num == 31) {
+ switch (format[2]) {
+ case 'z':
+ imm *= (1 << instr->NEONLSSize());
+ break;
+ case 'r':
+ imm *= (instr->NEONQ() == 0) ? kDRegSizeInBytes
+ : kQRegSizeInBytes;
+ break;
+ case 'b':
+ break;
+ }
+ AppendToOutput("#%d", imm);
+ return field_len;
+ }
+ break;
+ }
+ }
+ break;
+ case 'e':
+ // This is register Rm, but using a 4-bit specifier. Used in NEON
+ // by-element instructions.
+ reg_num = (instr->Rm() & 0xf);
+ break;
+ case 'a': reg_num = instr->Ra(); break;
+ case 's': reg_num = instr->Rs(); break;
+ case 't':
+ reg_num = instr->Rt();
+ if (format[0] == 'V') {
+ if ((format[2] >= '2') && (format[2] <= '4')) {
+ // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4.
+ reg_num = (reg_num + format[2] - '1') % 32;
+ field_len = 3;
+ }
+ } else {
+ if (format[2] == '2') {
+ // Handle register specifier Rt2.
+ reg_num = instr->Rt2();
+ field_len = 3;
+ }
+ }
+ break;
+ case '(': {
+ switch (format[2]) {
+ case 's':
+ reg_num = instr->Rs();
+ break;
+ case 't':
+ reg_num = instr->Rt();
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ VIXL_ASSERT(format[3] == '+');
+ int i = 4;
+ int addition = 0;
+ while (format[i] != ')') {
+ VIXL_ASSERT((format[i] >= '0') && (format[i] <= '9'));
+ addition *= 10;
+ addition += format[i] - '0';
+ ++i;
+ }
+ reg_num += addition;
+ field_len = i + 1;
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ // Increase field length for registers tagged as stack.
+ if (format[1] != '(' && format[2] == 's') {
+ field_len = 3;
+ }
+
+ CPURegister::RegisterType reg_type = CPURegister::kRegister;
+ unsigned reg_size = kXRegSize;
+
+ if (reg_prefix == 'R') {
+ reg_prefix = instr->SixtyFourBits() ? 'X' : 'W';
+ } else if (reg_prefix == 'F') {
+ reg_prefix = ((instr->FPType() & 1) == 0) ? 'S' : 'D';
+ }
+
+ switch (reg_prefix) {
+ case 'W':
+ reg_type = CPURegister::kRegister; reg_size = kWRegSize; break;
+ case 'X':
+ reg_type = CPURegister::kRegister; reg_size = kXRegSize; break;
+ case 'B':
+ reg_type = CPURegister::kVRegister; reg_size = kBRegSize; break;
+ case 'H':
+ reg_type = CPURegister::kVRegister; reg_size = kHRegSize; break;
+ case 'S':
+ reg_type = CPURegister::kVRegister; reg_size = kSRegSize; break;
+ case 'D':
+ reg_type = CPURegister::kVRegister; reg_size = kDRegSize; break;
+ case 'Q':
+ reg_type = CPURegister::kVRegister; reg_size = kQRegSize; break;
+ case 'V':
+ AppendToOutput("v%d", reg_num);
+ return field_len;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
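+ // Register code 31 tagged with 's' is the stack pointer, not the zero
+ // register.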
+ if ((reg_type == CPURegister::kRegister) &&
+ (reg_num == kZeroRegCode) && (format[2] == 's')) {
+ reg_num = kSPRegInternalCode;
+ }
+
+ AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type));
+
+ return field_len;
+}
+
+
+int Disassembler::SubstituteImmediateField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'I');
+
+ switch (format[1]) {
+ case 'M': { // IMoveImm, IMoveNeg or IMoveLSL.
+ if (format[5] == 'L') {
+ AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
+ if (instr->ShiftMoveWide() > 0) {
+ AppendToOutput(", lsl #%" PRId32, 16 * instr->ShiftMoveWide());
+ }
+ } else {
+ VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N'));
+ uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide()) <<
+ (16 * instr->ShiftMoveWide());
+ if (format[5] == 'N')
+ imm = ~imm;
+ if (!instr->SixtyFourBits())
+ imm &= UINT64_C(0xffffffff);
+ AppendToOutput("#0x%" PRIx64, imm);
+ }
+ return 8;
+ }
+ case 'L': {
+ switch (format[2]) {
+ case 'L': { // ILLiteral - Immediate Load Literal.
+ AppendToOutput("pc%+" PRId32,
+ instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+ return 9;
+ }
+ case 'S': { // ILS - Immediate Load/Store.
+ if (instr->ImmLS() != 0) {
+ AppendToOutput(", #%" PRId32, instr->ImmLS());
+ }
+ return 3;
+ }
+ case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
+ if (instr->ImmLSPair() != 0) {
+ // format[3] is the scale value. Convert to a number.
+ int scale = 1 << (format[3] - '0');
+ AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
+ }
+ return 4;
+ }
+ case 'U': { // ILU - Immediate Load/Store Unsigned.
+ if (instr->ImmLSUnsigned() != 0) {
+ int shift = instr->SizeLS();
+ AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned() << shift);
+ }
+ return 3;
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
+ case 'C': { // ICondB - Immediate Conditional Branch.
+ int64_t offset = instr->ImmCondBranch() << 2;
+ AppendPCRelativeOffsetToOutput(instr, offset);
+ return 6;
+ }
+ case 'A': { // IAddSub.
+ VIXL_ASSERT(instr->ShiftAddSub() <= 1);
+ int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+ AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+ return 7;
+ }
+ case 'F': { // IFPSingle, IFPDouble or IFPFBits.
+ if (format[3] == 'F') { // IFPFbits.
+ AppendToOutput("#%" PRId32, 64 - instr->FPScale());
+ return 8;
+ } else {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(),
+ format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+ return 9;
+ }
+ }
+ case 'T': { // ITri - Immediate Triangular Encoded.
+ AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+ return 4;
+ }
+ case 'N': { // INzcv.
+ int nzcv = (instr->Nzcv() << Flags_offset);
+ AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+ ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+ ((nzcv & CFlag) == 0) ? 'c' : 'C',
+ ((nzcv & VFlag) == 0) ? 'v' : 'V');
+ return 5;
+ }
+ case 'P': { // IP - Conditional compare.
+ AppendToOutput("#%" PRId32, instr->ImmCondCmp());
+ return 2;
+ }
+ case 'B': { // Bitfields.
+ return SubstituteBitfieldImmediateField(instr, format);
+ }
+ case 'E': { // IExtract.
+ AppendToOutput("#%" PRId32, instr->ImmS());
+ return 8;
+ }
+ case 'S': { // IS - Test and branch bit.
+ AppendToOutput("#%" PRId32, (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40());
+ return 2;
+ }
+ case 's': { // Is - Shift (immediate).
+ switch (format[2]) {
+ case '1': { // Is1 - SSHR.
+ int shift = 16 << HighestSetBitPosition(instr->ImmNEONImmh());
+ shift -= instr->ImmNEONImmhImmb();
+ AppendToOutput("#%d", shift);
+ return 3;
+ }
+ case '2': { // Is2 - SLI.
+ int shift = instr->ImmNEONImmhImmb();
+ shift -= 8 << HighestSetBitPosition(instr->ImmNEONImmh());
+ AppendToOutput("#%d", shift);
+ return 3;
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
+ case 'D': { // IDebug - HLT and BRK instructions.
+ AppendToOutput("#0x%" PRIx32, instr->ImmException());
+ return 6;
+ }
+ case 'V': { // Immediate Vector.
+ switch (format[2]) {
+ case 'E': { // IVExtract.
+ AppendToOutput("#%" PRId32, instr->ImmNEONExt());
+ return 9;
+ }
+ case 'B': { // IVByElemIndex.
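+ // The index is H:L for word elements and H:L:M for halfword elements.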
+ int vm_index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ vm_index = (vm_index << 1) | instr->NEONM();
+ }
+ AppendToOutput("%d", vm_index);
+ return strlen("IVByElemIndex");
+ }
+ case 'I': { // INS element.
+ if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) {
+ int rd_index, rn_index;
+ int imm5 = instr->ImmNEON5();
+ int imm4 = instr->ImmNEON4();
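+ // The lowest set bit of imm5 encodes the element size; the Rd index
+ // is the imm5 bits above it and the Rn index is imm4 shifted down by
+ // the same amount.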
+ int tz = CountTrailingZeros(imm5, 32);
+ rd_index = imm5 >> (tz + 1);
+ rn_index = imm4 >> tz;
+ if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) {
+ AppendToOutput("%d", rd_index);
+ return strlen("IVInsIndex1");
+ } else if (strncmp(format, "IVInsIndex2",
+ strlen("IVInsIndex2")) == 0) {
+ AppendToOutput("%d", rn_index);
+ return strlen("IVInsIndex2");
+ } else {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ VIXL_FALLTHROUGH();
+ }
+ case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
+ AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
+ return 9;
+ }
+ case 'M': { // Modified Immediate cases.
+ if (strncmp(format,
+ "IVMIImmFPSingle",
+ strlen("IVMIImmFPSingle")) == 0) {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+ instr->ImmNEONFP32());
+ return strlen("IVMIImmFPSingle");
+ } else if (strncmp(format,
+ "IVMIImmFPDouble",
+ strlen("IVMIImmFPDouble")) == 0) {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+ instr->ImmNEONFP64());
+ return strlen("IVMIImmFPDouble");
+ } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) {
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+ AppendToOutput("#0x%" PRIx64, imm8);
+ return strlen("IVMIImm8");
+ } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) {
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+ uint64_t imm = 0;
+ for (int i = 0; i < 8; ++i) {
+ if (imm8 & (1ULL << i)) {
+ imm |= (UINT64_C(0xff) << (8 * i));
+ }
+ }
+ AppendToOutput("#0x%" PRIx64, imm);
+ return strlen("IVMIImm");
+ } else if (strncmp(format, "IVMIShiftAmt1",
+ strlen("IVMIShiftAmt1")) == 0) {
+ int cmode = instr->NEONCmode();
+ int shift_amount = 8 * ((cmode >> 1) & 3);
+ AppendToOutput("#%d", shift_amount);
+ return strlen("IVMIShiftAmt1");
+ } else if (strncmp(format, "IVMIShiftAmt2",
+ strlen("IVMIShiftAmt2")) == 0) {
+ int cmode = instr->NEONCmode();
+ int shift_amount = 8 << (cmode & 1);
+ AppendToOutput("#%d", shift_amount);
+ return strlen("IVMIShiftAmt2");
+ } else {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
+ case 'X': { // IX - CLREX instruction.
+ AppendToOutput("#0x%" PRIx32, instr->CRm());
+ return 2;
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteBitfieldImmediateField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ unsigned r = instr->ImmR();
+ unsigned s = instr->ImmS();
+
+ switch (format[2]) {
+ case 'r': { // IBr.
+ AppendToOutput("#%d", r);
+ return 3;
+ }
+ case 's': { // IBs+1 or IBs-r+1.
+ if (format[3] == '+') {
+ AppendToOutput("#%d", s + 1);
+ return 5;
+ } else {
+ VIXL_ASSERT(format[3] == '-');
+ AppendToOutput("#%d", s - r + 1);
+ return 7;
+ }
+ }
+ case 'Z': { // IBZ-r.
+ VIXL_ASSERT((format[3] == '-') && (format[4] == 'r'));
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
+ AppendToOutput("#%d", reg_size - r);
+ return 5;
+ }
+ default: {
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteLiteralField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
+ USE(format);
+
+ const void * address = instr->LiteralAddress<const void *>();
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit:
+ case LDR_x_lit:
+ case LDRSW_x_lit:
+ case LDR_s_lit:
+ case LDR_d_lit:
+ case LDR_q_lit:
+ AppendCodeRelativeDataAddressToOutput(instr, address);
+ break;
+ case PRFM_lit: {
+ // Use the prefetch hint to decide how to print the address.
+ switch (instr->PrefetchHint()) {
+ case 0x0: // PLD: prefetch for load.
+ case 0x2: // PST: prepare for store.
+ AppendCodeRelativeDataAddressToOutput(instr, address);
+ break;
+ case 0x1: // PLI: preload instructions.
+ AppendCodeRelativeCodeAddressToOutput(instr, address);
+ break;
+ case 0x3: // Unallocated hint.
+ AppendCodeRelativeAddressToOutput(instr, address);
+ break;
+ }
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'N');
+ VIXL_ASSERT(instr->ShiftDP() <= 0x3);
+
+ switch (format[1]) {
+ case 'D': { // NDP.
+ VIXL_ASSERT(instr->ShiftDP() != ROR);
+ VIXL_FALLTHROUGH();
+ }
+ case 'L': { // NLo.
+ if (instr->ImmDPShift() != 0) {
+ const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+ AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
+ instr->ImmDPShift());
+ }
+ return 3;
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+}
+
+
+int Disassembler::SubstituteConditionField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'C');
+ const char* condition_code[] = { "eq", "ne", "hs", "lo",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "al", "nv" };
+ int cond;
+ switch (format[1]) {
+ case 'B': cond = instr->ConditionBranch(); break;
+ case 'I': {
+ cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+ break;
+ }
+ default: cond = instr->Condition();
+ }
+ AppendToOutput("%s", condition_code[cond]);
+ return 4;
+}
+
+
+int Disassembler::SubstitutePCRelAddressField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) || // Used by `adr`.
+ (strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`.
+
+ int64_t offset = instr->ImmPCRel();
+
+ // Compute the target address based on the effective address (after applying
+ // code_address_offset). This is required for correct behaviour of adrp.
+ const Instruction* base = instr + code_address_offset();
+ if (format[9] == 'P') {
+ offset *= kPageSize;
+ base = AlignDown(base, kPageSize);
+ }
+ // Strip code_address_offset before printing, so we can use the
+ // semantically-correct AppendCodeRelativeAddressToOutput.
+ const void* target =
+ reinterpret_cast<const void*>(base + offset - code_address_offset());
+
+ AppendPCRelativeOffsetToOutput(instr, offset);
+ AppendToOutput(" ");
+ AppendCodeRelativeAddressToOutput(instr, target);
+ return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "TImm", 4) == 0);
+
+ int64_t offset = 0;
+ switch (format[5]) {
+ // TImmUncn - unconditional branch immediate.
+ case 'n': offset = instr->ImmUncondBranch(); break;
+ // TImmCond - conditional branch immediate.
+ case 'o': offset = instr->ImmCondBranch(); break;
+ // TImmCmpa - compare and branch immediate.
+ case 'm': offset = instr->ImmCmpBranch(); break;
+ // TImmTest - test and branch immediate.
+ case 'e': offset = instr->ImmTestBranch(); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ offset <<= kInstructionSizeLog2;
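+ // Instruction is byte-sized (see the static assert below), so this
+ // pointer arithmetic is in bytes.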
+ const void* target_address = reinterpret_cast<const void*>(instr + offset);
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+
+ AppendPCRelativeOffsetToOutput(instr, offset);
+ AppendToOutput(" ");
+ AppendCodeRelativeCodeAddressToOutput(instr, target_address);
+
+ return 8;
+}
+
+
+int Disassembler::SubstituteExtendField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "Ext", 3) == 0);
+ VIXL_ASSERT(instr->ExtendMode() <= 7);
+ USE(format);
+
+ const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+ "sxtb", "sxth", "sxtw", "sxtx" };
+
+ // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit
+ // registers becomes lsl.
+ if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+ (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+ (instr->ExtendMode() == UXTX))) {
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(", lsl #%" PRId32, instr->ImmExtendShift());
+ }
+ } else {
+ AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(" #%" PRId32, instr->ImmExtendShift());
+ }
+ }
+ return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+ "undefined", "undefined", "sxtw", "sxtx" };
+ USE(format);
+
+ unsigned shift = instr->ImmShiftLS();
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+ unsigned rm = instr->Rm();
+ if (rm == kZeroRegCode) {
+ AppendToOutput("%czr", reg_type);
+ } else {
+ AppendToOutput("%c%d", reg_type, rm);
+ }
+
+ // Extend mode UXTX is an alias for shift mode LSL here.
+ if (!((ext == UXTX) && (shift == 0))) {
+ AppendToOutput(", %s", extend_mode[ext]);
+ if (shift != 0) {
+ AppendToOutput(" #%d", instr->SizeLS());
+ }
+ }
+ return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'P');
+ USE(format);
+
+ static const char* hints[] = {"ld", "li", "st"};
+ static const char* stream_options[] = {"keep", "strm"};
+
+ unsigned hint = instr->PrefetchHint();
+ unsigned target = instr->PrefetchTarget() + 1;
+ unsigned stream = instr->PrefetchStream();
+
+ if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) {
+ // Unallocated prefetch operations.
+ int prefetch_mode = instr->ImmPrefetchOperation();
+ AppendToOutput("#0b%c%c%c%c%c",
+ (prefetch_mode & (1 << 4)) ? '1' : '0',
+ (prefetch_mode & (1 << 3)) ? '1' : '0',
+ (prefetch_mode & (1 << 2)) ? '1' : '0',
+ (prefetch_mode & (1 << 1)) ? '1' : '0',
+ (prefetch_mode & (1 << 0)) ? '1' : '0');
+ } else {
+ VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0])));
+ AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]);
+ }
+ return 6;
+}
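+
+// For example, a prefetch with hint "ld", target level 1 (PrefetchTarget()
+// == 0) and the "keep" stream policy is printed as "pldl1keep"; unallocated
+// operations fall back to the raw "#0b<bits>" form above.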
+
+int Disassembler::SubstituteBarrierField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'M');
+ USE(format);
+
+ static const char* options[4][4] = {
+ { "sy (0b0000)", "oshld", "oshst", "osh" },
+ { "sy (0b0100)", "nshld", "nshst", "nsh" },
+ { "sy (0b1000)", "ishld", "ishst", "ish" },
+ { "sy (0b1100)", "ld", "st", "sy" }
+ };
+ int domain = instr->ImmBarrierDomain();
+ int type = instr->ImmBarrierType();
+
+ AppendToOutput("%s", options[domain][type]);
+ return 1;
+}
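+
+// For example, the barrier option ish is encoded as 0b1011, i.e. domain 0b10
+// and type 0b11, so options[2][3] prints "ish"; the reserved 0bxx00 encodings
+// print as "sy" with their raw bits.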
+
+int Disassembler::SubstituteSysOpField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'G');
+ int op = -1;
+ switch (format[1]) {
+ case '1': op = instr->SysOp1(); break;
+ case '2': op = instr->SysOp2(); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ AppendToOutput("#%d", op);
+ return 2;
+}
+
+int Disassembler::SubstituteCrField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'K');
+ int cr = -1;
+ switch (format[1]) {
+ case 'n': cr = instr->CRn(); break;
+ case 'm': cr = instr->CRm(); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ AppendToOutput("C%d", cr);
+ return 2;
+}
+
+void Disassembler::ResetOutput() {
+ buffer_pos_ = 0;
+ buffer_[buffer_pos_] = 0;
+}
+
+
+void Disassembler::AppendToOutput(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
+ format, args);
+ va_end(args);
+}
+
+
+void PrintDisassembler::ProcessOutput(const Instruction* instr) {
+ fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
+ reinterpret_cast<uint64_t>(instr),
+ instr->InstructionBits(),
+ GetOutput());
+}
+
+void DisassembleInstruction(char* buffer, size_t bufsize, const Instruction* instr)
+{
+ vixl::Disassembler disasm(buffer, bufsize-1);
+ vixl::Decoder decoder;
+ decoder.AppendVisitor(&disasm);
+ decoder.Decode(instr);
+ buffer[bufsize-1] = 0; // Just to be safe
+}
+
+char* GdbDisassembleInstruction(const Instruction* instr)
+{
+ static char buffer[1024];
+ DisassembleInstruction(buffer, sizeof(buffer), instr);
+ return buffer;
+}
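+
+// Illustrative sketch (not part of the VIXL sources): minimal use of the
+// helper above to disassemble a single raw AArch64 encoding. The example
+// value 0xd65f03c0 encodes "ret".
+static void ExampleDisassembleOne() {
+  const uint32_t bits = 0xd65f03c0;
+  char text[128];
+  DisassembleInstruction(text, sizeof(text), Instruction::CastConst(&bits));
+  printf("%s\n", text);  // Prints "ret".
+}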
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Disasm-vixl.h b/js/src/jit/arm64/vixl/Disasm-vixl.h
new file mode 100644
index 0000000000..e04730da83
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Disasm-vixl.h
@@ -0,0 +1,181 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_DISASM_A64_H
+#define VIXL_A64_DISASM_A64_H
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/arm64/vixl/Decoder-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+class Disassembler: public DecoderVisitor {
+ public:
+ Disassembler();
+ Disassembler(char* text_buffer, int buffer_size);
+ virtual ~Disassembler();
+ char* GetOutput();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr) override;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ protected:
+ virtual void ProcessOutput(const Instruction* instr);
+
+ // Default output functions. The functions below implement a default way of
+ // printing elements in the disassembly. A sub-class can override these to
+ // customize the disassembly output.
+
+ // Prints the name of a register.
+ // TODO: This currently doesn't allow renaming of V registers.
+ virtual void AppendRegisterNameToOutput(const Instruction* instr,
+ const CPURegister& reg);
+
+ // Prints a PC-relative offset. This is used for example when disassembling
+ // branches to immediate offsets.
+ virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
+ int64_t offset);
+
+ // Prints an address, in the general case. It can be code or data. This is
+ // used for example to print the target address of an ADR instruction.
+ virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Prints the address of some code.
+ // This is used for example to print the target address of a branch to an
+ // immediate offset.
+ // A sub-class can for example override this method to look up the address and
+ // print an appropriate name.
+ virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Prints the address of some data.
+ // This is used for example to print the source address of a load literal
+ // instruction.
+ virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Same as the above, but for addresses that are not relative to the code
+ // buffer. They are currently not used by VIXL.
+ virtual void AppendAddressToOutput(const Instruction* instr,
+ const void* addr);
+ virtual void AppendCodeAddressToOutput(const Instruction* instr,
+ const void* addr);
+ virtual void AppendDataAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ public:
+ // Get/Set the offset that should be added to code addresses when printing
+ // code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
+ // helpers.
+ // Below is an example of how a branch immediate instruction in memory at
+ // address 0xb010200 would disassemble with different offsets.
+ // Base address | Disassembly
+ // 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc)
+ // 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc)
+ // 0xb010200 | 0x0: b #+0xcc (addr 0xcc)
+ void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
+ int64_t CodeRelativeAddress(const void* instr);
+
+ private:
+ void Format(
+ const Instruction* instr, const char* mnemonic, const char* format);
+ void Substitute(const Instruction* instr, const char* string);
+ int SubstituteField(const Instruction* instr, const char* format);
+ int SubstituteRegisterField(const Instruction* instr, const char* format);
+ int SubstituteImmediateField(const Instruction* instr, const char* format);
+ int SubstituteLiteralField(const Instruction* instr, const char* format);
+ int SubstituteBitfieldImmediateField(
+ const Instruction* instr, const char* format);
+ int SubstituteShiftField(const Instruction* instr, const char* format);
+ int SubstituteExtendField(const Instruction* instr, const char* format);
+ int SubstituteConditionField(const Instruction* instr, const char* format);
+ int SubstitutePCRelAddressField(const Instruction* instr, const char* format);
+ int SubstituteBranchTargetField(const Instruction* instr, const char* format);
+ int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
+ int SubstitutePrefetchField(const Instruction* instr, const char* format);
+ int SubstituteBarrierField(const Instruction* instr, const char* format);
+ int SubstituteSysOpField(const Instruction* instr, const char* format);
+ int SubstituteCrField(const Instruction* instr, const char* format);
+ bool RdIsZROrSP(const Instruction* instr) const {
+ return (instr->Rd() == kZeroRegCode);
+ }
+
+ bool RnIsZROrSP(const Instruction* instr) const {
+ return (instr->Rn() == kZeroRegCode);
+ }
+
+ bool RmIsZROrSP(const Instruction* instr) const {
+ return (instr->Rm() == kZeroRegCode);
+ }
+
+ bool RaIsZROrSP(const Instruction* instr) const {
+ return (instr->Ra() == kZeroRegCode);
+ }
+
+ bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+
+ int64_t code_address_offset() const { return code_address_offset_; }
+
+ protected:
+ void ResetOutput();
+ void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
+
+ void set_code_address_offset(int64_t code_address_offset) {
+ code_address_offset_ = code_address_offset;
+ }
+
+ char* buffer_;
+ uint32_t buffer_pos_;
+ uint32_t buffer_size_;
+ bool own_buffer_;
+
+ int64_t code_address_offset_;
+};
+
+
+class PrintDisassembler: public Disassembler {
+ public:
+ explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
+
+ protected:
+ virtual void ProcessOutput(const Instruction* instr) override;
+
+ private:
+ FILE *stream_;
+};
+
+void DisassembleInstruction(char* buffer, size_t bufsize, const Instruction* instr);
+char* GdbDisassembleInstruction(const Instruction* instr);
+
+} // namespace vixl
+
+#endif // VIXL_A64_DISASM_A64_H
diff --git a/js/src/jit/arm64/vixl/Globals-vixl.h b/js/src/jit/arm64/vixl/Globals-vixl.h
new file mode 100644
index 0000000000..2c7d5703f1
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Globals-vixl.h
@@ -0,0 +1,272 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_GLOBALS_H
+#define VIXL_GLOBALS_H
+
+// Get standard C99 macros for integer types.
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include "mozilla/Assertions.h"
+
+#include <cstdarg>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+
+extern "C" {
+#include <inttypes.h>
+#include <stdint.h>
+}
+
+#include "jstypes.h"
+
+#include "jit/arm64/vixl/Platform-vixl.h"
+#include "js/Utility.h"
+
+#ifdef VIXL_NEGATIVE_TESTING
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#endif
+
+namespace vixl {
+
+typedef uint8_t byte;
+
+const int KBytes = 1024;
+const int MBytes = 1024 * KBytes;
+
+const int kBitsPerByte = 8;
+
+template <int SizeInBits>
+struct Unsigned;
+
+template <>
+struct Unsigned<32> {
+ typedef uint32_t type;
+};
+
+template <>
+struct Unsigned<64> {
+ typedef uint64_t type;
+};
+
+} // namespace vixl
+
+// Detect the host's pointer size.
+#if (UINTPTR_MAX == UINT32_MAX)
+#define VIXL_HOST_POINTER_32
+#elif (UINTPTR_MAX == UINT64_MAX)
+#define VIXL_HOST_POINTER_64
+#else
+#error "Unsupported host pointer size."
+#endif
+
+#ifdef VIXL_NEGATIVE_TESTING
+#define VIXL_ABORT() \
+ do { \
+ std::ostringstream oss; \
+ oss << "Aborting in " << __FILE__ << ", line " << __LINE__ << std::endl; \
+ throw std::runtime_error(oss.str()); \
+ } while (false)
+#define VIXL_ABORT_WITH_MSG(msg) \
+ do { \
+ std::ostringstream oss; \
+ oss << (msg) << "in " << __FILE__ << ", line " << __LINE__ << std::endl; \
+ throw std::runtime_error(oss.str()); \
+ } while (false)
+#define VIXL_CHECK(condition) \
+ do { \
+ if (!(condition)) { \
+ std::ostringstream oss; \
+ oss << "Assertion failed (" #condition ")\nin "; \
+ oss << __FILE__ << ", line " << __LINE__ << std::endl; \
+ throw std::runtime_error(oss.str()); \
+ } \
+ } while (false)
+#else
+#define VIXL_ABORT() \
+ do { \
+ MOZ_CRASH(); \
+ } while (false)
+#define VIXL_ABORT_WITH_MSG(msg) \
+ do { \
+ MOZ_CRASH(msg); \
+ } while (false)
+#define VIXL_CHECK(condition) \
+ do { \
+ if (!(condition)) { \
+ MOZ_CRASH(); \
+ } \
+ } while (false)
+#endif
+#ifdef DEBUG
+#define VIXL_ASSERT(condition) MOZ_ASSERT(condition)
+#define VIXL_UNIMPLEMENTED() \
+ do { \
+ VIXL_ABORT_WITH_MSG("UNIMPLEMENTED "); \
+ } while (false)
+#define VIXL_UNREACHABLE() \
+ do { \
+ VIXL_ABORT_WITH_MSG("UNREACHABLE "); \
+ } while (false)
+#else
+#define VIXL_ASSERT(condition) ((void)0)
+#define VIXL_UNIMPLEMENTED() ((void)0)
+#define VIXL_UNREACHABLE() MOZ_CRASH("vixl unreachable")
+#endif
+// This is not as powerful as template based assertions, but it is simple.
+// It assumes that the descriptions are unique. If this starts being a problem,
+// we can switch to a different implementation.
+#define VIXL_CONCAT(a, b) a##b
+#if __cplusplus >= 201103L
+#define VIXL_STATIC_ASSERT_LINE(line_unused, condition, message) \
+ static_assert(condition, message)
+#else
+#define VIXL_STATIC_ASSERT_LINE(line, condition, message_unused) \
+ typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
+ __attribute__((unused))
+#endif
+#define VIXL_STATIC_ASSERT(condition) \
+ VIXL_STATIC_ASSERT_LINE(__LINE__, condition, "")
+#define VIXL_STATIC_ASSERT_MESSAGE(condition, message) \
+ VIXL_STATIC_ASSERT_LINE(__LINE__, condition, message)
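+
+// Illustrative examples (not part of the VIXL sources): both macros expand to
+// compile-time checks and emit no code.
+VIXL_STATIC_ASSERT(sizeof(vixl::byte) == 1);
+VIXL_STATIC_ASSERT_MESSAGE(sizeof(int32_t) == 4, "int32_t must be 32 bits");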
+
+#define VIXL_WARNING(message) \
+ do { \
+ printf("WARNING in %s, line %i: %s", __FILE__, __LINE__, message); \
+ } while (false)
+
+template <typename T1>
+inline void USE(const T1&) {}
+
+template <typename T1, typename T2>
+inline void USE(const T1&, const T2&) {}
+
+template <typename T1, typename T2, typename T3>
+inline void USE(const T1&, const T2&, const T3&) {}
+
+template <typename T1, typename T2, typename T3, typename T4>
+inline void USE(const T1&, const T2&, const T3&, const T4&) {}
+
+#define VIXL_ALIGNMENT_EXCEPTION() \
+ do { \
+ VIXL_ABORT_WITH_MSG("ALIGNMENT EXCEPTION\t"); \
+ } while (0)
+
+// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough
+// argument to annotate intentional fall-through between switch labels.
+// For more information please refer to:
+// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+#ifndef __has_warning
+#define __has_warning(x) 0
+#endif
+
+// Fallthrough annotation for Clang and C++11(201103L).
+#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
+#define VIXL_FALLTHROUGH() [[clang::fallthrough]]
+// Fallthrough annotation for GCC >= 7.
+#elif __GNUC__ >= 7
+#define VIXL_FALLTHROUGH() __attribute__((fallthrough))
+#else
+#define VIXL_FALLTHROUGH() \
+ do { \
+ } while (0)
+#endif
+
+#if __cplusplus >= 201103L
+#define VIXL_NO_RETURN [[noreturn]]
+#else
+#define VIXL_NO_RETURN __attribute__((noreturn))
+#endif
+#ifdef VIXL_DEBUG
+#define VIXL_NO_RETURN_IN_DEBUG_MODE VIXL_NO_RETURN
+#else
+#define VIXL_NO_RETURN_IN_DEBUG_MODE
+#endif
+
+#if __cplusplus >= 201103L
+#define VIXL_OVERRIDE override
+#else
+#define VIXL_OVERRIDE
+#endif
+
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 1
+#endif
+#else
+#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 0
+#endif
+#if VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#warning "Generating Simulator instructions without Simulator support."
+#endif
+#endif
+
+// We do not have a simulator for AArch32, although we can pretend we do so that
+// tests that require running natively can be skipped.
+#ifndef __arm__
+#define VIXL_INCLUDE_SIMULATOR_AARCH32
+#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 1
+#endif
+#else
+#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 0
+#endif
+#endif
+
+// Target Architecture/ISA
+
+// Hack: always include AArch64.
+#define VIXL_INCLUDE_TARGET_A64
+
+#ifdef VIXL_INCLUDE_TARGET_A64
+#define VIXL_INCLUDE_TARGET_AARCH64
+#endif
+
+#if defined(VIXL_INCLUDE_TARGET_A32) && defined(VIXL_INCLUDE_TARGET_T32)
+#define VIXL_INCLUDE_TARGET_AARCH32
+#elif defined(VIXL_INCLUDE_TARGET_A32)
+#define VIXL_INCLUDE_TARGET_A32_ONLY
+#else
+#define VIXL_INCLUDE_TARGET_T32_ONLY
+#endif
+
+
+#endif // VIXL_GLOBALS_H
diff --git a/js/src/jit/arm64/vixl/Instructions-vixl.cpp b/js/src/jit/arm64/vixl/Instructions-vixl.cpp
new file mode 100644
index 0000000000..dcc0fab05e
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instructions-vixl.cpp
@@ -0,0 +1,627 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+
+namespace vixl {
+
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+ uint64_t value,
+ unsigned width) {
+ VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ (width == 32));
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ uint64_t result = value & ((UINT64_C(1) << width) - 1);
+ for (unsigned i = width; i < reg_size; i *= 2) {
+ result |= (result << i);
+ }
+ return result;
+}
+
+
+bool Instruction::IsLoad() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) != 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_w:
+ case LDRSB_x:
+ case LDRSH_w:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDR_b:
+ case LDR_h:
+ case LDR_s:
+ case LDR_d:
+ case LDR_q: return true;
+ default: return false;
+ }
+ }
+}
+
+
+bool Instruction::IsStore() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) == 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
+ switch (op) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x:
+ case STR_b:
+ case STR_h:
+ case STR_s:
+ case STR_d:
+ case STR_q: return true;
+ default: return false;
+ }
+ }
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case. Specifically, where the constraints on imm_s are
+// not met.
+uint64_t Instruction::ImmLogical() const {
+ unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
+ int32_t n = BitN();
+ int32_t imm_s = ImmSetBits();
+ int32_t imm_r = ImmRotate();
+
+ // An integer is constructed from the n, imm_s and imm_r bits according to
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+
+ if (n == 1) {
+ if (imm_s == 0x3f) {
+ return 0;
+ }
+ uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
+ return RotateRight(bits, imm_r, 64);
+ } else {
+ if ((imm_s >> 1) == 0x1f) {
+ return 0;
+ }
+ for (int width = 0x20; width >= 0x2; width >>= 1) {
+ if ((imm_s & width) == 0) {
+ int mask = width - 1;
+ if ((imm_s & mask) == mask) {
+ return 0;
+ }
+ uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
+ return RepeatBitsAcrossReg(reg_size,
+ RotateRight(bits, imm_r & mask, width),
+ width);
+ }
+ }
+ }
+ VIXL_UNREACHABLE();
+ return 0;
+}
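+
+// Worked example of the decoding above: N=0, immr=0b000000, imms=0b110011
+// matches the "0 110sss" row, so the element size is 8 bits and S=3. The
+// element is therefore 0b00001111; rotating right by 0 leaves it unchanged,
+// and repeating it across a 64-bit register yields 0x0f0f0f0f0f0f0f0f.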
+
+
+uint32_t Instruction::ImmNEONabcdefgh() const {
+ return ImmNEONabc() << 5 | ImmNEONdefgh();
+}
+
+
+float Instruction::Imm8ToFP32(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
+ uint32_t bits = imm8;
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return RawbitsToFloat(result);
+}
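+
+// Worked example of the expansion above: imm8 = 0x70 gives bit7 = 0,
+// bit6 = 1 and bit5_to_0 = 0b110000, so result = (31 << 25) | (0x30 << 19)
+// = 0x3f800000, which is 1.0f (the immediate used by "fmov s0, #1.0").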
+
+
+float Instruction::ImmFP32() const {
+ return Imm8ToFP32(ImmFP());
+}
+
+
+double Instruction::Imm8ToFP64(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
+ uint32_t bits = imm8;
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+ return RawbitsToDouble(result);
+}
+
+
+double Instruction::ImmFP64() const {
+ return Imm8ToFP64(ImmFP());
+}
+
+
+float Instruction::ImmNEONFP32() const {
+ return Imm8ToFP32(ImmNEONabcdefgh());
+}
+
+
+double Instruction::ImmNEONFP64() const {
+ return Imm8ToFP64(ImmNEONabcdefgh());
+}
+
+unsigned CalcLSPairDataSize(LoadStorePairOp op) {
+ VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
+ VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
+ switch (op) {
+ case STP_q:
+ case LDP_q: return kQRegSizeInBytesLog2;
+ case STP_x:
+ case LDP_x:
+ case STP_d:
+ case LDP_d: return kXRegSizeInBytesLog2;
+ default: return kWRegSizeInBytesLog2;
+ }
+}
+
+
+int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) {
+ switch (branch_type) {
+ case UncondBranchType:
+ return ImmUncondBranch_width;
+ case CondBranchType:
+ return ImmCondBranch_width;
+ case CompareBranchType:
+ return ImmCmpBranch_width;
+ case TestBranchType:
+ return ImmTestBranch_width;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+}
+
+
+int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
+ int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
+ return encoded_max * kInstructionSize;
+}
+
+
+bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
+ int64_t offset) {
+ return IsIntN(ImmBranchRangeBitwidth(branch_type), offset);
+}
+
+ImmBranchRangeType Instruction::ImmBranchTypeToRange(ImmBranchType branch_type)
+{
+ switch (branch_type) {
+ case UncondBranchType:
+ return UncondBranchRangeType;
+ case CondBranchType:
+ case CompareBranchType:
+ return CondBranchRangeType;
+ case TestBranchType:
+ return TestBranchRangeType;
+ default:
+ return UnknownBranchRangeType;
+ }
+}
+
+int32_t Instruction::ImmBranchMaxForwardOffset(ImmBranchRangeType range_type)
+{
+ // Branches encode a pc-relative two's complement number of 32-bit
+ // instructions. Compute the number of bytes corresponding to the largest
+ // positive number of instructions that can be encoded.
+ switch(range_type) {
+ case TestBranchRangeType:
+ return ((1 << ImmTestBranch_width) - 1) / 2 * kInstructionSize;
+ case CondBranchRangeType:
+ return ((1 << ImmCondBranch_width) - 1) / 2 * kInstructionSize;
+ case UncondBranchRangeType:
+ return ((1 << ImmUncondBranch_width) - 1) / 2 * kInstructionSize;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+}
+
+int32_t Instruction::ImmBranchMinBackwardOffset(ImmBranchRangeType range_type)
+{
+ switch(range_type) {
+ case TestBranchRangeType:
+ return -int32_t(1 << ImmTestBranch_width) / 2 * kInstructionSize;
+ case CondBranchRangeType:
+ return -int32_t(1 << ImmCondBranch_width) / 2 * kInstructionSize;
+ case UncondBranchRangeType:
+ return -int32_t(1 << ImmUncondBranch_width) / 2 * kInstructionSize;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+}
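+
+// Worked example of the two helpers above for the conditional-branch range
+// (imm19): the maximum forward offset is ((1 << 19) - 1) / 2 * 4 = 1048572
+// bytes and the minimum backward offset is -(1 << 19) / 2 * 4 = -1048576
+// bytes, matching the "+/- 1MB" range listed for CondBranchRangeType.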
+
+const Instruction* Instruction::ImmPCOffsetTarget() const {
+ const Instruction * base = this;
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // ADR and ADRP.
+ offset = ImmPCRel();
+ if (Mask(PCRelAddressingMask) == ADRP) {
+ base = AlignDown(base, kPageSize);
+ offset *= kPageSize;
+ } else {
+ VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
+ }
+ } else {
+ // All PC-relative branches.
+ VIXL_ASSERT(BranchType() != UnknownBranchType);
+ // Relative branch offsets are instruction-size-aligned.
+ offset = ImmBranch() << kInstructionSizeLog2;
+ }
+ return base + offset;
+}
+
+
+int Instruction::ImmBranch() const {
+ switch (BranchType()) {
+ case CondBranchType: return ImmCondBranch();
+ case UncondBranchType: return ImmUncondBranch();
+ case CompareBranchType: return ImmCmpBranch();
+ case TestBranchType: return ImmTestBranch();
+ default: VIXL_UNREACHABLE();
+ }
+ return 0;
+}
+
+
+void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
+ if (IsPCRelAddressing()) {
+ SetPCRelImmTarget(target);
+ } else {
+ SetBranchImmTarget(target);
+ }
+}
+
+
+void Instruction::SetPCRelImmTarget(const Instruction* target) {
+ ptrdiff_t imm21;
+ if ((Mask(PCRelAddressingMask) == ADR)) {
+ imm21 = target - this;
+ } else {
+ VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
+ uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
+ uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
+ imm21 = target_page - this_page;
+ }
+ Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));
+
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+}
+
+
+void Instruction::SetBranchImmTarget(const Instruction* target) {
+ VIXL_ASSERT(((target - this) & 3) == 0);
+ Instr branch_imm = 0;
+ uint32_t imm_mask = 0;
+ int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
+ switch (BranchType()) {
+ case CondBranchType: {
+ branch_imm = Assembler::ImmCondBranch(offset);
+ imm_mask = ImmCondBranch_mask;
+ break;
+ }
+ case UncondBranchType: {
+ branch_imm = Assembler::ImmUncondBranch(offset);
+ imm_mask = ImmUncondBranch_mask;
+ break;
+ }
+ case CompareBranchType: {
+ branch_imm = Assembler::ImmCmpBranch(offset);
+ imm_mask = ImmCmpBranch_mask;
+ break;
+ }
+ case TestBranchType: {
+ branch_imm = Assembler::ImmTestBranch(offset);
+ imm_mask = ImmTestBranch_mask;
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+void Instruction::SetImmLLiteral(const Instruction* source) {
+ VIXL_ASSERT(IsWordAligned(source));
+ ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
+ Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
+ Instr mask = ImmLLiteral_mask;
+
+ SetInstructionBits(Mask(~mask) | imm);
+}
+
+
+VectorFormat VectorFormatHalfWidth(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
+ vform == kFormatH || vform == kFormatS || vform == kFormatD);
+ switch (vform) {
+ case kFormat8H: return kFormat8B;
+ case kFormat4S: return kFormat4H;
+ case kFormat2D: return kFormat2S;
+ case kFormatH: return kFormatB;
+ case kFormatS: return kFormatH;
+ case kFormatD: return kFormatS;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
+ vform == kFormatB || vform == kFormatH || vform == kFormatS);
+ switch (vform) {
+ case kFormat8B: return kFormat8H;
+ case kFormat4H: return kFormat4S;
+ case kFormat2S: return kFormat2D;
+ case kFormatB: return kFormatH;
+ case kFormatH: return kFormatS;
+ case kFormatS: return kFormatD;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat VectorFormatFillQ(const VectorFormat vform) {
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return kFormat16B;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return kFormat8H;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return kFormat4S;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return kFormat2D;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) {
+ switch (vform) {
+ case kFormat4H: return kFormat8B;
+ case kFormat8H: return kFormat16B;
+ case kFormat2S: return kFormat4H;
+ case kFormat4S: return kFormat8H;
+ case kFormat1D: return kFormat2S;
+ case kFormat2D: return kFormat4S;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
+ switch (vform) {
+ case kFormat8B: return kFormat16B;
+ case kFormat4H: return kFormat8H;
+ case kFormat2S: return kFormat4S;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat VectorFormatHalfLanes(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
+ switch (vform) {
+ case kFormat16B: return kFormat8B;
+ case kFormat8H: return kFormat4H;
+ case kFormat4S: return kFormat2S;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat ScalarFormatFromLaneSize(int laneSize) {
+ switch (laneSize) {
+ case 8: return kFormatB;
+ case 16: return kFormatH;
+ case 32: return kFormatS;
+ case 64: return kFormatD;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB: return kBRegSize;
+ case kFormatH: return kHRegSize;
+ case kFormatS: return kSRegSize;
+ case kFormatD: return kDRegSize;
+ case kFormat8B:
+ case kFormat4H:
+ case kFormat2S:
+ case kFormat1D: return kDRegSize;
+ default: return kQRegSize;
+ }
+}
+
+
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
+ return RegisterSizeInBitsFromFormat(vform) / 8;
+}
+
+
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return 8;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return 16;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return 32;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return 64;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+int LaneSizeInBytesFromFormat(VectorFormat vform) {
+ return LaneSizeInBitsFromFormat(vform) / 8;
+}
+
+
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return 0;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return 1;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return 2;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return 3;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+int LaneCountFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormat16B: return 16;
+ case kFormat8B:
+ case kFormat8H: return 8;
+ case kFormat4H:
+ case kFormat4S: return 4;
+ case kFormat2S:
+ case kFormat2D: return 2;
+ case kFormat1D:
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD: return 1;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+int MaxLaneCountFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return 16;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return 8;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return 4;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return 2;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+// Does 'vform' indicate a vector format or a scalar format?
+bool IsVectorFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD: return false;
+ default: return true;
+ }
+}
+
+
+int64_t MaxIntFromFormat(VectorFormat vform) {
+ return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+
+int64_t MinIntFromFormat(VectorFormat vform) {
+ return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+
+uint64_t MaxUintFromFormat(VectorFormat vform) {
+ return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+} // namespace vixl
+
diff --git a/js/src/jit/arm64/vixl/Instructions-vixl.h b/js/src/jit/arm64/vixl/Instructions-vixl.h
new file mode 100644
index 0000000000..4bcddf642a
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instructions-vixl.h
@@ -0,0 +1,817 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
+#define VIXL_A64_INSTRUCTIONS_A64_H_
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+// ISA constants. --------------------------------------------------------------
+
+typedef uint32_t Instr;
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLiteralEntrySize = 4;
+const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MBytes;
+
+// This is the nominal page size (as used by the adrp instruction); the actual
+// size of the memory pages allocated by the kernel is likely to differ.
+const unsigned kPageSize = 4 * KBytes;
+const unsigned kPageSizeLog2 = 12;
+
+const unsigned kBRegSize = 8;
+const unsigned kBRegSizeLog2 = 3;
+const unsigned kBRegSizeInBytes = kBRegSize / 8;
+const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
+const unsigned kHRegSize = 16;
+const unsigned kHRegSizeLog2 = 4;
+const unsigned kHRegSizeInBytes = kHRegSize / 8;
+const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
+const unsigned kWRegSize = 32;
+const unsigned kWRegSizeLog2 = 5;
+const unsigned kWRegSizeInBytes = kWRegSize / 8;
+const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
+const unsigned kXRegSize = 64;
+const unsigned kXRegSizeLog2 = 6;
+const unsigned kXRegSizeInBytes = kXRegSize / 8;
+const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
+const unsigned kSRegSize = 32;
+const unsigned kSRegSizeLog2 = 5;
+const unsigned kSRegSizeInBytes = kSRegSize / 8;
+const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
+const unsigned kDRegSize = 64;
+const unsigned kDRegSizeLog2 = 6;
+const unsigned kDRegSizeInBytes = kDRegSize / 8;
+const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
+const unsigned kQRegSize = 128;
+const unsigned kQRegSizeLog2 = 7;
+const unsigned kQRegSizeInBytes = kQRegSize / 8;
+const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
+const uint64_t kWRegMask = UINT64_C(0xffffffff);
+const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSRegMask = UINT64_C(0xffffffff);
+const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSSignMask = UINT64_C(0x80000000);
+const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kWSignMask = UINT64_C(0x80000000);
+const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kByteMask = UINT64_C(0xff);
+const uint64_t kHalfWordMask = UINT64_C(0xffff);
+const uint64_t kWordMask = UINT64_C(0xffffffff);
+const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
+const uint64_t kXMaxExactUInt = UINT64_C(0xfffffffffffff800);
+const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
+const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
+const int64_t kXMaxExactInt = UINT64_C(0x7ffffffffffffc00);
+const int64_t kXMinInt = INT64_C(0x8000000000000000);
+const int32_t kWMaxInt = INT32_C(0x7fffffff);
+const int32_t kWMinInt = INT32_C(0x80000000);
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+
+const unsigned kAddressTagOffset = 56;
+const unsigned kAddressTagWidth = 8;
+const uint64_t kAddressTagMask =
+ ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
+VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
+
+static inline unsigned CalcLSDataSize(LoadStoreOp op) {
+ VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
+ unsigned size = static_cast<Instr>(op) >> LSSize_offset;
+ if ((op & LSVector_mask) != 0) {
+ // Vector register memory operations encode the access size in the "size"
+ // and "opc" fields.
+ if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
+ size = kQRegSizeInBytesLog2;
+ }
+ }
+ return size;
+}
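+
+// For example, a 128-bit "ldr q0, [x0]" encodes size = 0b00 with opc >= 2, so
+// the branch above returns kQRegSizeInBytesLog2 (a 16-byte access); an
+// integer "ldr x0, [x1]" simply returns its size field, 0b11 (8 bytes).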
+
+unsigned CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+ UnknownBranchType = 0,
+ CondBranchType = 1,
+ UncondBranchType = 2,
+ CompareBranchType = 3,
+ TestBranchType = 4
+};
+
+// The classes of immediate branch ranges, in order of increasing range.
+// Note that CondBranchType and CompareBranchType have the same range.
+enum ImmBranchRangeType {
+ TestBranchRangeType, // tbz/tbnz: imm14 = +/- 32KB.
+ CondBranchRangeType, // b.cond/cbz/cbnz: imm19 = +/- 1MB.
+ UncondBranchRangeType, // b/bl: imm26 = +/- 128MB.
+ UnknownBranchRangeType,
+
+ // Number of 'short-range' branch range types.
+ // We don't consider unconditional branches 'short-range'.
+ NumShortBranchRangeTypes = UncondBranchRangeType
+};
+
+enum AddrMode {
+ Offset,
+ PreIndex,
+ PostIndex
+};
+
+enum Reg31Mode {
+ Reg31IsStackPointer,
+ Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+ Instr InstructionBits() const {
+ return *(reinterpret_cast<const Instr*>(this));
+ }
+
+ void SetInstructionBits(Instr new_instr) {
+ *(reinterpret_cast<Instr*>(this)) = new_instr;
+ }
+
+ int Bit(int pos) const {
+ return (InstructionBits() >> pos) & 1;
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return ExtractUnsignedBitfield32(msb, lsb, InstructionBits());
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ return ExtractSignedBitfield32(msb, lsb, bits);
+ }
+
+ Instr Mask(uint32_t mask) const {
+ return InstructionBits() & mask;
+ }
+
+ #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ int32_t Name() const { return Func(HighBit, LowBit); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+ #undef DEFINE_GETTER
+
+ #define DEFINE_SETTER(Name, HighBit, LowBit, Func) \
+ inline void Set##Name(unsigned n) { SetBits32(HighBit, LowBit, n); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_SETTER)
+ #undef DEFINE_SETTER
+
+ // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+ // formed from ImmPCRelLo and ImmPCRelHi.
+ int ImmPCRel() const {
+ int offset =
+ static_cast<int>((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int width = ImmPCRelLo_width + ImmPCRelHi_width;
+ return ExtractSignedBitfield32(width - 1, 0, offset);
+ }
+
+ uint64_t ImmLogical() const;
+ unsigned ImmNEONabcdefgh() const;
+ float ImmFP32() const;
+ double ImmFP64() const;
+ float ImmNEONFP32() const;
+ double ImmNEONFP64() const;
+
+ unsigned SizeLS() const {
+ return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
+ }
+
+ unsigned SizeLSPair() const {
+ return CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+ }
+
+ int NEONLSIndex(int access_size_shift) const {
+ int64_t q = NEONQ();
+ int64_t s = NEONS();
+ int64_t size = NEONLSSize();
+ int64_t index = (q << 3) | (s << 2) | size;
+ return static_cast<int>(index >> access_size_shift);
+ }
+
+ // Helpers.
+ bool IsCondBranchImm() const {
+ return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+ }
+
+ bool IsUncondBranchImm() const {
+ return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+ }
+
+ bool IsCompareBranch() const {
+ return Mask(CompareBranchFMask) == CompareBranchFixed;
+ }
+
+ bool IsTestBranch() const {
+ return Mask(TestBranchFMask) == TestBranchFixed;
+ }
+
+ bool IsImmBranch() const {
+ return BranchType() != UnknownBranchType;
+ }
+
+ bool IsPCRelAddressing() const {
+ return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+ }
+
+ bool IsLogicalImmediate() const {
+ return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+ }
+
+ bool IsAddSubImmediate() const {
+ return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+ }
+
+ bool IsAddSubExtended() const {
+ return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+ }
+
+ bool IsLoadOrStore() const {
+ return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+ }
+
+ bool IsLoad() const;
+ bool IsStore() const;
+
+ bool IsLoadLiteral() const {
+ // This includes PRFM_lit.
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+ }
+
+ bool IsMovn() const {
+ return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+ (Mask(MoveWideImmediateMask) == MOVN_w);
+ }
+
+ // Mozilla modifications.
+ bool IsUncondB() const;
+ bool IsCondB() const;
+ bool IsBL() const;
+ bool IsBR() const;
+ bool IsBLR() const;
+ bool IsTBZ() const;
+ bool IsTBNZ() const;
+ bool IsCBZ() const;
+ bool IsCBNZ() const;
+ bool IsLDR() const;
+ bool IsNOP() const;
+ bool IsCSDB() const;
+ bool IsADR() const;
+ bool IsADRP() const;
+ bool IsMovz() const;
+ bool IsMovk() const;
+ bool IsBranchLinkImm() const;
+ bool IsTargetReachable(const Instruction* target) const;
+ ptrdiff_t ImmPCRawOffset() const;
+ void SetImmPCRawOffset(ptrdiff_t offset);
+ void SetBits32(int msb, int lsb, unsigned value);
+
+ // Is this a stack pointer synchronization instruction as inserted by
+ // MacroAssembler::syncStackPtr()?
+ bool IsStackPtrSync() const;
+
+ static int ImmBranchRangeBitwidth(ImmBranchType branch_type);
+ static int32_t ImmBranchForwardRange(ImmBranchType branch_type);
+
+ // Check if offset can be encoded as a RAW offset in a branch_type
+ // instruction. The offset must be encodeable directly as the immediate field
+ // in the instruction, it is not scaled by kInstructionSize first.
+ static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);
+
+ // Get the range type corresponding to a branch type.
+ static ImmBranchRangeType ImmBranchTypeToRange(ImmBranchType);
+
+ // Get the maximum realizable forward PC offset (in bytes) for an immediate
+ // branch of the given range type.
+ // This is the largest positive multiple of kInstructionSize, offset, such
+ // that:
+ //
+ // IsValidImmPCOffset(xxx, offset / kInstructionSize)
+ //
+ // returns true for the same branch type.
+ static int32_t ImmBranchMaxForwardOffset(ImmBranchRangeType range_type);
+
+ // Get the minimum realizable backward PC offset (in bytes) for an immediate
+ // branch of the given range type.
+ // This is the smallest (i.e., largest in magnitude) negative multiple of
+ // kInstructionSize, offset, such that:
+ //
+ // IsValidImmPCOffset(xxx, offset / kInstructionSize)
+ //
+ // returns true for the same branch type.
+ static int32_t ImmBranchMinBackwardOffset(ImmBranchRangeType range_type);
+
+ // Indicate whether Rd can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rd field.
+ Reg31Mode RdMode() const {
+ // The following instructions use sp or wsp as Rd:
+ // Add/sub (immediate) when not setting the flags.
+ // Add/sub (extended) when not setting the flags.
+ // Logical (immediate) when not setting the flags.
+ // Otherwise, r31 is the zero register.
+ if (IsAddSubImmediate() || IsAddSubExtended()) {
+ if (Mask(AddSubSetFlagsBit)) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ if (IsLogicalImmediate()) {
+ // Of the logical (immediate) instructions, only ANDS (and its aliases)
+ // can set the flags. The others can all write into sp.
+ // Note that some logical operations are not available to
+ // immediate-operand instructions, so we have to combine two masks here.
+ if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ // Indicate whether Rn can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rn field.
+ Reg31Mode RnMode() const {
+ // The following instructions use sp or wsp as Rn:
+ // All loads and stores.
+ // Add/sub (immediate).
+ // Add/sub (extended).
+ // Otherwise, r31 is the zero register.
+ if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+ return Reg31IsStackPointer;
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ ImmBranchType BranchType() const {
+ if (IsCondBranchImm()) {
+ return CondBranchType;
+ } else if (IsUncondBranchImm()) {
+ return UncondBranchType;
+ } else if (IsCompareBranch()) {
+ return CompareBranchType;
+ } else if (IsTestBranch()) {
+ return TestBranchType;
+ } else {
+ return UnknownBranchType;
+ }
+ }
+
+ // Find the target of this instruction. 'this' may be a branch or a
+ // PC-relative addressing instruction.
+ const Instruction* ImmPCOffsetTarget() const;
+
+ // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ void SetImmPCOffsetTarget(const Instruction* target);
+ // Patch a literal load instruction to load from 'source'.
+ void SetImmLLiteral(const Instruction* source);
+
+ // The range of a load literal instruction, expressed as 'instr +- range'.
+ // The range is actually the 'positive' range; the branch instruction can
+ // target [instr - range - kInstructionSize, instr + range].
+ static const int kLoadLiteralImmBitwidth = 19;
+ static const int kLoadLiteralRange =
+ (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
+
+ // Calculate the address of a literal referred to by a load-literal
+ // instruction, and return it as the specified type.
+ //
+ // The literal itself is safely mutable only if the backing buffer is safely
+ // mutable.
+ template <typename T>
+ T LiteralAddress() const {
+ uint64_t base_raw = reinterpret_cast<uint64_t>(this);
+ int64_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ uint64_t address_raw = base_raw + offset;
+
+ // Cast the address using a C-style cast. A reinterpret_cast would be
+ // appropriate, but it can't cast one integral type to another.
+ T address = (T)(address_raw);
+
+ // Assert that the address can be represented by the specified type.
+ VIXL_ASSERT((uint64_t)(address) == address_raw);
+
+ return address;
+ }
+
+ uint32_t Literal32() const {
+ uint32_t literal;
+ memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
+ return literal;
+ }
+
+ uint64_t Literal64() const {
+ uint64_t literal;
+ memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
+ return literal;
+ }
+
+ void SetLiteral64(uint64_t literal) const {
+ memcpy(LiteralAddress<void*>(), &literal, sizeof(literal));
+ }
+
+ float LiteralFP32() const {
+ return RawbitsToFloat(Literal32());
+ }
+
+ double LiteralFP64() const {
+ return RawbitsToDouble(Literal64());
+ }
+
+ const Instruction* NextInstruction() const {
+ return this + kInstructionSize;
+ }
+
+ // Skip any constant pools with artificial guards at this point.
+ // Return either |this| or the first instruction after the pool.
+ const Instruction* skipPool() const;
+
+ const Instruction* InstructionAtOffset(int64_t offset) const {
+ VIXL_ASSERT(IsWordAligned(this + offset));
+ return this + offset;
+ }
+
+ template<typename T> static Instruction* Cast(T src) {
+ return reinterpret_cast<Instruction*>(src);
+ }
+
+ template<typename T> static const Instruction* CastConst(T src) {
+ return reinterpret_cast<const Instruction*>(src);
+ }
+
+ private:
+ int ImmBranch() const;
+
+ static float Imm8ToFP32(uint32_t imm8);
+ static double Imm8ToFP64(uint32_t imm8);
+
+ void SetPCRelImmTarget(const Instruction* target);
+ void SetBranchImmTarget(const Instruction* target);
+};
+
+
+// Functions for handling NEON vector format information.
+enum VectorFormat {
+ kFormatUndefined = 0xffffffff,
+ kFormat8B = NEON_8B,
+ kFormat16B = NEON_16B,
+ kFormat4H = NEON_4H,
+ kFormat8H = NEON_8H,
+ kFormat2S = NEON_2S,
+ kFormat4S = NEON_4S,
+ kFormat1D = NEON_1D,
+ kFormat2D = NEON_2D,
+
+ // Scalar formats. We add the scalar bit to distinguish between scalar and
+ // vector enumerations; the bit is always set in the encoding of scalar ops
+ // and always clear for vector ops. Although kFormatD and kFormat1D appear
+ // to be the same, their meaning is subtly different. The first is a scalar
+ // operation, the second a vector operation that only affects one lane.
+ kFormatB = NEON_B | NEONScalar,
+ kFormatH = NEON_H | NEONScalar,
+ kFormatS = NEON_S | NEONScalar,
+ kFormatD = NEON_D | NEONScalar
+};
+
+VectorFormat VectorFormatHalfWidth(const VectorFormat vform);
+VectorFormat VectorFormatDoubleWidth(const VectorFormat vform);
+VectorFormat VectorFormatDoubleLanes(const VectorFormat vform);
+VectorFormat VectorFormatHalfLanes(const VectorFormat vform);
+VectorFormat ScalarFormatFromLaneSize(int lanesize);
+VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform);
+VectorFormat VectorFormatFillQ(const VectorFormat vform);
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
+// TODO: Make the return types of these functions consistent.
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
+int LaneSizeInBytesFromFormat(VectorFormat vform);
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
+int LaneCountFromFormat(VectorFormat vform);
+int MaxLaneCountFromFormat(VectorFormat vform);
+bool IsVectorFormat(VectorFormat vform);
+int64_t MaxIntFromFormat(VectorFormat vform);
+int64_t MinIntFromFormat(VectorFormat vform);
+uint64_t MaxUintFromFormat(VectorFormat vform);
+
+
+enum NEONFormat {
+ NF_UNDEF = 0,
+ NF_8B = 1,
+ NF_16B = 2,
+ NF_4H = 3,
+ NF_8H = 4,
+ NF_2S = 5,
+ NF_4S = 6,
+ NF_1D = 7,
+ NF_2D = 8,
+ NF_B = 9,
+ NF_H = 10,
+ NF_S = 11,
+ NF_D = 12
+};
+
+static const unsigned kNEONFormatMaxBits = 6;
+
+struct NEONFormatMap {
+ // The bit positions in the instruction to consider.
+ uint8_t bits[kNEONFormatMaxBits];
+
+ // Mapping from concatenated bits to format.
+ NEONFormat map[1 << kNEONFormatMaxBits];
+};
+
+class NEONFormatDecoder {
+ public:
+ enum SubstitutionMode {
+ kPlaceholder,
+ kFormat
+ };
+
+ // Construct a format decoder with increasingly specific format maps for each
+ // substitution. If no format map is specified, the default is the integer
+ // format map.
+ explicit NEONFormatDecoder(const Instruction* instr) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(IntegerFormatMap());
+ }
+ NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format);
+ }
+ NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format0,
+ const NEONFormatMap* format1) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format0, format1);
+ }
+ NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format0,
+ const NEONFormatMap* format1,
+ const NEONFormatMap* format2) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format0, format1, format2);
+ }
+
+ // Set the format mapping for all or individual substitutions.
+ void SetFormatMaps(const NEONFormatMap* format0,
+ const NEONFormatMap* format1 = NULL,
+ const NEONFormatMap* format2 = NULL) {
+ VIXL_ASSERT(format0 != NULL);
+ formats_[0] = format0;
+ formats_[1] = (format1 == NULL) ? formats_[0] : format1;
+ formats_[2] = (format2 == NULL) ? formats_[1] : format2;
+ }
+ void SetFormatMap(unsigned index, const NEONFormatMap* format) {
+    VIXL_ASSERT(index < (sizeof(formats_) / sizeof(formats_[0])));
+ VIXL_ASSERT(format != NULL);
+ formats_[index] = format;
+ }
+
+ // Substitute %s in the input string with the placeholder string for each
+  // register, i.e. "'B", "'H", etc.
+ const char* SubstitutePlaceholders(const char* string) {
+ return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
+ }
+
+ // Substitute %s in the input string with a new string based on the
+ // substitution mode.
+ const char* Substitute(const char* string,
+ SubstitutionMode mode0 = kFormat,
+ SubstitutionMode mode1 = kFormat,
+ SubstitutionMode mode2 = kFormat) {
+ snprintf(form_buffer_, sizeof(form_buffer_), string,
+ GetSubstitute(0, mode0),
+ GetSubstitute(1, mode1),
+ GetSubstitute(2, mode2));
+ return form_buffer_;
+ }
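+
+  // Illustrative usage, assuming instr decodes with Q = 1 and size = 0b10 so
+  // that the default integer format map yields "4s" for every substitution:
+  //   NEONFormatDecoder nfd(instr);
+  //   nfd.Substitute("add v0.%s, v1.%s, v2.%s");  // "add v0.4s, v1.4s, v2.4s"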
+
+ // Append a "2" to a mnemonic string based of the state of the Q bit.
+ const char* Mnemonic(const char* mnemonic) {
+ if ((instrbits_ & NEON_Q) != 0) {
+ snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
+ return mne_buffer_;
+ }
+ return mnemonic;
+ }
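+
+  // For example, Mnemonic("saddl") returns "saddl2" when the decoded
+  // instruction has the NEON_Q bit set, and "saddl" unchanged otherwise.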
+
+ VectorFormat GetVectorFormat(int format_index = 0) {
+ return GetVectorFormat(formats_[format_index]);
+ }
+
+ VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
+ static const VectorFormat vform[] = {
+ kFormatUndefined,
+ kFormat8B, kFormat16B, kFormat4H, kFormat8H,
+ kFormat2S, kFormat4S, kFormat1D, kFormat2D,
+ kFormatB, kFormatH, kFormatS, kFormatD
+ };
+ VIXL_ASSERT(GetNEONFormat(format_map) < (sizeof(vform) / sizeof(vform[0])));
+ return vform[GetNEONFormat(format_map)];
+ }
+
+  // Built-in mappings for common cases.
+
+ // The integer format map uses three bits (Q, size<1:0>) to encode the
+ // "standard" set of NEON integer vector formats.
+ static const NEONFormatMap* IntegerFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22, 30},
+ {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}
+ };
+ return &map;
+ }
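+
+  // For example, size<1:0> = 0b01 with Q = 0 concatenates to index 0b010 and
+  // selects NF_4H, while the same size with Q = 1 gives index 0b011 and NF_8H;
+  // the Q bit picks between the 64-bit and 128-bit arrangement of the same
+  // lane size, and index 0b110 (size 0b11, Q = 0) is undefined because this
+  // map has no 1D arrangement.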
+
+ // The long integer format map uses two bits (size<1:0>) to encode the
+ // long set of NEON integer vector formats. These are used in narrow, wide
+ // and long operations.
+ static const NEONFormatMap* LongIntegerFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22}, {NF_8H, NF_4S, NF_2D}
+ };
+ return &map;
+ }
+
+ // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
+ // formats: NF_2S, NF_4S, NF_2D.
+ static const NEONFormatMap* FPFormatMap() {
+ // The FP format map assumes two bits (Q, size<0>) are used to encode the
+ // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
+ static const NEONFormatMap map = {
+ {22, 30}, {NF_2S, NF_4S, NF_UNDEF, NF_2D}
+ };
+ return &map;
+ }
+
+ // The load/store format map uses three bits (Q, 11, 10) to encode the
+ // set of NEON vector formats.
+ static const NEONFormatMap* LoadStoreFormatMap() {
+ static const NEONFormatMap map = {
+ {11, 10, 30},
+ {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+ };
+ return &map;
+ }
+
+ // The logical format map uses one bit (Q) to encode the NEON vector format:
+ // NF_8B, NF_16B.
+ static const NEONFormatMap* LogicalFormatMap() {
+ static const NEONFormatMap map = {
+ {30}, {NF_8B, NF_16B}
+ };
+ return &map;
+ }
+
+ // The triangular format map uses between two and five bits to encode the NEON
+ // vector format:
+ // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
+ // x1000->2S, x1001->4S, 10001->2D, all others undefined.
+ static const NEONFormatMap* TriangularFormatMap() {
+ static const NEONFormatMap map = {
+ {19, 18, 17, 16, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S,
+ NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_UNDEF, NF_2D,
+ NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, NF_4S, NF_8B, NF_16B,
+ NF_4H, NF_8H, NF_8B, NF_16B}
+ };
+ return &map;
+ }
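+
+  // Worked example: immediate bits <19:16> = 0b0001 with Q = 0 concatenate to
+  // 0b00010, which matches the xxx10 pattern above and yields NF_8B (entry 2
+  // of the table); setting Q gives 0b00011 and NF_16B. The 32 entries simply
+  // unroll the don't-care patterns so that decoding remains a single array
+  // lookup.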
+
+ // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
+ // formats: NF_B, NF_H, NF_S, NF_D.
+ static const NEONFormatMap* ScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22}, {NF_B, NF_H, NF_S, NF_D}
+ };
+ return &map;
+ }
+
+ // The long scalar format map uses two bits (size<1:0>) to encode the longer
+ // NEON scalar formats: NF_H, NF_S, NF_D.
+ static const NEONFormatMap* LongScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22}, {NF_H, NF_S, NF_D}
+ };
+ return &map;
+ }
+
+ // The FP scalar format map assumes one bit (size<0>) is used to encode the
+ // NEON FP scalar formats: NF_S, NF_D.
+ static const NEONFormatMap* FPScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {22}, {NF_S, NF_D}
+ };
+ return &map;
+ }
+
+ // The triangular scalar format map uses between one and four bits to encode
+  // the NEON scalar formats:
+ // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
+ static const NEONFormatMap* TriangularScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {19, 18, 17, 16},
+ {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B,
+ NF_D, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B}
+ };
+ return &map;
+ }
+
+ private:
+ // Get a pointer to a string that represents the format or placeholder for
+ // the specified substitution index, based on the format map and instruction.
+ const char* GetSubstitute(int index, SubstitutionMode mode) {
+ if (mode == kFormat) {
+ return NEONFormatAsString(GetNEONFormat(formats_[index]));
+ }
+ VIXL_ASSERT(mode == kPlaceholder);
+ return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
+ }
+
+ // Get the NEONFormat enumerated value for bits obtained from the
+ // instruction based on the specified format mapping.
+ NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
+ return format_map->map[PickBits(format_map->bits)];
+ }
+
+ // Convert a NEONFormat into a string.
+ static const char* NEONFormatAsString(NEONFormat format) {
+ static const char* formats[] = {
+ "undefined",
+ "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
+ "b", "h", "s", "d"
+ };
+ VIXL_ASSERT(format < (sizeof(formats) / sizeof(formats[0])));
+ return formats[format];
+ }
+
+ // Convert a NEONFormat into a register placeholder string.
+ static const char* NEONFormatAsPlaceholder(NEONFormat format) {
+ VIXL_ASSERT((format == NF_B) || (format == NF_H) ||
+ (format == NF_S) || (format == NF_D) ||
+ (format == NF_UNDEF));
+ static const char* formats[] = {
+ "undefined",
+ "undefined", "undefined", "undefined", "undefined",
+ "undefined", "undefined", "undefined", "undefined",
+ "'B", "'H", "'S", "'D"
+ };
+ return formats[format];
+ }
+
+ // Select bits from instrbits_ defined by the bits array, concatenate them,
+ // and return the value.
+ uint8_t PickBits(const uint8_t bits[]) {
+ uint8_t result = 0;
+ for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
+ if (bits[b] == 0) break;
+ result <<= 1;
+ result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
+ }
+ return result;
+ }
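+
+  // For example, with the load/store map's bit list {11, 10, 30} and an
+  // instruction where bit 11 = 1, bit 10 = 0 and bit 30 = 1, the loop
+  // concatenates the bits in array order and returns 0b101 = 5, which that map
+  // decodes as NF_4S. Because a bit position of 0 ends the loop, bit 0 of the
+  // instruction can never be part of a format field.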
+
+ Instr instrbits_;
+ const NEONFormatMap* formats_[3];
+ char form_buffer_[64];
+ char mne_buffer_[16];
+};
+} // namespace vixl
+
+#endif // VIXL_A64_INSTRUCTIONS_A64_H_
diff --git a/js/src/jit/arm64/vixl/Instrument-vixl.cpp b/js/src/jit/arm64/vixl/Instrument-vixl.cpp
new file mode 100644
index 0000000000..c07495c29d
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instrument-vixl.cpp
@@ -0,0 +1,850 @@
+// Copyright 2014, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Instrument-vixl.h"
+
+namespace vixl {
+
+Counter::Counter(const char* name, CounterType type)
+ : count_(0), enabled_(false), type_(type) {
+ VIXL_ASSERT(name != NULL);
+ strncpy(name_, name, kCounterNameMaxLength);
+}
+
+
+void Counter::Enable() {
+ enabled_ = true;
+}
+
+
+void Counter::Disable() {
+ enabled_ = false;
+}
+
+
+bool Counter::IsEnabled() {
+ return enabled_;
+}
+
+
+void Counter::Increment() {
+ if (enabled_) {
+ count_++;
+ }
+}
+
+
+uint64_t Counter::count() {
+ uint64_t result = count_;
+ if (type_ == Gauge) {
+ // If the counter is a Gauge, reset the count after reading.
+ count_ = 0;
+ }
+ return result;
+}
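+
+// For example, a Gauge counter that has been incremented five times returns 5
+// from the first count() call and 0 from the next, whereas a Cumulative
+// counter returns 5 both times; this is what lets Instrument emit
+// per-sample-period deltas for gauges while the "Instruction" counter keeps a
+// running total.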
+
+
+const char* Counter::name() {
+ return name_;
+}
+
+
+CounterType Counter::type() {
+ return type_;
+}
+
+
+struct CounterDescriptor {
+ const char* name;
+ CounterType type;
+};
+
+
+static const CounterDescriptor kCounterList[] = {
+ {"Instruction", Cumulative},
+
+ {"Move Immediate", Gauge},
+ {"Add/Sub DP", Gauge},
+ {"Logical DP", Gauge},
+ {"Other Int DP", Gauge},
+ {"FP DP", Gauge},
+
+ {"Conditional Select", Gauge},
+ {"Conditional Compare", Gauge},
+
+ {"Unconditional Branch", Gauge},
+ {"Compare and Branch", Gauge},
+ {"Test and Branch", Gauge},
+ {"Conditional Branch", Gauge},
+
+ {"Load Integer", Gauge},
+ {"Load FP", Gauge},
+ {"Load Pair", Gauge},
+ {"Load Literal", Gauge},
+
+ {"Store Integer", Gauge},
+ {"Store FP", Gauge},
+ {"Store Pair", Gauge},
+
+ {"PC Addressing", Gauge},
+ {"Other", Gauge},
+ {"NEON", Gauge},
+ {"Crypto", Gauge}
+};
+
+
+Instrument::Instrument(const char* datafile, uint64_t sample_period)
+ : output_stream_(stdout), sample_period_(sample_period) {
+
+ // Set up the output stream. If datafile is non-NULL, use that file. If it
+ // can't be opened, or datafile is NULL, use stdout.
+ if (datafile != NULL) {
+ output_stream_ = fopen(datafile, "w");
+ if (output_stream_ == NULL) {
+ printf("Can't open output file %s. Using stdout.\n", datafile);
+ output_stream_ = stdout;
+ }
+ }
+
+ static const int num_counters =
+ sizeof(kCounterList) / sizeof(CounterDescriptor);
+
+ // Dump an instrumentation description comment at the top of the file.
+ fprintf(output_stream_, "# counters=%d\n", num_counters);
+ fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
+
+ // Construct Counter objects from counter description array.
+ for (int i = 0; i < num_counters; i++) {
+ if (Counter* counter = js_new<Counter>(kCounterList[i].name, kCounterList[i].type))
+ (void)counters_.append(counter);
+ }
+
+ DumpCounterNames();
+}
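+
+// For example, with the default sampling period of 1 << 22 the output begins
+// with the header lines "# counters=23" and "# sample_period=4194304",
+// followed by one comma-separated line of counter names and then one
+// comma-separated line of counter values per elapsed sample period.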
+
+
+Instrument::~Instrument() {
+ // Dump any remaining instruction data to the output file.
+ DumpCounters();
+
+ // Free all the counter objects.
+ for (auto counter : counters_) {
+ js_delete(counter);
+ }
+
+ if (output_stream_ != stdout) {
+ fclose(output_stream_);
+ }
+}
+
+
+void Instrument::Update() {
+ // Increment the instruction counter, and dump all counters if a sample period
+ // has elapsed.
+ static Counter* counter = GetCounter("Instruction");
+ VIXL_ASSERT(counter->type() == Cumulative);
+ counter->Increment();
+
+ if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
+ DumpCounters();
+ }
+}
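+
+// In other words, while counting is enabled a full set of counter values is
+// appended to the output once every sample_period_ instructions; the
+// "Instruction" counter is Cumulative, so the modulus above sees a running
+// total, while the Gauge counters reset each time DumpCounters() reads them.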
+
+
+void Instrument::DumpCounters() {
+ // Iterate through the counter objects, dumping their values to the output
+ // stream.
+ for (auto counter : counters_) {
+ fprintf(output_stream_, "%" PRIu64 ",", counter->count());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::DumpCounterNames() {
+ // Iterate through the counter objects, dumping the counter names to the
+ // output stream.
+ for (auto counter : counters_) {
+ fprintf(output_stream_, "%s,", counter->name());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::HandleInstrumentationEvent(unsigned event) {
+ switch (event) {
+ case InstrumentStateEnable: Enable(); break;
+ case InstrumentStateDisable: Disable(); break;
+ default: DumpEventMarker(event);
+ }
+}
+
+
+void Instrument::DumpEventMarker(unsigned marker) {
+  // Dump an event marker to the output stream as a specially formatted
+  // comment line.
+ static Counter* counter = GetCounter("Instruction");
+
+ fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
+ (marker >> 8) & 0xff, counter->count());
+}
+
+
+Counter* Instrument::GetCounter(const char* name) {
+ // Get a Counter object by name from the counter list.
+ for (auto counter : counters_) {
+ if (strcmp(counter->name(), name) == 0) {
+ return counter;
+ }
+ }
+
+ // A Counter by that name does not exist: print an error message to stderr
+ // and the output file, and exit.
+ static const char* error_message =
+ "# Error: Unknown counter \"%s\". Exiting.\n";
+ fprintf(stderr, error_message, name);
+ fprintf(output_stream_, error_message, name);
+ exit(1);
+}
+
+
+void Instrument::Enable() {
+ for (auto counter : counters_) {
+ counter->Enable();
+ }
+}
+
+
+void Instrument::Disable() {
+ for (auto counter : counters_) {
+ counter->Disable();
+ }
+}
+
+
+void Instrument::VisitPCRelAddressing(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("PC Addressing");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitLogicalImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitMoveWideImmediate(const Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Move Immediate");
+
+ if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
+ unsigned imm = instr->ImmMoveWide();
+ HandleInstrumentationEvent(imm);
+ } else {
+ counter->Increment();
+ }
+}
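+
+// For example, a "movn xzr, #0x4d53" embedded in instrumented code is not
+// counted as a move-immediate: its 16-bit immediate is passed to
+// HandleInstrumentationEvent(), where 0 disables counting, 1 enables it, and
+// any other value (here the ASCII pair "SM") is written to the output as an
+// event marker comment by DumpEventMarker().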
+
+
+void Instrument::VisitBitfield(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitExtract(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranchToRegister(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCompareBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Compare and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitTestBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Test and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitSystem(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitException(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStorePair(const Instruction* instr) {
+ static Counter* load_pair_counter = GetCounter("Load Pair");
+ static Counter* store_pair_counter = GetCounter("Store Pair");
+
+ if (instr->Mask(LoadStorePairLBit) != 0) {
+ load_pair_counter->Increment();
+ } else {
+ store_pair_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLoadStorePairPostIndex(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairPreIndex(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStoreExclusive(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+void Instrument::VisitAtomicMemory(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+void Instrument::VisitLoadLiteral(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Load Literal");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStore(const Instruction* instr) {
+ static Counter* load_int_counter = GetCounter("Load Integer");
+ static Counter* store_int_counter = GetCounter("Store Integer");
+ static Counter* load_fp_counter = GetCounter("Load FP");
+ static Counter* store_fp_counter = GetCounter("Store FP");
+
+ switch (instr->Mask(LoadStoreMask)) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ VIXL_FALLTHROUGH();
+ case STR_x: store_int_counter->Increment(); break;
+ case STR_s:
+ VIXL_FALLTHROUGH();
+ case STR_d: store_fp_counter->Increment(); break;
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_x:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDRSB_w:
+ VIXL_FALLTHROUGH();
+ case LDRSH_w: load_int_counter->Increment(); break;
+ case LDR_s:
+ VIXL_FALLTHROUGH();
+ case LDR_d: load_fp_counter->Increment(); break;
+ }
+}
+
+
+void Instrument::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePostIndex(const Instruction* instr) {
+ USE(instr);
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePreIndex(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLogicalShifted(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubShifted(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubExtended(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubWithCarry(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareRegister(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalSelect(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing1Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing2Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing3Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPCompare(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalCompare(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalSelect(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing1Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing2Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing3Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPIntegerConvert(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPFixedPointConvert(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCrypto2RegSHA(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Crypto");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCrypto3RegSHA(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Crypto");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCryptoAES(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Crypto");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEON2RegMisc(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEON3Same(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEON3Different(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONAcrossLanes(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONByIndexedElement(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONCopy(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONExtract(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreMultiStructPostIndex(
+ const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreSingleStructPostIndex(
+ const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONModifiedImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalar2RegMisc(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalar3Diff(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalar3Same(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarCopy(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarPairwise(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONShiftImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONTable(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONPerm(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnallocated(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnimplemented(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Instrument-vixl.h b/js/src/jit/arm64/vixl/Instrument-vixl.h
new file mode 100644
index 0000000000..eca076d234
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instrument-vixl.h
@@ -0,0 +1,109 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_INSTRUMENT_A64_H_
+#define VIXL_A64_INSTRUMENT_A64_H_
+
+#include "mozilla/Vector.h"
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Decoder-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+#include "js/AllocPolicy.h"
+
+namespace vixl {
+
+const int kCounterNameMaxLength = 256;
+const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
+
+
+enum InstrumentState {
+ InstrumentStateDisable = 0,
+ InstrumentStateEnable = 1
+};
+
+
+enum CounterType {
+ Gauge = 0, // Gauge counters reset themselves after reading.
+ Cumulative = 1 // Cumulative counters keep their value after reading.
+};
+
+
+class Counter {
+ public:
+ explicit Counter(const char* name, CounterType type = Gauge);
+
+ void Increment();
+ void Enable();
+ void Disable();
+ bool IsEnabled();
+ uint64_t count();
+ const char* name();
+ CounterType type();
+
+ private:
+ char name_[kCounterNameMaxLength];
+ uint64_t count_;
+ bool enabled_;
+ CounterType type_;
+};
+
+
+class Instrument: public DecoderVisitor {
+ public:
+ explicit Instrument(const char* datafile = NULL,
+ uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
+ ~Instrument();
+
+ void Enable();
+ void Disable();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(const Instruction* instr) override;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ void Update();
+ void DumpCounters();
+ void DumpCounterNames();
+ void DumpEventMarker(unsigned marker);
+ void HandleInstrumentationEvent(unsigned event);
+ Counter* GetCounter(const char* name);
+
+ void InstrumentLoadStore(const Instruction* instr);
+ void InstrumentLoadStorePair(const Instruction* instr);
+
+ mozilla::Vector<Counter*, 8, js::SystemAllocPolicy> counters_;
+
+ FILE *output_stream_;
+ uint64_t sample_period_;
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_INSTRUMENT_A64_H_
diff --git a/js/src/jit/arm64/vixl/Logic-vixl.cpp b/js/src/jit/arm64/vixl/Logic-vixl.cpp
new file mode 100644
index 0000000000..71821a333f
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Logic-vixl.cpp
@@ -0,0 +1,4738 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include <cmath>
+
+#include "jit/arm64/vixl/Simulator-vixl.h"
+
+namespace vixl {
+
+template<> double Simulator::FPDefaultNaN<double>() {
+ return kFP64DefaultNaN;
+}
+
+
+template<> float Simulator::FPDefaultNaN<float>() {
+ return kFP32DefaultNaN;
+}
+
+
+double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToDouble(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToDouble(-src, fbits, round);
+ }
+}
+
+
+double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src);
+ const int64_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToDouble(0, exponent, src, round);
+}
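+
+// Worked example: UFixedToDouble(3, 1, round) interprets the input as the
+// fixed-point value 0b1.1. The highest set bit is bit 1, so the exponent is
+// 1 - fbits = 0 and FPRoundToDouble() produces 3 * 2^-1 = 1.5 exactly, for any
+// rounding mode. With fbits = 0 the same input converts to 3.0.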
+
+
+float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToFloat(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToFloat(-src, fbits, round);
+ }
+}
+
+
+float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0f;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src);
+ const int32_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToFloat(0, exponent, src, round);
+}
+
+
+void Simulator::ld1(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16))
+ return;
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.ReadUintFromMem(vform, i, addr);
+ addr += LaneSizeInBytesFromFormat(vform);
+ }
+}
+
+
+void Simulator::ld1(VectorFormat vform,
+ LogicVRegister dst,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)))
+ return;
+ dst.ReadUintFromMem(vform, index, addr);
+}
+
+
+void Simulator::ld1r(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)))
+ return;
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.ReadUintFromMem(vform, i, addr);
+ }
+}
+
+
+void Simulator::ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, 16*2))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ addr1 += 2 * esize;
+ addr2 += 2 * esize;
+ }
+}
+
+
+void Simulator::ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ int index,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, LaneSizeInBytesFromFormat(vform)*2))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+}
+
+
+void Simulator::ld2r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*2))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ }
+}
+
+
+void Simulator::ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, 16*3))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ uint64_t addr3 = addr2 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ addr1 += 3 * esize;
+ addr2 += 3 * esize;
+ addr3 += 3 * esize;
+ }
+}
+
+
+void Simulator::ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ int index,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, LaneSizeInBytesFromFormat(vform)*3))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+ dst3.ReadUintFromMem(vform, index, addr3);
+}
+
+
+void Simulator::ld3r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*3))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ }
+}
+
+
+void Simulator::ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, 16*4))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ uint64_t addr3 = addr2 + esize;
+ uint64_t addr4 = addr3 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ dst4.ReadUintFromMem(vform, i, addr4);
+ addr1 += 4 * esize;
+ addr2 += 4 * esize;
+ addr3 += 4 * esize;
+ addr4 += 4 * esize;
+ }
+}
+
+
+void Simulator::ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ int index,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, LaneSizeInBytesFromFormat(vform)*4))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+ dst3.ReadUintFromMem(vform, index, addr3);
+ dst4.ReadUintFromMem(vform, index, addr4);
+}
+
+
+void Simulator::ld4r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*4))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ dst4.ReadUintFromMem(vform, i, addr4);
+ }
+}
+
+
+void Simulator::st1(VectorFormat vform,
+ LogicVRegister src,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16))
+ return;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ src.WriteUintToMem(vform, i, addr);
+ addr += LaneSizeInBytesFromFormat(vform);
+ }
+}
+
+
+void Simulator::st1(VectorFormat vform,
+ LogicVRegister src,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)))
+ return;
+ src.WriteUintToMem(vform, index, addr);
+}
+
+
+void Simulator::st2(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16*2))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ addr += 2 * esize;
+ addr2 += 2 * esize;
+ }
+}
+
+
+void Simulator::st2(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*2))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+}
+
+
+void Simulator::st3(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16*3))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ uint64_t addr3 = addr2 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ dst3.WriteUintToMem(vform, i, addr3);
+ addr += 3 * esize;
+ addr2 += 3 * esize;
+ addr3 += 3 * esize;
+ }
+}
+
+
+void Simulator::st3(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*3))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+ dst3.WriteUintToMem(vform, index, addr + 2 * esize);
+}
+
+
+void Simulator::st4(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16*4))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ uint64_t addr3 = addr2 + esize;
+ uint64_t addr4 = addr3 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ dst3.WriteUintToMem(vform, i, addr3);
+ dst4.WriteUintToMem(vform, i, addr4);
+ addr += 4 * esize;
+ addr2 += 4 * esize;
+ addr3 += 4 * esize;
+ addr4 += 4 * esize;
+ }
+}
+
+
+void Simulator::st4(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*4))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+ dst3.WriteUintToMem(vform, index, addr + 2 * esize);
+ dst4.WriteUintToMem(vform, index, addr + 3 * esize);
+}
+
+
+LogicVRegister Simulator::cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t sa = src1.Int(vform, i);
+ int64_t sb = src2.Int(vform, i);
+ uint64_t ua = src1.Uint(vform, i);
+ uint64_t ub = src2.Uint(vform, i);
+ bool result = false;
+ switch (cond) {
+ case eq: result = (ua == ub); break;
+ case ge: result = (sa >= sb); break;
+ case gt: result = (sa > sb) ; break;
+ case hi: result = (ua > ub) ; break;
+ case hs: result = (ua >= ub); break;
+ case lt: result = (sa < sb) ; break;
+ case le: result = (sa <= sb); break;
+ default: VIXL_UNREACHABLE(); break;
+ }
+ dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ int imm,
+ Condition cond) {
+ SimVRegister temp;
+ LogicVRegister imm_reg = dup_immediate(vform, temp, imm);
+ return cmp(vform, dst, src1, imm_reg, cond);
+}
+
+
+LogicVRegister Simulator::cmptst(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t ua = src1.Uint(vform, i);
+ uint64_t ub = src2.Uint(vform, i);
+ dst.SetUint(vform, i, ((ua & ub) != 0) ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::add(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ // TODO(all): consider assigning the result of LaneCountFromFormat to a local.
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for unsigned saturation.
+ uint64_t ua = src1.UintLeftJustified(vform, i);
+ uint64_t ub = src2.UintLeftJustified(vform, i);
+ uint64_t ur = ua + ub;
+ if (ur < ua) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ // Test for signed saturation.
+ int64_t sa = src1.IntLeftJustified(vform, i);
+ int64_t sb = src2.IntLeftJustified(vform, i);
+ int64_t sr = sa + sb;
+ // If the signs of the operands are the same, but different from the result,
+ // there was an overflow.
+ if (((sa >= 0) == (sb >= 0)) && ((sa >= 0) != (sr >= 0))) {
+ dst.SetSignedSat(i, sa >= 0);
+ }
+
+ dst.SetInt(vform, i, src1.Int(vform, i) + src2.Int(vform, i));
+ }
+ return dst;
+}
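+
+// For example, adding 0x70 and 0x70 in a B-sized lane stores the wrapped
+// result 0xe0: both operands are non-negative but the result is negative, so
+// the signed-saturation flag for that lane is recorded (in the positive
+// direction), while the unsigned test does not fire because the sum did not
+// wrap past zero. The saturating variants (such as sqadd and uqadd) then use
+// these per-lane flags to clamp the affected lanes.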
+
+
+LogicVRegister Simulator::addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uzp1(vform, temp1, src1, src2);
+ uzp2(vform, temp2, src1, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ mul(vform, temp, src1, src2);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ mul(vform, temp, src1, src2);
+ sub(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) * src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mul(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mla(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mls(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return sqdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return sqrdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
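+// Carry-less (polynomial) multiplication over GF(2): partial products are
+// combined with XOR rather than addition. For example, PolynomialMult(0x05,
+// 0x03) is (0x03 << 0) ^ (0x03 << 2) = 0x0f.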
+uint16_t Simulator::PolynomialMult(uint8_t op1, uint8_t op2) {
+ uint16_t result = 0;
+ uint16_t extended_op2 = op2;
+ for (int i = 0; i < 8; ++i) {
+ if ((op1 >> i) & 1) {
+ result = result ^ (extended_op2 << i);
+ }
+ }
+ return result;
+}
+
+
+LogicVRegister Simulator::pmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i,
+ PolynomialMult(src1.Uint(vform, i), src2.Uint(vform, i)));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::pmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VectorFormat vform_src = VectorFormatHalfWidth(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, PolynomialMult(src1.Uint(vform_src, i),
+ src2.Uint(vform_src, i)));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::pmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VectorFormat vform_src = VectorFormatHalfWidthDoubleLanes(vform);
+ dst.ClearForWrite(vform);
+ int lane_count = LaneCountFromFormat(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetUint(vform, i, PolynomialMult(src1.Uint(vform_src, lane_count + i),
+ src2.Uint(vform_src, lane_count + i)));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sub(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for unsigned saturation.
+ if (src2.Uint(vform, i) > src1.Uint(vform, i)) {
+ dst.SetUnsignedSat(i, false);
+ }
+
+ // Test for signed saturation.
+ int64_t sa = src1.IntLeftJustified(vform, i);
+ int64_t sb = src2.IntLeftJustified(vform, i);
+ int64_t sr = sa - sb;
+ // If the signs of the operands are different, and the sign of the first
+ // operand doesn't match the result, there was an overflow.
+ if (((sa >= 0) != (sb >= 0)) && ((sa >= 0) != (sr >= 0))) {
+ dst.SetSignedSat(i, sr < 0);
+ }
+
+ dst.SetInt(vform, i, src1.Int(vform, i) - src2.Int(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::and_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) & src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) | src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::orn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) | ~src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::eor(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) ^ src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) & ~src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src.Uint(vform, i) & ~imm;
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
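+// BIF, BIT and BSL share the bitwise select identity
+//   result = a ^ ((a ^ b) & mask)
+// which takes each bit from b where the corresponding mask bit is set and
+// from a otherwise; the three instructions differ only in which operand
+// provides the mask (inverted for BIF) and which provides the fallback bits.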
+LogicVRegister Simulator::bif(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = dst.Uint(vform, i);
+ uint64_t operand2 = ~src2.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = dst.Uint(vform, i);
+ uint64_t operand2 = src2.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = src2.Uint(vform, i);
+ uint64_t operand2 = dst.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t src1_val = src1.Int(vform, i);
+ int64_t src2_val = src2.Int(vform, i);
+ int64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetInt(vform, i, dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::smax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sminmax(vform, dst, src1, src2, true);
+}
+
+
+LogicVRegister Simulator::smin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sminmax(vform, dst, src1, src2, false);
+}
+
+
+LogicVRegister Simulator::sminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i += 2) {
+ int64_t src1_val = src.Int(vform, i);
+ int64_t src2_val = src.Int(vform, i + 1);
+ int64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetInt(vform, dst_index + (i >> 1), dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::smaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ sminmaxp(vform, dst, 0, src1, true);
+ sminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ sminmaxp(vform, dst, 0, src1, false);
+ sminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VIXL_ASSERT(vform == kFormatD);
+
+ int64_t dst_val = src.Int(kFormat2D, 0) + src.Int(kFormat2D, 1);
+ dst.ClearForWrite(vform);
+ dst.SetInt(vform, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::addv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst
+ = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
+
+ int64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Int(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetInt(vform_dst, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst
+ = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2);
+
+ int64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Int(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetInt(vform_dst, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst
+ = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2);
+
+ uint64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetUint(vform_dst, 0, dst_val);
+ return dst;
+}
+
+
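+// Across-lanes min/max reductions leave their result in lane 0 and zero the
+// remaining lanes of the destination.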
+LogicVRegister Simulator::sminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max) {
+ dst.ClearForWrite(vform);
+ int64_t dst_val = max ? INT64_MIN : INT64_MAX;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t src_val = src.Int(vform, i);
+ if (max == true) {
+ dst_val = (src_val > dst_val) ? src_val : dst_val;
+ } else {
+ dst_val = (src_val < dst_val) ? src_val : dst_val;
+ }
+ }
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetInt(vform, i, 0);
+ }
+ dst.SetInt(vform, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ sminmaxv(vform, dst, src, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ sminmaxv(vform, dst, src, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t src1_val = src1.Uint(vform, i);
+ uint64_t src2_val = src2.Uint(vform, i);
+ uint64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetUint(vform, i, dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::umax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return uminmax(vform, dst, src1, src2, true);
+}
+
+
+LogicVRegister Simulator::umin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return uminmax(vform, dst, src1, src2, false);
+}
+
+
+LogicVRegister Simulator::uminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i += 2) {
+ uint64_t src1_val = src.Uint(vform, i);
+ uint64_t src2_val = src.Uint(vform, i + 1);
+ uint64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetUint(vform, dst_index + (i >> 1), dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::umaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ uminmaxp(vform, dst, 0, src1, true);
+ uminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ uminmaxp(vform, dst, 0, src1, false);
+ uminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max) {
+ dst.ClearForWrite(vform);
+ uint64_t dst_val = max ? 0 : UINT64_MAX;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t src_val = src.Uint(vform, i);
+ if (max == true) {
+ dst_val = (src_val > dst_val) ? src_val : dst_val;
+ } else {
+ dst_val = (src_val < dst_val) ? src_val : dst_val;
+ }
+ }
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, 0);
+ }
+ dst.SetUint(vform, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uminmaxv(vform, dst, src, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uminmaxv(vform, dst, src, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::shl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return ushl(vform, dst, src, shiftreg);
+}
+
+
+LogicVRegister Simulator::sshll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = sxtl(vform, temp2, src);
+ return sshl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::sshll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = sxtl2(vform, temp2, src);
+ return sshl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::shll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int shift = LaneSizeInBitsFromFormat(vform) / 2;
+ return sshll(vform, dst, src, shift);
+}
+
+
+LogicVRegister Simulator::shll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int shift = LaneSizeInBitsFromFormat(vform) / 2;
+ return sshll2(vform, dst, src, shift);
+}
+
+
+LogicVRegister Simulator::ushll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = uxtl(vform, temp2, src);
+ return ushl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::ushll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = uxtl2(vform, temp2, src);
+ return ushl(vform, dst, extendedreg, shiftreg);
+}
+
+
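+// SLI and SRI shift each source lane and insert it into the destination,
+// preserving the destination bits that fall outside the shifted mask.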
+LogicVRegister Simulator::sli(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ dst.ClearForWrite(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t src_lane = src.Uint(vform, i);
+ uint64_t dst_lane = dst.Uint(vform, i);
+ uint64_t shifted = src_lane << shift;
+ uint64_t mask = MaxUintFromFormat(vform) << shift;
+ dst.SetUint(vform, i, (dst_lane & ~mask) | shifted);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return sshl(vform, dst, src, shiftreg).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return ushl(vform, dst, src, shiftreg).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqshlu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return sshl(vform, dst, src, shiftreg).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sri(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ dst.ClearForWrite(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ VIXL_ASSERT((shift > 0) &&
+ (shift <= static_cast<int>(LaneSizeInBitsFromFormat(vform))));
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t src_lane = src.Uint(vform, i);
+ uint64_t dst_lane = dst.Uint(vform, i);
+ uint64_t shifted;
+ uint64_t mask;
+ if (shift == 64) {
+ shifted = 0;
+ mask = 0;
+ } else {
+ shifted = src_lane >> shift;
+ mask = MaxUintFromFormat(vform) >> shift;
+ }
+ dst.SetUint(vform, i, (dst_lane & ~mask) | shifted);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ushr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, -shift);
+ return ushl(vform, dst, src, shiftreg);
+}
+
+
+LogicVRegister Simulator::sshr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, -shift);
+ return sshl(vform, dst, src, shiftreg);
+}
+
+
+LogicVRegister Simulator::ssra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = sshr(vform, temp, src, shift);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::usra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = ushr(vform, temp, src, shift);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::srsra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = sshr(vform, temp, src, shift).Round(vform);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::ursra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = ushr(vform, temp, src, shift).Round(vform);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::cls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ result[i] = CountLeadingSignBits(src.Int(vform, i), laneSizeInBits);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::clz(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ result[i] = CountLeadingZeros(src.Uint(vform, i), laneSizeInBits);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::cnt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t value = src.Uint(vform, i);
+ result[i] = 0;
+ for (int j = 0; j < laneSizeInBits; j++) {
+ result[i] += (value & 1);
+ value >>= 1;
+ }
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
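+// sshl and ushl implement both left and right shifts: a negative per-lane
+// shift count shifts right. They also record the rounding and saturation
+// state that the rounding/saturating shift variants consume afterwards.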
+LogicVRegister Simulator::sshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int8_t shift_val = src2.Int(vform, i);
+ int64_t lj_src_val = src1.IntLeftJustified(vform, i);
+
+ // Set signed saturation state.
+ if ((shift_val > CountLeadingSignBits(lj_src_val)) &&
+ (lj_src_val != 0)) {
+ dst.SetSignedSat(i, lj_src_val >= 0);
+ }
+
+ // Set unsigned saturation state.
+ if (lj_src_val < 0) {
+ dst.SetUnsignedSat(i, false);
+ } else if ((shift_val > CountLeadingZeros(lj_src_val)) &&
+ (lj_src_val != 0)) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ int64_t src_val = src1.Int(vform, i);
+ if (shift_val > 63) {
+ dst.SetInt(vform, i, 0);
+ } else if (shift_val < -63) {
+ dst.SetRounding(i, src_val < 0);
+ dst.SetInt(vform, i, (src_val < 0) ? -1 : 0);
+ } else {
+ if (shift_val < 0) {
+ // Set rounding state. Rounding only needed on right shifts.
+ if (((src_val >> (-shift_val - 1)) & 1) == 1) {
+ dst.SetRounding(i, true);
+ }
+ src_val >>= -shift_val;
+ } else {
+ src_val <<= shift_val;
+ }
+ dst.SetInt(vform, i, src_val);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ushl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int8_t shift_val = src2.Int(vform, i);
+ uint64_t lj_src_val = src1.UintLeftJustified(vform, i);
+
+ // Set saturation state.
+ if ((shift_val > CountLeadingZeros(lj_src_val)) && (lj_src_val != 0)) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ uint64_t src_val = src1.Uint(vform, i);
+ if ((shift_val > 63) || (shift_val < -64)) {
+ dst.SetUint(vform, i, 0);
+ } else {
+ if (shift_val < 0) {
+ // Set rounding state. Rounding only needed on right shifts.
+ if (((src_val >> (-shift_val - 1)) & 1) == 1) {
+ dst.SetRounding(i, true);
+ }
+
+ if (shift_val == -64) {
+ src_val = 0;
+ } else {
+ src_val >>= -shift_val;
+ }
+ } else {
+ src_val <<= shift_val;
+ }
+ dst.SetUint(vform, i, src_val);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::neg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for signed saturation.
+ int64_t sa = src.Int(vform, i);
+ if (sa == MinIntFromFormat(vform)) {
+ dst.SetSignedSat(i, true);
+ }
+ dst.SetInt(vform, i, -sa);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::suqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t sa = dst.IntLeftJustified(vform, i);
+ uint64_t ub = src.UintLeftJustified(vform, i);
+ int64_t sr = sa + ub;
+
+ if (sr < sa) { // Test for signed positive saturation.
+ dst.SetInt(vform, i, MaxIntFromFormat(vform));
+ } else {
+ dst.SetInt(vform, i, dst.Int(vform, i) + src.Int(vform, i));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::usqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t ua = dst.UintLeftJustified(vform, i);
+ int64_t sb = src.IntLeftJustified(vform, i);
+ uint64_t ur = ua + sb;
+
+ if ((sb > 0) && (ur <= ua)) {
+ dst.SetUint(vform, i, MaxUintFromFormat(vform)); // Positive saturation.
+ } else if ((sb < 0) && (ur >= ua)) {
+ dst.SetUint(vform, i, 0); // Negative saturation.
+ } else {
+ dst.SetUint(vform, i, dst.Uint(vform, i) + src.Int(vform, i));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::abs(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for signed saturation.
+ int64_t sa = src.Int(vform, i);
+ if (sa == MinIntFromFormat(vform)) {
+ dst.SetSignedSat(i, true);
+ }
+ if (sa < 0) {
+ dst.SetInt(vform, i, -sa);
+ } else {
+ dst.SetInt(vform, i, sa);
+ }
+ }
+ return dst;
+}
+
+
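+// Common narrowing helper behind XTN, SQXTN, SQXTUN and UQXTN: it writes the
+// low half of the destination (or the high half for the "2" forms), truncates
+// each source lane to the destination lane size, and records signed/unsigned
+// saturation state for the saturating variants.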
+LogicVRegister Simulator::extractnarrow(VectorFormat dstform,
+ LogicVRegister dst,
+ bool dstIsSigned,
+ const LogicVRegister& src,
+ bool srcIsSigned) {
+ bool upperhalf = false;
+ VectorFormat srcform = kFormatUndefined;
+ int64_t ssrc[8];
+ uint64_t usrc[8];
+
+ switch (dstform) {
+ case kFormat8B : upperhalf = false; srcform = kFormat8H; break;
+ case kFormat16B: upperhalf = true; srcform = kFormat8H; break;
+ case kFormat4H : upperhalf = false; srcform = kFormat4S; break;
+ case kFormat8H : upperhalf = true; srcform = kFormat4S; break;
+ case kFormat2S : upperhalf = false; srcform = kFormat2D; break;
+ case kFormat4S : upperhalf = true; srcform = kFormat2D; break;
+ case kFormatB : upperhalf = false; srcform = kFormatH; break;
+ case kFormatH : upperhalf = false; srcform = kFormatS; break;
+ case kFormatS : upperhalf = false; srcform = kFormatD; break;
+    default: VIXL_UNIMPLEMENTED();
+ }
+
+ for (int i = 0; i < LaneCountFromFormat(srcform); i++) {
+ ssrc[i] = src.Int(srcform, i);
+ usrc[i] = src.Uint(srcform, i);
+ }
+
+ int offset;
+ if (upperhalf) {
+ offset = LaneCountFromFormat(dstform) / 2;
+ } else {
+ offset = 0;
+ dst.ClearForWrite(dstform);
+ }
+
+ for (int i = 0; i < LaneCountFromFormat(srcform); i++) {
+    // Test for signed saturation.
+ if (ssrc[i] > MaxIntFromFormat(dstform)) {
+ dst.SetSignedSat(offset + i, true);
+ } else if (ssrc[i] < MinIntFromFormat(dstform)) {
+ dst.SetSignedSat(offset + i, false);
+ }
+
+    // Test for unsigned saturation.
+ if (srcIsSigned) {
+ if (ssrc[i] > static_cast<int64_t>(MaxUintFromFormat(dstform))) {
+ dst.SetUnsignedSat(offset + i, true);
+ } else if (ssrc[i] < 0) {
+ dst.SetUnsignedSat(offset + i, false);
+ }
+ } else {
+ if (usrc[i] > MaxUintFromFormat(dstform)) {
+ dst.SetUnsignedSat(offset + i, true);
+ }
+ }
+
+ int64_t result;
+ if (srcIsSigned) {
+ result = ssrc[i] & MaxUintFromFormat(dstform);
+ } else {
+ result = usrc[i] & MaxUintFromFormat(dstform);
+ }
+
+ if (dstIsSigned) {
+ dst.SetInt(dstform, offset + i, result);
+ } else {
+ dst.SetUint(dstform, offset + i, result);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::xtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, true, src, true);
+}
+
+
+LogicVRegister Simulator::sqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, true, src, true).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqxtun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, false, src, true).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, false, src, false).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::absdiff(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool issigned) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (issigned) {
+ int64_t sr = src1.Int(vform, i) - src2.Int(vform, i);
+ sr = sr > 0 ? sr : -sr;
+ dst.SetInt(vform, i, sr);
+ } else {
+ int64_t sr = src1.Uint(vform, i) - src2.Uint(vform, i);
+ sr = sr > 0 ? sr : -sr;
+ dst.SetUint(vform, i, sr);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::saba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ dst.ClearForWrite(vform);
+ absdiff(vform, temp, src1, src2, true);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ dst.ClearForWrite(vform);
+ absdiff(vform, temp, src1, src2, false);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::not_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, ~src.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::rbit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ uint64_t reversed_value;
+ uint64_t value;
+ for (int i = 0; i < laneCount; i++) {
+ value = src.Uint(vform, i);
+ reversed_value = 0;
+ for (int j = 0; j < laneSizeInBits; j++) {
+ reversed_value = (reversed_value << 1) | (value & 1);
+ value >>= 1;
+ }
+ result[i] = reversed_value;
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
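+// rev reverses the lane order within each revSize-byte block, providing the
+// shared implementation of REV16, REV32 and REV64.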
+LogicVRegister Simulator::rev(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int revSize) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int laneSize = LaneSizeInBytesFromFormat(vform);
+ int lanesPerLoop = revSize / laneSize;
+ for (int i = 0; i < laneCount; i += lanesPerLoop) {
+ for (int j = 0; j < lanesPerLoop; j++) {
+ result[i + lanesPerLoop - 1 - j] = src.Uint(vform, i + j);
+ }
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::rev16(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 2);
+}
+
+
+LogicVRegister Simulator::rev32(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 4);
+}
+
+
+LogicVRegister Simulator::rev64(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 8);
+}
+
+
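+// Pairwise add-long: each destination lane is the sum of two adjacent source
+// lanes of half the width, optionally accumulated into the existing
+// destination value (SADDLP/UADDLP vs. SADALP/UADALP).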
+LogicVRegister Simulator::addlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool is_signed,
+ bool do_accumulate) {
+ VectorFormat vformsrc = VectorFormatHalfWidthDoubleLanes(vform);
+
+ int64_t sr[16];
+ uint64_t ur[16];
+
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ if (is_signed) {
+ sr[i] = src.Int(vformsrc, 2 * i) + src.Int(vformsrc, 2 * i + 1);
+ } else {
+ ur[i] = src.Uint(vformsrc, 2 * i) + src.Uint(vformsrc, 2 * i + 1);
+ }
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ if (do_accumulate) {
+ if (is_signed) {
+ dst.SetInt(vform, i, dst.Int(vform, i) + sr[i]);
+ } else {
+ dst.SetUint(vform, i, dst.Uint(vform, i) + ur[i]);
+ }
+ } else {
+ if (is_signed) {
+ dst.SetInt(vform, i, sr[i]);
+ } else {
+ dst.SetUint(vform, i, ur[i]);
+ }
+ }
+ }
+
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, true, false);
+}
+
+
+LogicVRegister Simulator::uaddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, false, false);
+}
+
+
+LogicVRegister Simulator::sadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, true, true);
+}
+
+
+LogicVRegister Simulator::uadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, false, true);
+}
+
+
+LogicVRegister Simulator::ext(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ uint8_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount - index; ++i) {
+ result[i] = src1.Uint(vform, i + index);
+ }
+ for (int i = 0; i < index; ++i) {
+ result[laneCount - index + i] = src2.Uint(vform, i);
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::dup_element(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int src_index) {
+ int laneCount = LaneCountFromFormat(vform);
+ uint64_t value = src.Uint(vform, src_index);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, value);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::dup_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ uint64_t value = imm & MaxUintFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, value);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ins_element(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ int src_index) {
+ dst.SetUint(vform, dst_index, src.Uint(vform, src_index));
+ return dst;
+}
+
+
+LogicVRegister Simulator::ins_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ uint64_t imm) {
+ uint64_t value = imm & MaxUintFromFormat(vform);
+ dst.SetUint(vform, dst_index, value);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mov(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int lane = 0; lane < LaneCountFromFormat(vform); lane++) {
+ dst.SetUint(vform, lane, src.Uint(vform, lane));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::movi(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, imm);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::mvni(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, ~imm);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src.Uint(vform, i) | imm;
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src.Uint(vform_half, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetInt(vform, i, src.Int(vform_half, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+ int lane_count = LaneCountFromFormat(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetUint(vform, i, src.Uint(vform_half, lane_count + i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+ int lane_count = LaneCountFromFormat(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetInt(vform, i, src.Int(vform_half, lane_count + i));
+ }
+ return dst;
+}
+
+
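+// The narrowing shifts operate at the double-width source format and then
+// narrow with extractnarrow; the "2" variants compute the source format from
+// the half-lane-count destination and write the upper half of dst.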
+LogicVRegister Simulator::shrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vform_src = VectorFormatDoubleWidth(vform);
+ VectorFormat vform_dst = vform;
+ LogicVRegister shifted_src = ushr(vform_src, temp, src, shift);
+ return extractnarrow(vform_dst, dst, false, shifted_src, false);
+}
+
+
+LogicVRegister Simulator::shrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift);
+ return extractnarrow(vformdst, dst, false, shifted_src, false);
+}
+
+
+LogicVRegister Simulator::rshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc);
+ return extractnarrow(vformdst, dst, false, shifted_src, false);
+}
+
+
+LogicVRegister Simulator::rshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc);
+ return extractnarrow(vformdst, dst, false, shifted_src, false);
+}
+
+
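+// TBL zeroes destination lanes whose index is out of range, which is modelled
+// here by starting from a zeroed result and reusing TBX; TBX itself leaves
+// out-of-range destination lanes unchanged.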
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind) {
+ SimVRegister result;
+ movi(vform, result, 0);
+ tbx(vform, result, tab, ind);
+ return orr(vform, dst, result, result);
+}
+
+
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind) {
+ SimVRegister result;
+ movi(vform, result, 0);
+ tbx(vform, result, tab, tab2, ind);
+ return orr(vform, dst, result, result);
+}
+
+
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind) {
+ SimVRegister result;
+ movi(vform, result, 0);
+ tbx(vform, result, tab, tab2, tab3, ind);
+ return orr(vform, dst, result, result);
+}
+
+
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind) {
+ SimVRegister result;
+ movi(vform, result, 0);
+ tbx(vform, result, tab, tab2, tab3, tab4, ind);
+ return orr(vform, dst, result, result);
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break;
+ case 2: dst.SetUint(vform, i, tab3.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break;
+ case 2: dst.SetUint(vform, i, tab3.Uint(kFormat16B, j & 15)); break;
+ case 3: dst.SetUint(vform, i, tab4.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return shrn(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return shrn2(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return rshrn(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return rshrn2(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
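+// The widening arithmetic helpers below extend their operands into
+// temporaries (uxtl/sxtl for the low half, uxtl2/sxtl2 for the high half) and
+// then reuse the full-width vector operations.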
+LogicVRegister Simulator::uaddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl2(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl2(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl2(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl2(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ uaba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ uaba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ saba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ saba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabdl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabdl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabdl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabdl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sqdmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull(vform, temp, src1, src2);
+ return add(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull2(vform, temp, src1, src2);
+ return add(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull(vform, temp, src1, src2);
+ return sub(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull2(vform, temp, src1, src2);
+ return sub(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
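+// sqdmull doubles the widened product by adding it to itself and then applies
+// signed saturation, so only the INT_MIN * INT_MIN case actually saturates.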
+LogicVRegister Simulator::sqdmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = smull(vform, temp, src1, src2);
+ return add(vform, dst, product, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = smull2(vform, temp, src1, src2);
+ return add(vform, dst, product, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool round) {
+  // 2 * INT_32_MIN * INT_32_MIN overflows int64_t.
+  // To avoid this, we compute (src1 * src2 + (1 << (esize - 2))) >> (esize - 1),
+  // which is the same as (2 * src1 * src2 + (1 << (esize - 1))) >> esize.
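+  // For example, with 16-bit lanes (esize == 16) the rounding form computes
+  // (src1 * src2 + 0x4000) >> 15 per lane, clamped to [INT16_MIN, INT16_MAX].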
+
+ int esize = LaneSizeInBitsFromFormat(vform);
+ int round_const = round ? (1 << (esize - 2)) : 0;
+ int64_t product;
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ product = src1.Int(vform, i) * src2.Int(vform, i);
+ product += round_const;
+ product = product >> (esize - 1);
+
+ if (product > MaxIntFromFormat(vform)) {
+ product = MaxIntFromFormat(vform);
+ } else if (product < MinIntFromFormat(vform)) {
+ product = MinIntFromFormat(vform);
+ }
+ dst.SetInt(vform, i, product);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sqrdmulh(vform, dst, src1, src2, false);
+}
+
+
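+// ADDHN/SUBHN and their rounding variants operate on double-width sources:
+// the wide result of the add/subtract is narrowed by keeping only the high
+// half of each lane (with rounding for RADDHN/RSUBHN). The "2" forms write
+// the upper half of the destination register instead of the lower half.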
+LogicVRegister Simulator::addhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::addhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::raddhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::raddhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::subhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::subhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::rsubhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::rsubhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
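+// TRN1 interleaves the even-numbered lanes of the two sources; for example,
+// with four lanes the result is { src1[0], src2[0], src1[2], src2[2] }.
+// TRN2 does the same with the odd-numbered lanes.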
+LogicVRegister Simulator::trn1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, 2 * i);
+ result[(2 * i) + 1] = src2.Uint(vform, 2 * i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::trn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, (2 * i) + 1);
+ result[(2 * i) + 1] = src2.Uint(vform, (2 * i) + 1);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::zip1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, i);
+ result[(2 * i) + 1] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::zip2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, pairs + i);
+ result[(2 * i) + 1] = src2.Uint(vform, pairs + i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uzp1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[32];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src1.Uint(vform, i);
+ result[laneCount + i] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[2 * i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uzp2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[32];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src1.Uint(vform, i);
+ result[laneCount + i] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+    dst.SetUint(vform, i, result[(2 * i) + 1]);
+ }
+ return dst;
+}
+
+
+template <typename T>
+T Simulator::FPAdd(T op1, T op2) {
+ T result = FPProcessNaNs(op1, op2);
+ if (std::isnan(result)) return result;
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
+ // inf + -inf returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 + op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPSub(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
+ // inf - inf returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 - op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMul(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 * op2;
+ }
+}
+
+
+template<typename T>
+T Simulator::FPMulx(T op1, T op2) {
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns +/-2.0.
+ T two = 2.0;
+ return copysign(1.0, op1) * copysign(1.0, op2) * two;
+ }
+ return FPMul(op1, op2);
+}
+
+
+template<typename T>
+T Simulator::FPMulAdd(T a, T op1, T op2) {
+ T result = FPProcessNaNs3(a, op1, op2);
+
+ T sign_a = copysign(1.0, a);
+ T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
+ bool isinf_prod = std::isinf(op1) || std::isinf(op2);
+ bool operation_generates_nan =
+ (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0
+ (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf
+ (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf
+
+ if (std::isnan(result)) {
+ // Generated NaNs override quiet NaNs propagated from a.
+ if (operation_generates_nan && IsQuietNaN(a)) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ return result;
+ }
+ }
+
+ // If the operation would produce a NaN, return the default NaN.
+ if (operation_generates_nan) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ }
+
+ // Work around broken fma implementations for exact zero results: The sign of
+ // exact 0.0 results is positive unless both a and op1 * op2 are negative.
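+  // For example, with a = -0.0, op1 = 0.0 and op2 = -1.0, both sign_a and
+  // sign_prod are negative, so the fused result is -0.0; with a = +0.0 the
+  // result is +0.0, matching a correctly-fused a + op1 * op2.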
+ if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
+ return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
+ }
+
+ result = FusedMultiplyAdd(op1, op2, a);
+ VIXL_ASSERT(!std::isnan(result));
+
+ // Work around broken fma implementations for rounded zero results: If a is
+ // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
+ if ((a == 0.0) && (result == 0.0)) {
+ return copysign(0.0, sign_prod);
+ }
+
+ return result;
+}
+
+
+template <typename T>
+T Simulator::FPDiv(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
+ // inf / inf and 0.0 / 0.0 return the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ if (op2 == 0.0) FPProcessException();
+
+ // Other cases should be handled by standard arithmetic.
+ return op1 / op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPSqrt(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op < 0.0) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ return sqrt(op);
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+ T result = FPProcessNaNs(a, b);
+ if (std::isnan(result)) return result;
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return +0.0.
+ return 0.0;
+ } else {
+ return (a > b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMaxNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64NegativeInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64NegativeInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMax(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPMin(T a, T b) {
+ T result = FPProcessNaNs(a, b);
+ if (std::isnan(result)) return result;
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return -0.0.
+ return -0.0;
+ } else {
+ return (a < b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMinNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64PositiveInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64PositiveInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMin(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPRecipStepFused(T op1, T op2) {
+ const T two = 2.0;
+ if ((std::isinf(op1) && (op2 == 0.0))
+ || ((op1 == 0.0) && (std::isinf(op2)))) {
+ return two;
+ } else if (std::isinf(op1) || std::isinf(op2)) {
+ // Return +inf if signs match, otherwise -inf.
+ return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity
+ : kFP64NegativeInfinity;
+ } else {
+ return FusedMultiplyAdd(op1, op2, two);
+ }
+}
+
+
+template <typename T>
+T Simulator::FPRSqrtStepFused(T op1, T op2) {
+ const T one_point_five = 1.5;
+ const T two = 2.0;
+
+ if ((std::isinf(op1) && (op2 == 0.0))
+ || ((op1 == 0.0) && (std::isinf(op2)))) {
+ return one_point_five;
+ } else if (std::isinf(op1) || std::isinf(op2)) {
+ // Return +inf if signs match, otherwise -inf.
+ return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity
+ : kFP64NegativeInfinity;
+ } else {
+ // The multiply-add-halve operation must be fully fused, so avoid interim
+ // rounding by checking which operand can be losslessly divided by two
+ // before doing the multiply-add.
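+    // (Halving a normal value only decrements its exponent, so no mantissa
+    // bits are lost as long as the result is still normal, which is exactly
+    // what the std::isnormal checks below guarantee.)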
+ if (std::isnormal(op1 / two)) {
+ return FusedMultiplyAdd(op1 / two, op2, one_point_five);
+ } else if (std::isnormal(op2 / two)) {
+ return FusedMultiplyAdd(op1, op2 / two, one_point_five);
+ } else {
+ // Neither operand is normal after halving: the result is dominated by
+ // the addition term, so just return that.
+ return one_point_five;
+ }
+ }
+}
+
+int32_t Simulator::FPToFixedJS(double value) {
+ // The Z-flag is set when the conversion from double precision floating-point
+ // to 32-bit integer is exact. If the source value is +/-Infinity, -0.0, NaN,
+  // outside the bounds of a 32-bit integer, or not an exact integer, the
+ // Z-flag is unset.
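+  // For example, 3.0 converts to 3 with Z set, while 3.5 and -1.5 truncate
+  // toward zero to 3 and -1 respectively with Z unset, and -0.0 converts to 0
+  // with Z unset.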
+ int Z = 1;
+ int32_t result;
+
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+    // +/- zero and infinity all return zero; however, -0 and +/- Infinity
+    // also unset the Z-flag.
+    result = 0;
+ if ((value != 0.0) || std::signbit(value)) {
+ Z = 0;
+ }
+ } else if (std::isnan(value)) {
+ // NaN values unset the Z-flag and set the result to 0.
+ FPProcessNaN(value);
+ result = 0;
+ Z = 0;
+ } else {
+ // All other values are converted to an integer representation, rounded
+ // toward zero.
+ double int_result = std::floor(value);
+ double error = value - int_result;
+
+ if ((error != 0.0) && (int_result < 0.0)) {
+ int_result++;
+ }
+
+ // Constrain the value into the range [INT32_MIN, INT32_MAX]. We can almost
+ // write a one-liner with std::round, but the behaviour on ties is incorrect
+ // for our purposes.
+ double mod_const = static_cast<double>(UINT64_C(1) << 32);
+ double mod_error =
+ (int_result / mod_const) - std::floor(int_result / mod_const);
+ double constrained;
+ if (mod_error == 0.5) {
+ constrained = INT32_MIN;
+ } else {
+ constrained = int_result - mod_const * round(int_result / mod_const);
+ }
+
+ VIXL_ASSERT(std::floor(constrained) == constrained);
+ VIXL_ASSERT(constrained >= INT32_MIN);
+ VIXL_ASSERT(constrained <= INT32_MAX);
+
+ // Take the bottom 32 bits of the result as a 32-bit integer.
+ result = static_cast<int32_t>(constrained);
+
+ if ((int_result < INT32_MIN) || (int_result > INT32_MAX) ||
+ (error != 0.0)) {
+ // If the integer result is out of range or the conversion isn't exact,
+ // take exception and unset the Z-flag.
+ FPProcessException();
+ Z = 0;
+ }
+ }
+
+ ReadNzcv().SetN(0);
+ ReadNzcv().SetZ(Z);
+ ReadNzcv().SetC(0);
+ ReadNzcv().SetV(0);
+
+ return result;
+}
+
+
+double Simulator::FPRoundInt(double value, FPRounding round_mode) {
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+ return value;
+ } else if (std::isnan(value)) {
+ return FPProcessNaN(value);
+ }
+
+ double int_result = std::floor(value);
+ double error = value - int_result;
+ switch (round_mode) {
+ case FPTieAway: {
+ // Take care of correctly handling the range ]-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 < value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is positive, round up.
+ int_result++;
+ }
+ break;
+ }
+ case FPTieEven: {
+ // Take care of correctly handling the range [-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 <= value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ } else if ((error > 0.5) ||
+ ((error == 0.5) && (std::fmod(int_result, 2) != 0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPZero: {
+      // If value > 0 then we take floor(value);
+      // otherwise, ceil(value).
+ if (value < 0) {
+ int_result = ceil(value);
+ }
+ break;
+ }
+ case FPNegativeInfinity: {
+ // We always use floor(value).
+ break;
+ }
+ case FPPositiveInfinity: {
+ // Take care of correctly handling the range ]-1.0, -0.0], which must
+ // yield -0.0.
+ if ((-1.0 < value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ // If the error is non-zero, round up.
+ } else if (error > 0.0) {
+ int_result++;
+ }
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+ return int_result;
+}
+
+
+int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxInt) {
+ return kWMaxInt;
+ } else if (value < kWMinInt) {
+ return kWMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int32_t>(value);
+}
+
+
+int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ // The compiler would have to round kXMaxInt, triggering a warning. Compare
+ // against the largest int64_t that is exactly representable as a double.
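+  // (Doubles in [2^62, 2^63) are spaced 2^10 apart, so the largest int64_t
+  // that is exactly representable as a double is 2^63 - 1024.)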
+ if (value > kXMaxExactInt) {
+ return kXMaxInt;
+ } else if (value < kXMinInt) {
+ return kXMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int64_t>(value);
+}
+
+
+uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxUInt) {
+ return kWMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
+}
+
+
+uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ // The compiler would have to round kXMaxUInt, triggering a warning. Compare
+ // against the largest uint64_t that is exactly representable as a double.
+ if (value > kXMaxExactUInt) {
+ return kXMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
+}
+
+
+#define DEFINE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \
+template <typename T> \
+LogicVRegister Simulator::FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ dst.ClearForWrite(vform); \
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) { \
+ T op1 = src1.Float<T>(i); \
+ T op2 = src2.Float<T>(i); \
+ T result; \
+ if (PROCNAN) { \
+ result = FPProcessNaNs(op1, op2); \
+ if (!std::isnan(result)) { \
+ result = OP(op1, op2); \
+ } \
+ } else { \
+ result = OP(op1, op2); \
+ } \
+ dst.SetFloat(i, result); \
+ } \
+ return dst; \
+} \
+ \
+LogicVRegister Simulator::FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { \
+ FN<float>(vform, dst, src1, src2); \
+ } else { \
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); \
+ FN<double>(vform, dst, src1, src2); \
+ } \
+ return dst; \
+}
+NEON_FP3SAME_LIST(DEFINE_NEON_FP_VECTOR_OP)
+#undef DEFINE_NEON_FP_VECTOR_OP
+
+
+LogicVRegister Simulator::fnmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = fmul(vform, temp, src1, src2);
+ return fneg(vform, dst, product);
+}
+
+
+template <typename T>
+LogicVRegister Simulator::frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
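+    // Negate src1 so that FPRecipStepFused computes 2 - src1 * src2, as
+    // required by FRECPS.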
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T result = FPProcessNaNs(op1, op2);
+ dst.SetFloat(i, std::isnan(result) ? result : FPRecipStepFused(op1, op2));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ frecps<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ frecps<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
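+    // Negate src1 so that FPRSqrtStepFused computes (3 - src1 * src2) / 2, as
+    // required by FRSQRTS.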
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T result = FPProcessNaNs(op1, op2);
+ dst.SetFloat(i, std::isnan(result) ? result : FPRSqrtStepFused(op1, op2));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ frsqrts<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ frsqrts<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ bool result = false;
+ T op1 = src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T nan_result = FPProcessNaNs(op1, op2);
+ if (!std::isnan(nan_result)) {
+ switch (cond) {
+ case eq: result = (op1 == op2); break;
+ case ge: result = (op1 >= op2); break;
+ case gt: result = (op1 > op2) ; break;
+ case le: result = (op1 <= op2); break;
+ case lt: result = (op1 < op2) ; break;
+ default: VIXL_UNREACHABLE(); break;
+ }
+ }
+ dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fcmp<float>(vform, dst, src1, src2, cond);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fcmp<double>(vform, dst, src1, src2, cond);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcmp_zero(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ Condition cond) {
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister zero_reg = dup_immediate(vform, temp, FloatToRawbits(0.0));
+ fcmp<float>(vform, dst, src, zero_reg, cond);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister zero_reg = dup_immediate(vform, temp,
+ DoubleToRawbits(0.0));
+ fcmp<double>(vform, dst, src, zero_reg, cond);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fabscmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ SimVRegister temp1, temp2;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister abs_src1 = fabs_<float>(vform, temp1, src1);
+ LogicVRegister abs_src2 = fabs_<float>(vform, temp2, src2);
+ fcmp<float>(vform, dst, abs_src1, abs_src2, cond);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister abs_src1 = fabs_<double>(vform, temp1, src1);
+ LogicVRegister abs_src2 = fabs_<double>(vform, temp2, src2);
+ fcmp<double>(vform, dst, abs_src1, abs_src2, cond);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T acc = dst.Float<T>(i);
+ T result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fmla<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fmla<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
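+    // Negate src1 so that the multiply-accumulate below computes
+    // acc - src1 * src2, as required by FMLS.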
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T acc = dst.Float<T>(i);
+ T result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fmls<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fmls<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ op = -op;
+ dst.SetFloat(i, op);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fneg<float>(vform, dst, src);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fneg<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ if (copysign(1.0, op) < 0.0) {
+ op = -op;
+ }
+ dst.SetFloat(i, op);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fabs_<float>(vform, dst, src);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fabs_<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fabd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ fsub(vform, temp, src1, src2);
+ fabs_(vform, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::fsqrt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float result = FPSqrt(src.Float<float>(i));
+ dst.SetFloat(i, result);
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double result = FPSqrt(src.Float<double>(i));
+ dst.SetFloat(i, result);
+ }
+ }
+ return dst;
+}
+
+
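+// Pairwise FP operations: the three-operand form below de-interleaves
+// adjacent lane pairs with uzp1/uzp2 and applies the base operation
+// lane-wise; the two-operand (scalar) form reduces the two lanes of a
+// single source.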
+#define DEFINE_NEON_FP_PAIR_OP(FNP, FN, OP) \
+LogicVRegister Simulator::FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ SimVRegister temp1, temp2; \
+ uzp1(vform, temp1, src1, src2); \
+ uzp2(vform, temp2, src1, src2); \
+ FN(vform, dst, temp1, temp2); \
+ return dst; \
+} \
+ \
+LogicVRegister Simulator::FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src) { \
+ if (vform == kFormatS) { \
+ float result = OP(src.Float<float>(0), src.Float<float>(1)); \
+ dst.SetFloat(0, result); \
+ } else { \
+ VIXL_ASSERT(vform == kFormatD); \
+ double result = OP(src.Float<double>(0), src.Float<double>(1)); \
+ dst.SetFloat(0, result); \
+ } \
+ dst.ClearForWrite(vform); \
+ return dst; \
+}
+NEON_FPPAIRWISE_LIST(DEFINE_NEON_FP_PAIR_OP)
+#undef DEFINE_NEON_FP_PAIR_OP
+
+
+LogicVRegister Simulator::fminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPMinMaxOp Op) {
+ VIXL_ASSERT(vform == kFormat4S);
+ USE(vform);
+ float result1 = (this->*Op)(src.Float<float>(0), src.Float<float>(1));
+ float result2 = (this->*Op)(src.Float<float>(2), src.Float<float>(3));
+ float result = (this->*Op)(result1, result2);
+ dst.ClearForWrite(kFormatS);
+ dst.SetFloat<float>(0, result);
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMax);
+}
+
+
+LogicVRegister Simulator::fminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMin);
+}
+
+
+LogicVRegister Simulator::fmaxnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMaxNM);
+}
+
+
+LogicVRegister Simulator::fminnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMinNM);
+}
+
+
+LogicVRegister Simulator::fmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmul<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmul<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmla<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmla<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmls<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmls<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmulx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmulx<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmulx<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frint(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ bool inexact_exception) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ float rounded = FPRoundInt(input, rounding_mode);
+ if (inexact_exception && !std::isnan(input) && (input != rounded)) {
+ FPProcessException();
+ }
+ dst.SetFloat<float>(i, rounded);
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ double rounded = FPRoundInt(input, rounding_mode);
+ if (inexact_exception && !std::isnan(input) && (input != rounded)) {
+ FPProcessException();
+ }
+ dst.SetFloat<double>(i, rounded);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op = src.Float<float>(i) * std::pow(2.0f, fbits);
+ dst.SetInt(vform, i, FPToInt32(op, rounding_mode));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double op = src.Float<double>(i) * std::pow(2.0, fbits);
+ dst.SetInt(vform, i, FPToInt64(op, rounding_mode));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op = src.Float<float>(i) * std::pow(2.0f, fbits);
+ dst.SetUint(vform, i, FPToUInt32(op, rounding_mode));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double op = src.Float<double>(i) * std::pow(2.0, fbits);
+ dst.SetUint(vform, i, FPToUInt64(op, rounding_mode));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+ // TODO: Full support for SimFloat16 in SimRegister(s).
+ dst.SetFloat(i,
+ FPToFloat(RawbitsToFloat16(src.Float<uint16_t>(i)),
+ ReadDN()));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+ dst.SetFloat(i, FPToDouble(src.Float<float>(i), ReadDN()));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int lane_count = LaneCountFromFormat(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < lane_count; i++) {
+ // TODO: Full support for SimFloat16 in SimRegister(s).
+ dst.SetFloat(i,
+ FPToFloat(RawbitsToFloat16(
+ src.Float<uint16_t>(i + lane_count)),
+ ReadDN()));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetFloat(i, FPToDouble(src.Float<float>(i + lane_count), ReadDN()));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ SimVRegister tmp;
+ LogicVRegister srctmp = mov(kFormat2D, tmp, src);
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i,
+ Float16ToRawbits(FPToFloat16(srctmp.Float<float>(i),
+ FPTieEven,
+ ReadDN())));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat(srctmp.Float<double>(i), FPTieEven, ReadDN()));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int lane_count = LaneCountFromFormat(vform) / 2;
+ if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count,
+ Float16ToRawbits(
+ FPToFloat16(src.Float<float>(i), FPTieEven, ReadDN())));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count,
+ FPToFloat(src.Float<double>(i), FPTieEven, ReadDN()));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtxn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ SimVRegister tmp;
+ LogicVRegister srctmp = mov(kFormat2D, tmp, src);
+ dst.ClearForWrite(vform);
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat(srctmp.Float<double>(i), FPRoundOdd, ReadDN()));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtxn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ int lane_count = LaneCountFromFormat(vform) / 2;
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count,
+ FPToFloat(src.Float<double>(i), FPRoundOdd, ReadDN()));
+ }
+ return dst;
+}
+
+
+// Based on reference C function recip_sqrt_estimate from ARM ARM.
+double Simulator::recip_sqrt_estimate(double a) {
+ int q0, q1, s;
+ double r;
+ if (a < 0.5) {
+ q0 = static_cast<int>(a * 512.0);
+ r = 1.0 / sqrt((static_cast<double>(q0) + 0.5) / 512.0);
+ } else {
+ q1 = static_cast<int>(a * 256.0);
+ r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0);
+ }
+ s = static_cast<int>(256.0 * r + 0.5);
+ return static_cast<double>(s) / 256.0;
+}
+
+
+static inline uint64_t Bits(uint64_t val, int start_bit, int end_bit) {
+ return ExtractUnsignedBitfield64(start_bit, end_bit, val);
+}
+
+
+template <typename T>
+T Simulator::FPRecipSqrtEstimate(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op == 0.0) {
+ if (copysign(1.0, op) < 0.0) {
+ return kFP64NegativeInfinity;
+ } else {
+ return kFP64PositiveInfinity;
+ }
+ } else if (copysign(1.0, op) < 0.0) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else if (std::isinf(op)) {
+ return 0.0;
+ } else {
+ uint64_t fraction;
+ int exp, result_exp;
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ exp = FloatExp(op);
+ fraction = FloatMantissa(op);
+ fraction <<= 29;
+ } else {
+ exp = DoubleExp(op);
+ fraction = DoubleMantissa(op);
+ }
+
+ if (exp == 0) {
+ while (Bits(fraction, 51, 51) == 0) {
+ fraction = Bits(fraction, 50, 0) << 1;
+ exp -= 1;
+ }
+ fraction = Bits(fraction, 50, 0) << 1;
+ }
+
+ double scaled;
+ if (Bits(exp, 0, 0) == 0) {
+ scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44);
+ } else {
+ scaled = DoublePack(0, 1021, Bits(fraction, 51, 44) << 44);
+ }
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ result_exp = (380 - exp) / 2;
+ } else {
+ result_exp = (3068 - exp) / 2;
+ }
+
+ uint64_t estimate = DoubleToRawbits(recip_sqrt_estimate(scaled));
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
+ uint32_t est_bits = static_cast<uint32_t>(Bits(estimate, 51, 29));
+ return FloatPack(0, exp_bits, est_bits);
+ } else {
+ return DoublePack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0));
+ }
+ }
+}
+
+
+LogicVRegister Simulator::frsqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ dst.SetFloat(i, FPRecipSqrtEstimate<float>(input));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ dst.SetFloat(i, FPRecipSqrtEstimate<double>(input));
+ }
+ }
+ return dst;
+}
+
+template <typename T>
+T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
+ uint32_t sign;
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ sign = FloatSign(op);
+ } else {
+ sign = DoubleSign(op);
+ }
+
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (std::isinf(op)) {
+ return (sign == 1) ? -0.0 : 0.0;
+ } else if (op == 0.0) {
+ FPProcessException(); // FPExc_DivideByZero exception.
+ return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity;
+ } else if (((sizeof(T) == sizeof(float)) && // NOLINT(runtime/sizeof)
+ (std::fabs(op) < std::pow(2.0, -128.0))) ||
+ ((sizeof(T) == sizeof(double)) && // NOLINT(runtime/sizeof)
+ (std::fabs(op) < std::pow(2.0, -1024.0)))) {
+ bool overflow_to_inf = false;
+ switch (rounding) {
+ case FPTieEven: overflow_to_inf = true; break;
+ case FPPositiveInfinity: overflow_to_inf = (sign == 0); break;
+ case FPNegativeInfinity: overflow_to_inf = (sign == 1); break;
+ case FPZero: overflow_to_inf = false; break;
+ default: break;
+ }
+ FPProcessException(); // FPExc_Overflow and FPExc_Inexact.
+ if (overflow_to_inf) {
+ return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity;
+ } else {
+ // Return FPMaxNormal(sign).
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ return FloatPack(sign, 0xfe, 0x07fffff);
+ } else {
+ return DoublePack(sign, 0x7fe, 0x0fffffffffffffl);
+ }
+ }
+ } else {
+ uint64_t fraction;
+ int exp, result_exp;
+ uint32_t sign;
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ sign = FloatSign(op);
+ exp = FloatExp(op);
+ fraction = FloatMantissa(op);
+ fraction <<= 29;
+ } else {
+ sign = DoubleSign(op);
+ exp = DoubleExp(op);
+ fraction = DoubleMantissa(op);
+ }
+
+ if (exp == 0) {
+ if (Bits(fraction, 51, 51) == 0) {
+ exp -= 1;
+ fraction = Bits(fraction, 49, 0) << 2;
+ } else {
+ fraction = Bits(fraction, 50, 0) << 1;
+ }
+ }
+
+ double scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44);
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ result_exp = (253 - exp); // In range 253-254 = -1 to 253+1 = 254.
+ } else {
+ result_exp = (2045 - exp); // In range 2045-2046 = -1 to 2045+1 = 2046.
+ }
+
+ double estimate = recip_estimate(scaled);
+
+ fraction = DoubleMantissa(estimate);
+ if (result_exp == 0) {
+ fraction = (UINT64_C(1) << 51) | Bits(fraction, 51, 1);
+ } else if (result_exp == -1) {
+ fraction = (UINT64_C(1) << 50) | Bits(fraction, 51, 2);
+ result_exp = 0;
+ }
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
+ uint32_t frac_bits = static_cast<uint32_t>(Bits(fraction, 51, 29));
+ return FloatPack(sign, exp_bits, frac_bits);
+ } else {
+ return DoublePack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0));
+ }
+ }
+}
+
+
+LogicVRegister Simulator::frecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding round) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ dst.SetFloat(i, FPRecipEstimate<float>(input, round));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ dst.SetFloat(i, FPRecipEstimate<double>(input, round));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ursqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ uint64_t operand;
+ uint32_t result;
+ double dp_operand, dp_result;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ operand = src.Uint(vform, i);
+ if (operand <= 0x3FFFFFFF) {
+ result = 0xFFFFFFFF;
+ } else {
+ dp_operand = operand * std::pow(2.0, -32);
+ dp_result = recip_sqrt_estimate(dp_operand) * std::pow(2.0, 31);
+ result = static_cast<uint32_t>(dp_result);
+ }
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+// Based on reference C function recip_estimate from ARM ARM.
+double Simulator::recip_estimate(double a) {
+ int q, s;
+ double r;
+ q = static_cast<int>(a * 512.0);
+ r = 1.0 / ((static_cast<double>(q) + 0.5) / 512.0);
+ s = static_cast<int>(256.0 * r + 0.5);
+ return static_cast<double>(s) / 256.0;
+}
+
+
+LogicVRegister Simulator::urecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ uint64_t operand;
+ uint32_t result;
+ double dp_operand, dp_result;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ operand = src.Uint(vform, i);
+ if (operand <= 0x7FFFFFFF) {
+ result = 0xFFFFFFFF;
+ } else {
+ dp_operand = operand * std::pow(2.0, -32);
+ dp_result = recip_estimate(dp_operand) * std::pow(2.0, 31);
+ result = static_cast<uint32_t>(dp_result);
+ }
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ T result;
+ if (std::isnan(op)) {
+ result = FPProcessNaN(op);
+ } else {
+ int exp;
+ uint32_t sign;
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ sign = FloatSign(op);
+ exp = FloatExp(op);
+ exp = (exp == 0) ? (0xFF - 1) : static_cast<int>(Bits(~exp, 7, 0));
+ result = FloatPack(sign, exp, 0);
+ } else {
+ sign = DoubleSign(op);
+ exp = DoubleExp(op);
+ exp = (exp == 0) ? (0x7FF - 1) : static_cast<int>(Bits(~exp, 10, 0));
+ result = DoublePack(sign, exp, 0);
+ }
+ }
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ frecpx<float>(vform, dst, src);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ frecpx<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::scvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding round) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ float result = FixedToFloat(src.Int(kFormatS, i), fbits, round);
+ dst.SetFloat<float>(i, result);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ double result = FixedToDouble(src.Int(kFormatD, i), fbits, round);
+ dst.SetFloat<double>(i, result);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ucvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding round) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ float result = UFixedToFloat(src.Uint(kFormatS, i), fbits, round);
+ dst.SetFloat<float>(i, result);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ double result = UFixedToDouble(src.Uint(kFormatD, i), fbits, round);
+ dst.SetFloat<double>(i, result);
+ }
+ }
+ return dst;
+}
+
+
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
new file mode 100644
index 0000000000..5c4a5ce145
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
@@ -0,0 +1,2027 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/MacroAssembler-vixl.h"
+
+#include <ctype.h>
+
+namespace vixl {
+
+MacroAssembler::MacroAssembler()
+ : js::jit::Assembler(),
+ sp_(x28),
+ tmp_list_(ip0, ip1),
+ fptmp_list_(d31)
+{
+}
+
+
+void MacroAssembler::FinalizeCode() {
+ Assembler::FinalizeCode();
+}
+
+
+int MacroAssembler::MoveImmediateHelper(MacroAssembler* masm,
+ const Register &rd,
+ uint64_t imm) {
+ bool emit_code = (masm != NULL);
+ VIXL_ASSERT(IsUint32(imm) || IsInt32(imm) || rd.Is64Bits());
+ // The worst case for size is mov 64-bit immediate to sp:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to move to sp
+ MacroEmissionCheckScope guard(masm);
+
+  // Immediates on AArch64 can be produced using an initial value and zero to
+  // three move-keep operations.
+ //
+ // Initial values can be generated with:
+ // 1. 64-bit move zero (movz).
+ // 2. 32-bit move inverted (movn).
+ // 3. 64-bit move inverted.
+ // 4. 32-bit orr immediate.
+ // 5. 64-bit orr immediate.
+ // Move-keep may then be used to modify each of the 16-bit half words.
+ //
+ // The code below supports all five initial value generators, and
+ // applying move-keep operations to move-zero and move-inverted initial
+ // values.
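+  // Illustrative examples: 0x0000123400005678 is materialised as a movz of
+  // the low halfword followed by one movk (two instructions), whereas
+  // 0xffff1234ffff5678 is cheaper starting from move-inverted: a movn of
+  // 0xa987 followed by a single movk for the 0x1234 halfword.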
+
+ // Try to move the immediate in one instruction, and if that fails, switch to
+ // using multiple instructions.
+ if (OneInstrMoveImmediateHelper(masm, rd, imm)) {
+ return 1;
+ } else {
+ int instruction_count = 0;
+ unsigned reg_size = rd.size();
+
+ // Generic immediate case. Imm will be represented by
+ // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
+ // A move-zero or move-inverted is generated for the first non-zero or
+ // non-0xffff immX, and a move-keep for subsequent non-zero immX.
+
+ uint64_t ignored_halfword = 0;
+ bool invert_move = false;
+ // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // halfwords, it's more efficient to use move-inverted.
+ if (CountClearHalfWords(~imm, reg_size) >
+ CountClearHalfWords(imm, reg_size)) {
+ ignored_halfword = 0xffff;
+ invert_move = true;
+ }
+
+ // Mov instructions can't move values into the stack pointer, so set up a
+ // temporary register, if needed.
+ UseScratchRegisterScope temps;
+ Register temp;
+ if (emit_code) {
+ temps.Open(masm);
+ temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
+ }
+
+ // Iterate through the halfwords. Use movn/movz for the first non-ignored
+ // halfword, and movk for subsequent halfwords.
+ VIXL_ASSERT((reg_size % 16) == 0);
+ bool first_mov_done = false;
+ for (unsigned i = 0; i < (temp.size() / 16); i++) {
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
+ if (imm16 != ignored_halfword) {
+ if (!first_mov_done) {
+ if (invert_move) {
+ if (emit_code) masm->movn(temp, ~imm16 & 0xffff, 16 * i);
+ instruction_count++;
+ } else {
+ if (emit_code) masm->movz(temp, imm16, 16 * i);
+ instruction_count++;
+ }
+ first_mov_done = true;
+ } else {
+ // Construct a wider constant.
+ if (emit_code) masm->movk(temp, imm16, 16 * i);
+ instruction_count++;
+ }
+ }
+ }
+
+ VIXL_ASSERT(first_mov_done);
+
+ // Move the temporary if the original destination register was the stack
+ // pointer.
+ if (rd.IsSP()) {
+ if (emit_code) masm->mov(rd, temp);
+ instruction_count++;
+ }
+ return instruction_count;
+ }
+}
+
+
+bool MacroAssembler::OneInstrMoveImmediateHelper(MacroAssembler* masm,
+ const Register& dst,
+ int64_t imm) {
+ bool emit_code = masm != NULL;
+ unsigned n, imm_s, imm_r;
+ int reg_size = dst.size();
+
+ if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't write
+ // to the stack pointer.
+ if (emit_code) {
+ masm->movz(dst, imm);
+ }
+ return true;
+ } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move negative instruction. Movn can't
+ // write to the stack pointer.
+ if (emit_code) {
+ masm->movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ }
+ return true;
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ VIXL_ASSERT(!dst.IsZero());
+ if (emit_code) {
+ masm->LogicalImmediate(
+ dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
+ }
+ return true;
+ }
+ return false;
+}
+
+
+void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+ VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
+ ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ B(static_cast<Condition>(type), label);
+ } else {
+ switch (type) {
+ case always: B(label); break;
+ case never: break;
+ case reg_zero: Cbz(reg, label); break;
+ case reg_not_zero: Cbnz(reg, label); break;
+ case reg_bit_clear: Tbz(reg, bit, label); break;
+ case reg_bit_set: Tbnz(reg, bit, label); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ }
+}
+
+
+void MacroAssembler::B(Label* label) {
+ SingleEmissionCheckScope guard(this);
+ b(label);
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+ Label done;
+ b(&done, InvertCondition(cond));
+ b(label);
+ bind(&done);
+ } else {
+ b(label, cond);
+ }
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+ Label done;
+ cbz(rt, &done);
+ b(label);
+ bind(&done);
+ } else {
+ cbnz(rt, label);
+ }
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+ Label done;
+ cbnz(rt, &done);
+ b(label);
+ bind(&done);
+ } else {
+ cbz(rt, label);
+ }
+}
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
+ Label done;
+ tbz(rt, bit_pos, &done);
+ b(label);
+ bind(&done);
+ } else {
+ tbnz(rt, bit_pos, label);
+ }
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
+ Label done;
+ tbnz(rt, bit_pos, &done);
+ b(label);
+ bind(&done);
+ } else {
+ tbz(rt, bit_pos, label);
+ }
+}
+
+
+void MacroAssembler::And(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, AND);
+}
+
+
+void MacroAssembler::Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Tst(const Register& rn,
+ const Operand& operand) {
+ Ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, BIC);
+}
+
+
+void MacroAssembler::Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, BICS);
+}
+
+
+void MacroAssembler::Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, ORR);
+}
+
+
+void MacroAssembler::Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, ORN);
+}
+
+
+void MacroAssembler::Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, EOR);
+}
+
+
+void MacroAssembler::Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, EON);
+}
+
+
+void MacroAssembler::LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ // The worst case for size is logical immediate to sp:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to do the operation
+ // * 1 instruction to move to sp
+ MacroEmissionCheckScope guard(this);
+ UseScratchRegisterScope temps(this);
+
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.size();
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = ~immediate;
+ }
+
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+ // Check that the top 32 bits are consistent.
+ VIXL_ASSERT(((immediate >> kWRegSize) == 0) ||
+ ((immediate >> kWRegSize) == -1));
+ immediate &= kWRegMask;
+ }
+
+ VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
+
+ // Special cases for all set or all clear immediates.
+ if (immediate == 0) {
+ switch (op) {
+ case AND:
+ Mov(rd, 0);
+ return;
+ case ORR:
+ VIXL_FALLTHROUGH();
+ case EOR:
+ Mov(rd, rn);
+ return;
+ case ANDS:
+ VIXL_FALLTHROUGH();
+ case BICS:
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ } else if ((rd.Is64Bits() && (immediate == -1)) ||
+ (rd.Is32Bits() && (immediate == 0xffffffff))) {
+ switch (op) {
+ case AND:
+ Mov(rd, rn);
+ return;
+ case ORR:
+ Mov(rd, immediate);
+ return;
+ case EOR:
+ Mvn(rd, rn);
+ return;
+ case ANDS:
+ VIXL_FALLTHROUGH();
+ case BICS:
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // Immediate can't be encoded: synthesize using move immediate.
+ Register temp = temps.AcquireSameSizeAs(rn);
+
+ // If the left-hand input is the stack pointer, we can't pre-shift the
+ // immediate, as the encoding won't allow the subsequent post shift.
+ PreShiftImmMode mode = rn.IsSP() ? kNoShift : kAnyShift;
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+
+ if (rd.Is(sp)) {
+ // If rd is the stack pointer we cannot use it as the destination
+ // register so we use the temp register as an intermediate again.
+ Logical(temp, rn, imm_operand, op);
+ Mov(sp, temp);
+ } else {
+ Logical(rd, rn, imm_operand, op);
+ }
+ }
+ } else if (operand.IsExtendedRegister()) {
+ VIXL_ASSERT(operand.reg().size() <= rd.size());
+ // Add/sub extended supports shift <= 4. We want to support exactly the
+ // same modes here.
+ VIXL_ASSERT(operand.shift_amount() <= 4);
+ VIXL_ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ Logical(rd, rn, Operand(temp), op);
+ } else {
+ // The operand can be encoded in the instruction.
+ VIXL_ASSERT(operand.IsShiftedRegister());
+ Logical(rd, rn, operand, op);
+ }
+}
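+
+// Illustrative expansions of the special cases in LogicalMacro above:
+//   And(x0, x1, 0)   -> mov x0, #0
+//   Orr(x0, x1, 0)   -> mov x0, x1
+//   Eor(x0, x1, -1)  -> mvn x0, x1
+// Other immediates are either encoded directly as logical immediates or
+// materialised into a scratch register first.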
+
+
+void MacroAssembler::Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode) {
+ // The worst case for size is mov immediate with up to 4 instructions.
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(rd, operand.immediate());
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Emit a shift instruction if moving a shifted register. This operation
+ // could also be achieved using an orr instruction (like orn used by Mvn),
+ // but using a shift instruction makes the disassembly clearer.
+ EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount());
+ } else if (operand.IsExtendedRegister()) {
+ // Emit an extend instruction if moving an extended register. This handles
+ // extend with post-shift operations, too.
+ EmitExtendShift(rd, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ } else {
+ // Otherwise, emit a register move only if the registers are distinct, or
+ // if they are not X registers.
+ //
+ // Note that mov(w0, w0) is not a no-op because it clears the top word of
+ // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
+ // registers is not required to clear the top word of the X register. In
+ // this case, the instruction is discarded.
+ //
+    // When sp is involved, the mov alias assembles to add #0; otherwise it
+    // assembles to orr with the zero register.
+ if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
+ (discard_mode == kDontDiscardForSameWReg))) {
+ mov(rd, operand.reg());
+ }
+ }
+}
+
+
+void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
+ VIXL_ASSERT(IsUint16(imm));
+ int byte1 = (imm & 0xff);
+ int byte2 = ((imm >> 8) & 0xff);
+ if (byte1 == byte2) {
+ movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
+ } else if (byte1 == 0) {
+ movi(vd, byte2, LSL, 8);
+ } else if (byte2 == 0) {
+ movi(vd, byte1);
+ } else if (byte1 == 0xff) {
+ mvni(vd, ~byte2 & 0xff, LSL, 8);
+ } else if (byte2 == 0xff) {
+ mvni(vd, ~byte1 & 0xff);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireW();
+ movz(temp, imm);
+ dup(vd, temp);
+ }
+}
+
+
+void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
+ VIXL_ASSERT(IsUint32(imm));
+
+ uint8_t bytes[sizeof(imm)];
+ memcpy(bytes, &imm, sizeof(imm));
+
+ // All bytes are either 0x00 or 0xff.
+ {
+ bool all0orff = true;
+ for (int i = 0; i < 4; ++i) {
+ if ((bytes[i] != 0) && (bytes[i] != 0xff)) {
+ all0orff = false;
+ break;
+ }
+ }
+
+ if (all0orff == true) {
+ movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
+ return;
+ }
+ }
+
+ // Of the 4 bytes, only one byte is non-zero.
+ for (int i = 0; i < 4; i++) {
+ if ((imm & (0xff << (i * 8))) == imm) {
+ movi(vd, bytes[i], LSL, i * 8);
+ return;
+ }
+ }
+
+ // Of the 4 bytes, only one byte is not 0xff.
+ for (int i = 0; i < 4; i++) {
+ uint32_t mask = ~(0xff << (i * 8));
+ if ((imm & mask) == mask) {
+ mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
+ return;
+ }
+ }
+
+ // Immediate is of the form 0x00MMFFFF.
+ if ((imm & 0xff00ffff) == 0x0000ffff) {
+ movi(vd, bytes[2], MSL, 16);
+ return;
+ }
+
+ // Immediate is of the form 0x0000MMFF.
+ if ((imm & 0xffff00ff) == 0x000000ff) {
+ movi(vd, bytes[1], MSL, 8);
+ return;
+ }
+
+ // Immediate is of the form 0xFFMM0000.
+ if ((imm & 0xff00ffff) == 0xff000000) {
+ mvni(vd, ~bytes[2] & 0xff, MSL, 16);
+ return;
+ }
+ // Immediate is of the form 0xFFFFMM00.
+ if ((imm & 0xffff00ff) == 0xffff0000) {
+ mvni(vd, ~bytes[1] & 0xff, MSL, 8);
+ return;
+ }
+
+ // Top and bottom 16-bits are equal.
+ if (((imm >> 16) & 0xffff) == (imm & 0xffff)) {
+ Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
+ return;
+ }
+
+ // Default case.
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireW();
+ Mov(temp, imm);
+ dup(vd, temp);
+ }
+}
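+
+// For instance, Movi32bitHelper selects:
+//   0x00005600 -> movi vd, #0x56, lsl #8   (single non-zero byte)
+//   0xffff56ff -> mvni vd, #0xa9, lsl #8   (single non-0xff byte)
+//   0x0056ffff -> movi vd, #0x56, msl #16  (0x00MMFFFF form)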
+
+
+void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
+ // All bytes are either 0x00 or 0xff.
+ {
+ bool all0orff = true;
+ for (int i = 0; i < 8; ++i) {
+ int byteval = (imm >> (i * 8)) & 0xff;
+ if (byteval != 0 && byteval != 0xff) {
+ all0orff = false;
+ break;
+ }
+ }
+ if (all0orff == true) {
+ movi(vd, imm);
+ return;
+ }
+ }
+
+ // Top and bottom 32-bits are equal.
+ if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) {
+ Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
+ return;
+ }
+
+ // Default case.
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, imm);
+ if (vd.Is1D()) {
+ mov(vd.D(), 0, temp);
+ } else {
+ dup(vd.V2D(), temp);
+ }
+ }
+}
+
+
+void MacroAssembler::Movi(const VRegister& vd,
+ uint64_t imm,
+ Shift shift,
+ int shift_amount) {
+ MacroEmissionCheckScope guard(this);
+ if (shift_amount != 0 || shift != LSL) {
+ movi(vd, imm, shift, shift_amount);
+ } else if (vd.Is8B() || vd.Is16B()) {
+ // 8-bit immediate.
+ VIXL_ASSERT(IsUint8(imm));
+ movi(vd, imm);
+ } else if (vd.Is4H() || vd.Is8H()) {
+ // 16-bit immediate.
+ Movi16bitHelper(vd, imm);
+ } else if (vd.Is2S() || vd.Is4S()) {
+ // 32-bit immediate.
+ Movi32bitHelper(vd, imm);
+ } else {
+ // 64-bit immediate.
+ Movi64bitHelper(vd, imm);
+ }
+}
+
+
+void MacroAssembler::Movi(const VRegister& vd,
+ uint64_t hi,
+ uint64_t lo) {
+ VIXL_ASSERT(vd.Is128Bits());
+ UseScratchRegisterScope temps(this);
+
+ // When hi == lo, the following generates good code.
+ //
+ // In situations where the constants are complex and hi != lo, the following
+ // can turn into up to 10 instructions: 2*(mov + 3*movk + dup/insert). To do
+ // any better, we could try to estimate whether splatting the high value and
+ // updating the low value would generate fewer instructions than vice versa
+ // (what we do now).
+ //
+ // (A PC-relative load from memory to the vector register (ADR + LD2) is going
+ // to have fairly high latency but is fairly compact; not clear what the best
+ // tradeoff is.)
+
+ Movi(vd.V2D(), lo);
+ if (hi != lo) {
+ Register temp = temps.AcquireX();
+ Mov(temp, hi);
+ Ins(vd.V2D(), 1, temp);
+ }
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
+ // The worst case for size is mvn immediate with up to 4 instructions.
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mvn(rd, operand.immediate());
+ } else if (operand.IsExtendedRegister()) {
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(operand.reg());
+
+ // Emit two instructions for the extend case. This differs from Mov, as
+ // the extend and invert can't be achieved in one instruction.
+ Register temp = temps.AcquireSameSizeAs(rd);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(operand.maybeReg()));
+
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ mvn(rd, Operand(temp));
+ } else {
+ // Otherwise, register and shifted register cases can be handled by the
+ // assembler directly, using orn.
+ mvn(rd, operand);
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
+ MoveImmediateHelper(this, rd, imm);
+}
+
+
+void MacroAssembler::Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
+ }
+}
+
+
+void MacroAssembler::Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
+ }
+}
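+
+// Both Ccmp and Ccmn fold a negative immediate onto the opposite instruction.
+// For example, Ccmp(x0, -7, NoFlag, eq) emits ccmn x0, #7, #0, eq, since the
+// conditional compare immediate field is only five bits and unsigned.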
+
+
+void MacroAssembler::ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ // The worst case for size is ccmp immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for ccmp
+ MacroEmissionCheckScope guard(this);
+
+ if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
+ (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ // The immediate can be encoded in the instruction, or the operand is an
+ // unshifted register: call the assembler.
+ ConditionalCompare(rn, operand, nzcv, cond, op);
+ } else {
+ UseScratchRegisterScope temps(this);
+ // The operand isn't directly supported by the instruction: perform the
+ // operation on a temporary register.
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ Mov(temp, operand);
+ ConditionalCompare(rn, temp, nzcv, cond, op);
+ }
+}
+
+
+void MacroAssembler::Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ // The worst case for size is csel immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for csel
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsImmediate()) {
+ // Immediate argument. Handle special cases of 0, 1 and -1 using zero
+ // register.
+ int64_t imm = operand.immediate();
+ Register zr = AppropriateZeroRegFor(rn);
+ if (imm == 0) {
+ csel(rd, rn, zr, cond);
+ } else if (imm == 1) {
+ csinc(rd, rn, zr, cond);
+ } else if (imm == -1) {
+ csinv(rd, rn, zr, cond);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+ Mov(temp, operand.immediate());
+ csel(rd, rn, temp, cond);
+ }
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
+ // Unshifted register argument.
+ csel(rd, rn, operand.reg(), cond);
+ } else {
+ // All other arguments.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+ Mov(temp, operand);
+ csel(rd, rn, temp, cond);
+ }
+}
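+
+// Examples of the immediate special cases in Csel above:
+//   Csel(x0, x1, 0, eq)  -> csel  x0, x1, xzr, eq
+//   Csel(x0, x1, 1, eq)  -> csinc x0, x1, xzr, eq
+//   Csel(x0, x1, -1, eq) -> csinv x0, x1, xzr, eq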
+
+
+void MacroAssembler::Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S) {
+ if (operand.IsImmediate() && (operand.immediate() < 0) &&
+ IsImmAddSub(-operand.immediate())) {
+ AddSubMacro(rd, rn, -operand.immediate(), S, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, S, ADD);
+ }
+}
+
+
+void MacroAssembler::Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Add(rd, rn, operand, SetFlags);
+}
+
+
+void MacroAssembler::Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S) {
+ if (operand.IsImmediate() && (operand.immediate() < 0) &&
+ IsImmAddSub(-operand.immediate())) {
+ AddSubMacro(rd, rn, -operand.immediate(), S, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, S, SUB);
+ }
+}
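+
+// Because the add/sub immediate field is unsigned, negative immediates are
+// folded onto the opposite operation when they fit; for example,
+// Add(x0, x1, -16) emits sub x0, x1, #16 and Sub(x0, x1, -16) emits
+// add x0, x1, #16.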
+
+
+void MacroAssembler::Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Sub(rd, rn, operand, SetFlags);
+}
+
+
+void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
+ Adds(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
+ Subs(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, double value,
+ FPTrapFlags trap) {
+ // The worst case for size is:
+ // * 1 to materialise the constant, using literal pool if necessary
+ // * 1 instruction for fcmp{e}
+ MacroEmissionCheckScope guard(this);
+ if (value != 0.0) {
+ UseScratchRegisterScope temps(this);
+ FPRegister tmp = temps.AcquireSameSizeAs(fn);
+ VIXL_ASSERT(!tmp.Is(fn));
+ Fmov(tmp, value);
+ FPCompareMacro(fn, tmp, trap);
+ } else {
+ FPCompareMacro(fn, value, trap);
+ }
+}
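+
+// For example, Fcmp(d0, 0.0) uses the compare-with-zero form directly, while a
+// non-zero value such as Fcmp(d0, 1.5) is first moved into a scratch FP
+// register and compared against that.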
+
+
+void MacroAssembler::Fcmpe(const FPRegister& fn, double value) {
+ Fcmp(fn, value, EnableTrap);
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, double imm) {
+ // Floating point immediates are loaded through the literal pool.
+ MacroEmissionCheckScope guard(this);
+
+ if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
+ Fmov(vd, static_cast<float>(imm));
+ return;
+ }
+
+ VIXL_ASSERT(vd.Is1D() || vd.Is2D());
+ if (IsImmFP64(imm)) {
+ fmov(vd, imm);
+ } else {
+ uint64_t rawbits = DoubleToRawbits(imm);
+ if (vd.IsScalar()) {
+ if (rawbits == 0) {
+ fmov(vd, xzr);
+ } else {
+ Assembler::fImmPool64(vd, imm);
+ }
+ } else {
+ // TODO: consider NEON support for load literal.
+ Movi(vd, rawbits);
+ }
+ }
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, float imm) {
+ // Floating point immediates are loaded through the literal pool.
+ MacroEmissionCheckScope guard(this);
+
+ if (vd.Is1D() || vd.Is2D()) {
+ Fmov(vd, static_cast<double>(imm));
+ return;
+ }
+
+ VIXL_ASSERT(vd.Is1S() || vd.Is2S() || vd.Is4S());
+ if (IsImmFP32(imm)) {
+ fmov(vd, imm);
+ } else {
+ uint32_t rawbits = FloatToRawbits(imm);
+ if (vd.IsScalar()) {
+ if (rawbits == 0) {
+ fmov(vd, wzr);
+ } else {
+ Assembler::fImmPool32(vd, imm);
+ }
+ } else {
+ // TODO: consider NEON support for load literal.
+ Movi(vd, rawbits);
+ }
+ }
+}
+
+
+void MacroAssembler::Neg(const Register& rd,
+ const Operand& operand) {
+ if (operand.IsImmediate()) {
+ Mov(rd, -operand.immediate());
+ } else {
+ Sub(rd, AppropriateZeroRegFor(rd), operand);
+ }
+}
+
+
+void MacroAssembler::Negs(const Register& rd,
+ const Operand& operand) {
+ Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+ int64_t imm) {
+ return OneInstrMoveImmediateHelper(this, dst, imm);
+}
+
+
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+ int64_t imm,
+ PreShiftImmMode mode) {
+ int reg_size = dst.size();
+
+ // Encode the immediate in a single move instruction, if possible.
+ if (TryOneInstrMoveImmediate(dst, imm)) {
+ // The move was successful; nothing to do here.
+ } else {
+ // Pre-shift the immediate to the least-significant bits of the register.
+ int shift_low = CountTrailingZeros(imm, reg_size);
+ if (mode == kLimitShiftForSP) {
+ // When applied to the stack pointer, the subsequent arithmetic operation
+ // can use the extend form to shift left by a maximum of four bits. Right
+ // shifts are not allowed, so we filter them out later before the new
+ // immediate is tested.
+ shift_low = std::min(shift_low, 4);
+ }
+
+ int64_t imm_low = imm >> shift_low;
+
+ // Pre-shift the immediate to the most-significant bits of the register,
+ // inserting set bits in the least-significant bits.
+ int shift_high = CountLeadingZeros(imm, reg_size);
+ int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
+
+ if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
+ // The new immediate has been moved into the destination's low bits:
+ // return a new leftward-shifting operand.
+ return Operand(dst, LSL, shift_low);
+ } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
+ // The new immediate has been moved into the destination's high bits:
+ // return a new rightward-shifting operand.
+ return Operand(dst, LSR, shift_high);
+ } else {
+ Mov(dst, imm);
+ }
+ }
+ return Operand(dst);
+}
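+
+// For instance, with imm = 0x1f7de and kAnyShift, the immediate is not
+// encodable in one move, but imm >> 1 = 0xfbef is, so this emits
+// movz dst, #0xfbef and returns Operand(dst, LSL, 1) for the caller's
+// instruction to re-apply the shift.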
+
+
+void MacroAssembler::ComputeAddress(const Register& dst,
+ const MemOperand& mem_op) {
+ // We cannot handle pre-indexing or post-indexing.
+ VIXL_ASSERT(mem_op.addrmode() == Offset);
+ Register base = mem_op.base();
+ if (mem_op.IsImmediateOffset()) {
+ Add(dst, base, mem_op.offset());
+ } else {
+ VIXL_ASSERT(mem_op.IsRegisterOffset());
+ Register reg_offset = mem_op.regoffset();
+ Shift shift = mem_op.shift();
+ Extend extend = mem_op.extend();
+ if (shift == NO_SHIFT) {
+ VIXL_ASSERT(extend != NO_EXTEND);
+ Add(dst, base, Operand(reg_offset, extend, mem_op.shift_amount()));
+ } else {
+ VIXL_ASSERT(extend == NO_EXTEND);
+ Add(dst, base, Operand(reg_offset, shift, mem_op.shift_amount()));
+ }
+ }
+}
+
+
+void MacroAssembler::AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ // Worst case is add/sub immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for add/sub
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
+ (S == LeaveFlags)) {
+ // The instruction would be a nop. Avoid generating useless code.
+ return;
+ }
+
+ if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ if (operand.IsImmediate()) {
+ PreShiftImmMode mode = kAnyShift;
+
+ // If the destination or source register is the stack pointer, we can
+ // only pre-shift the immediate right by values supported in the add/sub
+ // extend encoding.
+ if (rd.IsSP()) {
+ // If the destination is SP and flags will be set, we can't pre-shift
+ // the immediate at all.
+ mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
+ } else if (rn.IsSP()) {
+ mode = kLimitShiftForSP;
+ }
+
+ Operand imm_operand =
+ MoveImmediateForShiftedOp(temp, operand.immediate(), mode);
+ AddSub(rd, rn, imm_operand, S, op);
+ } else {
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ }
+ } else {
+ AddSub(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void MacroAssembler::Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void MacroAssembler::Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void MacroAssembler::Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void MacroAssembler::Ngc(const Register& rd,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbc(rd, zr, operand);
+}
+
+
+void MacroAssembler::Ngcs(const Register& rd,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbcs(rd, zr, operand);
+}
+
+
+void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ // Worst case is addc/subc immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for add/sub
+ MacroEmissionCheckScope guard(this);
+ UseScratchRegisterScope temps(this);
+
+ if (operand.IsImmediate() ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ // Add/sub with carry (immediate or ROR shifted register.)
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ Mov(temp, operand);
+ AddSubWithCarry(rd, rn, Operand(temp), S, op);
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Add/sub with carry (shifted register).
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ VIXL_ASSERT(operand.shift() != ROR);
+ VIXL_ASSERT(IsUintN(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
+ operand.shift_amount()));
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
+ AddSubWithCarry(rd, rn, Operand(temp), S, op);
+ } else if (operand.IsExtendedRegister()) {
+ // Add/sub with carry (extended register).
+ VIXL_ASSERT(operand.reg().size() <= rd.size());
+ // Add/sub extended supports a shift <= 4. We want to support exactly the
+ // same modes.
+ VIXL_ASSERT(operand.shift_amount() <= 4);
+ VIXL_ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ AddSubWithCarry(rd, rn, Operand(temp), S, op);
+ } else {
+ // The addressing mode is directly supported by the instruction.
+ AddSubWithCarry(rd, rn, operand, S, op);
+ }
+}
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
+void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ LoadStoreMacro(REG, addr, OP); \
+}
+LS_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+
+void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ // Worst case is ldr/str pre/post index:
+ // * 1 instruction for ldr/str
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to update the base
+ MacroEmissionCheckScope guard(this);
+
+ int64_t offset = addr.offset();
+ unsigned access_size = CalcLSDataSize(op);
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, access_size) &&
+ !IsImmLSUnscaled(offset)) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
+ VIXL_ASSERT(!temp.Is(rt));
+ VIXL_ASSERT(!temp.Is(addr.base()) && !temp.Is(addr.regoffset()));
+ Mov(temp, addr.offset());
+ LoadStore(rt, MemOperand(addr.base(), temp), op);
+ } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
+ // Post-index beyond unscaled addressing range.
+ LoadStore(rt, MemOperand(addr.base()), op);
+ Add(addr.base(), addr.base(), Operand(offset));
+ } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
+ // Pre-index beyond unscaled addressing range.
+ Add(addr.base(), addr.base(), Operand(offset));
+ LoadStore(rt, MemOperand(addr.base()), op);
+ } else {
+ // Encodable in one load/store instruction.
+ LoadStore(rt, addr, op);
+ }
+}
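+
+// For example, Ldr(x0, MemOperand(x1, 0x987654)) cannot encode the offset in
+// either the scaled or the unscaled form, so the offset is materialised into a
+// scratch register and the register-offset form is used:
+//   mov <scratch>, #0x987654   (one or more move instructions)
+//   ldr x0, [x1, <scratch>]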
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+void MacroAssembler::FN(const REGTYPE REG, \
+ const REGTYPE REG2, \
+ const MemOperand& addr) { \
+ LoadStorePairMacro(REG, REG2, addr, OP); \
+}
+LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // TODO(all): Should we support register offset for load-store-pair?
+ VIXL_ASSERT(!addr.IsRegisterOffset());
+ // Worst case is ldp/stp immediate:
+ // * 1 instruction for ldp/stp
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to update the base
+ MacroEmissionCheckScope guard(this);
+
+ int64_t offset = addr.offset();
+ unsigned access_size = CalcLSPairDataSize(op);
+
+ // Check if the offset fits in the immediate field of the appropriate
+ // instruction. If not, emit two instructions to perform the operation.
+ if (IsImmLSPair(offset, access_size)) {
+ // Encodable in one load/store pair instruction.
+ LoadStorePair(rt, rt2, addr, op);
+ } else {
+ Register base = addr.base();
+ if (addr.IsImmediateOffset()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(base);
+ Add(temp, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(temp), op);
+ } else if (addr.IsPostIndex()) {
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ Add(base, base, offset);
+ } else {
+ VIXL_ASSERT(addr.IsPreIndex());
+ Add(base, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ }
+ }
+}
+
+
+void MacroAssembler::Prfm(PrefetchOperation op, const MemOperand& addr) {
+ MacroEmissionCheckScope guard(this);
+
+ // There are no pre- or post-index modes for prfm.
+ VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsRegisterOffset());
+
+ // The access size is implicitly 8 bytes for all prefetch operations.
+ unsigned size = kXRegSizeInBytesLog2;
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(addr.offset(), size) &&
+ !IsImmLSUnscaled(addr.offset())) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
+ Mov(temp, addr.offset());
+ Prefetch(op, MemOperand(addr.base(), temp));
+ } else {
+ // Simple register-offsets are encodable in one instruction.
+ Prefetch(op, addr);
+ }
+}
+
+
+void MacroAssembler::PushStackPointer() {
+ PrepareForPush(1, 8);
+
+ // Pushing a stack pointer leads to implementation-defined
+ // behavior, which may be surprising. In particular,
+ // str x28, [x28, #-8]!
+ // pre-decrements the stack pointer, storing the decremented value.
+ // Additionally, sp is read as xzr in this context, so it cannot be pushed.
+ // So we must use a scratch register.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
+ Mov(scratch, GetStackPointer64());
+ str(scratch, MemOperand(GetStackPointer64(), -8, PreIndex));
+}
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3) {
+ VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ VIXL_ASSERT(src0.IsValid());
+
+ int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
+ int size = src0.SizeInBytes();
+
+ if (src0.Is(GetStackPointer64())) {
+ VIXL_ASSERT(count == 1);
+ VIXL_ASSERT(size == 8);
+ PushStackPointer();
+ return;
+ }
+
+ PrepareForPush(count, size);
+ PushHelper(count, size, src0, src1, src2, src3);
+}
+
+
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(dst0.IsValid());
+
+ int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PrepareForPop(count, size);
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+}
+
+
+void MacroAssembler::PushCPURegList(CPURegList registers) {
+ VIXL_ASSERT(!registers.Overlaps(*TmpList()));
+ VIXL_ASSERT(!registers.Overlaps(*FPTmpList()));
+
+ int reg_size = registers.RegisterSizeInBytes();
+ PrepareForPush(registers.Count(), reg_size);
+
+ // Bump the stack pointer and store two registers at the bottom.
+ int size = registers.TotalSizeInBytes();
+ const CPURegister& bottom_0 = registers.PopLowestIndex();
+ const CPURegister& bottom_1 = registers.PopLowestIndex();
+ if (bottom_0.IsValid() && bottom_1.IsValid()) {
+ Stp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), -size, PreIndex));
+ } else if (bottom_0.IsValid()) {
+ Str(bottom_0, MemOperand(GetStackPointer64(), -size, PreIndex));
+ }
+
+ int offset = 2 * reg_size;
+ while (!registers.IsEmpty()) {
+ const CPURegister& src0 = registers.PopLowestIndex();
+ const CPURegister& src1 = registers.PopLowestIndex();
+ if (src1.IsValid()) {
+ Stp(src0, src1, MemOperand(GetStackPointer64(), offset));
+ } else {
+ Str(src0, MemOperand(GetStackPointer64(), offset));
+ }
+ offset += 2 * reg_size;
+ }
+}
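+
+// For example, with sp as the current stack pointer, pushing the list
+// {x19, x20, x21, x22} emits:
+//   stp x19, x20, [sp, #-32]!
+//   stp x21, x22, [sp, #16]
+// so the lowest-indexed register ends up at the lowest address.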
+
+
+void MacroAssembler::PopCPURegList(CPURegList registers) {
+ VIXL_ASSERT(!registers.Overlaps(*TmpList()));
+ VIXL_ASSERT(!registers.Overlaps(*FPTmpList()));
+
+ int reg_size = registers.RegisterSizeInBytes();
+ PrepareForPop(registers.Count(), reg_size);
+
+
+ int size = registers.TotalSizeInBytes();
+ const CPURegister& bottom_0 = registers.PopLowestIndex();
+ const CPURegister& bottom_1 = registers.PopLowestIndex();
+
+ int offset = 2 * reg_size;
+ while (!registers.IsEmpty()) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ if (dst1.IsValid()) {
+ Ldp(dst0, dst1, MemOperand(GetStackPointer64(), offset));
+ } else {
+ Ldr(dst0, MemOperand(GetStackPointer64(), offset));
+ }
+ offset += 2 * reg_size;
+ }
+
+ // Load the two registers at the bottom and drop the stack pointer.
+ if (bottom_0.IsValid() && bottom_1.IsValid()) {
+ Ldp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), size, PostIndex));
+ } else if (bottom_0.IsValid()) {
+ Ldr(bottom_0, MemOperand(GetStackPointer64(), size, PostIndex));
+ }
+}
+
+
+void MacroAssembler::PushMultipleTimes(int count, Register src) {
+ int size = src.SizeInBytes();
+
+ PrepareForPush(count, size);
+ // Push up to four registers at a time if possible because if the current
+ // stack pointer is sp and the register size is 32, registers must be pushed
+ // in blocks of four in order to maintain the 16-byte alignment for sp.
+ while (count >= 4) {
+ PushHelper(4, size, src, src, src, src);
+ count -= 4;
+ }
+ if (count >= 2) {
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ count -= 2;
+ }
+ if (count == 1) {
+ PushHelper(1, size, src, NoReg, NoReg, NoReg);
+ count -= 1;
+ }
+ VIXL_ASSERT(count == 0);
+}
+
+
+void MacroAssembler::PushHelper(int count, int size,
+ const CPURegister& src0,
+ const CPURegister& src1,
+ const CPURegister& src2,
+ const CPURegister& src3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ // Worst case for size is 2 stp.
+ InstructionAccurateScope scope(this, 2,
+ InstructionAccurateScope::kMaximumSize);
+
+ VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ VIXL_ASSERT(size == src0.SizeInBytes());
+
+ // Pushing the stack pointer has unexpected behavior. See PushStackPointer().
+ VIXL_ASSERT(!src0.Is(GetStackPointer64()) && !src0.Is(sp));
+ VIXL_ASSERT(!src1.Is(GetStackPointer64()) && !src1.Is(sp));
+ VIXL_ASSERT(!src2.Is(GetStackPointer64()) && !src2.Is(sp));
+ VIXL_ASSERT(!src3.Is(GetStackPointer64()) && !src3.Is(sp));
+
+ // The JS engine should never push 4 bytes.
+ VIXL_ASSERT(size >= 8);
+
+ // When pushing multiple registers, the store order is chosen such that
+ // Push(a, b) is equivalent to Push(a) followed by Push(b).
+ switch (count) {
+ case 1:
+ VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ str(src0, MemOperand(GetStackPointer64(), -1 * size, PreIndex));
+ break;
+ case 2:
+ VIXL_ASSERT(src2.IsNone() && src3.IsNone());
+ stp(src1, src0, MemOperand(GetStackPointer64(), -2 * size, PreIndex));
+ break;
+ case 3:
+ VIXL_ASSERT(src3.IsNone());
+ stp(src2, src1, MemOperand(GetStackPointer64(), -3 * size, PreIndex));
+ str(src0, MemOperand(GetStackPointer64(), 2 * size));
+ break;
+ case 4:
+ // Skip over 4 * size, then fill in the gap. This allows four W registers
+ // to be pushed using sp, whilst maintaining 16-byte alignment for sp at
+ // all times.
+ stp(src3, src2, MemOperand(GetStackPointer64(), -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(GetStackPointer64(), 2 * size));
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
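+
+// For example, with sp as the current stack pointer, Push(x0, x1, x2, x3)
+// emits:
+//   stp x3, x2, [sp, #-32]!
+//   stp x1, x0, [sp, #16]
+// leaving x0 at the highest address, exactly as four individual pushes would.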
+
+
+void MacroAssembler::PopHelper(int count, int size,
+ const CPURegister& dst0,
+ const CPURegister& dst1,
+ const CPURegister& dst2,
+ const CPURegister& dst3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ // Worst case for size is 2 ldp.
+ InstructionAccurateScope scope(this, 2,
+ InstructionAccurateScope::kMaximumSize);
+
+ VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(size == dst0.SizeInBytes());
+
+ // When popping multiple registers, the load order is chosen such that
+ // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
+ switch (count) {
+ case 1:
+ VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ ldr(dst0, MemOperand(GetStackPointer64(), 1 * size, PostIndex));
+ break;
+ case 2:
+ VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
+ ldp(dst0, dst1, MemOperand(GetStackPointer64(), 2 * size, PostIndex));
+ break;
+ case 3:
+ VIXL_ASSERT(dst3.IsNone());
+ ldr(dst2, MemOperand(GetStackPointer64(), 2 * size));
+ ldp(dst0, dst1, MemOperand(GetStackPointer64(), 3 * size, PostIndex));
+ break;
+ case 4:
+ // Load the higher addresses first, then load the lower addresses and skip
+ // the whole block in the second instruction. This allows four W registers
+ // to be popped using sp, whilst maintaining 16-byte alignment for sp at
+ // all times.
+ ldp(dst2, dst3, MemOperand(GetStackPointer64(), 2 * size));
+ ldp(dst0, dst1, MemOperand(GetStackPointer64(), 4 * size, PostIndex));
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PrepareForPush(int count, int size) {
+ if (sp.Is(GetStackPointer64())) {
+ // If the current stack pointer is sp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ VIXL_ASSERT((count * size) % 16 == 0);
+ } else {
+ // Even if the current stack pointer is not the system stack pointer (sp),
+ // the system stack pointer will still be modified in order to comply with
+ // ABI rules about accessing memory below the system stack pointer.
+ BumpSystemStackPointer(count * size);
+ }
+}
+
+
+void MacroAssembler::PrepareForPop(int count, int size) {
+ USE(count, size);
+ if (sp.Is(GetStackPointer64())) {
+ // If the current stack pointer is sp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ VIXL_ASSERT((count * size) % 16 == 0);
+ }
+}
+
+void MacroAssembler::Poke(const Register& src, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ VIXL_ASSERT(offset.immediate() >= 0);
+ }
+
+ Str(src, MemOperand(GetStackPointer64(), offset));
+}
+
+
+void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ VIXL_ASSERT(offset.immediate() >= 0);
+ }
+
+ Ldr(dst, MemOperand(GetStackPointer64(), offset));
+}
+
+
+void MacroAssembler::Claim(const Operand& size) {
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (size.IsImmediate()) {
+ VIXL_ASSERT(size.immediate() > 0);
+ if (sp.Is(GetStackPointer64())) {
+ VIXL_ASSERT((size.immediate() % 16) == 0);
+ }
+ }
+
+ Sub(GetStackPointer64(), GetStackPointer64(), size);
+
+ // Make sure the real stack pointer reflects the claimed stack space.
+  // We can't use stack memory below the stack pointer; it could be clobbered
+  // by interrupts and signal handlers.
+ if (!sp.Is(GetStackPointer64())) {
+ Mov(sp, GetStackPointer64());
+ }
+}
+
+
+void MacroAssembler::Drop(const Operand& size) {
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (size.IsImmediate()) {
+ VIXL_ASSERT(size.immediate() > 0);
+ if (sp.Is(GetStackPointer64())) {
+ VIXL_ASSERT((size.immediate() % 16) == 0);
+ }
+ }
+
+ Add(GetStackPointer64(), GetStackPointer64(), size);
+}
+
+
+void MacroAssembler::PushCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ // 10 stp will be emitted.
+ // TODO(all): Should we use GetCalleeSaved and SavedFP.
+ InstructionAccurateScope scope(this, 10);
+
+ // This method must not be called unless the current stack pointer is sp.
+ VIXL_ASSERT(sp.Is(GetStackPointer64()));
+
+ MemOperand tos(sp, -2 * static_cast<int>(kXRegSizeInBytes), PreIndex);
+
+ stp(x29, x30, tos);
+ stp(x27, x28, tos);
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
+
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
+}
+
+
+void MacroAssembler::PopCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ // 10 ldp will be emitted.
+ // TODO(all): Should we use GetCalleeSaved and SavedFP.
+ InstructionAccurateScope scope(this, 10);
+
+ // This method must not be called unless the current stack pointer is sp.
+ VIXL_ASSERT(sp.Is(GetStackPointer64()));
+
+ MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);
+
+ ldp(d8, d9, tos);
+ ldp(d10, d11, tos);
+ ldp(d12, d13, tos);
+ ldp(d14, d15, tos);
+
+ ldp(x19, x20, tos);
+ ldp(x21, x22, tos);
+ ldp(x23, x24, tos);
+ ldp(x25, x26, tos);
+ ldp(x27, x28, tos);
+ ldp(x29, x30, tos);
+}
+
+void MacroAssembler::LoadCPURegList(CPURegList registers,
+ const MemOperand& src) {
+ LoadStoreCPURegListHelper(kLoad, registers, src);
+}
+
+void MacroAssembler::StoreCPURegList(CPURegList registers,
+ const MemOperand& dst) {
+ LoadStoreCPURegListHelper(kStore, registers, dst);
+}
+
+
+void MacroAssembler::LoadStoreCPURegListHelper(LoadStoreCPURegListAction op,
+ CPURegList registers,
+ const MemOperand& mem) {
+ // We do not handle pre-indexing or post-indexing.
+ VIXL_ASSERT(!(mem.IsPreIndex() || mem.IsPostIndex()));
+ VIXL_ASSERT(!registers.Overlaps(tmp_list_));
+ VIXL_ASSERT(!registers.Overlaps(fptmp_list_));
+ VIXL_ASSERT(!registers.IncludesAliasOf(sp));
+
+ UseScratchRegisterScope temps(this);
+
+ MemOperand loc = BaseMemOperandForLoadStoreCPURegList(registers,
+ mem,
+ &temps);
+
+ while (registers.Count() >= 2) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ if (op == kStore) {
+ Stp(dst0, dst1, loc);
+ } else {
+ VIXL_ASSERT(op == kLoad);
+ Ldp(dst0, dst1, loc);
+ }
+ loc.AddOffset(2 * registers.RegisterSizeInBytes());
+ }
+ if (!registers.IsEmpty()) {
+ if (op == kStore) {
+ Str(registers.PopLowestIndex(), loc);
+ } else {
+ VIXL_ASSERT(op == kLoad);
+ Ldr(registers.PopLowestIndex(), loc);
+ }
+ }
+}
+
+MemOperand MacroAssembler::BaseMemOperandForLoadStoreCPURegList(
+ const CPURegList& registers,
+ const MemOperand& mem,
+ UseScratchRegisterScope* scratch_scope) {
+ // If necessary, pre-compute the base address for the accesses.
+ if (mem.IsRegisterOffset()) {
+ Register reg_base = scratch_scope->AcquireX();
+ ComputeAddress(reg_base, mem);
+ return MemOperand(reg_base);
+
+ } else if (mem.IsImmediateOffset()) {
+ int reg_size = registers.RegisterSizeInBytes();
+ int total_size = registers.TotalSizeInBytes();
+ int64_t min_offset = mem.offset();
+ int64_t max_offset = mem.offset() + std::max(0, total_size - 2 * reg_size);
+ if ((registers.Count() >= 2) &&
+ (!Assembler::IsImmLSPair(min_offset, WhichPowerOf2(reg_size)) ||
+ !Assembler::IsImmLSPair(max_offset, WhichPowerOf2(reg_size)))) {
+ Register reg_base = scratch_scope->AcquireX();
+ ComputeAddress(reg_base, mem);
+ return MemOperand(reg_base);
+ }
+ }
+
+ return mem;
+}
+
+void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
+ VIXL_ASSERT(!sp.Is(GetStackPointer64()));
+ // TODO: Several callers rely on this not using scratch registers, so we use
+ // the assembler directly here. However, this means that large immediate
+ // values of 'space' cannot be handled.
+ InstructionAccurateScope scope(this, 1);
+ sub(sp, GetStackPointer64(), space);
+}
+
+
+void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
+
+#ifdef JS_SIMULATOR_ARM64
+ // The arguments to the trace pseudo instruction need to be contiguous in
+ // memory, so make sure we don't try to emit a literal pool.
+ InstructionAccurateScope scope(this, kTraceLength / kInstructionSize);
+
+ Label start;
+ bind(&start);
+
+ // Refer to simulator-a64.h for a description of the marker and its
+ // arguments.
+ hlt(kTraceOpcode);
+
+ // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
+ dc32(parameters);
+
+ // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
+ dc32(command);
+#else
+ // Emit nothing on real hardware.
+ USE(parameters, command);
+#endif
+}
+
+
+void MacroAssembler::Log(TraceParameters parameters) {
+
+#ifdef JS_SIMULATOR_ARM64
+ // The arguments to the log pseudo instruction need to be contiguous in
+ // memory, so make sure we don't try to emit a literal pool.
+ InstructionAccurateScope scope(this, kLogLength / kInstructionSize);
+
+ Label start;
+ bind(&start);
+
+ // Refer to simulator-a64.h for a description of the marker and its
+ // arguments.
+ hlt(kLogOpcode);
+
+ // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
+ dc32(parameters);
+#else
+ // Emit nothing on real hardware.
+ USE(parameters);
+#endif
+}
+
+
+void MacroAssembler::EnableInstrumentation() {
+ VIXL_ASSERT(!isprint(InstrumentStateEnable));
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateEnable);
+}
+
+
+void MacroAssembler::DisableInstrumentation() {
+ VIXL_ASSERT(!isprint(InstrumentStateDisable));
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateDisable);
+}
+
+
+void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
+ VIXL_ASSERT(strlen(marker_name) == 2);
+
+ // We allow only printable characters in the marker names. Unprintable
+ // characters are reserved for controlling features of the instrumentation.
+ VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, (marker_name[1] << 8) | marker_name[0]);
+}
+
+
+void UseScratchRegisterScope::Open(MacroAssembler* masm) {
+ VIXL_ASSERT(!initialised_);
+ available_ = masm->TmpList();
+ availablefp_ = masm->FPTmpList();
+ old_available_ = available_->list();
+ old_availablefp_ = availablefp_->list();
+ VIXL_ASSERT(available_->type() == CPURegister::kRegister);
+ VIXL_ASSERT(availablefp_->type() == CPURegister::kVRegister);
+#ifdef DEBUG
+ initialised_ = true;
+#endif
+}
+
+
+void UseScratchRegisterScope::Close() {
+ if (available_) {
+ available_->set_list(old_available_);
+ available_ = NULL;
+ }
+ if (availablefp_) {
+ availablefp_->set_list(old_availablefp_);
+ availablefp_ = NULL;
+ }
+#ifdef DEBUG
+ initialised_ = false;
+#endif
+}
+
+
+UseScratchRegisterScope::UseScratchRegisterScope(MacroAssembler* masm) {
+#ifdef DEBUG
+ initialised_ = false;
+#endif
+ Open(masm);
+}
+
+// This allows deferred (and optional) initialisation of the scope.
+UseScratchRegisterScope::UseScratchRegisterScope()
+ : available_(NULL), availablefp_(NULL),
+ old_available_(0), old_availablefp_(0) {
+#ifdef DEBUG
+ initialised_ = false;
+#endif
+}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ Close();
+}
+
+
+bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const {
+ return available_->IncludesAliasOf(reg) || availablefp_->IncludesAliasOf(reg);
+}
+
+
+Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
+ int code = AcquireNextAvailable(available_).code();
+ return Register(code, reg.size());
+}
+
+
+FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
+ int code = AcquireNextAvailable(availablefp_).code();
+ return FPRegister(code, reg.size());
+}
+
+
+void UseScratchRegisterScope::Release(const CPURegister& reg) {
+ VIXL_ASSERT(initialised_);
+ if (reg.IsRegister()) {
+ ReleaseByCode(available_, reg.code());
+ } else if (reg.IsFPRegister()) {
+ ReleaseByCode(availablefp_, reg.code());
+ } else {
+ VIXL_ASSERT(reg.IsNone());
+ }
+}
+
+
+void UseScratchRegisterScope::Include(const CPURegList& list) {
+ VIXL_ASSERT(initialised_);
+ if (list.type() == CPURegister::kRegister) {
+    // Make sure that neither sp nor xzr are included in the list.
+ IncludeByRegList(available_, list.list() & ~(xzr.Bit() | sp.Bit()));
+ } else {
+ VIXL_ASSERT(list.type() == CPURegister::kVRegister);
+ IncludeByRegList(availablefp_, list.list());
+ }
+}
+
+
+void UseScratchRegisterScope::Include(const Register& reg1,
+ const Register& reg2,
+ const Register& reg3,
+ const Register& reg4) {
+ VIXL_ASSERT(initialised_);
+ RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+  // Make sure that neither sp nor xzr are included in the list.
+ include &= ~(xzr.Bit() | sp.Bit());
+
+ IncludeByRegList(available_, include);
+}
+
+
+void UseScratchRegisterScope::Include(const FPRegister& reg1,
+ const FPRegister& reg2,
+ const FPRegister& reg3,
+ const FPRegister& reg4) {
+ RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ IncludeByRegList(availablefp_, include);
+}
+
+
+void UseScratchRegisterScope::Exclude(const CPURegList& list) {
+ if (list.type() == CPURegister::kRegister) {
+ ExcludeByRegList(available_, list.list());
+ } else {
+ VIXL_ASSERT(list.type() == CPURegister::kVRegister);
+ ExcludeByRegList(availablefp_, list.list());
+ }
+}
+
+
+void UseScratchRegisterScope::Exclude(const Register& reg1,
+ const Register& reg2,
+ const Register& reg3,
+ const Register& reg4) {
+ RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ ExcludeByRegList(available_, exclude);
+}
+
+
+void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
+ const FPRegister& reg2,
+ const FPRegister& reg3,
+ const FPRegister& reg4) {
+ RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ ExcludeByRegList(availablefp_, excludefp);
+}
+
+
+void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3,
+ const CPURegister& reg4) {
+ RegList exclude = 0;
+ RegList excludefp = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4};
+
+ for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) {
+ if (regs[i].IsRegister()) {
+ exclude |= regs[i].Bit();
+ } else if (regs[i].IsFPRegister()) {
+ excludefp |= regs[i].Bit();
+ } else {
+ VIXL_ASSERT(regs[i].IsNone());
+ }
+ }
+
+ ExcludeByRegList(available_, exclude);
+ ExcludeByRegList(availablefp_, excludefp);
+}
+
+
+void UseScratchRegisterScope::ExcludeAll() {
+ ExcludeByRegList(available_, available_->list());
+ ExcludeByRegList(availablefp_, availablefp_->list());
+}
+
+
+CPURegister UseScratchRegisterScope::AcquireNextAvailable(
+ CPURegList* available) {
+ VIXL_CHECK(!available->IsEmpty());
+ CPURegister result = available->PopLowestIndex();
+ VIXL_ASSERT(!AreAliased(result, xzr, sp));
+ return result;
+}
+
+
+void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
+ ReleaseByRegList(available, static_cast<RegList>(1) << code);
+}
+
+
+void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
+ RegList regs) {
+ available->set_list(available->list() | regs);
+}
+
+
+void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
+ RegList regs) {
+ available->set_list(available->list() | regs);
+}
+
+
+void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
+ RegList exclude) {
+ available->set_list(available->list() & ~exclude);
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.h b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h
new file mode 100644
index 0000000000..3c403a815f
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h
@@ -0,0 +1,2622 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_MACRO_ASSEMBLER_A64_H_
+#define VIXL_A64_MACRO_ASSEMBLER_A64_H_
+
+#include <algorithm>
+#include <limits>
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instrument-vixl.h"
+#include "jit/arm64/vixl/Simulator-Constants-vixl.h"
+
+#define LS_MACRO_LIST(V) \
+ V(Ldrb, Register&, rt, LDRB_w) \
+ V(Strb, Register&, rt, STRB_w) \
+ V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
+ V(Ldrh, Register&, rt, LDRH_w) \
+ V(Strh, Register&, rt, STRH_w) \
+ V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
+ V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
+ V(Str, CPURegister&, rt, StoreOpFor(rt)) \
+ V(Ldrsw, Register&, rt, LDRSW_x)
+
+
+#define LSPAIR_MACRO_LIST(V) \
+ V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
+ V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
+ V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
+
+namespace vixl {
+
+// Forward declaration
+class MacroAssembler;
+class UseScratchRegisterScope;
+
+// This scope has the following purposes:
+// * Acquire/Release the underlying assembler's code buffer.
+// * This is mandatory before emitting.
+// * Emit the literal or veneer pools if necessary before emitting the
+// macro-instruction.
+// * Ensure there is enough space to emit the macro-instruction.
+class EmissionCheckScope {
+ public:
+ EmissionCheckScope(MacroAssembler* masm, size_t size)
+ : masm_(masm)
+ { }
+
+ protected:
+ MacroAssembler* masm_;
+#ifdef DEBUG
+ Label start_;
+ size_t size_;
+#endif
+};
+
+
+// Helper for common Emission checks.
+// The macro-instruction maps to a single instruction.
+class SingleEmissionCheckScope : public EmissionCheckScope {
+ public:
+ explicit SingleEmissionCheckScope(MacroAssembler* masm)
+ : EmissionCheckScope(masm, kInstructionSize) {}
+};
+
+
+// The macro instruction is a "typical" macro-instruction. Typical macro-
+// instruction only emit a few instructions, a few being defined as 8 here.
+class MacroEmissionCheckScope : public EmissionCheckScope {
+ public:
+ explicit MacroEmissionCheckScope(MacroAssembler* masm)
+ : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}
+
+ private:
+ static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
+};
+
+
+enum BranchType {
+ // Copies of architectural conditions.
+  // The associated conditions can be used in place of those; the code will
+  // take care of reinterpreting them with the correct type.
+ integer_eq = eq,
+ integer_ne = ne,
+ integer_hs = hs,
+ integer_lo = lo,
+ integer_mi = mi,
+ integer_pl = pl,
+ integer_vs = vs,
+ integer_vc = vc,
+ integer_hi = hi,
+ integer_ls = ls,
+ integer_ge = ge,
+ integer_lt = lt,
+ integer_gt = gt,
+ integer_le = le,
+ integer_al = al,
+ integer_nv = nv,
+
+ // These two are *different* from the architectural codes al and nv.
+ // 'always' is used to generate unconditional branches.
+ // 'never' is used to not generate a branch (generally as the inverse
+// branch type of 'always').
+ always, never,
+ // cbz and cbnz
+ reg_zero, reg_not_zero,
+ // tbz and tbnz
+ reg_bit_clear, reg_bit_set,
+
+ // Aliases.
+ kBranchTypeFirstCondition = eq,
+ kBranchTypeLastCondition = nv,
+ kBranchTypeFirstUsingReg = reg_zero,
+ kBranchTypeFirstUsingBit = reg_bit_clear
+};
+
+
+enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
+
+// The macro assembler supports moving automatically pre-shifted immediates for
+// arithmetic and logical instructions, and then applying a post shift in the
+// instruction to undo the modification, in order to reduce the code emitted for
+// an operation. For example:
+//
+// Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
+//
+// This optimisation can be only partially applied when the stack pointer is an
+// operand or destination, so this enumeration is used to control the shift.
+enum PreShiftImmMode {
+ kNoShift, // Don't pre-shift.
+ kLimitShiftForSP, // Limit pre-shift for add/sub extend use.
+ kAnyShift // Allow any pre-shift.
+};
+
+
+class MacroAssembler : public js::jit::Assembler {
+ public:
+ MacroAssembler();
+
+ // Finalize a code buffer of generated instructions. This function must be
+ // called before executing or copying code from the buffer.
+ void FinalizeCode();
+
+
+ // Constant generation helpers.
+ // These functions return the number of instructions required to move the
+ // immediate into the destination register. Also, if the masm pointer is
+ // non-null, it generates the code to do so.
+ // The two features are implemented using one function to avoid duplication of
+ // the logic.
+ // The function can be used to evaluate the cost of synthesizing an
+ // instruction using 'mov immediate' instructions. A user might prefer loading
+ // a constant using the literal pool instead of using multiple 'mov immediate'
+ // instructions.
+ static int MoveImmediateHelper(MacroAssembler* masm,
+ const Register &rd,
+ uint64_t imm);
+ static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
+ const Register& dst,
+ int64_t imm);
+
+
+ // Logical macros.
+ void And(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Tst(const Register& rn, const Operand& operand);
+ void LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+
+ // Add and sub macros.
+ void Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+ void Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+ void Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Cmn(const Register& rn, const Operand& operand);
+ void Cmp(const Register& rn, const Operand& operand);
+ void Neg(const Register& rd,
+ const Operand& operand);
+ void Negs(const Register& rd,
+ const Operand& operand);
+
+ void AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ // Add/sub with carry macros.
+ void Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Ngc(const Register& rd,
+ const Operand& operand);
+ void Ngcs(const Register& rd,
+ const Operand& operand);
+ void AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Move macros.
+ void Mov(const Register& rd, uint64_t imm);
+ void Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+ void Mvn(const Register& rd, uint64_t imm) {
+ Mov(rd, (rd.size() == kXRegSize) ? ~imm : (~imm & kWRegMask));
+ }
+ void Mvn(const Register& rd, const Operand& operand);
+
+ // Try to move an immediate into the destination register in a single
+ // instruction. Returns true and updates the contents of dst on success;
+ // returns false otherwise.
+ bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
+
+ // Move an immediate into register dst, and return an Operand object for
+ // use with a subsequent instruction that accepts a shift. The value moved
+ // into dst is not necessarily equal to imm; it may have had a shifting
+ // operation applied to it that will be subsequently undone by the shift
+ // applied in the Operand.
+ Operand MoveImmediateForShiftedOp(const Register& dst,
+ int64_t imm,
+ PreShiftImmMode mode);
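+ // A minimal sketch (not part of the original header) of how these two
+ // helpers are commonly combined; 'masm', x0, x16 and 'imm' are placeholders:
+ //
+ //   if (masm.TryOneInstrMoveImmediate(x16, imm)) {
+ //     masm.Add(x0, x0, x16);
+ //   } else {
+ //     Operand op = masm.MoveImmediateForShiftedOp(x16, imm, kAnyShift);
+ //     masm.Add(x0, x0, op);  // the shift in 'op' undoes the pre-shift
+ //   }                        // applied while materialising x16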
+
+ // Synthesises the address represented by a MemOperand into a register.
+ void ComputeAddress(const Register& dst, const MemOperand& mem_op);
+
+ // Conditional macros.
+ void Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ void Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond);
+
+ // Load/store macros.
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
+ void FN(const REGTYPE REG, const MemOperand& addr);
+ LS_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+ void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
+ LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStorePairMacro(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+
+ void Prfm(PrefetchOperation op, const MemOperand& addr);
+
+ // Push or pop up to 4 registers of the same width to or from the stack,
+ // using the current stack pointer as set by SetStackPointer.
+ //
+ // If an argument register is 'NoReg', all further arguments are also assumed
+ // to be 'NoReg', and are thus not pushed or popped.
+ //
+ // Arguments are ordered such that "Push(a, b);" is functionally equivalent
+ // to "Push(a); Push(b);".
+ //
+ // It is valid to push the same register more than once, and there is no
+ // restriction on the order in which registers are specified.
+ //
+ // It is not valid to pop into the same register more than once in one
+ // operation, not even into the zero register.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then it
+ // must be aligned to 16 bytes on entry and the total size of the specified
+ // registers must also be a multiple of 16 bytes.
+ //
+ // Even if the current stack pointer is not the system stack pointer (sp),
+ // Push (and derived methods) will still modify the system stack pointer in
+ // order to comply with ABI rules about accessing memory below the system
+ // stack pointer.
+ //
+ // These methods do not modify any registers other than those passed into
+ // Pop, the stack pointer and (possibly) the system stack pointer.
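+ //
+ // Illustrative usage (a sketch, not from the original comment), with sp as
+ // the current stack pointer so 16-byte alignment applies:
+ //
+ //   masm.Push(x19, x20);  // 16 bytes total, so alignment is preserved
+ //   ...
+ //   masm.Pop(x20, x19);   // popping in mirror order restores both values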
+ void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
+ const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
+ const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+ void PushStackPointer();
+
+ // Alternative forms of Push and Pop, taking a RegList or CPURegList that
+ // specifies the registers that are to be pushed or popped. Higher-numbered
+ // registers are associated with higher memory addresses (as in the A32 push
+ // and pop instructions).
+ //
+ // (Push|Pop)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
+ // supported.
+ //
+ // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
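+ //
+ // Sketch (illustrative only, assuming the usual VIXL Register::Bit() helper
+ // for building the mask):
+ //
+ //   RegList saved = x19.Bit() | x20.Bit() | x21.Bit() | x22.Bit();
+ //   masm.PushXRegList(saved);  // 32 bytes, a multiple of 16
+ //   ...
+ //   masm.PopXRegList(saved);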
+ void PushCPURegList(CPURegList registers);
+ void PopCPURegList(CPURegList registers);
+
+ void PushSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PushCPURegList(CPURegList(type, reg_size, registers));
+ }
+ void PopSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PopCPURegList(CPURegList(type, reg_size, registers));
+ }
+ void PushXRegList(RegList regs) {
+ PushSizeRegList(regs, kXRegSize);
+ }
+ void PopXRegList(RegList regs) {
+ PopSizeRegList(regs, kXRegSize);
+ }
+ void PushWRegList(RegList regs) {
+ PushSizeRegList(regs, kWRegSize);
+ }
+ void PopWRegList(RegList regs) {
+ PopSizeRegList(regs, kWRegSize);
+ }
+ void PushDRegList(RegList regs) {
+ PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
+ }
+ void PopDRegList(RegList regs) {
+ PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
+ }
+ void PushSRegList(RegList regs) {
+ PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
+ }
+ void PopSRegList(RegList regs) {
+ PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
+ }
+
+ // Push the specified register 'count' times.
+ void PushMultipleTimes(int count, Register src);
+
+ // Poke 'src' onto the stack. The offset is in bytes.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then sp
+ // must be aligned to 16 bytes.
+ void Poke(const Register& src, const Operand& offset);
+
+ // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then sp
+ // must be aligned to 16 bytes.
+ void Peek(const Register& dst, const Operand& offset);
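+
+ // For example (illustrative only), with stack space for both slots already
+ // claimed:
+ //
+ //   masm.Poke(x0, 0);  // [StackPointer() + 0] = x0
+ //   masm.Peek(x1, 8);  // x1 = [StackPointer() + 8]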
+
+ // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
+ // specifies the registers that are to be peeked or poked. Higher-numbered
+ // registers are associated with higher memory addresses.
+ //
+ // (Peek|Poke)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
+ // supported.
+ //
+ // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
+ void PeekCPURegList(CPURegList registers, int64_t offset) {
+ LoadCPURegList(registers, MemOperand(StackPointer(), offset));
+ }
+ void PokeCPURegList(CPURegList registers, int64_t offset) {
+ StoreCPURegList(registers, MemOperand(StackPointer(), offset));
+ }
+
+ void PeekSizeRegList(RegList registers, int64_t offset, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PeekCPURegList(CPURegList(type, reg_size, registers), offset);
+ }
+ void PokeSizeRegList(RegList registers, int64_t offset, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PokeCPURegList(CPURegList(type, reg_size, registers), offset);
+ }
+ void PeekXRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kXRegSize);
+ }
+ void PokeXRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kXRegSize);
+ }
+ void PeekWRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kWRegSize);
+ }
+ void PokeWRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kWRegSize);
+ }
+ void PeekDRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
+ }
+ void PokeDRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
+ }
+ void PeekSRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
+ }
+ void PokeSRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
+ }
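+
+ // Sketch of the RegList forms (illustrative only, using the same Bit()
+ // assumption as above) for spilling and reloading into claimed space:
+ //
+ //   RegList temps = x0.Bit() | x1.Bit();
+ //   masm.PokeXRegList(temps, 0);
+ //   ...
+ //   masm.PeekXRegList(temps, 0);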
+
+
+ // Claim or drop stack space without actually accessing memory.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then it
+ // must be aligned to 16 bytes and the size claimed or dropped must be a
+ // multiple of 16 bytes.
+ void Claim(const Operand& size);
+ void Drop(const Operand& size);
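+
+ // A typical pairing (an illustrative sketch only):
+ //
+ //   masm.Claim(32);  // reserve 32 bytes (a multiple of 16 when sp is used)
+ //   masm.Poke(x0, 0);
+ //   masm.Poke(x1, 8);
+ //   ...
+ //   masm.Drop(32);   // release the same amount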
+
+ // Preserve the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are pushed before lower-numbered registers, and
+ // thus get higher addresses.
+ // Floating-point registers are pushed before general-purpose registers, and
+ // thus get higher addresses.
+ //
+ // This method must not be called unless StackPointer() is sp, and it is
+ // aligned to 16 bytes.
+ void PushCalleeSavedRegisters();
+
+ // Restore the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are popped after lower-numbered registers, and
+ // thus come from higher addresses.
+ // Floating-point registers are popped after general-purpose registers, and
+ // thus come from higher addresses.
+ //
+ // This method must not be called unless StackPointer() is sp, and it is
+ // aligned to 16 bytes.
+ void PopCalleeSavedRegisters();
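+
+ // The two methods are intended to be paired in a prologue/epilogue, for
+ // example (sketch only):
+ //
+ //   masm.PushCalleeSavedRegisters();  // prologue; StackPointer() must be sp
+ //   ...                               // body may clobber callee-saved regs
+ //   masm.PopCalleeSavedRegisters();   // matching epilogue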
+
+ void LoadCPURegList(CPURegList registers, const MemOperand& src);
+ void StoreCPURegList(CPURegList registers, const MemOperand& dst);
+
+ // Remaining instructions are simple pass-through calls to the assembler.
+ void Adr(const Register& rd, Label* label) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ adr(rd, label);
+ }
+ void Adrp(const Register& rd, Label* label) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ adrp(rd, label);
+ }
+ void Asr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ asr(rd, rn, shift);
+ }
+ void Asr(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ asrv(rd, rn, rm);
+ }
+
+ // Branch type inversion relies on these relations.
+ VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
+ (reg_bit_clear == (reg_bit_set ^ 1)) &&
+ (always == (never ^ 1)));
+
+ BranchType InvertBranchType(BranchType type) {
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ return static_cast<BranchType>(
+ InvertCondition(static_cast<Condition>(type)));
+ } else {
+ return static_cast<BranchType>(type ^ 1);
+ }
+ }
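+
+ // For example, following directly from the definitions above:
+ //
+ //   InvertBranchType(integer_eq)  // -> integer_ne, via InvertCondition()
+ //   InvertBranchType(reg_zero)    // -> reg_not_zero, via the XOR-with-1 rule
+ //   InvertBranchType(always)      // -> never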
+
+ void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
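+ // Illustrative sketch of the BranchType form above (not part of the original
+ // header); 'done' is a placeholder label:
+ //
+ //   masm.B(&done, integer_eq);          // conceptually b.eq done
+ //   masm.B(&done, reg_zero, x0);        // conceptually cbz x0, done
+ //   masm.B(&done, reg_bit_set, x0, 3);  // conceptually tbnz x0, #3, done
+ //
+ // A longer sequence may be emitted if the label is out of range for the
+ // short-form instruction.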
+
+ void B(Label* label);
+ void B(Label* label, Condition cond);
+ void B(Condition cond, Label* label) {
+ B(label, cond);
+ }
+ void Bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ bfm(rd, rn, immr, imms);
+ }
+ void Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ bfi(rd, rn, lsb, width);
+ }
+ void Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ bfxil(rd, rn, lsb, width);
+ }
+ void Bind(Label* label);
+ // Bind a label to a specified offset from the start of the buffer.
+ void BindToOffset(Label* label, ptrdiff_t offset);
+ void Bl(Label* label) {
+ SingleEmissionCheckScope guard(this);
+ bl(label);
+ }
+ void Blr(const Register& xn) {
+ VIXL_ASSERT(!xn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ blr(xn);
+ }
+ void Br(const Register& xn) {
+ VIXL_ASSERT(!xn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ br(xn);
+ }
+ void Brk(int code = 0) {
+ SingleEmissionCheckScope guard(this);
+ brk(code);
+ }
+ void Cbnz(const Register& rt, Label* label);
+ void Cbz(const Register& rt, Label* label);
+ void Cinc(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cinc(rd, rn, cond);
+ }
+ void Cinv(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cinv(rd, rn, cond);
+ }
+ void Clrex() {
+ SingleEmissionCheckScope guard(this);
+ clrex();
+ }
+ void Cls(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cls(rd, rn);
+ }
+ void Clz(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ clz(rd, rn);
+ }
+ void Cneg(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cneg(rd, rn, cond);
+ }
+ void Cset(const Register& rd, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cset(rd, cond);
+ }
+ void Csetm(const Register& rd, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ csetm(rd, cond);
+ }
+ void Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ // The VIXL source code contains these assertions, but the AArch64 ISA
+ // explicitly permits the use of zero registers. CSET itself is defined
+ // in terms of CSINC with WZR/XZR.
+ //
+ // VIXL_ASSERT(!rn.IsZero());
+ // VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ csinc(rd, rn, rm, cond);
+ }
+ void Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ csinv(rd, rn, rm, cond);
+ }
+ void Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ csneg(rd, rn, rm, cond);
+ }
+ void Dmb(BarrierDomain domain, BarrierType type) {
+ SingleEmissionCheckScope guard(this);
+ dmb(domain, type);
+ }
+ void Dsb(BarrierDomain domain, BarrierType type) {
+ SingleEmissionCheckScope guard(this);
+ dsb(domain, type);
+ }
+ void Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ extr(rd, rn, rm, lsb);
+ }
+ void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fadd(vd, vn, vm);
+ }
+ void Fccmp(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond,
+ FPTrapFlags trap = DisableTrap) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ FPCCompareMacro(vn, vm, nzcv, cond, trap);
+ }
+ void Fccmpe(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond) {
+ Fccmp(vn, vm, nzcv, cond, EnableTrap);
+ }
+ void Fcmp(const VRegister& vn, const VRegister& vm,
+ FPTrapFlags trap = DisableTrap) {
+ SingleEmissionCheckScope guard(this);
+ FPCompareMacro(vn, vm, trap);
+ }
+ void Fcmp(const VRegister& vn, double value,
+ FPTrapFlags trap = DisableTrap);
+ void Fcmpe(const VRegister& vn, double value);
+ void Fcmpe(const VRegister& vn, const VRegister& vm) {
+ Fcmp(vn, vm, EnableTrap);
+ }
+ void Fcsel(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ fcsel(vd, vn, vm, cond);
+ }
+ void Fcvt(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvt(vd, vn);
+ }
+ void Fcvtl(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtl(vd, vn);
+ }
+ void Fcvtl2(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtl2(vd, vn);
+ }
+ void Fcvtn(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtn(vd, vn);
+ }
+ void Fcvtn2(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtn2(vd, vn);
+ }
+ void Fcvtxn(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtxn(vd, vn);
+ }
+ void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtxn2(vd, vn);
+ }
+ void Fcvtas(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtas(rd, vn);
+ }
+ void Fcvtau(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtau(rd, vn);
+ }
+ void Fcvtms(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtms(rd, vn);
+ }
+ void Fcvtmu(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtmu(rd, vn);
+ }
+ void Fcvtns(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtns(rd, vn);
+ }
+ void Fcvtnu(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtnu(rd, vn);
+ }
+ void Fcvtps(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtps(rd, vn);
+ }
+ void Fcvtpu(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtpu(rd, vn);
+ }
+ void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtzs(rd, vn, fbits);
+ }
+ void Fjcvtzs(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fjcvtzs(rd, vn);
+ }
+ void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtzu(rd, vn, fbits);
+ }
+ void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fdiv(vd, vn, vm);
+ }
+ void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmax(vd, vn, vm);
+ }
+ void Fmaxnm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmaxnm(vd, vn, vm);
+ }
+ void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmin(vd, vn, vm);
+ }
+ void Fminnm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fminnm(vd, vn, vm);
+ }
+ void Fmov(VRegister vd, VRegister vn) {
+ SingleEmissionCheckScope guard(this);
+ // Only skip the fmov when vd and vn are the same register and they are both
+ // D registers. fmov(s0, s0) is not a no-op because it clears the top word of
+ // d0. Technically, fmov(d0, d0) is not a no-op either because it clears
+ // the top of q0, but VRegister does not currently support Q registers.
+ if (!vd.Is(vn) || !vd.Is64Bits()) {
+ fmov(vd, vn);
+ }
+ }
+ void Fmov(VRegister vd, Register rn) {
+ SingleEmissionCheckScope guard(this);
+ fmov(vd, rn);
+ }
+ void Fmov(const VRegister& vd, int index, const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ fmov(vd, index, rn);
+ }
+ void Fmov(const Register& rd, const VRegister& vn, int index) {
+ SingleEmissionCheckScope guard(this);
+ fmov(rd, vn, index);
+ }
+
+ // Provide explicit double and float interfaces for FP immediate moves, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of vd. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
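+ //
+ // For instance (an illustrative sketch): passing a float through the float
+ // overload avoids a float->double->float round trip that could quieten a
+ // signalling NaN:
+ //
+ //   float snan = std::numeric_limits<float>::signaling_NaN();
+ //   masm.Fmov(s0, snan);  // float overload preserves the exact bit pattern
+ //   masm.Fmov(d0, 1.25);  // double overload for double-precision immediates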
+ void Fmov(VRegister vd, double imm);
+ void Fmov(VRegister vd, float imm);
+ // Provide a template to allow other types to be converted automatically.
+ template<typename T>
+ void Fmov(VRegister vd, T imm) {
+ Fmov(vd, static_cast<double>(imm));
+ }
+ void Fmov(Register rd, VRegister vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fmov(rd, vn);
+ }
+ void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmul(vd, vn, vm);
+ }
+ void Fnmul(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fnmul(vd, vn, vm);
+ }
+ void Fmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fmadd(vd, vn, vm, va);
+ }
+ void Fmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fmsub(vd, vn, vm, va);
+ }
+ void Fnmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fnmadd(vd, vn, vm, va);
+ }
+ void Fnmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fnmsub(vd, vn, vm, va);
+ }
+ void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fsub(vd, vn, vm);
+ }
+ void Hint(SystemHint code) {
+ SingleEmissionCheckScope guard(this);
+ hint(code);
+ }
+ void Hlt(int code) {
+ SingleEmissionCheckScope guard(this);
+ hlt(code);
+ }
+ void Isb() {
+ SingleEmissionCheckScope guard(this);
+ isb();
+ }
+ void Ldar(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldar(rt, src);
+ }
+ void Ldarb(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldarb(rt, src);
+ }
+ void Ldarh(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldarh(rt, src);
+ }
+ void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
+ VIXL_ASSERT(!rt.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ ldaxp(rt, rt2, src);
+ }
+ void Ldaxr(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldaxr(rt, src);
+ }
+ void Ldaxrb(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldaxrb(rt, src);
+ }
+ void Ldaxrh(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldaxrh(rt, src);
+ }
+
+// clang-format off
+#define COMPARE_AND_SWAP_SINGLE_MACRO_LIST(V) \
+ V(cas, Cas) \
+ V(casa, Casa) \
+ V(casl, Casl) \
+ V(casal, Casal) \
+ V(casb, Casb) \
+ V(casab, Casab) \
+ V(caslb, Caslb) \
+ V(casalb, Casalb) \
+ V(cash, Cash) \
+ V(casah, Casah) \
+ V(caslh, Caslh) \
+ V(casalh, Casalh)
+ // clang-format on
+
+#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(rs, rt, src); \
+ }
+ COMPARE_AND_SWAP_SINGLE_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
+
+// clang-format off
+#define COMPARE_AND_SWAP_PAIR_MACRO_LIST(V) \
+ V(casp, Casp) \
+ V(caspa, Caspa) \
+ V(caspl, Caspl) \
+ V(caspal, Caspal)
+ // clang-format on
+
+#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const Register& rs, const Register& rs2, const Register& rt, \
+ const Register& rt2, const MemOperand& src) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(rs, rs2, rt, rt2, src); \
+ }
+ COMPARE_AND_SWAP_PAIR_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
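+
+// Each generated macro mirrors the corresponding instruction. For example
+// (an illustrative sketch):
+//
+//   masm.Casal(w0, w1, MemOperand(x2));  // compare [x2] with w0; if equal,
+//                                        // store w1; w0 receives the loaded
+//                                        // value; acquire-release ordering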
+
+// These macros generate all the variations of the atomic memory operations,
+// e.g. ldadd, ldadda, ldaddb, staddl, etc.
+
+// clang-format off
+#define ATOMIC_MEMORY_SIMPLE_MACRO_LIST(V, DEF, MASM_PRE, ASM_PRE) \
+ V(DEF, MASM_PRE##add, ASM_PRE##add) \
+ V(DEF, MASM_PRE##clr, ASM_PRE##clr) \
+ V(DEF, MASM_PRE##eor, ASM_PRE##eor) \
+ V(DEF, MASM_PRE##set, ASM_PRE##set) \
+ V(DEF, MASM_PRE##smax, ASM_PRE##smax) \
+ V(DEF, MASM_PRE##smin, ASM_PRE##smin) \
+ V(DEF, MASM_PRE##umax, ASM_PRE##umax) \
+ V(DEF, MASM_PRE##umin, ASM_PRE##umin)
+
+#define ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \
+ V(MASM, ASM) \
+ V(MASM##l, ASM##l) \
+ V(MASM##b, ASM##b) \
+ V(MASM##lb, ASM##lb) \
+ V(MASM##h, ASM##h) \
+ V(MASM##lh, ASM##lh)
+
+#define ATOMIC_MEMORY_LOAD_MACRO_MODES(V, MASM, ASM) \
+ ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \
+ V(MASM##a, ASM##a) \
+ V(MASM##al, ASM##al) \
+ V(MASM##ab, ASM##ab) \
+ V(MASM##alb, ASM##alb) \
+ V(MASM##ah, ASM##ah) \
+ V(MASM##alh, ASM##alh)
+ // clang-format on
+
+#define DEFINE_MACRO_LOAD_ASM_FUNC(MASM, ASM) \
+ void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(rs, rt, src); \
+ }
+#define DEFINE_MACRO_STORE_ASM_FUNC(MASM, ASM) \
+ void MASM(const Register& rs, const MemOperand& src) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(rs, src); \
+ }
+
+ ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES,
+ DEFINE_MACRO_LOAD_ASM_FUNC,
+ Ld,
+ ld)
+ ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES,
+ DEFINE_MACRO_STORE_ASM_FUNC,
+ St,
+ st)
+
+#define DEFINE_MACRO_SWP_ASM_FUNC(MASM, ASM) \
+ void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(rs, rt, src); \
+ }
+
+ ATOMIC_MEMORY_LOAD_MACRO_MODES(DEFINE_MACRO_SWP_ASM_FUNC, Swp, swp)
+
+#undef DEFINE_MACRO_LOAD_ASM_FUNC
+#undef DEFINE_MACRO_STORE_ASM_FUNC
+#undef DEFINE_MACRO_SWP_ASM_FUNC
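+
+// Examples of the generated names (an illustrative sketch):
+//
+//   masm.Ldaddal(w0, w1, MemOperand(x2));  // w1 = [x2]; [x2] += w0; acq-rel
+//   masm.Stsmaxl(w0, MemOperand(x2));      // [x2] = max([x2], w0); release
+//   masm.Swpb(w0, w1, MemOperand(x2));     // byte-sized swap: w1 = old [x2];
+//                                          // [x2] = w0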
+
+ void Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldnp(rt, rt2, src);
+ }
+ // Provide both double and float interfaces for FP immediate loads, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of vt. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ void Ldr(const VRegister& vt, double imm) {
+ SingleEmissionCheckScope guard(this);
+ if (vt.Is64Bits()) {
+ ldr(vt, imm);
+ } else {
+ ldr(vt, static_cast<float>(imm));
+ }
+ }
+ void Ldr(const VRegister& vt, float imm) {
+ SingleEmissionCheckScope guard(this);
+ if (vt.Is32Bits()) {
+ ldr(vt, imm);
+ } else {
+ ldr(vt, static_cast<double>(imm));
+ }
+ }
+ /*
+ void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
+ VIXL_ASSERT(vt.IsQ());
+ SingleEmissionCheckScope guard(this);
+ ldr(vt, new Literal<uint64_t>(high64, low64,
+ &literal_pool_,
+ RawLiteral::kDeletedOnPlacementByPool));
+ }
+ */
+ void Ldr(const Register& rt, uint64_t imm) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ldr(rt, imm);
+ }
+ void Ldrsw(const Register& rt, uint32_t imm) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ldrsw(rt, imm);
+ }
+ void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
+ VIXL_ASSERT(!rt.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ ldxp(rt, rt2, src);
+ }
+ void Ldxr(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldxr(rt, src);
+ }
+ void Ldxrb(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldxrb(rt, src);
+ }
+ void Ldxrh(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldxrh(rt, src);
+ }
+ void Lsl(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lsl(rd, rn, shift);
+ }
+ void Lsl(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lslv(rd, rn, rm);
+ }
+ void Lsr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lsr(rd, rn, shift);
+ }
+ void Lsr(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lsrv(rd, rn, rm);
+ }
+ void Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ madd(rd, rn, rm, ra);
+ }
+ void Mneg(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ mneg(rd, rn, rm);
+ }
+ void Mov(const Register& rd, const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ mov(rd, rn);
+ }
+ void Movk(const Register& rd, uint64_t imm, int shift = -1) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ movk(rd, imm, shift);
+ }
+ void Mrs(const Register& rt, SystemRegister sysreg) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ mrs(rt, sysreg);
+ }
+ void Msr(SystemRegister sysreg, const Register& rt) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ msr(sysreg, rt);
+ }
+ void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
+ SingleEmissionCheckScope guard(this);
+ sys(op1, crn, crm, op2, rt);
+ }
+ void Dc(DataCacheOp op, const Register& rt) {
+ SingleEmissionCheckScope guard(this);
+ dc(op, rt);
+ }
+ void Ic(InstructionCacheOp op, const Register& rt) {
+ SingleEmissionCheckScope guard(this);
+ ic(op, rt);
+ }
+ void Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ msub(rd, rn, rm, ra);
+ }
+ void Mul(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ mul(rd, rn, rm);
+ }
+ void Nop() {
+ SingleEmissionCheckScope guard(this);
+ nop();
+ }
+ void Csdb() {
+ SingleEmissionCheckScope guard(this);
+ csdb();
+ }
+ void Rbit(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rbit(rd, rn);
+ }
+ void Ret(const Register& xn = lr) {
+ VIXL_ASSERT(!xn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ret(xn);
+ }
+ void Rev(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rev(rd, rn);
+ }
+ void Rev16(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rev16(rd, rn);
+ }
+ void Rev32(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rev32(rd, rn);
+ }
+ void Ror(const Register& rd, const Register& rs, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rs.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ror(rd, rs, shift);
+ }
+ void Ror(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rorv(rd, rn, rm);
+ }
+ void Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sbfiz(rd, rn, lsb, width);
+ }
+ void Sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sbfm(rd, rn, immr, imms);
+ }
+ void Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sbfx(rd, rn, lsb, width);
+ }
+ void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ scvtf(vd, rn, fbits);
+ }
+ void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sdiv(rd, rn, rm);
+ }
+ void Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smaddl(rd, rn, rm, ra);
+ }
+ void Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smsubl(rd, rn, rm, ra);
+ }
+ void Smull(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smull(rd, rn, rm);
+ }
+ void Smulh(const Register& xd, const Register& xn, const Register& xm) {
+ VIXL_ASSERT(!xd.IsZero());
+ VIXL_ASSERT(!xn.IsZero());
+ VIXL_ASSERT(!xm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smulh(xd, xn, xm);
+ }
+ void Stlr(const Register& rt, const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stlr(rt, dst);
+ }
+ void Stlrb(const Register& rt, const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stlrb(rt, dst);
+ }
+ void Stlrh(const Register& rt, const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stlrh(rt, dst);
+ }
+ void Stlxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ VIXL_ASSERT(!rs.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ stlxp(rs, rt, rt2, dst);
+ }
+ void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stlxr(rs, rt, dst);
+ }
+ void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stlxrb(rs, rt, dst);
+ }
+ void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stlxrh(rs, rt, dst);
+ }
+ void Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stnp(rt, rt2, dst);
+ }
+ void Stxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ VIXL_ASSERT(!rs.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ stxp(rs, rt, rt2, dst);
+ }
+ void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stxr(rs, rt, dst);
+ }
+ void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stxrb(rs, rt, dst);
+ }
+ void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stxrh(rs, rt, dst);
+ }
+ void Svc(int code) {
+ SingleEmissionCheckScope guard(this);
+ svc(code);
+ }
+ void Sxtb(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sxtb(rd, rn);
+ }
+ void Sxth(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sxth(rd, rn);
+ }
+ void Sxtw(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sxtw(rd, rn);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vm);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vn2, vm);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vn2, vn3, vm);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vn2, vn3, vn4, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vn2, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vn2, vn3, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vn2, vn3, vn4, vm);
+ }
+ void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbz(const Register& rt, unsigned bit_pos, Label* label);
+ void Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ubfiz(rd, rn, lsb, width);
+ }
+ void Ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ubfm(rd, rn, immr, imms);
+ }
+ void Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ubfx(rd, rn, lsb, width);
+ }
+ void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ucvtf(vd, rn, fbits);
+ }
+ void Udiv(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ udiv(rd, rn, rm);
+ }
+ void Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umaddl(rd, rn, rm, ra);
+ }
+ void Umull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umull(rd, rn, rm);
+ }
+ void Umulh(const Register& xd, const Register& xn, const Register& xm) {
+ VIXL_ASSERT(!xd.IsZero());
+ VIXL_ASSERT(!xn.IsZero());
+ VIXL_ASSERT(!xm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umulh(xd, xn, xm);
+ }
+ void Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umsubl(rd, rn, rm, ra);
+ }
+
+ void Unreachable() {
+ SingleEmissionCheckScope guard(this);
+ Emit(UNDEFINED_INST_PATTERN);
+ }
+
+ void Uxtb(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ uxtb(rd, rn);
+ }
+ void Uxth(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ uxth(rd, rn);
+ }
+ void Uxtw(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ uxtw(rd, rn);
+ }
+
+ // NEON 3 vector register instructions.
+ #define NEON_3VREG_MACRO_LIST(V) \
+ V(add, Add) \
+ V(addhn, Addhn) \
+ V(addhn2, Addhn2) \
+ V(addp, Addp) \
+ V(and_, And) \
+ V(bic, Bic) \
+ V(bif, Bif) \
+ V(bit, Bit) \
+ V(bsl, Bsl) \
+ V(cmeq, Cmeq) \
+ V(cmge, Cmge) \
+ V(cmgt, Cmgt) \
+ V(cmhi, Cmhi) \
+ V(cmhs, Cmhs) \
+ V(cmtst, Cmtst) \
+ V(eor, Eor) \
+ V(fabd, Fabd) \
+ V(facge, Facge) \
+ V(facgt, Facgt) \
+ V(faddp, Faddp) \
+ V(fcmeq, Fcmeq) \
+ V(fcmge, Fcmge) \
+ V(fcmgt, Fcmgt) \
+ V(fmaxnmp, Fmaxnmp) \
+ V(fmaxp, Fmaxp) \
+ V(fminnmp, Fminnmp) \
+ V(fminp, Fminp) \
+ V(fmla, Fmla) \
+ V(fmls, Fmls) \
+ V(fmulx, Fmulx) \
+ V(frecps, Frecps) \
+ V(frsqrts, Frsqrts) \
+ V(mla, Mla) \
+ V(mls, Mls) \
+ V(mul, Mul) \
+ V(orn, Orn) \
+ V(orr, Orr) \
+ V(pmul, Pmul) \
+ V(pmull, Pmull) \
+ V(pmull2, Pmull2) \
+ V(raddhn, Raddhn) \
+ V(raddhn2, Raddhn2) \
+ V(rsubhn, Rsubhn) \
+ V(rsubhn2, Rsubhn2) \
+ V(saba, Saba) \
+ V(sabal, Sabal) \
+ V(sabal2, Sabal2) \
+ V(sabd, Sabd) \
+ V(sabdl, Sabdl) \
+ V(sabdl2, Sabdl2) \
+ V(saddl, Saddl) \
+ V(saddl2, Saddl2) \
+ V(saddw, Saddw) \
+ V(saddw2, Saddw2) \
+ V(shadd, Shadd) \
+ V(shsub, Shsub) \
+ V(smax, Smax) \
+ V(smaxp, Smaxp) \
+ V(smin, Smin) \
+ V(sminp, Sminp) \
+ V(smlal, Smlal) \
+ V(smlal2, Smlal2) \
+ V(smlsl, Smlsl) \
+ V(smlsl2, Smlsl2) \
+ V(smull, Smull) \
+ V(smull2, Smull2) \
+ V(sqadd, Sqadd) \
+ V(sqdmlal, Sqdmlal) \
+ V(sqdmlal2, Sqdmlal2) \
+ V(sqdmlsl, Sqdmlsl) \
+ V(sqdmlsl2, Sqdmlsl2) \
+ V(sqdmulh, Sqdmulh) \
+ V(sqdmull, Sqdmull) \
+ V(sqdmull2, Sqdmull2) \
+ V(sqrdmulh, Sqrdmulh) \
+ V(sqrshl, Sqrshl) \
+ V(sqshl, Sqshl) \
+ V(sqsub, Sqsub) \
+ V(srhadd, Srhadd) \
+ V(srshl, Srshl) \
+ V(sshl, Sshl) \
+ V(ssubl, Ssubl) \
+ V(ssubl2, Ssubl2) \
+ V(ssubw, Ssubw) \
+ V(ssubw2, Ssubw2) \
+ V(sub, Sub) \
+ V(subhn, Subhn) \
+ V(subhn2, Subhn2) \
+ V(trn1, Trn1) \
+ V(trn2, Trn2) \
+ V(uaba, Uaba) \
+ V(uabal, Uabal) \
+ V(uabal2, Uabal2) \
+ V(uabd, Uabd) \
+ V(uabdl, Uabdl) \
+ V(uabdl2, Uabdl2) \
+ V(uaddl, Uaddl) \
+ V(uaddl2, Uaddl2) \
+ V(uaddw, Uaddw) \
+ V(uaddw2, Uaddw2) \
+ V(uhadd, Uhadd) \
+ V(uhsub, Uhsub) \
+ V(umax, Umax) \
+ V(umaxp, Umaxp) \
+ V(umin, Umin) \
+ V(uminp, Uminp) \
+ V(umlal, Umlal) \
+ V(umlal2, Umlal2) \
+ V(umlsl, Umlsl) \
+ V(umlsl2, Umlsl2) \
+ V(umull, Umull) \
+ V(umull2, Umull2) \
+ V(uqadd, Uqadd) \
+ V(uqrshl, Uqrshl) \
+ V(uqshl, Uqshl) \
+ V(uqsub, Uqsub) \
+ V(urhadd, Urhadd) \
+ V(urshl, Urshl) \
+ V(ushl, Ushl) \
+ V(usubl, Usubl) \
+ V(usubl2, Usubl2) \
+ V(usubw, Usubw) \
+ V(usubw2, Usubw2) \
+ V(uzp1, Uzp1) \
+ V(uzp2, Uzp2) \
+ V(zip1, Zip1) \
+ V(zip2, Zip2)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, vm); \
+ }
+ NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ // NEON 2 vector register instructions.
+ #define NEON_2VREG_MACRO_LIST(V) \
+ V(abs, Abs) \
+ V(addp, Addp) \
+ V(addv, Addv) \
+ V(cls, Cls) \
+ V(clz, Clz) \
+ V(cnt, Cnt) \
+ V(fabs, Fabs) \
+ V(faddp, Faddp) \
+ V(fcvtas, Fcvtas) \
+ V(fcvtau, Fcvtau) \
+ V(fcvtms, Fcvtms) \
+ V(fcvtmu, Fcvtmu) \
+ V(fcvtns, Fcvtns) \
+ V(fcvtnu, Fcvtnu) \
+ V(fcvtps, Fcvtps) \
+ V(fcvtpu, Fcvtpu) \
+ V(fmaxnmp, Fmaxnmp) \
+ V(fmaxnmv, Fmaxnmv) \
+ V(fmaxp, Fmaxp) \
+ V(fmaxv, Fmaxv) \
+ V(fminnmp, Fminnmp) \
+ V(fminnmv, Fminnmv) \
+ V(fminp, Fminp) \
+ V(fminv, Fminv) \
+ V(fneg, Fneg) \
+ V(frecpe, Frecpe) \
+ V(frecpx, Frecpx) \
+ V(frinta, Frinta) \
+ V(frinti, Frinti) \
+ V(frintm, Frintm) \
+ V(frintn, Frintn) \
+ V(frintp, Frintp) \
+ V(frintx, Frintx) \
+ V(frintz, Frintz) \
+ V(frsqrte, Frsqrte) \
+ V(fsqrt, Fsqrt) \
+ V(mov, Mov) \
+ V(mvn, Mvn) \
+ V(neg, Neg) \
+ V(not_, Not) \
+ V(rbit, Rbit) \
+ V(rev16, Rev16) \
+ V(rev32, Rev32) \
+ V(rev64, Rev64) \
+ V(sadalp, Sadalp) \
+ V(saddlp, Saddlp) \
+ V(saddlv, Saddlv) \
+ V(smaxv, Smaxv) \
+ V(sminv, Sminv) \
+ V(sqabs, Sqabs) \
+ V(sqneg, Sqneg) \
+ V(sqxtn, Sqxtn) \
+ V(sqxtn2, Sqxtn2) \
+ V(sqxtun, Sqxtun) \
+ V(sqxtun2, Sqxtun2) \
+ V(suqadd, Suqadd) \
+ V(sxtl, Sxtl) \
+ V(sxtl2, Sxtl2) \
+ V(uadalp, Uadalp) \
+ V(uaddlp, Uaddlp) \
+ V(uaddlv, Uaddlv) \
+ V(umaxv, Umaxv) \
+ V(uminv, Uminv) \
+ V(uqxtn, Uqxtn) \
+ V(uqxtn2, Uqxtn2) \
+ V(urecpe, Urecpe) \
+ V(ursqrte, Ursqrte) \
+ V(usqadd, Usqadd) \
+ V(uxtl, Uxtl) \
+ V(uxtl2, Uxtl2) \
+ V(xtn, Xtn) \
+ V(xtn2, Xtn2)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn); \
+ }
+ NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ // NEON 2 vector register with immediate instructions.
+ #define NEON_2VREG_FPIMM_MACRO_LIST(V) \
+ V(fcmeq, Fcmeq) \
+ V(fcmge, Fcmge) \
+ V(fcmgt, Fcmgt) \
+ V(fcmle, Fcmle) \
+ V(fcmlt, Fcmlt)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ double imm) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, imm); \
+ }
+ NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ // NEON by element instructions.
+ #define NEON_BYELEMENT_MACRO_LIST(V) \
+ V(fmul, Fmul) \
+ V(fmla, Fmla) \
+ V(fmls, Fmls) \
+ V(fmulx, Fmulx) \
+ V(mul, Mul) \
+ V(mla, Mla) \
+ V(mls, Mls) \
+ V(sqdmulh, Sqdmulh) \
+ V(sqrdmulh, Sqrdmulh) \
+ V(sqdmull, Sqdmull) \
+ V(sqdmull2, Sqdmull2) \
+ V(sqdmlal, Sqdmlal) \
+ V(sqdmlal2, Sqdmlal2) \
+ V(sqdmlsl, Sqdmlsl) \
+ V(sqdmlsl2, Sqdmlsl2) \
+ V(smull, Smull) \
+ V(smull2, Smull2) \
+ V(smlal, Smlal) \
+ V(smlal2, Smlal2) \
+ V(smlsl, Smlsl) \
+ V(smlsl2, Smlsl2) \
+ V(umull, Umull) \
+ V(umull2, Umull2) \
+ V(umlal, Umlal) \
+ V(umlal2, Umlal2) \
+ V(umlsl, Umlsl) \
+ V(umlsl2, Umlsl2)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index \
+ ) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, vm, vm_index); \
+ }
+ NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ #define NEON_2VREG_SHIFT_MACRO_LIST(V) \
+ V(rshrn, Rshrn) \
+ V(rshrn2, Rshrn2) \
+ V(shl, Shl) \
+ V(shll, Shll) \
+ V(shll2, Shll2) \
+ V(shrn, Shrn) \
+ V(shrn2, Shrn2) \
+ V(sli, Sli) \
+ V(sqrshrn, Sqrshrn) \
+ V(sqrshrn2, Sqrshrn2) \
+ V(sqrshrun, Sqrshrun) \
+ V(sqrshrun2, Sqrshrun2) \
+ V(sqshl, Sqshl) \
+ V(sqshlu, Sqshlu) \
+ V(sqshrn, Sqshrn) \
+ V(sqshrn2, Sqshrn2) \
+ V(sqshrun, Sqshrun) \
+ V(sqshrun2, Sqshrun2) \
+ V(sri, Sri) \
+ V(srshr, Srshr) \
+ V(srsra, Srsra) \
+ V(sshll, Sshll) \
+ V(sshll2, Sshll2) \
+ V(sshr, Sshr) \
+ V(ssra, Ssra) \
+ V(uqrshrn, Uqrshrn) \
+ V(uqrshrn2, Uqrshrn2) \
+ V(uqshl, Uqshl) \
+ V(uqshrn, Uqshrn) \
+ V(uqshrn2, Uqshrn2) \
+ V(urshr, Urshr) \
+ V(ursra, Ursra) \
+ V(ushll, Ushll) \
+ V(ushll2, Ushll2) \
+ V(ushr, Ushr) \
+    V(usra, Usra)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ int shift) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, shift); \
+ }
+ NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ void Bic(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0) {
+ SingleEmissionCheckScope guard(this);
+ bic(vd, imm8, left_shift);
+ }
+ void Cmeq(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmeq(vd, vn, imm);
+ }
+ void Cmge(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmge(vd, vn, imm);
+ }
+ void Cmgt(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmgt(vd, vn, imm);
+ }
+ void Cmle(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmle(vd, vn, imm);
+ }
+ void Cmlt(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmlt(vd, vn, imm);
+ }
+ void Dup(const VRegister& vd,
+ const VRegister& vn,
+ int index) {
+ SingleEmissionCheckScope guard(this);
+ dup(vd, vn, index);
+ }
+ void Dup(const VRegister& vd,
+ const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ dup(vd, rn);
+ }
+ void Ext(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int index) {
+ SingleEmissionCheckScope guard(this);
+ ext(vd, vn, vm, index);
+ }
+ void Ins(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ ins(vd, vd_index, vn, vn_index);
+ }
+ void Ins(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ ins(vd, vd_index, rn);
+ }
+ void Ld1(const VRegister& vt,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, src);
+ }
+ void Ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, vt2, src);
+ }
+ void Ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, vt2, vt3, src);
+ }
+ void Ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, vt2, vt3, vt4, src);
+ }
+ void Ld1(const VRegister& vt,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, lane, src);
+ }
+ void Ld1r(const VRegister& vt,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1r(vt, src);
+ }
+ void Ld2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld2(vt, vt2, src);
+ }
+ void Ld2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld2(vt, vt2, lane, src);
+ }
+ void Ld2r(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld2r(vt, vt2, src);
+ }
+ void Ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld3(vt, vt2, vt3, src);
+ }
+ void Ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld3(vt, vt2, vt3, lane, src);
+ }
+ void Ld3r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld3r(vt, vt2, vt3, src);
+ }
+ void Ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld4(vt, vt2, vt3, vt4, src);
+ }
+ void Ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld4(vt, vt2, vt3, vt4, lane, src);
+ }
+ void Ld4r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld4r(vt, vt2, vt3, vt4, src);
+ }
+ void Mov(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ mov(vd, vd_index, vn, vn_index);
+ }
+ void Mov(const VRegister& vd,
+ const VRegister& vn,
+ int index) {
+ SingleEmissionCheckScope guard(this);
+ mov(vd, vn, index);
+ }
+ void Mov(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ mov(vd, vd_index, rn);
+ }
+ void Mov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ mov(rd, vn, vn_index);
+ }
+ void Movi(const VRegister& vd,
+ uint64_t imm,
+ Shift shift = LSL,
+ int shift_amount = 0);
+ void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
+ void Mvni(const VRegister& vd,
+ const int imm8,
+ Shift shift = LSL,
+ const int shift_amount = 0) {
+ SingleEmissionCheckScope guard(this);
+ mvni(vd, imm8, shift, shift_amount);
+ }
+ void Orr(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0) {
+ SingleEmissionCheckScope guard(this);
+ orr(vd, imm8, left_shift);
+ }
+ void Scvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ scvtf(vd, vn, fbits);
+ }
+ void Ucvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ ucvtf(vd, vn, fbits);
+ }
+ void Fcvtzs(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ fcvtzs(vd, vn, fbits);
+ }
+ void Fcvtzu(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ fcvtzu(vd, vn, fbits);
+ }
+ void St1(const VRegister& vt,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, dst);
+ }
+ void St1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, vt2, dst);
+ }
+ void St1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, vt2, vt3, dst);
+ }
+ void St1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, vt2, vt3, vt4, dst);
+ }
+ void St1(const VRegister& vt,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, lane, dst);
+ }
+ void St2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st2(vt, vt2, dst);
+ }
+ void St3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st3(vt, vt2, vt3, dst);
+ }
+ void St4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st4(vt, vt2, vt3, vt4, dst);
+ }
+ void St2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st2(vt, vt2, lane, dst);
+ }
+ void St3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st3(vt, vt2, vt3, lane, dst);
+ }
+ void St4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st4(vt, vt2, vt3, vt4, lane, dst);
+ }
+ void Smov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ smov(rd, vn, vn_index);
+ }
+ void Umov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ umov(rd, vn, vn_index);
+ }
+ void Crc32b(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32b(rd, rn, rm);
+ }
+ void Crc32h(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32h(rd, rn, rm);
+ }
+ void Crc32w(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32w(rd, rn, rm);
+ }
+ void Crc32x(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32x(rd, rn, rm);
+ }
+ void Crc32cb(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32cb(rd, rn, rm);
+ }
+ void Crc32ch(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32ch(rd, rn, rm);
+ }
+ void Crc32cw(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32cw(rd, rn, rm);
+ }
+ void Crc32cx(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32cx(rd, rn, rm);
+ }
+
+ // Push the system stack pointer (sp) down to allow the same to be done to
+ // the current stack pointer (according to StackPointer()). This must be
+ // called _before_ accessing the memory.
+ //
+ // This is necessary when pushing or otherwise adding things to the stack, to
+ // satisfy the AAPCS64 constraint that the memory below the system stack
+ // pointer is not accessed.
+ //
+ // This method asserts that StackPointer() is not sp, since the call does
+ // not make sense in that context.
+ //
+ // TODO: This method can only accept values of 'space' that can be encoded in
+ // one instruction. Refer to the implementation for details.
+ void BumpSystemStackPointer(const Operand& space);
+
+ // Set the current stack pointer, but don't generate any code.
+ void SetStackPointer64(const Register& stack_pointer) {
+ VIXL_ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
+ sp_ = stack_pointer;
+ }
+
+ // Return the current stack pointer, as set by SetStackPointer.
+ const Register& StackPointer() const {
+ return sp_;
+ }
+
+ const Register& GetStackPointer64() const {
+ return sp_;
+ }
+
+ js::jit::RegisterOrSP getStackPointer() const {
+ return js::jit::RegisterOrSP(sp_.code());
+ }
+
+ CPURegList* TmpList() { return &tmp_list_; }
+ CPURegList* FPTmpList() { return &fptmp_list_; }
+
+ // Trace control when running the debug simulator.
+ //
+ // For example:
+ //
+ // __ Trace(LOG_REGS, TRACE_ENABLE);
+  //   Will add register state to the trace if it was not already being logged.
+ //
+ // __ Trace(LOG_DISASM, TRACE_DISABLE);
+ // Will stop logging disassembly. It has no effect if the disassembly wasn't
+ // already being logged.
+ void Trace(TraceParameters parameters, TraceCommand command);
+
+ // Log the requested data independently of what is being traced.
+ //
+ // For example:
+ //
+ // __ Log(LOG_FLAGS)
+ // Will output the flags.
+ void Log(TraceParameters parameters);
+
+ // Enable or disable instrumentation when an Instrument visitor is attached to
+ // the simulator.
+ void EnableInstrumentation();
+ void DisableInstrumentation();
+
+ // Add a marker to the instrumentation data produced by an Instrument visitor.
+ // The name is a two character string that will be attached to the marker in
+ // the output data.
+ void AnnotateInstrumentation(const char* marker_name);
+
+ private:
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together setup code for a large block of
+ // registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size,
+ const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3);
+ void PopHelper(int count, int size,
+ const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3);
+
+ void Movi16bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi32bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi64bitHelper(const VRegister& vd, uint64_t imm);
+
+ // Perform necessary maintenance operations before a push or pop.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PrepareForPush(int count, int size);
+ void PrepareForPop(int count, int size);
+
+ // The actual implementation of load and store operations for CPURegList.
+ enum LoadStoreCPURegListAction {
+ kLoad,
+ kStore
+ };
+ void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
+ CPURegList registers,
+ const MemOperand& mem);
+ // Returns a MemOperand suitable for loading or storing a CPURegList at `dst`.
+ // This helper may allocate registers from `scratch_scope` and generate code
+ // to compute an intermediate address. The resulting MemOperand is only valid
+ // as long as `scratch_scope` remains valid.
+ MemOperand BaseMemOperandForLoadStoreCPURegList(
+ const CPURegList& registers,
+ const MemOperand& mem,
+ UseScratchRegisterScope* scratch_scope);
+
+ bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
+ return !Instruction::IsValidImmPCOffset(branch_type, nextOffset().getOffset() - label->offset());
+ }
+
+ // The register to use as a stack pointer for stack operations.
+ Register sp_;
+
+ // Scratch registers available for use by the MacroAssembler.
+ CPURegList tmp_list_;
+ CPURegList fptmp_list_;
+
+ ptrdiff_t checkpoint_;
+ ptrdiff_t recommended_checkpoint_;
+};
+
+
+// All Assembler emits MUST acquire/release the underlying code buffer. The
+// helper scope below will do so and optionally ensure the buffer is big enough
+// to receive the emit. It is possible to request the scope not to perform any
+// checks (kNoCheck) if for example it is known in advance the buffer size is
+// adequate or there is some other size checking mechanism in place.
+class CodeBufferCheckScope {
+ public:
+ // Tell whether or not the scope needs to ensure the associated CodeBuffer
+ // has enough space for the requested size.
+ enum CheckPolicy {
+ kNoCheck,
+ kCheck
+ };
+
+ // Tell whether or not the scope should assert the amount of code emitted
+ // within the scope is consistent with the requested amount.
+ enum AssertPolicy {
+ kNoAssert, // No assert required.
+ kExactSize, // The code emitted must be exactly size bytes.
+ kMaximumSize // The code emitted must be at most size bytes.
+ };
+
+ CodeBufferCheckScope(Assembler* assm,
+ size_t size,
+ CheckPolicy check_policy = kCheck,
+ AssertPolicy assert_policy = kMaximumSize)
+ { }
+
+ // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
+ explicit CodeBufferCheckScope(Assembler* assm) {}
+};
+
+
+// Use this scope when you need a one-to-one mapping between methods and
+// instructions. This scope prevents the MacroAssembler from being called and
+// literal pools from being emitted. It also asserts the number of instructions
+// emitted is what you specified when creating the scope.
+// FIXME: Because of the disabled calls below, this class asserts nothing.
+class InstructionAccurateScope : public CodeBufferCheckScope {
+ public:
+ InstructionAccurateScope(MacroAssembler* masm,
+ int64_t count,
+ AssertPolicy policy = kExactSize)
+ : CodeBufferCheckScope(masm,
+ (count * kInstructionSize),
+ kCheck,
+ policy) {
+ }
+};
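+// Illustrative usage (a sketch, not part of the original VIXL sources); `masm`
+// and the `done` label are assumed to exist in the caller:
+//
+//   {
+//     InstructionAccurateScope scope(&masm, 2);  // expect exactly 2 instructions
+//     __ b(&done);   // raw, single-instruction branch
+//     __ nop();
+//   }
+//
+// Note that, per the FIXME above, this copy of the scope does not actually
+// enforce the instruction count; it mainly documents intent.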
+
+
+// This scope utility allows scratch registers to be managed safely. The
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// registers. These registers can be allocated on demand, and will be returned
+// at the end of the scope.
+//
+// When the scope ends, the MacroAssembler's lists will be restored to their
+// original state, even if the lists were modified by some other means.
+class UseScratchRegisterScope {
+ public:
+ // This constructor implicitly calls the `Open` function to initialise the
+ // scope, so it is ready to use immediately after it has been constructed.
+ explicit UseScratchRegisterScope(MacroAssembler* masm);
+ // This constructor allows deferred and optional initialisation of the scope.
+ // The user is required to explicitly call the `Open` function before using
+ // the scope.
+ UseScratchRegisterScope();
+ // This function performs the actual initialisation work.
+ void Open(MacroAssembler* masm);
+
+ // The destructor always implicitly calls the `Close` function.
+ ~UseScratchRegisterScope();
+ // This function performs the cleaning-up work. It must succeed even if the
+ // scope has not been opened. It is safe to call multiple times.
+ void Close();
+
+
+ bool IsAvailable(const CPURegister& reg) const;
+
+
+ // Take a register from the appropriate temps list. It will be returned
+ // automatically when the scope ends.
+ Register AcquireW() { return AcquireNextAvailable(available_).W(); }
+ Register AcquireX() { return AcquireNextAvailable(available_).X(); }
+ VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
+ VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+ VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
+
+
+ Register AcquireSameSizeAs(const Register& reg);
+ VRegister AcquireSameSizeAs(const VRegister& reg);
+
+
+ // Explicitly release an acquired (or excluded) register, putting it back in
+ // the appropriate temps list.
+ void Release(const CPURegister& reg);
+
+
+ // Make the specified registers available as scratch registers for the
+ // duration of this scope.
+ void Include(const CPURegList& list);
+ void Include(const Register& reg1,
+ const Register& reg2 = NoReg,
+ const Register& reg3 = NoReg,
+ const Register& reg4 = NoReg);
+ void Include(const VRegister& reg1,
+ const VRegister& reg2 = NoVReg,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+
+ // Make sure that the specified registers are not available in this scope.
+ // This can be used to prevent helper functions from using sensitive
+ // registers, for example.
+ void Exclude(const CPURegList& list);
+ void Exclude(const Register& reg1,
+ const Register& reg2 = NoReg,
+ const Register& reg3 = NoReg,
+ const Register& reg4 = NoReg);
+ void Exclude(const VRegister& reg1,
+ const VRegister& reg2 = NoVReg,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+ void Exclude(const CPURegister& reg1,
+ const CPURegister& reg2 = NoCPUReg,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg);
+
+
+ // Prevent any scratch registers from being used in this scope.
+ void ExcludeAll();
+
+
+ private:
+ static CPURegister AcquireNextAvailable(CPURegList* available);
+
+ static void ReleaseByCode(CPURegList* available, int code);
+
+ static void ReleaseByRegList(CPURegList* available,
+ RegList regs);
+
+ static void IncludeByRegList(CPURegList* available,
+ RegList exclude);
+
+ static void ExcludeByRegList(CPURegList* available,
+ RegList exclude);
+
+ // Available scratch registers.
+ CPURegList* available_; // kRegister
+ CPURegList* availablefp_; // kVRegister
+
+ // The state of the available lists at the start of this scope.
+ RegList old_available_; // kRegister
+ RegList old_availablefp_; // kVRegister
+#ifdef DEBUG
+ bool initialised_;
+#endif
+
+ // Disallow copy constructor and operator=.
+ UseScratchRegisterScope(const UseScratchRegisterScope&) {
+ VIXL_UNREACHABLE();
+ }
+ void operator=(const UseScratchRegisterScope&) {
+ VIXL_UNREACHABLE();
+ }
+};
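+// Illustrative usage (a sketch, not part of the original VIXL sources); `masm`
+// is assumed to be a MacroAssembler in scope:
+//
+//   {
+//     UseScratchRegisterScope temps(&masm);
+//     Register scratch = temps.AcquireX();     // 64-bit scratch from TmpList().
+//     VRegister fpScratch = temps.AcquireD();  // double scratch from FPTmpList().
+//     masm.Mov(scratch, 42);
+//     // Both registers are returned to their lists when `temps` is destroyed.
+//   }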
+
+
+} // namespace vixl
+
+#endif // VIXL_A64_MACRO_ASSEMBLER_A64_H_
diff --git a/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
new file mode 100644
index 0000000000..b9189cc23b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
@@ -0,0 +1,610 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/Label.h"
+
+namespace vixl {
+
+using LabelDoc = js::jit::DisassemblerSpew::LabelDoc;
+
+// Assembler
+void Assembler::FinalizeCode() {
+#ifdef DEBUG
+ finalized_ = true;
+#endif
+}
+
+// Unbound Label Representation.
+//
+// We can have multiple branches using the same label before it is bound.
+// Assembler::bind() must then be able to enumerate all the branches and patch
+// them to target the final label location.
+//
+// When a Label is unbound with uses, its offset is pointing to the tip of a
+// linked list of uses. The uses can be branches or adr/adrp instructions. In
+// the case of branches, the next member in the linked list is simply encoded
+// as the branch target. For adr/adrp, the relative pc offset is encoded in the
+// immediate field as a signed instruction offset.
+//
+// In both cases, the end of the list is encoded as a 0 pc offset, i.e. the
+// tail is pointing to itself.
+
+static const ptrdiff_t kEndOfLabelUseList = 0;
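+// Worked example (illustrative, not from the original sources): if a label has
+// uses at buffer offsets 0x40 and 0x20, the branch at 0x40 encodes a raw offset
+// of -8 instructions (targeting 0x20), the branch at 0x20 encodes 0 to mark the
+// tail of the list, and label->offset() points at 0x40, the head.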
+
+BufferOffset
+MozBaseAssembler::NextLink(BufferOffset cur)
+{
+ Instruction* link = getInstructionAt(cur);
+ // Raw encoded offset.
+ ptrdiff_t offset = link->ImmPCRawOffset();
+ // End of the list is encoded as 0.
+ if (offset == kEndOfLabelUseList)
+ return BufferOffset();
+ // The encoded offset is the number of instructions to move.
+ return BufferOffset(cur.getOffset() + offset * kInstructionSize);
+}
+
+static ptrdiff_t
+EncodeOffset(BufferOffset cur, BufferOffset next)
+{
+ MOZ_ASSERT(next.assigned() && cur.assigned());
+ ptrdiff_t offset = next.getOffset() - cur.getOffset();
+ MOZ_ASSERT(offset % kInstructionSize == 0);
+ return offset / kInstructionSize;
+}
+
+void
+MozBaseAssembler::SetNextLink(BufferOffset cur, BufferOffset next)
+{
+ Instruction* link = getInstructionAt(cur);
+ link->SetImmPCRawOffset(EncodeOffset(cur, next));
+}
+
+// A common implementation for the LinkAndGet<Type>OffsetTo helpers.
+//
+// If the label is bound, returns the offset as a multiple of 1 << elementShift.
+// Otherwise, links the instruction to the label and returns the raw offset to
+// encode. (This will be an instruction count.)
+//
+// The offset is calculated by aligning the PC and label addresses down to a
+// multiple of 1 << elementShift, then calculating the (scaled) offset between
+// them. This matches the semantics of adrp, for example. (Assuming that the
+// assembler buffer is page-aligned, which it probably isn't.)
+//
+// For an unbound label, the returned offset will be encodable in the provided
+// branch range. If the label is already bound, the caller is expected to make
+// sure that it is in range, and emit the necessary branch instructions if it
+// isn't.
+//
+ptrdiff_t
+MozBaseAssembler::LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
+ unsigned elementShift, Label* label)
+{
+ if (armbuffer_.oom())
+ return kEndOfLabelUseList;
+
+ if (label->bound()) {
+ // The label is bound: all uses are already linked.
+ ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() >> elementShift);
+ ptrdiff_t label_offset = ptrdiff_t(label->offset() >> elementShift);
+ return label_offset - branch_offset;
+ }
+
+ // Keep track of short-range branches targeting unbound labels. We may need
+ // to insert veneers in PatchShortRangeBranchToVeneer() below.
+ if (branchRange < NumShortBranchRangeTypes) {
+ // This is the last possible branch target.
+ BufferOffset deadline(branch.getOffset() +
+ Instruction::ImmBranchMaxForwardOffset(branchRange));
+ armbuffer_.registerBranchDeadline(branchRange, deadline);
+ }
+
+ // The label is unbound and previously unused: Store the offset in the label
+ // itself for patching by bind().
+ if (!label->used()) {
+ label->use(branch.getOffset());
+ return kEndOfLabelUseList;
+ }
+
+ // The label is unbound and has multiple users. Create a linked list between
+ // the branches, and update the linked list head in the label struct. This is
+ // not always trivial since the branches in the linked list have limited
+ // ranges.
+
+ // What is the earliest buffer offset that would be reachable by the branch
+ // we're about to add?
+ ptrdiff_t earliestReachable =
+ branch.getOffset() + Instruction::ImmBranchMinBackwardOffset(branchRange);
+
+ // If the existing instruction at the head of the list is within reach of the
+ // new branch, we can simply insert the new branch at the front of the list.
+ if (label->offset() >= earliestReachable) {
+ ptrdiff_t offset = EncodeOffset(branch, BufferOffset(label));
+ label->use(branch.getOffset());
+ MOZ_ASSERT(offset != kEndOfLabelUseList);
+ return offset;
+ }
+
+ // The label already has a linked list of uses, but we can't reach the head
+ // of the list with the allowed branch range. Insert this branch at a
+ // different position in the list.
+ //
+ // Find an existing branch, exbr, such that:
+ //
+ // 1. The new branch can be reached by exbr, and either
+ // 2a. The new branch can reach exbr's target, or
+ // 2b. The exbr branch is at the end of the list.
+ //
+ // Then the new branch can be inserted after exbr in the linked list.
+ //
+ // We know that it is always possible to find an exbr branch satisfying these
+ // conditions because of the PatchShortRangeBranchToVeneer() mechanism. All
+ // branches are guaranteed to either be able to reach the end of the
+ // assembler buffer, or they will be pointing to an unconditional branch that
+ // can.
+ //
+ // In particular, the end of the list is always a viable candidate, so we'll
+  // just use that.
+ BufferOffset next(label);
+ BufferOffset exbr;
+ do {
+ exbr = next;
+ next = NextLink(next);
+ } while (next.assigned());
+ SetNextLink(exbr, branch);
+
+ // This branch becomes the new end of the list.
+ return kEndOfLabelUseList;
+}
+
+ptrdiff_t MozBaseAssembler::LinkAndGetByteOffsetTo(BufferOffset branch, Label* label) {
+ return LinkAndGetOffsetTo(branch, UncondBranchRangeType, 0, label);
+}
+
+ptrdiff_t MozBaseAssembler::LinkAndGetInstructionOffsetTo(BufferOffset branch,
+ ImmBranchRangeType branchRange,
+ Label* label) {
+ return LinkAndGetOffsetTo(branch, branchRange, kInstructionSizeLog2, label);
+}
+
+ptrdiff_t MozBaseAssembler::LinkAndGetPageOffsetTo(BufferOffset branch, Label* label) {
+ return LinkAndGetOffsetTo(branch, UncondBranchRangeType, kPageSizeLog2, label);
+}
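+// Worked example (illustrative): for an adrp at buffer offset 0x1234 targeting
+// a label bound at offset 0x3000, LinkAndGetPageOffsetTo computes
+// (0x3000 >> kPageSizeLog2) - (0x1234 >> kPageSizeLog2) = 3 - 1 = 2 pages,
+// matching the element-aligned calculation described above.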
+
+BufferOffset Assembler::b(int imm26, const LabelDoc& doc) {
+ return EmitBranch(B | ImmUncondBranch(imm26), doc);
+}
+
+
+void Assembler::b(Instruction* at, int imm26) {
+ return EmitBranch(at, B | ImmUncondBranch(imm26));
+}
+
+
+BufferOffset Assembler::b(int imm19, Condition cond, const LabelDoc& doc) {
+ return EmitBranch(B_cond | ImmCondBranch(imm19) | cond, doc);
+}
+
+
+void Assembler::b(Instruction* at, int imm19, Condition cond) {
+ EmitBranch(at, B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+BufferOffset Assembler::b(Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label), doc);
+}
+
+
+BufferOffset Assembler::b(Label* label, Condition cond) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), cond, doc);
+}
+
+void Assembler::br(Instruction* at, const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ // No need for EmitBranch(): no immediate offset needs fixing.
+ Emit(at, BR | Rn(xn));
+}
+
+
+void Assembler::blr(Instruction* at, const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ // No need for EmitBranch(): no immediate offset needs fixing.
+ Emit(at, BLR | Rn(xn));
+}
+
+
+void Assembler::bl(int imm26, const LabelDoc& doc) {
+ EmitBranch(BL | ImmUncondBranch(imm26), doc);
+}
+
+
+void Assembler::bl(Instruction* at, int imm26) {
+ EmitBranch(at, BL | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::bl(Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return bl(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label), doc);
+}
+
+
+void Assembler::cbz(const Register& rt, int imm19, const LabelDoc& doc) {
+ EmitBranch(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt), doc);
+}
+
+
+void Assembler::cbz(Instruction* at, const Register& rt, int imm19) {
+ EmitBranch(at, SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(const Register& rt, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return cbz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), doc);
+}
+
+
+void Assembler::cbnz(const Register& rt, int imm19, const LabelDoc& doc) {
+ EmitBranch(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt), doc);
+}
+
+
+void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) {
+ EmitBranch(at, SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(const Register& rt, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return cbnz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), doc);
+}
+
+
+void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt), doc);
+}
+
+
+void Assembler::tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(at, TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label), doc);
+}
+
+
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt), doc);
+}
+
+
+void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label), doc);
+}
+
+
+void Assembler::adr(const Register& rd, int imm21, const LabelDoc& doc) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(ADR | ImmPCRelAddress(imm21) | Rd(rd), doc);
+}
+
+
+void Assembler::adr(Instruction* at, const Register& rd, int imm21) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(at, ADR | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adr(const Register& rd, Label* label) {
+ // Encode the relative offset from the inserted adr to the label.
+ LabelDoc doc = refLabel(label);
+ return adr(rd, LinkAndGetByteOffsetTo(nextInstrOffset(), label), doc);
+}
+
+
+void Assembler::adrp(const Register& rd, int imm21, const LabelDoc& doc) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(ADRP | ImmPCRelAddress(imm21) | Rd(rd), doc);
+}
+
+
+void Assembler::adrp(Instruction* at, const Register& rd, int imm21) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(at, ADRP | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adrp(const Register& rd, Label* label) {
+ VIXL_ASSERT(AllowPageOffsetDependentCode());
+ // Encode the relative offset from the inserted adr to the label.
+ LabelDoc doc = refLabel(label);
+ return adrp(rd, LinkAndGetPageOffsetTo(nextInstrOffset(), label), doc);
+}
+
+
+BufferOffset Assembler::ands(const Register& rd, const Register& rn, const Operand& operand) {
+ return Logical(rd, rn, operand, ANDS);
+}
+
+
+BufferOffset Assembler::tst(const Register& rn, const Operand& operand) {
+ return ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void Assembler::ldr(Instruction* at, const CPURegister& rt, int imm19) {
+ LoadLiteralOp op = LoadLiteralOpFor(rt);
+ Emit(at, op | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+BufferOffset Assembler::hint(SystemHint code) {
+ return Emit(HINT | ImmHint(code));
+}
+
+
+void Assembler::hint(Instruction* at, SystemHint code) {
+ Emit(at, HINT | ImmHint(code));
+}
+
+
+void Assembler::svc(Instruction* at, int code) {
+ VIXL_ASSERT(IsUint16(code));
+ Emit(at, SVC | ImmException(code));
+}
+
+
+void Assembler::nop(Instruction* at) {
+ hint(at, NOP);
+}
+
+
+void Assembler::csdb(Instruction* at) {
+ hint(at, CSDB);
+}
+
+
+BufferOffset Assembler::Logical(const Register& rd, const Register& rn,
+ const Operand& operand, LogicalOp op)
+{
+ VIXL_ASSERT(rd.size() == rn.size());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.size();
+
+ VIXL_ASSERT(immediate != 0);
+ VIXL_ASSERT(immediate != -1);
+ VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ return LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // This case is handled in the macro assembler.
+ VIXL_UNREACHABLE();
+ }
+ } else {
+ VIXL_ASSERT(operand.IsShiftedRegister());
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
+ return DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
+ }
+}
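+// For illustration (not from the original sources): an immediate such as 0xff
+// is a valid AArch64 bitmask immediate and takes the LogicalImmediate path
+// above, whereas 0x1234 is not encodable; the macro assembler is expected to
+// have materialized such values into a register before reaching this function,
+// hence the VIXL_UNREACHABLE above.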
+
+
+BufferOffset Assembler::LogicalImmediate(const Register& rd, const Register& rn,
+ unsigned n, unsigned imm_s, unsigned imm_r, LogicalOp op)
+{
+ unsigned reg_size = rd.size();
+ Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
+ return Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
+ ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | Rn(rn));
+}
+
+
+BufferOffset Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S, Instr op)
+{
+ VIXL_ASSERT(operand.IsShiftedRegister());
+ VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() && IsUint5(operand.shift_amount())));
+ return Emit(SF(rd) | op | Flags(S) |
+ ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
+ Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void MozBaseAssembler::InsertIndexIntoTag(uint8_t* load, uint32_t index) {
+ // Store the js::jit::PoolEntry index into the instruction.
+ // finishPool() will walk over all literal load instructions
+ // and use PatchConstantPoolLoad() to patch to the final relative offset.
+ *((uint32_t*)load) |= Assembler::ImmLLiteral(index);
+}
+
+
+bool MozBaseAssembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
+ Instruction* load = reinterpret_cast<Instruction*>(loadAddr);
+
+ // The load currently contains the js::jit::PoolEntry's index,
+ // as written by InsertIndexIntoTag().
+ uint32_t index = load->ImmLLiteral();
+
+ // Each entry in the literal pool is uint32_t-sized,
+ // but literals may use multiple entries.
+ uint32_t* constPool = reinterpret_cast<uint32_t*>(constPoolAddr);
+ Instruction* source = reinterpret_cast<Instruction*>(&constPool[index]);
+
+ load->SetImmLLiteral(source);
+ return false; // Nothing uses the return value.
+}
+
+void
+MozBaseAssembler::PatchShortRangeBranchToVeneer(ARMBuffer* buffer, unsigned rangeIdx,
+ BufferOffset deadline, BufferOffset veneer)
+{
+ // Reconstruct the position of the branch from (rangeIdx, deadline).
+ vixl::ImmBranchRangeType branchRange = static_cast<vixl::ImmBranchRangeType>(rangeIdx);
+ BufferOffset branch(deadline.getOffset() - Instruction::ImmBranchMaxForwardOffset(branchRange));
+ Instruction *branchInst = buffer->getInst(branch);
+ Instruction *veneerInst = buffer->getInst(veneer);
+
+ // Verify that the branch range matches what's encoded.
+ MOZ_ASSERT(Instruction::ImmBranchTypeToRange(branchInst->BranchType()) == branchRange);
+
+  // We want to insert the veneer after the branch in the linked list of
+  // instructions that use the same unbound label.
+  // The veneer should be an unconditional branch.
+ ptrdiff_t nextElemOffset = branchInst->ImmPCRawOffset();
+
+ // If offset is 0, this is the end of the linked list.
+ if (nextElemOffset != kEndOfLabelUseList) {
+ // Make the offset relative to veneer so it targets the same instruction
+ // as branchInst.
+ nextElemOffset *= kInstructionSize;
+ nextElemOffset += branch.getOffset() - veneer.getOffset();
+ nextElemOffset /= kInstructionSize;
+ }
+ Assembler::b(veneerInst, nextElemOffset);
+
+ // Now point branchInst at veneer. See also SetNextLink() above.
+ branchInst->SetImmPCRawOffset(EncodeOffset(branch, veneer));
+}
+
+struct PoolHeader {
+ uint32_t data;
+
+ struct Header {
+ // The size should take into account the pool header.
+    // The size is in units of Instructions (4 bytes each), not bytes.
+ union {
+ struct {
+ uint32_t size : 15;
+
+ // "Natural" guards are part of the normal instruction stream,
+ // while "non-natural" guards are inserted for the sole purpose
+ // of skipping around a pool.
+ uint32_t isNatural : 1;
+ uint32_t ONES : 16;
+ };
+ uint32_t data;
+ };
+
+ Header(int size_, bool isNatural_)
+ : size(size_),
+ isNatural(isNatural_),
+ ONES(0xffff)
+ { }
+
+ Header(uint32_t data)
+ : data(data)
+ {
+ VIXL_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
+ VIXL_ASSERT(ONES == 0xffff);
+ }
+
+ uint32_t raw() const {
+ VIXL_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
+ return data;
+ }
+ };
+
+ PoolHeader(int size_, bool isNatural_)
+ : data(Header(size_, isNatural_).raw())
+ { }
+
+ uint32_t size() const {
+ Header tmp(data);
+ return tmp.size;
+ }
+
+ uint32_t isNatural() const {
+ Header tmp(data);
+ return tmp.isNatural;
+ }
+};
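+// Worked example (illustrative, assuming the usual LSB-first bit-field layout):
+// PoolHeader(9, true) describes a natural pool occupying 9 instruction slots
+// including this header, and its raw value is 9 | (1 << 15) | (0xffff << 16)
+// = 0xffff8009.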
+
+
+void MozBaseAssembler::WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural) {
+ static_assert(sizeof(PoolHeader) == 4);
+
+ // Get the total size of the pool.
+ const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize();
+ const uintptr_t totalPoolInstructions = totalPoolSize / kInstructionSize;
+
+ VIXL_ASSERT((totalPoolSize & 0x3) == 0);
+ VIXL_ASSERT(totalPoolInstructions < (1 << 15));
+
+ PoolHeader header(totalPoolInstructions, isNatural);
+ *(PoolHeader*)start = header;
+}
+
+
+void MozBaseAssembler::WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural) {
+ return;
+}
+
+
+void MozBaseAssembler::WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest) {
+ int byteOffset = dest.getOffset() - branch.getOffset();
+ VIXL_ASSERT(byteOffset % kInstructionSize == 0);
+
+ int instOffset = byteOffset >> kInstructionSizeLog2;
+ Assembler::b(inst, instOffset);
+}
+
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h b/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h
new file mode 100644
index 0000000000..5d12f81bb1
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h
@@ -0,0 +1,356 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_arm64_vixl_MozBaseAssembler_vixl_h
+#define jit_arm64_vixl_MozBaseAssembler_vixl_h
+
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Sprintf.h" // SprintfLiteral
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t
+#include <string.h> // strstr
+
+#include "jit/arm64/vixl/Constants-vixl.h" // vixl::{HINT, NOP, ImmHint_offset}
+#include "jit/arm64/vixl/Globals-vixl.h" // VIXL_ASSERT
+#include "jit/arm64/vixl/Instructions-vixl.h" // vixl::{Instruction, NumShortBranchRangeTypes, Instr, ImmBranchRangeType}
+
+#include "jit/Label.h" // jit::Label
+#include "jit/shared/Assembler-shared.h" // jit::AssemblerShared
+#include "jit/shared/Disassembler-shared.h" // jit::DisassemblerSpew
+#include "jit/shared/IonAssemblerBuffer.h" // jit::BufferOffset
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h" // jit::AssemblerBufferWithConstantPools
+
+namespace vixl {
+
+
+using js::jit::BufferOffset;
+using js::jit::DisassemblerSpew;
+using js::jit::Label;
+
+using LabelDoc = DisassemblerSpew::LabelDoc;
+using LiteralDoc = DisassemblerSpew::LiteralDoc;
+
+#ifdef JS_DISASM_ARM64
+void DisassembleInstruction(char* buffer, size_t bufsize, const Instruction* instr);
+#endif
+
+class MozBaseAssembler;
+typedef js::jit::AssemblerBufferWithConstantPools<1024, 4, Instruction, MozBaseAssembler,
+ NumShortBranchRangeTypes> ARMBuffer;
+
+// Base class for vixl::Assembler, for isolating Moz-specific changes to VIXL.
+class MozBaseAssembler : public js::jit::AssemblerShared {
+ // Buffer initialization constants.
+ static const unsigned BufferGuardSize = 1;
+ static const unsigned BufferHeaderSize = 1;
+ static const size_t BufferCodeAlignment = 8;
+ static const size_t BufferMaxPoolOffset = 1024;
+ static const unsigned BufferPCBias = 0;
+ static const uint32_t BufferAlignmentFillInstruction = HINT | (NOP << ImmHint_offset);
+ static const uint32_t BufferNopFillInstruction = HINT | (NOP << ImmHint_offset);
+ static const unsigned BufferNumDebugNopsToInsert = 0;
+
+#ifdef JS_DISASM_ARM64
+ static constexpr const char* const InstrIndent = " ";
+ static constexpr const char* const LabelIndent = " ";
+ static constexpr const char* const TargetIndent = " ";
+#endif
+
+ public:
+ MozBaseAssembler()
+ : armbuffer_(BufferGuardSize,
+ BufferHeaderSize,
+ BufferCodeAlignment,
+ BufferMaxPoolOffset,
+ BufferPCBias,
+ BufferAlignmentFillInstruction,
+ BufferNopFillInstruction,
+ BufferNumDebugNopsToInsert)
+ {
+#ifdef JS_DISASM_ARM64
+ spew_.setLabelIndent(LabelIndent);
+ spew_.setTargetIndent(TargetIndent);
+#endif
+}
+ ~MozBaseAssembler()
+ {
+#ifdef JS_DISASM_ARM64
+ spew_.spewOrphans();
+#endif
+ }
+
+ public:
+ // Return the Instruction at a given byte offset.
+ Instruction* getInstructionAt(BufferOffset offset) {
+ return armbuffer_.getInst(offset);
+ }
+
+ // Return the byte offset of a bound label.
+ template <typename T>
+ inline T GetLabelByteOffset(const js::jit::Label* label) {
+ VIXL_ASSERT(label->bound());
+ static_assert(sizeof(T) >= sizeof(uint32_t));
+ return reinterpret_cast<T>(label->offset());
+ }
+
+ protected:
+ // Get the buffer offset of the next inserted instruction. This may flush
+ // constant pools.
+ BufferOffset nextInstrOffset() {
+ return armbuffer_.nextInstrOffset();
+ }
+
+ // Get the next usable buffer offset. Note that a constant pool may be placed
+ // here before the next instruction is emitted.
+ BufferOffset nextOffset() const {
+ return armbuffer_.nextOffset();
+ }
+
+ // Allocate memory in the buffer by forwarding to armbuffer_.
+ // Propagate OOM errors.
+ BufferOffset allocLiteralLoadEntry(size_t numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data,
+ const LiteralDoc& doc = LiteralDoc(),
+ ARMBuffer::PoolEntry* pe = nullptr)
+ {
+ MOZ_ASSERT(inst);
+ MOZ_ASSERT(numInst == 1); /* If not, then fix disassembly */
+ BufferOffset offset = armbuffer_.allocEntry(numInst, numPoolEntries, inst,
+ data, pe);
+ propagateOOM(offset.assigned());
+#ifdef JS_DISASM_ARM64
+ Instruction* instruction = armbuffer_.getInstOrNull(offset);
+ if (instruction)
+ spewLiteralLoad(offset,
+ reinterpret_cast<vixl::Instruction*>(instruction), doc);
+#endif
+ return offset;
+ }
+
+#ifdef JS_DISASM_ARM64
+ DisassemblerSpew spew_;
+
+ void spew(BufferOffset offs, const vixl::Instruction* instr) {
+ if (spew_.isDisabled() || !instr)
+ return;
+
+ char buffer[2048];
+ DisassembleInstruction(buffer, sizeof(buffer), instr);
+ spew_.spew("%06" PRIx32 " %08" PRIx32 "%s%s",
+ (uint32_t)offs.getOffset(),
+ instr->InstructionBits(), InstrIndent, buffer);
+ }
+
+ void spewBranch(BufferOffset offs,
+ const vixl::Instruction* instr, const LabelDoc& target) {
+ if (spew_.isDisabled() || !instr)
+ return;
+
+ char buffer[2048];
+ DisassembleInstruction(buffer, sizeof(buffer), instr);
+
+ char labelBuf[128];
+ labelBuf[0] = 0;
+
+ bool hasTarget = target.valid;
+ if (!hasTarget)
+ SprintfLiteral(labelBuf, "-> (link-time target)");
+
+ if (instr->IsImmBranch() && hasTarget) {
+ // The target information in the instruction is likely garbage, so remove it.
+ // The target label will in any case be printed if we have it.
+ //
+      // The format of the instruction disassembly is /.*#.*/. Strip from the '#' onward.
+ size_t i;
+ const size_t BUFLEN = sizeof(buffer)-1;
+ for ( i=0 ; i < BUFLEN && buffer[i] && buffer[i] != '#' ; i++ )
+ ;
+ buffer[i] = 0;
+
+ SprintfLiteral(labelBuf, "-> %d%s", target.doc, !target.bound ? "f" : "");
+ hasTarget = false;
+ }
+
+ spew_.spew("%06" PRIx32 " %08" PRIx32 "%s%s%s",
+ (uint32_t)offs.getOffset(),
+ instr->InstructionBits(), InstrIndent, buffer, labelBuf);
+
+ if (hasTarget)
+ spew_.spewRef(target);
+ }
+
+ void spewLiteralLoad(BufferOffset offs,
+ const vixl::Instruction* instr, const LiteralDoc& doc) {
+ if (spew_.isDisabled() || !instr)
+ return;
+
+ char buffer[2048];
+ DisassembleInstruction(buffer, sizeof(buffer), instr);
+
+ char litbuf[2048];
+ spew_.formatLiteral(doc, litbuf, sizeof(litbuf));
+
+ // The instruction will have the form /^.*pc\+0/ followed by junk that we
+ // don't need; try to strip it.
+
+ char *probe = strstr(buffer, "pc+0");
+ if (probe)
+ *(probe + 4) = 0;
+ spew_.spew("%06" PRIx32 " %08" PRIx32 "%s%s ; .const %s",
+ (uint32_t)offs.getOffset(),
+ instr->InstructionBits(), InstrIndent, buffer, litbuf);
+ }
+
+ LabelDoc refLabel(Label* label) {
+ if (spew_.isDisabled())
+ return LabelDoc();
+
+ return spew_.refLabel(label);
+ }
+#else
+ LabelDoc refLabel(js::jit::Label*) {
+ return LabelDoc();
+ }
+#endif
+
+ // Emit the instruction, returning its offset.
+ BufferOffset Emit(Instr instruction, bool isBranch = false) {
+ static_assert(sizeof(instruction) == kInstructionSize);
+ // TODO: isBranch is obsolete and should be removed.
+ (void)isBranch;
+ MOZ_ASSERT(hasCreator());
+ BufferOffset offs = armbuffer_.putInt(*(uint32_t*)(&instruction));
+#ifdef JS_DISASM_ARM64
+ if (!isBranch)
+ spew(offs, armbuffer_.getInstOrNull(offs));
+#endif
+ return offs;
+ }
+
+ BufferOffset EmitBranch(Instr instruction, const LabelDoc& doc) {
+ BufferOffset offs = Emit(instruction, true);
+#ifdef JS_DISASM_ARM64
+ spewBranch(offs, armbuffer_.getInstOrNull(offs), doc);
+#endif
+ return offs;
+ }
+
+ public:
+ // Emit the instruction at |at|.
+ static void Emit(Instruction* at, Instr instruction) {
+ static_assert(sizeof(instruction) == kInstructionSize);
+ memcpy(at, &instruction, sizeof(instruction));
+ }
+
+ static void EmitBranch(Instruction* at, Instr instruction) {
+ // TODO: Assert that the buffer already has the instruction marked as a branch.
+ Emit(at, instruction);
+ }
+
+ // Emit data inline in the instruction stream.
+ BufferOffset EmitData(void const * data, unsigned size) {
+ VIXL_ASSERT(size % 4 == 0);
+ MOZ_ASSERT(hasCreator());
+ return armbuffer_.allocEntry(size / sizeof(uint32_t), 0, (uint8_t*)(data), nullptr);
+ }
+
+ public:
+ // Size of the code generated in bytes, including pools.
+ size_t SizeOfCodeGenerated() const {
+ return armbuffer_.size();
+ }
+
+ // Move the pool into the instruction stream.
+ void flushBuffer() {
+ armbuffer_.flushPool();
+ }
+
+ // Inhibit pool flushing for the given number of instructions.
+ // Generating more than |maxInst| instructions in a no-pool region
+ // triggers an assertion within the ARMBuffer.
+ // Does not nest.
+ void enterNoPool(size_t maxInst) {
+ armbuffer_.enterNoPool(maxInst);
+ }
+
+ // Marks the end of a no-pool region.
+ void leaveNoPool() {
+ armbuffer_.leaveNoPool();
+ }
+
+ void enterNoNops() {
+ armbuffer_.enterNoNops();
+ }
+ void leaveNoNops() {
+ armbuffer_.leaveNoNops();
+ }
+
+ public:
+ // Static interface used by IonAssemblerBufferWithConstantPools.
+ static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+ static bool PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+ static void PatchShortRangeBranchToVeneer(ARMBuffer*, unsigned rangeIdx, BufferOffset deadline,
+ BufferOffset veneer);
+ static uint32_t PlaceConstantPoolBarrier(int offset);
+
+ static void WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural);
+ static void WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural);
+ static void WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest);
+
+ protected:
+ // Functions for managing Labels and linked lists of Label uses.
+
+ // Get the next Label user in the linked list of Label uses.
+ // Return an unassigned BufferOffset when the end of the list is reached.
+ BufferOffset NextLink(BufferOffset cur);
+
+ // Patch the instruction at cur to link to the instruction at next.
+ void SetNextLink(BufferOffset cur, BufferOffset next);
+
+ // Link the current (not-yet-emitted) instruction to the specified label,
+ // then return a raw offset to be encoded in the instruction.
+ ptrdiff_t LinkAndGetByteOffsetTo(BufferOffset branch, js::jit::Label* label);
+ ptrdiff_t LinkAndGetInstructionOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
+ js::jit::Label* label);
+ ptrdiff_t LinkAndGetPageOffsetTo(BufferOffset branch, js::jit::Label* label);
+
+ // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
+ ptrdiff_t LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
+ unsigned elementSizeBits, js::jit::Label* label);
+
+ protected:
+ // The buffer into which code and relocation info are generated.
+ ARMBuffer armbuffer_;
+};
+
+
+} // namespace vixl
+
+
+#endif // jit_arm64_vixl_MozBaseAssembler_vixl_h
+
diff --git a/js/src/jit/arm64/vixl/MozCachingDecoder.h b/js/src/jit/arm64/vixl/MozCachingDecoder.h
new file mode 100644
index 0000000000..5b4cfc17d5
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozCachingDecoder.h
@@ -0,0 +1,179 @@
+#ifndef VIXL_A64_MOZ_CACHING_DECODER_A64_H_
+#define VIXL_A64_MOZ_CACHING_DECODER_A64_H_
+
+#include "mozilla/HashTable.h"
+
+#include "jit/arm64/vixl/Decoder-vixl.h"
+#include "js/AllocPolicy.h"
+
+#ifdef DEBUG
+#define JS_CACHE_SIMULATOR_ARM64 1
+#endif
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+namespace vixl {
+
+// This enumeration lists the different kinds of instructions which can be
+// decoded. These kinds correspond to the set of visitors defined by the default
+// Decoder.
+enum class InstDecodedKind : uint8_t {
+ NotDecodedYet,
+#define DECLARE(E) E,
+ VISITOR_LIST(DECLARE)
+#undef DECLARE
+};
+
+// A SinglePageDecodeCache is used to store the decoded kind of all instructions
+// in an executable page of code. Each time an instruction is decoded, its
+// decoded kind is recorded in this structure. The previous instruction value is
+// also recorded in this structure when using a debug build.
+//
+// The next time the same offset is visited, the instruction is decoded using
+// the previously recorded kind. In debug builds, the instruction bits are also
+// compared against the previously recorded value to check for potential missing
+// cache invalidations.
+//
+// This structure stores the equivalent of a single page of code to get better
+// memory locality when using the simulator, as opposed to having a single
+// hash-table for all instructions. A hash-table is still used by the
+// CachingDecoder to map the page start addresses to these SinglePageDecodeCaches.
+class SinglePageDecodeCache {
+ public:
+ static const uintptr_t PageSize = 1 << 12;
+ static const uintptr_t PageMask = PageSize - 1;
+ static const uintptr_t InstSize = vixl::kInstructionSize;
+ static const uintptr_t InstMask = InstSize - 1;
+ static const uintptr_t InstPerPage = PageSize / InstSize;
+
+ SinglePageDecodeCache(const Instruction* inst)
+ : pageStart_(PageStart(inst))
+ {
+ memset(&decodeCache_, int(InstDecodedKind::NotDecodedYet), sizeof(decodeCache_));
+ }
+ // Compute the start address of the page which contains this instruction.
+ static uintptr_t PageStart(const Instruction* inst) {
+ return uintptr_t(inst) & ~PageMask;
+ }
+  // Returns whether the decoded kind of the given instruction would be stored
+  // in this SinglePageDecodeCache, i.e. whether the instruction is on this page.
+ bool contains(const Instruction* inst) {
+ return pageStart_ == PageStart(inst);
+ }
+ void clearDecode(const Instruction* inst) {
+ uintptr_t offset = (uintptr_t(inst) & PageMask) / InstSize;
+ decodeCache_[offset] = InstDecodedKind::NotDecodedYet;
+ }
+ InstDecodedKind* decodePtr(const Instruction* inst) {
+ uintptr_t offset = (uintptr_t(inst) & PageMask) / InstSize;
+ uint32_t instValue = *reinterpret_cast<const uint32_t*>(inst);
+ instCache_[offset] = instValue;
+ return &decodeCache_[offset];
+ }
+ InstDecodedKind decode(const Instruction* inst) const {
+ uintptr_t offset = (uintptr_t(inst) & PageMask) / InstSize;
+ InstDecodedKind val = decodeCache_[offset];
+ uint32_t instValue = *reinterpret_cast<const uint32_t*>(inst);
+ MOZ_ASSERT_IF(val != InstDecodedKind::NotDecodedYet,
+ instCache_[offset] == instValue);
+ return val;
+ }
+
+ private:
+ // Record the address at which the corresponding code page starts.
+ const uintptr_t pageStart_;
+
+  // Cache the instruction bits that were decoded previously, in order to assert
+  // if we later see any stale instructions.
+ uint32_t instCache_[InstPerPage];
+
+ // Cache the decoding of the instruction such that we can skip the decoding
+ // part.
+ InstDecodedKind decodeCache_[InstPerPage];
+};
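+// Worked example (illustrative): with 4 KiB pages and 4-byte instructions, an
+// Instruction* whose address ends in 0x5010 has a PageStart() ending in 0x5000
+// and maps to cache slot (address & PageMask) / InstSize = 0x10 / 4 = 4 in
+// decodeCache_.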
+
+// A DecoderVisitor which will record which visitor function should be called
+// the next time we want to decode the same instruction.
+class CachingDecoderVisitor : public DecoderVisitor {
+ public:
+ CachingDecoderVisitor() = default;
+ virtual ~CachingDecoderVisitor() {}
+
+#define DECLARE(A) virtual void Visit##A(const Instruction* instr) { \
+ if (last_) { \
+ MOZ_ASSERT(*last_ == InstDecodedKind::NotDecodedYet); \
+ *last_ = InstDecodedKind::A; \
+ last_ = nullptr; \
+ } \
+ };
+
+ VISITOR_LIST(DECLARE)
+#undef DECLARE
+
+ void setDecodePtr(InstDecodedKind* ptr) {
+ last_ = ptr;
+ }
+
+ private:
+ InstDecodedKind* last_;
+};
+
+// The caching decoder works by extending the default vixl Decoder class and
+// overloading its Decode function.
+//
+// The overloaded Decode function checks whether the instruction given as
+// argument has been decoded since it was last invalidated. If it was not
+// previously decoded, the value of the instruction is recorded along with its
+// decoded kind. Otherwise, the value of the instruction is checked against the
+// previously recorded value, and the recorded kind is used to skip the decoding
+// step and resume execution of the instruction.
+//
+// The caching decoder stores the equivalent of a page of executable code in a
+// hash-table. Each SinglePageDecodeCache stores an array of decoded kind as
+// well as the value of the previously decoded instruction.
+//
+// When testing if an instruction was decoded before, we check if the address of
+// the instruction is contained in the last SinglePageDecodeCache. If it is not,
+// then the hash-table entry is queried and created if necessary, and the last
+// SinglePageDecodeCache is updated. Then, the last SinglePageDecodeCache
+// necessarily contains the decoded kind of the instruction given as argument.
+//
+// The caching decoder adds an extra function for flushing the cache, which is
+// in charge of clearing the decoded kinds of the instructions in the range of
+// addresses given as argument. This is indirectly called by
+// CPU::EnsureIAndDCacheCoherency.
+class CachingDecoder : public Decoder {
+ using ICacheMap = mozilla::HashMap<uintptr_t, SinglePageDecodeCache*>;
+ public:
+ CachingDecoder()
+ : lastPage_(nullptr)
+ {
+ PrependVisitor(&cachingDecoder_);
+ }
+ ~CachingDecoder() {
+ RemoveVisitor(&cachingDecoder_);
+ }
+
+ void Decode(const Instruction* instr);
+ void Decode(Instruction* instr) {
+ Decode(const_cast<const Instruction*>(instr));
+ }
+
+ void FlushICache(void* start, size_t size);
+
+ private:
+ // Record the type of the decoded instruction, to avoid decoding it a second
+ // time the next time we execute it.
+ CachingDecoderVisitor cachingDecoder_;
+
+ // Store the mapping of Instruction pointer to the corresponding
+ // SinglePageDecodeCache.
+ ICacheMap iCache_;
+
+ // Record the last SinglePageDecodeCache seen, such that we can quickly access
+ // it for the next instruction.
+ SinglePageDecodeCache* lastPage_;
+};
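+// Illustrative usage (a sketch; `instr`, `start` and `size` are assumed):
+//
+//   CachingDecoder decoder;
+//   decoder.Decode(instr);            // first visit: full decode, kind recorded
+//   decoder.Decode(instr);            // later visits: cached kind, no re-decode
+//   decoder.FlushICache(start, size); // clears cached kinds for that range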
+
+}
+#endif // !JS_CACHE_SIMULATOR_ARM64
+#endif // !VIXL_A64_MOZ_CACHING_DECODER_A64_H_
diff --git a/js/src/jit/arm64/vixl/MozCpu-vixl.cpp b/js/src/jit/arm64/vixl/MozCpu-vixl.cpp
new file mode 100644
index 0000000000..909cc590ae
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozCpu-vixl.cpp
@@ -0,0 +1,226 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+#include "jit/arm64/vixl/Simulator-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+#include "util/WindowsWrapper.h"
+
+#if defined(XP_DARWIN)
+# include <libkern/OSCacheControl.h>
+#endif
+
+namespace vixl {
+
+// Currently computes I and D cache line size.
+void CPU::SetUp() {
+ uint32_t cache_type_register = GetCacheType();
+
+  // The cache type register holds information about the caches, including the
+  // I and D cache line sizes.
+ static const int kDCacheLineSizeShift = 16;
+ static const int kICacheLineSizeShift = 0;
+ static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
+ static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
+
+ // The cache type register holds the size of the I and D caches in words as
+ // a power of two.
+ uint32_t dcache_line_size_power_of_two =
+ (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
+ uint32_t icache_line_size_power_of_two =
+ (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
+
+ dcache_line_size_ = 4 << dcache_line_size_power_of_two;
+ icache_line_size_ = 4 << icache_line_size_power_of_two;
+
+  // Bug 1521158 suggests that having CPU cores with different cache line sizes
+  // could cause issues: we would only invalidate half of a cache line if we
+  // invalidate every 128 bytes while other (little) cores use a smaller stride
+  // such as 64 bytes. To be conservative, we reduce the stride to 32 bytes,
+  // which should be smaller than any known cache line.
+ const uint32_t conservative_line_size = 32;
+ dcache_line_size_ = std::min(dcache_line_size_, conservative_line_size);
+ icache_line_size_ = std::min(icache_line_size_, conservative_line_size);
+}
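+
+// As a worked illustration of the decoding above (the CTR_EL0 value here is
+// hypothetical, not read from any particular core): a cache type register of
+// 0x84448004 yields
+//
+//   dcache: (0x84448004 >> 16) & 0xf = 4, so dcache_line_size_ = 4 << 4 = 64
+//   icache: (0x84448004 >> 0) & 0xf = 4, so icache_line_size_ = 4 << 4 = 64
+//
+// and both are then clamped to the conservative 32-byte stride used by the
+// flush loops in EnsureIAndDCacheCoherency.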
+
+
+uint32_t CPU::GetCacheType() {
+#if defined(__aarch64__) && (defined(__linux__) || defined(__android__))
+ uint64_t cache_type_register;
+ // Copy the content of the cache type register to a core register.
+ __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r" (cache_type_register));
+ VIXL_ASSERT(IsUint32(cache_type_register));
+ return static_cast<uint32_t>(cache_type_register);
+#else
+ // This will lead to a cache with 4-byte lines (4 << 0), which is fine since
+ // neither EnsureIAndDCacheCoherency nor the simulator will need this
+ // information.
+ return 0;
+#endif
+}
+
+void CPU::EnsureIAndDCacheCoherency(void* address, size_t length) {
+#if defined(JS_SIMULATOR_ARM64) && defined(JS_CACHE_SIMULATOR_ARM64)
+ // This code attempts to emulate what the following assembly sequence is
+ // doing, which is sending the information to all cores that some cache lines
+ // have to be invalidated, and invalidating them only on the current core.
+ //
+ // This is done by recording the current range to be flushed to all
+ // simulators, then if there is a simulator associated with the current
+ // thread, applying all flushed ranges as the "isb" instruction would do.
+ //
+ // As we have no control over the CPU cores used by the code generator and the
+ // execution threads, this code assumes that each thread runs on its own core.
+ //
+ // See Bug 1529933 for more detailed explanation of this issue.
+ using js::jit::SimulatorProcess;
+ js::jit::AutoLockSimulatorCache alsc;
+ if (length > 0) {
+ SimulatorProcess::recordICacheFlush(address, length);
+ }
+ Simulator* sim = vixl::Simulator::Current();
+ if (sim) {
+ sim->FlushICache();
+ }
+#elif defined(_MSC_VER) && defined(_M_ARM64)
+ FlushInstructionCache(GetCurrentProcess(), address, length);
+#elif defined(XP_DARWIN)
+ sys_icache_invalidate(address, length);
+#elif defined(__aarch64__) && (defined(__linux__) || defined(__android__))
+ // Implement the cache synchronisation for all targets where AArch64 is the
+ // host, even if we're building the simulator for an AArch64 host. This
+ // allows for cases where the user wants to simulate code as well as run it
+ // natively.
+
+ if (length == 0) {
+ return;
+ }
+
+ // The code below assumes user space cache operations are allowed.
+
+ // Work out the line sizes for each cache, and use them to determine the
+ // start addresses.
+ uintptr_t start = reinterpret_cast<uintptr_t>(address);
+ uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
+ uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
+ uintptr_t dline = start & ~(dsize - 1);
+ uintptr_t iline = start & ~(isize - 1);
+
+ // Cache line sizes are always a power of 2.
+ VIXL_ASSERT(IsPowerOf2(dsize));
+ VIXL_ASSERT(IsPowerOf2(isize));
+ uintptr_t end = start + length;
+
+ do {
+ __asm__ __volatile__ (
+ // Clean each line of the D cache containing the target data.
+ //
+ // dc : Data Cache maintenance
+ // c : Clean
+ // i : Invalidate
+ // va : by (Virtual) Address
+ // c : to the point of Coherency
+ // Original implementation used cvau, but changed to civac due to
+ // errata on Cortex-A53 819472, 826319, 827319 and 824069.
+ // See ARM DDI 0406B page B2-12 for more information.
+ //
+ " dc civac, %[dline]\n"
+ :
+ : [dline] "r" (dline)
+ // This code does not write to memory, but the "memory" dependency
+ // prevents GCC from reordering the code.
+ : "memory");
+ dline += dsize;
+ } while (dline < end);
+
+ __asm__ __volatile__ (
+ // Make sure that the data cache operations (above) complete before the
+ // instruction cache operations (below).
+ //
+ // dsb : Data Synchronisation Barrier
+ // ish : Inner SHareable domain
+ //
+ // The point of unification for an Inner Shareable shareability domain is
+ // the point by which the instruction and data caches of all the processors
+ // in that Inner Shareable shareability domain are guaranteed to see the
+ // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+ // information.
+ " dsb ish\n"
+ : : : "memory");
+
+ do {
+ __asm__ __volatile__ (
+ // Invalidate each line of the I cache containing the target data.
+ //
+ // ic : Instruction Cache maintenance
+ // i : Invalidate
+ // va : by Address
+ // u : to the point of Unification
+ " ic ivau, %[iline]\n"
+ :
+ : [iline] "r" (iline)
+ : "memory");
+ iline += isize;
+ } while (iline < end);
+
+ __asm__ __volatile__(
+ // Make sure that the instruction cache operations (above) take effect
+ // before the isb (below).
+ " dsb ish\n"
+
+ // Ensure that any instructions already in the pipeline are discarded and
+ // reloaded from the new data.
+ // isb : Instruction Synchronisation Barrier
+ " isb\n"
+ :
+ :
+ : "memory");
+#else
+ // If the host isn't AArch64, we must be using the simulator, so this function
+ // doesn't have to do anything.
+ USE(address, length);
+#endif
+}
+
+void CPU::FlushExecutionContext() {
+#if defined(JS_SIMULATOR_ARM64) && defined(JS_CACHE_SIMULATOR_ARM64)
+ // Performing an 'isb' will ensure the current core instruction pipeline is
+ // synchronized with an icache flush executed by another core.
+ using js::jit::SimulatorProcess;
+ js::jit::AutoLockSimulatorCache alsc;
+ Simulator* sim = vixl::Simulator::Current();
+ if (sim) {
+ sim->FlushICache();
+ }
+#elif defined(__aarch64__)
+ // Ensure that any instructions already in the pipeline are discarded and
+ // reloaded from the icache.
+ __asm__ __volatile__("isb\n" : : : "memory");
+#endif
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
new file mode 100644
index 0000000000..398f864493
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
@@ -0,0 +1,211 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/Architecture-arm64.h"
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+namespace vixl {
+
+bool Instruction::IsUncondB() const {
+ return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | B);
+}
+
+
+bool Instruction::IsCondB() const {
+ return Mask(ConditionalBranchMask) == (ConditionalBranchFixed | B_cond);
+}
+
+
+bool Instruction::IsBL() const {
+ return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | BL);
+}
+
+
+bool Instruction::IsBR() const {
+ return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BR);
+}
+
+
+bool Instruction::IsBLR() const {
+ return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BLR);
+}
+
+
+bool Instruction::IsTBZ() const {
+ return Mask(TestBranchMask) == TBZ;
+}
+
+
+bool Instruction::IsTBNZ() const {
+ return Mask(TestBranchMask) == TBNZ;
+}
+
+
+bool Instruction::IsCBZ() const {
+ return Mask(CompareBranchMask) == CBZ_w || Mask(CompareBranchMask) == CBZ_x;
+}
+
+
+bool Instruction::IsCBNZ() const {
+ return Mask(CompareBranchMask) == CBNZ_w || Mask(CompareBranchMask) == CBNZ_x;
+}
+
+
+bool Instruction::IsLDR() const {
+ return Mask(LoadLiteralMask) == LDR_x_lit;
+}
+
+
+bool Instruction::IsNOP() const {
+ return Mask(SystemHintMask) == HINT && ImmHint() == NOP;
+}
+
+
+bool Instruction::IsCSDB() const {
+ return Mask(SystemHintMask) == HINT && ImmHint() == CSDB;
+}
+
+
+bool Instruction::IsADR() const {
+ return Mask(PCRelAddressingMask) == ADR;
+}
+
+
+bool Instruction::IsADRP() const {
+ return Mask(PCRelAddressingMask) == ADRP;
+}
+
+
+bool Instruction::IsMovz() const {
+ return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
+ (Mask(MoveWideImmediateMask) == MOVZ_w);
+}
+
+
+bool Instruction::IsMovk() const {
+ return (Mask(MoveWideImmediateMask) == MOVK_x) ||
+ (Mask(MoveWideImmediateMask) == MOVK_w);
+}
+
+bool Instruction::IsBranchLinkImm() const {
+ return Mask(UnconditionalBranchFMask) == (UnconditionalBranchFixed | BL);
+}
+
+
+bool Instruction::IsTargetReachable(const Instruction* target) const {
+ VIXL_ASSERT(((target - this) & 3) == 0);
+ int offset = (target - this) >> kInstructionSizeLog2;
+ switch (BranchType()) {
+ case CondBranchType:
+ return IsInt19(offset);
+ case UncondBranchType:
+ return IsInt26(offset);
+ case CompareBranchType:
+ return IsInt19(offset);
+ case TestBranchType:
+ return IsInt14(offset);
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+ptrdiff_t Instruction::ImmPCRawOffset() const {
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // ADR and ADRP.
+ offset = ImmPCRel();
+ } else if (BranchType() == UnknownBranchType) {
+ offset = ImmLLiteral();
+ } else {
+ offset = ImmBranch();
+ }
+ return offset;
+}
+
+void
+Instruction::SetImmPCRawOffset(ptrdiff_t offset)
+{
+ if (IsPCRelAddressing()) {
+ // ADR and ADRP. We're encoding a raw offset here.
+ // See also SetPCRelImmTarget().
+ Instr imm = vixl::Assembler::ImmPCRelAddress(offset);
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+ } else {
+ SetBranchImmTarget(this + (offset << kInstructionSizeLog2));
+ }
+}
+
+// Is this a stack pointer synchronization instruction as inserted by
+// MacroAssembler::syncStackPtr()?
+bool
+Instruction::IsStackPtrSync() const
+{
+ // The stack pointer sync is a move to the stack pointer.
+ // This is encoded as 'add sp, Rs, #0'.
+ return IsAddSubImmediate() && Rd() == js::jit::Registers::sp && ImmAddSub() == 0;
+}
+
+// Skip over a constant pool at |this| if there is one.
+//
+// If |this| is pointing to the artificial guard branch around a constant pool,
+// return the instruction after the pool. Otherwise return |this| itself.
+//
+// This function does not skip constant pools with a natural guard branch. It
+// is assumed that anyone inspecting the instruction stream understands about
+// branches that were inserted naturally.
+const Instruction*
+Instruction::skipPool() const
+{
+ // Artificial pool guards can only be B (rather than BR), and they must be
+ // forward branches.
+ if (!IsUncondB() || ImmUncondBranch() <= 0)
+ return this;
+
+ // Check for a constant pool header which has the high 16 bits set. See
+ // struct PoolHeader. Bit 15 indicates a natural pool guard when set; it
+ // must be clear here, which indicates an artificial pool guard.
+ const Instruction *header = InstructionAtOffset(kInstructionSize);
+ if (header->Mask(0xffff8000) != 0xffff0000)
+ return this;
+
+ // OK, this is an artificial jump around a constant pool.
+ return ImmPCOffsetTarget();
+}
+
+
+void Instruction::SetBits32(int msb, int lsb, unsigned value) {
+ uint32_t me;
+ memcpy(&me, this, sizeof(me));
+ uint32_t new_mask = (1 << (msb+1)) - (1 << lsb);
+ uint32_t keep_mask = ~new_mask;
+ me = (me & keep_mask) | ((value << lsb) & new_mask);
+ memcpy(this, &me, sizeof(me));
+}
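+
+// Illustrative example (not a call site added here): SetBits32(4, 0, 1)
+// computes new_mask = (1 << 5) - (1 << 0) = 0x1f and keep_mask = ~0x1f, so it
+// rewrites bits [4:0] of this instruction word to 1 (e.g. the Rd field of a
+// typical A64 data-processing encoding) while leaving all other bits intact.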
+
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
new file mode 100644
index 0000000000..9f817cf0a3
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
@@ -0,0 +1,1258 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#include "jit/arm64/vixl/MozCachingDecoder.h"
+#include "jit/arm64/vixl/Simulator-vixl.h"
+#include "jit/IonTypes.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "threading/LockGuard.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+
+js::jit::SimulatorProcess* js::jit::SimulatorProcess::singleton_ = nullptr;
+
+namespace vixl {
+
+using mozilla::DebugOnly;
+using js::jit::ABIFunctionType;
+using js::jit::JitActivation;
+using js::jit::SimulatorProcess;
+
+Simulator::Simulator(Decoder* decoder, FILE* stream)
+ : stream_(nullptr)
+ , print_disasm_(nullptr)
+ , instrumentation_(nullptr)
+ , stack_(nullptr)
+ , stack_limit_(nullptr)
+ , decoder_(nullptr)
+ , oom_(false)
+{
+ this->init(decoder, stream);
+
+ // If this environment variable is present, trace the executed instructions.
+ // (Very helpful for debugging code generation crashes.)
+ if (getenv("VIXL_TRACE")) {
+ set_trace_parameters(LOG_DISASM);
+ }
+}
+
+
+Simulator::~Simulator() {
+ js_free(stack_);
+ stack_ = nullptr;
+
+ // The decoder may outlive the simulator.
+ if (print_disasm_) {
+ decoder_->RemoveVisitor(print_disasm_);
+ js_delete(print_disasm_);
+ print_disasm_ = nullptr;
+ }
+
+ if (instrumentation_) {
+ decoder_->RemoveVisitor(instrumentation_);
+ js_delete(instrumentation_);
+ instrumentation_ = nullptr;
+ }
+}
+
+
+void Simulator::ResetState() {
+ // Reset the system registers.
+ nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
+ fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
+
+ // Reset registers to 0.
+ pc_ = nullptr;
+ pc_modified_ = false;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ set_xreg(i, 0xbadbeef);
+ }
+ // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP.
+ uint64_t nan_bits = UINT64_C(0x7ff0dead7f8beef1);
+ VIXL_ASSERT(IsSignallingNaN(RawbitsToDouble(nan_bits & kDRegMask)));
+ VIXL_ASSERT(IsSignallingNaN(RawbitsToFloat(nan_bits & kSRegMask)));
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ set_dreg_bits(i, nan_bits);
+ }
+ // Returning to address 0 exits the Simulator.
+ set_lr(kEndOfSimAddress);
+}
+
+
+void Simulator::init(Decoder* decoder, FILE* stream) {
+ // Ensure that shift operations act as the simulator expects.
+ VIXL_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
+ VIXL_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
+
+ instruction_stats_ = false;
+
+ // Set up the decoder.
+ decoder_ = decoder;
+ decoder_->AppendVisitor(this);
+
+ stream_ = stream;
+ print_disasm_ = js_new<PrintDisassembler>(stream_);
+ if (!print_disasm_) {
+ oom_ = true;
+ return;
+ }
+ set_coloured_trace(false);
+ trace_parameters_ = LOG_NONE;
+
+ ResetState();
+
+ // Allocate and set up the simulator stack.
+ stack_ = js_pod_malloc<byte>(stack_size_);
+ if (!stack_) {
+ oom_ = true;
+ return;
+ }
+ stack_limit_ = stack_ + stack_protection_size_;
+ // Configure the starting stack pointer.
+ // - Find the top of the stack.
+ byte * tos = stack_ + stack_size_;
+ // - There's a protection region at both ends of the stack.
+ tos -= stack_protection_size_;
+ // - The stack pointer must be 16-byte aligned.
+ tos = AlignDown(tos, 16);
+ set_sp(tos);
+
+ // Set the sample period to 10, as the VIXL examples and tests are short.
+ if (getenv("VIXL_STATS")) {
+ instrumentation_ = js_new<Instrument>("vixl_stats.csv", 10);
+ if (!instrumentation_) {
+ oom_ = true;
+ return;
+ }
+ }
+
+ // Print a warning about exclusive-access instructions, but only the first
+ // time they are encountered. This warning can be silenced using
+ // SilenceExclusiveAccessWarning().
+ print_exclusive_access_warning_ = true;
+}
+
+
+Simulator* Simulator::Current() {
+ JSContext* cx = js::TlsContext.get();
+ if (!cx) {
+ return nullptr;
+ }
+ JSRuntime* rt = cx->runtime();
+ if (!rt) {
+ return nullptr;
+ }
+ if (!js::CurrentThreadCanAccessRuntime(rt)) {
+ return nullptr;
+ }
+ return cx->simulator();
+}
+
+
+Simulator* Simulator::Create() {
+ Decoder *decoder = js_new<Decoder>();
+ if (!decoder)
+ return nullptr;
+
+ // FIXME: This just leaks the Decoder object for now, which is probably OK.
+ // FIXME: We should free it at some point.
+ // FIXME: Note that it can't be stored in the SimulatorRuntime due to lifetime conflicts.
+ js::UniquePtr<Simulator> sim;
+ if (getenv("USE_DEBUGGER") != nullptr) {
+ sim.reset(js_new<Debugger>(decoder, stdout));
+ } else {
+ sim.reset(js_new<Simulator>(decoder, stdout));
+ }
+
+ // Check if Simulator:init ran out of memory.
+ if (sim && sim->oom()) {
+ return nullptr;
+ }
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ // Register the simulator in the Simulator process to handle cache flushes
+ // across threads.
+ js::jit::AutoLockSimulatorCache alsc;
+ if (!SimulatorProcess::registerSimulator(sim.get())) {
+ return nullptr;
+ }
+#endif
+
+ return sim.release();
+}
+
+
+void Simulator::Destroy(Simulator* sim) {
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ if (sim) {
+ js::jit::AutoLockSimulatorCache alsc;
+ SimulatorProcess::unregisterSimulator(sim);
+ }
+#endif
+
+ js_delete(sim);
+}
+
+
+void Simulator::ExecuteInstruction() {
+ // The program counter should always be aligned.
+ VIXL_ASSERT(IsWordAligned(pc_));
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ if (pendingCacheRequests) {
+ // We're here emulating what a membarrier executed on real hardware does;
+ // see the syscalls to membarrier in MozCpu-vixl.cpp.
+ // There's a slight difference in that the simulator is not being
+ // interrupted: instead, we effectively run the icache flush request
+ // before executing the next instruction, which is close enough and
+ // sufficient for our use case.
+ js::jit::AutoLockSimulatorCache alsc;
+ FlushICache();
+ }
+#endif
+ decoder_->Decode(pc_);
+ increment_pc();
+}
+
+
+uintptr_t Simulator::stackLimit() const {
+ return reinterpret_cast<uintptr_t>(stack_limit_);
+}
+
+
+uintptr_t* Simulator::addressOfStackLimit() {
+ return (uintptr_t*)&stack_limit_;
+}
+
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = get_sp();
+ }
+ return newsp <= stackLimit();
+}
+
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = get_sp() - extra;
+ return newsp <= stackLimit();
+}
+
+
+JS::ProfilingFrameIterator::RegisterState
+Simulator::registerState()
+{
+ JS::ProfilingFrameIterator::RegisterState state;
+ state.pc = (uint8_t*) get_pc();
+ state.fp = (uint8_t*) get_fp();
+ state.lr = (uint8_t*) get_lr();
+ state.sp = (uint8_t*) get_sp();
+ return state;
+}
+
+int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ // First eight arguments passed in registers.
+ VIXL_ASSERT(argument_count <= 8);
+ // This code should use the type of the called function
+ // (with templates, like the callVM machinery), but since the
+ // number of called functions is minuscule, their types have been
+ // divined from the number of arguments.
+ if (argument_count == 8) {
+ // EnterJitData::jitcode.
+ set_xreg(0, va_arg(parameters, int64_t));
+ // EnterJitData::maxArgc.
+ set_xreg(1, va_arg(parameters, unsigned));
+ // EnterJitData::maxArgv.
+ set_xreg(2, va_arg(parameters, int64_t));
+ // EnterJitData::osrFrame.
+ set_xreg(3, va_arg(parameters, int64_t));
+ // EnterJitData::calleeToken.
+ set_xreg(4, va_arg(parameters, int64_t));
+ // EnterJitData::scopeChain.
+ set_xreg(5, va_arg(parameters, int64_t));
+ // EnterJitData::osrNumStackValues.
+ set_xreg(6, va_arg(parameters, unsigned));
+ // Address of EnterJitData::result.
+ set_xreg(7, va_arg(parameters, int64_t));
+ } else if (argument_count == 2) {
+ // EntryArg* args
+ set_xreg(0, va_arg(parameters, int64_t));
+ // uint8_t* GlobalData
+ set_xreg(1, va_arg(parameters, int64_t));
+ } else if (argument_count == 1) { // irregexp
+ // InputOutputData& data
+ set_xreg(0, va_arg(parameters, int64_t));
+ } else if (argument_count == 0) { // testsJit.cpp
+ // accept.
+ } else {
+ MOZ_CRASH("Unknown number of arguments");
+ }
+
+ va_end(parameters);
+
+ // Call must transition back to native code on exit.
+ VIXL_ASSERT(get_lr() == int64_t(kEndOfSimAddress));
+
+ // Execute the simulation.
+ DebugOnly<int64_t> entryStack = get_sp();
+ RunFrom((Instruction*)entry);
+ DebugOnly<int64_t> exitStack = get_sp();
+ VIXL_ASSERT(entryStack == exitStack);
+
+ int64_t result = xreg(0);
+ if (getenv("USE_DEBUGGER")) {
+ printf("LEAVE\n");
+ }
+ return result;
+}
+
+
+// When the generated code calls a VM function (masm.callWithABI) we need to
+// call that function instead of trying to execute it with the simulator
+// (because it's x64 code instead of AArch64 code). We do that by redirecting the VM
+// call to an svc (Supervisor Call) instruction that is handled by the
+// simulator. We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
+class Redirection
+{
+ friend class Simulator;
+
+ Redirection(void* nativeFunction, ABIFunctionType type)
+ : nativeFunction_(nativeFunction),
+ type_(type),
+ next_(nullptr)
+ {
+ next_ = SimulatorProcess::redirection();
+ SimulatorProcess::setRedirection(this);
+
+ Instruction* instr = (Instruction*)(&svcInstruction_);
+ vixl::Assembler::svc(instr, kCallRtRedirected);
+ }
+
+ public:
+ void* addressOfSvcInstruction() { return &svcInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ js::jit::AutoLockSimulatorCache alsr;
+
+ // TODO: Store srt_ in the simulator for this assertion.
+ // VIXL_ASSERT_IF(pt->simulator(), pt->simulator()->srt_ == srt);
+
+ Redirection* current = SimulatorProcess::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ VIXL_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ // Note: we can't use js_new here because the constructor is private.
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = js_pod_malloc<Redirection>(1);
+ if (!redir)
+ oomUnsafe.crash("Simulator redirection");
+ new(redir) Redirection(nativeFunction, type);
+ return redir;
+ }
+
+ static const Redirection* FromSvcInstruction(const Instruction* svcInstruction) {
+ const uint8_t* addrOfSvc = reinterpret_cast<const uint8_t*>(svcInstruction);
+ const uint8_t* addrOfRedirection = addrOfSvc - offsetof(Redirection, svcInstruction_);
+ return reinterpret_cast<const Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t svcInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+
+void* Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSvcInstruction();
+}
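+
+// A hypothetical usage sketch (no such call site is part of this file): when
+// the macro assembler needs to emit a call to a native C++ helper, it asks the
+// simulator for a redirected address and calls that instead of the real entry
+// point, e.g.
+//
+//   void* redirected = vixl::Simulator::RedirectNativeFunction(
+//       reinterpret_cast<void*>(&SomeNativeHelper),  // hypothetical helper
+//       js::jit::Args_General2);                     // matching Args_* tag
+//
+// At run time the svc trap is routed to VisitCallRedirection below, which
+// dispatches on the stored Args_* tag to invoke the native function.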
+
+void Simulator::VisitException(const Instruction* instr) {
+ if (instr->InstructionBits() == UNDEFINED_INST_PATTERN) {
+ uint8_t* newPC;
+ if (js::wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc((Instruction*)newPC);
+ return;
+ }
+ DoUnreachable(instr);
+ }
+
+ switch (instr->Mask(ExceptionMask)) {
+ case BRK: {
+ int lowbit = ImmException_offset;
+ int highbit = ImmException_offset + ImmException_width - 1;
+ HostBreakpoint(instr->Bits(highbit, lowbit));
+ break;
+ }
+ case HLT:
+ switch (instr->ImmException()) {
+ case kTraceOpcode:
+ DoTrace(instr);
+ return;
+ case kLogOpcode:
+ DoLog(instr);
+ return;
+ case kPrintfOpcode:
+ DoPrintf(instr);
+ return;
+ default:
+ HostBreakpoint();
+ return;
+ }
+ case SVC:
+ // The SVC instruction is hijacked by the JIT as a pseudo-instruction
+ // causing the Simulator to execute host-native code for callWithABI.
+ switch (instr->ImmException()) {
+ case kCallRtRedirected:
+ VisitCallRedirection(instr);
+ return;
+ case kMarkStackPointer: {
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!spStack_.append(get_sp()))
+ oomUnsafe.crash("tracking stack for ARM64 simulator");
+ return;
+ }
+ case kCheckStackPointer: {
+ DebugOnly<int64_t> current = get_sp();
+ DebugOnly<int64_t> expected = spStack_.popCopy();
+ VIXL_ASSERT(current == expected);
+ return;
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::setGPR32Result(int32_t result) {
+ set_wreg(0, result);
+}
+
+
+void Simulator::setGPR64Result(int64_t result) {
+ set_xreg(0, result);
+}
+
+
+void Simulator::setFP32Result(float result) {
+ set_sreg(0, result);
+}
+
+
+void Simulator::setFP64Result(double result) {
+ set_dreg(0, result);
+}
+
+
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int64_t arg0);
+typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1);
+typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2);
+typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3);
+typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4);
+typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5);
+typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6);
+typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7);
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3);
+
+typedef int64_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int_IntDouble)(int64_t arg0, double arg1);
+typedef int64_t (*Prototype_Int_DoubleInt)(double arg0, int64_t arg1);
+typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, uint64_t arg1, uint64_t arg2);
+typedef int64_t (*Prototype_Int_IntDoubleIntInt)(uint64_t arg0, double arg1,
+ uint64_t arg2, uint64_t arg3);
+
+typedef float (*Prototype_Float32_Float32)(float arg0);
+typedef int64_t (*Prototype_Int_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int64_t arg0);
+typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
+ double arg2, double arg3);
+
+typedef int32_t (*Prototype_Int32_General)(int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32)(int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32)(int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32General)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32Int32General)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Float32Float32Int32Int32Int32General)(int64_t,
+ int32_t,
+ float,
+ float,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General)(int64_t,
+ int32_t,
+ float,
+ float,
+ float,
+ float,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General)(int64_t,
+ int32_t,
+ float,
+ float,
+ int32_t,
+ float,
+ float,
+ int32_t,
+ float,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32General)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int64)(int64_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32General)(int64_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int64Int64)(int64_t,
+ int32_t,
+ int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32)(int64_t,
+ int32_t,
+ int64_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32Int32)(int64_t,
+ int32_t,
+ int64_t,
+ int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneral)(int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralGeneral)(int64_t,
+ int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32Int32)(int64_t,
+ int64_t,
+ int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int32Int32)(int64_t, int64_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32)(int64_t, int64_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64)(int64_t, int64_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64General)(int64_t, int64_t,
+ int32_t, int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64General)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64General)(int64_t, int64_t,
+ int64_t, int64_t,
+ int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32)(int64_t, int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32)(int64_t,
+ int32_t,
+ int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32General)(int64_t,
+ int32_t,
+ int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32GeneralInt32)(int64_t,
+ int32_t,
+ int32_t,
+ int64_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32)(
+ int64_t,
+ int64_t,
+ int32_t,
+ int64_t,
+ int32_t,
+ int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32General)(int64_t,
+ int64_t,
+ int32_t,
+ int64_t);
+typedef int64_t (*Prototype_Int64_General)(int64_t);
+typedef int64_t (*Prototype_Int64_GeneralInt64)(int64_t, int64_t);
+
+// Simulator support for callWithABI().
+void
+Simulator::VisitCallRedirection(const Instruction* instr)
+{
+ VIXL_ASSERT(instr->Mask(ExceptionMask) == SVC);
+ VIXL_ASSERT(instr->ImmException() == kCallRtRedirected);
+
+ const Redirection* redir = Redirection::FromSvcInstruction(instr);
+ uintptr_t nativeFn = reinterpret_cast<uintptr_t>(redir->nativeFunction());
+
+ // Stack must be aligned prior to the call.
+ // FIXME: It's actually our job to perform the alignment...
+ //VIXL_ASSERT((xreg(31, Reg31IsStackPointer) & (StackAlignment - 1)) == 0);
+
+ // Used to assert that callee-saved registers are preserved.
+ DebugOnly<int64_t> x19 = xreg(19);
+ DebugOnly<int64_t> x20 = xreg(20);
+ DebugOnly<int64_t> x21 = xreg(21);
+ DebugOnly<int64_t> x22 = xreg(22);
+ DebugOnly<int64_t> x23 = xreg(23);
+ DebugOnly<int64_t> x24 = xreg(24);
+ DebugOnly<int64_t> x25 = xreg(25);
+ DebugOnly<int64_t> x26 = xreg(26);
+ DebugOnly<int64_t> x27 = xreg(27);
+ DebugOnly<int64_t> x28 = xreg(28);
+ DebugOnly<int64_t> x29 = xreg(29);
+ DebugOnly<int64_t> savedSP = get_sp();
+
+ // Remember LR for returning from the "call".
+ int64_t savedLR = xreg(30);
+
+ // Allow recursive Simulator calls: returning from the call must stop
+ // the simulation and transition back to native Simulator code.
+ set_xreg(30, int64_t(kEndOfSimAddress));
+
+ // Store argument register values in local variables for ease of use below.
+ int64_t x0 = xreg(0);
+ int64_t x1 = xreg(1);
+ int64_t x2 = xreg(2);
+ int64_t x3 = xreg(3);
+ int64_t x4 = xreg(4);
+ int64_t x5 = xreg(5);
+ int64_t x6 = xreg(6);
+ int64_t x7 = xreg(7);
+ int64_t x8 = xreg(8);
+ double d0 = dreg(0);
+ double d1 = dreg(1);
+ double d2 = dreg(2);
+ double d3 = dreg(3);
+ float s0 = sreg(0);
+ float s1 = sreg(1);
+ float s2 = sreg(2);
+ float s3 = sreg(3);
+ float s4 = sreg(4);
+
+ // Dispatch the call and set the return value.
+ switch (redir->type()) {
+ // Cases with int64_t return type.
+ case js::jit::Args_General0: {
+ int64_t ret = reinterpret_cast<Prototype_General0>(nativeFn)();
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General1: {
+ int64_t ret = reinterpret_cast<Prototype_General1>(nativeFn)(x0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General2: {
+ int64_t ret = reinterpret_cast<Prototype_General2>(nativeFn)(x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General3: {
+ int64_t ret = reinterpret_cast<Prototype_General3>(nativeFn)(x0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General4: {
+ int64_t ret = reinterpret_cast<Prototype_General4>(nativeFn)(x0, x1, x2, x3);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General5: {
+ int64_t ret = reinterpret_cast<Prototype_General5>(nativeFn)(x0, x1, x2, x3, x4);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General6: {
+ int64_t ret = reinterpret_cast<Prototype_General6>(nativeFn)(x0, x1, x2, x3, x4, x5);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General7: {
+ int64_t ret = reinterpret_cast<Prototype_General7>(nativeFn)(x0, x1, x2, x3, x4, x5, x6);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General8: {
+ int64_t ret = reinterpret_cast<Prototype_General8>(nativeFn)(x0, x1, x2, x3, x4, x5, x6, x7);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int_GeneralGeneralGeneralInt64: {
+ int64_t ret = reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(nativeFn)(x0, x1, x2, x3);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int_GeneralGeneralInt64Int64: {
+ int64_t ret = reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(nativeFn)(x0, x1, x2, x3);
+ setGPR64Result(ret);
+ break;
+ }
+
+ // Cases with GPR return type. This can be int32 or int64, but int64 is a safer assumption.
+ case js::jit::Args_Int_Double: {
+ int64_t ret = reinterpret_cast<Prototype_Int_Double>(nativeFn)(d0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int_IntDouble: {
+ int64_t ret = reinterpret_cast<Prototype_Int_IntDouble>(nativeFn)(x0, d0);
+ setGPR64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int_DoubleInt: {
+ int64_t ret = reinterpret_cast<Prototype_Int_DoubleInt>(nativeFn)(d0, x0);
+ setGPR64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int_IntDoubleIntInt: {
+ int64_t ret = reinterpret_cast<Prototype_Int_IntDoubleIntInt>(nativeFn)(x0, d0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int_DoubleIntInt: {
+ int64_t ret = reinterpret_cast<Prototype_Int_DoubleIntInt>(nativeFn)(d0, x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+
+ // Cases with float return type.
+ case js::jit::Args_Float32_Float32: {
+ float ret = reinterpret_cast<Prototype_Float32_Float32>(nativeFn)(s0);
+ setFP32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int_Float32: {
+ int64_t ret = reinterpret_cast<Prototype_Int_Float32>(nativeFn)(s0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Float32_Float32Float32: {
+ float ret = reinterpret_cast<Prototype_Float32_Float32Float32>(nativeFn)(s0, s1);
+ setFP32Result(ret);
+ break;
+ }
+
+ // Cases with double return type.
+ case js::jit::Args_Double_None: {
+ double ret = reinterpret_cast<Prototype_Double_None>(nativeFn)();
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_Double: {
+ double ret = reinterpret_cast<Prototype_Double_Double>(nativeFn)(d0);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_Int: {
+ double ret = reinterpret_cast<Prototype_Double_Int>(nativeFn)(x0);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleInt: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleInt>(nativeFn)(d0, x0);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleDouble: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleDouble>(nativeFn)(d0, d1);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleDoubleDouble: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(nativeFn)(d0, d1, d2);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleDoubleDoubleDouble: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(nativeFn)(d0, d1, d2, d3);
+ setFP64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Double_IntDouble: {
+ double ret = reinterpret_cast<Prototype_Double_IntDouble>(nativeFn)(x0, d0);
+ setFP64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int32_General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_General>(nativeFn)(x0);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32>(nativeFn)(x0, x1);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32>(
+ nativeFn)(x0, x1, x2);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32Int32>(
+ nativeFn)(x0, x1, x2, x3, x4, x5);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32General>(
+ nativeFn)(x0, x1, x2, x3, x4, x5);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32Int32Int32General>(
+ nativeFn)(x0, x1, x2, x3, x4, x5, x6, x7);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Float32Float32Int32Int32Int32General>(
+ nativeFn)(x0, x1, s0, s1, x2, x3, x4, x5);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General>(
+ nativeFn)(x0, x1, s0, s1, s2, s3, x2, x3, x4, x5, x6);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General>(
+ nativeFn)(x0, x1, s0, s1, x2, s2, s3, x3, s4, x4, x5, x6, x7, x8);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32General>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int64>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32General>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int64Int64>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32GeneralInt32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32GeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32Int32>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneral: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneral>(nativeFn)(x0, x1);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneralGeneral>(
+ nativeFn)(x0, x1, x2);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneralInt32Int32>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int32Int32>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32>(
+ nativeFn)(x0, x1, x2);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64General>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64General>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64General>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32>(nativeFn)(x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32Int32>(
+ nativeFn)(x0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32General: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32General>(
+ nativeFn)(x0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32GeneralInt32: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32Int32GeneralInt32>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32: {
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32>(nativeFn)(
+ x0, x1, x2, x3, x4, x5, x6);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32General>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int64_General: {
+ int64_t ret =
+ reinterpret_cast<Prototype_Int64_General>(
+ nativeFn)(x0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int64_GeneralInt64: {
+ int64_t ret =
+ reinterpret_cast<Prototype_Int64_GeneralInt64>(
+ nativeFn)(x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+
+ default:
+ MOZ_CRASH("Unknown function type.");
+ }
+
+ // Nuke the volatile registers. x0-x7 are used as result registers, but except
+ // for x0, none are used in the above signatures.
+ for (int i = 1; i <= 18; i++) {
+ // The poison value 0xc0defeed1badda7a reads as "code feed 1 bad data".
+ set_xreg(i, int64_t(0xc0defeed1badda7a));
+ }
+
+ // Assert that callee-saved registers are unchanged.
+ VIXL_ASSERT(xreg(19) == x19);
+ VIXL_ASSERT(xreg(20) == x20);
+ VIXL_ASSERT(xreg(21) == x21);
+ VIXL_ASSERT(xreg(22) == x22);
+ VIXL_ASSERT(xreg(23) == x23);
+ VIXL_ASSERT(xreg(24) == x24);
+ VIXL_ASSERT(xreg(25) == x25);
+ VIXL_ASSERT(xreg(26) == x26);
+ VIXL_ASSERT(xreg(27) == x27);
+ VIXL_ASSERT(xreg(28) == x28);
+ VIXL_ASSERT(xreg(29) == x29);
+
+ // Assert that the stack is unchanged.
+ VIXL_ASSERT(savedSP == get_sp());
+
+ // Simulate a return.
+ set_lr(savedLR);
+ set_pc((Instruction*)savedLR);
+ if (getenv("USE_DEBUGGER"))
+ printf("SVCRET\n");
+}
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+void
+Simulator::FlushICache()
+{
+ // Flush the caches recorded by the current thread as well as what got
+ // recorded from other threads before this call.
+ auto& vec = SimulatorProcess::getICacheFlushes(this);
+ for (auto& flush : vec) {
+ decoder_->FlushICache(flush.start, flush.length);
+ }
+ vec.clear();
+ pendingCacheRequests = false;
+}
+
+void CachingDecoder::Decode(const Instruction* instr) {
+ InstDecodedKind state;
+ if (lastPage_ && lastPage_->contains(instr)) {
+ state = lastPage_->decode(instr);
+ } else {
+ uintptr_t key = SinglePageDecodeCache::PageStart(instr);
+ ICacheMap::AddPtr p = iCache_.lookupForAdd(key);
+ if (p) {
+ lastPage_ = p->value();
+ state = lastPage_->decode(instr);
+ } else {
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ SinglePageDecodeCache* newPage = js_new<SinglePageDecodeCache>(instr);
+ if (!newPage || !iCache_.add(p, key, newPage)) {
+ oomUnsafe.crash("Simulator SinglePageDecodeCache");
+ }
+ lastPage_ = newPage;
+ state = InstDecodedKind::NotDecodedYet;
+ }
+ }
+
+ switch (state) {
+ case InstDecodedKind::NotDecodedYet: {
+ cachingDecoder_.setDecodePtr(lastPage_->decodePtr(instr));
+ this->Decoder::Decode(instr);
+ break;
+ }
+#define CASE(A) \
+ case InstDecodedKind::A: { \
+ Visit##A(instr); \
+ break; \
+ }
+
+ VISITOR_LIST(CASE)
+#undef CASE
+ }
+}
+
+void CachingDecoder::FlushICache(void* start, size_t size) {
+ MOZ_ASSERT(uintptr_t(start) % vixl::kInstructionSize == 0);
+ MOZ_ASSERT(size % vixl::kInstructionSize == 0);
+ const uint8_t* it = reinterpret_cast<const uint8_t*>(start);
+ const uint8_t* end = it + size;
+ SinglePageDecodeCache* last = nullptr;
+ for (; it < end; it += vixl::kInstructionSize) {
+ auto instr = reinterpret_cast<const Instruction*>(it);
+ if (last && last->contains(instr)) {
+ last->clearDecode(instr);
+ } else {
+ uintptr_t key = SinglePageDecodeCache::PageStart(instr);
+ ICacheMap::Ptr p = iCache_.lookup(key);
+ if (p) {
+ last = p->value();
+ last->clearDecode(instr);
+ }
+ }
+ }
+}
+#endif
+
+} // namespace vixl
+
+namespace js {
+namespace jit {
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+void SimulatorProcess::recordICacheFlush(void* start, size_t length) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ ICacheFlush range{start, length};
+ for (auto& s : singleton_->pendingFlushes_) {
+ if (!s.records.append(range)) {
+ oomUnsafe.crash("Simulator recordFlushICache");
+ }
+ }
+}
+
+void SimulatorProcess::membarrier() {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ for (auto& s : singleton_->pendingFlushes_) {
+ s.thread->pendingCacheRequests = true;
+ }
+}
+
+SimulatorProcess::ICacheFlushes& SimulatorProcess::getICacheFlushes(Simulator* sim) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ for (auto& s : singleton_->pendingFlushes_) {
+ if (s.thread == sim) {
+ return s.records;
+ }
+ }
+ MOZ_CRASH("Simulator is not registered in the SimulatorProcess");
+}
+
+bool SimulatorProcess::registerSimulator(Simulator* sim) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ ICacheFlushes empty;
+ SimFlushes simFlushes{sim, std::move(empty)};
+ return singleton_->pendingFlushes_.append(std::move(simFlushes));
+}
+
+void SimulatorProcess::unregisterSimulator(Simulator* sim) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ for (auto& s : singleton_->pendingFlushes_) {
+ if (s.thread == sim) {
+ singleton_->pendingFlushes_.erase(&s);
+ return;
+ }
+ }
+ MOZ_CRASH("Simulator is not registered in the SimulatorProcess");
+}
+#endif // JS_CACHE_SIMULATOR_ARM64
+
+} // namespace jit
+} // namespace js
+
+vixl::Simulator* JSContext::simulator() const {
+ return simulator_;
+}
diff --git a/js/src/jit/arm64/vixl/Platform-vixl.h b/js/src/jit/arm64/vixl/Platform-vixl.h
new file mode 100644
index 0000000000..a4de54c785
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Platform-vixl.h
@@ -0,0 +1,39 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_PLATFORM_H
+#define VIXL_PLATFORM_H
+
+// Define platform specific functionalities.
+#include <signal.h>
+
+#include "jstypes.h"
+
+namespace vixl {
+inline void HostBreakpoint(int64_t code = 0) { raise(SIGINT); }
+} // namespace vixl
+
+#endif
diff --git a/js/src/jit/arm64/vixl/README.md b/js/src/jit/arm64/vixl/README.md
new file mode 100644
index 0000000000..7111753279
--- /dev/null
+++ b/js/src/jit/arm64/vixl/README.md
@@ -0,0 +1,7 @@
+This directory is a mix of VIXL files for ARM64 and files added to integrate
+VIXL with the SpiderMonkey MacroAssembler. Many of the SpiderMonkey extensions
+are in files prefixed with Moz*, but some may be spread across the imported
+files when convenient.
+
+VIXL upstream sources can be found at:
+https://git.linaro.org/arm/vixl.git/about/
diff --git a/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h b/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h
new file mode 100644
index 0000000000..4b9064a89b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h
@@ -0,0 +1,140 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
+#define VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
+
+namespace vixl {
+
+// Debug instructions.
+//
+// VIXL's macro-assembler and simulator support a few pseudo instructions to
+// make debugging easier. These pseudo instructions do not exist on real
+// hardware.
+//
+// TODO: Also consider allowing these pseudo-instructions to be disabled in the
+// simulator, so that users can check that the input is valid native code.
+// (This isn't possible in all cases. Printf won't work, for example.)
+//
+// Each debug pseudo instruction is represented by a HLT instruction. The HLT
+// immediate field is used to identify the type of debug pseudo instruction.
+
+enum DebugHltOpcodes {
+ kPrintfOpcode,
+ kTraceOpcode,
+ kLogOpcode,
+ // Aliases.
+ kDebugHltFirstOpcode = kPrintfOpcode,
+ kDebugHltLastOpcode = kLogOpcode
+};
+
+// Each pseudo instruction uses a custom encoding for additional arguments, as
+// described below.
+
+// Unreachable - kUnreachableOpcode
+//
+// Instruction which should never be executed. This is used as a guard in parts
+// of the code that should not be reachable, such as in data encoded inline in
+// the instructions.
+
+// Printf - kPrintfOpcode
+// - arg_count: The number of arguments.
+// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
+//
+// Simulate a call to printf.
+//
+// Floating-point and integer arguments are passed in separate sets of registers
+// in AAPCS64 (even for varargs functions), so it is not possible to determine
+// the type of each argument without some information about the values that were
+// passed in. This information could be retrieved from the printf format string,
+// but the format string is not trivial to parse so we encode the relevant
+// information with the HLT instruction.
+//
+// Also, the following registers are populated (as if for a native A64 call):
+// x0: The format string
+// x1-x7: Optional arguments, if type == CPURegister::kRegister
+// d0-d7: Optional arguments, if type == CPURegister::kFPRegister
+const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
+const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
+const unsigned kPrintfLength = 3 * kInstructionSize;
+
+const unsigned kPrintfMaxArgCount = 4;
+
+// The argument pattern is a set of two-bit fields, each with one of the
+// following values:
+enum PrintfArgPattern {
+ kPrintfArgW = 1,
+ kPrintfArgX = 2,
+ // There is no kPrintfArgS because floats are always converted to doubles in C
+ // varargs calls.
+ kPrintfArgD = 3
+};
+static const unsigned kPrintfArgPatternBits = 2;
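+
+// Illustrative example (editorial sketch, not part of the original header;
+// it assumes the first argument occupies the lowest two-bit field): a
+// simulated call equivalent to
+//   printf("%d %f", <w-arg>, <d-arg>)
+// would use arg_count == 2 and
+//   arg_pattern == kPrintfArgW | (kPrintfArgD << kPrintfArgPatternBits)
+// with the W argument passed in x1 and the D argument in d0.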
+
+// Trace - kTraceOpcode
+// - parameter: TraceParameter stored as a uint32_t
+// - command: TraceCommand stored as a uint32_t
+//
+// Allow for trace management in the generated code. This enables or disables
+// automatic tracing of the specified information for every simulated
+// instruction.
+const unsigned kTraceParamsOffset = 1 * kInstructionSize;
+const unsigned kTraceCommandOffset = 2 * kInstructionSize;
+const unsigned kTraceLength = 3 * kInstructionSize;
+
+// Trace parameters.
+enum TraceParameters {
+ LOG_DISASM = 1 << 0, // Log disassembly.
+ LOG_REGS = 1 << 1, // Log general purpose registers.
+ LOG_VREGS = 1 << 2, // Log NEON and floating-point registers.
+ LOG_SYSREGS = 1 << 3, // Log the flags and system registers.
+ LOG_WRITE = 1 << 4, // Log writes to memory.
+
+ LOG_NONE = 0,
+ LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS,
+ LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE
+};
+
+// Trace commands.
+enum TraceCommand {
+ TRACE_ENABLE = 1,
+ TRACE_DISABLE = 2
+};
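+
+// For illustration (editorial sketch, not part of the original header), a
+// Trace pseudo instruction that enables register tracing would occupy three
+// instruction slots, following the offsets defined above:
+//   +0:                   HLT with an immediate identifying kTraceOpcode
+//   +kTraceParamsOffset:  LOG_REGS      (TraceParameters, as a uint32_t)
+//   +kTraceCommandOffset: TRACE_ENABLE  (TraceCommand, as a uint32_t)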
+
+// Log - kLogOpcode
+// - parameter: TraceParameter stored as a uint32_t
+//
+// Print the specified information once. This mechanism is separate from Trace.
+// In particular, _all_ of the specified registers are printed, rather than just
+// the registers that the instruction writes.
+//
+// Any combination of the TraceParameters values can be used, except that
+// LOG_DISASM is not supported for Log.
+const unsigned kLogParamsOffset = 1 * kInstructionSize;
+const unsigned kLogLength = 2 * kInstructionSize;
+} // namespace vixl
+
+#endif // VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
diff --git a/js/src/jit/arm64/vixl/Simulator-vixl.cpp b/js/src/jit/arm64/vixl/Simulator-vixl.cpp
new file mode 100644
index 0000000000..71e1a31d46
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.cpp
@@ -0,0 +1,4371 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jstypes.h"
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include "jit/arm64/vixl/Simulator-vixl.h"
+
+#include <cmath>
+#include <string.h>
+
+#include "jit/AtomicOperations.h"
+
+namespace vixl {
+
+const Instruction* Simulator::kEndOfSimAddress = NULL;
+
+void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
+ int width = msb - lsb + 1;
+ VIXL_ASSERT(IsUintN(width, bits) || IsIntN(width, bits));
+
+ bits <<= lsb;
+ uint32_t mask = ((1 << width) - 1) << lsb;
+ VIXL_ASSERT((mask & write_ignore_mask_) == 0);
+
+ value_ = (value_ & ~mask) | (bits & mask);
+}
+
+
+SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
+ case FPCR:
+ return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
+ default:
+ VIXL_UNREACHABLE();
+ return SimSystemRegister();
+ }
+}
+
+
+void Simulator::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ ExecuteInstruction();
+ LogAllWrittenRegisters();
+ }
+}
+
+
+void Simulator::RunFrom(const Instruction* first) {
+ set_pc(first);
+ Run();
+}
+
+
+const char* Simulator::xreg_names[] = {
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "x27", "x28", "x29", "lr", "xzr", "sp"};
+
+const char* Simulator::wreg_names[] = {
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w24", "w25", "w26", "w27", "w28", "w29", "w30", "wzr", "wsp"};
+
+const char* Simulator::sreg_names[] = {
+"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+
+const char* Simulator::dreg_names[] = {
+"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+const char* Simulator::vreg_names[] = {
+"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+
+
+const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return wreg_names[code];
+}
+
+
+const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return xreg_names[code];
+}
+
+
+const char* Simulator::SRegNameForCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfFPRegisters);
+ return sreg_names[code];
+}
+
+
+const char* Simulator::DRegNameForCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfFPRegisters);
+ return dreg_names[code];
+}
+
+
+const char* Simulator::VRegNameForCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return vreg_names[code];
+}
+
+
+#define COLOUR(colour_code) "\033[0;" colour_code "m"
+#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
+#define NORMAL ""
+#define GREY "30"
+#define RED "31"
+#define GREEN "32"
+#define YELLOW "33"
+#define BLUE "34"
+#define MAGENTA "35"
+#define CYAN "36"
+#define WHITE "37"
+void Simulator::set_coloured_trace(bool value) {
+ coloured_trace_ = value;
+
+ clr_normal = value ? COLOUR(NORMAL) : "";
+ clr_flag_name = value ? COLOUR_BOLD(WHITE) : "";
+ clr_flag_value = value ? COLOUR(NORMAL) : "";
+ clr_reg_name = value ? COLOUR_BOLD(CYAN) : "";
+ clr_reg_value = value ? COLOUR(CYAN) : "";
+ clr_vreg_name = value ? COLOUR_BOLD(MAGENTA) : "";
+ clr_vreg_value = value ? COLOUR(MAGENTA) : "";
+ clr_memory_address = value ? COLOUR_BOLD(BLUE) : "";
+ clr_warning = value ? COLOUR_BOLD(YELLOW) : "";
+ clr_warning_message = value ? COLOUR(YELLOW) : "";
+ clr_printf = value ? COLOUR(GREEN) : "";
+}
+#undef COLOUR
+#undef COLOUR_BOLD
+#undef NORMAL
+#undef GREY
+#undef RED
+#undef GREEN
+#undef YELLOW
+#undef BLUE
+#undef MAGENTA
+#undef CYAN
+#undef WHITE
+
+
+void Simulator::set_trace_parameters(int parameters) {
+ bool disasm_before = trace_parameters_ & LOG_DISASM;
+ trace_parameters_ = parameters;
+ bool disasm_after = trace_parameters_ & LOG_DISASM;
+
+ if (disasm_before != disasm_after) {
+ if (disasm_after) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ } else {
+ decoder_->RemoveVisitor(print_disasm_);
+ }
+ }
+}
+
+
+void Simulator::set_instruction_stats(bool value) {
+ if (instrumentation_ == nullptr) {
+ return;
+ }
+
+ if (value != instruction_stats_) {
+ if (value) {
+ decoder_->AppendVisitor(instrumentation_);
+ } else {
+ decoder_->RemoveVisitor(instrumentation_);
+ }
+ instruction_stats_ = value;
+ }
+}
+
+// Helpers ---------------------------------------------------------------------
+uint64_t Simulator::AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ uint64_t left,
+ uint64_t right,
+ int carry_in) {
+ VIXL_ASSERT((carry_in == 0) || (carry_in == 1));
+ VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+
+ uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt;
+ uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask;
+ uint64_t sign_mask = (reg_size == kWRegSize) ? kWSignMask : kXSignMask;
+
+ left &= reg_mask;
+ right &= reg_mask;
+ uint64_t result = (left + right + carry_in) & reg_mask;
+
+ if (set_flags) {
+ nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetZ(CalcZFlag(result));
+
+ // Compute the C flag by comparing the result to the max unsigned integer.
+ uint64_t max_uint_2op = max_uint - carry_in;
+ bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right);
+ nzcv().SetC(C ? 1 : 0);
+
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ uint64_t left_sign = left & sign_mask;
+ uint64_t right_sign = right & sign_mask;
+ uint64_t result_sign = result & sign_mask;
+ bool V = (left_sign == right_sign) && (left_sign != result_sign);
+ nzcv().SetV(V ? 1 : 0);
+
+ LogSystemRegister(NZCV);
+ }
+ return result;
+}
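+
+// Worked example for AddWithCarry above (editorial illustration, not part of
+// the original source). For a 32-bit flag-setting add with carry_in == 0:
+//   0xffffffff + 0x00000001  ->  result 0x00000000, N=0 Z=1 C=1 V=0
+//   0x7fffffff + 0x00000001  ->  result 0x80000000, N=1 Z=0 C=0 V=1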
+
+
+int64_t Simulator::ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount) {
+ if (amount == 0) {
+ return value;
+ }
+ int64_t mask = reg_size == kXRegSize ? kXRegMask : kWRegMask;
+ switch (shift_type) {
+ case LSL:
+ return (value << amount) & mask;
+ case LSR:
+ return static_cast<uint64_t>(value) >> amount;
+ case ASR: {
+ // Shift used to restore the sign.
+ unsigned s_shift = kXRegSize - reg_size;
+ // Value with its sign restored.
+ int64_t s_value = (value << s_shift) >> s_shift;
+ return (s_value >> amount) & mask;
+ }
+ case ROR: {
+ if (reg_size == kWRegSize) {
+ value &= kWRegMask;
+ }
+ return (static_cast<uint64_t>(value) >> amount) |
+ ((value & ((INT64_C(1) << amount) - 1)) <<
+ (reg_size - amount));
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+}
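+
+// Worked examples for ShiftOperand above (editorial illustration, not part of
+// the original source):
+//   ShiftOperand(kWRegSize, 0x00000001, ROR, 1) == 0x80000000
+//   ShiftOperand(kWRegSize, 0x80000000, LSL, 1) == 0x00000000
+// In the LSL case the set bit is shifted out and removed by the register mask.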
+
+
+int64_t Simulator::ExtendValue(unsigned reg_size,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift) {
+ switch (extend_type) {
+ case UXTB:
+ value &= kByteMask;
+ break;
+ case UXTH:
+ value &= kHalfWordMask;
+ break;
+ case UXTW:
+ value &= kWordMask;
+ break;
+ case SXTB:
+ value = (value << 56) >> 56;
+ break;
+ case SXTH:
+ value = (value << 48) >> 48;
+ break;
+ case SXTW:
+ value = (value << 32) >> 32;
+ break;
+ case UXTX:
+ case SXTX:
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ int64_t mask = (reg_size == kXRegSize) ? kXRegMask : kWRegMask;
+ return (value << left_shift) & mask;
+}
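+
+// Worked examples for ExtendValue above (editorial illustration, not part of
+// the original source):
+//   ExtendValue(kWRegSize, 0x80, SXTB, 0)        == 0xffffff80
+//   ExtendValue(kXRegSize, 0x12345678, UXTH, 4)  == 0x56780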
+
+
+void Simulator::FPCompare(double val0, double val1, FPTrapFlags trap) {
+ AssertSupportedFPCR();
+
+ // TODO: This assumes that the C++ implementation handles comparisons in the
+ // way that we expect (as per AssertSupportedFPCR()).
+ bool process_exception = false;
+ if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
+ nzcv().SetRawValue(FPUnorderedFlag);
+ if (IsSignallingNaN(val0) || IsSignallingNaN(val1) ||
+ (trap == EnableTrap)) {
+ process_exception = true;
+ }
+ } else if (val0 < val1) {
+ nzcv().SetRawValue(FPLessThanFlag);
+ } else if (val0 > val1) {
+ nzcv().SetRawValue(FPGreaterThanFlag);
+ } else if (val0 == val1) {
+ nzcv().SetRawValue(FPEqualFlag);
+ } else {
+ VIXL_UNREACHABLE();
+ }
+ LogSystemRegister(NZCV);
+ if (process_exception) FPProcessException();
+}
+
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
+ unsigned reg_size, unsigned lane_size) {
+ VIXL_ASSERT(reg_size >= lane_size);
+
+ uint32_t format = 0;
+ if (reg_size != lane_size) {
+ switch (reg_size) {
+ default: VIXL_UNREACHABLE(); break;
+ case kQRegSizeInBytes: format = kPrintRegAsQVector; break;
+ case kDRegSizeInBytes: format = kPrintRegAsDVector; break;
+ }
+ }
+
+ switch (lane_size) {
+ default: VIXL_UNREACHABLE(); break;
+ case kQRegSizeInBytes: format |= kPrintReg1Q; break;
+ case kDRegSizeInBytes: format |= kPrintReg1D; break;
+ case kSRegSizeInBytes: format |= kPrintReg1S; break;
+ case kHRegSizeInBytes: format |= kPrintReg1H; break;
+ case kBRegSizeInBytes: format |= kPrintReg1B; break;
+ }
+ // These sizes would be duplicate case labels.
+ VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
+ VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
+ VIXL_STATIC_ASSERT(kPrintXReg == kPrintReg1D);
+ VIXL_STATIC_ASSERT(kPrintWReg == kPrintReg1S);
+
+ return static_cast<PrintRegisterFormat>(format);
+}
+
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
+ VectorFormat vform) {
+ switch (vform) {
+ default: VIXL_UNREACHABLE(); return kPrintReg16B;
+ case kFormat16B: return kPrintReg16B;
+ case kFormat8B: return kPrintReg8B;
+ case kFormat8H: return kPrintReg8H;
+ case kFormat4H: return kPrintReg4H;
+ case kFormat4S: return kPrintReg4S;
+ case kFormat2S: return kPrintReg2S;
+ case kFormat2D: return kPrintReg2D;
+ case kFormat1D: return kPrintReg1D;
+ }
+}
+
+
+void Simulator::PrintWrittenRegisters() {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (registers_[i].WrittenSinceLastLog()) PrintRegister(i);
+ }
+}
+
+
+void Simulator::PrintWrittenVRegisters() {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+ // At this point there is no type information, so print as a raw 1Q.
+ if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q);
+ }
+}
+
+
+void Simulator::PrintSystemRegisters() {
+ PrintSystemRegister(NZCV);
+ PrintSystemRegister(FPCR);
+}
+
+
+void Simulator::PrintRegisters() {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ PrintRegister(i);
+ }
+}
+
+
+void Simulator::PrintVRegisters() {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+ // At this point there is no type information, so print as a raw 1Q.
+ PrintVRegister(i, kPrintReg1Q);
+ }
+}
+
+
+// Print a register's name and raw value.
+//
+// Only the least-significant `size_in_bytes` bytes of the register are printed,
+// but the value is aligned as if the whole register had been printed.
+//
+// For typical register updates, size_in_bytes should be set to kXRegSizeInBytes
+// -- the default -- so that the whole register is printed. Other values of
+// size_in_bytes are intended for use when the register hasn't actually been
+// updated (such as in PrintWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a memory access annotation).
+void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
+ int size_in_bytes) {
+ // The template for all supported sizes.
+ // "# x{code}: 0xffeeddccbbaa9988"
+ // "# w{code}: 0xbbaa9988"
+ // "# w{code}<15:0>: 0x9988"
+ // "# w{code}<7:0>: 0x88"
+ unsigned padding_chars = (kXRegSizeInBytes - size_in_bytes) * 2;
+
+ const char * name = "";
+ const char * suffix = "";
+ switch (size_in_bytes) {
+ case kXRegSizeInBytes: name = XRegNameForCode(code, r31mode); break;
+ case kWRegSizeInBytes: name = WRegNameForCode(code, r31mode); break;
+ case 2:
+ name = WRegNameForCode(code, r31mode);
+ suffix = "<15:0>";
+ padding_chars -= strlen(suffix);
+ break;
+ case 1:
+ name = WRegNameForCode(code, r31mode);
+ suffix = "<7:0>";
+ padding_chars -= strlen(suffix);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix);
+
+ // Print leading padding spaces.
+ VIXL_ASSERT(padding_chars < (kXRegSizeInBytes * 2));
+ for (unsigned i = 0; i < padding_chars; i++) {
+ putc(' ', stream_);
+ }
+
+ // Print the specified bits in hexadecimal format.
+ uint64_t bits = reg<uint64_t>(code, r31mode);
+ bits &= kXRegMask >> ((kXRegSizeInBytes - size_in_bytes) * 8);
+ VIXL_STATIC_ASSERT(sizeof(bits) == kXRegSizeInBytes);
+
+ int chars = size_in_bytes * 2;
+ fprintf(stream_, "%s0x%0*" PRIx64 "%s",
+ clr_reg_value, chars, bits, clr_normal);
+}
+
+
+void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
+ registers_[code].NotifyRegisterLogged();
+
+ // Don't print writes into xzr.
+ if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+
+ // The template for all x and w registers:
+ // "# x{code}: 0x{value}"
+ // "# w{code}: 0x{value}"
+
+ PrintRegisterRawHelper(code, r31mode);
+ fprintf(stream_, "\n");
+}
+
+
+// Print a register's name and raw value.
+//
+// The `bytes` and `lsb` arguments can be used to limit the bytes that are
+// printed. These arguments are intended for use in cases where the register
+// hasn't actually been updated (such as in PrintVWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a floating-point interpretation or a memory access annotation).
+void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
+ // The template for vector types:
+ // "# v{code}: 0xffeeddccbbaa99887766554433221100".
+ // An example with bytes=4 and lsb=8:
+ // "# v{code}: 0xbbaa9988 ".
+ fprintf(stream_, "# %s%5s: %s",
+ clr_vreg_name, VRegNameForCode(code), clr_vreg_value);
+
+ int msb = lsb + bytes - 1;
+ int byte = kQRegSizeInBytes - 1;
+
+ // Print leading padding spaces. (Two spaces per byte.)
+ while (byte > msb) {
+ fprintf(stream_, " ");
+ byte--;
+ }
+
+ // Print the specified part of the value, byte by byte.
+ qreg_t rawbits = qreg(code);
+ fprintf(stream_, "0x");
+ while (byte >= lsb) {
+ fprintf(stream_, "%02x", rawbits.val[byte]);
+ byte--;
+ }
+
+ // Print trailing padding spaces.
+ while (byte >= 0) {
+ fprintf(stream_, " ");
+ byte--;
+ }
+ fprintf(stream_, "%s", clr_normal);
+}
+
+
+// Print each of the specified lanes of a register as a float or double value.
+//
+// The `lane_count` and `rightmost_lane` arguments can be used to limit the
+// lanes that are printed. These arguments are intended for use in cases where
+// the register hasn't actually been updated (such as in PrintVWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a memory access annotation).
+void Simulator::PrintVRegisterFPHelper(unsigned code,
+ unsigned lane_size_in_bytes,
+ int lane_count,
+ int rightmost_lane) {
+ VIXL_ASSERT((lane_size_in_bytes == kSRegSizeInBytes) ||
+ (lane_size_in_bytes == kDRegSizeInBytes));
+
+ unsigned msb = ((lane_count + rightmost_lane) * lane_size_in_bytes);
+ VIXL_ASSERT(msb <= kQRegSizeInBytes);
+
+ // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register
+ // name is used:
+ // " (s{code}: {value})"
+ // " (d{code}: {value})"
+ // For vector types, "..." is used to represent one or more omitted lanes.
+ // " (..., {value}, {value}, ...)"
+ if ((lane_count == 1) && (rightmost_lane == 0)) {
+ const char * name =
+ (lane_size_in_bytes == kSRegSizeInBytes) ? SRegNameForCode(code)
+ : DRegNameForCode(code);
+ fprintf(stream_, " (%s%s: ", clr_vreg_name, name);
+ } else {
+ if (msb < (kQRegSizeInBytes - 1)) {
+ fprintf(stream_, " (..., ");
+ } else {
+ fprintf(stream_, " (");
+ }
+ }
+
+ // Print the list of values.
+ const char * separator = "";
+ int leftmost_lane = rightmost_lane + lane_count - 1;
+ for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) {
+ double value =
+ (lane_size_in_bytes == kSRegSizeInBytes) ? vreg(code).Get<float>(lane)
+ : vreg(code).Get<double>(lane);
+ fprintf(stream_, "%s%s%#g%s", separator, clr_vreg_value, value, clr_normal);
+ separator = ", ";
+ }
+
+ if (rightmost_lane > 0) {
+ fprintf(stream_, ", ...");
+ }
+ fprintf(stream_, ")");
+}
+
+
+void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) {
+ vregisters_[code].NotifyRegisterLogged();
+
+ int lane_size_log2 = format & kPrintRegLaneSizeMask;
+
+ int reg_size_log2;
+ if (format & kPrintRegAsQVector) {
+ reg_size_log2 = kQRegSizeInBytesLog2;
+ } else if (format & kPrintRegAsDVector) {
+ reg_size_log2 = kDRegSizeInBytesLog2;
+ } else {
+ // Scalar types.
+ reg_size_log2 = lane_size_log2;
+ }
+
+ int lane_count = 1 << (reg_size_log2 - lane_size_log2);
+ int lane_size = 1 << lane_size_log2;
+
+ // The template for vector types:
+ // "# v{code}: 0x{rawbits} (..., {value}, ...)".
+ // The template for scalar types:
+ // "# v{code}: 0x{rawbits} ({reg}:{value})".
+ // The values in parentheses after the bit representations are floating-point
+ // interpretations. They are displayed only if the kPrintVRegAsFP bit is set.
+
+ PrintVRegisterRawHelper(code);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(code, lane_size, lane_count);
+ }
+
+ fprintf(stream_, "\n");
+}
+
+
+void Simulator::PrintSystemRegister(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name, clr_flag_value,
+ nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+ clr_normal);
+ break;
+ case FPCR: {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+ VIXL_ASSERT(fpcr().RMode() < (sizeof(rmode) / sizeof(rmode[0])));
+ fprintf(stream_,
+ "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name, clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
+ clr_normal);
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::PrintRead(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format) {
+ registers_[reg_code].NotifyRegisterLogged();
+
+ USE(format);
+
+ // The template is "# {reg}: 0x{value} <- {address}".
+ PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister);
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintVRead(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format,
+ unsigned lane) {
+ vregisters_[reg_code].NotifyRegisterLogged();
+
+ // The template is "# v{code}: 0x{rawbits} <- address".
+ PrintVRegisterRawHelper(reg_code);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format),
+ GetPrintRegLaneCount(format), lane);
+ }
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintWrite(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format) {
+ VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
+
+  // The template is "# {reg}: 0x{value} -> {address}". To keep the trace tidy
+ // and readable, the value is aligned with the values in the register trace.
+ PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister,
+ GetPrintRegSizeInBytes(format));
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintVWrite(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format,
+ unsigned lane) {
+ // The templates:
+ // "# v{code}: 0x{rawbits} -> {address}"
+ // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}".
+ // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}"
+ // Because this trace doesn't represent a change to the source register's
+ // value, only the relevant part of the value is printed. To keep the trace
+ // tidy and readable, the raw value is aligned with the other values in the
+ // register trace.
+ int lane_count = GetPrintRegLaneCount(format);
+ int lane_size = GetPrintRegLaneSizeInBytes(format);
+ int reg_size = GetPrintRegSizeInBytes(format);
+ PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane);
+ }
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+// Visitors --------------------------------------------------------------------
+
+void Simulator::VisitUnimplemented(const Instruction* instr) {
+ printf("Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<const void*>(instr), instr->InstructionBits());
+ VIXL_UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitUnallocated(const Instruction* instr) {
+ printf("Unallocated instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<const void*>(instr), instr->InstructionBits());
+ VIXL_UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitPCRelAddressing(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(PCRelAddressingMask) == ADR) ||
+ (instr->Mask(PCRelAddressingMask) == ADRP));
+
+ set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
+}
+
+
+void Simulator::VisitUnconditionalBranch(const Instruction* instr) {
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case BL:
+ set_lr(instr->NextInstruction());
+ VIXL_FALLTHROUGH();
+ case B:
+ set_pc(instr->ImmPCOffsetTarget());
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitConditionalBranch(const Instruction* instr) {
+ VIXL_ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+ if (ConditionPassed(instr->ConditionBranch())) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranchToRegister(const Instruction* instr) {
+ const Instruction* target = Instruction::Cast(xreg(instr->Rn()));
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BLR:
+ set_lr(instr->NextInstruction());
+ VIXL_FALLTHROUGH();
+ case BR:
+ case RET: set_pc(target); break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitTestBranch(const Instruction* instr) {
+ unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40();
+ bool bit_zero = ((xreg(instr->Rt()) >> bit_pos) & 1) == 0;
+ bool take_branch = false;
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: take_branch = bit_zero; break;
+ case TBNZ: take_branch = !bit_zero; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitCompareBranch(const Instruction* instr) {
+ unsigned rt = instr->Rt();
+ bool take_branch = false;
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w: take_branch = (wreg(rt) == 0); break;
+ case CBZ_x: take_branch = (xreg(rt) == 0); break;
+ case CBNZ_w: take_branch = (wreg(rt) != 0); break;
+ case CBNZ_x: take_branch = (xreg(rt) != 0); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::AddSubHelper(const Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ bool set_flags = instr->FlagsUpdate();
+ int64_t new_val = 0;
+ Instr operation = instr->Mask(AddSubOpMask);
+
+ switch (operation) {
+ case ADD:
+ case ADDS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ op2);
+ break;
+ }
+ case SUB:
+ case SUBS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ ~op2,
+ 1);
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ set_reg(reg_size, instr->Rd(), new_val, LogRegWrites, instr->RdMode());
+}
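+
+// Note on AddSubHelper above (editorial comment, not part of the original
+// source): SUB and SUBS reuse AddWithCarry through the two's-complement
+// identity a - b == a + ~b + 1, so a single flag-setting routine covers both
+// additions and subtractions.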
+
+
+void Simulator::VisitAddSubShifted(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = ShiftOperand(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Shift>(instr->ShiftDP()),
+ instr->ImmDPShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubImmediate(const Instruction* instr) {
+ int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubExtended(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = ExtendValue(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Extend>(instr->ExtendMode()),
+ instr->ImmExtendShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubWithCarry(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = reg(reg_size, instr->Rm());
+ int64_t new_val;
+
+ if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
+ op2 = ~op2;
+ }
+
+ new_val = AddWithCarry(reg_size,
+ instr->FlagsUpdate(),
+ reg(reg_size, instr->Rn()),
+ op2,
+ C());
+
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitLogicalShifted(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ Shift shift_type = static_cast<Shift>(instr->ShiftDP());
+ unsigned shift_amount = instr->ImmDPShift();
+ int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
+ shift_amount);
+ if (instr->Mask(NOT) == NOT) {
+ op2 = ~op2;
+ }
+ LogicalHelper(instr, op2);
+}
+
+
+void Simulator::VisitLogicalImmediate(const Instruction* instr) {
+ LogicalHelper(instr, instr->ImmLogical());
+}
+
+
+void Simulator::LogicalHelper(const Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op1 = reg(reg_size, instr->Rn());
+ int64_t result = 0;
+ bool update_flags = false;
+
+ // Switch on the logical operation, stripping out the NOT bit, as it has a
+ // different meaning for logical immediate instructions.
+ switch (instr->Mask(LogicalOpMask & ~NOT)) {
+ case ANDS: update_flags = true; VIXL_FALLTHROUGH();
+ case AND: result = op1 & op2; break;
+ case ORR: result = op1 | op2; break;
+ case EOR: result = op1 ^ op2; break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+
+ if (update_flags) {
+ nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetZ(CalcZFlag(result));
+ nzcv().SetC(0);
+ nzcv().SetV(0);
+ LogSystemRegister(NZCV);
+ }
+
+ set_reg(reg_size, instr->Rd(), result, LogRegWrites, instr->RdMode());
+}
+
+
+void Simulator::VisitConditionalCompareRegister(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
+}
+
+
+void Simulator::VisitConditionalCompareImmediate(const Instruction* instr) {
+ ConditionalCompareHelper(instr, instr->ImmCondCmp());
+}
+
+
+void Simulator::ConditionalCompareHelper(const Instruction* instr,
+ int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op1 = reg(reg_size, instr->Rn());
+
+ if (ConditionPassed(instr->Condition())) {
+ // If the condition passes, set the status flags to the result of comparing
+ // the operands.
+ if (instr->Mask(ConditionalCompareMask) == CCMP) {
+ AddWithCarry(reg_size, true, op1, ~op2, 1);
+ } else {
+ VIXL_ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
+ AddWithCarry(reg_size, true, op1, op2, 0);
+ }
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+}
+
+
+void Simulator::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+ int offset = instr->ImmLSUnsigned() << instr->SizeLS();
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), Offset);
+}
+
+
+void Simulator::VisitLoadStorePreIndex(const Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePostIndex(const Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
+}
+
+
+void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+ unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
+
+ int64_t offset = ExtendValue(kXRegSize, xreg(instr->Rm()), ext,
+ shift_amount);
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+template<typename T>
+static T Faulted() {
+ return ~0;
+}
+
+template<>
+Simulator::qreg_t Faulted() {
+ static_assert(kQRegSizeInBytes == 16, "Known constraint");
+ static Simulator::qreg_t dummy = { {
+ 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255
+ } };
+ return dummy;
+}
+
+template<typename T> T
+Simulator::Read(uintptr_t address)
+{
+ address = Memory::AddressUntag(address);
+ if (handle_wasm_seg_fault(address, sizeof(T)))
+ return Faulted<T>();
+ return Memory::Read<T>(address);
+}
+
+template <typename T> void
+Simulator::Write(uintptr_t address, T value)
+{
+ address = Memory::AddressUntag(address);
+ if (handle_wasm_seg_fault(address, sizeof(T)))
+ return;
+ Memory::Write<T>(address, value);
+}
+
+void Simulator::LoadStoreHelper(const Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode) {
+ unsigned srcdst = instr->Rt();
+ uintptr_t address = AddressModeHelper(instr->Rn(), offset, addrmode);
+
+ LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
+ switch (op) {
+ case LDRB_w:
+ set_wreg(srcdst, Read<uint8_t>(address), NoRegLog); break;
+ case LDRH_w:
+ set_wreg(srcdst, Read<uint16_t>(address), NoRegLog); break;
+ case LDR_w:
+ set_wreg(srcdst, Read<uint32_t>(address), NoRegLog); break;
+ case LDR_x:
+ set_xreg(srcdst, Read<uint64_t>(address), NoRegLog); break;
+ case LDRSB_w:
+ set_wreg(srcdst, Read<int8_t>(address), NoRegLog); break;
+ case LDRSH_w:
+ set_wreg(srcdst, Read<int16_t>(address), NoRegLog); break;
+ case LDRSB_x:
+ set_xreg(srcdst, Read<int8_t>(address), NoRegLog); break;
+ case LDRSH_x:
+ set_xreg(srcdst, Read<int16_t>(address), NoRegLog); break;
+ case LDRSW_x:
+ set_xreg(srcdst, Read<int32_t>(address), NoRegLog); break;
+ case LDR_b:
+ set_breg(srcdst, Read<uint8_t>(address), NoRegLog); break;
+ case LDR_h:
+ set_hreg(srcdst, Read<uint16_t>(address), NoRegLog); break;
+ case LDR_s:
+ set_sreg(srcdst, Read<float>(address), NoRegLog); break;
+ case LDR_d:
+ set_dreg(srcdst, Read<double>(address), NoRegLog); break;
+ case LDR_q:
+ set_qreg(srcdst, Read<qreg_t>(address), NoRegLog); break;
+
+ case STRB_w: Write<uint8_t>(address, wreg(srcdst)); break;
+ case STRH_w: Write<uint16_t>(address, wreg(srcdst)); break;
+ case STR_w: Write<uint32_t>(address, wreg(srcdst)); break;
+ case STR_x: Write<uint64_t>(address, xreg(srcdst)); break;
+ case STR_b: Write<uint8_t>(address, breg(srcdst)); break;
+ case STR_h: Write<uint16_t>(address, hreg(srcdst)); break;
+ case STR_s: Write<float>(address, sreg(srcdst)); break;
+ case STR_d: Write<double>(address, dreg(srcdst)); break;
+ case STR_q: Write<qreg_t>(address, qreg(srcdst)); break;
+
+ // Ignore prfm hint instructions.
+ case PRFM: break;
+
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ unsigned access_size = 1 << instr->SizeLS();
+ if (instr->IsLoad()) {
+ if ((op == LDR_s) || (op == LDR_d)) {
+ LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) {
+ LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ } else {
+ LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ }
+ } else {
+ if ((op == STR_s) || (op == STR_d)) {
+ LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
+ LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ } else {
+ LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ }
+ }
+
+ local_monitor_.MaybeClear();
+}
+
+
+void Simulator::VisitLoadStorePairOffset(const Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::VisitLoadStorePairPreIndex(const Instruction* instr) {
+ LoadStorePairHelper(instr, PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePairPostIndex(const Instruction* instr) {
+ LoadStorePairHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::LoadStorePairHelper(const Instruction* instr,
+ AddrMode addrmode) {
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ int element_size = 1 << instr->SizeLSPair();
+ int64_t offset = instr->ImmLSPair() * element_size;
+ uintptr_t address = AddressModeHelper(instr->Rn(), offset, addrmode);
+ uintptr_t address2 = address + element_size;
+
+ LoadStorePairOp op =
+ static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
+
+ // 'rt' and 'rt2' can only be aliased for stores.
+ VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+
+ switch (op) {
+    // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS). We
+ // will print a more detailed log.
+ case LDP_w: {
+ set_wreg(rt, Read<uint32_t>(address), NoRegLog);
+ set_wreg(rt2, Read<uint32_t>(address2), NoRegLog);
+ break;
+ }
+ case LDP_s: {
+ set_sreg(rt, Read<float>(address), NoRegLog);
+ set_sreg(rt2, Read<float>(address2), NoRegLog);
+ break;
+ }
+ case LDP_x: {
+ set_xreg(rt, Read<uint64_t>(address), NoRegLog);
+ set_xreg(rt2, Read<uint64_t>(address2), NoRegLog);
+ break;
+ }
+ case LDP_d: {
+ set_dreg(rt, Read<double>(address), NoRegLog);
+ set_dreg(rt2, Read<double>(address2), NoRegLog);
+ break;
+ }
+ case LDP_q: {
+ set_qreg(rt, Read<qreg_t>(address), NoRegLog);
+ set_qreg(rt2, Read<qreg_t>(address2), NoRegLog);
+ break;
+ }
+ case LDPSW_x: {
+ set_xreg(rt, Read<int32_t>(address), NoRegLog);
+ set_xreg(rt2, Read<int32_t>(address2), NoRegLog);
+ break;
+ }
+ case STP_w: {
+ Write<uint32_t>(address, wreg(rt));
+ Write<uint32_t>(address2, wreg(rt2));
+ break;
+ }
+ case STP_s: {
+ Write<float>(address, sreg(rt));
+ Write<float>(address2, sreg(rt2));
+ break;
+ }
+ case STP_x: {
+ Write<uint64_t>(address, xreg(rt));
+ Write<uint64_t>(address2, xreg(rt2));
+ break;
+ }
+ case STP_d: {
+ Write<double>(address, dreg(rt));
+ Write<double>(address2, dreg(rt2));
+ break;
+ }
+ case STP_q: {
+ Write<qreg_t>(address, qreg(rt));
+ Write<qreg_t>(address2, qreg(rt2));
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ // Print a detailed trace (including the memory address) instead of the basic
+ // register:value trace generated by set_*reg().
+ if (instr->IsLoad()) {
+ if ((op == LDP_s) || (op == LDP_d)) {
+ LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
+ LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
+ } else if (op == LDP_q) {
+ LogVRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogVRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ } else {
+ LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ }
+ } else {
+ if ((op == STP_s) || (op == STP_d)) {
+ LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
+ LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
+ } else if (op == STP_q) {
+ LogVWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ } else {
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ }
+ }
+
+ local_monitor_.MaybeClear();
+}
+
+
+void Simulator::PrintExclusiveAccessWarning() {
+ if (print_exclusive_access_warning_) {
+ fprintf(
+ stderr,
+ "%sWARNING:%s VIXL simulator support for load-/store-/clear-exclusive "
+ "instructions is limited. Refer to the README for details.%s\n",
+ clr_warning, clr_warning_message, clr_normal);
+ print_exclusive_access_warning_ = false;
+ }
+}
+
+template <typename T>
+void Simulator::CompareAndSwapHelper(const Instruction* instr) {
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+
+ unsigned element_size = sizeof(T);
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ // Verify that the address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ address = Memory::AddressUntag(address);
+ if (handle_wasm_seg_fault(address, element_size))
+ return;
+
+ bool is_acquire = instr->Bit(22) == 1;
+ bool is_release = instr->Bit(15) == 1;
+
+ T comparevalue = reg<T>(rs);
+ T newvalue = reg<T>(rt);
+
+ // The architecture permits that the data read clears any exclusive monitors
+ // associated with that location, even if the compare subsequently fails.
+ local_monitor_.Clear();
+
+ T data = Memory::Read<T>(address);
+ if (is_acquire) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+ }
+
+ if (data == comparevalue) {
+ if (is_release) {
+ // Approximate store-release by issuing a full barrier before the store.
+ __sync_synchronize();
+ }
+ Memory::Write<T>(address, newvalue);
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ }
+ set_reg<T>(rs, data);
+ LogRead(address, rs, GetPrintRegisterFormatForSize(element_size));
+}
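+
+// Reading guide for CompareAndSwapHelper above (editorial comment, not part
+// of the original source): for an instruction such as CASAL w0, w1, [x2],
+// rs/w0 holds the expected value and receives the old memory contents, rt/w1
+// holds the new value and rn/x2 holds the address. Bit 22 selects acquire and
+// bit 15 selects release semantics, both approximated here with full
+// __sync_synchronize() barriers.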
+
+template <typename T>
+void Simulator::CompareAndSwapPairHelper(const Instruction* instr) {
+ VIXL_ASSERT((sizeof(T) == 4) || (sizeof(T) == 8));
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+
+  VIXL_ASSERT((rs % 2 == 0) && (rt % 2 == 0));
+
+ unsigned element_size = sizeof(T);
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ // Verify that the address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ address = Memory::AddressUntag(address);
+ if (handle_wasm_seg_fault(address, element_size))
+ return;
+
+ uint64_t address2 = address + element_size;
+
+ bool is_acquire = instr->Bit(22) == 1;
+ bool is_release = instr->Bit(15) == 1;
+
+ T comparevalue_high = reg<T>(rs + 1);
+ T comparevalue_low = reg<T>(rs);
+ T newvalue_high = reg<T>(rt + 1);
+ T newvalue_low = reg<T>(rt);
+
+ // The architecture permits that the data read clears any exclusive monitors
+ // associated with that location, even if the compare subsequently fails.
+ local_monitor_.Clear();
+
+ T data_high = Memory::Read<T>(address);
+ T data_low = Memory::Read<T>(address2);
+
+ if (is_acquire) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+ }
+
+ bool same =
+ (data_high == comparevalue_high) && (data_low == comparevalue_low);
+ if (same) {
+ if (is_release) {
+ // Approximate store-release by issuing a full barrier before the store.
+ __sync_synchronize();
+ }
+
+ Memory::Write<T>(address, newvalue_high);
+ Memory::Write<T>(address2, newvalue_low);
+ }
+
+ set_reg<T>(rs + 1, data_high);
+ set_reg<T>(rs, data_low);
+
+ LogRead(address, rs + 1, GetPrintRegisterFormatForSize(element_size));
+ LogRead(address2, rs, GetPrintRegisterFormatForSize(element_size));
+
+ if (same) {
+ LogWrite(address, rt + 1, GetPrintRegisterFormatForSize(element_size));
+ LogWrite(address2, rt, GetPrintRegisterFormatForSize(element_size));
+ }
+}
+
+void Simulator::VisitLoadStoreExclusive(const Instruction* instr) {
+ LoadStoreExclusive op =
+ static_cast<LoadStoreExclusive>(instr->Mask(LoadStoreExclusiveMask));
+
+ switch (op) {
+ case CAS_w:
+ case CASA_w:
+ case CASL_w:
+ case CASAL_w:
+ CompareAndSwapHelper<uint32_t>(instr);
+ break;
+ case CAS_x:
+ case CASA_x:
+ case CASL_x:
+ case CASAL_x:
+ CompareAndSwapHelper<uint64_t>(instr);
+ break;
+ case CASB:
+ case CASAB:
+ case CASLB:
+ case CASALB:
+ CompareAndSwapHelper<uint8_t>(instr);
+ break;
+ case CASH:
+ case CASAH:
+ case CASLH:
+ case CASALH:
+ CompareAndSwapHelper<uint16_t>(instr);
+ break;
+ case CASP_w:
+ case CASPA_w:
+ case CASPL_w:
+ case CASPAL_w:
+ CompareAndSwapPairHelper<uint32_t>(instr);
+ break;
+ case CASP_x:
+ case CASPA_x:
+ case CASPL_x:
+ case CASPAL_x:
+ CompareAndSwapPairHelper<uint64_t>(instr);
+ break;
+ default:
+ PrintExclusiveAccessWarning();
+
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ unsigned rn = instr->Rn();
+
+ bool is_exclusive = !instr->LdStXNotExclusive();
+ bool is_acquire_release = !is_exclusive || instr->LdStXAcquireRelease();
+ bool is_load = instr->LdStXLoad();
+ bool is_pair = instr->LdStXPair();
+
+ unsigned element_size = 1 << instr->LdStXSizeLog2();
+ unsigned access_size = is_pair ? element_size * 2 : element_size;
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ // Verify that the address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ // Check the alignment of `address`.
+ if (AlignDown(address, access_size) != address) {
+ VIXL_ALIGNMENT_EXCEPTION();
+ }
+
+ // The sp must be aligned to 16 bytes when it is accessed.
+ if ((rn == 31) && (AlignDown(address, 16) != address)) {
+ VIXL_ALIGNMENT_EXCEPTION();
+ }
+
+ if (is_load) {
+ if (is_exclusive) {
+ local_monitor_.MarkExclusive(address, access_size);
+ } else {
+ // Any non-exclusive load can clear the local monitor as a side
+ // effect. We don't need to do this, but it is useful to stress the
+ // simulated code.
+ local_monitor_.Clear();
+ }
+
+        // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS).
+ // We will print a more detailed log.
+ switch (op) {
+ case LDXRB_w:
+ case LDAXRB_w:
+ case LDARB_w:
+ set_wreg(rt, Read<uint8_t>(address), NoRegLog);
+ break;
+ case LDXRH_w:
+ case LDAXRH_w:
+ case LDARH_w:
+ set_wreg(rt, Read<uint16_t>(address), NoRegLog);
+ break;
+ case LDXR_w:
+ case LDAXR_w:
+ case LDAR_w:
+ set_wreg(rt, Read<uint32_t>(address), NoRegLog);
+ break;
+ case LDXR_x:
+ case LDAXR_x:
+ case LDAR_x:
+ set_xreg(rt, Read<uint64_t>(address), NoRegLog);
+ break;
+ case LDXP_w:
+ case LDAXP_w:
+ set_wreg(rt, Read<uint32_t>(address), NoRegLog);
+ set_wreg(rt2, Read<uint32_t>(address + element_size), NoRegLog);
+ break;
+ case LDXP_x:
+ case LDAXP_x:
+ set_xreg(rt, Read<uint64_t>(address), NoRegLog);
+ set_xreg(rt2, Read<uint64_t>(address + element_size), NoRegLog);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ if (is_acquire_release) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ js::jit::AtomicOperations::fenceSeqCst();
+ }
+
+ LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ if (is_pair) {
+ LogRead(address + element_size, rt2,
+ GetPrintRegisterFormatForSize(element_size));
+ }
+ } else {
+ if (is_acquire_release) {
+ // Approximate store-release by issuing a full barrier before the
+ // store.
+ js::jit::AtomicOperations::fenceSeqCst();
+ }
+
+ bool do_store = true;
+ if (is_exclusive) {
+ do_store = local_monitor_.IsExclusive(address, access_size) &&
+ global_monitor_.IsExclusive(address, access_size);
+ set_wreg(rs, do_store ? 0 : 1);
+
+ // - All exclusive stores explicitly clear the local monitor.
+ local_monitor_.Clear();
+ } else {
+ // - Any other store can clear the local monitor as a side effect.
+ local_monitor_.MaybeClear();
+ }
+
+ if (do_store) {
+ switch (op) {
+ case STXRB_w:
+ case STLXRB_w:
+ case STLRB_w:
+ Write<uint8_t>(address, wreg(rt));
+ break;
+ case STXRH_w:
+ case STLXRH_w:
+ case STLRH_w:
+ Write<uint16_t>(address, wreg(rt));
+ break;
+ case STXR_w:
+ case STLXR_w:
+ case STLR_w:
+ Write<uint32_t>(address, wreg(rt));
+ break;
+ case STXR_x:
+ case STLXR_x:
+ case STLR_x:
+ Write<uint64_t>(address, xreg(rt));
+ break;
+ case STXP_w:
+ case STLXP_w:
+ Write<uint32_t>(address, wreg(rt));
+ Write<uint32_t>(address + element_size, wreg(rt2));
+ break;
+ case STXP_x:
+ case STLXP_x:
+ Write<uint64_t>(address, xreg(rt));
+ Write<uint64_t>(address + element_size, xreg(rt2));
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ if (is_pair) {
+ LogWrite(address + element_size, rt2,
+ GetPrintRegisterFormatForSize(element_size));
+ }
+ }
+ }
+ }
+}
+
+template <typename T>
+void Simulator::AtomicMemorySimpleHelper(const Instruction* instr) {
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+
+ bool is_acquire = (instr->Bit(23) == 1) && (rt != kZeroRegCode);
+ bool is_release = instr->Bit(22) == 1;
+
+ unsigned element_size = sizeof(T);
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ // Verify that the address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ address = Memory::AddressUntag(address);
+ if (handle_wasm_seg_fault(address, sizeof(T)))
+ return;
+
+ T value = reg<T>(rs);
+
+ T data = Memory::Read<T>(address);
+
+ if (is_acquire) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+ }
+
+ T result = 0;
+ switch (instr->Mask(AtomicMemorySimpleOpMask)) {
+ case LDADDOp:
+ result = data + value;
+ break;
+ case LDCLROp:
+ VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
+ result = data & ~value;
+ break;
+ case LDEOROp:
+ VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
+ result = data ^ value;
+ break;
+ case LDSETOp:
+ VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
+ result = data | value;
+ break;
+
+ // Signed/Unsigned difference is done via the templated type T.
+ case LDSMAXOp:
+ case LDUMAXOp:
+ result = (data > value) ? data : value;
+ break;
+ case LDSMINOp:
+ case LDUMINOp:
+ result = (data > value) ? value : data;
+ break;
+ }
+
+ if (is_release) {
+ // Approximate store-release by issuing a full barrier before the store.
+ __sync_synchronize();
+ }
+
+ Memory::Write<T>(address, result);
+ set_reg<T>(rt, data, NoRegLog);
+
+ LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogWrite(address, rs, GetPrintRegisterFormatForSize(element_size));
+}
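+
+// Reading guide for AtomicMemorySimpleHelper above (editorial comment, not
+// part of the original source): LDADD behaves like an atomic fetch-and-add;
+// rt receives the old memory value and memory is updated with (old + rs).
+// LDCLR, LDEOR, LDSET and the LDSMAX/LDSMIN/LDUMAX/LDUMIN variants follow the
+// same pattern with bit-clear, exclusive-or, bit-set and signed/unsigned
+// max/min as the combining operation.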
+
+template <typename T>
+void Simulator::AtomicMemorySwapHelper(const Instruction* instr) {
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+
+ bool is_acquire = (instr->Bit(23) == 1) && (rt != kZeroRegCode);
+ bool is_release = instr->Bit(22) == 1;
+
+ unsigned element_size = sizeof(T);
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ // Verify that the address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ address = Memory::AddressUntag(address);
+ if (handle_wasm_seg_fault(address, sizeof(T)))
+ return;
+
+ T data = Memory::Read<T>(address);
+ if (is_acquire) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+ }
+
+ if (is_release) {
+ // Approximate store-release by issuing a full barrier before the store.
+ __sync_synchronize();
+ }
+ Memory::Write<T>(address, reg<T>(rs));
+
+ set_reg<T>(rt, data);
+
+ LogRead(address, rt, GetPrintRegisterFormat(element_size));
+ LogWrite(address, rs, GetPrintRegisterFormat(element_size));
+}
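+
+// Reading guide for AtomicMemorySwapHelper above (editorial comment, not part
+// of the original source): SWP is an atomic exchange; rt receives the old
+// memory value while the value held in rs is written back to the same
+// address.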
+
+template <typename T>
+void Simulator::LoadAcquireRCpcHelper(const Instruction* instr) {
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+
+ unsigned element_size = sizeof(T);
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ // Verify that the address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ address = Memory::AddressUntag(address);
+ if (handle_wasm_seg_fault(address, sizeof(T)))
+ return;
+
+ set_reg<T>(rt, Memory::Read<T>(address));
+
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+
+ LogRead(address, rt, GetPrintRegisterFormat(element_size));
+}
+
+#define ATOMIC_MEMORY_SIMPLE_UINT_LIST(V) \
+ V(LDADD) \
+ V(LDCLR) \
+ V(LDEOR) \
+ V(LDSET) \
+ V(LDUMAX) \
+ V(LDUMIN)
+
+#define ATOMIC_MEMORY_SIMPLE_INT_LIST(V) \
+ V(LDSMAX) \
+ V(LDSMIN)
+
+void Simulator::VisitAtomicMemory(const Instruction* instr) {
+ switch (instr->Mask(AtomicMemoryMask)) {
+// clang-format off
+#define SIM_FUNC_B(A) \
+ case A##B: \
+ case A##AB: \
+ case A##LB: \
+ case A##ALB:
+#define SIM_FUNC_H(A) \
+ case A##H: \
+ case A##AH: \
+ case A##LH: \
+ case A##ALH:
+#define SIM_FUNC_w(A) \
+ case A##_w: \
+ case A##A_w: \
+ case A##L_w: \
+ case A##AL_w:
+#define SIM_FUNC_x(A) \
+ case A##_x: \
+ case A##A_x: \
+ case A##L_x: \
+ case A##AL_x:
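+
+// Editorial note (not part of the original source): each SIM_FUNC_* macro
+// expands to the four plain/acquire/release/acquire-release case labels of
+// one operation, e.g. SIM_FUNC_B(LDADD) expands to
+//   case LDADDB: case LDADDAB: case LDADDLB: case LDADDALB: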
+
+ ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_B)
+ AtomicMemorySimpleHelper<uint8_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_B)
+ AtomicMemorySimpleHelper<int8_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_H)
+ AtomicMemorySimpleHelper<uint16_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_H)
+ AtomicMemorySimpleHelper<int16_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_w)
+ AtomicMemorySimpleHelper<uint32_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_w)
+ AtomicMemorySimpleHelper<int32_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_x)
+ AtomicMemorySimpleHelper<uint64_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_x)
+ AtomicMemorySimpleHelper<int64_t>(instr);
+ break;
+ // clang-format on
+
+ case SWPB:
+ case SWPAB:
+ case SWPLB:
+ case SWPALB:
+ AtomicMemorySwapHelper<uint8_t>(instr);
+ break;
+ case SWPH:
+ case SWPAH:
+ case SWPLH:
+ case SWPALH:
+ AtomicMemorySwapHelper<uint16_t>(instr);
+ break;
+ case SWP_w:
+ case SWPA_w:
+ case SWPL_w:
+ case SWPAL_w:
+ AtomicMemorySwapHelper<uint32_t>(instr);
+ break;
+ case SWP_x:
+ case SWPA_x:
+ case SWPL_x:
+ case SWPAL_x:
+ AtomicMemorySwapHelper<uint64_t>(instr);
+ break;
+ case LDAPRB:
+ LoadAcquireRCpcHelper<uint8_t>(instr);
+ break;
+ case LDAPRH:
+ LoadAcquireRCpcHelper<uint16_t>(instr);
+ break;
+ case LDAPR_w:
+ LoadAcquireRCpcHelper<uint32_t>(instr);
+ break;
+ case LDAPR_x:
+ LoadAcquireRCpcHelper<uint64_t>(instr);
+ break;
+ }
+}
+
+void Simulator::VisitLoadLiteral(const Instruction* instr) {
+ unsigned rt = instr->Rt();
+ uint64_t address = instr->LiteralAddress<uint64_t>();
+
+ // Verify that the calculated address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then
+ // print a more detailed log.
+ case LDR_w_lit:
+ set_wreg(rt, Read<uint32_t>(address), NoRegLog);
+ LogRead(address, rt, kPrintWReg);
+ break;
+ case LDR_x_lit:
+ set_xreg(rt, Read<uint64_t>(address), NoRegLog);
+ LogRead(address, rt, kPrintXReg);
+ break;
+ case LDR_s_lit:
+ set_sreg(rt, Read<float>(address), NoRegLog);
+ LogVRead(address, rt, kPrintSReg);
+ break;
+ case LDR_d_lit:
+ set_dreg(rt, Read<double>(address), NoRegLog);
+ LogVRead(address, rt, kPrintDReg);
+ break;
+ case LDR_q_lit:
+ set_qreg(rt, Read<qreg_t>(address), NoRegLog);
+ LogVRead(address, rt, kPrintReg1Q);
+ break;
+ case LDRSW_x_lit:
+ set_xreg(rt, Read<int32_t>(address), NoRegLog);
+ LogRead(address, rt, kPrintWReg);
+ break;
+
+ // Ignore prfm hint instructions.
+ case PRFM_lit: break;
+
+ default: VIXL_UNREACHABLE();
+ }
+
+ local_monitor_.MaybeClear();
+}
+
+
+uintptr_t Simulator::AddressModeHelper(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
+
+ if ((addr_reg == 31) && ((address % 16) != 0)) {
+ // When the base register is SP the stack pointer is required to be
+ // quadword aligned prior to the address calculation and write-backs.
+ // Misalignment will cause a stack alignment fault.
+ VIXL_ALIGNMENT_EXCEPTION();
+ }
+
+ if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
+ VIXL_ASSERT(offset != 0);
+ // Only preindex should log the register update here. For Postindex, the
+ // update will be printed automatically by LogWrittenRegisters _after_ the
+ // memory access itself is logged.
+ RegLogMode log_mode = (addrmode == PreIndex) ? LogRegWrites : NoRegLog;
+ set_xreg(addr_reg, address + offset, log_mode, Reg31IsStackPointer);
+ }
+
+ if ((addrmode == Offset) || (addrmode == PreIndex)) {
+ address += offset;
+ }
+
+ // Verify that the calculated address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ return static_cast<uintptr_t>(address);
+}
+
+
+void Simulator::VisitMoveWideImmediate(const Instruction* instr) {
+ MoveWideImmediateOp mov_op =
+ static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
+ int64_t new_xn_val = 0;
+
+ bool is_64_bits = instr->SixtyFourBits() == 1;
+ // Shift is limited for W operations.
+ VIXL_ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+
+ // Get the shifted immediate.
+ int64_t shift = instr->ShiftMoveWide() * 16;
+ int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
+
+ // Compute the new value.
+ switch (mov_op) {
+ case MOVN_w:
+ case MOVN_x: {
+ new_xn_val = ~shifted_imm16;
+ if (!is_64_bits) new_xn_val &= kWRegMask;
+ break;
+ }
+ case MOVK_w:
+ case MOVK_x: {
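+      // MOVK only replaces the selected 16-bit field; the rest of the
+      // previous register value is preserved.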
+ unsigned reg_code = instr->Rd();
+ int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
+ : wreg(reg_code);
+ new_xn_val =
+ (prev_xn_val & ~(INT64_C(0xffff) << shift)) | shifted_imm16;
+ break;
+ }
+ case MOVZ_w:
+ case MOVZ_x: {
+ new_xn_val = shifted_imm16;
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ // Update the destination register.
+ set_xreg(instr->Rd(), new_xn_val);
+}
+
+
+void Simulator::VisitConditionalSelect(const Instruction* instr) {
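+  // If the condition passes the result is Rn; otherwise it is Rm, optionally
+  // incremented (CSINC), inverted (CSINV) or negated (CSNEG).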
+ uint64_t new_val = xreg(instr->Rn());
+
+ if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
+ new_val = xreg(instr->Rm());
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: break;
+ case CSINC_w:
+ case CSINC_x: new_val++; break;
+ case CSINV_w:
+ case CSINV_x: new_val = ~new_val; break;
+ case CSNEG_w:
+ case CSNEG_x: new_val = -new_val; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ }
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitDataProcessing1Source(const Instruction* instr) {
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ case RBIT_w: set_wreg(dst, ReverseBits(wreg(src))); break;
+ case RBIT_x: set_xreg(dst, ReverseBits(xreg(src))); break;
+ case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), 1)); break;
+ case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), 1)); break;
+ case REV_w: set_wreg(dst, ReverseBytes(wreg(src), 2)); break;
+ case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), 2)); break;
+ case REV_x: set_xreg(dst, ReverseBytes(xreg(src), 3)); break;
+ case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src))); break;
+ case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src))); break;
+ case CLS_w: {
+ set_wreg(dst, CountLeadingSignBits(wreg(src)));
+ break;
+ }
+ case CLS_x: {
+ set_xreg(dst, CountLeadingSignBits(xreg(src)));
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
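+// Bitwise polynomial reduction over GF(2): reduce bits [n-1:32] of |data|
+// modulo the generator polynomial whose low 32 coefficients are given by
+// |poly| (the leading x^32 term is implicit in clearing the current bit), and
+// return the 32-bit remainder. This is the core of the CRC32* helpers below.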
+uint32_t Simulator::Poly32Mod2(unsigned n, uint64_t data, uint32_t poly) {
+ VIXL_ASSERT((n > 32) && (n <= 64));
+ for (unsigned i = (n - 1); i >= 32; i--) {
+ if (((data >> i) & 1) != 0) {
+ uint64_t polysh32 = (uint64_t)poly << (i - 32);
+ uint64_t mask = (UINT64_C(1) << i) - 1;
+ data = ((data & mask) ^ polysh32);
+ }
+ }
+ return data & 0xffffffff;
+}
+
+
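+// The CRC32 and CRC32C instructions use reflected (LSB-first) CRCs, so the
+// accumulator and the input value are bit-reversed before the polynomial
+// division and the remainder is reversed back afterwards.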
+template <typename T>
+uint32_t Simulator::Crc32Checksum(uint32_t acc, T val, uint32_t poly) {
+ unsigned size = sizeof(val) * 8; // Number of bits in type T.
+ VIXL_ASSERT((size == 8) || (size == 16) || (size == 32));
+ uint64_t tempacc = static_cast<uint64_t>(ReverseBits(acc)) << size;
+ uint64_t tempval = static_cast<uint64_t>(ReverseBits(val)) << 32;
+ return ReverseBits(Poly32Mod2(32 + size, tempacc ^ tempval, poly));
+}
+
+
+uint32_t Simulator::Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly) {
+ // Poly32Mod2 cannot handle inputs with more than 32 bits, so compute
+ // the CRC of each 32-bit word sequentially.
+ acc = Crc32Checksum(acc, (uint32_t)(val & 0xffffffff), poly);
+ return Crc32Checksum(acc, (uint32_t)(val >> 32), poly);
+}
+
+
+void Simulator::VisitDataProcessing2Source(const Instruction* instr) {
+ Shift shift_op = NO_SHIFT;
+ int64_t result = 0;
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ case SDIV_w: {
+ int32_t rn = wreg(instr->Rn());
+ int32_t rm = wreg(instr->Rm());
+ if ((rn == kWMinInt) && (rm == -1)) {
+ result = kWMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case SDIV_x: {
+ int64_t rn = xreg(instr->Rn());
+ int64_t rm = xreg(instr->Rm());
+ if ((rn == kXMinInt) && (rm == -1)) {
+ result = kXMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_w: {
+ uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
+ uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_x: {
+ uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
+ uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case LSLV_w:
+ case LSLV_x: shift_op = LSL; break;
+ case LSRV_w:
+ case LSRV_x: shift_op = LSR; break;
+ case ASRV_w:
+ case ASRV_x: shift_op = ASR; break;
+ case RORV_w:
+ case RORV_x: shift_op = ROR; break;
+ case CRC32B: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint8_t val = reg<uint8_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ break;
+ }
+ case CRC32H: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint16_t val = reg<uint16_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ break;
+ }
+ case CRC32W: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint32_t val = reg<uint32_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ break;
+ }
+ case CRC32X: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint64_t val = reg<uint64_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ reg_size = kWRegSize;
+ break;
+ }
+ case CRC32CB: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint8_t val = reg<uint8_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ break;
+ }
+ case CRC32CH: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint16_t val = reg<uint16_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ break;
+ }
+ case CRC32CW: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint32_t val = reg<uint32_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ break;
+ }
+ case CRC32CX: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint64_t val = reg<uint64_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ reg_size = kWRegSize;
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ if (shift_op != NO_SHIFT) {
+    // The shift distance is encoded in the least-significant five (W) or six
+    // (X) bits of the register.
+ int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
+ unsigned shift = wreg(instr->Rm()) & mask;
+ result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
+ shift);
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+// The algorithm used is adapted from the one described in section 8.2 of
+// Hacker's Delight, by Henry S. Warren, Jr.
+// It assumes that a right shift on a signed integer is an arithmetic shift.
+// Type T must be either uint64_t or int64_t.
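+// The operands are split into 32-bit halves and the partial products are
+// accumulated to form the high 64 bits of the full 128-bit product.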
+template <typename T>
+static T MultiplyHigh(T u, T v) {
+ uint64_t u0, v0, w0;
+ T u1, v1, w1, w2, t;
+
+ VIXL_ASSERT(sizeof(u) == sizeof(u0));
+
+ u0 = u & 0xffffffff;
+ u1 = u >> 32;
+ v0 = v & 0xffffffff;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xffffffff;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+
+void Simulator::VisitDataProcessing3Source(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+
+ int64_t result = 0;
+ // Extract and sign- or zero-extend 32-bit arguments for widening operations.
+ uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
+ uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
+ int64_t rn_s32 = reg<int32_t>(instr->Rn());
+ int64_t rm_s32 = reg<int32_t>(instr->Rm());
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x:
+ result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case MSUB_w:
+ case MSUB_x:
+ result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
+ case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
+ case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
+ case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
+ case UMULH_x:
+ result = MultiplyHigh(reg<uint64_t>(instr->Rn()),
+ reg<uint64_t>(instr->Rm()));
+ break;
+ case SMULH_x:
+ result = MultiplyHigh(xreg(instr->Rn()), xreg(instr->Rm()));
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitBitfield(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
+ int64_t R = instr->ImmR();
+ int64_t S = instr->ImmS();
+ int64_t diff = S - R;
+ int64_t mask;
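+  // Build the mask selecting the bits of the rotated source field in the
+  // destination: for S >= R the field occupies the low (S - R + 1) bits;
+  // otherwise an (S + 1)-bit mask is rotated right by R and diff is wrapped
+  // back into range so the sign-extension below can find the field's top bit.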
+ if (diff >= 0) {
+ mask = (diff < (reg_size - 1)) ? (INT64_C(1) << (diff + 1)) - 1
+ : reg_mask;
+ } else {
+ mask = (INT64_C(1) << (S + 1)) - 1;
+ mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+ diff += reg_size;
+ }
+
+  // inzero indicates whether the extracted bitfield is inserted into the
+  // existing destination register value or into zero.
+  // If extend is true, the extracted bitfield is sign-extended.
+ bool inzero = false;
+ bool extend = false;
+ switch (instr->Mask(BitfieldMask)) {
+ case BFM_x:
+ case BFM_w:
+ break;
+ case SBFM_x:
+ case SBFM_w:
+ inzero = true;
+ extend = true;
+ break;
+ case UBFM_x:
+ case UBFM_w:
+ inzero = true;
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+
+ int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
+ int64_t src = reg(reg_size, instr->Rn());
+ // Rotate source bitfield into place.
+ int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
+ // Determine the sign extension.
+ int64_t topbits = ((INT64_C(1) << (reg_size - diff - 1)) - 1) << (diff + 1);
+ int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;
+
+ // Merge sign extension, dest/zero and bitfield.
+ result = signbits | (result & mask) | (dst & ~mask);
+
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitExtract(const Instruction* instr) {
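+  // EXTR: extract reg_size bits from the concatenation Rn:Rm, starting at bit
+  // <lsb> of Rm.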
+ unsigned lsb = instr->ImmS();
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ uint64_t low_res = static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb;
+ uint64_t high_res =
+ (lsb == 0) ? 0 : reg(reg_size, instr->Rn()) << (reg_size - lsb);
+ set_reg(reg_size, instr->Rd(), low_res | high_res);
+}
+
+
+void Simulator::VisitFPImmediate(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dest = instr->Rd();
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
+ case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPIntegerConvert(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ FPRounding round = RMode();
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
+ case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
+ case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
+ case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
+ case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
+ case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
+ case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
+ case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
+ case FCVTMS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTPS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
+ case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
+ case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
+ case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
+ case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
+ case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
+ case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
+ case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
+ case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
+ case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
+ case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
+ case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
+ case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
+ case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
+ case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
+ case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
+ case FJCVTZS: set_wreg(dst, FPToFixedJS(dreg(src))); break;
+ case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
+ case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
+ case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
+ case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
+ case FMOV_d1_x:
+ LogicVRegister(vreg(dst)).SetUint(kFormatD, 1, xreg(src));
+ break;
+ case FMOV_x_d1:
+ set_xreg(dst, LogicVRegister(vreg(src)).Uint(kFormatD, 1));
+ break;
+
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
+ case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
+ case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
+ case UCVTF_dw: {
+ set_dreg(dst, UFixedToDouble(static_cast<uint32_t>(wreg(src)), 0, round));
+ break;
+ }
+ case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
+ case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
+ case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
+ case UCVTF_sw: {
+ set_sreg(dst, UFixedToFloat(static_cast<uint32_t>(wreg(src)), 0, round));
+ break;
+ }
+
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPFixedPointConvert(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+ int fbits = 64 - instr->FPScale();
+
+ FPRounding round = RMode();
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx_fixed:
+ set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
+ break;
+ case SCVTF_dw_fixed:
+ set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
+ break;
+ case UCVTF_dx_fixed:
+ set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
+ break;
+ case UCVTF_dw_fixed: {
+ set_dreg(dst,
+ UFixedToDouble(static_cast<uint32_t>(wreg(src)), fbits, round));
+ break;
+ }
+ case SCVTF_sx_fixed:
+ set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
+ break;
+ case SCVTF_sw_fixed:
+ set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
+ break;
+ case UCVTF_sx_fixed:
+ set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
+ break;
+ case UCVTF_sw_fixed: {
+ set_sreg(dst,
+ UFixedToFloat(static_cast<uint32_t>(wreg(src)), fbits, round));
+ break;
+ }
+ case FCVTZS_xd_fixed:
+ set_xreg(dst, FPToInt64(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZS_wd_fixed:
+ set_wreg(dst, FPToInt32(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZU_xd_fixed:
+ set_xreg(dst, FPToUInt64(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZU_wd_fixed:
+ set_wreg(dst, FPToUInt32(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZS_xs_fixed:
+ set_xreg(dst, FPToInt64(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ case FCVTZS_ws_fixed:
+ set_wreg(dst, FPToInt32(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ case FCVTZU_xs_fixed:
+ set_xreg(dst, FPToUInt64(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ case FCVTZU_ws_fixed:
+ set_wreg(dst, FPToUInt32(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPCompare(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ FPTrapFlags trap = DisableTrap;
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMPE_s: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_s: FPCompare(sreg(instr->Rn()), sreg(instr->Rm()), trap); break;
+ case FCMPE_d: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_d: FPCompare(dreg(instr->Rn()), dreg(instr->Rm()), trap); break;
+ case FCMPE_s_zero: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_s_zero: FPCompare(sreg(instr->Rn()), 0.0f, trap); break;
+ case FCMPE_d_zero: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_d_zero: FPCompare(dreg(instr->Rn()), 0.0, trap); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalCompare(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ FPTrapFlags trap = DisableTrap;
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMPE_s: trap = EnableTrap;
+ VIXL_FALLTHROUGH();
+ case FCCMP_s:
+ if (ConditionPassed(instr->Condition())) {
+ FPCompare(sreg(instr->Rn()), sreg(instr->Rm()), trap);
+ } else {
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+ break;
+ case FCCMPE_d: trap = EnableTrap;
+ VIXL_FALLTHROUGH();
+ case FCCMP_d:
+ if (ConditionPassed(instr->Condition())) {
+ FPCompare(dreg(instr->Rn()), dreg(instr->Rm()), trap);
+ } else {
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalSelect(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ Instr selected;
+ if (ConditionPassed(instr->Condition())) {
+ selected = instr->Rn();
+ } else {
+ selected = instr->Rm();
+ }
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
+ case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing1Source(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+ VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ bool inexact_exception = false;
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ case FMOV_s: set_sreg(fd, sreg(fn)); return;
+ case FMOV_d: set_dreg(fd, dreg(fn)); return;
+ case FABS_s: fabs_(kFormatS, vreg(fd), vreg(fn)); return;
+ case FABS_d: fabs_(kFormatD, vreg(fd), vreg(fn)); return;
+ case FNEG_s: fneg(kFormatS, vreg(fd), vreg(fn)); return;
+ case FNEG_d: fneg(kFormatD, vreg(fd), vreg(fn)); return;
+ case FCVT_ds:
+ set_dreg(fd, FPToDouble(sreg(fn), ReadDN()));
+ return;
+ case FCVT_sd:
+ set_sreg(fd, FPToFloat(dreg(fn), FPTieEven, ReadDN()));
+ return;
+ case FCVT_hs:
+ set_hreg(fd, Float16ToRawbits(FPToFloat16(sreg(fn), FPTieEven, ReadDN())));
+ return;
+ case FCVT_sh:
+ set_sreg(fd, FPToFloat(RawbitsToFloat16(hreg(fn)), ReadDN()));
+ return;
+ case FCVT_dh:
+ set_dreg(fd, FPToDouble(hreg(fn), ReadDN()));
+ return;
+ case FCVT_hd:
+ set_hreg(fd, Float16ToRawbits(FPToFloat16(dreg(fn), FPTieEven, ReadDN())));
+ return;
+ case FSQRT_s:
+ case FSQRT_d: fsqrt(vform, rd, rn); return;
+ case FRINTI_s:
+ case FRINTI_d: break; // Use FPCR rounding mode.
+ case FRINTX_s:
+ case FRINTX_d: inexact_exception = true; break;
+ case FRINTA_s:
+ case FRINTA_d: fpcr_rounding = FPTieAway; break;
+ case FRINTM_s:
+ case FRINTM_d: fpcr_rounding = FPNegativeInfinity; break;
+ case FRINTN_s:
+ case FRINTN_d: fpcr_rounding = FPTieEven; break;
+ case FRINTP_s:
+ case FRINTP_d: fpcr_rounding = FPPositiveInfinity; break;
+ case FRINTZ_s:
+ case FRINTZ_d: fpcr_rounding = FPZero; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ // Only FRINT* instructions fall through the switch above.
+ frint(vform, rd, rn, fpcr_rounding, inexact_exception);
+}
+
+
+void Simulator::VisitFPDataProcessing2Source(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FADD_s:
+ case FADD_d: fadd(vform, rd, rn, rm); break;
+ case FSUB_s:
+ case FSUB_d: fsub(vform, rd, rn, rm); break;
+ case FMUL_s:
+ case FMUL_d: fmul(vform, rd, rn, rm); break;
+ case FNMUL_s:
+ case FNMUL_d: fnmul(vform, rd, rn, rm); break;
+ case FDIV_s:
+ case FDIV_d: fdiv(vform, rd, rn, rm); break;
+ case FMAX_s:
+ case FMAX_d: fmax(vform, rd, rn, rm); break;
+ case FMIN_s:
+ case FMIN_d: fmin(vform, rd, rn, rm); break;
+ case FMAXNM_s:
+ case FMAXNM_d: fmaxnm(vform, rd, rn, rm); break;
+ case FMINNM_s:
+ case FMINNM_d: fminnm(vform, rd, rn, rm); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing3Source(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ unsigned fa = instr->Ra();
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ // fd = fa +/- (fn * fm)
+ case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
+ case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
+ case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
+ case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
+ // Negated variants of the above.
+ case FNMADD_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
+ break;
+ case FNMSUB_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
+ break;
+ case FNMADD_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
+ break;
+ case FNMSUB_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
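+// If either operand is a NaN, write the NaN result produced by FPProcessNaNs
+// to the destination and return true; otherwise leave the destination
+// untouched and return false.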
+bool Simulator::FPProcessNaNs(const Instruction* instr) {
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ bool done = false;
+
+ if (instr->Mask(FP64) == FP64) {
+ double result = FPProcessNaNs(dreg(fn), dreg(fm));
+ if (std::isnan(result)) {
+ set_dreg(fd, result);
+ done = true;
+ }
+ } else {
+ float result = FPProcessNaNs(sreg(fn), sreg(fm));
+ if (std::isnan(result)) {
+ set_sreg(fd, result);
+ done = true;
+ }
+ }
+
+ return done;
+}
+
+
+void Simulator::SysOp_W(int op, int64_t val) {
+ switch (op) {
+ case IVAU:
+ case CVAC:
+ case CVAU:
+ case CIVAC: {
+ // Perform a dummy memory access to ensure that we have read access
+ // to the specified address.
+ volatile uint8_t y = Read<uint8_t>(val);
+ USE(y);
+ // TODO: Implement "case ZVA:".
+ break;
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitSystem(const Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
+ VIXL_ASSERT(instr->Mask(SystemExclusiveMonitorMask) == CLREX);
+ switch (instr->Mask(SystemExclusiveMonitorMask)) {
+ case CLREX: {
+ PrintExclusiveAccessWarning();
+ ClearLocalMonitor();
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
+ case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ break;
+ }
+ case MSR: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV:
+ nzcv().SetRawValue(wreg(instr->Rt()));
+ LogSystemRegister(NZCV);
+ break;
+ case FPCR:
+ fpcr().SetRawValue(wreg(instr->Rt()));
+ LogSystemRegister(FPCR);
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: break;
+ case CSDB: break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
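+    // All memory barrier forms are conservatively implemented as a
+    // sequentially consistent fence on the host.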
+ js::jit::AtomicOperations::fenceSeqCst();
+ } else if ((instr->Mask(SystemSysFMask) == SystemSysFixed)) {
+ switch (instr->Mask(SystemSysMask)) {
+ case SYS: SysOp_W(instr->SysOp(), xreg(instr->Rt())); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitCrypto2RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Simulator::VisitCrypto3RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Simulator::VisitCryptoAES(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Simulator::VisitNEON2RegMisc(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ static const NEONFormatMap map_lp = {
+ {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+ };
+ VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);
+
+ static const NEONFormatMap map_fcvtl = {
+ {22}, {NF_4S, NF_2D}
+ };
+ VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);
+
+ static const NEONFormatMap map_fcvtn = {
+ {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S}
+ };
+ VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+    // These instructions all use a two-bit size field, except NOT and RBIT,
+    // which use the field to encode the operation.
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_REV64: rev64(vf, rd, rn); break;
+ case NEON_REV32: rev32(vf, rd, rn); break;
+ case NEON_REV16: rev16(vf, rd, rn); break;
+ case NEON_SUQADD: suqadd(vf, rd, rn); break;
+ case NEON_USQADD: usqadd(vf, rd, rn); break;
+ case NEON_CLS: cls(vf, rd, rn); break;
+ case NEON_CLZ: clz(vf, rd, rn); break;
+ case NEON_CNT: cnt(vf, rd, rn); break;
+ case NEON_SQABS: abs(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_SQNEG: neg(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_CMGT_zero: cmp(vf, rd, rn, 0, gt); break;
+ case NEON_CMGE_zero: cmp(vf, rd, rn, 0, ge); break;
+ case NEON_CMEQ_zero: cmp(vf, rd, rn, 0, eq); break;
+ case NEON_CMLE_zero: cmp(vf, rd, rn, 0, le); break;
+ case NEON_CMLT_zero: cmp(vf, rd, rn, 0, lt); break;
+ case NEON_ABS: abs(vf, rd, rn); break;
+ case NEON_NEG: neg(vf, rd, rn); break;
+ case NEON_SADDLP: saddlp(vf_lp, rd, rn); break;
+ case NEON_UADDLP: uaddlp(vf_lp, rd, rn); break;
+ case NEON_SADALP: sadalp(vf_lp, rd, rn); break;
+ case NEON_UADALP: uadalp(vf_lp, rd, rn); break;
+ case NEON_RBIT_NOT:
+ vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+ switch (instr->FPType()) {
+ case 0: not_(vf, rd, rn); break;
+          case 1: rbit(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ break;
+ }
+ } else {
+ VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+ bool inexact_exception = false;
+
+    // These instructions all use a one-bit size field, except XTN, SQXTUN,
+    // SHLL, SQXTN and UQXTN, which use a two-bit size field.
+ switch (instr->Mask(NEON2RegMiscFPMask)) {
+ case NEON_FABS: fabs_(fpf, rd, rn); return;
+ case NEON_FNEG: fneg(fpf, rd, rn); return;
+ case NEON_FSQRT: fsqrt(fpf, rd, rn); return;
+ case NEON_FCVTL:
+ if (instr->Mask(NEON_Q)) {
+ fcvtl2(vf_fcvtl, rd, rn);
+ } else {
+ fcvtl(vf_fcvtl, rd, rn);
+ }
+ return;
+ case NEON_FCVTN:
+ if (instr->Mask(NEON_Q)) {
+ fcvtn2(vf_fcvtn, rd, rn);
+ } else {
+ fcvtn(vf_fcvtn, rd, rn);
+ }
+ return;
+ case NEON_FCVTXN:
+ if (instr->Mask(NEON_Q)) {
+ fcvtxn2(vf_fcvtn, rd, rn);
+ } else {
+ fcvtxn(vf_fcvtn, rd, rn);
+ }
+ return;
+
+ // The following instructions break from the switch statement, rather
+ // than return.
+ case NEON_FRINTI: break; // Use FPCR rounding mode.
+ case NEON_FRINTX: inexact_exception = true; break;
+ case NEON_FRINTA: fpcr_rounding = FPTieAway; break;
+ case NEON_FRINTM: fpcr_rounding = FPNegativeInfinity; break;
+ case NEON_FRINTN: fpcr_rounding = FPTieEven; break;
+ case NEON_FRINTP: fpcr_rounding = FPPositiveInfinity; break;
+ case NEON_FRINTZ: fpcr_rounding = FPZero; break;
+
+ case NEON_FCVTNS: fcvts(fpf, rd, rn, FPTieEven); return;
+ case NEON_FCVTNU: fcvtu(fpf, rd, rn, FPTieEven); return;
+ case NEON_FCVTPS: fcvts(fpf, rd, rn, FPPositiveInfinity); return;
+ case NEON_FCVTPU: fcvtu(fpf, rd, rn, FPPositiveInfinity); return;
+ case NEON_FCVTMS: fcvts(fpf, rd, rn, FPNegativeInfinity); return;
+ case NEON_FCVTMU: fcvtu(fpf, rd, rn, FPNegativeInfinity); return;
+ case NEON_FCVTZS: fcvts(fpf, rd, rn, FPZero); return;
+ case NEON_FCVTZU: fcvtu(fpf, rd, rn, FPZero); return;
+ case NEON_FCVTAS: fcvts(fpf, rd, rn, FPTieAway); return;
+ case NEON_FCVTAU: fcvtu(fpf, rd, rn, FPTieAway); return;
+ case NEON_SCVTF: scvtf(fpf, rd, rn, 0, fpcr_rounding); return;
+ case NEON_UCVTF: ucvtf(fpf, rd, rn, 0, fpcr_rounding); return;
+ case NEON_URSQRTE: ursqrte(fpf, rd, rn); return;
+ case NEON_URECPE: urecpe(fpf, rd, rn); return;
+ case NEON_FRSQRTE: frsqrte(fpf, rd, rn); return;
+ case NEON_FRECPE: frecpe(fpf, rd, rn, fpcr_rounding); return;
+ case NEON_FCMGT_zero: fcmp_zero(fpf, rd, rn, gt); return;
+ case NEON_FCMGE_zero: fcmp_zero(fpf, rd, rn, ge); return;
+ case NEON_FCMEQ_zero: fcmp_zero(fpf, rd, rn, eq); return;
+ case NEON_FCMLE_zero: fcmp_zero(fpf, rd, rn, le); return;
+ case NEON_FCMLT_zero: fcmp_zero(fpf, rd, rn, lt); return;
+ default:
+ if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
+ (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_XTN: xtn(vf, rd, rn); return;
+ case NEON_SQXTN: sqxtn(vf, rd, rn); return;
+ case NEON_UQXTN: uqxtn(vf, rd, rn); return;
+ case NEON_SQXTUN: sqxtun(vf, rd, rn); return;
+ case NEON_SHLL:
+ vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+ if (instr->Mask(NEON_Q)) {
+ shll2(vf, rd, rn);
+ } else {
+ shll(vf, rd, rn);
+ }
+ return;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+
+ // Only FRINT* instructions fall through the switch above.
+ frint(fpf, rd, rn, fpcr_rounding, inexact_exception);
+ }
+}
+
+
+void Simulator::VisitNEON3Same(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+ switch (instr->Mask(NEON3SameLogicalMask)) {
+ case NEON_AND: and_(vf, rd, rn, rm); break;
+ case NEON_ORR: orr(vf, rd, rn, rm); break;
+ case NEON_ORN: orn(vf, rd, rn, rm); break;
+ case NEON_EOR: eor(vf, rd, rn, rm); break;
+ case NEON_BIC: bic(vf, rd, rn, rm); break;
+ case NEON_BIF: bif(vf, rd, rn, rm); break;
+ case NEON_BIT: bit(vf, rd, rn, rm); break;
+ case NEON_BSL: bsl(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+ switch (instr->Mask(NEON3SameFPMask)) {
+ case NEON_FADD: fadd(vf, rd, rn, rm); break;
+ case NEON_FSUB: fsub(vf, rd, rn, rm); break;
+ case NEON_FMUL: fmul(vf, rd, rn, rm); break;
+ case NEON_FDIV: fdiv(vf, rd, rn, rm); break;
+ case NEON_FMAX: fmax(vf, rd, rn, rm); break;
+ case NEON_FMIN: fmin(vf, rd, rn, rm); break;
+ case NEON_FMAXNM: fmaxnm(vf, rd, rn, rm); break;
+ case NEON_FMINNM: fminnm(vf, rd, rn, rm); break;
+ case NEON_FMLA: fmla(vf, rd, rn, rm); break;
+ case NEON_FMLS: fmls(vf, rd, rn, rm); break;
+ case NEON_FMULX: fmulx(vf, rd, rn, rm); break;
+ case NEON_FACGE: fabscmp(vf, rd, rn, rm, ge); break;
+ case NEON_FACGT: fabscmp(vf, rd, rn, rm, gt); break;
+ case NEON_FCMEQ: fcmp(vf, rd, rn, rm, eq); break;
+ case NEON_FCMGE: fcmp(vf, rd, rn, rm, ge); break;
+ case NEON_FCMGT: fcmp(vf, rd, rn, rm, gt); break;
+ case NEON_FRECPS: frecps(vf, rd, rn, rm); break;
+ case NEON_FRSQRTS: frsqrts(vf, rd, rn, rm); break;
+ case NEON_FABD: fabd(vf, rd, rn, rm); break;
+ case NEON_FADDP: faddp(vf, rd, rn, rm); break;
+ case NEON_FMAXP: fmaxp(vf, rd, rn, rm); break;
+ case NEON_FMAXNMP: fmaxnmp(vf, rd, rn, rm); break;
+ case NEON_FMINP: fminp(vf, rd, rn, rm); break;
+ case NEON_FMINNMP: fminnmp(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VectorFormat vf = nfd.GetVectorFormat();
+ switch (instr->Mask(NEON3SameMask)) {
+ case NEON_ADD: add(vf, rd, rn, rm); break;
+ case NEON_ADDP: addp(vf, rd, rn, rm); break;
+ case NEON_CMEQ: cmp(vf, rd, rn, rm, eq); break;
+ case NEON_CMGE: cmp(vf, rd, rn, rm, ge); break;
+ case NEON_CMGT: cmp(vf, rd, rn, rm, gt); break;
+ case NEON_CMHI: cmp(vf, rd, rn, rm, hi); break;
+ case NEON_CMHS: cmp(vf, rd, rn, rm, hs); break;
+ case NEON_CMTST: cmptst(vf, rd, rn, rm); break;
+ case NEON_MLS: mls(vf, rd, rn, rm); break;
+ case NEON_MLA: mla(vf, rd, rn, rm); break;
+ case NEON_MUL: mul(vf, rd, rn, rm); break;
+ case NEON_PMUL: pmul(vf, rd, rn, rm); break;
+ case NEON_SMAX: smax(vf, rd, rn, rm); break;
+ case NEON_SMAXP: smaxp(vf, rd, rn, rm); break;
+ case NEON_SMIN: smin(vf, rd, rn, rm); break;
+ case NEON_SMINP: sminp(vf, rd, rn, rm); break;
+ case NEON_SUB: sub(vf, rd, rn, rm); break;
+ case NEON_UMAX: umax(vf, rd, rn, rm); break;
+ case NEON_UMAXP: umaxp(vf, rd, rn, rm); break;
+ case NEON_UMIN: umin(vf, rd, rn, rm); break;
+ case NEON_UMINP: uminp(vf, rd, rn, rm); break;
+ case NEON_SSHL: sshl(vf, rd, rn, rm); break;
+ case NEON_USHL: ushl(vf, rd, rn, rm); break;
+ case NEON_SABD: absdiff(vf, rd, rn, rm, true); break;
+ case NEON_UABD: absdiff(vf, rd, rn, rm, false); break;
+ case NEON_SABA: saba(vf, rd, rn, rm); break;
+ case NEON_UABA: uaba(vf, rd, rn, rm); break;
+ case NEON_UQADD: add(vf, rd, rn, rm).UnsignedSaturate(vf); break;
+ case NEON_SQADD: add(vf, rd, rn, rm).SignedSaturate(vf); break;
+ case NEON_UQSUB: sub(vf, rd, rn, rm).UnsignedSaturate(vf); break;
+ case NEON_SQSUB: sub(vf, rd, rn, rm).SignedSaturate(vf); break;
+ case NEON_SQDMULH: sqdmulh(vf, rd, rn, rm); break;
+ case NEON_SQRDMULH: sqrdmulh(vf, rd, rn, rm); break;
+ case NEON_UQSHL: ushl(vf, rd, rn, rm).UnsignedSaturate(vf); break;
+ case NEON_SQSHL: sshl(vf, rd, rn, rm).SignedSaturate(vf); break;
+ case NEON_URSHL: ushl(vf, rd, rn, rm).Round(vf); break;
+ case NEON_SRSHL: sshl(vf, rd, rn, rm).Round(vf); break;
+ case NEON_UQRSHL:
+ ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
+ break;
+ case NEON_SQRSHL:
+ sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
+ break;
+ case NEON_UHADD:
+ add(vf, rd, rn, rm).Uhalve(vf);
+ break;
+ case NEON_URHADD:
+ add(vf, rd, rn, rm).Uhalve(vf).Round(vf);
+ break;
+ case NEON_SHADD:
+ add(vf, rd, rn, rm).Halve(vf);
+ break;
+ case NEON_SRHADD:
+ add(vf, rd, rn, rm).Halve(vf).Round(vf);
+ break;
+ case NEON_UHSUB:
+ sub(vf, rd, rn, rm).Uhalve(vf);
+ break;
+ case NEON_SHSUB:
+ sub(vf, rd, rn, rm).Halve(vf);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+}
+
+
+void Simulator::VisitNEON3Different(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+ VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEON3DifferentMask)) {
+ case NEON_PMULL: pmull(vf_l, rd, rn, rm); break;
+ case NEON_PMULL2: pmull2(vf_l, rd, rn, rm); break;
+ case NEON_UADDL: uaddl(vf_l, rd, rn, rm); break;
+ case NEON_UADDL2: uaddl2(vf_l, rd, rn, rm); break;
+ case NEON_SADDL: saddl(vf_l, rd, rn, rm); break;
+ case NEON_SADDL2: saddl2(vf_l, rd, rn, rm); break;
+ case NEON_USUBL: usubl(vf_l, rd, rn, rm); break;
+ case NEON_USUBL2: usubl2(vf_l, rd, rn, rm); break;
+ case NEON_SSUBL: ssubl(vf_l, rd, rn, rm); break;
+ case NEON_SSUBL2: ssubl2(vf_l, rd, rn, rm); break;
+ case NEON_SABAL: sabal(vf_l, rd, rn, rm); break;
+ case NEON_SABAL2: sabal2(vf_l, rd, rn, rm); break;
+ case NEON_UABAL: uabal(vf_l, rd, rn, rm); break;
+ case NEON_UABAL2: uabal2(vf_l, rd, rn, rm); break;
+ case NEON_SABDL: sabdl(vf_l, rd, rn, rm); break;
+ case NEON_SABDL2: sabdl2(vf_l, rd, rn, rm); break;
+ case NEON_UABDL: uabdl(vf_l, rd, rn, rm); break;
+ case NEON_UABDL2: uabdl2(vf_l, rd, rn, rm); break;
+ case NEON_SMLAL: smlal(vf_l, rd, rn, rm); break;
+ case NEON_SMLAL2: smlal2(vf_l, rd, rn, rm); break;
+ case NEON_UMLAL: umlal(vf_l, rd, rn, rm); break;
+ case NEON_UMLAL2: umlal2(vf_l, rd, rn, rm); break;
+ case NEON_SMLSL: smlsl(vf_l, rd, rn, rm); break;
+ case NEON_SMLSL2: smlsl2(vf_l, rd, rn, rm); break;
+ case NEON_UMLSL: umlsl(vf_l, rd, rn, rm); break;
+ case NEON_UMLSL2: umlsl2(vf_l, rd, rn, rm); break;
+ case NEON_SMULL: smull(vf_l, rd, rn, rm); break;
+ case NEON_SMULL2: smull2(vf_l, rd, rn, rm); break;
+ case NEON_UMULL: umull(vf_l, rd, rn, rm); break;
+ case NEON_UMULL2: umull2(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLAL: sqdmlal(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLAL2: sqdmlal2(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLSL: sqdmlsl(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLSL2: sqdmlsl2(vf_l, rd, rn, rm); break;
+ case NEON_SQDMULL: sqdmull(vf_l, rd, rn, rm); break;
+ case NEON_SQDMULL2: sqdmull2(vf_l, rd, rn, rm); break;
+ case NEON_UADDW: uaddw(vf_l, rd, rn, rm); break;
+ case NEON_UADDW2: uaddw2(vf_l, rd, rn, rm); break;
+ case NEON_SADDW: saddw(vf_l, rd, rn, rm); break;
+ case NEON_SADDW2: saddw2(vf_l, rd, rn, rm); break;
+ case NEON_USUBW: usubw(vf_l, rd, rn, rm); break;
+ case NEON_USUBW2: usubw2(vf_l, rd, rn, rm); break;
+ case NEON_SSUBW: ssubw(vf_l, rd, rn, rm); break;
+ case NEON_SSUBW2: ssubw2(vf_l, rd, rn, rm); break;
+ case NEON_ADDHN: addhn(vf, rd, rn, rm); break;
+ case NEON_ADDHN2: addhn2(vf, rd, rn, rm); break;
+ case NEON_RADDHN: raddhn(vf, rd, rn, rm); break;
+ case NEON_RADDHN2: raddhn2(vf, rd, rn, rm); break;
+ case NEON_SUBHN: subhn(vf, rd, rn, rm); break;
+ case NEON_SUBHN2: subhn2(vf, rd, rn, rm); break;
+ case NEON_RSUBHN: rsubhn(vf, rd, rn, rm); break;
+ case NEON_RSUBHN2: rsubhn2(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONAcrossLanes(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ // The input operand's VectorFormat is passed for these instructions.
+ if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+
+ switch (instr->Mask(NEONAcrossLanesFPMask)) {
+ case NEON_FMAXV: fmaxv(vf, rd, rn); break;
+ case NEON_FMINV: fminv(vf, rd, rn); break;
+ case NEON_FMAXNMV: fmaxnmv(vf, rd, rn); break;
+ case NEON_FMINNMV: fminnmv(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ switch (instr->Mask(NEONAcrossLanesMask)) {
+ case NEON_ADDV: addv(vf, rd, rn); break;
+ case NEON_SMAXV: smaxv(vf, rd, rn); break;
+ case NEON_SMINV: sminv(vf, rd, rn); break;
+ case NEON_UMAXV: umaxv(vf, rd, rn); break;
+ case NEON_UMINV: uminv(vf, rd, rn); break;
+ case NEON_SADDLV: saddlv(vf, rd, rn); break;
+ case NEON_UADDLV: uaddlv(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+}
+
+
+void Simulator::VisitNEONByIndexedElement(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf_r = nfd.GetVectorFormat();
+ VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ ByElementOp Op = NULL;
+
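+  // Decode the element index: H:L by default, extended to H:L:M for halfword
+  // elements, which also restricts Rm to V0-V15.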
+ int rm_reg = instr->Rm();
+ int index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ rm_reg &= 0xf;
+ index = (index << 1) | instr->NEONM();
+ }
+
+ switch (instr->Mask(NEONByIndexedElementMask)) {
+ case NEON_MUL_byelement: Op = &Simulator::mul; vf = vf_r; break;
+ case NEON_MLA_byelement: Op = &Simulator::mla; vf = vf_r; break;
+ case NEON_MLS_byelement: Op = &Simulator::mls; vf = vf_r; break;
+ case NEON_SQDMULH_byelement: Op = &Simulator::sqdmulh; vf = vf_r; break;
+ case NEON_SQRDMULH_byelement: Op = &Simulator::sqrdmulh; vf = vf_r; break;
+ case NEON_SMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smull2;
+ } else {
+ Op = &Simulator::smull;
+ }
+ break;
+ case NEON_UMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umull2;
+ } else {
+ Op = &Simulator::umull;
+ }
+ break;
+ case NEON_SMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smlal2;
+ } else {
+ Op = &Simulator::smlal;
+ }
+ break;
+ case NEON_UMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umlal2;
+ } else {
+ Op = &Simulator::umlal;
+ }
+ break;
+ case NEON_SMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smlsl2;
+ } else {
+ Op = &Simulator::smlsl;
+ }
+ break;
+ case NEON_UMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umlsl2;
+ } else {
+ Op = &Simulator::umlsl;
+ }
+ break;
+ case NEON_SQDMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmull2;
+ } else {
+ Op = &Simulator::sqdmull;
+ }
+ break;
+ case NEON_SQDMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmlal2;
+ } else {
+ Op = &Simulator::sqdmlal;
+ }
+ break;
+ case NEON_SQDMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmlsl2;
+ } else {
+ Op = &Simulator::sqdmlsl;
+ }
+ break;
+ default:
+ index = instr->NEONH();
+ if ((instr->FPType() & 1) == 0) {
+ index = (index << 1) | instr->NEONL();
+ }
+
+ vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+
+ switch (instr->Mask(NEONByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement: Op = &Simulator::fmul; break;
+ case NEON_FMLA_byelement: Op = &Simulator::fmla; break;
+ case NEON_FMLS_byelement: Op = &Simulator::fmls; break;
+ case NEON_FMULX_byelement: Op = &Simulator::fmulx; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ }
+
+ (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
+}
+
+
+void Simulator::VisitNEONCopy(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ int imm5 = instr->ImmNEON5();
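+  // imm5 encodes both the element size and the lane index: the position of
+  // the lowest set bit gives the size, and the bits above it give the index.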
+ int tz = CountTrailingZeros(imm5, 32);
+ int reg_index = imm5 >> (tz + 1);
+
+ if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
+ int imm4 = instr->ImmNEON4();
+ int rn_index = imm4 >> tz;
+ ins_element(vf, rd, reg_index, rn, rn_index);
+ } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
+ ins_immediate(vf, rd, reg_index, xreg(instr->Rn()));
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
+ uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
+ value &= MaxUintFromFormat(vf);
+ set_xreg(instr->Rd(), value);
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
+ int64_t value = LogicVRegister(rn).Int(vf, reg_index);
+ if (instr->NEONQ()) {
+ set_xreg(instr->Rd(), value);
+ } else {
+ set_wreg(instr->Rd(), (int32_t)value);
+ }
+ } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
+ dup_element(vf, rd, rn, reg_index);
+ } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
+ dup_immediate(vf, rd, xreg(instr->Rn()));
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONExtract(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+ if (instr->Mask(NEONExtractMask) == NEON_EXT) {
+ int index = instr->ImmNEONExt();
+ ext(vf, rd, rn, rm, index);
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ uint64_t addr_base = xreg(instr->Rn(), Reg31IsStackPointer);
+ int reg_size = RegisterSizeInBytesFromFormat(vf);
+
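+  // Up to four consecutive vector registers take part, wrapping at V31. The
+  // per-register addresses are only used by the LD1/ST1 multi-register forms;
+  // the (de)interleaving LD2-LD4/ST2-ST4 forms use addr[0] alone.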
+ int reg[4];
+ uint64_t addr[4];
+ for (int i = 0; i < 4; i++) {
+ reg[i] = (instr->Rt() + i) % kNumberOfVRegisters;
+ addr[i] = addr_base + (i * reg_size);
+ }
+ int count = 1;
+ bool log_read = true;
+
+ Instr itype = instr->Mask(NEONLoadStoreMultiStructMask);
+ if (((itype == NEON_LD1_1v) || (itype == NEON_LD1_2v) ||
+ (itype == NEON_LD1_3v) || (itype == NEON_LD1_4v) ||
+ (itype == NEON_ST1_1v) || (itype == NEON_ST1_2v) ||
+ (itype == NEON_ST1_3v) || (itype == NEON_ST1_4v)) &&
+ (instr->Bits(20, 16) != 0)) {
+ VIXL_UNREACHABLE();
+ }
+
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD1_4v:
+ case NEON_LD1_4v_post: ld1(vf, vreg(reg[3]), addr[3]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_LD1_3v:
+ case NEON_LD1_3v_post: ld1(vf, vreg(reg[2]), addr[2]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_LD1_2v:
+ case NEON_LD1_2v_post: ld1(vf, vreg(reg[1]), addr[1]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_LD1_1v:
+ case NEON_LD1_1v_post:
+ ld1(vf, vreg(reg[0]), addr[0]);
+ log_read = true;
+ break;
+ case NEON_ST1_4v:
+ case NEON_ST1_4v_post: st1(vf, vreg(reg[3]), addr[3]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_3v:
+ case NEON_ST1_3v_post: st1(vf, vreg(reg[2]), addr[2]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_2v:
+ case NEON_ST1_2v_post: st1(vf, vreg(reg[1]), addr[1]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_1v:
+ case NEON_ST1_1v_post:
+ st1(vf, vreg(reg[0]), addr[0]);
+ log_read = false;
+ break;
+ case NEON_LD2_post:
+ case NEON_LD2:
+ ld2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ break;
+ case NEON_ST2:
+ case NEON_ST2_post:
+ st2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ break;
+ case NEON_LD3_post:
+ case NEON_LD3:
+ ld3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ break;
+ case NEON_ST3:
+ case NEON_ST3_post:
+ st3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ break;
+ case NEON_ST4:
+ case NEON_ST4_post:
+ st4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]),
+ addr[0]);
+ count = 4;
+ break;
+ case NEON_LD4_post:
+ case NEON_LD4:
+ ld4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]),
+ addr[0]);
+ count = 4;
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ // Explicitly log the register update whilst we have type information.
+ for (int i = 0; i < count; i++) {
+ // For de-interleaving loads, only print the base address.
+ int lane_size = LaneSizeInBytesFromFormat(vf);
+ PrintRegisterFormat format = GetPrintRegisterFormatTryFP(
+ GetPrintRegisterFormatForSize(reg_size, lane_size));
+ if (log_read) {
+ LogVRead(addr_base, reg[i], format);
+ } else {
+ LogVWrite(addr_base, reg[i], format);
+ }
+ }
+
+ if (addr_mode == PostIndex) {
+ int rm = instr->Rm();
+    // The immediate post-index addressing mode is indicated by rm = 31.
+ // The immediate is implied by the number of vector registers used.
+ addr_base += (rm == 31) ? RegisterSizeInBytesFromFormat(vf) * count
+ : xreg(rm);
+ set_xreg(instr->Rn(), addr_base);
+ } else {
+ VIXL_ASSERT(addr_mode == Offset);
+ }
+}
+
+
+void Simulator::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, Offset);
+}
+
+
+void Simulator::VisitNEONLoadStoreMultiStructPostIndex(
+ const Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, PostIndex);
+}
+
+
+void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ uint64_t addr = xreg(instr->Rn(), Reg31IsStackPointer);
+ int rt = instr->Rt();
+
+ Instr itype = instr->Mask(NEONLoadStoreSingleStructMask);
+ if (((itype == NEON_LD1_b) || (itype == NEON_LD1_h) ||
+ (itype == NEON_LD1_s) || (itype == NEON_LD1_d)) &&
+ (instr->Bits(20, 16) != 0)) {
+ VIXL_UNREACHABLE();
+ }
+
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ bool do_load = false;
+
+ bool replicating = false;
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf_t = nfd.GetVectorFormat();
+
+ VectorFormat vf = kFormat16B;
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_b:
+ case NEON_LD1_b_post:
+ case NEON_LD2_b:
+ case NEON_LD2_b_post:
+ case NEON_LD3_b:
+ case NEON_LD3_b_post:
+ case NEON_LD4_b:
+ case NEON_LD4_b_post: do_load = true;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_b:
+ case NEON_ST1_b_post:
+ case NEON_ST2_b:
+ case NEON_ST2_b_post:
+ case NEON_ST3_b:
+ case NEON_ST3_b_post:
+ case NEON_ST4_b:
+ case NEON_ST4_b_post: break;
+
+ case NEON_LD1_h:
+ case NEON_LD1_h_post:
+ case NEON_LD2_h:
+ case NEON_LD2_h_post:
+ case NEON_LD3_h:
+ case NEON_LD3_h_post:
+ case NEON_LD4_h:
+ case NEON_LD4_h_post: do_load = true;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_h:
+ case NEON_ST1_h_post:
+ case NEON_ST2_h:
+ case NEON_ST2_h_post:
+ case NEON_ST3_h:
+ case NEON_ST3_h_post:
+ case NEON_ST4_h:
+ case NEON_ST4_h_post: vf = kFormat8H; break;
+ case NEON_LD1_s:
+ case NEON_LD1_s_post:
+ case NEON_LD2_s:
+ case NEON_LD2_s_post:
+ case NEON_LD3_s:
+ case NEON_LD3_s_post:
+ case NEON_LD4_s:
+ case NEON_LD4_s_post: do_load = true;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_s:
+ case NEON_ST1_s_post:
+ case NEON_ST2_s:
+ case NEON_ST2_s_post:
+ case NEON_ST3_s:
+ case NEON_ST3_s_post:
+ case NEON_ST4_s:
+ case NEON_ST4_s_post: {
+ VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+ VIXL_STATIC_ASSERT(
+ (NEON_LD1_s_post | (1 << NEONLSSize_offset)) == NEON_LD1_d_post);
+ VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+ VIXL_STATIC_ASSERT(
+ (NEON_ST1_s_post | (1 << NEONLSSize_offset)) == NEON_ST1_d_post);
+ vf = ((instr->NEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
+ break;
+ }
+
+ case NEON_LD1R:
+ case NEON_LD1R_post:
+ case NEON_LD2R:
+ case NEON_LD2R_post:
+ case NEON_LD3R:
+ case NEON_LD3R_post:
+ case NEON_LD4R:
+ case NEON_LD4R_post: {
+ vf = vf_t;
+ do_load = true;
+ replicating = true;
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ PrintRegisterFormat print_format =
+ GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
+ // Make sure that the print_format only includes a single lane.
+ print_format =
+ static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask);
+
+ int esize = LaneSizeInBytesFromFormat(vf);
+ int index_shift = LaneSizeInBytesLog2FromFormat(vf);
+ int lane = instr->NEONLSIndex(index_shift);
+ int scale = 0;
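+  // Forms accessing more than one register use consecutive registers,
+  // wrapping at V31.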
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+ int rt4 = (rt3 + 1) % kNumberOfVRegisters;
+ switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
+ case NEONLoadStoreSingle1:
+ scale = 1;
+ if (do_load) {
+ if (replicating) {
+ ld1r(vf, vreg(rt), addr);
+ } else {
+ ld1(vf, vreg(rt), lane, addr);
+ }
+ LogVRead(addr, rt, print_format, lane);
+ } else {
+ st1(vf, vreg(rt), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle2:
+ scale = 2;
+ if (do_load) {
+ if (replicating) {
+ ld2r(vf, vreg(rt), vreg(rt2), addr);
+ } else {
+ ld2(vf, vreg(rt), vreg(rt2), lane, addr);
+ }
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ } else {
+ st2(vf, vreg(rt), vreg(rt2), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle3:
+ scale = 3;
+ if (do_load) {
+ if (replicating) {
+ ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr);
+ } else {
+ ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
+ }
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ LogVRead(addr + (2 * esize), rt3, print_format, lane);
+ } else {
+ st3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle4:
+ scale = 4;
+ if (do_load) {
+ if (replicating) {
+ ld4r(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), addr);
+ } else {
+ ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
+ }
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ LogVRead(addr + (2 * esize), rt3, print_format, lane);
+ LogVRead(addr + (3 * esize), rt4, print_format, lane);
+ } else {
+ st4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+ LogVWrite(addr + (3 * esize), rt4, print_format, lane);
+ }
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ if (addr_mode == PostIndex) {
+ int rm = instr->Rm();
+ int lane_size = LaneSizeInBytesFromFormat(vf);
+ set_xreg(instr->Rn(), addr + ((rm == 31) ? (scale * lane_size) : xreg(rm)));
+ }
+}
+
+
+void Simulator::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+ NEONLoadStoreSingleStructHelper(instr, Offset);
+}
+
+
+void Simulator::VisitNEONLoadStoreSingleStructPostIndex(
+ const Instruction* instr) {
+ NEONLoadStoreSingleStructHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitNEONModifiedImmediate(const Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
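+  // The cmode, op and Q fields select both the vector format and how the
+  // 8-bit immediate (abcdefgh) is expanded: shifted into position, shifted
+  // with ones filled in below, expanded bit-by-byte to 0x00/0xff, or
+  // reinterpreted as a floating-point constant.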
+ int cmode = instr->NEONCmode();
+ int cmode_3_1 = (cmode >> 1) & 7;
+ int cmode_3 = (cmode >> 3) & 1;
+ int cmode_2 = (cmode >> 2) & 1;
+ int cmode_1 = (cmode >> 1) & 1;
+ int cmode_0 = cmode & 1;
+ int q = instr->NEONQ();
+ int op_bit = instr->NEONModImmOp();
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+
+ // Find the format and immediate value
+ uint64_t imm = 0;
+ VectorFormat vform = kFormatUndefined;
+ switch (cmode_3_1) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ case 0x3:
+ vform = (q == 1) ? kFormat4S : kFormat2S;
+ imm = imm8 << (8 * cmode_3_1);
+ break;
+ case 0x4:
+ case 0x5:
+ vform = (q == 1) ? kFormat8H : kFormat4H;
+ imm = imm8 << (8 * cmode_1);
+ break;
+ case 0x6:
+ vform = (q == 1) ? kFormat4S : kFormat2S;
+ if (cmode_0 == 0) {
+ imm = imm8 << 8 | 0x000000ff;
+ } else {
+ imm = imm8 << 16 | 0x0000ffff;
+ }
+ break;
+ case 0x7:
+ if (cmode_0 == 0 && op_bit == 0) {
+ vform = q ? kFormat16B : kFormat8B;
+ imm = imm8;
+ } else if (cmode_0 == 0 && op_bit == 1) {
+ vform = q ? kFormat2D : kFormat1D;
+ imm = 0;
+ for (int i = 0; i < 8; ++i) {
+ if (imm8 & (1ULL << i)) {
+ imm |= (UINT64_C(0xff) << (8 * i));
+ }
+ }
+ } else { // cmode_0 == 1, cmode == 0xf.
+ if (op_bit == 0) {
+ vform = q ? kFormat4S : kFormat2S;
+ imm = FloatToRawbits(instr->ImmNEONFP32());
+ } else if (q == 1) {
+ vform = kFormat2D;
+ imm = DoubleToRawbits(instr->ImmNEONFP64());
+ } else {
+ VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf));
+ VisitUnallocated(instr);
+ }
+ }
+ break;
+ default: VIXL_UNREACHABLE(); break;
+ }
+
+ // Find the operation
+ NEONModifiedImmediateOp op;
+ if (cmode_3 == 0) {
+ if (cmode_0 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
+ }
+ } else { // cmode<3> == '1'
+ if (cmode_2 == 0) {
+ if (cmode_0 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
+ }
+ } else { // cmode<2> == '1'
+ if (cmode_1 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<1> == '1'
+ if (cmode_0 == 0) {
+ op = NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = NEONModifiedImmediate_MOVI;
+ }
+ }
+ }
+ }
+
+ // Call the logic function
+ if (op == NEONModifiedImmediate_ORR) {
+ orr(vform, rd, rd, imm);
+ } else if (op == NEONModifiedImmediate_BIC) {
+ bic(vform, rd, rd, imm);
+ } else if (op == NEONModifiedImmediate_MOVI) {
+ movi(vform, rd, imm);
+ } else if (op == NEONModifiedImmediate_MVNI) {
+ mvni(vform, rd, imm);
+ } else {
+ VisitUnimplemented(instr);
+ }
+}
+
+
+void Simulator::VisitNEONScalar2RegMisc(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_CMEQ_zero_scalar: cmp(vf, rd, rn, 0, eq); break;
+ case NEON_CMGE_zero_scalar: cmp(vf, rd, rn, 0, ge); break;
+ case NEON_CMGT_zero_scalar: cmp(vf, rd, rn, 0, gt); break;
+ case NEON_CMLT_zero_scalar: cmp(vf, rd, rn, 0, lt); break;
+ case NEON_CMLE_zero_scalar: cmp(vf, rd, rn, 0, le); break;
+ case NEON_ABS_scalar: abs(vf, rd, rn); break;
+ case NEON_SQABS_scalar: abs(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_NEG_scalar: neg(vf, rd, rn); break;
+ case NEON_SQNEG_scalar: neg(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_SUQADD_scalar: suqadd(vf, rd, rn); break;
+ case NEON_USQADD_scalar: usqadd(vf, rd, rn); break;
+ default: VIXL_UNIMPLEMENTED(); break;
+ }
+ } else {
+ VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ // These instructions all use a one bit size field, except SQXTUN, SQXTN
+ // and UQXTN, which use a two bit size field.
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+ case NEON_FRECPE_scalar: frecpe(fpf, rd, rn, fpcr_rounding); break;
+ case NEON_FRECPX_scalar: frecpx(fpf, rd, rn); break;
+ case NEON_FRSQRTE_scalar: frsqrte(fpf, rd, rn); break;
+ case NEON_FCMGT_zero_scalar: fcmp_zero(fpf, rd, rn, gt); break;
+ case NEON_FCMGE_zero_scalar: fcmp_zero(fpf, rd, rn, ge); break;
+ case NEON_FCMEQ_zero_scalar: fcmp_zero(fpf, rd, rn, eq); break;
+ case NEON_FCMLE_zero_scalar: fcmp_zero(fpf, rd, rn, le); break;
+ case NEON_FCMLT_zero_scalar: fcmp_zero(fpf, rd, rn, lt); break;
+ case NEON_SCVTF_scalar: scvtf(fpf, rd, rn, 0, fpcr_rounding); break;
+ case NEON_UCVTF_scalar: ucvtf(fpf, rd, rn, 0, fpcr_rounding); break;
+ case NEON_FCVTNS_scalar: fcvts(fpf, rd, rn, FPTieEven); break;
+ case NEON_FCVTNU_scalar: fcvtu(fpf, rd, rn, FPTieEven); break;
+ case NEON_FCVTPS_scalar: fcvts(fpf, rd, rn, FPPositiveInfinity); break;
+ case NEON_FCVTPU_scalar: fcvtu(fpf, rd, rn, FPPositiveInfinity); break;
+ case NEON_FCVTMS_scalar: fcvts(fpf, rd, rn, FPNegativeInfinity); break;
+ case NEON_FCVTMU_scalar: fcvtu(fpf, rd, rn, FPNegativeInfinity); break;
+ case NEON_FCVTZS_scalar: fcvts(fpf, rd, rn, FPZero); break;
+ case NEON_FCVTZU_scalar: fcvtu(fpf, rd, rn, FPZero); break;
+ case NEON_FCVTAS_scalar: fcvts(fpf, rd, rn, FPTieAway); break;
+ case NEON_FCVTAU_scalar: fcvtu(fpf, rd, rn, FPTieAway); break;
+ case NEON_FCVTXN_scalar:
+ // Unlike all of the other FP instructions above, fcvtxn encodes dest
+ // size S as size<0>=1. There's only one case, so we ignore the form.
+ VIXL_ASSERT(instr->Bit(22) == 1);
+ fcvtxn(kFormatS, rd, rn);
+ break;
+ default:
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_SQXTN_scalar: sqxtn(vf, rd, rn); break;
+ case NEON_UQXTN_scalar: uqxtn(vf, rd, rn); break;
+ case NEON_SQXTUN_scalar: sqxtun(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+ }
+}
+
+
+void Simulator::VisitNEONScalar3Diff(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+ switch (instr->Mask(NEONScalar3DiffMask)) {
+ case NEON_SQDMLAL_scalar: sqdmlal(vf, rd, rn, rm); break;
+ case NEON_SQDMLSL_scalar: sqdmlsl(vf, rd, rn, rm); break;
+ case NEON_SQDMULL_scalar: sqdmull(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONScalar3Same(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
+ vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar3SameFPMask)) {
+ case NEON_FMULX_scalar: fmulx(vf, rd, rn, rm); break;
+ case NEON_FACGE_scalar: fabscmp(vf, rd, rn, rm, ge); break;
+ case NEON_FACGT_scalar: fabscmp(vf, rd, rn, rm, gt); break;
+ case NEON_FCMEQ_scalar: fcmp(vf, rd, rn, rm, eq); break;
+ case NEON_FCMGE_scalar: fcmp(vf, rd, rn, rm, ge); break;
+ case NEON_FCMGT_scalar: fcmp(vf, rd, rn, rm, gt); break;
+ case NEON_FRECPS_scalar: frecps(vf, rd, rn, rm); break;
+ case NEON_FRSQRTS_scalar: frsqrts(vf, rd, rn, rm); break;
+ case NEON_FABD_scalar: fabd(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ switch (instr->Mask(NEONScalar3SameMask)) {
+ case NEON_ADD_scalar: add(vf, rd, rn, rm); break;
+ case NEON_SUB_scalar: sub(vf, rd, rn, rm); break;
+ case NEON_CMEQ_scalar: cmp(vf, rd, rn, rm, eq); break;
+ case NEON_CMGE_scalar: cmp(vf, rd, rn, rm, ge); break;
+ case NEON_CMGT_scalar: cmp(vf, rd, rn, rm, gt); break;
+ case NEON_CMHI_scalar: cmp(vf, rd, rn, rm, hi); break;
+ case NEON_CMHS_scalar: cmp(vf, rd, rn, rm, hs); break;
+ case NEON_CMTST_scalar: cmptst(vf, rd, rn, rm); break;
+ case NEON_USHL_scalar: ushl(vf, rd, rn, rm); break;
+ case NEON_SSHL_scalar: sshl(vf, rd, rn, rm); break;
+ case NEON_SQDMULH_scalar: sqdmulh(vf, rd, rn, rm); break;
+ case NEON_SQRDMULH_scalar: sqrdmulh(vf, rd, rn, rm); break;
+ case NEON_UQADD_scalar:
+ add(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQADD_scalar:
+ add(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_UQSUB_scalar:
+ sub(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSUB_scalar:
+ sub(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_UQSHL_scalar:
+ ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSHL_scalar:
+ sshl(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_URSHL_scalar:
+ ushl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_SRSHL_scalar:
+ sshl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_UQRSHL_scalar:
+ ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
+ break;
+ case NEON_SQRSHL_scalar:
+ sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+}
+
+
+void Simulator::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+ VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ ByElementOp Op = NULL;
+
+ int rm_reg = instr->Rm();
+ int index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ rm_reg &= 0xf;
+ index = (index << 1) | instr->NEONM();
+ }
+
+ switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+ case NEON_SQDMULL_byelement_scalar: Op = &Simulator::sqdmull; break;
+ case NEON_SQDMLAL_byelement_scalar: Op = &Simulator::sqdmlal; break;
+ case NEON_SQDMLSL_byelement_scalar: Op = &Simulator::sqdmlsl; break;
+ case NEON_SQDMULH_byelement_scalar:
+ Op = &Simulator::sqdmulh;
+ vf = vf_r;
+ break;
+ case NEON_SQRDMULH_byelement_scalar:
+ Op = &Simulator::sqrdmulh;
+ vf = vf_r;
+ break;
+ default:
+ vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ index = instr->NEONH();
+ if ((instr->FPType() & 1) == 0) {
+ index = (index << 1) | instr->NEONL();
+ }
+ switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement_scalar: Op = &Simulator::fmul; break;
+ case NEON_FMLA_byelement_scalar: Op = &Simulator::fmla; break;
+ case NEON_FMLS_byelement_scalar: Op = &Simulator::fmls; break;
+ case NEON_FMULX_byelement_scalar: Op = &Simulator::fmulx; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ }
+
+ (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
+}
+
+
+void Simulator::VisitNEONScalarCopy(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+ int imm5 = instr->ImmNEON5();
+ int tz = CountTrailingZeros(imm5, 32);
+ int rn_index = imm5 >> (tz + 1);
+ dup_element(vf, rd, rn, rn_index);
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONScalarPairwise(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ switch (instr->Mask(NEONScalarPairwiseMask)) {
+ case NEON_ADDP_scalar: addp(vf, rd, rn); break;
+ case NEON_FADDP_scalar: faddp(vf, rd, rn); break;
+ case NEON_FMAXP_scalar: fmaxp(vf, rd, rn); break;
+ case NEON_FMAXNMP_scalar: fmaxnmp(vf, rd, rn); break;
+ case NEON_FMINP_scalar: fminp(vf, rd, rn); break;
+ case NEON_FMINNMP_scalar: fminnmp(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ static const NEONFormatMap map = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S,
+ NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D}
+ };
+ NEONFormatDecoder nfd(instr, &map);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
+ int immhimmb = instr->ImmNEONImmhImmb();
+ int right_shift = (16 << highestSetBit) - immhimmb;
+ int left_shift = immhimmb - (8 << highestSetBit);
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+ case NEON_SHL_scalar: shl(vf, rd, rn, left_shift); break;
+ case NEON_SLI_scalar: sli(vf, rd, rn, left_shift); break;
+ case NEON_SQSHL_imm_scalar: sqshl(vf, rd, rn, left_shift); break;
+ case NEON_UQSHL_imm_scalar: uqshl(vf, rd, rn, left_shift); break;
+ case NEON_SQSHLU_scalar: sqshlu(vf, rd, rn, left_shift); break;
+ case NEON_SRI_scalar: sri(vf, rd, rn, right_shift); break;
+ case NEON_SSHR_scalar: sshr(vf, rd, rn, right_shift); break;
+ case NEON_USHR_scalar: ushr(vf, rd, rn, right_shift); break;
+ case NEON_SRSHR_scalar: sshr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_URSHR_scalar: ushr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_SSRA_scalar: ssra(vf, rd, rn, right_shift); break;
+ case NEON_USRA_scalar: usra(vf, rd, rn, right_shift); break;
+ case NEON_SRSRA_scalar: srsra(vf, rd, rn, right_shift); break;
+ case NEON_URSRA_scalar: ursra(vf, rd, rn, right_shift); break;
+ case NEON_UQSHRN_scalar: uqshrn(vf, rd, rn, right_shift); break;
+ case NEON_UQRSHRN_scalar: uqrshrn(vf, rd, rn, right_shift); break;
+ case NEON_SQSHRN_scalar: sqshrn(vf, rd, rn, right_shift); break;
+ case NEON_SQRSHRN_scalar: sqrshrn(vf, rd, rn, right_shift); break;
+ case NEON_SQSHRUN_scalar: sqshrun(vf, rd, rn, right_shift); break;
+ case NEON_SQRSHRUN_scalar: sqrshrun(vf, rd, rn, right_shift); break;
+ case NEON_FCVTZS_imm_scalar: fcvts(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_FCVTZU_imm_scalar: fcvtu(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_SCVTF_imm_scalar:
+ scvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ case NEON_UCVTF_imm_scalar:
+ ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONShiftImmediate(const Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+ // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+ static const NEONFormatMap map = {
+ {22, 21, 20, 19, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
+ NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}
+ };
+ NEONFormatDecoder nfd(instr, &map);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+ static const NEONFormatMap map_l = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}
+ };
+ VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
+
+ int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
+ int immhimmb = instr->ImmNEONImmhImmb();
+ int right_shift = (16 << highestSetBit) - immhimmb;
+ int left_shift = immhimmb - (8 << highestSetBit);
+
+ switch (instr->Mask(NEONShiftImmediateMask)) {
+ case NEON_SHL: shl(vf, rd, rn, left_shift); break;
+ case NEON_SLI: sli(vf, rd, rn, left_shift); break;
+ case NEON_SQSHLU: sqshlu(vf, rd, rn, left_shift); break;
+ case NEON_SRI: sri(vf, rd, rn, right_shift); break;
+ case NEON_SSHR: sshr(vf, rd, rn, right_shift); break;
+ case NEON_USHR: ushr(vf, rd, rn, right_shift); break;
+ case NEON_SRSHR: sshr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_URSHR: ushr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_SSRA: ssra(vf, rd, rn, right_shift); break;
+ case NEON_USRA: usra(vf, rd, rn, right_shift); break;
+ case NEON_SRSRA: srsra(vf, rd, rn, right_shift); break;
+ case NEON_URSRA: ursra(vf, rd, rn, right_shift); break;
+ case NEON_SQSHL_imm: sqshl(vf, rd, rn, left_shift); break;
+ case NEON_UQSHL_imm: uqshl(vf, rd, rn, left_shift); break;
+ case NEON_SCVTF_imm: scvtf(vf, rd, rn, right_shift, fpcr_rounding); break;
+ case NEON_UCVTF_imm: ucvtf(vf, rd, rn, right_shift, fpcr_rounding); break;
+ case NEON_FCVTZS_imm: fcvts(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_FCVTZU_imm: fcvtu(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_SSHLL:
+ vf = vf_l;
+ if (instr->Mask(NEON_Q)) {
+ sshll2(vf, rd, rn, left_shift);
+ } else {
+ sshll(vf, rd, rn, left_shift);
+ }
+ break;
+ case NEON_USHLL:
+ vf = vf_l;
+ if (instr->Mask(NEON_Q)) {
+ ushll2(vf, rd, rn, left_shift);
+ } else {
+ ushll(vf, rd, rn, left_shift);
+ }
+ break;
+ case NEON_SHRN:
+ if (instr->Mask(NEON_Q)) {
+ shrn2(vf, rd, rn, right_shift);
+ } else {
+ shrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_RSHRN:
+ if (instr->Mask(NEON_Q)) {
+ rshrn2(vf, rd, rn, right_shift);
+ } else {
+ rshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_UQSHRN:
+ if (instr->Mask(NEON_Q)) {
+ uqshrn2(vf, rd, rn, right_shift);
+ } else {
+ uqshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_UQRSHRN:
+ if (instr->Mask(NEON_Q)) {
+ uqrshrn2(vf, rd, rn, right_shift);
+ } else {
+ uqrshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQSHRN:
+ if (instr->Mask(NEON_Q)) {
+ sqshrn2(vf, rd, rn, right_shift);
+ } else {
+ sqshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQRSHRN:
+ if (instr->Mask(NEON_Q)) {
+ sqrshrn2(vf, rd, rn, right_shift);
+ } else {
+ sqrshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQSHRUN:
+ if (instr->Mask(NEON_Q)) {
+ sqshrun2(vf, rd, rn, right_shift);
+ } else {
+ sqshrun(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQRSHRUN:
+ if (instr->Mask(NEON_Q)) {
+ sqrshrun2(vf, rd, rn, right_shift);
+ } else {
+ sqrshrun(vf, rd, rn, right_shift);
+ }
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONTable(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rn2 = vreg((instr->Rn() + 1) % kNumberOfVRegisters);
+ SimVRegister& rn3 = vreg((instr->Rn() + 2) % kNumberOfVRegisters);
+ SimVRegister& rn4 = vreg((instr->Rn() + 3) % kNumberOfVRegisters);
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEONTableMask)) {
+ case NEON_TBL_1v: tbl(vf, rd, rn, rm); break;
+ case NEON_TBL_2v: tbl(vf, rd, rn, rn2, rm); break;
+ case NEON_TBL_3v: tbl(vf, rd, rn, rn2, rn3, rm); break;
+ case NEON_TBL_4v: tbl(vf, rd, rn, rn2, rn3, rn4, rm); break;
+ case NEON_TBX_1v: tbx(vf, rd, rn, rm); break;
+ case NEON_TBX_2v: tbx(vf, rd, rn, rn2, rm); break;
+ case NEON_TBX_3v: tbx(vf, rd, rn, rn2, rn3, rm); break;
+ case NEON_TBX_4v: tbx(vf, rd, rn, rn2, rn3, rn4, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONPerm(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEONPermMask)) {
+ case NEON_TRN1: trn1(vf, rd, rn, rm); break;
+ case NEON_TRN2: trn2(vf, rd, rn, rm); break;
+ case NEON_UZP1: uzp1(vf, rd, rn, rm); break;
+ case NEON_UZP2: uzp2(vf, rd, rn, rm); break;
+ case NEON_ZIP1: zip1(vf, rd, rn, rm); break;
+ case NEON_ZIP2: zip2(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::DoUnreachable(const Instruction* instr) {
+ VIXL_ASSERT(instr->InstructionBits() == UNDEFINED_INST_PATTERN);
+
+ fprintf(stream_, "Hit UNREACHABLE marker at pc=%p.\n",
+ reinterpret_cast<const void*>(instr));
+ abort();
+}
+
+
+void Simulator::DoTrace(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kTraceOpcode));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t parameters;
+ uint32_t command;
+
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+ memcpy(&command, instr + kTraceCommandOffset, sizeof(command));
+
+ switch (command) {
+ case TRACE_ENABLE:
+ set_trace_parameters(trace_parameters() | parameters);
+ break;
+ case TRACE_DISABLE:
+ set_trace_parameters(trace_parameters() & ~parameters);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ set_pc(instr->InstructionAtOffset(kTraceLength));
+}
+
+
+void Simulator::DoLog(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kLogOpcode));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t parameters;
+
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+
+ // We don't support a one-shot LOG_DISASM.
+ VIXL_ASSERT((parameters & LOG_DISASM) == 0);
+ // Print the requested information.
+ if (parameters & LOG_SYSREGS) PrintSystemRegisters();
+ if (parameters & LOG_REGS) PrintRegisters();
+ if (parameters & LOG_VREGS) PrintVRegisters();
+
+ set_pc(instr->InstructionAtOffset(kLogLength));
+}
+
+
+void Simulator::DoPrintf(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kPrintfOpcode));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t arg_count;
+ uint32_t arg_pattern_list;
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&arg_count,
+ instr + kPrintfArgCountOffset,
+ sizeof(arg_count));
+ memcpy(&arg_pattern_list,
+ instr + kPrintfArgPatternListOffset,
+ sizeof(arg_pattern_list));
+
+ VIXL_ASSERT(arg_count <= kPrintfMaxArgCount);
+ VIXL_ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
+
+ // We need to call the host printf function with a set of arguments defined by
+ // arg_pattern_list. Because we don't know the types and sizes of the
+ // arguments, this is very difficult to do in a robust and portable way. To
+ // work around the problem, we pick apart the format string, and print one
+ // format placeholder at a time.
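+  //
+  // For example, a format string of "x: %d, y: %f\n" is copied into the
+  // scratch buffer as "x: \0%d, y: \0%f\n": the literal prefix "x: " is
+  // printed as-is, and the chunks "%d, y: " and "%f\n" are each printed with
+  // one register argument.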
+
+ // Allocate space for the format string. We take a copy, so we can modify it.
+ // Leave enough space for one extra character per expected argument (plus the
+ // '\0' termination).
+ const char * format_base = reg<const char *>(0);
+ VIXL_ASSERT(format_base != NULL);
+ size_t length = strlen(format_base) + 1;
+ char * const format = (char *)js_calloc(length + arg_count);
+
+ // A list of chunks, each with exactly one format placeholder.
+ const char * chunks[kPrintfMaxArgCount];
+
+ // Copy the format string and search for format placeholders.
+ uint32_t placeholder_count = 0;
+ char * format_scratch = format;
+ for (size_t i = 0; i < length; i++) {
+ if (format_base[i] != '%') {
+ *format_scratch++ = format_base[i];
+ } else {
+ if (format_base[i + 1] == '%') {
+ // Ignore explicit "%%" sequences.
+ *format_scratch++ = format_base[i];
+ i++;
+ // Chunks after the first are passed as format strings to printf, so we
+ // need to escape '%' characters in those chunks.
+ if (placeholder_count > 0) *format_scratch++ = format_base[i];
+ } else {
+ VIXL_CHECK(placeholder_count < arg_count);
+ // Insert '\0' before placeholders, and store their locations.
+ *format_scratch++ = '\0';
+ chunks[placeholder_count++] = format_scratch;
+ *format_scratch++ = format_base[i];
+ }
+ }
+ }
+ VIXL_CHECK(placeholder_count == arg_count);
+
+ // Finally, call printf with each chunk, passing the appropriate register
+ // argument. Normally, printf returns the number of bytes transmitted, so we
+ // can emulate a single printf call by adding the result from each chunk. If
+ // any call returns a negative (error) value, though, just return that value.
+
+ printf("%s", clr_printf);
+
+ // Because '\0' is inserted before each placeholder, the first string in
+ // 'format' contains no format placeholders and should be printed literally.
+ int result = printf("%s", format);
+ int pcs_r = 1; // Start at x1. x0 holds the format string.
+ int pcs_f = 0; // Start at d0.
+ if (result >= 0) {
+ for (uint32_t i = 0; i < placeholder_count; i++) {
+ int part_result = -1;
+
+ uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
+ arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
+ switch (arg_pattern) {
+ case kPrintfArgW: part_result = printf(chunks[i], wreg(pcs_r++)); break;
+ case kPrintfArgX: part_result = printf(chunks[i], xreg(pcs_r++)); break;
+ case kPrintfArgD: part_result = printf(chunks[i], dreg(pcs_f++)); break;
+ default: VIXL_UNREACHABLE();
+ }
+
+ if (part_result < 0) {
+ // Handle error values.
+ result = part_result;
+ break;
+ }
+
+ result += part_result;
+ }
+ }
+
+ printf("%s", clr_normal);
+
+ // Printf returns its result in x0 (just like the C library's printf).
+ set_xreg(0, result);
+
+ // The printf parameters are inlined in the code, so skip them.
+ set_pc(instr->InstructionAtOffset(kPrintfLength));
+
+ // Set LR as if we'd just called a native printf function.
+ set_lr(pc());
+
+ js_free(format);
+}
+
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/Simulator-vixl.h b/js/src/jit/arm64/vixl/Simulator-vixl.h
new file mode 100644
index 0000000000..af78f5bad0
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.h
@@ -0,0 +1,2592 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_SIMULATOR_A64_H_
+#define VIXL_A64_SIMULATOR_A64_H_
+
+#include "jstypes.h"
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include "mozilla/Vector.h"
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/arm64/vixl/Disasm-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/arm64/vixl/Instrument-vixl.h"
+#include "jit/arm64/vixl/MozCachingDecoder.h"
+#include "jit/arm64/vixl/Simulator-Constants-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+#include "jit/IonTypes.h"
+#include "js/AllocPolicy.h"
+#include "vm/MutexIDs.h"
+#include "wasm/WasmSignalHandlers.h"
+
+namespace vixl {
+
+// Representation of memory, with typed getters and setters for access.
+class Memory {
+ public:
+ template <typename T>
+ static T AddressUntag(T address) {
+    // Cast the address using a C-style cast. A reinterpret_cast would better
+    // express the intent, but it can't cast one integral type to another.
+ uint64_t bits = (uint64_t)address;
+ return (T)(bits & ~kAddressTagMask);
+ }
+
+ template <typename T, typename A>
+ static T Read(A address) {
+ T value;
+ address = AddressUntag(address);
+ VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
+ memcpy(&value, reinterpret_cast<const char *>(address), sizeof(value));
+ return value;
+ }
+
+ template <typename T, typename A>
+ static void Write(A address, T value) {
+ address = AddressUntag(address);
+ VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
+ memcpy(reinterpret_cast<char *>(address), &value, sizeof(value));
+ }
+};
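+
+// An illustrative sketch of the accessors above ('addr' stands for any
+// address value, tagged or untagged): both reads and writes pass through
+// AddressUntag(), so tagged and untagged pointers reach the same location.
+//
+//   uint32_t value = Memory::Read<uint32_t>(addr);
+//   Memory::Write(addr, static_cast<uint32_t>(value + 1));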
+
+// Represent a register (r0-r31, v0-v31).
+template<int kSizeInBytes>
+class SimRegisterBase {
+ public:
+ SimRegisterBase() : written_since_last_log_(false) {}
+
+ // Write the specified value. The value is zero-extended if necessary.
+ template<typename T>
+ void Set(T new_value) {
+ VIXL_STATIC_ASSERT(sizeof(new_value) <= kSizeInBytes);
+ if (sizeof(new_value) < kSizeInBytes) {
+ // All AArch64 registers are zero-extending.
+ memset(value_ + sizeof(new_value), 0, kSizeInBytes - sizeof(new_value));
+ }
+ memcpy(value_, &new_value, sizeof(new_value));
+ NotifyRegisterWrite();
+ }
+
+ // Insert a typed value into a register, leaving the rest of the register
+ // unchanged. The lane parameter indicates where in the register the value
+ // should be inserted, in the range [ 0, sizeof(value_) / sizeof(T) ), where
+ // 0 represents the least significant bits.
+ template<typename T>
+ void Insert(int lane, T new_value) {
+ VIXL_ASSERT(lane >= 0);
+ VIXL_ASSERT((sizeof(new_value) +
+ (lane * sizeof(new_value))) <= kSizeInBytes);
+ memcpy(&value_[lane * sizeof(new_value)], &new_value, sizeof(new_value));
+ NotifyRegisterWrite();
+ }
+
+ // Read the value as the specified type. The value is truncated if necessary.
+ template<typename T>
+ T Get(int lane = 0) const {
+ T result;
+ VIXL_ASSERT(lane >= 0);
+ VIXL_ASSERT((sizeof(result) + (lane * sizeof(result))) <= kSizeInBytes);
+ memcpy(&result, &value_[lane * sizeof(result)], sizeof(result));
+ return result;
+ }
+
+ // TODO: Make this return a map of updated bytes, so that we can highlight
+ // updated lanes for load-and-insert. (That never happens for scalar code, but
+ // NEON has some instructions that can update individual lanes.)
+ bool WrittenSinceLastLog() const {
+ return written_since_last_log_;
+ }
+
+ void NotifyRegisterLogged() {
+ written_since_last_log_ = false;
+ }
+
+ protected:
+ uint8_t value_[kSizeInBytes];
+
+ // Helpers to aid with register tracing.
+ bool written_since_last_log_;
+
+ void NotifyRegisterWrite() {
+ written_since_last_log_ = true;
+ }
+};
+typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31
+typedef SimRegisterBase<kQRegSizeInBytes> SimVRegister; // v0-v31
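+
+// A minimal illustration of the accessors above (not taken from a real call
+// site): Set() zero-extends the value to the full register width, while
+// Insert() and Get() address one lane at a time.
+//
+//   SimVRegister q;
+//   q.Set(UINT64_C(0));                      // Clears all 128 bits.
+//   q.Insert(1, static_cast<uint32_t>(42));  // Writes lane 1 of a 4S view.
+//   uint32_t lane1 = q.Get<uint32_t>(1);     // Reads it back (42).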
+
+// Representation of a vector register, with typed getters and setters for lanes
+// and additional information to represent lane state.
+class LogicVRegister {
+ public:
+ inline LogicVRegister(SimVRegister& other) // NOLINT
+ : register_(other) {
+ for (unsigned i = 0; i < sizeof(saturated_) / sizeof(saturated_[0]); i++) {
+ saturated_[i] = kNotSaturated;
+ }
+ for (unsigned i = 0; i < sizeof(round_) / sizeof(round_[0]); i++) {
+ round_[i] = 0;
+ }
+ }
+
+ int64_t Int(VectorFormat vform, int index) const {
+ int64_t element;
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: element = register_.Get<int8_t>(index); break;
+ case 16: element = register_.Get<int16_t>(index); break;
+ case 32: element = register_.Get<int32_t>(index); break;
+ case 64: element = register_.Get<int64_t>(index); break;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+ return element;
+ }
+
+ uint64_t Uint(VectorFormat vform, int index) const {
+ uint64_t element;
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: element = register_.Get<uint8_t>(index); break;
+ case 16: element = register_.Get<uint16_t>(index); break;
+ case 32: element = register_.Get<uint32_t>(index); break;
+ case 64: element = register_.Get<uint64_t>(index); break;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+ return element;
+ }
+
+ int64_t IntLeftJustified(VectorFormat vform, int index) const {
+ return Int(vform, index) << (64 - LaneSizeInBitsFromFormat(vform));
+ }
+
+ uint64_t UintLeftJustified(VectorFormat vform, int index) const {
+ return Uint(vform, index) << (64 - LaneSizeInBitsFromFormat(vform));
+ }
+
+ void SetInt(VectorFormat vform, int index, int64_t value) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: register_.Insert(index, static_cast<int8_t>(value)); break;
+ case 16: register_.Insert(index, static_cast<int16_t>(value)); break;
+ case 32: register_.Insert(index, static_cast<int32_t>(value)); break;
+ case 64: register_.Insert(index, static_cast<int64_t>(value)); break;
+ default: VIXL_UNREACHABLE(); return;
+ }
+ }
+
+ void SetUint(VectorFormat vform, int index, uint64_t value) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: register_.Insert(index, static_cast<uint8_t>(value)); break;
+ case 16: register_.Insert(index, static_cast<uint16_t>(value)); break;
+ case 32: register_.Insert(index, static_cast<uint32_t>(value)); break;
+ case 64: register_.Insert(index, static_cast<uint64_t>(value)); break;
+ default: VIXL_UNREACHABLE(); return;
+ }
+ }
+
+ void ReadUintFromMem(VectorFormat vform, int index, uint64_t addr) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: register_.Insert(index, Memory::Read<uint8_t>(addr)); break;
+ case 16: register_.Insert(index, Memory::Read<uint16_t>(addr)); break;
+ case 32: register_.Insert(index, Memory::Read<uint32_t>(addr)); break;
+ case 64: register_.Insert(index, Memory::Read<uint64_t>(addr)); break;
+ default: VIXL_UNREACHABLE(); return;
+ }
+ }
+
+ void WriteUintToMem(VectorFormat vform, int index, uint64_t addr) const {
+ uint64_t value = Uint(vform, index);
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: Memory::Write(addr, static_cast<uint8_t>(value)); break;
+ case 16: Memory::Write(addr, static_cast<uint16_t>(value)); break;
+ case 32: Memory::Write(addr, static_cast<uint32_t>(value)); break;
+ case 64: Memory::Write(addr, value); break;
+ }
+ }
+
+ template <typename T>
+ T Float(int index) const {
+ return register_.Get<T>(index);
+ }
+
+ template <typename T>
+ void SetFloat(int index, T value) const {
+ register_.Insert(index, value);
+ }
+
+ // When setting a result in a register of size less than Q, the top bits of
+ // the Q register must be cleared.
+ void ClearForWrite(VectorFormat vform) const {
+ unsigned size = RegisterSizeInBytesFromFormat(vform);
+ for (unsigned i = size; i < kQRegSizeInBytes; i++) {
+ SetUint(kFormat16B, i, 0);
+ }
+ }
+
+ // Saturation state for each lane of a vector.
+ enum Saturation {
+ kNotSaturated = 0,
+ kSignedSatPositive = 1 << 0,
+ kSignedSatNegative = 1 << 1,
+ kSignedSatMask = kSignedSatPositive | kSignedSatNegative,
+ kSignedSatUndefined = kSignedSatMask,
+ kUnsignedSatPositive = 1 << 2,
+ kUnsignedSatNegative = 1 << 3,
+ kUnsignedSatMask = kUnsignedSatPositive | kUnsignedSatNegative,
+ kUnsignedSatUndefined = kUnsignedSatMask
+ };
+
+ // Getters for saturation state.
+ Saturation GetSignedSaturation(int index) {
+ return static_cast<Saturation>(saturated_[index] & kSignedSatMask);
+ }
+
+ Saturation GetUnsignedSaturation(int index) {
+ return static_cast<Saturation>(saturated_[index] & kUnsignedSatMask);
+ }
+
+ // Setters for saturation state.
+ void ClearSat(int index) {
+ saturated_[index] = kNotSaturated;
+ }
+
+ void SetSignedSat(int index, bool positive) {
+ SetSatFlag(index, positive ? kSignedSatPositive : kSignedSatNegative);
+ }
+
+ void SetUnsignedSat(int index, bool positive) {
+ SetSatFlag(index, positive ? kUnsignedSatPositive : kUnsignedSatNegative);
+ }
+
+ void SetSatFlag(int index, Saturation sat) {
+ saturated_[index] = static_cast<Saturation>(saturated_[index] | sat);
+ VIXL_ASSERT((sat & kUnsignedSatMask) != kUnsignedSatUndefined);
+ VIXL_ASSERT((sat & kSignedSatMask) != kSignedSatUndefined);
+ }
+
+ // Saturate lanes of a vector based on saturation state.
+ LogicVRegister& SignedSaturate(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ Saturation sat = GetSignedSaturation(i);
+ if (sat == kSignedSatPositive) {
+ SetInt(vform, i, MaxIntFromFormat(vform));
+ } else if (sat == kSignedSatNegative) {
+ SetInt(vform, i, MinIntFromFormat(vform));
+ }
+ }
+ return *this;
+ }
+
+ LogicVRegister& UnsignedSaturate(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ Saturation sat = GetUnsignedSaturation(i);
+ if (sat == kUnsignedSatPositive) {
+ SetUint(vform, i, MaxUintFromFormat(vform));
+ } else if (sat == kUnsignedSatNegative) {
+ SetUint(vform, i, 0);
+ }
+ }
+ return *this;
+ }
+
+ // Getter for rounding state.
+ bool GetRounding(int index) {
+ return round_[index];
+ }
+
+ // Setter for rounding state.
+ void SetRounding(int index, bool round) {
+ round_[index] = round;
+ }
+
+ // Round lanes of a vector based on rounding state.
+ LogicVRegister& Round(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ SetInt(vform, i, Int(vform, i) + (GetRounding(i) ? 1 : 0));
+ }
+ return *this;
+ }
+
+ // Unsigned halve lanes of a vector, and use the saturation state to set the
+ // top bit.
+ LogicVRegister& Uhalve(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t val = Uint(vform, i);
+ SetRounding(i, (val & 1) == 1);
+ val >>= 1;
+ if (GetUnsignedSaturation(i) != kNotSaturated) {
+ // If the operation causes unsigned saturation, the bit shifted into the
+ // most significant bit must be set.
+ val |= (MaxUintFromFormat(vform) >> 1) + 1;
+ }
+ SetInt(vform, i, val);
+ }
+ return *this;
+ }
+
+  // Signed halve lanes of a vector, and use the saturation state to invert
+  // the top bit.
+ LogicVRegister& Halve(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t val = Int(vform, i);
+ SetRounding(i, (val & 1) == 1);
+ val >>= 1;
+ if (GetSignedSaturation(i) != kNotSaturated) {
+ // If the operation causes signed saturation, the sign bit must be
+ // inverted.
+ val ^= (MaxUintFromFormat(vform) >> 1) + 1;
+ }
+ SetInt(vform, i, val);
+ }
+ return *this;
+ }
+
+ private:
+ SimVRegister& register_;
+
+ // Allocate one saturation state entry per lane; largest register is type Q,
+ // and lanes can be a minimum of one byte wide.
+ Saturation saturated_[kQRegSizeInBytes];
+
+ // Allocate one rounding state entry per lane.
+ bool round_[kQRegSizeInBytes];
+};
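+
+// NEON arithmetic helpers in the simulator return a LogicVRegister so that
+// rounding and saturation can be chained onto the result, as in the scalar
+// visitors earlier in this patch, e.g.:
+//   add(vf, rd, rn, rm).UnsignedSaturate(vf);            // UQADD
+//   sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);   // SQRSHL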
+
+// The proper way to initialize a simulated system register (such as NZCV) is as
+// follows:
+// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
+class SimSystemRegister {
+ public:
+ // The default constructor represents a register which has no writable bits.
+ // It is not possible to set its value to anything other than 0.
+ SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
+
+ uint32_t RawValue() const {
+ return value_;
+ }
+
+ void SetRawValue(uint32_t new_value) {
+ value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return ExtractUnsignedBitfield32(msb, lsb, value_);
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ return ExtractSignedBitfield32(msb, lsb, value_);
+ }
+
+ void SetBits(int msb, int lsb, uint32_t bits);
+
+ // Default system register values.
+ static SimSystemRegister DefaultValueFor(SystemRegister id);
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ uint32_t Name() const { return Func(HighBit, LowBit); } \
+ void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+ static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
+
+ SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
+
+#undef DEFINE_WRITE_IGNORE_MASK
+#undef DEFINE_GETTER
+
+ protected:
+ // Most system registers only implement a few of the bits in the word. Other
+ // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
+ // describes the bits which are not modifiable.
+ SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
+ : value_(value), write_ignore_mask_(write_ignore_mask) { }
+
+ uint32_t value_;
+ uint32_t write_ignore_mask_;
+};
+
+
+class SimExclusiveLocalMonitor {
+ public:
+ SimExclusiveLocalMonitor() : kSkipClearProbability(8), seed_(0x87654321) {
+ Clear();
+ }
+
+ // Clear the exclusive monitor (like clrex).
+ void Clear() {
+ address_ = 0;
+ size_ = 0;
+ }
+
+ // Clear the exclusive monitor most of the time.
+ void MaybeClear() {
+ if ((seed_ % kSkipClearProbability) != 0) {
+ Clear();
+ }
+
+ // Advance seed_ using a simple linear congruential generator.
+ seed_ = (seed_ * 48271) % 2147483647;
+ }
+
+ // Mark the address range for exclusive access (like load-exclusive).
+ void MarkExclusive(uint64_t address, size_t size) {
+ address_ = address;
+ size_ = size;
+ }
+
+ // Return true if the address range is marked (like store-exclusive).
+ // This helper doesn't implicitly clear the monitor.
+ bool IsExclusive(uint64_t address, size_t size) {
+ VIXL_ASSERT(size > 0);
+ // Be pedantic: Require both the address and the size to match.
+ return (size == size_) && (address == address_);
+ }
+
+ private:
+ uint64_t address_;
+ size_t size_;
+
+ const int kSkipClearProbability;
+ uint32_t seed_;
+};
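+
+// Rough usage sketch ('monitor' is an assumed SimExclusiveLocalMonitor
+// instance, not an actual simulator member): a load-exclusive marks the
+// accessed range, and the matching store-exclusive checks the mark before
+// writing.
+//
+//   monitor.MarkExclusive(address, access_size);                 // ldxr
+//   bool may_store = monitor.IsExclusive(address, access_size);  // stxr test
+//   monitor.Clear();                                             // clrex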
+
+
+// We can't accurately simulate the global monitor since it depends on external
+// influences. Instead, this implementation occasionally causes accesses to
+// fail, according to kPassProbability.
+class SimExclusiveGlobalMonitor {
+ public:
+ SimExclusiveGlobalMonitor() : kPassProbability(8), seed_(0x87654321) {}
+
+ bool IsExclusive(uint64_t address, size_t size) {
+ USE(address, size);
+
+ bool pass = (seed_ % kPassProbability) != 0;
+ // Advance seed_ using a simple linear congruential generator.
+ seed_ = (seed_ * 48271) % 2147483647;
+ return pass;
+ }
+
+ private:
+ const int kPassProbability;
+ uint32_t seed_;
+};
+
+class Redirection;
+
+class Simulator : public DecoderVisitor {
+ public:
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ using Decoder = CachingDecoder;
+ mozilla::Atomic<bool> pendingCacheRequests = mozilla::Atomic<bool>{ false };
+#endif
+ explicit Simulator(Decoder* decoder, FILE* stream = stdout);
+ ~Simulator();
+
+ // Moz changes.
+ void init(Decoder* decoder, FILE* stream);
+ static Simulator* Current();
+ static Simulator* Create();
+ static void Destroy(Simulator* sim);
+ uintptr_t stackLimit() const;
+ uintptr_t* addressOfStackLimit();
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+ int64_t call(uint8_t* entry, int argument_count, ...);
+ static void* RedirectNativeFunction(void* nativeFunction, js::jit::ABIFunctionType type);
+ void setGPR32Result(int32_t result);
+ void setGPR64Result(int64_t result);
+ void setFP32Result(float result);
+ void setFP64Result(double result);
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ void FlushICache();
+#endif
+ void VisitCallRedirection(const Instruction* instr);
+ static uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+ template<typename T> T Read(uintptr_t address);
+ template <typename T> void Write(uintptr_t address_, T value);
+ JS::ProfilingFrameIterator::RegisterState registerState();
+
+ void ResetState();
+
+ // Run the simulator.
+ virtual void Run();
+ void RunFrom(const Instruction* first);
+
+ // Simulation helpers.
+ const Instruction* pc() const { return pc_; }
+ const Instruction* get_pc() const { return pc_; }
+ int64_t get_sp() const { return xreg(31, Reg31IsStackPointer); }
+ int64_t get_lr() const { return xreg(30); }
+ int64_t get_fp() const { return xreg(29); }
+
+ template <typename T>
+ T get_pc_as() const { return reinterpret_cast<T>(const_cast<Instruction*>(pc())); }
+
+ void set_pc(const Instruction* new_pc) {
+ pc_ = Memory::AddressUntag(new_pc);
+ pc_modified_ = true;
+ }
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ // This method is rather hot so inline the normal (no-wasm) case.
+ bool MOZ_ALWAYS_INLINE handle_wasm_seg_fault(uintptr_t addr, unsigned numBytes) {
+ if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+ return false;
+ }
+
+ uint8_t* newPC;
+ if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes, &newPC)) {
+ return false;
+ }
+
+ set_pc((Instruction*)newPC);
+ return true;
+ }
+
+ void increment_pc() {
+ if (!pc_modified_) {
+ pc_ = pc_->NextInstruction();
+ }
+
+ pc_modified_ = false;
+ }
+
+ void ExecuteInstruction();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr) override;
+ VISITOR_LIST_THAT_RETURN(DECLARE)
+ VISITOR_LIST_THAT_DONT_RETURN(DECLARE)
+ #undef DECLARE
+
+
+ // Integer register accessors.
+
+ // Basic accessor: Read the register as the specified type.
+ template<typename T>
+ T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ T result;
+ memset(&result, 0, sizeof(result));
+ return result;
+ }
+ return registers_[code].Get<T>();
+ }
+
+ // Common specialized accessors for the reg() template.
+ int32_t wreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int32_t>(code, r31mode);
+ }
+
+ int64_t xreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(code, r31mode);
+ }
+
+ // As above, with parameterized size and return type. The value is
+ // either zero-extended or truncated to fit, as required.
+ template<typename T>
+ T reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ uint64_t raw;
+ switch (size) {
+ case kWRegSize: raw = reg<uint32_t>(code, r31mode); break;
+ case kXRegSize: raw = reg<uint64_t>(code, r31mode); break;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+
+ T result;
+ VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw));
+ // Copy the result and truncate to fit. This assumes a little-endian host.
+ memcpy(&result, &raw, sizeof(result));
+ return result;
+ }
+
+ // Use int64_t by default if T is not specified.
+ int64_t reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(size, code, r31mode);
+ }
+
+ enum RegLogMode {
+ LogRegWrites,
+ NoRegLog
+ };
+
+ // Write 'value' into an integer register. The value is zero-extended. This
+ // behaviour matches AArch64 register writes.
+ template<typename T>
+ void set_reg(unsigned code, T value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ if (sizeof(T) < kWRegSizeInBytes) {
+ // We use a C-style cast on purpose here.
+    // Since we do not have access to 'constexpr if', the casts in this `if`
+ // must be valid even if we know the code will never be executed, in
+ // particular when `T` is a pointer type.
+ int64_t tmp_64bit = (int64_t)value;
+ int32_t tmp_32bit = static_cast<int32_t>(tmp_64bit);
+ set_reg<int32_t>(code, tmp_32bit, log_mode, r31mode);
+ return;
+ }
+
+ VIXL_ASSERT((sizeof(T) == kWRegSizeInBytes) ||
+ (sizeof(T) == kXRegSizeInBytes));
+ VIXL_ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+
+ registers_[code].Set(value);
+
+ if (log_mode == LogRegWrites) LogRegister(code, r31mode);
+ }
+
+ // Common specialized accessors for the set_reg() template.
+ void set_wreg(unsigned code, int32_t value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(code, value, log_mode, r31mode);
+ }
+
+ void set_xreg(unsigned code, int64_t value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(code, value, log_mode, r31mode);
+ }
+
+ // As above, with parameterized size and type. The value is either
+ // zero-extended or truncated to fit, as required.
+ template<typename T>
+ void set_reg(unsigned size, unsigned code, T value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ // Zero-extend the input.
+ uint64_t raw = 0;
+ VIXL_STATIC_ASSERT(sizeof(value) <= sizeof(raw));
+ memcpy(&raw, &value, sizeof(value));
+
+ // Write (and possibly truncate) the value.
+ switch (size) {
+ case kWRegSize:
+ set_reg(code, static_cast<uint32_t>(raw), log_mode, r31mode);
+ break;
+ case kXRegSize:
+ set_reg(code, raw, log_mode, r31mode);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ return;
+ }
+ }
+
+ // Common specialized accessors for the set_reg() template.
+
+ // Commonly-used special cases.
+ template<typename T>
+ void set_lr(T value) {
+ set_reg(kLinkRegCode, value);
+ }
+
+ template<typename T>
+ void set_sp(T value) {
+ set_reg(31, value, LogRegWrites, Reg31IsStackPointer);
+ }
+
+ // Vector register accessors.
+ // These are equivalent to the integer register accessors, but for vector
+ // registers.
+
+ // A structure for representing a 128-bit Q register.
+ struct qreg_t { uint8_t val[kQRegSizeInBytes]; };
+
+ // Basic accessor: read the register as the specified type.
+ template<typename T>
+ T vreg(unsigned code) const {
+ VIXL_STATIC_ASSERT((sizeof(T) == kBRegSizeInBytes) ||
+ (sizeof(T) == kHRegSizeInBytes) ||
+ (sizeof(T) == kSRegSizeInBytes) ||
+ (sizeof(T) == kDRegSizeInBytes) ||
+ (sizeof(T) == kQRegSizeInBytes));
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+
+ return vregisters_[code].Get<T>();
+ }
+
+ // Common specialized accessors for the vreg() template.
+ int8_t breg(unsigned code) const {
+ return vreg<int8_t>(code);
+ }
+
+ int16_t hreg(unsigned code) const {
+ return vreg<int16_t>(code);
+ }
+
+ float sreg(unsigned code) const {
+ return vreg<float>(code);
+ }
+
+ uint32_t sreg_bits(unsigned code) const {
+ return vreg<uint32_t>(code);
+ }
+
+ double dreg(unsigned code) const {
+ return vreg<double>(code);
+ }
+
+ uint64_t dreg_bits(unsigned code) const {
+ return vreg<uint64_t>(code);
+ }
+
+ qreg_t qreg(unsigned code) const {
+ return vreg<qreg_t>(code);
+ }
+
+ // As above, with parameterized size and return type. The value is
+ // either zero-extended or truncated to fit, as required.
+ template<typename T>
+ T vreg(unsigned size, unsigned code) const {
+ uint64_t raw = 0;
+ T result;
+
+ switch (size) {
+ case kSRegSize: raw = vreg<uint32_t>(code); break;
+ case kDRegSize: raw = vreg<uint64_t>(code); break;
+ default:
+ VIXL_UNREACHABLE();
+ break;
+ }
+
+ VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw));
+ // Copy the result and truncate to fit. This assumes a little-endian host.
+ memcpy(&result, &raw, sizeof(result));
+ return result;
+ }
+
+ inline SimVRegister& vreg(unsigned code) {
+ return vregisters_[code];
+ }
+
+ // Basic accessor: Write the specified value.
+ template<typename T>
+ void set_vreg(unsigned code, T value,
+ RegLogMode log_mode = LogRegWrites) {
+ VIXL_STATIC_ASSERT((sizeof(value) == kBRegSizeInBytes) ||
+ (sizeof(value) == kHRegSizeInBytes) ||
+ (sizeof(value) == kSRegSizeInBytes) ||
+ (sizeof(value) == kDRegSizeInBytes) ||
+ (sizeof(value) == kQRegSizeInBytes));
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ vregisters_[code].Set(value);
+
+ if (log_mode == LogRegWrites) {
+ LogVRegister(code, GetPrintRegisterFormat(value));
+ }
+ }
+
+ // Common specialized accessors for the set_vreg() template.
+ void set_breg(unsigned code, int8_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_hreg(unsigned code, int16_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_sreg(unsigned code, float value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_sreg_bits(unsigned code, uint32_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_dreg(unsigned code, double value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_dreg_bits(unsigned code, uint64_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_qreg(unsigned code, qreg_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ bool N() const { return nzcv_.N() != 0; }
+ bool Z() const { return nzcv_.Z() != 0; }
+ bool C() const { return nzcv_.C() != 0; }
+ bool V() const { return nzcv_.V() != 0; }
+
+ SimSystemRegister& ReadNzcv() { return nzcv_; }
+ SimSystemRegister& nzcv() { return nzcv_; }
+
+ // TODO: Find a way to make the fpcr_ members return the proper types, so
+ // these accessors are not necessary.
+ FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
+ bool DN() { return fpcr_.DN() != 0; }
+ SimSystemRegister& fpcr() { return fpcr_; }
+
+ UseDefaultNaN ReadDN() const {
+ return fpcr_.DN() != 0 ? kUseDefaultNaN : kIgnoreDefaultNaN;
+ }
+
+ // Specify relevant register formats for Print(V)Register and related helpers.
+ enum PrintRegisterFormat {
+ // The lane size.
+ kPrintRegLaneSizeB = 0 << 0,
+ kPrintRegLaneSizeH = 1 << 0,
+ kPrintRegLaneSizeS = 2 << 0,
+ kPrintRegLaneSizeW = kPrintRegLaneSizeS,
+ kPrintRegLaneSizeD = 3 << 0,
+ kPrintRegLaneSizeX = kPrintRegLaneSizeD,
+ kPrintRegLaneSizeQ = 4 << 0,
+
+ kPrintRegLaneSizeOffset = 0,
+ kPrintRegLaneSizeMask = 7 << 0,
+
+ // The lane count.
+ kPrintRegAsScalar = 0,
+ kPrintRegAsDVector = 1 << 3,
+ kPrintRegAsQVector = 2 << 3,
+
+ kPrintRegAsVectorMask = 3 << 3,
+
+ // Indicate floating-point format lanes. (This flag is only supported for S-
+ // and D-sized lanes.)
+ kPrintRegAsFP = 1 << 5,
+
+ // Supported combinations.
+
+ kPrintXReg = kPrintRegLaneSizeX | kPrintRegAsScalar,
+ kPrintWReg = kPrintRegLaneSizeW | kPrintRegAsScalar,
+ kPrintSReg = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintDReg = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP,
+
+ kPrintReg1B = kPrintRegLaneSizeB | kPrintRegAsScalar,
+ kPrintReg8B = kPrintRegLaneSizeB | kPrintRegAsDVector,
+ kPrintReg16B = kPrintRegLaneSizeB | kPrintRegAsQVector,
+ kPrintReg1H = kPrintRegLaneSizeH | kPrintRegAsScalar,
+ kPrintReg4H = kPrintRegLaneSizeH | kPrintRegAsDVector,
+ kPrintReg8H = kPrintRegLaneSizeH | kPrintRegAsQVector,
+ kPrintReg1S = kPrintRegLaneSizeS | kPrintRegAsScalar,
+ kPrintReg2S = kPrintRegLaneSizeS | kPrintRegAsDVector,
+ kPrintReg4S = kPrintRegLaneSizeS | kPrintRegAsQVector,
+ kPrintReg1SFP = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintReg2SFP = kPrintRegLaneSizeS | kPrintRegAsDVector | kPrintRegAsFP,
+ kPrintReg4SFP = kPrintRegLaneSizeS | kPrintRegAsQVector | kPrintRegAsFP,
+ kPrintReg1D = kPrintRegLaneSizeD | kPrintRegAsScalar,
+ kPrintReg2D = kPrintRegLaneSizeD | kPrintRegAsQVector,
+ kPrintReg1DFP = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintReg2DFP = kPrintRegLaneSizeD | kPrintRegAsQVector | kPrintRegAsFP,
+ kPrintReg1Q = kPrintRegLaneSizeQ | kPrintRegAsScalar
+ };
+
+ unsigned GetPrintRegLaneSizeInBytesLog2(PrintRegisterFormat format) {
+ return (format & kPrintRegLaneSizeMask) >> kPrintRegLaneSizeOffset;
+ }
+
+ unsigned GetPrintRegLaneSizeInBytes(PrintRegisterFormat format) {
+ return 1 << GetPrintRegLaneSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegSizeInBytesLog2(PrintRegisterFormat format) {
+ if (format & kPrintRegAsDVector) return kDRegSizeInBytesLog2;
+ if (format & kPrintRegAsQVector) return kQRegSizeInBytesLog2;
+
+ // Scalar types.
+ return GetPrintRegLaneSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegSizeInBytes(PrintRegisterFormat format) {
+ return 1 << GetPrintRegSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegLaneCount(PrintRegisterFormat format) {
+ unsigned reg_size_log2 = GetPrintRegSizeInBytesLog2(format);
+ unsigned lane_size_log2 = GetPrintRegLaneSizeInBytesLog2(format);
+ VIXL_ASSERT(reg_size_log2 >= lane_size_log2);
+ return 1 << (reg_size_log2 - lane_size_log2);
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned reg_size,
+ unsigned lane_size);
+
+ PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned size) {
+ return GetPrintRegisterFormatForSize(size, size);
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormatForSizeFP(unsigned size) {
+ switch (size) {
+ default: VIXL_UNREACHABLE(); return kPrintDReg;
+ case kDRegSizeInBytes: return kPrintDReg;
+ case kSRegSizeInBytes: return kPrintSReg;
+ }
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormatTryFP(PrintRegisterFormat format) {
+ if ((GetPrintRegLaneSizeInBytes(format) == kSRegSizeInBytes) ||
+ (GetPrintRegLaneSizeInBytes(format) == kDRegSizeInBytes)) {
+ return static_cast<PrintRegisterFormat>(format | kPrintRegAsFP);
+ }
+ return format;
+ }
+
+ template<typename T>
+ PrintRegisterFormat GetPrintRegisterFormat(T value) {
+ return GetPrintRegisterFormatForSize(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(double value) {
+ VIXL_STATIC_ASSERT(sizeof(value) == kDRegSizeInBytes);
+ return GetPrintRegisterFormatForSizeFP(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(float value) {
+ VIXL_STATIC_ASSERT(sizeof(value) == kSRegSizeInBytes);
+ return GetPrintRegisterFormatForSizeFP(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(VectorFormat vform);
+
+ // Print all registers of the specified types.
+ void PrintRegisters();
+ void PrintVRegisters();
+ void PrintSystemRegisters();
+
+ // As above, but only print the registers that have been updated.
+ void PrintWrittenRegisters();
+ void PrintWrittenVRegisters();
+
+ // As above, but respect LOG_REGS and LOG_VREGS.
+ inline void LogWrittenRegisters() {
+ if (trace_parameters() & LOG_REGS) PrintWrittenRegisters();
+ }
+ inline void LogWrittenVRegisters() {
+ if (trace_parameters() & LOG_VREGS) PrintWrittenVRegisters();
+ }
+ inline void LogAllWrittenRegisters() {
+ LogWrittenRegisters();
+ LogWrittenVRegisters();
+ }
+
+ // Print individual register values (after update).
+ void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer);
+ void PrintVRegister(unsigned code, PrintRegisterFormat format);
+ void PrintSystemRegister(SystemRegister id);
+
+ // Like Print* (above), but respect trace_parameters().
+ void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) {
+ if (trace_parameters() & LOG_REGS) PrintRegister(code, r31mode);
+ }
+ void LogVRegister(unsigned code, PrintRegisterFormat format) {
+ if (trace_parameters() & LOG_VREGS) PrintVRegister(code, format);
+ }
+ void LogSystemRegister(SystemRegister id) {
+ if (trace_parameters() & LOG_SYSREGS) PrintSystemRegister(id);
+ }
+
+ // Print memory accesses.
+ void PrintRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format);
+ void PrintWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format);
+ void PrintVRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane);
+ void PrintVWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane);
+
+ // Like Print* (above), but respect trace_parameters().
+ void LogRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ if (trace_parameters() & LOG_REGS) PrintRead(address, reg_code, format);
+ }
+ void LogWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ if (trace_parameters() & LOG_WRITE) PrintWrite(address, reg_code, format);
+ }
+ void LogVRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane = 0) {
+ if (trace_parameters() & LOG_VREGS) {
+ PrintVRead(address, reg_code, format, lane);
+ }
+ }
+ void LogVWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane = 0) {
+ if (trace_parameters() & LOG_WRITE) {
+ PrintVWrite(address, reg_code, format, lane);
+ }
+ }
+
+ // Helper functions for register tracing.
+ void PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
+ int size_in_bytes = kXRegSizeInBytes);
+ void PrintVRegisterRawHelper(unsigned code, int bytes = kQRegSizeInBytes,
+ int lsb = 0);
+ void PrintVRegisterFPHelper(unsigned code, unsigned lane_size_in_bytes,
+ int lane_count = 1, int rightmost_lane = 0);
+
+ void DoUnreachable(const Instruction* instr);
+ void DoTrace(const Instruction* instr);
+ void DoLog(const Instruction* instr);
+
+ static const char* WRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static const char* XRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static const char* SRegNameForCode(unsigned code);
+ static const char* DRegNameForCode(unsigned code);
+ static const char* VRegNameForCode(unsigned code);
+
+ bool coloured_trace() const { return coloured_trace_; }
+ void set_coloured_trace(bool value);
+
+ int trace_parameters() const { return trace_parameters_; }
+ void set_trace_parameters(int parameters);
+
+ void set_instruction_stats(bool value);
+
+ // Clear the simulated local monitor to force the next store-exclusive
+ // instruction to fail.
+ void ClearLocalMonitor() {
+ local_monitor_.Clear();
+ }
+
+ void SilenceExclusiveAccessWarning() {
+ print_exclusive_access_warning_ = false;
+ }
+
+ protected:
+ const char* clr_normal;
+ const char* clr_flag_name;
+ const char* clr_flag_value;
+ const char* clr_reg_name;
+ const char* clr_reg_value;
+ const char* clr_vreg_name;
+ const char* clr_vreg_value;
+ const char* clr_memory_address;
+ const char* clr_warning;
+ const char* clr_warning_message;
+ const char* clr_printf;
+
+ // Simulation helpers ------------------------------------
+ bool ConditionPassed(Condition cond) {
+ switch (cond) {
+ case eq:
+ return Z();
+ case ne:
+ return !Z();
+ case hs:
+ return C();
+ case lo:
+ return !C();
+ case mi:
+ return N();
+ case pl:
+ return !N();
+ case vs:
+ return V();
+ case vc:
+ return !V();
+ case hi:
+ return C() && !Z();
+ case ls:
+ return !(C() && !Z());
+ case ge:
+ return N() == V();
+ case lt:
+ return N() != V();
+ case gt:
+ return !Z() && (N() == V());
+ case le:
+ return !(!Z() && (N() == V()));
+ case nv:
+ VIXL_FALLTHROUGH();
+ case al:
+ return true;
+ default:
+ VIXL_UNREACHABLE();
+ return false;
+ }
+ }
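+ // For example, after a compare of equal values Z is set, so ConditionPassed(eq)
+ // is true and ConditionPassed(ne) is false. Note that hi (C && !Z) corresponds
+ // to unsigned "higher" and ls is its logical complement.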
+
+ bool ConditionPassed(Instr cond) {
+ return ConditionPassed(static_cast<Condition>(cond));
+ }
+
+ bool ConditionFailed(Condition cond) {
+ return !ConditionPassed(cond);
+ }
+
+ void AddSubHelper(const Instruction* instr, int64_t op2);
+ uint64_t AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ uint64_t left,
+ uint64_t right,
+ int carry_in = 0);
+ void LogicalHelper(const Instruction* instr, int64_t op2);
+ void ConditionalCompareHelper(const Instruction* instr, int64_t op2);
+ void LoadStoreHelper(const Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStorePairHelper(const Instruction* instr, AddrMode addrmode);
+ template <typename T>
+ void CompareAndSwapHelper(const Instruction* instr);
+ template <typename T>
+ void CompareAndSwapPairHelper(const Instruction* instr);
+ template <typename T>
+ void AtomicMemorySimpleHelper(const Instruction* instr);
+ template <typename T>
+ void AtomicMemorySwapHelper(const Instruction* instr);
+ template <typename T>
+ void LoadAcquireRCpcHelper(const Instruction* instr);
+ uintptr_t AddressModeHelper(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void NEONLoadStoreMultiStructHelper(const Instruction* instr,
+ AddrMode addr_mode);
+ void NEONLoadStoreSingleStructHelper(const Instruction* instr,
+ AddrMode addr_mode);
+
+ uint64_t AddressUntag(uint64_t address) {
+ return address & ~kAddressTagMask;
+ }
+
+ template <typename T>
+ T* AddressUntag(T* address) {
+ uintptr_t address_raw = reinterpret_cast<uintptr_t>(address);
+ return reinterpret_cast<T*>(AddressUntag(address_raw));
+ }
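+ // Illustrative example, assuming kAddressTagMask covers the top byte (AArch64
+ // top-byte-ignore): AddressUntag(UINT64_C(0xab00123456789abc)) yields
+ // 0x0000123456789abc, leaving the low 56 bits untouched.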
+
+ int64_t ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t Rotate(unsigned reg_width,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t ExtendValue(unsigned reg_width,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift = 0);
+ uint16_t PolynomialMult(uint8_t op1, uint8_t op2);
+
+ void ld1(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr);
+ void ld1(VectorFormat vform,
+ LogicVRegister dst,
+ int index,
+ uint64_t addr);
+ void ld1r(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr);
+ void ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr);
+ void ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ int index,
+ uint64_t addr);
+ void ld2r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr);
+ void ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr);
+ void ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ int index,
+ uint64_t addr);
+ void ld3r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr);
+ void ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr);
+ void ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ int index,
+ uint64_t addr);
+ void ld4r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr);
+ void st1(VectorFormat vform,
+ LogicVRegister src,
+ uint64_t addr);
+ void st1(VectorFormat vform,
+ LogicVRegister src,
+ int index,
+ uint64_t addr);
+ void st2(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ uint64_t addr);
+ void st2(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ int index,
+ uint64_t addr);
+ void st3(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ uint64_t addr);
+ void st3(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ int index,
+ uint64_t addr);
+ void st4(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ LogicVRegister src4,
+ uint64_t addr);
+ void st4(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ LogicVRegister src4,
+ int index,
+ uint64_t addr);
+ LogicVRegister cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ int imm,
+ Condition cond);
+ LogicVRegister cmptst(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister add(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister pmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+
+ typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmulx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sub(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister and_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister orn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister eor(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm);
+ LogicVRegister bif(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister cls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister clz(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister cnt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister not_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rbit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int revSize);
+ LogicVRegister rev16(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev32(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev64(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister addlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool is_signed,
+ bool do_accumulate);
+ LogicVRegister saddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uaddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister ext(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister ins_element(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ int src_index);
+ LogicVRegister ins_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ uint64_t imm);
+ LogicVRegister dup_element(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int src_index);
+ LogicVRegister dup_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister mov(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister movi(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister mvni(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm);
+ LogicVRegister sshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ushl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister sminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max);
+ LogicVRegister smax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister smin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister sminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister smaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister sminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister addv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uaddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister saddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister smaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind);
+ LogicVRegister uaddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max);
+ LogicVRegister umax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister umin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister umaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister umaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister trn1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister trn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister zip1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister zip2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uzp1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uzp2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister shl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister scvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding rounding_mode);
+ LogicVRegister ucvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding rounding_mode);
+ LogicVRegister sshll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sshll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister shll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister shll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister ushll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ushll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sli(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sri(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sshr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ushr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ssra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister usra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister srsra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ursra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister suqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister usqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshlu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister abs(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister neg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister extractnarrow(VectorFormat vform,
+ LogicVRegister dst,
+ bool dstIsSigned,
+ const LogicVRegister& src,
+ bool srcIsSigned);
+ LogicVRegister xtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqxtun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister absdiff(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool issigned);
+ LogicVRegister saba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister shrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister shrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister rshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister rshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool round = true);
+ LogicVRegister sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ #define NEON_3VREG_LOGIC_LIST(V) \
+ V(addhn) \
+ V(addhn2) \
+ V(raddhn) \
+ V(raddhn2) \
+ V(subhn) \
+ V(subhn2) \
+ V(rsubhn) \
+ V(rsubhn2) \
+ V(pmull) \
+ V(pmull2) \
+ V(sabal) \
+ V(sabal2) \
+ V(uabal) \
+ V(uabal2) \
+ V(sabdl) \
+ V(sabdl2) \
+ V(uabdl) \
+ V(uabdl2) \
+ V(smull) \
+ V(smull2) \
+ V(umull) \
+ V(umull2) \
+ V(smlal) \
+ V(smlal2) \
+ V(umlal) \
+ V(umlal2) \
+ V(smlsl) \
+ V(smlsl2) \
+ V(umlsl) \
+ V(umlsl2) \
+ V(sqdmlal) \
+ V(sqdmlal2) \
+ V(sqdmlsl) \
+ V(sqdmlsl2) \
+ V(sqdmull) \
+ V(sqdmull2)
+
+ #define DEFINE_LOGIC_FUNC(FXN) \
+ LogicVRegister FXN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2);
+ NEON_3VREG_LOGIC_LIST(DEFINE_LOGIC_FUNC)
+ #undef DEFINE_LOGIC_FUNC
+
+ #define NEON_FP3SAME_LIST(V) \
+ V(fadd, FPAdd, false) \
+ V(fsub, FPSub, true) \
+ V(fmul, FPMul, true) \
+ V(fmulx, FPMulx, true) \
+ V(fdiv, FPDiv, true) \
+ V(fmax, FPMax, false) \
+ V(fmin, FPMin, false) \
+ V(fmaxnm, FPMaxNM, false) \
+ V(fminnm, FPMinNM, false)
+
+ #define DECLARE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \
+ template <typename T> \
+ LogicVRegister FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2); \
+ LogicVRegister FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2);
+ NEON_FP3SAME_LIST(DECLARE_NEON_FP_VECTOR_OP)
+ #undef DECLARE_NEON_FP_VECTOR_OP
+
+ #define NEON_FPPAIRWISE_LIST(V) \
+ V(faddp, fadd, FPAdd) \
+ V(fmaxp, fmax, FPMax) \
+ V(fmaxnmp, fmaxnm, FPMaxNM) \
+ V(fminp, fmin, FPMin) \
+ V(fminnmp, fminnm, FPMinNM)
+
+ #define DECLARE_NEON_FP_PAIR_OP(FNP, FN, OP) \
+ LogicVRegister FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2); \
+ LogicVRegister FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src);
+ NEON_FPPAIRWISE_LIST(DECLARE_NEON_FP_PAIR_OP)
+ #undef DECLARE_NEON_FP_PAIR_OP
+
+ template <typename T>
+ LogicVRegister frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fnmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+
+ template <typename T>
+ LogicVRegister fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fabscmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fcmp_zero(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ Condition cond);
+
+ template <typename T>
+ LogicVRegister fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ template <typename T>
+ LogicVRegister frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ template <typename T>
+ LogicVRegister fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fabd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frint(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ bool inexact_exception = false);
+ LogicVRegister fcvts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits = 0);
+ LogicVRegister fcvtu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits = 0);
+ LogicVRegister fcvtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtxn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtxn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fsqrt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frsqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding);
+ LogicVRegister ursqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister urecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+
+ typedef float (Simulator::*FPMinMaxOp)(float a, float b);
+
+ LogicVRegister fminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPMinMaxOp Op);
+
+ LogicVRegister fminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fminnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fmaxnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+
+ static const uint32_t CRC32_POLY = 0x04C11DB7;
+ static const uint32_t CRC32C_POLY = 0x1EDC6F41;
+ uint32_t Poly32Mod2(unsigned n, uint64_t data, uint32_t poly);
+ template <typename T>
+ uint32_t Crc32Checksum(uint32_t acc, T val, uint32_t poly);
+ uint32_t Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly);
+
+ void SysOp_W(int op, int64_t val);
+
+ template <typename T>
+ T FPRecipSqrtEstimate(T op);
+ template <typename T>
+ T FPRecipEstimate(T op, FPRounding rounding);
+ template <typename T, typename R>
+ R FPToFixed(T op, int fbits, bool is_signed, FPRounding rounding);
+
+ void FPCompare(double val0, double val1, FPTrapFlags trap);
+ double FPRoundInt(double value, FPRounding round_mode);
+ double recip_sqrt_estimate(double a);
+ double recip_estimate(double a);
+ double FPRecipSqrtEstimate(double a);
+ double FPRecipEstimate(double a);
+ double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
+ double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
+ float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
+ float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
+ int32_t FPToInt32(double value, FPRounding rmode);
+ int64_t FPToInt64(double value, FPRounding rmode);
+ uint32_t FPToUInt32(double value, FPRounding rmode);
+ uint64_t FPToUInt64(double value, FPRounding rmode);
+ int32_t FPToFixedJS(double value);
+
+ template <typename T>
+ T FPAdd(T op1, T op2);
+
+ template <typename T>
+ T FPDiv(T op1, T op2);
+
+ template <typename T>
+ T FPMax(T a, T b);
+
+ template <typename T>
+ T FPMaxNM(T a, T b);
+
+ template <typename T>
+ T FPMin(T a, T b);
+
+ template <typename T>
+ T FPMinNM(T a, T b);
+
+ template <typename T>
+ T FPMul(T op1, T op2);
+
+ template <typename T>
+ T FPMulx(T op1, T op2);
+
+ template <typename T>
+ T FPMulAdd(T a, T op1, T op2);
+
+ template <typename T>
+ T FPSqrt(T op);
+
+ template <typename T>
+ T FPSub(T op1, T op2);
+
+ template <typename T>
+ T FPRecipStepFused(T op1, T op2);
+
+ template <typename T>
+ T FPRSqrtStepFused(T op1, T op2);
+
+ // This doesn't do anything at the moment. We'll need it if we want support
+ // for cumulative exception bits or floating-point exceptions.
+ void FPProcessException() { }
+
+ bool FPProcessNaNs(const Instruction* instr);
+
+ // Pseudo Printf instruction
+ void DoPrintf(const Instruction* instr);
+
+ // Processor state ---------------------------------------
+
+ // Simulated monitors for exclusive access instructions.
+ SimExclusiveLocalMonitor local_monitor_;
+ SimExclusiveGlobalMonitor global_monitor_;
+
+ // Output stream.
+ FILE* stream_;
+ PrintDisassembler* print_disasm_;
+
+ // Instruction statistics instrumentation.
+ Instrument* instrumentation_;
+
+ // General purpose registers. Register 31 is the stack pointer.
+ SimRegister registers_[kNumberOfRegisters];
+
+ // Vector registers
+ SimVRegister vregisters_[kNumberOfVRegisters];
+
+ // Program Status Register.
+ // bits[31, 27]: Condition flags N, Z, C, and V.
+ // (Negative, Zero, Carry, Overflow)
+ SimSystemRegister nzcv_;
+
+ // Floating-Point Control Register
+ SimSystemRegister fpcr_;
+
+ // Only a subset of FPCR features are supported by the simulator. This helper
+ // checks that the FPCR settings are supported.
+ //
+ // This is checked when floating-point instructions are executed, not when
+ // FPCR is set. This allows generated code to modify FPCR for external
+ // functions, or to save and restore it when entering and leaving generated
+ // code.
+ void AssertSupportedFPCR() {
+ VIXL_ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
+ VIXL_ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
+
+ // The simulator does not support half-precision operations so fpcr().AHP()
+ // is irrelevant, and is not checked here.
+ }
+
+ static int CalcNFlag(uint64_t result, unsigned reg_size) {
+ return (result >> (reg_size - 1)) & 1;
+ }
+
+ static int CalcZFlag(uint64_t result) {
+ return (result == 0) ? 1 : 0;
+ }
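+ // For example, CalcNFlag(0x80000000, 32) is 1 (the 32-bit sign bit is set) and
+ // CalcNFlag(1, 32) is 0, while CalcZFlag(0) is 1 and CalcZFlag(1) is 0.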
+
+ static const uint32_t kConditionFlagsMask = 0xf0000000;
+
+ // Stack
+ byte* stack_;
+ static const int stack_protection_size_ = 512 * KBytes;
+ static const int stack_size_ = (2 * MBytes) + (2 * stack_protection_size_);
+ byte* stack_limit_;
+
+ Decoder* decoder_;
+ // Indicates if the pc has been modified by the instruction and should not be
+ // automatically incremented.
+ bool pc_modified_;
+ const Instruction* pc_;
+
+ static const char* xreg_names[];
+ static const char* wreg_names[];
+ static const char* sreg_names[];
+ static const char* dreg_names[];
+ static const char* vreg_names[];
+
+ static const Instruction* kEndOfSimAddress;
+
+ private:
+ template <typename T>
+ static T FPDefaultNaN();
+
+ // Standard NaN processing.
+ template <typename T>
+ T FPProcessNaN(T op) {
+ VIXL_ASSERT(std::isnan(op));
+ if (IsSignallingNaN(op)) {
+ FPProcessException();
+ }
+ return DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
+ }
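+ // For example, the double signalling NaN 0x7ff0000000000001 triggers
+ // FPProcessException() and is returned quieted (0x7ff8000000000001, assuming
+ // ToQuietNaN sets the top mantissa bit), unless FPCR.DN is set, in which case
+ // the fixed default NaN for the type is returned instead.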
+
+ template <typename T>
+ T FPProcessNaNs(T op1, T op2) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op1)) {
+ VIXL_ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ VIXL_ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else {
+ return 0.0;
+ }
+ }
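+ // For example, if op1 is a quiet NaN and op2 is a signalling NaN, op2 is the
+ // one processed: signalling NaNs take priority over quiet NaNs, op1 over op2
+ // within each class, and 0.0 is returned only when neither operand is a NaN.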
+
+ template <typename T>
+ T FPProcessNaNs3(T op1, T op2, T op3) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (IsSignallingNaN(op3)) {
+ return FPProcessNaN(op3);
+ } else if (std::isnan(op1)) {
+ VIXL_ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ VIXL_ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op3)) {
+ VIXL_ASSERT(IsQuietNaN(op3));
+ return FPProcessNaN(op3);
+ } else {
+ return 0.0;
+ }
+ }
+
+ bool coloured_trace_;
+
+ // A set of TraceParameters flags.
+ int trace_parameters_;
+
+ // Indicates whether the instruction instrumentation is active.
+ bool instruction_stats_;
+
+ // Indicates whether the exclusive-access warning has been printed.
+ bool print_exclusive_access_warning_;
+ void PrintExclusiveAccessWarning();
+
+ // Indicates that the simulator ran out of memory at some point.
+ // Data structures may not be fully allocated.
+ bool oom_;
+
+ public:
+ // True if the simulator ran out of memory during or after construction.
+ bool oom() const { return oom_; }
+
+ protected:
+ mozilla::Vector<int64_t, 0, js::SystemAllocPolicy> spStack_;
+};
+
+} // namespace vixl
+
+namespace js {
+namespace jit {
+
+class SimulatorProcess
+{
+ public:
+ static SimulatorProcess* singleton_;
+
+ SimulatorProcess()
+ : lock_(mutexid::Arm64SimulatorLock)
+ , redirection_(nullptr)
+ {}
+
+ // Synchronizes access between main thread and compilation threads.
+ js::Mutex lock_ MOZ_UNANNOTATED;
+ vixl::Redirection* redirection_;
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ // For each simulator, record the code ranges that other threads have
+ // registered as invalidated (pending instruction-cache flushes).
+ struct ICacheFlush {
+ void* start;
+ size_t length;
+ };
+ using ICacheFlushes = mozilla::Vector<ICacheFlush, 2>;
+ struct SimFlushes {
+ vixl::Simulator* thread;
+ ICacheFlushes records;
+ };
+ mozilla::Vector<SimFlushes, 1> pendingFlushes_;
+
+ static void recordICacheFlush(void* start, size_t length);
+ static void membarrier();
+ static ICacheFlushes& getICacheFlushes(vixl::Simulator* sim);
+ [[nodiscard]] static bool registerSimulator(vixl::Simulator* sim);
+ static void unregisterSimulator(vixl::Simulator* sim);
+#endif
+
+ static void setRedirection(vixl::Redirection* redirection) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ singleton_->redirection_ = redirection;
+ }
+
+ static vixl::Redirection* redirection() {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ return singleton_->redirection_;
+ }
+
+ static bool initialize() {
+ singleton_ = js_new<SimulatorProcess>();
+ return !!singleton_;
+ }
+ static void destroy() {
+ js_delete(singleton_);
+ singleton_ = nullptr;
+ }
+};
+
+// Protects the icache and redirection properties of the simulator.
+class AutoLockSimulatorCache : public js::LockGuard<js::Mutex>
+{
+ using Base = js::LockGuard<js::Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache()
+ : Base(SimulatorProcess::singleton_->lock_)
+ {
+ }
+};
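+// Typical usage (illustrative sketch): hold the lock for the duration of any
+// redirection update, e.g.
+//   AutoLockSimulatorCache lock;
+//   SimulatorProcess::setRedirection(redirection);
+// which satisfies the assertOwnedByCurrentThread() checks above.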
+
+} // namespace jit
+} // namespace js
+
+#endif // JS_SIMULATOR_ARM64
+#endif // VIXL_A64_SIMULATOR_A64_H_
diff --git a/js/src/jit/arm64/vixl/Utils-vixl.cpp b/js/src/jit/arm64/vixl/Utils-vixl.cpp
new file mode 100644
index 0000000000..381c3501d1
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Utils-vixl.cpp
@@ -0,0 +1,555 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+#include <cstdio>
+
+namespace vixl {
+
+// The default NaN values (for FPCR.DN=1).
+const double kFP64DefaultNaN = RawbitsToDouble(UINT64_C(0x7ff8000000000000));
+const float kFP32DefaultNaN = RawbitsToFloat(0x7fc00000);
+const Float16 kFP16DefaultNaN = RawbitsToFloat16(0x7e00);
+
+// Floating-point zero values.
+const Float16 kFP16PositiveZero = RawbitsToFloat16(0x0);
+const Float16 kFP16NegativeZero = RawbitsToFloat16(0x8000);
+
+// Floating-point infinity values.
+const Float16 kFP16PositiveInfinity = RawbitsToFloat16(0x7c00);
+const Float16 kFP16NegativeInfinity = RawbitsToFloat16(0xfc00);
+const float kFP32PositiveInfinity = RawbitsToFloat(0x7f800000);
+const float kFP32NegativeInfinity = RawbitsToFloat(0xff800000);
+const double kFP64PositiveInfinity =
+ RawbitsToDouble(UINT64_C(0x7ff0000000000000));
+const double kFP64NegativeInfinity =
+ RawbitsToDouble(UINT64_C(0xfff0000000000000));
+
+bool IsZero(Float16 value) {
+ uint16_t bits = Float16ToRawbits(value);
+ return (bits == Float16ToRawbits(kFP16PositiveZero) ||
+ bits == Float16ToRawbits(kFP16NegativeZero));
+}
+
+uint16_t Float16ToRawbits(Float16 value) { return value.rawbits_; }
+
+uint32_t FloatToRawbits(float value) {
+ uint32_t bits = 0;
+ memcpy(&bits, &value, 4);
+ return bits;
+}
+
+
+uint64_t DoubleToRawbits(double value) {
+ uint64_t bits = 0;
+ memcpy(&bits, &value, 8);
+ return bits;
+}
+
+
+Float16 RawbitsToFloat16(uint16_t bits) {
+ Float16 f;
+ f.rawbits_ = bits;
+ return f;
+}
+
+
+float RawbitsToFloat(uint32_t bits) {
+ float value = 0.0;
+ memcpy(&value, &bits, 4);
+ return value;
+}
+
+
+double RawbitsToDouble(uint64_t bits) {
+ double value = 0.0;
+ memcpy(&value, &bits, 8);
+ return value;
+}
+
+
+uint32_t Float16Sign(internal::SimFloat16 val) {
+ uint16_t rawbits = Float16ToRawbits(val);
+ return ExtractUnsignedBitfield32(15, 15, rawbits);
+}
+
+
+uint32_t Float16Exp(internal::SimFloat16 val) {
+ uint16_t rawbits = Float16ToRawbits(val);
+ return ExtractUnsignedBitfield32(14, 10, rawbits);
+}
+
+uint32_t Float16Mantissa(internal::SimFloat16 val) {
+ uint16_t rawbits = Float16ToRawbits(val);
+ return ExtractUnsignedBitfield32(9, 0, rawbits);
+}
+
+
+uint32_t FloatSign(float val) {
+ uint32_t rawbits = FloatToRawbits(val);
+ return ExtractUnsignedBitfield32(31, 31, rawbits);
+}
+
+
+uint32_t FloatExp(float val) {
+ uint32_t rawbits = FloatToRawbits(val);
+ return ExtractUnsignedBitfield32(30, 23, rawbits);
+}
+
+
+uint32_t FloatMantissa(float val) {
+ uint32_t rawbits = FloatToRawbits(val);
+ return ExtractUnsignedBitfield32(22, 0, rawbits);
+}
+
+
+uint32_t DoubleSign(double val) {
+ uint64_t rawbits = DoubleToRawbits(val);
+ return static_cast<uint32_t>(ExtractUnsignedBitfield64(63, 63, rawbits));
+}
+
+
+uint32_t DoubleExp(double val) {
+ uint64_t rawbits = DoubleToRawbits(val);
+ return static_cast<uint32_t>(ExtractUnsignedBitfield64(62, 52, rawbits));
+}
+
+
+uint64_t DoubleMantissa(double val) {
+ uint64_t rawbits = DoubleToRawbits(val);
+ return ExtractUnsignedBitfield64(51, 0, rawbits);
+}
+
+
+internal::SimFloat16 Float16Pack(uint16_t sign,
+ uint16_t exp,
+ uint16_t mantissa) {
+ uint16_t bits = (sign << 15) | (exp << 10) | mantissa;
+ return RawbitsToFloat16(bits);
+}
+
+
+float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
+ uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
+ return RawbitsToFloat(bits);
+}
+
+
+double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
+ uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
+ return RawbitsToDouble(bits);
+}
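+// For example, DoublePack(0, 1023, 0) reassembles the raw bits
+// 0x3ff0000000000000, i.e. 1.0, since the IEEE-754 double exponent bias is
+// 1023; FloatPack(1, 127, 0) likewise yields -1.0f.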
+
+
+int Float16Classify(Float16 value) {
+ uint16_t bits = Float16ToRawbits(value);
+ uint16_t exponent_max = (1 << 5) - 1;
+ uint16_t exponent_mask = exponent_max << 10;
+ uint16_t mantissa_mask = (1 << 10) - 1;
+
+ uint16_t exponent = (bits & exponent_mask) >> 10;
+ uint16_t mantissa = bits & mantissa_mask;
+ if (exponent == 0) {
+ if (mantissa == 0) {
+ return FP_ZERO;
+ }
+ return FP_SUBNORMAL;
+ } else if (exponent == exponent_max) {
+ if (mantissa == 0) {
+ return FP_INFINITE;
+ }
+ return FP_NAN;
+ }
+ return FP_NORMAL;
+}
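+// For example, Float16Classify maps 0x3c00 (1.0) to FP_NORMAL, 0x0001 to
+// FP_SUBNORMAL, 0x7c00 to FP_INFINITE, and 0x7e00 (the default NaN above) to
+// FP_NAN.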
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+ VIXL_ASSERT((reg_size % 8) == 0);
+ int count = 0;
+ for (unsigned i = 0; i < (reg_size / 16); i++) {
+ if ((imm & 0xffff) == 0) {
+ count++;
+ }
+ imm >>= 16;
+ }
+ return count;
+}
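+// For example, CountClearHalfWords(0x0000ffff00000000, 64) returns 3: of the
+// four 16-bit halfwords, only the one holding 0xffff is non-zero.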
+
+
+int BitCount(uint64_t value) { return CountSetBits(value); }
+
+// Float16 definitions.
+
+Float16::Float16(double dvalue) {
+ rawbits_ =
+ Float16ToRawbits(FPToFloat16(dvalue, FPTieEven, kIgnoreDefaultNaN));
+}
+
+namespace internal {
+
+SimFloat16 SimFloat16::operator-() const {
+ return RawbitsToFloat16(rawbits_ ^ 0x8000);
+}
+
+// SimFloat16 definitions.
+SimFloat16 SimFloat16::operator+(SimFloat16 rhs) const {
+ return static_cast<double>(*this) + static_cast<double>(rhs);
+}
+
+SimFloat16 SimFloat16::operator-(SimFloat16 rhs) const {
+ return static_cast<double>(*this) - static_cast<double>(rhs);
+}
+
+SimFloat16 SimFloat16::operator*(SimFloat16 rhs) const {
+ return static_cast<double>(*this) * static_cast<double>(rhs);
+}
+
+SimFloat16 SimFloat16::operator/(SimFloat16 rhs) const {
+ return static_cast<double>(*this) / static_cast<double>(rhs);
+}
+
+bool SimFloat16::operator<(SimFloat16 rhs) const {
+ return static_cast<double>(*this) < static_cast<double>(rhs);
+}
+
+bool SimFloat16::operator>(SimFloat16 rhs) const {
+ return static_cast<double>(*this) > static_cast<double>(rhs);
+}
+
+bool SimFloat16::operator==(SimFloat16 rhs) const {
+ if (IsNaN(*this) || IsNaN(rhs)) {
+ return false;
+ } else if (IsZero(rhs) && IsZero(*this)) {
+ // +0 and -0 should be treated as equal.
+ return true;
+ }
+ return this->rawbits_ == rhs.rawbits_;
+}
+
+bool SimFloat16::operator!=(SimFloat16 rhs) const { return !(*this == rhs); }
+
+bool SimFloat16::operator==(double rhs) const {
+ return static_cast<double>(*this) == static_cast<double>(rhs);
+}
+
+SimFloat16::operator double() const {
+ return FPToDouble(*this, kIgnoreDefaultNaN);
+}
+
+Int64 BitCount(Uint32 value) { return CountSetBits(value.Get()); }
+
+} // namespace internal
+
+float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception) {
+ uint16_t bits = Float16ToRawbits(value);
+ uint32_t sign = bits >> 15;
+ uint32_t exponent =
+ ExtractUnsignedBitfield32(kFloat16MantissaBits + kFloat16ExponentBits - 1,
+ kFloat16MantissaBits,
+ bits);
+ uint32_t mantissa =
+ ExtractUnsignedBitfield32(kFloat16MantissaBits - 1, 0, bits);
+
+ switch (Float16Classify(value)) {
+ case FP_ZERO:
+ return (sign == 0) ? 0.0f : -0.0f;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity;
+
+ case FP_SUBNORMAL: {
+ // Calculate shift required to put mantissa into the most-significant bits
+ // of the destination mantissa.
+ int shift = CountLeadingZeros(mantissa << (32 - 10));
+
+ // Shift mantissa and discard implicit '1'.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1;
+ mantissa &= (1 << kFloatMantissaBits) - 1;
+
+ // Adjust the exponent for the shift applied, and rebias.
+ exponent = exponent - shift + (-15 + 127);
+ break;
+ }
+
+ case FP_NAN:
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
+ exponent = (1 << kFloatExponentBits) - 1;
+
+ // Increase bits in mantissa, making low-order bits 0.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+ mantissa |= 1 << 22; // Force a quiet NaN.
+ break;
+
+ case FP_NORMAL:
+ // Increase bits in mantissa, making low-order bits 0.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+
+ // Change exponent bias.
+ exponent += (-15 + 127);
+ break;
+
+ default:
+ VIXL_UNREACHABLE();
+ }
+ return RawbitsToFloat((sign << 31) | (exponent << kFloatMantissaBits) |
+ mantissa);
+}
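+// Worked example for the FP_NORMAL case above: half-precision 1.0 is 0x3c00,
+// i.e. sign 0, biased exponent 15 and a zero mantissa; rebiasing with
+// (-15 + 127) gives a float exponent of 127, so the result reassembles to
+// 0x3f800000, i.e. 1.0f.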
+
+
+float FPToFloat(double value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception) {
+ // Only the FPTieEven and FPRoundOdd rounding modes are handled here.
+ VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+ USE(round_mode);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint64_t raw = DoubleToRawbits(value);
+
+ uint32_t sign = raw >> 63;
+ uint32_t exponent = (1 << 8) - 1;
+ uint32_t payload =
+ static_cast<uint32_t>(ExtractUnsignedBitfield64(50, 52 - 23, raw));
+ payload |= (1 << 22); // Force a quiet NaN.
+
+ return RawbitsToFloat((sign << 31) | (exponent << 23) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_INFINITE: {
+ // In a C++ cast, any value representable in the target type will be
+ // unchanged. This is always the case for +/-0.0 and infinities.
+ return static_cast<float>(value);
+ }
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-float as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+ uint64_t raw = DoubleToRawbits(value);
+ // Extract the IEEE-754 double components.
+ uint32_t sign = raw >> 63;
+ // Extract the exponent and remove the IEEE-754 encoding bias.
+ int32_t exponent =
+ static_cast<int32_t>(ExtractUnsignedBitfield64(62, 52, raw)) - 1023;
+ // Extract the mantissa and add the implicit '1' bit.
+ uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
+ if (std::fpclassify(value) == FP_NORMAL) {
+ mantissa |= (UINT64_C(1) << 52);
+ }
+ return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return value;
+}
+
+// TODO: We should consider implementing a full FPToDouble(Float16)
+// conversion function (for performance reasons).
+double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception) {
+ // We can rely on implicit float to double conversion here.
+ return FPToFloat(value, DN, exception);
+}
+
+
+double FPToDouble(float value, UseDefaultNaN DN, bool* exception) {
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP64DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
+ uint32_t raw = FloatToRawbits(value);
+
+ uint64_t sign = raw >> 31;
+ uint64_t exponent = (1 << 11) - 1;
+ uint64_t payload = ExtractUnsignedBitfield64(21, 0, raw);
+ payload <<= (52 - 23); // The unused low-order bits should be 0.
+ payload |= (UINT64_C(1) << 51); // Force a quiet NaN.
+
+ return RawbitsToDouble((sign << 63) | (exponent << 52) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ case FP_INFINITE: {
+ // All other inputs are preserved in a standard cast, because every value
+ // representable using an IEEE-754 float is also representable using an
+ // IEEE-754 double.
+ return static_cast<double>(value);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return static_cast<double>(value);
+}
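
The NaN rule above (propagate the sign, shift the payload into the high-order mantissa bits, force the top mantissa bit to make the result quiet) can be checked with a small standalone computation on raw bits, assuming default-NaN mode is off:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t snan_bits = 0x7F800001;  // float signalling NaN, payload = 1.
  uint64_t sign = uint64_t(snan_bits >> 31);
  uint64_t exponent = (1u << 11) - 1;
  uint64_t payload = uint64_t(snan_bits & 0x3FFFFF) << (52 - 23);
  payload |= (UINT64_C(1) << 51);  // Quiet bit.
  uint64_t qnan_bits = (sign << 63) | (exponent << 52) | payload;
  printf("0x%016llx\n", (unsigned long long)qnan_bits);  // 0x7ff8000020000000
}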
+
+
+Float16 FPToFloat16(float value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception) {
+ // Only the FPTieEven rounding mode is implemented.
+ VIXL_ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ uint32_t raw = FloatToRawbits(value);
+ int32_t sign = raw >> 31;
+ int32_t exponent = ExtractUnsignedBitfield32(30, 23, raw) - 127;
+ uint32_t mantissa = ExtractUnsignedBitfield32(22, 0, raw);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
+ : Float16ToRawbits(kFP16NegativeInfinity);
+ result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits);
+ result |= (1 << 9); // Force a quiet NaN;
+ return RawbitsToFloat16(result);
+ }
+
+ case FP_ZERO:
+ return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert float-to-half as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+
+ // Add the implicit '1' bit to the mantissa.
+ mantissa += (1 << 23);
+ return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return kFP16PositiveZero;
+}
+
+
+Float16 FPToFloat16(double value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception) {
+ // Only the FPTieEven rounding mode is implemented.
+ VIXL_ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ uint64_t raw = DoubleToRawbits(value);
+ int32_t sign = raw >> 63;
+ int64_t exponent = ExtractUnsignedBitfield64(62, 52, raw) - 1023;
+ uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
+ : Float16ToRawbits(kFP16NegativeInfinity);
+ result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits);
+ result |= (1 << 9); // Force a quiet NaN;
+ return RawbitsToFloat16(result);
+ }
+
+ case FP_ZERO:
+ return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-half as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+
+ // Add the implicit '1' bit to the mantissa.
+ mantissa += (UINT64_C(1) << 52);
+ return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return kFP16PositiveZero;
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Utils-vixl.h b/js/src/jit/arm64/vixl/Utils-vixl.h
new file mode 100644
index 0000000000..d1f6a835f8
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Utils-vixl.h
@@ -0,0 +1,1283 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_UTILS_H
+#define VIXL_UTILS_H
+
+#include "mozilla/FloatingPoint.h"
+
+#include <cmath>
+#include <cstring>
+#include <limits>
+#include <vector>
+
+#include "jit/arm64/vixl/CompilerIntrinsics-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+namespace vixl {
+
+// Macros for compile-time format checking.
+#if GCC_VERSION_OR_NEWER(4, 4, 0)
+#define PRINTF_CHECK(format_index, varargs_index) \
+ __attribute__((format(gnu_printf, format_index, varargs_index)))
+#else
+#define PRINTF_CHECK(format_index, varargs_index)
+#endif
+
+#ifdef __GNUC__
+#define VIXL_HAS_DEPRECATED_WITH_MSG
+#elif defined(__clang__)
+#ifdef __has_extension
+#define VIXL_HAS_DEPRECATED_WITH_MSG
+#endif
+#endif
+
+#ifdef VIXL_HAS_DEPRECATED_WITH_MSG
+#define VIXL_DEPRECATED(replaced_by, declarator) \
+ __attribute__((deprecated("Use \"" replaced_by "\" instead"))) declarator
+#else
+#define VIXL_DEPRECATED(replaced_by, declarator) declarator
+#endif
+
+#ifdef VIXL_DEBUG
+#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_UNREACHABLE()
+#else
+#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_FALLTHROUGH()
+#endif
+
+template <typename T, size_t n>
+size_t ArrayLength(const T (&)[n]) {
+ return n;
+}
+
+// Check number width.
+// TODO: Refactor these using templates.
+inline bool IsIntN(unsigned n, uint32_t x) {
+ VIXL_ASSERT((0 < n) && (n < 32));
+ uint32_t limit = UINT32_C(1) << (n - 1);
+ return x < limit;
+}
+inline bool IsIntN(unsigned n, int32_t x) {
+ VIXL_ASSERT((0 < n) && (n < 32));
+ int32_t limit = INT32_C(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+inline bool IsIntN(unsigned n, uint64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ uint64_t limit = UINT64_C(1) << (n - 1);
+ return x < limit;
+}
+inline bool IsIntN(unsigned n, int64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ int64_t limit = INT64_C(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+VIXL_DEPRECATED("IsIntN", inline bool is_intn(unsigned n, int64_t x)) {
+ return IsIntN(n, x);
+}
+
+inline bool IsUintN(unsigned n, uint32_t x) {
+ VIXL_ASSERT((0 < n) && (n < 32));
+ return !(x >> n);
+}
+inline bool IsUintN(unsigned n, int32_t x) {
+ VIXL_ASSERT((0 < n) && (n < 32));
+ // Convert to an unsigned integer to avoid implementation-defined behavior.
+ return !(static_cast<uint32_t>(x) >> n);
+}
+inline bool IsUintN(unsigned n, uint64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ return !(x >> n);
+}
+inline bool IsUintN(unsigned n, int64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ // Convert to an unsigned integer to avoid implementation-defined behavior.
+ return !(static_cast<uint64_t>(x) >> n);
+}
+VIXL_DEPRECATED("IsUintN", inline bool is_uintn(unsigned n, int64_t x)) {
+ return IsUintN(n, x);
+}
+
+inline uint64_t TruncateToUintN(unsigned n, uint64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ return static_cast<uint64_t>(x) & ((UINT64_C(1) << n) - 1);
+}
+VIXL_DEPRECATED("TruncateToUintN",
+ inline uint64_t truncate_to_intn(unsigned n, int64_t x)) {
+ return TruncateToUintN(n, x);
+}
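
These predicates are typically used to decide whether a constant fits in an n-bit immediate field. A hand-rolled equivalent (with the same n < 64 restriction as above) applied to a few concrete values:

#include <cstdint>
#include <cstdio>

static bool FitsSignedN(unsigned n, int64_t x) {
  int64_t limit = INT64_C(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}
static bool FitsUnsignedN(unsigned n, uint64_t x) { return (x >> n) == 0; }

int main() {
  printf("%d\n", FitsSignedN(8, 127));      // 1: fits in a signed 8-bit field.
  printf("%d\n", FitsSignedN(8, 128));      // 0: signed 8 bits top out at 127.
  printf("%d\n", FitsSignedN(8, -128));     // 1
  printf("%d\n", FitsUnsignedN(12, 4095));  // 1: largest unsigned 12-bit value.
  printf("%d\n", FitsUnsignedN(12, 4096));  // 0
}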
+
+// clang-format off
+#define INT_1_TO_32_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)
+
+#define INT_33_TO_63_LIST(V) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define INT_1_TO_63_LIST(V) INT_1_TO_32_LIST(V) INT_33_TO_63_LIST(V)
+
+// clang-format on
+
+#define DECLARE_IS_INT_N(N) \
+ inline bool IsInt##N(int64_t x) { return IsIntN(N, x); } \
+ VIXL_DEPRECATED("IsInt" #N, inline bool is_int##N(int64_t x)) { \
+ return IsIntN(N, x); \
+ }
+
+#define DECLARE_IS_UINT_N(N) \
+ inline bool IsUint##N(int64_t x) { return IsUintN(N, x); } \
+ VIXL_DEPRECATED("IsUint" #N, inline bool is_uint##N(int64_t x)) { \
+ return IsUintN(N, x); \
+ }
+
+#define DECLARE_TRUNCATE_TO_UINT_32(N) \
+ inline uint32_t TruncateToUint##N(uint64_t x) { \
+ return static_cast<uint32_t>(TruncateToUintN(N, x)); \
+ } \
+ VIXL_DEPRECATED("TruncateToUint" #N, \
+ inline uint32_t truncate_to_int##N(int64_t x)) { \
+ return TruncateToUint##N(x); \
+ }
+
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_32_LIST(DECLARE_TRUNCATE_TO_UINT_32)
+
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_UINT_32
+
+// Bit field extraction.
+inline uint64_t ExtractUnsignedBitfield64(int msb, int lsb, uint64_t x) {
+ VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+ (msb >= lsb));
+ if ((msb == 63) && (lsb == 0)) return x;
+ return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+
+inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint32_t x) {
+ VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+ (msb >= lsb));
+ return TruncateToUint32(ExtractUnsignedBitfield64(msb, lsb, x));
+}
+
+
+inline int64_t ExtractSignedBitfield64(int msb, int lsb, int64_t x) {
+ VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+ (msb >= lsb));
+ uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x);
+ // If the highest extracted bit is set, sign extend.
+ if ((temp >> (msb - lsb)) == 1) {
+ temp |= ~UINT64_C(0) << (msb - lsb);
+ }
+ int64_t result;
+ memcpy(&result, &temp, sizeof(result));
+ return result;
+}
+
+
+inline int32_t ExtractSignedBitfield32(int msb, int lsb, int32_t x) {
+ VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+ (msb >= lsb));
+ uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x));
+ int32_t result;
+ memcpy(&result, &temp, sizeof(result));
+ return result;
+}
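
A worked example of the sign extension performed above (hand-rolled, assuming the extracted width is less than 64 bits): extracting bits [3:0] of 0x0F yields the 4-bit pattern 0b1111, which sign-extends to -1.

#include <cstdint>
#include <cstdio>
#include <cstring>

static int64_t ExtractSigned64(int msb, int lsb, uint64_t x) {
  int width = msb - lsb + 1;  // Assumed < 64 here.
  uint64_t field = (x >> lsb) & ((UINT64_C(1) << width) - 1);
  if ((field >> (width - 1)) & 1) {  // Top extracted bit set: sign extend.
    field |= ~UINT64_C(0) << width;
  }
  int64_t result;
  memcpy(&result, &field, sizeof(result));
  return result;
}

int main() {
  printf("%lld\n", (long long)ExtractSigned64(3, 0, 0x0F));  // -1
  printf("%lld\n", (long long)ExtractSigned64(3, 0, 0x07));  // 7
}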
+
+
+inline uint64_t RotateRight(uint64_t value,
+ unsigned int rotate,
+ unsigned int width) {
+ VIXL_ASSERT((width > 0) && (width <= 64));
+ uint64_t width_mask = ~UINT64_C(0) >> (64 - width);
+ rotate &= 63;
+ if (rotate > 0) {
+ value &= width_mask;
+ value = (value << (width - rotate)) | (value >> rotate);
+ }
+ return value & width_mask;
+}
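
RotateRight rotates within an arbitrary field width rather than the full 64 bits; for example, rotating 0b0001 right by one inside a 4-bit field wraps the set bit around to 0b1000:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t value = 0b0001, width_mask = 0xF;  // 4-bit field.
  unsigned rotate = 1, width = 4;
  uint64_t result =
      ((value << (width - rotate)) | (value >> rotate)) & width_mask;
  printf("0x%llx\n", (unsigned long long)result);  // 0x8
}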
+
+
+// Wrapper class for passing FP16 values through the assembler.
+// This is purely to aid with type checking/casting.
+class Float16 {
+ public:
+ explicit Float16(double dvalue);
+ Float16() : rawbits_(0x0) {}
+ friend uint16_t Float16ToRawbits(Float16 value);
+ friend Float16 RawbitsToFloat16(uint16_t bits);
+
+ protected:
+ uint16_t rawbits_;
+};
+
+// Floating point representation.
+uint16_t Float16ToRawbits(Float16 value);
+
+
+uint32_t FloatToRawbits(float value);
+VIXL_DEPRECATED("FloatToRawbits",
+ inline uint32_t float_to_rawbits(float value)) {
+ return FloatToRawbits(value);
+}
+
+uint64_t DoubleToRawbits(double value);
+VIXL_DEPRECATED("DoubleToRawbits",
+ inline uint64_t double_to_rawbits(double value)) {
+ return DoubleToRawbits(value);
+}
+
+Float16 RawbitsToFloat16(uint16_t bits);
+
+float RawbitsToFloat(uint32_t bits);
+VIXL_DEPRECATED("RawbitsToFloat",
+ inline float rawbits_to_float(uint32_t bits)) {
+ return RawbitsToFloat(bits);
+}
+
+double RawbitsToDouble(uint64_t bits);
+VIXL_DEPRECATED("RawbitsToDouble",
+ inline double rawbits_to_double(uint64_t bits)) {
+ return RawbitsToDouble(bits);
+}
+
+namespace internal {
+
+// Internal simulation class used solely by the simulator to
+// provide an abstraction layer for any half-precision arithmetic.
+class SimFloat16 : public Float16 {
+ public:
+ // TODO: We should investigate making this constructor explicit.
+ // This is currently difficult to do due to a number of templated
+ // functions in the simulator which rely on returning double values.
+ SimFloat16(double dvalue) : Float16(dvalue) {} // NOLINT(runtime/explicit)
+ SimFloat16(Float16 f) { // NOLINT(runtime/explicit)
+ this->rawbits_ = Float16ToRawbits(f);
+ }
+ SimFloat16() : Float16() {}
+ SimFloat16 operator-() const;
+ SimFloat16 operator+(SimFloat16 rhs) const;
+ SimFloat16 operator-(SimFloat16 rhs) const;
+ SimFloat16 operator*(SimFloat16 rhs) const;
+ SimFloat16 operator/(SimFloat16 rhs) const;
+ bool operator<(SimFloat16 rhs) const;
+ bool operator>(SimFloat16 rhs) const;
+ bool operator==(SimFloat16 rhs) const;
+ bool operator!=(SimFloat16 rhs) const;
+  // This is necessary for conversions performed in (macro asm) Fmov.
+ bool operator==(double rhs) const;
+ operator double() const;
+};
+} // namespace internal
+
+uint32_t Float16Sign(internal::SimFloat16 value);
+
+uint32_t Float16Exp(internal::SimFloat16 value);
+
+uint32_t Float16Mantissa(internal::SimFloat16 value);
+
+uint32_t FloatSign(float value);
+VIXL_DEPRECATED("FloatSign", inline uint32_t float_sign(float value)) {
+ return FloatSign(value);
+}
+
+uint32_t FloatExp(float value);
+VIXL_DEPRECATED("FloatExp", inline uint32_t float_exp(float value)) {
+ return FloatExp(value);
+}
+
+uint32_t FloatMantissa(float value);
+VIXL_DEPRECATED("FloatMantissa", inline uint32_t float_mantissa(float value)) {
+ return FloatMantissa(value);
+}
+
+uint32_t DoubleSign(double value);
+VIXL_DEPRECATED("DoubleSign", inline uint32_t double_sign(double value)) {
+ return DoubleSign(value);
+}
+
+uint32_t DoubleExp(double value);
+VIXL_DEPRECATED("DoubleExp", inline uint32_t double_exp(double value)) {
+ return DoubleExp(value);
+}
+
+uint64_t DoubleMantissa(double value);
+VIXL_DEPRECATED("DoubleMantissa",
+ inline uint64_t double_mantissa(double value)) {
+ return DoubleMantissa(value);
+}
+
+internal::SimFloat16 Float16Pack(uint16_t sign,
+ uint16_t exp,
+ uint16_t mantissa);
+
+float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa);
+VIXL_DEPRECATED("FloatPack",
+ inline float float_pack(uint32_t sign,
+ uint32_t exp,
+ uint32_t mantissa)) {
+ return FloatPack(sign, exp, mantissa);
+}
+
+double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa);
+VIXL_DEPRECATED("DoublePack",
+ inline double double_pack(uint32_t sign,
+ uint32_t exp,
+ uint64_t mantissa)) {
+ return DoublePack(sign, exp, mantissa);
+}
+
+// An fpclassify() function for 16-bit half-precision floats.
+int Float16Classify(Float16 value);
+VIXL_DEPRECATED("Float16Classify", inline int float16classify(uint16_t value)) {
+ return Float16Classify(RawbitsToFloat16(value));
+}
+
+bool IsZero(Float16 value);
+
+inline bool IsNaN(float value) { return std::isnan(value); }
+
+inline bool IsNaN(double value) { return std::isnan(value); }
+
+inline bool IsNaN(Float16 value) { return Float16Classify(value) == FP_NAN; }
+
+inline bool IsInf(float value) { return std::isinf(value); }
+
+inline bool IsInf(double value) { return std::isinf(value); }
+
+inline bool IsInf(Float16 value) {
+ return Float16Classify(value) == FP_INFINITE;
+}
+
+
+// NaN tests.
+inline bool IsSignallingNaN(double num) {
+ const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+ uint64_t raw = DoubleToRawbits(num);
+ if (IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+ const uint32_t kFP32QuietNaNMask = 0x00400000;
+ uint32_t raw = FloatToRawbits(num);
+ if (IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(Float16 num) {
+ const uint16_t kFP16QuietNaNMask = 0x0200;
+ return IsNaN(num) && ((Float16ToRawbits(num) & kFP16QuietNaNMask) == 0);
+}
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+ return IsNaN(num) && !IsSignallingNaN(num);
+}
+
+
+// Convert the NaN in 'num' to a quiet NaN.
+inline double ToQuietNaN(double num) {
+ const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+ VIXL_ASSERT(IsNaN(num));
+ return RawbitsToDouble(DoubleToRawbits(num) | kFP64QuietNaNMask);
+}
+
+
+inline float ToQuietNaN(float num) {
+ const uint32_t kFP32QuietNaNMask = 0x00400000;
+ VIXL_ASSERT(IsNaN(num));
+ return RawbitsToFloat(FloatToRawbits(num) | kFP32QuietNaNMask);
+}
+
+
+inline internal::SimFloat16 ToQuietNaN(internal::SimFloat16 num) {
+ const uint16_t kFP16QuietNaNMask = 0x0200;
+ VIXL_ASSERT(IsNaN(num));
+ return internal::SimFloat16(
+ RawbitsToFloat16(Float16ToRawbits(num) | kFP16QuietNaNMask));
+}
+
+
+// Fused multiply-add.
+inline double FusedMultiplyAdd(double op1, double op2, double a) {
+ return fma(op1, op2, a);
+}
+
+
+inline float FusedMultiplyAdd(float op1, float op2, float a) {
+ return fmaf(op1, op2, a);
+}
+
+
+inline uint64_t LowestSetBit(uint64_t value) { return value & -value; }
+
+
+template <typename T>
+inline int HighestSetBitPosition(T value) {
+ VIXL_ASSERT(value != 0);
+ return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
+}
+
+
+template <typename V>
+inline int WhichPowerOf2(V value) {
+ VIXL_ASSERT(IsPowerOf2(value));
+ return CountTrailingZeros(value);
+}
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+
+int BitCount(uint64_t value);
+
+
+template <typename T>
+T ReverseBits(T value) {
+ VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8));
+ T result = 0;
+ for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+
+template <typename T>
+inline T SignExtend(T val, int bitSize) {
+ VIXL_ASSERT(bitSize > 0);
+ T mask = (T(2) << (bitSize - 1)) - T(1);
+ val &= mask;
+ T sign_bits = -((val >> (bitSize - 1)) << bitSize);
+ val |= sign_bits;
+ return val;
+}
+
+
+template <typename T>
+T ReverseBytes(T value, int block_bytes_log2) {
+ VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
+ VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
+ // Split the 64-bit value into an 8-bit array, where b[0] is the least
+ // significant byte, and b[7] is the most significant.
+ uint8_t bytes[8];
+ uint64_t mask = UINT64_C(0xff00000000000000);
+ for (int i = 7; i >= 0; i--) {
+ bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
+ mask >>= 8;
+ }
+
+ // Permutation tables for REV instructions.
+ // permute_table[0] is used by REV16_x, REV16_w
+ // permute_table[1] is used by REV32_x, REV_w
+ // permute_table[2] is used by REV_x
+ VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
+ static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
+ {4, 5, 6, 7, 0, 1, 2, 3},
+ {0, 1, 2, 3, 4, 5, 6, 7}};
+ uint64_t temp = 0;
+ for (int i = 0; i < 8; i++) {
+ temp <<= 8;
+ temp |= bytes[permute_table[block_bytes_log2 - 1][i]];
+ }
+
+ T result;
+ VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(temp));
+ memcpy(&result, &temp, sizeof(result));
+ return result;
+}
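
As a standalone check of the permutation tables above, block_bytes_log2 == 1 selects permute_table[0], i.e. the REV16 pattern that swaps bytes within each 16-bit halfword:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t value = 0x0102030405060708;
  uint8_t bytes[8];  // bytes[0] is the least significant byte.
  for (int i = 0; i < 8; i++) bytes[i] = (value >> (i * 8)) & 0xff;

  static const uint8_t rev16[8] = {6, 7, 4, 5, 2, 3, 0, 1};
  uint64_t result = 0;
  for (int i = 0; i < 8; i++) {
    result = (result << 8) | bytes[rev16[i]];
  }
  printf("0x%016llx\n", (unsigned long long)result);  // 0x0201040306050807
}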
+
+template <unsigned MULTIPLE, typename T>
+inline bool IsMultiple(T value) {
+ VIXL_ASSERT(IsPowerOf2(MULTIPLE));
+ return (value & (MULTIPLE - 1)) == 0;
+}
+
+template <typename T>
+inline bool IsMultiple(T value, unsigned multiple) {
+ VIXL_ASSERT(IsPowerOf2(multiple));
+ return (value & (multiple - 1)) == 0;
+}
+
+template <typename T>
+inline bool IsAligned(T pointer, int alignment) {
+ VIXL_ASSERT(IsPowerOf2(alignment));
+ return (pointer & (alignment - 1)) == 0;
+}
+
+// Pointer alignment
+// TODO: rename/refactor to make it specific to instructions.
+template <unsigned ALIGN, typename T>
+inline bool IsAligned(T pointer) {
+ VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof)
+ // Use C-style casts to get static_cast behaviour for integral types (T), and
+ // reinterpret_cast behaviour for other types.
+ return IsAligned((intptr_t)(pointer), ALIGN);
+}
+
+template <typename T>
+bool IsWordAligned(T pointer) {
+ return IsAligned<4>(pointer);
+}
+
+// Increment a pointer until it has the specified alignment. The alignment must
+// be a power of two.
+template <class T>
+T AlignUp(T pointer,
+ typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
+ VIXL_ASSERT(IsPowerOf2(alignment));
+ // Use C-style casts to get static_cast behaviour for integral types (T), and
+ // reinterpret_cast behaviour for other types.
+
+ typename Unsigned<sizeof(T)* kBitsPerByte>::type pointer_raw =
+ (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+ size_t mask = alignment - 1;
+ T result = (T)((pointer_raw + mask) & ~mask);
+ VIXL_ASSERT(result >= pointer);
+
+ return result;
+}
+
+// Decrement a pointer until it has the specified alignment. The alignment must
+// be a power of two.
+template <class T>
+T AlignDown(T pointer,
+ typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
+ VIXL_ASSERT(IsPowerOf2(alignment));
+ // Use C-style casts to get static_cast behaviour for integral types (T), and
+ // reinterpret_cast behaviour for other types.
+
+ typename Unsigned<sizeof(T)* kBitsPerByte>::type pointer_raw =
+ (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+ size_t mask = alignment - 1;
+ return (T)(pointer_raw & ~mask);
+}
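
Both helpers reduce to the usual add-mask-and-clear idiom; applied to plain integers with a 16-byte alignment:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t alignment = 16, mask = alignment - 1;
  uint64_t p = 0x1003;
  printf("0x%llx\n", (unsigned long long)((p + mask) & ~mask));  // 0x1010 (AlignUp)
  printf("0x%llx\n", (unsigned long long)(p & ~mask));           // 0x1000 (AlignDown)
}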
+
+
+template <typename T>
+inline T ExtractBit(T value, unsigned bit) {
+ return (value >> bit) & T(1);
+}
+
+template <typename Ts, typename Td>
+inline Td ExtractBits(Ts value, int least_significant_bit, Td mask) {
+ return Td((value >> least_significant_bit) & Ts(mask));
+}
+
+template <typename Ts, typename Td>
+inline void AssignBit(Td& dst, // NOLINT(runtime/references)
+ int bit,
+ Ts value) {
+ VIXL_ASSERT((value == Ts(0)) || (value == Ts(1)));
+ VIXL_ASSERT(bit >= 0);
+ VIXL_ASSERT(bit < static_cast<int>(sizeof(Td) * 8));
+ Td mask(1);
+ dst &= ~(mask << bit);
+ dst |= Td(value) << bit;
+}
+
+template <typename Td, typename Ts>
+inline void AssignBits(Td& dst, // NOLINT(runtime/references)
+ int least_significant_bit,
+ Ts mask,
+ Ts value) {
+ VIXL_ASSERT(least_significant_bit >= 0);
+ VIXL_ASSERT(least_significant_bit < static_cast<int>(sizeof(Td) * 8));
+ VIXL_ASSERT(((Td(mask) << least_significant_bit) >> least_significant_bit) ==
+ Td(mask));
+ VIXL_ASSERT((value & mask) == value);
+ dst &= ~(Td(mask) << least_significant_bit);
+ dst |= Td(value) << least_significant_bit;
+}
+
+class VFP {
+ public:
+ static uint32_t FP32ToImm8(float imm) {
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = FloatToRawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+ return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+ }
+ static uint32_t FP64ToImm8(double imm) {
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = DoubleToRawbits(imm);
+ // bit7: a000.0000
+ uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+ return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+ }
+ static float Imm8ToFP32(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
+ uint32_t bits = imm8;
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return RawbitsToFloat(result);
+ }
+ static double Imm8ToFP64(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
+ uint32_t bits = imm8;
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+ return RawbitsToDouble(result);
+ }
+ static bool IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = FloatToRawbits(imm);
+ // bits[19..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+ return true;
+ }
+ static bool IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = DoubleToRawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0x0000ffffffffffff) != 0) {
+ return false;
+ }
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
+ return false;
+ }
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
+ return false;
+ }
+ return true;
+ }
+};
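
A standalone round trip of the FP32 imm8 scheme above, written with plain bit operations rather than the VIXL helpers, for 1.0f (raw bits 0x3f800000), which is encodable:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  float imm = 1.0f;
  uint32_t bits;
  memcpy(&bits, &imm, sizeof(bits));

  // Encode: a (sign), b (bit 29) and cdefgh (bits 24..19).
  uint32_t imm8 = (((bits >> 31) & 0x1) << 7) | (((bits >> 29) & 0x1) << 6) |
                  ((bits >> 19) & 0x3f);
  printf("imm8 = 0x%02x\n", imm8);  // 0x70

  // Decode: rebuild aBbb.bbbc.defg.h000... with B = b ^ 1.
  uint32_t decoded = (((imm8 >> 7) & 0x1) << 31) |
                     ((32 - ((imm8 >> 6) & 0x1)) << 25) | ((imm8 & 0x3f) << 19);
  float back;
  memcpy(&back, &decoded, sizeof(back));
  printf("decoded = %f\n", back);  // 1.000000
}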
+
+class BitField {
+ // ForEachBitHelper is a functor that will call
+ // bool ForEachBitHelper::execute(ElementType id) const
+ // and expects a boolean in return whether to continue (if true)
+ // or stop (if false)
+ // check_set will check if the bits are on (true) or off(false)
+ template <typename ForEachBitHelper, bool check_set>
+ bool ForEachBit(const ForEachBitHelper& helper) {
+ for (int i = 0; static_cast<size_t>(i) < bitfield_.size(); i++) {
+ if (bitfield_[i] == check_set)
+ if (!helper.execute(i)) return false;
+ }
+ return true;
+ }
+
+ public:
+ explicit BitField(unsigned size) : bitfield_(size, 0) {}
+
+ void Set(int i) {
+ VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
+ bitfield_[i] = true;
+ }
+
+ void Unset(int i) {
+ VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
+    bitfield_[i] = false;
+ }
+
+ bool IsSet(int i) const { return bitfield_[i]; }
+
+ // For each bit not set in the bitfield call the execute functor
+ // execute.
+ // ForEachBitSetHelper::execute returns true if the iteration through
+ // the bits can continue, otherwise it will stop.
+ // struct ForEachBitSetHelper {
+ // bool execute(int /*id*/) { return false; }
+ // };
+ template <typename ForEachBitNotSetHelper>
+ bool ForEachBitNotSet(const ForEachBitNotSetHelper& helper) {
+ return ForEachBit<ForEachBitNotSetHelper, false>(helper);
+ }
+
+ // For each bit set in the bitfield call the execute functor
+ // execute.
+ template <typename ForEachBitSetHelper>
+ bool ForEachBitSet(const ForEachBitSetHelper& helper) {
+ return ForEachBit<ForEachBitSetHelper, true>(helper);
+ }
+
+ private:
+ std::vector<bool> bitfield_;
+};
+
+namespace internal {
+
+typedef int64_t Int64;
+class Uint64;
+class Uint128;
+
+class Uint32 {
+ uint32_t data_;
+
+ public:
+ // Unlike uint32_t, Uint32 has a default constructor.
+ Uint32() { data_ = 0; }
+ explicit Uint32(uint32_t data) : data_(data) {}
+ inline explicit Uint32(Uint64 data);
+ uint32_t Get() const { return data_; }
+ template <int N>
+ int32_t GetSigned() const {
+ return ExtractSignedBitfield32(N - 1, 0, data_);
+ }
+ int32_t GetSigned() const { return data_; }
+ Uint32 operator~() const { return Uint32(~data_); }
+ Uint32 operator-() const { return Uint32(-data_); }
+ bool operator==(Uint32 value) const { return data_ == value.data_; }
+ bool operator!=(Uint32 value) const { return data_ != value.data_; }
+ bool operator>(Uint32 value) const { return data_ > value.data_; }
+ Uint32 operator+(Uint32 value) const { return Uint32(data_ + value.data_); }
+ Uint32 operator-(Uint32 value) const { return Uint32(data_ - value.data_); }
+ Uint32 operator&(Uint32 value) const { return Uint32(data_ & value.data_); }
+ Uint32 operator&=(Uint32 value) {
+ data_ &= value.data_;
+ return *this;
+ }
+ Uint32 operator^(Uint32 value) const { return Uint32(data_ ^ value.data_); }
+ Uint32 operator^=(Uint32 value) {
+ data_ ^= value.data_;
+ return *this;
+ }
+ Uint32 operator|(Uint32 value) const { return Uint32(data_ | value.data_); }
+ Uint32 operator|=(Uint32 value) {
+ data_ |= value.data_;
+ return *this;
+ }
+  // Unlike uint32_t, the shift functions accept a negative shift count and
+  // return 0 when the shift is too big.
+ Uint32 operator>>(int shift) const {
+ if (shift == 0) return *this;
+ if (shift < 0) {
+ int tmp = -shift;
+ if (tmp >= 32) return Uint32(0);
+ return Uint32(data_ << tmp);
+ }
+ int tmp = shift;
+ if (tmp >= 32) return Uint32(0);
+ return Uint32(data_ >> tmp);
+ }
+ Uint32 operator<<(int shift) const {
+ if (shift == 0) return *this;
+ if (shift < 0) {
+ int tmp = -shift;
+ if (tmp >= 32) return Uint32(0);
+ return Uint32(data_ >> tmp);
+ }
+ int tmp = shift;
+ if (tmp >= 32) return Uint32(0);
+ return Uint32(data_ << tmp);
+ }
+};
+
+class Uint64 {
+ uint64_t data_;
+
+ public:
+ // Unlike uint64_t, Uint64 has a default constructor.
+ Uint64() { data_ = 0; }
+ explicit Uint64(uint64_t data) : data_(data) {}
+ explicit Uint64(Uint32 data) : data_(data.Get()) {}
+ inline explicit Uint64(Uint128 data);
+ uint64_t Get() const { return data_; }
+ int64_t GetSigned(int N) const {
+ return ExtractSignedBitfield64(N - 1, 0, data_);
+ }
+ int64_t GetSigned() const { return data_; }
+ Uint32 ToUint32() const {
+ VIXL_ASSERT((data_ >> 32) == 0);
+ return Uint32(static_cast<uint32_t>(data_));
+ }
+ Uint32 GetHigh32() const { return Uint32(data_ >> 32); }
+ Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); }
+ Uint64 operator~() const { return Uint64(~data_); }
+ Uint64 operator-() const { return Uint64(-data_); }
+ bool operator==(Uint64 value) const { return data_ == value.data_; }
+ bool operator!=(Uint64 value) const { return data_ != value.data_; }
+ Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); }
+ Uint64 operator-(Uint64 value) const { return Uint64(data_ - value.data_); }
+ Uint64 operator&(Uint64 value) const { return Uint64(data_ & value.data_); }
+ Uint64 operator&=(Uint64 value) {
+ data_ &= value.data_;
+ return *this;
+ }
+ Uint64 operator^(Uint64 value) const { return Uint64(data_ ^ value.data_); }
+ Uint64 operator^=(Uint64 value) {
+ data_ ^= value.data_;
+ return *this;
+ }
+ Uint64 operator|(Uint64 value) const { return Uint64(data_ | value.data_); }
+ Uint64 operator|=(Uint64 value) {
+ data_ |= value.data_;
+ return *this;
+ }
+  // Unlike uint64_t, the shift functions accept a negative shift count and
+  // return 0 when the shift is too big.
+ Uint64 operator>>(int shift) const {
+ if (shift == 0) return *this;
+ if (shift < 0) {
+ int tmp = -shift;
+ if (tmp >= 64) return Uint64(0);
+ return Uint64(data_ << tmp);
+ }
+ int tmp = shift;
+ if (tmp >= 64) return Uint64(0);
+ return Uint64(data_ >> tmp);
+ }
+ Uint64 operator<<(int shift) const {
+ if (shift == 0) return *this;
+ if (shift < 0) {
+ int tmp = -shift;
+ if (tmp >= 64) return Uint64(0);
+ return Uint64(data_ >> tmp);
+ }
+ int tmp = shift;
+ if (tmp >= 64) return Uint64(0);
+ return Uint64(data_ << tmp);
+ }
+};
+
+class Uint128 {
+ uint64_t data_high_;
+ uint64_t data_low_;
+
+ public:
+ Uint128() : data_high_(0), data_low_(0) {}
+ explicit Uint128(uint64_t data_low) : data_high_(0), data_low_(data_low) {}
+ explicit Uint128(Uint64 data_low)
+ : data_high_(0), data_low_(data_low.Get()) {}
+ Uint128(uint64_t data_high, uint64_t data_low)
+ : data_high_(data_high), data_low_(data_low) {}
+ Uint64 ToUint64() const {
+ VIXL_ASSERT(data_high_ == 0);
+ return Uint64(data_low_);
+ }
+ Uint64 GetHigh64() const { return Uint64(data_high_); }
+ Uint64 GetLow64() const { return Uint64(data_low_); }
+ Uint128 operator~() const { return Uint128(~data_high_, ~data_low_); }
+ bool operator==(Uint128 value) const {
+ return (data_high_ == value.data_high_) && (data_low_ == value.data_low_);
+ }
+ Uint128 operator&(Uint128 value) const {
+ return Uint128(data_high_ & value.data_high_, data_low_ & value.data_low_);
+ }
+ Uint128 operator&=(Uint128 value) {
+ data_high_ &= value.data_high_;
+ data_low_ &= value.data_low_;
+ return *this;
+ }
+ Uint128 operator|=(Uint128 value) {
+ data_high_ |= value.data_high_;
+ data_low_ |= value.data_low_;
+ return *this;
+ }
+ Uint128 operator>>(int shift) const {
+ VIXL_ASSERT((shift >= 0) && (shift < 128));
+ if (shift == 0) return *this;
+ if (shift >= 64) {
+ return Uint128(0, data_high_ >> (shift - 64));
+ }
+ uint64_t tmp = (data_high_ << (64 - shift)) | (data_low_ >> shift);
+ return Uint128(data_high_ >> shift, tmp);
+ }
+ Uint128 operator<<(int shift) const {
+ VIXL_ASSERT((shift >= 0) && (shift < 128));
+ if (shift == 0) return *this;
+ if (shift >= 64) {
+ return Uint128(data_low_ << (shift - 64), 0);
+ }
+ uint64_t tmp = (data_high_ << shift) | (data_low_ >> (64 - shift));
+ return Uint128(tmp, data_low_ << shift);
+ }
+};
+
+Uint32::Uint32(Uint64 data) : data_(data.ToUint32().Get()) {}
+Uint64::Uint64(Uint128 data) : data_(data.ToUint64().Get()) {}
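
Unlike the built-in integer types, the shift operators above never hit undefined behaviour: a negative count shifts the other way, and an out-of-range count produces zero. A standalone mirror of that rule for the 32-bit case:

#include <cstdint>
#include <cstdio>

static uint32_t ShiftRight(uint32_t data, int shift) {
  if (shift == 0) return data;
  if (shift < 0) {
    int tmp = -shift;
    return (tmp >= 32) ? 0 : (data << tmp);
  }
  return (shift >= 32) ? 0 : (data >> shift);
}

int main() {
  printf("0x%x\n", ShiftRight(0x80000000u, 4));   // 0x8000000
  printf("0x%x\n", ShiftRight(0x1u, -4));         // 0x10 (negative count shifts left)
  printf("0x%x\n", ShiftRight(0xffffffffu, 32));  // 0x0 (count too big yields zero)
}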
+
+Int64 BitCount(Uint32 value);
+
+} // namespace internal
+
+// The default NaN values (for FPCR.DN=1).
+extern const double kFP64DefaultNaN;
+extern const float kFP32DefaultNaN;
+extern const Float16 kFP16DefaultNaN;
+
+// Floating-point infinity values.
+extern const Float16 kFP16PositiveInfinity;
+extern const Float16 kFP16NegativeInfinity;
+extern const float kFP32PositiveInfinity;
+extern const float kFP32NegativeInfinity;
+extern const double kFP64PositiveInfinity;
+extern const double kFP64NegativeInfinity;
+
+// Floating-point zero values.
+extern const Float16 kFP16PositiveZero;
+extern const Float16 kFP16NegativeZero;
+
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+const unsigned kFloat16MantissaBits = 10;
+const unsigned kFloat16ExponentBits = 5;
+
+enum FPRounding {
+ // The first four values are encodable directly by FPCR<RMode>.
+ FPTieEven = 0x0,
+ FPPositiveInfinity = 0x1,
+ FPNegativeInfinity = 0x2,
+ FPZero = 0x3,
+
+ // The final rounding modes are only available when explicitly specified by
+  // the instruction (such as with fcvta). They cannot be set in FPCR.
+ FPTieAway,
+ FPRoundOdd
+};
+
+enum UseDefaultNaN { kUseDefaultNaN, kIgnoreDefaultNaN };
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+// sign: 0 = positive, 1 = negative
+// exponent: Unbiased IEEE-754 exponent.
+// mantissa: The mantissa of the input. The top bit (which is not encoded for
+// normal IEEE-754 values) must not be omitted. This bit has the
+// value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+T FPRound(int64_t sign,
+ int64_t exponent,
+ uint64_t mantissa,
+ FPRounding round_mode) {
+ VIXL_ASSERT((sign == 0) || (sign == 1));
+
+ // Only FPTieEven and FPRoundOdd rounding modes are implemented.
+ VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+
+ // Rounding can promote subnormals to normals, and normals to infinities. For
+ // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+ // encodable as a float, but rounding based on the low-order mantissa bits
+ // could make it overflow. With ties-to-even rounding, this value would become
+ // an infinity.
+
+ // ---- Rounding Method ----
+ //
+ // The exponent is irrelevant in the rounding operation, so we treat the
+ // lowest-order bit that will fit into the result ('onebit') as having
+ // the value '1'. Similarly, the highest-order bit that won't fit into
+ // the result ('halfbit') has the value '0.5'. The 'point' sits between
+ // 'onebit' and 'halfbit':
+ //
+ // These bits fit into the result.
+ // |---------------------|
+ // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ // ||
+ // / |
+ // / halfbit
+ // onebit
+ //
+ // For subnormal outputs, the range of representable bits is smaller and
+ // the position of onebit and halfbit depends on the exponent of the
+ // input, but the method is otherwise similar.
+ //
+ // onebit(frac)
+ // |
+ // | halfbit(frac) halfbit(adjusted)
+ // | / /
+ // | | |
+ // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
+ // 0b00.0... -> 0b00.0... -> 0b00
+ // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
+ // 0b00.1... -> 0b00.1... -> 0b01
+ // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
+ // 0b01.0... -> 0b01.0... -> 0b01
+ // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
+ // 0b01.1... -> 0b01.1... -> 0b10
+ // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
+ // 0b10.0... -> 0b10.0... -> 0b10
+ // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
+ // 0b10.1... -> 0b10.1... -> 0b11
+ // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
+ // ... / | / |
+ // / | / |
+ // / |
+ // adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / |
+ //
+ // mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+ static const int mantissa_offset = 0;
+ static const int exponent_offset = mantissa_offset + mbits;
+ static const int sign_offset = exponent_offset + ebits;
+ VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1));
+
+ // Bail out early for zero inputs.
+ if (mantissa == 0) {
+ return static_cast<T>(sign << sign_offset);
+ }
+
+ // If all bits in the exponent are set, the value is infinite or NaN.
+ // This is true for all binary IEEE-754 formats.
+ static const int infinite_exponent = (1 << ebits) - 1;
+ static const int max_normal_exponent = infinite_exponent - 1;
+
+ // Apply the exponent bias to encode it for the result. Doing this early makes
+ // it easy to detect values that will be infinite or subnormal.
+ exponent += max_normal_exponent >> 1;
+
+ if (exponent > max_normal_exponent) {
+ // Overflow: the input is too large for the result type to represent.
+ if (round_mode == FPTieEven) {
+ // FPTieEven rounding mode handles overflows using infinities.
+ exponent = infinite_exponent;
+ mantissa = 0;
+ } else {
+ VIXL_ASSERT(round_mode == FPRoundOdd);
+ // FPRoundOdd rounding mode handles overflows using the largest magnitude
+ // normal number.
+ exponent = max_normal_exponent;
+ mantissa = (UINT64_C(1) << exponent_offset) - 1;
+ }
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ (mantissa << mantissa_offset));
+ }
+
+ // Calculate the shift required to move the top mantissa bit to the proper
+ // place in the destination type.
+ const int highest_significant_bit = 63 - CountLeadingZeros(mantissa);
+ int shift = highest_significant_bit - mbits;
+
+ if (exponent <= 0) {
+ // The output will be subnormal (before rounding).
+ // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+ // is necessary because the exponent of a subnormal value (encoded as 0) is
+ // the same as the exponent of the smallest normal value (encoded as 1).
+ shift += -exponent + 1;
+
+ // Handle inputs that would produce a zero output.
+ //
+ // Shifts higher than highest_significant_bit+1 will always produce a zero
+ // result. A shift of exactly highest_significant_bit+1 might produce a
+ // non-zero result after rounding.
+ if (shift > (highest_significant_bit + 1)) {
+ if (round_mode == FPTieEven) {
+ // The result will always be +/-0.0.
+ return static_cast<T>(sign << sign_offset);
+ } else {
+ VIXL_ASSERT(round_mode == FPRoundOdd);
+ VIXL_ASSERT(mantissa != 0);
+ // For FPRoundOdd, if the mantissa is too small to represent and
+        // non-zero, return the next "odd" value.
+ return static_cast<T>((sign << sign_offset) | 1);
+ }
+ }
+
+ // Properly encode the exponent for a subnormal output.
+ exponent = 0;
+ } else {
+ // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+ // normal values.
+ mantissa &= ~(UINT64_C(1) << highest_significant_bit);
+ }
+
+ // The casts below are only well-defined for unsigned integers.
+ VIXL_STATIC_ASSERT(std::numeric_limits<T>::is_integer);
+ VIXL_STATIC_ASSERT(!std::numeric_limits<T>::is_signed);
+
+ if (shift > 0) {
+ if (round_mode == FPTieEven) {
+ // We have to shift the mantissa to the right. Some precision is lost, so
+ // we need to apply rounding.
+ uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+ uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1;
+ uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
+ uint64_t adjusted = mantissa - adjustment;
+ T halfbit_adjusted = (adjusted >> (shift - 1)) & 1;
+
+ T result =
+ static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset));
+
+ // A very large mantissa can overflow during rounding. If this happens,
+ // the exponent should be incremented and the mantissa set to 1.0
+ // (encoded as 0). Applying halfbit_adjusted after assembling the float
+ // has the nice side-effect that this case is handled for free.
+ //
+ // This also handles cases where a very large finite value overflows to
+ // infinity, or where a very large subnormal value overflows to become
+ // normal.
+ return result + halfbit_adjusted;
+ } else {
+ VIXL_ASSERT(round_mode == FPRoundOdd);
+      // If any bits at position halfbit or below are set, onebit (i.e. the
+ // bottom bit of the resulting mantissa) must be set.
+ uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
+ if (fractional_bits != 0) {
+ mantissa |= UINT64_C(1) << shift;
+ }
+
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset));
+ }
+ } else {
+ // We have to shift the mantissa to the left (or not at all). The input
+ // mantissa is exactly representable in the output mantissa, so apply no
+ // rounding correction.
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa << -shift) << mantissa_offset));
+ }
+}
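
The onebit/halfbit adjustment in the FPTieEven branch above can be exercised on its own: keep the bits above 'shift' fractional bits and round ties to even.

#include <cstdint>
#include <cstdio>

static uint64_t RoundTiesToEven(uint64_t mantissa, int shift) {
  uint64_t onebit = (mantissa >> shift) & 1;
  uint64_t halfbit = (mantissa >> (shift - 1)) & 1;
  uint64_t adjusted = mantissa - (halfbit & ~onebit);
  uint64_t halfbit_adjusted = (adjusted >> (shift - 1)) & 1;
  return (mantissa >> shift) + halfbit_adjusted;
}

int main() {
  // Inputs are fixed-point values with two fractional bits (shift = 2).
  printf("%llu\n", (unsigned long long)RoundTiesToEven(0b0110, 2));  // 2: 1.5 -> 2 (tie, even)
  printf("%llu\n", (unsigned long long)RoundTiesToEven(0b1010, 2));  // 2: 2.5 -> 2 (tie, even)
  printf("%llu\n", (unsigned long long)RoundTiesToEven(0b1011, 2));  // 3: 2.75 -> 3
}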
+
+
+// See FPRound for a description of this function.
+inline double FPRoundToDouble(int64_t sign,
+ int64_t exponent,
+ uint64_t mantissa,
+ FPRounding round_mode) {
+ uint64_t bits =
+ FPRound<uint64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return RawbitsToDouble(bits);
+}
+
+
+// See FPRound for a description of this function.
+inline Float16 FPRoundToFloat16(int64_t sign,
+ int64_t exponent,
+ uint64_t mantissa,
+ FPRounding round_mode) {
+ return RawbitsToFloat16(
+ FPRound<uint16_t,
+ kFloat16ExponentBits,
+ kFloat16MantissaBits>(sign, exponent, mantissa, round_mode));
+}
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign,
+ int64_t exponent,
+ uint64_t mantissa,
+ FPRounding round_mode) {
+ uint32_t bits =
+ FPRound<uint32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return RawbitsToFloat(bits);
+}
+
+
+float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
+float FPToFloat(double value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception = NULL);
+
+double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
+double FPToDouble(float value, UseDefaultNaN DN, bool* exception = NULL);
+
+Float16 FPToFloat16(float value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception = NULL);
+
+Float16 FPToFloat16(double value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception = NULL);
+} // namespace vixl
+
+#endif // VIXL_UTILS_H
diff --git a/js/src/jit/loong64/Architecture-loong64.cpp b/js/src/jit/loong64/Architecture-loong64.cpp
new file mode 100644
index 0000000000..6b1069a592
--- /dev/null
+++ b/js/src/jit/loong64/Architecture-loong64.cpp
@@ -0,0 +1,87 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/Architecture-loong64.h"
+
+#include "jit/FlushICache.h" // js::jit::FlushICache
+#include "jit/loong64/Simulator-loong64.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+Registers::Code Registers::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisters::Code FloatRegisters::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ LiveFloatRegisterSet ret;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ ret.addUnchecked(FromCode((*iter).encoding()));
+ }
+ return ret.set();
+}
+
+uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ return s.size() * sizeof(double);
+}
+
+uint32_t FloatRegister::getRegisterDumpOffsetInBytes() {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ return encoding() * sizeof(double);
+}
+
+bool CPUFlagsHaveBeenComputed() {
+ // TODO(loong64): Add CPU flags support.
+ return true;
+}
+
+uint32_t GetLOONG64Flags() { return 0; }
+
+void FlushICache(void* code, size_t size) {
+#if defined(JS_SIMULATOR)
+ js::jit::SimulatorProcess::FlushICache(code, size);
+
+#elif defined(__GNUC__)
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code),
+ reinterpret_cast<char*>(end));
+
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+
+#endif
+}
+
+} // namespace jit
+} // namespace js
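
The __GNUC__ branch of FlushICache above boils down to a single compiler builtin. A minimal standalone sketch (it clears the cache range over an ordinary data buffer, which is harmless; on x86 the builtin is effectively a no-op, while on LoongArch64/ARM it emits the real cache-maintenance sequence):

#include <cstddef>
#include <cstdint>

static void ClearCacheRange(void* code, size_t size) {
  char* begin = reinterpret_cast<char*>(code);
  __builtin___clear_cache(begin, begin + size);
}

int main() {
  static uint32_t buffer[64] = {0};
  ClearCacheRange(buffer, sizeof(buffer));
  return 0;
}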
diff --git a/js/src/jit/loong64/Architecture-loong64.h b/js/src/jit/loong64/Architecture-loong64.h
new file mode 100644
index 0000000000..48745ee37a
--- /dev/null
+++ b/js/src/jit/loong64/Architecture-loong64.h
@@ -0,0 +1,522 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_Architecture_loong64_h
+#define jit_loong64_Architecture_loong64_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "jit/shared/Architecture-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// LoongArch64 has 32 64-bit integer registers, r0 though r31.
+// The program counter is not accessible as a register.
+//
+// SIMD and scalar floating-point registers share a register bank.
+// Floating-point registers are f0 through f31.
+// 128 bit SIMD registers are vr0 through vr31.
+// e.g., f0 is the bottom 64 bits of vr0.
+
+// LoongArch64 INT Register Convention:
+// Name Alias Usage
+// $r0 $zero Constant zero
+// $r1 $ra Return address
+// $r2 $tp TLS
+// $r3 $sp Stack pointer
+// $r4-$r11 $a0-$a7 Argument registers
+// $r4-$r5 $v0-$v1 Return values
+// $r12-$r20 $t0-$t8 Temporary registers
+// $r21 $x Reserved
+// $r22 $fp Frame pointer
+// $r23-$r31 $s0-$s8 Callee-saved registers
+
+// LoongArch64 FP Register Convention:
+// Name Alias Usage
+// $f0-$f7 $fa0-$fa7 Argument registers
+// $f0-$f1 $fv0-$fv1 Return values
+//   $f8-$f23   $ft0-$ft15  Temporary registers
+// $f24-$f31 $fs0-$fs7 Callee-saved registers
+
+class Registers {
+ public:
+ enum RegisterID {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ ra = r1,
+ tp = r2,
+ sp = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+ a4 = r8,
+ a5 = r9,
+ a6 = r10,
+ a7 = r11,
+ t0 = r12,
+ t1 = r13,
+ t2 = r14,
+ t3 = r15,
+ t4 = r16,
+ t5 = r17,
+ t6 = r18,
+ t7 = r19,
+ t8 = r20,
+ rx = r21,
+ fp = r22,
+ s0 = r23,
+ s1 = r24,
+ s2 = r25,
+ s3 = r26,
+ s4 = r27,
+ s5 = r28,
+ s6 = r29,
+ s7 = r30,
+ s8 = r31,
+ invalid_reg,
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+ typedef uint32_t SetType;
+
+ static const Encoding StackPointer = sp;
+ static const Encoding Invalid = invalid_reg;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+
+ static const char* GetName(uint32_t code) {
+ static const char* const Names[] = {
+ "zero", "ra", "tp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
+ "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "rx",
+ "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8"};
+ static_assert(Total == std::size(Names), "Table is the correct size");
+ if (code >= Total) {
+ return "invalid";
+ }
+ return Names[code];
+ }
+
+ static Code FromName(const char* name);
+
+ static const uint32_t Total = 32;
+ static const uint32_t TotalPhys = 32;
+ static const uint32_t Allocatable =
+ 23; // No named special-function registers.
+
+ static const SetType AllMask = 0xFFFFFFFF;
+ static const SetType NoneMask = 0x0;
+
+ static const SetType ArgRegMask =
+ (1 << Registers::a0) | (1 << Registers::a1) | (1 << Registers::a2) |
+ (1 << Registers::a3) | (1 << Registers::a4) | (1 << Registers::a5) |
+ (1 << Registers::a6) | (1 << Registers::a7);
+
+ static const SetType VolatileMask =
+ (1 << Registers::a0) | (1 << Registers::a1) | (1 << Registers::a2) |
+ (1 << Registers::a3) | (1 << Registers::a4) | (1 << Registers::a5) |
+ (1 << Registers::a6) | (1 << Registers::a7) | (1 << Registers::t0) |
+ (1 << Registers::t1) | (1 << Registers::t2) | (1 << Registers::t3) |
+ (1 << Registers::t4) | (1 << Registers::t5) | (1 << Registers::t6);
+
+ // We use this constant to save registers when entering functions. This
+ // is why $ra is added here even though it is not "Non Volatile".
+ static const SetType NonVolatileMask =
+ (1 << Registers::ra) | (1 << Registers::fp) | (1 << Registers::s0) |
+ (1 << Registers::s1) | (1 << Registers::s2) | (1 << Registers::s3) |
+ (1 << Registers::s4) | (1 << Registers::s5) | (1 << Registers::s6) |
+ (1 << Registers::s7) | (1 << Registers::s8);
+
+ static const SetType NonAllocatableMask =
+ (1 << Registers::zero) | // Always be zero.
+ (1 << Registers::t7) | // First scratch register.
+ (1 << Registers::t8) | // Second scratch register.
+ (1 << Registers::rx) | // Reserved Register.
+ (1 << Registers::ra) | (1 << Registers::tp) | (1 << Registers::sp) |
+ (1 << Registers::fp);
+
+ static const SetType WrapperMask = VolatileMask;
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask = (1 << Registers::a2);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask = (1 << Registers::a0);
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegisters {
+ public:
+ enum FPRegisterID {
+ f0 = 0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23, // Scratch register.
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ };
+
+ // Eight bits: (invalid << 7) | (kind << 5) | encoding
+ typedef uint8_t Code;
+ typedef FPRegisterID Encoding;
+ typedef uint64_t SetType;
+
+ enum Kind : uint8_t { Double, Single, NumTypes };
+
+ static constexpr Code Invalid = 0x80;
+
+ static const char* GetName(uint32_t code) {
+ static const char* const Names[] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+ static_assert(TotalPhys == std::size(Names), "Table is the correct size");
+ if (code >= Total) {
+ return "invalid";
+ }
+ return Names[code];
+ }
+
+ static Code FromName(const char* name);
+
+ static const uint32_t TotalPhys = 32;
+ static const uint32_t Total = TotalPhys * NumTypes;
+ static const uint32_t Allocatable = 31; // Without f23, the scratch register.
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+ // Magic values which are used to duplicate a mask of physical register for
+ // a specific type of register. A multiplication is used to copy and shift
+ // the bits of the physical register mask.
+ static const SetType SpreadSingle = SetType(1)
+ << (uint32_t(Single) * TotalPhys);
+ static const SetType SpreadDouble = SetType(1)
+ << (uint32_t(Double) * TotalPhys);
+ static const SetType Spread = SpreadSingle | SpreadDouble;
+
+ static const SetType AllPhysMask = ((SetType(1) << TotalPhys) - 1);
+ static const SetType AllMask = AllPhysMask * Spread;
+ static const SetType AllSingleMask = AllPhysMask * SpreadSingle;
+ static const SetType AllDoubleMask = AllPhysMask * SpreadDouble;
+ static const SetType NoneMask = SetType(0);
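+
+  // Worked example of the Spread trick: multiplying a physical mask by
+  // Spread (== SpreadDouble | SpreadSingle) replicates it into both kind
+  // lanes, i.e. mask * Spread == mask | (mask << TotalPhys). A physical mask
+  // containing only f0 (0x1) therefore becomes (1 << 0) | (1 << 32), marking
+  // f0 as present both as a Double and as a Single.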
+
+ // TODO(loong64): Much less than ARM64 here.
+ static const SetType NonVolatileMask =
+ SetType((1 << FloatRegisters::f24) | (1 << FloatRegisters::f25) |
+ (1 << FloatRegisters::f26) | (1 << FloatRegisters::f27) |
+ (1 << FloatRegisters::f28) | (1 << FloatRegisters::f29) |
+ (1 << FloatRegisters::f30) | (1 << FloatRegisters::f31)) *
+ Spread;
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ // f23 is the scratch register.
+ static const SetType NonAllocatableMask =
+ (SetType(1) << FloatRegisters::f23) * Spread;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ float s;
+ double d;
+ };
+
+ static constexpr Encoding encoding(Code c) {
+ // assert() not available in constexpr function.
+ // assert(c < Total);
+ return Encoding(c & 31);
+ }
+
+ static constexpr Kind kind(Code c) {
+ // assert() not available in constexpr function.
+ // assert(c < Total && ((c >> 5) & 3) < NumTypes);
+ return Kind((c >> 5) & 3);
+ }
+
+ static constexpr Code fromParts(uint32_t encoding, uint32_t kind,
+ uint32_t invalid) {
+ return Code((invalid << 7) | (kind << 5) | encoding);
+ }
+};
+
+static const uint32_t SpillSlotSize =
+ std::max(sizeof(Registers::RegisterContent),
+ sizeof(FloatRegisters::RegisterContent));
+
+static const uint32_t ShadowStackSpace = 0;
+static const uint32_t SizeOfReturnAddressAfterCall = 0;
+
+// When our only strategy for far jumps is to encode the offset directly, and
+// not insert any jump islands during assembly for even further jumps, then the
+// architecture restricts us to -2^27 .. 2^27-4, to fit into a signed 28-bit
+// value. We further reduce this range to allow the far-jump inserting code to
+// have some breathing room.
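+// Illustrative arithmetic for the constant below: assuming b/bl encode a
+// signed 26-bit offset counted in 4-byte instructions, the raw reach is
+// +/-2^27 bytes (128 MiB); subtracting 20 MiB of slack leaves roughly 108 MiB
+// that is treated as directly reachable.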
+static const uint32_t JumpImmediateRange = ((1 << 27) - (20 * 1024 * 1024));
+
+struct FloatRegister {
+ typedef FloatRegisters Codes;
+ typedef size_t Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ x |= x >> FloatRegisters::TotalPhys;
+ x &= FloatRegisters::AllPhysMask;
+ return mozilla::CountPopulation32(x);
+ }
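+
+  // Note that SetSize counts *physical* registers: the shift-and-or above
+  // folds the Single lane (bits 32..63) onto the Double lane (bits 0..31)
+  // before the popcount, so a set holding { f1 as Double, f1 as Single,
+  // f2 as Single } reports a size of 2.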
+
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType");
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType");
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+
+ private:
+ // These fields only hold valid values: an invalid register is always
+ // represented as a valid encoding and kind with the invalid_ bit set.
+ uint8_t encoding_; // 32 encodings
+ uint8_t kind_; // Double, Single; more later
+ bool invalid_;
+
+ typedef Codes::Kind Kind;
+
+ public:
+ constexpr FloatRegister(Encoding encoding, Kind kind)
+ : encoding_(encoding), kind_(kind), invalid_(false) {
+ // assert(uint32_t(encoding) < Codes::TotalPhys);
+ }
+
+ constexpr FloatRegister()
+ : encoding_(0), kind_(FloatRegisters::Double), invalid_(true) {}
+
+ static FloatRegister FromCode(uint32_t i) {
+ MOZ_ASSERT(i < Codes::Total);
+ return FloatRegister(FloatRegisters::encoding(i), FloatRegisters::kind(i));
+ }
+
+ bool isSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Single;
+ }
+ bool isDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Double;
+ }
+ bool isSimd128() const {
+ MOZ_ASSERT(!invalid_);
+ return false;
+ }
+ bool isInvalid() const { return invalid_; }
+
+ FloatRegister asSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Single);
+ }
+ FloatRegister asDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Double);
+ }
+ FloatRegister asSimd128() const { MOZ_CRASH(); }
+
+ constexpr uint32_t size() const {
+ MOZ_ASSERT(!invalid_);
+ if (kind_ == FloatRegisters::Double) {
+ return sizeof(double);
+ }
+ MOZ_ASSERT(kind_ == FloatRegisters::Single);
+ return sizeof(float);
+ }
+
+ constexpr Code code() const {
+ // assert(!invalid_);
+ return Codes::fromParts(encoding_, kind_, invalid_);
+ }
+
+ constexpr Encoding encoding() const {
+ MOZ_ASSERT(!invalid_);
+ return Encoding(encoding_);
+ }
+
+ const char* name() const { return FloatRegisters::GetName(code()); }
+ bool volatile_() const {
+ MOZ_ASSERT(!invalid_);
+ return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+ }
+ constexpr bool operator!=(FloatRegister other) const {
+ return code() != other.code();
+ }
+ constexpr bool operator==(FloatRegister other) const {
+ return code() == other.code();
+ }
+
+ bool aliases(FloatRegister other) const {
+ return other.encoding_ == encoding_;
+ }
+  // Check whether two floating point registers have equivalent types.
+ bool equiv(FloatRegister other) const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == other.kind_;
+ }
+
+ uint32_t numAliased() const { return Codes::NumTypes; }
+ uint32_t numAlignedAliased() { return numAliased(); }
+
+ FloatRegister aliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(!invalid_);
+ MOZ_ASSERT(aliasIdx < numAliased());
+ return FloatRegister(Encoding(encoding_),
+ Kind((aliasIdx + kind_) % numAliased()));
+ }
+ FloatRegister alignedAliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(aliasIdx < numAliased());
+ return aliased(aliasIdx);
+ }
+ SetType alignedOrDominatedAliasedSet() const {
+ return Codes::Spread << encoding_;
+ }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName Name = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ return LiveAsIndexableSet<Name>(s);
+ }
+
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+ const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
+ return set;
+}
+
+// LoongArch doesn't have double registers that cannot be treated as float32.
+inline bool hasUnaliasedDouble() { return false; }
+
+// LoongArch doesn't have double registers that alias multiple floats.
+inline bool hasMultiAlias() { return false; }
+
+uint32_t GetLOONG64Flags();
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_Architecture_loong64_h */
diff --git a/js/src/jit/loong64/Assembler-loong64.cpp b/js/src/jit/loong64/Assembler-loong64.cpp
new file mode 100644
index 0000000000..1a4976d07a
--- /dev/null
+++ b/js/src/jit/loong64/Assembler-loong64.cpp
@@ -0,0 +1,2478 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/Assembler-loong64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/Marking.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/ExecutableAllocator.h"
+#include "vm/Realm.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+// Note this is used for inter-wasm calls and may pass arguments and results
+// in floating point registers even if the system ABI does not.
+
+// TODO(loong64): This does not fully match LoongArch's calling convention.
+// In the LoongArch calling convention, the first eight floating-point
+// parameters are passed in f0-f7, and any further floating-point parameters
+// are passed like integer parameters. Here we simply pass those extra
+// floating-point parameters on the stack.
+ABIArg ABIArgGenerator::next(MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults: {
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uintptr_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_ + a0.encoding()));
+ intRegIndex_++;
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Double: {
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(double);
+ break;
+ }
+ current_ = ABIArg(FloatRegister(
+ FloatRegisters::Encoding(floatRegIndex_ + f0.encoding()),
+ type == MIRType::Double ? FloatRegisters::Double
+ : FloatRegisters::Single));
+ floatRegIndex_++;
+ break;
+ }
+ case MIRType::Simd128: {
+ MOZ_CRASH("LoongArch does not support simd yet.");
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
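+
+// Illustrative expansion of the rules above, assuming NumIntArgRegs and
+// NumFloatArgRegs are both 8 (a0-a7 and f0-f7): a signature of
+// (Int32, Double, Int32, Float32) is assigned a0, f0 (as a Double), a1 and
+// f1 (as a Single), in that order; once a register class is exhausted,
+// further arguments of that class are placed on the stack at the current
+// stackOffset_.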
+
+// Encode a standard register when it is being used as rd, as rj, or as the
+// extra register rk. These encoders should never be called with an
+// InvalidReg.
+uint32_t js::jit::RJ(Register r) {
+ MOZ_ASSERT(r != InvalidReg);
+ return r.encoding() << RJShift;
+}
+
+uint32_t js::jit::RK(Register r) {
+ MOZ_ASSERT(r != InvalidReg);
+ return r.encoding() << RKShift;
+}
+
+uint32_t js::jit::RD(Register r) {
+ MOZ_ASSERT(r != InvalidReg);
+ return r.encoding() << RDShift;
+}
+
+uint32_t js::jit::FJ(FloatRegister r) { return r.encoding() << RJShift; }
+
+uint32_t js::jit::FK(FloatRegister r) { return r.encoding() << RKShift; }
+
+uint32_t js::jit::FD(FloatRegister r) { return r.encoding() << RDShift; }
+
+uint32_t js::jit::FA(FloatRegister r) { return r.encoding() << FAShift; }
+
+uint32_t js::jit::SA2(uint32_t value) {
+ MOZ_ASSERT(value < 4);
+ return (value & SA2Mask) << SAShift;
+}
+
+uint32_t js::jit::SA3(uint32_t value) {
+ MOZ_ASSERT(value < 8);
+ return (value & SA3Mask) << SAShift;
+}
+
+Register js::jit::toRK(Instruction& i) {
+ return Register::FromCode((i.encode() & RKMask) >> RKShift);
+}
+
+Register js::jit::toRJ(Instruction& i) {
+ return Register::FromCode((i.encode() & RJMask) >> RJShift);
+}
+
+Register js::jit::toRD(Instruction& i) {
+ return Register::FromCode((i.encode() & RDMask) >> RDShift);
+}
+
+Register js::jit::toR(Instruction& i) {
+ return Register::FromCode(i.encode() & RegMask);
+}
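+
+// A register number is simply shifted into its instruction field; with the
+// conventional LoongArch layout (rd in bits [4:0], rj in [9:5], rk in
+// [14:10]) the operand bundle for "add.d a0, a1, a2" would be
+// RD(a0) | RJ(a1) | RK(a2), which the Inst* classes combine with the opcode
+// bits. The exact positions come from the *Shift/*Mask constants in the
+// header; the bit numbers above are only illustrative.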
+
+void InstImm::extractImm16(BOffImm16* dest) { *dest = BOffImm16(*this); }
+
+void AssemblerLOONG64::finish() {
+ MOZ_ASSERT(!isFinished);
+ isFinished = true;
+}
+
+bool AssemblerLOONG64::appendRawCode(const uint8_t* code, size_t numBytes) {
+ return m_buffer.appendRawCode(code, numBytes);
+}
+
+bool AssemblerLOONG64::reserve(size_t size) {
+ // This buffer uses fixed-size chunks so there's no point in reserving
+ // now vs. on-demand.
+ return !oom();
+}
+
+bool AssemblerLOONG64::swapBuffer(wasm::Bytes& bytes) {
+ // For now, specialize to the one use case. As long as wasm::Bytes is a
+ // Vector, not a linked-list of chunks, there's not much we can do other
+ // than copy.
+ MOZ_ASSERT(bytes.empty());
+ if (!bytes.resize(bytesNeeded())) {
+ return false;
+ }
+ m_buffer.executableCopy(bytes.begin());
+ return true;
+}
+
+void AssemblerLOONG64::copyJumpRelocationTable(uint8_t* dest) {
+ if (jumpRelocations_.length()) {
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+ }
+}
+
+void AssemblerLOONG64::copyDataRelocationTable(uint8_t* dest) {
+ if (dataRelocations_.length()) {
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+ }
+}
+
+AssemblerLOONG64::Condition AssemblerLOONG64::InvertCondition(Condition cond) {
+ switch (cond) {
+ case Equal:
+ return NotEqual;
+ case NotEqual:
+ return Equal;
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ case Signed:
+ return NotSigned;
+ case NotSigned:
+ return Signed;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerLOONG64::DoubleCondition AssemblerLOONG64::InvertCondition(
+ DoubleCondition cond) {
+ switch (cond) {
+ case DoubleOrdered:
+ return DoubleUnordered;
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleUnordered:
+ return DoubleOrdered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerLOONG64::Condition AssemblerLOONG64::InvertCmpCondition(
+ Condition cond) {
+ switch (cond) {
+ case Equal:
+ case NotEqual:
+ return cond;
+ case LessThan:
+ return GreaterThan;
+ case LessThanOrEqual:
+ return GreaterThanOrEqual;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return Below;
+ case AboveOrEqual:
+ return BelowOrEqual;
+ case Below:
+ return Above;
+ case BelowOrEqual:
+ return AboveOrEqual;
+ default:
+ MOZ_CRASH("no meaningful swapped-operand condition");
+ }
+}
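+
+// Note the difference between the two helpers above: InvertCondition negates
+// the predicate itself (LessThan becomes GreaterThanOrEqual), whereas
+// InvertCmpCondition is meant for swapping the compared operands (LessThan
+// becomes GreaterThan); Equal and NotEqual are symmetric and thus unchanged.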
+
+BOffImm16::BOffImm16(InstImm inst)
+ : data((inst.encode() >> Imm16Shift) & Imm16Mask) {}
+
+Instruction* BOffImm16::getDest(Instruction* src) const {
+ return &src[(((int32_t)data << 16) >> 16) + 1];
+}
+
+bool AssemblerLOONG64::oom() const {
+ return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
+ dataRelocations_.oom();
+}
+
+// Size of the instruction stream, in bytes.
+size_t AssemblerLOONG64::size() const { return m_buffer.size(); }
+
+// Size of the relocation table, in bytes.
+size_t AssemblerLOONG64::jumpRelocationTableBytes() const {
+ return jumpRelocations_.length();
+}
+
+size_t AssemblerLOONG64::dataRelocationTableBytes() const {
+ return dataRelocations_.length();
+}
+
+// Size of the data table, in bytes.
+size_t AssemblerLOONG64::bytesNeeded() const {
+ return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
+}
+
+// Write a blob of binary data into the instruction stream.
+BufferOffset AssemblerLOONG64::writeInst(uint32_t x, uint32_t* dest) {
+ MOZ_ASSERT(hasCreator());
+ if (dest == nullptr) {
+ return m_buffer.putInt(x);
+ }
+
+ WriteInstStatic(x, dest);
+ return BufferOffset();
+}
+
+void AssemblerLOONG64::WriteInstStatic(uint32_t x, uint32_t* dest) {
+ MOZ_ASSERT(dest != nullptr);
+ *dest = x;
+}
+
+BufferOffset AssemblerLOONG64::haltingAlign(int alignment) {
+ // TODO(loong64): Implement a proper halting align.
+ return nopAlign(alignment);
+}
+
+BufferOffset AssemblerLOONG64::nopAlign(int alignment) {
+ BufferOffset ret;
+ MOZ_ASSERT(m_buffer.isAligned(4));
+ if (alignment == 8) {
+ if (!m_buffer.isAligned(alignment)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned()) {
+ ret = tmp;
+ }
+ }
+ } else {
+ MOZ_ASSERT((alignment & (alignment - 1)) == 0);
+ while (size() & (alignment - 1)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned()) {
+ ret = tmp;
+ }
+ }
+ }
+ return ret;
+}
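+
+// Example: with 12 bytes already emitted, nopAlign(16) appends nops one at a
+// time until size() is a multiple of 16 (here a single nop) and returns the
+// offset of the first padding instruction; if no padding was needed, the
+// returned BufferOffset remains unassigned.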
+
+// Logical operations.
+BufferOffset AssemblerLOONG64::as_and(Register rd, Register rj, Register rk) {
+ spew("and %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_and, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_or(Register rd, Register rj, Register rk) {
+ spew("or %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_or, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_xor(Register rd, Register rj, Register rk) {
+ spew("xor %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_xor, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_nor(Register rd, Register rj, Register rk) {
+ spew("nor %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_nor, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_andn(Register rd, Register rj, Register rk) {
+ spew("andn %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_andn, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_orn(Register rd, Register rj, Register rk) {
+ spew("orn %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_orn, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_andi(Register rd, Register rj, int32_t ui12) {
+ MOZ_ASSERT(is_uintN(ui12, 12));
+ spew("andi %3s,%3s,0x%x", rd.name(), rj.name(), ui12);
+ return writeInst(InstImm(op_andi, ui12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ori(Register rd, Register rj, int32_t ui12) {
+ MOZ_ASSERT(is_uintN(ui12, 12));
+ spew("ori %3s,%3s,0x%x", rd.name(), rj.name(), ui12);
+ return writeInst(InstImm(op_ori, ui12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_xori(Register rd, Register rj, int32_t ui12) {
+ MOZ_ASSERT(is_uintN(ui12, 12));
+ spew("xori %3s,%3s,0x%x", rd.name(), rj.name(), ui12);
+ return writeInst(InstImm(op_xori, ui12, rj, rd, 12).encode());
+}
+
+// Branch and jump instructions
+BufferOffset AssemblerLOONG64::as_b(JOffImm26 off) {
+ spew("b %d", off.decode());
+ return writeInst(InstJump(op_b, off).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bl(JOffImm26 off) {
+ spew("bl %d", off.decode());
+ return writeInst(InstJump(op_bl, off).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_jirl(Register rd, Register rj,
+ BOffImm16 off) {
+ spew("jirl %3s, %3s, %d", rd.name(), rj.name(), off.decode());
+ return writeInst(InstImm(op_jirl, off, rj, rd).encode());
+}
+
+InstImm AssemblerLOONG64::getBranchCode(JumpOrCall jumpOrCall) {
+ // jirl or beq
+ if (jumpOrCall == BranchIsCall) {
+ return InstImm(op_jirl, BOffImm16(0), zero, ra);
+ }
+
+ return InstImm(op_beq, BOffImm16(0), zero, zero);
+}
+
+InstImm AssemblerLOONG64::getBranchCode(Register rj, Register rd, Condition c) {
+ // beq, bne
+ MOZ_ASSERT(c == AssemblerLOONG64::Equal || c == AssemblerLOONG64::NotEqual);
+ return InstImm(c == AssemblerLOONG64::Equal ? op_beq : op_bne, BOffImm16(0),
+ rj, rd);
+}
+
+InstImm AssemblerLOONG64::getBranchCode(Register rj, Condition c) {
+ // beq, bne, blt, bge
+ switch (c) {
+ case AssemblerLOONG64::Equal:
+ case AssemblerLOONG64::Zero:
+ case AssemblerLOONG64::BelowOrEqual:
+ return InstImm(op_beq, BOffImm16(0), rj, zero);
+ case AssemblerLOONG64::NotEqual:
+ case AssemblerLOONG64::NonZero:
+ case AssemblerLOONG64::Above:
+ return InstImm(op_bne, BOffImm16(0), rj, zero);
+ case AssemblerLOONG64::GreaterThan:
+ return InstImm(op_blt, BOffImm16(0), zero, rj);
+ case AssemblerLOONG64::GreaterThanOrEqual:
+ case AssemblerLOONG64::NotSigned:
+ return InstImm(op_bge, BOffImm16(0), rj, zero);
+ case AssemblerLOONG64::LessThan:
+ case AssemblerLOONG64::Signed:
+ return InstImm(op_blt, BOffImm16(0), rj, zero);
+ case AssemblerLOONG64::LessThanOrEqual:
+ return InstImm(op_bge, BOffImm16(0), zero, rj);
+ default:
+ MOZ_CRASH("Condition not supported.");
+ }
+}
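+
+// The single-register conditions above are comparisons against zero, encoded
+// by placing $zero in the other operand slot: for example GreaterThan becomes
+// "blt $zero, rj" (taken when 0 < rj) and LessThanOrEqual becomes
+// "bge $zero, rj" (taken when 0 >= rj).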
+
+// Code semantics must conform to compareFloatingpoint
+InstImm AssemblerLOONG64::getBranchCode(FPConditionBit cj) {
+ return InstImm(op_bcz, 0, cj, true); // bcnez
+}
+
+// Arithmetic instructions
+BufferOffset AssemblerLOONG64::as_add_w(Register rd, Register rj, Register rk) {
+ spew("add_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_add_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_add_d(Register rd, Register rj, Register rk) {
+ spew("add_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_add_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sub_w(Register rd, Register rj, Register rk) {
+ spew("sub_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sub_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sub_d(Register rd, Register rj, Register rk) {
+ spew("sub_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sub_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_addi_w(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("addi_w %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_addi_w, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_addi_d(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("addi_d %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_addi_d, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_addu16i_d(Register rd, Register rj,
+ int32_t si16) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(si16));
+ spew("addu16i_d %3s,%3s,0x%x", rd.name(), rj.name(), si16);
+ return writeInst(InstImm(op_addu16i_d, Imm16(si16), rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_alsl_w(Register rd, Register rj, Register rk,
+ uint32_t sa2) {
+ MOZ_ASSERT(sa2 < 4);
+  spew("alsl_w   %3s,%3s,%3s,0x%x", rd.name(), rj.name(), rk.name(), sa2);
+ return writeInst(InstReg(op_alsl_w, sa2, rk, rj, rd, 2).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_alsl_wu(Register rd, Register rj, Register rk,
+ uint32_t sa2) {
+ MOZ_ASSERT(sa2 < 4);
+  spew("alsl_wu   %3s,%3s,%3s,0x%x", rd.name(), rj.name(), rk.name(), sa2);
+ return writeInst(InstReg(op_alsl_wu, sa2, rk, rj, rd, 2).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_alsl_d(Register rd, Register rj, Register rk,
+ uint32_t sa2) {
+ MOZ_ASSERT(sa2 < 4);
+ spew("alsl_d %3s,%3s,%3s,0x%x", rd.name(), rj.name(), rk.name(), sa2);
+ return writeInst(InstReg(op_alsl_d, sa2, rk, rj, rd, 2).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_lu12i_w(Register rd, int32_t si20) {
+ spew("lu12i_w %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_lu12i_w, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_lu32i_d(Register rd, int32_t si20) {
+ spew("lu32i_d %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_lu32i_d, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_lu52i_d(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_uintN(si12, 12));
+ spew("lu52i_d %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_lu52i_d, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_slt(Register rd, Register rj, Register rk) {
+ spew("slt %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_slt, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sltu(Register rd, Register rj, Register rk) {
+ spew("sltu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sltu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_slti(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("slti %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_slti, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sltui(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("sltui %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_sltui, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_pcaddi(Register rd, int32_t si20) {
+ spew("pcaddi %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_pcaddi, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_pcaddu12i(Register rd, int32_t si20) {
+ spew("pcaddu12i %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_pcaddu12i, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_pcaddu18i(Register rd, int32_t si20) {
+ spew("pcaddu18i %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_pcaddu18i, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_pcalau12i(Register rd, int32_t si20) {
+ spew("pcalau12i %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_pcalau12i, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mul_w(Register rd, Register rj, Register rk) {
+ spew("mul_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mul_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulh_w(Register rd, Register rj,
+ Register rk) {
+ spew("mulh_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulh_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulh_wu(Register rd, Register rj,
+ Register rk) {
+ spew("mulh_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulh_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mul_d(Register rd, Register rj, Register rk) {
+ spew("mul_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mul_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulh_d(Register rd, Register rj,
+ Register rk) {
+ spew("mulh_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulh_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulh_du(Register rd, Register rj,
+ Register rk) {
+ spew("mulh_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulh_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulw_d_w(Register rd, Register rj,
+ Register rk) {
+ spew("mulw_d_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulw_d_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulw_d_wu(Register rd, Register rj,
+ Register rk) {
+ spew("mulw_d_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulw_d_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_div_w(Register rd, Register rj, Register rk) {
+ spew("div_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_div_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mod_w(Register rd, Register rj, Register rk) {
+ spew("mod_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mod_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_div_wu(Register rd, Register rj,
+ Register rk) {
+ spew("div_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_div_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mod_wu(Register rd, Register rj,
+ Register rk) {
+ spew("mod_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mod_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_div_d(Register rd, Register rj, Register rk) {
+ spew("div_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_div_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mod_d(Register rd, Register rj, Register rk) {
+ spew("mod_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mod_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_div_du(Register rd, Register rj,
+ Register rk) {
+ spew("div_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_div_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mod_du(Register rd, Register rj,
+ Register rk) {
+ spew("mod_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mod_du, rk, rj, rd).encode());
+}
+
+// Shift instructions
+BufferOffset AssemblerLOONG64::as_sll_w(Register rd, Register rj, Register rk) {
+ spew("sll_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sll_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srl_w(Register rd, Register rj, Register rk) {
+ spew("srl_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_srl_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sra_w(Register rd, Register rj, Register rk) {
+ spew("sra_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sra_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_rotr_w(Register rd, Register rj,
+ Register rk) {
+ spew("rotr_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_rotr_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_slli_w(Register rd, Register rj,
+ int32_t ui5) {
+ MOZ_ASSERT(is_uintN(ui5, 5));
+ spew("slli_w %3s,%3s,0x%x", rd.name(), rj.name(), ui5);
+ return writeInst(InstImm(op_slli_w, ui5, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srli_w(Register rd, Register rj,
+ int32_t ui5) {
+ MOZ_ASSERT(is_uintN(ui5, 5));
+ spew("srli_w %3s,%3s,0x%x", rd.name(), rj.name(), ui5);
+ return writeInst(InstImm(op_srli_w, ui5, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srai_w(Register rd, Register rj,
+ int32_t ui5) {
+ MOZ_ASSERT(is_uintN(ui5, 5));
+ spew("srai_w %3s,%3s,0x%x", rd.name(), rj.name(), ui5);
+ return writeInst(InstImm(op_srai_w, ui5, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_rotri_w(Register rd, Register rj,
+ int32_t ui5) {
+ MOZ_ASSERT(is_uintN(ui5, 5));
+ spew("rotri_w %3s,%3s,0x%x", rd.name(), rj.name(), ui5);
+ return writeInst(InstImm(op_rotri_w, ui5, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sll_d(Register rd, Register rj, Register rk) {
+ spew("sll_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sll_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srl_d(Register rd, Register rj, Register rk) {
+ spew("srl_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_srl_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sra_d(Register rd, Register rj, Register rk) {
+ spew("sra_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sra_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_rotr_d(Register rd, Register rj,
+ Register rk) {
+ spew("rotr_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_rotr_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_slli_d(Register rd, Register rj,
+ int32_t ui6) {
+ MOZ_ASSERT(is_uintN(ui6, 6));
+ spew("slli_d %3s,%3s,0x%x", rd.name(), rj.name(), ui6);
+ return writeInst(InstImm(op_slli_d, ui6, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srli_d(Register rd, Register rj,
+ int32_t ui6) {
+ MOZ_ASSERT(is_uintN(ui6, 6));
+ spew("srli_d %3s,%3s,0x%x", rd.name(), rj.name(), ui6);
+ return writeInst(InstImm(op_srli_d, ui6, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srai_d(Register rd, Register rj,
+ int32_t ui6) {
+ MOZ_ASSERT(is_uintN(ui6, 6));
+ spew("srai_d %3s,%3s,0x%x", rd.name(), rj.name(), ui6);
+ return writeInst(InstImm(op_srai_d, ui6, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_rotri_d(Register rd, Register rj,
+ int32_t ui6) {
+ MOZ_ASSERT(is_uintN(ui6, 6));
+ spew("rotri_d %3s,%3s,0x%x", rd.name(), rj.name(), ui6);
+ return writeInst(InstImm(op_rotri_d, ui6, rj, rd, 6).encode());
+}
+
+// Bit operation instructions
+BufferOffset AssemblerLOONG64::as_ext_w_b(Register rd, Register rj) {
+ spew("ext_w_b %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_ext_w_b, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ext_w_h(Register rd, Register rj) {
+ spew("ext_w_h %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_ext_w_h, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_clo_w(Register rd, Register rj) {
+ spew("clo_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_clo_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_clz_w(Register rd, Register rj) {
+ spew("clz_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_clz_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_cto_w(Register rd, Register rj) {
+ spew("cto_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_cto_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ctz_w(Register rd, Register rj) {
+ spew("ctz_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_ctz_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_clo_d(Register rd, Register rj) {
+ spew("clo_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_clo_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_clz_d(Register rd, Register rj) {
+ spew("clz_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_clz_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_cto_d(Register rd, Register rj) {
+ spew("cto_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_cto_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ctz_d(Register rd, Register rj) {
+ spew("ctz_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_ctz_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bytepick_w(Register rd, Register rj,
+ Register rk, int32_t sa2) {
+ MOZ_ASSERT(sa2 < 4);
+ spew("bytepick_w %3s,%3s,%3s, 0x%x", rd.name(), rj.name(), rk.name(), sa2);
+ return writeInst(InstReg(op_bytepick_w, sa2, rk, rj, rd, 2).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bytepick_d(Register rd, Register rj,
+ Register rk, int32_t sa3) {
+ MOZ_ASSERT(sa3 < 8);
+ spew("bytepick_d %3s,%3s,%3s, 0x%x", rd.name(), rj.name(), rk.name(), sa3);
+ return writeInst(InstReg(op_bytepick_d, sa3, rk, rj, rd, 3).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revb_2h(Register rd, Register rj) {
+ spew("revb_2h %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revb_2h, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revb_4h(Register rd, Register rj) {
+ spew("revb_4h %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revb_4h, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revb_2w(Register rd, Register rj) {
+ spew("revb_2w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revb_2w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revb_d(Register rd, Register rj) {
+ spew("revb_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revb_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revh_2w(Register rd, Register rj) {
+ spew("revh_2w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revh_2w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revh_d(Register rd, Register rj) {
+ spew("revh_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revh_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bitrev_4b(Register rd, Register rj) {
+ spew("bitrev_4b %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_bitrev_4b, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bitrev_8b(Register rd, Register rj) {
+ spew("bitrev_8b %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_bitrev_8b, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bitrev_w(Register rd, Register rj) {
+ spew("bitrev_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_bitrev_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bitrev_d(Register rd, Register rj) {
+ spew("bitrev_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_bitrev_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bstrins_w(Register rd, Register rj,
+ int32_t msbw, int32_t lsbw) {
+ MOZ_ASSERT(lsbw <= msbw);
+ spew("bstrins_w %3s,%3s,0x%x,0x%x", rd.name(), rj.name(), msbw, lsbw);
+ return writeInst(InstImm(op_bstr_w, msbw, lsbw, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bstrins_d(Register rd, Register rj,
+ int32_t msbd, int32_t lsbd) {
+ MOZ_ASSERT(lsbd <= msbd);
+ spew("bstrins_d %3s,%3s,0x%x,0x%x", rd.name(), rj.name(), msbd, lsbd);
+ return writeInst(InstImm(op_bstrins_d, msbd, lsbd, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bstrpick_w(Register rd, Register rj,
+ int32_t msbw, int32_t lsbw) {
+ MOZ_ASSERT(lsbw <= msbw);
+ spew("bstrpick_w %3s,%3s,0x%x,0x%x", rd.name(), rj.name(), msbw, lsbw);
+ return writeInst(InstImm(op_bstr_w, msbw, lsbw, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bstrpick_d(Register rd, Register rj,
+ int32_t msbd, int32_t lsbd) {
+ MOZ_ASSERT(lsbd <= msbd);
+ spew("bstrpick_d %3s,%3s,0x%x,0x%x", rd.name(), rj.name(), msbd, lsbd);
+ return writeInst(InstImm(op_bstrpick_d, msbd, lsbd, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_maskeqz(Register rd, Register rj,
+ Register rk) {
+ spew("maskeqz %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_maskeqz, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_masknez(Register rd, Register rj,
+ Register rk) {
+ spew("masknez %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_masknez, rk, rj, rd).encode());
+}
+
+// Load and store instructions
+BufferOffset AssemblerLOONG64::as_ld_b(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_b %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_b, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_h(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_h %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_h, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_w(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_w %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_w, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_d(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_d %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_d, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_bu(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_bu %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_bu, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_hu(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_hu %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_hu, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_wu(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_wu %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_wu, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_st_b(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("st_b %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_st_b, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_st_h(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("st_h %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_st_h, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_st_w(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("st_w %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_st_w, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_st_d(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("st_d %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_st_d, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_b(Register rd, Register rj, Register rk) {
+ spew("ldx_b %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_b, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_h(Register rd, Register rj, Register rk) {
+ spew("ldx_h %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_h, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_w(Register rd, Register rj, Register rk) {
+ spew("ldx_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_d(Register rd, Register rj, Register rk) {
+ spew("ldx_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_bu(Register rd, Register rj,
+ Register rk) {
+ spew("ldx_bu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_bu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_hu(Register rd, Register rj,
+ Register rk) {
+ spew("ldx_hu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_hu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ldx_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stx_b(Register rd, Register rj, Register rk) {
+ spew("stx_b %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_stx_b, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stx_h(Register rd, Register rj, Register rk) {
+ spew("stx_h %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_stx_h, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stx_w(Register rd, Register rj, Register rk) {
+ spew("stx_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_stx_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stx_d(Register rd, Register rj, Register rk) {
+ spew("stx_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_stx_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldptr_w(Register rd, Register rj,
+ int32_t si14) {
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ spew("ldptr_w %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ return writeInst(InstImm(op_ldptr_w, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldptr_d(Register rd, Register rj,
+ int32_t si14) {
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ spew("ldptr_d %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ return writeInst(InstImm(op_ldptr_d, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stptr_w(Register rd, Register rj,
+ int32_t si14) {
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ spew("stptr_w %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ return writeInst(InstImm(op_stptr_w, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stptr_d(Register rd, Register rj,
+ int32_t si14) {
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ spew("stptr_d %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ return writeInst(InstImm(op_stptr_d, si14 >> 2, rj, rd, 14).encode());
+}
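+
+// The ldptr/stptr forms encode a 14-bit immediate that is stored as a word
+// offset, which is why the byte offset must be 4-byte aligned and is shifted
+// right by 2 before encoding; the reachable range is thus +/-32 KiB in 4-byte
+// steps.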
+
+BufferOffset AssemblerLOONG64::as_preld(int32_t hint, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("preld 0x%x,%3s,0x%x", hint, rj.name(), si12);
+ return writeInst(InstImm(op_preld, si12, rj, hint).encode());
+}
+
+// Atomic instructions
+BufferOffset AssemblerLOONG64::as_amswap_w(Register rd, Register rj,
+ Register rk) {
+ spew("amswap_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amswap_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amswap_d(Register rd, Register rj,
+ Register rk) {
+ spew("amswap_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amswap_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amadd_w(Register rd, Register rj,
+ Register rk) {
+ spew("amadd_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amadd_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amadd_d(Register rd, Register rj,
+ Register rk) {
+ spew("amadd_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amadd_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amand_w(Register rd, Register rj,
+ Register rk) {
+ spew("amand_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amand_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amand_d(Register rd, Register rj,
+ Register rk) {
+ spew("amand_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amand_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amor_w(Register rd, Register rj,
+ Register rk) {
+ spew("amor_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amor_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amor_d(Register rd, Register rj,
+ Register rk) {
+ spew("amor_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amor_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amxor_w(Register rd, Register rj,
+ Register rk) {
+ spew("amxor_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amxor_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amxor_d(Register rd, Register rj,
+ Register rk) {
+ spew("amxor_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amxor_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_w(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_d(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_w(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_d(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_du(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_du(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amswap_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amswap_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amswap_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amswap_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amswap_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amswap_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amadd_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amadd_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amadd_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amadd_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amadd_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amadd_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amand_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amand_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amand_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amand_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amand_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amand_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amor_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amor_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amor_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amor_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amor_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amor_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amxor_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amxor_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amxor_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amxor_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amxor_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amxor_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_db_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_db_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_db_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_db_du(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_db_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_db_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_db_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_db_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_db_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_db_du(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_db_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_db_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ll_w(Register rd, Register rj, int32_t si14) {
+ spew("ll_w %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ return writeInst(InstImm(op_ll_w, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ll_d(Register rd, Register rj, int32_t si14) {
+ spew("ll_d %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ return writeInst(InstImm(op_ll_d, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sc_w(Register rd, Register rj, int32_t si14) {
+ spew("sc_w %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ return writeInst(InstImm(op_sc_w, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sc_d(Register rd, Register rj, int32_t si14) {
+ spew("sc_d %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ return writeInst(InstImm(op_sc_d, si14 >> 2, rj, rd, 14).encode());
+}
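+
+// Typical use of the LL/SC pair above (an illustrative sketch only; the real
+// sequences are built by the MacroAssembler):
+//   again: as_ll_w(dest, base, 0);      // load and take the reservation
+//          as_add_w(tmp, dest, value);  // compute the updated value
+//          as_sc_w(tmp, base, 0);       // store iff the reservation held;
+//                                       // tmp becomes 1 on success, 0 on
+//                                       // failure
+//          branch back to 'again' while tmp == 0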
+
+// Barrier instructions
+BufferOffset AssemblerLOONG64::as_dbar(int32_t hint) {
+ MOZ_ASSERT(is_uintN(hint, 15));
+ spew("dbar 0x%x", hint);
+ return writeInst(InstImm(op_dbar, hint).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ibar(int32_t hint) {
+ MOZ_ASSERT(is_uintN(hint, 15));
+ spew("ibar 0x%x", hint);
+ return writeInst(InstImm(op_ibar, hint).encode());
+}
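+
+// For both barriers the 15-bit hint selects a variant; hint 0 is believed to
+// be the strongest (full) form and is what generic memory and instruction
+// barriers are expected to use.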
+
+/* =============================================================== */
+
+// FP Arithmetic instructions
+BufferOffset AssemblerLOONG64::as_fadd_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fadd_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fadd_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fadd_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fadd_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fadd_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsub_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fsub_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fsub_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsub_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fsub_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fsub_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmul_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmul_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmul_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmul_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmul_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmul_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fdiv_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fdiv_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fdiv_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fdiv_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fdiv_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fdiv_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmadd_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fmadd_s %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fmadd_s, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmadd_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fmadd_d %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fmadd_d, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmsub_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fmsub_s %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fmsub_s, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmsub_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fmsub_d %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fmsub_d, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fnmadd_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fnmadd_s %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fnmadd_s, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fnmadd_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fnmadd_d %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fnmadd_d, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fnmsub_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fnmsub_s %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fnmsub_s, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fnmsub_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fnmsub_d %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fnmsub_d, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmax_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmax_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmax_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmax_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmax_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmax_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmin_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmin_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmin_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmin_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmin_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmin_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmaxa_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmaxa_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmaxa_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmaxa_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmaxa_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmaxa_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmina_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmina_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmina_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmina_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmina_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmina_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fabs_s(FloatRegister fd, FloatRegister fj) {
+ spew("fabs_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fabs_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fabs_d(FloatRegister fd, FloatRegister fj) {
+ spew("fabs_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fabs_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fneg_s(FloatRegister fd, FloatRegister fj) {
+ spew("fneg_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fneg_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fneg_d(FloatRegister fd, FloatRegister fj) {
+ spew("fneg_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fneg_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsqrt_s(FloatRegister fd, FloatRegister fj) {
+ spew("fsqrt_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fsqrt_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsqrt_d(FloatRegister fd, FloatRegister fj) {
+ spew("fsqrt_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fsqrt_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fcopysign_s(FloatRegister fd,
+ FloatRegister fj,
+ FloatRegister fk) {
+ spew("fcopysign_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fcopysign_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fcopysign_d(FloatRegister fd,
+ FloatRegister fj,
+ FloatRegister fk) {
+ spew("fcopysign_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fcopysign_d, fk, fj, fd).encode());
+}
+
+// FP compare instructions
+// fcmp.cond.s and fcmp.cond.d instructions
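+// Each helper below selects the single- or double-precision encoding from
+// |fmt| and writes the comparison result into FP condition flag |cd|.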
+BufferOffset AssemblerLOONG64::as_fcmp_cor(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cor_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, COR, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cor_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, COR, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_ceq(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_ceq_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CEQ, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_ceq_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CEQ, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cne(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cne_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CNE, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cne_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CNE, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cle(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cle_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CLE, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cle_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CLE, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_clt(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_clt_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CLT, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_clt_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CLT, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cun(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cun_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CUN, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cun_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CUN, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cueq(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cueq_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CUEQ, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cueq_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CUEQ, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cune(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cune_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CUNE, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cune_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CUNE, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cule(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cule_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CULE, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cule_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CULE, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cult(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cult_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CULT, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cult_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CULT, fk, fj, cd).encode());
+ }
+}
+
+// FP conversion instructions
+BufferOffset AssemblerLOONG64::as_fcvt_s_d(FloatRegister fd, FloatRegister fj) {
+ spew("fcvt_s_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fcvt_s_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fcvt_d_s(FloatRegister fd, FloatRegister fj) {
+ spew("fcvt_d_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fcvt_d_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ffint_s_w(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ffint_s_w %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ffint_s_w, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ffint_s_l(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ffint_s_l %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ffint_s_l, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ffint_d_w(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ffint_d_w %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ffint_d_w, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ffint_d_l(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ffint_d_l %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ffint_d_l, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftint_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftint_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftint_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftint_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftint_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftint_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftint_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftint_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftint_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftint_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftint_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftint_l_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrm_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrm_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrm_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrm_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrm_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrm_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrm_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrm_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrm_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrm_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrm_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrm_l_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrp_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrp_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrp_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrp_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrp_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrp_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrp_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrp_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrp_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrp_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrp_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrp_l_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrz_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrz_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrz_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrz_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrz_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrz_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrz_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrz_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrz_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrz_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrz_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrz_l_d, fj, fd).encode());
+}
+BufferOffset AssemblerLOONG64::as_ftintrne_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrne_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrne_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrne_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrne_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrne_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrne_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrne_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrne_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrne_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrne_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrne_l_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_frint_s(FloatRegister fd, FloatRegister fj) {
+ spew("frint_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_frint_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_frint_d(FloatRegister fd, FloatRegister fj) {
+ spew("frint_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_frint_d, fj, fd).encode());
+}
+
+// FP mov instructions
+BufferOffset AssemblerLOONG64::as_fmov_s(FloatRegister fd, FloatRegister fj) {
+ spew("fmov_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fmov_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmov_d(FloatRegister fd, FloatRegister fj) {
+ spew("fmov_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fmov_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsel(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FPConditionBit ca) {
+ spew("fsel %3s,%3s,%3s,%d", fd.name(), fj.name(), fk.name(), ca);
+ return writeInst(InstReg(op_fsel, ca, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2fr_w(FloatRegister fd, Register rj) {
+ spew("movgr2fr_w %3s,%3s", fd.name(), rj.name());
+ return writeInst(InstReg(op_movgr2fr_w, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2fr_d(FloatRegister fd, Register rj) {
+ spew("movgr2fr_d %3s,%3s", fd.name(), rj.name());
+ return writeInst(InstReg(op_movgr2fr_d, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2frh_w(FloatRegister fd, Register rj) {
+ spew("movgr2frh_w %3s,%3s", fd.name(), rj.name());
+ return writeInst(InstReg(op_movgr2frh_w, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfr2gr_s(Register rd, FloatRegister fj) {
+ spew("movfr2gr_s %3s,%3s", rd.name(), fj.name());
+ return writeInst(InstReg(op_movfr2gr_s, fj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfr2gr_d(Register rd, FloatRegister fj) {
+ spew("movfr2gr_d %3s,%3s", rd.name(), fj.name());
+ return writeInst(InstReg(op_movfr2gr_d, fj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfrh2gr_s(Register rd, FloatRegister fj) {
+ spew("movfrh2gr_s %3s,%3s", rd.name(), fj.name());
+ return writeInst(InstReg(op_movfrh2gr_s, fj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2fcsr(Register rj) {
+ spew("movgr2fcsr %3s", rj.name());
+ return writeInst(InstReg(op_movgr2fcsr, rj, FCSR).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfcsr2gr(Register rd) {
+ spew("movfcsr2gr %3s", rd.name());
+ return writeInst(InstReg(op_movfcsr2gr, FCSR, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfr2cf(FPConditionBit cd,
+ FloatRegister fj) {
+ spew("movfr2cf %d,%3s", cd, fj.name());
+ return writeInst(InstReg(op_movfr2cf, fj, cd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movcf2fr(FloatRegister fd,
+ FPConditionBit cj) {
+ spew("movcf2fr %3s,%d", fd.name(), cj);
+ return writeInst(InstReg(op_movcf2fr, cj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2cf(FPConditionBit cd, Register rj) {
+ spew("movgr2cf %d,%3s", cd, rj.name());
+ return writeInst(InstReg(op_movgr2cf, rj, cd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movcf2gr(Register rd, FPConditionBit cj) {
+ spew("movcf2gr %3s,%d", rd.name(), cj);
+ return writeInst(InstReg(op_movcf2gr, cj, rd).encode());
+}
+
+// FP load/store instructions
+BufferOffset AssemblerLOONG64::as_fld_s(FloatRegister fd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("fld_s %3s,%3s,0x%x", fd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_fld_s, si12, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fld_d(FloatRegister fd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("fld_d %3s,%3s,0x%x", fd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_fld_d, si12, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fst_s(FloatRegister fd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("fst_s %3s,%3s,0x%x", fd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_fst_s, si12, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fst_d(FloatRegister fd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("fst_d %3s,%3s,0x%x", fd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_fst_d, si12, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fldx_s(FloatRegister fd, Register rj,
+ Register rk) {
+ spew("fldx_s %3s,%3s,%3s", fd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_fldx_s, rk, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fldx_d(FloatRegister fd, Register rj,
+ Register rk) {
+ spew("fldx_d %3s,%3s,%3s", fd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_fldx_d, rk, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fstx_s(FloatRegister fd, Register rj,
+ Register rk) {
+ spew("fstx_s %3s,%3s,%3s", fd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_fstx_s, rk, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fstx_d(FloatRegister fd, Register rj,
+ Register rk) {
+ spew("fstx_d %3s,%3s,%3s", fd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_fstx_d, rk, rj, fd).encode());
+}
+
+/* ========================================================================= */
+
+void AssemblerLOONG64::bind(Label* label, BufferOffset boff) {
+ spew(".set Llabel %p", label);
+ // If our caller didn't give us an explicit target to bind to,
+ // then we want to bind to the location of the next instruction.
+ BufferOffset dest = boff.assigned() ? boff : nextOffset();
+ if (label->used()) {
+ int32_t next;
+
+ // A used label holds a link to a branch that uses it.
+ BufferOffset b(label);
+ do {
+ // Even a 0 offset may be invalid if we're out of memory.
+ if (oom()) {
+ return;
+ }
+
+ Instruction* inst = editSrc(b);
+
+ // Second word holds a pointer to the next branch in label's chain.
+ next = inst[1].encode();
+ bind(reinterpret_cast<InstImm*>(inst), b.getOffset(), dest.getOffset());
+
+ b = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+ }
+ label->bind(dest.getOffset());
+}
+
+void AssemblerLOONG64::retarget(Label* label, Label* target) {
+ spew("retarget %p -> %p", label, target);
+ if (label->used() && !oom()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ int32_t next;
+ BufferOffset labelBranchOffset(label);
+
+ // Find the head of the use chain for label.
+ do {
+ Instruction* inst = editSrc(labelBranchOffset);
+
+ // Second word holds a pointer to the next branch in chain.
+ next = inst[1].encode();
+ labelBranchOffset = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+
+ // Then patch the head of label's use chain to the tail of
+ // target's use chain, prepending the entire use chain of target.
+ Instruction* inst = editSrc(labelBranchOffset);
+ int32_t prev = target->offset();
+ target->use(label->offset());
+ inst[1].setData(prev);
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ target->use(label->offset());
+ }
+ }
+ label->reset();
+}
+
+void dbg_break() {}
+
+void AssemblerLOONG64::as_break(uint32_t code) {
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ spew("break %d", code);
+ writeInst(InstImm(op_break, code).encode());
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite a call in JITed code with 32 bits worth of an offset. This is
+// only meant to operate on code that has already been invalidated, so it
+// should be totally safe. Since that instruction will never be executed
+// again, an ICache flush should not be necessary.
+void AssemblerLOONG64::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will
+ // end up being the call instruction.
+ *(raw - 1) = imm.value;
+}
+
+uint8_t* AssemblerLOONG64::NextInstruction(uint8_t* inst_, uint32_t* count) {
+ Instruction* inst = reinterpret_cast<Instruction*>(inst_);
+ if (count != nullptr) {
+ *count += sizeof(Instruction);
+ }
+ return reinterpret_cast<uint8_t*>(inst->next());
+}
+
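+// Note: the toggled instruction pair relies on beq and addu16i.d sharing the
+// same 16-bit immediate field layout, so ToggleToJmp/ToggleToCmp only need to
+// rewrite the top opcode bits.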
+void AssemblerLOONG64::ToggleToJmp(CodeLocationLabel inst_) {
+ InstImm* inst = (InstImm*)inst_.raw();
+
+ MOZ_ASSERT(inst->extractBitField(31, 26) == (uint32_t)op_addu16i_d >> 26);
+ // We converted beq to addu16i_d, so now we restore it.
+ inst->setOpcode(op_beq, 6);
+}
+
+void AssemblerLOONG64::ToggleToCmp(CodeLocationLabel inst_) {
+ InstImm* inst = (InstImm*)inst_.raw();
+
+ // toggledJump is always used for short jumps.
+ MOZ_ASSERT(inst->extractBitField(31, 26) == (uint32_t)op_beq >> 26);
+ // Replace "beq $zero, $zero, offset" with "addu16i_d $zero, $zero, offset"
+ inst->setOpcode(op_addu16i_d, 6);
+}
+
+// Since there are no pools in the LoongArch64 implementation, this should be
+// simple.
+Instruction* Instruction::next() { return this + 1; }
+
+InstImm AssemblerLOONG64::invertBranch(InstImm branch, BOffImm16 skipOffset) {
+ uint32_t rj = 0;
+ OpcodeField opcode = (OpcodeField)((branch.extractBitField(31, 26)) << 26);
+ switch (opcode) {
+ case op_beq:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bne, 6);
+ return branch;
+ case op_bne:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_beq, 6);
+ return branch;
+ case op_bge:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_blt, 6);
+ return branch;
+ case op_bgeu:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bltu, 6);
+ return branch;
+ case op_blt:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bge, 6);
+ return branch;
+ case op_bltu:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bgeu, 6);
+ return branch;
+ case op_beqz:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bnez, 6);
+ return branch;
+ case op_bnez:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_beqz, 6);
+ return branch;
+ case op_bcz:
+ branch.setBOffImm16(skipOffset);
+ rj = branch.extractRJ();
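+ // For bceqz/bcnez, bit 3 of the RJ field selects the condition sense, so
+ // flipping it inverts the branch.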
+ if (rj & 0x8) {
+ branch.setRJ(rj & 0x17);
+ } else {
+ branch.setRJ(rj | 0x8);
+ }
+ return branch;
+ default:
+ MOZ_CRASH("Error creating long branch.");
+ }
+}
+
+#ifdef JS_JITSPEW
+void AssemblerLOONG64::decodeBranchInstAndSpew(InstImm branch) {
+ OpcodeField opcode = (OpcodeField)((branch.extractBitField(31, 26)) << 26);
+ uint32_t rd_id;
+ uint32_t rj_id;
+ uint32_t cj_id;
+ uint32_t immi = branch.extractImm16Value();
+ switch (opcode) {
+ case op_beq:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("beq 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_bne:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bne 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_bge:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bge 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_bgeu:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bgeu 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_blt:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("blt 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_bltu:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bltu 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_beqz:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("beqz 0x%x,%3s,0x%x", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), rd_id);
+ break;
+ case op_bnez:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bnez 0x%x,%3s,0x%x", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), rd_id);
+ break;
+ case op_bcz:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ cj_id = branch.extractBitField(CJShift + CJBits - 1, CJShift);
+ if (rj_id & 0x8) {
+ spew("bcnez 0x%x,FCC%d,0x%x", (int32_t(immi << 18) >> 16) + 4, cj_id,
+ rd_id);
+ } else {
+ spew("bceqz 0x%x,FCC%d,0x%x", (int32_t(immi << 18) >> 16) + 4, cj_id,
+ rd_id);
+ }
+ break;
+ case op_jirl:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("jirl 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ default:
+ MOZ_CRASH("Error disassembling branch.");
+ }
+}
+#endif
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+}
+
+uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
+ Instruction* inst = (Instruction*)instPtr;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
+static JitCode* CodeFromJump(Instruction* jump) {
+ uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ while (reader.more()) {
+ JitCode* child =
+ CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void TraceOneDataRelocation(JSTracer* trc,
+ mozilla::Maybe<AutoWritableJitCode>& awjc,
+ JitCode* code, Instruction* inst) {
+ void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
+ void* prior = ptr;
+
+ // Data relocations can be for Values or for raw pointers. If a Value is
+ // zero-tagged, we can trace it as if it were a raw pointer. If a Value
+ // is not zero-tagged, we have to interpret it as a Value to ensure that the
+ // tag bits are masked off to recover the actual pointer.
+ uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
+ if (word >> JSVAL_TAG_SHIFT) {
+ // This relocation is a Value with a non-zero tag.
+ Value v = Value::fromRawBits(word);
+ TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
+ ptr = (void*)v.bitsAsPunboxPointer();
+ } else {
+ // This relocation is a raw pointer or a Value with a zero tag.
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(
+ trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
+ }
+
+ if (ptr != prior) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
+ }
+}
+
+/* static */
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* inst = (Instruction*)(code->raw() + offset);
+ TraceOneDataRelocation(trc, awjc, code, inst);
+ }
+}
+
+void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
+ if (label.patchAt().bound()) {
+ auto mode = label.linkMode();
+ intptr_t offset = label.patchAt().offset();
+ intptr_t target = label.target().offset();
+
+ if (mode == CodeLabel::RawPointer) {
+ *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
+ } else {
+ MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
+ mode == CodeLabel::JumpImmediate);
+ Instruction* inst = (Instruction*)(rawCode + offset);
+ Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + target));
+ }
+ }
+}
+
+void Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) {
+ int64_t offset = target - branch;
+ InstImm inst_jirl = InstImm(op_jirl, BOffImm16(0), zero, ra);
+ InstImm inst_beq = InstImm(op_beq, BOffImm16(0), zero, zero);
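+ // The macro-assembler reserves a multi-word patchable block for each branch:
+ // word 0 is the branch itself, word 1 carried the label link (it becomes a
+ // nop or part of the load sequence once bound), and the remaining words hold
+ // the Li48 load of the target plus a jirl when a long jump is needed.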
+
+ // If the encoded offset is 4, then the jump must be short.
+ if (BOffImm16(inst[0]).decode() == 4) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop(); // This word previously held the INVALID_OFFSET link.
+ return;
+ }
+
+ // Generate the long jump for calls because the return address has to be
+ // the address after the reserved block.
+ if (inst[0].encode() == inst_jirl.encode()) {
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[3].makeNop(); // There is 1 nop.
+ inst[4] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, ra);
+ return;
+ }
+
+ if (BOffImm16::IsInRange(offset)) {
+ // Skip trailing nops.
+ bool skipNops = (inst[0].encode() != inst_jirl.encode() &&
+ inst[0].encode() != inst_beq.encode());
+
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+
+ if (skipNops) {
+ inst[2] = InstImm(op_bge, BOffImm16(3 * sizeof(uint32_t)), zero, zero);
+ // There are 2 nops after this
+ }
+ return;
+ }
+
+ if (inst[0].encode() == inst_beq.encode()) {
+ // Handle long unconditional jump. Only four instructions are needed.
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[3] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, zero);
+ } else {
+ // Handle long conditional jump.
+ inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(uint32_t)));
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(BufferOffset(branch + sizeof(uint32_t)), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[4] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, zero);
+ }
+}
+
+void Assembler::processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+}
+
+uint32_t Assembler::PatchWrite_NearCallSize() {
+ // Loading an address needs 3 instructions, plus a jump.
+ return (3 + 1) * sizeof(uint32_t);
+}
+
+void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall) {
+ Instruction* inst = (Instruction*)start.raw();
+ uint8_t* dest = toCall.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+ // Always use long jump for two reasons:
+ // - Jump has to be the same size because of PatchWrite_NearCallSize.
+ //  - Return address has to be at the end of the replaced block.
+ //  A short jump wouldn't be more efficient.
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
+ inst[3] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, ra);
+}
+
+uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstImm* i2 = (InstImm*)i1->next();
+ InstImm* i3 = (InstImm*)i2->next();
+
+ MOZ_ASSERT((i0->extractBitField(31, 25)) == ((uint32_t)op_lu12i_w >> 25));
+ MOZ_ASSERT((i1->extractBitField(31, 22)) == ((uint32_t)op_ori >> 22));
+ MOZ_ASSERT((i2->extractBitField(31, 25)) == ((uint32_t)op_lu32i_d >> 25));
+
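+ // The patchable load is either the 4-instruction Li64 sequence
+ // (lu12i.w/ori/lu32i.d/lu52i.d, covering bits 12-31, 0-11, 32-51 and 52-63
+ // respectively) or the 3-instruction Li48 sequence, whose 48-bit result is
+ // sign-extended.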
+ if ((i3->extractBitField(31, 22)) == ((uint32_t)op_lu52i_d >> 22)) {
+ // Li64
+ uint64_t value =
+ (uint64_t(i0->extractBitField(Imm20Bits + Imm20Shift - 1, Imm20Shift))
+ << 12) |
+ (uint64_t(
+ i1->extractBitField(Imm12Bits + Imm12Shift - 1, Imm12Shift))) |
+ (uint64_t(i2->extractBitField(Imm20Bits + Imm20Shift - 1, Imm20Shift))
+ << 32) |
+ (uint64_t(i3->extractBitField(Imm12Bits + Imm12Shift - 1, Imm12Shift))
+ << 52);
+ return value;
+ } else {
+ // Li48
+ uint64_t value =
+ (uint64_t(i0->extractBitField(Imm20Bits + Imm20Shift - 1, Imm20Shift))
+ << 12) |
+ (uint64_t(
+ i1->extractBitField(Imm12Bits + Imm12Shift - 1, Imm12Shift))) |
+ (uint64_t(i2->extractBitField(Imm20Bits + Imm20Shift - 1, Imm20Shift))
+ << 32);
+
+ return uint64_t((int64_t(value) << 16) >> 16);
+ }
+}
+
+void Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value) {
+ // Todo: with ma_liPatchable
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstImm* i2 = (InstImm*)i1->next();
+ InstImm* i3 = (InstImm*)i2->next();
+
+ MOZ_ASSERT((i0->extractBitField(31, 25)) == ((uint32_t)op_lu12i_w >> 25));
+ MOZ_ASSERT((i1->extractBitField(31, 22)) == ((uint32_t)op_ori >> 22));
+ MOZ_ASSERT((i2->extractBitField(31, 25)) == ((uint32_t)op_lu32i_d >> 25));
+
+ if ((i3->extractBitField(31, 22)) == ((uint32_t)op_lu52i_d >> 22)) {
+ // Li64
+ *i0 = InstImm(op_lu12i_w, (int32_t)((value >> 12) & 0xfffff),
+ Register::FromCode(i0->extractRD()), false);
+ *i1 = InstImm(op_ori, (int32_t)(value & 0xfff),
+ Register::FromCode(i1->extractRJ()),
+ Register::FromCode(i1->extractRD()), 12);
+ *i2 = InstImm(op_lu32i_d, (int32_t)((value >> 32) & 0xfffff),
+ Register::FromCode(i2->extractRD()), false);
+ *i3 = InstImm(op_lu52i_d, (int32_t)((value >> 52) & 0xfff),
+ Register::FromCode(i3->extractRJ()),
+ Register::FromCode(i3->extractRD()), 12);
+ } else {
+ // Li48
+ *i0 = InstImm(op_lu12i_w, (int32_t)((value >> 12) & 0xfffff),
+ Register::FromCode(i0->extractRD()), false);
+ *i1 = InstImm(op_ori, (int32_t)(value & 0xfff),
+ Register::FromCode(i1->extractRJ()),
+ Register::FromCode(i1->extractRD()), 12);
+ *i2 = InstImm(op_lu32i_d, (int32_t)((value >> 32) & 0xfffff),
+ Register::FromCode(i2->extractRD()), false);
+ }
+}
+
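+// WriteLoad64Instructions emits the 3-instruction Li48 pattern
+// (lu12i.w/ori/lu32i.d) for |value| into a pre-reserved slot; callers append
+// the jump, and the placeholder value can be patched later (e.g. via
+// UpdateLoad64Value).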
+void Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value) {
+ Instruction* inst1 = inst0->next();
+ Instruction* inst2 = inst1->next();
+ *inst0 = InstImm(op_lu12i_w, (int32_t)((value >> 12) & 0xfffff), reg, false);
+ *inst1 = InstImm(op_ori, (int32_t)(value & 0xfff), reg, reg, 12);
+ *inst2 = InstImm(op_lu32i_d, (int32_t)((value >> 32) & 0xfffff), reg, false);
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ ImmPtr newValue, ImmPtr expectedValue) {
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue) {
+ Instruction* inst = (Instruction*)label.raw();
+
+ // Extract old Value
+ DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
+ MOZ_ASSERT(value == uint64_t(expectedValue.value));
+
+ // Replace with new value
+ Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
+}
+
+uint64_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
+ InstImm* inst = (InstImm*)code;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+ Instruction* inst = (Instruction*)inst_.raw();
+ InstImm* i0 = (InstImm*)inst;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstImm* i2 = (InstImm*)i1->next();
+ Instruction* i3 = (Instruction*)i2->next();
+
+ MOZ_ASSERT((i0->extractBitField(31, 25)) == ((uint32_t)op_lu12i_w >> 25));
+ MOZ_ASSERT((i1->extractBitField(31, 22)) == ((uint32_t)op_ori >> 22));
+ MOZ_ASSERT((i2->extractBitField(31, 25)) == ((uint32_t)op_lu32i_d >> 25));
+
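+ // The first three words (the Li48 load of the callee) are left untouched;
+ // only the final word is flipped between a jirl (call enabled) and a nop
+ // (call disabled).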
+ if (enabled) {
+ MOZ_ASSERT((i3->extractBitField(31, 25)) != ((uint32_t)op_lu12i_w >> 25));
+ InstImm jirl = InstImm(op_jirl, BOffImm16(0), ScratchRegister, ra);
+ *i3 = jirl;
+ } else {
+ InstNOP nop;
+ *i3 = nop;
+ }
+}
diff --git a/js/src/jit/loong64/Assembler-loong64.h b/js/src/jit/loong64/Assembler-loong64.h
new file mode 100644
index 0000000000..4f20e4949d
--- /dev/null
+++ b/js/src/jit/loong64/Assembler-loong64.h
@@ -0,0 +1,1884 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_Assembler_loong64_h
+#define jit_loong64_Assembler_loong64_h
+
+#include "mozilla/Sprintf.h"
+#include <iterator>
+
+#include "jit/CompactBuffer.h"
+#include "jit/JitCode.h"
+#include "jit/JitSpewer.h"
+#include "jit/loong64/Architecture-loong64.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/Disassembler-shared.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register zero{Registers::zero};
+static constexpr Register ra{Registers::ra};
+static constexpr Register tp{Registers::tp};
+static constexpr Register sp{Registers::sp};
+static constexpr Register a0{Registers::a0};
+static constexpr Register a1{Registers::a1};
+static constexpr Register a2{Registers::a2};
+static constexpr Register a3{Registers::a3};
+static constexpr Register a4{Registers::a4};
+static constexpr Register a5{Registers::a5};
+static constexpr Register a6{Registers::a6};
+static constexpr Register a7{Registers::a7};
+static constexpr Register t0{Registers::t0};
+static constexpr Register t1{Registers::t1};
+static constexpr Register t2{Registers::t2};
+static constexpr Register t3{Registers::t3};
+static constexpr Register t4{Registers::t4};
+static constexpr Register t5{Registers::t5};
+static constexpr Register t6{Registers::t6};
+static constexpr Register t7{Registers::t7};
+static constexpr Register t8{Registers::t8};
+static constexpr Register rx{Registers::rx};
+static constexpr Register fp{Registers::fp};
+static constexpr Register s0{Registers::s0};
+static constexpr Register s1{Registers::s1};
+static constexpr Register s2{Registers::s2};
+static constexpr Register s3{Registers::s3};
+static constexpr Register s4{Registers::s4};
+static constexpr Register s5{Registers::s5};
+static constexpr Register s6{Registers::s6};
+static constexpr Register s7{Registers::s7};
+static constexpr Register s8{Registers::s8};
+
+static constexpr FloatRegister f0{FloatRegisters::f0, FloatRegisters::Double};
+static constexpr FloatRegister f1{FloatRegisters::f1, FloatRegisters::Double};
+static constexpr FloatRegister f2{FloatRegisters::f2, FloatRegisters::Double};
+static constexpr FloatRegister f3{FloatRegisters::f3, FloatRegisters::Double};
+static constexpr FloatRegister f4{FloatRegisters::f4, FloatRegisters::Double};
+static constexpr FloatRegister f5{FloatRegisters::f5, FloatRegisters::Double};
+static constexpr FloatRegister f6{FloatRegisters::f6, FloatRegisters::Double};
+static constexpr FloatRegister f7{FloatRegisters::f7, FloatRegisters::Double};
+static constexpr FloatRegister f8{FloatRegisters::f8, FloatRegisters::Double};
+static constexpr FloatRegister f9{FloatRegisters::f9, FloatRegisters::Double};
+static constexpr FloatRegister f10{FloatRegisters::f10, FloatRegisters::Double};
+static constexpr FloatRegister f11{FloatRegisters::f11, FloatRegisters::Double};
+static constexpr FloatRegister f12{FloatRegisters::f12, FloatRegisters::Double};
+static constexpr FloatRegister f13{FloatRegisters::f13, FloatRegisters::Double};
+static constexpr FloatRegister f14{FloatRegisters::f14, FloatRegisters::Double};
+static constexpr FloatRegister f15{FloatRegisters::f15, FloatRegisters::Double};
+static constexpr FloatRegister f16{FloatRegisters::f16, FloatRegisters::Double};
+static constexpr FloatRegister f17{FloatRegisters::f17, FloatRegisters::Double};
+static constexpr FloatRegister f18{FloatRegisters::f18, FloatRegisters::Double};
+static constexpr FloatRegister f19{FloatRegisters::f19, FloatRegisters::Double};
+static constexpr FloatRegister f20{FloatRegisters::f20, FloatRegisters::Double};
+static constexpr FloatRegister f21{FloatRegisters::f21, FloatRegisters::Double};
+static constexpr FloatRegister f22{FloatRegisters::f22, FloatRegisters::Double};
+static constexpr FloatRegister f23{FloatRegisters::f23, FloatRegisters::Double};
+static constexpr FloatRegister f24{FloatRegisters::f24, FloatRegisters::Double};
+static constexpr FloatRegister f25{FloatRegisters::f25, FloatRegisters::Double};
+static constexpr FloatRegister f26{FloatRegisters::f26, FloatRegisters::Double};
+static constexpr FloatRegister f27{FloatRegisters::f27, FloatRegisters::Double};
+static constexpr FloatRegister f28{FloatRegisters::f28, FloatRegisters::Double};
+static constexpr FloatRegister f29{FloatRegisters::f29, FloatRegisters::Double};
+static constexpr FloatRegister f30{FloatRegisters::f30, FloatRegisters::Double};
+static constexpr FloatRegister f31{FloatRegisters::f31, FloatRegisters::Double};
+
+static constexpr Register InvalidReg{Registers::Invalid};
+static constexpr FloatRegister InvalidFloatReg;
+
+static constexpr Register StackPointer = sp;
+static constexpr Register FramePointer = fp;
+static constexpr Register ReturnReg = a0;
+static constexpr Register64 ReturnReg64(ReturnReg);
+static constexpr FloatRegister ReturnFloat32Reg{FloatRegisters::f0,
+ FloatRegisters::Single};
+static constexpr FloatRegister ReturnDoubleReg = f0;
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+
+static constexpr Register ScratchRegister = t7;
+static constexpr Register SecondScratchReg = t8;
+
+// Helper classes for ScratchRegister usage. They assert that only one piece
+// of code thinks it has exclusive ownership of each scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope {
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchRegister) {}
+};
+
+struct SecondScratchRegisterScope : public AutoRegisterScope {
+ explicit SecondScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, SecondScratchReg) {}
+};
+
+static constexpr FloatRegister ScratchFloat32Reg{FloatRegisters::f23,
+ FloatRegisters::Single};
+static constexpr FloatRegister ScratchDoubleReg = f23;
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
+
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg) {}
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg) {}
+};
+
+// Use arg reg from EnterJIT function as OsrFrameReg.
+static constexpr Register OsrFrameReg = a3;
+static constexpr Register PreBarrierReg = a1;
+static constexpr Register InterpreterPCReg = t0;
+static constexpr Register CallTempReg0 = t0;
+static constexpr Register CallTempReg1 = t1;
+static constexpr Register CallTempReg2 = t2;
+static constexpr Register CallTempReg3 = t3;
+static constexpr Register CallTempReg4 = t4;
+static constexpr Register CallTempReg5 = t5;
+static constexpr Register CallTempNonArgRegs[] = {t0, t1, t2, t3};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+static constexpr Register IntArgReg0 = a0;
+static constexpr Register IntArgReg1 = a1;
+static constexpr Register IntArgReg2 = a2;
+static constexpr Register IntArgReg3 = a3;
+static constexpr Register IntArgReg4 = a4;
+static constexpr Register IntArgReg5 = a5;
+static constexpr Register IntArgReg6 = a6;
+static constexpr Register IntArgReg7 = a7;
+static constexpr Register HeapReg = s7;
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg1;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg1;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
+
+static constexpr Register JSReturnReg_Type = a3;
+static constexpr Register JSReturnReg_Data = a2;
+static constexpr Register JSReturnReg = a2;
+static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = t0;
+static constexpr Register ABINonArgReg1 = t1;
+static constexpr Register ABINonArgReg2 = t2;
+static constexpr Register ABINonArgReg3 = t3;
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different
+static constexpr Register ABINonArgReturnReg0 = t0;
+static constexpr Register ABINonArgReturnReg1 = t1;
+static constexpr Register ABINonVolatileReg = s0;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call, which must preserve the ABI argument, return,
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = ra;
+
+// This register may be volatile or nonvolatile.
+// Avoid f23 which is the scratch register.
+static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::f21,
+ FloatRegisters::Double};
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions. Must be nonvolatile.
+static constexpr Register InstanceReg = s4;
+
+// Registers used for wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = t1;
+
+static constexpr uint32_t ABIStackAlignment = 16;
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// TODO(loong64): this is just a filler to prevent a build failure. The
+// LoongArch SIMD alignment requirements still need to be explored.
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments "
+ "which are used for "
+ "the constant sections of the code buffer. Thus it should be "
+ "larger than the "
+ "alignment for SIMD constants.");
+
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmTrapInstructionLength = 4;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+static constexpr Scale ScalePointer = TimesEight;
+
+// TODO(loong64): Add LoongArch instruction types description.
+
+// LoongArch instruction encoding constants.
+static const uint32_t RJShift = 5;
+static const uint32_t RJBits = 5;
+static const uint32_t RKShift = 10;
+static const uint32_t RKBits = 5;
+static const uint32_t RDShift = 0;
+static const uint32_t RDBits = 5;
+static const uint32_t FJShift = 5;
+static const uint32_t FJBits = 5;
+static const uint32_t FKShift = 10;
+static const uint32_t FKBits = 5;
+static const uint32_t FDShift = 0;
+static const uint32_t FDBits = 5;
+static const uint32_t FAShift = 15;
+static const uint32_t FABits = 5;
+static const uint32_t CJShift = 5;
+static const uint32_t CJBits = 3;
+static const uint32_t CDShift = 0;
+static const uint32_t CDBits = 3;
+static const uint32_t CAShift = 15;
+static const uint32_t CABits = 3;
+static const uint32_t CONDShift = 15;
+static const uint32_t CONDBits = 5;
+
+static const uint32_t SAShift = 15;
+static const uint32_t SA2Bits = 2;
+static const uint32_t SA3Bits = 3;
+static const uint32_t LSBWShift = 10;
+static const uint32_t LSBWBits = 5;
+static const uint32_t LSBDShift = 10;
+static const uint32_t LSBDBits = 6;
+static const uint32_t MSBWShift = 16;
+static const uint32_t MSBWBits = 5;
+static const uint32_t MSBDShift = 16;
+static const uint32_t MSBDBits = 6;
+static const uint32_t Imm5Shift = 10;
+static const uint32_t Imm5Bits = 5;
+static const uint32_t Imm6Shift = 10;
+static const uint32_t Imm6Bits = 6;
+static const uint32_t Imm12Shift = 10;
+static const uint32_t Imm12Bits = 12;
+static const uint32_t Imm14Shift = 10;
+static const uint32_t Imm14Bits = 14;
+static const uint32_t Imm15Shift = 0;
+static const uint32_t Imm15Bits = 15;
+static const uint32_t Imm16Shift = 10;
+static const uint32_t Imm16Bits = 16;
+static const uint32_t Imm20Shift = 5;
+static const uint32_t Imm20Bits = 20;
+static const uint32_t Imm21Shift = 0;
+static const uint32_t Imm21Bits = 21;
+static const uint32_t Imm26Shift = 0;
+static const uint32_t Imm26Bits = 26;
+static const uint32_t CODEShift = 0;
+static const uint32_t CODEBits = 15;
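+// As a sketch, a 3R-type instruction such as add.d rd, rj, rk is assembled as
+//   op_add_d | (rk << RKShift) | (rj << RJShift) | (rd << RDShift),
+// with immediates likewise shifted into their Imm*Shift positions.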
+
+// LoongArch instruction field bit masks.
+static const uint32_t RJMask = (1 << RJBits) - 1;
+static const uint32_t RKMask = (1 << RKBits) - 1;
+static const uint32_t RDMask = (1 << RDBits) - 1;
+static const uint32_t SA2Mask = (1 << SA2Bits) - 1;
+static const uint32_t SA3Mask = (1 << SA3Bits) - 1;
+static const uint32_t CONDMask = (1 << CONDBits) - 1;
+static const uint32_t LSBWMask = (1 << LSBWBits) - 1;
+static const uint32_t LSBDMask = (1 << LSBDBits) - 1;
+static const uint32_t MSBWMask = (1 << MSBWBits) - 1;
+static const uint32_t MSBDMask = (1 << MSBDBits) - 1;
+static const uint32_t CODEMask = (1 << CODEBits) - 1;
+static const uint32_t Imm5Mask = (1 << Imm5Bits) - 1;
+static const uint32_t Imm6Mask = (1 << Imm6Bits) - 1;
+static const uint32_t Imm12Mask = (1 << Imm12Bits) - 1;
+static const uint32_t Imm14Mask = (1 << Imm14Bits) - 1;
+static const uint32_t Imm15Mask = (1 << Imm15Bits) - 1;
+static const uint32_t Imm16Mask = (1 << Imm16Bits) - 1;
+static const uint32_t Imm20Mask = (1 << Imm20Bits) - 1;
+static const uint32_t Imm21Mask = (1 << Imm21Bits) - 1;
+static const uint32_t Imm26Mask = (1 << Imm26Bits) - 1;
+static const uint32_t BOffImm16Mask = ((1 << Imm16Bits) - 1) << Imm16Shift;
+static const uint32_t BOffImm21Mask = ((1 << Imm21Bits) - 1) << Imm21Shift;
+static const uint32_t BOffImm26Mask = ((1 << Imm26Bits) - 1) << Imm26Shift;
+static const uint32_t RegMask = Registers::Total - 1;
+
+// TODO(loong64) Change to syscall?
+static const uint32_t MAX_BREAK_CODE = 1024 - 1;
+static const uint32_t WASM_TRAP = 6; // BRK_OVERFLOW
+
+// TODO(loong64) Change to LoongArch instruction type.
+class Instruction;
+class InstReg;
+class InstImm;
+class InstJump;
+
+uint32_t RJ(Register r);
+uint32_t RK(Register r);
+uint32_t RD(Register r);
+uint32_t FJ(FloatRegister r);
+uint32_t FK(FloatRegister r);
+uint32_t FD(FloatRegister r);
+uint32_t FA(FloatRegister r);
+uint32_t SA2(uint32_t value);
+uint32_t SA2(FloatRegister r);
+uint32_t SA3(uint32_t value);
+uint32_t SA3(FloatRegister r);
+
+Register toRK(Instruction& i);
+Register toRJ(Instruction& i);
+Register toRD(Instruction& i);
+Register toR(Instruction& i);
+
+// LoongArch enums for instruction fields
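+// Each enumerator below is the opcode already shifted into its final bit
+// position, so encoders simply OR in the register and immediate fields.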
+enum OpcodeField {
+ op_beqz = 0x10U << 26,
+ op_bnez = 0x11U << 26,
+ op_bcz = 0x12U << 26, // bceqz & bcnez
+ op_jirl = 0x13U << 26,
+ op_b = 0x14U << 26,
+ op_bl = 0x15U << 26,
+ op_beq = 0x16U << 26,
+ op_bne = 0x17U << 26,
+ op_blt = 0x18U << 26,
+ op_bge = 0x19U << 26,
+ op_bltu = 0x1aU << 26,
+ op_bgeu = 0x1bU << 26,
+
+ op_addu16i_d = 0x4U << 26,
+
+ op_lu12i_w = 0xaU << 25,
+ op_lu32i_d = 0xbU << 25,
+ op_pcaddi = 0xcU << 25,
+ op_pcalau12i = 0xdU << 25,
+ op_pcaddu12i = 0xeU << 25,
+ op_pcaddu18i = 0xfU << 25,
+ op_ll_w = 0x20U << 24,
+ op_sc_w = 0x21U << 24,
+ op_ll_d = 0x22U << 24,
+ op_sc_d = 0x23U << 24,
+ op_ldptr_w = 0x24U << 24,
+ op_stptr_w = 0x25U << 24,
+ op_ldptr_d = 0x26U << 24,
+ op_stptr_d = 0x27U << 24,
+ op_bstrins_d = 0x2U << 22,
+ op_bstrpick_d = 0x3U << 22,
+ op_slti = 0x8U << 22,
+ op_sltui = 0x9U << 22,
+ op_addi_w = 0xaU << 22,
+ op_addi_d = 0xbU << 22,
+ op_lu52i_d = 0xcU << 22,
+ op_andi = 0xdU << 22,
+ op_ori = 0xeU << 22,
+ op_xori = 0xfU << 22,
+ op_ld_b = 0xa0U << 22,
+ op_ld_h = 0xa1U << 22,
+ op_ld_w = 0xa2U << 22,
+ op_ld_d = 0xa3U << 22,
+ op_st_b = 0xa4U << 22,
+ op_st_h = 0xa5U << 22,
+ op_st_w = 0xa6U << 22,
+ op_st_d = 0xa7U << 22,
+ op_ld_bu = 0xa8U << 22,
+ op_ld_hu = 0xa9U << 22,
+ op_ld_wu = 0xaaU << 22,
+ op_preld = 0xabU << 22,
+ op_fld_s = 0xacU << 22,
+ op_fst_s = 0xadU << 22,
+ op_fld_d = 0xaeU << 22,
+ op_fst_d = 0xafU << 22,
+ op_bstr_w = 0x3U << 21, // BSTRINS_W & BSTRPICK_W
+ op_fmadd_s = 0x81U << 20,
+ op_fmadd_d = 0x82U << 20,
+ op_fmsub_s = 0x85U << 20,
+ op_fmsub_d = 0x86U << 20,
+ op_fnmadd_s = 0x89U << 20,
+ op_fnmadd_d = 0x8aU << 20,
+ op_fnmsub_s = 0x8dU << 20,
+ op_fnmsub_d = 0x8eU << 20,
+ op_fcmp_cond_s = 0xc1U << 20,
+ op_fcmp_cond_d = 0xc2U << 20,
+
+ op_bytepick_d = 0x3U << 18,
+ op_fsel = 0x340U << 18,
+
+ op_bytepick_w = 0x4U << 17,
+ op_alsl_w = 0x2U << 17,
+ op_alsl_wu = 0x3U << 17,
+ op_alsl_d = 0x16U << 17,
+
+ op_slli_d = 0x41U << 16,
+ op_srli_d = 0x45U << 16,
+ op_srai_d = 0x49U << 16,
+
+ op_slli_w = 0x81U << 15,
+ op_srli_w = 0x89U << 15,
+ op_srai_w = 0x91U << 15,
+ op_add_w = 0x20U << 15,
+ op_add_d = 0x21U << 15,
+ op_sub_w = 0x22U << 15,
+ op_sub_d = 0x23U << 15,
+ op_slt = 0x24U << 15,
+ op_sltu = 0x25U << 15,
+ op_maskeqz = 0x26U << 15,
+ op_masknez = 0x27U << 15,
+ op_nor = 0x28U << 15,
+ op_and = 0x29U << 15,
+ op_or = 0x2aU << 15,
+ op_xor = 0x2bU << 15,
+ op_orn = 0x2cU << 15,
+ op_andn = 0x2dU << 15,
+ op_sll_w = 0x2eU << 15,
+ op_srl_w = 0x2fU << 15,
+ op_sra_w = 0x30U << 15,
+ op_sll_d = 0x31U << 15,
+ op_srl_d = 0x32U << 15,
+ op_sra_d = 0x33U << 15,
+ op_rotr_w = 0x36U << 15,
+ op_rotr_d = 0x37U << 15,
+ op_rotri_w = 0x99U << 15,
+ op_rotri_d = 0x4DU << 16,
+ op_mul_w = 0x38U << 15,
+ op_mulh_w = 0x39U << 15,
+ op_mulh_wu = 0x3aU << 15,
+ op_mul_d = 0x3bU << 15,
+ op_mulh_d = 0x3cU << 15,
+ op_mulh_du = 0x3dU << 15,
+ op_mulw_d_w = 0x3eU << 15,
+ op_mulw_d_wu = 0x3fU << 15,
+ op_div_w = 0x40U << 15,
+ op_mod_w = 0x41U << 15,
+ op_div_wu = 0x42U << 15,
+ op_mod_wu = 0x43U << 15,
+ op_div_d = 0x44U << 15,
+ op_mod_d = 0x45U << 15,
+ op_div_du = 0x46U << 15,
+ op_mod_du = 0x47U << 15,
+ op_break = 0x54U << 15,
+ op_syscall = 0x56U << 15,
+ op_fadd_s = 0x201U << 15,
+ op_fadd_d = 0x202U << 15,
+ op_fsub_s = 0x205U << 15,
+ op_fsub_d = 0x206U << 15,
+ op_fmul_s = 0x209U << 15,
+ op_fmul_d = 0x20aU << 15,
+ op_fdiv_s = 0x20dU << 15,
+ op_fdiv_d = 0x20eU << 15,
+ op_fmax_s = 0x211U << 15,
+ op_fmax_d = 0x212U << 15,
+ op_fmin_s = 0x215U << 15,
+ op_fmin_d = 0x216U << 15,
+ op_fmaxa_s = 0x219U << 15,
+ op_fmaxa_d = 0x21aU << 15,
+ op_fmina_s = 0x21dU << 15,
+ op_fmina_d = 0x21eU << 15,
+ op_fcopysign_s = 0x225U << 15,
+ op_fcopysign_d = 0x226U << 15,
+ op_ldx_b = 0x7000U << 15,
+ op_ldx_h = 0x7008U << 15,
+ op_ldx_w = 0x7010U << 15,
+ op_ldx_d = 0x7018U << 15,
+ op_stx_b = 0x7020U << 15,
+ op_stx_h = 0x7028U << 15,
+ op_stx_w = 0x7030U << 15,
+ op_stx_d = 0x7038U << 15,
+ op_ldx_bu = 0x7040U << 15,
+ op_ldx_hu = 0x7048U << 15,
+ op_ldx_wu = 0x7050U << 15,
+ op_fldx_s = 0x7060U << 15,
+ op_fldx_d = 0x7068U << 15,
+ op_fstx_s = 0x7070U << 15,
+ op_fstx_d = 0x7078U << 15,
+ op_amswap_w = 0x70c0U << 15,
+ op_amswap_d = 0x70c1U << 15,
+ op_amadd_w = 0x70c2U << 15,
+ op_amadd_d = 0x70c3U << 15,
+ op_amand_w = 0x70c4U << 15,
+ op_amand_d = 0x70c5U << 15,
+ op_amor_w = 0x70c6U << 15,
+ op_amor_d = 0x70c7U << 15,
+ op_amxor_w = 0x70c8U << 15,
+ op_amxor_d = 0x70c9U << 15,
+ op_ammax_w = 0x70caU << 15,
+ op_ammax_d = 0x70cbU << 15,
+ op_ammin_w = 0x70ccU << 15,
+ op_ammin_d = 0x70cdU << 15,
+ op_ammax_wu = 0x70ceU << 15,
+ op_ammax_du = 0x70cfU << 15,
+ op_ammin_wu = 0x70d0U << 15,
+ op_ammin_du = 0x70d1U << 15,
+ op_amswap_db_w = 0x70d2U << 15,
+ op_amswap_db_d = 0x70d3U << 15,
+ op_amadd_db_w = 0x70d4U << 15,
+ op_amadd_db_d = 0x70d5U << 15,
+ op_amand_db_w = 0x70d6U << 15,
+ op_amand_db_d = 0x70d7U << 15,
+ op_amor_db_w = 0x70d8U << 15,
+ op_amor_db_d = 0x70d9U << 15,
+ op_amxor_db_w = 0x70daU << 15,
+ op_amxor_db_d = 0x70dbU << 15,
+ op_ammax_db_w = 0x70dcU << 15,
+ op_ammax_db_d = 0x70ddU << 15,
+ op_ammin_db_w = 0x70deU << 15,
+ op_ammin_db_d = 0x70dfU << 15,
+ op_ammax_db_wu = 0x70e0U << 15,
+ op_ammax_db_du = 0x70e1U << 15,
+ op_ammin_db_wu = 0x70e2U << 15,
+ op_ammin_db_du = 0x70e3U << 15,
+ op_dbar = 0x70e4U << 15,
+ op_ibar = 0x70e5U << 15,
+ op_clo_w = 0x4U << 10,
+ op_clz_w = 0x5U << 10,
+ op_cto_w = 0x6U << 10,
+ op_ctz_w = 0x7U << 10,
+ op_clo_d = 0x8U << 10,
+ op_clz_d = 0x9U << 10,
+ op_cto_d = 0xaU << 10,
+ op_ctz_d = 0xbU << 10,
+ op_revb_2h = 0xcU << 10,
+ op_revb_4h = 0xdU << 10,
+ op_revb_2w = 0xeU << 10,
+ op_revb_d = 0xfU << 10,
+ op_revh_2w = 0x10U << 10,
+ op_revh_d = 0x11U << 10,
+ op_bitrev_4b = 0x12U << 10,
+ op_bitrev_8b = 0x13U << 10,
+ op_bitrev_w = 0x14U << 10,
+ op_bitrev_d = 0x15U << 10,
+ op_ext_w_h = 0x16U << 10,
+ op_ext_w_b = 0x17U << 10,
+ op_fabs_s = 0x4501U << 10,
+ op_fabs_d = 0x4502U << 10,
+ op_fneg_s = 0x4505U << 10,
+ op_fneg_d = 0x4506U << 10,
+ op_fsqrt_s = 0x4511U << 10,
+ op_fsqrt_d = 0x4512U << 10,
+ op_fmov_s = 0x4525U << 10,
+ op_fmov_d = 0x4526U << 10,
+ op_movgr2fr_w = 0x4529U << 10,
+ op_movgr2fr_d = 0x452aU << 10,
+ op_movgr2frh_w = 0x452bU << 10,
+ op_movfr2gr_s = 0x452dU << 10,
+ op_movfr2gr_d = 0x452eU << 10,
+ op_movfrh2gr_s = 0x452fU << 10,
+ op_movgr2fcsr = 0x4530U << 10,
+ op_movfcsr2gr = 0x4532U << 10,
+ op_movfr2cf = 0x4534U << 10,
+ op_movgr2cf = 0x4536U << 10,
+ op_fcvt_s_d = 0x4646U << 10,
+ op_fcvt_d_s = 0x4649U << 10,
+ op_ftintrm_w_s = 0x4681U << 10,
+ op_ftintrm_w_d = 0x4682U << 10,
+ op_ftintrm_l_s = 0x4689U << 10,
+ op_ftintrm_l_d = 0x468aU << 10,
+ op_ftintrp_w_s = 0x4691U << 10,
+ op_ftintrp_w_d = 0x4692U << 10,
+ op_ftintrp_l_s = 0x4699U << 10,
+ op_ftintrp_l_d = 0x469aU << 10,
+ op_ftintrz_w_s = 0x46a1U << 10,
+ op_ftintrz_w_d = 0x46a2U << 10,
+ op_ftintrz_l_s = 0x46a9U << 10,
+ op_ftintrz_l_d = 0x46aaU << 10,
+ op_ftintrne_w_s = 0x46b1U << 10,
+ op_ftintrne_w_d = 0x46b2U << 10,
+ op_ftintrne_l_s = 0x46b9U << 10,
+ op_ftintrne_l_d = 0x46baU << 10,
+ op_ftint_w_s = 0x46c1U << 10,
+ op_ftint_w_d = 0x46c2U << 10,
+ op_ftint_l_s = 0x46c9U << 10,
+ op_ftint_l_d = 0x46caU << 10,
+ op_ffint_s_w = 0x4744U << 10,
+ op_ffint_s_l = 0x4746U << 10,
+ op_ffint_d_w = 0x4748U << 10,
+ op_ffint_d_l = 0x474aU << 10,
+ op_frint_s = 0x4791U << 10,
+ op_frint_d = 0x4792U << 10,
+ op_movcf2fr = 0x114d4U << 8,
+ op_movcf2gr = 0x114dcU << 8,
+};
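+
+// Illustrative note (not part of the original patch): an instruction word is
+// built by OR-ing one of the OpcodeField values above with its register and
+// immediate fields, via the RJ/RK/RD/FJ/FK/FD helpers declared earlier. For
+// example, a 3R-format op such as add.d rd, rj, rk is assembled roughly as
+//   op_add_d | RK(rk) | RJ(rj) | RD(rd)
+// matching the InstReg constructors below; per the LoongArch register-format
+// layout, rd occupies bits [4:0], rj bits [9:5] and rk bits [14:10].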
+
+class Operand;
+
+// A BOffImm16 is a 16 bit immediate that is used for branches.
+class BOffImm16 {
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+    return (int32_t(data << 16) >> 14);
+ }
+
+ explicit BOffImm16(int offset) : data((offset) >> 2 & Imm16Mask) {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset) < int(unsigned(INT16_MIN) << 2)) {
+ return false;
+ }
+ if ((offset) > (INT16_MAX << 2)) {
+ return false;
+ }
+ return true;
+ }
+ static const uint32_t INVALID = 0x00020000;
+ BOffImm16() : data(INVALID) {}
+
+ bool isInvalid() { return data == INVALID; }
+ Instruction* getDest(Instruction* src) const;
+
+ BOffImm16(InstImm inst);
+};
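+
+// Worked example (illustrative only, not part of the original patch): for a
+// branch offset of -8 bytes, BOffImm16(-8) stores (-8 >> 2) & 0xffff, i.e.
+// 0xfffe, and decode() sign-extends it back: int32_t(0xfffe << 16) >> 14
+// equals -8. Encodable offsets are word-aligned values in the range
+// [INT16_MIN << 2, INT16_MAX << 2], i.e. [-131072, 131068] bytes.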
+
+// A JOffImm26 is a 26 bit immediate that is used for unconditional jumps.
+class JOffImm26 {
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+    return (int32_t(data << 6) >> 4);
+ }
+
+ explicit JOffImm26(int offset) : data((offset) >> 2 & Imm26Mask) {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset) < -536870912) {
+ return false;
+ }
+ if ((offset) > 536870908) {
+ return false;
+ }
+ return true;
+ }
+ static const uint32_t INVALID = 0x20000000;
+ JOffImm26() : data(INVALID) {}
+
+ bool isInvalid() { return data == INVALID; }
+ Instruction* getDest(Instruction* src);
+};
+
+class Imm16 {
+ uint16_t value;
+
+ public:
+ Imm16();
+ Imm16(uint32_t imm) : value(imm) {}
+ uint32_t encode() { return value; }
+ int32_t decodeSigned() { return value; }
+ uint32_t decodeUnsigned() { return value; }
+
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT16_MIN && imm <= INT16_MAX;
+ }
+
+ static bool IsInUnsignedRange(uint32_t imm) { return imm <= UINT16_MAX; }
+};
+
+class Imm8 {
+ uint8_t value;
+
+ public:
+ Imm8();
+ Imm8(uint32_t imm) : value(imm) {}
+ uint32_t encode(uint32_t shift) { return value << shift; }
+ int32_t decodeSigned() { return value; }
+ uint32_t decodeUnsigned() { return value; }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT8_MIN && imm <= INT8_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) { return imm <= UINT8_MAX; }
+ static Imm8 Lower(Imm16 imm) { return Imm8(imm.decodeSigned() & 0xff); }
+ static Imm8 Upper(Imm16 imm) {
+ return Imm8((imm.decodeSigned() >> 8) & 0xff);
+ }
+};
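+
+// Illustrative sketch (not part of the original patch): Imm8::Lower/Upper
+// split a 16-bit immediate into its two bytes. For Imm16(0x1234),
+// Lower(imm).encode(0) yields 0x34 and Upper(imm).encode(0) yields 0x12;
+// a non-zero shift passed to encode() places the byte at that bit position.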
+
+class Operand {
+ public:
+ enum Tag { REG, FREG, MEM };
+
+ private:
+ Tag tag : 3;
+ uint32_t reg : 5;
+ int32_t offset;
+
+ public:
+ Operand(Register reg_) : tag(REG), reg(reg_.code()) {}
+
+ Operand(FloatRegister freg) : tag(FREG), reg(freg.code()) {}
+
+ Operand(Register base, Imm32 off)
+ : tag(MEM), reg(base.code()), offset(off.value) {}
+
+ Operand(Register base, int32_t off)
+ : tag(MEM), reg(base.code()), offset(off) {}
+
+ Operand(const Address& addr)
+ : tag(MEM), reg(addr.base.code()), offset(addr.offset) {}
+
+ Tag getTag() const { return tag; }
+
+ Register toReg() const {
+ MOZ_ASSERT(tag == REG);
+ return Register::FromCode(reg);
+ }
+
+ FloatRegister toFReg() const {
+ MOZ_ASSERT(tag == FREG);
+ return FloatRegister::FromCode(reg);
+ }
+
+ void toAddr(Register* r, Imm32* dest) const {
+ MOZ_ASSERT(tag == MEM);
+ *r = Register::FromCode(reg);
+ *dest = Imm32(offset);
+ }
+ Address toAddress() const {
+ MOZ_ASSERT(tag == MEM);
+ return Address(Register::FromCode(reg), offset);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(tag == MEM);
+ return offset;
+ }
+
+ int32_t base() const {
+ MOZ_ASSERT(tag == MEM);
+ return reg;
+ }
+ Register baseReg() const {
+ MOZ_ASSERT(tag == MEM);
+ return Register::FromCode(reg);
+ }
+};
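+
+// Usage sketch (illustrative only, not part of the original patch): an
+// Operand wraps either a GPR, an FPR, or a base+offset memory reference:
+//   Operand(rd)               // REG:  rd
+//   Operand(fd)               // FREG: fd
+//   Operand(base, Imm32(16))  // MEM:  16(base)
+// The MEM form can be converted back to an Address with toAddress().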
+
+// Integer range checks.
+inline bool is_intN(int32_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < 64));
+ int32_t limit = static_cast<int32_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintN(int32_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < (sizeof(x) * 8)));
+ return !(x >> n);
+}
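+
+// Worked examples (illustrative only, not part of the original patch):
+//   is_intN(-2048, 12)  -> true   (limit = 1 << 11 = 2048; -2048 >= -limit)
+//   is_intN(2048, 12)   -> false  (2048 is not < limit)
+//   is_uintN(4095, 12)  -> true   (4095 >> 12 == 0)
+//   is_uintN(4096, 12)  -> false  (4096 >> 12 == 1)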
+
+inline Imm32 Imm64::firstHalf() const { return low(); }
+
+inline Imm32 Imm64::secondHalf() const { return hi(); }
+
+static constexpr int32_t SliceSize = 1024;
+typedef js::jit::AssemblerBuffer<SliceSize, Instruction> LOONGBuffer;
+
+class LOONGBufferWithExecutableCopy : public LOONGBuffer {
+ public:
+ void executableCopy(uint8_t* buffer) {
+ if (this->oom()) {
+ return;
+ }
+
+ for (Slice* cur = head; cur != nullptr; cur = cur->getNext()) {
+ memcpy(buffer, &cur->instructions, cur->length());
+ buffer += cur->length();
+ }
+ }
+
+ bool appendRawCode(const uint8_t* code, size_t numBytes) {
+ if (this->oom()) {
+ return false;
+ }
+ while (numBytes > SliceSize) {
+ this->putBytes(SliceSize, code);
+ numBytes -= SliceSize;
+ code += SliceSize;
+ }
+ this->putBytes(numBytes, code);
+ return !this->oom();
+ }
+};
+
+class AssemblerLOONG64 : public AssemblerShared {
+ public:
+ // TODO(loong64): Should we remove these conditions here?
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ GreaterThanOrEqual_Signed,
+ GreaterThanOrEqual_NotSigned,
+ LessThan,
+ LessThan_Signed,
+ LessThan_NotSigned,
+ LessThanOrEqual,
+ Overflow,
+ CarrySet,
+ CarryClear,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ Always,
+ };
+
+ enum DoubleCondition {
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ enum FPUCondition {
+ kNoFPUCondition = -1,
+
+ CAF = 0x00,
+ SAF = 0x01,
+ CLT = 0x02,
+ SLT = 0x03,
+ CEQ = 0x04,
+ SEQ = 0x05,
+ CLE = 0x06,
+ SLE = 0x07,
+ CUN = 0x08,
+ SUN = 0x09,
+ CULT = 0x0a,
+ SULT = 0x0b,
+ CUEQ = 0x0c,
+ SUEQ = 0x0d,
+ CULE = 0x0e,
+ SULE = 0x0f,
+ CNE = 0x10,
+ SNE = 0x11,
+ COR = 0x14,
+ SOR = 0x15,
+ CUNE = 0x18,
+ SUNE = 0x19,
+ };
+
+  enum FPConditionBit { FCC0 = 0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 };
+
+ enum FPControl { FCSR = 0 };
+
+ enum FCSRBit { CauseI = 24, CauseU, CauseO, CauseZ, CauseV };
+
+ enum FloatFormat { SingleFloat, DoubleFloat };
+
+ enum JumpOrCall { BranchIsJump, BranchIsCall };
+
+ enum FloatTestKind { TestForTrue, TestForFalse };
+
+ // :( this should be protected, but since CodeGenerator
+  // wants to use it, it needs to go out here :(
+
+ BufferOffset nextOffset() { return m_buffer.nextOffset(); }
+
+ protected:
+ Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }
+
+  // Structure for fixing up pc-relative loads/jumps when the machine code
+  // gets moved (executable copy, gc, etc.).
+ struct RelativePatch {
+    // The offset within the code buffer where the value that we want to
+    // fix up is loaded.
+ BufferOffset offset;
+ void* target;
+ RelocationKind kind;
+
+ RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
+ : offset(offset), target(target), kind(kind) {}
+ };
+
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+
+ LOONGBufferWithExecutableCopy m_buffer;
+
+#ifdef JS_JITSPEW
+ Sprinter* printer;
+#endif
+
+ public:
+ AssemblerLOONG64()
+ : m_buffer(),
+#ifdef JS_JITSPEW
+ printer(nullptr),
+#endif
+ isFinished(false) {
+ }
+
+ static Condition InvertCondition(Condition cond);
+ static DoubleCondition InvertCondition(DoubleCondition cond);
+  // This changes the condition codes for cmp a, b into the equivalent codes
+  // for cmp b, a.
+ static Condition InvertCmpCondition(Condition cond);
+
+  // As opposed to the x86/x64 version, the data relocation has to be executed
+  // before recovering the pointer, not after.
+ void writeDataRelocation(ImmGCPtr ptr) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(nextOffset().getOffset());
+ }
+ }
+
+ void assertNoGCThings() const {
+#ifdef DEBUG
+ MOZ_ASSERT(dataRelocations_.length() == 0);
+ for (auto& j : jumps_) {
+ MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+ }
+#endif
+ }
+
+ public:
+ void setUnlimitedBuffer() { m_buffer.setUnlimited(); }
+ bool oom() const;
+
+ void setPrinter(Sprinter* sp) {
+#ifdef JS_JITSPEW
+ printer = sp;
+#endif
+ }
+
+#ifdef JS_JITSPEW
+ inline void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {
+ if (MOZ_UNLIKELY(printer || JitSpewEnabled(JitSpew_Codegen))) {
+ va_list va;
+ va_start(va, fmt);
+ spew(fmt, va);
+ va_end(va);
+ }
+ }
+
+ void decodeBranchInstAndSpew(InstImm branch);
+#else
+ MOZ_ALWAYS_INLINE void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {}
+#endif
+
+#ifdef JS_JITSPEW
+ MOZ_COLD void spew(const char* fmt, va_list va) MOZ_FORMAT_PRINTF(2, 0) {
+ // Buffer to hold the formatted string. Note that this may contain
+ // '%' characters, so do not pass it directly to printf functions.
+ char buf[200];
+
+ int i = VsprintfLiteral(buf, fmt, va);
+ if (i > -1) {
+ if (printer) {
+ printer->printf("%s\n", buf);
+ }
+ js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
+ }
+ }
+#endif
+
+ Register getStackPointer() const { return StackPointer; }
+
+ protected:
+ bool isFinished;
+
+ public:
+ void finish();
+ bool appendRawCode(const uint8_t* code, size_t numBytes);
+ bool reserve(size_t size);
+ bool swapBuffer(wasm::Bytes& bytes);
+ void executableCopy(void* buffer);
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+
+ // Size of the instruction stream, in bytes.
+ size_t size() const;
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const;
+ size_t dataRelocationTableBytes() const;
+
+ // Size of the data table, in bytes.
+ size_t bytesNeeded() const;
+
+  // Write a blob of binary into the instruction stream *OR* into a
+  // destination address. If dest is nullptr (the default), the instruction
+  // gets written into the instruction stream. If dest is not null, it is
+  // interpreted as a pointer to the location where we want the instruction
+  // to be written.
+ BufferOffset writeInst(uint32_t x, uint32_t* dest = nullptr);
+ // A static variant for the cases where we don't want to have an assembler
+ // object at all. Normally, you would use the dummy (nullptr) object.
+ static void WriteInstStatic(uint32_t x, uint32_t* dest);
+
+ public:
+ BufferOffset haltingAlign(int alignment);
+ BufferOffset nopAlign(int alignment);
+ BufferOffset as_nop() { return as_andi(zero, zero, 0); }
+
+ // Branch and jump instructions
+ BufferOffset as_b(JOffImm26 off);
+ BufferOffset as_bl(JOffImm26 off);
+ BufferOffset as_jirl(Register rd, Register rj, BOffImm16 off);
+
+ InstImm getBranchCode(JumpOrCall jumpOrCall); // b, bl
+ InstImm getBranchCode(Register rd, Register rj,
+ Condition c); // beq, bne, bge, bgeu, blt, bltu
+ InstImm getBranchCode(Register rj, Condition c); // beqz, bnez
+ InstImm getBranchCode(FPConditionBit cj); // bceqz, bcnez
+
+ // Arithmetic instructions
+ BufferOffset as_add_w(Register rd, Register rj, Register rk);
+ BufferOffset as_add_d(Register rd, Register rj, Register rk);
+ BufferOffset as_sub_w(Register rd, Register rj, Register rk);
+ BufferOffset as_sub_d(Register rd, Register rj, Register rk);
+
+ BufferOffset as_addi_w(Register rd, Register rj, int32_t si12);
+ BufferOffset as_addi_d(Register rd, Register rj, int32_t si12);
+ BufferOffset as_addu16i_d(Register rd, Register rj, int32_t si16);
+
+ BufferOffset as_alsl_w(Register rd, Register rj, Register rk, uint32_t sa2);
+ BufferOffset as_alsl_wu(Register rd, Register rj, Register rk, uint32_t sa2);
+ BufferOffset as_alsl_d(Register rd, Register rj, Register rk, uint32_t sa2);
+
+ BufferOffset as_lu12i_w(Register rd, int32_t si20);
+ BufferOffset as_lu32i_d(Register rd, int32_t si20);
+ BufferOffset as_lu52i_d(Register rd, Register rj, int32_t si12);
+
+ BufferOffset as_slt(Register rd, Register rj, Register rk);
+ BufferOffset as_sltu(Register rd, Register rj, Register rk);
+ BufferOffset as_slti(Register rd, Register rj, int32_t si12);
+ BufferOffset as_sltui(Register rd, Register rj, int32_t si12);
+
+ BufferOffset as_pcaddi(Register rd, int32_t si20);
+ BufferOffset as_pcaddu12i(Register rd, int32_t si20);
+ BufferOffset as_pcaddu18i(Register rd, int32_t si20);
+ BufferOffset as_pcalau12i(Register rd, int32_t si20);
+
+ BufferOffset as_mul_w(Register rd, Register rj, Register rk);
+ BufferOffset as_mulh_w(Register rd, Register rj, Register rk);
+ BufferOffset as_mulh_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_mul_d(Register rd, Register rj, Register rk);
+ BufferOffset as_mulh_d(Register rd, Register rj, Register rk);
+ BufferOffset as_mulh_du(Register rd, Register rj, Register rk);
+
+ BufferOffset as_mulw_d_w(Register rd, Register rj, Register rk);
+ BufferOffset as_mulw_d_wu(Register rd, Register rj, Register rk);
+
+ BufferOffset as_div_w(Register rd, Register rj, Register rk);
+ BufferOffset as_mod_w(Register rd, Register rj, Register rk);
+ BufferOffset as_div_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_mod_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_div_d(Register rd, Register rj, Register rk);
+ BufferOffset as_mod_d(Register rd, Register rj, Register rk);
+ BufferOffset as_div_du(Register rd, Register rj, Register rk);
+ BufferOffset as_mod_du(Register rd, Register rj, Register rk);
+
+ // Logical instructions
+ BufferOffset as_and(Register rd, Register rj, Register rk);
+ BufferOffset as_or(Register rd, Register rj, Register rk);
+ BufferOffset as_xor(Register rd, Register rj, Register rk);
+ BufferOffset as_nor(Register rd, Register rj, Register rk);
+ BufferOffset as_andn(Register rd, Register rj, Register rk);
+ BufferOffset as_orn(Register rd, Register rj, Register rk);
+
+ BufferOffset as_andi(Register rd, Register rj, int32_t ui12);
+ BufferOffset as_ori(Register rd, Register rj, int32_t ui12);
+ BufferOffset as_xori(Register rd, Register rj, int32_t ui12);
+
+ // Shift instructions
+ BufferOffset as_sll_w(Register rd, Register rj, Register rk);
+ BufferOffset as_srl_w(Register rd, Register rj, Register rk);
+ BufferOffset as_sra_w(Register rd, Register rj, Register rk);
+ BufferOffset as_rotr_w(Register rd, Register rj, Register rk);
+
+ BufferOffset as_slli_w(Register rd, Register rj, int32_t ui5);
+ BufferOffset as_srli_w(Register rd, Register rj, int32_t ui5);
+ BufferOffset as_srai_w(Register rd, Register rj, int32_t ui5);
+ BufferOffset as_rotri_w(Register rd, Register rj, int32_t ui5);
+
+ BufferOffset as_sll_d(Register rd, Register rj, Register rk);
+ BufferOffset as_srl_d(Register rd, Register rj, Register rk);
+ BufferOffset as_sra_d(Register rd, Register rj, Register rk);
+ BufferOffset as_rotr_d(Register rd, Register rj, Register rk);
+
+ BufferOffset as_slli_d(Register rd, Register rj, int32_t ui6);
+ BufferOffset as_srli_d(Register rd, Register rj, int32_t ui6);
+ BufferOffset as_srai_d(Register rd, Register rj, int32_t ui6);
+ BufferOffset as_rotri_d(Register rd, Register rj, int32_t ui6);
+
+  // Bit operation instructions
+ BufferOffset as_ext_w_b(Register rd, Register rj);
+ BufferOffset as_ext_w_h(Register rd, Register rj);
+
+ BufferOffset as_clo_w(Register rd, Register rj);
+ BufferOffset as_clz_w(Register rd, Register rj);
+ BufferOffset as_cto_w(Register rd, Register rj);
+ BufferOffset as_ctz_w(Register rd, Register rj);
+ BufferOffset as_clo_d(Register rd, Register rj);
+ BufferOffset as_clz_d(Register rd, Register rj);
+ BufferOffset as_cto_d(Register rd, Register rj);
+ BufferOffset as_ctz_d(Register rd, Register rj);
+
+ BufferOffset as_bytepick_w(Register rd, Register rj, Register rk,
+ int32_t sa2);
+ BufferOffset as_bytepick_d(Register rd, Register rj, Register rk,
+ int32_t sa3);
+
+ BufferOffset as_revb_2h(Register rd, Register rj);
+ BufferOffset as_revb_4h(Register rd, Register rj);
+ BufferOffset as_revb_2w(Register rd, Register rj);
+ BufferOffset as_revb_d(Register rd, Register rj);
+
+ BufferOffset as_revh_2w(Register rd, Register rj);
+ BufferOffset as_revh_d(Register rd, Register rj);
+
+ BufferOffset as_bitrev_4b(Register rd, Register rj);
+ BufferOffset as_bitrev_8b(Register rd, Register rj);
+
+ BufferOffset as_bitrev_w(Register rd, Register rj);
+ BufferOffset as_bitrev_d(Register rd, Register rj);
+
+ BufferOffset as_bstrins_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw);
+ BufferOffset as_bstrins_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd);
+ BufferOffset as_bstrpick_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw);
+ BufferOffset as_bstrpick_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd);
+
+ BufferOffset as_maskeqz(Register rd, Register rj, Register rk);
+ BufferOffset as_masknez(Register rd, Register rj, Register rk);
+
+ // Load and store instructions
+ BufferOffset as_ld_b(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_h(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_w(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_d(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_bu(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_hu(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_wu(Register rd, Register rj, int32_t si12);
+ BufferOffset as_st_b(Register rd, Register rj, int32_t si12);
+ BufferOffset as_st_h(Register rd, Register rj, int32_t si12);
+ BufferOffset as_st_w(Register rd, Register rj, int32_t si12);
+ BufferOffset as_st_d(Register rd, Register rj, int32_t si12);
+
+ BufferOffset as_ldx_b(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_h(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_bu(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_hu(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_stx_b(Register rd, Register rj, Register rk);
+ BufferOffset as_stx_h(Register rd, Register rj, Register rk);
+ BufferOffset as_stx_w(Register rd, Register rj, Register rk);
+ BufferOffset as_stx_d(Register rd, Register rj, Register rk);
+
+ BufferOffset as_ldptr_w(Register rd, Register rj, int32_t si14);
+ BufferOffset as_ldptr_d(Register rd, Register rj, int32_t si14);
+ BufferOffset as_stptr_w(Register rd, Register rj, int32_t si14);
+ BufferOffset as_stptr_d(Register rd, Register rj, int32_t si14);
+
+ BufferOffset as_preld(int32_t hint, Register rj, int32_t si12);
+
+ // Atomic instructions
+ BufferOffset as_amswap_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amswap_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amadd_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amadd_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amand_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amand_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amor_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amor_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amxor_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amxor_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_du(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_du(Register rd, Register rj, Register rk);
+
+ BufferOffset as_amswap_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amswap_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amadd_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amadd_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amand_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amand_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amor_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amor_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amxor_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amxor_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_db_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_db_du(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_db_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_db_du(Register rd, Register rj, Register rk);
+
+ BufferOffset as_ll_w(Register rd, Register rj, int32_t si14);
+ BufferOffset as_ll_d(Register rd, Register rj, int32_t si14);
+ BufferOffset as_sc_w(Register rd, Register rj, int32_t si14);
+ BufferOffset as_sc_d(Register rd, Register rj, int32_t si14);
+
+ // Barrier instructions
+ BufferOffset as_dbar(int32_t hint);
+ BufferOffset as_ibar(int32_t hint);
+
+ // FP Arithmetic instructions
+ BufferOffset as_fadd_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fadd_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fsub_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fsub_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmul_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmul_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fdiv_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fdiv_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+
+ BufferOffset as_fmadd_s(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fmadd_d(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fmsub_s(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fmsub_d(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fnmadd_s(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fnmadd_d(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fnmsub_s(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fnmsub_d(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+
+ BufferOffset as_fmax_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmax_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmin_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmin_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+
+ BufferOffset as_fmaxa_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmaxa_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmina_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmina_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+
+ BufferOffset as_fabs_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fabs_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fneg_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fneg_d(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_fsqrt_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fsqrt_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fcopysign_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk);
+ BufferOffset as_fcopysign_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk);
+
+ // FP compare instructions (fcmp.cond.s fcmp.cond.d)
+ BufferOffset as_fcmp_cor(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_ceq(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cne(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cle(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_clt(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cun(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cueq(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cune(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cule(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cult(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+
+ // FP conversion instructions
+ BufferOffset as_fcvt_s_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fcvt_d_s(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_ffint_s_w(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ffint_s_l(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ffint_d_w(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ffint_d_l(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftint_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftint_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftint_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftint_l_d(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_ftintrm_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrm_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrm_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrm_l_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrp_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrp_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrp_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrp_l_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrz_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrz_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrz_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrz_l_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrne_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrne_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrne_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrne_l_d(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_frint_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_frint_d(FloatRegister fd, FloatRegister fj);
+
+ // FP mov instructions
+ BufferOffset as_fmov_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fmov_d(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_fsel(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FPConditionBit ca);
+
+ BufferOffset as_movgr2fr_w(FloatRegister fd, Register rj);
+ BufferOffset as_movgr2fr_d(FloatRegister fd, Register rj);
+ BufferOffset as_movgr2frh_w(FloatRegister fd, Register rj);
+
+ BufferOffset as_movfr2gr_s(Register rd, FloatRegister fj);
+ BufferOffset as_movfr2gr_d(Register rd, FloatRegister fj);
+ BufferOffset as_movfrh2gr_s(Register rd, FloatRegister fj);
+
+ BufferOffset as_movgr2fcsr(Register rj);
+ BufferOffset as_movfcsr2gr(Register rd);
+
+ BufferOffset as_movfr2cf(FPConditionBit cd, FloatRegister fj);
+ BufferOffset as_movcf2fr(FloatRegister fd, FPConditionBit cj);
+
+ BufferOffset as_movgr2cf(FPConditionBit cd, Register rj);
+ BufferOffset as_movcf2gr(Register rd, FPConditionBit cj);
+
+ // FP load/store instructions
+ BufferOffset as_fld_s(FloatRegister fd, Register rj, int32_t si12);
+ BufferOffset as_fld_d(FloatRegister fd, Register rj, int32_t si12);
+ BufferOffset as_fst_s(FloatRegister fd, Register rj, int32_t si12);
+ BufferOffset as_fst_d(FloatRegister fd, Register rj, int32_t si12);
+
+ BufferOffset as_fldx_s(FloatRegister fd, Register rj, Register rk);
+ BufferOffset as_fldx_d(FloatRegister fd, Register rj, Register rk);
+ BufferOffset as_fstx_s(FloatRegister fd, Register rj, Register rk);
+ BufferOffset as_fstx_d(FloatRegister fd, Register rj, Register rk);
+
+ // label operations
+ void bind(Label* label, BufferOffset boff = BufferOffset());
+ virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
+ void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
+ uint32_t currentOffset() { return nextOffset().getOffset(); }
+ void retarget(Label* label, Label* target);
+
+ void call(Label* label);
+ void call(void* target);
+
+ void as_break(uint32_t code);
+
+ public:
+ static bool SupportsFloatingPoint() {
+#if defined(__loongarch_hard_float) || defined(JS_SIMULATOR_LOONG64)
+ return true;
+#else
+ return false;
+#endif
+ }
+ static bool SupportsUnalignedAccesses() { return true; }
+ static bool SupportsFastUnalignedFPAccesses() { return true; }
+
+ static bool HasRoundInstruction(RoundingMode mode) { return false; }
+
+ protected:
+ InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
+ void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
+ if (kind == RelocationKind::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+ }
+
+ void addLongJump(BufferOffset src, BufferOffset dst) {
+ CodeLabel cl;
+ cl.patchAt()->bind(src.getOffset());
+ cl.target()->bind(dst.getOffset());
+ cl.setLinkMode(CodeLabel::JumpImmediate);
+ addCodeLabel(std::move(cl));
+ }
+
+ public:
+ void flushBuffer() {}
+
+ void comment(const char* msg) { spew("; %s", msg); }
+
+ static uint32_t NopSize() { return 4; }
+
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+
+ static uint8_t* NextInstruction(uint8_t* instruction,
+ uint32_t* count = nullptr);
+
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess) {
+ // Implement this if we implement a disassembler.
+ }
+}; // AssemblerLOONG64
+
+// andi r0, r0, 0
+const uint32_t NopInst = 0x03400000;
+
+// An Instruction is a structure for both encoding and decoding any and all
+// LoongArch instructions.
+class Instruction {
+ public:
+ uint32_t data;
+
+ protected:
+ // Standard constructor
+ Instruction(uint32_t data_) : data(data_) {}
+ // You should never create an instruction directly. You should create a
+ // more specific instruction which will eventually call one of these
+ // constructors for you.
+
+ public:
+ uint32_t encode() const { return data; }
+
+ void makeNop() { data = NopInst; }
+
+ void setData(uint32_t data) { this->data = data; }
+
+ const Instruction& operator=(const Instruction& src) {
+ data = src.data;
+ return *this;
+ }
+
+ // Extract the one particular bit.
+ uint32_t extractBit(uint32_t bit) { return (encode() >> bit) & 1; }
+ // Extract a bit field out of the instruction
+ uint32_t extractBitField(uint32_t hi, uint32_t lo) {
+ return (encode() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Get the next instruction in the instruction stream.
+  // This does neat things like ignore constant pools and their guards.
+ Instruction* next();
+
+  // Sometimes, an API wants a uint32_t (or a pointer to it) rather than
+  // an instruction. raw() just coerces this into a pointer to a uint32_t.
+ const uint32_t* raw() const { return &data; }
+ uint32_t size() const { return 4; }
+}; // Instruction
+
+// Make sure that it is the right size.
+static_assert(sizeof(Instruction) == 4,
+ "Size of Instruction class has to be 4 bytes.");
+
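+// Illustrative note (not part of the original patch): extractBitField(hi, lo)
+// returns the inclusive bit range [hi:lo] of the instruction word. With
+// hi = 9 and lo = 5 the mask is (2 << 4) - 1 = 0x1f, so the call yields the
+// five-bit rj field of a register-format instruction.
+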
+class InstNOP : public Instruction {
+ public:
+ InstNOP() : Instruction(NopInst) {}
+};
+
+// Class for register type instructions.
+class InstReg : public Instruction {
+ public:
+ InstReg(OpcodeField op, Register rj, Register rd)
+ : Instruction(op | RJ(rj) | RD(rd)) {}
+ InstReg(OpcodeField op, Register rk, Register rj, Register rd)
+ : Instruction(op | RK(rk) | RJ(rj) | RD(rd)) {}
+ InstReg(OpcodeField op, uint32_t sa, Register rk, Register rj, Register rd,
+ uint32_t sa_bit)
+ : Instruction(sa_bit == 2 ? op | SA2(sa) | RK(rk) | RJ(rj) | RD(rd)
+ : op | SA3(sa) | RK(rk) | RJ(rj) | RD(rd)) {
+ MOZ_ASSERT(sa_bit == 2 || sa_bit == 3);
+ }
+ InstReg(OpcodeField op, Register rj, Register rd, bool HasRd)
+ : Instruction(HasRd ? op | RJ(rj) | RD(rd) : op | RK(rj) | RJ(rd)) {}
+
+ // For floating-point
+ InstReg(OpcodeField op, Register rj, FloatRegister fd)
+ : Instruction(op | RJ(rj) | FD(fd)) {}
+ InstReg(OpcodeField op, FloatRegister fj, FloatRegister fd)
+ : Instruction(op | FJ(fj) | FD(fd)) {}
+ InstReg(OpcodeField op, FloatRegister fk, FloatRegister fj, FloatRegister fd)
+ : Instruction(op | FK(fk) | FJ(fj) | FD(fd)) {}
+ InstReg(OpcodeField op, Register rk, Register rj, FloatRegister fd)
+ : Instruction(op | RK(rk) | RJ(rj) | FD(fd)) {}
+ InstReg(OpcodeField op, FloatRegister fa, FloatRegister fk, FloatRegister fj,
+ FloatRegister fd)
+ : Instruction(op | FA(fa) | FK(fk) | FJ(fj) | FD(fd)) {}
+ InstReg(OpcodeField op, AssemblerLOONG64::FPConditionBit ca, FloatRegister fk,
+ FloatRegister fj, FloatRegister fd)
+ : Instruction(op | ca << CAShift | FK(fk) | FJ(fj) | FD(fd)) {
+ MOZ_ASSERT(op == op_fsel);
+ }
+ InstReg(OpcodeField op, FloatRegister fj, Register rd)
+ : Instruction(op | FJ(fj) | RD(rd)) {
+ MOZ_ASSERT((op == op_movfr2gr_s) || (op == op_movfr2gr_d) ||
+ (op == op_movfrh2gr_s));
+ }
+ InstReg(OpcodeField op, Register rj, uint32_t fd)
+ : Instruction(op | RJ(rj) | fd) {
+ MOZ_ASSERT(op == op_movgr2fcsr);
+ }
+ InstReg(OpcodeField op, uint32_t fj, Register rd)
+ : Instruction(op | (fj << FJShift) | RD(rd)) {
+ MOZ_ASSERT(op == op_movfcsr2gr);
+ }
+ InstReg(OpcodeField op, FloatRegister fj, AssemblerLOONG64::FPConditionBit cd)
+ : Instruction(op | FJ(fj) | cd) {
+ MOZ_ASSERT(op == op_movfr2cf);
+ }
+ InstReg(OpcodeField op, AssemblerLOONG64::FPConditionBit cj, FloatRegister fd)
+ : Instruction(op | (cj << CJShift) | FD(fd)) {
+ MOZ_ASSERT(op == op_movcf2fr);
+ }
+ InstReg(OpcodeField op, Register rj, AssemblerLOONG64::FPConditionBit cd)
+ : Instruction(op | RJ(rj) | cd) {
+ MOZ_ASSERT(op == op_movgr2cf);
+ }
+ InstReg(OpcodeField op, AssemblerLOONG64::FPConditionBit cj, Register rd)
+ : Instruction(op | (cj << CJShift) | RD(rd)) {
+ MOZ_ASSERT(op == op_movcf2gr);
+ }
+ InstReg(OpcodeField op, int32_t cond, FloatRegister fk, FloatRegister fj,
+ AssemblerLOONG64::FPConditionBit cd)
+ : Instruction(op | (cond & CONDMask) << CONDShift | FK(fk) | FJ(fj) |
+ (cd & RDMask)) {
+ MOZ_ASSERT(is_uintN(cond, 5));
+ }
+
+ uint32_t extractRK() {
+ return extractBitField(RKShift + RKBits - 1, RKShift);
+ }
+ uint32_t extractRJ() {
+ return extractBitField(RJShift + RJBits - 1, RJShift);
+ }
+ uint32_t extractRD() {
+ return extractBitField(RDShift + RDBits - 1, RDShift);
+ }
+ uint32_t extractSA2() {
+ return extractBitField(SAShift + SA2Bits - 1, SAShift);
+ }
+ uint32_t extractSA3() {
+ return extractBitField(SAShift + SA3Bits - 1, SAShift);
+ }
+};
+
+// Class for branch, load and store instructions with immediate offset.
+class InstImm : public Instruction {
+ public:
+ void extractImm16(BOffImm16* dest);
+ uint32_t genImm(int32_t value, uint32_t value_bits) {
+ uint32_t imm = value & Imm5Mask;
+ if (value_bits == 6) {
+ imm = value & Imm6Mask;
+ } else if (value_bits == 12) {
+ imm = value & Imm12Mask;
+ } else if (value_bits == 14) {
+ imm = value & Imm14Mask;
+ }
+
+ return imm;
+ }
+
+ InstImm(OpcodeField op, int32_t value, Register rj, Register rd,
+ uint32_t value_bits)
+ : Instruction(op | genImm(value, value_bits) << RKShift | RJ(rj) |
+ RD(rd)) {
+ MOZ_ASSERT(value_bits == 5 || value_bits == 6 || value_bits == 12 ||
+ value_bits == 14);
+ }
+ InstImm(OpcodeField op, BOffImm16 off, Register rj, Register rd)
+ : Instruction(op | (off.encode() & Imm16Mask) << Imm16Shift | RJ(rj) |
+ RD(rd)) {}
+ InstImm(OpcodeField op, int32_t si21, Register rj, bool NotHasRd)
+ : Instruction(NotHasRd ? op | (si21 & Imm16Mask) << RKShift | RJ(rj) |
+ (si21 & Imm21Mask) >> 16
+ : op | (si21 & Imm20Mask) << Imm20Shift | RD(rj)) {
+ if (NotHasRd) {
+ MOZ_ASSERT(op == op_beqz || op == op_bnez);
+ MOZ_ASSERT(is_intN(si21, 21));
+ } else {
+ MOZ_ASSERT(op == op_lu12i_w || op == op_lu32i_d || op == op_pcaddi ||
+ op == op_pcaddu12i || op == op_pcaddu18i ||
+ op == op_pcalau12i);
+ // si20
+ MOZ_ASSERT(is_intN(si21, 20) || is_uintN(si21, 20));
+ }
+ }
+ InstImm(OpcodeField op, int32_t si21, AssemblerLOONG64::FPConditionBit cj,
+ bool isNotEqual)
+ : Instruction(isNotEqual
+ ? op | (si21 & Imm16Mask) << RKShift |
+ (cj + 8) << CJShift | (si21 & Imm21Mask) >> 16
+ : op | (si21 & Imm16Mask) << RKShift | cj << CJShift |
+ (si21 & Imm21Mask) >> 16) {
+ MOZ_ASSERT(is_intN(si21, 21));
+ MOZ_ASSERT(op == op_bcz);
+ MOZ_ASSERT(cj >= 0 && cj <= 7);
+ }
+ InstImm(OpcodeField op, Imm16 off, Register rj, Register rd)
+ : Instruction(op | (off.encode() & Imm16Mask) << Imm16Shift | RJ(rj) |
+ RD(rd)) {}
+ InstImm(OpcodeField op, int32_t bit15)
+ : Instruction(op | (bit15 & Imm15Mask)) {
+ MOZ_ASSERT(is_uintN(bit15, 15));
+ }
+
+ InstImm(OpcodeField op, int32_t bit26, bool jump)
+ : Instruction(op | (bit26 & Imm16Mask) << Imm16Shift |
+ (bit26 & Imm26Mask) >> 16) {
+ MOZ_ASSERT(is_intN(bit26, 26));
+ }
+ InstImm(OpcodeField op, int32_t si12, Register rj, int32_t hint)
+ : Instruction(op | (si12 & Imm12Mask) << Imm12Shift | RJ(rj) |
+ (hint & RDMask)) {
+ MOZ_ASSERT(op == op_preld);
+ }
+ InstImm(OpcodeField op, int32_t msb, int32_t lsb, Register rj, Register rd,
+ uint32_t sb_bits)
+ : Instruction((sb_bits == 5)
+ ? op | (msb & MSBWMask) << MSBWShift |
+ (lsb & LSBWMask) << LSBWShift | RJ(rj) | RD(rd)
+ : op | (msb & MSBDMask) << MSBDShift |
+ (lsb & LSBDMask) << LSBDShift | RJ(rj) | RD(rd)) {
+ MOZ_ASSERT(sb_bits == 5 || sb_bits == 6);
+ MOZ_ASSERT(op == op_bstr_w || op == op_bstrins_d || op == op_bstrpick_d);
+ }
+ InstImm(OpcodeField op, int32_t msb, int32_t lsb, Register rj, Register rd)
+ : Instruction(op | (msb & MSBWMask) << MSBWShift |
+ ((lsb + 0x20) & LSBDMask) << LSBWShift | RJ(rj) | RD(rd)) {
+ MOZ_ASSERT(op == op_bstr_w);
+ }
+
+ // For floating-point loads and stores.
+ InstImm(OpcodeField op, int32_t si12, Register rj, FloatRegister fd)
+ : Instruction(op | (si12 & Imm12Mask) << Imm12Shift | RJ(rj) | FD(fd)) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ }
+
+ void setOpcode(OpcodeField op, uint32_t opBits) {
+ // opBits not greater than 24.
+ MOZ_ASSERT(opBits < 25);
+ uint32_t OpcodeShift = 32 - opBits;
+ uint32_t OpcodeMask = ((1 << opBits) - 1) << OpcodeShift;
+ data = (data & ~OpcodeMask) | op;
+ }
+ uint32_t extractRK() {
+ return extractBitField(RKShift + RKBits - 1, RKShift);
+ }
+ uint32_t extractRJ() {
+ return extractBitField(RJShift + RJBits - 1, RJShift);
+ }
+ void setRJ(uint32_t rj) { data = (data & ~RJMask) | (rj << RJShift); }
+ uint32_t extractRD() {
+ return extractBitField(RDShift + RDBits - 1, RDShift);
+ }
+ uint32_t extractImm16Value() {
+ return extractBitField(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+ void setBOffImm16(BOffImm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~BOffImm16Mask) | (off.encode() << Imm16Shift);
+ }
+ void setImm21(int32_t off) {
+ // Reset immediate field and replace it
+ uint32_t low16 = (off >> 2) & Imm16Mask;
+ int32_t high5 = (off >> 18) & Imm5Mask;
+ uint32_t fcc_info = (data >> 5) & 0x1F;
+ data = (data & ~BOffImm26Mask) | (low16 << Imm16Shift) | high5 |
+ (fcc_info << 5);
+ }
+};
+
+// Class for Jump type instructions.
+class InstJump : public Instruction {
+ public:
+ InstJump(OpcodeField op, JOffImm26 off)
+ : Instruction(op | (off.encode() & Imm16Mask) << Imm16Shift |
+ (off.encode() & Imm26Mask) >> 16) {
+ MOZ_ASSERT(op == op_b || op == op_bl);
+ }
+
+ void setJOffImm26(JOffImm26 off) {
+ // Reset immediate field and replace it
+ data = (data & ~BOffImm26Mask) |
+ ((off.encode() & Imm16Mask) << Imm16Shift) |
+ ((off.encode() >> 16) & 0x3ff);
+ }
+ uint32_t extractImm26Value() {
+ return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+};
+
+class ABIArgGenerator {
+ public:
+ ABIArgGenerator()
+ : intRegIndex_(0), floatRegIndex_(0), stackOffset_(0), current_() {}
+
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+
+ protected:
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+ uint32_t stackOffset_;
+ ABIArg current_;
+};
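+
+// Usage sketch (illustrative only, not part of the original patch): the code
+// generator walks a signature once, calling next() for each argument and
+// querying stack usage at the end, e.g.
+//   ABIArgGenerator abi;
+//   ABIArg intArg = abi.next(MIRType::Int32);   // integer argument slot
+//   ABIArg dblArg = abi.next(MIRType::Double);  // floating-point slot
+//   uint32_t stackBytes = abi.stackBytesConsumedSoFar();
+// The actual register/stack assignment is made by next(), which is defined
+// elsewhere in this port.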
+
+class Assembler : public AssemblerLOONG64 {
+ public:
+ Assembler() : AssemblerLOONG64() {}
+
+ static uintptr_t GetPointer(uint8_t*);
+
+ using AssemblerLOONG64::bind;
+
+ static void Bind(uint8_t* rawCode, const CodeLabel& label);
+
+ void processCodeLabels(uint8_t* rawCode);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ static uint32_t PatchWrite_NearCallSize();
+
+ static uint64_t ExtractLoad64Value(Instruction* inst0);
+ static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
+ static void WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value);
+
+ static void PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+
+ static uint64_t ExtractInstructionImmediate(uint8_t* code);
+
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+}; // Assembler
+
+static const uint32_t NumIntArgRegs = 8;
+static const uint32_t NumFloatArgRegs = 8;
+
+static inline bool GetIntArgReg(uint32_t usedIntArgs, Register* out) {
+ if (usedIntArgs < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedIntArgs);
+ return true;
+ }
+ return false;
+}
+
+static inline bool GetFloatArgReg(uint32_t usedFloatArgs, FloatRegister* out) {
+ if (usedFloatArgs < NumFloatArgRegs) {
+ *out = FloatRegister::FromCode(f0.code() + usedFloatArgs);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ MOZ_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out)) {
+ return true;
+ }
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
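+
+// Illustrative example (not part of the original patch): with eight integer
+// argument registers starting at a0, GetIntArgReg(0, &out) yields a0 and
+// GetIntArgReg(3, &out) yields a3. Once all eight are used,
+// GetTempRegForIntArg falls back to the CallTempNonArgRegs set until that
+// runs out as well.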
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_Assembler_loong64_h */
diff --git a/js/src/jit/loong64/CodeGenerator-loong64.cpp b/js/src/jit/loong64/CodeGenerator-loong64.cpp
new file mode 100644
index 0000000000..960f0e654f
--- /dev/null
+++ b/js/src/jit/loong64/CodeGenerator-loong64.cpp
@@ -0,0 +1,2790 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/CodeGenerator-loong64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+
+// shared
+CodeGeneratorLOONG64::CodeGeneratorLOONG64(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {}
+
+Operand CodeGeneratorLOONG64::ToOperand(const LAllocation& a) {
+ if (a.isGeneralReg()) {
+ return Operand(a.toGeneralReg()->reg());
+ }
+ if (a.isFloatReg()) {
+ return Operand(a.toFloatReg()->reg());
+ }
+ return Operand(ToAddress(a));
+}
+
+Operand CodeGeneratorLOONG64::ToOperand(const LAllocation* a) {
+ return ToOperand(*a);
+}
+
+Operand CodeGeneratorLOONG64::ToOperand(const LDefinition* def) {
+ return ToOperand(def->output());
+}
+
+#ifdef JS_PUNBOX64
+Operand CodeGeneratorLOONG64::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToOperand(input.value());
+}
+#else
+Register64 CodeGeneratorLOONG64::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToRegister64(input);
+}
+#endif
+
+void CodeGeneratorLOONG64::branchToBlock(Assembler::FloatFormat fmt,
+ FloatRegister lhs, FloatRegister rhs,
+ MBasicBlock* mir,
+ Assembler::DoubleCondition cond) {
+ // Skip past trivial blocks.
+ Label* label = skipTrivialBlocks(mir)->lir()->label();
+ if (fmt == Assembler::DoubleFloat) {
+ masm.branchDouble(cond, lhs, rhs, label);
+ } else {
+ masm.branchFloat(cond, lhs, rhs, label);
+ }
+}
+
+void OutOfLineBailout::accept(CodeGeneratorLOONG64* codegen) {
+ codegen->visitOutOfLineBailout(this);
+}
+
+MoveOperand CodeGeneratorLOONG64::toMoveOperand(LAllocation a) const {
+ if (a.isGeneralReg()) {
+ return MoveOperand(ToRegister(a));
+ }
+ if (a.isFloatReg()) {
+ return MoveOperand(ToFloatRegister(a));
+ }
+ MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
+ : MoveOperand::Kind::Memory;
+ Address address = ToAddress(a);
+ MOZ_ASSERT((address.offset & 3) == 0);
+
+ return MoveOperand(address, kind);
+}
+
+void CodeGeneratorLOONG64::bailoutFrom(Label* label, LSnapshot* snapshot) {
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void CodeGeneratorLOONG64::bailout(LSnapshot* snapshot) {
+ Label label;
+ masm.jump(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+bool CodeGeneratorLOONG64::generateOutOfLineCode() {
+ if (!CodeGeneratorShared::generateOutOfLineCode()) {
+ return false;
+ }
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+    // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk.
+ // We have to use 'ra' because generateBailoutTable will implicitly do
+ // the same.
+ masm.move32(Imm32(frameSize()), ra);
+
+ TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.jump(handler);
+ }
+
+ return !masm.oom();
+}
+
+class js::jit::OutOfLineTableSwitch
+ : public OutOfLineCodeBase<CodeGeneratorLOONG64> {
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorLOONG64* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}
+
+ MTableSwitch* mir() const { return mir_; }
+
+ CodeLabel* jumpLabel() { return &jumpLabel_; }
+};
+
+void CodeGeneratorLOONG64::emitTableSwitchDispatch(MTableSwitch* mir,
+ Register index,
+ Register base) {
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+  // Make the index zero-based by subtracting the low case value.
+ if (mir->low() != 0) {
+ masm.subPtr(Imm32(mir->low()), index);
+ }
+
+ // Jump to default case if input is out of range
+ int32_t cases = mir->numCases();
+ masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first
+ // generate the case entries (we don't yet know their offsets in the
+ // instruction stream).
+ OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+ // Compute the position where a pointer to the right case stands.
+ masm.ma_li(base, ool->jumpLabel());
+
+ BaseIndex pointer(base, index, ScalePointer);
+
+ // Jump to the right case
+ masm.branchToComputedAddress(pointer);
+}
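+
+// Note (illustrative, not part of the original patch): the dispatch above
+// rebases the index against mir->low(), bounds-checks it against the default
+// case, then loads a pointer-sized entry from the jump table emitted in
+// visitOutOfLineTableSwitch() and branches to that absolute address.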
+
+void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
+ MOZ_ASSERT(ins->instance()->isBogus());
+ masm.movePtr(HeapReg, ToRegister(ins->output()));
+}
+
+template <typename T>
+void CodeGeneratorLOONG64::emitWasmLoad(T* lir) {
+ const MWasmLoad* mir = lir->mir();
+ SecondScratchRegisterScope scratch2(masm);
+
+ Register ptr = ToRegister(lir->ptr());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (mir->base()->type() == MIRType::Int32) {
+ masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+ ptr = scratch2;
+ ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+ }
+
+ // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+ // true 64-bit value.
+ masm.wasmLoad(mir->access(), HeapReg, ptr, ptrScratch,
+ ToAnyRegister(lir->output()));
+}
+
+template <typename T>
+void CodeGeneratorLOONG64::emitWasmStore(T* lir) {
+ const MWasmStore* mir = lir->mir();
+ SecondScratchRegisterScope scratch2(masm);
+
+ Register ptr = ToRegister(lir->ptr());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (mir->base()->type() == MIRType::Int32) {
+ masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+ ptr = scratch2;
+ ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+ }
+
+ // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+ // true 64-bit value.
+ masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), HeapReg, ptr,
+ ptrScratch);
+}
+
+void CodeGeneratorLOONG64::generateInvalidateEpilogue() {
+ // Ensure that there is enough space in the buffer for the OsiPoint
+ // patching to occur. Otherwise, we could overwrite the invalidation
+  // epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
+ masm.nop();
+ }
+
+ masm.bind(&invalidate_);
+
+ // Push the return address of the point that we bailed out at to the stack
+ masm.Push(ra);
+
+ // Push the Ion script onto the stack (when we determine what that
+ // pointer is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+ // Jump to the invalidator which will replace the current frame.
+ TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+
+ masm.jump(thunk);
+}
+
+void CodeGeneratorLOONG64::visitOutOfLineBailout(OutOfLineBailout* ool) {
+ // Push snapshotOffset and make sure stack is aligned.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()),
+ Address(StackPointer, 0));
+
+ masm.jump(&deoptLabel_);
+}
+
+void CodeGeneratorLOONG64::visitOutOfLineTableSwitch(
+ OutOfLineTableSwitch* ool) {
+ MTableSwitch* mir = ool->mir();
+
+ masm.haltingAlign(sizeof(void*));
+ masm.bind(ool->jumpLabel());
+ masm.addCodeLabel(*ool->jumpLabel());
+
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void CodeGeneratorLOONG64::visitOutOfLineWasmTruncateCheck(
+ OutOfLineWasmTruncateCheck* ool) {
+ if (ool->toType() == MIRType::Int32) {
+ masm.outOfLineWasmTruncateToInt32Check(
+ ool->input(), ool->output(), ool->fromType(), ool->flags(),
+ ool->rejoin(), ool->bytecodeOffset());
+ } else {
+ MOZ_ASSERT(ool->toType() == MIRType::Int64);
+ masm.outOfLineWasmTruncateToInt64Check(
+ ool->input(), ool->output64(), ool->fromType(), ool->flags(),
+ ool->rejoin(), ool->bytecodeOffset());
+ }
+}
+
+ValueOperand CodeGeneratorLOONG64::ToValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand CodeGeneratorLOONG64::ToTempValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LAllocation* in = box->getOperand(0);
+ ValueOperand result = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ MUnbox* mir = unbox->mir();
+
+ Register result = ToRegister(unbox->output());
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ Label bail;
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.fallibleUnboxInt32(value, result, &bail);
+ break;
+ case MIRType::Boolean:
+ masm.fallibleUnboxBoolean(value, result, &bail);
+ break;
+ case MIRType::Object:
+ masm.fallibleUnboxObject(value, result, &bail);
+ break;
+ case MIRType::String:
+ masm.fallibleUnboxString(value, result, &bail);
+ break;
+ case MIRType::Symbol:
+ masm.fallibleUnboxSymbol(value, result, &bail);
+ break;
+ case MIRType::BigInt:
+ masm.fallibleUnboxBigInt(value, result, &bail);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutFrom(&bail, unbox->snapshot());
+ return;
+ }
+
+ LAllocation* input = unbox->getOperand(LUnbox::Input);
+ if (input->isRegister()) {
+ Register inputReg = ToRegister(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputReg, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputReg, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputReg, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputReg, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputReg, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputReg, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ return;
+ }
+
+ Address inputAddr = ToAddress(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputAddr, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputAddr, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputAddr, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputAddr, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputAddr, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputAddr, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+void CodeGeneratorLOONG64::splitTagForTest(const ValueOperand& value,
+ ScratchTagScope& tag) {
+ masm.splitTag(value.valueReg(), tag);
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ const mozilla::DebugOnly<MCompare::CompareType> type = mir->compareType();
+ MOZ_ASSERT(type == MCompare::Compare_Int64 ||
+ type == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+
+ if (IsConstant(rhs)) {
+ masm.cmpPtrSet(cond, lhsReg, ImmWord(ToInt64(rhs)), output);
+ } else if (rhs.value().isGeneralReg()) {
+ masm.cmpPtrSet(cond, lhsReg, ToRegister64(rhs).reg, output);
+ } else {
+ masm.cmpPtrSet(cond, lhsReg, ToAddress(rhs.value()), output);
+ }
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ const mozilla::DebugOnly<MCompare::CompareType> type = mir->compareType();
+ MOZ_ASSERT(type == MCompare::Compare_Int64 ||
+ type == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+
+ if (IsConstant(rhs)) {
+ emitBranch(lhsReg, ImmWord(ToInt64(rhs)), cond, lir->ifTrue(),
+ lir->ifFalse());
+ } else if (rhs.value().isGeneralReg()) {
+ emitBranch(lhsReg, ToRegister64(rhs).reg, cond, lir->ifTrue(),
+ lir->ifFalse());
+ } else {
+ emitBranch(lhsReg, ToAddress(rhs.value()), cond, lir->ifTrue(),
+ lir->ifFalse());
+ }
+}
+
+void CodeGenerator::visitCompare(LCompare* comp) {
+ MCompare* mir = comp->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+ const LAllocation* left = comp->getOperand(0);
+ const LAllocation* right = comp->getOperand(1);
+ const LDefinition* def = comp->getDef(0);
+
+ if (mir->compareType() == MCompare::Compare_Object ||
+ mir->compareType() == MCompare::Compare_Symbol ||
+ mir->compareType() == MCompare::Compare_UIntPtr ||
+ mir->compareType() == MCompare::Compare_RefOrNull) {
+ if (right->isConstant()) {
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
+ masm.cmpPtrSet(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right),
+ ToRegister(def));
+ } else {
+ masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+ return;
+ }
+
+ if (right->isConstant()) {
+ masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+ } else {
+ masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+}
+
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+ const MCompare* mir = comp->cmpMir();
+ const MCompare::CompareType type = mir->compareType();
+ const LAllocation* lhs = comp->left();
+ const LAllocation* rhs = comp->right();
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+ Register lhsReg = ToRegister(lhs);
+ const Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
+
+ if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol ||
+ type == MCompare::Compare_UIntPtr ||
+ type == MCompare::Compare_RefOrNull) {
+ if (rhs->isConstant()) {
+ emitBranch(ToRegister(lhs), Imm32(ToInt32(rhs)), cond, ifTrue, ifFalse);
+ } else if (rhs->isGeneralReg()) {
+ emitBranch(lhsReg, ToRegister(rhs), cond, ifTrue, ifFalse);
+ } else {
+ MOZ_CRASH("NYI");
+ }
+ return;
+ }
+
+ if (rhs->isConstant()) {
+ emitBranch(lhsReg, Imm32(ToInt32(comp->right())), cond, ifTrue, ifFalse);
+ } else if (comp->right()->isGeneralReg()) {
+ emitBranch(lhsReg, ToRegister(rhs), cond, ifTrue, ifFalse);
+ } else {
+    // TODO(loong64): emitBranch with 32-bit comparison.
+ ScratchRegisterScope scratch(masm);
+ masm.load32(ToAddress(rhs), scratch);
+ emitBranch(lhsReg, Register(scratch), cond, ifTrue, ifFalse);
+ }
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
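+  // INT64_MIN / -1 is not representable in int64; wasm requires the division
+  // to trap and the remainder to be 0, which is what the paths below do.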
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
+ if (lir->mir()->isMod()) {
+ masm.as_xor(output, output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+ if (lir->mir()->isMod()) {
+ masm.as_mod_d(output, lhs, rhs);
+ } else {
+ masm.as_div_d(output, lhs, rhs);
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ if (lir->mir()->isMod()) {
+ masm.as_mod_du(output, lhs, rhs);
+ } else {
+ masm.as_div_du(output, lhs, rhs);
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGeneratorLOONG64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+ masm.as_div_d(/* result= */ dividend, dividend, divisor);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorLOONG64::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+ masm.as_mod_d(/* result= */ dividend, dividend, divisor);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
+ const MWasmLoad* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ Register ptrReg = ToRegister(lir->ptr());
+ if (mir->base()->type() == MIRType::Int32) {
+ // See comment in visitWasmLoad re the type of 'base'.
+ masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
+ }
+
+ masm.wasmLoadI64(mir->access(), HeapReg, ptrReg, ptrScratch,
+ ToOutRegister64(lir));
+}
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+ const MWasmStore* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ Register ptrReg = ToRegister(lir->ptr());
+ if (mir->base()->type() == MIRType::Int32) {
+ // See comment in visitWasmLoad re the type of 'base'.
+ masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
+ }
+
+ masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg, ptrReg,
+ ptrScratch);
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+ "true expr is reused for input");
+
+ if (falseExpr.value().isRegister()) {
+ masm.moveIfZero(out.reg, ToRegister(falseExpr.value()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.as_movgr2fr_d(ToFloatRegister(lir->output()), ToRegister(lir->input()));
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.as_movfr2gr_d(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
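+  // On LoongArch64, bstrpick.d copies bits 31..0 and zero-extends them, while
+  // slli.w with a zero shift amount sign-extends the low 32 bits.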
+ if (lir->mir()->isUnsigned()) {
+ masm.as_bstrpick_d(output, ToRegister(input), 31, 0);
+ } else {
+ masm.as_slli_w(output, ToRegister(input), 0);
+ }
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ if (input->isMemory()) {
+ masm.load32(ToAddress(input), output);
+ } else {
+ masm.as_slli_w(output, ToRegister(input), 0);
+ }
+ } else {
+ MOZ_CRASH("Not implemented.");
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move8SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move16SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Word:
+ masm.move32To64SignExtend(input.reg, output);
+ break;
+ }
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move32To64ZeroExtend(input, Register64(output));
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move64To32(Register64(input), output);
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.reg);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.reg);
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
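+  // !x for a 64-bit value is true exactly when the value is zero.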
+ masm.ma_cmp_set(output, input.reg, zero, Assembler::Equal);
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
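+  // The out-of-line path handles out-of-range inputs: saturating truncations
+  // clamp (with NaN producing 0) while non-saturating ones trap.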
+
+ Label* oolEntry = ool->entry();
+ Label* oolRejoin = ool->rejoin();
+ bool isSaturating = mir->isSaturating();
+
+ if (fromType == MIRType::Double) {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ } else {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ }
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToDouble(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToDouble(input, output);
+ }
+ } else {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToFloat32(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToFloat32(input, output);
+ }
+ }
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ MBasicBlock* ifTrue = lir->ifTrue();
+ MBasicBlock* ifFalse = lir->ifFalse();
+
+ emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
+ const LAllocation* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxDouble(second, first, true);
+ } else {
+ masm.minDouble(second, first, true);
+ }
+}
+
+void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxFloat32(second, first, true);
+ } else {
+ masm.minFloat32(second, first, true);
+ }
+}
+
+void CodeGenerator::visitAddI(LAddI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_add_w(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_add_w(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitAddI64(LAddI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitSubI(LSubI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+  // If there is no snapshot, we don't need to check for overflow
+  if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_sub_w(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_sub_w(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitSubI64(LSubI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitMulI(LMulI* ins) {
+ const LAllocation* lhs = ins->lhs();
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+ MMul* mul = ins->mir();
+
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
+ !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ int32_t constant = ToInt32(rhs);
+ Register src = ToRegister(lhs);
+
+    // Bail out on -0.0.
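+    // With constant == 0 the product is -0 exactly when src < 0; with a
+    // negative constant it is -0 exactly when src == 0.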
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition cond =
+ (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+ bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
+ }
+
+ switch (constant) {
+ case -1:
+ if (mul->canOverflow()) {
+ bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN),
+ ins->snapshot());
+ }
+
+ masm.as_sub_w(dest, zero, src);
+ break;
+ case 0:
+ masm.move32(zero, dest);
+ break;
+ case 1:
+ masm.move32(src, dest);
+ break;
+ case 2:
+ if (mul->canOverflow()) {
+ Label mulTwoOverflow;
+ masm.ma_add32TestOverflow(dest, src, src, &mulTwoOverflow);
+
+ bailoutFrom(&mulTwoOverflow, ins->snapshot());
+ } else {
+ masm.as_add_w(dest, src, src);
+ }
+ break;
+ default:
+ uint32_t shift = FloorLog2(constant);
+
+ if (!mul->canOverflow() && (constant > 0)) {
+ // If it cannot overflow, we can do lots of optimizations.
+ uint32_t rest = constant - (1 << shift);
+
+ // See if the constant has one bit set, meaning it can be
+ // encoded as a bitshift.
+ if ((1 << shift) == constant) {
+ masm.as_slli_w(dest, src, shift % 32);
+ return;
+ }
+
+ // If the constant cannot be encoded as (1<<C1), see if it can
+ // be encoded as (1<<C1) | (1<<C2), which can be computed
+ // using an add and a shift.
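+          // For example, constant == 10 gives shift == 3 and rest == 2, so the
+          // sequence below computes ((src << 2) + src) << 1 == src * 10.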
+ uint32_t shift_rest = FloorLog2(rest);
+ if (src != dest && (1u << shift_rest) == rest) {
+ masm.as_slli_w(dest, src, (shift - shift_rest) % 32);
+ masm.add32(src, dest);
+ if (shift_rest != 0) {
+ masm.as_slli_w(dest, dest, shift_rest % 32);
+ }
+ return;
+ }
+ }
+
+ if (mul->canOverflow() && (constant > 0) && (src != dest)) {
+ // To stay on the safe side, only optimize things that are a
+ // power of 2.
+
+ if ((1 << shift) == constant) {
+ ScratchRegisterScope scratch(masm);
+ // dest = lhs * pow(2, shift)
+ masm.as_slli_w(dest, src, shift % 32);
+ // At runtime, check (lhs == dest >> shift), if this does
+ // not hold, some bits were lost due to overflow, and the
+ // computation should be resumed as a double.
+ masm.as_srai_w(scratch, dest, shift % 32);
+ bailoutCmp32(Assembler::NotEqual, src, Register(scratch),
+ ins->snapshot());
+ return;
+ }
+ }
+
+ if (mul->canOverflow()) {
+ Label mulConstOverflow;
+ masm.ma_mul32TestOverflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
+ &mulConstOverflow);
+
+ bailoutFrom(&mulConstOverflow, ins->snapshot());
+ } else {
+ masm.ma_mul(dest, src, Imm32(ToInt32(rhs)));
+ }
+ break;
+ }
+ } else {
+ Label multRegOverflow;
+
+ if (mul->canOverflow()) {
+ masm.ma_mul32TestOverflow(dest, ToRegister(lhs), ToRegister(rhs),
+ &multRegOverflow);
+ bailoutFrom(&multRegOverflow, ins->snapshot());
+ } else {
+ masm.as_mul_w(dest, ToRegister(lhs), ToRegister(rhs));
+ }
+
+ if (mul->canBeNegativeZero()) {
+ Label done;
+ masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);
+
+      // The result is -0 if lhs or rhs is negative.
+      // In that case the result must be a double value, so bail out.
+ Register scratch = SecondScratchReg;
+ masm.as_or(scratch, ToRegister(lhs), ToRegister(rhs));
+ bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
+
+void CodeGenerator::visitMulI64(LMulI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+ const Register64 output = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lhs) == output);
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ case 2:
+ masm.as_add_d(output.reg, ToRegister64(lhs).reg, ToRegister64(lhs).reg);
+ return;
+ default:
+ if (constant > 0) {
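+          // Strength-reduce constants adjacent to a power of two, e.g.
+          // lhs * 7 == (lhs << 3) - lhs and lhs * 9 == (lhs << 3) + lhs.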
+ if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(constant + 1))) {
+ ScratchRegisterScope scratch(masm);
+ masm.movePtr(ToRegister64(lhs).reg, scratch);
+ masm.as_slli_d(output.reg, ToRegister64(lhs).reg,
+ FloorLog2(constant + 1));
+ masm.sub64(scratch, output);
+ return;
+ } else if (mozilla::IsPowerOfTwo(
+ static_cast<uint64_t>(constant - 1))) {
+ int32_t shift = mozilla::FloorLog2(constant - 1);
+ if (shift < 5) {
+ masm.as_alsl_d(output.reg, ToRegister64(lhs).reg,
+ ToRegister64(lhs).reg, shift - 1);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.movePtr(ToRegister64(lhs).reg, scratch);
+ masm.as_slli_d(output.reg, ToRegister64(lhs).reg, shift);
+ masm.add64(scratch, output);
+ }
+ return;
+ }
+ // Use shift if constant is power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
+
+void CodeGenerator::visitDivI(LDivI* ins) {
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register temp = ToRegister(ins->getTemp(0));
+ MDiv* mir = ins->mir();
+
+ Label done;
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else if (mir->canTruncateInfinities()) {
+ // Truncated division by zero is zero (Infinity|0 == 0)
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
+ }
+ }
+
+ // Handle an integer overflow exception from -2147483648 / -1.
+ if (mir->canBeNegativeOverflow()) {
+ Label notMinInt;
+ masm.move32(Imm32(INT32_MIN), temp);
+ masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);
+
+ masm.move32(Imm32(-1), temp);
+ if (mir->trapOnError()) {
+ Label ok;
+ masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+ masm.bind(&ok);
+ } else if (mir->canTruncateOverflow()) {
+ // (-INT32_MIN)|0 == INT32_MIN
+ Label skip;
+ masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(INT32_MIN), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
+ }
+ masm.bind(&notMinInt);
+ }
+
+  // Handle negative zero (0 / -Y): the result is -0, which is not an int32.
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
+ bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
+ masm.bind(&nonzero);
+ }
+ // Note: above safety checks could not be verified as Ion seems to be
+ // smarter and requires double arithmetic in such cases.
+
+ // All regular. Lets call div.
+ if (mir->canTruncateRemainder()) {
+ masm.as_div_w(dest, lhs, rhs);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+
+ Label remainderNonZero;
+ masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
+ bailoutFrom(&remainderNonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
+ Register lhs = ToRegister(ins->numerator());
+ Register dest = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->getTemp(0));
+ int32_t shift = ins->shift();
+
+ if (shift != 0) {
+ MDiv* mir = ins->mir();
+ if (!mir->isTruncated()) {
+      // If the remainder is going to be != 0, bail out since the result must
+      // be a double.
+ masm.as_slli_w(tmp, lhs, (32 - shift) % 32);
+ bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
+ }
+
+ if (!mir->canBeNegativeDividend()) {
+ // Numerator is unsigned, so needs no adjusting. Do the shift.
+ masm.as_srai_w(dest, lhs, shift % 32);
+ return;
+ }
+
+ // Adjust the value so that shifting produces a correctly rounded result
+ // when the numerator is negative. See 10-1 "Signed Division by a Known
+ // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
+ if (shift > 1) {
+ masm.as_srai_w(tmp, lhs, 31);
+ masm.as_srli_w(tmp, tmp, (32 - shift) % 32);
+ masm.add32(lhs, tmp);
+ } else {
+ masm.as_srli_w(tmp, lhs, (32 - shift) % 32);
+ masm.add32(lhs, tmp);
+ }
+
+ // Do the shift.
+ masm.as_srai_w(dest, tmp, shift % 32);
+ } else {
+ masm.move32(lhs, dest);
+ }
+}
+
+void CodeGenerator::visitModI(LModI* ins) {
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register callTemp = ToRegister(ins->callTemp());
+ MMod* mir = ins->mir();
+ Label done, prevent;
+
+ masm.move32(lhs, callTemp);
+
+  // Prevent INT_MIN % -1: the underlying integer division overflows, and in JS
+  // the result should be -0, which is only representable as a double.
+ if (mir->canBeNegativeDividend()) {
+ masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
+ if (mir->isTruncated()) {
+ // (INT_MIN % -1)|0 == 0
+ Label skip;
+ masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
+ }
+ masm.bind(&prevent);
+ }
+
+ // 0/X (with X < 0) is bad because both of these values *should* be
+ // doubles, and the result should be -0.0, which cannot be represented in
+  // integers. X/0 is bad because it will give garbage (or abort) when it
+  // should give either \infty, -\infty or NaN.
+
+ // Prevent 0 / X (with X < 0) and X / 0
+ // testing X / Y. Compare Y with 0.
+ // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
+ // If (Y < 0), then we compare X with 0, and bail if X == 0
+ // If (Y == 0), then we simply want to bail.
+ // if (Y > 0), we don't bail.
+
+ if (mir->canBeDivideByZero()) {
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ Label skip;
+ masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
+ if (mir->canBeNegativeDividend()) {
+ Label notNegative;
+ masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
+ if (mir->isTruncated()) {
+ // NaN|0 == 0 and (0 % -X)|0 == 0
+ Label skip;
+ masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.bind(&notNegative);
+ }
+
+ masm.as_mod_w(dest, lhs, rhs);
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ // See if X < 0
+ masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
+ }
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
+ Register in = ToRegister(ins->getOperand(0));
+ Register out = ToRegister(ins->getDef(0));
+ MMod* mir = ins->mir();
+ Label negative, done;
+
+ masm.move32(in, out);
+ masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
+ // Switch based on sign of the lhs.
+ // Positive numbers are just a bitmask
+ masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
+ {
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.ma_b(&done, ShortJump);
+ }
+
+ // Negative numbers need a negate, bitmask, negate
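+  // For example, with shift == 2: -5 % 4 is computed as -(5 & 3) == -1,
+  // matching truncated (C-style) modulo.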
+ {
+ masm.bind(&negative);
+ masm.neg32(out);
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.neg32(out);
+ }
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitModMaskI(LModMaskI* ins) {
+ Register src = ToRegister(ins->getOperand(0));
+ Register dest = ToRegister(ins->getDef(0));
+ Register tmp0 = ToRegister(ins->getTemp(0));
+ Register tmp1 = ToRegister(ins->getTemp(1));
+ MMod* mir = ins->mir();
+
+ if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
+ MOZ_ASSERT(mir->fallible());
+
+ Label bail;
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ } else {
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
+ }
+}
+
+void CodeGenerator::visitBitNotI(LBitNotI* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ const LDefinition* dest = ins->getDef(0);
+ MOZ_ASSERT(!input->isConstant());
+
+ masm.as_nor(ToRegister(dest), ToRegister(input), zero);
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ MOZ_ASSERT(!input->isConstant());
+ Register inputReg = ToRegister(input);
+ MOZ_ASSERT(inputReg == ToRegister(ins->output()));
+ masm.as_nor(inputReg, inputReg, zero);
+}
+
+void CodeGenerator::visitBitOpI(LBitOpI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+  // All of these bitops take either an imm32 or an integer register as rhs.
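+  // The slli.w with a zero shift amount below sign-extends the 32-bit result
+  // into the full 64-bit register, keeping int32 values canonical.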
+ switch (ins->bitop()) {
+ case JSOp::BitOr:
+ if (rhs->isConstant()) {
+ masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)),
+ true);
+ } else {
+ masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ masm.as_slli_w(ToRegister(dest), ToRegister(dest), 0);
+ }
+ break;
+ case JSOp::BitXor:
+ if (rhs->isConstant()) {
+ masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)),
+ true);
+ } else {
+ masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ masm.as_slli_w(ToRegister(dest), ToRegister(dest), 0);
+ }
+ break;
+ case JSOp::BitAnd:
+ if (rhs->isConstant()) {
+ masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)),
+ true);
+ } else {
+ masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ masm.as_slli_w(ToRegister(dest), ToRegister(dest), 0);
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOp::BitOr:
+ if (IsConstant(rhs)) {
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitXor:
+ if (IsConstant(rhs)) {
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitAnd:
+ if (IsConstant(rhs)) {
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitShiftI(LShiftI* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.as_slli_w(dest, lhs, shift % 32);
+ } else {
+ masm.move32(lhs, dest);
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.as_srai_w(dest, lhs, shift % 32);
+ } else {
+ masm.move32(lhs, dest);
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.as_srli_w(dest, lhs, shift % 32);
+ } else {
+ // x >>> 0 can overflow.
+ if (ins->mir()->toUrsh()->fallible()) {
+ bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.move32(lhs, dest);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+    // JS takes shift counts modulo 32, so mask the amount into the 0-31 range.
+ masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));
+
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ masm.as_sll_w(dest, lhs, dest);
+ break;
+ case JSOp::Rsh:
+ masm.as_sra_w(dest, lhs, dest);
+ break;
+ case JSOp::Ursh:
+ masm.as_srl_w(dest, lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Rsh:
+ masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Ursh:
+ masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void CodeGenerator::visitRotateI64(LRotateI64* lir) {
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ MOZ_ASSERT(input == output);
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c) {
+ return;
+ }
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ } else {
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ }
+ } else {
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ } else {
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+ }
+}
+
+void CodeGenerator::visitUrshD(LUrshD* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp());
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ masm.as_srli_w(temp, lhs, ToInt32(rhs) % 32);
+ } else {
+ masm.as_srl_w(temp, lhs, ToRegister(rhs));
+ }
+
+ masm.convertUInt32ToDouble(temp, out);
+}
+
+void CodeGenerator::visitClzI(LClzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.as_clz_w(output, input);
+}
+
+void CodeGenerator::visitCtzI(LCtzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.as_ctz_w(output, input);
+}
+
+void CodeGenerator::visitPopcntI(LPopcntI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->temp0());
+
+ masm.popcnt32(input, output, tmp);
+}
+
+void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ Register64 output = ToOutRegister64(ins);
+ Register tmp = ToRegister(ins->getTemp(0));
+
+ masm.popcnt64(input, output, tmp);
+}
+
+void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ ScratchDoubleScope fpscratch(masm);
+
+ Label done, skip;
+
+  // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), fpscratch);
+ masm.ma_bc_d(input, fpscratch, &skip, Assembler::DoubleNotEqualOrUnordered,
+ ShortJump);
+ masm.as_fneg_d(output, fpscratch);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&skip);
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
+ masm.loadConstantDouble(0.0, fpscratch);
+ masm.as_fadd_d(output, input, fpscratch);
+ masm.as_fsqrt_d(output, output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitMathD(LMathD* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.as_fadd_d(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.as_fsub_d(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.as_fmul_d(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.as_fdiv_d(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitMathF(LMathF* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.as_fadd_s(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.as_fsub_s(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.as_fmul_s(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.as_fdiv_s(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* lir) {
+ emitTruncateDouble(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* lir) {
+ emitTruncateFloat32(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
+ auto input = ToFloatRegister(lir->input());
+ auto output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ if (mir->isUnsigned()) {
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+ return;
+ }
+
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCopySignF(LCopySignF* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ masm.moveFromFloat32(lhs, lhsi);
+ masm.moveFromFloat32(rhs, rhsi);
+
+  // Combine: insert bits 30..0 (the magnitude) of lhs into rhs, keeping the
+  // sign bit of rhs.
+ masm.as_bstrins_w(rhsi, lhsi, 30, 0);
+
+ masm.moveToFloat32(rhsi, output);
+}
+
+void CodeGenerator::visitCopySignD(LCopySignD* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ // Manipulate high words of double inputs.
+ masm.moveFromDoubleHi(lhs, lhsi);
+ masm.moveFromDoubleHi(rhs, rhsi);
+
+  // Combine: insert bits 30..0 of the lhs high word into the rhs high word,
+  // keeping the sign bit of rhs.
+ masm.as_bstrins_w(rhsi, lhsi, 30, 0);
+
+ masm.moveToDoubleHi(rhsi, output);
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ const ValueOperand out = ToOutValue(value);
+
+ masm.moveValue(value->value(), out);
+}
+
+void CodeGenerator::visitDouble(LDouble* ins) {
+ const LDefinition* out = ins->getDef(0);
+
+ masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitFloat32(LFloat32* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
+ FloatRegister input = ToFloatRegister(test->input());
+ ScratchDoubleScope fpscratch(masm);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantDouble(0.0, fpscratch);
+ // If 0, or NaN, the result is false.
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, input, fpscratch, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, input, fpscratch, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
+ FloatRegister input = ToFloatRegister(test->input());
+ ScratchFloat32Scope fpscratch(masm);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantFloat32(0.0f, fpscratch);
+ // If 0, or NaN, the result is false.
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, input, fpscratch, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::SingleFloat, input, fpscratch, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitCompareD(LCompareD* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_double(dest, lhs, rhs, cond);
+}
+
+void CodeGenerator::visitCompareF(LCompareF* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
+}
+
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
+ ScratchRegisterScope scratch(masm);
+ if (lir->right()->isConstant()) {
+ masm.ma_and(scratch, ToRegister(lir->left()), Imm32(ToInt32(lir->right())));
+ } else {
+ masm.as_and(scratch, ToRegister(lir->left()), ToRegister(lir->right()));
+ }
+ emitBranch(scratch, Register(scratch), lir->cond(), lir->ifTrue(),
+ lir->ifFalse());
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ masm.convertUInt32ToDouble(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitNotI(LNotI* ins) {
+ masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
+ ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitNotD(LNotD* ins) {
+  // Since this operation is a logical NOT, set the output if the double is
+  // falsy, i.e. 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+ ScratchDoubleScope fpscratch(masm);
+
+ masm.loadConstantDouble(0.0, fpscratch);
+ masm.ma_cmp_set_double(dest, in, fpscratch,
+ Assembler::DoubleEqualOrUnordered);
+}
+
+void CodeGenerator::visitNotF(LNotF* ins) {
+  // Since this operation is a logical NOT, set the output if the float32 is
+  // falsy, i.e. 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+ ScratchFloat32Scope fpscratch(masm);
+
+ masm.loadConstantFloat32(0.0f, fpscratch);
+ masm.ma_cmp_set_float32(dest, in, fpscratch,
+ Assembler::DoubleEqualOrUnordered);
+}
+
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+ masm.memoryBarrier(ins->type());
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
+
+void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
+
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
+ const MAsmJSLoadHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasMemoryBase());
+
+ const LAllocation* ptr = ins->ptr();
+ const LDefinition* output = ins->output();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ Register ptrReg = ToRegister(ptr);
+ Scalar::Type accessType = mir->accessType();
+ bool isFloat = accessType == Scalar::Float32 || accessType == Scalar::Float64;
+ Label done;
+
+ if (mir->needsBoundsCheck()) {
+ Label boundsCheckPassed;
+ Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
+ masm.wasmBoundsCheck32(Assembler::Below, ptrReg, boundsCheckLimitReg,
+ &boundsCheckPassed);
+ // Return a default value in case of a bounds-check failure.
+ if (isFloat) {
+ if (accessType == Scalar::Float32) {
+ masm.loadConstantFloat32(GenericNaN(), ToFloatRegister(output));
+ } else {
+ masm.loadConstantDouble(GenericNaN(), ToFloatRegister(output));
+ }
+ } else {
+ masm.mov(zero, ToRegister(output));
+ }
+ masm.jump(&done);
+ masm.bind(&boundsCheckPassed);
+ }
+
+ // TODO(loong64): zero-extend index in asm.js?
+ SecondScratchRegisterScope scratch2(masm);
+ masm.move32To64ZeroExtend(ptrReg, Register64(scratch2));
+
+ switch (accessType) {
+ case Scalar::Int8:
+ masm.as_ldx_b(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Uint8:
+ masm.as_ldx_bu(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Int16:
+ masm.as_ldx_h(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Uint16:
+ masm.as_ldx_hu(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.as_ldx_w(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Float64:
+ masm.as_fldx_d(ToFloatRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Float32:
+ masm.as_fldx_s(ToFloatRegister(output), HeapReg, scratch2);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
+ const MAsmJSStoreHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasMemoryBase());
+
+ const LAllocation* value = ins->value();
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ Register ptrReg = ToRegister(ptr);
+
+ Label done;
+ if (mir->needsBoundsCheck()) {
+ Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg, boundsCheckLimitReg,
+ &done);
+ }
+
+ // TODO(loong64): zero-extend index in asm.js?
+ SecondScratchRegisterScope scratch2(masm);
+ masm.move32To64ZeroExtend(ptrReg, Register64(scratch2));
+
+ switch (mir->accessType()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ masm.as_stx_b(ToRegister(value), HeapReg, scratch2);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.as_stx_h(ToRegister(value), HeapReg, scratch2);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.as_stx_w(ToRegister(value), HeapReg, scratch2);
+ break;
+ case Scalar::Float64:
+ masm.as_fstx_d(ToFloatRegister(value), HeapReg, scratch2);
+ break;
+ case Scalar::Float32:
+ masm.as_fstx_s(ToFloatRegister(value), HeapReg, scratch2);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MWasmCompareExchangeHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MWasmAtomicExchangeHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
+ maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MOZ_ASSERT(ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+
+ masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
+ ToRegister(ins->value()), srcAddr, valueTemp,
+ offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MOZ_ASSERT(!ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
+ ToRegister(ins->value()), srcAddr, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ if (ins->arg()->isConstant()) {
+ masm.storePtr(ImmWord(ToInt32(ins->arg())),
+ Address(StackPointer, mir->spOffset()));
+ } else {
+ if (ins->arg()->isGeneralReg()) {
+ masm.storePtr(ToRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ } else if (mir->input()->type() == MIRType::Double) {
+ masm.storeDouble(ToFloatRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ } else {
+ masm.storeFloat32(ToFloatRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ }
+ }
+}
+
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg())) {
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ } else {
+ masm.store64(ToRegister64(ins->arg()), dst);
+ }
+}
+
+void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ const LAllocation* falseExpr = ins->falseExpr();
+
+ if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+ if (falseExpr->isRegister()) {
+ masm.moveIfZero(out, ToRegister(falseExpr), cond);
+ } else {
+ masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
+ }
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+
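+  // When |cond| is zero, overwrite |out| with the false expression;
+  // otherwise |out| already holds the true expression.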
+ if (falseExpr->isFloatReg()) {
+ if (mirType == MIRType::Float32) {
+ masm.ma_fmovz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr),
+ cond);
+ } else if (mirType == MIRType::Double) {
+ masm.ma_fmovz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr),
+ cond);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+
+ if (mirType == MIRType::Float32) {
+ masm.loadFloat32(ToAddress(falseExpr), out);
+ } else if (mirType == MIRType::Double) {
+ masm.loadDouble(ToAddress(falseExpr), out);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+
+ masm.bind(&done);
+ }
+}
+
+// We expect to handle only the case where compare is {U,}Int32 and select is
+// {U,}Int32, and the "true" input is reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+ ins->compareType() == MCompare::Compare_UInt32;
+ bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+ MOZ_RELEASE_ASSERT(
+ cmpIs32bit && selIs32bit,
+ "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+ Register trueExprAndDest = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+ "true expr input is reused for output");
+
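+  // Invert the comparison so that cmp32Move32 replaces the reused true value
+  // with the false value exactly when the original comparison fails.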
+ Assembler::Condition cond = Assembler::InvertCondition(
+ JSOpToCondition(ins->compareType(), ins->jsop()));
+ const LAllocation* rhs = ins->rightExpr();
+ const LAllocation* falseExpr = ins->ifFalseExpr();
+ Register lhs = ToRegister(ins->leftExpr());
+
+ masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+}
+
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+ mozilla::DebugOnly<MIRType> from = ins->input()->type();
+
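+  // movfr2gr/movgr2fr transfer the raw bits between a floating-point
+  // register and a general-purpose register without any conversion.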
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.as_movfr2gr_s(ToRegister(lir->output()),
+ ToFloatRegister(lir->input()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.as_movgr2fr_w(ToFloatRegister(lir->output()),
+ ToRegister(lir->input()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Label done;
+
+ // Prevent divide by zero.
+ if (ins->canBeDivideByZero()) {
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ // Infinity|0 == 0
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ }
+ } else {
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
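+  // Compute the remainder first; it is the result for mod, and for div it
+  // is checked below before the quotient is computed.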
+ masm.as_mod_wu(output, lhs, rhs);
+
+  // If the remainder is non-zero, bail out since the result must be a double.
+ if (ins->mir()->isDiv()) {
+ if (!ins->mir()->toDiv()->canTruncateRemainder()) {
+ bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
+ }
+ // Get quotient
+ masm.as_div_wu(output, lhs, rhs);
+ }
+
+ if (!ins->mir()->isTruncated()) {
+ bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+
+ BaseIndex address(base, index, mir->scale(), mir->displacement());
+ masm.computeEffectiveAddress(address, output);
+}
+
+void CodeGenerator::visitNegI(LNegI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.as_sub_w(output, zero, input);
+}
+
+void CodeGenerator::visitNegI64(LNegI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ MOZ_ASSERT(input == ToOutRegister64(ins));
+ masm.neg64(input);
+}
+
+void CodeGenerator::visitNegD(LNegD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_fneg_d(output, input);
+}
+
+void CodeGenerator::visitNegF(LNegF* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_fneg_s(output, input);
+}
+
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
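+  // If adding the constant offset sets the carry flag, the effective
+  // address does not fit in 32 bits and the access is out of bounds.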
+ Label ok;
+ masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
+ &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register64 base = ToRegister64(lir->base());
+ Register64 out = ToOutRegister64(lir);
+
+ Label ok;
+ masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
+ ImmWord(mir->offset()), &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+ LAtomicTypedArrayElementBinop* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Register value = ToRegister(lir->value());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp, outTemp, output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp, outTemp, output);
+ }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+ LAtomicTypedArrayElementBinopForEffect* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Register value = ToRegister(lir->value());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp);
+ }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+ LCompareExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, outTemp,
+ output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, outTemp,
+ output);
+ }
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+ LAtomicExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+ Register value = ToRegister(lir->value());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+ valueTemp, offsetTemp, maskTemp, outTemp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+ valueTemp, offsetTemp, maskTemp, outTemp, output);
+ }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut(out);
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(oldval, temp1);
+ masm.loadBigInt64(newval, tempOut);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ }
+
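+  // Box the raw 64-bit result in |temp2| into a BigInt for the output.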
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = Register64(ToRegister(lir->temp2()));
+ Register out = ToRegister(lir->output());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_ASSERT(lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut = Register64(out);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_ASSERT(!lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ }
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
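+  // An aligned 64-bit load is atomic on LoongArch64; the surrounding
+  // barriers provide the required ordering.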
+ auto sync = Synchronization::Load();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ }
+ masm.memoryBarrierAfter(sync);
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+ auto sync = Synchronization::Store();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.store64(temp1, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.store64(temp1, dest);
+ }
+ masm.memoryBarrierAfter(sync);
+}
+
+void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 oldValue = ToRegister64(lir->oldValue());
+ Register64 newValue = ToRegister64(lir->newValue());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+ masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
+ output);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+ masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 output = ToOutRegister64(lir);
+ Register64 temp(ToRegister(lir->getTemp(0)));
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+ masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
+ addr, temp, output);
+}
+
+void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }
+
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+ LWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmVariableShiftSimd128(
+ LWasmVariableShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmConstantShiftSimd128(
+ LWasmConstantShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmSignReplicationSimd128(
+ LWasmSignReplicationSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+ LWasmReplaceInt64LaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+ LWasmReduceAndBranchSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+ LWasmReduceSimd128ToInt64* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
diff --git a/js/src/jit/loong64/CodeGenerator-loong64.h b/js/src/jit/loong64/CodeGenerator-loong64.h
new file mode 100644
index 0000000000..5c75f65cd2
--- /dev/null
+++ b/js/src/jit/loong64/CodeGenerator-loong64.h
@@ -0,0 +1,209 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_CodeGenerator_loong64_h
+#define jit_loong64_CodeGenerator_loong64_h
+
+#include "jit/loong64/Assembler-loong64.h"
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorLOONG64;
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+using OutOfLineWasmTruncateCheck =
+ OutOfLineWasmTruncateCheckBase<CodeGeneratorLOONG64>;
+
+class CodeGeneratorLOONG64 : public CodeGeneratorShared {
+ friend class MoveResolverLA;
+
+ protected:
+ CodeGeneratorLOONG64(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm);
+
+ NonAssertingLabel deoptLabel_;
+
+ Operand ToOperand(const LAllocation& a);
+ Operand ToOperand(const LAllocation* a);
+ Operand ToOperand(const LDefinition* def);
+
+#ifdef JS_PUNBOX64
+ Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branch32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTest32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs,
+ LSnapshot* snapshot) {
+    // TODO(loong64): branchTestPtr is not used here because it triggers
+    // '-Wundefined-inline'.
+ MOZ_ASSERT(c == Assembler::Zero || c == Assembler::NonZero ||
+ c == Assembler::Signed || c == Assembler::NotSigned);
+ Label bail;
+ if (lhs == rhs) {
+ masm.ma_b(lhs, rhs, &bail, c);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.as_and(scratch, lhs, rhs);
+ masm.ma_b(scratch, scratch, &bail, c);
+ }
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
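+    // Booleans occupy only the low byte of the register; bail out when
+    // that byte is zero (i.e. the value is false).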
+ Label bail;
+ ScratchRegisterScope scratch(masm);
+ masm.ma_and(scratch, reg, Imm32(0xFF));
+ masm.ma_b(scratch, scratch, &bail, Assembler::Zero);
+ bailoutFrom(&bail, snapshot);
+ }
+
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ bool generateOutOfLineCode();
+
+ template <typename T>
+ void branchToBlock(Register lhs, T rhs, MBasicBlock* mir,
+ Assembler::Condition cond) {
+ masm.ma_b(lhs, rhs, skipTrivialBlocks(mir)->lir()->label(), cond);
+ }
+ void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs,
+ FloatRegister rhs, MBasicBlock* mir,
+ Assembler::DoubleCondition cond);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ template <typename T>
+ void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
+ MBasicBlock* mirTrue, MBasicBlock* mirFalse) {
+ if (isNextBlock(mirFalse->lir())) {
+ branchToBlock(lhs, rhs, mirTrue, cond);
+ } else {
+ branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register base);
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+
+ void generateInvalidateEpilogue();
+
+ // Generating a result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+
+ // Generating no result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+
+ public:
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+
+ protected:
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue,
+ ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue,
+ ifFalse);
+ }
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+};
+
+typedef CodeGeneratorLOONG64 CodeGeneratorSpecific;
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorLOONG64> {
+ protected:
+ LSnapshot* snapshot_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot) : snapshot_(snapshot) {}
+
+ void accept(CodeGeneratorLOONG64* codegen) override;
+
+ LSnapshot* snapshot() const { return snapshot_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_CodeGenerator_loong64_h */
diff --git a/js/src/jit/loong64/LIR-loong64.h b/js/src/jit/loong64/LIR-loong64.h
new file mode 100644
index 0000000000..20fde694a1
--- /dev/null
+++ b/js/src/jit/loong64/LIR-loong64.h
@@ -0,0 +1,399 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_LIR_loong64_h
+#define jit_loong64_LIR_loong64_h
+
+namespace js {
+namespace jit {
+
+class LUnbox : public LInstructionHelper<1, 1, 0> {
+ protected:
+ LUnbox(LNode::Opcode opcode, const LAllocation& input)
+ : LInstructionHelper(opcode) {
+ setOperand(0, input);
+ }
+
+ public:
+ LIR_HEADER(Unbox);
+
+ explicit LUnbox(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LUnbox {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnbox(classOpcode, input), type_(type) {}
+
+ MIRType type() const { return type_; }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ explicit LWasmUint32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ explicit LWasmUint32ToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LDivI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 1> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ int32_t shift() const { return shift_; }
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LModI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& callTemp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition* callTemp() { return getTemp(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp0,
+ const LDefinition& temp1, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+// Takes a tableswitch with an integer input to decide which case to jump to.
+class LTableSwitch : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+ const LAllocation* index() { return getOperand(0); }
+ const LDefinition* tempInt() { return getTemp(0); }
+  // Provided only so the CodeGenerator can share code with LTableSwitchV.
+ const LDefinition* tempPointer() { return getTemp(1); }
+};
+
+// Like LTableSwitch, but takes a boxed Value input.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() { return getTemp(0); }
+ const LDefinition* tempFloat() { return getTemp(1); }
+ const LDefinition* tempPointer() { return getTemp(2); }
+};
+
+class LMulI : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(MulI);
+
+ LMulI() : LBinaryMath(classOpcode) {}
+
+ MMul* mir() { return mir_->toMul(); }
+};
+
+class LUDivOrMod : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(UDivOrMod);
+
+ LUDivOrMod() : LBinaryMath(classOpcode) {}
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ bool trapOnError() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->trapOnError();
+ }
+ return mir_->toDiv()->trapOnError();
+ }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmCompareExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES + INT64_PIECES,
+ 0> {
+ public:
+ LIR_HEADER(WasmCompareExchangeI64);
+
+ LWasmCompareExchangeI64(const LAllocation& ptr,
+ const LInt64Allocation& oldValue,
+ const LInt64Allocation& newValue)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, oldValue);
+ setInt64Operand(1 + INT64_PIECES, newValue);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation oldValue() { return getInt64Operand(1); }
+ const LInt64Allocation newValue() {
+ return getInt64Operand(1 + INT64_PIECES);
+ }
+ const MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+};
+
+class LWasmAtomicExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmAtomicExchangeI64);
+
+ LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const MWasmAtomicExchangeHeap* mir() const {
+ return mir_->toWasmAtomicExchangeHeap();
+ }
+};
+
+class LWasmAtomicBinopI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 2> {
+ public:
+ LIR_HEADER(WasmAtomicBinopI64);
+
+ LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const MWasmAtomicBinopHeap* mir() const {
+ return mir_->toWasmAtomicBinopHeap();
+ }
+};
+
+class LDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_LIR_loong64_h */
diff --git a/js/src/jit/loong64/Lowering-loong64.cpp b/js/src/jit/loong64/Lowering-loong64.cpp
new file mode 100644
index 0000000000..ae2db476c1
--- /dev/null
+++ b/js/src/jit/loong64/Lowering-loong64.cpp
@@ -0,0 +1,1088 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/Lowering-loong64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/loong64/Assembler-loong64.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+LTableSwitch* LIRGeneratorLOONG64::newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV* LIRGeneratorLOONG64::newLTableSwitchV(
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
+ tempDouble(), temp(), tableswitch);
+}
+
+void LIRGeneratorLOONG64::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+template <size_t Temps>
+void LIRGeneratorLOONG64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES,
+ "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES,
+ "Assume Count is located at INT64_PIECES.");
+
+ ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorLOONG64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorLOONG64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+// x = !y
+void LIRGeneratorLOONG64::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x + y
+void LIRGeneratorLOONG64::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void LIRGeneratorLOONG64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorLOONG64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorLOONG64::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ bool needsTemp = false;
+ bool cannotAliasRhs = false;
+ bool reuseInput = true;
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ (willHaveDifferentLIRNodes(lhs, rhs) || cannotAliasRhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+
+ if (needsTemp) {
+ ins->setTemp(0, temp());
+ }
+ if (reuseInput) {
+ defineInt64ReuseInput(ins, mir, 0);
+ } else {
+ defineInt64(ins, mir);
+ }
+}
+
+void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template <size_t Temps>
+void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegister(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir,
+ MDefinition* lhs,
+ MDefinition* rhs);
+template void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 2, 1>* ins,
+ MDefinition* mir,
+ MDefinition* lhs,
+ MDefinition* rhs);
+
+void LIRGeneratorLOONG64::lowerForCompareI64AndBranch(
+ MTest* mir, MCompare* comp, JSOp op, MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ LCompareI64AndBranch* lir = new (alloc())
+ LCompareI64AndBranch(comp, op, useInt64Register(left),
+ useInt64OrConstant(right), ifTrue, ifFalse);
+ add(lir, mir);
+}
+
+void LIRGeneratorLOONG64::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
+ MInstruction* mir,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+LBoxAllocation LIRGeneratorLOONG64::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
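+  // On this punboxed 64-bit target a Value fits in one register, so only
+  // |reg1| is used and |reg2| is ignored.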
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+LAllocation LIRGeneratorLOONG64::useByteOpRegister(MDefinition* mir) {
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorLOONG64::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorLOONG64::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition LIRGeneratorLOONG64::tempByteOpRegister() { return temp(); }
+
+LDefinition LIRGeneratorLOONG64::tempToUnbox() { return temp(); }
+
+void LIRGeneratorLOONG64::lowerUntypedPhiInput(MPhi* phi,
+ uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+void LIRGeneratorLOONG64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+void LIRGeneratorLOONG64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ defineTypedPhi(phi, lirIndex);
+}
+
+void LIRGeneratorLOONG64::lowerNegI(MInstruction* ins, MDefinition* input) {
+ define(new (alloc()) LNegI(useRegisterAtStart(input)), ins);
+}
+void LIRGeneratorLOONG64::lowerNegI64(MInstruction* ins, MDefinition* input) {
+ defineInt64ReuseInput(new (alloc()) LNegI64(useInt64RegisterAtStart(input)),
+ ins, 0);
+}
+
+void LIRGeneratorLOONG64::lowerMulI(MMul* mul, MDefinition* lhs,
+ MDefinition* rhs) {
+ LMulI* lir = new (alloc()) LMulI;
+ if (mul->fallible()) {
+ assignSnapshot(lir, mul->bailoutKind());
+ }
+
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void LIRGeneratorLOONG64::lowerDivI(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+    // possible: division by negative powers of two can be handled much like
+    // positive powers of two, and division by other constants can be
+    // optimized with a reciprocal-multiplication technique.
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI* lir =
+ new (alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+ }
+
+ LDivI* lir = new (alloc())
+ LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+}
+
+void LIRGeneratorLOONG64::lowerDivI64(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorLOONG64::lowerModI(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir =
+ new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
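+      // rhs is 2^(shift+1) - 1, so the modulus can be computed with a mask
+      // rather than a division.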
+ LModMaskI* lir = new (alloc())
+ LModMaskI(useRegister(mod->lhs()), temp(LDefinition::GENERAL),
+ temp(LDefinition::GENERAL), shift + 1);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ }
+ }
+ LModI* lir =
+ new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ temp(LDefinition::GENERAL));
+
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+}
+
+void LIRGeneratorLOONG64::lowerModI64(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorLOONG64::lowerUDiv(MDiv* div) {
+ MDefinition* lhs = div->getOperand(0);
+ MDefinition* rhs = div->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+
+ define(lir, div);
+}
+
+void LIRGeneratorLOONG64::lowerUDivI64(MDiv* div) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorLOONG64::lowerUMod(MMod* mod) {
+ MDefinition* lhs = mod->getOperand(0);
+ MDefinition* rhs = mod->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+
+ define(lir, mod);
+}
+
+void LIRGeneratorLOONG64::lowerUModI64(MMod* mod) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorLOONG64::lowerUrshD(MUrsh* mir) {
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new (alloc())
+ LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void LIRGeneratorLOONG64::lowerPowOfTwoI(MPow* mir) {
+ int32_t base = mir->input()->toConstant()->toInt32();
+ MDefinition* power = mir->power();
+
+ auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
+ assignSnapshot(lir, mir->bailoutKind());
+ define(lir, mir);
+}
+
+void LIRGeneratorLOONG64::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerBigIntLsh(MBigIntLsh* ins) {
+ auto* lir = new (alloc()) LBigIntLsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerBigIntRsh(MBigIntRsh* ins) {
+ auto* lir = new (alloc()) LBigIntRsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new (alloc()) LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
+}
+
+void LIRGeneratorLOONG64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new (alloc()) LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
+}
+
+void LIRGeneratorLOONG64::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGeneratorLOONG64::lowerWasmSelectI(MWasmSelect* select) {
+ auto* lir = new (alloc())
+ LWasmSelect(useRegisterAtStart(select->trueExpr()),
+ useAny(select->falseExpr()), useRegister(select->condExpr()));
+ defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
+}
+
+void LIRGeneratorLOONG64::lowerWasmSelectI64(MWasmSelect* select) {
+ auto* lir = new (alloc()) LWasmSelectI64(
+ useInt64RegisterAtStart(select->trueExpr()),
+ useInt64(select->falseExpr()), useRegister(select->condExpr()));
+ defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
+}
+
+// On loong64 we only specialize the cases where the compare is {U,}Int32 and
+// the select is {U,}Int32.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32);
+}
+
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ auto* lir = new (alloc()) LWasmCompareAndSelect(
+ useRegister(lhs), useRegister(rhs), compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()), useRegister(ins->falseExpr()));
+ defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
+
+void LIRGeneratorLOONG64::lowerWasmBuiltinTruncateToInt32(
+ MWasmBuiltinTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ if (opd->type() == MIRType::Double) {
+ define(new (alloc()) LWasmBuiltinTruncateDToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+ return;
+ }
+
+ define(new (alloc()) LWasmBuiltinTruncateFToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorLOONG64::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGeneratorLOONG64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+void LIRGeneratorLOONG64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+void LIRGeneratorLOONG64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useRegister(ins->value());
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
+}
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
+ LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* box = unbox->getOperand(0);
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnbox* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new (alloc())
+ LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new (alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new (alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ define(lir, unbox);
+}
+
+void LIRGenerator::visitAbs(MAbs* ins) {
+ define(allocateAbs(ins, useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitCopySign(MCopySign* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double) {
+ lir = new (alloc()) LCopySignD();
+ } else {
+ lir = new (alloc()) LCopySignF();
+ }
+
+ lir->setTemp(0, temp());
+ lir->setTemp(1, temp());
+
+ lir->setOperand(0, useRegisterAtStart(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitPowHalf(MPowHalf* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ defineInt64(
+ new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ defineInt64(new (alloc())
+ LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
+ ins);
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+  // If the target is a floating-point register, then we need a temp at the
+  // CodeGenerator level to create the result.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
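+  // Sub-word (8- and 16-bit) atomic accesses are synthesized by the code
+  // generator from word-sized LL/SC sequences, which needs temps for the
+  // shifted value, the bit offset within the word, and the mask.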
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LCompareExchangeTypedArrayElement* lir = new (alloc())
+ LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
+ outTemp, valueTemp, offsetTemp,
+ maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LDefinition temp2 = temp();
+
+ auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
+ elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+  // If the target is a floating-point register, then we need a temp at the
+  // CodeGenerator level to create the result.
+
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ outTemp = temp();
+ }
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir =
+ new (alloc()) LAtomicExchangeTypedArrayElement(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We can omit allocating the result BigInt.
+
+ if (ins->isForEffect()) {
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp1, temp2);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (ins->isForEffect()) {
+ LAtomicTypedArrayElementBinopForEffect* lir =
+ new (alloc()) LAtomicTypedArrayElementBinopForEffect(
+ elements, index, value, valueTemp, offsetTemp, maskTemp);
+ add(lir, ins);
+ return;
+ }
+
+ // For a Uint32Array with a known double result we need a temp for
+ // the intermediate output.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
+ LAtomicTypedArrayElementBinop* lir =
+ new (alloc()) LAtomicTypedArrayElementBinop(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+ // We have no memory-base value, meaning that HeapReg is to be used as the
+ // memory base. This follows from the definition of
+ // FunctionCompiler::maybeLoadMemoryBase() in WasmIonCompile.cpp.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ auto* lir =
+ new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+  // See comment in LIRGenerator::visitAsmJSLoadHeap just above.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+ limitAlloc, LAllocation()),
+ ins);
+}
+
+void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
+ auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
+  // sign-extended on the loongarch64 platform, so we must explicitly promote
+  // it to 64 bits by zero-extension when using it as an index register in
+  // memory accesses.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+  LAllocation ptr = useRegisterAtStart(base);
+
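+  // When the access carries a constant offset, the code generator needs a
+  // scratch copy of the base to add it to, hence the tempCopy below.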
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc()) LWasmLoadI64(ptr);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LWasmLoad(ptr);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ MDefinition* value = ins->value();
+
+ if (ins->access().type() == Scalar::Int64) {
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
+ if (ins->type() == MIRType::Int32) {
+ define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ } else if (ins->type() == MIRType::Float32) {
+ define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ }
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir =
+ new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir =
+ new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmCompareExchangeI64(
+ useRegister(base), useInt64Register(ins->oldValue()),
+ useInt64Register(ins->newValue()));
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmCompareExchangeHeap* lir = new (alloc()) LWasmCompareExchangeHeap(
+ useRegister(base), useRegister(ins->oldValue()),
+ useRegister(ins->newValue()), valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmAtomicExchangeI64(
+ useRegister(base), useInt64Register(ins->value()));
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmAtomicExchangeHeap* lir = new (alloc())
+ LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc())
+ LWasmAtomicBinopI64(useRegister(base), useInt64Register(ins->value()));
+ lir->setTemp(0, temp());
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (!ins->hasUses()) {
+ LWasmAtomicBinopHeapForEffect* lir = new (alloc())
+ LWasmAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()), valueTemp,
+ offsetTemp, maskTemp);
+ add(lir, ins);
+ return;
+ }
+
+ LWasmAtomicBinopHeap* lir = new (alloc())
+ LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
+ MOZ_CRASH("ternary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
+ MOZ_CRASH("binary SIMD NYI");
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
+ int8_t shuffle[16]) {
+ return false;
+}
+#endif
+
+bool MWasmBinarySimd128::specializeForConstantRhs() {
+  // There are probably many cases we would want to specialize here.
+ return false;
+}
+
+void LIRGenerator::visitWasmBinarySimd128WithConstant(
+ MWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("binary SIMD with constant NYI");
+}
+
+void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
+ MOZ_CRASH("shift SIMD NYI");
+}
+
+void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
+ MOZ_CRASH("shuffle SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("replace-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
+ MOZ_CRASH("scalar-to-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
+ MOZ_CRASH("unary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
+ MOZ_CRASH("reduce-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("load-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("store-lane SIMD NYI");
+}
diff --git a/js/src/jit/loong64/Lowering-loong64.h b/js/src/jit/loong64/Lowering-loong64.h
new file mode 100644
index 0000000000..6285b13291
--- /dev/null
+++ b/js/src/jit/loong64/Lowering-loong64.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_Lowering_loong64_h
+#define jit_loong64_Lowering_loong64_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorLOONG64 : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorLOONG64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {}
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ template <size_t Temps>
+ void lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* src);
+ template <size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompareI64AndBranch(MTest* mir, MCompare* comp, JSOp op,
+ MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ LDefinition tempToUnbox();
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t);
+ void defineInt64Phi(MPhi*, size_t);
+
+ void lowerNegI(MInstruction* ins, MDefinition* input);
+ void lowerNegI64(MInstruction* ins, MDefinition* input);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerDivI(MDiv* div);
+ void lowerDivI64(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerModI64(MMod* mod);
+ void lowerUDiv(MDiv* div);
+ void lowerUDivI64(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void lowerUModI64(MMod* mod);
+ void lowerUrshD(MUrsh* mir);
+ void lowerPowOfTwoI(MPow* mir);
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+ void lowerBigIntLsh(MBigIntLsh* ins);
+ void lowerBigIntRsh(MBigIntRsh* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmSelectI(MWasmSelect* select);
+ void lowerWasmSelectI64(MWasmSelect* select);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+};
+
+typedef LIRGeneratorLOONG64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_Lowering_loong64_h */
diff --git a/js/src/jit/loong64/MacroAssembler-loong64-inl.h b/js/src/jit/loong64/MacroAssembler-loong64-inl.h
new file mode 100644
index 0000000000..b774b90926
--- /dev/null
+++ b/js/src/jit/loong64/MacroAssembler-loong64-inl.h
@@ -0,0 +1,2131 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_MacroAssembler_loong64_inl_h
+#define jit_loong64_MacroAssembler_loong64_inl_h
+
+#include "jit/loong64/MacroAssembler-loong64.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ movePtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ movePtr(ImmWord(imm.value), dest.reg);
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ moveFromDouble(src, dest.reg);
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ moveToDouble(src.reg, dest);
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
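+  // On LoongArch64, slli.w with a zero shift amount writes the sign-extended
+  // low 32 bits, which is the canonical in-register form of an int32.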
+ as_slli_w(dest, src.reg, 0);
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ as_bstrpick_d(dest.reg, src, 31, 0);
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move8SignExtend(dest.reg, dest.reg);
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move16SignExtend(dest.reg, dest.reg);
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ as_slli_w(dest.reg, src, 0);
+}
+
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+ moveFromFloat32(src, dest);
+}
+
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+ moveToFloat32(src, dest);
+}
+
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+ as_ext_w_b(dest, src);
+}
+
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+ as_ext_w_h(dest, src);
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ as_slli_w(dest, src, 0);
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ as_bstrpick_d(dest, src, 31, 0);
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
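+  // load32 sign-extends to 64 bits on this target (ld.w), so no extra work is
+  // needed.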
+ load32(src, dest);
+}
+
+void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(ra, dest); }
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::not32(Register reg) { as_nor(reg, reg, zero); }
+
+void MacroAssembler::notPtr(Register reg) { as_nor(reg, reg, zero); }
+
+void MacroAssembler::andPtr(Register src, Register dest) {
+ as_and(dest, dest, src);
+}
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) {
+ ma_and(dest, dest, imm);
+}
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, ImmWord(imm.value));
+ as_and(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ as_and(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::and64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(*this);
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ and64(scratch64, dest);
+ } else {
+ and64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::and32(Register src, Register dest) {
+ as_and(dest, dest, src);
+}
+
+void MacroAssembler::and32(Imm32 imm, Register dest) {
+ ma_and(dest, dest, imm);
+}
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(dest, scratch2);
+ and32(imm, scratch2);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::and32(const Address& src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(src, scratch2);
+ as_and(dest, dest, scratch2);
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, ImmWord(imm.value));
+ as_or(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::or32(Register src, Register dest) {
+ as_or(dest, dest, src);
+}
+
+void MacroAssembler::or32(Imm32 imm, Register dest) { ma_or(dest, dest, imm); }
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(dest, scratch2);
+ or32(imm, scratch2);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, ImmWord(imm.value));
+ as_xor(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) {
+ as_or(dest, dest, src);
+}
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { ma_or(dest, dest, imm); }
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ as_or(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::or64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ or64(scratch64, dest);
+ } else {
+ or64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ as_xor(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::xor64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ xor64(scratch64, dest);
+ } else {
+ xor64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) {
+ as_xor(dest, dest, src);
+}
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
+ ma_xor(dest, dest, imm);
+}
+
+void MacroAssembler::xor32(Register src, Register dest) {
+ as_xor(dest, dest, src);
+}
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) {
+ ma_xor(dest, dest, imm);
+}
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(dest, scratch2);
+ xor32(imm, scratch2);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::xor32(const Address& src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(src, scratch2);
+ xor32(scratch2, dest);
+}
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap16SignExtend(Register reg) {
+ as_revb_2h(reg, reg);
+ as_ext_w_h(reg, reg);
+}
+
+void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
+ as_revb_2h(reg, reg);
+ as_bstrpick_d(reg, reg, 15, 0);
+}
+
+void MacroAssembler::byteSwap32(Register reg) {
+ as_revb_2w(reg, reg);
+ as_slli_w(reg, reg, 0);
+}
+
+void MacroAssembler::byteSwap64(Register64 reg64) {
+ as_revb_d(reg64.reg, reg64.reg);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(Register src, Register dest) {
+ as_add_d(dest, dest, src);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) {
+ ma_add_d(dest, dest, imm);
+}
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(imm, scratch);
+ addPtr(scratch, dest);
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ addPtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::add64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ add64(scratch64, dest);
+ } else {
+ add64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ ma_add_d(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.reg != scratch);
+ mov(ImmWord(imm.value), scratch);
+ as_add_d(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::add32(Register src, Register dest) {
+ as_add_w(dest, dest, src);
+}
+
+void MacroAssembler::add32(Imm32 imm, Register dest) {
+ ma_add_w(dest, dest, imm);
+}
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(dest, scratch2);
+ ma_add_w(scratch2, scratch2, imm);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ loadPtr(dest, scratch);
+ addPtr(imm, scratch);
+ storePtr(scratch, dest);
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ loadPtr(src, scratch);
+ addPtr(scratch, dest);
+}
+
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+ as_fadd_d(dest, dest, src);
+}
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+ as_fadd_s(dest, dest, src);
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ MacroAssemblerLOONG64::ma_liPatchable(dest, Imm32(0));
+ as_sub_d(dest, StackPointer, dest);
+ return offset;
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ // TODO: by wangqing
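+  // ma_liPatchable emitted a lu12i.w/ori pair; rewrite the upper 20 bits of
+  // the immediate into the lu12i.w and the lower 12 bits into the ori.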
+ Instruction* inst0 =
+ (Instruction*)m_buffer.getInst(BufferOffset(offset.offset()));
+
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+
+ MOZ_ASSERT((i0->extractBitField(31, 25)) == ((uint32_t)op_lu12i_w >> 25));
+ MOZ_ASSERT((i1->extractBitField(31, 22)) == ((uint32_t)op_ori >> 22));
+
+ *i0 = InstImm(op_lu12i_w, (int32_t)((imm.value >> 12) & 0xfffff),
+ Register::FromCode(i0->extractRD()), false);
+ *i1 = InstImm(op_ori, (int32_t)(imm.value & 0xfff),
+ Register::FromCode(i1->extractRJ()),
+ Register::FromCode(i1->extractRD()), 12);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) {
+ as_sub_d(dest, dest, src);
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+ ma_sub_d(dest, dest, imm);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ as_sub_d(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::sub64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ sub64(scratch64, dest);
+ } else {
+ sub64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.reg != scratch);
+ mov(ImmWord(imm.value), scratch);
+ as_sub_d(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::sub32(Register src, Register dest) {
+ as_sub_w(dest, dest, src);
+}
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) {
+ ma_sub_w(dest, dest, imm);
+}
+
+void MacroAssembler::sub32(const Address& src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(src, scratch2);
+ as_sub_w(dest, dest, scratch2);
+}
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(dest, scratch2);
+ subPtr(src, scratch2);
+ storePtr(scratch2, dest);
+}
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(addr, scratch2);
+ subPtr(scratch2, dest);
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+ as_fsub_d(dest, dest, src);
+}
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+ as_fsub_s(dest, dest, src);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.reg != scratch);
+ mov(ImmWord(imm.value), scratch);
+ as_mul_d(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ mul64(imm, dest);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ as_mul_d(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::mul64(const Operand& src, const Register64& dest,
+ const Register temp) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ mul64(scratch64, dest, temp);
+ } else {
+ mul64(Register64(src.toReg()), dest, temp);
+ }
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+ as_mul_d(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ as_add_d(scratch, src, src);
+ as_add_d(dest, scratch, src);
+}
+
+void MacroAssembler::mul32(Register rhs, Register srcDest) {
+ as_mul_w(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
+ ScratchRegisterScope scratch(asMasm());
+ move32(imm, scratch);
+ mul32(scratch, srcDest);
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ move32(imm, scratch);
+ as_mulh_wu(dest, src, scratch);
+}
+
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+ as_fmul_s(dest, dest, src);
+}
+
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+ as_fmul_d(dest, dest, src);
+}
+
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ movePtr(imm, scratch);
+ loadDouble(Address(scratch, 0), fpscratch);
+ mulDouble(fpscratch, dest);
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch, ImmWord(uintptr_t(dest.addr)));
+ as_ld_d(scratch2, scratch, 0);
+ as_addi_d(scratch2, scratch2, 1);
+ as_st_d(scratch2, scratch, 0);
+}
+
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ if (isUnsigned) {
+ as_div_wu(srcDest, srcDest, rhs);
+ } else {
+ as_div_w(srcDest, srcDest, rhs);
+ }
+}
+
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ if (isUnsigned) {
+ as_mod_wu(srcDest, srcDest, rhs);
+ } else {
+ as_mod_w(srcDest, srcDest, rhs);
+ }
+}
+
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+ as_fdiv_s(dest, dest, src);
+}
+
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+ as_fdiv_d(dest, dest, src);
+}
+
+void MacroAssembler::neg64(Register64 reg) { as_sub_d(reg.reg, zero, reg.reg); }
+
+void MacroAssembler::negPtr(Register reg) { as_sub_d(reg, zero, reg); }
+
+void MacroAssembler::neg32(Register reg) { as_sub_w(reg, zero, reg); }
+
+void MacroAssembler::negateDouble(FloatRegister reg) { as_fneg_d(reg, reg); }
+
+void MacroAssembler::negateFloat(FloatRegister reg) { as_fneg_s(reg, reg); }
+
+void MacroAssembler::abs32(Register src, Register dest) {
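+  // Branchless abs: scratch is all ones for negative inputs, so the XOR flips
+  // the bits and the subtraction adds one, i.e. dest = (src ^ mask) - mask.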
+ ScratchRegisterScope scratch(asMasm());
+ as_srai_w(scratch, src, 31);
+ as_xor(dest, src, scratch);
+ as_sub_w(dest, dest, scratch);
+}
+
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+ as_fabs_s(dest, src);
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+ as_fabs_d(dest, src);
+}
+
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+ as_fsqrt_s(dest, src);
+}
+
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+ as_fsqrt_d(dest, src);
+}
+
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshift32(Register src, Register dest) {
+ as_sll_w(dest, dest, src);
+}
+
+void MacroAssembler::lshift32(Imm32 imm, Register dest) {
+ as_slli_w(dest, dest, imm.value % 32);
+}
+
+void MacroAssembler::flexibleLshift32(Register src, Register dest) {
+ lshift32(src, dest);
+}
+
+void MacroAssembler::lshift64(Register shift, Register64 dest) {
+ as_sll_d(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_slli_d(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::lshiftPtr(Register shift, Register dest) {
+ as_sll_d(dest, dest, shift);
+}
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_slli_d(dest, dest, imm.value);
+}
+
+void MacroAssembler::rshift32(Register src, Register dest) {
+ as_srl_w(dest, dest, src);
+}
+
+void MacroAssembler::rshift32(Imm32 imm, Register dest) {
+ as_srli_w(dest, dest, imm.value % 32);
+}
+
+void MacroAssembler::flexibleRshift32(Register src, Register dest) {
+ rshift32(src, dest);
+}
+
+void MacroAssembler::rshift32Arithmetic(Register src, Register dest) {
+ as_sra_w(dest, dest, src);
+}
+
+void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
+ as_srai_w(dest, dest, imm.value % 32);
+}
+
+void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
+ rshift32Arithmetic(src, dest);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 dest) {
+ as_srl_d(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_srli_d(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_srai_d(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 dest) {
+ as_sra_d(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshiftPtr(Register shift, Register dest) {
+ as_srl_d(dest, dest, shift);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_srli_d(dest, dest, imm.value);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_srai_d(dest, dest, imm.value);
+}
+
+// ===============================================================
+// Rotation functions
+
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
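+  // LoongArch has no rotate-left instruction, so a rotate left by n is done as
+  // a rotate right by -n (mod the operand width).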
+ ScratchRegisterScope scratch(asMasm());
+ as_sub_w(scratch, zero, count);
+ as_rotr_w(dest, input, scratch);
+}
+
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
+ as_rotri_w(dest, input, (32 - count.value) & 31);
+}
+
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ ScratchRegisterScope scratch(asMasm());
+ as_sub_d(scratch, zero, count);
+ as_rotr_d(dest.reg, src.reg, scratch);
+}
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ as_rotri_d(dest.reg, src.reg, (64 - count.value) & 63);
+}
+
+void MacroAssembler::rotateRight(Register count, Register input,
+ Register dest) {
+ as_rotr_w(dest, input, count);
+}
+
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+ as_rotri_w(dest, input, count.value & 31);
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ as_rotr_d(dest.reg, src.reg, count);
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ as_rotri_d(dest.reg, src.reg, count.value & 63);
+}
+
+// Bit counting functions
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ as_clz_d(dest, src.reg);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ as_ctz_d(dest, src.reg);
+}
+
+void MacroAssembler::popcnt64(Register64 input, Register64 output,
+ Register tmp) {
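+  // SWAR population count: add bit pairs, then nibbles, mask down to per-byte
+  // sums, multiply by 0x0101010101010101 so the top byte accumulates the
+  // total, and shift it down. The 64-bit masks are built by replicating the
+  // 32-bit constants with bstrins.d.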
+ ScratchRegisterScope scratch(asMasm());
+ as_or(output.reg, input.reg, zero);
+ as_srai_d(tmp, input.reg, 1);
+ ma_li(scratch, Imm32(0x55555555));
+ as_bstrins_d(scratch, scratch, 63, 32);
+ as_and(tmp, tmp, scratch);
+ as_sub_d(output.reg, output.reg, tmp);
+ as_srai_d(tmp, output.reg, 2);
+ ma_li(scratch, Imm32(0x33333333));
+ as_bstrins_d(scratch, scratch, 63, 32);
+ as_and(output.reg, output.reg, scratch);
+ as_and(tmp, tmp, scratch);
+ as_add_d(output.reg, output.reg, tmp);
+ as_srli_d(tmp, output.reg, 4);
+ as_add_d(output.reg, output.reg, tmp);
+ ma_li(scratch, Imm32(0xF0F0F0F));
+ as_bstrins_d(scratch, scratch, 63, 32);
+ as_and(output.reg, output.reg, scratch);
+ ma_li(tmp, Imm32(0x1010101));
+ as_bstrins_d(tmp, tmp, 63, 32);
+ as_mul_d(output.reg, output.reg, tmp);
+ as_srai_d(output.reg, output.reg, 56);
+}
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+ as_clz_w(dest, src);
+}
+
+void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
+ as_ctz_w(dest, src);
+}
+
+void MacroAssembler::popcnt32(Register input, Register output, Register tmp) {
+ // Equivalent to GCC output of mozilla::CountPopulation32()
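+  // Same SWAR reduction as popcnt64, but the per-byte sums are folded together
+  // with shifts and adds instead of a multiply.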
+ as_or(output, input, zero);
+ as_srai_w(tmp, input, 1);
+ ma_and(tmp, tmp, Imm32(0x55555555));
+ as_sub_w(output, output, tmp);
+ as_srai_w(tmp, output, 2);
+ ma_and(output, output, Imm32(0x33333333));
+ ma_and(tmp, tmp, Imm32(0x33333333));
+ as_add_w(output, output, tmp);
+ as_srli_w(tmp, output, 4);
+ as_add_w(output, output, tmp);
+ ma_and(output, output, Imm32(0xF0F0F0F));
+ as_slli_w(tmp, output, 8);
+ as_add_w(output, output, tmp);
+ as_slli_w(tmp, output, 16);
+ as_add_w(output, output, tmp);
+ as_srai_w(output, output, 24);
+}
+
+// ===============================================================
+// Condition functions
+
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
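+  // Unsigned conditions compare the zero-extended byte, signed conditions the
+  // sign-extended byte; rhs is narrowed to match.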
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(uint8_t(rhs.value)), cond);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(int8_t(rhs.value)), cond);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(uint16_t(rhs.value)), cond);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(int16_t(rhs.value)), cond);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+// Also see below for specializations of cmpPtrSet.
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ ma_cmp_set(dest, lhs, ImmWord(uint64_t(rhs.value)), cond);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(uint8_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(int8_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ computeScaledAddress(lhs, scratch2);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(Address(scratch2, lhs.offset), scratch2);
+ branch32(cond, scratch2, rhs, label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(Address(scratch2, lhs.offset), scratch2);
+ branch32(cond, scratch2, rhs, label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(uint16_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(int16_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
+ L label) {
+ ma_b(lhs, imm, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress addr,
+ Imm32 imm, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(addr, scratch2);
+ ma_b(scratch2, imm, label, cond);
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, rhs.reg, success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, rhs.reg, label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+ L label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ ImmWord rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ ma_bc_s(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
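+  // Truncate toward zero to an int64, then fail if the FCSR invalid-operation
+  // (CauseV) bit was raised (NaN or out-of-range input). The final slli.w
+  // keeps only the low 32 bits, giving the mod-2^32 result.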
+ as_ftintrz_l_s(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, dest);
+ MOZ_ASSERT(Assembler::CauseV < 32);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+
+ as_slli_w(dest, dest, 0);
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ convertFloat32ToInt32(src, dest, fail, false);
+}
+
+void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ ma_bc_d(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ as_ftintrz_l_d(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, dest);
+ MOZ_ASSERT(Assembler::CauseV < 32);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+
+ as_slli_w(dest, dest, 0);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+
+ // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ as_ftintrz_l_d(fpscratch, src);
+ moveFromDouble(fpscratch, dest);
+
+ // Fail on overflow cases.
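+  // If sign-extending the low 32 bits changes the value, the truncated
+  // result does not fit in int32.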
+ as_slli_w(scratch, dest, 0);
+ ma_b(dest, scratch, fail, Assembler::NotEqual);
+}
+
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ switch (cond) {
+ case Overflow:
+ ma_add32TestOverflow(dest, dest, src, overflow);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_add32TestCarry(cond, dest, dest, src, overflow);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+// The type of 'T src' may be either a Register or an Imm32, depending on the
+// caller.
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ switch (cond) {
+ case Overflow:
+ ma_sub32TestOverflow(dest, dest, src, overflow);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ ma_sub_w(dest, dest, src);
+ ma_b(dest, dest, overflow, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ma_mul32TestOverflow(dest, dest, src, overflow);
+}
+
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ rshift32(src, dest);
+ branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
+}
+
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+ MOZ_ASSERT(cond == Overflow);
+ neg32(reg);
+ branch32(Assembler::Equal, reg, Imm32(INT32_MIN), label);
+}
+
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_addPtrTestOverflow(dest, dest, src, label);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_addPtrTestCarry(cond, dest, dest, src, label);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_subPtrTestOverflow(dest, dest, src, label);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ subPtr(src, dest);
+ ma_b(dest, dest, label, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ma_mulPtrTestOverflow(dest, dest, src, label);
+}
+
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ subPtr(rhs, lhs);
+ branchPtr(cond, lhs, Imm32(0), label);
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ as_and(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+ }
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_and(scratch2, lhs, rhs);
+ ma_b(scratch2, scratch2, label, cond);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ and32(rhs, scratch2);
+ ma_b(scratch2, scratch2, label, cond);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ and32(rhs, scratch2);
+ ma_b(scratch2, scratch2, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ as_and(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+ }
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ ScratchRegisterScope scratch(asMasm());
+ ma_and(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+ Imm32 rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchTestPtr(cond, scratch2, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestInt32(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestInt32(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
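+  // bstrpick.d extracts bits [31:0], i.e. the int32 payload of the value.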
+ as_bstrpick_d(scratch, value.valueReg(), 31, 0);
+ ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
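+  // Every tag value up to JSVAL_TAG_MAX_DOUBLE encodes a double, so the
+  // equality test becomes an unsigned range check on the tag.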
+ Condition actual = (cond == Equal) ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDoubleTruthy(bool b, FloatRegister value,
+ Label* label) {
+ ScratchDoubleScope fpscratch(*this);
+ ma_lid(fpscratch, 0.0);
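+  // NaN compares unordered and is falsy, so the "not truthy" comparison must
+  // also be taken for unordered operands.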
+ DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+ ma_bc_d(value, fpscratch, label, cond);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = cond == Equal ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JS::detail::ValueUpperInclNumberTag), label, actual);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNumber(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxBoolean(value, scratch2);
+ ma_b(scratch2, scratch2, label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestString(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxString(value, scratch2);
+ load32(Address(scratch2, JSString::offsetOfLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestSymbol(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestSymbol(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestSymbol(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BIGINT), label, cond);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestBigInt(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBigInt(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ computeEffectiveAddress(address, scratch2);
+ splitTag(scratch2, scratch2);
+ branchTestBigInt(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigIntTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxBigInt(value, scratch2);
+ load32(Address(scratch2, BigInt::offsetOfDigitLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestObject(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestObject(cond, tag, label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestPrimitive(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+ const ValueOperand& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& address,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ ma_b(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag), label,
+ (cond == Equal) ? Below : AboveOrEqual);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestMagic(cond, tag, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestMagic(cond, tag, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ SecondScratchRegisterScope scratch(*this);
+ loadPtr(valaddr, scratch);
+ ma_b(scratch, ImmWord(magic), label, cond);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ branchPtr(cond, lhs, rhs.valueReg(), label);
+}
+
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JS::detail::ValueUpperInclNumberTag),
+ cond == Equal ? BelowOrEqual : Above);
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BOOLEAN), cond);
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_STRING), cond);
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_SYMBOL), cond);
+}
+
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BIGINT), cond);
+}
+
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(addr, scratch2);
+ branch(scratch2);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ cmp32Set(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(lhs != scratch2 && src != scratch2 && dest != scratch2);
+ load32(rhs, scratch2);
+ cmp32Move32(cond, lhs, scratch2, src, dest);
+}
+
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ cmp32Set(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ cmpPtrSet(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+ const Address& rhs, const Address& src,
+ Register dest) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(lhs != scratch && dest != scratch);
+ load32(rhs, scratch);
+ cmp32Load32(cond, lhs, scratch, src, dest);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
+ const Address& src, Register dest) {
+ Label skip;
+ branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+ load32(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ Label skip;
+ branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+ loadPtr(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreStringMitigations);
+ Label skip;
+ branchTest32(Assembler::InvertCondition(cond), addr, mask, &skip);
+ loadPtr(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+ Imm32 mask, Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreZeroRegister(Condition cond, Register scratch,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+// ========================================================================
+// Memory access primitives.
+
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const Address& addr) {
+ ma_fst_s(src, addr);
+}
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const BaseIndex& addr) {
+ ma_fst_s(src, addr);
+}
+
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const Address& addr) {
+ ma_fst_d(src, addr);
+}
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const BaseIndex& addr) {
+ ma_fst_d(src, addr);
+}
+
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
+ if (barrier) {
+ as_dbar(0);
+ }
+}
+
+// ===============================================================
+// Clamping functions.
+
+void MacroAssembler::clampIntToUint8(Register reg) {
+ ScratchRegisterScope scratch(*this);
+ // If reg is < 0, then we want to clamp to 0.
+ as_slti(scratch, reg, 0);
+ as_masknez(reg, reg, scratch);
+
+ // If reg is >= 255, then we want to clamp to 255.
+ as_addi_d(reg, reg, -255);
+ as_slt(scratch, reg, zero);
+ as_maskeqz(reg, reg, scratch);
+ as_addi_d(reg, reg, 255);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
+ type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
+ // dest := src XOR mask
+ // scratch := dest >> JSVAL_TAG_SHIFT
+ // fail if scratch != 0
+ //
+ // Note: src and dest can be the same register
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src.valueReg() != scratch);
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ as_xor(dest, src.valueReg(), scratch);
+ as_srli_d(scratch, dest, JSVAL_TAG_SHIFT);
+ ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// The specializations for cmpPtrSet are outside the braces because
+// check_macroassembler_style can't yet deal with specializations.
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ ImmPtr rhs, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ cmpPtrSet(cond, Register(scratch2), rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ loadPtr(rhs, scratch);
+ cmpPtrSet(cond, lhs, Register(scratch), dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rhs != scratch2);
+ loadPtr(lhs, scratch2);
+ cmpPtrSet(cond, Register(scratch2), rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ load32(rhs, scratch);
+ cmp32Set(cond, lhs, Register(scratch), dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rhs != scratch2);
+ load32(lhs, scratch2);
+ cmp32Set(cond, Register(scratch2), rhs, dest);
+}
+
+void MacroAssemblerLOONG64Compat::incrementInt32Value(const Address& addr) {
+ asMasm().add32(Imm32(1), addr);
+}
+
+void MacroAssemblerLOONG64Compat::retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ as_jirl(zero, ra, BOffImm16(0));
+}
+
+// If source is a double, load into dest.
+// If source is int32, convert to double and store in dest.
+// Else, branch to failure.
+void MacroAssemblerLOONG64Compat::ensureDouble(const ValueOperand& source,
+ FloatRegister dest,
+ Label* failure) {
+ Label isDouble, done;
+ {
+ ScratchTagScope tag(asMasm(), source);
+ splitTagForTest(source, tag);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+ }
+
+ convertInt32ToDouble(source.valueReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_MacroAssembler_loong64_inl_h */
diff --git a/js/src/jit/loong64/MacroAssembler-loong64.cpp b/js/src/jit/loong64/MacroAssembler-loong64.cpp
new file mode 100644
index 0000000000..9da4378940
--- /dev/null
+++ b/js/src/jit/loong64/MacroAssembler-loong64.cpp
@@ -0,0 +1,5389 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/MacroAssembler-loong64.h"
+
+#include "jsmath.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/loong64/SharedICRegisters-loong64.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+#include "util/Memory.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ as_ftintrne_l_d(fpscratch, input);
+ as_movfr2gr_d(output, fpscratch);
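+  // Branchless clamp: masknez zeroes the result when the condition register
+  // is non-zero, maskeqz zeroes it when the condition register is zero.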
+  // If res < 0, set res = 0.
+ as_slt(scratch, output, zero);
+ as_masknez(output, output, scratch);
+  // If res > 255, set res = 255.
+ as_sltui(scratch, output, 255);
+ as_addi_d(output, output, -255);
+ as_maskeqz(output, output, scratch);
+ as_addi_d(output, output, 255);
+}
+
+bool MacroAssemblerLOONG64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ asMasm().PushFrameDescriptor(FrameType::IonJS); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+ asMasm().Push(FramePointer);
+ return true;
+}
+
+void MacroAssemblerLOONG64Compat::convertUInt32ToDouble(Register src,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ as_bstrpick_d(scratch, src, 31, 0);
+ asMasm().convertInt64ToDouble(Register64(scratch), dest);
+}
+
+void MacroAssemblerLOONG64Compat::convertUInt64ToDouble(Register src,
+ FloatRegister dest) {
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(src != scratch2);
+
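+  // The top bit of src is set, so a signed conversion would be wrong: halve
+  // the value, OR-ing the dropped low bit back in so rounding stays correct,
+  // convert to double, then double the result.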
+ ma_and(scratch, src, Imm32(1));
+ as_srli_d(scratch2, src, 1);
+ as_or(scratch, scratch, scratch2);
+ as_movgr2fr_d(dest, scratch);
+ as_ffint_d_l(dest, dest);
+ asMasm().addDouble(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_movgr2fr_d(dest, src);
+ as_ffint_d_l(dest, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerLOONG64Compat::convertUInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ as_bstrpick_d(scratch, src, 31, 0);
+ asMasm().convertInt64ToFloat32(Register64(scratch), dest);
+}
+
+void MacroAssemblerLOONG64Compat::convertDoubleToFloat32(FloatRegister src,
+ FloatRegister dest) {
+ as_fcvt_s_d(dest, src);
+}
+
+const int CauseBitPos = int(Assembler::CauseI);
+const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
+const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
+ (1 << int(Assembler::CauseV))) >>
+ int(Assembler::CauseI);
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, control branches to
+// the given failure label. This function overwrites the scratch float
+// register.
+void MacroAssemblerLOONG64Compat::convertDoubleToInt32(FloatRegister src,
+ Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
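+    // -0.0 is the only double whose bit pattern has just the sign bit set;
+    // rotating the bits right by 63 turns that pattern into exactly 1.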
+ moveFromDouble(src, dest);
+ as_rotri_d(dest, dest, 63);
+ ma_b(dest, Imm32(1), fail, Assembler::Equal);
+ }
+
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+  // Truncate the double to int32; if the result is inexact or invalid, fail.
+ as_ftintrz_w_d(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, dest);
+ as_bstrpick_d(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
+ as_andi(scratch, scratch,
+          CauseIOrVMask);  // Mask for the Inexact and Invalid flags.
+ ma_b(scratch, zero, fail, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64Compat::convertDoubleToPtr(FloatRegister src,
+ Register dest, Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromDouble(src, dest);
+ as_rotri_d(dest, dest, 63);
+ ma_b(dest, Imm32(1), fail, Assembler::Equal);
+ }
+
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+  // Truncate the double to int64; if the result is inexact or invalid, fail.
+ as_ftintrz_l_d(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, dest);
+ as_bstrpick_d(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
+ as_andi(scratch, scratch,
+          CauseIOrVMask);  // Mask for the Inexact and Invalid flags.
+ ma_b(scratch, zero, fail, Assembler::NotEqual);
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, control branches to
+// the given failure label. This function overwrites the scratch float
+// register.
+void MacroAssemblerLOONG64Compat::convertFloat32ToInt32(
+ FloatRegister src, Register dest, Label* fail, bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromFloat32(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+ as_ftintrz_w_s(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, dest);
+ MOZ_ASSERT(CauseBitPos + CauseBitCount < 33);
+ MOZ_ASSERT(CauseBitPos < 32);
+ as_bstrpick_w(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
+ as_andi(scratch, scratch, CauseIOrVMask);
+ ma_b(scratch, zero, fail, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64Compat::convertFloat32ToDouble(FloatRegister src,
+ FloatRegister dest) {
+ as_fcvt_d_s(dest, src);
+}
+
+void MacroAssemblerLOONG64Compat::convertInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ as_movgr2fr_w(dest, src);
+ as_ffint_s_w(dest, dest);
+}
+
+void MacroAssemblerLOONG64Compat::convertInt32ToFloat32(const Address& src,
+ FloatRegister dest) {
+ ma_fld_s(dest, src);
+ as_ffint_s_w(dest, dest);
+}
+
+void MacroAssemblerLOONG64Compat::movq(Register rj, Register rd) {
+ as_or(rd, rj, zero);
+}
+
+void MacroAssemblerLOONG64::ma_li(Register dest, CodeLabel* label) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+ label->patchAt()->bind(bo.getOffset());
+ label->setLinkMode(CodeLabel::MoveImmediate);
+}
+
+void MacroAssemblerLOONG64::ma_li(Register dest, ImmWord imm) {
+ int64_t value = imm.value;
+
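+  // Materialize the constant piecewise: values that fit in 12 bits need a
+  // single addi_w or ori; otherwise combinations of lu12i_w, lu32i_d and
+  // lu52i_d build the higher bits and a trailing ori fills in the low 12 bits.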
+ if (-1 == (value >> 11) || 0 == (value >> 11)) {
+ as_addi_w(dest, zero, value);
+ return;
+ }
+
+ if (0 == (value >> 12)) {
+ as_ori(dest, zero, value);
+ return;
+ }
+
+ if (-1 == (value >> 31) || 0 == (value >> 31)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ } else if (0 == (value >> 32)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ as_bstrins_d(dest, zero, 63, 32);
+ } else if (-1 == (value >> 51) || 0 == (value >> 51)) {
+ if (is_uintN((value >> 12) & 0xfffff, 20)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ }
+ as_lu32i_d(dest, (value >> 32) & 0xfffff);
+ } else if (0 == (value >> 52)) {
+ if (is_uintN((value >> 12) & 0xfffff, 20)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ }
+ as_lu32i_d(dest, (value >> 32) & 0xfffff);
+ as_bstrins_d(dest, zero, 63, 52);
+ } else {
+ if (is_uintN((value >> 12) & 0xfffff, 20)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ }
+ if (is_uintN((value >> 32) & 0xfffff, 20)) {
+ as_lu32i_d(dest, (value >> 32) & 0xfffff);
+ }
+ as_lu52i_d(dest, dest, (value >> 52) & 0xfff);
+ }
+
+ if (is_uintN(value & 0xfff, 12)) {
+ as_ori(dest, dest, value & 0xfff);
+ }
+}
+
+// This method generates a lu12i_w, ori and lu32i_d instruction block that can
+// be modified by UpdateLoad64Value, either during compilation (e.g.
+// Assembler::bind) or during execution (e.g. jit::PatchJump).
+void MacroAssemblerLOONG64::ma_liPatchable(Register dest, ImmPtr imm) {
+ return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssemblerLOONG64::ma_liPatchable(Register dest, ImmWord imm,
+ LiFlags flags) {
+ // hi12, hi20, low20, low12
+ if (Li64 == flags) { // Li64: Imm data
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+ as_lu12i_w(dest, imm.value >> 12 & 0xfffff); // low20
+ as_ori(dest, dest, imm.value & 0xfff); // low12
+ as_lu32i_d(dest, imm.value >> 32 & 0xfffff); // hi20
+ as_lu52i_d(dest, dest, imm.value >> 52 & 0xfff); // hi12
+ } else { // Li48 address
+ m_buffer.ensureSpace(3 * sizeof(uint32_t));
+ as_lu12i_w(dest, imm.value >> 12 & 0xfffff); // low20
+ as_ori(dest, dest, imm.value & 0xfff); // low12
+ as_lu32i_d(dest, imm.value >> 32 & 0xfffff); // hi20
+ }
+}
+
+// Memory access ops.
+
+void MacroAssemblerLOONG64::ma_ld_b(Register dest, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_ld_b(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ as_ldx_b(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_ldx_b(dest, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_ld_bu(Register dest, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_ld_bu(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ as_ldx_bu(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_ldx_bu(dest, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_ld_h(Register dest, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_ld_h(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ as_ldx_h(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_ldx_h(dest, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_ld_hu(Register dest, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_ld_hu(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ as_ldx_hu(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_ldx_hu(dest, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_ld_w(Register dest, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_ld_w(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ as_ldx_w(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_ldx_w(dest, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_ld_wu(Register dest, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_ld_wu(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ as_ldx_wu(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_ldx_wu(dest, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_ld_d(Register dest, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_ld_d(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ as_ldx_d(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_ldx_d(dest, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_st_b(Register src, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_st_b(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_stx_b(src, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_st_h(Register src, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_st_h(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_stx_h(src, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_st_w(Register src, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_st_w(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_stx_w(src, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_st_d(Register src, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_st_d(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_stx_d(src, base, scratch);
+ }
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void MacroAssemblerLOONG64::ma_add_d(Register rd, Register rj, Imm32 imm) {
+ if (is_intN(imm.value, 12)) {
+ as_addi_d(rd, rj, imm.value);
+ } else if (rd != rj) {
+ ma_li(rd, imm);
+ as_add_d(rd, rj, rd);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_add_d(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_add32TestOverflow(Register rd, Register rj,
+ Register rk, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ as_add_d(scratch, rj, rk);
+ as_add_w(rd, rj, rk);
+ ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64::ma_add32TestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ // Check for signed range because of as_addi_d
+ if (is_intN(imm.value, 12)) {
+ ScratchRegisterScope scratch(asMasm());
+ as_addi_d(scratch, rj, imm.value);
+ as_addi_w(rd, rj, imm.value);
+ ma_b(rd, scratch, overflow, Assembler::NotEqual);
+ } else {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_add32TestOverflow(rd, rj, scratch2, overflow);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rd != scratch);
+
+ if (rj == rk) {
+ if (rj == rd) {
+ as_or(scratch, rj, zero);
+ rj = scratch;
+ }
+
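+    // rd = rj + rj doubles rj; doubling overflows exactly when the sign bit
+    // flips, so test whether rj ^ rd is negative.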
+ as_add_d(rd, rj, rj);
+ as_xor(scratch, rj, rd);
+ ma_b(scratch, zero, overflow, Assembler::LessThan);
+ } else {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ MOZ_ASSERT(rd != scratch2);
+
+ if (rj == rd) {
+ as_or(scratch2, rj, zero);
+ rj = scratch2;
+ }
+
+ as_add_d(rd, rj, rk);
+ as_slti(scratch, rj, 0);
+ as_slt(scratch2, rd, rj);
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (imm.value == 0) {
+ as_ori(rd, rj, 0);
+ return;
+ }
+
+ if (rj == rd) {
+ as_ori(scratch2, rj, 0);
+ rj = scratch2;
+ }
+
+ ma_add_d(rd, rj, imm);
+
+ if (imm.value > 0) {
+ ma_b(rd, rj, overflow, Assembler::LessThan);
+ } else {
+ MOZ_ASSERT(imm.value < 0);
+ ma_b(rd, rj, overflow, Assembler::GreaterThan);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
+ ImmWord imm,
+ Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (imm.value == 0) {
+ as_ori(rd, rj, 0);
+ return;
+ }
+
+ if (rj == rd) {
+ MOZ_ASSERT(rj != scratch2);
+ as_ori(scratch2, rj, 0);
+ rj = scratch2;
+ }
+
+ ma_li(rd, imm);
+ as_add_d(rd, rj, rd);
+
+ if (imm.value > 0) {
+ ma_b(rd, rj, overflow, Assembler::LessThan);
+ } else {
+ MOZ_ASSERT(imm.value < 0);
+ ma_b(rd, rj, overflow, Assembler::GreaterThan);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rj, Register rk,
+ Label* label) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rd != rk);
+ MOZ_ASSERT(rd != scratch);
+ as_add_d(rd, rj, rk);
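+  // An unsigned add carries iff the result wraps around, i.e. rd < rk
+  // (rd may alias rj, so compare against rk instead).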
+ as_sltu(scratch, rd, rk);
+ ma_b(scratch, Register(scratch), label,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rj, Imm32 imm,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ // Check for signed range because of as_addi_d
+ if (is_intN(imm.value, 12)) {
+ as_addi_d(rd, rj, imm.value);
+ as_sltui(scratch2, rd, imm.value);
+ ma_b(scratch2, scratch2, label,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ma_li(scratch2, imm);
+ ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rj, ImmWord imm,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ // Check for signed range because of as_addi_d
+ if (is_intN(imm.value, 12)) {
+ as_addi_d(rd, rj, imm.value);
+ as_sltui(scratch2, rd, imm.value);
+ ma_b(scratch2, scratch2, label,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ma_li(scratch2, imm);
+ ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
+ }
+}
+
+// Subtract.
+void MacroAssemblerLOONG64::ma_sub_d(Register rd, Register rj, Imm32 imm) {
+ if (is_intN(-imm.value, 12)) {
+ as_addi_d(rd, rj, -imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ as_sub_d(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_sub32TestOverflow(Register rd, Register rj,
+ Register rk, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ as_sub_d(scratch, rj, rk);
+ as_sub_w(rd, rj, rk);
+ ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64::ma_subPtrTestOverflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT_IF(rj == rd, rj != rk);
+ MOZ_ASSERT(rj != scratch2);
+ MOZ_ASSERT(rk != scratch2);
+ MOZ_ASSERT(rd != scratch2);
+
+ Register rj_copy = rj;
+
+ if (rj == rd) {
+ as_or(scratch2, rj, zero);
+ rj_copy = scratch2;
+ }
+
+ {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rd != scratch);
+
+ as_sub_d(rd, rj, rk);
+    // If the signs of rj and rk are the same, there is no overflow.
+ as_xor(scratch, rj_copy, rk);
+    // Check whether the signs of rd and rj are the same.
+ as_xor(scratch2, rd, rj_copy);
+ as_and(scratch2, scratch2, scratch);
+ }
+
+ ma_b(scratch2, zero, overflow, Assembler::LessThan);
+}
+
+void MacroAssemblerLOONG64::ma_subPtrTestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ // TODO(loong64): Check subPtrTestOverflow
+ MOZ_ASSERT(imm.value != INT32_MIN);
+ ma_addPtrTestOverflow(rd, rj, Imm32(-imm.value), overflow);
+}
+
+void MacroAssemblerLOONG64::ma_mul_d(Register rd, Register rj, Imm32 imm) {
+ // li handles the relocation.
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_mul_d(rd, rj, scratch);
+}
+
+void MacroAssemblerLOONG64::ma_mulh_d(Register rd, Register rj, Imm32 imm) {
+ // li handles the relocation.
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_mulh_d(rd, rj, scratch);
+}
+
+void MacroAssemblerLOONG64::ma_mulPtrTestOverflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rd != scratch);
+
+ if (rd == rj) {
+ as_or(scratch, rj, zero);
+ rj = scratch;
+ rk = (rd == rk) ? rj : rk;
+ } else if (rd == rk) {
+ as_or(scratch, rk, zero);
+ rk = scratch;
+ }
+
+ as_mul_d(rd, rj, rk);
+ as_mulh_d(scratch, rj, rk);
+ as_srai_d(scratch2, rd, 63);
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+}
+
+// Memory.
+
+void MacroAssemblerLOONG64::ma_load(Register dest, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int32_t encodedOffset;
+ Register base;
+
+  // TODO: use as_ldx_b/h/w/d; this would avoid the extra as_add_d instruction.
+ switch (size) {
+ case SizeByte:
+ case SizeHalfWord:
+ if (!is_intN(address.offset, 12)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ if (size == SizeByte) {
+ if (ZeroExtend == extension) {
+ as_ld_bu(dest, base, encodedOffset);
+ } else {
+ as_ld_b(dest, base, encodedOffset);
+ }
+ } else {
+ if (ZeroExtend == extension) {
+ as_ld_hu(dest, base, encodedOffset);
+ } else {
+ as_ld_h(dest, base, encodedOffset);
+ }
+ }
+ break;
+ case SizeWord:
+ case SizeDouble:
+ if ((address.offset & 0x3) == 0 &&
+ (size == SizeDouble ||
+ (size == SizeWord && SignExtend == extension))) {
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ if (size == SizeWord) {
+ as_ldptr_w(dest, base, encodedOffset);
+ } else {
+ as_ldptr_d(dest, base, encodedOffset);
+ }
+ } else {
+ if (!is_intN(address.offset, 12)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ if (size == SizeWord) {
+ if (ZeroExtend == extension) {
+ as_ld_wu(dest, base, encodedOffset);
+ } else {
+ as_ld_w(dest, base, encodedOffset);
+ }
+ } else {
+ as_ld_d(dest, base, encodedOffset);
+ }
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
+
+void MacroAssemblerLOONG64::ma_store(Register data, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int32_t encodedOffset;
+ Register base;
+
+  // TODO: use as_stx_b/h/w/d; this would avoid the extra as_add_d instruction.
+ switch (size) {
+ case SizeByte:
+ case SizeHalfWord:
+ if (!is_intN(address.offset, 12)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ if (size == SizeByte) {
+ as_st_b(data, base, encodedOffset);
+ } else {
+ as_st_h(data, base, encodedOffset);
+ }
+ break;
+ case SizeWord:
+ case SizeDouble:
+ if ((address.offset & 0x3) == 0) {
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ if (size == SizeWord) {
+ as_stptr_w(data, base, encodedOffset);
+ } else {
+ as_stptr_d(data, base, encodedOffset);
+ }
+ } else {
+ if (!is_intN(address.offset, 12)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ if (size == SizeWord) {
+ as_st_w(data, base, encodedOffset);
+ } else {
+ as_st_d(data, base, encodedOffset);
+ }
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+void MacroAssemblerLOONG64Compat::computeScaledAddress(const BaseIndex& address,
+ Register dest) {
+ Register base = address.base;
+ Register index = address.index;
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+
+ if (shift) {
+ MOZ_ASSERT(shift <= 4);
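+    // alsl_d shifts its first source operand left by (sa + 1) before adding,
+    // hence shift - 1 here.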
+ as_alsl_d(dest, index, base, shift - 1);
+ } else {
+ as_add_d(dest, base, index);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_pop(Register r) {
+ MOZ_ASSERT(r != StackPointer);
+ as_ld_d(r, StackPointer, 0);
+ as_addi_d(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void MacroAssemblerLOONG64::ma_push(Register r) {
+ if (r == StackPointer) {
+ ScratchRegisterScope scratch(asMasm());
+ as_or(scratch, r, zero);
+ as_addi_d(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
+ as_st_d(scratch, StackPointer, 0);
+ } else {
+ as_addi_d(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
+ as_st_d(r, StackPointer, 0);
+ }
+}
+
+// Branches when done from within loongarch-specific code.
+void MacroAssemblerLOONG64::ma_b(Register lhs, ImmWord imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ if (imm.value <= INT32_MAX) {
+ ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ ma_li(scratch, imm);
+ ma_b(lhs, Register(scratch), label, c, jumpKind);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_b(Register lhs, Address addr, Label* label,
+ Condition c, JumpKind jumpKind) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ ma_ld_d(scratch, addr);
+ ma_b(lhs, Register(scratch), label, c, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_b(Address addr, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ld_d(scratch2, addr);
+ ma_b(Register(scratch2), imm, label, c, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_b(Address addr, ImmGCPtr imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ld_d(scratch2, addr);
+ ma_b(Register(scratch2), imm, label, c, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_bl(Label* label) {
+ spew("branch .Llabel %p\n", label);
+ if (label->bound()) {
+ // Generate the long jump for calls because return address has to be
+ // the address after the reserved block.
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ScratchRegisterScope scratch(asMasm());
+ ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jirl(ra, scratch, BOffImm16(0));
+ return;
+ }
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+  // Make the whole branch continuous in the buffer. The five
+  // instructions are written below.
+ m_buffer.ensureSpace(5 * sizeof(uint32_t));
+
+ spew("bal .Llabel %p\n", label);
+ BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for long jump.
+ as_nop();
+ as_nop();
+ as_nop();
+}
+
+void MacroAssemblerLOONG64::branchWithCode(InstImm code, Label* label,
+ JumpKind jumpKind) {
+  // Simply output the label's pointer as its id; note that once a label is
+  // destroyed, its pointer may be reused for another label.
+ spew("branch .Llabel %p", label);
+ MOZ_ASSERT(code.encode() !=
+ InstImm(op_jirl, BOffImm16(0), zero, ra).encode());
+ InstImm inst_beq = InstImm(op_beq, BOffImm16(0), zero, zero);
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::IsInRange(offset)) {
+ jumpKind = ShortJump;
+ }
+
+ // ShortJump
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+
+ if (code.extractBitField(31, 26) == ((uint32_t)op_bcz >> 26)) {
+ code.setImm21(offset);
+ } else {
+ code.setBOffImm16(BOffImm16(offset));
+ }
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ writeInst(code.encode());
+ return;
+ }
+
+ // LongJump
+ if (code.encode() == inst_beq.encode()) {
+ // Handle long jump
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ScratchRegisterScope scratch(asMasm());
+ ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
+ as_nop();
+ return;
+ }
+
+ // OpenLongJump
+    // Handle a long conditional branch; the target offset is relative to
+    // this instruction and skips the long jump emitted below.
+ spew("invert branch .Llabel %p", label);
+ InstImm code_r = invertBranch(code, BOffImm16(5 * sizeof(uint32_t)));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code_r);
+#endif
+ writeInst(code_r.encode());
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ScratchRegisterScope scratch(asMasm());
+ ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jirl(zero, scratch, BOffImm16(0));
+ as_nop();
+ return;
+ }
+
+  // Generate an open jump and link it to the label.
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ if (jumpKind == ShortJump) {
+    // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+    // Indicate that this is a short jump with offset 4.
+ code.setBOffImm16(BOffImm16(4));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ return;
+ }
+
+ bool conditional = code.encode() != inst_beq.encode();
+
+  // Make the whole branch continuous in the buffer; the five instructions
+  // are written below (including a conditional nop).
+ m_buffer.ensureSpace(5 * sizeof(uint32_t));
+
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode()); // invert
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for potential long jump.
+ as_nop();
+ as_nop();
+ if (conditional) {
+ as_nop();
+ }
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, ImmWord imm,
+ Condition c) {
+ if (imm.value <= INT32_MAX) {
+ ma_cmp_set(rd, rj, Imm32(uint32_t(imm.value)), c);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_cmp_set(rd, rj, scratch, c);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, ImmPtr imm,
+ Condition c) {
+ ma_cmp_set(rd, rj, ImmWord(uintptr_t(imm.value)), c);
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Address address, Imm32 imm,
+ Condition c) {
+ // TODO(loong64): 32-bit ma_cmp_set?
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ld_w(scratch2, address);
+ ma_cmp_set(rd, Register(scratch2), imm, c);
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Address address,
+ ImmWord imm, Condition c) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ld_d(scratch2, address);
+ ma_cmp_set(rd, Register(scratch2), imm, c);
+}
+
+// fp instructions
+void MacroAssemblerLOONG64::ma_lid(FloatRegister dest, double value) {
+ ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
+
+ if (imm.value != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ moveToDouble(scratch, dest);
+ } else {
+ moveToDouble(zero, dest);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_mv(FloatRegister src, ValueOperand dest) {
+ as_movfr2gr_d(dest.valueReg(), src);
+}
+
+void MacroAssemblerLOONG64::ma_mv(ValueOperand src, FloatRegister dest) {
+ as_movgr2fr_d(dest, src.valueReg());
+}
+
+void MacroAssemblerLOONG64::ma_fld_s(FloatRegister dest, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_fld_s(dest, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_fldx_s(dest, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_fld_d(FloatRegister dest, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_fld_d(dest, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_fldx_d(dest, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_fst_s(FloatRegister src, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_fst_s(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_fstx_s(src, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_fst_d(FloatRegister src, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intN(offset, 12)) {
+ as_fst_d(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ as_fstx_d(src, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_pop(FloatRegister f) {
+ as_fld_d(f, StackPointer, 0);
+ as_addi_d(StackPointer, StackPointer, sizeof(double));
+}
+
+void MacroAssemblerLOONG64::ma_push(FloatRegister f) {
+ as_addi_d(StackPointer, StackPointer, (int32_t) - sizeof(double));
+ as_fst_d(f, StackPointer, 0);
+}
+
+void MacroAssemblerLOONG64::ma_li(Register dest, ImmGCPtr ptr) {
+ writeDataRelocation(ptr);
+ asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
+}
+
+void MacroAssemblerLOONG64::ma_li(Register dest, Imm32 imm) {
+ if (is_intN(imm.value, 12)) {
+ as_addi_w(dest, zero, imm.value);
+ } else if (is_uintN(imm.value, 12)) {
+ as_ori(dest, zero, imm.value & 0xfff);
+ } else {
+ as_lu12i_w(dest, imm.value >> 12 & 0xfffff);
+ if (imm.value & 0xfff) {
+ as_ori(dest, dest, imm.value & 0xfff);
+ }
+ }
+}
+
+// This method generates a lu12i_w/ori instruction pair that can be modified
+// by UpdateLuiOriValue, either during compilation (e.g. Assembler::bind) or
+// during execution (e.g. jit::PatchJump).
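+// For example (illustrative), Imm32(0x12345678) is emitted as
+// lu12i_w dest, 0x12345 followed by ori dest, dest, 0x678.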
+void MacroAssemblerLOONG64::ma_liPatchable(Register dest, Imm32 imm) {
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ as_lu12i_w(dest, imm.value >> 12 & 0xfffff);
+ as_ori(dest, dest, imm.value & 0xfff);
+}
+
+void MacroAssemblerLOONG64::ma_fmovz(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fj, Register rk) {
+ Label done;
+ ma_b(rk, zero, &done, Assembler::NotEqual);
+ if (fmt == SingleFloat) {
+ as_fmov_s(fd, fj);
+ } else {
+ as_fmov_d(fd, fj);
+ }
+ bind(&done);
+}
+
+void MacroAssemblerLOONG64::ma_fmovn(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fj, Register rk) {
+ Label done;
+ ma_b(rk, zero, &done, Assembler::Equal);
+ if (fmt == SingleFloat) {
+ as_fmov_s(fd, fj);
+ } else {
+ as_fmov_d(fd, fj);
+ }
+ bind(&done);
+}
+
+void MacroAssemblerLOONG64::ma_and(Register rd, Register rj, Imm32 imm,
+ bool bit32) {
+ if (is_uintN(imm.value, 12)) {
+ as_andi(rd, rj, imm.value);
+ } else if (rd != rj) {
+ ma_li(rd, imm);
+ as_and(rd, rj, rd);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_and(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_or(Register rd, Register rj, Imm32 imm,
+ bool bit32) {
+ if (is_uintN(imm.value, 12)) {
+ as_ori(rd, rj, imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_or(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_xor(Register rd, Register rj, Imm32 imm,
+ bool bit32) {
+ if (is_uintN(imm.value, 12)) {
+ as_xori(rd, rj, imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_xor(rd, rj, scratch);
+ }
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void MacroAssemblerLOONG64::ma_add_w(Register rd, Register rj, Imm32 imm) {
+ if (is_intN(imm.value, 12)) {
+ as_addi_w(rd, rj, imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_add_w(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_add32TestCarry(Condition cond, Register rd,
+ Register rj, Register rk,
+ Label* overflow) {
+ MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
+ MOZ_ASSERT_IF(rd == rj, rk != rd);
+ ScratchRegisterScope scratch(asMasm());
+ as_add_w(rd, rj, rk);
+ as_sltu(scratch, rd, rd == rj ? rk : rj);
+ ma_b(Register(scratch), Register(scratch), overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+void MacroAssemblerLOONG64::ma_add32TestCarry(Condition cond, Register rd,
+ Register rj, Imm32 imm,
+ Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rj != scratch2);
+ ma_li(scratch2, imm);
+ ma_add32TestCarry(cond, rd, rj, scratch2, overflow);
+}
+
+// Subtract.
+void MacroAssemblerLOONG64::ma_sub_w(Register rd, Register rj, Imm32 imm) {
+ if (is_intN(-imm.value, 12)) {
+ as_addi_w(rd, rj, -imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_sub_w(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_sub_w(Register rd, Register rj, Register rk) {
+ as_sub_w(rd, rj, rk);
+}
+
+void MacroAssemblerLOONG64::ma_sub32TestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ if (imm.value != INT32_MIN) {
+ asMasm().ma_add32TestOverflow(rd, rj, Imm32(-imm.value), overflow);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, Imm32(imm.value));
+ asMasm().ma_sub32TestOverflow(rd, rj, scratch, overflow);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_mul(Register rd, Register rj, Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_mul_w(rd, rj, scratch);
+}
+
+void MacroAssemblerLOONG64::ma_mul32TestOverflow(Register rd, Register rj,
+ Register rk, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_mulh_w(scratch, rj, rk);
+ as_mul_w(rd, rj, rk);
+ as_srai_w(scratch2, rd, 31);
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64::ma_mul32TestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch, imm);
+ as_mulh_w(scratch2, rj, scratch);
+ as_mul_w(rd, rj, scratch);
+ as_srai_w(scratch, rd, 31);
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64::ma_div_branch_overflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ as_mod_w(scratch, rj, rk);
+ ma_b(scratch, scratch, overflow, Assembler::NonZero);
+ as_div_w(rd, rj, rk);
+}
+
+void MacroAssemblerLOONG64::ma_div_branch_overflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_div_branch_overflow(rd, rj, scratch2, overflow);
+}
+
+void MacroAssemblerLOONG64::ma_mod_mask(Register src, Register dest,
+ Register hold, Register remain,
+ int32_t shift, Label* negZero) {
+  // MATH:
+  // We wish to compute x % ((1 << y) - 1) for a known constant, y.
+  // First, let b = (1 << y) and C = (1 << y) - 1, then think of the 32-bit
+  // dividend as a number in base b, namely
+  //   c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n.
+  // Since both addition and multiplication commute with modulus,
+  //   x % C == (c_0 + c_1*b + ... + c_n*b^n) % C
+  //         == (c_0 % C) + (c_1 % C)*(b % C) + (c_2 % C)*(b^2 % C) + ...
+  // And since b == C + 1, b % C == 1 and b^n % C == 1, the whole thing
+  // simplifies to
+  //   (c_0 + c_1 + c_2 + ... + c_n) % C.
+  // Each c_i can easily be computed by a shift/bitextract, and the modulus
+  // can be maintained by simply subtracting C whenever the running sum
+  // reaches C.
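+  // Worked example (illustrative): with shift == 2 we have C == 3, and for
+  // x == 14 the base-4 digits are c_0 == 2 and c_1 == 3; their sum is 5, and
+  // one trial subtraction of C leaves 2, matching 14 % 3 == 2.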
+ int32_t mask = (1 << shift) - 1;
+ Label head, negative, sumSigned, done;
+
+  // hold holds -1 if the value was negative, 1 otherwise.
+  // remain holds the remaining bits that have not been processed.
+  // SecondScratchReg serves as a temporary location to store extracted bits
+  // into as well as holding the trial subtraction as a temp value.
+  // dest is the accumulator (and holds the final result).
+
+  // Move the whole value into remain.
+ as_or(remain, src, zero);
+ // Zero out the dest.
+ ma_li(dest, Imm32(0));
+ // Set the hold appropriately.
+ ma_b(remain, remain, &negative, Signed, ShortJump);
+ ma_li(hold, Imm32(1));
+ ma_b(&head, ShortJump);
+
+ bind(&negative);
+ ma_li(hold, Imm32(-1));
+ as_sub_w(remain, zero, remain);
+
+ // Begin the main loop.
+ bind(&head);
+
+ SecondScratchRegisterScope scratch2(asMasm());
+ // Extract the bottom bits into SecondScratchReg.
+ ma_and(scratch2, remain, Imm32(mask));
+ // Add those bits to the accumulator.
+ as_add_w(dest, dest, scratch2);
+ // Do a trial subtraction
+ ma_sub_w(scratch2, dest, Imm32(mask));
+  // If (sum - C) >= 0, store sum - C back into sum, thus performing a
+  // modulus.
+ ma_b(scratch2, Register(scratch2), &sumSigned, Signed, ShortJump);
+ as_or(dest, scratch2, zero);
+ bind(&sumSigned);
+ // Get rid of the bits that we extracted before.
+ as_srli_w(remain, remain, shift);
+  // If the shift produced zero, finish; otherwise continue in the loop.
+ ma_b(remain, remain, &head, NonZero, ShortJump);
+ // Check the hold to see if we need to negate the result.
+ ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+  // If hold was negative (the input was negative), negate the result to be
+  // in line with what JS wants.
+ if (negZero != nullptr) {
+ // Jump out in case of negative zero.
+ ma_b(hold, hold, negZero, Zero);
+ as_sub_w(dest, zero, dest);
+ } else {
+ as_sub_w(dest, zero, dest);
+ }
+
+ bind(&done);
+}
+
+// Memory.
+
+void MacroAssemblerLOONG64::ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(src, scratch2);
+ asMasm().ma_load(dest, Address(scratch2, src.offset), size, extension);
+}
+
+void MacroAssemblerLOONG64::ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(dest, scratch2);
+ asMasm().ma_store(data, Address(scratch2, dest.offset), size, extension);
+}
+
+void MacroAssemblerLOONG64::ma_store(Imm32 imm, const BaseIndex& dest,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ SecondScratchRegisterScope scratch2(asMasm());
+  // Make sure scratch2 contains the absolute address so that offset is 0.
+ asMasm().computeEffectiveAddress(dest, scratch2);
+
+ ScratchRegisterScope scratch(asMasm());
+  // The scratch register is free now; use it for loading the imm value.
+ ma_li(scratch, imm);
+
+  // With offset=0, ScratchRegister will not be used in ma_store(), so we
+  // can use it as a parameter here.
+ asMasm().ma_store(scratch, Address(scratch2, 0), size, extension);
+}
+
+// Branch helpers used from within loongarch-specific code.
+// TODO(loong64): Optimize ma_b.
+void MacroAssemblerLOONG64::ma_b(Register lhs, Register rhs, Label* label,
+ Condition c, JumpKind jumpKind) {
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
+ break;
+ case Always:
+ ma_b(label, jumpKind);
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_ASSERT(lhs == rhs);
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ break;
+ default: {
+ Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
+ asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
+ jumpKind);
+ break;
+ }
+ }
+}
+
+void MacroAssemblerLOONG64::ma_b(Register lhs, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ MOZ_ASSERT(c != Overflow);
+ if (imm.value == 0) {
+ if (c == Always || c == AboveOrEqual) {
+ ma_b(label, jumpKind);
+ } else if (c == Below) {
+ ; // This condition is always false. No branch required.
+ } else {
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ }
+ } else {
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ break;
+ default:
+ Condition cond = ma_cmp(ScratchRegister, lhs, imm, c);
+ asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
+ jumpKind);
+ }
+ }
+}
+
+void MacroAssemblerLOONG64::ma_b(Register lhs, ImmPtr imm, Label* l,
+ Condition c, JumpKind jumpKind) {
+ asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_b(Label* label, JumpKind jumpKind) {
+ asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
+}
+
+Assembler::Condition MacroAssemblerLOONG64::ma_cmp(Register dest, Register lhs,
+ Register rhs, Condition c) {
+ switch (c) {
+ case Above:
+ // bgtu s,t,label =>
+ // sltu at,t,s
+ // bne at,$zero,offs
+ as_sltu(dest, rhs, lhs);
+ return NotEqual;
+ case AboveOrEqual:
+ // bgeu s,t,label =>
+ // sltu at,s,t
+ // beq at,$zero,offs
+ as_sltu(dest, lhs, rhs);
+ return Equal;
+ case Below:
+ // bltu s,t,label =>
+ // sltu at,s,t
+ // bne at,$zero,offs
+ as_sltu(dest, lhs, rhs);
+ return NotEqual;
+ case BelowOrEqual:
+ // bleu s,t,label =>
+ // sltu at,t,s
+ // beq at,$zero,offs
+ as_sltu(dest, rhs, lhs);
+ return Equal;
+ case GreaterThan:
+ // bgt s,t,label =>
+ // slt at,t,s
+ // bne at,$zero,offs
+ as_slt(dest, rhs, lhs);
+ return NotEqual;
+ case GreaterThanOrEqual:
+ // bge s,t,label =>
+ // slt at,s,t
+ // beq at,$zero,offs
+ as_slt(dest, lhs, rhs);
+ return Equal;
+ case LessThan:
+ // blt s,t,label =>
+ // slt at,s,t
+ // bne at,$zero,offs
+ as_slt(dest, lhs, rhs);
+ return NotEqual;
+ case LessThanOrEqual:
+ // ble s,t,label =>
+ // slt at,t,s
+ // beq at,$zero,offs
+ as_slt(dest, rhs, lhs);
+ return Equal;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return Always;
+}
+
+Assembler::Condition MacroAssemblerLOONG64::ma_cmp(Register dest, Register lhs,
+ Imm32 imm, Condition c) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_RELEASE_ASSERT(lhs != scratch);
+
+ switch (c) {
+ case Above:
+ case BelowOrEqual:
+ if (imm.value != 0x7fffffff && is_intN(imm.value + 1, 12) &&
+ imm.value != -1) {
+ // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
+ as_sltui(dest, lhs, imm.value + 1);
+
+ return (c == BelowOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ as_sltu(dest, scratch, lhs);
+ return (c == BelowOrEqual ? Equal : NotEqual);
+ }
+ case AboveOrEqual:
+ case Below:
+ if (is_intN(imm.value, 12)) {
+ as_sltui(dest, lhs, imm.value);
+ } else {
+ ma_li(scratch, imm);
+ as_sltu(dest, lhs, scratch);
+ }
+ return (c == AboveOrEqual ? Equal : NotEqual);
+ case GreaterThan:
+ case LessThanOrEqual:
+ if (imm.value != 0x7fffffff && is_intN(imm.value + 1, 12)) {
+ // lhs <= rhs via lhs < rhs + 1.
+ as_slti(dest, lhs, imm.value + 1);
+ return (c == LessThanOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ as_slt(dest, scratch, lhs);
+ return (c == LessThanOrEqual ? Equal : NotEqual);
+ }
+ case GreaterThanOrEqual:
+ case LessThan:
+ if (is_intN(imm.value, 12)) {
+ as_slti(dest, lhs, imm.value);
+ } else {
+ ma_li(scratch, imm);
+ as_slt(dest, lhs, scratch);
+ }
+ return (c == GreaterThanOrEqual ? Equal : NotEqual);
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return Always;
+}
+
+// fp instructions
+void MacroAssemblerLOONG64::ma_lis(FloatRegister dest, float value) {
+ Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+ if (imm.value != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ moveToFloat32(scratch, dest);
+ } else {
+ moveToFloat32(zero, dest);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_fst_d(FloatRegister ft, BaseIndex address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(address, scratch2);
+ asMasm().ma_fst_d(ft, Address(scratch2, address.offset));
+}
+
+void MacroAssemblerLOONG64::ma_fst_s(FloatRegister ft, BaseIndex address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(address, scratch2);
+ asMasm().ma_fst_s(ft, Address(scratch2, address.offset));
+}
+
+void MacroAssemblerLOONG64::ma_fld_d(FloatRegister ft, const BaseIndex& src) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(src, scratch2);
+ asMasm().ma_fld_d(ft, Address(scratch2, src.offset));
+}
+
+void MacroAssemblerLOONG64::ma_fld_s(FloatRegister ft, const BaseIndex& src) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(src, scratch2);
+ asMasm().ma_fld_s(ft, Address(scratch2, src.offset));
+}
+
+void MacroAssemblerLOONG64::ma_bc_s(FloatRegister lhs, FloatRegister rhs,
+ Label* label, DoubleCondition c,
+ JumpKind jumpKind, FPConditionBit fcc) {
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, fcc);
+ asMasm().branchWithCode(getBranchCode(fcc), label, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_bc_d(FloatRegister lhs, FloatRegister rhs,
+ Label* label, DoubleCondition c,
+ JumpKind jumpKind, FPConditionBit fcc) {
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, fcc);
+ asMasm().branchWithCode(getBranchCode(fcc), label, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_call(ImmPtr dest) {
+ asMasm().ma_liPatchable(CallReg, dest);
+ as_jirl(ra, CallReg, BOffImm16(0));
+}
+
+void MacroAssemblerLOONG64::ma_jump(ImmPtr dest) {
+ ScratchRegisterScope scratch(asMasm());
+ asMasm().ma_liPatchable(scratch, dest);
+ as_jirl(zero, scratch, BOffImm16(0));
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, Register rk,
+ Condition c) {
+ switch (c) {
+ case Equal:
+ // seq d,s,t =>
+ // xor d,s,t
+ // sltiu d,d,1
+ as_xor(rd, rj, rk);
+ as_sltui(rd, rd, 1);
+ break;
+ case NotEqual:
+ // sne d,s,t =>
+ // xor d,s,t
+ // sltu d,$zero,d
+ as_xor(rd, rj, rk);
+ as_sltu(rd, zero, rd);
+ break;
+ case Above:
+ // sgtu d,s,t =>
+ // sltu d,t,s
+ as_sltu(rd, rk, rj);
+ break;
+ case AboveOrEqual:
+ // sgeu d,s,t =>
+ // sltu d,s,t
+ // xori d,d,1
+ as_sltu(rd, rj, rk);
+ as_xori(rd, rd, 1);
+ break;
+ case Below:
+ // sltu d,s,t
+ as_sltu(rd, rj, rk);
+ break;
+ case BelowOrEqual:
+ // sleu d,s,t =>
+ // sltu d,t,s
+ // xori d,d,1
+ as_sltu(rd, rk, rj);
+ as_xori(rd, rd, 1);
+ break;
+ case GreaterThan:
+ // sgt d,s,t =>
+ // slt d,t,s
+ as_slt(rd, rk, rj);
+ break;
+ case GreaterThanOrEqual:
+ // sge d,s,t =>
+ // slt d,s,t
+ // xori d,d,1
+ as_slt(rd, rj, rk);
+ as_xori(rd, rd, 1);
+ break;
+ case LessThan:
+ // slt d,s,t
+ as_slt(rd, rj, rk);
+ break;
+ case LessThanOrEqual:
+ // sle d,s,t =>
+ // slt d,t,s
+ // xori d,d,1
+ as_slt(rd, rk, rj);
+ as_xori(rd, rd, 1);
+ break;
+ case Zero:
+ MOZ_ASSERT(rj == rk);
+ // seq d,s,$zero =>
+ // sltiu d,s,1
+ as_sltui(rd, rj, 1);
+ break;
+ case NonZero:
+ MOZ_ASSERT(rj == rk);
+ // sne d,s,$zero =>
+ // sltu d,$zero,s
+ as_sltu(rd, zero, rj);
+ break;
+ case Signed:
+ MOZ_ASSERT(rj == rk);
+ as_slt(rd, rj, zero);
+ break;
+ case NotSigned:
+ MOZ_ASSERT(rj == rk);
+ // sge d,s,$zero =>
+ // slt d,s,$zero
+ // xori d,d,1
+ as_slt(rd, rj, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set_double(Register dest, FloatRegister lhs,
+ FloatRegister rhs,
+ DoubleCondition c) {
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c);
+ as_movcf2gr(dest, FCC0);
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set_float32(Register dest, FloatRegister lhs,
+ FloatRegister rhs,
+ DoubleCondition c) {
+ compareFloatingPoint(SingleFloat, lhs, rhs, c);
+ as_movcf2gr(dest, FCC0);
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, Imm32 imm,
+ Condition c) {
+ if (imm.value == 0) {
+ switch (c) {
+ case Equal:
+ case BelowOrEqual:
+ as_sltui(rd, rj, 1);
+ break;
+ case NotEqual:
+ case Above:
+ as_sltu(rd, zero, rj);
+ break;
+ case AboveOrEqual:
+ case Below:
+ as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
+ break;
+ case GreaterThan:
+ case LessThanOrEqual:
+ as_slt(rd, zero, rj);
+ if (c == LessThanOrEqual) {
+ as_xori(rd, rd, 1);
+ }
+ break;
+ case LessThan:
+ case GreaterThanOrEqual:
+ as_slt(rd, rj, zero);
+ if (c == GreaterThanOrEqual) {
+ as_xori(rd, rd, 1);
+ }
+ break;
+ case Zero:
+ as_sltui(rd, rj, 1);
+ break;
+ case NonZero:
+ as_sltu(rd, zero, rj);
+ break;
+ case Signed:
+ as_slt(rd, rj, zero);
+ break;
+ case NotSigned:
+ as_slt(rd, rj, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return;
+ }
+
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ ma_xor(rd, rj, imm);
+ if (c == Equal) {
+ as_sltui(rd, rd, 1);
+ } else {
+ as_sltu(rd, zero, rd);
+ }
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_CRASH("Invalid condition.");
+ default:
+ Condition cond = ma_cmp(rd, rj, imm, c);
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+
+ if (cond == Equal) as_xori(rd, rd, 1);
+ }
+}
+
+void MacroAssemblerLOONG64::compareFloatingPoint(FloatFormat fmt,
+ FloatRegister lhs,
+ FloatRegister rhs,
+ DoubleCondition c,
+ FPConditionBit fcc) {
+ switch (c) {
+ case DoubleOrdered:
+ as_fcmp_cor(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleEqual:
+ as_fcmp_ceq(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleNotEqual:
+ as_fcmp_cne(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleGreaterThan:
+ as_fcmp_clt(fmt, rhs, lhs, fcc);
+ break;
+ case DoubleGreaterThanOrEqual:
+ as_fcmp_cle(fmt, rhs, lhs, fcc);
+ break;
+ case DoubleLessThan:
+ as_fcmp_clt(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleLessThanOrEqual:
+ as_fcmp_cle(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleUnordered:
+ as_fcmp_cun(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleEqualOrUnordered:
+ as_fcmp_cueq(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleNotEqualOrUnordered:
+ as_fcmp_cune(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleGreaterThanOrUnordered:
+ as_fcmp_cult(fmt, rhs, lhs, fcc);
+ break;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ as_fcmp_cule(fmt, rhs, lhs, fcc);
+ break;
+ case DoubleLessThanOrUnordered:
+ as_fcmp_cult(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleLessThanOrEqualOrUnordered:
+ as_fcmp_cule(fmt, lhs, rhs, fcc);
+ break;
+ default:
+ MOZ_CRASH("Invalid DoubleCondition.");
+ }
+}
+
+void MacroAssemblerLOONG64::minMaxDouble(FloatRegister srcDest,
+ FloatRegister second, bool handleNaN,
+ bool isMax) {
+ if (srcDest == second) return;
+
+ Label nan, done;
+
+  // If either operand is NaN, the result is NaN.
+ ma_bc_d(srcDest, second, &nan, Assembler::DoubleUnordered, ShortJump);
+ if (isMax) {
+ as_fmax_d(srcDest, srcDest, second);
+ } else {
+ as_fmin_d(srcDest, srcDest, second);
+ }
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
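+  // Adding the operands propagates (and quiets) the NaN into srcDest.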
+ as_fadd_d(srcDest, srcDest, second);
+
+ bind(&done);
+}
+
+void MacroAssemblerLOONG64::minMaxFloat32(FloatRegister srcDest,
+ FloatRegister second, bool handleNaN,
+ bool isMax) {
+ if (srcDest == second) return;
+
+ Label nan, done;
+
+  // If either operand is NaN, the result is NaN.
+ ma_bc_s(srcDest, second, &nan, Assembler::DoubleUnordered, ShortJump);
+ if (isMax) {
+ as_fmax_s(srcDest, srcDest, second);
+ } else {
+ as_fmin_s(srcDest, srcDest, second);
+ }
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
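+  // Adding the operands propagates (and quiets) the NaN into srcDest.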
+ as_fadd_s(srcDest, srcDest, second);
+
+ bind(&done);
+}
+
+void MacroAssemblerLOONG64::loadDouble(const Address& address,
+ FloatRegister dest) {
+ asMasm().ma_fld_d(dest, address);
+}
+
+void MacroAssemblerLOONG64::loadDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ asMasm().ma_fld_d(dest, src);
+}
+
+void MacroAssemblerLOONG64::loadFloatAsDouble(const Address& address,
+ FloatRegister dest) {
+ asMasm().ma_fld_s(dest, address);
+ as_fcvt_d_s(dest, dest);
+}
+
+void MacroAssemblerLOONG64::loadFloatAsDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ asMasm().loadFloat32(src, dest);
+ as_fcvt_d_s(dest, dest);
+}
+
+void MacroAssemblerLOONG64::loadFloat32(const Address& address,
+ FloatRegister dest) {
+ asMasm().ma_fld_s(dest, address);
+}
+
+void MacroAssemblerLOONG64::loadFloat32(const BaseIndex& src,
+ FloatRegister dest) {
+ asMasm().ma_fld_s(dest, src);
+}
+
+void MacroAssemblerLOONG64::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch,
+ AnyRegister output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ as_ldx_b(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Uint8:
+ as_ldx_bu(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int16:
+ as_ldx_h(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Uint16:
+ as_ldx_hu(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ as_ldx_w(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Float64:
+ as_fldx_d(output.fpu(), memoryBase, ptr);
+ break;
+ case Scalar::Float32:
+ as_fldx_s(output.fpu(), memoryBase, ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerLOONG64::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
+ AnyRegister value,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ as_stx_b(value.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ as_stx_h(value.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ as_stx_w(value.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int64:
+ as_stx_d(value.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Float64:
+ as_fstx_d(value.fpu(), memoryBase, ptr);
+ break;
+ case Scalar::Float32:
+ as_fstx_s(value.fpu(), memoryBase, ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ // Only the last emitted instruction is a memory access.
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerLOONG64Compat::wasmLoadI64Impl(
+ const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ as_ldx_b(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Uint8:
+ as_ldx_bu(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int16:
+ as_ldx_h(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Uint16:
+ as_ldx_hu(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int32:
+ as_ldx_w(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Uint32:
+      // TODO(loong64): Why is zero-extension needed here?
+ as_ldx_wu(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int64:
+ as_ldx_d(output.reg, memoryBase, ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerLOONG64Compat::wasmStoreI64Impl(
+ const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ as_stx_b(value.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ as_stx_h(value.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ as_stx_w(value.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int64:
+ as_stx_d(value.reg, memoryBase, ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerLOONG64::outOfLineWasmTruncateToInt32Check(
+ FloatRegister input, Register output, MIRType fromType, TruncFlags flags,
+ Label* rejoin, wasm::BytecodeOffset trapOffset) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+ ScratchDoubleScope fpscratch(asMasm());
+ if (fromType == MIRType::Double) {
+ asMasm().loadConstantDouble(0.0, fpscratch);
+ } else {
+ asMasm().loadConstantFloat32(0.0f, fpscratch);
+ }
+
+ if (isUnsigned) {
+ ma_li(output, Imm32(UINT32_MAX));
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fpscratch, Assembler::DoubleLessThanOrUnordered);
+
+ ScratchRegisterScope scratch(asMasm());
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
+ as_masknez(output, output, scratch);
+ } else {
+ // Positive overflow is already saturated to INT32_MAX, so we only have
+ // to handle NaN and negative overflow here.
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
+ Assembler::DoubleLessThanOrUnordered);
+
+ ScratchRegisterScope scratch(asMasm());
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
+ as_masknez(output, output, scratch);
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fpscratch, Assembler::DoubleLessThan);
+
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, move INT32_MIN to output; otherwise leave it unchanged.
+ as_slli_w(scratch, scratch, 31);
+ as_or(output, output, scratch);
+ }
+
+ MOZ_ASSERT(rejoin->bound());
+ asMasm().jump(rejoin);
+ return;
+ }
+
+ Label inputIsNaN;
+
+ if (fromType == MIRType::Double) {
+ asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
+ &inputIsNaN);
+ } else if (fromType == MIRType::Float32) {
+ asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ }
+
+ asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
+ asMasm().bind(&inputIsNaN);
+ asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
+}
+
+void MacroAssemblerLOONG64::outOfLineWasmTruncateToInt64Check(
+ FloatRegister input, Register64 output_, MIRType fromType, TruncFlags flags,
+ Label* rejoin, wasm::BytecodeOffset trapOffset) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+ ScratchDoubleScope fpscratch(asMasm());
+ Register output = output_.reg;
+
+ if (fromType == MIRType::Double) {
+ asMasm().loadConstantDouble(0.0, fpscratch);
+ } else {
+ asMasm().loadConstantFloat32(0.0f, fpscratch);
+ }
+
+ if (isUnsigned) {
+ asMasm().ma_li(output, ImmWord(UINT64_MAX));
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fpscratch, Assembler::DoubleLessThanOrUnordered);
+
+ ScratchRegisterScope scratch(asMasm());
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
+ as_masknez(output, output, scratch);
+ } else {
+ // Positive overflow is already saturated to INT64_MAX, so we only have
+ // to handle NaN and negative overflow here.
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
+ Assembler::DoubleLessThanOrUnordered);
+
+ ScratchRegisterScope scratch(asMasm());
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
+ as_masknez(output, output, scratch);
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fpscratch, Assembler::DoubleLessThan);
+
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, move INT64_MIN to output; otherwise leave it unchanged.
+ as_slli_d(scratch, scratch, 63);
+ as_or(output, output, scratch);
+ }
+
+ MOZ_ASSERT(rejoin->bound());
+ asMasm().jump(rejoin);
+ return;
+ }
+
+ Label inputIsNaN;
+
+ if (fromType == MIRType::Double) {
+ asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
+ &inputIsNaN);
+ } else if (fromType == MIRType::Float32) {
+ asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ }
+
+ asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
+ asMasm().bind(&inputIsNaN);
+ asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
+}
+
+void MacroAssemblerLOONG64Compat::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerLOONG64Compat::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+MacroAssembler& MacroAssemblerLOONG64::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerLOONG64::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+ asMasm().subPtr(imm32, StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void MacroAssembler::flush() {}
+
+// ===============================================================
+// Stack manipulation functions.
+
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ reserveStack(reserved);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diff));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ storeDouble(*iter, Address(StackPointer, diff));
+ }
+ MOZ_ASSERT(diff == 0);
+}
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ if (!ignore.has(*iter)) {
+ loadPtr(Address(StackPointer, diff), *iter);
+ }
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ if (!ignore.has(*iter)) {
+ loadDouble(Address(StackPointer, diff), *iter);
+ }
+ }
+ MOZ_ASSERT(diff == 0);
+ freeStack(reserved);
+}
+
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register) {
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ mozilla::DebugOnly<unsigned> numFpu = fpuSet.size();
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ mozilla::DebugOnly<int32_t> diffG = set.gprs().size() * sizeof(intptr_t);
+
+ MOZ_ASSERT(dest.offset >= diffG + diffF);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ dest.offset -= sizeof(intptr_t);
+ storePtr(*iter, dest);
+ }
+ MOZ_ASSERT(diffG == 0);
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ dest.offset -= reg.size();
+ if (reg.isDouble()) {
+ storeDouble(reg, dest);
+ } else if (reg.isSingle()) {
+ storeFloat32(reg, dest);
+ } else {
+ MOZ_CRASH("Unknown register type.");
+ }
+ }
+ MOZ_ASSERT(numFpu == 0);
+ diffF -= diffF % sizeof(uintptr_t);
+ MOZ_ASSERT(diffF == 0);
+}
+
+void MacroAssembler::Push(Register reg) {
+ ma_push(reg);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const ImmWord imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const ImmPtr imm) {
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssembler::Push(const ImmGCPtr ptr) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, ptr);
+ ma_push(scratch);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(FloatRegister f) {
+ ma_push(f);
+ adjustFrame(int32_t(sizeof(double)));
+}
+
+void MacroAssembler::PushBoxed(FloatRegister reg) {
+ subFromStackPtr(Imm32(sizeof(double)));
+ boxDouble(reg, Address(getStackPointer(), 0));
+ adjustFrame(sizeof(double));
+}
+
+void MacroAssembler::Pop(Register reg) {
+ ma_pop(reg);
+ adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Pop(FloatRegister f) {
+ ma_pop(f);
+ adjustFrame(-int32_t(sizeof(double)));
+}
+
+void MacroAssembler::Pop(const ValueOperand& val) {
+ popValue(val);
+ adjustFrame(-int32_t(sizeof(Value)));
+}
+
+void MacroAssembler::PopStackPtr() {
+ loadPtr(Address(StackPointer, 0), StackPointer);
+ adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset MacroAssembler::call(Register reg) {
+ as_jirl(ra, reg, BOffImm16(0));
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset MacroAssembler::call(Label* label) {
+ ma_bl(label);
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset MacroAssembler::callWithPatch() {
+ as_bl(JOffImm26(1 * sizeof(uint32_t)));
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ BufferOffset call(callerOffset - 1 * sizeof(uint32_t));
+
+ JOffImm26 offset = BufferOffset(calleeOffset).diffB<JOffImm26>(call);
+ if (!offset.isInvalid()) {
+ InstJump* bal = (InstJump*)editSrc(call);
+ bal->setJOffImm26(offset);
+ } else {
+ uint32_t u32Offset = callerOffset - 4 * sizeof(uint32_t);
+ uint32_t* u32 =
+ reinterpret_cast<uint32_t*>(editSrc(BufferOffset(u32Offset)));
+ *u32 = calleeOffset - callerOffset;
+ }
+}
+
+CodeOffset MacroAssembler::farJumpWithPatch() {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_pcaddi(scratch, 4);
+ as_ld_w(scratch2, scratch, 0);
+ as_add_d(scratch, scratch, scratch2);
+ as_jirl(zero, scratch, BOffImm16(0));
+ // Allocate space which will be patched by patchFarJump().
+ CodeOffset farJump(currentOffset());
+ spew(".space 32bit initValue 0xffff ffff");
+ writeInst(UINT32_MAX);
+ return farJump;
+}
+
+void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ uint32_t* u32 =
+ reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
+ MOZ_ASSERT(*u32 == UINT32_MAX);
+ *u32 = targetOffset - farJump.offset();
+}
+
+CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
+ movePtr(target, CallReg);
+ return call(CallReg);
+}
+
+void MacroAssembler::call(const Address& addr) {
+ loadPtr(addr, CallReg);
+ call(CallReg);
+}
+
+void MacroAssembler::call(ImmWord target) { call(ImmPtr((void*)target.value)); }
+
+void MacroAssembler::call(ImmPtr target) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, target, RelocationKind::HARDCODED);
+ ma_call(target);
+}
+
+void MacroAssembler::call(JitCode* c) {
+ ScratchRegisterScope scratch(asMasm());
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(c->raw()));
+ callJitNoProfiler(scratch);
+}
+
+CodeOffset MacroAssembler::nopPatchableToCall() {
+ // LOONG64
+ as_nop(); // lu12i_w
+ as_nop(); // ori
+ as_nop(); // lu32i_d
+ as_nop(); // jirl
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
+ Instruction* inst = (Instruction*)call - 4 /* four nops */;
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)target);
+ inst[3] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, ra);
+}
+
+void MacroAssembler::patchCallToNop(uint8_t* call) {
+ Instruction* inst = (Instruction*)call - 4 /* four nops */;
+ inst[0].makeNop(); // lu12i_w
+ inst[1].makeNop(); // ori
+ inst[2].makeNop(); // lu32i_d
+ inst[3].makeNop(); // jirl
+}
+
+void MacroAssembler::pushReturnAddress() { push(ra); }
+
+void MacroAssembler::popReturnAddress() { pop(ra); }
+
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ as_or(scratch, StackPointer, zero);
+
+ // Force sp to be aligned
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+ storePtr(scratch, Address(StackPointer, 0));
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+  // Reserve space for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+  // Save $ra because the call is going to clobber it. Restore it in
+  // callWithABIPost. NOTE: This is needed for calls from SharedIC.
+  // Maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ // Load the callee in scratch2, no instruction between the movePtr and
+ // call should clobber it. Note that we can't use fun because it may be
+ // one of the IntArg registers clobbered before the call.
+ movePtr(fun, scratch2);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(scratch2);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ // Load the callee in scratch2, as above.
+ loadPtr(fun, scratch2);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(scratch2);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
+ CodeLabel cl;
+
+ ma_li(scratch, &cl);
+ Push(scratch);
+ bind(&cl);
+ uint32_t retAddr = currentOffset();
+
+ addCodeLabel(cl);
+ return retAddr;
+}
+
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
+ return;
+ }
+
+ ScratchDoubleScope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ if (src == dest) {
+ return;
+ }
+ movePtr(src.valueReg(), dest.valueReg());
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ if (!src.isGCThing()) {
+ ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
+ return;
+ }
+
+ writeDataRelocation(src);
+ movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ ma_and(buffer, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != ScratchRegister &&
+ ptr != SecondScratchReg); // Both may be used internally.
+ MOZ_ASSERT(temp != ScratchRegister && temp != SecondScratchReg);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ma_and(temp, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
+ zero, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ branchValueIsNurseryCellImpl(cond, address, temp, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ branchValueIsNurseryCellImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
+ const T& value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(temp != InvalidReg);
+ Label done;
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+
+ getGCThingValueChunk(value, temp);
+ loadPtr(Address(temp, gc::ChunkStoreBufferOffset), temp);
+ branchPtr(InvertCondition(cond), temp, zero, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs.valueReg() != scratch);
+ moveValue(rhs, ValueOperand(scratch));
+ ma_b(lhs.valueReg(), scratch, label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ boxDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ if (value.constant()) {
+ storeValue(value.value(), dest);
+ } else {
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
+ dest);
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
+
+// ===============================================================
+// WebAssembly
+
+CodeOffset MacroAssembler::wasmTrapInstruction() {
+ CodeOffset offset(currentOffset());
+ as_break(WASM_TRAP); // TODO: as_teq(zero, zero, WASM_TRAP)
+ return offset;
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ ma_b(index, boundsCheckLimit, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(boundsCheckLimit, scratch2);
+ ma_b(index, Register(scratch2), ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ ma_b(index.reg, boundsCheckLimit.reg, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(boundsCheckLimit, scratch2);
+ ma_b(index.reg, scratch2, ok, cond);
+}
+
+// FTINTRZ behaves as follows:
+//
+// on NaN it produces zero
+// on a too-large input it produces INT_MAX (for the appropriate type)
+// on a too-small input it produces INT_MIN (ditto)
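+//
+// For example (per the behavior above), ftintrz.w.d of 1e10 yields INT32_MAX
+// and ftintrz.w.d of NaN yields 0.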
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ if (!isSaturating) {
+ ma_bc_d(input, input, oolEntry, Assembler::DoubleUnordered);
+ }
+ as_ftintrz_l_d(fpscratch, input);
+ moveFromDouble(fpscratch, output);
+ as_srli_d(scratch, output, 32);
+ as_slli_w(output, output, 0);
+ ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ if (!isSaturating) {
+ ma_bc_s(input, input, oolEntry, Assembler::DoubleUnordered);
+ }
+ as_ftintrz_l_s(fpscratch, input);
+ moveFromDouble(fpscratch, output);
+ as_srli_d(scratch, output, 32);
+ as_slli_w(output, output, 0);
+ ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+// Assembler::CauseV is a value of the FCSRBit enum; Assembler::CauseV == 16.
+void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+ as_ftintrz_w_d(fpscratch, input);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, output);
+ MOZ_ASSERT(Assembler::CauseV < 32);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+ as_ftintrz_w_s(fpscratch, input);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, output);
+ MOZ_ASSERT(Assembler::CauseV < 32);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+ ScratchDoubleScope fpscratch(asMasm());
+ Register output = output_.reg;
+
+ Label done;
+
+ if (!isSaturating) {
+ ma_bc_d(input, input, oolEntry, Assembler::DoubleUnordered);
+ }
+ as_ftintrz_l_d(fpscratch, input);
+ moveFromDouble(fpscratch, output);
+ loadConstantDouble(double(INT64_MAX + 1ULL), fpscratch);
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, ImmWord(INT64_MAX));
+ // For numbers in the ]-1 : INT64_MAX] range do nothing more.
+ ma_b(output, Register(scratch2), &done, Assembler::Below, ShortJump);
+
+ ma_li(scratch2, ImmWord(INT64_MIN));
+ as_fsub_d(fpscratch, input, fpscratch);
+ as_ftintrz_l_d(fpscratch, fpscratch);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, output);
+ as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ as_add_d(output, output, scratch2);
+
+ // Guard against negative values that result in 0 due to precision loss.
+ as_sltui(scratch2, output, 1);
+ as_or(scratch, scratch, scratch2);
+
+ ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ MOZ_ASSERT(tempFloat.isInvalid());
+ ScratchDoubleScope fpscratch(asMasm());
+ Register output = output_.reg;
+
+ Label done;
+
+ if (!isSaturating) {
+ ma_bc_s(input, input, oolEntry, Assembler::DoubleUnordered);
+ }
+ as_ftintrz_l_s(fpscratch, input);
+ moveFromDouble(fpscratch, output);
+ loadConstantFloat32(float(INT64_MAX + 1ULL), fpscratch);
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, ImmWord(INT64_MAX));
+ // For numbers in the ]-1 : INT64_MAX] range do nothing more.
+ ma_b(output, Register(scratch2), &done, Assembler::Below, ShortJump);
+
+ ma_li(scratch2, ImmWord(INT64_MIN));
+ as_fsub_s(fpscratch, input, fpscratch);
+ as_ftintrz_l_s(fpscratch, fpscratch);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, output);
+ as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ as_add_d(output, output, scratch2);
+
+ // Guard against negative values that result in 0 due to precision loss.
+ as_sltui(scratch2, output, 1);
+ as_or(scratch, scratch, scratch2);
+
+ ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+
+ as_ftintrz_l_d(fpscratch, input);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, output.reg);
+ as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ MOZ_ASSERT(tempFloat.isInvalid());
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+
+ as_ftintrz_l_s(fpscratch, input);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, output.reg);
+ as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt32Check(input, output, MIRType::Float32, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt32Check(input, output, MIRType::Double, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt64Check(input, output, MIRType::Float32, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt64Check(input, output, MIRType::Double, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, AnyRegister output) {
+ wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output) {
+ wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreImpl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ enterFakeExitFrame(cxreg, scratch, type);
+}
+
+// TODO(loong64): widenInt32 should be a nop?
+void MacroAssembler::widenInt32(Register r) {
+ move32To64SignExtend(r, Register64(r));
+}
+
+// ========================================================================
+// Convert floating point.
+
+void MacroAssembler::convertUInt64ToFloat32(Register64 src_, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ Register src = src_.reg;
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(src != scratch2);
+
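+ // The sign bit is set: halve the value while folding the shifted-out bit
+ // into bit 0 (to preserve rounding), convert, then double the result.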
+ ma_and(scratch, src, Imm32(1));
+ as_srli_d(scratch2, src, 1);
+ as_or(scratch, scratch, scratch2);
+ as_movgr2fr_d(dest, scratch);
+ as_ffint_s_l(dest, dest);
+ addFloat32(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_movgr2fr_d(dest, src);
+ as_ffint_s_l(dest, dest);
+
+ bind(&done);
+}
+
+void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
+ as_movgr2fr_d(dest, src.reg);
+ as_ffint_s_l(dest, dest);
+}
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ MacroAssemblerSpecific::convertUInt64ToDouble(src.reg, dest);
+}
+
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ as_movgr2fr_d(dest, src.reg);
+ as_ffint_d_l(dest, dest);
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt64ToDouble(Register64(src), dest);
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
+template <typename T>
+static void CompareExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again, end;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_w(output, scratch, 0);
+ masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
+ masm.as_or(scratch2, newval, zero);
+ masm.as_sc_w(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+ masm.bind(&end);
+
+ return;
+ }
+
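+ // Sub-word case: align the address to the containing 32-bit word, compute
+ // the bit offset of the field within that word, and build a mask that
+ // clears the field.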
+ masm.as_andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.as_slli_w(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_w(scratch2, scratch, 0);
+
+ masm.as_srl_w(output, scratch2, offsetTemp);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.as_ext_w_b(valueTemp, oldval);
+ masm.as_ext_w_b(output, output);
+ } else {
+ masm.as_andi(valueTemp, oldval, 0xff);
+ masm.as_andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.as_ext_w_h(valueTemp, oldval);
+ masm.as_ext_w_h(output, output);
+ } else {
+ masm.as_bstrpick_d(valueTemp, oldval, 15, 0);
+ masm.as_bstrpick_d(output, output, 15, 0);
+ }
+ break;
+ }
+
+ masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);
+
+ masm.as_sll_w(valueTemp, newval, offsetTemp);
+ masm.as_and(scratch2, scratch2, maskTemp);
+ masm.as_or(scratch2, scratch2, valueTemp);
+
+ masm.as_sc_w(scratch2, scratch, 0);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&end);
+}
+
+template <typename T>
+static void CompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
+ MOZ_ASSERT(expect != output && replace != output);
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(mem, scratch);
+
+ Label tryAgain;
+ Label exit;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_d(output.reg, scratch, 0);
+
+ masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
+ masm.movePtr(replace.reg, scratch2);
+ masm.as_sc_d(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &tryAgain, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&exit);
+}
+
+template <typename T>
+static void AtomicExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_w(output, scratch, 0);
+ masm.as_or(scratch2, value, zero);
+ masm.as_sc_w(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
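+ // Sub-word case: as in CompareExchange above, operate on the aligned
+ // containing word and isolate the field with offsetTemp and maskTemp.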
+ masm.as_andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.as_slli_w(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+ switch (nbytes) {
+ case 1:
+ masm.as_andi(valueTemp, value, 0xff);
+ break;
+ case 2:
+ masm.as_bstrpick_d(valueTemp, value, 15, 0);
+ break;
+ }
+ masm.as_sll_w(valueTemp, valueTemp, offsetTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_w(output, scratch, 0);
+ masm.as_and(scratch2, output, maskTemp);
+ masm.as_or(scratch2, scratch2, valueTemp);
+
+ masm.as_sc_w(scratch2, scratch, 0);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.as_srl_w(output, output, offsetTemp);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.as_ext_w_b(output, output);
+ } else {
+ masm.as_andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.as_ext_w_h(output, output);
+ } else {
+ masm.as_bstrpick_d(output, output, 15, 0);
+ }
+ break;
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void AtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 value, Register64 output) {
+ MOZ_ASSERT(value != output);
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(mem, scratch);
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_d(output.reg, scratch, 0);
+
+ masm.movePtr(value.reg, scratch2);
+ masm.as_sc_d(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &tryAgain, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void AtomicFetchOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_w(output, scratch, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_w(scratch2, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_w(scratch2, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(scratch2, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(scratch2, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(scratch2, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_sc_w(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ masm.as_andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.as_slli_w(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_w(scratch2, scratch, 0);
+ masm.as_srl_w(output, scratch2, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_w(valueTemp, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_w(valueTemp, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(valueTemp, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(valueTemp, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(valueTemp, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ switch (nbytes) {
+ case 1:
+ masm.as_andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ masm.as_bstrpick_d(valueTemp, valueTemp, 15, 0);
+ break;
+ }
+
+ masm.as_sll_w(valueTemp, valueTemp, offsetTemp);
+
+ masm.as_and(scratch2, scratch2, maskTemp);
+ masm.as_or(scratch2, scratch2, valueTemp);
+
+ masm.as_sc_w(scratch2, scratch, 0);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.as_ext_w_b(output, output);
+ } else {
+ masm.as_andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.as_ext_w_h(output, output);
+ } else {
+ masm.as_bstrpick_d(output, output, 15, 0);
+ }
+ break;
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, AtomicOp op,
+ Register64 value, const T& mem, Register64 temp,
+ Register64 output) {
+ MOZ_ASSERT(value != output);
+ MOZ_ASSERT(value != temp);
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(mem, scratch);
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
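+ // LL/SC retry loop: load-linked the 64-bit value, apply the operation,
+ // then store-conditional and retry if the store failed.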
+ masm.as_ll_d(output.reg, scratch, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_d(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_d(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(temp.reg, output.reg, value.reg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_sc_d(temp.reg, scratch, 0);
+ masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const Address& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+template <typename T>
+static void AtomicEffectOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_w(scratch2, scratch, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_w(scratch2, scratch2, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_w(scratch2, scratch2, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(scratch2, scratch2, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(scratch2, scratch2, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(scratch2, scratch2, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_sc_w(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ masm.as_andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.as_slli_w(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll_w(scratch2, scratch, 0);
+ masm.as_srl_w(valueTemp, scratch2, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_w(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_w(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(valueTemp, valueTemp, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ switch (nbytes) {
+ case 1:
+ masm.as_andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ masm.as_bstrpick_d(valueTemp, valueTemp, 15, 0);
+ break;
+ }
+
+ masm.as_sll_w(valueTemp, valueTemp, offsetTemp);
+
+ masm.as_and(scratch2, scratch2, maskTemp);
+ masm.as_or(scratch2, scratch2, valueTemp);
+
+ masm.as_sc_w(scratch2, scratch, 0);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp);
+}
+
+template <typename T>
+static void WasmAtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ const T& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(masm, &access, access.sync(), mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+// ========================================================================
+// JS atomic operations.
+
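+// For Scalar::Uint32 arrays the raw result may not fit in an int32, so the
+// helpers below compute it into an integer temp and convert that to a double
+// output; all other array types write the GPR output directly.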
+template <typename T>
+static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output.gpr());
+ }
+}
+
+template <typename T>
+static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const T& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ quotient32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ remainder32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
+ Register remOutput, bool isUnsigned,
+ const LiveRegisterSet&) {
+ if (isUnsigned) {
+ as_mod_wu(remOutput, srcDest, rhs);
+ as_div_wu(srcDest, srcDest, rhs);
+ } else {
+ as_mod_w(remOutput, srcDest, rhs);
+ as_div_w(srcDest, srcDest, rhs);
+ }
+}
+
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ return movWithPatch(ImmPtr(nullptr), dest);
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
+}
+
+// ========================================================================
+// Spectre Mitigations.
+
+void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
+
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchFloat32Scope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ Label skipCheck, done;
+
+ // If NaN, 0 or -0, check for bailout.
+ loadConstantFloat32(0.0f, scratch);
+ ma_bc_s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If the binary value is not zero, it is NaN or -0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromDoubleLo(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&skipCheck);
+ as_ftintrm_w_s(scratch, src);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchDoubleScope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ Label skipCheck, done;
+
+ // If NaN, 0 or -0, check for bailout.
+ loadConstantDouble(0.0, scratch);
+ ma_bc_d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If high part is not zero, it is NaN or -0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromDoubleHi(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&skipCheck);
+ as_ftintrm_w_d(scratch, src);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchFloat32Scope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ Label performCeil, done;
+
+ // If x <= -1 or x > 0 then perform ceil.
+ loadConstantFloat32(0.0f, scratch);
+ branchFloat(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
+ loadConstantFloat32(-1.0f, scratch);
+ branchFloat(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
+
+ // If binary value is not zero, the input was not 0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromFloat32(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&performCeil);
+ as_ftintrp_w_s(scratch, src);
+ moveFromFloat32(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchDoubleScope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ Label performCeil, done;
+
+ // If x <= -1 or x > 0 then perform ceil.
+ loadConstantDouble(0, scratch);
+ branchDouble(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
+ loadConstantDouble(-1.0, scratch);
+ branchDouble(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
+
+ // If binary value is not zero, the input was not 0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromDoubleHi(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&performCeil);
+ as_ftintrp_w_d(scratch, src);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ScratchFloat32Scope scratch(*this);
+
+ Label negative, end, skipCheck;
+
+ // Load biggest number less than 0.5 in the temp register.
+ loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ loadConstantFloat32(0.0f, scratch);
+ ma_bc_s(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+ // If NaN, 0 or -0, check for bailout.
+ ma_bc_s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If binary value is not zero, it is NaN or -0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromFloat32(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&end, ShortJump);
+
+ bind(&skipCheck);
+ as_fadd_s(scratch, src, temp);
+ as_ftintrm_w_s(scratch, scratch);
+
+ moveFromFloat32(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ jump(&end);
+
+ // Input is negative, but isn't -0.
+ bind(&negative);
+
+ // Inputs in ]-0.5; 0] need to have 0.5 added; other negative inputs need to
+ // have the biggest float less than 0.5 added.
+ Label loadJoin;
+ loadConstantFloat32(-0.5f, scratch);
+ branchFloat(Assembler::DoubleLessThan, src, scratch, &loadJoin);
+ loadConstantFloat32(0.5f, temp);
+ bind(&loadJoin);
+
+ as_fadd_s(temp, src, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
+
+ // Round toward minus infinity; combined with the adjustment above this
+ // gives round-half-up semantics for the remaining negative inputs.
+ as_ftintrm_w_s(scratch, temp);
+ moveFromFloat32(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+
+ bind(&end);
+}
+
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ScratchDoubleScope scratch(*this);
+
+ Label negative, end, skipCheck;
+
+ // Load biggest number less than 0.5 in the temp register.
+ loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ loadConstantDouble(0.0, scratch);
+ ma_bc_d(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+ // If NaN, 0 or -0, check for bailout.
+ ma_bc_d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If high part is not zero, it is NaN or -0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromDoubleHi(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&end, ShortJump);
+
+ bind(&skipCheck);
+ as_fadd_d(scratch, src, temp);
+ as_ftintrm_w_d(scratch, scratch);
+
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ jump(&end);
+
+ // Input is negative, but isn't -0.
+ bind(&negative);
+
+ // Inputs in ]-0.5; 0] need to have 0.5 added; other negative inputs need to
+ // have the biggest double less than 0.5 added.
+ Label loadJoin;
+ loadConstantDouble(-0.5, scratch);
+ branchDouble(Assembler::DoubleLessThan, src, scratch, &loadJoin);
+ loadConstantDouble(0.5, temp);
+ bind(&loadJoin);
+
+ addDouble(src, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
+
+ // Round toward minus infinity; combined with the adjustment above this
+ // gives round-half-up semantics for the remaining negative inputs.
+ as_ftintrm_w_d(scratch, temp);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+
+ bind(&end);
+}
+
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+
+ Label notZero;
+ as_ftintrz_w_s(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, dest);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
+
+ {
+ // dest == zero
+ SecondScratchRegisterScope scratch2(asMasm());
+ moveFromFloat32(src, scratch2);
+ // Check if input is in ]-1; -0] range by checking the sign bit.
+ as_slt(scratch2, scratch2, zero);
+ as_add_d(scratch, scratch, scratch2);
+ }
+
+ bind(&notZero);
+ branch32(Assembler::NotEqual, Register(scratch), zero, fail);
+}
+
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+
+ Label notZero;
+ as_ftintrz_w_d(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, dest);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
+
+ {
+ // dest == zero
+ SecondScratchRegisterScope scratch2(asMasm());
+ moveFromDoubleHi(src, scratch2);
+ // Check if input is in ]-1; -0] range by checking the sign bit.
+ as_slt(scratch2, scratch2, zero);
+ as_add_d(scratch, scratch, scratch2);
+ }
+
+ bind(&notZero);
+ branch32(Assembler::NotEqual, Register(scratch), zero, fail);
+}
+
+void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssemblerLOONG64Compat::move32(Imm32 imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerLOONG64Compat::move32(Register src, Register dest) {
+ as_slli_w(dest, src, 0);
+}
+
+void MacroAssemblerLOONG64Compat::movePtr(Register src, Register dest) {
+ as_or(dest, src, zero);
+}
+void MacroAssemblerLOONG64Compat::movePtr(ImmWord imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerLOONG64Compat::movePtr(ImmGCPtr imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerLOONG64Compat::movePtr(ImmPtr imm, Register dest) {
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+
+void MacroAssemblerLOONG64Compat::movePtr(wasm::SymbolicAddress imm,
+ Register dest) {
+ append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
+ ma_liPatchable(dest, ImmWord(-1));
+}
+
+void MacroAssemblerLOONG64Compat::load8ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerLOONG64Compat::load8ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerLOONG64Compat::load8SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void MacroAssemblerLOONG64Compat::load8SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void MacroAssemblerLOONG64Compat::load16ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerLOONG64Compat::load16ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerLOONG64Compat::load16SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerLOONG64Compat::load16SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerLOONG64Compat::load32(const Address& address,
+ Register dest) {
+ ma_ld_w(dest, address);
+}
+
+void MacroAssemblerLOONG64Compat::load32(const BaseIndex& address,
+ Register dest) {
+ Register base = address.base;
+ Register index = address.index;
+ int32_t offset = address.offset;
+ uint32_t shift = Imm32::ShiftOf(address.scale).value;
+
+ if (offset != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(offset));
+ if (shift != 0) {
+ MOZ_ASSERT(shift <= 4);
+ as_alsl_d(scratch, index, scratch, shift - 1);
+ } else {
+ as_add_d(scratch, index, scratch);
+ }
+ as_ldx_w(dest, base, scratch);
+ } else if (shift != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ as_slli_d(scratch, index, shift);
+ as_ldx_w(dest, base, scratch);
+ } else {
+ as_ldx_w(dest, base, index);
+ }
+}
+
+void MacroAssemblerLOONG64Compat::load32(AbsoluteAddress address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmPtr(address.addr), scratch);
+ load32(Address(scratch, 0), dest);
+}
+
+void MacroAssemblerLOONG64Compat::load32(wasm::SymbolicAddress address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(address, scratch);
+ load32(Address(scratch, 0), dest);
+}
+
+void MacroAssemblerLOONG64Compat::loadPtr(const Address& address,
+ Register dest) {
+ ma_ld_d(dest, address);
+}
+
+void MacroAssemblerLOONG64Compat::loadPtr(const BaseIndex& src, Register dest) {
+ Register base = src.base;
+ Register index = src.index;
+ int32_t offset = src.offset;
+ uint32_t shift = Imm32::ShiftOf(src.scale).value;
+
+ if (offset != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(offset));
+ if (shift != 0) {
+ MOZ_ASSERT(shift <= 4);
+ as_alsl_d(scratch, index, scratch, shift - 1);
+ } else {
+ as_add_d(scratch, index, scratch);
+ }
+ as_ldx_d(dest, base, scratch);
+ } else if (shift != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ as_slli_d(scratch, index, shift);
+ as_ldx_d(dest, base, scratch);
+ } else {
+ as_ldx_d(dest, base, index);
+ }
+}
+
+void MacroAssemblerLOONG64Compat::loadPtr(AbsoluteAddress address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmPtr(address.addr), scratch);
+ loadPtr(Address(scratch, 0), dest);
+}
+
+void MacroAssemblerLOONG64Compat::loadPtr(wasm::SymbolicAddress address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(address, scratch);
+ loadPtr(Address(scratch, 0), dest);
+}
+
+void MacroAssemblerLOONG64Compat::loadPrivate(const Address& address,
+ Register dest) {
+ loadPtr(address, dest);
+}
+
+void MacroAssemblerLOONG64Compat::store8(Imm32 imm, const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_store(scratch2, address, SizeByte);
+}
+
+void MacroAssemblerLOONG64Compat::store8(Register src, const Address& address) {
+ ma_store(src, address, SizeByte);
+}
+
+void MacroAssemblerLOONG64Compat::store8(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeByte);
+}
+
+void MacroAssemblerLOONG64Compat::store8(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeByte);
+}
+
+void MacroAssemblerLOONG64Compat::store16(Imm32 imm, const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_store(scratch2, address, SizeHalfWord);
+}
+
+void MacroAssemblerLOONG64Compat::store16(Register src,
+ const Address& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerLOONG64Compat::store16(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+void MacroAssemblerLOONG64Compat::store16(Register src,
+ const BaseIndex& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerLOONG64Compat::store32(Register src,
+ AbsoluteAddress address) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmPtr(address.addr), scratch);
+ store32(src, Address(scratch, 0));
+}
+
+void MacroAssemblerLOONG64Compat::store32(Register src,
+ const Address& address) {
+ ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerLOONG64Compat::store32(Imm32 src, const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ move32(src, scratch2);
+ ma_store(scratch2, address, SizeWord);
+}
+
+void MacroAssemblerLOONG64Compat::store32(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeWord);
+}
+
+void MacroAssemblerLOONG64Compat::store32(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void MacroAssemblerLOONG64Compat::storePtr(ImmWord imm, T address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_store(scratch2, address, SizeDouble);
+}
+
+template void MacroAssemblerLOONG64Compat::storePtr<Address>(ImmWord imm,
+ Address address);
+template void MacroAssemblerLOONG64Compat::storePtr<BaseIndex>(
+ ImmWord imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerLOONG64Compat::storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerLOONG64Compat::storePtr<Address>(ImmPtr imm,
+ Address address);
+template void MacroAssemblerLOONG64Compat::storePtr<BaseIndex>(
+ ImmPtr imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerLOONG64Compat::storePtr(ImmGCPtr imm, T address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ movePtr(imm, scratch2);
+ storePtr(scratch2, address);
+}
+
+template void MacroAssemblerLOONG64Compat::storePtr<Address>(ImmGCPtr imm,
+ Address address);
+template void MacroAssemblerLOONG64Compat::storePtr<BaseIndex>(
+ ImmGCPtr imm, BaseIndex address);
+
+void MacroAssemblerLOONG64Compat::storePtr(Register src,
+ const Address& address) {
+ ma_st_d(src, address);
+}
+
+void MacroAssemblerLOONG64Compat::storePtr(Register src,
+ const BaseIndex& address) {
+ Register base = address.base;
+ Register index = address.index;
+ int32_t offset = address.offset;
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+
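+ // Three addressing cases: no offset and no scale (stx_d directly), an
+ // offset that fits in a signed 12-bit immediate (fold the scaled index
+ // into the base and use st_d), or a large offset that must first be
+ // materialized in a scratch register.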
+ if ((offset == 0) && (shift == 0)) {
+ as_stx_d(src, base, index);
+ } else if (is_intN(offset, 12)) {
+ ScratchRegisterScope scratch(asMasm());
+ if (shift == 0) {
+ as_add_d(scratch, base, index);
+ } else {
+ as_alsl_d(scratch, index, base, shift - 1);
+ }
+ as_st_d(src, scratch, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(offset));
+ if (shift == 0) {
+ as_add_d(scratch, scratch, index);
+ } else {
+ as_alsl_d(scratch, index, scratch, shift - 1);
+ }
+ as_stx_d(src, base, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64Compat::storePtr(Register src, AbsoluteAddress dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmPtr(dest.addr), scratch);
+ storePtr(src, Address(scratch, 0));
+}
+
+void MacroAssemblerLOONG64Compat::testNullSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(asMasm());
+ splitTag(value, scratch2);
+ ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_NULL), cond);
+}
+
+void MacroAssemblerLOONG64Compat::testObjectSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(asMasm());
+ splitTag(value, scratch2);
+ ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_OBJECT), cond);
+}
+
+void MacroAssemblerLOONG64Compat::testUndefinedSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(asMasm());
+ splitTag(value, scratch2);
+ ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_UNDEFINED), cond);
+}
+
+void MacroAssemblerLOONG64Compat::unboxInt32(const ValueOperand& operand,
+ Register dest) {
+ as_slli_w(dest, operand.valueReg(), 0);
+}
+
+void MacroAssemblerLOONG64Compat::unboxInt32(Register src, Register dest) {
+ as_slli_w(dest, src, 0);
+}
+
+void MacroAssemblerLOONG64Compat::unboxInt32(const Address& src,
+ Register dest) {
+ load32(Address(src.base, src.offset), dest);
+}
+
+void MacroAssemblerLOONG64Compat::unboxInt32(const BaseIndex& src,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ computeScaledAddress(src, scratch2);
+ load32(Address(scratch2, src.offset), dest);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBoolean(const ValueOperand& operand,
+ Register dest) {
+ as_slli_w(dest, operand.valueReg(), 0);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBoolean(Register src, Register dest) {
+ as_slli_w(dest, src, 0);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBoolean(const Address& src,
+ Register dest) {
+ ma_ld_w(dest, src);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBoolean(const BaseIndex& src,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ computeScaledAddress(src, scratch2);
+ ma_ld_w(dest, Address(scratch2, src.offset));
+}
+
+void MacroAssemblerLOONG64Compat::unboxDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ as_movgr2fr_d(dest, operand.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::unboxDouble(const Address& src,
+ FloatRegister dest) {
+ ma_fld_d(dest, Address(src.base, src.offset));
+}
+
+void MacroAssemblerLOONG64Compat::unboxDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(src, scratch2);
+ unboxDouble(ValueOperand(scratch2), dest);
+}
+
+void MacroAssemblerLOONG64Compat::unboxString(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerLOONG64Compat::unboxString(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerLOONG64Compat::unboxString(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerLOONG64Compat::unboxSymbol(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerLOONG64Compat::unboxSymbol(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerLOONG64Compat::unboxSymbol(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBigInt(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBigInt(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBigInt(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxObject(const ValueOperand& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxObject(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxObject(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxValue(const ValueOperand& src,
+ AnyRegister dest,
+ JSValueType type) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ ma_b(&end, ShortJump);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr(), type);
+ }
+}
+
+void MacroAssemblerLOONG64Compat::boxDouble(FloatRegister src,
+ const ValueOperand& dest,
+ FloatRegister) {
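+  // The inverse of unboxDouble: a (canonicalized) double's raw bits already
+  // form the boxed Value.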
+ as_movfr2gr_d(dest.valueReg(), src);
+}
+
+void MacroAssemblerLOONG64Compat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest) {
+ boxValue(type, src, dest.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::boolValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ convertBoolToInt32(operand.valueReg(), scratch);
+ convertInt32ToDouble(scratch, dest);
+}
+
+void MacroAssemblerLOONG64Compat::int32ValueToDouble(
+ const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+}
+
+void MacroAssemblerLOONG64Compat::boolValueToFloat32(
+ const ValueOperand& operand, FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ convertBoolToInt32(operand.valueReg(), scratch);
+ convertInt32ToFloat32(scratch, dest);
+}
+
+void MacroAssemblerLOONG64Compat::int32ValueToFloat32(
+ const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+}
+
+void MacroAssemblerLOONG64Compat::loadConstantFloat32(float f,
+ FloatRegister dest) {
+ ma_lis(dest, f);
+}
+
+void MacroAssemblerLOONG64Compat::loadInt32OrDouble(const Address& src,
+ FloatRegister dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ Label end;
+
+ // If it's an int, convert it to double.
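+  // The raw Value bits are moved into dest either way; when the tag is Int32,
+  // ffint.d.w converts the low 32 bits to a double in place.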
+ loadPtr(Address(src.base, src.offset), scratch2);
+ as_movgr2fr_d(dest, scratch2);
+ as_srli_d(scratch2, scratch2, JSVAL_TAG_SHIFT);
+ asMasm().branchTestInt32(Assembler::NotEqual, scratch2, &end);
+ as_ffint_d_w(dest, dest);
+
+ bind(&end);
+}
+
+void MacroAssemblerLOONG64Compat::loadInt32OrDouble(const BaseIndex& addr,
+ FloatRegister dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ Label end;
+
+ // If it's an int, convert it to double.
+ computeScaledAddress(addr, scratch2);
+ // Since we only have one scratch, we need to stomp over it with the tag.
+ loadPtr(Address(scratch2, 0), scratch2);
+ as_movgr2fr_d(dest, scratch2);
+ as_srli_d(scratch2, scratch2, JSVAL_TAG_SHIFT);
+ asMasm().branchTestInt32(Assembler::NotEqual, scratch2, &end);
+ as_ffint_d_w(dest, dest);
+
+ bind(&end);
+}
+
+void MacroAssemblerLOONG64Compat::loadConstantDouble(double dp,
+ FloatRegister dest) {
+ ma_lid(dest, dp);
+}
+
+Register MacroAssemblerLOONG64Compat::extractObject(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
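+  // bstrpick.d extracts bits [JSVAL_TAG_SHIFT - 1, 0], i.e. the object
+  // pointer payload.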
+ as_bstrpick_d(scratch, scratch, JSVAL_TAG_SHIFT - 1, 0);
+ return scratch;
+}
+
+Register MacroAssemblerLOONG64Compat::extractTag(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
+ as_bstrpick_d(scratch, scratch, 63, JSVAL_TAG_SHIFT);
+ return scratch;
+}
+
+Register MacroAssemblerLOONG64Compat::extractTag(const BaseIndex& address,
+ Register scratch) {
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/LoongArch interface.
+/////////////////////////////////////////////////////////////////
+void MacroAssemblerLOONG64Compat::storeValue(ValueOperand val,
+ const Address& dest) {
+ storePtr(val.valueReg(), Address(dest.base, dest.offset));
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(ValueOperand val,
+ const BaseIndex& dest) {
+ storePtr(val.valueReg(), dest);
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(JSValueType type, Register reg,
+ Address dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(dest.base != scratch2);
+
+ tagValue(type, reg, ValueOperand(scratch2));
+ storePtr(scratch2, dest);
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(JSValueType type, Register reg,
+ BaseIndex dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(dest.base != scratch2);
+
+ tagValue(type, reg, ValueOperand(scratch2));
+ storePtr(scratch2, dest);
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(const Value& val, Address dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(dest.base != scratch2);
+
+ if (val.isGCThing()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), scratch2);
+ } else {
+ ma_li(scratch2, ImmWord(val.asRawBits()));
+ }
+ storePtr(scratch2, dest);
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(const Value& val, BaseIndex dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(dest.base != scratch2);
+
+ if (val.isGCThing()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), scratch2);
+ } else {
+ ma_li(scratch2, ImmWord(val.asRawBits()));
+ }
+ storePtr(scratch2, dest);
+}
+
+void MacroAssemblerLOONG64Compat::loadValue(Address src, ValueOperand val) {
+ loadPtr(src, val.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::loadValue(const BaseIndex& src,
+ ValueOperand val) {
+ loadPtr(src, val.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::tagValue(JSValueType type, Register payload,
+ ValueOperand dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.valueReg() != scratch);
+
+ if (payload == dest.valueReg()) {
+ as_or(scratch, payload, zero);
+ payload = scratch;
+ }
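+  // Materialize the shifted tag in dest, then insert the payload below it
+  // with bstrins.d; int32/boolean payloads occupy the low 32 bits.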
+ ma_li(dest.valueReg(), ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ as_bstrins_d(dest.valueReg(), payload, 31, 0);
+ } else {
+ as_bstrins_d(dest.valueReg(), payload, JSVAL_TAG_SHIFT - 1, 0);
+ }
+}
+
+void MacroAssemblerLOONG64Compat::pushValue(ValueOperand val) {
+ push(val.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::pushValue(const Address& addr) { push(addr); }
+
+void MacroAssemblerLOONG64Compat::popValue(ValueOperand val) {
+ pop(val.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::breakpoint(uint32_t value) {
+ as_break(value);
+}
+
+void MacroAssemblerLOONG64Compat::handleFailureWithHandlerTail(
+ Label* profilerExitTail, Label* bailoutTail) {
+ // Reserve space for exception information.
+ int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
+ ~(ABIStackAlignment - 1);
+ asMasm().subPtr(Imm32(size), StackPointer);
+  mov(StackPointer, a0); // Use a0 since it is the first function argument.
+
+ // Call the handler.
+  using Fn = void (*)(ResumeFromException* rfe);
+ asMasm().setupUnalignedABICall(a1);
+ asMasm().passABIArg(a0);
+ asMasm().callWithABI<Fn, HandleException>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
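+  // Dispatch on the resume kind that HandleException stored in the
+  // ResumeFromException record.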
+ // Already clobbered a0, so use it...
+ load32(Address(StackPointer, ResumeFromException::offsetOfKind()), a0);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::WasmCatch),
+ &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+  // We're going to be returning by the Ion calling convention.
+ ma_pop(ra);
+ as_jirl(zero, ra, BOffImm16(0));
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by the finally block: the exception and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1);
+ loadValue(Address(sp, ResumeFromException::offsetOfException()), exception);
+
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(sp, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp);
+
+ pushValue(exception);
+ pushValue(BooleanValue(true));
+ jump(a0);
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(StackPointer, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to
+ // caller frame before returning. This code is shared by ForcedReturnIon
+ // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ as_or(StackPointer, FramePointer, zero);
+ pop(FramePointer);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ loadPtr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), a2);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(ReturnReg, Imm32(1));
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(InstanceReg, ImmWord(wasm::FailInstanceReg));
+ ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a1);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a1);
+}
+
+CodeOffset MacroAssemblerLOONG64Compat::toggledJump(Label* label) {
+ CodeOffset ret(nextOffset().getOffset());
+ ma_b(label);
+ return ret;
+}
+
+CodeOffset MacroAssemblerLOONG64Compat::toggledCall(JitCode* target,
+ bool enabled) {
+ ScratchRegisterScope scratch(asMasm());
+ BufferOffset bo = nextOffset();
+  CodeOffset offset(bo.getOffset()); // first instruction location, not changed.
+ addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(target->raw()));
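+  // The patchable load has a fixed length, so together with the following
+  // jirl/nop the call site has a constant size that ToggleCall can patch.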
+ if (enabled) {
+ as_jirl(ra, scratch, BOffImm16(0));
+ } else {
+ as_nop();
+ }
+ MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
+ ToggledCallSize(nullptr));
+  return offset; // location of the first instruction of the call sequence.
+}
+
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+ Register pointer) {
+ if (IsShiftInScaleRange(shift)) {
+ computeEffectiveAddress(
+ BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
+ return;
+ }
+ lshift32(Imm32(shift), indexTemp32);
+ addPtr(indexTemp32, pointer);
+}
+
+//}}} check_macroassembler_style
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/loong64/MacroAssembler-loong64.h b/js/src/jit/loong64/MacroAssembler-loong64.h
new file mode 100644
index 0000000000..722c30f0eb
--- /dev/null
+++ b/js/src/jit/loong64/MacroAssembler-loong64.h
@@ -0,0 +1,1037 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_MacroAssembler_loong64_h
+#define jit_loong64_MacroAssembler_loong64_h
+
+#include "jit/loong64/Assembler-loong64.h"
+#include "jit/MoveResolver.h"
+#include "wasm/WasmBuiltins.h"
+
+namespace js {
+namespace jit {
+
+enum LoadStoreSize {
+ SizeByte = 8,
+ SizeHalfWord = 16,
+ SizeWord = 32,
+ SizeDouble = 64
+};
+
+enum LoadStoreExtension { ZeroExtend = 0, SignExtend = 1 };
+
+enum JumpKind { LongJump = 0, ShortJump = 1 };
+
+static Register CallReg = t8;
+
+enum LiFlags {
+ Li64 = 0,
+ Li48 = 1,
+};
+
+struct ImmShiftedTag : public ImmWord {
+ explicit ImmShiftedTag(JSValueShiftedTag shtag) : ImmWord((uintptr_t)shtag) {}
+
+ explicit ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) {
+ }
+};
+
+struct ImmTag : public Imm32 {
+ ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value),
+ "The defaultShift is wrong");
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
+
+class ScratchTagScope : public SecondScratchRegisterScope {
+ public:
+ ScratchTagScope(MacroAssembler& masm, const ValueOperand&)
+ : SecondScratchRegisterScope(masm) {}
+};
+
+class ScratchTagScopeRelease {
+ ScratchTagScope* ts_;
+
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
+ ts_->release();
+ }
+ ~ScratchTagScopeRelease() { ts_->reacquire(); }
+};
+
+class MacroAssemblerLOONG64 : public Assembler {
+ protected:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+ Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);
+
+ void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs,
+ FloatRegister rhs, DoubleCondition c,
+ FPConditionBit fcc = FCC0);
+
+ public:
+ void ma_li(Register dest, CodeLabel* label);
+ void ma_li(Register dest, ImmWord imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);
+
+ // load
+ void ma_ld_b(Register dest, Address address);
+ void ma_ld_h(Register dest, Address address);
+ void ma_ld_w(Register dest, Address address);
+ void ma_ld_d(Register dest, Address address);
+ void ma_ld_bu(Register dest, Address address);
+ void ma_ld_hu(Register dest, Address address);
+ void ma_ld_wu(Register dest, Address address);
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_st_b(Register src, Address address);
+ void ma_st_h(Register src, Address address);
+ void ma_st_w(Register src, Address address);
+ void ma_st_d(Register src, Address address);
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ void ma_add_d(Register rd, Register rj, Imm32 imm);
+ void ma_add32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_add32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, ImmWord imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, ImmWord imm,
+ Label* overflow);
+
+ // subtract
+ void ma_sub_d(Register rd, Register rj, Imm32 imm);
+ void ma_sub32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+  // multiplies. For now, there are only a few that we care about.
+ void ma_mul_d(Register rd, Register rj, Imm32 imm);
+ void ma_mulh_d(Register rd, Register rj, Imm32 imm);
+ void ma_mulPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+
+ // stack
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
+  // branches used from within loongarch-specific code
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Address addr, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rhs != scratch);
+ ma_ld_d(scratch, addr);
+ ma_b(scratch, rhs, l, c, jumpKind);
+ }
+
+ void ma_bl(Label* l);
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ void ma_fld_s(FloatRegister ft, Address address);
+ void ma_fld_d(FloatRegister ft, Address address);
+ void ma_fst_d(FloatRegister ft, Address address);
+ void ma_fst_s(FloatRegister ft, Address address);
+
+ void ma_pop(FloatRegister f);
+ void ma_push(FloatRegister f);
+
+ void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, ImmWord imm, Condition c);
+
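+  // LoongArch has no conditional-move instruction for GPRs, so one is
+  // composed from maskeqz/masknez plus an or.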
+ void moveIfZero(Register dst, Register src, Register cond) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dst != scratch && cond != scratch);
+ as_masknez(scratch, src, cond);
+ as_maskeqz(dst, dst, cond);
+ as_or(dst, dst, scratch);
+ }
+ void moveIfNotZero(Register dst, Register src, Register cond) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dst != scratch && cond != scratch);
+ as_maskeqz(scratch, src, cond);
+ as_masknez(dst, dst, cond);
+ as_or(dst, dst, scratch);
+ }
+
+  // These functions abstract access to the high part of the double-precision
+  // float register. They are intended to work on both 32-bit and 64-bit
+  // floating point coprocessors.
+ void moveToDoubleHi(Register src, FloatRegister dest) {
+ as_movgr2frh_w(dest, src);
+ }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_movfrh2gr_s(dest, src);
+ }
+
+ void moveToDouble(Register src, FloatRegister dest) {
+ as_movgr2fr_d(dest, src);
+ }
+ void moveFromDouble(FloatRegister src, Register dest) {
+ as_movfr2gr_d(dest, src);
+ }
+
+ public:
+ void ma_li(Register dest, ImmGCPtr ptr);
+
+ void ma_li(Register dest, Imm32 imm);
+ void ma_liPatchable(Register dest, Imm32 imm);
+
+ void ma_rotr_w(Register rd, Register rj, Imm32 shift);
+
+ void ma_fmovz(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
+ Register rk);
+ void ma_fmovn(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
+ Register rk);
+
+ void ma_and(Register rd, Register rj, Imm32 imm, bool bit32 = false);
+
+ void ma_or(Register rd, Register rj, Imm32 imm, bool bit32 = false);
+
+ void ma_xor(Register rd, Register rj, Imm32 imm, bool bit32 = false);
+
+ // load
+ void ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ void ma_add_w(Register rd, Register rj, Imm32 imm);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // subtract
+ void ma_sub_w(Register rd, Register rj, Imm32 imm);
+ void ma_sub_w(Register rd, Register rj, Register rk);
+ void ma_sub32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+  // multiplies. For now, there are only a few that we care about.
+ void ma_mul(Register rd, Register rj, Imm32 imm);
+ void ma_mul32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_mul32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // divisions
+ void ma_div_branch_overflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_div_branch_overflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+  // fast mod; uses scratch registers and thus needs to live in the assembler.
+  // It implicitly assumes that dest can be overwritten at the beginning of
+  // the sequence.
+ void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero = nullptr);
+
+  // branches used from within loongarch-specific code
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ ma_li(scratch, imm);
+ ma_b(lhs, scratch, l, c, jumpKind);
+ }
+
+ void ma_b(Label* l, JumpKind jumpKind = LongJump);
+
+ // fp instructions
+ void ma_lis(FloatRegister dest, float value);
+
+ void ma_fst_d(FloatRegister src, BaseIndex address);
+ void ma_fst_s(FloatRegister src, BaseIndex address);
+
+ void ma_fld_d(FloatRegister dest, const BaseIndex& src);
+ void ma_fld_s(FloatRegister dest, const BaseIndex& src);
+
+ // FP branches
+ void ma_bc_s(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind = LongJump,
+ FPConditionBit fcc = FCC0);
+ void ma_bc_d(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind = LongJump,
+ FPConditionBit fcc = FCC0);
+
+ void ma_call(ImmPtr dest);
+
+ void ma_jump(ImmPtr dest);
+
+ void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+ void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c);
+ void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c);
+
+ void moveToDoubleLo(Register src, FloatRegister dest) {
+ as_movgr2fr_w(dest, src);
+ }
+ void moveFromDoubleLo(FloatRegister src, Register dest) {
+ as_movfr2gr_s(dest, src);
+ }
+
+ void moveToFloat32(Register src, FloatRegister dest) {
+ as_movgr2fr_w(dest, src);
+ }
+ void moveFromFloat32(FloatRegister src, Register dest) {
+ as_movfr2gr_s(dest, src);
+ }
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
+ // Handle NaN specially if handleNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN,
+ bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN,
+ bool isMax);
+
+ void loadDouble(const Address& addr, FloatRegister dest);
+ void loadDouble(const BaseIndex& src, FloatRegister dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+ void loadFloat32(const Address& addr, FloatRegister dest);
+ void loadFloat32(const BaseIndex& src, FloatRegister dest);
+
+ void outOfLineWasmTruncateToInt32Check(FloatRegister input, Register output,
+ MIRType fromType, TruncFlags flags,
+ Label* rejoin,
+ wasm::BytecodeOffset trapOffset);
+ void outOfLineWasmTruncateToInt64Check(FloatRegister input, Register64 output,
+ MIRType fromType, TruncFlags flags,
+ Label* rejoin,
+ wasm::BytecodeOffset trapOffset);
+
+ protected:
+ void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register ptrScratch, AnyRegister output,
+ Register tmp);
+ void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+};
+
+class MacroAssembler;
+
+class MacroAssemblerLOONG64Compat : public MacroAssemblerLOONG64 {
+ public:
+ using MacroAssemblerLOONG64::call;
+
+ MacroAssemblerLOONG64Compat() {}
+
+ void convertBoolToInt32(Register src, Register dest) {
+ ma_and(dest, src, Imm32(0xff));
+ };
+ void convertInt32ToDouble(Register src, FloatRegister dest) {
+ as_movgr2fr_w(dest, src);
+ as_ffint_d_w(dest, dest);
+ };
+ void convertInt32ToDouble(const Address& src, FloatRegister dest) {
+ ma_fld_s(dest, src);
+ as_ffint_d_w(dest, dest);
+ };
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != src.base);
+ MOZ_ASSERT(scratch != src.index);
+ computeScaledAddress(src, scratch);
+ convertInt32ToDouble(Address(scratch, src.offset), dest);
+ };
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void movq(Register rj, Register rd);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_add_d(dest, address.base, Imm32(address.offset));
+ }
+
+ void computeEffectiveAddress(const BaseIndex& address, Register dest) {
+ computeScaledAddress(address, dest);
+ if (address.offset) {
+ ma_add_d(dest, dest, Imm32(address.offset));
+ }
+ }
+
+ void j(Label* dest) { ma_b(dest); }
+
+ void mov(Register src, Register dest) { as_ori(dest, src, 0); }
+ void mov(ImmWord imm, Register dest) { ma_li(dest, imm); }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(CodeLabel* label, Register dest) { ma_li(dest, label); }
+ void mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); }
+ void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); }
+
+ void writeDataRelocation(const Value& val) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (val.isGCThing()) {
+ gc::Cell* cell = val.toGCThing();
+ if (cell && gc::IsInsideNursery(cell)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(currentOffset());
+ }
+ }
+
+ void branch(JitCode* c) {
+ ScratchRegisterScope scratch(asMasm());
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(c->raw()));
+ as_jirl(zero, scratch, BOffImm16(0));
+ }
+ void branch(const Register reg) { as_jirl(zero, reg, BOffImm16(0)); }
+ void nop() { as_nop(); }
+ void ret() {
+ ma_pop(ra);
+ as_jirl(zero, ra, BOffImm16(0));
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(ImmWord imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(ImmGCPtr imm) {
+ ScratchRegisterScope scratch(asMasm());
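+  // slli.w with a zero shift sign-extends the low 32 bits, discarding the
+  // tag and leaving the int32 payload.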
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(address, scratch2);
+ ma_push(scratch2);
+ }
+ void push(Register reg) { ma_push(reg); }
+ void push(FloatRegister reg) { ma_push(reg); }
+ void pop(Register reg) { ma_pop(reg); }
+ void pop(FloatRegister reg) { ma_pop(reg); }
+
+  // Emit a branch that can be toggled to a non-operation. On LOONG64 we use
+  // the "andi" instruction to toggle the branch.
+ // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+  // Emit a "jirl" or "nop" instruction. ToggleCall can be used to patch
+  // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Four instructions used in: MacroAssemblerLOONG64Compat::toggledCall
+ return 4 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ ScratchRegisterScope scratch(asMasm());
+ CodeOffset offset = movWithPatch(imm, scratch);
+ ma_push(scratch);
+ return offset;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm, Li64);
+ return offset;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return offset;
+ }
+
+ void writeCodePointer(CodeLabel* label) {
+ label->patchAt()->bind(currentOffset());
+ label->setLinkMode(CodeLabel::RawPointer);
+ m_buffer.ensureSpace(sizeof(void*));
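+    // Reserve two 32-bit instruction slots (8 bytes) to be patched with the
+    // raw code pointer.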
+ writeInst(-1);
+ writeInst(-1);
+ }
+
+ void jump(Label* label) { ma_b(label); }
+ void jump(Register reg) { as_jirl(zero, reg, BOffImm16(0)); }
+ void jump(const Address& address) {
+ ScratchRegisterScope scratch(asMasm());
+ loadPtr(address, scratch);
+ as_jirl(zero, scratch, BOffImm16(0));
+ }
+
+ void jump(JitCode* code) { branch(code); }
+
+ void jump(ImmPtr ptr) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ptr, RelocationKind::HARDCODED);
+ ma_jump(ptr);
+ }
+
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+
+ void splitTag(Register src, Register dest) {
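+    // The tag occupies the bits above JSVAL_TAG_SHIFT, so a logical right
+    // shift isolates it.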
+ as_srli_d(dest, src, JSVAL_TAG_SHIFT);
+ }
+
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ splitTag(value, tag);
+ }
+
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest,
+ JSValueType type) {
+ unboxNonDouble(operand.valueReg(), dest, type);
+ }
+
+ template <typename T>
+ void unboxNonDouble(T src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ load32(src, dest);
+ return;
+ }
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest, type);
+ }
+
+ void unboxNonDouble(Register src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ as_slli_w(dest, src, 0);
+ return;
+ }
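+    // The shifted type tag lives in the high bits, so xoring it out leaves
+    // the pointer payload untouched.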
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != src);
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ as_xor(dest, src, scratch);
+ }
+
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ static_assert(JS::detail::ValueObjectOrNullBit ==
+ (uint64_t(0x8) << JSVAL_TAG_SHIFT));
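+    // The null and object tags differ only in ValueObjectOrNullBit, so
+    // clearing that bit makes a null value unbox to nullptr.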
+ as_bstrins_d(dest, zero, JSVAL_TAG_SHIFT + 3, JSVAL_TAG_SHIFT + 3);
+ }
+
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ as_bstrpick_d(dest, dest, JSVAL_TAG_SHIFT - 1, 0);
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ as_bstrpick_d(dest, src.valueReg(), JSVAL_TAG_SHIFT - 1, 0);
+ }
+
+ // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
+ void getGCThingValueChunk(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != dest);
+ loadPtr(src, dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), scratch);
+ as_and(dest, dest, scratch);
+ }
+ void getGCThingValueChunk(const ValueOperand& src, Register dest) {
+ MOZ_ASSERT(src.valueReg() != dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
+ as_and(dest, dest, src.valueReg());
+ }
+
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(Register src, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxInt32(const BaseIndex& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(Register src, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxBoolean(const BaseIndex& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(Register src, Register dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxDouble(const BaseIndex& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(Register src, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxSymbol(const ValueOperand& src, Register dest);
+ void unboxSymbol(Register src, Register dest);
+ void unboxSymbol(const Address& src, Register dest);
+ void unboxBigInt(const ValueOperand& operand, Register dest);
+ void unboxBigInt(Register src, Register dest);
+ void unboxBigInt(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(Register src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type);
+
+ void notBoolean(const ValueOperand& val) {
+ as_xori(val.valueReg(), val.valueReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch);
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractString(const ValueOperand& value,
+ Register scratch) {
+ unboxString(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxSymbol(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch);
+ [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& src, FloatRegister dest);
+ void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest);
+ void loadConstantDouble(double dp, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest);
+
+ template <typename T>
+ void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, BaseIndex address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8: {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, scratch2);
+ } else {
+ unboxNonDouble(value, scratch2, type);
+ }
+ computeEffectiveAddress(address, scratch);
+ as_st_d(scratch2, scratch, 0);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, Address address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8: {
+ SecondScratchRegisterScope scratch2(asMasm());
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, scratch2);
+ } else {
+ unboxNonDouble(value, scratch2, type);
+ }
+ storePtr(scratch2, address);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void boxValue(JSValueType type, Register src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ if (src == dest) {
+ as_ori(scratch, src, 0);
+ src = scratch;
+ }
+#ifdef DEBUG
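+    // Int32 and boolean payloads must arrive sign-extended; trap in debug
+    // builds if the upper 32 bits contain garbage.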
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ Label upper32BitsSignExtended;
+ as_slli_w(dest, src, 0);
+ ma_b(src, dest, &upper32BitsSignExtended, Equal, ShortJump);
+ breakpoint();
+ bind(&upper32BitsSignExtended);
+ }
+#endif
+ ma_li(dest, ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ as_bstrins_d(dest, src, 31, 0);
+ } else {
+ as_bstrins_d(dest, src, JSVAL_TAG_SHIFT - 1, 0);
+ }
+ }
+
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+
+ void storePrivateValue(Register src, const Address& dest) {
+ storePtr(src, dest);
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ storePtr(imm, dest);
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(const BaseIndex& src, ValueOperand val);
+
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ if (val.isGCThing()) {
+ ScratchRegisterScope scratch(asMasm());
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), scratch);
+ push(scratch);
+ } else {
+ push(ImmWord(val.asRawBits()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ boxValue(type, reg, scratch2);
+ push(scratch2);
+ }
+ void pushValue(const Address& addr);
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ loadValue(addr, ValueOperand(scratch));
+ pushValue(ValueOperand(scratch));
+ }
+
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ load16SignExtend(src, dest);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ load16ZeroExtend(src, dest);
+ }
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ load32(src, dest);
+ }
+
+ void load64(const Address& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ load64(src, dest);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ template <typename T>
+ void store16Unaligned(Register src, const T& dest) {
+ store16(src, dest);
+ }
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ template <typename T>
+ void store32Unaligned(Register src, const T& dest) {
+ store32(src, dest);
+ }
+
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+ void store64(Imm64 imm, const BaseIndex& address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ void store64(Register64 src, Address address) { storePtr(src.reg, address); }
+ void store64(Register64 src, const BaseIndex& address) {
+ storePtr(src.reg, address);
+ }
+
+ template <typename T>
+ void store64Unaligned(Register64 src, const T& dest) {
+ store64(src, dest);
+ }
+
+ template <typename T>
+ void storePtr(ImmWord imm, T address);
+ template <typename T>
+ void storePtr(ImmPtr imm, T address);
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ as_fmov_d(dest, src);
+ }
+
+ void zeroDouble(FloatRegister reg) { moveToDouble(zero, reg); }
+
+ void convertUInt64ToDouble(Register src, FloatRegister dest);
+
+ void breakpoint(uint32_t value = 0);
+
+ void checkStackAlignment() {
+#ifdef DEBUG
+ Label aligned;
+ ScratchRegisterScope scratch(asMasm());
+ as_andi(scratch, sp, ABIStackAlignment - 1);
+ ma_b(scratch, zero, &aligned, Equal, ShortJump);
+ breakpoint();
+ bind(&aligned);
+#endif
+ };
+
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, Register rhs,
+ Register dest);
+
+ void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register64 output, Register tmp);
+ void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+
+ public:
+ void lea(Operand addr, Register dest) {
+ ma_add_d(dest, addr.baseReg(), Imm32(addr.disp()));
+ }
+
+ void abiret() { as_jirl(zero, ra, BOffImm16(0)); }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_fmov_s(dest, src);
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerLOONG64Compat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_MacroAssembler_loong64_h */
diff --git a/js/src/jit/loong64/MoveEmitter-loong64.cpp b/js/src/jit/loong64/MoveEmitter-loong64.cpp
new file mode 100644
index 0000000000..a12378be83
--- /dev/null
+++ b/js/src/jit/loong64/MoveEmitter-loong64.cpp
@@ -0,0 +1,326 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/MoveEmitter-loong64.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterLOONG64::breakCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // Consider a two-move cycle such as:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope fpscratch32(masm);
+ masm.loadFloat32(getAdjustedAddress(to), fpscratch32);
+ masm.storeFloat32(fpscratch32, cycleSlot(slotId));
+ } else {
+ masm.storeFloat32(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope fpscratch64(masm);
+ masm.loadDouble(getAdjustedAddress(to), fpscratch64);
+ masm.storeDouble(fpscratch64, cycleSlot(slotId));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.load32(getAdjustedAddress(to), scratch2);
+ masm.store32(scratch2, cycleSlot(0));
+ } else {
+ masm.store32(to.reg(), cycleSlot(0));
+ }
+ break;
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.loadPtr(getAdjustedAddress(to), scratch2);
+ masm.storePtr(scratch2, cycleSlot(0));
+ } else {
+ masm.storePtr(to.reg(), cycleSlot(0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterLOONG64::completeCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // Consider a two-move cycle such as:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+  // saved value of B to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope fpscratch32(masm);
+ masm.loadFloat32(cycleSlot(slotId), fpscratch32);
+ masm.storeFloat32(fpscratch32, getAdjustedAddress(to));
+ } else {
+ masm.loadFloat32(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope fpscratch64(masm);
+ masm.loadDouble(cycleSlot(slotId), fpscratch64);
+ masm.storeDouble(fpscratch64, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.load32(cycleSlot(0), scratch2);
+ masm.store32(scratch2, getAdjustedAddress(to));
+ } else {
+ masm.load32(cycleSlot(0), to.reg());
+ }
+ break;
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.loadPtr(cycleSlot(0), scratch2);
+ masm.storePtr(scratch2, getAdjustedAddress(to));
+ } else {
+ masm.loadPtr(cycleSlot(0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterLOONG64::emit(const MoveResolver& moves) {
+ if (moves.numCycles()) {
+ // Reserve stack for cycle resolution
+ static_assert(SpillSlotSize == 8);
+ masm.reserveStack(moves.numCycles() * SpillSlotSize);
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++) {
+ emit(moves.getMove(i));
+ }
+}
+
+Address MoveEmitterLOONG64::cycleSlot(uint32_t slot, uint32_t subslot) const {
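+  // The cycle spill area was reserved at pushedAtCycle_; address it relative
+  // to the current stack pointer.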
+ int32_t offset = masm.framePushed() - pushedAtCycle_;
+ MOZ_ASSERT(Imm16::IsInSignedRange(offset));
+ return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+int32_t MoveEmitterLOONG64::getAdjustedOffset(const MoveOperand& operand) {
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+ if (operand.base() != StackPointer) {
+ return operand.disp();
+ }
+
+ // Adjust offset if stack pointer has been moved.
+ return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
+
+Address MoveEmitterLOONG64::getAdjustedAddress(const MoveOperand& operand) {
+ return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+void MoveEmitterLOONG64::emitMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ if (to.isGeneralReg()) {
+ masm.movePtr(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.storePtr(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.loadPtr(getAdjustedAddress(from), scratch2);
+ masm.storePtr(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(getAdjustedAddress(from), scratch2);
+ masm.storePtr(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+}
+
+void MoveEmitterLOONG64::emitInt32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ if (to.isGeneralReg()) {
+ masm.move32(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.store32(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.load32(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.load32(getAdjustedAddress(from), scratch2);
+ masm.store32(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(getAdjustedAddress(from), scratch2);
+ masm.store32(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+}
+
+void MoveEmitterLOONG64::emitFloat32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveFloat32(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+      // Only used when passing a float parameter in a1, a2 or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.moveFromFloat32(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+    // Only used when passing a float parameter in a1, a2 or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ ScratchFloat32Scope fpscratch32(masm);
+ masm.loadFloat32(getAdjustedAddress(from), fpscratch32);
+ masm.storeFloat32(fpscratch32, getAdjustedAddress(to));
+ }
+}
+
+void MoveEmitterLOONG64::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ masm.moveFromDouble(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ if (from.isMemory()) {
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else {
+ masm.moveToDouble(from.reg(), to.floatReg());
+ }
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ ScratchDoubleScope fpscratch64(masm);
+ masm.loadDouble(getAdjustedAddress(from), fpscratch64);
+ masm.storeDouble(fpscratch64, getAdjustedAddress(to));
+ }
+}
+
+void MoveEmitterLOONG64::emit(const MoveOp& move) {
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd() && move.isCycleBegin()) {
+    // A fun consequence of aliased registers is that you can have multiple
+    // cycles at once, and one can end exactly where another begins.
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ return;
+ }
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ MOZ_ASSERT(inCycle_ > 0);
+ inCycle_--;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ inCycle_++;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to);
+ break;
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterLOONG64::assertDone() { MOZ_ASSERT(inCycle_ == 0); }
+
+void MoveEmitterLOONG64::finish() {
+ assertDone();
+
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
diff --git a/js/src/jit/loong64/MoveEmitter-loong64.h b/js/src/jit/loong64/MoveEmitter-loong64.h
new file mode 100644
index 0000000000..1481c8f973
--- /dev/null
+++ b/js/src/jit/loong64/MoveEmitter-loong64.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_MoveEmitter_loong64_h
+#define jit_loong64_MoveEmitter_loong64_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterLOONG64 {
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+
+ protected:
+ uint32_t inCycle_;
+ MacroAssembler& masm;
+
+  // Value of masm.framePushed() at the time the emitter was created.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ void assertDone();
+ Register tempReg();
+ FloatRegister tempFloatReg();
+ Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;
+ int32_t getAdjustedOffset(const MoveOperand& operand);
+ Address getAdjustedAddress(const MoveOperand& operand);
+
+ void emitMove(const MoveOperand& from, const MoveOperand& to);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ void emit(const MoveOp& move);
+
+ public:
+ MoveEmitterLOONG64(MacroAssembler& masm)
+ : inCycle_(0),
+ masm(masm),
+ pushedAtStart_(masm.framePushed()),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg) {}
+
+ ~MoveEmitterLOONG64() { assertDone(); }
+
+ void emit(const MoveResolver& moves);
+ void finish();
+ void setScratchRegister(Register reg) {}
+};
+
+typedef MoveEmitterLOONG64 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_MoveEmitter_loong64_h */
diff --git a/js/src/jit/loong64/SharedICHelpers-loong64-inl.h b/js/src/jit/loong64/SharedICHelpers-loong64-inl.h
new file mode 100644
index 0000000000..960a583ccf
--- /dev/null
+++ b/js/src/jit/loong64/SharedICHelpers-loong64-inl.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_SharedICHelpers_loong64_inl_h
+#define jit_loong64_SharedICHelpers_loong64_inl_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ Register scratch = R2.scratchReg();
+
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.subPtr(Imm32(argSize), scratch);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+ masm.addPtr(Imm32(argSize), scratch);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ // ICTailCallReg (ra) already contains the return address (as we
+ // keep it there through the stub calls), but the VMWrapper code being
+ // called expects the return address to also be pushed on the stack.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(ra);
+
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+#ifdef DEBUG
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Note: when making changes here, don't forget to update
+ // BaselineStubFrame if needed.
+
+ // Push frame descriptor and return address.
+ masm.PushFrameDescriptor(FrameType::BaselineJS);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.movePtr(StackPointer, FramePointer);
+ masm.Push(ICStubReg);
+
+ // Stack should remain aligned.
+ masm.assertStackAlignment(sizeof(Value), 0);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_SharedICHelpers_loong64_inl_h */
diff --git a/js/src/jit/loong64/SharedICHelpers-loong64.h b/js/src/jit/loong64/SharedICHelpers-loong64.h
new file mode 100644
index 0000000000..e8934e2f82
--- /dev/null
+++ b/js/src/jit/loong64/SharedICHelpers-loong64.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_SharedICHelpers_loong64_h
+#define jit_loong64_SharedICHelpers_loong64_h
+
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on
+// the stack on LoongArch).
+static const size_t ICStackValueOffset = 0;
+
+struct BaselineStubFrame {
+ uintptr_t savedFrame;
+ uintptr_t savedStub;
+ uintptr_t returnAddress;
+ uintptr_t descriptor;
+};
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+  // No-op on LoongArch: the ra register always holds the return address.
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+  // No-op on LoongArch: the ra register always holds the return address.
+}
+
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Load stubcode pointer from the ICStub.
+ // R2 won't be active when we call ICs, so we can use it as scratch.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode via a direct jump-and-link
+ masm.call(R2.scratchReg());
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.branch(ra); }
+
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ masm.loadPtr(
+ Address(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP),
+ ICStubReg);
+
+ masm.movePtr(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // Load the return address.
+ masm.Pop(ICTailCallReg);
+
+ // Discard the frame descriptor.
+ {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.Pop(scratch2);
+ }
+
+ masm.checkStackAlignment();
+}
+
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ // On LoongArch, $ra is clobbered by guardedCallPreBarrier. Save it first.
+ masm.push(ra);
+ masm.guardedCallPreBarrier(addr, type);
+ masm.pop(ra);
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_SharedICHelpers_loong64_h */
diff --git a/js/src/jit/loong64/SharedICRegisters-loong64.h b/js/src/jit/loong64/SharedICRegisters-loong64.h
new file mode 100644
index 0000000000..d51336c1d2
--- /dev/null
+++ b/js/src/jit/loong64/SharedICRegisters-loong64.h
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_SharedICRegisters_loong64_h
+#define jit_loong64_SharedICRegisters_loong64_h
+
+#include "jit/loong64/Assembler-loong64.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1 value
+// should be preserved across calls.
+static constexpr ValueOperand R0(a2);
+static constexpr ValueOperand R1(s1);
+static constexpr ValueOperand R2(a0);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = t0;
+
+// Note that ICTailCallReg is actually just the link register.
+// In LoongArch code emission, we do not clobber ICTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = f0;
+static constexpr FloatRegister FloatReg1 = f1;
+static constexpr FloatRegister FloatReg2 = f2;
+static constexpr FloatRegister FloatReg3 = f3;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_SharedICRegisters_loong64_h */
diff --git a/js/src/jit/loong64/Simulator-loong64.cpp b/js/src/jit/loong64/Simulator-loong64.cpp
new file mode 100644
index 0000000000..21193976c3
--- /dev/null
+++ b/js/src/jit/loong64/Simulator-loong64.cpp
@@ -0,0 +1,5238 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/loong64/Simulator-loong64.h"
+
+#include <float.h>
+#include <limits>
+
+#include "jit/AtomicOperations.h"
+#include "jit/loong64/Assembler-loong64.h"
+#include "js/Conversions.h"
+#include "threading/LockGuard.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#define I8(v) static_cast<int8_t>(v)
+#define I16(v) static_cast<int16_t>(v)
+#define U16(v) static_cast<uint16_t>(v)
+#define I32(v) static_cast<int32_t>(v)
+#define U32(v) static_cast<uint32_t>(v)
+#define I64(v) static_cast<int64_t>(v)
+#define U64(v) static_cast<uint64_t>(v)
+#define I128(v) static_cast<__int128_t>(v)
+#define U128(v) static_cast<__uint128_t>(v)
+
+#define I32_CHECK(v) \
+ ({ \
+ MOZ_ASSERT(I64(I32(v)) == I64(v)); \
+ I32((v)); \
+ })
+
+namespace js {
+namespace jit {
+
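+// Compute the high 64 bits of the 128-bit product of two 64-bit operands by
+// splitting each operand into 32-bit halves and summing the partial products.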
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+static uint64_t MultiplyHighUnsigned(uint64_t u, uint64_t v) {
+ uint64_t u0, v0, w0;
+ uint64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+// Precondition: 0 <= shift < 32
+inline constexpr uint32_t RotateRight32(uint32_t value, uint32_t shift) {
+ return (value >> shift) | (value << ((32 - shift) & 31));
+}
+
+// Precondition: 0 <= shift < 32
+inline constexpr uint32_t RotateLeft32(uint32_t value, uint32_t shift) {
+ return (value << shift) | (value >> ((32 - shift) & 31));
+}
+
+// Precondition: 0 <= shift < 64
+inline constexpr uint64_t RotateRight64(uint64_t value, uint64_t shift) {
+ return (value >> shift) | (value << ((64 - shift) & 63));
+}
+
+// Precondition: 0 <= shift < 64
+inline constexpr uint64_t RotateLeft64(uint64_t value, uint64_t shift) {
+ return (value << shift) | (value >> ((64 - shift) & 63));
+}
+
+// A break instruction encoded with the maximum break code; used to redirect
+// calls to native functions in the simulator.
+static const Instr kCallRedirInstr = op_break | CODEMask;
+
+// -----------------------------------------------------------------------------
+// LoongArch64 assembly various constants.
+
+class SimInstruction {
+ public:
+ enum {
+ kInstrSize = 4,
+ // On LoongArch, PC cannot actually be directly accessed. We behave as if PC
+ // was always the value of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const { return (instructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
+ inline int bits(int hi, int lo) const {
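+    // The mask selects (hi - lo + 1) bits:
+    // (2 << (hi - lo)) == 1 << (hi - lo + 1).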
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Instruction type.
+ enum Type {
+ kUnsupported = -1,
+ kOp6Type,
+ kOp7Type,
+ kOp8Type,
+ kOp10Type,
+ kOp11Type,
+ kOp12Type,
+ kOp14Type,
+ kOp15Type,
+ kOp16Type,
+ kOp17Type,
+ kOp22Type,
+ kOp24Type
+ };
+
+ // Get the encoding type of the instruction.
+ Type instructionType() const;
+
+ inline int rjValue() const { return bits(RJShift + RJBits - 1, RJShift); }
+
+ inline int rkValue() const { return bits(RKShift + RKBits - 1, RKShift); }
+
+ inline int rdValue() const { return bits(RDShift + RDBits - 1, RDShift); }
+
+ inline int sa2Value() const { return bits(SAShift + SA2Bits - 1, SAShift); }
+
+ inline int sa3Value() const { return bits(SAShift + SA3Bits - 1, SAShift); }
+
+ inline int lsbwValue() const {
+ return bits(LSBWShift + LSBWBits - 1, LSBWShift);
+ }
+
+ inline int msbwValue() const {
+ return bits(MSBWShift + MSBWBits - 1, MSBWShift);
+ }
+
+ inline int lsbdValue() const {
+ return bits(LSBDShift + LSBDBits - 1, LSBDShift);
+ }
+
+ inline int msbdValue() const {
+ return bits(MSBDShift + MSBDBits - 1, MSBDShift);
+ }
+
+ inline int fdValue() const { return bits(FDShift + FDBits - 1, FDShift); }
+
+ inline int fjValue() const { return bits(FJShift + FJBits - 1, FJShift); }
+
+ inline int fkValue() const { return bits(FKShift + FKBits - 1, FKShift); }
+
+ inline int faValue() const { return bits(FAShift + FABits - 1, FAShift); }
+
+ inline int cdValue() const { return bits(CDShift + CDBits - 1, CDShift); }
+
+ inline int cjValue() const { return bits(CJShift + CJBits - 1, CJShift); }
+
+ inline int caValue() const { return bits(CAShift + CABits - 1, CAShift); }
+
+ inline int condValue() const {
+ return bits(CONDShift + CONDBits - 1, CONDShift);
+ }
+
+ inline int imm5Value() const {
+ return bits(Imm5Shift + Imm5Bits - 1, Imm5Shift);
+ }
+
+ inline int imm6Value() const {
+ return bits(Imm6Shift + Imm6Bits - 1, Imm6Shift);
+ }
+
+ inline int imm12Value() const {
+ return bits(Imm12Shift + Imm12Bits - 1, Imm12Shift);
+ }
+
+ inline int imm14Value() const {
+ return bits(Imm14Shift + Imm14Bits - 1, Imm14Shift);
+ }
+
+ inline int imm16Value() const {
+ return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+
+ inline int imm20Value() const {
+ return bits(Imm20Shift + Imm20Bits - 1, Imm20Shift);
+ }
+
+ inline int32_t imm26Value() const {
+ return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+
+ // Say if the instruction is a debugger break/trap.
+ bool isTrap() const;
+
+ private:
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+bool SimInstruction::isTrap() const {
+  // Is this a break instruction?
+ switch (bits(31, 15) << 15) {
+ case op_break:
+ return (instructionBits() != kCallRedirInstr) && (bits(15, 0) != 6);
+ default:
+ return false;
+  }
+}
+
+SimInstruction::Type SimInstruction::instructionType() const {
+ SimInstruction::Type kType = kUnsupported;
+
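+  // The opcode field width differs between instruction formats, so try each
+  // encoding in turn, from the shortest (6-bit) opcode prefix to the longest
+  // (24-bit), until one matches.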
+ // Check for kOp6Type
+ switch (bits(31, 26) << 26) {
+ case op_beqz:
+ case op_bnez:
+ case op_bcz:
+ case op_jirl:
+ case op_b:
+ case op_bl:
+ case op_beq:
+ case op_bne:
+ case op_blt:
+ case op_bge:
+ case op_bltu:
+ case op_bgeu:
+ case op_addu16i_d:
+ kType = kOp6Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp7Type
+ switch (bits(31, 25) << 25) {
+ case op_lu12i_w:
+ case op_lu32i_d:
+ case op_pcaddi:
+ case op_pcalau12i:
+ case op_pcaddu12i:
+ case op_pcaddu18i:
+ kType = kOp7Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp8Type
+ switch (bits(31, 24) << 24) {
+ case op_ll_w:
+ case op_sc_w:
+ case op_ll_d:
+ case op_sc_d:
+ case op_ldptr_w:
+ case op_stptr_w:
+ case op_ldptr_d:
+ case op_stptr_d:
+ kType = kOp8Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp10Type
+ switch (bits(31, 22) << 22) {
+ case op_bstrins_d:
+ case op_bstrpick_d:
+ case op_slti:
+ case op_sltui:
+ case op_addi_w:
+ case op_addi_d:
+ case op_lu52i_d:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_ld_b:
+ case op_ld_h:
+ case op_ld_w:
+ case op_ld_d:
+ case op_st_b:
+ case op_st_h:
+ case op_st_w:
+ case op_st_d:
+ case op_ld_bu:
+ case op_ld_hu:
+ case op_ld_wu:
+ case op_preld:
+ case op_fld_s:
+ case op_fst_s:
+ case op_fld_d:
+ case op_fst_d:
+ case op_bstr_w: // BSTRINS_W & BSTRPICK_W
+ kType = kOp10Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp11Type
+ switch (bits(31, 21) << 21) {
+ case op_bstr_w:
+ kType = kOp11Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp12Type
+ switch (bits(31, 20) << 20) {
+ case op_fmadd_s:
+ case op_fmadd_d:
+ case op_fmsub_s:
+ case op_fmsub_d:
+ case op_fnmadd_s:
+ case op_fnmadd_d:
+ case op_fnmsub_s:
+ case op_fnmsub_d:
+ case op_fcmp_cond_s:
+ case op_fcmp_cond_d:
+ kType = kOp12Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp14Type
+ switch (bits(31, 18) << 18) {
+ case op_bytepick_d:
+ case op_fsel:
+ kType = kOp14Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp15Type
+ switch (bits(31, 17) << 17) {
+ case op_bytepick_w:
+ case op_alsl_w:
+ case op_alsl_wu:
+ case op_alsl_d:
+ kType = kOp15Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp16Type
+ switch (bits(31, 16) << 16) {
+ case op_slli_d:
+ case op_srli_d:
+ case op_srai_d:
+ case op_rotri_d:
+ kType = kOp16Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp17Type
+ switch (bits(31, 15) << 15) {
+ case op_slli_w:
+ case op_srli_w:
+ case op_srai_w:
+ case op_rotri_w:
+ case op_add_w:
+ case op_add_d:
+ case op_sub_w:
+ case op_sub_d:
+ case op_slt:
+ case op_sltu:
+ case op_maskeqz:
+ case op_masknez:
+ case op_nor:
+ case op_and:
+ case op_or:
+ case op_xor:
+ case op_orn:
+ case op_andn:
+ case op_sll_w:
+ case op_srl_w:
+ case op_sra_w:
+ case op_sll_d:
+ case op_srl_d:
+ case op_sra_d:
+ case op_rotr_w:
+ case op_rotr_d:
+ case op_mul_w:
+ case op_mul_d:
+ case op_mulh_d:
+ case op_mulh_du:
+ case op_mulh_w:
+ case op_mulh_wu:
+ case op_mulw_d_w:
+ case op_mulw_d_wu:
+ case op_div_w:
+ case op_mod_w:
+ case op_div_wu:
+ case op_mod_wu:
+ case op_div_d:
+ case op_mod_d:
+ case op_div_du:
+ case op_mod_du:
+ case op_break:
+ case op_fadd_s:
+ case op_fadd_d:
+ case op_fsub_s:
+ case op_fsub_d:
+ case op_fmul_s:
+ case op_fmul_d:
+ case op_fdiv_s:
+ case op_fdiv_d:
+ case op_fmax_s:
+ case op_fmax_d:
+ case op_fmin_s:
+ case op_fmin_d:
+ case op_fmaxa_s:
+ case op_fmaxa_d:
+ case op_fmina_s:
+ case op_fmina_d:
+ case op_fcopysign_s:
+ case op_fcopysign_d:
+ case op_ldx_b:
+ case op_ldx_h:
+ case op_ldx_w:
+ case op_ldx_d:
+ case op_stx_b:
+ case op_stx_h:
+ case op_stx_w:
+ case op_stx_d:
+ case op_ldx_bu:
+ case op_ldx_hu:
+ case op_ldx_wu:
+ case op_fldx_s:
+ case op_fldx_d:
+ case op_fstx_s:
+ case op_fstx_d:
+ case op_amswap_w:
+ case op_amswap_d:
+ case op_amadd_w:
+ case op_amadd_d:
+ case op_amand_w:
+ case op_amand_d:
+ case op_amor_w:
+ case op_amor_d:
+ case op_amxor_w:
+ case op_amxor_d:
+ case op_ammax_w:
+ case op_ammax_d:
+ case op_ammin_w:
+ case op_ammin_d:
+ case op_ammax_wu:
+ case op_ammax_du:
+ case op_ammin_wu:
+ case op_ammin_du:
+ case op_amswap_db_w:
+ case op_amswap_db_d:
+ case op_amadd_db_w:
+ case op_amadd_db_d:
+ case op_amand_db_w:
+ case op_amand_db_d:
+ case op_amor_db_w:
+ case op_amor_db_d:
+ case op_amxor_db_w:
+ case op_amxor_db_d:
+ case op_ammax_db_w:
+ case op_ammax_db_d:
+ case op_ammin_db_w:
+ case op_ammin_db_d:
+ case op_ammax_db_wu:
+ case op_ammax_db_du:
+ case op_ammin_db_wu:
+ case op_ammin_db_du:
+ case op_dbar:
+ case op_ibar:
+ kType = kOp17Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp22Type
+ switch (bits(31, 10) << 10) {
+ case op_clo_w:
+ case op_clz_w:
+ case op_cto_w:
+ case op_ctz_w:
+ case op_clo_d:
+ case op_clz_d:
+ case op_cto_d:
+ case op_ctz_d:
+ case op_revb_2h:
+ case op_revb_4h:
+ case op_revb_2w:
+ case op_revb_d:
+ case op_revh_2w:
+ case op_revh_d:
+ case op_bitrev_4b:
+ case op_bitrev_8b:
+ case op_bitrev_w:
+ case op_bitrev_d:
+ case op_ext_w_h:
+ case op_ext_w_b:
+ case op_fabs_s:
+ case op_fabs_d:
+ case op_fneg_s:
+ case op_fneg_d:
+ case op_fsqrt_s:
+ case op_fsqrt_d:
+ case op_fmov_s:
+ case op_fmov_d:
+ case op_movgr2fr_w:
+ case op_movgr2fr_d:
+ case op_movgr2frh_w:
+ case op_movfr2gr_s:
+ case op_movfr2gr_d:
+ case op_movfrh2gr_s:
+ case op_movfcsr2gr:
+ case op_movfr2cf:
+ case op_movgr2cf:
+ case op_fcvt_s_d:
+ case op_fcvt_d_s:
+ case op_ftintrm_w_s:
+ case op_ftintrm_w_d:
+ case op_ftintrm_l_s:
+ case op_ftintrm_l_d:
+ case op_ftintrp_w_s:
+ case op_ftintrp_w_d:
+ case op_ftintrp_l_s:
+ case op_ftintrp_l_d:
+ case op_ftintrz_w_s:
+ case op_ftintrz_w_d:
+ case op_ftintrz_l_s:
+ case op_ftintrz_l_d:
+ case op_ftintrne_w_s:
+ case op_ftintrne_w_d:
+ case op_ftintrne_l_s:
+ case op_ftintrne_l_d:
+ case op_ftint_w_s:
+ case op_ftint_w_d:
+ case op_ftint_l_s:
+ case op_ftint_l_d:
+ case op_ffint_s_w:
+ case op_ffint_s_l:
+ case op_ffint_d_w:
+ case op_ffint_d_l:
+ case op_frint_s:
+ case op_frint_d:
+ kType = kOp22Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp24Type
+ switch (bits(31, 8) << 8) {
+ case op_movcf2fr:
+ case op_movcf2gr:
+ kType = kOp24Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ return kType;
+}
+
+// C/C++ argument slots size.
+const int kCArgSlotCount = 0;
+const int kCArgsSlotsSize = kCArgSlotCount * sizeof(uintptr_t);
+
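+// Models one 4 KB page of the simulated instruction cache. Validity is
+// tracked per 4-byte line so that stale cached code can be detected.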
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache()
+ : Base(SimulatorProcess::singleton_->cacheLock_) {}
+};
+
+mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ SimulatorProcess::ICacheCheckingDisableCount(
+ 1); // Checking is disabled by default.
+SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
+
+int64_t Simulator::StopSimAt = -1;
+
+Simulator* Simulator::Create() {
+ auto sim = MakeUnique<Simulator>();
+ if (!sim) {
+ return nullptr;
+ }
+
+ if (!sim->init()) {
+ return nullptr;
+ }
+
+ int64_t stopAt;
+ char* stopAtStr = getenv("LOONG64_SIM_STOP_AT");
+ if (stopAtStr && sscanf(stopAtStr, "%" PRIi64, &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %" PRIi64 "\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim.release();
+}
+
+void Simulator::Destroy(Simulator* sim) { js_delete(sim); }
+
+// The loong64Debugger class is used by the simulator while debugging simulated
+// code.
+class loong64Debugger {
+ public:
+ explicit loong64Debugger(Simulator* sim) : sim_(sim) {}
+
+ void stop(SimInstruction* instr);
+ void debug();
+ // Print all registers with a nice formatting.
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0x7fff to easily recognize it.
+ static const Instr kBreakpointInstr = op_break | (0x7fff & CODEMask);
+ static const Instr kNopInstr = 0x0;
+
+ Simulator* sim_;
+
+ int64_t getRegisterValue(int regnum);
+ int64_t getFPURegisterValueLong(int regnum);
+ float getFPURegisterValueFloat(int regnum);
+ double getFPURegisterValueDouble(int regnum);
+ bool getValue(const char* desc, int64_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+static void UNIMPLEMENTED() {
+ printf("UNIMPLEMENTED instruction.\n");
+ MOZ_CRASH();
+}
+static void UNREACHABLE() {
+ printf("UNREACHABLE instruction.\n");
+ MOZ_CRASH();
+}
+static void UNSUPPORTED() {
+ printf("Unsupported instruction.\n");
+ MOZ_CRASH();
+}
+
+void loong64Debugger::stop(SimInstruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg =
+ *reinterpret_cast<char**>(sim_->get_pc() + SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watchedStops_[code].desc_) {
+ sim_->watchedStops_[code].desc_ = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int64_t loong64Debugger::getRegisterValue(int regnum) {
+ if (regnum == kPCRegister) {
+ return sim_->get_pc();
+ }
+ return sim_->getRegister(regnum);
+}
+
+int64_t loong64Debugger::getFPURegisterValueLong(int regnum) {
+ return sim_->getFpuRegister(regnum);
+}
+
+float loong64Debugger::getFPURegisterValueFloat(int regnum) {
+ return sim_->getFpuRegisterFloat(regnum);
+}
+
+double loong64Debugger::getFPURegisterValueDouble(int regnum) {
+ return sim_->getFpuRegisterDouble(regnum);
+}
+
+bool loong64Debugger::getValue(const char* desc, int64_t* value) {
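+  // Accept a register name, a 0x-prefixed hex literal, or a decimal literal.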
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+
+ if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc + 2, "%lx", reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+ return sscanf(desc, "%lu", reinterpret_cast<uint64_t*>(value)) == 1;
+}
+
+bool loong64Debugger::setBreakpoint(SimInstruction* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != nullptr) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+bool loong64Debugger::deleteBreakpoint(SimInstruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void loong64Debugger::undoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+}
+
+void loong64Debugger::redoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+ }
+}
+
+void loong64Debugger::printAllRegs() {
+ int64_t value;
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%016" PRIx64 " %20" PRIi64 " ", Registers::GetName(i),
+ value, value);
+
+ if (i % 2) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+
+ value = getRegisterValue(Simulator::pc);
+ printf(" pc: 0x%016" PRIx64 "\n", value);
+}
+
+void loong64Debugger::printAllRegsIncludingFPU() {
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i++) {
+ printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(i), getFPURegisterValueLong(i),
+ getFPURegisterValueFloat(i), getFPURegisterValueDouble(i));
+ }
+}
+
+static char* ReadLine(const char* prompt) {
+ UniqueChars result;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+ // Since we read a new line we are done reading the line. This
+ // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result.reset(js_pod_malloc<char>(len + 1));
+ if (!result) {
+ return nullptr;
+ }
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = js_pod_malloc<char>(new_len);
+ if (!new_result) {
+ return nullptr;
+ }
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result.get(), offset * sizeof(char));
+ result.reset(new_result);
+ }
+ // Copy the newly read line into the result.
+ memcpy(result.get() + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result.release();
+}
+
+static void DisassembleInstruction(uint64_t pc) {
+ printf("Not supported on loongarch64 yet\n");
+}
+
+void loong64Debugger::debug() {
+ intptr_t lastPC = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (lastPC != sim_->get_pc()) {
+ DisassembleInstruction(sim_->get_pc());
+ printf(" 0x%016" PRIi64 " \n", sim_->get_pc());
+ lastPC = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+        // Ownership is transferred to sim_.
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!instr->isTrap()) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize);
+ }
+ sim_->icount_++;
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ sim_->icount_++;
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ Register reg = Register::FromName(arg1);
+ FloatRegisters::Code fReg = FloatRegisters::FromName(arg1);
+ if (reg != InvalidReg) {
+ value = getRegisterValue(reg.code());
+ printf("%s: 0x%016" PRIi64 " %20" PRIi64 " \n", arg1, value,
+ value);
+ } else if (fReg != FloatRegisters::Invalid) {
+ printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(fReg),
+ getFPURegisterValueLong(fReg),
+ getFPURegisterValueFloat(fReg),
+ getFPURegisterValueDouble(fReg));
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->getRegister(Simulator::sp));
+ } else { // Command "mem".
+ int64_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%016" PRIx64 " %20" PRIi64, cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ DisassembleInstruction(uint64_t(cur));
+ cur += SimInstruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) {
+ printf("setting breakpoint failed\n");
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("No flags on LOONG64 !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address = reinterpret_cast<SimInstruction*>(
+ stop_pc + SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("printobject <register>\n");
+ printf(" print an object from a register (alias 'po')\n");
+ printf("stack [<words>]\n");
+ printf(" dump stack content, default dump 10 words)\n");
+ printf("mem <address> [<words>]\n");
+ printf(" dump memory content, default dump 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print infos about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
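+// Returns true if the range [start, start + size] lies entirely within a
+// single cache page.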
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::setLastDebuggerInput(char* input) {
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* page) {
+ SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p) {
+ return p->value();
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page)) {
+ oomUnsafe.crash("Simulator CachePage");
+ }
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ intptr_t start, int size) {
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+static void FlushICacheLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* start_addr, size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
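+  // Round the start address down and the size up to cache-line boundaries,
+  // then invalidate the affected lines one page at a time.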
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePageLocked(i_cache, start, size);
+ }
+}
+
+/* static */
+void SimulatorProcess::checkICacheLocked(SimInstruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(icache(), page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ mozilla::DebugOnly<int> cmpret =
+ memcmp(reinterpret_cast<void*>(instr), cache_page->cachedData(offset),
+ SimInstruction::kInstrSize);
+ MOZ_ASSERT(cmpret == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber SimulatorProcess::ICacheHasher::hash(const Lookup& l) {
+ return U32(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool SimulatorProcess::ICacheHasher::match(const Key& k, const Lookup& l) {
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+/* static */
+void SimulatorProcess::FlushICache(void* start_addr, size_t size) {
+ if (!ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ js::jit::FlushICacheLocked(icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator() {
+ // Set up simulator support first. Some of this information is needed to
+ // setup the architecture state.
+
+ // Note, allocation and anything that depends on allocated memory is
+ // deferred until init(), in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < Register::kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+
+ for (int i = 0; i < kNumCFRegisters; i++) {
+ CFregisters_[i] = 0;
+ }
+
+ FCSR_ = 0;
+ LLBit_ = false;
+ LLAddr_ = 0;
+ lastLLValue_ = 0;
+
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
+
+ lastDebuggerInput_ = nullptr;
+}
+
+bool Simulator::init() {
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = js_pod_malloc<char>(stackSize);
+ if (!stack_) {
+ return false;
+ }
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To guard against potential stack underflows, we
+  // leave some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ friend class SimulatorProcess;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(kCallRedirInstr),
+ type_(type),
+ next_(nullptr) {
+ next_ = SimulatorProcess::redirection();
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ FlushICacheLocked(SimulatorProcess::icache(), addressOfSwiInstruction(),
+ SimInstruction::kInstrSize);
+ }
+ SimulatorProcess::setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ AutoLockSimulatorCache als;
+
+ Redirection* current = SimulatorProcess::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ // Note: we can't use js_new here because the constructor is private.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = js_pod_malloc<Redirection>(1);
+ if (!redir) {
+ oomUnsafe.crash("Simulator redirection");
+ }
+ new (redir) Redirection(nativeFunction, type);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
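+    // Recover the Redirection that embeds this swi instruction by subtracting
+    // the field's offset within the object.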
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection =
+ addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+Simulator::~Simulator() { js_free(stack_); }
+
+SimulatorProcess::SimulatorProcess()
+ : cacheLock_(mutexid::SimulatorCacheLock), redirection_(nullptr) {
+ if (getenv("LOONG64_SIM_ICACHE_CHECKS")) {
+ ICacheCheckingDisableCount = 0;
+ }
+}
+
+SimulatorProcess::~SimulatorProcess() {
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */
+void* Simulator::RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::Current() {
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ return cx->simulator();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::setRegister(int reg, int64_t value) {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::setFpuRegister(int fpureg, int64_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::setFpuRegisterHiWord(int fpureg, int32_t value) {
+ // Set ONLY upper 32-bits, leaving lower bits untouched.
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ int32_t* phiword;
+ phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+
+ *phiword = value;
+}
+
+void Simulator::setFpuRegisterWord(int fpureg, int32_t value) {
+ // Set ONLY lower 32-bits, leaving upper bits untouched.
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ int32_t* pword;
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+
+ *pword = value;
+}
+
+void Simulator::setFpuRegisterWordInvalidResult(float original, float rounded,
+ int fpureg) {
+ double max_int32 = static_cast<double>(INT32_MAX);
+ double min_int32 = static_cast<double>(INT32_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegisterWord(fpureg, 0);
+ } else if (rounded > max_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterWordInvalidResult(double original, double rounded,
+ int fpureg) {
+ double max_int32 = static_cast<double>(INT32_MAX);
+ double min_int32 = static_cast<double>(INT32_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegisterWord(fpureg, 0);
+ } else if (rounded > max_int32) {
+ setFpuRegisterWord(fpureg, kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ setFpuRegisterWord(fpureg, kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterInvalidResult(float original, float rounded,
+ int fpureg) {
+ double max_int32 = static_cast<double>(INT32_MAX);
+ double min_int32 = static_cast<double>(INT32_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegister(fpureg, 0);
+ } else if (rounded > max_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterInvalidResult(double original, double rounded,
+ int fpureg) {
+ double max_int32 = static_cast<double>(INT32_MAX);
+ double min_int32 = static_cast<double>(INT32_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegister(fpureg, 0);
+ } else if (rounded > max_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterInvalidResult64(float original, float rounded,
+ int fpureg) {
+ // INT64_MAX (2^63-1) can't be represented exactly as a double, so max_int64
+ // holds the closest representable value, 2^63.
+ double max_int64 = static_cast<double>(INT64_MAX);
+ double min_int64 = static_cast<double>(INT64_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegister(fpureg, 0);
+ } else if (rounded >= max_int64) {
+ setFpuRegister(fpureg, kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ setFpuRegister(fpureg, kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterInvalidResult64(double original, double rounded,
+ int fpureg) {
+ // INT64_MAX (2^63-1) can't be represented exactly as a double, so max_int64
+ // holds the closest representable value, 2^63.
+ double max_int64 = static_cast<double>(INT64_MAX);
+ double min_int64 = static_cast<double>(INT64_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegister(fpureg, 0);
+ } else if (rounded >= max_int64) {
+ setFpuRegister(fpureg, kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ setFpuRegister(fpureg, kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterFloat(int fpureg, float value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterDouble(int fpureg, double value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setCFRegister(int cfreg, bool value) {
+ MOZ_ASSERT((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ CFregisters_[cfreg] = value;
+}
+
+bool Simulator::getCFRegister(int cfreg) const {
+ MOZ_ASSERT((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ return CFregisters_[cfreg];
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::getRegister(int reg) const {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == 0) {
+ return 0;
+ }
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+int64_t Simulator::getFpuRegister(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t Simulator::getFpuRegisterWord(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]);
+}
+
+int32_t Simulator::getFpuRegisterSignedWord(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]);
+}
+
+int32_t Simulator::getFpuRegisterHiWord(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1);
+}
+
+float Simulator::getFpuRegisterFloat(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]);
+}
+
+double Simulator::getFpuRegisterDouble(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]);
+}
+
+void Simulator::setCallResultDouble(double result) {
+ setFpuRegisterDouble(f0, result);
+}
+
+void Simulator::setCallResultFloat(float result) {
+ setFpuRegisterFloat(f0, result);
+}
+
+void Simulator::setCallResult(int64_t res) { setRegister(a0, res); }
+
+void Simulator::setCallResult(__int128_t res) {
+ setRegister(a0, I64(res));
+ setRegister(a1, I64(res >> 64));
+}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::setFCSRBit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+bool Simulator::testFCSRBit(uint32_t cc) { return FCSR_ & (1 << cc); }
+
+unsigned int Simulator::getFCSRRoundingMode() {
+ return FCSR_ & kFPURoundingModeMask;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
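+// The *CauseBit fields are cleared up front and describe only this operation;
+// the *FlagBit fields are sticky and are never cleared here.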
+template <typename T>
+bool Simulator::setFCSRRoundError(double original, double rounded) {
+ bool ret = false;
+
+ setFCSRBit(kFCSRInexactCauseBit, false);
+ setFCSRBit(kFCSRUnderflowCauseBit, false);
+ setFCSRBit(kFCSROverflowCauseBit, false);
+ setFCSRBit(kFCSRInvalidOpCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ setFCSRBit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ setFCSRBit(kFCSRUnderflowFlagBit, true);
+ setFCSRBit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if ((long double)rounded > (long double)std::numeric_limits<T>::max() ||
+ (long double)rounded < (long double)std::numeric_limits<T>::min()) {
+ setFCSRBit(kFCSROverflowFlagBit, true);
+ setFCSRBit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// For cvt instructions only
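+// The rounding mode is taken from the FCSR RM field (bits [9:8]) and selects
+// round-to-nearest (ties to even), toward zero, toward +infinity, or toward
+// -infinity.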
+template <typename T>
+void Simulator::roundAccordingToFCSR(T toRound, T* rounded,
+ int32_t* rounded_int) {
+ switch ((FCSR_ >> 8) & 3) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ }
+}
+
+template <typename T>
+void Simulator::round64AccordingToFCSR(T toRound, T* rounded,
+ int64_t* rounded_int) {
+ switch ((FCSR_ >> 8) & 3) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ }
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const { return registers_[pc]; }
+
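+// Snapshot pc/fp/sp/ra so that wasm trap handling (see softwareInterrupt) can
+// hand a register state to HandleIllegalInstruction.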
+JS::ProfilingFrameIterator::RegisterState Simulator::registerState() {
+ wasm::RegisterState state;
+ state.pc = (void*)get_pc();
+ state.fp = (void*)getRegister(fp);
+ state.sp = (void*)getRegister(sp);
+ state.lr = (void*)getRegister(ra);
+ return state;
+}
+
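+// Memory accessors. handleWasmSegFault() gives the wasm machinery a chance to
+// claim an out-of-bounds access; if it does, loads return a dummy value and
+// stores are dropped. Stores of halfword size and larger also clear LLBit_ so
+// that a pending LL/SC pair fails.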
+uint8_t Simulator::readBU(uint64_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return 0xff;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int8_t Simulator::readB(uint64_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeB(uint64_t addr, uint8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void Simulator::writeB(uint64_t addr, int8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+uint16_t Simulator::readHU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return 0xffff;
+ }
+
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+}
+
+int16_t Simulator::readH(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return -1;
+ }
+
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+void Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+uint32_t Simulator::readWU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ return *ptr;
+}
+
+int32_t Simulator::readW(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+void Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+int64_t Simulator::readDW(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+double Simulator::readD(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return NAN;
+ }
+
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeD(uint64_t addr, double value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ double* ptr = reinterpret_cast<double*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
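+// LL/SC emulation: loadLinked* remembers the address and value and sets
+// LLBit_; any intervening simulated store clears LLBit_, and storeConditional*
+// succeeds only if LLBit_ is still set, using a compare-exchange against the
+// remembered value.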
+int Simulator::loadLinkedW(uint64_t addr, SimInstruction* instr) {
+ if ((addr & 3) == 0) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
+ int32_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalW(uint64_t addr, int value,
+ SimInstruction* instr) {
+ // Correct behavior in this case, as defined by the architecture, is to just
+ // return 0, but there is no point in allowing that. It is certainly an
+ // indicator of a bug.
+ if (addr != LLAddr_) {
+ printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIx64
+ ", expected: 0x%016" PRIx64 "\n",
+ addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & 3) == 0) {
+ SharedMem<int32_t*> ptr =
+ SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+
+ if (!LLBit_) {
+ return 0;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int32_t expected = int32_t(lastLLValue_);
+ int32_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
+ return (old == expected) ? 1 : 0;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int64_t Simulator::loadLinkedD(uint64_t addr, SimInstruction* instr) {
+ if ((addr & kPointerAlignmentMask) == 0) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ volatile int64_t* ptr = reinterpret_cast<volatile int64_t*>(addr);
+ int64_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr) {
+ // Correct behavior in this case, as defined by the architecture, is to just
+ // return 0, but there is no point in allowing that. It is certainly an
+ // indicator of a bug.
+ if (addr != LLAddr_) {
+ printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIx64
+ ", expected: 0x%016" PRIx64 "\n",
+ addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0) {
+ SharedMem<int64_t*> ptr =
+ SharedMem<int64_t*>::shared(reinterpret_cast<int64_t*>(addr));
+
+ if (!LLBit_) {
+ return 0;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int64_t expected = lastLLValue_;
+ int64_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int64_t(value));
+ return (old == expected) ? 1 : 0;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+uintptr_t Simulator::stackLimit() const { return stackLimit_; }
+
+uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; }
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = getRegister(sp);
+ }
+ return newsp <= stackLimit();
+}
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Unsupported instructions use format to print an error and stop execution.
+void Simulator::format(SimInstruction* instr, const char* format) {
+ printf("Simulator found unsupported instruction:\n 0x%016lx: %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
+// Note: the code below assumes that all runtime calls return a 64-bit result.
+// If they don't, the a1 result register contains a bogus value, which is fine
+// because it is caller-saved.
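+// Each Prototype_* typedef below corresponds to an Args_* ABIFunctionType case
+// in softwareInterrupt(); integer arguments are taken from a0-a7 and
+// floating-point arguments from f0-f3.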
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int64_t arg0);
+typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1);
+typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2);
+typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4);
+typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5);
+typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5,
+ int64_t arg6);
+typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7);
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_Int_Float32)(float arg0);
+typedef int64_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int_IntDouble)(int64_t arg0, double arg1);
+typedef int64_t (*Prototype_Int_DoubleInt)(double arg0, int64_t arg1);
+typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, int64_t arg1,
+ int64_t arg2);
+typedef int64_t (*Prototype_Int_IntDoubleIntInt)(int64_t arg0, double arg1,
+ int64_t arg2, int64_t arg3);
+
+typedef float (*Prototype_Float32_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int64_t arg0);
+typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1,
+ double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0,
+ double arg1,
+ double arg2,
+ double arg3);
+
+typedef int32_t (*Prototype_Int32_General)(int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32)(int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32)(int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32)(int64_t, int32_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32)(
+ int64_t, int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32General)(
+ int64_t, int32_t, int32_t, int32_t, int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32General)(
+ int64_t, int32_t, int32_t, int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int64)(int64_t, int32_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32General)(int64_t, int32_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int64Int64)(int64_t, int32_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32)(int64_t, int32_t,
+ int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32Int32)(
+ int64_t, int32_t, int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneral)(int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralGeneral)(int64_t, int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32Int32)(int64_t, int64_t,
+ int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int32Int32)(int64_t, int64_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32)(int64_t, int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64)(int64_t, int64_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64General)(
+ int64_t, int64_t, int32_t, int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64General)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64General)(
+ int64_t, int64_t, int64_t, int64_t, int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32)(int64_t, int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32)(int64_t, int32_t,
+ int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32General)(int64_t, int32_t,
+ int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32GeneralInt32)(
+ int64_t, int32_t, int32_t, int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32)(
+ int64_t, int64_t, int32_t, int64_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32General)(int64_t, int64_t,
+ int32_t, int64_t);
+typedef int64_t (*Prototype_Int64_General)(int64_t);
+typedef int64_t (*Prototype_Int64_GeneralInt64)(int64_t, int64_t);
+
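+// Instruction operand accessors: the *_reg helpers return the register index
+// encoded in the instruction, while rj/rk/rd (and their _u/_float/_double
+// variants) return the current contents of that register.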
+inline int32_t Simulator::rj_reg(SimInstruction* instr) const {
+ return instr->rjValue();
+}
+
+inline int64_t Simulator::rj(SimInstruction* instr) const {
+ return getRegister(rj_reg(instr));
+}
+
+inline uint64_t Simulator::rj_u(SimInstruction* instr) const {
+ return static_cast<uint64_t>(getRegister(rj_reg(instr)));
+}
+
+inline int32_t Simulator::rk_reg(SimInstruction* instr) const {
+ return instr->rkValue();
+}
+
+inline int64_t Simulator::rk(SimInstruction* instr) const {
+ return getRegister(rk_reg(instr));
+}
+
+inline uint64_t Simulator::rk_u(SimInstruction* instr) const {
+ return static_cast<uint64_t>(getRegister(rk_reg(instr)));
+}
+
+inline int32_t Simulator::rd_reg(SimInstruction* instr) const {
+ return instr->rdValue();
+}
+
+inline int64_t Simulator::rd(SimInstruction* instr) const {
+ return getRegister(rd_reg(instr));
+}
+
+inline uint64_t Simulator::rd_u(SimInstruction* instr) const {
+ return static_cast<uint64_t>(getRegister(rd_reg(instr)));
+}
+
+inline int32_t Simulator::fa_reg(SimInstruction* instr) const {
+ return instr->faValue();
+}
+
+inline float Simulator::fa_float(SimInstruction* instr) const {
+ return getFpuRegisterFloat(fa_reg(instr));
+}
+
+inline double Simulator::fa_double(SimInstruction* instr) const {
+ return getFpuRegisterDouble(fa_reg(instr));
+}
+
+inline int32_t Simulator::fj_reg(SimInstruction* instr) const {
+ return instr->fjValue();
+}
+
+inline float Simulator::fj_float(SimInstruction* instr) const {
+ return getFpuRegisterFloat(fj_reg(instr));
+}
+
+inline double Simulator::fj_double(SimInstruction* instr) const {
+ return getFpuRegisterDouble(fj_reg(instr));
+}
+
+inline int32_t Simulator::fk_reg(SimInstruction* instr) const {
+ return instr->fkValue();
+}
+
+inline float Simulator::fk_float(SimInstruction* instr) const {
+ return getFpuRegisterFloat(fk_reg(instr));
+}
+
+inline double Simulator::fk_double(SimInstruction* instr) const {
+ return getFpuRegisterDouble(fk_reg(instr));
+}
+
+inline int32_t Simulator::fd_reg(SimInstruction* instr) const {
+ return instr->fdValue();
+}
+
+inline float Simulator::fd_float(SimInstruction* instr) const {
+ return getFpuRegisterFloat(fd_reg(instr));
+}
+
+inline double Simulator::fd_double(SimInstruction* instr) const {
+ return getFpuRegisterDouble(fd_reg(instr));
+}
+
+inline int32_t Simulator::cj_reg(SimInstruction* instr) const {
+ return instr->cjValue();
+}
+
+inline bool Simulator::cj(SimInstruction* instr) const {
+ return getCFRegister(cj_reg(instr));
+}
+
+inline int32_t Simulator::cd_reg(SimInstruction* instr) const {
+ return instr->cdValue();
+}
+
+inline bool Simulator::cd(SimInstruction* instr) const {
+ return getCFRegister(cd_reg(instr));
+}
+
+inline int32_t Simulator::ca_reg(SimInstruction* instr) const {
+ return instr->caValue();
+}
+
+inline bool Simulator::ca(SimInstruction* instr) const {
+ return getCFRegister(ca_reg(instr));
+}
+
+inline uint32_t Simulator::sa2(SimInstruction* instr) const {
+ return instr->sa2Value();
+}
+
+inline uint32_t Simulator::sa3(SimInstruction* instr) const {
+ return instr->sa3Value();
+}
+
+inline uint32_t Simulator::ui5(SimInstruction* instr) const {
+ return instr->imm5Value();
+}
+
+inline uint32_t Simulator::ui6(SimInstruction* instr) const {
+ return instr->imm6Value();
+}
+
+inline uint32_t Simulator::lsbw(SimInstruction* instr) const {
+ return instr->lsbwValue();
+}
+
+inline uint32_t Simulator::msbw(SimInstruction* instr) const {
+ return instr->msbwValue();
+}
+
+inline uint32_t Simulator::lsbd(SimInstruction* instr) const {
+ return instr->lsbdValue();
+}
+
+inline uint32_t Simulator::msbd(SimInstruction* instr) const {
+ return instr->msbdValue();
+}
+
+inline uint32_t Simulator::cond(SimInstruction* instr) const {
+ return instr->condValue();
+}
+
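+// Immediate accessors: the si* helpers sign-extend their field with a
+// shift-left/arithmetic-shift-right pair; ui12 is zero-extended.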
+inline int32_t Simulator::si12(SimInstruction* instr) const {
+ return (instr->imm12Value() << 20) >> 20;
+}
+
+inline uint32_t Simulator::ui12(SimInstruction* instr) const {
+ return instr->imm12Value();
+}
+
+inline int32_t Simulator::si14(SimInstruction* instr) const {
+ return (instr->imm14Value() << 18) >> 18;
+}
+
+inline int32_t Simulator::si16(SimInstruction* instr) const {
+ return (instr->imm16Value() << 16) >> 16;
+}
+
+inline int32_t Simulator::si20(SimInstruction* instr) const {
+ return (instr->imm20Value() << 12) >> 12;
+}
+
+// Software interrupt instructions are used by the simulator to call into C++.
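+// A break whose full encoding is kCallRedirInstr is a redirected native call;
+// a break whose code field is kWasmTrapCode is a wasm trap; the remaining
+// codes are watchpoints and stops used by the debugger.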
+void Simulator::softwareInterrupt(SimInstruction* instr) {
+ // The break_ instruction could get us here.
+ mozilla::DebugOnly<int32_t> opcode_hi15 = instr->bits(31, 17);
+ MOZ_ASSERT(opcode_hi15 == 0x15);
+ uint32_t code = instr->bits(14, 0);
+
+ if (instr->instructionBits() == kCallRedirInstr) {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ uintptr_t nativeFn =
+ reinterpret_cast<uintptr_t>(redirection->nativeFunction());
+
+ int64_t arg0 = getRegister(a0);
+ int64_t arg1 = getRegister(a1);
+ int64_t arg2 = getRegister(a2);
+ int64_t arg3 = getRegister(a3);
+ int64_t arg4 = getRegister(a4);
+ int64_t arg5 = getRegister(a5);
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = getRegister(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target =
+ reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target =
+ reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target =
+ reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target =
+ reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ if (external == intptr_t(&js::wasm::Instance::wake_m32)) {
+ result = int32_t(result);
+ }
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target =
+ reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target =
+ reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target =
+ reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target =
+ reinterpret_cast<Prototype_General7>(external);
+ int64_t arg6 = getRegister(a6);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target =
+ reinterpret_cast<Prototype_General8>(external);
+ int64_t arg6 = getRegister(a6);
+ int64_t arg7 = getRegister(a7);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target =
+ reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(0);
+ Prototype_Int_Float32 target =
+ reinterpret_cast<Prototype_Int_Float32>(external);
+ int64_t result = target(fval0);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0 = getFpuRegisterDouble(0);
+ Prototype_Int_Double target =
+ reinterpret_cast<Prototype_Int_Double>(external);
+ int64_t result = target(dval0);
+ if (external == intptr_t((int32_t(*)(double))JS::ToInt32)) {
+ result = int32_t(result);
+ }
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralGeneralInt64: {
+ Prototype_GeneralGeneralGeneralInt64 target =
+ reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (external == intptr_t(&js::wasm::Instance::wait_i32_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralInt64Int64: {
+ Prototype_GeneralGeneralInt64Int64 target =
+ reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (external == intptr_t(&js::wasm::Instance::wait_i64_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_DoubleInt: {
+ double dval = getFpuRegisterDouble(0);
+ Prototype_Int_DoubleInt target =
+ reinterpret_cast<Prototype_Int_DoubleInt>(external);
+ int64_t result = target(dval, arg0);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(0);
+ Prototype_Int_DoubleIntInt target =
+ reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int64_t result = target(dval, arg0, arg1);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getFpuRegisterDouble(0);
+ Prototype_Int_IntDoubleIntInt target =
+ reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int64_t result = target(arg0, dval, arg1, arg2);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0 = getFpuRegisterDouble(0);
+ Prototype_Double_Double target =
+ reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(0);
+ Prototype_Float32_Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Float32_Float32Float32: {
+ float fval0;
+ float fval1;
+ fval0 = getFpuRegisterFloat(0);
+ fval1 = getFpuRegisterFloat(1);
+ Prototype_Float32_Float32Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32Float32>(external);
+ float fresult = target(fval0, fval1);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target =
+ reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0 = getFpuRegisterDouble(0);
+ Prototype_Double_DoubleInt target =
+ reinterpret_cast<Prototype_Double_DoubleInt>(external);
+ double dresult = target(dval0, arg0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0 = getFpuRegisterDouble(0);
+ double dval1 = getFpuRegisterDouble(1);
+ Prototype_Double_DoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ double dval0 = getFpuRegisterDouble(0);
+ Prototype_Double_IntDouble target =
+ reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(arg0, dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ double dval0 = getFpuRegisterDouble(0);
+ Prototype_Int_IntDouble target =
+ reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int64_t result = target(arg0, dval0);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(0);
+ double dval1 = getFpuRegisterDouble(1);
+ double dval2 = getFpuRegisterDouble(2);
+ Prototype_Double_DoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(0);
+ double dval1 = getFpuRegisterDouble(1);
+ double dval2 = getFpuRegisterDouble(2);
+ double dval3 = getFpuRegisterDouble(3);
+ Prototype_Double_DoubleDoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(
+ external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int32_General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_General>(nativeFn)(arg0);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32>(nativeFn)(
+ arg0, I32(arg1));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4),
+ I32(arg5));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32General: {
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralInt32Int32Int32Int32General>(nativeFn)(
+ arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4), arg5);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), arg4);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int64>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int64Int64>(
+ nativeFn)(arg0, I32(arg1), arg2, arg3);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3), I32(arg4));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneral>(
+ nativeFn)(arg0, arg1);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneralGeneral>(
+ nativeFn)(arg0, arg1, arg2);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3), I32(arg4));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32>(
+ nativeFn)(arg0, arg1, I32(arg2));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64General>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, arg4);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3, arg4);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_General_GeneralInt32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32>(
+ nativeFn)(arg0, I32(arg1));
+ setRegister(a0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32Int32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ setRegister(a0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32General: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32General>(
+ nativeFn)(arg0, I32(arg1), arg2);
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32GeneralInt32: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32Int32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3, I32(arg4));
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32: {
+ int64_t arg6 = getRegister(a6);
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, I32(arg4), I32(arg5),
+ I32(arg6));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32General>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int64_General: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_General>(nativeFn)(arg0);
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_Int64_GeneralInt64: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_GeneralInt64>(nativeFn)(
+ arg0, arg1);
+ setRegister(a0, ret);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unknown function type.");
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+ } else if ((instr->bits(31, 15) << 15 == op_break) && code == kWasmTrapCode) {
+ uint8_t* newPC;
+ if (wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc(int64_t(newPC));
+ return;
+ }
+ } else if ((instr->bits(31, 15) << 15 == op_break) && code <= kMaxStopCode &&
+ code != 6) {
+ if (isWatchpoint(code)) {
+ // printWatchpoint(code);
+ } else {
+ increaseStopCounter(code);
+ handleStop(code, instr);
+ }
+ } else {
+ // All remaining break_ codes and all traps are handled here.
+ loong64Debugger dbg(this);
+ dbg.debug();
+ }
+}
+
+// Stop helper functions.
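+// Break codes up to kMaxWatchpointCode are watchpoints; codes up to
+// kMaxStopCode are stops, which can be enabled, disabled, and counted via
+// watchedStops_.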
+bool Simulator::isWatchpoint(uint32_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+void Simulator::printWatchpoint(uint32_t code) {
+ loong64Debugger dbg(this);
+ ++break_count_;
+ printf("\n---- break %d marker: %20" PRIi64 " (instr count: %20" PRIi64
+ ") ----\n",
+ code, break_count_, icount_);
+ dbg.printAllRegs(); // Print registers and continue running.
+}
+
+void Simulator::handleStop(uint32_t code, SimInstruction* instr) {
+ // Stop if it is enabled; otherwise skip over the stop instruction and
+ // continue execution.
+ if (isEnabledStop(code)) {
+ loong64Debugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 1 * SimInstruction::kInstrSize);
+ }
+}
+
+bool Simulator::isStopInstruction(SimInstruction* instr) {
+ int32_t opcode_hi15 = instr->bits(31, 17);
+ uint32_t code = static_cast<uint32_t>(instr->bits(14, 0));
+ return (opcode_hi15 == 0x15) && code > kMaxWatchpointCode &&
+ code <= kMaxStopCode;
+}
+
+bool Simulator::isEnabledStop(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void Simulator::enableStop(uint32_t code) {
+ if (!isEnabledStop(code)) {
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::disableStop(uint32_t code) {
+ if (isEnabledStop(code)) {
+ watchedStops_[code].count_ |= kStopDisabledBit;
+ }
+}
+
+void Simulator::increaseStopCounter(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+ printf(
+ "Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
+
+// Print a stop status.
+void Simulator::printStopInfo(uint32_t code) {
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
+ count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
+ }
+ }
+}
+
+void Simulator::signalExceptions() {
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0) {
+ MOZ_CRASH("Error: Exception raised.");
+ }
+ }
+}
+
+// ReverseBits(value) returns |value| in reverse bit order.
+template <typename T>
+T ReverseBits(T value) {
+ MOZ_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8));
+ T result = 0;
+ for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+// Min/Max template functions for Double and Single arguments.
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+ return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+ return fabsf(a);
+}
+
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
+
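+// Shared NaN/signed-zero handling for the min/max helpers below: with a NaN
+// operand the other operand is returned (a NaN only if both are NaN); for
+// equal operands (including -0.0 == 0.0) the sign bit and the requested kind
+// pick the result. Ordinary unequal values fall through to the caller's
+// comparison.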
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
+ if (std::isnan(a) && std::isnan(b)) {
+ *result = a;
+ } else if (std::isnan(a)) {
+ *result = b;
+ } else if (std::isnan(b)) {
+ *result = a;
+ } else if (b == a) {
+ // Handle -0.0 == 0.0 case.
+ // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
+ // negates the result.
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+template <typename T>
+static T FPUMin(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ return result;
+ } else {
+ return b < a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
+ return result;
+ } else {
+ return b > a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ if (FPAbs(a) < FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) < FPAbs(a)) {
+ result = b;
+ } else {
+ result = a < b ? a : b;
+ }
+ }
+ return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ if (FPAbs(a) > FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) > FPAbs(a)) {
+ result = b;
+ } else {
+ result = a > b ? a : b;
+ }
+ }
+ return result;
+}
+
+enum class KeepSign : bool { no = false, yes };
+
+// Handle execution based on instruction types.
+// decodeTypeImmediate
+void Simulator::decodeTypeOp6(SimInstruction* instr) {
+ // Next pc.
+ int64_t next_pc = bad_ra;
+
+ // Used for memory instructions.
+ int64_t alu_out = 0;
+
+ // Branch instructions common part.
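+ // Branch offsets are encoded in instruction units and split across
+ // non-contiguous fields; the helpers reassemble and sign-extend them, then
+ // scale by 4 (<< 2) to get a byte offset.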
+ auto BranchAndLinkHelper = [this, &next_pc](SimInstruction* instr) {
+ int64_t current_pc = get_pc();
+ setRegister(ra, current_pc + SimInstruction::kInstrSize);
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr->bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr->bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ set_pc(next_pc);
+ };
+
+ auto BranchOff16Helper = [this, &next_pc](SimInstruction* instr,
+ bool do_branch) {
+ int64_t current_pc = get_pc();
+ int32_t offs16 = static_cast<int32_t>(instr->bits(25, 10) << 16) >> 16;
+ int32_t offs = do_branch ? (offs16 << 2) : SimInstruction::kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff21Helper = [this, &next_pc](SimInstruction* instr,
+ bool do_branch) {
+ int64_t current_pc = get_pc();
+ int32_t offs21_low16 =
+ static_cast<uint32_t>(instr->bits(25, 10) << 16) >> 16;
+ int32_t offs21_high5 = static_cast<int32_t>(instr->bits(4, 0) << 27) >> 11;
+ int32_t offs = offs21_low16 | offs21_high5;
+ offs = do_branch ? (offs << 2) : SimInstruction::kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff26Helper = [this, &next_pc](SimInstruction* instr) {
+ int64_t current_pc = get_pc();
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr->bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr->bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ set_pc(next_pc);
+ };
+
+ auto JumpOff16Helper = [this, &next_pc](SimInstruction* instr) {
+ int32_t offs16 = static_cast<int32_t>(instr->bits(25, 10) << 16) >> 16;
+ setRegister(rd_reg(instr), get_pc() + SimInstruction::kInstrSize);
+ next_pc = rj(instr) + (offs16 << 2);
+ set_pc(next_pc);
+ };
+
+ switch (instr->bits(31, 26) << 26) {
+ case op_addu16i_d: {
+ int32_t si16_upper = static_cast<int32_t>(si16(instr)) << 16;
+ alu_out = static_cast<int64_t>(si16_upper) + rj(instr);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_beqz: {
+ BranchOff21Helper(instr, rj(instr) == 0);
+ break;
+ }
+ case op_bnez: {
+ BranchOff21Helper(instr, rj(instr) != 0);
+ break;
+ }
+ case op_bcz: {
+ if (instr->bits(9, 8) == 0b00) {
+ // BCEQZ
+ BranchOff21Helper(instr, cj(instr) == false);
+ } else if (instr->bits(9, 8) == 0b01) {
+ // BCNEZ
+ BranchOff21Helper(instr, cj(instr) == true);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case op_jirl: {
+ JumpOff16Helper(instr);
+ break;
+ }
+ case op_b: {
+ BranchOff26Helper(instr);
+ break;
+ }
+ case op_bl: {
+ BranchAndLinkHelper(instr);
+ break;
+ }
+ case op_beq: {
+ BranchOff16Helper(instr, rj(instr) == rd(instr));
+ break;
+ }
+ case op_bne: {
+ BranchOff16Helper(instr, rj(instr) != rd(instr));
+ break;
+ }
+ case op_blt: {
+ BranchOff16Helper(instr, rj(instr) < rd(instr));
+ break;
+ }
+ case op_bge: {
+ BranchOff16Helper(instr, rj(instr) >= rd(instr));
+ break;
+ }
+ case op_bltu: {
+ BranchOff16Helper(instr, rj_u(instr) < rd_u(instr));
+ break;
+ }
+ case op_bgeu: {
+ BranchOff16Helper(instr, rj_u(instr) >= rd_u(instr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp7(SimInstruction* instr) {
+ int64_t alu_out;
+
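+ // 20-bit-immediate group: lu12i/lu32i load an immediate into part of rd,
+ // while the pcadd* instructions form PC-relative addresses at different
+ // granularities.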
+ switch (instr->bits(31, 25) << 25) {
+ case op_lu12i_w: {
+ int32_t si20_upper = static_cast<int32_t>(si20(instr) << 12);
+ setRegister(rd_reg(instr), static_cast<int64_t>(si20_upper));
+ break;
+ }
+ case op_lu32i_d: {
+ int32_t si20_signExtend = static_cast<int32_t>(si20(instr) << 12) >> 12;
+ int64_t lower_32bit_mask = 0xFFFFFFFF;
+ alu_out = (static_cast<int64_t>(si20_signExtend) << 32) |
+ (rd(instr) & lower_32bit_mask);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_pcaddi: {
+ int32_t si20_signExtend = static_cast<int32_t>(si20(instr) << 12) >> 10;
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_pcalau12i: {
+ int32_t si20_signExtend = static_cast<int32_t>(si20(instr) << 12);
+ int64_t current_pc = get_pc();
+ int64_t clear_lower12bit_mask = 0xFFFFFFFFFFFFF000;
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ setRegister(rd_reg(instr), alu_out & clear_lower12bit_mask);
+ break;
+ }
+ case op_pcaddu12i: {
+ int32_t si20_signExtend = static_cast<int32_t>(si20(instr) << 12);
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_pcaddu18i: {
+ int64_t si20_signExtend = (static_cast<int64_t>(si20(instr)) << 44) >> 26;
+ int64_t current_pc = get_pc();
+ alu_out = si20_signExtend + current_pc;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp8(SimInstruction* instr) {
+ int64_t addr = 0x0;
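+ // The si14 offset is in 4-byte units: the << 50 >> 48 pair sign-extends the
+ // 14-bit field and multiplies it by 4.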
+ int64_t si14_se = (static_cast<int64_t>(si14(instr)) << 50) >> 48;
+
+ switch (instr->bits(31, 24) << 24) {
+ case op_ldptr_w: {
+ setRegister(rd_reg(instr), readW(rj(instr) + si14_se, instr));
+ break;
+ }
+ case op_stptr_w: {
+ writeW(rj(instr) + si14_se, static_cast<int32_t>(rd(instr)), instr);
+ break;
+ }
+ case op_ldptr_d: {
+ setRegister(rd_reg(instr), readDW(rj(instr) + si14_se, instr));
+ break;
+ }
+ case op_stptr_d: {
+ writeDW(rj(instr) + si14_se, rd(instr), instr);
+ break;
+ }
+ case op_ll_w: {
+ addr = si14_se + rj(instr);
+ setRegister(rd_reg(instr), loadLinkedW(addr, instr));
+ break;
+ }
+ case op_sc_w: {
+ addr = si14_se + rj(instr);
+ setRegister(
+ rd_reg(instr),
+ storeConditionalW(addr, static_cast<int32_t>(rd(instr)), instr));
+ break;
+ }
+ case op_ll_d: {
+ addr = si14_se + rj(instr);
+ setRegister(rd_reg(instr), loadLinkedD(addr, instr));
+ break;
+ }
+ case op_sc_d: {
+ addr = si14_se + rj(instr);
+ setRegister(rd_reg(instr), storeConditionalD(addr, rd(instr), instr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp10(SimInstruction* instr) {
+ int64_t alu_out = 0x0;
+ int64_t si12_se = (static_cast<int64_t>(si12(instr)) << 52) >> 52;
+ uint64_t si12_ze = (static_cast<uint64_t>(ui12(instr)) << 52) >> 52;
+
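+ // BSTRINS.D inserts the low (msbd - lsbd + 1) bits of rj into rd[msbd:lsbd];
+ // BSTRPICK.D extracts rj[msbd:lsbd] into rd, zero-extended.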
+ switch (instr->bits(31, 22) << 22) {
+ case op_bstrins_d: {
+ uint8_t lsbd_ = lsbd(instr);
+ uint8_t msbd_ = msbd(instr);
+ MOZ_ASSERT(lsbd_ <= msbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out =
+ (rd_u(instr) & ~(mask << lsbd_)) | ((rj_u(instr) & mask) << lsbd_);
+ setRegister(rd_reg(instr), alu_out);
+ } else if (size == 64) {
+ setRegister(rd_reg(instr), rj(instr));
+ }
+ break;
+ }
+ case op_bstrpick_d: {
+ uint8_t lsbd_ = lsbd(instr);
+ uint8_t msbd_ = msbd(instr);
+ MOZ_ASSERT(lsbd_ <= msbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rj_u(instr) & (mask << lsbd_)) >> lsbd_;
+ setRegister(rd_reg(instr), alu_out);
+ } else if (size == 64) {
+ setRegister(rd_reg(instr), rj(instr));
+ }
+ break;
+ }
+ case op_slti: {
+ setRegister(rd_reg(instr), rj(instr) < si12_se ? 1 : 0);
+ break;
+ }
+ case op_sltui: {
+ setRegister(rd_reg(instr),
+ rj_u(instr) < static_cast<uint64_t>(si12_se) ? 1 : 0);
+ break;
+ }
+ case op_addi_w: {
+ int32_t alu32_out =
+ static_cast<int32_t>(rj(instr)) + static_cast<int32_t>(si12_se);
+ setRegister(rd_reg(instr), alu32_out);
+ break;
+ }
+ case op_addi_d: {
+ setRegister(rd_reg(instr), rj(instr) + si12_se);
+ break;
+ }
+ case op_lu52i_d: {
+ int64_t si12_se = static_cast<int64_t>(si12(instr)) << 52;
+ uint64_t mask = (1ULL << 52) - 1;
+ alu_out = si12_se + (rj(instr) & mask);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_andi: {
+ setRegister(rd_reg(instr), rj(instr) & si12_ze);
+ break;
+ }
+ case op_ori: {
+ setRegister(rd_reg(instr), rj_u(instr) | si12_ze);
+ break;
+ }
+ case op_xori: {
+ setRegister(rd_reg(instr), rj_u(instr) ^ si12_ze);
+ break;
+ }
+ case op_ld_b: {
+ setRegister(rd_reg(instr), readB(rj(instr) + si12_se));
+ break;
+ }
+ case op_ld_h: {
+ setRegister(rd_reg(instr), readH(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_ld_w: {
+ setRegister(rd_reg(instr), readW(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_ld_d: {
+ setRegister(rd_reg(instr), readDW(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_st_b: {
+ writeB(rj(instr) + si12_se, static_cast<int8_t>(rd(instr)));
+ break;
+ }
+ case op_st_h: {
+ writeH(rj(instr) + si12_se, static_cast<int16_t>(rd(instr)), instr);
+ break;
+ }
+ case op_st_w: {
+ writeW(rj(instr) + si12_se, static_cast<int32_t>(rd(instr)), instr);
+ break;
+ }
+ case op_st_d: {
+ writeDW(rj(instr) + si12_se, rd(instr), instr);
+ break;
+ }
+ case op_ld_bu: {
+ setRegister(rd_reg(instr), readBU(rj(instr) + si12_se));
+ break;
+ }
+ case op_ld_hu: {
+ setRegister(rd_reg(instr), readHU(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_ld_wu: {
+ setRegister(rd_reg(instr), readWU(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_fld_s: {
+ setFpuRegister(fd_reg(instr), kFPUInvalidResult); // Trash upper 32 bits.
+ setFpuRegisterWord(fd_reg(instr), readW(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_fst_s: {
+ int32_t alu_out_32 = static_cast<int32_t>(getFpuRegister(fd_reg(instr)));
+ writeW(rj(instr) + si12_se, alu_out_32, instr);
+ break;
+ }
+ case op_fld_d: {
+ setFpuRegisterDouble(fd_reg(instr), readD(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_fst_d: {
+ writeD(rj(instr) + si12_se, getFpuRegisterDouble(fd_reg(instr)), instr);
+ break;
+ }
+ case op_preld:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp11(SimInstruction* instr) {
+ int64_t alu_out = 0x0;
+
+ switch (instr->bits(31, 21) << 21) {
+ case op_bstr_w: {
+ MOZ_ASSERT(instr->bit(21) == 1);
+ uint8_t lsbw_ = lsbw(instr);
+ uint8_t msbw_ = msbw(instr);
+ MOZ_ASSERT(lsbw_ <= msbw_);
+ uint8_t size = msbw_ - lsbw_ + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ if (instr->bit(15) == 0) {
+ // BSTRINS_W
+ alu_out = static_cast<int32_t>((rd_u(instr) & ~(mask << lsbw_)) |
+ ((rj_u(instr) & mask) << lsbw_));
+ } else {
+ // BSTRPICK_W
+ alu_out =
+ static_cast<int32_t>((rj_u(instr) & (mask << lsbw_)) >> lsbw_);
+ }
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp12(SimInstruction* instr) {
+ switch (instr->bits(31, 20) << 20) {
+ case op_fmadd_s: {
+ setFpuRegisterFloat(
+ fd_reg(instr),
+ std::fma(fj_float(instr), fk_float(instr), fa_float(instr)));
+ break;
+ }
+ case op_fmadd_d: {
+ setFpuRegisterDouble(
+ fd_reg(instr),
+ std::fma(fj_double(instr), fk_double(instr), fa_double(instr)));
+ break;
+ }
+ case op_fmsub_s: {
+ setFpuRegisterFloat(
+ fd_reg(instr),
+ std::fma(-fj_float(instr), fk_float(instr), fa_float(instr)));
+ break;
+ }
+ case op_fmsub_d: {
+ setFpuRegisterDouble(
+ fd_reg(instr),
+ std::fma(-fj_double(instr), fk_double(instr), fa_double(instr)));
+ break;
+ }
+ case op_fnmadd_s: {
+ setFpuRegisterFloat(
+ fd_reg(instr),
+ std::fma(-fj_float(instr), fk_float(instr), -fa_float(instr)));
+ break;
+ }
+ case op_fnmadd_d: {
+ setFpuRegisterDouble(
+ fd_reg(instr),
+ std::fma(-fj_double(instr), fk_double(instr), -fa_double(instr)));
+ break;
+ }
+ case op_fnmsub_s: {
+ setFpuRegisterFloat(
+ fd_reg(instr),
+ std::fma(fj_float(instr), fk_float(instr), -fa_float(instr)));
+ break;
+ }
+ case op_fnmsub_d: {
+ setFpuRegisterDouble(
+ fd_reg(instr),
+ std::fma(fj_double(instr), fk_double(instr), -fa_double(instr)));
+ break;
+ }
+ case op_fcmp_cond_s: {
+ MOZ_ASSERT(instr->bits(4, 3) == 0);
+ float fj = fj_float(instr);
+ float fk = fk_float(instr);
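+      // Only the quiet (C*) compare conditions are implemented; the
+      // signaling (S*) variants fall through to UNIMPLEMENTED.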
+ switch (cond(instr)) {
+ case AssemblerLOONG64::CAF: {
+ setCFRegister(cd_reg(instr), false);
+ break;
+ }
+ case AssemblerLOONG64::CUN: {
+ setCFRegister(cd_reg(instr), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CEQ: {
+ setCFRegister(cd_reg(instr), fj == fk);
+ break;
+ }
+ case AssemblerLOONG64::CUEQ: {
+ setCFRegister(cd_reg(instr),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CLT: {
+ setCFRegister(cd_reg(instr), fj < fk);
+ break;
+ }
+ case AssemblerLOONG64::CULT: {
+ setCFRegister(cd_reg(instr),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CLE: {
+ setCFRegister(cd_reg(instr), fj <= fk);
+ break;
+ }
+ case AssemblerLOONG64::CULE: {
+ setCFRegister(cd_reg(instr),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CNE: {
+ setCFRegister(cd_reg(instr), (fj < fk) || (fj > fk));
+ break;
+ }
+ case AssemblerLOONG64::COR: {
+ setCFRegister(cd_reg(instr), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CUNE: {
+ setCFRegister(cd_reg(instr), (fj < fk) || (fj > fk) ||
+ std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::SAF:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUN:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SEQ:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUEQ:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SLT:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SULT:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SLE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SULE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SNE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SOR:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUNE:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case op_fcmp_cond_d: {
+ MOZ_ASSERT(instr->bits(4, 3) == 0);
+ double fj = fj_double(instr);
+ double fk = fk_double(instr);
+ switch (cond(instr)) {
+ case AssemblerLOONG64::CAF: {
+ setCFRegister(cd_reg(instr), false);
+ break;
+ }
+ case AssemblerLOONG64::CUN: {
+ setCFRegister(cd_reg(instr), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CEQ: {
+ setCFRegister(cd_reg(instr), fj == fk);
+ break;
+ }
+ case AssemblerLOONG64::CUEQ: {
+ setCFRegister(cd_reg(instr),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CLT: {
+ setCFRegister(cd_reg(instr), fj < fk);
+ break;
+ }
+ case AssemblerLOONG64::CULT: {
+ setCFRegister(cd_reg(instr),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CLE: {
+ setCFRegister(cd_reg(instr), fj <= fk);
+ break;
+ }
+ case AssemblerLOONG64::CULE: {
+ setCFRegister(cd_reg(instr),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CNE: {
+ setCFRegister(cd_reg(instr), (fj < fk) || (fj > fk));
+ break;
+ }
+ case AssemblerLOONG64::COR: {
+ setCFRegister(cd_reg(instr), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CUNE: {
+ setCFRegister(cd_reg(instr),
+ (fj != fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::SAF:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUN:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SEQ:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUEQ:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SLT:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SULT:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SLE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SULE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SNE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SOR:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUNE:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp14(SimInstruction* instr) {
+ int64_t alu_out = 0x0;
+
+ switch (instr->bits(31, 18) << 18) {
+ case op_bytepick_d: {
+ uint8_t sa = sa3(instr) * 8;
+ if (sa == 0) {
+ alu_out = rk(instr);
+ } else {
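+        // Combine the low (8 - sa3) bytes of rk, shifted to the top, with
+        // the high sa3 bytes of rj moved to the bottom, mirroring the 32-bit
+        // bytepick.w case in decodeTypeOp15.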
+        uint64_t mask = (~uint64_t(0)) << (64 - sa);
+        int64_t rk_hi = (rk(instr) & static_cast<int64_t>(~mask)) << sa;
+        int64_t rj_lo =
+            static_cast<int64_t>((rj_u(instr) & mask) >> (64 - sa));
+ alu_out = rk_hi | rj_lo;
+ }
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_fsel: {
+ MOZ_ASSERT(instr->bits(19, 18) == 0);
+ if (ca(instr) == 0) {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr));
+ } else {
+ setFpuRegisterDouble(fd_reg(instr), fk_double(instr));
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp15(SimInstruction* instr) {
+ int64_t alu_out = 0x0;
+ int32_t alu32_out = 0x0;
+
+ switch (instr->bits(31, 17) << 17) {
+ case op_bytepick_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ uint8_t sa = sa2(instr) * 8;
+ if (sa == 0) {
+ alu32_out = static_cast<int32_t>(rk(instr));
+ } else {
+ int32_t mask = (1 << 31) >> (sa - 1);
+ int32_t rk_hi = (static_cast<int32_t>(rk(instr)) & (~mask)) << sa;
+ int32_t rj_lo = (static_cast<uint32_t>(rj(instr)) & mask) >> (32 - sa);
+ alu32_out = rk_hi | rj_lo;
+ }
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_alsl_w: {
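+      // alsl.{w,wu,d} compute (rj << (sa2 + 1)) + rk; the .w form
+      // sign-extends the 32-bit result and the .wu form zero-extends it.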
+ uint8_t sa = sa2(instr) + 1;
+ alu32_out = (static_cast<int32_t>(rj(instr)) << sa) +
+ static_cast<int32_t>(rk(instr));
+ setRegister(rd_reg(instr), alu32_out);
+ break;
+ }
+ case op_alsl_wu: {
+ uint8_t sa = sa2(instr) + 1;
+ alu32_out = (static_cast<int32_t>(rj(instr)) << sa) +
+ static_cast<int32_t>(rk(instr));
+ setRegister(rd_reg(instr), static_cast<uint32_t>(alu32_out));
+ break;
+ }
+ case op_alsl_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ uint8_t sa = sa2(instr) + 1;
+ alu_out = (rj(instr) << sa) + rk(instr);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp16(SimInstruction* instr) {
+ int64_t alu_out;
+ switch (instr->bits(31, 16) << 16) {
+ case op_slli_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 16) == 0b01);
+ setRegister(rd_reg(instr), rj(instr) << ui6(instr));
+ break;
+ }
+ case op_srli_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ setRegister(rd_reg(instr), rj_u(instr) >> ui6(instr));
+ break;
+ }
+ case op_srai_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ setRegister(rd_reg(instr), rj(instr) >> ui6(instr));
+ break;
+ }
+ case op_rotri_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 16) == 0b01);
+ alu_out = static_cast<int64_t>(RotateRight64(rj_u(instr), ui6(instr)));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp17(SimInstruction* instr) {
+ int64_t alu_out;
+ int32_t alu32_out;
+
+ switch (instr->bits(31, 15) << 15) {
+ case op_slli_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 15) == 0b001);
+ alu32_out = static_cast<int32_t>(rj(instr)) << ui5(instr);
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_srai_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 15) == 0b001);
+ alu32_out = static_cast<int32_t>(rj(instr)) >> ui5(instr);
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_rotri_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 15) == 0b001);
+ alu32_out = static_cast<int32_t>(
+ RotateRight32(static_cast<const uint32_t>(rj_u(instr)),
+ static_cast<const uint32_t>(ui5(instr))));
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_srli_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 15) == 0b001);
+ alu32_out = static_cast<uint32_t>(rj(instr)) >> ui5(instr);
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_add_w: {
+ int32_t alu32_out = static_cast<int32_t>(rj(instr) + rk(instr));
+ // Sign-extend result of 32bit operation into 64bit register.
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_add_d:
+ setRegister(rd_reg(instr), rj(instr) + rk(instr));
+ break;
+ case op_sub_w: {
+ int32_t alu32_out = static_cast<int32_t>(rj(instr) - rk(instr));
+ // Sign-extend result of 32bit operation into 64bit register.
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_sub_d:
+ setRegister(rd_reg(instr), rj(instr) - rk(instr));
+ break;
+ case op_slt:
+ setRegister(rd_reg(instr), rj(instr) < rk(instr) ? 1 : 0);
+ break;
+ case op_sltu:
+ setRegister(rd_reg(instr), rj_u(instr) < rk_u(instr) ? 1 : 0);
+ break;
+ case op_maskeqz:
+ setRegister(rd_reg(instr), rk(instr) == 0 ? 0 : rj(instr));
+ break;
+ case op_masknez:
+ setRegister(rd_reg(instr), rk(instr) != 0 ? 0 : rj(instr));
+ break;
+ case op_nor:
+ setRegister(rd_reg(instr), ~(rj(instr) | rk(instr)));
+ break;
+ case op_and:
+ setRegister(rd_reg(instr), rj(instr) & rk(instr));
+ break;
+ case op_or:
+ setRegister(rd_reg(instr), rj(instr) | rk(instr));
+ break;
+ case op_xor:
+ setRegister(rd_reg(instr), rj(instr) ^ rk(instr));
+ break;
+ case op_orn:
+ setRegister(rd_reg(instr), rj(instr) | (~rk(instr)));
+ break;
+ case op_andn:
+ setRegister(rd_reg(instr), rj(instr) & (~rk(instr)));
+ break;
+ case op_sll_w:
+ setRegister(rd_reg(instr), (int32_t)rj(instr) << (rk_u(instr) % 32));
+ break;
+ case op_srl_w: {
+ alu_out =
+ static_cast<int32_t>((uint32_t)rj_u(instr) >> (rk_u(instr) % 32));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_sra_w:
+ setRegister(rd_reg(instr), (int32_t)rj(instr) >> (rk_u(instr) % 32));
+ break;
+ case op_sll_d:
+ setRegister(rd_reg(instr), rj(instr) << (rk_u(instr) % 64));
+ break;
+ case op_srl_d: {
+ alu_out = static_cast<int64_t>(rj_u(instr) >> (rk_u(instr) % 64));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_sra_d:
+ setRegister(rd_reg(instr), rj(instr) >> (rk_u(instr) % 64));
+ break;
+ case op_rotr_w: {
+ alu_out = static_cast<int32_t>(
+ RotateRight32(static_cast<const uint32_t>(rj_u(instr)),
+ static_cast<const uint32_t>(rk_u(instr) % 32)));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_rotr_d: {
+ alu_out = static_cast<int64_t>(
+ RotateRight64((rj_u(instr)), (rk_u(instr) % 64)));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_mul_w: {
+ alu_out =
+ static_cast<int32_t>(rj(instr)) * static_cast<int32_t>(rk(instr));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_mulh_w: {
+ int32_t rj_lo = static_cast<int32_t>(rj(instr));
+ int32_t rk_lo = static_cast<int32_t>(rk(instr));
+ alu_out = static_cast<int64_t>(rj_lo) * static_cast<int64_t>(rk_lo);
+ setRegister(rd_reg(instr), alu_out >> 32);
+ break;
+ }
+ case op_mulh_wu: {
+ uint32_t rj_lo = static_cast<uint32_t>(rj_u(instr));
+ uint32_t rk_lo = static_cast<uint32_t>(rk_u(instr));
+ alu_out = static_cast<uint64_t>(rj_lo) * static_cast<uint64_t>(rk_lo);
+ setRegister(rd_reg(instr), alu_out >> 32);
+ break;
+ }
+ case op_mul_d:
+ setRegister(rd_reg(instr), rj(instr) * rk(instr));
+ break;
+ case op_mulh_d:
+ setRegister(rd_reg(instr), MultiplyHighSigned(rj(instr), rk(instr)));
+ break;
+ case op_mulh_du:
+ setRegister(rd_reg(instr),
+ MultiplyHighUnsigned(rj_u(instr), rk_u(instr)));
+ break;
+ case op_mulw_d_w: {
+ int64_t rj_i32 = static_cast<int32_t>(rj(instr));
+ int64_t rk_i32 = static_cast<int32_t>(rk(instr));
+ setRegister(rd_reg(instr), rj_i32 * rk_i32);
+ break;
+ }
+ case op_mulw_d_wu: {
+ uint64_t rj_u32 = static_cast<uint32_t>(rj_u(instr));
+ uint64_t rk_u32 = static_cast<uint32_t>(rk_u(instr));
+ setRegister(rd_reg(instr), rj_u32 * rk_u32);
+ break;
+ }
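+    // For the divide/modulo cases below, a zero divisor leaves the
+    // destination register unchanged, and the INT_MIN / -1 overflow case
+    // yields INT_MIN for div and 0 for mod.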
+ case op_div_w: {
+ int32_t rj_i32 = static_cast<int32_t>(rj(instr));
+ int32_t rk_i32 = static_cast<int32_t>(rk(instr));
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ setRegister(rd_reg(instr), INT_MIN);
+ } else if (rk_i32 != 0) {
+ setRegister(rd_reg(instr), rj_i32 / rk_i32);
+ }
+ break;
+ }
+ case op_mod_w: {
+ int32_t rj_i32 = static_cast<int32_t>(rj(instr));
+ int32_t rk_i32 = static_cast<int32_t>(rk(instr));
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ setRegister(rd_reg(instr), 0);
+ } else if (rk_i32 != 0) {
+ setRegister(rd_reg(instr), rj_i32 % rk_i32);
+ }
+ break;
+ }
+ case op_div_wu: {
+ uint32_t rj_u32 = static_cast<uint32_t>(rj(instr));
+ uint32_t rk_u32 = static_cast<uint32_t>(rk(instr));
+ if (rk_u32 != 0) {
+ setRegister(rd_reg(instr), static_cast<int32_t>(rj_u32 / rk_u32));
+ }
+ break;
+ }
+ case op_mod_wu: {
+ uint32_t rj_u32 = static_cast<uint32_t>(rj(instr));
+ uint32_t rk_u32 = static_cast<uint32_t>(rk(instr));
+ if (rk_u32 != 0) {
+ setRegister(rd_reg(instr), static_cast<int32_t>(rj_u32 % rk_u32));
+ }
+ break;
+ }
+ case op_div_d: {
+ if (rj(instr) == INT64_MIN && rk(instr) == -1) {
+ setRegister(rd_reg(instr), INT64_MIN);
+ } else if (rk(instr) != 0) {
+ setRegister(rd_reg(instr), rj(instr) / rk(instr));
+ }
+ break;
+ }
+ case op_mod_d: {
+ if (rj(instr) == LONG_MIN && rk(instr) == -1) {
+ setRegister(rd_reg(instr), 0);
+ } else if (rk(instr) != 0) {
+ setRegister(rd_reg(instr), rj(instr) % rk(instr));
+ }
+ break;
+ }
+ case op_div_du: {
+ if (rk_u(instr) != 0) {
+ setRegister(rd_reg(instr),
+ static_cast<int64_t>(rj_u(instr) / rk_u(instr)));
+ }
+ break;
+ }
+ case op_mod_du: {
+ if (rk_u(instr) != 0) {
+ setRegister(rd_reg(instr),
+ static_cast<int64_t>(rj_u(instr) % rk_u(instr)));
+ }
+ break;
+ }
+ case op_break:
+ softwareInterrupt(instr);
+ break;
+ case op_fadd_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr) + fk_float(instr));
+ break;
+ }
+ case op_fadd_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr) + fk_double(instr));
+ break;
+ }
+ case op_fsub_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr) - fk_float(instr));
+ break;
+ }
+ case op_fsub_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr) - fk_double(instr));
+ break;
+ }
+ case op_fmul_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr) * fk_float(instr));
+ break;
+ }
+ case op_fmul_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr) * fk_double(instr));
+ break;
+ }
+ case op_fdiv_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr) / fk_float(instr));
+ break;
+ }
+
+ case op_fdiv_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr) / fk_double(instr));
+ break;
+ }
+ case op_fmax_s: {
+ setFpuRegisterFloat(fd_reg(instr),
+ FPUMax(fk_float(instr), fj_float(instr)));
+ break;
+ }
+ case op_fmax_d: {
+ setFpuRegisterDouble(fd_reg(instr),
+ FPUMax(fk_double(instr), fj_double(instr)));
+ break;
+ }
+ case op_fmin_s: {
+ setFpuRegisterFloat(fd_reg(instr),
+ FPUMin(fk_float(instr), fj_float(instr)));
+ break;
+ }
+ case op_fmin_d: {
+ setFpuRegisterDouble(fd_reg(instr),
+ FPUMin(fk_double(instr), fj_double(instr)));
+ break;
+ }
+ case op_fmaxa_s: {
+ setFpuRegisterFloat(fd_reg(instr),
+ FPUMaxA(fk_float(instr), fj_float(instr)));
+ break;
+ }
+ case op_fmaxa_d: {
+ setFpuRegisterDouble(fd_reg(instr),
+ FPUMaxA(fk_double(instr), fj_double(instr)));
+ break;
+ }
+ case op_fmina_s: {
+ setFpuRegisterFloat(fd_reg(instr),
+ FPUMinA(fk_float(instr), fj_float(instr)));
+ break;
+ }
+ case op_fmina_d: {
+ setFpuRegisterDouble(fd_reg(instr),
+ FPUMinA(fk_double(instr), fj_double(instr)));
+ break;
+ }
+ case op_ldx_b:
+ setRegister(rd_reg(instr), readB(rj(instr) + rk(instr)));
+ break;
+ case op_ldx_h:
+ setRegister(rd_reg(instr), readH(rj(instr) + rk(instr), instr));
+ break;
+ case op_ldx_w:
+ setRegister(rd_reg(instr), readW(rj(instr) + rk(instr), instr));
+ break;
+ case op_ldx_d:
+ setRegister(rd_reg(instr), readDW(rj(instr) + rk(instr), instr));
+ break;
+ case op_stx_b:
+ writeB(rj(instr) + rk(instr), static_cast<int8_t>(rd(instr)));
+ break;
+ case op_stx_h:
+ writeH(rj(instr) + rk(instr), static_cast<int16_t>(rd(instr)), instr);
+ break;
+ case op_stx_w:
+ writeW(rj(instr) + rk(instr), static_cast<int32_t>(rd(instr)), instr);
+ break;
+ case op_stx_d:
+ writeDW(rj(instr) + rk(instr), rd(instr), instr);
+ break;
+ case op_ldx_bu:
+ setRegister(rd_reg(instr), readBU(rj(instr) + rk(instr)));
+ break;
+ case op_ldx_hu:
+ setRegister(rd_reg(instr), readHU(rj(instr) + rk(instr), instr));
+ break;
+ case op_ldx_wu:
+ setRegister(rd_reg(instr), readWU(rj(instr) + rk(instr), instr));
+ break;
+ case op_fldx_s:
+ setFpuRegister(fd_reg(instr), kFPUInvalidResult); // Trash upper 32 bits.
+ setFpuRegisterWord(fd_reg(instr), readW(rj(instr) + rk(instr), instr));
+ break;
+ case op_fldx_d:
+ setFpuRegister(fd_reg(instr), kFPUInvalidResult); // Trash upper 32 bits.
+ setFpuRegisterDouble(fd_reg(instr), readD(rj(instr) + rk(instr), instr));
+ break;
+ case op_fstx_s: {
+ int32_t alu_out_32 = static_cast<int32_t>(getFpuRegister(fd_reg(instr)));
+ writeW(rj(instr) + rk(instr), alu_out_32, instr);
+ break;
+ }
+ case op_fstx_d: {
+ writeD(rj(instr) + rk(instr), getFpuRegisterDouble(fd_reg(instr)), instr);
+ break;
+ }
+ case op_amswap_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amswap_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amadd_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amadd_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amand_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amand_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amor_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amor_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amxor_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amxor_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_w:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_w:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_wu:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_du:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_wu:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_du:
+ UNIMPLEMENTED();
+ break;
+ case op_amswap_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amswap_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amadd_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amadd_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amand_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amand_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amor_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amor_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amxor_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amxor_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_db_wu:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_db_du:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_db_wu:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_db_du:
+ UNIMPLEMENTED();
+ break;
+ case op_dbar:
+ // TODO(loong64): dbar simulation
+ break;
+ case op_ibar:
+ UNIMPLEMENTED();
+ break;
+ case op_fcopysign_s:
+ UNIMPLEMENTED();
+ break;
+ case op_fcopysign_d:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp22(SimInstruction* instr) {
+ int64_t alu_out;
+
+ switch (instr->bits(31, 10) << 10) {
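+    // clz/ctz return the operand width (32 or 64) for a zero input, since
+    // __builtin_clz/__builtin_ctz are undefined for zero.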
+ case op_clz_w: {
+ alu_out = U32(rj_u(instr)) ? __builtin_clz(U32(rj_u(instr))) : 32;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_ctz_w: {
+ alu_out = U32(rj_u(instr)) ? __builtin_ctz(U32(rj_u(instr))) : 32;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_clz_d: {
+ alu_out = U64(rj_u(instr)) ? __builtin_clzll(U64(rj_u(instr))) : 64;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_ctz_d: {
+ alu_out = U64(rj_u(instr)) ? __builtin_ctzll(U64(rj_u(instr))) : 64;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
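+    // The revb.*/revh.* cases reverse bytes (revb) or halfwords (revh)
+    // within each lane of rj by sweeping a mask across the input and
+    // shifting every extracted field to its mirrored position.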
+ case op_revb_2h: {
+ uint32_t input = static_cast<uint32_t>(rj(instr));
+ uint64_t output = 0;
+
+ uint32_t mask = 0xFF000000;
+ for (int i = 0; i < 4; i++) {
+ uint32_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revb_4h: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revb_2w: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF000000FF000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (24 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 24);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revb_d: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 3) {
+ tmp = tmp >> (56 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 56);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revh_2w: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 16;
+ } else {
+ tmp = tmp << 16;
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revh_d: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (48 - i * 32);
+ } else {
+ tmp = tmp << (i * 32 - 48);
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_bitrev_4b: {
+ uint32_t input = static_cast<uint32_t>(rj(instr));
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+      // Reverse the bits within each individual byte.
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint32_t>(o_byte << 24));
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_bitrev_8b: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+ uint8_t i_byte, o_byte;
+
+      // Reverse the bits within each individual byte.
+ for (int i = 0; i < 8; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint64_t>(o_byte) << 56);
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_bitrev_w: {
+ uint32_t input = static_cast<uint32_t>(rj(instr));
+ uint32_t output = 0;
+ output = ReverseBits(input);
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_bitrev_d: {
+ alu_out = static_cast<int64_t>(ReverseBits(rj_u(instr)));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_ext_w_b: {
+ uint8_t input = static_cast<uint8_t>(rj(instr));
+ alu_out = static_cast<int64_t>(static_cast<int8_t>(input));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_ext_w_h: {
+ uint16_t input = static_cast<uint16_t>(rj(instr));
+ alu_out = static_cast<int64_t>(static_cast<int16_t>(input));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_fabs_s: {
+ setFpuRegisterFloat(fd_reg(instr), std::abs(fj_float(instr)));
+ break;
+ }
+ case op_fabs_d: {
+ setFpuRegisterDouble(fd_reg(instr), std::abs(fj_double(instr)));
+ break;
+ }
+ case op_fneg_s: {
+ setFpuRegisterFloat(fd_reg(instr), -fj_float(instr));
+ break;
+ }
+ case op_fneg_d: {
+ setFpuRegisterDouble(fd_reg(instr), -fj_double(instr));
+ break;
+ }
+ case op_fsqrt_s: {
+ if (fj_float(instr) >= 0) {
+ setFpuRegisterFloat(fd_reg(instr), std::sqrt(fj_float(instr)));
+ } else {
+ setFpuRegisterFloat(fd_reg(instr), std::sqrt(-1)); // qnan
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ }
+ break;
+ }
+ case op_fsqrt_d: {
+ if (fj_double(instr) >= 0) {
+ setFpuRegisterDouble(fd_reg(instr), std::sqrt(fj_double(instr)));
+ } else {
+ setFpuRegisterDouble(fd_reg(instr), std::sqrt(-1)); // qnan
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ }
+ break;
+ }
+ case op_fmov_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr));
+ break;
+ }
+ case op_fmov_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr));
+ break;
+ }
+ case op_movgr2fr_w: {
+ setFpuRegisterWord(fd_reg(instr), static_cast<int32_t>(rj(instr)));
+ break;
+ }
+ case op_movgr2fr_d: {
+ setFpuRegister(fd_reg(instr), rj(instr));
+ break;
+ }
+ case op_movgr2frh_w: {
+ setFpuRegisterHiWord(fd_reg(instr), static_cast<int32_t>(rj(instr)));
+ break;
+ }
+ case op_movfr2gr_s: {
+ setRegister(rd_reg(instr),
+ static_cast<int64_t>(getFpuRegisterWord(fj_reg(instr))));
+ break;
+ }
+ case op_movfr2gr_d: {
+ setRegister(rd_reg(instr), getFpuRegister(fj_reg(instr)));
+ break;
+ }
+ case op_movfrh2gr_s: {
+ setRegister(rd_reg(instr), getFpuRegisterHiWord(fj_reg(instr)));
+ break;
+ }
+ case op_movgr2fcsr: {
+ // fcsr could be 0-3
+ MOZ_ASSERT(rd_reg(instr) < 4);
+ FCSR_ = static_cast<uint32_t>(rj(instr));
+ break;
+ }
+ case op_movfcsr2gr: {
+ setRegister(rd_reg(instr), FCSR_);
+ break;
+ }
+ case op_fcvt_s_d: {
+ setFpuRegisterFloat(fd_reg(instr), static_cast<float>(fj_double(instr)));
+ break;
+ }
+ case op_fcvt_d_s: {
+ setFpuRegisterDouble(fd_reg(instr), static_cast<double>(fj_float(instr)));
+ break;
+ }
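+    // The ftint* cases convert FP values to integers: ftintrm_* rounds
+    // toward minus infinity, ftintrp_* toward plus infinity, ftintrz_*
+    // toward zero, ftintrne_* to nearest (ties to even), and ftint_* uses
+    // the rounding mode currently selected in FCSR. setFCSRRoundError
+    // detects NaN and out-of-range inputs so that the invalid-result
+    // pattern can be written instead.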
+ case op_ftintrm_w_s: {
+ float fj = fj_float(instr);
+ float rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrm_w_d: {
+ double fj = fj_double(instr);
+ double rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrm_l_s: {
+ float fj = fj_float(instr);
+ float rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrm_l_d: {
+ double fj = fj_double(instr);
+ double rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrp_w_s: {
+ float fj = fj_float(instr);
+ float rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrp_w_d: {
+ double fj = fj_double(instr);
+ double rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrp_l_s: {
+ float fj = fj_float(instr);
+ float rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrp_l_d: {
+ double fj = fj_double(instr);
+ double rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrz_w_s: {
+ float fj = fj_float(instr);
+ float rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrz_w_d: {
+ double fj = fj_double(instr);
+ double rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrz_l_s: {
+ float fj = fj_float(instr);
+ float rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrz_l_d: {
+ double fj = fj_double(instr);
+ double rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrne_w_s: {
+ float fj = fj_float(instr);
+ float rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrne_w_d: {
+ double fj = fj_double(instr);
+ double rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrne_l_s: {
+ float fj = fj_float(instr);
+ float rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrne_l_d: {
+ double fj = fj_double(instr);
+ double rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftint_w_s: {
+ float fj = fj_float(instr);
+ float rounded;
+ int32_t result;
+ roundAccordingToFCSR<float>(fj, &rounded, &result);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftint_w_d: {
+ double fj = fj_double(instr);
+ double rounded;
+ int32_t result;
+ roundAccordingToFCSR<double>(fj, &rounded, &result);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftint_l_s: {
+ float fj = fj_float(instr);
+ float rounded;
+ int64_t result;
+ round64AccordingToFCSR<float>(fj, &rounded, &result);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftint_l_d: {
+ double fj = fj_double(instr);
+ double rounded;
+ int64_t result;
+ round64AccordingToFCSR<double>(fj, &rounded, &result);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ffint_s_w: {
+ alu_out = getFpuRegisterSignedWord(fj_reg(instr));
+ setFpuRegisterFloat(fd_reg(instr), static_cast<float>(alu_out));
+ break;
+ }
+ case op_ffint_s_l: {
+ alu_out = getFpuRegister(fj_reg(instr));
+ setFpuRegisterFloat(fd_reg(instr), static_cast<float>(alu_out));
+ break;
+ }
+ case op_ffint_d_w: {
+ alu_out = getFpuRegisterSignedWord(fj_reg(instr));
+ setFpuRegisterDouble(fd_reg(instr), static_cast<double>(alu_out));
+ break;
+ }
+ case op_ffint_d_l: {
+ alu_out = getFpuRegister(fj_reg(instr));
+ setFpuRegisterDouble(fd_reg(instr), static_cast<double>(alu_out));
+ break;
+ }
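+    // frint.{s,d} round to an integral FP value in the current FCSR
+    // rounding mode, selecting between std::ceil and std::floor and
+    // breaking ties toward the even neighbour; an inexact result sets the
+    // FCSR inexact flag.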
+ case op_frint_s: {
+ float fj = fj_float(instr);
+ float result, temp_result;
+ double temp;
+ float upper = std::ceil(fj);
+ float lower = std::floor(fj);
+ switch (getFCSRRoundingMode()) {
+ case kRoundToNearest:
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
+ temp_result = upper / 2;
+            float remainder = std::modf(temp_result, &temp);
+            if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ result = lower;
+ break;
+ }
+ setFpuRegisterFloat(fd_reg(instr), result);
+ if (result != fj) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ }
+ break;
+ }
+ case op_frint_d: {
+ double fj = fj_double(instr);
+ double result, temp, temp_result;
+ double upper = std::ceil(fj);
+ double lower = std::floor(fj);
+ switch (getFCSRRoundingMode()) {
+ case kRoundToNearest:
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
+ temp_result = upper / 2;
+            double remainder = std::modf(temp_result, &temp);
+            if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ result = lower;
+ break;
+ }
+ setFpuRegisterDouble(fd_reg(instr), result);
+ if (result != fj) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ }
+ break;
+ }
+ case op_movfr2cf:
+ printf("Sim UNIMPLEMENTED: MOVFR2CF\n");
+ UNIMPLEMENTED();
+ break;
+ case op_movgr2cf:
+ printf("Sim UNIMPLEMENTED: MOVGR2CF\n");
+ UNIMPLEMENTED();
+ break;
+    case op_clo_w:
+      printf("Sim UNIMPLEMENTED: CLO_W\n");
+      UNIMPLEMENTED();
+      break;
+    case op_cto_w:
+      printf("Sim UNIMPLEMENTED: CTO_W\n");
+      UNIMPLEMENTED();
+      break;
+    case op_clo_d:
+      printf("Sim UNIMPLEMENTED: CLO_D\n");
+      UNIMPLEMENTED();
+      break;
+    case op_cto_d:
+      printf("Sim UNIMPLEMENTED: CTO_D\n");
+      UNIMPLEMENTED();
+      break;
+    // Any opcode that was not handled above indicates a decoding error.
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp24(SimInstruction* instr) {
+ switch (instr->bits(31, 8) << 8) {
+ case op_movcf2fr:
+ UNIMPLEMENTED();
+ break;
+    case op_movcf2gr:
+      setRegister(rd_reg(instr), getCFRegister(cj_reg(instr)));
+      break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Executes the current instruction.
+void Simulator::instructionDecode(SimInstruction* instr) {
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ SimulatorProcess::checkICacheLocked(instr);
+ }
+ pc_modified_ = false;
+
+ switch (instr->instructionType()) {
+ case SimInstruction::kOp6Type:
+ decodeTypeOp6(instr);
+ break;
+ case SimInstruction::kOp7Type:
+ decodeTypeOp7(instr);
+ break;
+ case SimInstruction::kOp8Type:
+ decodeTypeOp8(instr);
+ break;
+ case SimInstruction::kOp10Type:
+ decodeTypeOp10(instr);
+ break;
+ case SimInstruction::kOp11Type:
+ decodeTypeOp11(instr);
+ break;
+ case SimInstruction::kOp12Type:
+ decodeTypeOp12(instr);
+ break;
+ case SimInstruction::kOp14Type:
+ decodeTypeOp14(instr);
+ break;
+ case SimInstruction::kOp15Type:
+ decodeTypeOp15(instr);
+ break;
+ case SimInstruction::kOp16Type:
+ decodeTypeOp16(instr);
+ break;
+ case SimInstruction::kOp17Type:
+ decodeTypeOp17(instr);
+ break;
+ case SimInstruction::kOp22Type:
+ decodeTypeOp22(instr);
+ break;
+ case SimInstruction::kOp24Type:
+ decodeTypeOp24(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ if (!pc_modified_) {
+ setRegister(pc,
+ reinterpret_cast<int64_t>(instr) + SimInstruction::kInstrSize);
+ }
+}
+
+void Simulator::enable_single_stepping(SingleStepCallback cb, void* arg) {
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void Simulator::disable_single_stepping() {
+ if (!single_stepping_) {
+ return;
+ }
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
+template <bool enableStopSimAt>
+void Simulator::execute() {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ loong64Debugger dbg(this);
+ dbg.debug();
+ } else {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this,
+ (void*)program_counter);
+ }
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+ }
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+}
+
+void Simulator::callInternal(uint8_t* entry) {
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int64_t>(entry));
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+  // ra, the simulation stops when returning to this call point.
+ setRegister(ra, end_sim_pc);
+
+ // Remember the values of callee-saved registers.
+ int64_t s0_val = getRegister(s0);
+ int64_t s1_val = getRegister(s1);
+ int64_t s2_val = getRegister(s2);
+ int64_t s3_val = getRegister(s3);
+ int64_t s4_val = getRegister(s4);
+ int64_t s5_val = getRegister(s5);
+ int64_t s6_val = getRegister(s6);
+ int64_t s7_val = getRegister(s7);
+ int64_t s8_val = getRegister(s8);
+ int64_t gp_val = getRegister(gp);
+ int64_t sp_val = getRegister(sp);
+ int64_t tp_val = getRegister(tp);
+ int64_t fp_val = getRegister(fp);
+
+  // Set up the callee-saved registers with a known value so that we can check
+ // that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ setRegister(s0, callee_saved_value);
+ setRegister(s1, callee_saved_value);
+ setRegister(s2, callee_saved_value);
+ setRegister(s3, callee_saved_value);
+ setRegister(s4, callee_saved_value);
+ setRegister(s5, callee_saved_value);
+ setRegister(s6, callee_saved_value);
+ setRegister(s7, callee_saved_value);
+ setRegister(s8, callee_saved_value);
+ setRegister(gp, callee_saved_value);
+ setRegister(tp, callee_saved_value);
+ setRegister(fp, callee_saved_value);
+
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1) {
+ execute<true>();
+ } else {
+ execute<false>();
+ }
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(s0));
+ MOZ_ASSERT(callee_saved_value == getRegister(s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(s8));
+ MOZ_ASSERT(callee_saved_value == getRegister(gp));
+ MOZ_ASSERT(callee_saved_value == getRegister(tp));
+ MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+ // Restore callee-saved registers with the original value.
+ setRegister(s0, s0_val);
+ setRegister(s1, s1_val);
+ setRegister(s2, s2_val);
+ setRegister(s3, s3_val);
+ setRegister(s4, s4_val);
+ setRegister(s5, s5_val);
+ setRegister(s6, s6_val);
+ setRegister(s7, s7_val);
+ setRegister(s8, s8_val);
+ setRegister(gp, gp_val);
+ setRegister(sp, sp_val);
+ setRegister(tp, tp_val);
+ setRegister(fp, fp_val);
+}
+
+int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int64_t original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int64_t entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount) {
+ entry_stack = entry_stack - argument_count * sizeof(int64_t);
+ } else {
+ entry_stack = entry_stack - kCArgsSlotsSize;
+ }
+
+ entry_stack &= ~U64(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+ // Setup the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg)) {
+ setRegister(argReg.code(), va_arg(parameters, int64_t));
+ } else {
+ stack_argument[i] = va_arg(parameters, int64_t);
+ }
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int64_t result = getRegister(a0);
+ return result;
+}
+
+uintptr_t Simulator::pushAddress(uintptr_t address) {
+  int64_t new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::popAddress() {
+  int64_t current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator* JSContext::simulator() const { return simulator_; }
diff --git a/js/src/jit/loong64/Simulator-loong64.h b/js/src/jit/loong64/Simulator-loong64.h
new file mode 100644
index 0000000000..233f218256
--- /dev/null
+++ b/js/src/jit/loong64/Simulator-loong64.h
@@ -0,0 +1,650 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_loong64_Simulator_loong64_h
+#define jit_loong64_Simulator_loong64_h
+
+#ifdef JS_SIMULATOR_LOONG64
+
+# include "mozilla/Atomics.h"
+
+# include "jit/IonTypes.h"
+# include "js/ProfilingFrameIterator.h"
+# include "threading/Thread.h"
+# include "vm/MutexIDs.h"
+# include "wasm/WasmSignalHandlers.h"
+
+namespace js {
+
+namespace jit {
+
+class JitActivation;
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
+
+const intptr_t kPointerAlignment = 8;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 33rd register.
+const int kPCRegister = 32;
+
+// Number of FPU registers.
+const int kNumFPURegisters = 32;
+
+// FPU control registers. Currently only FCSR is implemented.
+// TODO: fcsr0 fcsr1 fcsr2 fcsr3
+const int kFCSRRegister = 0;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1u << 31);
+const uint64_t kFPU64InvalidResult =
+ static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+const int64_t kFPU64InvalidResultNegative =
+ static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
+
+const uint32_t kFPURoundingModeShift = 8;
+const uint32_t kFPURoundingModeMask = 0b11 << kFPURoundingModeShift;
+
+// FPU rounding modes.
+enum FPURoundingMode {
+ RN = 0b00 << kFPURoundingModeShift, // Round to Nearest.
+ RZ = 0b01 << kFPURoundingModeShift, // Round towards zero.
+ RP = 0b10 << kFPURoundingModeShift, // Round towards Plus Infinity.
+ RM = 0b11 << kFPURoundingModeShift, // Round towards Minus Infinity.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToZero = RZ,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+
+ mode_round = RN,
+ mode_ceil = RP,
+ mode_floor = RM,
+ mode_trunc = RZ
+};
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 16;
+const uint32_t kFCSRUnderflowFlagBit = 17;
+const uint32_t kFCSROverflowFlagBit = 18;
+const uint32_t kFCSRDivideByZeroFlagBit = 19;
+const uint32_t kFCSRInvalidOpFlagBit = 20;
+
+const uint32_t kFCSRInexactCauseBit = 24;
+const uint32_t kFCSRUnderflowCauseBit = 25;
+const uint32_t kFCSROverflowCauseBit = 26;
+const uint32_t kFCSRDivideByZeroCauseBit = 27;
+const uint32_t kFCSRInvalidOpCauseBit = 28;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
+// On the LoongArch64 simulator, breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+const uint32_t kWasmTrapCode = 6;
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+// Per thread simulator state.
+class Simulator {
+ friend class loong64Debugger;
+
+ public:
+ // Registers are declared in order.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ ra,
+ gp,
+ sp,
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ t0,
+ t1,
+ t2,
+ t3,
+ t4,
+ t5,
+ t6,
+ t7,
+ t8,
+ tp,
+ fp,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ v0 = a0,
+ v1 = a1,
+ };
+
+ // Condition flag registers.
+ enum CFRegister {
+ fcc0,
+ fcc1,
+ fcc2,
+ fcc3,
+ fcc4,
+ fcc5,
+ fcc6,
+ fcc7,
+ kNumCFRegisters
+ };
+
+ // Floating point registers.
+ enum FPURegister {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ kNumFPURegisters
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create();
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods
+ // above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+ // Accessors for register state. Reading the pc value adheres to the LOONG64
+  // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void setRegister(int reg, int64_t value);
+ int64_t getRegister(int reg) const;
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int64_t value);
+ void setFpuRegisterWord(int fpureg, int32_t value);
+ void setFpuRegisterHiWord(int fpureg, int32_t value);
+ void setFpuRegisterFloat(int fpureg, float value);
+ void setFpuRegisterDouble(int fpureg, double value);
+
+ void setFpuRegisterWordInvalidResult(float original, float rounded,
+ int fpureg);
+ void setFpuRegisterWordInvalidResult(double original, double rounded,
+ int fpureg);
+ void setFpuRegisterInvalidResult(float original, float rounded, int fpureg);
+ void setFpuRegisterInvalidResult(double original, double rounded, int fpureg);
+ void setFpuRegisterInvalidResult64(float original, float rounded, int fpureg);
+ void setFpuRegisterInvalidResult64(double original, double rounded,
+ int fpureg);
+
+ int64_t getFpuRegister(int fpureg) const;
+ // int32_t getFpuRegisterLo(int fpureg) const;
+ // int32_t getFpuRegisterHi(int fpureg) const;
+ int32_t getFpuRegisterWord(int fpureg) const;
+ int32_t getFpuRegisterSignedWord(int fpureg) const;
+ int32_t getFpuRegisterHiWord(int fpureg) const;
+ float getFpuRegisterFloat(int fpureg) const;
+ double getFpuRegisterDouble(int fpureg) const;
+
+ void setCFRegister(int cfreg, bool value);
+ bool getCFRegister(int cfreg) const;
+
+ void set_fcsr_rounding_mode(FPURoundingMode mode);
+
+ void setFCSRBit(uint32_t cc, bool value);
+ bool testFCSRBit(uint32_t cc);
+ unsigned int getFCSRRoundingMode();
+ template <typename T>
+ bool setFCSRRoundError(double original, double rounded);
+ bool setFCSRRound64Error(float original, float rounded);
+
+ template <typename T>
+ void roundAccordingToFCSR(T toRound, T* rounded, int32_t* rounded_int);
+
+ template <typename T>
+ void round64AccordingToFCSR(T toRound, T* rounded, int64_t* rounded_int);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ int64_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const {
+ return reinterpret_cast<T>(get_pc());
+ }
+
+ void enable_single_stepping(SingleStepCallback cb, void* arg);
+ void disable_single_stepping();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes LOONG64 instructions until the PC reaches end_sim_pc.
+ template <bool enableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int64_t call(uint8_t* entry, int argument_count, ...);
+
+ // Push an address onto the JS stack.
+ uintptr_t pushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t popAddress();
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+ // Returns true if pc register contains one of the 'SpecialValues' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum SpecialValues {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ bool init();
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void format(SimInstruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint8_t readBU(uint64_t addr);
+ inline int8_t readB(uint64_t addr);
+ inline void writeB(uint64_t addr, uint8_t value);
+ inline void writeB(uint64_t addr, int8_t value);
+
+ inline uint16_t readHU(uint64_t addr, SimInstruction* instr);
+ inline int16_t readH(uint64_t addr, SimInstruction* instr);
+ inline void writeH(uint64_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(uint64_t addr, int16_t value, SimInstruction* instr);
+
+ inline uint32_t readWU(uint64_t addr, SimInstruction* instr);
+ inline int32_t readW(uint64_t addr, SimInstruction* instr);
+ inline void writeW(uint64_t addr, uint32_t value, SimInstruction* instr);
+ inline void writeW(uint64_t addr, int32_t value, SimInstruction* instr);
+
+ inline int64_t readDW(uint64_t addr, SimInstruction* instr);
+ inline void writeDW(uint64_t addr, int64_t value, SimInstruction* instr);
+
+ inline double readD(uint64_t addr, SimInstruction* instr);
+ inline void writeD(uint64_t addr, double value, SimInstruction* instr);
+
+ inline int32_t loadLinkedW(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalW(uint64_t addr, int32_t value,
+ SimInstruction* instr);
+
+ inline int64_t loadLinkedD(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr);
+
+  // Execution is dispatched based on the instruction type.
+ void decodeTypeOp6(SimInstruction* instr);
+ void decodeTypeOp7(SimInstruction* instr);
+ void decodeTypeOp8(SimInstruction* instr);
+ void decodeTypeOp10(SimInstruction* instr);
+ void decodeTypeOp11(SimInstruction* instr);
+ void decodeTypeOp12(SimInstruction* instr);
+ void decodeTypeOp14(SimInstruction* instr);
+ void decodeTypeOp15(SimInstruction* instr);
+ void decodeTypeOp16(SimInstruction* instr);
+ void decodeTypeOp17(SimInstruction* instr);
+ void decodeTypeOp22(SimInstruction* instr);
+ void decodeTypeOp24(SimInstruction* instr);
+
+ inline int32_t rj_reg(SimInstruction* instr) const;
+ inline int64_t rj(SimInstruction* instr) const;
+ inline uint64_t rj_u(SimInstruction* instr) const;
+ inline int32_t rk_reg(SimInstruction* instr) const;
+ inline int64_t rk(SimInstruction* instr) const;
+ inline uint64_t rk_u(SimInstruction* instr) const;
+ inline int32_t rd_reg(SimInstruction* instr) const;
+ inline int64_t rd(SimInstruction* instr) const;
+ inline uint64_t rd_u(SimInstruction* instr) const;
+ inline int32_t fa_reg(SimInstruction* instr) const;
+ inline float fa_float(SimInstruction* instr) const;
+ inline double fa_double(SimInstruction* instr) const;
+
+ inline int32_t fj_reg(SimInstruction* instr) const;
+ inline float fj_float(SimInstruction* instr) const;
+ inline double fj_double(SimInstruction* instr) const;
+
+ inline int32_t fk_reg(SimInstruction* instr) const;
+ inline float fk_float(SimInstruction* instr) const;
+ inline double fk_double(SimInstruction* instr) const;
+ inline int32_t fd_reg(SimInstruction* instr) const;
+ inline float fd_float(SimInstruction* instr) const;
+ inline double fd_double(SimInstruction* instr) const;
+
+ inline int32_t cj_reg(SimInstruction* instr) const;
+ inline bool cj(SimInstruction* instr) const;
+
+ inline int32_t cd_reg(SimInstruction* instr) const;
+ inline bool cd(SimInstruction* instr) const;
+
+ inline int32_t ca_reg(SimInstruction* instr) const;
+ inline bool ca(SimInstruction* instr) const;
+ inline uint32_t sa2(SimInstruction* instr) const;
+ inline uint32_t sa3(SimInstruction* instr) const;
+ inline uint32_t ui5(SimInstruction* instr) const;
+ inline uint32_t ui6(SimInstruction* instr) const;
+ inline uint32_t lsbw(SimInstruction* instr) const;
+ inline uint32_t msbw(SimInstruction* instr) const;
+ inline uint32_t lsbd(SimInstruction* instr) const;
+ inline uint32_t msbd(SimInstruction* instr) const;
+ inline uint32_t cond(SimInstruction* instr) const;
+ inline int32_t si12(SimInstruction* instr) const;
+ inline uint32_t ui12(SimInstruction* instr) const;
+ inline int32_t si14(SimInstruction* instr) const;
+ inline int32_t si16(SimInstruction* instr) const;
+ inline int32_t si20(SimInstruction* instr) const;
+
+ // Used for breakpoints.
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ bool isWatchpoint(uint32_t code);
+ void printWatchpoint(uint32_t code);
+ void handleStop(uint32_t code, SimInstruction* instr);
+ bool isStopInstruction(SimInstruction* instr);
+ bool isEnabledStop(uint32_t code);
+ void enableStop(uint32_t code);
+ void disableStop(uint32_t code);
+ void increaseStopCounter(uint32_t code);
+ void printStopInfo(uint32_t code);
+
+ JS::ProfilingFrameIterator::RegisterState registerState();
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ // This method is rather hot so inline the normal (no-wasm) case.
+ bool MOZ_ALWAYS_INLINE handleWasmSegFault(uint64_t addr, unsigned numBytes) {
+ if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+ return false;
+ }
+
+ uint8_t* newPC;
+ if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes,
+ &newPC)) {
+ return false;
+ }
+
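+    // The faulting access was in wasm code: drop any pending LL/SC
+    // reservation and resume at the PC returned by MemoryAccessTraps.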
+ LLBit_ = false;
+ set_pc(int64_t(newPC));
+ return true;
+ }
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+
+ public:
+ static int64_t StopSimAt;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type);
+
+ private:
+ enum Exception {
+ kNone,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void signalExceptions();
+
+ // Handle return value for runtime FP functions.
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+ void setCallResult(__int128 res);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Floating point Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // Condition flags Registers.
+ bool CFregisters_[kNumCFRegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ bool LLBit_;
+ uintptr_t LLAddr_;
+ int64_t lastLLValue_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int64_t icount_;
+ int64_t break_count_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // Single-stepping support
+ bool single_stepping_;
+ SingleStepCallback single_step_callback_;
+ void* single_step_callback_arg_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1U << 31;
+
+  // A stop is enabled, meaning the simulator will stop when it encounters the
+  // instruction, if bit 31 of watchedStops_[code].count_ is unset.
+  // The value watchedStops_[code].count_ & ~(1 << 31) indicates how many
+  // times the stop has been hit or stepped through.
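+  // For example, a watched stop that has been hit five times and then
+  // disabled would have count_ == (kStopDisabledBit | 5).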
+ struct StopCountAndDesc {
+ uint32_t count_;
+ char* desc_;
+ };
+ StopCountAndDesc watchedStops_[kNumOfWatchedStops];
+};
+
+// Process wide simulator state.
+class SimulatorProcess {
+ friend class Redirection;
+ friend class AutoLockSimulatorCache;
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ static mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ ICacheCheckingDisableCount;
+ static void FlushICache(void* start, size_t size);
+
+ static void checkICacheLocked(SimInstruction* instr);
+
+ static bool initialize() {
+ singleton_ = js_new<SimulatorProcess>();
+ return singleton_;
+ }
+ static void destroy() {
+ js_delete(singleton_);
+ singleton_ = nullptr;
+ }
+
+ SimulatorProcess();
+ ~SimulatorProcess();
+
+ private:
+ static SimulatorProcess* singleton_;
+
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_;
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ static ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->icache_;
+ }
+
+ static Redirection* redirection() {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->redirection_;
+ }
+
+ static void setRedirection(js::jit::Redirection* redirection) {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ singleton_->redirection_ = redirection;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_LOONG64 */
+
+#endif /* jit_loong64_Simulator_loong64_h */
diff --git a/js/src/jit/loong64/Trampoline-loong64.cpp b/js/src/jit/loong64/Trampoline-loong64.cpp
new file mode 100644
index 0000000000..4d99e76aa1
--- /dev/null
+++ b/js/src/jit/loong64/Trampoline-loong64.cpp
@@ -0,0 +1,833 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/loong64/SharedICHelpers-loong64.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+static_assert(sizeof(uintptr_t) == sizeof(uint64_t), "Not 32-bit clean.");
+
+struct EnterJITRegs {
+ double f31;
+ double f30;
+ double f29;
+ double f28;
+ double f27;
+ double f26;
+ double f25;
+ double f24;
+
+ // uintptr_t align;
+
+ // non-volatile registers.
+ uint64_t ra;
+ uint64_t fp;
+ uint64_t s8;
+ uint64_t s7;
+ uint64_t s6;
+ uint64_t s5;
+ uint64_t s4;
+ uint64_t s3;
+ uint64_t s2;
+ uint64_t s1;
+ uint64_t s0;
+  // Save reg_vp (a7) on the stack; it is reloaded after the call into JIT
+  // code.
+ uint64_t a7;
+};
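+
+// GeneratePrologue() and GenerateReturn() below store and reload exactly the
+// fields of EnterJITRegs via offsetof(), so this layout must stay in sync
+// with the stack space they reserve and free.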
+
+static void GenerateReturn(MacroAssembler& masm, int returnCode) {
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ // Restore non-volatile registers
+ masm.as_ld_d(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_ld_d(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_ld_d(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_ld_d(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_ld_d(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_ld_d(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_ld_d(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_ld_d(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_ld_d(s8, StackPointer, offsetof(EnterJITRegs, s8));
+ masm.as_ld_d(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_ld_d(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.as_fld_d(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_fld_d(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_fld_d(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_fld_d(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_fld_d(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_fld_d(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_fld_d(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_fld_d(f31, StackPointer, offsetof(EnterJITRegs, f31));
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void GeneratePrologue(MacroAssembler& masm) {
+ masm.reserveStack(sizeof(EnterJITRegs));
+
+ masm.as_st_d(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_st_d(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_st_d(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_st_d(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_st_d(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_st_d(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_st_d(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_st_d(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_st_d(s8, StackPointer, offsetof(EnterJITRegs, s8));
+ masm.as_st_d(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_st_d(ra, StackPointer, offsetof(EnterJITRegs, ra));
+ masm.as_st_d(a7, StackPointer, offsetof(EnterJITRegs, a7));
+
+ masm.as_fst_d(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_fst_d(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_fst_d(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_fst_d(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_fst_d(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_fst_d(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_fst_d(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_fst_d(f31, StackPointer, offsetof(EnterJITRegs, f31));
+}
+
+// Generates a trampoline for calling Jit compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard LOONG64
+// ABI calling convention.
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ const Register reg_code = IntArgReg0;
+ const Register reg_argc = IntArgReg1;
+ const Register reg_argv = IntArgReg2;
+ const mozilla::DebugOnly<Register> reg_frame = IntArgReg3;
+ const Register reg_token = IntArgReg4;
+ const Register reg_chain = IntArgReg5;
+ const Register reg_values = IntArgReg6;
+ const Register reg_vp = IntArgReg7;
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ GeneratePrologue(masm);
+
+ // Save stack pointer as baseline frame.
+ masm.movePtr(StackPointer, FramePointer);
+
+ // Load the number of actual arguments into s3.
+ masm.unboxInt32(Address(reg_vp, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // if we are constructing, that also needs to include newTarget
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, reg_token,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.add32(Imm32(1), reg_argc);
+
+ masm.bind(&noNewTarget);
+ }
+
+  // Make the stack aligned (pad if we will push an odd number of Values).
+ masm.ma_and(s0, reg_argc, Imm32(1));
+ masm.ma_sub_d(s1, zero, Imm32(sizeof(Value)));
+ masm.as_maskeqz(s1, s1, s0);
+ masm.as_add_d(StackPointer, StackPointer, s1);
+
+ masm.as_slli_d(s0, reg_argc, 3); // Value* argv
+ masm.addPtr(reg_argv, s0); // s0 = &argv[argc]
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+ // If there aren't any arguments, don't do anything
+ masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(sizeof(Value)), s0);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6);
+ masm.loadValue(Address(s0, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+
+ masm.push(reg_token);
+ masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, s3, s3);
+
+ CodeLabel returnLabel;
+ Label oomReturnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(OsrFrameReg);
+ regs.take(reg_code);
+ MOZ_ASSERT(!regs.has(ReturnReg), "ReturnReg matches reg_code");
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+ Register numStackValues = reg_values;
+ regs.take(numStackValues);
+ Register scratch = regs.takeAny();
+
+ // Push return address.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, &returnLabel);
+ masm.storePtr(scratch, Address(StackPointer, 0));
+
+ // Push previous frame pointer.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = FramePointer;
+ masm.movePtr(StackPointer, framePtr);
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+
+ Register framePtrScratch = regs.takeAny();
+ masm.movePtr(sp, framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ masm.as_slli_d(scratch, numStackValues, 3);
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.reserveStack(3 * sizeof(uintptr_t));
+ masm.storePtr(
+ ImmWord(MakeFrameDescriptor(FrameType::BaselineJS)),
+ Address(StackPointer, 2 * sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(
+ zero, Address(StackPointer, sizeof(uintptr_t))); // fake return address
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // No GC things to mark, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr,
+ Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtrScratch); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ regs.add(OsrFrameReg);
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ Label error;
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(framePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&oomReturnLabel);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.as_or(R1.scratchReg(), reg_chain, zero);
+ }
+
+ // The call will push the return address and frame pointer on the stack, thus
+ // we check that the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+ // Call the function with pushing return address to stack.
+ masm.callJitNoProfiler(reg_code);
+
+ {
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ masm.addCodeLabel(returnLabel);
+ masm.bind(&oomReturnLabel);
+ }
+
+ // Discard arguments and padding. Set sp to the address of the EnterJITRegs
+ // on the stack.
+ masm.mov(FramePointer, StackPointer);
+
+ // Store the returned value into the vp
+ masm.as_ld_d(reg_vp, StackPointer, offsetof(EnterJITRegs, a7));
+ masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ // Not supported, or not implemented yet.
+ // TODO: Implement along with the corresponding stack-walker changes, in
+ // coordination with the Gecko Profiler, see bug 1635987 and follow-ups.
+ return mozilla::Nothing{};
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+  // Stack has to be aligned here. If not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+  // Reserve space for the BailoutInfo pointer. Two words to ensure alignment
+  // for setupAlignedABICall.
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a1);
+
+ using Fn =
+ bool (*)(InvalidationBailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(a2);
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+  // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ // Do not erase the frame pointer in this function.
+
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+ masm.pushReturnAddress();
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ // Load argc.
+ masm.loadNumActualArgs(FramePointer, s3);
+
+ Register numActArgsReg = a6;
+ Register calleeTokenReg = a7;
+ Register numArgsReg = a5;
+
+ // Load |nformals| into numArgsReg.
+ masm.loadPtr(
+ Address(FramePointer, RectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+ masm.mov(calleeTokenReg, numArgsReg);
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), numArgsReg);
+ masm.loadFunctionArgCount(numArgsReg, numArgsReg);
+
+ // Stash another copy in t3, since we are going to do destructive operations
+ // on numArgsReg
+ masm.mov(numArgsReg, t3);
+
+ static_assert(
+ CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.mov(calleeTokenReg, t2);
+ masm.ma_and(t2, t2, Imm32(uint32_t(CalleeToken_FunctionConstructing)));
+
+  // Including |this| and |new.target|, there are (|nformals| + 1 +
+ // isConstructing) arguments to push to the stack. Then we push a
+ // JitFrameLayout. We compute the padding expressed in the number of extra
+ // |undefined| values to push on the stack.
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(
+ JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(JitStackValueAlignment));
+ masm.add32(
+ Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */),
+ numArgsReg);
+ masm.add32(t2, numArgsReg);
+ masm.and32(Imm32(~(JitStackValueAlignment - 1)), numArgsReg);
+
+ // Load the number of |undefined|s to push into t1. Subtract 1 for |this|.
+ masm.as_sub_d(t1, numArgsReg, s3);
+ masm.sub32(Imm32(1), t1);
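+
+  // Illustrative example (assuming JitStackValueAlignment == 2): one actual
+  // argument passed to a constructing function with three formals gives
+  // numArgsReg = (3 + 2 + 1) & ~1 = 6 and t1 = 6 - 1 - 1 = 4, i.e. four
+  // |undefined| values (missing formals, the |new.target| slot and padding)
+  // are pushed below.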
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ] <- sp
+ // '--- s3 ----'
+ //
+ // Rectifier frame:
+ // [fp'] [undef] [undef] [undef] [arg2] [arg1] [this] [ [argc] [callee]
+ // [descr] [raddr] ]
+ // '-------- t1 ---------' '--- s3 ----'
+
+ // Copy number of actual arguments into numActArgsReg.
+ masm.mov(s3, numActArgsReg);
+
+ masm.moveValue(UndefinedValue(), ValueOperand(t0));
+
+  // Push undefined values (including the padding).
+ {
+ Label undefLoopTop;
+
+ masm.bind(&undefLoopTop);
+ masm.sub32(Imm32(1), t1);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+ // Get the topmost argument.
+ masm.as_slli_d(t0, s3, 3); // t0 <- nargs * 8
+ masm.as_add_d(t1, FramePointer, t0); // t1 <- fp(saved sp) + nargs * 8
+ masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t1);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ masm.addPtr(Imm32(1), s3);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.sub32(Imm32(1), s3);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.loadValue(Address(t1, 0), ValueOperand(t0));
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+ masm.subPtr(Imm32(sizeof(Value)), t1);
+
+ masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+  // If constructing, copy |new.target|.
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, calleeTokenReg,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // thisFrame[numFormals] = prevFrame[argc]
+ ValueOperand newTarget(t0);
+
+ // Load vp[argc]. Add sizeof(Value) for |this|.
+ BaseIndex newTargetSrc(FramePointer, numActArgsReg, TimesEight,
+ sizeof(RectifierFrameLayout) + sizeof(Value));
+ masm.loadValue(newTargetSrc, newTarget);
+
+ // Again, 1 for |this|
+ BaseIndex newTargetDest(StackPointer, t3, TimesEight, sizeof(Value));
+ masm.storeValue(newTarget, newTargetDest);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ]
+ //
+ //
+ // Rectifier frame:
+ // [fp'] <- fp [undef] [undef] [undef] [arg2] [arg1] [this] <- sp [ [argc]
+ // [callee] [descr] [raddr] ]
+ //
+
+ // Construct JitFrameLayout.
+ masm.push(calleeTokenReg);
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, numActArgsReg,
+ numActArgsReg);
+
+ // Call the target function.
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), calleeTokenReg);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(t1);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(calleeTokenReg, t1, &noBaselineScript);
+ masm.callJitNoProfiler(t1);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ masm.callJitNoProfiler(t1);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+/* When bailout is done via out-of-line code (lazy bailout), the frame size is
+ * stored in $ra (see CodeGeneratorLOONG64::generateOutOfLineCode()) and the
+ * thunk code has to save it on the stack. In addition, the members
+ * snapshotOffset_ and padding_ are pushed to the stack by
+ * CodeGeneratorLOONG64::visitOutOfLineBailout().
+ */
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // Push the frameSize_ stored in ra
+ // See: CodeGeneratorLOONG64::generateOutOfLineCode()
+ masm.push(ra);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Put pointer to BailoutStack as first argument to the Bailout()
+ masm.movePtr(StackPointer, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, a0);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movePtr(StackPointer, a1);
+
+ // Call the bailout function.
+ using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ // Get the bailoutInfo outparam.
+ masm.pop(a2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // If it isn't a tail call, then the return address needs to be saved
+ if (f.expectTailCall == NonTailCall) {
+ masm.pushReturnAddress();
+ }
+
+ // Push the frame pointer to finish the exit frame, then link it up.
+ masm.Push(FramePointer);
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = t1; // Use temporary register.
+ regs.take(argsBase);
+ masm.ma_add_d(argsBase, StackPointer,
+ Imm32(ExitFrameLayout::SizeWithFooter()));
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Bool:
+ case Type_Int32:
+ outReg = regs.takeAny();
+      // Reserve two int32 slots so that the 4-byte outparam keeps the stack
+      // 8-byte aligned.
+ masm.reserveStack(2 * sizeof(int32_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ if (f.argPassedInFloatReg(explicitArg)) {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ } else {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ }
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ case VMFunctionData::DoubleByRef:
+ MOZ_CRASH(
+ "NYI: LOONG64 callVM should not be used with 128bits values.");
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (InvalidReg != outReg) {
+ masm.passABIArg(outReg);
+ }
+
+ masm.callWithABI(nativeFun, MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, a0, a0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(a0, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ masm.load32(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Pointer:
+ masm.loadPtr(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Double:
+ masm.as_fld_d(ReturnDoubleReg, StackPointer, 0);
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ // Pop ExitFooterFrame and the frame pointer.
+ masm.leaveExitFrame(sizeof(void*));
+
+ // Return. Subtract sizeof(void*) for the frame pointer.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ Register temp1 = a0;
+ Register temp2 = a2;
+ Register temp3 = a3;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet save;
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ masm.push(ra);
+ masm.PushRegsInMask(save);
+
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.abiret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(a1, a2);
+}
diff --git a/js/src/jit/mips-shared/Architecture-mips-shared.cpp b/js/src/jit/mips-shared/Architecture-mips-shared.cpp
new file mode 100644
index 0000000000..fb28b298ea
--- /dev/null
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.cpp
@@ -0,0 +1,121 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "jit/FlushICache.h" // js::jit::FlushICache
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "jit/RegisterSets.h"
+
+#if defined(__linux__) && !defined(JS_SIMULATOR)
+# include <sys/cachectl.h>
+#endif
+
+#define HWCAP_MIPS (1 << 28)
+#define HWCAP_LOONGSON (1 << 27)
+#define HWCAP_R2 (1 << 26)
+#define HWCAP_FPU (1 << 0)
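+
+// These are engine-internal feature bits returned by GetMIPSFlags(); they are
+// derived from /proc/cpuinfo (or simulator defaults) below, not taken from
+// the kernel's AT_HWCAP vector.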
+
+namespace js {
+namespace jit {
+
+static uint32_t get_mips_flags() {
+ uint32_t flags = HWCAP_MIPS;
+
+#if defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+ flags |= HWCAP_FPU;
+ flags |= HWCAP_R2;
+#else
+# ifdef __linux__
+ FILE* fp = fopen("/proc/cpuinfo", "r");
+ if (!fp) {
+ return flags;
+ }
+
+ char buf[1024] = {};
+ (void)fread(buf, sizeof(char), sizeof(buf) - 1, fp);
+ fclose(fp);
+ if (strstr(buf, "FPU")) {
+ flags |= HWCAP_FPU;
+ }
+ if (strstr(buf, "Loongson")) {
+ flags |= HWCAP_LOONGSON;
+ }
+ if (strstr(buf, "mips32r2") || strstr(buf, "mips64r2")) {
+ flags |= HWCAP_R2;
+ }
+# endif
+#endif // JS_SIMULATOR_MIPS32 || JS_SIMULATOR_MIPS64
+ return flags;
+}
+
+static bool check_fpu() { return mips_private::Flags & HWCAP_FPU; }
+
+static bool check_loongson() { return mips_private::Flags & HWCAP_LOONGSON; }
+
+static bool check_r2() { return mips_private::Flags & HWCAP_R2; }
+
+namespace mips_private {
+// Cache a local copy so we only have to read /proc/cpuinfo once.
+uint32_t Flags = get_mips_flags();
+bool hasFPU = check_fpu();
+bool isLoongson = check_loongson();
+bool hasR2 = check_r2();
+} // namespace mips_private
+
+bool CPUFlagsHaveBeenComputed() {
+ // Flags were computed above.
+ return true;
+}
+
+Registers::Code Registers::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+void FlushICache(void* code, size_t size) {
+#if defined(JS_SIMULATOR)
+ js::jit::SimulatorProcess::FlushICache(code, size);
+
+#elif defined(_MIPS_ARCH_LOONGSON3A)
+  // On Loongson3 CPUs the cache is flushed automatically by the hardware;
+  // we only need to execute an instruction hazard barrier.
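+  // The bal/daddiu pair below computes the address just past this asm block
+  // into $ra, jr.hb then jumps there while executing the hazard barrier, and
+  // its delay slot restores the original $ra.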
+ uintptr_t tmp;
+ asm volatile(
+ ".set push \n"
+ ".set noreorder \n"
+ "move %[tmp], $ra \n"
+ "bal 1f \n"
+ "daddiu $ra, 8 \n"
+ "1: \n"
+ "jr.hb $ra \n"
+ "move $ra, %[tmp] \n"
+ ".set pop\n"
+ : [tmp] "=&r"(tmp));
+
+#elif defined(__GNUC__)
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code),
+ reinterpret_cast<char*>(end));
+
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+
+#endif
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips-shared/Architecture-mips-shared.h b/js/src/jit/mips-shared/Architecture-mips-shared.h
new file mode 100644
index 0000000000..1749a2fe6c
--- /dev/null
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.h
@@ -0,0 +1,341 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Architecture_mips_shared_h
+#define jit_mips_shared_Architecture_mips_shared_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/shared/Architecture-shared.h"
+
+#include "js/Utility.h"
+
+// gcc appears to use _mips_hard_float to denote
+// that the target is a hard-float target.
+#ifdef _mips_hard_float
+# define JS_CODEGEN_MIPS_HARDFP
+#endif
+
+#if (defined(_MIPS_SIM) && (_MIPS_SIM == _ABIO32)) || \
+ defined(JS_SIMULATOR_MIPS32)
+# define USES_O32_ABI
+#elif (defined(_MIPS_SIM) && (_MIPS_SIM == _ABI64)) || \
+ defined(JS_SIMULATOR_MIPS64)
+# define USES_N64_ABI
+#else
+# error "Unsupported ABI"
+#endif
+
+#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 6))
+# define MIPSR6
+#endif
+
+namespace js {
+namespace jit {
+
+// How far forward/back can a jump go? Provide a generous buffer for thunks.
+static const uint32_t JumpImmediateRange = UINT32_MAX;
+
+class Registers {
+ public:
+ enum RegisterID {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ at = r1,
+ v0 = r2,
+ v1 = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+#if defined(USES_O32_ABI)
+ t0 = r8,
+ t1 = r9,
+ t2 = r10,
+ t3 = r11,
+ t4 = r12,
+ t5 = r13,
+ t6 = r14,
+ t7 = r15,
+ ta0 = t4,
+ ta1 = t5,
+ ta2 = t6,
+ ta3 = t7,
+#elif defined(USES_N64_ABI)
+ a4 = r8,
+ a5 = r9,
+ a6 = r10,
+ a7 = r11,
+ t0 = r12,
+ t1 = r13,
+ t2 = r14,
+ t3 = r15,
+ ta0 = a4,
+ ta1 = a5,
+ ta2 = a6,
+ ta3 = a7,
+#endif
+ s0 = r16,
+ s1 = r17,
+ s2 = r18,
+ s3 = r19,
+ s4 = r20,
+ s5 = r21,
+ s6 = r22,
+ s7 = r23,
+ t8 = r24,
+ t9 = r25,
+ k0 = r26,
+ k1 = r27,
+ gp = r28,
+ sp = r29,
+ fp = r30,
+ ra = r31,
+ invalid_reg
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ static const char* const RegNames[];
+ static const char* GetName(Code code) {
+ MOZ_ASSERT(code < Total);
+ return RegNames[code];
+ }
+ static const char* GetName(Encoding i) { return GetName(Code(i)); }
+
+ static Code FromName(const char* name);
+
+ static const Encoding StackPointer = sp;
+ static const Encoding Invalid = invalid_reg;
+
+ static const uint32_t Total = 32;
+ static const uint32_t Allocatable;
+
+ typedef uint32_t SetType;
+ static const SetType AllMask = 0xffffffff;
+ static const SetType SharedArgRegMask =
+ (1 << a0) | (1 << a1) | (1 << a2) | (1 << a3);
+ static const SetType ArgRegMask;
+
+ static const SetType VolatileMask =
+ (1 << Registers::v0) | (1 << Registers::v1) | (1 << Registers::a0) |
+ (1 << Registers::a1) | (1 << Registers::a2) | (1 << Registers::a3) |
+ (1 << Registers::t0) | (1 << Registers::t1) | (1 << Registers::t2) |
+ (1 << Registers::t3) | (1 << Registers::ta0) | (1 << Registers::ta1) |
+ (1 << Registers::ta2) | (1 << Registers::ta3);
+
+ // We use this constant to save registers when entering functions. This
+ // is why $ra is added here even though it is not "Non Volatile".
+ static const SetType NonVolatileMask =
+ (1 << Registers::s0) | (1 << Registers::s1) | (1 << Registers::s2) |
+ (1 << Registers::s3) | (1 << Registers::s4) | (1 << Registers::s5) |
+ (1 << Registers::s6) | (1 << Registers::s7) | (1 << Registers::fp) |
+ (1 << Registers::ra);
+
+ static const SetType WrapperMask = VolatileMask | // = arguments
+ (1 << Registers::t0) | // = outReg
+ (1 << Registers::t1); // = argBase
+
+ static const SetType NonAllocatableMask =
+ (1 << Registers::zero) | (1 << Registers::at) | // at = scratch
+ (1 << Registers::t8) | // t8 = scratch
+ (1 << Registers::t9) | // t9 = scratch
+ (1 << Registers::k0) | (1 << Registers::k1) | (1 << Registers::gp) |
+ (1 << Registers::sp) | (1 << Registers::ra) | (1 << Registers::fp);
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask;
+
+ // Registers returned from a JS -> C call.
+ static const SetType SharedCallMask = (1 << Registers::v0);
+ static const SetType CallMask;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
+class FloatRegistersMIPSShared {
+ public:
+ enum FPRegisterID {
+ f0 = 0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ invalid_freg
+ };
+ typedef uint32_t Code;
+ typedef FPRegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ double d;
+ };
+
+ static const char* GetName(Encoding code) {
+ static const char* const Names[] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+ return Names[code];
+ }
+
+ static const Encoding Invalid = invalid_freg;
+
+#if defined(JS_CODEGEN_MIPS32)
+ typedef uint32_t SetType;
+#elif defined(JS_CODEGEN_MIPS64)
+ typedef uint64_t SetType;
+#endif
+};
+
+static const uint32_t SpillSlotSize =
+ std::max(sizeof(Registers::RegisterContent),
+ sizeof(FloatRegistersMIPSShared::RegisterContent));
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegisterMIPSShared {
+ public:
+ bool isSimd128() const { return false; }
+
+ typedef FloatRegistersMIPSShared::SetType SetType;
+
+#if defined(JS_CODEGEN_MIPS32)
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+#elif defined(JS_CODEGEN_MIPS64)
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ return mozilla::CountPopulation64(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+#endif
+};
+
+namespace mips_private {
+extern uint32_t Flags;
+extern bool hasFPU;
+extern bool isLoongson;
+extern bool hasR2;
+} // namespace mips_private
+
+inline uint32_t GetMIPSFlags() { return mips_private::Flags; }
+inline bool hasFPU() { return mips_private::hasFPU; }
+inline bool isLoongson() { return mips_private::isLoongson; }
+inline bool hasR2() { return mips_private::hasR2; }
+
+// MIPS doesn't have double registers that can NOT be treated as float32.
+inline bool hasUnaliasedDouble() { return false; }
+
+// MIPS64 doesn't support multi-aliasing, and on MIPS32 we don't allocate odd
+// single-precision fp registers, so multi-aliasing is never exposed to the
+// jit. See comments in Architecture-mips32.h.
+inline bool hasMultiAlias() { return false; }
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Architecture_mips_shared_h */
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.cpp b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
new file mode 100644
index 0000000000..11f834c3c7
--- /dev/null
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -0,0 +1,2094 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "gc/Marking.h"
+#include "jit/ExecutableAllocator.h"
+#include "vm/Realm.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+// Encode a standard register when it is being used as rd, rs, or an extra
+// register (rt). These encoders should never be called with an InvalidReg.
+uint32_t js::jit::RS(Register r) {
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RSShift;
+}
+
+uint32_t js::jit::RT(Register r) {
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RTShift;
+}
+
+uint32_t js::jit::RD(Register r) {
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RDShift;
+}
+
+uint32_t js::jit::RZ(Register r) {
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RZShift;
+}
+
+uint32_t js::jit::SA(uint32_t value) {
+ MOZ_ASSERT(value < 32);
+ return value << SAShift;
+}
+
+uint32_t js::jit::FS(uint32_t value) {
+ MOZ_ASSERT(value < 32);
+ return value << FSShift;
+}
+
+Register js::jit::toRS(Instruction& i) {
+ return Register::FromCode((i.encode() & RSMask) >> RSShift);
+}
+
+Register js::jit::toRT(Instruction& i) {
+ return Register::FromCode((i.encode() & RTMask) >> RTShift);
+}
+
+Register js::jit::toRD(Instruction& i) {
+ return Register::FromCode((i.encode() & RDMask) >> RDShift);
+}
+
+Register js::jit::toR(Instruction& i) {
+ return Register::FromCode(i.encode() & RegMask);
+}
+
+void InstImm::extractImm16(BOffImm16* dest) { *dest = BOffImm16(*this); }
+
+void AssemblerMIPSShared::finish() {
+ MOZ_ASSERT(!isFinished);
+ isFinished = true;
+}
+
+bool AssemblerMIPSShared::appendRawCode(const uint8_t* code, size_t numBytes) {
+ return m_buffer.appendRawCode(code, numBytes);
+}
+
+bool AssemblerMIPSShared::reserve(size_t size) {
+ // This buffer uses fixed-size chunks so there's no point in reserving
+ // now vs. on-demand.
+ return !oom();
+}
+
+bool AssemblerMIPSShared::swapBuffer(wasm::Bytes& bytes) {
+ // For now, specialize to the one use case. As long as wasm::Bytes is a
+ // Vector, not a linked-list of chunks, there's not much we can do other
+ // than copy.
+ MOZ_ASSERT(bytes.empty());
+ if (!bytes.resize(bytesNeeded())) {
+ return false;
+ }
+ m_buffer.executableCopy(bytes.begin());
+ return true;
+}
+
+void AssemblerMIPSShared::copyJumpRelocationTable(uint8_t* dest) {
+ if (jumpRelocations_.length()) {
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+ }
+}
+
+void AssemblerMIPSShared::copyDataRelocationTable(uint8_t* dest) {
+ if (dataRelocations_.length()) {
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+ }
+}
+
+AssemblerMIPSShared::Condition AssemblerMIPSShared::InvertCondition(
+ Condition cond) {
+ switch (cond) {
+ case Equal:
+ return NotEqual;
+ case NotEqual:
+ return Equal;
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ case Signed:
+ return NotSigned;
+ case NotSigned:
+ return Signed;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerMIPSShared::DoubleCondition AssemblerMIPSShared::InvertCondition(
+ DoubleCondition cond) {
+ switch (cond) {
+ case DoubleOrdered:
+ return DoubleUnordered;
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleUnordered:
+ return DoubleOrdered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+BOffImm16::BOffImm16(InstImm inst) : data(inst.encode() & Imm16Mask) {}
+
+Instruction* BOffImm16::getDest(Instruction* src) const {
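+  // Sign-extend the 16-bit offset; the +1 accounts for branch offsets being
+  // relative to the instruction that follows the branch (the delay slot).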
+ return &src[(((int32_t)data << 16) >> 16) + 1];
+}
+
+bool AssemblerMIPSShared::oom() const {
+ return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
+ dataRelocations_.oom();
+}
+
+// Size of the instruction stream, in bytes.
+size_t AssemblerMIPSShared::size() const { return m_buffer.size(); }
+
+// Size of the relocation table, in bytes.
+size_t AssemblerMIPSShared::jumpRelocationTableBytes() const {
+ return jumpRelocations_.length();
+}
+
+size_t AssemblerMIPSShared::dataRelocationTableBytes() const {
+ return dataRelocations_.length();
+}
+
+// Total size of the code plus relocation tables, in bytes.
+size_t AssemblerMIPSShared::bytesNeeded() const {
+ return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
+}
+
+// write a blob of binary into the instruction stream
+BufferOffset AssemblerMIPSShared::writeInst(uint32_t x, uint32_t* dest) {
+ MOZ_ASSERT(hasCreator());
+ if (dest == nullptr) {
+ return m_buffer.putInt(x);
+ }
+
+ WriteInstStatic(x, dest);
+ return BufferOffset();
+}
+
+void AssemblerMIPSShared::WriteInstStatic(uint32_t x, uint32_t* dest) {
+ MOZ_ASSERT(dest != nullptr);
+ *dest = x;
+}
+
+BufferOffset AssemblerMIPSShared::haltingAlign(int alignment) {
+ // TODO: Implement a proper halting align.
+ return nopAlign(alignment);
+}
+
+BufferOffset AssemblerMIPSShared::nopAlign(int alignment) {
+ BufferOffset ret;
+ MOZ_ASSERT(m_buffer.isAligned(4));
+ if (alignment == 8) {
+ if (!m_buffer.isAligned(alignment)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned()) {
+ ret = tmp;
+ }
+ }
+ } else {
+ MOZ_ASSERT((alignment & (alignment - 1)) == 0);
+ while (size() & (alignment - 1)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned()) {
+ ret = tmp;
+ }
+ }
+ }
+ return ret;
+}
+
+BufferOffset AssemblerMIPSShared::as_nop() {
+ spew("nop");
+ return writeInst(op_special | ff_sll);
+}
+
+// Logical operations.
+BufferOffset AssemblerMIPSShared::as_and(Register rd, Register rs,
+ Register rt) {
+ spew("and %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_and).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_or(Register rd, Register rs, Register rt) {
+ spew("or %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_or).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_xor(Register rd, Register rs,
+ Register rt) {
+ spew("xor %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_xor).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_nor(Register rd, Register rs,
+ Register rt) {
+ spew("nor %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_nor).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_andi(Register rd, Register rs, int32_t j) {
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ spew("andi %3s,%3s,0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_andi, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ori(Register rd, Register rs, int32_t j) {
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ spew("ori %3s,%3s,0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_ori, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_xori(Register rd, Register rs, int32_t j) {
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ spew("xori %3s,%3s,0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_xori, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lui(Register rd, int32_t j) {
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ spew("lui %3s,0x%x", rd.name(), j);
+ return writeInst(InstImm(op_lui, zero, rd, Imm16(j)).encode());
+}
+
+// Branch and jump instructions
+BufferOffset AssemblerMIPSShared::as_bal(BOffImm16 off) {
+ spew("bal %d", off.decode());
+ BufferOffset bo =
+ writeInst(InstImm(op_regimm, zero, rt_bgezal, off).encode());
+ return bo;
+}
+
+BufferOffset AssemblerMIPSShared::as_b(BOffImm16 off) {
+ spew("b %d", off.decode());
+ BufferOffset bo = writeInst(InstImm(op_beq, zero, zero, off).encode());
+ return bo;
+}
+
+InstImm AssemblerMIPSShared::getBranchCode(JumpOrCall jumpOrCall) {
+ if (jumpOrCall == BranchIsCall) {
+ return InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+ }
+
+ return InstImm(op_beq, zero, zero, BOffImm16(0));
+}
+
+InstImm AssemblerMIPSShared::getBranchCode(Register s, Register t,
+ Condition c) {
+ MOZ_ASSERT(c == AssemblerMIPSShared::Equal ||
+ c == AssemblerMIPSShared::NotEqual);
+ return InstImm(c == AssemblerMIPSShared::Equal ? op_beq : op_bne, s, t,
+ BOffImm16(0));
+}
+
+InstImm AssemblerMIPSShared::getBranchCode(Register s, Condition c) {
+ switch (c) {
+ case AssemblerMIPSShared::Equal:
+ case AssemblerMIPSShared::Zero:
+ case AssemblerMIPSShared::BelowOrEqual:
+ return InstImm(op_beq, s, zero, BOffImm16(0));
+ case AssemblerMIPSShared::NotEqual:
+ case AssemblerMIPSShared::NonZero:
+ case AssemblerMIPSShared::Above:
+ return InstImm(op_bne, s, zero, BOffImm16(0));
+ case AssemblerMIPSShared::GreaterThan:
+ return InstImm(op_bgtz, s, zero, BOffImm16(0));
+ case AssemblerMIPSShared::GreaterThanOrEqual:
+ case AssemblerMIPSShared::NotSigned:
+ return InstImm(op_regimm, s, rt_bgez, BOffImm16(0));
+ case AssemblerMIPSShared::LessThan:
+ case AssemblerMIPSShared::Signed:
+ return InstImm(op_regimm, s, rt_bltz, BOffImm16(0));
+ case AssemblerMIPSShared::LessThanOrEqual:
+ return InstImm(op_blez, s, zero, BOffImm16(0));
+ default:
+ MOZ_CRASH("Condition not supported.");
+ }
+}
+
+InstImm AssemblerMIPSShared::getBranchCode(FloatTestKind testKind,
+ FPConditionBit fcc) {
+ MOZ_ASSERT(!(fcc && FccMask));
+#ifdef MIPSR6
+ RSField rsField = ((testKind == TestForTrue ? rs_t : rs_f));
+
+ return InstImm(op_cop1, rsField, FloatRegisters::f24 << 16, BOffImm16(0));
+#else
+ uint32_t rtField = ((testKind == TestForTrue ? 1 : 0) | (fcc << FccShift))
+ << RTShift;
+
+ return InstImm(op_cop1, rs_bc1, rtField, BOffImm16(0));
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_j(JOffImm26 off) {
+ spew("j 0x%x", off.decode());
+ BufferOffset bo = writeInst(InstJump(op_j, off).encode());
+ return bo;
+}
+BufferOffset AssemblerMIPSShared::as_jal(JOffImm26 off) {
+ spew("jal 0x%x", off.decode());
+ BufferOffset bo = writeInst(InstJump(op_jal, off).encode());
+ return bo;
+}
+
+BufferOffset AssemblerMIPSShared::as_jr(Register rs) {
+ spew("jr %3s", rs.name());
+#ifdef MIPSR6
+ BufferOffset bo =
+ writeInst(InstReg(op_special, rs, zero, zero, ff_jalr).encode());
+#else
+ BufferOffset bo =
+ writeInst(InstReg(op_special, rs, zero, zero, ff_jr).encode());
+#endif
+ return bo;
+}
+BufferOffset AssemblerMIPSShared::as_jalr(Register rs) {
+ spew("jalr %3s", rs.name());
+ BufferOffset bo =
+ writeInst(InstReg(op_special, rs, zero, ra, ff_jalr).encode());
+ return bo;
+}
+
+// Arithmetic instructions
+BufferOffset AssemblerMIPSShared::as_addu(Register rd, Register rs,
+ Register rt) {
+ spew("addu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_addu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_addiu(Register rd, Register rs,
+ int32_t j) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ spew("addiu %3s,%3s,0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_addiu, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_daddu(Register rd, Register rs,
+ Register rt) {
+ spew("daddu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_daddu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_daddiu(Register rd, Register rs,
+ int32_t j) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ spew("daddiu %3s,%3s,0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_daddiu, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_subu(Register rd, Register rs,
+ Register rt) {
+ spew("subu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_subu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsubu(Register rd, Register rs,
+ Register rt) {
+ spew("dsubu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsubu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mult(Register rs, Register rt) {
+ spew("mult %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_mult).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_multu(Register rs, Register rt) {
+ spew("multu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_multu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmult(Register rs, Register rt) {
+ spew("dmult %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_dmult).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmultu(Register rs, Register rt) {
+ spew("dmultu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_dmultu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_div(Register rs, Register rt) {
+ spew("div %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_div).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_div(Register rd, Register rs,
+ Register rt) {
+ spew("div %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_div).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_divu(Register rs, Register rt) {
+ spew("divu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_divu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_divu(Register rd, Register rs,
+ Register rt) {
+ spew("divu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_divu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mod(Register rd, Register rs,
+ Register rt) {
+ spew("mod %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_mod).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_modu(Register rd, Register rs,
+ Register rt) {
+ spew("modu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_modu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ddiv(Register rs, Register rt) {
+ spew("ddiv %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_ddiv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ddiv(Register rd, Register rs,
+ Register rt) {
+ spew("ddiv %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_ddiv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ddivu(Register rs, Register rt) {
+ spew("ddivu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_ddivu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ddivu(Register rd, Register rs,
+ Register rt) {
+ spew("ddivu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_ddivu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mul(Register rd, Register rs,
+ Register rt) {
+ spew("mul %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_mul).encode());
+#else
+ return writeInst(InstReg(op_special2, rs, rt, rd, ff_mul).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_muh(Register rd, Register rs,
+ Register rt) {
+ spew("muh %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_muh).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mulu(Register rd, Register rs,
+ Register rt) {
+ spew("mulu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_mulu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_muhu(Register rd, Register rs,
+ Register rt) {
+ spew("muhu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_muhu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmul(Register rd, Register rs,
+ Register rt) {
+ spew("dmul %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_dmul).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmuh(Register rd, Register rs,
+ Register rt) {
+ spew("dmuh %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_dmuh).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmulu(Register rd, Register rt,
+ Register rs) {
+ spew("dmulu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_dmulu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmuhu(Register rd, Register rt,
+ Register rs) {
+ spew("dmuhu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_dmuhu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmod(Register rd, Register rs,
+ Register rt) {
+ spew("dmod %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_dmod).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmodu(Register rd, Register rs,
+ Register rt) {
+ spew("dmodu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_dmodu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_madd(Register rs, Register rt) {
+ spew("madd %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special2, rs, rt, ff_madd).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_maddu(Register rs, Register rt) {
+ spew("maddu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special2, rs, rt, ff_maddu).encode());
+}
+
+// Shift instructions
+BufferOffset AssemblerMIPSShared::as_sll(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("sll %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sll).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsll(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("dsll %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsll).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsll32(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(31 < sa && sa < 64);
+ spew("dsll32 %3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32);
+ return writeInst(
+ InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsll32).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sllv(Register rd, Register rt,
+ Register rs) {
+ spew("sllv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sllv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsllv(Register rd, Register rt,
+ Register rs) {
+ spew("dsllv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsllv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_srl(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("srl %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsrl(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("dsrl %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsrl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsrl32(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(31 < sa && sa < 64);
+ spew("dsrl32 %3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32);
+ return writeInst(
+ InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsrl32).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_srlv(Register rd, Register rt,
+ Register rs) {
+ spew("srlv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srlv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsrlv(Register rd, Register rt,
+ Register rs) {
+ spew("dsrlv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsrlv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sra(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("sra %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sra).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsra(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("dsra %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsra).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsra32(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(31 < sa && sa < 64);
+ spew("dsra32 %3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32);
+ return writeInst(
+ InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsra32).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_srav(Register rd, Register rt,
+ Register rs) {
+ spew("srav %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srav).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsrav(Register rd, Register rt,
+ Register rs) {
+ spew("dsrav %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsrav).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_rotr(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("rotr %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_drotr(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("drotr %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_dsrl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_drotr32(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(31 < sa && sa < 64);
+ spew("drotr32%3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32);
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_special, rs_one, rt, rd, sa - 32, ff_dsrl32).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_rotrv(Register rd, Register rt,
+ Register rs) {
+ spew("rotrv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_srlv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_drotrv(Register rd, Register rt,
+ Register rs) {
+ spew("drotrv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_dsrlv).encode());
+}
+
+// Load and store instructions
+BufferOffset AssemblerMIPSShared::as_lb(Register rd, Register rs, int16_t off) {
+ spew("lb %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lbu(Register rd, Register rs,
+ int16_t off) {
+ spew("lbu %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lbu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lh(Register rd, Register rs, int16_t off) {
+ spew("lh %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lhu(Register rd, Register rs,
+ int16_t off) {
+ spew("lhu %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lhu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lw(Register rd, Register rs, int16_t off) {
+ spew("lw %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lwu(Register rd, Register rs,
+ int16_t off) {
+ spew("lwu %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lwu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lwl(Register rd, Register rs,
+ int16_t off) {
+ spew("lwl %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lwl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lwr(Register rd, Register rs,
+ int16_t off) {
+ spew("lwr %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lwr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ll(Register rd, Register rs, int16_t off) {
+ spew("ll %3s, (0x%x)%2s", rd.name(), off, rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special3, rs, rd, ff_ll).encode());
+#else
+ return writeInst(InstImm(op_ll, rs, rd, Imm16(off)).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_lld(Register rd, Register rs,
+ int16_t off) {
+ spew("lld %3s, (0x%x)%2s", rd.name(), off, rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special3, rs, rd, ff_lld).encode());
+#else
+ return writeInst(InstImm(op_lld, rs, rd, Imm16(off)).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_ld(Register rd, Register rs, int16_t off) {
+ spew("ld %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_ld, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ldl(Register rd, Register rs,
+ int16_t off) {
+ spew("ldl %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_ldl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ldr(Register rd, Register rs,
+ int16_t off) {
+ spew("ldr %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_ldr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sb(Register rd, Register rs, int16_t off) {
+ spew("sb %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sh(Register rd, Register rs, int16_t off) {
+ spew("sh %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sw(Register rd, Register rs, int16_t off) {
+ spew("sw %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_swl(Register rd, Register rs,
+ int16_t off) {
+ spew("swl %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_swl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_swr(Register rd, Register rs,
+ int16_t off) {
+ spew("swr %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_swr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sc(Register rd, Register rs, int16_t off) {
+ spew("sc %3s, (0x%x)%2s", rd.name(), off, rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special3, rs, rd, ff_sc).encode());
+#else
+ return writeInst(InstImm(op_sc, rs, rd, Imm16(off)).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_scd(Register rd, Register rs,
+ int16_t off) {
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special3, rs, rd, ff_scd).encode());
+#else
+ spew("scd %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_scd, rs, rd, Imm16(off)).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_sd(Register rd, Register rs, int16_t off) {
+ spew("sd %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sd, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sdl(Register rd, Register rs,
+ int16_t off) {
+ spew("sdl %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sdl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sdr(Register rd, Register rs,
+ int16_t off) {
+ spew("sdr %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sdr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_seleqz(Register rd, Register rs,
+ Register rt) {
+ spew("seleqz %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x0, ff_seleqz).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_selnez(Register rd, Register rs,
+ Register rt) {
+ spew("selnez %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x0, ff_selnez).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslbx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslbx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxbx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssbx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssbx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxbx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslhx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslhx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxhx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsshx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsshx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxhx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslwx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslwx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxwx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsswx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsswx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxwx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsldx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsldx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxdx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssdx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssdx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxdx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslq(Register rh, Register rl, Register rs,
+ int16_t off) {
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ spew("gslq %3s,%3s, (0x%x)%2s", rh.name(), rl.name(), off, rs.name());
+ return writeInst(InstGS(op_lwc2, rs, rl, rh, GSImm13(off), ff_gsxq).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssq(Register rh, Register rl, Register rs,
+ int16_t off) {
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ spew("gssq %3s,%3s, (0x%x)%2s", rh.name(), rl.name(), off, rs.name());
+ return writeInst(InstGS(op_swc2, rs, rl, rh, GSImm13(off), ff_gsxq).encode());
+}
+
+// Move from HI/LO register.
+BufferOffset AssemblerMIPSShared::as_mfhi(Register rd) {
+ spew("mfhi %3s", rd.name());
+ return writeInst(InstReg(op_special, rd, ff_mfhi).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mflo(Register rd) {
+ spew("mflo %3s", rd.name());
+ return writeInst(InstReg(op_special, rd, ff_mflo).encode());
+}
+
+// Set on less than.
+BufferOffset AssemblerMIPSShared::as_slt(Register rd, Register rs,
+ Register rt) {
+ spew("slt %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_slt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sltu(Register rd, Register rs,
+ Register rt) {
+ spew("sltu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sltu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_slti(Register rd, Register rs, int32_t j) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ spew("slti %3s,%3s, 0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_slti, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sltiu(Register rd, Register rs,
+ uint32_t j) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(int32_t(j)));
+ spew("sltiu %3s,%3s, 0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_sltiu, rs, rd, Imm16(j)).encode());
+}
+
+// Conditional move.
+BufferOffset AssemblerMIPSShared::as_movz(Register rd, Register rs,
+ Register rt) {
+ spew("movz %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movz).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_movn(Register rd, Register rs,
+ Register rt) {
+ spew("movn %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movn).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_movt(Register rd, Register rs,
+ uint16_t cc) {
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 1);
+ spew("movt %3s,%3s, FCC%d", rd.name(), rs.name(), cc);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_movf(Register rd, Register rs,
+ uint16_t cc) {
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 0);
+ spew("movf %3s,%3s, FCC%d", rd.name(), rs.name(), cc);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+// Bit twiddling.
+BufferOffset AssemblerMIPSShared::as_clz(Register rd, Register rs) {
+ spew("clz %3s,%3s", rd.name(), rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special, rs, 0x0, rd, 0x1, ff_clz).encode());
+#else
+ return writeInst(InstReg(op_special2, rs, rd, rd, ff_clz).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_dclz(Register rd, Register rs) {
+ spew("dclz %3s,%3s", rd.name(), rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special, rs, 0x0, rd, 0x1, ff_dclz).encode());
+#else
+ return writeInst(InstReg(op_special2, rs, rd, rd, ff_dclz).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_wsbh(Register rd, Register rt) {
+ spew("wsbh %3s,%3s", rd.name(), rt.name());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 0x2, ff_bshfl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsbh(Register rd, Register rt) {
+ spew("dsbh %3s,%3s", rd.name(), rt.name());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 0x2, ff_dbshfl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dshd(Register rd, Register rt) {
+ spew("dshd %3s,%3s", rd.name(), rt.name());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 0x5, ff_dbshfl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ins(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 &&
+ pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1);
+ spew("ins %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ins).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dins(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 &&
+ pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1);
+ spew("dins %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dins).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dinsm(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size >= 2 && size <= 64 && pos + size > 32 &&
+ pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1 - 32);
+ spew("dinsm %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dinsm).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dinsu(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos >= 32 && pos < 64 && size >= 1 && size <= 32 &&
+ pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1 - 32);
+ spew("dinsu %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_special3, rs, rt, rd, pos - 32, ff_dinsu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ext(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 &&
+ pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ spew("ext %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ext).encode());
+}
+
+// Sign extend
+BufferOffset AssemblerMIPSShared::as_seb(Register rd, Register rt) {
+ spew("seb %3s,%3s", rd.name(), rt.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 16, ff_bshfl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_seh(Register rd, Register rt) {
+ spew("seh %3s,%3s", rd.name(), rt.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 24, ff_bshfl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dext(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 &&
+ pos + size <= 63);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ spew("dext %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dext).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dextm(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size > 32 && size <= 64 && pos + size > 32 &&
+ pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(size - 1 - 32);
+ spew("dextm %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dextm).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dextu(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos >= 32 && pos < 64 && size != 0 && size <= 32 &&
+ pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ spew("dextu %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_special3, rs, rt, rd, pos - 32, ff_dextu).encode());
+}
+
+// FP instructions
+BufferOffset AssemblerMIPSShared::as_ldc1(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ spew("ldc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
+ return writeInst(InstImm(op_ldc1, base, ft, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sdc1(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ spew("sdc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
+ return writeInst(InstImm(op_sdc1, base, ft, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lwc1(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ spew("lwc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
+ return writeInst(InstImm(op_lwc1, base, ft, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_swc1(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ spew("swc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
+ return writeInst(InstImm(op_swc1, base, ft, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsldl(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsldl %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxdlc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsldr(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsldr %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxdrc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssdl(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssdl %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxdlc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssdr(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssdr %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxdrc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslsl(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslsl %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxwlc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslsr(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslsr %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxwrc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsssl(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsssl %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxwlc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsssr(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsssr %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxwrc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslsx(FloatRegister fd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslsx %3s, (%3s,%3s, 0x%x)", fd.name(), rs.name(), ri.name(), off);
+ return writeInst(InstGS(op_ldc2, rs, fd, ri, Imm8(off), ff_gsxwxc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsssx(FloatRegister fd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsssx %3s, (%3s,%3s, 0x%x)", fd.name(), rs.name(), ri.name(), off);
+ return writeInst(InstGS(op_sdc2, rs, fd, ri, Imm8(off), ff_gsxwxc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsldx(FloatRegister fd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsldx %3s, (%3s,%3s, 0x%x)", fd.name(), rs.name(), ri.name(), off);
+ return writeInst(InstGS(op_ldc2, rs, fd, ri, Imm8(off), ff_gsxdxc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssdx(FloatRegister fd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssdx %3s, (%3s,%3s, 0x%x)", fd.name(), rs.name(), ri.name(), off);
+ return writeInst(InstGS(op_sdc2, rs, fd, ri, Imm8(off), ff_gsxdxc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslq(FloatRegister rh, FloatRegister rl,
+ Register rs, int16_t off) {
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ spew("gslq %3s,%3s, (0x%x)%2s", rh.name(), rl.name(), off, rs.name());
+ return writeInst(
+ InstGS(op_lwc2, rs, rl, rh, GSImm13(off), ff_gsxqc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssq(FloatRegister rh, FloatRegister rl,
+ Register rs, int16_t off) {
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ spew("gssq %3s,%3s, (0x%x)%2s", rh.name(), rl.name(), off, rs.name());
+ return writeInst(
+ InstGS(op_swc2, rs, rl, rh, GSImm13(off), ff_gsxqc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_movs(FloatRegister fd, FloatRegister fs) {
+ spew("mov.s %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_movd(FloatRegister fd, FloatRegister fs) {
+ spew("mov.d %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ctc1(Register rt, FPControl fc) {
+ spew("ctc1 %3s,%d", rt.name(), fc);
+ return writeInst(InstReg(op_cop1, rs_ctc1, rt, (uint32_t)fc).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cfc1(Register rt, FPControl fc) {
+ spew("cfc1 %3s,%d", rt.name(), fc);
+ return writeInst(InstReg(op_cop1, rs_cfc1, rt, (uint32_t)fc).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mtc1(Register rt, FloatRegister fs) {
+ spew("mtc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_mtc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mfc1(Register rt, FloatRegister fs) {
+ spew("mfc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_mfc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mthc1(Register rt, FloatRegister fs) {
+ spew("mthc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_mthc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mfhc1(Register rt, FloatRegister fs) {
+ spew("mfhc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_mfhc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmtc1(Register rt, FloatRegister fs) {
+ spew("dmtc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_dmtc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmfc1(Register rt, FloatRegister fs) {
+ spew("dmfc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_dmfc1, rt, fs).encode());
+}
+
+// FP convert instructions
+BufferOffset AssemblerMIPSShared::as_ceilws(FloatRegister fd,
+ FloatRegister fs) {
+ spew("ceil.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_floorws(FloatRegister fd,
+ FloatRegister fs) {
+ spew("floor.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_roundws(FloatRegister fd,
+ FloatRegister fs) {
+ spew("round.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_truncws(FloatRegister fd,
+ FloatRegister fs) {
+ spew("trunc.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_truncls(FloatRegister fd,
+ FloatRegister fs) {
+ spew("trunc.l.s%3s,%3s", fd.name(), fs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_l_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ceilwd(FloatRegister fd,
+ FloatRegister fs) {
+ spew("ceil.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_floorwd(FloatRegister fd,
+ FloatRegister fs) {
+ spew("floor.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_roundwd(FloatRegister fd,
+ FloatRegister fs) {
+ spew("round.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_truncwd(FloatRegister fd,
+ FloatRegister fs) {
+ spew("trunc.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_truncld(FloatRegister fd,
+ FloatRegister fs) {
+ spew("trunc.l.d%3s,%3s", fd.name(), fs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_l_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtdl(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.d.l%3s,%3s", fd.name(), fs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtds(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.d.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtdw(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.d.w%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtsd(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.s.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtsl(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.s.l%3s,%3s", fd.name(), fs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtsw(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.s.w%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtwd(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtws(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+// FP arithmetic instructions
+BufferOffset AssemblerMIPSShared::as_adds(FloatRegister fd, FloatRegister fs,
+ FloatRegister ft) {
+ spew("add.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_addd(FloatRegister fd, FloatRegister fs,
+ FloatRegister ft) {
+ spew("add.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_subs(FloatRegister fd, FloatRegister fs,
+ FloatRegister ft) {
+ spew("sub.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_subd(FloatRegister fd, FloatRegister fs,
+ FloatRegister ft) {
+ spew("sub.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_abss(FloatRegister fd, FloatRegister fs) {
+ spew("abs.s %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_absd(FloatRegister fd, FloatRegister fs) {
+ spew("abs.d %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_negs(FloatRegister fd, FloatRegister fs) {
+ spew("neg.s %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_negd(FloatRegister fd, FloatRegister fs) {
+ spew("neg.d %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_muls(FloatRegister fd, FloatRegister fs,
+ FloatRegister ft) {
+ spew("mul.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_muld(FloatRegister fd, FloatRegister fs,
+ FloatRegister ft) {
+ spew("mul.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_divs(FloatRegister fd, FloatRegister fs,
+ FloatRegister ft) {
+ spew("div.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_div_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_divd(FloatRegister fd, FloatRegister fs,
+ FloatRegister ft) {
+ spew("divd.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_div_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sqrts(FloatRegister fd, FloatRegister fs) {
+ spew("sqrts %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sqrtd(FloatRegister fd, FloatRegister fs) {
+ spew("sqrtd %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+// FP compare instructions
+BufferOffset AssemblerMIPSShared::as_cf(FloatFormat fmt, FloatRegister fs,
+ FloatRegister ft, FPConditionBit fcc) {
+ if (fmt == DoubleFloat) {
+ spew("c.f.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_f_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
+#endif
+ } else {
+ spew("c.f.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_f_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
+#endif
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_cun(FloatFormat fmt, FloatRegister fs,
+ FloatRegister ft, FPConditionBit fcc) {
+ if (fmt == DoubleFloat) {
+ spew("c.un.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_un_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
+#endif
+ } else {
+ spew("c.un.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_un_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
+#endif
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_ceq(FloatFormat fmt, FloatRegister fs,
+ FloatRegister ft, FPConditionBit fcc) {
+ if (fmt == DoubleFloat) {
+ spew("c.eq.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_eq_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
+#endif
+ } else {
+ spew("c.eq.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_eq_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
+#endif
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_cueq(FloatFormat fmt, FloatRegister fs,
+ FloatRegister ft,
+ FPConditionBit fcc) {
+ if (fmt == DoubleFloat) {
+ spew("c.ueq.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_ueq_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
+#endif
+ } else {
+ spew("c.ueq.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_ueq_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
+#endif
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_colt(FloatFormat fmt, FloatRegister fs,
+ FloatRegister ft,
+ FPConditionBit fcc) {
+ if (fmt == DoubleFloat) {
+ spew("c.olt.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_olt_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
+#endif
+ } else {
+ spew("c.olt.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_olt_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
+#endif
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_cult(FloatFormat fmt, FloatRegister fs,
+ FloatRegister ft,
+ FPConditionBit fcc) {
+ if (fmt == DoubleFloat) {
+ spew("c.ult.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_ult_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
+#endif
+ } else {
+ spew("c.ult.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_ult_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
+#endif
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_cole(FloatFormat fmt, FloatRegister fs,
+ FloatRegister ft,
+ FPConditionBit fcc) {
+ if (fmt == DoubleFloat) {
+ spew("c.ole.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_ole_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
+#endif
+ } else {
+ spew("c.ole.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_ole_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
+#endif
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_cule(FloatFormat fmt, FloatRegister fs,
+ FloatRegister ft,
+ FPConditionBit fcc) {
+ if (fmt == DoubleFloat) {
+ spew("c.ule.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_ule_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
+#endif
+ } else {
+ spew("c.ule.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+ return writeInst(
+ InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_ule_fmt)
+ .encode());
+#else
+ return writeInst(
+ InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
+#endif
+ }
+}
+
+// FP conditional move.
+BufferOffset AssemblerMIPSShared::as_movt(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fs,
+ FPConditionBit fcc) {
+ Register rt = Register::FromCode(fcc << 2 | 1);
+ if (fmt == DoubleFloat) {
+ spew("movt.d FCC%d,%3s,%3s", fcc, fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, rt, fs, fd, ff_movf_fmt).encode());
+ } else {
+ spew("movt.s FCC%d,%3s,%3s", fcc, fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, rt, fs, fd, ff_movf_fmt).encode());
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_movf(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fs,
+ FPConditionBit fcc) {
+ Register rt = Register::FromCode(fcc << 2 | 0);
+ if (fmt == DoubleFloat) {
+ spew("movf.d FCC%d,%3s,%3s", fcc, fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, rt, fs, fd, ff_movf_fmt).encode());
+ } else {
+ spew("movf.s FCC%d,%3s,%3s", fcc, fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, rt, fs, fd, ff_movf_fmt).encode());
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_movz(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fs, Register rt) {
+ if (fmt == DoubleFloat) {
+ spew("movz.d %3s,%3s,%3s", fd.name(), fs.name(), rt.name());
+ return writeInst(InstReg(op_cop1, rs_d, rt, fs, fd, ff_movz_fmt).encode());
+ } else {
+ spew("movz.s %3s,%3s,%3s", fd.name(), fs.name(), rt.name());
+ return writeInst(InstReg(op_cop1, rs_s, rt, fs, fd, ff_movz_fmt).encode());
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_movn(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fs, Register rt) {
+ if (fmt == DoubleFloat) {
+ spew("movn.d %3s,%3s,%3s", fd.name(), fs.name(), rt.name());
+ return writeInst(InstReg(op_cop1, rs_d, rt, fs, fd, ff_movn_fmt).encode());
+ } else {
+ spew("movn.s %3s,%3s,%3s", fd.name(), fs.name(), rt.name());
+ return writeInst(InstReg(op_cop1, rs_s, rt, fs, fd, ff_movn_fmt).encode());
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_max(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fs, FloatRegister ft) {
+ if (fmt == DoubleFloat) {
+ spew("max %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_max).encode());
+ } else {
+ spew("max %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_max).encode());
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_min(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fs, FloatRegister ft) {
+ if (fmt == DoubleFloat) {
+ spew("min %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_min).encode());
+ } else {
+ spew("min %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_min).encode());
+ }
+}
+
+BufferOffset AssemblerMIPSShared::as_tge(Register rs, Register rt,
+ uint32_t code) {
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ spew("tge %3s,%3s,%d", rs.name(), rt.name(), code);
+ return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tge).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_tgeu(Register rs, Register rt,
+ uint32_t code) {
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ spew("tgeu %3s,%3s,%d", rs.name(), rt.name(), code);
+ return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tgeu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_tlt(Register rs, Register rt,
+ uint32_t code) {
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ spew("tlt %3s,%3s,%d", rs.name(), rt.name(), code);
+ return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tlt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_tltu(Register rs, Register rt,
+ uint32_t code) {
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ spew("tltu %3s,%3s,%d", rs.name(), rt.name(), code);
+ return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tltu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_teq(Register rs, Register rt,
+ uint32_t code) {
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ spew("teq %3s,%3s,%d", rs.name(), rt.name(), code);
+ return writeInst(InstReg(op_special, rs, rt, zero, code, ff_teq).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_tne(Register rs, Register rt,
+ uint32_t code) {
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ spew("tne %3s,%3s,%d", rs.name(), rt.name(), code);
+ return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tne).encode());
+}
+
+void AssemblerMIPSShared::bind(Label* label, BufferOffset boff) {
+ spew(".set Llabel %p", label);
+  // If our caller didn't give us an explicit target to bind to,
+  // we bind to the location of the next instruction.
+ BufferOffset dest = boff.assigned() ? boff : nextOffset();
+ if (label->used()) {
+ int32_t next;
+
+    // A used label holds a link to a branch that uses it.
+ BufferOffset b(label);
+ do {
+ // Even a 0 offset may be invalid if we're out of memory.
+ if (oom()) {
+ return;
+ }
+
+ Instruction* inst = editSrc(b);
+
+ // Second word holds a pointer to the next branch in label's chain.
+ next = inst[1].encode();
+ bind(reinterpret_cast<InstImm*>(inst), b.getOffset(), dest.getOffset());
+
+ b = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+ }
+ label->bind(dest.getOffset());
+}
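+
+// Use-chain layout assumed by bind() above (illustrative sketch, not emitted
+// code): each unbound use of a label is a branch followed by a second word
+// that links to the next use, and label->offset() holds the offset of the
+// first use until the label is bound:
+//
+//   use #0: [branch] [offset of use #1]
+//   use #1: [branch] [INVALID_OFFSET]    ; end of chain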
+
+void AssemblerMIPSShared::retarget(Label* label, Label* target) {
+ spew("retarget %p -> %p", label, target);
+ if (label->used() && !oom()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ int32_t next;
+ BufferOffset labelBranchOffset(label);
+
+ // Find the head of the use chain for label.
+ do {
+ Instruction* inst = editSrc(labelBranchOffset);
+
+ // Second word holds a pointer to the next branch in chain.
+ next = inst[1].encode();
+ labelBranchOffset = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+
+ // Then patch the head of label's use chain to the tail of
+ // target's use chain, prepending the entire use chain of target.
+ Instruction* inst = editSrc(labelBranchOffset);
+ int32_t prev = target->offset();
+ target->use(label->offset());
+ inst[1].setData(prev);
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ target->use(label->offset());
+ }
+ }
+ label->reset();
+}
+
+void dbg_break() {}
+void AssemblerMIPSShared::as_break(uint32_t code) {
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ spew("break %d", code);
+ writeInst(op_special | code << FunctionBits | ff_break);
+}
+
+void AssemblerMIPSShared::as_sync(uint32_t stype) {
+ MOZ_ASSERT(stype <= 31);
+ spew("sync %d", stype);
+ writeInst(InstReg(op_special, zero, zero, zero, stype, ff_sync).encode());
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. This is
+// only meant to function on code that has been invalidated, so it should be
+// totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary.
+void AssemblerMIPSShared::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will
+ // end up being the call instruction.
+ *(raw - 1) = imm.value;
+}
+
+uint8_t* AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count) {
+ Instruction* inst = reinterpret_cast<Instruction*>(inst_);
+ if (count != nullptr) {
+ *count += sizeof(Instruction);
+ }
+ return reinterpret_cast<uint8_t*>(inst->next());
+}
+
+// Since there are no pools in the MIPS implementation, this should be simple.
+Instruction* Instruction::next() { return this + 1; }
+
+InstImm AssemblerMIPSShared::invertBranch(InstImm branch,
+ BOffImm16 skipOffset) {
+ uint32_t rt = 0;
+ OpcodeField op = (OpcodeField)(branch.extractOpcode() << OpcodeShift);
+ switch (op) {
+ case op_beq:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bne);
+ return branch;
+ case op_bne:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_beq);
+ return branch;
+ case op_bgtz:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_blez);
+ return branch;
+ case op_blez:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bgtz);
+ return branch;
+ case op_regimm:
+ branch.setBOffImm16(skipOffset);
+ rt = branch.extractRT();
+ if (rt == (rt_bltz >> RTShift)) {
+ branch.setRT(rt_bgez);
+ return branch;
+ }
+ if (rt == (rt_bgez >> RTShift)) {
+ branch.setRT(rt_bltz);
+ return branch;
+ }
+
+ MOZ_CRASH("Error creating long branch.");
+
+ case op_cop1:
+ MOZ_ASSERT(branch.extractRS() == rs_bc1 >> RSShift);
+
+ branch.setBOffImm16(skipOffset);
+ rt = branch.extractRT();
+ if (rt & 0x1) {
+ branch.setRT((RTField)((rt & ~0x1) << RTShift));
+ } else {
+ branch.setRT((RTField)((rt | 0x1) << RTShift));
+ }
+ return branch;
+ default:
+ MOZ_CRASH("Error creating long branch.");
+ }
+}
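+
+// Illustration (the register names and labels here are made up, not emitted
+// by this function): invertBranch is used to guard a long jump when a
+// conditional branch target is out of BOffImm16 range, e.g.
+//   beq $t0,$t1,far    becomes    bne $t0,$t1,skip
+//                                 j   far      (plus its delay slot)
+//                               skip: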
+
+void AssemblerMIPSShared::ToggleToJmp(CodeLocationLabel inst_) {
+ InstImm* inst = (InstImm*)inst_.raw();
+
+ MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift));
+ // We converted beq to andi, so now we restore it.
+ inst->setOpcode(op_beq);
+}
+
+void AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_) {
+ InstImm* inst = (InstImm*)inst_.raw();
+
+ // toggledJump is always used for short jumps.
+ MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift));
+ // Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset"
+ inst->setOpcode(op_andi);
+}
+
+void AssemblerMIPSShared::UpdateLuiOriValue(Instruction* inst0,
+ Instruction* inst1,
+ uint32_t value) {
+ MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ ((InstImm*)inst0)->setImm16(Imm16::Upper(Imm32(value)));
+ ((InstImm*)inst1)->setImm16(Imm16::Lower(Imm32(value)));
+}
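+
+// Worked example (illustrative only): for value = 0x12345678 the patched pair
+// reads "lui $reg, 0x1234" followed by "ori $reg, $reg, 0x5678", so the
+// register again holds the full 32-bit value.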
+
+#ifdef JS_JITSPEW
+void AssemblerMIPSShared::decodeBranchInstAndSpew(InstImm branch) {
+ OpcodeField op = (OpcodeField)(branch.extractOpcode() << OpcodeShift);
+ uint32_t rt_id;
+ uint32_t rs_id;
+ uint32_t immi = branch.extractImm16Value();
+ uint32_t fcc;
+ switch (op) {
+ case op_beq:
+ rt_id = branch.extractRT();
+ rs_id = branch.extractRS();
+ spew("beq %3s,%3s,0x%x", Registers::GetName(rs_id),
+ Registers::GetName(rt_id), (int32_t(immi << 18) >> 16) + 4);
+ break;
+ case op_bne:
+ rt_id = branch.extractRT();
+ rs_id = branch.extractRS();
+ spew("bne %3s,%3s,0x%x", Registers::GetName(rs_id),
+ Registers::GetName(rt_id), (int32_t(immi << 18) >> 16) + 4);
+ break;
+ case op_bgtz:
+ rs_id = branch.extractRS();
+ spew("bgt %3s, 0,0x%x", Registers::GetName(rs_id),
+ (int32_t(immi << 18) >> 16) + 4);
+ break;
+ case op_blez:
+ rs_id = branch.extractRS();
+ spew("ble %3s, 0,0x%x", Registers::GetName(rs_id),
+ (int32_t(immi << 18) >> 16) + 4);
+ break;
+ case op_regimm:
+ rt_id = branch.extractRT();
+ if (rt_id == (rt_bltz >> RTShift)) {
+ rs_id = branch.extractRS();
+ spew("blt %3s, 0,0x%x", Registers::GetName(rs_id),
+ (int32_t(immi << 18) >> 16) + 4);
+ } else if (rt_id == (rt_bgez >> RTShift)) {
+ rs_id = branch.extractRS();
+ spew("bge %3s, 0,0x%x", Registers::GetName(rs_id),
+ (int32_t(immi << 18) >> 16) + 4);
+ } else {
+ MOZ_CRASH("Error disassemble branch.");
+ }
+ break;
+ case op_cop1:
+ MOZ_ASSERT(branch.extractRS() == rs_bc1 >> RSShift);
+ rt_id = branch.extractRT();
+ fcc = branch.extractBitField(FCccShift + FCccBits - 1, FCccShift);
+ if (rt_id & 0x1) {
+ spew("bc1t FCC%d, 0x%x", fcc, (int32_t(immi << 18) >> 16) + 4);
+ } else {
+ spew("bc1f FCC%d, 0x%x", fcc, (int32_t(immi << 18) >> 16) + 4);
+ }
+ break;
+ default:
+ MOZ_CRASH("Error disassemble branch.");
+ }
+}
+#endif
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.h b/js/src/jit/mips-shared/Assembler-mips-shared.h
new file mode 100644
index 0000000000..32332de5be
--- /dev/null
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -0,0 +1,1500 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Assembler_mips_shared_h
+#define jit_mips_shared_Assembler_mips_shared_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Sprintf.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/JitCode.h"
+#include "jit/JitSpewer.h"
+#include "jit/mips-shared/Architecture-mips-shared.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register zero{Registers::zero};
+static constexpr Register at{Registers::at};
+static constexpr Register v0{Registers::v0};
+static constexpr Register v1{Registers::v1};
+static constexpr Register a0{Registers::a0};
+static constexpr Register a1{Registers::a1};
+static constexpr Register a2{Registers::a2};
+static constexpr Register a3{Registers::a3};
+static constexpr Register a4{Registers::ta0};
+static constexpr Register a5{Registers::ta1};
+static constexpr Register a6{Registers::ta2};
+static constexpr Register a7{Registers::ta3};
+static constexpr Register t0{Registers::t0};
+static constexpr Register t1{Registers::t1};
+static constexpr Register t2{Registers::t2};
+static constexpr Register t3{Registers::t3};
+static constexpr Register t4{Registers::ta0};
+static constexpr Register t5{Registers::ta1};
+static constexpr Register t6{Registers::ta2};
+static constexpr Register t7{Registers::ta3};
+static constexpr Register s0{Registers::s0};
+static constexpr Register s1{Registers::s1};
+static constexpr Register s2{Registers::s2};
+static constexpr Register s3{Registers::s3};
+static constexpr Register s4{Registers::s4};
+static constexpr Register s5{Registers::s5};
+static constexpr Register s6{Registers::s6};
+static constexpr Register s7{Registers::s7};
+static constexpr Register t8{Registers::t8};
+static constexpr Register t9{Registers::t9};
+static constexpr Register k0{Registers::k0};
+static constexpr Register k1{Registers::k1};
+static constexpr Register gp{Registers::gp};
+static constexpr Register sp{Registers::sp};
+static constexpr Register fp{Registers::fp};
+static constexpr Register ra{Registers::ra};
+
+static constexpr Register ScratchRegister = at;
+static constexpr Register SecondScratchReg = t8;
+
+// Helper classes for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of each scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope {
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchRegister) {}
+};
+struct SecondScratchRegisterScope : public AutoRegisterScope {
+ explicit SecondScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, SecondScratchReg) {}
+};
+
+// Use arg reg from EnterJIT function as OsrFrameReg.
+static constexpr Register OsrFrameReg = a3;
+static constexpr Register CallTempReg0 = t0;
+static constexpr Register CallTempReg1 = t1;
+static constexpr Register CallTempReg2 = t2;
+static constexpr Register CallTempReg3 = t3;
+
+static constexpr Register IntArgReg0 = a0;
+static constexpr Register IntArgReg1 = a1;
+static constexpr Register IntArgReg2 = a2;
+static constexpr Register IntArgReg3 = a3;
+static constexpr Register IntArgReg4 = a4;
+static constexpr Register IntArgReg5 = a5;
+static constexpr Register IntArgReg6 = a6;
+static constexpr Register IntArgReg7 = a7;
+static constexpr Register GlobalReg = s6; // used by Odin
+static constexpr Register HeapReg = s7; // used by Odin
+
+static constexpr Register PreBarrierReg = a1;
+
+static constexpr Register InvalidReg{Registers::invalid_reg};
+static constexpr FloatRegister InvalidFloatReg;
+
+static constexpr Register StackPointer = sp;
+static constexpr Register FramePointer = fp;
+static constexpr Register ReturnReg = v0;
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
+
+// A bias applied to the GlobalReg to allow the use of instructions with small
+// negative immediate offsets, which doubles the range of global data that can
+// be accessed with a single instruction.
+static const int32_t WasmGlobalRegBias = 32768;
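+// For example, with this bias the signed 16-bit offset range [-32768, 32767]
+// relative to GlobalReg reaches global data in [0, 65535] with a single
+// instruction, instead of only [0, 32767] without it.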
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg1;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg1;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
+
+static constexpr uint32_t CodeAlignment = 8;
+
+/* clang-format off */
+// MIPS instruction types
+// +---------------------------------------------------------------+
+// | 6 | 5 | 5 | 5 | 5 | 6 |
+// +---------------------------------------------------------------+
+// Register type | Opcode | Rs | Rt | Rd | Sa | Function |
+// +---------------------------------------------------------------+
+// | 6 | 5 | 5 | 16 |
+// +---------------------------------------------------------------+
+// Immediate type | Opcode | Rs | Rt | 2's complement constant |
+// +---------------------------------------------------------------+
+// | 6 | 26 |
+// +---------------------------------------------------------------+
+// Jump type | Opcode | jump_target |
+// +---------------------------------------------------------------+
+// 31 bit bit 0
+/* clang-format on */
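+
+// For example, a register-type instruction such as "addu rd,rs,rt" is
+// assembled from these fields as (sketch; see InstReg below):
+//   op_special | RS(rs) | RT(rt) | RD(rd) | SA(0) | ff_addu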
+
+// MIPS instruction encoding constants.
+static const uint32_t OpcodeShift = 26;
+static const uint32_t OpcodeBits = 6;
+static const uint32_t RSShift = 21;
+static const uint32_t RSBits = 5;
+static const uint32_t RTShift = 16;
+static const uint32_t RTBits = 5;
+static const uint32_t RDShift = 11;
+static const uint32_t RDBits = 5;
+static const uint32_t RZShift = 0;
+static const uint32_t RZBits = 5;
+static const uint32_t SAShift = 6;
+static const uint32_t SABits = 5;
+static const uint32_t FunctionShift = 0;
+static const uint32_t FunctionBits = 6;
+static const uint32_t Imm16Shift = 0;
+static const uint32_t Imm16Bits = 16;
+static const uint32_t Imm26Shift = 0;
+static const uint32_t Imm26Bits = 26;
+static const uint32_t Imm28Shift = 0;
+static const uint32_t Imm28Bits = 28;
+static const uint32_t ImmFieldShift = 2;
+static const uint32_t FRBits = 5;
+static const uint32_t FRShift = 21;
+static const uint32_t FSShift = 11;
+static const uint32_t FSBits = 5;
+static const uint32_t FTShift = 16;
+static const uint32_t FTBits = 5;
+static const uint32_t FDShift = 6;
+static const uint32_t FDBits = 5;
+static const uint32_t FCccShift = 8;
+static const uint32_t FCccBits = 3;
+static const uint32_t FBccShift = 18;
+static const uint32_t FBccBits = 3;
+static const uint32_t FBtrueShift = 16;
+static const uint32_t FBtrueBits = 1;
+static const uint32_t FccMask = 0x7;
+static const uint32_t FccShift = 2;
+
+// MIPS instruction field bit masks.
+static const uint32_t OpcodeMask = ((1 << OpcodeBits) - 1) << OpcodeShift;
+static const uint32_t Imm16Mask = ((1 << Imm16Bits) - 1) << Imm16Shift;
+static const uint32_t Imm26Mask = ((1 << Imm26Bits) - 1) << Imm26Shift;
+static const uint32_t Imm28Mask = ((1 << Imm28Bits) - 1) << Imm28Shift;
+static const uint32_t RSMask = ((1 << RSBits) - 1) << RSShift;
+static const uint32_t RTMask = ((1 << RTBits) - 1) << RTShift;
+static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift;
+static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift;
+static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift;
+static const uint32_t RegMask = Registers::Total - 1;
+
+static const uint32_t BREAK_STACK_UNALIGNED = 1;
+static const uint32_t MAX_BREAK_CODE = 1024 - 1;
+static const uint32_t WASM_TRAP = 6; // BRK_OVERFLOW
+
+class Instruction;
+class InstReg;
+class InstImm;
+class InstJump;
+
+uint32_t RS(Register r);
+uint32_t RT(Register r);
+uint32_t RT(FloatRegister r);
+uint32_t RD(Register r);
+uint32_t RD(FloatRegister r);
+uint32_t RZ(Register r);
+uint32_t RZ(FloatRegister r);
+uint32_t SA(uint32_t value);
+uint32_t SA(FloatRegister r);
+uint32_t FS(uint32_t value);
+
+Register toRS(Instruction& i);
+Register toRT(Instruction& i);
+Register toRD(Instruction& i);
+Register toR(Instruction& i);
+
+// MIPS enums for instruction fields
+enum OpcodeField {
+ op_special = 0 << OpcodeShift,
+ op_regimm = 1 << OpcodeShift,
+
+ op_j = 2 << OpcodeShift,
+ op_jal = 3 << OpcodeShift,
+ op_beq = 4 << OpcodeShift,
+ op_bne = 5 << OpcodeShift,
+ op_blez = 6 << OpcodeShift,
+ op_bgtz = 7 << OpcodeShift,
+
+ op_addi = 8 << OpcodeShift,
+ op_addiu = 9 << OpcodeShift,
+ op_slti = 10 << OpcodeShift,
+ op_sltiu = 11 << OpcodeShift,
+ op_andi = 12 << OpcodeShift,
+ op_ori = 13 << OpcodeShift,
+ op_xori = 14 << OpcodeShift,
+ op_lui = 15 << OpcodeShift,
+
+ op_cop1 = 17 << OpcodeShift,
+ op_cop1x = 19 << OpcodeShift,
+
+ op_beql = 20 << OpcodeShift,
+ op_bnel = 21 << OpcodeShift,
+ op_blezl = 22 << OpcodeShift,
+ op_bgtzl = 23 << OpcodeShift,
+
+ op_daddi = 24 << OpcodeShift,
+ op_daddiu = 25 << OpcodeShift,
+
+ op_ldl = 26 << OpcodeShift,
+ op_ldr = 27 << OpcodeShift,
+
+ op_special2 = 28 << OpcodeShift,
+ op_special3 = 31 << OpcodeShift,
+
+ op_lb = 32 << OpcodeShift,
+ op_lh = 33 << OpcodeShift,
+ op_lwl = 34 << OpcodeShift,
+ op_lw = 35 << OpcodeShift,
+ op_lbu = 36 << OpcodeShift,
+ op_lhu = 37 << OpcodeShift,
+ op_lwr = 38 << OpcodeShift,
+ op_lwu = 39 << OpcodeShift,
+ op_sb = 40 << OpcodeShift,
+ op_sh = 41 << OpcodeShift,
+ op_swl = 42 << OpcodeShift,
+ op_sw = 43 << OpcodeShift,
+ op_sdl = 44 << OpcodeShift,
+ op_sdr = 45 << OpcodeShift,
+ op_swr = 46 << OpcodeShift,
+
+ op_ll = 48 << OpcodeShift,
+ op_lwc1 = 49 << OpcodeShift,
+ op_lwc2 = 50 << OpcodeShift,
+ op_lld = 52 << OpcodeShift,
+ op_ldc1 = 53 << OpcodeShift,
+ op_ldc2 = 54 << OpcodeShift,
+ op_ld = 55 << OpcodeShift,
+
+ op_sc = 56 << OpcodeShift,
+ op_swc1 = 57 << OpcodeShift,
+ op_swc2 = 58 << OpcodeShift,
+ op_scd = 60 << OpcodeShift,
+ op_sdc1 = 61 << OpcodeShift,
+ op_sdc2 = 62 << OpcodeShift,
+ op_sd = 63 << OpcodeShift,
+};
+
+enum RSField {
+ rs_zero = 0 << RSShift,
+ // cop1 encoding of RS field.
+ rs_mfc1 = 0 << RSShift,
+ rs_one = 1 << RSShift,
+ rs_dmfc1 = 1 << RSShift,
+ rs_cfc1 = 2 << RSShift,
+ rs_mfhc1 = 3 << RSShift,
+ rs_mtc1 = 4 << RSShift,
+ rs_dmtc1 = 5 << RSShift,
+ rs_ctc1 = 6 << RSShift,
+ rs_mthc1 = 7 << RSShift,
+ rs_bc1 = 8 << RSShift,
+ rs_f = 0x9 << RSShift,
+ rs_t = 0xd << RSShift,
+ rs_s_r6 = 20 << RSShift,
+ rs_d_r6 = 21 << RSShift,
+ rs_s = 16 << RSShift,
+ rs_d = 17 << RSShift,
+ rs_w = 20 << RSShift,
+ rs_l = 21 << RSShift,
+ rs_ps = 22 << RSShift
+};
+
+enum RTField {
+ rt_zero = 0 << RTShift,
+ // regimm encoding of RT field.
+ rt_bltz = 0 << RTShift,
+ rt_bgez = 1 << RTShift,
+ rt_bltzal = 16 << RTShift,
+ rt_bgezal = 17 << RTShift
+};
+
+enum FunctionField {
+ // special encoding of function field.
+ ff_sll = 0,
+ ff_movci = 1,
+ ff_srl = 2,
+ ff_sra = 3,
+ ff_sllv = 4,
+ ff_srlv = 6,
+ ff_srav = 7,
+
+ ff_jr = 8,
+ ff_jalr = 9,
+ ff_movz = 10,
+ ff_movn = 11,
+ ff_break = 13,
+ ff_sync = 15,
+
+ ff_mfhi = 16,
+ ff_mflo = 18,
+
+ ff_dsllv = 20,
+ ff_dsrlv = 22,
+ ff_dsrav = 23,
+
+ ff_mult = 24,
+ ff_multu = 25,
+
+ ff_mulu = 25,
+ ff_muh = 24,
+ ff_muhu = 25,
+ ff_dmul = 28,
+ ff_dmulu = 29,
+ ff_dmuh = 28,
+ ff_dmuhu = 29,
+
+ ff_div = 26,
+ ff_mod = 26,
+ ff_divu = 27,
+ ff_modu = 27,
+ ff_dmult = 28,
+ ff_dmultu = 29,
+ ff_ddiv = 30,
+ ff_dmod = 30,
+ ff_ddivu = 31,
+ ff_dmodu = 31,
+
+ ff_add = 32,
+ ff_addu = 33,
+ ff_sub = 34,
+ ff_subu = 35,
+ ff_and = 36,
+ ff_or = 37,
+ ff_xor = 38,
+ ff_nor = 39,
+
+ ff_slt = 42,
+ ff_sltu = 43,
+ ff_dadd = 44,
+ ff_daddu = 45,
+ ff_dsub = 46,
+ ff_dsubu = 47,
+
+ ff_tge = 48,
+ ff_tgeu = 49,
+ ff_tlt = 50,
+ ff_tltu = 51,
+ ff_teq = 52,
+ ff_seleqz = 53,
+ ff_tne = 54,
+ ff_selnez = 55,
+ ff_dsll = 56,
+ ff_dsrl = 58,
+ ff_dsra = 59,
+ ff_dsll32 = 60,
+ ff_dsrl32 = 62,
+ ff_dsra32 = 63,
+
+ // special2 encoding of function field.
+ ff_madd = 0,
+ ff_maddu = 1,
+#ifdef MIPSR6
+ ff_clz = 16,
+ ff_dclz = 18,
+ ff_mul = 24,
+#else
+ ff_mul = 2,
+ ff_clz = 32,
+ ff_dclz = 36,
+#endif
+ ff_clo = 33,
+
+ // special3 encoding of function field.
+ ff_ext = 0,
+ ff_dextm = 1,
+ ff_dextu = 2,
+ ff_dext = 3,
+ ff_ins = 4,
+ ff_dinsm = 5,
+ ff_dinsu = 6,
+ ff_dins = 7,
+ ff_bshfl = 32,
+ ff_dbshfl = 36,
+ ff_sc = 38,
+ ff_scd = 39,
+ ff_ll = 54,
+ ff_lld = 55,
+
+ // cop1 encoding of function field.
+ ff_add_fmt = 0,
+ ff_sub_fmt = 1,
+ ff_mul_fmt = 2,
+ ff_div_fmt = 3,
+ ff_sqrt_fmt = 4,
+ ff_abs_fmt = 5,
+ ff_mov_fmt = 6,
+ ff_neg_fmt = 7,
+
+ ff_round_l_fmt = 8,
+ ff_trunc_l_fmt = 9,
+ ff_ceil_l_fmt = 10,
+ ff_floor_l_fmt = 11,
+
+ ff_round_w_fmt = 12,
+ ff_trunc_w_fmt = 13,
+ ff_ceil_w_fmt = 14,
+ ff_floor_w_fmt = 15,
+
+ ff_movf_fmt = 17,
+ ff_movz_fmt = 18,
+ ff_movn_fmt = 19,
+
+ ff_min = 28,
+ ff_max = 30,
+
+ ff_cvt_s_fmt = 32,
+ ff_cvt_d_fmt = 33,
+ ff_cvt_w_fmt = 36,
+ ff_cvt_l_fmt = 37,
+ ff_cvt_ps_s = 38,
+
+#ifdef MIPSR6
+ ff_c_f_fmt = 0,
+ ff_c_un_fmt = 1,
+ ff_c_eq_fmt = 2,
+ ff_c_ueq_fmt = 3,
+ ff_c_olt_fmt = 4,
+ ff_c_ult_fmt = 5,
+ ff_c_ole_fmt = 6,
+ ff_c_ule_fmt = 7,
+#else
+ ff_c_f_fmt = 48,
+ ff_c_un_fmt = 49,
+ ff_c_eq_fmt = 50,
+ ff_c_ueq_fmt = 51,
+ ff_c_olt_fmt = 52,
+ ff_c_ult_fmt = 53,
+ ff_c_ole_fmt = 54,
+ ff_c_ule_fmt = 55,
+#endif
+
+ ff_madd_s = 32,
+ ff_madd_d = 33,
+
+ // Loongson encoding of function field.
+ ff_gsxbx = 0,
+ ff_gsxhx = 1,
+ ff_gsxwx = 2,
+ ff_gsxdx = 3,
+ ff_gsxwlc1 = 4,
+ ff_gsxwrc1 = 5,
+ ff_gsxdlc1 = 6,
+ ff_gsxdrc1 = 7,
+ ff_gsxwxc1 = 6,
+ ff_gsxdxc1 = 7,
+ ff_gsxq = 0x20,
+ ff_gsxqc1 = 0x8020,
+
+ ff_null = 0
+};
+
+class Operand;
+
+// A BOffImm16 is a 16 bit immediate that is used for branches.
+class BOffImm16 {
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+ return (int32_t(data << 18) >> 16) + 4;
+ }
+
+ explicit BOffImm16(int offset) : data((offset - 4) >> 2 & Imm16Mask) {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset - 4) < int(unsigned(INT16_MIN) << 2)) {
+ return false;
+ }
+ if ((offset - 4) > (INT16_MAX << 2)) {
+ return false;
+ }
+ return true;
+ }
+ static const uint32_t INVALID = 0x00020000;
+ BOffImm16() : data(INVALID) {}
+
+ bool isInvalid() { return data == INVALID; }
+ Instruction* getDest(Instruction* src) const;
+
+ BOffImm16(InstImm inst);
+};
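+
+// Worked example (illustrative): a branch whose target is 8 bytes ahead
+// encodes as (8 - 4) >> 2 = 1, and decode() recovers the original offset:
+// (int32_t(1 << 18) >> 16) + 4 = 8.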
+
+// A JOffImm26 is a 26 bit immediate that is used for unconditional jumps.
+class JOffImm26 {
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+ return (int32_t(data << 8) >> 6) + 4;
+ }
+
+ explicit JOffImm26(int offset) : data((offset - 4) >> 2 & Imm26Mask) {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset - 4) < -536870912) {
+ return false;
+ }
+ if ((offset - 4) > 536870908) {
+ return false;
+ }
+ return true;
+ }
+ static const uint32_t INVALID = 0x20000000;
+ JOffImm26() : data(INVALID) {}
+
+ bool isInvalid() { return data == INVALID; }
+ Instruction* getDest(Instruction* src);
+};
+
+class Imm16 {
+ uint16_t value;
+
+ public:
+ Imm16();
+ Imm16(uint32_t imm) : value(imm) {}
+ uint32_t encode() { return value; }
+ int32_t decodeSigned() { return value; }
+ uint32_t decodeUnsigned() { return value; }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT16_MIN && imm <= INT16_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) { return imm <= UINT16_MAX; }
+ static Imm16 Lower(Imm32 imm) { return Imm16(imm.value & 0xffff); }
+ static Imm16 Upper(Imm32 imm) { return Imm16((imm.value >> 16) & 0xffff); }
+};
+
+class Imm8 {
+ uint8_t value;
+
+ public:
+ Imm8();
+ Imm8(uint32_t imm) : value(imm) {}
+ uint32_t encode(uint32_t shift) { return value << shift; }
+ int32_t decodeSigned() { return value; }
+ uint32_t decodeUnsigned() { return value; }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT8_MIN && imm <= INT8_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) { return imm <= UINT8_MAX; }
+ static Imm8 Lower(Imm16 imm) { return Imm8(imm.decodeSigned() & 0xff); }
+ static Imm8 Upper(Imm16 imm) {
+ return Imm8((imm.decodeSigned() >> 8) & 0xff);
+ }
+};
+
+class GSImm13 {
+ uint16_t value;
+
+ public:
+ GSImm13();
+ GSImm13(uint32_t imm) : value(imm & ~0xf) {}
+ uint32_t encode(uint32_t shift) { return ((value >> 4) & 0x1ff) << shift; }
+ int32_t decodeSigned() { return value; }
+ uint32_t decodeUnsigned() { return value; }
+ static bool IsInRange(int32_t imm) {
+ return imm >= int32_t(uint32_t(-256) << 4) && imm <= (255 << 4);
+ }
+};
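+
+// For example (illustration), GSImm13 drops the low four bits, so an offset
+// of 0x30 stores field value 0x30 >> 4 = 3; the representable range is
+// [-4096, 4080] in 16-byte steps.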
+
+class Operand {
+ public:
+ enum Tag { REG, FREG, MEM };
+
+ private:
+ Tag tag : 3;
+ uint32_t reg : 5;
+ int32_t offset;
+
+ public:
+ Operand(Register reg_) : tag(REG), reg(reg_.code()) {}
+
+ Operand(FloatRegister freg) : tag(FREG), reg(freg.code()) {}
+
+ Operand(Register base, Imm32 off)
+ : tag(MEM), reg(base.code()), offset(off.value) {}
+
+ Operand(Register base, int32_t off)
+ : tag(MEM), reg(base.code()), offset(off) {}
+
+ Operand(const Address& addr)
+ : tag(MEM), reg(addr.base.code()), offset(addr.offset) {}
+
+ Tag getTag() const { return tag; }
+
+ Register toReg() const {
+ MOZ_ASSERT(tag == REG);
+ return Register::FromCode(reg);
+ }
+
+ FloatRegister toFReg() const {
+ MOZ_ASSERT(tag == FREG);
+ return FloatRegister::FromCode(reg);
+ }
+
+ void toAddr(Register* r, Imm32* dest) const {
+ MOZ_ASSERT(tag == MEM);
+ *r = Register::FromCode(reg);
+ *dest = Imm32(offset);
+ }
+ Address toAddress() const {
+ MOZ_ASSERT(tag == MEM);
+ return Address(Register::FromCode(reg), offset);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(tag == MEM);
+ return offset;
+ }
+
+ int32_t base() const {
+ MOZ_ASSERT(tag == MEM);
+ return reg;
+ }
+ Register baseReg() const {
+ MOZ_ASSERT(tag == MEM);
+ return Register::FromCode(reg);
+ }
+};
+
+inline Imm32 Imm64::firstHalf() const { return low(); }
+
+inline Imm32 Imm64::secondHalf() const { return hi(); }
+
+static constexpr int32_t SliceSize = 1024;
+typedef js::jit::AssemblerBuffer<SliceSize, Instruction> MIPSBuffer;
+
+class MIPSBufferWithExecutableCopy : public MIPSBuffer {
+ public:
+ void executableCopy(uint8_t* buffer) {
+ if (this->oom()) {
+ return;
+ }
+
+ for (Slice* cur = head; cur != nullptr; cur = cur->getNext()) {
+ memcpy(buffer, &cur->instructions, cur->length());
+ buffer += cur->length();
+ }
+ }
+
+ bool appendRawCode(const uint8_t* code, size_t numBytes) {
+ if (this->oom()) {
+ return false;
+ }
+ while (numBytes > SliceSize) {
+ this->putBytes(SliceSize, code);
+ numBytes -= SliceSize;
+ code += SliceSize;
+ }
+ this->putBytes(numBytes, code);
+ return !this->oom();
+ }
+};
+
+class AssemblerMIPSShared : public AssemblerShared {
+ public:
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual,
+ Overflow,
+ CarrySet,
+ CarryClear,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ Always,
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered
+ // - i.e. neither operand is NaN.
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ enum FPConditionBit { FCC0 = 0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 };
+
+ enum FPControl {
+ FIR = 0,
+ UFR,
+ UNFR = 4,
+ FCCR = 25,
+ FEXR,
+ FENR = 28,
+ FCSR = 31
+ };
+
+ enum FCSRBit { CauseI = 12, CauseU, CauseO, CauseZ, CauseV };
+
+ enum FloatFormat { SingleFloat, DoubleFloat };
+
+ enum JumpOrCall { BranchIsJump, BranchIsCall };
+
+ enum FloatTestKind { TestForTrue, TestForFalse };
+
+ // :( this should be protected, but since CodeGenerator
+ // wants to use it, it needs to go out here :(
+
+ BufferOffset nextOffset() { return m_buffer.nextOffset(); }
+
+ protected:
+ Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }
+
+ // Structure for fixing up pc-relative loads/jumps when the machine code
+ // gets moved (executable copy, gc, etc.).
+ struct RelativePatch {
+ // The offset within the code buffer where the value that we want to
+ // fix up is loaded.
+ BufferOffset offset;
+ void* target;
+ RelocationKind kind;
+
+ RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
+ : offset(offset), target(target), kind(kind) {}
+ };
+
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+
+ MIPSBufferWithExecutableCopy m_buffer;
+
+#ifdef JS_JITSPEW
+ Sprinter* printer;
+#endif
+
+ public:
+ AssemblerMIPSShared()
+ : m_buffer(),
+#ifdef JS_JITSPEW
+ printer(nullptr),
+#endif
+ isFinished(false) {
+ }
+
+ static Condition InvertCondition(Condition cond);
+ static DoubleCondition InvertCondition(DoubleCondition cond);
+
+ // As opposed to the x86/x64 version, the data relocation has to be executed
+ // before recovering the pointer, not after.
+ void writeDataRelocation(ImmGCPtr ptr) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(nextOffset().getOffset());
+ }
+ }
+
+ void assertNoGCThings() const {
+#ifdef DEBUG
+ MOZ_ASSERT(dataRelocations_.length() == 0);
+ for (auto& j : jumps_) {
+ MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+ }
+#endif
+ }
+
+ public:
+ void setUnlimitedBuffer() { m_buffer.setUnlimited(); }
+ bool oom() const;
+
+ void setPrinter(Sprinter* sp) {
+#ifdef JS_JITSPEW
+ printer = sp;
+#endif
+ }
+
+#ifdef JS_JITSPEW
+ inline void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {
+ if (MOZ_UNLIKELY(printer || JitSpewEnabled(JitSpew_Codegen))) {
+ va_list va;
+ va_start(va, fmt);
+ spew(fmt, va);
+ va_end(va);
+ }
+ }
+
+ void decodeBranchInstAndSpew(InstImm branch);
+#else
+ MOZ_ALWAYS_INLINE void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {}
+#endif
+
+#ifdef JS_JITSPEW
+ MOZ_COLD void spew(const char* fmt, va_list va) MOZ_FORMAT_PRINTF(2, 0) {
+ // Buffer to hold the formatted string. Note that this may contain
+ // '%' characters, so do not pass it directly to printf functions.
+ char buf[200];
+
+ int i = VsprintfLiteral(buf, fmt, va);
+ if (i > -1) {
+ if (printer) {
+ printer->printf("%s\n", buf);
+ }
+ js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
+ }
+ }
+#endif
+
+ Register getStackPointer() const { return StackPointer; }
+
+ protected:
+ bool isFinished;
+
+ public:
+ void finish();
+ bool appendRawCode(const uint8_t* code, size_t numBytes);
+ bool reserve(size_t size);
+ bool swapBuffer(wasm::Bytes& bytes);
+ void executableCopy(void* buffer);
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+
+ // Size of the instruction stream, in bytes.
+ size_t size() const;
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const;
+ size_t dataRelocationTableBytes() const;
+
+ // Size of the data table, in bytes.
+ size_t bytesNeeded() const;
+
+ // Write a blob of binary into the instruction stream *OR* into a
+ // destination address. If dest is nullptr (the default), then the
+ // instruction gets written into the instruction stream. If dest is not
+ // null, it is interpreted as a pointer to the location where we want the
+ // instruction to be written.
+ BufferOffset writeInst(uint32_t x, uint32_t* dest = nullptr);
+ // A static variant for the cases where we don't want to have an assembler
+ // object at all. Normally, you would use the dummy (nullptr) object.
+ static void WriteInstStatic(uint32_t x, uint32_t* dest);
+
+ public:
+ BufferOffset haltingAlign(int alignment);
+ BufferOffset nopAlign(int alignment);
+ BufferOffset as_nop();
+
+ // Branch and jump instructions
+ BufferOffset as_bal(BOffImm16 off);
+ BufferOffset as_b(BOffImm16 off);
+
+ InstImm getBranchCode(JumpOrCall jumpOrCall);
+ InstImm getBranchCode(Register s, Register t, Condition c);
+ InstImm getBranchCode(Register s, Condition c);
+ InstImm getBranchCode(FloatTestKind testKind, FPConditionBit fcc);
+
+ BufferOffset as_j(JOffImm26 off);
+ BufferOffset as_jal(JOffImm26 off);
+
+ BufferOffset as_jr(Register rs);
+ BufferOffset as_jalr(Register rs);
+
+ // Arithmetic instructions
+ BufferOffset as_addu(Register rd, Register rs, Register rt);
+ BufferOffset as_addiu(Register rd, Register rs, int32_t j);
+ BufferOffset as_daddu(Register rd, Register rs, Register rt);
+ BufferOffset as_daddiu(Register rd, Register rs, int32_t j);
+ BufferOffset as_subu(Register rd, Register rs, Register rt);
+ BufferOffset as_dsubu(Register rd, Register rs, Register rt);
+ BufferOffset as_mult(Register rs, Register rt);
+ BufferOffset as_multu(Register rs, Register rt);
+ BufferOffset as_dmult(Register rs, Register rt);
+ BufferOffset as_dmultu(Register rs, Register rt);
+ BufferOffset as_div(Register rs, Register rt);
+ BufferOffset as_divu(Register rs, Register rt);
+ BufferOffset as_mul(Register rd, Register rs, Register rt);
+ BufferOffset as_madd(Register rs, Register rt);
+ BufferOffset as_maddu(Register rs, Register rt);
+ BufferOffset as_ddiv(Register rs, Register rt);
+ BufferOffset as_ddivu(Register rs, Register rt);
+
+ BufferOffset as_muh(Register rd, Register rs, Register rt);
+ BufferOffset as_muhu(Register rd, Register rs, Register rt);
+ BufferOffset as_mulu(Register rd, Register rs, Register rt);
+ BufferOffset as_dmuh(Register rd, Register rs, Register rt);
+ BufferOffset as_dmuhu(Register rd, Register rs, Register rt);
+ BufferOffset as_dmul(Register rd, Register rs, Register rt);
+ BufferOffset as_dmulu(Register rd, Register rs, Register rt);
+ BufferOffset as_div(Register rd, Register rs, Register rt);
+ BufferOffset as_divu(Register rd, Register rs, Register rt);
+ BufferOffset as_mod(Register rd, Register rs, Register rt);
+ BufferOffset as_modu(Register rd, Register rs, Register rt);
+ BufferOffset as_ddiv(Register rd, Register rs, Register rt);
+ BufferOffset as_ddivu(Register rd, Register rs, Register rt);
+ BufferOffset as_dmod(Register rd, Register rs, Register rt);
+ BufferOffset as_dmodu(Register rd, Register rs, Register rt);
+
+ // Logical instructions
+ BufferOffset as_and(Register rd, Register rs, Register rt);
+ BufferOffset as_or(Register rd, Register rs, Register rt);
+ BufferOffset as_xor(Register rd, Register rs, Register rt);
+ BufferOffset as_nor(Register rd, Register rs, Register rt);
+
+ BufferOffset as_andi(Register rd, Register rs, int32_t j);
+ BufferOffset as_ori(Register rd, Register rs, int32_t j);
+ BufferOffset as_xori(Register rd, Register rs, int32_t j);
+ BufferOffset as_lui(Register rd, int32_t j);
+
+ // Shift instructions
+ // as_sll(zero, zero, x) instructions are reserved as nop
+ BufferOffset as_sll(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsll(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsll32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_sllv(Register rd, Register rt, Register rs);
+ BufferOffset as_dsllv(Register rd, Register rt, Register rs);
+ BufferOffset as_srl(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsrl(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsrl32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_srlv(Register rd, Register rt, Register rs);
+ BufferOffset as_dsrlv(Register rd, Register rt, Register rs);
+ BufferOffset as_sra(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsra(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsra32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_srav(Register rd, Register rt, Register rs);
+ BufferOffset as_rotr(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_rotrv(Register rd, Register rt, Register rs);
+ BufferOffset as_dsrav(Register rd, Register rt, Register rs);
+ BufferOffset as_drotr(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_drotr32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_drotrv(Register rd, Register rt, Register rs);
+
+ // Load and store instructions
+ BufferOffset as_lb(Register rd, Register rs, int16_t off);
+ BufferOffset as_lbu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lh(Register rd, Register rs, int16_t off);
+ BufferOffset as_lhu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lw(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwl(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwr(Register rd, Register rs, int16_t off);
+ BufferOffset as_ll(Register rd, Register rs, int16_t off);
+ BufferOffset as_lld(Register rd, Register rs, int16_t off);
+ BufferOffset as_ld(Register rd, Register rs, int16_t off);
+ BufferOffset as_ldl(Register rd, Register rs, int16_t off);
+ BufferOffset as_ldr(Register rd, Register rs, int16_t off);
+ BufferOffset as_sb(Register rd, Register rs, int16_t off);
+ BufferOffset as_sh(Register rd, Register rs, int16_t off);
+ BufferOffset as_sw(Register rd, Register rs, int16_t off);
+ BufferOffset as_swl(Register rd, Register rs, int16_t off);
+ BufferOffset as_swr(Register rd, Register rs, int16_t off);
+ BufferOffset as_sc(Register rd, Register rs, int16_t off);
+ BufferOffset as_scd(Register rd, Register rs, int16_t off);
+ BufferOffset as_sd(Register rd, Register rs, int16_t off);
+ BufferOffset as_sdl(Register rd, Register rs, int16_t off);
+ BufferOffset as_sdr(Register rd, Register rs, int16_t off);
+
+ // Loongson-specific load and store instructions
+ BufferOffset as_gslbx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gssbx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslhx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsshx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslwx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsswx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsldx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gssdx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslq(Register rh, Register rl, Register rs, int16_t off);
+ BufferOffset as_gssq(Register rh, Register rl, Register rs, int16_t off);
+
+ // Move from HI/LO register.
+ BufferOffset as_mfhi(Register rd);
+ BufferOffset as_mflo(Register rd);
+
+ // Set on less than.
+ BufferOffset as_slt(Register rd, Register rs, Register rt);
+ BufferOffset as_sltu(Register rd, Register rs, Register rt);
+ BufferOffset as_slti(Register rd, Register rs, int32_t j);
+ BufferOffset as_sltiu(Register rd, Register rs, uint32_t j);
+
+ // Conditional move.
+ BufferOffset as_movz(Register rd, Register rs, Register rt);
+ BufferOffset as_movn(Register rd, Register rs, Register rt);
+ BufferOffset as_movt(Register rd, Register rs, uint16_t cc = 0);
+ BufferOffset as_movf(Register rd, Register rs, uint16_t cc = 0);
+ BufferOffset as_seleqz(Register rd, Register rs, Register rt);
+ BufferOffset as_selnez(Register rd, Register rs, Register rt);
+
+ // Bit twiddling.
+ BufferOffset as_clz(Register rd, Register rs);
+ BufferOffset as_dclz(Register rd, Register rs);
+ BufferOffset as_wsbh(Register rd, Register rt);
+ BufferOffset as_dsbh(Register rd, Register rt);
+ BufferOffset as_dshd(Register rd, Register rt);
+ BufferOffset as_ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dinsm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dinsu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // Sign extend
+ BufferOffset as_seb(Register rd, Register rt);
+ BufferOffset as_seh(Register rd, Register rt);
+
+ // FP instructions
+
+ BufferOffset as_ldc1(FloatRegister ft, Register base, int32_t off);
+ BufferOffset as_sdc1(FloatRegister ft, Register base, int32_t off);
+
+ BufferOffset as_lwc1(FloatRegister ft, Register base, int32_t off);
+ BufferOffset as_swc1(FloatRegister ft, Register base, int32_t off);
+
+ // Loongson-specific FP load and store instructions
+ BufferOffset as_gsldl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gsldr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gssdl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gssdr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gslsl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gslsr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gsssl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gsssr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gslsx(FloatRegister fd, Register rs, Register ri,
+ int16_t off);
+ BufferOffset as_gsssx(FloatRegister fd, Register rs, Register ri,
+ int16_t off);
+ BufferOffset as_gsldx(FloatRegister fd, Register rs, Register ri,
+ int16_t off);
+ BufferOffset as_gssdx(FloatRegister fd, Register rs, Register ri,
+ int16_t off);
+ BufferOffset as_gslq(FloatRegister rh, FloatRegister rl, Register rs,
+ int16_t off);
+ BufferOffset as_gssq(FloatRegister rh, FloatRegister rl, Register rs,
+ int16_t off);
+
+ BufferOffset as_movs(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_movd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_ctc1(Register rt, FPControl fc);
+ BufferOffset as_cfc1(Register rt, FPControl fc);
+
+ BufferOffset as_mtc1(Register rt, FloatRegister fs);
+ BufferOffset as_mfc1(Register rt, FloatRegister fs);
+
+ BufferOffset as_mthc1(Register rt, FloatRegister fs);
+ BufferOffset as_mfhc1(Register rt, FloatRegister fs);
+ BufferOffset as_dmtc1(Register rt, FloatRegister fs);
+ BufferOffset as_dmfc1(Register rt, FloatRegister fs);
+
+ public:
+ // FP convert instructions
+ BufferOffset as_ceilws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_floorws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_roundws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncls(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_ceilwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_floorwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_roundwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncld(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_cvtdl(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtds(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtdw(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtld(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtls(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsl(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsw(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtws(FloatRegister fd, FloatRegister fs);
+
+ // FP arithmetic instructions
+ BufferOffset as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+
+ BufferOffset as_abss(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_absd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_negs(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_negd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_sqrts(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_sqrtd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_max(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ FloatRegister ft);
+ BufferOffset as_min(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ FloatRegister ft);
+
+ // FP compare instructions
+ BufferOffset as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+
+ // FP conditional move.
+ BufferOffset as_movt(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_movf(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_movz(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ Register rt);
+ BufferOffset as_movn(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ Register rt);
+
+ // Conditional trap operations
+ BufferOffset as_tge(Register rs, Register rt, uint32_t code = 0);
+ BufferOffset as_tgeu(Register rs, Register rt, uint32_t code = 0);
+ BufferOffset as_tlt(Register rs, Register rt, uint32_t code = 0);
+ BufferOffset as_tltu(Register rs, Register rt, uint32_t code = 0);
+ BufferOffset as_teq(Register rs, Register rt, uint32_t code = 0);
+ BufferOffset as_tne(Register rs, Register rt, uint32_t code = 0);
+
+ // Label operations
+ void bind(Label* label, BufferOffset boff = BufferOffset());
+ virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
+ void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
+ uint32_t currentOffset() { return nextOffset().getOffset(); }
+ void retarget(Label* label, Label* target);
+
+ void call(Label* label);
+ void call(void* target);
+
+ void as_break(uint32_t code);
+ void as_sync(uint32_t stype = 0);
+
+ public:
+ static bool SupportsFloatingPoint() {
+#if (defined(__mips_hard_float) && !defined(__mips_single_float)) || \
+ defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+ return true;
+#else
+ return false;
+#endif
+ }
+ static bool SupportsUnalignedAccesses() { return true; }
+ static bool SupportsFastUnalignedFPAccesses() { return false; }
+
+ static bool HasRoundInstruction(RoundingMode mode) { return false; }
+
+ protected:
+ InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
+ void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
+ if (kind == RelocationKind::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+ }
+
+ void addLongJump(BufferOffset src, BufferOffset dst) {
+ CodeLabel cl;
+ cl.patchAt()->bind(src.getOffset());
+ cl.target()->bind(dst.getOffset());
+ cl.setLinkMode(CodeLabel::JumpImmediate);
+ addCodeLabel(std::move(cl));
+ }
+
+ public:
+ void flushBuffer() {}
+
+ void comment(const char* msg) { spew("; %s", msg); }
+
+ static uint32_t NopSize() { return 4; }
+
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+
+ static uint32_t AlignDoubleArg(uint32_t offset) {
+ return (offset + 1U) & ~1U;
+ }
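+ // For example, AlignDoubleArg(3) == 4 and AlignDoubleArg(4) == 4: the slot
+ // offset is rounded up to an even value so that a double argument can start
+ // on an 8-byte boundary when argument slots are 4 bytes wide.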
+
+ static uint8_t* NextInstruction(uint8_t* instruction,
+ uint32_t* count = nullptr);
+
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+
+ static void UpdateLuiOriValue(Instruction* inst0, Instruction* inst1,
+ uint32_t value);
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess) {
+ // Implement this if we implement a disassembler.
+ }
+}; // AssemblerMIPSShared
+
+// sll zero, zero, 0
+const uint32_t NopInst = 0x00000000;
+
+// An Instruction is a structure for both encoding and decoding any and all
+// MIPS instructions.
+class Instruction {
+ protected:
+ uint32_t data;
+
+ // Standard constructor
+ Instruction(uint32_t data_) : data(data_) {}
+
+ // You should never create an instruction directly. You should create a
+ // more specific instruction which will eventually call one of these
+ // constructors for you.
+ public:
+ uint32_t encode() const { return data; }
+
+ void makeNop() { data = NopInst; }
+
+ void setData(uint32_t data) { this->data = data; }
+
+ const Instruction& operator=(const Instruction& src) {
+ data = src.data;
+ return *this;
+ }
+
+ // Extract the one particular bit.
+ uint32_t extractBit(uint32_t bit) { return (encode() >> bit) & 1; }
+ // Extract a bit field out of the instruction
+ uint32_t extractBitField(uint32_t hi, uint32_t lo) {
+ return (encode() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+ // Since all MIPS instructions have an opcode, the opcode
+ // extractor resides in the base class.
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ // Return the fields at their original place in the instruction encoding.
+ OpcodeField OpcodeFieldRaw() const {
+ return static_cast<OpcodeField>(encode() & OpcodeMask);
+ }
+
+ // Get the next instruction in the instruction stream.
+ // This does neat things like ignore constant pools and their guards.
+ Instruction* next();
+
+ // Sometimes, an API wants a uint32_t (or a pointer to it) rather than
+ // an instruction. raw() just coerces this into a pointer to a uint32_t.
+ const uint32_t* raw() const { return &data; }
+ uint32_t size() const { return 4; }
+}; // Instruction
+
+// Make sure that it is the right size.
+static_assert(sizeof(Instruction) == 4,
+ "Size of Instruction class has to be 4 bytes.");
+
+class InstNOP : public Instruction {
+ public:
+ InstNOP() : Instruction(NopInst) {}
+};
+
+// Class for register type instructions.
+class InstReg : public Instruction {
+ public:
+ InstReg(OpcodeField op, Register rd, FunctionField ff)
+ : Instruction(op | RD(rd) | ff) {}
+ InstReg(OpcodeField op, Register rs, Register rt, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | ff) {}
+ InstReg(OpcodeField op, Register rs, Register rt, Register rd,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | ff) {}
+ InstReg(OpcodeField op, Register rs, Register rt, Register rd, uint32_t sa,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, RSField rs, Register rt, Register rd, uint32_t sa,
+ FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, Register rs, RTField rt, Register rd, uint32_t sa,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | rt | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, Register rs, uint32_t cc, Register rd, uint32_t sa,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | cc | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, uint32_t code, FunctionField ff)
+ : Instruction(op | code | ff) {}
+ // For floating point.
+ InstReg(OpcodeField op, RSField rs, Register rt, uint32_t fs)
+ : Instruction(op | rs | RT(rt) | FS(fs)) {}
+ InstReg(OpcodeField op, RSField rs, Register rt, FloatRegister rd)
+ : Instruction(op | rs | RT(rt) | RD(rd)) {}
+ InstReg(OpcodeField op, RSField rs, Register rt, FloatRegister rd,
+ uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, RSField rs, Register rt, FloatRegister fs,
+ FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(fs) | SA(fd) | ff) {}
+ InstReg(OpcodeField op, RSField rs, FloatRegister ft, FloatRegister fs,
+ FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fs) | SA(fd) | ff) {}
+ InstReg(OpcodeField op, RSField rs, FloatRegister ft, FloatRegister fd,
+ uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fd) | SA(sa) | ff) {}
+
+ uint32_t extractRS() {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT() {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ uint32_t extractRD() {
+ return extractBitField(RDShift + RDBits - 1, RDShift);
+ }
+ uint32_t extractSA() {
+ return extractBitField(SAShift + SABits - 1, SAShift);
+ }
+ uint32_t extractFunctionField() {
+ return extractBitField(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+};
+
+// Class for branch, load and store instructions with immediate offset.
+class InstImm : public Instruction {
+ public:
+ void extractImm16(BOffImm16* dest);
+
+ InstImm(OpcodeField op, Register rs, Register rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode()) {}
+ InstImm(OpcodeField op, Register rs, RTField rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | rt | off.encode()) {}
+ InstImm(OpcodeField op, RSField rs, uint32_t cc, BOffImm16 off)
+ : Instruction(op | rs | cc | off.encode()) {}
+ InstImm(OpcodeField op, Register rs, Register rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode()) {}
+ InstImm(uint32_t raw) : Instruction(raw) {}
+ // For floating-point loads and stores.
+ InstImm(OpcodeField op, Register rs, FloatRegister rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode()) {}
+
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ void setOpcode(OpcodeField op) { data = (data & ~OpcodeMask) | op; }
+ uint32_t extractRS() {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT() {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ void setRT(RTField rt) { data = (data & ~RTMask) | rt; }
+ uint32_t extractImm16Value() {
+ return extractBitField(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+ void setBOffImm16(BOffImm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+ void setImm16(Imm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+};
+
+// Class for Jump type instructions.
+class InstJump : public Instruction {
+ public:
+ InstJump(OpcodeField op, JOffImm26 off) : Instruction(op | off.encode()) {}
+
+ uint32_t extractImm26Value() {
+ return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+};
+
+// Class for Loongson-specific instructions
+class InstGS : public Instruction {
+ public:
+ // For indexed loads and stores.
+ InstGS(OpcodeField op, Register rs, Register rt, Register rd, Imm8 off,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | off.encode(3) | ff) {}
+ InstGS(OpcodeField op, Register rs, FloatRegister rt, Register rd, Imm8 off,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | off.encode(3) | ff) {}
+ // For quad-word loads and stores.
+ InstGS(OpcodeField op, Register rs, Register rt, Register rz, GSImm13 off,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RZ(rz) | off.encode(6) | ff) {}
+ InstGS(OpcodeField op, Register rs, FloatRegister rt, FloatRegister rz,
+ GSImm13 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RZ(rz) | off.encode(6) | ff) {}
+ InstGS(uint32_t raw) : Instruction(raw) {}
+ // For floating-point unaligned loads and stores.
+ InstGS(OpcodeField op, Register rs, FloatRegister rt, Imm8 off,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode(6) | ff) {}
+};
+
+inline bool IsUnaligned(const wasm::MemoryAccessDesc& access) {
+ if (!access.align()) {
+ return false;
+ }
+
+#ifdef JS_CODEGEN_MIPS32
+ if (access.type() == Scalar::Int64 && access.align() >= 4) {
+ return false;
+ }
+#endif
+
+ return access.align() < access.byteSize();
+}
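+
+// For instance, a 4-byte access with align() == 2 is treated as unaligned
+// (2 < 4), while an access with align() == 0 or align() >= byteSize() is not;
+// on MIPS32 an Int64 access only needs align() >= 4 to count as aligned.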
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Assembler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/AtomicOperations-mips-shared.h b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
new file mode 100644
index 0000000000..5ef11fd8c2
--- /dev/null
+++ b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
@@ -0,0 +1,521 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h */
+
+// NOTE: MIPS32, unlike MIPS64, doesn't provide hardware support for lock-free
+// 64-bit atomics. Below we lie about 8-byte atomics always being lock-free in
+// order to support the wasm JIT. The 64-bit atomics for MIPS32 do not use the
+// __atomic intrinsics and therefore do not rely on -latomic.
+// Access to a specific 64-bit variable in memory is protected by an
+// AddressLock whose instance is shared between the JIT and AtomicOperations.
+
+#ifndef jit_mips_shared_AtomicOperations_mips_shared_h
+#define jit_mips_shared_AtomicOperations_mips_shared_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include "builtin/AtomicsObject.h"
+#include "vm/Uint8Clamped.h"
+
+#if !defined(__clang__) && !defined(__GNUC__)
+# error "This file only for gcc-compatible compilers"
+#endif
+
+#if defined(JS_SIMULATOR_MIPS32) && !defined(__i386__)
+# error "The MIPS32 simulator atomics assume x86"
+#endif
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+struct AddressLock {
+ public:
+ void acquire();
+ void release();
+
+ private:
+ uint32_t spinlock;
+};
+
+static_assert(sizeof(AddressLock) == sizeof(uint32_t),
+ "AddressLock must be 4 bytes for it to be consumed by jit");
+
+// For now use a single global AddressLock.
+static AddressLock gAtomic64Lock;
+
+struct MOZ_RAII AddressGuard {
+ explicit AddressGuard(void* addr) { gAtomic64Lock.acquire(); }
+
+ ~AddressGuard() { gAtomic64Lock.release(); }
+};
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
+
+inline bool js::jit::AtomicOperations::isLockfree8() {
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
+#if defined(JS_64BIT)
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
+#endif
+ return true;
+}
+
+inline void js::jit::AtomicOperations::fenceSeqCst() {
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+ return v;
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::loadSeqCst(int64_t* addr) {
+ AddressGuard guard(addr);
+ return *addr;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::loadSeqCst(uint64_t* addr) {
+ AddressGuard guard(addr);
+ return *addr;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline void js::jit::AtomicOperations::storeSeqCst(int64_t* addr, int64_t val) {
+ AddressGuard guard(addr);
+ *addr = val;
+}
+
+template <>
+inline void js::jit::AtomicOperations::storeSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ *addr = val;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
+ T newval) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST);
+ return oldval;
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::compareExchangeSeqCst(
+ int64_t* addr, int64_t oldval, int64_t newval) {
+ AddressGuard guard(addr);
+ int64_t val = *addr;
+ if (val == oldval) {
+ *addr = newval;
+ }
+ return val;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::compareExchangeSeqCst(
+ uint64_t* addr, uint64_t oldval, uint64_t newval) {
+ AddressGuard guard(addr);
+ uint64_t val = *addr;
+ if (val == oldval) {
+ *addr = newval;
+ }
+ return val;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchAddSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old + val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchAddSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old + val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchSubSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old - val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchSubSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old - val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchAndSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old & val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchAndSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old & val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchOrSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old | val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchOrSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old | val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchXorSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old ^ val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchXorSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old ^ val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_RELAXED);
+ return v;
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::loadSafeWhenRacy(int64_t* addr) {
+ return *addr;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::loadSafeWhenRacy(uint64_t* addr) {
+ return *addr;
+}
+
+#endif
+
+template <>
+inline uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
+ uint8_clamped* addr) {
+ uint8_t v;
+ __atomic_load(&addr->val, &v, __ATOMIC_RELAXED);
+ return uint8_clamped(v);
+}
+
+template <>
+inline float js::jit::AtomicOperations::loadSafeWhenRacy(float* addr) {
+ return *addr;
+}
+
+template <>
+inline double js::jit::AtomicOperations::loadSafeWhenRacy(double* addr) {
+ return *addr;
+}
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ __atomic_store(addr, &val, __ATOMIC_RELAXED);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(int64_t* addr,
+ int64_t val) {
+ *addr = val;
+}
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint64_t* addr,
+ uint64_t val) {
+ *addr = val;
+}
+
+#endif
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr,
+ uint8_clamped val) {
+ __atomic_store(&addr->val, &val.val, __ATOMIC_RELAXED);
+}
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(float* addr,
+ float val) {
+ *addr = val;
+}
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(double* addr,
+ double val) {
+ *addr = val;
+}
+
+} // namespace jit
+} // namespace js
+
+inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
+ MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
+ ::memcpy(dest, src, nbytes);
+}
+
+inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ ::memmove(dest, src, nbytes);
+}
+
+template <typename T>
+inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::exchangeSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::exchangeSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+#if !defined(JS_64BIT)
+
+inline void js::jit::AddressLock::acquire() {
+ uint32_t zero = 0;
+ uint32_t one = 1;
+ while (!__atomic_compare_exchange(&spinlock, &zero, &one, true,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+ zero = 0;
+ }
+}
+
+inline void js::jit::AddressLock::release() {
+ uint32_t zero = 0;
+ __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+}
+
+#endif
+
+#endif // jit_mips_shared_AtomicOperations_mips_shared_h
diff --git a/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp b/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp
new file mode 100644
index 0000000000..6e21edc0ba
--- /dev/null
+++ b/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+#include "jit/SharedICHelpers.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+bool ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm) {
+ Label failure, isNaN;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+ masm.ensureDouble(R1, FloatReg1, &failure);
+
+ Register dest = R0.scratchReg();
+
+ Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
+
+ masm.ma_cmp_set_double(dest, FloatReg0, FloatReg1, doubleCond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
new file mode 100644
index 0000000000..f8b96e0354
--- /dev/null
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -0,0 +1,2448 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using JS::ToInt32;
+using mozilla::DebugOnly;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+
+// shared
+CodeGeneratorMIPSShared::CodeGeneratorMIPSShared(MIRGenerator* gen,
+ LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {}
+
+Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation& a) {
+ if (a.isGeneralReg()) {
+ return Operand(a.toGeneralReg()->reg());
+ }
+ if (a.isFloatReg()) {
+ return Operand(a.toFloatReg()->reg());
+ }
+ return Operand(ToAddress(a));
+}
+
+Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation* a) {
+ return ToOperand(*a);
+}
+
+Operand CodeGeneratorMIPSShared::ToOperand(const LDefinition* def) {
+ return ToOperand(def->output());
+}
+
+#ifdef JS_PUNBOX64
+Operand CodeGeneratorMIPSShared::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToOperand(input.value());
+}
+#else
+Register64 CodeGeneratorMIPSShared::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToRegister64(input);
+}
+#endif
+
+void CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt,
+ FloatRegister lhs,
+ FloatRegister rhs, MBasicBlock* mir,
+ Assembler::DoubleCondition cond) {
+ // Skip past trivial blocks.
+ Label* label = skipTrivialBlocks(mir)->lir()->label();
+ if (fmt == Assembler::DoubleFloat) {
+ masm.branchDouble(cond, lhs, rhs, label);
+ } else {
+ masm.branchFloat(cond, lhs, rhs, label);
+ }
+}
+
+void OutOfLineBailout::accept(CodeGeneratorMIPSShared* codegen) {
+ codegen->visitOutOfLineBailout(this);
+}
+
+void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
+ const LAllocation* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void CodeGenerator::visitCompare(LCompare* comp) {
+ MCompare* mir = comp->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+ const LAllocation* left = comp->getOperand(0);
+ const LAllocation* right = comp->getOperand(1);
+ const LDefinition* def = comp->getDef(0);
+
+#ifdef JS_CODEGEN_MIPS64
+ if (mir->compareType() == MCompare::Compare_Object ||
+ mir->compareType() == MCompare::Compare_Symbol ||
+ mir->compareType() == MCompare::Compare_UIntPtr ||
+ mir->compareType() == MCompare::Compare_RefOrNull) {
+ if (right->isConstant()) {
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
+ masm.cmpPtrSet(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right),
+ ToRegister(def));
+ } else {
+ masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+ return;
+ }
+#endif
+
+ if (right->isConstant()) {
+ masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+ } else {
+ masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+}
+
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+ MCompare* mir = comp->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+
+#ifdef JS_CODEGEN_MIPS64
+ if (mir->compareType() == MCompare::Compare_Object ||
+ mir->compareType() == MCompare::Compare_Symbol ||
+ mir->compareType() == MCompare::Compare_UIntPtr ||
+ mir->compareType() == MCompare::Compare_RefOrNull) {
+ if (comp->right()->isConstant()) {
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
+ emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else if (comp->right()->isGeneralReg()) {
+ emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else {
+ masm.loadPtr(ToAddress(comp->right()), ScratchRegister);
+ emitBranch(ToRegister(comp->left()), ScratchRegister, cond,
+ comp->ifTrue(), comp->ifFalse());
+ }
+ return;
+ }
+#endif
+
+ if (comp->right()->isConstant()) {
+ emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else if (comp->right()->isGeneralReg()) {
+ emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else {
+ masm.load32(ToAddress(comp->right()), ScratchRegister);
+ emitBranch(ToRegister(comp->left()), ScratchRegister, cond, comp->ifTrue(),
+ comp->ifFalse());
+ }
+}
+
+bool CodeGeneratorMIPSShared::generateOutOfLineCode() {
+ if (!CodeGeneratorShared::generateOutOfLineCode()) {
+ return false;
+ }
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+    // The frame size is stored in 'ra' and pushed by GenerateBailoutThunk.
+    // We have to use 'ra' because generateBailoutTable will implicitly do
+    // the same.
+ masm.move32(Imm32(frameSize()), ra);
+
+ TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.jump(handler);
+ }
+
+ return !masm.oom();
+}
+
+void CodeGeneratorMIPSShared::bailoutFrom(Label* label, LSnapshot* snapshot) {
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool =
+ new (alloc()) OutOfLineBailout(snapshot, masm.framePushed());
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void CodeGeneratorMIPSShared::bailout(LSnapshot* snapshot) {
+ Label label;
+ masm.jump(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxDouble(second, first, true);
+ } else {
+ masm.minDouble(second, first, true);
+ }
+}
+
+void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxFloat32(second, first, true);
+ } else {
+ masm.minFloat32(second, first, true);
+ }
+}
+
+void CodeGenerator::visitAddI(LAddI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitAddI64(LAddI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitSubI(LSubI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitSubI64(LSubI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitMulI(LMulI* ins) {
+ const LAllocation* lhs = ins->lhs();
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+ MMul* mul = ins->mir();
+
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
+ !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ int32_t constant = ToInt32(rhs);
+ Register src = ToRegister(lhs);
+
+ // Bailout on -0.0
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition cond =
+ (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+ bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
+ }
+
+ switch (constant) {
+ case -1:
+ if (mul->canOverflow()) {
+ bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN),
+ ins->snapshot());
+ }
+
+ masm.ma_negu(dest, src);
+ break;
+ case 0:
+ masm.move32(Imm32(0), dest);
+ break;
+ case 1:
+ masm.move32(src, dest);
+ break;
+ case 2:
+ if (mul->canOverflow()) {
+ Label mulTwoOverflow;
+ masm.ma_add32TestOverflow(dest, src, src, &mulTwoOverflow);
+
+ bailoutFrom(&mulTwoOverflow, ins->snapshot());
+ } else {
+ masm.as_addu(dest, src, src);
+ }
+ break;
+ default:
+ uint32_t shift = FloorLog2(constant);
+
+ if (!mul->canOverflow() && (constant > 0)) {
+ // If it cannot overflow, we can do lots of optimizations.
+ uint32_t rest = constant - (1 << shift);
+
+ // See if the constant has one bit set, meaning it can be
+ // encoded as a bitshift.
+ if ((1 << shift) == constant) {
+ masm.ma_sll(dest, src, Imm32(shift));
+ return;
+ }
+
+ // If the constant cannot be encoded as (1<<C1), see if it can
+ // be encoded as (1<<C1) | (1<<C2), which can be computed
+ // using an add and a shift.
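+          // For example (illustrative only): constant == 10 == (1<<3)|(1<<1)
+          // gives rest == 2 and shift_rest == 1, so the code below computes
+          // dest = src << 2, dest += src, dest <<= 1 (src*4, src*5, src*10).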
+ uint32_t shift_rest = FloorLog2(rest);
+ if (src != dest && (1u << shift_rest) == rest) {
+ masm.ma_sll(dest, src, Imm32(shift - shift_rest));
+ masm.add32(src, dest);
+ if (shift_rest != 0) {
+ masm.ma_sll(dest, dest, Imm32(shift_rest));
+ }
+ return;
+ }
+ }
+
+ if (mul->canOverflow() && (constant > 0) && (src != dest)) {
+ // To stay on the safe side, only optimize things that are a
+ // power of 2.
+
+ if ((1 << shift) == constant) {
+ // dest = lhs * pow(2, shift)
+ masm.ma_sll(dest, src, Imm32(shift));
+ // At runtime, check (lhs == dest >> shift), if this does
+ // not hold, some bits were lost due to overflow, and the
+ // computation should be resumed as a double.
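+            // (Illustrative: lhs == 0x40000000 with shift == 1 gives
+            // dest == 0x80000000; shifting back arithmetically yields
+            // 0xC0000000 != lhs, so we bail out.)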
+ masm.ma_sra(ScratchRegister, dest, Imm32(shift));
+ bailoutCmp32(Assembler::NotEqual, src, ScratchRegister,
+ ins->snapshot());
+ return;
+ }
+ }
+
+ if (mul->canOverflow()) {
+ Label mulConstOverflow;
+ masm.ma_mul32TestOverflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
+ &mulConstOverflow);
+
+ bailoutFrom(&mulConstOverflow, ins->snapshot());
+ } else {
+ masm.ma_mul(dest, src, Imm32(ToInt32(rhs)));
+ }
+ break;
+ }
+ } else {
+ Label multRegOverflow;
+
+ if (mul->canOverflow()) {
+ masm.ma_mul32TestOverflow(dest, ToRegister(lhs), ToRegister(rhs),
+ &multRegOverflow);
+ bailoutFrom(&multRegOverflow, ins->snapshot());
+ } else {
+ masm.as_mul(dest, ToRegister(lhs), ToRegister(rhs));
+ }
+
+ if (mul->canBeNegativeZero()) {
+ Label done;
+ masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);
+
+      // The result is -0 if lhs or rhs is negative.
+      // In that case the result must be a double value, so bail out.
+ Register scratch = SecondScratchReg;
+ masm.as_or(scratch, ToRegister(lhs), ToRegister(rhs));
+ bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
+
+void CodeGenerator::visitMulI64(LMulI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+ const Register64 output = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lhs) == output);
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ case 2:
+ masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ default:
+ if (constant > 0) {
+ if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(constant + 1))) {
+ ScratchRegisterScope scratch(masm);
+ Register64 scratch64(scratch);
+ masm.move64(ToRegister64(lhs), scratch64);
+ masm.lshift64(Imm32(FloorLog2(constant + 1)), output);
+ masm.sub64(scratch64, output);
+ return;
+ } else if (mozilla::IsPowerOfTwo(
+ static_cast<uint64_t>(constant - 1))) {
+ ScratchRegisterScope scratch(masm);
+ Register64 scratch64(scratch);
+ masm.move64(ToRegister64(lhs), scratch64);
+ masm.lshift64(Imm32(FloorLog2(constant - 1u)), output);
+ masm.add64(scratch64, output);
+ return;
+ }
+ // Use shift if constant is power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
+
+void CodeGenerator::visitDivI(LDivI* ins) {
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register temp = ToRegister(ins->getTemp(0));
+ MDiv* mir = ins->mir();
+
+ Label done;
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else if (mir->canTruncateInfinities()) {
+ // Truncated division by zero is zero (Infinity|0 == 0)
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
+ }
+ }
+
+ // Handle an integer overflow exception from -2147483648 / -1.
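+  // (Illustrative note: the true quotient 2147483648 does not fit in an
+  // int32, so the cases below either trap, truncate back to INT32_MIN, or
+  // bail out to double arithmetic.)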
+ if (mir->canBeNegativeOverflow()) {
+ Label notMinInt;
+ masm.move32(Imm32(INT32_MIN), temp);
+ masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);
+
+ masm.move32(Imm32(-1), temp);
+ if (mir->trapOnError()) {
+ Label ok;
+ masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+ masm.bind(&ok);
+ } else if (mir->canTruncateOverflow()) {
+ // (-INT32_MIN)|0 == INT32_MIN
+ Label skip;
+ masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(INT32_MIN), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
+ }
+ masm.bind(&notMinInt);
+ }
+
+ // Handle negative 0. (0/-Y)
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
+ bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
+ masm.bind(&nonzero);
+ }
+  // Note: the above safety checks could not be verified, as Ion seems to be
+  // smarter and requires double arithmetic in such cases.
+
+ // All regular. Lets call div.
+ if (mir->canTruncateRemainder()) {
+#ifdef MIPSR6
+ masm.as_div(dest, lhs, rhs);
+#else
+ masm.as_div(lhs, rhs);
+ masm.as_mflo(dest);
+#endif
+ } else {
+ MOZ_ASSERT(mir->fallible());
+
+ Label remainderNonZero;
+ masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
+ bailoutFrom(&remainderNonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
+ Register lhs = ToRegister(ins->numerator());
+ Register dest = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->getTemp(0));
+ int32_t shift = ins->shift();
+
+ if (shift != 0) {
+ MDiv* mir = ins->mir();
+ if (!mir->isTruncated()) {
+ // If the remainder is going to be != 0, bailout since this must
+ // be a double.
+ masm.ma_sll(tmp, lhs, Imm32(32 - shift));
+ bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
+ }
+
+ if (!mir->canBeNegativeDividend()) {
+      // The numerator is known to be non-negative, so it needs no adjusting.
+      // Do the shift.
+ masm.ma_sra(dest, lhs, Imm32(shift));
+ return;
+ }
+
+ // Adjust the value so that shifting produces a correctly rounded result
+ // when the numerator is negative. See 10-1 "Signed Division by a Known
+ // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
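+    // (Illustrative: with shift == 2 and lhs == -5, the bias computed below
+    // is ((-5 >> 31) >>> 30) == 3, so dest == (-5 + 3) >> 2 == -1, matching
+    // -5 / 4 truncated toward zero; a plain -5 >> 2 would give -2.)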
+ if (shift > 1) {
+ masm.ma_sra(tmp, lhs, Imm32(31));
+ masm.ma_srl(tmp, tmp, Imm32(32 - shift));
+ masm.add32(lhs, tmp);
+ } else {
+ masm.ma_srl(tmp, lhs, Imm32(32 - shift));
+ masm.add32(lhs, tmp);
+ }
+
+ // Do the shift.
+ masm.ma_sra(dest, tmp, Imm32(shift));
+ } else {
+ masm.move32(lhs, dest);
+ }
+}
+
+void CodeGenerator::visitModI(LModI* ins) {
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register callTemp = ToRegister(ins->callTemp());
+ MMod* mir = ins->mir();
+ Label done, prevent;
+
+ masm.move32(lhs, callTemp);
+
+  // Prevent INT_MIN % -1:
+  // the integer division overflows, and in JS the result of INT_MIN % -1 is
+  // -0, which can only be represented as a double.
+ if (mir->canBeNegativeDividend()) {
+ masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
+ if (mir->isTruncated()) {
+ // (INT_MIN % -1)|0 == 0
+ Label skip;
+ masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
+ }
+ masm.bind(&prevent);
+ }
+
+ // 0/X (with X < 0) is bad because both of these values *should* be
+ // doubles, and the result should be -0.0, which cannot be represented in
+ // integers. X/0 is bad because it will give garbage (or abort), when it
+ // should give either \infty, -\infty or NAN.
+
+ // Prevent 0 / X (with X < 0) and X / 0
+ // testing X / Y. Compare Y with 0.
+ // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
+ // If (Y < 0), then we compare X with 0, and bail if X == 0
+ // If (Y == 0), then we simply want to bail.
+ // if (Y > 0), we don't bail.
+
+ if (mir->canBeDivideByZero()) {
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ Label skip;
+ masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
+ if (mir->canBeNegativeDividend()) {
+ Label notNegative;
+ masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
+ if (mir->isTruncated()) {
+ // NaN|0 == 0 and (0 % -X)|0 == 0
+ Label skip;
+ masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.bind(&notNegative);
+ }
+#ifdef MIPSR6
+ masm.as_mod(dest, lhs, rhs);
+#else
+ masm.as_div(lhs, rhs);
+ masm.as_mfhi(dest);
+#endif
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ // See if X < 0
+ masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
+ }
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
+ Register in = ToRegister(ins->getOperand(0));
+ Register out = ToRegister(ins->getDef(0));
+ MMod* mir = ins->mir();
+ Label negative, done;
+
+ masm.move32(in, out);
+ masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
+ // Switch based on sign of the lhs.
+  // Positive numbers just need a bitmask.
+ masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
+ {
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.ma_b(&done, ShortJump);
+ }
+
+ // Negative numbers need a negate, bitmask, negate
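+  // (Illustrative: in == -5 with shift == 2 computes -((5) & 3) == -1,
+  // which matches the JS result of -5 % 4.)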
+ {
+ masm.bind(&negative);
+ masm.neg32(out);
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.neg32(out);
+ }
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitModMaskI(LModMaskI* ins) {
+ Register src = ToRegister(ins->getOperand(0));
+ Register dest = ToRegister(ins->getDef(0));
+ Register tmp0 = ToRegister(ins->getTemp(0));
+ Register tmp1 = ToRegister(ins->getTemp(1));
+ MMod* mir = ins->mir();
+
+ if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
+ MOZ_ASSERT(mir->fallible());
+
+ Label bail;
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ } else {
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
+ }
+}
+
+void CodeGenerator::visitBitNotI(LBitNotI* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ const LDefinition* dest = ins->getDef(0);
+ MOZ_ASSERT(!input->isConstant());
+
+ masm.ma_not(ToRegister(dest), ToRegister(input));
+}
+
+void CodeGenerator::visitBitOpI(LBitOpI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+ // all of these bitops should be either imm32's, or integer registers.
+ switch (ins->bitop()) {
+ case JSOp::BitOr:
+ if (rhs->isConstant()) {
+ masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ break;
+ case JSOp::BitXor:
+ if (rhs->isConstant()) {
+ masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ break;
+ case JSOp::BitAnd:
+ if (rhs->isConstant()) {
+ masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOp::BitOr:
+ if (IsConstant(rhs)) {
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitXor:
+ if (IsConstant(rhs)) {
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitAnd:
+ if (IsConstant(rhs)) {
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitShiftI(LShiftI* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.ma_sll(dest, lhs, Imm32(shift));
+ } else {
+ masm.move32(lhs, dest);
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.ma_sra(dest, lhs, Imm32(shift));
+ } else {
+ masm.move32(lhs, dest);
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.ma_srl(dest, lhs, Imm32(shift));
+ } else {
+ // x >>> 0 can overflow.
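+          // (Illustrative: a negative lhs such as -1 becomes 4294967295
+          // after >>> 0, which does not fit in an int32, hence the bailout
+          // below.)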
+ if (ins->mir()->toUrsh()->fallible()) {
+ bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.move32(lhs, dest);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+ // The shift amounts should be AND'ed into the 0-31 range
+ masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));
+
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ masm.ma_sll(dest, lhs, dest);
+ break;
+ case JSOp::Rsh:
+ masm.ma_sra(dest, lhs, dest);
+ break;
+ case JSOp::Ursh:
+ masm.ma_srl(dest, lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Rsh:
+ masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Ursh:
+ masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void CodeGenerator::visitRotateI64(LRotateI64* lir) {
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+#ifdef JS_CODEGEN_MIPS64
+ MOZ_ASSERT(input == output);
+#endif
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c) {
+#ifdef JS_CODEGEN_MIPS32
+ masm.move64(input, output);
+#endif
+ return;
+ }
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ } else {
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ }
+ } else {
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ } else {
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+ }
+}
+
+void CodeGenerator::visitUrshD(LUrshD* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp());
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ masm.ma_srl(temp, lhs, Imm32(ToInt32(rhs)));
+ } else {
+ masm.ma_srl(temp, lhs, ToRegister(rhs));
+ }
+
+ masm.convertUInt32ToDouble(temp, out);
+}
+
+void CodeGenerator::visitClzI(LClzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.as_clz(output, input);
+}
+
+void CodeGenerator::visitCtzI(LCtzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ma_ctz(output, input);
+}
+
+void CodeGenerator::visitPopcntI(LPopcntI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->temp0());
+
+ masm.popcnt32(input, output, tmp);
+}
+
+void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ Register64 output = ToOutRegister64(ins);
+ Register tmp = ToRegister(ins->getTemp(0));
+
+ masm.popcnt64(input, output, tmp);
+}
+
+void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ Label done, skip;
+
+  // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), ScratchDoubleReg);
+ masm.ma_bc1d(input, ScratchDoubleReg, &skip,
+ Assembler::DoubleNotEqualOrUnordered, ShortJump);
+ masm.as_negd(output, ScratchDoubleReg);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&skip);
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
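+  // (Illustrative: under IEEE-754, -0.0 + 0.0 == +0.0, so the sqrt below
+  // always sees a non-negative zero.)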
+ masm.loadConstantDouble(0.0, ScratchDoubleReg);
+ masm.as_addd(output, input, ScratchDoubleReg);
+ masm.as_sqrtd(output, output);
+
+ masm.bind(&done);
+}
+
+MoveOperand CodeGeneratorMIPSShared::toMoveOperand(LAllocation a) const {
+ if (a.isGeneralReg()) {
+ return MoveOperand(ToRegister(a));
+ }
+ if (a.isFloatReg()) {
+ return MoveOperand(ToFloatRegister(a));
+ }
+ MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
+ : MoveOperand::Kind::Memory;
+ Address address = ToAddress(a);
+ MOZ_ASSERT((address.offset & 3) == 0);
+ return MoveOperand(address, kind);
+}
+
+void CodeGenerator::visitMathD(LMathD* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.as_addd(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.as_subd(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.as_muld(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.as_divd(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitMathF(LMathF* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.as_adds(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.as_subs(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.as_muls(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.as_divs(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* lir) {
+ emitTruncateDouble(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* lir) {
+ emitTruncateFloat32(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
+ auto input = ToFloatRegister(lir->input());
+ auto output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ if (mir->isUnsigned()) {
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+ return;
+ }
+
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGeneratorMIPSShared::visitOutOfLineBailout(OutOfLineBailout* ool) {
+ // Push snapshotOffset and make sure stack is aligned.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()),
+ Address(StackPointer, 0));
+
+ masm.jump(&deoptLabel_);
+}
+
+void CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(
+ OutOfLineWasmTruncateCheck* ool) {
+ if (ool->toType() == MIRType::Int32) {
+ masm.outOfLineWasmTruncateToInt32Check(
+ ool->input(), ool->output(), ool->fromType(), ool->flags(),
+ ool->rejoin(), ool->bytecodeOffset());
+ } else {
+ MOZ_ASSERT(ool->toType() == MIRType::Int64);
+ masm.outOfLineWasmTruncateToInt64Check(
+ ool->input(), ool->output64(), ool->fromType(), ool->flags(),
+ ool->rejoin(), ool->bytecodeOffset());
+ }
+}
+
+void CodeGenerator::visitCopySignF(LCopySignF* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ masm.moveFromFloat32(lhs, lhsi);
+ masm.moveFromFloat32(rhs, rhsi);
+
+ // Combine.
+ masm.ma_ins(rhsi, lhsi, 0, 31);
+
+ masm.moveToFloat32(rhsi, output);
+}
+
+void CodeGenerator::visitCopySignD(LCopySignD* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ // Manipulate high words of double inputs.
+ masm.moveFromDoubleHi(lhs, lhsi);
+ masm.moveFromDoubleHi(rhs, rhsi);
+
+ // Combine.
+ masm.ma_ins(rhsi, lhsi, 0, 31);
+
+ masm.moveToDoubleHi(rhsi, output);
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ const ValueOperand out = ToOutValue(value);
+
+ masm.moveValue(value->value(), out);
+}
+
+void CodeGenerator::visitDouble(LDouble* ins) {
+ const LDefinition* out = ins->getDef(0);
+
+ masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitFloat32(LFloat32* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
+ FloatRegister input = ToFloatRegister(test->input());
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantDouble(0.0, ScratchDoubleReg);
+ // If 0, or NaN, the result is false.
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
+ FloatRegister input = ToFloatRegister(test->input());
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ // If 0, or NaN, the result is false.
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitCompareD(LCompareD* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_double(dest, lhs, rhs, cond);
+}
+
+void CodeGenerator::visitCompareF(LCompareF* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
+}
+
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
+ if (lir->right()->isConstant()) {
+ masm.ma_and(ScratchRegister, ToRegister(lir->left()),
+ Imm32(ToInt32(lir->right())));
+ } else {
+ masm.as_and(ScratchRegister, ToRegister(lir->left()),
+ ToRegister(lir->right()));
+ }
+ emitBranch(ScratchRegister, ScratchRegister, lir->cond(), lir->ifTrue(),
+ lir->ifFalse());
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ masm.convertUInt32ToDouble(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitNotI(LNotI* ins) {
+ masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
+ ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitNotD(LNotD* ins) {
+  // Since this operation is a logical NOT, we want to set a bit if
+  // the double is falsey, which means 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ masm.loadConstantDouble(0.0, ScratchDoubleReg);
+ masm.ma_cmp_set_double(dest, in, ScratchDoubleReg,
+ Assembler::DoubleEqualOrUnordered);
+}
+
+void CodeGenerator::visitNotF(LNotF* ins) {
+  // Since this operation is a logical NOT, we want to set a bit if
+  // the float32 is falsey, which means 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ masm.ma_cmp_set_float32(dest, in, ScratchFloat32Reg,
+ Assembler::DoubleEqualOrUnordered);
+}
+
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+ masm.memoryBarrier(ins->type());
+}
+
+void CodeGeneratorMIPSShared::generateInvalidateEpilogue() {
+ // Ensure that there is enough space in the buffer for the OsiPoint
+ // patching to occur. Otherwise, we could overwrite the invalidation
+ // epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
+ masm.nop();
+ }
+
+ masm.bind(&invalidate_);
+
+  // Push the return address of the bailout point onto the stack.
+ masm.Push(ra);
+
+ // Push the Ion script onto the stack (when we determine what that
+ // pointer is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+ // Jump to the invalidator which will replace the current frame.
+ TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+ masm.jump(thunk);
+}
+
+class js::jit::OutOfLineTableSwitch
+ : public OutOfLineCodeBase<CodeGeneratorMIPSShared> {
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorMIPSShared* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}
+
+ MTableSwitch* mir() const { return mir_; }
+
+ CodeLabel* jumpLabel() { return &jumpLabel_; }
+};
+
+void CodeGeneratorMIPSShared::visitOutOfLineTableSwitch(
+ OutOfLineTableSwitch* ool) {
+ MTableSwitch* mir = ool->mir();
+
+ masm.haltingAlign(sizeof(void*));
+ masm.bind(ool->jumpLabel());
+ masm.addCodeLabel(*ool->jumpLabel());
+
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void CodeGeneratorMIPSShared::emitTableSwitchDispatch(MTableSwitch* mir,
+ Register index,
+ Register base) {
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+  // Make the index zero-based by subtracting the lowest case value.
+ if (mir->low() != 0) {
+ masm.subPtr(Imm32(mir->low()), index);
+ }
+
+ // Jump to default case if input is out of range
+ int32_t cases = mir->numCases();
+ masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first
+ // generate the case entries (we don't yet know their offsets in the
+ // instruction stream).
+ OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+  // Compute the address of the jump table entry for the right case.
+ masm.ma_li(base, ool->jumpLabel());
+
+ BaseIndex pointer(base, index, ScalePointer);
+
+ // Jump to the right case
+ masm.branchToComputedAddress(pointer);
+}
+
+void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
+ MOZ_ASSERT(ins->instance()->isBogus());
+ masm.movePtr(HeapReg, ToRegister(ins->output()));
+}
+
+template <typename T>
+void CodeGeneratorMIPSShared::emitWasmLoad(T* lir) {
+ const MWasmLoad* mir = lir->mir();
+ SecondScratchRegisterScope scratch2(masm);
+
+ Register ptr = ToRegister(lir->ptr());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
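+  // On mips64 a 32-bit index is kept sign-extended in its GPR, so zero-extend
+  // it into scratch2 before using it to address the wasm heap.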
+ if (mir->base()->type() == MIRType::Int32) {
+ masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+ ptr = scratch2;
+ ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+ }
+
+ if (IsUnaligned(mir->access())) {
+ if (IsFloatingPointType(mir->type())) {
+ masm.wasmUnalignedLoadFP(mir->access(), HeapReg, ptr, ptrScratch,
+ ToFloatRegister(lir->output()),
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmUnalignedLoad(mir->access(), HeapReg, ptr, ptrScratch,
+ ToRegister(lir->output()),
+ ToRegister(lir->getTemp(1)));
+ }
+ } else {
+ masm.wasmLoad(mir->access(), HeapReg, ptr, ptrScratch,
+ ToAnyRegister(lir->output()));
+ }
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
+
+void CodeGenerator::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir) {
+ emitWasmLoad(lir);
+}
+
+template <typename T>
+void CodeGeneratorMIPSShared::emitWasmStore(T* lir) {
+ const MWasmStore* mir = lir->mir();
+ SecondScratchRegisterScope scratch2(masm);
+
+ Register ptr = ToRegister(lir->ptr());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
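+  // As in emitWasmLoad, zero-extend a 32-bit index before using it to address
+  // the wasm heap.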
+ if (mir->base()->type() == MIRType::Int32) {
+ masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+ ptr = scratch2;
+ ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+ }
+
+ if (IsUnaligned(mir->access())) {
+ if (mir->access().type() == Scalar::Float32 ||
+ mir->access().type() == Scalar::Float64) {
+ masm.wasmUnalignedStoreFP(mir->access(), ToFloatRegister(lir->value()),
+ HeapReg, ptr, ptrScratch,
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmUnalignedStore(mir->access(), ToRegister(lir->value()), HeapReg,
+ ptr, ptrScratch, ToRegister(lir->getTemp(1)));
+ }
+ } else {
+ masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), HeapReg, ptr,
+ ptrScratch);
+ }
+}
+
+void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
+
+void CodeGenerator::visitWasmUnalignedStore(LWasmUnalignedStore* lir) {
+ emitWasmStore(lir);
+}
+
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
+ const MAsmJSLoadHeap* mir = ins->mir();
+ const LAllocation* ptr = ins->ptr();
+ const LDefinition* out = ins->output();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->access().type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ size = 8;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ size = 8;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ size = 16;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ size = 16;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ size = 32;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ size = 32;
+ break;
+ case Scalar::Float64:
+ isFloat = true;
+ size = 64;
+ break;
+ case Scalar::Float32:
+ isFloat = true;
+ size = 32;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
+ } else {
+ masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Register ptrReg = ToRegister(ptr);
+
+ if (!mir->needsBoundsCheck()) {
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
+ ToFloatRegister(out));
+ } else {
+ masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
+ ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Label done, outOfRange;
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
+ ToRegister(boundsCheckLimit), &outOfRange);
+ // Offset is ok, let's load value.
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
+ ToFloatRegister(out));
+ } else {
+ masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
+ ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&outOfRange);
+ // Offset is out of range. Load default values.
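+  // (asm.js out-of-bounds loads do not trap: they produce NaN for
+  // floating-point accesses and 0 for integer accesses)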
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
+ } else {
+ masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
+ }
+ } else {
+ masm.move32(Imm32(0), ToRegister(out));
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
+ const MAsmJSStoreHeap* mir = ins->mir();
+ const LAllocation* value = ins->value();
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->access().type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ size = 8;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ size = 8;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ size = 16;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ size = 16;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ size = 32;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ size = 32;
+ break;
+ case Scalar::Float64:
+ isFloat = true;
+ size = 64;
+ break;
+ case Scalar::Float32:
+ isFloat = true;
+ size = 32;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+
+ if (isFloat) {
+ FloatRegister freg = ToFloatRegister(value);
+ Address addr(HeapReg, ptrImm);
+ if (size == 32) {
+ masm.storeFloat32(freg, addr);
+ } else {
+ masm.storeDouble(freg, addr);
+ }
+ } else {
+ masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Register ptrReg = ToRegister(ptr);
+ Address dstAddr(ptrReg, 0);
+
+ if (!mir->needsBoundsCheck()) {
+ if (isFloat) {
+ FloatRegister freg = ToFloatRegister(value);
+ BaseIndex bi(HeapReg, ptrReg, TimesOne);
+ if (size == 32) {
+ masm.storeFloat32(freg, bi);
+ } else {
+ masm.storeDouble(freg, bi);
+ }
+ } else {
+ masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Label outOfRange;
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
+ ToRegister(boundsCheckLimit), &outOfRange);
+
+ // Offset is ok, let's store value.
+ if (isFloat) {
+ if (size == 32) {
+ masm.storeFloat32(ToFloatRegister(value),
+ BaseIndex(HeapReg, ptrReg, TimesOne));
+    } else {
+      masm.storeDouble(ToFloatRegister(value),
+                       BaseIndex(HeapReg, ptrReg, TimesOne));
+    }
+ } else {
+ masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
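+  // Out-of-bounds asm.js stores are silently ignored, so the out-of-range path
+  // simply skips the store.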
+ masm.bind(&outOfRange);
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MWasmCompareExchangeHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MWasmAtomicExchangeHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
+ maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MOZ_ASSERT(ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+
+ masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
+ ToRegister(ins->value()), srcAddr, valueTemp,
+ offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MOZ_ASSERT(!ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
+ ToRegister(ins->value()), srcAddr, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ if (ins->arg()->isConstant()) {
+ masm.storePtr(ImmWord(ToInt32(ins->arg())),
+ Address(StackPointer, mir->spOffset()));
+ } else {
+ if (ins->arg()->isGeneralReg()) {
+ masm.storePtr(ToRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ } else if (mir->input()->type() == MIRType::Double) {
+ masm.storeDouble(ToFloatRegister(ins->arg()).doubleOverlay(),
+ Address(StackPointer, mir->spOffset()));
+ } else {
+ masm.storeFloat32(ToFloatRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ }
+ }
+}
+
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg())) {
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ } else {
+ masm.store64(ToRegister64(ins->arg()), dst);
+ }
+}
+
+void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ const LAllocation* falseExpr = ins->falseExpr();
+
+ if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+ if (falseExpr->isRegister()) {
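+      // movz copies the false value into the output only when the condition
+      // is zero; otherwise the true value already in the output is kept.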
+ masm.as_movz(out, ToRegister(falseExpr), cond);
+ } else {
+ masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
+ }
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+
+ if (falseExpr->isFloatReg()) {
+ if (mirType == MIRType::Float32) {
+ masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr),
+ cond);
+ } else if (mirType == MIRType::Double) {
+ masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr),
+ cond);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+
+ if (mirType == MIRType::Float32) {
+ masm.loadFloat32(ToAddress(falseExpr), out);
+ } else if (mirType == MIRType::Double) {
+ masm.loadDouble(ToAddress(falseExpr), out);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+
+ masm.bind(&done);
+ }
+}
+
+// We expect to handle only the case where compare is {U,}Int32 and select is
+// {U,}Int32, and the "true" input is reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+ ins->compareType() == MCompare::Compare_UInt32;
+ bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+ MOZ_RELEASE_ASSERT(
+ cmpIs32bit && selIs32bit,
+ "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+ Register trueExprAndDest = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+ "true expr input is reused for output");
+
+ Assembler::Condition cond = Assembler::InvertCondition(
+ JSOpToCondition(ins->compareType(), ins->jsop()));
+ const LAllocation* rhs = ins->rightExpr();
+ const LAllocation* falseExpr = ins->ifFalseExpr();
+ Register lhs = ToRegister(ins->leftExpr());
+
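+  // The condition is inverted because cmp32Move32 overwrites the destination
+  // with the false value when the condition holds; when it does not hold, the
+  // true value already in the destination is kept.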
+ masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+}
+
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+ DebugOnly<MIRType> from = ins->input()->type();
+
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.as_mfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.as_mtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Label done;
+
+ // Prevent divide by zero.
+ if (ins->canBeDivideByZero()) {
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+        // Truncated division by zero yields 0 (Infinity|0 == 0).
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ }
+ } else {
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
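+  // Pre-R6 MIPS divu leaves the quotient in LO and the remainder in HI, read
+  // back with mflo/mfhi; MIPSR6 instead has divu/modu forms that write a
+  // general-purpose register directly.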
+#ifdef MIPSR6
+ masm.as_modu(output, lhs, rhs);
+#else
+ masm.as_divu(lhs, rhs);
+ masm.as_mfhi(output);
+#endif
+
+  // If the remainder is non-zero, bail out since the result must be a double.
+ if (ins->mir()->isDiv()) {
+ if (!ins->mir()->toDiv()->canTruncateRemainder()) {
+ bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
+ }
+ // Get quotient
+#ifdef MIPSR6
+ masm.as_divu(output, lhs, rhs);
+#else
+ masm.as_mflo(output);
+#endif
+ }
+
+ if (!ins->mir()->isTruncated()) {
+ bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+
+ BaseIndex address(base, index, mir->scale(), mir->displacement());
+ masm.computeEffectiveAddress(address, output);
+}
+
+void CodeGenerator::visitNegI(LNegI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ma_negu(output, input);
+}
+
+void CodeGenerator::visitNegI64(LNegI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ MOZ_ASSERT(input == ToOutRegister64(ins));
+ masm.neg64(input);
+}
+
+void CodeGenerator::visitNegD(LNegD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_negd(output, input);
+}
+
+void CodeGenerator::visitNegF(LNegF* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_negs(output, input);
+}
+
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
+ Label ok;
+ masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
+ &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register64 base = ToRegister64(lir->base());
+ Register64 out = ToOutRegister64(lir);
+
+ Label ok;
+ masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
+ ImmWord(mir->offset()), &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+ LAtomicTypedArrayElementBinop* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp, outTemp, output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp, outTemp, output);
+ }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+ LAtomicTypedArrayElementBinopForEffect* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Register value = ToRegister(lir->value());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp);
+ }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+ LCompareExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, outTemp,
+ output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, outTemp,
+ output);
+ }
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+ LAtomicExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+ Register value = ToRegister(lir->value());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+ valueTemp, offsetTemp, maskTemp, outTemp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+ valueTemp, offsetTemp, maskTemp, outTemp, output);
+ }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut(out);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(oldval, temp1);
+ masm.loadBigInt64(newval, tempOut);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = Register64(ToRegister(lir->temp2()));
+ Register out = ToRegister(lir->output());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_ASSERT(lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut = Register64(out);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_ASSERT(!lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ }
+}
+
+void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 oldValue = ToRegister64(lir->oldValue());
+ Register64 newValue = ToRegister64(lir->newValue());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+ masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
+ output);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+ masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 output = ToOutRegister64(lir);
+#ifdef JS_CODEGEN_MIPS32
+ Register64 temp(ToRegister(lir->getTemp(0)), ToRegister(lir->getTemp(1)));
+#else
+ Register64 temp(ToRegister(lir->getTemp(0)));
+#endif
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+ masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
+ addr, temp, output);
+}
+
+void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }
+
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+ LWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmVariableShiftSimd128(
+ LWasmVariableShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmConstantShiftSimd128(
+ LWasmConstantShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmSignReplicationSimd128(
+ LWasmSignReplicationSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+ LWasmReplaceInt64LaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+ LWasmReduceAndBranchSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+ LWasmReduceSimd128ToInt64* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
new file mode 100644
index 0000000000..2452a443be
--- /dev/null
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -0,0 +1,157 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_CodeGenerator_mips_shared_h
+#define jit_mips_shared_CodeGenerator_mips_shared_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorMIPSShared;
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+using OutOfLineWasmTruncateCheck =
+ OutOfLineWasmTruncateCheckBase<CodeGeneratorMIPSShared>;
+
+class CodeGeneratorMIPSShared : public CodeGeneratorShared {
+ friend class MoveResolverMIPS;
+
+ protected:
+ CodeGeneratorMIPSShared(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm);
+
+ NonAssertingLabel deoptLabel_;
+
+ Operand ToOperand(const LAllocation& a);
+ Operand ToOperand(const LAllocation* a);
+ Operand ToOperand(const LDefinition* def);
+
+#ifdef JS_PUNBOX64
+ Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branch32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTest32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTestPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTest32(Assembler::Zero, reg, Imm32(0xFF), &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ bool generateOutOfLineCode();
+
+ template <typename T>
+ void branchToBlock(Register lhs, T rhs, MBasicBlock* mir,
+ Assembler::Condition cond) {
+ masm.ma_b(lhs, rhs, skipTrivialBlocks(mir)->lir()->label(), cond);
+ }
+ void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs,
+ FloatRegister rhs, MBasicBlock* mir,
+ Assembler::DoubleCondition cond);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ template <typename T>
+ void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
+ MBasicBlock* mirTrue, MBasicBlock* mirFalse) {
+ if (isNextBlock(mirFalse->lir())) {
+ branchToBlock(lhs, rhs, mirTrue, cond);
+ } else {
+ branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register base);
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+
+ void generateInvalidateEpilogue();
+
+ // Generating a result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+
+ // Generating no result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+
+ public:
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+};
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorMIPSShared> {
+ protected:
+ LSnapshot* snapshot_;
+ uint32_t frameSize_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot, uint32_t frameSize)
+ : snapshot_(snapshot), frameSize_(frameSize) {}
+
+ void accept(CodeGeneratorMIPSShared* codegen) override;
+
+ LSnapshot* snapshot() const { return snapshot_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_CodeGenerator_mips_shared_h */
diff --git a/js/src/jit/mips-shared/LIR-mips-shared.h b/js/src/jit/mips-shared/LIR-mips-shared.h
new file mode 100644
index 0000000000..624e9eb6a7
--- /dev/null
+++ b/js/src/jit/mips-shared/LIR-mips-shared.h
@@ -0,0 +1,360 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_LIR_mips_shared_h
+#define jit_mips_shared_LIR_mips_shared_h
+
+namespace js {
+namespace jit {
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ LWasmUint32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ LWasmUint32ToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LDivI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 1> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ int32_t shift() const { return shift_; }
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LModI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& callTemp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition* callTemp() { return getTemp(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp0,
+ const LDefinition& temp1, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+// Table switch that takes an integer input to select the target case.
+class LTableSwitch : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+ const LAllocation* index() { return getOperand(0); }
+ const LDefinition* tempInt() { return getTemp(0); }
+ // This is added to share the same CodeGenerator prefixes.
+ const LDefinition* tempPointer() { return getTemp(1); }
+};
+
+// Table switch that takes a boxed Value input to select the target case.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() { return getTemp(0); }
+ const LDefinition* tempFloat() { return getTemp(1); }
+ const LDefinition* tempPointer() { return getTemp(2); }
+};
+
+class LMulI : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(MulI);
+
+ LMulI() : LBinaryMath(classOpcode) {}
+
+ MMul* mir() { return mir_->toMul(); }
+};
+
+class LUDivOrMod : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(UDivOrMod);
+
+ LUDivOrMod() : LBinaryMath(classOpcode) {}
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ bool trapOnError() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->trapOnError();
+ }
+ return mir_->toDiv()->trapOnError();
+ }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template <size_t NumDefs>
+class LWasmUnalignedLoadBase : public details::LWasmLoadBase<NumDefs, 2> {
+ public:
+ typedef LWasmLoadBase<NumDefs, 2> Base;
+
+ explicit LWasmUnalignedLoadBase(LNode::Opcode opcode, const LAllocation& ptr,
+ const LDefinition& valueHelper)
+ : Base(opcode, ptr, LAllocation()) {
+ Base::setTemp(0, LDefinition::BogusTemp());
+ Base::setTemp(1, valueHelper);
+ }
+
+ const LAllocation* ptr() { return Base::getOperand(0); }
+ const LDefinition* ptrCopy() { return Base::getTemp(0); }
+};
+
+} // namespace details
+
+class LWasmUnalignedLoad : public details::LWasmUnalignedLoadBase<1> {
+ public:
+ LIR_HEADER(WasmUnalignedLoad);
+
+ explicit LWasmUnalignedLoad(const LAllocation& ptr,
+ const LDefinition& valueHelper)
+ : LWasmUnalignedLoadBase(classOpcode, ptr, valueHelper) {}
+};
+
+class LWasmUnalignedLoadI64
+ : public details::LWasmUnalignedLoadBase<INT64_PIECES> {
+ public:
+ LIR_HEADER(WasmUnalignedLoadI64);
+
+ explicit LWasmUnalignedLoadI64(const LAllocation& ptr,
+ const LDefinition& valueHelper)
+ : LWasmUnalignedLoadBase(classOpcode, ptr, valueHelper) {}
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template <size_t NumOps>
+class LWasmUnalignedStoreBase : public LInstructionHelper<0, NumOps, 2> {
+ public:
+ typedef LInstructionHelper<0, NumOps, 2> Base;
+
+ static const size_t PtrIndex = 0;
+ static const size_t ValueIndex = 1;
+
+ LWasmUnalignedStoreBase(LNode::Opcode opcode, const LAllocation& ptr,
+ const LDefinition& valueHelper)
+ : Base(opcode) {
+ Base::setOperand(0, ptr);
+ Base::setTemp(0, LDefinition::BogusTemp());
+ Base::setTemp(1, valueHelper);
+ }
+
+ MWasmStore* mir() const { return Base::mir_->toWasmStore(); }
+ const LAllocation* ptr() { return Base::getOperand(PtrIndex); }
+ const LDefinition* ptrCopy() { return Base::getTemp(0); }
+};
+
+} // namespace details
+
+class LWasmUnalignedStore : public details::LWasmUnalignedStoreBase<2> {
+ public:
+ LIR_HEADER(WasmUnalignedStore);
+
+ LWasmUnalignedStore(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& valueHelper)
+ : LWasmUnalignedStoreBase(classOpcode, ptr, valueHelper) {
+ setOperand(1, value);
+ }
+
+ const LAllocation* value() { return Base::getOperand(ValueIndex); }
+};
+
+class LWasmUnalignedStoreI64
+ : public details::LWasmUnalignedStoreBase<1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(WasmUnalignedStoreI64);
+ LWasmUnalignedStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LDefinition& valueHelper)
+ : LWasmUnalignedStoreBase(classOpcode, ptr, valueHelper) {
+ setInt64Operand(1, value);
+ }
+
+ const LInt64Allocation value() { return getInt64Operand(ValueIndex); }
+};
+
+class LWasmCompareExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES + INT64_PIECES,
+ 0> {
+ public:
+ LIR_HEADER(WasmCompareExchangeI64);
+
+ LWasmCompareExchangeI64(const LAllocation& ptr,
+ const LInt64Allocation& oldValue,
+ const LInt64Allocation& newValue)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, oldValue);
+ setInt64Operand(1 + INT64_PIECES, newValue);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation oldValue() { return getInt64Operand(1); }
+ const LInt64Allocation newValue() {
+ return getInt64Operand(1 + INT64_PIECES);
+ }
+ const MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+};
+
+class LWasmAtomicExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmAtomicExchangeI64);
+
+ LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const MWasmAtomicExchangeHeap* mir() const {
+ return mir_->toWasmAtomicExchangeHeap();
+ }
+};
+
+class LWasmAtomicBinopI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 2> {
+ public:
+ LIR_HEADER(WasmAtomicBinopI64);
+
+ LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const MWasmAtomicBinopHeap* mir() const {
+ return mir_->toWasmAtomicBinopHeap();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_LIR_mips_shared_h */
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.cpp b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
new file mode 100644
index 0000000000..c28990211f
--- /dev/null
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -0,0 +1,1024 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+LAllocation LIRGeneratorMIPSShared::useByteOpRegister(MDefinition* mir) {
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorMIPSShared::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorMIPSShared::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition LIRGeneratorMIPSShared::tempByteOpRegister() { return temp(); }
+
+// x = !y
+void LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x+y
+void LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void LIRGeneratorMIPSShared::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorMIPSShared::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorMIPSShared::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ bool needsTemp = false;
+ bool cannotAliasRhs = false;
+ bool reuseInput = true;
+
+#ifdef JS_CODEGEN_MIPS32
+ needsTemp = true;
+ cannotAliasRhs = true;
+ // See the documentation on willHaveDifferentLIRNodes; that test does not
+ // allow additional constraints.
+ MOZ_CRASH(
+ "cannotAliasRhs cannot be used the way it is used in the guard below");
+ if (rhs->isConstant()) {
+ int64_t constant = rhs->toConstant()->toInt64();
+ int32_t shift = mozilla::FloorLog2(constant);
+ // See special cases in CodeGeneratorMIPSShared::visitMulI64
+ if (constant >= -1 && constant <= 2) {
+ needsTemp = false;
+ }
+ if (int64_t(1) << shift == constant) {
+ needsTemp = false;
+ }
+ if (mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant + 1)) ||
+ mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant - 1)))
+ reuseInput = false;
+ }
+#endif
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ (willHaveDifferentLIRNodes(lhs, rhs) || cannotAliasRhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+
+ if (needsTemp) {
+ ins->setTemp(0, temp());
+ }
+ if (reuseInput) {
+ defineInt64ReuseInput(ins, mir, 0);
+ } else {
+ defineInt64(ins, mir);
+ }
+}
+
+template <size_t Temps>
+void LIRGeneratorMIPSShared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+#ifdef JS_CODEGEN_MIPS32
+ if (mir->isRotate()) {
+ if (!rhs->isConstant()) {
+ ins->setTemp(0, temp());
+ }
+ ins->setInt64Operand(0, useInt64Register(lhs));
+ } else {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ }
+#else
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+#endif
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES,
+ "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES,
+ "Assume Count is located at INT64_PIECES.");
+
+ ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+
+#ifdef JS_CODEGEN_MIPS32
+ if (mir->isRotate()) {
+ defineInt64(ins, mir);
+ } else {
+ defineInt64ReuseInput(ins, mir, 0);
+ }
+#else
+ defineInt64ReuseInput(ins, mir, 0);
+#endif
+}
+
+template void LIRGeneratorMIPSShared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorMIPSShared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+void LIRGeneratorMIPSShared::lowerForCompareI64AndBranch(
+ MTest* mir, MCompare* comp, JSOp op, MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ LCompareI64AndBranch* lir = new (alloc())
+ LCompareI64AndBranch(comp, op, useInt64Register(left),
+ useInt64OrConstant(right), ifTrue, ifFalse);
+ add(lir, mir);
+}
+
+void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template <size_t Temps>
+void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegister(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorMIPSShared::lowerForFPU(
+ LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+template void LIRGeneratorMIPSShared::lowerForFPU(
+ LInstructionHelper<1, 2, 1>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+void LIRGeneratorMIPSShared::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
+ MInstruction* mir,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+void LIRGeneratorMIPSShared::lowerWasmBuiltinTruncateToInt32(
+ MWasmBuiltinTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ if (opd->type() == MIRType::Double) {
+ define(new (alloc()) LWasmBuiltinTruncateDToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+ return;
+ }
+
+ define(new (alloc()) LWasmBuiltinTruncateFToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorMIPSShared::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+void LIRGeneratorMIPSShared::lowerDivI(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+ // possible; division by negative powers of two can be optimized in a
+ // similar manner as positive powers of two, and division by other
+ // constants can be optimized by a reciprocal multiplication technique.
+ int32_t shift = FloorLog2(rhs);
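+    // For example, rhs == 8 gives shift == 3, and the division is lowered to
+    // LDivPowTwoI, which uses shifts instead of a divide instruction.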
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI* lir =
+ new (alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+ }
+
+ LDivI* lir = new (alloc())
+ LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+}
+
+void LIRGeneratorMIPSShared::lowerNegI(MInstruction* ins, MDefinition* input) {
+ define(new (alloc()) LNegI(useRegisterAtStart(input)), ins);
+}
+
+void LIRGeneratorMIPSShared::lowerNegI64(MInstruction* ins,
+ MDefinition* input) {
+ defineInt64ReuseInput(new (alloc()) LNegI64(useInt64RegisterAtStart(input)),
+ ins, 0);
+}
+
+void LIRGenerator::visitAbs(MAbs* ins) {
+ define(allocateAbs(ins, useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGeneratorMIPSShared::lowerMulI(MMul* mul, MDefinition* lhs,
+ MDefinition* rhs) {
+ LMulI* lir = new (alloc()) LMulI;
+ if (mul->fallible()) {
+ assignSnapshot(lir, mul->bailoutKind());
+ }
+
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void LIRGeneratorMIPSShared::lowerModI(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir =
+ new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
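+      // rhs values of the form 2^k - 1 (3, 7, 15, ...) are handled by
+      // LModMaskI, which reduces the operand with masking instead of dividing.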
+ LModMaskI* lir = new (alloc())
+ LModMaskI(useRegister(mod->lhs()), temp(LDefinition::GENERAL),
+ temp(LDefinition::GENERAL), shift + 1);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ }
+ }
+ LModI* lir =
+ new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ temp(LDefinition::GENERAL));
+
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+}
+
+void LIRGenerator::visitPowHalf(MPowHalf* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGeneratorMIPSShared::lowerWasmSelectI(MWasmSelect* select) {
+ auto* lir = new (alloc())
+ LWasmSelect(useRegisterAtStart(select->trueExpr()),
+ useAny(select->falseExpr()), useRegister(select->condExpr()));
+ defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
+}
+
+void LIRGeneratorMIPSShared::lowerWasmSelectI64(MWasmSelect* select) {
+ auto* lir = new (alloc()) LWasmSelectI64(
+ useInt64RegisterAtStart(select->trueExpr()),
+ useInt64(select->falseExpr()), useRegister(select->condExpr()));
+ defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
+}
+
+LTableSwitch* LIRGeneratorMIPSShared::newLTableSwitch(
+ const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV* LIRGeneratorMIPSShared::newLTableSwitchV(
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
+ tempDouble(), temp(), tableswitch);
+}
+
+void LIRGeneratorMIPSShared::lowerUrshD(MUrsh* mir) {
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new (alloc())
+ LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void LIRGeneratorMIPSShared::lowerPowOfTwoI(MPow* mir) {
+ int32_t base = mir->input()->toConstant()->toInt32();
+ MDefinition* power = mir->power();
+
+ auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
+ assignSnapshot(lir, mir->bailoutKind());
+ define(lir, mir);
+}
+
+void LIRGeneratorMIPSShared::lowerBigIntLsh(MBigIntLsh* ins) {
+ auto* lir = new (alloc()) LBigIntLsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorMIPSShared::lowerBigIntRsh(MBigIntRsh* ins) {
+ auto* lir = new (alloc()) LBigIntRsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
+ if (ins->type() == MIRType::Int32) {
+ define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ } else if (ins->type() == MIRType::Float32) {
+ define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ }
+}
+
+void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
+ auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
+  // sign-extended on the mips64 platform, so we must explicitly promote it
+  // to 64 bits by zero-extension when using it as an index register in
+  // memory accesses.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ LAllocation ptr;
+#ifdef JS_CODEGEN_MIPS32
+ if (ins->type() == MIRType::Int64) {
+ ptr = useRegister(base);
+ } else {
+ ptr = useRegisterAtStart(base);
+ }
+#else
+ ptr = useRegisterAtStart(base);
+#endif
+
+ if (IsUnaligned(ins->access())) {
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc()) LWasmUnalignedLoadI64(ptr, temp());
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LWasmUnalignedLoad(ptr, temp());
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ define(lir, ins);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+#ifdef JS_CODEGEN_MIPS32
+ if (ins->access().isAtomic()) {
+ auto* lir = new (alloc()) LWasmAtomicLoadI64(ptr);
+ defineInt64(lir, ins);
+ return;
+ }
+#endif
+ auto* lir = new (alloc()) LWasmLoadI64(ptr);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LWasmLoad(ptr);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ MDefinition* value = ins->value();
+
+ if (IsUnaligned(ins->access())) {
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ if (ins->access().type() == Scalar::Int64) {
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir =
+ new (alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir =
+ new (alloc()) LWasmUnalignedStore(baseAlloc, valueAlloc, temp());
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+ return;
+ }
+
+ if (ins->access().type() == Scalar::Int64) {
+#ifdef JS_CODEGEN_MIPS32
+ if (ins->access().isAtomic()) {
+ auto* lir = new (alloc()) LWasmAtomicStoreI64(
+ useRegister(base), useInt64Register(value), temp());
+ add(lir, ins);
+ return;
+ }
+#endif
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+}
+
+void LIRGeneratorMIPSShared::lowerUDiv(MDiv* div) {
+ MDefinition* lhs = div->getOperand(0);
+ MDefinition* rhs = div->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+
+ define(lir, div);
+}
+
+void LIRGeneratorMIPSShared::lowerUMod(MMod* mod) {
+ MDefinition* lhs = mod->getOperand(0);
+ MDefinition* rhs = mod->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+
+ define(lir, mod);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir =
+ new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir =
+ new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+ LAllocation baseAlloc;
+ LAllocation limitAlloc;
+ // For MIPS it is best to keep the 'base' in a register if a bounds check
+ // is needed.
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ // A bounds check is only skipped for a positive index.
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else {
+ baseAlloc = useRegisterAtStart(base);
+ if (ins->needsBoundsCheck()) {
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
+ limitAlloc = useRegisterAtStart(boundsCheckLimit);
+ }
+ }
+
+ define(new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation()),
+ ins);
+}
+
+void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+ LAllocation baseAlloc;
+ LAllocation limitAlloc;
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else {
+ baseAlloc = useRegisterAtStart(base);
+ if (ins->needsBoundsCheck()) {
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
+ limitAlloc = useRegisterAtStart(boundsCheckLimit);
+ }
+ }
+
+ add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+ limitAlloc, LAllocation()),
+ ins);
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
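+  // Sub-word (8- and 16-bit) accesses are emulated with word-sized LL/SC on
+  // MIPS: the value is shifted and masked within its containing 32-bit
+  // word, which is what the three temps below are for (a general note; the
+  // exact sequence is emitted by the code generator).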
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LCompareExchangeTypedArrayElement* lir = new (alloc())
+ LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
+ outTemp, valueTemp, offsetTemp,
+ maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LDefinition temp2 = temp();
+
+ auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
+ elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ outTemp = temp();
+ }
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir =
+ new (alloc()) LAtomicExchangeTypedArrayElement(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmCompareExchangeI64(
+ useRegister(base), useInt64Register(ins->oldValue()),
+ useInt64Register(ins->newValue()));
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmCompareExchangeHeap* lir = new (alloc()) LWasmCompareExchangeHeap(
+ useRegister(base), useRegister(ins->oldValue()),
+ useRegister(ins->newValue()), valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmAtomicExchangeI64(
+ useRegister(base), useInt64Register(ins->value()));
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmAtomicExchangeHeap* lir = new (alloc())
+ LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc())
+ LWasmAtomicBinopI64(useRegister(base), useInt64Register(ins->value()));
+ lir->setTemp(0, temp());
+#ifdef JS_CODEGEN_MIPS32
+ lir->setTemp(1, temp());
+#endif
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (!ins->hasUses()) {
+ LWasmAtomicBinopHeapForEffect* lir = new (alloc())
+ LWasmAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()), valueTemp,
+ offsetTemp, maskTemp);
+ add(lir, ins);
+ return;
+ }
+
+ LWasmAtomicBinopHeap* lir = new (alloc())
+ LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We can omit allocating the result BigInt.
+
+ if (ins->isForEffect()) {
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp1, temp2);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (ins->isForEffect()) {
+ LAtomicTypedArrayElementBinopForEffect* lir =
+ new (alloc()) LAtomicTypedArrayElementBinopForEffect(
+ elements, index, value, valueTemp, offsetTemp, maskTemp);
+ add(lir, ins);
+ return;
+ }
+
+ // For a Uint32Array with a known double result we need a temp for
+ // the intermediate output.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
+ LAtomicTypedArrayElementBinop* lir =
+ new (alloc()) LAtomicTypedArrayElementBinop(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitCopySign(MCopySign* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double) {
+ lir = new (alloc()) LCopySignD();
+ } else {
+ lir = new (alloc()) LCopySignF();
+ }
+
+ lir->setTemp(0, temp());
+ lir->setTemp(1, temp());
+
+ lir->setOperand(0, useRegisterAtStart(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ defineInt64(
+ new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ defineInt64(new (alloc())
+ LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
+ ins);
+}
+
+// On MIPS we only specialize the cases where the compare is {U,}Int32 and
+// the select is {U,}Int32.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32);
+}
+
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ auto* lir = new (alloc()) LWasmCompareAndSelect(
+ useRegister(lhs), useRegister(rhs), compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()), useRegister(ins->falseExpr()));
+ defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
+
+void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
+ MOZ_CRASH("ternary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
+ MOZ_CRASH("binary SIMD NYI");
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
+ int8_t shuffle[16]) {
+ return false;
+}
+bool MWasmTernarySimd128::canRelaxBitselect() { return false; }
+
+bool MWasmBinarySimd128::canPmaddubsw() { return false; }
+#endif
+
+bool MWasmBinarySimd128::specializeForConstantRhs() {
+  // There are probably many cases we would want to specialize here.
+ return false;
+}
+
+void LIRGenerator::visitWasmBinarySimd128WithConstant(
+ MWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("binary SIMD with constant NYI");
+}
+
+void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
+ MOZ_CRASH("shift SIMD NYI");
+}
+
+void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
+ MOZ_CRASH("shuffle SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("replace-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
+ MOZ_CRASH("scalar-to-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
+ MOZ_CRASH("unary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
+ MOZ_CRASH("reduce-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("load-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("store-lane SIMD NYI");
+}
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.h b/js/src/jit/mips-shared/Lowering-mips-shared.h
new file mode 100644
index 0000000000..ca74a7aaf5
--- /dev/null
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Lowering_mips_shared_h
+#define jit_mips_shared_Lowering_mips_shared_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPSShared : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorMIPSShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {}
+
+  // x86 has constraints on which registers can be used for 1-byte stores
+  // and loads; on MIPS all registers are okay.
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ bool needTempForPostBarrier() { return false; }
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerUrshD(MUrsh* mir);
+
+ void lowerPowOfTwoI(MPow* mir);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+ template <size_t Temps>
+ void lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompareI64AndBranch(MTest* mir, MCompare* comp, JSOp op,
+ MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* src);
+ template <size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins);
+ void lowerDivI(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerNegI(MInstruction* ins, MDefinition* input);
+ void lowerNegI64(MInstruction* ins, MDefinition* input);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerUDiv(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void lowerWasmSelectI(MWasmSelect* select);
+ void lowerWasmSelectI64(MWasmSelect* select);
+
+ void lowerBigIntLsh(MBigIntLsh* ins);
+ void lowerBigIntRsh(MBigIntRsh* ins);
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ void lowerPhi(MPhi* phi);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Lowering_mips_shared_h */
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
new file mode 100644
index 0000000000..e03b13f297
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -0,0 +1,1307 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MacroAssembler_mips_shared_inl_h
+#define jit_mips_shared_MacroAssembler_mips_shared_inl_h
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+ moveFromFloat32(src, dest);
+}
+
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+ moveToFloat32(src, dest);
+}
+
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+ ma_seb(dest, src);
+}
+
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+ ma_seh(dest, src);
+}
+
+void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(ra, dest); }
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::not32(Register reg) { ma_not(reg, reg); }
+
+void MacroAssembler::and32(Register src, Register dest) {
+ as_and(dest, dest, src);
+}
+
+void MacroAssembler::and32(Imm32 imm, Register dest) { ma_and(dest, imm); }
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) {
+ load32(dest, SecondScratchReg);
+ ma_and(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void MacroAssembler::and32(const Address& src, Register dest) {
+ load32(src, SecondScratchReg);
+ ma_and(dest, SecondScratchReg);
+}
+
+void MacroAssembler::or32(Register src, Register dest) { ma_or(dest, src); }
+
+void MacroAssembler::or32(Imm32 imm, Register dest) { ma_or(dest, imm); }
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) {
+ load32(dest, SecondScratchReg);
+ ma_or(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void MacroAssembler::xor32(Register src, Register dest) { ma_xor(dest, src); }
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) { ma_xor(dest, imm); }
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
+ load32(dest, SecondScratchReg);
+ ma_xor(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void MacroAssembler::xor32(const Address& src, Register dest) {
+ load32(src, SecondScratchReg);
+ ma_xor(dest, SecondScratchReg);
+}
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap16SignExtend(Register reg) {
+ ma_wsbh(reg, reg);
+ ma_seh(reg, reg);
+}
+
+void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
+ ma_wsbh(reg, reg);
+ ma_and(reg, Imm32(0xFFFF));
+}
+
+void MacroAssembler::byteSwap32(Register reg) {
+ ma_wsbh(reg, reg);
+ as_rotr(reg, reg, 16);
+}
+
+// ===============================================================
+// Arithmetic instructions
+
+void MacroAssembler::add32(Register src, Register dest) {
+ as_addu(dest, dest, src);
+}
+
+void MacroAssembler::add32(Imm32 imm, Register dest) {
+ ma_addu(dest, dest, imm);
+}
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) {
+ load32(dest, SecondScratchReg);
+ ma_addu(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+ loadPtr(dest, ScratchRegister);
+ addPtr(imm, ScratchRegister);
+ storePtr(ScratchRegister, dest);
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+ loadPtr(src, ScratchRegister);
+ addPtr(ScratchRegister, dest);
+}
+
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+ as_addd(dest, dest, src);
+}
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+ as_adds(dest, dest, src);
+}
+
+void MacroAssembler::sub32(Register src, Register dest) {
+ as_subu(dest, dest, src);
+}
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) {
+ ma_subu(dest, dest, imm);
+}
+
+void MacroAssembler::sub32(const Address& src, Register dest) {
+ load32(src, SecondScratchReg);
+ as_subu(dest, dest, SecondScratchReg);
+}
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ loadPtr(dest, SecondScratchReg);
+ subPtr(src, SecondScratchReg);
+ storePtr(SecondScratchReg, dest);
+}
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ loadPtr(addr, SecondScratchReg);
+ subPtr(SecondScratchReg, dest);
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+ as_subd(dest, dest, src);
+}
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+ as_subs(dest, dest, src);
+}
+
+void MacroAssembler::mul32(Register rhs, Register srcDest) {
+ as_mul(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
+ move32(imm, SecondScratchReg);
+ mul32(SecondScratchReg, srcDest);
+}
+
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+ as_muls(dest, dest, src);
+}
+
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+ as_muld(dest, dest, src);
+}
+
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ movePtr(imm, ScratchRegister);
+ loadDouble(Address(ScratchRegister, 0), ScratchDoubleReg);
+ mulDouble(ScratchDoubleReg, dest);
+}
+
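+// Pre-R6 MIPS division writes the quotient to LO and the remainder to HI
+// (read back with mflo/mfhi), while MIPSR6 drops HI/LO in favour of
+// three-operand div/mod instructions; hence the #ifdef split in the two
+// helpers below.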
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ if (isUnsigned) {
+#ifdef MIPSR6
+ as_divu(srcDest, srcDest, rhs);
+#else
+ as_divu(srcDest, rhs);
+#endif
+ } else {
+#ifdef MIPSR6
+ as_div(srcDest, srcDest, rhs);
+#else
+ as_div(srcDest, rhs);
+#endif
+ }
+#ifndef MIPSR6
+ as_mflo(srcDest);
+#endif
+}
+
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ if (isUnsigned) {
+#ifdef MIPSR6
+ as_modu(srcDest, srcDest, rhs);
+#else
+ as_divu(srcDest, rhs);
+#endif
+ } else {
+#ifdef MIPSR6
+ as_mod(srcDest, srcDest, rhs);
+#else
+ as_div(srcDest, rhs);
+#endif
+ }
+#ifndef MIPSR6
+ as_mfhi(srcDest);
+#endif
+}
+
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+ as_divs(dest, dest, src);
+}
+
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+ as_divd(dest, dest, src);
+}
+
+void MacroAssembler::neg32(Register reg) { ma_negu(reg, reg); }
+
+void MacroAssembler::negateDouble(FloatRegister reg) { as_negd(reg, reg); }
+
+void MacroAssembler::negateFloat(FloatRegister reg) { as_negs(reg, reg); }
+
+void MacroAssembler::abs32(Register src, Register dest) {
+ // TODO: There's probably a better way to do this.
+ if (src != dest) {
+ move32(src, dest);
+ }
+ Label positive;
+ branchTest32(Assembler::NotSigned, dest, dest, &positive);
+ neg32(dest);
+ bind(&positive);
+}
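+// For reference, a branchless alternative (not what abs32 emits above):
+//   mask = src >> 31;            // arithmetic shift: 0 or -1
+//   dest = (src ^ mask) - mask;  // flip the bits and add one when negative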
+
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+ as_abss(dest, src);
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+ as_absd(dest, src);
+}
+
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+ as_sqrts(dest, src);
+}
+
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+ as_sqrtd(dest, src);
+}
+
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshift32(Register src, Register dest) {
+ ma_sll(dest, dest, src);
+}
+
+void MacroAssembler::flexibleLshift32(Register src, Register dest) {
+ lshift32(src, dest);
+}
+
+void MacroAssembler::lshift32(Imm32 imm, Register dest) {
+ ma_sll(dest, dest, imm);
+}
+
+void MacroAssembler::rshift32(Register src, Register dest) {
+ ma_srl(dest, dest, src);
+}
+
+void MacroAssembler::flexibleRshift32(Register src, Register dest) {
+ rshift32(src, dest);
+}
+
+void MacroAssembler::rshift32(Imm32 imm, Register dest) {
+ ma_srl(dest, dest, imm);
+}
+
+void MacroAssembler::rshift32Arithmetic(Register src, Register dest) {
+ ma_sra(dest, dest, src);
+}
+
+void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
+ rshift32Arithmetic(src, dest);
+}
+
+void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
+ ma_sra(dest, dest, imm);
+}
+
+// ===============================================================
+// Rotation functions
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
+ if (count.value) {
+ ma_rol(dest, input, count);
+ } else {
+ ma_move(dest, input);
+ }
+}
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
+ ma_rol(dest, input, count);
+}
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+ if (count.value) {
+ ma_ror(dest, input, count);
+ } else {
+ ma_move(dest, input);
+ }
+}
+void MacroAssembler::rotateRight(Register count, Register input,
+ Register dest) {
+ ma_ror(dest, input, count);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+ as_clz(dest, src);
+}
+
+void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
+ ma_ctz(dest, src);
+}
+
+void MacroAssembler::popcnt32(Register input, Register output, Register tmp) {
+ // Equivalent to GCC output of mozilla::CountPopulation32()
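+  // Roughly the standard SWAR reduction, in C:
+  //   x -= (x >> 1) & 0x55555555;
+  //   x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+  //   x = (x + (x >> 4)) & 0x0F0F0F0F;
+  //   x += x << 8;  x += x << 16;  x >>= 24;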
+ ma_move(output, input);
+ ma_sra(tmp, input, Imm32(1));
+ ma_and(tmp, Imm32(0x55555555));
+ ma_subu(output, tmp);
+ ma_sra(tmp, output, Imm32(2));
+ ma_and(output, Imm32(0x33333333));
+ ma_and(tmp, Imm32(0x33333333));
+ ma_addu(output, tmp);
+ ma_srl(tmp, output, Imm32(4));
+ ma_addu(output, tmp);
+ ma_and(output, Imm32(0xF0F0F0F));
+ ma_sll(tmp, output, Imm32(8));
+ ma_addu(output, tmp);
+ ma_sll(tmp, output, Imm32(16));
+ ma_addu(output, tmp);
+ ma_sra(output, output, Imm32(24));
+}
+
+// ===============================================================
+// Condition functions
+
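+// For the narrow (8- and 16-bit) compares below, unsigned conditions
+// zero-extend both the loaded value and the immediate, while signed
+// conditions sign-extend them, so the 32-bit compare preserves the
+// narrow-width semantics.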
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(uint8_t(rhs.value)), cond);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(int8_t(rhs.value)), cond);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(uint16_t(rhs.value)), cond);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(int16_t(rhs.value)), cond);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(uint8_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(int8_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ computeScaledAddress(lhs, scratch2);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(Address(scratch2, lhs.offset), scratch2);
+ branch32(cond, scratch2, rhs, label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(Address(scratch2, lhs.offset), scratch2);
+ branch32(cond, scratch2, rhs, label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(uint16_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(int16_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
+ L label) {
+ ma_b(lhs, imm, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+ Label* label) {
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+ Label* label) {
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress addr,
+ Imm32 imm, Label* label) {
+ load32(addr, SecondScratchReg);
+ ma_b(SecondScratchReg, imm, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+ L label) {
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+ Label* label) {
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+ Label* label) {
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+ Label* label) {
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ ImmWord rhs, Label* label) {
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch(*this);
+ loadPtr(lhs, scratch);
+ branchPtr(cond, scratch, rhs, label);
+}
+
+void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ ma_bc1s(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ convertFloat32ToInt32(src, dest, fail, false);
+}
+
+void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ ma_bc1d(lhs, rhs, label, cond);
+}
+
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ switch (cond) {
+ case Overflow:
+ ma_add32TestOverflow(dest, dest, src, overflow);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_add32TestCarry(cond, dest, dest, src, overflow);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ switch (cond) {
+ case Overflow:
+ ma_sub32TestOverflow(dest, dest, src, overflow);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ ma_subu(dest, src);
+ ma_b(dest, dest, overflow, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ma_mul32TestOverflow(dest, dest, src, overflow);
+}
+
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ rshift32(src, dest);
+ branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
+}
+
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+ MOZ_ASSERT(cond == Overflow);
+ neg32(reg);
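+  // INT32_MIN is the only int32 whose negation overflows, and it wraps back
+  // to INT32_MIN, so the compare below catches exactly the overflow case.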
+ branch32(Assembler::Equal, reg, Imm32(INT32_MIN), label);
+}
+
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_addPtrTestOverflow(dest, dest, src, label);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_addPtrTestCarry(cond, dest, dest, src, label);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_subPtrTestOverflow(dest, dest, src, label);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ subPtr(src, dest);
+ ma_b(dest, dest, label, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ma_mulPtrTestOverflow(dest, dest, src, label);
+}
+
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ subPtr(rhs, lhs);
+ branchPtr(cond, lhs, Imm32(0), label);
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ as_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+ }
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ ma_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ load32(lhs, SecondScratchReg);
+ branchTest32(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ load32(lhs, SecondScratchReg);
+ branchTest32(cond, SecondScratchReg, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ as_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+ }
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ ma_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+ Imm32 rhs, Label* label) {
+ loadPtr(lhs, SecondScratchReg);
+ branchTestPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestInt32(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestInt32(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDoubleTruthy(bool b, FloatRegister value,
+ Label* label) {
+ ma_lid(ScratchDoubleReg, 0.0);
+ DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+ ma_bc1d(value, ScratchDoubleReg, label, cond);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = cond == Equal ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JS::detail::ValueUpperInclNumberTag), label, actual);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestSymbol(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestSymbol(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BIGINT), label, cond);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBigInt(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestObject(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestObject(cond, tag, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+ const ValueOperand& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& address,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ ma_b(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag), label,
+ (cond == Equal) ? Below : AboveOrEqual);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestMagic(cond, tag, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestMagic(cond, tag, label);
+}
+
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JS::detail::ValueUpperInclNumberTag),
+ cond == Equal ? BelowOrEqual : Above);
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BOOLEAN), cond);
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_STRING), cond);
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_SYMBOL), cond);
+}
+
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BIGINT), cond);
+}
+
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+ loadPtr(addr, ScratchRegister);
+ branch(ScratchRegister);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ Register scratch = ScratchRegister;
+ MOZ_ASSERT(src != scratch && dest != scratch);
+ cmp32Set(cond, lhs, rhs, scratch);
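+  // Both variants below implement dest = scratch ? src : dest; R6 removed
+  // movn/movz, so the conditional move is composed from selnez/seleqz + or.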
+#ifdef MIPSR6
+ as_selnez(src, src, scratch);
+ as_seleqz(dest, dest, scratch);
+ as_or(dest, dest, src);
+#else
+ as_movn(dest, src, scratch);
+#endif
+}
+
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ Register scratch = ScratchRegister;
+ MOZ_ASSERT(src != scratch && dest != scratch);
+ cmp32Set(cond, lhs, rhs, scratch);
+#ifdef MIPSR6
+ as_selnez(src, src, scratch);
+ as_seleqz(dest, dest, scratch);
+ as_or(dest, dest, src);
+#else
+ as_movn(dest, src, scratch);
+#endif
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(lhs != scratch2 && src != scratch2 && dest != scratch2);
+ load32(rhs, scratch2);
+ cmp32Move32(cond, lhs, scratch2, src, dest);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+ const Address& rhs, const Address& src,
+ Register dest) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(lhs != scratch && dest != scratch);
+ load32(rhs, scratch);
+ cmp32Load32(cond, lhs, scratch, src, dest);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
+ const Address& src, Register dest) {
+ Label skip;
+ branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+ load32(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ Label skip;
+ branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+ loadPtr(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreStringMitigations);
+ Label skip;
+ branchTest32(Assembler::InvertCondition(cond), addr, mask, &skip);
+ loadPtr(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+ Imm32 mask, Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
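+  // With Spectre index masking disabled this is a plain unsigned bounds
+  // check: fail when length <= index, i.e. when index is out of range.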
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreZeroRegister(Condition cond, Register scratch,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+// ========================================================================
+// Memory access primitives.
+
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const Address& addr) {
+ ma_sd(src, addr);
+}
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const BaseIndex& addr) {
+ ma_sd(src, addr);
+}
+
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const Address& addr) {
+ ma_ss(src, addr);
+}
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const BaseIndex& addr) {
+ ma_ss(src, addr);
+}
+
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
+ if (barrier) {
+ as_sync();
+ }
+}
+
+// ===============================================================
+// Clamping functions.
+
+void MacroAssembler::clampIntToUint8(Register reg) {
+ // If reg is < 0, then we want to clamp to 0.
+ as_slti(ScratchRegister, reg, 0);
+#ifdef MIPSR6
+ as_seleqz(reg, reg, ScratchRegister);
+#else
+ as_movn(reg, zero, ScratchRegister);
+#endif
+ // If reg is >= 255, then we want to clamp to 255.
+ ma_li(SecondScratchReg, Imm32(255));
+ as_slti(ScratchRegister, reg, 255);
+#ifdef MIPSR6
+ as_seleqz(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ as_selnez(reg, reg, ScratchRegister);
+ as_or(reg, reg, SecondScratchReg);
+#else
+ as_movz(reg, SecondScratchReg, ScratchRegister);
+#endif
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MacroAssembler_mips_shared_inl_h */
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
new file mode 100644
index 0000000000..0f52a28e43
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -0,0 +1,3355 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+
+#include "mozilla/EndianUtils.h"
+
+#include "jsmath.h"
+
+#include "jit/MacroAssembler.h"
+
+using namespace js;
+using namespace jit;
+
+void MacroAssemblerMIPSShared::ma_move(Register rd, Register rs) {
+ as_or(rd, rs, zero);
+}
+
+void MacroAssemblerMIPSShared::ma_li(Register dest, ImmGCPtr ptr) {
+ writeDataRelocation(ptr);
+ asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
+}
+
+void MacroAssemblerMIPSShared::ma_li(Register dest, Imm32 imm) {
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_addiu(dest, zero, imm.value);
+ } else if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_ori(dest, zero, Imm16::Lower(imm).encode());
+ } else if (Imm16::Lower(imm).encode() == 0) {
+ as_lui(dest, Imm16::Upper(imm).encode());
+ } else {
+ as_lui(dest, Imm16::Upper(imm).encode());
+ as_ori(dest, dest, Imm16::Lower(imm).encode());
+ }
+}
+
+// This method generates a lui/ori instruction pair that can be modified by
+// UpdateLuiOriValue, either during compilation (e.g. Assembler::bind) or
+// during execution (e.g. jit::PatchJump).
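+// For example (illustrative), ma_liPatchable(dest, Imm32(0x12345678)) emits
+//   lui dest, 0x1234       ; upper halfword
+//   ori dest, dest, 0x5678 ; lower halfword
+// and patching rewrites the two 16-bit immediates in place.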
+void MacroAssemblerMIPSShared::ma_liPatchable(Register dest, Imm32 imm) {
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Upper(imm).encode());
+ as_ori(dest, dest, Imm16::Lower(imm).encode());
+}
+
+// Shifts
+void MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Imm32 shift) {
+ as_sll(rd, rt, shift.value % 32);
+}
+void MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt, Imm32 shift) {
+ as_srl(rd, rt, shift.value % 32);
+}
+
+void MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Imm32 shift) {
+ as_sra(rd, rt, shift.value % 32);
+}
+
+void MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Imm32 shift) {
+ if (hasR2()) {
+ as_rotr(rd, rt, shift.value % 32);
+ } else {
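+    // Without ROTR, rotate right by n is (rt >> n) | (rt << (32 - n)).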
+ ScratchRegisterScope scratch(asMasm());
+ as_srl(scratch, rt, shift.value % 32);
+ as_sll(rd, rt, (32 - (shift.value % 32)) % 32);
+ as_or(rd, rd, scratch);
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Imm32 shift) {
+ if (hasR2()) {
+ as_rotr(rd, rt, (32 - (shift.value % 32)) % 32);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ as_srl(scratch, rt, (32 - (shift.value % 32)) % 32);
+ as_sll(rd, rt, shift.value % 32);
+ as_or(rd, rd, scratch);
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt,
+ Register shift) {
+ as_sllv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt,
+ Register shift) {
+ as_srlv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt,
+ Register shift) {
+ as_srav(rd, rt, shift);
+}
+
+void MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt,
+ Register shift) {
+ if (hasR2()) {
+ as_rotrv(rd, rt, shift);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_negu(scratch, shift);
+ as_sllv(scratch, rt, scratch);
+ as_srlv(rd, rt, shift);
+ as_or(rd, rd, scratch);
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt,
+ Register shift) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_negu(scratch, shift);
+ if (hasR2()) {
+ as_rotrv(rd, rt, scratch);
+ } else {
+ as_srlv(rd, rt, scratch);
+ as_sllv(scratch, rt, shift);
+ as_or(rd, rd, scratch);
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_negu(Register rd, Register rs) {
+ as_subu(rd, zero, rs);
+}
+
+void MacroAssemblerMIPSShared::ma_not(Register rd, Register rs) {
+ as_nor(rd, rs, zero);
+}
+
+// Bit extract/insert
+void MacroAssemblerMIPSShared::ma_ext(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ MOZ_ASSERT(pos < 32);
+ MOZ_ASSERT(pos + size < 33);
+
+ if (hasR2()) {
+ as_ext(rt, rs, pos, size);
+ } else {
+ int shift_left = 32 - (pos + size);
+ as_sll(rt, rs, shift_left);
+ int shift_right = 32 - size;
+ if (shift_right > 0) {
+ as_srl(rt, rt, shift_right);
+ }
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_ins(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ MOZ_ASSERT(pos < 32);
+ MOZ_ASSERT(pos + size <= 32);
+ MOZ_ASSERT(size != 0);
+
+ if (hasR2()) {
+ as_ins(rt, rs, pos, size);
+ } else {
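+    // Without INS, build a size-bit mask, clear the destination field in rt,
+    // and OR in the masked, shifted source bits from rs.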
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_subu(scratch, zero, Imm32(1));
+ as_srl(scratch, scratch, 32 - size);
+ as_and(scratch2, rs, scratch);
+ as_sll(scratch2, scratch2, pos);
+ as_sll(scratch, scratch, pos);
+ as_nor(scratch, scratch, zero);
+ as_and(scratch, rt, scratch);
+ as_or(rt, scratch2, scratch);
+ }
+}
+
+// Sign extend
+void MacroAssemblerMIPSShared::ma_seb(Register rd, Register rt) {
+ if (hasR2()) {
+ as_seb(rd, rt);
+ } else {
+ as_sll(rd, rt, 24);
+ as_sra(rd, rd, 24);
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_seh(Register rd, Register rt) {
+ if (hasR2()) {
+ as_seh(rd, rt);
+ } else {
+ as_sll(rd, rt, 16);
+ as_sra(rd, rd, 16);
+ }
+}
+
+// And.
+void MacroAssemblerMIPSShared::ma_and(Register rd, Register rs) {
+ as_and(rd, rd, rs);
+}
+
+void MacroAssemblerMIPSShared::ma_and(Register rd, Imm32 imm) {
+ ma_and(rd, rd, imm);
+}
+
+void MacroAssemblerMIPSShared::ma_and(Register rd, Register rs, Imm32 imm) {
+ if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_andi(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_and(rd, rs, ScratchRegister);
+ }
+}
+
+// Or.
+void MacroAssemblerMIPSShared::ma_or(Register rd, Register rs) {
+ as_or(rd, rd, rs);
+}
+
+void MacroAssemblerMIPSShared::ma_or(Register rd, Imm32 imm) {
+ ma_or(rd, rd, imm);
+}
+
+void MacroAssemblerMIPSShared::ma_or(Register rd, Register rs, Imm32 imm) {
+ if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_ori(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_or(rd, rs, ScratchRegister);
+ }
+}
+
+// xor
+void MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs) {
+ as_xor(rd, rd, rs);
+}
+
+void MacroAssemblerMIPSShared::ma_xor(Register rd, Imm32 imm) {
+ ma_xor(rd, rd, imm);
+}
+
+void MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs, Imm32 imm) {
+ if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_xori(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_xor(rd, rs, ScratchRegister);
+ }
+}
+
+// word swap bytes within halfwords
+void MacroAssemblerMIPSShared::ma_wsbh(Register rd, Register rt) {
+ as_wsbh(rd, rt);
+}
+
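+// Count trailing zeros without a dedicated instruction (sketch of the idea):
+// (rs - 1) ^ rs sets every bit up to and including the lowest set bit of rs,
+// ANDing with (rs - 1) keeps only the bits below it, so clz of that value is
+// 32 - ctz(rs), and subtracting from 32 gives the count (rs == 0 yields 32).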
+void MacroAssemblerMIPSShared::ma_ctz(Register rd, Register rs) {
+ as_addiu(ScratchRegister, rs, -1);
+ as_xor(rd, ScratchRegister, rs);
+ as_and(rd, rd, ScratchRegister);
+ as_clz(rd, rd);
+ ma_li(ScratchRegister, Imm32(0x20));
+ as_subu(rd, ScratchRegister, rd);
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs, Imm32 imm) {
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_addiu(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_addu(rd, rs, ScratchRegister);
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs) {
+ as_addu(rd, rd, rs);
+}
+
+void MacroAssemblerMIPSShared::ma_addu(Register rd, Imm32 imm) {
+ ma_addu(rd, rd, imm);
+}
+
+void MacroAssemblerMIPSShared::ma_add32TestCarry(Condition cond, Register rd,
+ Register rs, Register rt,
+ Label* overflow) {
+ MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
+ MOZ_ASSERT_IF(rd == rs, rt != rd);
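+  // An unsigned add carried iff the result is (unsigned) smaller than an
+  // operand; compare against an operand that still holds its original value.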
+ as_addu(rd, rs, rt);
+ as_sltu(SecondScratchReg, rd, rd == rs ? rt : rs);
+ ma_b(SecondScratchReg, SecondScratchReg, overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+void MacroAssemblerMIPSShared::ma_add32TestCarry(Condition cond, Register rd,
+ Register rs, Imm32 imm,
+ Label* overflow) {
+ ma_li(ScratchRegister, imm);
+ ma_add32TestCarry(cond, rd, rs, ScratchRegister, overflow);
+}
+
+// Subtract.
+void MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs, Imm32 imm) {
+ if (Imm16::IsInSignedRange(-imm.value)) {
+ as_addiu(rd, rs, -imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_subu(rd, rs, ScratchRegister);
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_subu(Register rd, Imm32 imm) {
+ ma_subu(rd, rd, imm);
+}
+
+void MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs) {
+ as_subu(rd, rd, rs);
+}
+
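+// Subtraction of an immediate is normally rewritten as addition of the
+// negated immediate, but -INT32_MIN overflows int32, so that case loads the
+// immediate into a register and uses the register form instead.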
+void MacroAssemblerMIPSShared::ma_sub32TestOverflow(Register rd, Register rs,
+ Imm32 imm,
+ Label* overflow) {
+ if (imm.value != INT32_MIN) {
+ asMasm().ma_add32TestOverflow(rd, rs, Imm32(-imm.value), overflow);
+ } else {
+ ma_li(ScratchRegister, Imm32(imm.value));
+ asMasm().ma_sub32TestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_mul(Register rd, Register rs, Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ as_mul(rd, rs, ScratchRegister);
+}
+
+void MacroAssemblerMIPSShared::ma_mul32TestOverflow(Register rd, Register rs,
+ Register rt,
+ Label* overflow) {
+#ifdef MIPSR6
+ if (rd == rs) {
+ ma_move(SecondScratchReg, rs);
+ rs = SecondScratchReg;
+ }
+ as_mul(rd, rs, rt);
+ as_muh(SecondScratchReg, rs, rt);
+#else
+ as_mult(rs, rt);
+ as_mflo(rd);
+ as_mfhi(SecondScratchReg);
+#endif
+ as_sra(ScratchRegister, rd, 31);
+ ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPSShared::ma_mul32TestOverflow(Register rd, Register rs,
+ Imm32 imm,
+ Label* overflow) {
+ ma_li(ScratchRegister, imm);
+ ma_mul32TestOverflow(rd, rs, ScratchRegister, overflow);
+}
+
+void MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs,
+ Register rt,
+ Label* overflow) {
+#ifdef MIPSR6
+ if (rd == rs) {
+ ma_move(SecondScratchReg, rs);
+ rs = SecondScratchReg;
+ }
+ as_mod(ScratchRegister, rs, rt);
+#else
+ as_div(rs, rt);
+ as_mfhi(ScratchRegister);
+#endif
+ ma_b(ScratchRegister, ScratchRegister, overflow, Assembler::NonZero);
+#ifdef MIPSR6
+ as_div(rd, rs, rt);
+#else
+ as_mflo(rd);
+#endif
+}
+
+void MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs,
+ Imm32 imm,
+ Label* overflow) {
+ ma_li(ScratchRegister, imm);
+ ma_div_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+void MacroAssemblerMIPSShared::ma_mod_mask(Register src, Register dest,
+ Register hold, Register remain,
+ int32_t shift, Label* negZero) {
+  // MATH:
+  // We wish to compute x % ((1<<y) - 1) for a known constant y.
+  // First, let b = (1<<y) and C = (1<<y) - 1, then think of the 32-bit
+  // dividend as a number in base b, namely
+  //   c_0*1 + c_1*b + c_2*b^2 + ... + c_n*b^n.
+  // Since both addition and multiplication commute with modulus,
+  //   x % C == (c_0 + c_1*b + ... + c_n*b^n) % C
+  //         == ((c_0 % C) + (c_1 % C)*(b % C) + ... + (c_n % C)*(b^n % C)) % C.
+  // Since b == C + 1, b % C == 1 and b^n % C == 1, so the whole thing
+  // simplifies to
+  //   (c_0 + c_1 + c_2 + ... + c_n) % C.
+  // Each c_i can easily be computed by a shift/bit-extract, and the modulus
+  // can be maintained by simply subtracting C whenever the running sum
+  // reaches or exceeds C.
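+  // Worked example (illustrative): shift == 2, so C == 3. For x == 11
+  // (0b1011) the base-4 digits are 3 and 2; reducing the running sum by C
+  // whenever it reaches C leaves 2, which matches 11 % 3 == 2.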
+ int32_t mask = (1 << shift) - 1;
+ Label head, negative, sumSigned, done;
+
+  // hold holds -1 if the value was negative, 1 otherwise.
+  // remain holds the remaining bits that have not been processed yet.
+  // SecondScratchReg serves as a temporary location to store extracted bits
+  // into, and also holds the trial subtraction.
+  // dest is the accumulator (and holds the final result).
+
+  // Move the whole value into remain.
+ ma_move(remain, src);
+ // Zero out the dest.
+ ma_li(dest, Imm32(0));
+ // Set the hold appropriately.
+ ma_b(remain, remain, &negative, Signed, ShortJump);
+ ma_li(hold, Imm32(1));
+ ma_b(&head, ShortJump);
+
+ bind(&negative);
+ ma_li(hold, Imm32(-1));
+ ma_negu(remain, remain);
+
+ // Begin the main loop.
+ bind(&head);
+
+ // Extract the bottom bits into SecondScratchReg.
+ ma_and(SecondScratchReg, remain, Imm32(mask));
+ // Add those bits to the accumulator.
+ as_addu(dest, dest, SecondScratchReg);
+  // Do a trial subtraction.
+  ma_subu(SecondScratchReg, dest, Imm32(mask));
+  // If (sum - C) >= 0, store sum - C back into sum, thus performing the
+  // modulus.
+ ma_b(SecondScratchReg, SecondScratchReg, &sumSigned, Signed, ShortJump);
+ ma_move(dest, SecondScratchReg);
+ bind(&sumSigned);
+ // Get rid of the bits that we extracted before.
+ as_srl(remain, remain, shift);
+ // If the shift produced zero, finish, otherwise, continue in the loop.
+ ma_b(remain, remain, &head, NonZero, ShortJump);
+ // Check the hold to see if we need to negate the result.
+ ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+  // hold is negative here (the input was negative), so negate the result to
+  // match what JS expects.
+ if (negZero != nullptr) {
+ // Jump out in case of negative zero.
+ ma_b(hold, hold, negZero, Zero);
+ ma_negu(dest, dest);
+ } else {
+ ma_negu(dest, dest);
+ }
+
+ bind(&done);
+}
+
+// Memory.
+
+void MacroAssemblerMIPSShared::ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ if (isLoongson() && ZeroExtend != extension &&
+ Imm8::IsInSignedRange(src.offset)) {
+ Register index = src.index;
+
+ if (src.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(src.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != src.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, src.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, src.index, Imm32(shift));
+#endif
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_gslbx(dest, src.base, index, src.offset);
+ break;
+ case SizeHalfWord:
+ as_gslhx(dest, src.base, index, src.offset);
+ break;
+ case SizeWord:
+ as_gslwx(dest, src.base, index, src.offset);
+ break;
+ case SizeDouble:
+ as_gsldx(dest, src.base, index, src.offset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return;
+ }
+
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+ asMasm().ma_load(dest, Address(SecondScratchReg, src.offset), size,
+ extension);
+}
+
+void MacroAssemblerMIPSShared::ma_load_unaligned(Register dest,
+ const BaseIndex& src,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t lowOffset, hiOffset;
+ SecondScratchRegisterScope base(asMasm());
+ asMasm().computeScaledAddress(src, base);
+ ScratchRegisterScope scratch(asMasm());
+
+ if (Imm16::IsInSignedRange(src.offset) &&
+ Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
+ lowOffset = Imm16(src.offset).encode();
+ hiOffset = Imm16(src.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(scratch, Imm32(src.offset));
+ asMasm().addPtr(scratch, base);
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ switch (size) {
+ case SizeHalfWord:
+ MOZ_ASSERT(dest != scratch);
+ if (extension == ZeroExtend) {
+ as_lbu(scratch, base, hiOffset);
+ } else {
+ as_lb(scratch, base, hiOffset);
+ }
+ as_lbu(dest, base, lowOffset);
+ ma_ins(dest, scratch, 8, 24);
+ break;
+ case SizeWord:
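+      // lwl/lwr load the left and right parts of a potentially unaligned
+      // word and merge them into dest.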
+ MOZ_ASSERT(dest != base);
+ as_lwl(dest, base, hiOffset);
+ as_lwr(dest, base, lowOffset);
+#ifdef JS_CODEGEN_MIPS64
+ if (extension == ZeroExtend) {
+ as_dext(dest, dest, 0, 32);
+ }
+#endif
+ break;
+#ifdef JS_CODEGEN_MIPS64
+ case SizeDouble:
+ MOZ_ASSERT(dest != base);
+ as_ldl(dest, base, hiOffset);
+ as_ldr(dest, base, lowOffset);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Invalid argument for ma_load_unaligned");
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_load_unaligned(Register dest,
+ const Address& address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t lowOffset, hiOffset;
+ ScratchRegisterScope scratch1(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ Register base;
+
+ if (Imm16::IsInSignedRange(address.offset) &&
+ Imm16::IsInSignedRange(address.offset + size / 8 - 1)) {
+ base = address.base;
+ lowOffset = Imm16(address.offset).encode();
+ hiOffset = Imm16(address.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(scratch1, Imm32(address.offset));
+ asMasm().addPtr(address.base, scratch1);
+ base = scratch1;
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ switch (size) {
+ case SizeHalfWord:
+ MOZ_ASSERT(base != scratch2 && dest != scratch2);
+ if (extension == ZeroExtend) {
+ as_lbu(scratch2, base, hiOffset);
+ } else {
+ as_lb(scratch2, base, hiOffset);
+ }
+ as_lbu(dest, base, lowOffset);
+ ma_ins(dest, scratch2, 8, 24);
+ break;
+ case SizeWord:
+ MOZ_ASSERT(dest != base);
+ as_lwl(dest, base, hiOffset);
+ as_lwr(dest, base, lowOffset);
+#ifdef JS_CODEGEN_MIPS64
+ if (extension == ZeroExtend) {
+ as_dext(dest, dest, 0, 32);
+ }
+#endif
+ break;
+#ifdef JS_CODEGEN_MIPS64
+ case SizeDouble:
+ MOZ_ASSERT(dest != base);
+ as_ldl(dest, base, hiOffset);
+ as_ldr(dest, base, lowOffset);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Invalid argument for ma_load_unaligned");
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_load_unaligned(
+ const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src,
+ Register temp, LoadStoreSize size, LoadStoreExtension extension) {
+ MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+ int16_t lowOffset, hiOffset;
+ Register base;
+
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(src.offset) &&
+ Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
+ base = SecondScratchReg;
+ lowOffset = Imm16(src.offset).encode();
+ hiOffset = Imm16(src.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ asMasm().addPtr(SecondScratchReg, ScratchRegister);
+ base = ScratchRegister;
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ BufferOffset load;
+ switch (size) {
+ case SizeHalfWord:
+ if (extension == ZeroExtend) {
+ load = as_lbu(temp, base, hiOffset);
+ } else {
+ load = as_lb(temp, base, hiOffset);
+ }
+ as_lbu(dest, base, lowOffset);
+ ma_ins(dest, temp, 8, 24);
+ break;
+ case SizeWord:
+ load = as_lwl(dest, base, hiOffset);
+ as_lwr(dest, base, lowOffset);
+#ifdef JS_CODEGEN_MIPS64
+ if (extension == ZeroExtend) {
+ as_dext(dest, dest, 0, 32);
+ }
+#endif
+ break;
+#ifdef JS_CODEGEN_MIPS64
+ case SizeDouble:
+ load = as_ldl(dest, base, hiOffset);
+ as_ldr(dest, base, lowOffset);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+
+ append(access, load.getOffset());
+}
+
+void MacroAssemblerMIPSShared::ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
+ Register index = dest.index;
+
+ if (dest.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(dest.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != dest.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, dest.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, dest.index, Imm32(shift));
+#endif
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, dest.base, index, dest.offset);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, dest.base, index, dest.offset);
+ break;
+ case SizeWord:
+ as_gsswx(data, dest.base, index, dest.offset);
+ break;
+ case SizeDouble:
+ as_gssdx(data, dest.base, index, dest.offset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ asMasm().computeScaledAddress(dest, SecondScratchReg);
+ asMasm().ma_store(data, Address(SecondScratchReg, dest.offset), size,
+ extension);
+}
+
+void MacroAssemblerMIPSShared::ma_store(Imm32 imm, const BaseIndex& dest,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
+ Register data = zero;
+ Register index = dest.index;
+
+ if (imm.value) {
+ MOZ_ASSERT(ScratchRegister != dest.base);
+ MOZ_ASSERT(ScratchRegister != dest.index);
+ data = ScratchRegister;
+ ma_li(data, imm);
+ }
+
+ if (dest.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(dest.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != dest.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, dest.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, dest.index, Imm32(shift));
+#endif
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, dest.base, index, dest.offset);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, dest.base, index, dest.offset);
+ break;
+ case SizeWord:
+ as_gsswx(data, dest.base, index, dest.offset);
+ break;
+ case SizeDouble:
+ as_gssdx(data, dest.base, index, dest.offset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+  // Make sure that SecondScratchReg contains the absolute address so that
+  // the offset is 0.
+  asMasm().computeEffectiveAddress(dest, SecondScratchReg);
+
+  // The scratch register is free now; use it to load the immediate value.
+  ma_li(ScratchRegister, imm);
+
+  // With offset 0, ScratchRegister will not be used inside ma_store(), so we
+  // can pass it as the data register here.
+ asMasm().ma_store(ScratchRegister, Address(SecondScratchReg, 0), size,
+ extension);
+}
+
+void MacroAssemblerMIPSShared::ma_store_unaligned(Register data,
+ const Address& address,
+ LoadStoreSize size) {
+ int16_t lowOffset, hiOffset;
+ ScratchRegisterScope scratch(asMasm());
+ Register base;
+
+ if (Imm16::IsInSignedRange(address.offset) &&
+ Imm16::IsInSignedRange(address.offset + size / 8 - 1)) {
+ base = address.base;
+ lowOffset = Imm16(address.offset).encode();
+ hiOffset = Imm16(address.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(scratch, Imm32(address.offset));
+ asMasm().addPtr(address.base, scratch);
+ base = scratch;
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ switch (size) {
+ case SizeHalfWord: {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(base != scratch2);
+ as_sb(data, base, lowOffset);
+ ma_ext(scratch2, data, 8, 8);
+ as_sb(scratch2, base, hiOffset);
+ break;
+ }
+ case SizeWord:
+ as_swl(data, base, hiOffset);
+ as_swr(data, base, lowOffset);
+ break;
+#ifdef JS_CODEGEN_MIPS64
+ case SizeDouble:
+ as_sdl(data, base, hiOffset);
+ as_sdr(data, base, lowOffset);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Invalid argument for ma_store_unaligned");
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_store_unaligned(Register data,
+ const BaseIndex& dest,
+ LoadStoreSize size) {
+ int16_t lowOffset, hiOffset;
+ SecondScratchRegisterScope base(asMasm());
+ asMasm().computeScaledAddress(dest, base);
+ ScratchRegisterScope scratch(asMasm());
+
+ if (Imm16::IsInSignedRange(dest.offset) &&
+ Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
+ lowOffset = Imm16(dest.offset).encode();
+ hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(scratch, Imm32(dest.offset));
+ asMasm().addPtr(scratch, base);
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ switch (size) {
+ case SizeHalfWord:
+ MOZ_ASSERT(base != scratch);
+ as_sb(data, base, lowOffset);
+ ma_ext(scratch, data, 8, 8);
+ as_sb(scratch, base, hiOffset);
+ break;
+ case SizeWord:
+ as_swl(data, base, hiOffset);
+ as_swr(data, base, lowOffset);
+ break;
+#ifdef JS_CODEGEN_MIPS64
+ case SizeDouble:
+ as_sdl(data, base, hiOffset);
+ as_sdr(data, base, lowOffset);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Invalid argument for ma_store_unaligned");
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_store_unaligned(
+ const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest,
+ Register temp, LoadStoreSize size, LoadStoreExtension extension) {
+ MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+ int16_t lowOffset, hiOffset;
+ Register base;
+
+ asMasm().computeScaledAddress(dest, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(dest.offset) &&
+ Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
+ base = SecondScratchReg;
+ lowOffset = Imm16(dest.offset).encode();
+ hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ asMasm().addPtr(SecondScratchReg, ScratchRegister);
+ base = ScratchRegister;
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ BufferOffset store;
+ switch (size) {
+ case SizeHalfWord:
+ ma_ext(temp, data, 8, 8);
+ store = as_sb(temp, base, hiOffset);
+ as_sb(data, base, lowOffset);
+ break;
+ case SizeWord:
+ store = as_swl(data, base, hiOffset);
+ as_swr(data, base, lowOffset);
+ break;
+#ifdef JS_CODEGEN_MIPS64
+ case SizeDouble:
+ store = as_sdl(data, base, hiOffset);
+ as_sdr(data, base, lowOffset);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ append(access, store.getOffset());
+}
+
+// Branch helpers, used from within MIPS-specific code.
+void MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label,
+ Condition c, JumpKind jumpKind) {
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
+ break;
+ case Always:
+ ma_b(label, jumpKind);
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_ASSERT(lhs == rhs);
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ break;
+ default:
+ Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
+ asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
+ jumpKind);
+ break;
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_b(Register lhs, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ MOZ_ASSERT(c != Overflow);
+ if (imm.value == 0) {
+ if (c == Always || c == AboveOrEqual) {
+ ma_b(label, jumpKind);
+ } else if (c == Below) {
+ ; // This condition is always false. No branch required.
+ } else {
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ }
+ } else {
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ break;
+ default:
+ Condition cond = ma_cmp(ScratchRegister, lhs, imm, c);
+ asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
+ jumpKind);
+ }
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_b(Register lhs, ImmPtr imm, Label* l,
+ Condition c, JumpKind jumpKind) {
+ asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
+}
+
+void MacroAssemblerMIPSShared::ma_b(Label* label, JumpKind jumpKind) {
+ asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
+}
+
+Assembler::Condition MacroAssemblerMIPSShared::ma_cmp(Register dest,
+ Register lhs,
+ Register rhs,
+ Condition c) {
+ switch (c) {
+ case Above:
+ // bgtu s,t,label =>
+ // sltu at,t,s
+ // bne at,$zero,offs
+ as_sltu(dest, rhs, lhs);
+ return NotEqual;
+ case AboveOrEqual:
+ // bgeu s,t,label =>
+ // sltu at,s,t
+ // beq at,$zero,offs
+ as_sltu(dest, lhs, rhs);
+ return Equal;
+ case Below:
+ // bltu s,t,label =>
+ // sltu at,s,t
+ // bne at,$zero,offs
+ as_sltu(dest, lhs, rhs);
+ return NotEqual;
+ case BelowOrEqual:
+ // bleu s,t,label =>
+ // sltu at,t,s
+ // beq at,$zero,offs
+ as_sltu(dest, rhs, lhs);
+ return Equal;
+ case GreaterThan:
+ // bgt s,t,label =>
+ // slt at,t,s
+ // bne at,$zero,offs
+ as_slt(dest, rhs, lhs);
+ return NotEqual;
+ case GreaterThanOrEqual:
+ // bge s,t,label =>
+ // slt at,s,t
+ // beq at,$zero,offs
+ as_slt(dest, lhs, rhs);
+ return Equal;
+ case LessThan:
+ // blt s,t,label =>
+ // slt at,s,t
+ // bne at,$zero,offs
+ as_slt(dest, lhs, rhs);
+ return NotEqual;
+ case LessThanOrEqual:
+ // ble s,t,label =>
+ // slt at,t,s
+ // beq at,$zero,offs
+ as_slt(dest, rhs, lhs);
+ return Equal;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return Always;
+}
+
+Assembler::Condition MacroAssemblerMIPSShared::ma_cmp(Register dest,
+ Register lhs, Imm32 imm,
+ Condition c) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+
+ switch (c) {
+ case Above:
+ case BelowOrEqual:
+ if (Imm16::IsInSignedRange(imm.value + 1) && imm.value != -1) {
+ // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
+ as_sltiu(dest, lhs, imm.value + 1);
+
+ return (c == BelowOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ as_sltu(dest, scratch, lhs);
+ return (c == BelowOrEqual ? Equal : NotEqual);
+ }
+ case AboveOrEqual:
+ case Below:
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_sltiu(dest, lhs, imm.value);
+ } else {
+ ma_li(scratch, imm);
+ as_sltu(dest, lhs, scratch);
+ }
+ return (c == AboveOrEqual ? Equal : NotEqual);
+ case GreaterThan:
+ case LessThanOrEqual:
+ if (Imm16::IsInSignedRange(imm.value + 1)) {
+ // lhs <= rhs via lhs < rhs + 1.
+ as_slti(dest, lhs, imm.value + 1);
+ return (c == LessThanOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ as_slt(dest, scratch, lhs);
+ return (c == LessThanOrEqual ? Equal : NotEqual);
+ }
+ case GreaterThanOrEqual:
+ case LessThan:
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_slti(dest, lhs, imm.value);
+ } else {
+ ma_li(scratch, imm);
+ as_slt(dest, lhs, scratch);
+ }
+ return (c == GreaterThanOrEqual ? Equal : NotEqual);
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return Always;
+}
+
+void MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt,
+ Condition c) {
+ switch (c) {
+ case Equal:
+ // seq d,s,t =>
+ // xor d,s,t
+ // sltiu d,d,1
+ as_xor(rd, rs, rt);
+ as_sltiu(rd, rd, 1);
+ break;
+ case NotEqual:
+ // sne d,s,t =>
+ // xor d,s,t
+ // sltu d,$zero,d
+ as_xor(rd, rs, rt);
+ as_sltu(rd, zero, rd);
+ break;
+ case Above:
+ // sgtu d,s,t =>
+ // sltu d,t,s
+ as_sltu(rd, rt, rs);
+ break;
+ case AboveOrEqual:
+ // sgeu d,s,t =>
+ // sltu d,s,t
+ // xori d,d,1
+ as_sltu(rd, rs, rt);
+ as_xori(rd, rd, 1);
+ break;
+ case Below:
+ // sltu d,s,t
+ as_sltu(rd, rs, rt);
+ break;
+ case BelowOrEqual:
+ // sleu d,s,t =>
+ // sltu d,t,s
+ // xori d,d,1
+ as_sltu(rd, rt, rs);
+ as_xori(rd, rd, 1);
+ break;
+ case GreaterThan:
+ // sgt d,s,t =>
+ // slt d,t,s
+ as_slt(rd, rt, rs);
+ break;
+ case GreaterThanOrEqual:
+ // sge d,s,t =>
+ // slt d,s,t
+ // xori d,d,1
+ as_slt(rd, rs, rt);
+ as_xori(rd, rd, 1);
+ break;
+ case LessThan:
+ // slt d,s,t
+ as_slt(rd, rs, rt);
+ break;
+ case LessThanOrEqual:
+ // sle d,s,t =>
+ // slt d,t,s
+ // xori d,d,1
+ as_slt(rd, rt, rs);
+ as_xori(rd, rd, 1);
+ break;
+ case Zero:
+ MOZ_ASSERT(rs == rt);
+ // seq d,s,$zero =>
+ // sltiu d,s,1
+ as_sltiu(rd, rs, 1);
+ break;
+ case NonZero:
+ MOZ_ASSERT(rs == rt);
+ // sne d,s,$zero =>
+ // sltu d,$zero,s
+ as_sltu(rd, zero, rs);
+ break;
+ case Signed:
+ MOZ_ASSERT(rs == rt);
+ as_slt(rd, rs, zero);
+ break;
+ case NotSigned:
+ MOZ_ASSERT(rs == rt);
+ // sge d,s,$zero =>
+ // slt d,s,$zero
+ // xori d,d,1
+ as_slt(rd, rs, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+}
+
+void MacroAssemblerMIPSShared::compareFloatingPoint(
+ FloatFormat fmt, FloatRegister lhs, FloatRegister rhs, DoubleCondition c,
+ FloatTestKind* testKind, FPConditionBit fcc) {
+ switch (c) {
+ case DoubleOrdered:
+ as_cun(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleEqual:
+ as_ceq(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleNotEqual:
+ as_cueq(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleGreaterThan:
+ as_colt(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleGreaterThanOrEqual:
+ as_cole(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThan:
+ as_colt(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrEqual:
+ as_cole(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleUnordered:
+ as_cun(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleEqualOrUnordered:
+ as_cueq(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleNotEqualOrUnordered:
+ as_ceq(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleGreaterThanOrUnordered:
+ as_cult(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ as_cule(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrUnordered:
+ as_cult(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrEqualOrUnordered:
+ as_cule(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ default:
+ MOZ_CRASH("Invalid DoubleCondition.");
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest,
+ FloatRegister lhs,
+ FloatRegister rhs,
+ DoubleCondition c) {
+ FloatTestKind moveCondition;
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);
+
+#ifdef MIPSR6
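+  // On R6 the FP compare leaves all-ones (true) or zero (false) in an FPR
+  // (f24 here); andi extracts it as 0/1, while addiu 1 maps -1/0 to 0/1,
+  // producing the negated result for TestForFalse.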
+ as_mfc1(dest, FloatRegisters::f24);
+ if (moveCondition == TestForTrue) {
+ as_andi(dest, dest, 0x1);
+ } else {
+ as_addiu(dest, dest, 0x1);
+ }
+#else
+ ma_li(dest, Imm32(1));
+
+ if (moveCondition == TestForTrue) {
+ as_movf(dest, zero);
+ } else {
+ as_movt(dest, zero);
+ }
+#endif
+}
+
+void MacroAssemblerMIPSShared::ma_cmp_set_float32(Register dest,
+ FloatRegister lhs,
+ FloatRegister rhs,
+ DoubleCondition c) {
+ FloatTestKind moveCondition;
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);
+
+#ifdef MIPSR6
+ as_mfc1(dest, FloatRegisters::f24);
+ if (moveCondition == TestForTrue) {
+ as_andi(dest, dest, 0x1);
+ } else {
+ as_addiu(dest, dest, 0x1);
+ }
+#else
+ ma_li(dest, Imm32(1));
+
+ if (moveCondition == TestForTrue) {
+ as_movf(dest, zero);
+ } else {
+ as_movt(dest, zero);
+ }
+#endif
+}
+
+void MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Imm32 imm,
+ Condition c) {
+ if (imm.value == 0) {
+ switch (c) {
+ case Equal:
+ case BelowOrEqual:
+ as_sltiu(rd, rs, 1);
+ break;
+ case NotEqual:
+ case Above:
+ as_sltu(rd, zero, rs);
+ break;
+ case AboveOrEqual:
+ case Below:
+ as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
+ break;
+ case GreaterThan:
+ case LessThanOrEqual:
+ as_slt(rd, zero, rs);
+ if (c == LessThanOrEqual) {
+ as_xori(rd, rd, 1);
+ }
+ break;
+ case LessThan:
+ case GreaterThanOrEqual:
+ as_slt(rd, rs, zero);
+ if (c == GreaterThanOrEqual) {
+ as_xori(rd, rd, 1);
+ }
+ break;
+ case Zero:
+ as_sltiu(rd, rs, 1);
+ break;
+ case NonZero:
+ as_sltu(rd, zero, rs);
+ break;
+ case Signed:
+ as_slt(rd, rs, zero);
+ break;
+ case NotSigned:
+ as_slt(rd, rs, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return;
+ }
+
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ MOZ_ASSERT(rs != ScratchRegister);
+ ma_xor(rd, rs, imm);
+ if (c == Equal) {
+ as_sltiu(rd, rd, 1);
+ } else {
+ as_sltu(rd, zero, rd);
+ }
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_CRASH("Invalid condition.");
+ default:
+ Condition cond = ma_cmp(rd, rs, imm, c);
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+
+      if (cond == Equal) {
+        as_xori(rd, rd, 1);
+      }
+ }
+}
+
+// fp instructions
+void MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, float value) {
+ Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+ if (imm.value != 0) {
+ ma_li(ScratchRegister, imm);
+ moveToFloat32(ScratchRegister, dest);
+ } else {
+ moveToFloat32(zero, dest);
+ }
+}
+
+void MacroAssemblerMIPSShared::ma_sd(FloatRegister ft, BaseIndex address) {
+ if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
+ Register index = address.index;
+
+ if (address.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != address.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, address.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, address.index, Imm32(shift));
+#endif
+ }
+
+ as_gssdx(ft, address.base, index, address.offset);
+ return;
+ }
+
+ asMasm().computeScaledAddress(address, SecondScratchReg);
+ asMasm().ma_sd(ft, Address(SecondScratchReg, address.offset));
+}
+
+void MacroAssemblerMIPSShared::ma_ss(FloatRegister ft, BaseIndex address) {
+ if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
+ Register index = address.index;
+
+ if (address.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != address.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, address.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, address.index, Imm32(shift));
+#endif
+ }
+
+ as_gsssx(ft, address.base, index, address.offset);
+ return;
+ }
+
+ asMasm().computeScaledAddress(address, SecondScratchReg);
+ asMasm().ma_ss(ft, Address(SecondScratchReg, address.offset));
+}
+
+void MacroAssemblerMIPSShared::ma_ld(FloatRegister ft, const BaseIndex& src) {
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+ asMasm().ma_ld(ft, Address(SecondScratchReg, src.offset));
+}
+
+void MacroAssemblerMIPSShared::ma_ls(FloatRegister ft, const BaseIndex& src) {
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+ asMasm().ma_ls(ft, Address(SecondScratchReg, src.offset));
+}
+
+void MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs,
+ Label* label, DoubleCondition c,
+ JumpKind jumpKind, FPConditionBit fcc) {
+ FloatTestKind testKind;
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
+ asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+void MacroAssemblerMIPSShared::ma_bc1d(FloatRegister lhs, FloatRegister rhs,
+ Label* label, DoubleCondition c,
+ JumpKind jumpKind, FPConditionBit fcc) {
+ FloatTestKind testKind;
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
+ asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+void MacroAssemblerMIPSShared::minMaxDouble(FloatRegister srcDest,
+ FloatRegister second,
+ bool handleNaN, bool isMax) {
+ FloatRegister first = srcDest;
+
+ Assembler::DoubleCondition cond = isMax ? Assembler::DoubleLessThanOrEqual
+ : Assembler::DoubleGreaterThanOrEqual;
+ Label nan, equal, done;
+ FloatTestKind moveCondition;
+
+ // First or second is NaN, result is NaN.
+ ma_bc1d(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
+#ifdef MIPSR6
+ if (isMax) {
+ as_max(DoubleFloat, srcDest, first, second);
+ } else {
+ as_min(DoubleFloat, srcDest, first, second);
+ }
+#else
+ // Make sure we handle -0 and 0 right.
+ ma_bc1d(first, second, &equal, Assembler::DoubleEqual, ShortJump);
+ compareFloatingPoint(DoubleFloat, first, second, cond, &moveCondition);
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ as_movt(DoubleFloat, first, second);
+ ma_b(&done, ShortJump);
+
+ // Check for zero.
+ bind(&equal);
+ asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
+ compareFloatingPoint(DoubleFloat, first, ScratchDoubleReg,
+ Assembler::DoubleEqual, &moveCondition);
+
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ as_addd(ScratchDoubleReg, first, second);
+ } else {
+ as_negd(ScratchDoubleReg, first);
+ as_subd(ScratchDoubleReg, ScratchDoubleReg, second);
+ as_negd(ScratchDoubleReg, ScratchDoubleReg);
+ }
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ // First is 0 or -0, move max/min to it, else just return it.
+ as_movt(DoubleFloat, first, ScratchDoubleReg);
+#endif
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
+ asMasm().loadConstantDouble(JS::GenericNaN(), srcDest);
+
+ bind(&done);
+}
+
+void MacroAssemblerMIPSShared::minMaxFloat32(FloatRegister srcDest,
+ FloatRegister second,
+ bool handleNaN, bool isMax) {
+ FloatRegister first = srcDest;
+
+ Assembler::DoubleCondition cond = isMax ? Assembler::DoubleLessThanOrEqual
+ : Assembler::DoubleGreaterThanOrEqual;
+ Label nan, equal, done;
+ FloatTestKind moveCondition;
+
+ // First or second is NaN, result is NaN.
+ ma_bc1s(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
+#ifdef MIPSR6
+ if (isMax) {
+ as_max(SingleFloat, srcDest, first, second);
+ } else {
+ as_min(SingleFloat, srcDest, first, second);
+ }
+#else
+ // Make sure we handle -0 and 0 right.
+ ma_bc1s(first, second, &equal, Assembler::DoubleEqual, ShortJump);
+ compareFloatingPoint(SingleFloat, first, second, cond, &moveCondition);
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ as_movt(SingleFloat, first, second);
+ ma_b(&done, ShortJump);
+
+ // Check for zero.
+ bind(&equal);
+ asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ compareFloatingPoint(SingleFloat, first, ScratchFloat32Reg,
+ Assembler::DoubleEqual, &moveCondition);
+
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ as_adds(ScratchFloat32Reg, first, second);
+ } else {
+ as_negs(ScratchFloat32Reg, first);
+ as_subs(ScratchFloat32Reg, ScratchFloat32Reg, second);
+ as_negs(ScratchFloat32Reg, ScratchFloat32Reg);
+ }
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ // First is 0 or -0, move max/min to it, else just return it.
+ as_movt(SingleFloat, first, ScratchFloat32Reg);
+#endif
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
+ asMasm().loadConstantFloat32(JS::GenericNaN(), srcDest);
+
+ bind(&done);
+}
+
+void MacroAssemblerMIPSShared::loadDouble(const Address& address,
+ FloatRegister dest) {
+ asMasm().ma_ld(dest, address);
+}
+
+void MacroAssemblerMIPSShared::loadDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ asMasm().ma_ld(dest, src);
+}
+
+void MacroAssemblerMIPSShared::loadFloatAsDouble(const Address& address,
+ FloatRegister dest) {
+ asMasm().ma_ls(dest, address);
+ as_cvtds(dest, dest);
+}
+
+void MacroAssemblerMIPSShared::loadFloatAsDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ asMasm().loadFloat32(src, dest);
+ as_cvtds(dest, dest);
+}
+
+void MacroAssemblerMIPSShared::loadFloat32(const Address& address,
+ FloatRegister dest) {
+ asMasm().ma_ls(dest, address);
+}
+
+void MacroAssemblerMIPSShared::loadFloat32(const BaseIndex& src,
+ FloatRegister dest) {
+ asMasm().ma_ls(dest, src);
+}
+
+void MacroAssemblerMIPSShared::ma_call(ImmPtr dest) {
+ asMasm().ma_liPatchable(CallReg, dest);
+ as_jalr(CallReg);
+ as_nop();
+}
+
+void MacroAssemblerMIPSShared::ma_jump(ImmPtr dest) {
+ asMasm().ma_liPatchable(ScratchRegister, dest);
+ as_jr(ScratchRegister);
+ as_nop();
+}
+
+MacroAssembler& MacroAssemblerMIPSShared::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerMIPSShared::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void MacroAssembler::flush() {}
+
+// ===============================================================
+// Stack manipulation functions.
+
+void MacroAssembler::Push(Register reg) {
+ ma_push(reg);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const ImmWord imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const ImmPtr imm) {
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssembler::Push(const ImmGCPtr ptr) {
+ ma_li(ScratchRegister, ptr);
+ ma_push(ScratchRegister);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(FloatRegister f) {
+ ma_push(f);
+ adjustFrame(int32_t(f.pushSize()));
+}
+
+void MacroAssembler::Pop(Register reg) {
+ ma_pop(reg);
+ adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Pop(FloatRegister f) {
+ ma_pop(f);
+ adjustFrame(-int32_t(f.pushSize()));
+}
+
+void MacroAssembler::Pop(const ValueOperand& val) {
+ popValue(val);
+ adjustFrame(-int32_t(sizeof(Value)));
+}
+
+void MacroAssembler::PopStackPtr() {
+ loadPtr(Address(StackPointer, 0), StackPointer);
+ adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset MacroAssembler::call(Register reg) {
+ as_jalr(reg);
+ as_nop();
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset MacroAssembler::call(Label* label) {
+ ma_bal(label);
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset MacroAssembler::callWithPatch() {
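+  // Emitted sequence, one word each: bal, its delay slot (the addPtr), a
+  // patchable data word, lw, an add, jr, nop. patchCall() relies on the bal
+  // being 7 words and the data word 5 words before the returned offset.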
+ as_bal(BOffImm16(3 * sizeof(uint32_t)));
+ addPtr(Imm32(5 * sizeof(uint32_t)), ra);
+ // Allocate space which will be patched by patchCall().
+ spew(".space 32bit initValue 0xffff ffff");
+ writeInst(UINT32_MAX);
+ as_lw(ScratchRegister, ra, -(int32_t)(5 * sizeof(uint32_t)));
+ addPtr(ra, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ BufferOffset call(callerOffset - 7 * sizeof(uint32_t));
+
+ BOffImm16 offset = BufferOffset(calleeOffset).diffB<BOffImm16>(call);
+ if (!offset.isInvalid()) {
+ InstImm* bal = (InstImm*)editSrc(call);
+ bal->setBOffImm16(offset);
+ } else {
+ uint32_t u32Offset = callerOffset - 5 * sizeof(uint32_t);
+ uint32_t* u32 =
+ reinterpret_cast<uint32_t*>(editSrc(BufferOffset(u32Offset)));
+ *u32 = calleeOffset - callerOffset;
+ }
+}
+
+CodeOffset MacroAssembler::farJumpWithPatch() {
+ ma_move(SecondScratchReg, ra);
+ as_bal(BOffImm16(3 * sizeof(uint32_t)));
+ as_lw(ScratchRegister, ra, 0);
+ // Allocate space which will be patched by patchFarJump().
+ CodeOffset farJump(currentOffset());
+ spew(".space 32bit initValue 0xffff ffff");
+ writeInst(UINT32_MAX);
+ addPtr(ra, ScratchRegister);
+ as_jr(ScratchRegister);
+ ma_move(ra, SecondScratchReg);
+ return farJump;
+}
+
+void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ uint32_t* u32 =
+ reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
+ MOZ_ASSERT(*u32 == UINT32_MAX);
+ *u32 = targetOffset - farJump.offset();
+}
+
+CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
+ movePtr(target, CallReg);
+ return call(CallReg);
+}
+
+void MacroAssembler::call(const Address& addr) {
+ loadPtr(addr, CallReg);
+ call(CallReg);
+}
+
+void MacroAssembler::call(ImmWord target) { call(ImmPtr((void*)target.value)); }
+
+void MacroAssembler::call(ImmPtr target) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, target, RelocationKind::HARDCODED);
+ ma_call(target);
+}
+
+void MacroAssembler::call(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ callJitNoProfiler(ScratchRegister);
+}
+
+CodeOffset MacroAssembler::nopPatchableToCall() {
+ // MIPS32 //MIPS64
+ as_nop(); // lui // lui
+ as_nop(); // ori // ori
+ as_nop(); // jalr // drotr32
+ as_nop(); // ori
+#ifdef JS_CODEGEN_MIPS64
+ as_nop(); // jalr
+ as_nop();
+#endif
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
+#ifdef JS_CODEGEN_MIPS64
+ Instruction* inst = (Instruction*)call - 6 /* six nops */;
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)target);
+ inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#else
+ Instruction* inst = (Instruction*)call - 4 /* four nops */;
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
+ (uint32_t)target);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#endif
+}
+
+void MacroAssembler::patchCallToNop(uint8_t* call) {
+#ifdef JS_CODEGEN_MIPS64
+ Instruction* inst = (Instruction*)call - 6 /* six nops */;
+#else
+ Instruction* inst = (Instruction*)call - 4 /* four nops */;
+#endif
+
+ inst[0].makeNop();
+ inst[1].makeNop();
+ inst[2].makeNop();
+ inst[3].makeNop();
+#ifdef JS_CODEGEN_MIPS64
+ inst[4].makeNop();
+ inst[5].makeNop();
+#endif
+}
+
+void MacroAssembler::pushReturnAddress() { push(ra); }
+
+void MacroAssembler::popReturnAddress() { pop(ra); }
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
+ CodeLabel cl;
+
+ ma_li(scratch, &cl);
+ Push(scratch);
+ bind(&cl);
+ uint32_t retAddr = currentOffset();
+
+ addCodeLabel(cl);
+ return retAddr;
+}
+
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ ma_and(buffer, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != SecondScratchReg);
+
+ ma_and(SecondScratchReg, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ branchPtr(InvertCondition(cond),
+ Address(SecondScratchReg, gc::ChunkStoreBufferOffset), ImmWord(0),
+ label);
+}
+
+void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
+
+// ===============================================================
+// WebAssembly
+
+CodeOffset MacroAssembler::wasmTrapInstruction() {
+ CodeOffset offset(currentOffset());
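+  // teq $zero, $zero always traps; the code field tags it as a wasm trap.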
+ as_teq(zero, zero, WASM_TRAP);
+ return offset;
+}
+
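+// trunc.w.d / trunc.w.s set the invalid-operation cause bit in the FCSR when
+// the input is NaN or out of int32 range; these helpers check that bit after
+// the conversion and branch to the out-of-line path if it is set.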
+void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ as_truncwd(ScratchFloat32Reg, input);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, output);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ as_truncws(ScratchFloat32Reg, input);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, output);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt32Check(input, output, MIRType::Float32, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt32Check(input, output, MIRType::Double, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt64Check(input, output, MIRType::Float32, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt64Check(input, output, MIRType::Double, flags,
+ rejoin, off);
+}
+
+void MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt32Check(
+ FloatRegister input, Register output, MIRType fromType, TruncFlags flags,
+ Label* rejoin, wasm::BytecodeOffset trapOffset) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+ if (fromType == MIRType::Double) {
+ asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
+ } else {
+ asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ }
+
+ if (isUnsigned) {
+ ma_li(output, Imm32(UINT32_MAX));
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
+ Assembler::DoubleLessThanOrUnordered, &moveCondition);
+ MOZ_ASSERT(moveCondition == TestForTrue);
+
+ as_movt(output, zero);
+ } else {
+ // Positive overflow is already saturated to INT32_MAX, so we only have
+ // to handle NaN and negative overflow here.
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
+ Assembler::DoubleUnordered, &moveCondition);
+ MOZ_ASSERT(moveCondition == TestForTrue);
+
+ as_movt(output, zero);
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
+ Assembler::DoubleLessThan, &moveCondition);
+ MOZ_ASSERT(moveCondition == TestForTrue);
+
+ ma_li(ScratchRegister, Imm32(INT32_MIN));
+ as_movt(output, ScratchRegister);
+ }
+
+ MOZ_ASSERT(rejoin->bound());
+ asMasm().jump(rejoin);
+ return;
+ }
+
+ Label inputIsNaN;
+
+ if (fromType == MIRType::Double) {
+ asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
+ &inputIsNaN);
+ } else if (fromType == MIRType::Float32) {
+ asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ }
+
+ asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
+ asMasm().bind(&inputIsNaN);
+ asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
+}
+
+void MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt64Check(
+ FloatRegister input, Register64 output_, MIRType fromType, TruncFlags flags,
+ Label* rejoin, wasm::BytecodeOffset trapOffset) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+#if defined(JS_CODEGEN_MIPS32)
+    // Saturating callouts don't use the OOL path.
+ return;
+#else
+ Register output = output_.reg;
+
+ if (fromType == MIRType::Double) {
+ asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
+ } else {
+ asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ }
+
+ if (isUnsigned) {
+ asMasm().ma_li(output, ImmWord(UINT64_MAX));
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
+ Assembler::DoubleLessThanOrUnordered, &moveCondition);
+ MOZ_ASSERT(moveCondition == TestForTrue);
+
+ as_movt(output, zero);
+
+ } else {
+ // Positive overflow is already saturated to INT64_MAX, so we only have
+ // to handle NaN and negative overflow here.
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
+ Assembler::DoubleUnordered, &moveCondition);
+ MOZ_ASSERT(moveCondition == TestForTrue);
+
+ as_movt(output, zero);
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
+ Assembler::DoubleLessThan, &moveCondition);
+ MOZ_ASSERT(moveCondition == TestForTrue);
+
+ asMasm().ma_li(ScratchRegister, ImmWord(INT64_MIN));
+ as_movt(output, ScratchRegister);
+ }
+
+ MOZ_ASSERT(rejoin->bound());
+ asMasm().jump(rejoin);
+ return;
+#endif
+ }
+
+ Label inputIsNaN;
+
+ if (fromType == MIRType::Double) {
+ asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
+ &inputIsNaN);
+ } else if (fromType == MIRType::Float32) {
+ asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ }
+
+#if defined(JS_CODEGEN_MIPS32)
+
+  // The only valid input that produces an INT64_MIN result.
+ double validInput =
+ isUnsigned ? double(uint64_t(INT64_MIN)) : double(int64_t(INT64_MIN));
+
+ if (fromType == MIRType::Double) {
+ asMasm().loadConstantDouble(validInput, ScratchDoubleReg);
+ asMasm().branchDouble(Assembler::DoubleEqual, input, ScratchDoubleReg,
+ rejoin);
+ } else {
+ asMasm().loadConstantFloat32(float(validInput), ScratchFloat32Reg);
+    asMasm().branchFloat(Assembler::DoubleEqual, input, ScratchFloat32Reg,
+                         rejoin);
+ }
+
+#endif
+
+ asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
+ asMasm().bind(&inputIsNaN);
+ asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
+}
+
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, AnyRegister output) {
+ wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmUnalignedLoad(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register output,
+ Register tmp) {
+ wasmLoadImpl(access, memoryBase, ptr, ptrScratch, AnyRegister(output), tmp);
+}
+
+void MacroAssembler::wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch,
+ FloatRegister output, Register tmp1) {
+ wasmLoadImpl(access, memoryBase, ptr, ptrScratch, AnyRegister(output), tmp1);
+}
+
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreImpl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssembler::wasmUnalignedStore(const wasm::MemoryAccessDesc& access,
+ Register value, Register memoryBase,
+ Register ptr, Register ptrScratch,
+ Register tmp) {
+ wasmStoreImpl(access, AnyRegister(value), memoryBase, ptr, ptrScratch, tmp);
+}
+
+void MacroAssembler::wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access,
+ FloatRegister floatValue,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register tmp) {
+ wasmStoreImpl(access, AnyRegister(floatValue), memoryBase, ptr, ptrScratch,
+ tmp);
+}
+
+void MacroAssemblerMIPSShared::wasmLoadImpl(
+ const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+ Register ptrScratch, AnyRegister output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+ bool isFloat = false;
+
+ MOZ_ASSERT(!access.isZeroExtendSimd128Load());
+ MOZ_ASSERT(!access.isSplatSimd128Load());
+ MOZ_ASSERT(!access.isWidenSimd128Load());
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Float64:
+ isFloat = true;
+ break;
+ case Scalar::Float32:
+ isFloat = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ BaseIndex address(memoryBase, ptr, TimesOne);
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ if (isFloat) {
+ if (byteSize == 4) {
+ asMasm().loadUnalignedFloat32(access, address, tmp, output.fpu());
+ } else {
+ asMasm().loadUnalignedDouble(access, address, tmp, output.fpu());
+ }
+ } else {
+ asMasm().ma_load_unaligned(access, output.gpr(), address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ if (isFloat) {
+ if (byteSize == 4) {
+ asMasm().ma_ls(output.fpu(), address);
+ } else {
+ asMasm().ma_ld(output.fpu(), address);
+ }
+ } else {
+ asMasm().ma_load(output.gpr(), address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
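+  // Only the last emitted instruction is a memory access.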
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerMIPSShared::wasmStoreImpl(
+ const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Register memoryBase, Register ptr, Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+ bool isFloat = false;
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ case Scalar::Float64:
+ isFloat = true;
+ break;
+ case Scalar::Float32:
+ isFloat = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ BaseIndex address(memoryBase, ptr, TimesOne);
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ if (isFloat) {
+ if (byteSize == 4) {
+ asMasm().storeUnalignedFloat32(access, value.fpu(), tmp, address);
+ } else {
+ asMasm().storeUnalignedDouble(access, value.fpu(), tmp, address);
+ }
+ } else {
+ asMasm().ma_store_unaligned(access, value.gpr(), address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ if (isFloat) {
+ if (byteSize == 4) {
+ asMasm().ma_ss(value.fpu(), address);
+ } else {
+ asMasm().ma_sd(value.fpu(), address);
+ }
+ } else {
+ asMasm().ma_store(value.gpr(), address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ // Only the last emitted instruction is a memory access.
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ enterFakeExitFrame(cxreg, scratch, type);
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
+template <typename T>
+static void CompareExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again, end;
+
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll(output, SecondScratchReg, 0);
+ masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
+ masm.ma_move(ScratchRegister, newval);
+ masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+ masm.bind(&end);
+
+ return;
+ }
+
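+  // Subword (8- or 16-bit) case: align the address down to a word boundary,
+  // then compute a shift and mask so the field can be compared and updated
+  // inside a word-sized LL/SC loop.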
+ masm.as_andi(offsetTemp, SecondScratchReg, 3);
+ masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN()
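+  // On big-endian targets the byte offset within the word is mirrored.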
+ masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+ masm.as_sll(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+
+ masm.as_srlv(output, ScratchRegister, offsetTemp);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.ma_seb(valueTemp, oldval);
+ masm.ma_seb(output, output);
+ } else {
+ masm.as_andi(valueTemp, oldval, 0xff);
+ masm.as_andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.ma_seh(valueTemp, oldval);
+ masm.ma_seh(output, output);
+ } else {
+ masm.as_andi(valueTemp, oldval, 0xffff);
+ masm.as_andi(output, output, 0xffff);
+ }
+ break;
+ }
+
+ masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);
+
+ masm.as_sllv(valueTemp, newval, offsetTemp);
+ masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
+ masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+ masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&end);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+template <typename T>
+static void AtomicExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll(output, SecondScratchReg, 0);
+ masm.ma_move(ScratchRegister, value);
+ masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
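+  // Subword case: swap only the selected byte/halfword lane of the
+  // containing word inside the LL/SC loop.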
+ masm.as_andi(offsetTemp, SecondScratchReg, 3);
+ masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN()
+ masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+ masm.as_sll(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+ switch (nbytes) {
+ case 1:
+ masm.as_andi(valueTemp, value, 0xff);
+ break;
+ case 2:
+ masm.as_andi(valueTemp, value, 0xffff);
+ break;
+ }
+ masm.as_sllv(valueTemp, valueTemp, offsetTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll(output, SecondScratchReg, 0);
+ masm.as_and(ScratchRegister, output, maskTemp);
+ masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+ masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+ ShortJump);
+
+ masm.as_srlv(output, output, offsetTemp);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.ma_seb(output, output);
+ } else {
+ masm.as_andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.ma_seh(output, output);
+ } else {
+ masm.as_andi(output, output, 0xffff);
+ }
+ break;
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+template <typename T>
+static void AtomicFetchOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll(output, SecondScratchReg, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_addu(ScratchRegister, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_subu(ScratchRegister, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(ScratchRegister, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(ScratchRegister, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(ScratchRegister, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
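+  // Subword case: extract the old field, apply the operation, and merge the
+  // result back into the containing word inside the LL/SC loop; output
+  // receives the old value.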
+ masm.as_andi(offsetTemp, SecondScratchReg, 3);
+ masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN()
+ masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+ masm.as_sll(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+ masm.as_srlv(output, ScratchRegister, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_addu(valueTemp, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_subu(valueTemp, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(valueTemp, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(valueTemp, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(valueTemp, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ switch (nbytes) {
+ case 1:
+ masm.as_andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ masm.as_andi(valueTemp, valueTemp, 0xffff);
+ break;
+ }
+
+ masm.as_sllv(valueTemp, valueTemp, offsetTemp);
+
+ masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
+ masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+ masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+ ShortJump);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.ma_seb(output, output);
+ } else {
+ masm.as_andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.ma_seh(output, output);
+ } else {
+ masm.as_andi(output, output, 0xffff);
+ }
+ break;
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+template <typename T>
+static void AtomicEffectOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_addu(ScratchRegister, ScratchRegister, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_subu(ScratchRegister, ScratchRegister, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(ScratchRegister, ScratchRegister, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(ScratchRegister, ScratchRegister, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(ScratchRegister, ScratchRegister, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
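+  // Subword case: like AtomicFetchOp above, but the old value is not
+  // returned.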
+ masm.as_andi(offsetTemp, SecondScratchReg, 3);
+ masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN()
+ masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+ masm.as_sll(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+ masm.as_srlv(valueTemp, ScratchRegister, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_addu(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_subu(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(valueTemp, valueTemp, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ switch (nbytes) {
+ case 1:
+ masm.as_andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ masm.as_andi(valueTemp, valueTemp, 0xffff);
+ break;
+ }
+
+ masm.as_sllv(valueTemp, valueTemp, offsetTemp);
+
+ masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
+ masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+ masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp);
+}
+
+// ========================================================================
+// JS atomic operations.
+
+template <typename T>
+static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output.gpr());
+ }
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+template <typename T>
+static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const T& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ quotient32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ remainder32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
+ Register remOutput, bool isUnsigned,
+ const LiveRegisterSet&) {
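+  // MIPSR6 has separate div/mod instructions; pre-R6 divides into HI/LO and
+  // the results are read back with mfhi/mflo below.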
+ if (isUnsigned) {
+#ifdef MIPSR6
+ as_divu(ScratchRegister, srcDest, rhs);
+ as_modu(remOutput, srcDest, rhs);
+ ma_move(srcDest, ScratchRegister);
+#else
+ as_divu(srcDest, rhs);
+#endif
+ } else {
+#ifdef MIPSR6
+ as_div(ScratchRegister, srcDest, rhs);
+ as_mod(remOutput, srcDest, rhs);
+ ma_move(srcDest, ScratchRegister);
+#else
+ as_div(srcDest, rhs);
+#endif
+ }
+#ifndef MIPSR6
+ as_mfhi(remOutput);
+ as_mflo(srcDest);
+#endif
+}
+
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ return movWithPatch(ImmPtr(nullptr), dest);
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
+}
+
+// ========================================================================
+// Spectre Mitigations.
+
+void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
+
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchFloat32Scope scratch(*this);
+
+ Label skipCheck, done;
+
+  // If NaN, 0 or -0, check for bailout.
+ loadConstantFloat32(0.0f, scratch);
+ ma_bc1s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If binary value is not zero, it is NaN or -0, so we bail.
+ moveFromDoubleLo(src, SecondScratchReg);
+ branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&skipCheck);
+ as_floorws(scratch, src);
+ moveFromDoubleLo(scratch, dest);
+
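+  // INT_MIN and INT_MAX may indicate that the conversion overflowed the
+  // int32 range, so bail out on either.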
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchDoubleScope scratch(*this);
+
+ Label skipCheck, done;
+
+  // If NaN, 0 or -0, check for bailout.
+ loadConstantDouble(0.0, scratch);
+ ma_bc1d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If high part is not zero, it is NaN or -0, so we bail.
+ moveFromDoubleHi(src, SecondScratchReg);
+ branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&skipCheck);
+ as_floorwd(scratch, src);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchFloat32Scope scratch(*this);
+
+ Label performCeil, done;
+
+  // If x > 0 or x <= -1, perform ceil.
+ loadConstantFloat32(0.0f, scratch);
+ branchFloat(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
+ loadConstantFloat32(-1.0f, scratch);
+ branchFloat(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
+
+  // If the bit pattern is not zero, the input is NaN, -0, or in ]-1; 0[,
+  // so the result would be -0 or invalid and we bail.
+ moveFromFloat32(src, SecondScratchReg);
+ branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&performCeil);
+ as_ceilws(scratch, src);
+ moveFromFloat32(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchDoubleScope scratch(*this);
+
+ Label performCeil, done;
+
+  // If x > 0 or x <= -1, perform ceil.
+ loadConstantDouble(0, scratch);
+ branchDouble(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
+ loadConstantDouble(-1, scratch);
+ branchDouble(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
+
+  // If the high word is not zero, the input is NaN, -0, or in ]-1; 0[,
+  // so the result would be -0 or invalid and we bail.
+ moveFromDoubleHi(src, SecondScratchReg);
+ branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&performCeil);
+ as_ceilwd(scratch, src);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ScratchFloat32Scope scratch(*this);
+
+ Label negative, end, skipCheck;
+
+  // Load the biggest number less than 0.5 into the temp register.
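+  // Using the largest value below 0.5 (rather than 0.5 itself) keeps inputs
+  // just below 0.5 from rounding up to 1 when the addition itself rounds.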
+ loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ loadConstantFloat32(0.0f, scratch);
+ ma_bc1s(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+  // If NaN, 0 or -0, check for bailout.
+ ma_bc1s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If binary value is not zero, it is NaN or -0, so we bail.
+ moveFromFloat32(src, SecondScratchReg);
+ branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&end, ShortJump);
+
+ bind(&skipCheck);
+ as_adds(scratch, src, temp);
+ as_floorws(scratch, scratch);
+
+ moveFromFloat32(scratch, dest);
+
+  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ jump(&end);
+
+ // Input is negative, but isn't -0.
+ bind(&negative);
+
+  // Inputs in ]-0.5; 0] have 0.5 added to them; other negative inputs have
+  // the biggest number less than 0.5 (already in temp) added.
+ Label loadJoin;
+ loadConstantFloat32(-0.5f, scratch);
+ branchFloat(Assembler::DoubleLessThan, src, scratch, &loadJoin);
+ loadConstantFloat32(0.5f, temp);
+ bind(&loadJoin);
+
+ as_adds(temp, src, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ as_floorws(scratch, temp);
+ moveFromFloat32(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+
+ bind(&end);
+}
+
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ScratchDoubleScope scratch(*this);
+
+ Label negative, end, skipCheck;
+
+  // Load the biggest number less than 0.5 into the temp register.
+ loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ loadConstantDouble(0.0, scratch);
+ ma_bc1d(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+  // If NaN, 0 or -0, check for bailout.
+ ma_bc1d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If high part is not zero, it is NaN or -0, so we bail.
+ moveFromDoubleHi(src, SecondScratchReg);
+ branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&end, ShortJump);
+
+ bind(&skipCheck);
+ as_addd(scratch, src, temp);
+ as_floorwd(scratch, scratch);
+
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ jump(&end);
+
+ // Input is negative, but isn't -0.
+ bind(&negative);
+
+  // Inputs in ]-0.5; 0] have 0.5 added to them; other negative inputs have
+  // the biggest double less than 0.5 (already in temp) added.
+ Label loadJoin;
+ loadConstantDouble(-0.5, scratch);
+ branchDouble(Assembler::DoubleLessThan, src, scratch, &loadJoin);
+ loadConstantDouble(0.5, temp);
+ bind(&loadJoin);
+
+ addDouble(src, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ as_floorwd(scratch, temp);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+
+ bind(&end);
+}
+
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ Label notZero;
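+  // trunc.w.s sets the FCSR cause-V (invalid operation) bit on NaN or
+  // overflow; additionally, when the result is zero, the sign bit is
+  // checked below to reject inputs in ]-1; -0].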
+ as_truncws(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+
+ ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
+ moveFromFloat32(src, ScratchRegister);
+ // Check if src is in ]-1; -0] range by checking the sign bit.
+ as_slt(ScratchRegister, ScratchRegister, zero);
+ bind(&notZero);
+
+ branch32(Assembler::NotEqual, ScratchRegister, Imm32(0), fail);
+}
+
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ Label notZero;
+ as_truncwd(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+
+ ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
+ moveFromDoubleHi(src, ScratchRegister);
+ // Check if src is in ]-1; -0] range by checking the sign bit.
+ as_slt(ScratchRegister, ScratchRegister, zero);
+ bind(&notZero);
+
+ branch32(Assembler::NotEqual, ScratchRegister, Imm32(0), fail);
+}
+
+void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+ Register pointer) {
+ if (IsShiftInScaleRange(shift)) {
+ computeEffectiveAddress(
+ BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
+ return;
+ }
+ lshift32(Imm32(shift), indexTemp32);
+ addPtr(indexTemp32, pointer);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
new file mode 100644
index 0000000000..88238accbb
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -0,0 +1,258 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MacroAssembler_mips_shared_h
+#define jit_mips_shared_MacroAssembler_mips_shared_h
+
+#if defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Assembler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Assembler-mips64.h"
+#endif
+
+#include "jit/AtomicOp.h"
+
+namespace js {
+namespace jit {
+
+enum LoadStoreSize {
+ SizeByte = 8,
+ SizeHalfWord = 16,
+ SizeWord = 32,
+ SizeDouble = 64
+};
+
+enum LoadStoreExtension { ZeroExtend = 0, SignExtend = 1 };
+
+enum JumpKind { LongJump = 0, ShortJump = 1 };
+
+enum DelaySlotFill { DontFillDelaySlot = 0, FillDelaySlot = 1 };
+
+static Register CallReg = t9;
+
+class MacroAssemblerMIPSShared : public Assembler {
+ protected:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+ Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);
+
+ void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs,
+ FloatRegister rhs, DoubleCondition c,
+ FloatTestKind* testKind, FPConditionBit fcc = FCC0);
+
+ public:
+ void ma_move(Register rd, Register rs);
+
+ void ma_li(Register dest, ImmGCPtr ptr);
+
+ void ma_li(Register dest, Imm32 imm);
+ void ma_liPatchable(Register dest, Imm32 imm);
+
+ // Shift operations
+ void ma_sll(Register rd, Register rt, Imm32 shift);
+ void ma_srl(Register rd, Register rt, Imm32 shift);
+ void ma_sra(Register rd, Register rt, Imm32 shift);
+ void ma_ror(Register rd, Register rt, Imm32 shift);
+ void ma_rol(Register rd, Register rt, Imm32 shift);
+
+ void ma_sll(Register rd, Register rt, Register shift);
+ void ma_srl(Register rd, Register rt, Register shift);
+ void ma_sra(Register rd, Register rt, Register shift);
+ void ma_ror(Register rd, Register rt, Register shift);
+ void ma_rol(Register rd, Register rt, Register shift);
+
+ // Negate
+ void ma_negu(Register rd, Register rs);
+
+ void ma_not(Register rd, Register rs);
+
+ // Bit extract/insert
+ void ma_ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void ma_ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // Sign extend
+ void ma_seb(Register rd, Register rt);
+ void ma_seh(Register rd, Register rt);
+
+ // and
+ void ma_and(Register rd, Register rs);
+ void ma_and(Register rd, Imm32 imm);
+ void ma_and(Register rd, Register rs, Imm32 imm);
+
+ // or
+ void ma_or(Register rd, Register rs);
+ void ma_or(Register rd, Imm32 imm);
+ void ma_or(Register rd, Register rs, Imm32 imm);
+
+ // xor
+ void ma_xor(Register rd, Register rs);
+ void ma_xor(Register rd, Imm32 imm);
+ void ma_xor(Register rd, Register rs, Imm32 imm);
+
+ // word swap byte within halfwords
+ void ma_wsbh(Register rd, Register rt);
+
+ void ma_ctz(Register rd, Register rs);
+
+ // load
+ void ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load_unaligned(Register dest, const BaseIndex& src,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load_unaligned(Register dest, const Address& address,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load_unaligned(const wasm::MemoryAccessDesc& access, Register dest,
+ const BaseIndex& src, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension);
+
+ // store
+ void ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store_unaligned(Register data, const Address& dest,
+ LoadStoreSize size = SizeWord);
+ void ma_store_unaligned(Register data, const BaseIndex& dest,
+ LoadStoreSize size = SizeWord);
+ void ma_store_unaligned(const wasm::MemoryAccessDesc& access, Register data,
+ const BaseIndex& dest, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension);
+
+ // arithmetic based ops
+ // add
+ void ma_addu(Register rd, Register rs, Imm32 imm);
+ void ma_addu(Register rd, Register rs);
+ void ma_addu(Register rd, Imm32 imm);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+ // subtract
+ void ma_subu(Register rd, Register rs, Imm32 imm);
+ void ma_subu(Register rd, Register rs);
+ void ma_subu(Register rd, Imm32 imm);
+ void ma_sub32TestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+  // Multiplies. For now, there are only a few that we care about.
+ void ma_mul(Register rd, Register rs, Imm32 imm);
+ void ma_mul32TestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_mul32TestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+ // divisions
+ void ma_div_branch_overflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_div_branch_overflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+  // Fast mod; uses scratch registers and thus needs to be in the assembler.
+  // Implicitly assumes that we can overwrite dest at the beginning of the
+  // sequence.
+ void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero = nullptr);
+
+ // branches when done from within mips-specific code
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, l, c, jumpKind);
+ }
+
+ void ma_b(Label* l, JumpKind jumpKind = LongJump);
+
+ // fp instructions
+ void ma_lis(FloatRegister dest, float value);
+
+ void ma_sd(FloatRegister src, BaseIndex address);
+ void ma_ss(FloatRegister src, BaseIndex address);
+
+ void ma_ld(FloatRegister dest, const BaseIndex& src);
+ void ma_ls(FloatRegister dest, const BaseIndex& src);
+
+ // FP branches
+ void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind = LongJump,
+ FPConditionBit fcc = FCC0);
+ void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind = LongJump,
+ FPConditionBit fcc = FCC0);
+
+ void ma_call(ImmPtr dest);
+
+ void ma_jump(ImmPtr dest);
+
+ void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+ // void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c);
+ void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c);
+ void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c);
+
+ void moveToDoubleLo(Register src, FloatRegister dest) { as_mtc1(src, dest); }
+ void moveFromDoubleLo(FloatRegister src, Register dest) {
+ as_mfc1(dest, src);
+ }
+
+ void moveToFloat32(Register src, FloatRegister dest) { as_mtc1(src, dest); }
+ void moveFromFloat32(FloatRegister src, Register dest) { as_mfc1(dest, src); }
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
+ // Handle NaN specially if handleNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN,
+ bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN,
+ bool isMax);
+
+ void loadDouble(const Address& addr, FloatRegister dest);
+ void loadDouble(const BaseIndex& src, FloatRegister dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+ void loadFloat32(const Address& addr, FloatRegister dest);
+ void loadFloat32(const BaseIndex& src, FloatRegister dest);
+
+ void outOfLineWasmTruncateToInt32Check(FloatRegister input, Register output,
+ MIRType fromType, TruncFlags flags,
+ Label* rejoin,
+ wasm::BytecodeOffset trapOffset);
+ void outOfLineWasmTruncateToInt64Check(FloatRegister input, Register64 output,
+ MIRType fromType, TruncFlags flags,
+ Label* rejoin,
+ wasm::BytecodeOffset trapOffset);
+
+ protected:
+ void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register ptrScratch, AnyRegister output,
+ Register tmp);
+ void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MacroAssembler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp b/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
new file mode 100644
index 0000000000..5ea8f0b8de
--- /dev/null
+++ b/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
@@ -0,0 +1,207 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterMIPSShared::emit(const MoveResolver& moves) {
+ if (moves.numCycles()) {
+ // Reserve stack for cycle resolution
+ static_assert(SpillSlotSize == 8);
+ masm.reserveStack(moves.numCycles() * SpillSlotSize);
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++) {
+ emit(moves.getMove(i));
+ }
+}
+
+Address MoveEmitterMIPSShared::cycleSlot(uint32_t slot,
+ uint32_t subslot) const {
+ int32_t offset = masm.framePushed() - pushedAtCycle_;
+ MOZ_ASSERT(Imm16::IsInSignedRange(offset));
+ return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+int32_t MoveEmitterMIPSShared::getAdjustedOffset(const MoveOperand& operand) {
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+ if (operand.base() != StackPointer) {
+ return operand.disp();
+ }
+
+ // Adjust offset if stack pointer has been moved.
+ return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
+
+Address MoveEmitterMIPSShared::getAdjustedAddress(const MoveOperand& operand) {
+ return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+Register MoveEmitterMIPSShared::tempReg() {
+ spilledReg_ = SecondScratchReg;
+ return SecondScratchReg;
+}
+
+void MoveEmitterMIPSShared::emitMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(from.reg() != spilledReg_);
+
+ if (to.isGeneralReg()) {
+ masm.movePtr(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.storePtr(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.loadPtr(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+}
+
+void MoveEmitterMIPSShared::emitInt32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(from.reg() != spilledReg_);
+
+ if (to.isGeneralReg()) {
+ masm.move32(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.store32(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.load32(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.load32(getAdjustedAddress(from), tempReg());
+ masm.store32(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+ masm.store32(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+}
+
+void MoveEmitterMIPSShared::emitFloat32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ // Ensure that we can use ScratchFloat32Reg in memory move.
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloat32Reg);
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloat32Reg);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveFloat32(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+      // Used only when passing a float parameter in a1, a2, or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.moveFromFloat32(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+    // Used only when passing a float parameter in a1, a2, or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), ScratchFloat32Reg);
+ masm.storeFloat32(ScratchFloat32Reg, getAdjustedAddress(to));
+ }
+}
+
+void MoveEmitterMIPSShared::emit(const MoveOp& move) {
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd() && move.isCycleBegin()) {
+ // A fun consequence of aliased registers is you can have multiple
+ // cycles at once, and one can end exactly where another begins.
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ return;
+ }
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ MOZ_ASSERT(inCycle_ > 0);
+ inCycle_--;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ inCycle_++;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to);
+ break;
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterMIPSShared::assertDone() { MOZ_ASSERT(inCycle_ == 0); }
+
+void MoveEmitterMIPSShared::finish() {
+ assertDone();
+
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
diff --git a/js/src/jit/mips-shared/MoveEmitter-mips-shared.h b/js/src/jit/mips-shared/MoveEmitter-mips-shared.h
new file mode 100644
index 0000000000..81dbaddc45
--- /dev/null
+++ b/js/src/jit/mips-shared/MoveEmitter-mips-shared.h
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MoveEmitter_mips_shared_h
+#define jit_mips_shared_MoveEmitter_mips_shared_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPSShared {
+ protected:
+ uint32_t inCycle_;
+ MacroAssembler& masm;
+
+ // Original stack push value.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ void assertDone();
+ Register tempReg();
+ FloatRegister tempFloatReg();
+ Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;
+ int32_t getAdjustedOffset(const MoveOperand& operand);
+ Address getAdjustedAddress(const MoveOperand& operand);
+
+ void emitMove(const MoveOperand& from, const MoveOperand& to);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ virtual void emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) = 0;
+ virtual void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot) = 0;
+ virtual void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot) = 0;
+ void emit(const MoveOp& move);
+
+ public:
+ MoveEmitterMIPSShared(MacroAssembler& masm)
+ : inCycle_(0),
+ masm(masm),
+ pushedAtStart_(masm.framePushed()),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg) {}
+ ~MoveEmitterMIPSShared() { assertDone(); }
+ void emit(const MoveResolver& moves);
+ void finish();
+
+ void setScratchRegister(Register reg) {}
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MoveEmitter_mips_shared_h */
diff --git a/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h b/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h
new file mode 100644
index 0000000000..cee021595f
--- /dev/null
+++ b/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_SharedICHelpers_mips_shared_inl_h
+#define jit_mips_shared_SharedICHelpers_mips_shared_inl_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ Register scratch = R2.scratchReg();
+
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.subPtr(Imm32(argSize), scratch);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+ masm.addPtr(Imm32(argSize), scratch);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ // ICTailCallReg (ra) already contains the return address (as we
+ // keep it there through the stub calls), but the VMWrapper code being
+ // called expects the return address to also be pushed on the stack.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(ra);
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+#ifdef DEBUG
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Note: when making changes here, don't forget to update
+ // BaselineStubFrame if needed.
+
+ // Push frame descriptor and return address.
+ masm.PushFrameDescriptor(FrameType::BaselineJS);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.movePtr(StackPointer, FramePointer);
+ masm.Push(ICStubReg);
+
+ // Stack should remain aligned.
+ masm.assertStackAlignment(sizeof(Value), 0);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_SharedICHelpers_mips_shared_inl_h */
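A note on the DEBUG block in EmitBaselineTailCallVM above: the value stored at reverseOffsetOfDebugFrameSize() is the fp-to-sp distance with the VMFunction arguments excluded, which is why argSize is subtracted before the store and added back afterwards. A standalone sketch of that arithmetic (the concrete numbers are hypothetical, chosen only for illustration):

  #include <cstdint>
  #include <cstdio>

  static uint32_t DebugFrameSize(uintptr_t fp, uintptr_t sp, uint32_t argSize) {
    // Frame size is the distance from fp down to sp, minus the VMFunction
    // arguments sitting on top of the stack.
    return uint32_t(fp - sp) - argSize;
  }

  int main() {
    // Hypothetical frame: fp = 0x1000, sp = 0x0fc0, 16 bytes of VM arguments.
    std::printf("%u\n", DebugFrameSize(0x1000, 0x0fc0, 16));  // prints 48
    return 0;
  }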
diff --git a/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
new file mode 100644
index 0000000000..979e4b0a42
--- /dev/null
+++ b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_SharedICHelpers_mips_shared_h
+#define jit_mips_shared_SharedICHelpers_mips_shared_h
+
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on
+// the stack on MIPS).
+static const size_t ICStackValueOffset = 0;
+
+struct BaselineStubFrame {
+ uintptr_t savedFrame;
+ uintptr_t savedStub;
+ uintptr_t returnAddress;
+ uintptr_t descriptor;
+};
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+  // No-op on MIPS because the ra register always holds the return address.
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+  // No-op on MIPS because the ra register always holds the return address.
+}
+
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Load stubcode pointer from the ICStub.
+ // R2 won't be active when we call ICs, so we can use it as scratch.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode via a direct jump-and-link
+ masm.call(R2.scratchReg());
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.branch(ra); }
+
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ masm.loadPtr(
+ Address(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP),
+ ICStubReg);
+ masm.movePtr(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // Load the return address.
+ masm.Pop(ICTailCallReg);
+
+ // Discard the frame descriptor.
+ {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.Pop(scratch2);
+ }
+}
+
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ // On MIPS, $ra is clobbered by guardedCallPreBarrier. Save it first.
+ masm.push(ra);
+ masm.guardedCallPreBarrier(addr, type);
+ masm.pop(ra);
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_SharedICHelpers_mips_shared_h */
diff --git a/js/src/jit/mips32/Architecture-mips32.cpp b/js/src/jit/mips32/Architecture-mips32.cpp
new file mode 100644
index 0000000000..598551eafe
--- /dev/null
+++ b/js/src/jit/mips32/Architecture-mips32.cpp
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Architecture-mips32.h"
+
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+const char* const Registers::RegNames[] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2",
+ "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5",
+ "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"};
+
+const uint32_t Allocatable = 14;
+
+const Registers::SetType Registers::ArgRegMask = Registers::SharedArgRegMask;
+
+const Registers::SetType Registers::JSCallMask =
+ (1 << Registers::a2) | (1 << Registers::a3);
+
+const Registers::SetType Registers::CallMask =
+ (1 << Registers::v0) |
+ (1 << Registers::v1); // used for double-size returns
+
+FloatRegisters::Encoding FloatRegisters::FromName(const char* name) {
+ for (size_t i = 0; i < RegisterIdLimit; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Encoding(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegister FloatRegister::doubleOverlay() const {
+ MOZ_ASSERT(isNotOdd());
+ if (isSingle()) {
+ return FloatRegister(code_, Double);
+ }
+ return *this;
+}
+
+FloatRegister FloatRegister::singleOverlay() const {
+ MOZ_ASSERT(isNotOdd());
+ if (isDouble()) {
+ return FloatRegister(code_, Single);
+ }
+ return *this;
+}
+
+FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ // Even for single size registers save complete double register.
+ mod.addUnchecked((*iter).doubleOverlay());
+ }
+ return mod.set();
+}
+
+uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ FloatRegisterSet ss = s.reduceSetForPush();
+ uint64_t bits = ss.bits();
+ // We are only pushing double registers.
+ MOZ_ASSERT((bits & 0xFFFF) == 0);
+ uint32_t ret = mozilla::CountPopulation32(bits) * sizeof(double);
+
+ // Additional space needed by MacroAssembler::PushRegsInMask to ensure
+ // correct alignment of double values.
+ if (ret) {
+ ret += sizeof(double);
+ }
+
+ return ret;
+}
+uint32_t FloatRegister::getRegisterDumpOffsetInBytes() {
+ MOZ_ASSERT(isNotOdd());
+ return id() * sizeof(float);
+}
+
+} // namespace jit
+} // namespace js
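GetPushSizeInBytes above only counts double registers (the asserted low 16 bits of the reduced set are zero) and reserves one extra sizeof(double) of slack so that MacroAssembler::PushRegsInMask can realign the saved doubles. A standalone sketch of that size computation (plain C++, the jit register-set types are omitted):

  #include <cstdint>
  #include <cstdio>

  static uint32_t PushSizeForDoubles(uint32_t numDoubles) {
    uint32_t bytes = numDoubles * sizeof(double);
    if (bytes) {
      bytes += sizeof(double);  // alignment slack used by PushRegsInMask
    }
    return bytes;
  }

  int main() {
    // 0 doubles -> 0 bytes; 3 doubles -> 24 bytes plus 8 bytes of slack.
    std::printf("%u %u\n", PushSizeForDoubles(0), PushSizeForDoubles(3));
    return 0;
  }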
diff --git a/js/src/jit/mips32/Architecture-mips32.h b/js/src/jit/mips32/Architecture-mips32.h
new file mode 100644
index 0000000000..8e186d2c9c
--- /dev/null
+++ b/js/src/jit/mips32/Architecture-mips32.h
@@ -0,0 +1,282 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Architecture_mips32_h
+#define jit_mips32_Architecture_mips32_h
+
+#include "mozilla/EndianUtils.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+static const uint32_t ShadowStackSpace = 4 * sizeof(uintptr_t);
+
+// These offsets are specific to nunboxing, and capture offsets into the
+// components of a js::Value.
+// The size of MIPS32 general purpose registers is 32 bits.
+#if MOZ_LITTLE_ENDIAN()
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+#else
+static const int32_t NUNBOX32_TYPE_OFFSET = 0;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 4;
+#endif
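For reference, a minimal standalone mock of the nunbox layout these offsets describe on a little-endian target (an illustrative struct, not the real js::Value):

  #include <cstddef>
  #include <cstdint>

  struct Nunbox32Value {
    uint32_t payload;  // low word on little-endian: NUNBOX32_PAYLOAD_OFFSET = 0
    uint32_t type;     // high word on little-endian: NUNBOX32_TYPE_OFFSET = 4
  };

  static_assert(offsetof(Nunbox32Value, payload) == 0, "payload in the low word");
  static_assert(offsetof(Nunbox32Value, type) == 4, "type tag in the high word");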
+
+// The MIPS32 floating-point coprocessor can run in two modes:
+// - FR=0 mode / 32-bit FPRs - Historical default: there are 32 single
+//   precision registers, and pairs of even and odd float registers are used
+//   as double precision registers. Example: f0 (double) is composed of
+//   f0 and f1 (single). A Loongson3A FPU running in this mode doesn't allow
+//   use of odd registers for single precision arithmetic.
+// - FR=1 mode / 64-bit FPRs - In this case there are 32 double precision
+//   registers which can also be used as single precision registers. More info:
+//   https://dmz-portal.imgtec.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking
+
+// Currently we enable 16 even single precision registers, which can also be
+// used as double precision registers. This lets jit code run even on
+// Loongson3A. FR=1 mode is not supported because the MacroAssembler treats
+// odd single precision registers as the high parts of even double precision
+// registers.
+#ifdef __mips_fpr
+static_assert(__mips_fpr == 32, "MIPS32 jit only supports FR=0 fpu mode.");
+#endif
+
+class FloatRegisters : public FloatRegistersMIPSShared {
+ public:
+ static const char* GetName(uint32_t i) {
+ MOZ_ASSERT(i < RegisterIdLimit);
+ return FloatRegistersMIPSShared::GetName(Encoding(i % 32));
+ }
+
+ static Encoding FromName(const char* name);
+
+ static const uint32_t Total = 32;
+ static const uint32_t TotalDouble = 16;
+ static const uint32_t TotalSingle = 16;
+
+ static const uint32_t Allocatable = 30;
+ static const SetType AllSingleMask = (1ULL << TotalSingle) - 1;
+
+ static const SetType AllDoubleMask = ((1ULL << TotalDouble) - 1)
+ << TotalSingle;
+ static const SetType AllMask = AllDoubleMask | AllSingleMask;
+
+  // When saving all registers, we only need to save the double registers.
+ static const uint32_t TotalPhys = 16;
+ static const uint32_t RegisterIdLimit = 32;
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+ static const SetType NonVolatileMask =
+ ((SetType(1) << (FloatRegisters::f20 >> 1)) |
+ (SetType(1) << (FloatRegisters::f22 >> 1)) |
+ (SetType(1) << (FloatRegisters::f24 >> 1)) |
+ (SetType(1) << (FloatRegisters::f26 >> 1)) |
+ (SetType(1) << (FloatRegisters::f28 >> 1)) |
+ (SetType(1) << (FloatRegisters::f30 >> 1))) *
+ ((1 << TotalSingle) + 1);
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ static const SetType NonAllocatableMask =
+ (SetType(1) << (FloatRegisters::f18 >> 1)) * ((1 << TotalSingle) + 1);
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
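The `* ((1 << TotalSingle) + 1)` factor in NonVolatileMask and NonAllocatableMask above duplicates a 16-bit register mask into both halves of the set, so a callee-saved double is marked both in the double view (high 16 bits) and in the single view (low 16 bits). A standalone check of that arithmetic:

  #include <cassert>
  #include <cstdint>

  int main() {
    // f20 and f22 as doubles occupy bits (f20 >> 1) = 10 and (f22 >> 1) = 11.
    uint32_t halfMask = (1u << 10) | (1u << 11);
    uint32_t both = halfMask * ((1u << 16) + 1);
    assert((both & 0xFFFF) == halfMask);  // single-precision half of the set
    assert((both >> 16) == halfMask);     // double-precision half of the set
    return 0;
  }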
+
+class FloatRegister : public FloatRegisterMIPSShared {
+ public:
+ enum RegType {
+ Single = 0x0,
+ Double = 0x1,
+ };
+
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+
+ Encoding code_ : 6;
+
+ protected:
+ RegType kind_ : 1;
+
+ public:
+ constexpr FloatRegister(uint32_t code, RegType kind = Double)
+ : code_(Encoding(code)), kind_(kind) {}
+ constexpr FloatRegister()
+ : code_(FloatRegisters::invalid_freg), kind_(Double) {}
+
+ bool operator==(const FloatRegister& other) const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(!other.isInvalid());
+ return kind_ == other.kind_ && code_ == other.code_;
+ }
+ bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
+ size_t size() const { return (kind_ == Double) ? 8 : 4; }
+ size_t pushSize() const { return size(); }
+
+ bool isNotOdd() const { return !isInvalid() && ((code_ & 1) == 0); }
+
+ bool isSingle() const { return kind_ == Single; }
+ bool isDouble() const { return kind_ == Double; }
+ bool isInvalid() const { return code_ == FloatRegisters::invalid_freg; }
+ bool isSimd128() const { return false; }
+
+ FloatRegister doubleOverlay() const;
+ FloatRegister singleOverlay() const;
+
+ FloatRegister asSingle() const { return singleOverlay(); }
+ FloatRegister asDouble() const { return doubleOverlay(); }
+ FloatRegister asSimd128() const { MOZ_CRASH("NYI"); }
+
+ Code code() const {
+ MOZ_ASSERT(isNotOdd());
+ return Code((code_ >> 1) | (kind_ << 4));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!isInvalid());
+ return code_;
+ }
+ uint32_t id() const {
+ MOZ_ASSERT(!isInvalid());
+ return code_;
+ }
+ static FloatRegister FromCode(uint32_t i) {
+ uint32_t code = i & 15;
+ uint32_t kind = i >> 4;
+ return FloatRegister(Encoding(code << 1), RegType(kind));
+ }
+
+ static FloatRegister FromIndex(uint32_t index, RegType kind) {
+ MOZ_ASSERT(index < 16);
+ return FloatRegister(Encoding(index << 1), kind);
+ }
+
+ bool volatile_() const {
+ return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+ }
+ const char* name() const { return FloatRegisters::GetName(code_); }
+ bool operator!=(const FloatRegister& other) const {
+ return other.kind_ != kind_ || code_ != other.code_;
+ }
+ bool aliases(const FloatRegister& other) {
+ MOZ_ASSERT(isNotOdd());
+ return code_ == other.code_;
+ }
+ uint32_t numAliased() const {
+ MOZ_ASSERT(isNotOdd());
+ return 2;
+ }
+ FloatRegister aliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(isNotOdd());
+
+ if (aliasIdx == 0) {
+ return *this;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ if (isDouble()) {
+ return singleOverlay();
+ }
+ return doubleOverlay();
+ }
+ uint32_t numAlignedAliased() const {
+ MOZ_ASSERT(isNotOdd());
+ return 2;
+ }
+ FloatRegister alignedAliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(isNotOdd());
+
+ if (aliasIdx == 0) {
+ return *this;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ if (isDouble()) {
+ return singleOverlay();
+ }
+ return doubleOverlay();
+ }
+
+ SetType alignedOrDominatedAliasedSet() const {
+ MOZ_ASSERT(isNotOdd());
+ return (SetType(1) << (code_ >> 1)) *
+ ((1 << FloatRegisters::TotalSingle) + 1);
+ }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ return LiveAsIndexableSet<Name>(s);
+ }
+
+ static Code FromName(const char* name) {
+ return FloatRegisters::FromName(name);
+ }
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+ const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
+ return set;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::AllocatableAsIndexableSet<RegTypeName::Float32>(SetType set) {
+  // Single registers do not dominate any smaller registers, so masking is
+  // enough to convert an allocatable set into the set of all available
+  // single registers.
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::AllocatableAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+// In order to handle functions such as int(*)(int, double), where the first
+// argument is passed in a general purpose register and the second would
+// normally be passed in a floating point register, we have to store the
+// double's content in two general purpose registers, namely a2 and a3.
+#define JS_CODEGEN_REGISTER_PAIR 1
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Architecture_mips32_h */
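A standalone sketch of the Code packing used by FloatRegister::code() and FromCode() above: the low four bits select one of the 16 even hardware encodings (encoding >> 1), and bit 4 records whether the register is viewed as Single or Double.

  #include <cassert>
  #include <cstdint>

  enum RegType : uint32_t { Single = 0, Double = 1 };

  static uint32_t PackCode(uint32_t encoding, RegType kind) {
    return (encoding >> 1) | (kind << 4);  // mirrors FloatRegister::code()
  }

  static void UnpackCode(uint32_t code, uint32_t* encoding, RegType* kind) {
    *encoding = (code & 15) << 1;  // mirrors FloatRegister::FromCode()
    *kind = RegType(code >> 4);
  }

  int main() {
    uint32_t encoding;
    RegType kind;
    UnpackCode(PackCode(14, Double), &encoding, &kind);  // f14 viewed as double
    assert(encoding == 14 && kind == Double);
    return 0;
  }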
diff --git a/js/src/jit/mips32/Assembler-mips32.cpp b/js/src/jit/mips32/Assembler-mips32.cpp
new file mode 100644
index 0000000000..8073b8e4ec
--- /dev/null
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -0,0 +1,369 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Assembler-mips32.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "jit/AutoWritableJitCode.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ : usedArgSlots_(0),
+ firstArgFloatSize_(0),
+ useGPRForFloats_(false),
+ current_() {}
+
+ABIArg ABIArgGenerator::next(MIRType type) {
+ Register destReg;
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults:
+ if (GetIntArgReg(usedArgSlots_, &destReg)) {
+ current_ = ABIArg(destReg);
+ } else {
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ }
+ usedArgSlots_++;
+ break;
+ case MIRType::Int64:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(a0, a1);
+ usedArgSlots_ = 2;
+ } else if (usedArgSlots_ <= 2) {
+ current_ = ABIArg(a2, a3);
+ usedArgSlots_ = 4;
+ } else {
+ if (usedArgSlots_ < NumIntArgRegs) {
+ usedArgSlots_ = NumIntArgRegs;
+ }
+ usedArgSlots_ += usedArgSlots_ % 2;
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_ += 2;
+ }
+ break;
+ case MIRType::Float32:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(f12.asSingle());
+ firstArgFloatSize_ = 1;
+ } else if (usedArgSlots_ == firstArgFloatSize_) {
+ current_ = ABIArg(f14.asSingle());
+ } else if (useGPRForFloats_ && GetIntArgReg(usedArgSlots_, &destReg)) {
+ current_ = ABIArg(destReg);
+ } else {
+ if (usedArgSlots_ < NumIntArgRegs) {
+ usedArgSlots_ = NumIntArgRegs;
+ }
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ }
+ usedArgSlots_++;
+ break;
+ case MIRType::Double:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(f12);
+ usedArgSlots_ = 2;
+ firstArgFloatSize_ = 2;
+ } else if (usedArgSlots_ == firstArgFloatSize_) {
+ current_ = ABIArg(f14);
+ usedArgSlots_ = 4;
+ } else if (useGPRForFloats_ && usedArgSlots_ <= 2) {
+ current_ = ABIArg(a2, a3);
+ usedArgSlots_ = 4;
+ } else {
+ if (usedArgSlots_ < NumIntArgRegs) {
+ usedArgSlots_ = NumIntArgRegs;
+ }
+ usedArgSlots_ += usedArgSlots_ % 2;
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_ += 2;
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
+uint32_t js::jit::RT(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << RTShift;
+}
+
+uint32_t js::jit::RD(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << RDShift;
+}
+
+uint32_t js::jit::RZ(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << RZShift;
+}
+
+uint32_t js::jit::SA(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << SAShift;
+}
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+}
+
+uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
+ Instruction* inst = (Instruction*)instPtr;
+ return Assembler::ExtractLuiOriValue(inst, inst->next());
+}
+
+static JitCode* CodeFromJump(Instruction* jump) {
+ uint8_t* target = (uint8_t*)Assembler::ExtractLuiOriValue(jump, jump->next());
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ while (reader.more()) {
+ JitCode* child =
+ CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void TraceOneDataRelocation(JSTracer* trc,
+ mozilla::Maybe<AutoWritableJitCode>& awjc,
+ JitCode* code, Instruction* inst) {
+ void* ptr = (void*)Assembler::ExtractLuiOriValue(inst, inst->next());
+ void* prior = ptr;
+
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(
+ trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
+ if (ptr != prior) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
+ }
+}
+
+/* static */
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* inst = (Instruction*)(code->raw() + offset);
+ TraceOneDataRelocation(trc, awjc, code, inst);
+ }
+}
+
+Assembler::Condition Assembler::UnsignedCondition(Condition cond) {
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return cond;
+ case LessThan:
+ case Below:
+ return Below;
+ case LessThanOrEqual:
+ case BelowOrEqual:
+ return BelowOrEqual;
+ case GreaterThan:
+ case Above:
+ return Above;
+ case AboveOrEqual:
+ case GreaterThanOrEqual:
+ return AboveOrEqual;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+Assembler::Condition Assembler::ConditionWithoutEqual(Condition cond) {
+ switch (cond) {
+ case LessThan:
+ case LessThanOrEqual:
+ return LessThan;
+ case Below:
+ case BelowOrEqual:
+ return Below;
+ case GreaterThan:
+ case GreaterThanOrEqual:
+ return GreaterThan;
+ case Above:
+ case AboveOrEqual:
+ return Above;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
+ if (label.patchAt().bound()) {
+ auto mode = label.linkMode();
+ intptr_t offset = label.patchAt().offset();
+ intptr_t target = label.target().offset();
+
+ if (mode == CodeLabel::RawPointer) {
+ *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
+ } else {
+ MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
+ mode == CodeLabel::JumpImmediate);
+ Instruction* inst = (Instruction*)(rawCode + offset);
+ AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(),
+ (uint32_t)(rawCode + target));
+ }
+ }
+}
+
+void Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) {
+ int32_t offset = target - branch;
+ InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+  // If the encoded offset is 4, then the jump must be short.
+ if (BOffImm16(inst[0]).decode() == 4) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+ return;
+ }
+
+  // Generate the long jump for calls because the return address has to be
+  // the address after the reserved block.
+ if (inst[0].encode() == inst_bgezal.encode()) {
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
+ // There is 1 nop after this.
+ return;
+ }
+
+ if (BOffImm16::IsInRange(offset)) {
+ bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
+ inst[0].encode() != inst_beq.encode());
+
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+
+ // Skip the trailing nops in conditional branches.
+ if (conditional) {
+ inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void*)))
+ .encode();
+ // There are 2 nops after this
+ }
+ return;
+ }
+
+ if (inst[0].encode() == inst_beq.encode()) {
+ // Handle long unconditional jump.
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ } else {
+ // Handle long conditional jump.
+ inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(BufferOffset(branch + sizeof(void*)), BufferOffset(target));
+ Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ }
+}
+
+void Assembler::processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+}
+
+uint32_t Assembler::PatchWrite_NearCallSize() { return 4 * sizeof(uint32_t); }
+
+void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall) {
+ Instruction* inst = (Instruction*)start.raw();
+ uint8_t* dest = toCall.raw();
+
+  // Overwrite whatever instruction used to be here with a call.
+  // Always use a long jump, for two reasons:
+  // - The jump has to be the same size because of PatchWrite_NearCallSize.
+  // - The return address has to be at the end of the replaced block.
+  // A short jump wouldn't be more efficient.
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
+ (uint32_t)dest);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ inst[3] = InstNOP();
+}
+
+uint32_t Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1) {
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)inst1;
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ uint32_t value = i0->extractImm16Value() << 16;
+ value = value | i1->extractImm16Value();
+ return value;
+}
+
+void Assembler::WriteLuiOriInstructions(Instruction* inst0, Instruction* inst1,
+ Register reg, uint32_t value) {
+ *inst0 = InstImm(op_lui, zero, reg, Imm16::Upper(Imm32(value)));
+ *inst1 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ ImmPtr newValue, ImmPtr expectedValue) {
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue) {
+ Instruction* inst = (Instruction*)label.raw();
+
+ // Extract old Value
+ DebugOnly<uint32_t> value = Assembler::ExtractLuiOriValue(&inst[0], &inst[1]);
+ MOZ_ASSERT(value == uint32_t(expectedValue.value));
+
+ // Replace with new value
+ AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(),
+ uint32_t(newValue.value));
+}
+
+uint32_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
+ InstImm* inst = (InstImm*)code;
+ return Assembler::ExtractLuiOriValue(inst, inst->next());
+}
+
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+ Instruction* inst = (Instruction*)inst_.raw();
+ InstImm* i0 = (InstImm*)inst;
+ InstImm* i1 = (InstImm*)i0->next();
+ Instruction* i2 = (Instruction*)i1->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if (enabled) {
+ InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ *i2 = jalr;
+ } else {
+ InstNOP nop;
+ *i2 = nop;
+ }
+}
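A standalone sketch of the lui/ori split that ExtractLuiOriValue and WriteLuiOriInstructions rely on: a 32-bit constant is materialized as `lui reg, hi16` followed by `ori reg, reg, lo16`, and the patching code simply recombines the two 16-bit immediates.

  #include <cassert>
  #include <cstdint>

  static void SplitImmediate(uint32_t value, uint16_t* hi, uint16_t* lo) {
    *hi = uint16_t(value >> 16);     // goes into the lui immediate
    *lo = uint16_t(value & 0xFFFF);  // goes into the ori immediate
  }

  static uint32_t JoinImmediate(uint16_t hi, uint16_t lo) {
    return (uint32_t(hi) << 16) | lo;  // what ExtractLuiOriValue computes
  }

  int main() {
    uint16_t hi, lo;
    SplitImmediate(0xDEADBEEF, &hi, &lo);
    assert(hi == 0xDEAD && lo == 0xBEEF);
    assert(JoinImmediate(hi, lo) == 0xDEADBEEF);
    return 0;
  }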
diff --git a/js/src/jit/mips32/Assembler-mips32.h b/js/src/jit/mips32/Assembler-mips32.h
new file mode 100644
index 0000000000..496c382590
--- /dev/null
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -0,0 +1,265 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Assembler_mips32_h
+#define jit_mips32_Assembler_mips32_h
+
+#include <iterator>
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "jit/mips32/Architecture-mips32.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register CallTempReg4 = t4;
+static constexpr Register CallTempReg5 = t5;
+
+static constexpr Register CallTempNonArgRegs[] = {t0, t1, t2, t3, t4};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+class ABIArgGenerator {
+ unsigned usedArgSlots_;
+ unsigned firstArgFloatSize_;
+ // Note: This is not compliant with the system ABI. The Lowering phase
+ // expects to lower an MWasmParameter to only one register.
+ bool useGPRForFloats_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+
+ void enforceO32ABI() { useGPRForFloats_ = true; }
+
+ uint32_t stackBytesConsumedSoFar() const {
+ if (usedArgSlots_ <= 4) {
+ return ShadowStackSpace;
+ }
+
+ return usedArgSlots_ * sizeof(intptr_t);
+ }
+
+ void increaseStackOffset(uint32_t bytes) { MOZ_CRASH("NYI"); }
+};
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = t0;
+static constexpr Register ABINonArgReg1 = t1;
+static constexpr Register ABINonArgReg2 = t2;
+static constexpr Register ABINonArgReg3 = t3;
+
+// This register may be volatile or nonvolatile. Avoid f18 which is the
+// ScratchDoubleReg.
+static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::f16,
+ FloatRegister::Double};
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different
+static constexpr Register ABINonArgReturnReg0 = t0;
+static constexpr Register ABINonArgReturnReg1 = t1;
+static constexpr Register ABINonVolatileReg = s0;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call, which must preserve the ABI argument, return,
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = t0;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register InstanceReg = s5;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = t1;
+
+static constexpr Register InterpreterPCReg = t5;
+
+static constexpr Register JSReturnReg_Type = a3;
+static constexpr Register JSReturnReg_Data = a2;
+static constexpr Register64 ReturnReg64(v1, v0);
+static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::f0,
+ FloatRegister::Single};
+static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::f0,
+ FloatRegister::Double};
+static constexpr FloatRegister ScratchFloat32Reg = {FloatRegisters::f18,
+ FloatRegister::Single};
+static constexpr FloatRegister ScratchDoubleReg = {FloatRegisters::f18,
+ FloatRegister::Double};
+
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg) {}
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg) {}
+};
+
+static constexpr FloatRegister f0 = {FloatRegisters::f0, FloatRegister::Double};
+static constexpr FloatRegister f2 = {FloatRegisters::f2, FloatRegister::Double};
+static constexpr FloatRegister f4 = {FloatRegisters::f4, FloatRegister::Double};
+static constexpr FloatRegister f6 = {FloatRegisters::f6, FloatRegister::Double};
+static constexpr FloatRegister f8 = {FloatRegisters::f8, FloatRegister::Double};
+static constexpr FloatRegister f10 = {FloatRegisters::f10,
+ FloatRegister::Double};
+static constexpr FloatRegister f12 = {FloatRegisters::f12,
+ FloatRegister::Double};
+static constexpr FloatRegister f14 = {FloatRegisters::f14,
+ FloatRegister::Double};
+static constexpr FloatRegister f16 = {FloatRegisters::f16,
+ FloatRegister::Double};
+static constexpr FloatRegister f18 = {FloatRegisters::f18,
+ FloatRegister::Double};
+static constexpr FloatRegister f20 = {FloatRegisters::f20,
+ FloatRegister::Double};
+static constexpr FloatRegister f22 = {FloatRegisters::f22,
+ FloatRegister::Double};
+static constexpr FloatRegister f24 = {FloatRegisters::f24,
+ FloatRegister::Double};
+static constexpr FloatRegister f26 = {FloatRegisters::f26,
+ FloatRegister::Double};
+static constexpr FloatRegister f28 = {FloatRegisters::f28,
+ FloatRegister::Double};
+static constexpr FloatRegister f30 = {FloatRegisters::f30,
+ FloatRegister::Double};
+
+// MIPS CPUs can only load multibyte data that is "naturally"
+// four-byte-aligned; the sp register should be eight-byte-aligned.
+static constexpr uint32_t ABIStackAlignment = 8;
+static constexpr uint32_t JitStackAlignment = 8;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// TODO this is just a filler to prevent a build failure. The MIPS SIMD
+// alignment requirements still need to be explored.
+// TODO Copy the static_asserts from x64/x86 assembler files.
+static constexpr uint32_t SimdMemoryAlignment = 8;
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmTrapInstructionLength = 4;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+static constexpr Scale ScalePointer = TimesFour;
+
+class Assembler : public AssemblerMIPSShared {
+ public:
+ Assembler() : AssemblerMIPSShared() {}
+
+ static Condition UnsignedCondition(Condition cond);
+ static Condition ConditionWithoutEqual(Condition cond);
+
+ static uintptr_t GetPointer(uint8_t*);
+
+ protected:
+  // This is used to access the odd register from the pair of single
+  // precision registers that make up one double register.
+ FloatRegister getOddPair(FloatRegister reg) {
+ MOZ_ASSERT(reg.isDouble());
+ MOZ_ASSERT(reg.id() % 2 == 0);
+ FloatRegister odd(reg.id() | 1, FloatRegister::Single);
+ return odd;
+ }
+
+ public:
+ using AssemblerMIPSShared::bind;
+
+ static void Bind(uint8_t* rawCode, const CodeLabel& label);
+
+ void processCodeLabels(uint8_t* rawCode);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ static uint32_t PatchWrite_NearCallSize();
+
+ static uint32_t ExtractLuiOriValue(Instruction* inst0, Instruction* inst1);
+ static void WriteLuiOriInstructions(Instruction* inst, Instruction* inst1,
+ Register reg, uint32_t value);
+
+ static void PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+
+ static uint32_t ExtractInstructionImmediate(uint8_t* code);
+
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+}; // Assembler
+
+static const uint32_t NumIntArgRegs = 4;
+
+static inline bool GetIntArgReg(uint32_t usedArgSlots, Register* out) {
+ if (usedArgSlots < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, if we have no
+// more actual argument registers to use, we fall back on whichever
+// CallTempReg* registers don't overlap the argument registers, and only fail
+// once those run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ MOZ_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out)) {
+ return true;
+ }
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Assembler_mips32_h */
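A standalone sketch of the selection order implemented by GetIntArgReg and GetTempRegForIntArg above: the four O32 argument registers come first, then the CallTempNonArgRegs, and the lookup fails only once both pools are exhausted (register names appear as strings purely for illustration).

  #include <cstdio>
  #include <optional>
  #include <string>
  #include <vector>

  static std::optional<std::string> TempRegForIntArg(unsigned usedIntArgs) {
    static const std::vector<std::string> argRegs = {"a0", "a1", "a2", "a3"};
    static const std::vector<std::string> tempRegs = {"t0", "t1", "t2", "t3", "t4"};
    if (usedIntArgs < argRegs.size()) {
      return argRegs[usedIntArgs];
    }
    usedIntArgs -= argRegs.size();
    if (usedIntArgs < tempRegs.size()) {
      return tempRegs[usedIntArgs];
    }
    return std::nullopt;  // both pools exhausted; the caller must fail
  }

  int main() {
    for (unsigned i = 0; i < 10; i++) {
      auto reg = TempRegForIntArg(i);
      std::printf("arg %u -> %s\n", i, reg ? reg->c_str() : "none");
    }
    return 0;
  }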
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.cpp b/js/src/jit/mips32/CodeGenerator-mips32.cpp
new file mode 100644
index 0000000000..b5bc0c041f
--- /dev/null
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -0,0 +1,507 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/CodeGenerator-mips32.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+ValueOperand CodeGeneratorMIPS::ToValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand CodeGeneratorMIPS::ToTempValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ MOZ_ASSERT(!box->getOperand(0)->isConstant());
+
+ // For NUNBOX32, the input operand and the output payload have the same
+ // virtual register. All that needs to be written is the type tag for
+ // the type definition.
+ masm.move32(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
+}
+
+void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
+ const AnyRegister in = ToAnyRegister(box->getOperand(0));
+ const ValueOperand out = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), in), out);
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ MUnbox* mir = unbox->mir();
+ Register type = ToRegister(unbox->type());
+
+ if (mir->fallible()) {
+ bailoutCmp32(Assembler::NotEqual, type, Imm32(MIRTypeToTag(mir->type())),
+ unbox->snapshot());
+ }
+}
+
+void CodeGeneratorMIPS::splitTagForTest(const ValueOperand& value,
+ ScratchTagScope& tag) {
+ MOZ_ASSERT(value.typeReg() == tag);
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.cmp64Set(condition, lhsRegs, imm, output);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.cmp64Set(condition, lhsRegs, rhsRegs, output);
+ }
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InvalidReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notOverflow);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notOverflow);
+ if (lir->mir()->isMod()) {
+ masm.xor64(output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64);
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64);
+ }
+ MOZ_ASSERT(ReturnReg64 == output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InvalidReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64);
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64);
+ }
+}
+
+void CodeGeneratorMIPS::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+#ifdef MIPSR6
+ masm.as_div(/* result= */ dividend, dividend, divisor);
+#else
+ masm.as_div(dividend, divisor);
+ masm.as_mflo(dividend);
+#endif
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorMIPS::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+#ifdef MIPSR6
+ masm.as_mod(/* result= */ dividend, dividend, divisor);
+#else
+ masm.as_div(dividend, divisor);
+ masm.as_mfhi(dividend);
+#endif
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+template <typename T>
+void CodeGeneratorMIPS::emitWasmLoadI64(T* lir) {
+ const MWasmLoad* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (IsUnaligned(mir->access())) {
+ masm.wasmUnalignedLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()),
+ ptrScratch, ToOutRegister64(lir),
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
+ ToOutRegister64(lir));
+ }
+}
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
+ emitWasmLoadI64(lir);
+}
+
+void CodeGenerator::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) {
+ emitWasmLoadI64(lir);
+}
+
+template <typename T>
+void CodeGeneratorMIPS::emitWasmStoreI64(T* lir) {
+ const MWasmStore* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (IsUnaligned(mir->access())) {
+ masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()),
+ HeapReg, ToRegister(lir->ptr()), ptrScratch,
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+ ToRegister(lir->ptr()), ptrScratch);
+ }
+}
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+ emitWasmStoreI64(lir);
+}
+
+void CodeGenerator::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir) {
+ emitWasmStoreI64(lir);
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation trueExpr = lir->trueExpr();
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 output = ToOutRegister64(lir);
+
+ masm.move64(ToRegister64(trueExpr), output);
+
+ if (falseExpr.low().isRegister()) {
+ masm.as_movz(output.low, ToRegister(falseExpr.low()), cond);
+ masm.as_movz(output.high, ToRegister(falseExpr.high()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.low()), output.low);
+ masm.loadPtr(ToAddress(falseExpr.high()), output.high);
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ masm.moveToDoubleLo(input.low, output);
+ masm.moveToDoubleHi(input.high, output);
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.moveFromDoubleLo(input, output.low);
+ masm.moveFromDoubleHi(input, output.high);
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ Register input = ToRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ if (input != output.low) {
+ masm.move32(input, output.low);
+ }
+ if (lir->mir()->isUnsigned()) {
+ masm.move32(Imm32(0), output.high);
+ } else {
+ masm.ma_sra(output.high, output.low, Imm32(31));
+ }
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ masm.move32(ToRegister(input.low()), output);
+ } else {
+ masm.move32(ToRegister(input.high()), output);
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move8SignExtend(input.low, output.low);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move16SignExtend(input.low, output.low);
+ break;
+ case MSignExtendInt64::Word:
+ masm.move32(input.low, output.low);
+ break;
+ }
+ masm.ma_sra(output.high, output.low, Imm32(31));
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ masm.as_or(output, input.low, input.high);
+ masm.cmp32Set(Assembler::Equal, output, Imm32(0), output);
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister arg = input;
+ Register64 output = ToOutRegister64(lir);
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ auto* ool = new (alloc())
+ OutOfLineWasmTruncateCheck(mir, input, Register64::Invalid());
+ addOutOfLineCode(ool, mir);
+
+ if (fromType == MIRType::Float32) {
+ arg = ScratchDoubleReg;
+ masm.convertFloat32ToDouble(input, arg);
+ }
+
+ if (!lir->mir()->isSaturating()) {
+ masm.Push(input);
+
+ masm.setupWasmABICall();
+ masm.passABIArg(arg, MoveOp::DOUBLE);
+
+ if (lir->mir()->isUnsigned()) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::TruncateDoubleToUint64);
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::TruncateDoubleToInt64);
+ }
+
+ masm.Pop(input);
+
+ masm.ma_xor(ScratchRegister, output.high, Imm32(0x80000000));
+ masm.ma_or(ScratchRegister, output.low);
+ masm.ma_b(ScratchRegister, Imm32(0), ool->entry(), Assembler::Equal);
+
+ masm.bind(ool->rejoin());
+ } else {
+ masm.setupWasmABICall();
+ masm.passABIArg(arg, MoveOp::DOUBLE);
+ if (lir->mir()->isUnsigned()) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::SaturatingTruncateDoubleToUint64);
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::SaturatingTruncateDoubleToInt64);
+ }
+ }
+
+ MOZ_ASSERT(ReturnReg64 == output);
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ mozilla::DebugOnly<FloatRegister> output = ToFloatRegister(lir->output());
+
+ MInt64ToFloatingPoint* mir = lir->mir();
+ MIRType toType = mir->type();
+
+ masm.setupWasmABICall();
+ masm.passABIArg(input.high);
+ masm.passABIArg(input.low);
+
+ if (lir->mir()->isUnsigned()) {
+ if (toType == MIRType::Double) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::Uint64ToDouble, MoveOp::DOUBLE);
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::Uint64ToFloat32, MoveOp::FLOAT32);
+ }
+ } else {
+ if (toType == MIRType::Double) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::Int64ToDouble, MoveOp::DOUBLE);
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::Int64ToFloat32, MoveOp::FLOAT32);
+ }
+ }
+
+ MOZ_ASSERT_IF(toType == MIRType::Double, *(&output) == ReturnDoubleReg);
+ MOZ_ASSERT_IF(toType == MIRType::Float32, *(&output) == ReturnFloat32Reg);
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
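+ // Take the true branch right away if the high word is non-zero; otherwise
+ // the low word decides.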
+ branchToBlock(input.high, Imm32(0), lir->ifTrue(), Assembler::NonZero);
+ emitBranch(input.low, Imm32(0), Assembler::NonZero, lir->ifTrue(),
+ lir->ifFalse());
+}
+
+void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+ masm.wasmAtomicLoad64(lir->mir()->access(), addr, Register64::Invalid(),
+ output);
+}
+
+void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register tmp = ToRegister(lir->tmp());
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+ masm.wasmAtomicStore64(lir->mir()->access(), addr, tmp, value);
+}
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.h b/js/src/jit/mips32/CodeGenerator-mips32.h
new file mode 100644
index 0000000000..161b94326f
--- /dev/null
+++ b/js/src/jit/mips32/CodeGenerator-mips32.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_CodeGenerator_mips32_h
+#define jit_mips32_CodeGenerator_mips32_h
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorMIPS : public CodeGeneratorMIPSShared {
+ protected:
+ CodeGeneratorMIPS(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorMIPSShared(gen, graph, masm) {}
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_NULL), cond, ifTrue,
+ ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_UNDEFINED), cond,
+ ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_OBJECT), cond, ifTrue,
+ ifFalse);
+ }
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+};
+
+typedef CodeGeneratorMIPS CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_CodeGenerator_mips32_h */
diff --git a/js/src/jit/mips32/LIR-mips32.h b/js/src/jit/mips32/LIR-mips32.h
new file mode 100644
index 0000000000..da68ad7464
--- /dev/null
+++ b/js/src/jit/mips32/LIR-mips32.h
@@ -0,0 +1,197 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_LIR_mips32_h
+#define jit_mips32_LIR_mips32_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp,
+ MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(Unbox);
+
+ LUnbox() : LInstructionHelper(classOpcode) {}
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const LAllocation* payload() { return getOperand(0); }
+ const LAllocation* type() { return getOperand(1); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+class LDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2, 0> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2, 0> {
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+};
+
+class LInt64ToFloatingPoint
+ : public LCallInstructionHelper<1, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+};
+
+class LWasmAtomicLoadI64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(WasmAtomicLoadI64);
+
+ explicit LWasmAtomicLoadI64(const LAllocation& ptr)
+ : LInstructionHelper(classOpcode) {

+ setOperand(0, ptr);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const MWasmLoad* mir() const { return mir_->toWasmLoad(); }
+};
+
+class LWasmAtomicStoreI64 : public LInstructionHelper<0, 1 + INT64_PIECES, 1> {
+ public:
+ LIR_HEADER(WasmAtomicStoreI64);
+
+ LWasmAtomicStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LDefinition& tmp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ setTemp(0, tmp);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const LDefinition* tmp() { return getTemp(0); }
+ const MWasmStore* mir() const { return mir_->toWasmStore(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_LIR_mips32_h */
diff --git a/js/src/jit/mips32/Lowering-mips32.cpp b/js/src/jit/mips32/Lowering-mips32.cpp
new file mode 100644
index 0000000000..721491c46b
--- /dev/null
+++ b/js/src/jit/mips32/Lowering-mips32.cpp
@@ -0,0 +1,257 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Lowering-mips32.h"
+
+#include "jit/Lowering.h"
+#include "jit/mips32/Assembler-mips32.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation LIRGeneratorMIPS::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2, bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+ LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* inner = box->getOperand(0);
+
+ // If the box wrapped a double, it needs a new register.
+ if (IsFloatingPointType(inner->type())) {
+ defineBox(new (alloc()) LBoxFloatingPoint(
+ useRegisterAtStart(inner), tempCopy(inner, 0), inner->type()),
+ box);
+ return;
+ }
+
+ if (box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (inner->isConstant()) {
+ defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
+ return;
+ }
+
+ LBox* lir = new (alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+
+ // Note that because we're using BogusTemp(), we do not change the type of
+ // the definition. We also do not define the first output as "TYPE",
+ // because it has no corresponding payload at (vreg + 1). Also note that
+ // although we copy the input's original type for the payload half of the
+ // definition, this is only for clarity. BogusTemp() definitions are
+ // ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition::BogusTemp());
+ box->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* inner = unbox->getOperand(0);
+
+ // An unbox on mips reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MOZ_ASSERT(inner->type() == MIRType::Value);
+
+ ensureDefined(inner);
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint* lir =
+ new (alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+ define(lir, unbox);
+ return;
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload
+ // register.
+ LUnbox* lir = new (alloc()) LUnbox;
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::REGISTER));
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ // Types and payloads form two separate intervals. If the type becomes dead
+ // before the payload, it could be used as a Value without the type being
+ // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+ // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+ // Instead, we create a new virtual register.
+ defineReuseInput(lir, unbox, 0);
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ fillBoxUses(ins, 0, opd);
+ add(ins);
+}
+
+void LIRGeneratorMIPS::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(
+ inputPosition,
+ LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
+ payload->setOperand(inputPosition,
+ LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+void LIRGeneratorMIPS::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+ uint32_t lowVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(lowVreg);
+
+ uint32_t highVreg = getVirtualRegister();
+ MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+ low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+ high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+ annotate(high);
+ annotate(low);
+}
+
+void LIRGeneratorMIPS::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+ low->setOperand(inputPosition,
+ LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+ high->setOperand(
+ inputPosition,
+ LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+void LIRGeneratorMIPS::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new (alloc())
+ LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorMIPS::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new (alloc())
+ LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorMIPS::lowerDivI64(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc()) LDivOrModI64(
+ useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
+
+ defineReturn(lir, div);
+}
+
+void LIRGeneratorMIPS::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+void LIRGeneratorMIPS::lowerModI64(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc()) LDivOrModI64(
+ useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
+
+ defineReturn(lir, mod);
+}
+
+void LIRGeneratorMIPS::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+void LIRGeneratorMIPS::lowerUDivI64(MDiv* div) {
+ LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
+ useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
+ defineReturn(lir, div);
+}
+
+void LIRGeneratorMIPS::lowerUModI64(MMod* mod) {
+ LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
+ useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
+ defineReturn(lir, mod);
+}
+
+void LIRGeneratorMIPS::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorMIPS::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineReturn(new (alloc()) LWasmTruncateToInt64(useRegisterAtStart(opd)),
+ ins);
+}
+
+void LIRGeneratorMIPS::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ defineReturn(
+ new (alloc()) LInt64ToFloatingPoint(useInt64RegisterAtStart(opd)), ins);
+}
+
+void LIRGeneratorMIPS::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
diff --git a/js/src/jit/mips32/Lowering-mips32.h b/js/src/jit/mips32/Lowering-mips32.h
new file mode 100644
index 0000000000..1565c84656
--- /dev/null
+++ b/js/src/jit/mips32/Lowering-mips32.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Lowering_mips32_h
+#define jit_mips32_Lowering_mips32_h
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPS : public LIRGeneratorMIPSShared {
+ protected:
+ LIRGeneratorMIPS(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorMIPSShared(gen, graph, lirGraph) {}
+
+ // Returns a box allocation with type set to reg1 and payload set to reg2.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ inline LDefinition tempToUnbox() { return LDefinition::BogusTemp(); }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+
+ void lowerDivI64(MDiv* div);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerModI64(MMod* mod);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+};
+
+typedef LIRGeneratorMIPS LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Lowering_mips32_h */
diff --git a/js/src/jit/mips32/MacroAssembler-mips32-inl.h b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
new file mode 100644
index 0000000000..58ade13d85
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
@@ -0,0 +1,1027 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MacroAssembler_mips32_inl_h
+#define jit_mips32_MacroAssembler_mips32_inl_h
+
+#include "jit/mips32/MacroAssembler-mips32.h"
+
+#include "vm/BigIntType.h" // JS::BigInt
+
+#include "jit/mips-shared/MacroAssembler-mips-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ move32(src.low, dest.low);
+ move32(src.high, dest.high);
+}
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+ move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ moveFromDoubleHi(src, dest.high);
+ moveFromDoubleLo(src, dest.low);
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ moveToDoubleHi(src.high, dest);
+ moveToDoubleLo(src.low, dest);
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+ if (src.low != dest) {
+ move32(src.low, dest);
+ }
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ if (src != dest.low) {
+ move32(src, dest.low);
+ }
+ move32(Imm32(0), dest.high);
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ move8SignExtend(src, dest.low);
+ move32To64SignExtend(dest.low, dest);
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ move16SignExtend(src, dest.low);
+ move32To64SignExtend(dest.low, dest);
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ if (src != dest.low) {
+ move32(src, dest.low);
+ }
+ ma_sra(dest.high, dest.low, Imm32(31));
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ move32(src, dest);
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ move32(src, dest);
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ load32(src, dest);
+}
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::notPtr(Register reg) { ma_not(reg, reg); }
+
+void MacroAssembler::andPtr(Register src, Register dest) { ma_and(dest, src); }
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) { ma_and(dest, imm); }
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ if (imm.low().value != int32_t(0xFFFFFFFF)) {
+ and32(imm.low(), dest.low);
+ }
+ if (imm.hi().value != int32_t(0xFFFFFFFF)) {
+ and32(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ and32(src.low, dest.low);
+ and32(src.high, dest.high);
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ if (imm.low().value) {
+ or32(imm.low(), dest.low);
+ }
+ if (imm.hi().value) {
+ or32(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ if (imm.low().value) {
+ xor32(imm.low(), dest.low);
+ }
+ if (imm.hi().value) {
+ xor32(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) { ma_or(dest, src); }
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { ma_or(dest, imm); }
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ or32(src.low, dest.low);
+ or32(src.high, dest.high);
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ ma_xor(dest.low, src.low);
+ ma_xor(dest.high, src.high);
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) { ma_xor(dest, src); }
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) { ma_xor(dest, imm); }
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap64(Register64 reg) {
+ byteSwap32(reg.high);
+ byteSwap32(reg.low);
+
+ // Swap reg.high and reg.low with the three-XOR trick; no temporary register
+ // is needed.
+ ma_xor(reg.high, reg.low);
+ ma_xor(reg.low, reg.high);
+ ma_xor(reg.high, reg.low);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(Register src, Register dest) { ma_addu(dest, src); }
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) { ma_addu(dest, imm); }
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ addPtr(Imm32(imm.value), dest);
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
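+ // Add the low words first, using sltu to detect the carry, then fold the
+ // carry into the high word.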
+ if (dest.low == src.low) {
+ // dest.low is being doubled here, so the carry out is just the sign bit of
+ // src.low.
+ as_slt(ScratchRegister, src.low, zero);
+ as_addu(dest.low, dest.low, src.low);
+ } else {
+ as_addu(dest.low, dest.low, src.low);
+ as_sltu(ScratchRegister, dest.low, src.low);
+ }
+ as_addu(dest.high, dest.high, src.high);
+ as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_addiu(dest.low, dest.low, imm.value);
+ as_sltiu(ScratchRegister, dest.low, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_addu(dest.low, dest.low, ScratchRegister);
+ as_sltu(ScratchRegister, dest.low, ScratchRegister);
+ }
+ as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ add64(imm.low(), dest);
+ ma_addu(dest.high, dest.high, imm.hi());
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, Imm32(0));
+ as_subu(dest, StackPointer, dest);
+ return offset;
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ Instruction* lui =
+ (Instruction*)m_buffer.getInst(BufferOffset(offset.offset()));
+ MOZ_ASSERT(lui->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(lui->next()->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ UpdateLuiOriValue(lui, lui->next(), imm.value);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) {
+ as_subu(dest, dest, src);
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+ ma_subu(dest, dest, imm);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ MOZ_ASSERT(dest.low != src.high);
+ MOZ_ASSERT(dest.high != src.low);
+ MOZ_ASSERT(dest.high != src.high);
+
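+ // Compute the borrow from the low words with sltu before modifying them,
+ // then subtract it from the high word.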
+ as_sltu(ScratchRegister, dest.low, src.low);
+ as_subu(dest.high, dest.high, ScratchRegister);
+ as_subu(dest.low, dest.low, src.low);
+ as_subu(dest.high, dest.high, src.high);
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ if (Imm16::IsInSignedRange(imm.low().value) &&
+ Imm16::IsInSignedRange(-imm.value)) {
+ as_sltiu(ScratchRegister, dest.low, imm.low().value);
+ as_subu(dest.high, dest.high, ScratchRegister);
+ as_addiu(dest.low, dest.low, -imm.value);
+ } else {
+ ma_li(SecondScratchReg, imm.low());
+ as_sltu(ScratchRegister, dest.low, SecondScratchReg);
+ as_subu(dest.high, dest.high, ScratchRegister);
+ as_subu(dest.low, dest.low, SecondScratchReg);
+ }
+ ma_subu(dest.high, dest.high, imm.hi());
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+ as_mul(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+ if (imm.low().value == 5) {
+ // Optimized case for Math.random().
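+ // dest * 5 is computed as (dest << 2) + dest, folding the bits shifted out
+ // of the low word and the add carry into the high word.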
+ as_sll(ScratchRegister, dest.low, 2);
+ as_srl(SecondScratchReg, dest.low, 32 - 2);
+ as_addu(dest.low, ScratchRegister, dest.low);
+ as_sltu(ScratchRegister, dest.low, ScratchRegister);
+ as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ as_sll(SecondScratchReg, dest.high, 2);
+ as_addu(SecondScratchReg, SecondScratchReg, dest.high);
+ as_addu(dest.high, ScratchRegister, SecondScratchReg);
+ } else {
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ ma_li(ScratchRegister, imm.low());
+ as_mult(dest.high, ScratchRegister);
+ ma_li(ScratchRegister, imm.hi());
+ as_madd(dest.low, ScratchRegister);
+ as_mflo(dest.high);
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ ma_li(ScratchRegister, imm.low());
+ as_multu(dest.low, ScratchRegister);
+ as_mfhi(ScratchRegister);
+ as_mflo(dest.low);
+ as_addu(dest.high, dest.high, ScratchRegister);
+ }
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+ MOZ_ASSERT(temp != dest.high && temp != dest.low);
+
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ ma_li(ScratchRegister, imm.low());
+ as_mult(dest.high, ScratchRegister);
+ ma_li(temp, imm.hi());
+ as_madd(dest.low, temp);
+ as_mflo(dest.high);
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ as_multu(dest.low, ScratchRegister);
+ as_mfhi(ScratchRegister);
+ as_mflo(dest.low);
+ as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+ MOZ_ASSERT(dest != src);
+ MOZ_ASSERT(dest.low != src.high && dest.high != src.low);
+
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits]
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits]
+ as_mult(dest.high, src.low);
+ as_madd(dest.low, src.high);
+ as_mflo(dest.high);
+ // + HIGH(LOW(dest) * LOW(src)) [carry]
+ // LOW32 = LOW(LOW(dest) * LOW(src));
+ as_multu(dest.low, src.low);
+ as_mfhi(ScratchRegister);
+ as_mflo(dest.low);
+ as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+void MacroAssembler::neg64(Register64 reg) {
+ as_subu(ScratchRegister, zero, reg.low);
+ // borrow = (reg.low != 0)
+ as_sltu(ScratchRegister, zero, ScratchRegister);
+ as_subu(reg.low, zero, reg.low);
+ as_subu(reg.high, zero, reg.high);
+ as_subu(reg.high, reg.high, ScratchRegister);
+}
+
+void MacroAssembler::negPtr(Register reg) { as_subu(reg, zero, reg); }
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ MOZ_ASSERT(src != ScratchRegister);
+ as_addu(ScratchRegister, src, src);
+ as_addu(dest, ScratchRegister, src);
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
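+ // Increment the low word, compute the carry with sltiu, and add it into the
+ // high word stored at addr + 4.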
+ ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+ as_lw(SecondScratchReg, ScratchRegister, 0);
+
+ as_addiu(SecondScratchReg, SecondScratchReg, 1);
+ as_sw(SecondScratchReg, ScratchRegister, 0);
+
+ as_sltiu(SecondScratchReg, SecondScratchReg, 1);
+ as_lw(ScratchRegister, ScratchRegister, 4);
+
+ as_addu(SecondScratchReg, ScratchRegister, SecondScratchReg);
+
+ ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+ as_sw(SecondScratchReg, ScratchRegister, 4);
+}
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_sll(dest, dest, imm);
+}
+
+void MacroAssembler::lshiftPtr(Register src, Register dest) {
+ ma_sll(dest, dest, src);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ScratchRegisterScope scratch(*this);
+
+ if (imm.value == 0) {
+ return;
+ } else if (imm.value < 32) {
+ as_sll(dest.high, dest.high, imm.value);
+ as_srl(scratch, dest.low, (32 - imm.value) % 32);
+ as_or(dest.high, dest.high, scratch);
+ as_sll(dest.low, dest.low, imm.value);
+ } else {
+ as_sll(dest.high, dest.low, imm.value - 32);
+ move32(Imm32(0), dest.low);
+ }
+}
+
+void MacroAssembler::lshift64(Register unmaskedShift, Register64 dest) {
+ Label done;
+ ScratchRegisterScope shift(*this);
+
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_b(shift, Imm32(0), &done, Equal);
+
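+ // For shift amounts below 32, the bits shifted out of the low word are ORed
+ // into the high word; for amounts of 32 and above, the movn pair moves the
+ // shifted low word into the high word and clears the low word.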
+ mov(dest.low, SecondScratchReg);
+ ma_sll(dest.low, dest.low, shift);
+ as_nor(shift, zero, shift);
+ as_srl(SecondScratchReg, SecondScratchReg, 1);
+ ma_srl(SecondScratchReg, SecondScratchReg, shift);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_sll(dest.high, dest.high, shift);
+ as_or(dest.high, dest.high, SecondScratchReg);
+
+ ma_and(SecondScratchReg, shift, Imm32(0x20));
+ as_movn(dest.high, dest.low, SecondScratchReg);
+ as_movn(dest.low, zero, SecondScratchReg);
+
+ bind(&done);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_srl(dest, dest, imm);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_sra(dest, dest, imm);
+}
+
+void MacroAssembler::rshiftPtr(Register src, Register dest) {
+ ma_srl(dest, dest, src);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ScratchRegisterScope scratch(*this);
+
+ if (imm.value == 0) {
+ return;
+ } else if (imm.value < 32) {
+ as_srl(dest.low, dest.low, imm.value);
+ as_sll(scratch, dest.high, (32 - imm.value) % 32);
+ as_or(dest.low, dest.low, scratch);
+ as_srl(dest.high, dest.high, imm.value);
+ } else if (imm.value == 32) {
+ ma_move(dest.low, dest.high);
+ move32(Imm32(0), dest.high);
+ } else {
+ ma_srl(dest.low, dest.high, Imm32(imm.value - 32));
+ move32(Imm32(0), dest.high);
+ }
+}
+
+void MacroAssembler::rshift64(Register unmaskedShift, Register64 dest) {
+ Label done;
+ ScratchRegisterScope shift(*this);
+
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_b(shift, Imm32(0), &done, Equal);
+
+ mov(dest.high, SecondScratchReg);
+ ma_srl(dest.high, dest.high, shift);
+ as_nor(shift, zero, shift);
+ as_sll(SecondScratchReg, SecondScratchReg, 1);
+ ma_sll(SecondScratchReg, SecondScratchReg, shift);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_srl(dest.low, dest.low, shift);
+ as_or(dest.low, dest.low, SecondScratchReg);
+ ma_and(SecondScratchReg, shift, Imm32(0x20));
+ as_movn(dest.low, dest.high, SecondScratchReg);
+ as_movn(dest.high, zero, SecondScratchReg);
+
+ bind(&done);
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ScratchRegisterScope scratch(*this);
+
+ if (imm.value == 0) {
+ return;
+ } else if (imm.value < 32) {
+ as_srl(dest.low, dest.low, imm.value);
+ as_sll(scratch, dest.high, (32 - imm.value) % 32);
+ as_or(dest.low, dest.low, scratch);
+ as_sra(dest.high, dest.high, imm.value);
+ } else if (imm.value == 32) {
+ ma_move(dest.low, dest.high);
+ as_sra(dest.high, dest.high, 31);
+ } else {
+ as_sra(dest.low, dest.high, imm.value - 32);
+ as_sra(dest.high, dest.high, 31);
+ }
+}
+
+void MacroAssembler::rshift64Arithmetic(Register unmaskedShift,
+ Register64 dest) {
+ Label done;
+
+ ScratchRegisterScope shift(*this);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_b(shift, Imm32(0), &done, Equal);
+
+ mov(dest.high, SecondScratchReg);
+ ma_sra(dest.high, dest.high, shift);
+ as_nor(shift, zero, shift);
+ as_sll(SecondScratchReg, SecondScratchReg, 1);
+ ma_sll(SecondScratchReg, SecondScratchReg, shift);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_srl(dest.low, dest.low, shift);
+ as_or(dest.low, dest.low, SecondScratchReg);
+ ma_and(SecondScratchReg, shift, Imm32(0x20));
+ as_sra(shift, dest.high, 31);
+ as_movn(dest.low, dest.high, SecondScratchReg);
+ as_movn(dest.high, shift, SecondScratchReg);
+
+ bind(&done);
+}
+
+// ===============================================================
+// Rotation functions
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateRight64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_move(dest.low, input.low);
+ ma_move(dest.high, input.high);
+ } else if (amount == 32) {
+ ma_move(scratch, input.low);
+ ma_move(dest.low, input.high);
+ ma_move(dest.high, scratch);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_move(scratch, input.high);
+ ma_sll(dest.high, input.high, Imm32(amount));
+ ma_srl(SecondScratchReg, input.low, Imm32(32 - amount));
+ as_or(dest.high, dest.high, SecondScratchReg);
+ ma_sll(dest.low, input.low, Imm32(amount));
+ ma_srl(SecondScratchReg, scratch, Imm32(32 - amount));
+ as_or(dest.low, dest.low, SecondScratchReg);
+ }
+ }
+}
+
+void MacroAssembler::rotateLeft64(Register shift, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope scratch(*this);
+
+ ma_and(scratch, shift, Imm32(0x3f));
+ as_nor(SecondScratchReg, zero, scratch);
+ ma_sll(temp, src.low, scratch);
+ ma_move(scratch, src.low);
+ as_srl(dest.low, src.high, 1);
+ ma_srl(dest.low, dest.low, SecondScratchReg);
+ as_or(dest.low, dest.low, temp);
+ ma_move(SecondScratchReg, src.high);
+ as_srl(dest.high, scratch, 1);
+ ma_and(scratch, shift, Imm32(0x3f));
+ ma_sll(temp, SecondScratchReg, scratch);
+ as_nor(SecondScratchReg, zero, scratch);
+ ma_srl(dest.high, dest.high, SecondScratchReg);
+ as_or(dest.high, dest.high, temp);
+ ma_and(temp, scratch, Imm32(32));
+ as_movn(SecondScratchReg, dest.high, temp);
+ as_movn(dest.high, dest.low, temp);
+ as_movn(dest.low, SecondScratchReg, temp);
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateLeft64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_move(dest.low, input.low);
+ ma_move(dest.high, input.high);
+ } else if (amount == 32) {
+ ma_move(scratch, input.low);
+ ma_move(dest.low, input.high);
+ ma_move(dest.high, scratch);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_move(scratch, input.high);
+ ma_srl(dest.high, input.high, Imm32(amount));
+ ma_sll(SecondScratchReg, input.low, Imm32(32 - amount));
+ as_or(dest.high, dest.high, SecondScratchReg);
+ ma_srl(dest.low, input.low, Imm32(amount));
+ ma_sll(SecondScratchReg, scratch, Imm32(32 - amount));
+ as_or(dest.low, dest.low, SecondScratchReg);
+ }
+ }
+}
+
+void MacroAssembler::rotateRight64(Register shift, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope scratch(*this);
+
+ ma_and(scratch, shift, Imm32(0x3f));
+ as_nor(SecondScratchReg, zero, scratch);
+ ma_srl(temp, src.low, scratch);
+ ma_move(scratch, src.low);
+ as_sll(dest.low, src.high, 1);
+ ma_sll(dest.low, dest.low, SecondScratchReg);
+ as_or(dest.low, dest.low, temp);
+ ma_move(SecondScratchReg, src.high);
+ as_sll(dest.high, scratch, 1);
+ ma_and(scratch, shift, Imm32(0x3f));
+ ma_srl(temp, SecondScratchReg, scratch);
+ as_nor(SecondScratchReg, zero, scratch);
+ ma_sll(dest.high, dest.high, SecondScratchReg);
+ as_or(dest.high, dest.high, temp);
+ ma_and(temp, scratch, Imm32(32));
+ as_movn(SecondScratchReg, dest.high, temp);
+ as_movn(dest.high, dest.low, temp);
+ as_movn(dest.low, SecondScratchReg, temp);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
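+ // clz64 is clz(high) when the high word is non-zero and 32 + clz(low)
+ // otherwise; the movn zeroes the low-word count when high != 0.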
+ as_clz(ScratchRegister, src.high);
+ as_clz(SecondScratchReg, src.low);
+ as_movn(SecondScratchReg, zero, src.high);
+ as_addu(dest, ScratchRegister, SecondScratchReg);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
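+ // ctz64 is ctz(low) when the low word is non-zero and 32 + ctz(high)
+ // otherwise; the movz/movn pair selects which word to count.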
+ as_movz(SecondScratchReg, src.high, src.low);
+ as_movn(SecondScratchReg, src.low, src.low);
+ ma_ctz(SecondScratchReg, SecondScratchReg);
+ ma_li(ScratchRegister, Imm32(0x20));
+ as_movn(ScratchRegister, zero, src.low);
+ as_addu(dest, SecondScratchReg, ScratchRegister);
+}
+
+void MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp) {
+ MOZ_ASSERT(dest.low != tmp);
+ MOZ_ASSERT(dest.high != tmp);
+ MOZ_ASSERT(dest.low != dest.high);
+
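+ // Classic SWAR popcount: form 2-bit and 4-bit subtotals for each 32-bit
+ // half, add the halves, fold into byte sums, and multiply by 0x01010101 so
+ // the total ends up in the top byte.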
+ as_srl(tmp, src.low, 1);
+ as_srl(SecondScratchReg, src.high, 1);
+ ma_li(ScratchRegister, Imm32(0x55555555));
+ as_and(tmp, tmp, ScratchRegister);
+ as_subu(tmp, src.low, tmp);
+ as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ as_subu(SecondScratchReg, src.high, SecondScratchReg);
+ ma_li(ScratchRegister, Imm32(0x33333333));
+ as_and(dest.low, tmp, ScratchRegister);
+ as_srl(tmp, tmp, 2);
+ as_and(tmp, tmp, ScratchRegister);
+ as_addu(tmp, dest.low, tmp);
+ as_and(dest.high, SecondScratchReg, ScratchRegister);
+ as_srl(SecondScratchReg, SecondScratchReg, 2);
+ as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ as_addu(SecondScratchReg, dest.high, SecondScratchReg);
+ ma_li(ScratchRegister, Imm32(0x0F0F0F0F));
+ as_addu(tmp, SecondScratchReg, tmp);
+ as_srl(dest.low, tmp, 4);
+ as_and(dest.low, dest.low, ScratchRegister);
+ as_and(tmp, tmp, ScratchRegister);
+ as_addu(dest.low, dest.low, tmp);
+ ma_mul(dest.low, dest.low, Imm32(0x01010101));
+ as_srl(dest.low, dest.low, 24);
+ ma_move(dest.high, zero);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), label);
+ }
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)),
+ val.secondHalf(), label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, rhs.low, &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, rhs.low, label);
+ }
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), rhs.high,
+ label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ Label done;
+
+ load32(rhs, scratch);
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, scratch, &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, scratch, label);
+ }
+
+ load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch,
+ label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ if (val.value == 0) {
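+ // Comparisons against zero can be decided from the two halves directly,
+ // without materializing a 64-bit constant.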
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::BelowOrEqual:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ as_or(ScratchRegister, lhs.high, lhs.low);
+ ma_b(ScratchRegister, ScratchRegister, success,
+ (cond == Assembler::Equal || cond == Assembler::BelowOrEqual)
+ ? Assembler::Zero
+ : Assembler::NonZero);
+ break;
+ case Assembler::LessThan:
+ case Assembler::GreaterThanOrEqual:
+ ma_b(lhs.high, Imm32(0), success, cond);
+ break;
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ as_or(SecondScratchReg, lhs.high, lhs.low);
+ as_sra(ScratchRegister, lhs.high, 31);
+ as_sltu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ ma_b(ScratchRegister, ScratchRegister, success,
+ (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
+ : Assembler::NonZero);
+ break;
+ case Assembler::Below:
+ // This condition is always false. No branch required.
+ break;
+ case Assembler::AboveOrEqual:
+ ma_b(success);
+ break;
+ default:
+ MOZ_CRASH("Condition code not supported");
+ }
+ return;
+ }
+
+ Condition c = ma_cmp64(cond, lhs, val, SecondScratchReg);
+ ma_b(SecondScratchReg, SecondScratchReg, success, c);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ Condition c = ma_cmp64(cond, lhs, rhs, SecondScratchReg);
+ ma_b(SecondScratchReg, SecondScratchReg, success, c);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ if (cond == Assembler::Zero || cond == Assembler::NonZero) {
+ MOZ_ASSERT(lhs.low == rhs.low);
+ MOZ_ASSERT(lhs.high == rhs.high);
+ as_or(ScratchRegister, lhs.low, lhs.high);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+ } else if (cond == Assembler::Signed || cond == Assembler::NotSigned) {
+ branchTest32(cond, lhs.high, rhs.high, label);
+ } else {
+ MOZ_CRASH("Unsupported condition");
+ }
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestUndefined(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestInt32(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ as_and(scratch, value.payloadReg(), value.payloadReg());
+ ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ ma_b(tag, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestDouble(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestNumber(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ ma_b(value.payloadReg(), value.payloadReg(), label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestString(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ Register string = value.payloadReg();
+ SecondScratchRegisterScope scratch2(*this);
+ ma_lw(scratch2, Address(string, JSString::offsetOfLength()));
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestSymbol(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBigInt(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestBigInt(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestBigIntTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ Register bi = value.payloadReg();
+ SecondScratchRegisterScope scratch2(*this);
+ ma_lw(scratch2, Address(bi, BigInt::offsetOfDigitLength()));
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestNull(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestObject(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestPrimitive(cond, value.typeReg(), label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ ma_b(value.typeReg(), ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label notMagic;
+ if (cond == Assembler::Equal) {
+ branchTestMagic(Assembler::NotEqual, valaddr, &notMagic);
+ } else {
+ branchTestMagic(Assembler::NotEqual, valaddr, label);
+ }
+
+ branch32(cond, ToPayload(valaddr), Imm32(why), label);
+ bind(&notMagic);
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
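+ // trunc.w.d sets the Invalid Operation cause bit in the FCSR when the input
+ // is NaN or out of range; extract that bit and fail if it is set.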
+ as_truncwd(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ as_truncws(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ convertDoubleToInt32(src, dest, fail, false);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ cmp32Move32(cond, lhs, rhs, src, dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ cmp32Move32(cond, lhs, rhs, src, dest);
+}
+
+void MacroAssemblerMIPSCompat::incrementInt32Value(const Address& addr) {
+ asMasm().add32(Imm32(1), ToPayload(addr));
+}
+
+void MacroAssemblerMIPSCompat::computeEffectiveAddress(const BaseIndex& address,
+ Register dest) {
+ computeScaledAddress(address, dest);
+ if (address.offset) {
+ asMasm().addPtr(Imm32(address.offset), dest);
+ }
+}
+
+void MacroAssemblerMIPSCompat::retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ as_jr(ra);
+ as_nop();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MacroAssembler_mips32_inl_h */
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
new file mode 100644
index 0000000000..c4aef75d8a
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -0,0 +1,2825 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/MacroAssembler-mips32.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/MoveEmitter.h"
+#include "jit/SharedICRegisters.h"
+#include "util/Memory.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
+static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;
+
+static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");
+
+void MacroAssemblerMIPSCompat::convertBoolToInt32(Register src, Register dest) {
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ ma_and(dest, src, Imm32(0xff));
+}
+
+void MacroAssemblerMIPSCompat::convertInt32ToDouble(Register src,
+ FloatRegister dest) {
+ as_mtc1(src, dest);
+ as_cvtdw(dest, dest);
+}
+
+void MacroAssemblerMIPSCompat::convertInt32ToDouble(const Address& src,
+ FloatRegister dest) {
+ ma_ls(dest, src);
+ as_cvtdw(dest, dest);
+}
+
+void MacroAssemblerMIPSCompat::convertInt32ToDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ computeScaledAddress(src, ScratchRegister);
+ convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
+}
+
+void MacroAssemblerMIPSCompat::convertUInt32ToDouble(Register src,
+ FloatRegister dest) {
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
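+ // The sign bit is set, so cvt.d.w would treat src as negative. Build the
+ // IEEE double directly instead: exponent = bias + 31, and the low 31 bits of
+ // src become the top of the mantissa (bit 31 is the implicit leading 1).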
+ const uint32_t kExponentShift =
+ mozilla::FloatingPoint<double>::kExponentShift - 32;
+ const uint32_t kExponent =
+ (31 + mozilla::FloatingPoint<double>::kExponentBias);
+
+ ma_ext(SecondScratchReg, src, 31 - kExponentShift, kExponentShift);
+ ma_li(ScratchRegister, Imm32(kExponent << kExponentShift));
+ ma_or(SecondScratchReg, ScratchRegister);
+ ma_sll(ScratchRegister, src, Imm32(kExponentShift + 1));
+ moveToDoubleHi(SecondScratchReg, dest);
+ moveToDoubleLo(ScratchRegister, dest);
+
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ convertInt32ToDouble(src, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerMIPSCompat::convertUInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ const uint32_t kExponentShift =
+ mozilla::FloatingPoint<double>::kExponentShift - 32;
+ const uint32_t kExponent =
+ (31 + mozilla::FloatingPoint<double>::kExponentBias);
+
+ ma_ext(SecondScratchReg, src, 31 - kExponentShift, kExponentShift);
+ ma_li(ScratchRegister, Imm32(kExponent << kExponentShift));
+ ma_or(SecondScratchReg, ScratchRegister);
+ ma_sll(ScratchRegister, src, Imm32(kExponentShift + 1));
+ FloatRegister destDouble = dest.asDouble();
+ moveToDoubleHi(SecondScratchReg, destDouble);
+ moveToDoubleLo(ScratchRegister, destDouble);
+
+ convertDoubleToFloat32(destDouble, dest);
+
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ convertInt32ToFloat32(src, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerMIPSCompat::convertDoubleToFloat32(FloatRegister src,
+ FloatRegister dest) {
+ as_cvtsd(dest, src);
+}
+
+void MacroAssemblerMIPSCompat::convertDoubleToPtr(FloatRegister src,
+ Register dest, Label* fail,
+ bool negativeZeroCheck) {
+ convertDoubleToInt32(src, dest, fail, negativeZeroCheck);
+}
+
+const int CauseBitPos = int(Assembler::CauseI);
+const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
+const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
+ (1 << int(Assembler::CauseV))) >>
+ int(Assembler::CauseI);
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void MacroAssemblerMIPSCompat::convertDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromDoubleHi(src, dest);
+ moveFromDoubleLo(src, SecondScratchReg);
+ ma_xor(dest, Imm32(INT32_MIN));
+ ma_or(dest, SecondScratchReg);
+ ma_b(dest, Imm32(0), fail, Assembler::Equal);
+ }
+
+ // Truncate double to int32; if the result is inexact or invalid, fail.
+ as_truncwd(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+ // The masking andi is only a precaution: for trunc.*.* the possible
+ // floating point exceptions are limited to Inexact, Invalid Operation and
+ // Unimplemented Operation, so the other cause bits should never be set and
+ // the andi could in principle be omitted.
+ as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
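+
+// Note on the FCSR check above: ma_ext pulls the CauseBitCount cause bits out
+// of FCSR starting at CauseBitPos, so after the shift the Inexact cause sits
+// in bit 0 and the Invalid Operation cause in bit (CauseV - CauseI).
+// CauseIOrVMask expresses the same two bits relative to CauseI, so the andi
+// keeps only those flags and the branch jumps to |fail| if either was raised.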
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void MacroAssemblerMIPSCompat::convertFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromFloat32(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ as_truncws(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+ as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPSCompat::convertFloat32ToDouble(FloatRegister src,
+ FloatRegister dest) {
+ as_cvtds(dest, src);
+}
+
+void MacroAssemblerMIPSCompat::convertInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ as_mtc1(src, dest);
+ as_cvtsw(dest, dest);
+}
+
+void MacroAssemblerMIPSCompat::convertInt32ToFloat32(const Address& src,
+ FloatRegister dest) {
+ ma_ls(dest, src);
+ as_cvtsw(dest, dest);
+}
+
+void MacroAssemblerMIPS::ma_li(Register dest, CodeLabel* label) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+ label->patchAt()->bind(bo.getOffset());
+ label->setLinkMode(CodeLabel::MoveImmediate);
+}
+
+void MacroAssemblerMIPS::ma_li(Register dest, ImmWord imm) {
+ ma_li(dest, Imm32(uint32_t(imm.value)));
+}
+
+void MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm) {
+ ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmWord imm) {
+ ma_liPatchable(dest, Imm32(int32_t(imm.value)));
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void MacroAssemblerMIPS::ma_add32TestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+ MOZ_ASSERT_IF(rs == rd, rs != rt);
+ MOZ_ASSERT(rs != ScratchRegister);
+ MOZ_ASSERT(rt != ScratchRegister);
+ MOZ_ASSERT(rd != rt);
+ MOZ_ASSERT(rd != ScratchRegister);
+ MOZ_ASSERT(rd != SecondScratchReg);
+
+ if (rs == rt) {
+ as_addu(rd, rs, rs);
+ as_xor(SecondScratchReg, rs, rd);
+ ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
+ return;
+ }
+
+ // If different sign, no overflow
+ as_xor(ScratchRegister, rs, rt);
+
+ as_addu(rd, rs, rt);
+ as_nor(ScratchRegister, ScratchRegister, zero);
+ // If different sign, then overflow
+ as_xor(SecondScratchReg, rt, rd);
+ as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
+}
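+
+// Sketch of the overflow test above: signed overflow on addition happens
+// exactly when both operands have the same sign and the result's sign
+// differs. ~(rs ^ rt) has bit 31 set when the operand signs match, and
+// (rt ^ rd) has bit 31 set when the result's sign differs from rt's, so the
+// AND of the two is negative precisely in the overflow case. For example,
+// 0x7fffffff + 0x00000001 gives 0x80000000: the operands share a sign, the
+// result does not, and the branch to |overflow| is taken.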
+
+void MacroAssemblerMIPS::ma_add32TestOverflow(Register rd, Register rs,
+ Imm32 imm, Label* overflow) {
+ MOZ_ASSERT(rs != ScratchRegister);
+ MOZ_ASSERT(rs != SecondScratchReg);
+ MOZ_ASSERT(rd != ScratchRegister);
+ MOZ_ASSERT(rd != SecondScratchReg);
+
+ Register rs_copy = rs;
+
+ if (imm.value > 0) {
+ as_nor(ScratchRegister, rs, zero);
+ } else if (rs == rd) {
+ ma_move(ScratchRegister, rs);
+ rs_copy = ScratchRegister;
+ }
+
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_addiu(rd, rs, imm.value);
+ } else {
+ ma_li(SecondScratchReg, imm);
+ as_addu(rd, rs, SecondScratchReg);
+ }
+
+ if (imm.value > 0) {
+ as_and(ScratchRegister, ScratchRegister, rd);
+ } else {
+ as_nor(SecondScratchReg, rd, zero);
+ as_and(ScratchRegister, rs_copy, SecondScratchReg);
+ }
+
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+}
+
+// Subtract.
+void MacroAssemblerMIPS::ma_sub32TestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+ // The rs == rt case should probably be folded at MIR stage.
+ // Happens for Number_isInteger*. Not worth specializing here.
+ MOZ_ASSERT_IF(rs == rd, rs != rt);
+ MOZ_ASSERT(rs != SecondScratchReg);
+ MOZ_ASSERT(rt != SecondScratchReg);
+ MOZ_ASSERT(rd != rt);
+ MOZ_ASSERT(rd != ScratchRegister);
+ MOZ_ASSERT(rd != SecondScratchReg);
+
+ Register rs_copy = rs;
+
+ if (rs == rd) {
+ ma_move(SecondScratchReg, rs);
+ rs_copy = SecondScratchReg;
+ }
+
+ as_subu(rd, rs, rt);
+ // If same sign, no overflow
+ as_xor(ScratchRegister, rs_copy, rt);
+ // If different sign, then overflow
+ as_xor(SecondScratchReg, rs_copy, rd);
+ as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
+}
+
+// Memory.
+
+void MacroAssemblerMIPS::ma_load(Register dest, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && ZeroExtend != extension &&
+ !Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gslbx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gslhx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gslwx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gsldx(dest, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ if (ZeroExtend == extension) {
+ as_lbu(dest, base, encodedOffset);
+ } else {
+ as_lb(dest, base, encodedOffset);
+ }
+ break;
+ case SizeHalfWord:
+ if (ZeroExtend == extension) {
+ as_lhu(dest, base, encodedOffset);
+ } else {
+ as_lh(dest, base, encodedOffset);
+ }
+ break;
+ case SizeWord:
+ as_lw(dest, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
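+
+// For example, a word load from Address(a1, 0x12340) cannot encode its offset
+// in the 16-bit signed immediate of lw; on non-Loongson cores the code above
+// materializes 0x12340 into ScratchRegister, adds it to the base, and then
+// issues the lw with a zero offset against the scratch register.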
+
+void MacroAssemblerMIPS::ma_store(Register data, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gsswx(data, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gssdx(data, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_sb(data, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ as_sh(data, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_sw(data, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+void MacroAssemblerMIPSCompat::computeScaledAddress(const BaseIndex& address,
+ Register dest) {
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+ if (shift) {
+ ma_sll(ScratchRegister, address.index, Imm32(shift));
+ as_addu(dest, address.base, ScratchRegister);
+ } else {
+ as_addu(dest, address.base, address.index);
+ }
+}
+
+// Shortcut for when we know we're transferring 32 bits of data.
+void MacroAssemblerMIPS::ma_lw(Register data, Address address) {
+ ma_load(data, address, SizeWord);
+}
+
+void MacroAssemblerMIPS::ma_sw(Register data, Address address) {
+ ma_store(data, address, SizeWord);
+}
+
+void MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address) {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_sw(ScratchRegister, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != SecondScratchReg);
+
+ ma_li(SecondScratchReg, Imm32(address.offset));
+ as_addu(SecondScratchReg, address.base, SecondScratchReg);
+ as_sw(ScratchRegister, SecondScratchReg, 0);
+ }
+}
+
+void MacroAssemblerMIPS::ma_sw(Register data, BaseIndex& address) {
+ ma_store(data, address, SizeWord);
+}
+
+void MacroAssemblerMIPS::ma_pop(Register r) {
+ as_lw(r, StackPointer, 0);
+ as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void MacroAssemblerMIPS::ma_push(Register r) {
+ if (r == sp) {
+ // Pushing sp requires one more instruction.
+ ma_move(ScratchRegister, sp);
+ r = ScratchRegister;
+ }
+
+ as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
+ as_sw(r, StackPointer, 0);
+}
+
+ // Branch helpers, for use from within MIPS-specific code.
+void MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label* label,
+ Condition c, JumpKind jumpKind) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_lw(ScratchRegister, addr);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ ma_lw(SecondScratchReg, addr);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ ma_lw(SecondScratchReg, addr);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill) {
+ spew("branch .Llabel %p\n", label);
+ if (label->bound()) {
+ // Generate the long jump for calls because the return address has to be
+ // the address just after the reserved block.
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
+ as_jalr(ScratchRegister);
+ if (delaySlotFill == FillDelaySlot) {
+ as_nop();
+ }
+ return;
+ }
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+
+ spew("bal .Llabel %p\n", label);
+ BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for long jump.
+ as_nop();
+ if (delaySlotFill == FillDelaySlot) {
+ as_nop();
+ }
+}
+
+void MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label,
+ JumpKind jumpKind) {
+ spew("branch .Llabel %p", label);
+ MOZ_ASSERT(code.encode() !=
+ InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::IsInRange(offset)) {
+ jumpKind = ShortJump;
+ }
+
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ code.setBOffImm16(BOffImm16(offset));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ writeInst(code.encode());
+ as_nop();
+ return;
+ }
+
+ if (code.encode() == inst_beq.encode()) {
+ // Handle long jump
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Handle long conditional branch
+ spew("invert branch .Llabel %p", label);
+ InstImm code_r = invertBranch(code, BOffImm16(5 * sizeof(uint32_t)));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code_r);
+#endif
+ writeInst(code_r.encode());
+
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Generate open jump and link it to a label.
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ if (jumpKind == ShortJump) {
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+ // Indicate that this is a short jump with offset 4.
+ code.setBOffImm16(BOffImm16(4));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ return;
+ }
+
+ bool conditional = code.encode() != inst_beq.encode();
+
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
+
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for potential long jump.
+ as_nop();
+ as_nop();
+ if (conditional) {
+ as_nop();
+ }
+}
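+
+// Layout note for the unbound-label case above: the word right after the
+// branch (its delay-slot position) is used as a link field holding the offset
+// of the previous use of the label, or INVALID_OFFSET for the first use, and
+// the trailing nops reserve enough room for the branch to be rewritten into
+// its long form (a patchable lui/ori into ScratchRegister followed by jr)
+// once the label is finally bound.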
+
+void MacroAssemblerMIPSCompat::cmp64Set(Condition cond, Register64 lhs,
+ Imm64 val, Register dest) {
+ if (val.value == 0) {
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::BelowOrEqual:
+ as_or(dest, lhs.high, lhs.low);
+ as_sltiu(dest, dest, 1);
+ break;
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ as_or(dest, lhs.high, lhs.low);
+ as_sltu(dest, zero, dest);
+ break;
+ case Assembler::LessThan:
+ case Assembler::GreaterThanOrEqual:
+ as_slt(dest, lhs.high, zero);
+ if (cond == Assembler::GreaterThanOrEqual) {
+ as_xori(dest, dest, 1);
+ }
+ break;
+ case Assembler::GreaterThan:
+ case Assembler::LessThanOrEqual:
+ as_or(SecondScratchReg, lhs.high, lhs.low);
+ as_sra(ScratchRegister, lhs.high, 31);
+ as_sltu(dest, ScratchRegister, SecondScratchReg);
+ if (cond == Assembler::LessThanOrEqual) {
+ as_xori(dest, dest, 1);
+ }
+ break;
+ case Assembler::Below:
+ case Assembler::AboveOrEqual:
+ as_ori(dest, zero, cond == Assembler::AboveOrEqual ? 1 : 0);
+ break;
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+ return;
+ }
+
+ Condition c = ma_cmp64(cond, lhs, val, dest);
+
+ switch (cond) {
+ // For Equal/NotEqual, ma_cmp64 leaves a non-boolean value in dest, so
+ // reduce it to 0 or 1 here.
+ case Assembler::Equal:
+ as_sltiu(dest, dest, 1);
+ break;
+ case Assembler::NotEqual:
+ as_sltu(dest, zero, dest);
+ break;
+ default:
+ if (c == Assembler::Zero) as_xori(dest, dest, 1);
+ break;
+ }
+}
+
+void MacroAssemblerMIPSCompat::cmp64Set(Condition cond, Register64 lhs,
+ Register64 rhs, Register dest) {
+ Condition c = ma_cmp64(cond, lhs, rhs, dest);
+
+ switch (cond) {
+ // For Equal/NotEqual, ma_cmp64 leaves a non-boolean value in dest, so
+ // reduce it to 0 or 1 here.
+ case Assembler::Equal:
+ as_sltiu(dest, dest, 1);
+ break;
+ case Assembler::NotEqual:
+ as_sltu(dest, zero, dest);
+ break;
+ default:
+ if (c == Assembler::Zero) as_xori(dest, dest, 1);
+ break;
+ }
+}
+
+Assembler::Condition MacroAssemblerMIPSCompat::ma_cmp64(Condition cond,
+ Register64 lhs,
+ Register64 rhs,
+ Register dest) {
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ as_xor(SecondScratchReg, lhs.high, rhs.high);
+ as_xor(ScratchRegister, lhs.low, rhs.low);
+ as_or(dest, SecondScratchReg, ScratchRegister);
+ return (cond == Assembler::Equal) ? Assembler::Zero : Assembler::NonZero;
+ break;
+ case Assembler::LessThan:
+ case Assembler::GreaterThanOrEqual:
+ as_slt(SecondScratchReg, rhs.high, lhs.high);
+ as_sltu(ScratchRegister, lhs.low, rhs.low);
+ as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ as_slt(ScratchRegister, lhs.high, rhs.high);
+ as_or(dest, ScratchRegister, SecondScratchReg);
+ return (cond == Assembler::GreaterThanOrEqual) ? Assembler::Zero
+ : Assembler::NonZero;
+ break;
+ case Assembler::GreaterThan:
+ case Assembler::LessThanOrEqual:
+ as_slt(SecondScratchReg, lhs.high, rhs.high);
+ as_sltu(ScratchRegister, rhs.low, lhs.low);
+ as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ as_slt(ScratchRegister, rhs.high, lhs.high);
+ as_or(dest, ScratchRegister, SecondScratchReg);
+ return (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
+ : Assembler::NonZero;
+ break;
+ case Assembler::Below:
+ case Assembler::AboveOrEqual:
+ as_sltu(SecondScratchReg, rhs.high, lhs.high);
+ as_sltu(ScratchRegister, lhs.low, rhs.low);
+ as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ as_sltu(ScratchRegister, lhs.high, rhs.high);
+ as_or(dest, ScratchRegister, SecondScratchReg);
+ return (cond == Assembler::AboveOrEqual) ? Assembler::Zero
+ : Assembler::NonZero;
+ break;
+ case Assembler::Above:
+ case Assembler::BelowOrEqual:
+ as_sltu(SecondScratchReg, lhs.high, rhs.high);
+ as_sltu(ScratchRegister, rhs.low, lhs.low);
+ as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ as_sltu(ScratchRegister, rhs.high, lhs.high);
+ as_or(dest, ScratchRegister, SecondScratchReg);
+ return (cond == Assembler::BelowOrEqual) ? Assembler::Zero
+ : Assembler::NonZero;
+ break;
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+}
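+
+// Contract of both ma_cmp64 overloads: dest ends up non-zero exactly when the
+// NotEqual/LessThan/GreaterThan/Below/Above member of the condition pair
+// holds, and the returned Assembler::Condition (Zero or NonZero) tells the
+// caller how to interpret dest for the condition it actually asked for. The
+// signed LessThan case, for instance, evaluates
+//   lhs < rhs  <=>  lhs.high < rhs.high, or
+//                   lhs.high == rhs.high and lhs.low < rhs.low (unsigned),
+// which the slt/sltu/or sequence above computes in an equivalent
+// branch-free form.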
+
+Assembler::Condition MacroAssemblerMIPSCompat::ma_cmp64(Condition cond,
+ Register64 lhs,
+ Imm64 val,
+ Register dest) {
+ MOZ_ASSERT(val.value != 0);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ ma_xor(SecondScratchReg, lhs.high, val.hi());
+ ma_xor(ScratchRegister, lhs.low, val.low());
+ as_or(dest, SecondScratchReg, ScratchRegister);
+ return (cond == Assembler::Equal) ? Assembler::Zero : Assembler::NonZero;
+ break;
+ case Assembler::LessThan:
+ case Assembler::GreaterThanOrEqual:
+ ma_li(SecondScratchReg, val.hi());
+ as_slt(ScratchRegister, lhs.high, SecondScratchReg);
+ as_slt(SecondScratchReg, SecondScratchReg, lhs.high);
+ as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ ma_li(ScratchRegister, val.low());
+ as_sltu(ScratchRegister, lhs.low, ScratchRegister);
+ as_slt(dest, SecondScratchReg, ScratchRegister);
+ return (cond == Assembler::GreaterThanOrEqual) ? Assembler::Zero
+ : Assembler::NonZero;
+ break;
+ case Assembler::GreaterThan:
+ case Assembler::LessThanOrEqual:
+ ma_li(SecondScratchReg, val.hi());
+ as_slt(ScratchRegister, SecondScratchReg, lhs.high);
+ as_slt(SecondScratchReg, lhs.high, SecondScratchReg);
+ as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ ma_li(ScratchRegister, val.low());
+ as_sltu(ScratchRegister, ScratchRegister, lhs.low);
+ as_slt(dest, SecondScratchReg, ScratchRegister);
+ return (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
+ : Assembler::NonZero;
+ break;
+ case Assembler::Below:
+ case Assembler::AboveOrEqual:
+ ma_li(SecondScratchReg, val.hi());
+ as_sltu(ScratchRegister, lhs.high, SecondScratchReg);
+ as_sltu(SecondScratchReg, SecondScratchReg, lhs.high);
+ as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ ma_li(ScratchRegister, val.low());
+ as_sltu(ScratchRegister, lhs.low, ScratchRegister);
+ as_slt(dest, SecondScratchReg, ScratchRegister);
+ return (cond == Assembler::AboveOrEqual) ? Assembler::Zero
+ : Assembler::NonZero;
+ break;
+ case Assembler::Above:
+ case Assembler::BelowOrEqual:
+ ma_li(SecondScratchReg, val.hi());
+ as_sltu(ScratchRegister, SecondScratchReg, lhs.high);
+ as_sltu(SecondScratchReg, lhs.high, SecondScratchReg);
+ as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ ma_li(ScratchRegister, val.low());
+ as_sltu(ScratchRegister, ScratchRegister, lhs.low);
+ as_slt(dest, SecondScratchReg, ScratchRegister);
+ return (cond == Assembler::BelowOrEqual) ? Assembler::Zero
+ : Assembler::NonZero;
+ break;
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+}
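+
+// In the immediate overload above, each slt/slt/subu prelude collapses the
+// high-word comparison into a single value in SecondScratchReg: -1 when the
+// high words already satisfy the relation being tested, +1 when they already
+// rule it out, and 0 when they are equal. The final slt against the 0/1
+// unsigned low-word comparison then produces the 64-bit answer: -1 always
+// wins, 0 defers to the low words, and +1 always loses.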
+
+// fp instructions
+void MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value) {
+ struct DoubleStruct {
+ uint32_t lo;
+ uint32_t hi;
+ };
+ DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);
+#if MOZ_BIG_ENDIAN()
+ std::swap(intStruct.hi, intStruct.lo);
+#endif
+
+ // Put the high part of the 64-bit value into the odd register.
+ if (intStruct.hi == 0) {
+ moveToDoubleHi(zero, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(intStruct.hi));
+ moveToDoubleHi(ScratchRegister, dest);
+ }
+
+ // Put the low part of the 64-bit value into the even register.
+ if (intStruct.lo == 0) {
+ moveToDoubleLo(zero, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(intStruct.lo));
+ moveToDoubleLo(ScratchRegister, dest);
+ }
+}
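+
+// Example: loading 1.0 this way splits its IEEE-754 encoding
+// 0x3FF0000000000000 into hi = 0x3FF00000 and lo = 0x00000000; the zero low
+// word takes the moveToDoubleLo(zero, ...) fast path, so only the high word
+// needs an immediate materialized into ScratchRegister.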
+
+void MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest) {
+ moveFromDoubleLo(src, dest.payloadReg());
+ moveFromDoubleHi(src, dest.typeReg());
+}
+
+void MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest) {
+ moveToDoubleLo(src.payloadReg(), dest);
+ moveToDoubleHi(src.typeReg(), dest);
+}
+
+void MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_lwc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gslsx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_lwc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ldc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsldx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ldc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_sdc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gssdx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_sdc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_swc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsssx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_swc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS::ma_ldc1WordAligned(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
+ Imm16::IsInSignedRange(off + TAG_OFFSET));
+
+ as_lwc1(ft, base, off + PAYLOAD_OFFSET);
+ as_lwc1(getOddPair(ft), base, off + TAG_OFFSET);
+}
+
+void MacroAssemblerMIPS::ma_sdc1WordAligned(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
+ Imm16::IsInSignedRange(off + TAG_OFFSET));
+
+ as_swc1(ft, base, off + PAYLOAD_OFFSET);
+ as_swc1(getOddPair(ft), base, off + TAG_OFFSET);
+}
+
+void MacroAssemblerMIPS::ma_pop(FloatRegister f) {
+ if (f.isDouble()) {
+ ma_ldc1WordAligned(f, StackPointer, 0);
+ } else {
+ as_lwc1(f, StackPointer, 0);
+ }
+
+ as_addiu(StackPointer, StackPointer, f.size());
+}
+
+void MacroAssemblerMIPS::ma_push(FloatRegister f) {
+ as_addiu(StackPointer, StackPointer, -f.size());
+
+ if (f.isDouble()) {
+ ma_sdc1WordAligned(f, StackPointer, 0);
+ } else {
+ as_swc1(f, StackPointer, 0);
+ }
+}
+
+bool MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ uint32_t descriptor = MakeFrameDescriptor(
+ asMasm().framePushed(), FrameType::IonJS, ExitFrameLayout::Size());
+
+ asMasm().Push(Imm32(descriptor)); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+
+ return true;
+}
+
+void MacroAssemblerMIPSCompat::move32(Imm32 imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPSCompat::move32(Register src, Register dest) {
+ ma_move(dest, src);
+}
+
+void MacroAssemblerMIPSCompat::movePtr(Register src, Register dest) {
+ ma_move(dest, src);
+}
+void MacroAssemblerMIPSCompat::movePtr(ImmWord imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPSCompat::movePtr(ImmGCPtr imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest) {
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+void MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm,
+ Register dest) {
+ append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
+ ma_liPatchable(dest, ImmWord(-1));
+}
+
+void MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerMIPSCompat::load8SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void MacroAssemblerMIPSCompat::load16ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPSCompat::load16SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerMIPSCompat::load32(const Address& address, Register dest) {
+ ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::load32(const BaseIndex& address, Register dest) {
+ ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::load32(AbsoluteAddress address, Register dest) {
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPSCompat::load32(wasm::SymbolicAddress address,
+ Register dest) {
+ movePtr(address, ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPSCompat::loadPtr(const Address& address, Register dest) {
+ ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::loadPtr(const BaseIndex& src, Register dest) {
+ ma_load(dest, src, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::loadPtr(AbsoluteAddress address, Register dest) {
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPSCompat::loadPtr(wasm::SymbolicAddress address,
+ Register dest) {
+ movePtr(address, ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPSCompat::loadPrivate(const Address& address,
+ Register dest) {
+ ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::loadUnalignedDouble(
+ const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
+ FloatRegister dest) {
+ MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+ computeScaledAddress(src, SecondScratchReg);
+
+ BufferOffset load;
+ if (Imm16::IsInSignedRange(src.offset) &&
+ Imm16::IsInSignedRange(src.offset + 7)) {
+ load = as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
+ as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
+ append(access, load.getOffset());
+ moveToDoubleLo(temp, dest);
+ load = as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
+ as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
+ append(access, load.getOffset());
+ moveToDoubleHi(temp, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ load = as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+ as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
+ append(access, load.getOffset());
+ moveToDoubleLo(temp, dest);
+ load = as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+ as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
+ append(access, load.getOffset());
+ moveToDoubleHi(temp, dest);
+ }
+}
+
+void MacroAssemblerMIPSCompat::loadUnalignedFloat32(
+ const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
+ FloatRegister dest) {
+ MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+ computeScaledAddress(src, SecondScratchReg);
+ BufferOffset load;
+ if (Imm16::IsInSignedRange(src.offset) &&
+ Imm16::IsInSignedRange(src.offset + 3)) {
+ load = as_lwl(temp, SecondScratchReg, src.offset + 3);
+ as_lwr(temp, SecondScratchReg, src.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ load = as_lwl(temp, ScratchRegister, 3);
+ as_lwr(temp, ScratchRegister, 0);
+ }
+ append(access, load.getOffset());
+ moveToFloat32(temp, dest);
+}
+
+void MacroAssemblerMIPSCompat::store8(Imm32 imm, const Address& address) {
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeByte);
+}
+
+void MacroAssemblerMIPSCompat::store8(Register src, const Address& address) {
+ ma_store(src, address, SizeByte);
+}
+
+void MacroAssemblerMIPSCompat::store8(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeByte);
+}
+
+void MacroAssemblerMIPSCompat::store8(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeByte);
+}
+
+void MacroAssemblerMIPSCompat::store16(Imm32 imm, const Address& address) {
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPSCompat::store16(Register src, const Address& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPSCompat::store16(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+void MacroAssemblerMIPSCompat::store16(Register src, const BaseIndex& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPSCompat::store32(Register src, AbsoluteAddress address) {
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ store32(src, Address(ScratchRegister, 0));
+}
+
+void MacroAssemblerMIPSCompat::store32(Register src, const Address& address) {
+ ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::store32(Imm32 src, const Address& address) {
+ move32(src, SecondScratchReg);
+ ma_store(SecondScratchReg, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::store32(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::store32(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void MacroAssemblerMIPSCompat::storePtr(ImmWord imm, T address) {
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeWord);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmWord imm,
+ Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmWord imm,
+ BaseIndex address);
+
+template <typename T>
+void MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmPtr imm,
+ Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmPtr imm,
+ BaseIndex address);
+
+template <typename T>
+void MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, T address) {
+ movePtr(imm, SecondScratchReg);
+ storePtr(SecondScratchReg, address);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmGCPtr imm,
+ Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmGCPtr imm,
+ BaseIndex address);
+
+void MacroAssemblerMIPSCompat::storePtr(Register src, const Address& address) {
+ ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::storePtr(Register src,
+ const BaseIndex& address) {
+ ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest) {
+ movePtr(ImmPtr(dest.addr), ScratchRegister);
+ storePtr(src, Address(ScratchRegister, 0));
+}
+
+void MacroAssemblerMIPSCompat::storeUnalignedFloat32(
+ const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
+ const BaseIndex& dest) {
+ MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+ computeScaledAddress(dest, SecondScratchReg);
+ moveFromFloat32(src, temp);
+
+ BufferOffset store;
+ if (Imm16::IsInSignedRange(dest.offset) &&
+ Imm16::IsInSignedRange(dest.offset + 3)) {
+ store = as_swl(temp, SecondScratchReg, dest.offset + 3);
+ as_swr(temp, SecondScratchReg, dest.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ store = as_swl(temp, ScratchRegister, 3);
+ as_swr(temp, ScratchRegister, 0);
+ }
+ append(access, store.getOffset());
+}
+
+void MacroAssemblerMIPSCompat::storeUnalignedDouble(
+ const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
+ const BaseIndex& dest) {
+ MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+ computeScaledAddress(dest, SecondScratchReg);
+
+ BufferOffset store;
+ if (Imm16::IsInSignedRange(dest.offset) &&
+ Imm16::IsInSignedRange(dest.offset + 7)) {
+ moveFromDoubleHi(src, temp);
+ store = as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
+ as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
+ moveFromDoubleLo(src, temp);
+ as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
+ as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
+
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ moveFromDoubleHi(src, temp);
+ store = as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+ as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
+ moveFromDoubleLo(src, temp);
+ as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+ as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
+ }
+ append(access, store.getOffset());
+}
+
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+ as_roundwd(ScratchDoubleReg, input);
+ ma_li(ScratchRegister, Imm32(255));
+ as_mfc1(output, ScratchDoubleReg);
+ zeroDouble(ScratchDoubleReg);
+ as_sltiu(SecondScratchReg, output, 255);
+ as_colt(DoubleFloat, ScratchDoubleReg, input);
+ // If res > 255, then res = 255.
+ as_movz(output, ScratchRegister, SecondScratchReg);
+ // If !(input > 0), then res = 0.
+ as_movf(output, zero);
+}
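+
+// Rough walk-through of the clamp above: round.w.d converts the input to the
+// nearest int32, sltiu leaves 0 in SecondScratchReg when that value is 255 or
+// greater (treated as unsigned) so movz overwrites the result with 255, and
+// the ordered-less-than compare plus movf zero the result whenever
+// !(0.0 < input), which covers non-positive inputs and NaN. So 300.7 clamps
+// to 255, -3.2 clamps to 0, and 200.3 rounds to 200.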
+
+// higher level tag testing code
+Operand MacroAssemblerMIPSCompat::ToPayload(Operand base) {
+ return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
+}
+
+Operand MacroAssemblerMIPSCompat::ToType(Operand base) {
+ return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
+}
+
+void MacroAssemblerMIPSCompat::testNullSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_NULL), cond);
+}
+
+void MacroAssemblerMIPSCompat::testObjectSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_OBJECT), cond);
+}
+
+void MacroAssemblerMIPSCompat::testUndefinedSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), cond);
+}
+
+// unboxing code
+void MacroAssemblerMIPSCompat::unboxNonDouble(const ValueOperand& operand,
+ Register dest, JSValueType) {
+ if (operand.payloadReg() != dest) {
+ ma_move(dest, operand.payloadReg());
+ }
+}
+
+void MacroAssemblerMIPSCompat::unboxNonDouble(const Address& src, Register dest,
+ JSValueType) {
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxNonDouble(const BaseIndex& src,
+ Register dest, JSValueType) {
+ computeScaledAddress(src, SecondScratchReg);
+ ma_lw(dest, Address(SecondScratchReg, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand& operand,
+ Register dest) {
+ ma_move(dest, operand.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxInt32(const Address& src, Register dest) {
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand& operand,
+ Register dest) {
+ ma_move(dest, operand.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxBoolean(const Address& src, Register dest) {
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ moveToDoubleLo(operand.payloadReg(), dest);
+ moveToDoubleHi(operand.typeReg(), dest);
+}
+
+void MacroAssemblerMIPSCompat::unboxDouble(const Address& src,
+ FloatRegister dest) {
+ ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
+ moveToDoubleLo(ScratchRegister, dest);
+ ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
+ moveToDoubleHi(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPSCompat::unboxDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ loadDouble(src, dest);
+}
+
+void MacroAssemblerMIPSCompat::unboxString(const ValueOperand& operand,
+ Register dest) {
+ ma_move(dest, operand.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxString(const Address& src, Register dest) {
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxBigInt(const ValueOperand& operand,
+ Register dest) {
+ ma_move(dest, operand.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxBigInt(const Address& src, Register dest) {
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxObject(const ValueOperand& src,
+ Register dest) {
+ ma_move(dest, src.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxObject(const Address& src, Register dest) {
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxObjectOrNull(const Address& src,
+ Register dest) {
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxValue(const ValueOperand& src,
+ AnyRegister dest, JSValueType) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.payloadReg(), dest.fpu());
+ ma_b(&end, ShortJump);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else if (src.payloadReg() != dest.gpr()) {
+ ma_move(dest.gpr(), src.payloadReg());
+ }
+}
+
+void MacroAssemblerMIPSCompat::boxDouble(FloatRegister src,
+ const ValueOperand& dest,
+ FloatRegister) {
+ moveFromDoubleLo(src, dest.payloadReg());
+ moveFromDoubleHi(src, dest.typeReg());
+}
+
+void MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest) {
+ if (src != dest.payloadReg()) {
+ ma_move(dest.payloadReg(), src);
+ }
+ ma_li(dest.typeReg(), ImmType(type));
+}
+
+void MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertBoolToInt32(operand.payloadReg(), ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertInt32ToDouble(operand.payloadReg(), dest);
+}
+
+void MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertBoolToInt32(operand.payloadReg(), ScratchRegister);
+ convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+}
+
+void MacroAssemblerMIPSCompat::loadConstantFloat32(float f,
+ FloatRegister dest) {
+ ma_lis(dest, f);
+}
+
+void MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address& src,
+ FloatRegister dest) {
+ Label notInt32, end;
+ // If it's an int, convert it to double.
+ ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+ ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ ma_ld(dest, src);
+ bind(&end);
+}
+
+void MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
+ FloatRegister dest,
+ int32_t shift) {
+ Label notInt32, end;
+
+ // If it's an int, convert it to double.
+
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
+ SecondScratchReg);
+ // Since we only have one scratch, we need to stomp over it with the tag.
+ load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
+ SecondScratchReg);
+ load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ // Recompute the scaled address that was in the scratch register, since the
+ // scratch register was clobbered when the type tag was loaded into it.
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
+ SecondScratchReg);
+ loadDouble(Address(SecondScratchReg, 0), dest);
+ bind(&end);
+}
+
+void MacroAssemblerMIPSCompat::loadConstantDouble(double dp,
+ FloatRegister dest) {
+ ma_lid(dest, dp);
+}
+
+Register MacroAssemblerMIPSCompat::extractObject(const Address& address,
+ Register scratch) {
+ ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
+ return scratch;
+}
+
+Register MacroAssemblerMIPSCompat::extractTag(const Address& address,
+ Register scratch) {
+ ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
+ return scratch;
+}
+
+Register MacroAssemblerMIPSCompat::extractTag(const BaseIndex& address,
+ Register scratch) {
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+uint32_t MacroAssemblerMIPSCompat::getType(const Value& val) {
+ return val.toNunboxTag();
+}
+
+void MacroAssemblerMIPSCompat::moveData(const Value& val, Register data) {
+ if (val.isGCThing()) {
+ ma_li(data, ImmGCPtr(val.toGCThing()));
+ } else {
+ ma_li(data, Imm32(val.toNunboxPayload()));
+ }
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+void MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst) {
+ storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(ValueOperand val,
+ const BaseIndex& dest) {
+ computeScaledAddress(dest, SecondScratchReg);
+ storeValue(val, Address(SecondScratchReg, dest.offset));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg,
+ BaseIndex dest) {
+ computeScaledAddress(dest, ScratchRegister);
+
+ // Make sure that ma_sw doesn't clobber ScratchRegister
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+
+ storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(ValueOperand val,
+ const Address& dest) {
+ ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+ ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg,
+ Address dest) {
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
+ ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+ ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(const Value& val, Address dest) {
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
+ ma_li(SecondScratchReg, Imm32(getType(val)));
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+ moveData(val, SecondScratchReg);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(const Value& val, BaseIndex dest) {
+ computeScaledAddress(dest, ScratchRegister);
+
+ // Make sure that ma_sw doesn't clobber ScratchRegister
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+ storeValue(val, Address(ScratchRegister, offset));
+}
+
+void MacroAssemblerMIPSCompat::loadValue(const BaseIndex& addr,
+ ValueOperand val) {
+ computeScaledAddress(addr, SecondScratchReg);
+ loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+void MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val) {
+ // Ensure that loading the payload does not erase the pointer to the
+ // Value in memory.
+ if (src.base != val.payloadReg()) {
+ ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+ ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+ } else {
+ ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+ ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+ }
+}
+
+void MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload,
+ ValueOperand dest) {
+ MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
+ if (payload != dest.payloadReg()) {
+ ma_move(dest.payloadReg(), payload);
+ }
+ ma_li(dest.typeReg(), ImmType(type));
+}
+
+void MacroAssemblerMIPSCompat::pushValue(ValueOperand val) {
+ // Allocate stack slots for type and payload. One for each.
+ asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+ // Store type and payload.
+ storeValue(val, Address(StackPointer, 0));
+}
+
+void MacroAssemblerMIPSCompat::pushValue(const Address& addr) {
+ // Allocate stack slots for type and payload. One for each.
+ ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+ // If the address is based on StackPointer, its offset needs to be adjusted
+ // to account for the stack space we just allocated.
+ int32_t offset =
+ addr.base != StackPointer ? addr.offset : addr.offset + sizeof(Value);
+ // Store type and payload.
+ ma_lw(ScratchRegister, Address(addr.base, offset + TAG_OFFSET));
+ ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
+ ma_lw(ScratchRegister, Address(addr.base, offset + PAYLOAD_OFFSET));
+ ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
+}
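+
+// The offset fixup above matters when pushing a Value that itself lives on
+// the stack: after the ma_subu the caller's slot has moved sizeof(Value)
+// (8) bytes further away, so e.g. pushValue(Address(StackPointer, 16)) must
+// read its tag and payload from what is now StackPointer + 24.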
+
+void MacroAssemblerMIPSCompat::popValue(ValueOperand val) {
+ // Load payload and type.
+ as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
+ as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
+ // Free stack.
+ as_addiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+void MacroAssemblerMIPSCompat::storePayload(const Value& val, Address dest) {
+ moveData(val, SecondScratchReg);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::storePayload(Register src, Address dest) {
+ ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::storePayload(const Value& val,
+ const BaseIndex& dest) {
+ MOZ_ASSERT(dest.offset == 0);
+
+ computeScaledAddress(dest, SecondScratchReg);
+
+ moveData(val, ScratchRegister);
+
+ as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void MacroAssemblerMIPSCompat::storePayload(Register src,
+ const BaseIndex& dest) {
+ MOZ_ASSERT(dest.offset == 0);
+
+ computeScaledAddress(dest, SecondScratchReg);
+ as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest) {
+ ma_li(SecondScratchReg, tag);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest) {
+ MOZ_ASSERT(dest.offset == 0);
+
+ computeScaledAddress(dest, SecondScratchReg);
+ ma_li(ScratchRegister, tag);
+ as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
+}
+
+void MacroAssemblerMIPSCompat::breakpoint() { as_break(0); }
+
+void MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand& source,
+ FloatRegister dest,
+ Label* failure) {
+ Label isDouble, done;
+ asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerMIPSCompat::checkStackAlignment() {
+#ifdef DEBUG
+ Label aligned;
+ as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
+ ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+ as_break(BREAK_STACK_UNALIGNED);
+ bind(&aligned);
+#endif
+}
+
+void MacroAssemblerMIPSCompat::alignStackPointer() {
+ movePtr(StackPointer, SecondScratchReg);
+ asMasm().subPtr(Imm32(sizeof(intptr_t)), StackPointer);
+ asMasm().andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
+ storePtr(SecondScratchReg, Address(StackPointer, 0));
+}
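+
+// alignStackPointer and restoreStackPointer work as a pair: the old
+// StackPointer is stashed in the newly reserved word at the aligned top of
+// stack, and restoreStackPointer simply reloads it from there. Assuming an
+// 8-byte ABIStackAlignment, a StackPointer of 0x7fff001c becomes 0x7fff0018
+// after the subtract-and-mask, with 0x7fff001c stored at that location.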
+
+void MacroAssemblerMIPSCompat::restoreStackPointer() {
+ loadPtr(Address(StackPointer, 0), StackPointer);
+}
+
+void MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(
+ Label* profilerExitTail) {
+ // Reserve space for exception information.
+ int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
+ ~(ABIStackAlignment - 1);
+ asMasm().subPtr(Imm32(size), StackPointer);
+ ma_move(a0, StackPointer); // Use a0 since it is the first function argument
+
+ // Call the handler.
+ using Fn = void (*)(ResumeFromException * rfe);
+ asMasm().setupUnalignedABICall(a1);
+ asMasm().passABIArg(a0);
+ asMasm().callWithABI<Fn, HandleException>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ // Already clobbered a0, so use it...
+ load32(Address(StackPointer, ResumeFromException::offsetOfKind()), a0);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::WasmCatch),
+ &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+ // We're going to be returning by the Ion calling convention.
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by the finally block: the exception and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1, a2);
+ loadValue(Address(sp, ResumeFromException::offsetOfException()), exception);
+
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(sp, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp);
+
+ pushValue(exception);
+ pushValue(BooleanValue(true));
+ jump(a0);
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ ma_move(StackPointer, FramePointer);
+ pop(FramePointer);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(StackPointer, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to
+ // caller frame before returning. This code is shared by ForcedReturnIon
+ // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ loadPtr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), a2);
+ ma_li(ReturnReg, Imm32(1));
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a1);
+ jump(a1);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(InstanceReg, ImmWord(wasm::FailInstanceReg));
+ ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a1);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a1);
+}
+
+CodeOffset MacroAssemblerMIPSCompat::toggledJump(Label* label) {
+ CodeOffset ret(nextOffset().getOffset());
+ ma_b(label);
+ return ret;
+}
+
+CodeOffset MacroAssemblerMIPSCompat::toggledCall(JitCode* target,
+ bool enabled) {
+ BufferOffset bo = nextOffset();
+ CodeOffset offset(bo.getOffset());
+ addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
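+  // The patchable load above plus the two-instruction pair below always add
+  // up to ToggledCallSize() bytes, as the assertion below checks, so the call
+  // site can later be toggled by patching it in place.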
+ if (enabled) {
+ as_jalr(ScratchRegister);
+ as_nop();
+ } else {
+ as_nop();
+ as_nop();
+ }
+ MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
+ ToggledCallSize(nullptr));
+ return offset;
+}
+
+void MacroAssemblerMIPSCompat::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerMIPSCompat::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+ asMasm().subPtr(imm32, StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ reserveStack(diffG);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diffG));
+ }
+ MOZ_ASSERT(diffG == 0);
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ if (diffF > 0) {
+ // Double values have to be aligned. We reserve extra space so that we can
+ // start writing from the first aligned location.
+ // We reserve a whole extra double so that the buffer has even size.
+ ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
+ reserveStack(diffF);
+
+ diffF -= sizeof(double);
+
+ for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ as_sdc1(*iter, SecondScratchReg, -diffF);
+ diffF -= sizeof(double);
+ }
+
+ MOZ_ASSERT(diffF == 0);
+ }
+}
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ const int32_t reservedG = diffG;
+ const int32_t reservedF = diffF;
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ if (reservedF > 0) {
+    // Read the buffer from the first aligned location.
+ ma_addu(SecondScratchReg, sp, Imm32(reservedF));
+ ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));
+
+ diffF -= sizeof(double);
+
+ LiveFloatRegisterSet fpignore(ignore.fpus().reduceSetForPush());
+ for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ if (!ignore.has(*iter)) {
+ as_ldc1(*iter, SecondScratchReg, -diffF);
+ }
+ diffF -= sizeof(double);
+ }
+ freeStack(reservedF);
+ MOZ_ASSERT(diffF == 0);
+ }
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ if (!ignore.has(*iter)) {
+ loadPtr(Address(StackPointer, diffG), *iter);
+ }
+ }
+ freeStack(reservedG);
+ MOZ_ASSERT(diffG == 0);
+}
+
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register scratch) {
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ MOZ_ASSERT(dest.offset >= diffG + diffF);
+ MOZ_ASSERT(dest.base == StackPointer);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ dest.offset -= sizeof(intptr_t);
+ storePtr(*iter, dest);
+ }
+ MOZ_ASSERT(diffG == 0);
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ if (diffF > 0) {
+ computeEffectiveAddress(dest, scratch);
+ ma_and(scratch, scratch, Imm32(~(ABIStackAlignment - 1)));
+
+ diffF -= sizeof(double);
+
+ for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ as_sdc1(*iter, scratch, -diffF);
+ diffF -= sizeof(double);
+ }
+ MOZ_ASSERT(diffF == 0);
+ }
+}
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ ma_move(scratch, StackPointer);
+
+ // Force sp to be aligned
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+ storePtr(scratch, Address(StackPointer, 0));
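+  // The saved sp is reloaded in callWithABIPost() once the call returns,
+  // undoing the dynamic alignment.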
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ // Reserve place for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+  // Save $ra because the call is going to clobber it. Restore it in
+  // callWithABIPost. NOTE: This is needed for calls from SharedIC.
+  // Maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+  // Load the callee into t9; no instruction between this move and the call
+  // should clobber it. Note that we can't call through fun directly because
+  // it may be one of the IntArg registers clobbered before the call.
+ ma_move(t9, fun);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ // Load the callee in t9, as above.
+ loadPtr(Address(fun.base, fun.offset), t9);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ if (reg.gpr() != dest.payloadReg()) {
+ move32(reg.gpr(), dest.payloadReg());
+ }
+ mov(ImmWord(MIRTypeToTag(type)), dest.typeReg());
+ return;
+ }
+
+ ScratchDoubleScope scratch(*this);
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ Register s0 = src.typeReg();
+ Register s1 = src.payloadReg();
+ Register d0 = dest.typeReg();
+ Register d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(d1 != scratch);
+ MOZ_ASSERT(d0 != scratch);
+ move32(d1, scratch);
+ move32(d0, d1);
+ move32(scratch, d0);
+ return;
+ }
+ // If only one is, copy that source first.
+ std::swap(s0, s1);
+ std::swap(d0, d1);
+ }
+
+ if (s0 != d0) {
+ move32(s0, d0);
+ }
+ if (s1 != d1) {
+ move32(s1, d1);
+ }
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ move32(Imm32(src.toNunboxTag()), dest.typeReg());
+ if (src.isGCThing()) {
+ movePtr(ImmGCPtr(src.toGCThing()), dest.payloadReg());
+ } else {
+ move32(Imm32(src.toNunboxPayload()), dest.payloadReg());
+ }
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestGCThing(Assembler::NotEqual, address,
+ cond == Assembler::Equal ? &done : label);
+
+ loadPtr(address, temp);
+ branchPtrInNurseryChunk(cond, temp, InvalidReg, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ moveData(rhs, scratch);
+
+ if (cond == Equal) {
+ Label done;
+ ma_b(lhs.payloadReg(), scratch, &done, NotEqual, ShortJump);
+    ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, Equal);
+ bind(&done);
+ } else {
+ ma_b(lhs.payloadReg(), scratch, label, NotEqual);
+
+ ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, NotEqual);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // Store the type tag.
+ storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
+
+ // Store the payload.
+ if (value.constant()) {
+ storePayload(value.value(), dest);
+ } else {
+ storePayload(value.reg().typedReg().gpr(), dest);
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+void MacroAssembler::PushBoxed(FloatRegister reg) { Push(reg); }
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ ma_b(index, boundsCheckLimit, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ SecondScratchRegisterScope scratch2(*this);
+ load32(boundsCheckLimit, SecondScratchReg);
+ ma_b(index, SecondScratchReg, ok, cond);
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ Label done;
+
+ as_truncwd(ScratchFloat32Reg, input);
+ ma_li(ScratchRegister, Imm32(INT32_MAX));
+ moveFromFloat32(ScratchFloat32Reg, output);
+
+  // For inputs in the (-1, INT32_MAX) range nothing more needs to be done.
+ ma_b(output, ScratchRegister, &done, Assembler::Below, ShortJump);
+
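+  // For larger inputs, truncate (input - 2^31) instead and add 2^31 back into
+  // the result; FCSR's CauseV bit then sends inputs that are still out of
+  // range to the ool entry.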
+ loadConstantDouble(double(INT32_MAX + 1ULL), ScratchDoubleReg);
+ ma_li(ScratchRegister, Imm32(INT32_MIN));
+ as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
+ as_truncwd(ScratchFloat32Reg, ScratchDoubleReg);
+ as_cfc1(SecondScratchReg, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, output);
+ ma_ext(SecondScratchReg, SecondScratchReg, Assembler::CauseV, 1);
+ ma_addu(output, ScratchRegister);
+
+ ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ Label done;
+
+ as_truncws(ScratchFloat32Reg, input);
+ ma_li(ScratchRegister, Imm32(INT32_MAX));
+ moveFromFloat32(ScratchFloat32Reg, output);
+  // For inputs in the (-1, INT32_MAX) range nothing more needs to be done.
+ ma_b(output, ScratchRegister, &done, Assembler::Below, ShortJump);
+
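+  // Same trick as the double variant: truncate (input - 2^31) and add 2^31
+  // back, letting FCSR's CauseV bit catch out-of-range inputs.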
+ loadConstantFloat32(float(INT32_MAX + 1ULL), ScratchFloat32Reg);
+ ma_li(ScratchRegister, Imm32(INT32_MIN));
+ as_subs(ScratchFloat32Reg, input, ScratchFloat32Reg);
+ as_truncws(ScratchFloat32Reg, ScratchFloat32Reg);
+ as_cfc1(SecondScratchReg, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, output);
+ ma_ext(SecondScratchReg, SecondScratchReg, Assembler::CauseV, 1);
+ ma_addu(output, ScratchRegister);
+
+  // Guard against negative values that truncate to 0 due to precision loss.
+ as_sltiu(ScratchRegister, output, 1);
+ ma_or(SecondScratchReg, ScratchRegister);
+
+ ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output) {
+ wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch,
+ Register64 output, Register tmp) {
+ wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register tmp) {
+ wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
+}
+
+void MacroAssemblerMIPSCompat::wasmLoadI64Impl(
+ const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ MOZ_ASSERT(!access.isZeroExtendSimd128Load());
+ MOZ_ASSERT(!access.isSplatSimd128Load());
+ MOZ_ASSERT(!access.isWidenSimd128Load());
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().movePtr(ptr, ptrScratch);
+ asMasm().addPtr(Imm32(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ BaseIndex address(memoryBase, ptr, TimesOne);
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ if (byteSize <= 4) {
+ asMasm().ma_load_unaligned(access, output.low, address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ if (!isSigned) {
+ asMasm().move32(Imm32(0), output.high);
+ } else {
+ asMasm().ma_sra(output.high, output.low, Imm32(31));
+ }
+ } else {
+ MOZ_ASSERT(output.low != ptr);
+ asMasm().ma_load_unaligned(access, output.low, address, tmp, SizeWord,
+ ZeroExtend);
+ asMasm().ma_load_unaligned(
+ access, output.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), tmp, SizeWord,
+ SignExtend);
+ }
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ if (byteSize <= 4) {
+ asMasm().ma_load(output.low, address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ asMasm().append(access, asMasm().size() - 4);
+ if (!isSigned) {
+ asMasm().move32(Imm32(0), output.high);
+ } else {
+ asMasm().ma_sra(output.high, output.low, Imm32(31));
+ }
+ } else {
+ MOZ_ASSERT(output.low != ptr);
+ asMasm().ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().ma_load(output.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
+ SizeWord);
+ asMasm().append(access, asMasm().size() - 4);
+ }
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerMIPSCompat::wasmStoreI64Impl(
+ const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(Imm32(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
+ BaseIndex address(memoryBase, ptr, TimesOne);
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ if (byteSize <= 4) {
+ asMasm().ma_store_unaligned(access, value.low, address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ } else {
+ asMasm().ma_store_unaligned(
+ access, value.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), tmp, SizeWord,
+ SignExtend);
+ asMasm().ma_store_unaligned(access, value.low, address, tmp, SizeWord,
+ ZeroExtend);
+ }
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ if (byteSize <= 4) {
+ asMasm().ma_store(value.low, address,
+ static_cast<LoadStoreSize>(8 * byteSize));
+ asMasm().append(access, asMasm().size() - 4);
+ } else {
+ asMasm().ma_store(value.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
+ SizeWord);
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().ma_store(value.low, address, SizeWord);
+ }
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+static void EnterAtomic64Region(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ Register addr, Register spinlock,
+ Register scratch) {
+ masm.movePtr(wasm::SymbolicAddress::js_jit_gAtomic64Lock, spinlock);
+
+  // Force a memory trap on invalid access before we enter the spinlock.
+  masm.append(access, masm.size());
+  masm.as_lbu(zero, addr, 7);
+
+ Label tryLock;
+
+ masm.memoryBarrier(MembarFull);
+
+ masm.bind(&tryLock);
+
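+  // LL/SC spinlock acquire: spin while the lock word is non-zero, then try to
+  // claim it by storing 1; if the store-conditional fails, start over.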
+ masm.as_ll(scratch, spinlock, 0);
+ masm.ma_b(scratch, scratch, &tryLock, Assembler::NonZero, ShortJump);
+ masm.ma_li(scratch, Imm32(1));
+ masm.as_sc(scratch, spinlock, 0);
+ masm.ma_b(scratch, scratch, &tryLock, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrier(MembarFull);
+}
+
+static void ExitAtomic64Region(MacroAssembler& masm, Register spinlock) {
+ masm.memoryBarrier(MembarFull);
+ masm.as_sw(zero, spinlock, 0);
+ masm.memoryBarrier(MembarFull);
+}
+
+template <typename T>
+static void AtomicLoad64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access, const T& mem,
+ Register64 temp, Register64 output) {
+ MOZ_ASSERT(temp.low == InvalidReg && temp.high == InvalidReg);
+
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
+ /* spinlock= */ ScratchRegister,
+ /* scratch= */ output.low);
+
+ masm.load64(Address(SecondScratchReg, 0), output);
+
+ ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicLoad64(*this, access, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicLoad64(*this, access, mem, temp, output);
+}
+
+template <typename T>
+void MacroAssemblerMIPSCompat::wasmAtomicStore64(
+ const wasm::MemoryAccessDesc& access, const T& mem, Register temp,
+ Register64 value) {
+ computeEffectiveAddress(mem, SecondScratchReg);
+
+ EnterAtomic64Region(asMasm(), access, /* addr= */ SecondScratchReg,
+ /* spinlock= */ ScratchRegister, /* scratch= */ temp);
+
+ store64(value, Address(SecondScratchReg, 0));
+
+ ExitAtomic64Region(asMasm(), /* spinlock= */ ScratchRegister);
+}
+
+template void MacroAssemblerMIPSCompat::wasmAtomicStore64(
+ const wasm::MemoryAccessDesc& access, const Address& mem, Register temp,
+ Register64 value);
+template void MacroAssemblerMIPSCompat::wasmAtomicStore64(
+ const wasm::MemoryAccessDesc& access, const BaseIndex& mem, Register temp,
+ Register64 value);
+
+template <typename T>
+static void WasmCompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ const T& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ MOZ_ASSERT(output != expect);
+ MOZ_ASSERT(output != replace);
+
+ Label exit;
+
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+ Address addr(SecondScratchReg, 0);
+
+ EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
+ /* spinlock= */ ScratchRegister,
+ /* scratch= */ output.low);
+
+ masm.load64(addr, output);
+
+ masm.ma_b(output.low, expect.low, &exit, Assembler::NotEqual, ShortJump);
+ masm.ma_b(output.high, expect.high, &exit, Assembler::NotEqual, ShortJump);
+ masm.store64(replace, addr);
+ masm.bind(&exit);
+ ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ WasmCompareExchange64(*this, access, mem, expect, replace, output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ WasmCompareExchange64(*this, access, mem, expect, replace, output);
+}
+
+template <typename T>
+static void WasmAtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ const T& mem, Register64 src,
+ Register64 output) {
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+ Address addr(SecondScratchReg, 0);
+
+ EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
+ /* spinlock= */ ScratchRegister,
+ /* scratch= */ output.low);
+
+ masm.load64(addr, output);
+ masm.store64(src, addr);
+
+ ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access, AtomicOp op,
+ Register64 value, const T& mem, Register64 temp,
+ Register64 output) {
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
+ /* spinlock= */ ScratchRegister,
+ /* scratch= */ output.low);
+
+ masm.load64(Address(SecondScratchReg, 0), output);
+
+ switch (op) {
+ case AtomicFetchAddOp:
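+      // 64-bit add with carry: sltu sets temp.high to 1 when the low-word
+      // add wrapped around, and that carry is folded into the high word.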
+ masm.as_addu(temp.low, output.low, value.low);
+ masm.as_sltu(temp.high, temp.low, output.low);
+ masm.as_addu(temp.high, temp.high, output.high);
+ masm.as_addu(temp.high, temp.high, value.high);
+ break;
+ case AtomicFetchSubOp:
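+      // 64-bit subtract with borrow: sltu computes the borrow out of the
+      // low-word subtraction, which is then taken off the high word.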
+ masm.as_sltu(temp.high, output.low, value.low);
+ masm.as_subu(temp.high, output.high, temp.high);
+ masm.as_subu(temp.low, output.low, value.low);
+ masm.as_subu(temp.high, temp.high, value.high);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(temp.low, output.low, value.low);
+ masm.as_and(temp.high, output.high, value.high);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(temp.low, output.low, value.low);
+ masm.as_or(temp.high, output.high, value.high);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(temp.low, output.low, value.low);
+ masm.as_xor(temp.high, output.high, value.high);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.store64(temp, Address(SecondScratchReg, 0));
+
+ ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, access, op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, access, op, value, mem, temp, output);
+}
+
+// ========================================================================
+// Convert floating point.
+
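+// 0x100000000 is 2^32: a 64-bit integer converts as high * 2^32 + low, e.g.
+// 0x00000001'00000002 -> 1.0 * 4294967296.0 + 2.0 = 4294967298.0.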
+static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ convertUInt32ToDouble(src.high, dest);
+ loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
+ mulDouble(ScratchDoubleReg, dest);
+ convertUInt32ToDouble(src.low, ScratchDoubleReg);
+ addDouble(ScratchDoubleReg, dest);
+}
+
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ convertInt32ToDouble(src.high, dest);
+ loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
+ mulDouble(ScratchDoubleReg, dest);
+ convertUInt32ToDouble(src.low, ScratchDoubleReg);
+ addDouble(ScratchDoubleReg, dest);
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt32ToDouble(src, dest);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.h b/js/src/jit/mips32/MacroAssembler-mips32.h
new file mode 100644
index 0000000000..da686bbdd7
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -0,0 +1,823 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MacroAssembler_mips32_h
+#define jit_mips32_MacroAssembler_mips32_h
+
+#include "mozilla/EndianUtils.h"
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+#include "jit/MoveResolver.h"
+#include "vm/BytecodeUtil.h"
+#include "wasm/WasmBuiltins.h"
+
+namespace js {
+namespace jit {
+
+struct ImmTag : public Imm32 {
+ ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+struct ImmType : public ImmTag {
+ ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
+};
+
+static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type,
+ JSReturnReg_Data};
+static const ValueOperand softfpReturnOperand = ValueOperand(v1, v0);
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value),
+ "The defaultShift is wrong");
+
+static const uint32_t LOW_32_MASK = (1LL << 32) - 1;
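+// Byte offsets of the low and high 32-bit halves of an in-memory 64-bit
+// value; these depend on endianness and are used by the store64 overloads
+// below.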
+#if MOZ_LITTLE_ENDIAN()
+static const int32_t LOW_32_OFFSET = 0;
+static const int32_t HIGH_32_OFFSET = 4;
+#else
+static const int32_t LOW_32_OFFSET = 4;
+static const int32_t HIGH_32_OFFSET = 0;
+#endif
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
+
+class ScratchTagScope {
+ const ValueOperand& v_;
+
+ public:
+ ScratchTagScope(MacroAssembler&, const ValueOperand& v) : v_(v) {}
+ operator Register() { return v_.typeReg(); }
+ void release() {}
+ void reacquire() {}
+};
+
+class ScratchTagScopeRelease {
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope*) {}
+};
+
+class MacroAssemblerMIPS : public MacroAssemblerMIPSShared {
+ public:
+ using MacroAssemblerMIPSShared::ma_b;
+ using MacroAssemblerMIPSShared::ma_cmp_set;
+ using MacroAssemblerMIPSShared::ma_ld;
+ using MacroAssemblerMIPSShared::ma_li;
+ using MacroAssemblerMIPSShared::ma_liPatchable;
+ using MacroAssemblerMIPSShared::ma_load;
+ using MacroAssemblerMIPSShared::ma_ls;
+ using MacroAssemblerMIPSShared::ma_sd;
+ using MacroAssemblerMIPSShared::ma_ss;
+ using MacroAssemblerMIPSShared::ma_store;
+ using MacroAssemblerMIPSShared::ma_sub32TestOverflow;
+
+ void ma_li(Register dest, CodeLabel* label);
+
+ void ma_li(Register dest, ImmWord imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm);
+
+ // load
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ void ma_add32TestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_add32TestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+ void ma_addPtrTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow) {
+ ma_add32TestOverflow(rd, rs, rt, overflow);
+ }
+
+ void ma_addPtrTestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow) {
+ ma_add32TestOverflow(rd, rs, imm, overflow);
+ }
+
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rs, Register rt,
+ Label* overflow) {
+ ma_add32TestCarry(cond, rd, rs, rt, overflow);
+ }
+
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rs, Imm32 imm,
+ Label* overflow) {
+ ma_add32TestCarry(cond, rd, rs, imm, overflow);
+ }
+
+ // subtract
+ void ma_sub32TestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+
+ void ma_subPtrTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow) {
+ ma_sub32TestOverflow(rd, rs, rt, overflow);
+ }
+
+ void ma_subPtrTestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow) {
+ ma_li(ScratchRegister, imm);
+ ma_sub32TestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+
+ void ma_mulPtrTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow) {
+ ma_mul32TestOverflow(rd, rs, rt, overflow);
+ }
+
+ // memory
+ // shortcut for when we know we're transferring 32 bits of data
+ void ma_lw(Register data, Address address);
+
+ void ma_sw(Register data, Address address);
+ void ma_sw(Imm32 imm, Address address);
+ void ma_sw(Register data, BaseIndex& address);
+
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
+ // branches when done from within mips-specific code
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ ma_b(lhs, Imm32(uint32_t(imm.value)), l, c, jumpKind);
+ }
+ void ma_b(Address addr, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ ma_b(addr, Imm32(uint32_t(imm.value)), l, c, jumpKind);
+ }
+
+ void ma_b(Register lhs, Address addr, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ ma_lw(ScratchRegister, addr);
+ ma_b(ScratchRegister, rhs, l, c, jumpKind);
+ }
+
+ void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ void ma_ls(FloatRegister ft, Address address);
+ void ma_ld(FloatRegister ft, Address address);
+ void ma_sd(FloatRegister ft, Address address);
+ void ma_ss(FloatRegister ft, Address address);
+
+ void ma_ldc1WordAligned(FloatRegister ft, Register base, int32_t off);
+ void ma_sdc1WordAligned(FloatRegister ft, Register base, int32_t off);
+
+ void ma_pop(FloatRegister f);
+ void ma_push(FloatRegister f);
+
+ void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c) {
+ ma_cmp_set(dst, lhs, Imm32(uint32_t(imm.value)), c);
+ }
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c) {
+ ma_cmp_set(dst, lhs, ImmWord(uintptr_t(imm.value)), c);
+ }
+ void ma_cmp_set(Register dst, Register lhs, Address addr, Condition c) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_lw(ScratchRegister, addr);
+ ma_cmp_set(dst, lhs, ScratchRegister, c);
+ }
+ void ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ ma_lw(ScratchRegister, lhs);
+ ma_cmp_set(dst, ScratchRegister, rhs, c);
+ }
+ void ma_cmp_set(Register dst, Address lhs, ImmPtr imm, Condition c) {
+ ma_lw(SecondScratchReg, lhs);
+ ma_cmp_set(dst, SecondScratchReg, imm, c);
+ }
+
+  // These functions abstract access to the high part of the double-precision
+  // float register. They are intended to work on both 32-bit and 64-bit
+  // floating point coprocessors.
+ // :TODO: (Bug 985881) Modify this for N32 ABI to use mthc1 and mfhc1
+ void moveToDoubleHi(Register src, FloatRegister dest) {
+ as_mtc1(src, getOddPair(dest));
+ }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_mfc1(dest, getOddPair(src));
+ }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS {
+ public:
+ using MacroAssemblerMIPS::call;
+
+ MacroAssemblerMIPSCompat() {}
+
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(Register src, FloatRegister dest);
+ void convertInt32ToDouble(const Address& src, FloatRegister dest);
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_addu(dest, address.base, Imm32(address.offset));
+ }
+
+ inline void computeEffectiveAddress(const BaseIndex& address, Register dest);
+
+ void j(Label* dest) { ma_b(dest); }
+
+ void mov(Register src, Register dest) { as_ori(dest, src, 0); }
+ void mov(ImmWord imm, Register dest) { ma_li(dest, imm); }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(CodeLabel* label, Register dest) { ma_li(dest, label); }
+ void mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); }
+ void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); }
+
+ void branch(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+ void branch(const Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void nop() { as_nop(); }
+ void ret() {
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmWord imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmGCPtr imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ ma_push(ScratchRegister);
+ }
+ void push(Register reg) { ma_push(reg); }
+ void push(FloatRegister reg) { ma_push(reg); }
+ void pop(Register reg) { ma_pop(reg); }
+ void pop(FloatRegister reg) { ma_pop(reg); }
+
+  // Emit a branch that can be toggled to a non-operation. On MIPS we use an
+  // "andi" instruction to toggle the branch.
+ // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Four instructions used in: MacroAssemblerMIPSCompat::toggledCall
+ return 4 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ CodeOffset label = movWithPatch(imm, ScratchRegister);
+ ma_push(ScratchRegister);
+ return label;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset label = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return label;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void writeCodePointer(CodeLabel* label) {
+ BufferOffset off = writeInst(-1);
+ label->patchAt()->bind(off.getOffset());
+ label->setLinkMode(CodeLabel::RawPointer);
+ }
+
+ void jump(Label* label) { ma_b(label); }
+ void jump(Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void jump(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+
+ void jump(JitCode* code) { branch(code); }
+
+ void jump(ImmPtr ptr) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ptr, RelocationKind::HARDCODED);
+ ma_jump(ptr);
+ }
+
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+
+ void negl(Register reg) { ma_negu(reg, reg); }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ MOZ_ASSERT(value.typeReg() == tag);
+ }
+
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest, JSValueType);
+ void unboxNonDouble(const Address& src, Register dest, JSValueType);
+ void unboxNonDouble(const BaseIndex& src, Register dest, JSValueType);
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxDouble(const BaseIndex& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxBigInt(const ValueOperand& operand, Register dest);
+ void unboxBigInt(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObjectOrNull(const Address& src, Register dest);
+ void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType);
+
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ unboxObject(src, dest);
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ unboxObject(src, dest);
+ }
+
+ void notBoolean(const ValueOperand& val) {
+ as_xori(val.payloadReg(), val.payloadReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch);
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractString(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch);
+ [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ return value.typeReg();
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& address, FloatRegister dest);
+ void loadInt32OrDouble(Register base, Register index, FloatRegister dest,
+ int32_t shift = defaultShift);
+ void loadConstantDouble(double dp, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest);
+
+ // higher level tag testing code
+ Operand ToPayload(Operand base);
+ Address ToPayload(Address base) {
+ return ToPayload(Operand(base)).toAddress();
+ }
+
+ BaseIndex ToPayload(BaseIndex base) {
+ return BaseIndex(base.base, base.index, base.scale,
+ base.offset + NUNBOX32_PAYLOAD_OFFSET);
+ }
+
+ protected:
+ Operand ToType(Operand base);
+ Address ToType(Address base) { return ToType(Operand(base)).toAddress(); }
+
+ uint32_t getType(const Value& val);
+ void moveData(const Value& val, Register data);
+
+ public:
+ void moveValue(const Value& val, Register type, Register data);
+
+ void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ ma_lw(dest.gpr(), ToPayload(address));
+ }
+ }
+
+ void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
+ } else {
+ load32(ToPayload(address), dest.gpr());
+ }
+ }
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes,
+ JSValueType) {
+ switch (nbytes) {
+ case 4:
+ store32(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void moveValue(const Value& val, const ValueOperand& dest);
+
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ Register s0 = src.typeReg(), d0 = dest.typeReg(), s1 = src.payloadReg(),
+ d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ MOZ_ASSERT(d1 != ScratchRegister);
+ MOZ_ASSERT(d0 != ScratchRegister);
+ move32(d1, ScratchRegister);
+ move32(d0, d1);
+ move32(ScratchRegister, d0);
+ return;
+ }
+ // If only one is, copy that source first.
+ std::swap(s0, s1);
+ std::swap(d0, d1);
+ }
+
+ if (s0 != d0) {
+ move32(s0, d0);
+ }
+ if (s1 != d1) {
+ move32(s1, d1);
+ }
+ }
+
+ void storeValue(ValueOperand val, Operand dst);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(Operand dest, ValueOperand val) {
+ loadValue(dest.toAddress(), val);
+ }
+ void loadValue(const BaseIndex& addr, ValueOperand val);
+
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+#if MOZ_LITTLE_ENDIAN()
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isGCThing()) {
+ push(ImmGCPtr(val.toGCThing()));
+ } else {
+ push(Imm32(val.toNunboxPayload()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_push(reg);
+ }
+#else
+ void pushValue(const Value& val) {
+ if (val.isGCThing()) {
+ push(ImmGCPtr(val.toGCThing()));
+ } else {
+ push(Imm32(val.toNunboxPayload()));
+ }
+ push(Imm32(val.toNunboxTag()));
+ }
+ void pushValue(JSValueType type, Register reg) {
+ ma_push(reg);
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ }
+#endif
+ void pushValue(const Address& addr);
+
+ void storePayload(const Value& val, Address dest);
+ void storePayload(Register src, Address dest);
+ void storePayload(const Value& val, const BaseIndex& dest);
+ void storePayload(Register src, const BaseIndex& dest);
+ void storeTypeTag(ImmTag tag, Address dest);
+ void storeTypeTag(ImmTag tag, const BaseIndex& dest);
+
+ void handleFailureWithHandlerTail(Label* profilerExitTail);
+
+ template <typename T>
+ void wasmAtomicStore64(const wasm::MemoryAccessDesc& access, const T& mem,
+ Register temp, Register64 value);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeHalfWord, SignExtend);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeHalfWord, ZeroExtend);
+ }
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ ma_load_unaligned(dest, src);
+ }
+
+ void load64(const Address& address, Register64 dest) {
+ load32(LowWord(address), dest.low);
+ load32(HighWord(address), dest.high);
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ load32(LowWord(address), dest.low);
+ load32(HighWord(address), dest.high);
+ }
+
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ ma_load_unaligned(dest.low, LowWord(src));
+ ma_load_unaligned(dest.high, HighWord(src));
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp,
+ FloatRegister dest);
+
+ void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp,
+ FloatRegister dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ template <typename T>
+ void store16Unaligned(Register src, const T& dest) {
+ ma_store_unaligned(src, dest, SizeHalfWord);
+ }
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ template <typename T>
+ void store32Unaligned(Register src, const T& dest) {
+ ma_store_unaligned(src, dest);
+ }
+
+ void store64(Register64 src, Address address) {
+ store32(src.low, Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(src.high, Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+ void store64(Register64 src, const BaseIndex& address) {
+ store32(src.low, Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(src.high, Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+
+ void store64(Imm64 imm, Address address) {
+ store32(imm.low(), Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(imm.hi(), Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+ void store64(Imm64 imm, const BaseIndex& address) {
+ store32(imm.low(), Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(imm.hi(), Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+
+ template <typename T>
+ void store64Unaligned(Register64 src, const T& dest) {
+ ma_store_unaligned(src.low, LowWord(dest));
+ ma_store_unaligned(src.high, HighWord(dest));
+ }
+
+ template <typename T>
+ void storePtr(ImmWord imm, T address);
+ template <typename T>
+ void storePtr(ImmPtr imm, T address);
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp,
+ const BaseIndex& dest);
+ void storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp,
+ const BaseIndex& dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) { as_movd(dest, src); }
+
+ void zeroDouble(FloatRegister reg) {
+ moveToDoubleLo(zero, reg);
+ moveToDoubleHi(zero, reg);
+ }
+
+ void breakpoint();
+
+ void checkStackAlignment();
+
+ void alignStackPointer();
+ void restoreStackPointer();
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ // If source is a double, load it into dest. If source is int32,
+ // convert it to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ void cmp64Set(Condition cond, Register64 lhs, Register64 rhs, Register dest);
+ void cmp64Set(Condition cond, Register64 lhs, Imm64 val, Register dest);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ void enterAtomic64Region(Register addr, Register spinlock, Register tmp);
+ void exitAtomic64Region(Register spinlock);
+ void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register64 output, Register tmp);
+ void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+ Condition ma_cmp64(Condition cond, Register64 lhs, Register64 rhs,
+ Register dest);
+ Condition ma_cmp64(Condition cond, Register64 lhs, Imm64 val, Register dest);
+
+ public:
+ void lea(Operand addr, Register dest) {
+ ma_addu(dest, addr.baseReg(), Imm32(addr.disp()));
+ }
+
+ void abiret() {
+ as_jr(ra);
+ as_nop();
+ }
+
+ void ma_storeImm(Imm32 imm, const Address& addr) { ma_sw(imm, addr); }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_movs(dest, src);
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerMIPSCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MacroAssembler_mips32_h */
diff --git a/js/src/jit/mips32/MoveEmitter-mips32.cpp b/js/src/jit/mips32/MoveEmitter-mips32.cpp
new file mode 100644
index 0000000000..2f52c73899
--- /dev/null
+++ b/js/src/jit/mips32/MoveEmitter-mips32.cpp
@@ -0,0 +1,152 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/MoveEmitter-mips32.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterMIPS::breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId) {
+  // There is a cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
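+  //
+  // For example, resolving the general-register swap cycle (t0 -> t1),
+  // (t1 -> t0) produces roughly this sequence (illustrative only):
+  //
+  //   breakCycle(t0, t1):    sw t1, <cycle slot>   ; save the value about to
+  //                                                ; be clobbered
+  //   original move:         move t1, t0
+  //   completeCycle(t1, t0): lw t0, <cycle slot>   ; restore into t0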
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(getAdjustedAddress(to), temp);
+        // Since it is uncertain whether the load will be aligned or not,
+        // just fill both slots with the same value.
+ masm.storeFloat32(temp, cycleSlot(slotId, 0));
+ masm.storeFloat32(temp, cycleSlot(slotId, 4));
+ } else {
+ // Just always store the largest possible size.
+ masm.storeDouble(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(getAdjustedAddress(to), temp);
+ masm.storeDouble(temp, cycleSlot(slotId, 0));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId, 0));
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+ [[fallthrough]];
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(getAdjustedAddress(to), temp);
+ masm.storePtr(temp, cycleSlot(0, 0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.storePtr(to.reg(), cycleSlot(0, 0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterMIPS::completeCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // There is a cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(cycleSlot(slotId, 0), temp);
+ masm.storeFloat32(temp, getAdjustedAddress(to));
+ } else {
+ uint32_t offset = 0;
+ if (from.floatReg().numAlignedAliased() == 1) {
+ offset = sizeof(float);
+ }
+ masm.loadFloat32(cycleSlot(slotId, offset), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(cycleSlot(slotId, 0), temp);
+ masm.storeDouble(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId, 0), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+ [[fallthrough]];
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(cycleSlot(0, 0), temp);
+ masm.storePtr(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.loadPtr(cycleSlot(0, 0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterMIPS::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+ // Used for passing double parameter in a2,a3 register pair.
+ // Two moves are added for one double parameter by
+ // MacroAssembler::passABIArg
+ MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
+ "Invalid emitDoubleMove arguments.");
+ masm.moveFromDoubleLo(from.floatReg(), a2);
+ masm.moveFromDoubleHi(from.floatReg(), a3);
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+ // Used for passing double parameter in a2,a3 register pair.
+ // Two moves are added for one double parameter by
+ // MacroAssembler::passABIArg
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
+ "Invalid emitDoubleMove arguments.");
+ masm.loadPtr(getAdjustedAddress(from), a2);
+ masm.loadPtr(
+ Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
+ }
+}
diff --git a/js/src/jit/mips32/MoveEmitter-mips32.h b/js/src/jit/mips32/MoveEmitter-mips32.h
new file mode 100644
index 0000000000..4a669e9991
--- /dev/null
+++ b/js/src/jit/mips32/MoveEmitter-mips32.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MoveEmitter_mips32_h
+#define jit_mips32_MoveEmitter_mips32_h
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPS : public MoveEmitterMIPSShared {
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+
+ public:
+ MoveEmitterMIPS(MacroAssembler& masm) : MoveEmitterMIPSShared(masm) {}
+};
+
+typedef MoveEmitterMIPS MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MoveEmitter_mips32_h */
diff --git a/js/src/jit/mips32/SharedICRegisters-mips32.h b/js/src/jit/mips32/SharedICRegisters-mips32.h
new file mode 100644
index 0000000000..a1017c9399
--- /dev/null
+++ b/js/src/jit/mips32/SharedICRegisters-mips32.h
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_SharedICRegisters_mips32_h
+#define jit_mips32_SharedICRegisters_mips32_h
+
+#include "jit/mips32/Assembler-mips32.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+static constexpr ValueOperand R0(a3, a2);
+static constexpr ValueOperand R1(s7, s6);
+static constexpr ValueOperand R2(t7, t6);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = t5;
+
+// Register used internally by MacroAssemblerMIPS.
+static constexpr Register BaselineSecondScratchReg = SecondScratchReg;
+
+// Note that ICTailCallReg is actually just the link register.
+// In MIPS code emission, we do not clobber ICTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = f0;
+static constexpr FloatRegister FloatReg1 = f2;
+static constexpr FloatRegister FloatReg2 = f4;
+static constexpr FloatRegister FloatReg3 = f6;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_SharedICRegisters_mips32_h */
diff --git a/js/src/jit/mips32/Simulator-mips32.cpp b/js/src/jit/mips32/Simulator-mips32.cpp
new file mode 100644
index 0000000000..ed5d30eaf1
--- /dev/null
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -0,0 +1,3629 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/mips32/Simulator-mips32.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <float.h>
+
+#include "jit/AtomicOperations.h"
+#include "jit/mips32/Assembler-mips32.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#define I8(v) static_cast<int8_t>(v)
+#define I16(v) static_cast<int16_t>(v)
+#define U16(v) static_cast<uint16_t>(v)
+#define I32(v) static_cast<int32_t>(v)
+#define U32(v) static_cast<uint32_t>(v)
+
+namespace js {
+namespace jit {
+
+static const Instr kCallRedirInstr =
+ op_special | MAX_BREAK_CODE << FunctionBits | ff_break;
+
+// Utility functions.
+static bool HaveSameSign(int32_t a, int32_t b) { return ((a ^ b) >= 0); }
+
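+// Map a floating-point condition code to its FCSR bit: FCC0 lives in bit 23,
+// while FCC1..FCC7 occupy bits 25..31, hence the asymmetric mapping below.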
+static uint32_t GetFCSRConditionBit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
+}
+
+static const int32_t kRegisterskMaxValue = 0x7fffffff;
+static const int32_t kRegisterskMinValue = 0x80000000;
+
+// -----------------------------------------------------------------------------
+// MIPS assembly various constants.
+
+class SimInstruction {
+ public:
+ enum {
+ kInstrSize = 4,
+    // On MIPS the PC cannot actually be accessed directly. We behave as if it
+    // were always the value of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const { return (instructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Instruction type.
+ enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 };
+
+ // Get the encoding type of the instruction.
+ Type instructionType() const;
+
+ // Accessors for the different named fields used in the MIPS encoding.
+ inline OpcodeField opcodeValue() const {
+ return static_cast<OpcodeField>(
+ bits(OpcodeShift + OpcodeBits - 1, OpcodeShift));
+ }
+
+ inline int rsValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(RSShift + RSBits - 1, RSShift);
+ }
+
+ inline int rtValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(RTShift + RTBits - 1, RTShift);
+ }
+
+ inline int rdValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(RDShift + RDBits - 1, RDShift);
+ }
+
+ inline int saValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(SAShift + SABits - 1, SAShift);
+ }
+
+ inline int functionValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+
+ inline int fdValue() const { return bits(FDShift + FDBits - 1, FDShift); }
+
+ inline int fsValue() const { return bits(FSShift + FSBits - 1, FSShift); }
+
+ inline int ftValue() const { return bits(FTShift + FTBits - 1, FTShift); }
+
+ inline int frValue() const { return bits(FRShift + FRBits - 1, FRShift); }
+
+ // Float Compare condition code instruction bits.
+ inline int fcccValue() const {
+ return bits(FCccShift + FCccBits - 1, FCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int fbccValue() const {
+ return bits(FBccShift + FBccBits - 1, FBccShift);
+ }
+
+ // Float Branch true/false instruction bit.
+ inline int fbtrueValue() const {
+ return bits(FBtrueShift + FBtrueBits - 1, FBtrueShift);
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline OpcodeField opcodeFieldRaw() const {
+ return static_cast<OpcodeField>(instructionBits() & OpcodeMask);
+ }
+
+ inline int rsFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return instructionBits() & RSMask;
+ }
+
+ // Same as above function, but safe to call within instructionType().
+ inline int rsFieldRawNoAssert() const { return instructionBits() & RSMask; }
+
+ inline int rtFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return instructionBits() & RTMask;
+ }
+
+ inline int rdFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & RDMask;
+ }
+
+ inline int saFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & SAMask;
+ }
+
+ inline int functionFieldRaw() const {
+ return instructionBits() & FunctionMask;
+ }
+
+ // Get the secondary field according to the opcode.
+ inline int secondaryValue() const {
+ OpcodeField op = opcodeFieldRaw();
+ switch (op) {
+ case op_special:
+ case op_special2:
+ return functionValue();
+ case op_cop1:
+ return rsValue();
+ case op_regimm:
+ return rtValue();
+ default:
+ return ff_null;
+ }
+ }
+
+ inline int32_t imm16Value() const {
+ MOZ_ASSERT(instructionType() == kImmediateType);
+ return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+
+ inline int32_t imm26Value() const {
+ MOZ_ASSERT(instructionType() == kJumpType);
+ return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+
+  // Say whether the instruction should not be used in a branch delay slot.
+  bool isForbiddenInBranchDelay() const;
+  // Say whether the instruction 'links', e.g. jal, bal.
+  bool isLinkingInstruction() const;
+  // Say whether the instruction is a debugger break/trap.
+ bool isTrap() const;
+
+ private:
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+bool SimInstruction::isForbiddenInBranchDelay() const {
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_j:
+ case op_jal:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bgez:
+ case rt_bltzal:
+ case rt_bgezal:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ default:
+ return false;
+ }
+}
+
+bool SimInstruction::isLinkingInstruction() const {
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_jal:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bgezal:
+ case rt_bltzal:
+ return true;
+ default:
+ return false;
+ };
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ default:
+ return false;
+ };
+}
+
+bool SimInstruction::isTrap() const {
+ if (opcodeFieldRaw() != op_special) {
+ return false;
+ } else {
+ switch (functionFieldRaw()) {
+ case ff_break:
+ return instructionBits() != kCallRedirInstr;
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ return bits(15, 6) != kWasmTrapCode;
+ default:
+ return false;
+ };
+ }
+}
+
+SimInstruction::Type SimInstruction::instructionType() const {
+ switch (opcodeFieldRaw()) {
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ case ff_break:
+ case ff_sll:
+ case ff_srl:
+ case ff_sra:
+ case ff_sllv:
+ case ff_srlv:
+ case ff_srav:
+ case ff_mfhi:
+ case ff_mflo:
+ case ff_mult:
+ case ff_multu:
+ case ff_div:
+ case ff_divu:
+ case ff_add:
+ case ff_addu:
+ case ff_sub:
+ case ff_subu:
+ case ff_and:
+ case ff_or:
+ case ff_xor:
+ case ff_nor:
+ case ff_slt:
+ case ff_sltu:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ case ff_movz:
+ case ff_movn:
+ case ff_movci:
+ case ff_sync:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special2:
+ switch (functionFieldRaw()) {
+ case ff_mul:
+ case ff_madd:
+ case ff_maddu:
+ case ff_clz:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special3:
+ switch (functionFieldRaw()) {
+ case ff_ins:
+ case ff_ext:
+ case ff_bshfl:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_cop1: // Coprocessor instructions.
+ switch (rsFieldRawNoAssert()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ };
+ break;
+ case op_cop1x:
+ return kRegisterType;
+    // 16-bit immediate type instructions, e.g.: addi dest, src, imm16.
+ case op_regimm:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_addi:
+ case op_addiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ case op_lb:
+ case op_lh:
+ case op_lwl:
+ case op_lw:
+ case op_lbu:
+ case op_lhu:
+ case op_lwr:
+ case op_sb:
+ case op_sh:
+ case op_swl:
+ case op_sw:
+ case op_swr:
+ case op_lwc1:
+ case op_ldc1:
+ case op_swc1:
+ case op_sdc1:
+ case op_ll:
+ case op_sc:
+ return kImmediateType;
+    // 26-bit immediate type instructions, e.g.: j imm26.
+ case op_j:
+ case op_jal:
+ return kJumpType;
+ default:
+ return kUnsupported;
+ }
+ return kUnsupported;
+}
+
+// C/C++ argument slots size.
+const int kCArgSlotCount = 4;
+const int kCArgsSlotsSize = kCArgSlotCount * SimInstruction::kInstrSize;
+const int kBranchReturnOffset = 2 * SimInstruction::kInstrSize;
+
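+// The simulated instruction cache is tracked at a 4 KB page / 4-byte line
+// granularity. For reference, with kPageShift == 12 and kLineShift == 2 a
+// code address such as 0x2468acf1 decomposes as follows (the same arithmetic
+// is used by checkICacheLocked below):
+//
+//   void* page = (void*)(addr & ~CachePage::kPageMask);  // 0x2468a000
+//   int offset = addr & CachePage::kPageMask;            // 0xcf1
+//   void* line = (void*)(addr & ~CachePage::kLineMask);  // 0x2468acf0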
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ AutoLockSimulatorCache() : Base(SimulatorProcess::singleton_->cacheLock_) {}
+};
+
+mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ SimulatorProcess::ICacheCheckingDisableCount(
+ 1); // Checking is disabled by default.
+SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
+
+int Simulator::StopSimAt = -1;
+
+Simulator* Simulator::Create() {
+ auto sim = MakeUnique<Simulator>();
+ if (!sim) {
+ return nullptr;
+ }
+
+ if (!sim->init()) {
+ return nullptr;
+ }
+
+ char* stopAtStr = getenv("MIPS_SIM_STOP_AT");
+ int64_t stopAt;
+ if (stopAtStr && sscanf(stopAtStr, "%lld", &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %lld\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim.release();
+}
+
+void Simulator::Destroy(Simulator* sim) { js_delete(sim); }
+
+// The MipsDebugger class is used by the simulator while debugging simulated
+// code.
+class MipsDebugger {
+ public:
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) {}
+
+ void stop(SimInstruction* instr);
+ void debug();
+ // Print all registers with a nice formatting.
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xfffff to easily recognize it.
+ static const Instr kBreakpointInstr = op_special | ff_break | 0xfffff << 6;
+ static const Instr kNopInstr = op_special | ff_sll;
+
+ Simulator* sim_;
+
+ int32_t getRegisterValue(int regnum);
+ int32_t getFPURegisterValueInt(int regnum);
+ int64_t getFPURegisterValueLong(int regnum);
+ float getFPURegisterValueFloat(int regnum);
+ double getFPURegisterValueDouble(int regnum);
+ bool getValue(const char* desc, int32_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+static void UNSUPPORTED() {
+ printf("Unsupported instruction.\n");
+ MOZ_CRASH();
+}
+
+void MipsDebugger::stop(SimInstruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg =
+ *reinterpret_cast<char**>(sim_->get_pc() + SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watchedStops_[code].desc_) {
+ sim_->watchedStops_[code].desc_ = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int32_t MipsDebugger::getRegisterValue(int regnum) {
+ if (regnum == kPCRegister) {
+ return sim_->get_pc();
+ }
+ return sim_->getRegister(regnum);
+}
+
+int32_t MipsDebugger::getFPURegisterValueInt(int regnum) {
+ return sim_->getFpuRegister(regnum);
+}
+
+int64_t MipsDebugger::getFPURegisterValueLong(int regnum) {
+ return sim_->getFpuRegisterLong(regnum);
+}
+
+float MipsDebugger::getFPURegisterValueFloat(int regnum) {
+ return sim_->getFpuRegisterFloat(regnum);
+}
+
+double MipsDebugger::getFPURegisterValueDouble(int regnum) {
+ return sim_->getFpuRegisterDouble(regnum);
+}
+
+bool MipsDebugger::getValue(const char* desc, int32_t* value) {
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+
+ if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+ }
+ return sscanf(desc, "%i", value) == 1;
+}
+
+bool MipsDebugger::setBreakpoint(SimInstruction* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != nullptr) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+  // We do not set the breakpoint instruction in the code itself here; it will
+  // be set when the debugger shell continues.
+ return true;
+}
+
+bool MipsDebugger::deleteBreakpoint(SimInstruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void MipsDebugger::undoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+}
+
+void MipsDebugger::redoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+ }
+}
+
+void MipsDebugger::printAllRegs() {
+ int32_t value;
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%08x %10d ", Registers::GetName(i), value, value);
+
+ if (i % 2) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+
+ value = getRegisterValue(Simulator::LO);
+ printf(" LO: 0x%08x %10d ", value, value);
+ value = getRegisterValue(Simulator::HI);
+ printf(" HI: 0x%08x %10d\n", value, value);
+ value = getRegisterValue(Simulator::pc);
+ printf(" pc: 0x%08x\n", value);
+}
+
+void MipsDebugger::printAllRegsIncludingFPU() {
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ for (uint32_t i = 0; i < FloatRegisters::RegisterIdLimit; i++) {
+ if (i & 0x1) {
+ printf("%3s: 0x%08x\tflt: %-8.4g\n", FloatRegisters::GetName(i),
+ getFPURegisterValueInt(i), getFPURegisterValueFloat(i));
+ } else {
+ printf("%3s: 0x%08x\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(i), getFPURegisterValueInt(i),
+ getFPURegisterValueFloat(i), getFPURegisterValueDouble(i));
+ }
+ }
+}
+
+static char* ReadLine(const char* prompt) {
+ UniqueChars result;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+      // Since we read a newline we are done reading the line. This
+      // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result.reset(js_pod_malloc<char>(len + 1));
+ if (!result) {
+ return nullptr;
+ }
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = js_pod_malloc<char>(new_len);
+ if (!new_result) {
+ return nullptr;
+ }
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result.get(), offset * sizeof(char));
+ result.reset(new_result);
+ }
+ // Copy the newly read line into the result.
+ memcpy(result.get() + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result.release();
+}
+
+static void DisassembleInstruction(uint32_t pc) {
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(pc);
+ char hexbytes[256];
+ sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2],
+ bytes[3]);
+ char llvmcmd[1024];
+ sprintf(llvmcmd,
+ "bash -c \"echo -n '%p'; echo '%s' | "
+ "llvm-mc -disassemble -arch=mipsel -mcpu=mips32r2 | "
+ "grep -v pure_instructions | grep -v .text\"",
+ static_cast<void*>(bytes), hexbytes);
+ if (system(llvmcmd)) {
+ printf("Cannot disassemble instruction.\n");
+ }
+}
+
+void MipsDebugger::debug() {
+ intptr_t lastPC = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (lastPC != sim_->get_pc()) {
+ DisassembleInstruction(sim_->get_pc());
+ lastPC = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+        // Ownership is transferred to sim_.
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!instr->isTrap()) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int32_t value;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ Register reg = Register::FromName(arg1);
+ FloatRegisters::Code fCode = FloatRegister::FromName(arg1);
+ if (reg != InvalidReg) {
+ value = getRegisterValue(reg.code());
+ printf("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (fCode != FloatRegisters::Invalid) {
+ if (fCode & 0x1) {
+ printf("%3s: 0x%08x\tflt: %-8.4g\n",
+ FloatRegisters::GetName(fCode),
+ getFPURegisterValueInt(fCode),
+ getFPURegisterValueFloat(fCode));
+ } else {
+ printf("%3s: 0x%08x\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(fCode),
+ getFPURegisterValueInt(fCode),
+ getFPURegisterValueFloat(fCode),
+ getFPURegisterValueDouble(fCode));
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int32_t* cur = nullptr;
+ int32_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->getRegister(Simulator::sp));
+ } else { // Command "mem".
+ int32_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+ int32_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%08x %10d", cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ DisassembleInstruction(uint32_t(cur));
+ cur += SimInstruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) {
+ printf("setting breakpoint failed\n");
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+      printf("No flags on MIPS!\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address = reinterpret_cast<SimInstruction*>(
+ stop_pc + SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("printobject <register>\n");
+ printf(" print an object from a register (alias 'po')\n");
+ printf("stack [<words>]\n");
+      printf("  dump stack content (default 10 words)\n");
+ printf("mem <address> [<words>]\n");
+      printf("  dump memory content (default 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+      printf("    stop and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+      printf("    stop info all/<code> : print info about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::setLastDebuggerInput(char* input) {
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* page) {
+ SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p) {
+ return p->value();
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page)) {
+ oomUnsafe.crash("Simulator CachePage");
+ }
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ intptr_t start, int size) {
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
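+// Invalidate the simulated icache for [start_addr, start_addr + size),
+// widened to cache-line granularity. For example (illustrative, with 4 KB
+// pages): flushing 8 bytes starting at 0x1ffa is first widened to the
+// line-aligned range [0x1ff8, 0x2004), and then split into
+// FlushOnePageLocked(0x1ff8, 8) on the first page followed by
+// FlushOnePageLocked(0x2000, 4) on the second.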
+static void FlushICacheLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* start_addr, size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePageLocked(i_cache, start, size);
+ }
+}
+
+/* static */
+void SimulatorProcess::checkICacheLocked(SimInstruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(icache(), page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ int cmpret =
+ memcmp(reinterpret_cast<void*>(instr), cache_page->cachedData(offset),
+ SimInstruction::kInstrSize);
+ MOZ_ASSERT(cmpret == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber SimulatorProcess::ICacheHasher::hash(const Lookup& l) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool SimulatorProcess::ICacheHasher::match(const Key& k, const Lookup& l) {
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+/* static */
+void SimulatorProcess::FlushICache(void* start_addr, size_t size) {
+ if (!ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ js::jit::FlushICacheLocked(icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator() {
+ // Set up simulator support first. Some of this information is needed to
+  // set up the architecture state.
+
+ // Note, allocation and anything that depends on allocated memory is
+ // deferred until init(), in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < Register::kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
+ LLBit_ = false;
+ LLAddr_ = 0;
+ lastLLValue_ = 0;
+
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
+
+ lastDebuggerInput_ = nullptr;
+}
+
+bool Simulator::init() {
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = js_pod_malloc<char>(stackSize);
+ if (!stack_) {
+ return false;
+ }
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+  // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To guard against potential stack underflows we
+  // leave some buffer below.
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump at a known
+// offset from the swi instruction so the simulator knows what to call.
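+//
+// Illustrative sketch of how such a redirected call is resolved once the
+// simulator traps on kCallRedirInstr (the real dispatch, covering every
+// ABIFunctionType, happens in the software-interrupt handling further below),
+// e.g. for an Args_General2-typed call:
+//
+//   Redirection* redir = Redirection::FromSwiInstruction(instr);
+//   auto target = reinterpret_cast<Prototype_General2>(
+//       reinterpret_cast<intptr_t>(redir->nativeFunction()));
+//   setCallResult(target(getRegister(a0), getRegister(a1)));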
+class Redirection {
+ friend class SimulatorProcess;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(kCallRedirInstr),
+ type_(type),
+ next_(nullptr) {
+ next_ = SimulatorProcess::redirection();
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ FlushICacheLocked(SimulatorProcess::icache(), addressOfSwiInstruction(),
+ SimInstruction::kInstrSize);
+ }
+ SimulatorProcess::setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ AutoLockSimulatorCache als;
+
+ Redirection* current = SimulatorProcess::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ // Note: we can't use js_new here because the constructor is private.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = js_pod_malloc<Redirection>(1);
+ if (!redir) {
+ oomUnsafe.crash("Simulator redirection");
+ }
+ new (redir) Redirection(nativeFunction, type);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection =
+ addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+Simulator::~Simulator() { js_free(stack_); }
+
+SimulatorProcess::SimulatorProcess()
+ : cacheLock_(mutexid::SimulatorCacheLock), redirection_(nullptr) {
+ if (getenv("MIPS_SIM_ICACHE_CHECKS")) {
+ ICacheCheckingDisableCount = 0;
+ }
+}
+
+SimulatorProcess::~SimulatorProcess() {
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */
+void* Simulator::RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::Current() {
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ return cx->simulator();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::setRegister(int reg, int32_t value) {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::setFpuRegister(int fpureg, int32_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::setFpuRegisterFloat(int fpureg, float value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterDouble(int fpureg, double value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters) &&
+ ((fpureg % 2) == 0));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int32_t Simulator::getRegister(int reg) const {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == 0) {
+ return 0;
+ }
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+double Simulator::getDoubleFromRegisterPair(int reg) {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters) &&
+ ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+  // Read the bits from the unsigned integer registers_[] array
+ // into the double precision floating point value and return it.
+ memcpy(&dm_val, &registers_[reg], sizeof(dm_val));
+ return (dm_val);
+}
+
+int32_t Simulator::getFpuRegister(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int64_t Simulator::getFpuRegisterLong(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters) &&
+ ((fpureg % 2) == 0));
+ return *mozilla::BitwiseCast<int64_t*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+float Simulator::getFpuRegisterFloat(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+double Simulator::getFpuRegisterDouble(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters) &&
+ ((fpureg % 2) == 0));
+ return *mozilla::BitwiseCast<double*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here,
+// from a0-a3 or f12 and f14.
+void Simulator::getFpArgs(double* x, double* y, int32_t* z) {
+ *x = getFpuRegisterDouble(12);
+ *y = getFpuRegisterDouble(14);
+ *z = getRegister(a2);
+}
+
+void Simulator::getFpFromStack(int32_t* stack, double* x) {
+ MOZ_ASSERT(stack);
+ MOZ_ASSERT(x);
+ memcpy(x, stack, sizeof(double));
+}
+
+void Simulator::setCallResultDouble(double result) {
+ setFpuRegisterDouble(f0, result);
+}
+
+void Simulator::setCallResultFloat(float result) {
+ setFpuRegisterFloat(f0, result);
+}
+
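+// A 64-bit result is returned in the v0/v1 pair, low word in v0 (O32
+// convention). For example, setCallResult(0x0123456789abcdefLL) leaves
+// 0x89abcdef in v0 and 0x01234567 in v1.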
+void Simulator::setCallResult(int64_t res) {
+ setRegister(v0, static_cast<int32_t>(res));
+ setRegister(v1, static_cast<int32_t>(res >> 32));
+}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::setFCSRBit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+bool Simulator::testFCSRBit(uint32_t cc) { return FCSR_ & (1 << cc); }
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
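+// For example, a conversion whose rounded result is 2147483648.0 (one past
+// INT_MAX) sets the overflow and invalid-operation bits and returns true, so
+// the caller can substitute the architecture-defined invalid result.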
+bool Simulator::setFCSRRoundError(double original, double rounded) {
+ bool ret = false;
+
+ setFCSRBit(kFCSRInexactCauseBit, false);
+ setFCSRBit(kFCSRUnderflowCauseBit, false);
+ setFCSRBit(kFCSROverflowCauseBit, false);
+ setFCSRBit(kFCSRInvalidOpCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ setFCSRBit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ setFCSRBit(kFCSRUnderflowFlagBit, true);
+ setFCSRBit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded > INT_MAX || rounded < INT_MIN) {
+ setFCSRBit(kFCSROverflowFlagBit, true);
+ setFCSRBit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int32_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t Simulator::get_pc() const { return registers_[pc]; }
+
+JS::ProfilingFrameIterator::RegisterState Simulator::registerState() {
+ wasm::RegisterState state;
+ state.pc = (void*)get_pc();
+ state.fp = (void*)getRegister(fp);
+ state.sp = (void*)getRegister(sp);
+ state.lr = (void*)getRegister(ra);
+ return state;
+}
+
+// MIPS memory instructions (except lwl/r and swl/r) trap on unaligned memory
+// access, enabling the OS to handle them via trap-and-emulate.
+// Note that in simulator runs the runtime system runs directly on the host
+// system and only generated code is executed in the simulator.
+// Since the host is typically IA32, it will not trap on unaligned memory
+// access. We assume that executing correct generated code will not produce
+// unaligned memory access, so we explicitly check for address alignment and
+// trap. Note that trapping does not occur when executing wasm code, which
+// requires that unaligned memory accesses produce correct results.
+int Simulator::readW(uint32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeW(uint32_t addr, int value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+double Simulator::readD(uint32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return NAN;
+ }
+
+ if ((addr & kDoubleAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned (double) read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeD(uint32_t addr, double value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ if ((addr & kDoubleAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned (double) write at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint16_t Simulator::readHU(uint32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return 0xffff;
+ }
+
+ if ((addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int16_t Simulator::readH(uint32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return -1;
+ }
+
+ if ((addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned signed halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeH(uint32_t addr, uint16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if ((addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void Simulator::writeH(uint32_t addr, int16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if ((addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint32_t Simulator::readBU(uint32_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return 0xff;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int32_t Simulator::readB(uint32_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeB(uint32_t addr, uint8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+}
+
+void Simulator::writeB(uint32_t addr, int8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+}
+
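+// LL/SC emulation: loadLinkedW records the address and the value read and
+// sets LLBit_; any intervening memory write clears LLBit_. storeConditionalW
+// succeeds only if LLBit_ is still set, and performs the store with a seq-cst
+// compareExchange so it also behaves sensibly for shared memory.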
+int Simulator::loadLinkedW(uint32_t addr, SimInstruction* instr) {
+ if ((addr & kPointerAlignmentMask) == 0) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
+ int32_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalW(uint32_t addr, int value,
+ SimInstruction* instr) {
+  // Correct behavior in this case, as defined by the architecture, is to just
+  // return 0, but there is no point in allowing that here; it is almost
+  // certainly an indicator of a bug.
+ if (addr != LLAddr_) {
+ printf("SC to bad address: 0x%08x, pc=0x%08" PRIxPTR ", expected: 0x%08x\n",
+ addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0) {
+ SharedMem<int32_t*> ptr =
+ SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+
+ if (!LLBit_) {
+ return 0;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int32_t expected = lastLLValue_;
+ int32_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
+ return (old == expected) ? 1 : 0;
+ }
+ printf("Unaligned SC at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+uintptr_t Simulator::stackLimit() const { return stackLimit_; }
+
+uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; }
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = getRegister(sp);
+ }
+ return newsp <= stackLimit();
+}
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Unsupported instructions use format to print an error and stop execution.
+void Simulator::format(SimInstruction* instr, const char* format) {
+ printf("Simulator found unsupported instruction:\n 0x%08" PRIxPTR ": %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
+// Note: With the code below we assume that all runtime calls return a 64-bit
+// result. If they don't, the v1 result register contains a bogus value, which
+// is fine because it is caller-saved.
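+// Each Prototype_* typedef below names one native-call signature the
+// simulator can redirect to; softwareInterrupt() dispatches on the
+// Redirection's ABI type and casts the target function pointer to the
+// matching prototype.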
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int32_t arg0);
+typedef int64_t (*Prototype_General2)(int32_t arg0, int32_t arg1);
+typedef int64_t (*Prototype_General3)(int32_t arg0, int32_t arg1, int32_t arg2);
+typedef int64_t (*Prototype_General4)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3);
+typedef int64_t (*Prototype_General5)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3, int32_t arg4);
+typedef int64_t (*Prototype_General6)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3, int32_t arg4, int32_t arg5);
+typedef int64_t (*Prototype_General7)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3, int32_t arg4, int32_t arg5,
+ int32_t arg6);
+typedef int64_t (*Prototype_General8)(int32_t arg0, int32_t arg1, int32_t arg2,
+ int32_t arg3, int32_t arg4, int32_t arg5,
+ int32_t arg6, int32_t arg7);
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int32_t arg0,
+ int32_t arg1,
+ int32_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int32_t arg0,
+ int32_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int32_t arg0);
+typedef int32_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int64_Double)(double arg0);
+typedef int32_t (*Prototype_Int_DoubleIntInt)(double arg0, int32_t arg1,
+ int32_t arg2);
+typedef int32_t (*Prototype_Int_IntDoubleIntInt)(int32_t arg0, double arg1,
+ int32_t arg2, int32_t arg3);
+typedef float (*Prototype_Float32_Float32)(float arg0);
+typedef int32_t (*Prototype_Int_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
+typedef float (*Prototype_Float32_IntInt)(int arg0, int arg1);
+
+typedef double (*Prototype_Double_DoubleInt)(double arg0, int32_t arg1);
+typedef double (*Prototype_Double_IntInt)(int32_t arg0, int32_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int32_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef int32_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);
+
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1,
+ double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0,
+ double arg1,
+ double arg2,
+ double arg3);
+
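+// Under the O32 ABI a 64-bit argument occupies two consecutive 32-bit slots;
+// MakeInt64 reassembles it from the low and high halves.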
+static int64_t MakeInt64(int32_t first, int32_t second) {
+ // Little-endian order.
+ return ((int64_t)second << 32) | (uint32_t)first;
+}
+
+// Software interrupt instructions are used by the simulator to call into C++.
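+// A redirected call site contains the special kCallRedirInstr break; the
+// Redirection record attached to it supplies the native function pointer and
+// the ABI type used for dispatch below.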
+void Simulator::softwareInterrupt(SimInstruction* instr) {
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = (func == ff_break) ? instr->bits(25, 6) : -1;
+
+ // We first check if we met a call_rt_redirected.
+ if (instr->instructionBits() == kCallRedirInstr) {
+#if !defined(USES_O32_ABI)
+ MOZ_CRASH("Only O32 ABI supported.");
+#else
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int32_t arg0 = getRegister(a0);
+ int32_t arg1 = getRegister(a1);
+ int32_t arg2 = getRegister(a2);
+ int32_t arg3 = getRegister(a3);
+
+ int32_t* stack_pointer = reinterpret_cast<int32_t*>(getRegister(sp));
+ // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+ int32_t arg4 = stack_pointer[4];
+ int32_t arg5 = stack_pointer[5];
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int32_t saved_ra = getRegister(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target =
+ reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target =
+ reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target =
+ reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target =
+ reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target =
+ reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target =
+ reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target =
+ reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target =
+ reinterpret_cast<Prototype_General7>(external);
+ int32_t arg6 = stack_pointer[6];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target =
+ reinterpret_cast<Prototype_General8>(external);
+ int32_t arg6 = stack_pointer[6];
+ int32_t arg7 = stack_pointer[7];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target =
+ reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int_Double target =
+ reinterpret_cast<Prototype_Int_Double>(external);
+ int32_t res = target(dval0);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int_GeneralGeneralGeneralInt64: {
+ Prototype_GeneralGeneralGeneralInt64 target =
+ reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+ // The int64 arg is not split across register and stack
+ int64_t result = target(arg0, arg1, arg2, MakeInt64(arg4, arg5));
+ setCallResult(result);
+ break;
+ }
+ case Args_Int_GeneralGeneralInt64Int64: {
+ Prototype_GeneralGeneralInt64Int64 target =
+ reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+ int64_t result =
+ target(arg0, arg1, MakeInt64(arg2, arg3), MakeInt64(arg4, arg5));
+ setCallResult(result);
+ break;
+ }
+ case Args_Int64_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int64_Double target =
+ reinterpret_cast<Prototype_Int64_Double>(external);
+ int64_t result = target(dval0);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(12);
+ Prototype_Int_DoubleIntInt target =
+ reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int32_t res = target(dval, arg2, arg3);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getDoubleFromRegisterPair(a2);
+ Prototype_Int_IntDoubleIntInt target =
+ reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int32_t res = target(arg0, dval, arg4, arg5);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_Double target =
+ reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
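+        // Under the O32 hard-float ABI the first two single-precision
+        // arguments arrive in $f12 and $f14.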
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Float32_Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Int_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Int_Float32 target =
+ reinterpret_cast<Prototype_Int_Float32>(external);
+ int32_t result = target(fval0);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Float32_Float32Float32: {
+ float fval0;
+ float fval1;
+ fval0 = getFpuRegisterFloat(12);
+ fval1 = getFpuRegisterFloat(14);
+ Prototype_Float32_Float32Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32Float32>(external);
+ float fresult = target(fval0, fval1);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Float32_IntInt: {
+ Prototype_Float32_IntInt target =
+ reinterpret_cast<Prototype_Float32_IntInt>(external);
+ float fresult = target(arg0, arg1);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target =
+ reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntInt: {
+ Prototype_Double_IntInt target =
+ reinterpret_cast<Prototype_Double_IntInt>(external);
+ double dresult = target(arg0, arg1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_DoubleInt target =
+ reinterpret_cast<Prototype_Double_DoubleInt>(external);
+ double dresult = target(dval0, ival);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_DoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ int32_t ival = getRegister(a0);
+ double dval0 = getDoubleFromRegisterPair(a2);
+ Prototype_Double_IntDouble target =
+ reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(ival, dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ int32_t ival = getRegister(a0);
+ double dval0 = getDoubleFromRegisterPair(a2);
+ Prototype_Int_IntDouble target =
+ reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int32_t result = target(ival, dval0);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0, dval1, dval2;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+        // The last argument is on the stack.
+ getFpFromStack(stack_pointer + 4, &dval2);
+ Prototype_Double_DoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0, dval1, dval2, dval3;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+        // The last two arguments are on the stack.
+ getFpFromStack(stack_pointer + 4, &dval2);
+ getFpFromStack(stack_pointer + 6, &dval3);
+ Prototype_Double_DoubleDoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(
+ external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ setCallResultDouble(dresult);
+ break;
+ }
+ default:
+ MOZ_CRASH("call");
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+#endif
+ } else if (func == ff_break && code <= kMaxStopCode) {
+ if (isWatchpoint(code)) {
+ printWatchpoint(code);
+ } else {
+ increaseStopCounter(code);
+ handleStop(code, instr);
+ }
+ } else {
+ switch (func) {
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (instr->bits(15, 6) == kWasmTrapCode) {
+ uint8_t* newPC;
+ if (wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc(int32_t(newPC));
+ return;
+ }
+ }
+ };
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
+ dbg.debug();
+ }
+}
+
+// Stop helper functions.
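+// Each watchedStops_ entry keeps a hit counter in the low bits of count_ and
+// uses kStopDisabledBit as an enabled/disabled flag; desc_, when set, is an
+// extra description printed by printStopInfo.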
+bool Simulator::isWatchpoint(uint32_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+void Simulator::printWatchpoint(uint32_t code) {
+ MipsDebugger dbg(this);
+ ++break_count_;
+ printf(
+ "\n---- break %d marker: %3d (instr count: %8d) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.printAllRegs(); // Print registers and continue running.
+}
+
+void Simulator::handleStop(uint32_t code, SimInstruction* instr) {
+  // Break into the debugger if the stop is enabled; otherwise jump over the
+  // stop and the message address.
+ if (isEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+}
+
+bool Simulator::isStopInstruction(SimInstruction* instr) {
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = static_cast<uint32_t>(instr->bits(25, 6));
+ return (func == ff_break) && code > kMaxWatchpointCode &&
+ code <= kMaxStopCode;
+}
+
+bool Simulator::isEnabledStop(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void Simulator::enableStop(uint32_t code) {
+ if (!isEnabledStop(code)) {
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::disableStop(uint32_t code) {
+ if (isEnabledStop(code)) {
+ watchedStops_[code].count_ |= kStopDisabledBit;
+ }
+}
+
+void Simulator::increaseStopCounter(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+ printf(
+ "Stop counter for code %i has overflowed.\n"
+        "Enabling this code and resetting the counter to 0.\n",
+ code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
+
+// Print a stop status.
+void Simulator::printStopInfo(uint32_t code) {
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
+ count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
+ }
+ }
+}
+
+void Simulator::signalExceptions() {
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0) {
+ MOZ_CRASH("Error: Exception raised.");
+ }
+ }
+}
+
+// Handle execution based on instruction types.
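+// configureTypeRegister() computes ALU results and branch targets into the
+// reference parameters; decodeTypeRegister() then commits them to registers
+// and the PC in its execution switch.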
+void Simulator::configureTypeRegister(SimInstruction* instr, int32_t& alu_out,
+ int64_t& i64hilo, uint64_t& u64hilo,
+ int32_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt) {
+  // Every local variable declared here is const so that results can only be
+  // passed back to decodeTypeRegister through the reference parameters.
+
+ // Instruction fields.
+ const OpcodeField op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int32_t rs = getRegister(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->rtValue();
+ const int32_t rt = getRegister(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->rdValue();
+ const uint32_t sa = instr->saValue();
+
+ const int32_t fs_reg = instr->fsValue();
+
+ // ---------- Configuration.
+ switch (op) {
+ case op_cop1: // Coprocessor instructions.
+ switch (instr->rsFieldRaw()) {
+      case rs_bc1:  // Handled in decodeTypeImmediate; should never come here.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
+ case rs_mfc1:
+ alu_out = getFpuRegister(fs_reg);
+ break;
+ case rs_mfhc1:
+ MOZ_CRASH();
+ break;
+ case rs_ctc1:
+ case rs_mtc1:
+ case rs_mthc1:
+ // Do the store in the execution step.
+ break;
+ case rs_s:
+ case rs_d:
+ case rs_w:
+ case rs_l:
+ case rs_ps:
+ // Do everything in the execution step.
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_cop1x:
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ next_pc = getRegister(instr->rsValue());
+ return_addr_reg = instr->rdValue();
+ break;
+ case ff_sll:
+ alu_out = rt << sa;
+ break;
+ case ff_srl:
+ if (rs_reg == 0) {
+            // Regular logical right shift of a word by a fixed number of
+            // bits. The RS field is always 0.
+ alu_out = rt_u >> sa;
+ } else {
+            // Logical right-rotate of a word by a fixed number of bits. This
+            // is a special case of the SRL instruction, added in MIPS32
+            // Release 2. The RS field is equal to 00001.
+ alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+ }
+ break;
+ case ff_sra:
+ alu_out = rt >> sa;
+ break;
+ case ff_sllv:
+ alu_out = rt << rs;
+ break;
+ case ff_srlv:
+ if (sa == 0) {
+            // Regular logical right-shift of a word by a variable number of
+            // bits. The SA field is always 0.
+ alu_out = rt_u >> rs;
+ } else {
+            // Logical right-rotate of a word by a variable number of bits.
+            // This is a special case of the SRLV instruction, added in MIPS32
+            // Release 2. The SA field is equal to 00001.
+ alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ }
+ break;
+ case ff_srav:
+ alu_out = rt >> rs;
+ break;
+ case ff_mfhi:
+ alu_out = getRegister(HI);
+ break;
+ case ff_mflo:
+ alu_out = getRegister(LO);
+ break;
+ case ff_mult:
+ i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ break;
+ case ff_multu:
+ u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ break;
+ case ff_add:
+ if (HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue - rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue - rt);
+ }
+ }
+ alu_out = rs + rt;
+ break;
+ case ff_addu:
+ alu_out = rs + rt;
+ break;
+ case ff_sub:
+ if (!HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue + rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue + rt);
+ }
+ }
+ alu_out = rs - rt;
+ break;
+ case ff_subu:
+ alu_out = rs - rt;
+ break;
+ case ff_and:
+ alu_out = rs & rt;
+ break;
+ case ff_or:
+ alu_out = rs | rt;
+ break;
+ case ff_xor:
+ alu_out = rs ^ rt;
+ break;
+ case ff_nor:
+ alu_out = ~(rs | rt);
+ break;
+ case ff_slt:
+ alu_out = rs < rt ? 1 : 0;
+ break;
+ case ff_sltu:
+ alu_out = rs_u < rt_u ? 1 : 0;
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ do_interrupt = true;
+ break;
+ case ff_tge:
+ do_interrupt = rs >= rt;
+ break;
+ case ff_tgeu:
+ do_interrupt = rs_u >= rt_u;
+ break;
+ case ff_tlt:
+ do_interrupt = rs < rt;
+ break;
+ case ff_tltu:
+ do_interrupt = rs_u < rt_u;
+ break;
+ case ff_teq:
+ do_interrupt = rs == rt;
+ break;
+ case ff_tne:
+ do_interrupt = rs != rt;
+ break;
+ case ff_movn:
+ case ff_movz:
+ case ff_movci:
+ case ff_sync:
+ // No action taken on decode.
+ break;
+ case ff_div:
+ case ff_divu:
+ // div and divu never raise exceptions.
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
+ break;
+ case ff_mult:
+ i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ break;
+ case ff_multu:
+ u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ break;
+ case ff_madd:
+ i64hilo += static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ break;
+ case ff_maddu:
+ u64hilo += static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ break;
+ case ff_clz:
+ alu_out = rs_u ? __builtin_clz(rs_u) : 32;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+ break;
+ }
+ case ff_ext: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rs_u & (mask << lsb)) >> lsb;
+ break;
+ }
+ case ff_bshfl: { // Mips32r2 instruction.
+ if (16 == sa) { // seb
+ alu_out = I32(I8(rt));
+ } else if (24 == sa) { // seh
+ alu_out = I32(I16(rt));
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void Simulator::decodeTypeRegister(SimInstruction* instr) {
+ // Instruction fields.
+ const OpcodeField op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int32_t rs = getRegister(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->rtValue();
+ const int32_t rt = getRegister(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->rdValue();
+
+ const int32_t fr_reg = instr->frValue();
+ const int32_t fs_reg = instr->fsValue();
+ const int32_t ft_reg = instr->ftValue();
+ const int32_t fd_reg = instr->fdValue();
+ int64_t i64hilo = 0;
+ uint64_t u64hilo = 0;
+
+ // ALU output.
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int32_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr.
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc
+ int32_t next_pc = 0;
+ int32_t return_addr_reg = 31;
+
+ // Set up the variables if needed before executing the instruction.
+ configureTypeRegister(instr, alu_out, i64hilo, u64hilo, next_pc,
+ return_addr_reg, do_interrupt);
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ case rs_mfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_mfhc1:
+ MOZ_CRASH();
+ break;
+ case rs_ctc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
+ case rs_mtc1:
+ FPUregisters_[fs_reg] = registers_[rt_reg];
+ break;
+ case rs_mthc1:
+ MOZ_CRASH();
+ break;
+ case rs_s:
+ float f, ft_value, fs_value;
+ uint32_t cc, fcsr_cc;
+ fs_value = getFpuRegisterFloat(fs_reg);
+ ft_value = getFpuRegisterFloat(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value + ft_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value - ft_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value * ft_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value / ft_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterFloat(fd_reg, fabsf(fs_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterFloat(fd_reg, -fs_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterFloat(fd_reg, sqrtf(fs_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, std::isnan(fs_value) || std::isnan(ft_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (fs_value == ft_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value == ft_value) ||
+ (std::isnan(fs_value) || std::isnan(ft_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (fs_value < ft_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value < ft_value) ||
+ (std::isnan(fs_value) || std::isnan(ft_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (fs_value <= ft_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value <= ft_value) ||
+ (std::isnan(fs_value) || std::isnan(ft_value)));
+ break;
+ case ff_cvt_d_fmt:
+ f = getFpuRegisterFloat(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(f));
+ break;
+ case ff_cvt_w_fmt: // Convert float to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+            case ff_round_w_fmt: {  // Round float to word (round half to
+                                    // even).
+ float rounded = std::floor(fs_value + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_trunc_w_fmt: { // Truncate float to word (round towards 0).
+ float rounded = truncf(fs_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round float to word towards negative
+ // infinity.
+ float rounded = std::floor(fs_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+            case ff_ceil_w_fmt: {  // Round float to word towards positive
+                                   // infinity.
+ float rounded = std::ceil(fs_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_l_fmt:
+ case ff_round_l_fmt:
+ case ff_trunc_l_fmt:
+ case ff_floor_l_fmt:
+ case ff_ceil_l_fmt:
+ case ff_cvt_ps_s:
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ case ff_movf_fmt:
+              // The cc field in MOVF is located in the same place as in
+              // floating-point branch instructions.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ if (testFCSRBit(fcsr_cc)) {
+ setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+ }
+ break;
+ case ff_movz_fmt:
+ if (rt == 0) {
+ setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+ }
+ break;
+ case ff_movn_fmt:
+ if (rt != 0) {
+ setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_d:
+ double dt_value, ds_value;
+ ds_value = getFpuRegisterDouble(fs_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds_value + dt_value);
+ break;
+ case ff_sub_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds_value - dt_value);
+ break;
+ case ff_mul_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds_value * dt_value);
+ break;
+ case ff_div_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds_value / dt_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterDouble(fd_reg, fabs(ds_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterDouble(fd_reg, -ds_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterDouble(fd_reg, sqrt(ds_value));
+ break;
+ case ff_c_un_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFCSRBit(fcsr_cc, std::isnan(ds_value) || std::isnan(dt_value));
+ break;
+ case ff_c_eq_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFCSRBit(fcsr_cc, (ds_value == dt_value));
+ break;
+ case ff_c_ueq_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFCSRBit(fcsr_cc,
+ (ds_value == dt_value) ||
+ (std::isnan(ds_value) || std::isnan(dt_value)));
+ break;
+ case ff_c_olt_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFCSRBit(fcsr_cc, (ds_value < dt_value));
+ break;
+ case ff_c_ult_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFCSRBit(fcsr_cc,
+ (ds_value < dt_value) ||
+ (std::isnan(ds_value) || std::isnan(dt_value)));
+ break;
+ case ff_c_ole_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFCSRBit(fcsr_cc, (ds_value <= dt_value));
+ break;
+ case ff_c_ule_fmt:
+ dt_value = getFpuRegisterDouble(ft_reg);
+ setFCSRBit(fcsr_cc,
+ (ds_value <= dt_value) ||
+ (std::isnan(ds_value) || std::isnan(dt_value)));
+ break;
+ case ff_cvt_w_fmt: // Convert double to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+ case ff_round_w_fmt: { // Round double to word (round half to
+ // even).
+ double rounded = std::floor(ds_value + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - ds_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_trunc_w_fmt: { // Truncate double to word (round towards
+ // 0).
+ double rounded = trunc(ds_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round double to word towards negative
+ // infinity.
+ double rounded = std::floor(ds_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_ceil_w_fmt: { // Round double to word towards positive
+ // infinity.
+ double rounded = std::ceil(ds_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_s_fmt: // Convert double to float (single).
+ setFpuRegisterFloat(fd_reg, static_cast<float>(ds_value));
+ break;
+ case ff_cvt_l_fmt:
+ case ff_trunc_l_fmt:
+ case ff_round_l_fmt:
+ case ff_floor_l_fmt:
+ case ff_ceil_l_fmt:
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ case ff_movf_fmt:
+              // The cc field in MOVF is located in the same place as in
+              // floating-point branch instructions.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ if (testFCSRBit(fcsr_cc)) {
+ setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+ }
+ break;
+ case ff_movz_fmt:
+ if (rt == 0) {
+ setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+ }
+ break;
+ case ff_movn_fmt:
+ if (rt != 0) {
+ setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_w:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_s_fmt: // Convert word to float (single).
+ alu_out = getFpuRegister(fs_reg);
+ setFpuRegisterFloat(fd_reg, static_cast<float>(alu_out));
+ break;
+ case ff_cvt_d_fmt: // Convert word to double.
+ alu_out = getFpuRegister(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(alu_out));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_l:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_d_fmt:
+ case ff_cvt_s_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_ps:
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_cop1x:
+ switch (instr->functionFieldRaw()) {
+ case ff_madd_s:
+ float fr, ft, fs;
+ fr = getFpuRegisterFloat(fr_reg);
+ fs = getFpuRegisterFloat(fs_reg);
+ ft = getFpuRegisterFloat(ft_reg);
+ setFpuRegisterFloat(fd_reg, fs * ft + fr);
+ break;
+ case ff_madd_d:
+ double dr, dt, ds;
+ dr = getFpuRegisterDouble(fr_reg);
+ ds = getFpuRegisterDouble(fs_reg);
+ dt = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds * dt + dr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr: {
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc +
+ SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case ff_jalr: {
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc +
+ SimInstruction::kInstrSize);
+ setRegister(return_addr_reg,
+ current_pc + 2 * SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ // Instructions using HI and LO registers.
+ case ff_mult:
+ setRegister(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ setRegister(HI, static_cast<int32_t>(i64hilo >> 32));
+ break;
+ case ff_multu:
+ setRegister(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ setRegister(HI, static_cast<int32_t>(u64hilo >> 32));
+ break;
+ case ff_div:
+ // Divide by zero and overflow was not checked in the configuration
+ // step - div and divu do not raise exceptions. On division by 0
+ // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
+ // return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ setRegister(LO, INT_MIN);
+ setRegister(HI, 0);
+ } else if (rt != 0) {
+ setRegister(LO, rs / rt);
+ setRegister(HI, rs % rt);
+ }
+ break;
+ case ff_divu:
+ if (rt_u != 0) {
+ setRegister(LO, rs_u / rt_u);
+ setRegister(HI, rs_u % rt_u);
+ }
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (do_interrupt) {
+ softwareInterrupt(instr);
+ }
+ break;
+ case ff_sync:
+ switch (instr->bits(10, 6)) {
+ case 0x0:
+ case 0x4:
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ AtomicOperations::fenceSeqCst();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ // Conditional moves.
+ case ff_movn:
+ if (rt) setRegister(rd_reg, rs);
+ break;
+ case ff_movci: {
+ uint32_t cc = instr->fbccValue();
+ uint32_t fcsr_cc = GetFCSRConditionBit(cc);
+ if (instr->bit(16)) { // Read Tf bit.
+ if (testFCSRBit(fcsr_cc)) setRegister(rd_reg, rs);
+ } else {
+ if (!testFCSRBit(fcsr_cc)) setRegister(rd_reg, rs);
+ }
+ break;
+ }
+ case ff_movz:
+ if (!rt) setRegister(rd_reg, rs);
+ break;
+ default: // For other special opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ }
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ setRegister(rd_reg, alu_out);
+ // HI and LO are UNPREDICTABLE after the operation.
+ setRegister(LO, Unpredictable);
+ setRegister(HI, Unpredictable);
+ break;
+ case ff_madd:
+ setRegister(
+ LO, getRegister(LO) + static_cast<int32_t>(i64hilo & 0xffffffff));
+ setRegister(HI,
+ getRegister(HI) + static_cast<int32_t>(i64hilo >> 32));
+ break;
+ case ff_maddu:
+ setRegister(
+ LO, getRegister(LO) + static_cast<int32_t>(u64hilo & 0xffffffff));
+ setRegister(HI,
+ getRegister(HI) + static_cast<int32_t>(u64hilo >> 32));
+ break;
+ default: // For other special2 opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ }
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins:
+ // Ins instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_ext:
+ // Ext instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_bshfl:
+ setRegister(rd_reg, alu_out);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ // Unimplemented opcodes raised an error in the configuration step before,
+ // so we can use the default here to set the destination register in
+ // common cases.
+ default:
+ setRegister(rd_reg, alu_out);
+ }
+}
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void Simulator::decodeTypeImmediate(SimInstruction* instr) {
+ // Instruction fields.
+ OpcodeField op = instr->opcodeFieldRaw();
+ int32_t rs = getRegister(instr->rsValue());
+ uint32_t rs_u = static_cast<uint32_t>(rs);
+ int32_t rt_reg = instr->rtValue(); // Destination register.
+ int32_t rt = getRegister(rt_reg);
+ int16_t imm16 = instr->imm16Value();
+
+ int32_t ft_reg = instr->ftValue(); // Destination register.
+
+ // Zero extended immediate.
+ uint32_t oe_imm16 = 0xffff & imm16;
+ // Sign extended immediate.
+ int32_t se_imm16 = imm16;
+
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc.
+ int32_t next_pc = bad_ra;
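+  // bad_ra doubles as a sentinel: the pc is only updated at the end of this
+  // function if a branch has set next_pc to something else.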
+
+ // Used for conditional branch instructions.
+ bool do_branch = false;
+ bool execute_branch_delay_instruction = false;
+
+ // Used for arithmetic instructions.
+ int32_t alu_out = 0;
+ // Floating point.
+ double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
+
+ // Used for memory instructions.
+ uint32_t addr = 0x0;
+ // Value to be written in memory.
+ uint32_t mem_value = 0x0;
+
+ // ---------- Configuration (and execution for op_regimm).
+ switch (op) {
+ // ------------- op_cop1. Coprocessor instructions.
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ cc_value = testFCSRBit(fcsr_cc);
+ do_branch = (instr->fbtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ // ------------- op_regimm class.
+ case op_regimm:
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ do_branch = (rs < 0);
+ break;
+ case rt_bltzal:
+ do_branch = rs < 0;
+ break;
+ case rt_bgez:
+ do_branch = rs >= 0;
+ break;
+ case rt_bgezal:
+ do_branch = rs >= 0;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bltzal:
+ case rt_bgez:
+ case rt_bgezal:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + kBranchReturnOffset);
+ }
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ break;
+ }
+ break; // case op_regimm.
+ // ------------- Branch instructions.
+ // When comparing to zero, the encoding of rt field is always 0, so we
+ // don't need to replace rt with zero.
+ case op_beq:
+ do_branch = (rs == rt);
+ break;
+ case op_bne:
+ do_branch = rs != rt;
+ break;
+ case op_blez:
+ do_branch = rs <= 0;
+ break;
+ case op_bgtz:
+ do_branch = rs > 0;
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue - se_imm16);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue - se_imm16);
+ }
+ }
+ alu_out = rs + se_imm16;
+ break;
+ case op_addiu:
+ alu_out = rs + se_imm16;
+ break;
+ case op_slti:
+ alu_out = (rs < se_imm16) ? 1 : 0;
+ break;
+ case op_sltiu:
+ alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+ break;
+ case op_andi:
+ alu_out = rs & oe_imm16;
+ break;
+ case op_ori:
+ alu_out = rs | oe_imm16;
+ break;
+ case op_xori:
+ alu_out = rs ^ oe_imm16;
+ break;
+ case op_lui:
+ alu_out = (oe_imm16 << 16);
+ break;
+ // ------------- Memory instructions.
+ case op_lb:
+ addr = rs + se_imm16;
+ alu_out = readB(addr);
+ break;
+ case op_lh:
+ addr = rs + se_imm16;
+ alu_out = readH(addr, instr);
+ break;
+ case op_lwl: {
+      // al_offset is the offset of the effective address within an aligned
+      // word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_lw:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_lbu:
+ addr = rs + se_imm16;
+ alu_out = readBU(addr);
+ break;
+ case op_lhu:
+ addr = rs + se_imm16;
+ alu_out = readHU(addr, instr);
+ break;
+ case op_lwr: {
+      // al_offset is the offset of the effective address within an aligned
+      // word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out = static_cast<uint32_t>(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_sb:
+ addr = rs + se_imm16;
+ break;
+ case op_sh:
+ addr = rs + se_imm16;
+ break;
+ case op_swl: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_sw:
+ addr = rs + se_imm16;
+ break;
+ case op_swr: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_lwc1:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ldc1:
+ addr = rs + se_imm16;
+ fp_out = readD(addr, instr);
+ break;
+ case op_swc1:
+ case op_sdc1:
+ addr = rs + se_imm16;
+ break;
+ case op_ll:
+ addr = rs + se_imm16;
+ alu_out = loadLinkedW(addr, instr);
+ break;
+ case op_sc:
+ addr = rs + se_imm16;
+ alu_out = storeConditionalW(addr, rt, instr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ // ------------- Branch instructions.
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * SimInstruction::kInstrSize;
+ }
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ case op_addiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ setRegister(rt_reg, alu_out);
+ break;
+ // ------------- Memory instructions.
+ case op_lb:
+ case op_lh:
+ case op_lwl:
+ case op_lw:
+ case op_lbu:
+ case op_lhu:
+ case op_lwr:
+ case op_ll:
+ case op_sc:
+ setRegister(rt_reg, alu_out);
+ break;
+ case op_sb:
+ writeB(addr, static_cast<int8_t>(rt));
+ break;
+ case op_sh:
+ writeH(addr, static_cast<uint16_t>(rt), instr);
+ break;
+ case op_swl:
+ writeW(addr, mem_value, instr);
+ break;
+ case op_sw:
+ writeW(addr, rt, instr);
+ break;
+ case op_swr:
+ writeW(addr, mem_value, instr);
+ break;
+ case op_lwc1:
+ setFpuRegister(ft_reg, alu_out);
+ break;
+ case op_ldc1:
+ setFpuRegisterDouble(ft_reg, fp_out);
+ break;
+ case op_swc1:
+ addr = rs + se_imm16;
+ writeW(addr, getFpuRegister(ft_reg), instr);
+ break;
+ case op_sdc1:
+ addr = rs + se_imm16;
+ writeD(addr, getFpuRegisterDouble(ft_reg), instr);
+ break;
+ default:
+ break;
+ }
+
+ if (execute_branch_delay_instruction) {
+    // Execute the branch delay slot.
+    // We don't check for end_sim_pc: first, it should not be reached here
+    // because the current pc is valid; second, a branch must always execute
+    // its branch delay slot.
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ }
+
+ // If needed update pc after the branch delay execution.
+ if (next_pc != bad_ra) {
+ set_pc(next_pc);
+ }
+}
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void Simulator::decodeTypeJump(SimInstruction* instr) {
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Get unchanged bits of pc.
+ int32_t pc_high_bits = current_pc & 0xf0000000;
+ // Next pc.
+ int32_t next_pc = pc_high_bits | (instr->imm26Value() << 2);
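+  // J/JAL targets stay within the current 256 MB region: the upper four bits
+  // of the pc are kept and the shifted 26-bit index supplies the rest.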
+
+  // Execute the branch delay slot.
+  // We don't check for end_sim_pc: first, it should not be reached here
+  // because the current pc is valid; second, a jump must always execute its
+  // branch delay slot.
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+
+ // Update pc and ra if necessary.
+ // Do this after the branch delay execution.
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ set_pc(next_pc);
+ pc_modified_ = true;
+}
+
+// Executes the current instruction.
+void Simulator::instructionDecode(SimInstruction* instr) {
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ SimulatorProcess::checkICacheLocked(instr);
+ }
+ pc_modified_ = false;
+
+ switch (instr->instructionType()) {
+ case SimInstruction::kRegisterType:
+ decodeTypeRegister(instr);
+ break;
+ case SimInstruction::kImmediateType:
+ decodeTypeImmediate(instr);
+ break;
+ case SimInstruction::kJumpType:
+ decodeTypeJump(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ if (!pc_modified_) {
+ setRegister(pc,
+ reinterpret_cast<int32_t>(instr) + SimInstruction::kInstrSize);
+ }
+}
+
+void Simulator::branchDelayInstructionDecode(SimInstruction* instr) {
+ if (instr->instructionBits() == NopInst) {
+ // Short-cut generic nop instructions. They are always valid and they
+ // never change the simulator state.
+ return;
+ }
+
+ if (instr->isForbiddenInBranchDelay()) {
+    MOZ_CRASH("Error: Unexpected opcode in a branch delay slot.");
+ }
+ instructionDecode(instr);
+}
+
+void Simulator::enable_single_stepping(SingleStepCallback cb, void* arg) {
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void Simulator::disable_single_stepping() {
+ if (!single_stepping_) {
+ return;
+ }
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
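+// execute<true>() compares icount_ against Simulator::StopSimAt on every
+// instruction so the debugger can be entered at a chosen point;
+// execute<false>() omits that check on the fast path.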
+template <bool enableStopSimAt>
+void Simulator::execute() {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int program_counter = get_pc();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ MipsDebugger dbg(this);
+ dbg.debug();
+ } else {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this,
+ (void*)program_counter);
+ }
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+ }
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+}
+
+void Simulator::callInternal(uint8_t* entry) {
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int32_t>(entry));
+  // Put down a marker for the end of simulation. The simulator will stop
+  // simulating when the PC reaches this value. By saving the "end simulation"
+  // value into ra, the simulation stops when returning to this call point.
+ setRegister(ra, end_sim_pc);
+
+  // Remember the values of the callee-saved registers so they can be checked
+  // and restored after the call.
+ int32_t s0_val = getRegister(s0);
+ int32_t s1_val = getRegister(s1);
+ int32_t s2_val = getRegister(s2);
+ int32_t s3_val = getRegister(s3);
+ int32_t s4_val = getRegister(s4);
+ int32_t s5_val = getRegister(s5);
+ int32_t s6_val = getRegister(s6);
+ int32_t s7_val = getRegister(s7);
+ int32_t gp_val = getRegister(gp);
+ int32_t sp_val = getRegister(sp);
+ int32_t fp_val = getRegister(fp);
+
+  // Set up the callee-saved registers with a known value so that we can check
+  // that they are preserved properly across JS execution.
+ int32_t callee_saved_value = icount_;
+ setRegister(s0, callee_saved_value);
+ setRegister(s1, callee_saved_value);
+ setRegister(s2, callee_saved_value);
+ setRegister(s3, callee_saved_value);
+ setRegister(s4, callee_saved_value);
+ setRegister(s5, callee_saved_value);
+ setRegister(s6, callee_saved_value);
+ setRegister(s7, callee_saved_value);
+ setRegister(gp, callee_saved_value);
+ setRegister(fp, callee_saved_value);
+
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1) {
+ execute<true>();
+ } else {
+ execute<false>();
+ }
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(s0));
+ MOZ_ASSERT(callee_saved_value == getRegister(s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(gp));
+ MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+ // Restore callee-saved registers with the original value.
+ setRegister(s0, s0_val);
+ setRegister(s1, s1_val);
+ setRegister(s2, s2_val);
+ setRegister(s3, s3_val);
+ setRegister(s4, s4_val);
+ setRegister(s5, s5_val);
+ setRegister(s6, s6_val);
+ setRegister(s7, s7_val);
+ setRegister(gp, gp_val);
+ setRegister(sp, sp_val);
+ setRegister(fp, fp_val);
+}
+
+int32_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount) {
+ entry_stack = entry_stack - argument_count * sizeof(int32_t);
+ } else {
+ entry_stack = entry_stack - kCArgsSlotsSize;
+ }
+
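+  // Round the stack pointer down to the ABI-required alignment before the
+  // arguments are stored and the generated code is entered.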
+ entry_stack &= ~(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+  // Set up the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg)) {
+ setRegister(argReg.code(), va_arg(parameters, int32_t));
+ } else {
+ stack_argument[i] = va_arg(parameters, int32_t);
+ }
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int32_t result = getRegister(v0);
+ return result;
+}
+
+uintptr_t Simulator::pushAddress(uintptr_t address) {
+ int new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::popAddress() {
+ int current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator* JSContext::simulator() const { return simulator_; }
diff --git a/js/src/jit/mips32/Simulator-mips32.h b/js/src/jit/mips32/Simulator-mips32.h
new file mode 100644
index 0000000000..7ab204af54
--- /dev/null
+++ b/js/src/jit/mips32/Simulator-mips32.h
@@ -0,0 +1,526 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_mips32_Simulator_mips32_h
+#define jit_mips32_Simulator_mips32_h
+
+#ifdef JS_SIMULATOR_MIPS32
+
+# include "mozilla/Atomics.h"
+
+# include "jit/IonTypes.h"
+# include "js/ProfilingFrameIterator.h"
+# include "threading/Thread.h"
+# include "vm/MutexIDs.h"
+# include "wasm/WasmSignalHandlers.h"
+
+namespace js {
+
+namespace jit {
+
+class JitActivation;
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
+
+const intptr_t kPointerAlignment = 4;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 34th register.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactCauseBit = 12;
+const uint32_t kFCSRUnderflowCauseBit = 13;
+const uint32_t kFCSROverflowCauseBit = 14;
+const uint32_t kFCSRDivideByZeroCauseBit = 15;
+const uint32_t kFCSRInvalidOpCauseBit = 16;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
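+// Worked example (for illustration only): kFCSROverflowFlagMask is
+// 1 << 4 == 0x10, so kFCSRFlagMask covers bits 2..6 (0x7c) and
+// kFCSRExceptionFlagMask drops the inexact bit, leaving 0x78.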
+
+// On the MIPS simulator, breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+const uint32_t kWasmTrapCode = 6;
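+// For example (illustrative): a break code of 20 is printed as a watchpoint,
+// a break code of 64 is handled as a stop() that can be enabled, disabled and
+// counted, and a break code of 200 drops straight into the debugger.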
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+// Per thread simulator state.
+class Simulator {
+ friend class MipsDebugger;
+
+ public:
+ // Registers are declared in order. See "See MIPS Run Linux" chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0,
+ v1,
+ a0,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ t2,
+ t3,
+ t4,
+ t5,
+ t6,
+ t7,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ t8,
+ t9,
+ k0,
+ k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ enum FPURegister {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15, // f12 and f14 are argument FPURegisters.
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ kNumFPURegisters
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create();
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods
+ // above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+ // Accessors for register state. Reading the pc value adheres to the MIPS
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void setRegister(int reg, int32_t value);
+ int32_t getRegister(int reg) const;
+ double getDoubleFromRegisterPair(int reg);
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int32_t value);
+ void setFpuRegisterFloat(int fpureg, float value);
+ void setFpuRegisterFloat(int fpureg, int64_t value);
+ void setFpuRegisterDouble(int fpureg, double value);
+ void setFpuRegisterDouble(int fpureg, int64_t value);
+ int32_t getFpuRegister(int fpureg) const;
+ int64_t getFpuRegisterLong(int fpureg) const;
+ float getFpuRegisterFloat(int fpureg) const;
+ double getFpuRegisterDouble(int fpureg) const;
+ void setFCSRBit(uint32_t cc, bool value);
+ bool testFCSRBit(uint32_t cc);
+ bool setFCSRRoundError(double original, double rounded);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int32_t value);
+ int32_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const {
+ return reinterpret_cast<T>(get_pc());
+ }
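+ // Example usage (illustrative): callers needing a code pointer can write
+ //   uint8_t* pc = sim->get_pc_as<uint8_t*>();
+ // instead of casting the raw int32_t returned by get_pc().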
+
+ void enable_single_stepping(SingleStepCallback cb, void* arg);
+ void disable_single_stepping();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes MIPS instructions until the PC reaches end_sim_pc.
+ template <bool enableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int32_t call(uint8_t* entry, int argument_count, ...);
+
+ // Push an address onto the JS stack.
+ uintptr_t pushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t popAddress();
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+ // Returns true if pc register contains one of the 'SpecialValues' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum SpecialValues {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly set up.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ bool init();
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void format(SimInstruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint32_t readBU(uint32_t addr);
+ inline int32_t readB(uint32_t addr);
+ inline void writeB(uint32_t addr, uint8_t value);
+ inline void writeB(uint32_t addr, int8_t value);
+
+ inline uint16_t readHU(uint32_t addr, SimInstruction* instr);
+ inline int16_t readH(uint32_t addr, SimInstruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void writeH(uint32_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(uint32_t addr, int16_t value, SimInstruction* instr);
+
+ inline int readW(uint32_t addr, SimInstruction* instr);
+ inline void writeW(uint32_t addr, int value, SimInstruction* instr);
+
+ inline double readD(uint32_t addr, SimInstruction* instr);
+ inline void writeD(uint32_t addr, double value, SimInstruction* instr);
+
+ inline int32_t loadLinkedW(uint32_t addr, SimInstruction* instr);
+ inline int32_t storeConditionalW(uint32_t addr, int32_t value,
+ SimInstruction* instr);
+
+ // Executing is handled based on the instruction type.
+ void decodeTypeRegister(SimInstruction* instr);
+
+ // Helper function for decodeTypeRegister.
+ void configureTypeRegister(SimInstruction* instr, int32_t& alu_out,
+ int64_t& i64hilo, uint64_t& u64hilo,
+ int32_t& next_pc, int32_t& return_addr_reg,
+ bool& do_interrupt);
+
+ void decodeTypeImmediate(SimInstruction* instr);
+ void decodeTypeJump(SimInstruction* instr);
+
+ // Used for breakpoints and traps.
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ bool isWatchpoint(uint32_t code);
+ void printWatchpoint(uint32_t code);
+ void handleStop(uint32_t code, SimInstruction* instr);
+ bool isStopInstruction(SimInstruction* instr);
+ bool isEnabledStop(uint32_t code);
+ void enableStop(uint32_t code);
+ void disableStop(uint32_t code);
+ void increaseStopCounter(uint32_t code);
+ void printStopInfo(uint32_t code);
+
+ JS::ProfilingFrameIterator::RegisterState registerState();
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ // This method is rather hot so inline the normal (no-wasm) case.
+ bool MOZ_ALWAYS_INLINE handleWasmSegFault(int32_t addr, unsigned numBytes) {
+ if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+ return false;
+ }
+
+ uint8_t* newPC;
+ if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes,
+ &newPC)) {
+ return false;
+ }
+
+ LLBit_ = false;
+ set_pc(int32_t(newPC));
+ return true;
+ }
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+ // Execute one instruction placed in a branch delay slot.
+ void branchDelayInstructionDecode(SimInstruction* instr);
+
+ public:
+ static int StopSimAt;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type);
+
+ private:
+ enum Exception {
+ kNone,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void signalExceptions();
+
+ // Handle arguments and return value for runtime FP functions.
+ void getFpArgs(double* x, double* y, int32_t* z);
+ void getFpFromStack(int32_t* stack, double* x);
+
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Registers.
+ int32_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int32_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ bool LLBit_;
+ uint32_t LLAddr_;
+ int32_t lastLLValue_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int icount_;
+ int break_count_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // Single-stepping support
+ bool single_stepping_;
+ SingleStepCallback single_step_callback_;
+ void* single_step_callback_arg_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1U << 31;
+
+ // A stop is enabled, meaning the simulator will stop when meeting the
+ // instruction, if bit 31 of watchedStops_[code].count is unset.
+ // The value watchedStops_[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
+ struct StopCountAndDesc {
+ uint32_t count_;
+ char* desc_;
+ };
+ StopCountAndDesc watchedStops_[kNumOfWatchedStops];
+};
+
+// Process wide simulator state.
+class SimulatorProcess {
+ friend class Redirection;
+ friend class AutoLockSimulatorCache;
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ static mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ ICacheCheckingDisableCount;
+ static void FlushICache(void* start, size_t size);
+
+ static void checkICacheLocked(SimInstruction* instr);
+
+ static bool initialize() {
+ singleton_ = js_new<SimulatorProcess>();
+ return singleton_;
+ }
+ static void destroy() {
+ js_delete(singleton_);
+ singleton_ = nullptr;
+ }
+
+ SimulatorProcess();
+ ~SimulatorProcess();
+
+ private:
+ static SimulatorProcess* singleton_;
+
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_ MOZ_UNANNOTATED;
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ static ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->icache_;
+ }
+
+ static Redirection* redirection() {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->redirection_;
+ }
+
+ static void setRedirection(js::jit::Redirection* redirection) {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ singleton_->redirection_ = redirection;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_MIPS32 */
+
+#endif /* jit_mips32_Simulator_mips32_h */
diff --git a/js/src/jit/mips32/Trampoline-mips32.cpp b/js/src/jit/mips32/Trampoline-mips32.cpp
new file mode 100644
index 0000000000..2d49c865b5
--- /dev/null
+++ b/js/src/jit/mips32/Trampoline-mips32.cpp
@@ -0,0 +1,942 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+static_assert(sizeof(uintptr_t) == sizeof(uint32_t), "Not 64-bit clean.");
+
+struct EnterJITRegs {
+ double f30;
+ double f28;
+ double f26;
+ double f24;
+ double f22;
+ double f20;
+
+ // non-volatile registers.
+ uintptr_t ra;
+ uintptr_t fp;
+ uintptr_t s7;
+ uintptr_t s6;
+ uintptr_t s5;
+ uintptr_t s4;
+ uintptr_t s3;
+ uintptr_t s2;
+ uintptr_t s1;
+ uintptr_t s0;
+};
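+// For reference (illustrative, assuming 4-byte pointers and 8-byte doubles):
+// sizeof(EnterJITRegs) is 6 * 8 + 10 * 4 = 88 bytes, which is what
+// GeneratePrologue() reserves and GenerateReturn() frees below.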
+
+struct EnterJITArgs {
+ // First 4 argument placeholders
+ void* jitcode; // <- sp points here when function is entered.
+ int maxArgc;
+ Value* maxArgv;
+ InterpreterFrame* fp;
+
+ // Arguments on stack
+ CalleeToken calleeToken;
+ JSObject* scopeChain;
+ size_t numStackValues;
+ Value* vp;
+};
+
+static void GenerateReturn(MacroAssembler& masm, int returnCode) {
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ // Restore non-volatile registers
+ masm.as_lw(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_lw(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_lw(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_lw(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_lw(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_lw(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_lw(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_lw(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_lw(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_lw(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.as_ldc1(f20, StackPointer, offsetof(EnterJITRegs, f20));
+ masm.as_ldc1(f22, StackPointer, offsetof(EnterJITRegs, f22));
+ masm.as_ldc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_ldc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_ldc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_ldc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void GeneratePrologue(MacroAssembler& masm) {
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.reserveStack(sizeof(EnterJITRegs));
+ masm.as_sw(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_sw(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_sw(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_sw(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_sw(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_sw(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_sw(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_sw(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_sw(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_sw(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ masm.as_sdc1(f20, StackPointer, offsetof(EnterJITRegs, f20));
+ masm.as_sdc1(f22, StackPointer, offsetof(EnterJITRegs, f22));
+ masm.as_sdc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_sdc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_sdc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_sdc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
+}
+
+/*
+ * This method generates a trampoline for a c++ function with the following
+ * signature:
+ * void enter(void* code, int argc, Value* argv, InterpreterFrame* fp,
+ * CalleeToken calleeToken, JSObject* scopeChain, Value* vp)
+ * ...using standard EABI calling convention
+ */
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ const Register reg_code = a0;
+ const Register reg_argc = a1;
+ const Register reg_argv = a2;
+ const mozilla::DebugOnly<Register> reg_frame = a3;
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ GeneratePrologue(masm);
+
+ const Address slotToken(
+ sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, calleeToken));
+ const Address slotVp(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, vp));
+
+ // Save stack pointer into s4
+ masm.movePtr(StackPointer, s4);
+
+ // Load calleeToken into s2.
+ masm.loadPtr(slotToken, s2);
+
+ // Save stack pointer as baseline frame.
+ masm.movePtr(StackPointer, FramePointer);
+
+ // Load the number of actual arguments into s3.
+ masm.loadPtr(slotVp, s3);
+ masm.unboxInt32(Address(s3, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // if we are constructing, that also needs to include newTarget
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, s2,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.add32(Imm32(1), reg_argc);
+
+ masm.bind(&noNewTarget);
+ }
+
+ masm.as_sll(s0, reg_argc, 3); // s0 = argc * 8
+ masm.addPtr(reg_argv, s0); // s0 = argv + argc * 8
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+ // If there aren't any arguments, don't do anything
+ masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), s0);
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6, s7);
+ masm.loadValue(Address(s0, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+
+ // Create the frame descriptor.
+ masm.subPtr(StackPointer, s4);
+ masm.makeFrameDescriptor(s4, FrameType::CppToJSJit, JitFrameLayout::Size());
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(s3,
+ Address(StackPointer, sizeof(uintptr_t))); // actual arguments
+ masm.storePtr(s2, Address(StackPointer, 0)); // callee token
+
+ masm.push(s4); // descriptor
+
+ CodeLabel returnLabel;
+ Label oomReturnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(OsrFrameReg);
+ regs.take(reg_code);
+ regs.take(ReturnReg);
+
+ const Address slotNumStackValues(
+ FramePointer,
+ sizeof(EnterJITRegs) + offsetof(EnterJITArgs, numStackValues));
+ const Address slotScopeChain(
+ FramePointer,
+ sizeof(EnterJITRegs) + offsetof(EnterJITArgs, scopeChain));
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+ Register scratch = regs.takeAny();
+
+ Register numStackValues = regs.takeAny();
+ masm.load32(slotNumStackValues, numStackValues);
+
+ // Push return address.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, &returnLabel);
+ masm.storePtr(scratch, Address(StackPointer, 0));
+
+ // Push previous frame pointer.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = FramePointer;
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+ masm.movePtr(StackPointer, framePtr);
+
+ // Reserve space for locals and stack values.
+ masm.ma_sll(scratch, numStackValues, Imm32(3));
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.addPtr(
+ Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset),
+ scratch);
+ masm.makeFrameDescriptor(scratch, FrameType::BaselineJS,
+ ExitFrameLayout::Size());
+
+ // Push frame descriptor and fake return address.
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(
+ scratch, Address(StackPointer, sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(zero, Address(StackPointer, 0)); // fake return address
+
+ // No GC things to mark, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr,
+ Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(FramePointer); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ regs.add(OsrFrameReg);
+ regs.take(JSReturnOperand);
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ Label error;
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ Register realFramePtr = numStackValues;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.ma_addu(realFramePtr, framePtr, Imm32(sizeof(void*)));
+ masm.profilerEnterFrame(realFramePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&oomReturnLabel);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.loadPtr(slotScopeChain, R1.scratchReg());
+ }
+
+ // The call will push the return address on the stack, thus we check that
+ // the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+ // Call the function with pushing return address to stack.
+ masm.callJitNoProfiler(reg_code);
+
+ {
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ masm.addCodeLabel(returnLabel);
+ masm.bind(&oomReturnLabel);
+ }
+
+ // s0 <- 8*argc (size of all arguments we pushed on the stack)
+ masm.pop(s0);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), s0);
+
+ // Discard calleeToken, numActualArgs.
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+
+ // Pop arguments off the stack.
+ masm.addPtr(s0, StackPointer);
+
+ // Store the returned value into the slotVp
+ masm.loadPtr(slotVp, s1);
+ masm.storeValue(JSReturnOperand, Address(s1, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ // Not supported, or not implemented yet.
+ // TODO: Implement along with the corresponding stack-walker changes, in
+ // coordination with the Gecko Profiler, see bug 1635987 and follow-ups.
+ return mozilla::Nothing{};
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+ // NOTE: Members ionScript_ and osiPointReturnAddress_ of
+ // InvalidationBailoutStack are already on the stack.
+ static const uint32_t STACK_DATA_SIZE =
+ sizeof(InvalidationBailoutStack) - 2 * sizeof(uintptr_t);
+
+ // Stack has to be aligned here. If not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Make room for data on stack.
+ masm.subPtr(Imm32(STACK_DATA_SIZE), StackPointer);
+
+ // Save general purpose registers
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ Address address =
+ Address(StackPointer, InvalidationBailoutStack::offsetOfRegs() +
+ i * sizeof(uintptr_t));
+ masm.storePtr(Register::FromCode(i), address);
+ }
+
+ // Save floating point registers
+ // We can use as_sdc1 because the stack is aligned.
+ for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++) {
+ masm.as_sdc1(
+ FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
+ InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));
+ }
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+ // Reserve space for return value and BailoutInfo pointer
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to return value.
+ masm.ma_addu(a1, StackPointer, Imm32(sizeof(uintptr_t)));
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a2);
+
+ using Fn = bool (*)(InvalidationBailoutStack * sp, size_t * frameSizeOut,
+ BaselineBailoutInfo * *info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.passABIArg(a2);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.loadPtr(Address(StackPointer, 0), a2);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), a1);
+ // Remove the return address, the IonScript, the register state
+ // (InvalidationBailoutStack) and the space that was allocated for the
+ // return value.
+ masm.addPtr(Imm32(sizeof(InvalidationBailoutStack) + 2 * sizeof(uintptr_t)),
+ StackPointer);
+ // remove the space that this frame was using before the bailout
+ // (computed by InvalidationBailout)
+ masm.addPtr(a1, StackPointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+ masm.pushReturnAddress();
+
+#error "Port changes from bug 1772506"
+
+ Register numActArgsReg = t6;
+ Register calleeTokenReg = t7;
+ Register numArgsReg = t5;
+
+ // Load the number of actual arguments into numActArgsReg
+ masm.loadPtr(
+ Address(StackPointer, RectifierFrameLayout::offsetOfNumActualArgs()),
+ numActArgsReg);
+
+ // Load the number of |undefined|s to push into t1.
+ masm.loadPtr(
+ Address(StackPointer, RectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+
+ // Copy the number of actual arguments into s3.
+ masm.mov(numActArgsReg, s3);
+
+ masm.mov(calleeTokenReg, numArgsReg);
+ masm.andPtr(Imm32(CalleeTokenMask), numArgsReg);
+ masm.loadFunctionArgCount(numArgsReg, numArgsReg);
+
+ masm.as_subu(t1, numArgsReg, s3);
+
+ // Get the topmost argument.
+ masm.ma_sll(t0, s3, Imm32(3)); // t0 <- nargs * 8
+ masm.as_addu(t2, sp, t0); // t2 <- sp + nargs * 8
+ masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t2);
+
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, calleeTokenReg,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // Add sizeof(Value) to step over |this|.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET + sizeof(Value)), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
+ masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET + sizeof(Value)), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));
+
+ // Include the newly pushed newTarget value in the frame size
+ // calculated below.
+ masm.add32(Imm32(1), numArgsReg);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Push undefined.
+ masm.moveValue(UndefinedValue(), ValueOperand(t3, t4));
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t3, t4), Address(StackPointer, 0));
+ masm.sub32(Imm32(1), t1);
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ {
+ Label copyLoopTop, initialSkip;
+
+ masm.ma_b(&initialSkip, ShortJump);
+
+ masm.bind(&copyLoopTop);
+ masm.subPtr(Imm32(sizeof(Value)), t2);
+ masm.sub32(Imm32(1), s3);
+
+ masm.bind(&initialSkip);
+
+ MOZ_ASSERT(sizeof(Value) == 2 * sizeof(uint32_t));
+ // Read argument and push to stack.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
+ masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));
+
+ masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // Translate the frame size from values into bytes.
+ masm.ma_addu(t0, numArgsReg, Imm32(1));
+ masm.lshiftPtr(Imm32(3), t0);
+
+ // Construct sizeDescriptor.
+ masm.makeFrameDescriptor(t0, FrameType::Rectifier, JitFrameLayout::Size());
+
+ // Construct JitFrameLayout.
+ masm.subPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+ // Push actual arguments.
+ masm.storePtr(numActArgsReg, Address(StackPointer, 2 * sizeof(uintptr_t)));
+ // Push callee token.
+ masm.storePtr(calleeTokenReg, Address(StackPointer, sizeof(uintptr_t)));
+ // Push frame descriptor.
+ masm.storePtr(t0, Address(StackPointer, 0));
+
+ // Call the target function.
+ masm.andPtr(Imm32(CalleeTokenMask), calleeTokenReg);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(t1);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(calleeTokenReg, t1, &noBaselineScript);
+ masm.callJitNoProfiler(t1);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ masm.callJitNoProfiler(t1);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+// NOTE: Members snapshotOffset_ and padding_ of BailoutStack
+// are not stored in PushBailoutFrame().
+static const uint32_t bailoutDataSize =
+ sizeof(BailoutStack) - 2 * sizeof(uintptr_t);
+static const uint32_t bailoutInfoOutParamSize = 2 * sizeof(uintptr_t);
+
+/* There are two different stack layouts when doing bailout. They are
+ * represented via class BailoutStack.
+ *
+ * - First case is when bailout is done through the bailout table. In this case
+ * table offset is stored in $ra (look at JitRuntime::generateBailoutTable())
+ * and thunk code should save it on stack. In this case frameClassId_ cannot
+ * be NO_FRAME_SIZE_CLASS_ID. Members snapshotOffset_ and padding_ are not on
+ * the stack.
+ *
+ * - The other case is when bailout is done via out-of-line code (lazy bailout).
+ * In this case frame size is stored in $ra (look at
+ * CodeGeneratorMIPS::generateOutOfLineCode()) and thunk code should save it
+ * on stack. Other difference is that members snapshotOffset_ and padding_ are
+ * pushed to the stack by CodeGeneratorMIPS::visitOutOfLineBailout().
+ */
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // Make sure that alignment is proper.
+ masm.checkStackAlignment();
+
+ // Make room for data.
+ masm.subPtr(Imm32(bailoutDataSize), StackPointer);
+
+ // Save general purpose registers.
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ uint32_t off = BailoutStack::offsetOfRegs() + i * sizeof(uintptr_t);
+ masm.storePtr(Register::FromCode(i), Address(StackPointer, off));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ // What to do for SIMD?
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ // Save floating point registers
+ // We can use as_sdc1 because the stack is aligned.
+ for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++) {
+ masm.as_sdc1(FloatRegister::FromIndex(i, FloatRegister::Double),
+ StackPointer,
+ BailoutStack::offsetOfFpRegs() + i * sizeof(double));
+ }
+
+ // Store the frameSize_ stored in ra
+ // See: JitRuntime::generateBailoutTable()
+ // See: CodeGeneratorMIPS::generateOutOfLineCode()
+ masm.storePtr(ra, Address(StackPointer, BailoutStack::offsetOfFrameSize()));
+
+#error "Code needs to be updated"
+
+ // Put pointer to BailoutStack as first argument to the Bailout()
+ masm.movePtr(StackPointer, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, a0);
+
+ // Put pointer to BailoutInfo
+ masm.subPtr(Imm32(bailoutInfoOutParamSize), StackPointer);
+ masm.storePtr(ImmPtr(nullptr), Address(StackPointer, 0));
+ masm.movePtr(StackPointer, a1);
+
+ using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ // Get BailoutInfo pointer
+ masm.loadPtr(Address(StackPointer, 0), a2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+
+ // Load frameSize from stack
+ masm.loadPtr(Address(StackPointer, bailoutInfoOutParamSize +
+ BailoutStack::offsetOfFrameSize()),
+ a1);
+
+ // Remove complete BailoutStack class and data after it
+ masm.addPtr(Imm32(sizeof(BailoutStack) + bailoutInfoOutParamSize),
+ StackPointer);
+ // Remove frame size from stack
+ masm.addPtr(a1, StackPointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+JitRuntime::BailoutTable JitRuntime::generateBailoutTable(MacroAssembler& masm,
+ Label* bailoutTail,
+ uint32_t frameClass) {
+ uint32_t offset = startTrampolineCode(masm);
+
+ Label bailout;
+ for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++) {
+ // Calculate offset to the end of table
+ int32_t offset = (BAILOUT_TABLE_SIZE - i) * BAILOUT_TABLE_ENTRY_SIZE;
+
+ // We use the 'ra' as table offset later in GenerateBailoutThunk
+ masm.as_bal(BOffImm16(offset));
+ masm.nop();
+ }
+ masm.bind(&bailout);
+
+ GenerateBailoutThunk(masm, frameClass, bailoutTail);
+
+ return BailoutTable(offset, masm.currentOffset() - offset);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, NO_FRAME_SIZE_CLASS_ID, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset) {
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // If it isn't a tail call, then the return address needs to be saved
+ if (f.expectTailCall == NonTailCall) {
+ masm.pushReturnAddress();
+ }
+
+ // We're aligned to an exit frame, so link it up.
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = t1; // Use temporary register.
+ regs.take(argsBase);
+ masm.ma_addu(argsBase, StackPointer,
+ Imm32(ExitFrameLayout::SizeWithFooter()));
+ }
+ uint32_t framePushedBeforeAlignStack = masm.framePushed();
+ masm.alignStackPointer();
+ masm.setFramePushed(0);
+
+ // Reserve space for the outparameter. Reserve sizeof(Value) for every
+ // case so that stack stays aligned.
+ uint32_t outParamSize = 0;
+ switch (f.outParam) {
+ case Type_Value:
+ outParamSize = sizeof(Value);
+ masm.reserveStack(outParamSize);
+ break;
+
+ case Type_Handle: {
+ uint32_t pushed = masm.framePushed();
+ masm.PushEmptyRooted(f.outParamRootType);
+ outParamSize = masm.framePushed() - pushed;
+ } break;
+
+ case Type_Bool:
+ case Type_Int32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+ [[fallthrough]];
+ case Type_Pointer:
+ outParamSize = sizeof(uintptr_t);
+ masm.reserveStack(outParamSize);
+ break;
+
+ case Type_Double:
+ outParamSize = sizeof(double);
+ masm.reserveStack(outParamSize);
+ break;
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ uint32_t outParamOffset = 0;
+ if (f.outParam != Type_Void) {
+ // Make sure that stack is double aligned after outParam.
+ MOZ_ASSERT(outParamSize <= sizeof(double));
+ outParamOffset += sizeof(double) - outParamSize;
+ }
+ // Reserve stack for double sized args that are copied to be aligned.
+ outParamOffset += f.doubleByRefArgs() * sizeof(double);
+
+ Register doubleArgs = t0;
+ masm.reserveStack(outParamOffset);
+ masm.movePtr(StackPointer, doubleArgs);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+ size_t doubleArgDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(uint32_t);
+ break;
+ case VMFunctionData::DoubleByValue:
+ // Values should be passed by reference, not by value, so we
+ // assert that the argument is a double-precision float.
+ MOZ_ASSERT(f.argPassedInFloatReg(explicitArg));
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ argDisp += sizeof(double);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += sizeof(uint32_t);
+ break;
+ case VMFunctionData::DoubleByRef:
+ // Copy double sized argument to aligned place.
+ masm.ma_ldc1WordAligned(ScratchDoubleReg, argsBase, argDisp);
+ masm.as_sdc1(ScratchDoubleReg, doubleArgs, doubleArgDisp);
+ masm.passABIArg(MoveOperand(doubleArgs, doubleArgDisp,
+ MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ doubleArgDisp += sizeof(double);
+ argDisp += sizeof(double);
+ break;
+ }
+ }
+
+ MOZ_ASSERT_IF(f.outParam != Type_Void, doubleArgDisp + sizeof(double) ==
+ outParamOffset + outParamSize);
+
+ // Copy the implicit outparam, if any.
+ if (f.outParam != Type_Void) {
+ masm.passABIArg(MoveOperand(doubleArgs, outParamOffset,
+ MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ }
+
+ masm.callWithABI(nativeFun, MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(v0, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ masm.freeStack(outParamOffset);
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+ [[fallthrough]];
+ case Type_Pointer:
+ masm.load32(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Double:
+ masm.as_ldc1(ReturnDoubleReg, StackPointer, 0);
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.restoreStackPointer();
+ masm.setFramePushed(framePushedBeforeAlignStack);
+
+ masm.leaveExitFrame();
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(uintptr_t) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ uint32_t offset = startTrampolineCode(masm);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ Register temp1 = a0;
+ Register temp2 = a2;
+ Register temp3 = a3;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet save;
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ save.add(ra);
+ masm.PushRegsInMask(save);
+
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ save.take(AnyRegister(ra));
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.abiret();
+
+ return offset;
+}
+
+void JitRuntime::generateExceptionTailStub(MacroAssembler& masm,
+ Label* profilerExitTail) {
+ exceptionTailOffset_ = startTrampolineCode(masm);
+
+ masm.bind(masm.failureLabel());
+ masm.handleFailureWithHandlerTail(profilerExitTail);
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ bailoutTailOffset_ = startTrampolineCode(masm);
+ masm.bind(bailoutTail);
+
+ masm.generateBailoutTail(a1, a2);
+}
diff --git a/js/src/jit/mips64/Architecture-mips64.cpp b/js/src/jit/mips64/Architecture-mips64.cpp
new file mode 100644
index 0000000000..54ae127954
--- /dev/null
+++ b/js/src/jit/mips64/Architecture-mips64.cpp
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Architecture-mips64.h"
+
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+const char* const Registers::RegNames[] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
+ "a7", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5",
+ "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"};
+
+const uint32_t Allocatable = 22;
+
+const Registers::SetType Registers::ArgRegMask =
+ Registers::SharedArgRegMask | (1 << a4) | (1 << a5) | (1 << a6) | (1 << a7);
+
+const Registers::SetType Registers::JSCallMask = (1 << Registers::v1);
+
+const Registers::SetType Registers::CallMask = (1 << Registers::v0);
+
+FloatRegisters::Encoding FloatRegisters::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(Encoding(i)), name) == 0) {
+ return Encoding(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegister FloatRegister::singleOverlay() const {
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ == Codes::Double) {
+ return FloatRegister(reg_, Codes::Single);
+ }
+ return *this;
+}
+
+FloatRegister FloatRegister::doubleOverlay() const {
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ != Codes::Double) {
+ return FloatRegister(reg_, Codes::Double);
+ }
+ return *this;
+}
+
+FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ if ((*iter).isSingle()) {
+ // Even for single-size registers, save the complete double register.
+ mod.addUnchecked((*iter).doubleOverlay());
+ } else {
+ mod.addUnchecked(*iter);
+ }
+ }
+ return mod.set();
+}
+
+uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ FloatRegisterSet ss = s.reduceSetForPush();
+ uint64_t bits = ss.bits();
+ // We are only pushing double registers.
+ MOZ_ASSERT((bits & 0xffffffff) == 0);
+ uint32_t ret = mozilla::CountPopulation32(bits >> 32) * sizeof(double);
+ return ret;
+}
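+// Note on GetPushSizeInBytes() above (illustrative): after reduceSetForPush()
+// only Double views remain, which occupy bits 32..63 of the set, so a set
+// holding f24 and f26 as doubles has two bits set above bit 31 and the
+// function returns 2 * sizeof(double) == 16.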
+uint32_t FloatRegister::getRegisterDumpOffsetInBytes() {
+ return id() * sizeof(double);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips64/Architecture-mips64.h b/js/src/jit/mips64/Architecture-mips64.h
new file mode 100644
index 0000000000..d3db37ea2c
--- /dev/null
+++ b/js/src/jit/mips64/Architecture-mips64.h
@@ -0,0 +1,233 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Architecture_mips64_h
+#define jit_mips64_Architecture_mips64_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// Shadow stack space is not required on MIPS64.
+static const uint32_t ShadowStackSpace = 0;
+
+// MIPS64 has a 64-bit floating-point coprocessor. There are 32 double-
+// precision registers which can also be used as single-precision registers.
+class FloatRegisters : public FloatRegistersMIPSShared {
+ public:
+ enum ContentType { Single, Double, NumTypes };
+
+ static const char* GetName(uint32_t i) {
+ MOZ_ASSERT(i < TotalPhys);
+ return FloatRegistersMIPSShared::GetName(Encoding(i));
+ }
+
+ static Encoding FromName(const char* name);
+
+ static const uint32_t Total = 32 * NumTypes;
+#ifdef MIPSR6
+ static const uint32_t Allocatable = 60;
+#else
+ static const uint32_t Allocatable = 62;
+#endif
+ // When saving all registers, we only need to save the double registers.
+ static const uint32_t TotalPhys = 32;
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+ // Magic values which are used to duplicate a mask of physical registers for
+ // a specific type of register. A multiplication is used to copy and shift
+ // the bits of the physical register mask.
+ static const SetType SpreadSingle = SetType(1)
+ << (uint32_t(Single) * TotalPhys);
+ static const SetType SpreadDouble = SetType(1)
+ << (uint32_t(Double) * TotalPhys);
+ static const SetType SpreadScalar = SpreadSingle | SpreadDouble;
+ static const SetType SpreadVector = 0;
+ static const SetType Spread = SpreadScalar | SpreadVector;
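+ // Worked example (illustrative): with TotalPhys == 32, a physical mask with
+ // only f0 set (bit 0) multiplied by SpreadScalar yields bits 0 and 32, i.e.
+ // the Single and Double views of f0 within one SetType.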
+
+ static const SetType AllPhysMask = ((SetType(1) << TotalPhys) - 1);
+ static const SetType AllMask = AllPhysMask * Spread;
+ static const SetType AllSingleMask = AllPhysMask * SpreadSingle;
+ static const SetType AllDoubleMask = AllPhysMask * SpreadDouble;
+
+ static const SetType NonVolatileMask =
+ ((1U << FloatRegisters::f24) | (1U << FloatRegisters::f25) |
+ (1U << FloatRegisters::f26) | (1U << FloatRegisters::f27) |
+ (1U << FloatRegisters::f28) | (1U << FloatRegisters::f29) |
+ (1U << FloatRegisters::f30) | (1U << FloatRegisters::f31)) *
+ SpreadScalar |
+ AllPhysMask * SpreadVector;
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+#ifdef MIPSR6
+ static const SetType NonAllocatableMask =
+ ((1U << FloatRegisters::f23) | (1U << FloatRegisters::f24)) * Spread;
+#else
+ static const SetType NonAllocatableMask =
+ (1U << FloatRegisters::f23) * Spread;
+#endif
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegister : public FloatRegisterMIPSShared {
+ public:
+ typedef FloatRegisters Codes;
+ typedef size_t Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::ContentType ContentType;
+
+ Encoding reg_ : 6;
+
+ private:
+ ContentType kind_ : 3;
+
+ public:
+ constexpr FloatRegister(uint32_t r, ContentType kind = Codes::Double)
+ : reg_(Encoding(r)), kind_(kind) {}
+ constexpr FloatRegister()
+ : reg_(Encoding(FloatRegisters::invalid_freg)), kind_(Codes::Double) {}
+
+ static uint32_t SetSize(SetType x) {
+ // Count the number of non-aliased registers.
+ x |= x >> Codes::TotalPhys;
+ x &= Codes::AllPhysMask;
+ static_assert(Codes::AllPhysMask <= 0xffffffff,
+ "We can safely use CountPopulation32");
+ return mozilla::CountPopulation32(x);
+ }
+
+ bool operator==(const FloatRegister& other) const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(!other.isInvalid());
+ return kind_ == other.kind_ && reg_ == other.reg_;
+ }
+ bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
+ size_t size() const {
+ return (kind_ == Codes::Double) ? sizeof(double) : sizeof(float);
+ }
+ // Always push doubles to maintain 8-byte stack alignment.
+ size_t pushSize() const { return sizeof(double); }
+ bool isInvalid() const { return reg_ == FloatRegisters::invalid_freg; }
+
+ bool isSingle() const { return kind_ == Codes::Single; }
+ bool isDouble() const { return kind_ == Codes::Double; }
+ bool isSimd128() const { return false; }
+
+ FloatRegister singleOverlay() const;
+ FloatRegister doubleOverlay() const;
+
+ FloatRegister asSingle() const { return singleOverlay(); }
+ FloatRegister asDouble() const { return doubleOverlay(); }
+ FloatRegister asSimd128() const { MOZ_CRASH("NYI"); }
+
+ Code code() const {
+ MOZ_ASSERT(!isInvalid());
+ return Code(reg_ | (kind_ << 5));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys);
+ return reg_;
+ }
+ uint32_t id() const { return reg_; }
+ static FloatRegister FromCode(uint32_t i) {
+ uint32_t code = i & 0x1f;
+ uint32_t kind = i >> 5;
+ return FloatRegister(Code(code), ContentType(kind));
+ }
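+ // Example (illustrative): f2 viewed as a Double encodes to 2 | (1 << 5) == 34,
+ // and FromCode(34) recovers the same register and ContentType.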
+
+ bool volatile_() const {
+ return !!((1 << reg_) & FloatRegisters::VolatileMask);
+ }
+ const char* name() const { return FloatRegisters::GetName(reg_); }
+ bool operator!=(const FloatRegister& other) const {
+ return kind_ != other.kind_ || reg_ != other.reg_;
+ }
+ bool aliases(const FloatRegister& other) { return reg_ == other.reg_; }
+ uint32_t numAliased() const { return 2; }
+ FloatRegister aliased(uint32_t aliasIdx) {
+ if (aliasIdx == 0) {
+ return *this;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ if (isDouble()) {
+ return singleOverlay();
+ }
+ return doubleOverlay();
+ }
+ uint32_t numAlignedAliased() const { return 2; }
+ FloatRegister alignedAliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(isDouble());
+ if (aliasIdx == 0) {
+ return *this;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ return singleOverlay();
+ }
+
+ SetType alignedOrDominatedAliasedSet() const { return Codes::Spread << reg_; }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ return LiveAsIndexableSet<Name>(s);
+ }
+
+ static Code FromName(const char* name) {
+ return FloatRegisters::FromName(name);
+ }
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+ const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
+ return set;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Architecture_mips64_h */
diff --git a/js/src/jit/mips64/Assembler-mips64.cpp b/js/src/jit/mips64/Assembler-mips64.cpp
new file mode 100644
index 0000000000..bae7c14a69
--- /dev/null
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -0,0 +1,371 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Assembler-mips64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "jit/AutoWritableJitCode.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ : regIndex_(0), stackOffset_(0), current_() {}
+
+ABIArg ABIArgGenerator::next(MIRType type) {
+ static_assert(NumIntArgRegs == NumFloatArgRegs);
+ if (regIndex_ == NumIntArgRegs) {
+ if (type != MIRType::Simd128) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ } else {
+ // Mips platform does not support simd yet.
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+ }
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults: {
+ Register destReg;
+ GetIntArgReg(regIndex_++, &destReg);
+ current_ = ABIArg(destReg);
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Double: {
+ FloatRegister::ContentType contentType;
+ contentType = (type == MIRType::Double) ? FloatRegisters::Double
+ : FloatRegisters::Single;
+ FloatRegister destFReg;
+ GetFloatArgReg(regIndex_++, &destFReg);
+ current_ = ABIArg(FloatRegister(destFReg.id(), contentType));
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
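+// These helpers shift a physical FPU register id into the corresponding
+// RT/RD/RZ/SA bit field of an instruction word when assembling FPU
+// instructions.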
+uint32_t js::jit::RT(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << RTShift;
+}
+
+uint32_t js::jit::RD(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << RDShift;
+}
+
+uint32_t js::jit::RZ(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << RZShift;
+}
+
+uint32_t js::jit::SA(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << SAShift;
+}
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+}
+
+uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
+ Instruction* inst = (Instruction*)instPtr;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
+static JitCode* CodeFromJump(Instruction* jump) {
+ uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ while (reader.more()) {
+ JitCode* child =
+ CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void TraceOneDataRelocation(JSTracer* trc,
+ mozilla::Maybe<AutoWritableJitCode>& awjc,
+ JitCode* code, Instruction* inst) {
+ void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
+ void* prior = ptr;
+
+ // Data relocations can be for Values or for raw pointers. If a Value is
+ // zero-tagged, we can trace it as if it were a raw pointer. If a Value
+ // is not zero-tagged, we have to interpret it as a Value to ensure that the
+ // tag bits are masked off to recover the actual pointer.
+ uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
+ if (word >> JSVAL_TAG_SHIFT) {
+ // This relocation is a Value with a non-zero tag.
+ Value v = Value::fromRawBits(word);
+ TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
+ ptr = (void*)v.bitsAsPunboxPointer();
+ } else {
+ // This relocation is a raw pointer or a Value with a zero tag.
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(
+ trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
+ }
+
+ if (ptr != prior) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
+ }
+}
+
+/* static */
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* inst = (Instruction*)(code->raw() + offset);
+ TraceOneDataRelocation(trc, awjc, code, inst);
+ }
+}
+
+void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
+ if (label.patchAt().bound()) {
+ auto mode = label.linkMode();
+ intptr_t offset = label.patchAt().offset();
+ intptr_t target = label.target().offset();
+
+ if (mode == CodeLabel::RawPointer) {
+ *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
+ } else {
+ MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
+ mode == CodeLabel::JumpImmediate);
+ Instruction* inst = (Instruction*)(rawCode + offset);
+ Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + target));
+ }
+ }
+}
+
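+// bind() patches a previously emitted branch placeholder. Depending on the
+// branch kind and the distance to the target it either installs a short
+// PC-relative branch (turning the following instruction into the delay-slot
+// nop) or rewrites the reserved block into a long jump: a patchable 64-bit
+// load of the target into ScratchRegister followed by jr/jalr.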
+void Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) {
+ int64_t offset = target - branch;
+ InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ // If the encoded offset is 4, then the jump must be short.
+ if (BOffImm16(inst[0]).decode() == 4) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+ return;
+ }
+
+ // Generate the long jump for calls because the return address has to be
+ // the address immediately after the reserved block.
+ if (inst[0].encode() == inst_bgezal.encode()) {
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
+ // There is 1 nop after this.
+ return;
+ }
+
+ if (BOffImm16::IsInRange(offset)) {
+ // Not skipping the trailing nops can improve performance on the
+ // Loongson3 platform, so they are only skipped on other CPUs.
+ bool skipNops =
+ !isLoongson() && (inst[0].encode() != inst_bgezal.encode() &&
+ inst[0].encode() != inst_beq.encode());
+
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+
+ if (skipNops) {
+ inst[2] =
+ InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t)))
+ .encode();
+ // There are 4 nops after this
+ }
+ return;
+ }
+
+ if (inst[0].encode() == inst_beq.encode()) {
+ // Handle long unconditional jump.
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+#ifdef MIPSR6
+ inst[4] =
+ InstReg(op_special, ScratchRegister, zero, zero, ff_jalr).encode();
+#else
+ inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+#endif
+ // There is 1 nop after this.
+ } else {
+ // Handle long conditional jump.
+ inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(BufferOffset(branch + sizeof(uint32_t)), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+#ifdef MIPSR6
+ inst[5] =
+ InstReg(op_special, ScratchRegister, zero, zero, ff_jalr).encode();
+#else
+ inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+#endif
+ // There is 1 nop after this.
+ }
+}
+
+void Assembler::processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+}
+
+uint32_t Assembler::PatchWrite_NearCallSize() {
+ // Loading an address takes 4 instructions, plus a jump and its delay slot.
+ return (4 + 2) * sizeof(uint32_t);
+}
+
+void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall) {
+ Instruction* inst = (Instruction*)start.raw();
+ uint8_t* dest = toCall.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+ // Always use long jump for two reasons:
+ // - Jump has to be the same size because of PatchWrite_NearCallSize.
+ // - Return address has to be at the end of replaced block.
+ // Short jump wouldn't be more efficient.
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
+ inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ inst[5] = InstNOP();
+}
+
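+// A patchable 64-bit load comes in two shapes: a four-instruction sequence
+// (lui/ori/doubleword-rotate/ori) that can only encode a sign-extended 48-bit
+// value, and a six-instruction sequence (lui/ori/shift/ori/shift/ori) for a
+// full 64-bit immediate. The extract/update helpers below inspect the third
+// instruction to tell the two forms apart.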
+uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstReg* i2 = (InstReg*)i1->next();
+ InstImm* i3 = (InstImm*)i2->next();
+ InstImm* i5 = (InstImm*)i3->next()->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
+ (i2->extractFunctionField() == ff_dsrl32)) {
+ uint64_t value = (uint64_t(i0->extractImm16Value()) << 32) |
+ (uint64_t(i1->extractImm16Value()) << 16) |
+ uint64_t(i3->extractImm16Value());
+ return uint64_t((int64_t(value) << 16) >> 16);
+ }
+
+ MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ uint64_t value = (uint64_t(i0->extractImm16Value()) << 48) |
+ (uint64_t(i1->extractImm16Value()) << 32) |
+ (uint64_t(i3->extractImm16Value()) << 16) |
+ uint64_t(i5->extractImm16Value());
+ return value;
+}
+
+void Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value) {
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstReg* i2 = (InstReg*)i1->next();
+ InstImm* i3 = (InstImm*)i2->next();
+ InstImm* i5 = (InstImm*)i3->next()->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
+ (i2->extractFunctionField() == ff_dsrl32)) {
+ i0->setImm16(Imm16::Lower(Imm32(value >> 32)));
+ i1->setImm16(Imm16::Upper(Imm32(value)));
+ i3->setImm16(Imm16::Lower(Imm32(value)));
+ return;
+ }
+
+ MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ i0->setImm16(Imm16::Upper(Imm32(value >> 32)));
+ i1->setImm16(Imm16::Lower(Imm32(value >> 32)));
+ i3->setImm16(Imm16::Upper(Imm32(value)));
+ i5->setImm16(Imm16::Lower(Imm32(value)));
+}
+
+void Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value) {
+ Instruction* inst1 = inst0->next();
+ Instruction* inst2 = inst1->next();
+ Instruction* inst3 = inst2->next();
+
+ *inst0 = InstImm(op_lui, zero, reg, Imm16::Lower(Imm32(value >> 32)));
+ *inst1 = InstImm(op_ori, reg, reg, Imm16::Upper(Imm32(value)));
+ *inst2 = InstReg(op_special, rs_one, reg, reg, 48 - 32, ff_dsrl32);
+ *inst3 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ ImmPtr newValue, ImmPtr expectedValue) {
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue) {
+ Instruction* inst = (Instruction*)label.raw();
+
+ // Extract old Value
+ DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
+ MOZ_ASSERT(value == uint64_t(expectedValue.value));
+
+ // Replace with new value
+ Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
+}
+
+uint64_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
+ InstImm* inst = (InstImm*)code;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
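+// ToggleCall flips the instruction that follows the patchable load of the
+// call target between a jalr (call enabled) and a nop (call disabled),
+// leaving the load sequence itself untouched.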
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+ Instruction* inst = (Instruction*)inst_.raw();
+ InstImm* i0 = (InstImm*)inst;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstImm* i3 = (InstImm*)i1->next()->next();
+ Instruction* i4 = (Instruction*)i3->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if (enabled) {
+ MOZ_ASSERT(i4->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift));
+ InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ *i4 = jalr;
+ } else {
+ InstNOP nop;
+ *i4 = nop;
+ }
+}
diff --git a/js/src/jit/mips64/Assembler-mips64.h b/js/src/jit/mips64/Assembler-mips64.h
new file mode 100644
index 0000000000..7a51f12407
--- /dev/null
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -0,0 +1,288 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Assembler_mips64_h
+#define jit_mips64_Assembler_mips64_h
+
+#include <iterator>
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "jit/mips64/Architecture-mips64.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register CallTempReg4 = a4;
+static constexpr Register CallTempReg5 = a5;
+
+static constexpr Register CallTempNonArgRegs[] = {t0, t1, t2, t3};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+class ABIArgGenerator {
+ unsigned regIndex_;
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+};
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = t0;
+static constexpr Register ABINonArgReg1 = t1;
+static constexpr Register ABINonArgReg2 = t2;
+static constexpr Register ABINonArgReg3 = t3;
+
+// This register may be volatile or nonvolatile. Avoid f23 which is the
+// ScratchDoubleReg.
+static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::f21,
+ FloatRegisters::Double};
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different.
+static constexpr Register ABINonArgReturnReg0 = t0;
+static constexpr Register ABINonArgReturnReg1 = t1;
+static constexpr Register ABINonVolatileReg = s0;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call that must preserve the ABI argument, return,
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = t0;
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions.
+static constexpr Register InstanceReg = s5;
+
+// Registers used for wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = t1;
+
+static constexpr Register InterpreterPCReg = t5;
+
+static constexpr Register JSReturnReg = v1;
+static constexpr Register JSReturnReg_Type = JSReturnReg;
+static constexpr Register JSReturnReg_Data = JSReturnReg;
+static constexpr Register64 ReturnReg64(ReturnReg);
+static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::f0,
+ FloatRegisters::Single};
+static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::f0,
+ FloatRegisters::Double};
+static constexpr FloatRegister ScratchFloat32Reg = {FloatRegisters::f23,
+ FloatRegisters::Single};
+static constexpr FloatRegister ScratchDoubleReg = {FloatRegisters::f23,
+ FloatRegisters::Double};
+
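+// Note that ScratchFloat32Reg and ScratchDoubleReg are the single- and
+// double-precision views of the same physical register, f23, so only one of
+// the scratch scopes below should be in use at a time.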
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg) {}
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg) {}
+};
+
+static constexpr FloatRegister f0 = {FloatRegisters::f0,
+ FloatRegisters::Double};
+static constexpr FloatRegister f1 = {FloatRegisters::f1,
+ FloatRegisters::Double};
+static constexpr FloatRegister f2 = {FloatRegisters::f2,
+ FloatRegisters::Double};
+static constexpr FloatRegister f3 = {FloatRegisters::f3,
+ FloatRegisters::Double};
+static constexpr FloatRegister f4 = {FloatRegisters::f4,
+ FloatRegisters::Double};
+static constexpr FloatRegister f5 = {FloatRegisters::f5,
+ FloatRegisters::Double};
+static constexpr FloatRegister f6 = {FloatRegisters::f6,
+ FloatRegisters::Double};
+static constexpr FloatRegister f7 = {FloatRegisters::f7,
+ FloatRegisters::Double};
+static constexpr FloatRegister f8 = {FloatRegisters::f8,
+ FloatRegisters::Double};
+static constexpr FloatRegister f9 = {FloatRegisters::f9,
+ FloatRegisters::Double};
+static constexpr FloatRegister f10 = {FloatRegisters::f10,
+ FloatRegisters::Double};
+static constexpr FloatRegister f11 = {FloatRegisters::f11,
+ FloatRegisters::Double};
+static constexpr FloatRegister f12 = {FloatRegisters::f12,
+ FloatRegisters::Double};
+static constexpr FloatRegister f13 = {FloatRegisters::f13,
+ FloatRegisters::Double};
+static constexpr FloatRegister f14 = {FloatRegisters::f14,
+ FloatRegisters::Double};
+static constexpr FloatRegister f15 = {FloatRegisters::f15,
+ FloatRegisters::Double};
+static constexpr FloatRegister f16 = {FloatRegisters::f16,
+ FloatRegisters::Double};
+static constexpr FloatRegister f17 = {FloatRegisters::f17,
+ FloatRegisters::Double};
+static constexpr FloatRegister f18 = {FloatRegisters::f18,
+ FloatRegisters::Double};
+static constexpr FloatRegister f19 = {FloatRegisters::f19,
+ FloatRegisters::Double};
+static constexpr FloatRegister f20 = {FloatRegisters::f20,
+ FloatRegisters::Double};
+static constexpr FloatRegister f21 = {FloatRegisters::f21,
+ FloatRegisters::Double};
+static constexpr FloatRegister f22 = {FloatRegisters::f22,
+ FloatRegisters::Double};
+static constexpr FloatRegister f23 = {FloatRegisters::f23,
+ FloatRegisters::Double};
+static constexpr FloatRegister f24 = {FloatRegisters::f24,
+ FloatRegisters::Double};
+static constexpr FloatRegister f25 = {FloatRegisters::f25,
+ FloatRegisters::Double};
+static constexpr FloatRegister f26 = {FloatRegisters::f26,
+ FloatRegisters::Double};
+static constexpr FloatRegister f27 = {FloatRegisters::f27,
+ FloatRegisters::Double};
+static constexpr FloatRegister f28 = {FloatRegisters::f28,
+ FloatRegisters::Double};
+static constexpr FloatRegister f29 = {FloatRegisters::f29,
+ FloatRegisters::Double};
+static constexpr FloatRegister f30 = {FloatRegisters::f30,
+ FloatRegisters::Double};
+static constexpr FloatRegister f31 = {FloatRegisters::f31,
+ FloatRegisters::Double};
+
+// MIPS64 CPUs can only load multibyte data that is "naturally"
+// eight-byte-aligned; the sp register should be sixteen-byte-aligned.
+static constexpr uint32_t ABIStackAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// TODO this is just a filler to prevent a build failure. The MIPS SIMD
+// alignment requirements still need to be explored.
+// TODO Copy the static_asserts from x64/x86 assembler files.
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmTrapInstructionLength = 4;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+static constexpr Scale ScalePointer = TimesEight;
+
+class Assembler : public AssemblerMIPSShared {
+ public:
+ Assembler() : AssemblerMIPSShared() {}
+
+ static uintptr_t GetPointer(uint8_t*);
+
+ using AssemblerMIPSShared::bind;
+
+ static void Bind(uint8_t* rawCode, const CodeLabel& label);
+
+ void processCodeLabels(uint8_t* rawCode);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ static uint32_t PatchWrite_NearCallSize();
+
+ static uint64_t ExtractLoad64Value(Instruction* inst0);
+ static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
+ static void WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value);
+
+ static void PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+
+ static uint64_t ExtractInstructionImmediate(uint8_t* code);
+
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+}; // Assembler
+
+static const uint32_t NumIntArgRegs = 8;
+static const uint32_t NumFloatArgRegs = NumIntArgRegs;
+
+static inline bool GetIntArgReg(uint32_t usedArgSlots, Register* out) {
+ if (usedArgSlots < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+static inline bool GetFloatArgReg(uint32_t usedArgSlots, FloatRegister* out) {
+ if (usedArgSlots < NumFloatArgRegs) {
+ *out = FloatRegister::FromCode(f12.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, once the actual
+// argument registers are exhausted, it falls back on whichever CallTempReg*
+// registers do not overlap the argument registers, and only fails once those
+// run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ MOZ_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out)) {
+ return true;
+ }
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Assembler_mips64_h */
diff --git a/js/src/jit/mips64/CodeGenerator-mips64.cpp b/js/src/jit/mips64/CodeGenerator-mips64.cpp
new file mode 100644
index 0000000000..22e45663db
--- /dev/null
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -0,0 +1,586 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/CodeGenerator-mips64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
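+// On MIPS64 a Value is boxed in a single 64-bit register (punboxing), so a
+// ValueOperand wraps exactly one register.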
+ValueOperand CodeGeneratorMIPS64::ToValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand CodeGeneratorMIPS64::ToTempValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LAllocation* in = box->getOperand(0);
+ ValueOperand result = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ MUnbox* mir = unbox->mir();
+
+ Register result = ToRegister(unbox->output());
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ Label bail;
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.fallibleUnboxInt32(value, result, &bail);
+ break;
+ case MIRType::Boolean:
+ masm.fallibleUnboxBoolean(value, result, &bail);
+ break;
+ case MIRType::Object:
+ masm.fallibleUnboxObject(value, result, &bail);
+ break;
+ case MIRType::String:
+ masm.fallibleUnboxString(value, result, &bail);
+ break;
+ case MIRType::Symbol:
+ masm.fallibleUnboxSymbol(value, result, &bail);
+ break;
+ case MIRType::BigInt:
+ masm.fallibleUnboxBigInt(value, result, &bail);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutFrom(&bail, unbox->snapshot());
+ return;
+ }
+
+ LAllocation* input = unbox->getOperand(LUnbox::Input);
+ if (input->isRegister()) {
+ Register inputReg = ToRegister(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputReg, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputReg, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputReg, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputReg, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputReg, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputReg, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ return;
+ }
+
+ Address inputAddr = ToAddress(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputAddr, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputAddr, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputAddr, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputAddr, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputAddr, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputAddr, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+void CodeGeneratorMIPS64::splitTagForTest(const ValueOperand& value,
+ ScratchTagScope& tag) {
+ masm.splitTag(value.valueReg(), tag);
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+ Register rhsReg;
+ ScratchRegisterScope scratch(masm);
+
+ if (IsConstant(rhs)) {
+ rhsReg = scratch;
+ masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
+ } else if (rhs.value().isGeneralReg()) {
+ rhsReg = ToRegister64(rhs).reg;
+ } else {
+ rhsReg = scratch;
+ masm.loadPtr(ToAddress(rhs.value()), rhsReg);
+ }
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ masm.cmpPtrSet(JSOpToCondition(lir->jsop(), isSigned), lhsReg, rhsReg,
+ output);
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register rhsReg;
+ ScratchRegisterScope scratch(masm);
+
+ if (IsConstant(rhs)) {
+ rhsReg = scratch;
+ masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
+ } else if (rhs.value().isGeneralReg()) {
+ rhsReg = ToRegister64(rhs).reg;
+ } else {
+ rhsReg = scratch;
+ masm.loadPtr(ToAddress(rhs.value()), rhsReg);
+ }
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+ emitBranch(lhsReg, rhsReg, cond, lir->ifTrue(), lir->ifFalse());
+}
+
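+// On MIPS64R6 the divide instructions take an explicit destination register
+// (ddiv/dmod and ddivu/dmodu); on earlier revisions the quotient and
+// remainder are written to LO/HI and read back with mflo/mfhi. The MIPSR6
+// ifdefs below select between the two encodings.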
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
+ if (lir->mir()->isMod()) {
+ masm.ma_xor(output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+#ifdef MIPSR6
+ if (lir->mir()->isMod()) {
+ masm.as_dmod(output, lhs, rhs);
+ } else {
+ masm.as_ddiv(output, lhs, rhs);
+ }
+#else
+ masm.as_ddiv(lhs, rhs);
+ if (lir->mir()->isMod()) {
+ masm.as_mfhi(output);
+ } else {
+ masm.as_mflo(output);
+ }
+#endif
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+#ifdef MIPSR6
+ if (lir->mir()->isMod()) {
+ masm.as_dmodu(output, lhs, rhs);
+ } else {
+ masm.as_ddivu(output, lhs, rhs);
+ }
+#else
+ masm.as_ddivu(lhs, rhs);
+ if (lir->mir()->isMod()) {
+ masm.as_mfhi(output);
+ } else {
+ masm.as_mflo(output);
+ }
+#endif
+ masm.bind(&done);
+}
+
+void CodeGeneratorMIPS64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+#ifdef MIPSR6
+ masm.as_ddiv(/* result= */ dividend, dividend, divisor);
+#else
+ masm.as_ddiv(dividend, divisor);
+ masm.as_mflo(dividend);
+#endif
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorMIPS64::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+#ifdef MIPSR6
+ masm.as_dmod(/* result= */ dividend, dividend, divisor);
+#else
+ masm.as_ddiv(dividend, divisor);
+ masm.as_mfhi(dividend);
+#endif
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+template <typename T>
+void CodeGeneratorMIPS64::emitWasmLoadI64(T* lir) {
+ const MWasmLoad* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ Register ptrReg = ToRegister(lir->ptr());
+ if (mir->base()->type() == MIRType::Int32) {
+ // See comment in visitWasmLoad re the type of 'base'.
+ masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
+ }
+
+ if (IsUnaligned(mir->access())) {
+ masm.wasmUnalignedLoadI64(mir->access(), HeapReg, ptrReg, ptrScratch,
+ ToOutRegister64(lir),
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmLoadI64(mir->access(), HeapReg, ptrReg, ptrScratch,
+ ToOutRegister64(lir));
+ }
+}
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
+ emitWasmLoadI64(lir);
+}
+
+void CodeGenerator::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) {
+ emitWasmLoadI64(lir);
+}
+
+template <typename T>
+void CodeGeneratorMIPS64::emitWasmStoreI64(T* lir) {
+ const MWasmStore* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ Register ptrReg = ToRegister(lir->ptr());
+ if (mir->base()->type() == MIRType::Int32) {
+ // See comment in visitWasmLoad re the type of 'base'.
+ masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
+ }
+
+ if (IsUnaligned(mir->access())) {
+ masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()),
+ HeapReg, ptrReg, ptrScratch,
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+ ptrReg, ptrScratch);
+ }
+}
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+ emitWasmStoreI64(lir);
+}
+
+void CodeGenerator::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir) {
+ emitWasmStoreI64(lir);
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+ "true expr is reused for input");
+
+ if (falseExpr.value().isRegister()) {
+ masm.as_movz(out.reg, ToRegister(falseExpr.value()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.as_dmtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.as_dmfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->isUnsigned()) {
+ masm.ma_dext(output, ToRegister(input), Imm32(0), Imm32(32));
+ } else {
+ masm.ma_sll(output, ToRegister(input), Imm32(0));
+ }
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ if (input->isMemory()) {
+ masm.load32(ToAddress(input), output);
+ } else {
+ masm.ma_sll(output, ToRegister(input), Imm32(0));
+ }
+ } else {
+ MOZ_CRASH("Not implemented.");
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move8SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move16SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Word:
+ masm.move32To64SignExtend(input.reg, output);
+ break;
+ }
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move32To64ZeroExtend(input, Register64(output));
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move64To32(Register64(input), output);
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.reg);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.reg);
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ masm.ma_cmp_set(output, input.reg, zero, Assembler::Equal);
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ MOZ_ASSERT(!input->isConstant());
+ Register inputReg = ToRegister(input);
+ MOZ_ASSERT(inputReg == ToRegister(ins->output()));
+ masm.ma_not(inputReg, inputReg);
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ Label* oolRejoin = ool->rejoin();
+ bool isSaturating = mir->isSaturating();
+
+ if (fromType == MIRType::Double) {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ } else {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ }
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToDouble(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToDouble(input, output);
+ }
+ } else {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToFloat32(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToFloat32(input, output);
+ }
+ }
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ MBasicBlock* ifTrue = lir->ifTrue();
+ MBasicBlock* ifFalse = lir->ifFalse();
+
+ emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
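+// 64-bit atomic loads and stores are implemented as plain 64-bit accesses
+// bracketed by the required memory barriers; naturally aligned 64-bit
+// accesses are assumed to be single-copy atomic on MIPS64.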
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ auto sync = Synchronization::Load();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ }
+ masm.memoryBarrierAfter(sync);
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+ auto sync = Synchronization::Store();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.store64(temp1, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.store64(temp1, dest);
+ }
+ masm.memoryBarrierAfter(sync);
+}
diff --git a/js/src/jit/mips64/CodeGenerator-mips64.h b/js/src/jit/mips64/CodeGenerator-mips64.h
new file mode 100644
index 0000000000..81c30c913e
--- /dev/null
+++ b/js/src/jit/mips64/CodeGenerator-mips64.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_CodeGenerator_mips64_h
+#define jit_mips64_CodeGenerator_mips64_h
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorMIPS64 : public CodeGeneratorMIPSShared {
+ protected:
+ CodeGeneratorMIPS64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorMIPSShared(gen, graph, masm) {}
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue,
+ ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue,
+ ifFalse);
+ }
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+};
+
+typedef CodeGeneratorMIPS64 CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_CodeGenerator_mips64_h */
diff --git a/js/src/jit/mips64/LIR-mips64.h b/js/src/jit/mips64/LIR-mips64.h
new file mode 100644
index 0000000000..4d8228418c
--- /dev/null
+++ b/js/src/jit/mips64/LIR-mips64.h
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_LIR_mips64_h
+#define jit_mips64_LIR_mips64_h
+
+namespace js {
+namespace jit {
+
+class LUnbox : public LInstructionHelper<1, 1, 0> {
+ protected:
+ LUnbox(LNode::Opcode opcode, const LAllocation& input)
+ : LInstructionHelper(opcode) {
+ setOperand(0, input);
+ }
+
+ public:
+ LIR_HEADER(Unbox);
+
+ explicit LUnbox(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LUnbox {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnbox(classOpcode, input), type_(type) {}
+
+ MIRType type() const { return type_; }
+};
+
+class LDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_LIR_mips64_h */
diff --git a/js/src/jit/mips64/Lowering-mips64.cpp b/js/src/jit/mips64/Lowering-mips64.cpp
new file mode 100644
index 0000000000..e9cda9299c
--- /dev/null
+++ b/js/src/jit/mips64/Lowering-mips64.cpp
@@ -0,0 +1,201 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Lowering-mips64.h"
+
+#include "jit/Lowering.h"
+#include "jit/mips64/Assembler-mips64.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void LIRGeneratorMIPS64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ defineTypedPhi(phi, lirIndex);
+}
+
+void LIRGeneratorMIPS64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+LBoxAllocation LIRGeneratorMIPS64::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2, bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+void LIRGeneratorMIPS64::lowerDivI64(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorMIPS64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+void LIRGeneratorMIPS64::lowerModI64(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorMIPS64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+void LIRGeneratorMIPS64::lowerUDivI64(MDiv* div) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorMIPS64::lowerUModI64(MMod* mod) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorMIPS64::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorMIPS64::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorMIPS64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorMIPS64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useRegister(ins->value());
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
+}
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
+ LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* box = unbox->getOperand(0);
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnbox* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new (alloc())
+ LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new (alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new (alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ define(lir, unbox);
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void LIRGeneratorMIPS64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void LIRGeneratorMIPS64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new (alloc()) LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
+}
+
+void LIRGeneratorMIPS64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new (alloc()) LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
+}
+
+void LIRGeneratorMIPS64::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
+}
+
+void LIRGeneratorMIPS64::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
diff --git a/js/src/jit/mips64/Lowering-mips64.h b/js/src/jit/mips64/Lowering-mips64.h
new file mode 100644
index 0000000000..b8543de6d2
--- /dev/null
+++ b/js/src/jit/mips64/Lowering-mips64.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Lowering_mips64_h
+#define jit_mips64_Lowering_mips64_h
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPS64 : public LIRGeneratorMIPSShared {
+ protected:
+ LIRGeneratorMIPS64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorMIPSShared(gen, graph, lirGraph) {}
+
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t);
+ void defineInt64Phi(MPhi*, size_t);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ inline LDefinition tempToUnbox() { return temp(); }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+
+ void lowerDivI64(MDiv* div);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerModI64(MMod* mod);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+};
+
+typedef LIRGeneratorMIPS64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Lowering_mips64_h */
diff --git a/js/src/jit/mips64/MacroAssembler-mips64-inl.h b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
new file mode 100644
index 0000000000..ec166851a3
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
@@ -0,0 +1,845 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MacroAssembler_mips64_inl_h
+#define jit_mips64_MacroAssembler_mips64_inl_h
+
+#include "jit/mips64/MacroAssembler-mips64.h"
+
+#include "vm/BigIntType.h" // JS::BigInt
+
+#include "jit/mips-shared/MacroAssembler-mips-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ movePtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ movePtr(ImmWord(imm.value), dest.reg);
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ moveFromDouble(src, dest.reg);
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ moveToDouble(src.reg, dest);
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
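+  // On MIPS64, a 32-bit sll with shift amount 0 sign-extends the low word,
+  // which is the canonical form for 32-bit values held in 64-bit registers.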
+ ma_sll(dest, src.reg, Imm32(0));
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ ma_dext(dest.reg, src, Imm32(0), Imm32(32));
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move8SignExtend(dest.reg, dest.reg);
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move16SignExtend(dest.reg, dest.reg);
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ ma_sll(dest.reg, src, Imm32(0));
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ ma_sll(dest, src, Imm32(0));
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ ma_dext(dest, src, Imm32(0), Imm32(32));
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ load32(src, dest);
+}
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::notPtr(Register reg) { ma_not(reg, reg); }
+
+void MacroAssembler::andPtr(Register src, Register dest) { ma_and(dest, src); }
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) { ma_and(dest, imm); }
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ ma_li(ScratchRegister, ImmWord(imm.value));
+ ma_and(dest.reg, ScratchRegister);
+}
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ ma_and(dest.reg, src.reg);
+}
+
+void MacroAssembler::and64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ and64(scratch, dest);
+ } else {
+ and64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ ma_li(ScratchRegister, ImmWord(imm.value));
+ ma_or(dest.reg, ScratchRegister);
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ ma_li(ScratchRegister, ImmWord(imm.value));
+ ma_xor(dest.reg, ScratchRegister);
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) { ma_or(dest, src); }
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { ma_or(dest, imm); }
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ ma_or(dest.reg, src.reg);
+}
+
+void MacroAssembler::or64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ or64(scratch, dest);
+ } else {
+ or64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ ma_xor(dest.reg, src.reg);
+}
+
+void MacroAssembler::xor64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ xor64(scratch, dest);
+ } else {
+ xor64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) { ma_xor(dest, src); }
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) { ma_xor(dest, imm); }
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap64(Register64 reg64) {
+ Register reg = reg64.reg;
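+  // dsbh swaps the bytes within each halfword and dshd swaps the halfwords,
+  // which together reverse all eight bytes of the doubleword.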
+ ma_dsbh(reg, reg);
+ ma_dshd(reg, reg);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(Register src, Register dest) {
+ ma_daddu(dest, src);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) { ma_daddu(dest, imm); }
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ movePtr(imm, ScratchRegister);
+ addPtr(ScratchRegister, dest);
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ addPtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::add64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ add64(scratch, dest);
+ } else {
+ add64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ ma_daddu(dest.reg, imm);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ MOZ_ASSERT(dest.reg != ScratchRegister);
+ mov(ImmWord(imm.value), ScratchRegister);
+ ma_daddu(dest.reg, ScratchRegister);
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
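+  // Emit a patchable lui/ori pair loading a placeholder; the caller later
+  // fills in the real immediate via patchSub32FromStackPtr.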
+ CodeOffset offset = CodeOffset(currentOffset());
+ MacroAssemblerMIPSShared::ma_liPatchable(dest, Imm32(0));
+ as_dsubu(dest, StackPointer, dest);
+ return offset;
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ Instruction* lui =
+ (Instruction*)m_buffer.getInst(BufferOffset(offset.offset()));
+ MOZ_ASSERT(lui->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(lui->next()->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ MacroAssemblerMIPSShared::UpdateLuiOriValue(lui, lui->next(), imm.value);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) {
+ as_dsubu(dest, dest, src);
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+ ma_dsubu(dest, dest, imm);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ as_dsubu(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::sub64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ sub64(scratch, dest);
+ } else {
+ sub64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ MOZ_ASSERT(dest.reg != ScratchRegister);
+ mov(ImmWord(imm.value), ScratchRegister);
+ as_dsubu(dest.reg, dest.reg, ScratchRegister);
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(src != scratch);
+ move32(imm, scratch);
+#ifdef MIPSR6
+ as_muhu(dest, src, scratch);
+#else
+ as_multu(src, scratch);
+ as_mfhi(dest);
+#endif
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+#ifdef MIPSR6
+ as_dmulu(srcDest, srcDest, rhs);
+#else
+ as_dmultu(srcDest, rhs);
+ as_mflo(srcDest);
+#endif
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ MOZ_ASSERT(dest.reg != ScratchRegister);
+ mov(ImmWord(imm.value), ScratchRegister);
+#ifdef MIPSR6
+ as_dmulu(dest.reg, ScratchRegister, dest.reg);
+#else
+ as_dmultu(dest.reg, ScratchRegister);
+ as_mflo(dest.reg);
+#endif
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(imm, dest);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+#ifdef MIPSR6
+ as_dmulu(dest.reg, src.reg, dest.reg);
+#else
+ as_dmultu(dest.reg, src.reg);
+ as_mflo(dest.reg);
+#endif
+}
+
+void MacroAssembler::mul64(const Operand& src, const Register64& dest,
+ const Register temp) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ mul64(scratch, dest, temp);
+ } else {
+ mul64(Register64(src.toReg()), dest, temp);
+ }
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ MOZ_ASSERT(src != ScratchRegister);
+ as_daddu(ScratchRegister, src, src);
+ as_daddu(dest, ScratchRegister, src);
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+ ma_li(ScratchRegister, ImmWord(uintptr_t(dest.addr)));
+ as_ld(SecondScratchReg, ScratchRegister, 0);
+ as_daddiu(SecondScratchReg, SecondScratchReg, 1);
+ as_sd(SecondScratchReg, ScratchRegister, 0);
+}
+
+void MacroAssembler::neg64(Register64 reg) { as_dsubu(reg.reg, zero, reg.reg); }
+
+void MacroAssembler::negPtr(Register reg) { as_dsubu(reg, zero, reg); }
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsll(dest, dest, imm);
+}
+
+void MacroAssembler::lshiftPtr(Register shift, Register dest) {
+ ma_dsll(dest, dest, shift);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsll(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::lshift64(Register shift, Register64 dest) {
+ ma_dsll(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsrl(dest, dest, imm);
+}
+
+void MacroAssembler::rshiftPtr(Register shift, Register dest) {
+ ma_dsrl(dest, dest, shift);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsrl(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 dest) {
+ ma_dsrl(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsra(dest, dest, imm);
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsra(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 dest) {
+ ma_dsra(dest.reg, dest.reg, shift);
+}
+
+// ===============================================================
+// Rotation functions
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+
+ if (count.value) {
+ ma_drol(dest.reg, src.reg, count);
+ } else {
+ ma_move(dest.reg, src.reg);
+ }
+}
+
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ ma_drol(dest.reg, src.reg, count);
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+
+ if (count.value) {
+ ma_dror(dest.reg, src.reg, count);
+ } else {
+ ma_move(dest.reg, src.reg);
+ }
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ ma_dror(dest.reg, src.reg, count);
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// Also see below for specializations of cmpPtrSet.
+
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ ma_cmp_set(dest, lhs, ImmWord(uint64_t(rhs.value)), cond);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ as_dclz(dest, src.reg);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ ma_dctz(dest, src.reg);
+}
+
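+// Branch-free SWAR popcount: form 2-bit and then 4-bit partial sums, mask
+// them with 0x0f0f...0f, and accumulate every byte into the top byte with
+// shifted adds; the final shift right by 56 extracts the count.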
+void MacroAssembler::popcnt64(Register64 input, Register64 output,
+ Register tmp) {
+ ma_move(output.reg, input.reg);
+ ma_dsra(tmp, input.reg, Imm32(1));
+ ma_li(ScratchRegister, ImmWord(0x5555555555555555UL));
+ ma_and(tmp, ScratchRegister);
+ ma_dsubu(output.reg, tmp);
+ ma_dsra(tmp, output.reg, Imm32(2));
+ ma_li(ScratchRegister, ImmWord(0x3333333333333333UL));
+ ma_and(output.reg, ScratchRegister);
+ ma_and(tmp, ScratchRegister);
+ ma_daddu(output.reg, tmp);
+ ma_dsrl(tmp, output.reg, Imm32(4));
+ ma_daddu(output.reg, tmp);
+ ma_li(ScratchRegister, ImmWord(0xF0F0F0F0F0F0F0FUL));
+ ma_and(output.reg, ScratchRegister);
+ ma_dsll(tmp, output.reg, Imm32(8));
+ ma_daddu(output.reg, tmp);
+ ma_dsll(tmp, output.reg, Imm32(16));
+ ma_daddu(output.reg, tmp);
+ ma_dsll(tmp, output.reg, Imm32(32));
+ ma_daddu(output.reg, tmp);
+ ma_dsra(output.reg, output.reg, Imm32(56));
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, rhs.reg, success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, rhs.reg, label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ ma_dext(scratch, value.valueReg(), Imm32(0), Imm32(32));
+ ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
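+  // Boxed doubles occupy every tag value up to JSVAL_TAG_MAX_DOUBLE, so the
+  // "is a double" test reduces to an unsigned comparison against that bound.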
+ Condition actual = (cond == Equal) ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNumber(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxBoolean(value, scratch2);
+ ma_b(scratch2, scratch2, label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestString(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxString(value, scratch2);
+ load32(Address(scratch2, JSString::offsetOfLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestSymbol(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ computeEffectiveAddress(address, scratch2);
+ splitTag(scratch2, scratch2);
+ branchTestBigInt(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestBigInt(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigIntTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxBigInt(value, scratch2);
+ load32(Address(scratch2, BigInt::offsetOfDigitLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestPrimitive(cond, scratch2, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ SecondScratchRegisterScope scratch(*this);
+ loadPtr(valaddr, scratch);
+ ma_b(scratch, ImmWord(magic), label, cond);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ branchPtr(cond, lhs, rhs.valueReg(), label);
+}
+
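+// Truncate to a 64-bit integer, fail if FCSR reports an invalid operation,
+// then sign-extend the low 32 bits so the result is the truncated value
+// modulo 2^32.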
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ as_truncld(ScratchDoubleReg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+
+ as_sll(dest, dest, 0);
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ as_truncls(ScratchDoubleReg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+
+ as_sll(dest, dest, 0);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+
+ // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
+ // In the case of -0, the output is zero.
+ // In the case of overflow, the output is:
+ // - MIPS64R2: 2^63-1
+ // - MIPS64R6: saturated
+ // In the case of NaN, the output is:
+ // - MIPS64R2: 2^63-1
+ // - MIPS64R6: 0
+ as_truncld(fpscratch, src);
+ moveFromDouble(fpscratch, dest);
+
+  // Fail on overflow; on MIPS64R2 this also catches NaN inputs.
+ as_sll(scratch, dest, 0);
+ ma_b(dest, scratch, fail, Assembler::NotEqual);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
+ type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
+ // dest := src XOR mask
+ // scratch := dest >> JSVAL_TAG_SHIFT
+ // fail if scratch != 0
+ //
+ // Note: src and dest can be the same register
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ ma_xor(scratch, src.valueReg());
+ ma_move(dest, scratch);
+ ma_dsrl(scratch, scratch, Imm32(JSVAL_TAG_SHIFT));
+ ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// The specializations for cmpPtrSet are outside the braces because
+// check_macroassembler_style can't yet deal with specializations.
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ ImmPtr rhs, Register dest) {
+ loadPtr(lhs, SecondScratchReg);
+ cmpPtrSet(cond, SecondScratchReg, rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ loadPtr(rhs, ScratchRegister);
+ cmpPtrSet(cond, lhs, ScratchRegister, dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ loadPtr(lhs, ScratchRegister);
+ cmpPtrSet(cond, ScratchRegister, rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ load32(rhs, ScratchRegister);
+ cmp32Set(cond, lhs, ScratchRegister, dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ load32(lhs, ScratchRegister);
+ cmp32Set(cond, ScratchRegister, rhs, dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ Register scratch = ScratchRegister;
+ MOZ_ASSERT(src != scratch && dest != scratch);
+ cmpPtrSet(cond, lhs, rhs, scratch);
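+  // MIPSR6 dropped movn/movz, so synthesize the conditional move from
+  // selnez/seleqz; pre-R6 cores use movn directly.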
+#ifdef MIPSR6
+ as_selnez(src, src, scratch);
+ as_seleqz(dest, dest, scratch);
+ as_or(dest, dest, src);
+#else
+ as_movn(dest, src, scratch);
+#endif
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+void MacroAssemblerMIPS64Compat::incrementInt32Value(const Address& addr) {
+ asMasm().add32(Imm32(1), addr);
+}
+
+void MacroAssemblerMIPS64Compat::retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ as_jr(ra);
+ as_nop();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MacroAssembler_mips64_inl_h */
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
new file mode 100644
index 0000000000..2d466d7efd
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -0,0 +1,2852 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/MacroAssembler-mips64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "jit/MoveEmitter.h"
+#include "jit/SharedICRegisters.h"
+#include "util/Memory.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static_assert(sizeof(intptr_t) == 8, "Not 32-bit clean.");
+
+void MacroAssemblerMIPS64Compat::convertBoolToInt32(Register src,
+ Register dest) {
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ ma_and(dest, src, Imm32(0xff));
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToDouble(Register src,
+ FloatRegister dest) {
+ as_mtc1(src, dest);
+ as_cvtdw(dest, dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const Address& src,
+ FloatRegister dest) {
+ ma_ls(dest, src);
+ as_cvtdw(dest, dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ computeScaledAddress(src, ScratchRegister);
+ convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertUInt32ToDouble(Register src,
+ FloatRegister dest) {
+ ma_dext(ScratchRegister, src, Imm32(0), Imm32(32));
+ asMasm().convertInt64ToDouble(Register64(ScratchRegister), dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register src,
+ FloatRegister dest) {
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ MOZ_ASSERT(src != ScratchRegister);
+ MOZ_ASSERT(src != SecondScratchReg);
+
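+  // The input has its top bit set, so it cannot be converted directly:
+  // compute (src >> 1) | (src & 1) to halve it while preserving the rounding
+  // bit, convert that, and double the result.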
+ ma_and(ScratchRegister, src, Imm32(1));
+ ma_dsrl(SecondScratchReg, src, Imm32(1));
+ ma_or(ScratchRegister, SecondScratchReg);
+ as_dmtc1(ScratchRegister, dest);
+ as_cvtdl(dest, dest);
+ asMasm().addDouble(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_dmtc1(src, dest);
+ as_cvtdl(dest, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerMIPS64Compat::convertUInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ ma_dext(ScratchRegister, src, Imm32(0), Imm32(32));
+ asMasm().convertInt64ToFloat32(Register64(ScratchRegister), dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertDoubleToFloat32(FloatRegister src,
+ FloatRegister dest) {
+ as_cvtsd(dest, src);
+}
+
+const int CauseBitPos = int(Assembler::CauseI);
+const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
+const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
+ (1 << int(Assembler::CauseV))) >>
+ int(Assembler::CauseI);
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, control jumps to the
+// fail label. This function overwrites the scratch float register.
+void MacroAssemblerMIPS64Compat::convertDoubleToInt32(FloatRegister src,
+ Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
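+    // -0.0 has only the sign bit set, so rotating its raw bits left by one
+    // yields exactly 1; no other double maps to that value.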
+ moveFromDouble(src, dest);
+ ma_drol(dest, dest, Imm32(1));
+ ma_b(dest, Imm32(1), fail, Assembler::Equal);
+ }
+
+  // Truncate the double to an int32; fail if the result is inexact or invalid.
+ as_truncwd(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+  as_andi(ScratchRegister, ScratchRegister,
+          CauseIOrVMask);  // Mask for the Inexact and Invalid flags.
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPS64Compat::convertDoubleToPtr(FloatRegister src,
+ Register dest, Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromDouble(src, dest);
+ ma_drol(dest, dest, Imm32(1));
+ ma_b(dest, Imm32(1), fail, Assembler::Equal);
+ }
+ as_truncld(ScratchDoubleReg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+ as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, control jumps to the
+// fail label. This function overwrites the scratch float register.
+void MacroAssemblerMIPS64Compat::convertFloat32ToInt32(FloatRegister src,
+ Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromFloat32(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ as_truncws(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+ as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPS64Compat::convertFloat32ToDouble(FloatRegister src,
+ FloatRegister dest) {
+ as_cvtds(dest, src);
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ as_mtc1(src, dest);
+ as_cvtsw(dest, dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(const Address& src,
+ FloatRegister dest) {
+ ma_ls(dest, src);
+ as_cvtsw(dest, dest);
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt64ToDouble(Register64(src), dest);
+}
+
+void MacroAssemblerMIPS64Compat::movq(Register rs, Register rd) {
+ ma_move(rd, rs);
+}
+
+void MacroAssemblerMIPS64::ma_li(Register dest, CodeLabel* label) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+ label->patchAt()->bind(bo.getOffset());
+ label->setLinkMode(CodeLabel::MoveImmediate);
+}
+
+void MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm) {
+ int64_t value = imm.value;
+
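+  // Pick the shortest instruction sequence that materializes the constant,
+  // keyed on how many significant (sign- or zero-extended) bits it needs.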
+ if (-1 == (value >> 15) || 0 == (value >> 15)) {
+ as_addiu(dest, zero, value);
+ return;
+ }
+ if (0 == (value >> 16)) {
+ as_ori(dest, zero, value);
+ return;
+ }
+
+ if (-1 == (value >> 31) || 0 == (value >> 31)) {
+ as_lui(dest, uint16_t(value >> 16));
+ } else if (0 == (value >> 32)) {
+ as_lui(dest, uint16_t(value >> 16));
+ as_dinsu(dest, zero, 32, 32);
+ } else if (-1 == (value >> 47) || 0 == (value >> 47)) {
+ as_lui(dest, uint16_t(value >> 32));
+ if (uint16_t(value >> 16)) {
+ as_ori(dest, dest, uint16_t(value >> 16));
+ }
+ as_dsll(dest, dest, 16);
+ } else if (0 == (value >> 48)) {
+ as_lui(dest, uint16_t(value >> 32));
+ as_dinsu(dest, zero, 32, 32);
+ if (uint16_t(value >> 16)) {
+ as_ori(dest, dest, uint16_t(value >> 16));
+ }
+ as_dsll(dest, dest, 16);
+ } else {
+ as_lui(dest, uint16_t(value >> 48));
+ if (uint16_t(value >> 32)) {
+ as_ori(dest, dest, uint16_t(value >> 32));
+ }
+ if (uint16_t(value >> 16)) {
+ as_dsll(dest, dest, 16);
+ as_ori(dest, dest, uint16_t(value >> 16));
+ as_dsll(dest, dest, 16);
+ } else {
+ as_dsll32(dest, dest, 32);
+ }
+ }
+ if (uint16_t(value)) {
+ as_ori(dest, dest, uint16_t(value));
+ }
+}
+
+// This method generates a lui, dsll and ori instruction block that can be
+// modified by UpdateLoad64Value, either during compilation (e.g.
+// Assembler::bind) or during execution (e.g. jit::PatchJump).
+void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm) {
+ return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmWord imm,
+ LiFlags flags) {
+ if (Li64 == flags) {
+ m_buffer.ensureSpace(6 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode());
+ as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+ as_dsll(dest, dest, 16);
+ as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
+ as_dsll(dest, dest, 16);
+ as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+ } else {
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+ as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
+ as_drotr32(dest, dest, 48);
+ as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dnegu(Register rd, Register rs) {
+ as_dsubu(rd, zero, rs);
+}
+
+// Shifts
+void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift) {
+ if (31 < shift.value) {
+ as_dsll32(rd, rt, shift.value);
+ } else {
+ as_dsll(rd, rt, shift.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift) {
+ if (31 < shift.value) {
+ as_dsrl32(rd, rt, shift.value);
+ } else {
+ as_dsrl(rd, rt, shift.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift) {
+ if (31 < shift.value) {
+ as_dsra32(rd, rt, shift.value);
+ } else {
+ as_dsra(rd, rt, shift.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift) {
+ if (31 < shift.value) {
+ as_drotr32(rd, rt, shift.value);
+ } else {
+ as_drotr(rd, rt, shift.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift) {
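+  // A rotate left by k is implemented as a rotate right by 64 - k.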
+ uint32_t s = 64 - shift.value;
+
+ if (31 < s) {
+ as_drotr32(rd, rt, s);
+ } else {
+ as_drotr(rd, rt, s);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift) {
+ as_dsllv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Register shift) {
+ as_dsrlv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Register shift) {
+ as_dsrav(rd, rt, shift);
+}
+
+void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Register shift) {
+ as_drotrv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Register shift) {
+ as_dsubu(ScratchRegister, zero, shift);
+ as_drotrv(rd, rt, ScratchRegister);
+}
+
+void MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos,
+ Imm32 size) {
+ if (pos.value >= 0 && pos.value < 32) {
+ if (pos.value + size.value > 32) {
+ as_dinsm(rt, rs, pos.value, size.value);
+ } else {
+ as_dins(rt, rs, pos.value, size.value);
+ }
+ } else {
+ as_dinsu(rt, rs, pos.value, size.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos,
+ Imm32 size) {
+ if (pos.value >= 0 && pos.value < 32) {
+ if (size.value > 32) {
+ as_dextm(rt, rs, pos.value, size.value);
+ } else {
+ as_dext(rt, rs, pos.value, size.value);
+ }
+ } else {
+ as_dextu(rt, rs, pos.value, size.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsbh(Register rd, Register rt) {
+ as_dsbh(rd, rt);
+}
+
+void MacroAssemblerMIPS64::ma_dshd(Register rd, Register rt) {
+ as_dshd(rd, rt);
+}
+
+void MacroAssemblerMIPS64::ma_dctz(Register rd, Register rs) {
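+  // Isolate the lowest set bit with rs & -rs; then ctz(rs) is 63 minus the
+  // leading-zero count of that bit. For rs == 0 the select below keeps the
+  // dclz result, which is 64.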
+ ma_dnegu(ScratchRegister, rs);
+ as_and(rd, ScratchRegister, rs);
+ as_dclz(rd, rd);
+ ma_dnegu(SecondScratchReg, rd);
+ ma_daddu(SecondScratchReg, Imm32(0x3f));
+#ifdef MIPSR6
+ as_selnez(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ as_seleqz(rd, rd, ScratchRegister);
+ as_or(rd, rd, SecondScratchReg);
+#else
+ as_movn(rd, SecondScratchReg, ScratchRegister);
+#endif
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, Imm32 imm) {
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_daddiu(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_daddu(rd, rs, ScratchRegister);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs) {
+ as_daddu(rd, rd, rs);
+}
+
+void MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm) {
+ ma_daddu(rd, rd, imm);
+}
+
+void MacroAssemblerMIPS64::ma_add32TestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+ as_daddu(SecondScratchReg, rs, rt);
+ as_addu(rd, rs, rt);
+ ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPS64::ma_add32TestOverflow(Register rd, Register rs,
+ Imm32 imm, Label* overflow) {
+ // Check for signed range because of as_daddiu
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_daddiu(SecondScratchReg, rs, imm.value);
+ as_addiu(rd, rs, imm.value);
+ ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_add32TestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rd != rt);
+ MOZ_ASSERT(rd != scratch2);
+
+ if (rs == rt) {
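+    // rs == rt: the addition overflows exactly when the result's sign
+    // differs from the operands' sign, which the xor exposes in the sign bit.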
+ as_daddu(rd, rs, rs);
+ as_xor(scratch2, rs, rd);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rs != scratch2);
+ MOZ_ASSERT(rt != scratch2);
+
+    // If the signs of rs and rt differ, there is no overflow.
+ as_xor(scratch2, rs, rt);
+ as_nor(scratch2, scratch2, zero);
+
+ as_daddu(rd, rs, rt);
+ as_xor(scratch, rd, rt);
+    as_and(scratch2, scratch, scratch2);
+ }
+
+ ma_b(scratch2, zero, overflow, Assembler::LessThan);
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
+ Imm32 imm, Label* overflow) {
+ ma_li(ScratchRegister, imm);
+ ma_addPtrTestOverflow(rd, rs, ScratchRegister, overflow);
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
+ ImmWord imm, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_addPtrTestOverflow(rd, rs, scratch, overflow);
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rs, Register rt,
+ Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_daddu(rd, rs, rt);
+ as_sltu(scratch2, rd, rt);
+ ma_b(scratch2, scratch2, overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rs, Imm32 imm,
+ Label* overflow) {
+ // Check for signed range because of as_daddiu
+ if (Imm16::IsInSignedRange(imm.value)) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_daddiu(rd, rs, imm.value);
+ as_sltiu(scratch2, rd, imm.value);
+ ma_b(scratch2, scratch2, overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_addPtrTestCarry(cond, rd, rs, ScratchRegister, overflow);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rs, ImmWord imm,
+ Label* overflow) {
+ // Check for signed range because of as_daddiu
+ if (Imm16::IsInSignedRange(imm.value)) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_daddiu(rd, rs, imm.value);
+ as_sltiu(scratch2, rd, imm.value);
+ ma_b(scratch2, scratch2, overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_addPtrTestCarry(cond, rd, rs, scratch, overflow);
+ }
+}
+
+// Subtract.
+void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm) {
+ if (Imm16::IsInSignedRange(-imm.value)) {
+ as_daddiu(rd, rs, -imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_dsubu(rd, rs, ScratchRegister);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs) {
+ as_dsubu(rd, rd, rs);
+}
+
+void MacroAssemblerMIPS64::ma_dsubu(Register rd, Imm32 imm) {
+ ma_dsubu(rd, rd, imm);
+}
+
+void MacroAssemblerMIPS64::ma_sub32TestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+ as_dsubu(SecondScratchReg, rs, rt);
+ as_subu(rd, rs, rt);
+ ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPS64::ma_subPtrTestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT_IF(rs == rd, rs != rt);
+ MOZ_ASSERT(rd != rt);
+ MOZ_ASSERT(rs != scratch2);
+ MOZ_ASSERT(rt != scratch2);
+ MOZ_ASSERT(rd != scratch2);
+
+ Register rs_copy = rs;
+
+ if (rs == rd) {
+ ma_move(scratch2, rs);
+ rs_copy = scratch2;
+ }
+
+ {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rd != scratch);
+
+ as_dsubu(rd, rs, rt);
+    // If the signs of rs and rt are the same, there is no overflow.
+    as_xor(scratch, rs_copy, rt);
+    // Check whether the signs of rd and rs are the same.
+ as_xor(scratch2, rd, rs_copy);
+ as_and(scratch2, scratch2, scratch);
+ }
+
+ ma_b(scratch2, zero, overflow, Assembler::LessThan);
+}
+
+void MacroAssemblerMIPS64::ma_subPtrTestOverflow(Register rd, Register rs,
+ Imm32 imm, Label* overflow) {
+ ma_li(ScratchRegister, imm);
+ ma_subPtrTestOverflow(rd, rs, ScratchRegister, overflow);
+}
+
+void MacroAssemblerMIPS64::ma_dmult(Register rs, Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+#ifdef MIPSR6
+ as_dmul(rs, ScratchRegister, SecondScratchReg);
+ as_dmuh(rs, ScratchRegister, rs);
+ ma_move(rs, SecondScratchReg);
+#else
+ as_dmult(rs, ScratchRegister);
+#endif
+}
+
+void MacroAssemblerMIPS64::ma_mulPtrTestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+#ifdef MIPSR6
+ if (rd == rs) {
+ ma_move(SecondScratchReg, rs);
+ rs = SecondScratchReg;
+ }
+ as_dmul(rd, rs, rt);
+ as_dmuh(SecondScratchReg, rs, rt);
+#else
+ as_dmult(rs, rt);
+ as_mflo(rd);
+ as_mfhi(SecondScratchReg);
+#endif
+ as_dsra32(ScratchRegister, rd, 63);
+ ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+// Memory.
+void MacroAssemblerMIPS64::ma_load(Register dest, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && ZeroExtend != extension &&
+ !Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gslbx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gslhx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gslwx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gsldx(dest, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ if (ZeroExtend == extension) {
+ as_lbu(dest, base, encodedOffset);
+ } else {
+ as_lb(dest, base, encodedOffset);
+ }
+ break;
+ case SizeHalfWord:
+ if (ZeroExtend == extension) {
+ as_lhu(dest, base, encodedOffset);
+ } else {
+ as_lh(dest, base, encodedOffset);
+ }
+ break;
+ case SizeWord:
+ if (ZeroExtend == extension) {
+ as_lwu(dest, base, encodedOffset);
+ } else {
+ as_lw(dest, base, encodedOffset);
+ }
+ break;
+ case SizeDouble:
+ as_ld(dest, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
+
+void MacroAssemblerMIPS64::ma_store(Register data, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gsswx(data, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gssdx(data, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_sb(data, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ as_sh(data, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_sw(data, base, encodedOffset);
+ break;
+ case SizeDouble:
+ as_sd(data, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+void MacroAssemblerMIPS64Compat::computeScaledAddress(const BaseIndex& address,
+ Register dest) {
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+ if (shift) {
+ ma_dsll(ScratchRegister, address.index, Imm32(shift));
+ as_daddu(dest, address.base, ScratchRegister);
+ } else {
+ as_daddu(dest, address.base, address.index);
+ }
+}
+
+void MacroAssemblerMIPS64Compat::computeEffectiveAddress(
+ const BaseIndex& address, Register dest) {
+ computeScaledAddress(address, dest);
+ if (address.offset) {
+ asMasm().addPtr(Imm32(address.offset), dest);
+ }
+}
+
+// Pop/push a full 64-bit GPR from/to the stack.
+void MacroAssemblerMIPS64::ma_pop(Register r) {
+ as_ld(r, StackPointer, 0);
+ as_daddiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void MacroAssemblerMIPS64::ma_push(Register r) {
+ if (r == sp) {
+ // Pushing sp requires one more instruction.
+ ma_move(ScratchRegister, sp);
+ r = ScratchRegister;
+ }
+
+ as_daddiu(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
+ as_sd(r, StackPointer, 0);
+}
+
+// Branch helpers used from within MIPS-specific code.
+void MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ if (imm.value <= INT32_MAX) {
+ ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
+ } else {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_b(Register lhs, Address addr, Label* label,
+ Condition c, JumpKind jumpKind) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_load(ScratchRegister, addr, SizeDouble);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS64::ma_b(Address addr, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ ma_load(SecondScratchReg, addr, SizeDouble);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ ma_load(SecondScratchReg, addr, SizeDouble);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill) {
+ spew("branch .Llabel %p\n", label);
+ if (label->bound()) {
+    // Generate the long jump for calls because the return address has to be
+    // the address immediately after the reserved block.
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jalr(ScratchRegister);
+ if (delaySlotFill == FillDelaySlot) {
+ as_nop();
+ }
+ return;
+ }
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+  // Make the whole branch contiguous in the buffer. The six instructions
+  // emitted below include the delay slot.
+ m_buffer.ensureSpace(6 * sizeof(uint32_t));
+
+ spew("bal .Llabel %p\n", label);
+ BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for long jump.
+ as_nop();
+ as_nop();
+ as_nop();
+ if (delaySlotFill == FillDelaySlot) {
+ as_nop();
+ }
+}
+
+void MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label,
+ JumpKind jumpKind) {
+  // Simply output the label's pointer as its id; note that once a label's
+  // destructor has run, the pointer may be reused.
+ spew("branch .Llabel %p", label);
+ MOZ_ASSERT(code.encode() !=
+ InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::IsInRange(offset)) {
+ jumpKind = ShortJump;
+ }
+
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ code.setBOffImm16(BOffImm16(offset));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ writeInst(code.encode());
+ as_nop();
+ return;
+ }
+
+ if (code.encode() == inst_beq.encode()) {
+ // Handle long jump
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+    // Handle a long conditional branch: invert the condition so the branch
+    // skips over the long-jump sequence emitted below.
+ spew("invert branch .Llabel %p", label);
+ InstImm code_r = invertBranch(code, BOffImm16(7 * sizeof(uint32_t)));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code_r);
+#endif
+ writeInst(code_r.encode());
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Generate open jump and link it to a label.
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ if (jumpKind == ShortJump) {
+    // Make the whole branch contiguous in the buffer.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+    // Indicate that this is a short jump with an offset of 4.
+ code.setBOffImm16(BOffImm16(4));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ return;
+ }
+
+ bool conditional = code.encode() != inst_beq.encode();
+
+  // Make the whole branch contiguous in the buffer. The seven instructions
+  // emitted below include the conditional nop.
+ m_buffer.ensureSpace(7 * sizeof(uint32_t));
+
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for potential long jump.
+ as_nop();
+ as_nop();
+ as_nop();
+ as_nop();
+ if (conditional) {
+ as_nop();
+ }
+}
+
+void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm,
+ Condition c) {
+ if (imm.value <= INT32_MAX) {
+ ma_cmp_set(rd, rs, Imm32(uint32_t(imm.value)), c);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Address address, ImmWord imm,
+ Condition c) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_load(scratch2, address, SizeDouble);
+ ma_cmp_set(rd, scratch2, imm, c);
+}
+
+void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm,
+ Condition c) {
+ ma_cmp_set(rd, rs, ImmWord(uintptr_t(imm.value)), c);
+}
+
+void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Address address, Imm32 imm,
+ Condition c) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_load(scratch2, address, SizeWord, SignExtend);
+ ma_cmp_set(rd, scratch2, imm, c);
+}
+
+// fp instructions
+void MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value) {
+ ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
+
+ if (imm.value != 0) {
+ ma_li(ScratchRegister, imm);
+ moveToDouble(ScratchRegister, dest);
+ } else {
+ moveToDouble(zero, dest);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_mv(FloatRegister src, ValueOperand dest) {
+ as_dmfc1(dest.valueReg(), src);
+}
+
+void MacroAssemblerMIPS64::ma_mv(ValueOperand src, FloatRegister dest) {
+ as_dmtc1(src.valueReg(), dest);
+}
+
+void MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_lwc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gslsx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_lwc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ldc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsldx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_ldc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_sdc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gssdx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_sdc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_swc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsssx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_swc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS64::ma_pop(FloatRegister f) {
+ as_ldc1(f, StackPointer, 0);
+ as_daddiu(StackPointer, StackPointer, sizeof(double));
+}
+
+void MacroAssemblerMIPS64::ma_push(FloatRegister f) {
+ as_daddiu(StackPointer, StackPointer, (int32_t) - sizeof(double));
+ as_sdc1(f, StackPointer, 0);
+}
+
+bool MacroAssemblerMIPS64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ asMasm().PushFrameDescriptor(FrameType::IonJS); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+ asMasm().Push(FramePointer);
+ return true;
+}
+
+void MacroAssemblerMIPS64Compat::move32(Imm32 imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPS64Compat::move32(Register src, Register dest) {
+ ma_move(dest, src);
+}
+
+void MacroAssemblerMIPS64Compat::movePtr(Register src, Register dest) {
+ ma_move(dest, src);
+}
+void MacroAssemblerMIPS64Compat::movePtr(ImmWord imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPS64Compat::movePtr(ImmGCPtr imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest) {
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+void MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm,
+ Register dest) {
+ append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
+ ma_liPatchable(dest, ImmWord(-1));
+}
+
+void MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load8ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load8SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load8SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load16ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load16ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load16SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load16SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load32(const Address& address, Register dest) {
+ ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::load32(const BaseIndex& address,
+ Register dest) {
+ ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address,
+ Register dest) {
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPS64Compat::load32(wasm::SymbolicAddress address,
+ Register dest) {
+ movePtr(address, ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadPtr(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeDouble);
+}
+
+void MacroAssemblerMIPS64Compat::loadPtr(const BaseIndex& src, Register dest) {
+ ma_load(dest, src, SizeDouble);
+}
+
+void MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address,
+ Register dest) {
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadPtr(wasm::SymbolicAddress address,
+ Register dest) {
+ movePtr(address, ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadPrivate(const Address& address,
+ Register dest) {
+ loadPtr(address, dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadUnalignedDouble(
+ const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
+ FloatRegister dest) {
+ computeScaledAddress(src, SecondScratchReg);
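+  // Assemble the unaligned 64-bit load in |temp| with the ldl/ldr pair, then
+  // move the bits into the FPU register.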
+ BufferOffset load;
+ if (Imm16::IsInSignedRange(src.offset) &&
+ Imm16::IsInSignedRange(src.offset + 7)) {
+ load = as_ldl(temp, SecondScratchReg, src.offset + 7);
+ as_ldr(temp, SecondScratchReg, src.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ load = as_ldl(temp, ScratchRegister, 7);
+ as_ldr(temp, ScratchRegister, 0);
+ }
+ append(access, load.getOffset());
+ moveToDouble(temp, dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadUnalignedFloat32(
+ const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
+ FloatRegister dest) {
+ computeScaledAddress(src, SecondScratchReg);
+ BufferOffset load;
+ if (Imm16::IsInSignedRange(src.offset) &&
+ Imm16::IsInSignedRange(src.offset + 3)) {
+ load = as_lwl(temp, SecondScratchReg, src.offset + 3);
+ as_lwr(temp, SecondScratchReg, src.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ load = as_lwl(temp, ScratchRegister, 3);
+ as_lwr(temp, ScratchRegister, 0);
+ }
+ append(access, load.getOffset());
+ moveToFloat32(temp, dest);
+}
+
+void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address) {
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeByte);
+}
+
+void MacroAssemblerMIPS64Compat::store8(Register src, const Address& address) {
+ ma_store(src, address, SizeByte);
+}
+
+void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeByte);
+}
+
+void MacroAssemblerMIPS64Compat::store8(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeByte);
+}
+
+void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const Address& address) {
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPS64Compat::store16(Register src, const Address& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+void MacroAssemblerMIPS64Compat::store16(Register src,
+ const BaseIndex& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPS64Compat::store32(Register src,
+ AbsoluteAddress address) {
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ store32(src, Address(ScratchRegister, 0));
+}
+
+void MacroAssemblerMIPS64Compat::store32(Register src, const Address& address) {
+ ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address) {
+ move32(src, SecondScratchReg);
+ ma_store(SecondScratchReg, address, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::store32(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::store32(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void MacroAssemblerMIPS64Compat::storePtr(ImmWord imm, T address) {
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeDouble);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmWord imm,
+ Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
+ ImmWord imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerMIPS64Compat::storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmPtr imm,
+ Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
+ ImmPtr imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerMIPS64Compat::storePtr(ImmGCPtr imm, T address) {
+ movePtr(imm, SecondScratchReg);
+ storePtr(SecondScratchReg, address);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmGCPtr imm,
+ Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
+ ImmGCPtr imm, BaseIndex address);
+
+void MacroAssemblerMIPS64Compat::storePtr(Register src,
+ const Address& address) {
+ ma_store(src, address, SizeDouble);
+}
+
+void MacroAssemblerMIPS64Compat::storePtr(Register src,
+ const BaseIndex& address) {
+ ma_store(src, address, SizeDouble);
+}
+
+void MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest) {
+ movePtr(ImmPtr(dest.addr), ScratchRegister);
+ storePtr(src, Address(ScratchRegister, 0));
+}
+
+void MacroAssemblerMIPS64Compat::storeUnalignedFloat32(
+ const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
+ const BaseIndex& dest) {
+ computeScaledAddress(dest, SecondScratchReg);
+ moveFromFloat32(src, temp);
+ BufferOffset store;
+ if (Imm16::IsInSignedRange(dest.offset) &&
+ Imm16::IsInSignedRange(dest.offset + 3)) {
+ store = as_swl(temp, SecondScratchReg, dest.offset + 3);
+ as_swr(temp, SecondScratchReg, dest.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ store = as_swl(temp, ScratchRegister, 3);
+ as_swr(temp, ScratchRegister, 0);
+ }
+ append(access, store.getOffset());
+}
+
+void MacroAssemblerMIPS64Compat::storeUnalignedDouble(
+ const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
+ const BaseIndex& dest) {
+ computeScaledAddress(dest, SecondScratchReg);
+ moveFromDouble(src, temp);
+
+ BufferOffset store;
+ if (Imm16::IsInSignedRange(dest.offset) &&
+ Imm16::IsInSignedRange(dest.offset + 7)) {
+ store = as_sdl(temp, SecondScratchReg, dest.offset + 7);
+ as_sdr(temp, SecondScratchReg, dest.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ store = as_sdl(temp, ScratchRegister, 7);
+ as_sdr(temp, ScratchRegister, 0);
+ }
+ append(access, store.getOffset());
+}
+
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+ as_roundwd(ScratchDoubleReg, input);
+ ma_li(ScratchRegister, Imm32(255));
+ as_mfc1(output, ScratchDoubleReg);
+#ifdef MIPSR6
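+  // Branchless clamp using R6 select instructions: negative results become 0
+  // and results >= 255 are replaced with 255.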
+ as_slti(SecondScratchReg, output, 0);
+ as_seleqz(output, output, SecondScratchReg);
+ as_sltiu(SecondScratchReg, output, 255);
+ as_selnez(output, output, SecondScratchReg);
+ as_seleqz(ScratchRegister, ScratchRegister, SecondScratchReg);
+ as_or(output, output, ScratchRegister);
+#else
+ zeroDouble(ScratchDoubleReg);
+ as_sltiu(SecondScratchReg, output, 255);
+ as_colt(DoubleFloat, ScratchDoubleReg, input);
+  // If the result is >= 255, clamp it to 255.
+ as_movz(output, ScratchRegister, SecondScratchReg);
+  // If !(input > 0), set the result to 0.
+ as_movf(output, zero);
+#endif
+}
+
+void MacroAssemblerMIPS64Compat::testNullSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ splitTag(value, SecondScratchReg);
+ ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond);
+}
+
+void MacroAssemblerMIPS64Compat::testObjectSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ splitTag(value, SecondScratchReg);
+ ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond);
+}
+
+void MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ splitTag(value, SecondScratchReg);
+ ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond);
+}
+
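+// On MIPS64, 32-bit shifts sign-extend their result, so a shift by zero both
+// strips the tag bits and leaves a canonically sign-extended int32 in |dest|.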
+void MacroAssemblerMIPS64Compat::unboxInt32(const ValueOperand& operand,
+ Register dest) {
+ ma_sll(dest, operand.valueReg(), Imm32(0));
+}
+
+void MacroAssemblerMIPS64Compat::unboxInt32(Register src, Register dest) {
+ ma_sll(dest, src, Imm32(0));
+}
+
+void MacroAssemblerMIPS64Compat::unboxInt32(const Address& src, Register dest) {
+ load32(Address(src.base, src.offset), dest);
+}
+
+void MacroAssemblerMIPS64Compat::unboxInt32(const BaseIndex& src,
+ Register dest) {
+ computeScaledAddress(src, SecondScratchReg);
+ load32(Address(SecondScratchReg, src.offset), dest);
+}
+
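+// Booleans only use the low 32 payload bits, so unboxing is a zero-extending
+// 32-bit extract (dext).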
+void MacroAssemblerMIPS64Compat::unboxBoolean(const ValueOperand& operand,
+ Register dest) {
+ ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(32));
+}
+
+void MacroAssemblerMIPS64Compat::unboxBoolean(Register src, Register dest) {
+ ma_dext(dest, src, Imm32(0), Imm32(32));
+}
+
+void MacroAssemblerMIPS64Compat::unboxBoolean(const Address& src,
+ Register dest) {
+ ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::unboxBoolean(const BaseIndex& src,
+ Register dest) {
+ computeScaledAddress(src, SecondScratchReg);
+ ma_load(dest, Address(SecondScratchReg, src.offset), SizeWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::unboxDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ as_dmtc1(operand.valueReg(), dest);
+}
+
+void MacroAssemblerMIPS64Compat::unboxDouble(const Address& src,
+ FloatRegister dest) {
+ ma_ld(dest, Address(src.base, src.offset));
+}
+void MacroAssemblerMIPS64Compat::unboxDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ SecondScratchRegisterScope scratch(asMasm());
+ loadPtr(src, scratch);
+ unboxDouble(ValueOperand(scratch), dest);
+}
+
+void MacroAssemblerMIPS64Compat::unboxString(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerMIPS64Compat::unboxString(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerMIPS64Compat::unboxString(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerMIPS64Compat::unboxSymbol(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerMIPS64Compat::unboxSymbol(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerMIPS64Compat::unboxSymbol(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerMIPS64Compat::unboxBigInt(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxBigInt(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxBigInt(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxObject(const ValueOperand& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxObject(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxObject(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxValue(const ValueOperand& src,
+ AnyRegister dest,
+ JSValueType type) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ ma_b(&end, ShortJump);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr(), type);
+ }
+}
+
+void MacroAssemblerMIPS64Compat::boxDouble(FloatRegister src,
+ const ValueOperand& dest,
+ FloatRegister) {
+ as_dmfc1(dest.valueReg(), src);
+}
+
+void MacroAssemblerMIPS64Compat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest) {
+ MOZ_ASSERT(src != dest.valueReg());
+ boxValue(type, src, dest.valueReg());
+}
+
+void MacroAssemblerMIPS64Compat::boolValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertBoolToInt32(operand.valueReg(), ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPS64Compat::int32ValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+}
+
+void MacroAssemblerMIPS64Compat::boolValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertBoolToInt32(operand.valueReg(), ScratchRegister);
+ convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPS64Compat::int32ValueToFloat32(
+ const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadConstantFloat32(float f,
+ FloatRegister dest) {
+ ma_lis(dest, f);
+}
+
+void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const Address& src,
+ FloatRegister dest) {
+ Label notInt32, end;
+ // If it's an int, convert it to double.
+ loadPtr(Address(src.base, src.offset), ScratchRegister);
+ ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+ loadPtr(Address(src.base, src.offset), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ unboxDouble(src, dest);
+ bind(&end);
+}
+
+void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const BaseIndex& addr,
+ FloatRegister dest) {
+ Label notInt32, end;
+
+ // If it's an int, convert it to double.
+ computeScaledAddress(addr, SecondScratchReg);
+ // Since we only have one scratch, we need to stomp over it with the tag.
+ loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
+ ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+ computeScaledAddress(addr, SecondScratchReg);
+ loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+  // Recompute the address that had been stored in the scratch register,
+  // since the scratch register was overwritten when loading the tag.
+ computeScaledAddress(addr, SecondScratchReg);
+ unboxDouble(Address(SecondScratchReg, 0), dest);
+ bind(&end);
+}
+
+void MacroAssemblerMIPS64Compat::loadConstantDouble(double dp,
+ FloatRegister dest) {
+ ma_lid(dest, dp);
+}
+
+Register MacroAssemblerMIPS64Compat::extractObject(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
+ ma_dext(scratch, scratch, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ return scratch;
+}
+
+Register MacroAssemblerMIPS64Compat::extractTag(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
+ ma_dext(scratch, scratch, Imm32(JSVAL_TAG_SHIFT),
+ Imm32(64 - JSVAL_TAG_SHIFT));
+ return scratch;
+}
+
+Register MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address,
+ Register scratch) {
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, Operand dst) {
+ storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
+ const BaseIndex& dest) {
+ computeScaledAddress(dest, SecondScratchReg);
+ storeValue(val, Address(SecondScratchReg, dest.offset));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
+ BaseIndex dest) {
+ computeScaledAddress(dest, ScratchRegister);
+
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+
+ storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
+ const Address& dest) {
+ storePtr(val.valueReg(), Address(dest.base, dest.offset));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
+ Address dest) {
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
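+  // For int32 and boolean payloads, store the 32-bit payload and then the
+  // upper half of the shifted tag at offset + 4; for everything else, build
+  // the boxed value in SecondScratchReg and store it with one 64-bit store.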
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ store32(reg, dest);
+ JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
+ store32(((Imm64(tag)).secondHalf()), Address(dest.base, dest.offset + 4));
+ } else {
+ ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT));
+ ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+ }
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(const Value& val, Address dest) {
+ if (val.isGCThing()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), SecondScratchReg);
+ } else {
+ ma_li(SecondScratchReg, ImmWord(val.asRawBits()));
+ }
+ storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(const Value& val, BaseIndex dest) {
+ computeScaledAddress(dest, ScratchRegister);
+
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+ storeValue(val, Address(ScratchRegister, offset));
+}
+
+void MacroAssemblerMIPS64Compat::loadValue(const BaseIndex& addr,
+ ValueOperand val) {
+ computeScaledAddress(addr, SecondScratchReg);
+ loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+void MacroAssemblerMIPS64Compat::loadValue(Address src, ValueOperand val) {
+ loadPtr(Address(src.base, src.offset), val.valueReg());
+}
+
+void MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload,
+ ValueOperand dest) {
+ MOZ_ASSERT(dest.valueReg() != ScratchRegister);
+ if (payload != dest.valueReg()) {
+ ma_move(dest.valueReg(), payload);
+ }
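+  // Insert the shifted tag into the upper bits of the payload. For int32 and
+  // boolean payloads, also clear bits [32, JSVAL_TAG_SHIFT) so the boxed
+  // value is canonical.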
+ ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT),
+ Imm32(64 - JSVAL_TAG_SHIFT));
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_dins(dest.valueReg(), zero, Imm32(32), Imm32(JSVAL_TAG_SHIFT - 32));
+ }
+}
+
+void MacroAssemblerMIPS64Compat::pushValue(ValueOperand val) {
+  // Allocate stack space for the Value.
+ asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+ // Store Value
+ storeValue(val, Address(StackPointer, 0));
+}
+
+void MacroAssemblerMIPS64Compat::pushValue(const Address& addr) {
+  // Load the value before allocating stack space; addr.base may be sp.
+ loadPtr(Address(addr.base, addr.offset), ScratchRegister);
+ ma_dsubu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+ storePtr(ScratchRegister, Address(StackPointer, 0));
+}
+
+void MacroAssemblerMIPS64Compat::popValue(ValueOperand val) {
+ as_ld(val.valueReg(), StackPointer, 0);
+ as_daddiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+void MacroAssemblerMIPS64Compat::breakpoint() { as_break(0); }
+
+void MacroAssemblerMIPS64Compat::ensureDouble(const ValueOperand& source,
+ FloatRegister dest,
+ Label* failure) {
+ Label isDouble, done;
+ {
+ ScratchTagScope tag(asMasm(), source);
+ splitTagForTest(source, tag);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+ }
+
+ unboxInt32(source, ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerMIPS64Compat::checkStackAlignment() {
+#ifdef DEBUG
+ Label aligned;
+ as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
+ ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+ as_break(BREAK_STACK_UNALIGNED);
+ bind(&aligned);
+#endif
+}
+
+void MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(
+ Label* profilerExitTail, Label* bailoutTail) {
+ // Reserve space for exception information.
+ int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
+ ~(ABIStackAlignment - 1);
+ asMasm().subPtr(Imm32(size), StackPointer);
+  ma_move(a0, StackPointer); // Use a0 since it is the first function argument.
+
+ // Call the handler.
+ using Fn = void (*)(ResumeFromException * rfe);
+ asMasm().setupUnalignedABICall(a1);
+ asMasm().passABIArg(a0);
+ asMasm().callWithABI<Fn, HandleException>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ // Already clobbered a0, so use it...
+ load32(Address(StackPointer, ResumeFromException::offsetOfKind()), a0);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::WasmCatch),
+ &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+  // We're going to return via the Ion calling convention.
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by the finally block: the exception and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1);
+ loadValue(Address(sp, ResumeFromException::offsetOfException()), exception);
+
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(sp, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp);
+
+ pushValue(exception);
+ pushValue(BooleanValue(true));
+ jump(a0);
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(StackPointer, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+  // If profiling is enabled, then update the lastProfilingFrame to refer to
+  // the caller frame before returning. This code is shared by ForcedReturnIon
+ // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ma_move(StackPointer, FramePointer);
+ pop(FramePointer);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ loadPtr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), a2);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(ReturnReg, Imm32(1));
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(InstanceReg, ImmWord(wasm::FailInstanceReg));
+ ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a1);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a1);
+}
+
+CodeOffset MacroAssemblerMIPS64Compat::toggledJump(Label* label) {
+ CodeOffset ret(nextOffset().getOffset());
+ ma_b(label);
+ return ret;
+}
+
+CodeOffset MacroAssemblerMIPS64Compat::toggledCall(JitCode* target,
+ bool enabled) {
+ BufferOffset bo = nextOffset();
+ CodeOffset offset(bo.getOffset());
+ addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
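+  // Emit jalr/nop when enabled and two nops when disabled so the call site
+  // always has the same size and can be toggled by patching in place.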
+ if (enabled) {
+ as_jalr(ScratchRegister);
+ as_nop();
+ } else {
+ as_nop();
+ as_nop();
+ }
+ MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
+ ToggledCallSize(nullptr));
+ return offset;
+}
+
+void MacroAssemblerMIPS64Compat::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerMIPS64Compat::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+ asMasm().subPtr(imm32, StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ reserveStack(reserved);
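+  // Store GPRs first, then FPRs, at decreasing offsets from the top of the
+  // reserved block; |diff| must reach exactly zero once everything is stored.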
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diff));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ storeDouble(*iter, Address(StackPointer, diff));
+ }
+ MOZ_ASSERT(diff == 0);
+}
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ if (!ignore.has(*iter)) {
+ loadPtr(Address(StackPointer, diff), *iter);
+ }
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ if (!ignore.has(*iter)) {
+ loadDouble(Address(StackPointer, diff), *iter);
+ }
+ }
+ MOZ_ASSERT(diff == 0);
+ freeStack(reserved);
+}
+
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register) {
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ unsigned numFpu = fpuSet.size();
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ MOZ_ASSERT(dest.offset >= diffG + diffF);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ dest.offset -= sizeof(intptr_t);
+ storePtr(*iter, dest);
+ }
+ MOZ_ASSERT(diffG == 0);
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ dest.offset -= reg.size();
+ if (reg.isDouble()) {
+ storeDouble(reg, dest);
+ } else if (reg.isSingle()) {
+ storeFloat32(reg, dest);
+ } else {
+ MOZ_CRASH("Unknown register type.");
+ }
+ }
+ MOZ_ASSERT(numFpu == 0);
+ diffF -= diffF % sizeof(uintptr_t);
+ MOZ_ASSERT(diffF == 0);
+}
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ ma_move(scratch, StackPointer);
+
+ // Force sp to be aligned
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+ storePtr(scratch, Address(StackPointer, 0));
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+  // Reserve space for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Save $ra because call is going to clobber it. Restore it in
+ // callWithABIPost. NOTE: This is needed for calls from SharedIC.
+ // Maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+  // Load the callee into t9; no instruction between this move and the call
+  // should clobber it. We can't call through |fun| directly because it may be
+  // one of the IntArg registers clobbered before the call.
+ ma_move(t9, fun);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ // Load the callee in t9, as above.
+ loadPtr(Address(fun.base, fun.offset), t9);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Move
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
+ return;
+ }
+
+ FloatRegister scratch = ScratchDoubleReg;
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ if (src == dest) {
+ return;
+ }
+ movePtr(src.valueReg(), dest.valueReg());
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ if (!src.isGCThing()) {
+ ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
+ return;
+ }
+
+ writeDataRelocation(src);
+ movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ branchValueIsNurseryCellImpl(cond, address, temp, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ branchValueIsNurseryCellImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
+ const T& value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ Label done;
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+
+ // temp may be InvalidReg, use scratch2 instead.
+ SecondScratchRegisterScope scratch2(*this);
+
+ getGCThingValueChunk(value, scratch2);
+ loadPtr(Address(scratch2, gc::ChunkStoreBufferOffset), scratch2);
+ branchPtr(InvertCondition(cond), scratch2, ImmWord(0), label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(lhs.valueReg() != scratch);
+ moveValue(rhs, ValueOperand(scratch));
+ ma_b(lhs.valueReg(), scratch, label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ boxDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ if (value.constant()) {
+ storeValue(value.value(), dest);
+ } else {
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
+ dest);
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+void MacroAssembler::PushBoxed(FloatRegister reg) {
+ subFromStackPtr(Imm32(sizeof(double)));
+ boxDouble(reg, Address(getStackPointer(), 0));
+ adjustFrame(sizeof(double));
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ ma_b(index, boundsCheckLimit, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ SecondScratchRegisterScope scratch2(*this);
+ load32(boundsCheckLimit, scratch2);
+ ma_b(index, scratch2, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ ma_b(index.reg, boundsCheckLimit.reg, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(boundsCheckLimit, scratch2);
+ ma_b(index.reg, scratch2, ok, cond);
+}
+
+void MacroAssembler::widenInt32(Register r) {
+ // I *think* this is correct. It may be redundant.
+ move32To64SignExtend(r, Register64(r));
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ as_truncld(ScratchDoubleReg, input);
+ moveFromDouble(ScratchDoubleReg, output);
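+  // The truncation above produced a 64-bit integer; it fits in a uint32
+  // exactly when the upper 32 bits are zero, otherwise take the OOL path.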
+ ma_dsrl(ScratchRegister, output, Imm32(32));
+ as_sll(output, output, 0);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ as_truncls(ScratchDoubleReg, input);
+ moveFromDouble(ScratchDoubleReg, output);
+ ma_dsrl(ScratchRegister, output, Imm32(32));
+ as_sll(output, output, 0);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output) {
+ wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch,
+ Register64 output, Register tmp) {
+ wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register tmp) {
+ wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+
+ as_truncld(ScratchDoubleReg, input);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, output.reg);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+ Register output = output_.reg;
+
+ Label done;
+
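+  // Inputs that truncate into [0, INT64_MAX] are handled directly. Larger
+  // inputs are biased down by 2^63 before truncating and the bias is added
+  // back to the integer result; FCSR's CauseV flag sends out-of-range inputs
+  // to the OOL path.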
+ as_truncld(ScratchDoubleReg, input);
+ // ma_li INT64_MAX
+ ma_li(SecondScratchReg, Imm32(-1));
+ ma_dext(SecondScratchReg, SecondScratchReg, Imm32(0), Imm32(63));
+ moveFromDouble(ScratchDoubleReg, output);
+  // For inputs in the (-1 : INT64_MAX] range there is nothing more to do.
+ ma_b(output, SecondScratchReg, &done, Assembler::Below, ShortJump);
+
+ loadConstantDouble(double(INT64_MAX + 1ULL), ScratchDoubleReg);
+ // ma_li INT64_MIN
+ ma_daddu(SecondScratchReg, Imm32(1));
+ as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
+ as_truncld(ScratchDoubleReg, ScratchDoubleReg);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, output);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_daddu(output, SecondScratchReg);
+
+  // Guard against negative values that result in 0 due to precision loss.
+ as_sltiu(SecondScratchReg, output, 1);
+ ma_or(ScratchRegister, SecondScratchReg);
+
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ MOZ_ASSERT(tempFloat.isInvalid());
+
+ as_truncls(ScratchDoubleReg, input);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, output.reg);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ MOZ_ASSERT(tempFloat.isInvalid());
+ Register output = output_.reg;
+
+ Label done;
+
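+  // Same approach as wasmTruncateDoubleToUInt64, using the single-precision
+  // truncate instruction.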
+ as_truncls(ScratchDoubleReg, input);
+ // ma_li INT64_MAX
+ ma_li(SecondScratchReg, Imm32(-1));
+ ma_dext(SecondScratchReg, SecondScratchReg, Imm32(0), Imm32(63));
+ moveFromDouble(ScratchDoubleReg, output);
+  // For inputs in the (-1 : INT64_MAX] range there is nothing more to do.
+ ma_b(output, SecondScratchReg, &done, Assembler::Below, ShortJump);
+
+ loadConstantFloat32(float(INT64_MAX + 1ULL), ScratchFloat32Reg);
+ // ma_li INT64_MIN
+ ma_daddu(SecondScratchReg, Imm32(1));
+ as_subs(ScratchFloat32Reg, input, ScratchFloat32Reg);
+ as_truncls(ScratchDoubleReg, ScratchFloat32Reg);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, output);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_daddu(output, SecondScratchReg);
+
+  // Guard against negative values that result in 0 due to precision loss.
+ as_sltiu(SecondScratchReg, output, 1);
+ ma_or(ScratchRegister, SecondScratchReg);
+
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssemblerMIPS64Compat::wasmLoadI64Impl(
+ const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ MOZ_ASSERT(!access.isZeroExtendSimd128Load());
+ MOZ_ASSERT(!access.isSplatSimd128Load());
+ MOZ_ASSERT(!access.isWidenSimd128Load());
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ BaseIndex address(memoryBase, ptr, TimesOne);
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ asMasm().ma_load_unaligned(access, output.reg, address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ asMasm().ma_load(output.reg, address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerMIPS64Compat::wasmStoreI64Impl(
+ const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ BaseIndex address(memoryBase, ptr, TimesOne);
+
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ asMasm().ma_store_unaligned(access, value.reg, address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ asMasm().ma_store(value.reg, address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+template <typename T>
+static void CompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
+ MOZ_ASSERT(expect != output && replace != output);
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
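+  // LL/SC loop: lld loads the current value and scd only succeeds if nothing
+  // else wrote the location in between; retry until the conditional store
+  // reports success.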
+
+ Label tryAgain;
+ Label exit;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+ masm.as_lld(output.reg, SecondScratchReg, 0);
+
+ masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
+ masm.movePtr(replace.reg, ScratchRegister);
+ masm.as_scd(ScratchRegister, SecondScratchReg, 0);
+ masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&exit);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const Address& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+template <typename T>
+static void AtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 value, Register64 output) {
+ MOZ_ASSERT(value != output);
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
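+  // Same lld/scd retry loop as CompareExchange64, but the store is
+  // unconditional, so this is a plain atomic swap.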
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_lld(output.reg, SecondScratchReg, 0);
+ masm.movePtr(value.reg, ScratchRegister);
+ masm.as_scd(ScratchRegister, SecondScratchReg, 0);
+ masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void WasmAtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ const T& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(masm, &access, access.sync(), mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, AtomicOp op,
+ Register64 value, const T& mem, Register64 temp,
+ Register64 output) {
+ MOZ_ASSERT(value != output);
+ MOZ_ASSERT(value != temp);
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
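+  // lld/scd retry loop: load the old value into |output|, compute the new
+  // value into |temp|, and retry if the conditional store fails.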
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_lld(output.reg, SecondScratchReg, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_daddu(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_dsubu(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(temp.reg, output.reg, value.reg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_scd(temp.reg, SecondScratchReg, 0);
+ masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+// ========================================================================
+// Convert floating point.
+
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ as_dmtc1(src.reg, dest);
+ as_cvtdl(dest, dest);
+}
+
+void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
+ as_dmtc1(src.reg, dest);
+ as_cvtsl(dest, dest);
+}
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ MacroAssemblerSpecific::convertUInt64ToDouble(src.reg, dest);
+}
+
+void MacroAssembler::convertUInt64ToFloat32(Register64 src_, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ Register src = src_.reg;
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ MOZ_ASSERT(src != ScratchRegister);
+ MOZ_ASSERT(src != SecondScratchReg);
+
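+  // The top bit is set, so the value does not fit a signed conversion. Halve
+  // it while keeping the low bit as a sticky rounding bit, convert, and then
+  // double the result.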
+ ma_and(ScratchRegister, src, Imm32(1));
+ ma_dsrl(SecondScratchReg, src, Imm32(1));
+ ma_or(ScratchRegister, SecondScratchReg);
+ as_dmtc1(ScratchRegister, dest);
+ as_cvtsl(dest, dest);
+ addFloat32(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_dmtc1(src, dest);
+ as_cvtsl(dest, dest);
+
+ bind(&done);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h
new file mode 100644
index 0000000000..5add3bf1ee
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -0,0 +1,841 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MacroAssembler_mips64_h
+#define jit_mips64_MacroAssembler_mips64_h
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+#include "jit/MoveResolver.h"
+#include "vm/BytecodeUtil.h"
+#include "wasm/WasmBuiltins.h"
+
+namespace js {
+namespace jit {
+
+enum LiFlags {
+ Li64 = 0,
+ Li48 = 1,
+};
+
+struct ImmShiftedTag : public ImmWord {
+ explicit ImmShiftedTag(JSValueShiftedTag shtag) : ImmWord((uintptr_t)shtag) {}
+
+ explicit ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) {
+ }
+};
+
+struct ImmTag : public Imm32 {
+ ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+static constexpr ValueOperand JSReturnOperand{JSReturnReg};
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value),
+ "The defaultShift is wrong");
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
+
+class ScratchTagScope : public SecondScratchRegisterScope {
+ public:
+ ScratchTagScope(MacroAssembler& masm, const ValueOperand&)
+ : SecondScratchRegisterScope(masm) {}
+};
+
+class ScratchTagScopeRelease {
+ ScratchTagScope* ts_;
+
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
+ ts_->release();
+ }
+ ~ScratchTagScopeRelease() { ts_->reacquire(); }
+};
+
+class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared {
+ public:
+ using MacroAssemblerMIPSShared::ma_b;
+ using MacroAssemblerMIPSShared::ma_cmp_set;
+ using MacroAssemblerMIPSShared::ma_ld;
+ using MacroAssemblerMIPSShared::ma_li;
+ using MacroAssemblerMIPSShared::ma_load;
+ using MacroAssemblerMIPSShared::ma_ls;
+ using MacroAssemblerMIPSShared::ma_sd;
+ using MacroAssemblerMIPSShared::ma_ss;
+ using MacroAssemblerMIPSShared::ma_store;
+ using MacroAssemblerMIPSShared::ma_sub32TestOverflow;
+
+ void ma_li(Register dest, CodeLabel* label);
+ void ma_li(Register dest, ImmWord imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);
+
+ // Negate
+ void ma_dnegu(Register rd, Register rs);
+
+ // Shift operations
+ void ma_dsll(Register rd, Register rt, Imm32 shift);
+ void ma_dsrl(Register rd, Register rt, Imm32 shift);
+ void ma_dsra(Register rd, Register rt, Imm32 shift);
+ void ma_dror(Register rd, Register rt, Imm32 shift);
+ void ma_drol(Register rd, Register rt, Imm32 shift);
+
+ void ma_dsll(Register rd, Register rt, Register shift);
+ void ma_dsrl(Register rd, Register rt, Register shift);
+ void ma_dsra(Register rd, Register rt, Register shift);
+ void ma_dror(Register rd, Register rt, Register shift);
+ void ma_drol(Register rd, Register rt, Register shift);
+
+ void ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size);
+ void ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size);
+
+ // doubleword swap bytes
+ void ma_dsbh(Register rd, Register rt);
+ void ma_dshd(Register rd, Register rt);
+
+ void ma_dctz(Register rd, Register rs);
+
+ // load
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ void ma_daddu(Register rd, Register rs, Imm32 imm);
+ void ma_daddu(Register rd, Register rs);
+ void ma_daddu(Register rd, Imm32 imm);
+ void ma_add32TestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_add32TestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rs, ImmWord imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rs, ImmWord imm,
+ Label* overflow);
+ // subtract
+ void ma_dsubu(Register rd, Register rs, Imm32 imm);
+ void ma_dsubu(Register rd, Register rs);
+ void ma_dsubu(Register rd, Imm32 imm);
+ void ma_sub32TestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+  // Multiplies. For now, there are only a few that we care about.
+ void ma_dmult(Register rs, Imm32 imm);
+ void ma_mulPtrTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+
+ // stack
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
+  // Branches used from within MIPS-specific code.
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Address addr, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ ma_load(ScratchRegister, addr, SizeDouble);
+ ma_b(ScratchRegister, rhs, l, c, jumpKind);
+ }
+
+ void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ void ma_ls(FloatRegister ft, Address address);
+ void ma_ld(FloatRegister ft, Address address);
+ void ma_sd(FloatRegister ft, Address address);
+ void ma_ss(FloatRegister ft, Address address);
+
+ void ma_pop(FloatRegister f);
+ void ma_push(FloatRegister f);
+
+ void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, ImmWord imm, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c);
+
+  // These functions abstract access to the high part of the double-precision
+  // float register. They are intended to work with both 32-bit and 64-bit
+  // floating point coprocessors.
+ void moveToDoubleHi(Register src, FloatRegister dest) { as_mthc1(src, dest); }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_mfhc1(dest, src);
+ }
+
+ void moveToDouble(Register src, FloatRegister dest) { as_dmtc1(src, dest); }
+ void moveFromDouble(FloatRegister src, Register dest) { as_dmfc1(dest, src); }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64 {
+ public:
+ using MacroAssemblerMIPS64::call;
+
+ MacroAssemblerMIPS64Compat() {}
+
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(Register src, FloatRegister dest);
+ void convertInt32ToDouble(const Address& src, FloatRegister dest);
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void movq(Register rs, Register rd);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_daddu(dest, address.base, Imm32(address.offset));
+ }
+
+ void computeEffectiveAddress(const BaseIndex& address, Register dest);
+
+ void j(Label* dest) { ma_b(dest); }
+
+ void mov(Register src, Register dest) { as_ori(dest, src, 0); }
+ void mov(ImmWord imm, Register dest) { ma_li(dest, imm); }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(CodeLabel* label, Register dest) { ma_li(dest, label); }
+ void mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); }
+ void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); }
+
+ void writeDataRelocation(const Value& val) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (val.isGCThing()) {
+ gc::Cell* cell = val.toGCThing();
+ if (cell && gc::IsInsideNursery(cell)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(currentOffset());
+ }
+ }
+
+ void branch(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+ void branch(const Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void nop() { as_nop(); }
+ void ret() {
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmWord imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmGCPtr imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ ma_push(ScratchRegister);
+ }
+ void push(Register reg) { ma_push(reg); }
+ void push(FloatRegister reg) { ma_push(reg); }
+ void pop(Register reg) { ma_pop(reg); }
+ void pop(FloatRegister reg) { ma_pop(reg); }
+
+  // Emit a branch that can be toggled to a non-operation. On MIPS64 we use
+  // the "andi" instruction to toggle the branch.
+ // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Six instructions used in: MacroAssemblerMIPS64Compat::toggledCall
+ return 6 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ CodeOffset offset = movWithPatch(imm, ScratchRegister);
+ ma_push(ScratchRegister);
+ return offset;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm, Li64);
+ return offset;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return offset;
+ }
+
+ void writeCodePointer(CodeLabel* label) {
+ label->patchAt()->bind(currentOffset());
+ label->setLinkMode(CodeLabel::RawPointer);
+ m_buffer.ensureSpace(sizeof(void*));
+ writeInst(-1);
+ writeInst(-1);
+ }
+
+ void jump(Label* label) { ma_b(label); }
+ void jump(Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void jump(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+
+ void jump(JitCode* code) { branch(code); }
+
+ void jump(ImmPtr ptr) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ptr, RelocationKind::HARDCODED);
+ ma_jump(ptr);
+ }
+
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+
+ void splitTag(Register src, Register dest) {
+ ma_dsrl(dest, src, Imm32(JSVAL_TAG_SHIFT));
+ }
+
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ splitTag(value, tag);
+ }
+
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest,
+ JSValueType type) {
+ unboxNonDouble(operand.valueReg(), dest, type);
+ }
+
+ template <typename T>
+ void unboxNonDouble(T src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ load32(src, dest);
+ return;
+ }
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest, type);
+ }
+
+ void unboxNonDouble(Register src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_sll(dest, src, Imm32(0));
+ return;
+ }
+ MOZ_ASSERT(ScratchRegister != src);
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), ScratchRegister);
+ as_xor(dest, src, ScratchRegister);
+ }
+
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ static_assert(JS::detail::ValueObjectOrNullBit ==
+ (uint64_t(0x8) << JSVAL_TAG_SHIFT));
+ ma_dins(dest, zero, Imm32(JSVAL_TAG_SHIFT + 3), Imm32(1));
+ }
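+
+  // For orientation: the object and null tags are arranged to differ only in
+  // ValueObjectOrNullBit, so after XOR-ing out the object tag above, a boxed
+  // null leaves exactly that bit set. Clearing it with ma_dins therefore
+  // yields nullptr for null values and leaves object pointers untouched.
+  // (A sketch of the intent; the authoritative layout lives in JS::detail.)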
+
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ ma_dext(dest, src.valueReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ }
+
+ // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
+ void getGCThingValueChunk(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != dest);
+ loadPtr(src, dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), scratch);
+ as_and(dest, dest, scratch);
+ }
+ void getGCThingValueChunk(const ValueOperand& src, Register dest) {
+ MOZ_ASSERT(src.valueReg() != dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
+ as_and(dest, dest, src.valueReg());
+ }
+
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(Register src, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxInt32(const BaseIndex& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(Register src, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxBoolean(const BaseIndex& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(Register src, Register dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxDouble(const BaseIndex& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(Register src, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxSymbol(const ValueOperand& src, Register dest);
+ void unboxSymbol(Register src, Register dest);
+ void unboxSymbol(const Address& src, Register dest);
+ void unboxBigInt(const ValueOperand& operand, Register dest);
+ void unboxBigInt(Register src, Register dest);
+ void unboxBigInt(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(Register src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type);
+
+ void notBoolean(const ValueOperand& val) {
+ as_xori(val.valueReg(), val.valueReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch);
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractString(const ValueOperand& value,
+ Register scratch) {
+ unboxString(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxSymbol(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch);
+ [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ MOZ_ASSERT(scratch != ScratchRegister);
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& src, FloatRegister dest);
+ void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest);
+ void loadConstantDouble(double dp, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest);
+
+ template <typename T>
+ void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, BaseIndex address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8:
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, SecondScratchReg);
+ } else {
+ unboxNonDouble(value, SecondScratchReg, type);
+ }
+ computeEffectiveAddress(address, ScratchRegister);
+ as_sd(SecondScratchReg, ScratchRegister, 0);
+ return;
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, Address address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8:
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, SecondScratchReg);
+ } else {
+ unboxNonDouble(value, SecondScratchReg, type);
+ }
+ storePtr(SecondScratchReg, address);
+ return;
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void boxValue(JSValueType type, Register src, Register dest) {
+ MOZ_ASSERT(src != dest);
+
+ JSValueTag tag = (JSValueTag)JSVAL_TYPE_TO_TAG(type);
+ ma_li(dest, Imm32(tag));
+ ma_dsll(dest, dest, Imm32(JSVAL_TAG_SHIFT));
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_dins(dest, src, Imm32(0), Imm32(32));
+ } else {
+ ma_dins(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ }
+ }
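+
+  // Worked illustration of the resulting layout (not an additional API): for
+  // an int32 payload 0x2a with tag T, boxValue produces roughly
+  //
+  //   uint64_t boxed = (uint64_t(T) << JSVAL_TAG_SHIFT) | uint32_t(0x2a);
+  //
+  // int32/boolean payloads fill the low 32 bits, while pointer-sized payloads
+  // fill the low JSVAL_TAG_SHIFT bits underneath the tag.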
+
+ void storeValue(ValueOperand val, Operand dst);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+
+ void storePrivateValue(Register src, const Address& dest) {
+ storePtr(src, dest);
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ storePtr(imm, dest);
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(Operand dest, ValueOperand val) {
+ loadValue(dest.toAddress(), val);
+ }
+ void loadValue(const BaseIndex& addr, ValueOperand val);
+
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ if (val.isGCThing()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), ScratchRegister);
+ push(ScratchRegister);
+ } else {
+ push(ImmWord(val.asRawBits()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ boxValue(type, reg, ScratchRegister);
+ push(ScratchRegister);
+ }
+ void pushValue(const Address& addr);
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ loadValue(addr, ValueOperand(scratch));
+ pushValue(ValueOperand(scratch));
+ }
+
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeHalfWord, SignExtend);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeHalfWord, ZeroExtend);
+ }
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeWord, SignExtend);
+ }
+
+ void load64(const Address& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ ma_load_unaligned(dest.reg, src, SizeDouble, ZeroExtend);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp,
+ FloatRegister dest);
+ void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp,
+ FloatRegister dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ template <typename T>
+ void store16Unaligned(Register src, const T& dest) {
+ ma_store_unaligned(src, dest, SizeHalfWord);
+ }
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ template <typename T>
+ void store32Unaligned(Register src, const T& dest) {
+ ma_store_unaligned(src, dest, SizeWord);
+ }
+
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+ void store64(Imm64 imm, const BaseIndex& address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ void store64(Register64 src, Address address) { storePtr(src.reg, address); }
+ void store64(Register64 src, const BaseIndex& address) {
+ storePtr(src.reg, address);
+ }
+
+ template <typename T>
+ void store64Unaligned(Register64 src, const T& dest) {
+ ma_store_unaligned(src.reg, dest, SizeDouble);
+ }
+
+ template <typename T>
+ void storePtr(ImmWord imm, T address);
+ template <typename T>
+ void storePtr(ImmPtr imm, T address);
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp,
+ const BaseIndex& dest);
+ void storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp,
+ const BaseIndex& dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) { as_movd(dest, src); }
+
+ void zeroDouble(FloatRegister reg) { moveToDouble(zero, reg); }
+
+ void convertUInt64ToDouble(Register src, FloatRegister dest);
+
+ void breakpoint();
+
+ void checkStackAlignment();
+
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ // If source is a double, load it into dest. If source is int32,
+ // convert it to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, Register rhs,
+ Register dest);
+
+ void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register64 output, Register tmp);
+ void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+
+ public:
+ void lea(Operand addr, Register dest) {
+ ma_daddu(dest, addr.baseReg(), Imm32(addr.disp()));
+ }
+
+ void abiret() {
+ as_jr(ra);
+ as_nop();
+ }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_movs(dest, src);
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerMIPS64Compat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MacroAssembler_mips64_h */
diff --git a/js/src/jit/mips64/MoveEmitter-mips64.cpp b/js/src/jit/mips64/MoveEmitter-mips64.cpp
new file mode 100644
index 0000000000..70217a37f8
--- /dev/null
+++ b/js/src/jit/mips64/MoveEmitter-mips64.cpp
@@ -0,0 +1,149 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/MoveEmitter-mips64.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterMIPS64::breakCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // The moves may form a cycle:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(getAdjustedAddress(to), temp);
+ masm.storeFloat32(temp, cycleSlot(slotId));
+ } else {
+ masm.storeFloat32(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(getAdjustedAddress(to), temp);
+ masm.storeDouble(temp, cycleSlot(slotId));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.load32(getAdjustedAddress(to), temp);
+ masm.store32(temp, cycleSlot(0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.store32(to.reg(), cycleSlot(0));
+ }
+ break;
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(getAdjustedAddress(to), temp);
+ masm.storePtr(temp, cycleSlot(0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.storePtr(to.reg(), cycleSlot(0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterMIPS64::completeCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // The moves may form a cycle:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(cycleSlot(slotId), temp);
+ masm.storeFloat32(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadFloat32(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(cycleSlot(slotId), temp);
+ masm.storeDouble(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.load32(cycleSlot(0), temp);
+ masm.store32(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.load32(cycleSlot(0), to.reg());
+ }
+ break;
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(cycleSlot(0), temp);
+ masm.storePtr(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.loadPtr(cycleSlot(0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
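+
+// To make the pairing concrete for a simple two-move cycle (A -> B, B -> A):
+// breakCycle runs when the first move is reached and saves the current value
+// of B into cycleSlot(slotId); the original moves are then emitted; and
+// completeCycle runs last, loading the saved value back into A. The ordering
+// itself is driven by MoveEmitterMIPSShared, so treat this as a sketch of the
+// protocol rather than a specification.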
+
+void MoveEmitterMIPS64::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ masm.moveFromDouble(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ if (from.isMemory()) {
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else {
+ masm.moveToDouble(from.reg(), to.floatReg());
+ }
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
+ }
+}
diff --git a/js/src/jit/mips64/MoveEmitter-mips64.h b/js/src/jit/mips64/MoveEmitter-mips64.h
new file mode 100644
index 0000000000..e6dbcd0693
--- /dev/null
+++ b/js/src/jit/mips64/MoveEmitter-mips64.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MoveEmitter_mips64_h
+#define jit_mips64_MoveEmitter_mips64_h
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPS64 : public MoveEmitterMIPSShared {
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+
+ public:
+ MoveEmitterMIPS64(MacroAssembler& masm) : MoveEmitterMIPSShared(masm) {}
+};
+
+typedef MoveEmitterMIPS64 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MoveEmitter_mips64_h */
diff --git a/js/src/jit/mips64/SharedICRegisters-mips64.h b/js/src/jit/mips64/SharedICRegisters-mips64.h
new file mode 100644
index 0000000000..99b263ca1e
--- /dev/null
+++ b/js/src/jit/mips64/SharedICRegisters-mips64.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_SharedICRegisters_mips64_h
+#define jit_mips64_SharedICRegisters_mips64_h
+
+#include "jit/mips64/Assembler-mips64.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls. The R1
+// value should be preserved across calls.
+static constexpr ValueOperand R0(v1);
+static constexpr ValueOperand R1(s4);
+static constexpr ValueOperand R2(a6);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = a5;
+
+// Register used internally by MacroAssemblerMIPS.
+static constexpr Register BaselineSecondScratchReg = SecondScratchReg;
+
+// Note that ICTailCallReg is actually just the link register.
+// In MIPS code emission, we do not clobber ICTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = f0;
+static constexpr FloatRegister FloatReg1 = f2;
+static constexpr FloatRegister FloatReg2 = f4;
+static constexpr FloatRegister FloatReg3 = f6;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_SharedICRegisters_mips64_h */
diff --git a/js/src/jit/mips64/Simulator-mips64.cpp b/js/src/jit/mips64/Simulator-mips64.cpp
new file mode 100644
index 0000000000..0cdac18365
--- /dev/null
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -0,0 +1,4402 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/mips64/Simulator-mips64.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <float.h>
+#include <limits>
+
+#include "jit/AtomicOperations.h"
+#include "jit/mips64/Assembler-mips64.h"
+#include "js/Conversions.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "threading/LockGuard.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#define I8(v) static_cast<int8_t>(v)
+#define I16(v) static_cast<int16_t>(v)
+#define U16(v) static_cast<uint16_t>(v)
+#define I32(v) static_cast<int32_t>(v)
+#define U32(v) static_cast<uint32_t>(v)
+#define I64(v) static_cast<int64_t>(v)
+#define U64(v) static_cast<uint64_t>(v)
+#define I128(v) static_cast<__int128_t>(v)
+#define U128(v) static_cast<__uint128_t>(v)
+
+#define I32_CHECK(v) \
+ ({ \
+ MOZ_ASSERT(I64(I32(v)) == I64(v)); \
+ I32((v)); \
+ })
+
+namespace js {
+namespace jit {
+
+static const Instr kCallRedirInstr =
+ op_special | MAX_BREAK_CODE << FunctionBits | ff_break;
+
+// Utility functions.
+static uint32_t GetFCSRConditionBit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ }
+ return 24 + cc;
+}
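+
+// For example, GetFCSRConditionBit(0) yields 23 and GetFCSRConditionBit(1)
+// yields 25, reflecting the FCSR layout where condition code 0 sits in bit 23
+// and codes 1..7 occupy bits 25..31.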
+
+// -----------------------------------------------------------------------------
+// Various MIPS assembly constants.
+
+class SimInstruction {
+ public:
+ enum {
+ kInstrSize = 4,
+    // On MIPS the PC cannot actually be directly accessed. We behave as if it
+    // were always the address of the instruction currently being executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const { return (instructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
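+
+  // Worked example: bits(25, 21) masks with (2 << (25 - 21)) - 1 == 0x1f and
+  // so extracts the 5-bit rs field of a register-type instruction.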
+
+ // Instruction type.
+ enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 };
+
+ // Get the encoding type of the instruction.
+ Type instructionType() const;
+
+ // Accessors for the different named fields used in the MIPS encoding.
+ inline OpcodeField opcodeValue() const {
+ return static_cast<OpcodeField>(
+ bits(OpcodeShift + OpcodeBits - 1, OpcodeShift));
+ }
+
+ inline int rsValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(RSShift + RSBits - 1, RSShift);
+ }
+
+ inline int rtValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(RTShift + RTBits - 1, RTShift);
+ }
+
+ inline int rdValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(RDShift + RDBits - 1, RDShift);
+ }
+
+ inline int saValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(SAShift + SABits - 1, SAShift);
+ }
+
+ inline int functionValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+
+ inline int fdValue() const { return bits(FDShift + FDBits - 1, FDShift); }
+
+ inline int fsValue() const { return bits(FSShift + FSBits - 1, FSShift); }
+
+ inline int ftValue() const { return bits(FTShift + FTBits - 1, FTShift); }
+
+ inline int frValue() const { return bits(FRShift + FRBits - 1, FRShift); }
+
+ // Float Compare condition code instruction bits.
+ inline int fcccValue() const {
+ return bits(FCccShift + FCccBits - 1, FCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int fbccValue() const {
+ return bits(FBccShift + FBccBits - 1, FBccShift);
+ }
+
+ // Float Branch true/false instruction bit.
+ inline int fbtrueValue() const {
+ return bits(FBtrueShift + FBtrueBits - 1, FBtrueShift);
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline OpcodeField opcodeFieldRaw() const {
+ return static_cast<OpcodeField>(instructionBits() & OpcodeMask);
+ }
+
+ inline int rsFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return instructionBits() & RSMask;
+ }
+
+ // Same as above function, but safe to call within instructionType().
+ inline int rsFieldRawNoAssert() const { return instructionBits() & RSMask; }
+
+ inline int rtFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return instructionBits() & RTMask;
+ }
+
+ inline int rdFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & RDMask;
+ }
+
+ inline int saFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & SAMask;
+ }
+
+ inline int functionFieldRaw() const {
+ return instructionBits() & FunctionMask;
+ }
+
+ // Get the secondary field according to the opcode.
+ inline int secondaryValue() const {
+ OpcodeField op = opcodeFieldRaw();
+ switch (op) {
+ case op_special:
+ case op_special2:
+ return functionValue();
+ case op_cop1:
+ return rsValue();
+ case op_regimm:
+ return rtValue();
+ default:
+ return ff_null;
+ }
+ }
+
+ inline int32_t imm16Value() const {
+ MOZ_ASSERT(instructionType() == kImmediateType);
+ return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+
+ inline int32_t imm26Value() const {
+ MOZ_ASSERT(instructionType() == kJumpType);
+ return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+
+  // Report whether the instruction should not be used in a branch delay slot.
+  bool isForbiddenInBranchDelay() const;
+  // Report whether the instruction 'links', e.g. jal, bal.
+  bool isLinkingInstruction() const;
+  // Report whether the instruction is a debugger break/trap.
+  bool isTrap() const;
+
+ private:
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+bool SimInstruction::isForbiddenInBranchDelay() const {
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_j:
+ case op_jal:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bgez:
+ case rt_bltzal:
+ case rt_bgezal:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ default:
+ return false;
+ };
+}
+
+bool SimInstruction::isLinkingInstruction() const {
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_jal:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bgezal:
+ case rt_bltzal:
+ return true;
+ default:
+ return false;
+ };
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ default:
+ return false;
+ };
+}
+
+bool SimInstruction::isTrap() const {
+ if (opcodeFieldRaw() != op_special) {
+ return false;
+ } else {
+ switch (functionFieldRaw()) {
+ case ff_break:
+ return instructionBits() != kCallRedirInstr;
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ return bits(15, 6) != kWasmTrapCode;
+ default:
+ return false;
+ };
+ }
+}
+
+SimInstruction::Type SimInstruction::instructionType() const {
+ switch (opcodeFieldRaw()) {
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ case ff_sync:
+ case ff_break:
+ case ff_sll:
+ case ff_dsll:
+ case ff_dsll32:
+ case ff_srl:
+ case ff_dsrl:
+ case ff_dsrl32:
+ case ff_sra:
+ case ff_dsra:
+ case ff_dsra32:
+ case ff_sllv:
+ case ff_dsllv:
+ case ff_srlv:
+ case ff_dsrlv:
+ case ff_srav:
+ case ff_dsrav:
+ case ff_mfhi:
+ case ff_mflo:
+ case ff_mult:
+ case ff_dmult:
+ case ff_multu:
+ case ff_dmultu:
+ case ff_div:
+ case ff_ddiv:
+ case ff_divu:
+ case ff_ddivu:
+ case ff_add:
+ case ff_dadd:
+ case ff_addu:
+ case ff_daddu:
+ case ff_sub:
+ case ff_dsub:
+ case ff_subu:
+ case ff_dsubu:
+ case ff_and:
+ case ff_or:
+ case ff_xor:
+ case ff_nor:
+ case ff_slt:
+ case ff_sltu:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ case ff_movz:
+ case ff_movn:
+ case ff_movci:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special2:
+ switch (functionFieldRaw()) {
+ case ff_mul:
+ case ff_clz:
+ case ff_dclz:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special3:
+ switch (functionFieldRaw()) {
+ case ff_ins:
+ case ff_dins:
+ case ff_dinsm:
+ case ff_dinsu:
+ case ff_ext:
+ case ff_dext:
+ case ff_dextm:
+ case ff_dextu:
+ case ff_bshfl:
+ case ff_dbshfl:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_cop1: // Coprocessor instructions.
+ switch (rsFieldRawNoAssert()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ };
+ break;
+ case op_cop1x:
+ return kRegisterType;
+    // 16-bit immediate type instructions, e.g.: addi dest, src, imm16.
+ case op_regimm:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_addi:
+ case op_daddi:
+ case op_addiu:
+ case op_daddiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ case op_lb:
+ case op_lbu:
+ case op_lh:
+ case op_lhu:
+ case op_lw:
+ case op_lwu:
+ case op_lwl:
+ case op_lwr:
+ case op_ll:
+ case op_lld:
+ case op_ld:
+ case op_ldl:
+ case op_ldr:
+ case op_sb:
+ case op_sh:
+ case op_sw:
+ case op_swl:
+ case op_swr:
+ case op_sc:
+ case op_scd:
+ case op_sd:
+ case op_sdl:
+ case op_sdr:
+ case op_lwc1:
+ case op_ldc1:
+ case op_swc1:
+ case op_sdc1:
+ return kImmediateType;
+    // 26-bit immediate type instructions, e.g.: j imm26.
+ case op_j:
+ case op_jal:
+ return kJumpType;
+ default:
+ return kUnsupported;
+ };
+ return kUnsupported;
+}
+
+// Size of the C/C++ argument slots.
+const int kCArgSlotCount = 0;
+const int kCArgsSlotsSize = kCArgSlotCount * sizeof(uintptr_t);
+const int kBranchReturnOffset = 2 * SimInstruction::kInstrSize;
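+
+// kBranchReturnOffset spans two instructions because a MIPS branch-and-link
+// returns past both the branch itself and its delay slot (pc + 8 with 4-byte
+// instructions).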
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
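+
+// For orientation: with kPageShift == 12 and kLineShift == 2, each CachePage
+// models a 4 KB page split into 1024 four-byte lines, and validity_map_ holds
+// one validity byte per line (kValidityMapSize == 1024).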
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache()
+ : Base(SimulatorProcess::singleton_->cacheLock_) {}
+};
+
+mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ SimulatorProcess::ICacheCheckingDisableCount(
+ 1); // Checking is disabled by default.
+SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
+
+int64_t Simulator::StopSimAt = -1;
+
+Simulator* Simulator::Create() {
+ auto sim = MakeUnique<Simulator>();
+ if (!sim) {
+ return nullptr;
+ }
+
+ if (!sim->init()) {
+ return nullptr;
+ }
+
+ int64_t stopAt;
+ char* stopAtStr = getenv("MIPS_SIM_STOP_AT");
+ if (stopAtStr && sscanf(stopAtStr, "%" PRIi64, &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %" PRIi64 "\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim.release();
+}
+
+void Simulator::Destroy(Simulator* sim) { js_delete(sim); }
+
+// The MipsDebugger class is used by the simulator while debugging simulated
+// code.
+class MipsDebugger {
+ public:
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) {}
+
+ void stop(SimInstruction* instr);
+ void debug();
+ // Print all registers with a nice formatting.
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xfffff to easily recognize it.
+ static const Instr kBreakpointInstr = op_special | ff_break | 0xfffff << 6;
+ static const Instr kNopInstr = op_special | ff_sll;
+
+ Simulator* sim_;
+
+ int64_t getRegisterValue(int regnum);
+ int64_t getFPURegisterValueLong(int regnum);
+ float getFPURegisterValueFloat(int regnum);
+ double getFPURegisterValueDouble(int regnum);
+ bool getValue(const char* desc, int64_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+static void UNSUPPORTED() {
+ printf("Unsupported instruction.\n");
+ MOZ_CRASH();
+}
+
+void MipsDebugger::stop(SimInstruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg =
+ *reinterpret_cast<char**>(sim_->get_pc() + SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watchedStops_[code].desc_) {
+ sim_->watchedStops_[code].desc_ = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int64_t MipsDebugger::getRegisterValue(int regnum) {
+ if (regnum == kPCRegister) {
+ return sim_->get_pc();
+ }
+ return sim_->getRegister(regnum);
+}
+
+int64_t MipsDebugger::getFPURegisterValueLong(int regnum) {
+ return sim_->getFpuRegister(regnum);
+}
+
+float MipsDebugger::getFPURegisterValueFloat(int regnum) {
+ return sim_->getFpuRegisterFloat(regnum);
+}
+
+double MipsDebugger::getFPURegisterValueDouble(int regnum) {
+ return sim_->getFpuRegisterDouble(regnum);
+}
+
+bool MipsDebugger::getValue(const char* desc, int64_t* value) {
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+
+ if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc, "%" PRIu64, reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+ return sscanf(desc, "%" PRIi64, value) == 1;
+}
+
+bool MipsDebugger::setBreakpoint(SimInstruction* breakpc) {
+  // Check if a breakpoint can be set. If not, return without any side effects.
+ if (sim_->break_pc_ != nullptr) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+  // We do not set the breakpoint instruction in the code itself here; it will
+  // be set when the debugger shell continues.
+ return true;
+}
+
+bool MipsDebugger::deleteBreakpoint(SimInstruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void MipsDebugger::undoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+}
+
+void MipsDebugger::redoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+ }
+}
+
+void MipsDebugger::printAllRegs() {
+ int64_t value;
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%016" PRIx64 " %20" PRIi64 " ", Registers::GetName(i),
+ value, value);
+
+ if (i % 2) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+
+ value = getRegisterValue(Simulator::LO);
+ printf(" LO: 0x%016" PRIx64 " %20" PRIi64 " ", value, value);
+ value = getRegisterValue(Simulator::HI);
+ printf(" HI: 0x%016" PRIx64 " %20" PRIi64 "\n", value, value);
+ value = getRegisterValue(Simulator::pc);
+ printf(" pc: 0x%016" PRIx64 "\n", value);
+}
+
+void MipsDebugger::printAllRegsIncludingFPU() {
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i++) {
+ printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(i), getFPURegisterValueLong(i),
+ getFPURegisterValueFloat(i), getFPURegisterValueDouble(i));
+ }
+}
+
+static char* ReadLine(const char* prompt) {
+ UniqueChars result;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+      // Since we read a newline, we are done reading the line. This
+      // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result.reset(js_pod_malloc<char>(len + 1));
+ if (!result) {
+ return nullptr;
+ }
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = js_pod_malloc<char>(new_len);
+ if (!new_result) {
+ return nullptr;
+ }
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result.get(), offset * sizeof(char));
+ result.reset(new_result);
+ }
+ // Copy the newly read line into the result.
+ memcpy(result.get() + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result.release();
+}
+
+static void DisassembleInstruction(uint64_t pc) {
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(pc);
+ char hexbytes[256];
+ sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2],
+ bytes[3]);
+ char llvmcmd[1024];
+ sprintf(llvmcmd,
+ "bash -c \"echo -n '%p'; echo '%s' | "
+ "llvm-mc -disassemble -arch=mips64el -mcpu=mips64r2 | "
+ "grep -v pure_instructions | grep -v .text\"",
+ static_cast<void*>(bytes), hexbytes);
+ if (system(llvmcmd)) {
+ printf("Cannot disassemble instruction.\n");
+ }
+}
+
+void MipsDebugger::debug() {
+ intptr_t lastPC = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (lastPC != sim_->get_pc()) {
+ DisassembleInstruction(sim_->get_pc());
+ lastPC = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+        // Ownership is transferred to sim_.
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!instr->isTrap()) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ Register reg = Register::FromName(arg1);
+ FloatRegisters::Encoding fReg = FloatRegisters::FromName(arg1);
+ if (reg != InvalidReg) {
+ value = getRegisterValue(reg.code());
+ printf("%s: 0x%016" PRIx64 " %20" PRIi64 "\n", arg1, value,
+ value);
+ } else if (fReg != FloatRegisters::Invalid) {
+ printf("%3s: 0x%016" PRIx64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(fReg),
+ getFPURegisterValueLong(fReg),
+ getFPURegisterValueFloat(fReg),
+ getFPURegisterValueDouble(fReg));
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->getRegister(Simulator::sp));
+ } else { // Command "mem".
+ int64_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%016" PRIx64 " %20" PRIi64, cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ DisassembleInstruction(uint64_t(cur));
+ cur += SimInstruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) {
+ printf("setting breakpoint failed\n");
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("No flags on MIPS!\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address = reinterpret_cast<SimInstruction*>(
+ stop_pc + SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("printobject <register>\n");
+ printf(" print an object from a register (alias 'po')\n");
+ printf("stack [<words>]\n");
+ printf(" dump stack content (default 10 words)\n");
+ printf("mem <address> [<words>]\n");
+ printf(" dump memory content (default 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print info about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::setLastDebuggerInput(char* input) {
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* page) {
+ SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p) {
+ return p->value();
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page)) {
+ oomUnsafe.crash("Simulator CachePage");
+ }
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ intptr_t start, int size) {
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
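+// Note: flushing is done at cache-line granularity. FlushICacheLocked below
+// rounds the start address down and the size up to CachePage line boundaries,
+// so flushing even a single byte invalidates the whole containing line of the
+// simulated I-cache.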
+static void FlushICacheLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* start_addr, size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePageLocked(i_cache, start, size);
+ }
+}
+
+/* static */
+void SimulatorProcess::checkICacheLocked(SimInstruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(icache(), page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ int cmpret =
+ memcmp(reinterpret_cast<void*>(instr), cache_page->cachedData(offset),
+ SimInstruction::kInstrSize);
+ MOZ_ASSERT(cmpret == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber SimulatorProcess::ICacheHasher::hash(const Lookup& l) {
+ return U32(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool SimulatorProcess::ICacheHasher::match(const Key& k, const Lookup& l) {
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+/* static */
+void SimulatorProcess::FlushICache(void* start_addr, size_t size) {
+ if (!ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ js::jit::FlushICacheLocked(icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator() {
+ // Set up simulator support first. Some of this information is needed to
+ // set up the architecture state.
+
+ // Note, allocation and anything that depends on allocated memory is
+ // deferred until init(), in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < Register::kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
+ LLBit_ = false;
+ LLAddr_ = 0;
+ lastLLValue_ = 0;
+
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
+
+ lastDebuggerInput_ = nullptr;
+}
+
+bool Simulator::init() {
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = js_pod_malloc<char>(stackSize);
+ if (!stack_) {
+ return false;
+ }
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stackSize - 64;
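+ // Resulting layout, from low to high addresses: [stack_, stackLimit_) is the
+ // 1MB safety margin, the ~1MB between stackLimit_ and the initial sp is the
+ // usable stack, and the last 64 bytes of the allocation sit above the
+ // initial sp as an underflow buffer.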
+
+ return true;
+}
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We store the original destination of the jump at a known
+// offset from the swi instruction so the simulator knows what to call.
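+//
+// Illustrative sketch (the call site below is hypothetical, not part of this
+// patch): a caller that would normally emit a call to a host C++ function
+// instead calls the redirected address, e.g.
+//
+//   void* redirected = Simulator::RedirectNativeFunction(
+//       reinterpret_cast<void*>(someHostFunction), Args_General2);
+//
+// Generated code jumping to `redirected` hits kCallRedirInstr; the simulator
+// recognizes it in softwareInterrupt(), looks up the Redirection, and calls
+// someHostFunction on the host with the N64 argument registers.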
+class Redirection {
+ friend class SimulatorProcess;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(kCallRedirInstr),
+ type_(type),
+ next_(nullptr) {
+ next_ = SimulatorProcess::redirection();
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ FlushICacheLocked(SimulatorProcess::icache(), addressOfSwiInstruction(),
+ SimInstruction::kInstrSize);
+ }
+ SimulatorProcess::setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ AutoLockSimulatorCache als;
+
+ Redirection* current = SimulatorProcess::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ // Note: we can't use js_new here because the constructor is private.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = js_pod_malloc<Redirection>(1);
+ if (!redir) {
+ oomUnsafe.crash("Simulator redirection");
+ }
+ new (redir) Redirection(nativeFunction, type);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection =
+ addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+Simulator::~Simulator() { js_free(stack_); }
+
+SimulatorProcess::SimulatorProcess()
+ : cacheLock_(mutexid::SimulatorCacheLock), redirection_(nullptr) {
+ if (getenv("MIPS_SIM_ICACHE_CHECKS")) {
+ ICacheCheckingDisableCount = 0;
+ }
+}
+
+SimulatorProcess::~SimulatorProcess() {
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */
+void* Simulator::RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::Current() {
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ return cx->simulator();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::setRegister(int reg, int64_t value) {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::setFpuRegister(int fpureg, int64_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::setFpuRegisterLo(int fpureg, int32_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterHi(int fpureg, int32_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1) = value;
+}
+
+void Simulator::setFpuRegisterFloat(int fpureg, float value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterDouble(int fpureg, double value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::getRegister(int reg) const {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == 0) {
+ return 0;
+ }
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+int64_t Simulator::getFpuRegister(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t Simulator::getFpuRegisterLo(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]);
+}
+
+int32_t Simulator::getFpuRegisterHi(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1);
+}
+
+float Simulator::getFpuRegisterFloat(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]);
+}
+
+double Simulator::getFpuRegisterDouble(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]);
+}
+
+void Simulator::setCallResultDouble(double result) {
+ setFpuRegisterDouble(f0, result);
+}
+
+void Simulator::setCallResultFloat(float result) {
+ setFpuRegisterFloat(f0, result);
+}
+
+void Simulator::setCallResult(int64_t res) { setRegister(v0, res); }
+
+void Simulator::setCallResult(__int128_t res) {
+ setRegister(v0, I64(res));
+ setRegister(v1, I64(res >> 64));
+}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::setFCSRBit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+bool Simulator::testFCSRBit(uint32_t cc) { return FCSR_ & (1 << cc); }
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
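+// For example, a result too large for the destination type T (rounded above
+// std::numeric_limits<T>::max()) sets the overflow and invalid-operation
+// flag/cause bits and returns true, while a merely inexact but representable
+// result only sets the inexact bits and returns false.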
+template <typename T>
+bool Simulator::setFCSRRoundError(double original, double rounded) {
+ bool ret = false;
+
+ setFCSRBit(kFCSRInexactCauseBit, false);
+ setFCSRBit(kFCSRUnderflowCauseBit, false);
+ setFCSRBit(kFCSROverflowCauseBit, false);
+ setFCSRBit(kFCSRInvalidOpCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ setFCSRBit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ setFCSRBit(kFCSRUnderflowFlagBit, true);
+ setFCSRBit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if ((long double)rounded > (long double)std::numeric_limits<T>::max() ||
+ (long double)rounded < (long double)std::numeric_limits<T>::min()) {
+ setFCSRBit(kFCSROverflowFlagBit, true);
+ setFCSRBit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const { return registers_[pc]; }
+
+JS::ProfilingFrameIterator::RegisterState Simulator::registerState() {
+ wasm::RegisterState state;
+ state.pc = (void*)get_pc();
+ state.fp = (void*)getRegister(fp);
+ state.sp = (void*)getRegister(sp);
+ state.lr = (void*)getRegister(ra);
+ return state;
+}
+
+static bool AllowUnaligned() {
+ static bool hasReadFlag = false;
+ static bool unalignedAllowedFlag = false;
+ if (!hasReadFlag) {
+ unalignedAllowedFlag = !!getenv("MIPS_UNALIGNED");
+ hasReadFlag = true;
+ }
+ return unalignedAllowedFlag;
+}
+
+// MIPS memory instructions (except lw(d)l/r, sw(d)l/r) trap on unaligned
+// memory access, enabling the OS to handle them via trap-and-emulate. Note
+// that simulator runs have the runtime system running directly on the host
+// system and only generated code is executed in the simulator. Since the host
+// is typically IA32 it will not trap on unaligned memory access. We assume
+// that executing correct generated code will not produce unaligned memory
+// access, so we explicitly check for address alignment and trap. Note that
+// trapping does not occur when executing wasm code, which requires that
+// unaligned memory access produces correct results.
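+//
+// For example, a 4-byte access is treated as aligned only when
+// (addr & 3) == 0; otherwise the helpers below print a diagnostic and crash,
+// unless the MIPS_UNALIGNED environment variable is set (AllowUnaligned())
+// or the faulting pc is inside wasm compiled code (wasm::InCompiledCode()).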
+
+uint8_t Simulator::readBU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return 0xff;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int8_t Simulator::readB(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeB(uint64_t addr, uint8_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void Simulator::writeB(uint64_t addr, int8_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+uint16_t Simulator::readHU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return 0xffff;
+ }
+
+ if (AllowUnaligned() || (addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned unsigned halfword read at 0x%016" PRIx64
+ ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int16_t Simulator::readH(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return -1;
+ }
+
+ if (AllowUnaligned() || (addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned signed halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned unsigned halfword write at 0x%016" PRIx64
+ ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint32_t Simulator::readWU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ if (AllowUnaligned() || (addr & 3) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int32_t Simulator::readW(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ if (AllowUnaligned() || (addr & 3) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & 3) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & 3) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+int64_t Simulator::readDW(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ if (AllowUnaligned() || (addr & kPointerAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & kPointerAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+double Simulator::readD(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return NAN;
+ }
+
+ if (AllowUnaligned() || (addr & kDoubleAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned (double) read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeD(uint64_t addr, double value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & kDoubleAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned (double) write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+int Simulator::loadLinkedW(uint64_t addr, SimInstruction* instr) {
+ if ((addr & 3) == 0) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
+ int32_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalW(uint64_t addr, int value,
+ SimInstruction* instr) {
+ // Correct behavior in this case, as defined by the architecture, is to just
+ // return 0, but there is no point in allowing that. It is certainly an
+ // indicator of a bug.
+ if (addr != LLAddr_) {
+ printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+ ", expected: 0x%016" PRIx64 "\n",
+ addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & 3) == 0) {
+ SharedMem<int32_t*> ptr =
+ SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+
+ if (!LLBit_) {
+ return 0;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int32_t expected = int32_t(lastLLValue_);
+ int32_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
+ return (old == expected) ? 1 : 0;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int64_t Simulator::loadLinkedD(uint64_t addr, SimInstruction* instr) {
+ if ((addr & kPointerAlignmentMask) == 0) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ volatile int64_t* ptr = reinterpret_cast<volatile int64_t*>(addr);
+ int64_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr) {
+ // Correct behavior in this case, as defined by the architecture, is to just
+ // return 0, but there is no point in allowing that. It is certainly an
+ // indicator of a bug.
+ if (addr != LLAddr_) {
+ printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+ ", expected: 0x%016" PRIx64 "\n",
+ addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0) {
+ SharedMem<int64_t*> ptr =
+ SharedMem<int64_t*>::shared(reinterpret_cast<int64_t*>(addr));
+
+ if (!LLBit_) {
+ return 0;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int64_t expected = lastLLValue_;
+ int64_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int64_t(value));
+ return (old == expected) ? 1 : 0;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+uintptr_t Simulator::stackLimit() const { return stackLimit_; }
+
+uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; }
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = getRegister(sp);
+ }
+ return newsp <= stackLimit();
+}
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Unsupported instructions use format to print an error and stop execution.
+void Simulator::format(SimInstruction* instr, const char* format) {
+ printf("Simulator found unsupported instruction:\n 0x%016lx: %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
+// Note: With the code below we assume that all runtime calls return a 64-bit
+// result. If they don't, the v1 result register contains a bogus value, which
+// is fine because it is caller-saved.
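+//
+// Each prototype below corresponds to an ABIFunctionType and is invoked from
+// softwareInterrupt(); e.g. for Args_General2 the dispatch amounts to:
+//
+//   Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
+//   int64_t result = target(arg0, arg1);  // arg0/arg1 taken from a0/a1
+//   setCallResult(result);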
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int64_t arg0);
+typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1);
+typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2);
+typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4);
+typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5);
+typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5,
+ int64_t arg6);
+typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7);
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+
+typedef int64_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int_IntDouble)(int64_t arg0, double arg1);
+typedef int64_t (*Prototype_Int_DoubleInt)(double arg0, int64_t arg1);
+typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, int64_t arg1,
+ int64_t arg2);
+typedef int64_t (*Prototype_Int_IntDoubleIntInt)(int64_t arg0, double arg1,
+ int64_t arg2, int64_t arg3);
+
+typedef float (*Prototype_Float32_Float32)(float arg0);
+typedef int64_t (*Prototype_Int_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int64_t arg0);
+typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1,
+ double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0,
+ double arg1,
+ double arg2,
+ double arg3);
+
+typedef int32_t (*Prototype_Int32_General)(int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32)(int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32)(int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32)(int64_t, int32_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32)(
+ int64_t, int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32General)(
+ int64_t, int32_t, int32_t, int32_t, int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32General)(
+ int64_t, int32_t, int32_t, int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int64)(int64_t, int32_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32General)(int64_t, int32_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int64Int64)(int64_t, int32_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32)(int64_t, int32_t,
+ int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32Int32)(
+ int64_t, int32_t, int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneral)(int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralGeneral)(int64_t, int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32Int32)(int64_t, int64_t,
+ int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int32Int32)(int64_t, int64_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32)(int64_t, int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64)(int64_t, int64_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64General)(
+ int64_t, int64_t, int32_t, int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64General)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64General)(
+ int64_t, int64_t, int64_t, int64_t, int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32)(int64_t, int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32)(int64_t, int32_t,
+ int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32General)(int64_t, int32_t,
+ int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32GeneralInt32)(
+ int64_t, int32_t, int32_t, int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32)(
+ int64_t, int64_t, int32_t, int64_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32General)(int64_t, int64_t,
+ int32_t, int64_t);
+typedef int64_t (*Prototype_Int64_General)(int64_t);
+typedef int64_t (*Prototype_Int64_GeneralInt64)(int64_t, int64_t);
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void Simulator::softwareInterrupt(SimInstruction* instr) {
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = (func == ff_break) ? instr->bits(25, 6) : -1;
+
+ // We first check if we met a call_rt_redirected.
+ if (instr->instructionBits() == kCallRedirInstr) {
+#if !defined(USES_N64_ABI)
+ MOZ_CRASH("Only N64 ABI supported.");
+#else
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ uintptr_t nativeFn =
+ reinterpret_cast<uintptr_t>(redirection->nativeFunction());
+
+ int64_t arg0 = getRegister(a0);
+ int64_t arg1 = getRegister(a1);
+ int64_t arg2 = getRegister(a2);
+ int64_t arg3 = getRegister(a3);
+ int64_t arg4 = getRegister(a4);
+ int64_t arg5 = getRegister(a5);
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = getRegister(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target =
+ reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target =
+ reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target =
+ reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target =
+ reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ if (external == intptr_t(&js::wasm::Instance::wake_m32)) {
+ result = int32_t(result);
+ }
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target =
+ reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target =
+ reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target =
+ reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target =
+ reinterpret_cast<Prototype_General7>(external);
+ int64_t arg6 = getRegister(a6);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target =
+ reinterpret_cast<Prototype_General8>(external);
+ int64_t arg6 = getRegister(a6);
+ int64_t arg7 = getRegister(a7);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target =
+ reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0 = getFpuRegisterDouble(12);
+ Prototype_Int_Double target =
+ reinterpret_cast<Prototype_Int_Double>(external);
+ int64_t result = target(dval0);
+ if (external == intptr_t((int32_t(*)(double))JS::ToInt32)) {
+ result = int32_t(result);
+ }
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralGeneralInt64: {
+ Prototype_GeneralGeneralGeneralInt64 target =
+ reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (external == intptr_t(&js::wasm::Instance::wait_i32_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralInt64Int64: {
+ Prototype_GeneralGeneralInt64Int64 target =
+ reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (external == intptr_t(&js::wasm::Instance::wait_i64_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_DoubleInt: {
+ double dval = getFpuRegisterDouble(12);
+ Prototype_Int_DoubleInt target =
+ reinterpret_cast<Prototype_Int_DoubleInt>(external);
+ int64_t result = target(dval, arg1);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(12);
+ Prototype_Int_DoubleIntInt target =
+ reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int64_t result = target(dval, arg1, arg2);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getFpuRegisterDouble(13);
+ Prototype_Int_IntDoubleIntInt target =
+ reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int64_t result = target(arg0, dval, arg2, arg3);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0 = getFpuRegisterDouble(12);
+ Prototype_Double_Double target =
+ reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Float32_Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Int_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Int_Float32 target =
+ reinterpret_cast<Prototype_Int_Float32>(external);
+ int64_t result = target(fval0);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Float32_Float32Float32: {
+ float fval0;
+ float fval1;
+ fval0 = getFpuRegisterFloat(12);
+ fval1 = getFpuRegisterFloat(13);
+ Prototype_Float32_Float32Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32Float32>(external);
+ float fresult = target(fval0, fval1);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target =
+ reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0 = getFpuRegisterDouble(12);
+ Prototype_Double_DoubleInt target =
+ reinterpret_cast<Prototype_Double_DoubleInt>(external);
+ double dresult = target(dval0, arg1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0 = getFpuRegisterDouble(12);
+ double dval1 = getFpuRegisterDouble(13);
+ Prototype_Double_DoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ double dval1 = getFpuRegisterDouble(13);
+ Prototype_Double_IntDouble target =
+ reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(arg0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ double dval1 = getFpuRegisterDouble(13);
+ Prototype_Int_IntDouble target =
+ reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int64_t result = target(arg0, dval1);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(12);
+ double dval1 = getFpuRegisterDouble(13);
+ double dval2 = getFpuRegisterDouble(14);
+ Prototype_Double_DoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(12);
+ double dval1 = getFpuRegisterDouble(13);
+ double dval2 = getFpuRegisterDouble(14);
+ double dval3 = getFpuRegisterDouble(15);
+ Prototype_Double_DoubleDoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(
+ external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int32_General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_General>(nativeFn)(arg0);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32>(nativeFn)(
+ arg0, I32(arg1));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4),
+ I32(arg5));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32General: {
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralInt32Int32Int32Int32General>(nativeFn)(
+ arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4), arg5);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), arg4);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int64>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int64Int64>(
+ nativeFn)(arg0, I32(arg1), arg2, arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3), I32(arg4));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneral>(
+ nativeFn)(arg0, arg1);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneralGeneral>(
+ nativeFn)(arg0, arg1, arg2);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3), I32(arg4));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32>(
+ nativeFn)(arg0, arg1, I32(arg2));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64General>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, arg4);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3, arg4);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_General_GeneralInt32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32>(
+ nativeFn)(arg0, I32(arg1));
+ setRegister(v0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32Int32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ setRegister(v0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32General: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32General>(
+ nativeFn)(arg0, I32(arg1), arg2);
+ setRegister(v0, ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32GeneralInt32: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32Int32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3, I32(arg4));
+ setRegister(v0, ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32: {
+ int64_t arg6 = getRegister(a6);
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, I32(arg4), I32(arg5),
+ I32(arg6));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32General>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int64_General: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_General>(nativeFn)(arg0);
+ setRegister(v0, ret);
+ break;
+ }
+ case js::jit::Args_Int64_GeneralInt64: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_GeneralInt64>(nativeFn)(
+ arg0, arg1);
+ setRegister(v0, ret);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unknown function type.");
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+#endif
+ } else if (func == ff_break && code <= kMaxStopCode) {
+ if (isWatchpoint(code)) {
+ printWatchpoint(code);
+ } else {
+ increaseStopCounter(code);
+ handleStop(code, instr);
+ }
+ } else {
+ switch (func) {
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (instr->bits(15, 6) == kWasmTrapCode) {
+ uint8_t* newPC;
+ if (wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc(int64_t(newPC));
+ return;
+ }
+ }
+ };
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
+ dbg.debug();
+ }
+}
+
+// Stop helper functions.
+bool Simulator::isWatchpoint(uint32_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+void Simulator::printWatchpoint(uint32_t code) {
+ MipsDebugger dbg(this);
+ ++break_count_;
+ printf("\n---- break %d marker: %20" PRIi64 " (instr count: %20" PRIi64
+ ") ----\n",
+ code, break_count_, icount_);
+ dbg.printAllRegs(); // Print registers and continue running.
+}
+
+void Simulator::handleStop(uint32_t code, SimInstruction* instr) {
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (isEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+}
+
+bool Simulator::isStopInstruction(SimInstruction* instr) {
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = U32(instr->bits(25, 6));
+ return (func == ff_break) && code > kMaxWatchpointCode &&
+ code <= kMaxStopCode;
+}
+
+bool Simulator::isEnabledStop(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void Simulator::enableStop(uint32_t code) {
+ if (!isEnabledStop(code)) {
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::disableStop(uint32_t code) {
+ if (isEnabledStop(code)) {
+ watchedStops_[code].count_ |= kStopDisabledBit;
+ }
+}
+
+void Simulator::increaseStopCounter(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+ printf(
+ "Stop counter for code %i has overflowed.\n"
+ "Enabling this code and resetting the counter to 0.\n",
+ code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
+
+// Print a stop status.
+void Simulator::printStopInfo(uint32_t code) {
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
+ count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
+ }
+ }
+}
+
+void Simulator::signalExceptions() {
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0) {
+ MOZ_CRASH("Error: Exception raised.");
+ }
+ }
+}
+
+// Helper function for decodeTypeRegister.
+void Simulator::configureTypeRegister(SimInstruction* instr, int64_t& alu_out,
+ __int128& i128hilo,
+ unsigned __int128& u128hilo,
+ int64_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt) {
+  // The locals holding the instruction fields below are const so that results
+  // are sent back to decodeTypeRegister only through the reference
+  // parameters.
+
+ // Instruction fields.
+ const OpcodeField op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int64_t rs = getRegister(rs_reg);
+ const int32_t rt_reg = instr->rtValue();
+ const int64_t rt = getRegister(rt_reg);
+ const int32_t rd_reg = instr->rdValue();
+ const uint32_t sa = instr->saValue();
+
+ const int32_t fs_reg = instr->fsValue();
+ __int128 temp;
+
+ // ---------- Configuration.
+ switch (op) {
+ case op_cop1: // Coprocessor instructions.
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Handled in DecodeTypeImmed, should never come here.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
+ case rs_mfc1:
+ alu_out = getFpuRegisterLo(fs_reg);
+ break;
+ case rs_dmfc1:
+ alu_out = getFpuRegister(fs_reg);
+ break;
+ case rs_mfhc1:
+ alu_out = getFpuRegisterHi(fs_reg);
+ break;
+ case rs_ctc1:
+ case rs_mtc1:
+ case rs_dmtc1:
+ case rs_mthc1:
+ // Do the store in the execution step.
+ break;
+ case rs_s:
+ case rs_d:
+ case rs_w:
+ case rs_l:
+ case rs_ps:
+ // Do everything in the execution step.
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_cop1x:
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ next_pc = getRegister(instr->rsValue());
+ return_addr_reg = instr->rdValue();
+ break;
+ case ff_sll:
+ alu_out = I64(I32(rt) << sa);
+ break;
+ case ff_dsll:
+ alu_out = rt << sa;
+ break;
+ case ff_dsll32:
+ alu_out = rt << (sa + 32);
+ break;
+ case ff_srl:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = I64(I32(U32(I32_CHECK(rt)) >> sa));
+ } else {
+            // Logical right-rotate of a word by a fixed number of bits. This
+            // is a special case of the SRL instruction, added in MIPS32
+            // Release 2. RS field is equal to 00001.
+ alu_out = I64(I32((U32(I32_CHECK(rt)) >> sa) |
+ (U32(I32_CHECK(rt)) << (32 - sa))));
+ }
+ break;
+ case ff_dsrl:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a double word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = U64(rt) >> sa;
+ } else {
+            // Logical right-rotate of a double word by a fixed number of
+            // bits. This is a special case of the DSRL instruction, added in
+            // MIPS64 Release 2. RS field is equal to 00001.
+ alu_out = (U64(rt) >> sa) | (U64(rt) << (64 - sa));
+ }
+ break;
+ case ff_dsrl32:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a double word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = U64(rt) >> (sa + 32);
+ } else {
+            // Logical right-rotate of a double word by a fixed number of
+            // bits. This is a special case of the DSRL instruction, added in
+            // MIPS64 Release 2. RS field is equal to 00001.
+ alu_out = (U64(rt) >> (sa + 32)) | (U64(rt) << (64 - (sa + 32)));
+ }
+ break;
+ case ff_sra:
+ alu_out = I64(I32_CHECK(rt)) >> sa;
+ break;
+ case ff_dsra:
+ alu_out = rt >> sa;
+ break;
+ case ff_dsra32:
+ alu_out = rt >> (sa + 32);
+ break;
+ case ff_sllv:
+ alu_out = I64(I32(rt) << rs);
+ break;
+ case ff_dsllv:
+ alu_out = rt << rs;
+ break;
+ case ff_srlv:
+ if (sa == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits instruction. SA field is always equal to 0.
+ alu_out = I64(I32(U32(I32_CHECK(rt)) >> rs));
+ } else {
+            // Logical right-rotate of a word by a variable number of bits.
+            // This is a special case of the SRLV instruction, added in MIPS32
+            // Release 2. SA field is equal to 00001.
+ alu_out = I64(I32((U32(I32_CHECK(rt)) >> rs) |
+ (U32(I32_CHECK(rt)) << (32 - rs))));
+ }
+ break;
+ case ff_dsrlv:
+ if (sa == 0) {
+ // Regular logical right-shift of a double word by a variable number
+ // of bits instruction. SA field is always equal to 0.
+ alu_out = U64(rt) >> rs;
+ } else {
+            // Logical right-rotate of a double word by a variable number of
+            // bits. This is a special case of the DSRLV instruction, added in
+            // MIPS64 Release 2. SA field is equal to 00001.
+ alu_out = (U64(rt) >> rs) | (U64(rt) << (64 - rs));
+ }
+ break;
+ case ff_srav:
+ alu_out = I64(I32_CHECK(rt) >> rs);
+ break;
+ case ff_dsrav:
+ alu_out = rt >> rs;
+ break;
+ case ff_mfhi:
+ alu_out = getRegister(HI);
+ break;
+ case ff_mflo:
+ alu_out = getRegister(LO);
+ break;
+ case ff_mult:
+ i128hilo = I64(U32(I32_CHECK(rs))) * I64(U32(I32_CHECK(rt)));
+ break;
+ case ff_dmult:
+ i128hilo = I128(rs) * I128(rt);
+ break;
+ case ff_multu:
+ u128hilo = U64(U32(I32_CHECK(rs))) * U64(U32(I32_CHECK(rt)));
+ break;
+ case ff_dmultu:
+ u128hilo = U128(rs) * U128(rt);
+ break;
+ case ff_add:
+ alu_out = I32_CHECK(rs) + I32_CHECK(rt);
+ if ((alu_out << 32) != (alu_out << 31)) {
+ exceptions[kIntegerOverflow] = 1;
+ }
+ alu_out = I32(alu_out);
+ break;
+ case ff_dadd:
+ temp = I128(rs) + I128(rt);
+ if ((temp << 64) != (temp << 63)) {
+ exceptions[kIntegerOverflow] = 1;
+ }
+ alu_out = I64(temp);
+ break;
+ case ff_addu:
+ alu_out = I32(I32_CHECK(rs) + I32_CHECK(rt));
+ break;
+ case ff_daddu:
+ alu_out = rs + rt;
+ break;
+ case ff_sub:
+ alu_out = I32_CHECK(rs) - I32_CHECK(rt);
+ if ((alu_out << 32) != (alu_out << 31)) {
+ exceptions[kIntegerUnderflow] = 1;
+ }
+ alu_out = I32(alu_out);
+ break;
+ case ff_dsub:
+ temp = I128(rs) - I128(rt);
+ if ((temp << 64) != (temp << 63)) {
+ exceptions[kIntegerUnderflow] = 1;
+ }
+ alu_out = I64(temp);
+ break;
+ case ff_subu:
+ alu_out = I32(I32_CHECK(rs) - I32_CHECK(rt));
+ break;
+ case ff_dsubu:
+ alu_out = rs - rt;
+ break;
+ case ff_and:
+ alu_out = rs & rt;
+ break;
+ case ff_or:
+ alu_out = rs | rt;
+ break;
+ case ff_xor:
+ alu_out = rs ^ rt;
+ break;
+ case ff_nor:
+ alu_out = ~(rs | rt);
+ break;
+ case ff_slt:
+ alu_out = I64(rs) < I64(rt) ? 1 : 0;
+ break;
+ case ff_sltu:
+ alu_out = U64(rs) < U64(rt) ? 1 : 0;
+ break;
+ case ff_sync:
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ do_interrupt = true;
+ break;
+ case ff_tge:
+ do_interrupt = rs >= rt;
+ break;
+ case ff_tgeu:
+ do_interrupt = U64(rs) >= U64(rt);
+ break;
+ case ff_tlt:
+ do_interrupt = rs < rt;
+ break;
+ case ff_tltu:
+ do_interrupt = U64(rs) < U64(rt);
+ break;
+ case ff_teq:
+ do_interrupt = rs == rt;
+ break;
+ case ff_tne:
+ do_interrupt = rs != rt;
+ break;
+ case ff_movn:
+ case ff_movz:
+ case ff_movci:
+ // No action taken on decode.
+ break;
+ case ff_div:
+ if (I32_CHECK(rs) == INT_MIN && I32_CHECK(rt) == -1) {
+ i128hilo = U32(INT_MIN);
+ } else {
+ uint32_t div = I32_CHECK(rs) / I32_CHECK(rt);
+ uint32_t mod = I32_CHECK(rs) % I32_CHECK(rt);
+ i128hilo = (I64(mod) << 32) | div;
+ }
+ break;
+ case ff_ddiv:
+ if (I64(rs) == INT64_MIN && I64(rt) == -1) {
+ i128hilo = U64(INT64_MIN);
+ } else {
+ uint64_t div = rs / rt;
+ uint64_t mod = rs % rt;
+ i128hilo = (I128(mod) << 64) | div;
+ }
+ break;
+ case ff_divu: {
+ uint32_t div = U32(I32_CHECK(rs)) / U32(I32_CHECK(rt));
+ uint32_t mod = U32(I32_CHECK(rs)) % U32(I32_CHECK(rt));
+ i128hilo = (U64(mod) << 32) | div;
+ } break;
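+        // The quotient ends up in the low half of i128hilo and the remainder
+        // in the upper half; decodeTypeRegister later copies them into LO and
+        // HI respectively (e.g. 7 / 2 leaves 3 in LO and 1 in HI).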
+ case ff_ddivu:
+ if (0 == rt) {
+ i128hilo = (I128(Unpredictable) << 64) | I64(Unpredictable);
+ } else {
+ uint64_t div = U64(rs) / U64(rt);
+ uint64_t mod = U64(rs) % U64(rt);
+ i128hilo = (I128(mod) << 64) | div;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ alu_out = I32(I32_CHECK(rs) *
+ I32_CHECK(rt)); // Only the lower 32 bits are kept.
+ break;
+ case ff_clz:
+ alu_out = U32(I32_CHECK(rs)) ? __builtin_clz(U32(I32_CHECK(rs))) : 32;
+ break;
+ case ff_dclz:
+ alu_out = U64(rs) ? __builtin_clzl(U64(rs)) : 64;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ if (lsb > msb) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = I32((U32(I32_CHECK(rt)) & ~(mask << lsb)) |
+ ((U32(I32_CHECK(rs)) & mask) << lsb));
+ }
+ break;
+ }
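+        // Example (ins): with msb = 7 and lsb = 4 the field is 4 bits wide,
+        // so bits 3..0 of rs replace bits 7..4 of rt and all other bits of rt
+        // are preserved.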
+ case ff_dins: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ul << size) - 1;
+ if (lsb > msb) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+ }
+ break;
+ }
+ case ff_dinsm: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 33;
+ uint64_t mask = (1ul << size) - 1;
+ alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+ break;
+ }
+ case ff_dinsu: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa + 32;
+ uint16_t size = msb - lsb + 33;
+ uint64_t mask = (1ul << size) - 1;
+ if (sa > msb) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+ }
+ break;
+ }
+ case ff_ext: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ if ((lsb + msb) > 31) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U32(I32_CHECK(rs)) & (mask << lsb)) >> lsb;
+ }
+ break;
+ }
+ case ff_dext: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ul << size) - 1;
+ alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+ break;
+ }
+ case ff_dextm: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 33;
+ uint64_t mask = (1ul << size) - 1;
+ if ((lsb + msb + 32 + 1) > 64) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+ }
+ break;
+ }
+ case ff_dextu: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa + 32;
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ul << size) - 1;
+ if ((lsb + msb + 1) > 64) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+ }
+ break;
+ }
+ case ff_bshfl: { // Mips32r2 instruction.
+ if (16 == sa) { // seb
+ alu_out = I64(I8(I32_CHECK(rt)));
+ } else if (24 == sa) { // seh
+ alu_out = I64(I16(I32_CHECK(rt)));
+ } else if (2 == sa) { // wsbh
+ uint32_t input = U32(I32_CHECK(rt));
+ uint64_t output = 0;
+
+ uint32_t mask = 0xFF000000;
+ for (int i = 0; i < 4; i++) {
+ uint32_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+ alu_out = I64(I32(output));
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ }
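+        // Example (wsbh): swapping the bytes within each halfword maps the
+        // word 0xAABBCCDD to 0xBBAADDCC, while seb/seh sign-extend the low 8
+        // or 16 bits of rt.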
+ case ff_dbshfl: { // Mips64r2 instruction.
+ uint64_t input = U64(rt);
+ uint64_t output = 0;
+
+ if (2 == sa) { // dsbh
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0)
+ tmp = tmp >> 8;
+ else
+ tmp = tmp << 8;
+
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+ } else if (5 == sa) { // dshd
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i == 0)
+ tmp = tmp >> 48;
+ else if (i == 1)
+ tmp = tmp >> 16;
+ else if (i == 2)
+ tmp = tmp << 16;
+ else
+ tmp = tmp << 48;
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+ } else {
+ MOZ_CRASH();
+ }
+
+ alu_out = I64(output);
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ default:
+ MOZ_CRASH();
+ };
+}
+
+// Handle execution based on instruction types.
+void Simulator::decodeTypeRegister(SimInstruction* instr) {
+ // Instruction fields.
+ const OpcodeField op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int64_t rs = getRegister(rs_reg);
+ const int32_t rt_reg = instr->rtValue();
+ const int64_t rt = getRegister(rt_reg);
+ const int32_t rd_reg = instr->rdValue();
+
+ const int32_t fr_reg = instr->frValue();
+ const int32_t fs_reg = instr->fsValue();
+ const int32_t ft_reg = instr->ftValue();
+ const int32_t fd_reg = instr->fdValue();
+ __int128 i128hilo = 0;
+ unsigned __int128 u128hilo = 0;
+
+ // ALU output.
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int64_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr.
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Next pc
+ int64_t next_pc = 0;
+ int32_t return_addr_reg = 31;
+
+ // Set up the variables if needed before executing the instruction.
+ configureTypeRegister(instr, alu_out, i128hilo, u128hilo, next_pc,
+ return_addr_reg, do_interrupt);
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ setRegister(rt_reg, alu_out);
+ [[fallthrough]];
+ case rs_mfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_dmfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_mfhc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_ctc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
+ case rs_mtc1:
+ setFpuRegisterLo(fs_reg, registers_[rt_reg]);
+ break;
+ case rs_dmtc1:
+ setFpuRegister(fs_reg, registers_[rt_reg]);
+ break;
+ case rs_mthc1:
+ setFpuRegisterHi(fs_reg, registers_[rt_reg]);
+ break;
+ case rs_s:
+ float f, ft_value, fs_value;
+ uint32_t cc, fcsr_cc;
+ int64_t i64;
+ fs_value = getFpuRegisterFloat(fs_reg);
+ ft_value = getFpuRegisterFloat(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value + ft_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value - ft_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value * ft_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value / ft_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterFloat(fd_reg, fabsf(fs_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterFloat(fd_reg, -fs_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterFloat(fd_reg, sqrtf(fs_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, std::isnan(fs_value) || std::isnan(ft_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (fs_value == ft_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value == ft_value) ||
+ (std::isnan(fs_value) || std::isnan(ft_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (fs_value < ft_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value < ft_value) ||
+ (std::isnan(fs_value) || std::isnan(ft_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (fs_value <= ft_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value <= ft_value) ||
+ (std::isnan(fs_value) || std::isnan(ft_value)));
+ break;
+ case ff_cvt_d_fmt:
+ f = getFpuRegisterFloat(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(f));
+ break;
+ case ff_cvt_w_fmt: // Convert float to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+            case ff_round_w_fmt: {  // Round float to word (round half to
+                                     // even).
+ float rounded = std::floor(fs_value + 0.5);
+ int32_t result = I32(rounded);
+ if ((result & 1) != 0 && result - fs_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
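+            // Round-to-nearest-even example: for 2.5, floor(2.5 + 0.5) gives
+            // 3, but 3 is odd and 3 - 2.5 == 0.5, so the result is adjusted
+            // down to 2; 3.5 rounds to 4 and stays there since 4 is even.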
+ case ff_trunc_w_fmt: { // Truncate float to word (round towards 0).
+ float rounded = truncf(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round float to word towards negative
+ // infinity.
+ float rounded = std::floor(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+            case ff_ceil_w_fmt: {  // Round float to word towards positive
+                                    // infinity.
+ float rounded = std::ceil(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_l_fmt: // Mips64r2: Truncate float to 64-bit long-word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+ case ff_round_l_fmt: { // Mips64r2 instruction.
+ float rounded = fs_value > 0 ? std::floor(fs_value + 0.5)
+ : std::ceil(fs_value - 0.5);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips64r2 instruction.
+ float rounded = truncf(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_floor_l_fmt: { // Mips64r2 instruction.
+ float rounded = std::floor(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_ceil_l_fmt: { // Mips64r2 instruction.
+ float rounded = std::ceil(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_cvt_ps_s:
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ case ff_movf_fmt:
+ if (testFCSRBit(fcsr_cc)) {
+ setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+ }
+ break;
+ case ff_movz_fmt:
+ if (rt == 0) {
+ setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+ }
+ break;
+ case ff_movn_fmt:
+ if (rt != 0) {
+ setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_d:
+ double dt_value, ds_value;
+ ds_value = getFpuRegisterDouble(fs_reg);
+ dt_value = getFpuRegisterDouble(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value + dt_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value - dt_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value * dt_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value / dt_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterDouble(fd_reg, fabs(ds_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterDouble(fd_reg, -ds_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterDouble(fd_reg, sqrt(ds_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, std::isnan(ds_value) || std::isnan(dt_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (ds_value == dt_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value == dt_value) ||
+ (std::isnan(ds_value) || std::isnan(dt_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (ds_value < dt_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value < dt_value) ||
+ (std::isnan(ds_value) || std::isnan(dt_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (ds_value <= dt_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value <= dt_value) ||
+ (std::isnan(ds_value) || std::isnan(dt_value)));
+ break;
+ case ff_cvt_w_fmt: // Convert double to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+ case ff_round_w_fmt: { // Round double to word (round half to
+ // even).
+ double rounded = std::floor(ds_value + 0.5);
+ int32_t result = I32(rounded);
+ if ((result & 1) != 0 && result - ds_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_trunc_w_fmt: { // Truncate double to word (round towards
+ // 0).
+ double rounded = trunc(ds_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round double to word towards negative
+ // infinity.
+ double rounded = std::floor(ds_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_ceil_w_fmt: { // Round double to word towards positive
+ // infinity.
+ double rounded = std::ceil(ds_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_s_fmt: // Convert double to float (single).
+ setFpuRegisterFloat(fd_reg, static_cast<float>(ds_value));
+ break;
+ case ff_cvt_l_fmt: // Mips64r2: Truncate double to 64-bit
+ // long-word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+ case ff_round_l_fmt: { // Mips64r2 instruction.
+ double rounded = ds_value > 0 ? std::floor(ds_value + 0.5)
+ : std::ceil(ds_value - 0.5);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips64r2 instruction.
+ double rounded = trunc(ds_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_floor_l_fmt: { // Mips64r2 instruction.
+ double rounded = std::floor(ds_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_ceil_l_fmt: { // Mips64r2 instruction.
+ double rounded = std::ceil(ds_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ case ff_movz_fmt:
+ if (rt == 0) {
+ setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+ }
+ break;
+ case ff_movn_fmt:
+ if (rt != 0) {
+ setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+ }
+ break;
+ case ff_movf_fmt:
+              // The cc field in MOVF is located in the same place as in the
+              // float branch instructions.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ if (testFCSRBit(fcsr_cc)) {
+ setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_w:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_s_fmt: // Convert word to float (single).
+ i64 = getFpuRegisterLo(fs_reg);
+ setFpuRegisterFloat(fd_reg, static_cast<float>(i64));
+ break;
+ case ff_cvt_d_fmt: // Convert word to double.
+ i64 = getFpuRegisterLo(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case rs_l:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_d_fmt: // Mips64r2 instruction.
+ i64 = getFpuRegister(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
+ break;
+ case ff_cvt_s_fmt:
+ i64 = getFpuRegister(fs_reg);
+ setFpuRegisterFloat(fd_reg, static_cast<float>(i64));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_ps:
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_cop1x:
+ switch (instr->functionFieldRaw()) {
+ case ff_madd_s:
+ float fr, ft, fs;
+ fr = getFpuRegisterFloat(fr_reg);
+ fs = getFpuRegisterFloat(fs_reg);
+ ft = getFpuRegisterFloat(ft_reg);
+ setFpuRegisterFloat(fd_reg, fs * ft + fr);
+ break;
+ case ff_madd_d:
+ double dr, dt, ds;
+ dr = getFpuRegisterDouble(fr_reg);
+ ds = getFpuRegisterDouble(fs_reg);
+ dt = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds * dt + dr);
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr: {
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc +
+ SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case ff_jalr: {
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc +
+ SimInstruction::kInstrSize);
+ setRegister(return_addr_reg,
+ current_pc + 2 * SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ // Instructions using HI and LO registers.
+ case ff_mult:
+ setRegister(LO, I32(i128hilo & 0xffffffff));
+ setRegister(HI, I32(i128hilo >> 32));
+ break;
+ case ff_dmult:
+ setRegister(LO, I64(i128hilo & 0xfffffffffffffffful));
+ setRegister(HI, I64(i128hilo >> 64));
+ break;
+ case ff_multu:
+ setRegister(LO, I32(u128hilo & 0xffffffff));
+ setRegister(HI, I32(u128hilo >> 32));
+ break;
+ case ff_dmultu:
+ setRegister(LO, I64(u128hilo & 0xfffffffffffffffful));
+ setRegister(HI, I64(u128hilo >> 64));
+ break;
+ case ff_div:
+ case ff_divu:
+          // Divide by zero and overflow were not checked in the configuration
+          // step - div and divu do not raise exceptions. On division by 0
+          // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
+          // return INT_MIN, which is what the hardware does.
+ setRegister(LO, I32(i128hilo & 0xffffffff));
+ setRegister(HI, I32(i128hilo >> 32));
+ break;
+ case ff_ddiv:
+ case ff_ddivu:
+          // Divide by zero and overflow were not checked in the configuration
+          // step - ddiv and ddivu do not raise exceptions. On division by 0
+          // the result will be UNPREDICTABLE. On overflow (INT64_MIN/-1),
+          // return INT64_MIN, which is what the hardware does.
+ setRegister(LO, I64(i128hilo & 0xfffffffffffffffful));
+ setRegister(HI, I64(i128hilo >> 64));
+ break;
+ case ff_sync:
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (do_interrupt) {
+ softwareInterrupt(instr);
+ }
+ break;
+ // Conditional moves.
+ case ff_movn:
+ if (rt) {
+ setRegister(rd_reg, rs);
+ }
+ break;
+ case ff_movci: {
+ uint32_t cc = instr->fbccValue();
+ uint32_t fcsr_cc = GetFCSRConditionBit(cc);
+ if (instr->bit(16)) { // Read Tf bit.
+ if (testFCSRBit(fcsr_cc)) {
+ setRegister(rd_reg, rs);
+ }
+ } else {
+ if (!testFCSRBit(fcsr_cc)) {
+ setRegister(rd_reg, rs);
+ }
+ }
+ break;
+ }
+ case ff_movz:
+ if (!rt) {
+ setRegister(rd_reg, rs);
+ }
+ break;
+ default: // For other special opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ };
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ setRegister(rd_reg, alu_out);
+ // HI and LO are UNPREDICTABLE after the operation.
+ setRegister(LO, Unpredictable);
+ setRegister(HI, Unpredictable);
+ break;
+ default: // For other special2 opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ }
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins:
+ case ff_dins:
+ case ff_dinsm:
+ case ff_dinsu:
+ // Ins instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_ext:
+ case ff_dext:
+ case ff_dextm:
+ case ff_dextu:
+ // Ext instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_bshfl:
+ setRegister(rd_reg, alu_out);
+ break;
+ case ff_dbshfl:
+ setRegister(rd_reg, alu_out);
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ // Unimplemented opcodes raised an error in the configuration step before,
+ // so we can use the default here to set the destination register in
+ // common cases.
+ default:
+ setRegister(rd_reg, alu_out);
+ };
+}
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void Simulator::decodeTypeImmediate(SimInstruction* instr) {
+ // Instruction fields.
+ OpcodeField op = instr->opcodeFieldRaw();
+ int64_t rs = getRegister(instr->rsValue());
+ int32_t rt_reg = instr->rtValue(); // Destination register.
+ int64_t rt = getRegister(rt_reg);
+ int16_t imm16 = instr->imm16Value();
+
+ int32_t ft_reg = instr->ftValue(); // Destination register.
+
+ // Zero extended immediate.
+ uint32_t oe_imm16 = 0xffff & imm16;
+ // Sign extended immediate.
+ int32_t se_imm16 = imm16;
+
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Next pc.
+ int64_t next_pc = bad_ra;
+
+ // Used for conditional branch instructions.
+ bool do_branch = false;
+ bool execute_branch_delay_instruction = false;
+
+ // Used for arithmetic instructions.
+ int64_t alu_out = 0;
+ // Floating point.
+ double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
+
+ // Used for memory instructions.
+ uint64_t addr = 0x0;
+ // Value to be written in memory.
+ uint64_t mem_value = 0x0;
+ __int128 temp;
+
+ // ---------- Configuration (and execution for op_regimm).
+ switch (op) {
+ // ------------- op_cop1. Coprocessor instructions.
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ cc_value = testFCSRBit(fcsr_cc);
+ do_branch = (instr->fbtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ // ------------- op_regimm class.
+ case op_regimm:
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ do_branch = (rs < 0);
+ break;
+ case rt_bltzal:
+ do_branch = rs < 0;
+ break;
+ case rt_bgez:
+ do_branch = rs >= 0;
+ break;
+ case rt_bgezal:
+ do_branch = rs >= 0;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bltzal:
+ case rt_bgez:
+ case rt_bgezal:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + kBranchReturnOffset);
+ }
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ break;
+ };
+ break; // case op_regimm.
+ // ------------- Branch instructions.
+ // When comparing to zero, the encoding of rt field is always 0, so we
+ // don't need to replace rt with zero.
+ case op_beq:
+ do_branch = (rs == rt);
+ break;
+ case op_bne:
+ do_branch = rs != rt;
+ break;
+ case op_blez:
+ do_branch = rs <= 0;
+ break;
+ case op_bgtz:
+ do_branch = rs > 0;
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ alu_out = I32_CHECK(rs) + se_imm16;
+ if ((alu_out << 32) != (alu_out << 31)) {
+ exceptions[kIntegerOverflow] = 1;
+ }
+ alu_out = I32_CHECK(alu_out);
+ break;
+ case op_daddi:
+ temp = alu_out = rs + se_imm16;
+ if ((temp << 64) != (temp << 63)) {
+ exceptions[kIntegerOverflow] = 1;
+ }
+ alu_out = I64(temp);
+ break;
+ case op_addiu:
+ alu_out = I32(I32_CHECK(rs) + se_imm16);
+ break;
+ case op_daddiu:
+ alu_out = rs + se_imm16;
+ break;
+ case op_slti:
+ alu_out = (rs < se_imm16) ? 1 : 0;
+ break;
+ case op_sltiu:
+ alu_out = (U64(rs) < U64(se_imm16)) ? 1 : 0;
+ break;
+ case op_andi:
+ alu_out = rs & oe_imm16;
+ break;
+ case op_ori:
+ alu_out = rs | oe_imm16;
+ break;
+ case op_xori:
+ alu_out = rs ^ oe_imm16;
+ break;
+ case op_lui:
+ alu_out = (se_imm16 << 16);
+ break;
+ // ------------- Memory instructions.
+ case op_lbu:
+ addr = rs + se_imm16;
+ alu_out = readBU(addr, instr);
+ break;
+ case op_lb:
+ addr = rs + se_imm16;
+ alu_out = readB(addr, instr);
+ break;
+ case op_lhu:
+ addr = rs + se_imm16;
+ alu_out = readHU(addr, instr);
+ break;
+ case op_lh:
+ addr = rs + se_imm16;
+ alu_out = readH(addr, instr);
+ break;
+ case op_lwu:
+ addr = rs + se_imm16;
+ alu_out = readWU(addr, instr);
+ break;
+ case op_lw:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_lwl: {
+      // al_offset is the offset of the effective address within an aligned
+      // word.
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_lwr: {
+      // al_offset is the offset of the effective address within an aligned
+      // word.
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out = U32(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ alu_out = I32(alu_out);
+ break;
+ }
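+    // lwl and lwr are meant to be used as a pair to load a word that is not
+    // 4-byte aligned: lwl supplies the upper bytes of the destination and lwr
+    // the lower bytes, each merging the bytes it does not load with the
+    // previous contents of rt.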
+ case op_ll:
+ addr = rs + se_imm16;
+ alu_out = loadLinkedW(addr, instr);
+ break;
+ case op_lld:
+ addr = rs + se_imm16;
+ alu_out = loadLinkedD(addr, instr);
+ break;
+ case op_ld:
+ addr = rs + se_imm16;
+ alu_out = readDW(addr, instr);
+ break;
+ case op_ldl: {
+      // al_offset is the offset of the effective address within an aligned
+      // double word.
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = (1ul << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readDW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_ldr: {
+      // al_offset is the offset of the effective address within an aligned
+      // double word.
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = al_offset ? (~0ul << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readDW(addr, instr);
+ alu_out = U64(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_sb:
+ addr = rs + se_imm16;
+ break;
+ case op_sh:
+ addr = rs + se_imm16;
+ break;
+ case op_sw:
+ addr = rs + se_imm16;
+ break;
+ case op_swl: {
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= U32(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_swr: {
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_sc:
+ addr = rs + se_imm16;
+ break;
+ case op_scd:
+ addr = rs + se_imm16;
+ break;
+ case op_sd:
+ addr = rs + se_imm16;
+ break;
+ case op_sdl: {
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = byte_shift ? (~0ul << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= U64(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_sdr: {
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint64_t mask = (1ul << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_lwc1:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ldc1:
+ addr = rs + se_imm16;
+ fp_out = readD(addr, instr);
+ break;
+ case op_swc1:
+ case op_sdc1:
+ addr = rs + se_imm16;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ // ------------- Branch instructions.
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * SimInstruction::kInstrSize;
+ }
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ case op_daddi:
+ case op_addiu:
+ case op_daddiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ setRegister(rt_reg, alu_out);
+ break;
+ // ------------- Memory instructions.
+ case op_lbu:
+ case op_lb:
+ case op_lhu:
+ case op_lh:
+ case op_lwu:
+ case op_lw:
+ case op_lwl:
+ case op_lwr:
+ case op_ll:
+ case op_lld:
+ case op_ld:
+ case op_ldl:
+ case op_ldr:
+ setRegister(rt_reg, alu_out);
+ break;
+ case op_sb:
+ writeB(addr, I8(rt), instr);
+ break;
+ case op_sh:
+ writeH(addr, U16(rt), instr);
+ break;
+ case op_sw:
+ writeW(addr, I32(rt), instr);
+ break;
+ case op_swl:
+ writeW(addr, I32(mem_value), instr);
+ break;
+ case op_swr:
+ writeW(addr, I32(mem_value), instr);
+ break;
+ case op_sc:
+ setRegister(rt_reg, storeConditionalW(addr, I32(rt), instr));
+ break;
+ case op_scd:
+ setRegister(rt_reg, storeConditionalD(addr, rt, instr));
+ break;
+ case op_sd:
+ writeDW(addr, rt, instr);
+ break;
+ case op_sdl:
+ writeDW(addr, mem_value, instr);
+ break;
+ case op_sdr:
+ writeDW(addr, mem_value, instr);
+ break;
+ case op_lwc1:
+ setFpuRegisterLo(ft_reg, alu_out);
+ break;
+ case op_ldc1:
+ setFpuRegisterDouble(ft_reg, fp_out);
+ break;
+ case op_swc1:
+ writeW(addr, getFpuRegisterLo(ft_reg), instr);
+ break;
+ case op_sdc1:
+ writeD(addr, getFpuRegisterDouble(ft_reg), instr);
+ break;
+ default:
+ break;
+ };
+
+ if (execute_branch_delay_instruction) {
+    // Execute the branch delay slot.
+    // We don't check for end_sim_pc. First, it should not be hit, as the
+    // current pc is valid. Second, a jump should always execute its branch
+    // delay slot.
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ }
+
+ // If needed update pc after the branch delay execution.
+ if (next_pc != bad_ra) {
+ set_pc(next_pc);
+ }
+}
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void Simulator::decodeTypeJump(SimInstruction* instr) {
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Get unchanged bits of pc.
+ int64_t pc_high_bits = current_pc & 0xfffffffff0000000ul;
+ // Next pc.
+ int64_t next_pc = pc_high_bits | (instr->imm26Value() << 2);
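+  // The 26-bit target is shifted left by two and spliced into the upper bits
+  // of the current pc, so a jump can only land within the same 256 MB region;
+  // e.g. a pc of 0x120003004 and a 26-bit field of 0x1000 give 0x120004000.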
+
+  // Execute the branch delay slot.
+  // We don't check for end_sim_pc. First, it should not be hit, as the current
+  // pc is valid. Second, a jump should always execute its branch delay slot.
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+
+ // Update pc and ra if necessary.
+ // Do this after the branch delay execution.
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ set_pc(next_pc);
+ pc_modified_ = true;
+}
+
+// Executes the current instruction.
+void Simulator::instructionDecode(SimInstruction* instr) {
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ SimulatorProcess::checkICacheLocked(instr);
+ }
+ pc_modified_ = false;
+
+ switch (instr->instructionType()) {
+ case SimInstruction::kRegisterType:
+ decodeTypeRegister(instr);
+ break;
+ case SimInstruction::kImmediateType:
+ decodeTypeImmediate(instr);
+ break;
+ case SimInstruction::kJumpType:
+ decodeTypeJump(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ if (!pc_modified_) {
+ setRegister(pc,
+ reinterpret_cast<int64_t>(instr) + SimInstruction::kInstrSize);
+ }
+}
+
+void Simulator::branchDelayInstructionDecode(SimInstruction* instr) {
+ if (instr->instructionBits() == NopInst) {
+ // Short-cut generic nop instructions. They are always valid and they
+ // never change the simulator state.
+ return;
+ }
+
+ if (instr->isForbiddenInBranchDelay()) {
+ MOZ_CRASH("Eror:Unexpected opcode in a branch delay slot.");
+ }
+ instructionDecode(instr);
+}
+
+void Simulator::enable_single_stepping(SingleStepCallback cb, void* arg) {
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void Simulator::disable_single_stepping() {
+ if (!single_stepping_) {
+ return;
+ }
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
+template <bool enableStopSimAt>
+void Simulator::execute() {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ MipsDebugger dbg(this);
+ dbg.debug();
+ } else {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this,
+ (void*)program_counter);
+ }
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+ }
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+}
+
+void Simulator::callInternal(uint8_t* entry) {
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int64_t>(entry));
+  // Put down a marker for the end of simulation. The simulator will stop
+  // simulation when the PC reaches this value. By saving the "end simulation"
+  // value into ra, the simulation stops when returning to this call point.
+ setRegister(ra, end_sim_pc);
+
+  // Remember the values of the callee-saved registers.
+ int64_t s0_val = getRegister(s0);
+ int64_t s1_val = getRegister(s1);
+ int64_t s2_val = getRegister(s2);
+ int64_t s3_val = getRegister(s3);
+ int64_t s4_val = getRegister(s4);
+ int64_t s5_val = getRegister(s5);
+ int64_t s6_val = getRegister(s6);
+ int64_t s7_val = getRegister(s7);
+ int64_t gp_val = getRegister(gp);
+ int64_t sp_val = getRegister(sp);
+ int64_t fp_val = getRegister(fp);
+
+  // Set up the callee-saved registers with a known value, so that we can check
+  // that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ setRegister(s0, callee_saved_value);
+ setRegister(s1, callee_saved_value);
+ setRegister(s2, callee_saved_value);
+ setRegister(s3, callee_saved_value);
+ setRegister(s4, callee_saved_value);
+ setRegister(s5, callee_saved_value);
+ setRegister(s6, callee_saved_value);
+ setRegister(s7, callee_saved_value);
+ setRegister(gp, callee_saved_value);
+ setRegister(fp, callee_saved_value);
+
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1) {
+ execute<true>();
+ } else {
+ execute<false>();
+ }
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(s0));
+ MOZ_ASSERT(callee_saved_value == getRegister(s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(gp));
+ MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+ // Restore callee-saved registers with the original value.
+ setRegister(s0, s0_val);
+ setRegister(s1, s1_val);
+ setRegister(s2, s2_val);
+ setRegister(s3, s3_val);
+ setRegister(s4, s4_val);
+ setRegister(s5, s5_val);
+ setRegister(s6, s6_val);
+ setRegister(s7, s7_val);
+ setRegister(gp, gp_val);
+ setRegister(sp, sp_val);
+ setRegister(fp, fp_val);
+}
+
+int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int64_t original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int64_t entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount) {
+ entry_stack = entry_stack - argument_count * sizeof(int64_t);
+ } else {
+ entry_stack = entry_stack - kCArgsSlotsSize;
+ }
+
+ entry_stack &= ~U64(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+ // Setup the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg)) {
+ setRegister(argReg.code(), va_arg(parameters, int64_t));
+ } else {
+ stack_argument[i] = va_arg(parameters, int64_t);
+ }
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int64_t result = getRegister(v0);
+ return result;
+}
+
+uintptr_t Simulator::pushAddress(uintptr_t address) {
+  int64_t new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::popAddress() {
+  int64_t current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator* JSContext::simulator() const { return simulator_; }
diff --git a/js/src/jit/mips64/Simulator-mips64.h b/js/src/jit/mips64/Simulator-mips64.h
new file mode 100644
index 0000000000..02b2774f24
--- /dev/null
+++ b/js/src/jit/mips64/Simulator-mips64.h
@@ -0,0 +1,536 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_mips64_Simulator_mips64_h
+#define jit_mips64_Simulator_mips64_h
+
+#ifdef JS_SIMULATOR_MIPS64
+
+# include "mozilla/Atomics.h"
+
+# include "jit/IonTypes.h"
+# include "js/ProfilingFrameIterator.h"
+# include "threading/Thread.h"
+# include "vm/MutexIDs.h"
+# include "wasm/WasmSignalHandlers.h"
+
+namespace js {
+
+namespace jit {
+
+class JitActivation;
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
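+//
+// A hypothetical tracing hook with this signature could look like
+//
+//   static void TracePC(void* arg, Simulator* sim, void* pc) {
+//     fprintf(stderr, "about to simulate %p\n", pc);
+//   }
+//
+// and would be installed via sim->enable_single_stepping(TracePC, nullptr).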
+
+const intptr_t kPointerAlignment = 8;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 34th register.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const uint64_t kFPUInvalidResult64 = static_cast<uint64_t>(1ULL << 63) - 1;
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactCauseBit = 12;
+const uint32_t kFCSRUnderflowCauseBit = 13;
+const uint32_t kFCSROverflowCauseBit = 14;
+const uint32_t kFCSRDivideByZeroCauseBit = 15;
+const uint32_t kFCSRInvalidOpCauseBit = 16;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
+// On the MIPS64 simulator, breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+const uint32_t kWasmTrapCode = 6;
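+// For example, break 7 is treated as a watchpoint (the registers are printed
+// and execution continues), break 64 is treated as a stop() instruction, and
+// break 200 drops straight into the debugger.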
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+// Per thread simulator state.
+class Simulator {
+ friend class MipsDebugger;
+
+ public:
+ // Registers are declared in order. See "See MIPS Run Linux" chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0,
+ v1,
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ t0,
+ t1,
+ t2,
+ t3,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ t8,
+ t9,
+ k0,
+ k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ enum FPURegister {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ kNumFPURegisters
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create();
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods
+ // above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+ // Accessors for register state. Reading the pc value adheres to the MIPS
+  // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void setRegister(int reg, int64_t value);
+ int64_t getRegister(int reg) const;
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int64_t value);
+ void setFpuRegisterLo(int fpureg, int32_t value);
+ void setFpuRegisterHi(int fpureg, int32_t value);
+ void setFpuRegisterFloat(int fpureg, float value);
+ void setFpuRegisterDouble(int fpureg, double value);
+ int64_t getFpuRegister(int fpureg) const;
+ int32_t getFpuRegisterLo(int fpureg) const;
+ int32_t getFpuRegisterHi(int fpureg) const;
+ float getFpuRegisterFloat(int fpureg) const;
+ double getFpuRegisterDouble(int fpureg) const;
+ void setFCSRBit(uint32_t cc, bool value);
+ bool testFCSRBit(uint32_t cc);
+ template <typename T>
+ bool setFCSRRoundError(double original, double rounded);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ int64_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const {
+ return reinterpret_cast<T>(get_pc());
+ }
+
+ void enable_single_stepping(SingleStepCallback cb, void* arg);
+ void disable_single_stepping();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes MIPS instructions until the PC reaches end_sim_pc.
+ template <bool enableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int64_t call(uint8_t* entry, int argument_count, ...);
+
+ // Push an address onto the JS stack.
+ uintptr_t pushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t popAddress();
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+ // Returns true if pc register contains one of the 'SpecialValues' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum SpecialValues {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly set up.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
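+
+ // Illustrative sketch of how end_sim_pc is used on entry (simplified, not
+ // the actual code):
+ //   setRegister(ra, end_sim_pc);
+ //   set_pc(int64_t(entry));
+ //   execute<false>();  // runs until get_pc() == end_sim_pc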
+
+ bool init();
+
+ // Unsupported instructions use format() to print an error and stop execution.
+ void format(SimInstruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint8_t readBU(uint64_t addr, SimInstruction* instr);
+ inline int8_t readB(uint64_t addr, SimInstruction* instr);
+ inline void writeB(uint64_t addr, uint8_t value, SimInstruction* instr);
+ inline void writeB(uint64_t addr, int8_t value, SimInstruction* instr);
+
+ inline uint16_t readHU(uint64_t addr, SimInstruction* instr);
+ inline int16_t readH(uint64_t addr, SimInstruction* instr);
+ inline void writeH(uint64_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(uint64_t addr, int16_t value, SimInstruction* instr);
+
+ inline uint32_t readWU(uint64_t addr, SimInstruction* instr);
+ inline int32_t readW(uint64_t addr, SimInstruction* instr);
+ inline void writeW(uint64_t addr, uint32_t value, SimInstruction* instr);
+ inline void writeW(uint64_t addr, int32_t value, SimInstruction* instr);
+
+ inline int64_t readDW(uint64_t addr, SimInstruction* instr);
+ inline int64_t readDWL(uint64_t addr, SimInstruction* instr);
+ inline int64_t readDWR(uint64_t addr, SimInstruction* instr);
+ inline void writeDW(uint64_t addr, int64_t value, SimInstruction* instr);
+
+ inline double readD(uint64_t addr, SimInstruction* instr);
+ inline void writeD(uint64_t addr, double value, SimInstruction* instr);
+
+ inline int32_t loadLinkedW(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalW(uint64_t addr, int32_t value,
+ SimInstruction* instr);
+
+ inline int64_t loadLinkedD(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr);
+
+ // Helper function for decodeTypeRegister.
+ void configureTypeRegister(SimInstruction* instr, int64_t& alu_out,
+ __int128& i128hilo, unsigned __int128& u128hilo,
+ int64_t& next_pc, int32_t& return_addr_reg,
+ bool& do_interrupt);
+
+ // Execution is dispatched based on the instruction type.
+ void decodeTypeRegister(SimInstruction* instr);
+ void decodeTypeImmediate(SimInstruction* instr);
+ void decodeTypeJump(SimInstruction* instr);
+
+ // Used for breakpoints and traps.
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ bool isWatchpoint(uint32_t code);
+ void printWatchpoint(uint32_t code);
+ void handleStop(uint32_t code, SimInstruction* instr);
+ bool isStopInstruction(SimInstruction* instr);
+ bool isEnabledStop(uint32_t code);
+ void enableStop(uint32_t code);
+ void disableStop(uint32_t code);
+ void increaseStopCounter(uint32_t code);
+ void printStopInfo(uint32_t code);
+
+ JS::ProfilingFrameIterator::RegisterState registerState();
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ // This method is rather hot so inline the normal (no-wasm) case.
+ bool MOZ_ALWAYS_INLINE handleWasmSegFault(uint64_t addr, unsigned numBytes) {
+ if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+ return false;
+ }
+
+ uint8_t* newPC;
+ if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes,
+ &newPC)) {
+ return false;
+ }
+
+ LLBit_ = false;
+ set_pc(int64_t(newPC));
+ return true;
+ }
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+ // Execute one instruction placed in a branch delay slot.
+ void branchDelayInstructionDecode(SimInstruction* instr);
+
+ public:
+ static int64_t StopSimAt;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type);
+
+ private:
+ enum Exception {
+ kNone,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void signalExceptions();
+
+ // Handle return value for runtime FP functions.
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+ void setCallResult(__int128 res);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ bool LLBit_;
+ uintptr_t LLAddr_;
+ int64_t lastLLValue_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int64_t icount_;
+ int64_t break_count_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // Single-stepping support
+ bool single_stepping_;
+ SingleStepCallback single_step_callback_;
+ void* single_step_callback_arg_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1U << 31;
+
+ // A stop is enabled (meaning the simulator will stop when it reaches the
+ // instruction) if bit 31 of watchedStops_[code].count_ is unset.
+ // The value watchedStops_[code].count_ & ~(1 << 31) indicates how many times
+ // the breakpoint has been hit or passed through.
+ struct StopCountAndDesc {
+ uint32_t count_;
+ char* desc_;
+ };
+ StopCountAndDesc watchedStops_[kNumOfWatchedStops];
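+
+ // For illustration (a sketch assuming the encoding above; the
+ // isEnabledStop()/increaseStopCounter() helpers declared earlier do the
+ // real work):
+ //   bool enabled = (watchedStops_[code].count_ & kStopDisabledBit) == 0;
+ //   uint32_t hitCount = watchedStops_[code].count_ & ~kStopDisabledBit;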
+};
+
+// Process-wide simulator state.
+class SimulatorProcess {
+ friend class Redirection;
+ friend class AutoLockSimulatorCache;
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ static mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ ICacheCheckingDisableCount;
+ static void FlushICache(void* start, size_t size);
+
+ static void checkICacheLocked(SimInstruction* instr);
+
+ static bool initialize() {
+ singleton_ = js_new<SimulatorProcess>();
+ return singleton_;
+ }
+ static void destroy() {
+ js_delete(singleton_);
+ singleton_ = nullptr;
+ }
+
+ SimulatorProcess();
+ ~SimulatorProcess();
+
+ private:
+ static SimulatorProcess* singleton_;
+
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_ MOZ_UNANNOTATED;
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ static ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->icache_;
+ }
+
+ static Redirection* redirection() {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->redirection_;
+ }
+
+ static void setRedirection(js::jit::Redirection* redirection) {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ singleton_->redirection_ = redirection;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_MIPS64 */
+
+#endif /* jit_mips64_Simulator_mips64_h */
diff --git a/js/src/jit/mips64/Trampoline-mips64.cpp b/js/src/jit/mips64/Trampoline-mips64.cpp
new file mode 100644
index 0000000000..a85e7b3702
--- /dev/null
+++ b/js/src/jit/mips64/Trampoline-mips64.cpp
@@ -0,0 +1,870 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+static_assert(sizeof(uintptr_t) == sizeof(uint64_t), "Not 32-bit clean.");
+
+struct EnterJITRegs {
+ double f31;
+ double f30;
+ double f29;
+ double f28;
+ double f27;
+ double f26;
+ double f25;
+ double f24;
+
+ uintptr_t align;
+
+ // non-volatile registers.
+ uint64_t ra;
+ uint64_t fp;
+ uint64_t s7;
+ uint64_t s6;
+ uint64_t s5;
+ uint64_t s4;
+ uint64_t s3;
+ uint64_t s2;
+ uint64_t s1;
+ uint64_t s0;
+ // Save reg_vp (a7) on the stack; it is used after the call into JIT code.
+ uint64_t a7;
+};
+
+static void GenerateReturn(MacroAssembler& masm, int returnCode) {
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ if (isLoongson()) {
+ // Restore non-volatile registers
+ masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_gslq(s1, s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_gslq(s3, s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_gslq(s5, s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_gslq(s7, fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_ld(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.as_gslq(f24, f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_gslq(f26, f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_gslq(f28, f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_gslq(f30, f31, StackPointer, offsetof(EnterJITRegs, f31));
+ } else {
+ // Restore non-volatile registers
+ masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_ld(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_ld(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_ld(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_ld(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_ld(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_ld(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_ld(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_ld(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_ld(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.as_ldc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_ldc1(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_ldc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_ldc1(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_ldc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_ldc1(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_ldc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_ldc1(f31, StackPointer, offsetof(EnterJITRegs, f31));
+ }
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void GeneratePrologue(MacroAssembler& masm) {
+ masm.reserveStack(sizeof(EnterJITRegs));
+
+ if (isLoongson()) {
+ masm.as_gssq(a7, s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_gssq(s1, s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_gssq(s3, s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_gssq(s5, s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_gssq(s7, fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_sd(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ masm.as_gssq(f24, f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_gssq(f26, f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_gssq(f28, f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_gssq(f30, f31, StackPointer, offsetof(EnterJITRegs, f31));
+ return;
+ }
+
+ masm.as_sd(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_sd(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_sd(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_sd(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_sd(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_sd(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_sd(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_sd(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_sd(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_sd(ra, StackPointer, offsetof(EnterJITRegs, ra));
+ masm.as_sd(a7, StackPointer, offsetof(EnterJITRegs, a7));
+
+ masm.as_sdc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_sdc1(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_sdc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_sdc1(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_sdc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_sdc1(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_sdc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_sdc1(f31, StackPointer, offsetof(EnterJITRegs, f31));
+}
+
+// Generates a trampoline for calling JIT-compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with arguments passed
+// according to the platform's standard calling convention.
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ const Register reg_code = IntArgReg0;
+ const Register reg_argc = IntArgReg1;
+ const Register reg_argv = IntArgReg2;
+ const mozilla::DebugOnly<Register> reg_frame = IntArgReg3;
+ const Register reg_token = IntArgReg4;
+ const Register reg_chain = IntArgReg5;
+ const Register reg_values = IntArgReg6;
+ const Register reg_vp = IntArgReg7;
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ GeneratePrologue(masm);
+
+ // Save stack pointer as baseline frame.
+ masm.movePtr(StackPointer, FramePointer);
+
+ // Load the number of actual arguments into s3.
+ masm.unboxInt32(Address(reg_vp, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // If we are constructing, the argument count also needs to include |newTarget|.
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, reg_token,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.add32(Imm32(1), reg_argc);
+
+ masm.bind(&noNewTarget);
+ }
+
+ // Make the stack aligned.
+ masm.ma_and(s0, reg_argc, Imm32(1));
+ masm.ma_dsubu(s1, StackPointer, Imm32(sizeof(Value)));
+#ifdef MIPSR6
+ masm.as_selnez(s1, s1, s0);
+ masm.as_seleqz(StackPointer, StackPointer, s0);
+ masm.as_or(StackPointer, StackPointer, s1);
+#else
+ masm.as_movn(StackPointer, s1, s0);
+#endif
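+ // In C terms, the conditional move above is roughly (illustrative only):
+ //   if (reg_argc & 1) StackPointer -= sizeof(Value);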
+
+ masm.as_dsll(s0, reg_argc, 3); // Value* argv
+ masm.addPtr(reg_argv, s0); // s0 = &argv[argc]
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+ // If there aren't any arguments, don't do anything
+ masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(sizeof(Value)), s0);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6);
+ masm.loadValue(Address(s0, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+
+ masm.push(reg_token);
+ masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, s3, s3);
+
+ CodeLabel returnLabel;
+ Label oomReturnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(OsrFrameReg);
+ regs.take(reg_code);
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+ Register numStackValues = reg_values;
+ regs.take(numStackValues);
+ Register scratch = regs.takeAny();
+
+ // Push return address.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, &returnLabel);
+ masm.storePtr(scratch, Address(StackPointer, 0));
+
+ // Push previous frame pointer.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = FramePointer;
+ masm.movePtr(StackPointer, framePtr);
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+
+ Register framePtrScratch = regs.takeAny();
+ masm.movePtr(sp, framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ masm.ma_dsll(scratch, numStackValues, Imm32(3));
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.reserveStack(3 * sizeof(uintptr_t));
+ masm.storePtr(
+ ImmWord(MakeFrameDescriptor(FrameType::BaselineJS)),
+ Address(StackPointer, 2 * sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(
+ zero, Address(StackPointer, sizeof(uintptr_t))); // fake return address
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // No GC things to mark, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr,
+ Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtrScratch); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ regs.add(OsrFrameReg);
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ Label error;
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(framePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&oomReturnLabel);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.ma_move(R1.scratchReg(), reg_chain);
+ }
+
+ // The call will push the return address on the stack, so we check that the
+ // stack will be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+ // Call the function, pushing the return address onto the stack.
+ masm.callJitNoProfiler(reg_code);
+
+ {
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ masm.addCodeLabel(returnLabel);
+ masm.bind(&oomReturnLabel);
+ }
+
+ // Discard arguments and padding. Set sp to the address of the EnterJITRegs
+ // on the stack.
+ masm.mov(FramePointer, StackPointer);
+
+ // Store the returned value into the vp
+ masm.as_ld(reg_vp, StackPointer, offsetof(EnterJITRegs, a7));
+ masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ // Not supported, or not implemented yet.
+ // TODO: Implement along with the corresponding stack-walker changes, in
+ // coordination with the Gecko Profiler, see bug 1635987 and follow-ups.
+ return mozilla::Nothing{};
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+ // The stack has to be aligned here. If not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+ // Reserve space for the BailoutInfo pointer. Two words to ensure alignment
+ // for setupAlignedABICall.
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a1);
+
+ using Fn =
+ bool (*)(InvalidationBailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(a2);
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ // Do not erase the frame pointer in this function.
+
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+ masm.pushReturnAddress();
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ // Load argc.
+ masm.loadNumActualArgs(FramePointer, s3);
+
+ Register numActArgsReg = a6;
+ Register calleeTokenReg = a7;
+ Register numArgsReg = a5;
+
+ // Load |nformals| into numArgsReg.
+ masm.loadPtr(
+ Address(FramePointer, RectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+ masm.mov(calleeTokenReg, numArgsReg);
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), numArgsReg);
+ masm.loadFunctionArgCount(numArgsReg, numArgsReg);
+
+ // Stash another copy in t3, since we are going to do destructive operations
+ // on numArgsReg
+ masm.mov(numArgsReg, t3);
+
+ static_assert(
+ CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.mov(calleeTokenReg, t2);
+ masm.ma_and(t2, Imm32(uint32_t(CalleeToken_FunctionConstructing)));
+
+ // Including |this| and |new.target|, there are (|nformals| + 1 +
+ // isConstructing) arguments to push to the stack. Then we push a
+ // JitFrameLayout. We compute the padding, expressed as the number of extra
+ // |undefined| values to push on the stack.
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(
+ JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(JitStackValueAlignment));
+ masm.add32(
+ Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */),
+ numArgsReg);
+ masm.add32(t2, numArgsReg);
+ masm.and32(Imm32(~(JitStackValueAlignment - 1)), numArgsReg);
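+
+ // Worked example (illustrative; assumes JitStackValueAlignment == 2): for
+ // nformals == 2 and a non-constructing call this computes
+ // (2 + 1 + 1 + 0) & ~1 == 4, i.e. |this|, two formals, and one padding
+ // |undefined| slot.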
+
+ // Load the number of |undefined|s to push into t1. Subtract 1 for |this|.
+ masm.as_dsubu(t1, numArgsReg, s3);
+ masm.sub32(Imm32(1), t1);
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ] <- sp
+ // '--- s3 ----'
+ //
+ // Rectifier frame:
+ // [fp'][undef] [undef] [undef] [arg2] [arg1] [this] [ [argc] [callee]
+ // [descr] [raddr] ]
+ // '-------- t1 ---------' '--- s3 ----'
+
+ // Copy number of actual arguments into numActArgsReg
+ masm.mov(s3, numActArgsReg);
+
+ masm.moveValue(UndefinedValue(), ValueOperand(t0));
+
+ // Push undefined values (including the padding).
+ {
+ Label undefLoopTop;
+
+ masm.bind(&undefLoopTop);
+ masm.sub32(Imm32(1), t1);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+ // Get the topmost argument.
+ masm.ma_dsll(t0, s3, Imm32(3)); // t0 <- nargs * 8
+ masm.as_daddu(t1, FramePointer, t0); // t1 <- fp(saved sp) + nargs * 8
+ masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t1);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+
+ masm.addPtr(Imm32(1), s3);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.sub32(Imm32(1), s3);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.loadValue(Address(t1, 0), ValueOperand(t0));
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+ masm.subPtr(Imm32(sizeof(Value)), t1);
+
+ masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // if constructing, copy newTarget
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, calleeTokenReg,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // thisFrame[numFormals] = prevFrame[argc]
+ ValueOperand newTarget(t0);
+
+ // Load vp[argc]. Add sizeof(Value) for |this|.
+ BaseIndex newTargetSrc(FramePointer, numActArgsReg, TimesEight,
+ sizeof(RectifierFrameLayout) + sizeof(Value));
+ masm.loadValue(newTargetSrc, newTarget);
+
+ // Again, 1 for |this|
+ BaseIndex newTargetDest(StackPointer, t3, TimesEight, sizeof(Value));
+ masm.storeValue(newTarget, newTargetDest);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ]
+ //
+ //
+ // Rectifier frame:
+ // [fp'] <- fp [undef] [undef] [undef] [arg2] [arg1] [this] <- sp [ [argc]
+ // [callee] [descr] [raddr] ]
+
+ // Construct JitFrameLayout.
+ masm.push(calleeTokenReg);
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, numActArgsReg,
+ numActArgsReg);
+
+ // Call the target function.
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), calleeTokenReg);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(t1);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(calleeTokenReg, t1, &noBaselineScript);
+ masm.callJitNoProfiler(t1);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ masm.callJitNoProfiler(t1);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+/* When a bailout is done via out-of-line code (lazy bailout), the frame size
+ * is stored in $ra (see CodeGeneratorMIPS64::generateOutOfLineCode()) and the
+ * thunk code must save it on the stack. In addition, the snapshotOffset_ and
+ * padding_ members are pushed to the stack by
+ * CodeGeneratorMIPS64::visitOutOfLineBailout().
+ */
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // Push the frameSize_ stored in ra
+ // See: CodeGeneratorMIPS64::generateOutOfLineCode()
+ masm.push(ra);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Put pointer to BailoutStack as first argument to the Bailout()
+ masm.movePtr(StackPointer, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, a0);
+
+ // Put pointer to BailoutInfo
+ static const uint32_t sizeOfBailoutInfo = sizeof(uintptr_t) * 2;
+ masm.subPtr(Imm32(sizeOfBailoutInfo), StackPointer);
+ masm.movePtr(StackPointer, a1);
+
+ using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ // Get BailoutInfo pointer
+ masm.loadPtr(Address(StackPointer, 0), a2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // If it isn't a tail call, then the return address needs to be saved
+ if (f.expectTailCall == NonTailCall) {
+ masm.pushReturnAddress();
+ }
+
+ // Push the frame pointer to finish the exit frame, then link it up.
+ masm.Push(FramePointer);
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = t1; // Use temporary register.
+ regs.take(argsBase);
+ masm.ma_daddu(argsBase, StackPointer,
+ Imm32(ExitFrameLayout::SizeWithFooter()));
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Bool:
+ case Type_Int32:
+ outReg = regs.takeAny();
+ // The outparam needs only 4 bytes; reserve 8 to keep the stack 8-byte
+ // aligned.
+ masm.reserveStack(2 * sizeof(int32_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ if (f.argPassedInFloatReg(explicitArg)) {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ } else {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ }
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ case VMFunctionData::DoubleByRef:
+ MOZ_CRASH("NYI: MIPS64 callVM should not be used with 128bits values.");
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (InvalidReg != outReg) {
+ masm.passABIArg(outReg);
+ }
+
+ masm.callWithABI(nativeFun, MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(v0, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ masm.load32(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Pointer:
+ masm.loadPtr(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Double:
+ masm.as_ldc1(ReturnDoubleReg, StackPointer, 0);
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ // Pop ExitFooterFrame and the frame pointer.
+ masm.leaveExitFrame(sizeof(void*));
+
+ // Return. Subtract sizeof(void*) for the frame pointer.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ Register temp1 = a0;
+ Register temp2 = a2;
+ Register temp3 = a3;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet save;
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ save.add(ra);
+ masm.PushRegsInMask(save);
+
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ save.take(AnyRegister(ra));
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.abiret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(a1, a2);
+}
diff --git a/js/src/jit/moz.build b/js/src/jit/moz.build
new file mode 100644
index 0000000000..e4f140fdfa
--- /dev/null
+++ b/js/src/jit/moz.build
@@ -0,0 +1,295 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+FINAL_LIBRARY = "js"
+
+# Includes should be relative to parent path
+LOCAL_INCLUDES += ["!..", ".."]
+
+include("../js-config.mozbuild")
+include("../js-cxxflags.mozbuild")
+
+UNIFIED_SOURCES += [
+ "AliasAnalysis.cpp",
+ "AlignmentMaskAnalysis.cpp",
+ "BacktrackingAllocator.cpp",
+ "Bailouts.cpp",
+ "BaselineBailouts.cpp",
+ "BaselineCacheIRCompiler.cpp",
+ "BaselineCodeGen.cpp",
+ "BaselineDebugModeOSR.cpp",
+ "BaselineFrame.cpp",
+ "BaselineFrameInfo.cpp",
+ "BaselineIC.cpp",
+ "BaselineJIT.cpp",
+ "BitSet.cpp",
+ "BytecodeAnalysis.cpp",
+ "CacheIR.cpp",
+ "CacheIRCompiler.cpp",
+ "CacheIRHealth.cpp",
+ "CacheIRSpewer.cpp",
+ "CodeGenerator.cpp",
+ "CompileWrappers.cpp",
+ "Disassemble.cpp",
+ "EdgeCaseAnalysis.cpp",
+ "EffectiveAddressAnalysis.cpp",
+ "ExecutableAllocator.cpp",
+ "FlushICache.cpp",
+ "FoldLinearArithConstants.cpp",
+ "InlinableNatives.cpp",
+ "InstructionReordering.cpp",
+ "InterpreterEntryTrampoline.cpp",
+ "Ion.cpp",
+ "IonAnalysis.cpp",
+ "IonCacheIRCompiler.cpp",
+ "IonCompileTask.cpp",
+ "IonIC.cpp",
+ "IonOptimizationLevels.cpp",
+ "Jit.cpp",
+ "JitcodeMap.cpp",
+ "JitContext.cpp",
+ "JitFrames.cpp",
+ "JitOptions.cpp",
+ "JitScript.cpp",
+ "JitSpewer.cpp",
+ "JSJitFrameIter.cpp",
+ "JSONSpewer.cpp",
+ "KnownClass.cpp",
+ "Label.cpp",
+ "LICM.cpp",
+ "Linker.cpp",
+ "LIR.cpp",
+ "Lowering.cpp",
+ "MacroAssembler.cpp",
+ "MIR.cpp",
+ "MIRGraph.cpp",
+ "MoveResolver.cpp",
+ "PerfSpewer.cpp",
+ "ProcessExecutableMemory.cpp",
+ "RangeAnalysis.cpp",
+ "ReciprocalMulConstants.cpp",
+ "Recover.cpp",
+ "RegisterAllocator.cpp",
+ "RematerializedFrame.cpp",
+ "SafepointIndex.cpp",
+ "Safepoints.cpp",
+ "ScalarReplacement.cpp",
+ "shared/Assembler-shared.cpp",
+ "shared/AtomicOperations-shared-jit.cpp",
+ "shared/CodeGenerator-shared.cpp",
+ "shared/Disassembler-shared.cpp",
+ "shared/Lowering-shared.cpp",
+ "ShuffleAnalysis.cpp",
+ "Sink.cpp",
+ "Snapshots.cpp",
+ "Trampoline.cpp",
+ "TrialInlining.cpp",
+ "TypePolicy.cpp",
+ "ValueNumbering.cpp",
+ "VMFunctions.cpp",
+ "WarpBuilder.cpp",
+ "WarpBuilderShared.cpp",
+ "WarpCacheIRTranspiler.cpp",
+ "WarpOracle.cpp",
+ "WarpSnapshot.cpp",
+ "WasmBCE.cpp",
+ "XrayJitInfo.cpp",
+]
+
+if CONFIG["JS_CODEGEN_NONE"]:
+ UNIFIED_SOURCES += ["none/Trampoline-none.cpp"]
+elif CONFIG["JS_CODEGEN_X86"] or CONFIG["JS_CODEGEN_X64"]:
+ UNIFIED_SOURCES += [
+ "x86-shared/Architecture-x86-shared.cpp",
+ "x86-shared/Assembler-x86-shared.cpp",
+ "x86-shared/AssemblerBuffer-x86-shared.cpp",
+ "x86-shared/CodeGenerator-x86-shared.cpp",
+ "x86-shared/Lowering-x86-shared.cpp",
+ "x86-shared/MacroAssembler-x86-shared-SIMD.cpp",
+ "x86-shared/MacroAssembler-x86-shared.cpp",
+ "x86-shared/MoveEmitter-x86-shared.cpp",
+ ]
+ if CONFIG["JS_CODEGEN_X64"]:
+ UNIFIED_SOURCES += [
+ "x64/Assembler-x64.cpp",
+ "x64/CodeGenerator-x64.cpp",
+ "x64/Lowering-x64.cpp",
+ "x64/MacroAssembler-x64.cpp",
+ "x64/Trampoline-x64.cpp",
+ ]
+ else:
+ UNIFIED_SOURCES += [
+ "x86/Assembler-x86.cpp",
+ "x86/CodeGenerator-x86.cpp",
+ "x86/Lowering-x86.cpp",
+ "x86/MacroAssembler-x86.cpp",
+ "x86/Trampoline-x86.cpp",
+ ]
+elif CONFIG["JS_CODEGEN_ARM"]:
+ UNIFIED_SOURCES += [
+ "arm/Architecture-arm.cpp",
+ "arm/Assembler-arm.cpp",
+ "arm/CodeGenerator-arm.cpp",
+ "arm/disasm/Constants-arm.cpp",
+ "arm/disasm/Disasm-arm.cpp",
+ "arm/Lowering-arm.cpp",
+ "arm/MacroAssembler-arm.cpp",
+ "arm/MoveEmitter-arm.cpp",
+ "arm/Trampoline-arm.cpp",
+ ]
+ if CONFIG["JS_SIMULATOR_ARM"]:
+ UNIFIED_SOURCES += ["arm/Simulator-arm.cpp"]
+ elif CONFIG["OS_ARCH"] == "Darwin":
+ SOURCES += [
+ "arm/llvm-compiler-rt/arm/aeabi_idivmod.S",
+ "arm/llvm-compiler-rt/arm/aeabi_uidivmod.S",
+ ]
+elif CONFIG["JS_CODEGEN_ARM64"]:
+ UNIFIED_SOURCES += [
+ "arm64/Architecture-arm64.cpp",
+ "arm64/Assembler-arm64.cpp",
+ "arm64/CodeGenerator-arm64.cpp",
+ "arm64/Lowering-arm64.cpp",
+ "arm64/MacroAssembler-arm64.cpp",
+ "arm64/MoveEmitter-arm64.cpp",
+ "arm64/Trampoline-arm64.cpp",
+ "arm64/vixl/Assembler-vixl.cpp",
+ "arm64/vixl/Cpu-Features-vixl.cpp",
+ "arm64/vixl/Cpu-vixl.cpp",
+ "arm64/vixl/Decoder-vixl.cpp",
+ "arm64/vixl/Instructions-vixl.cpp",
+ "arm64/vixl/MacroAssembler-vixl.cpp",
+ "arm64/vixl/MozAssembler-vixl.cpp",
+ "arm64/vixl/MozCpu-vixl.cpp",
+ "arm64/vixl/MozInstructions-vixl.cpp",
+ "arm64/vixl/Utils-vixl.cpp",
+ ]
+ vixl_werror_sources = [
+ "arm64/vixl/Disasm-vixl.cpp",
+ "arm64/vixl/Instrument-vixl.cpp",
+ ]
+ SOURCES += vixl_werror_sources
+ if CONFIG["CC_TYPE"] == "clang-cl":
+ for f in vixl_werror_sources:
+ SOURCES[f].flags += ["-Wno-c++11-narrowing"]
+ if CONFIG["JS_SIMULATOR_ARM64"]:
+ UNIFIED_SOURCES += [
+ "arm64/vixl/Debugger-vixl.cpp",
+ "arm64/vixl/Logic-vixl.cpp",
+ "arm64/vixl/MozSimulator-vixl.cpp",
+ "arm64/vixl/Simulator-vixl.cpp",
+ ]
+elif CONFIG["JS_CODEGEN_MIPS32"] or CONFIG["JS_CODEGEN_MIPS64"]:
+ UNIFIED_SOURCES += [
+ "mips-shared/Architecture-mips-shared.cpp",
+ "mips-shared/Assembler-mips-shared.cpp",
+ "mips-shared/CodeGenerator-mips-shared.cpp",
+ "mips-shared/Lowering-mips-shared.cpp",
+ "mips-shared/MacroAssembler-mips-shared.cpp",
+ "mips-shared/MoveEmitter-mips-shared.cpp",
+ ]
+ if CONFIG["JS_CODEGEN_MIPS32"]:
+ UNIFIED_SOURCES += [
+ "mips32/Architecture-mips32.cpp",
+ "mips32/Assembler-mips32.cpp",
+ "mips32/CodeGenerator-mips32.cpp",
+ "mips32/Lowering-mips32.cpp",
+ "mips32/MacroAssembler-mips32.cpp",
+ "mips32/MoveEmitter-mips32.cpp",
+ "mips32/Trampoline-mips32.cpp",
+ ]
+ if CONFIG["JS_SIMULATOR_MIPS32"]:
+ UNIFIED_SOURCES += ["mips32/Simulator-mips32.cpp"]
+ elif CONFIG["JS_CODEGEN_MIPS64"]:
+ UNIFIED_SOURCES += [
+ "mips64/Architecture-mips64.cpp",
+ "mips64/Assembler-mips64.cpp",
+ "mips64/CodeGenerator-mips64.cpp",
+ "mips64/Lowering-mips64.cpp",
+ "mips64/MacroAssembler-mips64.cpp",
+ "mips64/MoveEmitter-mips64.cpp",
+ "mips64/Trampoline-mips64.cpp",
+ ]
+ if CONFIG["JS_SIMULATOR_MIPS64"]:
+ UNIFIED_SOURCES += ["mips64/Simulator-mips64.cpp"]
+elif CONFIG["JS_CODEGEN_LOONG64"]:
+ UNIFIED_SOURCES += [
+ "loong64/Architecture-loong64.cpp",
+ "loong64/Assembler-loong64.cpp",
+ "loong64/CodeGenerator-loong64.cpp",
+ "loong64/Lowering-loong64.cpp",
+ "loong64/MacroAssembler-loong64.cpp",
+ "loong64/MoveEmitter-loong64.cpp",
+ "loong64/Trampoline-loong64.cpp",
+ ]
+ if CONFIG["JS_SIMULATOR_LOONG64"]:
+ UNIFIED_SOURCES += ["loong64/Simulator-loong64.cpp"]
+elif CONFIG["JS_CODEGEN_RISCV64"]:
+ UNIFIED_SOURCES += [
+ "riscv64/Architecture-riscv64.cpp",
+ "riscv64/Assembler-riscv64.cpp",
+ "riscv64/AssemblerMatInt.cpp",
+ "riscv64/CodeGenerator-riscv64.cpp",
+ "riscv64/constant/Base-constant-riscv.cpp",
+ "riscv64/disasm/Disasm-riscv64.cpp",
+ "riscv64/extension/base-assembler-riscv.cc",
+ "riscv64/extension/base-riscv-i.cc",
+ "riscv64/extension/extension-riscv-a.cc",
+ "riscv64/extension/extension-riscv-c.cc",
+ "riscv64/extension/extension-riscv-d.cc",
+ "riscv64/extension/extension-riscv-f.cc",
+ "riscv64/extension/extension-riscv-m.cc",
+ "riscv64/extension/extension-riscv-v.cc",
+ "riscv64/extension/extension-riscv-zicsr.cc",
+ "riscv64/extension/extension-riscv-zifencei.cc",
+ "riscv64/Lowering-riscv64.cpp",
+ "riscv64/MacroAssembler-riscv64.cpp",
+ "riscv64/MoveEmitter-riscv64.cpp",
+ "riscv64/Trampoline-riscv64.cpp",
+ ]
+ if CONFIG["JS_SIMULATOR_RISCV64"]:
+ UNIFIED_SOURCES += ["riscv64/Simulator-riscv64.cpp"]
+elif CONFIG["JS_CODEGEN_WASM32"]:
+ UNIFIED_SOURCES += [
+ "wasm32/CodeGenerator-wasm32.cpp",
+ "wasm32/MacroAssembler-wasm32.cpp",
+ "wasm32/Trampoline-wasm32.cpp",
+ ]
+
+# Generate jit/MIROpsGenerated.h from jit/MIROps.yaml
+GeneratedFile(
+ "MIROpsGenerated.h",
+ script="GenerateMIRFiles.py",
+ entry_point="generate_mir_header",
+ inputs=["MIROps.yaml"],
+)
+
+# Generate jit/LIROpsGenerated.h from jit/LIR.h, jit/shared/LIR-shared.h, and
+# platform-specific LIR files.
+GeneratedFile(
+ "LIROpsGenerated.h",
+ script="GenerateLIRFiles.py",
+ entry_point="generate_lir_header",
+ inputs=["LIROps.yaml"],
+)
+
+# Generate jit/CacheIROpsGenerated.h from jit/CacheIROps.yaml
+GeneratedFile(
+ "CacheIROpsGenerated.h",
+ script="GenerateCacheIRFiles.py",
+ entry_point="generate_cacheirops_header",
+ inputs=["CacheIROps.yaml"],
+)
+
+GeneratedFile(
+ "AtomicOperationsGenerated.h",
+ script="GenerateAtomicOperations.py",
+ entry_point="generate_atomics_header",
+ inputs=[],
+)
+
+if CONFIG["FUZZING_INTERFACES"] or CONFIG["FUZZING_JS_FUZZILLI"]:
+ include("/tools/fuzzing/libfuzzer-config.mozbuild")
diff --git a/js/src/jit/none/Architecture-none.h b/js/src/jit/none/Architecture-none.h
new file mode 100644
index 0000000000..2433234fbf
--- /dev/null
+++ b/js/src/jit/none/Architecture-none.h
@@ -0,0 +1,171 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_Architecture_none_h
+#define jit_none_Architecture_none_h
+
+// JitSpewer.h is included through MacroAssembler implementations for other
+// platforms, so include it here to avoid inadvertent build bustage.
+#include "jit/JitSpewer.h"
+
+#include "jit/shared/Architecture-shared.h"
+
+namespace js {
+namespace jit {
+
+static const uint32_t SimdMemoryAlignment =
+ 4; // Make it 4 to avoid a bunch of div-by-zero warnings
+static const uint32_t WasmStackAlignment = 8;
+static const uint32_t WasmTrapInstructionLength = 0;
+
+// See comments in wasm::GenerateFunctionPrologue.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+class Registers {
+ public:
+ enum RegisterID {
+ r0 = 0,
+ invalid_reg,
+ invalid_reg2, // To avoid silly static_assert failures.
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ typedef uint8_t SetType;
+
+ static uint32_t SetSize(SetType) { MOZ_CRASH(); }
+ static uint32_t FirstBit(SetType) { MOZ_CRASH(); }
+ static uint32_t LastBit(SetType) { MOZ_CRASH(); }
+ static const char* GetName(Code) { MOZ_CRASH(); }
+ static Code FromName(const char*) { MOZ_CRASH(); }
+
+ static const Encoding StackPointer = invalid_reg;
+ static const Encoding Invalid = invalid_reg;
+ static const uint32_t Total = 1;
+ static const uint32_t TotalPhys = 0;
+ static const uint32_t Allocatable = 0;
+ static const SetType AllMask = 0;
+ static const SetType ArgRegMask = 0;
+ static const SetType VolatileMask = 0;
+ static const SetType NonVolatileMask = 0;
+ static const SetType NonAllocatableMask = 0;
+ static const SetType AllocatableMask = 0;
+ static const SetType JSCallMask = 0;
+ static const SetType CallMask = 0;
+};
+
+typedef uint8_t PackedRegisterMask;
+
+class FloatRegisters {
+ public:
+ enum FPRegisterID { f0 = 0, invalid_reg };
+ typedef FPRegisterID Code;
+ typedef FPRegisterID Encoding;
+ union RegisterContent {
+ float s;
+ double d;
+ };
+
+ typedef uint32_t SetType;
+
+ static const char* GetName(Code) { MOZ_CRASH(); }
+ static Code FromName(const char*) { MOZ_CRASH(); }
+
+ static const Code Invalid = invalid_reg;
+ static const uint32_t Total = 0;
+ static const uint32_t TotalPhys = 0;
+ static const uint32_t Allocatable = 0;
+ static const SetType AllMask = 0;
+ static const SetType AllDoubleMask = 0;
+ static const SetType AllSingleMask = 0;
+ static const SetType VolatileMask = 0;
+ static const SetType NonVolatileMask = 0;
+ static const SetType NonAllocatableMask = 0;
+ static const SetType AllocatableMask = 0;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+struct FloatRegister {
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+
+ Code _;
+
+ static uint32_t FirstBit(SetType) { MOZ_CRASH(); }
+ static uint32_t LastBit(SetType) { MOZ_CRASH(); }
+ static FloatRegister FromCode(uint32_t) { MOZ_CRASH(); }
+ bool isSingle() const { MOZ_CRASH(); }
+ bool isDouble() const { MOZ_CRASH(); }
+ bool isSimd128() const { MOZ_CRASH(); }
+ bool isInvalid() const { MOZ_CRASH(); }
+ FloatRegister asSingle() const { MOZ_CRASH(); }
+ FloatRegister asDouble() const { MOZ_CRASH(); }
+ FloatRegister asSimd128() const { MOZ_CRASH(); }
+ Code code() const { MOZ_CRASH(); }
+ Encoding encoding() const { MOZ_CRASH(); }
+ const char* name() const { MOZ_CRASH(); }
+ bool volatile_() const { MOZ_CRASH(); }
+ bool operator!=(FloatRegister) const { MOZ_CRASH(); }
+ bool operator==(FloatRegister) const { MOZ_CRASH(); }
+ bool aliases(FloatRegister) const { MOZ_CRASH(); }
+ uint32_t numAliased() const { MOZ_CRASH(); }
+ FloatRegister aliased(uint32_t) { MOZ_CRASH(); }
+ bool equiv(FloatRegister) const { MOZ_CRASH(); }
+ uint32_t size() const { MOZ_CRASH(); }
+ uint32_t numAlignedAliased() const { MOZ_CRASH(); }
+ FloatRegister alignedAliased(uint32_t) { MOZ_CRASH(); }
+ SetType alignedOrDominatedAliasedSet() const { MOZ_CRASH(); }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ return SetType(0);
+ }
+
+ template <typename T>
+ static T ReduceSetForPush(T) {
+ MOZ_CRASH();
+ }
+ uint32_t getRegisterDumpOffsetInBytes() { MOZ_CRASH(); }
+ static uint32_t SetSize(SetType x) { MOZ_CRASH(); }
+ static Code FromName(const char* name) { MOZ_CRASH(); }
+
+ // This is used in static initializers, so produce a bogus value instead of
+ // crashing.
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>&) {
+ return 0;
+ }
+};
+
+inline bool hasUnaliasedDouble() { MOZ_CRASH(); }
+inline bool hasMultiAlias() { MOZ_CRASH(); }
+
+static const uint32_t ShadowStackSpace = 0;
+static const uint32_t JumpImmediateRange = INT32_MAX;
+
+#ifdef JS_NUNBOX32
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_Architecture_none_h */
diff --git a/js/src/jit/none/Assembler-none.h b/js/src/jit/none/Assembler-none.h
new file mode 100644
index 0000000000..b69fc462e2
--- /dev/null
+++ b/js/src/jit/none/Assembler-none.h
@@ -0,0 +1,211 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_Assembler_none_h
+#define jit_none_Assembler_none_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/none/Architecture-none.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+class MacroAssembler;
+
+static constexpr Register StackPointer{Registers::invalid_reg};
+static constexpr Register FramePointer{Registers::invalid_reg};
+static constexpr Register ReturnReg{Registers::invalid_reg2};
+static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister ReturnSimd128Reg = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister ScratchSimd128Reg = {
+ FloatRegisters::invalid_reg};
+static constexpr FloatRegister InvalidFloatReg = {FloatRegisters::invalid_reg};
+
+struct ScratchFloat32Scope : FloatRegister {
+ explicit ScratchFloat32Scope(MacroAssembler& masm) {}
+};
+
+struct ScratchDoubleScope : FloatRegister {
+ explicit ScratchDoubleScope(MacroAssembler& masm) {}
+};
+
+static constexpr Register OsrFrameReg{Registers::invalid_reg};
+static constexpr Register PreBarrierReg{Registers::invalid_reg};
+static constexpr Register InterpreterPCReg{Registers::invalid_reg};
+static constexpr Register CallTempReg0{Registers::invalid_reg};
+static constexpr Register CallTempReg1{Registers::invalid_reg};
+static constexpr Register CallTempReg2{Registers::invalid_reg};
+static constexpr Register CallTempReg3{Registers::invalid_reg};
+static constexpr Register CallTempReg4{Registers::invalid_reg};
+static constexpr Register CallTempReg5{Registers::invalid_reg};
+static constexpr Register InvalidReg{Registers::invalid_reg};
+static constexpr Register CallTempNonArgRegs[] = {InvalidReg, InvalidReg};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+static constexpr Register IntArgReg0{Registers::invalid_reg};
+static constexpr Register IntArgReg1{Registers::invalid_reg};
+static constexpr Register IntArgReg2{Registers::invalid_reg};
+static constexpr Register IntArgReg3{Registers::invalid_reg};
+static constexpr Register HeapReg{Registers::invalid_reg};
+
+static constexpr Register RegExpMatcherRegExpReg{Registers::invalid_reg};
+static constexpr Register RegExpMatcherStringReg{Registers::invalid_reg};
+static constexpr Register RegExpMatcherLastIndexReg{Registers::invalid_reg};
+
+static constexpr Register RegExpExecTestRegExpReg{Registers::invalid_reg};
+static constexpr Register RegExpExecTestStringReg{Registers::invalid_reg};
+
+static constexpr Register RegExpSearcherRegExpReg{Registers::invalid_reg};
+static constexpr Register RegExpSearcherStringReg{Registers::invalid_reg};
+static constexpr Register RegExpSearcherLastIndexReg{Registers::invalid_reg};
+
+// Uses |invalid_reg2| to avoid static_assert failures.
+static constexpr Register JSReturnReg_Type{Registers::invalid_reg2};
+static constexpr Register JSReturnReg_Data{Registers::invalid_reg2};
+static constexpr Register JSReturnReg{Registers::invalid_reg2};
+
+#if defined(JS_NUNBOX32)
+static constexpr ValueOperand JSReturnOperand(InvalidReg, InvalidReg);
+static constexpr Register64 ReturnReg64(InvalidReg, InvalidReg);
+#elif defined(JS_PUNBOX64)
+static constexpr ValueOperand JSReturnOperand(InvalidReg);
+static constexpr Register64 ReturnReg64(InvalidReg);
+#else
+# error "Bad architecture"
+#endif
+
+static constexpr Register ABINonArgReg0{Registers::invalid_reg};
+static constexpr Register ABINonArgReg1{Registers::invalid_reg};
+static constexpr Register ABINonArgReg2{Registers::invalid_reg};
+static constexpr Register ABINonArgReg3{Registers::invalid_reg};
+static constexpr Register ABINonArgReturnReg0{Registers::invalid_reg};
+static constexpr Register ABINonArgReturnReg1{Registers::invalid_reg};
+static constexpr Register ABINonVolatileReg{Registers::invalid_reg};
+static constexpr Register ABINonArgReturnVolatileReg{Registers::invalid_reg};
+
+static constexpr FloatRegister ABINonArgDoubleReg = {
+ FloatRegisters::invalid_reg};
+
+static constexpr Register WasmTableCallScratchReg0{Registers::invalid_reg};
+static constexpr Register WasmTableCallScratchReg1{Registers::invalid_reg};
+static constexpr Register WasmTableCallSigReg{Registers::invalid_reg};
+static constexpr Register WasmTableCallIndexReg{Registers::invalid_reg};
+static constexpr Register InstanceReg{Registers::invalid_reg};
+static constexpr Register WasmJitEntryReturnScratch{Registers::invalid_reg};
+static constexpr Register WasmCallRefCallScratchReg0{Registers::invalid_reg};
+static constexpr Register WasmCallRefCallScratchReg1{Registers::invalid_reg};
+static constexpr Register WasmCallRefReg{Registers::invalid_reg};
+
+static constexpr uint32_t ABIStackAlignment = 4;
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 8;
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+
+static const Scale ScalePointer = TimesOne;
+
+class Assembler : public AssemblerShared {
+ public:
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual,
+ Overflow,
+ CarrySet,
+ CarryClear,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ Always,
+ };
+
+ enum DoubleCondition {
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ static Condition InvertCondition(Condition) { MOZ_CRASH(); }
+
+ static DoubleCondition InvertCondition(DoubleCondition) { MOZ_CRASH(); }
+
+ template <typename T, typename S>
+ static void PatchDataWithValueCheck(CodeLocationLabel, T, S) {
+ MOZ_CRASH();
+ }
+ static void PatchWrite_Imm32(CodeLocationLabel, Imm32) { MOZ_CRASH(); }
+
+ static void PatchWrite_NearCall(CodeLocationLabel, CodeLocationLabel) {
+ MOZ_CRASH();
+ }
+ static uint32_t PatchWrite_NearCallSize() { MOZ_CRASH(); }
+
+ static void ToggleToJmp(CodeLocationLabel) { MOZ_CRASH(); }
+ static void ToggleToCmp(CodeLocationLabel) { MOZ_CRASH(); }
+ static void ToggleCall(CodeLocationLabel, bool) { MOZ_CRASH(); }
+
+ static void Bind(uint8_t*, const CodeLabel&) { MOZ_CRASH(); }
+
+ static uintptr_t GetPointer(uint8_t*) { MOZ_CRASH(); }
+
+ static bool HasRoundInstruction(RoundingMode) { return false; }
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess) {
+ MOZ_CRASH();
+ }
+
+ void setUnlimitedBuffer() { MOZ_CRASH(); }
+};
+
+class Operand {
+ public:
+ explicit Operand(const Address&) { MOZ_CRASH(); }
+ explicit Operand(const Register) { MOZ_CRASH(); }
+ explicit Operand(const FloatRegister) { MOZ_CRASH(); }
+ explicit Operand(Register, Imm32) { MOZ_CRASH(); }
+ explicit Operand(Register, int32_t) { MOZ_CRASH(); }
+};
+
+class ABIArgGenerator {
+ public:
+ ABIArgGenerator() { MOZ_CRASH(); }
+ ABIArg next(MIRType) { MOZ_CRASH(); }
+ ABIArg& current() { MOZ_CRASH(); }
+ uint32_t stackBytesConsumedSoFar() const { MOZ_CRASH(); }
+ void increaseStackOffset(uint32_t) { MOZ_CRASH(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_Assembler_none_h */
diff --git a/js/src/jit/none/CodeGenerator-none.h b/js/src/jit/none/CodeGenerator-none.h
new file mode 100644
index 0000000000..6efd71555e
--- /dev/null
+++ b/js/src/jit/none/CodeGenerator-none.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_CodeGenerator_none_h
+#define jit_none_CodeGenerator_none_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorNone : public CodeGeneratorShared {
+ protected:
+ CodeGeneratorNone(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {
+ MOZ_CRASH();
+ }
+
+ MoveOperand toMoveOperand(LAllocation) const { MOZ_CRASH(); }
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition, T1, T2, LSnapshot*) {
+ MOZ_CRASH();
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition, T1, T2, LSnapshot*) {
+ MOZ_CRASH();
+ }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition, T1, T2, LSnapshot*) {
+ MOZ_CRASH();
+ }
+ void bailoutTestPtr(Assembler::Condition, Register, Register, LSnapshot*) {
+ MOZ_CRASH();
+ }
+ void bailoutIfFalseBool(Register, LSnapshot*) { MOZ_CRASH(); }
+ void bailoutFrom(Label*, LSnapshot*) { MOZ_CRASH(); }
+ void bailout(LSnapshot*) { MOZ_CRASH(); }
+ void bailoutIf(Assembler::Condition, LSnapshot*) { MOZ_CRASH(); }
+ bool generateOutOfLineCode() { MOZ_CRASH(); }
+ void testNullEmitBranch(Assembler::Condition, ValueOperand, MBasicBlock*,
+ MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void testUndefinedEmitBranch(Assembler::Condition, ValueOperand, MBasicBlock*,
+ MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void testObjectEmitBranch(Assembler::Condition, ValueOperand, MBasicBlock*,
+ MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void testZeroEmitBranch(Assembler::Condition, Register, MBasicBlock*,
+ MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void emitTableSwitchDispatch(MTableSwitch*, Register, Register) {
+ MOZ_CRASH();
+ }
+ void emitBigIntDiv(LBigIntDiv*, Register, Register, Register, Label*) {
+ MOZ_CRASH();
+ }
+ void emitBigIntMod(LBigIntMod*, Register, Register, Register, Label*) {
+ MOZ_CRASH();
+ }
+ ValueOperand ToValue(LInstruction*, size_t) { MOZ_CRASH(); }
+ ValueOperand ToTempValue(LInstruction*, size_t) { MOZ_CRASH(); }
+ void generateInvalidateEpilogue() { MOZ_CRASH(); }
+};
+
+typedef CodeGeneratorNone CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_CodeGenerator_none_h */
diff --git a/js/src/jit/none/LIR-none.h b/js/src/jit/none/LIR-none.h
new file mode 100644
index 0000000000..c04578630e
--- /dev/null
+++ b/js/src/jit/none/LIR-none.h
@@ -0,0 +1,111 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_LIR_none_h
+#define jit_none_LIR_none_h
+
+namespace js {
+namespace jit {
+
+class LUnboxFloatingPoint : public LInstruction {
+ public:
+ LIR_HEADER(UnboxFloatingPoint)
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { MOZ_CRASH(); }
+
+ const LDefinition* output() const { MOZ_CRASH(); }
+ MIRType type() const { MOZ_CRASH(); }
+};
+
+class LTableSwitch : public LInstruction {
+ public:
+ LIR_HEADER(TableSwitch)
+ MTableSwitch* mir() { MOZ_CRASH(); }
+
+ const LAllocation* index() { MOZ_CRASH(); }
+ const LDefinition* tempInt() { MOZ_CRASH(); }
+ const LDefinition* tempPointer() { MOZ_CRASH(); }
+};
+
+class LTableSwitchV : public LInstruction {
+ public:
+ LIR_HEADER(TableSwitchV)
+ MTableSwitch* mir() { MOZ_CRASH(); }
+
+ const LDefinition* tempInt() { MOZ_CRASH(); }
+ const LDefinition* tempFloat() { MOZ_CRASH(); }
+ const LDefinition* tempPointer() { MOZ_CRASH(); }
+
+ static const size_t InputValue = 0;
+};
+
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ explicit LWasmUint32ToFloat32(const LAllocation&)
+ : LInstructionHelper(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0> {
+ public:
+ MUnbox* mir() const { MOZ_CRASH(); }
+ const LAllocation* payload() { MOZ_CRASH(); }
+ const LAllocation* type() { MOZ_CRASH(); }
+ const char* extraName() const { MOZ_CRASH(); }
+};
+class LDivI : public LBinaryMath<1> {
+ public:
+ LDivI(const LAllocation&, const LAllocation&, const LDefinition&)
+ : LBinaryMath(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+ MDiv* mir() const { MOZ_CRASH(); }
+};
+class LDivPowTwoI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LDivPowTwoI(const LAllocation&, int32_t)
+ : LInstructionHelper(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+ const LAllocation* numerator() { MOZ_CRASH(); }
+ int32_t shift() { MOZ_CRASH(); }
+ MDiv* mir() const { MOZ_CRASH(); }
+};
+class LModI : public LBinaryMath<1> {
+ public:
+ LModI(const LAllocation&, const LAllocation&, const LDefinition&)
+ : LBinaryMath(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+
+ const LDefinition* callTemp() { MOZ_CRASH(); }
+ MMod* mir() const { MOZ_CRASH(); }
+};
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ explicit LWasmUint32ToDouble(const LAllocation&)
+ : LInstructionHelper(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+};
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ public:
+ int32_t shift() { MOZ_CRASH(); }
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+ MMod* mir() const { MOZ_CRASH(); }
+};
+
+class LMulI : public LInstruction {};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_LIR_none_h */
diff --git a/js/src/jit/none/Lowering-none.h b/js/src/jit/none/Lowering-none.h
new file mode 100644
index 0000000000..ad804b970b
--- /dev/null
+++ b/js/src/jit/none/Lowering-none.h
@@ -0,0 +1,130 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_Lowering_none_h
+#define jit_none_Lowering_none_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorNone : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorNone(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {
+ MOZ_CRASH();
+ }
+
+ LBoxAllocation useBoxFixed(MDefinition*, Register, Register,
+ bool useAtStart = false) {
+ MOZ_CRASH();
+ }
+
+ LAllocation useByteOpRegister(MDefinition*) { MOZ_CRASH(); }
+ LAllocation useByteOpRegisterAtStart(MDefinition*) { MOZ_CRASH(); }
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition*) {
+ MOZ_CRASH();
+ }
+ LDefinition tempByteOpRegister() { MOZ_CRASH(); }
+ LDefinition tempToUnbox() { MOZ_CRASH(); }
+ bool needTempForPostBarrier() { MOZ_CRASH(); }
+ void lowerUntypedPhiInput(MPhi*, uint32_t, LBlock*, size_t) { MOZ_CRASH(); }
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t) { MOZ_CRASH(); }
+ void defineInt64Phi(MPhi*, size_t) { MOZ_CRASH(); }
+ void lowerForShift(LInstructionHelper<1, 2, 0>*, MDefinition*, MDefinition*,
+ MDefinition*) {
+ MOZ_CRASH();
+ }
+ void lowerUrshD(MUrsh*) { MOZ_CRASH(); }
+ void lowerPowOfTwoI(MPow*) { MOZ_CRASH(); }
+ template <typename T>
+ void lowerForALU(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void lowerForFPU(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void lowerForALUInt64(T, MDefinition*, MDefinition*,
+ MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ void lowerForMulInt64(LMulI64*, MMul*, MDefinition*,
+ MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void lowerForShiftInt64(T, MDefinition*, MDefinition*,
+ MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ void lowerForBitAndAndBranch(LBitAndAndBranch*, MInstruction*, MDefinition*,
+ MDefinition*) {
+ MOZ_CRASH();
+ }
+ void lowerForCompareI64AndBranch(MTest*, MCompare*, JSOp, MDefinition*,
+ MDefinition*, MBasicBlock*, MBasicBlock*) {
+ MOZ_CRASH();
+ }
+
+ void lowerConstantDouble(double, MInstruction*) { MOZ_CRASH(); }
+ void lowerConstantFloat32(float, MInstruction*) { MOZ_CRASH(); }
+ void lowerTruncateDToInt32(MTruncateToInt32*) { MOZ_CRASH(); }
+ void lowerTruncateFToInt32(MTruncateToInt32*) { MOZ_CRASH(); }
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH();
+ }
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH();
+ }
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins) {
+ MOZ_CRASH();
+ }
+ void lowerDivI(MDiv*) { MOZ_CRASH(); }
+ void lowerModI(MMod*) { MOZ_CRASH(); }
+ void lowerDivI64(MDiv*) { MOZ_CRASH(); }
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) { MOZ_CRASH(); }
+ void lowerModI64(MMod*) { MOZ_CRASH(); }
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) { MOZ_CRASH(); }
+ void lowerNegI(MInstruction*, MDefinition*) { MOZ_CRASH(); }
+ void lowerNegI64(MInstruction*, MDefinition*) { MOZ_CRASH(); }
+ void lowerMulI(MMul*, MDefinition*, MDefinition*) { MOZ_CRASH(); }
+ void lowerUDiv(MDiv*) { MOZ_CRASH(); }
+ void lowerUMod(MMod*) { MOZ_CRASH(); }
+ void lowerWasmSelectI(MWasmSelect* select) { MOZ_CRASH(); }
+ void lowerWasmSelectI64(MWasmSelect* select) { MOZ_CRASH(); }
+ void lowerWasmCompareAndSelect(MWasmSelect* ins, MDefinition* lhs,
+ MDefinition* rhs, MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_CRASH();
+ }
+ bool canSpecializeWasmCompareAndSelect(MCompare::CompareType compTy,
+ MIRType insTy) {
+ MOZ_CRASH();
+ }
+
+ void lowerBigIntLsh(MBigIntLsh*) { MOZ_CRASH(); }
+ void lowerBigIntRsh(MBigIntRsh*) { MOZ_CRASH(); }
+ void lowerBigIntDiv(MBigIntDiv*) { MOZ_CRASH(); }
+ void lowerBigIntMod(MBigIntMod*) { MOZ_CRASH(); }
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar*) { MOZ_CRASH(); }
+ void lowerAtomicStore64(MStoreUnboxedScalar*) { MOZ_CRASH(); }
+
+ LTableSwitch* newLTableSwitch(LAllocation, LDefinition, MTableSwitch*) {
+ MOZ_CRASH();
+ }
+ LTableSwitchV* newLTableSwitchV(MTableSwitch*) { MOZ_CRASH(); }
+};
+
+typedef LIRGeneratorNone LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_Lowering_none_h */
diff --git a/js/src/jit/none/MacroAssembler-none.h b/js/src/jit/none/MacroAssembler-none.h
new file mode 100644
index 0000000000..2a89c8836f
--- /dev/null
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -0,0 +1,454 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_MacroAssembler_none_h
+#define jit_none_MacroAssembler_none_h
+
+#include <iterator>
+
+#include "jit/MoveResolver.h"
+#include "jit/none/Assembler-none.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+class CompactBufferReader;
+
+class ScratchTagScope {
+ public:
+ ScratchTagScope(MacroAssembler&, const ValueOperand) {}
+ operator Register() { MOZ_CRASH(); }
+ void release() { MOZ_CRASH(); }
+ void reacquire() { MOZ_CRASH(); }
+};
+
+class ScratchTagScopeRelease {
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope*) {}
+};
+
+class MacroAssemblerNone : public Assembler {
+ public:
+ MacroAssemblerNone() { MOZ_CRASH(); }
+
+ MoveResolver moveResolver_;
+
+ size_t size() const { MOZ_CRASH(); }
+ size_t bytesNeeded() const { MOZ_CRASH(); }
+ size_t jumpRelocationTableBytes() const { MOZ_CRASH(); }
+ size_t dataRelocationTableBytes() const { MOZ_CRASH(); }
+ size_t preBarrierTableBytes() const { MOZ_CRASH(); }
+
+ size_t numCodeLabels() const { MOZ_CRASH(); }
+ CodeLabel codeLabel(size_t) { MOZ_CRASH(); }
+
+ bool reserve(size_t size) { MOZ_CRASH(); }
+ bool appendRawCode(const uint8_t* code, size_t numBytes) { MOZ_CRASH(); }
+ bool swapBuffer(wasm::Bytes& bytes) { MOZ_CRASH(); }
+
+ void assertNoGCThings() const { MOZ_CRASH(); }
+
+ static void TraceJumpRelocations(JSTracer*, JitCode*, CompactBufferReader&) {
+ MOZ_CRASH();
+ }
+ static void TraceDataRelocations(JSTracer*, JitCode*, CompactBufferReader&) {
+ MOZ_CRASH();
+ }
+
+ static bool SupportsFloatingPoint() { return false; }
+ static bool SupportsUnalignedAccesses() { return false; }
+ static bool SupportsFastUnalignedFPAccesses() { return false; }
+
+ void executableCopy(void*, bool = true) { MOZ_CRASH(); }
+ void copyJumpRelocationTable(uint8_t*) { MOZ_CRASH(); }
+ void copyDataRelocationTable(uint8_t*) { MOZ_CRASH(); }
+ void copyPreBarrierTable(uint8_t*) { MOZ_CRASH(); }
+ void processCodeLabels(uint8_t*) { MOZ_CRASH(); }
+
+ void flushBuffer() { MOZ_CRASH(); }
+
+ template <typename T>
+ void bind(T) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void j(Condition, T) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void jump(T) {
+ MOZ_CRASH();
+ }
+ void writeCodePointer(CodeLabel* label) { MOZ_CRASH(); }
+ void haltingAlign(size_t) { MOZ_CRASH(); }
+ void nopAlign(size_t) { MOZ_CRASH(); }
+ void checkStackAlignment() { MOZ_CRASH(); }
+ uint32_t currentOffset() { MOZ_CRASH(); }
+
+ void nop() { MOZ_CRASH(); }
+ void breakpoint() { MOZ_CRASH(); }
+ void abiret() { MOZ_CRASH(); }
+ void ret() { MOZ_CRASH(); }
+
+ CodeOffset toggledJump(Label*) { MOZ_CRASH(); }
+ CodeOffset toggledCall(JitCode*, bool) { MOZ_CRASH(); }
+ static size_t ToggledCallSize(uint8_t*) { MOZ_CRASH(); }
+
+ void finish() { MOZ_CRASH(); }
+
+ template <typename T, typename S>
+ void moveValue(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S, typename U>
+ void moveValue(T, S, U) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void storeValue(const T&, const S&) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S, typename U>
+ void storeValue(T, S, U) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void storePrivateValue(const T&, const S&) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void loadValue(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void loadUnalignedValue(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void pushValue(const T&) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void pushValue(T, S) {
+ MOZ_CRASH();
+ }
+ void popValue(ValueOperand) { MOZ_CRASH(); }
+ void tagValue(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }
+ void retn(Imm32 n) { MOZ_CRASH(); }
+ template <typename T>
+ void push(const T&) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void Push(T) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void pop(T) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void Pop(T) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ CodeOffset pushWithPatch(T) {
+ MOZ_CRASH();
+ }
+
+ void testNullSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
+ void testObjectSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
+ void testUndefinedSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
+
+ template <typename T, typename S>
+ void cmpPtrSet(Condition, T, S, Register) {
+ MOZ_CRASH();
+ }
+ void cmp8Set(Condition, Address, Imm32, Register) { MOZ_CRASH(); }
+ void cmp16Set(Condition, Address, Imm32, Register) { MOZ_CRASH(); }
+ template <typename T, typename S>
+ void cmp32Set(Condition, T, S, Register) {
+ MOZ_CRASH();
+ }
+ void cmp64Set(Condition, Address, Imm64, Register) { MOZ_CRASH(); }
+
+ template <typename T>
+ void mov(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void movePtr(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void move32(const T&, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void movq(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void moveFloat32(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void moveDouble(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void move64(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ CodeOffset movWithPatch(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void loadPtr(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load32(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load32Unaligned(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void loadFloat32(T, FloatRegister) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void loadDouble(T, FloatRegister) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void loadPrivate(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load8SignExtend(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load8ZeroExtend(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load16SignExtend(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load16UnalignedSignExtend(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load16ZeroExtend(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load16UnalignedZeroExtend(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load64(T, Register64) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void load64Unaligned(T, Register64) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void storePtr(const T&, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void store32(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void store32Unaligned(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void storeFloat32(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void storeDouble(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void store8(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void store16(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void store16Unaligned(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void store64(T, S) {
+ MOZ_CRASH();
+ }
+ template <typename T, typename S>
+ void store64Unaligned(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void computeEffectiveAddress(T, Register) {
+ MOZ_CRASH();
+ }
+
+ void splitTagForTest(ValueOperand, ScratchTagScope&) { MOZ_CRASH(); }
+
+ void boxDouble(FloatRegister, ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void boxNonDouble(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }
+ template <typename T>
+ void boxDouble(FloatRegister src, const T& dest) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void unboxInt32(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void unboxBoolean(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void unboxString(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void unboxSymbol(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void unboxBigInt(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void unboxObject(T, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void unboxDouble(T, FloatRegister) {
+ MOZ_CRASH();
+ }
+ void unboxValue(const ValueOperand&, AnyRegister, JSValueType) {
+ MOZ_CRASH();
+ }
+ void unboxNonDouble(const ValueOperand&, Register, JSValueType) {
+ MOZ_CRASH();
+ }
+ void unboxNonDouble(const Address&, Register, JSValueType) { MOZ_CRASH(); }
+ template <typename T>
+ void unboxGCThingForGCBarrier(const T&, Register) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ MOZ_CRASH();
+ }
+ void notBoolean(ValueOperand) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractObject(Address, Register) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractObject(ValueOperand, Register) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractSymbol(ValueOperand, Register) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractInt32(ValueOperand, Register) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractBoolean(ValueOperand, Register) { MOZ_CRASH(); }
+ template <typename T>
+ [[nodiscard]] Register extractTag(T, Register) {
+ MOZ_CRASH();
+ }
+
+ void convertFloat32ToInt32(FloatRegister, Register, Label*, bool v = true) {
+ MOZ_CRASH();
+ }
+ void convertDoubleToInt32(FloatRegister, Register, Label*, bool v = true) {
+ MOZ_CRASH();
+ }
+ void convertDoubleToPtr(FloatRegister, Register, Label*, bool v = true) {
+ MOZ_CRASH();
+ }
+ void convertBoolToInt32(Register, Register) { MOZ_CRASH(); }
+
+ void convertDoubleToFloat32(FloatRegister, FloatRegister) { MOZ_CRASH(); }
+ void convertInt32ToFloat32(Register, FloatRegister) { MOZ_CRASH(); }
+
+ template <typename T>
+ void convertInt32ToDouble(T, FloatRegister) {
+ MOZ_CRASH();
+ }
+ void convertFloat32ToDouble(FloatRegister, FloatRegister) { MOZ_CRASH(); }
+
+ void boolValueToDouble(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void boolValueToFloat32(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void int32ValueToDouble(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void int32ValueToFloat32(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+
+ void loadConstantDouble(double, FloatRegister) { MOZ_CRASH(); }
+ void loadConstantFloat32(float, FloatRegister) { MOZ_CRASH(); }
+ Condition testInt32Truthy(bool, ValueOperand) { MOZ_CRASH(); }
+ Condition testStringTruthy(bool, ValueOperand) { MOZ_CRASH(); }
+ Condition testBigIntTruthy(bool, ValueOperand) { MOZ_CRASH(); }
+
+ template <typename T>
+ void loadUnboxedValue(T, MIRType, AnyRegister) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void storeUnboxedValue(const ConstantOrRegister&, MIRType, T) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T, size_t, JSValueType) {
+ MOZ_CRASH();
+ }
+
+ void convertUInt32ToDouble(Register, FloatRegister) { MOZ_CRASH(); }
+ void convertUInt32ToFloat32(Register, FloatRegister) { MOZ_CRASH(); }
+ void incrementInt32Value(Address) { MOZ_CRASH(); }
+ void ensureDouble(ValueOperand, FloatRegister, Label*) { MOZ_CRASH(); }
+ void handleFailureWithHandlerTail(Label*, Label*) { MOZ_CRASH(); }
+
+ void buildFakeExitFrame(Register, uint32_t*) { MOZ_CRASH(); }
+ bool buildOOLFakeExitFrame(void*) { MOZ_CRASH(); }
+
+ void setPrinter(Sprinter*) { MOZ_CRASH(); }
+ Operand ToPayload(Operand base) { MOZ_CRASH(); }
+ Address ToPayload(Address) { MOZ_CRASH(); }
+
+ Register getStackPointer() const { MOZ_CRASH(); }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register, Register) { MOZ_CRASH(); }
+ void profilerExitFrame() { MOZ_CRASH(); }
+
+#ifdef JS_NUNBOX32
+ Address ToType(Address) { MOZ_CRASH(); }
+#endif
+};
+
+typedef MacroAssemblerNone MacroAssemblerSpecific;
+
+static inline bool GetTempRegForIntArg(uint32_t, uint32_t, Register*) {
+ MOZ_CRASH();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_MacroAssembler_none_h */
diff --git a/js/src/jit/none/MoveEmitter-none.h b/js/src/jit/none/MoveEmitter-none.h
new file mode 100644
index 0000000000..39e60bfaee
--- /dev/null
+++ b/js/src/jit/none/MoveEmitter-none.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_MoveEmitter_none_h
+#define jit_none_MoveEmitter_none_h
+
+#include "mozilla/Assertions.h"
+
+namespace js {
+namespace jit {
+
+class MacroAssemblerNone;
+class MoveResolver;
+struct Register;
+
+class MoveEmitterNone {
+ public:
+ explicit MoveEmitterNone(MacroAssemblerNone&) { MOZ_CRASH(); }
+ void emit(const MoveResolver&) { MOZ_CRASH(); }
+ void finish() { MOZ_CRASH(); }
+ void setScratchRegister(Register) { MOZ_CRASH(); }
+};
+
+typedef MoveEmitterNone MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_MoveEmitter_none_h */
diff --git a/js/src/jit/none/SharedICHelpers-none-inl.h b/js/src/jit/none/SharedICHelpers-none-inl.h
new file mode 100644
index 0000000000..2d63956ba7
--- /dev/null
+++ b/js/src/jit/none/SharedICHelpers-none-inl.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_SharedICHelpers_none_inl_h
+#define jit_none_SharedICHelpers_none_inl_h
+
+#include "jit/SharedICHelpers.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr, MacroAssembler&, uint32_t) {
+ MOZ_CRASH();
+}
+inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler&, Register,
+ uint32_t) {
+ MOZ_CRASH();
+}
+inline void EmitBaselineCallVM(TrampolinePtr, MacroAssembler&) { MOZ_CRASH(); }
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler&, Register) {
+ MOZ_CRASH();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_SharedICHelpers_none_inl_h */
diff --git a/js/src/jit/none/SharedICHelpers-none.h b/js/src/jit/none/SharedICHelpers-none.h
new file mode 100644
index 0000000000..8c2b4ee396
--- /dev/null
+++ b/js/src/jit/none/SharedICHelpers-none.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_SharedICHelpers_none_h
+#define jit_none_SharedICHelpers_none_h
+
+namespace js {
+namespace jit {
+
+static const size_t ICStackValueOffset = 0;
+
+inline void EmitRestoreTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitRepushTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitCallIC(MacroAssembler&, CodeOffset*) { MOZ_CRASH(); }
+inline void EmitReturnFromIC(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitBaselineLeaveStubFrame(MacroAssembler&, bool v = false) {
+ MOZ_CRASH();
+}
+inline void EmitStubGuardFailure(MacroAssembler&) { MOZ_CRASH(); }
+
+template <typename T>
+inline void EmitPreBarrier(MacroAssembler&, T, MIRType) {
+ MOZ_CRASH();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_SharedICHelpers_none_h */
diff --git a/js/src/jit/none/SharedICRegisters-none.h b/js/src/jit/none/SharedICRegisters-none.h
new file mode 100644
index 0000000000..170e5058a9
--- /dev/null
+++ b/js/src/jit/none/SharedICRegisters-none.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_none_SharedICRegisters_none_h
+#define jit_none_SharedICRegisters_none_h
+
+#include "jit/none/MacroAssembler-none.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+static constexpr ValueOperand R0 = JSReturnOperand;
+static constexpr ValueOperand R1 = JSReturnOperand;
+static constexpr ValueOperand R2 = JSReturnOperand;
+
+static constexpr Register ICTailCallReg{Registers::invalid_reg};
+static constexpr Register ICStubReg{Registers::invalid_reg};
+
+static constexpr FloatRegister FloatReg0 = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister FloatReg1 = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister FloatReg2 = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister FloatReg3 = {FloatRegisters::invalid_reg};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_none_SharedICRegisters_none_h */
diff --git a/js/src/jit/none/Trampoline-none.cpp b/js/src/jit/none/Trampoline-none.cpp
new file mode 100644
index 0000000000..d2ee3a9a05
--- /dev/null
+++ b/js/src/jit/none/Trampoline-none.cpp
@@ -0,0 +1,43 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineIC.h"
+#include "jit/JitRuntime.h"
+#include "vm/Realm.h"
+
+using namespace js;
+using namespace js::jit;
+
+// This file includes stubs for generating the JIT trampolines when there is no
+// JIT backend, and also includes implementations for assorted random things
+// which can't be implemented in headers.
+
+void JitRuntime::generateEnterJIT(JSContext*, MacroAssembler&) { MOZ_CRASH(); }
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ return mozilla::Nothing{};
+}
+void JitRuntime::generateInvalidator(MacroAssembler&, Label*) { MOZ_CRASH(); }
+void JitRuntime::generateArgumentsRectifier(MacroAssembler&,
+ ArgumentsRectifierKind kind) {
+ MOZ_CRASH();
+}
+void JitRuntime::generateBailoutHandler(MacroAssembler&, Label*) {
+ MOZ_CRASH();
+}
+uint32_t JitRuntime::generatePreBarrier(JSContext*, MacroAssembler&, MIRType) {
+ MOZ_CRASH();
+}
+void JitRuntime::generateBailoutTailStub(MacroAssembler&, Label*) {
+ MOZ_CRASH();
+}
+
+bool JitRuntime::generateVMWrapper(JSContext*, MacroAssembler&,
+ const VMFunctionData&, DynFn, uint32_t*) {
+ MOZ_CRASH();
+}
diff --git a/js/src/jit/riscv64/Architecture-riscv64.cpp b/js/src/jit/riscv64/Architecture-riscv64.cpp
new file mode 100644
index 0000000000..ea4a364b92
--- /dev/null
+++ b/js/src/jit/riscv64/Architecture-riscv64.cpp
@@ -0,0 +1,100 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/riscv64/Architecture-riscv64.h"
+
+#include "jit/FlushICache.h" // js::jit::FlushICache
+#include "jit/RegisterSets.h"
+#include "jit/Simulator.h"
+namespace js {
+namespace jit {
+Registers::Code Registers::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisters::Code FloatRegisters::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ if ((*iter).isSingle()) {
+      // Even for single-size registers, save the complete double register.
+ mod.addUnchecked((*iter).doubleOverlay());
+ } else {
+ mod.addUnchecked(*iter);
+ }
+ }
+ return mod.set();
+}
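+
+// For example, ReduceSetForPush() widens a set containing only fa0 as a
+// Single to fa0's double overlay, so the full 8-byte register is pushed.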
+
+FloatRegister FloatRegister::singleOverlay() const {
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ == Codes::Double) {
+ return FloatRegister(encoding_, Codes::Single);
+ }
+ return *this;
+}
+
+FloatRegister FloatRegister::doubleOverlay() const {
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ != Codes::Double) {
+ return FloatRegister(encoding_, Codes::Double);
+ }
+ return *this;
+}
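+
+// Illustrative note: the Single and Double overlays name the same physical
+// register. For instance, the double overlay of {fa0, Single} compares equal
+// to {fa0, Double} via operator==, which only inspects the encoding.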
+
+uint32_t FloatRegister::GetPushSizeInBytes(
+ const TypedRegisterSet<FloatRegister>& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ return s.size() * sizeof(double);
+}
+void FlushICache(void* code, size_t size) {
+#if defined(JS_SIMULATOR)
+ js::jit::SimulatorProcess::FlushICache(code, size);
+
+#elif defined(__linux__)
+# if defined(__GNUC__)
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code),
+ reinterpret_cast<char*>(end));
+
+# else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+# endif
+#else
+# error "Unsupported platform"
+#endif
+}
+
+bool CPUFlagsHaveBeenComputed() {
+  // TODO: Add CPU flags support. For now, pretend the flags have already
+  // been computed.
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/Architecture-riscv64.h b/js/src/jit/riscv64/Architecture-riscv64.h
new file mode 100644
index 0000000000..e53273f2e2
--- /dev/null
+++ b/js/src/jit/riscv64/Architecture-riscv64.h
@@ -0,0 +1,513 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_Architecture_riscv64_h
+#define jit_riscv64_Architecture_riscv64_h
+
+// JitSpewer.h is included through MacroAssembler implementations for other
+// platforms, so include it here to avoid inadvertent build bustage.
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "jit/JitSpewer.h"
+#include "jit/shared/Architecture-shared.h"
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+static const uint32_t SimdMemoryAlignment =
+ 16; // Make it 4 to avoid a bunch of div-by-zero warnings
+
+// RISCV64 has 32 64-bit integer registers, x0 though x31.
+// The program counter is not accessible as a register.
+
+// RISCV INT Register Convention:
+// Name Alias Usage
+// x0 zero hardwired to 0, ignores writes
+// x1 ra return address for calls
+// x2 sp stack pointer
+// x3 gp global pointer
+// x4 tp thread pointer
+// x5-x7 t0-t2 temporary register 0
+// x8 fp/s0 Callee-saved register 0 or frame pointer
+// x9 s1 Callee-saved register 1
+// x10-x11 a0-a1 return value or function argument
+// x12-x17 a2-a7 function argument 2
+// x18-x27 s2-s11 Callee-saved register
+// x28-x31 t3-t6 temporary register 3
+
+// RISCV-64 FP Register Convention:
+// Name Alias Usage
+// $f0-$f7 $ft0-$ft7 Temporary registers
+// $f8-$f9 $fs0-$fs1 Callee-saved registers
+// $f10-$f11 $fa0-$fa1 Return values
+// $f12-$f17 $fa2-$fa7 Args values
+// $f18-$f27 $fs2-$fs11 Callee-saved registers
+// $f28-$f31 $ft8-$ft11 Temporary registers
+class Registers {
+ public:
+ enum RegisterID {
+ x0 = 0,
+ x1,
+ x2,
+ x3,
+ x4,
+ x5,
+ x6,
+ x7,
+ x8,
+ x9,
+ x10,
+ x11,
+ x12,
+ x13,
+ x14,
+ x15,
+ x16,
+ x17,
+ x18,
+ x19,
+ x20,
+ x21,
+ x22,
+ x23,
+ x24,
+ x25,
+ x26,
+ x27,
+ x28,
+ x29,
+ x30,
+ x31,
+ zero = x0,
+ ra = x1,
+ sp = x2,
+ gp = x3,
+ tp = x4,
+ t0 = x5,
+ t1 = x6,
+ t2 = x7,
+ fp = x8,
+ s1 = x9,
+ a0 = x10,
+ a1 = x11,
+ a2 = x12,
+ a3 = x13,
+ a4 = x14,
+ a5 = x15,
+ a6 = x16,
+ a7 = x17,
+ s2 = x18,
+ s3 = x19,
+ s4 = x20,
+ s5 = x21,
+ s6 = x22,
+ s7 = x23,
+ s8 = x24,
+ s9 = x25,
+ s10 = x26,
+ s11 = x27,
+ t3 = x28,
+ t4 = x29,
+ t5 = x30,
+ t6 = x31,
+ invalid_reg,
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ typedef uint32_t SetType;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+ static const char* GetName(uint32_t code) {
+ static const char* const Names[] = {
+ "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "fp", "s1", "a0",
+ "a1", "a2", "a3", "a4", "a5", "a6", "a7", "s2", "s3", "s4", "s5",
+ "s6", "s7", "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6"};
+ static_assert(Total == std::size(Names), "Table is the correct size");
+ if (code >= Total) {
+ return "invalid";
+ }
+ return Names[code];
+ }
+
+ static Code FromName(const char*);
+
+ static const Encoding StackPointer = sp;
+ static const Encoding Invalid = invalid_reg;
+ static const uint32_t Total = 32;
+ static const uint32_t TotalPhys = 32;
+ static const uint32_t Allocatable = 24;
+ static const SetType NoneMask = 0x0;
+ static const SetType AllMask = 0xFFFFFFFF;
+ static const SetType ArgRegMask =
+ (1 << Registers::a0) | (1 << Registers::a1) | (1 << Registers::a2) |
+ (1 << Registers::a3) | (1 << Registers::a4) | (1 << Registers::a5) |
+ (1 << Registers::a6) | (1 << Registers::a7);
+
+ static const SetType VolatileMask =
+ ArgRegMask | (1 << Registers::t0) | (1 << Registers::t1) |
+ (1 << Registers::t2) | (1 << Registers::t3) | (1 << Registers::t4) |
+ (1 << Registers::t5) | (1 << Registers::t6);
+
+ // We use this constant to save registers when entering functions. This
+ // is why $ra is added here even though it is not "Non Volatile".
+ static const SetType NonVolatileMask =
+ (1 << Registers::ra) | (1 << Registers::fp) | (1 << Registers::s1) |
+ (1 << Registers::s2) | (1 << Registers::s3) | (1 << Registers::s4) |
+ (1 << Registers::s5) | (1 << Registers::s6) | (1 << Registers::s7) |
+ (1 << Registers::s8) | (1 << Registers::s9) | (1 << Registers::s10) |
+ (1 << Registers::s11);
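+
+  // For illustration: membership in these masks is a plain bit test, e.g.
+  //   bool saved = (NonVolatileMask >> Registers::s3) & 1;  // true
+  // and the masks below are composed with ordinary bitwise operators.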
+
+ static const SetType NonAllocatableMask =
+ (1 << Registers::zero) | // Always be zero.
+ (1 << Registers::t4) | // Scratch reg
+ (1 << Registers::t5) | // Scratch reg
+ (1 << Registers::t6) | // Scratch reg or call reg
+ (1 << Registers::s11) | // Scratch reg
+ (1 << Registers::ra) | (1 << Registers::tp) | (1 << Registers::sp) |
+ (1 << Registers::fp) | (1 << Registers::gp);
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask = (1 << Registers::a2);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask = (1 << Registers::a0);
+
+ static const SetType WrapperMask = VolatileMask;
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
+class FloatRegisters {
+ public:
+ enum FPRegisterID {
+ f0 = 0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ invalid_reg,
+ ft0 = f0,
+ ft1 = f1,
+ ft2 = f2,
+ ft3 = f3,
+ ft4 = f4,
+ ft5 = f5,
+ ft6 = f6,
+ ft7 = f7,
+ fs0 = f8,
+ fs1 = f9,
+ fa0 = f10,
+ fa1 = f11,
+ fa2 = f12,
+ fa3 = f13,
+ fa4 = f14,
+ fa5 = f15,
+ fa6 = f16,
+ fa7 = f17,
+ fs2 = f18,
+ fs3 = f19,
+ fs4 = f20,
+ fs5 = f21,
+ fs6 = f22,
+ fs7 = f23,
+ fs8 = f24,
+ fs9 = f25,
+ fs10 = f26,
+ fs11 = f27, // Scratch register
+ ft8 = f28,
+ ft9 = f29,
+ ft10 = f30, // Scratch register
+ ft11 = f31
+ };
+
+  enum Kind : uint8_t { Double, Single, NumTypes };
+
+ typedef FPRegisterID Code;
+ typedef FPRegisterID Encoding;
+ union RegisterContent {
+ float s;
+ double d;
+ };
+
+ static const char* GetName(uint32_t code) {
+ static const char* const Names[] = {
+ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",
+        "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",
+ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",
+ "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"};
+ static_assert(TotalPhys == std::size(Names), "Table is the correct size");
+ if (code >= Total) {
+ return "invalid";
+ }
+ return Names[code];
+ }
+
+ static Code FromName(const char* name);
+
+ typedef uint32_t SetType;
+
+ static const Code Invalid = invalid_reg;
+ static const uint32_t Total = 32;
+ static const uint32_t TotalPhys = 32;
+ static const uint32_t Allocatable = 23;
+ static const SetType AllPhysMask = 0xFFFFFFFF;
+ static const SetType AllMask = 0xFFFFFFFF;
+ static const SetType AllDoubleMask = AllMask;
+  // Single values are stored as 64-bit values (NaN-boxed) when pushed to the
+  // stack, so we do not need to distinguish between the two types and the
+  // masks overlap. See section 14.2, "NaN Boxing of Narrower Values", in The
+  // RISC-V Instruction Set Manual.
+ static const SetType AllSingleMask = AllMask;
+ static const SetType NonVolatileMask =
+ SetType((1 << FloatRegisters::fs0) | (1 << FloatRegisters::fs1) |
+ (1 << FloatRegisters::fs2) | (1 << FloatRegisters::fs3) |
+ (1 << FloatRegisters::fs4) | (1 << FloatRegisters::fs5) |
+ (1 << FloatRegisters::fs6) | (1 << FloatRegisters::fs7) |
+ (1 << FloatRegisters::fs8) | (1 << FloatRegisters::fs9) |
+ (1 << FloatRegisters::fs10) | (1 << FloatRegisters::fs11));
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+  // fs11 and ft10 are the scratch registers.
+ static const SetType NonAllocatableMask =
+ SetType((1 << FloatRegisters::fs11) | (1 << FloatRegisters::ft10));
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+struct FloatRegister {
+ public:
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ x &= FloatRegisters::AllPhysMask;
+ return mozilla::CountPopulation32(x);
+ }
+
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType");
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType");
+ return 31 - mozilla::CountLeadingZeroes64(x);
+ }
+
+ static FloatRegister FromCode(uint32_t i) {
+ uint32_t code = i & 0x1f;
+ return FloatRegister(Code(code));
+ }
+ bool isSimd128() const { return false; }
+ bool isInvalid() const { return invalid_; }
+ FloatRegister asSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Single);
+ }
+ FloatRegister asDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Double);
+ }
+ FloatRegister asSimd128() const { MOZ_CRASH(); }
+ constexpr Code code() const {
+ MOZ_ASSERT(!invalid_);
+ return encoding_;
+ }
+ Encoding encoding() const { return encoding_; }
+ const char* name() const { return FloatRegisters::GetName(code()); }
+ bool volatile_() const {
+ MOZ_ASSERT(!invalid_);
+ return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+ }
+ bool operator!=(FloatRegister other) const { return code() != other.code(); }
+ bool operator==(FloatRegister other) const { return code() == other.code(); }
+ bool aliases(FloatRegister other) const {
+ return other.encoding_ == encoding_;
+ }
+ uint32_t numAliased() const { return 1; }
+ FloatRegister aliased(uint32_t aliasIdx) const {
+ MOZ_ASSERT(aliasIdx == 0);
+ return *this;
+ }
+ // Ensure that two floating point registers' types are equivalent.
+ bool equiv(FloatRegister other) const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == other.kind_;
+ }
+ constexpr uint32_t size() const {
+ MOZ_ASSERT(!invalid_);
+ if (kind_ == FloatRegisters::Double) {
+ return sizeof(double);
+ }
+ MOZ_ASSERT(kind_ == FloatRegisters::Single);
+ return sizeof(float);
+ }
+ uint32_t numAlignedAliased() { return numAliased(); }
+ FloatRegister alignedAliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(aliasIdx < numAliased());
+ return aliased(aliasIdx);
+ }
+ SetType alignedOrDominatedAliasedSet() const { return SetType(1) << code(); }
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName Name = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ printf("AllocatableAsIndexableSet\n");
+ return LiveAsIndexableSet<Name>(s);
+ }
+
+ FloatRegister singleOverlay() const;
+ FloatRegister doubleOverlay() const;
+
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+ const TypedRegisterSet<FloatRegister>& s);
+
+ uint32_t getRegisterDumpOffsetInBytes() {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ return code() * sizeof(double);
+ }
+ static Code FromName(const char* name);
+
+ // This is used in static initializers, so produce a bogus value instead of
+ // crashing.
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+
+ private:
+ typedef Codes::Kind Kind;
+ // These fields only hold valid values: an invalid register is always
+ // represented as a valid encoding and kind with the invalid_ bit set.
+ Encoding encoding_; // 32 encodings
+ Kind kind_; // Double, Single; more later
+ bool invalid_;
+
+ public:
+ constexpr FloatRegister(Encoding encoding, Kind kind)
+ : encoding_(encoding), kind_(kind), invalid_(false) {
+ MOZ_ASSERT(uint32_t(encoding) < Codes::Total);
+ }
+
+ constexpr FloatRegister(Encoding encoding)
+ : encoding_(encoding), kind_(FloatRegisters::Double), invalid_(false) {
+ MOZ_ASSERT(uint32_t(encoding) < Codes::Total);
+ }
+
+ constexpr FloatRegister()
+ : encoding_(FloatRegisters::invalid_reg),
+ kind_(FloatRegisters::Double),
+ invalid_(true) {}
+
+ bool isSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Single;
+ }
+ bool isDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Double;
+ }
+
+ Encoding code() { return encoding_; }
+};
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
+ return set;
+}
+
+inline bool hasUnaliasedDouble() { return false; }
+inline bool hasMultiAlias() { return false; }
+
+static const uint32_t ShadowStackSpace = 0;
+static const uint32_t JumpImmediateRange = INT32_MAX;
+
+#ifdef JS_NUNBOX32
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+#endif
+
+static const uint32_t SpillSlotSize =
+ std::max(sizeof(Registers::RegisterContent),
+ sizeof(FloatRegisters::RegisterContent));
+
+inline uint32_t GetRISCV64Flags() { return 0; }
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_Architecture_riscv64_h */
diff --git a/js/src/jit/riscv64/Assembler-riscv64.cpp b/js/src/jit/riscv64/Assembler-riscv64.cpp
new file mode 100644
index 0000000000..d9e748bfb9
--- /dev/null
+++ b/js/src/jit/riscv64/Assembler-riscv64.cpp
@@ -0,0 +1,1548 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+#include "jit/riscv64/Assembler-riscv64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/Marking.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/riscv64/disasm/Disasm-riscv64.h"
+#include "vm/Realm.h"
+
+using mozilla::DebugOnly;
+namespace js {
+namespace jit {
+
+#define UNIMPLEMENTED_RISCV() MOZ_CRASH("RISC_V not implemented");
+
+bool Assembler::FLAG_riscv_debug = false;
+
+void Assembler::nop() { addi(ToRegister(0), ToRegister(0), 0); }
+
+// Size of the instruction stream, in bytes.
+size_t Assembler::size() const { return m_buffer.size(); }
+
+bool Assembler::swapBuffer(wasm::Bytes& bytes) {
+ // For now, specialize to the one use case. As long as wasm::Bytes is a
+ // Vector, not a linked-list of chunks, there's not much we can do other
+ // than copy.
+ MOZ_ASSERT(bytes.empty());
+ if (!bytes.resize(bytesNeeded())) {
+ return false;
+ }
+ m_buffer.executableCopy(bytes.begin());
+ return true;
+}
+
+// Size of the relocation table, in bytes.
+size_t Assembler::jumpRelocationTableBytes() const {
+ return jumpRelocations_.length();
+}
+
+size_t Assembler::dataRelocationTableBytes() const {
+ return dataRelocations_.length();
+}
+// Size of the data table, in bytes.
+size_t Assembler::bytesNeeded() const {
+ return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
+}
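+
+// That is, room for the instruction stream plus both relocation tables; the
+// three pieces are copied out separately by executableCopy(),
+// copyJumpRelocationTable() and copyDataRelocationTable() below.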
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+}
+
+uint32_t Assembler::AsmPoolMaxOffset = 1024;
+
+uint32_t Assembler::GetPoolMaxOffset() {
+ static bool isSet = false;
+ if (!isSet) {
+ char* poolMaxOffsetStr = getenv("ASM_POOL_MAX_OFFSET");
+ uint32_t poolMaxOffset;
+ if (poolMaxOffsetStr &&
+ sscanf(poolMaxOffsetStr, "%u", &poolMaxOffset) == 1) {
+ AsmPoolMaxOffset = poolMaxOffset;
+ }
+ isSet = true;
+ }
+ return AsmPoolMaxOffset;
+}
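+
+// For example, setting ASM_POOL_MAX_OFFSET=512 in the environment lowers the
+// limit from its default of 1024; the variable is consulted once, on the
+// first call to GetPoolMaxOffset().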
+
+// Pool callbacks stuff:
+void Assembler::InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
+ MOZ_CRASH("Unimplement");
+}
+
+void Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
+ MOZ_CRASH("Unimplement");
+}
+
+void Assembler::processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+}
+
+void Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest,
+ BufferOffset afterPool) {
+ DEBUG_PRINTF("\tWritePoolGuard\n");
+ int32_t off = afterPool.getOffset() - branch.getOffset();
+ if (!is_int21(off) || !((off & 0x1) == 0)) {
+ printf("%d\n", off);
+ MOZ_CRASH("imm invalid");
+ }
+ // JAL encode is
+ // 31 | 30 21 | 20 | 19 12 | 11 7 | 6 0 |
+ // imm[20] | imm[10:1] | imm[11] | imm[19:12] | rd | opcode|
+ // 1 10 1 8 5 7
+ // offset[20:1] dest JAL
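+  // As an illustrative example: for off = +2048 (0x800), only imm[11] is set,
+  // so the expression below produces imm20 = 0x800 << 9 = 0x100000, i.e.
+  // instruction bit 20.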
+ int32_t imm20 = (off & 0xff000) | // bits 19-12
+ ((off & 0x800) << 9) | // bit 11
+ ((off & 0x7fe) << 20) | // bits 10-1
+ ((off & 0x100000) << 11); // bit 20
+ Instr instr = JAL | (imm20 & kImm20Mask);
+ dest->SetInstructionBits(instr);
+ DEBUG_PRINTF("%p(%x): ", dest, branch.getOffset());
+ disassembleInstr(dest->InstructionBits(), JitSpew_Codegen);
+}
+
+void Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural) {
+ static_assert(sizeof(PoolHeader) == 4);
+
+ // Get the total size of the pool.
+ const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize();
+ const uintptr_t totalPoolInstructions = totalPoolSize / kInstrSize;
+
+ MOZ_ASSERT((totalPoolSize & 0x3) == 0);
+ MOZ_ASSERT(totalPoolInstructions < (1 << 15));
+
+ PoolHeader header(totalPoolInstructions, isNatural);
+ *(PoolHeader*)start = header;
+}
+
+void Assembler::copyJumpRelocationTable(uint8_t* dest) {
+ if (jumpRelocations_.length()) {
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+ }
+}
+
+void Assembler::copyDataRelocationTable(uint8_t* dest) {
+ if (dataRelocations_.length()) {
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+ }
+}
+
+void Assembler::RV_li(Register rd, int64_t imm) {
+ UseScratchRegisterScope temps(this);
+ if (RecursiveLiCount(imm) > GeneralLiCount(imm, temps.hasAvailable())) {
+ GeneralLi(rd, imm);
+ } else {
+ RecursiveLi(rd, imm);
+ }
+}
+
+int Assembler::RV_li_count(int64_t imm, bool is_get_temp_reg) {
+ if (RecursiveLiCount(imm) > GeneralLiCount(imm, is_get_temp_reg)) {
+ return GeneralLiCount(imm, is_get_temp_reg);
+ } else {
+ return RecursiveLiCount(imm);
+ }
+}
+
+void Assembler::GeneralLi(Register rd, int64_t imm) {
+ // 64-bit imm is put in the register rd.
+  // In most cases the imm is 32-bit and 2 instructions are generated. If a
+  // temporary register is available, in the worst case, 6 instructions are
+  // generated for a full 64-bit immediate. If no temporary register is
+  // available, the maximum is 8 instructions. If imm is more than 32 bits
+ // and a temp register is available, imm is divided into two 32-bit parts,
+ // low_32 and up_32. Each part is built in a separate register. low_32 is
+ // built before up_32. If low_32 is negative (upper 32 bits are 1), 0xffffffff
+ // is subtracted from up_32 before up_32 is built. This compensates for 32
+ // bits of 1's in the lower when the two registers are added. If no temp is
+ // available, the upper 32 bit is built in rd, and the lower 32 bits are
+ // devided to 3 parts (11, 11, and 10 bits). The parts are shifted and added
+ // to the upper part built in rd.
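+  // Worked example of the 32-bit path (a sketch): for imm = 0x12345,
+  // high_20 = (0x12345 + 0x800) >> 12 = 0x12 and the sign-extended
+  // low_12 = 0x345, so the sequence is
+  //   lui  rd, 0x12       (rd = 0x12000)
+  //   addi rd, rd, 0x345  (rd = 0x12345)
+  // The +0x800 rounding makes lui over-shoot by one 4KiB page whenever bit 11
+  // of imm is set; the negative, sign-extended addi immediate then corrects
+  // for it.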
+ if (is_int32(imm + 0x800)) {
+ // 32-bit case. Maximum of 2 instructions generated
+ int64_t high_20 = ((imm + 0x800) >> 12);
+ int64_t low_12 = imm << 52 >> 52;
+ if (high_20) {
+ lui(rd, (int32_t)high_20);
+ if (low_12) {
+ addi(rd, rd, low_12);
+ }
+ } else {
+ addi(rd, zero_reg, low_12);
+ }
+ return;
+ } else {
+ UseScratchRegisterScope temps(this);
+ // 64-bit case: divide imm into two 32-bit parts, upper and lower
+ int64_t up_32 = imm >> 32;
+ int64_t low_32 = imm & 0xffffffffull;
+ Register temp_reg = rd;
+ // Check if a temporary register is available
+ if (up_32 == 0 || low_32 == 0) {
+ // No temp register is needed
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 0);
+ temp_reg = temps.hasAvailable() ? temps.Acquire() : InvalidReg;
+ }
+ if (temp_reg != InvalidReg) {
+ // keep track of hardware behavior for lower part in sim_low
+ int64_t sim_low = 0;
+ // Build lower part
+ if (low_32 != 0) {
+ int64_t high_20 = ((low_32 + 0x800) >> 12);
+ int64_t low_12 = low_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ sim_low = ((high_20 << 12) << 32) >> 32;
+ lui(rd, (int32_t)high_20);
+ if (low_12) {
+ sim_low += (low_12 << 52 >> 52) | low_12;
+ addi(rd, rd, low_12);
+ }
+ } else {
+ sim_low = low_12;
+ ori(rd, zero_reg, low_12);
+ }
+ }
+ if (sim_low & 0x100000000) {
+        // Bit 31 is 1: either an overflow or a negative 64-bit value.
+ if (up_32 == 0) {
+ // Positive number, but overflow because of the add 0x800
+ slli(rd, rd, 32);
+ srli(rd, rd, 32);
+ return;
+ }
+        // After the build, low_32 in rd is a negative 64-bit value.
+ up_32 = (up_32 - 0xffffffff) & 0xffffffff;
+ }
+ if (up_32 == 0) {
+ return;
+ }
+ // Build upper part in a temporary register
+ if (low_32 == 0) {
+ // Build upper part in rd
+ temp_reg = rd;
+ }
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ lui(temp_reg, (int32_t)high_20);
+ if (low_12) {
+ addi(temp_reg, temp_reg, low_12);
+ }
+ } else {
+ ori(temp_reg, zero_reg, low_12);
+ }
+      // Shift the upper part into the high 32 bits of the register.
+ slli(temp_reg, temp_reg, 32);
+ if (low_32 != 0) {
+ add(rd, rd, temp_reg);
+ }
+ return;
+ }
+ // No temp register. Build imm in rd.
+    // Build the upper 32 bits first in rd. Divide the lower 32 bits into
+    // parts and add each part to the upper part with shift-and-add.
+ // First build upper part in rd.
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ lui(rd, (int32_t)high_20);
+ if (low_12) {
+ addi(rd, rd, low_12);
+ }
+ } else {
+ ori(rd, zero_reg, low_12);
+ }
+    // The upper part is already in rd. Each part to be added to rd has a
+    // maximum of 11 bits and always starts with a 1. rd is shifted left by
+    // the size of the part plus the number of zeros between the parts, and
+    // then the part is or-ed in.
+ uint32_t mask = 0x80000000;
+ int32_t shift_val = 0;
+ int32_t i;
+ for (i = 0; i < 32; i++) {
+ if ((low_32 & mask) == 0) {
+ mask >>= 1;
+ shift_val++;
+ if (i == 31) {
+ // rest is zero
+ slli(rd, rd, shift_val);
+ }
+ continue;
+ }
+ // The first 1 seen
+ int32_t part;
+ if ((i + 11) < 32) {
+ // Pick 11 bits
+ part = ((uint32_t)(low_32 << i) >> i) >> (32 - (i + 11));
+ slli(rd, rd, shift_val + 11);
+ ori(rd, rd, part);
+ i += 10;
+ mask >>= 11;
+ } else {
+ part = (uint32_t)(low_32 << i) >> i;
+ slli(rd, rd, shift_val + (32 - i));
+ ori(rd, rd, part);
+ break;
+ }
+ shift_val = 0;
+ }
+ }
+}
+
+int Assembler::GeneralLiCount(int64_t imm, bool is_get_temp_reg) {
+ int count = 0;
+ // imitate Assembler::RV_li
+ if (is_int32(imm + 0x800)) {
+ // 32-bit case. Maximum of 2 instructions generated
+ int64_t high_20 = ((imm + 0x800) >> 12);
+ int64_t low_12 = imm << 52 >> 52;
+ if (high_20) {
+ count++;
+ if (low_12) {
+ count++;
+ }
+ } else {
+ count++;
+ }
+ return count;
+ } else {
+ // 64-bit case: divide imm into two 32-bit parts, upper and lower
+ int64_t up_32 = imm >> 32;
+ int64_t low_32 = imm & 0xffffffffull;
+ // Check if a temporary register is available
+ if (is_get_temp_reg) {
+ // keep track of hardware behavior for lower part in sim_low
+ int64_t sim_low = 0;
+ // Build lower part
+ if (low_32 != 0) {
+ int64_t high_20 = ((low_32 + 0x800) >> 12);
+ int64_t low_12 = low_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ sim_low = ((high_20 << 12) << 32) >> 32;
+ count++;
+ if (low_12) {
+ sim_low += (low_12 << 52 >> 52) | low_12;
+ count++;
+ }
+ } else {
+ sim_low = low_12;
+ count++;
+ }
+ }
+ if (sim_low & 0x100000000) {
+        // Bit 31 is 1: either an overflow or a negative 64-bit value.
+ if (up_32 == 0) {
+ // Positive number, but overflow because of the add 0x800
+ count++;
+ count++;
+ return count;
+ }
+        // After the build, low_32 in rd is a negative 64-bit value.
+ up_32 = (up_32 - 0xffffffff) & 0xffffffff;
+ }
+ if (up_32 == 0) {
+ return count;
+ }
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ count++;
+ if (low_12) {
+ count++;
+ }
+ } else {
+ count++;
+ }
+      // Shift the upper part into the high 32 bits of the register.
+ count++;
+ if (low_32 != 0) {
+ count++;
+ }
+ return count;
+ }
+ // No temp register. Build imm in rd.
+    // Build the upper 32 bits first in rd. Divide the lower 32 bits into
+    // parts and add each part to the upper part with shift-and-add.
+ // First build upper part in rd.
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ count++;
+ if (low_12) {
+ count++;
+ }
+ } else {
+ count++;
+ }
+    // The upper part is already in rd. Each part to be added to rd has a
+    // maximum of 11 bits and always starts with a 1. rd is shifted left by
+    // the size of the part plus the number of zeros between the parts, and
+    // then the part is or-ed in.
+ uint32_t mask = 0x80000000;
+ int32_t i;
+ for (i = 0; i < 32; i++) {
+ if ((low_32 & mask) == 0) {
+ mask >>= 1;
+ if (i == 31) {
+ // rest is zero
+ count++;
+ }
+ continue;
+ }
+ // The first 1 seen
+ if ((i + 11) < 32) {
+ // Pick 11 bits
+ count++;
+ count++;
+ i += 10;
+ mask >>= 11;
+ } else {
+ count++;
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
+
+void Assembler::li_ptr(Register rd, int64_t imm) {
+ m_buffer.enterNoNops();
+ m_buffer.assertNoPoolAndNoNops();
+ // Initialize rd with an address
+ // Pointers are 48 bits
+ // 6 fixed instructions are generated
+ DEBUG_PRINTF("li_ptr(%d, %lx <%ld>)\n", ToNumber(rd), imm, imm);
+ MOZ_ASSERT((imm & 0xfff0000000000000ll) == 0);
+ int64_t a6 = imm & 0x3f; // bits 0:5. 6 bits
+ int64_t b11 = (imm >> 6) & 0x7ff; // bits 6:11. 11 bits
+ int64_t high_31 = (imm >> 17) & 0x7fffffff; // 31 bits
+ int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
+ int64_t low_12 = high_31 & 0xfff; // 12 bits
+ lui(rd, (int32_t)high_20);
+ addi(rd, rd, low_12); // 31 bits in rd.
+  slli(rd, rd, 11);  // Space for next 11 bits
+  ori(rd, rd, b11);  // 11 bits are put in. 42 bits in rd
+  slli(rd, rd, 6);   // Space for next 6 bits
+  ori(rd, rd, a6);   // 6 bits are put in. 48 bits in rd
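+  // Decode sketch (the inverse performed by target_address_at below):
+  //   rd == ((((high_20 << 12) + sign_extend(low_12)) << 11) | b11) << 6 | a6
+  // which equals the original 48-bit imm; the +0x800 rounding in high_20
+  // absorbs the sign extension of low_12.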
+ m_buffer.leaveNoNops();
+}
+
+void Assembler::li_constant(Register rd, int64_t imm) {
+ m_buffer.enterNoNops();
+ m_buffer.assertNoPoolAndNoNops();
+ DEBUG_PRINTF("li_constant(%d, %lx <%ld>)\n", ToNumber(rd), imm, imm);
+ lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >>
+ 48); // Bits 63:48
+ addiw(rd, rd,
+ (imm + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >>
+ 52); // Bits 47:36
+ slli(rd, rd, 12);
+ addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52); // Bits 35:24
+ slli(rd, rd, 12);
+ addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52); // Bits 23:12
+ slli(rd, rd, 12);
+ addi(rd, rd, imm << 52 >> 52); // Bits 11:0
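+  // The shifted (1LL << n) addends pre-bias each higher slice so that the
+  // sign extension of the following 12-bit immediate nets out. E.g. for
+  // imm = 0xfff the Bits 35:24 and higher slices stay 0, the Bits 23:12
+  // slice comes out as 1 instead of 0, and the final addi of -1 brings rd
+  // from 0x1000 back down to 0xfff.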
+ m_buffer.leaveNoNops();
+}
+
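+// Argument assignment follows the RISC-V LP64D-style convention: integer and
+// floating-point argument registers advance independently. As a sketch,
+// assuming a0..a7 and fa0..fa7 are the argument registers, a signature of
+// (Int32, Double, Int64) is assigned a0, fa0, a1, and once a register class
+// is exhausted further arguments of that class spill to 8-byte stack slots.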
+ABIArg ABIArgGenerator::next(MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults: {
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uintptr_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_ + a0.encoding()));
+ intRegIndex_++;
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Double: {
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(double);
+ break;
+ }
+ current_ = ABIArg(FloatRegister(
+ FloatRegisters::Encoding(floatRegIndex_ + fa0.encoding()),
+ type == MIRType::Double ? FloatRegisters::Double
+ : FloatRegisters::Single));
+ floatRegIndex_++;
+ break;
+ }
+ case MIRType::Simd128: {
+ MOZ_CRASH("RISCV64 does not support simd yet.");
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
+bool Assembler::oom() const {
+ return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
+ dataRelocations_.oom() || !enoughLabelCache_;
+}
+
+int Assembler::disassembleInstr(Instr instr, bool enable_spew) {
+ if (!FLAG_riscv_debug && !enable_spew) return -1;
+ disasm::NameConverter converter;
+ disasm::Disassembler disasm(converter);
+ EmbeddedVector<char, 128> disasm_buffer;
+
+ int size =
+ disasm.InstructionDecode(disasm_buffer, reinterpret_cast<byte*>(&instr));
+ DEBUG_PRINTF("%s\n", disasm_buffer.start());
+ if (enable_spew) {
+ JitSpew(JitSpew_Codegen, "%s", disasm_buffer.start());
+ }
+ return size;
+}
+
+uintptr_t Assembler::target_address_at(Instruction* pc) {
+ Instruction* instr0 = pc;
+ DEBUG_PRINTF("target_address_at: pc: 0x%p\t", instr0);
+ Instruction* instr1 = pc + 1 * kInstrSize;
+ Instruction* instr2 = pc + 2 * kInstrSize;
+ Instruction* instr3 = pc + 3 * kInstrSize;
+ Instruction* instr4 = pc + 4 * kInstrSize;
+ Instruction* instr5 = pc + 5 * kInstrSize;
+
+  // Interpret the instruction sequence generated by li_ptr above:
+  // lui, addi, slli(11), ori, slli(6), ori.
+ if (IsLui(*reinterpret_cast<Instr*>(instr0)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
+ IsOri(*reinterpret_cast<Instr*>(instr3)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
+ IsOri(*reinterpret_cast<Instr*>(instr5))) {
+ // Assemble the 64 bit value.
+ int64_t addr = (int64_t)(instr0->Imm20UValue() << kImm20Shift) +
+ (int64_t)instr1->Imm12Value();
+ MOZ_ASSERT(instr2->Imm12Value() == 11);
+ addr <<= 11;
+ addr |= (int64_t)instr3->Imm12Value();
+ MOZ_ASSERT(instr4->Imm12Value() == 6);
+ addr <<= 6;
+ addr |= (int64_t)instr5->Imm12Value();
+
+ DEBUG_PRINTF("addr: %lx\n", addr);
+ return static_cast<uintptr_t>(addr);
+ }
+  // We should never get here; crash if we do.
+ MOZ_CRASH("RISC-V UNREACHABLE");
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ ImmPtr newValue, ImmPtr expectedValue) {
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue) {
+ Instruction* inst = (Instruction*)label.raw();
+
+ // Extract old Value
+ DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
+ MOZ_ASSERT(value == uint64_t(expectedValue.value));
+
+ // Replace with new value
+ Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
+}
+
+uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
+ DEBUG_PRINTF("\tExtractLoad64Value: \tpc:%p ", inst0);
+ if (IsJal(*reinterpret_cast<Instr*>(inst0))) {
+ int offset = inst0->Imm20JValue();
+ inst0 = inst0 + offset;
+ }
+ Instruction* instr1 = inst0 + 1 * kInstrSize;
+ if (IsAddiw(*reinterpret_cast<Instr*>(instr1))) {
+ // Li64
+ Instruction* instr2 = inst0 + 2 * kInstrSize;
+ Instruction* instr3 = inst0 + 3 * kInstrSize;
+ Instruction* instr4 = inst0 + 4 * kInstrSize;
+ Instruction* instr5 = inst0 + 5 * kInstrSize;
+ Instruction* instr6 = inst0 + 6 * kInstrSize;
+ Instruction* instr7 = inst0 + 7 * kInstrSize;
+ if (IsLui(*reinterpret_cast<Instr*>(inst0)) &&
+ IsAddiw(*reinterpret_cast<Instr*>(instr1)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr3)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr5)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr6)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr7))) {
+ int64_t imm = (int64_t)(inst0->Imm20UValue() << kImm20Shift) +
+ (int64_t)instr1->Imm12Value();
+ MOZ_ASSERT(instr2->Imm12Value() == 12);
+ imm <<= 12;
+ imm += (int64_t)instr3->Imm12Value();
+ MOZ_ASSERT(instr4->Imm12Value() == 12);
+ imm <<= 12;
+ imm += (int64_t)instr5->Imm12Value();
+ MOZ_ASSERT(instr6->Imm12Value() == 12);
+ imm <<= 12;
+ imm += (int64_t)instr7->Imm12Value();
+ DEBUG_PRINTF("imm:%lx\n", imm);
+ return imm;
+ } else {
+ FLAG_riscv_debug = true;
+ disassembleInstr(inst0->InstructionBits());
+ disassembleInstr(instr1->InstructionBits());
+ disassembleInstr(instr2->InstructionBits());
+ disassembleInstr(instr3->InstructionBits());
+ disassembleInstr(instr4->InstructionBits());
+ disassembleInstr(instr5->InstructionBits());
+ disassembleInstr(instr6->InstructionBits());
+ disassembleInstr(instr7->InstructionBits());
+ MOZ_CRASH();
+ }
+ } else {
+ DEBUG_PRINTF("\n");
+ Instruction* instrf1 = (inst0 - 1 * kInstrSize);
+ Instruction* instr2 = inst0 + 2 * kInstrSize;
+ Instruction* instr3 = inst0 + 3 * kInstrSize;
+ Instruction* instr4 = inst0 + 4 * kInstrSize;
+ Instruction* instr5 = inst0 + 5 * kInstrSize;
+ Instruction* instr6 = inst0 + 6 * kInstrSize;
+ Instruction* instr7 = inst0 + 7 * kInstrSize;
+ disassembleInstr(instrf1->InstructionBits());
+ disassembleInstr(inst0->InstructionBits());
+ disassembleInstr(instr1->InstructionBits());
+ disassembleInstr(instr2->InstructionBits());
+ disassembleInstr(instr3->InstructionBits());
+ disassembleInstr(instr4->InstructionBits());
+ disassembleInstr(instr5->InstructionBits());
+ disassembleInstr(instr6->InstructionBits());
+ disassembleInstr(instr7->InstructionBits());
+ MOZ_ASSERT(IsAddi(*reinterpret_cast<Instr*>(instr1)));
+ // Li48
+ return target_address_at(inst0);
+ }
+}
+
+void Assembler::UpdateLoad64Value(Instruction* pc, uint64_t value) {
+ DEBUG_PRINTF("\tUpdateLoad64Value: pc: %p\tvalue: %lx\n", pc, value);
+ Instruction* instr1 = pc + 1 * kInstrSize;
+ if (IsJal(*reinterpret_cast<Instr*>(pc))) {
+ pc = pc + pc->Imm20JValue();
+ instr1 = pc + 1 * kInstrSize;
+ }
+ if (IsAddiw(*reinterpret_cast<Instr*>(instr1))) {
+ Instruction* instr0 = pc;
+ Instruction* instr2 = pc + 2 * kInstrSize;
+ Instruction* instr3 = pc + 3 * kInstrSize;
+ Instruction* instr4 = pc + 4 * kInstrSize;
+ Instruction* instr5 = pc + 5 * kInstrSize;
+ Instruction* instr6 = pc + 6 * kInstrSize;
+ Instruction* instr7 = pc + 7 * kInstrSize;
+ MOZ_ASSERT(IsLui(*reinterpret_cast<Instr*>(pc)) &&
+ IsAddiw(*reinterpret_cast<Instr*>(instr1)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr3)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr5)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr6)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr7)));
+    // The sequence being patched was emitted by li_constant:
+    //   lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) +
+    //            (1LL << 11)) >> 48);                         // Bits 63:48
+    //   addiw(rd, rd, (imm + (1LL << 35) + (1LL << 23) +
+    //                  (1LL << 11)) << 16 >> 52);             // Bits 47:36
+    //   slli(rd, rd, 12);
+    //   addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52);
+    //                                                         // Bits 35:24
+    //   slli(rd, rd, 12);
+    //   addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52);        // Bits 23:12
+    //   slli(rd, rd, 12);
+    //   addi(rd, rd, imm << 52 >> 52);                        // Bits 11:0
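+    // Only the immediate fields are rewritten: the U-type lui keeps its low
+    // 12 bits (opcode/rd, mask 0xfff), each I-type addiw/addi keeps its low
+    // 20 bits (opcode/rd/funct3/rs1, mask 0xfffff), and the slli spacers are
+    // left untouched.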
+ *reinterpret_cast<Instr*>(instr0) &= 0xfff;
+ *reinterpret_cast<Instr*>(instr0) |=
+ (((value + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >> 48)
+ << 12);
+ *reinterpret_cast<Instr*>(instr1) &= 0xfffff;
+ *reinterpret_cast<Instr*>(instr1) |=
+ (((value + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >> 52) << 20);
+ *reinterpret_cast<Instr*>(instr3) &= 0xfffff;
+ *reinterpret_cast<Instr*>(instr3) |=
+ (((value + (1LL << 23) + (1LL << 11)) << 28 >> 52) << 20);
+ *reinterpret_cast<Instr*>(instr5) &= 0xfffff;
+ *reinterpret_cast<Instr*>(instr5) |=
+ (((value + (1LL << 11)) << 40 >> 52) << 20);
+ *reinterpret_cast<Instr*>(instr7) &= 0xfffff;
+ *reinterpret_cast<Instr*>(instr7) |= ((value << 52 >> 52) << 20);
+ disassembleInstr(instr0->InstructionBits());
+ disassembleInstr(instr1->InstructionBits());
+ disassembleInstr(instr2->InstructionBits());
+ disassembleInstr(instr3->InstructionBits());
+ disassembleInstr(instr4->InstructionBits());
+ disassembleInstr(instr5->InstructionBits());
+ disassembleInstr(instr6->InstructionBits());
+ disassembleInstr(instr7->InstructionBits());
+ MOZ_ASSERT(ExtractLoad64Value(pc) == value);
+ } else {
+ Instruction* instr0 = pc;
+ Instruction* instr2 = pc + 2 * kInstrSize;
+ Instruction* instr3 = pc + 3 * kInstrSize;
+ Instruction* instr4 = pc + 4 * kInstrSize;
+ Instruction* instr5 = pc + 5 * kInstrSize;
+ Instruction* instr6 = pc + 6 * kInstrSize;
+ Instruction* instr7 = pc + 7 * kInstrSize;
+ disassembleInstr(instr0->InstructionBits());
+ disassembleInstr(instr1->InstructionBits());
+ disassembleInstr(instr2->InstructionBits());
+ disassembleInstr(instr3->InstructionBits());
+ disassembleInstr(instr4->InstructionBits());
+ disassembleInstr(instr5->InstructionBits());
+ disassembleInstr(instr6->InstructionBits());
+ disassembleInstr(instr7->InstructionBits());
+ MOZ_ASSERT(IsAddi(*reinterpret_cast<Instr*>(instr1)));
+ set_target_value_at(pc, value);
+ }
+}
+
+void Assembler::set_target_value_at(Instruction* pc, uint64_t target) {
+ DEBUG_PRINTF("\tset_target_value_at: pc: %p\ttarget: %lx\n", pc, target);
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ MOZ_ASSERT((target & 0xffff000000000000ll) == 0);
+#ifdef DEBUG
+ // Check we have the result from a li macro-instruction.
+ Instruction* instr0 = pc;
+ Instruction* instr1 = pc + 1 * kInstrSize;
+ Instruction* instr3 = pc + 3 * kInstrSize;
+ Instruction* instr5 = pc + 5 * kInstrSize;
+ MOZ_ASSERT(IsLui(*reinterpret_cast<Instr*>(instr0)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
+ IsOri(*reinterpret_cast<Instr*>(instr3)) &&
+ IsOri(*reinterpret_cast<Instr*>(instr5)));
+#endif
+  int64_t a6 = target & 0x3f;                    // bits 0:5. 6 bits
+ int64_t b11 = (target >> 6) & 0x7ff; // bits 6:11. 11 bits
+ int64_t high_31 = (target >> 17) & 0x7fffffff; // 31 bits
+ int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
+ int64_t low_12 = high_31 & 0xfff; // 12 bits
+ *p = *p & 0xfff;
+ *p = *p | ((int32_t)high_20 << 12);
+ *(p + 1) = *(p + 1) & 0xfffff;
+ *(p + 1) = *(p + 1) | ((int32_t)low_12 << 20);
+ *(p + 2) = *(p + 2) & 0xfffff;
+ *(p + 2) = *(p + 2) | (11 << 20);
+ *(p + 3) = *(p + 3) & 0xfffff;
+ *(p + 3) = *(p + 3) | ((int32_t)b11 << 20);
+ *(p + 4) = *(p + 4) & 0xfffff;
+ *(p + 4) = *(p + 4) | (6 << 20);
+ *(p + 5) = *(p + 5) & 0xfffff;
+ *(p + 5) = *(p + 5) | ((int32_t)a6 << 20);
+ MOZ_ASSERT(target_address_at(pc) == target);
+}
+
+void Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value) {
+ DEBUG_PRINTF("\tWriteLoad64Instructions\n");
+ // Initialize rd with an address
+ // Pointers are 48 bits
+ // 6 fixed instructions are generated
+ MOZ_ASSERT((value & 0xfff0000000000000ll) == 0);
+ int64_t a6 = value & 0x3f; // bits 0:5. 6 bits
+ int64_t b11 = (value >> 6) & 0x7ff; // bits 6:11. 11 bits
+ int64_t high_31 = (value >> 17) & 0x7fffffff; // 31 bits
+ int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
+ int64_t low_12 = high_31 & 0xfff; // 12 bits
+ Instr lui_ = LUI | (reg.code() << kRdShift) |
+ ((int32_t)high_20 << kImm20Shift); // lui(rd, (int32_t)high_20);
+ *reinterpret_cast<Instr*>(inst0) = lui_;
+
+ Instr addi_ =
+ OP_IMM | (reg.code() << kRdShift) | (0b000 << kFunct3Shift) |
+ (reg.code() << kRs1Shift) |
+ (low_12 << kImm12Shift); // addi(rd, rd, low_12); // 31 bits in rd.
+ *reinterpret_cast<Instr*>(inst0 + 1 * kInstrSize) = addi_;
+
+  Instr slli_ =
+      OP_IMM | (reg.code() << kRdShift) | (0b001 << kFunct3Shift) |
+      (reg.code() << kRs1Shift) |
+      (11 << kImm12Shift);  // slli(rd, rd, 11); // Space for next 11 bits
+ *reinterpret_cast<Instr*>(inst0 + 2 * kInstrSize) = slli_;
+
+  Instr ori_b11 = OP_IMM | (reg.code() << kRdShift) | (0b110 << kFunct3Shift) |
+                  (reg.code() << kRs1Shift) |
+                  (b11 << kImm12Shift);  // ori(rd, rd, b11); // 11 bits
+                                         // are put in. 42 bits in rd
+ *reinterpret_cast<Instr*>(inst0 + 3 * kInstrSize) = ori_b11;
+
+  slli_ = OP_IMM | (reg.code() << kRdShift) | (0b001 << kFunct3Shift) |
+          (reg.code() << kRs1Shift) |
+          (6 << kImm12Shift);  // slli(rd, rd, 6); // Space for next 6 bits
+ *reinterpret_cast<Instr*>(inst0 + 4 * kInstrSize) =
+ slli_; // slli(rd, rd, 6); // Space for next 6 bits
+
+  Instr ori_a6 = OP_IMM | (reg.code() << kRdShift) | (0b110 << kFunct3Shift) |
+                 (reg.code() << kRs1Shift) |
+                 (a6 << kImm12Shift);  // ori(rd, rd, a6); // 6 bits are
+                                       // put in. 48 bits in rd
+ *reinterpret_cast<Instr*>(inst0 + 5 * kInstrSize) = ori_a6;
+ disassembleInstr((inst0 + 0 * kInstrSize)->InstructionBits());
+ disassembleInstr((inst0 + 1 * kInstrSize)->InstructionBits());
+ disassembleInstr((inst0 + 2 * kInstrSize)->InstructionBits());
+ disassembleInstr((inst0 + 3 * kInstrSize)->InstructionBits());
+ disassembleInstr((inst0 + 4 * kInstrSize)->InstructionBits());
+ disassembleInstr((inst0 + 5 * kInstrSize)->InstructionBits());
+ disassembleInstr((inst0 + 6 * kInstrSize)->InstructionBits());
+ MOZ_ASSERT(ExtractLoad64Value(inst0) == value);
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. It is
+// only meant to operate on code that has been invalidated, so it should be
+// totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary.
+void Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will
+ // end up being the call instruction.
+ *(raw - 1) = imm.value;
+}
+
+void Assembler::target_at_put(BufferOffset pos, BufferOffset target_pos,
+ bool trampoline) {
+ if (m_buffer.oom()) {
+ return;
+ }
+ DEBUG_PRINTF("\ttarget_at_put: %p (%d) to %p (%d)\n",
+ reinterpret_cast<Instr*>(editSrc(pos)), pos.getOffset(),
+ reinterpret_cast<Instr*>(editSrc(pos)) + target_pos.getOffset() -
+ pos.getOffset(),
+ target_pos.getOffset());
+ Instruction* instruction = editSrc(pos);
+ Instr instr = instruction->InstructionBits();
+ switch (instruction->InstructionOpcodeType()) {
+ case BRANCH: {
+ instr = SetBranchOffset(pos.getOffset(), target_pos.getOffset(), instr);
+ instr_at_put(pos, instr);
+ } break;
+ case JAL: {
+ MOZ_ASSERT(IsJal(instr));
+ instr = SetJalOffset(pos.getOffset(), target_pos.getOffset(), instr);
+ instr_at_put(pos, instr);
+ } break;
+ case LUI: {
+ set_target_value_at(instruction,
+ reinterpret_cast<uintptr_t>(editSrc(target_pos)));
+ } break;
+ case AUIPC: {
+ Instr instr_auipc = instr;
+ Instr instr_I =
+ editSrc(BufferOffset(pos.getOffset() + 4))->InstructionBits();
+ MOZ_ASSERT(IsJalr(instr_I) || IsAddi(instr_I));
+
+ intptr_t offset = target_pos.getOffset() - pos.getOffset();
+ if (is_int21(offset) && IsJalr(instr_I) && trampoline) {
+ MOZ_ASSERT(is_int21(offset) && ((offset & 1) == 0));
+ Instr instr = JAL;
+ instr = SetJalOffset(pos.getOffset(), target_pos.getOffset(), instr);
+ MOZ_ASSERT(IsJal(instr));
+ MOZ_ASSERT(JumpOffset(instr) == offset);
+ instr_at_put(pos, instr);
+ instr_at_put(BufferOffset(pos.getOffset() + 4), kNopByte);
+ } else {
+ MOZ_RELEASE_ASSERT(is_int32(offset + 0x800));
+ MOZ_ASSERT(instruction->RdValue() ==
+ editSrc(BufferOffset(pos.getOffset() + 4))->Rs1Value());
+ int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)offset << 20 >> 20;
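+        // Example of the split (a sketch): offset = 0x1800 gives
+        // Hi20 = (0x1800 + 0x800) >> 12 = 2 and Lo12 = sign_extend(0x800) =
+        // -0x800; auipc then contributes 2 << 12 = 0x2000 and the jalr/addi
+        // adds -0x800, reproducing 0x2000 - 0x800 = 0x1800.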
+
+ instr_auipc =
+ (instr_auipc & ~kImm31_12Mask) | ((Hi20 & kImm19_0Mask) << 12);
+ instr_at_put(pos, instr_auipc);
+
+ const int kImm31_20Mask = ((1 << 12) - 1) << 20;
+ const int kImm11_0Mask = ((1 << 12) - 1);
+ instr_I = (instr_I & ~kImm31_20Mask) | ((Lo12 & kImm11_0Mask) << 20);
+ instr_at_put(BufferOffset(pos.getOffset() + 4), instr_I);
+ }
+ } break;
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+const int kEndOfChain = -1;
+const int32_t kEndOfJumpChain = 0;
+
+int Assembler::target_at(BufferOffset pos, bool is_internal) {
+ if (oom()) {
+ return kEndOfChain;
+ }
+ Instruction* instruction = editSrc(pos);
+ Instruction* instruction2 = nullptr;
+ if (IsAuipc(instruction->InstructionBits())) {
+ instruction2 = editSrc(BufferOffset(pos.getOffset() + kInstrSize));
+ }
+ return target_at(instruction, pos, is_internal, instruction2);
+}
+
+int Assembler::target_at(Instruction* instruction, BufferOffset pos,
+ bool is_internal, Instruction* instruction2) {
+ DEBUG_PRINTF("\t target_at: %p(%x)\n\t",
+ reinterpret_cast<Instr*>(instruction), pos.getOffset());
+ disassembleInstr(instruction->InstructionBits());
+ Instr instr = instruction->InstructionBits();
+ switch (instruction->InstructionOpcodeType()) {
+ case BRANCH: {
+ int32_t imm13 = BranchOffset(instr);
+ if (imm13 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ DEBUG_PRINTF("\t target_at: %d %d\n", imm13, pos.getOffset() + imm13);
+ return pos.getOffset() + imm13;
+ }
+ }
+ case JAL: {
+ int32_t imm21 = JumpOffset(instr);
+ if (imm21 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ DEBUG_PRINTF("\t target_at: %d %d\n", imm21, pos.getOffset() + imm21);
+ return pos.getOffset() + imm21;
+ }
+ }
+ case JALR: {
+ int32_t imm12 = instr >> 20;
+ if (imm12 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ DEBUG_PRINTF("\t target_at: %d %d\n", imm12, pos.getOffset() + imm12);
+ return pos.getOffset() + imm12;
+ }
+ }
+ case LUI: {
+ uintptr_t imm = target_address_at(instruction);
+ uintptr_t instr_address = reinterpret_cast<uintptr_t>(instruction);
+ if (imm == kEndOfJumpChain) {
+ return kEndOfChain;
+ } else {
+ MOZ_ASSERT(instr_address - imm < INT_MAX);
+ int32_t delta = static_cast<int32_t>(instr_address - imm);
+ MOZ_ASSERT(pos.getOffset() > delta);
+ return pos.getOffset() - delta;
+ }
+ }
+ case AUIPC: {
+ MOZ_ASSERT(instruction2 != nullptr);
+ Instr instr_auipc = instr;
+ Instr instr_I = instruction2->InstructionBits();
+ MOZ_ASSERT(IsJalr(instr_I) || IsAddi(instr_I));
+ int32_t offset = BrachlongOffset(instr_auipc, instr_I);
+ if (offset == kEndOfJumpChain) return kEndOfChain;
+ DEBUG_PRINTF("\t target_at: %d %d\n", offset, pos.getOffset() + offset);
+ return offset + pos.getOffset();
+ }
+ default: {
+ UNIMPLEMENTED_RISCV();
+ }
+ }
+}
+
+uint32_t Assembler::next_link(Label* L, bool is_internal) {
+ MOZ_ASSERT(L->used());
+ BufferOffset pos(L);
+ int link = target_at(pos, is_internal);
+ if (link == kEndOfChain) {
+ L->reset();
+ return LabelBase::INVALID_OFFSET;
+ } else {
+ MOZ_ASSERT(link >= 0);
+ DEBUG_PRINTF("next: %p to offset %d\n", L, link);
+ L->use(link);
+ return link;
+ }
+}
+
+void Assembler::bind(Label* label, BufferOffset boff) {
+ JitSpew(JitSpew_Codegen, ".set Llabel %p %d", label, currentOffset());
+ DEBUG_PRINTF(".set Llabel %p\n", label);
+  // If our caller didn't give us an explicit target to bind to,
+  // then we bind to the location of the next instruction.
+ BufferOffset dest = boff.assigned() ? boff : nextOffset();
+ if (label->used()) {
+ uint32_t next;
+
+    // A used label holds a link to the branch that uses it.
+ do {
+ BufferOffset b(label);
+ DEBUG_PRINTF("\tbind next:%d\n", b.getOffset());
+ // Even a 0 offset may be invalid if we're out of memory.
+ if (oom()) {
+ return;
+ }
+ int fixup_pos = b.getOffset();
+ int dist = dest.getOffset() - fixup_pos;
+ next = next_link(label, false);
+ DEBUG_PRINTF("\t%p fixup: %d next: %d\n", label, fixup_pos, next);
+ DEBUG_PRINTF("\t fixup: %d dest: %d dist: %d %d %d\n", fixup_pos,
+ dest.getOffset(), dist, nextOffset().getOffset(),
+ currentOffset());
+ Instruction* instruction = editSrc(b);
+ Instr instr = instruction->InstructionBits();
+ if (IsBranch(instr)) {
+ if (dist > kMaxBranchOffset) {
+ MOZ_ASSERT(next != LabelBase::INVALID_OFFSET);
+ MOZ_RELEASE_ASSERT((next - fixup_pos) <= kMaxBranchOffset);
+ MOZ_ASSERT(IsAuipc(editSrc(BufferOffset(next))->InstructionBits()));
+ MOZ_ASSERT(
+ IsJalr(editSrc(BufferOffset(next + 4))->InstructionBits()));
+ DEBUG_PRINTF("\t\ttrampolining: %d\n", next);
+ } else {
+ target_at_put(b, dest);
+ BufferOffset deadline(b.getOffset() +
+ ImmBranchMaxForwardOffset(CondBranchRangeType));
+ m_buffer.unregisterBranchDeadline(CondBranchRangeType, deadline);
+ }
+ } else if (IsJal(instr)) {
+ if (dist > kMaxJumpOffset) {
+ MOZ_ASSERT(next != LabelBase::INVALID_OFFSET);
+ MOZ_RELEASE_ASSERT((next - fixup_pos) <= kMaxJumpOffset);
+ MOZ_ASSERT(IsAuipc(editSrc(BufferOffset(next))->InstructionBits()));
+ MOZ_ASSERT(
+ IsJalr(editSrc(BufferOffset(next + 4))->InstructionBits()));
+ DEBUG_PRINTF("\t\ttrampolining: %d\n", next);
+ } else {
+ target_at_put(b, dest);
+ BufferOffset deadline(
+ b.getOffset() + ImmBranchMaxForwardOffset(UncondBranchRangeType));
+ m_buffer.unregisterBranchDeadline(UncondBranchRangeType, deadline);
+ }
+ } else {
+ MOZ_ASSERT(IsAuipc(instr));
+ target_at_put(b, dest);
+ }
+ } while (next != LabelBase::INVALID_OFFSET);
+ }
+ label->bind(dest.getOffset());
+}
+
+void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
+ if (label.patchAt().bound()) {
+ auto mode = label.linkMode();
+ intptr_t offset = label.patchAt().offset();
+ intptr_t target = label.target().offset();
+
+ if (mode == CodeLabel::RawPointer) {
+ *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
+ } else {
+ MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
+ mode == CodeLabel::JumpImmediate);
+ Instruction* inst = (Instruction*)(rawCode + offset);
+ Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + target));
+ }
+ }
+}
+
+bool Assembler::is_near(Label* L) {
+ MOZ_ASSERT(L->bound());
+ return is_intn((currentOffset() - L->offset()), kJumpOffsetBits);
+}
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+ if (L == nullptr || !L->bound()) return true;
+ return is_intn((currentOffset() - L->offset()), bits);
+}
+
+bool Assembler::is_near_branch(Label* L) {
+ MOZ_ASSERT(L->bound());
+ return is_intn((currentOffset() - L->offset()), kBranchOffsetBits);
+}
+
+int32_t Assembler::branch_long_offset(Label* L) {
+ if (oom()) {
+ return kEndOfJumpChain;
+ }
+ intptr_t target_pos;
+ BufferOffset next_instr_offset = nextInstrOffset(2);
+ DEBUG_PRINTF("\tbranch_long_offset: %p to (%d)\n", L,
+ next_instr_offset.getOffset());
+ if (L->bound()) {
+ JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
+ next_instr_offset.getOffset());
+ target_pos = L->offset();
+ } else {
+ if (L->used()) {
+ LabelCahe::Ptr p = label_cache_.lookup(L->offset());
+ MOZ_ASSERT(p);
+ MOZ_ASSERT(p->key() == L->offset());
+ target_pos = p->value().getOffset();
+ target_at_put(BufferOffset(target_pos), next_instr_offset);
+ DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
+ next_instr_offset.getOffset());
+ bool ok = label_cache_.put(L->offset(), next_instr_offset);
+ if (!ok) {
+ NoEnoughLabelCache();
+ }
+ return kEndOfJumpChain;
+ } else {
+ JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
+ next_instr_offset.getOffset());
+ L->use(next_instr_offset.getOffset());
+ DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
+ next_instr_offset.getOffset());
+ bool ok = label_cache_.putNew(L->offset(), next_instr_offset);
+ if (!ok) {
+ NoEnoughLabelCache();
+ }
+ return kEndOfJumpChain;
+ }
+ }
+ intptr_t offset = target_pos - next_instr_offset.getOffset();
+ MOZ_ASSERT((offset & 3) == 0);
+ MOZ_ASSERT(is_int32(offset));
+ return static_cast<int32_t>(offset);
+}
+
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
+ if (oom()) {
+ return kEndOfJumpChain;
+ }
+ int32_t target_pos;
+ BufferOffset next_instr_offset = nextInstrOffset();
+ DEBUG_PRINTF("\tbranch_offset_helper: %p to %d\n", L,
+ next_instr_offset.getOffset());
+ // This is the last possible branch target.
+ if (L->bound()) {
+ JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
+ next_instr_offset.getOffset());
+ target_pos = L->offset();
+ } else {
+ BufferOffset deadline(next_instr_offset.getOffset() +
+ ImmBranchMaxForwardOffset(bits));
+ DEBUG_PRINTF("\tregisterBranchDeadline %d type %d\n", deadline.getOffset(),
+ OffsetSizeToImmBranchRangeType(bits));
+ m_buffer.registerBranchDeadline(OffsetSizeToImmBranchRangeType(bits),
+ deadline);
+ if (L->used()) {
+ LabelCahe::Ptr p = label_cache_.lookup(L->offset());
+ MOZ_ASSERT(p);
+ MOZ_ASSERT(p->key() == L->offset());
+ target_pos = p->value().getOffset();
+ target_at_put(BufferOffset(target_pos), next_instr_offset);
+ DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
+ next_instr_offset.getOffset());
+ bool ok = label_cache_.put(L->offset(), next_instr_offset);
+ if (!ok) {
+ NoEnoughLabelCache();
+ }
+ return kEndOfJumpChain;
+ } else {
+ JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
+ next_instr_offset.getOffset());
+ L->use(next_instr_offset.getOffset());
+ bool ok = label_cache_.putNew(L->offset(), next_instr_offset);
+ if (!ok) {
+ NoEnoughLabelCache();
+ }
+ DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
+ next_instr_offset.getOffset());
+ return kEndOfJumpChain;
+ }
+ }
+
+ int32_t offset = target_pos - next_instr_offset.getOffset();
+ DEBUG_PRINTF("\toffset = %d\n", offset);
+ MOZ_ASSERT(is_intn(offset, bits));
+ MOZ_ASSERT((offset & 1) == 0);
+ return offset;
+}
+
+Assembler::Condition Assembler::InvertCondition(Condition cond) {
+ switch (cond) {
+ case Equal:
+ return NotEqual;
+ case NotEqual:
+ return Equal;
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ case Signed:
+ return NotSigned;
+ case NotSigned:
+ return Signed;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+Assembler::DoubleCondition Assembler::InvertCondition(DoubleCondition cond) {
+ switch (cond) {
+ case DoubleOrdered:
+ return DoubleUnordered;
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleUnordered:
+ return DoubleOrdered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+// Break / Trap instructions.
+void Assembler::break_(uint32_t code, bool break_as_stop) {
+ // We need to invalidate breaks that could be stops as well because the
+ // simulator expects a char pointer after the stop instruction.
+ // See constants-mips.h for explanation.
+ MOZ_ASSERT(
+ (break_as_stop && code <= kMaxStopCode && code > kMaxTracepointCode) ||
+ (!break_as_stop && (code > kMaxStopCode || code <= kMaxTracepointCode)));
+
+ // since ebreak does not allow additional immediate field, we use the
+ // immediate field of lui instruction immediately following the ebreak to
+ // encode the "code" info
+ ebreak();
+ MOZ_ASSERT(is_uint20(code));
+ lui(zero_reg, code);
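+  // E.g. break_(code) emits "ebreak; lui zero, code"; the lui is
+  // architecturally a no-op (its destination is the zero register), so the
+  // payload is only visible to tools, such as the simulator, that decode the
+  // trailing instruction.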
+}
+
+void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
+ Instruction* inst = (Instruction*)inst_.raw();
+ MOZ_ASSERT(IsAddi(inst->InstructionBits()));
+ int32_t offset = inst->Imm12Value();
+ MOZ_ASSERT(is_int12(offset));
+ Instr jal_ = JAL | (0b000 << kFunct3Shift) |
+ (offset & 0xff000) | // bits 19-12
+ ((offset & 0x800) << 9) | // bit 11
+ ((offset & 0x7fe) << 20) | // bits 10-1
+ ((offset & 0x100000) << 11); // bit 20
+ // jal(zero, offset);
+ *reinterpret_cast<Instr*>(inst) = jal_;
+}
+
+void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
+ Instruction* inst = (Instruction*)inst_.raw();
+
+  // toggledJump is always used for short jumps.
+ MOZ_ASSERT(IsJal(inst->InstructionBits()));
+  // Replace "jal zero_reg, offset" with "addi zero_reg, zero_reg, offset".
+ int32_t offset = inst->Imm20JValue();
+ MOZ_ASSERT(is_int12(offset));
+ Instr addi_ = OP_IMM | (0b000 << kFunct3Shift) |
+ (offset << kImm12Shift); // addi(zero, zero, low_12);
+ *reinterpret_cast<Instr*>(inst) = addi_;
+}
+
+bool Assembler::reserve(size_t size) {
+ // This buffer uses fixed-size chunks so there's no point in reserving
+ // now vs. on-demand.
+ return !oom();
+}
+
+static JitCode* CodeFromJump(Instruction* jump) {
+ uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ while (reader.more()) {
+ JitCode* child =
+ CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void TraceOneDataRelocation(JSTracer* trc,
+ mozilla::Maybe<AutoWritableJitCode>& awjc,
+ JitCode* code, Instruction* inst) {
+ void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
+ void* prior = ptr;
+
+ // Data relocations can be for Values or for raw pointers. If a Value is
+ // zero-tagged, we can trace it as if it were a raw pointer. If a Value
+ // is not zero-tagged, we have to interpret it as a Value to ensure that the
+ // tag bits are masked off to recover the actual pointer.
+ uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
+ if (word >> JSVAL_TAG_SHIFT) {
+ // This relocation is a Value with a non-zero tag.
+ Value v = Value::fromRawBits(word);
+ TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
+ ptr = (void*)v.bitsAsPunboxPointer();
+ } else {
+ // This relocation is a raw pointer or a Value with a zero tag.
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(
+ trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
+ }
+
+ if (ptr != prior) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
+ }
+}
+
+/* static */
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* inst = (Instruction*)(code->raw() + offset);
+ TraceOneDataRelocation(trc, awjc, code, inst);
+ }
+}
+
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : available_(assembler->GetScratchRegisterList()),
+ old_available_(*available_) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *available_ = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ MOZ_ASSERT(available_ != nullptr);
+ MOZ_ASSERT(!available_->empty());
+ Register index = GeneralRegisterSet::FirstRegister(available_->bits());
+ available_->takeRegisterIndex(index);
+ return index;
+}
+
+bool UseScratchRegisterScope::hasAvailable() const {
+ return (available_->size()) != 0;
+}
+
+void Assembler::retarget(Label* label, Label* target) {
+ spew("retarget %p -> %p", label, target);
+ if (label->used() && !oom()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ int32_t next;
+ BufferOffset labelBranchOffset(label);
+
+ // Find the head of the use chain for label.
+ do {
+ next = next_link(label, false);
+ labelBranchOffset = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+
+ // Then patch the head of label's use chain to the tail of
+ // target's use chain, prepending the entire use chain of target.
+ target->use(label->offset());
+ target_at_put(labelBranchOffset, BufferOffset(target));
+ MOZ_CRASH("check");
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ target->use(label->offset());
+ }
+ }
+ label->reset();
+}
+
+bool Assembler::appendRawCode(const uint8_t* code, size_t numBytes) {
+ if (m_buffer.oom()) {
+ return false;
+ }
+ while (numBytes > SliceSize) {
+ m_buffer.putBytes(SliceSize, code);
+ numBytes -= SliceSize;
+ code += SliceSize;
+ }
+ m_buffer.putBytes(numBytes, code);
+ return !m_buffer.oom();
+}
+
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+ Instruction* i0 = (Instruction*)inst_.raw();
+ Instruction* i1 = (Instruction*)(inst_.raw() + 1 * kInstrSize);
+ Instruction* i2 = (Instruction*)(inst_.raw() + 2 * kInstrSize);
+ Instruction* i3 = (Instruction*)(inst_.raw() + 3 * kInstrSize);
+ Instruction* i4 = (Instruction*)(inst_.raw() + 4 * kInstrSize);
+ Instruction* i5 = (Instruction*)(inst_.raw() + 5 * kInstrSize);
+ Instruction* i6 = (Instruction*)(inst_.raw() + 6 * kInstrSize);
+
+ MOZ_ASSERT(IsLui(i0->InstructionBits()));
+ MOZ_ASSERT(IsAddi(i1->InstructionBits()));
+ MOZ_ASSERT(IsSlli(i2->InstructionBits()));
+ MOZ_ASSERT(IsOri(i3->InstructionBits()));
+ MOZ_ASSERT(IsSlli(i4->InstructionBits()));
+ MOZ_ASSERT(IsOri(i5->InstructionBits()));
+ if (enabled) {
+ Instr jalr_ = JALR | (ra.code() << kRdShift) | (0x0 << kFunct3Shift) |
+ (i5->RdValue() << kRs1Shift) | (0x0 << kImm12Shift);
+ *((Instr*)i6) = jalr_;
+ } else {
+ *((Instr*)i6) = kNopByte;
+ }
+}
+
+void Assembler::PatchShortRangeBranchToVeneer(Buffer* buffer, unsigned rangeIdx,
+ BufferOffset deadline,
+ BufferOffset veneer) {
+ if (buffer->oom()) {
+ return;
+ }
+ DEBUG_PRINTF("\tPatchShortRangeBranchToVeneer\n");
+ // Reconstruct the position of the branch from (rangeIdx, deadline).
+ ImmBranchRangeType branchRange = static_cast<ImmBranchRangeType>(rangeIdx);
+ BufferOffset branch(deadline.getOffset() -
+ ImmBranchMaxForwardOffset(branchRange));
+ Instruction* branchInst = buffer->getInst(branch);
+ Instruction* veneerInst_1 = buffer->getInst(veneer);
+ Instruction* veneerInst_2 =
+ buffer->getInst(BufferOffset(veneer.getOffset() + 4));
+ // Verify that the branch range matches what's encoded.
+ DEBUG_PRINTF("\t%p(%x): ", branchInst, branch.getOffset());
+ disassembleInstr(branchInst->InstructionBits(), JitSpew_Codegen);
+  DEBUG_PRINTF("\t insert veneer %x, branch:%x deadline: %x\n",
+ veneer.getOffset(), branch.getOffset(), deadline.getOffset());
+ MOZ_ASSERT(branchRange <= UncondBranchRangeType);
+ MOZ_ASSERT(branchInst->GetImmBranchRangeType() == branchRange);
+ // emit a long jump slot
+ Instr auipc = AUIPC | (t6.code() << kRdShift) | (0x0 << kImm20Shift);
+ Instr jalr = JALR | (zero_reg.code() << kRdShift) | (0x0 << kFunct3Shift) |
+ (t6.code() << kRs1Shift) | (0x0 << kImm12Shift);
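+  // The veneer is an unconditional two-instruction jump, "auipc t6, Hi20;
+  // jalr zero, Lo12(t6)". The short-range branch is retargeted at the veneer
+  // below, while the veneer itself takes over the branch's place in the
+  // unbound label's use chain (dist == 0 marks the end of the chain).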
+
+ // We want to insert veneer after branch in the linked list of instructions
+ // that use the same unbound label.
+ // The veneer should be an unconditional branch.
+ int32_t nextElemOffset = target_at(buffer->getInst(branch), branch, false);
+ int32_t dist;
+  // kEndOfChain marks the end of the linked list.
+ if (nextElemOffset != kEndOfChain) {
+ // Make the offset relative to veneer so it targets the same instruction
+ // as branchInst.
+ dist = nextElemOffset - veneer.getOffset();
+ } else {
+ dist = 0;
+ }
+ int32_t Hi20 = (((int32_t)dist + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)dist << 20 >> 20;
+ auipc = SetAuipcOffset(Hi20, auipc);
+ jalr = SetJalrOffset(Lo12, jalr);
+ // insert veneer
+ veneerInst_1->SetInstructionBits(auipc);
+ veneerInst_2->SetInstructionBits(jalr);
+ // Now link branchInst to veneer.
+ if (IsBranch(branchInst->InstructionBits())) {
+ branchInst->SetInstructionBits(SetBranchOffset(
+ branch.getOffset(), veneer.getOffset(), branchInst->InstructionBits()));
+ } else {
+ MOZ_ASSERT(IsJal(branchInst->InstructionBits()));
+ branchInst->SetInstructionBits(SetJalOffset(
+ branch.getOffset(), veneer.getOffset(), branchInst->InstructionBits()));
+ }
+ DEBUG_PRINTF("\tfix to veneer:");
+ disassembleInstr(branchInst->InstructionBits());
+}
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/Assembler-riscv64.h b/js/src/jit/riscv64/Assembler-riscv64.h
new file mode 100644
index 0000000000..4086b38ff7
--- /dev/null
+++ b/js/src/jit/riscv64/Assembler-riscv64.h
@@ -0,0 +1,685 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#ifndef jit_riscv64_Assembler_riscv64_h
+#define jit_riscv64_Assembler_riscv64_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Sprintf.h"
+
+#include <stdint.h>
+
+#include "jit/CompactBuffer.h"
+#include "jit/JitCode.h"
+#include "jit/JitSpewer.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/extension/base-riscv-i.h"
+#include "jit/riscv64/extension/extension-riscv-a.h"
+#include "jit/riscv64/extension/extension-riscv-c.h"
+#include "jit/riscv64/extension/extension-riscv-d.h"
+#include "jit/riscv64/extension/extension-riscv-f.h"
+#include "jit/riscv64/extension/extension-riscv-m.h"
+#include "jit/riscv64/extension/extension-riscv-v.h"
+#include "jit/riscv64/extension/extension-riscv-zicsr.h"
+#include "jit/riscv64/extension/extension-riscv-zifencei.h"
+#include "jit/riscv64/Register-riscv64.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/Disassembler-shared.h"
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
+#include "js/HashTable.h"
+#include "wasm/WasmTypeDecls.h"
+namespace js {
+namespace jit {
+
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg) {}
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg) {}
+};
+
+struct ScratchRegisterScope : public AutoRegisterScope {
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchRegister) {}
+};
+
+class MacroAssembler;
+
+inline Imm32 Imm64::secondHalf() const { return hi(); }
+inline Imm32 Imm64::firstHalf() const { return low(); }
+
+static constexpr uint32_t ABIStackAlignment = 8;
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 8;
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static const uint32_t WasmStackAlignment = 16;
+static const uint32_t WasmTrapInstructionLength = 2 * sizeof(uint32_t);
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+static constexpr uint32_t WasmCheckedTailEntryOffset = 20u;
+
+static const Scale ScalePointer = TimesEight;
+
+class Assembler;
+
+static constexpr int32_t SliceSize = 1024;
+
+typedef js::jit::AssemblerBufferWithConstantPools<
+ SliceSize, 4, Instruction, Assembler, NumShortBranchRangeTypes>
+ Buffer;
+
+class Assembler : public AssemblerShared,
+ public AssemblerRISCVI,
+ public AssemblerRISCVA,
+ public AssemblerRISCVF,
+ public AssemblerRISCVD,
+ public AssemblerRISCVM,
+ public AssemblerRISCVC,
+ public AssemblerRISCVZicsr,
+ public AssemblerRISCVZifencei {
+ GeneralRegisterSet scratch_register_list_;
+
+ static constexpr int kInvalidSlotPos = -1;
+
+#ifdef JS_JITSPEW
+ Sprinter* printer;
+#endif
+ bool enoughLabelCache_ = true;
+
+ protected:
+ using LabelOffset = int32_t;
+ using LabelCahe =
+ HashMap<LabelOffset, BufferOffset, js::DefaultHasher<LabelOffset>,
+ js::SystemAllocPolicy>;
+ LabelCahe label_cache_;
+ void NoEnoughLabelCache() { enoughLabelCache_ = false; }
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+ Buffer m_buffer;
+ bool isFinished = false;
+ Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }
+
+ struct RelativePatch {
+ // the offset within the code buffer where the value is loaded that
+ // we want to fix-up
+ BufferOffset offset;
+ void* target;
+ RelocationKind kind;
+
+ RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
+ : offset(offset), target(target), kind(kind) {}
+ };
+
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+
+ void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
+ if (kind == RelocationKind::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+ }
+
+ void addLongJump(BufferOffset src, BufferOffset dst) {
+ CodeLabel cl;
+ cl.patchAt()->bind(src.getOffset());
+ cl.target()->bind(dst.getOffset());
+ cl.setLinkMode(CodeLabel::JumpImmediate);
+ addCodeLabel(std::move(cl));
+ }
+
+ public:
+ static bool FLAG_riscv_debug;
+
+ Assembler()
+ : scratch_register_list_((1 << t5.code()) | (1 << t4.code()) |
+ (1 << t6.code())),
+#ifdef JS_JITSPEW
+ printer(nullptr),
+#endif
+ m_buffer(/*guardSize*/ 2, /*headerSize*/ 2, /*instBufferAlign*/ 8,
+ /*poolMaxOffset*/ GetPoolMaxOffset(), /*pcBias*/ 8,
+ /*alignFillInst*/ kNopByte, /*nopFillInst*/ kNopByte),
+ isFinished(false) {
+ }
+ static uint32_t NopFill;
+ static uint32_t AsmPoolMaxOffset;
+ static uint32_t GetPoolMaxOffset();
+ bool reserve(size_t size);
+ bool oom() const;
+ void setPrinter(Sprinter* sp) {
+#ifdef JS_JITSPEW
+ printer = sp;
+#endif
+ }
+ void finish() {
+ MOZ_ASSERT(!isFinished);
+ isFinished = true;
+ }
+ void enterNoPool(size_t maxInst) { m_buffer.enterNoPool(maxInst); }
+ void leaveNoPool() { m_buffer.leaveNoPool(); }
+ bool swapBuffer(wasm::Bytes& bytes);
+ // Size of the instruction stream, in bytes.
+ size_t size() const;
+ // Size of the data table, in bytes.
+ size_t bytesNeeded() const;
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const;
+ size_t dataRelocationTableBytes() const;
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+  // API for speaking with the IonAssemblerBufferWithConstantPools: generate
+  // an initial placeholder instruction that we want to later fix up.
+ static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+ static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+  // Patch a short-range branch to jump through a veneer (auipc + jalr) when
+  // its deadline is reached before the target is bound.
+ static void PatchShortRangeBranchToVeneer(Buffer*, unsigned rangeIdx,
+ BufferOffset deadline,
+ BufferOffset veneer);
+ struct PoolHeader {
+ uint32_t data;
+
+ struct Header {
+      // The size should take into account the pool header.
+      // The size is in units of Instructions (4 bytes), not bytes.
+ union {
+ struct {
+ uint32_t size : 15;
+
+ // "Natural" guards are part of the normal instruction stream,
+ // while "non-natural" guards are inserted for the sole purpose
+ // of skipping around a pool.
+ uint32_t isNatural : 1;
+ uint32_t ONES : 16;
+ };
+ uint32_t data;
+ };
+
+ Header(int size_, bool isNatural_)
+ : size(size_), isNatural(isNatural_), ONES(0xffff) {}
+
+ Header(uint32_t data) : data(data) {
+ static_assert(sizeof(Header) == sizeof(uint32_t));
+ MOZ_ASSERT(ONES == 0xffff);
+ }
+
+ uint32_t raw() const {
+ static_assert(sizeof(Header) == sizeof(uint32_t));
+ return data;
+ }
+ };
+
+ PoolHeader(int size_, bool isNatural_)
+ : data(Header(size_, isNatural_).raw()) {}
+
+ uint32_t size() const {
+ Header tmp(data);
+ return tmp.size;
+ }
+
+ uint32_t isNatural() const {
+ Header tmp(data);
+ return tmp.isNatural;
+ }
+ };
+
+ static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
+ static void WritePoolGuard(BufferOffset branch, Instruction* inst,
+ BufferOffset dest);
+ void processCodeLabels(uint8_t* rawCode);
+ BufferOffset nextOffset() { return m_buffer.nextOffset(); }
+ // Get the buffer offset of the next inserted instruction. This may flush
+ // constant pools.
+ BufferOffset nextInstrOffset(int numInstr = 1) {
+ return m_buffer.nextInstrOffset(numInstr);
+ }
+ void comment(const char* msg) { spew("; %s", msg); }
+
+#ifdef JS_JITSPEW
+ inline void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {
+ if (MOZ_UNLIKELY(printer || JitSpewEnabled(JitSpew_Codegen))) {
+ va_list va;
+ va_start(va, fmt);
+ spew(fmt, va);
+ va_end(va);
+ }
+ }
+
+#else
+ MOZ_ALWAYS_INLINE void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {}
+#endif
+
+#ifdef JS_JITSPEW
+ MOZ_COLD void spew(const char* fmt, va_list va) MOZ_FORMAT_PRINTF(2, 0) {
+ // Buffer to hold the formatted string. Note that this may contain
+ // '%' characters, so do not pass it directly to printf functions.
+ char buf[200];
+
+ int i = VsprintfLiteral(buf, fmt, va);
+ if (i > -1) {
+ if (printer) {
+ printer->printf("%s\n", buf);
+ }
+ js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
+ }
+ }
+#endif
+
+ enum Condition {
+ Overflow = overflow,
+ Below = Uless,
+ BelowOrEqual = Uless_equal,
+ Above = Ugreater,
+ AboveOrEqual = Ugreater_equal,
+ Equal = equal,
+ NotEqual = not_equal,
+ GreaterThan = greater,
+ GreaterThanOrEqual = greater_equal,
+ LessThan = less,
+ LessThanOrEqual = less_equal,
+ Always = cc_always,
+ CarrySet,
+ CarryClear,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered
+ // - i.e. neither operand is NaN.
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered,
+ FIRST_UNORDERED = DoubleUnordered,
+ LAST_UNORDERED = DoubleLessThanOrEqualOrUnordered
+ };
+
+ Register getStackPointer() const { return StackPointer; }
+ void flushBuffer() {}
+ static int disassembleInstr(Instr instr, bool enable_spew = false);
+ int target_at(BufferOffset pos, bool is_internal);
+ static int target_at(Instruction* instruction, BufferOffset pos,
+ bool is_internal, Instruction* instruction2 = nullptr);
+ uint32_t next_link(Label* label, bool is_internal);
+ static uintptr_t target_address_at(Instruction* pos);
+ static void set_target_value_at(Instruction* pc, uint64_t target);
+ void target_at_put(BufferOffset pos, BufferOffset target_pos,
+ bool trampoline = false);
+ virtual int32_t branch_offset_helper(Label* L, OffsetSize bits);
+ int32_t branch_long_offset(Label* L);
+
+  // Determines whether the Label is bound and near enough that a branch
+  // instruction can be used to reach it instead of a jump instruction.
+ bool is_near(Label* L);
+ bool is_near(Label* L, OffsetSize bits);
+ bool is_near_branch(Label* L);
+
+ void nopAlign(int m) {
+ MOZ_ASSERT(m >= 4 && (m & (m - 1)) == 0);
+ while ((currentOffset() & (m - 1)) != 0) {
+ nop();
+ }
+ }
+ virtual void emit(Instr x) {
+ MOZ_ASSERT(hasCreator());
+ m_buffer.putInt(x);
+#ifdef DEBUG
+ if (!oom()) {
+ DEBUG_PRINTF(
+ "0x%lx(%lx):",
+ (uint64_t)editSrc(BufferOffset(currentOffset() - sizeof(Instr))),
+ currentOffset() - sizeof(Instr));
+ disassembleInstr(x, JitSpewEnabled(JitSpew_Codegen));
+ }
+#endif
+ }
+ virtual void emit(ShortInstr x) { MOZ_CRASH(); }
+ virtual void emit(uint64_t x) { MOZ_CRASH(); }
+ virtual void emit(uint32_t x) {
+ DEBUG_PRINTF(
+ "0x%lx(%lx): uint32_t: %d\n",
+ (uint64_t)editSrc(BufferOffset(currentOffset() - sizeof(Instr))),
+ currentOffset() - sizeof(Instr), x);
+ m_buffer.putInt(x);
+ }
+
+ void instr_at_put(BufferOffset offset, Instr instr) {
+ DEBUG_PRINTF("\t[instr_at_put\n");
+ DEBUG_PRINTF("\t%p %d \n\t", editSrc(offset), offset.getOffset());
+ disassembleInstr(editSrc(offset)->InstructionBits());
+ DEBUG_PRINTF("\t");
+ *reinterpret_cast<Instr*>(editSrc(offset)) = instr;
+ disassembleInstr(editSrc(offset)->InstructionBits());
+ DEBUG_PRINTF("\t]\n");
+ }
+
+ static Condition InvertCondition(Condition);
+
+ static DoubleCondition InvertCondition(DoubleCondition);
+
+ static uint64_t ExtractLoad64Value(Instruction* inst0);
+ static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+
+ static void PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall) {
+ Instruction* inst = (Instruction*)start.raw();
+ uint8_t* dest = toCall.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+ // Always use long jump for two reasons:
+ // - Jump has to be the same size because of PatchWrite_NearCallSize.
+ // - Return address has to be at the end of replaced block.
+ // Short jump wouldn't be more efficient.
+    // WriteLoad64Instructions will emit 6 instructions to load an address.
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
+ Instr jalr_ = JALR | (ra.code() << kRdShift) | (0x0 << kFunct3Shift) |
+ (ScratchRegister.code() << kRs1Shift) | (0x0 << kImm12Shift);
+ *reinterpret_cast<Instr*>(inst + 6 * kInstrSize) = jalr_;
+ }
+ static void WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value);
+
+ static uint32_t PatchWrite_NearCallSize() { return 7 * sizeof(uint32_t); }
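+  // Note: the 7 words above match the sequence written by
+  // PatchWrite_NearCall: 6 instructions from WriteLoad64Instructions to
+  // materialize the target address, followed by the jalr through
+  // ScratchRegister.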
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+ static void ToggleCall(CodeLocationLabel inst_, bool enable);
+
+ static void Bind(uint8_t* rawCode, const CodeLabel& label);
+ // label operations
+ void bind(Label* label, BufferOffset boff = BufferOffset());
+ void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
+ uint32_t currentOffset() { return nextOffset().getOffset(); }
+ void retarget(Label* label, Label* target);
+ static uint32_t NopSize() { return 4; }
+
+ static uintptr_t GetPointer(uint8_t* instPtr) {
+ Instruction* inst = (Instruction*)instPtr;
+ return Assembler::ExtractLoad64Value(inst);
+ }
+
+ static bool HasRoundInstruction(RoundingMode) { return false; }
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess) {
+ MOZ_CRASH();
+ }
+
+ void setUnlimitedBuffer() { m_buffer.setUnlimited(); }
+
+ GeneralRegisterSet* GetScratchRegisterList() {
+ return &scratch_register_list_;
+ }
+
+ void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {}
+
+  // As opposed to the x86/x64 version, the data relocation has to be executed
+  // before recovering the pointer, not after.
+ void writeDataRelocation(ImmGCPtr ptr) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(nextOffset().getOffset());
+ }
+ }
+
+ bool appendRawCode(const uint8_t* code, size_t numBytes);
+
+ void assertNoGCThings() const {
+#ifdef DEBUG
+ MOZ_ASSERT(dataRelocations_.length() == 0);
+ for (auto& j : jumps_) {
+ MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+ }
+#endif
+ }
+
+ // Assembler Pseudo Instructions (Tables 25.2, 25.3, RISC-V Unprivileged ISA)
+ void break_(uint32_t code, bool break_as_stop = false);
+ void nop();
+ void RV_li(Register rd, intptr_t imm);
+ static int RV_li_count(int64_t imm, bool is_get_temp_reg = false);
+ void GeneralLi(Register rd, int64_t imm);
+ static int GeneralLiCount(intptr_t imm, bool is_get_temp_reg = false);
+ void RecursiveLiImpl(Register rd, intptr_t imm);
+ void RecursiveLi(Register rd, intptr_t imm);
+ static int RecursiveLiCount(intptr_t imm);
+ static int RecursiveLiImplCount(intptr_t imm);
+ // Returns the number of instructions required to load the immediate
+ static int li_estimate(intptr_t imm, bool is_get_temp_reg = false);
+ // Loads an immediate, always using 8 instructions, regardless of the value,
+ // so that it can be modified later.
+ void li_constant(Register rd, intptr_t imm);
+ void li_ptr(Register rd, intptr_t imm);
+};
+
+class ABIArgGenerator {
+ public:
+ ABIArgGenerator()
+ : intRegIndex_(0), floatRegIndex_(0), stackOffset_(0), current_() {}
+ ABIArg next(MIRType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+
+ protected:
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+ uint32_t stackOffset_;
+ ABIArg current_;
+};
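+// Informal usage sketch (based on how the other backends drive this class):
+//   ABIArgGenerator abi;
+//   ABIArg arg = abi.next(MIRType::Int64);  // register or stack slot
+//   uint32_t stackBytes = abi.stackBytesConsumedSoFar();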
+
+class BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem, int margin)
+ : assem_(assem) {
+ assem_->enterNoPool(margin);
+ }
+ ~BlockTrampolinePoolScope() { assem_->leaveNoPool(); }
+
+ private:
+ Assembler* assem_;
+ BlockTrampolinePoolScope() = delete;
+ BlockTrampolinePoolScope(const BlockTrampolinePoolScope&) = delete;
+ BlockTrampolinePoolScope& operator=(const BlockTrampolinePoolScope&) = delete;
+};
+class UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ Register Acquire();
+ bool hasAvailable() const;
+ void Include(const GeneralRegisterSet& list) {
+ *available_ = GeneralRegisterSet::Intersect(*available_, list);
+ }
+ void Exclude(const GeneralRegisterSet& list) {
+ *available_ = GeneralRegisterSet::Subtract(*available_, list);
+ }
+
+ private:
+ GeneralRegisterSet* available_;
+ GeneralRegisterSet old_available_;
+};
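+// Typical usage, as in the code generator below:
+//   UseScratchRegisterScope temps(&masm);
+//   Register scratch = temps.Acquire();
+// The destructor is expected to restore the previously available scratch set
+// (old_available_), so the register is only borrowed for the scope.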
+
+// Class Operand represents a register, FP register, memory, or immediate
+// operand of an instruction.
+class Operand {
+ public:
+ enum Tag { REG, FREG, MEM, IMM };
+ Operand(FloatRegister freg) : tag(FREG), rm_(freg.code()) {}
+
+ explicit Operand(Register base, Imm32 off)
+ : tag(MEM), rm_(base.code()), offset_(off.value) {}
+
+ explicit Operand(Register base, int32_t off)
+ : tag(MEM), rm_(base.code()), offset_(off) {}
+
+ explicit Operand(const Address& addr)
+ : tag(MEM), rm_(addr.base.code()), offset_(addr.offset) {}
+
+ explicit Operand(intptr_t immediate) : tag(IMM), rm_() { value_ = immediate; }
+ // Register.
+ Operand(const Register rm) : tag(REG), rm_(rm.code()) {}
+ // Return true if this is a register operand.
+ bool is_reg() const { return tag == REG; }
+ bool is_freg() const { return tag == FREG; }
+ bool is_mem() const { return tag == MEM; }
+ bool is_imm() const { return tag == IMM; }
+ inline intptr_t immediate() const {
+ MOZ_ASSERT(is_imm());
+ return value_;
+ }
+ bool IsImmediate() const { return !is_reg(); }
+ Register rm() const { return Register::FromCode(rm_); }
+ int32_t offset() const {
+ MOZ_ASSERT(is_mem());
+ return offset_;
+ }
+
+ FloatRegister toFReg() const {
+ MOZ_ASSERT(tag == FREG);
+ return FloatRegister::FromCode(rm_);
+ }
+
+ Register toReg() const {
+ MOZ_ASSERT(tag == REG);
+ return Register::FromCode(rm_);
+ }
+
+ Address toAddress() const {
+ MOZ_ASSERT(tag == MEM);
+ return Address(Register::FromCode(rm_), offset());
+ }
+
+ private:
+ Tag tag;
+ uint32_t rm_;
+ int32_t offset_;
+ intptr_t value_; // valid if rm_ == no_reg
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+static const uint32_t NumIntArgRegs = 8;
+static const uint32_t NumFloatArgRegs = 8;
+static inline bool GetIntArgReg(uint32_t usedIntArgs, Register* out) {
+ if (usedIntArgs < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedIntArgs);
+ return true;
+ }
+ return false;
+}
+
+static inline bool GetFloatArgReg(uint32_t usedFloatArgs, FloatRegister* out) {
+ if (usedFloatArgs < NumFloatArgRegs) {
+ *out = FloatRegister::FromCode(fa0.code() + usedFloatArgs);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, once we run out
+// of actual argument registers, we fall back on the CallTempReg* registers
+// that don't overlap the argument registers, and only fail once those run out
+// too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ MOZ_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out)) {
+ return true;
+ }
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
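+
+// For example (informal): with all 8 integer argument registers in use and no
+// float arguments, GetTempRegForIntArg(8, 0, &out) hands out
+// CallTempNonArgRegs[0], and it only fails once those temps run out as well.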
+
+} // namespace jit
+} // namespace js
+#endif /* jit_riscv64_Assembler_riscv64_h */
diff --git a/js/src/jit/riscv64/AssemblerMatInt.cpp b/js/src/jit/riscv64/AssemblerMatInt.cpp
new file mode 100644
index 0000000000..81c7fa7c40
--- /dev/null
+++ b/js/src/jit/riscv64/AssemblerMatInt.cpp
@@ -0,0 +1,217 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++
+//-*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM
+// Exceptions. See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/Marking.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/disasm/Disasm-riscv64.h"
+#include "vm/Realm.h"
+namespace js {
+namespace jit {
+void Assembler::RecursiveLi(Register rd, int64_t val) {
+ if (val > 0 && RecursiveLiImplCount(val) > 2) {
+ unsigned LeadingZeros = mozilla::CountLeadingZeroes64((uint64_t)val);
+ uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;
+ int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
+ if (countFillZero < RecursiveLiImplCount(val)) {
+ RecursiveLiImpl(rd, ShiftedVal);
+ srli(rd, rd, LeadingZeros);
+ return;
+ }
+ }
+ RecursiveLiImpl(rd, val);
+}
+
+int Assembler::RecursiveLiCount(int64_t val) {
+ if (val > 0 && RecursiveLiImplCount(val) > 2) {
+ unsigned LeadingZeros = mozilla::CountLeadingZeroes64((uint64_t)val);
+ uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;
+ // Fill in the bits that will be shifted out with 1s. An example where
+ // this helps is trailing one masks with 32 or more ones. This will
+ // generate ADDI -1 and an SRLI.
+ int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
+ if (countFillZero < RecursiveLiImplCount(val)) {
+ return countFillZero;
+ }
+ }
+ return RecursiveLiImplCount(val);
+}
+
+inline int64_t signExtend(uint64_t V, int N) {
+ return int64_t(V << (64 - N)) >> (64 - N);
+}
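+
+// For instance, signExtend(0xfff, 12) yields -1 and signExtend(0x7ff, 12)
+// yields 0x7ff, since bit N-1 is treated as the sign bit.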
+
+void Assembler::RecursiveLiImpl(Register rd, int64_t Val) {
+ if (is_int32(Val)) {
+ // Depending on the active bits in the immediate Value v, the following
+ // instruction sequences are emitted:
+ //
+ // v == 0 : ADDI
+ // v[0,12) != 0 && v[12,32) == 0 : ADDI
+ // v[0,12) == 0 && v[12,32) != 0 : LUI
+ // v[0,32) != 0 : LUI+ADDI(W)
+ int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
+ int64_t Lo12 = Val << 52 >> 52;
+
+ if (Hi20) {
+ lui(rd, (int32_t)Hi20);
+ }
+
+ if (Lo12 || Hi20 == 0) {
+ if (Hi20) {
+ addiw(rd, rd, Lo12);
+ } else {
+ addi(rd, zero_reg, Lo12);
+ }
+ }
+ return;
+ }
+
+ // In the worst case, for a full 64-bit constant, a sequence of 8
+ // instructions (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be
+ // emitted. Note that the first two instructions (LUI+ADDIW) can contribute
+ // up to 32 bits while the following ADDI instructions contribute up to 12
+ // bits each.
+ //
+  // At first glance, implementing this seems possible by simply emitting the
+  // most significant 32 bits (LUI+ADDIW) followed by as many left shifts
+  // (SLLI) and immediate additions (ADDI) as needed. However, because ADDI
+  // performs a sign-extended addition, doing it like that would only be
+  // possible when at most 11 bits of the ADDI instructions are used. Using
+  // all 12 bits of the ADDI instructions, as GAS does, actually requires that
+  // the constant is processed starting with the least significant bit.
+  //
+  // In the following, constants are processed from LSB to MSB but instruction
+  // emission is performed from MSB to LSB by recursively calling
+  // RecursiveLi. In each recursion, first the lowest 12 bits are removed
+  // from the constant and the optimal shift amount, which can be greater than
+  // 12 bits if the constant is sparse, is determined. Then the shifted
+  // remaining constant is processed recursively and gets emitted as soon as
+  // it fits into 32 bits. The emission of the shifts and additions is
+  // subsequently performed when the recursion returns.
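+  //
+  // Worked example (informal): materializing 0x100000001 peels off Lo12 = 1,
+  // recurses on Hi52 = 1 with ShiftAmount = 32, and emits
+  //   addi  rd, zero, 1
+  //   slli  rd, rd, 32
+  //   addi  rd, rd, 1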
+
+ int64_t Lo12 = Val << 52 >> 52;
+ int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
+ int ShiftAmount = 12 + mozilla::CountTrailingZeroes64((uint64_t)Hi52);
+ Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
+
+ // If the remaining bits don't fit in 12 bits, we might be able to reduce
+ // the shift amount in order to use LUI which will zero the lower 12 bits.
+ bool Unsigned = false;
+ if (ShiftAmount > 12 && !is_int12(Hi52)) {
+ if (is_int32((uint64_t)Hi52 << 12)) {
+ // Reduce the shift amount and add zeros to the LSBs so it will match
+ // LUI.
+ ShiftAmount -= 12;
+ Hi52 = (uint64_t)Hi52 << 12;
+ }
+ }
+ RecursiveLi(rd, Hi52);
+
+ if (Unsigned) {
+ } else {
+ slli(rd, rd, ShiftAmount);
+ }
+ if (Lo12) {
+ addi(rd, rd, Lo12);
+ }
+}
+
+int Assembler::RecursiveLiImplCount(int64_t Val) {
+ int count = 0;
+ if (is_int32(Val)) {
+ // Depending on the active bits in the immediate Value v, the following
+ // instruction sequences are emitted:
+ //
+ // v == 0 : ADDI
+ // v[0,12) != 0 && v[12,32) == 0 : ADDI
+ // v[0,12) == 0 && v[12,32) != 0 : LUI
+ // v[0,32) != 0 : LUI+ADDI(W)
+ int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
+ int64_t Lo12 = Val << 52 >> 52;
+
+ if (Hi20) {
+ // lui(rd, (int32_t)Hi20);
+ count++;
+ }
+
+ if (Lo12 || Hi20 == 0) {
+ // unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
+ // Res.push_back(RISCVMatInt::Inst(AddiOpc, Lo12));
+ count++;
+ }
+ return count;
+ }
+
+ // In the worst case, for a full 64-bit constant, a sequence of 8
+ // instructions (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be
+ // emitted. Note that the first two instructions (LUI+ADDIW) can contribute
+ // up to 32 bits while the following ADDI instructions contribute up to 12
+ // bits each.
+ //
+  // At first glance, implementing this seems possible by simply emitting the
+  // most significant 32 bits (LUI+ADDIW) followed by as many left shifts
+  // (SLLI) and immediate additions (ADDI) as needed. However, because ADDI
+  // performs a sign-extended addition, doing it like that would only be
+  // possible when at most 11 bits of the ADDI instructions are used. Using
+  // all 12 bits of the ADDI instructions, as GAS does, actually requires that
+  // the constant is processed starting with the least significant bit.
+  //
+  // In the following, constants are processed from LSB to MSB but instruction
+  // counting is performed from MSB to LSB by recursively calling
+  // RecursiveLiImplCount. In each recursion, first the lowest 12 bits are
+  // removed from the constant and the optimal shift amount, which can be
+  // greater than 12 bits if the constant is sparse, is determined. Then the
+  // shifted remaining constant is processed recursively and gets counted as
+  // soon as it fits into 32 bits. The counting of the shifts and additions is
+  // subsequently performed when the recursion returns.
+
+ int64_t Lo12 = Val << 52 >> 52;
+ int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
+ int ShiftAmount = 12 + mozilla::CountTrailingZeroes64((uint64_t)Hi52);
+ Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
+
+ // If the remaining bits don't fit in 12 bits, we might be able to reduce
+ // the shift amount in order to use LUI which will zero the lower 12 bits.
+ bool Unsigned = false;
+ if (ShiftAmount > 12 && !is_int12(Hi52)) {
+ if (is_int32((uint64_t)Hi52 << 12)) {
+ // Reduce the shift amount and add zeros to the LSBs so it will match
+ // LUI.
+ ShiftAmount -= 12;
+ Hi52 = (uint64_t)Hi52 << 12;
+ }
+ }
+
+ count += RecursiveLiImplCount(Hi52);
+
+ if (Unsigned) {
+ } else {
+ // slli(rd, rd, ShiftAmount);
+ count++;
+ }
+ if (Lo12) {
+ // addi(rd, rd, Lo12);
+ count++;
+ }
+ return count;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/CodeGenerator-riscv64.cpp b/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
new file mode 100644
index 0000000000..98bb5015cb
--- /dev/null
+++ b/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
@@ -0,0 +1,2871 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/riscv64/CodeGenerator-riscv64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+
+// shared
+CodeGeneratorRiscv64::CodeGeneratorRiscv64(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {}
+
+Operand CodeGeneratorRiscv64::ToOperand(const LAllocation& a) {
+ if (a.isGeneralReg()) {
+ return Operand(a.toGeneralReg()->reg());
+ }
+ if (a.isFloatReg()) {
+ return Operand(a.toFloatReg()->reg());
+ }
+ return Operand(ToAddress(a));
+}
+
+Operand CodeGeneratorRiscv64::ToOperand(const LAllocation* a) {
+ return ToOperand(*a);
+}
+
+Operand CodeGeneratorRiscv64::ToOperand(const LDefinition* def) {
+ return ToOperand(def->output());
+}
+
+#ifdef JS_PUNBOX64
+Operand CodeGeneratorRiscv64::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToOperand(input.value());
+}
+#else
+Register64 CodeGeneratorRiscv64::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToRegister64(input);
+}
+#endif
+
+void CodeGeneratorRiscv64::branchToBlock(FloatFormat fmt, FloatRegister lhs,
+ FloatRegister rhs, MBasicBlock* mir,
+ Assembler::DoubleCondition cond) {
+ // Skip past trivial blocks.
+ Label* label = skipTrivialBlocks(mir)->lir()->label();
+ if (fmt == DoubleFloat) {
+ masm.branchDouble(cond, lhs, rhs, label);
+ } else {
+ masm.branchFloat(cond, lhs, rhs, label);
+ }
+}
+
+void OutOfLineBailout::accept(CodeGeneratorRiscv64* codegen) {
+ codegen->visitOutOfLineBailout(this);
+}
+
+MoveOperand CodeGeneratorRiscv64::toMoveOperand(LAllocation a) const {
+ if (a.isGeneralReg()) {
+ return MoveOperand(ToRegister(a));
+ }
+ if (a.isFloatReg()) {
+ return MoveOperand(ToFloatRegister(a));
+ }
+ MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
+ : MoveOperand::Kind::Memory;
+ Address address = ToAddress(a);
+ MOZ_ASSERT((address.offset & 3) == 0);
+
+ return MoveOperand(address, kind);
+}
+
+void CodeGeneratorRiscv64::bailoutFrom(Label* label, LSnapshot* snapshot) {
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void CodeGeneratorRiscv64::bailout(LSnapshot* snapshot) {
+ Label label;
+ masm.jump(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+bool CodeGeneratorRiscv64::generateOutOfLineCode() {
+ if (!CodeGeneratorShared::generateOutOfLineCode()) {
+ return false;
+ }
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+    // Push the frame size, so the handler can recover the IonScript.
+    // The frame size is stored in 'ra' and pushed by GenerateBailoutThunk.
+    // We have to use 'ra' because generateBailoutTable will implicitly do
+    // the same.
+ masm.move32(Imm32(frameSize()), ra);
+
+ TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.jump(handler);
+ }
+
+ return !masm.oom();
+}
+
+class js::jit::OutOfLineTableSwitch
+ : public OutOfLineCodeBase<CodeGeneratorRiscv64> {
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorRiscv64* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}
+
+ MTableSwitch* mir() const { return mir_; }
+
+ CodeLabel* jumpLabel() { return &jumpLabel_; }
+};
+
+void CodeGeneratorRiscv64::emitTableSwitchDispatch(MTableSwitch* mir,
+ Register index,
+ Register base) {
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+  // Subtract the low bound so the index becomes zero-based.
+ if (mir->low() != 0) {
+ masm.subPtr(Imm32(mir->low()), index);
+ }
+
+ // Jump to default case if input is out of range
+ int32_t cases = mir->numCases();
+ masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first
+ // generate the case entries (we don't yet know their offsets in the
+ // instruction stream).
+ OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+  // Compute the address at which the pointer to the right case is stored.
+ masm.ma_li(base, ool->jumpLabel());
+
+ BaseIndex pointer(base, index, ScalePointer);
+
+ // Jump to the right case
+ masm.branchToComputedAddress(pointer);
+}
+
+void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
+ MOZ_ASSERT(ins->instance()->isBogus());
+ masm.movePtr(HeapReg, ToRegister(ins->output()));
+}
+
+template <typename T>
+void CodeGeneratorRiscv64::emitWasmLoad(T* lir) {
+ const MWasmLoad* mir = lir->mir();
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+
+ Register ptr = ToRegister(lir->ptr());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (mir->base()->type() == MIRType::Int32) {
+ masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+ ptr = scratch2;
+ ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+ }
+
+ // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+ // true 64-bit value.
+ masm.wasmLoad(mir->access(), HeapReg, ptr, ptrScratch,
+ ToAnyRegister(lir->output()));
+}
+
+template <typename T>
+void CodeGeneratorRiscv64::emitWasmStore(T* lir) {
+ const MWasmStore* mir = lir->mir();
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+
+ Register ptr = ToRegister(lir->ptr());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (mir->base()->type() == MIRType::Int32) {
+ masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+ ptr = scratch2;
+ ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+ }
+
+ // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+ // true 64-bit value.
+ masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), HeapReg, ptr,
+ ptrScratch);
+}
+
+void CodeGeneratorRiscv64::generateInvalidateEpilogue() {
+ // Ensure that there is enough space in the buffer for the OsiPoint
+ // patching to occur. Otherwise, we could overwrite the invalidation
+  // epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
+ masm.nop();
+ }
+
+ masm.bind(&invalidate_);
+
+ // Push the return address of the point that we bailed out at to the stack
+ masm.Push(ra);
+
+ // Push the Ion script onto the stack (when we determine what that
+ // pointer is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+ // Jump to the invalidator which will replace the current frame.
+ TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+
+ masm.jump(thunk);
+}
+
+void CodeGeneratorRiscv64::visitOutOfLineBailout(OutOfLineBailout* ool) {
+ // Push snapshotOffset and make sure stack is aligned.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()),
+ Address(StackPointer, 0));
+
+ masm.jump(&deoptLabel_);
+}
+
+void CodeGeneratorRiscv64::visitOutOfLineTableSwitch(
+ OutOfLineTableSwitch* ool) {
+ MTableSwitch* mir = ool->mir();
+ masm.nop();
+ masm.haltingAlign(sizeof(void*));
+ masm.bind(ool->jumpLabel());
+ masm.addCodeLabel(*ool->jumpLabel());
+ BlockTrampolinePoolScope block_trampoline_pool(
+ &masm, mir->numCases() * sizeof(uint64_t));
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void CodeGeneratorRiscv64::visitOutOfLineWasmTruncateCheck(
+ OutOfLineWasmTruncateCheck* ool) {
+ FloatRegister input = ool->input();
+ Register output = ool->output();
+ Register64 output64 = ool->output64();
+ MIRType fromType = ool->fromType();
+ MIRType toType = ool->toType();
+ Label* oolRejoin = ool->rejoin();
+ TruncFlags flags = ool->flags();
+ wasm::BytecodeOffset off = ool->bytecodeOffset();
+
+ if (fromType == MIRType::Float32) {
+ if (toType == MIRType::Int32) {
+ masm.oolWasmTruncateCheckF32ToI32(input, output, flags, off, oolRejoin);
+ } else if (toType == MIRType::Int64) {
+ masm.oolWasmTruncateCheckF32ToI64(input, output64, flags, off, oolRejoin);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ } else if (fromType == MIRType::Double) {
+ if (toType == MIRType::Int32) {
+ masm.oolWasmTruncateCheckF64ToI32(input, output, flags, off, oolRejoin);
+ } else if (toType == MIRType::Int64) {
+ masm.oolWasmTruncateCheckF64ToI64(input, output64, flags, off, oolRejoin);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+ValueOperand CodeGeneratorRiscv64::ToValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand CodeGeneratorRiscv64::ToTempValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LAllocation* in = box->getOperand(0);
+ ValueOperand result = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ MUnbox* mir = unbox->mir();
+
+ Register result = ToRegister(unbox->output());
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ Label bail;
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.fallibleUnboxInt32(value, result, &bail);
+ break;
+ case MIRType::Boolean:
+ masm.fallibleUnboxBoolean(value, result, &bail);
+ break;
+ case MIRType::Object:
+ masm.fallibleUnboxObject(value, result, &bail);
+ break;
+ case MIRType::String:
+ masm.fallibleUnboxString(value, result, &bail);
+ break;
+ case MIRType::Symbol:
+ masm.fallibleUnboxSymbol(value, result, &bail);
+ break;
+ case MIRType::BigInt:
+ masm.fallibleUnboxBigInt(value, result, &bail);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutFrom(&bail, unbox->snapshot());
+ return;
+ }
+
+ LAllocation* input = unbox->getOperand(LUnbox::Input);
+ if (input->isRegister()) {
+ Register inputReg = ToRegister(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputReg, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputReg, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputReg, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputReg, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputReg, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputReg, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ return;
+ }
+
+ Address inputAddr = ToAddress(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputAddr, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputAddr, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputAddr, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputAddr, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputAddr, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputAddr, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+void CodeGeneratorRiscv64::splitTagForTest(const ValueOperand& value,
+ ScratchTagScope& tag) {
+ masm.splitTag(value.valueReg(), tag);
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ const mozilla::DebugOnly<MCompare::CompareType> type = mir->compareType();
+ MOZ_ASSERT(type == MCompare::Compare_Int64 ||
+ type == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+
+ if (IsConstant(rhs)) {
+ masm.cmpPtrSet(cond, lhsReg, ImmWord(ToInt64(rhs)), output);
+ } else if (rhs.value().isGeneralReg()) {
+ masm.cmpPtrSet(cond, lhsReg, ToRegister64(rhs).reg, output);
+ } else {
+ masm.cmpPtrSet(cond, lhsReg, ToAddress(rhs.value()), output);
+ }
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ const mozilla::DebugOnly<MCompare::CompareType> type = mir->compareType();
+ MOZ_ASSERT(type == MCompare::Compare_Int64 ||
+ type == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+
+ if (IsConstant(rhs)) {
+ emitBranch(lhsReg, ImmWord(ToInt64(rhs)), cond, lir->ifTrue(),
+ lir->ifFalse());
+ } else if (rhs.value().isGeneralReg()) {
+ emitBranch(lhsReg, ToRegister64(rhs).reg, cond, lir->ifTrue(),
+ lir->ifFalse());
+ } else {
+ emitBranch(lhsReg, ToAddress(rhs.value()), cond, lir->ifTrue(),
+ lir->ifFalse());
+ }
+}
+
+void CodeGenerator::visitCompare(LCompare* comp) {
+ MCompare* mir = comp->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+ const LAllocation* left = comp->getOperand(0);
+ const LAllocation* right = comp->getOperand(1);
+ const LDefinition* def = comp->getDef(0);
+
+ if (mir->compareType() == MCompare::Compare_Object ||
+ mir->compareType() == MCompare::Compare_Symbol ||
+ mir->compareType() == MCompare::Compare_UIntPtr ||
+ mir->compareType() == MCompare::Compare_RefOrNull) {
+ if (right->isConstant()) {
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
+ masm.cmpPtrSet(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right),
+ ToRegister(def));
+ } else {
+ masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+ return;
+ }
+
+ if (right->isConstant()) {
+ masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+ } else {
+ masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+}
+
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+ const MCompare* mir = comp->cmpMir();
+ const MCompare::CompareType type = mir->compareType();
+ const LAllocation* lhs = comp->left();
+ const LAllocation* rhs = comp->right();
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+ Register lhsReg = ToRegister(lhs);
+ const Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
+
+ if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol ||
+ type == MCompare::Compare_UIntPtr ||
+ type == MCompare::Compare_RefOrNull) {
+ if (rhs->isConstant()) {
+ emitBranch(ToRegister(lhs), Imm32(ToInt32(rhs)), cond, ifTrue, ifFalse);
+ } else if (rhs->isGeneralReg()) {
+ emitBranch(lhsReg, ToRegister(rhs), cond, ifTrue, ifFalse);
+ } else {
+ MOZ_CRASH("NYI");
+ }
+ return;
+ }
+
+ if (rhs->isConstant()) {
+ emitBranch(lhsReg, Imm32(ToInt32(comp->right())), cond, ifTrue, ifFalse);
+ } else if (comp->right()->isGeneralReg()) {
+ emitBranch(lhsReg, ToRegister(rhs), cond, ifTrue, ifFalse);
+ } else {
+    // TODO(riscv64): emitBranch with a 32-bit comparison.
+ ScratchRegisterScope scratch(masm);
+ masm.load32(ToAddress(rhs), scratch);
+ emitBranch(lhsReg, Register(scratch), cond, ifTrue, ifFalse);
+ }
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
+ if (lir->mir()->isMod()) {
+ masm.ma_xor(output, output, Operand(output));
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+ if (lir->mir()->isMod()) {
+ masm.ma_mod64(output, lhs, rhs);
+ } else {
+ masm.ma_div64(output, lhs, rhs);
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ if (lir->mir()->isMod()) {
+ masm.ma_modu64(output, lhs, rhs);
+ } else {
+ masm.ma_divu64(output, lhs, rhs);
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGeneratorRiscv64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+ masm.ma_div64(/* result= */ dividend, dividend, divisor);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorRiscv64::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+ masm.ma_mod64(/* result= */ dividend, dividend, divisor);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
+ const MWasmLoad* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
+ ToOutRegister64(lir));
+}
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+ const MWasmStore* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+ ToRegister(lir->ptr()), ptrScratch);
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+ "true expr is reused for input");
+
+ if (falseExpr.value().isRegister()) {
+ masm.moveIfZero(out.reg, ToRegister(falseExpr.value()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.fmv_d_x(ToFloatRegister(lir->output()), ToRegister(lir->input()));
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.fmv_x_d(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->isUnsigned()) {
+ masm.move32To64ZeroExtend(ToRegister(input), Register64(output));
+ } else {
+ masm.slliw(output, ToRegister(input), 0);
+ }
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ if (input->isMemory()) {
+ masm.load32(ToAddress(input), output);
+ } else {
+ masm.slliw(output, ToRegister(input), 0);
+ }
+ } else {
+ MOZ_CRASH("Not implemented.");
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move8SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move16SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Word:
+ masm.move32To64SignExtend(input.reg, output);
+ break;
+ }
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move32To64ZeroExtend(input, Register64(output));
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move64To32(Register64(input), output);
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.reg);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.reg);
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ masm.ma_cmp_set(output, input.reg, zero, Assembler::Equal);
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ Label* oolRejoin = ool->rejoin();
+ bool isSaturating = mir->isSaturating();
+
+ if (fromType == MIRType::Double) {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ } else {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ }
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToDouble(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToDouble(input, output);
+ }
+ } else {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToFloat32(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToFloat32(input, output);
+ }
+ }
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ MBasicBlock* ifTrue = lir->ifTrue();
+ MBasicBlock* ifFalse = lir->ifFalse();
+
+ emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
+ const LAllocation* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxDouble(second, first, true);
+ } else {
+ masm.minDouble(second, first, true);
+ }
+}
+
+void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxFloat32(second, first, true);
+ } else {
+ masm.minFloat32(second, first, true);
+ }
+}
+
+void CodeGenerator::visitAddI(LAddI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_add32(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.addw(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitAddI64(LAddI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitSubI(LSubI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+
+ if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_sub32(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.ma_sub32(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitSubI64(LSubI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitMulI(LMulI* ins) {
+ const LAllocation* lhs = ins->lhs();
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+ MMul* mul = ins->mir();
+
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
+ !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ int32_t constant = ToInt32(rhs);
+ Register src = ToRegister(lhs);
+
+ // Bailout on -0.0
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition cond =
+ (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+ bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
+ }
+
+ switch (constant) {
+ case -1:
+ if (mul->canOverflow()) {
+ bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN),
+ ins->snapshot());
+ }
+
+ masm.ma_sub32(dest, zero, src);
+ break;
+ case 0:
+ masm.move32(zero, dest);
+ break;
+ case 1:
+ masm.move32(src, dest);
+ break;
+ case 2:
+ if (mul->canOverflow()) {
+ Label mulTwoOverflow;
+ masm.ma_add32TestOverflow(dest, src, src, &mulTwoOverflow);
+
+ bailoutFrom(&mulTwoOverflow, ins->snapshot());
+ } else {
+ masm.addw(dest, src, src);
+ }
+ break;
+ default:
+ uint32_t shift = FloorLog2(constant);
+
+ if (!mul->canOverflow() && (constant > 0)) {
+ // If it cannot overflow, we can do lots of optimizations.
+ uint32_t rest = constant - (1 << shift);
+
+ // See if the constant has one bit set, meaning it can be
+ // encoded as a bitshift.
+ if ((1 << shift) == constant) {
+ masm.slliw(dest, src, shift % 32);
+ return;
+ }
+
+ // If the constant cannot be encoded as (1<<C1), see if it can
+ // be encoded as (1<<C1) | (1<<C2), which can be computed
+ // using an add and a shift.
+ uint32_t shift_rest = FloorLog2(rest);
+ if (src != dest && (1u << shift_rest) == rest) {
+ masm.slliw(dest, src, (shift - shift_rest) % 32);
+ masm.add32(src, dest);
+ if (shift_rest != 0) {
+ masm.slliw(dest, dest, shift_rest % 32);
+ }
+ return;
+ }
+ }
+
+ if (mul->canOverflow() && (constant > 0) && (src != dest)) {
+ // To stay on the safe side, only optimize things that are a
+ // power of 2.
+
+ if ((1 << shift) == constant) {
+ ScratchRegisterScope scratch(masm);
+ // dest = lhs * pow(2, shift)
+ masm.slliw(dest, src, shift % 32);
+            // At runtime, check (lhs == dest >> shift); if this does
+            // not hold, some bits were lost due to overflow, and the
+            // computation should be resumed as a double.
+ masm.sraiw(scratch, dest, shift % 32);
+ bailoutCmp32(Assembler::NotEqual, src, Register(scratch),
+ ins->snapshot());
+ return;
+ }
+ }
+
+ if (mul->canOverflow()) {
+ Label mulConstOverflow;
+ masm.ma_mul32TestOverflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
+ &mulConstOverflow);
+
+ bailoutFrom(&mulConstOverflow, ins->snapshot());
+ } else {
+ masm.ma_mul32(dest, src, Imm32(ToInt32(rhs)));
+ }
+ break;
+ }
+ } else {
+ Label multRegOverflow;
+
+ if (mul->canOverflow()) {
+ masm.ma_mul32TestOverflow(dest, ToRegister(lhs), ToRegister(rhs),
+ &multRegOverflow);
+ bailoutFrom(&multRegOverflow, ins->snapshot());
+ } else {
+ masm.mulw(dest, ToRegister(lhs), ToRegister(rhs));
+ }
+
+ if (mul->canBeNegativeZero()) {
+ Label done;
+ masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);
+
+      // The result is -0 if lhs or rhs is negative.
+      // In that case the result must be a double value, so bail out.
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.Acquire();
+ masm.or_(scratch, ToRegister(lhs), ToRegister(rhs));
+ bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
+
+void CodeGenerator::visitMulI64(LMulI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+ const Register64 output = ToOutRegister64(lir);
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ default:
+ if (constant > 0) {
+ if (mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant + 1))) {
+ masm.move64(ToRegister64(lhs), output);
+ masm.lshift64(Imm32(FloorLog2(constant + 1)), output);
+ masm.sub64(ToRegister64(lhs), output);
+ return;
+ } else if (mozilla::IsPowerOfTwo(
+ static_cast<uint32_t>(constant - 1))) {
+ masm.move64(ToRegister64(lhs), output);
+ masm.lshift64(Imm32(FloorLog2(constant - 1u)), output);
+ masm.add64(ToRegister64(lhs), output);
+ return;
+ }
+ // Use shift if constant is power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
+
+void CodeGenerator::visitDivI(LDivI* ins) {
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register temp = ToRegister(ins->getTemp(0));
+ MDiv* mir = ins->mir();
+
+ Label done;
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else if (mir->canTruncateInfinities()) {
+ // Truncated division by zero is zero (Infinity|0 == 0)
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_branch(&done, ShortJump);
+ masm.bind(&notzero);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
+ }
+ }
+
+ // Handle an integer overflow exception from -2147483648 / -1.
+ if (mir->canBeNegativeOverflow()) {
+ Label notMinInt;
+ masm.move32(Imm32(INT32_MIN), temp);
+ masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);
+
+ masm.move32(Imm32(-1), temp);
+ if (mir->trapOnError()) {
+ Label ok;
+ masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+ masm.bind(&ok);
+ } else if (mir->canTruncateOverflow()) {
+ // (-INT32_MIN)|0 == INT32_MIN
+ Label skip;
+ masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(INT32_MIN), dest);
+ masm.ma_branch(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
+ }
+ masm.bind(&notMinInt);
+ }
+
+ // Handle negative 0. (0/-Y)
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
+ bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
+ masm.bind(&nonzero);
+ }
+ // Note: above safety checks could not be verified as Ion seems to be
+ // smarter and requires double arithmetic in such cases.
+
+  // All regular. Let's call div.
+ if (mir->canTruncateRemainder()) {
+ masm.ma_div32(dest, lhs, rhs);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+
+ Label remainderNonZero;
+ masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
+ bailoutFrom(&remainderNonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
+ Register lhs = ToRegister(ins->numerator());
+ Register dest = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->getTemp(0));
+ int32_t shift = ins->shift();
+
+ if (shift != 0) {
+ MDiv* mir = ins->mir();
+ if (!mir->isTruncated()) {
+      // If the remainder is going to be != 0, bail out since the result must
+      // be a double.
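+      // Shifting left by (32 - shift) keeps only the low |shift| bits, so the
+      // result is non-zero exactly when the division is inexact.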
+ masm.slliw(tmp, lhs, (32 - shift) % 32);
+ bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
+ }
+
+ if (!mir->canBeNegativeDividend()) {
+ // Numerator is unsigned, so needs no adjusting. Do the shift.
+ masm.sraiw(dest, lhs, shift % 32);
+ return;
+ }
+
+ // Adjust the value so that shifting produces a correctly rounded result
+ // when the numerator is negative. See 10-1 "Signed Division by a Known
+ // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
+ if (shift > 1) {
+ masm.sraiw(tmp, lhs, 31);
+ masm.srliw(tmp, tmp, (32 - shift) % 32);
+ masm.add32(lhs, tmp);
+ } else {
+ masm.srliw(tmp, lhs, (32 - shift) % 32);
+ masm.add32(lhs, tmp);
+ }
+
+ // Do the shift.
+ masm.sraiw(dest, tmp, shift % 32);
+ } else {
+ masm.move32(lhs, dest);
+ }
+}
+
+void CodeGenerator::visitModI(LModI* ins) {
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register callTemp = ToRegister(ins->callTemp());
+ MMod* mir = ins->mir();
+ Label done, prevent;
+
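+  // Save the dividend; it is needed for the negative-zero check below.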
+ masm.move32(lhs, callTemp);
+
+ // Prevent INT_MIN % -1;
+ // The integer division will give INT_MIN, but we want -(double)INT_MIN.
+ if (mir->canBeNegativeDividend()) {
+ masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
+ if (mir->isTruncated()) {
+ // (INT_MIN % -1)|0 == 0
+ Label skip;
+ masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_branch(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
+ }
+ masm.bind(&prevent);
+ }
+
+  // 0/X (with X < 0) is bad because both of these values *should* be
+  // doubles, and the result should be -0.0, which cannot be represented as
+  // an integer. X/0 is bad because it gives garbage (or traps), when it
+  // should give Infinity, -Infinity or NaN.
+
+  // Prevent 0 / X (with X < 0) and X / 0.
+  // Testing X / Y, compare Y with 0. There are three cases:
+  // If (Y < 0), compare X with 0 and bail if X == 0.
+  // If (Y == 0), simply bail.
+  // If (Y > 0), don't bail.
+
+ if (mir->canBeDivideByZero()) {
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ Label skip;
+ masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_branch(&done, ShortJump);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
+ if (mir->canBeNegativeDividend()) {
+ Label notNegative;
+ masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
+ if (mir->isTruncated()) {
+ // NaN|0 == 0 and (0 % -X)|0 == 0
+ Label skip;
+ masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_branch(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.bind(&notNegative);
+ }
+
+ masm.ma_mod32(dest, lhs, rhs);
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ // See if X < 0
+ masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
+ }
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
+ Register in = ToRegister(ins->getOperand(0));
+ Register out = ToRegister(ins->getDef(0));
+ MMod* mir = ins->mir();
+ Label negative, done;
+
+ masm.move32(in, out);
+ masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
+  // Switch based on the sign of the lhs.
+  // Positive numbers are handled with just a bitmask.
+ masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
+ {
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.ma_branch(&done, ShortJump);
+ }
+
+  // Negative numbers need a negate, bitmask, negate sequence.
+ {
+ masm.bind(&negative);
+ masm.neg32(out);
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.neg32(out);
+ }
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
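+      // Only the negative-dividend path reaches this check; a zero result
+      // then stands for -0.0, so bail out to produce a double.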
+ bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitModMaskI(LModMaskI* ins) {
+ Register src = ToRegister(ins->getOperand(0));
+ Register dest = ToRegister(ins->getDef(0));
+ Register tmp0 = ToRegister(ins->getTemp(0));
+ Register tmp1 = ToRegister(ins->getTemp(1));
+ MMod* mir = ins->mir();
+
+ if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
+ MOZ_ASSERT(mir->fallible());
+
+ Label bail;
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ } else {
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
+ }
+}
+
+void CodeGenerator::visitBitNotI(LBitNotI* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ const LDefinition* dest = ins->getDef(0);
+ MOZ_ASSERT(!input->isConstant());
+
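+  // ~input is computed as nor(input, zero).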
+ masm.nor(ToRegister(dest), ToRegister(input), zero);
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ MOZ_ASSERT(!input->isConstant());
+ Register inputReg = ToRegister(input);
+ MOZ_ASSERT(inputReg == ToRegister(ins->output()));
+ masm.nor(inputReg, inputReg, zero);
+}
+
+void CodeGenerator::visitBitOpI(LBitOpI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+  // All of these bitops take either an Imm32 or an integer register.
+ switch (ins->bitop()) {
+ case JSOp::BitOr:
+ if (rhs->isConstant()) {
+ masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.or_(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
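+        // A zero-amount slliw sign-extends the low 32 bits of the result
+        // (sext.w).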
+ masm.slliw(ToRegister(dest), ToRegister(dest), 0);
+ }
+ break;
+ case JSOp::BitXor:
+ if (rhs->isConstant()) {
+ masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.ma_xor(ToRegister(dest), ToRegister(lhs),
+ Operand(ToRegister(rhs)));
+ masm.slliw(ToRegister(dest), ToRegister(dest), 0);
+ }
+ break;
+ case JSOp::BitAnd:
+ if (rhs->isConstant()) {
+ masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.and_(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ masm.slliw(ToRegister(dest), ToRegister(dest), 0);
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOp::BitOr:
+ if (IsConstant(rhs)) {
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitXor:
+ if (IsConstant(rhs)) {
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitAnd:
+ if (IsConstant(rhs)) {
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitShiftI(LShiftI* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.slliw(dest, lhs, shift % 32);
+ } else {
+ masm.move32(lhs, dest);
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.sraiw(dest, lhs, shift % 32);
+ } else {
+ masm.move32(lhs, dest);
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.srliw(dest, lhs, shift % 32);
+ } else {
+ // x >>> 0 can overflow.
+ if (ins->mir()->toUrsh()->fallible()) {
+ bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.move32(lhs, dest);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+    // The shift amount must be masked into the 0-31 range.
+ masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));
+
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ masm.sllw(dest, lhs, dest);
+ break;
+ case JSOp::Rsh:
+ masm.sraw(dest, lhs, dest);
+ break;
+ case JSOp::Ursh:
+ masm.srlw(dest, lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Rsh:
+ masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Ursh:
+ masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void CodeGenerator::visitRotateI64(LRotateI64* lir) {
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ MOZ_ASSERT(input == output);
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c) {
+ return;
+ }
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ } else {
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ }
+ } else {
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ } else {
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+ }
+}
+
+void CodeGenerator::visitUrshD(LUrshD* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp());
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ masm.srliw(temp, lhs, ToInt32(rhs) % 32);
+ } else {
+ masm.srlw(temp, lhs, ToRegister(rhs));
+ }
+
+ masm.convertUInt32ToDouble(temp, out);
+}
+
+void CodeGenerator::visitClzI(LClzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.Clz32(output, input);
+}
+
+void CodeGenerator::visitCtzI(LCtzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.Ctz32(output, input);
+}
+
+void CodeGenerator::visitPopcntI(LPopcntI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->temp0());
+
+ masm.Popcnt32(input, output, tmp);
+}
+
+void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ Register64 output = ToOutRegister64(ins);
+ Register tmp = ToRegister(ins->getTemp(0));
+
+ masm.Popcnt64(input.scratchReg(), output.scratchReg(), tmp);
+}
+
+void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ ScratchDoubleScope fpscratch(masm);
+
+ Label done, skip;
+
+  // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), fpscratch);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.Acquire();
+
+ masm.ma_compareF64(scratch, Assembler::DoubleNotEqualOrUnordered, input,
+ fpscratch);
+ masm.ma_branch(&skip, Assembler::Equal, scratch, Operand(1));
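+  // The input is -Infinity here: negating the -Infinity constant yields the
+  // expected +Infinity.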
+ masm.fneg_d(output, fpscratch);
+ masm.ma_branch(&done, ShortJump);
+
+ masm.bind(&skip);
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
+ masm.loadConstantDouble(0.0, fpscratch);
+ masm.fadd_d(output, input, fpscratch);
+ masm.fsqrt_d(output, output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitMathD(LMathD* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.fadd_d(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.fsub_d(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.fmul_d(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.fdiv_d(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitMathF(LMathF* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.fadd_s(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.fsub_s(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.fmul_s(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.fdiv_s(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* lir) {
+ emitTruncateDouble(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* lir) {
+ emitTruncateFloat32(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
+ auto input = ToFloatRegister(lir->input());
+ auto output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ if (mir->isUnsigned()) {
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+ return;
+ }
+
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCopySignF(LCopySignF* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ masm.fsgnj_s(output, lhs, rhs);
+}
+
+void CodeGenerator::visitCopySignD(LCopySignD* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ masm.fsgnj_d(output, lhs, rhs);
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ const ValueOperand out = ToOutValue(value);
+
+ masm.moveValue(value->value(), out);
+}
+
+void CodeGenerator::visitDouble(LDouble* ins) {
+ const LDefinition* out = ins->getDef(0);
+
+ masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitFloat32(LFloat32* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
+ FloatRegister input = ToFloatRegister(test->input());
+ ScratchDoubleScope fpscratch(masm);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantDouble(0.0, fpscratch);
+ // If 0, or NaN, the result is false.
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(DoubleFloat, input, fpscratch, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(DoubleFloat, input, fpscratch, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
+ FloatRegister input = ToFloatRegister(test->input());
+ ScratchFloat32Scope fpscratch(masm);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantFloat32(0.0f, fpscratch);
+ // If 0, or NaN, the result is false.
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(SingleFloat, input, fpscratch, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(SingleFloat, input, fpscratch, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitCompareD(LCompareD* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_compareF64(dest, cond, lhs, rhs);
+}
+
+void CodeGenerator::visitCompareF(LCompareF* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_compareF32(dest, cond, lhs, rhs);
+}
+
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(DoubleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(DoubleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(SingleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(SingleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
+ ScratchRegisterScope scratch(masm);
+ if (lir->right()->isConstant()) {
+ masm.ma_and(scratch, ToRegister(lir->left()), Imm32(ToInt32(lir->right())));
+ } else {
+ masm.ma_and(scratch, ToRegister(lir->left()), ToRegister(lir->right()));
+ }
+ emitBranch(scratch, Register(scratch), lir->cond(), lir->ifTrue(),
+ lir->ifFalse());
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ masm.convertUInt32ToDouble(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitNotI(LNotI* ins) {
+ masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
+ ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitNotD(LNotD* ins) {
+  // Since this operation is a logical NOT, we want to set the output if the
+  // double is falsy, which means 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+ ScratchDoubleScope fpscratch(masm);
+
+ masm.loadConstantDouble(0.0, fpscratch);
+ masm.ma_compareF64(dest, Assembler::DoubleEqualOrUnordered, in, fpscratch);
+}
+
+void CodeGenerator::visitNotF(LNotF* ins) {
+  // Since this operation is a logical NOT, we want to set the output if the
+  // float32 is falsy, which means 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+ ScratchFloat32Scope fpscratch(masm);
+
+ masm.loadConstantFloat32(0.0f, fpscratch);
+ masm.ma_compareF32(dest, Assembler::DoubleEqualOrUnordered, in, fpscratch);
+}
+
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+ masm.memoryBarrier(ins->type());
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
+
+void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
+
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
+ const MAsmJSLoadHeap* mir = ins->mir();
+ const LAllocation* ptr = ins->ptr();
+ const LDefinition* out = ins->output();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->access().type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ size = 8;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ size = 8;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ size = 16;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ size = 16;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ size = 32;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ size = 32;
+ break;
+ case Scalar::Float64:
+ isFloat = true;
+ size = 64;
+ break;
+ case Scalar::Float32:
+ isFloat = true;
+ size = 32;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
+ } else {
+ masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Register ptrReg = ToRegister(ptr);
+
+ if (!mir->needsBoundsCheck()) {
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
+ ToFloatRegister(out));
+ } else {
+ masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
+ ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Label done, outOfRange;
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
+ ToRegister(boundsCheckLimit), &outOfRange);
+  // Offset is in range; load the value.
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
+ ToFloatRegister(out));
+ } else {
+ masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
+ ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ masm.ma_branch(&done, ShortJump);
+ masm.bind(&outOfRange);
+ // Offset is out of range. Load default values.
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
+ } else {
+ masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
+ }
+ } else {
+ masm.move32(Imm32(0), ToRegister(out));
+ }
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
+ const MAsmJSStoreHeap* mir = ins->mir();
+ const LAllocation* value = ins->value();
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->access().type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ size = 8;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ size = 8;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ size = 16;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ size = 16;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ size = 32;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ size = 32;
+ break;
+ case Scalar::Float64:
+ isFloat = true;
+ size = 64;
+ break;
+ case Scalar::Float32:
+ isFloat = true;
+ size = 32;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+
+ if (isFloat) {
+ FloatRegister freg = ToFloatRegister(value);
+ Address addr(HeapReg, ptrImm);
+ if (size == 32) {
+ masm.storeFloat32(freg, addr);
+ } else {
+ masm.storeDouble(freg, addr);
+ }
+ } else {
+ masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Register ptrReg = ToRegister(ptr);
+ Address dstAddr(ptrReg, 0);
+
+ if (!mir->needsBoundsCheck()) {
+ if (isFloat) {
+ FloatRegister freg = ToFloatRegister(value);
+ BaseIndex bi(HeapReg, ptrReg, TimesOne);
+ if (size == 32) {
+ masm.storeFloat32(freg, bi);
+ } else {
+ masm.storeDouble(freg, bi);
+ }
+ } else {
+ masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Label outOfRange;
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
+ ToRegister(boundsCheckLimit), &outOfRange);
+
+  // Offset is in range; store the value.
+ if (isFloat) {
+ if (size == 32) {
+ masm.storeFloat32(ToFloatRegister(value),
+ BaseIndex(HeapReg, ptrReg, TimesOne));
+    } else {
+      masm.storeDouble(ToFloatRegister(value),
+                       BaseIndex(HeapReg, ptrReg, TimesOne));
+    }
+ } else {
+ masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.bind(&outOfRange);
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MWasmCompareExchangeHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MWasmAtomicExchangeHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
+ maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MOZ_ASSERT(ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+
+ masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
+ ToRegister(ins->value()), srcAddr, valueTemp,
+ offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MOZ_ASSERT(!ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+ masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
+ ToRegister(ins->value()), srcAddr, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ if (ins->arg()->isConstant()) {
+ masm.storePtr(ImmWord(ToInt32(ins->arg())),
+ Address(StackPointer, mir->spOffset()));
+ } else {
+ if (ins->arg()->isGeneralReg()) {
+ masm.storePtr(ToRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ } else if (mir->input()->type() == MIRType::Double) {
+ masm.storeDouble(ToFloatRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ } else {
+ masm.storeFloat32(ToFloatRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ }
+ }
+}
+
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg())) {
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ } else {
+ masm.store64(ToRegister64(ins->arg()), dst);
+ }
+}
+
+void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ const LAllocation* falseExpr = ins->falseExpr();
+
+ if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+ if (falseExpr->isRegister()) {
+ masm.moveIfZero(out, ToRegister(falseExpr), cond);
+ } else {
+ masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
+ }
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+
+ if (falseExpr->isFloatReg()) {
+ if (mirType == MIRType::Float32) {
+ masm.ma_fmovz(SingleFloat, out, ToFloatRegister(falseExpr), cond);
+ } else if (mirType == MIRType::Double) {
+ masm.ma_fmovz(DoubleFloat, out, ToFloatRegister(falseExpr), cond);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+ } else {
+ Label done;
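+    // A non-zero condition keeps the true expression already in the output.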
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+
+ if (mirType == MIRType::Float32) {
+ masm.loadFloat32(ToAddress(falseExpr), out);
+ } else if (mirType == MIRType::Double) {
+ masm.loadDouble(ToAddress(falseExpr), out);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+
+ masm.bind(&done);
+ }
+}
+
+// We expect to handle only the case where compare is {U,}Int32 and select is
+// {U,}Int32, and the "true" input is reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+ ins->compareType() == MCompare::Compare_UInt32;
+ bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+ MOZ_RELEASE_ASSERT(
+ cmpIs32bit && selIs32bit,
+ "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+ Register trueExprAndDest = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+ "true expr input is reused for output");
+
+ Assembler::Condition cond = Assembler::InvertCondition(
+ JSOpToCondition(ins->compareType(), ins->jsop()));
+ const LAllocation* rhs = ins->rightExpr();
+ const LAllocation* falseExpr = ins->ifFalseExpr();
+ Register lhs = ToRegister(ins->leftExpr());
+
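+  // The output already holds the true expression; overwrite it with the
+  // false expression when the inverted comparison holds.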
+ masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+}
+
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+ mozilla::DebugOnly<MIRType> from = ins->input()->type();
+
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.fmv_x_w(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.fmv_w_x(ToFloatRegister(lir->output()), ToRegister(lir->input()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Label done;
+
+ // Prevent divide by zero.
+ if (ins->canBeDivideByZero()) {
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ // Infinity|0 == 0
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), output);
+ masm.ma_branch(&done, ShortJump);
+ masm.bind(&notzero);
+ }
+ } else {
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
+ masm.ma_modu32(output, lhs, rhs);
+
+  // If the remainder is non-zero, bail out since this must be a double.
+ if (ins->mir()->isDiv()) {
+ if (!ins->mir()->toDiv()->canTruncateRemainder()) {
+ bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
+ }
+ // Get quotient
+ masm.ma_divu32(output, lhs, rhs);
+ }
+
+ if (!ins->mir()->isTruncated()) {
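+    // An unsigned result that does not fit in an int32 looks negative here;
+    // bail out so it can be represented as a double.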
+ bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+
+ BaseIndex address(base, index, mir->scale(), mir->displacement());
+ masm.computeEffectiveAddress(address, output);
+}
+
+void CodeGenerator::visitNegI(LNegI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ma_sub32(output, zero, input);
+}
+
+void CodeGenerator::visitNegI64(LNegI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ MOZ_ASSERT(input == ToOutRegister64(ins));
+ masm.neg64(input);
+}
+
+void CodeGenerator::visitNegD(LNegD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.fneg_d(output, input);
+}
+
+void CodeGenerator::visitNegF(LNegF* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.fneg_s(output, input);
+}
+
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
+ Label ok;
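+  // A carry out of the 32-bit addition means the effective address is out of
+  // bounds.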
+ masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
+ &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register64 base = ToRegister64(lir->base());
+ Register64 out = ToOutRegister64(lir);
+
+ Label ok;
+ masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
+ ImmWord(mir->offset()), &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+ LAtomicTypedArrayElementBinop* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Register value = ToRegister(lir->value());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp, outTemp, output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp, outTemp, output);
+ }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+ LAtomicTypedArrayElementBinopForEffect* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Register value = ToRegister(lir->value());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp);
+ }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+ LCompareExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, outTemp,
+ output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, outTemp,
+ output);
+ }
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+ LAtomicExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+ Register value = ToRegister(lir->value());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+ valueTemp, offsetTemp, maskTemp, outTemp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+ valueTemp, offsetTemp, maskTemp, outTemp, output);
+ }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut(out);
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(oldval, temp1);
+ masm.loadBigInt64(newval, tempOut);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = Register64(ToRegister(lir->temp2()));
+ Register out = ToRegister(lir->output());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_ASSERT(lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut = Register64(out);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_ASSERT(!lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ }
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ auto sync = Synchronization::Load();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ }
+ masm.memoryBarrierAfter(sync);
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+ auto sync = Synchronization::Store();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.store64(temp1, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.store64(temp1, dest);
+ }
+ masm.memoryBarrierAfter(sync);
+}
+
+void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 oldValue = ToRegister64(lir->oldValue());
+ Register64 newValue = ToRegister64(lir->newValue());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+ masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
+ output);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+ masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 output = ToOutRegister64(lir);
+ Register64 temp(ToRegister(lir->getTemp(0)));
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+ masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
+ addr, temp, output);
+}
+
+void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }
+
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+ LWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmVariableShiftSimd128(
+ LWasmVariableShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmConstantShiftSimd128(
+ LWasmConstantShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmSignReplicationSimd128(
+ LWasmSignReplicationSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+ LWasmReplaceInt64LaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+ LWasmReduceAndBranchSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+ LWasmReduceSimd128ToInt64* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
diff --git a/js/src/jit/riscv64/CodeGenerator-riscv64.h b/js/src/jit/riscv64/CodeGenerator-riscv64.h
new file mode 100644
index 0000000000..793c834085
--- /dev/null
+++ b/js/src/jit/riscv64/CodeGenerator-riscv64.h
@@ -0,0 +1,210 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_CodeGenerator_riscv64_h
+#define jit_riscv64_CodeGenerator_riscv64_h
+
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/MacroAssembler-riscv64.h"
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorRiscv64;
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+using OutOfLineWasmTruncateCheck =
+ OutOfLineWasmTruncateCheckBase<CodeGeneratorRiscv64>;
+
+class CodeGeneratorRiscv64 : public CodeGeneratorShared {
+ friend class MoveResolverLA;
+
+ protected:
+ CodeGeneratorRiscv64(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm);
+
+ NonAssertingLabel deoptLabel_;
+
+ Operand ToOperand(const LAllocation& a);
+ Operand ToOperand(const LAllocation* a);
+ Operand ToOperand(const LDefinition* def);
+
+#ifdef JS_PUNBOX64
+ Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branch32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTest32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs,
+ LSnapshot* snapshot) {
+    // TODO(riscv64): branchTestPtr isn't used due to '-Wundefined-inline'.
+ MOZ_ASSERT(c == Assembler::Zero || c == Assembler::NonZero ||
+ c == Assembler::Signed || c == Assembler::NotSigned);
+ Label bail;
+ if (lhs == rhs) {
+ masm.ma_b(lhs, rhs, &bail, c);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.and_(scratch, lhs, rhs);
+ masm.ma_b(scratch, scratch, &bail, c);
+ }
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+ Label bail;
+ ScratchRegisterScope scratch(masm);
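+    // Only the low byte of the boolean is significant.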
+ masm.ma_and(scratch, reg, Imm32(0xFF));
+ masm.ma_b(scratch, scratch, &bail, Assembler::Zero);
+ bailoutFrom(&bail, snapshot);
+ }
+
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ bool generateOutOfLineCode();
+
+ template <typename T>
+ void branchToBlock(Register lhs, T rhs, MBasicBlock* mir,
+ Assembler::Condition cond) {
+ masm.ma_b(lhs, rhs, skipTrivialBlocks(mir)->lir()->label(), cond);
+ }
+ void branchToBlock(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ MBasicBlock* mir, Assembler::DoubleCondition cond);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ template <typename T>
+ void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
+ MBasicBlock* mirTrue, MBasicBlock* mirFalse) {
+ if (isNextBlock(mirFalse->lir())) {
+ branchToBlock(lhs, rhs, mirTrue, cond);
+ } else {
+ branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register base);
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+
+ void generateInvalidateEpilogue();
+
+ // Generating a result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+
+ // Generating no result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+
+ public:
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+
+ protected:
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.Acquire();
+ masm.splitTag(value.valueReg(), scratch);
+ emitBranch(scratch, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.Acquire();
+ masm.splitTag(value.valueReg(), scratch);
+ emitBranch(scratch, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.Acquire();
+ masm.splitTag(value.valueReg(), scratch);
+ emitBranch(scratch, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue, ifFalse);
+ }
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+};
+
+typedef CodeGeneratorRiscv64 CodeGeneratorSpecific;
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorRiscv64> {
+ protected:
+ LSnapshot* snapshot_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot) : snapshot_(snapshot) {}
+
+ void accept(CodeGeneratorRiscv64* codegen) override;
+
+ LSnapshot* snapshot() const { return snapshot_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_CodeGenerator_riscv64_h */
diff --git a/js/src/jit/riscv64/LIR-riscv64.h b/js/src/jit/riscv64/LIR-riscv64.h
new file mode 100644
index 0000000000..7143919608
--- /dev/null
+++ b/js/src/jit/riscv64/LIR-riscv64.h
@@ -0,0 +1,399 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_LIR_riscv64_h
+#define jit_riscv64_LIR_riscv64_h
+
+namespace js {
+namespace jit {
+
+class LUnbox : public LInstructionHelper<1, 1, 0> {
+ protected:
+ LUnbox(LNode::Opcode opcode, const LAllocation& input)
+ : LInstructionHelper(opcode) {
+ setOperand(0, input);
+ }
+
+ public:
+ LIR_HEADER(Unbox);
+
+ explicit LUnbox(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LUnbox {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnbox(classOpcode, input), type_(type) {}
+
+ MIRType type() const { return type_; }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ explicit LWasmUint32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ explicit LWasmUint32ToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LDivI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 1> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ int32_t shift() const { return shift_; }
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LModI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& callTemp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition* callTemp() { return getTemp(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp0,
+ const LDefinition& temp1, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+// Takes a tableswitch with an integer input to select the target.
+class LTableSwitch : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+ const LAllocation* index() { return getOperand(0); }
+ const LDefinition* tempInt() { return getTemp(0); }
+  // Added so the CodeGenerator can share code with LTableSwitchV.
+ const LDefinition* tempPointer() { return getTemp(1); }
+};
+
+// Takes a tableswitch with a boxed Value input to select the target.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() { return getTemp(0); }
+ const LDefinition* tempFloat() { return getTemp(1); }
+ const LDefinition* tempPointer() { return getTemp(2); }
+};
+
+class LMulI : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(MulI);
+
+ LMulI() : LBinaryMath(classOpcode) {}
+
+ MMul* mir() { return mir_->toMul(); }
+};
+
+class LUDivOrMod : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(UDivOrMod);
+
+ LUDivOrMod() : LBinaryMath(classOpcode) {}
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ bool trapOnError() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->trapOnError();
+ }
+ return mir_->toDiv()->trapOnError();
+ }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
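+// Operand layout (INT64_PIECES is 1 on riscv64): operand 0 is ptr, 1 is
+// oldValue, 2 is newValue.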
+class LWasmCompareExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES + INT64_PIECES,
+ 0> {
+ public:
+ LIR_HEADER(WasmCompareExchangeI64);
+
+ LWasmCompareExchangeI64(const LAllocation& ptr,
+ const LInt64Allocation& oldValue,
+ const LInt64Allocation& newValue)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, oldValue);
+ setInt64Operand(1 + INT64_PIECES, newValue);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation oldValue() { return getInt64Operand(1); }
+ const LInt64Allocation newValue() {
+ return getInt64Operand(1 + INT64_PIECES);
+ }
+ const MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+};
+
+class LWasmAtomicExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmAtomicExchangeI64);
+
+ LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const MWasmAtomicExchangeHeap* mir() const {
+ return mir_->toWasmAtomicExchangeHeap();
+ }
+};
+
+class LWasmAtomicBinopI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 2> {
+ public:
+ LIR_HEADER(WasmAtomicBinopI64);
+
+ LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const MWasmAtomicBinopHeap* mir() const {
+ return mir_->toWasmAtomicBinopHeap();
+ }
+};
+
+class LDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_LIR_riscv64_h */
diff --git a/js/src/jit/riscv64/Lowering-riscv64.cpp b/js/src/jit/riscv64/Lowering-riscv64.cpp
new file mode 100644
index 0000000000..b32896694a
--- /dev/null
+++ b/js/src/jit/riscv64/Lowering-riscv64.cpp
@@ -0,0 +1,1087 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/riscv64/Lowering-riscv64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+LTableSwitch* LIRGeneratorRiscv64::newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV* LIRGeneratorRiscv64::newLTableSwitchV(
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
+ tempDouble(), temp(), tableswitch);
+}
+
+void LIRGeneratorRiscv64::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+template <size_t Temps>
+void LIRGeneratorRiscv64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES,
+ "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES,
+ "Assume Count is located at INT64_PIECES.");
+
+ ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorRiscv64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorRiscv64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+// x = !y
+void LIRGeneratorRiscv64::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x + y
+void LIRGeneratorRiscv64::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void LIRGeneratorRiscv64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorRiscv64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorRiscv64::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ bool needsTemp = false;
+ bool cannotAliasRhs = false;
+ bool reuseInput = true;
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ (willHaveDifferentLIRNodes(lhs, rhs) || cannotAliasRhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+
+ if (needsTemp) {
+ ins->setTemp(0, temp());
+ }
+ if (reuseInput) {
+ defineInt64ReuseInput(ins, mir, 0);
+ } else {
+ defineInt64(ins, mir);
+ }
+}
+
+void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template <size_t Temps>
+void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegister(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir,
+ MDefinition* lhs,
+ MDefinition* rhs);
+template void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 2, 1>* ins,
+ MDefinition* mir,
+ MDefinition* lhs,
+ MDefinition* rhs);
+
+void LIRGeneratorRiscv64::lowerForCompareI64AndBranch(
+ MTest* mir, MCompare* comp, JSOp op, MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ LCompareI64AndBranch* lir = new (alloc())
+ LCompareI64AndBranch(comp, op, useInt64Register(left),
+ useInt64OrConstant(right), ifTrue, ifFalse);
+ add(lir, mir);
+}
+
+void LIRGeneratorRiscv64::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
+ MInstruction* mir,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+LBoxAllocation LIRGeneratorRiscv64::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+LAllocation LIRGeneratorRiscv64::useByteOpRegister(MDefinition* mir) {
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorRiscv64::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorRiscv64::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition LIRGeneratorRiscv64::tempByteOpRegister() { return temp(); }
+LDefinition LIRGeneratorRiscv64::tempToUnbox() { return temp(); }
+
+void LIRGeneratorRiscv64::lowerUntypedPhiInput(MPhi* phi,
+ uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+void LIRGeneratorRiscv64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+void LIRGeneratorRiscv64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ defineTypedPhi(phi, lirIndex);
+}
+
+void LIRGeneratorRiscv64::lowerNegI(MInstruction* ins, MDefinition* input) {
+ define(new (alloc()) LNegI(useRegisterAtStart(input)), ins);
+}
+void LIRGeneratorRiscv64::lowerNegI64(MInstruction* ins, MDefinition* input) {
+ defineInt64ReuseInput(new (alloc()) LNegI64(useInt64RegisterAtStart(input)),
+ ins, 0);
+}
+
+void LIRGeneratorRiscv64::lowerMulI(MMul* mul, MDefinition* lhs,
+ MDefinition* rhs) {
+ LMulI* lir = new (alloc()) LMulI;
+ if (mul->fallible()) {
+ assignSnapshot(lir, mul->bailoutKind());
+ }
+
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void LIRGeneratorRiscv64::lowerDivI(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+ // possible; division by negative powers of two can be optimized in a
+ // similar manner as positive powers of two, and division by other
+ // constants can be optimized by a reciprocal multiplication technique.
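+    // For example, rhs == 8 gives shift == 3 and (1 << 3) == 8, so the
+    // LDivPowTwoI path below is taken; rhs == 6 gives shift == 2 but
+    // (1 << 2) != 6, so the generic LDivI path below is used instead.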
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI* lir =
+ new (alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+ }
+
+ LDivI* lir = new (alloc())
+ LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+}
+
+void LIRGeneratorRiscv64::lowerDivI64(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorRiscv64::lowerModI(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir =
+ new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
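+      // rhs values of the form (1 << (shift + 1)) - 1 (e.g. 3, 7 or 15) can
+      // be handled by LModMaskI: x % (2^k - 1) can be computed by summing the
+      // k-bit digits of x, since 2^k is congruent to 1 modulo 2^k - 1.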
+ LModMaskI* lir = new (alloc())
+ LModMaskI(useRegister(mod->lhs()), temp(LDefinition::GENERAL),
+ temp(LDefinition::GENERAL), shift + 1);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ }
+ }
+ LModI* lir =
+ new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ temp(LDefinition::GENERAL));
+
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+}
+
+void LIRGeneratorRiscv64::lowerModI64(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorRiscv64::lowerUDiv(MDiv* div) {
+ MDefinition* lhs = div->getOperand(0);
+ MDefinition* rhs = div->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+
+ define(lir, div);
+}
+
+void LIRGeneratorRiscv64::lowerUDivI64(MDiv* div) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorRiscv64::lowerUMod(MMod* mod) {
+ MDefinition* lhs = mod->getOperand(0);
+ MDefinition* rhs = mod->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+
+ define(lir, mod);
+}
+
+void LIRGeneratorRiscv64::lowerUModI64(MMod* mod) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorRiscv64::lowerUrshD(MUrsh* mir) {
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new (alloc())
+ LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void LIRGeneratorRiscv64::lowerPowOfTwoI(MPow* mir) {
+ int32_t base = mir->input()->toConstant()->toInt32();
+ MDefinition* power = mir->power();
+
+ auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
+ assignSnapshot(lir, mir->bailoutKind());
+ define(lir, mir);
+}
+
+void LIRGeneratorRiscv64::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerBigIntLsh(MBigIntLsh* ins) {
+ auto* lir = new (alloc()) LBigIntLsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerBigIntRsh(MBigIntRsh* ins) {
+ auto* lir = new (alloc()) LBigIntRsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new (alloc()) LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
+}
+
+void LIRGeneratorRiscv64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new (alloc()) LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
+}
+
+void LIRGeneratorRiscv64::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGeneratorRiscv64::lowerWasmSelectI(MWasmSelect* select) {
+ auto* lir = new (alloc())
+ LWasmSelect(useRegisterAtStart(select->trueExpr()),
+ useAny(select->falseExpr()), useRegister(select->condExpr()));
+ defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
+}
+
+void LIRGeneratorRiscv64::lowerWasmSelectI64(MWasmSelect* select) {
+ auto* lir = new (alloc()) LWasmSelectI64(
+ useInt64RegisterAtStart(select->trueExpr()),
+ useInt64(select->falseExpr()), useRegister(select->condExpr()));
+ defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
+}
+
+// On riscv64 we specialize only the cases where the compare is {U,}Int32 and
+// the select is {U,}Int32.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32);
+}
+
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ auto* lir = new (alloc()) LWasmCompareAndSelect(
+ useRegister(lhs), useRegister(rhs), compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()), useRegister(ins->falseExpr()));
+ defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
+
+void LIRGeneratorRiscv64::lowerWasmBuiltinTruncateToInt32(
+ MWasmBuiltinTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ if (opd->type() == MIRType::Double) {
+ define(new (alloc()) LWasmBuiltinTruncateDToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+ return;
+ }
+
+ define(new (alloc()) LWasmBuiltinTruncateFToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorRiscv64::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGeneratorRiscv64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+void LIRGeneratorRiscv64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+void LIRGeneratorRiscv64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useRegister(ins->value());
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
+}
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
+ LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* box = unbox->getOperand(0);
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnbox* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new (alloc())
+ LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new (alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new (alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ define(lir, unbox);
+}
+
+void LIRGenerator::visitAbs(MAbs* ins) {
+ define(allocateAbs(ins, useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitCopySign(MCopySign* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double) {
+ lir = new (alloc()) LCopySignD();
+ } else {
+ lir = new (alloc()) LCopySignF();
+ }
+
+ lir->setTemp(0, temp());
+ lir->setTemp(1, temp());
+
+ lir->setOperand(0, useRegisterAtStart(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitPowHalf(MPowHalf* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ defineInt64(
+ new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ defineInt64(new (alloc())
+ LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
+ ins);
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
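+  // For example, a compareExchange on a Uint32Array may yield a value above
+  // INT32_MAX (e.g. 4294967295), which is then returned as a double; that is
+  // the Scalar::Uint32 case given an outTemp below.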
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LCompareExchangeTypedArrayElement* lir = new (alloc())
+ LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
+ outTemp, valueTemp, offsetTemp,
+ maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LDefinition temp2 = temp();
+
+ auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
+ elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ outTemp = temp();
+ }
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir =
+ new (alloc()) LAtomicExchangeTypedArrayElement(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We can omit allocating the result BigInt.
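+    // Roughly, Atomics.add(i64s, 0, 1n); evaluated only for its effect takes
+    // this path (i64s being a BigInt64Array), while
+    // let old = Atomics.add(i64s, 0, 1n); takes Case 2 below, where a result
+    // BigInt must be allocated.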
+
+ if (ins->isForEffect()) {
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp1, temp2);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (ins->isForEffect()) {
+ LAtomicTypedArrayElementBinopForEffect* lir =
+ new (alloc()) LAtomicTypedArrayElementBinopForEffect(
+ elements, index, value, valueTemp, offsetTemp, maskTemp);
+ add(lir, ins);
+ return;
+ }
+
+ // For a Uint32Array with a known double result we need a temp for
+ // the intermediate output.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
+ LAtomicTypedArrayElementBinop* lir =
+ new (alloc()) LAtomicTypedArrayElementBinop(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+ // We have no memory-base value, meaning that HeapReg is to be used as the
+ // memory base. This follows from the definition of
+ // FunctionCompiler::maybeLoadMemoryBase() in WasmIonCompile.cpp.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ auto* lir =
+ new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+  // See comment in visitAsmJSLoadHeap just above.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+ limitAlloc, LAllocation()),
+ ins);
+}
+
+void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
+ auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
+  // sign-extended on riscv64 and we should explicitly promote it to 64-bit
+  // before using it as an index register in memory accesses.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ LAllocation ptr;
+ ptr = useRegisterAtStart(base);
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc()) LWasmLoadI64(ptr);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LWasmLoad(ptr);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ MDefinition* value = ins->value();
+
+ if (ins->access().type() == Scalar::Int64) {
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
+ if (ins->type() == MIRType::Int32) {
+ define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ } else if (ins->type() == MIRType::Float32) {
+ define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ }
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir =
+ new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir =
+ new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmCompareExchangeI64(
+ useRegister(base), useInt64Register(ins->oldValue()),
+ useInt64Register(ins->newValue()));
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmCompareExchangeHeap* lir = new (alloc()) LWasmCompareExchangeHeap(
+ useRegister(base), useRegister(ins->oldValue()),
+ useRegister(ins->newValue()), valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmAtomicExchangeI64(
+ useRegister(base), useInt64Register(ins->value()));
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmAtomicExchangeHeap* lir = new (alloc())
+ LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc())
+ LWasmAtomicBinopI64(useRegister(base), useInt64Register(ins->value()));
+ lir->setTemp(0, temp());
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (!ins->hasUses()) {
+ LWasmAtomicBinopHeapForEffect* lir = new (alloc())
+ LWasmAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()), valueTemp,
+ offsetTemp, maskTemp);
+ add(lir, ins);
+ return;
+ }
+
+ LWasmAtomicBinopHeap* lir = new (alloc())
+ LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
+ MOZ_CRASH("ternary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
+ MOZ_CRASH("binary SIMD NYI");
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
+ int8_t shuffle[16]) {
+ return false;
+}
+#endif
+
+bool MWasmBinarySimd128::specializeForConstantRhs() {
+  // There are probably many cases we would want to specialize here.
+ return false;
+}
+
+void LIRGenerator::visitWasmBinarySimd128WithConstant(
+ MWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("binary SIMD with constant NYI");
+}
+
+void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
+ MOZ_CRASH("shift SIMD NYI");
+}
+
+void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
+ MOZ_CRASH("shuffle SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("replace-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
+ MOZ_CRASH("scalar-to-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
+ MOZ_CRASH("unary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
+ MOZ_CRASH("reduce-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("load-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("store-lane SIMD NYI");
+}
diff --git a/js/src/jit/riscv64/Lowering-riscv64.h b/js/src/jit/riscv64/Lowering-riscv64.h
new file mode 100644
index 0000000000..03ccb3ac8f
--- /dev/null
+++ b/js/src/jit/riscv64/Lowering-riscv64.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_Lowering_riscv64_h
+#define jit_riscv64_Lowering_riscv64_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorRiscv64 : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorRiscv64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {}
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ template <size_t Temps>
+ void lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* src);
+ template <size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompareI64AndBranch(MTest* mir, MCompare* comp, JSOp op,
+ MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+
+ LDefinition tempByteOpRegister();
+ LDefinition tempToUnbox();
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t);
+ void defineInt64Phi(MPhi*, size_t);
+
+ void lowerNegI(MInstruction* ins, MDefinition* input);
+ void lowerNegI64(MInstruction* ins, MDefinition* input);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerDivI(MDiv* div);
+ void lowerDivI64(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerModI64(MMod* mod);
+ void lowerUDiv(MDiv* div);
+ void lowerUDivI64(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void lowerUModI64(MMod* mod);
+ void lowerUrshD(MUrsh* mir);
+ void lowerPowOfTwoI(MPow* mir);
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+ void lowerBigIntLsh(MBigIntLsh* ins);
+ void lowerBigIntRsh(MBigIntRsh* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmSelectI(MWasmSelect* select);
+ void lowerWasmSelectI64(MWasmSelect* select);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+};
+
+typedef LIRGeneratorRiscv64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_Lowering_riscv64_h */
diff --git a/js/src/jit/riscv64/MacroAssembler-riscv64-inl.h b/js/src/jit/riscv64/MacroAssembler-riscv64-inl.h
new file mode 100644
index 0000000000..65f04a33e8
--- /dev/null
+++ b/js/src/jit/riscv64/MacroAssembler-riscv64-inl.h
@@ -0,0 +1,2025 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_MacroAssembler_riscv64_inl_h
+#define jit_riscv64_MacroAssembler_riscv64_inl_h
+
+#include "jit/riscv64/MacroAssembler-riscv64.h"
+
+namespace js {
+namespace jit {
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ ImmPtr rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ cmpPtrSet(cond, Register(scratch2), rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(lhs != scratch);
+ loadPtr(rhs, scratch);
+ cmpPtrSet(cond, lhs, Register(scratch), dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(rhs != scratch2);
+ loadPtr(lhs, scratch2);
+ cmpPtrSet(cond, Register(scratch2), rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(lhs != scratch);
+ load32(rhs, scratch);
+ cmp32Set(cond, lhs, Register(scratch), dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(rhs != scratch2);
+ load32(lhs, scratch2);
+ cmp32Set(cond, Register(scratch2), rhs, dest);
+}
+
+//{{{ check_macroassembler_style
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ MacroAssemblerRiscv64::ma_liPatchable(dest, Imm32(0));
+ sub(dest, StackPointer, dest);
+ return offset;
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_and(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+}
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ and_(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+ }
+}
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ switch (cond) {
+ case Overflow:
+ ma_add32TestOverflow(dest, dest, src, overflow);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_add32TestCarry(cond, dest, dest, src, overflow);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_addPtrTestOverflow(dest, dest, src, label);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_addPtrTestCarry(cond, dest, dest, src, label);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ma_mul32TestOverflow(dest, dest, src, overflow);
+}
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ rshift32(src, dest);
+ branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
+}
+// The type of 'T src' may be a Register or an Imm32, depending on the caller.
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_sub32TestOverflow(dest, dest, src, label);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ ma_sub32(dest, dest, src);
+ ma_b(dest, dest, label, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_subPtrTestOverflow(dest, dest, src, label);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ subPtr(src, dest);
+ ma_b(dest, dest, label, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& address,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
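+  // GC thing tags are the highest tag values, so a tag at or above
+  // ValueLowerInclGCThingTag denotes a GC thing.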
+ ma_b(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BIGINT), cond);
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BOOLEAN), cond);
+}
+
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JS::detail::ValueUpperInclNumberTag),
+ cond == Equal ? BelowOrEqual : Above);
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_STRING), cond);
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_SYMBOL), cond);
+}
+
+// Also see below for specializations of cmpPtrSet.
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+void MacroAssembler::abs32(Register src, Register dest) {
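+ // Branchless abs: scratch = src >> 31 is 0 or -1; (src ^ scratch) - scratch = |src|.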
+ ScratchRegisterScope scratch(asMasm());
+ sraiw(scratch, src, 31);
+ xor_(dest, src, scratch);
+ subw(dest, dest, scratch);
+}
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+ fabs_s(dest, src);
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+ fabs_d(dest, src);
+}
+void MacroAssembler::add32(Register src, Register dest) {
+ ma_add32(dest, dest, src);
+}
+
+void MacroAssembler::add32(Imm32 imm, Register dest) {
+ ma_add32(dest, dest, imm);
+}
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(dest, scratch2);
+ ma_add32(scratch2, scratch2, imm);
+ store32(scratch2, dest);
+}
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ addPtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::add64(const Operand& src, Register64 dest) {
+ if (src.is_mem()) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ add64(scratch64, dest);
+ } else {
+ add64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ ma_add64(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(dest.reg != scratch);
+ mov(ImmWord(imm.value), scratch);
+ add(dest.reg, dest.reg, scratch);
+}
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+ fadd_d(dest, dest, src);
+}
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+ fadd_s(dest, dest, src);
+}
+void MacroAssembler::addPtr(Register src, Register dest) {
+ ma_add64(dest, dest, Operand(src));
+}
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) {
+ ma_add64(dest, dest, imm);
+}
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ movePtr(imm, scratch);
+ addPtr(scratch, dest);
+}
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(dest, scratch);
+ addPtr(imm, scratch);
+ storePtr(scratch, dest);
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(src, scratch);
+ addPtr(scratch, dest);
+}
+void MacroAssembler::and32(Register src, Register dest) {
+ ma_and(dest, dest, src);
+}
+
+void MacroAssembler::and32(Imm32 imm, Register dest) {
+ ma_and(dest, dest, imm);
+}
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(dest, scratch2);
+ ma_and(scratch2, imm);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::and32(const Address& src, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(src, scratch2);
+ ma_and(dest, dest, scratch2);
+}
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, ImmWord(imm.value));
+ ma_and(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ ma_and(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::and64(const Operand& src, Register64 dest) {
+ if (src.is_mem()) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ ma_and(dest.scratchReg(), scratch64.scratchReg());
+ } else {
+ ma_and(dest.scratchReg(), src.toReg());
+ }
+}
+
+void MacroAssembler::andPtr(Register src, Register dest) {
+ ma_and(dest, dest, src);
+}
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) {
+ ma_and(dest, dest, imm);
+}
+
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(uint8_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(int8_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ computeScaledAddress(lhs, scratch2);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(Address(scratch2, lhs.offset), scratch2);
+ branch32(cond, scratch2, rhs, label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(Address(scratch2, lhs.offset), scratch2);
+ branch32(cond, scratch2, rhs, label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(uint16_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(int16_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
+ L label) {
+ ma_b(lhs, imm, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress addr,
+ Imm32 imm, Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(addr, scratch2);
+ ma_b(scratch2, imm, label, cond);
+}
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, rhs.reg, success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, rhs.reg, label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void MacroAssembler::branchDouble(DoubleCondition cc, FloatRegister frs1,
+ FloatRegister frs2, Label* L) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_compareF64(scratch, cc, frs1, frs2);
+ ma_b(scratch, Imm32(1), L, Equal);
+}
+void MacroAssembler::branchFloat(DoubleCondition cc, FloatRegister frs1,
+ FloatRegister frs2, Label* L) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_compareF32(scratch, cc, frs1, frs2);
+ ma_b(scratch, Imm32(1), L, Equal);
+}
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ma_mulPtrTestOverflow(dest, dest, src, label);
+}
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+ MOZ_ASSERT(cond == Overflow);
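+ // INT32_MIN is the only value whose 32-bit negation overflows; it negates to itself.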
+ neg32(reg);
+ branch32(Assembler::Equal, reg, Imm32(INT32_MIN), label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+ Label* label) {
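+ // A nullptr comparison with Zero/NonZero reduces to testing lhs against itself.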
+ if (rhs.value == nullptr && (cond == Zero || cond == NonZero)) {
+ ma_b(lhs, lhs, label, cond);
+ } else {
+ ma_b(lhs, rhs, label, cond);
+ }
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+ L label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ ImmWord rhs, Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(lhs, scratch2);
+ and32(rhs, scratch2);
+ ma_b(scratch2, scratch2, label, cond);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(lhs, scratch2);
+ and32(rhs, scratch2);
+ ma_b(scratch2, scratch2, label, cond);
+}
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BIGINT), label, cond);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestBigInt(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestBigInt(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ computeEffectiveAddress(address, scratch2);
+ splitTag(scratch2, scratch2);
+ branchTestBigInt(cond, scratch2, label);
+}
+void MacroAssembler::branchTestBigIntTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ unboxBigInt(value, scratch2);
+ load32(Address(scratch2, BigInt::offsetOfDigitLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestBoolean(cond, tag, label);
+}
+void MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ unboxBoolean(value, scratch2);
+ ma_b(scratch2, scratch2, label, b ? NonZero : Zero);
+}
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
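+ // Doubles use every tag value at or below JSVAL_TAG_MAX_DOUBLE.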
+ Condition actual = (cond == Equal) ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDoubleTruthy(bool b, FloatRegister value,
+ Label* label) {
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(0.0, fpscratch);
+ DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+ branchDouble(cond, value, fpscratch, label);
+}
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+ const ValueOperand& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestInt32(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestInt32(cond, tag, label);
+}
+void MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
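+ // The int32 payload occupies the low 32 bits of the boxed value.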
+ ExtractBits(scratch, value.valueReg(), 0, 32);
+ ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestMagic(cond, tag, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestMagic(cond, tag, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(valaddr, scratch);
+ ma_b(scratch, ImmWord(magic), label, cond);
+}
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestNull(cond, tag, label);
+}
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
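+ // Int32 and double tags are at or below ValueUpperInclNumberTag.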
+ Condition actual = cond == Equal ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JS::detail::ValueUpperInclNumberTag), label, actual);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestNumber(cond, scratch2, label);
+}
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestObject(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestObject(cond, tag, label);
+}
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestPrimitive(cond, scratch2, label);
+}
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
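+ // Primitive tags are strictly below ValueUpperExclPrimitiveTag.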
+ ma_b(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag), label,
+ (cond == Equal) ? Below : AboveOrEqual);
+}
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_and(scratch, lhs, Operand(rhs));
+ ma_b(scratch, scratch, label, cond);
+ }
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_and(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+ Imm32 rhs, Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ branchTestPtr(cond, scratch2, rhs, label);
+}
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestString(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestString(cond, tag, label);
+}
+void MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ unboxString(value, scratch2);
+ load32(Address(scratch2, JSString::offsetOfLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestSymbol(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestSymbol(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestSymbol(cond, tag, label);
+}
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ splitTag(value, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const BaseIndex& address,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ branchTestUndefined(cond, tag, label);
+}
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ branchPtr(cond, lhs, rhs.valueReg(), label);
+}
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(addr, scratch2);
+ branch(scratch2);
+}
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
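+ // Trunc_w_d leaves a success flag in scratch; branch to fail when it is zero.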
+ Trunc_w_d(dest, src, scratch);
+ ma_b(scratch, Imm32(0), fail, Assembler::Equal);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Trunc_w_d(dest, src, scratch);
+ ma_b(scratch, Imm32(0), fail, Assembler::Equal);
+}
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Trunc_w_s(dest, src, scratch);
+ ma_b(scratch, Imm32(0), fail, Assembler::Equal);
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Trunc_w_s(dest, src, scratch);
+ ma_b(scratch, Imm32(0), fail, Assembler::Equal);
+}
+
+void MacroAssembler::byteSwap16SignExtend(Register src) {
+ JitSpew(JitSpew_Codegen, "[ %s\n", __FUNCTION__);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ // Swap the two low bytes, e.g. for src = 0xFFFFFFFFFFFF8000:
+ andi(scratch, src, 0xFF); // scratch = low byte, 0x00
+ slli(scratch, scratch, 8); // scratch = low byte << 8, 0x0000
+ ma_li(scratch2, 0xFF00); // scratch2 = mask 0xFF00
+ and_(src, src, scratch2); // src = second byte only, 0x8000
+ srli(src, src, 8); // src = second byte >> 8, 0x0080
+ or_(src, src, scratch); // src = bytes swapped, 0x0080
+ slliw(src, src, 16); // sign-extend the swapped 16-bit result
+ sraiw(src, src, 16);
+ JitSpew(JitSpew_Codegen, "]");
+}
+
+void MacroAssembler::byteSwap16ZeroExtend(Register src) {
+ JitSpew(JitSpew_Codegen, "[ %s\n", __FUNCTION__);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
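+ // Same byte swap as byteSwap16SignExtend, but zero-extend the 16-bit result.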
+ andi(scratch, src, 0xFF);
+ slli(scratch, scratch, 8);
+ ma_li(scratch2, 0xFF00);
+ and_(src, src, scratch2);
+ srli(src, src, 8);
+ or_(src, src, scratch);
+ slliw(src, src, 16);
+ srliw(src, src, 16);
+ JitSpew(JitSpew_Codegen, "]");
+}
+
+void MacroAssembler::byteSwap32(Register src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ByteSwap(src, src, 4, scratch);
+}
+void MacroAssembler::byteSwap64(Register64 src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ByteSwap(src.reg, src.reg, 8, scratch);
+}
+void MacroAssembler::clampIntToUint8(Register reg) {
+ // If reg is < 0, then we want to clamp to 0.
+ Label skip, skip2;
+ slti(ScratchRegister, reg, 0);
+ ma_branch(&skip, NotEqual, ScratchRegister, Operand(1));
+ ma_li(reg, Imm32(0));
+ jump(&skip2);
+ bind(&skip);
+ // If reg is >= 255, then we want to clamp to 255.
+ ma_branch(&skip2, LessThanOrEqual, reg, Operand(255));
+ ma_li(reg, Imm32(255));
+ bind(&skip2);
+}
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+ Clz32(dest, src);
+}
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ Clz64(dest, src.reg);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ Ctz64(dest, src.reg);
+}
+
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(uint16_t(rhs.value)), cond);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(int16_t(rhs.value)), cond);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+ const Address& rhs, const Address& src,
+ Register dest) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(lhs != scratch && dest != scratch);
+ load32(rhs, scratch);
+ cmp32Load32(cond, lhs, scratch, src, dest);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
+ const Address& src, Register dest) {
+ Label skip;
+ branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+ load32(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ Label skip;
+ branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+ loadPtr(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
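+ // Materialize the comparison into scratch2, then conditionally move on non-zero.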
+ cmp32Set(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(lhs != scratch2 && src != scratch2 && dest != scratch2);
+ load32(rhs, scratch2);
+ cmp32Move32(cond, lhs, scratch2, src, dest);
+}
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ cmp32Set(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ ma_cmp_set(dest, lhs, ImmWord(uint64_t(rhs.value)), cond);
+}
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(uint8_t(rhs.value)), cond);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(int8_t(rhs.value)), cond);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ cmpPtrSet(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ MOZ_CRASH("NYI");
+}
+void MacroAssembler::ctz32(Register, Register, bool) { MOZ_CRASH(); }
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ subPtr(rhs, lhs);
+ branchPtr(cond, lhs, Imm32(0), label);
+}
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+ fdiv_s(dest, dest, src);
+}
+
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+ fdiv_d(dest, dest, src);
+}
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
+ type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
+ // dest := src XOR mask
+ // scratch := dest >> JSVAL_TAG_SHIFT
+ // fail if scratch != 0
+ //
+ // Note: src and dest can be the same register
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(src.valueReg() != scratch);
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ xor_(dest, src.valueReg(), scratch);
+ srli(scratch, dest, JSVAL_TAG_SHIFT);
+ ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+void MacroAssembler::flexibleLshift32(Register src, Register dest) {
+ lshift32(src, dest);
+}
+void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
+ rshift32Arithmetic(src, dest);
+}
+void MacroAssembler::flexibleRshift32(Register src, Register dest) {
+ rshift32(src, dest);
+}
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ ma_li(scratch, ImmWord(uintptr_t(dest.addr)));
+ ld(scratch2, scratch, 0);
+ addi(scratch2, scratch2, 1);
+ sd(scratch2, scratch, 0);
+}
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ load32(src, dest);
+}
+void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(ra, dest); }
+
+void MacroAssembler::lshift32(Register src, Register dest) {
+ sllw(dest, dest, src);
+}
+
+void MacroAssembler::lshift32(Imm32 imm, Register dest) {
+ slliw(dest, dest, imm.value % 32);
+}
+void MacroAssembler::lshift64(Register shift, Register64 dest) {
+ sll(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ slli(dest.reg, dest.reg, imm.value);
+}
+void MacroAssembler::lshiftPtr(Register shift, Register dest) {
+ sll(dest, dest, shift);
+}
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ slli(dest, dest, imm.value);
+}
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ Float64Max(srcDest, srcDest, other);
+}
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ Float32Max(srcDest, srcDest, other);
+}
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
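+ // Only emit a barrier when at least one barrier bit is requested.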
+ if (barrier) {
+ sync();
+ }
+}
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ Float64Min(srcDest, srcDest, other);
+}
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ Float32Min(srcDest, srcDest, other);
+}
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+ slli(dest, src, xlen - 16);
+ srai(dest, dest, xlen - 16);
+}
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move16SignExtend(dest.reg, dest.reg);
+}
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ slliw(dest, src, 0);
+}
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ slliw(dest.reg, src, 0);
+}
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
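+ // Clear the upper 32 bits by shifting left then logically right by 32.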
+ slli(dest.reg, src, 32);
+ srli(dest.reg, dest.reg, 32);
+}
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ slli(dest, src, 32);
+ srli(dest, dest, 32);
+}
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ movePtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ movePtr(ImmWord(imm.value), dest.reg);
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+ slliw(dest, src.reg, 0);
+}
+
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+ slli(dest, src, xlen - 8);
+ srai(dest, dest, xlen - 8);
+}
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move8SignExtend(dest.reg, dest.reg);
+}
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ fmv_x_d(dest.reg, src);
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ fmv_d_x(dest, src.reg);
+}
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+ fmv_x_w(dest, src);
+}
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+ fmv_w_x(dest, src);
+}
+void MacroAssembler::mul32(Register rhs, Register srcDest) {
+ mulw(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
+ ScratchRegisterScope scratch(asMasm());
+ move32(imm, scratch);
+ mul32(scratch, srcDest);
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
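+ // Full 64-bit multiply, then shift right by 32 to extract the high half of the 32x32 product.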
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, uint32_t(imm.value));
+ mul(dest, src, scratch);
+ srli(dest, dest, 32);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.reg != scratch);
+ mov(ImmWord(imm.value), scratch);
+ mul(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ mul64(imm, dest);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ mul(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::mul64(const Operand& src, const Register64& dest,
+ const Register temp) {
+ if (src.is_mem()) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ mul64(scratch64, dest, temp);
+ } else {
+ mul64(Register64(src.toReg()), dest, temp);
+ }
+}
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ add(scratch, src, src);
+ add(dest, scratch, src);
+}
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+ fmul_d(dest, dest, src);
+}
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ movePtr(imm, scratch);
+ loadDouble(Address(scratch, 0), fpscratch);
+ mulDouble(fpscratch, dest);
+}
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+ fmul_s(dest, dest, src);
+}
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+ mul(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::negateDouble(FloatRegister reg) { fneg_d(reg, reg); }
+
+void MacroAssembler::negateFloat(FloatRegister reg) { fneg_s(reg, reg); }
+
+void MacroAssembler::neg64(Register64 reg) { sub(reg.reg, zero, reg.reg); }
+
+void MacroAssembler::negPtr(Register reg) { sub(reg, zero, reg); }
+
+void MacroAssembler::neg32(Register reg) { subw(reg, zero, reg); }
+void MacroAssembler::not32(Register reg) { nor(reg, reg, zero); }
+
+void MacroAssembler::notPtr(Register reg) { nor(reg, reg, zero); }
+
+void MacroAssembler::or32(Register src, Register dest) {
+ ma_or(dest, dest, src);
+}
+
+void MacroAssembler::or32(Imm32 imm, Register dest) { ma_or(dest, dest, imm); }
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(dest, scratch2);
+ ma_or(scratch2, imm);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ ma_or(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::or64(const Operand& src, Register64 dest) {
+ if (src.is_mem()) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ or64(scratch64, dest);
+ } else {
+ or64(Register64(src.toReg()), dest);
+ }
+}
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, ImmWord(imm.value));
+ ma_or(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) {
+ ma_or(dest, dest, src);
+}
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { ma_or(dest, dest, imm); }
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset, Imm32) { MOZ_CRASH(); }
+void MacroAssembler::popcnt32(Register input, Register output, Register tmp) {
+ Popcnt32(output, input, tmp);
+}
+void MacroAssembler::popcnt64(Register64 input, Register64 output,
+ Register tmp) {
+ Popcnt64(output.reg, input.reg, tmp);
+}
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ if (isUnsigned) {
+ ma_divu32(srcDest, srcDest, rhs);
+ } else {
+ ma_div32(srcDest, srcDest, rhs);
+ }
+}
+
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ if (isUnsigned) {
+ ma_modu32(srcDest, srcDest, rhs);
+ } else {
+ ma_mod32(srcDest, srcDest, rhs);
+ }
+}
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
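+ // A left rotate by n is a right rotate by (64 - n) % 64.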
+ Dror(dest.reg, src.reg, Operand(64 - (count.value % 64)));
+}
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_mod32(scratch, count, Operand(64));
+ negw(scratch, scratch);
+ addi(scratch, scratch, 64);
+ Dror(dest.reg, src.reg, Operand(scratch));
+}
+
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
+ JitSpew(JitSpew_Codegen, "[ rotateLeft\n");
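+ // A left rotate by n is a right rotate by (32 - n) % 32.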
+ Ror(dest, input, Operand(32 - (count.value % 32)));
+ JitSpew(JitSpew_Codegen, "]\n");
+}
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
+ JitSpew(JitSpew_Codegen, "[ rotateLeft\n");
+ ScratchRegisterScope scratch(asMasm());
+ ma_mod32(scratch, count, Operand(32));
+ negw(scratch, scratch);
+ addi(scratch, scratch, 32);
+ Ror(dest, input, Operand(scratch));
+ JitSpew(JitSpew_Codegen, "]\n");
+}
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ Dror(dest.reg, src.reg, Operand(count));
+}
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ Dror(dest.reg, src.reg, Operand(count.value));
+}
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+ Ror(dest, input, Operand(count.value));
+}
+void MacroAssembler::rotateRight(Register count, Register input,
+ Register dest) {
+ Ror(dest, input, Operand(count));
+}
+void MacroAssembler::rshift32Arithmetic(Register src, Register dest) {
+ sraw(dest, dest, src);
+}
+
+void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
+ sraiw(dest, dest, imm.value % 32);
+}
+void MacroAssembler::rshift32(Register src, Register dest) {
+ srlw(dest, dest, src);
+}
+
+void MacroAssembler::rshift32(Imm32 imm, Register dest) {
+ srliw(dest, dest, imm.value % 32);
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ srai(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 dest) {
+ sra(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 dest) {
+ srl(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ srli(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ srai(dest, dest, imm.value);
+}
+void MacroAssembler::rshiftPtr(Register shift, Register dest) {
+ srl(dest, dest, shift);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ srli(dest, dest, imm.value);
+}
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
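+ // Spectre index masking is not supported here, so only the plain bounds check is emitted.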
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+void MacroAssembler::spectreMovePtr(Condition, Register, Register) {
+ MOZ_CRASH("spectreMovePtr");
+}
+void MacroAssembler::spectreZeroRegister(Condition cond, Register scratch,
+ Register dest) {
+ MOZ_CRASH("spectreZeroRegister");
+}
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+ fsqrt_d(dest, src);
+}
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+ fsqrt_s(dest, src);
+}
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const Address& addr) {
+ ma_fst_s(src, addr);
+}
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const BaseIndex& addr) {
+ ma_fst_s(src, addr);
+}
+
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const Address& addr) {
+ ma_fst_d(src, addr);
+}
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const BaseIndex& addr) {
+ ma_fst_d(src, addr);
+}
+void MacroAssembler::sub32(Register src, Register dest) {
+ subw(dest, dest, src);
+}
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) {
+ ma_sub32(dest, dest, imm);
+}
+
+void MacroAssembler::sub32(const Address& src, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ load32(src, scratch);
+ subw(dest, dest, scratch);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ sub(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::sub64(const Operand& src, Register64 dest) {
+ if (src.is_mem()) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ sub64(scratch64, dest);
+ } else {
+ sub64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(dest.reg != scratch);
+ ma_li(scratch, ImmWord(imm.value));
+ sub(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+ fsub_d(dest, dest, src);
+}
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+ fsub_s(dest, dest, src);
+}
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(dest, scratch);
+ subPtr(src, scratch);
+ storePtr(scratch, dest);
+}
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(addr, scratch);
+ subPtr(scratch, dest);
+}
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+ ma_sub64(dest, dest, imm);
+}
+void MacroAssembler::subPtr(Register src, Register dest) {
+ sub(dest, dest, src);
+}
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreStringMitigations);
+ Label skip;
+ branchTest32(Assembler::InvertCondition(cond), addr, mask, &skip);
+ loadPtr(src, dest);
+ bind(&skip);
+}
+void MacroAssembler::test32MovePtr(Condition, const Address&, Imm32, Register,
+ Register) {
+ MOZ_CRASH();
+}
+void MacroAssembler::xor32(Register src, Register dest) {
+ ma_xor(dest, dest, src);
+}
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) {
+ ma_xor(dest, dest, imm);
+}
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(dest, scratch2);
+ xor32(imm, scratch2);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::xor32(const Address& src, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(src, scratch2);
+ xor32(scratch2, dest);
+}
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ ma_xor(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::xor64(const Operand& src, Register64 dest) {
+ if (src.is_mem()) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ xor64(scratch64, dest);
+ } else {
+ xor64(Register64(src.toReg()), dest);
+ }
+}
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, ImmWord(imm.value));
+ ma_xor(dest.reg, dest.reg, scratch);
+}
+void MacroAssembler::xorPtr(Register src, Register dest) {
+ ma_xor(dest, dest, src);
+}
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
+ ma_xor(dest, dest, imm);
+}
+//}}} check_macroassembler_style
+
+void MacroAssemblerRiscv64Compat::incrementInt32Value(const Address& addr) {
+ asMasm().add32(Imm32(1), addr);
+}
+
+void MacroAssemblerRiscv64Compat::retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ jr(ra, 0);
+}
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_MacroAssembler_riscv64_inl_h */
diff --git a/js/src/jit/riscv64/MacroAssembler-riscv64.cpp b/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
new file mode 100644
index 0000000000..c7879fd5cc
--- /dev/null
+++ b/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
@@ -0,0 +1,6515 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/MacroAssembler-riscv64.h"
+
+#include "jsmath.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+#include "jit/riscv64/SharedICRegisters-riscv64.h"
+#include "util/Memory.h"
+#include "vm/JitActivation.h" // jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+MacroAssembler& MacroAssemblerRiscv64::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerRiscv64::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, ImmWord imm,
+ Condition c) {
+ if (imm.value <= INT32_MAX) {
+ ma_cmp_set(rd, rj, Imm32(uint32_t(imm.value)), c);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_cmp_set(rd, rj, scratch, c);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, ImmPtr imm,
+ Condition c) {
+ ma_cmp_set(rd, rj, ImmWord(uintptr_t(imm.value)), c);
+}
+
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Address address, Imm32 imm,
+ Condition c) {
+ // TODO(loong64): 32-bit ma_cmp_set?
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ ma_load(scratch2, address, SizeWord);
+ ma_cmp_set(rd, Register(scratch2), imm, c);
+}
+
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Address address,
+ ImmWord imm, Condition c) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ ma_load(scratch2, address, SizeDouble);
+ ma_cmp_set(rd, Register(scratch2), imm, c);
+}
+
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, Imm32 imm,
+ Condition c) {
+ if (imm.value == 0) {
+ switch (c) {
+ case Equal:
+ case BelowOrEqual:
+ ma_sltu(rd, rj, Operand(1));
+ break;
+ case NotEqual:
+ case Above:
+ sltu(rd, zero, rj);
+ break;
+ case AboveOrEqual:
+ case Below:
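+        // Unsigned x >= 0 is always true and unsigned x < 0 is always false,
+        // so the result is a constant.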
+ ori(rd, zero, c == AboveOrEqual ? 1 : 0);
+ break;
+ case GreaterThan:
+ case LessThanOrEqual:
+ slt(rd, zero, rj);
+ if (c == LessThanOrEqual) {
+ xori(rd, rd, 1);
+ }
+ break;
+ case LessThan:
+ case GreaterThanOrEqual:
+ slt(rd, rj, zero);
+ if (c == GreaterThanOrEqual) {
+ xori(rd, rd, 1);
+ }
+ break;
+ case Zero:
+ ma_sltu(rd, rj, Operand(1));
+ break;
+ case NonZero:
+ sltu(rd, zero, rj);
+ break;
+ case Signed:
+ slt(rd, rj, zero);
+ break;
+ case NotSigned:
+ slt(rd, rj, zero);
+ xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return;
+ }
+
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ ma_xor(rd, rj, imm);
+ if (c == Equal) {
+ ma_sltu(rd, rd, Operand(1));
+ } else {
+ sltu(rd, zero, rd);
+ }
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_CRASH("Invalid condition.");
+ default:
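+      // ma_cmp materializes a 0/1 test in rd and returns whether the requested
+      // condition holds when rd compares Equal (rd == 0) or NotEqual (rd != 0)
+      // against zero; flip the bit in the Equal case.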
+ Condition cond = ma_cmp(rd, rj, imm, c);
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+
+ if (cond == Equal) xori(rd, rd, 1);
+ }
+}
+
+Assembler::Condition MacroAssemblerRiscv64::ma_cmp(Register dest, Register lhs,
+ Register rhs, Condition c) {
+ switch (c) {
+ case Above:
+ // bgtu s,t,label =>
+ // sltu at,t,s
+ // bne at,$zero,offs
+ sltu(dest, rhs, lhs);
+ return NotEqual;
+ case AboveOrEqual:
+ // bgeu s,t,label =>
+ // sltu at,s,t
+ // beq at,$zero,offs
+ sltu(dest, lhs, rhs);
+ return Equal;
+ case Below:
+ // bltu s,t,label =>
+ // sltu at,s,t
+ // bne at,$zero,offs
+ sltu(dest, lhs, rhs);
+ return NotEqual;
+ case BelowOrEqual:
+ // bleu s,t,label =>
+ // sltu at,t,s
+ // beq at,$zero,offs
+ sltu(dest, rhs, lhs);
+ return Equal;
+ case GreaterThan:
+ // bgt s,t,label =>
+ // slt at,t,s
+ // bne at,$zero,offs
+ slt(dest, rhs, lhs);
+ return NotEqual;
+ case GreaterThanOrEqual:
+ // bge s,t,label =>
+ // slt at,s,t
+ // beq at,$zero,offs
+ slt(dest, lhs, rhs);
+ return Equal;
+ case LessThan:
+ // blt s,t,label =>
+ // slt at,s,t
+ // bne at,$zero,offs
+ slt(dest, lhs, rhs);
+ return NotEqual;
+ case LessThanOrEqual:
+ // ble s,t,label =>
+ // slt at,t,s
+ // beq at,$zero,offs
+ slt(dest, rhs, lhs);
+ return Equal;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return Always;
+}
+
+Assembler::Condition MacroAssemblerRiscv64::ma_cmp(Register dest, Register lhs,
+ Imm32 imm, Condition c) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_RELEASE_ASSERT(lhs != scratch);
+
+ switch (c) {
+ case Above:
+ case BelowOrEqual:
+ if (imm.value != 0x7fffffff && is_intn(imm.value + 1, 12) &&
+ imm.value != -1) {
+ // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
+ ma_sltu(dest, lhs, Operand(imm.value + 1));
+
+ return (c == BelowOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ sltu(dest, scratch, lhs);
+ return (c == BelowOrEqual ? Equal : NotEqual);
+ }
+ case AboveOrEqual:
+ case Below:
+ if (is_intn(imm.value, 12)) {
+ ma_sltu(dest, lhs, Operand(imm.value));
+ } else {
+ ma_li(scratch, imm);
+ sltu(dest, lhs, scratch);
+ }
+ return (c == AboveOrEqual ? Equal : NotEqual);
+ case GreaterThan:
+ case LessThanOrEqual:
+ if (imm.value != 0x7fffffff && is_intn(imm.value + 1, 12)) {
+ // lhs <= rhs via lhs < rhs + 1.
+ ma_slt(dest, lhs, Operand(imm.value + 1));
+ return (c == LessThanOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ slt(dest, scratch, lhs);
+ return (c == LessThanOrEqual ? Equal : NotEqual);
+ }
+ case GreaterThanOrEqual:
+ case LessThan:
+ if (is_intn(imm.value, 12)) {
+ ma_slt(dest, lhs, imm);
+ } else {
+ ma_li(scratch, imm);
+ slt(dest, lhs, scratch);
+ }
+ return (c == GreaterThanOrEqual ? Equal : NotEqual);
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return Always;
+}
+
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, Register rk,
+ Condition c) {
+ switch (c) {
+ case Equal:
+ // seq d,s,t =>
+ // xor d,s,t
+ // sltiu d,d,1
+ xor_(rd, rj, rk);
+ ma_sltu(rd, rd, Operand(1));
+ break;
+ case NotEqual:
+ // sne d,s,t =>
+ // xor d,s,t
+ // sltu d,$zero,d
+ xor_(rd, rj, rk);
+ sltu(rd, zero, rd);
+ break;
+ case Above:
+ // sgtu d,s,t =>
+ // sltu d,t,s
+ sltu(rd, rk, rj);
+ break;
+ case AboveOrEqual:
+ // sgeu d,s,t =>
+ // sltu d,s,t
+ // xori d,d,1
+ sltu(rd, rj, rk);
+ xori(rd, rd, 1);
+ break;
+ case Below:
+ // sltu d,s,t
+ sltu(rd, rj, rk);
+ break;
+ case BelowOrEqual:
+ // sleu d,s,t =>
+ // sltu d,t,s
+ // xori d,d,1
+ sltu(rd, rk, rj);
+ xori(rd, rd, 1);
+ break;
+ case GreaterThan:
+ // sgt d,s,t =>
+ // slt d,t,s
+ slt(rd, rk, rj);
+ break;
+ case GreaterThanOrEqual:
+ // sge d,s,t =>
+ // slt d,s,t
+ // xori d,d,1
+ slt(rd, rj, rk);
+ xori(rd, rd, 1);
+ break;
+ case LessThan:
+ // slt d,s,t
+ slt(rd, rj, rk);
+ break;
+ case LessThanOrEqual:
+ // sle d,s,t =>
+ // slt d,t,s
+ // xori d,d,1
+ slt(rd, rk, rj);
+ xori(rd, rd, 1);
+ break;
+ case Zero:
+ MOZ_ASSERT(rj == rk);
+ // seq d,s,$zero =>
+ // sltiu d,s,1
+ ma_sltu(rd, rj, Operand(1));
+ break;
+ case NonZero:
+ MOZ_ASSERT(rj == rk);
+ // sne d,s,$zero =>
+ // sltu d,$zero,s
+ sltu(rd, zero, rj);
+ break;
+ case Signed:
+ MOZ_ASSERT(rj == rk);
+ slt(rd, rj, zero);
+ break;
+ case NotSigned:
+ MOZ_ASSERT(rj == rk);
+ // sge d,s,$zero =>
+ // slt d,s,$zero
+ // xori d,d,1
+ slt(rd, rj, zero);
+ xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+}
+
+void MacroAssemblerRiscv64::ma_compareF32(Register rd, DoubleCondition cc,
+ FloatRegister cmp1,
+ FloatRegister cmp2) {
+ switch (cc) {
+ case DoubleEqualOrUnordered:
+ case DoubleEqual:
+ feq_s(rd, cmp1, cmp2);
+ break;
+ case DoubleNotEqualOrUnordered:
+ case DoubleNotEqual: {
+ Label done;
+ CompareIsNanF32(rd, cmp1, cmp2);
+ ma_branch(&done, Equal, rd, Operand(1));
+ feq_s(rd, cmp1, cmp2);
+ bind(&done);
+ NegateBool(rd, rd);
+ break;
+ }
+ case DoubleLessThanOrUnordered:
+ case DoubleLessThan:
+ flt_s(rd, cmp1, cmp2);
+ break;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ case DoubleGreaterThanOrEqual:
+ fle_s(rd, cmp2, cmp1);
+ break;
+ case DoubleLessThanOrEqualOrUnordered:
+ case DoubleLessThanOrEqual:
+ fle_s(rd, cmp1, cmp2);
+ break;
+ case DoubleGreaterThanOrUnordered:
+ case DoubleGreaterThan:
+ flt_s(rd, cmp2, cmp1);
+ break;
+ case DoubleOrdered:
+ CompareIsNotNanF32(rd, cmp1, cmp2);
+ return;
+ case DoubleUnordered:
+ CompareIsNanF32(rd, cmp1, cmp2);
+ return;
+ }
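+  // For the *OrUnordered conditions, additionally set rd when either operand
+  // is NaN.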
+ if (cc >= FIRST_UNORDERED && cc <= LAST_UNORDERED) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNanF32(scratch, cmp1, cmp2);
+ or_(rd, rd, scratch);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_compareF64(Register rd, DoubleCondition cc,
+ FloatRegister cmp1,
+ FloatRegister cmp2) {
+ switch (cc) {
+ case DoubleEqualOrUnordered:
+ case DoubleEqual:
+ feq_d(rd, cmp1, cmp2);
+ break;
+ case DoubleNotEqualOrUnordered:
+ case DoubleNotEqual: {
+ Label done;
+ CompareIsNanF64(rd, cmp1, cmp2);
+ ma_branch(&done, Equal, rd, Operand(1));
+ feq_d(rd, cmp1, cmp2);
+ bind(&done);
+ NegateBool(rd, rd);
+ } break;
+ case DoubleLessThanOrUnordered:
+ case DoubleLessThan:
+ flt_d(rd, cmp1, cmp2);
+ break;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ case DoubleGreaterThanOrEqual:
+ fle_d(rd, cmp2, cmp1);
+ break;
+ case DoubleLessThanOrEqualOrUnordered:
+ case DoubleLessThanOrEqual:
+ fle_d(rd, cmp1, cmp2);
+ break;
+ case DoubleGreaterThanOrUnordered:
+ case DoubleGreaterThan:
+ flt_d(rd, cmp2, cmp1);
+ break;
+ case DoubleOrdered:
+ CompareIsNotNanF64(rd, cmp1, cmp2);
+ return;
+ case DoubleUnordered:
+ CompareIsNanF64(rd, cmp1, cmp2);
+ return;
+ }
+
+ if (cc >= FIRST_UNORDERED && cc <= LAST_UNORDERED) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNanF64(scratch, cmp1, cmp2);
+ or_(rd, rd, scratch);
+ }
+}
+
+void MacroAssemblerRiscv64Compat::movePtr(Register src, Register dest) {
+ mv(dest, src);
+}
+void MacroAssemblerRiscv64Compat::movePtr(ImmWord imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerRiscv64Compat::movePtr(ImmGCPtr imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerRiscv64Compat::movePtr(ImmPtr imm, Register dest) {
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+void MacroAssemblerRiscv64Compat::movePtr(wasm::SymbolicAddress imm,
+ Register dest) {
+ DEBUG_PRINTF("[ %s\n", __FUNCTION__);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
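+  // Emit a fixed-size patchable 64-bit immediate load; the placeholder is
+  // patched later with the real address of the symbolic target, using the
+  // SymbolicAccess entry recorded above.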
+ ma_liPatchable(dest, ImmWord(-1), Li64);
+ DEBUG_PRINTF("]\n");
+}
+
+bool MacroAssemblerRiscv64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ asMasm().PushFrameDescriptor(FrameType::IonJS); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+ asMasm().Push(FramePointer);
+ return true;
+}
+
+void MacroAssemblerRiscv64Compat::convertUInt32ToDouble(Register src,
+ FloatRegister dest) {
+ fcvt_d_wu(dest, src);
+}
+
+void MacroAssemblerRiscv64Compat::convertUInt64ToDouble(Register src,
+ FloatRegister dest) {
+ fcvt_d_lu(dest, src);
+}
+
+void MacroAssemblerRiscv64Compat::convertUInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ fcvt_s_wu(dest, src);
+}
+
+void MacroAssemblerRiscv64Compat::convertDoubleToFloat32(FloatRegister src,
+ FloatRegister dest) {
+ fcvt_s_d(dest, src);
+}
+
+template <typename F>
+void MacroAssemblerRiscv64::RoundHelper(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch,
+ FPURoundingMode frm) {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 20);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+
+ MOZ_ASSERT((std::is_same<float, F>::value) ||
+ (std::is_same<double, F>::value));
+ // Need at least two FPRs, so check against dst == src == fpu_scratch
+ MOZ_ASSERT(!(dst == src && dst == fpu_scratch));
+
+ const int kFloatMantissaBits =
+ sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits;
+ const int kFloatExponentBits =
+ sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits;
+ const int kFloatExponentBias =
+ sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias;
+ Label done;
+
+ {
+ UseScratchRegisterScope temps2(this);
+ Register scratch = temps2.Acquire();
+ // extract exponent value of the source floating-point to scratch
+ if (std::is_same<F, double>::value) {
+ fmv_x_d(scratch, src);
+ } else {
+ fmv_x_w(scratch, src);
+ }
+ ExtractBits(scratch2, scratch, kFloatMantissaBits, kFloatExponentBits);
+ }
+
+ // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits
+ // in mantissa, the result is the same as src, so move src to dest (to avoid
+ // generating another branch)
+ if (dst != src) {
+ if (std::is_same<F, double>::value) {
+ fmv_d(dst, src);
+ } else {
+ fmv_s(dst, src);
+ }
+ }
+ {
+ Label not_NaN;
+ UseScratchRegisterScope temps2(this);
+ Register scratch = temps2.Acquire();
+ // According to the wasm spec
+ // (https://webassembly.github.io/spec/core/exec/numerics.html#aux-nans)
+    // if the input is the canonical NaN, the output is the canonical NaN, and
+    // if the input is any other NaN, the output is some NaN whose most
+    // significant payload bit is 1. In RISC-V, feq_d sets scratch to 0 if src
+    // is a NaN. If
+ // src is not a NaN, branch to the label and do nothing, but if it is,
+ // fmin_d will set dst to the canonical NaN.
+ if (std::is_same<F, double>::value) {
+ feq_d(scratch, src, src);
+ bnez(scratch, &not_NaN);
+ fmin_d(dst, src, src);
+ } else {
+ feq_s(scratch, src, src);
+ bnez(scratch, &not_NaN);
+ fmin_s(dst, src, src);
+ }
+ bind(&not_NaN);
+ }
+
+  // If the real exponent (i.e., scratch2 - kFloatExponentBias) is at least
+  // kFloatMantissaBits, the floating-point value has no fractional part, so the
+  // input is already rounded; jump to done. Note that NaN and Infinity set the
+  // maximal exponent value in their floating-point representation, so they also
+  // satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits), and JS round
+  // semantics specify that rounding NaN (Infinity) returns NaN (Infinity), so
+  // NaN and Infinity are considered already-rounded values too.
+ ma_branch(&done, GreaterThanOrEqual, scratch2,
+ Operand(kFloatExponentBias + kFloatMantissaBits));
+
+ // Actual rounding is needed along this path
+
+ // old_src holds the original input, needed for the case of src == dst
+ FPURegister old_src = src;
+ if (src == dst) {
+ MOZ_ASSERT(fpu_scratch != dst);
+ fmv_d(fpu_scratch, src);
+ old_src = fpu_scratch;
+ }
+
+  // Since only inputs whose real exponent value is less than kMantissaBits
+  // (i.e., 23 or 52 bits) fall into this path, the value range of the input
+  // falls into that of 23- or 53-bit integers. So we round the input to an
+  // integer value, then convert it back to floating-point.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (std::is_same<F, double>::value) {
+ fcvt_l_d(scratch, src, frm);
+ fcvt_d_l(dst, scratch, frm);
+ } else {
+ fcvt_w_s(scratch, src, frm);
+ fcvt_s_w(dst, scratch, frm);
+ }
+ }
+  // Special handling is needed if the input is a very small positive/negative
+  // number that rounds to zero. JS semantics require that the rounded result
+  // retain the sign of the input, so a very small positive (negative)
+  // floating-point number must round to positive (negative) 0. Therefore, we
+  // use sign-bit injection to produce +/-0 correctly. Instead of testing for
+  // zero with a branch, we simply inject the sign bit unconditionally on this
+  // path (this is why old_src is needed).
+ if (std::is_same<F, double>::value) {
+ fsgnj_d(dst, dst, old_src);
+ } else {
+ fsgnj_s(dst, dst, old_src);
+ }
+
+ bind(&done);
+}
+
+template <typename CvtFunc>
+void MacroAssemblerRiscv64::RoundFloatingPointToInteger(Register rd,
+ FPURegister fs,
+ Register result,
+ CvtFunc fcvt_generator,
+ bool Inexact) {
+ // Save csr_fflags to scratch & clear exception flags
+ if (result != Register::Invalid()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 6);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ int exception_flags = kInvalidOperation;
+ if (Inexact) exception_flags |= kInexact;
+ csrrci(scratch, csr_fflags, exception_flags);
+
+ // actual conversion instruction
+ fcvt_generator(this, rd, fs);
+
+ // check kInvalidOperation flag (out-of-range, NaN)
+ // set result to 1 if normal, otherwise set result to 0 for abnormal
+ frflags(result);
+ andi(result, result, exception_flags);
+ seqz(result, result); // result <-- 1 (normal), result <-- 0 (abnormal)
+
+ // restore csr_fflags
+ csrw(csr_fflags, scratch);
+ } else {
+ // actual conversion instruction
+ fcvt_generator(this, rd, fs);
+ }
+}
+
+void MacroAssemblerRiscv64::Trunc_uw_d(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_wu_d(dst, src, RTZ);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Trunc_w_d(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RTZ);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Trunc_uw_s(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_wu_s(dst, src, RTZ);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Trunc_w_s(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RTZ);
+ },
+ Inexact);
+}
+void MacroAssemblerRiscv64::Trunc_ul_d(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_lu_d(dst, src, RTZ);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Trunc_l_d(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_l_d(dst, src, RTZ);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Trunc_ul_s(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_lu_s(dst, src, RTZ);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Trunc_l_s(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_l_s(dst, src, RTZ);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Floor_d_d(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<double>(dst, src, fpu_scratch, RDN);
+}
+
+void MacroAssemblerRiscv64::Ceil_d_d(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<double>(dst, src, fpu_scratch, RUP);
+}
+
+void MacroAssemblerRiscv64::Trunc_d_d(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<double>(dst, src, fpu_scratch, RTZ);
+}
+
+void MacroAssemblerRiscv64::Round_d_d(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<double>(dst, src, fpu_scratch, RNE);
+}
+
+void MacroAssemblerRiscv64::Floor_s_s(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<float>(dst, src, fpu_scratch, RDN);
+}
+
+void MacroAssemblerRiscv64::Ceil_s_s(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<float>(dst, src, fpu_scratch, RUP);
+}
+
+void MacroAssemblerRiscv64::Trunc_s_s(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<float>(dst, src, fpu_scratch, RTZ);
+}
+
+void MacroAssemblerRiscv64::Round_s_s(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<float>(dst, src, fpu_scratch, RNE);
+}
+
+void MacroAssemblerRiscv64::Round_w_s(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RNE);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Round_w_d(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RNE);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Ceil_w_s(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RUP);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Ceil_w_d(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RUP);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Floor_w_s(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RDN);
+ },
+ Inexact);
+}
+
+void MacroAssemblerRiscv64::Floor_w_d(Register rd, FPURegister fs,
+ Register result, bool Inexact) {
+ RoundFloatingPointToInteger(
+ rd, fs, result,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RDN);
+ },
+ Inexact);
+}
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, control branches to
+// the given fail label.
+void MacroAssemblerRiscv64Compat::convertDoubleToInt32(FloatRegister src,
+ Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ fclass_d(dest, src);
+ ma_b(dest, Imm32(kNegativeZero), fail, Equal);
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
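+  // Trunc_w_d leaves 1 in scratch for an exact, in-range conversion and 0
+  // otherwise (inexact, out of range, or NaN); branch to fail in that case.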
+ Trunc_w_d(dest, src, scratch, true);
+ ma_b(scratch, Imm32(0), fail, Equal);
+}
+
+void MacroAssemblerRiscv64Compat::convertDoubleToPtr(FloatRegister src,
+ Register dest, Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ fclass_d(dest, src);
+ ma_b(dest, Imm32(kNegativeZero), fail, Equal);
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Trunc_l_d(dest, src, scratch, true);
+ ma_b(scratch, Imm32(0), fail, Equal);
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, control branches to
+// the given fail label.
+void MacroAssemblerRiscv64Compat::convertFloat32ToInt32(
+ FloatRegister src, Register dest, Label* fail, bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+    // src holds a single-precision value, so classify it with fclass_s.
+    fclass_s(dest, src);
+ ma_b(dest, Imm32(kNegativeZero), fail, Equal);
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Trunc_w_s(dest, src, scratch, true);
+ ma_b(scratch, Imm32(0), fail, Equal);
+}
+
+void MacroAssemblerRiscv64Compat::convertFloat32ToDouble(FloatRegister src,
+ FloatRegister dest) {
+ fcvt_d_s(dest, src);
+}
+
+void MacroAssemblerRiscv64Compat::convertInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ fcvt_s_w(dest, src);
+}
+
+void MacroAssemblerRiscv64Compat::convertInt32ToFloat32(const Address& src,
+ FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ load32(src, scratch);
+ fcvt_s_w(dest, scratch);
+}
+
+void MacroAssemblerRiscv64Compat::movq(Register rj, Register rd) { mv(rd, rj); }
+
+// Memory.
+void MacroAssemblerRiscv64::ma_loadDouble(FloatRegister dest, Address address) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+ fld(dest, base, encodedOffset);
+}
+
+void MacroAssemblerRiscv64::ma_loadFloat(FloatRegister dest, Address address) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+ flw(dest, base, encodedOffset);
+}
+
+void MacroAssemblerRiscv64::ma_load(Register dest, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ if (ZeroExtend == extension) {
+ lbu(dest, base, encodedOffset);
+ } else {
+ lb(dest, base, encodedOffset);
+ }
+ break;
+ case SizeHalfWord:
+ if (ZeroExtend == extension) {
+ lhu(dest, base, encodedOffset);
+ } else {
+ lh(dest, base, encodedOffset);
+ }
+ break;
+ case SizeWord:
+ if (ZeroExtend == extension) {
+ lwu(dest, base, encodedOffset);
+ } else {
+ lw(dest, base, encodedOffset);
+ }
+ break;
+ case SizeDouble:
+ ld(dest, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
+
+void MacroAssemblerRiscv64::ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ asMasm().computeScaledAddress(dest, scratch2);
+ asMasm().ma_store(data, Address(scratch2, dest.offset), size, extension);
+}
+
+void MacroAssemblerRiscv64::ma_store(Imm32 imm, const BaseIndex& dest,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register address = temps.Acquire();
+  // Compute the absolute address into the address register so that the
+  // store offset is 0.
+ computeScaledAddress(dest, address);
+
+  // The scratch register is free now; use it to load the imm value.
+ ma_li(scratch, imm);
+
+  // With offset 0, ma_store() does not need the scratch register internally,
+  // so it is safe to pass scratch as the data register here.
+ ma_store(scratch, Address(address, 0), size, extension);
+}
+
+void MacroAssemblerRiscv64::ma_store(Imm32 imm, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_store(scratch, address, size, extension);
+}
+
+void MacroAssemblerRiscv64::ma_store(Register data, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ sb(data, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ sh(data, base, encodedOffset);
+ break;
+ case SizeWord:
+ sw(data, base, encodedOffset);
+ break;
+ case SizeDouble:
+ sd(data, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+// Memory.
+void MacroAssemblerRiscv64::ma_storeDouble(FloatRegister dest,
+ Address address) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+ fsd(dest, base, encodedOffset);
+}
+
+void MacroAssemblerRiscv64::ma_storeFloat(FloatRegister dest, Address address) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+ fsw(dest, base, encodedOffset);
+}
+
+void MacroAssemblerRiscv64::computeScaledAddress(const BaseIndex& address,
+ Register dest) {
+ Register base = address.base;
+ Register index = address.index;
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+ UseScratchRegisterScope temps(this);
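+  // If dest aliases base, shift the index into a scratch register so that the
+  // base is not clobbered before the add.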
+ Register tmp = dest == base ? temps.Acquire() : dest;
+ if (shift) {
+ MOZ_ASSERT(shift <= 4);
+ slli(tmp, index, shift);
+ add(dest, base, tmp);
+ } else {
+ add(dest, base, index);
+ }
+}
+
+void MacroAssemblerRiscv64Compat::wasmLoadI64Impl(
+ const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ switch (access.type()) {
+ case Scalar::Int8:
+ add(ScratchRegister, memoryBase, ptr);
+ lb(output.reg, ScratchRegister, 0);
+ break;
+ case Scalar::Uint8:
+ add(ScratchRegister, memoryBase, ptr);
+ lbu(output.reg, ScratchRegister, 0);
+ break;
+ case Scalar::Int16:
+ add(ScratchRegister, memoryBase, ptr);
+ lh(output.reg, ScratchRegister, 0);
+ break;
+ case Scalar::Uint16:
+ add(ScratchRegister, memoryBase, ptr);
+ lhu(output.reg, ScratchRegister, 0);
+ break;
+ case Scalar::Int32:
+ add(ScratchRegister, memoryBase, ptr);
+ lw(output.reg, ScratchRegister, 0);
+ break;
+ case Scalar::Uint32:
+      // TODO(loong64): Why is zero-extension needed here?
+ add(ScratchRegister, memoryBase, ptr);
+ lwu(output.reg, ScratchRegister, 0);
+ break;
+ case Scalar::Int64:
+ add(ScratchRegister, memoryBase, ptr);
+ ld(output.reg, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
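+  // Record this access in the wasm memory-access metadata; the relevant
+  // instruction is the 4-byte load just emitted.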
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerRiscv64Compat::wasmStoreI64Impl(
+ const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ add(ScratchRegister, memoryBase, ptr);
+ sb(value.reg, ScratchRegister, 0);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ add(ScratchRegister, memoryBase, ptr);
+ sh(value.reg, ScratchRegister, 0);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ add(ScratchRegister, memoryBase, ptr);
+ sw(value.reg, ScratchRegister, 0);
+ break;
+ case Scalar::Int64:
+ add(ScratchRegister, memoryBase, ptr);
+ sd(value.reg, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerRiscv64Compat::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerRiscv64Compat::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+void MacroAssemblerRiscv64Compat::move32(Imm32 imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerRiscv64Compat::move32(Register src, Register dest) {
+ slliw(dest, src, 0);
+}
+
+void MacroAssemblerRiscv64Compat::load8ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerRiscv64Compat::load8ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerRiscv64Compat::load8SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void MacroAssemblerRiscv64Compat::load8SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void MacroAssemblerRiscv64Compat::load16ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerRiscv64Compat::load16ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerRiscv64Compat::load16SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerRiscv64Compat::load16SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerRiscv64Compat::load32(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerRiscv64Compat::load32(const BaseIndex& address,
+ Register dest) {
+ ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerRiscv64Compat::load32(AbsoluteAddress address,
+ Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerRiscv64Compat::load32(wasm::SymbolicAddress address,
+ Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ movePtr(address, ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerRiscv64Compat::loadPtr(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeDouble);
+}
+
+void MacroAssemblerRiscv64Compat::loadPtr(const BaseIndex& src, Register dest) {
+ ma_load(dest, src, SizeDouble);
+}
+
+void MacroAssemblerRiscv64Compat::loadPtr(AbsoluteAddress address,
+ Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerRiscv64Compat::loadPtr(wasm::SymbolicAddress address,
+ Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ movePtr(address, ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerRiscv64Compat::loadPrivate(const Address& address,
+ Register dest) {
+ loadPtr(address, dest);
+}
+
+void MacroAssemblerRiscv64Compat::store8(Imm32 imm, const Address& address) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, imm);
+ ma_store(ScratchRegister, address, SizeByte);
+}
+
+void MacroAssemblerRiscv64Compat::store8(Register src, const Address& address) {
+ ma_store(src, address, SizeByte);
+}
+
+void MacroAssemblerRiscv64Compat::store8(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeByte);
+}
+
+void MacroAssemblerRiscv64Compat::store8(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeByte);
+}
+
+void MacroAssemblerRiscv64Compat::store16(Imm32 imm, const Address& address) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, imm);
+ ma_store(ScratchRegister, address, SizeHalfWord);
+}
+
+void MacroAssemblerRiscv64Compat::store16(Register src,
+ const Address& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerRiscv64Compat::store16(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+void MacroAssemblerRiscv64Compat::store16(Register src,
+ const BaseIndex& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerRiscv64Compat::store32(Register src,
+ AbsoluteAddress address) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ store32(src, Address(ScratchRegister, 0));
+}
+
+void MacroAssemblerRiscv64Compat::store32(Register src,
+ const Address& address) {
+ ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerRiscv64Compat::store32(Imm32 src, const Address& address) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ move32(src, ScratchRegister);
+ ma_store(ScratchRegister, address, SizeWord);
+}
+
+void MacroAssemblerRiscv64Compat::store32(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeWord);
+}
+
+void MacroAssemblerRiscv64Compat::store32(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void MacroAssemblerRiscv64Compat::storePtr(ImmWord imm, T address) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, imm);
+ ma_store(ScratchRegister, address, SizeDouble);
+}
+
+template void MacroAssemblerRiscv64Compat::storePtr<Address>(ImmWord imm,
+ Address address);
+template void MacroAssemblerRiscv64Compat::storePtr<BaseIndex>(
+ ImmWord imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerRiscv64Compat::storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerRiscv64Compat::storePtr<Address>(ImmPtr imm,
+ Address address);
+template void MacroAssemblerRiscv64Compat::storePtr<BaseIndex>(
+ ImmPtr imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerRiscv64Compat::storePtr(ImmGCPtr imm, T address) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ movePtr(imm, ScratchRegister);
+ storePtr(ScratchRegister, address);
+}
+
+template void MacroAssemblerRiscv64Compat::storePtr<Address>(ImmGCPtr imm,
+ Address address);
+template void MacroAssemblerRiscv64Compat::storePtr<BaseIndex>(
+ ImmGCPtr imm, BaseIndex address);
+
+void MacroAssemblerRiscv64Compat::storePtr(Register src,
+ const Address& address) {
+ ma_store(src, address, SizeDouble);
+}
+
+void MacroAssemblerRiscv64Compat::storePtr(Register src,
+ const BaseIndex& address) {
+ ma_store(src, address, SizeDouble);
+}
+
+void MacroAssemblerRiscv64Compat::storePtr(Register src, AbsoluteAddress dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ movePtr(ImmPtr(dest.addr), ScratchRegister);
+ storePtr(src, Address(ScratchRegister, 0));
+}
+
+void MacroAssemblerRiscv64Compat::testNullSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ splitTag(value, ScratchRegister);
+ ma_cmp_set(dest, ScratchRegister, ImmTag(JSVAL_TAG_NULL), cond);
+}
+
+void MacroAssemblerRiscv64Compat::testObjectSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ splitTag(value, ScratchRegister);
+ ma_cmp_set(dest, ScratchRegister, ImmTag(JSVAL_TAG_OBJECT), cond);
+}
+
+void MacroAssemblerRiscv64Compat::testUndefinedSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ splitTag(value, ScratchRegister);
+ ma_cmp_set(dest, ScratchRegister, ImmTag(JSVAL_TAG_UNDEFINED), cond);
+}
+
+void MacroAssemblerRiscv64Compat::unboxInt32(const ValueOperand& operand,
+ Register dest) {
+ slliw(dest, operand.valueReg(), 0);
+}
+
+void MacroAssemblerRiscv64Compat::unboxInt32(Register src, Register dest) {
+ slliw(dest, src, 0);
+}
+
+void MacroAssemblerRiscv64Compat::unboxInt32(const Address& src,
+ Register dest) {
+ load32(Address(src.base, src.offset), dest);
+}
+
+void MacroAssemblerRiscv64Compat::unboxInt32(const BaseIndex& src,
+ Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ computeScaledAddress(src, ScratchRegister);
+ load32(Address(ScratchRegister, src.offset), dest);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBoolean(const ValueOperand& operand,
+ Register dest) {
+ ExtractBits(dest, operand.valueReg(), 0, 32);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBoolean(Register src, Register dest) {
+ ExtractBits(dest, src, 0, 32);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBoolean(const Address& src,
+ Register dest) {
+ ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBoolean(const BaseIndex& src,
+ Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ computeScaledAddress(src, ScratchRegister);
+ ma_load(dest, Address(ScratchRegister, src.offset), SizeWord, ZeroExtend);
+}
+
+void MacroAssemblerRiscv64Compat::unboxDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ fmv_d_x(dest, operand.valueReg());
+}
+
+void MacroAssemblerRiscv64Compat::unboxDouble(const Address& src,
+ FloatRegister dest) {
+ ma_loadDouble(dest, Address(src.base, src.offset));
+}
+
+void MacroAssemblerRiscv64Compat::unboxDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(src, scratch);
+ unboxDouble(ValueOperand(scratch), dest);
+}
+
+void MacroAssemblerRiscv64Compat::unboxString(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerRiscv64Compat::unboxString(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerRiscv64Compat::unboxString(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerRiscv64Compat::unboxSymbol(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerRiscv64Compat::unboxSymbol(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerRiscv64Compat::unboxSymbol(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBigInt(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBigInt(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBigInt(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxObject(const ValueOperand& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxObject(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxObject(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxValue(const ValueOperand& src,
+ AnyRegister dest,
+ JSValueType type) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ ma_branch(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr(), type);
+ }
+}
+
+void MacroAssemblerRiscv64Compat::boxDouble(FloatRegister src,
+ const ValueOperand& dest,
+ FloatRegister) {
+ fmv_x_d(dest.valueReg(), src);
+}
+
+void MacroAssemblerRiscv64Compat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest) {
+ MOZ_ASSERT(src != dest.valueReg());
+ boxValue(type, src, dest.valueReg());
+}
+
+void MacroAssemblerRiscv64Compat::boolValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ convertBoolToInt32(operand.valueReg(), ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void MacroAssemblerRiscv64Compat::int32ValueToDouble(
+ const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+}
+
+void MacroAssemblerRiscv64Compat::boolValueToFloat32(
+ const ValueOperand& operand, FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ convertBoolToInt32(operand.valueReg(), ScratchRegister);
+ convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void MacroAssemblerRiscv64Compat::int32ValueToFloat32(
+ const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+}
+
+void MacroAssemblerRiscv64Compat::loadConstantFloat32(float f,
+ FloatRegister dest) {
+ ma_lis(dest, f);
+}
+
+void MacroAssemblerRiscv64Compat::loadInt32OrDouble(const Address& src,
+ FloatRegister dest) {
+ Label notInt32, end;
+ // If it's an int, convert it to double.
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Register SecondScratchReg = temps.Acquire();
+ loadPtr(Address(src.base, src.offset), ScratchRegister);
+ srli(SecondScratchReg, ScratchRegister, JSVAL_TAG_SHIFT);
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+ loadPtr(Address(src.base, src.offset), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_branch(&end);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ unboxDouble(src, dest);
+ bind(&end);
+}
+
+void MacroAssemblerRiscv64Compat::loadInt32OrDouble(const BaseIndex& addr,
+ FloatRegister dest) {
+ Label notInt32, end;
+
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Register SecondScratchReg = temps.Acquire();
+ // If it's an int, convert it to double.
+ computeScaledAddress(addr, SecondScratchReg);
+ // Since we only have one scratch, we need to stomp over it with the tag.
+ loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
+ srli(SecondScratchReg, ScratchRegister, JSVAL_TAG_SHIFT);
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+ computeScaledAddress(addr, SecondScratchReg);
+ loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_branch(&end);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+  // First, recompute the address that had been stored in the scratch register,
+  // since the scratch register was overwritten when loading the type.
+ computeScaledAddress(addr, SecondScratchReg);
+ unboxDouble(Address(SecondScratchReg, 0), dest);
+ bind(&end);
+}
+
+void MacroAssemblerRiscv64Compat::loadConstantDouble(double dp,
+ FloatRegister dest) {
+ ma_lid(dest, dp);
+}
+
+Register MacroAssemblerRiscv64Compat::extractObject(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
+ ExtractBits(scratch, scratch, 0, JSVAL_TAG_SHIFT);
+ return scratch;
+}
+
+Register MacroAssemblerRiscv64Compat::extractTag(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
+ ExtractBits(scratch, scratch, JSVAL_TAG_SHIFT, 64 - JSVAL_TAG_SHIFT);
+ return scratch;
+}
+
+Register MacroAssemblerRiscv64Compat::extractTag(const BaseIndex& address,
+ Register scratch) {
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/LoongArch/MIPS interface.
+/////////////////////////////////////////////////////////////////
+void MacroAssemblerRiscv64Compat::storeValue(ValueOperand val,
+ const BaseIndex& dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ computeScaledAddress(dest, ScratchRegister);
+ storeValue(val, Address(ScratchRegister, dest.offset));
+}
+
+void MacroAssemblerRiscv64Compat::storeValue(JSValueType type, Register reg,
+ BaseIndex dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+
+ computeScaledAddress(dest, ScratchRegister);
+
+ int32_t offset = dest.offset;
+ if (!is_int12(offset)) {
+ UseScratchRegisterScope temps(this);
+ Register SecondScratchReg = temps.Acquire();
+ ma_li(SecondScratchReg, Imm32(offset));
+ add(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+
+ storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void MacroAssemblerRiscv64Compat::storeValue(ValueOperand val,
+ const Address& dest) {
+ storePtr(val.valueReg(), Address(dest.base, dest.offset));
+}
+
+void MacroAssemblerRiscv64Compat::storeValue(JSValueType type, Register reg,
+ Address dest) {
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ store32(reg, dest);
+ JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
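+    // On little-endian RISC-V the shifted tag lives entirely in the upper 32
+    // bits, so store its high half at offset + 4.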
+    store32(Imm64(tag).secondHalf(), Address(dest.base, dest.offset + 4));
+ } else {
+ ScratchRegisterScope SecondScratchReg(asMasm());
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+ ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ slli(SecondScratchReg, SecondScratchReg, JSVAL_TAG_SHIFT);
+ InsertBits(SecondScratchReg, reg, 0, JSVAL_TAG_SHIFT);
+ storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+ }
+}
+
+void MacroAssemblerRiscv64Compat::storeValue(const Value& val, Address dest) {
+ UseScratchRegisterScope temps(this);
+ Register SecondScratchReg = temps.Acquire();
+ if (val.isGCThing()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), SecondScratchReg);
+ } else {
+ ma_li(SecondScratchReg, ImmWord(val.asRawBits()));
+ }
+ storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+}
+
+void MacroAssemblerRiscv64Compat::storeValue(const Value& val, BaseIndex dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Register SecondScratchReg = temps.Acquire();
+ computeScaledAddress(dest, ScratchRegister);
+
+ int32_t offset = dest.offset;
+ if (!is_int12(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ add(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+ storeValue(val, Address(ScratchRegister, offset));
+}
+
+void MacroAssemblerRiscv64Compat::loadValue(const BaseIndex& addr,
+ ValueOperand val) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ computeScaledAddress(addr, ScratchRegister);
+ loadValue(Address(ScratchRegister, addr.offset), val);
+}
+
+void MacroAssemblerRiscv64Compat::loadValue(Address src, ValueOperand val) {
+ loadPtr(Address(src.base, src.offset), val.valueReg());
+}
+
+void MacroAssemblerRiscv64Compat::tagValue(JSValueType type, Register payload,
+ ValueOperand dest) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ MOZ_ASSERT(dest.valueReg() != ScratchRegister);
+ JitSpew(JitSpew_Codegen, "[ tagValue");
+ if (payload != dest.valueReg()) {
+ mv(dest.valueReg(), payload);
+ }
+ ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ InsertBits(dest.valueReg(), ScratchRegister, JSVAL_TAG_SHIFT,
+ 64 - JSVAL_TAG_SHIFT);
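+  // Int32 and Boolean payloads occupy only the low 32 bits, so clear the
+  // payload bits between bit 32 and the tag.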
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ InsertBits(dest.valueReg(), zero, 32, JSVAL_TAG_SHIFT - 32);
+ }
+ JitSpew(JitSpew_Codegen, "]");
+}
+
+void MacroAssemblerRiscv64Compat::pushValue(ValueOperand val) {
+  // Allocate stack space for the Value.
+ asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+ // Store Value
+ storeValue(val, Address(StackPointer, 0));
+}
+
+void MacroAssemblerRiscv64Compat::pushValue(const Address& addr) {
+  // Load the value before allocating stack space, since addr.base may be sp.
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ loadPtr(Address(addr.base, addr.offset), ScratchRegister);
+ ma_sub64(StackPointer, StackPointer, Imm32(sizeof(Value)));
+ storePtr(ScratchRegister, Address(StackPointer, 0));
+}
+
+void MacroAssemblerRiscv64Compat::popValue(ValueOperand val) {
+ ld(val.valueReg(), StackPointer, 0);
+ ma_add64(StackPointer, StackPointer, Imm32(sizeof(Value)));
+}
+
+void MacroAssemblerRiscv64Compat::breakpoint(uint32_t value) { break_(value); }
+
+void MacroAssemblerRiscv64Compat::ensureDouble(const ValueOperand& source,
+ FloatRegister dest,
+ Label* failure) {
+ Label isDouble, done;
+ {
+ ScratchTagScope tag(asMasm(), source);
+ splitTagForTest(source, tag);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+ }
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ unboxInt32(source, ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerRiscv64Compat::handleFailureWithHandlerTail(
+ Label* profilerExitTail, Label* bailoutTail) {
+ // Reserve space for exception information.
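+  // Round the reservation up to ABIStackAlignment so the stack stays aligned
+  // for the ABI call below.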
+ int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
+ ~(ABIStackAlignment - 1);
+ asMasm().subPtr(Imm32(size), StackPointer);
+  mv(a0, StackPointer); // Use a0 since it is the first function argument.
+
+ // Call the handler.
+ using Fn = void (*)(ResumeFromException* rfe);
+ asMasm().setupUnalignedABICall(a1);
+ asMasm().passABIArg(a0);
+ asMasm().callWithABI<Fn, HandleException>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ // Already clobbered a0, so use it...
+ load32(Address(StackPointer, ResumeFromException::offsetOfKind()), a0);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::WasmCatch),
+ &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+ // We're going to be returning by the Ion calling convention.
+ ma_pop(ra);
+ jump(ra);
+ nop();
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by the finally block: the exception and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1);
+ loadValue(Address(sp, ResumeFromException::offsetOfException()), exception);
+
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(sp, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp);
+
+ pushValue(exception);
+ pushValue(BooleanValue(true));
+ jump(a0);
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(StackPointer, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to
+ // caller frame before returning. This code is shared by ForcedReturnIon
+ // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ mv(StackPointer, FramePointer);
+ pop(FramePointer);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ loadPtr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), a2);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(ReturnReg, Imm32(1));
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(InstanceReg, ImmWord(wasm::FailInstanceReg));
+ ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a1);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a1);
+}
+
+CodeOffset MacroAssemblerRiscv64Compat::toggledJump(Label* label) {
+ CodeOffset ret(nextOffset().getOffset());
+ BranchShort(label);
+ return ret;
+}
+
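+// Emit a patchable call site: a patchable materialization of the target
+// address into a scratch register followed by either a jalr (enabled) or a
+// nop (disabled), so the final instruction can later be flipped between the
+// two.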
+CodeOffset MacroAssemblerRiscv64Compat::toggledCall(JitCode* target,
+ bool enabled) {
+ DEBUG_PRINTF("\ttoggledCall\n");
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ BufferOffset bo = nextOffset();
+ CodeOffset offset(bo.getOffset());
+ addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+ if (enabled) {
+ jalr(ScratchRegister);
+ } else {
+ nop();
+ }
+ MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
+ ToggledCallSize(nullptr));
+ return offset;
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+ asMasm().subPtr(imm32, StackPointer);
+ }
+}
+
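+// Clamp a double to [0, 255]: NaN produces 0, any other value is rounded to
+// an integer via Round_w_d and then clamped by clampIntToUint8.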
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+ JitSpew(JitSpew_Codegen, "[ clampDoubleToUint8");
+ Label nan, done;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ feq_d(scratch, input, input);
+ beqz(scratch, &nan);
+ addi(zero, scratch, 0x11);
+ Round_w_d(output, input);
+ clampIntToUint8(output);
+ ma_branch(&done);
+ // Input is NaN.
+ bind(&nan);
+ mv(output, zero_reg);
+ bind(&done);
+ JitSpew(JitSpew_Codegen, "]");
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+CodeOffset MacroAssembler::call(Label* label) {
+ BranchAndLink(label);
+ return CodeOffset(currentOffset());
+}
+CodeOffset MacroAssembler::call(Register reg) {
+ jalr(reg, 0);
+ return CodeOffset(currentOffset());
+}
+CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+ movePtr(target, CallReg);
+ return call(CallReg);
+}
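+// Emit a far jump whose 32-bit displacement lives in code and is filled in by
+// patchFarJump(). The five-word sequence is, in outline:
+//
+//   auipc scratch, 0             // scratch = address of this sequence
+//   lw    scratch2, 16(scratch)  // load the 32-bit offset stored below
+//   add   scratch, scratch, scratch2
+//   jr    scratch
+//   .word 0xffffffff             // placeholder patched by patchFarJump()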
+CodeOffset MacroAssembler::farJumpWithPatch() {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ // Allocate space which will be patched by patchFarJump().
+ CodeOffset farJump(nextInstrOffset(5).getOffset());
+ auipc(scratch, 0);
+ lw(scratch2, scratch, 4 * sizeof(Instr));
+ add(scratch, scratch, scratch2);
+ jr(scratch, 0);
+ spew(".space 32bit initValue 0xffff ffff");
+ emit(UINT32_MAX);
+ return farJump;
+}
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ return movWithPatch(ImmPtr(nullptr), dest);
+}
+CodeOffset MacroAssembler::nopPatchableToCall() {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 7);
+ // riscv64
+ nop(); // lui(rd, (int32_t)high_20);
+ nop(); // addi(rd, rd, low_12); // 31 bits in rd.
+ nop(); // slli(rd, rd, 11); // Space for next 11 bits
+ nop(); // ori(rd, rd, b11); // 11 bits are put in; 42 bits in rd
+ nop(); // slli(rd, rd, 6); // Space for next 6 bits
+ nop(); // ori(rd, rd, a6); // 6 bits are put in; 48 bits in rd
+ nop(); // jalr
+ return CodeOffset(currentOffset());
+}
+CodeOffset MacroAssembler::wasmTrapInstruction() {
+ CodeOffset offset(currentOffset());
+ BlockTrampolinePoolScope block_trampoline_pool(this, 2);
+ break_(kWasmTrapCode); // TODO: teq(zero, zero, WASM_TRAP)
+ return offset;
+}
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+}
+
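+// Branch if `value` is (or is not) a GC thing stored in the nursery: non-GC
+// values are filtered out first, then the containing chunk's store-buffer
+// pointer is loaded and tested against null.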
+template <typename T>
+void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
+ const T& value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ Label done;
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+
+ // temp may be InvalidReg, use scratch2 instead.
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+
+ getGCThingValueChunk(value, scratch2);
+ loadPtr(Address(scratch2, gc::ChunkStoreBufferOffset), scratch2);
+ branchPtr(InvertCondition(cond), scratch2, ImmWord(0), label);
+
+ bind(&done);
+}
+
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ boxDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ if (value.constant()) {
+ storeValue(value.value(), dest);
+ } else {
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
+ dest);
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
+ CodeLabel cl;
+
+ ma_li(scratch, &cl);
+ Push(scratch);
+ bind(&cl);
+ uint32_t retAddr = currentOffset();
+
+ addCodeLabel(cl);
+ return retAddr;
+}
+
+//===============================
+// AtomicOp
+
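+// Sub-word (8- and 16-bit) atomics are emulated on top of 32-bit LR.W/SC.W:
+// the address is aligned down to its containing word, the byte offset becomes
+// a bit shift, and a mask selects the lane being operated on. In outline:
+//
+//   word  = addr & ~3;  shift = (addr & 3) * 8;
+//   mask  = ~(laneMask << shift);        // keeps the other lanes
+//   do {
+//     old = LR.W(word);
+//     new = (old & mask) | (lane << shift);
+//   } while (!SC.W(word, new));
+//   result = (old >> shift), sign- or zero-extended to the access size.
+//
+// 32- and 64-bit accesses use LR.W/SC.W and LR.D/SC.D directly.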
+template <typename T>
+static void AtomicExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ ScratchRegisterScope scratch(masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_w(true, true, output, scratch);
+ masm.or_(scratch2, value, zero);
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ masm.andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.slliw(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.sllw(maskTemp, maskTemp, offsetTemp);
+ masm.nor(maskTemp, zero, maskTemp);
+ switch (nbytes) {
+ case 1:
+ masm.andi(valueTemp, value, 0xff);
+ break;
+ case 2:
+ masm.ma_and(valueTemp, value, Imm32(0xffff));
+ break;
+ }
+ masm.sllw(valueTemp, valueTemp, offsetTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_w(true, true, output, scratch);
+ masm.and_(scratch2, output, maskTemp);
+ masm.or_(scratch2, scratch2, valueTemp);
+
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.srlw(output, output, offsetTemp);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.slliw(output, output, 32 - 8);
+ masm.sraiw(output, output, 32 - 8);
+ } else {
+ masm.andi(output, output, 0xff); // Zero-extend the loaded byte.
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.slliw(output, output, 32 - 16);
+ masm.sraiw(output, output, 32 - 16);
+ } else {
+ masm.ma_and(output, output, Imm32(0xffff)); // Zero-extend the loaded halfword.
+ }
+ break;
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
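+// 64-bit atomic exchange: LR.D the old value into `output`, then SC.D the new
+// value, retrying until the store-conditional succeeds.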
+template <typename T>
+static void AtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 value, Register64 output) {
+ MOZ_ASSERT(value != output);
+ UseScratchRegisterScope temps(&masm);
+ Register SecondScratchReg = temps.Acquire();
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_d(true, true, output.reg, SecondScratchReg);
+ masm.movePtr(value.reg, ScratchRegister);
+ masm.sc_d(true, true, ScratchRegister, SecondScratchReg, ScratchRegister);
+ masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
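+// 64-bit atomic fetch-and-op: LR.D the old value into `output`, apply `op`
+// into `temp`, and SC.D `temp`, retrying until the store-conditional succeeds.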
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, AtomicOp op,
+ Register64 value, const T& mem, Register64 temp,
+ Register64 output) {
+ MOZ_ASSERT(value != output);
+ MOZ_ASSERT(value != temp);
+ UseScratchRegisterScope temps(&masm);
+ Register SecondScratchReg = temps.Acquire();
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_d(true, true, output.reg, SecondScratchReg);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.add(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchSubOp:
+ masm.sub(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchAndOp:
+ masm.and_(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchOrOp:
+ masm.or_(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchXorOp:
+ masm.xor_(temp.reg, output.reg, value.reg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.sc_d(true, true, temp.reg, SecondScratchReg, temp.reg);
+ masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::NonZero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
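+// Like AtomicFetchOp below, but the old value is not needed, so no output
+// register is produced.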
+template <typename T>
+static void AtomicEffectOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ ScratchRegisterScope scratch(masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_w(true, true, scratch2, scratch);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.addw(scratch2, scratch2, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.subw(scratch2, scratch2, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.and_(scratch2, scratch2, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.or_(scratch2, scratch2, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.xor_(scratch2, scratch2, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ masm.andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.slliw(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.sllw(maskTemp, maskTemp, offsetTemp);
+ masm.nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_w(true, true, scratch2, scratch);
+ masm.srlw(valueTemp, scratch2, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.addw(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.subw(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.and_(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.or_(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.xor_(valueTemp, valueTemp, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ switch (nbytes) {
+ case 1:
+ masm.andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ masm.ma_and(valueTemp, valueTemp, Imm32(0xffff));
+ break;
+ }
+
+ masm.sllw(valueTemp, valueTemp, offsetTemp);
+
+ masm.and_(scratch2, scratch2, maskTemp);
+ masm.or_(scratch2, scratch2, valueTemp);
+
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
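+// Atomic fetch-and-op using the same word/LL-SC strategy as AtomicExchange
+// above; `output` receives the old lane value, sign- or zero-extended.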
+template <typename T>
+static void AtomicFetchOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ ScratchRegisterScope scratch(masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_w(true, true, output, scratch);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.addw(scratch2, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.subw(scratch2, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.and_(scratch2, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.or_(scratch2, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.xor_(scratch2, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ masm.andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.slliw(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.sllw(maskTemp, maskTemp, offsetTemp);
+ masm.nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_w(true, true, scratch2, scratch);
+ masm.srlw(output, scratch2, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.addw(valueTemp, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.subw(valueTemp, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.and_(valueTemp, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.or_(valueTemp, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.xor_(valueTemp, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ switch (nbytes) {
+ case 1:
+ masm.andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ masm.ma_and(valueTemp, valueTemp, Imm32(0xffff));
+ break;
+ }
+
+ masm.sllw(valueTemp, valueTemp, offsetTemp);
+
+ masm.and_(scratch2, scratch2, maskTemp);
+ masm.or_(scratch2, scratch2, valueTemp);
+
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.slliw(output, output, 32 - 8);
+ masm.sraiw(output, output, 32 - 8);
+ } else {
+ masm.andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.slliw(output, output, 32 - 16);
+ masm.sraiw(output, output, 32 - 16);
+ } else {
+ masm.ma_and(output, output, Imm32(0xffff));
+ }
+ break;
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
+// ========================================================================
+// JS atomic operations.
+
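+// For JS typed-array atomics, a Uint32 result may not fit in an int32, so it
+// is produced in `temp` and converted to a double in `output.fpu()`; all other
+// element types return their result directly in `output.gpr()`.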
+template <typename T>
+static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output.gpr());
+ }
+}
+
+template <typename T>
+static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const T& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != ScratchRegister); // Both may be used internally.
+ MOZ_ASSERT(temp != ScratchRegister);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ma_and(temp, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
+ zero, label);
+}
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(lhs.valueReg() != scratch);
+ moveValue(rhs, ValueOperand(scratch));
+ ma_b(lhs.valueReg(), scratch, label, cond);
+}
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ branchValueIsNurseryCellImpl(cond, address, temp, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ branchValueIsNurseryCellImpl(cond, value, temp, label);
+}
+void MacroAssembler::call(const Address& addr) {
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+ loadPtr(addr, CallReg);
+ call(CallReg);
+}
+void MacroAssembler::call(ImmPtr target) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, target, RelocationKind::HARDCODED);
+ ma_call(target);
+}
+void MacroAssembler::call(ImmWord target) { call(ImmPtr((void*)target.value)); }
+
+void MacroAssembler::call(JitCode* c) {
+ DEBUG_PRINTF("[ %s\n", __FUNCTION__);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(c->raw()));
+ callJitNoProfiler(scratch);
+ DEBUG_PRINTF("]\n");
+}
+
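+// Compute and reserve the outgoing stack area (including a slot to preserve
+// $ra), emit the pending argument moves, and check stack alignment before the
+// ABI call; callWithABIPost undoes this.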
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ // Reserve space for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Save $ra because call is going to clobber it. Restore it in
+ // callWithABIPost. NOTE: This is needed for calls from SharedIC.
+ // Maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(asMasm());
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+ // Load the callee into CallReg; no instruction between the movePtr and the
+ // call should clobber it. Note that we can't call through fun directly
+ // because it may be one of the IntArg registers clobbered before the call.
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+ movePtr(fun, CallReg);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(CallReg);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ // Load the callee into CallReg, as above.
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+ loadPtr(fun, CallReg);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(CallReg);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ UseScratchRegisterScope temps(this);
+ ScratchDoubleScope fscratch(*this);
+ Label performCeil, done;
+ // If x < -1 or x > 0 then perform ceil.
+ loadConstantDouble(0, fscratch);
+ branchDouble(Assembler::DoubleGreaterThan, src, fscratch, &performCeil);
+ loadConstantDouble(-1.0, fscratch);
+ branchDouble(Assembler::DoubleLessThanOrEqual, src, fscratch, &performCeil);
+
+ Register scratch = temps.Acquire();
+ // Only inputs in (-1, 0] (or NaN) reach here. If the high 32 bits are
+ // non-zero, the input was not +0 (it was -0, a fraction, or NaN), so we bail.
+ {
+ moveFromDoubleHi(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ bind(&performCeil);
+ Ceil_w_d(dest, src, scratch);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ bind(&done);
+}
+
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ UseScratchRegisterScope temps(this);
+ ScratchDoubleScope fscratch(*this);
+ Label performCeil, done;
+ // If x < -1 or x > 0 then perform ceil.
+ loadConstantFloat32(0, fscratch);
+ branchFloat(Assembler::DoubleGreaterThan, src, fscratch, &performCeil);
+ loadConstantFloat32(-1.0, fscratch);
+ branchFloat(Assembler::DoubleLessThanOrEqual, src, fscratch, &performCeil);
+
+ Register scratch = temps.Acquire();
+ // Only inputs in (-1, 0] (or NaN) reach here. If the bit pattern is
+ // non-zero, the input was not +0 (it was -0, a fraction, or NaN), so we bail.
+ {
+ fmv_x_w(scratch, src);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+ bind(&performCeil);
+ Ceil_w_s(dest, src, scratch);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ bind(&done);
+}
+void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
+
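+// 64-bit compare-and-exchange: LR.D the old value into `output`, exit early if
+// it differs from `expect`, otherwise SC.D `replace` and retry until the
+// store-conditional succeeds.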
+template <typename T>
+static void CompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
+ MOZ_ASSERT(expect != output && replace != output);
+ ScratchRegisterScope scratch(masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.computeEffectiveAddress(mem, scratch);
+
+ Label tryAgain;
+ Label exit;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_d(true, true, output.reg, scratch);
+
+ masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
+ masm.movePtr(replace.reg, scratch2);
+ masm.sc_d(true, true, scratch2, scratch, scratch2);
+ masm.ma_b(scratch2, Register(scratch2), &tryAgain, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&exit);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const Address& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ fcvt_d_l(dest, src.scratchReg());
+}
+void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
+ fcvt_s_l(dest, src.scratchReg());
+}
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ fcvt_d_l(dest, src);
+}
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register tmp) {
+ fcvt_d_lu(dest, src.scratchReg());
+}
+void MacroAssembler::convertUInt64ToFloat32(Register64 src, FloatRegister dest,
+ Register tmp) {
+ fcvt_s_lu(dest, src.scratchReg());
+}
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ fsgnj_d(output, lhs, rhs);
+}
+void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ enterFakeExitFrame(cxreg, scratch, type);
+}
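+// Note: the remainder is computed before the quotient because srcDest is both
+// an input and the quotient's destination.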
+void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
+ Register remOutput, bool isUnsigned,
+ const LiveRegisterSet&) {
+ if (isUnsigned) {
+ ma_modu32(remOutput, srcDest, rhs);
+ ma_divu32(srcDest, srcDest, rhs);
+ } else {
+ ma_mod32(remOutput, srcDest, rhs);
+ ma_div32(srcDest, srcDest, rhs);
+ }
+}
+void MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ quotient32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ remainder32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ JitSpew(JitSpew_Codegen, "[ %s", __FUNCTION__);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Floor_w_d(dest, src, scratch);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ fmv_x_d(scratch, src);
+ ma_branch(fail, Equal, scratch, Operand(0x8000000000000000));
+ JitSpew(JitSpew_Codegen, "]");
+}
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ JitSpew(JitSpew_Codegen, "[ %s", __FUNCTION__);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Floor_w_s(dest, src, scratch);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ fmv_x_w(scratch, src);
+ ma_branch(fail, Equal, scratch, Operand(int32_t(0x80000000)));
+ JitSpew(JitSpew_Codegen, "]");
+}
+void MacroAssembler::flush() {}
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ ma_and(buffer, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
+ return;
+ }
+
+ ScratchDoubleScope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ if (src == dest) {
+ return;
+ }
+ movePtr(src.valueReg(), dest.valueReg());
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ if (!src.isGCThing()) {
+ ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
+ return;
+ }
+
+ writeDataRelocation(src);
+ movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
+}
+void MacroAssembler::nearbyIntDouble(RoundingMode, FloatRegister,
+ FloatRegister) {
+ MOZ_CRASH("not supported on this platform");
+}
+void MacroAssembler::nearbyIntFloat32(RoundingMode, FloatRegister,
+ FloatRegister) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNotNanF32(scratch, input, input);
+ ma_branch(&notNaN, Equal, scratch, Operand(1));
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const float two_31 = -float(INT32_MIN);
+ ScratchFloat32Scope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantFloat32(two_31 * 2, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantFloat32(-1.0f, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ } else {
+ loadConstantFloat32(two_31, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantFloat32(-two_31, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNotNanF64(scratch, input, input);
+ ma_branch(&notNaN, Equal, scratch, Operand(1));
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const double two_31 = -double(INT32_MIN);
+ ScratchDoubleScope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantDouble(two_31 * 2, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantDouble(-1.0, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ } else {
+ loadConstantDouble(two_31, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantDouble(-two_31 - 1, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNotNanF32(scratch, input, input);
+ ma_branch(&notNaN, Equal, scratch, Operand(1));
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const float two_63 = -float(INT64_MIN);
+ ScratchFloat32Scope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantFloat32(two_63 * 2, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantFloat32(-1.0f, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ } else {
+ loadConstantFloat32(two_63, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantFloat32(-two_63, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNotNanF64(scratch, input, input);
+ ma_branch(&notNaN, Equal, scratch, Operand(1));
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const double two_63 = -double(INT64_MIN);
+ ScratchDoubleScope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantDouble(two_63 * 2, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantDouble(-1.0, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ } else {
+ loadConstantDouble(two_63, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantDouble(-two_63, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
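+// The patchable call site produced by nopPatchableToCall() is seven
+// instructions: six that materialize the 48-bit target into ScratchRegister
+// and a final jalr. patchNopToCall() writes that sequence; patchCallToNop()
+// turns all seven instructions back into nops.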
+void MacroAssembler::patchCallToNop(uint8_t* call) {
+ uint32_t* p = reinterpret_cast<uint32_t*>(call) - 7;
+ *reinterpret_cast<Instr*>(p) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 1) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 2) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 3) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 4) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 5) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 6) = kNopByte;
+}
+
+CodeOffset MacroAssembler::callWithPatch() {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 2);
+ DEBUG_PRINTF("\tcallWithPatch\n");
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ int32_t imm32 = 1 * sizeof(uint32_t);
+ int32_t Hi20 = ((imm32 + 0x800) >> 12);
+ int32_t Lo12 = imm32 << 20 >> 20;
+ auipc(scratch, Hi20); // scratch = PC + (Hi20 << 12).
+ jalr(scratch, Lo12);  // Jump to scratch + Lo12, i.e. PC + imm32.
+ DEBUG_PRINTF("\tret %d\n", currentOffset());
+ return CodeOffset(currentOffset());
+}
+
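+// patchCall() rewrites the auipc/jalr pair emitted by callWithPatch() so that
+// it targets calleeOffset; the displacement must fit in the pair's signed
+// 32-bit range, otherwise we crash.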
+void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ DEBUG_PRINTF("\tpatchCall\n");
+ BufferOffset call(callerOffset - 2 * sizeof(uint32_t));
+ DEBUG_PRINTF("\tcallerOffset %d\n", callerOffset);
+ int32_t offset = BufferOffset(calleeOffset).getOffset() - call.getOffset();
+ if (is_int32(offset)) {
+ Instruction* auipc_ = (Instruction*)editSrc(call);
+ Instruction* jalr_ = (Instruction*)editSrc(
+ BufferOffset(callerOffset - 1 * sizeof(uint32_t)));
+ DEBUG_PRINTF("\t%p %lu\n\t", auipc_, callerOffset - 2 * sizeof(uint32_t));
+ disassembleInstr(auipc_->InstructionBits());
+ DEBUG_PRINTF("\t%p %lu\n\t", jalr_, callerOffset - 1 * sizeof(uint32_t));
+ disassembleInstr(jalr_->InstructionBits());
+ DEBUG_PRINTF("\t\n");
+ MOZ_ASSERT(IsJalr(jalr_->InstructionBits()) &&
+ IsAuipc(auipc_->InstructionBits()));
+ MOZ_ASSERT(auipc_->RdValue() == jalr_->Rs1Value());
+ int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)offset << 20 >> 20;
+ instr_at_put(call, SetAuipcOffset(Hi20, auipc_->InstructionBits()));
+ instr_at_put(BufferOffset(callerOffset - 1 * sizeof(uint32_t)),
+ SetJalrOffset(Lo12, jalr_->InstructionBits()));
+ } else {
+ MOZ_CRASH();
+ }
+}
+
+void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(
+ editSrc(BufferOffset(farJump.offset() + 4 * kInstrSize)));
+ MOZ_ASSERT(*u32 == UINT32_MAX);
+ *u32 = targetOffset - farJump.offset();
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
+}
+
+void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
+ uint32_t* p = reinterpret_cast<uint32_t*>(call) - 7;
+ Assembler::WriteLoad64Instructions((Instruction*)p, ScratchRegister,
+ (uint64_t)target);
+ DEBUG_PRINTF("\tpatchNopToCall %lu %lu\n", (uint64_t)target,
+ ExtractLoad64Value((Instruction*)p));
+ MOZ_ASSERT(ExtractLoad64Value((Instruction*)p) == (uint64_t)target);
+ Instr jalr_ = JALR | (ra.code() << kRdShift) | (0x0 << kFunct3Shift) |
+ (ScratchRegister.code() << kRs1Shift) | (0x0 << kImm12Shift);
+ *reinterpret_cast<Instr*>(p + 6) = jalr_;
+}
+void MacroAssembler::Pop(Register reg) {
+ ma_pop(reg);
+ adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Pop(FloatRegister f) {
+ ma_pop(f);
+ adjustFrame(-int32_t(sizeof(double)));
+}
+
+void MacroAssembler::Pop(const ValueOperand& val) {
+ popValue(val);
+ adjustFrame(-int32_t(sizeof(Value)));
+}
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ if (!ignore.has(*iter)) {
+ loadPtr(Address(StackPointer, diff), *iter);
+ }
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ if (!ignore.has(*iter)) {
+ loadDouble(Address(StackPointer, diff), *iter);
+ }
+ }
+ MOZ_ASSERT(diff == 0);
+ freeStack(reserved);
+}
+
+void MacroAssembler::pushReturnAddress() { push(ra); }
+
+void MacroAssembler::popReturnAddress() { pop(ra); }
+void MacroAssembler::PopStackPtr() {
+ loadPtr(Address(StackPointer, 0), StackPointer);
+ adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+void MacroAssembler::PushBoxed(FloatRegister reg) {
+ subFromStackPtr(Imm32(sizeof(double)));
+ boxDouble(reg, Address(getStackPointer(), 0));
+ adjustFrame(sizeof(double));
+}
+
+void MacroAssembler::Push(Register reg) {
+ ma_push(reg);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const Imm32 imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const ImmWord imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const ImmPtr imm) {
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssembler::Push(const ImmGCPtr ptr) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, ptr);
+ ma_push(scratch);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(FloatRegister f) {
+ ma_push(f);
+ adjustFrame(int32_t(sizeof(double)));
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ reserveStack(reserved);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diff));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ storeDouble(*iter, Address(StackPointer, diff));
+ }
+ MOZ_ASSERT(diff == 0);
+}
+
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ JitSpew(JitSpew_Codegen, "[ %s", __FUNCTION__);
+ ScratchDoubleScope fscratch(*this);
+ Label negative, done;
+ // Branch to a slow path if input < 0.0 due to complicated rounding rules.
+ // Note that Fcmp with NaN unsets the negative flag.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ fmv_x_w(scratch, src);
+ ma_branch(fail, Equal, scratch, Operand(int32_t(0x80000000)));
+ fmv_w_x(temp, zero);
+ ma_compareF32(scratch, DoubleLessThan, src, temp);
+ ma_branch(&negative, Equal, scratch, Operand(1));
+ }
+ // Handle the simple case of a positive input, and also -0 and NaN.
+ // Rounding proceeds with consideration of the fractional part of the input:
+ // 1. If > 0.5, round to integer with higher absolute value (so, up).
+ // 2. If < 0.5, round to integer with lower absolute value (so, down).
+ // 3. If = 0.5, round to +Infinity (so, up).
+ {
+ // Convert to signed 32-bit integer, rounding halfway cases away from zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RoundFloatingPointToInteger(
+ dest, src, scratch,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RMM);
+ },
+ false);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ jump(&done);
+ }
+
+ // Handle the complicated case of a negative input.
+ // Rounding proceeds with consideration of the fractional part of the input:
+ // 1. If > 0.5, round to integer with higher absolute value (so, down).
+ // 2. If < 0.5, round to integer with lower absolute value (so, up).
+ // 3. If = 0.5, round to +Infinity (so, up).
+ bind(&negative);
+ {
+ // Inputs in [-0.5, 0) need 0.5 added; other negative inputs need
+ // the biggest double less than 0.5.
+ Label join;
+ loadConstantFloat32(GetBiggestNumberLessThan(0.5), temp);
+ loadConstantFloat32(-0.5, fscratch);
+ branchFloat(Assembler::DoubleLessThan, src, fscratch, &join);
+ loadConstantFloat32(0.5, temp);
+ bind(&join);
+ addFloat32(src, temp);
+ // Round all values toward -Infinity.
+ // In the case of overflow, the output is saturated.
+ // NaN and -0 are already handled by the "positive number" path above.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RoundFloatingPointToInteger(
+ dest, temp, scratch,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RDN);
+ },
+ false);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ // If output is zero, then the actual result is -0. Fail.
+ branchTest32(Assembler::Zero, dest, dest, fail);
+ }
+ bind(&done);
+ JitSpew(JitSpew_Codegen, "]");
+}
+
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ JitSpew(JitSpew_Codegen, "[ %s", __FUNCTION__);
+
+ ScratchDoubleScope fscratch(*this);
+ Label negative, done;
+ // Branch to a slow path if input < 0.0 due to complicated rounding rules.
+ // Note that Fcmp with NaN unsets the negative flag.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ fmv_x_d(scratch, src);
+ ma_branch(fail, Equal, scratch, Operand(0x8000000000000000));
+ fmv_d_x(temp, zero);
+ ma_compareF64(scratch, DoubleLessThan, src, temp);
+ ma_branch(&negative, Equal, scratch, Operand(1));
+ }
+ // Handle the simple case of a positive input, and also -0 and NaN.
+ // Rounding proceeds with consideration of the fractional part of the input:
+ // 1. If > 0.5, round to integer with higher absolute value (so, up).
+ // 2. If < 0.5, round to integer with lower absolute value (so, down).
+ // 3. If = 0.5, round to +Infinity (so, up).
+ {
+ // Convert to signed 32-bit integer, rounding halfway cases away from zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RoundFloatingPointToInteger(
+ dest, src, scratch,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RMM);
+ },
+ false);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ jump(&done);
+ }
+
+ // Handle the complicated case of a negative input.
+ // Rounding proceeds with consideration of the fractional part of the input:
+ // 1. If > 0.5, round to integer with higher absolute value (so, down).
+ // 2. If < 0.5, round to integer with lower absolute value (so, up).
+ // 3. If = 0.5, round to +Infinity (so, up).
+ bind(&negative);
+ {
+ // Inputs in [-0.5, 0) need 0.5 added; other negative inputs need
+ // the biggest double less than 0.5.
+ Label join;
+ loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+ loadConstantDouble(-0.5, fscratch);
+ branchDouble(Assembler::DoubleLessThan, src, fscratch, &join);
+ loadConstantDouble(0.5, temp);
+ bind(&join);
+ addDouble(src, temp);
+ // Round all values toward -Infinity.
+ // In the case of overflow, the output is saturated.
+ // NaN and -0 are already handled by the "positive number" path above.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RoundFloatingPointToInteger(
+ dest, temp, scratch,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RDN);
+ },
+ false);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ // If output is zero, then the actual result is -0. Fail.
+ branchTest32(Assembler::Zero, dest, dest, fail);
+ }
+ bind(&done);
+ JitSpew(JitSpew_Codegen, "]");
+}
+
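+// For unaligned ABI calls the old stack pointer is saved in `scratch`, the
+// stack is aligned down to ABIStackAlignment, and the old value is stored at
+// the new top of stack so callWithABIPost() can restore it.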
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ or_(scratch, StackPointer, zero);
+
+ // Force sp to be aligned
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+ storePtr(scratch, Address(StackPointer, 0));
+}
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+ Register pointer) {
+ if (IsShiftInScaleRange(shift)) {
+ computeEffectiveAddress(
+ BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
+ return;
+ }
+ lshift32(Imm32(shift), indexTemp32);
+ addPtr(indexTemp32, pointer);
+}
+void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register) {
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ unsigned numFpu = fpuSet.size();
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ MOZ_ASSERT(dest.offset >= diffG + diffF);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ dest.offset -= sizeof(intptr_t);
+ storePtr(*iter, dest);
+ }
+ MOZ_ASSERT(diffG == 0);
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ dest.offset -= reg.size();
+ if (reg.isDouble()) {
+ storeDouble(reg, dest);
+ } else if (reg.isSingle()) {
+ storeFloat32(reg, dest);
+ } else {
+ MOZ_CRASH("Unknown register type.");
+ }
+ }
+ MOZ_ASSERT(numFpu == 0);
+ diffF -= diffF % sizeof(uintptr_t);
+ MOZ_ASSERT(diffF == 0);
+}
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Label zeroCase, done;
+ // Convert scalar to signed 32-bit fixed-point, rounding toward zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ RoundFloatingPointToInteger(
+ dest, src, scratch,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RTZ);
+ },
+ false);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ // If the output was zero, worry about special cases.
+ branch32(Assembler::Equal, dest, Imm32(0), &zeroCase);
+ jump(&done);
+ // Handle the case of a zero output:
+ // 1. The input may have been NaN, requiring a failure.
+ // 2. The input may have been in (-1,-0], requiring a failure.
+ // 3. +0, return 0.
+ {
+ bind(&zeroCase);
+
+ // If input is a negative number that truncated to zero, the real
+ // output should be the non-integer -0.
+    // An ordered less-than compare against +0.0 detects such negative inputs.
+ ScratchDoubleScope fscratch(*this);
+ fmv_d_x(fscratch, zero);
+ ma_compareF64(scratch, DoubleLessThan, src, fscratch);
+ ma_b(scratch, Imm32(1), fail, Equal);
+
+ // Check explicitly for -0, bitwise.
+ fmv_x_d(dest, src);
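+    // Moving the raw bits to a GPR lets a signed test on the sign bit
+    // distinguish -0.0 (which must fail) from +0.0.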
+ branchTestPtr(Assembler::Signed, dest, dest, fail);
+ movePtr(ImmWord(0), dest);
+ }
+
+ bind(&done);
+}
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Label zeroCase, done;
+ // Convert scalar to signed 32-bit fixed-point, rounding toward zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ RoundFloatingPointToInteger(
+ dest, src, scratch,
+ [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RTZ);
+ },
+ false);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ // If the output was zero, worry about special cases.
+ branch32(Assembler::Equal, dest, Imm32(0), &zeroCase);
+ jump(&done);
+ // Handle the case of a zero output:
+ // 1. The input may have been NaN, requiring a failure.
+ // 2. The input may have been in (-1,-0], requiring a failure.
+ // 3. +0, return 0.
+ {
+ bind(&zeroCase);
+
+ // If input is a negative number that truncated to zero, the real
+ // output should be the non-integer -0.
+    // An ordered less-than compare against +0.0 detects such negative inputs.
+ ScratchDoubleScope fscratch(*this);
+ fmv_w_x(fscratch, zero);
+ ma_compareF32(scratch, DoubleLessThan, src, fscratch);
+ ma_b(scratch, Imm32(1), fail, Equal);
+
+ // Check explicitly for -0, bitwise.
+ fmv_x_w(dest, src);
+ branchTestPtr(Assembler::Signed, dest, dest, fail);
+ movePtr(ImmWord(0), dest);
+ }
+
+ bind(&done);
+}
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp);
+}
+template <typename T>
+static void WasmAtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ const T& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(masm, &access, access.sync(), mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ ma_b(index, boundsCheckLimit, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(boundsCheckLimit, scratch2);
+ ma_b(index, Register(scratch2), ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ ma_b(index.reg, boundsCheckLimit.reg, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(boundsCheckLimit, scratch2);
+ ma_b(index.reg, scratch2, ok, cond);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+template <typename T>
+static void CompareExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again, end;
+ UseScratchRegisterScope temps(&masm);
+ Register SecondScratchReg = temps.Acquire();
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_w(true, true, output, SecondScratchReg);
+ masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
+ masm.mv(ScratchRegister, newval);
+ masm.sc_w(true, true, ScratchRegister, SecondScratchReg, ScratchRegister);
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+ masm.bind(&end);
+
+ return;
+ }
+
+ masm.andi(offsetTemp, SecondScratchReg, 3);
+ masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN()
+ masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+ masm.slli(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.sll(maskTemp, maskTemp, offsetTemp);
+ masm.nor(maskTemp, zero, maskTemp);
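+  // Illustration (little-endian, 16-bit access whose address has low bits
+  // 0b10): offsetTemp becomes 16 and maskTemp becomes ~(0xffff << 16), so
+  // the LR/SC loop below updates only the targeted half-word of the
+  // containing aligned word.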
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_w(true, true, ScratchRegister, SecondScratchReg);
+
+ masm.srl(output, ScratchRegister, offsetTemp);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.SignExtendByte(valueTemp, oldval);
+ masm.SignExtendByte(output, output);
+ } else {
+ masm.andi(valueTemp, oldval, 0xff);
+ masm.andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.SignExtendShort(valueTemp, oldval);
+ masm.SignExtendShort(output, output);
+ } else {
+ masm.andi(valueTemp, oldval, 0xffff);
+ masm.andi(output, output, 0xffff);
+ }
+ break;
+ }
+
+ masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);
+
+ masm.sll(valueTemp, newval, offsetTemp);
+ masm.and_(ScratchRegister, ScratchRegister, maskTemp);
+ masm.or_(ScratchRegister, ScratchRegister, valueTemp);
+ masm.sc_w(true, true, ScratchRegister, SecondScratchReg, ScratchRegister);
+
+ masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&end);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, AnyRegister output) {
+ wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output) {
+ wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreImpl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssemblerRiscv64::Clear_if_nan_d(Register rd, FPURegister fs) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Label no_nan;
+ feq_d(ScratchRegister, fs, fs);
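+  // feq_d(x, x) is 0 exactly when x is NaN, so rd is cleared only for NaN
+  // input (the saturating wasm truncations below rely on this to return 0
+  // for NaN).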
+ bnez(ScratchRegister, &no_nan);
+ mv(rd, zero_reg);
+ bind(&no_nan);
+}
+
+void MacroAssemblerRiscv64::Clear_if_nan_s(Register rd, FPURegister fs) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Label no_nan;
+ feq_s(ScratchRegister, fs, fs);
+ bnez(ScratchRegister, &no_nan);
+ mv(rd, zero_reg);
+ bind(&no_nan);
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Trunc_w_d(output, input, ScratchRegister);
+ if (isSaturating) {
+ Clear_if_nan_d(output, input);
+ } else {
+ ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Trunc_l_d(output.reg, input, ScratchRegister);
+ if (isSaturating) {
+ bind(oolRejoin);
+ Clear_if_nan_d(output.reg, input);
+ } else {
+ ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Trunc_uw_d(output, input, ScratchRegister);
+ if (isSaturating) {
+ Clear_if_nan_d(output, input);
+ } else {
+ ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Trunc_ul_d(output.reg, input, ScratchRegister);
+ if (isSaturating) {
+ bind(oolRejoin);
+ Clear_if_nan_d(output.reg, input);
+ } else {
+ ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Trunc_w_s(output, input, ScratchRegister);
+ if (isSaturating) {
+ Clear_if_nan_s(output, input);
+ } else {
+ ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Trunc_l_s(output.reg, input, ScratchRegister);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ Clear_if_nan_s(output.reg, input);
+ } else {
+ ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Trunc_uw_s(output, input, ScratchRegister);
+ if (isSaturating) {
+ Clear_if_nan_s(output, input);
+ } else {
+ ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ Trunc_ul_s(output.reg, input, ScratchRegister);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ Clear_if_nan_s(output.reg, input);
+ } else {
+ ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+ }
+}
+
+// TODO(riscv64): widenInt32 should be nop?
+void MacroAssembler::widenInt32(Register r) {
+ move32To64SignExtend(r, Register64(r));
+}
+
+//}}} check_macroassembler_style
+
+// These methods generate a patchable immediate-load sequence (via
+// li_constant/li_ptr) that can be modified later, either during compilation
+// (e.g. Assembler::bind) or during execution (e.g. jit::PatchJump).
+void MacroAssemblerRiscv64::ma_liPatchable(Register dest, Imm32 imm) {
+ return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssemblerRiscv64::ma_liPatchable(Register dest, ImmPtr imm) {
+ return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssemblerRiscv64::ma_liPatchable(Register dest, ImmWord imm,
+ LiFlags flags) {
+ DEBUG_PRINTF("\tma_liPatchable\n");
+ if (Li64 == flags) {
+ li_constant(dest, imm.value);
+ } else {
+ li_ptr(dest, imm.value);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_li(Register dest, ImmGCPtr ptr) {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 6);
+ writeDataRelocation(ptr);
+ ma_liPatchable(dest, ImmPtr(ptr.value));
+}
+void MacroAssemblerRiscv64::ma_li(Register dest, Imm32 imm) {
+ RV_li(dest, imm.value);
+}
+void MacroAssemblerRiscv64::ma_li(Register dest, Imm64 imm) {
+ RV_li(dest, imm.value);
+}
+void MacroAssemblerRiscv64::ma_li(Register dest, CodeLabel* label) {
+ DEBUG_PRINTF("[ %s\n", __FUNCTION__);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 7);
+ BufferOffset bo = m_buffer.nextOffset();
+ JitSpew(JitSpew_Codegen, ".load CodeLabel %p", label);
+ ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+ label->patchAt()->bind(bo.getOffset());
+ label->setLinkMode(CodeLabel::MoveImmediate);
+ DEBUG_PRINTF("]\n");
+}
+void MacroAssemblerRiscv64::ma_li(Register dest, ImmWord imm) {
+ RV_li(dest, imm.value);
+}
+
+// Pop and push helpers for pointer-sized (64-bit) GPR values.
+void MacroAssemblerRiscv64::ma_pop(Register r) {
+ ld(r, StackPointer, 0);
+ addi(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void MacroAssemblerRiscv64::ma_push(Register r) {
+ if (r == sp) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ // Pushing sp requires one more instruction.
+ mv(ScratchRegister, sp);
+ r = ScratchRegister;
+ }
+
+ addi(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
+ sd(r, StackPointer, 0);
+}
+
+// Multiplies. For now, there are only a few that we care about.
+void MacroAssemblerRiscv64::ma_mul32TestOverflow(Register rd, Register rj,
+ Register rk, Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ MulOverflow32(rd, rj, rk, ScratchRegister);
+ ma_b(ScratchRegister, Register(zero), overflow, Assembler::NotEqual);
+}
+void MacroAssemblerRiscv64::ma_mul32TestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ MulOverflow32(rd, rj, Operand(imm.value), ScratchRegister);
+ ma_b(ScratchRegister, Register(zero), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerRiscv64::ma_mulPtrTestOverflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(rd != scratch);
+
+ if (rd == rj) {
+ or_(scratch, rj, zero);
+ rj = scratch;
+ rk = (rd == rk) ? rj : rk;
+ } else if (rd == rk) {
+ or_(scratch, rk, zero);
+ rk = scratch;
+ }
+
+ mul(rd, rj, rk);
+ mulh(scratch, rj, rk);
+ srai(scratch2, rd, 63);
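+  // The 128-bit product fits in 64 bits exactly when its high half (mulh)
+  // equals the low half's sign bit replicated 64 times (srai by 63); any
+  // mismatch below means the pointer-sized multiply overflowed.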
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+}
+
+// MulOverflow32 sets the overflow register to zero if no overflow occurred.
+void MacroAssemblerRiscv64::MulOverflow32(Register dst, Register left,
+ const Operand& right,
+ Register overflow) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 11);
+ Register right_reg;
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ if (right.is_imm()) {
+ ma_li(scratch, right.immediate());
+ right_reg = scratch;
+ } else {
+ MOZ_ASSERT(right.is_reg());
+ right_reg = right.rm();
+ }
+
+ MOZ_ASSERT(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ MOZ_ASSERT(overflow != left && overflow != right_reg);
+ sext_w(overflow, left);
+ sext_w(scratch2, right_reg);
+
+ mul(overflow, overflow, scratch2);
+ sext_w(dst, overflow);
+ xor_(overflow, overflow, dst);
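+  // Here `overflow` holds the exact 64-bit product of the sign-extended
+  // inputs and `dst` holds its low 32 bits sign-extended again; they differ
+  // exactly when the product does not fit in an int32. For illustration,
+  // 0x40000000 * 4 = 0x1'0000'0000: the low 32 bits sign-extend to 0, the
+  // xor is non-zero, and overflow is reported.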
+}
+
+int32_t MacroAssemblerRiscv64::GetOffset(int32_t offset, Label* L,
+ OffsetSize bits) {
+ if (L) {
+ offset = branch_offset_helper(L, bits);
+ } else {
+ MOZ_ASSERT(is_intn(offset, bits));
+ }
+ return offset;
+}
+
+bool MacroAssemblerRiscv64::CalculateOffset(Label* L, int32_t* offset,
+ OffsetSize bits) {
+ if (!is_near(L, bits)) return false;
+ *offset = GetOffset(*offset, L, bits);
+ return true;
+}
+
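+// Rough guide to the offset sizes used below: conditional branches
+// (beq/bne/blt/...) are B-type instructions with a 13-bit signed offset
+// (about +/-4 KiB), while unconditional jumps use jal's 21-bit signed offset
+// (about +/-1 MiB). Labels out of short range fall back to BranchLong via
+// ma_branch.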
+void MacroAssemblerRiscv64::BranchShortHelper(int32_t offset, Label* L) {
+ MOZ_ASSERT(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset21);
+ Assembler::j(offset);
+}
+
+bool MacroAssemblerRiscv64::BranchShortHelper(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ MOZ_ASSERT(L == nullptr || offset == 0);
+ MOZ_ASSERT(rt.is_reg() || rt.is_imm());
+ UseScratchRegisterScope temps(this);
+ Register scratch = Register();
+ if (rt.is_imm()) {
+ scratch = temps.Acquire();
+ ma_li(scratch, Imm64(rt.immediate()));
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ scratch = rt.rm();
+ }
+ BlockTrampolinePoolScope block_trampoline_pool(this, 2);
+ {
+ switch (cond) {
+ case Always:
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ Assembler::j(offset);
+ EmitConstPoolWithJumpIfNeeded();
+ break;
+ case Equal:
+ // rs == rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ Assembler::j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ Assembler::beq(rs, scratch, offset);
+ }
+ break;
+ case NotEqual:
+ // rs != rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ Assembler::bne(rs, scratch, offset);
+ }
+ break;
+
+ // Signed comparison.
+ case GreaterThan:
+ // rs > rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted.
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ Assembler::bgt(rs, scratch, offset);
+ }
+ break;
+ case GreaterThanOrEqual:
+ // rs >= rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ Assembler::j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ Assembler::bge(rs, scratch, offset);
+ }
+ break;
+ case LessThan:
+ // rs < rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted.
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ Assembler::blt(rs, scratch, offset);
+ }
+ break;
+ case LessThanOrEqual:
+ // rs <= rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ Assembler::j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ Assembler::ble(rs, scratch, offset);
+ }
+ break;
+
+ // Unsigned comparison.
+ case Above:
+ // rs > rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted.
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ Assembler::bgtu(rs, scratch, offset);
+ }
+ break;
+ case AboveOrEqual:
+ // rs >= rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ Assembler::j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ Assembler::bgeu(rs, scratch, offset);
+ }
+ break;
+ case Below:
+ // rs < rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted.
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ bltu(rs, scratch, offset);
+ }
+ break;
+ case BelowOrEqual:
+ // rs <= rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ Assembler::j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ Assembler::bleu(rs, scratch, offset);
+ }
+ break;
+ default:
+ MOZ_CRASH("UNREACHABLE");
+ }
+ }
+ return true;
+}
+
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) \
+ MOZ_ASSERT((cond == Always && rs == zero && rt.rm() == zero) || \
+ (cond != Always && (rs != zero || rt.rm() != zero)))
+
+bool MacroAssemblerRiscv64::BranchShortCheck(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ MOZ_ASSERT(is_int13(offset));
+ return BranchShortHelper(offset, nullptr, cond, rs, rt);
+ } else {
+ MOZ_ASSERT(offset == 0);
+ return BranchShortHelper(0, L, cond, rs, rt);
+ }
+}
+
+void MacroAssemblerRiscv64::BranchShort(Label* L) { BranchShortHelper(0, L); }
+
+void MacroAssemblerRiscv64::BranchShort(int32_t offset, Condition cond,
+ Register rs, const Operand& rt) {
+ BranchShortCheck(offset, nullptr, cond, rs, rt);
+}
+
+void MacroAssemblerRiscv64::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt) {
+ BranchShortCheck(0, L, cond, rs, rt);
+}
+
+void MacroAssemblerRiscv64::BranchLong(Label* L) {
+ // Generate position independent long branch.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ int32_t imm;
+ imm = branch_long_offset(L);
+ GenPCRelativeJump(scratch, imm);
+}
+
+void MacroAssemblerRiscv64::BranchAndLinkLong(Label* L) {
+ // Generate position independent long branch and link.
+ int32_t imm;
+ imm = branch_long_offset(L);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ GenPCRelativeJumpAndLink(scratch, imm);
+}
+
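+// ma_branch picks the cheapest encoding available: a single short branch
+// when the label is in range, otherwise (for conditional branches) an
+// inverted short branch that jumps over the position-independent long jump
+// emitted by BranchLong.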
+void MacroAssemblerRiscv64::ma_branch(Label* L, Condition cond, Register rs,
+ const Operand& rt, JumpKind jumpKind) {
+ if (L->used()) {
+ if (jumpKind == ShortJump && BranchShortCheck(0, L, cond, rs, rt)) {
+ return;
+ }
+ if (cond != Always) {
+ Label skip;
+ Condition neg_cond = InvertCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ BranchLong(L);
+ bind(&skip);
+ } else {
+ BranchLong(L);
+ EmitConstPoolWithJumpIfNeeded();
+ }
+ } else {
+ if (jumpKind == LongJump) {
+ if (cond != Always) {
+ Label skip;
+ Condition neg_cond = InvertCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ BranchLong(L);
+ bind(&skip);
+ } else {
+ BranchLong(L);
+ EmitConstPoolWithJumpIfNeeded();
+ }
+ } else {
+ BranchShort(L, cond, rs, rt);
+ }
+ }
+}
+
+// Branch helpers used from within RISC-V code.
+void MacroAssemblerRiscv64::ma_b(Register lhs, Address addr, Label* label,
+ Condition c, JumpKind jumpKind) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ ma_load(scratch, addr, SizeDouble);
+ ma_b(lhs, Register(scratch), label, c, jumpKind);
+}
+
+void MacroAssemblerRiscv64::ma_b(Register lhs, ImmPtr imm, Label* l,
+ Condition c, JumpKind jumpKind) {
+ asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
+}
+
+// Branch helpers used from within RISC-V-specific code.
+void MacroAssemblerRiscv64::ma_b(Register lhs, ImmWord imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(lhs != scratch);
+ ma_li(scratch, imm);
+ ma_b(lhs, Register(scratch), label, c, jumpKind);
+}
+
+void MacroAssemblerRiscv64::ma_b(Register lhs, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ if ((c == NonZero || c == Zero || c == Signed || c == NotSigned) &&
+ imm.value == 0) {
+ ma_b(lhs, lhs, label, c, jumpKind);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(lhs != scratch);
+ ma_li(scratch, imm);
+ ma_b(lhs, Register(scratch), label, c, jumpKind);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_b(Address addr, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ ma_load(scratch2, addr);
+ ma_b(Register(scratch2), imm, label, c, jumpKind);
+}
+
+void MacroAssemblerRiscv64::ma_b(Register lhs, Register rhs, Label* label,
+ Condition c, JumpKind jumpKind) {
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ ma_branch(label, c, lhs, rhs, jumpKind);
+ break;
+ case Always:
+ ma_branch(label, c, zero, Operand(zero), jumpKind);
+ break;
+ case Zero:
+ MOZ_ASSERT(lhs == rhs);
+ ma_branch(label, Equal, lhs, Operand(zero), jumpKind);
+ break;
+ case NonZero:
+ MOZ_ASSERT(lhs == rhs);
+ ma_branch(label, NotEqual, lhs, Operand(zero), jumpKind);
+ break;
+ case Signed:
+ MOZ_ASSERT(lhs == rhs);
+ ma_branch(label, LessThan, lhs, Operand(zero), jumpKind);
+ break;
+ case NotSigned:
+ MOZ_ASSERT(lhs == rhs);
+ ma_branch(label, GreaterThanOrEqual, lhs, Operand(zero), jumpKind);
+ break;
+ default: {
+ ma_branch(label, c, lhs, rhs, jumpKind);
+ break;
+ }
+ }
+}
+
+void MacroAssemblerRiscv64::ExtractBits(Register rt, Register rs, uint16_t pos,
+ uint16_t size, bool sign_extend) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
+ pos + size <= 64);
+ slli(rt, rs, 64 - (pos + size));
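+  // The shift above pushes the field's top bit to bit 63; shifting back down
+  // by 64 - size then sign-extends (srai) or zero-extends (srli) the
+  // extracted field.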
+ if (sign_extend) {
+ srai(rt, rt, 64 - size);
+ } else {
+ srli(rt, rt, 64 - size);
+ }
+#elif JS_CODEGEN_RISCV32
+ MOZ_ASSERT(pos < 32);
+ MOZ_ASSERT(size > 0);
+ MOZ_ASSERT(size <= 32);
+ MOZ_ASSERT((pos + size) > 0);
+ MOZ_ASSERT((pos + size) <= 32);
+ slli(rt, rs, 32 - (pos + size));
+ if (sign_extend) {
+ srai(rt, rt, 32 - size);
+ } else {
+ srli(rt, rt, 32 - size);
+ }
+#endif
+}
+
+void MacroAssemblerRiscv64::InsertBits(Register dest, Register source, int pos,
+ int size) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(size < 64);
+#elif JS_CODEGEN_RISCV32
+ MOZ_ASSERT(size < 32);
+#endif
+ UseScratchRegisterScope temps(this);
+ Register mask = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ Register source_ = temps.Acquire();
+  // Create a mask whose low "size" bits are set.
+ ma_li(mask, Imm32(1));
+ slli(mask, mask, size);
+ addi(mask, mask, -1);
+ and_(source_, mask, source);
+ slli(source_, source_, pos);
+  // Build a mask that is zero in the "size"-bit field starting at "pos".
+ slli(mask, mask, pos);
+ not_(mask, mask);
+  // Clear the destination field.
+ and_(dest, mask, dest);
+  // Insert the source bits.
+ or_(dest, dest, source_);
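+  // Illustration: with pos = 8 and size = 4, mask is first 0xf, source_
+  // holds the low four bits of source shifted to bits 8..11, mask becomes
+  // ~(0xf << 8), and the and/or above splice those four bits into dest while
+  // leaving every other bit untouched.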
+}
+
+void MacroAssemblerRiscv64::InsertBits(Register dest, Register source,
+ Register pos, int size) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(size < 64);
+#elif JS_CODEGEN_RISCV32
+ MOZ_ASSERT(size < 32);
+#endif
+ UseScratchRegisterScope temps(this);
+ Register mask = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ Register source_ = temps.Acquire();
+  // Create a mask whose low "size" bits are set.
+ ma_li(mask, Imm32(1));
+ slli(mask, mask, size);
+ addi(mask, mask, -1);
+ and_(source_, mask, source);
+ sll(source_, source_, pos);
+  // Build a mask that is zero in the "size"-bit field starting at "pos".
+ sll(mask, mask, pos);
+ not_(mask, mask);
+  // Clear the destination field.
+ and_(dest, mask, dest);
+  // Insert the source bits.
+ or_(dest, dest, source_);
+}
+
+void MacroAssemblerRiscv64::ma_add32(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ if (is_int12(rt.immediate())) {
+ addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+ (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+ addiw(rd, rs, rt.immediate() / 2);
+ addiw(rd, rd, rt.immediate() - (rt.immediate() / 2));
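+      // Immediates just outside the signed 12-bit addi range are split into
+      // two halves that each fit, e.g. 4000 is added as 2000 + 2000, which
+      // avoids materializing the constant in a scratch register.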
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ ma_li(scratch, rt.immediate());
+ addw(rd, rs, scratch);
+ }
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ addw(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_add64(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ if (is_int12(rt.immediate())) {
+ addi(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+ (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+ addi(rd, rs, rt.immediate() / 2);
+ addi(rd, rd, rt.immediate() - (rt.immediate() / 2));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ ma_li(scratch, rt.immediate());
+ add(rd, rs, scratch);
+ }
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ add(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sub32(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ if (is_int12(-rt.immediate())) {
+ addiw(rd, rs,
+ static_cast<int32_t>(
+ -rt.immediate())); // No subi instr, use addi(x, y, -imm).
+ } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+ (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+ addiw(rd, rs, -rt.immediate() / 2);
+ addiw(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, rt.immediate());
+ subw(rd, rs, scratch);
+ }
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ subw(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sub64(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ if (is_int12(-rt.immediate())) {
+ addi(rd, rs,
+ static_cast<int32_t>(
+ -rt.immediate())); // No subi instr, use addi(x, y, -imm).
+ } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+ (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+ addi(rd, rs, -rt.immediate() / 2);
+ addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, rt.immediate());
+ sub(rd, rs, scratch);
+ }
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ sub(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_and(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ if (is_int12(rt.immediate())) {
+ andi(rd, rs, rt.immediate());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ and_(rd, rs, ScratchRegister);
+ }
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ and_(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_or(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ if (is_int12(rt.immediate())) {
+ ori(rd, rs, rt.immediate());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ or_(rd, rs, ScratchRegister);
+ }
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ or_(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_xor(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ if (is_int12(rt.immediate())) {
+ xori(rd, rs, rt.immediate());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ xor_(rd, rs, ScratchRegister);
+ }
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ xor_(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_nor(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ nor(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ nor(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_div32(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ divw(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ divw(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_divu32(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ divuw(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ divuw(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_div64(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ div(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ div(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_divu64(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ divu(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ divu(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_mod32(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ remw(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ remw(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_modu32(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ remuw(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ remuw(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_mod64(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ rem(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ rem(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_modu64(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ remu(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ remu(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_mul32(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ mulw(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ mulw(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_mulh32(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ mul(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ mul(rd, rs, rt.rm());
+ }
+ srai(rd, rd, 32);
+}
+
+void MacroAssemblerRiscv64::ma_mul64(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ mul(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ mul(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_mulh64(Register rd, Register rs, Operand rt) {
+ if (rt.is_imm()) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, rt.immediate());
+ mulh(rd, rs, ScratchRegister);
+ } else {
+ MOZ_ASSERT(rt.is_reg());
+ mulh(rd, rs, rt.rm());
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sll64(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ sll(rd, rs, rt.rm());
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ slli(rd, rs, shamt);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sll32(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ sllw(rd, rs, rt.rm());
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ slliw(rd, rs, shamt);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sra64(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ sra(rd, rs, rt.rm());
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ srai(rd, rs, shamt);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sra32(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ sraw(rd, rs, rt.rm());
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ sraiw(rd, rs, shamt);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_srl64(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ srl(rd, rs, rt.rm());
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ srli(rd, rs, shamt);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_srl32(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ srlw(rd, rs, rt.rm());
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ srliw(rd, rs, shamt);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_slt(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ slt(rd, rs, rt.rm());
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ if (is_int12(rt.immediate())) {
+ slti(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ ma_li(scratch, rt.immediate());
+ slt(rd, rs, scratch);
+ }
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sltu(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rs, rt.rm());
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ if (is_int12(rt.immediate())) {
+ sltiu(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ ma_li(scratch, rt.immediate());
+ sltu(rd, rs, scratch);
+ }
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sle(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ ma_li(scratch, rt.immediate());
+ slt(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void MacroAssemblerRiscv64::ma_sleu(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ ma_li(scratch, rt.immediate());
+ sltu(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void MacroAssemblerRiscv64::ma_sgt(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ ma_li(scratch, rt.immediate());
+ slt(rd, scratch, rs);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sgtu(Register rd, Register rs, Operand rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+ ma_li(scratch, rt.immediate());
+ sltu(rd, scratch, rs);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sge(Register rd, Register rs, Operand rt) {
+ ma_slt(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+void MacroAssemblerRiscv64::ma_sgeu(Register rd, Register rs, Operand rt) {
+ ma_sltu(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm() == zero_reg;
+ } else {
+ MOZ_ASSERT(rt.is_imm());
+ return rt.immediate() == 0;
+ }
+}
+
+void MacroAssemblerRiscv64::ma_seq(Register rd, Register rs, Operand rt) {
+ if (rs == zero_reg) {
+ ma_seqz(rd, rt);
+ } else if (IsZero(rt)) {
+ seqz(rd, rs);
+ } else {
+ ma_sub64(rd, rs, rt);
+ seqz(rd, rd);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sne(Register rd, Register rs, Operand rt) {
+ if (rs == zero_reg) {
+ ma_snez(rd, rt);
+ } else if (IsZero(rt)) {
+ snez(rd, rs);
+ } else {
+ ma_sub64(rd, rs, rt);
+ snez(rd, rd);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_seqz(Register rd, const Operand& rt) {
+ if (rt.is_reg()) {
+ seqz(rd, rt.rm());
+ } else {
+ ma_li(rd, rt.immediate() == 0);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_snez(Register rd, const Operand& rt) {
+ if (rt.is_reg()) {
+ snez(rd, rt.rm());
+ } else {
+ ma_li(rd, rt.immediate() != 0);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_neg(Register rd, const Operand& rt) {
+ MOZ_ASSERT(rt.is_reg());
+ neg(rd, rt.rm());
+}
+
+void MacroAssemblerRiscv64::ma_jump(ImmPtr dest) {
+ DEBUG_PRINTF("[ %s\n", __FUNCTION__);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ asMasm().ma_liPatchable(scratch, dest);
+ jr(scratch, 0);
+ DEBUG_PRINTF("]\n");
+}
+// fp instructions
+void MacroAssemblerRiscv64::ma_lid(FloatRegister dest, double value) {
+ ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
+
+ if (imm.value != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ fmv_d_x(dest, scratch);
+ } else {
+ fmv_d_x(dest, zero);
+ }
+}
+void MacroAssemblerRiscv64::ma_lis(FloatRegister dest, float value) {
+ Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+ if (imm.value != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ fmv_w_x(dest, scratch);
+ } else {
+ fmv_w_x(dest, zero);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_sub32TestOverflow(Register rd, Register rj,
+ Register rk, Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ sub(scratch, rj, rk);
+ subw(rd, rj, rk);
+ ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerRiscv64::ma_sub32TestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ if (imm.value != INT32_MIN) {
+ asMasm().ma_add32TestOverflow(rd, rj, Imm32(-imm.value), overflow);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, Imm32(imm.value));
+ asMasm().ma_sub32TestOverflow(rd, rj, scratch, overflow);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_add32TestOverflow(Register rd, Register rj,
+ Register rk, Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ add(scratch, rj, rk);
+ addw(rd, rj, rk);
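+  // `scratch` holds the exact 64-bit sum and `rd` the 32-bit sum
+  // sign-extended; they differ precisely when the 32-bit addition
+  // overflowed (assuming rj and rk hold canonical sign-extended int32
+  // values, as elsewhere in this port).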
+ ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerRiscv64::ma_add32TestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ // Check for signed range because of addi
+ if (is_intn(imm.value, 12)) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ addi(scratch, rj, imm.value);
+ addiw(rd, rj, imm.value);
+ ma_b(rd, scratch, overflow, Assembler::NotEqual);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ ma_li(scratch2, imm);
+ ma_add32TestOverflow(rd, rj, scratch2, overflow);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_subPtrTestOverflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT_IF(rj == rd, rj != rk);
+ MOZ_ASSERT(rj != scratch2);
+ MOZ_ASSERT(rk != scratch2);
+ MOZ_ASSERT(rd != scratch2);
+
+ Register rj_copy = rj;
+
+ if (rj == rd) {
+ ma_or(scratch2, rj, zero);
+ rj_copy = scratch2;
+ }
+
+ {
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(rd != scratch);
+
+ sub(rd, rj, rk);
+    // If rj and rk have the same sign, the subtraction cannot overflow.
+ ma_xor(scratch, rj_copy, rk);
+    // Check whether the sign of rd differs from the sign of rj.
+ ma_xor(scratch2, rd, rj_copy);
+ ma_and(scratch2, scratch2, scratch);
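+    // rj - rk overflows only when rj and rk have opposite signs and the
+    // result's sign differs from rj's; scratch2 is negative exactly in that
+    // case, which the branch below tests.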
+ }
+
+ ma_b(scratch2, zero, overflow, Assembler::LessThan);
+}
+
+void MacroAssemblerRiscv64::ma_addPtrTestOverflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(rd != scratch);
+
+ if (rj == rk) {
+ if (rj == rd) {
+ ma_or(scratch, rj, zero);
+ rj = scratch;
+ }
+
+ add(rd, rj, rj);
+ ma_xor(scratch, rj, rd);
+ ma_b(scratch, zero, overflow, Assembler::LessThan);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(rj != scratch);
+ MOZ_ASSERT(rd != scratch2);
+
+ if (rj == rd) {
+ ma_or(scratch2, rj, zero);
+ rj = scratch2;
+ }
+
+ add(rd, rj, rk);
+ slti(scratch, rj, 0);
+ slt(scratch2, rd, rj);
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_addPtrTestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+
+ if (imm.value == 0) {
+ ori(rd, rj, 0);
+ return;
+ }
+
+ if (rj == rd) {
+ ori(scratch2, rj, 0);
+ rj = scratch2;
+ }
+
+ ma_add64(rd, rj, imm);
+
+ if (imm.value > 0) {
+ ma_b(rd, rj, overflow, Assembler::LessThan);
+ } else {
+ MOZ_ASSERT(imm.value < 0);
+ ma_b(rd, rj, overflow, Assembler::GreaterThan);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_addPtrTestOverflow(Register rd, Register rj,
+ ImmWord imm,
+ Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+
+ if (imm.value == 0) {
+ ori(rd, rj, 0);
+ return;
+ }
+
+ if (rj == rd) {
+ MOZ_ASSERT(rj != scratch2);
+ ori(scratch2, rj, 0);
+ rj = scratch2;
+ }
+
+ ma_li(rd, imm);
+ add(rd, rj, rd);
+
+ if (imm.value > 0) {
+ ma_b(rd, rj, overflow, Assembler::LessThan);
+ } else {
+ MOZ_ASSERT(imm.value < 0);
+ ma_b(rd, rj, overflow, Assembler::GreaterThan);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_add32TestCarry(Condition cond, Register rd,
+ Register rj, Register rk,
+ Label* overflow) {
+ MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
+ MOZ_ASSERT_IF(rd == rj, rk != rd);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ addw(rd, rj, rk);
+ sltu(scratch, rd, rd == rj ? rk : rj);
+ ma_b(Register(scratch), Register(scratch), overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+void MacroAssemblerRiscv64::ma_add32TestCarry(Condition cond, Register rd,
+ Register rj, Imm32 imm,
+ Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(rj != scratch2);
+ ma_li(scratch2, imm);
+ ma_add32TestCarry(cond, rd, rj, scratch2, overflow);
+}
+
+void MacroAssemblerRiscv64::ma_subPtrTestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+  // TODO(riscv64): Check subPtrTestOverflow
+ MOZ_ASSERT(imm.value != INT32_MIN);
+ ma_addPtrTestOverflow(rd, rj, Imm32(-imm.value), overflow);
+}
+
+void MacroAssemblerRiscv64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rj, Register rk,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(rd != rk);
+ MOZ_ASSERT(rd != scratch);
+ add(rd, rj, rk);
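+  // An unsigned carry out of rj + rk occurred exactly when the wrapped sum
+  // is below either operand; rk is compared because it is guaranteed to be
+  // distinct from rd.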
+ sltu(scratch, rd, rk);
+ ma_b(scratch, Register(scratch), label,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+void MacroAssemblerRiscv64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rj, Imm32 imm,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+
+ // Check for signed range because of addi
+ if (is_intn(imm.value, 12)) {
+ addi(rd, rj, imm.value);
+ sltiu(scratch2, rd, imm.value);
+ ma_b(scratch2, scratch2, label,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ma_li(scratch2, imm);
+ ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rj, ImmWord imm,
+ Label* label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+
+  // Check for signed range because of addi
+ if (is_intn(imm.value, 12)) {
+ uint32_t value = imm.value;
+ addi(rd, rj, value);
+ ma_sltu(scratch2, rd, Operand(value));
+ ma_b(scratch2, scratch2, label,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ma_li(scratch2, imm);
+ ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ asMasm().computeScaledAddress(src, scratch2);
+ asMasm().ma_load(dest, Address(scratch2, src.offset), size, extension);
+}
+void MacroAssemblerRiscv64::ma_pop(FloatRegister f) {
+ fld(f, StackPointer, 0);
+ addi(StackPointer, StackPointer, sizeof(double));
+}
+
+void MacroAssemblerRiscv64::ma_push(FloatRegister f) {
+ addi(StackPointer, StackPointer, (int32_t) - sizeof(double));
+ fsd(f, StackPointer, 0);
+}
+
+void MacroAssemblerRiscv64::ma_fld_s(FloatRegister ft, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intn(offset, 12)) {
+ flw(ft, base, offset);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ ma_add64(scratch, base, scratch);
+ flw(ft, scratch, 0);
+ }
+}
+void MacroAssemblerRiscv64::ma_fld_d(FloatRegister ft, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intn(offset, 12)) {
+ fld(ft, base, offset);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ ma_add64(scratch, base, scratch);
+ fld(ft, scratch, 0);
+ }
+}
+void MacroAssemblerRiscv64::ma_fst_d(FloatRegister ft, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intn(offset, 12)) {
+ fsd(ft, base, offset);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ ma_add64(scratch, base, scratch);
+ fsd(ft, scratch, 0);
+ }
+}
+void MacroAssemblerRiscv64::ma_fst_s(FloatRegister ft, Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+
+ if (is_intn(offset, 12)) {
+ fsw(ft, base, offset);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ ma_add64(scratch, base, scratch);
+ fsw(ft, scratch, 0);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_fst_d(FloatRegister ft, BaseIndex address) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ asMasm().computeScaledAddress(address, scratch);
+ asMasm().ma_fst_d(ft, Address(scratch, address.offset));
+}
+
+void MacroAssemblerRiscv64::ma_fst_s(FloatRegister ft, BaseIndex address) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ asMasm().computeScaledAddress(address, scratch);
+ asMasm().ma_fst_s(ft, Address(scratch, address.offset));
+}
+
+void MacroAssemblerRiscv64::ma_fld_d(FloatRegister ft, const BaseIndex& src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ asMasm().computeScaledAddress(src, scratch);
+ asMasm().ma_fld_d(ft, Address(scratch, src.offset));
+}
+
+void MacroAssemblerRiscv64::ma_fld_s(FloatRegister ft, const BaseIndex& src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ asMasm().computeScaledAddress(src, scratch);
+ asMasm().ma_fld_s(ft, Address(scratch, src.offset));
+}
+
+void MacroAssemblerRiscv64::ma_call(ImmPtr dest) {
+ DEBUG_PRINTF("[ %s\n", __FUNCTION__);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+ asMasm().ma_liPatchable(CallReg, dest);
+ jalr(CallReg, 0);
+ DEBUG_PRINTF("]\n");
+}
+
+void MacroAssemblerRiscv64::CompareIsNotNanF32(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 3);
+ Register scratch = temps.Acquire();
+
+  feq_s(rd, cmp1, cmp1);       // rd <- !isNaN(cmp1)
+  feq_s(scratch, cmp2, cmp2);  // scratch <- !isNaN(cmp2)
+  ma_and(rd, rd, scratch);     // rd <- !isNaN(cmp1) && !isNaN(cmp2)
+}
+
+void MacroAssemblerRiscv64::CompareIsNotNanF64(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 3);
+ Register scratch = temps.Acquire();
+
+  feq_d(rd, cmp1, cmp1);       // rd <- !isNaN(cmp1)
+  feq_d(scratch, cmp2, cmp2);  // scratch <- !isNaN(cmp2)
+  ma_and(rd, rd, scratch);     // rd <- !isNaN(cmp1) && !isNaN(cmp2)
+}
+
+void MacroAssemblerRiscv64::CompareIsNanF32(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+  CompareIsNotNanF32(rd, cmp1, cmp2);  // rd <- !isNaN(cmp1) && !isNaN(cmp2)
+  ma_xor(rd, rd, Operand(1));          // rd <- isNaN(cmp1) || isNaN(cmp2)
+}
+
+void MacroAssemblerRiscv64::CompareIsNanF64(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+  CompareIsNotNanF64(rd, cmp1, cmp2);  // rd <- !isNaN(cmp1) && !isNaN(cmp2)
+  ma_xor(rd, rd, Operand(1));          // rd <- isNaN(cmp1) || isNaN(cmp2)
+}
+
+void MacroAssemblerRiscv64::Clz32(Register rd, Register xx) {
+ // 32 bit unsigned in lower word: count number of leading zeros.
+ // int n = 32;
+ // unsigned y;
+
+ // y = x >>16; if (y != 0) { n = n -16; x = y; }
+ // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
+ // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
+ // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
+ // y = x >> 1; if (y != 0) {rd = n - 2; return;}
+ // rd = n - x;
+
+ Label L0, L1, L2, L3, L4;
+ UseScratchRegisterScope temps(this);
+ Register x = rd;
+ Register y = temps.Acquire();
+ Register n = temps.Acquire();
+ MOZ_ASSERT(xx != y && xx != n);
+ mv(x, xx);
+ ma_li(n, Imm32(32));
+#if JS_CODEGEN_RISCV64
+ srliw(y, x, 16);
+ ma_branch(&L0, Equal, y, Operand(zero_reg));
+ mv(x, y);
+ addiw(n, n, -16);
+ bind(&L0);
+ srliw(y, x, 8);
+ ma_branch(&L1, Equal, y, Operand(zero_reg));
+ addiw(n, n, -8);
+ mv(x, y);
+ bind(&L1);
+ srliw(y, x, 4);
+ ma_branch(&L2, Equal, y, Operand(zero_reg));
+ addiw(n, n, -4);
+ mv(x, y);
+ bind(&L2);
+ srliw(y, x, 2);
+ ma_branch(&L3, Equal, y, Operand(zero_reg));
+ addiw(n, n, -2);
+ mv(x, y);
+ bind(&L3);
+ srliw(y, x, 1);
+ subw(rd, n, x);
+ ma_branch(&L4, Equal, y, Operand(zero_reg));
+ addiw(rd, n, -2);
+ bind(&L4);
+#elif JS_CODEGEN_RISCV32
+ srli(y, x, 16);
+ ma_branch(&L0, Equal, y, Operand(zero_reg));
+ mv(x, y);
+ addi(n, n, -16);
+ bind(&L0);
+ srli(y, x, 8);
+ ma_branch(&L1, Equal, y, Operand(zero_reg));
+ addi(n, n, -8);
+ mv(x, y);
+ bind(&L1);
+ srli(y, x, 4);
+ ma_branch(&L2, Equal, y, Operand(zero_reg));
+ addi(n, n, -4);
+ mv(x, y);
+ bind(&L2);
+ srli(y, x, 2);
+ ma_branch(&L3, Equal, y, Operand(zero_reg));
+ addi(n, n, -2);
+ mv(x, y);
+ bind(&L3);
+ srli(y, x, 1);
+ sub(rd, n, x);
+ ma_branch(&L4, Equal, y, Operand(zero_reg));
+ addi(rd, n, -2);
+ bind(&L4);
+#endif
+}
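+// Worked example for the binary search above: for xx = 1 every shifted y is
+// zero, so no branch updates n or x, and the final `rd = n - x` gives
+// 32 - 1 = 31 = clz32(1). For xx = 0 the same path gives 32 - 0 = 32.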
+
+#if JS_CODEGEN_RISCV64
+void MacroAssemblerRiscv64::Clz64(Register rd, Register xx) {
+ // 64 bit: count number of leading zeros.
+ // int n = 64;
+ // unsigned y;
+
+ // y = x >>32; if (y != 0) { n = n - 32; x = y; }
+ // y = x >>16; if (y != 0) { n = n - 16; x = y; }
+ // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
+ // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
+ // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
+ // y = x >> 1; if (y != 0) {rd = n - 2; return;}
+ // rd = n - x;
+
+ Label L0, L1, L2, L3, L4, L5;
+ UseScratchRegisterScope temps(this);
+ Register x = rd;
+ Register y = temps.Acquire();
+ Register n = temps.Acquire();
+ MOZ_ASSERT(xx != y && xx != n);
+ mv(x, xx);
+ ma_li(n, Imm32(64));
+ srli(y, x, 32);
+ ma_branch(&L0, Equal, y, Operand(zero_reg));
+ addiw(n, n, -32);
+ mv(x, y);
+ bind(&L0);
+ srli(y, x, 16);
+ ma_branch(&L1, Equal, y, Operand(zero_reg));
+ addiw(n, n, -16);
+ mv(x, y);
+ bind(&L1);
+ srli(y, x, 8);
+ ma_branch(&L2, Equal, y, Operand(zero_reg));
+ addiw(n, n, -8);
+ mv(x, y);
+ bind(&L2);
+ srli(y, x, 4);
+ ma_branch(&L3, Equal, y, Operand(zero_reg));
+ addiw(n, n, -4);
+ mv(x, y);
+ bind(&L3);
+ srli(y, x, 2);
+ ma_branch(&L4, Equal, y, Operand(zero_reg));
+ addiw(n, n, -2);
+ mv(x, y);
+ bind(&L4);
+ srli(y, x, 1);
+ subw(rd, n, x);
+ ma_branch(&L5, Equal, y, Operand(zero_reg));
+ addiw(rd, n, -2);
+ bind(&L5);
+}
+#endif
+void MacroAssemblerRiscv64::Ctz32(Register rd, Register rs) {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_add64(scratch, rs, Operand(-1));
+ ma_xor(rd, scratch, rs);
+ ma_and(rd, rd, scratch);
+ // Count number of leading zeroes.
+ }
+ Clz32(rd, rd);
+ {
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, Imm32(32));
+ ma_sub32(rd, scratch, rd);
+ }
+}
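+// Worked example for the mask trick above: for rs = 0b...10100,
+// rs - 1 = 0b...10011 and (rs ^ (rs - 1)) & (rs - 1) = 0b00011, i.e. one set
+// bit per trailing zero. Clz32 of that mask is 30, and 32 - 30 = 2 = ctz32(rs).
+// For rs = 0 the mask is all ones, giving 32 - 0 = 32 as expected.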
+#if JS_CODEGEN_RISCV64
+void MacroAssemblerRiscv64::Ctz64(Register rd, Register rs) {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_add64(scratch, rs, Operand(-1));
+ ma_xor(rd, scratch, rs);
+ ma_and(rd, rd, scratch);
+ // Count number of leading zeroes.
+ }
+ Clz64(rd, rd);
+ {
+ // Subtract number of leading zeroes from 64 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, 64);
+ ma_sub64(rd, scratch, rd);
+ }
+}
+#endif
+void MacroAssemblerRiscv64::Popcnt32(Register rd, Register rs,
+ Register scratch) {
+ MOZ_ASSERT(scratch != rs);
+ MOZ_ASSERT(scratch != rd);
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // There are algorithms which are faster in the cases where very few
+ // bits are set but the algorithm here attempts to minimize the total
+ // number of instructions executed even when a large number of bits
+ // are set.
+  // The number of instructions is 20.
+ // uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ // uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ // uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
+ // uint32_t value = 0x01010101; // (T)~(T)0/255
+
+ uint32_t shift = 24;
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register value = temps.Acquire();
+ MOZ_ASSERT((rd != value) && (rs != value));
+ ma_li(value, 0x01010101); // value = 0x01010101;
+ ma_li(scratch2, 0x55555555); // B0 = 0x55555555;
+ ma_srl32(scratch, rs, Operand(1));
+ ma_and(scratch, scratch, scratch2);
+ ma_sub32(scratch, rs, scratch);
+ ma_li(scratch2, 0x33333333); // B1 = 0x33333333;
+ slli(rd, scratch2, 4);
+ or_(scratch2, scratch2, rd);
+ ma_and(rd, scratch, scratch2);
+ ma_srl32(scratch, scratch, Operand(2));
+ ma_and(scratch, scratch, scratch2);
+ ma_add32(scratch, rd, scratch);
+ ma_srl32(rd, scratch, Operand(4));
+ ma_add32(rd, rd, scratch);
+ ma_li(scratch2, 0xF);
+ ma_mul32(scratch2, value, scratch2); // B2 = 0x0F0F0F0F;
+ ma_and(rd, rd, scratch2);
+ ma_mul32(rd, rd, value);
+ ma_srl32(rd, rd, Operand(shift));
+}
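+// For reference, the SWAR sequence emitted above is roughly this C routine
+// (some masks are materialized with shifts and multiplies rather than loaded
+// directly, but the data flow is the same):
+//
+//   uint32_t popcnt32(uint32_t v) {
+//     v = v - ((v >> 1) & 0x55555555);                 // pairs
+//     v = (v & 0x33333333) + ((v >> 2) & 0x33333333);  // nibbles
+//     v = (v + (v >> 4)) & 0x0F0F0F0F;                 // bytes
+//     return (v * 0x01010101) >> 24;                   // sum of the bytes
+//   }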
+
+#if JS_CODEGEN_RISCV64
+void MacroAssemblerRiscv64::Popcnt64(Register rd, Register rs,
+ Register scratch) {
+ MOZ_ASSERT(scratch != rs);
+ MOZ_ASSERT(scratch != rd);
+ // uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
+ // uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
+ // uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
+ // uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
+  // uint64_t shift = 24; // 32 + shift == (sizeof(T) - 1) * BITS_PER_BYTE
+ uint64_t shift = 24;
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register value = temps.Acquire();
+ MOZ_ASSERT((rd != value) && (rs != value));
+ ma_li(value, 0x1111111111111111l); // value = 0x1111111111111111l;
+ ma_li(scratch2, 5);
+ ma_mul64(scratch2, value, scratch2); // B0 = 0x5555555555555555l;
+ ma_srl64(scratch, rs, Operand(1));
+ ma_and(scratch, scratch, scratch2);
+ ma_sub64(scratch, rs, scratch);
+ ma_li(scratch2, 3);
+ ma_mul64(scratch2, value, scratch2); // B1 = 0x3333333333333333l;
+ ma_and(rd, scratch, scratch2);
+ ma_srl64(scratch, scratch, Operand(2));
+ ma_and(scratch, scratch, scratch2);
+ ma_add64(scratch, rd, scratch);
+ ma_srl64(rd, scratch, Operand(4));
+ ma_add64(rd, rd, scratch);
+ ma_li(scratch2, 0xF);
+ ma_li(value, 0x0101010101010101l); // value = 0x0101010101010101l;
+ ma_mul64(scratch2, value, scratch2); // B2 = 0x0F0F0F0F0F0F0F0Fl;
+ ma_and(rd, rd, scratch2);
+ ma_mul64(rd, rd, value);
+ srli(rd, rd, 32 + shift);
+}
+#endif
+
+void MacroAssemblerRiscv64::ma_div_branch_overflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_mod32(scratch, rj, rk);
+ ma_b(scratch, scratch, overflow, Assembler::NonZero);
+ divw(rd, rj, rk);
+}
+
+void MacroAssemblerRiscv64::ma_div_branch_overflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_div_branch_overflow(rd, rj, scratch, overflow);
+}
+
+void MacroAssemblerRiscv64::ma_mod_mask(Register src, Register dest,
+ Register hold, Register remain,
+ int32_t shift, Label* negZero) {
+ // MATH:
+  // We wish to compute x % ((1<<y) - 1) for a known constant, y.
+ // First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
+ // dividend as a number in base b, namely
+ // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
+ // now, since both addition and multiplication commute with modulus,
+ // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+ // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
+ // now, since b == C + 1, b % C == 1, and b^n % C == 1
+ // this means that the whole thing simplifies to:
+ // c_0 + c_1 + c_2 ... c_n % C
+ // each c_n can easily be computed by a shift/bitextract, and the modulus
+ // can be maintained by simply subtracting by C whenever the number gets
+ // over C.
+ int32_t mask = (1 << shift) - 1;
+ Label head, negative, sumSigned, done;
+
+  // hold holds -1 if the value was negative, 1 otherwise.
+  // remain holds the remaining bits that have not been processed.
+  // scratch2 serves as a temporary location in which to store extracted bits
+  // and the trial subtraction.
+  // dest is the accumulator (and holds the final result).
+
+ // move the whole value into the remain.
+ or_(remain, src, zero);
+ // Zero out the dest.
+ ma_li(dest, Imm32(0));
+ // Set the hold appropriately.
+ ma_b(remain, remain, &negative, Signed, ShortJump);
+ ma_li(hold, Imm32(1));
+ ma_branch(&head, ShortJump);
+
+ bind(&negative);
+ ma_li(hold, Imm32(-1));
+ subw(remain, zero, remain);
+
+ // Begin the main loop.
+ bind(&head);
+
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+  // Extract the bottom bits into scratch2.
+ ma_and(scratch2, remain, Imm32(mask));
+ // Add those bits to the accumulator.
+ addw(dest, dest, scratch2);
+ // Do a trial subtraction
+ ma_sub32(scratch2, dest, Imm32(mask));
+  // If (sum - C) >= 0, store sum - C back into sum, thus performing a
+ // modulus.
+ ma_b(scratch2, Register(scratch2), &sumSigned, Signed, ShortJump);
+ or_(dest, scratch2, zero);
+ bind(&sumSigned);
+ // Get rid of the bits that we extracted before.
+ srliw(remain, remain, shift);
+ // If the shift produced zero, finish, otherwise, continue in the loop.
+ ma_b(remain, remain, &head, NonZero, ShortJump);
+ // Check the hold to see if we need to negate the result.
+ ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+  // If hold was negative, negate the result to be in line with
+  // what JS wants.
+ if (negZero != nullptr) {
+ // Jump out in case of negative zero.
+ ma_b(hold, hold, negZero, Zero);
+ subw(dest, zero, dest);
+ } else {
+ subw(dest, zero, dest);
+ }
+
+ bind(&done);
+}
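+// A C-level sketch of the digit-sum reduction above (negative-zero bailout
+// aside), assuming 0 < shift < 32:
+//
+//   int32_t mod_mask(int32_t x, int32_t shift) {
+//     int32_t mask = (1 << shift) - 1;     // C = b - 1, with b = 1 << shift
+//     int32_t hold = x < 0 ? -1 : 1;
+//     uint32_t remain = x < 0 ? -(uint32_t)x : (uint32_t)x;
+//     int32_t sum = 0;
+//     do {
+//       sum += remain & mask;              // add the next base-b digit
+//       if (sum - mask >= 0) sum -= mask;  // keep the running sum below C
+//       remain >>= shift;
+//     } while (remain != 0);
+//     return hold < 0 ? -sum : sum;
+//   }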
+
+void MacroAssemblerRiscv64::ma_fmovz(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fj, Register rk) {
+ Label done;
+ ma_b(rk, zero, &done, Assembler::NotEqual);
+ if (fmt == SingleFloat) {
+ fmv_s(fd, fj);
+ } else {
+ fmv_d(fd, fj);
+ }
+ bind(&done);
+}
+
+void MacroAssemblerRiscv64::ByteSwap(Register rd, Register rs, int operand_size,
+ Register scratch) {
+ MOZ_ASSERT(scratch != rs);
+ MOZ_ASSERT(scratch != rd);
+ MOZ_ASSERT(operand_size == 4 || operand_size == 8);
+ if (operand_size == 4) {
+    // uint32_t x1 = 0x00FF00FF;
+ // x0 = (x0 << 16 | x0 >> 16);
+ // x0 = (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8));
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 17);
+ MOZ_ASSERT((rd != t6) && (rs != t6));
+ Register x0 = temps.Acquire();
+ Register x1 = temps.Acquire();
+ Register x2 = scratch;
+ RV_li(x1, 0x00FF00FF);
+ slliw(x0, rs, 16);
+ srliw(rd, rs, 16);
+ or_(x0, rd, x0); // x0 <- x0 << 16 | x0 >> 16
+ and_(x2, x0, x1); // x2 <- x0 & 0x00FF00FF
+ slliw(x2, x2, 8); // x2 <- (x0 & x1) << 8
+ slliw(x1, x1, 8); // x1 <- 0xFF00FF00
+ and_(rd, x0, x1); // x0 & 0xFF00FF00
+ srliw(rd, rd, 8);
+ or_(rd, rd, x2); // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8))
+ } else {
+    // uint64_t x1 = 0x0000FFFF0000FFFFl;
+    // uint64_t x1 = 0x00FF00FF00FF00FFl;
+ // x0 = (x0 << 32 | x0 >> 32);
+ // x0 = (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16;
+ // x0 = (x0 & x1) << 8 | (x0 & (x1 << 8)) >> 8;
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 30);
+ MOZ_ASSERT((rd != t6) && (rs != t6));
+ Register x0 = temps.Acquire();
+ Register x1 = temps.Acquire();
+ Register x2 = scratch;
+ RV_li(x1, 0x0000FFFF0000FFFFl);
+ slli(x0, rs, 32);
+ srli(rd, rs, 32);
+ or_(x0, rd, x0); // x0 <- x0 << 32 | x0 >> 32
+ and_(x2, x0, x1); // x2 <- x0 & 0x0000FFFF0000FFFF
+ slli(x2, x2, 16); // x2 <- (x0 & 0x0000FFFF0000FFFF) << 16
+ slli(x1, x1, 16); // x1 <- 0xFFFF0000FFFF0000
+ and_(rd, x0, x1); // rd <- x0 & 0xFFFF0000FFFF0000
+ srli(rd, rd, 16); // rd <- x0 & (x1 << 16)) >> 16
+ or_(x0, rd, x2); // (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16;
+ RV_li(x1, 0x00FF00FF00FF00FFl);
+ and_(x2, x0, x1); // x2 <- x0 & 0x00FF00FF00FF00FF
+ slli(x2, x2, 8); // x2 <- (x0 & x1) << 8
+ slli(x1, x1, 8); // x1 <- 0xFF00FF00FF00FF00
+ and_(rd, x0, x1);
+ srli(rd, rd, 8); // rd <- (x0 & (x1 << 8)) >> 8
+ or_(rd, rd, x2); // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8))
+ }
+}
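+// The 4-byte path above corresponds to the usual mask-and-shift swap; the
+// 8-byte path applies the same idea once more with the wider masks. Roughly:
+//
+//   uint32_t bswap32(uint32_t x) {
+//     x = (x << 16) | (x >> 16);                                 // swap halves
+//     return ((x & 0x00FF00FF) << 8) | ((x & 0xFF00FF00) >> 8);  // swap bytes
+//   }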
+
+template <typename F_TYPE>
+void MacroAssemblerRiscv64::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
+ FPURegister src2,
+ MaxMinKind kind) {
+ MOZ_ASSERT((std::is_same<F_TYPE, float>::value) ||
+ (std::is_same<F_TYPE, double>::value));
+
+ if (src1 == src2 && dst != src1) {
+ if (std::is_same<float, F_TYPE>::value) {
+ fmv_s(dst, src1);
+ } else {
+ fmv_d(dst, src1);
+ }
+ return;
+ }
+
+ Label done, nan;
+
+  // For RISC-V, fmin_s returns the other, non-NaN operand if exactly one
+  // operand is NaN; but for JS, if any operand is NaN, the result is NaN. The
+  // following handles this discrepancy between the ISA and JS semantics.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (std::is_same<float, F_TYPE>::value) {
+ CompareIsNotNanF32(scratch, src1, src2);
+ } else {
+ CompareIsNotNanF64(scratch, src1, src2);
+ }
+ BranchFalseF(scratch, &nan);
+
+ if (kind == MaxMinKind::kMax) {
+ if (std::is_same<float, F_TYPE>::value) {
+ fmax_s(dst, src1, src2);
+ } else {
+ fmax_d(dst, src1, src2);
+ }
+ } else {
+ if (std::is_same<float, F_TYPE>::value) {
+ fmin_s(dst, src1, src2);
+ } else {
+ fmin_d(dst, src1, src2);
+ }
+ }
+ jump(&done);
+
+ bind(&nan);
+ // if any operand is NaN, return NaN (fadd returns NaN if any operand is NaN)
+ if (std::is_same<float, F_TYPE>::value) {
+ fadd_s(dst, src1, src2);
+ } else {
+ fadd_d(dst, src1, src2);
+ }
+
+ bind(&done);
+}
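+// Example of the NaN discrepancy handled above: fmin.s(NaN, 1.0f) is 1.0f on
+// RISC-V, whereas JS Math.min(NaN, 1) must be NaN; the feq-based guard sends
+// such inputs to the fadd fallback, which does propagate NaN.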
+
+void MacroAssemblerRiscv64::Float32Max(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ comment(__FUNCTION__);
+ FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMax);
+}
+
+void MacroAssemblerRiscv64::Float32Min(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ comment(__FUNCTION__);
+ FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMin);
+}
+
+void MacroAssemblerRiscv64::Float64Max(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ comment(__FUNCTION__);
+ FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMax);
+}
+
+void MacroAssemblerRiscv64::Float64Min(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ comment(__FUNCTION__);
+ FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMin);
+}
+
+void MacroAssemblerRiscv64::BranchTrueShortF(Register rs, Label* target) {
+ ma_branch(target, NotEqual, rs, Operand(zero_reg));
+}
+
+void MacroAssemblerRiscv64::BranchFalseShortF(Register rs, Label* target) {
+ ma_branch(target, Equal, rs, Operand(zero_reg));
+}
+
+void MacroAssemblerRiscv64::BranchTrueF(Register rs, Label* target) {
+ bool long_branch = target->bound() ? !is_near(target) : false;
+ if (long_branch) {
+ Label skip;
+ BranchFalseShortF(rs, &skip);
+ BranchLong(target);
+ bind(&skip);
+ } else {
+ BranchTrueShortF(rs, target);
+ }
+}
+
+void MacroAssemblerRiscv64::BranchFalseF(Register rs, Label* target) {
+ bool long_branch = target->bound() ? !is_near(target) : false;
+ if (long_branch) {
+ Label skip;
+ BranchTrueShortF(rs, &skip);
+ BranchLong(target);
+ bind(&skip);
+ } else {
+ BranchFalseShortF(rs, target);
+ }
+}
+
+void MacroAssemblerRiscv64::Ror(Register rd, Register rs, const Operand& rt) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ if (rt.is_reg()) {
+ negw(scratch, rt.rm());
+ sllw(scratch, rs, scratch);
+ srlw(rd, rs, rt.rm());
+ or_(rd, scratch, rd);
+ sext_w(rd, rd);
+ } else {
+ int64_t ror_value = rt.immediate() % 32;
+ if (ror_value == 0) {
+ mv(rd, rs);
+ return;
+ } else if (ror_value < 0) {
+ ror_value += 32;
+ }
+ srliw(scratch, rs, ror_value);
+ slliw(rd, rs, 32 - ror_value);
+ or_(rd, scratch, rd);
+ sext_w(rd, rd);
+ }
+}
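+// The immediate path above is the classic (x >> r) | (x << (32 - r)) rotate on
+// the low 32 bits; e.g. Ror(rd, rs, 1) with rs holding 0x80000001 leaves
+// 0xC0000000 (sign-extended) in rd.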
+
+void MacroAssemblerRiscv64::Dror(Register rd, Register rs, const Operand& rt) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ if (rt.is_reg()) {
+ negw(scratch, rt.rm());
+ sll(scratch, rs, scratch);
+ srl(rd, rs, rt.rm());
+ or_(rd, scratch, rd);
+ } else {
+ int64_t dror_value = rt.immediate() % 64;
+ if (dror_value == 0) {
+ mv(rd, rs);
+ return;
+ } else if (dror_value < 0) {
+ dror_value += 64;
+ }
+ srli(scratch, rs, dror_value);
+ slli(rd, rs, 64 - dror_value);
+ or_(rd, scratch, rd);
+ }
+}
+
+void MacroAssemblerRiscv64::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch,
+ AnyRegister output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ switch (access.type()) {
+ case Scalar::Int8:
+ add(scratch, memoryBase, ptr);
+ lb(output.gpr(), scratch, 0);
+ break;
+ case Scalar::Uint8:
+ add(scratch, memoryBase, ptr);
+ lbu(output.gpr(), scratch, 0);
+ break;
+ case Scalar::Int16:
+ add(scratch, memoryBase, ptr);
+ lh(output.gpr(), scratch, 0);
+ break;
+ case Scalar::Uint16:
+ add(scratch, memoryBase, ptr);
+ lhu(output.gpr(), scratch, 0);
+ break;
+ case Scalar::Int32:
+ add(scratch, memoryBase, ptr);
+ lw(output.gpr(), scratch, 0);
+ break;
+ case Scalar::Uint32:
+ add(scratch, memoryBase, ptr);
+ lwu(output.gpr(), scratch, 0);
+ break;
+ case Scalar::Float64:
+ add(scratch, memoryBase, ptr);
+ fld(output.fpu(), scratch, 0);
+ break;
+ case Scalar::Float32:
+ add(scratch, memoryBase, ptr);
+ flw(output.fpu(), scratch, 0);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
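+  // Only the last emitted instruction is a memory access.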
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerRiscv64::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
+ AnyRegister value,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+ bool isFloat = false;
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ case Scalar::Float64:
+ isFloat = true;
+ break;
+ case Scalar::Float32:
+ isFloat = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ BaseIndex address(memoryBase, ptr, TimesOne);
+ asMasm().memoryBarrierBefore(access.sync());
+ if (isFloat) {
+ if (byteSize == 4) {
+ asMasm().ma_fst_s(value.fpu(), address);
+ } else {
+ asMasm().ma_fst_d(value.fpu(), address);
+ }
+ } else {
+ asMasm().ma_store(value.gpr(), address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+ // Only the last emitted instruction is a memory access.
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerRiscv64::GenPCRelativeJumpAndLink(Register rd,
+ int32_t imm32) {
+ MOZ_ASSERT(is_int32(imm32 + 0x800));
+ int32_t Hi20 = ((imm32 + 0x800) >> 12);
+ int32_t Lo12 = imm32 << 20 >> 20;
+  auipc(rd, Hi20);  // Read PC + Hi20 into rd.
+ jalr(rd, Lo12); // jump PC + Hi20 + Lo12
+}
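+// The +0x800 above compensates for the sign extension of the 12-bit low part:
+// e.g. for imm32 = 0x12345FFF, Lo12 sign-extends to -1 and
+// Hi20 = (imm32 + 0x800) >> 12 = 0x12346, so (Hi20 << 12) + Lo12 == imm32.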
+
+void MacroAssemblerRiscv64::BranchAndLinkShortHelper(int32_t offset, Label* L) {
+ MOZ_ASSERT(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset21);
+ jal(offset);
+}
+
+void MacroAssemblerRiscv64::BranchAndLinkShort(int32_t offset) {
+ MOZ_ASSERT(is_int21(offset));
+ BranchAndLinkShortHelper(offset, nullptr);
+}
+
+void MacroAssemblerRiscv64::BranchAndLinkShort(Label* L) {
+ BranchAndLinkShortHelper(0, L);
+}
+
+void MacroAssemblerRiscv64::BranchAndLink(Label* L) {
+ if (L->bound()) {
+ if (is_near(L)) {
+ BranchAndLinkShort(L);
+ } else {
+ BranchAndLinkLong(L);
+ }
+ } else {
+ BranchAndLinkShort(L);
+ }
+}
+
+void MacroAssemblerRiscv64::ma_fmv_d(FloatRegister src, ValueOperand dest) {
+ fmv_x_d(dest.valueReg(), src);
+}
+
+void MacroAssemblerRiscv64::ma_fmv_d(ValueOperand src, FloatRegister dest) {
+ fmv_d_x(dest, src.valueReg());
+}
+
+void MacroAssemblerRiscv64::ma_fmv_w(FloatRegister src, ValueOperand dest) {
+ fmv_x_w(dest.valueReg(), src);
+}
+
+void MacroAssemblerRiscv64::ma_fmv_w(ValueOperand src, FloatRegister dest) {
+ fmv_w_x(dest, src.valueReg());
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/MacroAssembler-riscv64.h b/js/src/jit/riscv64/MacroAssembler-riscv64.h
new file mode 100644
index 0000000000..f0e567ece7
--- /dev/null
+++ b/js/src/jit/riscv64/MacroAssembler-riscv64.h
@@ -0,0 +1,1224 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_MacroAssembler_riscv64_h
+#define jit_riscv64_MacroAssembler_riscv64_h
+
+#include <iterator>
+
+#include "jit/MoveResolver.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+static Register CallReg = t6;
+
+enum LiFlags {
+ Li64 = 0,
+ Li48 = 1,
+};
+
+class CompactBufferReader;
+enum LoadStoreSize {
+ SizeByte = 8,
+ SizeHalfWord = 16,
+ SizeWord = 32,
+ SizeDouble = 64
+};
+
+enum LoadStoreExtension { ZeroExtend = 0, SignExtend = 1 };
+enum JumpKind { LongJump = 0, ShortJump = 1 };
+enum FloatFormat { SingleFloat, DoubleFloat };
+class ScratchTagScope : public ScratchRegisterScope {
+ public:
+ ScratchTagScope(MacroAssembler& masm, const ValueOperand&)
+ : ScratchRegisterScope(masm) {}
+};
+
+class ScratchTagScopeRelease {
+ ScratchTagScope* ts_;
+
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
+ ts_->release();
+ }
+ ~ScratchTagScopeRelease() { ts_->reacquire(); }
+};
+
+struct ImmTag : public Imm32 {
+ ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+class MacroAssemblerRiscv64 : public Assembler {
+ public:
+ MacroAssemblerRiscv64() {}
+
+#ifdef JS_SIMULATOR_RISCV64
+ // See riscv64/base-constants-riscv.h DebugParameters.
+ void Debug(uint32_t parameters) { break_(parameters, false); }
+#endif
+
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ MoveResolver moveResolver_;
+
+ static bool SupportsFloatingPoint() { return true; }
+ static bool SupportsUnalignedAccesses() { return true; }
+ static bool SupportsFastUnalignedFPAccesses() { return true; }
+ void haltingAlign(int alignment) {
+    // TODO(riscv64): Implement a proper halting align.
+ nopAlign(alignment);
+ }
+
+ // TODO(RISCV) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
+ inline void GenPCRelativeJump(Register rd, int32_t imm32) {
+ MOZ_ASSERT(is_int32(imm32 + 0x800));
+ int32_t Hi20 = ((imm32 + 0x800) >> 12);
+ int32_t Lo12 = imm32 << 20 >> 20;
+    auipc(rd, Hi20);  // Read PC + Hi20 into rd.
+ jr(rd, Lo12); // jump PC + Hi20 + Lo12
+ }
+
+ // load
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_loadDouble(FloatRegister dest, Address address);
+ void ma_loadFloat(FloatRegister dest, Address address);
+ // store
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_storeDouble(FloatRegister dest, Address address);
+ void ma_storeFloat(FloatRegister dest, Address address);
+ void ma_liPatchable(Register dest, Imm32 imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);
+ void ma_li(Register dest, ImmGCPtr ptr);
+ void ma_li(Register dest, Imm32 imm);
+ void ma_li(Register dest, Imm64 imm);
+ void ma_li(Register dest, intptr_t imm) { RV_li(dest, imm); }
+ void ma_li(Register dest, CodeLabel* label);
+ void ma_li(Register dest, ImmWord imm);
+
+  // branches when done from within riscv64-specific code
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void BranchAndLinkShort(Label* L);
+ void BranchAndLink(Label* label);
+ void BranchAndLinkShort(int32_t offset);
+ void BranchAndLinkShortHelper(int32_t offset, Label* L);
+ void BranchAndLinkLong(Label* L);
+ void GenPCRelativeJumpAndLink(Register rd, int32_t imm32);
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rj, Operand rt); \
+ void instr(Register rd, Register rj, Imm32 imm) { \
+ instr(rd, rj, Operand(imm.value)); \
+ } \
+ void instr(Register rd, Imm32 imm) { instr(rd, rd, Operand(imm.value)); } \
+ void instr(Register rd, Register rs) { instr(rd, rd, Operand(rs)); }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rs, const Operand& rt); \
+ void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
+ void instr(Register rs, Imm32 j) { instr(rs, Operand(j.value)); }
+
+ DEFINE_INSTRUCTION(ma_and);
+ DEFINE_INSTRUCTION(ma_or);
+ DEFINE_INSTRUCTION(ma_xor);
+ DEFINE_INSTRUCTION(ma_nor);
+ DEFINE_INSTRUCTION(ma_sub32)
+ DEFINE_INSTRUCTION(ma_sub64)
+ DEFINE_INSTRUCTION(ma_add32)
+ DEFINE_INSTRUCTION(ma_add64)
+ DEFINE_INSTRUCTION(ma_div32)
+ DEFINE_INSTRUCTION(ma_divu32)
+ DEFINE_INSTRUCTION(ma_div64)
+ DEFINE_INSTRUCTION(ma_divu64)
+ DEFINE_INSTRUCTION(ma_mod32)
+ DEFINE_INSTRUCTION(ma_modu32)
+ DEFINE_INSTRUCTION(ma_mod64)
+ DEFINE_INSTRUCTION(ma_modu64)
+ DEFINE_INSTRUCTION(ma_mul32)
+ DEFINE_INSTRUCTION(ma_mulh32)
+ DEFINE_INSTRUCTION(ma_mulhu32)
+ DEFINE_INSTRUCTION(ma_mul64)
+ DEFINE_INSTRUCTION(ma_mulh64)
+ DEFINE_INSTRUCTION(ma_sll64)
+ DEFINE_INSTRUCTION(ma_sra64)
+ DEFINE_INSTRUCTION(ma_srl64)
+ DEFINE_INSTRUCTION(ma_sll32)
+ DEFINE_INSTRUCTION(ma_sra32)
+ DEFINE_INSTRUCTION(ma_srl32)
+ DEFINE_INSTRUCTION(ma_slt)
+ DEFINE_INSTRUCTION(ma_sltu)
+ DEFINE_INSTRUCTION(ma_sle)
+ DEFINE_INSTRUCTION(ma_sleu)
+ DEFINE_INSTRUCTION(ma_sgt)
+ DEFINE_INSTRUCTION(ma_sgtu)
+ DEFINE_INSTRUCTION(ma_sge)
+ DEFINE_INSTRUCTION(ma_sgeu)
+ DEFINE_INSTRUCTION(ma_seq)
+ DEFINE_INSTRUCTION(ma_sne)
+
+ DEFINE_INSTRUCTION2(ma_seqz)
+ DEFINE_INSTRUCTION2(ma_snez)
+ DEFINE_INSTRUCTION2(ma_neg);
+
+#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION
+ // arithmetic based ops
+ void ma_add32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_add32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, ImmWord imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, ImmWord imm,
+ Label* overflow);
+
+ // subtract
+ void ma_sub32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+  // multiplies. For now, there are only a few that we care about.
+ void ma_mulPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+
+  // branches when done from within riscv64-specific code
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, l, c, jumpKind);
+ }
+ void ma_b(Register lhs, Address addr, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(rhs != scratch);
+ ma_load(scratch, addr, SizeDouble);
+ ma_b(scratch, rhs, l, c, jumpKind);
+ }
+
+ void ma_branch(Label* target, Condition cond, Register r1, const Operand& r2,
+ JumpKind jumpKind = ShortJump);
+
+ void ma_branch(Label* target, JumpKind jumpKind = ShortJump) {
+ ma_branch(target, Always, zero, zero, jumpKind);
+ }
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ // fp instructions
+ void ma_lis(FloatRegister dest, float value);
+
+ void ma_fst_d(FloatRegister src, BaseIndex address);
+ void ma_fst_s(FloatRegister src, BaseIndex address);
+
+ void ma_fld_d(FloatRegister dest, const BaseIndex& src);
+ void ma_fld_s(FloatRegister dest, const BaseIndex& src);
+
+ void ma_fmv_d(FloatRegister src, ValueOperand dest);
+ void ma_fmv_d(ValueOperand src, FloatRegister dest);
+
+ void ma_fmv_w(FloatRegister src, ValueOperand dest);
+ void ma_fmv_w(ValueOperand src, FloatRegister dest);
+
+ void ma_fld_s(FloatRegister ft, Address address);
+ void ma_fld_d(FloatRegister ft, Address address);
+ void ma_fst_d(FloatRegister ft, Address address);
+ void ma_fst_s(FloatRegister ft, Address address);
+
+ // stack
+ void ma_pop(Register r);
+ void ma_push(Register r);
+ void ma_pop(FloatRegister f);
+ void ma_push(FloatRegister f);
+
+ Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+ Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, ImmWord imm, Condition c);
+
+ void ma_rotr_w(Register rd, Register rj, Imm32 shift);
+
+ void ma_fmovz(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
+ Register rk);
+ void ma_fmovn(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
+ Register rk);
+
+ // arithmetic based ops
+ void ma_add32TestCarry(Condition cond, Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // subtract
+ void ma_sub32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ void MulOverflow32(Register dst, Register left, const Operand& right,
+ Register overflow);
+  // multiplies. For now, there are only a few that we care about.
+ void ma_mul32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_mul32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // divisions
+ void ma_div_branch_overflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_div_branch_overflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+  // Fast mod: uses scratch registers, and thus needs to be in the assembler.
+  // It implicitly assumes that we can overwrite dest at the beginning of the
+  // sequence.
+ void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero = nullptr);
+
+ // FP branches
+ void ma_compareF32(Register rd, DoubleCondition cc, FloatRegister cmp1,
+ FloatRegister cmp2);
+ void ma_compareF64(Register rd, DoubleCondition cc, FloatRegister cmp1,
+ FloatRegister cmp2);
+
+ void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
+
+ void ma_call(ImmPtr dest);
+
+ void ma_jump(ImmPtr dest);
+
+ void jump(Label* label) { ma_branch(label); }
+ void jump(Register reg) { jr(reg); }
+
+ void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void BranchShort(Label* L);
+
+ void BranchShort(int32_t offset, Condition cond, Register rs,
+ const Operand& rt);
+ void BranchShort(Label* L, Condition cond, Register rs, const Operand& rt);
+ void BranchShortHelper(int32_t offset, Label* L);
+ bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+ void BranchLong(Label* L);
+
+ // Floating point branches
+ void BranchTrueShortF(Register rs, Label* target);
+ void BranchFalseShortF(Register rs, Label* target);
+
+ void BranchTrueF(Register rs, Label* target);
+ void BranchFalseF(Register rs, Label* target);
+
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ fmv_x_d(dest, src);
+ srli(dest, dest, 32);
+ }
+  // The bit field starting at bit pos and extending for size bits is extracted
+  // from rs and stored zero/sign-extended and right-justified in rt.
+ void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size,
+ bool sign_extend = false);
+ void ExtractBits(Register dest, Register source, Register pos, int size,
+ bool sign_extend = false) {
+ sra(dest, source, pos);
+ ExtractBits(dest, dest, 0, size, sign_extend);
+ }
+
+ // Insert bits [0, size) of source to bits [pos, pos+size) of dest
+ void InsertBits(Register dest, Register source, Register pos, int size);
+
+ // Insert bits [0, size) of source to bits [pos, pos+size) of dest
+ void InsertBits(Register dest, Register source, int pos, int size);
+
+ template <typename F_TYPE>
+ void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+ FPURoundingMode mode);
+
+ template <typename TruncFunc>
+ void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
+ TruncFunc trunc, bool Inexact = false);
+
+ void Clear_if_nan_d(Register rd, FPURegister fs);
+ void Clear_if_nan_s(Register rd, FPURegister fs);
+ // Convert double to unsigned word.
+ void Trunc_uw_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert double to signed word.
+ void Trunc_w_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert double to unsigned long.
+ void Trunc_ul_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+  // Convert double to signed long.
+ void Trunc_l_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert single to signed word.
+ void Trunc_w_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert single to unsigned word.
+ void Trunc_uw_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert single to unsigned long.
+ void Trunc_ul_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+  // Convert single to signed long.
+ void Trunc_l_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Round double functions
+ void Trunc_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Round_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Floor_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Ceil_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+
+ // Round float functions
+ void Trunc_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Round_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Floor_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+
+ // Round single to signed word.
+ void Round_w_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Round double to signed word.
+ void Round_w_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Ceil single to signed word.
+ void Ceil_w_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Ceil double to signed word.
+ void Ceil_w_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Floor single to signed word.
+ void Floor_w_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Floor double to signed word.
+ void Floor_w_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ void Clz32(Register rd, Register rs);
+ void Ctz32(Register rd, Register rs);
+ void Popcnt32(Register rd, Register rs, Register scratch);
+
+ void Popcnt64(Register rd, Register rs, Register scratch);
+ void Ctz64(Register rd, Register rs);
+ void Clz64(Register rd, Register rs);
+
+ // Change endianness
+ void ByteSwap(Register dest, Register src, int operand_size,
+ Register scratch);
+
+ void Ror(Register rd, Register rs, const Operand& rt);
+ void Dror(Register rd, Register rs, const Operand& rt);
+
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2);
+
+ template <typename F>
+ void FloatMinMaxHelper(FPURegister dst, FPURegister src1, FPURegister src2,
+ MaxMinKind kind);
+
+ inline void NegateBool(Register rd, Register rs) { xori(rd, rs, 1); }
+
+ protected:
+ void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register ptrScratch, AnyRegister output,
+ Register tmp);
+ void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+};
+
+class MacroAssemblerRiscv64Compat : public MacroAssemblerRiscv64 {
+ public:
+ using MacroAssemblerRiscv64::call;
+
+ MacroAssemblerRiscv64Compat() {}
+
+ void convertBoolToInt32(Register src, Register dest) {
+ ma_and(dest, src, Imm32(0xff));
+ };
+ void convertInt32ToDouble(Register src, FloatRegister dest) {
+ fcvt_d_w(dest, src);
+ };
+ void convertInt32ToDouble(const Address& src, FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_load(scratch, src, SizeWord, SignExtend);
+ fcvt_d_w(dest, scratch);
+ };
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(scratch != src.base);
+ MOZ_ASSERT(scratch != src.index);
+ computeScaledAddress(src, scratch);
+ convertInt32ToDouble(Address(scratch, src.offset), dest);
+ };
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void movq(Register rj, Register rd);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_add64(dest, address.base, Imm32(address.offset));
+ }
+
+ void computeEffectiveAddress(const BaseIndex& address, Register dest) {
+ computeScaledAddress(address, dest);
+ if (address.offset) {
+ ma_add64(dest, dest, Imm32(address.offset));
+ }
+ }
+
+ void j(Label* dest) { ma_branch(dest); }
+
+ void mov(Register src, Register dest) { addi(dest, src, 0); }
+ void mov(ImmWord imm, Register dest) { ma_li(dest, imm); }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(CodeLabel* label, Register dest) { ma_li(dest, label); }
+ void mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); }
+ void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); }
+
+ void writeDataRelocation(const Value& val) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (val.isGCThing()) {
+ gc::Cell* cell = val.toGCThing();
+ if (cell && gc::IsInsideNursery(cell)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(currentOffset());
+ }
+ }
+
+ void branch(JitCode* c) {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 7);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(c->raw()));
+ jr(scratch);
+ }
+ void branch(const Register reg) { jr(reg); }
+ void ret() {
+ ma_pop(ra);
+ jalr(zero_reg, ra, 0);
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(ImmWord imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(ImmGCPtr imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(const Address& address) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(address, scratch);
+ ma_push(scratch);
+ }
+ void push(Register reg) { ma_push(reg); }
+ void push(FloatRegister reg) { ma_push(reg); }
+ void pop(Register reg) { ma_pop(reg); }
+ void pop(FloatRegister reg) { ma_pop(reg); }
+
+  // Emit a branch that can be toggled to a non-operation. Here the "andi"
+  // instruction is used to toggle the branch.
+  // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+    // Seven instructions used in: MacroAssemblerRiscv64Compat::toggledCall
+ return 7 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CodeOffset offset = movWithPatch(imm, scratch);
+ ma_push(scratch);
+ return offset;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm, Li64);
+ return offset;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 6);
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return offset;
+ }
+
+ void writeCodePointer(CodeLabel* label) {
+ label->patchAt()->bind(currentOffset());
+ label->setLinkMode(CodeLabel::RawPointer);
+ m_buffer.ensureSpace(sizeof(void*));
+ emit(uint32_t(-1));
+ emit(uint32_t(-1));
+ }
+
+ void jump(Label* label) { ma_branch(label); }
+ void jump(Register reg) { jr(reg); }
+ void jump(const Address& address) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(address, scratch);
+ jr(scratch);
+ }
+
+ void jump(JitCode* code) { branch(code); }
+
+ void jump(ImmPtr ptr) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ptr, RelocationKind::HARDCODED);
+ ma_jump(ptr);
+ }
+
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+
+ void splitTag(Register src, Register dest) {
+ srli(dest, src, JSVAL_TAG_SHIFT);
+ }
+
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ splitTag(value, tag);
+ }
+
+ void moveIfZero(Register dst, Register src, Register cond) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dst != scratch && cond != scratch);
+ Label done;
+ ma_branch(&done, NotEqual, cond, zero);
+ mv(dst, src);
+ bind(&done);
+ }
+
+ void moveIfNotZero(Register dst, Register src, Register cond) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dst != scratch && cond != scratch);
+ Label done;
+ ma_branch(&done, Equal, cond, zero);
+ mv(dst, src);
+ bind(&done);
+ }
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest,
+ JSValueType type) {
+ unboxNonDouble(operand.valueReg(), dest, type);
+ }
+
+ template <typename T>
+ void unboxNonDouble(T src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ load32(src, dest);
+ return;
+ }
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest, type);
+ }
+
+ void unboxNonDouble(Register src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ slliw(dest, src, 0);
+ return;
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(scratch != src);
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ xor_(dest, src, scratch);
+ }
+
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ static_assert(JS::detail::ValueObjectOrNullBit ==
+ (uint64_t(0x8) << JSVAL_TAG_SHIFT));
+ InsertBits(dest, zero, JSVAL_TAG_SHIFT + 3, 1);
+ }
+
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ ExtractBits(dest, dest, 0, JSVAL_TAG_SHIFT - 1);
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ ExtractBits(dest, src.valueReg(), 0, JSVAL_TAG_SHIFT - 1);
+ }
+
+ // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
+ void getGCThingValueChunk(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != dest);
+ loadPtr(src, dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), scratch);
+ and_(dest, dest, scratch);
+ }
+ void getGCThingValueChunk(const ValueOperand& src, Register dest) {
+ MOZ_ASSERT(src.valueReg() != dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
+ and_(dest, dest, src.valueReg());
+ }
+
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(Register src, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxInt32(const BaseIndex& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(Register src, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxBoolean(const BaseIndex& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(Register src, Register dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxDouble(const BaseIndex& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(Register src, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxSymbol(const ValueOperand& src, Register dest);
+ void unboxSymbol(Register src, Register dest);
+ void unboxSymbol(const Address& src, Register dest);
+ void unboxBigInt(const ValueOperand& operand, Register dest);
+ void unboxBigInt(Register src, Register dest);
+ void unboxBigInt(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(Register src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type);
+
+ void notBoolean(const ValueOperand& val) {
+ xori(val.valueReg(), val.valueReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch);
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractString(const ValueOperand& value,
+ Register scratch) {
+ unboxString(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxSymbol(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch);
+ [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& src, FloatRegister dest);
+ void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest);
+ void loadConstantDouble(double dp, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest);
+
+ // higher level tag testing code
+ Address ToPayload(Address value) { return value; }
+
+ template <typename T>
+ void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, BaseIndex address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, scratch2);
+ } else {
+ unboxNonDouble(value, scratch2, type);
+ }
+ computeEffectiveAddress(address, scratch);
+ sd(scratch2, scratch, 0);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, Address address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, scratch);
+ } else {
+ unboxNonDouble(value, scratch, type);
+ }
+ storePtr(scratch, address);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
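+ // Editor's sketch of the boxing layout assumed by boxValue below: a
+ // non-double Value is (tag << JSVAL_TAG_SHIFT) | payload, where INT32 and
+ // BOOLEAN payloads occupy the low 32 bits and pointer-like payloads occupy
+ // the bits below JSVAL_TAG_SHIFT.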
+ void boxValue(JSValueType type, Register src, Register dest) {
+ MOZ_ASSERT(src != dest);
+
+ JSValueTag tag = (JSValueTag)JSVAL_TYPE_TO_TAG(type);
+ ma_li(dest, Imm32(tag));
+ slli(dest, dest, JSVAL_TAG_SHIFT);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ InsertBits(dest, src, 0, 32);
+ } else {
+ InsertBits(dest, src, 0, JSVAL_TAG_SHIFT);
+ }
+ }
+
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+
+ void storePrivateValue(Register src, const Address& dest) {
+ storePtr(src, dest);
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ storePtr(imm, dest);
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(const BaseIndex& src, ValueOperand val);
+
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ if (val.isGCThing()) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), scratch);
+ push(scratch);
+ } else {
+ push(ImmWord(val.asRawBits()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ boxValue(type, reg, scratch);
+ push(scratch);
+ }
+ void pushValue(const Address& addr);
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ loadValue(addr, ValueOperand(scratch));
+ pushValue(ValueOperand(scratch));
+ }
+
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ load16SignExtend(src, dest);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ void SignExtendByte(Register rd, Register rs) {
+ slli(rd, rs, xlen - 8);
+ srai(rd, rd, xlen - 8);
+ }
+
+ void SignExtendShort(Register rd, Register rs) {
+ slli(rd, rs, xlen - 16);
+ srai(rd, rd, xlen - 16);
+ }
+
+ void SignExtendWord(Register rd, Register rs) { sext_w(rd, rs); }
+ void ZeroExtendWord(Register rd, Register rs) {
+ slli(rd, rs, 32);
+ srli(rd, rd, 32);
+ }
+
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ load16ZeroExtend(src, dest);
+ }
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ load32(src, dest);
+ }
+
+ void load64(const Address& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+
+ void loadDouble(const Address& addr, FloatRegister dest) {
+ ma_loadDouble(dest, addr);
+ }
+ void loadDouble(const BaseIndex& src, FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ computeScaledAddress(src, scratch);
+ fld(dest, scratch, 0);
+ }
+
+ void loadFloat32(const Address& addr, FloatRegister dest) {
+ ma_loadFloat(dest, addr);
+ }
+ void loadFloat32(const BaseIndex& src, FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ computeScaledAddress(src, scratch);
+ flw(dest, scratch, 0);
+ }
+
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ load64(src, dest);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ template <typename T>
+ void store16Unaligned(Register src, const T& dest) {
+ store16(src, dest);
+ }
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ // NOTE: This will use the second scratch register on RISCV64. Only ARM
+ // needs an implementation without the second scratch.
+ void store32_NoSecondScratch(Imm32 src, const Address& address) {
+ store32(src, address);
+ }
+
+ template <typename T>
+ void store32Unaligned(Register src, const T& dest) {
+ store32(src, dest);
+ }
+
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+ void store64(Imm64 imm, const BaseIndex& address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ void store64(Register64 src, Address address) { storePtr(src.reg, address); }
+ void store64(Register64 src, const BaseIndex& address) {
+ storePtr(src.reg, address);
+ }
+
+ template <typename T>
+ void store64Unaligned(Register64 src, const T& dest) {
+ store64(src, dest);
+ }
+
+ template <typename T>
+ void storePtr(ImmWord imm, T address);
+ template <typename T>
+ void storePtr(ImmPtr imm, T address);
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) { fmv_d(dest, src); }
+
+ void zeroDouble(FloatRegister reg) { fmv_d_x(reg, zero); }
+
+ void convertUInt64ToDouble(Register src, FloatRegister dest);
+
+ void breakpoint(uint32_t value = 0);
+
+ void checkStackAlignment() {
+#ifdef DEBUG
+ Label aligned;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, sp, ABIStackAlignment - 1);
+ ma_b(scratch, zero, &aligned, Equal, ShortJump);
+ breakpoint();
+ bind(&aligned);
+#endif
+ };
+
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, Register rhs,
+ Register dest);
+
+ void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register64 output, Register tmp);
+ void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+
+ public:
+ void abiret() { jr(ra); }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) { fmv_s(dest, src); }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerRiscv64Compat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_MacroAssembler_riscv64_h */
diff --git a/js/src/jit/riscv64/MoveEmitter-riscv64.cpp b/js/src/jit/riscv64/MoveEmitter-riscv64.cpp
new file mode 100644
index 0000000000..79f8d176b2
--- /dev/null
+++ b/js/src/jit/riscv64/MoveEmitter-riscv64.cpp
@@ -0,0 +1,333 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/riscv64/MoveEmitter-riscv64.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterRiscv64::breakCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+ // Consider a move cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
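+ // (Editor's example for the GENERAL case: given the cycle
+ // {a0 -> a1, a1 -> a0}, breakCycle spills the current value of a1 into
+ // cycleSlot(0) so that the a0 -> a1 move can proceed; completeCycle later
+ // loads the spilled value into a0.)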
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope fpscratch32(masm);
+ masm.loadFloat32(getAdjustedAddress(to), fpscratch32);
+ masm.storeFloat32(fpscratch32, cycleSlot(slotId));
+ } else {
+ masm.storeFloat32(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope fpscratch64(masm);
+ masm.loadDouble(getAdjustedAddress(to), fpscratch64);
+ masm.storeDouble(fpscratch64, cycleSlot(slotId));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.load32(getAdjustedAddress(to), scratch2);
+ masm.store32(scratch2, cycleSlot(0));
+ } else {
+ masm.store32(to.reg(), cycleSlot(0));
+ }
+ break;
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.loadPtr(getAdjustedAddress(to), scratch2);
+ masm.storePtr(scratch2, cycleSlot(0));
+ } else {
+ masm.storePtr(to.reg(), cycleSlot(0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterRiscv64::completeCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+ // Consider a move cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope fpscratch32(masm);
+ masm.loadFloat32(cycleSlot(slotId), fpscratch32);
+ masm.storeFloat32(fpscratch32, getAdjustedAddress(to));
+ } else {
+ masm.loadFloat32(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope fpscratch64(masm);
+ masm.loadDouble(cycleSlot(slotId), fpscratch64);
+ masm.storeDouble(fpscratch64, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.load32(cycleSlot(0), scratch2);
+ masm.store32(scratch2, getAdjustedAddress(to));
+ } else {
+ masm.load32(cycleSlot(0), to.reg());
+ }
+ break;
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.loadPtr(cycleSlot(0), scratch2);
+ masm.storePtr(scratch2, getAdjustedAddress(to));
+ } else {
+ masm.loadPtr(cycleSlot(0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterRiscv64::emit(const MoveResolver& moves) {
+ if (moves.numCycles()) {
+ // Reserve stack for cycle resolution
+ static_assert(SpillSlotSize == 8);
+ masm.reserveStack(moves.numCycles() * SpillSlotSize);
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++) {
+ emit(moves.getMove(i));
+ }
+}
+
+void MoveEmitterRiscv64::emit(const MoveOp& move) {
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd() && move.isCycleBegin()) {
+ // A fun consequence of aliased registers is you can have multiple
+ // cycles at once, and one can end exactly where another begins.
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ return;
+ }
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ MOZ_ASSERT(inCycle_ > 0);
+ inCycle_--;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ inCycle_++;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to);
+ break;
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterRiscv64::emitMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ if (to.isGeneralReg()) {
+ masm.movePtr(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.storePtr(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.loadPtr(getAdjustedAddress(from), scratch2);
+ masm.storePtr(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.computeEffectiveAddress(getAdjustedAddress(from), scratch2);
+ masm.storePtr(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+}
+
+void MoveEmitterRiscv64::emitInt32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ if (to.isGeneralReg()) {
+ masm.move32(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.store32(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.load32(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.load32(getAdjustedAddress(from), scratch2);
+ masm.store32(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.computeEffectiveAddress(getAdjustedAddress(from), scratch2);
+ masm.store32(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+}
+
+void MoveEmitterRiscv64::emitFloat32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.fmv_s(to.floatReg(), from.floatReg());
+ } else if (to.isGeneralReg()) {
+ // This should only be used when passing a float parameter in a1, a2,
+ // or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.fmv_x_w(to.reg(), from.floatReg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+ // This should only be used when passing a float parameter in a1, a2,
+ // or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ ScratchFloat32Scope fpscratch32(masm);
+ masm.loadFloat32(getAdjustedAddress(from), fpscratch32);
+ masm.storeFloat32(fpscratch32, getAdjustedAddress(to));
+ }
+}
+
+void MoveEmitterRiscv64::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.fmv_d(to.floatReg(), from.floatReg());
+ } else if (to.isGeneralReg()) {
+ masm.fmv_x_d(to.reg(), from.floatReg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ if (from.isMemory()) {
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else {
+ masm.fmv_d_x(to.floatReg(), from.reg());
+ }
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ ScratchDoubleScope fpscratch64(masm);
+ masm.loadDouble(getAdjustedAddress(from), fpscratch64);
+ masm.storeDouble(fpscratch64, getAdjustedAddress(to));
+ }
+}
+
+Address MoveEmitterRiscv64::cycleSlot(uint32_t slot, uint32_t subslot) const {
+ int32_t offset = masm.framePushed() - pushedAtCycle_;
+ return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+int32_t MoveEmitterRiscv64::getAdjustedOffset(const MoveOperand& operand) {
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+ if (operand.base() != StackPointer) {
+ return operand.disp();
+ }
+
+ // Adjust offset if stack pointer has been moved.
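+ // (Editor's example: if 16 bytes have been pushed since pushedAtStart_,
+ // an sp-relative disp of 8 now refers to sp + 24.)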
+ return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
+
+Address MoveEmitterRiscv64::getAdjustedAddress(const MoveOperand& operand) {
+ return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+void MoveEmitterRiscv64::assertDone() { MOZ_ASSERT(inCycle_ == 0); }
+
+void MoveEmitterRiscv64::finish() {
+ assertDone();
+
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
diff --git a/js/src/jit/riscv64/MoveEmitter-riscv64.h b/js/src/jit/riscv64/MoveEmitter-riscv64.h
new file mode 100644
index 0000000000..34d86b5794
--- /dev/null
+++ b/js/src/jit/riscv64/MoveEmitter-riscv64.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_MoveEmitter_riscv64_h
+#define jit_riscv64_MoveEmitter_riscv64_h
+
+#include "mozilla/Assertions.h"
+#include "jit/MacroAssembler.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+class MacroAssemblerRiscv64;
+class MoveResolver;
+struct Register;
+
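+// Editor's usage sketch (an assumption, mirroring the other platform move
+// emitters rather than anything stated in this patch): once a MoveResolver
+// has been resolved, code generation typically does
+//   MoveEmitterRiscv64 emitter(masm);
+//   emitter.emit(resolver);
+//   emitter.finish();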
+class MoveEmitterRiscv64 {
+ uint32_t inCycle_;
+ MacroAssembler& masm;
+
+ // Value of masm.framePushed() at the start of move emission.
+ uint32_t pushedAtStart_;
+
+ // This stores the stack offset of the cycle-resolution spill slot,
+ // snapshotting masm.framePushed() at the time it was allocated. It is -1
+ // if no stack space has been allocated for that spill.
+ int32_t pushedAtCycle_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ public:
+ explicit MoveEmitterRiscv64(MacroAssembler& m)
+ : inCycle_(0),
+ masm(m),
+ pushedAtStart_(masm.framePushed()),
+ pushedAtCycle_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg) {}
+ void emit(const MoveResolver&);
+ void emit(const MoveOp& move);
+ void emitMove(const MoveOperand& from, const MoveOperand& to);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void finish();
+ void assertDone();
+ void setScratchRegister(Register) { MOZ_CRASH("Unimplement on riscv"); }
+ Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;
+ int32_t getAdjustedOffset(const MoveOperand& operand);
+ Address getAdjustedAddress(const MoveOperand& operand);
+
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+};
+
+typedef MoveEmitterRiscv64 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_MoveEmitter_riscv64_h */
diff --git a/js/src/jit/riscv64/Register-riscv64.h b/js/src/jit/riscv64/Register-riscv64.h
new file mode 100644
index 0000000000..54664dcf96
--- /dev/null
+++ b/js/src/jit/riscv64/Register-riscv64.h
@@ -0,0 +1,186 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_Register_riscv64_h
+#define jit_riscv64_Register_riscv64_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register zero{Registers::zero};
+static constexpr Register ra{Registers::ra};
+static constexpr Register tp{Registers::tp};
+static constexpr Register sp{Registers::sp};
+static constexpr Register gp{Registers::gp};
+static constexpr Register a0{Registers::a0};
+static constexpr Register a1{Registers::a1};
+static constexpr Register a2{Registers::a2};
+static constexpr Register a3{Registers::a3};
+static constexpr Register a4{Registers::a4};
+static constexpr Register a5{Registers::a5};
+static constexpr Register a6{Registers::a6};
+static constexpr Register a7{Registers::a7};
+static constexpr Register t0{Registers::t0};
+static constexpr Register t1{Registers::t1};
+static constexpr Register t2{Registers::t2};
+static constexpr Register t3{Registers::t3};
+static constexpr Register t4{Registers::t4};
+static constexpr Register t5{Registers::t5};
+static constexpr Register t6{Registers::t6};
+static constexpr Register fp{Registers::fp};
+static constexpr Register s1{Registers::s1};
+static constexpr Register s2{Registers::s2};
+static constexpr Register s3{Registers::s3};
+static constexpr Register s4{Registers::s4};
+static constexpr Register s5{Registers::s5};
+static constexpr Register s6{Registers::s6};
+static constexpr Register s7{Registers::s7};
+static constexpr Register s8{Registers::s8};
+static constexpr Register s9{Registers::s9};
+static constexpr Register s10{Registers::s10};
+static constexpr Register s11{Registers::s11};
+
+static constexpr FloatRegister ft0{FloatRegisters::f0};
+static constexpr FloatRegister ft1{FloatRegisters::f1};
+static constexpr FloatRegister ft2{FloatRegisters::f2};
+static constexpr FloatRegister ft3{FloatRegisters::f3};
+static constexpr FloatRegister ft4{FloatRegisters::f4};
+static constexpr FloatRegister ft5{FloatRegisters::f5};
+static constexpr FloatRegister ft6{FloatRegisters::f6};
+static constexpr FloatRegister ft7{FloatRegisters::f7};
+static constexpr FloatRegister fs0{FloatRegisters::f8};
+static constexpr FloatRegister fs1{FloatRegisters::f9};
+static constexpr FloatRegister fa0{FloatRegisters::f10};
+static constexpr FloatRegister fa1{FloatRegisters::f11};
+static constexpr FloatRegister fa2{FloatRegisters::f12};
+static constexpr FloatRegister fa3{FloatRegisters::f13};
+static constexpr FloatRegister fa4{FloatRegisters::f14};
+static constexpr FloatRegister fa5{FloatRegisters::f15};
+static constexpr FloatRegister fa6{FloatRegisters::f16};
+static constexpr FloatRegister fa7{FloatRegisters::f17};
+static constexpr FloatRegister fs2{FloatRegisters::f18};
+static constexpr FloatRegister fs3{FloatRegisters::f19};
+static constexpr FloatRegister fs4{FloatRegisters::f20};
+static constexpr FloatRegister fs5{FloatRegisters::f21};
+static constexpr FloatRegister fs6{FloatRegisters::f22};
+static constexpr FloatRegister fs7{FloatRegisters::f23};
+static constexpr FloatRegister fs8{FloatRegisters::f24};
+static constexpr FloatRegister fs9{FloatRegisters::f25};
+static constexpr FloatRegister fs10{FloatRegisters::f26};
+static constexpr FloatRegister fs11{FloatRegisters::f27};
+static constexpr FloatRegister ft8{FloatRegisters::f28};
+static constexpr FloatRegister ft9{FloatRegisters::f29};
+static constexpr FloatRegister ft10{FloatRegisters::f30};
+static constexpr FloatRegister ft11{FloatRegisters::f31};
+
+static constexpr Register StackPointer{Registers::sp};
+static constexpr Register FramePointer{Registers::fp};
+static constexpr Register ReturnReg{Registers::a0};
+static constexpr Register ScratchRegister{Registers::s11};
+static constexpr Register64 ReturnReg64(ReturnReg);
+
+static constexpr FloatRegister ReturnFloat32Reg{FloatRegisters::fa0};
+static constexpr FloatRegister ReturnDoubleReg{FloatRegisters::fa0};
+#ifdef ENABLE_WASM_SIMD
+static constexpr FloatRegister ReturnSimd128Reg{FloatRegisters::invalid_reg};
+static constexpr FloatRegister ScratchSimd128Reg{FloatRegisters::invalid_reg};
+#endif
+static constexpr FloatRegister InvalidFloatReg{};
+
+static constexpr FloatRegister ScratchFloat32Reg{FloatRegisters::ft10};
+static constexpr FloatRegister ScratchDoubleReg{FloatRegisters::ft10};
+static constexpr FloatRegister ScratchDoubleReg2{FloatRegisters::fs11};
+
+static constexpr Register OsrFrameReg{Registers::a3};
+static constexpr Register PreBarrierReg{Registers::a1};
+static constexpr Register InterpreterPCReg{Registers::t0};
+static constexpr Register CallTempReg0{Registers::t0};
+static constexpr Register CallTempReg1{Registers::t1};
+static constexpr Register CallTempReg2{Registers::t2};
+static constexpr Register CallTempReg3{Registers::t3};
+static constexpr Register CallTempReg4{Registers::a6};
+static constexpr Register CallTempReg5{Registers::a7};
+static constexpr Register InvalidReg{Registers::invalid_reg};
+static constexpr Register CallTempNonArgRegs[] = {t0, t1, t2, t3};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+static constexpr Register IntArgReg0{Registers::a0};
+static constexpr Register IntArgReg1{Registers::a1};
+static constexpr Register IntArgReg2{Registers::a2};
+static constexpr Register IntArgReg3{Registers::a3};
+static constexpr Register IntArgReg4{Registers::a4};
+static constexpr Register IntArgReg5{Registers::a5};
+static constexpr Register IntArgReg6{Registers::a6};
+static constexpr Register IntArgReg7{Registers::a7};
+static constexpr Register HeapReg{Registers::s7};
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg1;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg1;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
+
+static constexpr Register JSReturnReg_Type{Registers::a3};
+static constexpr Register JSReturnReg_Data{Registers::s2};
+static constexpr Register JSReturnReg{Registers::a2};
+static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0{Registers::t0};
+static constexpr Register ABINonArgReg1{Registers::t1};
+static constexpr Register ABINonArgReg2{Registers::t2};
+static constexpr Register ABINonArgReg3{Registers::t3};
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different
+static constexpr Register ABINonArgReturnReg0{Registers::t0};
+static constexpr Register ABINonArgReturnReg1{Registers::t1};
+static constexpr Register ABINonVolatileReg{Registers::s1};
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call which must preserve the ABI argument, return,
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg{Registers::t0};
+
+// This register may be volatile or nonvolatile.
+// Avoid ft11 which is the scratch register.
+static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::ft11};
+
+static constexpr Register WasmTableCallScratchReg0{ABINonArgReg0};
+static constexpr Register WasmTableCallScratchReg1{ABINonArgReg1};
+static constexpr Register WasmTableCallSigReg{ABINonArgReg2};
+static constexpr Register WasmTableCallIndexReg{ABINonArgReg3};
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions. Must be nonvolatile.
+static constexpr Register InstanceReg{Registers::s4};
+
+static constexpr Register WasmJitEntryReturnScratch{Registers::t1};
+
+static constexpr Register WasmCallRefCallScratchReg0{ABINonArgReg0};
+static constexpr Register WasmCallRefCallScratchReg1{ABINonArgReg1};
+static constexpr Register WasmCallRefReg{ABINonArgReg3};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_Register_riscv64_h
diff --git a/js/src/jit/riscv64/SharedICHelpers-riscv64-inl.h b/js/src/jit/riscv64/SharedICHelpers-riscv64-inl.h
new file mode 100644
index 0000000000..bd8667c5ec
--- /dev/null
+++ b/js/src/jit/riscv64/SharedICHelpers-riscv64-inl.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_SharedICHelpers_riscv64_inl_h
+#define jit_riscv64_SharedICHelpers_riscv64_inl_h
+
+#include "jit/SharedICHelpers.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ Register scratch = R2.scratchReg();
+
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.subPtr(Imm32(argSize), scratch);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+ masm.addPtr(Imm32(argSize), scratch);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ // ICTailCallReg (ra) already contains the return address (as we
+ // keep it there through the stub calls), but the VMWrapper code being
+ // called expects the return address to also be pushed on the stack.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(ra);
+
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+#ifdef DEBUG
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Note: when making changes here, don't forget to update
+ // BaselineStubFrame if needed.
+
+ // Push frame descriptor and return address.
+ masm.PushFrameDescriptor(FrameType::BaselineJS);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.movePtr(StackPointer, FramePointer);
+ masm.Push(ICStubReg);
+
+ // Stack should remain aligned.
+ masm.assertStackAlignment(sizeof(Value), 0);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_SharedICHelpers_riscv64_inl_h */
diff --git a/js/src/jit/riscv64/SharedICHelpers-riscv64.h b/js/src/jit/riscv64/SharedICHelpers-riscv64.h
new file mode 100644
index 0000000000..3411c6727e
--- /dev/null
+++ b/js/src/jit/riscv64/SharedICHelpers-riscv64.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_SharedICHelpers_riscv64_h
+#define jit_riscv64_SharedICHelpers_riscv64_h
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+namespace js {
+namespace jit {
+
+static const size_t ICStackValueOffset = 0;
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+ // No-op on RISC-V because the ra register always holds the return address.
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+ // No-op on RISC-V because the ra register always holds the return address.
+}
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Load stubcode pointer from the ICStub.
+ // R2 won't be active when we call ICs, so we can use it as scratch.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode via a direct jump-and-link
+ masm.call(R2.scratchReg());
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.branch(ra); }
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ masm.loadPtr(
+ Address(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP),
+ ICStubReg);
+
+ masm.movePtr(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // Load the return address.
+ masm.Pop(ICTailCallReg);
+
+ // Discard the frame descriptor.
+ {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.Pop(scratch2);
+ }
+
+ masm.checkStackAlignment();
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ // On RISC-V, $ra is clobbered by guardedCallPreBarrier. Save it first.
+ masm.push(ra);
+ masm.guardedCallPreBarrier(addr, type);
+ masm.pop(ra);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_SharedICHelpers_riscv64_h */
diff --git a/js/src/jit/riscv64/SharedICRegisters-riscv64.h b/js/src/jit/riscv64/SharedICRegisters-riscv64.h
new file mode 100644
index 0000000000..3dcefe51c7
--- /dev/null
+++ b/js/src/jit/riscv64/SharedICRegisters-riscv64.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_SharedICRegisters_riscv64_h
+#define jit_riscv64_SharedICRegisters_riscv64_h
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/riscv64/MacroAssembler-riscv64.h"
+
+namespace js {
+namespace jit {
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1 value
+// should be preserved across calls.
+static constexpr ValueOperand R0(a2);
+static constexpr ValueOperand R1(s1);
+static constexpr ValueOperand R2(a0);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = t0;
+
+// FloatReg0 must be equal to ReturnDoubleReg.
+static constexpr FloatRegister FloatReg0 = fa0;
+static constexpr FloatRegister FloatReg1 = fa1;
+static constexpr FloatRegister FloatReg2 = fa2;
+static constexpr FloatRegister FloatReg3 = fa3;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_SharedICRegisters_riscv64_h */
diff --git a/js/src/jit/riscv64/Simulator-riscv64.cpp b/js/src/jit/riscv64/Simulator-riscv64.cpp
new file mode 100644
index 0000000000..02a668f185
--- /dev/null
+++ b/js/src/jit/riscv64/Simulator-riscv64.cpp
@@ -0,0 +1,4718 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef JS_SIMULATOR_RISCV64
+# include "jit/riscv64/Simulator-riscv64.h"
+
+# include "mozilla/Casting.h"
+# include "mozilla/FloatingPoint.h"
+# include "mozilla/IntegerPrintfMacros.h"
+# include "mozilla/Likely.h"
+# include "mozilla/MathAlgorithms.h"
+
+# include <float.h>
+# include <iostream>
+# include <limits>
+
+# include "jit/AtomicOperations.h"
+# include "jit/riscv64/Assembler-riscv64.h"
+# include "js/Conversions.h"
+# include "js/UniquePtr.h"
+# include "js/Utility.h"
+# include "threading/LockGuard.h"
+# include "vm/JSContext.h"
+# include "vm/Runtime.h"
+# include "wasm/WasmInstance.h"
+# include "wasm/WasmSignalHandlers.h"
+
+# define I8(v) static_cast<int8_t>(v)
+# define I16(v) static_cast<int16_t>(v)
+# define U16(v) static_cast<uint16_t>(v)
+# define I32(v) static_cast<int32_t>(v)
+# define U32(v) static_cast<uint32_t>(v)
+# define I64(v) static_cast<int64_t>(v)
+# define U64(v) static_cast<uint64_t>(v)
+# define I128(v) static_cast<__int128_t>(v)
+# define U128(v) static_cast<__uint128_t>(v)
+
+# define REGIx_FORMAT PRIx64
+# define REGId_FORMAT PRId64
+
+# define I32_CHECK(v) \
+ ({ \
+ MOZ_ASSERT(I64(I32(v)) == I64(v)); \
+ I32((v)); \
+ })
+
+namespace js {
+namespace jit {
+
+bool Simulator::FLAG_trace_sim = false;
+bool Simulator::FLAG_debug_sim = false;
+bool Simulator::FLAG_riscv_trap_to_simulator_debugger = false;
+bool Simulator::FLAG_riscv_print_watchpoint = false;
+
+static void UNIMPLEMENTED() {
+ printf("UNIMPLEMENTED instruction.\n");
+ MOZ_CRASH();
+}
+static void UNREACHABLE() {
+ printf("UNREACHABLE instruction.\n");
+ MOZ_CRASH();
+}
+# define UNSUPPORTED() \
+ std::cout << "Unrecognized instruction [@pc=0x" << std::hex \
+ << registers_[pc] << "]: 0x" << instr_.InstructionBits() \
+ << std::endl; \
+ printf("Unsupported instruction.\n"); \
+ MOZ_CRASH();
+
+static char* ReadLine(const char* prompt) {
+ UniqueChars result;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+ // Since we read a new line we are done reading the line. This
+ // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result.reset(js_pod_malloc<char>(len + 1));
+ if (!result) {
+ return nullptr;
+ }
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = js_pod_malloc<char>(new_len);
+ if (!new_result) {
+ return nullptr;
+ }
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result.get(), offset * sizeof(char));
+ result.reset(new_result);
+ }
+ // Copy the newly read line into the result.
+ memcpy(result.get() + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result.release();
+}
+
+// -----------------------------------------------------------------------------
+// Various constants for RISC-V assembly.
+
+// C/C++ argument slots size.
+const int kCArgSlotCount = 0;
+const int kCArgsSlotsSize = kCArgSlotCount * sizeof(uintptr_t);
+const int kBranchReturnOffset = 2 * kInstrSize;
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache()
+ : Base(SimulatorProcess::singleton_->cacheLock_) {}
+};
+
+mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ SimulatorProcess::ICacheCheckingDisableCount(
+ 1); // Checking is disabled by default.
+SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
+
+int64_t Simulator::StopSimAt = -1;
+
+static bool IsFlag(const char* found, const char* flag) {
+ return strlen(found) == strlen(flag) && strcmp(found, flag) == 0;
+}
+
+Simulator* Simulator::Create() {
+ auto sim = MakeUnique<Simulator>();
+ if (!sim) {
+ return nullptr;
+ }
+
+ if (!sim->init()) {
+ return nullptr;
+ }
+
+ int64_t stopAt;
+ char* stopAtStr = getenv("RISCV_SIM_STOP_AT");
+ if (stopAtStr && sscanf(stopAtStr, "%" PRIi64, &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %" PRIi64 "\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+ char* str = getenv("RISCV_TRACE_SIM");
+ if (str != nullptr && IsFlag(str, "true")) {
+ FLAG_trace_sim = true;
+ }
+
+ return sim.release();
+}
+
+void Simulator::Destroy(Simulator* sim) { js_delete(sim); }
+
+# if JS_CODEGEN_RISCV64
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
+ if (FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+ break;
+ case DWORD:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int64:%" REGId_FORMAT
+ " uint64:%" PRIu64,
+ value, icount_, value, value);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_, "%016" REGIx_FORMAT " (%" PRId64 ") flt:%e",
+ v.fmt_int64, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_, "%016" REGIx_FORMAT " (%" PRId64 ") dbl:%e",
+ v.fmt_int64, icount_, v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+# elif JS_CODEGEN_RISCV32
+template <typename T>
+void Simulator::TraceRegWr(T value, TraceType t) {
+ if (FLAG_trace_sim) {
+ union {
+ int32_t fmt_int32;
+ float fmt_float;
+ double fmt_double;
+ } v;
+ if (t != DOUBLE) {
+ v.fmt_int32 = value;
+ } else {
+ MOZ_ASSERT(sizeof(T) == 8);
+ v.fmt_double = value;
+ }
+ switch (t) {
+ case WORD:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int32:%" REGId_FORMAT
+ " uint32:%" PRIu32,
+ v.fmt_int32, icount_, v.fmt_int32, v.fmt_int32);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_, "%016" REGIx_FORMAT " (%" PRId64 ") flt:%e",
+ v.fmt_int32, icount_, v.fmt_float);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e",
+ static_cast<int64_t>(v.fmt_double), icount_, v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+# endif
+// The RiscvDebugger class is used by the simulator while debugging simulated
+// code.
+class RiscvDebugger {
+ public:
+ explicit RiscvDebugger(Simulator* sim) : sim_(sim) {}
+
+ void Debug();
+ // Print all registers with a nice formatting.
+ void PrintRegs(char name_prefix, int start_index, int end_index);
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ static const Instr kNopInstr = 0x0;
+
+ private:
+ Simulator* sim_;
+
+ int64_t GetRegisterValue(int regnum);
+ int64_t GetFPURegisterValue(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ __int128_t GetVRegisterValue(int regnum);
+# endif
+ bool GetValue(const char* desc, int64_t* value);
+};
+
+int64_t RiscvDebugger::GetRegisterValue(int regnum) {
+ if (regnum == Simulator::Register::kNumSimuRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->getRegister(regnum);
+ }
+}
+
+int64_t RiscvDebugger::GetFPURegisterValue(int regnum) {
+ if (regnum == Simulator::FPURegister::kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->getFpuRegister(regnum);
+ }
+}
+
+float RiscvDebugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == Simulator::FPURegister::kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->getFpuRegisterFloat(regnum);
+ }
+}
+
+double RiscvDebugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == Simulator::FPURegister::kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->getFpuRegisterDouble(regnum);
+ }
+}
+
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+__int128_t RiscvDebugger::GetVRegisterValue(int regnum) {
+ if (regnum == kNumVRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_vregister(regnum);
+ }
+}
+# endif
+
+bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
+ int regnum = Registers::FromName(desc);
+ int fpuregnum = FloatRegisters::FromName(desc);
+
+ if (regnum != Registers::invalid_reg) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else if (fpuregnum != FloatRegisters::invalid_reg) {
+ *value = GetFPURegisterValue(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc + 2, "%" SCNx64, reinterpret_cast<int64_t*>(value)) == 1;
+ } else {
+ return sscanf(desc, "%" SCNu64, reinterpret_cast<int64_t*>(value)) == 1;
+ }
+}
+
+# define REG_INFO(name) \
+ name, GetRegisterValue(Registers::FromName(name)), \
+ GetRegisterValue(Registers::FromName(name))
+
+void RiscvDebugger::PrintRegs(char name_prefix, int start_index,
+ int end_index) {
+ EmbeddedVector<char, 10> name1, name2;
+ MOZ_ASSERT(name_prefix == 'a' || name_prefix == 't' || name_prefix == 's');
+ MOZ_ASSERT(start_index >= 0 && end_index <= 99);
+ int num_registers = (end_index - start_index) + 1;
+ for (int i = 0; i < num_registers / 2; i++) {
+ SNPrintF(name1, "%c%d", name_prefix, start_index + 2 * i);
+ SNPrintF(name2, "%c%d", name_prefix, start_index + 2 * i + 1);
+ printf("%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ " \t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT " \n",
+ REG_INFO(name1.start()), REG_INFO(name2.start()));
+ }
+ if (num_registers % 2 == 1) {
+ SNPrintF(name1, "%c%d", name_prefix, end_index);
+ printf("%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT " \n",
+ REG_INFO(name1.start()));
+ }
+}
+
+void RiscvDebugger::printAllRegs() {
+ printf("\n");
+ // ra, sp, gp
+ printf("%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ "\t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ "\t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT "\n",
+ REG_INFO("ra"), REG_INFO("sp"), REG_INFO("gp"));
+
+ // tp, fp, pc
+ printf("%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ "\t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ "\t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT "\n",
+ REG_INFO("tp"), REG_INFO("fp"), REG_INFO("pc"));
+
+ // print register a0, .., a7
+ PrintRegs('a', 0, 7);
+ // print registers s1, ..., s11
+ PrintRegs('s', 1, 11);
+ // print registers t0, ..., t6
+ PrintRegs('t', 0, 6);
+}
+
+# undef REG_INFO
+
+void RiscvDebugger::printAllRegsIncludingFPU() {
+# define FPU_REG_INFO(n) \
+ FloatRegisters::GetName(n), GetFPURegisterValue(n), \
+ GetFPURegisterValueDouble(n)
+
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ MOZ_ASSERT(kNumFPURegisters % 2 == 0);
+ for (int i = 0; i < kNumFPURegisters; i += 2)
+ printf("%3s: 0x%016" PRIx64 " %16.4e \t%3s: 0x%016" PRIx64 " %16.4e\n",
+ FPU_REG_INFO(i), FPU_REG_INFO(i + 1));
+# undef FPU_REG_INFO
+}
+
+void RiscvDebugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+# define COMMAND_SIZE 63
+# define ARG_SIZE 255
+
+# define STR(a) #a
+# define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ printf(" 0x%016" REGIx_FORMAT " %s\n", sim_->get_pc(), buffer.start());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_;
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(
+ line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
+ sim_->icount_++;
+ sim_->InstructionDecode(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ int64_t fvalue;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ int regnum = Registers::FromName(arg1);
+ int fpuregnum = FloatRegisters::FromName(arg1);
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ int vregnum = VRegisters::FromName(arg1);
+# endif
+ if (regnum != Registers::invalid_reg) {
+ value = GetRegisterValue(regnum);
+ printf("%s: 0x%08" REGIx_FORMAT " %" REGId_FORMAT " \n", arg1,
+ value, value);
+ } else if (fpuregnum != FloatRegisters::invalid_reg) {
+ fvalue = GetFPURegisterValue(fpuregnum);
+ dvalue = GetFPURegisterValueDouble(fpuregnum);
+ printf("%3s: 0x%016" PRIx64 " %16.4e\n",
+ FloatRegisters::GetName(fpuregnum), fvalue, dvalue);
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ } else if (vregnum != kInvalidVRegister) {
+ __int128_t v = GetVRegisterValue(vregnum);
+ printf("\t%s:0x%016" REGIx_FORMAT "%016" REGIx_FORMAT "\n",
+ VRegisters::GetName(vregnum), (uint64_t)(v >> 64),
+ (uint64_t)v);
+# endif
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int64_t value;
+ float fvalue;
+ int fpuregnum = FloatRegisters::FromName(arg1);
+
+ if (fpuregnum != FloatRegisters::invalid_reg) {
+ value = GetFPURegisterValue(fpuregnum);
+ value &= 0xFFFFFFFFUL;
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ printf("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue);
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("print <fpu register> single\n");
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ }
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
+ UNIMPLEMENTED();
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+ if (argc < 2) {
+ printf("Need to specify <address> to memhex command\n");
+ continue;
+ }
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" 0x%012" PRIxPTR " : 0x%016" REGIx_FORMAT
+ " %14" REGId_FORMAT " ",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+ } else if ((strcmp(cmd, "watch") == 0)) {
+ if (argc < 2) {
+ printf("Need to specify <address> to mem command\n");
+ continue;
+ }
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ sim_->watch_address_ = reinterpret_cast<int64_t*>(value);
+ sim_->watch_value_ = *(sim_->watch_address_);
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ auto regnum = Registers::FromName(arg1);
+ if (regnum != Registers::invalid_reg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ sreg_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ sreg_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * kInstrSize);
+ }
+ }
+ } else {
+ sreg_t value1;
+ sreg_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ printf(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.start());
+ cur += kInstrSize;
+ }
+ } else if (strcmp(cmd, "trace") == 0) {
+ Simulator::FLAG_trace_sim = true;
+ Simulator::FLAG_riscv_print_watchpoint = true;
+ } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0 ||
+ strcmp(cmd, "tbreak") == 0) {
+ bool is_tbreak = strcmp(cmd, "tbreak") == 0;
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ sim_->SetBreakpoint(reinterpret_cast<SimInstruction*>(value),
+ is_tbreak);
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ sim_->ListBreakpoints();
+ printf("Use `break <address>` to set or disable a breakpoint\n");
+ printf(
+ "Use `tbreak <address>` to set or disable a temporary "
+ "breakpoint\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("No flags on RISC-V !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+ UNIMPLEMENTED();
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont (alias 'c')\n");
+ printf(" Continue execution\n");
+ printf("stepi (alias 'si')\n");
+ printf(" Step one instruction\n");
+ printf("print (alias 'p')\n");
+ printf(" print <register>\n");
+ printf(" Print register content\n");
+ printf(" Use register name 'all' to print all GPRs\n");
+ printf(" Use register name 'allf' to print all GPRs and FPRs\n");
+ printf("printobject (alias 'po')\n");
+ printf(" printobject <register>\n");
+ printf(" Print an object from a register\n");
+ printf("stack\n");
+ printf(" stack [<words>]\n");
+ printf(" Dump stack content, default dump 10 words)\n");
+ printf("mem\n");
+ printf(" mem <address> [<words>]\n");
+ printf(" Dump memory content, default dump 10 words)\n");
+ printf("watch\n");
+ printf(" watch <address> \n");
+ printf(" watch memory content.)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm (alias 'di')\n");
+ printf(" disasm [<instructions>]\n");
+ printf(" disasm [<address/register>] (e.g., disasm pc) \n");
+ printf(" disasm [[<address/register>] <instructions>]\n");
+ printf(" Disassemble code, default is 10 instructions\n");
+ printf(" from pc\n");
+ printf("gdb \n");
+ printf(" Return to gdb if the simulator was started with gdb\n");
+ printf("break (alias 'b')\n");
+ printf(" break : list all breakpoints\n");
+ printf(" break <address> : set / enable / disable a breakpoint.\n");
+ printf("tbreak\n");
+ printf(" tbreak : list all breakpoints\n");
+ printf(
+ " tbreak <address> : set / enable / disable a temporary "
+ "breakpoint.\n");
+ printf(" Set a breakpoint enabled only for one stop. \n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print infos about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+# undef COMMAND_SIZE
+# undef ARG_SIZE
+
+# undef STR
+# undef XSTR
+}
+
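+// Set a breakpoint at the given location. If a breakpoint already exists
+// there, toggle its enabled state, or convert it between a regular and a
+// temporary breakpoint.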
+void Simulator::SetBreakpoint(SimInstruction* location, bool is_tbreak) {
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if (breakpoints_.at(i).location == location) {
+ if (breakpoints_.at(i).is_tbreak != is_tbreak) {
+ printf("Change breakpoint at %p to %s breakpoint\n",
+ reinterpret_cast<void*>(location),
+ is_tbreak ? "temporary" : "regular");
+ breakpoints_.at(i).is_tbreak = is_tbreak;
+ return;
+ }
+ printf("Existing breakpoint at %p was %s\n",
+ reinterpret_cast<void*>(location),
+ breakpoints_.at(i).enabled ? "disabled" : "enabled");
+ breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
+ return;
+ }
+ }
+ Breakpoint new_breakpoint = {location, true, is_tbreak};
+ breakpoints_.push_back(new_breakpoint);
+ printf("Set a %sbreakpoint at %p\n", is_tbreak ? "temporary " : "",
+ reinterpret_cast<void*>(location));
+}
+
+void Simulator::ListBreakpoints() {
+ printf("Breakpoints:\n");
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ printf("%p : %s %s\n",
+ reinterpret_cast<void*>(breakpoints_.at(i).location),
+ breakpoints_.at(i).enabled ? "enabled" : "disabled",
+ breakpoints_.at(i).is_tbreak ? ": temporary" : "");
+ }
+}
+
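+// If an enabled breakpoint matches the current pc, drop into the debugger;
+// a temporary breakpoint is disabled before doing so.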
+void Simulator::CheckBreakpoints() {
+ bool hit_a_breakpoint = false;
+ bool is_tbreak = false;
+ SimInstruction* pc_ = reinterpret_cast<SimInstruction*>(get_pc());
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if ((breakpoints_.at(i).location == pc_) && breakpoints_.at(i).enabled) {
+ hit_a_breakpoint = true;
+ if (breakpoints_.at(i).is_tbreak) {
+ // Disable a temporary breakpoint.
+ is_tbreak = true;
+ breakpoints_.at(i).enabled = false;
+ }
+ break;
+ }
+ }
+ if (hit_a_breakpoint) {
+ printf("Hit %sa breakpoint at %p.\n", is_tbreak ? "and disabled " : "",
+ reinterpret_cast<void*>(pc_));
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ }
+}
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::setLastDebuggerInput(char* input) {
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
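+// Look up the CachePage covering 'page', allocating it on first use. The
+// caller must already hold the simulator cache lock.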
+static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* page) {
+ SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p) {
+ return p->value();
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page)) {
+ oomUnsafe.crash("Simulator CachePage");
+ }
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ intptr_t start, int size) {
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
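+// Invalidate the simulated I-cache for [start_addr, start_addr + size): the
+// range is widened to cache-line boundaries and flushed one page at a time.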
+static void FlushICacheLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* start_addr, size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePageLocked(i_cache, start, size);
+ }
+}
+
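+// On a cache hit, verify that the cached line still matches memory; on a
+// miss, copy the line from memory into the cache and mark it valid.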
+/* static */
+void SimulatorProcess::checkICacheLocked(SimInstruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(icache(), page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ int cmpret = memcmp(reinterpret_cast<void*>(instr),
+ cache_page->cachedData(offset), kInstrSize);
+ MOZ_ASSERT(cmpret == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber SimulatorProcess::ICacheHasher::hash(const Lookup& l) {
+ return U32(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool SimulatorProcess::ICacheHasher::match(const Key& k, const Lookup& l) {
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+/* static */
+void SimulatorProcess::FlushICache(void* start_addr, size_t size) {
+ if (!ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ js::jit::FlushICacheLocked(icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator() {
+ // Set up simulator support first. Some of this information is needed to
+ // setup the architecture state.
+
+ // Note, allocation and anything that depends on allocated memory is
+ // deferred until init(), in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < Simulator::Register::kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
+ LLBit_ = false;
+ LLAddr_ = 0;
+ lastLLValue_ = 0;
+
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
+
+ lastDebuggerInput_ = nullptr;
+}
+
+bool Simulator::init() {
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = js_pod_malloc<char>(stackSize);
+ if (!stack_) {
+ return false;
+ }
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ friend class SimulatorProcess;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(rtCallRedirInstr),
+ type_(type),
+ next_(nullptr) {
+ next_ = SimulatorProcess::redirection();
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ FlushICacheLocked(SimulatorProcess::icache(), addressOfSwiInstruction(),
+ kInstrSize);
+ }
+ SimulatorProcess::setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ AutoLockSimulatorCache als;
+
+ Redirection* current = SimulatorProcess::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ // Note: we can't use js_new here because the constructor is private.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = js_pod_malloc<Redirection>(1);
+ if (!redir) {
+ oomUnsafe.crash("Simulator redirection");
+ }
+ new (redir) Redirection(nativeFunction, type);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(Instruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection =
+ addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+Simulator::~Simulator() { js_free(stack_); }
+
+SimulatorProcess::SimulatorProcess()
+ : cacheLock_(mutexid::SimulatorCacheLock), redirection_(nullptr) {
+ if (getenv("MIPS_SIM_ICACHE_CHECKS")) {
+ ICacheCheckingDisableCount = 0;
+ }
+}
+
+SimulatorProcess::~SimulatorProcess() {
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */
+void* Simulator::RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::Current() {
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ return cx->simulator();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::setRegister(int reg, int64_t value) {
+ MOZ_ASSERT((reg >= 0) && (reg < Simulator::Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::setFpuRegister(int fpureg, int64_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::setFpuRegisterLo(int fpureg, int32_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterHi(int fpureg, int32_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1) = value;
+}
+
+void Simulator::setFpuRegisterFloat(int fpureg, float value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<int64_t*>(&FPUregisters_[fpureg]) = box_float(value);
+}
+
+void Simulator::setFpuRegisterFloat(int fpureg, Float32 value) {
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ Float64 t = Float64::FromBits(box_float(value.get_bits()));
+ memcpy(&FPUregisters_[fpureg], &t, 8);
+}
+
+void Simulator::setFpuRegisterDouble(int fpureg, double value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterDouble(int fpureg, Float64 value) {
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ memcpy(&FPUregisters_[fpureg], &value, 8);
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::getRegister(int reg) const {
+ MOZ_ASSERT((reg >= 0) && (reg < Simulator::Register::kNumSimuRegisters));
+ if (reg == 0) {
+ return 0;
+ }
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+int64_t Simulator::getFpuRegister(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t Simulator::getFpuRegisterLo(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]);
+}
+
+int32_t Simulator::getFpuRegisterHi(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1);
+}
+
+float Simulator::getFpuRegisterFloat(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]);
+}
+
+Float32 Simulator::getFpuRegisterFloat32(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ if (!is_boxed_float(FPUregisters_[fpureg])) {
+ return Float32::FromBits(0x7ffc0000);
+ }
+ return Float32::FromBits(
+ *bit_cast<uint32_t*>(const_cast<int64_t*>(&FPUregisters_[fpureg])));
+}
+
+double Simulator::getFpuRegisterDouble(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]);
+}
+
+Float64 Simulator::getFpuRegisterFloat64(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return Float64::FromBits(FPUregisters_[fpureg]);
+}
+
+void Simulator::setCallResultDouble(double result) {
+ setFpuRegisterDouble(fa0, result);
+}
+
+void Simulator::setCallResultFloat(float result) {
+ setFpuRegisterFloat(fa0, result);
+}
+
+void Simulator::setCallResult(int64_t res) { setRegister(a0, res); }
+
+void Simulator::setCallResult(__int128_t res) {
+ setRegister(a0, I64(res));
+ setRegister(a1, I64(res >> 64));
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const { return registers_[pc]; }
+
+JS::ProfilingFrameIterator::RegisterState Simulator::registerState() {
+ wasm::RegisterState state;
+ state.pc = (void*)get_pc();
+ state.fp = (void*)getRegister(fp);
+ state.sp = (void*)getRegister(sp);
+ state.lr = (void*)getRegister(ra);
+ return state;
+}
+
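+// Tracing helpers: when FLAG_trace_sim is set, format a one-line record of
+// the memory access (value, instruction count and address) into trace_buf_.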
+// TODO(plind): consider making icount_ printing a flag option.
+template <typename T>
+void Simulator::TraceMemRd(sreg_t addr, T value, sreg_t reg_value) {
+ if (FLAG_trace_sim) {
+ if (std::is_integral<T>::value) {
+ switch (sizeof(T)) {
+ case 1:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int8:%" PRId8
+ " uint8:%" PRIu8 " <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<int8_t>(value),
+ static_cast<uint8_t>(value), addr);
+ break;
+ case 2:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int16:%" PRId16
+ " uint16:%" PRIu16 " <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<int16_t>(value),
+ static_cast<uint16_t>(value), addr);
+ break;
+ case 4:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<int32_t>(value),
+ static_cast<uint32_t>(value), addr);
+ break;
+ case 8:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int64:%" PRId64
+ " uint64:%" PRIu64 " <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<int64_t>(value),
+ static_cast<uint64_t>(value), addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (std::is_same<float, T>::value) {
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64
+ ") flt:%e <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<float>(value), addr);
+ } else if (std::is_same<double, T>::value) {
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64
+ ") dbl:%e <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<double>(value), addr);
+ } else {
+ UNREACHABLE();
+ }
+ }
+}
+
+void Simulator::TraceMemRdFloat(sreg_t addr, Float32 value, int64_t reg_value) {
+ if (FLAG_trace_sim) {
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64
+ ") flt:%e <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<float>(value.get_scalar()), addr);
+ }
+}
+
+void Simulator::TraceMemRdDouble(sreg_t addr, double value, int64_t reg_value) {
+ if (FLAG_trace_sim) {
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64
+ ") dbl:%e <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<double>(value), addr);
+ }
+}
+
+void Simulator::TraceMemRdDouble(sreg_t addr, Float64 value,
+ int64_t reg_value) {
+ if (FLAG_trace_sim) {
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64
+ ") dbl:%e <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<double>(value.get_scalar()), addr);
+ }
+}
+
+template <typename T>
+void Simulator::TraceMemWr(sreg_t addr, T value) {
+ if (FLAG_trace_sim) {
+ switch (sizeof(T)) {
+ case 1:
+ SNPrintF(trace_buf_,
+ " (%" PRIu64 ") int8:%" PRId8
+ " uint8:%" PRIu8 " --> [addr: %" REGIx_FORMAT "]",
+ icount_, static_cast<int8_t>(value),
+ static_cast<uint8_t>(value), addr);
+ break;
+ case 2:
+ SNPrintF(trace_buf_,
+ " (%" PRIu64 ") int16:%" PRId16
+ " uint16:%" PRIu16 " --> [addr: %" REGIx_FORMAT "]",
+ icount_, static_cast<int16_t>(value),
+ static_cast<uint16_t>(value), addr);
+ break;
+ case 4:
+ if (std::is_integral<T>::value) {
+ SNPrintF(trace_buf_,
+ " (%" PRIu64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " --> [addr: %" REGIx_FORMAT "]",
+ icount_, static_cast<int32_t>(value),
+ static_cast<uint32_t>(value), addr);
+ } else {
+ SNPrintF(trace_buf_,
+ " (%" PRIu64
+ ") flt:%e --> [addr: %" REGIx_FORMAT "]",
+ icount_, static_cast<float>(value), addr);
+ }
+ break;
+ case 8:
+ if (std::is_integral<T>::value) {
+ SNPrintF(trace_buf_,
+ " (%" PRIu64 ") int64:%" PRId64
+ " uint64:%" PRIu64 " --> [addr: %" REGIx_FORMAT "]",
+ icount_, static_cast<int64_t>(value),
+ static_cast<uint64_t>(value), addr);
+ } else {
+ SNPrintF(trace_buf_,
+ " (%" PRIu64
+ ") dbl:%e --> [addr: %" REGIx_FORMAT "]",
+ icount_, static_cast<double>(value), addr);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void Simulator::TraceMemWrDouble(sreg_t addr, double value) {
+ if (FLAG_trace_sim) {
+ SNPrintF(trace_buf_,
+ " (%" PRIu64
+ ") dbl:%e --> [addr: %" REGIx_FORMAT "]",
+ icount_, value, addr);
+ }
+}
+
+template <typename T>
+void Simulator::TraceLr(sreg_t addr, T value, sreg_t reg_value) {
+ if (FLAG_trace_sim) {
+ if (std::is_integral<T>::value) {
+ switch (sizeof(T)) {
+ case 4:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<int32_t>(value),
+ static_cast<uint32_t>(value), addr);
+ break;
+ case 8:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int64:%" PRId64
+ " uint64:%" PRIu64 " <-- [addr: %" REGIx_FORMAT "]",
+ reg_value, icount_, static_cast<int64_t>(value),
+ static_cast<uint64_t>(value), addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceSc(sreg_t addr, T value) {
+ if (FLAG_trace_sim) {
+ switch (sizeof(T)) {
+ case 4:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRIu64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " --> [addr: %" REGIx_FORMAT "]",
+ getRegister(rd_reg()), icount_, static_cast<int32_t>(value),
+ static_cast<uint32_t>(value), addr);
+ break;
+ case 8:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRIu64 ") int64:%" PRId64
+ " uint64:%" PRIu64 " --> [addr: %" REGIx_FORMAT "]",
+ getRegister(rd_reg()), icount_, static_cast<int64_t>(value),
+ static_cast<uint64_t>(value), addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+// TODO(RISCV): check whether the specific board supports unaligned load/store
+// (determined by EEI). For now, we assume the board does not support unaligned
+// load/store (e.g., trapping)
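+// Read a value of type T from simulated memory. Accesses caught by
+// handleWasmSegFault return a dummy value; reads of addresses below 0x400 are
+// treated as nullptr dereferences and drop into the debugger.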
+template <typename T>
+T Simulator::ReadMem(sreg_t addr, Instruction* instr) {
+ if (handleWasmSegFault(addr, sizeof(T))) {
+ return -1;
+ }
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ printf("Memory read from bad address: 0x%08" REGIx_FORMAT
+ " , pc=0x%08" PRIxPTR " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ T* ptr = reinterpret_cast<T*>(addr);
+ T value = *ptr;
+ return value;
+}
+
+template <typename T>
+void Simulator::WriteMem(sreg_t addr, T value, Instruction* instr) {
+ if (handleWasmSegFault(addr, sizeof(T))) {
+ value = -1;
+ return;
+ }
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%08" REGIx_FORMAT
+ " , pc=0x%08" PRIxPTR " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ T* ptr = reinterpret_cast<T*>(addr);
+ if (!std::is_same<double, T>::value) {
+ TraceMemWr(addr, value);
+ } else {
+ TraceMemWrDouble(addr, value);
+ }
+ *ptr = value;
+}
+
+template <>
+void Simulator::WriteMem(sreg_t addr, Float32 value, Instruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ value = Float32(-1.0f);
+ return;
+ }
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%08" REGIx_FORMAT
+ " , pc=0x%08" PRIxPTR " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ float* ptr = reinterpret_cast<float*>(addr);
+ TraceMemWr(addr, value.get_scalar());
+ memcpy(ptr, &value, 4);
+}
+
+template <>
+void Simulator::WriteMem(sreg_t addr, Float64 value, Instruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ value = Float64(-1.0);
+ return;
+ }
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%08" REGIx_FORMAT
+ " , pc=0x%08" PRIxPTR " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ double* ptr = reinterpret_cast<double*>(addr);
+ TraceMemWrDouble(addr, value.get_scalar());
+ memcpy(ptr, &value, 8);
+}
+
+uintptr_t Simulator::stackLimit() const { return stackLimit_; }
+
+uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; }
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = getRegister(sp);
+ }
+ return newsp <= stackLimit();
+}
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Unsupported instructions use format to print an error and stop execution.
+void Simulator::format(SimInstruction* instr, const char* format) {
+ printf("Simulator found unsupported instruction:\n 0x%016lx: %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
+// Note: the code below assumes that all runtime calls return a 64-bit result.
+// If they don't, the second result register (a1) contains a bogus value, which
+// is fine because it is caller-saved.
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int64_t arg0);
+typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1);
+typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2);
+typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4);
+typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5);
+typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5,
+ int64_t arg6);
+typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7);
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+
+typedef int64_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int_IntDouble)(int64_t arg0, double arg1);
+typedef int64_t (*Prototype_Int_DoubleInt)(double arg0, int64_t arg1);
+typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, int64_t arg1,
+ int64_t arg2);
+typedef int64_t (*Prototype_Int_IntDoubleIntInt)(int64_t arg0, double arg1,
+ int64_t arg2, int64_t arg3);
+
+typedef float (*Prototype_Float32_Float32)(float arg0);
+typedef int64_t (*Prototype_Int_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int64_t arg0);
+typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1,
+ double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0,
+ double arg1,
+ double arg2,
+ double arg3);
+
+typedef int32_t (*Prototype_Int32_General)(int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32)(int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32)(int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32)(int64_t, int32_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32)(
+ int64_t, int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32General)(
+ int64_t, int32_t, int32_t, int32_t, int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32General)(
+ int64_t, int32_t, int32_t, int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int64)(int64_t, int32_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32General)(int64_t, int32_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int64Int64)(int64_t, int32_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32)(int64_t, int32_t,
+ int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32Int32)(
+ int64_t, int32_t, int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneral)(int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralGeneral)(int64_t, int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32Int32)(int64_t, int64_t,
+ int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int32Int32)(int64_t, int64_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32)(int64_t, int64_t, int32_t);
+
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32General)(int32_t, int32_t,
+ int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64)(int64_t, int64_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64General)(
+ int64_t, int64_t, int32_t, int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64General)(
+ int64_t, int64_t, int64_t, int64_t, int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32)(int64_t, int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32)(int64_t, int32_t,
+ int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32General)(int64_t, int32_t,
+ int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32GeneralInt32)(
+ int64_t, int32_t, int32_t, int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32)(
+ int64_t, int64_t, int32_t, int64_t, int32_t, int32_t, int32_t);
+typedef int64_t (*Prototype_Int64_General)(int64_t);
+typedef int64_t (*Prototype_Int64_GeneralInt64)(int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64General)(int64_t, int64_t,
+ int64_t, int64_t);
+// For ebreaks generated by Assembler::break_()/stop(), the stop code is passed
+// in the immediate field of the following LUI instruction; otherwise this
+// returns -1.
+static inline uint32_t get_ebreak_code(Instruction* instr) {
+ MOZ_ASSERT(instr->InstructionBits() == kBreakInstr);
+ uint8_t* cur = reinterpret_cast<uint8_t*>(instr);
+ Instruction* next_instr = reinterpret_cast<Instruction*>(cur + kInstrSize);
+ if (next_instr->BaseOpcodeFieldRaw() == LUI)
+ return (next_instr->Imm20UValue());
+ else
+ return -1;
+}
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void Simulator::SoftwareInterrupt() {
+  // Two instructions can get us here: ebreak and ecall. Both are "SYSTEM"
+  // class opcodes, distinguished by the Imm12Value field, with the remaining
+  // instruction fields being zero.
+  // First, check whether this is a redirected runtime call.
+ if (instr_.InstructionBits() == rtCallRedirInstr) {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
+ uintptr_t nativeFn =
+ reinterpret_cast<uintptr_t>(redirection->nativeFunction());
+
+ intptr_t arg0 = getRegister(a0);
+ intptr_t arg1 = getRegister(a1);
+ intptr_t arg2 = getRegister(a2);
+ intptr_t arg3 = getRegister(a3);
+ intptr_t arg4 = getRegister(a4);
+ intptr_t arg5 = getRegister(a5);
+ intptr_t arg6 = getRegister(a6);
+ intptr_t arg7 = getRegister(a7);
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ intptr_t saved_ra = getRegister(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+ if (FLAG_trace_sim) {
+ printf(
+ "Call to host function at %p with args %ld, %ld, %ld, %ld, %ld, %ld, "
+ "%ld, %ld\n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7);
+ }
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target =
+ reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target =
+ reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target =
+ reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target =
+ reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ if (external == intptr_t(&js::wasm::Instance::wake_m32)) {
+ result = int32_t(result);
+ }
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target =
+ reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target =
+ reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target =
+ reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target =
+ reinterpret_cast<Prototype_General7>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target =
+ reinterpret_cast<Prototype_General8>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target =
+ reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Int_Double target =
+ reinterpret_cast<Prototype_Int_Double>(external);
+ int64_t result = target(dval0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ if (external == intptr_t((int32_t(*)(double))JS::ToInt32)) {
+ result = int32_t(result);
+ }
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralGeneralInt64: {
+ Prototype_GeneralGeneralGeneralInt64 target =
+ reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ if (external == intptr_t(&js::wasm::Instance::wait_i32_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralInt64Int64: {
+ Prototype_GeneralGeneralInt64Int64 target =
+ reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ if (external == intptr_t(&js::wasm::Instance::wait_i64_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_DoubleInt: {
+ double dval = getFpuRegisterDouble(fa0);
+ Prototype_Int_DoubleInt target =
+ reinterpret_cast<Prototype_Int_DoubleInt>(external);
+ int64_t result = target(dval, arg0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(fa0);
+ Prototype_Int_DoubleIntInt target =
+ reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int64_t result = target(dval, arg1, arg2);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getFpuRegisterDouble(fa0);
+ Prototype_Int_IntDoubleIntInt target =
+ reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int64_t result = target(arg0, dval, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Double_Double target =
+ reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(fa0);
+ Prototype_Float32_Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ if (FLAG_trace_sim) printf("ret %f\n", fresult);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Int_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(fa0);
+ Prototype_Int_Float32 target =
+ reinterpret_cast<Prototype_Int_Float32>(external);
+ int64_t result = target(fval0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Float32_Float32Float32: {
+ float fval0;
+ float fval1;
+ fval0 = getFpuRegisterFloat(fa0);
+ fval1 = getFpuRegisterFloat(fa1);
+ Prototype_Float32_Float32Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32Float32>(external);
+ float fresult = target(fval0, fval1);
+ if (FLAG_trace_sim) printf("ret %f\n", fresult);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target =
+ reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Double_DoubleInt target =
+ reinterpret_cast<Prototype_Double_DoubleInt>(external);
+ double dresult = target(dval0, arg0);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ double dval1 = getFpuRegisterDouble(fa1);
+ Prototype_Double_DoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Double_IntDouble target =
+ reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(arg0, dval0);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Int_IntDouble target =
+ reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int64_t result = target(arg0, dval0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ double dval1 = getFpuRegisterDouble(fa1);
+ double dval2 = getFpuRegisterDouble(fa2);
+ Prototype_Double_DoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ double dval1 = getFpuRegisterDouble(fa1);
+ double dval2 = getFpuRegisterDouble(fa2);
+ double dval3 = getFpuRegisterDouble(fa3);
+ Prototype_Double_DoubleDoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(
+ external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int32_General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_General>(nativeFn)(arg0);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32>(nativeFn)(
+ arg0, I32(arg1));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4),
+ I32(arg5));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32General: {
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralInt32Int32Int32Int32General>(nativeFn)(
+ arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4), arg5);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), arg4);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int64>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int64Int64>(
+ nativeFn)(arg0, I32(arg1), arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3), I32(arg4));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneral>(
+ nativeFn)(arg0, arg1);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneralGeneral>(
+ nativeFn)(arg0, arg1, arg2);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3), I32(arg4));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32>(
+ nativeFn)(arg0, arg1, I32(arg2));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64General>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, arg4);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3, arg4);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_General_GeneralInt32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32>(
+ nativeFn)(arg0, I32(arg1));
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32Int32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32General: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32General>(
+ nativeFn)(arg0, I32(arg1), arg2);
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32GeneralInt32: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32Int32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3, I32(arg4));
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32General: {
+ Prototype_Int32_GeneralGeneralInt32General target =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32General>(
+ external);
+ int64_t result = target(I32(arg0), I32(arg1), I32(arg2), I32(arg3));
+ setRegister(a0, I64(result));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32: {
+ int64_t arg6 = getRegister(a6);
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, I32(arg4), I32(arg5),
+ I32(arg6));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int64_General: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_General>(nativeFn)(arg0);
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_Int64_GeneralInt64: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_GeneralInt64>(nativeFn)(
+ arg0, arg1);
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ default:
+ MOZ_CRASH("Unknown function type.");
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+
+ } else if (instr_.InstructionBits() == kBreakInstr &&
+ (get_ebreak_code(instr_.instr()) <= kMaxStopCode)) {
+ uint32_t code = get_ebreak_code(instr_.instr());
+ if (isWatchpoint(code)) {
+ printWatchpoint(code);
+ } else if (IsTracepoint(code)) {
+ if (!FLAG_debug_sim) {
+ MOZ_CRASH("Add --debug-sim when tracepoint instruction is used.\n");
+ }
+ // printf("%d %d %d %d %d %d %d\n", code, code & LOG_TRACE, code &
+ // LOG_REGS,
+ // code & kDebuggerTracingDirectivesMask, TRACE_ENABLE,
+ // TRACE_DISABLE, kDebuggerTracingDirectivesMask);
+ switch (code & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ if (code & LOG_TRACE) {
+ FLAG_trace_sim = true;
+ }
+ if (code & LOG_REGS) {
+ RiscvDebugger dbg(this);
+ dbg.printAllRegs();
+ }
+ break;
+ case TRACE_DISABLE:
+ if (code & LOG_TRACE) {
+ FLAG_trace_sim = false;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ increaseStopCounter(code);
+ handleStop(code);
+ }
+ } else {
+ // uint8_t code = get_ebreak_code(instr_.instr()) - kMaxStopCode - 1;
+ // switch (LNode::Opcode(code)) {
+ // #define EMIT_OP(OP, ...) \
+// case LNode::Opcode::OP:\
+// std::cout << #OP << std::endl; \
+// break;
+ // LIR_OPCODE_LIST(EMIT_OP);
+ // #undef EMIT_OP
+ // }
+ DieOrDebug();
+ }
+}
+
+// Stop helper functions.
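+// Break codes are partitioned by value: codes up to kMaxWatchpointCode are
+// watchpoints, codes up to kMaxTracepointCode are tracepoints, and the
+// remaining codes up to kMaxStopCode are stops.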
+bool Simulator::isWatchpoint(uint32_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+bool Simulator::IsTracepoint(uint32_t code) {
+ return (code <= kMaxTracepointCode && code > kMaxWatchpointCode);
+}
+
+void Simulator::printWatchpoint(uint32_t code) {
+ RiscvDebugger dbg(this);
+ ++break_count_;
+ if (FLAG_riscv_print_watchpoint) {
+ printf("\n---- break %d marker: %20" PRIi64 " (instr count: %20" PRIi64
+ ") ----\n",
+ code, break_count_, icount_);
+ dbg.printAllRegs(); // Print registers and continue running.
+ }
+}
+
+void Simulator::handleStop(uint32_t code) {
+ // If the stop is enabled, enter the debugger; otherwise skip over the stop
+ // instruction and the message address that follows it.
+ if (isEnabledStop(code)) {
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ } else {
+ set_pc(get_pc() + 2 * kInstrSize);
+ }
+}
+
+bool Simulator::isStopInstruction(SimInstruction* instr) {
+ if (instr->InstructionBits() != kBreakInstr) return false;
+ int32_t code = get_ebreak_code(instr->instr());
+ return code != -1 && static_cast<uint32_t>(code) > kMaxWatchpointCode &&
+ static_cast<uint32_t>(code) <= kMaxStopCode;
+}
+
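+ // One bit of each stop's counter (kStopDisabledBit) acts as the disabled
+ // flag; the remaining bits count how many times the stop has been hit.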
+bool Simulator::isEnabledStop(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void Simulator::enableStop(uint32_t code) {
+ if (!isEnabledStop(code)) {
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::disableStop(uint32_t code) {
+ if (isEnabledStop(code)) {
+ watchedStops_[code].count_ |= kStopDisabledBit;
+ }
+}
+
+void Simulator::increaseStopCounter(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+ printf(
+ "Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
+
+// Print a stop status.
+void Simulator::printStopInfo(uint32_t code) {
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
+ count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
+ }
+ }
+}
+
+void Simulator::SignalException(Exception e) {
+ printf("Error: Exception %i raised.", static_cast<int>(e));
+ MOZ_CRASH();
+}
+
+// TODO(plind): refactor this messy debug code when we do unaligned access.
+void Simulator::DieOrDebug() {
+ if (FLAG_riscv_trap_to_simulator_debugger) {
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ } else {
+ MOZ_CRASH("Die");
+ }
+}
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+ // if (FLAG_check_icache) {
+ // CheckICache(SimulatorProcess::icache(), instr);
+ // }
+ pc_modified_ = false;
+
+ EmbeddedVector<char, 256> buffer;
+
+ if (FLAG_trace_sim || FLAG_debug_sim) {
+ SNPrintF(trace_buf_, " ");
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+
+ // printf("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ // reinterpret_cast<intptr_t>(instr), buffer.begin());
+ }
+
+ instr_ = instr;
+ switch (instr_.InstructionType()) {
+ case Instruction::kRType:
+ DecodeRVRType();
+ break;
+ case Instruction::kR4Type:
+ DecodeRVR4Type();
+ break;
+ case Instruction::kIType:
+ DecodeRVIType();
+ break;
+ case Instruction::kSType:
+ DecodeRVSType();
+ break;
+ case Instruction::kBType:
+ DecodeRVBType();
+ break;
+ case Instruction::kUType:
+ DecodeRVUType();
+ break;
+ case Instruction::kJType:
+ DecodeRVJType();
+ break;
+ case Instruction::kCRType:
+ DecodeCRType();
+ break;
+ case Instruction::kCAType:
+ DecodeCAType();
+ break;
+ case Instruction::kCJType:
+ DecodeCJType();
+ break;
+ case Instruction::kCBType:
+ DecodeCBType();
+ break;
+ case Instruction::kCIType:
+ DecodeCIType();
+ break;
+ case Instruction::kCIWType:
+ DecodeCIWType();
+ break;
+ case Instruction::kCSSType:
+ DecodeCSSType();
+ break;
+ case Instruction::kCLType:
+ DecodeCLType();
+ break;
+ case Instruction::kCSType:
+ DecodeCSType();
+ break;
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ case Instruction::kVType:
+ DecodeVType();
+ break;
+# endif
+ default:
+ UNSUPPORTED();
+ }
+
+ if (FLAG_trace_sim) {
+ printf(" 0x%012" PRIxPTR " %-44s\t%s\n",
+ reinterpret_cast<intptr_t>(instr), buffer.start(),
+ trace_buf_.start());
+ }
+
+ if (!pc_modified_) {
+ setRegister(pc, reinterpret_cast<sreg_t>(instr) + instr->InstructionSize());
+ }
+
+ if (watch_address_ != nullptr) {
+ printf(" 0x%012" PRIxPTR " : 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ " \n",
+ reinterpret_cast<intptr_t>(watch_address_), *watch_address_,
+ *watch_address_);
+ if (watch_value_ != *watch_address_) {
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ watch_value_ = *watch_address_;
+ }
+ }
+}
+
+void Simulator::enable_single_stepping(SingleStepCallback cb, void* arg) {
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void Simulator::disable_single_stepping() {
+ if (!single_stepping_) {
+ return;
+ }
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
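+ // The enableStopSimAt template parameter lets the main loop omit the
+ // per-instruction Simulator::StopSimAt check when it is not needed.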
+template <bool enableStopSimAt>
+void Simulator::execute() {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ }
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this,
+ (void*)program_counter);
+ }
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ InstructionDecode(instr);
+ icount_++;
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+}
+
+// RISCV Instruction Decode Routine
+void Simulator::DecodeRVRType() {
+ switch (instr_.InstructionBits() & kRTypeMask) {
+ case RO_ADD: {
+ set_rd(sext_xlen(rs1() + rs2()));
+ break;
+ }
+ case RO_SUB: {
+ set_rd(sext_xlen(rs1() - rs2()));
+ break;
+ }
+ case RO_SLL: {
+ set_rd(sext_xlen(rs1() << (rs2() & (xlen - 1))));
+ break;
+ }
+ case RO_SLT: {
+ set_rd(sreg_t(rs1()) < sreg_t(rs2()));
+ break;
+ }
+ case RO_SLTU: {
+ set_rd(reg_t(rs1()) < reg_t(rs2()));
+ break;
+ }
+ case RO_XOR: {
+ set_rd(rs1() ^ rs2());
+ break;
+ }
+ case RO_SRL: {
+ set_rd(sext_xlen(zext_xlen(rs1()) >> (rs2() & (xlen - 1))));
+ break;
+ }
+ case RO_SRA: {
+ set_rd(sext_xlen(sext_xlen(rs1()) >> (rs2() & (xlen - 1))));
+ break;
+ }
+ case RO_OR: {
+ set_rd(rs1() | rs2());
+ break;
+ }
+ case RO_AND: {
+ set_rd(rs1() & rs2());
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case RO_ADDW: {
+ set_rd(sext32(rs1() + rs2()));
+ break;
+ }
+ case RO_SUBW: {
+ set_rd(sext32(rs1() - rs2()));
+ break;
+ }
+ case RO_SLLW: {
+ set_rd(sext32(rs1() << (rs2() & 0x1F)));
+ break;
+ }
+ case RO_SRLW: {
+ set_rd(sext32(uint32_t(rs1()) >> (rs2() & 0x1F)));
+ break;
+ }
+ case RO_SRAW: {
+ set_rd(sext32(int32_t(rs1()) >> (rs2() & 0x1F)));
+ break;
+ }
+# endif /* JS_CODEGEN_RISCV64 */
+ // TODO(riscv): Add RISCV M extension macro
+ case RO_MUL: {
+ set_rd(rs1() * rs2());
+ break;
+ }
+ case RO_MULH: {
+ set_rd(mulh(rs1(), rs2()));
+ break;
+ }
+ case RO_MULHSU: {
+ set_rd(mulhsu(rs1(), rs2()));
+ break;
+ }
+ case RO_MULHU: {
+ set_rd(mulhu(rs1(), rs2()));
+ break;
+ }
+ case RO_DIV: {
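+ // RISC-V integer division never traps: division by zero yields -1, and the
+ // overflow case (the most negative value divided by -1) yields the dividend.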
+ sreg_t lhs = sext_xlen(rs1());
+ sreg_t rhs = sext_xlen(rs2());
+ if (rhs == 0) {
+ set_rd(-1);
+ } else if (lhs == INTPTR_MIN && rhs == -1) {
+ set_rd(lhs);
+ } else {
+ set_rd(sext_xlen(lhs / rhs));
+ }
+ break;
+ }
+ case RO_DIVU: {
+ reg_t lhs = zext_xlen(rs1());
+ reg_t rhs = zext_xlen(rs2());
+ if (rhs == 0) {
+ set_rd(UINTPTR_MAX);
+ } else {
+ set_rd(zext_xlen(lhs / rhs));
+ }
+ break;
+ }
+ case RO_REM: {
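+ // Remainder follows the same convention: x % 0 yields x, and the overflow
+ // case yields 0.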
+ sreg_t lhs = sext_xlen(rs1());
+ sreg_t rhs = sext_xlen(rs2());
+ if (rhs == 0) {
+ set_rd(lhs);
+ } else if (lhs == INTPTR_MIN && rhs == -1) {
+ set_rd(0);
+ } else {
+ set_rd(sext_xlen(lhs % rhs));
+ }
+ break;
+ }
+ case RO_REMU: {
+ reg_t lhs = zext_xlen(rs1());
+ reg_t rhs = zext_xlen(rs2());
+ if (rhs == 0) {
+ set_rd(lhs);
+ } else {
+ set_rd(zext_xlen(lhs % rhs));
+ }
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case RO_MULW: {
+ set_rd(sext32(sext32(rs1()) * sext32(rs2())));
+ break;
+ }
+ case RO_DIVW: {
+ sreg_t lhs = sext32(rs1());
+ sreg_t rhs = sext32(rs2());
+ if (rhs == 0) {
+ set_rd(-1);
+ } else if (lhs == INT32_MIN && rhs == -1) {
+ set_rd(lhs);
+ } else {
+ set_rd(sext32(lhs / rhs));
+ }
+ break;
+ }
+ case RO_DIVUW: {
+ reg_t lhs = zext32(rs1());
+ reg_t rhs = zext32(rs2());
+ if (rhs == 0) {
+ set_rd(UINT32_MAX);
+ } else {
+ set_rd(zext32(lhs / rhs));
+ }
+ break;
+ }
+ case RO_REMW: {
+ sreg_t lhs = sext32(rs1());
+ sreg_t rhs = sext32(rs2());
+ if (rhs == 0) {
+ set_rd(lhs);
+ } else if (lhs == INT32_MIN && rhs == -1) {
+ set_rd(0);
+ } else {
+ set_rd(sext32(lhs % rhs));
+ }
+ break;
+ }
+ case RO_REMUW: {
+ reg_t lhs = zext32(rs1());
+ reg_t rhs = zext32(rs2());
+ if (rhs == 0) {
+ set_rd(zext32(lhs));
+ } else {
+ set_rd(zext32(lhs % rhs));
+ }
+ break;
+ }
+# endif /*JS_CODEGEN_RISCV64*/
+ // TODO(riscv): End Add RISCV M extension macro
+ default: {
+ switch (instr_.BaseOpcode()) {
+ case AMO:
+ DecodeRVRAType();
+ break;
+ case OP_FP:
+ DecodeRVRFPType();
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ }
+ }
+}
+
+template <typename T>
+T Simulator::FMaxMinHelper(T a, T b, MaxMinKind kind) {
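+ // RISC-V FMIN/FMAX semantics: if both inputs are NaN the result is the
+ // canonical quiet NaN; if exactly one input is NaN the other input is
+ // returned; -0.0 is treated as smaller than +0.0.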
+ // Set the invalid-operation flag if either input is a signaling NaN.
+ if (isSnan(a) || isSnan(b)) {
+ set_csr_bits(csr_fflags, kInvalidOperation);
+ }
+
+ T result = 0;
+ if (std::isnan(a) && std::isnan(b)) {
+ result = std::numeric_limits<T>::quiet_NaN();
+ } else if (std::isnan(a)) {
+ result = b;
+ } else if (std::isnan(b)) {
+ result = a;
+ } else if (b == a) { // Handle -0.0 == 0.0 case.
+ if (kind == MaxMinKind::kMax) {
+ result = std::signbit(b) ? a : b;
+ } else {
+ result = std::signbit(b) ? b : a;
+ }
+ } else {
+ result = (kind == MaxMinKind::kMax) ? fmax(a, b) : fmin(a, b);
+ }
+
+ return result;
+}
+
+float Simulator::RoundF2FHelper(float input_val, int rmode) {
+ if (rmode == DYN) rmode = get_dynamic_rounding_mode();
+
+ float rounded = 0;
+ switch (rmode) {
+ case RNE: {  // Round to Nearest, ties to Even
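+ // Implemented as floor() plus a correction: round up when the error exceeds
+ // 0.5, or equals 0.5 and the floored value is odd (e.g. 0.5 -> 0, 1.5 -> 2).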
+ rounded = floorf(input_val);
+ float error = input_val - rounded;
+
+ // Take care of correctly handling the range [-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 <= input_val) && (input_val < 0.0)) {
+ rounded = -0.0;
+
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ } else if ((error > 0.5) ||
+ ((error == 0.5) && (std::fmod(rounded, 2) != 0))) {
+ rounded++;
+ }
+ break;
+ }
+ case RTZ: // Round towards Zero
+ rounded = std::truncf(input_val);
+ break;
+ case RDN: // Round Down (towards -infinity)
+ rounded = floorf(input_val);
+ break;
+ case RUP: // Round Up (towards +infinity)
+ rounded = ceilf(input_val);
+ break;
+ case RMM:  // Round to Nearest, ties to Max Magnitude
+ rounded = std::roundf(input_val);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ return rounded;
+}
+
+double Simulator::RoundF2FHelper(double input_val, int rmode) {
+ if (rmode == DYN) rmode = get_dynamic_rounding_mode();
+
+ double rounded = 0;
+ switch (rmode) {
+ case RNE: {  // Round to Nearest, ties to Even
+ rounded = std::floor(input_val);
+ double error = input_val - rounded;
+
+ // Take care of correctly handling the range [-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 <= input_val) && (input_val < 0.0)) {
+ rounded = -0.0;
+
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ } else if ((error > 0.5) ||
+ ((error == 0.5) && (std::fmod(rounded, 2) != 0))) {
+ rounded++;
+ }
+ break;
+ }
+ case RTZ: // Round towards Zero
+ rounded = std::trunc(input_val);
+ break;
+ case RDN: // Round Down (towards -infinity)
+ rounded = std::floor(input_val);
+ break;
+ case RUP: // Round Up (towards +infinity)
+ rounded = std::ceil(input_val);
+ break;
+ case RMM:  // Round to Nearest, ties to Max Magnitude
+ rounded = std::round(input_val);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return rounded;
+}
+
+// convert rounded floating-point to integer types, handle input values that
+// are out-of-range, underflow, or NaN, and set appropriate fflags
+template <typename I_TYPE, typename F_TYPE>
+I_TYPE Simulator::RoundF2IHelper(F_TYPE original, int rmode) {
+ MOZ_ASSERT(std::is_integral<I_TYPE>::value);
+
+ MOZ_ASSERT((std::is_same<F_TYPE, float>::value ||
+ std::is_same<F_TYPE, double>::value));
+
+ I_TYPE max_i = std::numeric_limits<I_TYPE>::max();
+ I_TYPE min_i = std::numeric_limits<I_TYPE>::min();
+
+ if (!std::isfinite(original)) {
+ set_fflags(kInvalidOperation);
+ if (std::isnan(original) ||
+ original == std::numeric_limits<F_TYPE>::infinity()) {
+ return max_i;
+ } else {
+ MOZ_ASSERT(original == -std::numeric_limits<F_TYPE>::infinity());
+ return min_i;
+ }
+ }
+
+ F_TYPE rounded = RoundF2FHelper(original, rmode);
+ if (original != rounded) set_fflags(kInexact);
+
+ if (!std::isfinite(rounded)) {
+ set_fflags(kInvalidOperation);
+ if (std::isnan(rounded) ||
+ rounded == std::numeric_limits<F_TYPE>::infinity()) {
+ return max_i;
+ } else {
+ MOZ_ASSERT(rounded == -std::numeric_limits<F_TYPE>::infinity());
+ return min_i;
+ }
+ }
+
+ // Since integer max values are either all 1s (for unsigned) or all 1s
+ // except for the sign bit (for signed), they cannot be represented exactly
+ // in floating point. To tell precisely whether the rounded value is within
+ // the max range, we instead compare against (max_i + 1), which is a single
+ // 1 followed by trailing zeros and is therefore exactly representable.
+ float max_i_plus_1 =
+ std::is_same<uint64_t, I_TYPE>::value
+ ? 0x1p64f // uint64_t::max + 1 cannot be represented in integers,
+ // so use its float representation directly
+ : static_cast<float>(static_cast<uint64_t>(max_i) + 1);
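+ // For example, for int32_t the value max_i + 1 is 2^31, which is exactly
+ // representable as a float, whereas INT32_MAX itself is not.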
+ if (rounded >= max_i_plus_1) {
+ set_fflags(kOverflow | kInvalidOperation);
+ return max_i;
+ }
+
+ // Since min_i (0 for unsigned types, or -2^(n-1) for signed types) is
+ // represented exactly in floating point, rounded can be compared directly
+ // against min_i.
+ if (rounded <= min_i) {
+ if (rounded < min_i) set_fflags(kOverflow | kInvalidOperation);
+ return min_i;
+ }
+
+ F_TYPE underflow_fval =
+ std::is_same<F_TYPE, float>::value ? FLT_MIN : DBL_MIN;
+ if (rounded < underflow_fval && rounded > -underflow_fval && rounded != 0) {
+ set_fflags(kUnderflow);
+ }
+
+ return static_cast<I_TYPE>(rounded);
+}
+
+template <typename T>
+static int64_t FclassHelper(T value) {
+ switch (std::fpclassify(value)) {
+ case FP_INFINITE:
+ return (std::signbit(value) ? kNegativeInfinity : kPositiveInfinity);
+ case FP_NAN:
+ return (isSnan(value) ? kSignalingNaN : kQuietNaN);
+ case FP_NORMAL:
+ return (std::signbit(value) ? kNegativeNormalNumber
+ : kPositiveNormalNumber);
+ case FP_SUBNORMAL:
+ return (std::signbit(value) ? kNegativeSubnormalNumber
+ : kPositiveSubnormalNumber);
+ case FP_ZERO:
+ return (std::signbit(value) ? kNegativeZero : kPositiveZero);
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return FP_ZERO;
+}
+
+template <typename T>
+bool Simulator::CompareFHelper(T input1, T input2, FPUCondition cc) {
+ MOZ_ASSERT(std::is_floating_point<T>::value);
+ bool result = false;
+ switch (cc) {
+ case LT:
+ case LE:
+ // FLT, FLE are signaling compares
+ if (std::isnan(input1) || std::isnan(input2)) {
+ set_fflags(kInvalidOperation);
+ result = false;
+ } else {
+ result = (cc == LT) ? (input1 < input2) : (input1 <= input2);
+ }
+ break;
+ case EQ:
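+ // FEQ is a quiet comparison: only a signaling NaN input raises the
+ // invalid-operation flag, and any NaN operand makes the result false.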
+ if (isSnan(input1) || isSnan(input2)) {
+ set_fflags(kInvalidOperation);
+ }
+ if (std::isnan(input1) || std::isnan(input2)) {
+ result = false;
+ } else {
+ result = (input1 == input2);
+ }
+ break;
+ case NE:
+ if (isSnan(input1) || isSnan(input2)) {
+ set_fflags(kInvalidOperation);
+ }
+ if (std::isnan(input1) || std::isnan(input2)) {
+ result = true;
+ } else {
+ result = (input1 != input2);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return result;
+}
+
+template <typename T>
+static inline bool is_invalid_fmul(T src1, T src2) {
+ return (isinf(src1) && src2 == static_cast<T>(0.0)) ||
+ (src1 == static_cast<T>(0.0) && isinf(src2));
+}
+
+template <typename T>
+static inline bool is_invalid_fadd(T src1, T src2) {
+ return (isinf(src1) && isinf(src2) &&
+ std::signbit(src1) != std::signbit(src2));
+}
+
+template <typename T>
+static inline bool is_invalid_fsub(T src1, T src2) {
+ return (isinf(src1) && isinf(src2) &&
+ std::signbit(src1) == std::signbit(src2));
+}
+
+template <typename T>
+static inline bool is_invalid_fdiv(T src1, T src2) {
+ return ((src1 == 0 && src2 == 0) || (isinf(src1) && isinf(src2)));
+}
+
+template <typename T>
+static inline bool is_invalid_fsqrt(T src1) {
+ return (src1 < 0);
+}
+
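+// LR/SC is emulated with a single reservation (LLAddr_/LLBit_): LR records
+// the address and the loaded value, and SC succeeds (writes 0) only if the
+// reservation is still valid and memory still holds the value read by LR.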
+int Simulator::loadLinkedW(uint64_t addr, SimInstruction* instr) {
+ if ((addr & 3) == 0) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
+ int32_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalW(uint64_t addr, int value,
+ SimInstruction* instr) {
+ // Correct behavior in this case, as defined by the architecture, is for the
+ // store-conditional to simply fail, but there is no point in allowing that
+ // here: it is almost certainly an indicator of a bug.
+ if (addr != LLAddr_) {
+ printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+ ", expected: 0x%016" PRIx64 "\n",
+ addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & 3) == 0) {
+ SharedMem<int32_t*> ptr =
+ SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+
+ if (!LLBit_) {
+ return 1;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int32_t expected = int32_t(lastLLValue_);
+ int32_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
+ return (old == expected) ? 0 : 1;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int64_t Simulator::loadLinkedD(uint64_t addr, SimInstruction* instr) {
+ if ((addr & kPointerAlignmentMask) == 0) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ volatile int64_t* ptr = reinterpret_cast<volatile int64_t*>(addr);
+ int64_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr) {
+ // Correct behavior in this case, as defined by the architecture, is for the
+ // store-conditional to simply fail, but there is no point in allowing that
+ // here: it is almost certainly an indicator of a bug.
+ if (addr != LLAddr_) {
+ printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+ ", expected: 0x%016" PRIx64 "\n",
+ addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0) {
+ SharedMem<int64_t*> ptr =
+ SharedMem<int64_t*>::shared(reinterpret_cast<int64_t*>(addr));
+
+ if (!LLBit_) {
+ return 1;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int64_t expected = lastLLValue_;
+ int64_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int64_t(value));
+ return (old == expected) ? 0 : 1;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::DecodeRVRAType() {
+ // TODO(riscv): Add macro for RISCV A extension
+ // Special handling for A extension instructions because they use funct5.
+ // For all A extension instructions the simulator is purely sequential: no
+ // memory address locking or other synchronization behavior.
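+ // Each AMO atomically loads the old memory value into rd and stores the
+ // result of applying the operation to that value and rs2. Word-sized AMOs
+ // require 4-byte alignment, hence the (rs1() & 0x3) checks below.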
+ switch (instr_.InstructionBits() & kRATypeMask) {
+ case RO_LR_W: {
+ sreg_t addr = rs1();
+ set_rd(loadLinkedW(addr, &instr_));
+ TraceLr(addr, getRegister(rd_reg()), getRegister(rd_reg()));
+ break;
+ }
+ case RO_SC_W: {
+ sreg_t addr = rs1();
+ auto value = static_cast<int32_t>(rs2());
+ auto result =
+ storeConditionalW(addr, static_cast<int32_t>(rs2()), &instr_);
+ set_rd(result);
+ if (!result) {
+ TraceSc(addr, value);
+ }
+ break;
+ }
+ case RO_AMOSWAP_W: {
+ if ((rs1() & 0x3) != 0) {
+ DieOrDebug();
+ }
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return (uint32_t)rs2(); }, instr_.instr(),
+ WORD)));
+ break;
+ }
+ case RO_AMOADD_W: {
+ if ((rs1() & 0x3) != 0) {
+ DieOrDebug();
+ }
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return lhs + (uint32_t)rs2(); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOXOR_W: {
+ if ((rs1() & 0x3) != 0) {
+ DieOrDebug();
+ }
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return lhs ^ (uint32_t)rs2(); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOAND_W: {
+ if ((rs1() & 0x3) != 0) {
+ DieOrDebug();
+ }
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return lhs & (uint32_t)rs2(); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOOR_W: {
+ if ((rs1() & 0x3) != 0) {
+ DieOrDebug();
+ }
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return lhs | (uint32_t)rs2(); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOMIN_W: {
+ if ((rs1() & 0x3) != 0) {
+ DieOrDebug();
+ }
+ set_rd(sext32(amo<int32_t>(
+ rs1(), [&](int32_t lhs) { return std::min(lhs, (int32_t)rs2()); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOMAX_W: {
+ if ((rs1() & 0x3) != 0) {
+ DieOrDebug();
+ }
+ set_rd(sext32(amo<int32_t>(
+ rs1(), [&](int32_t lhs) { return std::max(lhs, (int32_t)rs2()); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOMINU_W: {
+ if ((rs1() & 0x3) != 0) {
+ DieOrDebug();
+ }
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return std::min(lhs, (uint32_t)rs2()); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOMAXU_W: {
+ if ((rs1() & 0x3) != 0) {
+ DieOrDebug();
+ }
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return std::max(lhs, (uint32_t)rs2()); },
+ instr_.instr(), WORD)));
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case RO_LR_D: {
+ sreg_t addr = rs1();
+ set_rd(loadLinkedD(addr, &instr_));
+ TraceLr(addr, getRegister(rd_reg()), getRegister(rd_reg()));
+ break;
+ }
+ case RO_SC_D: {
+ sreg_t addr = rs1();
+ auto value = static_cast<int64_t>(rs2());
+ auto result =
+ storeConditionalD(addr, static_cast<int64_t>(rs2()), &instr_);
+ set_rd(result);
+ if (!result) {
+ TraceSc(addr, value);
+ }
+ break;
+ }
+ case RO_AMOSWAP_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return rs2(); }, instr_.instr(), DWORD));
+ break;
+ }
+ case RO_AMOADD_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return lhs + rs2(); }, instr_.instr(),
+ DWORD));
+ break;
+ }
+ case RO_AMOXOR_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return lhs ^ rs2(); }, instr_.instr(),
+ DWORD));
+ break;
+ }
+ case RO_AMOAND_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return lhs & rs2(); }, instr_.instr(),
+ DWORD));
+ break;
+ }
+ case RO_AMOOR_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return lhs | rs2(); }, instr_.instr(),
+ DWORD));
+ break;
+ }
+ case RO_AMOMIN_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return std::min(lhs, rs2()); },
+ instr_.instr(), DWORD));
+ break;
+ }
+ case RO_AMOMAX_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return std::max(lhs, rs2()); },
+ instr_.instr(), DWORD));
+ break;
+ }
+ case RO_AMOMINU_D: {
+ set_rd(amo<uint64_t>(
+ rs1(), [&](uint64_t lhs) { return std::min(lhs, (uint64_t)rs2()); },
+ instr_.instr(), DWORD));
+ break;
+ }
+ case RO_AMOMAXU_D: {
+ set_rd(amo<uint64_t>(
+ rs1(), [&](uint64_t lhs) { return std::max(lhs, (uint64_t)rs2()); },
+ instr_.instr(), DWORD));
+ break;
+ }
+# endif /*JS_CODEGEN_RISCV64*/
+ // TODO(riscv): End Add macro for RISCV A extension
+ default: {
+ UNSUPPORTED();
+ }
+ }
+}
+
+void Simulator::DecodeRVRFPType() {
+ // OP_FP instructions (F/D) use funct7 first. Some further use funct3 and
+ // rs2().
+
+ // kRFPTypeMask only covers funct7.
+ switch (instr_.InstructionBits() & kRFPTypeMask) {
+ // TODO(riscv): Add macro for RISCV F extension
+ case RO_FADD_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fadd(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs1 + frs2;
+ }
+ };
+ set_frd(CanonicalizeFPUOp2<float>(fn));
+ break;
+ }
+ case RO_FSUB_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fsub(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs1 - frs2;
+ }
+ };
+ set_frd(CanonicalizeFPUOp2<float>(fn));
+ break;
+ }
+ case RO_FMUL_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fmul(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs1 * frs2;
+ }
+ };
+ set_frd(CanonicalizeFPUOp2<float>(fn));
+ break;
+ }
+ case RO_FDIV_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fdiv(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else if (frs2 == 0.0f) {
+ this->set_fflags(kDivideByZero);
+ return (std::signbit(frs1) == std::signbit(frs2)
+ ? std::numeric_limits<float>::infinity()
+ : -std::numeric_limits<float>::infinity());
+ } else {
+ return frs1 / frs2;
+ }
+ };
+ set_frd(CanonicalizeFPUOp2<float>(fn));
+ break;
+ }
+ case RO_FSQRT_S: {
+ if (instr_.Rs2Value() == 0b00000) {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs) {
+ if (is_invalid_fsqrt(frs)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return std::sqrt(frs);
+ }
+ };
+ set_frd(CanonicalizeFPUOp1<float>(fn));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case RO_FSGNJ_S: {  // RO_FSGNJN_S RO_FSGNJX_S
+ switch (instr_.Funct3Value()) {
+ case 0b000: { // RO_FSGNJ_S
+ set_frd(fsgnj32(frs1_boxed(), frs2_boxed(), false, false));
+ break;
+ }
+ case 0b001: { // RO_FSGNJN_S
+ set_frd(fsgnj32(frs1_boxed(), frs2_boxed(), true, false));
+ break;
+ }
+ case 0b010: {  // RO_FSGNJX_S
+ set_frd(fsgnj32(frs1_boxed(), frs2_boxed(), false, true));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FMIN_S: { // RO_FMAX_S
+ switch (instr_.Funct3Value()) {
+ case 0b000: { // RO_FMIN_S
+ set_frd(FMaxMinHelper(frs1(), frs2(), MaxMinKind::kMin));
+ break;
+ }
+ case 0b001: { // RO_FMAX_S
+ set_frd(FMaxMinHelper(frs1(), frs2(), MaxMinKind::kMax));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FCVT_W_S: { // RO_FCVT_WU_S , 64F RO_FCVT_L_S RO_FCVT_LU_S
+ float original_val = frs1();
+ switch (instr_.Rs2Value()) {
+ case 0b00000: { // RO_FCVT_W_S
+ set_rd(RoundF2IHelper<int32_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+ case 0b00001: { // RO_FCVT_WU_S
+ set_rd(sext32(
+ RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case 0b00010: { // RO_FCVT_L_S
+ set_rd(RoundF2IHelper<int64_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+ case 0b00011: { // RO_FCVT_LU_S
+ set_rd(RoundF2IHelper<uint64_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+# endif /* JS_CODEGEN_RISCV64 */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FMV: { // RO_FCLASS_S
+ switch (instr_.Funct3Value()) {
+ case 0b000: {
+ if (instr_.Rs2Value() == 0b00000) {
+ // RO_FMV_X_W
+ set_rd(sext32(getFpuRegister(rs1_reg())));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case 0b001: { // RO_FCLASS_S
+ set_rd(FclassHelper(frs1()));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FLE_S: { // RO_FEQ_S RO_FLT_S RO_FLE_S
+ switch (instr_.Funct3Value()) {
+ case 0b010: { // RO_FEQ_S
+ set_rd(CompareFHelper(frs1(), frs2(), EQ));
+ break;
+ }
+ case 0b001: { // RO_FLT_S
+ set_rd(CompareFHelper(frs1(), frs2(), LT));
+ break;
+ }
+ case 0b000: { // RO_FLE_S
+ set_rd(CompareFHelper(frs1(), frs2(), LE));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FCVT_S_W: { // RO_FCVT_S_WU , 64F RO_FCVT_S_L RO_FCVT_S_LU
+ switch (instr_.Rs2Value()) {
+ case 0b00000: { // RO_FCVT_S_W
+ set_frd(static_cast<float>((int32_t)rs1()));
+ break;
+ }
+ case 0b00001: { // RO_FCVT_S_WU
+ set_frd(static_cast<float>((uint32_t)rs1()));
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case 0b00010: { // RO_FCVT_S_L
+ set_frd(static_cast<float>((int64_t)rs1()));
+ break;
+ }
+ case 0b00011: { // RO_FCVT_S_LU
+ set_frd(static_cast<float>((uint64_t)rs1()));
+ break;
+ }
+# endif /* JS_CODEGEN_RISCV64 */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FMV_W_X: {
+ if (instr_.Funct3Value() == 0b000) {
+ // Since FMV preserves the source bit pattern, no canonicalization is needed.
+ Float32 result = Float32::FromBits((uint32_t)rs1());
+ set_frd(result);
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ // TODO(riscv): Add macro for RISCV D extension
+ case RO_FADD_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fadd(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 + drs2;
+ }
+ };
+ set_drd(CanonicalizeFPUOp2<double>(fn));
+ break;
+ }
+ case RO_FSUB_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fsub(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 - drs2;
+ }
+ };
+ set_drd(CanonicalizeFPUOp2<double>(fn));
+ break;
+ }
+ case RO_FMUL_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fmul(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 * drs2;
+ }
+ };
+ set_drd(CanonicalizeFPUOp2<double>(fn));
+ break;
+ }
+ case RO_FDIV_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fdiv(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else if (drs2 == 0.0) {
+ this->set_fflags(kDivideByZero);
+ return (std::signbit(drs1) == std::signbit(drs2)
+ ? std::numeric_limits<double>::infinity()
+ : -std::numeric_limits<double>::infinity());
+ } else {
+ return drs1 / drs2;
+ }
+ };
+ set_drd(CanonicalizeFPUOp2<double>(fn));
+ break;
+ }
+ case RO_FSQRT_D: {
+ if (instr_.Rs2Value() == 0b00000) {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs) {
+ if (is_invalid_fsqrt(drs)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return std::sqrt(drs);
+ }
+ };
+ set_drd(CanonicalizeFPUOp1<double>(fn));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case RO_FSGNJ_D: {  // RO_FSGNJN_D RO_FSGNJX_D
+ switch (instr_.Funct3Value()) {
+ case 0b000: { // RO_FSGNJ_D
+ set_drd(fsgnj64(drs1_boxed(), drs2_boxed(), false, false));
+ break;
+ }
+ case 0b001: { // RO_FSGNJN_D
+ set_drd(fsgnj64(drs1_boxed(), drs2_boxed(), true, false));
+ break;
+ }
+ case 0b010: {  // RO_FSGNJX_D
+ set_drd(fsgnj64(drs1_boxed(), drs2_boxed(), false, true));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FMIN_D: { // RO_FMAX_D
+ switch (instr_.Funct3Value()) {
+ case 0b000: { // RO_FMIN_D
+ set_drd(FMaxMinHelper(drs1(), drs2(), MaxMinKind::kMin));
+ break;
+ }
+ case 0b001: { // RO_FMAX_D
+ set_drd(FMaxMinHelper(drs1(), drs2(), MaxMinKind::kMax));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case (RO_FCVT_S_D & kRFPTypeMask): {
+ if (instr_.Rs2Value() == 0b00001) {
+ auto fn = [](double drs) { return static_cast<float>(drs); };
+ set_frd(CanonicalizeDoubleToFloatOperation(fn));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case RO_FCVT_D_S: {
+ if (instr_.Rs2Value() == 0b00000) {
+ auto fn = [](float frs) { return static_cast<double>(frs); };
+ set_drd(CanonicalizeFloatToDoubleOperation(fn));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case RO_FLE_D: { // RO_FEQ_D RO_FLT_D RO_FLE_D
+ switch (instr_.Funct3Value()) {
+ case 0b010: {  // RO_FEQ_D
+ set_rd(CompareFHelper(drs1(), drs2(), EQ));
+ break;
+ }
+ case 0b001: { // RO_FLT_D
+ set_rd(CompareFHelper(drs1(), drs2(), LT));
+ break;
+ }
+ case 0b000: { // RO_FLE_D
+ set_rd(CompareFHelper(drs1(), drs2(), LE));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case (RO_FCLASS_D & kRFPTypeMask): { // RO_FCLASS_D , 64D RO_FMV_X_D
+ if (instr_.Rs2Value() != 0b00000) {
+ UNSUPPORTED();
+ }
+ switch (instr_.Funct3Value()) {
+ case 0b001: { // RO_FCLASS_D
+ set_rd(FclassHelper(drs1()));
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case 0b000: { // RO_FMV_X_D
+ set_rd(bit_cast<int64_t>(drs1()));
+ break;
+ }
+# endif /* JS_CODEGEN_RISCV64 */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FCVT_W_D: { // RO_FCVT_WU_D , 64F RO_FCVT_L_D RO_FCVT_LU_D
+ double original_val = drs1();
+ switch (instr_.Rs2Value()) {
+ case 0b00000: { // RO_FCVT_W_D
+ set_rd(RoundF2IHelper<int32_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+ case 0b00001: { // RO_FCVT_WU_D
+ set_rd(sext32(
+ RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case 0b00010: { // RO_FCVT_L_D
+ set_rd(RoundF2IHelper<int64_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+ case 0b00011: { // RO_FCVT_LU_D
+ set_rd(RoundF2IHelper<uint64_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+# endif /* JS_CODEGEN_RISCV64 */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FCVT_D_W: { // RO_FCVT_D_WU , 64F RO_FCVT_D_L RO_FCVT_D_LU
+ switch (instr_.Rs2Value()) {
+ case 0b00000: { // RO_FCVT_D_W
+ set_drd((int32_t)rs1());
+ break;
+ }
+ case 0b00001: { // RO_FCVT_D_WU
+ set_drd((uint32_t)rs1());
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case 0b00010: { // RO_FCVT_D_L
+ set_drd((int64_t)rs1());
+ break;
+ }
+ case 0b00011: { // RO_FCVT_D_LU
+ set_drd((uint64_t)rs1());
+ break;
+ }
+# endif /* JS_CODEGEN_RISCV64 */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case RO_FMV_D_X: {
+ if (instr_.Funct3Value() == 0b000 && instr_.Rs2Value() == 0b00000) {
+ // Since FMV preserves the source bit pattern, no canonicalization is needed.
+ set_drd(bit_cast<double>(rs1()));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+# endif /* JS_CODEGEN_RISCV64 */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+}
+
+void Simulator::DecodeRVR4Type() {
+ switch (instr_.InstructionBits() & kR4TypeMask) {
+ // TODO(riscv): use F Extension macro block
+ case RO_FMADD_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2, float frs3) {
+ if (is_invalid_fmul(frs1, frs2) || is_invalid_fadd(frs1 * frs2, frs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return std::fma(frs1, frs2, frs3);
+ }
+ };
+ set_frd(CanonicalizeFPUOp3<float>(fn));
+ break;
+ }
+ case RO_FMSUB_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2, float frs3) {
+ if (is_invalid_fmul(frs1, frs2) || is_invalid_fsub(frs1 * frs2, frs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return std::fma(frs1, frs2, -frs3);
+ }
+ };
+ set_frd(CanonicalizeFPUOp3<float>(fn));
+ break;
+ }
+ case RO_FNMSUB_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2, float frs3) {
+ if (is_invalid_fmul(frs1, frs2) || is_invalid_fsub(frs3, frs1 * frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return -std::fma(frs1, frs2, -frs3);
+ }
+ };
+ set_frd(CanonicalizeFPUOp3<float>(fn));
+ break;
+ }
+ case RO_FNMADD_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2, float frs3) {
+ if (is_invalid_fmul(frs1, frs2) || is_invalid_fadd(frs1 * frs2, frs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return -std::fma(frs1, frs2, frs3);
+ }
+ };
+ set_frd(CanonicalizeFPUOp3<float>(fn));
+ break;
+ }
+ // TODO(riscv): use F Extension macro block
+ case RO_FMADD_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2, double drs3) {
+ if (is_invalid_fmul(drs1, drs2) || is_invalid_fadd(drs1 * drs2, drs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return std::fma(drs1, drs2, drs3);
+ }
+ };
+ set_drd(CanonicalizeFPUOp3<double>(fn));
+ break;
+ }
+ case RO_FMSUB_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2, double drs3) {
+ if (is_invalid_fmul(drs1, drs2) || is_invalid_fsub(drs1 * drs2, drs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return std::fma(drs1, drs2, -drs3);
+ }
+ };
+ set_drd(CanonicalizeFPUOp3<double>(fn));
+ break;
+ }
+ case RO_FNMSUB_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2, double drs3) {
+ if (is_invalid_fmul(drs1, drs2) || is_invalid_fsub(drs3, drs1 * drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return -std::fma(drs1, drs2, -drs3);
+ }
+ };
+ set_drd(CanonicalizeFPUOp3<double>(fn));
+ break;
+ }
+ case RO_FNMADD_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2, double drs3) {
+ if (is_invalid_fmul(drs1, drs2) || is_invalid_fadd(drs1 * drs2, drs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return -std::fma(drs1, drs2, drs3);
+ }
+ };
+ set_drd(CanonicalizeFPUOp3<double>(fn));
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+bool Simulator::DecodeRvvVL() {
+ uint32_t instr_temp =
+ instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VL == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ switch (instr_.vl_vs_width()) {
+ case 8: {
+ RVV_VI_LD(0, (i * nf + fn), int8, false);
+ break;
+ }
+ case 16: {
+ RVV_VI_LD(0, (i * nf + fn), int16, false);
+ break;
+ }
+ case 32: {
+ RVV_VI_LD(0, (i * nf + fn), int32, false);
+ break;
+ }
+ case 64: {
+ RVV_VI_LD(0, (i * nf + fn), int64, false);
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ return true;
+ } else {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ }
+ } else if (RO_V_VLS == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLX == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLSEG2 == instr_temp || RO_V_VLSEG3 == instr_temp ||
+ RO_V_VLSEG4 == instr_temp || RO_V_VLSEG5 == instr_temp ||
+ RO_V_VLSEG6 == instr_temp || RO_V_VLSEG7 == instr_temp ||
+ RO_V_VLSEG8 == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ }
+ } else if (RO_V_VLSSEG2 == instr_temp || RO_V_VLSSEG3 == instr_temp ||
+ RO_V_VLSSEG4 == instr_temp || RO_V_VLSSEG5 == instr_temp ||
+ RO_V_VLSSEG6 == instr_temp || RO_V_VLSSEG7 == instr_temp ||
+ RO_V_VLSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLXSEG2 == instr_temp || RO_V_VLXSEG3 == instr_temp ||
+ RO_V_VLXSEG4 == instr_temp || RO_V_VLXSEG5 == instr_temp ||
+ RO_V_VLXSEG6 == instr_temp || RO_V_VLXSEG7 == instr_temp ||
+ RO_V_VLXSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::DecodeRvvVS() {
+ uint32_t instr_temp =
+ instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VS == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ switch (instr_.vl_vs_width()) {
+ case 8: {
+ RVV_VI_ST(0, (i * nf + fn), uint8, false);
+ break;
+ }
+ case 16: {
+ RVV_VI_ST(0, (i * nf + fn), uint16, false);
+ break;
+ }
+ case 32: {
+ RVV_VI_ST(0, (i * nf + fn), uint32, false);
+ break;
+ }
+ case 64: {
+ RVV_VI_ST(0, (i * nf + fn), uint64, false);
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ } else {
+ UNIMPLEMENTED_RISCV();
+ }
+ return true;
+ } else if (RO_V_VSS == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSX == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSU == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSSEG2 == instr_temp || RO_V_VSSEG3 == instr_temp ||
+ RO_V_VSSEG4 == instr_temp || RO_V_VSSEG5 == instr_temp ||
+ RO_V_VSSEG6 == instr_temp || RO_V_VSSEG7 == instr_temp ||
+ RO_V_VSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSSSEG2 == instr_temp || RO_V_VSSSEG3 == instr_temp ||
+ RO_V_VSSSEG4 == instr_temp || RO_V_VSSSEG5 == instr_temp ||
+ RO_V_VSSSEG6 == instr_temp || RO_V_VSSSEG7 == instr_temp ||
+ RO_V_VSSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSXSEG2 == instr_temp || RO_V_VSXSEG3 == instr_temp ||
+ RO_V_VSXSEG4 == instr_temp || RO_V_VSXSEG5 == instr_temp ||
+ RO_V_VSXSEG6 == instr_temp || RO_V_VSXSEG7 == instr_temp ||
+ RO_V_VSXSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ return false;
+ }
+}
+# endif
+
+void Simulator::DecodeRVIType() {
+ switch (instr_.InstructionBits() & kITypeMask) {
+ case RO_JALR: {
+ set_rd(get_pc() + kInstrSize);
+ // Note: JALR's imm12 is not shifted left by 2, but the lowest bit of the
+ // target address must be cleared.
+ sreg_t next_pc = (rs1() + imm12()) & ~sreg_t(1);
+ set_pc(next_pc);
+ break;
+ }
+ case RO_LB: {
+ sreg_t addr = rs1() + imm12();
+ int8_t val = ReadMem<int8_t>(addr, instr_.instr());
+ set_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rd_reg()));
+ break;
+ }
+ case RO_LH: {
+ sreg_t addr = rs1() + imm12();
+ int16_t val = ReadMem<int16_t>(addr, instr_.instr());
+ set_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rd_reg()));
+ break;
+ }
+ case RO_LW: {
+ sreg_t addr = rs1() + imm12();
+ int32_t val = ReadMem<int32_t>(addr, instr_.instr());
+ set_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rd_reg()));
+ break;
+ }
+ case RO_LBU: {
+ sreg_t addr = rs1() + imm12();
+ uint8_t val = ReadMem<uint8_t>(addr, instr_.instr());
+ set_rd(zext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rd_reg()));
+ break;
+ }
+ case RO_LHU: {
+ sreg_t addr = rs1() + imm12();
+ uint16_t val = ReadMem<uint16_t>(addr, instr_.instr());
+ set_rd(zext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rd_reg()));
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case RO_LWU: {
+ int64_t addr = rs1() + imm12();
+ uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
+ set_rd(zext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rd_reg()));
+ break;
+ }
+ case RO_LD: {
+ int64_t addr = rs1() + imm12();
+ int64_t val = ReadMem<int64_t>(addr, instr_.instr());
+ set_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rd_reg()));
+ break;
+ }
+# endif /*JS_CODEGEN_RISCV64*/
+ case RO_ADDI: {
+ set_rd(sext_xlen(rs1() + imm12()));
+ break;
+ }
+ case RO_SLTI: {
+ set_rd(sreg_t(rs1()) < sreg_t(imm12()));
+ break;
+ }
+ case RO_SLTIU: {
+ set_rd(reg_t(rs1()) < reg_t(imm12()));
+ break;
+ }
+ case RO_XORI: {
+ set_rd(imm12() ^ rs1());
+ break;
+ }
+ case RO_ORI: {
+ set_rd(imm12() | rs1());
+ break;
+ }
+ case RO_ANDI: {
+ set_rd(imm12() & rs1());
+ break;
+ }
+ case RO_SLLI: {
+ require(shamt6() < xlen);
+ set_rd(sext_xlen(rs1() << shamt6()));
+ break;
+ }
+ case RO_SRLI: { // RO_SRAI
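+ // SRLI and SRAI share the same opcode and funct3; they are distinguished by
+ // bit 30 of the encoding (checked via IsArithShift()).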
+ if (!instr_.IsArithShift()) {
+ require(shamt6() < xlen);
+ set_rd(sext_xlen(zext_xlen(rs1()) >> shamt6()));
+ } else {
+ require(shamt6() < xlen);
+ set_rd(sext_xlen(sext_xlen(rs1()) >> shamt6()));
+ }
+ break;
+ }
+# ifdef JS_CODEGEN_RISCV64
+ case RO_ADDIW: {
+ set_rd(sext32(rs1() + imm12()));
+ break;
+ }
+ case RO_SLLIW: {
+ set_rd(sext32(rs1() << shamt5()));
+ break;
+ }
+ case RO_SRLIW: { // RO_SRAIW
+ if (!instr_.IsArithShift()) {
+ set_rd(sext32(uint32_t(rs1()) >> shamt5()));
+ } else {
+ set_rd(sext32(int32_t(rs1()) >> shamt5()));
+ }
+ break;
+ }
+# endif /*JS_CODEGEN_RISCV64*/
+ case RO_FENCE: {
+ // Do nothing in the simulator.
+ break;
+ }
+ case RO_ECALL: { // RO_EBREAK
+ if (instr_.Imm12Value() == 0) { // ECALL
+ SoftwareInterrupt();
+ } else if (instr_.Imm12Value() == 1) { // EBREAK
+ uint8_t code = get_ebreak_code(instr_.instr());
+ if (code == kWasmTrapCode) {
+ uint8_t* newPC;
+ if (wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc(int64_t(newPC));
+ return;
+ }
+ }
+ SoftwareInterrupt();
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ // TODO(riscv): use Zifencei Standard Extension macro block
+ case RO_FENCE_I: {
+ // spike: flush icache.
+ break;
+ }
+ // TODO(riscv): use Zicsr Standard Extension macro block
+ case RO_CSRRW: {
+ if (rd_reg() != zero_reg) {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ }
+ write_csr_value(csr_reg(), rs1());
+ break;
+ }
+ case RO_CSRRS: {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ if (rs1_reg() != zero_reg) {
+ set_csr_bits(csr_reg(), rs1());
+ }
+ break;
+ }
+ case RO_CSRRC: {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ if (rs1_reg() != zero_reg) {
+ clear_csr_bits(csr_reg(), rs1());
+ }
+ break;
+ }
+ case RO_CSRRWI: {
+ if (rd_reg() != zero_reg) {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ }
+ write_csr_value(csr_reg(), imm5CSR());
+ break;
+ }
+ case RO_CSRRSI: {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ if (imm5CSR() != 0) {
+ set_csr_bits(csr_reg(), imm5CSR());
+ }
+ break;
+ }
+ case RO_CSRRCI: {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ if (imm5CSR() != 0) {
+ clear_csr_bits(csr_reg(), imm5CSR());
+ }
+ break;
+ }
+ // TODO(riscv): use F Extension macro block
+ case RO_FLW: {
+ sreg_t addr = rs1() + imm12();
+ uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
+ set_frd(Float32::FromBits(val), false);
+ TraceMemRdFloat(addr, Float32::FromBits(val), getFpuRegister(frd_reg()));
+ break;
+ }
+ // TODO(riscv): use D Extension macro block
+ case RO_FLD: {
+ sreg_t addr = rs1() + imm12();
+ uint64_t val = ReadMem<uint64_t>(addr, instr_.instr());
+ set_drd(Float64::FromBits(val), false);
+ TraceMemRdDouble(addr, Float64::FromBits(val), getFpuRegister(frd_reg()));
+ break;
+ }
+ default: {
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ if (!DecodeRvvVL()) {
+ UNSUPPORTED();
+ }
+ break;
+# else
+ UNSUPPORTED();
+# endif
+ }
+ }
+}
+
+void Simulator::DecodeRVSType() {
+ switch (instr_.InstructionBits() & kSTypeMask) {
+ case RO_SB:
+ WriteMem<uint8_t>(rs1() + s_imm12(), (uint8_t)rs2(), instr_.instr());
+ break;
+ case RO_SH:
+ WriteMem<uint16_t>(rs1() + s_imm12(), (uint16_t)rs2(), instr_.instr());
+ break;
+ case RO_SW:
+ WriteMem<uint32_t>(rs1() + s_imm12(), (uint32_t)rs2(), instr_.instr());
+ break;
+# ifdef JS_CODEGEN_RISCV64
+ case RO_SD:
+ WriteMem<uint64_t>(rs1() + s_imm12(), (uint64_t)rs2(), instr_.instr());
+ break;
+# endif /*JS_CODEGEN_RISCV64*/
+ // TODO(riscv): use F Extension macro block
+ case RO_FSW: {
+ WriteMem<Float32>(rs1() + s_imm12(), getFpuRegisterFloat32(rs2_reg()),
+ instr_.instr());
+ break;
+ }
+ // TODO(riscv): use D Extension macro block
+ case RO_FSD: {
+ WriteMem<Float64>(rs1() + s_imm12(), getFpuRegisterFloat64(rs2_reg()),
+ instr_.instr());
+ break;
+ }
+ default:
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ if (!DecodeRvvVS()) {
+ UNSUPPORTED();
+ }
+ break;
+# else
+ UNSUPPORTED();
+# endif
+ }
+}
+
+void Simulator::DecodeRVBType() {
+ switch (instr_.InstructionBits() & kBTypeMask) {
+ case RO_BEQ:
+ if (rs1() == rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BNE:
+ if (rs1() != rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BLT:
+ if (rs1() < rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BGE:
+ if (rs1() >= rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BLTU:
+ if ((reg_t)rs1() < (reg_t)rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BGEU:
+ if ((reg_t)rs1() >= (reg_t)rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ default:
+ UNSUPPORTED();
+ }
+}
+void Simulator::DecodeRVUType() {
+ // U-type doesn't have an additional mask
+ switch (instr_.BaseOpcodeFieldRaw()) {
+ case LUI:
+ set_rd(u_imm20());
+ break;
+ case AUIPC:
+ set_rd(sext_xlen(u_imm20() + get_pc()));
+ break;
+ default:
+ UNSUPPORTED();
+ }
+}
+void Simulator::DecodeRVJType() {
+ // J-type doesn't have an additional mask
+ switch (instr_.BaseOpcodeValue()) {
+ case JAL: {
+ set_rd(get_pc() + kInstrSize);
+ int64_t next_pc = get_pc() + imm20J();
+ set_pc(next_pc);
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+void Simulator::DecodeCRType() {
+ switch (instr_.RvcFunct4Value()) {
+ case 0b1000:
+ if (instr_.RvcRs1Value() != 0 && instr_.RvcRs2Value() == 0) { // c.jr
+ set_pc(rvc_rs1());
+ } else if (instr_.RvcRdValue() != 0 &&
+ instr_.RvcRs2Value() != 0) { // c.mv
+ set_rvc_rd(sext_xlen(rvc_rs2()));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ case 0b1001:
+ if (instr_.RvcRs1Value() == 0 && instr_.RvcRs2Value() == 0) { // c.ebreak
+ DieOrDebug();
+ } else if (instr_.RvcRdValue() != 0 &&
+ instr_.RvcRs2Value() == 0) { // c.jalr
+ setRegister(ra, get_pc() + kShortInstrSize);
+ set_pc(rvc_rs1());
+ } else if (instr_.RvcRdValue() != 0 &&
+ instr_.RvcRs2Value() != 0) { // c.add
+ set_rvc_rd(sext_xlen(rvc_rs1() + rvc_rs2()));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCAType() {
+ switch (instr_.InstructionBits() & kCATypeMask) {
+ case RO_C_SUB:
+ set_rvc_rs1s(sext_xlen(rvc_rs1s() - rvc_rs2s()));
+ break;
+ case RO_C_XOR:
+ set_rvc_rs1s(rvc_rs1s() ^ rvc_rs2s());
+ break;
+ case RO_C_OR:
+ set_rvc_rs1s(rvc_rs1s() | rvc_rs2s());
+ break;
+ case RO_C_AND:
+ set_rvc_rs1s(rvc_rs1s() & rvc_rs2s());
+ break;
+# if JS_CODEGEN_RISCV64
+ case RO_C_SUBW:
+ set_rvc_rs1s(sext32(rvc_rs1s() - rvc_rs2s()));
+ break;
+ case RO_C_ADDW:
+ set_rvc_rs1s(sext32(rvc_rs1s() + rvc_rs2s()));
+ break;
+# endif
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCIType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_NOP_ADDI:
+ if (instr_.RvcRdValue() == 0) // c.nop
+ break;
+ else // c.addi
+ set_rvc_rd(sext_xlen(rvc_rs1() + rvc_imm6()));
+ break;
+# if JS_CODEGEN_RISCV64
+ case RO_C_ADDIW:
+ set_rvc_rd(sext32(rvc_rs1() + rvc_imm6()));
+ break;
+# endif
+ case RO_C_LI:
+ set_rvc_rd(sext_xlen(rvc_imm6()));
+ break;
+ case RO_C_LUI_ADD:
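+ // C.LUI and C.ADDI16SP share an opcode; rd == 2 (sp) selects C.ADDI16SP.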
+ if (instr_.RvcRdValue() == 2) {
+ // c.addi16sp
+ int64_t value = getRegister(sp) + rvc_imm6_addi16sp();
+ setRegister(sp, value);
+ } else if (instr_.RvcRdValue() != 0 && instr_.RvcRdValue() != 2) {
+ // c.lui
+ set_rvc_rd(rvc_u_imm6());
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ case RO_C_SLLI:
+ set_rvc_rd(sext_xlen(rvc_rs1() << rvc_shamt6()));
+ break;
+ case RO_C_FLDSP: {
+ sreg_t addr = getRegister(sp) + rvc_imm6_ldsp();
+ uint64_t val = ReadMem<uint64_t>(addr, instr_.instr());
+ set_rvc_drd(Float64::FromBits(val), false);
+ TraceMemRdDouble(addr, Float64::FromBits(val),
+ getFpuRegister(rvc_frd_reg()));
+ break;
+ }
+# if JS_CODEGEN_RISCV64
+ case RO_C_LWSP: {
+ sreg_t addr = getRegister(sp) + rvc_imm6_lwsp();
+ int64_t val = ReadMem<int32_t>(addr, instr_.instr());
+ set_rvc_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rvc_rd_reg()));
+ break;
+ }
+ case RO_C_LDSP: {
+ sreg_t addr = getRegister(sp) + rvc_imm6_ldsp();
+ int64_t val = ReadMem<int64_t>(addr, instr_.instr());
+ set_rvc_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rvc_rd_reg()));
+ break;
+ }
+# elif JS_CODEGEN_RISCV32
+ case RO_C_FLWSP: {
+ sreg_t addr = getRegister(sp) + rvc_imm6_ldsp();
+ uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
+ set_rvc_frd(Float32::FromBits(val), false);
+ TraceMemRdFloat(addr, Float32::FromBits(val),
+ getFpuRegister(rvc_frd_reg()));
+ break;
+ }
+ case RO_C_LWSP: {
+ sreg_t addr = getRegister(sp) + rvc_imm6_lwsp();
+ int32_t val = ReadMem<int32_t>(addr, instr_.instr());
+ set_rvc_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rvc_rd_reg()));
+ break;
+ }
+# endif
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCIWType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_ADDI4SPN: {
+ set_rvc_rs2s(getRegister(sp) + rvc_imm8_addi4spn());
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCSSType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_FSDSP: {
+ sreg_t addr = getRegister(sp) + rvc_imm6_sdsp();
+ WriteMem<Float64>(addr, getFpuRegisterFloat64(rvc_rs2_reg()),
+ instr_.instr());
+ break;
+ }
+# if JS_CODEGEN_RISCV32
+ case RO_C_FSWSP: {
+ sreg_t addr = getRegister(sp) + rvc_imm6_sdsp();
+ WriteMem<Float32>(addr, getFpuRegisterFloat32(rvc_rs2_reg()),
+ instr_.instr());
+ break;
+ }
+# endif
+ case RO_C_SWSP: {
+ sreg_t addr = getRegister(sp) + rvc_imm6_swsp();
+ WriteMem<int32_t>(addr, (int32_t)rvc_rs2(), instr_.instr());
+ break;
+ }
+# if JS_CODEGEN_RISCV64
+ case RO_C_SDSP: {
+ sreg_t addr = getRegister(sp) + rvc_imm6_sdsp();
+ WriteMem<int64_t>(addr, (int64_t)rvc_rs2(), instr_.instr());
+ break;
+ }
+# endif
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCLType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_LW: {
+ sreg_t addr = rvc_rs1s() + rvc_imm5_w();
+ int64_t val = ReadMem<int32_t>(addr, instr_.instr());
+ set_rvc_rs2s(sext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rvc_rs2s_reg()));
+ break;
+ }
+ case RO_C_FLD: {
+ sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+ uint64_t val = ReadMem<uint64_t>(addr, instr_.instr());
+ set_rvc_drs2s(Float64::FromBits(val), false);
+ break;
+ }
+# if JS_CODEGEN_RISCV64
+ case RO_C_LD: {
+ sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+ int64_t val = ReadMem<int64_t>(addr, instr_.instr());
+ set_rvc_rs2s(sext_xlen(val), false);
+ TraceMemRd(addr, val, getRegister(rvc_rs2s_reg()));
+ break;
+ }
+# elif JS_CODEGEN_RISCV32
+ case RO_C_FLW: {
+ sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+ uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
+ set_rvc_frs2s(Float32::FromBits(val), false);
+ break;
+ }
+# endif
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCSType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_SW: {
+ sreg_t addr = rvc_rs1s() + rvc_imm5_w();
+ WriteMem<int32_t>(addr, (int32_t)rvc_rs2s(), instr_.instr());
+ break;
+ }
+# if JS_CODEGEN_RISCV64
+ case RO_C_SD: {
+ sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+ WriteMem<int64_t>(addr, (int64_t)rvc_rs2s(), instr_.instr());
+ break;
+ }
+# endif
+ case RO_C_FSD: {
+ sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+ WriteMem<double>(addr, static_cast<double>(rvc_drs2s()), instr_.instr());
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCJType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_J: {
+ set_pc(get_pc() + instr_.RvcImm11CJValue());
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCBType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_BNEZ:
+ if (rvc_rs1() != 0) {
+ sreg_t next_pc = get_pc() + rvc_imm8_b();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_C_BEQZ:
+ if (rvc_rs1() == 0) {
+ sreg_t next_pc = get_pc() + rvc_imm8_b();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_C_MISC_ALU:
+ if (instr_.RvcFunct2BValue() == 0b00) { // c.srli
+        set_rvc_rs1s(sext_xlen(zext_xlen(rvc_rs1s()) >> rvc_shamt6()));
+ } else if (instr_.RvcFunct2BValue() == 0b01) { // c.srai
+ require(rvc_shamt6() < xlen);
+ set_rvc_rs1s(sext_xlen(sext_xlen(rvc_rs1s()) >> rvc_shamt6()));
+ } else if (instr_.RvcFunct2BValue() == 0b10) { // c.andi
+ set_rvc_rs1s(rvc_imm6() & rvc_rs1s());
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::callInternal(uint8_t* entry) {
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int64_t>(entry));
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+  // ra, the simulation stops when returning to this call point.
+ setRegister(ra, end_sim_pc);
+ // Remember the values of callee-saved registers.
+ intptr_t s0_val = getRegister(Simulator::Register::fp);
+ intptr_t s1_val = getRegister(Simulator::Register::s1);
+ intptr_t s2_val = getRegister(Simulator::Register::s2);
+ intptr_t s3_val = getRegister(Simulator::Register::s3);
+ intptr_t s4_val = getRegister(Simulator::Register::s4);
+ intptr_t s5_val = getRegister(Simulator::Register::s5);
+ intptr_t s6_val = getRegister(Simulator::Register::s6);
+ intptr_t s7_val = getRegister(Simulator::Register::s7);
+ intptr_t s8_val = getRegister(Simulator::Register::s8);
+ intptr_t s9_val = getRegister(Simulator::Register::s9);
+ intptr_t s10_val = getRegister(Simulator::Register::s10);
+ intptr_t s11_val = getRegister(Simulator::Register::s11);
+ intptr_t gp_val = getRegister(Simulator::Register::gp);
+ intptr_t sp_val = getRegister(Simulator::Register::sp);
+
+  // Set up the callee-saved registers with a known value, so that we can
+  // check (via the MOZ_ASSERTs below) that they are preserved properly
+  // across JS execution.
+ intptr_t callee_saved_value = icount_;
+ setRegister(Simulator::Register::fp, callee_saved_value);
+ setRegister(Simulator::Register::s1, callee_saved_value);
+ setRegister(Simulator::Register::s2, callee_saved_value);
+ setRegister(Simulator::Register::s3, callee_saved_value);
+ setRegister(Simulator::Register::s4, callee_saved_value);
+ setRegister(Simulator::Register::s5, callee_saved_value);
+ setRegister(Simulator::Register::s6, callee_saved_value);
+ setRegister(Simulator::Register::s7, callee_saved_value);
+ setRegister(Simulator::Register::s8, callee_saved_value);
+ setRegister(Simulator::Register::s9, callee_saved_value);
+ setRegister(Simulator::Register::s10, callee_saved_value);
+ setRegister(Simulator::Register::s11, callee_saved_value);
+ setRegister(Simulator::Register::gp, callee_saved_value);
+
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1) {
+ execute<true>();
+ } else {
+ execute<false>();
+ }
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::fp));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s8));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s9));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s10));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s11));
+ MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::gp));
+
+ // Restore callee-saved registers with the original value.
+ setRegister(Simulator::Register::fp, s0_val);
+ setRegister(Simulator::Register::s1, s1_val);
+ setRegister(Simulator::Register::s2, s2_val);
+ setRegister(Simulator::Register::s3, s3_val);
+ setRegister(Simulator::Register::s4, s4_val);
+ setRegister(Simulator::Register::s5, s5_val);
+ setRegister(Simulator::Register::s6, s6_val);
+ setRegister(Simulator::Register::s7, s7_val);
+ setRegister(Simulator::Register::s8, s8_val);
+ setRegister(Simulator::Register::s9, s9_val);
+ setRegister(Simulator::Register::s10, s10_val);
+ setRegister(Simulator::Register::s11, s11_val);
+ setRegister(Simulator::Register::gp, gp_val);
+ setRegister(Simulator::Register::sp, sp_val);
+}
+
+int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int64_t original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int64_t entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount) {
+ entry_stack = entry_stack - argument_count * sizeof(int64_t);
+ } else {
+ entry_stack = entry_stack - kCArgsSlotsSize;
+ }
+
+ entry_stack &= ~U64(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+ // Setup the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg)) {
+ setRegister(argReg.code(), va_arg(parameters, int64_t));
+ } else {
+ stack_argument[i] = va_arg(parameters, int64_t);
+ }
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int64_t result = getRegister(a0);
+ return result;
+}
+
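The align-down step in Simulator::call above (entry_stack &= ~U64(ABIStackAlignment - 1)) rounds the entry stack pointer down to the ABI alignment. A small standalone sketch, assuming a power-of-two alignment such as 16:

#include <cstdint>

static uint64_t AlignStackDown(uint64_t sp, uint64_t alignment) {
  // alignment must be a power of two; clearing the low bits rounds sp down.
  return sp & ~(alignment - 1);
}
// AlignStackDown(0x7ffd1238, 16) == 0x7ffd1230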
+uintptr_t Simulator::pushAddress(uintptr_t address) {
+ int new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::popAddress() {
+ int current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator* JSContext::simulator() const { return simulator_; }
+
+#endif // JS_SIMULATOR_RISCV64
diff --git a/js/src/jit/riscv64/Simulator-riscv64.h b/js/src/jit/riscv64/Simulator-riscv64.h
new file mode 100644
index 0000000000..20a3f6e97c
--- /dev/null
+++ b/js/src/jit/riscv64/Simulator-riscv64.h
@@ -0,0 +1,1281 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_riscv64_Simulator_riscv64_h
+#define jit_riscv64_Simulator_riscv64_h
+
+#ifdef JS_SIMULATOR_RISCV64
+# include "mozilla/Atomics.h"
+
+# include <vector>
+
+# include "jit/IonTypes.h"
+# include "jit/riscv64/constant/Constant-riscv64.h"
+# include "jit/riscv64/constant/util-riscv64.h"
+# include "jit/riscv64/disasm/Disasm-riscv64.h"
+# include "js/ProfilingFrameIterator.h"
+# include "threading/Thread.h"
+# include "vm/MutexIDs.h"
+# include "wasm/WasmSignalHandlers.h"
+
+namespace js {
+
+namespace jit {
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+ static_assert(sizeof(Dest) == sizeof(Source),
+ "bit_cast requires source and destination to be the same size");
+ static_assert(std::is_trivially_copyable<Dest>::value,
+ "bit_cast requires the destination type to be copyable");
+ static_assert(std::is_trivially_copyable<Source>::value,
+ "bit_cast requires the source type to be copyable");
+
+ Dest dest;
+ memcpy(&dest, &source, sizeof(dest));
+ return dest;
+}
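Illustrative use of the bit_cast helper above; the function below is hypothetical and only demonstrates the intended round-trip.

// Round-trip a double through its raw IEEE-754 bits without aliasing UB.
static inline bool BitCastRoundTrips(double d) {
  return bit_cast<double>(bit_cast<uint64_t>(d)) == d;  // true for non-NaN d
}
// e.g. bit_cast<uint64_t>(1.0) == 0x3FF0000000000000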
+
+# define ASSERT_TRIVIALLY_COPYABLE(T) \
+ static_assert(std::is_trivially_copyable<T>::value, \
+ #T " should be trivially copyable")
+# define ASSERT_NOT_TRIVIALLY_COPYABLE(T) \
+ static_assert(!std::is_trivially_copyable<T>::value, \
+ #T " should not be trivially copyable")
+
+constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
+constexpr uint32_t kHoleNanLower32 = 0xFFF7FFFF;
+
+constexpr uint64_t kHoleNanInt64 =
+ (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
+// Safety wrapper for a 32-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value.
+class Float32 {
+ public:
+ Float32() = default;
+
+ // This constructor does not guarantee that bit pattern of the input value
+ // is preserved if the input is a NaN.
+ explicit Float32(float value) : bit_pattern_(bit_cast<uint32_t>(value)) {
+ // Check that the provided value is not a NaN, because the bit pattern of a
+ // NaN may be changed by a bit_cast, e.g. for signalling NaNs on
+ // ia32.
+ MOZ_ASSERT(!std::isnan(value));
+ }
+
+ uint32_t get_bits() const { return bit_pattern_; }
+
+ float get_scalar() const { return bit_cast<float>(bit_pattern_); }
+
+ bool is_nan() const {
+ // Even though {get_scalar()} might flip the quiet NaN bit, it's ok here,
+ // because this does not change the is_nan property.
+ return std::isnan(get_scalar());
+ }
+
+ // Return a pointer to the field storing the bit pattern. Used in code
+ // generation tests to store generated values there directly.
+ uint32_t* get_bits_address() { return &bit_pattern_; }
+
+ static constexpr Float32 FromBits(uint32_t bits) { return Float32(bits); }
+
+ private:
+ uint32_t bit_pattern_ = 0;
+
+ explicit constexpr Float32(uint32_t bit_pattern)
+ : bit_pattern_(bit_pattern) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Float32);
+
+// Safety wrapper for a 64-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value.
+// TODO(ahaas): Unify this class with Double in double.h
+class Float64 {
+ public:
+ Float64() = default;
+
+ // This constructor does not guarantee that bit pattern of the input value
+ // is preserved if the input is a NaN.
+ explicit Float64(double value) : bit_pattern_(bit_cast<uint64_t>(value)) {
+ // Check that the provided value is not a NaN, because the bit pattern of a
+ // NaN may be changed by a bit_cast, e.g. for signalling NaNs on
+ // ia32.
+ MOZ_ASSERT(!std::isnan(value));
+ }
+
+ uint64_t get_bits() const { return bit_pattern_; }
+ double get_scalar() const { return bit_cast<double>(bit_pattern_); }
+ bool is_hole_nan() const { return bit_pattern_ == kHoleNanInt64; }
+ bool is_nan() const {
+ // Even though {get_scalar()} might flip the quiet NaN bit, it's ok here,
+ // because this does not change the is_nan property.
+ return std::isnan(get_scalar());
+ }
+
+ // Return a pointer to the field storing the bit pattern. Used in code
+ // generation tests to store generated values there directly.
+ uint64_t* get_bits_address() { return &bit_pattern_; }
+
+ static constexpr Float64 FromBits(uint64_t bits) { return Float64(bits); }
+
+ private:
+ uint64_t bit_pattern_ = 0;
+
+ explicit constexpr Float64(uint64_t bit_pattern)
+ : bit_pattern_(bit_pattern) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Float64);
+
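A hedged illustration of why Float32/Float64 carry raw bits instead of plain C++ float/double values: a NaN payload survives FromBits()/get_bits() exactly, which a scalar round-trip does not guarantee.

// Float64 f = Float64::FromBits(0x7FF4000000000001ULL);  // NaN with payload
// MOZ_ASSERT(f.is_nan());
// MOZ_ASSERT(f.get_bits() == 0x7FF4000000000001ULL);     // bit pattern kept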
+class JitActivation;
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
+
+const intptr_t kPointerAlignment = 8;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC is an extra register stored after the 32 GPRs.
+const int kPCRegister = 32;
+
+// Number of coprocessor (FPU) registers.
+const int kNumFPURegisters = 32;
+
+// FPU control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const uint64_t kFPUInvalidResult64 = static_cast<uint64_t>(1ULL << 63) - 1;
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactCauseBit = 12;
+const uint32_t kFCSRUnderflowCauseBit = 13;
+const uint32_t kFCSROverflowCauseBit = 14;
+const uint32_t kFCSRDivideByZeroCauseBit = 15;
+const uint32_t kFCSRInvalidOpCauseBit = 16;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
+// -----------------------------------------------------------------------------
+// Utility types and functions for RISCV
+# ifdef JS_CODEGEN_RISCV32
+using sreg_t = int32_t;
+using reg_t = uint32_t;
+using freg_t = uint64_t;
+using sfreg_t = int64_t;
+# elif JS_CODEGEN_RISCV64
+using sreg_t = int64_t;
+using reg_t = uint64_t;
+using freg_t = uint64_t;
+using sfreg_t = int64_t;
+# else
+# error "Cannot detect Riscv's bitwidth"
+# endif
+
+# define sext32(x) ((sreg_t)(int32_t)(x))
+# define zext32(x) ((reg_t)(uint32_t)(x))
+
+# ifdef JS_CODEGEN_RISCV64
+# define sext_xlen(x) (((sreg_t)(x) << (64 - xlen)) >> (64 - xlen))
+# define zext_xlen(x) (((reg_t)(x) << (64 - xlen)) >> (64 - xlen))
+# elif JS_CODEGEN_RISCV32
+# define sext_xlen(x) (((sreg_t)(x) << (32 - xlen)) >> (32 - xlen))
+# define zext_xlen(x) (((reg_t)(x) << (32 - xlen)) >> (32 - xlen))
+# endif
+
+# define BIT(n) (0x1LL << n)
+# define QUIET_BIT_S(nan) (bit_cast<int32_t>(nan) & BIT(22))
+# define QUIET_BIT_D(nan) (bit_cast<int64_t>(nan) & BIT(51))
+static inline bool isSnan(float fp) { return !QUIET_BIT_S(fp); }
+static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
+# undef QUIET_BIT_S
+# undef QUIET_BIT_D
+
+# ifdef JS_CODEGEN_RISCV64
+inline uint64_t mulhu(uint64_t a, uint64_t b) {
+ __uint128_t full_result = ((__uint128_t)a) * ((__uint128_t)b);
+ return full_result >> 64;
+}
+
+inline int64_t mulh(int64_t a, int64_t b) {
+ __int128_t full_result = ((__int128_t)a) * ((__int128_t)b);
+ return full_result >> 64;
+}
+
+inline int64_t mulhsu(int64_t a, uint64_t b) {
+ __int128_t full_result = ((__int128_t)a) * ((__uint128_t)b);
+ return full_result >> 64;
+}
+# elif JS_CODEGEN_RISCV32
+inline uint32_t mulhu(uint32_t a, uint32_t b) {
+ uint64_t full_result = ((uint64_t)a) * ((uint64_t)b);
+ uint64_t upper_part = full_result >> 32;
+ return (uint32_t)upper_part;
+}
+
+inline int32_t mulh(int32_t a, int32_t b) {
+ int64_t full_result = ((int64_t)a) * ((int64_t)b);
+ int64_t upper_part = full_result >> 32;
+ return (int32_t)upper_part;
+}
+
+inline int32_t mulhsu(int32_t a, uint32_t b) {
+ int64_t full_result = ((int64_t)a) * ((uint64_t)b);
+ int64_t upper_part = full_result >> 32;
+ return (int32_t)upper_part;
+}
+# endif
+
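Quick illustrative checks for the high-multiply helpers above (RV64 variants):

// mulhu(1ULL << 32, 1ULL << 32) == 1   // 128-bit product is 2^64
// mulh(int64_t(-1), int64_t(-1)) == 0  // full product is 1, high half is 0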
+// Floating point helpers
+# define F32_SIGN ((uint32_t)1 << 31)
+union u32_f32 {
+ uint32_t u;
+ float f;
+};
+inline float fsgnj32(float rs1, float rs2, bool n, bool x) {
+ u32_f32 a = {.f = rs1}, b = {.f = rs2};
+ u32_f32 res;
+ res.u = (a.u & ~F32_SIGN) | ((((x) ? a.u
+ : (n) ? F32_SIGN
+ : 0) ^
+ b.u) &
+ F32_SIGN);
+ return res.f;
+}
+
+inline Float32 fsgnj32(Float32 rs1, Float32 rs2, bool n, bool x) {
+ u32_f32 a = {.u = rs1.get_bits()}, b = {.u = rs2.get_bits()};
+ u32_f32 res;
+ if (x) { // RO_FSQNJX_S
+ res.u = (a.u & ~F32_SIGN) | ((a.u ^ b.u) & F32_SIGN);
+ } else {
+ if (n) { // RO_FSGNJN_S
+ res.u = (a.u & ~F32_SIGN) | ((F32_SIGN ^ b.u) & F32_SIGN);
+ } else { // RO_FSGNJ_S
+ res.u = (a.u & ~F32_SIGN) | ((0 ^ b.u) & F32_SIGN);
+ }
+ }
+ return Float32::FromBits(res.u);
+}
+# define F64_SIGN ((uint64_t)1 << 63)
+union u64_f64 {
+ uint64_t u;
+ double d;
+};
+inline double fsgnj64(double rs1, double rs2, bool n, bool x) {
+ u64_f64 a = {.d = rs1}, b = {.d = rs2};
+ u64_f64 res;
+ res.u = (a.u & ~F64_SIGN) | ((((x) ? a.u
+ : (n) ? F64_SIGN
+ : 0) ^
+ b.u) &
+ F64_SIGN);
+ return res.d;
+}
+
+inline Float64 fsgnj64(Float64 rs1, Float64 rs2, bool n, bool x) {
+ u64_f64 a = {.d = rs1.get_scalar()}, b = {.d = rs2.get_scalar()};
+ u64_f64 res;
+ if (x) { // RO_FSQNJX_D
+ res.u = (a.u & ~F64_SIGN) | ((a.u ^ b.u) & F64_SIGN);
+ } else {
+ if (n) { // RO_FSGNJN_D
+ res.u = (a.u & ~F64_SIGN) | ((F64_SIGN ^ b.u) & F64_SIGN);
+ } else { // RO_FSGNJ_D
+ res.u = (a.u & ~F64_SIGN) | ((0 ^ b.u) & F64_SIGN);
+ }
+ }
+ return Float64::FromBits(res.u);
+}
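Worked values for the sign-injection helpers above (FSGNJ takes rs2's sign, FSGNJN its negation, FSGNJX the xor of both signs); illustrative only:

// fsgnj64(-2.0,  1.0, /*n=*/false, /*x=*/false) ==  2.0  // sign of rs2
// fsgnj64(-2.0,  1.0, /*n=*/true,  /*x=*/false) == -2.0  // negated rs2 sign
// fsgnj64(-2.0, -1.0, /*n=*/false, /*x=*/true)  ==  2.0  // sign(rs1)^sign(rs2)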
+inline bool is_boxed_float(int64_t v) { return (uint32_t)((v >> 32) + 1) == 0; }
+inline int64_t box_float(float v) {
+ return (0xFFFFFFFF00000000 | bit_cast<int32_t>(v));
+}
+
+inline uint64_t box_float(uint32_t v) { return (0xFFFFFFFF00000000 | v); }
+
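An illustration of the NaN-boxing convention behind box_float/is_boxed_float: a 32-bit float held in a 64-bit FPR is only considered valid when its upper 32 bits are all ones.

// box_float(1.5f) == int64_t(0xFFFFFFFF3FC00000)        // 0x3FC00000 == 1.5f
// is_boxed_float(int64_t(0xFFFFFFFF3FC00000)) == true
// is_boxed_float(int64_t(0x000000003FC00000)) == false  // upper half not ~0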
+// -----------------------------------------------------------------------------
+// Utility functions
+
+class SimInstructionBase : public InstructionBase {
+ public:
+ Type InstructionType() const { return type_; }
+ inline Instruction* instr() const { return instr_; }
+ inline int32_t operand() const { return operand_; }
+
+ protected:
+ SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+ explicit SimInstructionBase(Instruction* instr) {}
+
+ int32_t operand_;
+ Instruction* instr_;
+ Type type_;
+
+ private:
+ SimInstructionBase& operator=(const SimInstructionBase&) = delete;
+};
+
+class SimInstruction : public InstructionGetters<SimInstructionBase> {
+ public:
+ SimInstruction() {}
+
+ explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+ SimInstruction& operator=(Instruction* instr) {
+ operand_ = *reinterpret_cast<const int32_t*>(instr);
+ instr_ = instr;
+ type_ = InstructionBase::InstructionType();
+ MOZ_ASSERT(reinterpret_cast<void*>(&operand_) == this);
+ return *this;
+ }
+};
+
+// Per thread simulator state.
+class Simulator {
+ friend class RiscvDebugger;
+
+ public:
+ static bool FLAG_riscv_trap_to_simulator_debugger;
+ static bool FLAG_trace_sim;
+ static bool FLAG_debug_sim;
+ static bool FLAG_riscv_print_watchpoint;
+ // Registers are declared in order.
+ enum Register {
+ no_reg = -1,
+ x0 = 0,
+ x1,
+ x2,
+ x3,
+ x4,
+ x5,
+ x6,
+ x7,
+ x8,
+ x9,
+ x10,
+ x11,
+ x12,
+ x13,
+ x14,
+ x15,
+ x16,
+ x17,
+ x18,
+ x19,
+ x20,
+ x21,
+ x22,
+ x23,
+ x24,
+ x25,
+ x26,
+ x27,
+ x28,
+ x29,
+ x30,
+ x31,
+ pc,
+ kNumSimuRegisters,
+ // alias
+ zero = x0,
+ ra = x1,
+ sp = x2,
+ gp = x3,
+ tp = x4,
+ t0 = x5,
+ t1 = x6,
+ t2 = x7,
+ fp = x8,
+ s1 = x9,
+ a0 = x10,
+ a1 = x11,
+ a2 = x12,
+ a3 = x13,
+ a4 = x14,
+ a5 = x15,
+ a6 = x16,
+ a7 = x17,
+ s2 = x18,
+ s3 = x19,
+ s4 = x20,
+ s5 = x21,
+ s6 = x22,
+ s7 = x23,
+ s8 = x24,
+ s9 = x25,
+ s10 = x26,
+ s11 = x27,
+ t3 = x28,
+ t4 = x29,
+ t5 = x30,
+ t6 = x31,
+ };
+
+ // Coprocessor registers.
+ enum FPURegister {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ kNumFPURegisters,
+ // alias
+ ft0 = f0,
+ ft1 = f1,
+ ft2 = f2,
+ ft3 = f3,
+ ft4 = f4,
+ ft5 = f5,
+ ft6 = f6,
+ ft7 = f7,
+ fs0 = f8,
+ fs1 = f9,
+ fa0 = f10,
+ fa1 = f11,
+ fa2 = f12,
+ fa3 = f13,
+ fa4 = f14,
+ fa5 = f15,
+ fa6 = f16,
+ fa7 = f17,
+ fs2 = f18,
+ fs3 = f19,
+ fs4 = f20,
+ fs5 = f21,
+ fs6 = f22,
+ fs7 = f23,
+ fs8 = f24,
+ fs9 = f25,
+ fs10 = f26,
+ fs11 = f27,
+ ft8 = f28,
+ ft9 = f29,
+ ft10 = f30,
+ ft11 = f31
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create();
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods
+ // above.
+ Simulator();
+ ~Simulator();
+
+ // RISCV decoding routine
+ void DecodeRVRType();
+ void DecodeRVR4Type();
+ void DecodeRVRFPType(); // Special routine for R/OP_FP type
+ void DecodeRVRAType(); // Special routine for R/AMO type
+ void DecodeRVIType();
+ void DecodeRVSType();
+ void DecodeRVBType();
+ void DecodeRVUType();
+ void DecodeRVJType();
+ void DecodeCRType();
+ void DecodeCAType();
+ void DecodeCIType();
+ void DecodeCIWType();
+ void DecodeCSSType();
+ void DecodeCLType();
+ void DecodeCSType();
+ void DecodeCJType();
+ void DecodeCBType();
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ void DecodeVType();
+ void DecodeRvvIVV();
+ void DecodeRvvIVI();
+ void DecodeRvvIVX();
+ void DecodeRvvMVV();
+ void DecodeRvvMVX();
+ void DecodeRvvFVV();
+ void DecodeRvvFVF();
+ bool DecodeRvvVL();
+ bool DecodeRvvVS();
+# endif
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+  // Accessors for register state. The pc can also be accessed here, but the
+  // dedicated set_pc()/get_pc() helpers declared below are the preferred way
+  // to read and write the raw pc value.
+ void setRegister(int reg, int64_t value);
+ int64_t getRegister(int reg) const;
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int64_t value);
+ void setFpuRegisterLo(int fpureg, int32_t value);
+ void setFpuRegisterHi(int fpureg, int32_t value);
+ void setFpuRegisterFloat(int fpureg, float value);
+ void setFpuRegisterDouble(int fpureg, double value);
+ void setFpuRegisterFloat(int fpureg, Float32 value);
+ void setFpuRegisterDouble(int fpureg, Float64 value);
+
+ int64_t getFpuRegister(int fpureg) const;
+ int32_t getFpuRegisterLo(int fpureg) const;
+ int32_t getFpuRegisterHi(int fpureg) const;
+ float getFpuRegisterFloat(int fpureg) const;
+ double getFpuRegisterDouble(int fpureg) const;
+ Float32 getFpuRegisterFloat32(int fpureg) const;
+ Float64 getFpuRegisterFloat64(int fpureg) const;
+
+ inline int16_t shamt6() const { return (imm12() & 0x3F); }
+ inline int16_t shamt5() const { return (imm12() & 0x1F); }
+ inline int16_t rvc_shamt6() const { return instr_.RvcShamt6(); }
+ inline int32_t s_imm12() const { return instr_.StoreOffset(); }
+ inline int32_t u_imm20() const { return instr_.Imm20UValue() << 12; }
+ inline int32_t rvc_u_imm6() const { return instr_.RvcImm6Value() << 12; }
+ inline void require(bool check) {
+ if (!check) {
+ SignalException(kIllegalInstruction);
+ }
+ }
+
+ // Special case of setRegister and getRegister to access the raw PC value.
+ void set_pc(int64_t value);
+ int64_t get_pc() const;
+
+ SimInstruction instr_;
+  // RISCV utility API to access register values
+ // Helpers for data value tracing.
+ enum TraceType {
+ BYTE,
+ HALF,
+ WORD,
+# if JS_CODEGEN_RISCV64
+ DWORD,
+# endif
+ FLOAT,
+ DOUBLE,
+ // FLOAT_DOUBLE,
+ // WORD_DWORD
+ };
+ inline int32_t rs1_reg() const { return instr_.Rs1Value(); }
+ inline sreg_t rs1() const { return getRegister(rs1_reg()); }
+ inline float frs1() const { return getFpuRegisterFloat(rs1_reg()); }
+ inline double drs1() const { return getFpuRegisterDouble(rs1_reg()); }
+ inline Float32 frs1_boxed() const { return getFpuRegisterFloat32(rs1_reg()); }
+ inline Float64 drs1_boxed() const { return getFpuRegisterFloat64(rs1_reg()); }
+ inline int32_t rs2_reg() const { return instr_.Rs2Value(); }
+ inline sreg_t rs2() const { return getRegister(rs2_reg()); }
+ inline float frs2() const { return getFpuRegisterFloat(rs2_reg()); }
+ inline double drs2() const { return getFpuRegisterDouble(rs2_reg()); }
+ inline Float32 frs2_boxed() const { return getFpuRegisterFloat32(rs2_reg()); }
+ inline Float64 drs2_boxed() const { return getFpuRegisterFloat64(rs2_reg()); }
+ inline int32_t rs3_reg() const { return instr_.Rs3Value(); }
+ inline sreg_t rs3() const { return getRegister(rs3_reg()); }
+ inline float frs3() const { return getFpuRegisterFloat(rs3_reg()); }
+ inline double drs3() const { return getFpuRegisterDouble(rs3_reg()); }
+ inline Float32 frs3_boxed() const { return getFpuRegisterFloat32(rs3_reg()); }
+ inline Float64 drs3_boxed() const { return getFpuRegisterFloat64(rs3_reg()); }
+ inline int32_t rd_reg() const { return instr_.RdValue(); }
+ inline int32_t frd_reg() const { return instr_.RdValue(); }
+ inline int32_t rvc_rs1_reg() const { return instr_.RvcRs1Value(); }
+ inline sreg_t rvc_rs1() const { return getRegister(rvc_rs1_reg()); }
+ inline int32_t rvc_rs2_reg() const { return instr_.RvcRs2Value(); }
+ inline sreg_t rvc_rs2() const { return getRegister(rvc_rs2_reg()); }
+ inline double rvc_drs2() const { return getFpuRegisterDouble(rvc_rs2_reg()); }
+ inline int32_t rvc_rs1s_reg() const { return instr_.RvcRs1sValue(); }
+ inline sreg_t rvc_rs1s() const { return getRegister(rvc_rs1s_reg()); }
+ inline int32_t rvc_rs2s_reg() const { return instr_.RvcRs2sValue(); }
+ inline sreg_t rvc_rs2s() const { return getRegister(rvc_rs2s_reg()); }
+ inline double rvc_drs2s() const {
+ return getFpuRegisterDouble(rvc_rs2s_reg());
+ }
+ inline int32_t rvc_rd_reg() const { return instr_.RvcRdValue(); }
+ inline int32_t rvc_frd_reg() const { return instr_.RvcRdValue(); }
+ inline int16_t boffset() const { return instr_.BranchOffset(); }
+ inline int16_t imm12() const { return instr_.Imm12Value(); }
+ inline int32_t imm20J() const { return instr_.Imm20JValue(); }
+ inline int32_t imm5CSR() const { return instr_.Rs1Value(); }
+ inline int16_t csr_reg() const { return instr_.CsrValue(); }
+ inline int16_t rvc_imm6() const { return instr_.RvcImm6Value(); }
+ inline int16_t rvc_imm6_addi16sp() const {
+ return instr_.RvcImm6Addi16spValue();
+ }
+ inline int16_t rvc_imm8_addi4spn() const {
+ return instr_.RvcImm8Addi4spnValue();
+ }
+ inline int16_t rvc_imm6_lwsp() const { return instr_.RvcImm6LwspValue(); }
+ inline int16_t rvc_imm6_ldsp() const { return instr_.RvcImm6LdspValue(); }
+ inline int16_t rvc_imm6_swsp() const { return instr_.RvcImm6SwspValue(); }
+ inline int16_t rvc_imm6_sdsp() const { return instr_.RvcImm6SdspValue(); }
+ inline int16_t rvc_imm5_w() const { return instr_.RvcImm5WValue(); }
+ inline int16_t rvc_imm5_d() const { return instr_.RvcImm5DValue(); }
+ inline int16_t rvc_imm8_b() const { return instr_.RvcImm8BValue(); }
+
+ // Helper for debugging memory access.
+ inline void DieOrDebug();
+
+# if JS_CODEGEN_RISCV32
+ template <typename T>
+ void TraceRegWr(T value, TraceType t = WORD);
+# elif JS_CODEGEN_RISCV64
+ void TraceRegWr(sreg_t value, TraceType t = DWORD);
+# endif
+ void TraceMemWr(sreg_t addr, sreg_t value, TraceType t);
+ template <typename T>
+ void TraceMemRd(sreg_t addr, T value, sreg_t reg_value);
+ void TraceMemRdDouble(sreg_t addr, double value, int64_t reg_value);
+ void TraceMemRdDouble(sreg_t addr, Float64 value, int64_t reg_value);
+ void TraceMemRdFloat(sreg_t addr, Float32 value, int64_t reg_value);
+
+ template <typename T>
+ void TraceLr(sreg_t addr, T value, sreg_t reg_value);
+
+ template <typename T>
+ void TraceSc(sreg_t addr, T value);
+
+ template <typename T>
+ void TraceMemWr(sreg_t addr, T value);
+ void TraceMemWrDouble(sreg_t addr, double value);
+
+ inline void set_rd(sreg_t value, bool trace = true) {
+ setRegister(rd_reg(), value);
+# if JS_CODEGEN_RISCV64
+ if (trace) TraceRegWr(getRegister(rd_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+ if (trace) TraceRegWr(getRegister(rd_reg()), WORD);
+# endif
+ }
+ inline void set_frd(float value, bool trace = true) {
+ setFpuRegisterFloat(rd_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rd_reg()), FLOAT);
+ }
+ inline void set_frd(Float32 value, bool trace = true) {
+ setFpuRegisterFloat(rd_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rd_reg()), FLOAT);
+ }
+ inline void set_drd(double value, bool trace = true) {
+ setFpuRegisterDouble(rd_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rd_reg()), DOUBLE);
+ }
+ inline void set_drd(Float64 value, bool trace = true) {
+ setFpuRegisterDouble(rd_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rd_reg()), DOUBLE);
+ }
+ inline void set_rvc_rd(sreg_t value, bool trace = true) {
+ setRegister(rvc_rd_reg(), value);
+# if JS_CODEGEN_RISCV64
+ if (trace) TraceRegWr(getRegister(rvc_rd_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+ if (trace) TraceRegWr(getRegister(rvc_rd_reg()), WORD);
+# endif
+ }
+ inline void set_rvc_rs1s(sreg_t value, bool trace = true) {
+ setRegister(rvc_rs1s_reg(), value);
+# if JS_CODEGEN_RISCV64
+ if (trace) TraceRegWr(getRegister(rvc_rs1s_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+ if (trace) TraceRegWr(getRegister(rvc_rs1s_reg()), WORD);
+# endif
+ }
+ inline void set_rvc_rs2(sreg_t value, bool trace = true) {
+ setRegister(rvc_rs2_reg(), value);
+# if JS_CODEGEN_RISCV64
+ if (trace) TraceRegWr(getRegister(rvc_rs2_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+ if (trace) TraceRegWr(getRegister(rvc_rs2_reg()), WORD);
+# endif
+ }
+ inline void set_rvc_drd(double value, bool trace = true) {
+ setFpuRegisterDouble(rvc_rd_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rvc_rd_reg()), DOUBLE);
+ }
+ inline void set_rvc_drd(Float64 value, bool trace = true) {
+ setFpuRegisterDouble(rvc_rd_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rvc_rd_reg()), DOUBLE);
+ }
+ inline void set_rvc_frd(Float32 value, bool trace = true) {
+ setFpuRegisterFloat(rvc_rd_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rvc_rd_reg()), DOUBLE);
+ }
+ inline void set_rvc_rs2s(sreg_t value, bool trace = true) {
+ setRegister(rvc_rs2s_reg(), value);
+# if JS_CODEGEN_RISCV64
+ if (trace) TraceRegWr(getRegister(rvc_rs2s_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+ if (trace) TraceRegWr(getRegister(rvc_rs2s_reg()), WORD);
+# endif
+ }
+ inline void set_rvc_drs2s(double value, bool trace = true) {
+ setFpuRegisterDouble(rvc_rs2s_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rvc_rs2s_reg()), DOUBLE);
+ }
+ inline void set_rvc_drs2s(Float64 value, bool trace = true) {
+ setFpuRegisterDouble(rvc_rs2s_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rvc_rs2s_reg()), DOUBLE);
+ }
+
+ inline void set_rvc_frs2s(Float32 value, bool trace = true) {
+ setFpuRegisterFloat(rvc_rs2s_reg(), value);
+ if (trace) TraceRegWr(getFpuRegister(rvc_rs2s_reg()), FLOAT);
+ }
+
+ uint32_t get_dynamic_rounding_mode() { return read_csr_value(csr_frm); }
+
+  // Helper functions to read/write/set/clear CSR values/bits.
+ uint32_t read_csr_value(uint32_t csr) {
+ switch (csr) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ return (FCSR_ & kFcsrFlagsMask);
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ return (FCSR_ & kFcsrFrmMask) >> kFcsrFrmShift;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ return (FCSR_ & kFcsrMask);
+ default:
+ MOZ_CRASH("UNIMPLEMENTED");
+ }
+ }
+
+ void write_csr_value(uint32_t csr, reg_t val) {
+ uint32_t value = (uint32_t)val;
+ switch (csr) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ MOZ_ASSERT(value <= ((1 << kFcsrFlagsBits) - 1));
+ FCSR_ = (FCSR_ & (~kFcsrFlagsMask)) | value;
+ break;
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ MOZ_ASSERT(value <= ((1 << kFcsrFrmBits) - 1));
+ FCSR_ = (FCSR_ & (~kFcsrFrmMask)) | (value << kFcsrFrmShift);
+ break;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ MOZ_ASSERT(value <= ((1 << kFcsrBits) - 1));
+ FCSR_ = (FCSR_ & (~kFcsrMask)) | value;
+ break;
+ default:
+ MOZ_CRASH("UNIMPLEMENTED");
+ }
+ }
+
+ void set_csr_bits(uint32_t csr, reg_t val) {
+ uint32_t value = (uint32_t)val;
+ switch (csr) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ MOZ_ASSERT(value <= ((1 << kFcsrFlagsBits) - 1));
+ FCSR_ = FCSR_ | value;
+ break;
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ MOZ_ASSERT(value <= ((1 << kFcsrFrmBits) - 1));
+ FCSR_ = FCSR_ | (value << kFcsrFrmShift);
+ break;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ MOZ_ASSERT(value <= ((1 << kFcsrBits) - 1));
+ FCSR_ = FCSR_ | value;
+ break;
+ default:
+ MOZ_CRASH("UNIMPLEMENTED");
+ }
+ }
+
+ void clear_csr_bits(uint32_t csr, reg_t val) {
+ uint32_t value = (uint32_t)val;
+ switch (csr) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ MOZ_ASSERT(value <= ((1 << kFcsrFlagsBits) - 1));
+ FCSR_ = FCSR_ & (~value);
+ break;
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ MOZ_ASSERT(value <= ((1 << kFcsrFrmBits) - 1));
+ FCSR_ = FCSR_ & (~(value << kFcsrFrmShift));
+ break;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ MOZ_ASSERT(value <= ((1 << kFcsrBits) - 1));
+ FCSR_ = FCSR_ & (~value);
+ break;
+ default:
+ MOZ_CRASH("UNIMPLEMENTED");
+ }
+ }
+
+ bool test_fflags_bits(uint32_t mask) {
+ return (FCSR_ & kFcsrFlagsMask & mask) != 0;
+ }
+
+ void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
+ void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }
+
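Illustrative use of the fflags helpers above, reusing the kInvalidOperation mask that the canonicalization helpers later in this header also use:

// set_fflags(kInvalidOperation);                 // raise the NV flag
// bool raised = test_fflags_bits(kInvalidOperation);
// clear_fflags(kInvalidOperation);               // acknowledge and clear it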
+ float RoundF2FHelper(float input_val, int rmode);
+ double RoundF2FHelper(double input_val, int rmode);
+ template <typename I_TYPE, typename F_TYPE>
+ I_TYPE RoundF2IHelper(F_TYPE original, int rmode);
+
+ template <typename T>
+ T FMaxMinHelper(T a, T b, MaxMinKind kind);
+
+ template <typename T>
+ bool CompareFHelper(T input1, T input2, FPUCondition cc);
+
+ template <typename T>
+ T get_pc_as() const {
+ return reinterpret_cast<T>(get_pc());
+ }
+
+ void enable_single_stepping(SingleStepCallback cb, void* arg);
+ void disable_single_stepping();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+  // Executes RISC-V instructions until the PC reaches end_sim_pc.
+ template <bool enableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int64_t call(uint8_t* entry, int argument_count, ...);
+
+ // Push an address onto the JS stack.
+ uintptr_t pushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t popAddress();
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+ // Returns true if pc register contains one of the 'SpecialValues' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum SpecialValues {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ bool init();
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void format(SimInstruction* instr, const char* format);
+
+ // Read and write memory.
+ // RISCV Memory read/write methods
+ template <typename T>
+ T ReadMem(sreg_t addr, Instruction* instr);
+ template <typename T>
+ void WriteMem(sreg_t addr, T value, Instruction* instr);
+ template <typename T, typename OP>
+ T amo(sreg_t addr, OP f, Instruction* instr, TraceType t) {
+ auto lhs = ReadMem<T>(addr, instr);
+ // TODO(RISCV): trace memory read for AMO
+ WriteMem<T>(addr, (T)f(lhs), instr);
+ return lhs;
+ }
+
+ inline int32_t loadLinkedW(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalW(uint64_t addr, int32_t value,
+ SimInstruction* instr);
+
+ inline int64_t loadLinkedD(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr);
+
+ // Used for breakpoints and traps.
+ void SoftwareInterrupt();
+
+ // Stop helper functions.
+ bool isWatchpoint(uint32_t code);
+ bool IsTracepoint(uint32_t code);
+ void printWatchpoint(uint32_t code);
+ void handleStop(uint32_t code);
+ bool isStopInstruction(SimInstruction* instr);
+ bool isEnabledStop(uint32_t code);
+ void enableStop(uint32_t code);
+ void disableStop(uint32_t code);
+ void increaseStopCounter(uint32_t code);
+ void printStopInfo(uint32_t code);
+
+ // Simulator breakpoints.
+ struct Breakpoint {
+ SimInstruction* location;
+ bool enabled;
+ bool is_tbreak;
+ };
+ std::vector<Breakpoint> breakpoints_;
+ void SetBreakpoint(SimInstruction* breakpoint, bool is_tbreak);
+ void ListBreakpoints();
+ void CheckBreakpoints();
+
+ JS::ProfilingFrameIterator::RegisterState registerState();
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ // This method is rather hot so inline the normal (no-wasm) case.
+ bool MOZ_ALWAYS_INLINE handleWasmSegFault(uint64_t addr, unsigned numBytes) {
+ if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+ return false;
+ }
+
+ uint8_t* newPC;
+ if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes,
+ &newPC)) {
+ return false;
+ }
+
+ LLBit_ = false;
+ set_pc(int64_t(newPC));
+ return true;
+ }
+
+ // Executes one instruction.
+ void InstructionDecode(Instruction* instr);
+
+ // ICache.
+ // static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ // Instruction* instr);
+ // static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t
+ // start,
+ // size_t size);
+ // static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ // void* page);
+ template <typename T, typename Func>
+ inline T CanonicalizeFPUOpFMA(Func fn, T dst, T src1, T src2) {
+ static_assert(std::is_floating_point<T>::value);
+ auto alu_out = fn(dst, src1, src2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2) ||
+ std::isnan(dst)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(src1) || isSnan(src2) || isSnan(dst))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<T>::quiet_NaN();
+ }
+ return alu_out;
+ }
+
+ template <typename T, typename Func>
+ inline T CanonicalizeFPUOp3(Func fn) {
+ static_assert(std::is_floating_point<T>::value);
+ T src1 = std::is_same<float, T>::value ? frs1() : drs1();
+ T src2 = std::is_same<float, T>::value ? frs2() : drs2();
+ T src3 = std::is_same<float, T>::value ? frs3() : drs3();
+ auto alu_out = fn(src1, src2, src3);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2) ||
+ std::isnan(src3)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(src1) || isSnan(src2) || isSnan(src3))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<T>::quiet_NaN();
+ }
+ return alu_out;
+ }
+
+ template <typename T, typename Func>
+ inline T CanonicalizeFPUOp2(Func fn) {
+ static_assert(std::is_floating_point<T>::value);
+ T src1 = std::is_same<float, T>::value ? frs1() : drs1();
+ T src2 = std::is_same<float, T>::value ? frs2() : drs2();
+ auto alu_out = fn(src1, src2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(src1) || isSnan(src2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<T>::quiet_NaN();
+ }
+ return alu_out;
+ }
+
+ template <typename T, typename Func>
+ inline T CanonicalizeFPUOp1(Func fn) {
+ static_assert(std::is_floating_point<T>::value);
+ T src1 = std::is_same<float, T>::value ? frs1() : drs1();
+ auto alu_out = fn(src1);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(src1)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(src1)) set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<T>::quiet_NaN();
+ }
+ return alu_out;
+ }
+
+ template <typename Func>
+ inline float CanonicalizeDoubleToFloatOperation(Func fn) {
+ float alu_out = fn(drs1());
+ if (std::isnan(alu_out) || std::isnan(drs1()))
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ return alu_out;
+ }
+
+ template <typename Func>
+ inline float CanonicalizeDoubleToFloatOperation(Func fn, double frs) {
+ float alu_out = fn(frs);
+ if (std::isnan(alu_out) || std::isnan(drs1()))
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ return alu_out;
+ }
+
+ template <typename Func>
+  inline double CanonicalizeFloatToDoubleOperation(Func fn, float frs) {
+ double alu_out = fn(frs);
+ if (std::isnan(alu_out) || std::isnan(frs1()))
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ return alu_out;
+ }
+
+ template <typename Func>
+  inline double CanonicalizeFloatToDoubleOperation(Func fn) {
+ double alu_out = fn(frs1());
+ if (std::isnan(alu_out) || std::isnan(frs1()))
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ return alu_out;
+ }
+
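A sketch of how the canonicalization templates above are typically invoked from the FP decode cases in the .cpp file (illustrative; the exact case bodies are not part of this header):

// case RO_FADD_D:
//   set_drd(CanonicalizeFPUOp2<double>(
//       [](double a, double b) { return a + b; }));
//   break;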
+ public:
+ static int64_t StopSimAt;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type);
+
+ private:
+ enum Exception {
+ none,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions,
+    // RISCV illegal instruction exception.
+ kIllegalInstruction,
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void SignalException(Exception e);
+
+ // Handle return value for runtime FP functions.
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+ void setCallResult(__int128 res);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ bool LLBit_;
+ uintptr_t LLAddr_;
+ int64_t lastLLValue_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int64_t icount_;
+ int64_t break_count_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ intptr_t* watch_address_ = nullptr;
+ intptr_t watch_value_ = 0;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+ EmbeddedVector<char, 256> trace_buf_;
+
+ // Single-stepping support
+ bool single_stepping_;
+ SingleStepCallback single_step_callback_;
+ void* single_step_callback_arg_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1U << 31;
+
+ // A stop is enabled, meaning the simulator will stop when meeting the
+ // instruction, if bit 31 of watchedStops_[code].count is unset.
+ // The value watchedStops_[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
+ struct StopCountAndDesc {
+ uint32_t count_;
+ char* desc_;
+ };
+ StopCountAndDesc watchedStops_[kNumOfWatchedStops];
+};
+
+// Process wide simulator state.
+class SimulatorProcess {
+ friend class Redirection;
+ friend class AutoLockSimulatorCache;
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ static mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ ICacheCheckingDisableCount;
+ static void FlushICache(void* start, size_t size);
+
+ static void checkICacheLocked(SimInstruction* instr);
+
+ static bool initialize() {
+ singleton_ = js_new<SimulatorProcess>();
+ return singleton_;
+ }
+ static void destroy() {
+ js_delete(singleton_);
+ singleton_ = nullptr;
+ }
+
+ SimulatorProcess();
+ ~SimulatorProcess();
+
+ private:
+ static SimulatorProcess* singleton_;
+
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_ MOZ_UNANNOTATED;
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ static ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->icache_;
+ }
+
+ static Redirection* redirection() {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->redirection_;
+ }
+
+ static void setRedirection(js::jit::Redirection* redirection) {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ singleton_->redirection_ = redirection;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_RISCV64 */
+
+#endif /* jit_riscv64_Simulator_riscv64_h */
diff --git a/js/src/jit/riscv64/Trampoline-riscv64.cpp b/js/src/jit/riscv64/Trampoline-riscv64.cpp
new file mode 100644
index 0000000000..6a8782ddfd
--- /dev/null
+++ b/js/src/jit/riscv64/Trampoline-riscv64.cpp
@@ -0,0 +1,856 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/riscv64/SharedICRegisters-riscv64.h"
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// This file generates the JIT trampolines for the RISC-V backend (EnterJIT,
+// the bailout thunks, and related stubs), and also includes implementations
+// for assorted things which can't be implemented in headers.
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // Push the frameSize_ stored in ra
+ // See: CodeGeneratorLOONG64::generateOutOfLineCode()
+ masm.push(ra);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Put pointer to BailoutStack as first argument to the Bailout()
+ masm.movePtr(StackPointer, spArg);
+}
+
+struct EnterJITRegs {
+ double fs11;
+ double fs10;
+ double fs9;
+ double fs8;
+ double fs7;
+ double fs6;
+ double fs5;
+ double fs4;
+ double fs3;
+ double fs2;
+ double fs1;
+ double fs0;
+
+ // uintptr_t align;
+
+ // non-volatile registers.
+ uint64_t ra;
+ uint64_t sp;
+ uint64_t fp;
+ uint64_t gp;
+ uint64_t s11;
+ uint64_t s10;
+ uint64_t s9;
+ uint64_t s8;
+ uint64_t s7;
+ uint64_t s6;
+ uint64_t s5;
+ uint64_t s4;
+ uint64_t s3;
+ uint64_t s2;
+ uint64_t s1;
+  // Save reg_vp (a7) on the stack; it is used after calling the JIT code.
+ uint64_t a7;
+};
+
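The prologue and epilogue below address this struct purely via offsetof and reserve exactly sizeof(EnterJITRegs) of stack, so the struct size also determines the stack adjustment. A compile-time check one could add (illustrative only, not part of the patch):

// static_assert(sizeof(EnterJITRegs) % 16 == 0,
//               "EnterJITRegs must keep the stack 16-byte aligned");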
+static void GenerateReturn(MacroAssembler& masm, int returnCode) {
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ // Restore non-volatile registers
+ masm.ld(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.ld(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.ld(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.ld(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.ld(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.ld(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.ld(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.ld(s8, StackPointer, offsetof(EnterJITRegs, s8));
+ masm.ld(s9, StackPointer, offsetof(EnterJITRegs, s9));
+ masm.ld(s10, StackPointer, offsetof(EnterJITRegs, s10));
+ masm.ld(s11, StackPointer, offsetof(EnterJITRegs, s11));
+ masm.ld(gp, StackPointer, offsetof(EnterJITRegs, gp));
+ masm.ld(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.ld(sp, StackPointer, offsetof(EnterJITRegs, sp));
+ masm.ld(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.fld(fs11, StackPointer, offsetof(EnterJITRegs, fs11));
+ masm.fld(fs10, StackPointer, offsetof(EnterJITRegs, fs10));
+ masm.fld(fs9, StackPointer, offsetof(EnterJITRegs, fs9));
+ masm.fld(fs8, StackPointer, offsetof(EnterJITRegs, fs8));
+ masm.fld(fs7, StackPointer, offsetof(EnterJITRegs, fs7));
+ masm.fld(fs6, StackPointer, offsetof(EnterJITRegs, fs6));
+ masm.fld(fs5, StackPointer, offsetof(EnterJITRegs, fs5));
+ masm.fld(fs4, StackPointer, offsetof(EnterJITRegs, fs4));
+ masm.fld(fs3, StackPointer, offsetof(EnterJITRegs, fs3));
+ masm.fld(fs2, StackPointer, offsetof(EnterJITRegs, fs2));
+ masm.fld(fs1, StackPointer, offsetof(EnterJITRegs, fs1));
+ masm.fld(fs0, StackPointer, offsetof(EnterJITRegs, fs0));
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void GeneratePrologue(MacroAssembler& masm) {
+ masm.reserveStack(sizeof(EnterJITRegs));
+
+ masm.sd(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.sd(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.sd(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.sd(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.sd(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.sd(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.sd(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.sd(s8, StackPointer, offsetof(EnterJITRegs, s8));
+ masm.sd(s9, StackPointer, offsetof(EnterJITRegs, s9));
+ masm.sd(s10, StackPointer, offsetof(EnterJITRegs, s10));
+ masm.sd(s11, StackPointer, offsetof(EnterJITRegs, s11));
+ masm.sd(gp, StackPointer, offsetof(EnterJITRegs, gp));
+ masm.sd(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.sd(sp, StackPointer, offsetof(EnterJITRegs, sp));
+ masm.sd(ra, StackPointer, offsetof(EnterJITRegs, ra));
+ masm.sd(a7, StackPointer, offsetof(EnterJITRegs, a7));
+
+ masm.fsd(fs11, StackPointer, offsetof(EnterJITRegs, fs11));
+ masm.fsd(fs10, StackPointer, offsetof(EnterJITRegs, fs10));
+ masm.fsd(fs9, StackPointer, offsetof(EnterJITRegs, fs9));
+ masm.fsd(fs8, StackPointer, offsetof(EnterJITRegs, fs8));
+ masm.fsd(fs7, StackPointer, offsetof(EnterJITRegs, fs7));
+ masm.fsd(fs6, StackPointer, offsetof(EnterJITRegs, fs6));
+ masm.fsd(fs5, StackPointer, offsetof(EnterJITRegs, fs5));
+ masm.fsd(fs4, StackPointer, offsetof(EnterJITRegs, fs4));
+ masm.fsd(fs3, StackPointer, offsetof(EnterJITRegs, fs3));
+ masm.fsd(fs2, StackPointer, offsetof(EnterJITRegs, fs2));
+ masm.fsd(fs1, StackPointer, offsetof(EnterJITRegs, fs1));
+ masm.fsd(fs0, StackPointer, offsetof(EnterJITRegs, fs0));
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, a0);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movePtr(StackPointer, a1);
+
+ // Call the bailout function.
+ using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ // Get the bailoutInfo outparam.
+ masm.pop(a2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+// Generates a trampoline for calling Jit compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard RISC-V
+// C calling convention.
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ const Register reg_code = IntArgReg0;
+ const Register reg_argc = IntArgReg1;
+ const Register reg_argv = IntArgReg2;
+ const mozilla::DebugOnly<Register> reg_frame = IntArgReg3;
+ const Register reg_token = IntArgReg4;
+ const Register reg_chain = IntArgReg5;
+ const Register reg_values = IntArgReg6;
+ const Register reg_vp = IntArgReg7;
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ GeneratePrologue(masm);
+
+ // Save stack pointer as baseline frame.
+ masm.movePtr(StackPointer, FramePointer);
+
+ // Load the number of actual arguments into s3.
+ masm.unboxInt32(Address(reg_vp, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+  // If constructing, the argument count also needs to include newTarget.
+ JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, reg_token,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.add32(Imm32(1), reg_argc);
+
+ masm.bind(&noNewTarget);
+ }
+ JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+  // Keep the stack 16-byte aligned while pushing the argument Values.
+ masm.ma_and(s2, reg_argc, Imm32(1));
+ masm.ma_sub64(s1, zero, Imm32(sizeof(Value)));
+ Label no_zero;
+ masm.ma_branch(&no_zero, Assembler::Condition::Equal, s2, Operand(0));
+ masm.mv(s1, zero);
+ masm.bind(&no_zero);
+ masm.ma_add64(StackPointer, StackPointer, s1);
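+  // Editor's reading of the sequence above: s2 holds argc & 1 and s1 is
+  // preloaded with -int64_t(sizeof(Value)). The branch skips the mv when
+  // s2 == 0, so sp is bumped down by one Value-sized slot for even argc and
+  // left unchanged for odd argc, keeping the stack JitStackAlignment-aligned
+  // across the argument copy below.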
+
+ masm.slli(s2, reg_argc, 3); // Value* argv
+ masm.addPtr(reg_argv, s2); // s2 = &argv[argc]
+ JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+ // If there aren't any arguments, don't do anything
+ masm.ma_b(s2, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(sizeof(Value)), s2);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6);
+ masm.loadValue(Address(s2, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s2, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+ JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+ masm.push(reg_token);
+ masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, s3, s3);
+
+ CodeLabel returnLabel;
+ Label oomReturnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(OsrFrameReg);
+ regs.take(reg_code);
+ MOZ_ASSERT(!regs.has(ReturnReg), "ReturnReg matches reg_code");
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+ Register numStackValues = reg_values;
+ regs.take(numStackValues);
+ Register scratch = regs.takeAny();
+
+ // Push return address.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, &returnLabel);
+ masm.storePtr(scratch, Address(StackPointer, 0));
+
+ // Push previous frame pointer.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = FramePointer;
+ masm.movePtr(StackPointer, framePtr);
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+
+ Register framePtrScratch = regs.takeAny();
+ masm.movePtr(sp, framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ masm.slli(scratch, numStackValues, 3);
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.reserveStack(3 * sizeof(uintptr_t));
+ masm.storePtr(
+ ImmWord(MakeFrameDescriptor(FrameType::BaselineJS)),
+ Address(StackPointer, 2 * sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(
+ zero, Address(StackPointer, sizeof(uintptr_t))); // fake return address
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // No GC things to mark, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr,
+ Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtrScratch); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ regs.add(OsrFrameReg);
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ Label error;
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(framePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&oomReturnLabel);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.ma_or(R1.scratchReg(), reg_chain, zero);
+ }
+ JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+ // The call will push the return address and frame pointer on the stack, thus
+ // we check that the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+ // Call the function with pushing return address to stack.
+ masm.callJitNoProfiler(reg_code);
+
+ {
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ masm.addCodeLabel(returnLabel);
+ masm.bind(&oomReturnLabel);
+ }
+
+ // Discard arguments and padding. Set sp to the address of the EnterJITRegs
+ // on the stack.
+ masm.mov(FramePointer, StackPointer);
+
+ // Store the returned value into the vp
+ masm.ld(reg_vp, StackPointer, offsetof(EnterJITRegs, a7));
+ masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
+ JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ return mozilla::Nothing{};
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+  // Stack has to be aligned here. If not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+ // Reserve place for BailoutInfo pointer. Two words to ensure alignment for
+ // setupAlignedABICall.
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a1);
+
+ using Fn =
+ bool (*)(InvalidationBailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(a2);
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+  // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ // Do not erase the frame pointer in this function.
+
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+ masm.pushReturnAddress();
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ // Load argc.
+ masm.loadNumActualArgs(FramePointer, s3);
+
+ Register numActArgsReg = a6;
+ Register calleeTokenReg = a7;
+ Register numArgsReg = a5;
+
+ // Load |nformals| into numArgsReg.
+ masm.loadPtr(
+ Address(FramePointer, RectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+ masm.mov(calleeTokenReg, numArgsReg);
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), numArgsReg);
+ masm.loadFunctionArgCount(numArgsReg, numArgsReg);
+
+ // Stash another copy in t3, since we are going to do destructive operations
+ // on numArgsReg
+ masm.mov(numArgsReg, t3);
+
+ static_assert(
+ CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.mov(calleeTokenReg, t2);
+ masm.ma_and(t2, t2, Imm32(uint32_t(CalleeToken_FunctionConstructing)));
+
+ // Including |this|, and |new.target|, there are (|nformals| + 1 +
+ // isConstructing) arguments to push to the stack. Then we push a
+ // JitFrameLayout. We compute the padding expressed in the number of extra
+ // |undefined| values to push on the stack.
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(
+ JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(JitStackValueAlignment));
+ masm.add32(
+ Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */),
+ numArgsReg);
+ masm.add32(t2, numArgsReg);
+ masm.and32(Imm32(~(JitStackValueAlignment - 1)), numArgsReg);
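+  // Editor's worked example, assuming JitStackValueAlignment == 2: with
+  // nformals == 3 and a non-constructing call, numArgsReg becomes
+  // (3 + 1 + 1) & ~1 == 4, i.e. |this| plus the three formals fill four
+  // Value slots above the JitFrameLayout with no extra padding needed.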
+
+ // Load the number of |undefined|s to push into t1. Subtract 1 for |this|.
+ masm.ma_sub64(t1, numArgsReg, s3);
+ masm.sub32(Imm32(1), t1);
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ] <- sp
+ // '--- s3 ----'
+ //
+ // Rectifier frame:
+ // [fp'] [undef] [undef] [undef] [arg2] [arg1] [this] [ [argc] [callee]
+ // [descr] [raddr] ]
+ // '-------- t1 ---------' '--- s3 ----'
+
+ // Copy number of actual arguments into numActArgsReg.
+ masm.mov(s3, numActArgsReg);
+
+ masm.moveValue(UndefinedValue(), ValueOperand(t0));
+
+  // Push undefined values (including the padding).
+ {
+ Label undefLoopTop;
+
+ masm.bind(&undefLoopTop);
+ masm.sub32(Imm32(1), t1);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+  static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+  // Get the topmost argument.
+ masm.slli(t0, s3, 3); // t0 <- nargs * 8
+ masm.ma_add64(t1, FramePointer, t0); // t1 <- fp(saved sp) + nargs * 8
+ masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t1);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ masm.addPtr(Imm32(1), s3);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.sub32(Imm32(1), s3);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.loadValue(Address(t1, 0), ValueOperand(t0));
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+ masm.subPtr(Imm32(sizeof(Value)), t1);
+
+ masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // if constructing, copy newTarget
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, calleeTokenReg,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // thisFrame[numFormals] = prevFrame[argc]
+ ValueOperand newTarget(t0);
+
+ // Load vp[argc]. Add sizeof(Value) for |this|.
+ BaseIndex newTargetSrc(FramePointer, numActArgsReg, TimesEight,
+ sizeof(RectifierFrameLayout) + sizeof(Value));
+ masm.loadValue(newTargetSrc, newTarget);
+
+ // Again, 1 for |this|
+ BaseIndex newTargetDest(StackPointer, t3, TimesEight, sizeof(Value));
+ masm.storeValue(newTarget, newTargetDest);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ]
+ //
+ //
+ // Rectifier frame:
+ // [fp'] <- fp [undef] [undef] [undef] [arg2] [arg1] [this] <- sp [ [argc]
+ // [callee] [descr] [raddr] ]
+ //
+
+ // Construct JitFrameLayout.
+ masm.push(calleeTokenReg);
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, numActArgsReg,
+ numActArgsReg);
+
+ // Call the target function.
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), calleeTokenReg);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(t1);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(calleeTokenReg, t1, &noBaselineScript);
+ masm.callJitNoProfiler(t1);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ masm.callJitNoProfiler(t1);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ Register temp1 = a0;
+ Register temp2 = a2;
+ Register temp3 = a3;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet save;
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ masm.push(ra);
+ masm.PushRegsInMask(save);
+
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.abiret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(a1, a2);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // If it isn't a tail call, then the return address needs to be saved
+ if (f.expectTailCall == NonTailCall) {
+ masm.pushReturnAddress();
+ }
+
+ // Push the frame pointer to finish the exit frame, then link it up.
+ masm.Push(FramePointer);
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = t1; // Use temporary register.
+ regs.take(argsBase);
+ masm.ma_add64(argsBase, StackPointer,
+ Imm32(ExitFrameLayout::SizeWithFooter()));
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Bool:
+ case Type_Int32:
+ outReg = regs.takeAny();
+      // Reserve an extra 4 bytes of padding so the 4-byte outparam keeps the
+      // stack 8-byte aligned.
+ masm.reserveStack(2 * sizeof(int32_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ if (f.argPassedInFloatReg(explicitArg)) {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ } else {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ }
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ case VMFunctionData::DoubleByRef:
+        MOZ_CRASH(
+            "NYI: RISCV64 callVM should not be used with 128-bit values.");
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (InvalidReg != outReg) {
+ masm.passABIArg(outReg);
+ }
+
+ masm.callWithABI(nativeFun, MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, a0, a0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(a0, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ masm.load32(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Pointer:
+ masm.loadPtr(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Double:
+ masm.fld(ReturnDoubleReg, StackPointer, 0);
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ // Pop ExitFooterFrame and the frame pointer.
+ masm.leaveExitFrame(sizeof(void*));
+
+ // Return. Subtract sizeof(void*) for the frame pointer.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
diff --git a/js/src/jit/riscv64/constant/Base-constant-riscv.cpp b/js/src/jit/riscv64/constant/Base-constant-riscv.cpp
new file mode 100644
index 0000000000..9658689775
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Base-constant-riscv.cpp
@@ -0,0 +1,247 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <stdio.h>
+
+#include "jit/riscv64/constant/Constant-riscv-c.h"
+#include "jit/riscv64/constant/Constant-riscv-d.h"
+#include "jit/riscv64/constant/Constant-riscv-f.h"
+#include "jit/riscv64/constant/Constant-riscv-i.h"
+#include "jit/riscv64/constant/Constant-riscv-m.h"
+#include "jit/riscv64/constant/Constant-riscv-v.h"
+#include "jit/riscv64/constant/Constant-riscv-zicsr.h"
+#include "jit/riscv64/constant/Constant-riscv-zifencei.h"
+#include "jit/riscv64/Simulator-riscv64.h"
+namespace js {
+namespace jit {
+
+int32_t ImmBranchMaxForwardOffset(OffsetSize bits) {
+ return (1 << (bits - 1)) - 1;
+}
+
+bool InstructionBase::IsShortInstruction() const {
+ uint8_t FirstByte = *reinterpret_cast<const uint8_t*>(this);
+ return (FirstByte & 0x03) <= C2;
+}
+
+template <class T>
+int InstructionGetters<T>::RvcRdValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcRdShift + kRvcRdBits - 1, kRvcRdShift);
+}
+
+template <class T>
+int InstructionGetters<T>::RvcRs2Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcRs2Shift + kRvcRs2Bits - 1, kRvcRs2Shift);
+}
+
+template <class T>
+int InstructionGetters<T>::RvcRs1sValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return 0b1000 + this->Bits(kRvcRs1sShift + kRvcRs1sBits - 1, kRvcRs1sShift);
+}
+
+template <class T>
+int InstructionGetters<T>::RvcRs2sValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return 0b1000 + this->Bits(kRvcRs2sShift + kRvcRs2sBits - 1, kRvcRs2sShift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct6Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct6Shift + kRvcFunct6Bits - 1, kRvcFunct6Shift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct4Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct4Shift + kRvcFunct4Bits - 1, kRvcFunct4Shift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct3Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct3Shift + kRvcFunct3Bits - 1, kRvcFunct3Shift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct2Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct2Shift + kRvcFunct2Bits - 1, kRvcFunct2Shift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct2BValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct2BShift + kRvcFunct2Bits - 1, kRvcFunct2BShift);
+}
+
+template <class T>
+uint32_t InstructionGetters<T>::Rvvzimm() const {
+ if ((this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) == RO_V_VSETVLI) {
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return zimm >> kRvvZimmShift;
+ } else {
+ MOZ_ASSERT((this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000)) == RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return (zimm >> kRvvZimmShift) & 0x3FF;
+ }
+}
+
+template <class T>
+uint32_t InstructionGetters<T>::Rvvuimm() const {
+ MOZ_ASSERT((this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000)) == RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t uimm = Bits & kRvvUimmMask;
+ return uimm >> kRvvUimmShift;
+}
+
+template class InstructionGetters<InstructionBase>;
+#ifdef JS_SIMULATOR_RISCV64
+template class InstructionGetters<SimInstructionBase>;
+#endif
+
+OffsetSize InstructionBase::GetOffsetSize() const {
+ if (IsIllegalInstruction()) {
+ MOZ_CRASH("IllegalInstruction");
+ }
+ if (IsShortInstruction()) {
+ switch (InstructionBits() & kRvcOpcodeMask) {
+ case RO_C_J:
+ return kOffset11;
+ case RO_C_BEQZ:
+ case RO_C_BNEZ:
+ return kOffset9;
+ default:
+ MOZ_CRASH("IllegalInstruction");
+ }
+ } else {
+ switch (InstructionBits() & kBaseOpcodeMask) {
+ case BRANCH:
+ return kOffset13;
+ case JAL:
+ return kOffset21;
+ default:
+ MOZ_CRASH("IllegalInstruction");
+ }
+ }
+}
+
+InstructionBase::Type InstructionBase::InstructionType() const {
+ if (IsIllegalInstruction()) {
+ return kUnsupported;
+ }
+ // RV64C Instruction
+ if (IsShortInstruction()) {
+ switch (InstructionBits() & kRvcOpcodeMask) {
+ case RO_C_ADDI4SPN:
+ return kCIWType;
+ case RO_C_FLD:
+ case RO_C_LW:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_LD:
+#endif
+ return kCLType;
+ case RO_C_FSD:
+ case RO_C_SW:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_SD:
+#endif
+ return kCSType;
+ case RO_C_NOP_ADDI:
+ case RO_C_LI:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_ADDIW:
+#endif
+ case RO_C_LUI_ADD:
+ return kCIType;
+ case RO_C_MISC_ALU:
+ if (Bits(11, 10) != 0b11)
+ return kCBType;
+ else
+ return kCAType;
+ case RO_C_J:
+ return kCJType;
+ case RO_C_BEQZ:
+ case RO_C_BNEZ:
+ return kCBType;
+ case RO_C_SLLI:
+ case RO_C_FLDSP:
+ case RO_C_LWSP:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_LDSP:
+#endif
+ return kCIType;
+ case RO_C_JR_MV_ADD:
+ return kCRType;
+ case RO_C_FSDSP:
+ case RO_C_SWSP:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_SDSP:
+#endif
+ return kCSSType;
+ default:
+ break;
+ }
+ } else {
+ // RISCV routine
+ switch (InstructionBits() & kBaseOpcodeMask) {
+ case LOAD:
+ return kIType;
+ case LOAD_FP:
+ return kIType;
+ case MISC_MEM:
+ return kIType;
+ case OP_IMM:
+ return kIType;
+ case AUIPC:
+ return kUType;
+ case OP_IMM_32:
+ return kIType;
+ case STORE:
+ return kSType;
+ case STORE_FP:
+ return kSType;
+ case AMO:
+ return kRType;
+ case OP:
+ return kRType;
+ case LUI:
+ return kUType;
+ case OP_32:
+ return kRType;
+ case MADD:
+ case MSUB:
+ case NMSUB:
+ case NMADD:
+ return kR4Type;
+ case OP_FP:
+ return kRType;
+ case BRANCH:
+ return kBType;
+ case JALR:
+ return kIType;
+ case JAL:
+ return kJType;
+ case SYSTEM:
+ return kIType;
+ case OP_V:
+ return kVType;
+ }
+ }
+ return kUnsupported;
+}
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/constant/Base-constant-riscv.h b/js/src/jit/riscv64/constant/Base-constant-riscv.h
new file mode 100644
index 0000000000..929ccd67b5
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Base-constant-riscv.h
@@ -0,0 +1,1057 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Base_constant_riscv__h_
+#define jit_riscv64_constant_Base_constant_riscv__h_
+namespace js {
+namespace jit {
+
+// On RISC-V Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxTracepointCode = 63;
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+const uint32_t kWasmTrapCode = 6;
+static_assert(kMaxWatchpointCode < kMaxStopCode);
+static_assert(kMaxTracepointCode < kMaxStopCode);
+
+// Debug parameters.
+//
+// For example:
+//
+// __ Debug(TRACE_ENABLE | LOG_TRACE);
+//   starts tracing: sets v8_flags.trace-sim to true.
+//   __ Debug(TRACE_ENABLE | LOG_REGS);
+//   prints all registers.
+//   __ Debug(TRACE_DISABLE | LOG_TRACE);
+//   stops tracing: sets v8_flags.trace-sim to false.
+const uint32_t kDebuggerTracingDirectivesMask = 0b111 << 3;
+enum DebugParameters : uint32_t {
+ NO_PARAM = 1 << 5,
+ BREAK = 1 << 0,
+ LOG_TRACE = 1 << 1,
+ LOG_REGS = 1 << 2,
+ LOG_ALL = LOG_TRACE,
+ // Trace control.
+ TRACE_ENABLE = 1 << 3 | NO_PARAM,
+ TRACE_DISABLE = 1 << 4 | NO_PARAM,
+};
+// On RISCV all instructions are 32 bits, except for RVC.
+using Instr = int32_t;
+using ShortInstr = int16_t;
+typedef unsigned char byte;
+// ----- Fields offset and length.
+// RISCV constants
+const int kBaseOpcodeShift = 0;
+const int kBaseOpcodeBits = 7;
+const int kFunct7Shift = 25;
+const int kFunct7Bits = 7;
+const int kFunct5Shift = 27;
+const int kFunct5Bits = 5;
+const int kFunct3Shift = 12;
+const int kFunct3Bits = 3;
+const int kFunct2Shift = 25;
+const int kFunct2Bits = 2;
+const int kRs1Shift = 15;
+const int kRs1Bits = 5;
+const int kVs1Shift = 15;
+const int kVs1Bits = 5;
+const int kVs2Shift = 20;
+const int kVs2Bits = 5;
+const int kVdShift = 7;
+const int kVdBits = 5;
+const int kRs2Shift = 20;
+const int kRs2Bits = 5;
+const int kRs3Shift = 27;
+const int kRs3Bits = 5;
+const int kRdShift = 7;
+const int kRdBits = 5;
+const int kRlShift = 25;
+const int kAqShift = 26;
+const int kImm12Shift = 20;
+const int kImm12Bits = 12;
+const int kImm11Shift = 2;
+const int kImm11Bits = 11;
+const int kShamtShift = 20;
+const int kShamtBits = 5;
+const int kShamtWShift = 20;
+// FIXME: remove this once we have a proper way to handle the wide shift amount
+const int kShamtWBits = 6;
+const int kArithShiftShift = 30;
+const int kImm20Shift = 12;
+const int kImm20Bits = 20;
+const int kCsrShift = 20;
+const int kCsrBits = 12;
+const int kMemOrderBits = 4;
+const int kPredOrderShift = 24;
+const int kSuccOrderShift = 20;
+
+// for C extension
+const int kRvcFunct4Shift = 12;
+const int kRvcFunct4Bits = 4;
+const int kRvcFunct3Shift = 13;
+const int kRvcFunct3Bits = 3;
+const int kRvcRs1Shift = 7;
+const int kRvcRs1Bits = 5;
+const int kRvcRs2Shift = 2;
+const int kRvcRs2Bits = 5;
+const int kRvcRdShift = 7;
+const int kRvcRdBits = 5;
+const int kRvcRs1sShift = 7;
+const int kRvcRs1sBits = 3;
+const int kRvcRs2sShift = 2;
+const int kRvcRs2sBits = 3;
+const int kRvcFunct2Shift = 5;
+const int kRvcFunct2BShift = 10;
+const int kRvcFunct2Bits = 2;
+const int kRvcFunct6Shift = 10;
+const int kRvcFunct6Bits = 6;
+
+const uint32_t kRvcOpcodeMask =
+ 0b11 | (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift);
+const uint32_t kRvcFunct3Mask =
+ (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift);
+const uint32_t kRvcFunct4Mask =
+ (((1 << kRvcFunct4Bits) - 1) << kRvcFunct4Shift);
+const uint32_t kRvcFunct6Mask =
+ (((1 << kRvcFunct6Bits) - 1) << kRvcFunct6Shift);
+const uint32_t kRvcFunct2Mask =
+ (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2Shift);
+const uint32_t kRvcFunct2BMask =
+ (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2BShift);
+const uint32_t kCRTypeMask = kRvcOpcodeMask | kRvcFunct4Mask;
+const uint32_t kCSTypeMask = kRvcOpcodeMask | kRvcFunct6Mask;
+const uint32_t kCATypeMask = kRvcOpcodeMask | kRvcFunct6Mask | kRvcFunct2Mask;
+const uint32_t kRvcBImm8Mask = (((1 << 5) - 1) << 2) | (((1 << 3) - 1) << 10);
+
+// RISCV Instruction bit masks
+const uint32_t kBaseOpcodeMask = ((1 << kBaseOpcodeBits) - 1)
+ << kBaseOpcodeShift;
+const uint32_t kFunct3Mask = ((1 << kFunct3Bits) - 1) << kFunct3Shift;
+const uint32_t kFunct5Mask = ((1 << kFunct5Bits) - 1) << kFunct5Shift;
+const uint32_t kFunct7Mask = ((1 << kFunct7Bits) - 1) << kFunct7Shift;
+const uint32_t kFunct2Mask = 0b11 << kFunct7Shift;
+const uint32_t kRTypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct7Mask;
+const uint32_t kRATypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct5Mask;
+const uint32_t kRFPTypeMask = kBaseOpcodeMask | kFunct7Mask;
+const uint32_t kR4TypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct2Mask;
+const uint32_t kITypeMask = kBaseOpcodeMask | kFunct3Mask;
+const uint32_t kSTypeMask = kBaseOpcodeMask | kFunct3Mask;
+const uint32_t kBTypeMask = kBaseOpcodeMask | kFunct3Mask;
+const uint32_t kUTypeMask = kBaseOpcodeMask;
+const uint32_t kJTypeMask = kBaseOpcodeMask;
+const uint32_t kRs1FieldMask = ((1 << kRs1Bits) - 1) << kRs1Shift;
+const uint32_t kRs2FieldMask = ((1 << kRs2Bits) - 1) << kRs2Shift;
+const uint32_t kRs3FieldMask = ((1 << kRs3Bits) - 1) << kRs3Shift;
+const uint32_t kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const uint32_t kBImm12Mask = kFunct7Mask | kRdFieldMask;
+const uint32_t kImm20Mask = ((1 << kImm20Bits) - 1) << kImm20Shift;
+const uint32_t kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift;
+const uint32_t kImm11Mask = ((1 << kImm11Bits) - 1) << kImm11Shift;
+const uint32_t kImm31_12Mask = ((1 << 20) - 1) << 12;
+const uint32_t kImm19_0Mask = ((1 << 20) - 1);
+
+// for RVV extension
+#define RVV_LMUL(V) \
+ V(m1) \
+ V(m2) \
+ V(m4) \
+ V(m8) \
+ V(RESERVERD) \
+ V(mf8) \
+ V(mf4) \
+ V(mf2)
+
+enum Vlmul {
+#define DEFINE_FLAG(name) name,
+ RVV_LMUL(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
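+// Editor's note: the enumerator order above mirrors the 3-bit vlmul encoding
+// of the RVV vtype register, i.e. 0b000 = m1 through 0b011 = m8, 0b100
+// reserved, and 0b101..0b111 = mf8, mf4, mf2.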
+
+#define RVV_SEW(V) \
+ V(E8) \
+ V(E16) \
+ V(E32) \
+ V(E64)
+
+#define DEFINE_FLAG(name) name,
+enum VSew {
+ RVV_SEW(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
+
+constexpr int kRvvELEN = 64;
+constexpr int kRvvVLEN = 128;
+constexpr int kRvvSLEN = kRvvVLEN;
+const int kRvvFunct6Shift = 26;
+const int kRvvFunct6Bits = 6;
+const uint32_t kRvvFunct6Mask =
+ (((1 << kRvvFunct6Bits) - 1) << kRvvFunct6Shift);
+
+const int kRvvVmBits = 1;
+const int kRvvVmShift = 25;
+const uint32_t kRvvVmMask = (((1 << kRvvVmBits) - 1) << kRvvVmShift);
+
+const int kRvvVs2Bits = 5;
+const int kRvvVs2Shift = 20;
+const uint32_t kRvvVs2Mask = (((1 << kRvvVs2Bits) - 1) << kRvvVs2Shift);
+
+const int kRvvVs1Bits = 5;
+const int kRvvVs1Shift = 15;
+const uint32_t kRvvVs1Mask = (((1 << kRvvVs1Bits) - 1) << kRvvVs1Shift);
+
+const int kRvvRs1Bits = kRvvVs1Bits;
+const int kRvvRs1Shift = kRvvVs1Shift;
+const uint32_t kRvvRs1Mask = (((1 << kRvvRs1Bits) - 1) << kRvvRs1Shift);
+
+const int kRvvRs2Bits = 5;
+const int kRvvRs2Shift = 20;
+const uint32_t kRvvRs2Mask = (((1 << kRvvRs2Bits) - 1) << kRvvRs2Shift);
+
+const int kRvvImm5Bits = kRvvVs1Bits;
+const int kRvvImm5Shift = kRvvVs1Shift;
+const uint32_t kRvvImm5Mask = (((1 << kRvvImm5Bits) - 1) << kRvvImm5Shift);
+
+const int kRvvVdBits = 5;
+const int kRvvVdShift = 7;
+const uint32_t kRvvVdMask = (((1 << kRvvVdBits) - 1) << kRvvVdShift);
+
+const int kRvvRdBits = kRvvVdBits;
+const int kRvvRdShift = kRvvVdShift;
+const uint32_t kRvvRdMask = (((1 << kRvvRdBits) - 1) << kRvvRdShift);
+
+const int kRvvZimmBits = 11;
+const int kRvvZimmShift = 20;
+const uint32_t kRvvZimmMask = (((1 << kRvvZimmBits) - 1) << kRvvZimmShift);
+
+const int kRvvUimmShift = kRvvRs1Shift;
+const int kRvvUimmBits = kRvvRs1Bits;
+const uint32_t kRvvUimmMask = (((1 << kRvvUimmBits) - 1) << kRvvUimmShift);
+
+const int kRvvWidthBits = 3;
+const int kRvvWidthShift = 12;
+const uint32_t kRvvWidthMask = (((1 << kRvvWidthBits) - 1) << kRvvWidthShift);
+
+const int kRvvMopBits = 2;
+const int kRvvMopShift = 26;
+const uint32_t kRvvMopMask = (((1 << kRvvMopBits) - 1) << kRvvMopShift);
+
+const int kRvvMewBits = 1;
+const int kRvvMewShift = 28;
+const uint32_t kRvvMewMask = (((1 << kRvvMewBits) - 1) << kRvvMewShift);
+
+const int kRvvNfBits = 3;
+const int kRvvNfShift = 29;
+const uint32_t kRvvNfMask = (((1 << kRvvNfBits) - 1) << kRvvNfShift);
+
+const int kNopByte = 0x00000013;
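+// (0x00000013 decodes as addi zero, zero, 0, the canonical RISC-V nop.)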
+
+enum BaseOpcode : uint32_t {
+ LOAD = 0b0000011, // I form: LB LH LW LBU LHU
+ LOAD_FP = 0b0000111, // I form: FLW FLD FLQ
+ MISC_MEM = 0b0001111, // I special form: FENCE FENCE.I
+ OP_IMM = 0b0010011, // I form: ADDI SLTI SLTIU XORI ORI ANDI SLLI SRLI SRAI
+ // Note: SLLI/SRLI/SRAI I form first, then func3 001/101 => R type
+ AUIPC = 0b0010111, // U form: AUIPC
+ OP_IMM_32 = 0b0011011, // I form: ADDIW SLLIW SRLIW SRAIW
+ // Note: SRLIW SRAIW I form first, then func3 101 special shift encoding
+ STORE = 0b0100011, // S form: SB SH SW SD
+ STORE_FP = 0b0100111, // S form: FSW FSD FSQ
+ AMO = 0b0101111, // R form: All A instructions
+ OP = 0b0110011, // R: ADD SUB SLL SLT SLTU XOR SRL SRA OR AND and 32M set
+ LUI = 0b0110111, // U form: LUI
+ OP_32 = 0b0111011, // R: ADDW SUBW SLLW SRLW SRAW MULW DIVW DIVUW REMW REMUW
+ MADD = 0b1000011, // R4 type: FMADD.S FMADD.D FMADD.Q
+ MSUB = 0b1000111, // R4 type: FMSUB.S FMSUB.D FMSUB.Q
+ NMSUB = 0b1001011, // R4 type: FNMSUB.S FNMSUB.D FNMSUB.Q
+ NMADD = 0b1001111, // R4 type: FNMADD.S FNMADD.D FNMADD.Q
+ OP_FP = 0b1010011, // R type: Q ext
+ BRANCH = 0b1100011, // B form: BEQ BNE, BLT, BGE, BLTU BGEU
+ JALR = 0b1100111, // I form: JALR
+ JAL = 0b1101111, // J form: JAL
+ SYSTEM = 0b1110011, // I form: ECALL EBREAK Zicsr ext
+ OP_V = 0b1010111, // V form: RVV
+
+ // C extension
+ C0 = 0b00,
+ C1 = 0b01,
+ C2 = 0b10,
+ FUNCT2_0 = 0b00,
+ FUNCT2_1 = 0b01,
+ FUNCT2_2 = 0b10,
+ FUNCT2_3 = 0b11,
+};
+
+// ----- Emulated conditions.
+// On RISC-V we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+// Opposite conditions must be paired as odd/even numbers
+// because 'NegateCondition' function flips LSB to negate condition.
+enum RiscvCondition { // Any value < 0 is considered no_condition.
+ overflow = 0,
+ no_overflow = 1,
+ Uless = 2,
+ Ugreater_equal = 3,
+ Uless_equal = 4,
+ Ugreater = 5,
+ equal = 6,
+ not_equal = 7, // Unordered or Not Equal.
+ less = 8,
+ greater_equal = 9,
+ less_equal = 10,
+ greater = 11,
+ cc_always = 12,
+
+ // Aliases.
+ eq = equal,
+ ne = not_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ al = cc_always,
+ ult = Uless,
+ uge = Ugreater_equal,
+ ule = Uless_equal,
+ ugt = Ugreater,
+};
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+ kNoFPUCondition = -1,
+ EQ = 0x02, // Ordered and Equal
+ NE = 0x03, // Unordered or Not Equal
+ LT = 0x04, // Ordered and Less Than
+ GE = 0x05, // Ordered and Greater Than or Equal
+ LE = 0x06, // Ordered and Less Than or Equal
+ GT = 0x07, // Ordered and Greater Than
+};
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
+
+// ----------------------------------------------------------------------------
+// RISCV flags
+
+enum ControlStatusReg {
+ csr_fflags = 0x001, // Floating-Point Accrued Exceptions (RW)
+ csr_frm = 0x002, // Floating-Point Dynamic Rounding Mode (RW)
+ csr_fcsr = 0x003, // Floating-Point Control and Status Register (RW)
+ csr_cycle = 0xc00, // Cycle counter for RDCYCLE instruction (RO)
+ csr_time = 0xc01, // Timer for RDTIME instruction (RO)
+ csr_instret = 0xc02, // Insns-retired counter for RDINSTRET instruction (RO)
+ csr_cycleh = 0xc80, // Upper 32 bits of cycle, RV32I only (RO)
+ csr_timeh = 0xc81, // Upper 32 bits of time, RV32I only (RO)
+ csr_instreth = 0xc82 // Upper 32 bits of instret, RV32I only (RO)
+};
+
+enum FFlagsMask {
+ kInvalidOperation = 0b10000, // NV: Invalid
+ kDivideByZero = 0b1000, // DZ: Divide by Zero
+ kOverflow = 0b100, // OF: Overflow
+ kUnderflow = 0b10, // UF: Underflow
+ kInexact = 0b1 // NX: Inexact
+};
+
+enum FPURoundingMode {
+ RNE = 0b000, // Round to Nearest, ties to Even
+ RTZ = 0b001, // Round towards Zero
+ RDN = 0b010, // Round Down (towards -infinity)
+ RUP = 0b011, // Round Up (towards +infinity)
+  RMM = 0b100,  // Round to Nearest, ties to Max Magnitude
+ DYN = 0b111 // In instruction's rm field, selects dynamic rounding mode;
+ // In Rounding Mode register, Invalid
+};
+
+enum MemoryOdering {
+ PSI = 0b1000, // PI or SI
+ PSO = 0b0100, // PO or SO
+ PSR = 0b0010, // PR or SR
+ PSW = 0b0001, // PW or SW
+ PSIORW = PSI | PSO | PSR | PSW
+};
+
+const int kFloat32ExponentBias = 127;
+const int kFloat32MantissaBits = 23;
+const int kFloat32ExponentBits = 8;
+const int kFloat64ExponentBias = 1023;
+const int kFloat64MantissaBits = 52;
+const int kFloat64ExponentBits = 11;
+
+enum FClassFlag {
+ kNegativeInfinity = 1,
+ kNegativeNormalNumber = 1 << 1,
+ kNegativeSubnormalNumber = 1 << 2,
+ kNegativeZero = 1 << 3,
+ kPositiveZero = 1 << 4,
+ kPositiveSubnormalNumber = 1 << 5,
+ kPositiveNormalNumber = 1 << 6,
+ kPositiveInfinity = 1 << 7,
+ kSignalingNaN = 1 << 8,
+ kQuietNaN = 1 << 9
+};
+
+enum OffsetSize : uint32_t {
+ kOffset21 = 21, // RISCV jal
+ kOffset12 = 12, // RISCV imm12
+ kOffset20 = 20, // RISCV imm20
+ kOffset13 = 13, // RISCV branch
+ kOffset32 = 32, // RISCV auipc + instr_I
+ kOffset11 = 11, // RISCV C_J
+ kOffset9 = 9, // RISCV compressed branch
+};
+
+// The classes of immediate branch ranges, in order of increasing range.
+// Note that CondBranchType and CompareBranchType have the same range.
+enum ImmBranchRangeType {
+ CondBranchRangeType, //
+ UncondBranchRangeType, //
+ UnknownBranchRangeType,
+
+ // Number of 'short-range' branch range types.
+ // We don't consider unconditional branches 'short-range'.
+ NumShortBranchRangeTypes = UnknownBranchRangeType
+};
+
+inline ImmBranchRangeType OffsetSizeToImmBranchRangeType(OffsetSize bits) {
+ switch (bits) {
+ case kOffset21:
+ return UncondBranchRangeType;
+ case kOffset13:
+ return CondBranchRangeType;
+ default:
+ MOZ_CRASH("Unimplement");
+ }
+}
+
+inline OffsetSize ImmBranchRangeTypeToOffsetSize(ImmBranchRangeType type) {
+ switch (type) {
+ case CondBranchRangeType:
+ return kOffset13;
+ case UncondBranchRangeType:
+ return kOffset21;
+ default:
+ MOZ_CRASH("Unimplement");
+ }
+}
+
+int32_t ImmBranchMaxForwardOffset(OffsetSize bits);
+
+inline int32_t ImmBranchMaxForwardOffset(ImmBranchRangeType type) {
+ return ImmBranchMaxForwardOffset(ImmBranchRangeTypeToOffsetSize(type));
+}
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-riscv64.cc, as they use named
+// registers and other constants.
+
+// An Illegal instruction
+const Instr kIllegalInstr = 0;  // All bits are 0s (not a valid encoding)
+// An ECALL instruction, used for redirected real time call
+const Instr rtCallRedirInstr = SYSTEM; // All other bits are 0s (i.e., ecall)
+// An EBreak instruction, used for debugging and semi-hosting
+const Instr kBreakInstr = SYSTEM | 1 << kImm12Shift; // ebreak
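+// Editor's note: SYSTEM | 1 << kImm12Shift yields 0x00100073, the standard
+// ebreak encoding; the same opcode with imm12 == 0 (rtCallRedirInstr above)
+// encodes ecall.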
+
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kShortInstrSize = 2;
+constexpr uint8_t kInstrSizeLog2 = 2;
+
+class InstructionBase {
+ public:
+ enum {
+ // On RISC-V, PC cannot actually be directly accessed. We behave as if PC
+ // was always the value of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Instruction type.
+ enum Type {
+ kRType,
+ kR4Type, // Special R4 for Q extension
+ kIType,
+ kSType,
+ kBType,
+ kUType,
+ kJType,
+ // C extension
+ kCRType,
+ kCIType,
+ kCSSType,
+ kCIWType,
+ kCLType,
+ kCSType,
+ kCAType,
+ kCBType,
+ kCJType,
+ // V extension
+ kVType,
+ kVLType,
+ kVSType,
+ kVAMOType,
+ kVIVVType,
+ kVFVVType,
+ kVMVVType,
+ kVIVIType,
+ kVIVXType,
+ kVFVFType,
+ kVMVXType,
+ kVSETType,
+ kUnsupported = -1
+ };
+
+ inline bool IsIllegalInstruction() const {
+ uint16_t FirstHalfWord = *reinterpret_cast<const uint16_t*>(this);
+ return FirstHalfWord == 0;
+ }
+
+ bool IsShortInstruction() const;
+
+ inline uint8_t InstructionSize() const {
+ return (this->IsShortInstruction()) ? kShortInstrSize : kInstrSize;
+ }
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ if (this->IsShortInstruction()) {
+ return 0x0000FFFF & (*reinterpret_cast<const ShortInstr*>(this));
+ }
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
+ }
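+  // For example, Bits(14, 12) extracts the 3-bit funct3 field: shift right
+  // by 12 and mask with (2U << 2) - 1 == 0b111 (editor's illustration).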
+
+ // Accessors for the different named fields used in the RISC-V encoding.
+ inline BaseOpcode BaseOpcodeValue() const {
+ return static_cast<BaseOpcode>(
+ Bits(kBaseOpcodeShift + kBaseOpcodeBits - 1, kBaseOpcodeShift));
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline BaseOpcode BaseOpcodeFieldRaw() const {
+ return static_cast<BaseOpcode>(InstructionBits() & kBaseOpcodeMask);
+ }
+
+ // Safe to call within R-type instructions
+ inline int Funct7FieldRaw() const { return InstructionBits() & kFunct7Mask; }
+
+ // Safe to call within R-, I-, S-, or B-type instructions
+ inline int Funct3FieldRaw() const { return InstructionBits() & kFunct3Mask; }
+
+ // Safe to call within R-, I-, S-, or B-type instructions
+ inline int Rs1FieldRawNoAssert() const {
+ return InstructionBits() & kRs1FieldMask;
+ }
+
+ // Safe to call within R-, S-, or B-type instructions
+ inline int Rs2FieldRawNoAssert() const {
+ return InstructionBits() & kRs2FieldMask;
+ }
+
+ // Safe to call within R4-type instructions
+ inline int Rs3FieldRawNoAssert() const {
+ return InstructionBits() & kRs3FieldMask;
+ }
+
+ inline int32_t ITypeBits() const { return InstructionBits() & kITypeMask; }
+
+ inline int32_t InstructionOpcodeType() const {
+ if (IsShortInstruction()) {
+ return InstructionBits() & kRvcOpcodeMask;
+ } else {
+ return InstructionBits() & kBaseOpcodeMask;
+ }
+ }
+
+ // Get the encoding type of the instruction.
+ Type InstructionType() const;
+ OffsetSize GetOffsetSize() const;
+ inline ImmBranchRangeType GetImmBranchRangeType() const {
+ return OffsetSizeToImmBranchRangeType(GetOffsetSize());
+ }
+
+ protected:
+ InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
+ // Say if the instruction is a break or a trap.
+ bool IsTrap() const;
+
+ inline int BaseOpcode() const {
+ return this->InstructionBits() & kBaseOpcodeMask;
+ }
+
+ inline int RvcOpcode() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->InstructionBits() & kRvcOpcodeMask;
+ }
+
+ inline int Rs1Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kBType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kVType);
+ return this->Bits(kRs1Shift + kRs1Bits - 1, kRs1Shift);
+ }
+
+ inline int Rs2Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kBType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kVType);
+ return this->Bits(kRs2Shift + kRs2Bits - 1, kRs2Shift);
+ }
+
+ inline int Rs3Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kR4Type);
+ return this->Bits(kRs3Shift + kRs3Bits - 1, kRs3Shift);
+ }
+
+ inline int Vs1Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs1Shift + kVs1Bits - 1, kVs1Shift);
+ }
+
+ inline int Vs2Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs2Shift + kVs2Bits - 1, kVs2Shift);
+ }
+
+ inline int VdValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVdShift + kVdBits - 1, kVdShift);
+ }
+
+ inline int RdValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kUType ||
+ this->InstructionType() == InstructionBase::kJType ||
+ this->InstructionType() == InstructionBase::kVType);
+ return this->Bits(kRdShift + kRdBits - 1, kRdShift);
+ }
+
+ inline int RvcRs1Value() const { return this->RvcRdValue(); }
+
+ int RvcRdValue() const;
+
+ int RvcRs2Value() const;
+
+ int RvcRs1sValue() const;
+
+ int RvcRs2sValue() const;
+
+ int Funct7Value() const;
+
+ inline int Funct3Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kBType);
+ return this->Bits(kFunct3Shift + kFunct3Bits - 1, kFunct3Shift);
+ }
+
+ inline int Funct5Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType &&
+ this->BaseOpcode() == OP_FP);
+ return this->Bits(kFunct5Shift + kFunct5Bits - 1, kFunct5Shift);
+ }
+
+ int RvcFunct6Value() const;
+
+ int RvcFunct4Value() const;
+
+ int RvcFunct3Value() const;
+
+ int RvcFunct2Value() const;
+
+ int RvcFunct2BValue() const;
+
+ inline int CsrValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kIType &&
+ this->BaseOpcode() == SYSTEM);
+ return (this->Bits(kCsrShift + kCsrBits - 1, kCsrShift));
+ }
+
+ inline int RoundMode() const {
+ MOZ_ASSERT((this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type) &&
+ this->BaseOpcode() == OP_FP);
+ return this->Bits(kFunct3Shift + kFunct3Bits - 1, kFunct3Shift);
+ }
+
+ inline int MemoryOrder(bool is_pred) const {
+ MOZ_ASSERT((this->InstructionType() == InstructionBase::kIType &&
+ this->BaseOpcode() == MISC_MEM));
+ if (is_pred) {
+ return this->Bits(kPredOrderShift + kMemOrderBits - 1, kPredOrderShift);
+ } else {
+ return this->Bits(kSuccOrderShift + kMemOrderBits - 1, kSuccOrderShift);
+ }
+ }
+
+ inline int Imm12Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kIType);
+ int Value = this->Bits(kImm12Shift + kImm12Bits - 1, kImm12Shift);
+ return Value << 20 >> 20;
+ }
+
+ inline int32_t Imm12SExtValue() const {
+ int32_t Value = this->Imm12Value() << 20 >> 20;
+ return Value;
+ }
+
+ inline int BranchOffset() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kBType);
+ // | imm[12|10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode |
+ // 31 25 11 7
+ uint32_t Bits = this->InstructionBits();
+ int16_t imm13 = ((Bits & 0xf00) >> 7) | ((Bits & 0x7e000000) >> 20) |
+ ((Bits & 0x80) << 4) | ((Bits & 0x80000000) >> 19);
+ return imm13 << 19 >> 19;
+ }
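+  // Editor's note on BranchOffset(): bit 0 of a branch offset is implicitly
+  // zero, so the reassembled 13-bit immediate is sign-extended by the
+  // << 19 >> 19 shift pair; e.g. a raw imm13 of 0x1ffe decodes to -2.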
+
+ inline int StoreOffset() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kSType);
+ // | imm[11:5] | rs2 | rs1 | funct3 | imm[4:0] | opcode |
+ // 31 25 11 7
+ uint32_t Bits = this->InstructionBits();
+ int16_t imm12 = ((Bits & 0xf80) >> 7) | ((Bits & 0xfe000000) >> 20);
+ return imm12 << 20 >> 20;
+ }
+
+ inline int Imm20UValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kUType);
+ // | imm[31:12] | rd | opcode |
+ // 31 12
+ int32_t Bits = this->InstructionBits();
+ return Bits >> 12;
+ }
+
+ inline int Imm20JValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kJType);
+ // | imm[20|10:1|11|19:12] | rd | opcode |
+ // 31 12
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm20 = ((Bits & 0x7fe00000) >> 20) | ((Bits & 0x100000) >> 9) |
+ (Bits & 0xff000) | ((Bits & 0x80000000) >> 11);
+ return imm20 << 11 >> 11;
+ }
+
+ inline bool IsArithShift() const {
+ // Valid only for right shift operations
+ MOZ_ASSERT((this->BaseOpcode() == OP || this->BaseOpcode() == OP_32 ||
+ this->BaseOpcode() == OP_IMM ||
+ this->BaseOpcode() == OP_IMM_32) &&
+ this->Funct3Value() == 0b101);
+ return this->InstructionBits() & 0x40000000;
+ }
+
+ inline int Shamt() const {
+ // Valid only for shift instructions (SLLI, SRLI, SRAI)
+ MOZ_ASSERT((this->InstructionBits() & kBaseOpcodeMask) == OP_IMM &&
+ (this->Funct3Value() == 0b001 || this->Funct3Value() == 0b101));
+ // | 0A0000 | shamt | rs1 | funct3 | rd | opcode |
+ // 31 25 20
+ return this->Bits(kImm12Shift + 5, kImm12Shift);
+ }
+
+ inline int Shamt32() const {
+ // Valid only for shift instructions (SLLIW, SRLIW, SRAIW)
+ MOZ_ASSERT((this->InstructionBits() & kBaseOpcodeMask) == OP_IMM_32 &&
+ (this->Funct3Value() == 0b001 || this->Funct3Value() == 0b101));
+ // | 0A00000 | shamt | rs1 | funct3 | rd | opcode |
+ // 31 24 20
+ return this->Bits(kImm12Shift + 4, kImm12Shift);
+ }
+
+ inline int RvcImm6Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | imm[5] | rs1/rd | imm[4:0] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm6 = ((Bits & 0x1000) >> 7) | ((Bits & 0x7c) >> 2);
+ return imm6 << 26 >> 26;
+ }
+
+ inline int RvcImm6Addi16spValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | nzimm[9] | 2 | nzimm[4|6|8:7|5] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm10 = ((Bits & 0x1000) >> 3) | ((Bits & 0x40) >> 2) |
+ ((Bits & 0x20) << 1) | ((Bits & 0x18) << 4) |
+ ((Bits & 0x4) << 3);
+ MOZ_ASSERT(imm10 != 0);
+ return imm10 << 22 >> 22;
+ }
+
+ inline int RvcImm8Addi4spnValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | nzimm[11] | rd' | opcode |
+ // 15 13 5 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t uimm10 = ((Bits & 0x20) >> 2) | ((Bits & 0x40) >> 4) |
+ ((Bits & 0x780) >> 1) | ((Bits & 0x1800) >> 7);
+ MOZ_ASSERT(uimm10 != 0);
+ return uimm10;
+ }
+
+ inline int RvcShamt6() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | nzuimm[5] | rs1/rd | nzuimm[4:0] | opcode |
+ // 15 12 6 2
+ int32_t imm6 = this->RvcImm6Value();
+ return imm6 & 0x3f;
+ }
+
+ inline int RvcImm6LwspValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | uimm[5] | rs1 | uimm[4:2|7:6] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm8 =
+ ((Bits & 0x1000) >> 7) | ((Bits & 0x70) >> 2) | ((Bits & 0xc) << 4);
+ return imm8;
+ }
+
+ inline int RvcImm6LdspValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | uimm[5] | rs1 | uimm[4:3|8:6] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm9 =
+ ((Bits & 0x1000) >> 7) | ((Bits & 0x60) >> 2) | ((Bits & 0x1c) << 4);
+ return imm9;
+ }
+
+ inline int RvcImm6SwspValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | uimm[5:2|7:6] | rs2 | opcode |
+ // 15 12 7
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm8 = ((Bits & 0x1e00) >> 7) | ((Bits & 0x180) >> 1);
+ return imm8;
+ }
+
+ inline int RvcImm6SdspValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | uimm[5:3|8:6] | rs2 | opcode |
+ // 15 12 7
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm9 = ((Bits & 0x1c00) >> 7) | ((Bits & 0x380) >> 1);
+ return imm9;
+ }
+
+ inline int RvcImm5WValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | imm[5:3] | rs1 | imm[2|6] | rd | opcode |
+ // 15 12 10 6 4 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm7 =
+ ((Bits & 0x1c00) >> 7) | ((Bits & 0x40) >> 4) | ((Bits & 0x20) << 1);
+ return imm7;
+ }
+
+ inline int RvcImm5DValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | imm[5:3] | rs1 | imm[7:6] | rd | opcode |
+ // 15 12 10 6 4 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm8 = ((Bits & 0x1c00) >> 7) | ((Bits & 0x60) << 1);
+ return imm8;
+ }
+
+ inline int RvcImm11CJValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | [11|4|9:8|10|6|7|3:1|5] | opcode |
+ // 15 12 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm12 = ((Bits & 0x4) << 3) | ((Bits & 0x38) >> 2) |
+ ((Bits & 0x40) << 1) | ((Bits & 0x80) >> 1) |
+ ((Bits & 0x100) << 2) | ((Bits & 0x600) >> 1) |
+ ((Bits & 0x800) >> 7) | ((Bits & 0x1000) >> 1);
+ return imm12 << 20 >> 20;
+ }
+
+ inline int RvcImm8BValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | imm[8|4:3] | rs1` | imm[7:6|2:1|5] | opcode |
+ // 15 12 10 7 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm9 = ((Bits & 0x4) << 3) | ((Bits & 0x18) >> 2) |
+ ((Bits & 0x60) << 1) | ((Bits & 0xc00) >> 7) |
+ ((Bits & 0x1000) >> 4);
+ return imm9 << 23 >> 23;
+ }
+
+ inline int vl_vs_width() {
+ int width = 0;
+ if ((this->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+ (this->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+ return -1;
+ switch (this->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+ case 0x0:
+ width = 8;
+ break;
+ case 0x00005000:
+ width = 16;
+ break;
+ case 0x00006000:
+ width = 32;
+ break;
+ case 0x00007000:
+ width = 64;
+ break;
+ case 0x10000000:
+ width = 128;
+ break;
+ case 0x10005000:
+ width = 256;
+ break;
+ case 0x10006000:
+ width = 512;
+ break;
+ case 0x10007000:
+ width = 1024;
+ break;
+ default:
+ width = -1;
+ break;
+ }
+ return width;
+ }
+
+ uint32_t Rvvzimm() const;
+
+ uint32_t Rvvuimm() const;
+
+ inline uint32_t RvvVsew() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vsew = (zimm >> 3) & 0x7;
+ return vsew;
+ }
+
+ inline uint32_t RvvVlmul() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vlmul = zimm & 0x7;
+ return vlmul;
+ }
+
+ inline uint8_t RvvVM() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kRvvVmShift + kRvvVmBits - 1, kRvvVmShift);
+ }
+
+ inline const char* RvvSEW() const {
+ uint32_t vsew = this->RvvVsew();
+ switch (vsew) {
+#define CAST_VSEW(name) \
+ case name: \
+ return #name;
+ RVV_SEW(CAST_VSEW)
+ default:
+ return "unknown";
+#undef CAST_VSEW
+ }
+ }
+
+ inline const char* RvvLMUL() const {
+ uint32_t vlmul = this->RvvVlmul();
+ switch (vlmul) {
+#define CAST_VLMUL(name) \
+ case name: \
+ return #name;
+ RVV_LMUL(CAST_VLMUL)
+ default:
+ return "unknown";
+#undef CAST_VLMUL
+ }
+ }
+
+#define sext(x, len) (((int32_t)(x) << (32 - len)) >> (32 - len))
+#define zext(x, len) (((uint32_t)(x) << (32 - len)) >> (32 - len))
+
+ inline int32_t RvvSimm5() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType);
+ return sext(this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift),
+ kRvvImm5Bits);
+ }
+
+ inline uint32_t RvvUimm5() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType);
+ uint32_t imm = this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift);
+ return zext(imm, kRvvImm5Bits);
+ }
+#undef sext
+#undef zext
+ inline bool AqValue() const { return this->Bits(kAqShift, kAqShift); }
+
+ inline bool RlValue() const { return this->Bits(kRlShift, kRlShift); }
+};
+
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
+  // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ Instruction() = delete;
+ Instruction(const Instruction&) = delete;
+ Instruction& operator=(const Instruction&) = delete;
+};
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+ return (this->InstructionBits() == kBreakInstr);
+}
+
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_constant_Base_constant_riscv__h_
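For orientation, here is a minimal usage sketch of the getter interface defined above; the helper name, the printf output, and the assumption that pc points at a valid encoded instruction are illustrative and not part of the patch.

#include <stdio.h>

#include "jit/riscv64/constant/Base-constant-riscv.h"

namespace js {
namespace jit {

// Instruction::At() only reinterprets the pointer; no allocation happens.
static void DumpInstructionKind(byte* pc) {
  Instruction* instr = Instruction::At(pc);
  if (instr->IsShortInstruction()) {
    // 16-bit RVC encoding: the Rvc* accessors above expose its fields.
    printf("rvc insn, rd field = %d\n", instr->RvcRdValue());
  } else if (instr->IsTrap()) {
    printf("break instruction\n");
  } else {
    printf("base opcode bits = 0x%x\n",
           unsigned(instr->InstructionBits() & kBaseOpcodeMask));
  }
}

}  // namespace jit
}  // namespace js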
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-a.h b/js/src/jit/riscv64/constant/Constant-riscv-a.h
new file mode 100644
index 0000000000..718e607240
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-a.h
@@ -0,0 +1,43 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_a_h_
+#define jit_riscv64_constant_Constant_riscv64_a_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCVA : uint32_t {
+ // RV32A Standard Extension
+ RO_LR_W = AMO | (0b010 << kFunct3Shift) | (0b00010 << kFunct5Shift),
+ RO_SC_W = AMO | (0b010 << kFunct3Shift) | (0b00011 << kFunct5Shift),
+ RO_AMOSWAP_W = AMO | (0b010 << kFunct3Shift) | (0b00001 << kFunct5Shift),
+ RO_AMOADD_W = AMO | (0b010 << kFunct3Shift) | (0b00000 << kFunct5Shift),
+ RO_AMOXOR_W = AMO | (0b010 << kFunct3Shift) | (0b00100 << kFunct5Shift),
+ RO_AMOAND_W = AMO | (0b010 << kFunct3Shift) | (0b01100 << kFunct5Shift),
+ RO_AMOOR_W = AMO | (0b010 << kFunct3Shift) | (0b01000 << kFunct5Shift),
+ RO_AMOMIN_W = AMO | (0b010 << kFunct3Shift) | (0b10000 << kFunct5Shift),
+ RO_AMOMAX_W = AMO | (0b010 << kFunct3Shift) | (0b10100 << kFunct5Shift),
+ RO_AMOMINU_W = AMO | (0b010 << kFunct3Shift) | (0b11000 << kFunct5Shift),
+ RO_AMOMAXU_W = AMO | (0b010 << kFunct3Shift) | (0b11100 << kFunct5Shift),
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64A Standard Extension (in addition to RV32A)
+ RO_LR_D = AMO | (0b011 << kFunct3Shift) | (0b00010 << kFunct5Shift),
+ RO_SC_D = AMO | (0b011 << kFunct3Shift) | (0b00011 << kFunct5Shift),
+ RO_AMOSWAP_D = AMO | (0b011 << kFunct3Shift) | (0b00001 << kFunct5Shift),
+ RO_AMOADD_D = AMO | (0b011 << kFunct3Shift) | (0b00000 << kFunct5Shift),
+ RO_AMOXOR_D = AMO | (0b011 << kFunct3Shift) | (0b00100 << kFunct5Shift),
+ RO_AMOAND_D = AMO | (0b011 << kFunct3Shift) | (0b01100 << kFunct5Shift),
+ RO_AMOOR_D = AMO | (0b011 << kFunct3Shift) | (0b01000 << kFunct5Shift),
+ RO_AMOMIN_D = AMO | (0b011 << kFunct3Shift) | (0b10000 << kFunct5Shift),
+ RO_AMOMAX_D = AMO | (0b011 << kFunct3Shift) | (0b10100 << kFunct5Shift),
+ RO_AMOMINU_D = AMO | (0b011 << kFunct3Shift) | (0b11000 << kFunct5Shift),
+ RO_AMOMAXU_D = AMO | (0b011 << kFunct3Shift) | (0b11100 << kFunct5Shift),
+#endif // JS_CODEGEN_RISCV64
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_a_h_
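As a hedged illustration of how these composite RO_* values are matched against raw instruction words, the sketch below masks out the register and aq/rl fields before comparing; the mask is built locally from the shift constants rather than taken from Base-constant-riscv.h, and the helper name is invented here.

// True when insn is an LR.W or SC.W encoding, whatever its registers.
inline bool IsLrOrScW(uint32_t insn) {
  const uint32_t kAmoFieldMask = kBaseOpcodeMask |
                                 (uint32_t(0b111) << kFunct3Shift) |
                                 (uint32_t(0b11111) << kFunct5Shift);
  return (insn & kAmoFieldMask) == RO_LR_W ||
         (insn & kAmoFieldMask) == RO_SC_W;
}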
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-c.h b/js/src/jit/riscv64/constant/Constant-riscv-c.h
new file mode 100644
index 0000000000..a7d4792f5f
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-c.h
@@ -0,0 +1,61 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_c_h_
+#define jit_riscv64_constant_Constant_riscv64_c_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+enum OpcodeRISCVC : uint32_t {
+
+ RO_C_ADDI4SPN = C0 | (0b000 << kRvcFunct3Shift),
+ RO_C_ADDI16SP = C1 | (0b011 << kRvcFunct3Shift),
+ RO_C_LW = C0 | (0b010 << kRvcFunct3Shift),
+ RO_C_SW = C0 | (0b110 << kRvcFunct3Shift),
+ RO_C_NOP_ADDI = C1 | (0b000 << kRvcFunct3Shift),
+ RO_C_LI = C1 | (0b010 << kRvcFunct3Shift),
+ RO_C_SUB = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
+ RO_C_XOR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
+ RO_C_OR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_2 << kRvcFunct2Shift),
+ RO_C_AND = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_3 << kRvcFunct2Shift),
+ RO_C_LUI_ADD = C1 | (0b011 << kRvcFunct3Shift),
+ RO_C_MISC_ALU = C1 | (0b100 << kRvcFunct3Shift),
+ RO_C_J = C1 | (0b101 << kRvcFunct3Shift),
+ RO_C_BEQZ = C1 | (0b110 << kRvcFunct3Shift),
+ RO_C_BNEZ = C1 | (0b111 << kRvcFunct3Shift),
+ RO_C_SLLI = C2 | (0b000 << kRvcFunct3Shift),
+ RO_C_LWSP = C2 | (0b010 << kRvcFunct3Shift),
+ RO_C_JR_MV_ADD = C2 | (0b100 << kRvcFunct3Shift),
+ RO_C_JR = C2 | (0b1000 << kRvcFunct4Shift),
+ RO_C_MV = C2 | (0b1000 << kRvcFunct4Shift),
+ RO_C_EBREAK = C2 | (0b1001 << kRvcFunct4Shift),
+ RO_C_JALR = C2 | (0b1001 << kRvcFunct4Shift),
+ RO_C_ADD = C2 | (0b1001 << kRvcFunct4Shift),
+ RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift),
+
+ RO_C_FSD = C0 | (0b101 << kRvcFunct3Shift),
+ RO_C_FLD = C0 | (0b001 << kRvcFunct3Shift),
+ RO_C_FLDSP = C2 | (0b001 << kRvcFunct3Shift),
+ RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift),
+#ifdef JS_CODEGEN_RISCV64
+ RO_C_LD = C0 | (0b011 << kRvcFunct3Shift),
+ RO_C_SD = C0 | (0b111 << kRvcFunct3Shift),
+ RO_C_LDSP = C2 | (0b011 << kRvcFunct3Shift),
+ RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift),
+ RO_C_ADDIW = C1 | (0b001 << kRvcFunct3Shift),
+ RO_C_SUBW =
+ C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
+ RO_C_ADDW =
+ C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
+#endif
+#ifdef JS_CODEGEN_RISCV32
+ RO_C_FLWSP = C2 | (0b011 << kRvcFunct3Shift),
+ RO_C_FSWSP = C2 | (0b111 << kRvcFunct3Shift),
+ RO_C_FLW = C0 | (0b011 << kRvcFunct3Shift),
+ RO_C_FSW = C0 | (0b111 << kRvcFunct3Shift),
+#endif
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_constant_Constant_riscv64_c_h_
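Several of the C2-quadrant entries above share an encoding (RO_C_JR/RO_C_MV, and RO_C_EBREAK/RO_C_JALR/RO_C_ADD); per the RVC spec the zero/non-zero register fields break the tie. A hedged sketch of that disambiguation, using the RvcRs2Value()/RvcRdValue() getters from Base-constant-riscv.h (the helper name and its bool parameter are illustrative):

// funct4 == 0b1000: rs2 == 0 selects C.JR, otherwise C.MV.
// funct4 == 0b1001: rs2 != 0 is C.ADD; rs2 == 0 with rd == 0 is C.EBREAK,
// and rs2 == 0 with rd != 0 is C.JALR.
inline const char* ClassifyC2Funct4(Instruction* instr, bool funct4_is_1001) {
  if (!funct4_is_1001) {
    return instr->RvcRs2Value() == 0 ? "c.jr" : "c.mv";
  }
  if (instr->RvcRs2Value() != 0) {
    return "c.add";
  }
  return instr->RvcRdValue() == 0 ? "c.ebreak" : "c.jalr";
}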
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-d.h b/js/src/jit/riscv64/constant/Constant-riscv-d.h
new file mode 100644
index 0000000000..d97e44ffe5
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-d.h
@@ -0,0 +1,55 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_d_h_
+#define jit_riscv64_constant_Constant_riscv64_d_h_
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCVD : uint32_t {
+ // RV32D Standard Extension
+ RO_FLD = LOAD_FP | (0b011 << kFunct3Shift),
+ RO_FSD = STORE_FP | (0b011 << kFunct3Shift),
+ RO_FMADD_D = MADD | (0b01 << kFunct2Shift),
+ RO_FMSUB_D = MSUB | (0b01 << kFunct2Shift),
+ RO_FNMSUB_D = NMSUB | (0b01 << kFunct2Shift),
+ RO_FNMADD_D = NMADD | (0b01 << kFunct2Shift),
+ RO_FADD_D = OP_FP | (0b0000001 << kFunct7Shift),
+ RO_FSUB_D = OP_FP | (0b0000101 << kFunct7Shift),
+ RO_FMUL_D = OP_FP | (0b0001001 << kFunct7Shift),
+ RO_FDIV_D = OP_FP | (0b0001101 << kFunct7Shift),
+ RO_FSQRT_D = OP_FP | (0b0101101 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FSGNJ_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
+ RO_FSGNJN_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
+ RO_FSQNJX_D = OP_FP | (0b010 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
+ RO_FMIN_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
+ RO_FMAX_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
+ RO_FCVT_S_D = OP_FP | (0b0100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FCVT_D_S = OP_FP | (0b0100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FEQ_D = OP_FP | (0b010 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
+ RO_FLT_D = OP_FP | (0b001 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
+ RO_FLE_D = OP_FP | (0b000 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
+ RO_FCLASS_D = OP_FP | (0b001 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift),
+ RO_FCVT_W_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_WU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FCVT_D_W = OP_FP | (0b1101001 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_D_WU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00001 << kRs2Shift),
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64D Standard Extension (in addition to RV32D)
+ RO_FCVT_L_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_LU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00011 << kRs2Shift),
+ RO_FMV_X_D = OP_FP | (0b000 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift),
+ RO_FCVT_D_L = OP_FP | (0b1101001 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_D_LU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00011 << kRs2Shift),
+ RO_FMV_D_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift),
+#endif
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_d_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-f.h b/js/src/jit/riscv64/constant/Constant-riscv-f.h
new file mode 100644
index 0000000000..28c96394e2
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-f.h
@@ -0,0 +1,51 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_f_h_
+#define jit_riscv64_constant_Constant_riscv64_f_h_
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCVF : uint32_t {
+ // RV32F Standard Extension
+ RO_FLW = LOAD_FP | (0b010 << kFunct3Shift),
+ RO_FSW = STORE_FP | (0b010 << kFunct3Shift),
+ RO_FMADD_S = MADD | (0b00 << kFunct2Shift),
+ RO_FMSUB_S = MSUB | (0b00 << kFunct2Shift),
+ RO_FNMSUB_S = NMSUB | (0b00 << kFunct2Shift),
+ RO_FNMADD_S = NMADD | (0b00 << kFunct2Shift),
+ RO_FADD_S = OP_FP | (0b0000000 << kFunct7Shift),
+ RO_FSUB_S = OP_FP | (0b0000100 << kFunct7Shift),
+ RO_FMUL_S = OP_FP | (0b0001000 << kFunct7Shift),
+ RO_FDIV_S = OP_FP | (0b0001100 << kFunct7Shift),
+ RO_FSQRT_S = OP_FP | (0b0101100 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FSGNJ_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
+ RO_FSGNJN_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
+ RO_FSQNJX_S = OP_FP | (0b010 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
+ RO_FMIN_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
+ RO_FMAX_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
+ RO_FCVT_W_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_WU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FMV = OP_FP | (0b1110000 << kFunct7Shift) | (0b000 << kFunct3Shift) |
+ (0b00000 << kRs2Shift),
+ RO_FEQ_S = OP_FP | (0b010 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
+ RO_FLT_S = OP_FP | (0b001 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
+ RO_FLE_S = OP_FP | (0b000 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
+ RO_FCLASS_S = OP_FP | (0b001 << kFunct3Shift) | (0b1110000 << kFunct7Shift),
+ RO_FCVT_S_W = OP_FP | (0b1101000 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_S_WU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FMV_W_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111000 << kFunct7Shift),
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64F Standard Extension (in addition to RV32F)
+ RO_FCVT_L_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_LU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00011 << kRs2Shift),
+ RO_FCVT_S_L = OP_FP | (0b1101000 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_S_LU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00011 << kRs2Shift),
+#endif // JS_CODEGEN_RISCV64
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_f_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-i.h b/js/src/jit/riscv64/constant/Constant-riscv-i.h
new file mode 100644
index 0000000000..586ffd8a14
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-i.h
@@ -0,0 +1,73 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_i_h_
+#define jit_riscv64_constant_Constant_riscv64_i_h_
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCV32I : uint32_t {
+  // Note: the RO (RISC-V Opcode) prefix marks these constants.
+ // RV32I Base Instruction Set
+ RO_LUI = LUI,
+ RO_AUIPC = AUIPC,
+ RO_JAL = JAL,
+ RO_JALR = JALR | (0b000 << kFunct3Shift),
+ RO_BEQ = BRANCH | (0b000 << kFunct3Shift),
+ RO_BNE = BRANCH | (0b001 << kFunct3Shift),
+ RO_BLT = BRANCH | (0b100 << kFunct3Shift),
+ RO_BGE = BRANCH | (0b101 << kFunct3Shift),
+ RO_BLTU = BRANCH | (0b110 << kFunct3Shift),
+ RO_BGEU = BRANCH | (0b111 << kFunct3Shift),
+ RO_LB = LOAD | (0b000 << kFunct3Shift),
+ RO_LH = LOAD | (0b001 << kFunct3Shift),
+ RO_LW = LOAD | (0b010 << kFunct3Shift),
+ RO_LBU = LOAD | (0b100 << kFunct3Shift),
+ RO_LHU = LOAD | (0b101 << kFunct3Shift),
+ RO_SB = STORE | (0b000 << kFunct3Shift),
+ RO_SH = STORE | (0b001 << kFunct3Shift),
+ RO_SW = STORE | (0b010 << kFunct3Shift),
+ RO_ADDI = OP_IMM | (0b000 << kFunct3Shift),
+ RO_SLTI = OP_IMM | (0b010 << kFunct3Shift),
+ RO_SLTIU = OP_IMM | (0b011 << kFunct3Shift),
+ RO_XORI = OP_IMM | (0b100 << kFunct3Shift),
+ RO_ORI = OP_IMM | (0b110 << kFunct3Shift),
+ RO_ANDI = OP_IMM | (0b111 << kFunct3Shift),
+ RO_SLLI = OP_IMM | (0b001 << kFunct3Shift),
+ RO_SRLI = OP_IMM | (0b101 << kFunct3Shift),
+  // RO_SRAI = OP_IMM | (0b101 << kFunct3Shift), // Same as SRLI, use funct7
+ RO_ADD = OP | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SUB = OP | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+ RO_SLL = OP | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SLT = OP | (0b010 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SLTU = OP | (0b011 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_XOR = OP | (0b100 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRL = OP | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRA = OP | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+ RO_OR = OP | (0b110 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_AND = OP | (0b111 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_FENCE = MISC_MEM | (0b000 << kFunct3Shift),
+ RO_ECALL = SYSTEM | (0b000 << kFunct3Shift),
+// RO_EBREAK = SYSTEM | (0b000 << kFunct3Shift), // Same as ECALL, use imm12
+
+#if JS_CODEGEN_RISCV64
+ // RV64I Base Instruction Set (in addition to RV32I)
+ RO_LWU = LOAD | (0b110 << kFunct3Shift),
+ RO_LD = LOAD | (0b011 << kFunct3Shift),
+ RO_SD = STORE | (0b011 << kFunct3Shift),
+ RO_ADDIW = OP_IMM_32 | (0b000 << kFunct3Shift),
+ RO_SLLIW = OP_IMM_32 | (0b001 << kFunct3Shift),
+ RO_SRLIW = OP_IMM_32 | (0b101 << kFunct3Shift),
+  // RO_SRAIW = OP_IMM_32 | (0b101 << kFunct3Shift), // Same as SRLIW, use funct7
+ RO_ADDW = OP_32 | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SUBW = OP_32 | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+ RO_SLLW = OP_32 | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRLW = OP_32 | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRAW = OP_32 | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+#endif
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_i_h_
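For context, a hedged sketch of how one of these RO_* values combines with the standard RV32I field positions (rd at bit 7, rs1 at bit 15, imm[11:0] at bit 20) into a complete instruction word; the local shift constants and the helper name are spelled out here rather than taken from Base-constant-riscv.h.

// Encode "addi rd, rs1, imm12" as a raw 32-bit word. RO_ADDI already carries
// the base opcode and funct3 bits; the registers and the low 12 immediate
// bits are OR-ed into their standard positions.
inline uint32_t EncodeAddi(uint32_t rd, uint32_t rs1, int32_t imm12) {
  const int kLocalRdShift = 7;
  const int kLocalRs1Shift = 15;
  const int kLocalImm12Shift = 20;
  return RO_ADDI | (rd << kLocalRdShift) | (rs1 << kLocalRs1Shift) |
         ((uint32_t(imm12) & 0xfff) << kLocalImm12Shift);
}

EncodeAddi(10, 10, 1), for example, yields the word for "addi a0, a0, 1".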
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-m.h b/js/src/jit/riscv64/constant/Constant-riscv-m.h
new file mode 100644
index 0000000000..81a69dab41
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-m.h
@@ -0,0 +1,34 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_m_h_
+#define jit_riscv64_constant_Constant_riscv64_m_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCVM : uint32_t {
+ // RV32M Standard Extension
+ RO_MUL = OP | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_MULH = OP | (0b001 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_MULHSU = OP | (0b010 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_MULHU = OP | (0b011 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIV = OP | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIVU = OP | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REM = OP | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REMU = OP | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64M Standard Extension (in addition to RV32M)
+ RO_MULW = OP_32 | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIVW = OP_32 | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIVUW = OP_32 | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REMW = OP_32 | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REMUW = OP_32 | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+#endif
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_m_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-v.h b/js/src/jit/riscv64/constant/Constant-riscv-v.h
new file mode 100644
index 0000000000..cca3540efd
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-v.h
@@ -0,0 +1,508 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_v_h_
+#define jit_riscv64_constant_Constant_riscv64_v_h_
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+namespace RVV {
+enum TailAgnosticType {
+ ta = 0x1, // Tail agnostic
+ tu = 0x0, // Tail undisturbed
+};
+
+enum MaskAgnosticType {
+ ma = 0x1, // Mask agnostic
+ mu = 0x0, // Mask undisturbed
+};
+enum MaskType {
+ Mask = 0x0, // use the mask
+ NoMask = 0x1,
+};
+} // namespace RVV
+
+enum OpcodeRISCVV : uint32_t {
+ // RVV Extension
+ OP_IVV = OP_V | (0b000 << kFunct3Shift),
+ OP_FVV = OP_V | (0b001 << kFunct3Shift),
+ OP_MVV = OP_V | (0b010 << kFunct3Shift),
+ OP_IVI = OP_V | (0b011 << kFunct3Shift),
+ OP_IVX = OP_V | (0b100 << kFunct3Shift),
+ OP_FVF = OP_V | (0b101 << kFunct3Shift),
+ OP_MVX = OP_V | (0b110 << kFunct3Shift),
+
+ RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31,
+ RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30,
+ RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31,
+
+ // RVV LOAD/STORE
+ RO_V_VL = LOAD_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLS = LOAD_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLX = LOAD_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+
+ RO_V_VS = STORE_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSS = STORE_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSX = STORE_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSU = STORE_FP | (0b01 << kRvvMopShift) | (0b000 << kRvvNfShift),
+  // Note: the mop field occupies the bits at kFunct6Shift.
+ RO_V_VLSEG2 = LOAD_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSEG3 = LOAD_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSEG4 = LOAD_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSEG5 = LOAD_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSEG6 = LOAD_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSEG7 = LOAD_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSEG8 = LOAD_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSEG2 = STORE_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSEG3 = STORE_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSEG4 = STORE_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSEG5 = STORE_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSEG6 = STORE_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSEG7 = STORE_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSEG8 = STORE_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLSSEG2 = LOAD_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSSEG3 = LOAD_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSSEG4 = LOAD_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSSEG5 = LOAD_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSSEG6 = LOAD_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSSEG7 = LOAD_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSSEG8 = LOAD_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSSEG2 = STORE_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSSEG3 = STORE_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSSEG4 = STORE_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSSEG5 = STORE_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSSEG6 = STORE_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSSEG7 = STORE_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSSEG8 = STORE_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLXSEG2 = LOAD_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLXSEG3 = LOAD_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLXSEG4 = LOAD_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLXSEG5 = LOAD_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLXSEG6 = LOAD_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLXSEG7 = LOAD_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLXSEG8 = LOAD_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSXSEG2 = STORE_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSXSEG3 = STORE_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSXSEG4 = STORE_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSXSEG5 = STORE_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSXSEG6 = STORE_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSXSEG7 = STORE_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSXSEG8 = STORE_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ // RVV Vector Arithmetic Instruction
+ VADD_FUNCT6 = 0b000000,
+ RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSUB_FUNCT6 = 0b000010,
+ RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VDIVU_FUNCT6 = 0b100000,
+ RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift),
+
+ VDIV_FUNCT6 = 0b100001,
+ RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift),
+
+ VREMU_FUNCT6 = 0b100010,
+ RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift),
+
+ VREM_FUNCT6 = 0b100011,
+ RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift),
+
+ VMULHU_FUNCT6 = 0b100100,
+ RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift),
+
+ VMUL_FUNCT6 = 0b100101,
+ RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VWMUL_FUNCT6 = 0b111011,
+ RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VWMULU_FUNCT6 = 0b111000,
+ RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift),
+
+ VMULHSU_FUNCT6 = 0b100110,
+ RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
+
+ VMULH_FUNCT6 = 0b100111,
+ RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift),
+
+ VWADD_FUNCT6 = 0b110001,
+ RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift),
+
+ VWADDU_FUNCT6 = 0b110000,
+ RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift),
+
+ VWADDUW_FUNCT6 = 0b110101,
+ RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
+
+ VCOMPRESS_FUNCT6 = 0b010111,
+ RO_V_VCOMPRESS_VV = OP_MVV | (VCOMPRESS_FUNCT6 << kRvvFunct6Shift),
+
+ VSADDU_FUNCT6 = 0b100000,
+ RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+
+ VSADD_FUNCT6 = 0b100001,
+ RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUB_FUNCT6 = 0b100011,
+ RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUBU_FUNCT6 = 0b100010,
+ RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+
+ VRSUB_FUNCT6 = 0b000011,
+ RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VMINU_FUNCT6 = 0b000100,
+ RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift),
+
+ VMIN_FUNCT6 = 0b000101,
+ RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VMAXU_FUNCT6 = 0b000110,
+ RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+
+ VMAX_FUNCT6 = 0b000111,
+ RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VAND_FUNCT6 = 0b001001,
+ RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift),
+
+ VOR_FUNCT6 = 0b001010,
+ RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift),
+
+ VXOR_FUNCT6 = 0b001011,
+ RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift),
+
+ VRGATHER_FUNCT6 = 0b001100,
+ RO_V_VRGATHER_VI = OP_IVI | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VV = OP_IVV | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VX = OP_IVX | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+
+ VMV_FUNCT6 = 0b010111,
+ RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMV_VF = OP_FVF | (VMV_FUNCT6 << kRvvFunct6Shift),
+
+ RO_V_VMERGE_VI = RO_V_VMV_VI,
+ RO_V_VMERGE_VV = RO_V_VMV_VV,
+ RO_V_VMERGE_VX = RO_V_VMV_VX,
+
+ VMSEQ_FUNCT6 = 0b011000,
+ RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+
+ VMSNE_FUNCT6 = 0b011001,
+ RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLTU_FUNCT6 = 0b011010,
+ RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLT_FUNCT6 = 0b011011,
+ RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLE_FUNCT6 = 0b011101,
+ RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLEU_FUNCT6 = 0b011100,
+ RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGTU_FUNCT6 = 0b011110,
+ RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGT_FUNCT6 = 0b011111,
+ RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEUP_FUNCT6 = 0b001110,
+ RO_V_VSLIDEUP_VI = OP_IVI | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEUP_VX = OP_IVX | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEDOWN_FUNCT6 = 0b001111,
+ RO_V_VSLIDEDOWN_VI = OP_IVI | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEDOWN_VX = OP_IVX | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+
+ VSRL_FUNCT6 = 0b101000,
+ RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),
+
+ VSRA_FUNCT6 = 0b101001,
+ RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift),
+
+ VSLL_FUNCT6 = 0b100101,
+ RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),
+
+ VSMUL_FUNCT6 = 0b100111,
+ RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VADC_FUNCT6 = 0b010000,
+ RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift),
+
+ VMADC_FUNCT6 = 0b010001,
+ RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift),
+
+ VWXUNARY0_FUNCT6 = 0b010000,
+ VRXUNARY0_FUNCT6 = 0b010000,
+ VMUNARY0_FUNCT6 = 0b010100,
+
+ RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VID_V = 0b10001,
+
+ VXUNARY0_FUNCT6 = 0b010010,
+ RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VWFUNARY0_FUNCT6 = 0b010000,
+ RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VRFUNARY0_FUNCT6 = 0b010000,
+ RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMAXU_FUNCT6 = 0b000110,
+ RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
+ VREDMAX_FUNCT6 = 0b000111,
+ RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMINU_FUNCT6 = 0b000100,
+ RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift),
+ VREDMIN_FUNCT6 = 0b000101,
+ RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VFUNARY0_FUNCT6 = 0b010010,
+ RO_V_VFUNARY0 = OP_FVV | (VFUNARY0_FUNCT6 << kRvvFunct6Shift),
+ VFUNARY1_FUNCT6 = 0b010011,
+ RO_V_VFUNARY1 = OP_FVV | (VFUNARY1_FUNCT6 << kRvvFunct6Shift),
+
+ VFCVT_XU_F_V = 0b00000,
+ VFCVT_X_F_V = 0b00001,
+ VFCVT_F_XU_V = 0b00010,
+ VFCVT_F_X_V = 0b00011,
+ VFWCVT_XU_F_V = 0b01000,
+ VFWCVT_X_F_V = 0b01001,
+ VFWCVT_F_XU_V = 0b01010,
+ VFWCVT_F_X_V = 0b01011,
+ VFWCVT_F_F_V = 0b01100,
+ VFNCVT_F_F_W = 0b10100,
+ VFNCVT_X_F_W = 0b10001,
+ VFNCVT_XU_F_W = 0b10000,
+
+ VFCLASS_V = 0b10000,
+ VFSQRT_V = 0b00000,
+ VFRSQRT7_V = 0b00100,
+ VFREC7_V = 0b00101,
+
+ VFADD_FUNCT6 = 0b000000,
+ RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFADD_VF = OP_FVF | (VFADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFSUB_FUNCT6 = 0b000010,
+ RO_V_VFSUB_VV = OP_FVV | (VFSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSUB_VF = OP_FVF | (VFSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFDIV_FUNCT6 = 0b100000,
+ RO_V_VFDIV_VV = OP_FVV | (VFDIV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFDIV_VF = OP_FVF | (VFDIV_FUNCT6 << kRvvFunct6Shift),
+
+ VFMUL_FUNCT6 = 0b100100,
+ RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Add/Subtract Instructions
+ VFWADD_FUNCT6 = 0b110000,
+ RO_V_VFWADD_VV = OP_FVV | (VFWADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWADD_VF = OP_FVF | (VFWADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFWSUB_FUNCT6 = 0b110010,
+ RO_V_VFWSUB_VV = OP_FVV | (VFWSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWSUB_VF = OP_FVF | (VFWSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFWADD_W_FUNCT6 = 0b110100,
+ RO_V_VFWADD_W_VV = OP_FVV | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWADD_W_VF = OP_FVF | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),
+
+ VFWSUB_W_FUNCT6 = 0b110110,
+ RO_V_VFWSUB_W_VV = OP_FVV | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWSUB_W_VF = OP_FVF | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Reduction Instructions
+ VFWREDUSUM_FUNCT6 = 0b110001,
+ RO_V_VFWREDUSUM_VV = OP_FVV | (VFWREDUSUM_FUNCT6 << kRvvFunct6Shift),
+
+ VFWREDOSUM_FUNCT6 = 0b110011,
+ RO_V_VFWREDOSUM_VV = OP_FVV | (VFWREDOSUM_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Multiply
+ VFWMUL_FUNCT6 = 0b111000,
+ RO_V_VFWMUL_VV = OP_FVV | (VFWMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWMUL_VF = OP_FVF | (VFWMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VMFEQ_FUNCT6 = 0b011000,
+ RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
+
+ VMFNE_FUNCT6 = 0b011100,
+ RO_V_VMFNE_VV = OP_FVV | (VMFNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFNE_VF = OP_FVF | (VMFNE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFLT_FUNCT6 = 0b011011,
+ RO_V_VMFLT_VV = OP_FVV | (VMFLT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFLT_VF = OP_FVF | (VMFLT_FUNCT6 << kRvvFunct6Shift),
+
+ VMFLE_FUNCT6 = 0b011001,
+ RO_V_VMFLE_VV = OP_FVV | (VMFLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFLE_VF = OP_FVF | (VMFLE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFGE_FUNCT6 = 0b011111,
+ RO_V_VMFGE_VF = OP_FVF | (VMFGE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFGT_FUNCT6 = 0b011101,
+ RO_V_VMFGT_VF = OP_FVF | (VMFGT_FUNCT6 << kRvvFunct6Shift),
+
+ VFMAX_FUNCT6 = 0b000110,
+ RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VFREDMAX_FUNCT6 = 0b0001111,
+ RO_V_VFREDMAX_VV = OP_FVV | (VFREDMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VFMIN_FUNCT6 = 0b000100,
+ RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJ_FUNCT6 = 0b001000,
+ RO_V_VFSGNJ_VV = OP_FVV | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJ_VF = OP_FVF | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJN_FUNCT6 = 0b001001,
+ RO_V_VFSGNJN_VV = OP_FVV | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJN_VF = OP_FVF | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJX_FUNCT6 = 0b001010,
+ RO_V_VFSGNJX_VV = OP_FVV | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJX_VF = OP_FVF | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
+
+ VFMADD_FUNCT6 = 0b101000,
+ RO_V_VFMADD_VV = OP_FVV | (VFMADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMADD_VF = OP_FVF | (VFMADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFNMADD_FUNCT6 = 0b101001,
+ RO_V_VFNMADD_VV = OP_FVV | (VFNMADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFNMADD_VF = OP_FVF | (VFNMADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFMSUB_FUNCT6 = 0b101010,
+ RO_V_VFMSUB_VV = OP_FVV | (VFMSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMSUB_VF = OP_FVF | (VFMSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFNMSUB_FUNCT6 = 0b101011,
+ RO_V_VFNMSUB_VV = OP_FVV | (VFNMSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFNMSUB_VF = OP_FVF | (VFNMSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFMACC_FUNCT6 = 0b101100,
+ RO_V_VFMACC_VV = OP_FVV | (VFMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMACC_VF = OP_FVF | (VFMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFNMACC_FUNCT6 = 0b101101,
+ RO_V_VFNMACC_VV = OP_FVV | (VFNMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFNMACC_VF = OP_FVF | (VFNMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFMSAC_FUNCT6 = 0b101110,
+ RO_V_VFMSAC_VV = OP_FVV | (VFMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMSAC_VF = OP_FVF | (VFMSAC_FUNCT6 << kRvvFunct6Shift),
+
+ VFNMSAC_FUNCT6 = 0b101111,
+ RO_V_VFNMSAC_VV = OP_FVV | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFNMSAC_VF = OP_FVF | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Fused Multiply-Add Instructions
+ VFWMACC_FUNCT6 = 0b111100,
+ RO_V_VFWMACC_VV = OP_FVV | (VFWMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWMACC_VF = OP_FVF | (VFWMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFWNMACC_FUNCT6 = 0b111101,
+ RO_V_VFWNMACC_VV = OP_FVV | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWNMACC_VF = OP_FVF | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFWMSAC_FUNCT6 = 0b111110,
+ RO_V_VFWMSAC_VV = OP_FVV | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWMSAC_VF = OP_FVF | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),
+
+ VFWNMSAC_FUNCT6 = 0b111111,
+ RO_V_VFWNMSAC_VV = OP_FVV | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWNMSAC_VF = OP_FVF | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),
+
+ VNCLIP_FUNCT6 = 0b101111,
+ RO_V_VNCLIP_WV = OP_IVV | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VNCLIP_WX = OP_IVX | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VNCLIP_WI = OP_IVI | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
+
+ VNCLIPU_FUNCT6 = 0b101110,
+ RO_V_VNCLIPU_WV = OP_IVV | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VNCLIPU_WX = OP_IVX | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VNCLIPU_WI = OP_IVI | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_v_h_
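The vsetvli zimm consumed by RvvVsew()/RvvVlmul() in Base-constant-riscv.h packs vlmul in bits [2:0] and vsew in bits [5:3]; placing the tail/mask-agnostic flags at bits 6 and 7 below follows the RVV 1.0 vtype layout and is an assumption of this sketch, not something the patch defines.

// Build a vtype immediate from its component fields.
inline uint32_t BuildVtypeZimm(uint32_t vsew, uint32_t vlmul,
                               RVV::TailAgnosticType vta,
                               RVV::MaskAgnosticType vma) {
  return (vlmul & 0x7) |         // vlmul[2:0]
         ((vsew & 0x7) << 3) |   // vsew[5:3]
         (uint32_t(vta) << 6) |  // vta (assumed bit 6)
         (uint32_t(vma) << 7);   // vma (assumed bit 7)
}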
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-zicsr.h b/js/src/jit/riscv64/constant/Constant-riscv-zicsr.h
new file mode 100644
index 0000000000..6fecfa3d92
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-zicsr.h
@@ -0,0 +1,30 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_zicsr_h_
+#define jit_riscv64_constant_Constant_riscv64_zicsr_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+// RISC-V CSR-related bit masks and shifts
+const int kFcsrFlagsBits = 5;
+const uint32_t kFcsrFlagsMask = (1 << kFcsrFlagsBits) - 1;
+const int kFcsrFrmBits = 3;
+const int kFcsrFrmShift = kFcsrFlagsBits;
+const uint32_t kFcsrFrmMask = ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift;
+const int kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits;
+const uint32_t kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask;
+
+enum OpcodeRISCVZICSR : uint32_t {
+ // RV32/RV64 Zicsr Standard Extension
+ RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift),
+ RO_CSRRS = SYSTEM | (0b010 << kFunct3Shift),
+ RO_CSRRC = SYSTEM | (0b011 << kFunct3Shift),
+ RO_CSRRWI = SYSTEM | (0b101 << kFunct3Shift),
+ RO_CSRRSI = SYSTEM | (0b110 << kFunct3Shift),
+ RO_CSRRCI = SYSTEM | (0b111 << kFunct3Shift),
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_constant_Constant_riscv64_zicsr_h_
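A minimal sketch of how the fcsr masks above split a raw fcsr value into its accrued exception flags and rounding mode; the helper name and out-parameters are illustrative.

inline void SplitFcsr(uint32_t fcsr, uint32_t* fflags, uint32_t* frm) {
  *fflags = fcsr & kFcsrFlagsMask;                // NV/DZ/OF/UF/NX flags
  *frm = (fcsr & kFcsrFrmMask) >> kFcsrFrmShift;  // rounding mode field
}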
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-zifencei.h b/js/src/jit/riscv64/constant/Constant-riscv-zifencei.h
new file mode 100644
index 0000000000..be01cd0ae0
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-zifencei.h
@@ -0,0 +1,15 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_zifencei_h_
+#define jit_riscv64_constant_Constant_riscv64_zifencei_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+enum OpcodeRISCVIFENCEI : uint32_t {
+ RO_FENCE_I = MISC_MEM | (0b001 << kFunct3Shift),
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_constant_Constant_riscv64_zifencei_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv64.h b/js/src/jit/riscv64/constant/Constant-riscv64.h
new file mode 100644
index 0000000000..b9b1f894e7
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv64.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_constant_Constant_riscv64_h
+#define jit_riscv64_constant_Constant_riscv64_h
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <stdio.h>
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+#include "jit/riscv64/constant/Constant-riscv-a.h"
+#include "jit/riscv64/constant/Constant-riscv-c.h"
+#include "jit/riscv64/constant/Constant-riscv-d.h"
+#include "jit/riscv64/constant/Constant-riscv-f.h"
+#include "jit/riscv64/constant/Constant-riscv-i.h"
+#include "jit/riscv64/constant/Constant-riscv-m.h"
+#include "jit/riscv64/constant/Constant-riscv-v.h"
+#include "jit/riscv64/constant/Constant-riscv-zicsr.h"
+#include "jit/riscv64/constant/Constant-riscv-zifencei.h"
+
+namespace js {
+namespace jit {
+
+// A reasonable (i.e., safe) buffer size for the disassembly of a single
+// instruction.
+const int ReasonableBufferSize = 256;
+
+// Difference between address of current opcode and value read from pc
+// register.
+static constexpr int kPcLoadDelta = 4;
+
+// Bits available for offset field in branches
+static constexpr int kBranchOffsetBits = 13;
+
+// Bits available for offset field in jump
+static constexpr int kJumpOffsetBits = 21;
+
+// Bits available for offset field in compressed jump
+static constexpr int kCJalOffsetBits = 12;
+
+// Bits available for offset field in compressed branch
+static constexpr int kCBranchOffsetBits = 9;
+
+// Max offset for b instructions with 12-bit offset field (multiple of 2)
+static constexpr int kMaxBranchOffset = (1 << (kBranchOffsetBits - 1)) - 1;
+
+static constexpr int kCBranchOffset = (1 << (kCBranchOffsetBits - 1)) - 1;
+// Max offset for jal instruction with 20-bit offset field (multiple of 2)
+static constexpr int kMaxJumpOffset = (1 << (kJumpOffsetBits - 1)) - 1;
+
+static constexpr int kCJumpOffset = (1 << (kCJalOffsetBits - 1)) - 1;
+
+static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
+
+static_assert(kCJalOffsetBits == kOffset12);
+static_assert(kCBranchOffsetBits == kOffset9);
+static_assert(kJumpOffsetBits == kOffset21);
+static_assert(kBranchOffsetBits == kOffset13);
+// V8Vector (see constant/util-riscv64.h) is kept as used by the original V8
+// code to allow for minimal modification; it functions exactly like a
+// character array with helper methods.
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_h
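As a hedged illustration of how the range constants above are typically used when deciding whether a PC-relative branch or jump can be encoded directly; the helper names and the int64_t offsets are illustrative.

// Conditional branches carry a 13-bit signed, even byte offset.
inline bool BranchOffsetFits(int64_t offset) {
  return (offset & 1) == 0 && offset >= -kMaxBranchOffset - 1 &&
         offset <= kMaxBranchOffset;
}

// JAL carries a 21-bit signed, even byte offset.
inline bool JumpOffsetFits(int64_t offset) {
  return (offset & 1) == 0 && offset >= -kMaxJumpOffset - 1 &&
         offset <= kMaxJumpOffset;
}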
diff --git a/js/src/jit/riscv64/constant/util-riscv64.h b/js/src/jit/riscv64/constant/util-riscv64.h
new file mode 100644
index 0000000000..089e0f3b94
--- /dev/null
+++ b/js/src/jit/riscv64/constant/util-riscv64.h
@@ -0,0 +1,82 @@
+
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_util_riscv64__h_
+#define jit_riscv64_constant_util_riscv64__h_
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+namespace js {
+namespace jit {
+template <typename T>
+class V8Vector {
+ public:
+ V8Vector() : start_(nullptr), length_(0) {}
+ V8Vector(T* data, int length) : start_(data), length_(length) {
+ MOZ_ASSERT(length == 0 || (length > 0 && data != nullptr));
+ }
+
+ // Returns the length of the vector.
+ int length() const { return length_; }
+
+ // Returns the pointer to the start of the data in the vector.
+ T* start() const { return start_; }
+
+ // Access individual vector elements - checks bounds in debug mode.
+ T& operator[](int index) const {
+ MOZ_ASSERT(0 <= index && index < length_);
+ return start_[index];
+ }
+
+ inline V8Vector<T> operator+(int offset) {
+ MOZ_ASSERT(offset < length_);
+ return V8Vector<T>(start_ + offset, length_ - offset);
+ }
+
+ protected:
+  // Lets EmbeddedVector (below) re-point a copied vector at its own buffer.
+  void set_start(T* start) { start_ = start; }
+
+ private:
+  T* start_;
+  int length_;
+};
+
+template <typename T, int kSize>
+class EmbeddedVector : public V8Vector<T> {
+ public:
+ EmbeddedVector() : V8Vector<T>(buffer_, kSize) {}
+
+ explicit EmbeddedVector(T initial_value) : V8Vector<T>(buffer_, kSize) {
+ for (int i = 0; i < kSize; ++i) {
+ buffer_[i] = initial_value;
+ }
+ }
+
+  // When copying, make the underlying Vector reference our own buffer.
+  EmbeddedVector(const EmbeddedVector& rhs) : V8Vector<T>(rhs) {
+    memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+    this->set_start(buffer_);
+  }
+
+  EmbeddedVector& operator=(const EmbeddedVector& rhs) {
+    if (this == &rhs) return *this;
+    V8Vector<T>::operator=(rhs);
+    memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+    this->set_start(buffer_);
+    return *this;
+  }
+
+ private:
+ T buffer_[kSize];
+};
+
+// Helper function for printing to a Vector.
+static inline int MOZ_FORMAT_PRINTF(2, 3)
+ SNPrintF(V8Vector<char> str, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ int result = vsnprintf(str.start(), str.length(), format, args);
+ va_end(args);
+ return result;
+}
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_constant_util_riscv64__h_
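A short usage sketch for the helpers above, assuming this header (and therefore <stdio.h>) is included; the buffer size and message are illustrative. EmbeddedVector supplies fixed storage and SNPrintF formats into it with bounds taken from the vector itself, so the output is truncated rather than overrun.

namespace js {
namespace jit {

static void FormatExample(int imm) {
  EmbeddedVector<char, 64> buffer;
  SNPrintF(buffer, "decoded immediate = %d", imm);  // bounded by 64 bytes
  printf("%s\n", buffer.start());
}

}  // namespace jit
}  // namespace js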
diff --git a/js/src/jit/riscv64/disasm/Disasm-riscv64.cpp b/js/src/jit/riscv64/disasm/Disasm-riscv64.cpp
new file mode 100644
index 0000000000..bd9770d074
--- /dev/null
+++ b/js/src/jit/riscv64/disasm/Disasm-riscv64.cpp
@@ -0,0 +1,2155 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// disasm::NameConverter converter;
+// disasm::Disassembler d(converter);
+// for (uint8_t* pc = begin; pc < end;) {
+// disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+// uint8_t* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+#include "jit/riscv64/disasm/Disasm-riscv64.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "jit/riscv64/Assembler-riscv64.h"
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+#define UNSUPPORTED_RISCV() printf("Unsupported instruction %d.\n", __LINE__)
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// a more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter, V8Vector<char> out_buffer)
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(uint8_t* instruction);
+
+ static bool IsConstantPoolAt(uint8_t* instr_ptr);
+ static int ConstantPoolSizeAt(uint8_t* instr_ptr);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintFPURegister(int freg);
+ void PrintVRegister(int reg);
+ void PrintFPUStatusRegister(int freg);
+ void PrintRs1(Instruction* instr);
+ void PrintRs2(Instruction* instr);
+ void PrintRd(Instruction* instr);
+ void PrintUimm(Instruction* instr);
+ void PrintVs1(Instruction* instr);
+ void PrintVs2(Instruction* instr);
+ void PrintVd(Instruction* instr);
+ void PrintFRs1(Instruction* instr);
+ void PrintFRs2(Instruction* instr);
+ void PrintFRs3(Instruction* instr);
+ void PrintFRd(Instruction* instr);
+ void PrintImm12(Instruction* instr);
+ void PrintImm12X(Instruction* instr);
+ void PrintImm20U(Instruction* instr);
+ void PrintImm20J(Instruction* instr);
+ void PrintShamt(Instruction* instr);
+ void PrintShamt32(Instruction* instr);
+ void PrintRvcImm6(Instruction* instr);
+ void PrintRvcImm6U(Instruction* instr);
+ void PrintRvcImm6Addi16sp(Instruction* instr);
+ void PrintRvcShamt(Instruction* instr);
+ void PrintRvcImm6Ldsp(Instruction* instr);
+ void PrintRvcImm6Lwsp(Instruction* instr);
+ void PrintRvcImm6Sdsp(Instruction* instr);
+ void PrintRvcImm6Swsp(Instruction* instr);
+ void PrintRvcImm5W(Instruction* instr);
+ void PrintRvcImm5D(Instruction* instr);
+ void PrintRvcImm8Addi4spn(Instruction* instr);
+ void PrintRvcImm11CJ(Instruction* instr);
+ void PrintRvcImm8B(Instruction* instr);
+ void PrintRvvVm(Instruction* instr);
+ void PrintAcquireRelease(Instruction* instr);
+ void PrintBranchOffset(Instruction* instr);
+ void PrintStoreOffset(Instruction* instr);
+ void PrintCSRReg(Instruction* instr);
+ void PrintRvvSEW(Instruction* instr);
+ void PrintRvvLMUL(Instruction* instr);
+ void PrintRvvSimm5(Instruction* instr);
+ void PrintRvvUimm5(Instruction* instr);
+ void PrintRoundingMode(Instruction* instr);
+ void PrintMemoryOrder(Instruction* instr, bool is_pred);
+
+ // Each of these functions decodes one particular instruction type.
+ void DecodeRType(Instruction* instr);
+ void DecodeR4Type(Instruction* instr);
+ void DecodeRAType(Instruction* instr);
+ void DecodeRFPType(Instruction* instr);
+ void DecodeIType(Instruction* instr);
+ void DecodeSType(Instruction* instr);
+ void DecodeBType(Instruction* instr);
+ void DecodeUType(Instruction* instr);
+ void DecodeJType(Instruction* instr);
+ void DecodeCRType(Instruction* instr);
+ void DecodeCAType(Instruction* instr);
+ void DecodeCIType(Instruction* instr);
+ void DecodeCIWType(Instruction* instr);
+ void DecodeCSSType(Instruction* instr);
+ void DecodeCLType(Instruction* instr);
+ void DecodeCSType(Instruction* instr);
+ void DecodeCJType(Instruction* instr);
+ void DecodeCBType(Instruction* instr);
+
+ // Printing of instruction name.
+ void PrintInstructionName(Instruction* instr);
+ void PrintTarget(Instruction* instr);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatFPURegisterOrRoundMode(Instruction* instr, const char* option);
+ int FormatRvcRegister(Instruction* instr, const char* option);
+ int FormatRvcImm(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+
+ int switch_sew(Instruction* instr);
+ int switch_nf(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ V8Vector<char> out_buffer_;
+ int out_buffer_pos_;
+
+ // Disallow copy and assign.
+ Decoder(const Decoder&) = delete;
+ void operator=(const Decoder&) = delete;
+};
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < int(out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+int Decoder::switch_nf(Instruction* instr) {
+ int nf = 0;
+ switch (instr->InstructionBits() & kRvvNfMask) {
+ case 0x20000000:
+ nf = 2;
+ break;
+ case 0x40000000:
+ nf = 3;
+ break;
+ case 0x60000000:
+ nf = 4;
+ break;
+ case 0x80000000:
+ nf = 5;
+ break;
+ case 0xa0000000:
+ nf = 6;
+ break;
+ case 0xc0000000:
+ nf = 7;
+ break;
+ case 0xe0000000:
+ nf = 8;
+ break;
+ }
+ return nf;
+}
+
+int Decoder::switch_sew(Instruction* instr) {
+ int width = 0;
+ if ((instr->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+ (instr->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+ return -1;
+ switch (instr->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+ case 0x0:
+ width = 8;
+ break;
+ case 0x00005000:
+ width = 16;
+ break;
+ case 0x00006000:
+ width = 32;
+ break;
+ case 0x00007000:
+ width = 64;
+ break;
+ case 0x10000000:
+ width = 128;
+ break;
+ case 0x10005000:
+ width = 256;
+ break;
+ case 0x10006000:
+ width = 512;
+ break;
+ case 0x10007000:
+ width = 1024;
+ break;
+ default:
+ width = -1;
+ break;
+ }
+ return width;
+}
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ MOZ_ASSERT(format[0] == 'r');
+ if (format[1] == 's') { // 'rs[12]: Rs register.
+ if (format[2] == '1') {
+ int reg = instr->Rs1Value();
+ PrintRegister(reg);
+ return 3;
+ } else if (format[2] == '2') {
+ int reg = instr->Rs2Value();
+ PrintRegister(reg);
+ return 3;
+ }
+ MOZ_CRASH();
+ } else if (format[1] == 'd') { // 'rd: rd register.
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+ return 2;
+ }
+ MOZ_CRASH();
+}
+
+// Handle all FPU register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPURegisterOrRoundMode(Instruction* instr,
+ const char* format) {
+ MOZ_ASSERT(format[0] == 'f');
+ if (format[1] == 's') { // 'fs[1-3]: Rs register.
+ if (format[2] == '1') {
+ int reg = instr->Rs1Value();
+ PrintFPURegister(reg);
+ return 3;
+ } else if (format[2] == '2') {
+ int reg = instr->Rs2Value();
+ PrintFPURegister(reg);
+ return 3;
+ } else if (format[2] == '3') {
+ int reg = instr->Rs3Value();
+ PrintFPURegister(reg);
+ return 3;
+ }
+ MOZ_CRASH();
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->RdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'frm
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "frm"));
+ PrintRoundingMode(instr);
+ return 3;
+ }
+ MOZ_CRASH();
+}
+
+// Handle all C extension register based formatting in this function to reduce
+// the complexity of FormatOption.
+int Decoder::FormatRvcRegister(Instruction* instr, const char* format) {
+ MOZ_ASSERT(format[0] == 'C');
+ MOZ_ASSERT(format[1] == 'r' || format[1] == 'f');
+ if (format[2] == 's') { // 'Crs[12]: Rs register.
+ if (format[3] == '1') {
+ if (format[4] == 's') { // 'Crs1s: 3-bits register
+ int reg = instr->RvcRs1sValue();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 5;
+ }
+ int reg = instr->RvcRs1Value();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 4;
+ } else if (format[3] == '2') {
+ if (format[4] == 's') { // 'Crs2s: 3-bits register
+ int reg = instr->RvcRs2sValue();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 5;
+ }
+ int reg = instr->RvcRs2Value();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 4;
+ }
+ MOZ_CRASH();
+ } else if (format[2] == 'd') { // 'Crd: rd register.
+ int reg = instr->RvcRdValue();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 3;
+ }
+ MOZ_CRASH();
+}
+
+// Handle all C extension immediates based formatting in this function to reduce
+// the complexity of FormatOption.
+int Decoder::FormatRvcImm(Instruction* instr, const char* format) {
+ // TODO(riscv): add other rvc imm format
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm"));
+ if (format[4] == '6') {
+ if (format[5] == 'U') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6U"));
+ PrintRvcImm6U(instr);
+ return 6;
+ } else if (format[5] == 'A') {
+ if (format[9] == '1' && format[10] == '6') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Addi16sp"));
+ PrintRvcImm6Addi16sp(instr);
+ return 13;
+ }
+ MOZ_CRASH();
+ } else if (format[5] == 'L') {
+ if (format[6] == 'd') {
+ if (format[7] == 's') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Ldsp"));
+ PrintRvcImm6Ldsp(instr);
+ return 9;
+ }
+ } else if (format[6] == 'w') {
+ if (format[7] == 's') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Lwsp"));
+ PrintRvcImm6Lwsp(instr);
+ return 9;
+ }
+ }
+ MOZ_CRASH();
+ } else if (format[5] == 'S') {
+ if (format[6] == 'w') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Swsp"));
+ PrintRvcImm6Swsp(instr);
+ return 9;
+ } else if (format[6] == 'd') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Sdsp"));
+ PrintRvcImm6Sdsp(instr);
+ return 9;
+ }
+ MOZ_CRASH();
+ }
+ PrintRvcImm6(instr);
+ return 5;
+ } else if (format[4] == '5') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm5"));
+ if (format[5] == 'W') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm5W"));
+ PrintRvcImm5W(instr);
+ return 6;
+ } else if (format[5] == 'D') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm5D"));
+ PrintRvcImm5D(instr);
+ return 6;
+ }
+ MOZ_CRASH();
+ } else if (format[4] == '8') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm8"));
+ if (format[5] == 'A') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm8Addi4spn"));
+ PrintRvcImm8Addi4spn(instr);
+ return 13;
+ } else if (format[5] == 'B') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm8B"));
+ PrintRvcImm8B(instr);
+ return 6;
+ }
+ MOZ_CRASH();
+ } else if (format[4] == '1') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm1"));
+ if (format[5] == '1') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm11CJ"));
+ PrintRvcImm11CJ(instr);
+ return 8;
+ }
+ MOZ_CRASH();
+ }
+ MOZ_CRASH();
+}
+
+// FormatOption takes a formatting string and interprets it based on the
+// current instruction. The format string points to the first character of the
+// option string (the option escape has already been consumed by the caller).
+// FormatOption returns the number of characters that were consumed from the
+// formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'C': { // `C extension
+ if (format[1] == 'r' || format[1] == 'f') {
+ return FormatRvcRegister(instr, format);
+ } else if (format[1] == 'i') {
+ return FormatRvcImm(instr, format);
+ } else if (format[1] == 's') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "Cshamt"));
+ PrintRvcShamt(instr);
+ return 6;
+ }
+ MOZ_CRASH();
+ }
+ case 'c': { // `csr: CSR registers
+ if (format[1] == 's') {
+ if (format[2] == 'r') {
+ PrintCSRReg(instr);
+ return 3;
+ }
+ }
+ MOZ_CRASH();
+ }
+ case 'i': { // 'imm12, 'imm12x, 'imm20U, or 'imm20J: Immediates.
+ if (format[3] == '1') {
+ if (format[4] == '2') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "imm12"));
+ if (format[5] == 'x') {
+ PrintImm12X(instr);
+ return 6;
+ }
+ PrintImm12(instr);
+ return 5;
+ }
+ } else if (format[3] == '2' && format[4] == '0') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "imm20"));
+ switch (format[5]) {
+ case 'U':
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "imm20U"));
+ PrintImm20U(instr);
+ break;
+ case 'J':
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "imm20J"));
+ PrintImm20J(instr);
+ break;
+ }
+ return 6;
+ }
+ MOZ_CRASH();
+ }
+ case 'o': { // 'offB or 'offS: Offsets.
+ if (format[3] == 'B') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "offB"));
+ PrintBranchOffset(instr);
+ return 4;
+ } else if (format[3] == 'S') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "offS"));
+ PrintStoreOffset(instr);
+ return 4;
+ }
+ MOZ_CRASH();
+ }
+ case 'r': { // 'r: registers.
+ return FormatRegister(instr, format);
+ }
+    case 'f': {  // 'f: FPU registers or `frm
+ return FormatFPURegisterOrRoundMode(instr, format);
+ }
+ case 'a': { // 'a: Atomic acquire and release.
+ PrintAcquireRelease(instr);
+ return 1;
+ }
+ case 'p': { // `pre
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "pre"));
+ PrintMemoryOrder(instr, true);
+ return 3;
+ }
+ case 's': { // 's32 or 's64: Shift amount.
+ if (format[1] == '3') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "s32"));
+ PrintShamt32(instr);
+ return 3;
+ } else if (format[1] == '6') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "s64"));
+ PrintShamt(instr);
+ return 3;
+ } else if (format[1] == 'u') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "suc"));
+ PrintMemoryOrder(instr, false);
+ return 3;
+ } else if (format[1] == 'e') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "sew"));
+ PrintRvvSEW(instr);
+ return 3;
+ } else if (format[1] == 'i') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "simm5"));
+ PrintRvvSimm5(instr);
+ return 5;
+ }
+ MOZ_CRASH();
+ }
+ case 'v': {
+ if (format[1] == 'd') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "vd"));
+ PrintVd(instr);
+ return 2;
+ } else if (format[2] == '1') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "vs1"));
+ PrintVs1(instr);
+ return 3;
+ } else if (format[2] == '2') {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "vs2"));
+ PrintVs2(instr);
+ return 3;
+ } else {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "vm"));
+ PrintRvvVm(instr);
+ return 2;
+ }
+ }
+ case 'l': {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "lmul"));
+ PrintRvvLMUL(instr);
+ return 4;
+ }
+ case 'u': {
+ if (STRING_STARTS_WITH(format, "uimm5")) {
+ PrintRvvUimm5(instr);
+ return 5;
+ } else {
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "uimm"));
+ PrintUimm(instr);
+ return 4;
+ }
+ }
+    case 't': {  // 'target: target of branch instructions
+ MOZ_ASSERT(STRING_STARTS_WITH(format, "target"));
+ PrintTarget(instr);
+ return 6;
+ }
+ }
+ MOZ_CRASH();
+}
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
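+
+// Illustrative example (actual register names depend on the NameConverter):
+// a call such as
+//   Format(instr, "add 'rd, 'rs1, 'rs2");
+// copies plain characters into out_buffer_ and, at each quote escape, calls
+// FormatOption(), which prints the decoded operand and returns how many
+// characters of the option name were consumed (2 for "rd", 3 for "rs1" and
+// "rs2"), so the result might read "add a0, a1, a2".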
+
+// The disassembler may end up decoding data inlined in the code. We do not want
+// it to crash if the data does not resemble any known instruction.
+#define VERIFY(condition) \
+ if (!(condition)) { \
+ Unknown(instr); \
+ return; \
+ }
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr),
+// which just prints "unknown" instead of a decoded instruction.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+void Decoder::PrintVRegister(int reg) { UNSUPPORTED_RISCV(); }
+
+void Decoder::PrintRs1(Instruction* instr) {
+ int reg = instr->Rs1Value();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRs2(Instruction* instr) {
+ int reg = instr->Rs2Value();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRd(Instruction* instr) {
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintUimm(Instruction* instr) {
+ int val = instr->Rs1Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", val);
+}
+
+void Decoder::PrintVs1(Instruction* instr) {
+ int reg = instr->Vs1Value();
+ PrintVRegister(reg);
+}
+
+void Decoder::PrintVs2(Instruction* instr) {
+ int reg = instr->Vs2Value();
+ PrintVRegister(reg);
+}
+
+void Decoder::PrintVd(Instruction* instr) {
+ int reg = instr->VdValue();
+ PrintVRegister(reg);
+}
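+
+// Note: PrintVRegister() is not implemented yet (UNSUPPORTED_RISCV), so the
+// PrintVs1/PrintVs2/PrintVd helpers above cannot produce output until vector
+// register naming is added.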
+
+// Print the FPU register name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+ Print(converter_.NameOfXMMRegister(freg));
+}
+
+void Decoder::PrintFRs1(Instruction* instr) {
+ int reg = instr->Rs1Value();
+ PrintFPURegister(reg);
+}
+
+void Decoder::PrintFRs2(Instruction* instr) {
+ int reg = instr->Rs2Value();
+ PrintFPURegister(reg);
+}
+
+void Decoder::PrintFRs3(Instruction* instr) {
+ int reg = instr->Rs3Value();
+ PrintFPURegister(reg);
+}
+
+void Decoder::PrintFRd(Instruction* instr) {
+ int reg = instr->RdValue();
+ PrintFPURegister(reg);
+}
+
+void Decoder::PrintImm12X(Instruction* instr) {
+ int32_t imm = instr->Imm12Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+void Decoder::PrintImm12(Instruction* instr) {
+ int32_t imm = instr->Imm12Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintTarget(Instruction* instr) {
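+  // The commented-out logic below would resolve an auipc+jalr pair to a
+  // symbolic target via the NameConverter; it is currently disabled, so the
+  // 'target option prints nothing.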
+ // if (Assembler::IsJalr(instr->InstructionBits())) {
+ // if (Assembler::IsAuipc((instr - 4)->InstructionBits()) &&
+ // (instr - 4)->RdValue() == instr->Rs1Value()) {
+ // int32_t imm = Assembler::BrachlongOffset((instr -
+ // 4)->InstructionBits(),
+ // instr->InstructionBits());
+ // const char* target =
+ // converter_.NameOfAddress(reinterpret_cast<byte*>(instr - 4) + imm);
+ // out_buffer_pos_ +=
+ // SNPrintF(out_buffer_ + out_buffer_pos_, " -> %s", target);
+ // return;
+ // }
+ // }
+}
+
+void Decoder::PrintBranchOffset(Instruction* instr) {
+ int32_t imm = instr->BranchOffset();
+ const char* target =
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + imm);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d -> %s", imm, target);
+}
+
+void Decoder::PrintStoreOffset(Instruction* instr) {
+ int32_t imm = instr->StoreOffset();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvvSEW(Instruction* instr) {
+ const char* sew = instr->RvvSEW();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", sew);
+}
+
+void Decoder::PrintRvvLMUL(Instruction* instr) {
+ const char* lmul = instr->RvvLMUL();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", lmul);
+}
+
+void Decoder::PrintRvvSimm5(Instruction* instr) {
+ const int simm5 = instr->RvvSimm5();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", simm5);
+}
+
+void Decoder::PrintRvvUimm5(Instruction* instr) {
+ const uint32_t uimm5 = instr->RvvUimm5();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", uimm5);
+}
+
+void Decoder::PrintImm20U(Instruction* instr) {
+ int32_t imm = instr->Imm20UValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+void Decoder::PrintImm20J(Instruction* instr) {
+ int32_t imm = instr->Imm20JValue();
+ const char* target =
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + imm);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d -> %s", imm, target);
+}
+
+void Decoder::PrintShamt(Instruction* instr) {
+ int32_t imm = instr->Shamt();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintShamt32(Instruction* instr) {
+ int32_t imm = instr->Shamt32();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6(Instruction* instr) {
+ int32_t imm = instr->RvcImm6Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6U(Instruction* instr) {
+ int32_t imm = instr->RvcImm6Value() & 0xFFFFF;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+void Decoder::PrintRvcImm6Addi16sp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6Addi16spValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcShamt(Instruction* instr) {
+ int32_t imm = instr->RvcShamt6();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6Ldsp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6LdspValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6Lwsp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6LwspValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6Swsp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6SwspValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6Sdsp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6SdspValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm5W(Instruction* instr) {
+ int32_t imm = instr->RvcImm5WValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm5D(Instruction* instr) {
+ int32_t imm = instr->RvcImm5DValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm8Addi4spn(Instruction* instr) {
+ int32_t imm = instr->RvcImm8Addi4spnValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm11CJ(Instruction* instr) {
+ int32_t imm = instr->RvcImm11CJValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm8B(Instruction* instr) {
+ int32_t imm = instr->RvcImm8BValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvvVm(Instruction* instr) {
+ uint8_t imm = instr->RvvVM();
+ if (imm == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " v0.t");
+ }
+}
+
+void Decoder::PrintAcquireRelease(Instruction* instr) {
+ bool aq = instr->AqValue();
+ bool rl = instr->RlValue();
+ if (aq || rl) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ".");
+ }
+ if (aq) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "aq");
+ }
+ if (rl) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "rl");
+ }
+}
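+
+// Example: with both the aq and rl bits set the 'a option appends ".aqrl", so
+// a format such as "amoadd.w'a ..." disassembles as "amoadd.w.aqrl ...".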
+
+void Decoder::PrintCSRReg(Instruction* instr) {
+ int32_t csr_reg = instr->CsrValue();
+ std::string s;
+ switch (csr_reg) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ s = "csr_fflags";
+ break;
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ s = "csr_frm";
+ break;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ s = "csr_fcsr";
+ break;
+ case csr_cycle:
+ s = "csr_cycle";
+ break;
+ case csr_time:
+ s = "csr_time";
+ break;
+ case csr_instret:
+ s = "csr_instret";
+ break;
+ case csr_cycleh:
+ s = "csr_cycleh";
+ break;
+ case csr_timeh:
+ s = "csr_timeh";
+ break;
+ case csr_instreth:
+ s = "csr_instreth";
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", s.c_str());
+}
+
+void Decoder::PrintRoundingMode(Instruction* instr) {
+ int frm = instr->RoundMode();
+ std::string s;
+ switch (frm) {
+ case RNE:
+ s = "RNE";
+ break;
+ case RTZ:
+ s = "RTZ";
+ break;
+ case RDN:
+ s = "RDN";
+ break;
+ case RUP:
+ s = "RUP";
+ break;
+ case RMM:
+ s = "RMM";
+ break;
+ case DYN:
+ s = "DYN";
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", s.c_str());
+}
+
+void Decoder::PrintMemoryOrder(Instruction* instr, bool is_pred) {
+ int memOrder = instr->MemoryOrder(is_pred);
+ std::string s;
+ if ((memOrder & PSI) == PSI) {
+ s += "i";
+ }
+ if ((memOrder & PSO) == PSO) {
+ s += "o";
+ }
+ if ((memOrder & PSR) == PSR) {
+ s += "r";
+ }
+ if ((memOrder & PSW) == PSW) {
+ s += "w";
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", s.c_str());
+}
+
+// Printing of instruction name.
+void Decoder::PrintInstructionName(Instruction* instr) {}
+
+// RISCV Instruction Decode Routine
+void Decoder::DecodeRType(Instruction* instr) {
+ switch (instr->InstructionBits() & kRTypeMask) {
+ case RO_ADD:
+ Format(instr, "add 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SUB:
+ if (instr->Rs1Value() == zero.code())
+ Format(instr, "neg 'rd, 'rs2");
+ else
+ Format(instr, "sub 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SLL:
+ Format(instr, "sll 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SLT:
+ if (instr->Rs2Value() == zero.code())
+ Format(instr, "sltz 'rd, 'rs1");
+ else if (instr->Rs1Value() == zero.code())
+ Format(instr, "sgtz 'rd, 'rs2");
+ else
+ Format(instr, "slt 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SLTU:
+ if (instr->Rs1Value() == zero.code())
+ Format(instr, "snez 'rd, 'rs2");
+ else
+ Format(instr, "sltu 'rd, 'rs1, 'rs2");
+ break;
+ case RO_XOR:
+ Format(instr, "xor 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SRL:
+ Format(instr, "srl 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SRA:
+ Format(instr, "sra 'rd, 'rs1, 'rs2");
+ break;
+ case RO_OR:
+ Format(instr, "or 'rd, 'rs1, 'rs2");
+ break;
+ case RO_AND:
+ Format(instr, "and 'rd, 'rs1, 'rs2");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_ADDW:
+ Format(instr, "addw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SUBW:
+ if (instr->Rs1Value() == zero.code())
+ Format(instr, "negw 'rd, 'rs2");
+ else
+ Format(instr, "subw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SLLW:
+ Format(instr, "sllw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SRLW:
+ Format(instr, "srlw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SRAW:
+ Format(instr, "sraw 'rd, 'rs1, 'rs2");
+ break;
+#endif /* JS_CODEGEN_RISCV64 */
+ // TODO(riscv): Add RISCV M extension macro
+ case RO_MUL:
+ Format(instr, "mul 'rd, 'rs1, 'rs2");
+ break;
+ case RO_MULH:
+ Format(instr, "mulh 'rd, 'rs1, 'rs2");
+ break;
+ case RO_MULHSU:
+ Format(instr, "mulhsu 'rd, 'rs1, 'rs2");
+ break;
+ case RO_MULHU:
+ Format(instr, "mulhu 'rd, 'rs1, 'rs2");
+ break;
+ case RO_DIV:
+ Format(instr, "div 'rd, 'rs1, 'rs2");
+ break;
+ case RO_DIVU:
+ Format(instr, "divu 'rd, 'rs1, 'rs2");
+ break;
+ case RO_REM:
+ Format(instr, "rem 'rd, 'rs1, 'rs2");
+ break;
+ case RO_REMU:
+ Format(instr, "remu 'rd, 'rs1, 'rs2");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_MULW:
+ Format(instr, "mulw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_DIVW:
+ Format(instr, "divw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_DIVUW:
+ Format(instr, "divuw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_REMW:
+ Format(instr, "remw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_REMUW:
+ Format(instr, "remuw 'rd, 'rs1, 'rs2");
+ break;
+#endif /*JS_CODEGEN_RISCV64*/
+ // TODO(riscv): End Add RISCV M extension macro
+ default: {
+ switch (instr->BaseOpcode()) {
+ case AMO:
+ DecodeRAType(instr);
+ break;
+ case OP_FP:
+ DecodeRFPType(instr);
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ }
+ }
+}
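+
+// Illustrative note: DecodeRType() folds common operand patterns into standard
+// pseudo-instructions, e.g. a sub with rs1 == zero prints as "neg 'rd, 'rs2"
+// and an sltu with rs1 == zero prints as "snez 'rd, 'rs2".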
+
+void Decoder::DecodeRAType(Instruction* instr) {
+ // TODO(riscv): Add macro for RISCV A extension
+  // Special handling for A extension instructions because they use funct5.
+  // For all A extension instructions the simulator is purely sequential;
+  // there is no memory address locking or other synchronization behavior.
+ switch (instr->InstructionBits() & kRATypeMask) {
+ case RO_LR_W:
+ Format(instr, "lr.w'a 'rd, ('rs1)");
+ break;
+ case RO_SC_W:
+ Format(instr, "sc.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOSWAP_W:
+ Format(instr, "amoswap.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOADD_W:
+ Format(instr, "amoadd.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOXOR_W:
+ Format(instr, "amoxor.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOAND_W:
+ Format(instr, "amoand.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOOR_W:
+ Format(instr, "amoor.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMIN_W:
+ Format(instr, "amomin.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMAX_W:
+ Format(instr, "amomax.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMINU_W:
+ Format(instr, "amominu.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMAXU_W:
+ Format(instr, "amomaxu.w'a 'rd, 'rs2, ('rs1)");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_LR_D:
+ Format(instr, "lr.d'a 'rd, ('rs1)");
+ break;
+ case RO_SC_D:
+ Format(instr, "sc.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOSWAP_D:
+ Format(instr, "amoswap.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOADD_D:
+ Format(instr, "amoadd.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOXOR_D:
+ Format(instr, "amoxor.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOAND_D:
+ Format(instr, "amoand.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOOR_D:
+ Format(instr, "amoor.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMIN_D:
+ Format(instr, "amomin.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMAX_D:
+ Format(instr, "amoswap.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMINU_D:
+ Format(instr, "amominu.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMAXU_D:
+ Format(instr, "amomaxu.d'a 'rd, 'rs2, ('rs1)");
+ break;
+#endif /*JS_CODEGEN_RISCV64*/
+ // TODO(riscv): End Add macro for RISCV A extension
+ default: {
+ UNSUPPORTED_RISCV();
+ }
+ }
+}
+
+void Decoder::DecodeRFPType(Instruction* instr) {
+  // OP_FP instructions (F/D) are distinguished by funct7 first; some are
+  // further distinguished by funct3 and rs2.
+
+  // Note: kRFPTypeMask does not cover funct3 or rs2, hence the nested
+  // switches on Funct3Value()/Rs2Value() below.
+ switch (instr->InstructionBits() & kRFPTypeMask) {
+ // TODO(riscv): Add macro for RISCV F extension
+ case RO_FADD_S:
+ Format(instr, "fadd.s 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FSUB_S:
+ Format(instr, "fsub.s 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FMUL_S:
+ Format(instr, "fmul.s 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FDIV_S:
+ Format(instr, "fdiv.s 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FSQRT_S:
+ Format(instr, "fsqrt.s 'fd, 'fs1");
+ break;
+ case RO_FSGNJ_S: { // RO_FSGNJN_S RO_FSGNJX_S
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FSGNJ_S
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fmv.s 'fd, 'fs1");
+ else
+ Format(instr, "fsgnj.s 'fd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FSGNJN_S
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fneg.s 'fd, 'fs1");
+ else
+ Format(instr, "fsgnjn.s 'fd, 'fs1, 'fs2");
+ break;
+ case 0b010: // RO_FSGNJX_S
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fabs.s 'fd, 'fs1");
+ else
+ Format(instr, "fsgnjx.s 'fd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FMIN_S: { // RO_FMAX_S
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FMIN_S
+ Format(instr, "fmin.s 'fd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FMAX_S
+ Format(instr, "fmax.s 'fd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_W_S: { // RO_FCVT_WU_S , 64F RO_FCVT_L_S RO_FCVT_LU_S
+ switch (instr->Rs2Value()) {
+ case 0b00000: // RO_FCVT_W_S
+ Format(instr, "fcvt.w.s ['frm] 'rd, 'fs1");
+ break;
+ case 0b00001: // RO_FCVT_WU_S
+ Format(instr, "fcvt.wu.s ['frm] 'rd, 'fs1");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case 0b00010: // RO_FCVT_L_S
+ Format(instr, "fcvt.l.s ['frm] 'rd, 'fs1");
+ break;
+ case 0b00011: // RO_FCVT_LU_S
+ Format(instr, "fcvt.lu.s ['frm] 'rd, 'fs1");
+ break;
+#endif /* JS_CODEGEN_RISCV64 */
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FMV: { // RO_FCLASS_S
+ if (instr->Rs2Value() != 0b00000) {
+ UNSUPPORTED_RISCV();
+ }
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FMV_X_W
+ Format(instr, "fmv.x.w 'rd, 'fs1");
+ break;
+ case 0b001: // RO_FCLASS_S
+ Format(instr, "fclass.s 'rd, 'fs1");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FLE_S: { // RO_FEQ_S RO_FLT_S RO_FLE_S
+ switch (instr->Funct3Value()) {
+ case 0b010: // RO_FEQ_S
+ Format(instr, "feq.s 'rd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FLT_S
+ Format(instr, "flt.s 'rd, 'fs1, 'fs2");
+ break;
+ case 0b000: // RO_FLE_S
+ Format(instr, "fle.s 'rd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_S_W: { // RO_FCVT_S_WU , 64F RO_FCVT_S_L RO_FCVT_S_LU
+ switch (instr->Rs2Value()) {
+ case 0b00000: // RO_FCVT_S_W
+ Format(instr, "fcvt.s.w 'fd, 'rs1");
+ break;
+ case 0b00001: // RO_FCVT_S_WU
+ Format(instr, "fcvt.s.wu 'fd, 'rs1");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case 0b00010: // RO_FCVT_S_L
+ Format(instr, "fcvt.s.l 'fd, 'rs1");
+ break;
+ case 0b00011: // RO_FCVT_S_LU
+ Format(instr, "fcvt.s.lu 'fd, 'rs1");
+ break;
+#endif /* JS_CODEGEN_RISCV64 */
+ default: {
+ UNSUPPORTED_RISCV();
+ }
+ }
+ break;
+ }
+ case RO_FMV_W_X: {
+ if (instr->Funct3Value() == 0b000) {
+ Format(instr, "fmv.w.x 'fd, 'rs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ // TODO(riscv): Add macro for RISCV D extension
+ case RO_FADD_D:
+ Format(instr, "fadd.d 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FSUB_D:
+ Format(instr, "fsub.d 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FMUL_D:
+ Format(instr, "fmul.d 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FDIV_D:
+ Format(instr, "fdiv.d 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FSQRT_D: {
+ if (instr->Rs2Value() == 0b00000) {
+ Format(instr, "fsqrt.d 'fd, 'fs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FSGNJ_D: { // RO_FSGNJN_D RO_FSGNJX_D
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FSGNJ_D
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fmv.d 'fd, 'fs1");
+ else
+ Format(instr, "fsgnj.d 'fd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FSGNJN_D
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fneg.d 'fd, 'fs1");
+ else
+ Format(instr, "fsgnjn.d 'fd, 'fs1, 'fs2");
+ break;
+ case 0b010: // RO_FSGNJX_D
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fabs.d 'fd, 'fs1");
+ else
+ Format(instr, "fsgnjx.d 'fd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FMIN_D: { // RO_FMAX_D
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FMIN_D
+ Format(instr, "fmin.d 'fd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FMAX_D
+ Format(instr, "fmax.d 'fd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case (RO_FCVT_S_D & kRFPTypeMask): {
+ if (instr->Rs2Value() == 0b00001) {
+ Format(instr, "fcvt.s.d ['frm] 'fd, 'fs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_D_S: {
+ if (instr->Rs2Value() == 0b00000) {
+ Format(instr, "fcvt.d.s 'fd, 'fs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FLE_D: { // RO_FEQ_D RO_FLT_D RO_FLE_D
+ switch (instr->Funct3Value()) {
+        case 0b010:  // RO_FEQ_D
+ Format(instr, "feq.d 'rd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FLT_D
+ Format(instr, "flt.d 'rd, 'fs1, 'fs2");
+ break;
+ case 0b000: // RO_FLE_D
+ Format(instr, "fle.d 'rd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case (RO_FCLASS_D & kRFPTypeMask): { // RO_FCLASS_D , 64D RO_FMV_X_D
+ if (instr->Rs2Value() != 0b00000) {
+ UNSUPPORTED_RISCV();
+ break;
+ }
+ switch (instr->Funct3Value()) {
+ case 0b001: // RO_FCLASS_D
+ Format(instr, "fclass.d 'rd, 'fs1");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case 0b000: // RO_FMV_X_D
+ Format(instr, "fmv.x.d 'rd, 'fs1");
+ break;
+#endif /* JS_CODEGEN_RISCV64 */
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_W_D: { // RO_FCVT_WU_D , 64F RO_FCVT_L_D RO_FCVT_LU_D
+ switch (instr->Rs2Value()) {
+ case 0b00000: // RO_FCVT_W_D
+ Format(instr, "fcvt.w.d ['frm] 'rd, 'fs1");
+ break;
+ case 0b00001: // RO_FCVT_WU_D
+ Format(instr, "fcvt.wu.d ['frm] 'rd, 'fs1");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case 0b00010: // RO_FCVT_L_D
+ Format(instr, "fcvt.l.d ['frm] 'rd, 'fs1");
+ break;
+ case 0b00011: // RO_FCVT_LU_D
+ Format(instr, "fcvt.lu.d ['frm] 'rd, 'fs1");
+ break;
+#endif /* JS_CODEGEN_RISCV64 */
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_D_W: { // RO_FCVT_D_WU , 64F RO_FCVT_D_L RO_FCVT_D_LU
+ switch (instr->Rs2Value()) {
+ case 0b00000: // RO_FCVT_D_W
+ Format(instr, "fcvt.d.w 'fd, 'rs1");
+ break;
+ case 0b00001: // RO_FCVT_D_WU
+ Format(instr, "fcvt.d.wu 'fd, 'rs1");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case 0b00010: // RO_FCVT_D_L
+ Format(instr, "fcvt.d.l 'fd, 'rs1");
+ break;
+ case 0b00011: // RO_FCVT_D_LU
+ Format(instr, "fcvt.d.lu 'fd, 'rs1");
+ break;
+#endif /* JS_CODEGEN_RISCV64 */
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+#ifdef JS_CODEGEN_RISCV64
+ case RO_FMV_D_X: {
+ if (instr->Funct3Value() == 0b000 && instr->Rs2Value() == 0b00000) {
+ Format(instr, "fmv.d.x 'fd, 'rs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+#endif /* JS_CODEGEN_RISCV64 */
+ default: {
+ UNSUPPORTED_RISCV();
+ }
+ }
+}
+
+void Decoder::DecodeR4Type(Instruction* instr) {
+ switch (instr->InstructionBits() & kR4TypeMask) {
+ // TODO(riscv): use F Extension macro block
+ case RO_FMADD_S:
+ Format(instr, "fmadd.s 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FMSUB_S:
+ Format(instr, "fmsub.s 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FNMSUB_S:
+ Format(instr, "fnmsub.s 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FNMADD_S:
+ Format(instr, "fnmadd.s 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ // TODO(riscv): use F Extension macro block
+ case RO_FMADD_D:
+ Format(instr, "fmadd.d 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FMSUB_D:
+ Format(instr, "fmsub.d 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FNMSUB_D:
+ Format(instr, "fnmsub.d 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FNMADD_D:
+ Format(instr, "fnmadd.d 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeIType(Instruction* instr) {
+ switch (instr->InstructionBits() & kITypeMask) {
+ case RO_JALR:
+ if (instr->RdValue() == zero.code() && instr->Rs1Value() == ra.code() &&
+ instr->Imm12Value() == 0)
+ Format(instr, "ret");
+ else if (instr->RdValue() == zero.code() && instr->Imm12Value() == 0)
+ Format(instr, "jr 'rs1");
+ else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
+ Format(instr, "jalr 'rs1");
+ else
+ Format(instr, "jalr 'rd, 'imm12('rs1)");
+ break;
+ case RO_LB:
+ Format(instr, "lb 'rd, 'imm12('rs1)");
+ break;
+ case RO_LH:
+ Format(instr, "lh 'rd, 'imm12('rs1)");
+ break;
+ case RO_LW:
+ Format(instr, "lw 'rd, 'imm12('rs1)");
+ break;
+ case RO_LBU:
+ Format(instr, "lbu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LHU:
+ Format(instr, "lhu 'rd, 'imm12('rs1)");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_LWU:
+ Format(instr, "lwu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LD:
+ Format(instr, "ld 'rd, 'imm12('rs1)");
+ break;
+#endif /*JS_CODEGEN_RISCV64*/
+ case RO_ADDI:
+ if (instr->Imm12Value() == 0) {
+ if (instr->RdValue() == zero.code() && instr->Rs1Value() == zero.code())
+ Format(instr, "nop");
+ else
+ Format(instr, "mv 'rd, 'rs1");
+ } else if (instr->Rs1Value() == zero.code()) {
+ Format(instr, "li 'rd, 'imm12");
+ } else {
+ Format(instr, "addi 'rd, 'rs1, 'imm12");
+ }
+ break;
+ case RO_SLTI:
+ Format(instr, "slti 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLTIU:
+ if (instr->Imm12Value() == 1)
+ Format(instr, "seqz 'rd, 'rs1");
+ else
+ Format(instr, "sltiu 'rd, 'rs1, 'imm12");
+ break;
+ case RO_XORI:
+ if (instr->Imm12Value() == -1)
+ Format(instr, "not 'rd, 'rs1");
+ else
+ Format(instr, "xori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ORI:
+ Format(instr, "ori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ANDI:
+ Format(instr, "andi 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_SLLI:
+ Format(instr, "slli 'rd, 'rs1, 's64");
+ break;
+ case RO_SRLI: { // RO_SRAI
+ if (!instr->IsArithShift()) {
+ Format(instr, "srli 'rd, 'rs1, 's64");
+ } else {
+ Format(instr, "srai 'rd, 'rs1, 's64");
+ }
+ break;
+ }
+#ifdef JS_CODEGEN_RISCV64
+ case RO_ADDIW:
+ if (instr->Imm12Value() == 0)
+ Format(instr, "sext.w 'rd, 'rs1");
+ else
+ Format(instr, "addiw 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLLIW:
+ Format(instr, "slliw 'rd, 'rs1, 's32");
+ break;
+ case RO_SRLIW: { // RO_SRAIW
+ if (!instr->IsArithShift()) {
+ Format(instr, "srliw 'rd, 'rs1, 's32");
+ } else {
+ Format(instr, "sraiw 'rd, 'rs1, 's32");
+ }
+ break;
+ }
+#endif /*JS_CODEGEN_RISCV64*/
+ case RO_FENCE:
+ if (instr->MemoryOrder(true) == PSIORW &&
+ instr->MemoryOrder(false) == PSIORW)
+ Format(instr, "fence");
+ else
+ Format(instr, "fence 'pre, 'suc");
+ break;
+ case RO_ECALL: { // RO_EBREAK
+ if (instr->Imm12Value() == 0) { // ECALL
+ Format(instr, "ecall");
+ } else if (instr->Imm12Value() == 1) { // EBREAK
+ Format(instr, "ebreak");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ // TODO(riscv): use Zifencei Standard Extension macro block
+ case RO_FENCE_I:
+ Format(instr, "fence.i");
+ break;
+ // TODO(riscv): use Zicsr Standard Extension macro block
+ // FIXME(RISC-V): Add special formatting for CSR registers
+ case RO_CSRRW:
+ if (instr->CsrValue() == csr_fcsr) {
+ if (instr->RdValue() == zero.code())
+ Format(instr, "fscsr 'rs1");
+ else
+ Format(instr, "fscsr 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_frm) {
+ if (instr->RdValue() == zero.code())
+ Format(instr, "fsrm 'rs1");
+ else
+ Format(instr, "fsrm 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_fflags) {
+ if (instr->RdValue() == zero.code())
+ Format(instr, "fsflags 'rs1");
+ else
+ Format(instr, "fsflags 'rd, 'rs1");
+ } else if (instr->RdValue() == zero.code()) {
+ Format(instr, "csrw 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrw 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRS:
+ if (instr->Rs1Value() == zero.code()) {
+ switch (instr->CsrValue()) {
+ case csr_instret:
+ Format(instr, "rdinstret 'rd");
+ break;
+ case csr_instreth:
+ Format(instr, "rdinstreth 'rd");
+ break;
+ case csr_time:
+ Format(instr, "rdtime 'rd");
+ break;
+ case csr_timeh:
+ Format(instr, "rdtimeh 'rd");
+ break;
+ case csr_cycle:
+ Format(instr, "rdcycle 'rd");
+ break;
+ case csr_cycleh:
+ Format(instr, "rdcycleh 'rd");
+ break;
+ case csr_fflags:
+ Format(instr, "frflags 'rd");
+ break;
+ case csr_frm:
+ Format(instr, "frrm 'rd");
+ break;
+ case csr_fcsr:
+ Format(instr, "frcsr 'rd");
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ } else if (instr->RdValue() == zero.code()) {
+ Format(instr, "csrs 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrs 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRC:
+ if (instr->RdValue() == zero.code())
+ Format(instr, "csrc 'csr, 'rs1");
+ else
+ Format(instr, "csrrc 'rd, 'csr, 'rs1");
+ break;
+ case RO_CSRRWI:
+ if (instr->RdValue() == zero.code())
+ Format(instr, "csrwi 'csr, 'uimm");
+ else
+ Format(instr, "csrrwi 'rd, 'csr, 'uimm");
+ break;
+ case RO_CSRRSI:
+ if (instr->RdValue() == zero.code())
+ Format(instr, "csrsi 'csr, 'uimm");
+ else
+ Format(instr, "csrrsi 'rd, 'csr, 'uimm");
+ break;
+ case RO_CSRRCI:
+ if (instr->RdValue() == zero.code())
+ Format(instr, "csrci 'csr, 'uimm");
+ else
+ Format(instr, "csrrci 'rd, 'csr, 'uimm");
+ break;
+ // TODO(riscv): use F Extension macro block
+ case RO_FLW:
+ Format(instr, "flw 'fd, 'imm12('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FLD:
+ Format(instr, "fld 'fd, 'imm12('rs1)");
+ break;
+ default:
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+ if (instr->vl_vs_width() != -1) {
+ DecodeRvvVL(instr);
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+#else
+ UNSUPPORTED_RISCV();
+#endif
+ }
+}
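+
+// Illustrative note: the RO_ADDI case above selects pseudo-instructions, so
+// "addi rd, zero, imm" prints as "li rd, imm", "addi rd, rs1, 0" prints as
+// "mv rd, rs1", and "addi zero, zero, 0" prints as "nop".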
+
+void Decoder::DecodeSType(Instruction* instr) {
+ switch (instr->InstructionBits() & kSTypeMask) {
+ case RO_SB:
+ Format(instr, "sb 'rs2, 'offS('rs1)");
+ break;
+ case RO_SH:
+ Format(instr, "sh 'rs2, 'offS('rs1)");
+ break;
+ case RO_SW:
+ Format(instr, "sw 'rs2, 'offS('rs1)");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_SD:
+ Format(instr, "sd 'rs2, 'offS('rs1)");
+ break;
+#endif /*JS_CODEGEN_RISCV64*/
+ // TODO(riscv): use F Extension macro block
+ case RO_FSW:
+ Format(instr, "fsw 'fs2, 'offS('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FSD:
+ Format(instr, "fsd 'fs2, 'offS('rs1)");
+ break;
+ default:
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+ if (instr->vl_vs_width() != -1) {
+ DecodeRvvVS(instr);
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+#else
+ UNSUPPORTED_RISCV();
+#endif
+ }
+}
+
+void Decoder::DecodeBType(Instruction* instr) {
+ switch (instr->InstructionBits() & kBTypeMask) {
+ case RO_BEQ:
+ Format(instr, "beq 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BNE:
+ Format(instr, "bne 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BLT:
+ Format(instr, "blt 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BGE:
+ Format(instr, "bge 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BLTU:
+ Format(instr, "bltu 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BGEU:
+ Format(instr, "bgeu 'rs1, 'rs2, 'offB");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+void Decoder::DecodeUType(Instruction* instr) {
+  // U-type instructions don't have an additional mask
+ switch (instr->BaseOpcodeFieldRaw()) {
+ case LUI:
+ Format(instr, "lui 'rd, 'imm20U");
+ break;
+ case AUIPC:
+ Format(instr, "auipc 'rd, 'imm20U");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeJType(Instruction* instr) {
+  // J-type instructions don't have an additional mask
+ switch (instr->BaseOpcodeValue()) {
+ case JAL:
+ if (instr->RdValue() == zero.code())
+ Format(instr, "j 'imm20J");
+ else if (instr->RdValue() == ra.code())
+ Format(instr, "jal 'imm20J");
+ else
+ Format(instr, "jal 'rd, 'imm20J");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCRType(Instruction* instr) {
+ switch (instr->RvcFunct4Value()) {
+ case 0b1000:
+ if (instr->RvcRs1Value() != 0 && instr->RvcRs2Value() == 0)
+ Format(instr, "jr 'Crs1");
+ else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0)
+ Format(instr, "mv 'Crd, 'Crs2");
+ else
+ UNSUPPORTED_RISCV();
+ break;
+ case 0b1001:
+ if (instr->RvcRs1Value() == 0 && instr->RvcRs2Value() == 0)
+ Format(instr, "ebreak");
+ else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() == 0)
+ Format(instr, "jalr 'Crs1");
+ else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0)
+ Format(instr, "add 'Crd, 'Crd, 'Crs2");
+ else
+ UNSUPPORTED_RISCV();
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCAType(Instruction* instr) {
+ switch (instr->InstructionBits() & kCATypeMask) {
+ case RO_C_SUB:
+ Format(instr, "sub 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ case RO_C_XOR:
+ Format(instr, "xor 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ case RO_C_OR:
+ Format(instr, "or 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ case RO_C_AND:
+ Format(instr, "and 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_SUBW:
+ Format(instr, "subw 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ case RO_C_ADDW:
+ Format(instr, "addw 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+#endif
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCIType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_NOP_ADDI:
+ if (instr->RvcRdValue() == 0)
+ Format(instr, "nop");
+ else
+ Format(instr, "addi 'Crd, 'Crd, 'Cimm6");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_ADDIW:
+ Format(instr, "addiw 'Crd, 'Crd, 'Cimm6");
+ break;
+#endif
+ case RO_C_LI:
+ Format(instr, "li 'Crd, 'Cimm6");
+ break;
+ case RO_C_LUI_ADD:
+ if (instr->RvcRdValue() == 2)
+ Format(instr, "addi sp, sp, 'Cimm6Addi16sp");
+ else if (instr->RvcRdValue() != 0 && instr->RvcRdValue() != 2)
+ Format(instr, "lui 'Crd, 'Cimm6U");
+ else
+ UNSUPPORTED_RISCV();
+ break;
+ case RO_C_SLLI:
+ Format(instr, "slli 'Crd, 'Crd, 'Cshamt");
+ break;
+ case RO_C_FLDSP:
+ Format(instr, "fld 'Cfd, 'Cimm6Ldsp(sp)");
+ break;
+ case RO_C_LWSP:
+ Format(instr, "lw 'Crd, 'Cimm6Lwsp(sp)");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_LDSP:
+ Format(instr, "ld 'Crd, 'Cimm6Ldsp(sp)");
+ break;
+#elif defined(JS_CODEGEN_RISCV32)
+ case RO_C_FLWSP:
+ Format(instr, "flw 'Cfd, 'Cimm6Ldsp(sp)");
+ break;
+#endif
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCIWType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_ADDI4SPN:
+ Format(instr, "addi 'Crs2s, sp, 'Cimm8Addi4spn");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCSSType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_SWSP:
+ Format(instr, "sw 'Crs2, 'Cimm6Swsp(sp)");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_SDSP:
+ Format(instr, "sd 'Crs2, 'Cimm6Sdsp(sp)");
+ break;
+#elif defined(JS_CODEGEN_RISCV32)
+ case RO_C_FSWSP:
+ Format(instr, "fsw 'Cfs2, 'Cimm6Sdsp(sp)");
+ break;
+#endif
+ case RO_C_FSDSP:
+ Format(instr, "fsd 'Cfs2, 'Cimm6Sdsp(sp)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCLType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_FLD:
+ Format(instr, "fld 'Cfs2s, 'Cimm5D('Crs1s)");
+ break;
+ case RO_C_LW:
+ Format(instr, "lw 'Crs2s, 'Cimm5W('Crs1s)");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_LD:
+ Format(instr, "ld 'Crs2s, 'Cimm5D('Crs1s)");
+ break;
+#elif defined(JS_CODEGEN_RISCV32)
+ case RO_C_FLW:
+ Format(instr, "fld 'Cfs2s, 'Cimm5D('Crs1s)");
+ break;
+#endif
+
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCSType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_FSD:
+ Format(instr, "fsd 'Cfs2s, 'Cimm5D('Crs1s)");
+ break;
+ case RO_C_SW:
+ Format(instr, "sw 'Crs2s, 'Cimm5W('Crs1s)");
+ break;
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_SD:
+ Format(instr, "sd 'Crs2s, 'Cimm5D('Crs1s)");
+ break;
+#elif defined(JS_CODEGEN_RISCV32)
+ case RO_C_FSW:
+ Format(instr, "fsw 'Cfs2s, 'Cimm5D('Crs1s)");
+ break;
+#endif
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCJType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_J:
+ Format(instr, "j 'Cimm11CJ");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCBType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_BNEZ:
+ Format(instr, "bnez 'Crs1s, x0, 'Cimm8B");
+ break;
+ case RO_C_BEQZ:
+ Format(instr, "beqz 'Crs1s, x0, 'Cimm8B");
+ break;
+ case RO_C_MISC_ALU:
+ if (instr->RvcFunct2BValue() == 0b00)
+ Format(instr, "srli 'Crs1s, 'Crs1s, 'Cshamt");
+ else if (instr->RvcFunct2BValue() == 0b01)
+ Format(instr, "srai 'Crs1s, 'Crs1s, 'Cshamt");
+ else if (instr->RvcFunct2BValue() == 0b10)
+ Format(instr, "andi 'Crs1s, 'Crs1s, 'Cimm6");
+ else
+ UNSUPPORTED_RISCV();
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+#undef VERIFY
+
+bool Decoder::IsConstantPoolAt(uint8_t* instr_ptr) {
+ UNSUPPORTED_RISCV();
+ MOZ_CRASH();
+}
+
+int Decoder::ConstantPoolSizeAt(uint8_t* instr_ptr) {
+ UNSUPPORTED_RISCV();
+ MOZ_CRASH();
+}
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int Decoder::InstructionDecode(byte* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ // Print raw instruction bytes.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
+ instr->InstructionBits());
+ switch (instr->InstructionType()) {
+ case Instruction::kRType:
+ DecodeRType(instr);
+ break;
+ case Instruction::kR4Type:
+ DecodeR4Type(instr);
+ break;
+ case Instruction::kIType:
+ DecodeIType(instr);
+ break;
+ case Instruction::kSType:
+ DecodeSType(instr);
+ break;
+ case Instruction::kBType:
+ DecodeBType(instr);
+ break;
+ case Instruction::kUType:
+ DecodeUType(instr);
+ break;
+ case Instruction::kJType:
+ DecodeJType(instr);
+ break;
+ case Instruction::kCRType:
+ DecodeCRType(instr);
+ break;
+ case Instruction::kCAType:
+ DecodeCAType(instr);
+ break;
+ case Instruction::kCJType:
+ DecodeCJType(instr);
+ break;
+ case Instruction::kCIType:
+ DecodeCIType(instr);
+ break;
+ case Instruction::kCIWType:
+ DecodeCIWType(instr);
+ break;
+ case Instruction::kCSSType:
+ DecodeCSSType(instr);
+ break;
+ case Instruction::kCLType:
+ DecodeCLType(instr);
+ break;
+ case Instruction::kCSType:
+ DecodeCSType(instr);
+ break;
+ case Instruction::kCBType:
+ DecodeCBType(instr);
+ break;
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+ case Instruction::kVType:
+ DecodeVType(instr);
+ break;
+#endif
+ default:
+ Format(instr, "UNSUPPORTED");
+ UNSUPPORTED_RISCV();
+ }
+ return instr->InstructionSize();
+}
+
+} // namespace disasm
+
+#undef STRING_STARTS_WITH
+#undef VERIFY
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(uint8_t* addr) const {
+ SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+const char* NameConverter::NameOfConstant(uint8_t* addr) const {
+ return NameOfAddress(addr);
+}
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return Registers::GetName(reg);
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ MOZ_CRASH(" RISC-V does not have the concept of a byte register.");
+}
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ return FloatRegisters::GetName(reg);
+}
+
+const char* NameConverter::NameInCode(uint8_t* addr) const {
+  // The default name converter is called for unknown code, so we do not try
+  // to access any memory.
+ return "";
+}
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+Disassembler::~Disassembler() {}
+
+int Disassembler::InstructionDecode(V8Vector<char> buffer,
+ uint8_t* instruction) {
+ Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+int Disassembler::ConstantPoolSizeAt(uint8_t* instruction) {
+ return Decoder::ConstantPoolSizeAt(instruction);
+}
+
+void Disassembler::Disassemble(FILE* f, uint8_t* begin, uint8_t* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (uint8_t* pc = begin; pc < end;) {
+ EmbeddedVector<char, ReasonableBufferSize> buffer;
+ buffer[0] = '\0';
+ uint8_t* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ fprintf(f, "%p %08x %s\n", prev_pc,
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ }
+}
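+
+// Illustrative usage (assuming a [begin, end) range of generated code bytes):
+//   js::jit::disasm::Disassembler::Disassemble(stderr, begin, end);
+// prints one line per instruction: its address, the raw bits, and the decoded
+// text.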
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/disasm/Disasm-riscv64.h b/js/src/jit/riscv64/disasm/Disasm-riscv64.h
new file mode 100644
index 0000000000..0548523f6b
--- /dev/null
+++ b/js/src/jit/riscv64/disasm/Disasm-riscv64.h
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ */
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_disasm_Disasm_riscv64_h
+#define jit_riscv64_disasm_Disasm_riscv64_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <stdio.h>
+
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/constant/util-riscv64.h"
+namespace js {
+namespace jit {
+namespace disasm {
+
+typedef unsigned char byte;
+
+// Interface and default implementation for converting addresses and
+// register-numbers to text. The default implementation is machine
+// specific.
+class NameConverter {
+ public:
+ virtual ~NameConverter() {}
+ virtual const char* NameOfCPURegister(int reg) const;
+ virtual const char* NameOfByteCPURegister(int reg) const;
+ virtual const char* NameOfXMMRegister(int reg) const;
+ virtual const char* NameOfAddress(byte* addr) const;
+ virtual const char* NameOfConstant(byte* addr) const;
+ virtual const char* NameInCode(byte* addr) const;
+
+ protected:
+ EmbeddedVector<char, 128> tmp_buffer_;
+};
+
+// A generic Disassembler interface
+class Disassembler {
+ public:
+ // Caller deallocates converter.
+ explicit Disassembler(const NameConverter& converter);
+
+ virtual ~Disassembler();
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(V8Vector<char> buffer, uint8_t* instruction);
+
+ // Returns -1 if instruction does not mark the beginning of a constant pool,
+ // or the number of entries in the constant pool beginning here.
+ int ConstantPoolSizeAt(byte* instruction);
+
+ // Write disassembly into specified file 'f' using specified NameConverter
+ // (see constructor).
+ static void Disassemble(FILE* f, uint8_t* begin, uint8_t* end);
+
+ private:
+ const NameConverter& converter_;
+
+ // Disallow implicit constructors.
+ Disassembler() = delete;
+ Disassembler(const Disassembler&) = delete;
+ void operator=(const Disassembler&) = delete;
+};
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_disasm_Disasm_riscv64_h
diff --git a/js/src/jit/riscv64/extension/base-assembler-riscv.cc b/js/src/jit/riscv64/extension/base-assembler-riscv.cc
new file mode 100644
index 0000000000..a64cc818b3
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-assembler-riscv.cc
@@ -0,0 +1,517 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+
+namespace js {
+namespace jit {
+
+int ToNumber(Register reg) {
+ MOZ_ASSERT(reg.code() < Registers::Total && reg.code() >= 0);
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // ra
+ 2, // sp
+ 3, // gp
+ 4, // tp
+ 5, // t0
+ 6, // t1
+ 7, // t2
+ 8, // s0/fp
+ 9, // s1
+ 10, // a0
+ 11, // a1
+ 12, // a2
+ 13, // a3
+ 14, // a4
+ 15, // a5
+ 16, // a6
+ 17, // a7
+ 18, // s2
+ 19, // s3
+ 20, // s4
+ 21, // s5
+ 22, // s6
+ 23, // s7
+ 24, // s8
+ 25, // s9
+ 26, // s10
+ 27, // s11
+ 28, // t3
+ 29, // t4
+ 30, // t5
+ 31, // t6
+ };
+ return kNumbers[reg.code()];
+}
+
+Register ToRegister(uint32_t num) {
+  // num is unsigned, so only the upper bound needs checking.
+  MOZ_ASSERT(num < Registers::Total);
+ const Register kRegisters[] = {
+ zero_reg, ra, sp, gp, tp, t0, t1, t2, fp, s1, a0, a1, a2, a3, a4, a5,
+ a6, a7, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, t3, t4, t5, t6};
+ return kRegisters[num];
+}
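+
+// Example (per the tables above): ToNumber(a0) yields 10 and ToRegister(10)
+// yields a0; the two helpers are inverses over the 32 general registers.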
+
+// ----- Top-level instruction formats (R, I, S, B, U, J), matching the
+// encodings defined in the RISC-V ISA manual.
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, Register rd, Register rs1,
+ Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, FPURegister rd,
+ FPURegister rs1, FPURegister rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ FPURegister rs1, Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, FPURegister rd,
+ Register rs1, Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, FPURegister rd,
+ FPURegister rs1, Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ FPURegister rs1, FPURegister rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
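+
+// Encoding sketch (assuming the k*Shift constants use the architectural bit
+// positions: rd at 7, funct3 at 12, rs1 at 15, rs2 at 20, funct7 at 25):
+// GenInstrR(0b0000000, 0b000, <base opcode 0x33>, a0, a1, a2) packs
+// funct7|rs2|rs1|funct3|rd|opcode and emits 0x00c58533, i.e. "add a0, a1, a2".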
+
+void AssemblerRiscvBase::GenInstrR4(uint8_t funct2, BaseOpcode opcode,
+ Register rd, Register rs1, Register rs2,
+ Register rs3, FPURoundingMode frm) {
+ MOZ_ASSERT(is_uint2(funct2) && is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR4(uint8_t funct2, BaseOpcode opcode,
+ FPURegister rd, FPURegister rs1,
+ FPURegister rs2, FPURegister rs3,
+ FPURoundingMode frm) {
+ MOZ_ASSERT(is_uint2(funct2) && is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrRAtomic(uint8_t funct5, bool aq, bool rl,
+ uint8_t funct3, Register rd,
+ Register rs1, Register rs2) {
+ MOZ_ASSERT(is_uint5(funct5) && is_uint3(funct3));
+ Instr instr = AMO | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (rl << kRlShift) | (aq << kAqShift) | (funct5 << kFunct5Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrRFrm(uint8_t funct7, BaseOpcode opcode,
+ Register rd, Register rs1, Register rs2,
+ FPURoundingMode frm) {
+ MOZ_ASSERT(is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrI(uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && (is_uint12(imm12) || is_int12(imm12)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrI(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, Register rs1,
+ int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && (is_uint12(imm12) || is_int12(imm12)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrIShift(bool arithshift, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ Register rs1, uint8_t shamt) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(shamt));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (shamt << kShamtShift) |
+ (arithshift << kArithShiftShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrIShiftW(bool arithshift, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ Register rs1, uint8_t shamt) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(shamt));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (shamt << kShamtWShift) |
+ (arithshift << kArithShiftShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrS(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, Register rs2, int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int12(imm12));
+ Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm12 & 0xfe0) << 20); // bits 11-5
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrS(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, FPURegister rs2,
+ int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int12(imm12));
+ Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm12 & 0xfe0) << 20); // bits 11-5
+ emit(instr);
+}
+
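+// B-type branch offsets are 13-bit and even (bit 0 is implicit); the remaining
+// bits are scattered across the instruction as annotated below.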
+void AssemblerRiscvBase::GenInstrB(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, Register rs2, int16_t imm13) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int13(imm13) && ((imm13 & 1) == 0));
+ Instr instr = opcode | ((imm13 & 0x800) >> 4) | // bit 11
+ ((imm13 & 0x1e) << 7) | // bits 4-1
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm13 & 0x7e0) << 20) | // bits 10-5
+ ((imm13 & 0x1000) << 19); // bit 12
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrU(BaseOpcode opcode, Register rd,
+ int32_t imm20) {
+ MOZ_ASSERT((is_int20(imm20) || is_uint20(imm20)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (imm20 << kImm20Shift);
+ emit(instr);
+}
+
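+// J-type jump offsets are 21-bit and even (bit 0 is implicit).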
+void AssemblerRiscvBase::GenInstrJ(BaseOpcode opcode, Register rd,
+ int32_t imm21) {
+ MOZ_ASSERT(is_int21(imm21) && ((imm21 & 1) == 0));
+ Instr instr = opcode | (rd.code() << kRdShift) |
+ (imm21 & 0xff000) | // bits 19-12
+ ((imm21 & 0x800) << 9) | // bit 11
+ ((imm21 & 0x7fe) << 20) | // bits 10-1
+ ((imm21 & 0x100000) << 11); // bit 20
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCR(uint8_t funct4, BaseOpcode opcode,
+ Register rd, Register rs2) {
+ MOZ_ASSERT(is_uint4(funct4));
+ ShortInstr instr = opcode | (rs2.code() << kRvcRs2Shift) |
+ (rd.code() << kRvcRdShift) | (funct4 << kRvcFunct4Shift);
+ emit(instr);
+}
+
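+// Compressed formats with 3-bit register fields (rd'/rs1'/rs2') can only name
+// x8-x15 (f8-f15 for FP), hence the & 0x7 masking below.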
+void AssemblerRiscvBase::GenInstrCA(uint8_t funct6, BaseOpcode opcode,
+ Register rd, uint8_t funct, Register rs2) {
+ MOZ_ASSERT(is_uint6(funct6) && is_uint2(funct));
+ ShortInstr instr = opcode | ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((rd.code() & 0x7) << kRvcRs1sShift) |
+ (funct6 << kRvcFunct6Shift) | (funct << kRvcFunct2Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCI(uint8_t funct3, BaseOpcode opcode,
+ Register rd, int8_t imm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int6(imm6));
+ ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((imm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCIU(uint8_t funct3, BaseOpcode opcode,
+ Register rd, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCIU(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCIW(uint8_t funct3, BaseOpcode opcode,
+ Register rd, uint8_t uimm8) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint8(uimm8));
+ ShortInstr instr = opcode | ((uimm8) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCSS(uint8_t funct3, BaseOpcode opcode,
+ Register rs2, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCSS(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rs2, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCL(uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCL(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, Register rs1,
+ uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+void AssemblerRiscvBase::GenInstrCJ(uint8_t funct3, BaseOpcode opcode,
+ uint16_t uint11) {
+ MOZ_ASSERT(is_uint11(uint11));
+ ShortInstr instr = opcode | (funct3 << kRvcFunct3Shift) | (uint11 << 2);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCS(uint8_t funct3, BaseOpcode opcode,
+ Register rs2, Register rs1, uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCS(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rs2, Register rs1,
+ uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCB(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, uint8_t uimm8) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint8(uimm8));
+ ShortInstr instr = opcode | ((uimm8 & 0x1f) << 2) | ((uimm8 & 0xe0) << 5) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCBA(uint8_t funct3, uint8_t funct2,
+ BaseOpcode opcode, Register rs1,
+ int8_t imm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint2(funct2) && is_int6(imm6));
+ ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) | ((imm6 & 0x20) << 7) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift) |
+ (funct3 << kRvcFunct3Shift) | (funct2 << 10);
+ emit(instr);
+}
+// ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
+
+void AssemblerRiscvBase::GenInstrBranchCC_rri(uint8_t funct3, Register rs1,
+ Register rs2, int16_t imm13) {
+ GenInstrB(funct3, BRANCH, rs1, rs2, imm13);
+}
+
+void AssemblerRiscvBase::GenInstrLoad_ri(uint8_t funct3, Register rd,
+ Register rs1, int16_t imm12) {
+ GenInstrI(funct3, LOAD, rd, rs1, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrStore_rri(uint8_t funct3, Register rs1,
+ Register rs2, int16_t imm12) {
+ GenInstrS(funct3, STORE, rs1, rs2, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrALU_ri(uint8_t funct3, Register rd,
+ Register rs1, int16_t imm12) {
+ GenInstrI(funct3, OP_IMM, rd, rs1, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrShift_ri(bool arithshift, uint8_t funct3,
+ Register rd, Register rs1,
+ uint8_t shamt) {
+ MOZ_ASSERT(is_uint6(shamt));
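+ // The arithmetic-shift selector is bit 10 of the I-type immediate, which
+ // lands in instruction bit 30.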
+ GenInstrI(funct3, OP_IMM, rd, rs1, (arithshift << 10) | shamt);
+}
+
+void AssemblerRiscvBase::GenInstrALU_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrCSR_ir(uint8_t funct3, Register rd,
+ ControlStatusReg csr, Register rs1) {
+ GenInstrI(funct3, SYSTEM, rd, rs1, csr);
+}
+
+void AssemblerRiscvBase::GenInstrCSR_ii(uint8_t funct3, Register rd,
+ ControlStatusReg csr, uint8_t imm5) {
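+ // CSR immediate forms encode the 5-bit immediate in the rs1 field.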
+ GenInstrI(funct3, SYSTEM, rd, ToRegister(imm5), csr);
+}
+
+void AssemblerRiscvBase::GenInstrShiftW_ri(bool arithshift, uint8_t funct3,
+ Register rd, Register rs1,
+ uint8_t shamt) {
+ GenInstrIShiftW(arithshift, funct3, OP_IMM_32, rd, rs1, shamt);
+}
+
+void AssemblerRiscvBase::GenInstrALUW_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_32, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrPriv(uint8_t funct7, Register rs1,
+ Register rs2) {
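+ // Privileged instructions reuse the R-type layout with rd fixed to x0.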
+ GenInstrR(funct7, 0b000, SYSTEM, ToRegister(0UL), rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd,
+ Register rs1, int16_t imm12) {
+ GenInstrI(funct3, LOAD_FP, rd, rs1, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrStoreFP_rri(uint8_t funct3, Register rs1,
+ FPURegister rs2, int16_t imm12) {
+ GenInstrS(funct3, STORE_FP, rs1, rs2, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ FPURegister rd, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ FPURegister rd, FPURegister rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, FPURegister rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/base-assembler-riscv.h b/js/src/jit/riscv64/extension/base-assembler-riscv.h
new file mode 100644
index 0000000000..cb3083d365
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-assembler-riscv.h
@@ -0,0 +1,219 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#ifndef jit_riscv64_extension_Base_assembler_riscv_h
+#define jit_riscv64_extension_Base_assembler_riscv_h
+
+#include <memory>
+#include <set>
+#include <stdio.h>
+
+#include "jit/Label.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Register-riscv64.h"
+
+#define xlen (uint8_t(sizeof(void*) * 8))
+
+#define kBitsPerByte 8UL
+// Check number width.
+inline constexpr bool is_intn(int64_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < 64));
+ int64_t limit = static_cast<int64_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline constexpr bool is_uintn(int64_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return !(x >> n);
+}
+#undef kBitsPerByte
+// clang-format off
+#define INT_1_TO_63_LIST(V) \
+ V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) V(9) V(10) \
+ V(11) V(12) V(13) V(14) V(15) V(16) V(17) V(18) V(19) V(20) \
+ V(21) V(22) V(23) V(24) V(25) V(26) V(27) V(28) V(29) V(30) \
+ V(31) V(32) V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+ V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) V(50) \
+ V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60) \
+ V(61) V(62) V(63)
+// clang-format on
+
+#define DECLARE_IS_INT_N(N) \
+ inline constexpr bool is_int##N(int64_t x) { return is_intn(x, N); }
+
+#define DECLARE_IS_UINT_N(N) \
+ template <class T> \
+ inline constexpr bool is_uint##N(T x) { \
+ return is_uintn(x, N); \
+ }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef INT_1_TO_63_LIST
+
+namespace js {
+namespace jit {
+
+typedef FloatRegister FPURegister;
+#define zero_reg zero
+
+#define DEBUG_PRINTF(...) \
+ if (FLAG_riscv_debug) { \
+ std::printf(__VA_ARGS__); \
+ }
+
+int ToNumber(Register reg);
+Register ToRegister(uint32_t num);
+
+class AssemblerRiscvBase {
+ protected:
+ virtual int32_t branch_offset_helper(Label* L, OffsetSize bits) = 0;
+
+ virtual void emit(Instr x) = 0;
+ virtual void emit(ShortInstr x) = 0;
+ virtual void emit(uint64_t x) = 0;
+ virtual uint32_t currentOffset() = 0;
+ // Instruction generation.
+
+ // ----- Top-level instruction formats match those in the ISA manual
+ // (R, I, S, B, U, J). These match the formats defined in LLVM's
+ // RISCVInstrFormats.td.
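+ //
+ // For reference, GenInstrR assembles the R-type layout:
+ // funct7 [31:25] | rs2 [24:20] | rs1 [19:15] | funct3 [14:12] |
+ // rd [11:7] | opcode [6:0]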
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
+ Register rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, Register rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, FPURegister rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
+ FPURegister rs1, FPURegister rs2);
+ void GenInstrR4(uint8_t funct2, BaseOpcode opcode, Register rd, Register rs1,
+ Register rs2, Register rs3, FPURoundingMode frm);
+ void GenInstrR4(uint8_t funct2, BaseOpcode opcode, FPURegister rd,
+ FPURegister rs1, FPURegister rs2, FPURegister rs3,
+ FPURoundingMode frm);
+ void GenInstrRAtomic(uint8_t funct5, bool aq, bool rl, uint8_t funct3,
+ Register rd, Register rs1, Register rs2);
+ void GenInstrRFrm(uint8_t funct7, BaseOpcode opcode, Register rd,
+ Register rs1, Register rs2, FPURoundingMode frm);
+ void GenInstrI(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
+ int16_t imm12);
+ void GenInstrI(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
+ Register rs1, int16_t imm12);
+ void GenInstrIShift(bool arithshift, uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, uint8_t shamt);
+ void GenInstrIShiftW(bool arithshift, uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, uint8_t shamt);
+ void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1,
+ FPURegister rs2, int16_t imm12);
+ void GenInstrB(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
+ int16_t imm13);
+ void GenInstrU(BaseOpcode opcode, Register rd, int32_t imm20);
+ void GenInstrJ(BaseOpcode opcode, Register rd, int32_t imm21);
+ void GenInstrCR(uint8_t funct4, BaseOpcode opcode, Register rd, Register rs2);
+ void GenInstrCA(uint8_t funct6, BaseOpcode opcode, Register rd, uint8_t funct,
+ Register rs2);
+ void GenInstrCI(uint8_t funct3, BaseOpcode opcode, Register rd, int8_t imm6);
+ void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, Register rd,
+ uint8_t uimm6);
+ void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
+ uint8_t uimm6);
+ void GenInstrCIW(uint8_t funct3, BaseOpcode opcode, Register rd,
+ uint8_t uimm8);
+ void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
+ uint8_t uimm6);
+ void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, Register rs2,
+ uint8_t uimm6);
+ void GenInstrCL(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCL(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
+ Register rs1, uint8_t uimm5);
+ void GenInstrCS(uint8_t funct3, BaseOpcode opcode, Register rs2, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
+ Register rs1, uint8_t uimm5);
+ void GenInstrCJ(uint8_t funct3, BaseOpcode opcode, uint16_t uint11);
+ void GenInstrCB(uint8_t funct3, BaseOpcode opcode, Register rs1,
+ uint8_t uimm8);
+ void GenInstrCBA(uint8_t funct3, uint8_t funct2, BaseOpcode opcode,
+ Register rs1, int8_t imm6);
+
+ // ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
+ void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
+ int16_t imm13);
+ void GenInstrLoad_ri(uint8_t funct3, Register rd, Register rs1,
+ int16_t imm12);
+ void GenInstrStore_rri(uint8_t funct3, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrALU_ri(uint8_t funct3, Register rd, Register rs1, int16_t imm12);
+ void GenInstrShift_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt);
+ void GenInstrALU_rr(uint8_t funct7, uint8_t funct3, Register rd, Register rs1,
+ Register rs2);
+ void GenInstrCSR_ir(uint8_t funct3, Register rd, ControlStatusReg csr,
+ Register rs1);
+ void GenInstrCSR_ii(uint8_t funct3, Register rd, ControlStatusReg csr,
+ uint8_t imm5);
+ void GenInstrShiftW_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt);
+ void GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ Register rs1, Register rs2);
+ void GenInstrPriv(uint8_t funct7, Register rs1, Register rs2);
+ void GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, Register rs1,
+ int16_t imm12);
+ void GenInstrStoreFP_rri(uint8_t funct3, Register rs1, FPURegister rs2,
+ int16_t imm12);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, FPURegister rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ Register rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, FPURegister rs2);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_extension_Base_assembler_riscv_h
diff --git a/js/src/jit/riscv64/extension/base-riscv-i.cc b/js/src/jit/riscv64/extension/base-riscv-i.cc
new file mode 100644
index 0000000000..2ee8877eb1
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-riscv-i.cc
@@ -0,0 +1,351 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/base-riscv-i.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+void AssemblerRISCVI::lui(Register rd, int32_t imm20) {
+ GenInstrU(LUI, rd, imm20);
+}
+
+void AssemblerRISCVI::auipc(Register rd, int32_t imm20) {
+ GenInstrU(AUIPC, rd, imm20);
+}
+
+// Jumps
+
+void AssemblerRISCVI::jal(Register rd, int32_t imm21) {
+ GenInstrJ(JAL, rd, imm21);
+}
+
+void AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) {
+ GenInstrI(0b000, JALR, rd, rs1, imm12);
+}
+
+// Branches
+
+void AssemblerRISCVI::beq(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b000, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bne(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b001, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::blt(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b100, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bge(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b101, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bltu(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b110, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bgeu(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b111, rs1, rs2, imm13);
+}
+
+// Loads
+
+void AssemblerRISCVI::lb(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b000, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lh(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b001, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lw(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b010, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lbu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b100, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lhu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b101, rd, rs1, imm12);
+}
+
+// Stores
+
+void AssemblerRISCVI::sb(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b000, base, source, imm12);
+}
+
+void AssemblerRISCVI::sh(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b001, base, source, imm12);
+}
+
+void AssemblerRISCVI::sw(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b010, base, source, imm12);
+}
+
+// Arithmetic with immediate
+
+void AssemblerRISCVI::addi(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b000, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::slti(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b010, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::sltiu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b011, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::xori(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b100, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::ori(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b110, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::andi(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b111, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::slli(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(0, 0b001, rd, rs1, shamt & 0x3f);
+}
+
+void AssemblerRISCVI::srli(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(0, 0b101, rd, rs1, shamt & 0x3f);
+}
+
+void AssemblerRISCVI::srai(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(1, 0b101, rd, rs1, shamt & 0x3f);
+}
+
+// Arithmetic
+
+void AssemblerRISCVI::add(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sub(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0100000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sll(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::slt(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sltu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::xor_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b100, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::srl(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sra(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0100000, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::or_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b110, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::and_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b111, rd, rs1, rs2);
+}
+
+// Memory fences
+
+void AssemblerRISCVI::fence(uint8_t pred, uint8_t succ) {
+ MOZ_ASSERT(is_uint4(pred) && is_uint4(succ));
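+ // FENCE packs the successor set into imm[3:0], the predecessor set into
+ // imm[7:4], and the fence mode (fm) into imm[11:8]; fm == 0000 is a plain
+ // fence.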
+ uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8);
+ GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
+}
+
+void AssemblerRISCVI::fence_tso() {
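+ // fence.tso: fm == 1000 with both predecessor and successor sets RW (0011).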
+ uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8);
+ GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
+}
+
+// Environment call / break
+
+void AssemblerRISCVI::ecall() {
+ GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 0);
+}
+
+void AssemblerRISCVI::ebreak() {
+ GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 1);
+}
+
+// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+// instruction (i.e., it should always trap, if your implementation has invalid
+// instruction traps).
+void AssemblerRISCVI::unimp() {
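+ // Encodes csrrw x0, cycle, x0 (0xc0001073); writing the read-only cycle CSR
+ // raises an illegal-instruction exception.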
+ GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000);
+}
+
+bool AssemblerRISCVI::IsBranch(Instr instr) {
+ return (instr & kBaseOpcodeMask) == BRANCH;
+}
+
+bool AssemblerRISCVI::IsJump(Instr instr) {
+ int Op = instr & kBaseOpcodeMask;
+ return Op == JAL || Op == JALR;
+}
+
+bool AssemblerRISCVI::IsNop(Instr instr) { return instr == kNopByte; }
+
+bool AssemblerRISCVI::IsJal(Instr instr) {
+ return (instr & kBaseOpcodeMask) == JAL;
+}
+
+bool AssemblerRISCVI::IsJalr(Instr instr) {
+ return (instr & kBaseOpcodeMask) == JALR;
+}
+
+bool AssemblerRISCVI::IsLui(Instr instr) {
+ return (instr & kBaseOpcodeMask) == LUI;
+}
+bool AssemblerRISCVI::IsAuipc(Instr instr) {
+ return (instr & kBaseOpcodeMask) == AUIPC;
+}
+bool AssemblerRISCVI::IsAddi(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDI;
+}
+bool AssemblerRISCVI::IsOri(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ORI;
+}
+bool AssemblerRISCVI::IsSlli(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_SLLI;
+}
+
+int AssemblerRISCVI::JumpOffset(Instr instr) {
+ int32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
+ (instr & 0xff000) | ((instr & 0x80000000) >> 11);
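+ // Sign-extend the reassembled 21-bit offset from bit 20.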
+ imm21 = imm21 << 11 >> 11;
+ return imm21;
+}
+
+int AssemblerRISCVI::JalrOffset(Instr instr) {
+ MOZ_ASSERT(IsJalr(instr));
+ int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
+ return imm12;
+}
+
+int AssemblerRISCVI::AuipcOffset(Instr instr) {
+ MOZ_ASSERT(IsAuipc(instr));
+ int32_t imm20 = static_cast<int32_t>(instr & kImm20Mask);
+ return imm20;
+}
+
+bool AssemblerRISCVI::IsLw(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LW;
+}
+
+int AssemblerRISCVI::LoadOffset(Instr instr) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(IsLd(instr));
+#elif JS_CODEGEN_RISCV32
+ MOZ_ASSERT(IsLw(instr));
+#endif
+ int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
+ return imm12;
+}
+
+#ifdef JS_CODEGEN_RISCV64
+
+bool AssemblerRISCVI::IsAddiw(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDIW;
+}
+
+bool AssemblerRISCVI::IsLd(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LD;
+}
+
+void AssemblerRISCVI::lwu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b110, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::ld(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b011, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::sd(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b011, base, source, imm12);
+}
+
+void AssemblerRISCVI::addiw(Register rd, Register rs1, int16_t imm12) {
+ GenInstrI(0b000, OP_IMM_32, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::slliw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(0, 0b001, rd, rs1, shamt & 0x1f);
+}
+
+void AssemblerRISCVI::srliw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(0, 0b101, rd, rs1, shamt & 0x1f);
+}
+
+void AssemblerRISCVI::sraiw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(1, 0b101, rd, rs1, shamt & 0x1f);
+}
+
+void AssemblerRISCVI::addw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::subw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0100000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sllw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::srlw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sraw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0100000, 0b101, rd, rs1, rs2);
+}
+
+#endif
+
+int AssemblerRISCVI::BranchOffset(Instr instr) {
+ // | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode |
+ // 31 25 11 7
+ int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) |
+ ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19);
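+ // Sign-extend the reassembled 13-bit offset from bit 12.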
+ imm13 = imm13 << 19 >> 19;
+ return imm13;
+}
+
+int AssemblerRISCVI::BrachlongOffset(Instr auipc, Instr instr_I) {
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
+ InstructionBase::kIType);
+ MOZ_ASSERT(IsAuipc(auipc));
+ MOZ_ASSERT(((auipc & kRdFieldMask) >> kRdShift) ==
+ ((instr_I & kRs1FieldMask) >> kRs1Shift));
+ int32_t imm_auipc = AuipcOffset(auipc);
+ int32_t imm12 = static_cast<int32_t>(instr_I & kImm12Mask) >> 20;
+ int32_t offset = imm12 + imm_auipc;
+ return offset;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/base-riscv-i.h b/js/src/jit/riscv64/extension/base-riscv-i.h
new file mode 100644
index 0000000000..cca342c960
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-riscv-i.h
@@ -0,0 +1,273 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Base_riscv_i_h_
+#define jit_riscv64_extension_Base_riscv_i_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+namespace js {
+namespace jit {
+
+class AssemblerRISCVI : public AssemblerRiscvBase {
+ public:
+ void lui(Register rd, int32_t imm20);
+ void auipc(Register rd, int32_t imm20);
+
+ // Jumps
+ void jal(Register rd, int32_t imm21);
+ void jalr(Register rd, Register rs1, int16_t imm12);
+
+ // Branches
+ void beq(Register rs1, Register rs2, int16_t imm13);
+ void bne(Register rs1, Register rs2, int16_t imm13);
+ void blt(Register rs1, Register rs2, int16_t imm13);
+ void bge(Register rs1, Register rs2, int16_t imm13);
+ void bltu(Register rs1, Register rs2, int16_t imm13);
+ void bgeu(Register rs1, Register rs2, int16_t imm13);
+ // Loads
+ void lb(Register rd, Register rs1, int16_t imm12);
+ void lh(Register rd, Register rs1, int16_t imm12);
+ void lw(Register rd, Register rs1, int16_t imm12);
+ void lbu(Register rd, Register rs1, int16_t imm12);
+ void lhu(Register rd, Register rs1, int16_t imm12);
+
+ // Stores
+ void sb(Register source, Register base, int16_t imm12);
+ void sh(Register source, Register base, int16_t imm12);
+ void sw(Register source, Register base, int16_t imm12);
+
+ // Arithmetic with immediate
+ void addi(Register rd, Register rs1, int16_t imm12);
+ void slti(Register rd, Register rs1, int16_t imm12);
+ void sltiu(Register rd, Register rs1, int16_t imm12);
+ void xori(Register rd, Register rs1, int16_t imm12);
+ void ori(Register rd, Register rs1, int16_t imm12);
+ void andi(Register rd, Register rs1, int16_t imm12);
+ void slli(Register rd, Register rs1, uint8_t shamt);
+ void srli(Register rd, Register rs1, uint8_t shamt);
+ void srai(Register rd, Register rs1, uint8_t shamt);
+
+ // Arithmetic
+ void add(Register rd, Register rs1, Register rs2);
+ void sub(Register rd, Register rs1, Register rs2);
+ void sll(Register rd, Register rs1, Register rs2);
+ void slt(Register rd, Register rs1, Register rs2);
+ void sltu(Register rd, Register rs1, Register rs2);
+ void xor_(Register rd, Register rs1, Register rs2);
+ void srl(Register rd, Register rs1, Register rs2);
+ void sra(Register rd, Register rs1, Register rs2);
+ void or_(Register rd, Register rs1, Register rs2);
+ void and_(Register rd, Register rs1, Register rs2);
+
+ // Other pseudo instructions that are not part of RISC-V pseudo assembly.
+ void nor(Register rd, Register rs, Register rt) {
+ or_(rd, rs, rt);
+ not_(rd, rd);
+ }
+
+ // Memory fences
+ void fence(uint8_t pred, uint8_t succ);
+ void fence_tso();
+
+ // Environment call / break
+ void ecall();
+ void ebreak();
+
+ void sync() { fence(0b1111, 0b1111); }
+
+ // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+ // instruction (i.e., it should always trap, if your implementation has
+ // invalid instruction traps).
+ void unimp();
+
+ static int JumpOffset(Instr instr);
+ static int AuipcOffset(Instr instr);
+ static int JalrOffset(Instr instr);
+ static int LoadOffset(Instr instr);
+ static int BranchOffset(Instr instr);
+ static int BrachlongOffset(Instr auipc, Instr instr_I);
+ static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t imm = target_pos - pos;
+ MOZ_ASSERT((imm & 1) == 0);
+ MOZ_ASSERT(is_intn(imm, kBranchOffsetBits));
+
+ instr &= ~kBImm12Mask;
+ int32_t imm12 = ((imm & 0x800) >> 4) | // bit 11
+ ((imm & 0x1e) << 7) | // bits 4-1
+ ((imm & 0x7e0) << 20) | // bits 10-5
+ ((imm & 0x1000) << 19); // bit 12
+
+ return instr | (imm12 & kBImm12Mask);
+ }
+
+ static inline Instr SetJalOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ MOZ_ASSERT(IsJal(instr));
+ int32_t imm = target_pos - pos;
+ MOZ_ASSERT((imm & 1) == 0);
+ MOZ_ASSERT(is_intn(imm, kJumpOffsetBits));
+
+ instr &= ~kImm20Mask;
+ int32_t imm20 = (imm & 0xff000) | // bits 19-12
+ ((imm & 0x800) << 9) | // bit 11
+ ((imm & 0x7fe) << 20) | // bits 10-1
+ ((imm & 0x100000) << 11); // bit 20
+
+ return instr | (imm20 & kImm20Mask);
+ }
+
+ static inline Instr SetJalrOffset(int32_t offset, Instr instr) {
+ MOZ_ASSERT(IsJalr(instr));
+ MOZ_ASSERT(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ MOZ_ASSERT(IsJalr(instr | (imm12 & kImm12Mask)));
+ MOZ_ASSERT(JalrOffset(instr | (imm12 & kImm12Mask)) == offset);
+ return instr | (imm12 & kImm12Mask);
+ }
+
+ static inline Instr SetLoadOffset(int32_t offset, Instr instr) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(IsLd(instr));
+#elif JS_CODEGEN_RISCV32
+ MOZ_ASSERT(IsLw(instr));
+#endif
+ MOZ_ASSERT(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ return instr | (imm12 & kImm12Mask);
+ }
+
+ static inline Instr SetAuipcOffset(int32_t offset, Instr instr) {
+ MOZ_ASSERT(IsAuipc(instr));
+ MOZ_ASSERT(is_int20(offset));
+ instr = (instr & ~kImm31_12Mask) | ((offset & kImm19_0Mask) << 12);
+ return instr;
+ }
+
+ // Check if an instruction is a branch of some kind.
+ static bool IsBranch(Instr instr);
+ static bool IsNop(Instr instr);
+ static bool IsJump(Instr instr);
+ static bool IsJal(Instr instr);
+ static bool IsJalr(Instr instr);
+ static bool IsLui(Instr instr);
+ static bool IsAuipc(Instr instr);
+ static bool IsAddi(Instr instr);
+ static bool IsOri(Instr instr);
+ static bool IsSlli(Instr instr);
+ static bool IsLw(Instr instr);
+
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset13);
+ }
+ inline int32_t jump_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+
+ // Branches
+ void beq(Register rs1, Register rs2, Label* L) {
+ beq(rs1, rs2, branch_offset(L));
+ }
+ void bne(Register rs1, Register rs2, Label* L) {
+ bne(rs1, rs2, branch_offset(L));
+ }
+ void blt(Register rs1, Register rs2, Label* L) {
+ blt(rs1, rs2, branch_offset(L));
+ }
+ void bge(Register rs1, Register rs2, Label* L) {
+ bge(rs1, rs2, branch_offset(L));
+ }
+ void bltu(Register rs1, Register rs2, Label* L) {
+ bltu(rs1, rs2, branch_offset(L));
+ }
+ void bgeu(Register rs1, Register rs2, Label* L) {
+ bgeu(rs1, rs2, branch_offset(L));
+ }
+
+ void beqz(Register rs, int16_t imm13) { beq(rs, zero_reg, imm13); }
+ void beqz(Register rs1, Label* L) { beqz(rs1, branch_offset(L)); }
+ void bnez(Register rs, int16_t imm13) { bne(rs, zero_reg, imm13); }
+ void bnez(Register rs1, Label* L) { bnez(rs1, branch_offset(L)); }
+ void blez(Register rs, int16_t imm13) { bge(zero_reg, rs, imm13); }
+ void blez(Register rs1, Label* L) { blez(rs1, branch_offset(L)); }
+ void bgez(Register rs, int16_t imm13) { bge(rs, zero_reg, imm13); }
+ void bgez(Register rs1, Label* L) { bgez(rs1, branch_offset(L)); }
+ void bltz(Register rs, int16_t imm13) { blt(rs, zero_reg, imm13); }
+ void bltz(Register rs1, Label* L) { bltz(rs1, branch_offset(L)); }
+ void bgtz(Register rs, int16_t imm13) { blt(zero_reg, rs, imm13); }
+ void bgtz(Register rs1, Label* L) { bgtz(rs1, branch_offset(L)); }
+ void bgt(Register rs1, Register rs2, int16_t imm13) { blt(rs2, rs1, imm13); }
+ void bgt(Register rs1, Register rs2, Label* L) {
+ bgt(rs1, rs2, branch_offset(L));
+ }
+ void ble(Register rs1, Register rs2, int16_t imm13) { bge(rs2, rs1, imm13); }
+ void ble(Register rs1, Register rs2, Label* L) {
+ ble(rs1, rs2, branch_offset(L));
+ }
+ void bgtu(Register rs1, Register rs2, int16_t imm13) {
+ bltu(rs2, rs1, imm13);
+ }
+ void bgtu(Register rs1, Register rs2, Label* L) {
+ bgtu(rs1, rs2, branch_offset(L));
+ }
+ void bleu(Register rs1, Register rs2, int16_t imm13) {
+ bgeu(rs2, rs1, imm13);
+ }
+ void bleu(Register rs1, Register rs2, Label* L) {
+ bleu(rs1, rs2, branch_offset(L));
+ }
+
+ void j(int32_t imm21) { jal(zero_reg, imm21); }
+ void j(Label* L) { j(jump_offset(L)); }
+ void b(Label* L) { j(L); }
+ void jal(int32_t imm21) { jal(ra, imm21); }
+ void jal(Label* L) { jal(jump_offset(L)); }
+ void jr(Register rs) { jalr(zero_reg, rs, 0); }
+ void jr(Register rs, int32_t imm12) { jalr(zero_reg, rs, imm12); }
+ void jalr(Register rs, int32_t imm12) { jalr(ra, rs, imm12); }
+ void jalr(Register rs) { jalr(ra, rs, 0); }
+ void call(int32_t offset) {
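+ // Split the offset across auipc+jalr: jalr sign-extends its low 12 bits, so
+ // add bit 11 into the upper part to compensate when it is set.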
+ auipc(ra, (offset >> 12) + ((offset & 0x800) >> 11));
+ jalr(ra, ra, offset << 20 >> 20);
+ }
+
+ void mv(Register rd, Register rs) { addi(rd, rs, 0); }
+ void not_(Register rd, Register rs) { xori(rd, rs, -1); }
+ void neg(Register rd, Register rs) { sub(rd, zero_reg, rs); }
+ void seqz(Register rd, Register rs) { sltiu(rd, rs, 1); }
+ void snez(Register rd, Register rs) { sltu(rd, zero_reg, rs); }
+ void sltz(Register rd, Register rs) { slt(rd, rs, zero_reg); }
+ void sgtz(Register rd, Register rs) { slt(rd, zero_reg, rs); }
+
+#if JS_CODEGEN_RISCV64
+ void lwu(Register rd, Register rs1, int16_t imm12);
+ void ld(Register rd, Register rs1, int16_t imm12);
+ void sd(Register source, Register base, int16_t imm12);
+ void addiw(Register rd, Register rs1, int16_t imm12);
+ void slliw(Register rd, Register rs1, uint8_t shamt);
+ void srliw(Register rd, Register rs1, uint8_t shamt);
+ void sraiw(Register rd, Register rs1, uint8_t shamt);
+ void addw(Register rd, Register rs1, Register rs2);
+ void subw(Register rd, Register rs1, Register rs2);
+ void sllw(Register rd, Register rs1, Register rs2);
+ void srlw(Register rd, Register rs1, Register rs2);
+ void sraw(Register rd, Register rs1, Register rs2);
+ void negw(Register rd, Register rs) { subw(rd, zero_reg, rs); }
+ void sext_w(Register rd, Register rs) { addiw(rd, rs, 0); }
+
+ static bool IsAddiw(Instr instr);
+ static bool IsLd(Instr instr);
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_extension_Base_riscv_i_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-a.cc b/js/src/jit/riscv64/extension/extension-riscv-a.cc
new file mode 100644
index 0000000000..ead355fc0a
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-a.cc
@@ -0,0 +1,123 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-a.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+// RV32A Standard Extension
+void AssemblerRISCVA::lr_w(bool aq, bool rl, Register rd, Register rs1) {
+ GenInstrRAtomic(0b00010, aq, rl, 0b010, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVA::sc_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00011, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoswap_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00001, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoadd_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoxor_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoand_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoor_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomin_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomax_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amominu_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomaxu_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+// RV64A Standard Extension (in addition to RV32A)
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVA::lr_d(bool aq, bool rl, Register rd, Register rs1) {
+ GenInstrRAtomic(0b00010, aq, rl, 0b011, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVA::sc_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00011, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoswap_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00001, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoadd_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoxor_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoand_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoor_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomin_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomax_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amominu_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomaxu_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11100, aq, rl, 0b011, rd, rs1, rs2);
+}
+#endif
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-a.h b/js/src/jit/riscv64/extension/extension-riscv-a.h
new file mode 100644
index 0000000000..442a4f5bba
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-a.h
@@ -0,0 +1,46 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_a_h_
+#define jit_riscv64_extension_Extension_riscv_a_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVA : public AssemblerRiscvBase {
+ // RV32A Standard Extension
+ public:
+ void lr_w(bool aq, bool rl, Register rd, Register rs1);
+ void sc_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoswap_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoadd_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoxor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoand_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomin_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomax_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amominu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomaxu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64A Standard Extension (in addition to RV32A)
+ void lr_d(bool aq, bool rl, Register rd, Register rs1);
+ void sc_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoswap_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoadd_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoxor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoand_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomin_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomax_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amominu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomaxu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+#endif
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_extension_Extension_riscv_a_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-c.cc b/js/src/jit/riscv64/extension/extension-riscv-c.cc
new file mode 100644
index 0000000000..714753a0e0
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-c.cc
@@ -0,0 +1,275 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-c.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+// RV64C Standard Extension
+void AssemblerRISCVC::c_nop() { GenInstrCI(0b000, C1, zero_reg, 0); }
+
+void AssemblerRISCVC::c_addi(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg && imm6 != 0);
+ GenInstrCI(0b000, C1, rd, imm6);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_addiw(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg);
+ GenInstrCI(0b001, C1, rd, imm6);
+}
+#endif
+
+void AssemblerRISCVC::c_addi16sp(int16_t imm10) {
+ MOZ_ASSERT(is_int10(imm10) && (imm10 & 0xf) == 0);
+ uint8_t uimm6 = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) |
+ ((imm10 & 0x40) >> 3) | ((imm10 & 0x180) >> 6) |
+ ((imm10 & 0x20) >> 5);
+ GenInstrCIU(0b011, C1, sp, uimm6);
+}
+
+void AssemblerRISCVC::c_addi4spn(Register rd, int16_t uimm10) {
+ MOZ_ASSERT(is_uint10(uimm10) && (uimm10 != 0));
+ uint8_t uimm8 = ((uimm10 & 0x4) >> 1) | ((uimm10 & 0x8) >> 3) |
+ ((uimm10 & 0x30) << 2) | ((uimm10 & 0x3c0) >> 4);
+ GenInstrCIW(0b000, C0, rd, uimm8);
+}
+
+void AssemblerRISCVC::c_li(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg);
+ GenInstrCI(0b010, C1, rd, imm6);
+}
+
+void AssemblerRISCVC::c_lui(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg && rd != sp && imm6 != 0);
+ GenInstrCI(0b011, C1, rd, imm6);
+}
+
+void AssemblerRISCVC::c_slli(Register rd, uint8_t shamt6) {
+ MOZ_ASSERT(rd != zero_reg && shamt6 != 0);
+ GenInstrCIU(0b000, C2, rd, shamt6);
+}
+
+void AssemblerRISCVC::c_fldsp(FPURegister rd, uint16_t uimm9) {
+ MOZ_ASSERT(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCIU(0b001, C2, rd, uimm6);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_ldsp(Register rd, uint16_t uimm9) {
+ MOZ_ASSERT(rd != zero_reg && is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCIU(0b011, C2, rd, uimm6);
+}
+#endif
+
+void AssemblerRISCVC::c_lwsp(Register rd, uint16_t uimm8) {
+ MOZ_ASSERT(rd != zero_reg && is_uint8(uimm8) && (uimm8 & 0x3) == 0);
+ uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCIU(0b010, C2, rd, uimm6);
+}
+
+void AssemblerRISCVC::c_jr(Register rs1) {
+ MOZ_ASSERT(rs1 != zero_reg);
+ GenInstrCR(0b1000, C2, rs1, zero_reg);
+}
+
+void AssemblerRISCVC::c_mv(Register rd, Register rs2) {
+ MOZ_ASSERT(rd != zero_reg && rs2 != zero_reg);
+ GenInstrCR(0b1000, C2, rd, rs2);
+}
+
+void AssemblerRISCVC::c_ebreak() { GenInstrCR(0b1001, C2, zero_reg, zero_reg); }
+
+void AssemblerRISCVC::c_jalr(Register rs1) {
+ MOZ_ASSERT(rs1 != zero_reg);
+ GenInstrCR(0b1001, C2, rs1, zero_reg);
+}
+
+void AssemblerRISCVC::c_add(Register rd, Register rs2) {
+ MOZ_ASSERT(rd != zero_reg && rs2 != zero_reg);
+ GenInstrCR(0b1001, C2, rd, rs2);
+}
+
+// CA Instructions
+void AssemblerRISCVC::c_sub(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b00, rs2);
+}
+
+void AssemblerRISCVC::c_xor(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b01, rs2);
+}
+
+void AssemblerRISCVC::c_or(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b10, rs2);
+}
+
+void AssemblerRISCVC::c_and(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b11, rs2);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_subw(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100111, C1, rd, 0b00, rs2);
+}
+
+void AssemblerRISCVC::c_addw(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100111, C1, rd, 0b01, rs2);
+}
+#endif
+
+void AssemblerRISCVC::c_swsp(Register rs2, uint16_t uimm8) {
+ MOZ_ASSERT(is_uint8(uimm8) && (uimm8 & 0x3) == 0);
+ uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCSS(0b110, C2, rs2, uimm6);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_sdsp(Register rs2, uint16_t uimm9) {
+ MOZ_ASSERT(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCSS(0b111, C2, rs2, uimm6);
+}
+#endif
+
+void AssemblerRISCVC::c_fsdsp(FPURegister rs2, uint16_t uimm9) {
+ MOZ_ASSERT(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCSS(0b101, C2, rs2, uimm6);
+}
+
+// CL Instructions
+
+void AssemblerRISCVC::c_lw(Register rd, Register rs1, uint16_t uimm7) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
+ ((uimm7 & 0x3) == 0));
+ uint8_t uimm5 =
+ ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
+ GenInstrCL(0b010, C0, rd, rs1, uimm5);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_ld(Register rd, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCL(0b011, C0, rd, rs1, uimm5);
+}
+#endif
+
+void AssemblerRISCVC::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCL(0b001, C0, rd, rs1, uimm5);
+}
+
+// CS Instructions
+
+void AssemblerRISCVC::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
+ MOZ_ASSERT(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
+ ((uimm7 & 0x3) == 0));
+ uint8_t uimm5 =
+ ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
+ GenInstrCS(0b110, C0, rs2, rs1, uimm5);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_sd(Register rs2, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCS(0b111, C0, rs2, rs1, uimm5);
+}
+#endif
+
+void AssemblerRISCVC::c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCS(0b101, C0, rs2, rs1, uimm5);
+}
+
+// CJ Instructions
+
+void AssemblerRISCVC::c_j(int16_t imm12) {
+ MOZ_ASSERT(is_int12(imm12));
+ int16_t uimm11 = ((imm12 & 0x800) >> 1) | ((imm12 & 0x400) >> 4) |
+ ((imm12 & 0x300) >> 1) | ((imm12 & 0x80) >> 3) |
+ ((imm12 & 0x40) >> 1) | ((imm12 & 0x20) >> 5) |
+ ((imm12 & 0x10) << 5) | (imm12 & 0xe);
+ GenInstrCJ(0b101, C1, uimm11);
+}
+
+// CB Instructions
+
+void AssemblerRISCVC::c_bnez(Register rs1, int16_t imm9) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
+ uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
+ ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
+ GenInstrCB(0b111, C1, rs1, uimm8);
+}
+
+void AssemblerRISCVC::c_beqz(Register rs1, int16_t imm9) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
+ uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
+ ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
+ GenInstrCB(0b110, C1, rs1, uimm8);
+}
+
+void AssemblerRISCVC::c_srli(Register rs1, int8_t shamt6) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
+ GenInstrCBA(0b100, 0b00, C1, rs1, shamt6);
+}
+
+void AssemblerRISCVC::c_srai(Register rs1, int8_t shamt6) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
+ GenInstrCBA(0b100, 0b01, C1, rs1, shamt6);
+}
+
+void AssemblerRISCVC::c_andi(Register rs1, int8_t imm6) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6));
+ GenInstrCBA(0b100, 0b10, C1, rs1, imm6);
+}
+
+bool AssemblerRISCVC::IsCJal(Instr instr) {
+ return (instr & kRvcOpcodeMask) == RO_C_J;
+}
+
+bool AssemblerRISCVC::IsCBranch(Instr instr) {
+ int Op = instr & kRvcOpcodeMask;
+ return Op == RO_C_BNEZ || Op == RO_C_BEQZ;
+}
+
+int AssemblerRISCVC::CJumpOffset(Instr instr) {
+ int32_t imm12 = ((instr & 0x4) << 3) | ((instr & 0x38) >> 2) |
+ ((instr & 0x40) << 1) | ((instr & 0x80) >> 1) |
+ ((instr & 0x100) << 2) | ((instr & 0x600) >> 1) |
+ ((instr & 0x800) >> 7) | ((instr & 0x1000) >> 1);
+ imm12 = imm12 << 20 >> 20;
+ return imm12;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-c.h b/js/src/jit/riscv64/extension/extension-riscv-c.h
new file mode 100644
index 0000000000..655141cb30
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-c.h
@@ -0,0 +1,77 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_c_h_
+#define jit_riscv64_extension_Extension_riscv_c_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVC : public AssemblerRiscvBase {
+ // RV64C Standard Extension
+ public:
+ void c_nop();
+ void c_addi(Register rd, int8_t imm6);
+
+ void c_addi16sp(int16_t imm10);
+ void c_addi4spn(Register rd, int16_t uimm10);
+ void c_li(Register rd, int8_t imm6);
+ void c_lui(Register rd, int8_t imm6);
+ void c_slli(Register rd, uint8_t shamt6);
+ void c_lwsp(Register rd, uint16_t uimm8);
+ void c_jr(Register rs1);
+ void c_mv(Register rd, Register rs2);
+ void c_ebreak();
+ void c_jalr(Register rs1);
+ void c_j(int16_t imm12);
+ void c_add(Register rd, Register rs2);
+ void c_sub(Register rd, Register rs2);
+ void c_and(Register rd, Register rs2);
+ void c_xor(Register rd, Register rs2);
+ void c_or(Register rd, Register rs2);
+ void c_swsp(Register rs2, uint16_t uimm8);
+ void c_lw(Register rd, Register rs1, uint16_t uimm7);
+ void c_sw(Register rs2, Register rs1, uint16_t uimm7);
+ void c_bnez(Register rs1, int16_t imm9);
+ void c_beqz(Register rs1, int16_t imm9);
+ void c_srli(Register rs1, int8_t shamt6);
+ void c_srai(Register rs1, int8_t shamt6);
+ void c_andi(Register rs1, int8_t imm6);
+
+ void c_fld(FPURegister rd, Register rs1, uint16_t uimm8);
+ void c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8);
+ void c_fldsp(FPURegister rd, uint16_t uimm9);
+ void c_fsdsp(FPURegister rs2, uint16_t uimm9);
+#ifdef JS_CODEGEN_RISCV64
+ void c_ld(Register rd, Register rs1, uint16_t uimm8);
+ void c_sd(Register rs2, Register rs1, uint16_t uimm8);
+ void c_subw(Register rd, Register rs2);
+ void c_addw(Register rd, Register rs2);
+ void c_addiw(Register rd, int8_t imm6);
+ void c_ldsp(Register rd, uint16_t uimm9);
+ void c_sdsp(Register rs2, uint16_t uimm9);
+#endif
+
+ int CJumpOffset(Instr instr);
+
+ static bool IsCBranch(Instr instr);
+ static bool IsCJal(Instr instr);
+
+ inline int16_t cjump_offset(Label* L) {
+ return (int16_t)branch_offset_helper(L, OffsetSize::kOffset11);
+ }
+ inline int32_t cbranch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset9);
+ }
+
+ void c_j(Label* L) { c_j(cjump_offset(L)); }
+ void c_bnez(Register rs1, Label* L) { c_bnez(rs1, cbranch_offset(L)); }
+ void c_beqz(Register rs1, Label* L) { c_beqz(rs1, cbranch_offset(L)); }
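+
+  // Minimal usage sketch (assumes the concrete Assembler provides bind()):
+  //   Label done;
+  //   c_beqz(a0, &done);  // rs1 must be one of x8-x15
+  //   ...
+  //   bind(&done);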
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_c_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-d.cc b/js/src/jit/riscv64/extension/extension-riscv-d.cc
new file mode 100644
index 0000000000..cb728baf12
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-d.cc
@@ -0,0 +1,167 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-d.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+// RV32D Standard Extension
+
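+// These are standard R-type FP encodings: the funct7 literal selects the
+// operation, the rounding mode occupies the funct3 slot, and unary ops reuse
+// the rs2 field as a sub-opcode (zero_reg = 00000, ToRegister(1) = 00001,
+// ...), which is how the signed/unsigned and 32/64-bit fcvt variants are
+// distinguished.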
+void AssemblerRISCVD::fld(FPURegister rd, Register rs1, int16_t imm12) {
+ GenInstrLoadFP_ri(0b011, rd, rs1, imm12);
+}
+
+void AssemblerRISCVD::fsd(FPURegister source, Register base, int16_t imm12) {
+ GenInstrStoreFP_rri(0b011, base, source, imm12);
+}
+
+void AssemblerRISCVD::fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, MADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, MSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, NMSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, NMADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000001, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000101, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001001, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001101, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsqrt_d(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0101101, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fsgnj_d(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsgnjn_d(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsgnjx_d(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010101, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010101, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fcvt_s_d(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0100000, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVD::fcvt_d_s(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0100001, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::feq_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::flt_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fle_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fclass_d(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110001, 0b001, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_w_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_wu_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVD::fcvt_d_w(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_d_wu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(1));
+}
+
+#ifdef JS_CODEGEN_RISCV64
+// RV64D Standard Extension (in addition to RV32D)
+
+void AssemblerRISCVD::fcvt_l_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVD::fcvt_lu_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(3));
+}
+
+void AssemblerRISCVD::fmv_x_d(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110001, 0b000, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_d_l(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVD::fcvt_d_lu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(3));
+}
+
+void AssemblerRISCVD::fmv_d_x(FPURegister rd, Register rs1) {
+ GenInstrALUFP_rr(0b1111001, 0b000, rd, rs1, zero_reg);
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-d.h b/js/src/jit/riscv64/extension/extension-riscv-d.h
new file mode 100644
index 0000000000..8497c0ca63
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-d.h
@@ -0,0 +1,68 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_d_h_
+#define jit_riscv64_extension_Extension_riscv_d_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVD : public AssemblerRiscvBase {
+ // RV32D Standard Extension
+ public:
+ void fld(FPURegister rd, Register rs1, int16_t imm12);
+ void fsd(FPURegister source, Register base, int16_t imm12);
+ void fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsqrt_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjn_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjx_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fcvt_s_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_s(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void feq_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void flt_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void fle_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void fclass_d(Register rd, FPURegister rs1);
+ void fcvt_w_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_wu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_w(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_wu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64D Standard Extension (in addition to RV32D)
+ void fcvt_l_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_lu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fmv_x_d(Register rd, FPURegister rs1);
+ void fcvt_d_l(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_lu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fmv_d_x(FPURegister rd, Register rs1);
+#endif
+
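+  // Standard pseudo-instructions: fmv.d, fabs.d and fneg.d are encoded as
+  // sign-injection ops with rs1 == rs2.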
+ void fmv_d(FPURegister rd, FPURegister rs) { fsgnj_d(rd, rs, rs); }
+ void fabs_d(FPURegister rd, FPURegister rs) { fsgnjx_d(rd, rs, rs); }
+ void fneg_d(FPURegister rd, FPURegister rs) { fsgnjn_d(rd, rs, rs); }
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_d_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-f.cc b/js/src/jit/riscv64/extension/extension-riscv-f.cc
new file mode 100644
index 0000000000..44e1fdc495
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-f.cc
@@ -0,0 +1,158 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-f.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+// RV32F Standard Extension
+
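+// Same R-type scheme as the D extension above, with the two-bit fmt field at
+// the bottom of funct7 set to 00 (single) rather than 01 (double), e.g.
+// fadd.s uses funct7 0000000 where fadd.d uses 0000001.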
+void AssemblerRISCVF::flw(FPURegister rd, Register rs1, int16_t imm12) {
+ GenInstrLoadFP_ri(0b010, rd, rs1, imm12);
+}
+
+void AssemblerRISCVF::fsw(FPURegister source, Register base, int16_t imm12) {
+ GenInstrStoreFP_rri(0b010, base, source, imm12);
+}
+
+void AssemblerRISCVF::fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, MADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, MSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, NMSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, NMADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000000, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000100, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001000, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001100, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsqrt_s(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0101100, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fsgnj_s(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsgnjn_s(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsgnjx_s(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010100, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010100, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fcvt_w_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fcvt_wu_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVF::fmv_x_w(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110000, 0b000, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::feq_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::flt_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fle_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fclass_s(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110000, 0b001, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fcvt_s_w(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fcvt_s_wu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVF::fmv_w_x(FPURegister rd, Register rs1) {
+ GenInstrALUFP_rr(0b1111000, 0b000, rd, rs1, zero_reg);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+// RV64F Standard Extension (in addition to RV32F)
+
+void AssemblerRISCVF::fcvt_l_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVF::fcvt_lu_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(3));
+}
+
+void AssemblerRISCVF::fcvt_s_l(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVF::fcvt_s_lu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(3));
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-f.h b/js/src/jit/riscv64/extension/extension-riscv-f.h
new file mode 100644
index 0000000000..3ab46ffcf6
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-f.h
@@ -0,0 +1,66 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_f_h_
+#define jit_riscv64_extension_Extension_riscv_f_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVF : public AssemblerRiscvBase {
+ // RV32F Standard Extension
+ public:
+ void flw(FPURegister rd, Register rs1, int16_t imm12);
+ void fsw(FPURegister source, Register base, int16_t imm12);
+ void fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsqrt_s(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fsgnj_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjn_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjx_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fcvt_w_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_wu_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fmv_x_w(Register rd, FPURegister rs1);
+ void feq_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void flt_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void fle_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void fclass_s(Register rd, FPURegister rs1);
+ void fcvt_s_w(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_s_wu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fmv_w_x(FPURegister rd, Register rs1);
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64F Standard Extension (in addition to RV32F)
+ void fcvt_l_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_lu_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_s_l(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_s_lu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+#endif
+
+ void fmv_s(FPURegister rd, FPURegister rs) { fsgnj_s(rd, rs, rs); }
+ void fabs_s(FPURegister rd, FPURegister rs) { fsgnjx_s(rd, rs, rs); }
+ void fneg_s(FPURegister rd, FPURegister rs) { fsgnjn_s(rd, rs, rs); }
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_f_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-m.cc b/js/src/jit/riscv64/extension/extension-riscv-m.cc
new file mode 100644
index 0000000000..b5fcd6c34c
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-m.cc
@@ -0,0 +1,68 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-m.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+// RV32M Standard Extension
+
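+// All RV32M instructions share funct7 = 0000001 under OP (OP-32 for the *w
+// forms below); funct3 selects mul, mulh, mulhsu, mulhu, div, divu, rem,
+// remu in that order (000-111).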
+void AssemblerRISCVM::mul(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::mulh(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::mulhsu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::mulhu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::div(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b100, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::divu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::rem(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b110, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::remu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b111, rd, rs1, rs2);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+// RV64M Standard Extension (in addition to RV32M)
+
+void AssemblerRISCVM::mulw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::divw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b100, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::divuw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::remw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b110, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::remuw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b111, rd, rs1, rs2);
+}
+#endif
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-m.h b/js/src/jit/riscv64/extension/extension-riscv-m.h
new file mode 100644
index 0000000000..7c2c932516
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-m.h
@@ -0,0 +1,37 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_m_h_
+#define jit_riscv64_extension_Extension_riscv_m_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVM : public AssemblerRiscvBase {
+ // RV32M Standard Extension
+ public:
+ void mul(Register rd, Register rs1, Register rs2);
+ void mulh(Register rd, Register rs1, Register rs2);
+ void mulhsu(Register rd, Register rs1, Register rs2);
+ void mulhu(Register rd, Register rs1, Register rs2);
+ void div(Register rd, Register rs1, Register rs2);
+ void divu(Register rd, Register rs1, Register rs2);
+ void rem(Register rd, Register rs1, Register rs2);
+ void remu(Register rd, Register rs1, Register rs2);
+#ifdef JS_CODEGEN_RISCV64
+ // RV64M Standard Extension (in addition to RV32M)
+ void mulw(Register rd, Register rs1, Register rs2);
+ void divw(Register rd, Register rs1, Register rs2);
+ void divuw(Register rd, Register rs1, Register rs2);
+ void remw(Register rd, Register rs1, Register rs2);
+ void remuw(Register rd, Register rs1, Register rs2);
+#endif
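+
+  // Usage sketch: signed 64-bit division (rounds toward zero; the remainder
+  // takes the sign of the dividend):
+  //   div(a0, a1, a2);  // a0 = a1 / a2
+  //   rem(a3, a1, a2);  // a3 = a1 % a2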
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_m_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-v.cc b/js/src/jit/riscv64/extension/extension-riscv-v.cc
new file mode 100644
index 0000000000..c7241158e0
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-v.cc
@@ -0,0 +1,891 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "jit/riscv64/extension/extension-riscv-v.h"
+
+#ifdef CAN_USE_RVV
+# include "src/codegen/assembler.h"
+# include "jit/riscv64/constant/Constant-riscv64.h"
+# include "jit/riscv64/extension/register-riscv.h"
+
+namespace js {
+namespace jit {
+
+// RVV
+
+void AssemblerRISCVV::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vmv_vv(VRegister vd, VRegister vs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmv_vx(VRegister vd, Register rs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmv_vi(VRegister vd, uint8_t simm5) {
+ GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmv_xs(Register rd, VRegister vs2) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b00000, vs2, NoMask);
+}
+
+void AssemblerRISCVV::vmv_sx(VRegister vd, Register rs1) {
+ GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void AssemblerRISCVV::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void AssemblerRISCVV::vrgather_vv(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+  MOZ_ASSERT(vd != vs1);
+  MOZ_ASSERT(vd != vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vrgather_vi(VRegister vd, VRegister vs2, int8_t imm5,
+ MaskType mask) {
+  MOZ_ASSERT(vd != vs2);
+ GenInstrV(VRGATHER_FUNCT6, vd, imm5, vs2, mask);
+}
+
+void AssemblerRISCVV::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+  MOZ_ASSERT(vd != vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+ GenInstrV(VWADDUW_FUNCT6, OP_MVX, vd, rs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vid_v(VRegister vd, MaskType mask) {
+ GenInstrV(VMUNARY0_FUNCT6, OP_MVV, vd, VID_V, v0, mask);
+}
+
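+// The DEFINE_OP* macros below stamp out one emitter per operand form: _vv
+// (vector-vector), _vx (vector-scalar), _vi (vector-immediate), _vf
+// (vector-FP-scalar), and so on. The funct6 constant selects the operation
+// and the OP_* opcode selects the operand category.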
+# define DEFINE_OPIVV(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVV(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFWV(name, funct6) \
+ void AssemblerRISCVV::name##_wv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFRED(name, funct6) \
+ void AssemblerRISCVV::name##_vs(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPIVX(name, funct6) \
+ void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask); \
+ }
+
+# define DEFINE_OPIVI(name, funct6) \
+ void AssemblerRISCVV::name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask) { \
+ GenInstrV(funct6, vd, imm5, vs2, mask); \
+ }
+
+# define DEFINE_OPMVV(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
+// void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+//                Register rs1, VRegister vs2, MaskType mask = NoMask);
+# define DEFINE_OPMVX(name, funct6) \
+ void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVF(name, funct6) \
+ void AssemblerRISCVV::name##_vf(VRegister vd, VRegister vs2, \
+ FPURegister fs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFWF(name, funct6) \
+ void AssemblerRISCVV::name##_wf(VRegister vd, VRegister vs2, \
+ FPURegister fs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVV_FMA(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs1, \
+ VRegister vs2, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVF_FMA(name, funct6) \
+ void AssemblerRISCVV::name##_vf(VRegister vd, FPURegister fs1, \
+ VRegister vs2, MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+// Vector Integer Extension
+# define DEFINE_OPMVV_VIE(name, vs1) \
+ void AssemblerRISCVV::name(VRegister vd, VRegister vs2, MaskType mask) { \
+ GenInstrV(VXUNARY0_FUNCT6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
+void AssemblerRISCVV::vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask) {
+ GenInstrV(VMV_FUNCT6, OP_FVF, vd, fs1, v0, mask);
+}
+
+void AssemblerRISCVV::vfmv_fs(FPURegister fd, VRegister vs2) {
+ GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, NoMask);
+}
+
+void AssemblerRISCVV::vfmv_sf(VRegister vd, FPURegister fs) {
+ GenInstrV(VRFUNARY0_FUNCT6, OP_FVF, vd, fs, v0, NoMask);
+}
+
+DEFINE_OPIVV(vadd, VADD_FUNCT6)
+DEFINE_OPIVX(vadd, VADD_FUNCT6)
+DEFINE_OPIVI(vadd, VADD_FUNCT6)
+DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
+DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+DEFINE_OPIVV(vand, VAND_FUNCT6)
+DEFINE_OPIVX(vand, VAND_FUNCT6)
+DEFINE_OPIVI(vand, VAND_FUNCT6)
+DEFINE_OPIVV(vor, VOR_FUNCT6)
+DEFINE_OPIVX(vor, VOR_FUNCT6)
+DEFINE_OPIVI(vor, VOR_FUNCT6)
+DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+
+DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
+DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
+
+DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
+DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
+
+// Vector Widening Floating-Point Add/Subtract Instructions
+DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
+DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
+DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
+DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
+DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
+DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
+DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
+DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)
+
+// Vector Widening Floating-Point Reduction Instructions
+DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
+DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)
+
+// Vector Widening Floating-Point Multiply
+DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
+DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)
+
+DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
+
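+// Vector Floating-Point Sign-Injection Instructions (the emitters keep the
+// upstream "vfsngj" spelling but encode vfsgnj / vfsgnjn / vfsgnjx).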
+DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
+
+// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
+DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
+DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
+DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
+DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)
+
+// Vector Widening Floating-Point Fused Multiply-Add Instructions
+DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+
+// Vector Narrowing Fixed-Point Clip Instructions
+DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
+DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
+DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
+DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
+DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
+DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+
+// Vector Integer Extension
+DEFINE_OPMVV_VIE(vzext_vf8, 0b00010)
+DEFINE_OPMVV_VIE(vsext_vf8, 0b00011)
+DEFINE_OPMVV_VIE(vzext_vf4, 0b00100)
+DEFINE_OPMVV_VIE(vsext_vf4, 0b00101)
+DEFINE_OPMVV_VIE(vzext_vf2, 0b00110)
+DEFINE_OPMVV_VIE(vsext_vf2, 0b00111)
+
+# undef DEFINE_OPIVI
+# undef DEFINE_OPIVV
+# undef DEFINE_OPIVX
+# undef DEFINE_OPFVV
+# undef DEFINE_OPFWV
+# undef DEFINE_OPFVF
+# undef DEFINE_OPFWF
+# undef DEFINE_OPFVV_FMA
+# undef DEFINE_OPFVF_FMA
+# undef DEFINE_OPMVV_VIE
+
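+// vsetvli: bit 31 = 0, the vtype setting from GenZimm occupies the zimm
+// field in bits 30:20, funct3 = 0b111 (the 0x7 << 12 below), rs1 supplies
+// the requested AVL and rd receives the new vl.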
+void AssemblerRISCVV::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail, MaskAgnosticType mask) {
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask);
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31;
+ emit(instr);
+}
+
+void AssemblerRISCVV::vsetivli(Register rd, uint8_t uimm, VSew vsew,
+ Vlmul vlmul, TailAgnosticType tail,
+ MaskAgnosticType mask) {
+ MOZ_ASSERT(is_uint5(uimm));
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF;
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((uimm & 0x1F) << kRvvUimmShift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30;
+ emit(instr);
+}
+
+void AssemblerRISCVV::vsetvl(Register rd, Register rs1, Register rs2) {
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25;
+ emit(instr);
+}
+
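+// Map the element width to the width field used by vector loads and stores:
+// 8 -> 000, 16 -> 101, 32 -> 110, 64 (and anything wider) -> 111. These
+// values are chosen so they do not collide with the scalar FP load/store
+// widths.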
+uint8_t vsew_switch(VSew vsew) {
+ uint8_t width;
+ switch (vsew) {
+ case E8:
+ width = 0b000;
+ break;
+ case E16:
+ width = 0b101;
+ break;
+ case E32:
+ width = 0b110;
+ break;
+ default:
+ width = 0b111;
+ break;
+ }
+ return width;
+}
+
+// OPIVV OPFVV OPMVV
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, VRegister vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, int8_t vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1 & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPMVV OPFVV
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ Register rd, VRegister vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPFVV
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ FPURegister fd, VRegister vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_FVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((fd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPIVX OPMVX
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, Register rs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_IVX || opcode == OP_MVX);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPFVF
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, FPURegister fs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_FVF);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((fs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPMVX
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Register rd, Register rs1,
+ VRegister vs2, MaskType mask) {
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPIVI
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5,
+ VRegister vs2, MaskType mask) {
+ MOZ_ASSERT(is_uint5(imm5) || is_int5(imm5));
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// VL VS
+void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
+ Register rs1, uint8_t umop, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((umop << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
+ Register rs1, Register rs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+// VL VS AMO
+void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+// OP_MVV with a literal vs1 selector field: vmv.x.s, vcpop.m, vfirst.m
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ Register rd, uint8_t vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1 & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+void AssemblerRISCVV::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b000);
+}
+void AssemblerRISCVV::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b000);
+}
+void AssemblerRISCVV::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0);
+}
+
+void AssemblerRISCVV::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b000);
+}
+void AssemblerRISCVV::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, 0, 0b000);
+}
+
+void AssemblerRISCVV::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0b000);
+}
+void AssemblerRISCVV::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, 0, 0b000);
+}
+
+void AssemblerRISCVV::vlseg2(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b001);
+}
+
+void AssemblerRISCVV::vlseg3(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b010);
+}
+
+void AssemblerRISCVV::vlseg4(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b011);
+}
+
+void AssemblerRISCVV::vlseg5(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b100);
+}
+
+void AssemblerRISCVV::vlseg6(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b101);
+}
+
+void AssemblerRISCVV::vlseg7(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b110);
+}
+
+void AssemblerRISCVV::vlseg8(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b111);
+}
+void AssemblerRISCVV::vsseg2(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b001);
+}
+void AssemblerRISCVV::vsseg3(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b010);
+}
+void AssemblerRISCVV::vsseg4(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b011);
+}
+void AssemblerRISCVV::vsseg5(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b100);
+}
+void AssemblerRISCVV::vsseg6(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b101);
+}
+void AssemblerRISCVV::vsseg7(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b110);
+}
+void AssemblerRISCVV::vsseg8(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b111);
+}
+
+void AssemblerRISCVV::vlsseg2(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
+}
+void AssemblerRISCVV::vlsseg3(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
+}
+void AssemblerRISCVV::vlsseg4(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
+}
+void AssemblerRISCVV::vlsseg5(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
+}
+void AssemblerRISCVV::vlsseg6(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
+}
+void AssemblerRISCVV::vlsseg7(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
+}
+void AssemblerRISCVV::vlsseg8(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
+}
+void AssemblerRISCVV::vssseg2(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
+}
+void AssemblerRISCVV::vssseg3(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
+}
+void AssemblerRISCVV::vssseg4(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
+}
+void AssemblerRISCVV::vssseg5(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
+}
+void AssemblerRISCVV::vssseg6(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
+}
+void AssemblerRISCVV::vssseg7(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
+}
+void AssemblerRISCVV::vssseg8(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
+}
+
+void AssemblerRISCVV::vlxseg2(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
+}
+void AssemblerRISCVV::vlxseg3(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
+}
+void AssemblerRISCVV::vlxseg4(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
+}
+void AssemblerRISCVV::vlxseg5(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
+}
+void AssemblerRISCVV::vlxseg6(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
+}
+void AssemblerRISCVV::vlxseg7(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
+}
+void AssemblerRISCVV::vlxseg8(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
+}
+void AssemblerRISCVV::vsxseg2(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
+}
+void AssemblerRISCVV::vsxseg3(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
+}
+void AssemblerRISCVV::vsxseg4(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
+}
+void AssemblerRISCVV::vsxseg5(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
+}
+void AssemblerRISCVV::vsxseg6(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
+}
+void AssemblerRISCVV::vsxseg7(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
+}
+void AssemblerRISCVV::vsxseg8(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
+}
+
+void AssemblerRISCVV::vfirst_m(Register rd, VRegister vs2, MaskType mask) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10001, vs2, mask);
+}
+
+void AssemblerRISCVV::vcpop_m(Register rd, VRegister vs2, MaskType mask) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10000, vs2, mask);
+}
+
+LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
+ uint8_t laneidx) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ *this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16);
+ break;
+ case MachineRepresentation::kWord16:
+ *this = LoadStoreLaneParams(laneidx, 16, kRvvVLEN / 8);
+ break;
+ case MachineRepresentation::kWord32:
+ *this = LoadStoreLaneParams(laneidx, 32, kRvvVLEN / 4);
+ break;
+ case MachineRepresentation::kWord64:
+ *this = LoadStoreLaneParams(laneidx, 64, kRvvVLEN / 2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace jit
+} // namespace js
+#endif
diff --git a/js/src/jit/riscv64/extension/extension-riscv-v.h b/js/src/jit/riscv64/extension/extension-riscv-v.h
new file mode 100644
index 0000000000..8f04f24c56
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-v.h
@@ -0,0 +1,484 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_v_h_
+#define jit_riscv64_extension_Extension_riscv_v_h_
+#ifdef CAN_USE_RVV
+# include "jit/riscv64/Architecture-riscv64.h"
+# include "jit/riscv64/constant/Constant-riscv64.h"
+# include "jit/riscv64/extension/base-assembler-riscv.h"
+
+namespace js {
+namespace jit {
+
+class AssemblerRISCVV : public AssemblerRiscvBase {
+ public:
+ // RVV
+ static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
+ }
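As an editorial aside (not part of the patch): GenZimm packs the vtype immediate consumed by vsetvli/vsetivli. A worked example, assuming the encodings this port takes from its constants header (E32 = 0b010, m1 = 0b000, tu = 0, mu = 0):

  // (mu << 7) | (tu << 6) | (E32 << 3) | m1
  //   = 0 | 0 | (0b010 << 3) | 0b000 = 0x10
  int32_t zimm = AssemblerRISCVV::GenZimm(E32, m1);  // 0x10: SEW=32, LMUL=1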
+
+ void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vss(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+# define SegInstr(OP) \
+ void OP##seg2(ARG); \
+ void OP##seg3(ARG); \
+ void OP##seg4(ARG); \
+ void OP##seg5(ARG); \
+ void OP##seg6(ARG); \
+ void OP##seg7(ARG); \
+ void OP##seg8(ARG);
+
+# define ARG \
+ VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vl) SegInstr(vs)
+# undef ARG
+
+# define ARG \
+ VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vls) SegInstr(vss)
+# undef ARG
+
+# define ARG \
+ VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vsx) SegInstr(vlx)
+# undef ARG
+# undef SegInstr
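For readability, here is a sketch of what one of these macro invocations amounts to, derived directly from the SegInstr/ARG definitions above (no new API is introduced):

  // SegInstr(vl) with the first ARG form declares:
  //   void vlseg2(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
  //               MaskType mask = NoMask);
  //   ... and likewise vlseg3 through vlseg8 with the same signature.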
+
+ // RVV Vector Arithmetic Instruction
+
+ void vmv_vv(VRegister vd, VRegister vs1);
+ void vmv_vx(VRegister vd, Register rs1);
+ void vmv_vi(VRegister vd, uint8_t simm5);
+ void vmv_xs(Register rd, VRegister vs2);
+ void vmv_sx(VRegister vd, Register rs1);
+ void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+
+ void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask = NoMask);
+ void vfmv_fs(FPURegister fd, VRegister vs2);
+ void vfmv_sf(VRegister vd, FPURegister fs);
+
+ void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask = NoMask);
+ void vid_v(VRegister vd, MaskType mask = Mask);
+
+# define DEFINE_OPIVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPIVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPIVI(name, funct6) \
+ void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPMVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPMVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFWV(name, funct6) \
+ void name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFRED(name, funct6) \
+ void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVF(name, funct6) \
+ void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFWF(name, funct6) \
+ void name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVV_FMA(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVF_FMA(name, funct6) \
+ void name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPMVV_VIE(name) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask);
+
+ DEFINE_OPIVV(vadd, VADD_FUNCT6)
+ DEFINE_OPIVX(vadd, VADD_FUNCT6)
+ DEFINE_OPIVI(vadd, VADD_FUNCT6)
+ DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+ DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+ DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+ DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+ DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+ DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+ DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
+ DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVX(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVV(vand, VAND_FUNCT6)
+ DEFINE_OPIVX(vand, VAND_FUNCT6)
+ DEFINE_OPIVI(vand, VAND_FUNCT6)
+ DEFINE_OPIVV(vor, VOR_FUNCT6)
+ DEFINE_OPIVX(vor, VOR_FUNCT6)
+ DEFINE_OPIVI(vor, VOR_FUNCT6)
+ DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
+
+ DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+ DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+ DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+ DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+ DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+ DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+ DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+ DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+ DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+ DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+ DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+ DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+ DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+ DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+ DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+ DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
+ DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+ DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+ DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
+
+ DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+ DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+
+ // Vector Widening Floating-Point Add/Subtract Instructions
+ DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
+ DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
+ DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
+ DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
+ DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
+ DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
+ DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
+ DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)
+
+ // Vector Widening Floating-Point Reduction Instructions
+ DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
+ DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)
+
+ // Vector Widening Floating-Point Multiply
+ DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
+ DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)
+
+ DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+ DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+ DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+ DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+ DEFINE_OPFVV(vfmax, VMFMAX_FUNCT6)
+ DEFINE_OPFVV(vfmin, VMFMIN_FUNCT6)
+ DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
+
+ DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+ DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
+
+ // Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+ DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
+ DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
+ DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)
+
+ // Vector Widening Floating-Point Fused Multiply-Add Instructions
+ DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+
+ // Vector Narrowing Fixed-Point Clip Instructions
+ DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
+ DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
+ DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
+ DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
+ DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
+ DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+
+ // Vector Integer Extension
+ DEFINE_OPMVV_VIE(vzext_vf8)
+ DEFINE_OPMVV_VIE(vsext_vf8)
+ DEFINE_OPMVV_VIE(vzext_vf4)
+ DEFINE_OPMVV_VIE(vsext_vf4)
+ DEFINE_OPMVV_VIE(vzext_vf2)
+ DEFINE_OPMVV_VIE(vsext_vf2)
+
+# undef DEFINE_OPIVI
+# undef DEFINE_OPIVV
+# undef DEFINE_OPIVX
+# undef DEFINE_OPMVV
+# undef DEFINE_OPMVX
+# undef DEFINE_OPFVV
+# undef DEFINE_OPFWV
+# undef DEFINE_OPFVF
+# undef DEFINE_OPFWF
+# undef DEFINE_OPFVV_FMA
+# undef DEFINE_OPFVF_FMA
+# undef DEFINE_OPMVV_VIE
+# undef DEFINE_OPFRED
+
+# define DEFINE_VFUNARY(name, funct6, vs1) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+ DEFINE_VFUNARY(vfcvt_xu_f_v, VFUNARY0_FUNCT6, VFCVT_XU_F_V)
+ DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
+ DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
+ DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_xu_f_v, VFUNARY0_FUNCT6, VFWCVT_XU_F_V)
+ DEFINE_VFUNARY(vfwcvt_x_f_v, VFUNARY0_FUNCT6, VFWCVT_X_F_V)
+ DEFINE_VFUNARY(vfwcvt_f_x_v, VFUNARY0_FUNCT6, VFWCVT_F_X_V)
+ DEFINE_VFUNARY(vfwcvt_f_xu_v, VFUNARY0_FUNCT6, VFWCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_f_f_v, VFUNARY0_FUNCT6, VFWCVT_F_F_V)
+
+ DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
+ DEFINE_VFUNARY(vfncvt_x_f_w, VFUNARY0_FUNCT6, VFNCVT_X_F_W)
+ DEFINE_VFUNARY(vfncvt_xu_f_w, VFUNARY0_FUNCT6, VFNCVT_XU_F_W)
+
+ DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
+ DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
+ DEFINE_VFUNARY(vfrsqrt7_v, VFUNARY1_FUNCT6, VFRSQRT7_V)
+ DEFINE_VFUNARY(vfrec7_v, VFUNARY1_FUNCT6, VFREC7_V)
+# undef DEFINE_VFUNARY
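As an aside, these generator macros expand to small inline emitters; for example, taken directly from the DEFINE_VFUNARY definition above:

  // DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V) produces:
  //   void vfsqrt_v(VRegister vd, VRegister vs2, MaskType mask = NoMask) {
  //     GenInstrV(VFUNARY1_FUNCT6, OP_FVV, vd, VFSQRT_V, vs2, mask);
  //   }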
+
+ void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vxor_vi(dst, src, -1, mask);
+ }
+
+ void vneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vrsub_vx(dst, src, zero_reg, mask);
+ }
+
+ void vfneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjn_vv(dst, src, src, mask);
+ }
+ void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjx_vv(dst, src, src, mask);
+ }
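A note on the four helpers above: they are the standard RVV pseudo-instruction aliases, emitted in terms of the real instructions declared in this class:

  // vnot.v   vd, vs  ==  vxor.vi    vd, vs, -1
  // vneg.v   vd, vs  ==  vrsub.vx   vd, vs, x0
  // vfneg.v  vd, vs  ==  vfsgnjn.vv vd, vs, vs   (spelled vfsngjn_vv here)
  // vfabs.v  vd, vs  ==  vfsgnjx.vv vd, vs, vs   (spelled vfsngjx_vv here)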
+ void vfirst_m(Register rd, VRegister vs2, MaskType mask = NoMask);
+
+ void vcpop_m(Register rd, VRegister vs2, MaskType mask = NoMask);
+
+ protected:
+ void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(rd, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(zero_reg, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ void vsetvl(Register rd, Register rs1, Register rs2);
+
+ // ----------------------------RVV------------------------------------------
+ // vsetvl
+ void GenInstrV(Register rd, Register rs1, Register rs2);
+ // vsetvli
+ void GenInstrV(Register rd, Register rs1, uint32_t zimm);
+ // OPIVV OPFVV OPMVV
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd, int8_t vs1,
+ VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ VRegister vs2, MaskType mask = NoMask);
+ // OPMVV OPFVV
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd,
+ VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+ // OPFVV
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, FPURegister fd,
+ VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+
+ // OPIVX OPMVX
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask = NoMask);
+ // OPFVF
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ FPURegister fs1, VRegister vs2, MaskType mask = NoMask);
+ // OPMVX
+ void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
+ MaskType mask = NoMask);
+ // OPIVI
+ void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2,
+ MaskType mask = NoMask);
+
+ // VL VS
+ void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
+ uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+
+ void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
+ Register rs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+ // VL VS AMO
+ void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+ // vmv_xs vcpop_m vfirst_m
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd, uint8_t vs1,
+ VRegister vs2, MaskType mask);
+};
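A hedged usage sketch for this class (the register names and the calling context are illustrative assumptions, not taken from the patch): a derived assembler would first set vtype/vl and then emit vector arithmetic.

  // From a subclass, since vsetvlmax() is protected:
  //   vsetvlmax(t0, E32, m1);   // vl = VLMAX, SEW=32, LMUL=1
  //   vadd_vv(v8, v9, v10);     // OPIVV signature is (vd, vs2, vs1)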
+
+class LoadStoreLaneParams {
+ public:
+ int sz;
+ uint8_t laneidx;
+
+ LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);
+
+ private:
+ LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
+ : sz(sz), laneidx(laneidx % lanes) {}
+};
+} // namespace jit
+} // namespace js
+#endif
+#endif  // jit_riscv64_extension_Extension_riscv_v_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zicsr.cc b/js/src/jit/riscv64/extension/extension-riscv-zicsr.cc
new file mode 100644
index 0000000000..7fa87393a3
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zicsr.cc
@@ -0,0 +1,44 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-zicsr.h"
+
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Register-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+void AssemblerRISCVZicsr::csrrw(Register rd, ControlStatusReg csr,
+ Register rs1) {
+ GenInstrCSR_ir(0b001, rd, csr, rs1);
+}
+
+void AssemblerRISCVZicsr::csrrs(Register rd, ControlStatusReg csr,
+ Register rs1) {
+ GenInstrCSR_ir(0b010, rd, csr, rs1);
+}
+
+void AssemblerRISCVZicsr::csrrc(Register rd, ControlStatusReg csr,
+ Register rs1) {
+ GenInstrCSR_ir(0b011, rd, csr, rs1);
+}
+
+void AssemblerRISCVZicsr::csrrwi(Register rd, ControlStatusReg csr,
+ uint8_t imm5) {
+ GenInstrCSR_ii(0b101, rd, csr, imm5);
+}
+
+void AssemblerRISCVZicsr::csrrsi(Register rd, ControlStatusReg csr,
+ uint8_t imm5) {
+ GenInstrCSR_ii(0b110, rd, csr, imm5);
+}
+
+void AssemblerRISCVZicsr::csrrci(Register rd, ControlStatusReg csr,
+ uint8_t imm5) {
+ GenInstrCSR_ii(0b111, rd, csr, imm5);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zicsr.h b/js/src/jit/riscv64/extension/extension-riscv-zicsr.h
new file mode 100644
index 0000000000..e1fba4fa57
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zicsr.h
@@ -0,0 +1,57 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_zicsr_h_
+#define jit_riscv64_extension_Extension_riscv_zicsr_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+
+class AssemblerRISCVZicsr : public AssemblerRiscvBase {
+ public:
+ // CSR
+ void csrrw(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrs(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrc(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrwi(Register rd, ControlStatusReg csr, uint8_t imm5);
+ void csrrsi(Register rd, ControlStatusReg csr, uint8_t imm5);
+ void csrrci(Register rd, ControlStatusReg csr, uint8_t imm5);
+
+ // Read instructions-retired counter
+ void rdinstret(Register rd) { csrrs(rd, csr_instret, zero_reg); }
+ void rdinstreth(Register rd) { csrrs(rd, csr_instreth, zero_reg); }
+ void rdcycle(Register rd) { csrrs(rd, csr_cycle, zero_reg); }
+ void rdcycleh(Register rd) { csrrs(rd, csr_cycleh, zero_reg); }
+ void rdtime(Register rd) { csrrs(rd, csr_time, zero_reg); }
+ void rdtimeh(Register rd) { csrrs(rd, csr_timeh, zero_reg); }
+
+ void csrr(Register rd, ControlStatusReg csr) { csrrs(rd, csr, zero_reg); }
+ void csrw(ControlStatusReg csr, Register rs) { csrrw(zero_reg, csr, rs); }
+ void csrs(ControlStatusReg csr, Register rs) { csrrs(zero_reg, csr, rs); }
+ void csrc(ControlStatusReg csr, Register rs) { csrrc(zero_reg, csr, rs); }
+
+ void csrwi(ControlStatusReg csr, uint8_t imm) { csrrwi(zero_reg, csr, imm); }
+ void csrsi(ControlStatusReg csr, uint8_t imm) { csrrsi(zero_reg, csr, imm); }
+ void csrci(ControlStatusReg csr, uint8_t imm) { csrrci(zero_reg, csr, imm); }
+
+ void frcsr(Register rd) { csrrs(rd, csr_fcsr, zero_reg); }
+ void fscsr(Register rd, Register rs) { csrrw(rd, csr_fcsr, rs); }
+ void fscsr(Register rs) { csrrw(zero_reg, csr_fcsr, rs); }
+
+ void frrm(Register rd) { csrrs(rd, csr_frm, zero_reg); }
+ void fsrm(Register rd, Register rs) { csrrw(rd, csr_frm, rs); }
+ void fsrm(Register rs) { csrrw(zero_reg, csr_frm, rs); }
+
+ void frflags(Register rd) { csrrs(rd, csr_fflags, zero_reg); }
+ void fsflags(Register rd, Register rs) { csrrw(rd, csr_fflags, rs); }
+ void fsflags(Register rs) { csrrw(zero_reg, csr_fflags, rs); }
+};
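As an aside, the wrappers above are the conventional Zicsr pseudo-instructions; two hedged examples (the destination register name is an assumption):

  // rdcycle(a0)           emits  csrrs a0, cycle, x0   (read the cycle counter)
  // csrwi(csr_fflags, 0)  emits  csrrwi x0, fflags, 0  (clear accrued FP flags)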
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_extension_Extension_riscv_zicsr_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zifencei.cc b/js/src/jit/riscv64/extension/extension-riscv-zifencei.cc
new file mode 100644
index 0000000000..ec8080b0cb
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zifencei.cc
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-zifencei.h"
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+void AssemblerRISCVZifencei::fence_i() {
+ GenInstrI(0b001, MISC_MEM, ToRegister(0), ToRegister(0), 0);
+}
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zifencei.h b/js/src/jit/riscv64/extension/extension-riscv-zifencei.h
new file mode 100644
index 0000000000..a245320ec4
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zifencei.h
@@ -0,0 +1,20 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_zifencei_h_
+#define jit_riscv64_extension_Extension_riscv_zifencei_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVZifencei : public AssemblerRiscvBase {
+ public:
+ void fence_i();
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_extension_Extension_riscv_zifencei_h_
diff --git a/js/src/jit/shared/Architecture-shared.h b/js/src/jit/shared/Architecture-shared.h
new file mode 100644
index 0000000000..33085a6bdb
--- /dev/null
+++ b/js/src/jit/shared/Architecture-shared.h
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Architecture_shared_h
+#define jit_shared_Architecture_shared_h
+
+namespace js {
+namespace jit {
+
+enum class RegTypeName { GPR, Float32, Float64, Vector128, Any };
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Architecture_shared_h */
diff --git a/js/src/jit/shared/Assembler-shared.cpp b/js/src/jit/shared/Assembler-shared.cpp
new file mode 100644
index 0000000000..96dcf69a72
--- /dev/null
+++ b/js/src/jit/shared/Assembler-shared.cpp
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/Assembler-shared.h"
+
+#include "jit/JitSpewer.h"
+#include "vm/NativeObject.h"
+
+namespace js::jit {
+
+void BaseObjectElementIndex::staticAssertions() {
+ NativeObject::elementsSizeMustNotOverflow();
+}
+
+void BaseObjectSlotIndex::staticAssertions() {
+ NativeObject::slotsSizeMustNotOverflow();
+}
+
+AssemblerShared::~AssemblerShared() {
+#ifdef DEBUG
+ while (hasCreator()) {
+ popCreator();
+ }
+#endif
+}
+
+#ifdef DEBUG
+void AssemblerShared::pushCreator(const char* who) {
+ (void)creators_.append(who);
+ JitSpewStart(JitSpew_Codegen, "# BEGIN creators: ");
+ bool first = true;
+ for (const char* str : creators_) {
+ JitSpewCont(JitSpew_Codegen, "%s%s", first ? "" : "/", str);
+ first = false;
+ }
+ JitSpewCont(JitSpew_Codegen, "\n");
+}
+
+void AssemblerShared::popCreator() {
+ JitSpewStart(JitSpew_Codegen, "# END creators: ");
+ bool first = true;
+ for (const char* str : creators_) {
+ JitSpewCont(JitSpew_Codegen, "%s%s", first ? "" : "/", str);
+ first = false;
+ }
+ JitSpewCont(JitSpew_Codegen, "\n");
+ if (creators_.empty()) {
+ JitSpew(JitSpew_Codegen, " ");
+ }
+ MOZ_ASSERT(!creators_.empty());
+ creators_.popBack();
+}
+
+bool AssemblerShared::hasCreator() const {
+ // If you get failures of assertions of the form `MOZ_ASSERT(hasCreator())`,
+ // what this means is that a `MacroAssembler` (or, really, anything that
+ // inherits from `js::jit::AssemblerShared`) has emitted code or data from a
+ // place, in the SM C++ hierarchy, that is not nested within an
+ // `AutoCreatedBy` RAII scope. Consequently the emitted instructions/data
+ // won't have any owner that is identifiable in the `IONFLAGS=codegen`
+ // output.
+ //
+ // Fixing this is easy: work back up the crash stack and decide on a place
+ // to put an `AutoCreatedBy` call. A bit of grepping for `AutoCreatedBy`
+ // should make it obvious what to do. If in doubt, add `AutoCreatedBy`
+ // calls liberally; "extra" ones are harmless.
+ return !creators_.empty();
+}
+#endif
+
+} // namespace js::jit
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
new file mode 100644
index 0000000000..d3e66fe7a4
--- /dev/null
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -0,0 +1,716 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Assembler_shared_h
+#define jit_shared_Assembler_shared_h
+
+#include "mozilla/CheckedInt.h"
+
+#include <limits.h>
+
+#include "gc/Barrier.h"
+#include "jit/AtomicOp.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/JitCode.h"
+#include "jit/JitContext.h"
+#include "jit/Label.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "vm/HelperThreads.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmConstants.h"
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_WASM32) || \
+ defined(JS_CODEGEN_RISCV64)
+// Push return addresses callee-side.
+# define JS_USE_LINK_REGISTER
+#endif
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_LOONG64) || \
+ defined(JS_CODEGEN_RISCV64)
+// JS_CODELABEL_LINKMODE gives labels additional metadata
+// describing how Bind() should patch them.
+# define JS_CODELABEL_LINKMODE
+#endif
+
+namespace js {
+namespace jit {
+
+enum class FrameType;
+enum class ExceptionResumeKind : int32_t;
+
+namespace Disassembler {
+class HeapAccess;
+} // namespace Disassembler
+
+static constexpr uint32_t Simd128DataSize = 4 * sizeof(int32_t);
+static_assert(Simd128DataSize == 4 * sizeof(int32_t),
+ "SIMD data should be able to contain int32x4");
+static_assert(Simd128DataSize == 4 * sizeof(float),
+ "SIMD data should be able to contain float32x4");
+static_assert(Simd128DataSize == 2 * sizeof(double),
+ "SIMD data should be able to contain float64x2");
+
+enum Scale { TimesOne = 0, TimesTwo = 1, TimesFour = 2, TimesEight = 3 };
+
+static_assert(sizeof(JS::Value) == 8,
+ "required for TimesEight and 3 below to be correct");
+static const Scale ValueScale = TimesEight;
+static const size_t ValueShift = 3;
+
+static inline unsigned ScaleToShift(Scale scale) { return unsigned(scale); }
+
+static inline bool IsShiftInScaleRange(int i) {
+ return i >= TimesOne && i <= TimesEight;
+}
+
+static inline Scale ShiftToScale(int i) {
+ MOZ_ASSERT(IsShiftInScaleRange(i));
+ return Scale(i);
+}
+
+static inline Scale ScaleFromElemWidth(int shift) {
+ switch (shift) {
+ case 1:
+ return TimesOne;
+ case 2:
+ return TimesTwo;
+ case 4:
+ return TimesFour;
+ case 8:
+ return TimesEight;
+ }
+
+ MOZ_CRASH("Invalid scale");
+}
+
+static inline Scale ScaleFromScalarType(Scalar::Type type) {
+ return ScaleFromElemWidth(Scalar::byteSize(type));
+}
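A quick worked example of the scale helpers above: Scalar::Int32 is 4 bytes wide, so its scale is TimesFour and an element address is base + (index << 2) + offset.

  Scale s = ScaleFromScalarType(Scalar::Int32);  // TimesFour
  unsigned shift = ScaleToShift(s);              // 2, i.e. index << 2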
+
+// Used for 32-bit immediates which do not require relocation.
+struct Imm32 {
+ int32_t value;
+
+ explicit Imm32(int32_t value) : value(value) {}
+ explicit Imm32(FrameType type) : Imm32(int32_t(type)) {}
+ explicit Imm32(ExceptionResumeKind kind) : Imm32(int32_t(kind)) {}
+
+ static inline Imm32 ShiftOf(enum Scale s) {
+ switch (s) {
+ case TimesOne:
+ return Imm32(0);
+ case TimesTwo:
+ return Imm32(1);
+ case TimesFour:
+ return Imm32(2);
+ case TimesEight:
+ return Imm32(3);
+ };
+ MOZ_CRASH("Invalid scale");
+ }
+
+ static inline Imm32 FactorOf(enum Scale s) {
+ return Imm32(1 << ShiftOf(s).value);
+ }
+};
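For concreteness, the two static helpers above relate as follows:

  // Imm32::ShiftOf(TimesEight).value  == 3
  // Imm32::FactorOf(TimesEight).value == (1 << 3) == 8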
+
+// Pointer-sized integer to be embedded as an immediate in an instruction.
+struct ImmWord {
+ uintptr_t value;
+
+ explicit ImmWord(uintptr_t value) : value(value) {}
+};
+
+// Used for 64-bit immediates which do not require relocation.
+struct Imm64 {
+ uint64_t value;
+
+ explicit Imm64(int64_t value) : value(value) {}
+
+ Imm32 low() const { return Imm32(int32_t(value)); }
+
+ Imm32 hi() const { return Imm32(int32_t(value >> 32)); }
+
+ inline Imm32 firstHalf() const;
+ inline Imm32 secondHalf() const;
+};
+
+#ifdef DEBUG
+static inline bool IsCompilingWasm() {
+ return GetJitContext()->isCompilingWasm();
+}
+#endif
+
+// Pointer to be embedded as an immediate in an instruction.
+struct ImmPtr {
+ void* value;
+
+ struct NoCheckToken {};
+
+ explicit constexpr ImmPtr(std::nullptr_t) : value(nullptr) {
+ // Explicit constructor for nullptr. This ensures ImmPtr(0) can't be called.
+ // Either use ImmPtr(nullptr) or ImmWord(0).
+ }
+
+ explicit ImmPtr(void* value, NoCheckToken) : value(value) {
+ // A special unchecked variant for contexts where we know it is safe to
+ // use an immptr. This is assuming the caller knows what they're doing.
+ }
+
+ explicit ImmPtr(const void* value) : value(const_cast<void*>(value)) {
+ // To make code serialization-safe, wasm compilation should only
+ // compile pointer immediates using a SymbolicAddress.
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R>
+ explicit ImmPtr(R (*pf)()) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1>
+ explicit ImmPtr(R (*pf)(A1)) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2>
+ explicit ImmPtr(R (*pf)(A1, A2)) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2, class A3>
+ explicit ImmPtr(R (*pf)(A1, A2, A3)) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2, class A3, class A4>
+ explicit ImmPtr(R (*pf)(A1, A2, A3, A4))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+};
+
+// The same as ImmPtr except that the intention is to patch this
+// instruction. The initial value of the immediate is 'addr' and this value is
+// either clobbered or used in the patching process.
+struct PatchedImmPtr {
+ void* value;
+
+ explicit PatchedImmPtr() : value(nullptr) {}
+ explicit PatchedImmPtr(const void* value) : value(const_cast<void*>(value)) {}
+};
+
+class AssemblerShared;
+class ImmGCPtr;
+
+// Used for immediates which require relocation.
+class ImmGCPtr {
+ public:
+ const gc::Cell* value;
+
+ explicit ImmGCPtr(const gc::Cell* ptr) : value(ptr) {
+ // Nursery pointers can't be used if the main thread might be currently
+ // performing a minor GC.
+ MOZ_ASSERT_IF(ptr && !ptr->isTenured(), !CurrentThreadIsIonCompiling());
+
+ // wasm shouldn't be creating GC things
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ private:
+ ImmGCPtr() : value(0) {}
+};
+
+// Pointer to trampoline code. Trampoline code is kept alive until the runtime
+// is destroyed, so does not need to be traced.
+struct TrampolinePtr {
+ uint8_t* value;
+
+ TrampolinePtr() : value(nullptr) {}
+ explicit TrampolinePtr(uint8_t* value) : value(value) { MOZ_ASSERT(value); }
+};
+
+// Pointer to be embedded as an immediate that an instruction loads from or
+// stores to.
+struct AbsoluteAddress {
+ void* addr;
+
+ explicit AbsoluteAddress(const void* addr) : addr(const_cast<void*>(addr)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ AbsoluteAddress offset(ptrdiff_t delta) {
+ return AbsoluteAddress(((uint8_t*)addr) + delta);
+ }
+};
+
+// The same as AbsoluteAddress except that the intention is to patch this
+// instruction. The initial value of the immediate is 'addr' and this value is
+// either clobbered or used in the patching process.
+struct PatchedAbsoluteAddress {
+ void* addr;
+
+ explicit PatchedAbsoluteAddress() : addr(nullptr) {}
+ explicit PatchedAbsoluteAddress(const void* addr)
+ : addr(const_cast<void*>(addr)) {}
+ explicit PatchedAbsoluteAddress(uintptr_t addr)
+ : addr(reinterpret_cast<void*>(addr)) {}
+};
+
+// Specifies an address computed in the form of a register base and a constant,
+// 32-bit offset.
+struct Address {
+ RegisterOrSP base;
+ int32_t offset;
+
+ Address(Register base, int32_t offset)
+ : base(RegisterOrSP(base)), offset(offset) {}
+
+#ifdef JS_HAS_HIDDEN_SP
+ Address(RegisterOrSP base, int32_t offset) : base(base), offset(offset) {}
+#endif
+
+ Address() = delete;
+};
+
+#if JS_BITS_PER_WORD == 32
+
+static inline Address LowWord(const Address& address) {
+ using mozilla::CheckedInt;
+
+ CheckedInt<int32_t> offset =
+ CheckedInt<int32_t>(address.offset) + INT64LOW_OFFSET;
+ MOZ_ALWAYS_TRUE(offset.isValid());
+ return Address(address.base, offset.value());
+}
+
+static inline Address HighWord(const Address& address) {
+ using mozilla::CheckedInt;
+
+ CheckedInt<int32_t> offset =
+ CheckedInt<int32_t>(address.offset) + INT64HIGH_OFFSET;
+ MOZ_ALWAYS_TRUE(offset.isValid());
+ return Address(address.base, offset.value());
+}
+
+#endif
+
+// Specifies an address computed in the form of a register base, a register
+// index with a scale, and a constant, 32-bit offset.
+struct BaseIndex {
+ RegisterOrSP base;
+ Register index;
+ Scale scale;
+ int32_t offset;
+
+ BaseIndex(Register base, Register index, Scale scale, int32_t offset = 0)
+ : base(RegisterOrSP(base)), index(index), scale(scale), offset(offset) {}
+
+#ifdef JS_HAS_HIDDEN_SP
+ BaseIndex(RegisterOrSP base, Register index, Scale scale, int32_t offset = 0)
+ : base(base), index(index), scale(scale), offset(offset) {}
+#endif
+
+ BaseIndex() = delete;
+};
+
+#if JS_BITS_PER_WORD == 32
+
+static inline BaseIndex LowWord(const BaseIndex& address) {
+ using mozilla::CheckedInt;
+
+ CheckedInt<int32_t> offset =
+ CheckedInt<int32_t>(address.offset) + INT64LOW_OFFSET;
+ MOZ_ALWAYS_TRUE(offset.isValid());
+ return BaseIndex(address.base, address.index, address.scale, offset.value());
+}
+
+static inline BaseIndex HighWord(const BaseIndex& address) {
+ using mozilla::CheckedInt;
+
+ CheckedInt<int32_t> offset =
+ CheckedInt<int32_t>(address.offset) + INT64HIGH_OFFSET;
+ MOZ_ALWAYS_TRUE(offset.isValid());
+ return BaseIndex(address.base, address.index, address.scale, offset.value());
+}
+
+#endif
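A hedged sketch of how these 32-bit helpers are used; the concrete values of INT64LOW_OFFSET/INT64HIGH_OFFSET are defined elsewhere, and 0/4 on a little-endian target is an assumption here:

  // Address slot(base, 16);      // a 64-bit slot at base + 16
  // Address lo = LowWord(slot);  // base + 16 + INT64LOW_OFFSET  (0, assumed)
  // Address hi = HighWord(slot); // base + 16 + INT64HIGH_OFFSET (4, assumed)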
+
+// A BaseIndex used to access Values. Note that |offset| is *not* scaled by
+// sizeof(Value). Use this *only* if you're indexing into a series of Values
+// that aren't object elements or object slots (for example, values on the
+// stack, values in an arguments object, &c.). If you're indexing into an
+// object's elements or slots, don't use this directly! Use
+// BaseObject{Element,Slot}Index instead.
+struct BaseValueIndex : BaseIndex {
+ BaseValueIndex(Register base, Register index, int32_t offset = 0)
+ : BaseIndex(RegisterOrSP(base), index, ValueScale, offset) {}
+
+#ifdef JS_HAS_HIDDEN_SP
+ BaseValueIndex(RegisterOrSP base, Register index, int32_t offset = 0)
+ : BaseIndex(base, index, ValueScale, offset) {}
+#endif
+};
+
+// Specifies the address of an indexed Value within object elements from a
+// base. The index must not already be scaled by sizeof(Value)!
+struct BaseObjectElementIndex : BaseValueIndex {
+ BaseObjectElementIndex(Register base, Register index, int32_t offset = 0)
+ : BaseValueIndex(base, index, offset) {}
+
+#ifdef JS_HAS_HIDDEN_SP
+ BaseObjectElementIndex(RegisterOrSP base, Register index, int32_t offset = 0)
+ : BaseValueIndex(base, index, offset) {}
+#endif
+
+ static void staticAssertions();
+};
+
+// Like BaseObjectElementIndex, except for object slots.
+struct BaseObjectSlotIndex : BaseValueIndex {
+ BaseObjectSlotIndex(Register base, Register index)
+ : BaseValueIndex(base, index) {}
+
+#ifdef JS_HAS_HIDDEN_SP
+ BaseObjectSlotIndex(RegisterOrSP base, Register index)
+ : BaseValueIndex(base, index) {}
+#endif
+
+ static void staticAssertions();
+};
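A hypothetical use of BaseObjectElementIndex (the masm call and register names are illustrative, not from the patch): because BaseValueIndex bakes in ValueScale, the index register holds an element index rather than a byte offset.

  // masm.loadValue(BaseObjectElementIndex(elementsReg, indexReg), output);
  // effective address = elementsReg + indexReg * sizeof(Value) + 0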
+
+enum class RelocationKind {
+ // The target is immovable, so patching is only needed if the source
+ // buffer is relocated and the reference is relative.
+ HARDCODED,
+
+ // The target is the start of a JitCode buffer, which must be traced
+ // during garbage collection. Relocations and patching may be needed.
+ JITCODE
+};
+
+class CodeOffset {
+ size_t offset_;
+
+ static const size_t NOT_BOUND = size_t(-1);
+
+ public:
+ explicit CodeOffset(size_t offset) : offset_(offset) {}
+ CodeOffset() : offset_(NOT_BOUND) {}
+
+ size_t offset() const {
+ MOZ_ASSERT(bound());
+ return offset_;
+ }
+
+ void bind(size_t offset) {
+ MOZ_ASSERT(!bound());
+ offset_ = offset;
+ MOZ_ASSERT(bound());
+ }
+ bool bound() const { return offset_ != NOT_BOUND; }
+
+ void offsetBy(size_t delta) {
+ MOZ_ASSERT(bound());
+ MOZ_ASSERT(offset_ + delta >= offset_, "no overflow");
+ offset_ += delta;
+ }
+};
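A small lifecycle example grounded in the class above:

  CodeOffset off;   // default-constructed: not bound, offset() would assert
  off.bind(128);    // bound at byte 128 of the buffer
  off.offsetBy(4);  // now off.offset() == 132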
+
+// A code label contains an absolute reference to a point in the code. Thus, it
+// cannot be patched until after linking.
+// When the source label is resolved into a memory address, this address is
+// patched into the destination address.
+// Some platforms need to distinguish between multiple ways of patching that
+// address. See JS_CODELABEL_LINKMODE.
+class CodeLabel {
+ // The destination position, where the absolute reference should get
+ // patched into.
+ CodeOffset patchAt_;
+
+ // The source label (relative) in the code to where the destination should
+ // get patched to.
+ CodeOffset target_;
+
+#ifdef JS_CODELABEL_LINKMODE
+ public:
+ enum LinkMode { Uninitialized = 0, RawPointer, MoveImmediate, JumpImmediate };
+
+ private:
+ LinkMode linkMode_ = Uninitialized;
+#endif
+
+ public:
+ CodeLabel() = default;
+ explicit CodeLabel(const CodeOffset& patchAt) : patchAt_(patchAt) {}
+ CodeLabel(const CodeOffset& patchAt, const CodeOffset& target)
+ : patchAt_(patchAt), target_(target) {}
+ CodeOffset* patchAt() { return &patchAt_; }
+ CodeOffset* target() { return &target_; }
+ CodeOffset patchAt() const { return patchAt_; }
+ CodeOffset target() const { return target_; }
+#ifdef JS_CODELABEL_LINKMODE
+ LinkMode linkMode() const { return linkMode_; }
+ void setLinkMode(LinkMode value) { linkMode_ = value; }
+#endif
+};
+
+typedef Vector<CodeLabel, 0, SystemAllocPolicy> CodeLabelVector;
+
+class CodeLocationLabel {
+ uint8_t* raw_ = nullptr;
+
+ public:
+ CodeLocationLabel(JitCode* code, CodeOffset base) {
+ MOZ_ASSERT(base.offset() < code->instructionsSize());
+ raw_ = code->raw() + base.offset();
+ }
+ explicit CodeLocationLabel(JitCode* code) { raw_ = code->raw(); }
+ explicit CodeLocationLabel(uint8_t* raw) {
+ MOZ_ASSERT(raw);
+ raw_ = raw;
+ }
+
+ ptrdiff_t operator-(const CodeLocationLabel& other) const {
+ return raw_ - other.raw_;
+ }
+
+ uint8_t* raw() const { return raw_; }
+};
+
+} // namespace jit
+
+namespace wasm {
+
+// Represents an instruction to be patched and the intended pointee. These
+// links are accumulated in the MacroAssembler, but patching is done outside
+// the MacroAssembler (in Module::staticallyLink).
+
+struct SymbolicAccess {
+ SymbolicAccess(jit::CodeOffset patchAt, SymbolicAddress target)
+ : patchAt(patchAt), target(target) {}
+
+ jit::CodeOffset patchAt;
+ SymbolicAddress target;
+};
+
+typedef Vector<SymbolicAccess, 0, SystemAllocPolicy> SymbolicAccessVector;
+
+// Describes a single wasm or asm.js memory access for the purpose of generating
+// code and metadata.
+
+class MemoryAccessDesc {
+ uint64_t offset64_;
+ uint32_t align_;
+ Scalar::Type type_;
+ jit::Synchronization sync_;
+ wasm::BytecodeOffset trapOffset_;
+ wasm::SimdOp widenOp_;
+ enum { Plain, ZeroExtend, Splat, Widen } loadOp_;
+
+ public:
+ explicit MemoryAccessDesc(
+ Scalar::Type type, uint32_t align, uint64_t offset,
+ BytecodeOffset trapOffset,
+ const jit::Synchronization& sync = jit::Synchronization::None())
+ : offset64_(offset),
+ align_(align),
+ type_(type),
+ sync_(sync),
+ trapOffset_(trapOffset),
+ widenOp_(wasm::SimdOp::Limit),
+ loadOp_(Plain) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+ }
+
+ // The offset is a 64-bit value because of memory64. Almost always, it will
+  // fit in 32 bits, and hence offset() checks that it does; this method is
+  // used almost everywhere in the engine. The compiler front-ends must use
+ // offset64() to bypass the check performed by offset(), and must resolve
+ // offsets that don't fit in 32 bits early in the compilation pipeline so that
+ // no large offsets are observed later.
+ uint32_t offset() const {
+ MOZ_ASSERT(offset64_ <= UINT32_MAX);
+ return uint32_t(offset64_);
+ }
+ uint64_t offset64() const { return offset64_; }
+
+ // The offset can be cleared without worrying about its magnitude.
+ void clearOffset() { offset64_ = 0; }
+
+ // The offset can be set (after compile-time evaluation) but only to values
+ // that fit in 32 bits.
+ void setOffset32(uint32_t offset) { offset64_ = offset; }
+
+ uint32_t align() const { return align_; }
+ Scalar::Type type() const { return type_; }
+ unsigned byteSize() const { return Scalar::byteSize(type()); }
+ const jit::Synchronization& sync() const { return sync_; }
+ BytecodeOffset trapOffset() const { return trapOffset_; }
+ wasm::SimdOp widenSimdOp() const {
+ MOZ_ASSERT(isWidenSimd128Load());
+ return widenOp_;
+ }
+ bool isAtomic() const { return !sync_.isNone(); }
+ bool isZeroExtendSimd128Load() const { return loadOp_ == ZeroExtend; }
+ bool isSplatSimd128Load() const { return loadOp_ == Splat; }
+ bool isWidenSimd128Load() const { return loadOp_ == Widen; }
+
+ void setZeroExtendSimd128Load() {
+ MOZ_ASSERT(type() == Scalar::Float32 || type() == Scalar::Float64);
+ MOZ_ASSERT(!isAtomic());
+ MOZ_ASSERT(loadOp_ == Plain);
+ loadOp_ = ZeroExtend;
+ }
+
+ void setSplatSimd128Load() {
+ MOZ_ASSERT(type() == Scalar::Uint8 || type() == Scalar::Uint16 ||
+ type() == Scalar::Float32 || type() == Scalar::Float64);
+ MOZ_ASSERT(!isAtomic());
+ MOZ_ASSERT(loadOp_ == Plain);
+ loadOp_ = Splat;
+ }
+
+ void setWidenSimd128Load(wasm::SimdOp op) {
+ MOZ_ASSERT(type() == Scalar::Float64);
+ MOZ_ASSERT(!isAtomic());
+ MOZ_ASSERT(loadOp_ == Plain);
+ widenOp_ = op;
+ loadOp_ = Widen;
+ }
+};
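A hedged construction example (the BytecodeOffset value is a placeholder):

  // A naturally aligned, non-atomic int32 load at constant offset 8:
  // MemoryAccessDesc access(Scalar::Int32, /*align=*/4, /*offset=*/8,
  //                         BytecodeOffset(0));
  // access.offset() == 8, access.byteSize() == 4, access.isAtomic() == false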
+
+} // namespace wasm
+
+namespace jit {
+
+// The base class of all Assemblers for all archs.
+class AssemblerShared {
+ wasm::CallSiteVector callSites_;
+ wasm::CallSiteTargetVector callSiteTargets_;
+ wasm::TrapSiteVectorArray trapSites_;
+ wasm::SymbolicAccessVector symbolicAccesses_;
+ wasm::TryNoteVector tryNotes_;
+#ifdef DEBUG
+ // To facilitate figuring out which part of SM created each instruction as
+ // shown by IONFLAGS=codegen, this maintains a stack of (notionally)
+ // code-creating routines, which is printed in the log output every time an
+ // entry is pushed or popped. Do not push/pop entries directly; instead use
+ // `class AutoCreatedBy`.
+ mozilla::Vector<const char*> creators_;
+#endif
+
+ protected:
+ CodeLabelVector codeLabels_;
+
+ bool enoughMemory_;
+ bool embedsNurseryPointers_;
+
+ public:
+ AssemblerShared() : enoughMemory_(true), embedsNurseryPointers_(false) {}
+
+ ~AssemblerShared();
+
+#ifdef DEBUG
+ // Do not use these directly; instead use `class AutoCreatedBy`.
+ void pushCreator(const char*);
+ void popCreator();
+ // See comment on the implementation of `hasCreator` for guidance on what to
+  // do if you get failures of the assertion `MOZ_ASSERT(hasCreator())`.
+ bool hasCreator() const;
+#endif
+
+ void propagateOOM(bool success) { enoughMemory_ &= success; }
+
+ void setOOM() { enoughMemory_ = false; }
+
+ bool oom() const { return !enoughMemory_; }
+
+ bool embedsNurseryPointers() const { return embedsNurseryPointers_; }
+
+ void addCodeLabel(CodeLabel label) {
+ propagateOOM(codeLabels_.append(label));
+ }
+ size_t numCodeLabels() const { return codeLabels_.length(); }
+ CodeLabel codeLabel(size_t i) { return codeLabels_[i]; }
+ CodeLabelVector& codeLabels() { return codeLabels_; }
+
+ // WebAssembly metadata emitted by masm operations accumulated on the
+ // MacroAssembler, and swapped into a wasm::CompiledCode after finish().
+
+ template <typename... Args>
+ void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr,
+ Args&&... args) {
+ enoughMemory_ &= callSites_.emplaceBack(desc, retAddr.offset());
+ enoughMemory_ &= callSiteTargets_.emplaceBack(std::forward<Args>(args)...);
+ }
+ void append(wasm::Trap trap, wasm::TrapSite site) {
+ enoughMemory_ &= trapSites_[trap].append(site);
+ }
+ void append(const wasm::MemoryAccessDesc& access, uint32_t pcOffset) {
+ appendOutOfBoundsTrap(access.trapOffset(), pcOffset);
+ }
+ void appendOutOfBoundsTrap(wasm::BytecodeOffset trapOffset,
+ uint32_t pcOffset) {
+ append(wasm::Trap::OutOfBounds, wasm::TrapSite(pcOffset, trapOffset));
+ }
+ void append(wasm::SymbolicAccess access) {
+ enoughMemory_ &= symbolicAccesses_.append(access);
+ }
+ // This one returns an index as the try note so that it can be looked up
+ // later to add the end point and stack position of the try block.
+ [[nodiscard]] bool append(wasm::TryNote tryNote, size_t* tryNoteIndex) {
+ if (!tryNotes_.append(tryNote)) {
+ enoughMemory_ = false;
+ return false;
+ }
+ *tryNoteIndex = tryNotes_.length() - 1;
+ return true;
+ }
+
+ wasm::CallSiteVector& callSites() { return callSites_; }
+ wasm::CallSiteTargetVector& callSiteTargets() { return callSiteTargets_; }
+ wasm::TrapSiteVectorArray& trapSites() { return trapSites_; }
+ wasm::SymbolicAccessVector& symbolicAccesses() { return symbolicAccesses_; }
+ wasm::TryNoteVector& tryNotes() { return tryNotes_; }
+};
+
+// AutoCreatedBy pushes and later pops a who-created-these-insns? tag into the
+// JitSpew_Codegen output. These could be created fairly frequently, so a
+// dummy version that compiles away is provided for non-debug builds. The tag
+// text can be completely arbitrary -- it serves only to help readers of the
+// output text to relate instructions back to the part(s) of SM that created
+// them.
+#ifdef DEBUG
+class MOZ_RAII AutoCreatedBy {
+ private:
+ AssemblerShared& ash_;
+
+ public:
+ AutoCreatedBy(AssemblerShared& ash, const char* who) : ash_(ash) {
+ ash_.pushCreator(who);
+ }
+ ~AutoCreatedBy() { ash_.popCreator(); }
+};
+#else
+class MOZ_RAII AutoCreatedBy {
+ public:
+ inline AutoCreatedBy(AssemblerShared& ash, const char* who) {}
+ // A user-defined constructor is necessary to stop some compilers from
+ // complaining about unused variables.
+ inline ~AutoCreatedBy() {}
+};
+#endif
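A typical hedged usage, mirroring the guidance in the hasCreator() comment above (the masm name and the tag string are illustrative):

  // AutoCreatedBy acb(masm, "CodeGenerator::visitFoo");
  // ...emit instructions; in IONFLAGS=codegen output they are attributed to
  // "CodeGenerator::visitFoo" until acb goes out of scope.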
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Assembler_shared_h */
diff --git a/js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h b/js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
new file mode 100644
index 0000000000..d4bf3430ff
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
@@ -0,0 +1,453 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h, both the comment block at the
+ * beginning and the #ifdef nest near the end.
+ *
+ * This is a common file for tier-3 platforms (including simulators for our
+ * tier-1 platforms) that are not providing hardware-specific implementations of
+ * the atomic operations. Please keep it reasonably platform-independent by
+ * adding #ifdefs at the beginning as much as possible, not throughout the file.
+ *
+ *
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ * !!!! NOTE !!!!
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ *
+ * The implementations in this file are NOT SAFE and cannot be safe even in
+ * principle because they rely on C++ undefined behavior. However, they are
+ * frequently good enough for tier-3 platforms.
+ */
+
+#ifndef jit_shared_AtomicOperations_feeling_lucky_gcc_h
+#define jit_shared_AtomicOperations_feeling_lucky_gcc_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+// Explicitly exclude tier-1 platforms.
+
+#if (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
+ defined(_M_IX86) || (defined(__arm__) && __ARM_ARCH >= 7) || \
+ defined(__aarch64__))
+# error "Do not use on a tier-1 platform where inline assembly is available"
+#endif
+
+#if !(defined(__clang__) || defined(__GNUC__))
+# error "This file only for gcc/Clang"
+#endif
+
+// 64-bit atomics are not required by the JS spec, and you can compile
+// SpiderMonkey without them. 64-bit atomics are required for BigInt
+// support.
+//
+// 64-bit lock-free atomics are required for WebAssembly, but gating in the
+// WebAssembly subsystem ensures that no WebAssembly-supporting platforms need
+// code in this file.
+
+#if defined(JS_SIMULATOR_ARM64) || defined(JS_SIMULATOR_ARM) || \
+ defined(JS_SIMULATOR_MIPS64) || defined(JS_SIMULATOR_LOONG64)
+// On some x86 (32-bit) systems this will not work because the compiler does not
+// open-code 64-bit atomics. If so, try linking with -latomic. If that doesn't
+// work, you're mostly on your own.
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+#endif
+
+#if defined(__arm__)
+# define HAS_64BIT_ATOMICS
+#endif
+
+#if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+ defined(__PPC64LE__)
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+#endif
+
+#if defined(__riscv) && __riscv_xlen == 64
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+#endif
+
+#if defined(__loongarch64)
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+#endif
+
+#ifdef __sparc__
+# ifdef __LP64__
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+# endif
+#endif
+
+#ifdef JS_CODEGEN_NONE
+# ifdef JS_64BIT
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+# endif
+#endif
+
+// The default implementation tactic for gcc/clang is to use the newer __atomic
+// intrinsics added for use in C++11 <atomic>. Where that isn't available, we
+// use GCC's older __sync functions instead.
+//
+// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward compatible
+// option for older compilers: enable this to use GCC's old __sync functions
+// instead of the newer __atomic functions. This will be required for GCC 4.6.x
+// and earlier, and probably for Clang 3.1, should we need to use those
+// versions. Firefox no longer supports compilers that old.
+
+// #define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+// Sanity check.
+
+#if defined(HAS_64BIT_LOCKFREE) && !defined(HAS_64BIT_ATOMICS)
+# error "This combination of features is senseless, please fix"
+#endif
+
+// Try to avoid platform #ifdefs below this point.
+
+// When compiling with Clang on 32-bit linux it will be necessary to link with
+// -latomic to get the proper 64-bit intrinsics.
+
+inline bool js::jit::AtomicOperations::hasAtomic8() {
+#if defined(HAS_64BIT_ATOMICS)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline bool js::jit::AtomicOperations::isLockfree8() {
+#if defined(HAS_64BIT_LOCKFREE)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline void js::jit::AtomicOperations::fenceSeqCst() {
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+#else
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+#endif
+}
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ T v = *addr;
+ __sync_synchronize();
+#else
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+#endif
+ return v;
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::loadSeqCst(int64_t* addr) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::loadSeqCst(uint64_t* addr) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ *addr = val;
+ __sync_synchronize();
+#else
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline void AtomicOperations::storeSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline void AtomicOperations::storeSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ T v;
+ __sync_synchronize();
+ do {
+ v = *addr;
+ } while (__sync_val_compare_and_swap(addr, v, val) != v);
+ return v;
+#else
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
+ T newval) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_val_compare_and_swap(addr, oldval, newval);
+#else
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST);
+ return oldval;
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::compareExchangeSeqCst(int64_t* addr,
+ int64_t oldval,
+ int64_t newval) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::compareExchangeSeqCst(uint64_t* addr,
+ uint64_t oldval,
+ uint64_t newval) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_add(addr, val);
+#else
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchAddSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchAddSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_sub(addr, val);
+#else
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchSubSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchSubSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_and(addr, val);
+#else
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchAndSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchAndSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_or(addr, val);
+#else
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchOrSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchOrSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_xor(addr, val);
+#else
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchXorSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchXorSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+ // This is actually roughly right even on 32-bit platforms since in that
+ // case, double, int64, and uint64 loads need not be access-atomic.
+ //
+ // We could use __atomic_load, but it would be needlessly expensive on
+ // 32-bit platforms that could support it and just plain wrong on others.
+ return *addr;
+}
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+  // This is actually roughly right even on 32-bit platforms since in that
+  // case, double, int64, and uint64 stores need not be access-atomic.
+ //
+ // We could use __atomic_store, but it would be needlessly expensive on
+ // 32-bit platforms that could support it and just plain wrong on others.
+ *addr = val;
+}
+
+inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
+ MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
+ ::memcpy(dest, src, nbytes);
+}
+
+inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ ::memmove(dest, src, nbytes);
+}
+
+#undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#undef HAS_64BIT_ATOMICS
+#undef HAS_64BIT_LOCKFREE
+
+#endif // jit_shared_AtomicOperations_feeling_lucky_gcc_h
diff --git a/js/src/jit/shared/AtomicOperations-feeling-lucky.h b/js/src/jit/shared/AtomicOperations-feeling-lucky.h
new file mode 100644
index 0000000000..4aa7883fd4
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky.h
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_AtomicOperations_feeling_lucky_h
+#define jit_shared_AtomicOperations_feeling_lucky_h
+
+#if defined(__clang__) || defined(__GNUC__)
+# include "jit/shared/AtomicOperations-feeling-lucky-gcc.h"
+#else
+# error "No AtomicOperations support for this platform+compiler combination"
+#endif
+
+#endif // jit_shared_AtomicOperations_feeling_lucky_h
diff --git a/js/src/jit/shared/AtomicOperations-shared-jit.cpp b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
new file mode 100644
index 0000000000..df7c049dfa
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
@@ -0,0 +1,180 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/AtomicOperations.h"
+
+#if defined(__arm__)
+# include "jit/arm/Architecture-arm.h"
+#endif
+
+#ifdef JS_HAVE_GENERATED_ATOMIC_OPS
+
+# include <atomic>
+
+# include "js/GCAPI.h"
+
+using namespace js;
+using namespace js::jit;
+
+// A "block" is a sequence of bytes that is a reasonable quantum to copy to
+// amortize call overhead when implementing memcpy and memmove. A block will
+// not fit in registers on all platforms and copying it without using
+// intermediate memory will therefore be sensitive to overlap.
+//
+// A "word" is an item that we can copy using only register intermediate storage
+// on all platforms; words can be individually copied without worrying about
+// overlap.
+//
+// Blocks and words can be aligned or unaligned; specific (generated) copying
+// functions handle this in platform-specific ways.
+
+static constexpr size_t WORDSIZE = sizeof(uintptr_t);
+static constexpr size_t BLOCKSIZE = 8 * WORDSIZE; // Must be a power of 2
+
+static_assert(BLOCKSIZE % WORDSIZE == 0,
+ "A block is an integral number of words");
+
+// Constants must match the ones in GenerateAtomicOperations.py
+static_assert(JS_GENERATED_ATOMICS_BLOCKSIZE == BLOCKSIZE);
+static_assert(JS_GENERATED_ATOMICS_WORDSIZE == WORDSIZE);
+
+static constexpr size_t WORDMASK = WORDSIZE - 1;
+static constexpr size_t BLOCKMASK = BLOCKSIZE - 1;
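As a worked example of how these constants partition a copy (illustrative numbers only, assuming a 64-bit platform where WORDSIZE is 8 and BLOCKSIZE is 64): a word-aligned 150-byte copy decomposes into two 64-byte blocks, two 8-byte words, and a 6-byte byte-wise tail. A standalone sketch of that arithmetic:

```cpp
#include <cstddef>
#include <cstdio>

// Illustrative only: how `nbytes` decomposes into blocks, words, and tail
// bytes once the source pointer is word-aligned. The function name and the
// printed layout are ad hoc, not taken from the patch.
static void DescribeCopySplit(size_t nbytes, size_t wordSize,
                              size_t blockSize) {
  size_t blocks = nbytes / blockSize;
  size_t words = (nbytes % blockSize) / wordSize;
  size_t tail = nbytes % wordSize;
  std::printf("%zu bytes -> %zu blocks, %zu words, %zu tail bytes\n", nbytes,
              blocks, words, tail);
}

// DescribeCopySplit(150, 8, 64) prints:
//   150 bytes -> 2 blocks, 2 words, 6 tail bytes
```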
+
+namespace js {
+namespace jit {
+
+static bool UnalignedAccessesAreOK() {
+# ifdef DEBUG
+ const char* flag = getenv("JS_NO_UNALIGNED_MEMCPY");
+ if (flag && *flag == '1') return false;
+# endif
+# if defined(__x86_64__) || defined(__i386__)
+ return true;
+# elif defined(__arm__)
+ return !HasAlignmentFault();
+# elif defined(__aarch64__)
+ // This is not necessarily true but it's the best guess right now.
+ return true;
+# else
+# error "Unsupported platform"
+# endif
+}
+
+# ifndef JS_64BIT
+void AtomicCompilerFence() {
+ std::atomic_signal_fence(std::memory_order_acq_rel);
+}
+# endif
+
+void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src,
+ size_t nbytes) {
+ JS::AutoSuppressGCAnalysis nogc;
+
+ const uint8_t* lim = src + nbytes;
+
+ // Set up bulk copying. The cases are ordered the way they are on the
+ // assumption that if we can achieve aligned copies even with a little
+ // preprocessing then that is better than unaligned copying on a platform
+ // that supports it.
+
+ if (nbytes >= WORDSIZE) {
+ void (*copyBlock)(uint8_t* dest, const uint8_t* src);
+ void (*copyWord)(uint8_t* dest, const uint8_t* src);
+
+ if (((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0) {
+ const uint8_t* cutoff = (const uint8_t*)RoundUp(uintptr_t(src), WORDSIZE);
+ MOZ_ASSERT(cutoff <= lim); // because nbytes >= WORDSIZE
+ while (src < cutoff) {
+ AtomicCopyByteUnsynchronized(dest++, src++);
+ }
+ copyBlock = AtomicCopyBlockDownUnsynchronized;
+ copyWord = AtomicCopyWordUnsynchronized;
+ } else if (UnalignedAccessesAreOK()) {
+ copyBlock = AtomicCopyBlockDownUnsynchronized;
+ copyWord = AtomicCopyWordUnsynchronized;
+ } else {
+ copyBlock = AtomicCopyUnalignedBlockDownUnsynchronized;
+ copyWord = AtomicCopyUnalignedWordDownUnsynchronized;
+ }
+
+ // Bulk copy, first larger blocks and then individual words.
+
+ const uint8_t* blocklim = src + ((lim - src) & ~BLOCKMASK);
+ while (src < blocklim) {
+ copyBlock(dest, src);
+ dest += BLOCKSIZE;
+ src += BLOCKSIZE;
+ }
+
+ const uint8_t* wordlim = src + ((lim - src) & ~WORDMASK);
+ while (src < wordlim) {
+ copyWord(dest, src);
+ dest += WORDSIZE;
+ src += WORDSIZE;
+ }
+ }
+
+ // Byte copy any remaining tail.
+
+ while (src < lim) {
+ AtomicCopyByteUnsynchronized(dest++, src++);
+ }
+}
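The co-alignment test above, `((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0`, checks that both pointers sit the same number of bytes past a word boundary, so byte-copying up to src's next word boundary aligns dest as well. A standalone sketch with concrete values (assuming an 8-byte word, hence a mask of 7):

```cpp
#include <cstdint>

// Minimal sketch of the co-alignment check; names are ad hoc.
static bool SameWordMisalignment(uintptr_t dest, uintptr_t src,
                                 uintptr_t wordMask) {
  return ((dest ^ src) & wordMask) == 0;
}

// SameWordMisalignment(0x1003, 0x2003, 7) is true: both pointers are 3 bytes
// past an 8-byte boundary, so one byte-copy loop aligns both at once.
// SameWordMisalignment(0x1002, 0x2003, 7) is false: their misalignments
// differ, so only unaligned (or unaligned-tolerant) copying remains.
```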
+
+void AtomicMemcpyUpUnsynchronized(uint8_t* dest, const uint8_t* src,
+ size_t nbytes) {
+ JS::AutoSuppressGCAnalysis nogc;
+
+ const uint8_t* lim = src;
+
+ src += nbytes;
+ dest += nbytes;
+
+ if (nbytes >= WORDSIZE) {
+ void (*copyBlock)(uint8_t* dest, const uint8_t* src);
+ void (*copyWord)(uint8_t* dest, const uint8_t* src);
+
+ if (((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0) {
+ const uint8_t* cutoff = (const uint8_t*)(uintptr_t(src) & ~WORDMASK);
+ MOZ_ASSERT(cutoff >= lim); // Because nbytes >= WORDSIZE
+ while (src > cutoff) {
+ AtomicCopyByteUnsynchronized(--dest, --src);
+ }
+ copyBlock = AtomicCopyBlockUpUnsynchronized;
+ copyWord = AtomicCopyWordUnsynchronized;
+ } else if (UnalignedAccessesAreOK()) {
+ copyBlock = AtomicCopyBlockUpUnsynchronized;
+ copyWord = AtomicCopyWordUnsynchronized;
+ } else {
+ copyBlock = AtomicCopyUnalignedBlockUpUnsynchronized;
+ copyWord = AtomicCopyUnalignedWordUpUnsynchronized;
+ }
+
+ const uint8_t* blocklim = src - ((src - lim) & ~BLOCKMASK);
+ while (src > blocklim) {
+ dest -= BLOCKSIZE;
+ src -= BLOCKSIZE;
+ copyBlock(dest, src);
+ }
+
+ const uint8_t* wordlim = src - ((src - lim) & ~WORDMASK);
+ while (src > wordlim) {
+ dest -= WORDSIZE;
+ src -= WORDSIZE;
+ copyWord(dest, src);
+ }
+ }
+
+ while (src > lim) {
+ AtomicCopyByteUnsynchronized(--dest, --src);
+ }
+}
+
+} // namespace jit
+} // namespace js
+
+#endif // JS_HAVE_GENERATED_ATOMIC_OPS
diff --git a/js/src/jit/shared/AtomicOperations-shared-jit.h b/js/src/jit/shared/AtomicOperations-shared-jit.h
new file mode 100644
index 0000000000..ca66a6f9b9
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.h
@@ -0,0 +1,490 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For overall documentation, see jit/AtomicOperations.h.
+ *
+ * NOTE CAREFULLY: This file is only applicable when we have configured a JIT
+ * and the JIT is for the same architecture that we're compiling the shell for.
+ * Simulators must use a different mechanism.
+ *
+ * See comments before the include nest near the end of jit/AtomicOperations.h
+ * if you didn't understand that.
+ */
+
+#ifndef jit_shared_AtomicOperations_shared_jit_h
+#define jit_shared_AtomicOperations_shared_jit_h
+
+#include "mozilla/Assertions.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/AtomicOperationsGenerated.h"
+#include "vm/Uint8Clamped.h"
+
+namespace js {
+namespace jit {
+
+#ifndef JS_64BIT
+// `AtomicCompilerFence` erects a reordering boundary for operations on the
+// current thread. We use it to prevent the compiler from reordering loads and
+// stores inside larger primitives that are synthesized from cmpxchg.
+extern void AtomicCompilerFence();
+#endif
+
+// `...MemcpyDown` moves bytes toward lower addresses in memory: dest <= src.
+// `...MemcpyUp` moves bytes toward higher addresses in memory: dest >= src.
+extern void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src,
+ size_t nbytes);
+extern void AtomicMemcpyUpUnsynchronized(uint8_t* dest, const uint8_t* src,
+ size_t nbytes);
+
+} // namespace jit
+} // namespace js
+
+inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
+
+inline bool js::jit::AtomicOperations::isLockfree8() { return true; }
+
+inline void js::jit::AtomicOperations::fenceSeqCst() { AtomicFenceSeqCst(); }
+
+#define JIT_LOADOP(T, U, loadop) \
+ template <> \
+ inline T AtomicOperations::loadSeqCst(T* addr) { \
+ return (T)loadop((U*)addr); \
+ }
+
+#ifndef JS_64BIT
+# define JIT_LOADOP_CAS(T) \
+ template <> \
+ inline T AtomicOperations::loadSeqCst(T* addr) { \
+ AtomicCompilerFence(); \
+ return (T)AtomicCmpXchg64SeqCst((uint64_t*)addr, 0, 0); \
+ }
+#endif // !JS_64BIT
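On 32-bit targets the JIT_LOADOP_CAS macro above synthesizes a seq-cst 64-bit load out of the 64-bit compare-exchange stub. The trick, shown here as a standalone std::atomic sketch (not SpiderMonkey code), is that a compare-exchange whose expected and desired values are both zero never changes memory but still reports the current value atomically:

```cpp
#include <atomic>
#include <cstdint>

// Standalone illustration of a 64-bit seq-cst load built from compare-exchange.
inline int64_t LoadViaCmpXchg(std::atomic<int64_t>* addr) {
  int64_t expected = 0;
  addr->compare_exchange_strong(expected, 0, std::memory_order_seq_cst);
  // On failure `expected` is updated to the observed value; on success the
  // stored value was already 0, so `expected` holds the loaded value either way.
  return expected;
}
```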
+
+namespace js {
+namespace jit {
+
+JIT_LOADOP(int8_t, uint8_t, AtomicLoad8SeqCst)
+JIT_LOADOP(uint8_t, uint8_t, AtomicLoad8SeqCst)
+JIT_LOADOP(int16_t, uint16_t, AtomicLoad16SeqCst)
+JIT_LOADOP(uint16_t, uint16_t, AtomicLoad16SeqCst)
+JIT_LOADOP(int32_t, uint32_t, AtomicLoad32SeqCst)
+JIT_LOADOP(uint32_t, uint32_t, AtomicLoad32SeqCst)
+
+#ifdef JIT_LOADOP_CAS
+JIT_LOADOP_CAS(int64_t)
+JIT_LOADOP_CAS(uint64_t)
+#else
+JIT_LOADOP(int64_t, uint64_t, AtomicLoad64SeqCst)
+JIT_LOADOP(uint64_t, uint64_t, AtomicLoad64SeqCst)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_LOADOP
+#undef JIT_LOADOP_CAS
+
+#define JIT_STOREOP(T, U, storeop) \
+ template <> \
+ inline void AtomicOperations::storeSeqCst(T* addr, T val) { \
+ storeop((U*)addr, val); \
+ }
+
+#ifndef JS_64BIT
+# define JIT_STOREOP_CAS(T) \
+ template <> \
+ inline void AtomicOperations::storeSeqCst(T* addr, T val) { \
+ AtomicCompilerFence(); \
+ T oldval = *addr; /* good initial approximation */ \
+ for (;;) { \
+ T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr, \
+ (uint64_t)oldval, (uint64_t)val); \
+ if (nextval == oldval) { \
+ break; \
+ } \
+ oldval = nextval; \
+ } \
+ AtomicCompilerFence(); \
+ }
+#endif // !JS_64BIT
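Similarly, JIT_STOREOP_CAS turns an unconditional 64-bit store into a compare-exchange retry loop on 32-bit targets. A standalone std::atomic sketch of the same loop (illustrative, not the generated code):

```cpp
#include <atomic>
#include <cstdint>

// Keep proposing `val` against the last observed value until the
// compare-exchange succeeds; the net effect is a seq-cst store.
inline void StoreViaCmpXchg(std::atomic<int64_t>* addr, int64_t val) {
  int64_t observed = addr->load(std::memory_order_relaxed);  // initial guess
  while (!addr->compare_exchange_strong(observed, val,
                                        std::memory_order_seq_cst)) {
    // `observed` now holds the value that won the race; retry against it.
  }
}
```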
+
+namespace js {
+namespace jit {
+
+JIT_STOREOP(int8_t, uint8_t, AtomicStore8SeqCst)
+JIT_STOREOP(uint8_t, uint8_t, AtomicStore8SeqCst)
+JIT_STOREOP(int16_t, uint16_t, AtomicStore16SeqCst)
+JIT_STOREOP(uint16_t, uint16_t, AtomicStore16SeqCst)
+JIT_STOREOP(int32_t, uint32_t, AtomicStore32SeqCst)
+JIT_STOREOP(uint32_t, uint32_t, AtomicStore32SeqCst)
+
+#ifdef JIT_STOREOP_CAS
+JIT_STOREOP_CAS(int64_t)
+JIT_STOREOP_CAS(uint64_t)
+#else
+JIT_STOREOP(int64_t, uint64_t, AtomicStore64SeqCst)
+JIT_STOREOP(uint64_t, uint64_t, AtomicStore64SeqCst)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_STOREOP
+#undef JIT_STOREOP_CAS
+
+#define JIT_EXCHANGEOP(T, U, xchgop) \
+ template <> \
+ inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
+ return (T)xchgop((U*)addr, (U)val); \
+ }
+
+#ifndef JS_64BIT
+# define JIT_EXCHANGEOP_CAS(T) \
+ template <> \
+ inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
+ AtomicCompilerFence(); \
+ T oldval = *addr; \
+ for (;;) { \
+ T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr, \
+ (uint64_t)oldval, (uint64_t)val); \
+ if (nextval == oldval) { \
+ break; \
+ } \
+ oldval = nextval; \
+ } \
+ AtomicCompilerFence(); \
+ return oldval; \
+ }
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_EXCHANGEOP(int8_t, uint8_t, AtomicExchange8SeqCst)
+JIT_EXCHANGEOP(uint8_t, uint8_t, AtomicExchange8SeqCst)
+JIT_EXCHANGEOP(int16_t, uint16_t, AtomicExchange16SeqCst)
+JIT_EXCHANGEOP(uint16_t, uint16_t, AtomicExchange16SeqCst)
+JIT_EXCHANGEOP(int32_t, uint32_t, AtomicExchange32SeqCst)
+JIT_EXCHANGEOP(uint32_t, uint32_t, AtomicExchange32SeqCst)
+
+#ifdef JIT_EXCHANGEOP_CAS
+JIT_EXCHANGEOP_CAS(int64_t)
+JIT_EXCHANGEOP_CAS(uint64_t)
+#else
+JIT_EXCHANGEOP(int64_t, uint64_t, AtomicExchange64SeqCst)
+JIT_EXCHANGEOP(uint64_t, uint64_t, AtomicExchange64SeqCst)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_EXCHANGEOP
+#undef JIT_EXCHANGEOP_CAS
+
+#define JIT_CAS(T, U, cmpxchg) \
+ template <> \
+ inline T AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, \
+ T newval) { \
+ return (T)cmpxchg((U*)addr, (U)oldval, (U)newval); \
+ }
+
+namespace js {
+namespace jit {
+
+JIT_CAS(int8_t, uint8_t, AtomicCmpXchg8SeqCst)
+JIT_CAS(uint8_t, uint8_t, AtomicCmpXchg8SeqCst)
+JIT_CAS(int16_t, uint16_t, AtomicCmpXchg16SeqCst)
+JIT_CAS(uint16_t, uint16_t, AtomicCmpXchg16SeqCst)
+JIT_CAS(int32_t, uint32_t, AtomicCmpXchg32SeqCst)
+JIT_CAS(uint32_t, uint32_t, AtomicCmpXchg32SeqCst)
+JIT_CAS(int64_t, uint64_t, AtomicCmpXchg64SeqCst)
+JIT_CAS(uint64_t, uint64_t, AtomicCmpXchg64SeqCst)
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_CAS
+
+#define JIT_FETCHADDOP(T, U, xadd) \
+ template <> \
+ inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
+ return (T)xadd((U*)addr, (U)val); \
+ }
+
+#define JIT_FETCHSUBOP(T) \
+ template <> \
+ inline T AtomicOperations::fetchSubSeqCst(T* addr, T val) { \
+ return fetchAddSeqCst(addr, (T)(0 - val)); \
+ }
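JIT_FETCHSUBOP derives subtraction from addition: adding (T)(0 - val) wraps modulo 2^N in the unsigned representation, which is exactly a subtraction of val. A standalone check (illustrative only):

```cpp
#include <atomic>
#include <cstdint>

// fetch-sub expressed as fetch-add of the wrapped negation.
inline uint32_t FetchSubViaAdd(std::atomic<uint32_t>* addr, uint32_t val) {
  return addr->fetch_add(uint32_t(0) - val, std::memory_order_seq_cst);
}

// With *addr == 10 and val == 3, the call returns 10 and leaves 7 behind,
// because adding 0xFFFFFFFD modulo 2^32 is the same as subtracting 3.
```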
+
+#ifndef JS_64BIT
+# define JIT_FETCHADDOP_CAS(T) \
+ template <> \
+ inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
+ AtomicCompilerFence(); \
+ T oldval = *addr; /* Good initial approximation */ \
+ for (;;) { \
+ T nextval = (T)AtomicCmpXchg64SeqCst( \
+ (uint64_t*)addr, (uint64_t)oldval, (uint64_t)(oldval + val)); \
+ if (nextval == oldval) { \
+ break; \
+ } \
+ oldval = nextval; \
+ } \
+ AtomicCompilerFence(); \
+ return oldval; \
+ }
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_FETCHADDOP(int8_t, uint8_t, AtomicAdd8SeqCst)
+JIT_FETCHADDOP(uint8_t, uint8_t, AtomicAdd8SeqCst)
+JIT_FETCHADDOP(int16_t, uint16_t, AtomicAdd16SeqCst)
+JIT_FETCHADDOP(uint16_t, uint16_t, AtomicAdd16SeqCst)
+JIT_FETCHADDOP(int32_t, uint32_t, AtomicAdd32SeqCst)
+JIT_FETCHADDOP(uint32_t, uint32_t, AtomicAdd32SeqCst)
+
+#ifdef JIT_FETCHADDOP_CAS
+JIT_FETCHADDOP_CAS(int64_t)
+JIT_FETCHADDOP_CAS(uint64_t)
+#else
+JIT_FETCHADDOP(int64_t, uint64_t, AtomicAdd64SeqCst)
+JIT_FETCHADDOP(uint64_t, uint64_t, AtomicAdd64SeqCst)
+#endif
+
+JIT_FETCHSUBOP(int8_t)
+JIT_FETCHSUBOP(uint8_t)
+JIT_FETCHSUBOP(int16_t)
+JIT_FETCHSUBOP(uint16_t)
+JIT_FETCHSUBOP(int32_t)
+JIT_FETCHSUBOP(uint32_t)
+JIT_FETCHSUBOP(int64_t)
+JIT_FETCHSUBOP(uint64_t)
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_FETCHADDOP
+#undef JIT_FETCHADDOP_CAS
+#undef JIT_FETCHSUBOP
+
+#define JIT_FETCHBITOPX(T, U, name, op) \
+ template <> \
+ inline T AtomicOperations::name(T* addr, T val) { \
+ return (T)op((U*)addr, (U)val); \
+ }
+
+#define JIT_FETCHBITOP(T, U, andop, orop, xorop) \
+ JIT_FETCHBITOPX(T, U, fetchAndSeqCst, andop) \
+ JIT_FETCHBITOPX(T, U, fetchOrSeqCst, orop) \
+ JIT_FETCHBITOPX(T, U, fetchXorSeqCst, xorop)
+
+#ifndef JS_64BIT
+
+# define AND_OP &
+# define OR_OP |
+# define XOR_OP ^
+
+# define JIT_FETCHBITOPX_CAS(T, name, OP) \
+ template <> \
+ inline T AtomicOperations::name(T* addr, T val) { \
+ AtomicCompilerFence(); \
+ T oldval = *addr; \
+ for (;;) { \
+ T nextval = (T)AtomicCmpXchg64SeqCst( \
+ (uint64_t*)addr, (uint64_t)oldval, (uint64_t)(oldval OP val)); \
+ if (nextval == oldval) { \
+ break; \
+ } \
+ oldval = nextval; \
+ } \
+ AtomicCompilerFence(); \
+ return oldval; \
+ }
+
+# define JIT_FETCHBITOP_CAS(T) \
+ JIT_FETCHBITOPX_CAS(T, fetchAndSeqCst, AND_OP) \
+ JIT_FETCHBITOPX_CAS(T, fetchOrSeqCst, OR_OP) \
+ JIT_FETCHBITOPX_CAS(T, fetchXorSeqCst, XOR_OP)
+
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_FETCHBITOP(int8_t, uint8_t, AtomicAnd8SeqCst, AtomicOr8SeqCst,
+ AtomicXor8SeqCst)
+JIT_FETCHBITOP(uint8_t, uint8_t, AtomicAnd8SeqCst, AtomicOr8SeqCst,
+ AtomicXor8SeqCst)
+JIT_FETCHBITOP(int16_t, uint16_t, AtomicAnd16SeqCst, AtomicOr16SeqCst,
+ AtomicXor16SeqCst)
+JIT_FETCHBITOP(uint16_t, uint16_t, AtomicAnd16SeqCst, AtomicOr16SeqCst,
+ AtomicXor16SeqCst)
+JIT_FETCHBITOP(int32_t, uint32_t, AtomicAnd32SeqCst, AtomicOr32SeqCst,
+ AtomicXor32SeqCst)
+JIT_FETCHBITOP(uint32_t, uint32_t, AtomicAnd32SeqCst, AtomicOr32SeqCst,
+ AtomicXor32SeqCst)
+
+#ifdef JIT_FETCHBITOP_CAS
+JIT_FETCHBITOP_CAS(int64_t)
+JIT_FETCHBITOP_CAS(uint64_t)
+#else
+JIT_FETCHBITOP(int64_t, uint64_t, AtomicAnd64SeqCst, AtomicOr64SeqCst,
+ AtomicXor64SeqCst)
+JIT_FETCHBITOP(uint64_t, uint64_t, AtomicAnd64SeqCst, AtomicOr64SeqCst,
+ AtomicXor64SeqCst)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_FETCHBITOPX_CAS
+#undef JIT_FETCHBITOPX
+#undef JIT_FETCHBITOP_CAS
+#undef JIT_FETCHBITOP
+
+#define JIT_LOADSAFE(T, U, loadop) \
+ template <> \
+ inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) { \
+ union { \
+ U u; \
+ T t; \
+ }; \
+ u = loadop((U*)addr); \
+ return t; \
+ }
+
+#ifndef JS_64BIT
+# define JIT_LOADSAFE_TEARING(T) \
+ template <> \
+ inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) { \
+ MOZ_ASSERT(sizeof(T) == 8); \
+ union { \
+ uint32_t u[2]; \
+ T t; \
+ }; \
+ uint32_t* ptr = (uint32_t*)addr; \
+ u[0] = AtomicLoad32Unsynchronized(ptr); \
+ u[1] = AtomicLoad32Unsynchronized(ptr + 1); \
+ return t; \
+ }
+#endif // !JS_64BIT
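The LOADSAFE macros type-pun through a union, a long-standing GCC/Clang-supported idiom; the strictly standard-conforming equivalent is a memcpy-based bit cast, sketched here standalone for the float case:

```cpp
#include <cstdint>
#include <cstring>

// Reinterpret the bits of a racily loaded uint32_t as a float, equivalent in
// effect to the union punning in JIT_LOADSAFE(float, uint32_t, ...).
inline float BitsToFloat(uint32_t bits) {
  float f;
  static_assert(sizeof(f) == sizeof(bits), "bit cast requires equal sizes");
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}
```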
+
+namespace js {
+namespace jit {
+
+JIT_LOADSAFE(int8_t, uint8_t, AtomicLoad8Unsynchronized)
+JIT_LOADSAFE(uint8_t, uint8_t, AtomicLoad8Unsynchronized)
+JIT_LOADSAFE(int16_t, uint16_t, AtomicLoad16Unsynchronized)
+JIT_LOADSAFE(uint16_t, uint16_t, AtomicLoad16Unsynchronized)
+JIT_LOADSAFE(int32_t, uint32_t, AtomicLoad32Unsynchronized)
+JIT_LOADSAFE(uint32_t, uint32_t, AtomicLoad32Unsynchronized)
+#ifdef JIT_LOADSAFE_TEARING
+JIT_LOADSAFE_TEARING(int64_t)
+JIT_LOADSAFE_TEARING(uint64_t)
+JIT_LOADSAFE_TEARING(double)
+#else
+JIT_LOADSAFE(int64_t, uint64_t, AtomicLoad64Unsynchronized)
+JIT_LOADSAFE(uint64_t, uint64_t, AtomicLoad64Unsynchronized)
+JIT_LOADSAFE(double, uint64_t, AtomicLoad64Unsynchronized)
+#endif
+JIT_LOADSAFE(float, uint32_t, AtomicLoad32Unsynchronized)
+
+// Clang requires a specialization for uint8_clamped.
+template <>
+inline uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
+ uint8_clamped* addr) {
+ return uint8_clamped(loadSafeWhenRacy((uint8_t*)addr));
+}
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_LOADSAFE
+#undef JIT_LOADSAFE_TEARING
+
+#define JIT_STORESAFE(T, U, storeop) \
+ template <> \
+ inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) { \
+ union { \
+ U u; \
+ T t; \
+ }; \
+ t = val; \
+ storeop((U*)addr, u); \
+ }
+
+#ifndef JS_64BIT
+# define JIT_STORESAFE_TEARING(T) \
+ template <> \
+ inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) { \
+ union { \
+ uint32_t u[2]; \
+ T t; \
+ }; \
+ t = val; \
+ uint32_t* ptr = (uint32_t*)addr; \
+ AtomicStore32Unsynchronized(ptr, u[0]); \
+ AtomicStore32Unsynchronized(ptr + 1, u[1]); \
+ }
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_STORESAFE(int8_t, uint8_t, AtomicStore8Unsynchronized)
+JIT_STORESAFE(uint8_t, uint8_t, AtomicStore8Unsynchronized)
+JIT_STORESAFE(int16_t, uint16_t, AtomicStore16Unsynchronized)
+JIT_STORESAFE(uint16_t, uint16_t, AtomicStore16Unsynchronized)
+JIT_STORESAFE(int32_t, uint32_t, AtomicStore32Unsynchronized)
+JIT_STORESAFE(uint32_t, uint32_t, AtomicStore32Unsynchronized)
+#ifdef JIT_STORESAFE_TEARING
+JIT_STORESAFE_TEARING(int64_t)
+JIT_STORESAFE_TEARING(uint64_t)
+JIT_STORESAFE_TEARING(double)
+#else
+JIT_STORESAFE(int64_t, uint64_t, AtomicStore64Unsynchronized)
+JIT_STORESAFE(uint64_t, uint64_t, AtomicStore64Unsynchronized)
+JIT_STORESAFE(double, uint64_t, AtomicStore64Unsynchronized)
+#endif
+JIT_STORESAFE(float, uint32_t, AtomicStore32Unsynchronized)
+
+// Clang requires a specialization for uint8_clamped.
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr,
+ uint8_clamped val) {
+ storeSafeWhenRacy((uint8_t*)addr, (uint8_t)val);
+}
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_STORESAFE
+#undef JIT_STORESAFE_TEARING
+
+inline void js::jit::AtomicOperations::memcpySafeWhenRacy(
+    void* dest, const void* src, size_t nbytes) {
+ MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
+ MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
+ AtomicMemcpyDownUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
+}
+
+inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ if ((char*)dest <= (char*)src) {
+ AtomicMemcpyDownUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
+ } else {
+ AtomicMemcpyUpUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
+ }
+}
+
+#endif // jit_shared_AtomicOperations_shared_jit_h
diff --git a/js/src/jit/shared/CodeGenerator-shared-inl.h b/js/src/jit/shared/CodeGenerator-shared-inl.h
new file mode 100644
index 0000000000..1c9880f59f
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -0,0 +1,342 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_CodeGenerator_shared_inl_h
+#define jit_shared_CodeGenerator_shared_inl_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+#include "jit/JitFrames.h"
+#include "jit/ScalarTypeUtils.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+static inline bool IsConstant(const LInt64Allocation& a) {
+#if JS_BITS_PER_WORD == 32
+ if (a.high().isConstantValue()) {
+ return true;
+ }
+ if (a.high().isConstantIndex()) {
+ return true;
+ }
+#else
+ if (a.value().isConstantValue()) {
+ return true;
+ }
+ if (a.value().isConstantIndex()) {
+ return true;
+ }
+#endif
+ return false;
+}
+
+static inline int32_t ToInt32(const LAllocation* a) {
+ if (a->isConstantValue()) {
+ const MConstant* cst = a->toConstant();
+ if (cst->type() == MIRType::Int32) {
+ return cst->toInt32();
+ }
+ intptr_t val = cst->toIntPtr();
+ MOZ_ASSERT(INT32_MIN <= val && val <= INT32_MAX);
+ return int32_t(val);
+ }
+ if (a->isConstantIndex()) {
+ return a->toConstantIndex()->index();
+ }
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline int64_t ToInt64(const LAllocation* a) {
+ if (a->isConstantValue()) {
+ return a->toConstant()->toInt64();
+ }
+ if (a->isConstantIndex()) {
+ return a->toConstantIndex()->index();
+ }
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline int64_t ToInt64(const LInt64Allocation& a) {
+#if JS_BITS_PER_WORD == 32
+ if (a.high().isConstantValue()) {
+ return a.high().toConstant()->toInt64();
+ }
+ if (a.high().isConstantIndex()) {
+ return a.high().toConstantIndex()->index();
+ }
+#else
+ if (a.value().isConstantValue()) {
+ return a.value().toConstant()->toInt64();
+ }
+ if (a.value().isConstantIndex()) {
+ return a.value().toConstantIndex()->index();
+ }
+#endif
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline double ToDouble(const LAllocation* a) {
+ return a->toConstant()->numberToDouble();
+}
+
+static inline bool ToBoolean(const LAllocation* a) {
+ return a->toConstant()->toBoolean();
+}
+
+static inline Register ToRegister(const LAllocation& a) {
+ MOZ_ASSERT(a.isGeneralReg());
+ return a.toGeneralReg()->reg();
+}
+
+static inline Register ToRegister(const LAllocation* a) {
+ return ToRegister(*a);
+}
+
+static inline Register ToRegister(const LDefinition* def) {
+ return ToRegister(*def->output());
+}
+
+static inline Register64 ToOutRegister64(LInstruction* ins) {
+#if JS_BITS_PER_WORD == 32
+ Register loReg = ToRegister(ins->getDef(INT64LOW_INDEX));
+ Register hiReg = ToRegister(ins->getDef(INT64HIGH_INDEX));
+ return Register64(hiReg, loReg);
+#else
+ return Register64(ToRegister(ins->getDef(0)));
+#endif
+}
+
+static inline Register64 ToRegister64(const LInt64Allocation& a) {
+#if JS_BITS_PER_WORD == 32
+ return Register64(ToRegister(a.high()), ToRegister(a.low()));
+#else
+ return Register64(ToRegister(a.value()));
+#endif
+}
+
+static inline Register64 ToRegister64(const LInt64Definition& a) {
+#if JS_BITS_PER_WORD == 32
+ return Register64(ToRegister(a.pointerHigh()), ToRegister(a.pointerLow()));
+#else
+ return Register64(ToRegister(a.pointer()));
+#endif
+}
+
+static inline Register ToTempRegisterOrInvalid(const LDefinition* def) {
+ if (def->isBogusTemp()) {
+ return InvalidReg;
+ }
+ return ToRegister(def);
+}
+
+static inline Register64 ToTempRegister64OrInvalid(
+ const LInt64Definition& def) {
+ if (def.isBogusTemp()) {
+ return Register64::Invalid();
+ }
+ return ToRegister64(def);
+}
+
+static inline Register ToTempUnboxRegister(const LDefinition* def) {
+ return ToTempRegisterOrInvalid(def);
+}
+
+static inline Register ToRegisterOrInvalid(const LDefinition* a) {
+ return a ? ToRegister(a) : InvalidReg;
+}
+
+static inline FloatRegister ToFloatRegister(const LAllocation& a) {
+ MOZ_ASSERT(a.isFloatReg());
+ return a.toFloatReg()->reg();
+}
+
+static inline FloatRegister ToFloatRegister(const LAllocation* a) {
+ return ToFloatRegister(*a);
+}
+
+static inline FloatRegister ToFloatRegister(const LDefinition* def) {
+ return ToFloatRegister(*def->output());
+}
+
+static inline FloatRegister ToTempFloatRegisterOrInvalid(
+ const LDefinition* def) {
+ if (def->isBogusTemp()) {
+ return InvalidFloatReg;
+ }
+ return ToFloatRegister(def);
+}
+
+static inline AnyRegister ToAnyRegister(const LAllocation& a) {
+ MOZ_ASSERT(a.isGeneralReg() || a.isFloatReg());
+ if (a.isGeneralReg()) {
+ return AnyRegister(ToRegister(a));
+ }
+ return AnyRegister(ToFloatRegister(a));
+}
+
+static inline AnyRegister ToAnyRegister(const LAllocation* a) {
+ return ToAnyRegister(*a);
+}
+
+static inline AnyRegister ToAnyRegister(const LDefinition* def) {
+ return ToAnyRegister(def->output());
+}
+
+static inline ValueOperand ToOutValue(LInstruction* ins) {
+#if defined(JS_NUNBOX32)
+ return ValueOperand(ToRegister(ins->getDef(TYPE_INDEX)),
+ ToRegister(ins->getDef(PAYLOAD_INDEX)));
+#elif defined(JS_PUNBOX64)
+ return ValueOperand(ToRegister(ins->getDef(0)));
+#else
+# error "Unknown"
+#endif
+}
+
+static inline ValueOperand GetTempValue(Register type, Register payload) {
+#if defined(JS_NUNBOX32)
+ return ValueOperand(type, payload);
+#elif defined(JS_PUNBOX64)
+ (void)type;
+ return ValueOperand(payload);
+#else
+# error "Unknown"
+#endif
+}
+
+// For argument construction for calls. Argslots are Value-sized.
+Address CodeGeneratorShared::AddressOfPassedArg(uint32_t slot) const {
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+
+ MOZ_ASSERT(slot > 0);
+ MOZ_ASSERT(slot <= graph.argumentSlotCount());
+
+ uint32_t offsetFromBase = offsetOfPassedArgSlots_ + slot * sizeof(Value);
+ MOZ_ASSERT(offsetFromBase <= frameSize());
+
+ // Space for passed arguments is reserved below a function's local stack
+  // storage. Note that offsetOfPassedArgSlots_ is aligned to at least
+ // sizeof(Value) to ensure proper alignment.
+ MOZ_ASSERT((offsetFromBase % sizeof(Value)) == 0);
+
+ if (JitOptions.baseRegForLocals == BaseRegForAddress::SP) {
+ return Address(masm.getStackPointer(), frameSize() - offsetFromBase);
+ }
+ MOZ_ASSERT(JitOptions.baseRegForLocals == BaseRegForAddress::FP);
+ return Address(FramePointer, -int32_t(offsetFromBase));
+}
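A worked example of the offset arithmetic above, with made-up numbers (frameSize() == 128, offsetOfPassedArgSlots_ == 96, sizeof(Value) == 8): argument slot 2 sits 112 bytes above the frame base, which is the same byte whether it is addressed as sp + 16 or as fp - 112, because fp == sp + frameSize() after the prologue.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative arithmetic only; all constants are assumptions, not values
// taken from the patch.
int main() {
  const uint32_t frameSize = 128;
  const uint32_t offsetOfPassedArgSlots = 96;
  const uint32_t slot = 2;
  const uint32_t sizeOfValue = 8;

  uint32_t offsetFromBase = offsetOfPassedArgSlots + slot * sizeOfValue;  // 112
  uint32_t spOffset = frameSize - offsetFromBase;                         // 16
  int32_t fpOffset = -int32_t(offsetFromBase);                            // -112

  // sp + 16 and fp - 112 name the same byte because fp == sp + 128.
  std::printf("sp+%u == fp%d (given fp == sp+%u)\n", spOffset, fpOffset,
              frameSize);
  return 0;
}
```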
+
+uint32_t CodeGeneratorShared::UnusedStackBytesForCall(
+ uint32_t numArgSlots) const {
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+ MOZ_ASSERT(numArgSlots <= graph.argumentSlotCount());
+ uint32_t unusedArgSlots = graph.argumentSlotCount() - numArgSlots;
+ return unusedArgSlots * sizeof(Value);
+}
+
+template <BaseRegForAddress Base>
+Address CodeGeneratorShared::ToAddress(const LAllocation& a) const {
+ MOZ_ASSERT(a.isMemory() || a.isStackArea());
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+
+ if (a.isArgument()) {
+ // Use the frame pointer, unless the caller explicitly requested a
+ // stack-pointer-relative address.
+ uint32_t offsetFromFP = offsetOfArgsFromFP_ + a.toArgument()->index();
+ if constexpr (Base == BaseRegForAddress::SP) {
+ return Address(masm.getStackPointer(), frameSize() + offsetFromFP);
+ } else {
+ static_assert(Base == BaseRegForAddress::Default ||
+ Base == BaseRegForAddress::FP);
+ return Address(FramePointer, offsetFromFP);
+ }
+ }
+
+ uint32_t slot =
+ a.isStackSlot() ? a.toStackSlot()->slot() : a.toStackArea()->base();
+ MOZ_ASSERT(slot > 0 && slot <= graph.localSlotsSize());
+ MOZ_ASSERT(slot <= frameSize());
+
+ BaseRegForAddress base = Base;
+ if constexpr (Base == BaseRegForAddress::Default) {
+ base = JitOptions.baseRegForLocals;
+ }
+
+ if (base == BaseRegForAddress::FP) {
+ return Address(FramePointer, -int32_t(slot));
+ }
+ MOZ_ASSERT(base == BaseRegForAddress::SP);
+ return Address(masm.getStackPointer(), frameSize() - slot);
+}
+
+template <BaseRegForAddress Base>
+Address CodeGeneratorShared::ToAddress(const LAllocation* a) const {
+ return ToAddress<Base>(*a);
+}
+
+// static
+Address CodeGeneratorShared::ToAddress(Register elements,
+ const LAllocation* index,
+ Scalar::Type type,
+ int32_t offsetAdjustment) {
+ int32_t idx = ToInt32(index);
+ int32_t offset;
+ MOZ_ALWAYS_TRUE(ArrayOffsetFitsInInt32(idx, type, offsetAdjustment, &offset));
+ return Address(elements, offset);
+}
+
+void CodeGeneratorShared::saveLive(LInstruction* ins) {
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PushRegsInMask(safepoint->liveRegs());
+}
+
+void CodeGeneratorShared::restoreLive(LInstruction* ins) {
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PopRegsInMask(safepoint->liveRegs());
+}
+
+void CodeGeneratorShared::restoreLiveIgnore(LInstruction* ins,
+ LiveRegisterSet ignore) {
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PopRegsInMaskIgnore(safepoint->liveRegs(), ignore);
+}
+
+LiveRegisterSet CodeGeneratorShared::liveVolatileRegs(LInstruction* ins) {
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ LiveRegisterSet regs;
+ regs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(),
+ RegisterSet::Volatile());
+ return regs;
+}
+
+void CodeGeneratorShared::saveLiveVolatile(LInstruction* ins) {
+ LiveRegisterSet regs = liveVolatileRegs(ins);
+ masm.PushRegsInMask(regs);
+}
+
+void CodeGeneratorShared::restoreLiveVolatile(LInstruction* ins) {
+ LiveRegisterSet regs = liveVolatileRegs(ins);
+ masm.PopRegsInMask(regs);
+}
+
+inline bool CodeGeneratorShared::isGlobalObject(JSObject* object) {
+  // Calling object->is<GlobalObject>() is racy because it relies on checking
+  // the object's group, which can change while we are compiling off the main
+  // thread. Note that we only check against the script realm's global here.
+ return object == gen->realm->maybeGlobal();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_CodeGenerator_shared_inl_h */
diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp
new file mode 100644
index 0000000000..04a8baa752
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -0,0 +1,983 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+#include "mozilla/DebugOnly.h"
+
+#include <utility>
+
+#include "jit/CodeGenerator.h"
+#include "jit/CompactBuffer.h"
+#include "jit/CompileInfo.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitFrames.h"
+#include "jit/JitSpewer.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/SafepointIndex.h"
+#include "js/Conversions.h"
+#include "util/Memory.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+
+namespace js {
+namespace jit {
+
+MacroAssembler& CodeGeneratorShared::ensureMasm(MacroAssembler* masmArg,
+ TempAllocator& alloc,
+ CompileRealm* realm) {
+ if (masmArg) {
+ return *masmArg;
+ }
+ maybeMasm_.emplace(alloc, realm);
+ return *maybeMasm_;
+}
+
+CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masmArg)
+ : maybeMasm_(),
+ masm(ensureMasm(masmArg, gen->alloc(), gen->realm)),
+ gen(gen),
+ graph(*graph),
+ current(nullptr),
+ snapshots_(),
+ recovers_(),
+#ifdef DEBUG
+ pushedArgs_(0),
+#endif
+ lastOsiPointOffset_(0),
+ safepoints_(graph->localSlotsSize(),
+ (gen->outerInfo().nargs() + 1) * sizeof(Value)),
+ returnLabel_(),
+ nativeToBytecodeMap_(nullptr),
+ nativeToBytecodeMapSize_(0),
+ nativeToBytecodeTableOffset_(0),
+#ifdef CHECK_OSIPOINT_REGISTERS
+ checkOsiPointRegisters(JitOptions.checkOsiPointRegisters),
+#endif
+ frameDepth_(0) {
+ if (gen->isProfilerInstrumentationEnabled()) {
+ masm.enableProfilingInstrumentation();
+ }
+
+ if (gen->compilingWasm()) {
+ offsetOfArgsFromFP_ = sizeof(wasm::Frame);
+
+#ifdef JS_CODEGEN_ARM64
+ // Ensure SP is aligned to 16 bytes.
+ frameDepth_ = AlignBytes(graph->localSlotsSize(), WasmStackAlignment);
+#else
+ frameDepth_ = AlignBytes(graph->localSlotsSize(), sizeof(uintptr_t));
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
+ defined(JS_CODEGEN_ARM64)
+ // On X64/x86 and ARM64, we don't need alignment for Wasm SIMD at this time.
+# else
+# error \
+ "we may need padding so that local slots are SIMD-aligned and the stack must be kept SIMD-aligned too."
+# endif
+#endif
+
+ if (gen->needsStaticStackAlignment()) {
+ // Since wasm uses the system ABI which does not necessarily use a
+ // regular array where all slots are sizeof(Value), it maintains the max
+ // argument stack depth separately.
+ MOZ_ASSERT(graph->argumentSlotCount() == 0);
+ frameDepth_ += gen->wasmMaxStackArgBytes();
+
+      // An MWasmCall does not align the stack pointer at call sites, but
+ // instead relies on the a priori stack adjustment. This must be the
+ // last adjustment of frameDepth_.
+ frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
+ WasmStackAlignment);
+ }
+
+#ifdef JS_CODEGEN_ARM64
+ MOZ_ASSERT((frameDepth_ % WasmStackAlignment) == 0,
+ "Trap exit stub needs 16-byte aligned stack pointer");
+#endif
+ } else {
+ offsetOfArgsFromFP_ = sizeof(JitFrameLayout);
+
+ // Allocate space for local slots (register allocator spills). Round to
+ // JitStackAlignment, and implicitly to sizeof(Value) as JitStackAlignment
+ // is a multiple of sizeof(Value). This was originally implemented for
+ // SIMD.js, but now lets us use faster ABI calls via setupAlignedABICall.
+ frameDepth_ = AlignBytes(graph->localSlotsSize(), JitStackAlignment);
+
+ // Allocate space for argument Values passed to callee functions.
+ offsetOfPassedArgSlots_ = frameDepth_;
+ MOZ_ASSERT((offsetOfPassedArgSlots_ % sizeof(JS::Value)) == 0);
+ frameDepth_ += graph->argumentSlotCount() * sizeof(JS::Value);
+
+ MOZ_ASSERT((frameDepth_ % JitStackAlignment) == 0);
+ }
+}
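The final wasm adjustment above pads frameDepth_ so that sizeof(wasm::Frame) plus the frame is a multiple of WasmStackAlignment. A compile-time sketch with assumed constants (sizeof(wasm::Frame) == 16, WasmStackAlignment == 16, and 40 bytes of locals plus outgoing arguments):

```cpp
#include <cstdint>

// Illustrative reimplementation of the padding step; the function returns how
// many bytes must be added to reach the next multiple of `align`.
constexpr uint32_t ComputeByteAlignmentSketch(uint32_t bytes, uint32_t align) {
  return (align - (bytes % align)) % align;
}

constexpr uint32_t kFrameHeader = 16;  // assumed sizeof(wasm::Frame)
constexpr uint32_t kAlignment = 16;    // assumed WasmStackAlignment
constexpr uint32_t kDepthBefore = 40;  // locals + max outgoing stack args

// 16 + 40 == 56; the next multiple of 16 is 64, so 8 bytes of padding are
// added and header + frame ends up 16-byte aligned at call sites.
constexpr uint32_t kPadding =
    ComputeByteAlignmentSketch(kFrameHeader + kDepthBefore, kAlignment);
static_assert(kPadding == 8, "worked example");
static_assert((kFrameHeader + kDepthBefore + kPadding) % kAlignment == 0,
              "aligned after padding");
```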
+
+bool CodeGeneratorShared::generatePrologue() {
+ MOZ_ASSERT(masm.framePushed() == 0);
+ MOZ_ASSERT(!gen->compilingWasm());
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ // Frame prologue.
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ // Ensure that the Ion frame is properly aligned.
+ masm.assertStackAlignment(JitStackAlignment, 0);
+
+ // If profiling, save the current frame pointer to a per-thread global field.
+ if (isProfilerInstrumentationEnabled()) {
+ masm.profilerEnterFrame(FramePointer, CallTempReg0);
+ }
+
+ // Note that this automatically sets MacroAssembler::framePushed().
+ masm.reserveStack(frameSize());
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+ masm.checkStackAlignment();
+
+ return true;
+}
+
+bool CodeGeneratorShared::generateEpilogue() {
+ MOZ_ASSERT(!gen->compilingWasm());
+ masm.bind(&returnLabel_);
+
+ // If profiling, jump to a trampoline to reset the JitActivation's
+ // lastProfilingFrame to point to the previous frame and return to the caller.
+ if (isProfilerInstrumentationEnabled()) {
+ masm.profilerExitFrame();
+ }
+
+ MOZ_ASSERT(masm.framePushed() == frameSize());
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.setFramePushed(0);
+
+ masm.ret();
+
+ // On systems that use a constant pool, this is a good time to emit.
+ masm.flushBuffer();
+ return true;
+}
+
+bool CodeGeneratorShared::generateOutOfLineCode() {
+ AutoCreatedBy acb(masm, "CodeGeneratorShared::generateOutOfLineCode");
+
+ // OOL paths should not attempt to use |current| as it's the last block
+ // instead of the block corresponding to the OOL path.
+ current = nullptr;
+
+ for (size_t i = 0; i < outOfLineCode_.length(); i++) {
+ // Add native => bytecode mapping entries for OOL sites.
+ // Not enabled on wasm yet since it doesn't contain bytecode mappings.
+ if (!gen->compilingWasm()) {
+ if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite())) {
+ return false;
+ }
+ }
+
+ if (!gen->alloc().ensureBallast()) {
+ return false;
+ }
+
+ JitSpew(JitSpew_Codegen, "# Emitting out of line code");
+
+ masm.setFramePushed(outOfLineCode_[i]->framePushed());
+ outOfLineCode_[i]->bind(&masm);
+
+ outOfLineCode_[i]->generate(this);
+ }
+
+ return !masm.oom();
+}
+
+void CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code,
+ const MInstruction* mir) {
+ MOZ_ASSERT(mir);
+ addOutOfLineCode(code, mir->trackedSite());
+}
+
+void CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code,
+ const BytecodeSite* site) {
+ MOZ_ASSERT_IF(!gen->compilingWasm(), site->script()->containsPC(site->pc()));
+ code->setFramePushed(masm.framePushed());
+ code->setBytecodeSite(site);
+ masm.propagateOOM(outOfLineCode_.append(code));
+}
+
+bool CodeGeneratorShared::addNativeToBytecodeEntry(const BytecodeSite* site) {
+ MOZ_ASSERT(site);
+ MOZ_ASSERT(site->tree());
+ MOZ_ASSERT(site->pc());
+
+ // Skip the table entirely if profiling is not enabled.
+ if (!isProfilerInstrumentationEnabled()) {
+ return true;
+ }
+
+  // Fail early if the last added instruction caused the macro assembler to
+  // run out of memory, as the continuity assumptions below do not hold.
+ if (masm.oom()) {
+ return false;
+ }
+
+ InlineScriptTree* tree = site->tree();
+ jsbytecode* pc = site->pc();
+ uint32_t nativeOffset = masm.currentOffset();
+
+ MOZ_ASSERT_IF(nativeToBytecodeList_.empty(), nativeOffset == 0);
+
+ if (!nativeToBytecodeList_.empty()) {
+ size_t lastIdx = nativeToBytecodeList_.length() - 1;
+ NativeToBytecode& lastEntry = nativeToBytecodeList_[lastIdx];
+
+ MOZ_ASSERT(nativeOffset >= lastEntry.nativeOffset.offset());
+
+ // If the new entry is for the same inlineScriptTree and same
+ // bytecodeOffset, but the nativeOffset has changed, do nothing.
+ // The same site just generated some more code.
+ if (lastEntry.tree == tree && lastEntry.pc == pc) {
+ JitSpew(JitSpew_Profiling, " => In-place update [%zu-%" PRIu32 "]",
+ lastEntry.nativeOffset.offset(), nativeOffset);
+ return true;
+ }
+
+ // If the new entry is for the same native offset, then update the
+ // previous entry with the new bytecode site, since the previous
+ // bytecode site did not generate any native code.
+ if (lastEntry.nativeOffset.offset() == nativeOffset) {
+ lastEntry.tree = tree;
+ lastEntry.pc = pc;
+ JitSpew(JitSpew_Profiling, " => Overwriting zero-length native region.");
+
+ // This overwrite might have made the entry merge-able with a
+ // previous one. If so, merge it.
+ if (lastIdx > 0) {
+ NativeToBytecode& nextToLastEntry = nativeToBytecodeList_[lastIdx - 1];
+ if (nextToLastEntry.tree == lastEntry.tree &&
+ nextToLastEntry.pc == lastEntry.pc) {
+ JitSpew(JitSpew_Profiling, " => Merging with previous region");
+ nativeToBytecodeList_.erase(&lastEntry);
+ }
+ }
+
+ dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
+ return true;
+ }
+ }
+
+ // Otherwise, some native code was generated for the previous bytecode site.
+ // Add a new entry for code that is about to be generated.
+ NativeToBytecode entry;
+ entry.nativeOffset = CodeOffset(nativeOffset);
+ entry.tree = tree;
+ entry.pc = pc;
+ if (!nativeToBytecodeList_.append(entry)) {
+ return false;
+ }
+
+ JitSpew(JitSpew_Profiling, " => Push new entry.");
+ dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
+ return true;
+}
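The three coalescing cases above (same site extends the last region; a zero-length region is overwritten and possibly merged backwards; otherwise a new region starts) can be modelled with a standalone sketch that uses ad hoc types rather than the real NativeToBytecode entries:

```cpp
#include <cstdint>
#include <vector>

// Simplified model of the coalescing rules; `site` stands in for the
// (InlineScriptTree*, pc) pair of the real entries.
struct Entry {
  uint32_t nativeOffset;
  const void* site;
};

static void AddEntry(std::vector<Entry>& entries, uint32_t nativeOffset,
                     const void* site) {
  if (!entries.empty()) {
    Entry& last = entries.back();
    if (last.site == site) {
      return;  // Case 1: same site emitted more code; keep its start offset.
    }
    if (last.nativeOffset == nativeOffset) {
      last.site = site;  // Case 2: previous site emitted no code; overwrite it.
      if (entries.size() >= 2 && entries[entries.size() - 2].site == site) {
        entries.pop_back();  // ...and merge with the identical entry before it.
      }
      return;
    }
  }
  entries.push_back({nativeOffset, site});  // Case 3: start a new region.
}
```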
+
+void CodeGeneratorShared::dumpNativeToBytecodeEntries() {
+#ifdef JS_JITSPEW
+ InlineScriptTree* topTree = gen->outerInfo().inlineScriptTree();
+ JitSpewStart(JitSpew_Profiling, "Native To Bytecode Entries for %s:%u:%u\n",
+ topTree->script()->filename(), topTree->script()->lineno(),
+ topTree->script()->column());
+ for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++) {
+ dumpNativeToBytecodeEntry(i);
+ }
+#endif
+}
+
+void CodeGeneratorShared::dumpNativeToBytecodeEntry(uint32_t idx) {
+#ifdef JS_JITSPEW
+ NativeToBytecode& ref = nativeToBytecodeList_[idx];
+ InlineScriptTree* tree = ref.tree;
+ JSScript* script = tree->script();
+ uint32_t nativeOffset = ref.nativeOffset.offset();
+ unsigned nativeDelta = 0;
+ unsigned pcDelta = 0;
+ if (idx + 1 < nativeToBytecodeList_.length()) {
+ NativeToBytecode* nextRef = &ref + 1;
+ nativeDelta = nextRef->nativeOffset.offset() - nativeOffset;
+ if (nextRef->tree == ref.tree) {
+ pcDelta = nextRef->pc - ref.pc;
+ }
+ }
+ JitSpewStart(
+ JitSpew_Profiling, " %08zx [+%-6u] => %-6ld [%-4u] {%-10s} (%s:%u:%u",
+ ref.nativeOffset.offset(), nativeDelta, (long)(ref.pc - script->code()),
+ pcDelta, CodeName(JSOp(*ref.pc)), script->filename(), script->lineno(),
+ script->column());
+
+ for (tree = tree->caller(); tree; tree = tree->caller()) {
+ JitSpewCont(JitSpew_Profiling, " <= %s:%u:%u", tree->script()->filename(),
+ tree->script()->lineno(), tree->script()->column());
+ }
+ JitSpewCont(JitSpew_Profiling, ")");
+ JitSpewFin(JitSpew_Profiling);
+#endif
+}
+
+// see OffsetOfFrameSlot
+static inline int32_t ToStackIndex(LAllocation* a) {
+ if (a->isStackSlot()) {
+ MOZ_ASSERT(a->toStackSlot()->slot() >= 1);
+ return a->toStackSlot()->slot();
+ }
+ return -int32_t(sizeof(JitFrameLayout) + a->toArgument()->index());
+}
+
+void CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot,
+ MDefinition* mir,
+ uint32_t* allocIndex) {
+ if (mir->isBox()) {
+ mir = mir->toBox()->getOperand(0);
+ }
+
+ MIRType type = mir->isRecoveredOnBailout() ? MIRType::None
+ : mir->isUnused() ? MIRType::MagicOptimizedOut
+ : mir->type();
+
+ RValueAllocation alloc;
+
+ switch (type) {
+ case MIRType::None: {
+ MOZ_ASSERT(mir->isRecoveredOnBailout());
+ uint32_t index = 0;
+ LRecoverInfo* recoverInfo = snapshot->recoverInfo();
+ MNode** it = recoverInfo->begin();
+ MNode** end = recoverInfo->end();
+ while (it != end && mir != *it) {
+ ++it;
+ ++index;
+ }
+
+ // This MDefinition is recovered, thus it should be listed in the
+ // LRecoverInfo.
+ MOZ_ASSERT(it != end && mir == *it);
+
+ // Lambda should have a default value readable for iterating over the
+ // inner frames.
+ MConstant* functionOperand = nullptr;
+ if (mir->isLambda()) {
+ functionOperand = mir->toLambda()->functionOperand();
+ } else if (mir->isFunctionWithProto()) {
+ functionOperand = mir->toFunctionWithProto()->functionOperand();
+ }
+ if (functionOperand) {
+ uint32_t cstIndex;
+ masm.propagateOOM(
+ graph.addConstantToPool(functionOperand->toJSValue(), &cstIndex));
+ alloc = RValueAllocation::RecoverInstruction(index, cstIndex);
+ break;
+ }
+
+ alloc = RValueAllocation::RecoverInstruction(index);
+ break;
+ }
+ case MIRType::Undefined:
+ alloc = RValueAllocation::Undefined();
+ break;
+ case MIRType::Null:
+ alloc = RValueAllocation::Null();
+ break;
+ case MIRType::Int32:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ case MIRType::Object:
+ case MIRType::Shape:
+ case MIRType::Boolean:
+ case MIRType::Double: {
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+ if (payload->isConstant()) {
+ MConstant* constant = mir->toConstant();
+ uint32_t index;
+ masm.propagateOOM(
+ graph.addConstantToPool(constant->toJSValue(), &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+
+ JSValueType valueType = ValueTypeFromMIRType(type);
+
+ MOZ_DIAGNOSTIC_ASSERT(payload->isMemory() || payload->isRegister());
+ if (payload->isMemory()) {
+ alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
+ } else if (payload->isGeneralReg()) {
+ alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
+ } else if (payload->isFloatReg()) {
+ alloc = RValueAllocation::Double(ToFloatRegister(payload));
+ } else {
+ MOZ_CRASH("Unexpected payload type.");
+ }
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Simd128: {
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+ if (payload->isConstant()) {
+ MConstant* constant = mir->toConstant();
+ uint32_t index;
+ masm.propagateOOM(
+ graph.addConstantToPool(constant->toJSValue(), &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+
+ MOZ_ASSERT(payload->isMemory() || payload->isFloatReg());
+ if (payload->isFloatReg()) {
+ alloc = RValueAllocation::AnyFloat(ToFloatRegister(payload));
+ } else {
+ alloc = RValueAllocation::AnyFloat(ToStackIndex(payload));
+ }
+ break;
+ }
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicUninitializedLexical:
+ case MIRType::MagicIsConstructing: {
+ uint32_t index;
+ JSWhyMagic why = JS_GENERIC_MAGIC;
+ switch (type) {
+ case MIRType::MagicOptimizedOut:
+ why = JS_OPTIMIZED_OUT;
+ break;
+ case MIRType::MagicUninitializedLexical:
+ why = JS_UNINITIALIZED_LEXICAL;
+ break;
+ case MIRType::MagicIsConstructing:
+ why = JS_IS_CONSTRUCTING;
+ break;
+ default:
+ MOZ_CRASH("Invalid Magic MIRType");
+ }
+
+ Value v = MagicValue(why);
+ masm.propagateOOM(graph.addConstantToPool(v, &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+ default: {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+#ifdef JS_NUNBOX32
+ LAllocation* type = snapshot->typeOfSlot(*allocIndex);
+ if (type->isRegister()) {
+ if (payload->isRegister()) {
+ alloc =
+ RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
+ } else {
+ alloc = RValueAllocation::Untyped(ToRegister(type),
+ ToStackIndex(payload));
+ }
+ } else {
+ if (payload->isRegister()) {
+ alloc = RValueAllocation::Untyped(ToStackIndex(type),
+ ToRegister(payload));
+ } else {
+ alloc = RValueAllocation::Untyped(ToStackIndex(type),
+ ToStackIndex(payload));
+ }
+ }
+#elif JS_PUNBOX64
+ if (payload->isRegister()) {
+ alloc = RValueAllocation::Untyped(ToRegister(payload));
+ } else {
+ alloc = RValueAllocation::Untyped(ToStackIndex(payload));
+ }
+#endif
+ break;
+ }
+ }
+ MOZ_DIAGNOSTIC_ASSERT(alloc.valid());
+
+  // This sets an extra bit on the RValueAllocation so that we know the
+  // recover instruction has to be executed, without having to wrap the
+  // instruction in a no-op recover instruction.
+ if (mir->isIncompleteObject()) {
+ alloc.setNeedSideEffect();
+ }
+
+ masm.propagateOOM(snapshots_.add(alloc));
+
+ *allocIndex += mir->isRecoveredOnBailout() ? 0 : 1;
+}
+
+void CodeGeneratorShared::encode(LRecoverInfo* recover) {
+ if (recover->recoverOffset() != INVALID_RECOVER_OFFSET) {
+ return;
+ }
+
+ uint32_t numInstructions = recover->numInstructions();
+ JitSpew(JitSpew_IonSnapshots,
+ "Encoding LRecoverInfo %p (frameCount %u, instructions %u)",
+ (void*)recover, recover->mir()->frameCount(), numInstructions);
+
+ RecoverOffset offset = recovers_.startRecover(numInstructions);
+
+ for (MNode* insn : *recover) {
+ recovers_.writeInstruction(insn);
+ }
+
+ recovers_.endRecover();
+ recover->setRecoverOffset(offset);
+ masm.propagateOOM(!recovers_.oom());
+}
+
+void CodeGeneratorShared::encode(LSnapshot* snapshot) {
+ if (snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET) {
+ return;
+ }
+
+ LRecoverInfo* recoverInfo = snapshot->recoverInfo();
+ encode(recoverInfo);
+
+ RecoverOffset recoverOffset = recoverInfo->recoverOffset();
+ MOZ_ASSERT(recoverOffset != INVALID_RECOVER_OFFSET);
+
+ JitSpew(JitSpew_IonSnapshots, "Encoding LSnapshot %p (LRecover %p)",
+ (void*)snapshot, (void*)recoverInfo);
+
+ SnapshotOffset offset =
+ snapshots_.startSnapshot(recoverOffset, snapshot->bailoutKind());
+
+#ifdef TRACK_SNAPSHOTS
+ uint32_t pcOpcode = 0;
+ uint32_t lirOpcode = 0;
+ uint32_t lirId = 0;
+ uint32_t mirOpcode = 0;
+ uint32_t mirId = 0;
+
+ if (LInstruction* ins = instruction()) {
+ lirOpcode = uint32_t(ins->op());
+ lirId = ins->id();
+ if (MDefinition* mir = ins->mirRaw()) {
+ mirOpcode = uint32_t(mir->op());
+ mirId = mir->id();
+ if (jsbytecode* pc = mir->trackedSite()->pc()) {
+ pcOpcode = *pc;
+ }
+ }
+ }
+ snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
+#endif
+
+ uint32_t allocIndex = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ DebugOnly<uint32_t> allocWritten = snapshots_.allocWritten();
+ encodeAllocation(snapshot, *it, &allocIndex);
+ MOZ_ASSERT_IF(!snapshots_.oom(),
+ allocWritten + 1 == snapshots_.allocWritten());
+ }
+
+ MOZ_ASSERT(allocIndex == snapshot->numSlots());
+ snapshots_.endSnapshot();
+ snapshot->setSnapshotOffset(offset);
+ masm.propagateOOM(!snapshots_.oom());
+}
+
+bool CodeGeneratorShared::encodeSafepoints() {
+ for (CodegenSafepointIndex& index : safepointIndices_) {
+ LSafepoint* safepoint = index.safepoint();
+
+ if (!safepoint->encoded()) {
+ safepoints_.encode(safepoint);
+ }
+ }
+
+ return !safepoints_.oom();
+}
+
+bool CodeGeneratorShared::createNativeToBytecodeScriptList(
+ JSContext* cx, IonEntry::ScriptList& scripts) {
+ MOZ_ASSERT(scripts.empty());
+
+ InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
+ for (;;) {
+ // Add script from current tree.
+ bool found = false;
+ for (uint32_t i = 0; i < scripts.length(); i++) {
+ if (scripts[i].script == tree->script()) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ UniqueChars str =
+ GeckoProfilerRuntime::allocProfileString(cx, tree->script());
+ if (!str) {
+ return false;
+ }
+ if (!scripts.emplaceBack(tree->script(), std::move(str))) {
+ return false;
+ }
+ }
+
+    // Process the rest of the tree.
+
+ // If children exist, emit children.
+ if (tree->hasChildren()) {
+ tree = tree->firstChild();
+ continue;
+ }
+
+ // Otherwise, find the first tree up the chain (including this one)
+ // that contains a next sibling.
+ while (!tree->hasNextCallee() && tree->hasCaller()) {
+ tree = tree->caller();
+ }
+
+ // If we found a sibling, use it.
+ if (tree->hasNextCallee()) {
+ tree = tree->nextCallee();
+ continue;
+ }
+
+ // Otherwise, we must have reached the top without finding any siblings.
+ MOZ_ASSERT(tree->isOutermostCaller());
+ break;
+ }
+
+ return true;
+}
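+
+// Illustrative traversal sketch: for an inline script tree
+//
+//   outer
+//     +- A
+//     |   +- C
+//     +- B
+//
+// the loop above visits outer, A, C, then walks back up until it finds a next
+// sibling and visits B. Scripts that are inlined more than once are added to
+// the list only on their first visit.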
+
+bool CodeGeneratorShared::generateCompactNativeToBytecodeMap(
+ JSContext* cx, JitCode* code, IonEntry::ScriptList& scripts) {
+ MOZ_ASSERT(nativeToBytecodeMap_ == nullptr);
+ MOZ_ASSERT(nativeToBytecodeMapSize_ == 0);
+ MOZ_ASSERT(nativeToBytecodeTableOffset_ == 0);
+
+ if (!createNativeToBytecodeScriptList(cx, scripts)) {
+ return false;
+ }
+
+ CompactBufferWriter writer;
+ uint32_t tableOffset = 0;
+ uint32_t numRegions = 0;
+
+ if (!JitcodeIonTable::WriteIonTable(
+ writer, scripts, &nativeToBytecodeList_[0],
+ &nativeToBytecodeList_[0] + nativeToBytecodeList_.length(),
+ &tableOffset, &numRegions)) {
+ return false;
+ }
+
+ MOZ_ASSERT(tableOffset > 0);
+ MOZ_ASSERT(numRegions > 0);
+
+ // Writer is done, copy it to sized buffer.
+ uint8_t* data = cx->pod_malloc<uint8_t>(writer.length());
+ if (!data) {
+ return false;
+ }
+
+ memcpy(data, writer.buffer(), writer.length());
+ nativeToBytecodeMap_.reset(data);
+ nativeToBytecodeMapSize_ = writer.length();
+ nativeToBytecodeTableOffset_ = tableOffset;
+
+ verifyCompactNativeToBytecodeMap(code, scripts, numRegions);
+
+ JitSpew(JitSpew_Profiling, "Compact Native To Bytecode Map [%p-%p]", data,
+ data + nativeToBytecodeMapSize_);
+
+ return true;
+}
+
+void CodeGeneratorShared::verifyCompactNativeToBytecodeMap(
+ JitCode* code, const IonEntry::ScriptList& scripts, uint32_t numRegions) {
+#ifdef DEBUG
+ MOZ_ASSERT(nativeToBytecodeMap_ != nullptr);
+ MOZ_ASSERT(nativeToBytecodeMapSize_ > 0);
+ MOZ_ASSERT(nativeToBytecodeTableOffset_ > 0);
+ MOZ_ASSERT(numRegions > 0);
+
+ // The pointer to the table must be 4-byte aligned
+ const uint8_t* tablePtr =
+ nativeToBytecodeMap_.get() + nativeToBytecodeTableOffset_;
+ MOZ_ASSERT(uintptr_t(tablePtr) % sizeof(uint32_t) == 0);
+
+ // Verify that numRegions was encoded correctly.
+ const JitcodeIonTable* ionTable =
+ reinterpret_cast<const JitcodeIonTable*>(tablePtr);
+ MOZ_ASSERT(ionTable->numRegions() == numRegions);
+
+ // Region offset for first region should be at the start of the payload
+ // region. Since the offsets are backward from the start of the table, the
+  // first entry's back-offset should be equal to the forward table offset
+  // from the start of the allocated data.
+ MOZ_ASSERT(ionTable->regionOffset(0) == nativeToBytecodeTableOffset_);
+
+ // Verify each region.
+ for (uint32_t i = 0; i < ionTable->numRegions(); i++) {
+ // Back-offset must point into the payload region preceding the table, not
+ // before it.
+ MOZ_ASSERT(ionTable->regionOffset(i) <= nativeToBytecodeTableOffset_);
+
+ // Back-offset must point to a later area in the payload region than
+ // previous back-offset. This means that back-offsets decrease
+ // monotonically.
+ MOZ_ASSERT_IF(i > 0,
+ ionTable->regionOffset(i) < ionTable->regionOffset(i - 1));
+
+ JitcodeRegionEntry entry = ionTable->regionEntry(i);
+
+ // Ensure native code offset for region falls within jitcode.
+ MOZ_ASSERT(entry.nativeOffset() <= code->instructionsSize());
+
+ // Read out script/pc stack and verify.
+ JitcodeRegionEntry::ScriptPcIterator scriptPcIter =
+ entry.scriptPcIterator();
+ while (scriptPcIter.hasMore()) {
+ uint32_t scriptIdx = 0, pcOffset = 0;
+ scriptPcIter.readNext(&scriptIdx, &pcOffset);
+
+ // Ensure scriptIdx refers to a valid script in the list.
+ JSScript* script = scripts[scriptIdx].script;
+
+ // Ensure pcOffset falls within the script.
+ MOZ_ASSERT(pcOffset < script->length());
+ }
+
+ // Obtain the original nativeOffset and pcOffset and script.
+ uint32_t curNativeOffset = entry.nativeOffset();
+ JSScript* script = nullptr;
+ uint32_t curPcOffset = 0;
+ {
+ uint32_t scriptIdx = 0;
+ scriptPcIter.reset();
+ scriptPcIter.readNext(&scriptIdx, &curPcOffset);
+ script = scripts[scriptIdx].script;
+ }
+
+ // Read out nativeDeltas and pcDeltas and verify.
+ JitcodeRegionEntry::DeltaIterator deltaIter = entry.deltaIterator();
+ while (deltaIter.hasMore()) {
+ uint32_t nativeDelta = 0;
+ int32_t pcDelta = 0;
+ deltaIter.readNext(&nativeDelta, &pcDelta);
+
+ curNativeOffset += nativeDelta;
+ curPcOffset = uint32_t(int32_t(curPcOffset) + pcDelta);
+
+ // Ensure that nativeOffset still falls within jitcode after delta.
+ MOZ_ASSERT(curNativeOffset <= code->instructionsSize());
+
+ // Ensure that pcOffset still falls within bytecode after delta.
+ MOZ_ASSERT(curPcOffset < script->length());
+ }
+ }
+#endif // DEBUG
+}
+
+void CodeGeneratorShared::markSafepoint(LInstruction* ins) {
+ markSafepointAt(masm.currentOffset(), ins);
+}
+
+void CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction* ins) {
+ MOZ_ASSERT_IF(
+ !safepointIndices_.empty() && !masm.oom(),
+ offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
+ masm.propagateOOM(safepointIndices_.append(
+ CodegenSafepointIndex(offset, ins->safepoint())));
+}
+
+void CodeGeneratorShared::ensureOsiSpace() {
+ // For a refresher, an invalidation point is of the form:
+ // 1: call <target>
+ // 2: ...
+ // 3: <osipoint>
+ //
+ // The four bytes *before* instruction 2 are overwritten with an offset.
+ // Callers must ensure that the instruction itself has enough bytes to
+ // support this.
+ //
+  // The bytes *at* instruction 3 are overwritten with an invalidation jump.
+  // These bytes may be in a completely different IR sequence, but they
+ // represent the join point of the call out of the function.
+ //
+ // At points where we want to ensure that invalidation won't corrupt an
+ // important instruction, we make sure to pad with nops.
+ if (masm.currentOffset() - lastOsiPointOffset_ <
+ Assembler::PatchWrite_NearCallSize()) {
+ int32_t paddingSize = Assembler::PatchWrite_NearCallSize();
+ paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
+ for (int32_t i = 0; i < paddingSize; ++i) {
+ masm.nop();
+ }
+ }
+ MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() - lastOsiPointOffset_ >=
+ Assembler::PatchWrite_NearCallSize());
+}
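+
+// Worked example with hypothetical sizes: if
+// Assembler::PatchWrite_NearCallSize() is 5 bytes and the last OSI point was
+// emitted 2 bytes ago, ensureOsiSpace() pads with 5 - 2 = 3 nops, so an
+// invalidation call patched at the previous OSI point cannot overwrite the
+// instruction emitted next.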
+
+uint32_t CodeGeneratorShared::markOsiPoint(LOsiPoint* ins) {
+ encode(ins->snapshot());
+ ensureOsiSpace();
+
+ uint32_t offset = masm.currentOffset();
+ SnapshotOffset so = ins->snapshot()->snapshotOffset();
+ masm.propagateOOM(osiIndices_.append(OsiIndex(offset, so)));
+ lastOsiPointOffset_ = offset;
+
+ return offset;
+}
+
+class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared> {
+ FloatRegister src_;
+ Register dest_;
+ bool widenFloatToDouble_;
+ wasm::BytecodeOffset bytecodeOffset_;
+ bool preserveInstance_;
+
+ public:
+ OutOfLineTruncateSlow(
+ FloatRegister src, Register dest, bool widenFloatToDouble = false,
+ wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset(),
+ bool preserveInstance = false)
+ : src_(src),
+ dest_(dest),
+ widenFloatToDouble_(widenFloatToDouble),
+ bytecodeOffset_(bytecodeOffset),
+ preserveInstance_(preserveInstance) {}
+
+ void accept(CodeGeneratorShared* codegen) override {
+ codegen->visitOutOfLineTruncateSlow(this);
+ }
+ FloatRegister src() const { return src_; }
+ Register dest() const { return dest_; }
+ bool widenFloatToDouble() const { return widenFloatToDouble_; }
+ bool preserveInstance() const { return preserveInstance_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+};
+
+OutOfLineCode* CodeGeneratorShared::oolTruncateDouble(
+ FloatRegister src, Register dest, MInstruction* mir,
+ wasm::BytecodeOffset bytecodeOffset, bool preserveInstance) {
+ MOZ_ASSERT_IF(IsCompilingWasm(), bytecodeOffset.isValid());
+
+ OutOfLineTruncateSlow* ool = new (alloc()) OutOfLineTruncateSlow(
+ src, dest, /* float32 */ false, bytecodeOffset, preserveInstance);
+ addOutOfLineCode(ool, mir);
+ return ool;
+}
+
+void CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest,
+ MInstruction* mir) {
+ MOZ_ASSERT(mir->isTruncateToInt32() || mir->isWasmBuiltinTruncateToInt32());
+ wasm::BytecodeOffset bytecodeOffset =
+ mir->isTruncateToInt32()
+ ? mir->toTruncateToInt32()->bytecodeOffset()
+ : mir->toWasmBuiltinTruncateToInt32()->bytecodeOffset();
+ OutOfLineCode* ool = oolTruncateDouble(src, dest, mir, bytecodeOffset);
+
+ masm.branchTruncateDoubleMaybeModUint32(src, dest, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGeneratorShared::emitTruncateFloat32(FloatRegister src, Register dest,
+ MInstruction* mir) {
+ MOZ_ASSERT(mir->isTruncateToInt32() || mir->isWasmBuiltinTruncateToInt32());
+ wasm::BytecodeOffset bytecodeOffset =
+ mir->isTruncateToInt32()
+ ? mir->toTruncateToInt32()->bytecodeOffset()
+ : mir->toWasmBuiltinTruncateToInt32()->bytecodeOffset();
+ OutOfLineTruncateSlow* ool = new (alloc())
+ OutOfLineTruncateSlow(src, dest, /* float32 */ true, bytecodeOffset);
+ addOutOfLineCode(ool, mir);
+
+ masm.branchTruncateFloat32MaybeModUint32(src, dest, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGeneratorShared::visitOutOfLineTruncateSlow(
+ OutOfLineTruncateSlow* ool) {
+ FloatRegister src = ool->src();
+ Register dest = ool->dest();
+
+ saveVolatile(dest);
+ masm.outOfLineTruncateSlow(src, dest, ool->widenFloatToDouble(),
+ gen->compilingWasm(), ool->bytecodeOffset());
+ restoreVolatile(dest);
+
+ masm.jump(ool->rejoin());
+}
+
+bool CodeGeneratorShared::omitOverRecursedCheck() const {
+ // If the current function makes no calls (which means it isn't recursive)
+ // and it uses only a small amount of stack space, it doesn't need a
+ // stack overflow check. Note that the actual number here is somewhat
+ // arbitrary, and codegen actually uses small bounded amounts of
+ // additional stack space in some cases too.
+ return frameSize() < MAX_UNCHECKED_LEAF_FRAME_SIZE &&
+ !gen->needsOverrecursedCheck();
+}
+
+void CodeGeneratorShared::emitPreBarrier(Register elements,
+ const LAllocation* index) {
+ if (index->isConstant()) {
+ Address address(elements, ToInt32(index) * sizeof(Value));
+ masm.guardedCallPreBarrier(address, MIRType::Value);
+ } else {
+ BaseObjectElementIndex address(elements, ToRegister(index));
+ masm.guardedCallPreBarrier(address, MIRType::Value);
+ }
+}
+
+void CodeGeneratorShared::emitPreBarrier(Address address) {
+ masm.guardedCallPreBarrier(address, MIRType::Value);
+}
+
+void CodeGeneratorShared::jumpToBlock(MBasicBlock* mir) {
+ // Skip past trivial blocks.
+ mir = skipTrivialBlocks(mir);
+
+ // No jump necessary if we can fall through to the next block.
+ if (isNextBlock(mir->lir())) {
+ return;
+ }
+
+ masm.jump(mir->lir()->label());
+}
+
+Label* CodeGeneratorShared::getJumpLabelForBranch(MBasicBlock* block) {
+ // Skip past trivial blocks.
+ return skipTrivialBlocks(block)->lir()->label();
+}
+
+// This function is not used for MIPS/MIPS64/LOONG64/RISCV64. Those backends
+// have branchToBlock.
+#if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64) && \
+ !defined(JS_CODEGEN_LOONG64) && !defined(JS_CODEGEN_RISCV64)
+void CodeGeneratorShared::jumpToBlock(MBasicBlock* mir,
+ Assembler::Condition cond) {
+ // Skip past trivial blocks.
+ masm.j(cond, skipTrivialBlocks(mir)->lir()->label());
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/shared/CodeGenerator-shared.h b/js/src/jit/shared/CodeGenerator-shared.h
new file mode 100644
index 0000000000..66b0aff5af
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -0,0 +1,488 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_CodeGenerator_shared_h
+#define jit_shared_CodeGenerator_shared_h
+
+#include "mozilla/Alignment.h"
+
+#include <utility>
+
+#include "jit/InlineScriptTree.h"
+#include "jit/JitcodeMap.h"
+#include "jit/LIR.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/SafepointIndex.h"
+#include "jit/Safepoints.h"
+#include "jit/Snapshots.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineCode;
+class CodeGenerator;
+class MacroAssembler;
+class IonIC;
+
+class OutOfLineTruncateSlow;
+
+class CodeGeneratorShared : public LElementVisitor {
+ js::Vector<OutOfLineCode*, 0, SystemAllocPolicy> outOfLineCode_;
+
+ MacroAssembler& ensureMasm(MacroAssembler* masm, TempAllocator& alloc,
+ CompileRealm* realm);
+ mozilla::Maybe<IonHeapMacroAssembler> maybeMasm_;
+
+ public:
+ MacroAssembler& masm;
+
+ protected:
+ MIRGenerator* gen;
+ LIRGraph& graph;
+ LBlock* current;
+ SnapshotWriter snapshots_;
+ RecoverWriter recovers_;
+#ifdef DEBUG
+ uint32_t pushedArgs_;
+#endif
+ uint32_t lastOsiPointOffset_;
+ SafepointWriter safepoints_;
+ Label invalidate_;
+ CodeOffset invalidateEpilogueData_;
+
+ // Label for the common return path.
+ NonAssertingLabel returnLabel_;
+
+ js::Vector<CodegenSafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
+ js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;
+
+ // Allocated data space needed at runtime.
+ js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;
+
+ // Vector mapping each IC index to its offset in runtimeData_.
+ js::Vector<uint32_t, 0, SystemAllocPolicy> icList_;
+
+ // IC data we need at compile-time. Discarded after creating the IonScript.
+ struct CompileTimeICInfo {
+ CodeOffset icOffsetForJump;
+ CodeOffset icOffsetForPush;
+ };
+ js::Vector<CompileTimeICInfo, 0, SystemAllocPolicy> icInfo_;
+
+ protected:
+ js::Vector<NativeToBytecode, 0, SystemAllocPolicy> nativeToBytecodeList_;
+ UniquePtr<uint8_t> nativeToBytecodeMap_;
+ uint32_t nativeToBytecodeMapSize_;
+ uint32_t nativeToBytecodeTableOffset_;
+
+ bool isProfilerInstrumentationEnabled() {
+ return gen->isProfilerInstrumentationEnabled();
+ }
+
+ gc::Heap initialStringHeap() const { return gen->initialStringHeap(); }
+ gc::Heap initialBigIntHeap() const { return gen->initialBigIntHeap(); }
+
+ protected:
+ // The offset of the first instruction of the OSR entry block from the
+ // beginning of the code buffer.
+ mozilla::Maybe<size_t> osrEntryOffset_ = {};
+
+ TempAllocator& alloc() const { return graph.mir().alloc(); }
+
+ void setOsrEntryOffset(size_t offset) { osrEntryOffset_.emplace(offset); }
+
+ size_t getOsrEntryOffset() const {
+ MOZ_RELEASE_ASSERT(osrEntryOffset_.isSome());
+ return *osrEntryOffset_;
+ }
+
+ typedef js::Vector<CodegenSafepointIndex, 8, SystemAllocPolicy>
+ SafepointIndices;
+
+ protected:
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // See JitOptions.checkOsiPointRegisters. We set this here to avoid
+ // races when enableOsiPointRegisterChecks is called while we're generating
+ // code off-thread.
+ bool checkOsiPointRegisters;
+#endif
+
+ // The initial size of the frame in bytes. These are bytes beyond the
+ // constant header present for every Ion frame, used for pre-determined
+ // spills.
+ uint32_t frameDepth_;
+
+ // Offset in bytes to the incoming arguments, relative to the frame pointer.
+ uint32_t offsetOfArgsFromFP_ = 0;
+
+ // Offset in bytes of the stack region reserved for passed argument Values.
+ uint32_t offsetOfPassedArgSlots_ = 0;
+
+ // For argument construction for calls. Argslots are Value-sized.
+ inline Address AddressOfPassedArg(uint32_t slot) const;
+ inline uint32_t UnusedStackBytesForCall(uint32_t numArgSlots) const;
+
+ template <BaseRegForAddress Base = BaseRegForAddress::Default>
+ inline Address ToAddress(const LAllocation& a) const;
+
+ template <BaseRegForAddress Base = BaseRegForAddress::Default>
+ inline Address ToAddress(const LAllocation* a) const;
+
+ static inline Address ToAddress(Register elements, const LAllocation* index,
+ Scalar::Type type,
+ int32_t offsetAdjustment = 0);
+
+ uint32_t frameSize() const { return frameDepth_; }
+
+ protected:
+ bool addNativeToBytecodeEntry(const BytecodeSite* site);
+ void dumpNativeToBytecodeEntries();
+ void dumpNativeToBytecodeEntry(uint32_t idx);
+
+ public:
+ MIRGenerator& mirGen() const { return *gen; }
+
+  // When appending to runtimeData_, the vector might realloc, leaving pointers
+  // into the original vector stale and unusable. DataPtr acts like a pointer,
+  // but allows safety in the face of potentially realloc'ing vector appends.
+ friend class DataPtr;
+ template <typename T>
+ class DataPtr {
+ CodeGeneratorShared* cg_;
+ size_t index_;
+
+ T* lookup() { return reinterpret_cast<T*>(&cg_->runtimeData_[index_]); }
+
+ public:
+ DataPtr(CodeGeneratorShared* cg, size_t index) : cg_(cg), index_(index) {}
+
+ T* operator->() { return lookup(); }
+ T* operator*() { return lookup(); }
+ };
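+
+  // Illustrative usage (SomeIonIC stands in for any concrete IonIC subclass):
+  //
+  //   size_t index = allocateIC(SomeIonIC(/* ... */));
+  //   DataPtr<SomeIonIC> ic(this, index);
+  //   // Use ic-> as if it were a SomeIonIC*; the pointer is re-derived from
+  //   // runtimeData_ on every access, so it stays valid even if a later
+  //   // allocateData() call reallocates the vector.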
+
+ protected:
+ [[nodiscard]] bool allocateData(size_t size, size_t* offset) {
+ MOZ_ASSERT(size % sizeof(void*) == 0);
+ *offset = runtimeData_.length();
+ masm.propagateOOM(runtimeData_.appendN(0, size));
+ return !masm.oom();
+ }
+
+ template <typename T>
+ inline size_t allocateIC(const T& cache) {
+ static_assert(std::is_base_of_v<IonIC, T>, "T must inherit from IonIC");
+ size_t index;
+ masm.propagateOOM(
+ allocateData(sizeof(mozilla::AlignedStorage2<T>), &index));
+ masm.propagateOOM(icList_.append(index));
+ masm.propagateOOM(icInfo_.append(CompileTimeICInfo()));
+ if (masm.oom()) {
+ return SIZE_MAX;
+ }
+ // Use the copy constructor on the allocated space.
+ MOZ_ASSERT(index == icList_.back());
+ new (&runtimeData_[index]) T(cache);
+ return index;
+ }
+
+ protected:
+ // Encodes an LSnapshot into the compressed snapshot buffer.
+ void encode(LRecoverInfo* recover);
+ void encode(LSnapshot* snapshot);
+ void encodeAllocation(LSnapshot* snapshot, MDefinition* def,
+ uint32_t* startIndex);
+
+ // Encode all encountered safepoints in CG-order, and resolve |indices| for
+ // safepoint offsets.
+ bool encodeSafepoints();
+
+ // Fixup offsets of native-to-bytecode map.
+ bool createNativeToBytecodeScriptList(JSContext* cx,
+ IonEntry::ScriptList& scripts);
+ bool generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code,
+ IonEntry::ScriptList& scripts);
+ void verifyCompactNativeToBytecodeMap(JitCode* code,
+ const IonEntry::ScriptList& scripts,
+ uint32_t numRegions);
+
+ // Mark the safepoint on |ins| as corresponding to the current assembler
+ // location. The location should be just after a call.
+ void markSafepoint(LInstruction* ins);
+ void markSafepointAt(uint32_t offset, LInstruction* ins);
+
+ // Mark the OSI point |ins| as corresponding to the current
+ // assembler location inside the |osiIndices_|. Return the assembler
+ // location for the OSI point return location.
+ uint32_t markOsiPoint(LOsiPoint* ins);
+
+ // Ensure that there is enough room between the last OSI point and the
+ // current instruction, such that:
+ // (1) Invalidation will not overwrite the current instruction, and
+ // (2) Overwriting the current instruction will not overwrite
+ // an invalidation marker.
+ void ensureOsiSpace();
+
+ OutOfLineCode* oolTruncateDouble(
+ FloatRegister src, Register dest, MInstruction* mir,
+ wasm::BytecodeOffset callOffset = wasm::BytecodeOffset(),
+ bool preserveInstance = false);
+ void emitTruncateDouble(FloatRegister src, Register dest, MInstruction* mir);
+ void emitTruncateFloat32(FloatRegister src, Register dest, MInstruction* mir);
+
+ void emitPreBarrier(Register elements, const LAllocation* index);
+ void emitPreBarrier(Address address);
+
+ // We don't emit code for trivial blocks, so if we want to branch to the
+ // given block, and it's trivial, return the ultimate block we should
+ // actually branch directly to.
+ MBasicBlock* skipTrivialBlocks(MBasicBlock* block) {
+ while (block->lir()->isTrivial()) {
+ LGoto* ins = block->lir()->rbegin()->toGoto();
+ MOZ_ASSERT(ins->numSuccessors() == 1);
+ block = ins->getSuccessor(0);
+ }
+ return block;
+ }
+
+ // Test whether the given block can be reached via fallthrough from the
+ // current block.
+ inline bool isNextBlock(LBlock* block) {
+ uint32_t target = skipTrivialBlocks(block->mir())->id();
+ uint32_t i = current->mir()->id() + 1;
+ if (target < i) {
+ return false;
+ }
+ // Trivial blocks can be crossed via fallthrough.
+ for (; i != target; ++i) {
+ if (!graph.getBlock(i)->isTrivial()) {
+ return false;
+ }
+ }
+ return true;
+ }
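+
+  // For example, if the current block has id 4 and |block| resolves (after
+  // skipTrivialBlocks) to id 7, this returns true only when blocks 5 and 6
+  // are trivial, so the jump can be elided as fallthrough.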
+
+ protected:
+ // Save and restore all volatile registers to/from the stack, excluding the
+ // specified register(s), before a function call made using callWithABI and
+ // after storing the function call's return value to an output register.
+ // (The only registers that don't need to be saved/restored are 1) the
+ // temporary register used to store the return value of the function call,
+ // if there is one [otherwise that stored value would be overwritten]; and
+ // 2) temporary registers whose values aren't needed in the rest of the LIR
+ // instruction [this is purely an optimization]. All other volatiles must
+ // be saved and restored in case future LIR instructions need those values.)
+ void saveVolatile(Register output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PushRegsInMask(regs);
+ }
+ void restoreVolatile(Register output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PopRegsInMask(regs);
+ }
+ void saveVolatile(FloatRegister output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PushRegsInMask(regs);
+ }
+ void restoreVolatile(FloatRegister output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PopRegsInMask(regs);
+ }
+ void saveVolatile(LiveRegisterSet temps) {
+ masm.PushRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
+ }
+ void restoreVolatile(LiveRegisterSet temps) {
+ masm.PopRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
+ }
+ void saveVolatile() {
+ masm.PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+ }
+ void restoreVolatile() {
+ masm.PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+ }
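+
+  // Typical usage sketch:
+  //
+  //   saveVolatile(output);
+  //   // ... emit a callWithABI-style call whose result lands in |output| ...
+  //   restoreVolatile(output);
+  //
+  // |output| is excluded from the set so the result is not clobbered when the
+  // volatile registers are popped; see visitOutOfLineTruncateSlow() in
+  // CodeGenerator-shared.cpp for a concrete caller.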
+
+ // These functions have to be called before and after any callVM and before
+ // any modifications of the stack. Modification of the stack made after
+ // these calls should update the framePushed variable, needed by the exit
+ // frame produced by callVM.
+ inline void saveLive(LInstruction* ins);
+ inline void restoreLive(LInstruction* ins);
+ inline void restoreLiveIgnore(LInstruction* ins, LiveRegisterSet reg);
+
+ // Get/save/restore all registers that are both live and volatile.
+ inline LiveRegisterSet liveVolatileRegs(LInstruction* ins);
+ inline void saveLiveVolatile(LInstruction* ins);
+ inline void restoreLiveVolatile(LInstruction* ins);
+
+ public:
+ template <typename T>
+ void pushArg(const T& t) {
+ masm.Push(t);
+#ifdef DEBUG
+ pushedArgs_++;
+#endif
+ }
+
+ void pushArg(jsid id, Register temp) {
+ masm.Push(id, temp);
+#ifdef DEBUG
+ pushedArgs_++;
+#endif
+ }
+
+ template <typename T>
+ CodeOffset pushArgWithPatch(const T& t) {
+#ifdef DEBUG
+ pushedArgs_++;
+#endif
+ return masm.PushWithPatch(t);
+ }
+
+ void storePointerResultTo(Register reg) { masm.storeCallPointerResult(reg); }
+
+ void storeFloatResultTo(FloatRegister reg) { masm.storeCallFloatResult(reg); }
+
+ template <typename T>
+ void storeResultValueTo(const T& t) {
+ masm.storeCallResultValue(t);
+ }
+
+ protected:
+ void addIC(LInstruction* lir, size_t cacheIndex);
+
+ protected:
+ bool generatePrologue();
+ bool generateEpilogue();
+
+ void addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir);
+ void addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site);
+ bool generateOutOfLineCode();
+
+ Label* getJumpLabelForBranch(MBasicBlock* block);
+
+ // Generate a jump to the start of the specified block. Use this in place of
+ // jumping directly to mir->lir()->label(), or use getJumpLabelForBranch()
+ // if a label to use directly is needed.
+ void jumpToBlock(MBasicBlock* mir);
+
+// This function is not used for MIPS. MIPS has branchToBlock.
+#if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
+ void jumpToBlock(MBasicBlock* mir, Assembler::Condition cond);
+#endif
+
+ private:
+ void generateInvalidateEpilogue();
+
+ public:
+ CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ void visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool);
+
+ bool omitOverRecursedCheck() const;
+
+ public:
+ bool isGlobalObject(JSObject* object);
+};
+
+// An out-of-line path is generated at the end of the function.
+class OutOfLineCode : public TempObject {
+ Label entry_;
+ Label rejoin_;
+ uint32_t framePushed_;
+ const BytecodeSite* site_;
+
+ public:
+ OutOfLineCode() : framePushed_(0), site_() {}
+
+ virtual void generate(CodeGeneratorShared* codegen) = 0;
+
+ Label* entry() { return &entry_; }
+ virtual void bind(MacroAssembler* masm) { masm->bind(entry()); }
+ Label* rejoin() { return &rejoin_; }
+ void setFramePushed(uint32_t framePushed) { framePushed_ = framePushed; }
+ uint32_t framePushed() const { return framePushed_; }
+ void setBytecodeSite(const BytecodeSite* site) { site_ = site; }
+ const BytecodeSite* bytecodeSite() const { return site_; }
+};
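+
+// For a concrete subclass, see OutOfLineTruncateSlow in
+// CodeGenerator-shared.cpp: it captures its operands in the constructor,
+// overrides accept(), and the code generator branches to entry() for the slow
+// path and jumps back through rejoin() when done.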
+
+// For OOL paths that want a specific-typed code generator.
+template <typename T>
+class OutOfLineCodeBase : public OutOfLineCode {
+ public:
+ virtual void generate(CodeGeneratorShared* codegen) override {
+ accept(static_cast<T*>(codegen));
+ }
+
+ public:
+ virtual void accept(T* codegen) = 0;
+};
+
+template <class CodeGen>
+class OutOfLineWasmTruncateCheckBase : public OutOfLineCodeBase<CodeGen> {
+ MIRType fromType_;
+ MIRType toType_;
+ FloatRegister input_;
+ Register output_;
+ Register64 output64_;
+ TruncFlags flags_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ public:
+ OutOfLineWasmTruncateCheckBase(MWasmTruncateToInt32* mir, FloatRegister input,
+ Register output)
+ : fromType_(mir->input()->type()),
+ toType_(MIRType::Int32),
+ input_(input),
+ output_(output),
+ output64_(Register64::Invalid()),
+ flags_(mir->flags()),
+ bytecodeOffset_(mir->bytecodeOffset()) {}
+
+ OutOfLineWasmTruncateCheckBase(MWasmBuiltinTruncateToInt64* mir,
+ FloatRegister input, Register64 output)
+ : fromType_(mir->input()->type()),
+ toType_(MIRType::Int64),
+ input_(input),
+ output_(Register::Invalid()),
+ output64_(output),
+ flags_(mir->flags()),
+ bytecodeOffset_(mir->bytecodeOffset()) {}
+
+ OutOfLineWasmTruncateCheckBase(MWasmTruncateToInt64* mir, FloatRegister input,
+ Register64 output)
+ : fromType_(mir->input()->type()),
+ toType_(MIRType::Int64),
+ input_(input),
+ output_(Register::Invalid()),
+ output64_(output),
+ flags_(mir->flags()),
+ bytecodeOffset_(mir->bytecodeOffset()) {}
+
+ void accept(CodeGen* codegen) override {
+ codegen->visitOutOfLineWasmTruncateCheck(this);
+ }
+
+ FloatRegister input() const { return input_; }
+ Register output() const { return output_; }
+ Register64 output64() const { return output64_; }
+ MIRType toType() const { return toType_; }
+ MIRType fromType() const { return fromType_; }
+ bool isUnsigned() const { return flags_ & TRUNC_UNSIGNED; }
+ bool isSaturating() const { return flags_ & TRUNC_SATURATING; }
+ TruncFlags flags() const { return flags_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_CodeGenerator_shared_h */
diff --git a/js/src/jit/shared/Disassembler-shared.cpp b/js/src/jit/shared/Disassembler-shared.cpp
new file mode 100644
index 0000000000..a8e7f126b5
--- /dev/null
+++ b/js/src/jit/shared/Disassembler-shared.cpp
@@ -0,0 +1,248 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/Disassembler-shared.h"
+
+#include "jit/JitSpewer.h"
+#include "jit/Label.h"
+#include "js/Printer.h"
+
+using namespace js::jit;
+
+using js::Sprinter;
+
+#ifdef JS_DISASM_SUPPORTED
+// Concurrent assemblers are disambiguated by prefixing every disassembly with a
+// tag that is quasi-unique, and certainly unique enough in realistic cases
+// where we are debugging and looking at disassembler output. The tag is a
+// letter or digit between brackets prefixing the disassembly, e.g., [X]. This
+// wraps around every 62 assemblers.
+//
+// When running with --no-threads we can still have concurrent assemblers in the
+// form of nested assemblers, as when an IC stub is created by one assembler
+// while a JS compilation is going on and producing output in another assembler.
+//
+// We generate the tag for an assembler by incrementing a global mod-2^32
+// counter every time a new disassembler is created.
+
+mozilla::Atomic<uint32_t> DisassemblerSpew::counter_(0);
+#endif
+
+DisassemblerSpew::DisassemblerSpew()
+ : printer_(nullptr)
+#ifdef JS_DISASM_SUPPORTED
+ ,
+ labelIndent_(""),
+ targetIndent_(""),
+ spewNext_(1000),
+ nodes_(nullptr),
+ tag_(0)
+#endif
+{
+#ifdef JS_DISASM_SUPPORTED
+ tag_ = counter_++;
+#endif
+}
+
+DisassemblerSpew::~DisassemblerSpew() {
+#ifdef JS_DISASM_SUPPORTED
+ Node* p = nodes_;
+ while (p) {
+ Node* victim = p;
+ p = p->next;
+ js_free(victim);
+ }
+#endif
+}
+
+void DisassemblerSpew::setPrinter(Sprinter* printer) { printer_ = printer; }
+
+bool DisassemblerSpew::isDisabled() {
+ return !(JitSpewEnabled(JitSpew_Codegen) || printer_);
+}
+
+void DisassemblerSpew::spew(const char* fmt, ...) {
+#ifdef JS_DISASM_SUPPORTED
+ static const char prefix_chars[] =
+ "0123456789"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ static const char prefix_fmt[] = "[%c] ";
+
+ char fmt2[1024];
+ if (sizeof(fmt2) >= strlen(fmt) + sizeof(prefix_fmt)) {
+ snprintf(fmt2, sizeof(prefix_fmt), prefix_fmt,
+ prefix_chars[tag_ % (sizeof(prefix_chars) - 1)]);
+ strcat(fmt2, fmt);
+ fmt = fmt2;
+ }
+#endif
+
+ va_list args;
+ va_start(args, fmt);
+ spewVA(fmt, args);
+ va_end(args);
+}
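+
+// For example, with tag_ == 10 the prefix characters above yield "[a] ", so a
+// line spewed as "ldr r0, [pc, #8]" is printed as "[a] ldr r0, [pc, #8]".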
+
+void DisassemblerSpew::spewVA(const char* fmt, va_list va) {
+ if (printer_) {
+ printer_->vprintf(fmt, va);
+ printer_->put("\n");
+ }
+ js::jit::JitSpewVA(js::jit::JitSpew_Codegen, fmt, va);
+}
+
+#ifdef JS_DISASM_SUPPORTED
+
+void DisassemblerSpew::setLabelIndent(const char* s) { labelIndent_ = s; }
+
+void DisassemblerSpew::setTargetIndent(const char* s) { targetIndent_ = s; }
+
+DisassemblerSpew::LabelDoc DisassemblerSpew::refLabel(const Label* l) {
+ return l ? LabelDoc(internalResolve(l), l->bound()) : LabelDoc();
+}
+
+void DisassemblerSpew::spewRef(const LabelDoc& target) {
+ if (isDisabled()) {
+ return;
+ }
+ if (!target.valid) {
+ return;
+ }
+ spew("%s-> %d%s", targetIndent_, target.doc, !target.bound ? "f" : "");
+}
+
+void DisassemblerSpew::spewBind(const Label* label) {
+ if (isDisabled()) {
+ return;
+ }
+ uint32_t v = internalResolve(label);
+ Node* probe = lookup(label);
+ if (probe) {
+ probe->bound = true;
+ }
+ spew("%s%d:", labelIndent_, v);
+}
+
+void DisassemblerSpew::spewRetarget(const Label* label, const Label* target) {
+ if (isDisabled()) {
+ return;
+ }
+ LabelDoc labelDoc = LabelDoc(internalResolve(label), label->bound());
+ LabelDoc targetDoc = LabelDoc(internalResolve(target), target->bound());
+ Node* probe = lookup(label);
+ if (probe) {
+ probe->bound = true;
+ }
+ spew("%s%d: .retarget -> %d%s", labelIndent_, labelDoc.doc, targetDoc.doc,
+ !targetDoc.bound ? "f" : "");
+}
+
+void DisassemblerSpew::formatLiteral(const LiteralDoc& doc, char* buffer,
+ size_t bufsize) {
+ switch (doc.type) {
+ case LiteralDoc::Type::Patchable:
+ snprintf(buffer, bufsize, "patchable");
+ break;
+ case LiteralDoc::Type::I32:
+ snprintf(buffer, bufsize, "%d", doc.value.i32);
+ break;
+ case LiteralDoc::Type::U32:
+ snprintf(buffer, bufsize, "%u", doc.value.u32);
+ break;
+ case LiteralDoc::Type::I64:
+ snprintf(buffer, bufsize, "%" PRIi64, doc.value.i64);
+ break;
+ case LiteralDoc::Type::U64:
+ snprintf(buffer, bufsize, "%" PRIu64, doc.value.u64);
+ break;
+ case LiteralDoc::Type::F32:
+ snprintf(buffer, bufsize, "%g", doc.value.f32);
+ break;
+ case LiteralDoc::Type::F64:
+ snprintf(buffer, bufsize, "%g", doc.value.f64);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void DisassemblerSpew::spewOrphans() {
+ for (Node* p = nodes_; p; p = p->next) {
+ if (!p->bound) {
+ spew("%s%d: ; .orphan", labelIndent_, p->value);
+ }
+ }
+}
+
+uint32_t DisassemblerSpew::internalResolve(const Label* l) {
+ // Note, internalResolve will sometimes return 0 when it is triggered by the
+ // profiler and not by a full disassembly, since in that case a label can be
+ // used or bound but not previously have been defined. In that case,
+ // internalResolve(l) will not necessarily create a binding for l!
+ // Consequently a subsequent lookup(l) may still return null.
+ return l->used() || l->bound() ? probe(l) : define(l);
+}
+
+uint32_t DisassemblerSpew::probe(const Label* l) {
+ Node* n = lookup(l);
+ return n ? n->value : 0;
+}
+
+uint32_t DisassemblerSpew::define(const Label* l) {
+ remove(l);
+ uint32_t value = spewNext_++;
+ if (!add(l, value)) {
+ return 0;
+ }
+ return value;
+}
+
+DisassemblerSpew::Node* DisassemblerSpew::lookup(const Label* key) {
+ Node* p;
+ for (p = nodes_; p && p->key != key; p = p->next) {
+ ;
+ }
+ return p;
+}
+
+DisassemblerSpew::Node* DisassemblerSpew::add(const Label* key,
+ uint32_t value) {
+ MOZ_ASSERT(!lookup(key));
+ Node* node = js_new<Node>();
+ if (node) {
+ node->key = key;
+ node->value = value;
+ node->bound = false;
+ node->next = nodes_;
+ nodes_ = node;
+ }
+ return node;
+}
+
+bool DisassemblerSpew::remove(const Label* key) {
+ // We do not require that there is a node matching the key.
+ for (Node *p = nodes_, *pp = nullptr; p; pp = p, p = p->next) {
+ if (p->key == key) {
+ if (pp) {
+ pp->next = p->next;
+ } else {
+ nodes_ = p->next;
+ }
+ js_free(p);
+ return true;
+ }
+ }
+ return false;
+}
+
+#else
+
+DisassemblerSpew::LabelDoc DisassemblerSpew::refLabel(const Label* l) {
+ return LabelDoc();
+}
+
+#endif
diff --git a/js/src/jit/shared/Disassembler-shared.h b/js/src/jit/shared/Disassembler-shared.h
new file mode 100644
index 0000000000..882de421a8
--- /dev/null
+++ b/js/src/jit/shared/Disassembler-shared.h
@@ -0,0 +1,184 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Disassembler_shared_h
+#define jit_shared_Disassembler_shared_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#if defined(JS_DISASM_ARM) || defined(JS_DISASM_ARM64)
+# define JS_DISASM_SUPPORTED
+#endif
+
+namespace js {
+
+class JS_PUBLIC_API Sprinter;
+
+namespace jit {
+
+class Label;
+
+// A wrapper around spew/disassembly functionality. The disassembler is built
+// on a per-instruction disassembler (as in our ARM, ARM64 back-ends) and
+// formats labels with meaningful names and literals with meaningful values, if
+// the assembler creates documentation (with provided helpers) at appropriate
+// points.
+
+class DisassemblerSpew {
+#ifdef JS_DISASM_SUPPORTED
+ struct Node {
+ const Label* key; // Never dereferenced, only used for its value
+ uint32_t value; // The printable label value
+ bool bound; // If the label has been seen by spewBind()
+ Node* next;
+ };
+
+ Node* lookup(const Label* key);
+ Node* add(const Label* key, uint32_t value);
+ bool remove(const Label* key);
+
+ uint32_t probe(const Label* l);
+ uint32_t define(const Label* l);
+ uint32_t internalResolve(const Label* l);
+#endif
+
+ void spewVA(const char* fmt, va_list args) MOZ_FORMAT_PRINTF(2, 0);
+
+ public:
+ DisassemblerSpew();
+ ~DisassemblerSpew();
+
+#ifdef JS_DISASM_SUPPORTED
+ // Set indentation strings. The spewer retains a reference to s.
+ void setLabelIndent(const char* s);
+ void setTargetIndent(const char* s);
+#endif
+
+ // Set the spew printer, which will always be used if it is set, regardless
+ // of whether the system spew channel is enabled or not. The spewer retains
+ // a reference to sp.
+ void setPrinter(Sprinter* sp);
+
+ // Return true if disassembly spew is disabled and no additional printer is
+ // set.
+ bool isDisabled();
+
+ // Format and print text on the spew channel; output is suppressed if spew
+ // is disabled. The output is not indented, and is terminated by a newline.
+ void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3);
+
+ // Documentation for a label reference.
+ struct LabelDoc {
+#ifdef JS_DISASM_SUPPORTED
+ LabelDoc() : doc(0), bound(false), valid(false) {}
+ LabelDoc(uint32_t doc, bool bound) : doc(doc), bound(bound), valid(true) {}
+ const uint32_t doc;
+ const bool bound;
+ const bool valid;
+#else
+ LabelDoc() = default;
+ LabelDoc(uint32_t, bool) {}
+#endif
+ };
+
+ // Documentation for a literal load.
+ struct LiteralDoc {
+#ifdef JS_DISASM_SUPPORTED
+ enum class Type { Patchable, I32, U32, I64, U64, F32, F64 };
+ const Type type;
+ union {
+ int32_t i32;
+ uint32_t u32;
+ int64_t i64;
+ uint64_t u64;
+ float f32;
+ double f64;
+ } value;
+ LiteralDoc() : type(Type::Patchable) {}
+ explicit LiteralDoc(int32_t v) : type(Type::I32) { value.i32 = v; }
+ explicit LiteralDoc(uint32_t v) : type(Type::U32) { value.u32 = v; }
+ explicit LiteralDoc(int64_t v) : type(Type::I64) { value.i64 = v; }
+ explicit LiteralDoc(uint64_t v) : type(Type::U64) { value.u64 = v; }
+ explicit LiteralDoc(float v) : type(Type::F32) { value.f32 = v; }
+ explicit LiteralDoc(double v) : type(Type::F64) { value.f64 = v; }
+#else
+ LiteralDoc() = default;
+ explicit LiteralDoc(int32_t) {}
+ explicit LiteralDoc(uint32_t) {}
+ explicit LiteralDoc(int64_t) {}
+ explicit LiteralDoc(uint64_t) {}
+ explicit LiteralDoc(float) {}
+ explicit LiteralDoc(double) {}
+#endif
+ };
+
+ // Reference a label, resolving it to a printable representation.
+ //
+ // NOTE: The printable representation depends on the state of the label, so
+ // if we call resolve() when emitting & disassembling a branch instruction
+ // then it should be called before the label becomes Used, if emitting the
+ // branch can change the label's state.
+ //
+ // If the disassembler is not defined this returns a structure that is
+ // marked not valid.
+ LabelDoc refLabel(const Label* l);
+
+#ifdef JS_DISASM_SUPPORTED
+ // Spew the label information previously gathered by refLabel(), at a point
+ // where the label is referenced. The output is indented by targetIndent_
+ // and terminated by a newline.
+ void spewRef(const LabelDoc& target);
+
+ // Spew the label at the point where the label is bound. The output is
+ // indented by labelIndent_ and terminated by a newline.
+ void spewBind(const Label* label);
+
+ // Spew a retarget directive at the point where the retarget is recorded.
+ // The output is indented by labelIndent_ and terminated by a newline.
+ void spewRetarget(const Label* label, const Label* target);
+
+ // Format a literal value into the buffer. The buffer is always
+ // NUL-terminated even if this chops the formatted value.
+ void formatLiteral(const LiteralDoc& doc, char* buffer, size_t bufsize);
+
+ // Print any unbound labels, one per line, with normal label indent and with
+ // a comment indicating the label is not defined. Labels can be referenced
+ // but unbound in some legitimate cases, normally for traps. Printing them
+ // reduces confusion.
+ void spewOrphans();
+#endif
+
+ private:
+ Sprinter* printer_;
+#ifdef JS_DISASM_SUPPORTED
+ const char* labelIndent_;
+ const char* targetIndent_;
+ uint32_t spewNext_;
+ Node* nodes_;
+ uint32_t tag_;
+
+ // This global is used to disambiguate concurrently live assemblers, see
+ // comments in Disassembler-shared.cpp for why this is desirable.
+ //
+ // The variable is atomic to avoid any kind of complaint from thread
+ // sanitizers etc. However, trying to look at disassembly without using
+ // --no-threads is basically insane, so you can ignore the multi-threading
+ // implications here.
+ static mozilla::Atomic<uint32_t> counter_;
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_Disassembler_shared_h
diff --git a/js/src/jit/shared/IonAssemblerBuffer.h b/js/src/jit/shared/IonAssemblerBuffer.h
new file mode 100644
index 0000000000..170f098707
--- /dev/null
+++ b/js/src/jit/shared/IonAssemblerBuffer.h
@@ -0,0 +1,438 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_IonAssemblerBuffer_h
+#define jit_shared_IonAssemblerBuffer_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+#include "jit/ProcessExecutableMemory.h"
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+// The offset into a buffer, in bytes.
+class BufferOffset {
+ int offset;
+
+ public:
+ friend BufferOffset nextOffset();
+
+ BufferOffset() : offset(INT_MIN) {}
+
+ explicit BufferOffset(int offset_) : offset(offset_) {
+ MOZ_ASSERT(offset >= 0);
+ }
+
+ explicit BufferOffset(Label* l) : offset(l->offset()) {
+ MOZ_ASSERT(offset >= 0);
+ }
+
+ int getOffset() const { return offset; }
+ bool assigned() const { return offset != INT_MIN; }
+
+  // A BOffImm is a Branch Offset Immediate. It is an architecture-specific
+  // structure that holds the immediate for a PC-relative branch. diffB takes
+  // the label for the destination of the branch and encodes the immediate
+  // for the branch. This will need to be fixed up later, since a pool may be
+  // inserted between the branch and its destination.
+ template <class BOffImm>
+ BOffImm diffB(BufferOffset other) const {
+ if (!BOffImm::IsInRange(offset - other.offset)) {
+ return BOffImm();
+ }
+ return BOffImm(offset - other.offset);
+ }
+
+ template <class BOffImm>
+ BOffImm diffB(Label* other) const {
+ MOZ_ASSERT(other->bound());
+ if (!BOffImm::IsInRange(offset - other->offset())) {
+ return BOffImm();
+ }
+ return BOffImm(offset - other->offset());
+ }
+};
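+
+// For example, BufferOffset(a).diffB<BOffImm>(BufferOffset(b)) encodes the
+// signed delta a - b, and yields a default-constructed (invalid) BOffImm when
+// that delta does not fit the architecture's branch-immediate range.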
+
+inline bool operator<(BufferOffset a, BufferOffset b) {
+ return a.getOffset() < b.getOffset();
+}
+
+inline bool operator>(BufferOffset a, BufferOffset b) {
+ return a.getOffset() > b.getOffset();
+}
+
+inline bool operator<=(BufferOffset a, BufferOffset b) {
+ return a.getOffset() <= b.getOffset();
+}
+
+inline bool operator>=(BufferOffset a, BufferOffset b) {
+ return a.getOffset() >= b.getOffset();
+}
+
+inline bool operator==(BufferOffset a, BufferOffset b) {
+ return a.getOffset() == b.getOffset();
+}
+
+inline bool operator!=(BufferOffset a, BufferOffset b) {
+ return a.getOffset() != b.getOffset();
+}
+
+template <int SliceSize>
+class BufferSlice {
+ protected:
+ BufferSlice<SliceSize>* prev_;
+ BufferSlice<SliceSize>* next_;
+
+ size_t bytelength_;
+
+ public:
+ mozilla::Array<uint8_t, SliceSize> instructions;
+
+ public:
+ explicit BufferSlice() : prev_(nullptr), next_(nullptr), bytelength_(0) {}
+
+ size_t length() const { return bytelength_; }
+ static inline size_t Capacity() { return SliceSize; }
+
+ BufferSlice* getNext() const { return next_; }
+ BufferSlice* getPrev() const { return prev_; }
+
+ void setNext(BufferSlice<SliceSize>* next) {
+ MOZ_ASSERT(next_ == nullptr);
+ MOZ_ASSERT(next->prev_ == nullptr);
+ next_ = next;
+ next->prev_ = this;
+ }
+
+ void putBytes(size_t numBytes, const void* source) {
+ MOZ_ASSERT(bytelength_ + numBytes <= SliceSize);
+ if (source) {
+ memcpy(&instructions[length()], source, numBytes);
+ }
+ bytelength_ += numBytes;
+ }
+
+ MOZ_ALWAYS_INLINE
+ void putU32Aligned(uint32_t value) {
+ MOZ_ASSERT(bytelength_ + 4 <= SliceSize);
+ MOZ_ASSERT((bytelength_ & 3) == 0);
+ MOZ_ASSERT((uintptr_t(&instructions[0]) & 3) == 0);
+ *reinterpret_cast<uint32_t*>(&instructions[bytelength_]) = value;
+ bytelength_ += 4;
+ }
+};
+
+template <int SliceSize, class Inst>
+class AssemblerBuffer {
+ protected:
+ typedef BufferSlice<SliceSize> Slice;
+
+ // Doubly-linked list of BufferSlices, with the most recent in tail position.
+ Slice* head;
+ Slice* tail;
+
+ bool m_oom;
+
+  // How many bytes have been committed to the buffer thus far.
+  // Does not include the tail slice.
+ uint32_t bufferSize;
+
+ // How many bytes can be in the buffer. Normally this is
+ // MaxCodeBytesPerBuffer, but for pasteup buffers where we handle far jumps
+ // explicitly it can be larger.
+ uint32_t maxSize;
+
+ // Finger for speeding up accesses.
+ Slice* finger;
+ int finger_offset;
+
+ LifoAlloc lifoAlloc_;
+
+ public:
+ explicit AssemblerBuffer()
+ : head(nullptr),
+ tail(nullptr),
+ m_oom(false),
+ bufferSize(0),
+ maxSize(MaxCodeBytesPerBuffer),
+ finger(nullptr),
+ finger_offset(0),
+ lifoAlloc_(8192) {}
+
+ public:
+ bool isAligned(size_t alignment) const {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+ return !(size() & (alignment - 1));
+ }
+
+ void setUnlimited() { maxSize = MaxCodeBytesPerProcess; }
+
+ private:
+ Slice* newSlice(LifoAlloc& a) {
+ if (size() > maxSize - sizeof(Slice)) {
+ fail_oom();
+ return nullptr;
+ }
+ Slice* tmp = static_cast<Slice*>(a.alloc(sizeof(Slice)));
+ if (!tmp) {
+ fail_oom();
+ return nullptr;
+ }
+ return new (tmp) Slice;
+ }
+
+ public:
+ bool ensureSpace(size_t size) {
+ // Space can exist in the most recent Slice.
+ if (tail && tail->length() + size <= tail->Capacity()) {
+ // Simulate allocation failure even when we don't need a new slice.
+ if (js::oom::ShouldFailWithOOM()) {
+ return fail_oom();
+ }
+
+ return true;
+ }
+
+ // Otherwise, a new Slice must be added.
+ Slice* slice = newSlice(lifoAlloc_);
+ if (slice == nullptr) {
+ return fail_oom();
+ }
+
+ // If this is the first Slice in the buffer, add to head position.
+ if (!head) {
+ head = slice;
+ finger = slice;
+ finger_offset = 0;
+ }
+
+ // Finish the last Slice and add the new Slice to the linked list.
+ if (tail) {
+ bufferSize += tail->length();
+ tail->setNext(slice);
+ }
+ tail = slice;
+
+ return true;
+ }
+
+ BufferOffset putByte(uint8_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ BufferOffset putShort(uint16_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ BufferOffset putInt(uint32_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ MOZ_ALWAYS_INLINE
+ BufferOffset putU32Aligned(uint32_t value) {
+ if (!ensureSpace(sizeof(value))) {
+ return BufferOffset();
+ }
+
+ BufferOffset ret = nextOffset();
+ tail->putU32Aligned(value);
+ return ret;
+ }
+
+ // Add numBytes bytes to this buffer.
+ // The data must fit in a single slice.
+ BufferOffset putBytes(size_t numBytes, const void* inst) {
+ if (!ensureSpace(numBytes)) {
+ return BufferOffset();
+ }
+
+ BufferOffset ret = nextOffset();
+ tail->putBytes(numBytes, inst);
+ return ret;
+ }
+
+ // Add a potentially large amount of data to this buffer.
+  // The data may be distributed across multiple slices.
+ // Return the buffer offset of the first added byte.
+ BufferOffset putBytesLarge(size_t numBytes, const void* data) {
+ BufferOffset ret = nextOffset();
+ while (numBytes > 0) {
+ if (!ensureSpace(1)) {
+ return BufferOffset();
+ }
+ size_t avail = tail->Capacity() - tail->length();
+ size_t xfer = numBytes < avail ? numBytes : avail;
+ MOZ_ASSERT(xfer > 0, "ensureSpace should have allocated a slice");
+ tail->putBytes(xfer, data);
+ data = (const uint8_t*)data + xfer;
+ numBytes -= xfer;
+ }
+ return ret;
+ }
+
+ unsigned int size() const {
+ if (tail) {
+ return bufferSize + tail->length();
+ }
+ return bufferSize;
+ }
+ BufferOffset nextOffset() const { return BufferOffset(size()); }
+
+ bool oom() const { return m_oom; }
+
+ bool fail_oom() {
+ m_oom = true;
+#ifdef DEBUG
+ JitContext* context = MaybeGetJitContext();
+ if (context) {
+ context->setOOM();
+ }
+#endif
+ return false;
+ }
+
+ private:
+ void update_finger(Slice* finger_, int fingerOffset_) {
+ finger = finger_;
+ finger_offset = fingerOffset_;
+ }
+
+ static const unsigned SliceDistanceRequiringFingerUpdate = 3;
+
+ Inst* getInstForwards(BufferOffset off, Slice* start, int startOffset,
+ bool updateFinger = false) {
+ const int offset = off.getOffset();
+
+ int cursor = startOffset;
+ unsigned slicesSkipped = 0;
+
+ MOZ_ASSERT(offset >= cursor);
+
+ for (Slice* slice = start; slice != nullptr; slice = slice->getNext()) {
+ const int slicelen = slice->length();
+
+ // Is the offset within the bounds of this slice?
+ if (offset < cursor + slicelen) {
+ if (updateFinger ||
+ slicesSkipped >= SliceDistanceRequiringFingerUpdate) {
+ update_finger(slice, cursor);
+ }
+
+ MOZ_ASSERT(offset - cursor < (int)slice->length());
+ return (Inst*)&slice->instructions[offset - cursor];
+ }
+
+ cursor += slicelen;
+ slicesSkipped++;
+ }
+
+ MOZ_CRASH("Invalid instruction cursor.");
+ }
+
+ Inst* getInstBackwards(BufferOffset off, Slice* start, int startOffset,
+ bool updateFinger = false) {
+ const int offset = off.getOffset();
+
+ int cursor = startOffset; // First (lowest) offset in the start Slice.
+ unsigned slicesSkipped = 0;
+
+ MOZ_ASSERT(offset < int(cursor + start->length()));
+
+ for (Slice* slice = start; slice != nullptr;) {
+ // Is the offset within the bounds of this slice?
+ if (offset >= cursor) {
+ if (updateFinger ||
+ slicesSkipped >= SliceDistanceRequiringFingerUpdate) {
+ update_finger(slice, cursor);
+ }
+
+ MOZ_ASSERT(offset - cursor < (int)slice->length());
+ return (Inst*)&slice->instructions[offset - cursor];
+ }
+
+ // Move the cursor to the start of the previous slice.
+ Slice* prev = slice->getPrev();
+ cursor -= prev->length();
+
+ slice = prev;
+ slicesSkipped++;
+ }
+
+ MOZ_CRASH("Invalid instruction cursor.");
+ }
+
+ public:
+ Inst* getInstOrNull(BufferOffset off) {
+ if (!off.assigned()) {
+ return nullptr;
+ }
+ return getInst(off);
+ }
+
+ // Get a pointer to the instruction at offset |off| which must be within the
+ // bounds of the buffer. Use |getInstOrNull()| if |off| may be unassigned.
+ Inst* getInst(BufferOffset off) {
+ const int offset = off.getOffset();
+ // This function is hot, do not make the next line a RELEASE_ASSERT.
+ MOZ_ASSERT(off.assigned() && offset >= 0 && unsigned(offset) < size());
+
+ // Is the instruction in the last slice?
+ if (offset >= int(bufferSize)) {
+ return (Inst*)&tail->instructions[offset - bufferSize];
+ }
+
+ // How close is this offset to the previous one we looked up?
+ // If it is sufficiently far from the start and end of the buffer,
+ // use the finger to start midway through the list.
+ int finger_dist = abs(offset - finger_offset);
+ if (finger_dist < std::min(offset, int(bufferSize - offset))) {
+ if (finger_offset < offset) {
+ return getInstForwards(off, finger, finger_offset, true);
+ }
+ return getInstBackwards(off, finger, finger_offset, true);
+ }
+
+ // Is the instruction closer to the start or to the end?
+ if (offset < int(bufferSize - offset)) {
+ return getInstForwards(off, head, 0);
+ }
+
+ // The last slice was already checked above, so start at the
+ // second-to-last.
+ Slice* prev = tail->getPrev();
+ return getInstBackwards(off, prev, bufferSize - prev->length());
+ }
+
+ typedef AssemblerBuffer<SliceSize, Inst> ThisClass;
+
+ class AssemblerBufferInstIterator {
+ BufferOffset bo_;
+ ThisClass* buffer_;
+
+ public:
+ explicit AssemblerBufferInstIterator(BufferOffset bo, ThisClass* buffer)
+ : bo_(bo), buffer_(buffer) {}
+ void advance(int offset) { bo_ = BufferOffset(bo_.getOffset() + offset); }
+ Inst* next() {
+ advance(cur()->size());
+ return cur();
+ }
+ Inst* peek() {
+ return buffer_->getInst(BufferOffset(bo_.getOffset() + cur()->size()));
+ }
+ Inst* cur() const { return buffer_->getInst(bo_); }
+ };
+};
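+
+// Illustrative usage (Inst chosen as a raw 32-bit word for the sketch):
+//
+//   AssemblerBuffer<4096, uint32_t> buf;
+//   BufferOffset off = buf.putInt(0xd503201f);  // an AArch64 NOP encoding
+//   if (!buf.oom()) {
+//     uint32_t* inst = buf.getInst(off);
+//   }
+//
+// Slices are allocated from the internal LifoAlloc, and the finger cache makes
+// repeated getInst() lookups near the same offset cheap.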
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBuffer_h
diff --git a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
new file mode 100644
index 0000000000..4f615db12f
--- /dev/null
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -0,0 +1,1197 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_IonAssemblerBufferWithConstantPools_h
+#define jit_shared_IonAssemblerBufferWithConstantPools_h
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+#include "jit/JitSpewer.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+
+// [SMDOC] JIT AssemblerBuffer constant pooling (ARM/ARM64/MIPS)
+//
+// This code extends the AssemblerBuffer to support the pooling of values loaded
+// using program-counter relative addressing modes. This is necessary with the
+// ARM instruction set because it has a fixed instruction size that cannot
+// encode all values as immediate arguments in instructions. Pooling the values
+// allows the values to be placed in large chunks, which minimizes the number
+// of forced branches around them in the code. This is used for loading
+// floating point constants, for loading 32-bit constants on the ARMv6, for
+// absolute branch targets, and in the future will be needed for large branches
+// on the ARMv6.
+//
+// For simplicity of the implementation, the constant pools are always placed
+// after the loads referencing them. When a new constant pool load is added to
+// the assembler buffer, a corresponding pool entry is added to the current
+// pending pool. The finishPool() method copies the current pending pool entries
+// into the assembler buffer at the current offset and patches the pending
+// constant pool load instructions.
+//
+// Before inserting instructions or pool entries, it is necessary to determine
+// whether doing so would place a pending pool entry out of reach of an
+// instruction, and if so the pool must be dumped first. With the allocation
+// algorithm used below, recalculating all the distances between instructions
+// and their pool entries can be avoided by noting that there is a limiting
+// instruction and pool entry pair that does not change when more instructions
+// are inserted. Adding more instructions increases the distance between
+// instructions and their pool entries by the same amount for all such pairs.
+// This pair is recorded as the limiter, and it is updated when new pool
+// entries are added; see updateLimiter().
+//
+// The pools consist of: a guard instruction that branches around the pool, a
+// header word that helps identify a pool in the instruction stream, and then
+// the pool entries allocated in units of words. The guard instruction could be
+// omitted if control does not reach the pool, and this is referred to as a
+// natural guard below, but for simplicity the guard branch is always
+// emitted. The pool header is an identifiable word that in combination with the
+// guard uniquely identifies a pool in the instruction stream. The header also
+// encodes the pool size and a flag indicating if the guard is natural. It is
+// possible to iterate through the code instructions skipping or examining the
+// pools. E.g. it might be necessary to skip pools when searching for, or
+// patching, an instruction sequence.
+//
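+// A dumped pool thus looks roughly like this in the instruction stream:
+//
+//   ...                ; loads referencing pool entries
+//   b after_pool       ; guard branch (see Asm::WritePoolGuard below)
+//   <pool header word> ; see Asm::WritePoolHeader below
+//   <entry 0>
+//   <entry 1>
+//   ...
+// after_pool:
+//   ...                ; first instruction after the pool
+//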
+// It is often required to keep a reference to a pool entry, to patch it after
+// the buffer is finished. Each pool entry is assigned a unique index, counting
+// up from zero (see the poolEntryCount slot below). These can be mapped back to
+// the offset of the pool entry in the finished buffer, see poolEntryOffset().
+//
+// The code supports no-pool regions, and for these the size of the region, in
+// instructions, must be supplied. This size is used to determine if inserting
+// the instructions would place a pool entry out of range, and if so then a pool
+// is firstly flushed. The DEBUG code checks that the emitted code is within the
+// supplied size to detect programming errors. See enterNoPool() and
+// leaveNoPool().
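+//
+// A minimal usage sketch of a no-pool region (the instruction words are
+// hypothetical):
+//
+//   buffer.enterNoPool(2);   // at most 2 instructions; no pool may be dumped
+//   buffer.putInt(firstInstWord);
+//   buffer.putInt(secondInstWord);
+//   buffer.leaveNoPool();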
+
+// The only planned instruction sets that require inline constant pools are the
+// ARM, ARM64, and MIPS, and these all have fixed 32-bit sized instructions so
+// for simplicity the code below is specialized for fixed 32-bit sized
+// instructions and makes no attempt to support variable length
+// instructions. The base assembler buffer, which supports variable-width
+// instructions, is used by the x86 and x64 backends.
+
+// The AssemblerBufferWithConstantPools template class uses static callbacks to
+// the provided Asm template argument class:
+//
+// void Asm::InsertIndexIntoTag(uint8_t* load_, uint32_t index)
+//
+// When allocEntry() is called to add a constant pool load with an associated
+// constant pool entry, this callback is called to encode the index of the
+// allocated constant pool entry into the load instruction.
+//
+// After the constant pool has been placed, PatchConstantPoolLoad() is called
+// to update the load instruction with the right load offset.
+//
+// void Asm::WritePoolGuard(BufferOffset branch,
+// Instruction* dest,
+// BufferOffset afterPool)
+//
+// Write out the constant pool guard branch before emitting the pool.
+//
+// branch
+// Offset of the guard branch in the buffer.
+//
+// dest
+// Pointer into the buffer where the guard branch should be emitted. (Same
+// as getInst(branch)). Space for guardSize_ instructions has been reserved.
+//
+// afterPool
+// Offset of the first instruction after the constant pool. This includes
+// both pool entries and branch veneers added after the pool data.
+//
+// void Asm::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural)
+//
+// Write out the pool header which follows the guard branch.
+//
+// void Asm::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+//
+// Re-encode a load of a constant pool entry after the location of the
+// constant pool is known.
+//
+// The load instruction at loadAddr was previously passed to
+// InsertIndexIntoTag(). The constPoolAddr is the final address of the
+// constant pool in the assembler buffer.
+//
+// void Asm::PatchShortRangeBranchToVeneer(AssemblerBufferWithConstantPools*,
+// unsigned rangeIdx,
+// BufferOffset deadline,
+// BufferOffset veneer)
+//
+// Patch a short-range branch to jump through a veneer before it goes out of
+// range.
+//
+// rangeIdx, deadline
+// These arguments were previously passed to registerBranchDeadline(). It is
+// assumed that PatchShortRangeBranchToVeneer() knows how to compute the
+// offset of the short-range branch from this information.
+//
+// veneer
+// Space for a branch veneer, guaranteed to be <= deadline. At this
+// position, guardSize_ * InstSize bytes are allocated. They should be
+// initialized to the proper unconditional branch instruction.
+//
+// Unbound branches to the same unbound label are organized as a linked list:
+//
+// Label::offset -> Branch1 -> Branch2 -> Branch3 -> nil
+//
+// This callback should insert a new veneer branch into the list:
+//
+// Label::offset -> Branch1 -> Branch2 -> Veneer -> Branch3 -> nil
+//
+// When Assembler::bind() rewrites the branches with the real label offset, it
+// probably has to bind Branch2 to target the veneer branch instead of jumping
+// straight to the label.
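+//
+// A skeletal Asm policy class might look like this sketch (the class name is
+// illustrative; real backends implement these callbacks on their Assembler
+// class):
+//
+//   struct MyAsm {
+//     static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+//     static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+//     static void WritePoolGuard(BufferOffset branch, Instruction* dest,
+//                                BufferOffset afterPool);
+//     static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
+//     static void PatchShortRangeBranchToVeneer(
+//         AssemblerBufferWithConstantPools* buffer, unsigned rangeIdx,
+//         BufferOffset deadline, BufferOffset veneer);
+//   };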
+
+namespace js {
+namespace jit {
+
+// BranchDeadlineSet - Keep track of pending branch deadlines.
+//
+// Some architectures like arm and arm64 have branch instructions with limited
+// range. When assembling a forward branch, it is not always known if the final
+// target label will be in range of the branch instruction.
+//
+// The BranchDeadlineSet data structure is used to keep track of the set of
+// pending forward branches. It supports the following fast operations:
+//
+// 1. Get the earliest deadline in the set.
+// 2. Add a new branch deadline.
+// 3. Remove a branch deadline.
+//
+// Architectures may have different branch encodings with different ranges. Each
+// supported range is assigned a small integer starting at 0. This data
+// structure does not care about the actual range of branch instructions, just
+// the latest buffer offset that can be reached - the deadline offset.
+//
+// Branches are stored as (rangeIdx, deadline) tuples. The target-specific code
+// can compute the location of the branch itself from this information. This
+// data structure does not need to know.
+//
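+// A usage sketch (the offsets and the LifoAlloc are illustrative):
+//
+//   BranchDeadlineSet<2> deadlines(lifoAlloc);
+//   deadlines.addDeadline(0, BufferOffset(128));   // range 0, reach <= 128
+//   deadlines.addDeadline(1, BufferOffset(64));    // range 1, reach <= 64
+//   deadlines.earliestDeadline();                  // BufferOffset(64)
+//   deadlines.removeDeadline(1, BufferOffset(64)); // bound in range; drop it
+//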
+template <unsigned NumRanges>
+class BranchDeadlineSet {
+ // Maintain a list of pending deadlines for each range separately.
+ //
+ // The offsets in each vector are always kept in ascending order.
+ //
+ // Because we have a separate vector for different ranges, as forward
+ // branches are added to the assembler buffer, their deadlines will
+ // always be appended to the vector corresponding to their range.
+ //
+ // When binding labels, we expect a more-or-less LIFO order of branch
+ // resolutions. This would always hold if we had strictly structured control
+ // flow.
+ //
+ // We allow branch deadlines to be added and removed in any order, but
+ // performance is best in the expected case of near LIFO order.
+ //
+ typedef Vector<BufferOffset, 8, LifoAllocPolicy<Fallible>> RangeVector;
+
+ // We really just want "RangeVector deadline_[NumRanges];", but each vector
+ // needs to be initialized with a LifoAlloc, and C++ doesn't bend that way.
+ //
+ // Use raw aligned storage instead and explicitly construct NumRanges
+ // vectors in our constructor.
+ mozilla::AlignedStorage2<RangeVector[NumRanges]> deadlineStorage_;
+
+ // Always access the range vectors through this method.
+ RangeVector& vectorForRange(unsigned rangeIdx) {
+ MOZ_ASSERT(rangeIdx < NumRanges, "Invalid branch range index");
+ return (*deadlineStorage_.addr())[rangeIdx];
+ }
+
+ const RangeVector& vectorForRange(unsigned rangeIdx) const {
+ MOZ_ASSERT(rangeIdx < NumRanges, "Invalid branch range index");
+ return (*deadlineStorage_.addr())[rangeIdx];
+ }
+
+ // Maintain a precomputed earliest deadline at all times.
+ // This is unassigned only when all deadline vectors are empty.
+ BufferOffset earliest_;
+
+ // The range vector owning earliest_. Uninitialized when empty.
+ unsigned earliestRange_;
+
+ // Recompute the earliest deadline after it's been invalidated.
+ void recomputeEarliest() {
+ earliest_ = BufferOffset();
+ for (unsigned r = 0; r < NumRanges; r++) {
+ auto& vec = vectorForRange(r);
+ if (!vec.empty() && (!earliest_.assigned() || vec[0] < earliest_)) {
+ earliest_ = vec[0];
+ earliestRange_ = r;
+ }
+ }
+ }
+
+ // Update the earliest deadline if needed after inserting (rangeIdx,
+ // deadline). Always return true for convenience:
+ // return insert() && updateEarliest().
+ bool updateEarliest(unsigned rangeIdx, BufferOffset deadline) {
+ if (!earliest_.assigned() || deadline < earliest_) {
+ earliest_ = deadline;
+ earliestRange_ = rangeIdx;
+ }
+ return true;
+ }
+
+ public:
+ explicit BranchDeadlineSet(LifoAlloc& alloc) : earliestRange_(0) {
+ // Manually construct vectors in the uninitialized aligned storage.
+ // This is because C++ arrays can otherwise only be constructed with
+ // the default constructor.
+ for (unsigned r = 0; r < NumRanges; r++) {
+ new (&vectorForRange(r)) RangeVector(alloc);
+ }
+ }
+
+ ~BranchDeadlineSet() {
+ // Aligned storage doesn't destruct its contents automatically.
+ for (unsigned r = 0; r < NumRanges; r++) {
+ vectorForRange(r).~RangeVector();
+ }
+ }
+
+ // Is this set completely empty?
+ bool empty() const { return !earliest_.assigned(); }
+
+ // Get the total number of deadlines in the set.
+ size_t size() const {
+ size_t count = 0;
+ for (unsigned r = 0; r < NumRanges; r++) {
+ count += vectorForRange(r).length();
+ }
+ return count;
+ }
+
+ // Get the number of deadlines for the range with the most elements.
+ size_t maxRangeSize() const {
+ size_t count = 0;
+ for (unsigned r = 0; r < NumRanges; r++) {
+ count = std::max(count, vectorForRange(r).length());
+ }
+ return count;
+ }
+
+ // Get the first deadline that is still in the set.
+ BufferOffset earliestDeadline() const {
+ MOZ_ASSERT(!empty());
+ return earliest_;
+ }
+
+  // Get the range index corresponding to earliestDeadline().
+ unsigned earliestDeadlineRange() const {
+ MOZ_ASSERT(!empty());
+ return earliestRange_;
+ }
+
+ // Add a (rangeIdx, deadline) tuple to the set.
+ //
+ // It is assumed that this tuple is not already in the set.
+  // This function performs best if the added deadline is later than any
+ // existing deadline for the same range index.
+ //
+ // Return true if the tuple was added, false if the tuple could not be added
+ // because of an OOM error.
+ bool addDeadline(unsigned rangeIdx, BufferOffset deadline) {
+ MOZ_ASSERT(deadline.assigned(), "Can only store assigned buffer offsets");
+ // This is the vector where deadline should be saved.
+ auto& vec = vectorForRange(rangeIdx);
+
+ // Fast case: Simple append to the relevant array. This never affects
+ // the earliest deadline.
+ if (!vec.empty() && vec.back() < deadline) {
+ return vec.append(deadline);
+ }
+
+ // Fast case: First entry to the vector. We need to update earliest_.
+ if (vec.empty()) {
+ return vec.append(deadline) && updateEarliest(rangeIdx, deadline);
+ }
+
+ return addDeadlineSlow(rangeIdx, deadline);
+ }
+
+ private:
+ // General case of addDeadline. This is split into two functions such that
+ // the common case in addDeadline can be inlined while this part probably
+ // won't inline.
+ bool addDeadlineSlow(unsigned rangeIdx, BufferOffset deadline) {
+ auto& vec = vectorForRange(rangeIdx);
+
+ // Inserting into the middle of the vector. Use a log time binary search
+ // and a linear time insert().
+ // Is it worthwhile special-casing the empty vector?
+ auto at = std::lower_bound(vec.begin(), vec.end(), deadline);
+ MOZ_ASSERT(at == vec.end() || *at != deadline,
+ "Cannot insert duplicate deadlines");
+ return vec.insert(at, deadline) && updateEarliest(rangeIdx, deadline);
+ }
+
+ public:
+ // Remove a deadline from the set.
+ // If (rangeIdx, deadline) is not in the set, nothing happens.
+ void removeDeadline(unsigned rangeIdx, BufferOffset deadline) {
+ auto& vec = vectorForRange(rangeIdx);
+
+ if (vec.empty()) {
+ return;
+ }
+
+ if (deadline == vec.back()) {
+ // Expected fast case: Structured control flow causes forward
+ // branches to be bound in reverse order.
+ vec.popBack();
+ } else {
+ // Slow case: Binary search + linear erase.
+ auto where = std::lower_bound(vec.begin(), vec.end(), deadline);
+ if (where == vec.end() || *where != deadline) {
+ return;
+ }
+ vec.erase(where);
+ }
+ if (deadline == earliest_) {
+ recomputeEarliest();
+ }
+ }
+};
+
+// Specialization for architectures that don't need to track short-range
+// branches.
+template <>
+class BranchDeadlineSet<0u> {
+ public:
+ explicit BranchDeadlineSet(LifoAlloc& alloc) {}
+ bool empty() const { return true; }
+ size_t size() const { return 0; }
+ size_t maxRangeSize() const { return 0; }
+ BufferOffset earliestDeadline() const { MOZ_CRASH(); }
+ unsigned earliestDeadlineRange() const { MOZ_CRASH(); }
+ bool addDeadline(unsigned rangeIdx, BufferOffset deadline) { MOZ_CRASH(); }
+ void removeDeadline(unsigned rangeIdx, BufferOffset deadline) { MOZ_CRASH(); }
+};
+
+// The allocation unit size for pools.
+typedef int32_t PoolAllocUnit;
+
+// Hysteresis given to short-range branches.
+//
+// If any short-range branches will go out of range in the next N bytes,
+// generate a veneer for them in the current pool. The hysteresis prevents the
+// creation of many tiny constant pools for branch veneers.
+const size_t ShortRangeBranchHysteresis = 128;
+
+struct Pool {
+ private:
+  // The program-counter relative offset limit: the instruction set can only
+  // encode offsets below this value. Different classes of instructions might
+  // support different ranges, but for simplicity the minimum is used here, and
+  // for the ARM this is constrained to 1024 by the float load instructions.
+ const size_t maxOffset_;
+ // An offset to apply to program-counter relative offsets. The ARM has a
+ // bias of 8.
+ const unsigned bias_;
+
+ // The content of the pool entries.
+ Vector<PoolAllocUnit, 8, LifoAllocPolicy<Fallible>> poolData_;
+
+ // Flag that tracks OOM conditions. This is set after any append failed.
+ bool oom_;
+
+ // The limiting instruction and pool-entry pair. The instruction program
+ // counter relative offset of this limiting instruction will go out of range
+ // first as the pool position moves forward. It is more efficient to track
+ // just this limiting pair than to recheck all offsets when testing if the
+ // pool needs to be dumped.
+ //
+ // 1. The actual offset of the limiting instruction referencing the limiting
+ // pool entry.
+ BufferOffset limitingUser;
+ // 2. The pool entry index of the limiting pool entry.
+ unsigned limitingUsee;
+
+ public:
+ // A record of the code offset of instructions that reference pool
+  // entries. These instructions need to be patched when the actual positions
+  // of the instructions and pools are known, and for the code below this
+  // occurs when each pool is finished; see finishPool().
+ Vector<BufferOffset, 8, LifoAllocPolicy<Fallible>> loadOffsets;
+
+  // Create a Pool. Don't allocate anything from lifoAlloc, just capture its
+ // reference.
+ explicit Pool(size_t maxOffset, unsigned bias, LifoAlloc& lifoAlloc)
+ : maxOffset_(maxOffset),
+ bias_(bias),
+ poolData_(lifoAlloc),
+ oom_(false),
+ limitingUser(),
+ limitingUsee(INT_MIN),
+ loadOffsets(lifoAlloc) {}
+
+ // If poolData() returns nullptr then oom_ will also be true.
+ const PoolAllocUnit* poolData() const { return poolData_.begin(); }
+
+ unsigned numEntries() const { return poolData_.length(); }
+
+ size_t getPoolSize() const { return numEntries() * sizeof(PoolAllocUnit); }
+
+ bool oom() const { return oom_; }
+
+ // Update the instruction/pool-entry pair that limits the position of the
+ // pool. The nextInst is the actual offset of the new instruction being
+ // allocated.
+ //
+ // This is comparing the offsets, see checkFull() below for the equation,
+ // but common expressions on both sides have been canceled from the ranges
+ // being compared. Notably, the poolOffset cancels out, so the limiting pair
+ // does not depend on where the pool is placed.
+ void updateLimiter(BufferOffset nextInst) {
+ ptrdiff_t oldRange =
+ limitingUsee * sizeof(PoolAllocUnit) - limitingUser.getOffset();
+ ptrdiff_t newRange = getPoolSize() - nextInst.getOffset();
+ if (!limitingUser.assigned() || newRange > oldRange) {
+ // We have a new largest range!
+ limitingUser = nextInst;
+ limitingUsee = numEntries();
+ }
+ }
+
+ // Check if inserting a pool at the actual offset poolOffset would place
+ // pool entries out of reach. This is called before inserting instructions
+ // to check that doing so would not push pool entries out of reach, and if
+ // so then the pool would need to be firstly dumped. The poolOffset is the
+ // first word of the pool, after the guard and header and alignment fill.
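+  //
+  // Worked example (assumed ARM-like values, for illustration only):
+  // maxOffset_ = 1024, bias_ = 8, limitingUser at offset 100, limitingUsee = 3.
+  // With poolOffset = 1100 the limiting entry ends up
+  // 1100 + 3 * sizeof(PoolAllocUnit) - (100 + 8) = 1004 bytes from its user,
+  // which is < 1024, so the pool is not yet full. With poolOffset = 1120 the
+  // distance is 1024, which is out of range, so the pool must be dumped first.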
+ bool checkFull(size_t poolOffset) const {
+ // Not full if there are no uses.
+ if (!limitingUser.assigned()) {
+ return false;
+ }
+ size_t offset = poolOffset + limitingUsee * sizeof(PoolAllocUnit) -
+ (limitingUser.getOffset() + bias_);
+ return offset >= maxOffset_;
+ }
+
+ static const unsigned OOM_FAIL = unsigned(-1);
+
+ unsigned insertEntry(unsigned num, uint8_t* data, BufferOffset off,
+ LifoAlloc& lifoAlloc) {
+ if (oom_) {
+ return OOM_FAIL;
+ }
+ unsigned ret = numEntries();
+ if (!poolData_.append((PoolAllocUnit*)data, num) ||
+ !loadOffsets.append(off)) {
+ oom_ = true;
+ return OOM_FAIL;
+ }
+ return ret;
+ }
+
+ void reset() {
+ poolData_.clear();
+ loadOffsets.clear();
+
+ limitingUser = BufferOffset();
+ limitingUsee = -1;
+ }
+};
+
+// Template arguments:
+//
+// SliceSize
+// Number of bytes in each allocated BufferSlice. See
+// AssemblerBuffer::SliceSize.
+//
+// InstSize
+// Size in bytes of the fixed-size instructions. This should be equal to
+// sizeof(Inst). This is only needed here because the buffer is defined before
+// the Instruction.
+//
+// Inst
+// The actual type used to represent instructions. This is only really used as
+// the return type of the getInst() method.
+//
+// Asm
+// Class defining the needed static callback functions. See documentation of
+// the Asm::* callbacks above.
+//
+// NumShortBranchRanges
+// The number of short branch ranges to support. This can be 0 if no support
+// for tracking short range branches is needed. The
+// AssemblerBufferWithConstantPools class does not need to know what the range
+// of branches is - it deals in branch 'deadlines', which are the last buffer
+// positions that a short-range forward branch can reach. It is assumed that
+// the Asm class is able to find the actual branch instruction given a
+// (range-index, deadline) pair.
+//
+//
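+// For example, an ARM-style backend with 4-byte instructions might instantiate
+// the buffer roughly as follows (values are illustrative, not the actual
+// backend configuration):
+//
+//   typedef AssemblerBufferWithConstantPools<1024, 4, Instruction, Assembler>
+//       ARMBuffer;
+//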
+template <size_t SliceSize, size_t InstSize, class Inst, class Asm,
+ unsigned NumShortBranchRanges = 0>
+struct AssemblerBufferWithConstantPools
+ : public AssemblerBuffer<SliceSize, Inst> {
+ private:
+ // The PoolEntry index counter. Each PoolEntry is given a unique index,
+ // counting up from zero, and these can be mapped back to the actual pool
+ // entry offset after finishing the buffer, see poolEntryOffset().
+ size_t poolEntryCount;
+
+ public:
+ class PoolEntry {
+ size_t index_;
+
+ public:
+ explicit PoolEntry(size_t index) : index_(index) {}
+
+ PoolEntry() : index_(-1) {}
+
+ size_t index() const { return index_; }
+ };
+
+ private:
+ typedef AssemblerBuffer<SliceSize, Inst> Parent;
+ using typename Parent::Slice;
+
+ // The size of a pool guard, in instructions. A branch around the pool.
+ const unsigned guardSize_;
+ // The size of the header that is put at the beginning of a full pool, in
+ // instruction sized units.
+ const unsigned headerSize_;
+
+ // The maximum pc relative offset encoded in instructions that reference
+ // pool entries. This is generally set to the maximum offset that can be
+ // encoded by the instructions, but for testing can be lowered to affect the
+ // pool placement and frequency of pool placement.
+ const size_t poolMaxOffset_;
+
+ // The bias on pc relative addressing mode offsets, in units of bytes. The
+ // ARM has a bias of 8 bytes.
+ const unsigned pcBias_;
+
+ // The current working pool. Copied out as needed before resetting.
+ Pool pool_;
+
+  // The alignment to which the instruction buffer should be aligned.
+ const size_t instBufferAlign_;
+
+ struct PoolInfo {
+ // The index of the first entry in this pool.
+ // Pool entries are numbered uniquely across all pools, starting from 0.
+ unsigned firstEntryIndex;
+
+ // The location of this pool's first entry in the main assembler buffer.
+ // Note that the pool guard and header come before this offset which
+ // points directly at the data.
+ BufferOffset offset;
+
+ explicit PoolInfo(unsigned index, BufferOffset data)
+ : firstEntryIndex(index), offset(data) {}
+ };
+
+ // Info for each pool that has already been dumped. This does not include
+ // any entries in pool_.
+ Vector<PoolInfo, 8, LifoAllocPolicy<Fallible>> poolInfo_;
+
+ // Set of short-range forward branches that have not yet been bound.
+ // We may need to insert veneers if the final label turns out to be out of
+ // range.
+ //
+ // This set stores (rangeIdx, deadline) pairs instead of the actual branch
+ // locations.
+ BranchDeadlineSet<NumShortBranchRanges> branchDeadlines_;
+
+ // When true dumping pools is inhibited.
+ bool canNotPlacePool_;
+
+#ifdef DEBUG
+ // State for validating the 'maxInst' argument to enterNoPool().
+ // The buffer offset when entering the no-pool region.
+ size_t canNotPlacePoolStartOffset_;
+ // The maximum number of word sized instructions declared for the no-pool
+ // region.
+ size_t canNotPlacePoolMaxInst_;
+#endif
+
+ // Instruction to use for alignment fill.
+ const uint32_t alignFillInst_;
+
+ // Insert a number of NOP instructions between each requested instruction at
+ // all locations at which a pool can potentially spill. This is useful for
+ // checking that instruction locations are correctly referenced and/or
+ // followed.
+ const uint32_t nopFillInst_;
+ const unsigned nopFill_;
+
+  // For inhibiting the insertion of fill NOPs, e.g. while the buffer is itself
+  // inserting NOP fill or alignment padding.
+ bool inhibitNops_;
+
+ private:
+ // The buffer slices are in a double linked list.
+ Slice* getHead() const { return this->head; }
+ Slice* getTail() const { return this->tail; }
+
+ public:
+ AssemblerBufferWithConstantPools(unsigned guardSize, unsigned headerSize,
+ size_t instBufferAlign, size_t poolMaxOffset,
+ unsigned pcBias, uint32_t alignFillInst,
+ uint32_t nopFillInst, unsigned nopFill = 0)
+ : poolEntryCount(0),
+ guardSize_(guardSize),
+ headerSize_(headerSize),
+ poolMaxOffset_(poolMaxOffset),
+ pcBias_(pcBias),
+ pool_(poolMaxOffset, pcBias, this->lifoAlloc_),
+ instBufferAlign_(instBufferAlign),
+ poolInfo_(this->lifoAlloc_),
+ branchDeadlines_(this->lifoAlloc_),
+ canNotPlacePool_(false),
+#ifdef DEBUG
+ canNotPlacePoolStartOffset_(0),
+ canNotPlacePoolMaxInst_(0),
+#endif
+ alignFillInst_(alignFillInst),
+ nopFillInst_(nopFillInst),
+ nopFill_(nopFill),
+ inhibitNops_(false) {
+ }
+
+ private:
+ size_t sizeExcludingCurrentPool() const {
+ // Return the actual size of the buffer, excluding the current pending
+ // pool.
+ return this->nextOffset().getOffset();
+ }
+
+ public:
+ size_t size() const {
+ // Return the current actual size of the buffer. This is only accurate
+ // if there are no pending pool entries to dump, check.
+ MOZ_ASSERT_IF(!this->oom(), pool_.numEntries() == 0);
+ return sizeExcludingCurrentPool();
+ }
+
+ private:
+ void insertNopFill() {
+ // Insert fill for testing.
+ if (nopFill_ > 0 && !inhibitNops_ && !canNotPlacePool_) {
+ inhibitNops_ = true;
+
+ // Fill using a branch-nop rather than a NOP so this can be
+ // distinguished and skipped.
+ for (size_t i = 0; i < nopFill_; i++) {
+ putInt(nopFillInst_);
+ }
+
+ inhibitNops_ = false;
+ }
+ }
+
+ static const unsigned OOM_FAIL = unsigned(-1);
+ static const unsigned DUMMY_INDEX = unsigned(-2);
+
+ // Check if it is possible to add numInst instructions and numPoolEntries
+ // constant pool entries without needing to flush the current pool.
+ bool hasSpaceForInsts(unsigned numInsts, unsigned numPoolEntries) const {
+ size_t nextOffset = sizeExcludingCurrentPool();
+ // Earliest starting offset for the current pool after adding numInsts.
+ // This is the beginning of the pool entries proper, after inserting a
+ // guard branch + pool header.
+ size_t poolOffset =
+ nextOffset + (numInsts + guardSize_ + headerSize_) * InstSize;
+
+ // Any constant pool loads that would go out of range?
+ if (pool_.checkFull(poolOffset)) {
+ return false;
+ }
+
+ // Any short-range branch that would go out of range?
+ if (!branchDeadlines_.empty()) {
+ size_t deadline = branchDeadlines_.earliestDeadline().getOffset();
+ size_t poolEnd = poolOffset + pool_.getPoolSize() +
+ numPoolEntries * sizeof(PoolAllocUnit);
+
+      // When NumShortBranchRanges > 1, it is possible for branch deadlines to
+ // expire faster than we can insert veneers. Suppose branches are 4 bytes
+ // each, we could have the following deadline set:
+ //
+ // Range 0: 40, 44, 48
+ // Range 1: 44, 48
+ //
+ // It is not good enough to start inserting veneers at the 40 deadline; we
+ // would not be able to create veneers for the second 44 deadline.
+ // Instead, we need to start at 32:
+ //
+ // 32: veneer(40)
+ // 36: veneer(44)
+ // 40: veneer(44)
+ // 44: veneer(48)
+ // 48: veneer(48)
+ //
+ // This is a pretty conservative solution to the problem: If we begin at
+ // the earliest deadline, we can always emit all veneers for the range
+ // that currently has the most pending deadlines. That may not leave room
+ // for veneers for the remaining ranges, so reserve space for those
+ // secondary range veneers assuming the worst case deadlines.
+
+ // Total pending secondary range veneer size.
+ size_t secondaryVeneers = guardSize_ * (branchDeadlines_.size() -
+ branchDeadlines_.maxRangeSize());
+
+ if (deadline < poolEnd + secondaryVeneers) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ unsigned insertEntryForwards(unsigned numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data) {
+ // If inserting pool entries then find a new limiter before we do the
+ // range check.
+ if (numPoolEntries) {
+ pool_.updateLimiter(BufferOffset(sizeExcludingCurrentPool()));
+ }
+
+ if (!hasSpaceForInsts(numInst, numPoolEntries)) {
+ if (numPoolEntries) {
+ JitSpew(JitSpew_Pools, "Inserting pool entry caused a spill");
+ } else {
+ JitSpew(JitSpew_Pools, "Inserting instruction(%zu) caused a spill",
+ sizeExcludingCurrentPool());
+ }
+
+ finishPool(numInst * InstSize);
+ if (this->oom()) {
+ return OOM_FAIL;
+ }
+ return insertEntryForwards(numInst, numPoolEntries, inst, data);
+ }
+ if (numPoolEntries) {
+ unsigned result = pool_.insertEntry(numPoolEntries, data,
+ this->nextOffset(), this->lifoAlloc_);
+ if (result == Pool::OOM_FAIL) {
+ this->fail_oom();
+ return OOM_FAIL;
+ }
+ return result;
+ }
+
+ // The pool entry index is returned above when allocating an entry, but
+ // when not allocating an entry a dummy value is returned - it is not
+ // expected to be used by the caller.
+ return DUMMY_INDEX;
+ }
+
+ public:
+ // Get the next buffer offset where an instruction would be inserted.
+ // This may flush the current constant pool before returning nextOffset().
+ BufferOffset nextInstrOffset(int numInsts = 1) {
+ if (!hasSpaceForInsts(numInsts, /* numPoolEntries= */ 0)) {
+ JitSpew(JitSpew_Pools,
+ "nextInstrOffset @ %d caused a constant pool spill",
+ this->nextOffset().getOffset());
+ finishPool(ShortRangeBranchHysteresis);
+ }
+ return this->nextOffset();
+ }
+
+ MOZ_NEVER_INLINE
+ BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data,
+ PoolEntry* pe = nullptr) {
+ // The allocation of pool entries is not supported in a no-pool region,
+ // check.
+ MOZ_ASSERT_IF(numPoolEntries, !canNotPlacePool_);
+
+ if (this->oom()) {
+ return BufferOffset();
+ }
+
+ insertNopFill();
+
+#ifdef JS_JITSPEW
+ if (numPoolEntries && JitSpewEnabled(JitSpew_Pools)) {
+ JitSpew(JitSpew_Pools, "Inserting %d entries into pool", numPoolEntries);
+ JitSpewStart(JitSpew_Pools, "data is: 0x");
+ size_t length = numPoolEntries * sizeof(PoolAllocUnit);
+ for (unsigned idx = 0; idx < length; idx++) {
+ JitSpewCont(JitSpew_Pools, "%02x", data[length - idx - 1]);
+ if (((idx & 3) == 3) && (idx + 1 != length)) {
+ JitSpewCont(JitSpew_Pools, "_");
+ }
+ }
+ JitSpewFin(JitSpew_Pools);
+ }
+#endif
+
+ // Insert the pool value.
+ unsigned index = insertEntryForwards(numInst, numPoolEntries, inst, data);
+ if (this->oom()) {
+ return BufferOffset();
+ }
+
+ // Now to get an instruction to write.
+ PoolEntry retPE;
+ if (numPoolEntries) {
+ JitSpew(JitSpew_Pools, "Entry has index %u, offset %zu", index,
+ sizeExcludingCurrentPool());
+ Asm::InsertIndexIntoTag(inst, index);
+ // Figure out the offset within the pool entries.
+ retPE = PoolEntry(poolEntryCount);
+ poolEntryCount += numPoolEntries;
+ }
+ // Now inst is a valid thing to insert into the instruction stream.
+ if (pe != nullptr) {
+ *pe = retPE;
+ }
+ return this->putBytes(numInst * InstSize, inst);
+ }
+
+ // putInt is the workhorse for the assembler and higher-level buffer
+ // abstractions: it places one instruction into the instruction stream.
+ // Under normal circumstances putInt should just check that the constant
+ // pool does not need to be flushed, that there is space for the single word
+ // of the instruction, and write that word and update the buffer pointer.
+ //
+ // To do better here we need a status variable that handles both nopFill_
+ // and capacity, so that we can quickly know whether to go the slow path.
+ // That could be a variable that has the remaining number of simple
+ // instructions that can be inserted before a more expensive check,
+ // which is set to zero when nopFill_ is set.
+ //
+ // We assume that we don't have to check this->oom() if there is space to
+ // insert a plain instruction; there will always come a later time when it
+ // will be checked anyway.
+
+ MOZ_ALWAYS_INLINE
+ BufferOffset putInt(uint32_t value) {
+ if (nopFill_ ||
+ !hasSpaceForInsts(/* numInsts= */ 1, /* numPoolEntries= */ 0)) {
+ return allocEntry(1, 0, (uint8_t*)&value, nullptr, nullptr);
+ }
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ return this->putU32Aligned(value);
+#else
+ return this->AssemblerBuffer<SliceSize, Inst>::putInt(value);
+#endif
+ }
+
+ // Register a short-range branch deadline.
+ //
+ // After inserting a short-range forward branch, call this method to
+ // register the branch 'deadline' which is the last buffer offset that the
+ // branch instruction can reach.
+ //
+ // When the branch is bound to a destination label, call
+ // unregisterBranchDeadline() to stop tracking this branch,
+ //
+ // If the assembled code is about to exceed the registered branch deadline,
+ // and unregisterBranchDeadline() has not yet been called, an
+ // instruction-sized constant pool entry is allocated before the branch
+ // deadline.
+ //
+ // rangeIdx
+ // A number < NumShortBranchRanges identifying the range of the branch.
+ //
+ // deadline
+  //   The highest buffer offset that the short-range branch can reach
+ // directly.
+ //
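+  // As a sketch (offsets and ranges are hypothetical): after emitting a
+  // short-range branch at offset 40 with a reach of 32768 bytes, a backend
+  // would call registerBranchDeadline(0, BufferOffset(40 + 32768)) and later,
+  // when Assembler::bind() resolves the branch in range, call
+  // unregisterBranchDeadline() with the same (rangeIdx, deadline) pair.
+  //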
+ void registerBranchDeadline(unsigned rangeIdx, BufferOffset deadline) {
+ if (!this->oom() && !branchDeadlines_.addDeadline(rangeIdx, deadline)) {
+ this->fail_oom();
+ }
+ }
+
+ // Un-register a short-range branch deadline.
+ //
+ // When a short-range branch has been successfully bound to its destination
+  // label, call this function to stop tracking the branch.
+ //
+ // The (rangeIdx, deadline) pair must be previously registered.
+ //
+ void unregisterBranchDeadline(unsigned rangeIdx, BufferOffset deadline) {
+ if (!this->oom()) {
+ branchDeadlines_.removeDeadline(rangeIdx, deadline);
+ }
+ }
+
+ private:
+ // Are any short-range branches about to expire?
+ bool hasExpirableShortRangeBranches(size_t reservedBytes) const {
+ if (branchDeadlines_.empty()) {
+ return false;
+ }
+
+ // Include branches that would expire in the next N bytes. The reservedBytes
+ // argument avoids the needless creation of many tiny constant pools.
+ //
+    // Because reservedBytes could be any size, such as SIZE_MAX in the case of
+    // flushPool(), we have to check for overflow when comparing the deadline
+    // with our expected reserved bytes.
+ size_t deadline = branchDeadlines_.earliestDeadline().getOffset();
+ using CheckedSize = mozilla::CheckedInt<size_t>;
+ CheckedSize current(this->nextOffset().getOffset());
+ CheckedSize poolFreeSpace(reservedBytes);
+ auto future = current + poolFreeSpace;
+ return !future.isValid() || deadline < future.value();
+ }
+
+ bool isPoolEmptyFor(size_t bytes) const {
+ return pool_.numEntries() == 0 && !hasExpirableShortRangeBranches(bytes);
+ }
+ void finishPool(size_t reservedBytes) {
+ JitSpew(JitSpew_Pools, "Attempting to finish pool %zu with %u entries.",
+ poolInfo_.length(), pool_.numEntries());
+
+ if (reservedBytes < ShortRangeBranchHysteresis) {
+ reservedBytes = ShortRangeBranchHysteresis;
+ }
+
+ if (isPoolEmptyFor(reservedBytes)) {
+ // If there is no data in the pool being dumped, don't dump anything.
+ JitSpew(JitSpew_Pools, "Aborting because the pool is empty");
+ return;
+ }
+
+ // Should not be placing a pool in a no-pool region, check.
+ MOZ_ASSERT(!canNotPlacePool_);
+
+ // Dump the pool with a guard branch around the pool.
+ BufferOffset guard = this->putBytes(guardSize_ * InstSize, nullptr);
+ BufferOffset header = this->putBytes(headerSize_ * InstSize, nullptr);
+ BufferOffset data = this->putBytesLarge(pool_.getPoolSize(),
+ (const uint8_t*)pool_.poolData());
+ if (this->oom()) {
+ return;
+ }
+
+ // Now generate branch veneers for any short-range branches that are
+ // about to expire.
+ while (hasExpirableShortRangeBranches(reservedBytes)) {
+ unsigned rangeIdx = branchDeadlines_.earliestDeadlineRange();
+ BufferOffset deadline = branchDeadlines_.earliestDeadline();
+
+ // Stop tracking this branch. The Asm callback below may register
+ // new branches to track.
+ branchDeadlines_.removeDeadline(rangeIdx, deadline);
+
+ // Make room for the veneer. Same as a pool guard branch.
+ BufferOffset veneer = this->putBytes(guardSize_ * InstSize, nullptr);
+ if (this->oom()) {
+ return;
+ }
+
+ // Fix the branch so it targets the veneer.
+ // The Asm class knows how to find the original branch given the
+ // (rangeIdx, deadline) pair.
+ Asm::PatchShortRangeBranchToVeneer(this, rangeIdx, deadline, veneer);
+ }
+
+ // We only reserved space for the guard branch and pool header.
+ // Fill them in.
+ BufferOffset afterPool = this->nextOffset();
+ Asm::WritePoolGuard(guard, this->getInst(guard), afterPool);
+ Asm::WritePoolHeader((uint8_t*)this->getInst(header), &pool_, false);
+
+ // With the pool's final position determined it is now possible to patch
+ // the instructions that reference entries in this pool, and this is
+ // done incrementally as each pool is finished.
+ size_t poolOffset = data.getOffset();
+
+ unsigned idx = 0;
+ for (BufferOffset* iter = pool_.loadOffsets.begin();
+ iter != pool_.loadOffsets.end(); ++iter, ++idx) {
+ // All entries should be before the pool.
+ MOZ_ASSERT(iter->getOffset() < guard.getOffset());
+
+ // Everything here is known so we can safely do the necessary
+ // substitutions.
+ Inst* inst = this->getInst(*iter);
+ size_t codeOffset = poolOffset - iter->getOffset();
+
+ // That is, PatchConstantPoolLoad wants to be handed the address of
+ // the pool entry that is being loaded. We need to do a non-trivial
+ // amount of math here, since the pool that we've made does not
+ // actually reside there in memory.
+ JitSpew(JitSpew_Pools, "Fixing entry %d offset to %zu", idx, codeOffset);
+ Asm::PatchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset);
+ }
+
+ // Record the pool info.
+ unsigned firstEntry = poolEntryCount - pool_.numEntries();
+ if (!poolInfo_.append(PoolInfo(firstEntry, data))) {
+ this->fail_oom();
+ return;
+ }
+
+ // Reset everything to the state that it was in when we started.
+ pool_.reset();
+ }
+
+ public:
+ void flushPool() {
+ if (this->oom()) {
+ return;
+ }
+ JitSpew(JitSpew_Pools, "Requesting a pool flush");
+ finishPool(SIZE_MAX);
+ }
+
+ void enterNoPool(size_t maxInst) {
+ if (this->oom()) {
+ return;
+ }
+ // Don't allow re-entry.
+ MOZ_ASSERT(!canNotPlacePool_);
+ insertNopFill();
+
+ // Check if the pool will spill by adding maxInst instructions, and if
+ // so then finish the pool before entering the no-pool region. It is
+ // assumed that no pool entries are allocated in a no-pool region and
+ // this is asserted when allocating entries.
+ if (!hasSpaceForInsts(maxInst, 0)) {
+ JitSpew(JitSpew_Pools, "No-Pool instruction(%zu) caused a spill.",
+ sizeExcludingCurrentPool());
+ finishPool(maxInst * InstSize);
+ if (this->oom()) {
+ return;
+ }
+ MOZ_ASSERT(hasSpaceForInsts(maxInst, 0));
+ }
+
+#ifdef DEBUG
+ // Record the buffer position to allow validating maxInst when leaving
+ // the region.
+ canNotPlacePoolStartOffset_ = this->nextOffset().getOffset();
+ canNotPlacePoolMaxInst_ = maxInst;
+#endif
+
+ canNotPlacePool_ = true;
+ }
+
+ void leaveNoPool() {
+ if (this->oom()) {
+ canNotPlacePool_ = false;
+ return;
+ }
+ MOZ_ASSERT(canNotPlacePool_);
+ canNotPlacePool_ = false;
+
+ // Validate the maxInst argument supplied to enterNoPool().
+ MOZ_ASSERT(this->nextOffset().getOffset() - canNotPlacePoolStartOffset_ <=
+ canNotPlacePoolMaxInst_ * InstSize);
+ }
+
+ void enterNoNops() {
+ MOZ_ASSERT(!inhibitNops_);
+ inhibitNops_ = true;
+ }
+ void leaveNoNops() {
+ MOZ_ASSERT(inhibitNops_);
+ inhibitNops_ = false;
+ }
+ void assertNoPoolAndNoNops() {
+ MOZ_ASSERT(inhibitNops_);
+ MOZ_ASSERT_IF(!this->oom(), isPoolEmptyFor(InstSize) || canNotPlacePool_);
+ }
+
+ void align(unsigned alignment) { align(alignment, alignFillInst_); }
+
+ void align(unsigned alignment, uint32_t pattern) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+ MOZ_ASSERT(alignment >= InstSize);
+
+    // A pool may need to be dumped at this point, so insert NOP fill here.
+ insertNopFill();
+
+ // Check if the code position can be aligned without dumping a pool.
+ unsigned requiredFill = sizeExcludingCurrentPool() & (alignment - 1);
+ if (requiredFill == 0) {
+ return;
+ }
+ requiredFill = alignment - requiredFill;
+
+ // Add an InstSize because it is probably not useful for a pool to be
+ // dumped at the aligned code position.
+ if (!hasSpaceForInsts(requiredFill / InstSize + 1, 0)) {
+ // Alignment would cause a pool dump, so dump the pool now.
+ JitSpew(JitSpew_Pools, "Alignment of %d at %zu caused a spill.",
+ alignment, sizeExcludingCurrentPool());
+ finishPool(requiredFill);
+ }
+
+ bool prevInhibitNops = inhibitNops_;
+ inhibitNops_ = true;
+ while ((sizeExcludingCurrentPool() & (alignment - 1)) && !this->oom()) {
+ putInt(pattern);
+ }
+ inhibitNops_ = prevInhibitNops;
+ }
+
+ public:
+ void executableCopy(uint8_t* dest) {
+ if (this->oom()) {
+ return;
+ }
+ // The pools should have all been flushed, check.
+ MOZ_ASSERT(pool_.numEntries() == 0);
+ for (Slice* cur = getHead(); cur != nullptr; cur = cur->getNext()) {
+ memcpy(dest, &cur->instructions[0], cur->length());
+ dest += cur->length();
+ }
+ }
+
+ bool appendRawCode(const uint8_t* code, size_t numBytes) {
+ if (this->oom()) {
+ return false;
+ }
+ // The pools should have all been flushed, check.
+ MOZ_ASSERT(pool_.numEntries() == 0);
+ while (numBytes > SliceSize) {
+ this->putBytes(SliceSize, code);
+ numBytes -= SliceSize;
+ code += SliceSize;
+ }
+ this->putBytes(numBytes, code);
+ return !this->oom();
+ }
+
+ public:
+ size_t poolEntryOffset(PoolEntry pe) const {
+ MOZ_ASSERT(pe.index() < poolEntryCount - pool_.numEntries(),
+ "Invalid pool entry, or not flushed yet.");
+ // Find the pool containing pe.index().
+ // The array is sorted, so we can use a binary search.
+ auto b = poolInfo_.begin(), e = poolInfo_.end();
+ // A note on asymmetric types in the upper_bound comparator:
+ // http://permalink.gmane.org/gmane.comp.compilers.clang.devel/10101
+ auto i = std::upper_bound(b, e, pe.index(),
+ [](size_t value, const PoolInfo& entry) {
+ return value < entry.firstEntryIndex;
+ });
+ // Since upper_bound finds the first pool greater than pe,
+ // we want the previous one which is the last one less than or equal.
+ MOZ_ASSERT(i != b, "PoolInfo not sorted or empty?");
+ --i;
+ // The i iterator now points to the pool containing pe.index.
+ MOZ_ASSERT(i->firstEntryIndex <= pe.index() &&
+ (i + 1 == e || (i + 1)->firstEntryIndex > pe.index()));
+ // Compute the byte offset into the pool.
+ unsigned relativeIndex = pe.index() - i->firstEntryIndex;
+ return i->offset.getOffset() + relativeIndex * sizeof(PoolAllocUnit);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBufferWithConstantPools_h
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
new file mode 100644
index 0000000000..03b0ccef53
--- /dev/null
+++ b/js/src/jit/shared/LIR-shared.h
@@ -0,0 +1,4272 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_LIR_shared_h
+#define jit_shared_LIR_shared_h
+
+#include "mozilla/Maybe.h"
+#include "jit/AtomicOp.h"
+#include "jit/shared/Assembler-shared.h"
+#include "util/Memory.h"
+
+// This file declares LIR instructions that are common to every platform.
+
+namespace js {
+namespace jit {
+
+LIR_OPCODE_CLASS_GENERATED
+
+#ifdef FUZZING_JS_FUZZILLI
+class LFuzzilliHashT : public LInstructionHelper<1, 1, 2> {
+ public:
+ LIR_HEADER(FuzzilliHashT);
+
+ LFuzzilliHashT(const LAllocation& value, const LDefinition& temp,
+ const LDefinition& tempFloat)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, value);
+ setTemp(0, temp);
+ setTemp(1, tempFloat);
+ }
+
+ const LAllocation* value() { return getOperand(0); }
+
+ MFuzzilliHash* mir() const { return mir_->toFuzzilliHash(); }
+};
+
+class LFuzzilliHashV : public LInstructionHelper<1, BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(FuzzilliHashV);
+
+ LFuzzilliHashV(const LBoxAllocation& value, const LDefinition& temp,
+ const LDefinition& tempFloat)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(0, value);
+ setTemp(0, temp);
+ setTemp(1, tempFloat);
+ }
+
+ MFuzzilliHash* mir() const { return mir_->toFuzzilliHash(); }
+};
+
+class LFuzzilliHashStore : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(FuzzilliHashStore);
+
+ LFuzzilliHashStore(const LAllocation& value, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, value);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* value() { return getOperand(0); }
+
+ MFuzzilliHashStore* mir() const { return mir_->toFuzzilliHashStore(); }
+};
+#endif
+
+class LBox : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(Box);
+
+ LBox(const LAllocation& payload, MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setOperand(0, payload);
+ }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+template <size_t Temps, size_t ExtraUses = 0>
+class LBinaryMath : public LInstructionHelper<1, 2 + ExtraUses, Temps> {
+ protected:
+ explicit LBinaryMath(LNode::Opcode opcode)
+ : LInstructionHelper<1, 2 + ExtraUses, Temps>(opcode) {}
+
+ public:
+ const LAllocation* lhs() { return this->getOperand(0); }
+ const LAllocation* rhs() { return this->getOperand(1); }
+};
+
+template <size_t Temps, size_t ExtraUses = 0>
+class LUnaryMath : public LInstructionHelper<1, 1 + ExtraUses, Temps> {
+ protected:
+ explicit LUnaryMath(LNode::Opcode opcode)
+ : LInstructionHelper<1, 1 + ExtraUses, Temps>(opcode) {}
+
+ public:
+ const LAllocation* input() { return this->getOperand(0); }
+};
+
+// An LOsiPoint captures a snapshot after a call and ensures enough space to
+// patch in a call to the invalidation mechanism.
+//
+// Note: LSafepoints are 1:1 with LOsiPoints, so each LOsiPoint holds a
+// reference to its corresponding LSafepoint to inform it of the LOsiPoint's
+// masm offset when it gets GC'd.
+class LOsiPoint : public LInstructionHelper<0, 0, 0> {
+ LSafepoint* safepoint_;
+
+ public:
+ LOsiPoint(LSafepoint* safepoint, LSnapshot* snapshot)
+ : LInstructionHelper(classOpcode), safepoint_(safepoint) {
+ MOZ_ASSERT(safepoint && snapshot);
+ assignSnapshot(snapshot);
+ }
+
+ LSafepoint* associatedSafepoint() { return safepoint_; }
+
+ LIR_HEADER(OsiPoint)
+};
+
+class LMove {
+ LAllocation from_;
+ LAllocation to_;
+ LDefinition::Type type_;
+
+ public:
+ LMove(LAllocation from, LAllocation to, LDefinition::Type type)
+ : from_(from), to_(to), type_(type) {}
+
+ LAllocation from() const { return from_; }
+ LAllocation to() const { return to_; }
+ LDefinition::Type type() const { return type_; }
+};
+
+class LMoveGroup : public LInstructionHelper<0, 0, 0> {
+ js::Vector<LMove, 2, JitAllocPolicy> moves_;
+
+#ifdef JS_CODEGEN_X86
+ // Optional general register available for use when executing moves.
+ LAllocation scratchRegister_;
+#endif
+
+ explicit LMoveGroup(TempAllocator& alloc)
+ : LInstructionHelper(classOpcode), moves_(alloc) {}
+
+ public:
+ LIR_HEADER(MoveGroup)
+
+ static LMoveGroup* New(TempAllocator& alloc) {
+ return new (alloc) LMoveGroup(alloc);
+ }
+
+ void printOperands(GenericPrinter& out);
+
+ // Add a move which takes place simultaneously with all others in the group.
+ bool add(LAllocation from, LAllocation to, LDefinition::Type type);
+
+ // Add a move which takes place after existing moves in the group.
+ bool addAfter(LAllocation from, LAllocation to, LDefinition::Type type);
+
+ size_t numMoves() const { return moves_.length(); }
+ const LMove& getMove(size_t i) const { return moves_[i]; }
+
+#ifdef JS_CODEGEN_X86
+ void setScratchRegister(Register reg) { scratchRegister_ = LGeneralReg(reg); }
+ LAllocation maybeScratchRegister() { return scratchRegister_; }
+#endif
+
+ bool uses(Register reg) {
+ for (size_t i = 0; i < numMoves(); i++) {
+ LMove move = getMove(i);
+ if (move.from() == LGeneralReg(reg) || move.to() == LGeneralReg(reg)) {
+ return true;
+ }
+ }
+ return false;
+ }
+};
+
+// A constant Value.
+class LValue : public LInstructionHelper<BOX_PIECES, 0, 0> {
+ Value v_;
+
+ public:
+ LIR_HEADER(Value)
+
+ explicit LValue(const Value& v) : LInstructionHelper(classOpcode), v_(v) {}
+
+ Value value() const { return v_; }
+};
+
+// Base class for control instructions (goto, branch, etc.)
+template <size_t Succs, size_t Operands, size_t Temps>
+class LControlInstructionHelper
+ : public LInstructionHelper<0, Operands, Temps> {
+ mozilla::Array<MBasicBlock*, Succs> successors_;
+
+ protected:
+ explicit LControlInstructionHelper(LNode::Opcode opcode)
+ : LInstructionHelper<0, Operands, Temps>(opcode) {}
+
+ public:
+ size_t numSuccessors() const { return Succs; }
+ MBasicBlock* getSuccessor(size_t i) const { return successors_[i]; }
+
+ void setSuccessor(size_t i, MBasicBlock* successor) {
+ successors_[i] = successor;
+ }
+};
+
+// Jumps to the start of a basic block.
+class LGoto : public LControlInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(Goto)
+
+ explicit LGoto(MBasicBlock* block) : LControlInstructionHelper(classOpcode) {
+ setSuccessor(0, block);
+ }
+
+ MBasicBlock* target() const { return getSuccessor(0); }
+};
+
+class LNewArray : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(NewArray)
+
+ explicit LNewArray(const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isVMCall() ? "VMCall" : nullptr;
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MNewArray* mir() const { return mir_->toNewArray(); }
+};
+
+class LNewObject : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(NewObject)
+
+ explicit LNewObject(const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isVMCall() ? "VMCall" : nullptr;
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MNewObject* mir() const { return mir_->toNewObject(); }
+};
+
+template <size_t Defs, size_t Ops>
+class LWasmReinterpretBase : public LInstructionHelper<Defs, Ops, 0> {
+ typedef LInstructionHelper<Defs, Ops, 0> Base;
+
+ protected:
+ explicit LWasmReinterpretBase(LNode::Opcode opcode) : Base(opcode) {}
+
+ public:
+ const LAllocation* input() { return Base::getOperand(0); }
+ MWasmReinterpret* mir() const { return Base::mir_->toWasmReinterpret(); }
+};
+
+class LWasmReinterpret : public LWasmReinterpretBase<1, 1> {
+ public:
+ LIR_HEADER(WasmReinterpret);
+ explicit LWasmReinterpret(const LAllocation& input)
+ : LWasmReinterpretBase(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LWasmReinterpretFromI64 : public LWasmReinterpretBase<1, INT64_PIECES> {
+ public:
+ static const size_t Input = 0;
+
+ LIR_HEADER(WasmReinterpretFromI64);
+ explicit LWasmReinterpretFromI64(const LInt64Allocation& input)
+ : LWasmReinterpretBase(classOpcode) {
+ setInt64Operand(Input, input);
+ }
+};
+
+class LWasmReinterpretToI64 : public LWasmReinterpretBase<INT64_PIECES, 1> {
+ public:
+ LIR_HEADER(WasmReinterpretToI64);
+ explicit LWasmReinterpretToI64(const LAllocation& input)
+ : LWasmReinterpretBase(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+namespace details {
+template <size_t Defs, size_t Ops, size_t Temps>
+class RotateBase : public LInstructionHelper<Defs, Ops, Temps> {
+ typedef LInstructionHelper<Defs, Ops, Temps> Base;
+
+ protected:
+ explicit RotateBase(LNode::Opcode opcode) : Base(opcode) {}
+
+ public:
+ MRotate* mir() { return Base::mir_->toRotate(); }
+};
+} // namespace details
+
+class LRotate : public details::RotateBase<1, 2, 0> {
+ public:
+ LIR_HEADER(Rotate);
+
+ LRotate() : RotateBase(classOpcode) {}
+
+ const LAllocation* input() { return getOperand(0); }
+ LAllocation* count() { return getOperand(1); }
+};
+
+class LRotateI64
+ : public details::RotateBase<INT64_PIECES, INT64_PIECES + 1, 1> {
+ public:
+ LIR_HEADER(RotateI64);
+
+ LRotateI64() : RotateBase(classOpcode) {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ static const size_t Input = 0;
+ static const size_t Count = INT64_PIECES;
+
+ const LInt64Allocation input() { return getInt64Operand(Input); }
+ const LDefinition* temp() { return getTemp(0); }
+ LAllocation* count() { return getOperand(Count); }
+};
+
+// Allocate a new arguments object for an inlined frame.
+class LCreateInlinedArgumentsObject : public LVariadicInstruction<1, 2> {
+ public:
+ LIR_HEADER(CreateInlinedArgumentsObject)
+
+ static const size_t CallObj = 0;
+ static const size_t Callee = 1;
+ static const size_t NumNonArgumentOperands = 2;
+ static size_t ArgIndex(size_t i) {
+ return NumNonArgumentOperands + BOX_PIECES * i;
+ }
+
+ LCreateInlinedArgumentsObject(uint32_t numOperands, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LVariadicInstruction(classOpcode, numOperands) {
+ setIsCall();
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* getCallObject() { return getOperand(CallObj); }
+ const LAllocation* getCallee() { return getOperand(Callee); }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MCreateInlinedArgumentsObject* mir() const {
+ return mir_->toCreateInlinedArgumentsObject();
+ }
+};
+
+class LGetInlinedArgument : public LVariadicInstruction<BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(GetInlinedArgument)
+
+ static const size_t Index = 0;
+ static const size_t NumNonArgumentOperands = 1;
+ static size_t ArgIndex(size_t i) {
+ return NumNonArgumentOperands + BOX_PIECES * i;
+ }
+
+ explicit LGetInlinedArgument(uint32_t numOperands)
+ : LVariadicInstruction(classOpcode, numOperands) {}
+
+ const LAllocation* getIndex() { return getOperand(Index); }
+
+ MGetInlinedArgument* mir() const { return mir_->toGetInlinedArgument(); }
+};
+
+class LGetInlinedArgumentHole : public LVariadicInstruction<BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(GetInlinedArgumentHole)
+
+ static const size_t Index = 0;
+ static const size_t NumNonArgumentOperands = 1;
+ static size_t ArgIndex(size_t i) {
+ return NumNonArgumentOperands + BOX_PIECES * i;
+ }
+
+ explicit LGetInlinedArgumentHole(uint32_t numOperands)
+ : LVariadicInstruction(classOpcode, numOperands) {}
+
+ const LAllocation* getIndex() { return getOperand(Index); }
+
+ MGetInlinedArgumentHole* mir() const {
+ return mir_->toGetInlinedArgumentHole();
+ }
+};
+
+class LInlineArgumentsSlice : public LVariadicInstruction<1, 1> {
+ public:
+ LIR_HEADER(InlineArgumentsSlice)
+
+ static const size_t Begin = 0;
+ static const size_t Count = 1;
+ static const size_t NumNonArgumentOperands = 2;
+ static size_t ArgIndex(size_t i) {
+ return NumNonArgumentOperands + BOX_PIECES * i;
+ }
+
+ explicit LInlineArgumentsSlice(uint32_t numOperands, const LDefinition& temp)
+ : LVariadicInstruction(classOpcode, numOperands) {
+ setTemp(0, temp);
+ }
+
+ const LAllocation* begin() { return getOperand(Begin); }
+ const LAllocation* count() { return getOperand(Count); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ MInlineArgumentsSlice* mir() const { return mir_->toInlineArgumentsSlice(); }
+};
+
+// Common code for LIR descended from MCall.
+template <size_t Defs, size_t Operands, size_t Temps>
+class LJSCallInstructionHelper
+ : public LCallInstructionHelper<Defs, Operands, Temps> {
+ protected:
+ explicit LJSCallInstructionHelper(LNode::Opcode opcode)
+ : LCallInstructionHelper<Defs, Operands, Temps>(opcode) {}
+
+ public:
+ MCall* mir() const { return this->mir_->toCall(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ // Does not include |this|.
+ uint32_t numActualArgs() const { return mir()->numActualArgs(); }
+
+ bool isConstructing() const { return mir()->isConstructing(); }
+ bool ignoresReturnValue() const { return mir()->ignoresReturnValue(); }
+};
+
+// Generates a polymorphic callsite, wherein the function being called is
+// unknown and anticipated to vary.
+class LCallGeneric : public LJSCallInstructionHelper<BOX_PIECES, 1, 2> {
+ public:
+ LIR_HEADER(CallGeneric)
+
+ LCallGeneric(const LAllocation& func, const LDefinition& nargsreg,
+ const LDefinition& tmpobjreg)
+ : LJSCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setTemp(0, nargsreg);
+ setTemp(1, tmpobjreg);
+ }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LDefinition* getNargsReg() { return getTemp(0); }
+ const LDefinition* getTempObject() { return getTemp(1); }
+};
+
+// Generates a hardcoded callsite for a known, non-native target.
+class LCallKnown : public LJSCallInstructionHelper<BOX_PIECES, 1, 1> {
+ public:
+ LIR_HEADER(CallKnown)
+
+ LCallKnown(const LAllocation& func, const LDefinition& tmpobjreg)
+ : LJSCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setTemp(0, tmpobjreg);
+ }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LDefinition* getTempObject() { return getTemp(0); }
+};
+
+// Generates a hardcoded callsite for a known, native target.
+class LCallNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 4> {
+ public:
+ LIR_HEADER(CallNative)
+
+ LCallNative(const LDefinition& argContext, const LDefinition& argUintN,
+ const LDefinition& argVp, const LDefinition& tmpreg)
+ : LJSCallInstructionHelper(classOpcode) {
+ // Registers used for callWithABI().
+ setTemp(0, argContext);
+ setTemp(1, argUintN);
+ setTemp(2, argVp);
+
+ // Temporary registers.
+ setTemp(3, tmpreg);
+ }
+
+ const LDefinition* getArgContextReg() { return getTemp(0); }
+ const LDefinition* getArgUintNReg() { return getTemp(1); }
+ const LDefinition* getArgVpReg() { return getTemp(2); }
+ const LDefinition* getTempReg() { return getTemp(3); }
+};
+
+class LCallClassHook : public LCallInstructionHelper<BOX_PIECES, 1, 4> {
+ public:
+ LIR_HEADER(CallClassHook)
+
+ LCallClassHook(const LAllocation& callee, const LDefinition& argContext,
+ const LDefinition& argUintN, const LDefinition& argVp,
+ const LDefinition& tmpreg)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, callee);
+
+ // Registers used for callWithABI().
+ setTemp(0, argContext);
+ setTemp(1, argUintN);
+ setTemp(2, argVp);
+
+ // Temporary registers.
+ setTemp(3, tmpreg);
+ }
+
+ MCallClassHook* mir() const { return mir_->toCallClassHook(); }
+
+ const LAllocation* getCallee() { return this->getOperand(0); }
+
+ const LDefinition* getArgContextReg() { return getTemp(0); }
+ const LDefinition* getArgUintNReg() { return getTemp(1); }
+ const LDefinition* getArgVpReg() { return getTemp(2); }
+ const LDefinition* getTempReg() { return getTemp(3); }
+};
+
+// Generates a hardcoded callsite for a known, DOM-native target.
+class LCallDOMNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 4> {
+ public:
+ LIR_HEADER(CallDOMNative)
+
+ LCallDOMNative(const LDefinition& argJSContext, const LDefinition& argObj,
+ const LDefinition& argPrivate, const LDefinition& argArgs)
+ : LJSCallInstructionHelper(classOpcode) {
+ setTemp(0, argJSContext);
+ setTemp(1, argObj);
+ setTemp(2, argPrivate);
+ setTemp(3, argArgs);
+ }
+
+ const LDefinition* getArgJSContext() { return getTemp(0); }
+ const LDefinition* getArgObj() { return getTemp(1); }
+ const LDefinition* getArgPrivate() { return getTemp(2); }
+ const LDefinition* getArgArgs() { return getTemp(3); }
+};
+
+class LUnreachable : public LControlInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(Unreachable)
+
+ LUnreachable() : LControlInstructionHelper(classOpcode) {}
+};
+
+class LUnreachableResultV : public LInstructionHelper<BOX_PIECES, 0, 0> {
+ public:
+ LIR_HEADER(UnreachableResultV)
+
+ LUnreachableResultV() : LInstructionHelper(classOpcode) {}
+};
+
+template <size_t defs, size_t ops>
+class LDOMPropertyInstructionHelper
+ : public LCallInstructionHelper<defs, 1 + ops, 3> {
+ protected:
+ LDOMPropertyInstructionHelper(LNode::Opcode opcode,
+ const LDefinition& JSContextReg,
+ const LAllocation& ObjectReg,
+ const LDefinition& PrivReg,
+ const LDefinition& ValueReg)
+ : LCallInstructionHelper<defs, 1 + ops, 3>(opcode) {
+ this->setOperand(0, ObjectReg);
+ this->setTemp(0, JSContextReg);
+ this->setTemp(1, PrivReg);
+ this->setTemp(2, ValueReg);
+ }
+
+ public:
+ const LDefinition* getJSContextReg() { return this->getTemp(0); }
+ const LAllocation* getObjectReg() { return this->getOperand(0); }
+ const LDefinition* getPrivReg() { return this->getTemp(1); }
+ const LDefinition* getValueReg() { return this->getTemp(2); }
+};
+
+class LGetDOMProperty : public LDOMPropertyInstructionHelper<BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(GetDOMProperty)
+
+ LGetDOMProperty(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LDefinition& PrivReg, const LDefinition& ValueReg)
+ : LDOMPropertyInstructionHelper<BOX_PIECES, 0>(
+ classOpcode, JSContextReg, ObjectReg, PrivReg, ValueReg) {}
+
+ MGetDOMProperty* mir() const { return mir_->toGetDOMProperty(); }
+};
+
+class LGetDOMMemberV : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(GetDOMMemberV);
+ explicit LGetDOMMemberV(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ MGetDOMMember* mir() const { return mir_->toGetDOMMember(); }
+};
+
+class LSetDOMProperty : public LDOMPropertyInstructionHelper<0, BOX_PIECES> {
+ public:
+ LIR_HEADER(SetDOMProperty)
+
+ LSetDOMProperty(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LBoxAllocation& value, const LDefinition& PrivReg,
+ const LDefinition& ValueReg)
+ : LDOMPropertyInstructionHelper<0, BOX_PIECES>(
+ classOpcode, JSContextReg, ObjectReg, PrivReg, ValueReg) {
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ MSetDOMProperty* mir() const { return mir_->toSetDOMProperty(); }
+};
+
+// Generates a polymorphic callsite, wherein the function being called is
+// unknown and anticipated to vary.
+class LApplyArgsGeneric
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArgsGeneric)
+
+ LApplyArgsGeneric(const LAllocation& func, const LAllocation& argc,
+ const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
+ const LDefinition& tmpcopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argc);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ setTemp(1, tmpcopy);
+ }
+
+ MApplyArgs* mir() const { return mir_->toApplyArgs(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgc() { return getOperand(1); }
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+};
+
+class LApplyArgsObj
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArgsObj)
+
+ LApplyArgsObj(const LAllocation& func, const LAllocation& argsObj,
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argsObj);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ MApplyArgsObj* mir() const { return mir_->toApplyArgsObj(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgsObj() { return getOperand(1); }
+ // All registers are calltemps. argc is mapped to the same register as
+ // ArgsObj. argc becomes live as ArgsObj is dying.
+ const LAllocation* getArgc() { return getOperand(1); }
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+};
+
+class LApplyArrayGeneric
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArrayGeneric)
+
+ LApplyArrayGeneric(const LAllocation& func, const LAllocation& elements,
+ const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
+ const LDefinition& tmpcopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ setTemp(1, tmpcopy);
+ }
+
+ MApplyArray* mir() const { return mir_->toApplyArray(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getElements() { return getOperand(1); }
+  // argc is mapped to the same register as elements: argc becomes live as
+  // elements is dying; all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+};
+
+class LConstructArgsGeneric
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 3, 1> {
+ public:
+ LIR_HEADER(ConstructArgsGeneric)
+
+ LConstructArgsGeneric(const LAllocation& func, const LAllocation& argc,
+ const LAllocation& newTarget,
+ const LBoxAllocation& thisv,
+ const LDefinition& tmpobjreg)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argc);
+ setOperand(2, newTarget);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ }
+
+ MConstructArgs* mir() const { return mir_->toConstructArgs(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgc() { return getOperand(1); }
+ const LAllocation* getNewTarget() { return getOperand(2); }
+
+ static const size_t ThisIndex = 3;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+
+  // tempForArgCopy is mapped to the same register as newTarget:
+  // tempForArgCopy becomes live as newTarget is dying; all registers are
+  // calltemps.
+ const LAllocation* getTempForArgCopy() { return getOperand(2); }
+};
+
+class LConstructArrayGeneric
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 3, 1> {
+ public:
+ LIR_HEADER(ConstructArrayGeneric)
+
+ LConstructArrayGeneric(const LAllocation& func, const LAllocation& elements,
+ const LAllocation& newTarget,
+ const LBoxAllocation& thisv,
+ const LDefinition& tmpobjreg)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setOperand(2, newTarget);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ }
+
+ MConstructArray* mir() const { return mir_->toConstructArray(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getElements() { return getOperand(1); }
+ const LAllocation* getNewTarget() { return getOperand(2); }
+
+ static const size_t ThisIndex = 3;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+
+  // argc is mapped to the same register as elements: argc becomes live as
+  // elements is dying; all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+
+  // tempForArgCopy is mapped to the same register as newTarget:
+  // tempForArgCopy becomes live as newTarget is dying; all registers are
+  // calltemps.
+ const LAllocation* getTempForArgCopy() { return getOperand(2); }
+};
+
+// Takes in either an integer or boolean input and tests it for truthiness.
+class LTestIAndBranch : public LControlInstructionHelper<2, 1, 0> {
+ public:
+ LIR_HEADER(TestIAndBranch)
+
+ LTestIAndBranch(const LAllocation& in, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+// Takes in an int64 input and tests it for truthiness.
+class LTestI64AndBranch : public LControlInstructionHelper<2, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(TestI64AndBranch)
+
+ LTestI64AndBranch(const LInt64Allocation& in, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+// Takes in a double input and tests it for truthiness.
+class LTestDAndBranch : public LControlInstructionHelper<2, 1, 0> {
+ public:
+ LIR_HEADER(TestDAndBranch)
+
+ LTestDAndBranch(const LAllocation& in, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+// Takes in a float32 input and tests it for truthiness.
+class LTestFAndBranch : public LControlInstructionHelper<2, 1, 0> {
+ public:
+ LIR_HEADER(TestFAndBranch)
+
+ LTestFAndBranch(const LAllocation& in, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+// Takes in a bigint input and tests it for truthiness.
+class LTestBIAndBranch : public LControlInstructionHelper<2, 1, 0> {
+ public:
+ LIR_HEADER(TestBIAndBranch)
+
+ LTestBIAndBranch(const LAllocation& in, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() { return getSuccessor(0); }
+ MBasicBlock* ifFalse() { return getSuccessor(1); }
+};
+
+// Takes an object and tests it for truthiness. An object is falsy iff it
+// emulates |undefined|; see js::EmulatesUndefined.
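+// For example, in browser embeddings |document.all| emulates undefined, so a
+// truthiness test on it takes the falsy branch.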
+class LTestOAndBranch : public LControlInstructionHelper<2, 1, 1> {
+ public:
+ LIR_HEADER(TestOAndBranch)
+
+ LTestOAndBranch(const LAllocation& input, MBasicBlock* ifTruthy,
+ MBasicBlock* ifFalsy, const LDefinition& temp)
+ : LControlInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setSuccessor(0, ifTruthy);
+ setSuccessor(1, ifFalsy);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MBasicBlock* ifTruthy() { return getSuccessor(0); }
+ MBasicBlock* ifFalsy() { return getSuccessor(1); }
+
+ MTest* mir() { return mir_->toTest(); }
+};
+
+// Takes in a boxed value and tests it for truthiness.
+class LTestVAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(TestVAndBranch)
+
+ LTestVAndBranch(MBasicBlock* ifTruthy, MBasicBlock* ifFalsy,
+ const LBoxAllocation& input, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LControlInstructionHelper(classOpcode) {
+ setSuccessor(0, ifTruthy);
+ setSuccessor(1, ifFalsy);
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempFloat() { return getTemp(0); }
+
+ const LDefinition* temp1() { return getTemp(1); }
+
+ const LDefinition* temp2() { return getTemp(2); }
+
+ MBasicBlock* ifTruthy() { return getSuccessor(0); }
+ MBasicBlock* ifFalsy() { return getSuccessor(1); }
+
+ MTest* mir() const { return mir_->toTest(); }
+};
+
+// Compares two integral values of the same JS type, either integer or object.
+// For objects, both operands are in registers.
+class LCompare : public LInstructionHelper<1, 2, 0> {
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(Compare)
+ LCompare(JSOp jsop, const LAllocation& left, const LAllocation& right)
+ : LInstructionHelper(classOpcode), jsop_(jsop) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ JSOp jsop() const { return jsop_; }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MCompare* mir() { return mir_->toCompare(); }
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+class LCompareI64 : public LInstructionHelper<1, 2 * INT64_PIECES, 0> {
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LCompareI64(JSOp jsop, const LInt64Allocation& left,
+ const LInt64Allocation& right)
+ : LInstructionHelper(classOpcode), jsop_(jsop) {
+ setInt64Operand(Lhs, left);
+ setInt64Operand(Rhs, right);
+ }
+
+ JSOp jsop() const { return jsop_; }
+ MCompare* mir() { return mir_->toCompare(); }
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+class LCompareI64AndBranch
+ : public LControlInstructionHelper<2, 2 * INT64_PIECES, 0> {
+ MCompare* cmpMir_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareI64AndBranch)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LCompareI64AndBranch(MCompare* cmpMir, JSOp jsop,
+ const LInt64Allocation& left,
+ const LInt64Allocation& right, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir), jsop_(jsop) {
+ setInt64Operand(Lhs, left);
+ setInt64Operand(Rhs, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ JSOp jsop() const { return jsop_; }
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+// Compares two integral values of the same JS type, either integer or object.
+// For objects, both operands are in registers.
+class LCompareAndBranch : public LControlInstructionHelper<2, 2, 0> {
+ MCompare* cmpMir_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareAndBranch)
+ LCompareAndBranch(MCompare* cmpMir, JSOp jsop, const LAllocation& left,
+ const LAllocation& right, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir), jsop_(jsop) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ JSOp jsop() const { return jsop_; }
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+class LCompareDAndBranch : public LControlInstructionHelper<2, 2, 0> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareDAndBranch)
+
+ LCompareDAndBranch(MCompare* cmpMir, const LAllocation& left,
+ const LAllocation& right, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+};
+
+class LCompareFAndBranch : public LControlInstructionHelper<2, 2, 0> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareFAndBranch)
+ LCompareFAndBranch(MCompare* cmpMir, const LAllocation& left,
+ const LAllocation& right, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+};
+
+class LBitAndAndBranch : public LControlInstructionHelper<2, 2, 0> {
+ // This denotes only a single-word AND on the target. Hence `is64_` is
+ // required to be `false` on a 32-bit target.
+ bool is64_;
+ Assembler::Condition cond_;
+
+ public:
+ LIR_HEADER(BitAndAndBranch)
+ LBitAndAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse, bool is64,
+ Assembler::Condition cond = Assembler::NonZero)
+ : LControlInstructionHelper(classOpcode), is64_(is64), cond_(cond) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ bool is64() const { return is64_; }
+ Assembler::Condition cond() const {
+ MOZ_ASSERT(cond_ == Assembler::Zero || cond_ == Assembler::NonZero);
+ return cond_;
+ }
+};
+
+class LIsNullOrLikeUndefinedAndBranchV
+ : public LControlInstructionHelper<2, BOX_PIECES, 2> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedAndBranchV)
+
+ LIsNullOrLikeUndefinedAndBranchV(MCompare* cmpMir, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse,
+ const LBoxAllocation& value,
+ const LDefinition& temp,
+ const LDefinition& tempToUnbox)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempToUnbox);
+ }
+
+ static const size_t Value = 0;
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* tempToUnbox() { return getTemp(1); }
+};
+
+class LIsNullOrLikeUndefinedAndBranchT
+ : public LControlInstructionHelper<2, 1, 1> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedAndBranchT)
+
+ LIsNullOrLikeUndefinedAndBranchT(MCompare* cmpMir, const LAllocation& input,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LDefinition& temp)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setOperand(0, input);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setTemp(0, temp);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LIsNullAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 0> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsNullAndBranch)
+
+ LIsNullAndBranch(MCompare* cmpMir, MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& value)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 0;
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+};
+
+class LIsUndefinedAndBranch
+ : public LControlInstructionHelper<2, BOX_PIECES, 0> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsUndefinedAndBranch)
+
+ LIsUndefinedAndBranch(MCompare* cmpMir, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse, const LBoxAllocation& value)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 0;
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+};
+
+// Bitwise not operation, taking a 32-bit integer as input and returning
+// a 32-bit integer result as an output.
+class LBitNotI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(BitNotI)
+
+ LBitNotI() : LInstructionHelper(classOpcode) {}
+};
+
+class LBitNotI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(BitNotI64)
+
+ LBitNotI64() : LInstructionHelper(classOpcode) {}
+};
+
+// Binary bitwise operation, taking two 32-bit integers as inputs and returning
+// a 32-bit integer result as an output.
+class LBitOpI : public LInstructionHelper<1, 2, 0> {
+ JSOp op_;
+
+ public:
+ LIR_HEADER(BitOpI)
+
+ explicit LBitOpI(JSOp op) : LInstructionHelper(classOpcode), op_(op) {}
+
+ const char* extraName() const {
+ if (bitop() == JSOp::Ursh && mir_->toUrsh()->bailoutsDisabled()) {
+ return "ursh:BailoutsDisabled";
+ }
+ return CodeName(op_);
+ }
+
+ JSOp bitop() const { return op_; }
+};
+
+class LBitOpI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0> {
+ JSOp op_;
+
+ public:
+ LIR_HEADER(BitOpI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ explicit LBitOpI64(JSOp op) : LInstructionHelper(classOpcode), op_(op) {}
+
+ const char* extraName() const { return CodeName(op_); }
+
+ JSOp bitop() const { return op_; }
+};
+
+// Shift operation, taking two 32-bit integers as inputs and returning
+// a 32-bit integer result as an output.
+class LShiftI : public LBinaryMath<0> {
+ JSOp op_;
+
+ public:
+ LIR_HEADER(ShiftI)
+
+ explicit LShiftI(JSOp op) : LBinaryMath(classOpcode), op_(op) {}
+
+ JSOp bitop() { return op_; }
+
+ MInstruction* mir() { return mir_->toInstruction(); }
+
+ const char* extraName() const { return CodeName(op_); }
+};
+
+class LShiftI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0> {
+ JSOp op_;
+
+ public:
+ LIR_HEADER(ShiftI64)
+
+ explicit LShiftI64(JSOp op) : LInstructionHelper(classOpcode), op_(op) {}
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ JSOp bitop() { return op_; }
+
+ MInstruction* mir() { return mir_->toInstruction(); }
+
+ const char* extraName() const { return CodeName(op_); }
+};
+
+class LSignExtendInt64
+ : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(SignExtendInt64)
+
+ explicit LSignExtendInt64(const LInt64Allocation& input)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, input);
+ }
+
+ const MSignExtendInt64* mir() const { return mir_->toSignExtendInt64(); }
+
+ MSignExtendInt64::Mode mode() const { return mir()->mode(); }
+};
+
+class LUrshD : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UrshD)
+
+ LUrshD(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Returns from the function being compiled (not used in inlined frames). The
+// input must be a box.
+class LReturn : public LInstructionHelper<0, BOX_PIECES, 0> {
+ bool isGenerator_;
+
+ public:
+ LIR_HEADER(Return)
+
+ explicit LReturn(bool isGenerator)
+ : LInstructionHelper(classOpcode), isGenerator_(isGenerator) {}
+
+ bool isGenerator() { return isGenerator_; }
+};
+
+class LMinMaxBase : public LInstructionHelper<1, 2, 0> {
+ protected:
+ LMinMaxBase(LNode::Opcode opcode, const LAllocation& first,
+ const LAllocation& second)
+ : LInstructionHelper(opcode) {
+ setOperand(0, first);
+ setOperand(1, second);
+ }
+
+ public:
+ const LAllocation* first() { return this->getOperand(0); }
+ const LAllocation* second() { return this->getOperand(1); }
+ const LDefinition* output() { return this->getDef(0); }
+ MMinMax* mir() const { return mir_->toMinMax(); }
+ const char* extraName() const { return mir()->isMax() ? "Max" : "Min"; }
+};
+
+class LMinMaxI : public LMinMaxBase {
+ public:
+ LIR_HEADER(MinMaxI)
+ LMinMaxI(const LAllocation& first, const LAllocation& second)
+ : LMinMaxBase(classOpcode, first, second) {}
+};
+
+class LMinMaxD : public LMinMaxBase {
+ public:
+ LIR_HEADER(MinMaxD)
+ LMinMaxD(const LAllocation& first, const LAllocation& second)
+ : LMinMaxBase(classOpcode, first, second) {}
+};
+
+class LMinMaxF : public LMinMaxBase {
+ public:
+ LIR_HEADER(MinMaxF)
+ LMinMaxF(const LAllocation& first, const LAllocation& second)
+ : LMinMaxBase(classOpcode, first, second) {}
+};
+
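+// Computes the minimum or maximum of the int32 elements of a dense array; see
+// MMinMaxArray.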
+class LMinMaxArrayI : public LInstructionHelper<1, 1, 3> {
+ public:
+ LIR_HEADER(MinMaxArrayI);
+ LMinMaxArrayI(const LAllocation& array, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, array);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LAllocation* array() { return getOperand(0); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+
+ bool isMax() const { return mir_->toMinMaxArray()->isMax(); }
+};
+
+class LMinMaxArrayD : public LInstructionHelper<1, 1, 3> {
+ public:
+ LIR_HEADER(MinMaxArrayD);
+ LMinMaxArrayD(const LAllocation& array, const LDefinition& floatTemp,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, array);
+ setTemp(0, floatTemp);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LAllocation* array() { return getOperand(0); }
+ const LDefinition* floatTemp() { return getTemp(0); }
+ const LDefinition* temp1() { return getTemp(1); }
+ const LDefinition* temp2() { return getTemp(2); }
+
+ bool isMax() const { return mir_->toMinMaxArray()->isMax(); }
+};
+
+// Copysign for doubles.
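+// copysign(x, y) produces a value with the magnitude of x and the sign of y.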
+class LCopySignD : public LInstructionHelper<1, 2, 2> {
+ public:
+ LIR_HEADER(CopySignD)
+ explicit LCopySignD() : LInstructionHelper(classOpcode) {}
+};
+
+// Copysign for float32.
+class LCopySignF : public LInstructionHelper<1, 2, 2> {
+ public:
+ LIR_HEADER(CopySignF)
+ explicit LCopySignF() : LInstructionHelper(classOpcode) {}
+};
+
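+// Computes Math.atan2(y, x). As in the JS API, the y coordinate is the first
+// operand and the x coordinate is the second.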
+class LAtan2D : public LCallInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(Atan2D)
+ LAtan2D(const LAllocation& y, const LAllocation& x)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, y);
+ setOperand(1, x);
+ }
+
+ const LAllocation* y() { return getOperand(0); }
+
+ const LAllocation* x() { return getOperand(1); }
+
+ const LDefinition* output() { return getDef(0); }
+};
+
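+// Computes Math.hypot with two, three, or four arguments, i.e. the square root
+// of the sum of the squares of its operands.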
+class LHypot : public LCallInstructionHelper<1, 4, 0> {
+ uint32_t numOperands_;
+
+ public:
+ LIR_HEADER(Hypot)
+ LHypot(const LAllocation& x, const LAllocation& y)
+ : LCallInstructionHelper(classOpcode), numOperands_(2) {
+ setOperand(0, x);
+ setOperand(1, y);
+ }
+
+ LHypot(const LAllocation& x, const LAllocation& y, const LAllocation& z)
+ : LCallInstructionHelper(classOpcode), numOperands_(3) {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ }
+
+ LHypot(const LAllocation& x, const LAllocation& y, const LAllocation& z,
+ const LAllocation& w)
+ : LCallInstructionHelper(classOpcode), numOperands_(4) {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ setOperand(3, w);
+ }
+
+ uint32_t numArgs() const { return numOperands_; }
+
+ const LAllocation* x() { return getOperand(0); }
+
+ const LAllocation* y() { return getOperand(1); }
+
+ const LDefinition* output() { return getDef(0); }
+};
+
+class LMathFunctionD : public LCallInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(MathFunctionD)
+ explicit LMathFunctionD(const LAllocation& input)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ MMathFunction* mir() const { return mir_->toMathFunction(); }
+ const char* extraName() const {
+ return MMathFunction::FunctionName(mir()->function());
+ }
+};
+
+class LMathFunctionF : public LCallInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(MathFunctionF)
+ explicit LMathFunctionF(const LAllocation& input)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ MMathFunction* mir() const { return mir_->toMathFunction(); }
+ const char* extraName() const {
+ return MMathFunction::FunctionName(mir()->function());
+ }
+};
+
+// Adds two integers, returning an integer value.
+class LAddI : public LBinaryMath<0> {
+ bool recoversInput_;
+
+ public:
+ LIR_HEADER(AddI)
+
+ LAddI() : LBinaryMath(classOpcode), recoversInput_(false) {}
+
+ const char* extraName() const {
+ return snapshot() ? "OverflowCheck" : nullptr;
+ }
+
+ bool recoversInput() const { return recoversInput_; }
+ void setRecoversInput() { recoversInput_ = true; }
+
+ MAdd* mir() const { return mir_->toAdd(); }
+};
+
+class LAddI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(AddI64)
+
+ LAddI64() : LInstructionHelper(classOpcode) {}
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+// Subtracts two integers, returning an integer value.
+class LSubI : public LBinaryMath<0> {
+ bool recoversInput_;
+
+ public:
+ LIR_HEADER(SubI)
+
+ LSubI() : LBinaryMath(classOpcode), recoversInput_(false) {}
+
+ const char* extraName() const {
+ return snapshot() ? "OverflowCheck" : nullptr;
+ }
+
+ bool recoversInput() const { return recoversInput_; }
+ void setRecoversInput() { recoversInput_ = true; }
+ MSub* mir() const { return mir_->toSub(); }
+};
+
+inline bool LNode::recoversInput() const {
+ switch (op()) {
+ case Opcode::AddI:
+ return toAddI()->recoversInput();
+ case Opcode::SubI:
+ return toSubI()->recoversInput();
+ default:
+ return false;
+ }
+}
+
+class LSubI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(SubI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LSubI64() : LInstructionHelper(classOpcode) {}
+};
+
+class LMulI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 1> {
+ public:
+ LIR_HEADER(MulI64)
+
+ explicit LMulI64() : LInstructionHelper(classOpcode) {
+ setTemp(0, LDefinition());
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+// Performs an add, sub, mul, or div on two double values.
+class LMathD : public LBinaryMath<0> {
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(MathD)
+
+ explicit LMathD(JSOp jsop) : LBinaryMath(classOpcode), jsop_(jsop) {}
+
+ JSOp jsop() const { return jsop_; }
+
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+// Performs an add, sub, mul, or div on two float32 values.
+class LMathF : public LBinaryMath<0> {
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(MathF)
+
+ explicit LMathF(JSOp jsop) : LBinaryMath(classOpcode), jsop_(jsop) {}
+
+ JSOp jsop() const { return jsop_; }
+
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+class LModD : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(ModD)
+
+ LModD(const LAllocation& lhs, const LAllocation& rhs)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setIsCall();
+ }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModPowTwoD : public LInstructionHelper<1, 1, 0> {
+ const uint32_t divisor_;
+
+ public:
+ LIR_HEADER(ModPowTwoD)
+
+ LModPowTwoD(const LAllocation& lhs, uint32_t divisor)
+ : LInstructionHelper(classOpcode), divisor_(divisor) {
+ setOperand(0, lhs);
+ }
+
+ uint32_t divisor() const { return divisor_; }
+ const LAllocation* lhs() { return getOperand(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
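+// Binary arithmetic and bitwise operations on two BigInt operands. The temps
+// are scratch registers used while computing the result.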
+class LBigIntAdd : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntAdd)
+
+ LBigIntAdd(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntSub : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntSub)
+
+ LBigIntSub(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntMul : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntMul)
+
+ LBigIntMul(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntDiv : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntDiv)
+
+ LBigIntDiv(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ const MBigIntDiv* mir() const { return mirRaw()->toBigIntDiv(); }
+};
+
+class LBigIntMod : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntMod)
+
+ LBigIntMod(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ const MBigIntMod* mir() const { return mirRaw()->toBigIntMod(); }
+};
+
+class LBigIntPow : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntPow)
+
+ LBigIntPow(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ const MBigIntPow* mir() const { return mirRaw()->toBigIntPow(); }
+};
+
+class LBigIntBitAnd : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntBitAnd)
+
+ LBigIntBitAnd(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntBitOr : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntBitOr)
+
+ LBigIntBitOr(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntBitXor : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntBitXor)
+
+ LBigIntBitXor(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntLsh : public LBinaryMath<3> {
+ public:
+ LIR_HEADER(BigIntLsh)
+
+ LBigIntLsh(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+};
+
+class LBigIntRsh : public LBinaryMath<3> {
+ public:
+ LIR_HEADER(BigIntRsh)
+
+ LBigIntRsh(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+};
+
+class LBigIntIncrement : public LUnaryMath<2> {
+ public:
+ LIR_HEADER(BigIntIncrement)
+
+ LBigIntIncrement(const LAllocation& input, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LUnaryMath(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntDecrement : public LUnaryMath<2> {
+ public:
+ LIR_HEADER(BigIntDecrement)
+
+ LBigIntDecrement(const LAllocation& input, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LUnaryMath(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntNegate : public LUnaryMath<1> {
+ public:
+ LIR_HEADER(BigIntNegate)
+
+ LBigIntNegate(const LAllocation& input, const LDefinition& temp)
+ : LUnaryMath(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LBigIntBitNot : public LUnaryMath<2> {
+ public:
+ LIR_HEADER(BigIntBitNot)
+
+ LBigIntBitNot(const LAllocation& input, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LUnaryMath(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+// Convert a value to an int32.
+// Input: components of a Value
+// Output: 32-bit integer
+// Bailout: undefined, string, object, or non-int32 double
+// Temps: one float register, one GP register
+//
+// This instruction requires a temporary float register.
+class LValueToInt32 : public LInstructionHelper<1, BOX_PIECES, 2> {
+ public:
+ enum Mode { NORMAL, TRUNCATE };
+
+ private:
+ Mode mode_;
+
+ public:
+ LIR_HEADER(ValueToInt32)
+
+ LValueToInt32(const LBoxAllocation& input, const LDefinition& temp0,
+ const LDefinition& temp1, Mode mode)
+ : LInstructionHelper(classOpcode), mode_(mode) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ switch (mode()) {
+ case NORMAL:
+ return "Normal";
+ case TRUNCATE:
+ return "Truncate";
+ }
+ MOZ_CRASH("Invalid mode");
+ }
+
+ static const size_t Input = 0;
+
+ Mode mode() const { return mode_; }
+ const LDefinition* tempFloat() { return getTemp(0); }
+ const LDefinition* temp() { return getTemp(1); }
+ MToNumberInt32* mirNormal() const {
+ MOZ_ASSERT(mode_ == NORMAL);
+ return mir_->toToNumberInt32();
+ }
+ MTruncateToInt32* mirTruncate() const {
+ MOZ_ASSERT(mode_ == TRUNCATE);
+ return mir_->toTruncateToInt32();
+ }
+ MInstruction* mir() const { return mir_->toInstruction(); }
+};
+
+// Double raised to a half power.
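+// Note that pow(x, 0.5) is not equivalent to sqrt(x): for x == -Infinity,
+// pow(x, 0.5) returns +Infinity whereas sqrt(x) returns NaN.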
+class LPowHalfD : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(PowHalfD);
+ explicit LPowHalfD(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* output() { return getDef(0); }
+ MPowHalf* mir() const { return mir_->toPowHalf(); }
+};
+
+// Passed the BaselineFrame address in the OsrFrameReg via the IonOsrTempData
+// populated by PrepareOsrTempData.
+//
+// Forwards this object to the LOsrValues for Value materialization.
+class LOsrEntry : public LInstructionHelper<1, 0, 1> {
+ protected:
+ Label label_;
+ uint32_t frameDepth_;
+
+ public:
+ LIR_HEADER(OsrEntry)
+
+ explicit LOsrEntry(const LDefinition& temp)
+ : LInstructionHelper(classOpcode), frameDepth_(0) {
+ setTemp(0, temp);
+ }
+
+ void setFrameDepth(uint32_t depth) { frameDepth_ = depth; }
+ uint32_t getFrameDepth() { return frameDepth_; }
+ Label* label() { return &label_; }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Bail out if index + minimum < 0 or index + maximum >= length.
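+// For example, with minimum == -1 and maximum == 2 the check only passes when
+// both index - 1 and index + 2 lie within [0, length).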
+class LBoundsCheckRange : public LInstructionHelper<0, 2, 1> {
+ public:
+ LIR_HEADER(BoundsCheckRange)
+
+ LBoundsCheckRange(const LAllocation& index, const LAllocation& length,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, index);
+ setOperand(1, length);
+ setTemp(0, temp);
+ }
+ const MBoundsCheck* mir() const { return mir_->toBoundsCheck(); }
+ const LAllocation* index() { return getOperand(0); }
+ const LAllocation* length() { return getOperand(1); }
+};
+
+// Load a value from a dense array's elements vector. Bail out if it's the hole
+// value.
+class LLoadElementV : public LInstructionHelper<BOX_PIECES, 2, 0> {
+ public:
+ LIR_HEADER(LoadElementV)
+
+ LLoadElementV(const LAllocation& elements, const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const MLoadElement* mir() const { return mir_->toLoadElement(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+// Load a value from an array's elements vector, loading |undefined| if we hit a
+// hole. Bail out if we get a negative index.
+class LLoadElementHole : public LInstructionHelper<BOX_PIECES, 3, 0> {
+ public:
+ LIR_HEADER(LoadElementHole)
+
+ LLoadElementHole(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& initLength)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, initLength);
+ }
+
+ const MLoadElementHole* mir() const { return mir_->toLoadElementHole(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* initLength() { return getOperand(2); }
+};
+
+// Store a boxed value to a dense array's elements vector.
+class LStoreElementV : public LInstructionHelper<0, 2 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(StoreElementV)
+
+ LStoreElementV(const LAllocation& elements, const LAllocation& index,
+ const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setBoxOperand(Value, value);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ static const size_t Value = 2;
+
+ const MStoreElement* mir() const { return mir_->toStoreElement(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+// Store a typed value to a dense array's elements vector. Compared to
+// LStoreElementV, this instruction can store doubles and constants directly,
+// and does not store the type tag if the array is monomorphic and known to
+// be packed.
+class LStoreElementT : public LInstructionHelper<0, 3, 0> {
+ public:
+ LIR_HEADER(StoreElementT)
+
+ LStoreElementT(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MStoreElement* mir() const { return mir_->toStoreElement(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+};
+
+class LArrayPopShift : public LInstructionHelper<BOX_PIECES, 1, 2> {
+ public:
+ LIR_HEADER(ArrayPopShift)
+
+ LArrayPopShift(const LAllocation& object, const LDefinition& temp0,
+ const LDefinition& temp1)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ return mir()->mode() == MArrayPopShift::Pop ? "Pop" : "Shift";
+ }
+
+ const MArrayPopShift* mir() const { return mir_->toArrayPopShift(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp0() { return getTemp(0); }
+ const LDefinition* temp1() { return getTemp(1); }
+};
+
+class LLoadUnboxedBigInt : public LInstructionHelper<1, 2, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(LoadUnboxedBigInt)
+
+ LLoadUnboxedBigInt(const LAllocation& elements, const LAllocation& index,
+ const LDefinition& temp, const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+ const MLoadUnboxedScalar* mir() const { return mir_->toLoadUnboxedScalar(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LLoadDataViewElement : public LInstructionHelper<1, 3, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(LoadDataViewElement)
+
+ LLoadDataViewElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& littleEndian, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, littleEndian);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+ const MLoadDataViewElement* mir() const {
+ return mir_->toLoadDataViewElement();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* littleEndian() { return getOperand(2); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LLoadTypedArrayElementHoleBigInt
+ : public LInstructionHelper<BOX_PIECES, 2, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(LoadTypedArrayElementHoleBigInt)
+
+ LLoadTypedArrayElementHoleBigInt(const LAllocation& object,
+ const LAllocation& index,
+ const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, index);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+ const MLoadTypedArrayElementHole* mir() const {
+ return mir_->toLoadTypedArrayElementHole();
+ }
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LStoreUnboxedBigInt : public LInstructionHelper<0, 3, INT64_PIECES> {
+ public:
+ LIR_HEADER(StoreUnboxedBigInt)
+
+ LStoreUnboxedBigInt(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LInt64Definition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp);
+ }
+
+ const MStoreUnboxedScalar* mir() const {
+ return mir_->toStoreUnboxedScalar();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ LInt64Definition temp() { return getInt64Temp(0); }
+};
+
+class LStoreDataViewElement
+ : public LInstructionHelper<0, 4, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(StoreDataViewElement)
+
+ LStoreDataViewElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value,
+ const LAllocation& littleEndian,
+ const LDefinition& temp, const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setOperand(3, littleEndian);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const MStoreDataViewElement* mir() const {
+ return mir_->toStoreDataViewElement();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ const LAllocation* littleEndian() { return getOperand(3); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LStoreTypedArrayElementHoleBigInt
+ : public LInstructionHelper<0, 4, INT64_PIECES> {
+ public:
+ LIR_HEADER(StoreTypedArrayElementHoleBigInt)
+
+ LStoreTypedArrayElementHoleBigInt(const LAllocation& elements,
+ const LAllocation& length,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, length);
+ setOperand(2, index);
+ setOperand(3, value);
+ setInt64Temp(0, temp);
+ }
+
+ const MStoreTypedArrayElementHole* mir() const {
+ return mir_->toStoreTypedArrayElementHole();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* length() { return getOperand(1); }
+ const LAllocation* index() { return getOperand(2); }
+ const LAllocation* value() { return getOperand(3); }
+ LInt64Definition temp() { return getInt64Temp(0); }
+};
+
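+// Implements Atomics.compareExchange on a typed array element.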
+class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 4> {
+ public:
+ LIR_HEADER(CompareExchangeTypedArrayElement)
+
+ // ARM, ARM64, x86, x64
+ LCompareExchangeTypedArrayElement(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& oldval,
+ const LAllocation& newval,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setTemp(0, temp);
+ }
+ // MIPS32, MIPS64
+ LCompareExchangeTypedArrayElement(
+ const LAllocation& elements, const LAllocation& index,
+ const LAllocation& oldval, const LAllocation& newval,
+ const LDefinition& temp, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setTemp(0, temp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* oldval() { return getOperand(2); }
+ const LAllocation* newval() { return getOperand(3); }
+ const LDefinition* temp() { return getTemp(0); }
+
+  // Temps that may be used on LL/SC platforms to extract/insert the bits of a
+  // word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ const MCompareExchangeTypedArrayElement* mir() const {
+ return mir_->toCompareExchangeTypedArrayElement();
+ }
+};
+
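+// Implements Atomics.exchange on a typed array element.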
+class LAtomicExchangeTypedArrayElement : public LInstructionHelper<1, 3, 4> {
+ public:
+ LIR_HEADER(AtomicExchangeTypedArrayElement)
+
+ // ARM, ARM64, x86, x64
+ LAtomicExchangeTypedArrayElement(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp);
+ }
+ // MIPS32, MIPS64
+ LAtomicExchangeTypedArrayElement(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& temp,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ const LDefinition* temp() { return getTemp(0); }
+
+  // Temps that may be used on LL/SC platforms to extract/insert the bits of a
+  // word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ const MAtomicExchangeTypedArrayElement* mir() const {
+ return mir_->toAtomicExchangeTypedArrayElement();
+ }
+};
+
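+// Implements an atomic read-modify-write operation (such as Atomics.add or
+// Atomics.xor) on a typed array element, producing the element's previous
+// value.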
+class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 5> {
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinop)
+
+ static const int32_t valueOp = 2;
+
+ // ARM, ARM64, x86, x64
+ LAtomicTypedArrayElementBinop(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ // MIPS32, MIPS64
+ LAtomicTypedArrayElementBinop(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& temp2,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, temp2);
+ setTemp(2, valueTemp);
+ setTemp(3, offsetTemp);
+ setTemp(4, maskTemp);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() {
+ MOZ_ASSERT(valueOp == 2);
+ return getOperand(2);
+ }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+  // Temps that may be used on LL/SC platforms to extract/insert the bits of a
+  // word.
+ const LDefinition* valueTemp() { return getTemp(2); }
+ const LDefinition* offsetTemp() { return getTemp(3); }
+ const LDefinition* maskTemp() { return getTemp(4); }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
+
+// Atomic binary operation where the result is discarded.
+class LAtomicTypedArrayElementBinopForEffect
+ : public LInstructionHelper<0, 3, 4> {
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinopForEffect)
+
+ // ARM, ARM64, x86, x64
+ LAtomicTypedArrayElementBinopForEffect(
+ const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& flagTemp = LDefinition::BogusTemp())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, flagTemp);
+ }
+ // MIPS32, MIPS64
+ LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() { return getTemp(0); }
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
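+
+ // Note on the valueTemp/offsetTemp/maskTemp temps used by the classes above:
+ // on LL/SC platforms (e.g. MIPS) the hardware only provides word-sized
+ // load-linked/store-conditional, so sub-word atomics are emulated on the
+ // enclosing aligned word. An illustrative sketch of the emitted loop (not
+ // the exact macro-assembler sequence):
+ //
+ //   offsetTemp = bit offset of the element within its containing word
+ //   maskTemp   = element-wide mask shifted left by offsetTemp
+ //   do {
+ //     old       = load-linked(word address)
+ //     valueTemp = (old & ~maskTemp) |
+ //                 ((op(old >> offsetTemp, value) << offsetTemp) & maskTemp)
+ //   } while (!store-conditional(word address, valueTemp))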
+
+class LAtomicLoad64 : public LInstructionHelper<1, 2, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(AtomicLoad64)
+
+ LAtomicLoad64(const LAllocation& elements, const LAllocation& index,
+ const LDefinition& temp, const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+ const MLoadUnboxedScalar* mir() const { return mir_->toLoadUnboxedScalar(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LAtomicStore64 : public LInstructionHelper<0, 3, 2 * INT64_PIECES + 1> {
+ public:
+ LIR_HEADER(AtomicStore64)
+
+ // x64, ARM64
+ LAtomicStore64(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LInt64Definition& temp1)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp1);
+ setInt64Temp(INT64_PIECES, LInt64Definition::BogusTemp());
+ setTemp(2 * INT64_PIECES, LDefinition::BogusTemp());
+ }
+
+ // ARM32
+ LAtomicStore64(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LInt64Definition& temp1,
+ const LInt64Definition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp1);
+ setInt64Temp(INT64_PIECES, temp2);
+ setTemp(2 * INT64_PIECES, LDefinition::BogusTemp());
+ }
+
+ // x86
+ LAtomicStore64(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LInt64Definition& temp1,
+ const LDefinition& tempLow)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp1);
+ setInt64Temp(INT64_PIECES, LInt64Definition::BogusTemp());
+ setTemp(2 * INT64_PIECES, tempLow);
+ }
+
+ const MStoreUnboxedScalar* mir() const {
+ return mir_->toStoreUnboxedScalar();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ LInt64Definition temp1() { return getInt64Temp(0); }
+ LInt64Definition temp2() { return getInt64Temp(INT64_PIECES); }
+ const LDefinition* tempLow() { return getTemp(2 * INT64_PIECES); }
+};
+
+class LCompareExchangeTypedArrayElement64
+ : public LInstructionHelper<1, 4, 3 * INT64_PIECES + 1> {
+ public:
+ LIR_HEADER(CompareExchangeTypedArrayElement64)
+
+ // x64, ARM64
+ LCompareExchangeTypedArrayElement64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& oldval,
+ const LAllocation& newval,
+ const LInt64Definition& temp1,
+ const LInt64Definition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setInt64Temp(0, temp1);
+ setInt64Temp(INT64_PIECES, temp2);
+ setInt64Temp(2 * INT64_PIECES, LInt64Definition::BogusTemp());
+ setTemp(3 * INT64_PIECES, LDefinition::BogusTemp());
+ }
+
+ // x86
+ LCompareExchangeTypedArrayElement64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& oldval,
+ const LAllocation& newval,
+ const LDefinition& tempLow)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setInt64Temp(0, LInt64Definition::BogusTemp());
+ setInt64Temp(INT64_PIECES, LInt64Definition::BogusTemp());
+ setInt64Temp(2 * INT64_PIECES, LInt64Definition::BogusTemp());
+ setTemp(3 * INT64_PIECES, tempLow);
+ }
+
+ // ARM
+ LCompareExchangeTypedArrayElement64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& oldval,
+ const LAllocation& newval,
+ const LInt64Definition& temp1,
+ const LInt64Definition& temp2,
+ const LInt64Definition& temp3)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setInt64Temp(0, temp1);
+ setInt64Temp(INT64_PIECES, temp2);
+ setInt64Temp(2 * INT64_PIECES, temp3);
+ setTemp(3 * INT64_PIECES, LDefinition::BogusTemp());
+ }
+
+ const MCompareExchangeTypedArrayElement* mir() const {
+ return mir_->toCompareExchangeTypedArrayElement();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* oldval() { return getOperand(2); }
+ const LAllocation* newval() { return getOperand(3); }
+ LInt64Definition temp1() { return getInt64Temp(0); }
+ LInt64Definition temp2() { return getInt64Temp(INT64_PIECES); }
+ LInt64Definition temp3() { return getInt64Temp(2 * INT64_PIECES); }
+ const LDefinition* tempLow() { return getTemp(3 * INT64_PIECES); }
+};
+
+class LAtomicExchangeTypedArrayElement64
+ : public LInstructionHelper<1, 3, INT64_PIECES + 1> {
+ public:
+ LIR_HEADER(AtomicExchangeTypedArrayElement64)
+
+ // ARM, ARM64, x64
+ LAtomicExchangeTypedArrayElement64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp1,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp1);
+ setTemp(INT64_PIECES, temp2);
+ }
+
+ // x86
+ LAtomicExchangeTypedArrayElement64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp);
+ setTemp(INT64_PIECES, LDefinition::BogusTemp());
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ LInt64Definition temp1() { return getInt64Temp(0); }
+ const LDefinition* temp2() { return getTemp(INT64_PIECES); }
+
+ const MAtomicExchangeTypedArrayElement* mir() const {
+ return mir_->toAtomicExchangeTypedArrayElement();
+ }
+};
+
+class LAtomicTypedArrayElementBinop64
+ : public LInstructionHelper<1, 3, 3 * INT64_PIECES> {
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinop64)
+
+ // x86
+ LAtomicTypedArrayElementBinop64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp1)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp1);
+ setInt64Temp(INT64_PIECES, LInt64Definition::BogusTemp());
+ setInt64Temp(2 * INT64_PIECES, LInt64Definition::BogusTemp());
+ }
+
+ // ARM64, x64
+ LAtomicTypedArrayElementBinop64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp1,
+ const LInt64Definition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp1);
+ setInt64Temp(INT64_PIECES, temp2);
+ setInt64Temp(2 * INT64_PIECES, LInt64Definition::BogusTemp());
+ }
+
+ // ARM
+ LAtomicTypedArrayElementBinop64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp1,
+ const LInt64Definition& temp2,
+ const LInt64Definition& temp3)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp1);
+ setInt64Temp(INT64_PIECES, temp2);
+ setInt64Temp(2 * INT64_PIECES, temp3);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ LInt64Definition temp1() { return getInt64Temp(0); }
+ LInt64Definition temp2() { return getInt64Temp(INT64_PIECES); }
+ LInt64Definition temp3() { return getInt64Temp(2 * INT64_PIECES); }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
+
+// Atomic binary operation where the result is discarded.
+class LAtomicTypedArrayElementBinopForEffect64
+ : public LInstructionHelper<0, 3, 2 * INT64_PIECES + 1> {
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinopForEffect64)
+
+ // x86
+ LAtomicTypedArrayElementBinopForEffect64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp,
+ const LDefinition& tempLow)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp);
+ setInt64Temp(INT64_PIECES, LInt64Definition::BogusTemp());
+ setTemp(2 * INT64_PIECES, tempLow);
+ }
+
+ // x64
+ LAtomicTypedArrayElementBinopForEffect64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp);
+ setInt64Temp(INT64_PIECES, LInt64Definition::BogusTemp());
+ setTemp(2 * INT64_PIECES, LDefinition::BogusTemp());
+ }
+
+ // ARM64
+ LAtomicTypedArrayElementBinopForEffect64(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp1,
+ const LInt64Definition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp1);
+ setInt64Temp(INT64_PIECES, temp2);
+ setTemp(2 * INT64_PIECES, LDefinition::BogusTemp());
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ LInt64Definition temp1() { return getInt64Temp(0); }
+ LInt64Definition temp2() { return getInt64Temp(INT64_PIECES); }
+ const LDefinition* tempLow() { return getTemp(2 * INT64_PIECES); }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
+
+class LIteratorHasIndicesAndBranch : public LControlInstructionHelper<2, 2, 2> {
+ public:
+ LIR_HEADER(IteratorHasIndicesAndBranch)
+
+ LIteratorHasIndicesAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LAllocation& object,
+ const LAllocation& iterator,
+ const LDefinition& temp,
+ const LDefinition& temp2)
+ : LControlInstructionHelper(classOpcode) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setOperand(0, object);
+ setOperand(1, iterator);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* iterator() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+class LIsNoIterAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(IsNoIterAndBranch)
+
+ LIsNoIterAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& input)
+ : LControlInstructionHelper(classOpcode) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+class LInstanceOfCache : public LInstructionHelper<1, BOX_PIECES + 1, 0> {
+ public:
+ LIR_HEADER(InstanceOfCache)
+ LInstanceOfCache(const LBoxAllocation& lhs, const LAllocation& rhs)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(LHS, lhs);
+ setOperand(RHS, rhs);
+ }
+
+ const LDefinition* output() { return this->getDef(0); }
+ const LAllocation* rhs() { return getOperand(RHS); }
+
+ static const size_t LHS = 0;
+ static const size_t RHS = BOX_PIECES;
+};
+
+class LIsObjectAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(IsObjectAndBranch)
+
+ LIsObjectAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& input)
+ : LControlInstructionHelper(classOpcode) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+class LIsNullOrUndefinedAndBranch
+ : public LControlInstructionHelper<2, BOX_PIECES, 0> {
+ MIsNullOrUndefined* isNullOrUndefined_;
+
+ public:
+ LIR_HEADER(IsNullOrUndefinedAndBranch)
+ static const size_t Input = 0;
+
+ LIsNullOrUndefinedAndBranch(MIsNullOrUndefined* isNullOrUndefined,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& input)
+ : LControlInstructionHelper(classOpcode),
+ isNullOrUndefined_(isNullOrUndefined) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+
+ MIsNullOrUndefined* isNullOrUndefinedMir() const {
+ return isNullOrUndefined_;
+ }
+};
+
+template <size_t Defs, size_t Ops>
+class LWasmSelectBase : public LInstructionHelper<Defs, Ops, 0> {
+ typedef LInstructionHelper<Defs, Ops, 0> Base;
+
+ protected:
+ explicit LWasmSelectBase(LNode::Opcode opcode) : Base(opcode) {}
+
+ public:
+ MWasmSelect* mir() const { return Base::mir_->toWasmSelect(); }
+};
+
+class LWasmSelect : public LWasmSelectBase<1, 3> {
+ public:
+ LIR_HEADER(WasmSelect);
+
+ static const size_t TrueExprIndex = 0;
+ static const size_t FalseExprIndex = 1;
+ static const size_t CondIndex = 2;
+
+ LWasmSelect(const LAllocation& trueExpr, const LAllocation& falseExpr,
+ const LAllocation& cond)
+ : LWasmSelectBase(classOpcode) {
+ setOperand(TrueExprIndex, trueExpr);
+ setOperand(FalseExprIndex, falseExpr);
+ setOperand(CondIndex, cond);
+ }
+
+ const LAllocation* trueExpr() { return getOperand(TrueExprIndex); }
+ const LAllocation* falseExpr() { return getOperand(FalseExprIndex); }
+ const LAllocation* condExpr() { return getOperand(CondIndex); }
+};
+
+class LWasmSelectI64
+ : public LWasmSelectBase<INT64_PIECES, 2 * INT64_PIECES + 1> {
+ public:
+ LIR_HEADER(WasmSelectI64);
+
+ static const size_t TrueExprIndex = 0;
+ static const size_t FalseExprIndex = INT64_PIECES;
+ static const size_t CondIndex = INT64_PIECES * 2;
+
+ LWasmSelectI64(const LInt64Allocation& trueExpr,
+ const LInt64Allocation& falseExpr, const LAllocation& cond)
+ : LWasmSelectBase(classOpcode) {
+ setInt64Operand(TrueExprIndex, trueExpr);
+ setInt64Operand(FalseExprIndex, falseExpr);
+ setOperand(CondIndex, cond);
+ }
+
+ const LInt64Allocation trueExpr() { return getInt64Operand(TrueExprIndex); }
+ const LInt64Allocation falseExpr() { return getInt64Operand(FalseExprIndex); }
+ const LAllocation* condExpr() { return getOperand(CondIndex); }
+};
+
+class LWasmCompareAndSelect : public LWasmSelectBase<1, 4> {
+ MCompare::CompareType compareType_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(WasmCompareAndSelect);
+
+ static const size_t LeftExprIndex = 0;
+ static const size_t RightExprIndex = 1;
+ static const size_t IfTrueExprIndex = 2;
+ static const size_t IfFalseExprIndex = 3;
+
+ LWasmCompareAndSelect(const LAllocation& leftExpr,
+ const LAllocation& rightExpr,
+ MCompare::CompareType compareType, JSOp jsop,
+ const LAllocation& ifTrueExpr,
+ const LAllocation& ifFalseExpr)
+ : LWasmSelectBase(classOpcode), compareType_(compareType), jsop_(jsop) {
+ setOperand(LeftExprIndex, leftExpr);
+ setOperand(RightExprIndex, rightExpr);
+ setOperand(IfTrueExprIndex, ifTrueExpr);
+ setOperand(IfFalseExprIndex, ifFalseExpr);
+ }
+
+ const LAllocation* leftExpr() { return getOperand(LeftExprIndex); }
+ const LAllocation* rightExpr() { return getOperand(RightExprIndex); }
+ const LAllocation* ifTrueExpr() { return getOperand(IfTrueExprIndex); }
+ const LAllocation* ifFalseExpr() { return getOperand(IfFalseExprIndex); }
+
+ MCompare::CompareType compareType() { return compareType_; }
+ JSOp jsop() { return jsop_; }
+};
+
+class LWasmBoundsCheck64
+ : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmBoundsCheck64);
+ explicit LWasmBoundsCheck64(const LInt64Allocation& ptr,
+ const LInt64Allocation& boundsCheckLimit)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, ptr);
+ setInt64Operand(INT64_PIECES, boundsCheckLimit);
+ }
+ MWasmBoundsCheck* mir() const { return mir_->toWasmBoundsCheck(); }
+ LInt64Allocation ptr() { return getInt64Operand(0); }
+ LInt64Allocation boundsCheckLimit() { return getInt64Operand(INT64_PIECES); }
+};
+
+namespace details {
+
+// This is a base class for LWasmLoad/LWasmLoadI64.
+template <size_t Defs, size_t Temp>
+class LWasmLoadBase : public LInstructionHelper<Defs, 2, Temp> {
+ public:
+ typedef LInstructionHelper<Defs, 2, Temp> Base;
+ explicit LWasmLoadBase(LNode::Opcode opcode, const LAllocation& ptr,
+ const LAllocation& memoryBase)
+ : Base(opcode) {
+ Base::setOperand(0, ptr);
+ Base::setOperand(1, memoryBase);
+ }
+ MWasmLoad* mir() const { return Base::mir_->toWasmLoad(); }
+ const LAllocation* ptr() { return Base::getOperand(0); }
+ const LAllocation* memoryBase() { return Base::getOperand(1); }
+};
+
+} // namespace details
+
+class LWasmLoad : public details::LWasmLoadBase<1, 1> {
+ public:
+ explicit LWasmLoad(const LAllocation& ptr,
+ const LAllocation& memoryBase = LAllocation())
+ : LWasmLoadBase(classOpcode, ptr, memoryBase) {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ const LDefinition* ptrCopy() { return Base::getTemp(0); }
+
+ LIR_HEADER(WasmLoad);
+};
+
+class LWasmLoadI64 : public details::LWasmLoadBase<INT64_PIECES, 1> {
+ public:
+ explicit LWasmLoadI64(const LAllocation& ptr,
+ const LAllocation& memoryBase = LAllocation())
+ : LWasmLoadBase(classOpcode, ptr, memoryBase) {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ const LDefinition* ptrCopy() { return Base::getTemp(0); }
+
+ LIR_HEADER(WasmLoadI64);
+};
+
+class LWasmStore : public LInstructionHelper<0, 3, 1> {
+ public:
+ LIR_HEADER(WasmStore);
+
+ static const size_t PtrIndex = 0;
+ static const size_t ValueIndex = 1;
+ static const size_t MemoryBaseIndex = 2;
+
+ LWasmStore(const LAllocation& ptr, const LAllocation& value,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(PtrIndex, ptr);
+ setOperand(ValueIndex, value);
+ setOperand(MemoryBaseIndex, memoryBase);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ MWasmStore* mir() const { return mir_->toWasmStore(); }
+ const LAllocation* ptr() { return getOperand(PtrIndex); }
+ const LDefinition* ptrCopy() { return getTemp(0); }
+ const LAllocation* value() { return getOperand(ValueIndex); }
+ const LAllocation* memoryBase() { return getOperand(MemoryBaseIndex); }
+};
+
+class LWasmStoreI64 : public LInstructionHelper<0, INT64_PIECES + 2, 1> {
+ public:
+ LIR_HEADER(WasmStoreI64);
+
+ static const size_t PtrIndex = 0;
+ static const size_t MemoryBaseIndex = 1;
+ static const size_t ValueIndex = 2;
+
+ LWasmStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(PtrIndex, ptr);
+ setOperand(MemoryBaseIndex, memoryBase);
+ setInt64Operand(ValueIndex, value);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ MWasmStore* mir() const { return mir_->toWasmStore(); }
+ const LAllocation* ptr() { return getOperand(PtrIndex); }
+ const LAllocation* memoryBase() { return getOperand(MemoryBaseIndex); }
+ const LDefinition* ptrCopy() { return getTemp(0); }
+ const LInt64Allocation value() { return getInt64Operand(ValueIndex); }
+};
+
+class LWasmCompareExchangeHeap : public LInstructionHelper<1, 4, 4> {
+ public:
+ LIR_HEADER(WasmCompareExchangeHeap);
+
+ // ARM, ARM64, x86, x64
+ LWasmCompareExchangeHeap(const LAllocation& ptr, const LAllocation& oldValue,
+ const LAllocation& newValue,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, oldValue);
+ setOperand(2, newValue);
+ setOperand(3, memoryBase);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ // MIPS32, MIPS64
+ LWasmCompareExchangeHeap(const LAllocation& ptr, const LAllocation& oldValue,
+ const LAllocation& newValue,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, oldValue);
+ setOperand(2, newValue);
+ setOperand(3, LAllocation());
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* oldValue() { return getOperand(1); }
+ const LAllocation* newValue() { return getOperand(2); }
+ const LAllocation* memoryBase() { return getOperand(3); }
+ const LDefinition* addrTemp() { return getTemp(0); }
+
+ void setAddrTemp(const LDefinition& addrTemp) { setTemp(0, addrTemp); }
+
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+};
+
+class LWasmAtomicExchangeHeap : public LInstructionHelper<1, 3, 4> {
+ public:
+ LIR_HEADER(WasmAtomicExchangeHeap);
+
+ // ARM, ARM64, x86, x64
+ LWasmAtomicExchangeHeap(const LAllocation& ptr, const LAllocation& value,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, memoryBase);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ // MIPS32, MIPS64
+ LWasmAtomicExchangeHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, LAllocation());
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+ const LAllocation* memoryBase() { return getOperand(2); }
+ const LDefinition* addrTemp() { return getTemp(0); }
+
+ void setAddrTemp(const LDefinition& addrTemp) { setTemp(0, addrTemp); }
+
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ MWasmAtomicExchangeHeap* mir() const {
+ return mir_->toWasmAtomicExchangeHeap();
+ }
+};
+
+class LWasmAtomicBinopHeap : public LInstructionHelper<1, 3, 6> {
+ public:
+ LIR_HEADER(WasmAtomicBinopHeap);
+
+ static const int32_t valueOp = 1;
+
+ // ARM, ARM64, x86, x64
+ LWasmAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& temp,
+ const LDefinition& flagTemp = LDefinition::BogusTemp(),
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, memoryBase);
+ setTemp(0, temp);
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, flagTemp);
+ }
+ // MIPS32, MIPS64
+ LWasmAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, LAllocation());
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, LDefinition::BogusTemp());
+ setTemp(3, valueTemp);
+ setTemp(4, offsetTemp);
+ setTemp(5, maskTemp);
+ }
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* value() {
+ MOZ_ASSERT(valueOp == 1);
+ return getOperand(1);
+ }
+ const LAllocation* memoryBase() { return getOperand(2); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ // Temp that may be used on some platforms to hold a computed address.
+ const LDefinition* addrTemp() { return getTemp(1); }
+ void setAddrTemp(const LDefinition& addrTemp) { setTemp(1, addrTemp); }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() { return getTemp(2); }
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() { return getTemp(3); }
+ const LDefinition* offsetTemp() { return getTemp(4); }
+ const LDefinition* maskTemp() { return getTemp(5); }
+
+ MWasmAtomicBinopHeap* mir() const { return mir_->toWasmAtomicBinopHeap(); }
+};
+
+// Atomic binary operation where the result is discarded.
+class LWasmAtomicBinopHeapForEffect : public LInstructionHelper<0, 3, 5> {
+ public:
+ LIR_HEADER(WasmAtomicBinopHeapForEffect);
+ // ARM, ARM64, x86, x64
+ LWasmAtomicBinopHeapForEffect(
+ const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& flagTemp = LDefinition::BogusTemp(),
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, memoryBase);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, flagTemp);
+ }
+ // MIPS32, MIPS64
+ LWasmAtomicBinopHeapForEffect(const LAllocation& ptr,
+ const LAllocation& value,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, LAllocation());
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, valueTemp);
+ setTemp(3, offsetTemp);
+ setTemp(4, maskTemp);
+ }
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+ const LAllocation* memoryBase() { return getOperand(2); }
+
+ // Temp that may be used on some platforms to hold a computed address.
+ const LDefinition* addrTemp() { return getTemp(0); }
+ void setAddrTemp(const LDefinition& addrTemp) { setTemp(0, addrTemp); }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() { return getTemp(1); }
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() { return getTemp(2); }
+ const LDefinition* offsetTemp() { return getTemp(3); }
+ const LDefinition* maskTemp() { return getTemp(4); }
+
+ MWasmAtomicBinopHeap* mir() const { return mir_->toWasmAtomicBinopHeap(); }
+};
+
+class LWasmDerivedPointer : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmDerivedPointer);
+ explicit LWasmDerivedPointer(const LAllocation& base)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, base);
+ }
+ const LAllocation* base() { return getOperand(0); }
+ uint32_t offset() { return mirRaw()->toWasmDerivedPointer()->offset(); }
+};
+
+class LWasmDerivedIndexPointer : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(WasmDerivedIndexPointer);
+ explicit LWasmDerivedIndexPointer(const LAllocation& base,
+ const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, base);
+ setOperand(1, index);
+ }
+ const LAllocation* base() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ Scale scale() { return mirRaw()->toWasmDerivedIndexPointer()->scale(); }
+};
+
+class LWasmParameterI64 : public LInstructionHelper<INT64_PIECES, 0, 0> {
+ public:
+ LIR_HEADER(WasmParameterI64);
+
+ LWasmParameterI64() : LInstructionHelper(classOpcode) {}
+};
+
+// This is used only with LWasmCall.
+class LWasmCallIndirectAdjunctSafepoint : public LInstructionHelper<0, 0, 0> {
+ CodeOffset offs_;
+ uint32_t framePushedAtStackMapBase_;
+
+ public:
+ LIR_HEADER(WasmCallIndirectAdjunctSafepoint);
+
+ LWasmCallIndirectAdjunctSafepoint()
+ : LInstructionHelper(classOpcode),
+ offs_(0),
+ framePushedAtStackMapBase_(0) {}
+
+ CodeOffset safepointLocation() const {
+ MOZ_ASSERT(offs_.offset() != 0);
+ return offs_;
+ }
+ uint32_t framePushedAtStackMapBase() const {
+ MOZ_ASSERT(offs_.offset() != 0);
+ return framePushedAtStackMapBase_;
+ }
+ void recordSafepointInfo(CodeOffset offs, uint32_t framePushed) {
+ offs_ = offs;
+ framePushedAtStackMapBase_ = framePushed;
+ }
+};
+
+// LWasmCall may be generated into two function calls in the case of
+// call_indirect, one for the fast path and one for the slow path. In that
+// case, the node carries a pointer to a companion node, the "adjunct
+// safepoint", representing the safepoint for the second of the two calls. The
+// dual-call construction is only meaningful for wasm because wasm has no
+// invalidation of code; this is not a pattern to be used generally.
+class LWasmCall : public LVariadicInstruction<0, 0> {
+ bool needsBoundsCheck_;
+ mozilla::Maybe<uint32_t> tableSize_;
+ LWasmCallIndirectAdjunctSafepoint* adjunctSafepoint_;
+
+ public:
+ LIR_HEADER(WasmCall);
+
+ LWasmCall(uint32_t numOperands, bool needsBoundsCheck,
+ mozilla::Maybe<uint32_t> tableSize = mozilla::Nothing())
+ : LVariadicInstruction(classOpcode, numOperands),
+ needsBoundsCheck_(needsBoundsCheck),
+ tableSize_(tableSize),
+ adjunctSafepoint_(nullptr) {
+ this->setIsCall();
+ }
+
+ MWasmCallBase* callBase() const {
+ if (isCatchable()) {
+ return static_cast<MWasmCallBase*>(mirCatchable());
+ }
+ return static_cast<MWasmCallBase*>(mirUncatchable());
+ }
+ bool isCatchable() const { return mir_->isWasmCallCatchable(); }
+ MWasmCallCatchable* mirCatchable() const {
+ return mir_->toWasmCallCatchable();
+ }
+ MWasmCallUncatchable* mirUncatchable() const {
+ return mir_->toWasmCallUncatchable();
+ }
+
+ static bool isCallPreserved(AnyRegister reg) {
+ // All MWasmCalls preserve the instance register:
+ // - internal/indirect calls do by the internal wasm ABI
+ // - import calls do by explicitly saving/restoring at the callsite
+ // - builtin calls do because the instance register is non-volatile
+ // See also CodeGeneratorShared::emitWasmCall.
+ //
+ // All other registers are not preserved. This is relied upon by
+ // MWasmCallCatchable which needs all live registers to be spilled before
+ // a call.
+ return !reg.isFloat() && reg.gpr() == InstanceReg;
+ }
+
+ bool needsBoundsCheck() const { return needsBoundsCheck_; }
+ mozilla::Maybe<uint32_t> tableSize() const { return tableSize_; }
+ LWasmCallIndirectAdjunctSafepoint* adjunctSafepoint() const {
+ MOZ_ASSERT(adjunctSafepoint_ != nullptr);
+ return adjunctSafepoint_;
+ }
+ void setAdjunctSafepoint(LWasmCallIndirectAdjunctSafepoint* asp) {
+ adjunctSafepoint_ = asp;
+ }
+};
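+
+ // Sketch of how LWasmCallIndirectAdjunctSafepoint is intended to pair with
+ // LWasmCall during lowering and codegen. The surrounding plumbing (alloc(),
+ // the offset and frame-depth values) is assumed here for illustration and is
+ // not prescribed by these classes:
+ //
+ //   auto* call = new (alloc()) LWasmCall(numOperands, needsBoundsCheck,
+ //                                        mozilla::Some(tableSize));
+ //   auto* adjunct = new (alloc()) LWasmCallIndirectAdjunctSafepoint();
+ //   call->setAdjunctSafepoint(adjunct);
+ //   ...
+ //   // Later, at the slow-path call site, codegen records the second
+ //   // safepoint's location:
+ //   adjunct->recordSafepointInfo(slowCallOffset, framePushedAtStackMapBase);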
+
+class LWasmRegisterResult : public LInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(WasmRegisterResult);
+
+ LWasmRegisterResult() : LInstructionHelper(classOpcode) {}
+
+ MWasmRegisterResult* mir() const {
+ if (!mir_->isWasmRegisterResult()) {
+ return nullptr;
+ }
+ return mir_->toWasmRegisterResult();
+ }
+};
+
+class LWasmRegisterPairResult : public LInstructionHelper<2, 0, 0> {
+ public:
+ LIR_HEADER(WasmRegisterPairResult);
+
+ LWasmRegisterPairResult() : LInstructionHelper(classOpcode) {}
+
+ MDefinition* mir() const { return mirRaw(); }
+};
+
+inline uint32_t LStackArea::base() const {
+ return ins()->toWasmStackResultArea()->mir()->base();
+}
+inline void LStackArea::setBase(uint32_t base) {
+ ins()->toWasmStackResultArea()->mir()->setBase(base);
+}
+inline uint32_t LStackArea::size() const {
+ return ins()->toWasmStackResultArea()->mir()->byteSize();
+}
+
+inline bool LStackArea::ResultIterator::done() const {
+ return idx_ == alloc_.ins()->toWasmStackResultArea()->mir()->resultCount();
+}
+inline void LStackArea::ResultIterator::next() {
+ MOZ_ASSERT(!done());
+ idx_++;
+}
+inline LAllocation LStackArea::ResultIterator::alloc() const {
+ MOZ_ASSERT(!done());
+ MWasmStackResultArea* area = alloc_.ins()->toWasmStackResultArea()->mir();
+ return LStackSlot(area->base() - area->result(idx_).offset());
+}
+inline bool LStackArea::ResultIterator::isGcPointer() const {
+ MOZ_ASSERT(!done());
+ MWasmStackResultArea* area = alloc_.ins()->toWasmStackResultArea()->mir();
+ MIRType type = area->result(idx_).type();
+#ifndef JS_PUNBOX64
+ // LDefinition::TypeFrom isn't defined for MIRType::Int64 values on this
+ // platform: the 64-bit result is split across two general-purpose stack
+ // slots and is never a GC pointer, so handle it as a special case here.
+ if (type == MIRType::Int64) {
+ return false;
+ }
+#endif
+ return LDefinition::TypeFrom(type) == LDefinition::OBJECT;
+}
+
+class LWasmStackResult : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmStackResult);
+
+ LWasmStackResult() : LInstructionHelper(classOpcode) {}
+
+ MWasmStackResult* mir() const { return mir_->toWasmStackResult(); }
+ LStackSlot result(uint32_t base) const {
+ return LStackSlot(base - mir()->result().offset());
+ }
+};
+
+class LWasmStackResult64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(WasmStackResult64);
+
+ LWasmStackResult64() : LInstructionHelper(classOpcode) {}
+
+ MWasmStackResult* mir() const { return mir_->toWasmStackResult(); }
+ LStackSlot result(uint32_t base, LDefinition* def) {
+ uint32_t offset = base - mir()->result().offset();
+#if defined(JS_NUNBOX32)
+ if (def == getDef(INT64LOW_INDEX)) {
+ offset -= INT64LOW_OFFSET;
+ } else {
+ MOZ_ASSERT(def == getDef(INT64HIGH_INDEX));
+ offset -= INT64HIGH_OFFSET;
+ }
+#else
+ MOZ_ASSERT(def == getDef(0));
+#endif
+ return LStackSlot(offset);
+ }
+};
+
+inline LStackSlot LStackArea::resultAlloc(LInstruction* lir,
+ LDefinition* def) const {
+ if (lir->isWasmStackResult64()) {
+ return lir->toWasmStackResult64()->result(base(), def);
+ }
+ MOZ_ASSERT(def == lir->getDef(0));
+ return lir->toWasmStackResult()->result(base());
+}
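+ // For example, given an area whose base() is 16 and a result recorded at
+ // offset 8, the slot returned above is LStackSlot(16 - 8) == LStackSlot(8).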
+
+inline bool LNode::isCallPreserved(AnyRegister reg) const {
+ return isWasmCall() && LWasmCall::isCallPreserved(reg);
+}
+
+class LAssertRangeI : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(AssertRangeI)
+
+ explicit LAssertRangeI(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+
+ MAssertRange* mir() { return mir_->toAssertRange(); }
+ const Range* range() { return mir()->assertedRange(); }
+};
+
+class LAssertRangeD : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(AssertRangeD)
+
+ LAssertRangeD(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MAssertRange* mir() { return mir_->toAssertRange(); }
+ const Range* range() { return mir()->assertedRange(); }
+};
+
+class LAssertRangeF : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(AssertRangeF)
+ LAssertRangeF(const LAllocation& input, const LDefinition& temp,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MAssertRange* mir() { return mir_->toAssertRange(); }
+ const Range* range() { return mir()->assertedRange(); }
+};
+
+class LAssertRangeV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(AssertRangeV)
+
+ LAssertRangeV(const LBoxAllocation& input, const LDefinition& temp,
+ const LDefinition& floatTemp1, const LDefinition& floatTemp2)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ setTemp(1, floatTemp1);
+ setTemp(2, floatTemp2);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* floatTemp1() { return getTemp(1); }
+ const LDefinition* floatTemp2() { return getTemp(2); }
+
+ MAssertRange* mir() { return mir_->toAssertRange(); }
+ const Range* range() { return mir()->assertedRange(); }
+};
+
+class LMemoryBarrier : public LInstructionHelper<0, 0, 0> {
+ private:
+ const MemoryBarrierBits type_;
+
+ public:
+ LIR_HEADER(MemoryBarrier)
+
+ // The parameter 'type' is a bitwise 'or' of the barrier types needed;
+ // see AtomicOp.h.
+ explicit LMemoryBarrier(MemoryBarrierBits type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ MOZ_ASSERT((type_ & ~MembarAllbits) == MembarNobits);
+ }
+
+ MemoryBarrierBits type() const { return type_; }
+};
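+
+ // Illustrative construction of LMemoryBarrier (barrier bit names per
+ // AtomicOp.h; the alloc()/add() calls are the usual lowering boilerplate and
+ // are assumed here):
+ //
+ //   add(new (alloc()) LMemoryBarrier(MembarStoreStore | MembarStoreLoad));
+ //
+ // MembarFull, the union of all four ordering bits, is the common choice for
+ // a sequentially consistent fence.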
+
+// Math.random().
+class LRandom : public LInstructionHelper<1, 0, 1 + 2 * INT64_PIECES> {
+ public:
+ LIR_HEADER(Random)
+ LRandom(const LDefinition& temp0, const LInt64Definition& temp1,
+ const LInt64Definition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp0);
+ setInt64Temp(1, temp1);
+ setInt64Temp(1 + INT64_PIECES, temp2);
+ }
+ const LDefinition* temp0() { return getTemp(0); }
+ LInt64Definition temp1() { return getInt64Temp(1); }
+ LInt64Definition temp2() { return getInt64Temp(1 + INT64_PIECES); }
+
+ MRandom* mir() const { return mir_->toRandom(); }
+};
+
+class LBigIntAsIntN64 : public LInstructionHelper<1, 1, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(BigIntAsIntN64)
+
+ LBigIntAsIntN64(const LAllocation& input, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LBigIntAsIntN32 : public LInstructionHelper<1, 1, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(BigIntAsIntN32)
+
+ LBigIntAsIntN32(const LAllocation& input, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LBigIntAsUintN64 : public LInstructionHelper<1, 1, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(BigIntAsUintN64)
+
+ LBigIntAsUintN64(const LAllocation& input, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LBigIntAsUintN32 : public LInstructionHelper<1, 1, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(BigIntAsUintN32)
+
+ LBigIntAsUintN32(const LAllocation& input, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+template <size_t NumDefs>
+class LIonToWasmCallBase : public LVariadicInstruction<NumDefs, 1> {
+ using Base = LVariadicInstruction<NumDefs, 1>;
+
+ public:
+ explicit LIonToWasmCallBase(LNode::Opcode classOpcode, uint32_t numOperands,
+ const LDefinition& temp)
+ : Base(classOpcode, numOperands) {
+ this->setIsCall();
+ this->setTemp(0, temp);
+ }
+ MIonToWasmCall* mir() const { return this->mir_->toIonToWasmCall(); }
+ const LDefinition* temp() { return this->getTemp(0); }
+};
+
+class LIonToWasmCall : public LIonToWasmCallBase<1> {
+ public:
+ LIR_HEADER(IonToWasmCall);
+ LIonToWasmCall(uint32_t numOperands, const LDefinition& temp)
+ : LIonToWasmCallBase<1>(classOpcode, numOperands, temp) {}
+};
+
+class LIonToWasmCallV : public LIonToWasmCallBase<BOX_PIECES> {
+ public:
+ LIR_HEADER(IonToWasmCallV);
+ LIonToWasmCallV(uint32_t numOperands, const LDefinition& temp)
+ : LIonToWasmCallBase<BOX_PIECES>(classOpcode, numOperands, temp) {}
+};
+
+class LIonToWasmCallI64 : public LIonToWasmCallBase<INT64_PIECES> {
+ public:
+ LIR_HEADER(IonToWasmCallI64);
+ LIonToWasmCallI64(uint32_t numOperands, const LDefinition& temp)
+ : LIonToWasmCallBase<INT64_PIECES>(classOpcode, numOperands, temp) {}
+};
+
+class LWasmGcObjectIsSubtypeOfAbstractAndBranch
+ : public LControlInstructionHelper<2, 2, 2> {
+ wasm::RefType sourceType_;
+ wasm::RefType destType_;
+
+ public:
+ LIR_HEADER(WasmGcObjectIsSubtypeOfAbstractAndBranch)
+
+ static constexpr uint32_t Object = 0;
+
+ LWasmGcObjectIsSubtypeOfAbstractAndBranch(MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse,
+ wasm::RefType sourceType,
+ wasm::RefType destType,
+ const LAllocation& object,
+ const LDefinition& temp0)
+ : LControlInstructionHelper(classOpcode),
+ sourceType_(sourceType),
+ destType_(destType) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setOperand(Object, object);
+ setTemp(0, temp0);
+ }
+
+ wasm::RefType sourceType() const { return sourceType_; }
+ wasm::RefType destType() const { return destType_; }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+
+ const LAllocation* object() { return getOperand(Object); }
+ const LDefinition* temp0() { return getTemp(0); }
+};
+
+class LWasmGcObjectIsSubtypeOfConcreteAndBranch
+ : public LControlInstructionHelper<2, 2, 2> {
+ wasm::RefType sourceType_;
+ wasm::RefType destType_;
+
+ public:
+ LIR_HEADER(WasmGcObjectIsSubtypeOfConcreteAndBranch)
+
+ static constexpr uint32_t Object = 0;
+ static constexpr uint32_t SuperSuperTypeVector = 1;
+
+ LWasmGcObjectIsSubtypeOfConcreteAndBranch(
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse, wasm::RefType sourceType,
+ wasm::RefType destType, const LAllocation& object,
+ const LAllocation& superSuperTypeVector, const LDefinition& temp0,
+ const LDefinition& temp1)
+ : LControlInstructionHelper(classOpcode),
+ sourceType_(sourceType),
+ destType_(destType) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setOperand(Object, object);
+ setOperand(SuperSuperTypeVector, superSuperTypeVector);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ wasm::RefType sourceType() const { return sourceType_; }
+ wasm::RefType destType() const { return destType_; }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+
+ const LAllocation* object() { return getOperand(Object); }
+ const LAllocation* superSuperTypeVector() {
+ return getOperand(SuperSuperTypeVector);
+ }
+ const LDefinition* temp0() { return getTemp(0); }
+ const LDefinition* temp1() { return getTemp(1); }
+};
+
+// Wasm SIMD.
+
+// (v128, v128, v128) -> v128 effect-free operation.
+// temp is FPR.
+class LWasmTernarySimd128 : public LInstructionHelper<1, 3, 1> {
+ wasm::SimdOp op_;
+
+ public:
+ LIR_HEADER(WasmTernarySimd128)
+
+ static constexpr uint32_t V0 = 0;
+ static constexpr uint32_t V1 = 1;
+ static constexpr uint32_t V2 = 2;
+
+ LWasmTernarySimd128(wasm::SimdOp op, const LAllocation& v0,
+ const LAllocation& v1, const LAllocation& v2)
+ : LInstructionHelper(classOpcode), op_(op) {
+ setOperand(V0, v0);
+ setOperand(V1, v1);
+ setOperand(V2, v2);
+ }
+
+ LWasmTernarySimd128(wasm::SimdOp op, const LAllocation& v0,
+ const LAllocation& v1, const LAllocation& v2,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode), op_(op) {
+ setOperand(V0, v0);
+ setOperand(V1, v1);
+ setOperand(V2, v2);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* v0() { return getOperand(V0); }
+ const LAllocation* v1() { return getOperand(V1); }
+ const LAllocation* v2() { return getOperand(V2); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ wasm::SimdOp simdOp() const { return op_; }
+};
+
+// (v128, v128) -> v128 effect-free operations
+// lhs and dest are the same.
+// temps (if in use) are FPR.
+// The op may differ from the MIR node's op.
+class LWasmBinarySimd128 : public LInstructionHelper<1, 2, 2> {
+ wasm::SimdOp op_;
+
+ public:
+ LIR_HEADER(WasmBinarySimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmBinarySimd128(wasm::SimdOp op, const LAllocation& lhs,
+ const LAllocation& rhs, const LDefinition& temp0,
+ const LDefinition& temp1)
+ : LInstructionHelper(classOpcode), op_(op) {
+ setOperand(Lhs, lhs);
+ setOperand(Rhs, rhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LAllocation* rhs() { return getOperand(Rhs); }
+ wasm::SimdOp simdOp() const { return op_; }
+
+ static bool SpecializeForConstantRhs(wasm::SimdOp op);
+};
+
+class LWasmBinarySimd128WithConstant : public LInstructionHelper<1, 1, 1> {
+ SimdConstant rhs_;
+
+ public:
+ LIR_HEADER(WasmBinarySimd128WithConstant)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+
+ LWasmBinarySimd128WithConstant(const LAllocation& lhs,
+ const SimdConstant& rhs,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode), rhs_(rhs) {
+ setOperand(Lhs, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const SimdConstant& rhs() { return rhs_; }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmBinarySimd128WithConstant()->simdOp();
+ }
+};
+
+// (v128, i32) -> v128 effect-free variable-width shift operations
+// lhs and dest are the same.
+// temp is an FPR (if in use).
+class LWasmVariableShiftSimd128 : public LInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(WasmVariableShiftSimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmVariableShiftSimd128(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Lhs, lhs);
+ setOperand(Rhs, rhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LAllocation* rhs() { return getOperand(Rhs); }
+ wasm::SimdOp simdOp() const { return mir_->toWasmShiftSimd128()->simdOp(); }
+};
+
+// (v128, i32) -> v128 effect-free constant-width shift operations
+class LWasmConstantShiftSimd128 : public LInstructionHelper<1, 1, 0> {
+ int32_t shift_;
+
+ public:
+ LIR_HEADER(WasmConstantShiftSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ LWasmConstantShiftSimd128(const LAllocation& src, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(Src, src);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ int32_t shift() { return shift_; }
+ wasm::SimdOp simdOp() const { return mir_->toWasmShiftSimd128()->simdOp(); }
+};
+
+// (v128) -> v128 sign replication operation.
+class LWasmSignReplicationSimd128 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmSignReplicationSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ explicit LWasmSignReplicationSimd128(const LAllocation& src)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Src, src);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ wasm::SimdOp simdOp() const { return mir_->toWasmShiftSimd128()->simdOp(); }
+};
+
+// (v128, v128, imm_simd) -> v128 effect-free operation.
+// temp is FPR (and always in use).
+class LWasmShuffleSimd128 : public LInstructionHelper<1, 2, 1> {
+ private:
+ SimdShuffleOp op_;
+ SimdConstant control_;
+
+ public:
+ LIR_HEADER(WasmShuffleSimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmShuffleSimd128(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp, SimdShuffleOp op,
+ SimdConstant control)
+ : LInstructionHelper(classOpcode), op_(op), control_(control) {
+ setOperand(Lhs, lhs);
+ setOperand(Rhs, rhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LAllocation* rhs() { return getOperand(Rhs); }
+ const LDefinition* temp() { return getTemp(0); }
+ SimdShuffleOp op() { return op_; }
+ SimdConstant control() { return control_; }
+};
+
+// (v128, imm_simd) -> v128 effect-free operation.
+class LWasmPermuteSimd128 : public LInstructionHelper<1, 1, 0> {
+ private:
+ SimdPermuteOp op_;
+ SimdConstant control_;
+
+ public:
+ LIR_HEADER(WasmPermuteSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ LWasmPermuteSimd128(const LAllocation& src, SimdPermuteOp op,
+ SimdConstant control)
+ : LInstructionHelper(classOpcode), op_(op), control_(control) {
+ setOperand(Src, src);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ SimdPermuteOp op() { return op_; }
+ SimdConstant control() { return control_; }
+};
+
+class LWasmReplaceLaneSimd128 : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(WasmReplaceLaneSimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmReplaceLaneSimd128(const LAllocation& lhs, const LAllocation& rhs)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Lhs, lhs);
+ setOperand(Rhs, rhs);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LAllocation* rhs() { return getOperand(Rhs); }
+ uint32_t laneIndex() const {
+ return mir_->toWasmReplaceLaneSimd128()->laneIndex();
+ }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmReplaceLaneSimd128()->simdOp();
+ }
+};
+
+class LWasmReplaceInt64LaneSimd128
+ : public LInstructionHelper<1, INT64_PIECES + 1, 0> {
+ public:
+ LIR_HEADER(WasmReplaceInt64LaneSimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmReplaceInt64LaneSimd128(const LAllocation& lhs,
+ const LInt64Allocation& rhs)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LInt64Allocation rhs() { return getInt64Operand(Rhs); }
+ const LDefinition* output() { return this->getDef(0); }
+ uint32_t laneIndex() const {
+ return mir_->toWasmReplaceLaneSimd128()->laneIndex();
+ }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmReplaceLaneSimd128()->simdOp();
+ }
+};
+
+// (scalar) -> v128 effect-free operations, scalar != int64
+class LWasmScalarToSimd128 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmScalarToSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ explicit LWasmScalarToSimd128(const LAllocation& src)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Src, src);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmScalarToSimd128()->simdOp();
+ }
+};
+
+// (int64) -> v128 effect-free operations
+class LWasmInt64ToSimd128 : public LInstructionHelper<1, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmInt64ToSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ explicit LWasmInt64ToSimd128(const LInt64Allocation& src)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(Src, src);
+ }
+
+ const LInt64Allocation src() { return getInt64Operand(Src); }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmScalarToSimd128()->simdOp();
+ }
+};
+
+// (v128) -> v128 effect-free operations
+// temp is FPR (if in use).
+class LWasmUnarySimd128 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(WasmUnarySimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ LWasmUnarySimd128(const LAllocation& src, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Src, src);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ const LDefinition* temp() { return getTemp(0); }
+ wasm::SimdOp simdOp() const { return mir_->toWasmUnarySimd128()->simdOp(); }
+};
+
+// (v128, imm) -> scalar effect-free operations.
+// temp is FPR (if in use).
+class LWasmReduceSimd128 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(WasmReduceSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ explicit LWasmReduceSimd128(const LAllocation& src, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Src, src);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ uint32_t imm() const { return mir_->toWasmReduceSimd128()->imm(); }
+ wasm::SimdOp simdOp() const { return mir_->toWasmReduceSimd128()->simdOp(); }
+};
+
+// (v128, onTrue, onFalse) test-and-branch operations.
+class LWasmReduceAndBranchSimd128 : public LControlInstructionHelper<2, 1, 0> {
+ wasm::SimdOp op_;
+
+ public:
+ LIR_HEADER(WasmReduceAndBranchSimd128)
+
+ static constexpr uint32_t Src = 0;
+ static constexpr uint32_t IfTrue = 0;
+ static constexpr uint32_t IfFalse = 1;
+
+ LWasmReduceAndBranchSimd128(const LAllocation& src, wasm::SimdOp op,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), op_(op) {
+ setOperand(Src, src);
+ setSuccessor(IfTrue, ifTrue);
+ setSuccessor(IfFalse, ifFalse);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ wasm::SimdOp simdOp() const { return op_; }
+ MBasicBlock* ifTrue() const { return getSuccessor(IfTrue); }
+ MBasicBlock* ifFalse() const { return getSuccessor(IfFalse); }
+};
+
+// (v128, imm) -> i64 effect-free operations
+class LWasmReduceSimd128ToInt64
+ : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(WasmReduceSimd128ToInt64)
+
+ static constexpr uint32_t Src = 0;
+
+ explicit LWasmReduceSimd128ToInt64(const LAllocation& src)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Src, src);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ uint32_t imm() const { return mir_->toWasmReduceSimd128()->imm(); }
+ wasm::SimdOp simdOp() const { return mir_->toWasmReduceSimd128()->simdOp(); }
+};
+
+class LWasmLoadLaneSimd128 : public LInstructionHelper<1, 3, 1> {
+ public:
+ LIR_HEADER(WasmLoadLaneSimd128);
+
+ static constexpr uint32_t Src = 2;
+
+ explicit LWasmLoadLaneSimd128(const LAllocation& ptr, const LAllocation& src,
+ const LDefinition& temp,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, memoryBase);
+ setOperand(Src, src);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* memoryBase() { return getOperand(1); }
+ const LAllocation* src() { return getOperand(Src); }
+ const LDefinition* temp() { return getTemp(0); }
+ MWasmLoadLaneSimd128* mir() const { return mir_->toWasmLoadLaneSimd128(); }
+ uint32_t laneSize() const {
+ return mir_->toWasmLoadLaneSimd128()->laneSize();
+ }
+ uint32_t laneIndex() const {
+ return mir_->toWasmLoadLaneSimd128()->laneIndex();
+ }
+};
+
+class LWasmStoreLaneSimd128 : public LInstructionHelper<1, 3, 1> {
+ public:
+ LIR_HEADER(WasmStoreLaneSimd128);
+
+ static constexpr uint32_t Src = 2;
+
+ explicit LWasmStoreLaneSimd128(const LAllocation& ptr, const LAllocation& src,
+ const LDefinition& temp,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, memoryBase);
+ setOperand(Src, src);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* memoryBase() { return getOperand(1); }
+ const LAllocation* src() { return getOperand(Src); }
+ const LDefinition* temp() { return getTemp(0); }
+ MWasmStoreLaneSimd128* mir() const { return mir_->toWasmStoreLaneSimd128(); }
+ uint32_t laneSize() const {
+ return mir_->toWasmStoreLaneSimd128()->laneSize();
+ }
+ uint32_t laneIndex() const {
+ return mir_->toWasmStoreLaneSimd128()->laneIndex();
+ }
+};
+
+// End Wasm SIMD
+
+// End Wasm Exception Handling
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_LIR_shared_h */
diff --git a/js/src/jit/shared/Lowering-shared-inl.h b/js/src/jit/shared/Lowering-shared-inl.h
new file mode 100644
index 0000000000..89aafa22e4
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -0,0 +1,894 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Lowering_shared_inl_h
+#define jit_shared_Lowering_shared_inl_h
+
+#include "jit/shared/Lowering-shared.h"
+
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+void LIRGeneratorShared::emitAtUses(MInstruction* mir) {
+ MOZ_ASSERT(mir->canEmitAtUses());
+ mir->setEmittedAtUses();
+ mir->setVirtualRegister(0);
+}
+
+LUse LIRGeneratorShared::use(MDefinition* mir, LUse policy) {
+ // It is illegal to call use() on an instruction with two defs.
+#if BOX_PIECES > 1
+ MOZ_ASSERT(mir->type() != MIRType::Value);
+#endif
+#if INT64_PIECES > 1
+ MOZ_ASSERT(mir->type() != MIRType::Int64);
+#endif
+ ensureDefined(mir);
+ policy.setVirtualRegister(mir->virtualRegister());
+ return policy;
+}
+
+template <size_t X>
+void LIRGeneratorShared::define(
+ details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ LDefinition::Policy policy) {
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+ define(lir, mir, LDefinition(type, policy));
+}
+
+template <size_t X>
+void LIRGeneratorShared::define(
+ details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ const LDefinition& def) {
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ // Assign the definition and a virtual register. Then, propagate this
+ // virtual register to the MIR, so we can map MIR to LIR during lowering.
+ lir->setDef(0, def);
+ lir->getDef(0)->setVirtualRegister(vreg);
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t X, size_t Y>
+void LIRGeneratorShared::defineFixed(LInstructionHelper<1, X, Y>* lir,
+ MDefinition* mir,
+ const LAllocation& output) {
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+
+ LDefinition def(type, LDefinition::FIXED);
+ def.setOutput(output);
+
+ define(lir, mir, def);
+}
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineInt64Fixed(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ const LInt64Allocation& output) {
+ uint32_t vreg = getVirtualRegister();
+
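+  // A 64-bit value needs a single definition on 64-bit targets, but is split
+  // into adjacent low/high definitions (consecutive vregs) on 32-bit targets.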
+#if JS_BITS_PER_WORD == 64
+ LDefinition def(LDefinition::GENERAL, LDefinition::FIXED);
+ def.setOutput(output.value());
+ lir->setDef(0, def);
+ lir->getDef(0)->setVirtualRegister(vreg);
+#else
+ LDefinition def0(LDefinition::GENERAL, LDefinition::FIXED);
+ def0.setOutput(output.low());
+ lir->setDef(0, def0);
+ lir->getDef(0)->setVirtualRegister(vreg);
+
+ getVirtualRegister();
+ LDefinition def1(LDefinition::GENERAL, LDefinition::FIXED);
+ def1.setOutput(output.high());
+ lir->setDef(1, def1);
+ lir->getDef(1)->setVirtualRegister(vreg + 1);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineReuseInput(
+ LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand) {
+ // Note: Any other operand that is not the same as this operand should be
+ // marked as not being "atStart". The regalloc cannot handle those and can
+ // overwrite the inputs!
+
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+
+ LDefinition def(type, LDefinition::MUST_REUSE_INPUT);
+ def.setReusedInput(operand);
+
+ define(lir, mir, def);
+}
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineInt64ReuseInput(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand) {
+ // Note: Any other operand that is not the same as this operand should be
+ // marked as not being "atStart". The regalloc cannot handle those and can
+ // overwrite the inputs!
+
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+#if JS_BITS_PER_WORD == 32
+ MOZ_ASSERT(lir->getOperand(operand + 1)->toUse()->usedAtStart());
+#endif
+ MOZ_ASSERT(!lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ LDefinition def1(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def1.setReusedInput(operand);
+ lir->setDef(0, def1);
+ lir->getDef(0)->setVirtualRegister(vreg);
+
+#if JS_BITS_PER_WORD == 32
+ getVirtualRegister();
+ LDefinition def2(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def2.setReusedInput(operand + 1);
+ lir->setDef(1, def2);
+ lir->getDef(1)->setVirtualRegister(vreg + 1);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineBoxReuseInput(
+ LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand) {
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+#ifdef JS_NUNBOX32
+ MOZ_ASSERT(lir->getOperand(operand + 1)->toUse()->usedAtStart());
+#endif
+ MOZ_ASSERT(!lir->isCall());
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ uint32_t vreg = getVirtualRegister();
+
+#ifdef JS_NUNBOX32
+ static_assert(VREG_TYPE_OFFSET == 0,
+ "Code below assumes VREG_TYPE_OFFSET == 0");
+ static_assert(VREG_DATA_OFFSET == 1,
+ "Code below assumes VREG_DATA_OFFSET == 1");
+
+ LDefinition def1(LDefinition::TYPE, LDefinition::MUST_REUSE_INPUT);
+ def1.setReusedInput(operand);
+ def1.setVirtualRegister(vreg);
+ lir->setDef(0, def1);
+
+ getVirtualRegister();
+ LDefinition def2(LDefinition::PAYLOAD, LDefinition::MUST_REUSE_INPUT);
+ def2.setReusedInput(operand + 1);
+ def2.setVirtualRegister(vreg + 1);
+ lir->setDef(1, def2);
+#else
+ LDefinition def(LDefinition::BOX, LDefinition::MUST_REUSE_INPUT);
+ def.setReusedInput(operand);
+ def.setVirtualRegister(vreg);
+ lir->setDef(0, def);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Temps>
+void LIRGeneratorShared::defineBox(
+ details::LInstructionFixedDefsTempsHelper<BOX_PIECES, Temps>* lir,
+ MDefinition* mir, LDefinition::Policy policy) {
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ uint32_t vreg = getVirtualRegister();
+
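+  // Under NUNBOX32 a Value is boxed as two definitions (type and payload) in
+  // adjacent vregs; under PUNBOX64 a single BOX definition suffices.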
+#if defined(JS_NUNBOX32)
+ lir->setDef(0,
+ LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE, policy));
+ lir->setDef(
+ 1, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD, policy));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::BOX, policy));
+#endif
+ lir->setMir(mir);
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineInt64(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy) {
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+
+#ifdef JS_64BIT
+ MOZ_ASSERT(mir->type() == MIRType::Int64 || mir->type() == MIRType::IntPtr);
+#else
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+#endif
+
+ uint32_t vreg = getVirtualRegister();
+
+#if JS_BITS_PER_WORD == 32
+ lir->setDef(0,
+ LDefinition(vreg + INT64LOW_INDEX, LDefinition::GENERAL, policy));
+ lir->setDef(
+ 1, LDefinition(vreg + INT64HIGH_INDEX, LDefinition::GENERAL, policy));
+ getVirtualRegister();
+#else
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL, policy));
+#endif
+ lir->setMir(mir);
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void LIRGeneratorShared::defineReturn(LInstruction* lir, MDefinition* mir) {
+ lir->setMir(mir);
+
+ MOZ_ASSERT(lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
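+  // Each definition is pinned to the platform's fixed return register(s), so
+  // the register allocator leaves call results where the ABI places them.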
+ switch (mir->type()) {
+ case MIRType::Value:
+#if defined(JS_NUNBOX32)
+ lir->setDef(TYPE_INDEX,
+ LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE,
+ LGeneralReg(JSReturnReg_Type)));
+ lir->setDef(PAYLOAD_INDEX,
+ LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD,
+ LGeneralReg(JSReturnReg_Data)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(
+ 0, LDefinition(vreg, LDefinition::BOX, LGeneralReg(JSReturnReg)));
+#endif
+ break;
+ case MIRType::Int64:
+#if defined(JS_NUNBOX32)
+ lir->setDef(INT64LOW_INDEX,
+ LDefinition(vreg + INT64LOW_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ReturnReg64.low)));
+ lir->setDef(INT64HIGH_INDEX,
+ LDefinition(vreg + INT64HIGH_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ReturnReg64.high)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(
+ 0, LDefinition(vreg, LDefinition::GENERAL, LGeneralReg(ReturnReg)));
+#endif
+ break;
+ case MIRType::Float32:
+ lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32,
+ LFloatReg(ReturnFloat32Reg)));
+ break;
+ case MIRType::Double:
+ lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE,
+ LFloatReg(ReturnDoubleReg)));
+ break;
+ case MIRType::Simd128:
+#ifdef ENABLE_WASM_SIMD
+ lir->setDef(0, LDefinition(vreg, LDefinition::SIMD128,
+ LFloatReg(ReturnSimd128Reg)));
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ default:
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+ switch (type) {
+ case LDefinition::GENERAL:
+ case LDefinition::INT32:
+ case LDefinition::OBJECT:
+ case LDefinition::SLOTS:
+ case LDefinition::STACKRESULTS:
+ lir->setDef(0, LDefinition(vreg, type, LGeneralReg(ReturnReg)));
+ break;
+ case LDefinition::DOUBLE:
+ case LDefinition::FLOAT32:
+ case LDefinition::SIMD128:
+ MOZ_CRASH("Float cases must have been handled earlier");
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ break;
+ }
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+#ifdef DEBUG
+// This function checks that when making redefinitions, we don't accidentally
+// coerce two incompatible types.
+static inline bool IsCompatibleLIRCoercion(MIRType to, MIRType from) {
+ if (to == from) {
+ return true;
+ }
+ // In LIR, we treat boolean and int32 as the same low-level type (INTEGER).
+ // When snapshotting, we recover the actual JS type from MIR.
+ if ((to == MIRType::Int32 || to == MIRType::Boolean) &&
+ (from == MIRType::Int32 || from == MIRType::Boolean)) {
+ return true;
+ }
+ // On 32-bit platforms Int32 can be redefined as IntPtr and vice versa.
+ // On 64-bit platforms we can redefine non-negative Int32 values as IntPtr.
+ if (from == MIRType::Int32 && to == MIRType::IntPtr) {
+ return true;
+ }
+# ifndef JS_64BIT
+ if (from == MIRType::IntPtr && to == MIRType::Int32) {
+ return true;
+ }
+# endif
+ return false;
+}
+#endif
+
+void LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as) {
+ MOZ_ASSERT(IsCompatibleLIRCoercion(def->type(), as->type()));
+
+  // Try to emit MIR marked as emitted-at-uses at, well, uses. For
+  // snapshotting reasons we only do this when the MIRTypes match, or when
+  // we are coercing between bool and int32 constants.
+ if (as->isEmittedAtUses() &&
+ (def->type() == as->type() ||
+ (as->isConstant() &&
+ (def->type() == MIRType::Int32 || def->type() == MIRType::Boolean) &&
+ (as->type() == MIRType::Int32 || as->type() == MIRType::Boolean)))) {
+ MInstruction* replacement;
+ if (def->type() != as->type()) {
+ if (as->type() == MIRType::Int32) {
+ replacement =
+ MConstant::New(alloc(), BooleanValue(as->toConstant()->toInt32()));
+ } else {
+ replacement =
+ MConstant::New(alloc(), Int32Value(as->toConstant()->toBoolean()));
+ }
+ def->block()->insertBefore(def->toInstruction(), replacement);
+ emitAtUses(replacement->toInstruction());
+ } else {
+ replacement = as->toInstruction();
+ }
+ def->replaceAllUsesWith(replacement);
+ } else {
+ ensureDefined(as);
+ def->setVirtualRegister(as->virtualRegister());
+ }
+}
+
+void LIRGeneratorShared::ensureDefined(MDefinition* mir) {
+ if (mir->isEmittedAtUses()) {
+ visitEmittedAtUses(mir->toInstruction());
+ MOZ_ASSERT(mir->isLowered());
+ }
+}
+
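+// Two equal MDefinition pointers can still lower to distinct LIR nodes when
+// the definition is emitted at each of its uses.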
+bool LIRGeneratorShared::willHaveDifferentLIRNodes(MDefinition* mir1,
+ MDefinition* mir2) {
+ if (mir1 != mir2) {
+ return true;
+ }
+ if (mir1->isEmittedAtUses()) {
+ return true;
+ }
+ return false;
+}
+
+template <typename LClass, typename... Args>
+LClass* LIRGeneratorShared::allocateVariadic(uint32_t numOperands,
+ Args&&... args) {
+ size_t numBytes = sizeof(LClass) + numOperands * sizeof(LAllocation);
+ void* buf = alloc().allocate(numBytes);
+ if (!buf) {
+ return nullptr;
+ }
+
+ LClass* ins = static_cast<LClass*>(buf);
+ new (ins) LClass(numOperands, std::forward<Args>(args)...);
+
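+  // The variadic operands live inline, immediately after the LClass object;
+  // record that offset so the operand accessors can find them.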
+ ins->initOperandsOffset(sizeof(LClass));
+
+ for (uint32_t i = 0; i < numOperands; i++) {
+ ins->setOperand(i, LAllocation());
+ }
+
+ return ins;
+}
+
+LUse LIRGeneratorShared::useRegister(MDefinition* mir) {
+ return use(mir, LUse(LUse::REGISTER));
+}
+
+LUse LIRGeneratorShared::useRegisterAtStart(MDefinition* mir) {
+ return use(mir, LUse(LUse::REGISTER, true));
+}
+
+LUse LIRGeneratorShared::use(MDefinition* mir) {
+ return use(mir, LUse(LUse::ANY));
+}
+
+LUse LIRGeneratorShared::useAtStart(MDefinition* mir) {
+ return use(mir, LUse(LUse::ANY, true));
+}
+
+LAllocation LIRGeneratorShared::useOrConstant(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return use(mir);
+}
+
+LAllocation LIRGeneratorShared::useOrConstantAtStart(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return useAtStart(mir);
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrConstant(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrConstantAtStart(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return useRegisterAtStart(mir);
+}
+
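+// True for Int32 constants, and for IntPtr constants whose value fits in an
+// int32.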
+inline bool CanUseInt32Constant(MDefinition* mir) {
+ if (!mir->isConstant()) {
+ return false;
+ }
+ MConstant* cst = mir->toConstant();
+ if (cst->type() == MIRType::IntPtr) {
+ return INT32_MIN <= cst->toIntPtr() && cst->toIntPtr() <= INT32_MAX;
+ }
+ MOZ_ASSERT(cst->type() == MIRType::Int32);
+ return true;
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrInt32Constant(MDefinition* mir) {
+ if (CanUseInt32Constant(mir)) {
+ return LAllocation(mir->toConstant());
+ }
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorShared::useAnyOrInt32Constant(MDefinition* mir) {
+ if (CanUseInt32Constant(mir)) {
+ return LAllocation(mir->toConstant());
+ }
+ return useAny(mir);
+}
+
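+// For a constant zero these return an empty (bogus) LAllocation, which later
+// code generation treats as the value zero.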
+LAllocation LIRGeneratorShared::useRegisterOrZero(MDefinition* mir) {
+ if (mir->isConstant() &&
+ (mir->toConstant()->isInt32(0) || mir->toConstant()->isInt64(0))) {
+ return LAllocation();
+ }
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrZeroAtStart(MDefinition* mir) {
+ if (mir->isConstant() &&
+ (mir->toConstant()->isInt32(0) || mir->toConstant()->isInt64(0))) {
+ return LAllocation();
+ }
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ if (mir->isConstant() && mir->type() != MIRType::Double &&
+ mir->type() != MIRType::Float32) {
+ return LAllocation(mir->toConstant());
+ }
+ return useRegister(mir);
+}
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_RISCV64)
+LAllocation LIRGeneratorShared::useAnyOrConstant(MDefinition* mir) {
+ return useRegisterOrConstant(mir);
+}
+LAllocation LIRGeneratorShared::useStorable(MDefinition* mir) {
+ return useRegister(mir);
+}
+LAllocation LIRGeneratorShared::useStorableAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorShared::useAny(MDefinition* mir) {
+ return useRegister(mir);
+}
+LAllocation LIRGeneratorShared::useAnyAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+#else
+LAllocation LIRGeneratorShared::useAnyOrConstant(MDefinition* mir) {
+ return useOrConstant(mir);
+}
+
+LAllocation LIRGeneratorShared::useAny(MDefinition* mir) { return use(mir); }
+LAllocation LIRGeneratorShared::useAnyAtStart(MDefinition* mir) {
+ return useAtStart(mir);
+}
+LAllocation LIRGeneratorShared::useStorable(MDefinition* mir) {
+ return useRegisterOrConstant(mir);
+}
+LAllocation LIRGeneratorShared::useStorableAtStart(MDefinition* mir) {
+ return useRegisterOrConstantAtStart(mir);
+}
+
+#endif
+
+LAllocation LIRGeneratorShared::useKeepalive(MDefinition* mir) {
+ return use(mir, LUse(LUse::KEEPALIVE));
+}
+
+LAllocation LIRGeneratorShared::useKeepaliveOrConstant(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return useKeepalive(mir);
+}
+
+LUse LIRGeneratorShared::useFixed(MDefinition* mir, Register reg) {
+ return use(mir, LUse(reg));
+}
+
+LUse LIRGeneratorShared::useFixedAtStart(MDefinition* mir, Register reg) {
+ return use(mir, LUse(reg, true));
+}
+
+LUse LIRGeneratorShared::useFixed(MDefinition* mir, FloatRegister reg) {
+ return use(mir, LUse(reg));
+}
+
+LUse LIRGeneratorShared::useFixed(MDefinition* mir, AnyRegister reg) {
+ return reg.isFloat() ? use(mir, LUse(reg.fpu())) : use(mir, LUse(reg.gpr()));
+}
+
+LUse LIRGeneratorShared::useFixedAtStart(MDefinition* mir, AnyRegister reg) {
+ return reg.isFloat() ? use(mir, LUse(reg.fpu(), true))
+ : use(mir, LUse(reg.gpr(), true));
+}
+
+LDefinition LIRGeneratorShared::temp(LDefinition::Type type,
+ LDefinition::Policy policy) {
+ return LDefinition(getVirtualRegister(), type, policy);
+}
+
+LInt64Definition LIRGeneratorShared::tempInt64(LDefinition::Policy policy) {
+#if JS_BITS_PER_WORD == 32
+ LDefinition high = temp(LDefinition::GENERAL, policy);
+ LDefinition low = temp(LDefinition::GENERAL, policy);
+ return LInt64Definition(high, low);
+#else
+ return LInt64Definition(temp(LDefinition::GENERAL, policy));
+#endif
+}
+
+LDefinition LIRGeneratorShared::tempFixed(Register reg) {
+ LDefinition t = temp(LDefinition::GENERAL);
+ t.setOutput(LGeneralReg(reg));
+ return t;
+}
+
+LInt64Definition LIRGeneratorShared::tempInt64Fixed(Register64 reg) {
+#if JS_BITS_PER_WORD == 32
+ LDefinition high = temp(LDefinition::GENERAL);
+ LDefinition low = temp(LDefinition::GENERAL);
+ high.setOutput(LGeneralReg(reg.high));
+ low.setOutput(LGeneralReg(reg.low));
+ return LInt64Definition(high, low);
+#else
+ LDefinition t = temp(LDefinition::GENERAL);
+ t.setOutput(LGeneralReg(reg.reg));
+ return LInt64Definition(t);
+#endif
+}
+
+LDefinition LIRGeneratorShared::tempFixed(FloatRegister reg) {
+ LDefinition t = temp(LDefinition::DOUBLE);
+ t.setOutput(LFloatReg(reg));
+ return t;
+}
+
+LDefinition LIRGeneratorShared::tempFloat32() {
+ return temp(LDefinition::FLOAT32);
+}
+
+LDefinition LIRGeneratorShared::tempDouble() {
+ return temp(LDefinition::DOUBLE);
+}
+
+#ifdef ENABLE_WASM_SIMD
+LDefinition LIRGeneratorShared::tempSimd128() {
+ return temp(LDefinition::SIMD128);
+}
+#endif
+
+LDefinition LIRGeneratorShared::tempCopy(MDefinition* input,
+ uint32_t reusedInput) {
+ MOZ_ASSERT(input->virtualRegister());
+ LDefinition t =
+ temp(LDefinition::TypeFrom(input->type()), LDefinition::MUST_REUSE_INPUT);
+ t.setReusedInput(reusedInput);
+ return t;
+}
+
+template <typename T>
+void LIRGeneratorShared::annotate(T* ins) {
+ ins->setId(lirGraph_.getInstructionId());
+}
+
+template <typename T>
+void LIRGeneratorShared::add(T* ins, MInstruction* mir) {
+ MOZ_ASSERT(!ins->isPhi());
+ current->add(ins);
+ if (mir) {
+ MOZ_ASSERT(current == mir->block()->lir());
+ ins->setMir(mir);
+ }
+ annotate(ins);
+ if (ins->isCall()) {
+ gen->setNeedsOverrecursedCheck();
+ gen->setNeedsStaticStackAlignment();
+ }
+}
+
+#ifdef JS_NUNBOX32
+// Returns the virtual register of a js::Value-defining instruction. This is
+// abstracted because MBox is a special value-returning instruction that
+// redefines its input payload if its input is not constant. Therefore, it is
+// illegal to request a box's payload by adding VREG_DATA_OFFSET to its raw id.
+static inline uint32_t VirtualRegisterOfPayload(MDefinition* mir) {
+ if (mir->isBox()) {
+ MDefinition* inner = mir->toBox()->getOperand(0);
+ if (!inner->isConstant() && inner->type() != MIRType::Double &&
+ inner->type() != MIRType::Float32) {
+ return inner->virtualRegister();
+ }
+ }
+ return mir->virtualRegister() + VREG_DATA_OFFSET;
+}
+
+// Note: always call ensureDefined before calling useType/usePayload,
+// so that emitted-at-use operands are handled correctly.
+LUse LIRGeneratorShared::useType(MDefinition* mir, LUse::Policy policy) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(mir->virtualRegister() + VREG_TYPE_OFFSET, policy);
+}
+
+LUse LIRGeneratorShared::usePayload(MDefinition* mir, LUse::Policy policy) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(VirtualRegisterOfPayload(mir), policy);
+}
+
+LUse LIRGeneratorShared::usePayloadAtStart(MDefinition* mir,
+ LUse::Policy policy) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(VirtualRegisterOfPayload(mir), policy, true);
+}
+
+LUse LIRGeneratorShared::usePayloadInRegisterAtStart(MDefinition* mir) {
+ return usePayloadAtStart(mir, LUse::REGISTER);
+}
+
+void LIRGeneratorShared::fillBoxUses(LInstruction* lir, size_t n,
+ MDefinition* mir) {
+ ensureDefined(mir);
+ lir->getOperand(n)->toUse()->setVirtualRegister(mir->virtualRegister() +
+ VREG_TYPE_OFFSET);
+ lir->getOperand(n + 1)->toUse()->setVirtualRegister(
+ VirtualRegisterOfPayload(mir));
+}
+#endif
+
+LUse LIRGeneratorShared::useRegisterForTypedLoad(MDefinition* mir,
+ MIRType type) {
+ MOZ_ASSERT(type != MIRType::Value && type != MIRType::None);
+ MOZ_ASSERT(mir->type() == MIRType::Object || mir->type() == MIRType::Slots);
+
+#ifdef JS_PUNBOX64
+ // On x64, masm.loadUnboxedValue emits slightly less efficient code when
+ // the input and output use the same register and we're not loading an
+ // int32/bool/double, so we just call useRegister in this case.
+ if (type != MIRType::Int32 && type != MIRType::Boolean &&
+ type != MIRType::Double) {
+ return useRegister(mir);
+ }
+#endif
+
+ return useRegisterAtStart(mir);
+}
+
+LBoxAllocation LIRGeneratorShared::useBox(MDefinition* mir, LUse::Policy policy,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(
+ LUse(mir->virtualRegister(), policy, useAtStart),
+ LUse(VirtualRegisterOfPayload(mir), policy, useAtStart));
+#else
+ return LBoxAllocation(LUse(mir->virtualRegister(), policy, useAtStart));
+#endif
+}
+
+LBoxAllocation LIRGeneratorShared::useBoxOrTyped(MDefinition* mir,
+ bool useAtStart) {
+ if (mir->type() == MIRType::Value) {
+ return useBox(mir, LUse::REGISTER, useAtStart);
+ }
+
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(useAtStart ? useRegisterAtStart(mir) : useRegister(mir),
+ LAllocation());
+#else
+ return LBoxAllocation(useAtStart ? useRegisterAtStart(mir)
+ : useRegister(mir));
+#endif
+}
+
+LBoxAllocation LIRGeneratorShared::useBoxOrTypedOrConstant(MDefinition* mir,
+ bool useConstant,
+ bool useAtStart) {
+ if (useConstant && mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LBoxAllocation(LAllocation(mir->toConstant()));
+#endif
+ }
+
+ return useBoxOrTyped(mir, useAtStart);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64(MDefinition* mir,
+ LUse::Policy policy,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ ensureDefined(mir);
+
+ uint32_t vreg = mir->virtualRegister();
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(LUse(vreg + INT64HIGH_INDEX, policy, useAtStart),
+ LUse(vreg + INT64LOW_INDEX, policy, useAtStart));
+#else
+ return LInt64Allocation(LUse(vreg, policy, useAtStart));
+#endif
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64Fixed(MDefinition* mir,
+ Register64 regs,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ ensureDefined(mir);
+
+ uint32_t vreg = mir->virtualRegister();
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(LUse(regs.high, vreg + INT64HIGH_INDEX, useAtStart),
+ LUse(regs.low, vreg + INT64LOW_INDEX, useAtStart));
+#else
+ return LInt64Allocation(LUse(regs.reg, vreg, useAtStart));
+#endif
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64FixedAtStart(MDefinition* mir,
+ Register64 regs) {
+ return useInt64Fixed(mir, regs, true);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64(MDefinition* mir,
+ bool useAtStart) {
+ // On 32-bit platforms, always load the value in registers.
+#if JS_BITS_PER_WORD == 32
+ return useInt64(mir, LUse::REGISTER, useAtStart);
+#else
+ return useInt64(mir, LUse::ANY, useAtStart);
+#endif
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64AtStart(MDefinition* mir) {
+ return useInt64(mir, /* useAtStart = */ true);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64Register(MDefinition* mir,
+ bool useAtStart) {
+ return useInt64(mir, LUse::REGISTER, useAtStart);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64OrConstant(MDefinition* mir,
+ bool useAtStart) {
+ if (mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LInt64Allocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LInt64Allocation(LAllocation(mir->toConstant()));
+#endif
+ }
+ return useInt64(mir, useAtStart);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64RegisterOrConstant(
+ MDefinition* mir, bool useAtStart) {
+ if (mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LInt64Allocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LInt64Allocation(LAllocation(mir->toConstant()));
+#endif
+ }
+ return useInt64Register(mir, useAtStart);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64RegisterAtStart(MDefinition* mir) {
+ return useInt64Register(mir, /* useAtStart = */ true);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64RegisterOrConstantAtStart(
+ MDefinition* mir) {
+ return useInt64RegisterOrConstant(mir, /* useAtStart = */ true);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64OrConstantAtStart(
+ MDefinition* mir) {
+ return useInt64OrConstant(mir, /* useAtStart = */ true);
+}
+
+void LIRGeneratorShared::lowerConstantDouble(double d, MInstruction* mir) {
+ define(new (alloc()) LDouble(d), mir);
+}
+void LIRGeneratorShared::lowerConstantFloat32(float f, MInstruction* mir) {
+ define(new (alloc()) LFloat32(f), mir);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Lowering_shared_inl_h */
diff --git a/js/src/jit/shared/Lowering-shared.cpp b/js/src/jit/shared/Lowering-shared.cpp
new file mode 100644
index 0000000000..754cbe71e7
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared.cpp
@@ -0,0 +1,319 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+#include "jit/LIR.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/ScalarTypeUtils.h"
+
+#include "vm/SymbolType.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+bool LIRGeneratorShared::ShouldReorderCommutative(MDefinition* lhs,
+ MDefinition* rhs,
+ MInstruction* ins) {
+ // lhs and rhs are used by the commutative operator.
+ MOZ_ASSERT(lhs->hasDefUses());
+ MOZ_ASSERT(rhs->hasDefUses());
+
+ // Ensure that if there is a constant, then it is in rhs.
+ if (rhs->isConstant()) {
+ return false;
+ }
+ if (lhs->isConstant()) {
+ return true;
+ }
+
+ // Since clobbering binary operations clobber the left operand, prefer a
+ // non-constant lhs operand with no further uses. To be fully precise, we
+ // should check whether this is the *last* use, but checking hasOneDefUse()
+ // is a decent approximation which doesn't require any extra analysis.
+ bool rhsSingleUse = rhs->hasOneDefUse();
+ bool lhsSingleUse = lhs->hasOneDefUse();
+ if (rhsSingleUse) {
+ if (!lhsSingleUse) {
+ return true;
+ }
+ } else {
+ if (lhsSingleUse) {
+ return false;
+ }
+ }
+
+ // If this is a reduction-style computation, such as
+ //
+ // sum = 0;
+ // for (...)
+ // sum += ...;
+ //
+ // put the phi on the left to promote coalescing. This is fairly specific.
+ if (rhsSingleUse && rhs->isPhi() && rhs->block()->isLoopHeader() &&
+ ins == rhs->toPhi()->getLoopBackedgeOperand()) {
+ return true;
+ }
+
+ return false;
+}
+
+void LIRGeneratorShared::ReorderCommutative(MDefinition** lhsp,
+ MDefinition** rhsp,
+ MInstruction* ins) {
+ MDefinition* lhs = *lhsp;
+ MDefinition* rhs = *rhsp;
+
+ if (ShouldReorderCommutative(lhs, rhs, ins)) {
+ *rhsp = lhs;
+ *lhsp = rhs;
+ }
+}
+
+void LIRGeneratorShared::definePhiOneRegister(MPhi* phi, size_t lirIndex) {
+ LPhi* lir = current->getPhi(lirIndex);
+
+ uint32_t vreg = getVirtualRegister();
+
+ phi->setVirtualRegister(vreg);
+ lir->setDef(0, LDefinition(vreg, LDefinition::TypeFrom(phi->type())));
+ annotate(lir);
+}
+
+#ifdef JS_NUNBOX32
+void LIRGeneratorShared::definePhiTwoRegisters(MPhi* phi, size_t lirIndex) {
+ LPhi* type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);
+
+ uint32_t typeVreg = getVirtualRegister();
+ phi->setVirtualRegister(typeVreg);
+
+ uint32_t payloadVreg = getVirtualRegister();
+ MOZ_ASSERT_IF(!errored(), typeVreg + 1 == payloadVreg);
+
+ type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
+ payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
+ annotate(type);
+ annotate(payload);
+}
+#endif
+
+void LIRGeneratorShared::lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* lir = block->getPhi(lirIndex);
+ lir->setOperand(inputPosition, LUse(operand->virtualRegister(), LUse::ANY));
+}
+
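+// Consecutive instructions frequently share the same resume point, so the
+// most recently built LRecoverInfo is cached and reused when possible.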
+LRecoverInfo* LIRGeneratorShared::getRecoverInfo(MResumePoint* rp) {
+ if (cachedRecoverInfo_ && cachedRecoverInfo_->mir() == rp) {
+ return cachedRecoverInfo_;
+ }
+
+ LRecoverInfo* recoverInfo = LRecoverInfo::New(gen, rp);
+ if (!recoverInfo) {
+ return nullptr;
+ }
+
+ cachedRecoverInfo_ = recoverInfo;
+ return recoverInfo;
+}
+
+#ifdef DEBUG
+bool LRecoverInfo::OperandIter::canOptimizeOutIfUnused() {
+ MDefinition* ins = **this;
+
+ // We check ins->type() in addition to ins->isUnused() because
+ // EliminateDeadResumePointOperands may replace nodes with the constant
+ // MagicValue(JS_OPTIMIZED_OUT).
+ if ((ins->isUnused() || ins->type() == MIRType::MagicOptimizedOut) &&
+ (*it_)->isResumePoint()) {
+ return !(*it_)->toResumePoint()->isObservableOperand(op_);
+ }
+
+ return true;
+}
+#endif
+
+LAllocation LIRGeneratorShared::useRegisterOrIndexConstant(
+ MDefinition* mir, Scalar::Type type, int32_t offsetAdjustment) {
+ if (CanUseInt32Constant(mir)) {
+ MConstant* cst = mir->toConstant();
+ int32_t val =
+ cst->type() == MIRType::Int32 ? cst->toInt32() : cst->toIntPtr();
+ int32_t offset;
+ if (ArrayOffsetFitsInInt32(val, type, offsetAdjustment, &offset)) {
+ return LAllocation(mir->toConstant());
+ }
+ }
+ return useRegister(mir);
+}
+
+#ifdef JS_NUNBOX32
+LSnapshot* LIRGeneratorShared::buildSnapshot(MResumePoint* rp,
+ BailoutKind kind) {
+ LRecoverInfo* recoverInfo = getRecoverInfo(rp);
+ if (!recoverInfo) {
+ return nullptr;
+ }
+
+ LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
+ if (!snapshot) {
+ return nullptr;
+ }
+
+ size_t index = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ // Check that optimized out operands are in eliminable slots.
+ MOZ_ASSERT(it.canOptimizeOutIfUnused());
+
+ MDefinition* ins = *it;
+
+ if (ins->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ LAllocation* type = snapshot->typeOfSlot(index);
+ LAllocation* payload = snapshot->payloadOfSlot(index);
+ ++index;
+
+ if (ins->isBox()) {
+ ins = ins->toBox()->getOperand(0);
+ }
+
+ // Guards should never be eliminated.
+ MOZ_ASSERT_IF(ins->isUnused(), !ins->isGuard());
+
+ // Snapshot operands other than constants should never be
+ // emitted-at-uses. Try-catch support depends on there being no
+ // code between an instruction and the LOsiPoint that follows it.
+ MOZ_ASSERT_IF(!ins->isConstant(), !ins->isEmittedAtUses());
+
+ // The register allocation will fill these fields in with actual
+ // register/stack assignments. During code generation, we can restore
+ // interpreter state with the given information. Note that for
+ // constants, including known types, we record a dummy placeholder,
+    // since we can recover the same information, more cleanly, from MIR.
+ if (ins->isConstant() || ins->isUnused()) {
+ *type = LAllocation();
+ *payload = LAllocation();
+ } else if (ins->type() != MIRType::Value) {
+ *type = LAllocation();
+ *payload = use(ins, LUse(LUse::KEEPALIVE));
+ } else {
+ *type = useType(ins, LUse::KEEPALIVE);
+ *payload = usePayload(ins, LUse::KEEPALIVE);
+ }
+ }
+
+ return snapshot;
+}
+
+#elif JS_PUNBOX64
+
+LSnapshot* LIRGeneratorShared::buildSnapshot(MResumePoint* rp,
+ BailoutKind kind) {
+ LRecoverInfo* recoverInfo = getRecoverInfo(rp);
+ if (!recoverInfo) {
+ return nullptr;
+ }
+
+ LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
+ if (!snapshot) {
+ return nullptr;
+ }
+
+ size_t index = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ // Check that optimized out operands are in eliminable slots.
+ MOZ_ASSERT(it.canOptimizeOutIfUnused());
+
+ MDefinition* def = *it;
+
+ if (def->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ if (def->isBox()) {
+ def = def->toBox()->getOperand(0);
+ }
+
+ // Guards should never be eliminated.
+ MOZ_ASSERT_IF(def->isUnused(), !def->isGuard());
+
+ // Snapshot operands other than constants should never be
+ // emitted-at-uses. Try-catch support depends on there being no
+ // code between an instruction and the LOsiPoint that follows it.
+ MOZ_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());
+
+ LAllocation* a = snapshot->getEntry(index++);
+
+ if (def->isUnused()) {
+ *a = LAllocation();
+ continue;
+ }
+
+ *a = useKeepaliveOrConstant(def);
+ }
+
+ return snapshot;
+}
+#endif
+
+void LIRGeneratorShared::assignSnapshot(LInstruction* ins, BailoutKind kind) {
+ // assignSnapshot must be called before define/add, since
+ // it may add new instructions for emitted-at-use operands.
+ MOZ_ASSERT(ins->id() == 0);
+ MOZ_ASSERT(kind != BailoutKind::Unknown);
+
+ LSnapshot* snapshot = buildSnapshot(lastResumePoint_, kind);
+ if (!snapshot) {
+ abort(AbortReason::Alloc, "buildSnapshot failed");
+ return;
+ }
+
+ ins->assignSnapshot(snapshot);
+}
+
+void LIRGeneratorShared::assignSafepoint(LInstruction* ins, MInstruction* mir,
+ BailoutKind kind) {
+ MOZ_ASSERT(!osiPoint_);
+ MOZ_ASSERT(!ins->safepoint());
+
+ ins->initSafepoint(alloc());
+
+ MResumePoint* mrp =
+ mir->resumePoint() ? mir->resumePoint() : lastResumePoint_;
+ LSnapshot* postSnapshot = buildSnapshot(mrp, kind);
+ if (!postSnapshot) {
+ abort(AbortReason::Alloc, "buildSnapshot failed");
+ return;
+ }
+
+ osiPoint_ = new (alloc()) LOsiPoint(ins->safepoint(), postSnapshot);
+
+ if (!lirGraph_.noteNeedsSafepoint(ins)) {
+ abort(AbortReason::Alloc, "noteNeedsSafepoint failed");
+ return;
+ }
+}
+
+void LIRGeneratorShared::assignWasmSafepoint(LInstruction* ins) {
+ MOZ_ASSERT(!osiPoint_);
+ MOZ_ASSERT(!ins->safepoint());
+
+ ins->initSafepoint(alloc());
+
+ if (!lirGraph_.noteNeedsSafepoint(ins)) {
+ abort(AbortReason::Alloc, "noteNeedsSafepoint failed");
+ return;
+ }
+}
diff --git a/js/src/jit/shared/Lowering-shared.h b/js/src/jit/shared/Lowering-shared.h
new file mode 100644
index 0000000000..d26e349d8c
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared.h
@@ -0,0 +1,371 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Lowering_shared_h
+#define jit_shared_Lowering_shared_h
+
+// This file declares the structures that are used for attaching LIR to a
+// MIRGraph.
+
+#include "jit/LIR.h"
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+class MDefinition;
+class MInstruction;
+class LOsiPoint;
+
+class LIRGeneratorShared {
+ protected:
+ MIRGenerator* gen;
+ MIRGraph& graph;
+ LIRGraph& lirGraph_;
+ LBlock* current;
+ MResumePoint* lastResumePoint_;
+ LRecoverInfo* cachedRecoverInfo_;
+ LOsiPoint* osiPoint_;
+
+ LIRGeneratorShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : gen(gen),
+ graph(graph),
+ lirGraph_(lirGraph),
+ current(nullptr),
+ lastResumePoint_(nullptr),
+ cachedRecoverInfo_(nullptr),
+ osiPoint_(nullptr) {}
+
+ MIRGenerator* mir() { return gen; }
+
+ // Abort errors are caught at end of visitInstruction. It is possible for
+ // multiple errors to be detected before the end of visitInstruction. In
+ // this case, we only report the first back to the MIRGenerator.
+ bool errored() { return gen->getOffThreadStatus().isErr(); }
+ void abort(AbortReason r, const char* message, ...) MOZ_FORMAT_PRINTF(3, 4) {
+ if (errored()) {
+ return;
+ }
+
+ va_list ap;
+ va_start(ap, message);
+ auto reason_ = gen->abortFmt(r, message, ap);
+ va_end(ap);
+ gen->setOffThreadStatus(reason_);
+ }
+ void abort(AbortReason r) {
+ if (errored()) {
+ return;
+ }
+
+ auto reason_ = gen->abort(r);
+ gen->setOffThreadStatus(reason_);
+ }
+
+ static void ReorderCommutative(MDefinition** lhsp, MDefinition** rhsp,
+ MInstruction* ins);
+ static bool ShouldReorderCommutative(MDefinition* lhs, MDefinition* rhs,
+ MInstruction* ins);
+
+ // A backend can decide that an instruction should be emitted at its uses,
+  // rather than at its definition. To communicate this, set the
+  // instruction's virtual register to 0. When using the instruction,
+ // its virtual register is temporarily reassigned. To know to clear it
+ // after constructing the use information, the worklist bit is temporarily
+ // unset.
+ //
+ // The backend can use the worklist bit to determine whether or not a
+ // definition should be created.
+ inline void emitAtUses(MInstruction* mir);
+
+  // The lowest-level calls to use(), i.e. those that do not wrap another
+  // call to use(), must call ensureDefined() before grabbing a virtual
+  // register ID.
+ inline void ensureDefined(MDefinition* mir);
+
+ void visitEmittedAtUses(MInstruction* ins);
+
+ // These all create a use of a virtual register, with an optional
+ // allocation policy.
+ //
+ // Some of these use functions have atStart variants.
+ // - non-atStart variants will tell the register allocator that the input
+ // allocation must be different from any Temp or Definition also needed for
+ // this LInstruction.
+ // - atStart variants relax that restriction and allow the input to be in
+ // the same register as any output Definition (but not Temps) used by the
+ // LInstruction. Note that it doesn't *imply* this will actually happen,
+ // but gives a hint to the register allocator that it can do it.
+ //
+ // TL;DR: Use non-atStart variants only if you need the input value after
+ // writing to any definitions (excluding temps), during code generation of
+ // this LInstruction. Otherwise, use atStart variants, which will lower
+ // register pressure.
+ //
+ // There is an additional constraint. Consider a MIR node with two
+ // MDefinition* operands, op1 and op2. If the node reuses the register of op1
+ // for its output then op1 must be used as atStart. Then, if op1 and op2
+ // represent the same LIR node then op2 must be an atStart use too; otherwise
+ // op2 must be a non-atStart use. There is however not always a 1-1 mapping
+ // from MDefinition* to LNode*, so to determine whether two MDefinition* map
+ // to the same LNode*, ALWAYS go via the willHaveDifferentLIRNodes()
+ // predicate. Do not use pointer equality on the MIR nodes.
+ //
+ // Do not add other conditions when using willHaveDifferentLIRNodes(). The
+ // predicate is the source of truth about whether to use atStart or not, no
+ // other conditions may apply in contexts when it is appropriate to use it.
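+  //
+  // A rough sketch of the reuse-input pattern described above (mir is the MIR
+  // node being lowered; LAddI is used purely for illustration):
+  //
+  //   MDefinition* lhs = mir->lhs();
+  //   MDefinition* rhs = mir->rhs();
+  //   auto* lir = new (alloc()) LAddI();
+  //   lir->setOperand(0, useRegisterAtStart(lhs));
+  //   lir->setOperand(1, willHaveDifferentLIRNodes(lhs, rhs)
+  //                          ? useRegister(rhs)
+  //                          : useRegisterAtStart(rhs));
+  //   defineReuseInput(lir, mir, 0);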
+ inline LUse use(MDefinition* mir, LUse policy);
+ inline LUse use(MDefinition* mir);
+ inline LUse useAtStart(MDefinition* mir);
+ inline LUse useRegister(MDefinition* mir);
+ inline LUse useRegisterAtStart(MDefinition* mir);
+ inline LUse useFixed(MDefinition* mir, Register reg);
+ inline LUse useFixed(MDefinition* mir, FloatRegister reg);
+ inline LUse useFixed(MDefinition* mir, AnyRegister reg);
+ inline LUse useFixedAtStart(MDefinition* mir, Register reg);
+ inline LUse useFixedAtStart(MDefinition* mir, AnyRegister reg);
+ inline LAllocation useOrConstant(MDefinition* mir);
+ inline LAllocation useOrConstantAtStart(MDefinition* mir);
+ // "Any" is architecture dependent, and will include registers and stack
+ // slots on X86, and only registers on ARM.
+ inline LAllocation useAny(MDefinition* mir);
+ inline LAllocation useAnyAtStart(MDefinition* mir);
+ inline LAllocation useAnyOrConstant(MDefinition* mir);
+  // "Storable" is architecture dependent, and will include registers and
+ // constants on X86 and only registers on ARM. This is a generic "things
+ // we can expect to write into memory in 1 instruction".
+ inline LAllocation useStorable(MDefinition* mir);
+ inline LAllocation useStorableAtStart(MDefinition* mir);
+ inline LAllocation useKeepalive(MDefinition* mir);
+ inline LAllocation useKeepaliveOrConstant(MDefinition* mir);
+ inline LAllocation useRegisterOrConstant(MDefinition* mir);
+ inline LAllocation useRegisterOrConstantAtStart(MDefinition* mir);
+ inline LAllocation useRegisterOrZeroAtStart(MDefinition* mir);
+ inline LAllocation useRegisterOrZero(MDefinition* mir);
+ inline LAllocation useRegisterOrNonDoubleConstant(MDefinition* mir);
+
+ // These methods accept either an Int32 or IntPtr value. A constant is used if
+ // the value fits in an int32.
+ inline LAllocation useRegisterOrInt32Constant(MDefinition* mir);
+ inline LAllocation useAnyOrInt32Constant(MDefinition* mir);
+
+ // Like useRegisterOrInt32Constant, but uses a constant only if
+ // |int32val * Scalar::byteSize(type) + offsetAdjustment| doesn't overflow
+ // int32.
+ LAllocation useRegisterOrIndexConstant(MDefinition* mir, Scalar::Type type,
+ int32_t offsetAdjustment = 0);
+
+ inline LUse useRegisterForTypedLoad(MDefinition* mir, MIRType type);
+
+#ifdef JS_NUNBOX32
+ inline LUse useType(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayload(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayloadAtStart(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayloadInRegisterAtStart(MDefinition* mir);
+
+ // Adds a box input to an instruction, setting operand |n| to the type and
+ // |n+1| to the payload. Does not modify the operands, instead expecting a
+ // policy to already be set.
+ inline void fillBoxUses(LInstruction* lir, size_t n, MDefinition* mir);
+#endif
+
+ // Test whether mir1 and mir2 may give rise to different LIR nodes even if
+ // mir1 == mir2; use it to guide the selection of the use directive for one of
+ // the nodes in the context of a reused input. See comments above about why
+ // it's important to use this predicate and not pointer equality.
+ //
+ // This predicate may be called before or after the application of a use
+ // directive to the first of the nodes, but it is meaningless to call it after
+ // the application of a directive to the second node.
+ inline bool willHaveDifferentLIRNodes(MDefinition* mir1, MDefinition* mir2);
+
+ // These create temporary register requests.
+ inline LDefinition temp(LDefinition::Type type = LDefinition::GENERAL,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ inline LInt64Definition tempInt64(
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ inline LDefinition tempFloat32();
+ inline LDefinition tempDouble();
+#ifdef ENABLE_WASM_SIMD
+ inline LDefinition tempSimd128();
+#endif
+ inline LDefinition tempCopy(MDefinition* input, uint32_t reusedInput);
+
+ // Note that the fixed register has a GENERAL type,
+ // unless the arg is of FloatRegister type
+ inline LDefinition tempFixed(Register reg);
+ inline LDefinition tempFixed(FloatRegister reg);
+ inline LInt64Definition tempInt64Fixed(Register64 reg);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineFixed(LInstructionHelper<1, Ops, Temps>* lir,
+ MDefinition* mir, const LAllocation& output);
+
+ template <size_t Temps>
+ inline void defineBox(
+ details::LInstructionFixedDefsTempsHelper<BOX_PIECES, Temps>* lir,
+ MDefinition* mir, LDefinition::Policy policy = LDefinition::REGISTER);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir,
+ MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64Fixed(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ const LInt64Allocation& output);
+
+ inline void defineReturn(LInstruction* lir, MDefinition* mir);
+
+ template <size_t X>
+ inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir,
+ MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ template <size_t X>
+ inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir,
+ MDefinition* mir, const LDefinition& def);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineReuseInput(LInstructionHelper<1, Ops, Temps>* lir,
+ MDefinition* mir, uint32_t operand);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineBoxReuseInput(
+ LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64ReuseInput(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand);
+
+ // Returns a box allocation for a Value-typed instruction.
+ inline LBoxAllocation useBox(MDefinition* mir,
+ LUse::Policy policy = LUse::REGISTER,
+ bool useAtStart = false);
+
+ // Returns a box allocation. The use is either typed, a Value, or
+ // a constant (if useConstant is true).
+ inline LBoxAllocation useBoxOrTypedOrConstant(MDefinition* mir,
+ bool useConstant,
+ bool useAtStart = false);
+ inline LBoxAllocation useBoxOrTyped(MDefinition* mir,
+ bool useAtStart = false);
+
+ // Returns an int64 allocation for an Int64-typed instruction.
+ inline LInt64Allocation useInt64(MDefinition* mir, LUse::Policy policy,
+ bool useAtStart);
+ inline LInt64Allocation useInt64(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64AtStart(MDefinition* mir);
+ inline LInt64Allocation useInt64OrConstant(MDefinition* mir,
+ bool useAtStart = false);
+ inline LInt64Allocation useInt64Register(MDefinition* mir,
+ bool useAtStart = false);
+ inline LInt64Allocation useInt64RegisterOrConstant(MDefinition* mir,
+ bool useAtStart = false);
+ inline LInt64Allocation useInt64Fixed(MDefinition* mir, Register64 regs,
+ bool useAtStart = false);
+ inline LInt64Allocation useInt64FixedAtStart(MDefinition* mir,
+ Register64 regs);
+
+ inline LInt64Allocation useInt64RegisterAtStart(MDefinition* mir);
+ inline LInt64Allocation useInt64RegisterOrConstantAtStart(MDefinition* mir);
+ inline LInt64Allocation useInt64OrConstantAtStart(MDefinition* mir);
+
+ // Rather than defining a new virtual register, sets |ins| to have the same
+ // virtual register as |as|.
+ inline void redefine(MDefinition* ins, MDefinition* as);
+
+ template <typename LClass, typename... Args>
+ inline LClass* allocateVariadic(uint32_t numOperands, Args&&... args);
+
+ TempAllocator& alloc() const { return graph.alloc(); }
+
+ uint32_t getVirtualRegister() {
+ uint32_t vreg = lirGraph_.getVirtualRegister();
+
+ // If we run out of virtual registers, mark code generation as having
+ // failed and return a dummy vreg. Include a + 1 here for NUNBOX32
+ // platforms that expect Value vregs to be adjacent.
+ if (vreg + 1 >= MAX_VIRTUAL_REGISTERS) {
+ abort(AbortReason::Alloc, "max virtual registers");
+ return 1;
+ }
+ return vreg;
+ }
+
+ template <typename T>
+ void annotate(T* ins);
+ template <typename T>
+ void add(T* ins, MInstruction* mir = nullptr);
+
+ void lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+
+ void definePhiOneRegister(MPhi* phi, size_t lirIndex);
+#ifdef JS_NUNBOX32
+ void definePhiTwoRegisters(MPhi* phi, size_t lirIndex);
+#endif
+
+ void defineTypedPhi(MPhi* phi, size_t lirIndex) {
+ // One register containing the payload.
+ definePhiOneRegister(phi, lirIndex);
+ }
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex) {
+#ifdef JS_NUNBOX32
+ // Two registers: one for the type, one for the payload.
+ definePhiTwoRegisters(phi, lirIndex);
+#else
+ // One register containing the full Value.
+ definePhiOneRegister(phi, lirIndex);
+#endif
+ }
+
+ LOsiPoint* popOsiPoint() {
+ LOsiPoint* tmp = osiPoint_;
+ osiPoint_ = nullptr;
+ return tmp;
+ }
+
+ LRecoverInfo* getRecoverInfo(MResumePoint* rp);
+ LSnapshot* buildSnapshot(MResumePoint* rp, BailoutKind kind);
+ bool assignPostSnapshot(MInstruction* mir, LInstruction* ins);
+
+ // Marks this instruction as fallible, meaning that before it performs
+ // effects (if any), it may check pre-conditions and bailout if they do not
+ // hold. This function informs the register allocator that it will need to
+ // capture appropriate state.
+ void assignSnapshot(LInstruction* ins, BailoutKind kind);
+
+ // Marks this instruction as needing to call into either the VM or GC. This
+ // function may build a snapshot that captures the result of its own
+ // instruction, and as such, should generally be called after define*().
+ void assignSafepoint(LInstruction* ins, MInstruction* mir,
+ BailoutKind kind = BailoutKind::DuringVMCall);
+
+ // Marks this instruction as needing a wasm safepoint.
+ void assignWasmSafepoint(LInstruction* ins);
+
+ inline void lowerConstantDouble(double d, MInstruction* mir);
+ inline void lowerConstantFloat32(float f, MInstruction* mir);
+
+ bool canSpecializeWasmCompareAndSelect(MCompare::CompareType compTy,
+ MIRType insTy);
+ void lowerWasmCompareAndSelect(MWasmSelect* ins, MDefinition* lhs,
+ MDefinition* rhs, MCompare::CompareType compTy,
+ JSOp jsop);
+
+ public:
+ // Whether to generate typed reads for element accesses with hole checks.
+ static bool allowTypedElementHoleCheck() { return false; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Lowering_shared_h */
diff --git a/js/src/jit/wasm32/Architecture-wasm32.h b/js/src/jit/wasm32/Architecture-wasm32.h
new file mode 100644
index 0000000000..d7726eaa5f
--- /dev/null
+++ b/js/src/jit/wasm32/Architecture-wasm32.h
@@ -0,0 +1,174 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_Architecture_wasm32_h
+#define jit_wasm32_Architecture_wasm32_h
+
+// JitSpewer.h is included through MacroAssembler implementations for other
+// platforms, so include it here to avoid inadvertent build bustage.
+#include "jit/JitSpewer.h"
+
+#include "jit/shared/Architecture-shared.h"
+
+namespace js::jit {
+
+static const uint32_t SimdMemoryAlignment =
+ 4; // Make it 4 to avoid a bunch of div-by-zero warnings
+static const uint32_t WasmStackAlignment = 8;
+static const uint32_t WasmTrapInstructionLength = 0;
+
+// See comments in wasm::GenerateFunctionPrologue.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
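+// The wasm32 port is largely a placeholder: it models only a handful of
+// pseudo-registers (sp corresponds to the module's __stack_pointer global),
+// and most of the register-set queries below crash if they are ever reached.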
+class Registers {
+ public:
+ enum RegisterID {
+ sp = 0, // corresponds to global __stack_pointer which is mapped into
+ // global[0]
+ fp = 1,
+ r2 = 2,
+ r3 = 3,
+ invalid_reg,
+ invalid_reg2, // To avoid silly static_assert failures.
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ typedef uint8_t SetType;
+
+ static uint32_t SetSize(SetType) { MOZ_CRASH(); }
+ static uint32_t FirstBit(SetType) { MOZ_CRASH(); }
+ static uint32_t LastBit(SetType) { MOZ_CRASH(); }
+ static const char* GetName(Code) { MOZ_CRASH(); }
+ static Code FromName(const char*) { MOZ_CRASH(); }
+
+ static const Encoding StackPointer = RegisterID::sp;
+ static const Encoding FramePointer = RegisterID::fp;
+ static const Encoding Invalid = invalid_reg;
+ static const uint32_t Total = 5;
+ static const uint32_t TotalPhys = 0;
+ static const uint32_t Allocatable = 0;
+ static const SetType AllMask = 0;
+ static const SetType ArgRegMask = 0;
+ static const SetType VolatileMask = 0;
+ static const SetType NonVolatileMask = 0;
+ static const SetType NonAllocatableMask = 0;
+ static const SetType AllocatableMask = 0;
+ static const SetType JSCallMask = 0;
+ static const SetType CallMask = 0;
+};
+
+typedef uint8_t PackedRegisterMask;
+
+class FloatRegisters {
+ public:
+ enum FPRegisterID { f0 = 0, invalid_reg };
+ typedef FPRegisterID Code;
+ typedef FPRegisterID Encoding;
+ union RegisterContent {
+ float s;
+ double d;
+ };
+
+ typedef uint32_t SetType;
+
+ static const char* GetName(Code) { MOZ_CRASH(); }
+ static Code FromName(const char*) { MOZ_CRASH(); }
+
+ static const Code Invalid = invalid_reg;
+ static const uint32_t Total = 0;
+ static const uint32_t TotalPhys = 0;
+ static const uint32_t Allocatable = 0;
+ static const SetType AllMask = 0;
+ static const SetType AllDoubleMask = 0;
+ static const SetType AllSingleMask = 0;
+ static const SetType VolatileMask = 0;
+ static const SetType NonVolatileMask = 0;
+ static const SetType NonAllocatableMask = 0;
+ static const SetType AllocatableMask = 0;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+struct FloatRegister {
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+
+ Code _;
+
+ static uint32_t FirstBit(SetType) { MOZ_CRASH(); }
+ static uint32_t LastBit(SetType) { MOZ_CRASH(); }
+ static FloatRegister FromCode(uint32_t) { MOZ_CRASH(); }
+ bool isSingle() const { MOZ_CRASH(); }
+ bool isDouble() const { MOZ_CRASH(); }
+ bool isSimd128() const { MOZ_CRASH(); }
+ bool isInvalid() const { MOZ_CRASH(); }
+ FloatRegister asSingle() const { MOZ_CRASH(); }
+ FloatRegister asDouble() const { MOZ_CRASH(); }
+ FloatRegister asSimd128() const { MOZ_CRASH(); }
+ Code code() const { MOZ_CRASH(); }
+ Encoding encoding() const { MOZ_CRASH(); }
+ const char* name() const { MOZ_CRASH(); }
+ bool volatile_() const { MOZ_CRASH(); }
+ bool operator!=(FloatRegister) const { MOZ_CRASH(); }
+ bool operator==(FloatRegister) const { MOZ_CRASH(); }
+ bool aliases(FloatRegister) const { MOZ_CRASH(); }
+ uint32_t numAliased() const { MOZ_CRASH(); }
+ FloatRegister aliased(uint32_t) { MOZ_CRASH(); }
+ bool equiv(FloatRegister) const { MOZ_CRASH(); }
+ uint32_t size() const { MOZ_CRASH(); }
+ uint32_t numAlignedAliased() const { MOZ_CRASH(); }
+ FloatRegister alignedAliased(uint32_t) { MOZ_CRASH(); }
+ SetType alignedOrDominatedAliasedSet() const { MOZ_CRASH(); }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ return SetType(0);
+ }
+
+ template <typename T>
+ static T ReduceSetForPush(T) {
+ MOZ_CRASH();
+ }
+ uint32_t getRegisterDumpOffsetInBytes() { MOZ_CRASH(); }
+ static uint32_t SetSize(SetType x) { MOZ_CRASH(); }
+ static Code FromName(const char* name) { MOZ_CRASH(); }
+
+ // This is used in static initializers, so produce a bogus value instead of
+ // crashing.
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>&) {
+ return 0;
+ }
+};
+
+inline bool hasUnaliasedDouble() { MOZ_CRASH(); }
+inline bool hasMultiAlias() { MOZ_CRASH(); }
+
+static const uint32_t ShadowStackSpace = 0;
+static const uint32_t JumpImmediateRange = INT32_MAX;
+
+#ifdef JS_NUNBOX32
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+#endif
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_Architecture_wasm32_h */
diff --git a/js/src/jit/wasm32/Assembler-wasm32.h b/js/src/jit/wasm32/Assembler-wasm32.h
new file mode 100644
index 0000000000..d3816a3ea6
--- /dev/null
+++ b/js/src/jit/wasm32/Assembler-wasm32.h
@@ -0,0 +1,229 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_Assembler_wasm32_h
+#define jit_wasm32_Assembler_wasm32_h
+
+#include "mozilla/Assertions.h"
+
+#include <cstdint>
+#include <iterator>  // std::size, used by NumCallTempNonArgRegs below
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/wasm32/Architecture-wasm32.h"
+#include "js/Value.h"
+
+namespace js::jit {
+
+struct ImmTag : public Imm32 {
+ explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+struct ImmType : public ImmTag {
+ explicit ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
+};
+
+class MacroAssembler;
+
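+// Register assignments. Apart from the stack and frame pointers, every
+// special-purpose register below is pinned to an invalid register;
+// presumably these definitions exist only so the shared JIT headers compile,
+// since code that consumes them is not expected to run on this target.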
+static constexpr Register StackPointer{Registers::StackPointer};
+static constexpr Register FramePointer{Registers::FramePointer};
+
+static constexpr Register ReturnReg{Registers::invalid_reg2};
+static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister ReturnSimd128Reg = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister ScratchSimd128Reg = {
+ FloatRegisters::invalid_reg};
+static constexpr FloatRegister InvalidFloatReg = {FloatRegisters::invalid_reg};
+
+struct ScratchFloat32Scope : FloatRegister {
+ explicit ScratchFloat32Scope(MacroAssembler& masm) {}
+};
+
+struct ScratchDoubleScope : FloatRegister {
+ explicit ScratchDoubleScope(MacroAssembler& masm) {}
+};
+
+static constexpr Register OsrFrameReg{Registers::invalid_reg};
+static constexpr Register PreBarrierReg{Registers::invalid_reg};
+static constexpr Register InterpreterPCReg{Registers::invalid_reg};
+static constexpr Register CallTempReg0{Registers::invalid_reg};
+static constexpr Register CallTempReg1{Registers::invalid_reg};
+static constexpr Register CallTempReg2{Registers::invalid_reg};
+static constexpr Register CallTempReg3{Registers::invalid_reg};
+static constexpr Register CallTempReg4{Registers::invalid_reg};
+static constexpr Register CallTempReg5{Registers::invalid_reg};
+static constexpr Register InvalidReg{Registers::invalid_reg};
+static constexpr Register CallTempNonArgRegs[] = {InvalidReg, InvalidReg};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+static constexpr Register IntArgReg0{Registers::invalid_reg};
+static constexpr Register IntArgReg1{Registers::invalid_reg};
+static constexpr Register IntArgReg2{Registers::invalid_reg};
+static constexpr Register IntArgReg3{Registers::invalid_reg};
+static constexpr Register HeapReg{Registers::invalid_reg};
+
+static constexpr Register RegExpMatcherRegExpReg{Registers::invalid_reg};
+static constexpr Register RegExpMatcherStringReg{Registers::invalid_reg};
+static constexpr Register RegExpMatcherLastIndexReg{Registers::invalid_reg};
+
+static constexpr Register RegExpExecTestRegExpReg{Registers::invalid_reg};
+static constexpr Register RegExpExecTestStringReg{Registers::invalid_reg};
+
+static constexpr Register RegExpSearcherRegExpReg{Registers::invalid_reg};
+static constexpr Register RegExpSearcherStringReg{Registers::invalid_reg};
+static constexpr Register RegExpSearcherLastIndexReg{Registers::invalid_reg};
+
+// Uses |invalid_reg2| to avoid static_assert failures.
+static constexpr Register JSReturnReg_Type{Registers::invalid_reg2};
+static constexpr Register JSReturnReg_Data{Registers::invalid_reg2};
+static constexpr Register JSReturnReg{Registers::invalid_reg2};
+
+#if defined(JS_NUNBOX32)
+static constexpr ValueOperand JSReturnOperand(Register{Registers::r2},
+ Register{Registers::r3});
+static constexpr Register64 ReturnReg64(InvalidReg, InvalidReg);
+#elif defined(JS_PUNBOX64)
+static constexpr ValueOperand JSReturnOperand(InvalidReg);
+static constexpr Register64 ReturnReg64(InvalidReg);
+#else
+# error "Bad architecture"
+#endif
+
+static constexpr Register ABINonArgReg0{Registers::invalid_reg};
+static constexpr Register ABINonArgReg1{Registers::invalid_reg};
+static constexpr Register ABINonArgReg2{Registers::invalid_reg};
+static constexpr Register ABINonArgReg3{Registers::invalid_reg};
+static constexpr Register ABINonArgReturnReg0{Registers::invalid_reg};
+static constexpr Register ABINonArgReturnReg1{Registers::invalid_reg};
+static constexpr Register ABINonVolatileReg{Registers::invalid_reg};
+static constexpr Register ABINonArgReturnVolatileReg{Registers::invalid_reg};
+
+static constexpr FloatRegister ABINonArgDoubleReg = {
+ FloatRegisters::invalid_reg};
+
+static constexpr Register WasmTableCallScratchReg0{Registers::invalid_reg};
+static constexpr Register WasmTableCallScratchReg1{Registers::invalid_reg};
+static constexpr Register WasmTableCallSigReg{Registers::invalid_reg};
+static constexpr Register WasmTableCallIndexReg{Registers::invalid_reg};
+static constexpr Register InstanceReg{Registers::invalid_reg};
+static constexpr Register WasmJitEntryReturnScratch{Registers::invalid_reg};
+static constexpr Register WasmCallRefCallScratchReg0{Registers::invalid_reg};
+static constexpr Register WasmCallRefCallScratchReg1{Registers::invalid_reg};
+static constexpr Register WasmCallRefReg{Registers::invalid_reg};
+
+static constexpr uint32_t ABIStackAlignment = 4;
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 8;
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+
+static const Scale ScalePointer = TimesOne;
+
+static constexpr uint32_t Int32SizeLog2 = 2;
+
+struct MemoryArgument {
+ uint32_t align;
+ uint32_t offset;
+};
+
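+// AssemblerWasm32 adds nothing to the shared assembler. The Assembler class
+// below only defines the condition enums; its patching, toggling and binding
+// entry points all crash, and HasRoundInstruction() is the one callable
+// method, reporting that no rounding instructions are available.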
+class AssemblerWasm32 : public AssemblerShared {};
+
+class Assembler : public AssemblerWasm32 {
+ public:
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual,
+ Overflow,
+ CarrySet,
+ CarryClear,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ Always,
+ };
+
+ enum DoubleCondition {
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ static Condition InvertCondition(Condition) { MOZ_CRASH(); }
+
+ static DoubleCondition InvertCondition(DoubleCondition) { MOZ_CRASH(); }
+
+ template <typename T, typename S>
+ static void PatchDataWithValueCheck(CodeLocationLabel, T, S) {
+ MOZ_CRASH();
+ }
+ static void PatchWrite_Imm32(CodeLocationLabel, Imm32) { MOZ_CRASH(); }
+
+ static void PatchWrite_NearCall(CodeLocationLabel, CodeLocationLabel) {
+ MOZ_CRASH();
+ }
+ static uint32_t PatchWrite_NearCallSize() { MOZ_CRASH(); }
+
+ static void ToggleToJmp(CodeLocationLabel) { MOZ_CRASH(); }
+ static void ToggleToCmp(CodeLocationLabel) { MOZ_CRASH(); }
+ static void ToggleCall(CodeLocationLabel, bool) { MOZ_CRASH(); }
+
+ static void Bind(uint8_t*, const CodeLabel&) { MOZ_CRASH(); }
+
+ static uintptr_t GetPointer(uint8_t*) { MOZ_CRASH(); }
+
+ static bool HasRoundInstruction(RoundingMode) { return false; }
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess) {
+ MOZ_CRASH();
+ }
+
+ void setUnlimitedBuffer() { MOZ_CRASH(); }
+};
+
+class Operand {
+ public:
+ explicit Operand(const Address&) { MOZ_CRASH(); }
+ explicit Operand(const Register) { MOZ_CRASH(); }
+ explicit Operand(const FloatRegister) { MOZ_CRASH(); }
+ explicit Operand(Register, Imm32) { MOZ_CRASH(); }
+ explicit Operand(Register, int32_t) { MOZ_CRASH(); }
+};
+
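+// On other ports, ABIArgGenerator walks a signature and assigns each argument
+// to a register or stack slot, roughly (|argTypes| is illustrative):
+//
+//   ABIArgGenerator abi;
+//   for (MIRType type : argTypes) {
+//     ABIArg arg = abi.next(type);
+//     // ... place the argument according to |arg| ...
+//   }
+//
+// Here every method crashes, so no ABI layout is ever computed for wasm32.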
+class ABIArgGenerator {
+ public:
+ ABIArgGenerator() = default;
+ ABIArg next(MIRType) { MOZ_CRASH(); }
+ ABIArg& current() { MOZ_CRASH(); }
+ uint32_t stackBytesConsumedSoFar() const { MOZ_CRASH(); }
+ void increaseStackOffset(uint32_t) { MOZ_CRASH(); }
+};
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_Assembler_wasm32_h */
diff --git a/js/src/jit/wasm32/CodeGenerator-wasm32.cpp b/js/src/jit/wasm32/CodeGenerator-wasm32.cpp
new file mode 100644
index 0000000000..5535eed21d
--- /dev/null
+++ b/js/src/jit/wasm32/CodeGenerator-wasm32.cpp
@@ -0,0 +1,254 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/wasm32/CodeGenerator-wasm32.h"
+
+#include "jit/CodeGenerator.h"
+
+using namespace js::jit;
+
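+// Every LIR visitor in this file is a crashing stub; no machine code is
+// emitted for any LIR instruction on wasm32.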
+void CodeGenerator::visitDouble(LDouble*) { MOZ_CRASH(); }
+void CodeGenerator::visitFloat32(LFloat32* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitValue(LValue* value) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitRotateI64(LRotateI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) { MOZ_CRASH(); }
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) { MOZ_CRASH(); }
+void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) { MOZ_CRASH(); }
+void CodeGenerator::visitCompare(LCompare* comp) { MOZ_CRASH(); }
+void CodeGenerator::visitCompareI64(LCompareI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitCompareD(LCompareD* comp) { MOZ_CRASH(); }
+void CodeGenerator::visitCompareF(LCompareF* comp) { MOZ_CRASH(); }
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitNotI(LNotI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitNotI64(LNotI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitNotD(LNotD* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitNotF(LNotF* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitBitNotI(LBitNotI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitBitOpI(LBitOpI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitBitOpI64(LBitOpI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitShiftI(LShiftI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitShiftI64(LShiftI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitUrshD(LUrshD* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitMinMaxD(LMinMaxD* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitMinMaxF(LMinMaxF* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitNegI(LNegI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitNegI64(LNegI64* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitNegD(LNegD* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitNegF(LNegF* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitCopySignD(LCopySignD* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitCopySignF(LCopySignF* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitClzI(LClzI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitClzI64(LClzI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitCtzI(LCtzI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitCtzI64(LCtzI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitPopcntI(LPopcntI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitPopcntI64(LPopcntI64* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitAddI(LAddI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitAddI64(LAddI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitSubI(LSubI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitSubI64(LSubI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitMulI64(LMulI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitMathD(LMathD* math) { MOZ_CRASH(); }
+void CodeGenerator::visitMathF(LMathF* math) { MOZ_CRASH(); }
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitPowHalfD(LPowHalfD* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+ LCompareExchangeTypedArrayElement* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+ LAtomicExchangeTypedArrayElement* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH(); }
+void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+ LAtomicTypedArrayElementBinop* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+ LAtomicTypedArrayElementBinopForEffect* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmSelect(LWasmSelect* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmStore(LWasmStore* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) { MOZ_CRASH(); }
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+ LWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmVariableShiftSimd128(
+ LWasmVariableShiftSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmConstantShiftSimd128(
+ LWasmConstantShiftSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmSignReplicationSimd128(
+ LWasmSignReplicationSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+ LWasmReplaceInt64LaneSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+ LWasmReduceAndBranchSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+ LWasmReduceSimd128ToInt64* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitUnbox(LUnbox* unbox) { MOZ_CRASH(); }
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ MOZ_CRASH();
+}
+void CodeGenerator::visitDivI(LDivI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitModI(LModI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitMulI(LMulI* ins) { MOZ_CRASH(); }
+void CodeGenerator::visitBox(LBox* box) { MOZ_CRASH(); }
diff --git a/js/src/jit/wasm32/CodeGenerator-wasm32.h b/js/src/jit/wasm32/CodeGenerator-wasm32.h
new file mode 100644
index 0000000000..26d4adf982
--- /dev/null
+++ b/js/src/jit/wasm32/CodeGenerator-wasm32.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_CodeGenerator_wasm32_h
+#define jit_wasm32_CodeGenerator_wasm32_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js::jit {
+
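+// The platform-specific code generator crashes in its constructor, so none of
+// the bailout and branch helpers below can ever be invoked.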
+class CodeGeneratorWasm32 : public CodeGeneratorShared {
+ protected:
+ CodeGeneratorWasm32(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {
+ MOZ_CRASH();
+ }
+
+ MoveOperand toMoveOperand(LAllocation) const { MOZ_CRASH(); }
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition, T1, T2, LSnapshot*) {
+ MOZ_CRASH();
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition, T1, T2, LSnapshot*) {
+ MOZ_CRASH();
+ }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition, T1, T2, LSnapshot*) {
+ MOZ_CRASH();
+ }
+ void bailoutTestPtr(Assembler::Condition, Register, Register, LSnapshot*) {
+ MOZ_CRASH();
+ }
+ void bailoutIfFalseBool(Register, LSnapshot*) { MOZ_CRASH(); }
+ void bailoutFrom(Label*, LSnapshot*) { MOZ_CRASH(); }
+ void bailout(LSnapshot*) { MOZ_CRASH(); }
+ void bailoutIf(Assembler::Condition, LSnapshot*) { MOZ_CRASH(); }
+ bool generateOutOfLineCode() { MOZ_CRASH(); }
+ void testNullEmitBranch(Assembler::Condition, ValueOperand, MBasicBlock*,
+ MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void testUndefinedEmitBranch(Assembler::Condition, ValueOperand, MBasicBlock*,
+ MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void testObjectEmitBranch(Assembler::Condition, ValueOperand, MBasicBlock*,
+ MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void testZeroEmitBranch(Assembler::Condition, Register, MBasicBlock*,
+ MBasicBlock*) {
+ MOZ_CRASH();
+ }
+ void emitTableSwitchDispatch(MTableSwitch*, Register, Register) {
+ MOZ_CRASH();
+ }
+ void emitBigIntDiv(LBigIntDiv*, Register, Register, Register, Label*) {
+ MOZ_CRASH();
+ }
+ void emitBigIntMod(LBigIntMod*, Register, Register, Register, Label*) {
+ MOZ_CRASH();
+ }
+ ValueOperand ToValue(LInstruction*, size_t) { MOZ_CRASH(); }
+ ValueOperand ToTempValue(LInstruction*, size_t) { MOZ_CRASH(); }
+ void generateInvalidateEpilogue() { MOZ_CRASH(); }
+};
+
+typedef CodeGeneratorWasm32 CodeGeneratorSpecific;
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_CodeGenerator_wasm32_h */
diff --git a/js/src/jit/wasm32/LIR-wasm32.h b/js/src/jit/wasm32/LIR-wasm32.h
new file mode 100644
index 0000000000..8943d89143
--- /dev/null
+++ b/js/src/jit/wasm32/LIR-wasm32.h
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_LIR_wasm32_h
+#define jit_wasm32_LIR_wasm32_h
+
+namespace js::jit {
+
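+// Platform-specific LIR node stubs. These definitions presumably exist only
+// so the shared LIR and lowering headers compile; their accessors all crash,
+// and the constructors that are defined pass Opcode::Invalid and crash
+// immediately.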
+class LUnboxFloatingPoint : public LInstruction {
+ public:
+ LIR_HEADER(UnboxFloatingPoint)
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { MOZ_CRASH(); }
+
+ const LDefinition* output() const { MOZ_CRASH(); }
+ MIRType type() const { MOZ_CRASH(); }
+};
+
+class LTableSwitch : public LInstruction {
+ public:
+ LIR_HEADER(TableSwitch)
+ MTableSwitch* mir() { MOZ_CRASH(); }
+
+ const LAllocation* index() { MOZ_CRASH(); }
+ const LDefinition* tempInt() { MOZ_CRASH(); }
+ const LDefinition* tempPointer() { MOZ_CRASH(); }
+};
+
+class LTableSwitchV : public LInstruction {
+ public:
+ LIR_HEADER(TableSwitchV)
+ MTableSwitch* mir() { MOZ_CRASH(); }
+
+ const LDefinition* tempInt() { MOZ_CRASH(); }
+ const LDefinition* tempFloat() { MOZ_CRASH(); }
+ const LDefinition* tempPointer() { MOZ_CRASH(); }
+
+ static const size_t InputValue = 0;
+};
+
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ explicit LWasmUint32ToFloat32(const LAllocation&)
+ : LInstructionHelper(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0> {
+ public:
+ MUnbox* mir() const { MOZ_CRASH(); }
+ const LAllocation* payload() { MOZ_CRASH(); }
+ const LAllocation* type() { MOZ_CRASH(); }
+ const char* extraName() const { MOZ_CRASH(); }
+};
+
+class LDivI : public LBinaryMath<1> {
+ public:
+ LDivI(const LAllocation&, const LAllocation&, const LDefinition&)
+ : LBinaryMath(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+ MDiv* mir() const { MOZ_CRASH(); }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LDivPowTwoI(const LAllocation&, int32_t)
+ : LInstructionHelper(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+ const LAllocation* numerator() { MOZ_CRASH(); }
+ int32_t shift() { MOZ_CRASH(); }
+ MDiv* mir() const { MOZ_CRASH(); }
+};
+
+class LModI : public LBinaryMath<1> {
+ public:
+ LModI(const LAllocation&, const LAllocation&, const LDefinition&)
+ : LBinaryMath(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+
+ const LDefinition* callTemp() { MOZ_CRASH(); }
+ MMod* mir() const { MOZ_CRASH(); }
+};
+
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ explicit LWasmUint32ToDouble(const LAllocation&)
+ : LInstructionHelper(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ public:
+ int32_t shift() { MOZ_CRASH(); }
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(Opcode::Invalid) {
+ MOZ_CRASH();
+ }
+ MMod* mir() const { MOZ_CRASH(); }
+};
+
+class LMulI : public LInstruction {};
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_LIR_wasm32_h */
diff --git a/js/src/jit/wasm32/Lowering-wasm32.h b/js/src/jit/wasm32/Lowering-wasm32.h
new file mode 100644
index 0000000000..3a0aab364a
--- /dev/null
+++ b/js/src/jit/wasm32/Lowering-wasm32.h
@@ -0,0 +1,128 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_Lowering_wasm32_h
+#define jit_wasm32_Lowering_wasm32_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js::jit {
+
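+// Platform-specific lowering hooks. As with the other wasm32 stubs, the
+// constructor crashes immediately, so the lowering helpers below are
+// unreachable through a LIRGeneratorWasm32 instance.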
+class LIRGeneratorWasm32 : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorWasm32(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {
+ MOZ_CRASH();
+ }
+
+ LBoxAllocation useBoxFixed(MDefinition*, Register, Register,
+ bool useAtStart = false) {
+ MOZ_CRASH();
+ }
+
+ LAllocation useByteOpRegister(MDefinition*) { MOZ_CRASH(); }
+ LAllocation useByteOpRegisterAtStart(MDefinition*) { MOZ_CRASH(); }
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition*) {
+ MOZ_CRASH();
+ }
+ LDefinition tempByteOpRegister() { MOZ_CRASH(); }
+ LDefinition tempToUnbox() { MOZ_CRASH(); }
+ bool needTempForPostBarrier() { MOZ_CRASH(); }
+ void lowerUntypedPhiInput(MPhi*, uint32_t, LBlock*, size_t) { MOZ_CRASH(); }
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t) { MOZ_CRASH(); }
+ void defineInt64Phi(MPhi*, size_t) { MOZ_CRASH(); }
+ void lowerForShift(LInstructionHelper<1, 2, 0>*, MDefinition*, MDefinition*,
+ MDefinition*) {
+ MOZ_CRASH();
+ }
+ void lowerUrshD(MUrsh*) { MOZ_CRASH(); }
+ void lowerPowOfTwoI(MPow*) { MOZ_CRASH(); }
+ template <typename T>
+ void lowerForALU(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void lowerForFPU(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void lowerForALUInt64(T, MDefinition*, MDefinition*,
+ MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ void lowerForMulInt64(LMulI64*, MMul*, MDefinition*,
+ MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ template <typename T>
+ void lowerForShiftInt64(T, MDefinition*, MDefinition*,
+ MDefinition* v = nullptr) {
+ MOZ_CRASH();
+ }
+ void lowerForBitAndAndBranch(LBitAndAndBranch*, MInstruction*, MDefinition*,
+ MDefinition*) {
+ MOZ_CRASH();
+ }
+ void lowerForCompareI64AndBranch(MTest*, MCompare*, JSOp, MDefinition*,
+ MDefinition*, MBasicBlock*, MBasicBlock*) {
+ MOZ_CRASH();
+ }
+
+ void lowerConstantDouble(double, MInstruction*) { MOZ_CRASH(); }
+ void lowerConstantFloat32(float, MInstruction*) { MOZ_CRASH(); }
+ void lowerTruncateDToInt32(MTruncateToInt32*) { MOZ_CRASH(); }
+ void lowerTruncateFToInt32(MTruncateToInt32*) { MOZ_CRASH(); }
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH();
+ }
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH();
+ }
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins) {
+ MOZ_CRASH();
+ }
+ void lowerDivI(MDiv*) { MOZ_CRASH(); }
+ void lowerModI(MMod*) { MOZ_CRASH(); }
+ void lowerDivI64(MDiv*) { MOZ_CRASH(); }
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) { MOZ_CRASH(); }
+ void lowerModI64(MMod*) { MOZ_CRASH(); }
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) { MOZ_CRASH(); }
+ void lowerNegI(MInstruction*, MDefinition*) { MOZ_CRASH(); }
+ void lowerNegI64(MInstruction*, MDefinition*) { MOZ_CRASH(); }
+ void lowerMulI(MMul*, MDefinition*, MDefinition*) { MOZ_CRASH(); }
+ void lowerUDiv(MDiv*) { MOZ_CRASH(); }
+ void lowerUMod(MMod*) { MOZ_CRASH(); }
+ void lowerWasmSelectI(MWasmSelect* select) { MOZ_CRASH(); }
+ void lowerWasmSelectI64(MWasmSelect* select) { MOZ_CRASH(); }
+ void lowerWasmCompareAndSelect(MWasmSelect* ins, MDefinition* lhs,
+ MDefinition* rhs, MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_CRASH();
+ }
+ bool canSpecializeWasmCompareAndSelect(MCompare::CompareType compTy,
+ MIRType insTy) {
+ MOZ_CRASH();
+ }
+
+ void lowerBigIntLsh(MBigIntLsh*) { MOZ_CRASH(); }
+ void lowerBigIntRsh(MBigIntRsh*) { MOZ_CRASH(); }
+ void lowerBigIntDiv(MBigIntDiv*) { MOZ_CRASH(); }
+ void lowerBigIntMod(MBigIntMod*) { MOZ_CRASH(); }
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar*) { MOZ_CRASH(); }
+ void lowerAtomicStore64(MStoreUnboxedScalar*) { MOZ_CRASH(); }
+
+ LTableSwitch* newLTableSwitch(LAllocation, LDefinition, MTableSwitch*) {
+ MOZ_CRASH();
+ }
+ LTableSwitchV* newLTableSwitchV(MTableSwitch*) { MOZ_CRASH(); }
+};
+
+typedef LIRGeneratorWasm32 LIRGeneratorSpecific;
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_Lowering_wasm32_h */
diff --git a/js/src/jit/wasm32/MacroAssembler-wasm32-inl.h b/js/src/jit/wasm32/MacroAssembler-wasm32-inl.h
new file mode 100644
index 0000000000..eca2a07a65
--- /dev/null
+++ b/js/src/jit/wasm32/MacroAssembler-wasm32-inl.h
@@ -0,0 +1,1176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_MacroAssembler_wasm32_inl_h
+#define jit_wasm32_MacroAssembler_wasm32_inl_h
+
+#include "jit/wasm32/MacroAssembler-wasm32.h"
+
+namespace js::jit {
+
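+// Inline MacroAssembler methods for wasm32: every arithmetic, branch and move
+// operation below is a crashing stub. The check_macroassembler_style markers
+// keep this list of signatures in sync with the shared MacroAssembler
+// declarations.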
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::move64(Register64 src, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::notPtr(Register reg) { MOZ_CRASH(); }
+
+void MacroAssembler::andPtr(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::orPtr(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::and64(Register64 src, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::or64(Register64 src, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::xorPtr(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::byteSwap64(Register64 reg) { MOZ_CRASH(); }
+
+void MacroAssembler::addPtr(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::add64(Register64 src, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) { MOZ_CRASH(); }
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) { MOZ_CRASH(); }
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) { MOZ_CRASH(); }
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::inc64(AbsoluteAddress dest) { MOZ_CRASH(); }
+
+void MacroAssembler::neg64(Register64 reg) { MOZ_CRASH(); }
+
+void MacroAssembler::negPtr(Register reg) { MOZ_CRASH(); }
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) { MOZ_CRASH(); }
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::lshiftPtr(Register shift, Register srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rshiftPtr(Register shift, Register srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::clz64(Register64 src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::ctz64(Register64 src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::popcnt64(Register64 src, Register64 dest, Register temp) {
+ MOZ_CRASH();
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchToComputedAddress(const BaseIndex& address) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::loadAbiReturnAddress(Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::not32(Register reg) { MOZ_CRASH(); }
+
+void MacroAssembler::and32(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::and32(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) { MOZ_CRASH(); }
+
+void MacroAssembler::and32(const Address& src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::or32(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::or32(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) { MOZ_CRASH(); }
+
+void MacroAssembler::xor32(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) { MOZ_CRASH(); }
+
+void MacroAssembler::xor32(const Address& src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::byteSwap16SignExtend(Register reg) { MOZ_CRASH(); }
+
+void MacroAssembler::byteSwap16ZeroExtend(Register reg) { MOZ_CRASH(); }
+
+void MacroAssembler::byteSwap32(Register reg) { MOZ_CRASH(); }
+
+void MacroAssembler::add32(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::add32(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) { MOZ_CRASH(); }
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::sub32(const Address& src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::sub32(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::mul32(Register rhs, Register srcDest) { MOZ_CRASH(); }
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) { MOZ_CRASH(); }
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::neg32(Register reg) { MOZ_CRASH(); }
+
+void MacroAssembler::negateFloat(FloatRegister reg) { MOZ_CRASH(); }
+
+void MacroAssembler::negateDouble(FloatRegister reg) { MOZ_CRASH(); }
+
+void MacroAssembler::abs32(Register src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::lshift32(Imm32 shift, Register srcDest) { MOZ_CRASH(); }
+
+void MacroAssembler::rshift32(Imm32 shift, Register srcDest) { MOZ_CRASH(); }
+
+void MacroAssembler::rshift32Arithmetic(Imm32 shift, Register srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rshift32Arithmetic(Register shift, Register srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::lshift32(Register shift, Register srcDest) { MOZ_CRASH(); }
+
+void MacroAssembler::rshift32(Register shift, Register srcDest) { MOZ_CRASH(); }
+
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) { MOZ_CRASH(); }
+
+void MacroAssembler::clampIntToUint8(Register reg) { MOZ_CRASH(); }
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_CRASH();
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_CRASH();
+}
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+ Imm32 rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestStringTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBigIntTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_CRASH();
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
+ Imm32 rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+ L label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ ImmWord rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreZeroRegister(Condition cond, Register scratch,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const Address& dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const BaseIndex& dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const Address& dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const BaseIndex& dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) { MOZ_CRASH(); }
+
+void MacroAssembler::addPtr(const Address& src, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::subPtr(Register src, const Address& dest) { MOZ_CRASH(); }
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) { MOZ_CRASH(); }
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+ Imm32 mask, Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestInt32Truthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchAdd64(Condition cond, Imm64 imm, Register64 dest,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rotateLeft64(Register count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rotateRight(Register count, Register input,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 input,
+ Register64 dest, Register temp) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::flexibleLshift32(Register shift, Register srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::flexibleRshift32(Register shift, Register srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::flexibleRshift32Arithmetic(Register shift,
+ Register srcDest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::popcnt32(Register src, Register dest, Register temp) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& t,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+//}}} check_macroassembler_style
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_MacroAssembler_wasm32_inl_h */
diff --git a/js/src/jit/wasm32/MacroAssembler-wasm32.cpp b/js/src/jit/wasm32/MacroAssembler-wasm32.cpp
new file mode 100644
index 0000000000..fe82793499
--- /dev/null
+++ b/js/src/jit/wasm32/MacroAssembler-wasm32.cpp
@@ -0,0 +1,502 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/wasm32/MacroAssembler-wasm32.h"
+
+namespace js::jit {
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) { MOZ_CRASH(); }
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::PushBoxed(FloatRegister reg) { MOZ_CRASH(); }
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::pushReturnAddress() { MOZ_CRASH(); }
+
+void MacroAssembler::popReturnAddress() { MOZ_CRASH(); }
+
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ MOZ_CRASH();
+}
+
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ MOZ_CRASH();
+ return 0;
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) { MOZ_CRASH(); }
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::PopStackPtr() { MOZ_CRASH(); }
+
+void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
+ Register remOutput, bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::flexibleRemainder32(
+ Register rhs, Register srcDest, bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register scratch) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::convertUInt64ToFloat32(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { MOZ_CRASH(); }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::patchCallToNop(uint8_t* call) { MOZ_CRASH(); }
+
+void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ MOZ_CRASH();
+}
+
+CodeOffset MacroAssembler::farJumpWithPatch() {
+ MOZ_CRASH();
+ return CodeOffset(0);
+}
+
+void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ MOZ_CRASH();
+}
+
+CodeOffset MacroAssembler::call(Register reg) {
+ MOZ_CRASH();
+ return CodeOffset(0);
+}
+
+CodeOffset MacroAssembler::call(Label* label) {
+ MOZ_CRASH();
+ return CodeOffset(0);
+}
+
+CodeOffset MacroAssembler::call(wasm::SymbolicAddress imm) {
+ MOZ_CRASH();
+ return CodeOffset(0);
+}
+
+CodeOffset MacroAssembler::callWithPatch() {
+ MOZ_CRASH();
+ return CodeOffset(0);
+}
+
+CodeOffset MacroAssembler::nopPatchableToCall() {
+ MOZ_CRASH();
+ return CodeOffset(0);
+}
+
+CodeOffset MacroAssembler::wasmTrapInstruction() {
+ MOZ_CRASH();
+ return CodeOffset(0);
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_CRASH();
+}
+
+uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::Pop(Register reg) { MOZ_CRASH(); }
+
+void MacroAssembler::Pop(FloatRegister t) { MOZ_CRASH(); }
+
+void MacroAssembler::Pop(const ValueOperand& val) { MOZ_CRASH(); }
+
+void MacroAssembler::Push(Register reg) { MOZ_CRASH(); }
+
+void MacroAssembler::Push(const Imm32 imm) { MOZ_CRASH(); }
+
+void MacroAssembler::Push(const ImmWord imm) { MOZ_CRASH(); }
+
+void MacroAssembler::Push(const ImmPtr imm) { MOZ_CRASH(); }
+
+void MacroAssembler::Push(const ImmGCPtr ptr) { MOZ_CRASH(); }
+
+void MacroAssembler::Push(FloatRegister reg) { MOZ_CRASH(); }
+
+void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 value, Register64 output) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
+
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+ Register pointer) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) { MOZ_CRASH(); }
+
+void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::call(const Address& addr) { MOZ_CRASH(); }
+
+void MacroAssembler::call(ImmWord imm) { MOZ_CRASH(); }
+
+void MacroAssembler::call(ImmPtr imm) { MOZ_CRASH(); }
+
+void MacroAssembler::call(JitCode* c) { MOZ_CRASH(); }
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::comment(const char* msg) { MOZ_CRASH(); }
+
+void MacroAssembler::flush() { MOZ_CRASH(); }
+
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ MOZ_CRASH();
+}
+
+//}}} check_macroassembler_style
+
+void MacroAssemblerWasm32::executableCopy(void* buffer) { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::jump(Label* label) { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::writeCodePointer(CodeLabel* label) { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::haltingAlign(size_t) { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::nopAlign(size_t) { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::checkStackAlignment() { MOZ_CRASH(); }
+
+uint32_t MacroAssemblerWasm32::currentOffset() {
+ MOZ_CRASH();
+ return 0;
+}
+
+void MacroAssemblerWasm32::nop() { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::breakpoint() { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::abiret() { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::ret() { MOZ_CRASH(); }
+
+CodeOffset MacroAssemblerWasm32::toggledJump(Label*) { MOZ_CRASH(); }
+
+CodeOffset MacroAssemblerWasm32::toggledCall(JitCode*, bool) { MOZ_CRASH(); }
+
+size_t MacroAssemblerWasm32::ToggledCallSize(uint8_t*) { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::finish() { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::pushValue(ValueOperand val) { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::popValue(ValueOperand) { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::tagValue(JSValueType, Register, ValueOperand) {
+ MOZ_CRASH();
+}
+
+void MacroAssemblerWasm32::retn(Imm32 n) { MOZ_CRASH(); }
+
+void MacroAssemblerWasm32::push(Register reg) { MOZ_CRASH(); }
+
+Address MacroAssemblerWasm32::ToType(const Address& address) { MOZ_CRASH(); }
+
+} // namespace js::jit
diff --git a/js/src/jit/wasm32/MacroAssembler-wasm32.h b/js/src/jit/wasm32/MacroAssembler-wasm32.h
new file mode 100644
index 0000000000..e876cfff1e
--- /dev/null
+++ b/js/src/jit/wasm32/MacroAssembler-wasm32.h
@@ -0,0 +1,528 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_MacroAssembler_wasm32_h
+#define jit_wasm32_MacroAssembler_wasm32_h
+
+#include "jit/wasm32/Assembler-wasm32.h"
+
+namespace js::jit {
+
+class CompactBufferReader;
+
+class ScratchTagScope {
+ public:
+ ScratchTagScope(MacroAssembler&, const ValueOperand) {}
+ operator Register() { MOZ_CRASH(); }
+ void release() { MOZ_CRASH(); }
+ void reacquire() { MOZ_CRASH(); }
+};
+
+class ScratchTagScopeRelease {
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope*) {}
+};
+
+class MacroAssemblerWasm32 : public Assembler {
+ public:
+ size_t size() const { return bytesNeeded(); }
+
+ size_t bytesNeeded() const { MOZ_CRASH(); }
+
+ size_t jumpRelocationTableBytes() const { MOZ_CRASH(); }
+
+ size_t dataRelocationTableBytes() const { MOZ_CRASH(); }
+
+ size_t preBarrierTableBytes() const { MOZ_CRASH(); }
+
+ size_t numCodeLabels() const { MOZ_CRASH(); }
+ CodeLabel codeLabel(size_t) { MOZ_CRASH(); }
+
+ bool reserve(size_t size) { MOZ_CRASH(); }
+ bool appendRawCode(const uint8_t* code, size_t numBytes) { MOZ_CRASH(); }
+ bool swapBuffer(wasm::Bytes& bytes) { MOZ_CRASH(); }
+
+ void assertNoGCThings() const { MOZ_CRASH(); }
+
+ static void TraceJumpRelocations(JSTracer*, JitCode*, CompactBufferReader&) {
+ MOZ_CRASH();
+ }
+ static void TraceDataRelocations(JSTracer*, JitCode*, CompactBufferReader&) {
+ MOZ_CRASH();
+ }
+
+ static bool SupportsFloatingPoint() { return true; }
+ static bool SupportsUnalignedAccesses() { return false; }
+ static bool SupportsFastUnalignedFPAccesses() { return false; }
+
+ void executableCopy(void* buffer);
+
+ void copyJumpRelocationTable(uint8_t*) { MOZ_CRASH(); }
+
+ void copyDataRelocationTable(uint8_t*) { MOZ_CRASH(); }
+
+ void copyPreBarrierTable(uint8_t*) { MOZ_CRASH(); }
+
+ void processCodeLabels(uint8_t*) { MOZ_CRASH(); }
+
+ void flushBuffer() { MOZ_CRASH(); }
+
+ void bind(Label* label) { MOZ_CRASH(); }
+
+ void bind(CodeLabel* label) { MOZ_CRASH(); }
+
+ template <typename T>
+ void j(Condition, T) {
+ MOZ_CRASH();
+ }
+
+ void jump(Label* label);
+
+ void jump(JitCode* code) { MOZ_CRASH(); }
+
+ void jump(Register reg) { MOZ_CRASH(); }
+
+ void jump(const Address& address) { MOZ_CRASH(); }
+
+ void jump(ImmPtr ptr) { MOZ_CRASH(); }
+
+ void jump(TrampolinePtr code) { MOZ_CRASH(); }
+
+ void writeCodePointer(CodeLabel* label);
+
+ void haltingAlign(size_t);
+
+ void nopAlign(size_t);
+ void checkStackAlignment();
+
+ uint32_t currentOffset();
+
+ void nop();
+
+ void breakpoint();
+
+ void abiret();
+ void ret();
+
+ CodeOffset toggledJump(Label*);
+ CodeOffset toggledCall(JitCode*, bool);
+ static size_t ToggledCallSize(uint8_t*);
+
+ void finish();
+
+ template <typename T, typename S>
+ void moveValue(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S, typename U>
+ void moveValue(T, S, U) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void storeValue(const T&, const S&) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S, typename U>
+ void storeValue(T, S, U) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void storePrivateValue(const T&, const S&) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void loadValue(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void loadUnalignedValue(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void pushValue(const T&) {
+ MOZ_CRASH();
+ }
+
+ void pushValue(ValueOperand val);
+
+ template <typename T, typename S>
+ void pushValue(T, S) {
+ MOZ_CRASH();
+ }
+
+ void popValue(ValueOperand);
+ void tagValue(JSValueType, Register, ValueOperand);
+ void retn(Imm32 n);
+
+ template <typename T>
+ void push(const T&) {
+ MOZ_CRASH();
+ }
+
+ void push(Register reg);
+
+ template <typename T>
+ void Push(T) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void pop(T) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ CodeOffset pushWithPatch(T) {
+ MOZ_CRASH();
+ }
+
+ void testNullSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
+ void testObjectSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
+ void testUndefinedSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
+
+ template <typename T, typename S>
+ void cmpPtrSet(Condition, T, S, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void mov(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void movePtr(T, Register) {
+ MOZ_CRASH();
+ }
+
+ void movePtr(Register src, Register dst) { MOZ_CRASH(); }
+
+ template <typename T>
+ void move32(const T&, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void movq(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void moveFloat32(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void moveDouble(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void move64(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ CodeOffset movWithPatch(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void loadPtr(T, Register) {
+ MOZ_CRASH();
+ }
+
+ void loadPtr(const Address& address, Register dest) { MOZ_CRASH(); }
+
+ template <typename T>
+ void load32(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void load32Unaligned(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void loadFloat32(T, FloatRegister) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void loadDouble(T, FloatRegister) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void loadPrivate(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void load8SignExtend(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void load8ZeroExtend(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void load16SignExtend(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void load16UnalignedSignExtend(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void load16ZeroExtend(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void load16UnalignedZeroExtend(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void load64(T, Register64) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void load64Unaligned(T, Register64) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void storePtr(const T&, S) {
+ MOZ_CRASH();
+ }
+
+ void storePtr(Register src, const Address& address) { MOZ_CRASH(); }
+ void storePtr(ImmPtr src, const Address& address) { MOZ_CRASH(); }
+
+ template <typename T, typename S>
+ void store32(T, S) {
+ MOZ_CRASH();
+ }
+
+ void store32(Imm32 src, const Address& address) { MOZ_CRASH(); }
+
+ template <typename T, typename S>
+ void store32Unaligned(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void storeFloat32(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void storeDouble(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void store8(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void store16(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void store16Unaligned(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void store64(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T, typename S>
+ void store64Unaligned(T, S) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void computeEffectiveAddress(T, Register) {
+ MOZ_CRASH();
+ }
+
+ void splitTagForTest(ValueOperand, ScratchTagScope&) { MOZ_CRASH(); }
+
+ void boxDouble(FloatRegister, ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void boxNonDouble(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }
+
+ template <typename T>
+ void boxDouble(FloatRegister src, const T& dest) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void unboxInt32(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void unboxBoolean(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void unboxString(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void unboxSymbol(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void unboxBigInt(T, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void unboxObject(T, Register) {
+ MOZ_CRASH();
+ }
+
+ void unboxObject(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+
+ template <typename T>
+ void unboxDouble(T, FloatRegister) {
+ MOZ_CRASH();
+ }
+
+ void unboxValue(const ValueOperand&, AnyRegister, JSValueType) {
+ MOZ_CRASH();
+ }
+
+ void unboxNonDouble(const ValueOperand&, Register, JSValueType) {
+ MOZ_CRASH();
+ }
+
+ void unboxNonDouble(const Address& address, Register dest, JSValueType type) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void unboxGCThingForGCBarrier(const T&, Register) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ MOZ_CRASH();
+ }
+
+ void notBoolean(ValueOperand) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractObject(Address, Register) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractObject(ValueOperand, Register) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractSymbol(ValueOperand, Register) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractInt32(ValueOperand, Register) { MOZ_CRASH(); }
+ [[nodiscard]] Register extractBoolean(ValueOperand, Register) { MOZ_CRASH(); }
+
+ template <typename T>
+ [[nodiscard]] Register extractTag(T, Register) {
+ MOZ_CRASH();
+ }
+
+ void convertFloat32ToInt32(FloatRegister, Register, Label*, bool v = true) {
+ MOZ_CRASH();
+ }
+ void convertDoubleToInt32(FloatRegister, Register, Label*, bool v = true) {
+ MOZ_CRASH();
+ }
+ void convertDoubleToPtr(FloatRegister, Register, Label*, bool v = true) {
+ MOZ_CRASH();
+ }
+ void convertBoolToInt32(Register, Register) { MOZ_CRASH(); }
+ void convertDoubleToFloat32(FloatRegister, FloatRegister) { MOZ_CRASH(); }
+ void convertInt32ToFloat32(Register, FloatRegister) { MOZ_CRASH(); }
+
+ template <typename T>
+ void convertInt32ToDouble(T, FloatRegister) {
+ MOZ_CRASH();
+ }
+
+ void convertFloat32ToDouble(FloatRegister, FloatRegister) { MOZ_CRASH(); }
+
+ void boolValueToDouble(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void boolValueToFloat32(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void int32ValueToDouble(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+ void int32ValueToFloat32(ValueOperand, FloatRegister) { MOZ_CRASH(); }
+
+ void loadConstantDouble(double, FloatRegister) { MOZ_CRASH(); }
+ void loadConstantFloat32(float, FloatRegister) { MOZ_CRASH(); }
+ Condition testInt32Truthy(bool, ValueOperand) { MOZ_CRASH(); }
+ Condition testStringTruthy(bool, ValueOperand) { MOZ_CRASH(); }
+ Condition testBigIntTruthy(bool, ValueOperand) { MOZ_CRASH(); }
+
+ template <typename T>
+ void loadUnboxedValue(T, MIRType, AnyRegister) {
+ MOZ_CRASH();
+ }
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T, size_t, JSValueType) {
+ MOZ_CRASH();
+ }
+
+ void convertUInt32ToDouble(Register, FloatRegister) { MOZ_CRASH(); }
+ void convertUInt32ToFloat32(Register, FloatRegister) { MOZ_CRASH(); }
+ void incrementInt32Value(Address) { MOZ_CRASH(); }
+ void ensureDouble(ValueOperand, FloatRegister, Label*) { MOZ_CRASH(); }
+ void handleFailureWithHandlerTail(Label*, Label*) { MOZ_CRASH(); }
+
+ void buildFakeExitFrame(Register, uint32_t*) { MOZ_CRASH(); }
+ bool buildOOLFakeExitFrame(void*) { MOZ_CRASH(); }
+
+ void setPrinter(Sprinter*) { MOZ_CRASH(); }
+ Operand ToPayload(Operand base) { MOZ_CRASH(); }
+ Address ToPayload(const Address& base) const { return base; }
+
+ Register getStackPointer() const { return StackPointer; }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register, Register) { MOZ_CRASH(); }
+ void profilerExitFrame() { MOZ_CRASH(); }
+
+#ifdef JS_NUNBOX32
+ Address ToType(const Address& address);
+#endif
+};
+
+typedef MacroAssemblerWasm32 MacroAssemblerSpecific;
+
+static inline bool GetTempRegForIntArg(uint32_t, uint32_t, Register*) {
+ MOZ_CRASH();
+}
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_MacroAssembler_wasm32_h */
diff --git a/js/src/jit/wasm32/MoveEmitter-wasm32.h b/js/src/jit/wasm32/MoveEmitter-wasm32.h
new file mode 100644
index 0000000000..01fc494dfd
--- /dev/null
+++ b/js/src/jit/wasm32/MoveEmitter-wasm32.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_MoveEmitter_wasm32_h
+#define jit_wasm32_MoveEmitter_wasm32_h
+
+#include "mozilla/Assertions.h"
+
+namespace js::jit {
+
+class MacroAssemblerWasm32;
+class MoveResolver;
+struct Register;
+
+class MoveEmitterWasm32 {
+ public:
+ explicit MoveEmitterWasm32(MacroAssemblerWasm32&) { MOZ_CRASH(); }
+ void emit(const MoveResolver&) { MOZ_CRASH(); }
+ void finish() { MOZ_CRASH(); }
+ void setScratchRegister(Register) { MOZ_CRASH(); }
+};
+
+typedef MoveEmitterWasm32 MoveEmitter;
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_MoveEmitter_wasm32_h */
diff --git a/js/src/jit/wasm32/SharedICHelpers-wasm32-inl.h b/js/src/jit/wasm32/SharedICHelpers-wasm32-inl.h
new file mode 100644
index 0000000000..d4629dc93f
--- /dev/null
+++ b/js/src/jit/wasm32/SharedICHelpers-wasm32-inl.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_SharedICHelpers_wasm32_inl_h
+#define jit_wasm32_SharedICHelpers_wasm32_inl_h
+
+#include "jit/SharedICHelpers.h"
+
+namespace js::jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr, MacroAssembler&, uint32_t) {
+ MOZ_CRASH();
+}
+inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler&, Register,
+ uint32_t) {
+ MOZ_CRASH();
+}
+inline void EmitBaselineCallVM(TrampolinePtr, MacroAssembler&) { MOZ_CRASH(); }
+
+static const uint32_t STUB_FRAME_SIZE = 0;
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = 0;
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler&, Register) {
+ MOZ_CRASH();
+}
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_SharedICHelpers_wasm32_inl_h */
diff --git a/js/src/jit/wasm32/SharedICHelpers-wasm32.h b/js/src/jit/wasm32/SharedICHelpers-wasm32.h
new file mode 100644
index 0000000000..f45b085838
--- /dev/null
+++ b/js/src/jit/wasm32/SharedICHelpers-wasm32.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_SharedICHelpers_wasm32_h
+#define jit_wasm32_SharedICHelpers_wasm32_h
+
+namespace js::jit {
+
+static const size_t ICStackValueOffset = 0;
+
+inline void EmitRestoreTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitRepushTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitCallIC(MacroAssembler&, CodeOffset*) { MOZ_CRASH(); }
+inline void EmitReturnFromIC(MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitBaselineLeaveStubFrame(MacroAssembler&, bool v = false) {
+ MOZ_CRASH();
+}
+inline void EmitStubGuardFailure(MacroAssembler&) { MOZ_CRASH(); }
+
+template <typename T>
+inline void EmitPreBarrier(MacroAssembler&, T, MIRType) {
+ MOZ_CRASH();
+}
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_SharedICHelpers_wasm32_h */
diff --git a/js/src/jit/wasm32/SharedICRegisters-wasm32.h b/js/src/jit/wasm32/SharedICRegisters-wasm32.h
new file mode 100644
index 0000000000..23e43b2239
--- /dev/null
+++ b/js/src/jit/wasm32/SharedICRegisters-wasm32.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_wasm32_SharedICRegisters_wasm32_h
+#define jit_wasm32_SharedICRegisters_wasm32_h
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/wasm32/MacroAssembler-wasm32.h"
+
+namespace js::jit {
+
+static constexpr Register BaselineStackReg = StackPointer;
+static constexpr Register BaselineFrameReg = FramePointer;
+
+static constexpr ValueOperand R0 = JSReturnOperand;
+static constexpr ValueOperand R1 = JSReturnOperand;
+static constexpr ValueOperand R2 = JSReturnOperand;
+
+static constexpr Register ICTailCallReg{Registers::invalid_reg};
+static constexpr Register ICStubReg{Registers::invalid_reg};
+
+static constexpr Register ExtractTemp0{Registers::invalid_reg};
+static constexpr Register ExtractTemp1{Registers::invalid_reg};
+
+static constexpr FloatRegister FloatReg0 = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister FloatReg1 = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister FloatReg2 = {FloatRegisters::invalid_reg};
+static constexpr FloatRegister FloatReg3 = {FloatRegisters::invalid_reg};
+
+} // namespace js::jit
+
+#endif /* jit_wasm32_SharedICRegisters_wasm32_h */
diff --git a/js/src/jit/wasm32/Trampoline-wasm32.cpp b/js/src/jit/wasm32/Trampoline-wasm32.cpp
new file mode 100644
index 0000000000..7c4c4db348
--- /dev/null
+++ b/js/src/jit/wasm32/Trampoline-wasm32.cpp
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineIC.h"
+#include "jit/JitRuntime.h"
+#include "vm/Realm.h"
+
+using namespace js;
+using namespace js::jit;
+
+void JitRuntime::generateEnterJIT(JSContext*, MacroAssembler&) { MOZ_CRASH(); }
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ return mozilla::Nothing{};
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler&, Label*) { MOZ_CRASH(); }
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler&,
+ ArgumentsRectifierKind kind) {
+ MOZ_CRASH();
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler&, Label*) {
+ MOZ_CRASH();
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext*, MacroAssembler&, MIRType) {
+ MOZ_CRASH();
+ return 0;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler&, Label*) {
+ MOZ_CRASH();
+}
+
+bool JitRuntime::generateVMWrapper(JSContext*, MacroAssembler&,
+ const VMFunctionData&, DynFn, uint32_t*) {
+ MOZ_CRASH();
+}
diff --git a/js/src/jit/x64/Assembler-x64.cpp b/js/src/jit/x64/Assembler-x64.cpp
new file mode 100644
index 0000000000..e00e34e830
--- /dev/null
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -0,0 +1,246 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/Assembler-x64.h"
+
+#include "gc/Tracer.h"
+#include "util/Memory.h"
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ :
+#if defined(XP_WIN)
+ regIndex_(0),
+ stackOffset_(ShadowStackSpace),
+#else
+ intRegIndex_(0),
+ floatRegIndex_(0),
+ stackOffset_(0),
+#endif
+ current_() {
+}
+
+ABIArg ABIArgGenerator::next(MIRType type) {
+#if defined(XP_WIN)
+ static_assert(NumIntArgRegs == NumFloatArgRegs);
+ if (regIndex_ == NumIntArgRegs) {
+ if (type == MIRType::Simd128) {
+ // On Win64, >64 bit args need to be passed by reference. However, wasm
+ // doesn't allow passing SIMD values to JS, so the only way to reach this
+ // is wasm to wasm calls. Ergo we can break the native ABI here and use
+ // the Wasm ABI instead.
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ } else {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ }
+ return current_;
+ }
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults:
+ current_ = ABIArg(IntArgRegs[regIndex_++]);
+ break;
+ case MIRType::Float32:
+ current_ = ABIArg(FloatArgRegs[regIndex_++].asSingle());
+ break;
+ case MIRType::Double:
+ current_ = ABIArg(FloatArgRegs[regIndex_++]);
+ break;
+ case MIRType::Simd128:
+      // On Win64, >64 bit args need to be passed by reference, but wasm
+      // doesn't allow passing SIMD values to FFIs. The only way to reach
+      // here is wasm-to-wasm calls, so we can break the ABI here.
+ current_ = ABIArg(FloatArgRegs[regIndex_++].asSimd128());
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+#else
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ current_ = ABIArg(IntArgRegs[intRegIndex_++]);
+ break;
+ case MIRType::Double:
+ case MIRType::Float32:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ if (type == MIRType::Float32) {
+ current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSingle());
+ } else {
+ current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
+ }
+ break;
+ case MIRType::Simd128:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ break;
+ }
+ current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSimd128());
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+#endif
+}
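+
+// For illustration, on a SysV (non-Windows) target the generator hands out
+// argument locations in declaration order, e.g. for the signature
+// (Int32, Double, Int32):
+//
+//   ABIArgGenerator gen;
+//   gen.next(MIRType::Int32);   // -> rdi  (IntArgRegs[0])
+//   gen.next(MIRType::Double);  // -> xmm0 (FloatArgRegs[0])
+//   gen.next(MIRType::Int32);   // -> rsi  (IntArgRegs[1])
+//
+// Once a register class is exhausted, further arguments of that class are
+// assigned increasing stack offsets instead.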
+
+void Assembler::addPendingJump(JmpSrc src, ImmPtr target,
+ RelocationKind reloc) {
+ MOZ_ASSERT(target.value != nullptr);
+
+ // Emit reloc before modifying the jump table, since it computes a 0-based
+ // index. This jump is not patchable at runtime.
+ if (reloc == RelocationKind::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.offset());
+ }
+
+ static_assert(MaxCodeBytesPerProcess <= uint64_t(2) * 1024 * 1024 * 1024,
+ "Code depends on using int32_t for cross-JitCode jump offsets");
+
+ MOZ_ASSERT_IF(reloc == RelocationKind::JITCODE,
+ AddressIsInExecutableMemory(target.value));
+
+ RelativePatch patch(src.offset(), target.value, reloc);
+ if (reloc == RelocationKind::JITCODE ||
+ AddressIsInExecutableMemory(target.value)) {
+ enoughMemory_ &= codeJumps_.append(patch);
+ } else {
+ enoughMemory_ &= extendedJumps_.append(patch);
+ }
+}
+
+void Assembler::finish() {
+ if (oom()) {
+ return;
+ }
+
+ AutoCreatedBy acb(*this, "Assembler::finish");
+
+ if (!extendedJumps_.length()) {
+    // Since we may be followed by non-executable data, eagerly insert an
+ // undefined instruction byte to prevent processors from decoding
+ // gibberish into their pipelines. See Intel performance guides.
+ masm.ud2();
+ return;
+ }
+
+ // Emit the jump table.
+ masm.haltingAlign(SizeOfJumpTableEntry);
+ extendedJumpTable_ = masm.size();
+
+  // Emit the extended jump table entries; each 64-bit target is left zeroed
+  // here and patched later in executableCopy().
+ for (size_t i = 0; i < extendedJumps_.length(); i++) {
+#ifdef DEBUG
+ size_t oldSize = masm.size();
+#endif
+ MOZ_ASSERT(hasCreator());
+ masm.jmp_rip(2);
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 6);
+ // Following an indirect branch with ud2 hints to the hardware that
+ // there's no fall-through. This also aligns the 64-bit immediate.
+ masm.ud2();
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 8);
+ masm.immediate64(0);
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfExtendedJump);
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfJumpTableEntry);
+ }
+}
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ AssemblerX86Shared::executableCopy(buffer);
+
+ for (RelativePatch& rp : codeJumps_) {
+ uint8_t* src = buffer + rp.offset;
+ MOZ_ASSERT(rp.target);
+
+ MOZ_RELEASE_ASSERT(X86Encoding::CanRelinkJump(src, rp.target));
+ X86Encoding::SetRel32(src, rp.target);
+ }
+
+ for (size_t i = 0; i < extendedJumps_.length(); i++) {
+ RelativePatch& rp = extendedJumps_[i];
+ uint8_t* src = buffer + rp.offset;
+ MOZ_ASSERT(rp.target);
+
+ if (X86Encoding::CanRelinkJump(src, rp.target)) {
+ X86Encoding::SetRel32(src, rp.target);
+ } else {
+ // An extended jump table must exist, and its offset must be in
+ // range.
+ MOZ_ASSERT(extendedJumpTable_);
+ MOZ_ASSERT((extendedJumpTable_ + i * SizeOfJumpTableEntry) <=
+ size() - SizeOfJumpTableEntry);
+
+ // Patch the jump to go to the extended jump entry.
+ uint8_t* entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
+ X86Encoding::SetRel32(src, entry);
+
+      // Now patch the pointer. Note that the address we pass points *after*
+      // the extended jump, i.e. just past the 64-bit immediate.
+ X86Encoding::SetPointer(entry + SizeOfExtendedJump, rp.target);
+ }
+ }
+}
+
+class RelocationIterator {
+ CompactBufferReader reader_;
+ uint32_t offset_ = 0;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}
+
+ bool read() {
+ if (!reader_.more()) {
+ return false;
+ }
+ offset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const { return offset_; }
+};
+
+JitCode* Assembler::CodeFromJump(JitCode* code, uint8_t* jump) {
+ uint8_t* target = (uint8_t*)X86Encoding::GetRel32Target(jump);
+
+ MOZ_ASSERT(!code->containsNativePC(target),
+ "Extended jump table not used for cross-JitCode jumps");
+
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode* child = CodeFromJump(code, code->raw() + iter.offset());
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
+ }
+}
diff --git a/js/src/jit/x64/Assembler-x64.h b/js/src/jit/x64/Assembler-x64.h
new file mode 100644
index 0000000000..2f44d9f3e4
--- /dev/null
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -0,0 +1,1249 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_Assembler_x64_h
+#define jit_x64_Assembler_x64_h
+
+#include <iterator>
+
+#include "jit/JitCode.h"
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register rax{X86Encoding::rax};
+static constexpr Register rbx{X86Encoding::rbx};
+static constexpr Register rcx{X86Encoding::rcx};
+static constexpr Register rdx{X86Encoding::rdx};
+static constexpr Register rsi{X86Encoding::rsi};
+static constexpr Register rdi{X86Encoding::rdi};
+static constexpr Register rbp{X86Encoding::rbp};
+static constexpr Register r8{X86Encoding::r8};
+static constexpr Register r9{X86Encoding::r9};
+static constexpr Register r10{X86Encoding::r10};
+static constexpr Register r11{X86Encoding::r11};
+static constexpr Register r12{X86Encoding::r12};
+static constexpr Register r13{X86Encoding::r13};
+static constexpr Register r14{X86Encoding::r14};
+static constexpr Register r15{X86Encoding::r15};
+static constexpr Register rsp{X86Encoding::rsp};
+
+static constexpr FloatRegister xmm0 =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister xmm1 =
+ FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
+static constexpr FloatRegister xmm2 =
+ FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
+static constexpr FloatRegister xmm3 =
+ FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
+static constexpr FloatRegister xmm4 =
+ FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
+static constexpr FloatRegister xmm5 =
+ FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
+static constexpr FloatRegister xmm6 =
+ FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
+static constexpr FloatRegister xmm7 =
+ FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+static constexpr FloatRegister xmm8 =
+ FloatRegister(X86Encoding::xmm8, FloatRegisters::Double);
+static constexpr FloatRegister xmm9 =
+ FloatRegister(X86Encoding::xmm9, FloatRegisters::Double);
+static constexpr FloatRegister xmm10 =
+ FloatRegister(X86Encoding::xmm10, FloatRegisters::Double);
+static constexpr FloatRegister xmm11 =
+ FloatRegister(X86Encoding::xmm11, FloatRegisters::Double);
+static constexpr FloatRegister xmm12 =
+ FloatRegister(X86Encoding::xmm12, FloatRegisters::Double);
+static constexpr FloatRegister xmm13 =
+ FloatRegister(X86Encoding::xmm13, FloatRegisters::Double);
+static constexpr FloatRegister xmm14 =
+ FloatRegister(X86Encoding::xmm14, FloatRegisters::Double);
+static constexpr FloatRegister xmm15 =
+ FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
+
+// Vector registers fixed for use with some instructions, e.g. PBLENDVB.
+static constexpr FloatRegister vmm0 =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+
+// X86-common synonyms.
+static constexpr Register eax = rax;
+static constexpr Register ebx = rbx;
+static constexpr Register ecx = rcx;
+static constexpr Register edx = rdx;
+static constexpr Register esi = rsi;
+static constexpr Register edi = rdi;
+static constexpr Register ebp = rbp;
+static constexpr Register esp = rsp;
+
+static constexpr Register InvalidReg{X86Encoding::invalid_reg};
+static constexpr FloatRegister InvalidFloatReg = FloatRegister();
+
+static constexpr Register StackPointer = rsp;
+static constexpr Register FramePointer = rbp;
+static constexpr Register JSReturnReg = rcx;
+// Avoid, except for assertions.
+static constexpr Register JSReturnReg_Type = JSReturnReg;
+static constexpr Register JSReturnReg_Data = JSReturnReg;
+
+static constexpr Register ScratchReg = r11;
+
+// Helper class for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of the scratch register.
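+//
+// A minimal usage sketch (assuming AutoRegisterScope's implicit conversion to
+// Register):
+//
+//   ScratchRegisterScope scratch(masm);
+//   masm.movePtr(ImmWord(0), scratch);  // scratch refers to r11 here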
+struct ScratchRegisterScope : public AutoRegisterScope {
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchReg) {}
+};
+
+static constexpr Register ReturnReg = rax;
+static constexpr Register HeapReg = r15;
+static constexpr Register64 ReturnReg64(rax);
+static constexpr FloatRegister ReturnFloat32Reg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
+static constexpr FloatRegister ReturnDoubleReg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister ReturnSimd128Reg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+static constexpr FloatRegister ScratchFloat32Reg_ =
+ FloatRegister(X86Encoding::xmm15, FloatRegisters::Single);
+static constexpr FloatRegister ScratchDoubleReg_ =
+ FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
+static constexpr FloatRegister ScratchSimd128Reg =
+ FloatRegister(X86Encoding::xmm15, FloatRegisters::Simd128);
+
+// Avoid rbp, which is the FramePointer, which is unavailable in some modes.
+static constexpr Register CallTempReg0 = rax;
+static constexpr Register CallTempReg1 = rdi;
+static constexpr Register CallTempReg2 = rbx;
+static constexpr Register CallTempReg3 = rcx;
+static constexpr Register CallTempReg4 = rsi;
+static constexpr Register CallTempReg5 = rdx;
+
+// Different argument registers for WIN64
+#if defined(_WIN64)
+static constexpr Register IntArgReg0 = rcx;
+static constexpr Register IntArgReg1 = rdx;
+static constexpr Register IntArgReg2 = r8;
+static constexpr Register IntArgReg3 = r9;
+static constexpr uint32_t NumIntArgRegs = 4;
+static constexpr Register IntArgRegs[NumIntArgRegs] = {rcx, rdx, r8, r9};
+
+static constexpr Register CallTempNonArgRegs[] = {rax, rdi, rbx, rsi};
+static constexpr uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+static constexpr FloatRegister FloatArgReg0 = xmm0;
+static constexpr FloatRegister FloatArgReg1 = xmm1;
+static constexpr FloatRegister FloatArgReg2 = xmm2;
+static constexpr FloatRegister FloatArgReg3 = xmm3;
+static constexpr uint32_t NumFloatArgRegs = 4;
+static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = {xmm0, xmm1,
+ xmm2, xmm3};
+#else
+static constexpr Register IntArgReg0 = rdi;
+static constexpr Register IntArgReg1 = rsi;
+static constexpr Register IntArgReg2 = rdx;
+static constexpr Register IntArgReg3 = rcx;
+static constexpr Register IntArgReg4 = r8;
+static constexpr Register IntArgReg5 = r9;
+static constexpr uint32_t NumIntArgRegs = 6;
+static constexpr Register IntArgRegs[NumIntArgRegs] = {rdi, rsi, rdx,
+ rcx, r8, r9};
+
+static constexpr Register CallTempNonArgRegs[] = {rax, rbx};
+static constexpr uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+static constexpr FloatRegister FloatArgReg0 = xmm0;
+static constexpr FloatRegister FloatArgReg1 = xmm1;
+static constexpr FloatRegister FloatArgReg2 = xmm2;
+static constexpr FloatRegister FloatArgReg3 = xmm3;
+static constexpr FloatRegister FloatArgReg4 = xmm4;
+static constexpr FloatRegister FloatArgReg5 = xmm5;
+static constexpr FloatRegister FloatArgReg6 = xmm6;
+static constexpr FloatRegister FloatArgReg7 = xmm7;
+static constexpr uint32_t NumFloatArgRegs = 8;
+static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = {
+ xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7};
+#endif
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg1;
+static constexpr Register RegExpExecTestStringReg = CallTempReg2;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg1;
+static constexpr Register RegExpSearcherStringReg = CallTempReg2;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg3;
+
+class ABIArgGenerator {
+#if defined(XP_WIN)
+ unsigned regIndex_;
+#else
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+#endif
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+};
+
+// These registers may be volatile or nonvolatile.
+// Avoid r11, which is the MacroAssembler's ScratchReg.
+static constexpr Register ABINonArgReg0 = rax;
+static constexpr Register ABINonArgReg1 = rbx;
+static constexpr Register ABINonArgReg2 = r10;
+static constexpr Register ABINonArgReg3 = r12;
+
+// This register may be volatile or nonvolatile. Avoid xmm15 which is the
+// ScratchDoubleReg.
+static constexpr FloatRegister ABINonArgDoubleReg =
+ FloatRegister(X86Encoding::xmm8, FloatRegisters::Double);
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different
+static constexpr Register ABINonArgReturnReg0 = r10;
+static constexpr Register ABINonArgReturnReg1 = r12;
+static constexpr Register ABINonVolatileReg = r13;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call which must preserve ABI argument, return, and
+// non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = r10;
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions.
+static constexpr Register InstanceReg = r14;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = rbx;
+
+static constexpr Register OsrFrameReg = IntArgReg3;
+
+static constexpr Register PreBarrierReg = rdx;
+
+static constexpr Register InterpreterPCReg = r14;
+
+static constexpr uint32_t ABIStackAlignment = 16;
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments "
+ "which are used for "
+ "the constant sections of the code buffer. Thus it should be "
+ "larger than the "
+ "alignment for SIMD constants.");
+
+static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+ "Stack alignment should be larger than any of the alignments "
+ "which are used for "
+ "spilled values. Thus it should be larger than the alignment "
+ "for SIMD accesses.");
+
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static constexpr uint32_t WasmTrapInstructionLength = 2;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+static constexpr Scale ScalePointer = TimesEight;
+
+} // namespace jit
+} // namespace js
+
+#include "jit/x86-shared/Assembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+// Return operand from a JS -> JS call.
+static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+class Assembler : public AssemblerX86Shared {
+ // x64 jumps may need extra bits of relocation, because a jump may extend
+ // beyond the signed 32-bit range. To account for this we add an extended
+ // jump table at the bottom of the instruction stream, and if a jump
+ // overflows its range, it will redirect here.
+ //
+ // Each entry in this table is a jmp [rip], followed by a ud2 to hint to the
+ // hardware branch predictor that there is no fallthrough, followed by the
+ // eight bytes containing an immediate address. This comes out to 16 bytes.
+ // +1 byte for opcode
+ // +1 byte for mod r/m
+ // +4 bytes for rip-relative offset (2)
+ // +2 bytes for ud2 instruction
+ // +8 bytes for 64-bit address
+ //
+ static const uint32_t SizeOfExtendedJump = 1 + 1 + 4 + 2 + 8;
+ static const uint32_t SizeOfJumpTableEntry = 16;
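+
+  // For illustration, one extended jump table entry is laid out roughly as:
+  //   jmp *2(%rip)    ; ff 25 02 00 00 00 - the displacement of 2 skips the
+  //                   ;                     ud2 to reach the address below
+  //   ud2             ; 0f 0b
+  //   .quad <target>  ; the 8-byte address loaded by the jmp above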
+
+ // Two kinds of jumps on x64:
+ //
+ // * codeJumps_ tracks jumps with target within the executable code region
+ // for the process. These jumps don't need entries in the extended jump
+ // table because source and target must be within 2 GB of each other.
+ //
+ // * extendedJumps_ tracks jumps with target outside the executable code
+ // region. These jumps need entries in the extended jump table described
+ // above.
+ using PendingJumpVector = Vector<RelativePatch, 8, SystemAllocPolicy>;
+ PendingJumpVector codeJumps_;
+ PendingJumpVector extendedJumps_;
+
+ uint32_t extendedJumpTable_;
+
+ static JitCode* CodeFromJump(JitCode* code, uint8_t* jump);
+
+ private:
+ void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind reloc);
+
+ public:
+ using AssemblerX86Shared::j;
+ using AssemblerX86Shared::jmp;
+ using AssemblerX86Shared::pop;
+ using AssemblerX86Shared::push;
+ using AssemblerX86Shared::vmovq;
+
+ Assembler() : extendedJumpTable_(0) {}
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+  // The buffer is about to be linked; make sure any constant pools or excess
+  // bookkeeping have been flushed to the instruction stream.
+ void finish();
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ void assertNoGCThings() const {
+#ifdef DEBUG
+ MOZ_ASSERT(dataRelocations_.length() == 0);
+ for (auto& j : codeJumps_) {
+ MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+ }
+ for (auto& j : extendedJumps_) {
+ MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+ }
+#endif
+ }
+
+ // Actual assembly emitting functions.
+
+ void push(const ImmGCPtr ptr) {
+ movq(ptr, ScratchReg);
+ push(ScratchReg);
+ }
+ void push(const ImmWord ptr) {
+ // We often end up with ImmWords that actually fit into int32.
+ // Be aware of the sign extension behavior.
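+    // For illustration: push(ImmWord(0x7fffffff)) stays a single push imm32,
+    // while push(ImmWord(0x80000000)) must go through ScratchReg because the
+    // imm32 form would sign-extend it to 0xffffffff80000000.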
+ if (ptr.value <= INT32_MAX) {
+ push(Imm32(ptr.value));
+ } else {
+ movq(ptr, ScratchReg);
+ push(ScratchReg);
+ }
+ }
+ void push(ImmPtr imm) { push(ImmWord(uintptr_t(imm.value))); }
+ void push(FloatRegister src) {
+ subq(Imm32(sizeof(double)), StackPointer);
+ vmovsd(src, Address(StackPointer, 0));
+ }
+ CodeOffset pushWithPatch(ImmWord word) {
+ CodeOffset label = movWithPatch(word, ScratchReg);
+ push(ScratchReg);
+ return label;
+ }
+
+ void pop(FloatRegister src) {
+ vmovsd(Address(StackPointer, 0), src);
+ addq(Imm32(sizeof(double)), StackPointer);
+ }
+
+ CodeOffset movWithPatch(ImmWord word, Register dest) {
+ masm.movq_i64r(word.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ // This is for patching during code generation, not after.
+ void patchAddq(CodeOffset offset, int32_t n) {
+ unsigned char* code = masm.data();
+ X86Encoding::SetInt32(code + offset.offset(), n);
+ }
+
+ // Load an ImmWord value into a register. Note that this instruction will
+ // attempt to optimize its immediate field size. When a full 64-bit
+ // immediate is needed for a relocation, use movWithPatch.
+ void movq(ImmWord word, Register dest) {
+ // Load a 64-bit immediate into a register. If the value falls into
+ // certain ranges, we can use specialized instructions which have
+ // smaller encodings.
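+    // For illustration: 0x12345678 takes the 32-bit movl form (zero-extended),
+    // uint64_t(-1) takes movq with a sign-extended imm32, and uint64_t(1) << 40
+    // needs the full movabs form.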
+ if (word.value <= UINT32_MAX) {
+ // movl has a 32-bit unsigned (effectively) immediate field.
+ masm.movl_i32r((uint32_t)word.value, dest.encoding());
+ } else if ((intptr_t)word.value >= INT32_MIN &&
+ (intptr_t)word.value <= INT32_MAX) {
+ // movq has a 32-bit signed immediate field.
+ masm.movq_i32r((int32_t)(intptr_t)word.value, dest.encoding());
+ } else {
+ // Otherwise use movabs.
+ masm.movq_i64r(word.value, dest.encoding());
+ }
+ }
+ void movq(ImmPtr imm, Register dest) {
+ movq(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void movq(ImmGCPtr ptr, Register dest) {
+ masm.movq_i64r(uintptr_t(ptr.value), dest.encoding());
+ writeDataRelocation(ptr);
+ }
+ void movq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movq_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movq(Imm32 imm32, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_i32r(imm32.value, dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movq_i32m(imm32.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movq_i32m(imm32.value, dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movq_i32m(imm32.value, dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovq(Register src, FloatRegister dest) {
+ masm.vmovq_rr(src.encoding(), dest.encoding());
+ }
+ void vmovq(FloatRegister src, Register dest) {
+ masm.vmovq_rr(src.encoding(), dest.encoding());
+ }
+ void movq(Register src, Register dest) {
+ masm.movq_rr(src.encoding(), dest.encoding());
+ }
+
+ void cmovCCq(Condition cond, const Operand& src, Register dest) {
+ X86Encoding::Condition cc = static_cast<X86Encoding::Condition>(cond);
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.cmovCCq_rr(cc, src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmovCCq_mr(cc, src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmovCCq_mr(cc, src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmovCCq(Condition cond, Register src, Register dest) {
+ X86Encoding::Condition cc = static_cast<X86Encoding::Condition>(cond);
+ masm.cmovCCq_rr(cc, src.encoding(), dest.encoding());
+ }
+
+ void cmovzq(const Operand& src, Register dest) {
+ cmovCCq(Condition::Zero, src, dest);
+ }
+ void cmovnzq(const Operand& src, Register dest) {
+ cmovCCq(Condition::NonZero, src, dest);
+ }
+
+ template <typename T>
+ void lock_addq(T src, const Operand& op) {
+ masm.prefix_lock();
+ addq(src, op);
+ }
+ template <typename T>
+ void lock_subq(T src, const Operand& op) {
+ masm.prefix_lock();
+ subq(src, op);
+ }
+ template <typename T>
+ void lock_andq(T src, const Operand& op) {
+ masm.prefix_lock();
+ andq(src, op);
+ }
+ template <typename T>
+ void lock_orq(T src, const Operand& op) {
+ masm.prefix_lock();
+ orq(src, op);
+ }
+ template <typename T>
+ void lock_xorq(T src, const Operand& op) {
+ masm.prefix_lock();
+ xorq(src, op);
+ }
+
+ void lock_cmpxchgq(Register src, const Operand& mem) {
+ masm.prefix_lock();
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.cmpxchgq(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpxchgq(src.encoding(), mem.disp(), mem.base(), mem.index(),
+ mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void xchgq(Register src, Register dest) {
+ masm.xchgq_rr(src.encoding(), dest.encoding());
+ }
+
+ void xchgq(Register src, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xchgq_rm(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xchgq_rm(src.encoding(), mem.disp(), mem.base(), mem.index(),
+ mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void lock_xaddq(Register srcdest, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.lock_xaddq_rm(srcdest.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.lock_xaddq_rm(srcdest.encoding(), mem.disp(), mem.base(),
+ mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void movsbq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movsbq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movsbq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movsbq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void movzbq(const Operand& src, Register dest) {
+ // movzbl zero-extends to 64 bits and is one byte smaller, so use that
+ // instead.
+ movzbl(src, dest);
+ }
+
+ void movswq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movswq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movswq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movswq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void movzwq(const Operand& src, Register dest) {
+ // movzwl zero-extends to 64 bits and is one byte smaller, so use that
+ // instead.
+ movzwl(src, dest);
+ }
+
+ void movslq(Register src, Register dest) {
+ masm.movslq_rr(src.encoding(), dest.encoding());
+ }
+ void movslq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movslq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movslq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movslq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void andq(Register src, Register dest) {
+ masm.andq_rr(src.encoding(), dest.encoding());
+ }
+ void andq(Imm32 imm, Register dest) {
+ masm.andq_ir(imm.value, dest.encoding());
+ }
+ void andq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.andq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.andq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.andq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void addq(Imm32 imm, Register dest) {
+ masm.addq_ir(imm.value, dest.encoding());
+ }
+ CodeOffset addqWithPatch(Imm32 imm, Register dest) {
+ masm.addq_i32r(imm.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ void addq(Imm32 imm, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.addq_ir(imm.value, dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addq_im(imm.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addq_im(imm.value, dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addq(Register src, Register dest) {
+ masm.addq_rr(src.encoding(), dest.encoding());
+ }
+ void addq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.addq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addq_mr(src.address(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.addq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void subq(Imm32 imm, Register dest) {
+ masm.subq_ir(imm.value, dest.encoding());
+ }
+ void subq(Register src, Register dest) {
+ masm.subq_rr(src.encoding(), dest.encoding());
+ }
+ void subq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.subq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.subq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.subq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void shlq(Imm32 imm, Register dest) {
+ masm.shlq_ir(imm.value, dest.encoding());
+ }
+ void shrq(Imm32 imm, Register dest) {
+ masm.shrq_ir(imm.value, dest.encoding());
+ }
+ void sarq(Imm32 imm, Register dest) {
+ masm.sarq_ir(imm.value, dest.encoding());
+ }
+ void shlq_cl(Register dest) { masm.shlq_CLr(dest.encoding()); }
+ void shrq_cl(Register dest) { masm.shrq_CLr(dest.encoding()); }
+ void sarq_cl(Register dest) { masm.sarq_CLr(dest.encoding()); }
+ void sarxq(Register src, Register shift, Register dest) {
+ MOZ_ASSERT(HasBMI2());
+ masm.sarxq_rrr(src.encoding(), shift.encoding(), dest.encoding());
+ }
+ void shlxq(Register src, Register shift, Register dest) {
+ MOZ_ASSERT(HasBMI2());
+ masm.shlxq_rrr(src.encoding(), shift.encoding(), dest.encoding());
+ }
+ void shrxq(Register src, Register shift, Register dest) {
+ MOZ_ASSERT(HasBMI2());
+ masm.shrxq_rrr(src.encoding(), shift.encoding(), dest.encoding());
+ }
+ void rolq(Imm32 imm, Register dest) {
+ masm.rolq_ir(imm.value, dest.encoding());
+ }
+ void rolq_cl(Register dest) { masm.rolq_CLr(dest.encoding()); }
+ void rorq(Imm32 imm, Register dest) {
+ masm.rorq_ir(imm.value, dest.encoding());
+ }
+ void rorq_cl(Register dest) { masm.rorq_CLr(dest.encoding()); }
+ void orq(Imm32 imm, Register dest) {
+ masm.orq_ir(imm.value, dest.encoding());
+ }
+ void orq(Register src, Register dest) {
+ masm.orq_rr(src.encoding(), dest.encoding());
+ }
+ void orq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.orq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.orq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.orq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorq(Register src, Register dest) {
+ masm.xorq_rr(src.encoding(), dest.encoding());
+ }
+ void xorq(Imm32 imm, Register dest) {
+ masm.xorq_ir(imm.value, dest.encoding());
+ }
+ void xorq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.xorq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.xorq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.xorq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void bsrq(const Register& src, const Register& dest) {
+ masm.bsrq_rr(src.encoding(), dest.encoding());
+ }
+ void bsfq(const Register& src, const Register& dest) {
+ masm.bsfq_rr(src.encoding(), dest.encoding());
+ }
+ void bswapq(const Register& reg) { masm.bswapq_r(reg.encoding()); }
+ void lzcntq(const Register& src, const Register& dest) {
+ masm.lzcntq_rr(src.encoding(), dest.encoding());
+ }
+ void tzcntq(const Register& src, const Register& dest) {
+ masm.tzcntq_rr(src.encoding(), dest.encoding());
+ }
+ void popcntq(const Register& src, const Register& dest) {
+ masm.popcntq_rr(src.encoding(), dest.encoding());
+ }
+
+ void imulq(Imm32 imm, Register src, Register dest) {
+ masm.imulq_ir(imm.value, src.encoding(), dest.encoding());
+ }
+ void imulq(Register src, Register dest) {
+ masm.imulq_rr(src.encoding(), dest.encoding());
+ }
+ void imulq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.imulq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.imulq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ MOZ_CRASH("NYI");
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void cqo() { masm.cqo(); }
+ void idivq(Register divisor) { masm.idivq_r(divisor.encoding()); }
+ void udivq(Register divisor) { masm.divq_r(divisor.encoding()); }
+
+ void vcvtsi2sdq(Register src, FloatRegister dest) {
+ masm.vcvtsi2sdq_rr(src.encoding(), dest.encoding());
+ }
+
+ void vpextrq(unsigned lane, FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpextrq_irr(lane, src.encoding(), dest.encoding());
+ }
+
+ void vpinsrq(unsigned lane, Register src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpinsrq_irr(lane, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+
+ void negq(Register reg) { masm.negq_r(reg.encoding()); }
+
+ void notq(Register reg) { masm.notq_r(reg.encoding()); }
+
+ void mov(ImmWord word, Register dest) {
+ // Use xor for setting registers to zero, as it is specially optimized
+ // for this purpose on modern hardware. Note that it does clobber FLAGS
+ // though. Use xorl instead of xorq since they are functionally
+ // equivalent (32-bit instructions zero-extend their results to 64 bits)
+ // and xorl has a smaller encoding.
+ if (word.value == 0) {
+ xorl(dest, dest);
+ } else {
+ movq(word, dest);
+ }
+ }
+ void mov(ImmPtr imm, Register dest) { movq(imm, dest); }
+ void mov(wasm::SymbolicAddress imm, Register dest) {
+ masm.movq_i64r(-1, dest.encoding());
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
+ }
+ void mov(const Operand& src, Register dest) { movq(src, dest); }
+ void mov(Register src, const Operand& dest) { movq(src, dest); }
+ void mov(Imm32 imm32, const Operand& dest) { movq(imm32, dest); }
+ void mov(Register src, Register dest) { movq(src, dest); }
+ void mov(CodeLabel* label, Register dest) {
+ masm.movq_i64r(/* placeholder */ 0, dest.encoding());
+ label->patchAt()->bind(masm.size());
+ }
+ void xchg(Register src, Register dest) { xchgq(src, dest); }
+
+ void lea(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.leaq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.leaq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexepcted operand kind");
+ }
+ }
+
+ void cmovz32(const Operand& src, Register dest) { return cmovzl(src, dest); }
+ void cmovzPtr(const Operand& src, Register dest) { return cmovzq(src, dest); }
+
+ CodeOffset loadRipRelativeInt32(Register dest) {
+ return CodeOffset(masm.movl_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeInt64(Register dest) {
+ return CodeOffset(masm.movq_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeDouble(FloatRegister dest) {
+ return CodeOffset(masm.vmovsd_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeFloat32(FloatRegister dest) {
+ return CodeOffset(masm.vmovss_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeInt32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovdqa_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeFloat32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovaps_ripr(dest.encoding()).offset());
+ }
+ CodeOffset leaRipRelative(Register dest) {
+ return CodeOffset(masm.leaq_rip(dest.encoding()).offset());
+ }
+
+ void cmpq(Register rhs, Register lhs) {
+ masm.cmpq_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpq(Register rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpq_rr(rhs.encoding(), lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpq_rm(rhs.encoding(), lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpq_rm(rhs.encoding(), lhs.disp(), lhs.base(), lhs.index(),
+ lhs.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpq_rm(rhs.encoding(), lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpq(Imm32 rhs, Register lhs) {
+ masm.cmpq_ir(rhs.value, lhs.encoding());
+ }
+ void cmpq(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpq_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpq_im(rhs.value, lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpq_im(rhs.value, lhs.disp(), lhs.base(), lhs.index(),
+ lhs.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpq_im(rhs.value, lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpq(const Operand& rhs, Register lhs) {
+ switch (rhs.kind()) {
+ case Operand::REG:
+ masm.cmpq_rr(rhs.reg(), lhs.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpq_mr(rhs.disp(), rhs.base(), lhs.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void testq(Imm32 rhs, Register lhs) {
+ masm.testq_ir(rhs.value, lhs.encoding());
+ }
+ void testq(Register rhs, Register lhs) {
+ masm.testq_rr(rhs.encoding(), lhs.encoding());
+ }
+ void testq(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.testq_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.testq_i32m(rhs.value, lhs.disp(), lhs.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
+ MOZ_ASSERT(hasCreator());
+ JmpSrc src = masm.jmp();
+ addPendingJump(src, target, reloc);
+ }
+ void j(Condition cond, ImmPtr target,
+ RelocationKind reloc = RelocationKind::HARDCODED) {
+ JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ addPendingJump(src, target, reloc);
+ }
+
+ void jmp(JitCode* target) {
+ jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+ void j(Condition cond, JitCode* target) {
+ j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+ void call(JitCode* target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+ void call(ImmWord target) { call(ImmPtr((void*)target.value)); }
+ void call(ImmPtr target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, target, RelocationKind::HARDCODED);
+ }
+
+ // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled) {
+ CodeOffset offset(size());
+ JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
+ addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+ }
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Size of a call instruction.
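+    // For illustration: both forms emitted by toggledCall above - a call
+    // rel32 (0xE8 + rel32) and a cmp eax, imm32 (0x3D + imm32) - are 5 bytes,
+    // which is what lets ToggleCall patch one into the other in place.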
+ return 5;
+ }
+
+ // Do not mask shared implementations.
+ using AssemblerX86Shared::call;
+
+ void vcvttsd2sq(FloatRegister src, Register dest) {
+ masm.vcvttsd2sq_rr(src.encoding(), dest.encoding());
+ }
+ void vcvttss2sq(FloatRegister src, Register dest) {
+ masm.vcvttss2sq_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtsq2sd(Register src1, FloatRegister src0, FloatRegister dest) {
+ masm.vcvtsq2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vcvtsq2ss(Register src1, FloatRegister src0, FloatRegister dest) {
+ masm.vcvtsq2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+};
+
+static inline bool GetIntArgReg(uint32_t intArg, uint32_t floatArg,
+ Register* out) {
+#if defined(_WIN64)
+ uint32_t arg = intArg + floatArg;
+#else
+ uint32_t arg = intArg;
+#endif
+ if (arg >= NumIntArgRegs) {
+ return false;
+ }
+ *out = IntArgRegs[arg];
+ return true;
+}
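+
+// For illustration (with the x64 argument register sets defined earlier in
+// this header): GetIntArgReg(1, 2, &reg) yields IntArgRegs[3] (r9) on Win64,
+// where integer and float arguments share the four register slots, but
+// IntArgRegs[1] (rsi) on System V, where the two kinds are counted
+// independently.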
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, once the actual
+// argument registers are exhausted, it falls back on whichever CallTempReg*
+// registers don't overlap the argument registers, and only fails once those
+// run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) {
+ return true;
+ }
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+#if defined(_WIN64)
+ uint32_t arg = usedIntArgs + usedFloatArgs;
+#else
+ uint32_t arg = usedIntArgs;
+#endif
+ arg -= NumIntArgRegs;
+ if (arg >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[arg];
+ return true;
+}
+
+static inline bool GetFloatArgReg(uint32_t intArg, uint32_t floatArg,
+ FloatRegister* out) {
+#if defined(_WIN64)
+ uint32_t arg = intArg + floatArg;
+#else
+ uint32_t arg = floatArg;
+#endif
+  if (arg >= NumFloatArgRegs) {
+ return false;
+ }
+ *out = FloatArgRegs[arg];
+ return true;
+}
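+
+// For illustration: on System V the eight float argument registers
+// (xmm0-xmm7) are consumed independently of the integer ones, while on Win64
+// the four xmm argument registers share their slots with rcx/rdx/r8/r9,
+// which is why the index above mixes intArg and floatArg on that platform.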
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_Assembler_x64_h */
diff --git a/js/src/jit/x64/BaseAssembler-x64.h b/js/src/jit/x64/BaseAssembler-x64.h
new file mode 100644
index 0000000000..f5a9bb99f9
--- /dev/null
+++ b/js/src/jit/x64/BaseAssembler-x64.h
@@ -0,0 +1,1373 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_BaseAssembler_x64_h
+#define jit_x64_BaseAssembler_x64_h
+
+#include "jit/x86-shared/BaseAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+class BaseAssemblerX64 : public BaseAssembler {
+ public:
+ // Arithmetic operations:
+
+ void addq_rr(RegisterID src, RegisterID dst) {
+ spew("addq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, src, dst);
+ }
+
+ void addq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("addq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, offset, base, dst);
+ }
+
+ void addq_mr(const void* addr, RegisterID dst) {
+ spew("addq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, addr, dst);
+ }
+
+ void addq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("addq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, offset, base, index, scale, dst);
+ }
+
+ void addq_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("addq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_ADD_EvGv, offset, base, src);
+ }
+
+ void addq_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("addq %s, " MEM_obs, GPReg64Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_ADD_EvGv, offset, base, index, scale, src);
+ }
+
+ void addq_ir(int32_t imm, RegisterID dst) {
+ spew("addq $%d, %s", imm, GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp64(OP_ADD_EAXIv);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_i32r(int32_t imm, RegisterID dst) {
+ // 32-bit immediate always, for patching.
+ spew("addq $0x%04x, %s", uint32_t(imm), GPReg64Name(dst));
+ if (dst == rax) {
+ m_formatter.oneByteOp64(OP_ADD_EAXIv);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ }
+ m_formatter.immediate32(imm);
+ }
+
+ void addq_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("addq $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_im(int32_t imm, const void* addr) {
+ spew("addq $%d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andq_rr(RegisterID src, RegisterID dst) {
+ spew("andq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, src, dst);
+ }
+
+ void andq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("andq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, dst);
+ }
+
+ void andq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("andq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, index, scale, dst);
+ }
+
+ void andq_mr(const void* addr, RegisterID dst) {
+ spew("andq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, addr, dst);
+ }
+
+ void andq_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("andq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_AND_EvGv, offset, base, src);
+ }
+
+ void andq_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("andq %s, " MEM_obs, GPReg64Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_AND_EvGv, offset, base, index, scale, src);
+ }
+
+ void orq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("orq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_OR_GvEv, offset, base, dst);
+ }
+
+ void orq_mr(const void* addr, RegisterID dst) {
+ spew("orq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_OR_GvEv, addr, dst);
+ }
+
+ void orq_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("orq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_OR_EvGv, offset, base, src);
+ }
+
+ void orq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("orq %s, " MEM_obs, GPReg64Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_OR_EvGv, offset, base, index, scale, src);
+ }
+
+ void xorq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("xorq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, offset, base, dst);
+ }
+
+ void xorq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("xorq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, offset, base, index, scale, dst);
+ }
+
+ void xorq_mr(const void* addr, RegisterID dst) {
+ spew("xorq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, addr, dst);
+ }
+
+ void xorq_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("xorq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_XOR_EvGv, offset, base, src);
+ }
+
+ void xorq_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("xorq %s, " MEM_obs, GPReg64Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_XOR_EvGv, offset, base, index, scale, src);
+ }
+
+ void bswapq_r(RegisterID dst) {
+ spew("bswapq %s", GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_BSWAP, dst);
+ }
+
+ void bsrq_rr(RegisterID src, RegisterID dst) {
+ spew("bsrq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_BSR_GvEv, src, dst);
+ }
+
+ void bsfq_rr(RegisterID src, RegisterID dst) {
+ spew("bsfq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_BSF_GvEv, src, dst);
+ }
+
+ void lzcntq_rr(RegisterID src, RegisterID dst) {
+ spew("lzcntq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.legacySSEPrefix(VEX_SS);
+ m_formatter.twoByteOp64(OP2_LZCNT_GvEv, src, dst);
+ }
+
+ void tzcntq_rr(RegisterID src, RegisterID dst) {
+ spew("tzcntq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.legacySSEPrefix(VEX_SS);
+ m_formatter.twoByteOp64(OP2_TZCNT_GvEv, src, dst);
+ }
+
+ void popcntq_rr(RegisterID src, RegisterID dst) {
+ spew("popcntq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.legacySSEPrefix(VEX_SS);
+ m_formatter.twoByteOp64(OP2_POPCNT_GvEv, src, dst);
+ }
+
+ void andq_ir(int32_t imm, RegisterID dst) {
+ spew("andq $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp64(OP_AND_EAXIv);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void negq_r(RegisterID dst) {
+ spew("negq %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NEG);
+ }
+
+ void orq_rr(RegisterID src, RegisterID dst) {
+ spew("orq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_OR_GvEv, src, dst);
+ }
+
+ void orq_ir(int32_t imm, RegisterID dst) {
+ spew("orq $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp64(OP_OR_EAXIv);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void notq_r(RegisterID dst) {
+ spew("notq %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NOT);
+ }
+
+ void subq_rr(RegisterID src, RegisterID dst) {
+ spew("subq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_SUB_GvEv, src, dst);
+ }
+
+ void subq_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("subq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_SUB_EvGv, offset, base, src);
+ }
+
+ void subq_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("subq %s, " MEM_obs, GPReg64Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_SUB_EvGv, offset, base, index, scale, src);
+ }
+
+ void subq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("subq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_SUB_GvEv, offset, base, dst);
+ }
+
+ void subq_mr(const void* addr, RegisterID dst) {
+ spew("subq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_SUB_GvEv, addr, dst);
+ }
+
+ void subq_ir(int32_t imm, RegisterID dst) {
+ spew("subq $%d, %s", imm, GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp64(OP_SUB_EAXIv);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorq_rr(RegisterID src, RegisterID dst) {
+ spew("xorq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, src, dst);
+ }
+
+ void xorq_ir(int32_t imm, RegisterID dst) {
+ spew("xorq $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp64(OP_XOR_EAXIv);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void sarq_CLr(RegisterID dst) {
+ spew("sarq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SAR);
+ }
+
+ void shlq_CLr(RegisterID dst) {
+ spew("shlq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHL);
+ }
+
+ void shrq_CLr(RegisterID dst) {
+ spew("shrq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHR);
+ }
+
+ void sarq_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 64);
+ spew("sarq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SAR);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SAR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shlq_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 64);
+ spew("shlq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHL);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shrq_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 64);
+ spew("shrq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHR);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void rolq_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 64);
+ spew("rolq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void rolq_CLr(RegisterID dst) {
+ spew("rolq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROL);
+ }
+
+ void rorq_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 64);
+ spew("rorq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROR);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void rorq_CLr(RegisterID dst) {
+ spew("rorq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROR);
+ }
+
+ void imulq_rr(RegisterID src, RegisterID dst) {
+ spew("imulq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_IMUL_GvEv, src, dst);
+ }
+
+ void imulq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("imulq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_IMUL_GvEv, offset, base, dst);
+ }
+
+ void imulq_ir(int32_t value, RegisterID src, RegisterID dst) {
+ spew("imulq $%d, %s, %s", value, GPReg64Name(src), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(value)) {
+ m_formatter.oneByteOp64(OP_IMUL_GvEvIb, src, dst);
+ m_formatter.immediate8s(value);
+ } else {
+ m_formatter.oneByteOp64(OP_IMUL_GvEvIz, src, dst);
+ m_formatter.immediate32(value);
+ }
+ }
+
+ void cqo() {
+ spew("cqo ");
+ m_formatter.oneByteOp64(OP_CDQ);
+ }
+
+ void idivq_r(RegisterID divisor) {
+ spew("idivq %s", GPReg64Name(divisor));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_IDIV);
+ }
+
+ void divq_r(RegisterID divisor) {
+ spew("divq %s", GPReg64Name(divisor));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_DIV);
+ }
+
+ // Comparisons:
+
+ void cmpq_rr(RegisterID rhs, RegisterID lhs) {
+ spew("cmpq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
+ m_formatter.oneByteOp64(OP_CMP_GvEv, rhs, lhs);
+ }
+
+ void cmpq_rm(RegisterID rhs, int32_t offset, RegisterID base) {
+ spew("cmpq %s, " MEM_ob, GPReg64Name(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_CMP_EvGv, offset, base, rhs);
+ }
+
+ void cmpq_rm(RegisterID rhs, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("cmpq %s, " MEM_obs, GPReg64Name(rhs),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_CMP_EvGv, offset, base, index, scale, rhs);
+ }
+
+ void cmpq_mr(int32_t offset, RegisterID base, RegisterID lhs) {
+ spew("cmpq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(lhs));
+ m_formatter.oneByteOp64(OP_CMP_GvEv, offset, base, lhs);
+ }
+
+ void cmpq_ir(int32_t rhs, RegisterID lhs) {
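+    // Comparing against zero is emitted as testq lhs, lhs instead: it is
+    // shorter and sets the flags equivalently for the condition codes used
+    // here.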
+ if (rhs == 0) {
+ testq_rr(lhs, lhs);
+ return;
+ }
+
+ spew("cmpq $0x%" PRIx64 ", %s", uint64_t(rhs), GPReg64Name(lhs));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ if (lhs == rax) {
+ m_formatter.oneByteOp64(OP_CMP_EAXIv);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
+ }
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpq_im(int32_t rhs, int32_t offset, RegisterID base) {
+ spew("cmpq $0x%" PRIx64 ", " MEM_ob, uint64_t(rhs),
+ ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpq_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("cmpq $0x%x, " MEM_obs, uint32_t(rhs),
+ ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+ void cmpq_im(int32_t rhs, const void* addr) {
+ spew("cmpq $0x%" PRIx64 ", %p", uint64_t(rhs), addr);
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+ void cmpq_rm(RegisterID rhs, const void* addr) {
+ spew("cmpq %s, %p", GPReg64Name(rhs), addr);
+ m_formatter.oneByteOp64(OP_CMP_EvGv, addr, rhs);
+ }
+
+ void testq_rr(RegisterID rhs, RegisterID lhs) {
+ spew("testq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
+ m_formatter.oneByteOp64(OP_TEST_EvGv, lhs, rhs);
+ }
+
+ void testq_ir(int32_t rhs, RegisterID lhs) {
+    // If the mask zero-extends from 32 bits, we can use testl with a
+    // 32-bit subreg.
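+    // For illustration: testq_ir(0x7f, rax) can be emitted as testl because
+    // the zero-extended mask never tests the upper 32 bits anyway, while
+    // testq_ir(-1, rax) must keep the 64-bit form since its imm32
+    // sign-extends to an all-ones mask.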
+ if (CAN_ZERO_EXTEND_32_64(rhs)) {
+ testl_ir(rhs, lhs);
+ return;
+ }
+ spew("testq $0x%" PRIx64 ", %s", uint64_t(rhs), GPReg64Name(lhs));
+ if (lhs == rax) {
+ m_formatter.oneByteOp64(OP_TEST_EAXIv);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, lhs, GROUP3_OP_TEST);
+ }
+ m_formatter.immediate32(rhs);
+ }
+
+ void testq_i32m(int32_t rhs, int32_t offset, RegisterID base) {
+ spew("testq $0x%" PRIx64 ", " MEM_ob, uint64_t(rhs),
+ ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testq_i32m(int32_t rhs, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("testq $0x%4x, " MEM_obs, uint32_t(rhs),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, index, scale,
+ GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ // Various move ops:
+
+ void cmovCCq_rr(Condition cond, RegisterID src, RegisterID dst) {
+ spew("cmov%s %s, %s", CCName(cond), GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(cmovccOpcode(cond), src, dst);
+ }
+ void cmovCCq_mr(Condition cond, int32_t offset, RegisterID base,
+ RegisterID dst) {
+ spew("cmov%s " MEM_ob ", %s", CCName(cond), ADDR_ob(offset, base),
+ GPReg64Name(dst));
+ m_formatter.twoByteOp64(cmovccOpcode(cond), offset, base, dst);
+ }
+ void cmovCCq_mr(Condition cond, int32_t offset, RegisterID base,
+ RegisterID index, int scale, RegisterID dst) {
+ spew("cmov%s " MEM_obs ", %s", CCName(cond),
+ ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.twoByteOp64(cmovccOpcode(cond), offset, base, index, scale,
+ dst);
+ }
+
+ void cmpxchgq(RegisterID src, int32_t offset, RegisterID base) {
+ spew("cmpxchgq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.twoByteOp64(OP2_CMPXCHG_GvEw, offset, base, src);
+ }
+
+ void cmpxchgq(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("cmpxchgq %s, " MEM_obs, GPReg64Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.twoByteOp64(OP2_CMPXCHG_GvEw, offset, base, index, scale, src);
+ }
+
+ void lock_xaddq_rm(RegisterID srcdest, int32_t offset, RegisterID base) {
+ spew("lock xaddq %s, " MEM_ob, GPReg64Name(srcdest), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp64(OP2_XADD_EvGv, offset, base, srcdest);
+ }
+
+ void lock_xaddq_rm(RegisterID srcdest, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("lock xaddq %s, " MEM_obs, GPReg64Name(srcdest),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp64(OP2_XADD_EvGv, offset, base, index, scale, srcdest);
+ }
+
+ void xchgq_rr(RegisterID src, RegisterID dst) {
+ spew("xchgq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XCHG_GvEv, src, dst);
+ }
+ void xchgq_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("xchgq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, src);
+ }
+ void xchgq_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("xchgq %s, " MEM_obs, GPReg64Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, index, scale, src);
+ }
+
+ void movq_rr(RegisterID src, RegisterID dst) {
+ spew("movq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_EvGv, dst, src);
+ }
+
+ void movq_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("movq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movq_rm_disp32(RegisterID src, int32_t offset, RegisterID base) {
+ spew("movq %s, " MEM_o32b, GPReg64Name(src), ADDR_o32b(offset, base));
+ m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movq_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("movq %s, " MEM_obs, GPReg64Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, index, scale, src);
+ }
+
+ void movq_rm(RegisterID src, const void* addr) {
+ if (src == rax && !IsAddressImmediate(addr)) {
+ movq_EAXm(addr);
+ return;
+ }
+
+ spew("movq %s, %p", GPReg64Name(src), addr);
+ m_formatter.oneByteOp64(OP_MOV_EvGv, addr, src);
+ }
+
+ void movq_mEAX(const void* addr) {
+ if (IsAddressImmediate(addr)) {
+ movq_mr(addr, rax);
+ return;
+ }
+
+ spew("movq %p, %%rax", addr);
+ m_formatter.oneByteOp64(OP_MOV_EAXOv);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_EAXm(const void* addr) {
+ if (IsAddressImmediate(addr)) {
+ movq_rm(rax, addr);
+ return;
+ }
+
+ spew("movq %%rax, %p", addr);
+ m_formatter.oneByteOp64(OP_MOV_OvEAX);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movq_mr_disp32(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movq " MEM_o32b ", %s", ADDR_o32b(offset, base),
+ GPReg64Name(dst));
+ m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, index, scale, dst);
+ }
+
+ void movq_mr(const void* addr, RegisterID dst) {
+ if (dst == rax && !IsAddressImmediate(addr)) {
+ movq_mEAX(addr);
+ return;
+ }
+
+ spew("movq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, addr, dst);
+ }
+
+ void leaq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("leaq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_LEA, offset, base, index, scale, dst);
+ }
+
+ void movq_i32m(int32_t imm, int32_t offset, RegisterID base) {
+ spew("movq $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+ void movq_i32m(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("movq $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, index, scale,
+ GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+ void movq_i32m(int32_t imm, const void* addr) {
+ spew("movq $%d, %p", imm, addr);
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, addr, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ // Note that this instruction sign-extends its 32-bit immediate field to 64
+ // bits and loads the 64-bit value into a 64-bit register.
+ //
+ // Note also that this is similar to the movl_i32r instruction, except that
+ // movl_i32r *zero*-extends its 32-bit immediate, and it has smaller code
+ // size, so it's preferred for values which could use either.
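+  //
+  // For illustration: movl $1, %eax is 5 bytes, the movq form below is 7
+  // bytes (REX.W + C7 /0 + imm32), and movabsq (movq_i64r) is 10 bytes.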
+ void movq_i32r(int32_t imm, RegisterID dst) {
+ spew("movq $%d, %s", imm, GPRegName(dst));
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, dst, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ void movq_i64r(int64_t imm, RegisterID dst) {
+ spew("movabsq $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
+ m_formatter.immediate64(imm);
+ }
+
+ void movsbq_rr(RegisterID src, RegisterID dst) {
+ spew("movsbq %s, %s", GPReg32Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEb, src, dst);
+ }
+ void movsbq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movsbq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, dst);
+ }
+ void movsbq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movsbq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, index, scale, dst);
+ }
+
+ void movswq_rr(RegisterID src, RegisterID dst) {
+ spew("movswq %s, %s", GPReg32Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEw, src, dst);
+ }
+ void movswq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movswq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, dst);
+ }
+ void movswq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movswq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, index, scale, dst);
+ }
+
+ void movslq_rr(RegisterID src, RegisterID dst) {
+ spew("movslq %s, %s", GPReg32Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, src, dst);
+ }
+ void movslq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movslq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, dst);
+ }
+ void movslq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movslq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, index, scale, dst);
+ }
+
+ [[nodiscard]] JmpSrc movl_ripr(RegisterID dst) {
+ m_formatter.oneByteRipOp(OP_MOV_GvEv, 0, (RegisterID)dst);
+ JmpSrc label(m_formatter.size());
+ spew("movl " MEM_o32r ", %s", ADDR_o32r(label.offset()),
+ GPReg32Name(dst));
+ return label;
+ }
+
+ [[nodiscard]] JmpSrc movl_rrip(RegisterID src) {
+ m_formatter.oneByteRipOp(OP_MOV_EvGv, 0, (RegisterID)src);
+ JmpSrc label(m_formatter.size());
+ spew("movl %s, " MEM_o32r "", GPReg32Name(src),
+ ADDR_o32r(label.offset()));
+ return label;
+ }
+
+ [[nodiscard]] JmpSrc movq_ripr(RegisterID dst) {
+ m_formatter.oneByteRipOp64(OP_MOV_GvEv, 0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("movq " MEM_o32r ", %s", ADDR_o32r(label.offset()),
+ GPRegName(dst));
+ return label;
+ }
+
+ [[nodiscard]] JmpSrc movq_rrip(RegisterID src) {
+ m_formatter.oneByteRipOp64(OP_MOV_EvGv, 0, (RegisterID)src);
+ JmpSrc label(m_formatter.size());
+ spew("movq %s, " MEM_o32r "", GPRegName(src),
+ ADDR_o32r(label.offset()));
+ return label;
+ }
+
+ void leaq_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("leaq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_LEA, offset, base, dst);
+ }
+
+ [[nodiscard]] JmpSrc leaq_rip(RegisterID dst) {
+ m_formatter.oneByteRipOp64(OP_LEA, 0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("leaq " MEM_o32r ", %s", ADDR_o32r(label.offset()),
+ GPRegName(dst));
+ return label;
+ }
+
+ // Flow control:
+
+ void jmp_rip(int ripOffset) {
+ // rip-relative addressing.
+ spew("jmp *%d(%%rip)", ripOffset);
+ m_formatter.oneByteRipOp(OP_GROUP5_Ev, ripOffset, GROUP5_OP_JMPN);
+ }
+
+ void immediate64(int64_t imm) {
+ spew(".quad %lld", (long long)imm);
+ m_formatter.immediate64(imm);
+ }
+
+ // SSE operations:
+
+ void vcvtsq2sd_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpInt64Simd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, src1, src0,
+ dst);
+ }
+ void vcvtsq2ss_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpInt64Simd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, src1, src0,
+ dst);
+ }
+
+ void vcvtsi2sdq_rr(RegisterID src, XMMRegisterID dst) {
+ twoByteOpInt64Simd("vcvtsi2sdq", VEX_SD, OP2_CVTSI2SD_VsdEd, src,
+ invalid_xmm, dst);
+ }
+
+ void vcvttsd2sq_rr(XMMRegisterID src, RegisterID dst) {
+ twoByteOpSimdInt64("vcvttsd2si", VEX_SD, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vcvttss2sq_rr(XMMRegisterID src, RegisterID dst) {
+ twoByteOpSimdInt64("vcvttss2si", VEX_SS, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vmovq_rr(XMMRegisterID src, RegisterID dst) {
+ // While this is called "vmovq", it actually uses the vmovd encoding
+ // with a REX prefix modifying it to be 64-bit.
+ twoByteOpSimdInt64("vmovq", VEX_PD, OP2_MOVD_EdVd, (XMMRegisterID)dst,
+ (RegisterID)src);
+ }
+
+ void vpextrq_irr(unsigned lane, XMMRegisterID src, RegisterID dst) {
+ MOZ_ASSERT(lane < 2);
+ threeByteOpImmSimdInt64("vpextrq", VEX_PD, OP3_PEXTRQ_EvVdqIb, ESCAPE_3A,
+ lane, src, dst);
+ }
+
+ void vpinsrq_irr(unsigned lane, RegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(lane < 2);
+ threeByteOpImmInt64Simd("vpinsrq", VEX_PD, OP3_PINSRQ_VdqEvIb, ESCAPE_3A,
+ lane, src1, src0, dst);
+ }
+
+ void vmovq_rr(RegisterID src, XMMRegisterID dst) {
+ // While this is called "vmovq", it actually uses the vmovd encoding
+ // with a REX prefix modifying it to be 64-bit.
+ twoByteOpInt64Simd("vmovq", VEX_PD, OP2_MOVD_VdEd, src, invalid_xmm, dst);
+ }
+
+ [[nodiscard]] JmpSrc vmovsd_ripr(XMMRegisterID dst) {
+ return twoByteRipOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, dst);
+ }
+ [[nodiscard]] JmpSrc vmovss_ripr(XMMRegisterID dst) {
+ return twoByteRipOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, dst);
+ }
+ [[nodiscard]] JmpSrc vmovaps_ripr(XMMRegisterID dst) {
+ return twoByteRipOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, dst);
+ }
+ [[nodiscard]] JmpSrc vmovdqa_ripr(XMMRegisterID dst) {
+ return twoByteRipOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, dst);
+ }
+
+ [[nodiscard]] JmpSrc vpaddb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpaddb", VEX_PD, OP2_PADDB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpaddw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpaddw", VEX_PD, OP2_PADDW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpaddd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpaddd", VEX_PD, OP2_PADDD_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpaddq_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpaddq", VEX_PD, OP2_PADDQ_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpsubb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpsubb", VEX_PD, OP2_PSUBB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpsubw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpsubw", VEX_PD, OP2_PSUBW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpsubd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpsubd", VEX_PD, OP2_PSUBD_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpsubq_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpsubq", VEX_PD, OP2_PSUBQ_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmullw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpmullw", VEX_PD, OP2_PMULLW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmulld_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpmulld", VEX_PD, OP3_PMULLD_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpaddsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpaddsb", VEX_PD, OP2_PADDSB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpaddusb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpaddusb", VEX_PD, OP2_PADDUSB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpaddsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpaddsw", VEX_PD, OP2_PADDSW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpaddusw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpaddusw", VEX_PD, OP2_PADDUSW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpsubsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpsubsb", VEX_PD, OP2_PSUBSB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpsubusb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpsubusb", VEX_PD, OP2_PSUBUSB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpsubsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpsubsw", VEX_PD, OP2_PSUBSW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpsubusw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpsubusw", VEX_PD, OP2_PSUBUSW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpminsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpminsb", VEX_PD, OP3_PMINSB_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpminub_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpminub", VEX_PD, OP2_PMINUB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpminsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpminsw", VEX_PD, OP2_PMINSW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpminuw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpminuw", VEX_PD, OP3_PMINUW_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpminsd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpminsd", VEX_PD, OP3_PMINSD_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpminud_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpminud", VEX_PD, OP3_PMINUD_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmaxsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpmaxsb", VEX_PD, OP3_PMAXSB_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmaxub_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpmaxub", VEX_PD, OP2_PMAXUB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmaxsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpmaxsw", VEX_PD, OP2_PMAXSW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmaxuw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpmaxuw", VEX_PD, OP3_PMAXUW_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmaxsd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpmaxsd", VEX_PD, OP3_PMAXSD_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmaxud_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpmaxud", VEX_PD, OP3_PMAXUD_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpand_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpand", VEX_PD, OP2_PANDDQ_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpxor_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpxor", VEX_PD, OP2_PXORDQ_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpor_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpor", VEX_PD, OP2_PORDQ_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vaddps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vaddps", VEX_PS, OP2_ADDPS_VpsWps, src, dst);
+ }
+ [[nodiscard]] JmpSrc vaddpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vaddpd", VEX_PD, OP2_ADDPD_VpdWpd, src, dst);
+ }
+ [[nodiscard]] JmpSrc vsubps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vsubps", VEX_PS, OP2_SUBPS_VpsWps, src, dst);
+ }
+ [[nodiscard]] JmpSrc vsubpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vsubpd", VEX_PD, OP2_SUBPD_VpdWpd, src, dst);
+ }
+ [[nodiscard]] JmpSrc vdivps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vdivps", VEX_PS, OP2_DIVPS_VpsWps, src, dst);
+ }
+ [[nodiscard]] JmpSrc vdivpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vdivpd", VEX_PD, OP2_DIVPD_VpdWpd, src, dst);
+ }
+ [[nodiscard]] JmpSrc vmulps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vmulps", VEX_PS, OP2_MULPS_VpsWps, src, dst);
+ }
+ [[nodiscard]] JmpSrc vmulpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vmulpd", VEX_PD, OP2_MULPD_VpdWpd, src, dst);
+ }
+ [[nodiscard]] JmpSrc vandpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vandpd", VEX_PD, OP2_ANDPD_VpdWpd, src, dst);
+ }
+ [[nodiscard]] JmpSrc vminpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vminpd", VEX_PD, OP2_MINPD_VpdWpd, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpacksswb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpacksswb", VEX_PD, OP2_PACKSSWB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpackuswb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpackuswb", VEX_PD, OP2_PACKUSWB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpackssdw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpackssdw", VEX_PD, OP2_PACKSSDW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpackusdw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpackusdw", VEX_PD, OP3_PACKUSDW_VdqWdq,
+ ESCAPE_38, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpunpckldq_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ_VdqWdq, src,
+ dst);
+ }
+ [[nodiscard]] JmpSrc vunpcklps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vunpcklps", VEX_PS, OP2_UNPCKLPS_VsdWsd, src, dst);
+ }
+ [[nodiscard]] JmpSrc vptest_ripr(XMMRegisterID lhs) {
+ return threeByteRipOpSimd("vptest", VEX_PD, OP3_PTEST_VdVd, ESCAPE_38, lhs);
+ }
+ [[nodiscard]] JmpSrc vpshufb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpshufb", VEX_PD, OP3_PSHUFB_VdqWdq, ESCAPE_38,
+ src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmaddwd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpmaddwd", VEX_PD, OP2_PMADDWD_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpcmpeqb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpcmpeqb", VEX_PD, OP2_PCMPEQB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpcmpgtb_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpcmpgtb", VEX_PD, OP2_PCMPGTB_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpcmpeqw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpcmpeqw", VEX_PD, OP2_PCMPEQW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpcmpgtw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpcmpgtw", VEX_PD, OP2_PCMPGTW_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpcmpeqd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpcmpeqd", VEX_PD, OP2_PCMPEQD_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpcmpgtd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpcmpgtd", VEX_PD, OP2_PCMPGTD_VdqWdq, src, dst);
+ }
+ [[nodiscard]] JmpSrc vcmpeqps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
+ X86Encoding::ConditionCmp_EQ, src, dst);
+ }
+ [[nodiscard]] JmpSrc vcmpneqps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
+ X86Encoding::ConditionCmp_NEQ, src, dst);
+ }
+ [[nodiscard]] JmpSrc vcmpltps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
+ X86Encoding::ConditionCmp_LT, src, dst);
+ }
+ [[nodiscard]] JmpSrc vcmpleps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
+ X86Encoding::ConditionCmp_LE, src, dst);
+ }
+ [[nodiscard]] JmpSrc vcmpgeps_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
+ X86Encoding::ConditionCmp_GE, src, dst);
+ }
+ [[nodiscard]] JmpSrc vcmpeqpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd,
+ X86Encoding::ConditionCmp_EQ, src, dst);
+ }
+ [[nodiscard]] JmpSrc vcmpneqpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd,
+ X86Encoding::ConditionCmp_NEQ, src, dst);
+ }
+ [[nodiscard]] JmpSrc vcmpltpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd,
+ X86Encoding::ConditionCmp_LT, src, dst);
+ }
+ [[nodiscard]] JmpSrc vcmplepd_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd,
+ X86Encoding::ConditionCmp_LE, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmaddubsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return threeByteRipOpSimd("vpmaddubsw", VEX_PD, OP3_PMADDUBSW_VdqWdq,
+ ESCAPE_38, src, dst);
+ }
+ [[nodiscard]] JmpSrc vpmuludq_ripr(XMMRegisterID src, XMMRegisterID dst) {
+ return twoByteRipOpSimd("vpmuludq", VEX_PD, OP2_PMULUDQ_VdqWdq, src, dst);
+ }
+
+ // BMI instructions:
+
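+  // SARX/SHLX/SHRX take the shift count in the VEX.vvvv field, which the
+  // formatter types as an XMM register id; hence the casts of |shift| below.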
+ void sarxq_rrr(RegisterID src, RegisterID shift, RegisterID dst) {
+ spew("sarxq %s, %s, %s", GPReg64Name(src), GPReg64Name(shift),
+ GPReg64Name(dst));
+
+ RegisterID rm = src;
+ XMMRegisterID src0 = static_cast<XMMRegisterID>(shift);
+ int reg = dst;
+ m_formatter.threeByteOpVex64(VEX_SS /* = F3 */, OP3_SARX_GyEyBy, ESCAPE_38,
+ rm, src0, reg);
+ }
+
+ void shlxq_rrr(RegisterID src, RegisterID shift, RegisterID dst) {
+ spew("shlxq %s, %s, %s", GPReg64Name(src), GPReg64Name(shift),
+ GPReg64Name(dst));
+
+ RegisterID rm = src;
+ XMMRegisterID src0 = static_cast<XMMRegisterID>(shift);
+ int reg = dst;
+ m_formatter.threeByteOpVex64(VEX_PD /* = 66 */, OP3_SHLX_GyEyBy, ESCAPE_38,
+ rm, src0, reg);
+ }
+
+ void shrxq_rrr(RegisterID src, RegisterID shift, RegisterID dst) {
+ spew("shrxq %s, %s, %s", GPReg64Name(src), GPReg64Name(shift),
+ GPReg64Name(dst));
+
+ RegisterID rm = src;
+ XMMRegisterID src0 = static_cast<XMMRegisterID>(shift);
+ int reg = dst;
+ m_formatter.threeByteOpVex64(VEX_SD /* = F2 */, OP3_SHRX_GyEyBy, ESCAPE_38,
+ rm, src0, reg);
+ }
+
+ private:
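+  // Most of the helpers below pick between the legacy SSE encoding and the
+  // VEX encoding via useLegacySSEEncoding(); the spew output mirrors
+  // whichever form is emitted.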
+ [[nodiscard]] JmpSrc twoByteRipOpSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode,
+ XMMRegisterID reg) {
+ MOZ_ASSERT(!IsXMMReversedOperands(opcode));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteRipOp(opcode, 0, reg);
+ JmpSrc label(m_formatter.size());
+ spew("%-11s " MEM_o32r ", %s", legacySSEOpName(name),
+ ADDR_o32r(label.offset()), XMMRegName(reg));
+ return label;
+ }
+
+ [[nodiscard]] JmpSrc twoByteRipOpSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ MOZ_ASSERT(src0 != invalid_xmm && !IsXMMReversedOperands(opcode));
+ if (useLegacySSEEncoding(src0, dst)) {
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteRipOp(opcode, 0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name),
+ ADDR_o32r(label.offset()), XMMRegName(dst));
+ return label;
+ }
+
+ m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst);
+ JmpSrc label(m_formatter.size());
+    spew("%-11s" MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()),
+         XMMRegName(src0), XMMRegName(dst));
+ return label;
+ }
+
+ [[nodiscard]] JmpSrc twoByteRipOpImmSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, uint32_t imm,
+ XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(src0 != invalid_xmm && !IsXMMReversedOperands(opcode));
+ if (useLegacySSEEncoding(src0, dst)) {
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteRipOp(opcode, 0, dst);
+ m_formatter.immediate8u(imm);
+ JmpSrc label(m_formatter.size(),
+ /* bytes trailing the patch field = */ 1);
+ spew("%-11s$0x%x, " MEM_o32r ", %s", legacySSEOpName(name), imm,
+ ADDR_o32r(label.offset()), XMMRegName(dst));
+ return label;
+ }
+
+ m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst);
+ m_formatter.immediate8u(imm);
+ JmpSrc label(m_formatter.size(),
+ /* bytes trailing the patch field = */ 1);
+ spew("%-11s$0x%x, " MEM_o32r ", %s, %s", name, imm,
+ ADDR_o32r(label.offset()), XMMRegName(src0), XMMRegName(dst));
+ return label;
+ }
+
+ void twoByteOpInt64Simd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, RegisterID rm,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst),
+ GPRegName(rm));
+ } else {
+ spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(rm),
+ XMMRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp64(opcode, rm, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", name, XMMRegName(dst), GPRegName(rm));
+ } else {
+ spew("%-11s%s, %s", name, GPRegName(rm), XMMRegName(dst));
+ }
+ } else {
+ spew("%-11s%s, %s, %s", name, GPRegName(rm), XMMRegName(src0),
+ XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex64(ty, opcode, rm, src0, dst);
+ }
+
+ void twoByteOpSimdInt64(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, XMMRegisterID rm,
+ RegisterID dst) {
+ if (useLegacySSEEncodingAlways()) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(dst),
+ XMMRegName(rm));
+ } else if (opcode == OP2_MOVD_EdVd) {
+ spew("%-11s%s, %s", legacySSEOpName(name),
+ XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
+ } else {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm),
+ GPRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp64(opcode, (RegisterID)rm, dst);
+ return;
+ }
+
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", name, GPRegName(dst), XMMRegName(rm));
+ } else if (opcode == OP2_MOVD_EdVd) {
+ spew("%-11s%s, %s", name, XMMRegName((XMMRegisterID)dst),
+ GPRegName((RegisterID)rm));
+ } else {
+ spew("%-11s%s, %s", name, XMMRegName(rm), GPRegName(dst));
+ }
+ m_formatter.twoByteOpVex64(ty, opcode, (RegisterID)rm, invalid_xmm,
+ (XMMRegisterID)dst);
+ }
+
+ [[nodiscard]] JmpSrc threeByteRipOpSimd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape,
+ XMMRegisterID dst) {
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteRipOp(opcode, escape, 0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name),
+ ADDR_o32r(label.offset()), XMMRegName(dst));
+ return label;
+ }
+
+ [[nodiscard]] JmpSrc threeByteRipOpSimd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape,
+ XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(src0 != invalid_xmm);
+ if (useLegacySSEEncoding(src0, dst)) {
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteRipOp(opcode, escape, 0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name),
+ ADDR_o32r(label.offset()), XMMRegName(dst));
+ return label;
+ }
+
+ m_formatter.threeByteRipOpVex(ty, opcode, escape, 0, src0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("%-11s" MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()),
+ XMMRegName(src0), XMMRegName(dst));
+ return label;
+ }
+
+ void threeByteOpImmSimdInt64(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, XMMRegisterID src,
+ RegisterID dst) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg64Name(dst),
+ XMMRegName(src));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp64(opcode, escape, dst, (RegisterID)src);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmInt64Simd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, RegisterID src1,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg64Name(src1),
+ XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp64(opcode, escape, src1, (RegisterID)dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ MOZ_ASSERT(src0 != invalid_xmm);
+ spew("%-11s$0x%x, %s, %s, %s", name, imm, GPReg64Name(src1),
+ XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex64(ty, opcode, escape, src1, src0,
+ (RegisterID)dst);
+ m_formatter.immediate8u(imm);
+ }
+};
+
+using BaseAssemblerSpecific = BaseAssemblerX64;
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_BaseAssembler_x64_h */
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
new file mode 100644
index 0000000000..9bd7e9b253
--- /dev/null
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -0,0 +1,984 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/CodeGenerator-x64.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/MIR.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+CodeGeneratorX64::CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorX86Shared(gen, graph, masm) {}
+
+ValueOperand CodeGeneratorX64::ToValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand CodeGeneratorX64::ToTempValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+Operand CodeGeneratorX64::ToOperand64(const LInt64Allocation& a64) {
+ const LAllocation& a = a64.value();
+ MOZ_ASSERT(!a.isFloatReg());
+ if (a.isGeneralReg()) {
+ return Operand(a.toGeneralReg()->reg());
+ }
+ return Operand(ToAddress(a));
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ ValueOperand result = ToOutValue(value);
+ masm.moveValue(value->value(), result);
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LAllocation* in = box->getOperand(0);
+ ValueOperand result = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
+
+ if (JitOptions.spectreValueMasking && IsFloatingPointType(box->type())) {
+ ScratchRegisterScope scratch(masm);
+ masm.movePtr(ImmWord(JSVAL_SHIFTED_TAG_MAX_DOUBLE), scratch);
+ masm.cmpPtrMovePtr(Assembler::Below, scratch, result.valueReg(), scratch,
+ result.valueReg());
+ }
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ MUnbox* mir = unbox->mir();
+
+ Register result = ToRegister(unbox->output());
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ Label bail;
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.fallibleUnboxInt32(value, result, &bail);
+ break;
+ case MIRType::Boolean:
+ masm.fallibleUnboxBoolean(value, result, &bail);
+ break;
+ case MIRType::Object:
+ masm.fallibleUnboxObject(value, result, &bail);
+ break;
+ case MIRType::String:
+ masm.fallibleUnboxString(value, result, &bail);
+ break;
+ case MIRType::Symbol:
+ masm.fallibleUnboxSymbol(value, result, &bail);
+ break;
+ case MIRType::BigInt:
+ masm.fallibleUnboxBigInt(value, result, &bail);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutFrom(&bail, unbox->snapshot());
+ return;
+ }
+
+ // Infallible unbox.
+
+ Operand input = ToOperand(unbox->getOperand(LUnbox::Input));
+
+#ifdef DEBUG
+ // Assert the types match.
+ JSValueTag tag = MIRTypeToTag(mir->type());
+ Label ok;
+ masm.splitTag(input, ScratchReg);
+ masm.branch32(Assembler::Equal, ScratchReg, Imm32(tag), &ok);
+ masm.assumeUnreachable("Infallible unbox type mismatch");
+ masm.bind(&ok);
+#endif
+
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(input, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(input, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(input, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(input, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(input, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(input, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+
+ if (IsConstant(rhs)) {
+ masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
+ } else {
+ masm.cmpPtr(lhsReg, ToOperand64(rhs));
+ }
+
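+  // The cmpPtr above is the same for signed and unsigned comparisons; the
+  // signedness only affects which condition code emitSet tests.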
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ masm.emitSet(JSOpToCondition(lir->jsop(), isSigned), output);
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+
+ if (IsConstant(rhs)) {
+ masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
+ } else {
+ masm.cmpPtr(lhsReg, ToOperand64(rhs));
+ }
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ emitBranch(JSOpToCondition(lir->jsop(), isSigned), lir->ifTrue(),
+ lir->ifFalse());
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
+ Register regL = ToRegister(baab->left());
+ if (baab->is64()) {
+ if (baab->right()->isConstant()) {
+ masm.test64(regL, Imm64(ToInt64(baab->right())));
+ } else {
+ masm.test64(regL, ToRegister(baab->right()));
+ }
+ } else {
+ if (baab->right()->isConstant()) {
+ masm.test32(regL, Imm32(ToInt32(baab->right())));
+ } else {
+ masm.test32(regL, ToRegister(baab->right()));
+ }
+ }
+ emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
+ MOZ_ASSERT(rhs != rdx);
+ MOZ_ASSERT_IF(output == rax, ToRegister(lir->remainder()) == rdx);
+ MOZ_ASSERT_IF(output == rdx, ToRegister(lir->remainder()) == rax);
+
+ Label done;
+
+ // Put the lhs in rax.
+ if (lhs != rax) {
+ masm.mov(lhs, rax);
+ }
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.branchTestPtr(Assembler::NonZero, rhs, rhs, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
+ if (lir->mir()->isMod()) {
+ masm.xorl(output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+ // Sign extend the lhs into rdx to make rdx:rax.
+ masm.cqo();
+ masm.idivq(rhs);
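+  // idivq leaves the quotient in rax and the remainder in rdx, matching the
+  // output/remainder register assertions above.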
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+
+ DebugOnly<Register> output = ToRegister(lir->output());
+ MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
+ MOZ_ASSERT(rhs != rdx);
+ MOZ_ASSERT_IF(output.value == rax, ToRegister(lir->remainder()) == rdx);
+ MOZ_ASSERT_IF(output.value == rdx, ToRegister(lir->remainder()) == rax);
+
+ // Put the lhs in rax.
+ if (lhs != rax) {
+ masm.mov(lhs, rax);
+ }
+
+ Label done;
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.branchTestPtr(Assembler::NonZero, rhs, rhs, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ // Zero extend the lhs into rdx to make (rdx:rax).
+ masm.xorl(rdx, rdx);
+ masm.udivq(rhs);
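+  // udivq leaves the quotient in rax and the remainder in rdx.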
+
+ masm.bind(&done);
+}
+
+void CodeGeneratorX64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ MOZ_ASSERT(dividend == rax);
+ MOZ_ASSERT(output == rdx);
+
+ // Sign extend the lhs into rdx to make rdx:rax.
+ masm.cqo();
+
+ masm.idivq(divisor);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorX64::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ MOZ_ASSERT(dividend == rax);
+ MOZ_ASSERT(output == rdx);
+
+ // Sign extend the lhs into rdx to make rdx:rax.
+ masm.cqo();
+
+ masm.idivq(divisor);
+
+ // Move the remainder from rdx.
+ masm.movq(output, dividend);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ auto sync = Synchronization::Load();
+
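+  // On x64 a naturally aligned 64-bit load is atomic, so a plain load64
+  // between the barriers suffices.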
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ }
+ masm.memoryBarrierAfter(sync);
+
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+
+ // NOTE: the generated code must match the assembly code in gen_store in
+ // GenerateAtomicOperations.py
+ auto sync = Synchronization::Store();
+
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.store64(temp1, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.store64(temp1, dest);
+ }
+ masm.memoryBarrierAfter(sync);
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+
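+  // lock cmpxchg requires the expected value in rax and also leaves the value
+  // observed in memory there, so temp1 doubles as the exchange output.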
+ MOZ_ASSERT(temp1.reg == rax);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(oldval, temp1);
+ masm.loadBigInt64(newval, temp2);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, temp2, temp1);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, temp2, temp1);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp1, out, temp2.reg);
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register temp2 = ToRegister(lir->temp2());
+ Register out = ToRegister(lir->output());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp1);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp1);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp1, out, temp2);
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ Register64 fetchTemp = Register64(out);
+ Register64 fetchOut = temp2;
+ Register createTemp = temp1.reg;
+
+ // Add and Sub don't need |fetchTemp| and can save a `mov` when the value and
+ // output register are equal to each other.
+ if (atomicOp == AtomicFetchAddOp || atomicOp == AtomicFetchSubOp) {
+ fetchTemp = Register64::Invalid();
+ fetchOut = temp1;
+ createTemp = temp2.reg;
+ } else {
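+    // Operations other than Add/Sub are implemented with a cmpxchg loop,
+    // which needs the expected value in rax, so temp2 is pinned to rax.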
+ MOZ_ASSERT(temp2.reg == rax);
+ }
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ fetchTemp, fetchOut);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ fetchTemp, fetchOut);
+ }
+
+ emitCreateBigInt(lir, arrayType, fetchOut, out, createTemp);
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest);
+ }
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+
+ Operand falseExpr = ToOperandOrRegister64(lir->falseExpr());
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+ "true expr is reused for input");
+
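+  // If the condition is zero, cmovzq replaces the reused true value in |out|
+  // with the false value; otherwise |out| keeps the true value.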
+ masm.test32(cond, cond);
+ masm.cmovzq(falseExpr, out.reg);
+}
+
+// We expect to handle only the cases: compare is {U,}Int{32,64}, and select
+// is {U,}Int{32,64}, independently. Some values may be stack allocated, and
+// the "true" input is reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+ ins->compareType() == MCompare::Compare_UInt32;
+ bool cmpIs64bit = ins->compareType() == MCompare::Compare_Int64 ||
+ ins->compareType() == MCompare::Compare_UInt64;
+ bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+ bool selIs64bit = ins->mir()->type() == MIRType::Int64;
+
+ // Throw out unhandled cases
+ MOZ_RELEASE_ASSERT(
+ cmpIs32bit != cmpIs64bit && selIs32bit != selIs64bit,
+ "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+ using C = Assembler::Condition;
+ using R = Register;
+ using A = const Address&;
+
+ // Identify macroassembler methods to generate instructions, based on the
+ // type of the comparison and the select. This avoids having to duplicate
+ // the code-generation tree below 4 times. These assignments to
+ // `cmpMove_CRRRR` et al are unambiguous as a result of the combination of
+ // the template parameters and the 5 argument types ((C, R, R, R, R) etc).
+ void (MacroAssembler::*cmpMove_CRRRR)(C, R, R, R, R) = nullptr;
+ void (MacroAssembler::*cmpMove_CRARR)(C, R, A, R, R) = nullptr;
+ void (MacroAssembler::*cmpLoad_CRRAR)(C, R, R, A, R) = nullptr;
+ void (MacroAssembler::*cmpLoad_CRAAR)(C, R, A, A, R) = nullptr;
+
+ if (cmpIs32bit) {
+ if (selIs32bit) {
+ cmpMove_CRRRR = &MacroAssemblerX64::cmpMove<32, 32>;
+ cmpMove_CRARR = &MacroAssemblerX64::cmpMove<32, 32>;
+ cmpLoad_CRRAR = &MacroAssemblerX64::cmpLoad<32, 32>;
+ cmpLoad_CRAAR = &MacroAssemblerX64::cmpLoad<32, 32>;
+ } else {
+ cmpMove_CRRRR = &MacroAssemblerX64::cmpMove<32, 64>;
+ cmpMove_CRARR = &MacroAssemblerX64::cmpMove<32, 64>;
+ cmpLoad_CRRAR = &MacroAssemblerX64::cmpLoad<32, 64>;
+ cmpLoad_CRAAR = &MacroAssemblerX64::cmpLoad<32, 64>;
+ }
+ } else {
+ if (selIs32bit) {
+ cmpMove_CRRRR = &MacroAssemblerX64::cmpMove<64, 32>;
+ cmpMove_CRARR = &MacroAssemblerX64::cmpMove<64, 32>;
+ cmpLoad_CRRAR = &MacroAssemblerX64::cmpLoad<64, 32>;
+ cmpLoad_CRAAR = &MacroAssemblerX64::cmpLoad<64, 32>;
+ } else {
+ cmpMove_CRRRR = &MacroAssemblerX64::cmpMove<64, 64>;
+ cmpMove_CRARR = &MacroAssemblerX64::cmpMove<64, 64>;
+ cmpLoad_CRRAR = &MacroAssemblerX64::cmpLoad<64, 64>;
+ cmpLoad_CRAAR = &MacroAssemblerX64::cmpLoad<64, 64>;
+ }
+ }
+
+ Register trueExprAndDest = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+ "true expr input is reused for output");
+
+ Assembler::Condition cond = Assembler::InvertCondition(
+ JSOpToCondition(ins->compareType(), ins->jsop()));
+ const LAllocation* rhs = ins->rightExpr();
+ const LAllocation* falseExpr = ins->ifFalseExpr();
+ Register lhs = ToRegister(ins->leftExpr());
+
+  // We generate one of four cmp+cmov pairings, depending on whether the rhs
+  // comparison operand and the false-expression operand are each in a
+  // register or in memory.
+ if (rhs->isRegister()) {
+ if (falseExpr->isRegister()) {
+ (masm.*cmpMove_CRRRR)(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+ } else {
+ (masm.*cmpLoad_CRRAR)(cond, lhs, ToRegister(rhs), ToAddress(falseExpr),
+ trueExprAndDest);
+ }
+ } else {
+ if (falseExpr->isRegister()) {
+ (masm.*cmpMove_CRARR)(cond, lhs, ToAddress(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+ } else {
+ (masm.*cmpLoad_CRAAR)(cond, lhs, ToAddress(rhs), ToAddress(falseExpr),
+ trueExprAndDest);
+ }
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.vmovq(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.vmovq(ToFloatRegister(lir->input()), ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ masm.convertUInt32ToDouble(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access,
+ const LAllocation* value, Operand dstAddr) {
+ if (value->isConstant()) {
+ masm.memoryBarrierBefore(access.sync());
+
+ const MConstant* mir = value->toConstant();
+ Imm32 cst =
+ Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());
+
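+    // Record the access at the current code offset so a fault at this store
+    // can be attributed to this wasm memory access.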
+ masm.append(access, masm.size());
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ masm.movb(cst, dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.movw(cst, dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.movl(cst, dstAddr);
+ break;
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrierAfter(access.sync());
+ } else {
+ masm.wasmStore(access, ToAnyRegister(value), dstAddr);
+ }
+}
+
+void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
+ MOZ_ASSERT(ins->instance()->isBogus());
+ masm.movePtr(HeapReg, ToRegister(ins->output()));
+}
+
+template <typename T>
+void CodeGeneratorX64::emitWasmLoad(T* ins) {
+ const MWasmLoad* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());
+
+ // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+ // true 64-bit value.
+ const LAllocation* ptr = ins->ptr();
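+  // A bogus ptr allocation means the access uses a constant address, i.e. a
+  // fixed offset from HeapReg.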
+ Operand srcAddr = ptr->isBogus()
+ ? Operand(HeapReg, offset)
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
+
+ if (mir->type() == MIRType::Int64) {
+ masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
+ } else {
+ masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
+ }
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); }
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* ins) { emitWasmLoad(ins); }
+
+template <typename T>
+void CodeGeneratorX64::emitWasmStore(T* ins) {
+ const MWasmStore* mir = ins->mir();
+ const wasm::MemoryAccessDesc& access = mir->access();
+
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());
+
+ const LAllocation* value = ins->getOperand(ins->ValueIndex);
+ const LAllocation* ptr = ins->ptr();
+ Operand dstAddr = ptr->isBogus()
+ ? Operand(HeapReg, offset)
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
+
+ wasmStore(access, value, dstAddr);
+}
+
+void CodeGenerator::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); }
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* ins) {
+ MOZ_CRASH("Unused on this platform");
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MWasmCompareExchangeHeap* mir = ins->mir();
+
+ Register ptr = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());
+
+ if (accessType == Scalar::Int64) {
+ masm.wasmCompareExchange64(mir->access(), srcAddr, Register64(oldval),
+ Register64(newval), ToOutRegister64(ins));
+ } else {
+ masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval,
+ ToRegister(ins->output()));
+ }
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MWasmAtomicExchangeHeap* mir = ins->mir();
+
+ Register ptr = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());
+
+ if (accessType == Scalar::Int64) {
+ masm.wasmAtomicExchange64(mir->access(), srcAddr, Register64(value),
+ ToOutRegister64(ins));
+ } else {
+ masm.wasmAtomicExchange(mir->access(), srcAddr, value,
+ ToRegister(ins->output()));
+ }
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->hasUses());
+
+ Register ptr = ToRegister(ins->ptr());
+ const LAllocation* value = ins->value();
+ Register temp =
+ ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+ Register output = ToRegister(ins->output());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ if (accessType == Scalar::Uint32) {
+ accessType = Scalar::Int32;
+ }
+
+ AtomicOp op = mir->operation();
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());
+
+ if (accessType == Scalar::Int64) {
+ Register64 val = Register64(ToRegister(value));
+ Register64 out = Register64(output);
+ Register64 tmp = Register64(temp);
+ masm.wasmAtomicFetchOp64(mir->access(), op, val, srcAddr, tmp, out);
+ } else if (value->isConstant()) {
+ masm.wasmAtomicFetchOp(mir->access(), op, Imm32(ToInt32(value)), srcAddr,
+ temp, output);
+ } else {
+ masm.wasmAtomicFetchOp(mir->access(), op, ToRegister(value), srcAddr, temp,
+ output);
+ }
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasUses());
+
+ Register ptr = ToRegister(ins->ptr());
+ const LAllocation* value = ins->value();
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ AtomicOp op = mir->operation();
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());
+
+ if (accessType == Scalar::Int64) {
+ Register64 val = Register64(ToRegister(value));
+ masm.wasmAtomicEffectOp64(mir->access(), op, val, srcAddr);
+ } else if (value->isConstant()) {
+ Imm32 c(0);
+ if (value->toConstant()->type() == MIRType::Int64) {
+ c = Imm32(ToInt64(value));
+ } else {
+ c = Imm32(ToInt32(value));
+ }
+ masm.wasmAtomicEffectOp(mir->access(), op, c, srcAddr, InvalidReg);
+ } else {
+ masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), srcAddr,
+ InvalidReg);
+ }
+}
+
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ // On x64, branchTruncateDouble uses vcvttsd2sq. Unlike the x86
+ // implementation, this should handle most doubles and we can just
+ // call a stub if it fails.
+ emitTruncateDouble(input, output, ins->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* lir) {
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register output = ToRegister(lir->getDef(0));
+
+ emitTruncateDouble(input, output, lir->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* lir) {
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register output = ToRegister(lir->getDef(0));
+
+ emitTruncateFloat32(input, output, lir->mir());
+}
+
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ // On x64, branchTruncateFloat32 uses vcvttss2sq. Unlike the x86
+ // implementation, this should handle most floats and we can just
+ // call a stub if it fails.
+ emitTruncateFloat32(input, output, ins->mir());
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ masm.movl(ToOperand(input), output);
+ } else {
+ MOZ_CRASH("Not implemented.");
+ }
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->isUnsigned()) {
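+    // Writing a 32-bit register zero-extends into the full 64-bit register,
+    // so a plain movl implements the unsigned extension.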
+ masm.movl(ToOperand(input), output);
+ } else {
+ masm.movslq(ToOperand(input), output);
+ }
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
+ // Generates no code on this platform because the input is assumed to have
+ // canonical form.
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(ToRegister(lir->input()) == output);
+ masm.debugAssertCanonicalInt32(output);
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ // Generates no code on this platform because the input is assumed to have
+ // canonical form.
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(ToRegister(lir->input()) == output);
+ masm.debugAssertCanonicalInt32(output);
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ Register64 output = ToOutRegister64(ins);
+ switch (ins->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.movsbq(Operand(input.reg), output.reg);
+ break;
+ case MSignExtendInt64::Half:
+ masm.movswq(Operand(input.reg), output.reg);
+ break;
+ case MSignExtendInt64::Word:
+ masm.movslq(Operand(input.reg), output.reg);
+ break;
+ }
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType inputType = mir->input()->type();
+
+ MOZ_ASSERT(inputType == MIRType::Double || inputType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ FloatRegister temp =
+ mir->isUnsigned() ? ToFloatRegister(lir->temp()) : InvalidFloatReg;
+
+ Label* oolEntry = ool->entry();
+ Label* oolRejoin = ool->rejoin();
+ bool isSaturating = mir->isSaturating();
+ if (inputType == MIRType::Double) {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, temp);
+ } else {
+ masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, temp);
+ }
+ } else {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, temp);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, temp);
+ }
+ }
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MInt64ToFloatingPoint* mir = lir->mir();
+ bool isUnsigned = mir->isUnsigned();
+
+ MIRType outputType = mir->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+ MOZ_ASSERT(isUnsigned == !lir->getTemp(0)->isBogusTemp());
+
+ if (outputType == MIRType::Double) {
+ if (isUnsigned) {
+ masm.convertUInt64ToDouble(input, output, ToRegister(lir->getTemp(0)));
+ } else {
+ masm.convertInt64ToDouble(input, output);
+ }
+ } else {
+ if (isUnsigned) {
+ masm.convertUInt64ToFloat32(input, output, ToRegister(lir->getTemp(0)));
+ } else {
+ masm.convertInt64ToFloat32(input, output);
+ }
+ }
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ masm.cmpq(Imm32(0), ToRegister(lir->input()));
+ masm.emitSet(Assembler::Equal, ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.reg);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.reg);
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ MOZ_ASSERT(!input->isConstant());
+ Register inputR = ToRegister(input);
+ MOZ_ASSERT(inputR == ToRegister(ins->output()));
+ masm.notq(inputR);
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register input = ToRegister(lir->input());
+ masm.testq(input, input);
+ emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
diff --git a/js/src/jit/x64/CodeGenerator-x64.h b/js/src/jit/x64/CodeGenerator-x64.h
new file mode 100644
index 0000000000..c3359d0190
--- /dev/null
+++ b/js/src/jit/x64/CodeGenerator-x64.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_CodeGenerator_x64_h
+#define jit_x64_CodeGenerator_x64_h
+
+#include "jit/x86-shared/CodeGenerator-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorX64 : public CodeGeneratorX86Shared {
+ protected:
+ CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ Operand ToOperand64(const LInt64Allocation& a);
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ void wasmStore(const wasm::MemoryAccessDesc& access, const LAllocation* value,
+ Operand dstAddr);
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+};
+
+using CodeGeneratorSpecific = CodeGeneratorX64;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_CodeGenerator_x64_h */
diff --git a/js/src/jit/x64/LIR-x64.h b/js/src/jit/x64/LIR-x64.h
new file mode 100644
index 0000000000..efaedc4499
--- /dev/null
+++ b/js/src/jit/x64/LIR-x64.h
@@ -0,0 +1,170 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_LIR_x64_h
+#define jit_x64_LIR_x64_h
+
+namespace js {
+namespace jit {
+
+// Given an untyped input, guards on whether it's a specific type and returns
+// the unboxed payload.
+class LUnboxBase : public LInstructionHelper<1, 1, 0> {
+ public:
+ LUnboxBase(LNode::Opcode op, const LAllocation& input)
+ : LInstructionHelper<1, 1, 0>(op) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+};
+
+class LUnbox : public LUnboxBase {
+ public:
+ LIR_HEADER(Unbox)
+
+ explicit LUnbox(const LAllocation& input) : LUnboxBase(classOpcode, input) {}
+
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LUnboxBase {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint)
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnboxBase(classOpcode, input), type_(type) {}
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ explicit LWasmUint32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ explicit LWasmUint32ToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+// This class performs a simple x86 'div', yielding either a quotient or
+// remainder depending on whether this instruction is defined to output
+// rax (quotient) or rdx (remainder).
+class LUDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ LWasmTruncateToInt64(const LAllocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_LIR_x64_h */
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
new file mode 100644
index 0000000000..d2e328930c
--- /dev/null
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -0,0 +1,565 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/Lowering-x64.h"
+
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/x64/Assembler-x64.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation LIRGeneratorX64::useBoxFixed(MDefinition* mir, Register reg1,
+ Register, bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+LAllocation LIRGeneratorX64::useByteOpRegister(MDefinition* mir) {
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorX64::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorX64::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition LIRGeneratorX64::tempByteOpRegister() { return temp(); }
+
+LDefinition LIRGeneratorX64::tempToUnbox() { return temp(); }
+
+void LIRGeneratorX64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorX64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorX64::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ // X64 doesn't need a temp for 64-bit multiplication.
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
+ LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* box = unbox->getOperand(0);
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnboxBase* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new (alloc())
+ LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new (alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new (alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ define(lir, unbox);
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void LIRGeneratorX64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void LIRGeneratorX64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ defineTypedPhi(phi, lirIndex);
+}
+
+void LIRGeneratorX64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ LUse oldval = useRegister(ins->oldval());
+ LUse newval = useRegister(ins->newval());
+ LInt64Definition temp1 = tempInt64Fixed(Register64(rax));
+ LInt64Definition temp2 = tempInt64();
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ lowerCompareExchangeTypedArrayElement(ins,
+ /* useI386ByteRegisters = */ false);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ LAllocation value = useRegister(ins->value());
+ LInt64Definition temp1 = tempInt64();
+ LDefinition temp2 = temp();
+
+ auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
+ elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ lowerAtomicExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ LAllocation value = useRegister(ins->value());
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We can omit allocating the result BigInt.
+
+ if (ins->isForEffect()) {
+ LInt64Definition temp = tempInt64();
+
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD.
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop with rax as a temp register.
+
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
+ ins->operation() == AtomicFetchSubOp);
+
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2;
+ if (bitOp) {
+ temp2 = tempInt64Fixed(Register64(rax));
+ } else {
+ temp2 = tempInt64();
+ }
+
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ false);
+}
+
+void LIRGeneratorX64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorX64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useRegister(ins->value());
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir =
+ new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir =
+ new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
+ auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+ // 'base' is a GPR but may be of either type. If it is 32-bit it is
+ // zero-extended and can act as 64-bit.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->type() != MIRType::Int64) {
+ auto* lir = new (alloc()) LWasmLoad(useRegisterOrZeroAtStart(base));
+ define(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
+ defineInt64(lir, ins);
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ MDefinition* value = ins->value();
+ LAllocation valueAlloc;
+ switch (ins->access().type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ valueAlloc = useRegisterOrConstantAtStart(value);
+ break;
+ case Scalar::Int64:
+ // No way to encode an int64-to-memory move on x64.
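+ // (x86-64 store instructions only encode a sign-extended 32-bit
+ // immediate, so a full 64-bit constant must go through a register.)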
+ if (value->isConstant() && value->type() != MIRType::Int64) {
+ valueAlloc = useOrConstantAtStart(value);
+ } else {
+ valueAlloc = useRegisterAtStart(value);
+ }
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ valueAlloc = useRegisterAtStart(value);
+ break;
+ case Scalar::Simd128:
+#ifdef ENABLE_WASM_SIMD
+ valueAlloc = useRegisterAtStart(value);
+ break;
+#else
+ MOZ_CRASH("unexpected array type");
+#endif
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
+ auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ // The output may not be used but will be clobbered regardless, so
+ // pin the output to eax.
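+ // (cmpxchg implicitly uses rax/eax for the expected value, and the old
+ // memory value ends up there, which is why the output is pinned.)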
+ //
+ // The input values must both be in registers.
+
+ const LAllocation oldval = useRegister(ins->oldValue());
+ const LAllocation newval = useRegister(ins->newValue());
+
+ LWasmCompareExchangeHeap* lir =
+ new (alloc()) LWasmCompareExchangeHeap(useRegister(base), oldval, newval);
+
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32 ||
+ ins->base()->type() == MIRType::Int64);
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+
+ // The output may not be used but will be clobbered regardless,
+ // so ignore the case where we're not using the value and just
+ // use the output register as a temp.
+
+ LWasmAtomicExchangeHeap* lir =
+ new (alloc()) LWasmAtomicExchangeHeap(base, value);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ // No support for 64-bit operations with constants at the masm level.
+
+ bool canTakeConstant = ins->access().type() != Scalar::Int64;
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
+ // LOCK OR, or LOCK XOR.
+
+ if (!ins->hasUses()) {
+ LAllocation value = canTakeConstant ? useRegisterOrConstant(ins->value())
+ : useRegister(ins->value());
+ LWasmAtomicBinopHeapForEffect* lir =
+ new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base), value);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD with word and byte ops as
+ // appropriate. Any output register can be used and if value is a
+ // register it's best if it's the same as output:
+ //
+ // movl value, output ; if value != output
+ // lock xaddl output, mem
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop, and the output is
+ // always in rax:
+ //
+ // movl *mem, rax
+ // L: mov rax, temp
+ // andl value, temp
+ // lock cmpxchg temp, mem ; reads rax also
+ // jnz L
+ // ; result in rax
+ //
+ // Note the placement of L: cmpxchg will update rax with *mem if *mem does
+ // not have the expected value, so reloading it at the top of the loop
+ // would be redundant.

+
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
+ ins->operation() == AtomicFetchSubOp);
+ bool reuseInput = false;
+ LAllocation value;
+
+ if (bitOp || ins->value()->isConstant()) {
+ value = canTakeConstant ? useRegisterOrConstant(ins->value())
+ : useRegister(ins->value());
+ } else {
+ reuseInput = true;
+ value = useRegisterAtStart(ins->value());
+ }
+
+ auto* lir = new (alloc()) LWasmAtomicBinopHeap(
+ useRegister(base), value, bitOp ? temp() : LDefinition::BogusTemp());
+
+ if (reuseInput) {
+ defineReuseInput(lir, ins, LWasmAtomicBinopHeap::valueOp);
+ } else if (bitOp) {
+ defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
+ } else {
+ define(lir, ins);
+ }
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorX64::lowerDivI64(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
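+ // idiv takes its dividend in rdx:rax and leaves the quotient in rax and
+ // the remainder in rdx, hence the fixed rax output and the rdx temp.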
+ LDivOrModI64* lir = new (alloc()) LDivOrModI64(
+ useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(rdx));
+ defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
+}
+
+void LIRGeneratorX64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+void LIRGeneratorX64::lowerModI64(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc()) LDivOrModI64(
+ useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(rax));
+ defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
+}
+
+void LIRGeneratorX64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+void LIRGeneratorX64::lowerUDivI64(MDiv* div) {
+ LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
+ useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(rdx));
+ defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
+}
+
+void LIRGeneratorX64::lowerUModI64(MMod* mod) {
+ LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
+ useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(rax));
+ defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
+}
+
+void LIRGeneratorX64::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(rax), temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(rdx)));
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorX64::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(rax), temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(rdx)));
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ LDefinition maybeTemp =
+ ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp();
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd), maybeTemp),
+ ins);
+}
+
+void LIRGeneratorX64::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ LDefinition maybeTemp = ins->isUnsigned() ? temp() : LDefinition::BogusTemp();
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp),
+ ins);
+}
+
+void LIRGeneratorX64::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ defineInt64(new (alloc()) LExtendInt32ToInt64(useAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ defineInt64(new (alloc())
+ LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
+ ins);
+}
+
+// On x64 we specialize the cases: compare is {U,}Int{32,64}, and select is
+// {U,}Int{32,64}, independently.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return (insTy == MIRType::Int32 || insTy == MIRType::Int64) &&
+ (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32 ||
+ compTy == MCompare::Compare_Int64 ||
+ compTy == MCompare::Compare_UInt64);
+}
+
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ auto* lir = new (alloc()) LWasmCompareAndSelect(
+ useRegister(lhs), useAny(rhs), compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()), useAny(ins->falseExpr()));
+ defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
diff --git a/js/src/jit/x64/Lowering-x64.h b/js/src/jit/x64/Lowering-x64.h
new file mode 100644
index 0000000000..1c34ea8693
--- /dev/null
+++ b/js/src/jit/x64/Lowering-x64.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_Lowering_x64_h
+#define jit_x64_Lowering_x64_h
+
+#include "jit/x86-shared/Lowering-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorX64 : public LIRGeneratorX86Shared {
+ protected:
+ LIRGeneratorX64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorX86Shared(gen, graph, lirGraph) {}
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register,
+ bool useAtStart = false);
+
+ // x86 has constraints on what registers can be formatted for 1-byte
+ // stores and loads; on x64 all registers are okay.
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ LDefinition tempToUnbox();
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerDivI64(MDiv* div);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerModI64(MMod* mod);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+};
+
+using LIRGeneratorSpecific = LIRGeneratorX64;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_Lowering_x64_h */
diff --git a/js/src/jit/x64/MacroAssembler-x64-inl.h b/js/src/jit/x64/MacroAssembler-x64-inl.h
new file mode 100644
index 0000000000..6869e6c4b6
--- /dev/null
+++ b/js/src/jit/x64/MacroAssembler-x64-inl.h
@@ -0,0 +1,1099 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_MacroAssembler_x64_inl_h
+#define jit_x64_MacroAssembler_x64_inl_h
+
+#include "jit/x64/MacroAssembler-x64.h"
+
+#include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+// ===============================================================
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ // Use mov instead of movq because it has special optimizations for imm == 0.
+ mov(ImmWord(imm.value), dest.reg);
+}
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ movq(src.reg, dest.reg);
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ vmovq(src, dest.reg);
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ vmovq(src.reg, dest);
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+ movl(src.reg, dest);
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ movl(src, dest.reg);
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ movsbq(Operand(src), dest.reg);
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ movswq(Operand(src), dest.reg);
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ movslq(src, dest.reg);
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ movslq(src, dest);
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ movl(src, dest);
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ movslq(Operand(src), dest);
+}
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::notPtr(Register reg) { notq(reg); }
+
+void MacroAssembler::andPtr(Register src, Register dest) { andq(src, dest); }
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) { andq(imm, dest); }
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
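+ // Only a sign-extended 32-bit immediate can be encoded directly; larger
+ // constants are materialized in the scratch register first.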
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ andq(Imm32(imm.value), dest.reg);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ movq(ImmWord(uintptr_t(imm.value)), scratch);
+ andq(scratch, dest.reg);
+ }
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ orq(Imm32(imm.value), dest.reg);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ movq(ImmWord(uintptr_t(imm.value)), scratch);
+ orq(scratch, dest.reg);
+ }
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ xorq(Imm32(imm.value), dest.reg);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ movq(ImmWord(uintptr_t(imm.value)), scratch);
+ xorq(scratch, dest.reg);
+ }
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) { orq(src, dest); }
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { orq(imm, dest); }
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ andq(src.reg, dest.reg);
+}
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ orq(src.reg, dest.reg);
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ xorq(src.reg, dest.reg);
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) { xorq(src, dest); }
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) { xorq(imm, dest); }
+
+void MacroAssembler::and64(const Operand& src, Register64 dest) {
+ andq(src, dest.reg);
+}
+
+void MacroAssembler::or64(const Operand& src, Register64 dest) {
+ orq(src, dest.reg);
+}
+
+void MacroAssembler::xor64(const Operand& src, Register64 dest) {
+ xorq(src, dest.reg);
+}
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap64(Register64 reg) { bswapq(reg.reg); }
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(Register src, Register dest) { addq(src, dest); }
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) { addq(imm, dest); }
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(dest != scratch);
+ if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
+ addq(Imm32((int32_t)imm.value), dest);
+ } else {
+ mov(imm, scratch);
+ addq(scratch, dest);
+ }
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+ addq(imm, Operand(dest));
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest) {
+ addq(imm, Operand(dest));
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+ addq(Operand(src), dest);
+}
+
+void MacroAssembler::add64(const Operand& src, Register64 dest) {
+ addq(src, dest.reg);
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ addq(src.reg, dest.reg);
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) { addq(imm, dest.reg); }
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ addPtr(ImmWord(imm.value), dest.reg);
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ moveStackPtrTo(dest);
+ addqWithPatch(Imm32(0), dest);
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ patchAddq(offset, -imm.value);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) { subq(src, dest); }
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ subq(src, Operand(dest));
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) { subq(imm, dest); }
+
+void MacroAssembler::subPtr(ImmWord imm, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(dest != scratch);
+ if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
+ subq(Imm32((int32_t)imm.value), dest);
+ } else {
+ mov(imm, scratch);
+ subq(scratch, dest);
+ }
+}
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ subq(Operand(addr), dest);
+}
+
+void MacroAssembler::sub64(const Operand& src, Register64 dest) {
+ subq(src, dest.reg);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ subq(src.reg, dest.reg);
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ subPtr(ImmWord(imm.value), dest.reg);
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ // To compute the unsigned multiplication using imulq, we have to ensure both
+ // operands don't have any bits set in the high word.
+
+ if (imm.value >= 0) {
+ // Clear the high word of |src|.
+ movl(src, src);
+
+ // |imm| and |src| are both positive, so directly perform imulq.
+ imulq(imm, src, dest);
+ } else {
+ // Store the low word of |src| into |dest|.
+ movl(src, dest);
+
+ // Compute the unsigned value of |imm| before performing imulq.
+ movl(imm, ScratchReg);
+ imulq(ScratchReg, dest);
+ }
+
+ // Move the high word into |dest|.
+ shrq(Imm32(32), dest);
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+ imulq(rhs, srcDest);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(imm, dest);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ imulq(Imm32((int32_t)imm.value), dest.reg, dest.reg);
+ } else {
+ movq(ImmWord(uintptr_t(imm.value)), ScratchReg);
+ imulq(ScratchReg, dest.reg);
+ }
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(Operand(src.reg), dest);
+}
+
+void MacroAssembler::mul64(const Operand& src, const Register64& dest) {
+ imulq(src, dest.reg);
+}
+
+void MacroAssembler::mul64(const Operand& src, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(src, dest);
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ lea(Operand(src, src, TimesTwo), dest);
+}
+
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ movq(imm, ScratchReg);
+ vmulsd(Operand(ScratchReg, 0), dest, dest);
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+ if (X86Encoding::IsAddressImmediate(dest.addr)) {
+ addPtr(Imm32(1), dest);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(dest.addr), scratch);
+ addPtr(Imm32(1), Address(scratch, 0));
+ }
+}
+
+void MacroAssembler::neg64(Register64 reg) { negq(reg.reg); }
+
+void MacroAssembler::negPtr(Register reg) { negq(reg); }
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ shlq(imm, dest);
+}
+
+void MacroAssembler::lshiftPtr(Register shift, Register srcDest) {
+ if (Assembler::HasBMI2()) {
+ shlxq(srcDest, shift, srcDest);
+ return;
+ }
+ MOZ_ASSERT(shift == rcx);
+ shlq_cl(srcDest);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ lshiftPtr(imm, dest.reg);
+}
+
+void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
+ if (Assembler::HasBMI2()) {
+ shlxq(srcDest.reg, shift, srcDest.reg);
+ return;
+ }
+ MOZ_ASSERT(shift == rcx);
+ shlq_cl(srcDest.reg);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ shrq(imm, dest);
+}
+
+void MacroAssembler::rshiftPtr(Register shift, Register srcDest) {
+ if (Assembler::HasBMI2()) {
+ shrxq(srcDest, shift, srcDest);
+ return;
+ }
+ MOZ_ASSERT(shift == rcx);
+ shrq_cl(srcDest);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ rshiftPtr(imm, dest.reg);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
+ if (Assembler::HasBMI2()) {
+ shrxq(srcDest.reg, shift, srcDest.reg);
+ return;
+ }
+ MOZ_ASSERT(shift == rcx);
+ shrq_cl(srcDest.reg);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ sarq(imm, dest);
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ rshiftPtrArithmetic(imm, dest.reg);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
+ if (Assembler::HasBMI2()) {
+ sarxq(srcDest.reg, shift, srcDest.reg);
+ return;
+ }
+ MOZ_ASSERT(shift == rcx);
+ sarq_cl(srcDest.reg);
+}
+
+// ===============================================================
+// Rotation functions
+
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+ Register64 dest) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ rolq_cl(dest.reg);
+}
+
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateLeft64(count, src, dest);
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+ Register64 dest) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ rorq_cl(dest.reg);
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateRight64(count, src, dest);
+}
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src,
+ Register64 dest) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ rolq(count, dest.reg);
+}
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateLeft64(count, src, dest);
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src,
+ Register64 dest) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ rorq(count, dest.reg);
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateRight64(count, src, dest);
+}
+
+// ===============================================================
+// Condition functions
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ cmpPtrSet(cond, lhs, ImmWord(static_cast<uintptr_t>(rhs.value)), dest);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ if (AssemblerX86Shared::HasLZCNT()) {
+ lzcntq(src.reg, dest);
+ return;
+ }
+
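+ // bsrq sets ZF and leaves dest undefined when the input is zero, so load
+ // 0x7F in that case (0x7F ^ 0x3F == 64). Otherwise dest holds the index of
+ // the highest set bit, and xoring with 0x3F yields 63 - index, the number
+ // of leading zeroes.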
+ Label nonzero;
+ bsrq(src.reg, dest);
+ j(Assembler::NonZero, &nonzero);
+ movq(ImmWord(0x7F), dest);
+ bind(&nonzero);
+ xorq(Imm32(0x3F), dest);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ if (AssemblerX86Shared::HasBMI1()) {
+ tzcntq(src.reg, dest);
+ return;
+ }
+
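+ // bsfq sets ZF and leaves dest undefined when the input is zero, so return
+ // 64 in that case; otherwise dest already holds the trailing-zero count.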
+ Label nonzero;
+ bsfq(src.reg, dest);
+ j(Assembler::NonZero, &nonzero);
+ movq(ImmWord(64), dest);
+ bind(&nonzero);
+}
+
+void MacroAssembler::popcnt64(Register64 src64, Register64 dest64,
+ Register tmp) {
+ Register src = src64.reg;
+ Register dest = dest64.reg;
+
+ if (AssemblerX86Shared::HasPOPCNT()) {
+ MOZ_ASSERT(tmp == InvalidReg);
+ popcntq(src, dest);
+ return;
+ }
+
+ if (src != dest) {
+ movq(src, dest);
+ }
+
+ MOZ_ASSERT(tmp != dest);
+
+ ScratchRegisterScope scratch(*this);
+
+ // Equivalent to mozilla::CountPopulation32, adapted for 64 bits.
+ // x -= (x >> 1) & m1;
+ movq(src, tmp);
+ movq(ImmWord(0x5555555555555555), scratch);
+ shrq(Imm32(1), tmp);
+ andq(scratch, tmp);
+ subq(tmp, dest);
+
+ // x = (x & m2) + ((x >> 2) & m2);
+ movq(dest, tmp);
+ movq(ImmWord(0x3333333333333333), scratch);
+ andq(scratch, dest);
+ shrq(Imm32(2), tmp);
+ andq(scratch, tmp);
+ addq(tmp, dest);
+
+ // x = (x + (x >> 4)) & m4;
+ movq(dest, tmp);
+ movq(ImmWord(0x0f0f0f0f0f0f0f0f), scratch);
+ shrq(Imm32(4), tmp);
+ addq(tmp, dest);
+ andq(scratch, dest);
+
+ // (x * h01) >> 56
+ movq(ImmWord(0x0101010101010101), scratch);
+ imulq(scratch, dest);
+ shrq(Imm32(56), dest);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branch32(cond, Operand(lhs), rhs, label);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+ }
+}
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branch32(cond, Operand(lhs), rhs, label);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+ }
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
+ Imm32 rhs, Label* label) {
+ ScratchRegisterScope scratch(*this);
+ mov(lhs, scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, rhs.reg, success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, rhs.reg, label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(rhs != scratch);
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branchPtrImpl(cond, Operand(lhs), rhs, label);
+ } else {
+ mov(ImmPtr(lhs.addr), scratch);
+ branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
+ }
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branchPtrImpl(cond, Operand(lhs), rhs, label);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
+ }
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(rhs != scratch);
+ mov(lhs, scratch);
+ branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src,
+ Register dest, Label* fail) {
+ vcvttss2sq(src, dest);
+
+ // Same trick as for doubles below: vcvttss2sq returns 0x8000000000000000
+ // on failure, which the subtract-1 overflow check detects.
+ cmpPtr(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ branchTruncateFloat32ToPtr(src, dest, fail);
+ movl(dest, dest); // Zero upper 32-bits.
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ branchTruncateFloat32ToPtr(src, dest, fail);
+
+ // Check that the result is in the int32_t range.
+ ScratchRegisterScope scratch(*this);
+ move32To64SignExtend(dest, Register64(scratch));
+ cmpPtr(dest, scratch);
+ j(Assembler::NotEqual, fail);
+
+ movl(dest, dest); // Zero upper 32-bits.
+}
+
+void MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest,
+ Label* fail) {
+ vcvttsd2sq(src, dest);
+
+ // vcvttsd2sq returns 0x8000000000000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this avoids the need to
+ // materialize that value in a register).
+ cmpPtr(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ branchTruncateDoubleToPtr(src, dest, fail);
+ movl(dest, dest); // Zero upper 32-bits.
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ branchTruncateDoubleToPtr(src, dest, fail);
+
+ // Check that the result is in the int32_t range.
+ ScratchRegisterScope scratch(*this);
+ move32To64SignExtend(dest, Register64(scratch));
+ cmpPtr(dest, scratch);
+ j(Assembler::NotEqual, fail);
+
+ movl(dest, dest); // Zero upper 32-bits.
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ test32(Operand(lhs), rhs);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ test32(Operand(scratch, 0), rhs);
+ }
+ j(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ test32(value.valueReg(), value.valueReg());
+ j(truthy ? NonZero : Zero, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ cmpPtr(valaddr, ImmWord(magic));
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ branchPtr(cond, lhs, rhs.valueReg(), label);
+}
+
+void MacroAssembler::branchToComputedAddress(const BaseIndex& address) {
+ jmp(Operand(address));
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ cmpPtr(lhs, rhs);
+ cmovCCq(cond, src, dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ cmpPtr(lhs, Operand(rhs));
+ cmovCCq(cond, src, dest);
+}
+
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ cmp32(lhs, rhs);
+ cmovCCq(cond, Operand(src), dest);
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ cmp32(lhs, rhs);
+ cmovCCq(cond, Operand(src), dest);
+}
+
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+ test32(addr, mask);
+ cmovCCq(cond, Operand(src), dest);
+}
+
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+ Imm32 mask, Register src, Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+ test32(addr, mask);
+ cmovCCq(cond, Operand(src), dest);
+}
+
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+ Register dest) {
+ cmovCCq(cond, Operand(src), dest);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(length != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(index != scratch);
+ MOZ_ASSERT(length != scratch);
+
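+ // When Spectre index masking is enabled, the cmov below zeroes the index
+ // on a failed bounds check, so code speculating past the branch cannot use
+ // an out-of-bounds index.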
+ if (JitOptions.spectreIndexMasking) {
+ move32(Imm32(0), scratch);
+ }
+
+ cmp32(index, length);
+ j(Assembler::AboveOrEqual, failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ cmovCCl(Assembler::AboveOrEqual, scratch, index);
+ }
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(index != length.base);
+ MOZ_ASSERT(length.base != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(index != scratch);
+ MOZ_ASSERT(length.base != scratch);
+
+ if (JitOptions.spectreIndexMasking) {
+ move32(Imm32(0), scratch);
+ }
+
+ cmp32(index, Operand(length));
+ j(Assembler::AboveOrEqual, failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ cmovCCl(Assembler::AboveOrEqual, scratch, index);
+ }
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(length != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(index != scratch);
+ MOZ_ASSERT(length != scratch);
+
+ if (JitOptions.spectreIndexMasking) {
+ movePtr(ImmWord(0), scratch);
+ }
+
+ cmpPtr(index, length);
+ j(Assembler::AboveOrEqual, failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ cmovCCq(Assembler::AboveOrEqual, scratch, index);
+ }
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(index != length.base);
+ MOZ_ASSERT(length.base != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(index != scratch);
+ MOZ_ASSERT(length.base != scratch);
+
+ if (JitOptions.spectreIndexMasking) {
+ movePtr(ImmWord(0), scratch);
+ }
+
+ cmpPtr(index, Operand(length));
+ j(Assembler::AboveOrEqual, failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ cmovCCq(Assembler::AboveOrEqual, scratch, index);
+ }
+}
+
+// ========================================================================
+// SIMD.
+
+// Extract lane as scalar
+
+void MacroAssembler::extractLaneInt64x2(uint32_t lane, FloatRegister src,
+ Register64 dest) {
+ if (lane == 0) {
+ vmovq(src, dest.reg);
+ } else {
+ vpextrq(lane, src, dest.reg);
+ }
+}
+
+// Replace lane value
+
+void MacroAssembler::replaceLaneInt64x2(unsigned lane, Register64 rhs,
+ FloatRegister lhsDest) {
+ vpinsrq(lane, rhs.reg, lhsDest, lhsDest);
+}
+
+void MacroAssembler::replaceLaneInt64x2(unsigned lane, FloatRegister lhs,
+ Register64 rhs, FloatRegister dest) {
+ vpinsrq(lane, rhs.reg, lhs, dest);
+}
+
+// Splat
+
+void MacroAssembler::splatX2(Register64 src, FloatRegister dest) {
+ vmovq(src.reg, dest);
+ if (HasAVX2()) {
+ vbroadcastq(Operand(dest), dest);
+ } else {
+ vpunpcklqdq(dest, dest, dest);
+ }
+}
+
+// ========================================================================
+// Truncate floating point.
+
+void MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest,
+ Register temp,
+ FloatRegister floatTemp) {
+ Label done;
+
+ loadFloat32(src, floatTemp);
+
+ truncateFloat32ToInt64(src, dest, temp);
+
+ // For the unsigned conversion, values above INT64_MAX need to be handled
+ // separately.
+ loadPtr(dest, temp);
+ branchPtr(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
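+ // The constant below is INT64_MIN as a float, so the vaddss subtracts
+ // 2^63; the or64 after the second truncation adds that bit back to form
+ // the unsigned result.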
+ storeFloat32(floatTemp, dest);
+ loadConstantFloat32(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddss(Operand(dest), floatTemp, floatTemp);
+ storeFloat32(floatTemp, dest);
+ truncateFloat32ToInt64(dest, dest, temp);
+
+ loadPtr(dest, temp);
+ or64(Imm64(0x8000000000000000), Register64(temp));
+ storePtr(temp, dest);
+
+ bind(&done);
+}
+
+void MacroAssembler::truncateDoubleToUInt64(Address src, Address dest,
+ Register temp,
+ FloatRegister floatTemp) {
+ Label done;
+
+ loadDouble(src, floatTemp);
+
+ truncateDoubleToInt64(src, dest, temp);
+
+ // As above, values above INT64_MAX need to be handled separately for the
+ // unsigned conversion.
+ loadPtr(dest, temp);
+ branchPtr(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
+ storeDouble(floatTemp, dest);
+ loadConstantDouble(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddsd(Operand(dest), floatTemp, floatTemp);
+ storeDouble(floatTemp, dest);
+ truncateDoubleToInt64(dest, dest, temp);
+
+ loadPtr(dest, temp);
+ or64(Imm64(0x8000000000000000), Register64(temp));
+ storePtr(temp, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerX64::fallibleUnboxPtrImpl(const Operand& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
+ type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
+ // dest := src XOR mask
+ // scratch := dest >> JSVAL_TAG_SHIFT
+ // fail if scratch != 0
+ //
+ // Note: src and dest can be the same register.
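+ //
+ // XORing with the shifted type tag clears the tag bits only when the value
+ // has the expected type, so any bits left above JSVAL_TAG_SHIFT indicate a
+ // mismatch.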
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ xorq(src, scratch);
+ mov(scratch, dest);
+ shrq(Imm32(JSVAL_TAG_SHIFT), scratch);
+ j(Assembler::NonZero, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(Operand(src.valueReg()), dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(Operand(src), dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(Operand(src), dest, type, fail);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+void MacroAssemblerX64::incrementInt32Value(const Address& addr) {
+ asMasm().addPtr(Imm32(1), addr);
+}
+
+void MacroAssemblerX64::unboxValue(const ValueOperand& src, AnyRegister dest,
+ JSValueType type) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr(), type);
+ }
+}
+
+template <typename T>
+void MacroAssemblerX64::loadInt32OrDouble(const T& src, FloatRegister dest) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src, dest);
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest);
+ bind(&end);
+}
+
+// If source is a double, load it into dest. If source is int32,
+// convert it to double. Else, branch to failure.
+void MacroAssemblerX64::ensureDouble(const ValueOperand& source,
+ FloatRegister dest, Label* failure) {
+ Label isDouble, done;
+ {
+ ScratchTagScope tag(asMasm(), source);
+ splitTagForTest(source, tag);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+ }
+
+ {
+ ScratchRegisterScope scratch(asMasm());
+ unboxInt32(source, scratch);
+ convertInt32ToDouble(scratch, dest);
+ }
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_MacroAssembler_x64_inl_h */
diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
new file mode 100644
index 0000000000..c6ab1fe935
--- /dev/null
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -0,0 +1,1747 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/MacroAssembler-x64.h"
+
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+#include "util/Memory.h"
+#include "vm/BigIntType.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/StringType.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest) {
+ if (maybeInlineDouble(d, dest)) {
+ return;
+ }
+ Double* dbl = getDouble(d);
+ if (!dbl) {
+ return;
+ }
+ // The constants will be stored in a pool appended to the text (see
+ // finish()), so they will always be a fixed distance from the
+ // instructions which reference them. This allows the instructions to use
+ // PC-relative addressing. Use "jump" label support code, because we need
+ // the same PC-relative address patching that jumps use.
+ JmpSrc j = masm.vmovsd_ripr(dest.encoding());
+ propagateOOM(dbl->uses.append(j));
+}
+
+void MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest) {
+ if (maybeInlineFloat(f, dest)) {
+ return;
+ }
+ Float* flt = getFloat(f);
+ if (!flt) {
+ return;
+ }
+ // See comment in loadConstantDouble
+ JmpSrc j = masm.vmovss_ripr(dest.encoding());
+ propagateOOM(flt->uses.append(j));
+}
+
+void MacroAssemblerX64::vpRiprOpSimd128(
+ const SimdConstant& v, FloatRegister reg,
+ JmpSrc (X86Encoding::BaseAssemblerX64::*op)(
+ X86Encoding::XMMRegisterID id)) {
+ SimdData* val = getSimdData(v);
+ if (!val) {
+ return;
+ }
+ JmpSrc j = (masm.*op)(reg.encoding());
+ propagateOOM(val->uses.append(j));
+}
+
+void MacroAssemblerX64::vpRiprOpSimd128(
+ const SimdConstant& v, FloatRegister src, FloatRegister dest,
+ JmpSrc (X86Encoding::BaseAssemblerX64::*op)(
+ X86Encoding::XMMRegisterID srcId, X86Encoding::XMMRegisterID destId)) {
+ SimdData* val = getSimdData(v);
+ if (!val) {
+ return;
+ }
+ JmpSrc j = (masm.*op)(src.encoding(), dest.encoding());
+ propagateOOM(val->uses.append(j));
+}
+
+void MacroAssemblerX64::loadConstantSimd128Int(const SimdConstant& v,
+ FloatRegister dest) {
+ if (maybeInlineSimd128Int(v, dest)) {
+ return;
+ }
+ vpRiprOpSimd128(v, dest, &X86Encoding::BaseAssemblerX64::vmovdqa_ripr);
+}
+
+void MacroAssemblerX64::loadConstantSimd128Float(const SimdConstant& v,
+ FloatRegister dest) {
+ if (maybeInlineSimd128Float(v, dest)) {
+ return;
+ }
+ vpRiprOpSimd128(v, dest, &X86Encoding::BaseAssemblerX64::vmovaps_ripr);
+}
+
+void MacroAssemblerX64::vpaddbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddb_ripr);
+}
+
+void MacroAssemblerX64::vpaddwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddw_ripr);
+}
+
+void MacroAssemblerX64::vpadddSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddd_ripr);
+}
+
+void MacroAssemblerX64::vpaddqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddq_ripr);
+}
+
+void MacroAssemblerX64::vpsubbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubb_ripr);
+}
+
+void MacroAssemblerX64::vpsubwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubw_ripr);
+}
+
+void MacroAssemblerX64::vpsubdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubd_ripr);
+}
+
+void MacroAssemblerX64::vpsubqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubq_ripr);
+}
+
+void MacroAssemblerX64::vpmullwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmullw_ripr);
+}
+
+void MacroAssemblerX64::vpmulldSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmulld_ripr);
+}
+
+void MacroAssemblerX64::vpaddsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddsb_ripr);
+}
+
+void MacroAssemblerX64::vpaddusbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddusb_ripr);
+}
+
+void MacroAssemblerX64::vpaddswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddsw_ripr);
+}
+
+void MacroAssemblerX64::vpadduswSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddusw_ripr);
+}
+
+void MacroAssemblerX64::vpsubsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubsb_ripr);
+}
+
+void MacroAssemblerX64::vpsubusbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubusb_ripr);
+}
+
+void MacroAssemblerX64::vpsubswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubsw_ripr);
+}
+
+void MacroAssemblerX64::vpsubuswSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubusw_ripr);
+}
+
+void MacroAssemblerX64::vpminsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminsb_ripr);
+}
+
+void MacroAssemblerX64::vpminubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminub_ripr);
+}
+
+void MacroAssemblerX64::vpminswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminsw_ripr);
+}
+
+void MacroAssemblerX64::vpminuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminuw_ripr);
+}
+
+void MacroAssemblerX64::vpminsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminsd_ripr);
+}
+
+void MacroAssemblerX64::vpminudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminud_ripr);
+}
+
+void MacroAssemblerX64::vpmaxsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxsb_ripr);
+}
+
+void MacroAssemblerX64::vpmaxubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxub_ripr);
+}
+
+void MacroAssemblerX64::vpmaxswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxsw_ripr);
+}
+
+void MacroAssemblerX64::vpmaxuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxuw_ripr);
+}
+
+void MacroAssemblerX64::vpmaxsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxsd_ripr);
+}
+
+void MacroAssemblerX64::vpmaxudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxud_ripr);
+}
+
+void MacroAssemblerX64::vpandSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpand_ripr);
+}
+
+void MacroAssemblerX64::vpxorSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpxor_ripr);
+}
+
+void MacroAssemblerX64::vporSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpor_ripr);
+}
+
+void MacroAssemblerX64::vaddpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vaddps_ripr);
+}
+
+void MacroAssemblerX64::vaddpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vaddpd_ripr);
+}
+
+void MacroAssemblerX64::vsubpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vsubps_ripr);
+}
+
+void MacroAssemblerX64::vsubpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vsubpd_ripr);
+}
+
+void MacroAssemblerX64::vdivpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vdivps_ripr);
+}
+
+void MacroAssemblerX64::vdivpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vdivpd_ripr);
+}
+
+void MacroAssemblerX64::vmulpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vmulps_ripr);
+}
+
+void MacroAssemblerX64::vmulpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vmulpd_ripr);
+}
+
+void MacroAssemblerX64::vandpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vandpd_ripr);
+}
+
+void MacroAssemblerX64::vminpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vminpd_ripr);
+}
+
+void MacroAssemblerX64::vpacksswbSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpacksswb_ripr);
+}
+
+void MacroAssemblerX64::vpackuswbSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpackuswb_ripr);
+}
+
+void MacroAssemblerX64::vpackssdwSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpackssdw_ripr);
+}
+
+void MacroAssemblerX64::vpackusdwSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpackusdw_ripr);
+}
+
+void MacroAssemblerX64::vpunpckldqSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest,
+ &X86Encoding::BaseAssemblerX64::vpunpckldq_ripr);
+}
+
+void MacroAssemblerX64::vunpcklpsSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vunpcklps_ripr);
+}
+
+void MacroAssemblerX64::vpshufbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpshufb_ripr);
+}
+
+void MacroAssemblerX64::vptestSimd128(const SimdConstant& v,
+ FloatRegister lhs) {
+ vpRiprOpSimd128(v, lhs, &X86Encoding::BaseAssemblerX64::vptest_ripr);
+}
+
+void MacroAssemblerX64::vpmaddwdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaddwd_ripr);
+}
+
+void MacroAssemblerX64::vpcmpeqbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpeqb_ripr);
+}
+
+void MacroAssemblerX64::vpcmpgtbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpgtb_ripr);
+}
+
+void MacroAssemblerX64::vpcmpeqwSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpeqw_ripr);
+}
+
+void MacroAssemblerX64::vpcmpgtwSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpgtw_ripr);
+}
+
+void MacroAssemblerX64::vpcmpeqdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpeqd_ripr);
+}
+
+void MacroAssemblerX64::vpcmpgtdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpgtd_ripr);
+}
+
+void MacroAssemblerX64::vcmpeqpsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpeqps_ripr);
+}
+
+void MacroAssemblerX64::vcmpneqpsSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpneqps_ripr);
+}
+
+void MacroAssemblerX64::vcmpltpsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpltps_ripr);
+}
+
+void MacroAssemblerX64::vcmplepsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpleps_ripr);
+}
+
+void MacroAssemblerX64::vcmpgepsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpgeps_ripr);
+}
+
+void MacroAssemblerX64::vcmpeqpdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpeqpd_ripr);
+}
+
+void MacroAssemblerX64::vcmpneqpdSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpneqpd_ripr);
+}
+
+void MacroAssemblerX64::vcmpltpdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpltpd_ripr);
+}
+
+void MacroAssemblerX64::vcmplepdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmplepd_ripr);
+}
+
+void MacroAssemblerX64::vpmaddubswSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest,
+ &X86Encoding::BaseAssemblerX64::vpmaddubsw_ripr);
+}
+
+void MacroAssemblerX64::vpmuludqSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmuludq_ripr);
+}
+
+void MacroAssemblerX64::bindOffsets(
+ const MacroAssemblerX86Shared::UsesVector& uses) {
+ for (JmpSrc src : uses) {
+ JmpDst dst(currentOffset());
+ // Using linkJump here is safe, as explained in the comment in
+ // loadConstantDouble.
+ masm.linkJump(src, dst);
+ }
+}
+
+void MacroAssemblerX64::finish() {
+ if (!doubles_.empty()) {
+ masm.haltingAlign(sizeof(double));
+ }
+ for (const Double& d : doubles_) {
+ bindOffsets(d.uses);
+ masm.doubleConstant(d.value);
+ }
+
+ if (!floats_.empty()) {
+ masm.haltingAlign(sizeof(float));
+ }
+ for (const Float& f : floats_) {
+ bindOffsets(f.uses);
+ masm.floatConstant(f.value);
+ }
+
+ // SIMD memory values must be suitably aligned.
+ if (!simds_.empty()) {
+ masm.haltingAlign(SimdMemoryAlignment);
+ }
+ for (const SimdData& v : simds_) {
+ bindOffsets(v.uses);
+ masm.simd128Constant(v.value.bytes());
+ }
+
+ MacroAssemblerX86Shared::finish();
+}
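+
+// For illustration only, the buffer laid out by finish() ends up roughly as:
+//
+//   [ machine code ... ]
+//   [ pad to 8 ]  [ double constants, 8 bytes each ]
+//   [ pad to 4 ]  [ float constants, 4 bytes each ]
+//   [ pad to SimdMemoryAlignment ]  [ SIMD constants, 16 bytes each ]
+//
+// with every RIP-relative use recorded earlier patched to address its slot.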
+
+void MacroAssemblerX64::boxValue(JSValueType type, Register src,
+ Register dest) {
+ MOZ_ASSERT(src != dest);
+
+ JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
+#ifdef DEBUG
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ Label upper32BitsZeroed;
+ movePtr(ImmWord(UINT32_MAX), dest);
+ asMasm().branchPtr(Assembler::BelowOrEqual, src, dest, &upper32BitsZeroed);
+ breakpoint();
+ bind(&upper32BitsZeroed);
+ }
+#endif
+ mov(ImmShiftedTag(tag), dest);
+ orq(src, dest);
+}
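+
+// For illustration only (assuming the usual 47-bit tag shift of the punboxing
+// layout): boxValue with an int32 payload p produces
+//
+//   dest = uint64_t(shiftedTag) | uint32_t(p)
+//
+// i.e. the tag occupies the high bits and the payload the low 32 bits, which
+// is why the DEBUG check above insists the upper 32 bits of p are zero.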
+
+void MacroAssemblerX64::handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail) {
+ // Reserve space for exception information.
+ subq(Imm32(sizeof(ResumeFromException)), rsp);
+ movq(rsp, rax);
+
+ // Call the handler.
+ using Fn = void (*)(ResumeFromException * rfe);
+ asMasm().setupUnalignedABICall(rcx);
+ asMasm().passABIArg(rax);
+ asMasm().callWithABI<Fn, HandleException>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ load32(Address(rsp, ResumeFromException::offsetOfKind()), rax);
+ asMasm().branch32(Assembler::Equal, rax,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, rax,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, rax,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, rax,
+ Imm32(ExceptionResumeKind::WasmCatch), &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
+ ret();
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfTarget()), rax);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
+ jmp(Operand(rax));
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by the finally block: the exception and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(rcx);
+  loadValue(Address(rsp, ResumeFromException::offsetOfException()), exception);
+
+ loadPtr(Address(rsp, ResumeFromException::offsetOfTarget()), rax);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
+
+ pushValue(exception);
+ pushValue(BooleanValue(true));
+ jmp(Operand(rax));
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
+ loadValue(Address(rbp, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jmp(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(rsp, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
+
+  // If profiling is enabled, then update the lastProfilingFrame to refer to
+  // the caller frame before returning. This code is shared by ForcedReturnIon
+  // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ movq(rbp, rsp);
+ pop(rbp);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to the
+ // bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfBailoutInfo()), r9);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
+ move32(Imm32(1), ReturnReg);
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
+ movePtr(ImmPtr((const void*)wasm::FailInstanceReg), InstanceReg);
+ masm.ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfTarget()), rax);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
+ loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
+ jmp(Operand(rax));
+}
+
+void MacroAssemblerX64::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerX64::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+Assembler::Condition MacroAssemblerX64::testStringTruthy(
+ bool truthy, const ValueOperand& value) {
+ ScratchRegisterScope scratch(asMasm());
+ unboxString(value, scratch);
+ cmp32(Operand(scratch, JSString::offsetOfLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+}
+
+Assembler::Condition MacroAssemblerX64::testBigIntTruthy(
+ bool truthy, const ValueOperand& value) {
+ ScratchRegisterScope scratch(asMasm());
+ unboxBigInt(value, scratch);
+ cmp32(Operand(scratch, JS::BigInt::offsetOfDigitLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+}
+
+MacroAssembler& MacroAssemblerX64::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerX64::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+    // On Windows, we cannot skip very far down the stack without touching the
+    // memory pages in between. This is corner-case code for situations where
+    // the Ion frame data for a piece of code is very large. To handle this
+    // special case, for frames over 4k in size we allocate memory on the stack
+    // incrementally, touching it as we go.
+    //
+    // When the amount is quite large, which it can be, we emit an actual loop
+    // in order to keep the function prologue compact. Compactness is a
+    // requirement for e.g. wasm's CodeRange data structure, which can encode
+    // only 8-bit offsets.
+ uint32_t amountLeft = imm32.value;
+ uint32_t fullPages = amountLeft / 4096;
+ if (fullPages <= 8) {
+ while (amountLeft > 4096) {
+ subq(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ amountLeft -= 4096;
+ }
+ subq(Imm32(amountLeft), StackPointer);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ Label top;
+ move32(Imm32(fullPages), scratch);
+ bind(&top);
+ subq(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ subl(Imm32(1), scratch);
+ j(Assembler::NonZero, &top);
+ amountLeft -= fullPages * 4096;
+ if (amountLeft) {
+ subq(Imm32(amountLeft), StackPointer);
+ }
+ }
+ }
+}
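+
+// Worked example for subFromStackPtr (illustrative): imm32 = 50000 gives
+// fullPages = 12 (> 8), so the loop path probes one 4096-byte page per
+// iteration, twelve times, and a final subq handles the remaining
+// 50000 - 12 * 4096 = 848 bytes.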
+
+void MacroAssemblerX64::convertDoubleToPtr(FloatRegister src, Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+ // Check for -0.0
+ if (negativeZeroCheck) {
+ branchNegativeZero(src, dest, fail);
+ }
+
+ ScratchDoubleScope scratch(asMasm());
+ vcvttsd2sq(src, dest);
+ asMasm().convertInt64ToDouble(Register64(dest), scratch);
+ vucomisd(scratch, src);
+ j(Assembler::Parity, fail);
+ j(Assembler::NotEqual, fail);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ movq(rsp, scratch);
+ andq(Imm32(~(ABIStackAlignment - 1)), rsp);
+ push(scratch);
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ if (dynamicAlignment_) {
+ // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+ // setupUnalignedABICall.
+ stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
+ ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
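+
+// Worked example for callWithABIPre (illustrative): with ABIStackAlignment
+// = 16, dynamic alignment, and 8 bytes of outgoing stack arguments, the saved
+// stack pointer pushed by setupUnalignedABICall accounts for another 8, so
+// ComputeByteAlignment(8 + 8, 16) = 0 and no padding is reserved; with 0
+// argument bytes it would be 8.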
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool cleanupArg) {
+ freeStack(stackAdjust);
+ if (dynamicAlignment_) {
+ pop(rsp);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+static bool IsIntArgReg(Register reg) {
+ for (uint32_t i = 0; i < NumIntArgRegs; i++) {
+ if (IntArgRegs[i] == reg) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+ if (IsIntArgReg(fun)) {
+ // Callee register may be clobbered for an argument. Move the callee to
+ // r10, a volatile, non-argument register.
+ propagateOOM(moveResolver_.addMove(MoveOperand(fun), MoveOperand(r10),
+ MoveOp::GENERAL));
+ fun = r10;
+ }
+
+ MOZ_ASSERT(!IsIntArgReg(fun));
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ Address safeFun = fun;
+ if (IsIntArgReg(safeFun.base)) {
+ // Callee register may be clobbered for an argument. Move the callee to
+ // r10, a volatile, non-argument register.
+ propagateOOM(moveResolver_.addMove(MoveOperand(fun.base), MoveOperand(r10),
+ MoveOp::GENERAL));
+ safeFun.base = r10;
+ }
+
+ MOZ_ASSERT(!IsIntArgReg(safeFun.base));
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(safeFun);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ boxValue(ValueTypeFromMIRType(type), reg.gpr(), dest.valueReg());
+ return;
+ }
+
+ ScratchDoubleScope scratch(*this);
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, freg);
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ if (src == dest) {
+ return;
+ }
+ movq(src.valueReg(), dest.valueReg());
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
+ writeDataRelocation(src);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ if (ptr != buffer) {
+ movePtr(ptr, buffer);
+ }
+ andPtr(Imm32(int32_t(~gc::ChunkMask)), buffer);
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
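+
+// For illustration only: gc::ChunkMask is ChunkSize - 1, so clearing those low
+// bits rounds ptr down to the start of its chunk; the chunk's store buffer
+// pointer (null for tenured chunks, non-null for nursery chunks) then sits at
+// the fixed offset gc::ChunkStoreBufferOffset from that base.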
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != scratch);
+
+ movePtr(ptr, scratch);
+ andPtr(Imm32(int32_t(~gc::ChunkMask)), scratch);
+ branchPtr(InvertCondition(cond), Address(scratch, gc::ChunkStoreBufferOffset),
+ ImmWord(0), label);
+}
+
+template <typename T>
+void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
+ const T& value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ Label done;
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+
+ getGCThingValueChunk(value, temp);
+ branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
+ ImmWord(0), label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ branchValueIsNurseryCellImpl(cond, address, temp, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ branchValueIsNurseryCellImpl(cond, value, temp, label);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(lhs.valueReg() != scratch);
+ moveValue(rhs, ValueOperand(scratch));
+ cmpPtr(lhs.valueReg(), scratch);
+ j(cond, label);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ boxDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ if (value.constant()) {
+ storeValue(value.value(), dest);
+ } else {
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
+ dest);
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+void MacroAssembler::PushBoxed(FloatRegister reg) {
+ subq(Imm32(sizeof(double)), StackPointer);
+ boxDouble(reg, Address(StackPointer, 0));
+ adjustFrame(sizeof(double));
+}
+
+// ========================================================================
+// wasm support
+
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+ Operand srcAddr, AnyRegister out) {
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ memoryBarrierBefore(access.sync());
+
+ MOZ_ASSERT_IF(
+ access.isZeroExtendSimd128Load(),
+ access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
+ MOZ_ASSERT_IF(
+ access.isSplatSimd128Load(),
+ access.type() == Scalar::Uint8 || access.type() == Scalar::Uint16 ||
+ access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
+ MOZ_ASSERT_IF(access.isWidenSimd128Load(), access.type() == Scalar::Float64);
+
+ append(access, size());
+ switch (access.type()) {
+ case Scalar::Int8:
+ movsbl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint8:
+ if (access.isSplatSimd128Load()) {
+ vbroadcastb(srcAddr, out.fpu());
+ } else {
+ movzbl(srcAddr, out.gpr());
+ }
+ break;
+ case Scalar::Int16:
+ movswl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint16:
+ if (access.isSplatSimd128Load()) {
+ vbroadcastw(srcAddr, out.fpu());
+ } else {
+ movzwl(srcAddr, out.gpr());
+ }
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movl(srcAddr, out.gpr());
+ break;
+ case Scalar::Float32:
+ if (access.isSplatSimd128Load()) {
+ vbroadcastss(srcAddr, out.fpu());
+ } else {
+ // vmovss does the right thing also for access.isZeroExtendSimd128Load()
+ vmovss(srcAddr, out.fpu());
+ }
+ break;
+ case Scalar::Float64:
+ if (access.isSplatSimd128Load()) {
+ vmovddup(srcAddr, out.fpu());
+ } else if (access.isWidenSimd128Load()) {
+ switch (access.widenSimdOp()) {
+ case wasm::SimdOp::V128Load8x8S:
+ vpmovsxbw(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load8x8U:
+ vpmovzxbw(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load16x4S:
+ vpmovsxwd(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load16x4U:
+ vpmovzxwd(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load32x2S:
+ vpmovsxdq(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load32x2U:
+ vpmovzxdq(srcAddr, out.fpu());
+ break;
+ default:
+ MOZ_CRASH("Unexpected widening op for wasmLoad");
+ }
+ } else {
+ // vmovsd does the right thing also for access.isZeroExtendSimd128Load()
+ vmovsd(srcAddr, out.fpu());
+ }
+ break;
+ case Scalar::Simd128:
+ MacroAssemblerX64::loadUnalignedSimd128(srcAddr, out.fpu());
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("int64 loads must use load64");
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected scalar type for wasmLoad");
+ }
+
+ memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Operand srcAddr, Register64 out) {
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ memoryBarrierBefore(access.sync());
+
+ append(access, size());
+ switch (access.type()) {
+ case Scalar::Int8:
+ movsbq(srcAddr, out.reg);
+ break;
+ case Scalar::Uint8:
+ movzbq(srcAddr, out.reg);
+ break;
+ case Scalar::Int16:
+ movswq(srcAddr, out.reg);
+ break;
+ case Scalar::Uint16:
+ movzwq(srcAddr, out.reg);
+ break;
+ case Scalar::Int32:
+ movslq(srcAddr, out.reg);
+ break;
+ // Int32 to int64 moves zero-extend by default.
+ case Scalar::Uint32:
+ movl(srcAddr, out.reg);
+ break;
+ case Scalar::Int64:
+ movq(srcAddr, out.reg);
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Simd128:
+ MOZ_CRASH("float loads must use wasmLoad");
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected scalar type for wasmLoadI64");
+ }
+
+ memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Operand dstAddr) {
+ // NOTE: the generated code must match the assembly code in gen_store in
+ // GenerateAtomicOperations.py
+ memoryBarrierBefore(access.sync());
+
+ append(access, masm.size());
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ movb(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ movw(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movl(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int64:
+ movq(value.gpr(), dstAddr);
+ break;
+ case Scalar::Float32:
+ storeUncanonicalizedFloat32(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float64:
+ storeUncanonicalizedDouble(value.fpu(), dstAddr);
+ break;
+ case Scalar::Simd128:
+ MacroAssemblerX64::storeUnalignedSimd128(value.fpu(), dstAddr);
+ break;
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ vcvttsd2sq(input, output);
+
+ // Check that the result is in the uint32_t range.
+ ScratchRegisterScope scratch(*this);
+ move32(Imm32(0xffffffff), scratch);
+ cmpq(scratch, output);
+ j(Assembler::Above, oolEntry);
+}
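+
+// Worked example for wasmTruncateDoubleToUInt32 (illustrative): an input of
+// 5e9 truncates to 5000000000, which is above 0xffffffff, so the Above branch
+// takes the OOL path; 4e9 is in range and falls through with the result in
+// the low 32 bits of output.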
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ vcvttss2sq(input, output);
+
+ // Check that the result is in the uint32_t range.
+ ScratchRegisterScope scratch(*this);
+ move32(Imm32(0xffffffff), scratch);
+ cmpq(scratch, output);
+ j(Assembler::Above, oolEntry);
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ vcvttsd2sq(input, output.reg);
+ cmpq(Imm32(1), output.reg);
+ j(Assembler::Overflow, oolEntry);
+ bind(oolRejoin);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ vcvttss2sq(input, output.reg);
+ cmpq(Imm32(1), output.reg);
+ j(Assembler::Overflow, oolEntry);
+ bind(oolRejoin);
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+  // If the input is below 2^63, vcvttsd2sq will do the right thing, so
+  // we use it directly. Otherwise we subtract 2^63, convert to int64,
+  // and then add 2^63 back to the result (by setting its sign bit).
+
+ Label isLarge;
+
+ ScratchDoubleScope scratch(*this);
+ loadConstantDouble(double(0x8000000000000000), scratch);
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
+ vcvttsd2sq(input, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ jump(oolRejoin);
+
+ bind(&isLarge);
+
+ moveDouble(input, tempReg);
+ vsubsd(scratch, tempReg, tempReg);
+ vcvttsd2sq(tempReg, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ or64(Imm64(0x8000000000000000), output);
+
+ bind(oolRejoin);
+}
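+
+// Worked example for wasmTruncateDoubleToUInt64 (illustrative): an input of
+// 2^63 + 2048 takes the isLarge path, converts input - 2^63 = 2048 to the
+// int64 2048, and then ORs in the sign bit, yielding 0x8000000000000800,
+// i.e. 2^63 + 2048 read as an unsigned 64-bit result.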
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+  // If the input is below 2^63, vcvttss2sq will do the right thing, so
+  // we use it directly. Otherwise we subtract 2^63, convert to int64,
+  // and then add 2^63 back to the result (by setting its sign bit).
+
+ Label isLarge;
+
+ ScratchFloat32Scope scratch(*this);
+ loadConstantFloat32(float(0x8000000000000000), scratch);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
+ vcvttss2sq(input, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ jump(oolRejoin);
+
+ bind(&isLarge);
+
+ moveFloat32(input, tempReg);
+ vsubss(scratch, tempReg, tempReg);
+ vcvttss2sq(tempReg, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ or64(Imm64(0x8000000000000000), output);
+
+ bind(oolRejoin);
+}
+
+void MacroAssembler::widenInt32(Register r) {
+ move32To64ZeroExtend(r, Register64(r));
+}
+
+// ========================================================================
+// Convert floating point.
+
+void MacroAssembler::convertInt64ToDouble(Register64 input,
+ FloatRegister output) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ vcvtsq2sd(input.reg, output, output);
+}
+
+void MacroAssembler::convertInt64ToFloat32(Register64 input,
+ FloatRegister output) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroFloat32(output);
+
+ vcvtsq2ss(input.reg, output, output);
+}
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return true; }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 input,
+ FloatRegister output,
+ Register temp) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+  // If the input's sign bit is not set we use vcvtsq2sd directly.
+  // Otherwise we halve the value, OR the dropped low bit back in so the
+  // rounding is preserved, convert to double, and double the result.
+ Label done;
+ Label isSigned;
+
+ testq(input.reg, input.reg);
+ j(Assembler::Signed, &isSigned);
+ vcvtsq2sd(input.reg, output, output);
+ jump(&done);
+
+ bind(&isSigned);
+
+ ScratchRegisterScope scratch(*this);
+ mov(input.reg, scratch);
+ mov(input.reg, temp);
+ shrq(Imm32(1), scratch);
+ andq(Imm32(1), temp);
+ orq(temp, scratch);
+
+ vcvtsq2sd(scratch, output, output);
+ vaddsd(output, output, output);
+
+ bind(&done);
+}
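+
+// Worked example for convertUInt64ToDouble (illustrative): input = 2^64 - 1
+// has its sign bit set, so scratch = (input >> 1) | (input & 1)
+// = 0x7fffffffffffffff; converting that and doubling it gives 2^64, the
+// correctly rounded double for UINT64_MAX.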
+
+void MacroAssembler::convertUInt64ToFloat32(Register64 input,
+ FloatRegister output,
+ Register temp) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroFloat32(output);
+
+ // See comment in convertUInt64ToDouble.
+ Label done;
+ Label isSigned;
+
+ testq(input.reg, input.reg);
+ j(Assembler::Signed, &isSigned);
+ vcvtsq2ss(input.reg, output, output);
+ jump(&done);
+
+ bind(&isSigned);
+
+ ScratchRegisterScope scratch(*this);
+ mov(input.reg, scratch);
+ mov(input.reg, temp);
+ shrq(Imm32(1), scratch);
+ andq(Imm32(1), temp);
+ orq(temp, scratch);
+
+ vcvtsq2ss(scratch, output, output);
+ vaddss(output, output, output);
+
+ bind(&done);
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt64ToDouble(Register64(src), dest);
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ MOZ_ASSERT(output.reg == rax);
+ if (expected != output) {
+ movq(expected.reg, output.reg);
+ }
+ append(access, size());
+ lock_cmpxchgq(replacement.reg, Operand(mem));
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ MOZ_ASSERT(output.reg == rax);
+ if (expected != output) {
+ movq(expected.reg, output.reg);
+ }
+ append(access, size());
+ lock_cmpxchgq(replacement.reg, Operand(mem));
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ if (value != output) {
+ movq(value.reg, output.reg);
+ }
+ append(access, masm.size());
+ xchgq(output.reg, Operand(mem));
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 value, Register64 output) {
+ if (value != output) {
+ movq(value.reg, output.reg);
+ }
+ append(access, masm.size());
+ xchgq(output.reg, Operand(mem));
+}
+
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access, AtomicOp op,
+ Register value, const T& mem, Register temp,
+ Register output) {
+ // NOTE: the generated code must match the assembly code in gen_fetchop in
+ // GenerateAtomicOperations.py
+ if (op == AtomicFetchAddOp) {
+ if (value != output) {
+ masm.movq(value, output);
+ }
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+ masm.lock_xaddq(output, Operand(mem));
+ } else if (op == AtomicFetchSubOp) {
+ if (value != output) {
+ masm.movq(value, output);
+ }
+ masm.negq(output);
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+ masm.lock_xaddq(output, Operand(mem));
+ } else {
+ Label again;
+ MOZ_ASSERT(output == rax);
+ MOZ_ASSERT(value != output);
+ MOZ_ASSERT(value != temp);
+ MOZ_ASSERT(temp != output);
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+ masm.movq(Operand(mem), rax);
+ masm.bind(&again);
+ masm.movq(rax, temp);
+ switch (op) {
+ case AtomicFetchAndOp:
+ masm.andq(value, temp);
+ break;
+ case AtomicFetchOrOp:
+ masm.orq(value, temp);
+ break;
+ case AtomicFetchXorOp:
+ masm.xorq(value, temp);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ masm.lock_cmpxchgq(temp, Operand(mem));
+ masm.j(MacroAssembler::NonZero, &again);
+ }
+}
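+
+// For illustration only: a 64-bit fetch-sub is expressed above as a fetch-add
+// of the negated value, e.g. fetchSub(mem, 5) becomes a lock xaddq of -5; the
+// old value comes back in output just as the generic cmpxchg loop would
+// produce it, but without looping.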
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, op, value.reg, mem, temp.reg, output.reg);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, op, value.reg, mem, temp.reg, output.reg);
+}
+
+template <typename T>
+static void AtomicEffectOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access, AtomicOp op,
+ Register value, const T& mem) {
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.lock_addq(value, Operand(mem));
+ break;
+ case AtomicFetchSubOp:
+ masm.lock_subq(value, Operand(mem));
+ break;
+ case AtomicFetchAndOp:
+ masm.lock_andq(value, Operand(mem));
+ break;
+ case AtomicFetchOrOp:
+ masm.lock_orq(value, Operand(mem));
+ break;
+ case AtomicFetchXorOp:
+ masm.lock_xorq(value, Operand(mem));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void MacroAssembler::wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem) {
+ AtomicEffectOp64(*this, &access, op, value.reg, mem);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization&,
+ const Address& mem, Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ // NOTE: the generated code must match the assembly code in gen_cmpxchg in
+ // GenerateAtomicOperations.py
+ MOZ_ASSERT(output.reg == rax);
+ if (expected != output) {
+ movq(expected.reg, output.reg);
+ }
+ lock_cmpxchgq(replacement.reg, Operand(mem));
+}
+
+void MacroAssembler::compareExchange64(const Synchronization&,
+ const BaseIndex& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ MOZ_ASSERT(output.reg == rax);
+ if (expected != output) {
+ movq(expected.reg, output.reg);
+ }
+ lock_cmpxchgq(replacement.reg, Operand(mem));
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization&,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ // NOTE: the generated code must match the assembly code in gen_exchange in
+ // GenerateAtomicOperations.py
+ if (value != output) {
+ movq(value.reg, output.reg);
+ }
+ xchgq(output.reg, Operand(mem));
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization&,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ if (value != output) {
+ movq(value.reg, output.reg);
+ }
+ xchgq(output.reg, Operand(mem));
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem) {
+ AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem) {
+ AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
+}
+
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ return leaRipRelative(dest);
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ ptrdiff_t off = target - loc;
+ MOZ_ASSERT(off > ptrdiff_t(INT32_MIN));
+ MOZ_ASSERT(off < ptrdiff_t(INT32_MAX));
+ PatchWrite_Imm32(loc, Imm32(off));
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ cmpPtr(index.reg, boundsCheckLimit.reg);
+ j(cond, ok);
+ if (JitOptions.spectreIndexMasking) {
+ cmovCCq(cond, Operand(boundsCheckLimit.reg), index.reg);
+ }
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ cmpPtr(index.reg, Operand(boundsCheckLimit));
+ j(cond, ok);
+ if (JitOptions.spectreIndexMasking) {
+ cmovCCq(cond, Operand(boundsCheckLimit), index.reg);
+ }
+}
+
+// ========================================================================
+// Integer compare-then-conditionally-load/move operations.
+
+// cmpMove, Cond-Reg-Reg-Reg-Reg cases
+
+template <size_t CmpSize, size_t MoveSize>
+void MacroAssemblerX64::cmpMove(Condition cond, Register lhs, Register rhs,
+ Register falseVal, Register trueValAndDest) {
+ if constexpr (CmpSize == 32) {
+ cmp32(lhs, rhs);
+ } else {
+ static_assert(CmpSize == 64);
+ cmpPtr(lhs, rhs);
+ }
+ if constexpr (MoveSize == 32) {
+ cmovCCl(cond, Operand(falseVal), trueValAndDest);
+ } else {
+ static_assert(MoveSize == 64);
+ cmovCCq(cond, Operand(falseVal), trueValAndDest);
+ }
+}
+template void MacroAssemblerX64::cmpMove<32, 32>(Condition cond, Register lhs,
+ Register rhs,
+ Register falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpMove<32, 64>(Condition cond, Register lhs,
+ Register rhs,
+ Register falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpMove<64, 32>(Condition cond, Register lhs,
+ Register rhs,
+ Register falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpMove<64, 64>(Condition cond, Register lhs,
+ Register rhs,
+ Register falseVal,
+ Register trueValAndDest);
+
+// cmpMove, Cond-Reg-Addr-Reg-Reg cases
+
+template <size_t CmpSize, size_t MoveSize>
+void MacroAssemblerX64::cmpMove(Condition cond, Register lhs,
+ const Address& rhs, Register falseVal,
+ Register trueValAndDest) {
+ if constexpr (CmpSize == 32) {
+ cmp32(lhs, Operand(rhs));
+ } else {
+ static_assert(CmpSize == 64);
+ cmpPtr(lhs, Operand(rhs));
+ }
+ if constexpr (MoveSize == 32) {
+ cmovCCl(cond, Operand(falseVal), trueValAndDest);
+ } else {
+ static_assert(MoveSize == 64);
+ cmovCCq(cond, Operand(falseVal), trueValAndDest);
+ }
+}
+template void MacroAssemblerX64::cmpMove<32, 32>(Condition cond, Register lhs,
+ const Address& rhs,
+ Register falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpMove<32, 64>(Condition cond, Register lhs,
+ const Address& rhs,
+ Register falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpMove<64, 32>(Condition cond, Register lhs,
+ const Address& rhs,
+ Register falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpMove<64, 64>(Condition cond, Register lhs,
+ const Address& rhs,
+ Register falseVal,
+ Register trueValAndDest);
+
+// cmpLoad, Cond-Reg-Reg-Addr-Reg cases
+
+template <size_t CmpSize, size_t LoadSize>
+void MacroAssemblerX64::cmpLoad(Condition cond, Register lhs, Register rhs,
+ const Address& falseVal,
+ Register trueValAndDest) {
+ if constexpr (CmpSize == 32) {
+ cmp32(lhs, rhs);
+ } else {
+ static_assert(CmpSize == 64);
+ cmpPtr(lhs, rhs);
+ }
+ if constexpr (LoadSize == 32) {
+ cmovCCl(cond, Operand(falseVal), trueValAndDest);
+ } else {
+ static_assert(LoadSize == 64);
+ cmovCCq(cond, Operand(falseVal), trueValAndDest);
+ }
+}
+template void MacroAssemblerX64::cmpLoad<32, 32>(Condition cond, Register lhs,
+ Register rhs,
+ const Address& falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpLoad<32, 64>(Condition cond, Register lhs,
+ Register rhs,
+ const Address& falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpLoad<64, 32>(Condition cond, Register lhs,
+ Register rhs,
+ const Address& falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpLoad<64, 64>(Condition cond, Register lhs,
+ Register rhs,
+ const Address& falseVal,
+ Register trueValAndDest);
+
+// cmpLoad, Cond-Reg-Addr-Addr-Reg cases
+
+template <size_t CmpSize, size_t LoadSize>
+void MacroAssemblerX64::cmpLoad(Condition cond, Register lhs,
+ const Address& rhs, const Address& falseVal,
+ Register trueValAndDest) {
+ if constexpr (CmpSize == 32) {
+ cmp32(lhs, Operand(rhs));
+ } else {
+ static_assert(CmpSize == 64);
+ cmpPtr(lhs, Operand(rhs));
+ }
+ if constexpr (LoadSize == 32) {
+ cmovCCl(cond, Operand(falseVal), trueValAndDest);
+ } else {
+ static_assert(LoadSize == 64);
+ cmovCCq(cond, Operand(falseVal), trueValAndDest);
+ }
+}
+template void MacroAssemblerX64::cmpLoad<32, 32>(Condition cond, Register lhs,
+ const Address& rhs,
+ const Address& falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpLoad<32, 64>(Condition cond, Register lhs,
+ const Address& rhs,
+ const Address& falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpLoad<64, 32>(Condition cond, Register lhs,
+ const Address& rhs,
+ const Address& falseVal,
+ Register trueValAndDest);
+template void MacroAssemblerX64::cmpLoad<64, 64>(Condition cond, Register lhs,
+ const Address& rhs,
+ const Address& falseVal,
+ Register trueValAndDest);
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/x64/MacroAssembler-x64.h b/js/src/jit/x64/MacroAssembler-x64.h
new file mode 100644
index 0000000000..80e2dfed28
--- /dev/null
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -0,0 +1,1218 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_MacroAssembler_x64_h
+#define jit_x64_MacroAssembler_x64_h
+
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+#include "js/HeapAPI.h"
+#include "wasm/WasmBuiltins.h"
+
+namespace js {
+namespace jit {
+
+struct ImmShiftedTag : public ImmWord {
+ explicit ImmShiftedTag(JSValueShiftedTag shtag) : ImmWord((uintptr_t)shtag) {}
+
+ explicit ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSVAL_TYPE_TO_SHIFTED_TAG(type))) {}
+};
+
+struct ImmTag : public Imm32 {
+ explicit ImmTag(JSValueTag tag) : Imm32(tag) {}
+};
+
+// ScratchTagScope and ScratchTagScopeRelease are used to manage the tag
+// register for splitTagForTest(), which has different register management on
+// different platforms. On 64-bit platforms it requires a scratch register that
+// does not interfere with other operations; on 32-bit platforms it uses a
+// register that is already part of the Value.
+//
+// The ScratchTagScope RAII type acquires the appropriate register; a reference
+// to a variable of this type is then passed to splitTagForTest().
+//
+// On 64-bit platforms ScratchTagScopeRelease makes the owned scratch register
+// available in a dynamic scope during compilation. However, it is important to
+// remember that this does not preserve the register value in any way, so this
+// RAII type should only be used along paths that eventually branch past further
+// uses of the extracted tag value.
+//
+// On 32-bit platforms ScratchTagScopeRelease has no effect, since it does not
+// manage a separate register; it only aliases a register in the ValueOperand.
+
+class ScratchTagScope : public ScratchRegisterScope {
+ public:
+ ScratchTagScope(MacroAssembler& masm, const ValueOperand&)
+ : ScratchRegisterScope(masm) {}
+};
+
+class ScratchTagScopeRelease {
+ ScratchTagScope* ts_;
+
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
+ ts_->release();
+ }
+ ~ScratchTagScopeRelease() { ts_->reacquire(); }
+};
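+
+// Illustrative use, a sketch mirroring how e.g. MacroAssemblerX64::ensureDouble
+// in MacroAssembler-x64-inl.h uses these types:
+//
+//   {
+//     ScratchTagScope tag(asMasm(), value);
+//     splitTagForTest(value, tag);
+//     asMasm().branchTestInt32(Assembler::Equal, tag, &isInt32);
+//     // A ScratchTagScopeRelease would go here if the scratch register were
+//     // needed for something else before the tag is tested again.
+//   }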
+
+class MacroAssemblerX64 : public MacroAssemblerX86Shared {
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ void bindOffsets(const MacroAssemblerX86Shared::UsesVector&);
+
+ void vpRiprOpSimd128(const SimdConstant& v, FloatRegister reg,
+ JmpSrc (X86Encoding::BaseAssemblerX64::*op)(
+ X86Encoding::XMMRegisterID id));
+
+ void vpRiprOpSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest,
+ JmpSrc (X86Encoding::BaseAssemblerX64::*op)(
+ X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId));
+
+ public:
+ using MacroAssemblerX86Shared::load32;
+ using MacroAssemblerX86Shared::store16;
+ using MacroAssemblerX86Shared::store32;
+
+ MacroAssemblerX64() = default;
+
+  // The buffer is about to be linked; make sure any constant pools or excess
+  // bookkeeping has been flushed to the instruction stream.
+ void finish();
+
+ /////////////////////////////////////////////////////////////////
+ // X64 helpers.
+ /////////////////////////////////////////////////////////////////
+ void writeDataRelocation(const Value& val) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // Assembler::TraceDataRelocations.
+ if (val.isGCThing()) {
+ gc::Cell* cell = val.toGCThing();
+ if (cell && gc::IsInsideNursery(cell)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(masm.currentOffset());
+ }
+ }
+
+ // Refers to the upper 32 bits of a 64-bit Value operand.
+ // On x86_64, the upper 32 bits do not necessarily only contain the type.
+ Operand ToUpper32(Operand base) {
+ switch (base.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(Register::FromCode(base.base()), base.disp() + 4);
+
+ case Operand::MEM_SCALE:
+ return Operand(Register::FromCode(base.base()),
+ Register::FromCode(base.index()), base.scale(),
+ base.disp() + 4);
+
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ static inline Operand ToUpper32(const Address& address) {
+ return Operand(address.base, address.offset + 4);
+ }
+ static inline Operand ToUpper32(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale,
+ address.offset + 4);
+ }
+
+ uint32_t Upper32Of(JSValueShiftedTag tag) { return uint32_t(tag >> 32); }
+
+ JSValueShiftedTag GetShiftedTag(JSValueType type) {
+ return (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // X86/X64-common interface.
+ /////////////////////////////////////////////////////////////////
+
+ void storeValue(ValueOperand val, Operand dest) {
+ movq(val.valueReg(), dest);
+ }
+ void storeValue(ValueOperand val, const Address& dest) {
+ storeValue(val, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(JSValueType type, Register reg, const T& dest) {
+ // Value types with 32-bit payloads can be emitted as two 32-bit moves.
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ movl(reg, Operand(dest));
+ movl(Imm32(Upper32Of(GetShiftedTag(type))), ToUpper32(Operand(dest)));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ boxValue(type, reg, scratch);
+ movq(scratch, Operand(dest));
+ }
+ }
+ template <typename T>
+ void storeValue(const Value& val, const T& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ if (val.isGCThing()) {
+ movWithPatch(ImmWord(val.asRawBits()), scratch);
+ writeDataRelocation(val);
+ } else {
+ mov(ImmWord(val.asRawBits()), scratch);
+ }
+ movq(scratch, Operand(dest));
+ }
+ void storeValue(ValueOperand val, BaseIndex dest) {
+ storeValue(val, Operand(dest));
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+ void storePrivateValue(Register src, const Address& dest) {
+ storePtr(src, dest);
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ storePtr(imm, dest);
+ }
+ void loadValue(Operand src, ValueOperand val) { movq(src, val.valueReg()); }
+ void loadValue(Address src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadValue(const BaseIndex& src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+ void tagValue(JSValueType type, Register payload, ValueOperand dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.valueReg() != scratch);
+ if (payload != dest.valueReg()) {
+ movq(payload, dest.valueReg());
+ }
+ mov(ImmShiftedTag(type), scratch);
+ orq(scratch, dest.valueReg());
+ }
+ void pushValue(ValueOperand val) { push(val.valueReg()); }
+ void popValue(ValueOperand val) { pop(val.valueReg()); }
+ void pushValue(const Value& val) {
+ if (val.isGCThing()) {
+ ScratchRegisterScope scratch(asMasm());
+ movWithPatch(ImmWord(val.asRawBits()), scratch);
+ writeDataRelocation(val);
+ push(scratch);
+ } else {
+ push(ImmWord(val.asRawBits()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ ScratchRegisterScope scratch(asMasm());
+ boxValue(type, reg, scratch);
+ push(scratch);
+ }
+ void pushValue(const Address& addr) { push(Operand(addr)); }
+
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ push(Operand(addr));
+ }
+
+ void boxValue(JSValueType type, Register src, Register dest);
+
+ Condition testUndefined(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testInt32(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testNull(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testString(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testBigInt(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+ }
+ Condition testObject(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
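+
+  // Doubles, numbers, GC things and primitives are identified by tag ranges
+  // rather than by a single tag value; the corresponding tests below therefore
+  // turn an Equal/NotEqual request into an unsigned range comparison on the
+  // extracted tag.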
+ Condition testDouble(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_TAG_MAX_DOUBLE));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
+ Condition testNumber(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JS::detail::ValueUpperInclNumberTag));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
+ Condition testGCThing(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+
+ Condition testMagic(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testError(Condition cond, Register tag) {
+ return testMagic(cond, tag);
+ }
+ Condition testPrimitive(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag));
+ return cond == Equal ? Below : AboveOrEqual;
+ }
+
+ Condition testUndefined(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNumber(cond, scratch);
+ }
+ Condition testNull(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testString(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testBigInt(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testBigInt(cond, scratch);
+ }
+ Condition testObject(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+ Condition testPrimitive(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testPrimitive(cond, scratch);
+ }
+
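+  // Note (explanatory): for types whose payload fits in 32 bits (undefined,
+  // int32, boolean, null) the upper 32 bits of the boxed Value are exactly the
+  // upper half of the shifted tag, so those Address-based tests can compare
+  // just the high word; pointer-typed values keep payload bits there, so the
+  // remaining tests fall back to splitTag.
+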
+ Condition testUndefined(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src),
+ Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_UNDEFINED))));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_INT32))));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_BOOLEAN))));
+ return cond;
+ }
+ Condition testDouble(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNumber(cond, scratch);
+ }
+ Condition testNull(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_NULL))));
+ return cond;
+ }
+ Condition testString(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testBigInt(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testBigInt(cond, scratch);
+ }
+ Condition testObject(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testPrimitive(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testPrimitive(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+
+ Condition testUndefined(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testNull(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testString(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testBigInt(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testBigInt(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testObject(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+
+ Condition isMagic(Condition cond, const ValueOperand& src, JSWhyMagic why) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ cmpPtr(src.valueReg(), ImmWord(magic));
+ return cond;
+ }
+
+ void cmpPtr(Register lhs, const ImmWord rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ if (intptr_t(rhs.value) <= INT32_MAX && intptr_t(rhs.value) >= INT32_MIN) {
+ cmpPtr(lhs, Imm32(int32_t(rhs.value)));
+ } else {
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ }
+ void cmpPtr(Register lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(Register lhs, const ImmGCPtr rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ void cmpPtr(Register lhs, const Imm32 rhs) { cmpq(rhs, lhs); }
+ void cmpPtr(const Operand& lhs, const ImmGCPtr rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(!lhs.containsReg(scratch));
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ void cmpPtr(const Operand& lhs, const ImmWord rhs) {
+ if ((intptr_t)rhs.value <= INT32_MAX && (intptr_t)rhs.value >= INT32_MIN) {
+ cmpPtr(lhs, Imm32((int32_t)rhs.value));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ }
+ void cmpPtr(const Operand& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Address& lhs, const ImmGCPtr rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmWord rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Operand& lhs, Register rhs) { cmpq(rhs, lhs); }
+ void cmpPtr(Register lhs, const Operand& rhs) { cmpq(rhs, lhs); }
+ void cmpPtr(const Operand& lhs, const Imm32 rhs) { cmpq(rhs, lhs); }
+ void cmpPtr(const Address& lhs, Register rhs) { cmpPtr(Operand(lhs), rhs); }
+ void cmpPtr(Register lhs, Register rhs) { cmpq(rhs, lhs); }
+ void testPtr(Register lhs, Register rhs) { testq(rhs, lhs); }
+ void testPtr(Register lhs, Imm32 rhs) { testq(rhs, lhs); }
+ void testPtr(const Operand& lhs, Imm32 rhs) { testq(rhs, lhs); }
+ void test64(Register lhs, Register rhs) { testq(rhs, lhs); }
+ void test64(Register lhs, const Imm64 rhs) {
+ if ((intptr_t)rhs.value <= INT32_MAX && (intptr_t)rhs.value >= INT32_MIN) {
+ testq(Imm32((int32_t)rhs.value), lhs);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ movq(ImmWord(rhs.value), scratch);
+ testq(scratch, lhs);
+ }
+ }
+
+ // Compare-then-conditionally-move/load, for integer types
+ template <size_t CmpSize, size_t MoveSize>
+ void cmpMove(Condition cond, Register lhs, Register rhs, Register falseVal,
+ Register trueValAndDest);
+
+ template <size_t CmpSize, size_t MoveSize>
+ void cmpMove(Condition cond, Register lhs, const Address& rhs,
+ Register falseVal, Register trueValAndDest);
+
+ template <size_t CmpSize, size_t LoadSize>
+ void cmpLoad(Condition cond, Register lhs, Register rhs,
+ const Address& falseVal, Register trueValAndDest);
+
+ template <size_t CmpSize, size_t LoadSize>
+ void cmpLoad(Condition cond, Register lhs, const Address& rhs,
+ const Address& falseVal, Register trueValAndDest);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+
+ void movePtr(Register src, Register dest) { movq(src, dest); }
+ void movePtr(Register src, const Operand& dest) { movq(src, dest); }
+ void movePtr(ImmWord imm, Register dest) { mov(imm, dest); }
+ void movePtr(ImmPtr imm, Register dest) { mov(imm, dest); }
+ void movePtr(wasm::SymbolicAddress imm, Register dest) { mov(imm, dest); }
+ void movePtr(ImmGCPtr imm, Register dest) { movq(imm, dest); }
+ void loadPtr(AbsoluteAddress address, Register dest) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movq(Operand(address), dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ loadPtr(Address(scratch, 0x0), dest);
+ }
+ }
+ void loadPtr(const Address& address, Register dest) {
+ movq(Operand(address), dest);
+ }
+ void load64(const Address& address, Register dest) {
+ movq(Operand(address), dest);
+ }
+ void loadPtr(const Operand& src, Register dest) { movq(src, dest); }
+ void loadPtr(const BaseIndex& src, Register dest) {
+ movq(Operand(src), dest);
+ }
+ void loadPrivate(const Address& src, Register dest) { loadPtr(src, dest); }
+ void load32(AbsoluteAddress address, Register dest) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movl(Operand(address), dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ load32(Address(scratch, 0x0), dest);
+ }
+ }
+ void load64(const Operand& address, Register64 dest) {
+ movq(address, dest.reg);
+ }
+ void load64(const Address& address, Register64 dest) {
+ movq(Operand(address), dest.reg);
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ movq(Operand(address), dest.reg);
+ }
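+  // x64 integer loads have no alignment requirement, so the unaligned variant
+  // simply forwards to load64 (store64Unaligned below does the same).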
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ load64(src, dest);
+ }
+ template <typename T>
+ void storePtr(ImmWord imm, T address) {
+ if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
+ movq(Imm32((int32_t)imm.value), Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(imm, scratch);
+ movq(scratch, Operand(address));
+ }
+ }
+ template <typename T>
+ void storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+ }
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address) {
+ ScratchRegisterScope scratch(asMasm());
+ movq(imm, scratch);
+ movq(scratch, Operand(address));
+ }
+ void storePtr(Register src, const Address& address) {
+ movq(src, Operand(address));
+ }
+ void store64(Register src, const Address& address) {
+ movq(src, Operand(address));
+ }
+ void store64(Register64 src, const Operand& address) {
+ movq(src.reg, address);
+ }
+ void storePtr(Register src, const BaseIndex& address) {
+ movq(src, Operand(address));
+ }
+ void storePtr(Register src, const Operand& dest) { movq(src, dest); }
+ void storePtr(Register src, AbsoluteAddress address) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movq(src, Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ storePtr(src, Address(scratch, 0x0));
+ }
+ }
+ void store32(Register src, AbsoluteAddress address) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movl(src, Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ store32(src, Address(scratch, 0x0));
+ }
+ }
+ void store16(Register src, AbsoluteAddress address) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movw(src, Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ store16(src, Address(scratch, 0x0));
+ }
+ }
+ void store64(Register64 src, Address address) { storePtr(src.reg, address); }
+ void store64(Register64 src, const BaseIndex& address) {
+ storePtr(src.reg, address);
+ }
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+ void store64(Imm64 imm, const BaseIndex& address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+ template <typename S, typename T>
+ void store64Unaligned(const S& src, const T& dest) {
+ store64(src, dest);
+ }
+
+ void splitTag(Register src, Register dest) {
+ if (src != dest) {
+ movq(src, dest);
+ }
+ shrq(Imm32(JSVAL_TAG_SHIFT), dest);
+ }
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+ void splitTag(const Operand& operand, Register dest) {
+ movq(operand, dest);
+ shrq(Imm32(JSVAL_TAG_SHIFT), dest);
+ }
+ void splitTag(const Address& operand, Register dest) {
+ splitTag(Operand(operand), dest);
+ }
+ void splitTag(const BaseIndex& operand, Register dest) {
+ splitTag(Operand(operand), dest);
+ }
+
+ // Extracts the tag of a value and places it in tag.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ splitTag(value, tag);
+ }
+ void cmpTag(const ValueOperand& operand, ImmTag tag) {
+ ScratchTagScope reg(asMasm(), operand);
+ splitTagForTest(operand, reg);
+ cmp32(reg, tag);
+ }
+
+ Condition testMagic(Condition cond, const ValueOperand& src) {
+ ScratchTagScope scratch(asMasm(), src);
+ splitTagForTest(src, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testError(Condition cond, const ValueOperand& src) {
+ return testMagic(cond, src);
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister) {
+ vmovq(src, dest.valueReg());
+ }
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ MOZ_ASSERT(src != dest.valueReg());
+ boxValue(type, src, dest.valueReg());
+ }
+
+  // Note that the |dest| register here may be ScratchReg, so we must not
+  // use ScratchReg as a temporary in these unboxers.
+ void unboxInt32(const ValueOperand& src, Register dest) {
+ movl(src.valueReg(), dest);
+ }
+ void unboxInt32(const Operand& src, Register dest) { movl(src, dest); }
+ void unboxInt32(const Address& src, Register dest) {
+ unboxInt32(Operand(src), dest);
+ }
+ void unboxInt32(const BaseIndex& src, Register dest) {
+ unboxInt32(Operand(src), dest);
+ }
+ template <typename T>
+ void unboxDouble(const T& src, FloatRegister dest) {
+ loadDouble(Operand(src), dest);
+ }
+
+ void unboxArgObjMagic(const ValueOperand& src, Register dest) {
+ unboxArgObjMagic(Operand(src.valueReg()), dest);
+ }
+ void unboxArgObjMagic(const Operand& src, Register dest) {
+ mov(ImmWord(0), dest);
+ }
+ void unboxArgObjMagic(const Address& src, Register dest) {
+ unboxArgObjMagic(Operand(src), dest);
+ }
+
+ void unboxBoolean(const ValueOperand& src, Register dest) {
+ movl(src.valueReg(), dest);
+ }
+ void unboxBoolean(const Operand& src, Register dest) { movl(src, dest); }
+ void unboxBoolean(const Address& src, Register dest) {
+ unboxBoolean(Operand(src), dest);
+ }
+ void unboxBoolean(const BaseIndex& src, Register dest) {
+ unboxBoolean(Operand(src), dest);
+ }
+
+ void unboxMagic(const ValueOperand& src, Register dest) {
+ movl(src.valueReg(), dest);
+ }
+
+ void unboxDouble(const ValueOperand& src, FloatRegister dest) {
+ vmovq(src.valueReg(), dest);
+ }
+
+ void notBoolean(const ValueOperand& val) { xorq(Imm32(1), val.valueReg()); }
+
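+  // A boxed non-double Value is payload | (tag << JSVAL_TAG_SHIFT), and the
+  // payload bits never overlap the shifted tag, so xor-ing with the shifted
+  // tag below strips the tag and recovers the payload without needing a mask.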
+ void unboxNonDouble(const ValueOperand& src, Register dest,
+ JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ movl(src.valueReg(), dest);
+ return;
+ }
+ if (src.valueReg() == dest) {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ xorq(scratch, dest);
+ } else {
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), dest);
+ xorq(src.valueReg(), dest);
+ }
+ }
+ void unboxNonDouble(const Operand& src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ movl(src, dest);
+ return;
+ }
+ // Explicitly permits |dest| to be used in |src|.
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest != scratch);
+ if (src.containsReg(dest)) {
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ // If src is already a register, then src and dest are the same
+ // thing and we don't need to move anything into dest.
+ if (src.kind() != Operand::REG) {
+ movq(src, dest);
+ }
+ xorq(scratch, dest);
+ } else {
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), dest);
+ xorq(src, dest);
+ }
+ }
+ void unboxNonDouble(const Address& src, Register dest, JSValueType type) {
+ unboxNonDouble(Operand(src), dest, type);
+ }
+ void unboxNonDouble(const BaseIndex& src, Register dest, JSValueType type) {
+ unboxNonDouble(Operand(src), dest, type);
+ }
+
+ void unboxString(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxString(const Operand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxString(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+
+ void unboxSymbol(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+ }
+ void unboxSymbol(const Operand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+ }
+
+ void unboxBigInt(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+ void unboxBigInt(const Operand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+ void unboxBigInt(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+
+ void unboxObject(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const Operand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const Address& src, Register dest) {
+ unboxNonDouble(Operand(src), dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(Operand(src), dest, JSVAL_TYPE_OBJECT);
+ }
+
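+  // The object and null tags differ only in JS::detail::ValueObjectOrNullBit,
+  // so unboxing with the object tag and then clearing that bit yields the
+  // object pointer for objects and nullptr for null.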
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmWord(~JS::detail::ValueObjectOrNullBit), scratch);
+ andq(scratch, dest);
+ }
+
+ // This should only be used for GC barrier code, to unbox a GC thing Value.
+ // It's fine there because we don't depend on the actual Value type (all Cells
+ // are treated the same way). In almost all other cases this would be
+ // Spectre-unsafe - use unboxNonDouble and friends instead.
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ movq(ImmWord(JS::detail::ValueGCThingPayloadMask), dest);
+ andq(Operand(src), dest);
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ MOZ_ASSERT(src.valueReg() != dest);
+ movq(ImmWord(JS::detail::ValueGCThingPayloadMask), dest);
+ andq(src.valueReg(), dest);
+ }
+
+ // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
+ void getGCThingValueChunk(const Address& src, Register dest) {
+ movq(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
+ andq(Operand(src), dest);
+ }
+ void getGCThingValueChunk(const ValueOperand& src, Register dest) {
+ MOZ_ASSERT(src.valueReg() != dest);
+ movq(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
+ andq(src.valueReg(), dest);
+ }
+
+ inline void fallibleUnboxPtrImpl(const Operand& src, Register dest,
+ JSValueType type, Label* fail);
+
+  // Extended unboxing API. If the payload is already in a register, returns
+  // that register. Otherwise, moves the payload into the given scratch
+  // register and returns that.
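+  // On x64 the payload always shares its register with the tag bits, so these
+  // overloads unconditionally unbox into |scratch| and return it.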
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxObject(address, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxSymbol(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ loadPtr(address, scratch);
+ splitTag(scratch, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ inline void unboxValue(const ValueOperand& src, AnyRegister dest,
+ JSValueType type);
+
+  // These functions use the low 32 bits of the full value register.
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+
+ void loadConstantDouble(double d, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest);
+ void loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest);
+ void vpaddbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpadddSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmullwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmulldSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddusbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpadduswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubusbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubuswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpandSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpxorSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vporSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vaddpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vaddpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vsubpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vsubpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vdivpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vdivpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vmulpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vmulpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vandpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vminpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpacksswbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpackuswbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpackssdwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpackusdwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpunpckldqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vunpcklpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpshufbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vptestSimd128(const SimdConstant& v, FloatRegister lhs);
+ void vpmaddwdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpeqbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpgtbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpeqwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpgtwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpeqdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpgtdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpeqpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpneqpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpltpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmplepsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpgepsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpeqpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpneqpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpltpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmplepdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaddubswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmuludqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+
+ public:
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
+ test32(operand.valueReg(), operand.valueReg());
+ return truthy ? NonZero : Zero;
+ }
+ Condition testStringTruthy(bool truthy, const ValueOperand& value);
+ Condition testBigIntTruthy(bool truthy, const ValueOperand& value);
+
+ template <typename T>
+ inline void loadInt32OrDouble(const T& src, FloatRegister dest);
+
+ template <typename T>
+ void loadUnboxedValue(const T& src, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(src, dest.fpu());
+ } else {
+ unboxNonDouble(Operand(src), dest.gpr(), ValueTypeFromMIRType(type));
+ }
+ }
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8: {
+ ScratchRegisterScope scratch(asMasm());
+ unboxNonDouble(value, scratch, type);
+ storePtr(scratch, address);
+ if (type == JSVAL_TYPE_OBJECT) {
+ // Ideally we would call unboxObjectOrNull, but we need an extra
+ // scratch register for that. So unbox as object, then clear the
+ // object-or-null bit.
+ mov(ImmWord(~JS::detail::ValueObjectOrNullBit), scratch);
+ andq(scratch, Operand(address));
+ }
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+  // Checks whether a double is representable as a 64-bit integer. If so, the
+  // integer is written to the output register. Otherwise, control jumps to the
+  // |fail| label. This function overwrites the scratch float register.
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
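+  // These rely on the unsigned 32-bit input being zero-extended in |src| (the
+  // usual x64 convention), so the signed 64-bit vcvtsq2sd/vcvtsq2ss conversion
+  // also produces the correct result for inputs >= 2^31.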
+ void convertUInt32ToDouble(Register src, FloatRegister dest) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
+ vcvtsq2sd(src, dest, dest);
+ }
+
+ void convertUInt32ToFloat32(Register src, FloatRegister dest) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
+ vcvtsq2ss(src, dest, dest);
+ }
+
+ inline void incrementInt32Value(const Address& addr);
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ public:
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+using MacroAssemblerSpecific = MacroAssemblerX64;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_MacroAssembler_x64_h */
diff --git a/js/src/jit/x64/SharedICHelpers-x64-inl.h b/js/src/jit/x64/SharedICHelpers-x64-inl.h
new file mode 100644
index 0000000000..c40eb45c74
--- /dev/null
+++ b/js/src/jit/x64/SharedICHelpers-x64-inl.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_SharedICHelpers_x64_inl_h
+#define jit_x64_SharedICHelpers_x64_inl_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ ScratchRegisterScope scratch(masm);
+
+  // We can assume at this point that R0 and R1 have been pushed.
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.movq(FramePointer, scratch);
+ masm.subq(StackPointer, scratch);
+ masm.subq(Imm32(argSize), scratch);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(ICTailCallReg);
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register) {
+#ifdef DEBUG
+ // Compute frame size. Because the return address is still on the stack,
+ // this is:
+ //
+ // FramePointer
+ // - StackPointer
+ // - sizeof(return address)
+
+ ScratchRegisterScope scratch(masm);
+ masm.movq(FramePointer, scratch);
+ masm.subq(StackPointer, scratch);
+ masm.subq(Imm32(sizeof(void*)), scratch); // Return address.
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Push the return address that's currently on top of the stack.
+ masm.Push(Operand(StackPointer, 0));
+
+ // Replace the original return address with the frame descriptor.
+ masm.storePtr(ImmWord(MakeFrameDescriptor(FrameType::BaselineJS)),
+ Address(StackPointer, sizeof(uintptr_t)));
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ masm.Push(ICStubReg);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_SharedICHelpers_x64_inl_h */
diff --git a/js/src/jit/x64/SharedICHelpers-x64.h b/js/src/jit/x64/SharedICHelpers-x64.h
new file mode 100644
index 0000000000..8233db5735
--- /dev/null
+++ b/js/src/jit/x64/SharedICHelpers-x64.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_SharedICHelpers_x64_h
+#define jit_x64_SharedICHelpers_x64_h
+
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from the stack top to the topmost Value inside an IC stub (this is
+// the return address).
+static const size_t ICStackValueOffset = sizeof(void*);
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+ masm.Pop(ICTailCallReg);
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+ masm.Push(ICTailCallReg);
+}
+
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Call the stubcode.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.ret(); }
+
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ Address stubAddr(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP);
+ masm.loadPtr(stubAddr, ICStubReg);
+
+ masm.mov(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // The return address is on top of the stack, followed by the frame
+ // descriptor. Use a pop instruction to overwrite the frame descriptor
+ // with the return address. Note that pop increments the stack pointer
+ // before computing the address.
+ masm.Pop(Operand(StackPointer, 0));
+}
+
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ masm.guardedCallPreBarrier(addr, type);
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ masm.jmp(Operand(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_SharedICHelpers_x64_h */
diff --git a/js/src/jit/x64/SharedICRegisters-x64.h b/js/src/jit/x64/SharedICRegisters-x64.h
new file mode 100644
index 0000000000..8e52e5f3c9
--- /dev/null
+++ b/js/src/jit/x64/SharedICRegisters-x64.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_SharedICRegisters_x64_h
+#define jit_x64_SharedICRegisters_x64_h
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/x64/Assembler-x64.h"
+
+namespace js {
+namespace jit {
+
+static constexpr ValueOperand R0(rcx);
+static constexpr ValueOperand R1(rbx);
+static constexpr ValueOperand R2(rax);
+
+static constexpr Register ICTailCallReg = rsi;
+static constexpr Register ICStubReg = rdi;
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = xmm0;
+static constexpr FloatRegister FloatReg1 = xmm1;
+static constexpr FloatRegister FloatReg2 = xmm2;
+static constexpr FloatRegister FloatReg3 = xmm3;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_SharedICRegisters_x64_h */
diff --git a/js/src/jit/x64/Trampoline-x64.cpp b/js/src/jit/x64/Trampoline-x64.cpp
new file mode 100644
index 0000000000..dcc9291299
--- /dev/null
+++ b/js/src/jit/x64/Trampoline-x64.cpp
@@ -0,0 +1,888 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "jit/x64/SharedICRegisters-x64.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::IsPowerOfTwo;
+
+// This struct reflects the contents of the stack entry.
+// Given a `CommonFrameLayout* frame`:
+// - `frame->prevType()` should be `FrameType::CppToJSJit`.
+// - Then EnterJITStackEntry starts at:
+// frame->callerFramePtr() + EnterJITStackEntry::offsetFromFP()
+// (the offset is negative, so this subtracts from the frame pointer)
+struct EnterJITStackEntry {
+ // Offset from frame pointer to EnterJITStackEntry*.
+ static constexpr int32_t offsetFromFP() {
+ return -int32_t(offsetof(EnterJITStackEntry, rbp));
+ }
+
+ void* result;
+
+#if defined(_WIN64)
+ struct XMM {
+ using XMM128 = char[16];
+ XMM128 xmm6;
+ XMM128 xmm7;
+ XMM128 xmm8;
+ XMM128 xmm9;
+ XMM128 xmm10;
+ XMM128 xmm11;
+ XMM128 xmm12;
+ XMM128 xmm13;
+ XMM128 xmm14;
+ XMM128 xmm15;
+ } xmm;
+
+  // 16-byte alignment for the xmm registers above.
+ uint64_t xmmPadding;
+
+ void* rsi;
+ void* rdi;
+#endif
+
+ void* r15;
+ void* r14;
+ void* r13;
+ void* r12;
+ void* rbx;
+ void* rbp;
+
+ // Pushed by CALL.
+ void* rip;
+};
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+// Generates a trampoline for calling JIT-compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard x64
+// fastcall calling convention.
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ masm.assertStackAlignment(ABIStackAlignment,
+ -int32_t(sizeof(uintptr_t)) /* return address */);
+
+ const Register reg_code = IntArgReg0;
+ const Register reg_argc = IntArgReg1;
+ const Register reg_argv = IntArgReg2;
+ static_assert(OsrFrameReg == IntArgReg3);
+
+#if defined(_WIN64)
+ const Address token = Address(rbp, 16 + ShadowStackSpace);
+ const Operand scopeChain = Operand(rbp, 24 + ShadowStackSpace);
+ const Operand numStackValuesAddr = Operand(rbp, 32 + ShadowStackSpace);
+ const Operand result = Operand(rbp, 40 + ShadowStackSpace);
+#else
+ const Register token = IntArgReg4;
+ const Register scopeChain = IntArgReg5;
+ const Operand numStackValuesAddr = Operand(rbp, 16 + ShadowStackSpace);
+ const Operand result = Operand(rbp, 24 + ShadowStackSpace);
+#endif
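+
+  // The remaining EnterJitCode arguments are the callee token, the environment
+  // chain, the number of stack values to reserve for Baseline OSR, and |vp|,
+  // which receives the return Value at the end of this trampoline.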
+
+ // Note: the stack pushes below must match the fields in EnterJITStackEntry.
+
+ // Save old stack frame pointer, set new stack frame pointer.
+ masm.push(rbp);
+ masm.mov(rsp, rbp);
+
+ // Save non-volatile registers. These must be saved by the trampoline, rather
+ // than by the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.push(rbx);
+ masm.push(r12);
+ masm.push(r13);
+ masm.push(r14);
+ masm.push(r15);
+#if defined(_WIN64)
+ masm.push(rdi);
+ masm.push(rsi);
+
+  // 16-byte alignment for vmovdqa.
+ masm.subq(Imm32(sizeof(EnterJITStackEntry::XMM) + 8), rsp);
+
+ masm.vmovdqa(xmm6, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm6)));
+ masm.vmovdqa(xmm7, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm7)));
+ masm.vmovdqa(xmm8, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm8)));
+ masm.vmovdqa(xmm9, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm9)));
+ masm.vmovdqa(xmm10, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm10)));
+ masm.vmovdqa(xmm11, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm11)));
+ masm.vmovdqa(xmm12, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm12)));
+ masm.vmovdqa(xmm13, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm13)));
+ masm.vmovdqa(xmm14, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm14)));
+ masm.vmovdqa(xmm15, Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm15)));
+#endif
+
+ // Save arguments passed in registers needed after function call.
+ masm.push(result);
+
+ // End of pushes reflected in EnterJITStackEntry, i.e. EnterJITStackEntry
+ // starts at this rsp.
+
+  // Remember the number of bytes occupied by the argument vector.
+ masm.mov(reg_argc, r13);
+
+  // If we are constructing, the argument count also needs to include
+  // |newTarget|.
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, token,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.addq(Imm32(1), r13);
+
+ masm.bind(&noNewTarget);
+ }
+
+ masm.shll(Imm32(3), r13); // r13 = argc * sizeof(Value)
+ static_assert(sizeof(Value) == 1 << 3, "Constant is baked in assembly code");
+
+ // Guarantee stack alignment of Jit frames.
+ //
+ // This code compensates for the offset created by the copy of the vector of
+ // arguments, such that the jit frame will be aligned once the return
+ // address is pushed on the stack.
+ //
+ // In the computation of the offset, we omit the size of the JitFrameLayout
+ // which is pushed on the stack, as the JitFrameLayout size is a multiple of
+ // the JitStackAlignment.
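+  //
+  // (Illustrative example, assuming 8-byte Values and JitStackAlignment == 16:
+  // with three arguments r13 == 24, so r12 becomes (rsp - 24) & 15 and
+  // subtracting it from rsp makes rsp - 24 a multiple of 16 before the copy.)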
+ masm.mov(rsp, r12);
+ masm.subq(r13, r12);
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ masm.andl(Imm32(JitStackAlignment - 1), r12);
+ masm.subq(r12, rsp);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // r13 still stores the number of bytes in the argument vector.
+ masm.addq(reg_argv, r13); // r13 points above last argument or newTarget
+
+  // While r13 > reg_argv, push arguments.
+ {
+ Label header, footer;
+ masm.bind(&header);
+
+ masm.cmpPtr(r13, reg_argv);
+ masm.j(AssemblerX86Shared::BelowOrEqual, &footer);
+
+ masm.subq(Imm32(8), r13);
+ masm.push(Operand(r13, 0));
+ masm.jmp(&header);
+
+ masm.bind(&footer);
+ }
+
+ // Load the number of actual arguments. |result| is used to store the
+ // actual number of arguments without adding an extra argument to the enter
+ // JIT.
+ masm.movq(result, reg_argc);
+ masm.unboxInt32(Operand(reg_argc, 0), reg_argc);
+
+ // Push the callee token.
+ masm.push(token);
+
+ // Push the descriptor.
+ masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, reg_argc, reg_argc);
+
+ CodeLabel returnLabel;
+ Label oomReturnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(rbp));
+ regs.take(OsrFrameReg);
+ regs.take(reg_code);
+
+ Register scratch = regs.takeAny();
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register numStackValues = regs.takeAny();
+ masm.movq(numStackValuesAddr, numStackValues);
+
+ // Push return address
+ masm.mov(&returnLabel, scratch);
+ masm.push(scratch);
+
+ // Frame prologue.
+ masm.push(rbp);
+ masm.mov(rsp, rbp);
+
+ // Reserve frame.
+ masm.subPtr(Imm32(BaselineFrame::Size()), rsp);
+
+ Register framePtrScratch = regs.takeAny();
+ masm.touchFrameValues(numStackValues, scratch, framePtrScratch);
+ masm.mov(rsp, framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ Register valuesSize = regs.takeAny();
+ masm.mov(numStackValues, valuesSize);
+ masm.shll(Imm32(3), valuesSize);
+ masm.subPtr(valuesSize, rsp);
+
+ // Enter exit frame.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(Imm32(0)); // Fake return address.
+ masm.push(FramePointer);
+ // No GC things to mark, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ regs.add(valuesSize);
+
+ masm.push(reg_code);
+
+ using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtrScratch); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ masm.pop(reg_code);
+
+ MOZ_ASSERT(reg_code != ReturnReg);
+
+ Label error;
+ masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), rsp);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(rbp, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(reg_code);
+
+ // OOM: frame epilogue, load error value, discard return address and return.
+ masm.bind(&error);
+ masm.mov(rbp, rsp);
+ masm.pop(rbp);
+ masm.addPtr(Imm32(sizeof(uintptr_t)), rsp); // Return address.
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&oomReturnLabel);
+
+ masm.bind(&notOsr);
+ masm.movq(scopeChain, R1.scratchReg());
+ }
+
+ // The call will push the return address and frame pointer on the stack, thus
+ // we check that the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+ // Call function.
+ masm.callJitNoProfiler(reg_code);
+
+ {
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ masm.addCodeLabel(returnLabel);
+ masm.bind(&oomReturnLabel);
+ }
+
+ // Discard arguments and padding. Set rsp to the address of the
+ // EnterJITStackEntry on the stack.
+ masm.lea(Operand(rbp, EnterJITStackEntry::offsetFromFP()), rsp);
+
+ /*****************************************************************
+ Place return value where it belongs, pop all saved registers
+ *****************************************************************/
+ masm.pop(r12); // vp
+ masm.storeValue(JSReturnOperand, Operand(r12, 0));
+
+ // Restore non-volatile registers.
+#if defined(_WIN64)
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm6)), xmm6);
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm7)), xmm7);
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm8)), xmm8);
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm9)), xmm9);
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm10)), xmm10);
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm11)), xmm11);
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm12)), xmm12);
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm13)), xmm13);
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm14)), xmm14);
+ masm.vmovdqa(Operand(rsp, offsetof(EnterJITStackEntry::XMM, xmm15)), xmm15);
+
+ masm.addq(Imm32(sizeof(EnterJITStackEntry::XMM) + 8), rsp);
+
+ masm.pop(rsi);
+ masm.pop(rdi);
+#endif
+ masm.pop(r15);
+ masm.pop(r14);
+ masm.pop(r13);
+ masm.pop(r12);
+ masm.pop(rbx);
+
+ // Restore frame pointer and return.
+ masm.pop(rbp);
+ masm.ret();
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ if (frameStackAddress->prevType() != FrameType::CppToJSJit) {
+ // This is not a CppToJSJit frame, there are no C++ registers here.
+ return mozilla::Nothing{};
+ }
+
+ // Compute pointer to start of EnterJITStackEntry on the stack.
+ uint8_t* fp = frameStackAddress->callerFramePtr();
+ auto* enterJITStackEntry = reinterpret_cast<EnterJITStackEntry*>(
+ fp + EnterJITStackEntry::offsetFromFP());
+
+ // Extract native function call registers.
+ ::JS::ProfilingFrameIterator::RegisterState registerState;
+ registerState.fp = enterJITStackEntry->rbp;
+ registerState.pc = enterJITStackEntry->rip;
+ // sp should be inside the caller's frame, so set sp to the value of the stack
+ // pointer before the call to the EnterJit trampoline.
+ registerState.sp = &enterJITStackEntry->rip + 1;
+ // No lr in this world.
+ registerState.lr = nullptr;
+ return mozilla::Some(registerState);
+}
+
+// Push AllRegs in a way that is compatible with RegisterDump, regardless of
+// what PushRegsInMask might do to reduce the set size.
+static void DumpAllRegs(MacroAssembler& masm) {
+#ifdef ENABLE_WASM_SIMD
+ masm.PushRegsInMask(AllRegs);
+#else
+ // When SIMD isn't supported, PushRegsInMask reduces the set of float
+ // registers to be double-sized, while the RegisterDump expects each of
+ // the float registers to have the maximal possible size
+ // (Simd128DataSize). To work around this, we just spill the double
+ // registers by hand here, using the register dump offset directly.
+ for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more();
+ ++iter) {
+ masm.Push(*iter);
+ }
+
+ masm.reserveStack(sizeof(RegisterDump::FPUArray));
+ for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more();
+ ++iter) {
+ FloatRegister reg = *iter;
+ Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
+ masm.storeDouble(reg, spillAddress);
+ }
+#endif
+}
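When SIMD is not enabled, the loop above spills each double by hand at the offset RegisterDump expects. Per FloatRegister::getRegisterDumpOffsetInBytes() later in this patch, that offset is encoding() * sizeof(FloatRegisters::RegisterContent), i.e. 16 bytes per slot because the union always reserves room for a v128. A minimal standalone sketch of that layout, using a stand-in union rather than the real RegisterDump types:

#include <cstdint>
#include <cstdio>

// Stand-in for FloatRegisters::RegisterContent: every slot is wide enough to
// hold a full 128-bit vector, even when only a double is stored in it.
union RegisterContent {
  float s;
  double d;
  uint8_t v128[16];
};

int main() {
  // Mirrors FloatRegister::getRegisterDumpOffsetInBytes() in this patch.
  for (uint32_t encoding = 0; encoding < 16; encoding++) {
    uint32_t offset = encoding * sizeof(RegisterContent);
    printf("xmm%u -> dump offset %u\n", encoding, offset);  // xmm5 -> 80, ...
  }
  return 0;
}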
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ // See explanatory comment in x86's JitRuntime::generateInvalidator.
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+ // Push registers such that we can access them from [base + code].
+ DumpAllRegs(masm);
+
+ masm.movq(rsp, rax); // Argument to jit::InvalidationBailout.
+
+ // Make space for InvalidationBailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movq(rsp, rbx);
+
+ using Fn =
+ bool (*)(InvalidationBailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupUnalignedABICall(rdx);
+ masm.passABIArg(rax);
+ masm.passABIArg(rbx);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(r9); // Get the bailoutInfo outparam.
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r9.
+ masm.jmp(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ // Do not erase the frame pointer in this function.
+
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- rsp
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.movq(rsp, FramePointer);
+
+ // Load argc.
+ masm.loadNumActualArgs(FramePointer, r8);
+
+ // Load |nformals| into %rcx.
+ masm.loadPtr(Address(rbp, RectifierFrameLayout::offsetOfCalleeToken()), rax);
+ masm.mov(rax, rcx);
+ masm.andq(Imm32(uint32_t(CalleeTokenMask)), rcx);
+ masm.loadFunctionArgCount(rcx, rcx);
+
+  // Stash another copy in r11, since we are going to do destructive operations
+  // on rcx.
+ masm.mov(rcx, r11);
+
+ static_assert(
+ CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.mov(rax, rdx);
+ masm.andq(Imm32(uint32_t(CalleeToken_FunctionConstructing)), rdx);
+
+  // Including |this| and |new.target|, there are (|nformals| + 1 +
+  // isConstructing) arguments to push to the stack. Then we push a
+  // JitFrameLayout. We compute the padding expressed as the number of extra
+  // |undefined| values to push on the stack.
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(
+ JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+ static_assert(IsPowerOfTwo(JitStackValueAlignment),
+ "must have power of two for masm.andl to do its job");
+
+ masm.addl(
+ Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */),
+ rcx);
+ masm.addl(rdx, rcx);
+ masm.andl(Imm32(~(JitStackValueAlignment - 1)), rcx);
+
+ // Load the number of |undefined|s to push into %rcx. Subtract 1 for |this|.
+ masm.subl(r8, rcx);
+ masm.subl(Imm32(1), rcx);
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ] <- rsp
+ // '--- #r8 ---'
+ //
+ // Rectifier frame:
+ // [rbp'] [undef] [undef] [undef] [arg2] [arg1] [this] [ [argc] [callee]
+ // [descr] [raddr] ]
+ // '------- #rcx --------' '--- #r8 ---'
+
+ // Copy the number of actual arguments into rdx.
+ masm.mov(r8, rdx);
+
+ masm.moveValue(UndefinedValue(), ValueOperand(r10));
+
+  // Push undefined values (including the padding).
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.push(r10);
+ masm.subl(Imm32(1), rcx);
+ masm.j(Assembler::NonZero, &undefLoopTop);
+ }
+
+ static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+ // Get the topmost argument.
+ BaseIndex b(FramePointer, r8, TimesEight, sizeof(RectifierFrameLayout));
+ masm.lea(Operand(b), rcx);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ masm.addl(Imm32(1), r8);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.push(Operand(rcx, 0x0));
+ masm.subq(Imm32(sizeof(Value)), rcx);
+ masm.subl(Imm32(1), r8);
+ masm.j(Assembler::NonZero, &copyLoopTop);
+ }
+
+  // If constructing, copy |newTarget|.
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, rax,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // thisFrame[numFormals] = prevFrame[argc]
+ ValueOperand newTarget(r10);
+
+ // Load vp[argc]. Add sizeof(Value) for |this|.
+ BaseIndex newTargetSrc(FramePointer, rdx, TimesEight,
+ sizeof(RectifierFrameLayout) + sizeof(Value));
+ masm.loadValue(newTargetSrc, newTarget);
+
+ // Again, 1 for |this|
+ BaseIndex newTargetDest(rsp, r11, TimesEight, sizeof(Value));
+ masm.storeValue(newTarget, newTargetDest);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ]
+  //
+  // Rectifier frame:
+  // [rbp'] <- rbp [undef] [undef] [undef] [arg2] [arg1] [this] <- rsp
+  //        [ [argc] [callee] [descr] [raddr] ]
+ //
+
+ // Construct JitFrameLayout.
+ masm.push(rax); // callee token
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, rdx, rdx);
+
+ // Call the target function.
+ masm.andq(Imm32(uint32_t(CalleeTokenMask)), rax);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(rax, rax);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(rax);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(rax, rbx, &noBaselineScript);
+ masm.callJitNoProfiler(rbx);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(rax, rax);
+ masm.callJitNoProfiler(rax);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
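For reference, the padding arithmetic above rounds |nformals| + 1 (+1 when constructing) up to JitStackValueAlignment, then subtracts the actual argument count and |this| to get the number of |undefined| values to push. A standalone sketch of the same computation, assuming JitStackValueAlignment == 2 (8-byte Values with a 16-byte JitStackAlignment); the real constant comes from the JIT headers:

#include <cstdint>
#include <cstdio>

// Mirror of the rectifier's padding arithmetic, with an assumed alignment.
static uint32_t UndefinedsToPush(uint32_t nformals, uint32_t argc,
                                 bool constructing) {
  const uint32_t JitStackValueAlignment = 2;  // assumed value
  uint32_t count = nformals + 1 /* |this| */ + (constructing ? 1 : 0);
  // Round up to the alignment, exactly like the addl/andl pair above.
  count = (count + JitStackValueAlignment - 1) & ~(JitStackValueAlignment - 1);
  // The caller already pushed argc arguments plus |this|.
  return count - argc - 1;
}

int main() {
  // A function with 3 formals called with 1 actual argument:
  printf("%u\n", UndefinedsToPush(3, 1, /*constructing=*/false));  // 2
  // The same call as a constructor (one extra slot for |new.target|):
  printf("%u\n", UndefinedsToPush(3, 1, /*constructing=*/true));   // 4
  return 0;
}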
+
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // Push registers such that we can access them from [base + code].
+ DumpAllRegs(masm);
+
+ // Get the stack pointer into a register, pre-alignment.
+ masm.movq(rsp, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, r8);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movq(rsp, r9);
+
+ // Call the bailout function.
+ using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupUnalignedABICall(rax);
+ masm.passABIArg(r8);
+ masm.passABIArg(r9);
+ masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(r9); // Get the bailoutInfo outparam.
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r9.
+ masm.jmp(bailoutTail);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set must be a superset of Volatile register set");
+
+ // The context is the first argument.
+ Register cxreg = IntArgReg0;
+ regs.take(cxreg);
+
+ // Stack is:
+ // ... frame ...
+ // +12 [args]
+ // +8 descriptor
+ // +0 returnAddress
+ //
+ // Push the frame pointer to finish the exit frame, then link it up.
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+ // Save the current stack pointer as the base for copying arguments.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = r10;
+ regs.take(argsBase);
+ masm.lea(Operand(rsp, ExitFrameLayout::SizeWithFooter()), argsBase);
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Bool:
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movq(esp, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ if (f.argPassedInFloatReg(explicitArg)) {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ } else {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ }
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ case VMFunctionData::DoubleByRef:
+ MOZ_CRASH("NYI: x64 callVM should not be used with 128bits values.");
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (outReg != InvalidReg) {
+ masm.passABIArg(outReg);
+ }
+
+ masm.callWithABI(nativeFun, MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, rax, rax, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.testb(rax, rax);
+ masm.j(Assembler::Zero, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(esp, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ masm.load32(Address(esp, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(esp, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Double:
+ masm.loadDouble(Address(esp, 0), ReturnDoubleReg);
+ masm.freeStack(sizeof(double));
+ break;
+
+ case Type_Pointer:
+ masm.loadPtr(Address(esp, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ // Until C++ code is instrumented against Spectre, prevent speculative
+ // execution from returning any private data.
+ if (f.returnsData() && JitOptions.spectreJitToCxxCalls) {
+ masm.speculationBarrier();
+ }
+
+ // Pop ExitFooterFrame and the frame pointer.
+ masm.leaveExitFrame(0);
+ masm.pop(FramePointer);
+
+ // Return. Subtract sizeof(void*) for the frame pointer.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ static_assert(PreBarrierReg == rdx);
+ Register temp1 = rax;
+ Register temp2 = rbx;
+ Register temp3 = rcx;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet regs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ masm.PushRegsInMask(regs);
+
+ masm.mov(ImmPtr(cx->runtime()), rcx);
+
+ masm.setupUnalignedABICall(rax);
+ masm.passABIArg(rcx);
+ masm.passABIArg(rdx);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ masm.PopRegsInMask(regs);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.ret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(rdx, r9);
+}
diff --git a/js/src/jit/x86-shared/Architecture-x86-shared.cpp b/js/src/jit/x86-shared/Architecture-x86-shared.cpp
new file mode 100644
index 0000000000..f000b09c77
--- /dev/null
+++ b/js/src/jit/x86-shared/Architecture-x86-shared.cpp
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/Architecture-x86-shared.h"
+#if !defined(JS_CODEGEN_X86) && !defined(JS_CODEGEN_X64)
+# error "Wrong architecture. Only x86 and x64 should build this file!"
+#endif
+
+#include <iterator>
+
+#include "jit/RegisterSets.h"
+
+const char* js::jit::FloatRegister::name() const {
+ static const char* const names[] = {
+
+#ifdef JS_CODEGEN_X64
+# define FLOAT_REGS_(TYPE) \
+ "%xmm0" TYPE, "%xmm1" TYPE, "%xmm2" TYPE, "%xmm3" TYPE, "%xmm4" TYPE, \
+ "%xmm5" TYPE, "%xmm6" TYPE, "%xmm7" TYPE, "%xmm8" TYPE, "%xmm9" TYPE, \
+ "%xmm10" TYPE, "%xmm11" TYPE, "%xmm12" TYPE, "%xmm13" TYPE, \
+ "%xmm14" TYPE, "%xmm15" TYPE
+#else
+# define FLOAT_REGS_(TYPE) \
+ "%xmm0" TYPE, "%xmm1" TYPE, "%xmm2" TYPE, "%xmm3" TYPE, "%xmm4" TYPE, \
+ "%xmm5" TYPE, "%xmm6" TYPE, "%xmm7" TYPE
+#endif
+
+ // These should be enumerated in the same order as in
+ // FloatRegisters::ContentType.
+ FLOAT_REGS_(".s"), FLOAT_REGS_(".d"), FLOAT_REGS_(".i4"),
+ FLOAT_REGS_(".s4")
+#undef FLOAT_REGS_
+
+ };
+ MOZ_ASSERT(size_t(code()) < std::size(names));
+ return names[size_t(code())];
+}
+
+js::jit::FloatRegisterSet js::jit::FloatRegister::ReduceSetForPush(
+ const FloatRegisterSet& s) {
+ SetType bits = s.bits();
+
+  // Ignore all SIMD registers if SIMD is not supported.
+#ifndef ENABLE_WASM_SIMD
+ bits &= Codes::AllPhysMask * Codes::SpreadScalar;
+#endif
+
+  // Exclude registers which are already pushed with a larger type. High bits
+  // are associated with larger register types. Thus we keep the set of
+  // registers which are not included in a larger type.
+ bits &= ~(bits >> (1 * Codes::TotalPhys));
+ bits &= ~(bits >> (2 * Codes::TotalPhys));
+ bits &= ~(bits >> (3 * Codes::TotalPhys));
+
+ return FloatRegisterSet(bits);
+}
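The three shift-and-mask lines above clear every bit whose physical register is also set in a higher (wider) type group, since each wider group sits TotalPhys bits higher in the set. A standalone illustration of the same trick, shrunk to an assumed toy layout of 4 physical registers:

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed toy layout: 4 physical registers, bit groups ordered
  // [Single | Double | Simd128] from low to high, 4 bits per group.
  const uint32_t TotalPhys = 4;

  uint32_t bits = (1u << 0)                    // Single reg0
                | (1u << (TotalPhys + 0))      // Double reg0
                | (1u << (2 * TotalPhys + 2))  // Simd128 reg2
                | (1u << 3);                   // Single reg3

  // Same filtering as ReduceSetForPush: drop a bit if the same physical
  // register is present in any higher (wider) group.
  bits &= ~(bits >> (1 * TotalPhys));
  bits &= ~(bits >> (2 * TotalPhys));
  bits &= ~(bits >> (3 * TotalPhys));

  // Single reg0 is gone (reg0 will be pushed as a Double); the rest survive.
  printf("0x%x\n", bits);  // 0x418
  return 0;
}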
+
+uint32_t js::jit::FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
+ SetType all = s.bits();
+ SetType set128b = (all >> (uint32_t(Codes::Simd128) * Codes::TotalPhys)) &
+ Codes::AllPhysMask;
+ SetType doubleSet = (all >> (uint32_t(Codes::Double) * Codes::TotalPhys)) &
+ Codes::AllPhysMask;
+ SetType singleSet = (all >> (uint32_t(Codes::Single) * Codes::TotalPhys)) &
+ Codes::AllPhysMask;
+
+ // PushRegsInMask pushes the largest register first, and thus avoids pushing
+ // aliased registers. So we have to filter out the physical registers which
+ // are already pushed as part of larger registers.
+ SetType set64b = doubleSet & ~set128b;
+ SetType set32b = singleSet & ~set64b & ~set128b;
+
+ static_assert(Codes::AllPhysMask <= 0xffff,
+ "We can safely use CountPopulation32");
+ uint32_t count32b = mozilla::CountPopulation32(set32b);
+
+#if defined(JS_CODEGEN_X64)
+  // If we have an odd number of 32-bit values, then we increase the size to
+  // keep the stack aligned on 8 bytes. Note: keep in sync with
+  // PushRegsInMask and PopRegsInMaskIgnore.
+ count32b += count32b & 1;
+#endif
+
+ return mozilla::CountPopulation32(set128b) * (4 * sizeof(int32_t)) +
+ mozilla::CountPopulation32(set64b) * sizeof(double) +
+ count32b * sizeof(float);
+}
+
+uint32_t js::jit::FloatRegister::getRegisterDumpOffsetInBytes() {
+ return uint32_t(encoding()) * sizeof(FloatRegisters::RegisterContent);
+}
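GetPushSizeInBytes() splits the set into its three type groups, discards physical registers already counted at a wider type, and on x64 pads an odd float count to preserve 8-byte stack alignment. A standalone sketch of the same computation, assuming the x64 layout of 16 physical registers per group and using the GCC/Clang popcount builtin:

#include <cstdint>
#include <cstdio>

static uint32_t PushSizeInBytes(uint64_t all) {
  const uint32_t TotalPhys = 16;  // assumed x64 layout
  const uint64_t PhysMask = 0xffff;

  uint64_t set128 = (all >> (2 * TotalPhys)) & PhysMask;
  uint64_t setDouble = (all >> (1 * TotalPhys)) & PhysMask;
  uint64_t setSingle = all & PhysMask;

  // A register pushed as a v128 already covers its double/single aliases.
  uint64_t set64 = setDouble & ~set128;
  uint64_t set32 = setSingle & ~set64 & ~set128;

  uint32_t count32 = __builtin_popcountll(set32);
  count32 += count32 & 1;  // x64: pad an odd float count to 8-byte alignment.

  return __builtin_popcountll(set128) * 16 +
         __builtin_popcountll(set64) * 8 + count32 * 4;
}

int main() {
  // xmm0 as Simd128, xmm0 and xmm1 as Double, xmm2 as Single:
  uint64_t set = (1ull << 32) | (1ull << 16) | (1ull << 17) | (1ull << 2);
  // xmm0's double alias is free (covered by the v128 push), xmm1 costs 8
  // bytes, and the lone float is padded from 4 to 8 bytes.
  printf("%u\n", PushSizeInBytes(set));  // 16 + 8 + 8 = 32
  return 0;
}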
diff --git a/js/src/jit/x86-shared/Architecture-x86-shared.h b/js/src/jit/x86-shared/Architecture-x86-shared.h
new file mode 100644
index 0000000000..b4701af284
--- /dev/null
+++ b/js/src/jit/x86-shared/Architecture-x86-shared.h
@@ -0,0 +1,467 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Architecture_x86_h
+#define jit_x86_shared_Architecture_x86_h
+
+#if !defined(JS_CODEGEN_X86) && !defined(JS_CODEGEN_X64)
+# error "Unsupported architecture!"
+#endif
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <string.h>
+
+#include "jit/shared/Architecture-shared.h"
+
+#include "jit/x86-shared/Constants-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+#if defined(JS_CODEGEN_X86)
+// These offsets are specific to nunboxing, and capture offsets into the
+// components of a js::Value.
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+#endif
+
+#if defined(JS_CODEGEN_X64) && defined(_WIN64)
+static const uint32_t ShadowStackSpace = 32;
+#else
+static const uint32_t ShadowStackSpace = 0;
+#endif
+
+static const uint32_t JumpImmediateRange = INT32_MAX;
+
+class Registers {
+ public:
+ using Code = uint8_t;
+ using Encoding = X86Encoding::RegisterID;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+#if defined(JS_CODEGEN_X86)
+ using SetType = uint8_t;
+
+ static const char* GetName(Code code) {
+ return X86Encoding::GPRegName(Encoding(code));
+ }
+
+ static const uint32_t Total = 8;
+ static const uint32_t TotalPhys = 8;
+ static const uint32_t Allocatable = 7;
+
+#elif defined(JS_CODEGEN_X64)
+ using SetType = uint16_t;
+
+ static const char* GetName(Code code) {
+ static const char* const Names[] = {
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"};
+ return Names[code];
+ }
+
+ static const uint32_t Total = 16;
+ static const uint32_t TotalPhys = 16;
+ static const uint32_t Allocatable = 14;
+#endif
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) <= 4, "SetType must be, at most, 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+
+ static Code FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(Code(i)), name) == 0) {
+ return Code(i);
+ }
+ }
+ return Invalid;
+ }
+
+ static const Encoding StackPointer = X86Encoding::rsp;
+ static const Encoding Invalid = X86Encoding::invalid_reg;
+
+ static const SetType AllMask = (1 << Total) - 1;
+
+#if defined(JS_CODEGEN_X86)
+ static const SetType ArgRegMask = 0;
+
+ static const SetType VolatileMask = (1 << X86Encoding::rax) |
+ (1 << X86Encoding::rcx) |
+ (1 << X86Encoding::rdx);
+
+ static const SetType WrapperMask = VolatileMask | (1 << X86Encoding::rbx);
+
+ static const SetType SingleByteRegs =
+ (1 << X86Encoding::rax) | (1 << X86Encoding::rcx) |
+ (1 << X86Encoding::rdx) | (1 << X86Encoding::rbx);
+
+ static const SetType NonAllocatableMask =
+ (1 << X86Encoding::rsp) | (1 << X86Encoding::rbp);
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask =
+ (1 << X86Encoding::rcx) | (1 << X86Encoding::rdx);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask = (1 << X86Encoding::rax);
+
+#elif defined(JS_CODEGEN_X64)
+ static const SetType ArgRegMask =
+# if !defined(_WIN64)
+ (1 << X86Encoding::rdi) | (1 << X86Encoding::rsi) |
+# endif
+ (1 << X86Encoding::rdx) | (1 << X86Encoding::rcx) |
+ (1 << X86Encoding::r8) | (1 << X86Encoding::r9);
+
+ static const SetType VolatileMask = (1 << X86Encoding::rax) | ArgRegMask |
+ (1 << X86Encoding::r10) |
+ (1 << X86Encoding::r11);
+
+ static const SetType WrapperMask = VolatileMask;
+
+ static const SetType SingleByteRegs = AllMask & ~(1 << X86Encoding::rsp);
+
+ static const SetType NonAllocatableMask =
+ (1 << X86Encoding::rsp) | (1 << X86Encoding::rbp) |
+ (1 << X86Encoding::r11); // This is ScratchReg.
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask = (1 << X86Encoding::rcx);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask = (1 << X86Encoding::rax);
+
+#endif
+
+ static const SetType NonVolatileMask =
+ AllMask & ~VolatileMask & ~(1 << X86Encoding::rsp);
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+using PackedRegisterMask = Registers::SetType;
+
+class FloatRegisters {
+ public:
+ using Encoding = X86Encoding::XMMRegisterID;
+
+  // Observe that there is a Simd128 type on both x86 and x64 whether SIMD is
+  // implemented/enabled or not, and that the RegisterContent union is always
+  // large enough for a V128 datum. Producers and consumers of a register dump
+ // must be aware of this even if they don't need to save/restore values in the
+ // high lanes of the SIMD registers. See the DumpAllRegs() implementations,
+ // for example.
+
+ enum ContentType {
+ Single, // 32-bit float.
+ Double, // 64-bit double.
+ Simd128, // 128-bit Wasm SIMD type.
+ NumTypes
+ };
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ float s;
+ double d;
+ uint8_t v128[16];
+ };
+
+ static const char* GetName(Encoding code) {
+ return X86Encoding::XMMRegName(code);
+ }
+
+ static Encoding FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(Encoding(i)), name) == 0) {
+ return Encoding(i);
+ }
+ }
+ return Invalid;
+ }
+
+ static const Encoding Invalid = X86Encoding::invalid_xmm;
+
+#if defined(JS_CODEGEN_X86)
+ static const uint32_t Total = 8 * NumTypes;
+ static const uint32_t TotalPhys = 8;
+ static const uint32_t Allocatable = 7;
+ using SetType = uint32_t;
+#elif defined(JS_CODEGEN_X64)
+ static const uint32_t Total = 16 * NumTypes;
+ static const uint32_t TotalPhys = 16;
+ static const uint32_t Allocatable = 15;
+ using SetType = uint64_t;
+#endif
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+  // Magic values which are used to duplicate a mask of physical registers for
+  // a specific type of register. A multiplication is used to copy and shift
+  // the bits of the physical register mask.
+ static const SetType SpreadSingle = SetType(1)
+ << (uint32_t(Single) * TotalPhys);
+ static const SetType SpreadDouble = SetType(1)
+ << (uint32_t(Double) * TotalPhys);
+ static const SetType SpreadSimd128 = SetType(1)
+ << (uint32_t(Simd128) * TotalPhys);
+ static const SetType SpreadScalar = SpreadSingle | SpreadDouble;
+ static const SetType SpreadVector = SpreadSimd128;
+ static const SetType Spread = SpreadScalar | SpreadVector;
+
+ static const SetType AllPhysMask = ((1 << TotalPhys) - 1);
+ static const SetType AllMask = AllPhysMask * Spread;
+ static const SetType AllDoubleMask = AllPhysMask * SpreadDouble;
+ static const SetType AllSingleMask = AllPhysMask * SpreadSingle;
+ static const SetType AllVector128Mask = AllPhysMask * SpreadSimd128;
+
+#if defined(JS_CODEGEN_X86)
+ static const SetType NonAllocatableMask =
+ Spread * (1 << X86Encoding::xmm7); // This is ScratchDoubleReg.
+
+#elif defined(JS_CODEGEN_X64)
+ static const SetType NonAllocatableMask =
+ Spread * (1 << X86Encoding::xmm15); // This is ScratchDoubleReg.
+#endif
+
+#if defined(JS_CODEGEN_X64) && defined(_WIN64)
+ static const SetType VolatileMask =
+ ((1 << X86Encoding::xmm0) | (1 << X86Encoding::xmm1) |
+ (1 << X86Encoding::xmm2) | (1 << X86Encoding::xmm3) |
+ (1 << X86Encoding::xmm4) | (1 << X86Encoding::xmm5)) *
+ Spread;
+#else
+ static const SetType VolatileMask = AllMask;
+#endif
+
+ static const SetType NonVolatileMask = AllMask & ~VolatileMask;
+ static const SetType WrapperMask = VolatileMask;
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
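The Spread constants rely on the fact that multiplying a physical-register mask by a constant with one bit per type group copies that mask into every group. A small standalone check of that identity, assuming the x64 layout (16 physical registers, 3 type groups):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t TotalPhys = 16;  // assumed x64 layout
  const uint64_t AllPhysMask = (1ull << TotalPhys) - 1;

  // One bit per type group: Single, Double, Simd128.
  const uint64_t SpreadSingle = 1ull << (0 * TotalPhys);
  const uint64_t SpreadDouble = 1ull << (1 * TotalPhys);
  const uint64_t SpreadSimd128 = 1ull << (2 * TotalPhys);
  const uint64_t Spread = SpreadSingle | SpreadDouble | SpreadSimd128;

  // Multiplying copies the physical mask into every type group.
  uint64_t xmm3Physical = 1ull << 3;
  printf("0x%llx\n", (unsigned long long)(xmm3Physical * Spread));
  // 0x800080008: xmm3 as Single, as Double and as Simd128.

  // AllMask applies the same trick to every physical register at once.
  printf("0x%llx\n", (unsigned long long)(AllPhysMask * Spread));
  // 0xffffffffffff: 48 bits, one per (register, type) pair.
  return 0;
}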
+
+static const uint32_t SpillSlotSize =
+ std::max(sizeof(Registers::RegisterContent),
+ sizeof(FloatRegisters::RegisterContent));
+
+template <typename T>
+class TypedRegisterSet;
+
+struct FloatRegister {
+ using Codes = FloatRegisters;
+ using Code = size_t;
+ using Encoding = Codes::Encoding;
+ using SetType = Codes::SetType;
+ static uint32_t SetSize(SetType x) {
+    // Count the number of non-aliased registers, for the moment.
+    //
+    // Copy the set bits of each typed register to the low part of the set,
+    // and count the number of physical registers. This avoids counting
+    // registers twice when they appear with different types (such as in
+    // AllMask).
+ x |= x >> (2 * Codes::TotalPhys);
+ x |= x >> Codes::TotalPhys;
+ x &= Codes::AllPhysMask;
+ static_assert(Codes::AllPhysMask <= 0xffff,
+ "We can safely use CountPopulation32");
+ return mozilla::CountPopulation32(x);
+ }
+
+#if defined(JS_CODEGEN_X86)
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+
+#elif defined(JS_CODEGEN_X64)
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+#endif
+
+ private:
+ // Note: These fields are using one extra bit to make the invalid enumerated
+ // values fit, and thus prevent a warning.
+ Codes::Encoding reg_ : 5;
+ Codes::ContentType type_ : 3;
+ bool isInvalid_ : 1;
+
+ // Constants used for exporting/importing the float register code.
+#if defined(JS_CODEGEN_X86)
+ static const size_t RegSize = 3;
+#elif defined(JS_CODEGEN_X64)
+ static const size_t RegSize = 4;
+#endif
+ static const size_t RegMask = (1 << RegSize) - 1;
+
+ public:
+ constexpr FloatRegister()
+ : reg_(Codes::Encoding(0)), type_(Codes::Single), isInvalid_(true) {}
+ constexpr FloatRegister(uint32_t r, Codes::ContentType k)
+ : reg_(Codes::Encoding(r)), type_(k), isInvalid_(false) {}
+ constexpr FloatRegister(Codes::Encoding r, Codes::ContentType k)
+ : reg_(r), type_(k), isInvalid_(false) {}
+
+ static FloatRegister FromCode(uint32_t i) {
+ MOZ_ASSERT(i < Codes::Total);
+ return FloatRegister(i & RegMask, Codes::ContentType(i >> RegSize));
+ }
+
+ bool isSingle() const {
+ MOZ_ASSERT(!isInvalid());
+ return type_ == Codes::Single;
+ }
+ bool isDouble() const {
+ MOZ_ASSERT(!isInvalid());
+ return type_ == Codes::Double;
+ }
+ bool isSimd128() const {
+ MOZ_ASSERT(!isInvalid());
+ return type_ == Codes::Simd128;
+ }
+ bool isInvalid() const { return isInvalid_; }
+
+ FloatRegister asSingle() const {
+ MOZ_ASSERT(!isInvalid());
+ return FloatRegister(reg_, Codes::Single);
+ }
+ FloatRegister asDouble() const {
+ MOZ_ASSERT(!isInvalid());
+ return FloatRegister(reg_, Codes::Double);
+ }
+ FloatRegister asSimd128() const {
+ MOZ_ASSERT(!isInvalid());
+ return FloatRegister(reg_, Codes::Simd128);
+ }
+
+ uint32_t size() const {
+ MOZ_ASSERT(!isInvalid());
+ if (isSingle()) {
+ return sizeof(float);
+ }
+ if (isDouble()) {
+ return sizeof(double);
+ }
+ MOZ_ASSERT(isSimd128());
+ return 4 * sizeof(int32_t);
+ }
+
+ Code code() const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys);
+    // TODO: ARM does the same thing, but we should avoid this, except
+    // that the RegisterSets depend on it.
+ return Code(reg_ | (type_ << RegSize));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys);
+ return reg_;
+ }
+  // Defined in Architecture-x86-shared.cpp.
+ const char* name() const;
+ bool volatile_() const {
+ return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+ }
+ bool operator!=(FloatRegister other) const {
+ return other.reg_ != reg_ || other.type_ != type_;
+ }
+ bool operator==(FloatRegister other) const {
+ return other.reg_ == reg_ && other.type_ == type_;
+ }
+ bool aliases(FloatRegister other) const { return other.reg_ == reg_; }
+ // Check if two floating point registers have the same type.
+ bool equiv(FloatRegister other) const { return other.type_ == type_; }
+
+ uint32_t numAliased() const { return Codes::NumTypes; }
+ uint32_t numAlignedAliased() const { return numAliased(); }
+
+ FloatRegister aliased(uint32_t aliasIdx) const {
+ MOZ_ASSERT(aliasIdx < Codes::NumTypes);
+ return FloatRegister(
+ reg_, Codes::ContentType((aliasIdx + type_) % Codes::NumTypes));
+ }
+ FloatRegister alignedAliased(uint32_t aliasIdx) const {
+ return aliased(aliasIdx);
+ }
+
+ SetType alignedOrDominatedAliasedSet() const { return Codes::Spread << reg_; }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ return LiveAsIndexableSet<Name>(s);
+ }
+
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+ const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
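code() packs the physical encoding and the content type into a single index, with the type in the high bits and the encoding in the low RegSize bits, and FromCode() reverses it. A tiny round-trip sketch using the x64 value RegSize == 4 assumed above:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t RegSize = 4;  // assumed x64 value
  const uint32_t RegMask = (1u << RegSize) - 1;
  enum ContentType { Single, Double, Simd128 };

  // Pack xmm9 viewed as a Double, the same way FloatRegister::code() does.
  uint32_t encoding = 9;
  uint32_t type = Double;
  uint32_t code = encoding | (type << RegSize);
  printf("code = %u\n", code);  // 25

  // FromCode() recovers both fields.
  assert((code & RegMask) == encoding);
  assert((code >> RegSize) == type);
  return 0;
}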
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Vector128>(SetType set) {
+ return set & FloatRegisters::AllVector128Mask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
+ return set;
+}
+
+// ARM/D32 has double registers that can NOT be treated as float32,
+// and this requires some extra work in lowering.
+inline bool hasUnaliasedDouble() { return false; }
+
+// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32
+// to a double as a temporary, you need a temporary double register.
+inline bool hasMultiAlias() { return false; }
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Architecture_x86_h */
diff --git a/js/src/jit/x86-shared/Assembler-x86-shared.cpp b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
new file mode 100644
index 0000000000..65fd124cf8
--- /dev/null
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
@@ -0,0 +1,355 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Maybe.h"
+
+#include <algorithm>
+
+#include "jit/AutoWritableJitCode.h"
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/MacroAssembler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/MacroAssembler-x64.h"
+#else
+# error "Wrong architecture. Only x86 and x64 should build this file!"
+#endif
+
+#ifdef _MSC_VER
+# include <intrin.h> // for __cpuid
+# if defined(_M_X64) && (_MSC_FULL_VER >= 160040219)
+# include <immintrin.h> // for _xgetbv
+# endif
+#endif
+
+using namespace js;
+using namespace js::jit;
+
+void AssemblerX86Shared::copyJumpRelocationTable(uint8_t* dest) {
+ if (jumpRelocations_.length()) {
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+ }
+}
+
+void AssemblerX86Shared::copyDataRelocationTable(uint8_t* dest) {
+ if (dataRelocations_.length()) {
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+ }
+}
+
+/* static */
+void AssemblerX86Shared::TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ MOZ_ASSERT(offset >= sizeof(void*) && offset <= code->instructionsSize());
+
+ uint8_t* src = code->raw() + offset;
+ void* data = X86Encoding::GetPointer(src);
+
+#ifdef JS_PUNBOX64
+ // Data relocations can be for Values or for raw pointers. If a Value is
+ // zero-tagged, we can trace it as if it were a raw pointer. If a Value
+ // is not zero-tagged, we have to interpret it as a Value to ensure that the
+ // tag bits are masked off to recover the actual pointer.
+
+ uintptr_t word = reinterpret_cast<uintptr_t>(data);
+ if (word >> JSVAL_TAG_SHIFT) {
+ // This relocation is a Value with a non-zero tag.
+ Value value = Value::fromRawBits(word);
+ MOZ_ASSERT_IF(value.isGCThing(),
+ gc::IsCellPointerValid(value.toGCThing()));
+ TraceManuallyBarrieredEdge(trc, &value, "jit-masm-value");
+ if (word != value.asRawBits()) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ X86Encoding::SetPointer(src, value.bitsAsPunboxPointer());
+ }
+ continue;
+ }
+#endif
+
+ // This relocation is a raw pointer or a Value with a zero tag.
+ gc::Cell* cell = static_cast<gc::Cell*>(data);
+ MOZ_ASSERT(gc::IsCellPointerValid(cell));
+ TraceManuallyBarrieredGenericPointerEdge(trc, &cell, "jit-masm-ptr");
+ if (cell != data) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ X86Encoding::SetPointer(src, cell);
+ }
+ }
+}
+
+void AssemblerX86Shared::executableCopy(void* buffer) {
+ masm.executableCopy(buffer);
+}
+
+void AssemblerX86Shared::processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+}
+
+AssemblerX86Shared::Condition AssemblerX86Shared::InvertCondition(
+ Condition cond) {
+ switch (cond) {
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerX86Shared::Condition AssemblerX86Shared::UnsignedCondition(
+ Condition cond) {
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return cond;
+ case LessThan:
+ case Below:
+ return Below;
+ case LessThanOrEqual:
+ case BelowOrEqual:
+ return BelowOrEqual;
+ case GreaterThan:
+ case Above:
+ return Above;
+ case AboveOrEqual:
+ case GreaterThanOrEqual:
+ return AboveOrEqual;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerX86Shared::Condition AssemblerX86Shared::ConditionWithoutEqual(
+ Condition cond) {
+ switch (cond) {
+ case LessThan:
+ case LessThanOrEqual:
+ return LessThan;
+ case Below:
+ case BelowOrEqual:
+ return Below;
+ case GreaterThan:
+ case GreaterThanOrEqual:
+ return GreaterThan;
+ case Above:
+ case AboveOrEqual:
+ return Above;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerX86Shared::DoubleCondition AssemblerX86Shared::InvertCondition(
+ DoubleCondition cond) {
+ switch (cond) {
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+CPUInfo::SSEVersion CPUInfo::maxSSEVersion = UnknownSSE;
+CPUInfo::SSEVersion CPUInfo::maxEnabledSSEVersion = UnknownSSE;
+bool CPUInfo::avxPresent = false;
+#ifdef ENABLE_WASM_AVX
+bool CPUInfo::avxEnabled = true;
+#else
+bool CPUInfo::avxEnabled = false;
+#endif
+bool CPUInfo::popcntPresent = false;
+bool CPUInfo::bmi1Present = false;
+bool CPUInfo::bmi2Present = false;
+bool CPUInfo::lzcntPresent = false;
+bool CPUInfo::avx2Present = false;
+bool CPUInfo::fmaPresent = false;
+
+namespace js {
+namespace jit {
+bool CPUFlagsHaveBeenComputed() { return CPUInfo::FlagsHaveBeenComputed(); }
+} // namespace jit
+} // namespace js
+
+static uintptr_t ReadXGETBV() {
+ // We use a variety of low-level mechanisms to get at the xgetbv
+ // instruction, including spelling out the xgetbv instruction as bytes,
+ // because older compilers and assemblers may not recognize the instruction
+ // by name.
+ size_t xcr0EAX = 0;
+#if defined(_XCR_XFEATURE_ENABLED_MASK)
+ xcr0EAX = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
+#elif defined(__GNUC__)
+ // xgetbv returns its results in %eax and %edx, and for our purposes here,
+ // we're only interested in the %eax value.
+ asm(".byte 0x0f, 0x01, 0xd0" : "=a"(xcr0EAX) : "c"(0) : "%edx");
+#elif defined(_MSC_VER) && defined(_M_IX86)
+ __asm {
+ xor ecx, ecx
+ _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0
+ mov xcr0EAX, eax
+ }
+#endif
+ return xcr0EAX;
+}
+
+static void ReadCPUInfo(int* flagsEax, int* flagsEbx, int* flagsEcx,
+ int* flagsEdx) {
+#ifdef _MSC_VER
+ int cpuinfo[4];
+ __cpuid(cpuinfo, *flagsEax);
+ *flagsEax = cpuinfo[0];
+ *flagsEbx = cpuinfo[1];
+ *flagsEcx = cpuinfo[2];
+ *flagsEdx = cpuinfo[3];
+#elif defined(__GNUC__)
+  // Some older 32-bit processors don't fill the ecx register with cpuid, so
+  // clear it before calling cpuid to avoid any risk of picking up random
+  // bits indicating SSE3/SSE4 are present. Also make sure that it's set to 0
+  // as an input for BMI detection on all platforms.
+ *flagsEcx = 0;
+# ifdef JS_CODEGEN_X64
+ asm("cpuid;"
+ : "+a"(*flagsEax), "=b"(*flagsEbx), "+c"(*flagsEcx), "=d"(*flagsEdx));
+# else
+ // On x86, preserve ebx. The compiler needs it for PIC mode.
+ asm("mov %%ebx, %%edi;"
+ "cpuid;"
+ "xchg %%edi, %%ebx;"
+ : "+a"(*flagsEax), "=D"(*flagsEbx), "+c"(*flagsEcx), "=d"(*flagsEdx));
+# endif
+#else
+# error "Unsupported compiler"
+#endif
+}
+
+void CPUInfo::ComputeFlags() {
+ MOZ_ASSERT(!FlagsHaveBeenComputed());
+
+ int flagsEax = 1;
+ int flagsEbx = 0;
+ int flagsEcx = 0;
+ int flagsEdx = 0;
+ ReadCPUInfo(&flagsEax, &flagsEbx, &flagsEcx, &flagsEdx);
+
+ static constexpr int SSEBit = 1 << 25;
+ static constexpr int SSE2Bit = 1 << 26;
+ static constexpr int SSE3Bit = 1 << 0;
+ static constexpr int SSSE3Bit = 1 << 9;
+ static constexpr int SSE41Bit = 1 << 19;
+ static constexpr int SSE42Bit = 1 << 20;
+
+ if (flagsEcx & SSE42Bit) {
+ maxSSEVersion = SSE4_2;
+ } else if (flagsEcx & SSE41Bit) {
+ maxSSEVersion = SSE4_1;
+ } else if (flagsEcx & SSSE3Bit) {
+ maxSSEVersion = SSSE3;
+ } else if (flagsEcx & SSE3Bit) {
+ maxSSEVersion = SSE3;
+ } else if (flagsEdx & SSE2Bit) {
+ maxSSEVersion = SSE2;
+ } else if (flagsEdx & SSEBit) {
+ maxSSEVersion = SSE;
+ } else {
+ maxSSEVersion = NoSSE;
+ }
+
+ if (maxEnabledSSEVersion != UnknownSSE) {
+ maxSSEVersion = std::min(maxSSEVersion, maxEnabledSSEVersion);
+ }
+
+ static constexpr int AVXBit = 1 << 28;
+ static constexpr int XSAVEBit = 1 << 27;
+ avxPresent = (flagsEcx & AVXBit) && (flagsEcx & XSAVEBit) && avxEnabled;
+
+ // If the hardware supports AVX, check whether the OS supports it too.
+ if (avxPresent) {
+ size_t xcr0EAX = ReadXGETBV();
+ static constexpr int xcr0SSEBit = 1 << 1;
+ static constexpr int xcr0AVXBit = 1 << 2;
+ avxPresent = (xcr0EAX & xcr0SSEBit) && (xcr0EAX & xcr0AVXBit);
+ }
+
+  // CMOV instructions are supposed to be supported by all CPUs which have
+  // SSE2 enabled. While this might be true, it is not guaranteed by any
+  // documentation from either AMD or Intel.
+ static constexpr int CMOVBit = 1 << 15;
+ MOZ_RELEASE_ASSERT(flagsEdx & CMOVBit,
+ "CMOVcc instruction is not recognized by this CPU.");
+
+ static constexpr int POPCNTBit = 1 << 23;
+ popcntPresent = (flagsEcx & POPCNTBit);
+
+ // Use the avxEnabled flag to enable/disable FMA.
+ static constexpr int FMABit = 1 << 12;
+ fmaPresent = (flagsEcx & FMABit) && avxEnabled;
+
+ flagsEax = 0x80000001;
+ ReadCPUInfo(&flagsEax, &flagsEbx, &flagsEcx, &flagsEdx);
+
+ static constexpr int LZCNTBit = 1 << 5;
+ lzcntPresent = (flagsEcx & LZCNTBit);
+
+ flagsEax = 0x7;
+ ReadCPUInfo(&flagsEax, &flagsEbx, &flagsEcx, &flagsEdx);
+
+ static constexpr int BMI1Bit = 1 << 3;
+ static constexpr int BMI2Bit = 1 << 8;
+ static constexpr int AVX2Bit = 1 << 5;
+ bmi1Present = (flagsEbx & BMI1Bit);
+ bmi2Present = bmi1Present && (flagsEbx & BMI2Bit);
+ avx2Present = avxPresent && (flagsEbx & AVX2Bit);
+
+ MOZ_ASSERT(FlagsHaveBeenComputed());
+}
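ComputeFlags() reads CPUID leaf 1 (feature bits in ECX/EDX), extended leaf 0x80000001 (LZCNT) and leaf 7 (BMI1/BMI2/AVX2). As a rough standalone cross-check, the same leaf-1 bits can be inspected with the <cpuid.h> helper shipped with GCC and Clang; this is only an illustration, not how the engine detects features:

// Compile with GCC or Clang on x86/x64; uses the <cpuid.h> helper instead of
// the hand-rolled inline assembly in the patch.
#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    printf("CPUID leaf 1 not supported\n");
    return 1;
  }

  // Same bit positions as the constants in CPUInfo::ComputeFlags().
  printf("SSE2:   %u\n", (edx >> 26) & 1);
  printf("SSE4.2: %u\n", (ecx >> 20) & 1);
  printf("POPCNT: %u\n", (ecx >> 23) & 1);
  printf("AVX:    %u\n", (ecx >> 28) & 1);  // hardware bit only; the xgetbv
                                            // check in the patch still matters.
  return 0;
}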
diff --git a/js/src/jit/x86-shared/Assembler-x86-shared.h b/js/src/jit/x86-shared/Assembler-x86-shared.h
new file mode 100644
index 0000000000..922a2de029
--- /dev/null
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -0,0 +1,4887 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Assembler_x86_shared_h
+#define jit_x86_shared_Assembler_x86_shared_h
+
+#include <cstddef>
+
+#include "jit/shared/Assembler-shared.h"
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/BaseAssembler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/BaseAssembler-x64.h"
+#else
+# error "Unknown architecture!"
+#endif
+#include "jit/CompactBuffer.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+// Do not reference ScratchFloat32Reg_ directly, use ScratchFloat32Scope
+// instead.
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg_) {}
+};
+
+// Do not reference ScratchDoubleReg_ directly, use ScratchDoubleScope instead.
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg_) {}
+};
+
+struct ScratchSimd128Scope : public AutoFloatRegisterScope {
+ explicit ScratchSimd128Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchSimd128Reg) {}
+};
+
+class Operand {
+ public:
+ enum Kind { REG, MEM_REG_DISP, FPREG, MEM_SCALE, MEM_ADDRESS32 };
+
+ private:
+ Kind kind_ : 4;
+ // Used as a Register::Encoding and a FloatRegister::Encoding.
+ uint32_t base_ : 5;
+ Scale scale_ : 3;
+ // We don't use all 8 bits, of course, but GCC complains if the size of
+ // this field is smaller than the size of Register::Encoding.
+ Register::Encoding index_ : 8;
+ int32_t disp_;
+
+ public:
+ explicit Operand(Register reg)
+ : kind_(REG),
+ base_(reg.encoding()),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(0) {}
+ explicit Operand(FloatRegister reg)
+ : kind_(FPREG),
+ base_(reg.encoding()),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(0) {}
+ explicit Operand(const Address& address)
+ : kind_(MEM_REG_DISP),
+ base_(address.base.encoding()),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(address.offset) {}
+ explicit Operand(const BaseIndex& address)
+ : kind_(MEM_SCALE),
+ base_(address.base.encoding()),
+ scale_(address.scale),
+ index_(address.index.encoding()),
+ disp_(address.offset) {}
+ Operand(Register base, Register index, Scale scale, int32_t disp = 0)
+ : kind_(MEM_SCALE),
+ base_(base.encoding()),
+ scale_(scale),
+ index_(index.encoding()),
+ disp_(disp) {}
+ Operand(Register reg, int32_t disp)
+ : kind_(MEM_REG_DISP),
+ base_(reg.encoding()),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(disp) {}
+ explicit Operand(AbsoluteAddress address)
+ : kind_(MEM_ADDRESS32),
+ base_(Registers::Invalid),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(X86Encoding::AddressImmediate(address.addr)) {}
+ explicit Operand(PatchedAbsoluteAddress address)
+ : kind_(MEM_ADDRESS32),
+ base_(Registers::Invalid),
+ scale_(TimesOne),
+ index_(Registers::Invalid),
+ disp_(X86Encoding::AddressImmediate(address.addr)) {}
+
+ Address toAddress() const {
+ MOZ_ASSERT(kind() == MEM_REG_DISP);
+ return Address(Register::FromCode(base()), disp());
+ }
+
+ BaseIndex toBaseIndex() const {
+ MOZ_ASSERT(kind() == MEM_SCALE);
+ return BaseIndex(Register::FromCode(base()), Register::FromCode(index()),
+ scale(), disp());
+ }
+
+ Kind kind() const { return kind_; }
+ Register::Encoding reg() const {
+ MOZ_ASSERT(kind() == REG);
+ return Register::Encoding(base_);
+ }
+ Register::Encoding base() const {
+ MOZ_ASSERT(kind() == MEM_REG_DISP || kind() == MEM_SCALE);
+ return Register::Encoding(base_);
+ }
+ Register::Encoding index() const {
+ MOZ_ASSERT(kind() == MEM_SCALE);
+ return index_;
+ }
+ Scale scale() const {
+ MOZ_ASSERT(kind() == MEM_SCALE);
+ return scale_;
+ }
+ FloatRegister::Encoding fpu() const {
+ MOZ_ASSERT(kind() == FPREG);
+ return FloatRegister::Encoding(base_);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(kind() == MEM_REG_DISP || kind() == MEM_SCALE);
+ return disp_;
+ }
+ void* address() const {
+ MOZ_ASSERT(kind() == MEM_ADDRESS32);
+ return reinterpret_cast<void*>(disp_);
+ }
+
+ bool containsReg(Register r) const {
+ switch (kind()) {
+ case REG:
+ return r.encoding() == reg();
+ case MEM_REG_DISP:
+ return r.encoding() == base();
+ case MEM_SCALE:
+ return r.encoding() == base() || r.encoding() == index();
+ default:
+ return false;
+ }
+ }
+};
+
+inline Imm32 Imm64::firstHalf() const { return low(); }
+
+inline Imm32 Imm64::secondHalf() const { return hi(); }
+
+class CPUInfo {
+ public:
+  // As the SSE versions were introduced in order, the presence of a later SSE
+  // implies the presence of an earlier one. For example, SSE4_2 support
+  // implies SSE2 support.
+ enum SSEVersion {
+ UnknownSSE = 0,
+ NoSSE = 1,
+ SSE = 2,
+ SSE2 = 3,
+ SSE3 = 4,
+ SSSE3 = 5,
+ SSE4_1 = 6,
+ SSE4_2 = 7
+ };
+ static const int AVX_PRESENT_BIT = 8;
+
+ static SSEVersion GetSSEVersion() {
+ MOZ_ASSERT(FlagsHaveBeenComputed());
+ MOZ_ASSERT_IF(maxEnabledSSEVersion != UnknownSSE,
+ maxSSEVersion <= maxEnabledSSEVersion);
+ return maxSSEVersion;
+ }
+
+ static bool IsAVXPresent() {
+ MOZ_ASSERT(FlagsHaveBeenComputed());
+ MOZ_ASSERT_IF(!avxEnabled, !avxPresent);
+ return avxPresent;
+ }
+
+ static inline uint32_t GetFingerprint() {
+ return GetSSEVersion() | (IsAVXPresent() ? AVX_PRESENT_BIT : 0);
+ }
+
+ private:
+ static SSEVersion maxSSEVersion;
+ static SSEVersion maxEnabledSSEVersion;
+ static bool avxPresent;
+ static bool avxEnabled;
+ static bool popcntPresent;
+ static bool bmi1Present;
+ static bool bmi2Present;
+ static bool lzcntPresent;
+ static bool fmaPresent;
+ static bool avx2Present;
+
+ static void SetMaxEnabledSSEVersion(SSEVersion v) {
+ if (maxEnabledSSEVersion == UnknownSSE) {
+ maxEnabledSSEVersion = v;
+ } else {
+ maxEnabledSSEVersion = std::min(v, maxEnabledSSEVersion);
+ }
+ }
+
+ public:
+ static bool IsSSE2Present() {
+#ifdef JS_CODEGEN_X64
+ return true;
+#else
+ return GetSSEVersion() >= SSE2;
+#endif
+ }
+ static bool IsSSE3Present() { return GetSSEVersion() >= SSE3; }
+ static bool IsSSSE3Present() { return GetSSEVersion() >= SSSE3; }
+ static bool IsSSE41Present() { return GetSSEVersion() >= SSE4_1; }
+ static bool IsSSE42Present() { return GetSSEVersion() >= SSE4_2; }
+ static bool IsPOPCNTPresent() { return popcntPresent; }
+ static bool IsBMI1Present() { return bmi1Present; }
+ static bool IsBMI2Present() { return bmi2Present; }
+ static bool IsLZCNTPresent() { return lzcntPresent; }
+ static bool IsFMAPresent() { return fmaPresent; }
+ static bool IsAVX2Present() { return avx2Present; }
+
+ static bool FlagsHaveBeenComputed() { return maxSSEVersion != UnknownSSE; }
+
+ static void ComputeFlags();
+
+ // The following should be called only before JS_Init (where the flags are
+ // computed). If several are called, the most restrictive setting is kept.
+
+ static void SetSSE3Disabled() {
+ MOZ_ASSERT(!FlagsHaveBeenComputed());
+ SetMaxEnabledSSEVersion(SSE2);
+ avxEnabled = false;
+ }
+ static void SetSSSE3Disabled() {
+ MOZ_ASSERT(!FlagsHaveBeenComputed());
+ SetMaxEnabledSSEVersion(SSE3);
+ avxEnabled = false;
+ }
+ static void SetSSE41Disabled() {
+ MOZ_ASSERT(!FlagsHaveBeenComputed());
+ SetMaxEnabledSSEVersion(SSSE3);
+ avxEnabled = false;
+ }
+ static void SetSSE42Disabled() {
+ MOZ_ASSERT(!FlagsHaveBeenComputed());
+ SetMaxEnabledSSEVersion(SSE4_1);
+ avxEnabled = false;
+ }
+ static void SetAVXDisabled() {
+ MOZ_ASSERT(!FlagsHaveBeenComputed());
+ avxEnabled = false;
+ }
+ static void SetAVXEnabled() {
+ MOZ_ASSERT(!FlagsHaveBeenComputed());
+ avxEnabled = true;
+ }
+};
+
+class AssemblerX86Shared : public AssemblerShared {
+ protected:
+ struct RelativePatch {
+ int32_t offset;
+ void* target;
+ RelocationKind kind;
+
+ RelativePatch(int32_t offset, void* target, RelocationKind kind)
+ : offset(offset), target(target), kind(kind) {}
+ };
+
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+
+ void writeDataRelocation(ImmGCPtr ptr) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // Assembler::TraceDataRelocations.
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(masm.currentOffset());
+ }
+ }
+
+ protected:
+ X86Encoding::BaseAssemblerSpecific masm;
+
+ using JmpSrc = X86Encoding::JmpSrc;
+ using JmpDst = X86Encoding::JmpDst;
+
+ public:
+ AssemblerX86Shared() {
+ if (!HasAVX()) {
+ masm.disableVEX();
+ }
+ }
+
+ enum Condition {
+ Equal = X86Encoding::ConditionE,
+ NotEqual = X86Encoding::ConditionNE,
+ Above = X86Encoding::ConditionA,
+ AboveOrEqual = X86Encoding::ConditionAE,
+ Below = X86Encoding::ConditionB,
+ BelowOrEqual = X86Encoding::ConditionBE,
+ GreaterThan = X86Encoding::ConditionG,
+ GreaterThanOrEqual = X86Encoding::ConditionGE,
+ LessThan = X86Encoding::ConditionL,
+ LessThanOrEqual = X86Encoding::ConditionLE,
+ Overflow = X86Encoding::ConditionO,
+ NoOverflow = X86Encoding::ConditionNO,
+ CarrySet = X86Encoding::ConditionC,
+ CarryClear = X86Encoding::ConditionNC,
+ Signed = X86Encoding::ConditionS,
+ NotSigned = X86Encoding::ConditionNS,
+ Zero = X86Encoding::ConditionE,
+ NonZero = X86Encoding::ConditionNE,
+ Parity = X86Encoding::ConditionP,
+ NoParity = X86Encoding::ConditionNP
+ };
+
+ enum class SSERoundingMode {
+ Nearest = int(X86Encoding::SSERoundingMode::RoundToNearest),
+ Floor = int(X86Encoding::SSERoundingMode::RoundDown),
+ Ceil = int(X86Encoding::SSERoundingMode::RoundUp),
+ Trunc = int(X86Encoding::SSERoundingMode::RoundToZero)
+ };
+
+ // If this bit is set, the vucomisd operands have to be inverted.
+ static const int DoubleConditionBitInvert = 0x10;
+
+ // Bit set when a DoubleCondition does not map to a single x86 condition.
+ // The macro assembler has to special-case these conditions.
+ static const int DoubleConditionBitSpecial = 0x20;
+ static const int DoubleConditionBits =
+ DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered
+ // - i.e. neither operand is NaN.
+ DoubleOrdered = NoParity,
+ DoubleEqual = Equal | DoubleConditionBitSpecial,
+ DoubleNotEqual = NotEqual,
+ DoubleGreaterThan = Above,
+ DoubleGreaterThanOrEqual = AboveOrEqual,
+ DoubleLessThan = Above | DoubleConditionBitInvert,
+ DoubleLessThanOrEqual = AboveOrEqual | DoubleConditionBitInvert,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered = Parity,
+ DoubleEqualOrUnordered = Equal,
+ DoubleNotEqualOrUnordered = NotEqual | DoubleConditionBitSpecial,
+ DoubleGreaterThanOrUnordered = Below | DoubleConditionBitInvert,
+ DoubleGreaterThanOrEqualOrUnordered =
+ BelowOrEqual | DoubleConditionBitInvert,
+ DoubleLessThanOrUnordered = Below,
+ DoubleLessThanOrEqualOrUnordered = BelowOrEqual
+ };
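+ // For example, DoubleLessThan is encoded as (Above | DoubleConditionBitInvert):
+ // the comparison is emitted with its operands swapped and then tested with the
+ // unsigned Above condition, which yields the ordered "less than" result and is
+ // false whenever either operand is NaN.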
+
+ enum NaNCond { NaN_HandledByCond, NaN_IsTrue, NaN_IsFalse };
+
+ // If the primary condition returned by ConditionFromDoubleCondition doesn't
+ // handle NaNs properly, return NaN_IsFalse if the comparison should be
+ // overridden to return false on NaN, NaN_IsTrue if it should be overridden
+ // to return true on NaN, or NaN_HandledByCond if no secondary check is
+ // needed.
+ static inline NaNCond NaNCondFromDoubleCondition(DoubleCondition cond) {
+ switch (cond) {
+ case DoubleOrdered:
+ case DoubleNotEqual:
+ case DoubleGreaterThan:
+ case DoubleGreaterThanOrEqual:
+ case DoubleLessThan:
+ case DoubleLessThanOrEqual:
+ case DoubleUnordered:
+ case DoubleEqualOrUnordered:
+ case DoubleGreaterThanOrUnordered:
+ case DoubleGreaterThanOrEqualOrUnordered:
+ case DoubleLessThanOrUnordered:
+ case DoubleLessThanOrEqualOrUnordered:
+ return NaN_HandledByCond;
+ case DoubleEqual:
+ return NaN_IsFalse;
+ case DoubleNotEqualOrUnordered:
+ return NaN_IsTrue;
+ }
+
+ MOZ_CRASH("Unknown double condition");
+ }
+
+ static void StaticAsserts() {
+ // DoubleConditionBits should not interfere with x86 condition codes.
+ static_assert(!((Equal | NotEqual | Above | AboveOrEqual | Below |
+ BelowOrEqual | Parity | NoParity) &
+ DoubleConditionBits));
+ }
+
+ static Condition InvertCondition(Condition cond);
+ static Condition UnsignedCondition(Condition cond);
+ static Condition ConditionWithoutEqual(Condition cond);
+
+ static DoubleCondition InvertCondition(DoubleCondition cond);
+
+ // Return the primary condition to test. Some primary conditions may not
+ // handle NaNs properly and may therefore require a secondary condition.
+ // Use NaNCondFromDoubleCondition to determine what else is needed.
+ static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
+ return static_cast<Condition>(cond & ~DoubleConditionBits);
+ }
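+ // A minimal sketch of how a caller combines the two helpers after a vucomisd
+ // (the labels |ifTrue|/|ifFalse| and the condition |dc| are illustrative, not
+ // part of this interface):
+ //
+ //   switch (NaNCondFromDoubleCondition(dc)) {
+ //     case NaN_IsFalse: j(Parity, &ifFalse); break;  // unordered => false
+ //     case NaN_IsTrue:  j(Parity, &ifTrue);  break;  // unordered => true
+ //     case NaN_HandledByCond: break;                 // primary test suffices
+ //   }
+ //   j(ConditionFromDoubleCondition(dc), &ifTrue);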
+
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ void setUnlimitedBuffer() {
+ // No-op on this platform
+ }
+ bool oom() const {
+ return AssemblerShared::oom() || masm.oom() || jumpRelocations_.oom() ||
+ dataRelocations_.oom();
+ }
+ bool reserve(size_t size) { return masm.reserve(size); }
+ bool swapBuffer(wasm::Bytes& other) { return masm.swapBuffer(other); }
+
+ void setPrinter(Sprinter* sp) { masm.setPrinter(sp); }
+
+ Register getStackPointer() const { return StackPointer; }
+
+ void executableCopy(void* buffer);
+ void processCodeLabels(uint8_t* rawCode);
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+
+ // Size of the instruction stream, in bytes.
+ size_t size() const { return masm.size(); }
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const { return jumpRelocations_.length(); }
+ // Size of the data relocation table, in bytes.
+ size_t dataRelocationTableBytes() const { return dataRelocations_.length(); }
+ // Total bytes needed for the instruction stream plus both relocation tables.
+ size_t bytesNeeded() const {
+ return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
+ }
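+ // One plausible way a consumer lays these regions out back to back after
+ // reserving bytesNeeded() bytes (a sketch of the intended use, not the exact
+ // JitCode layout):
+ //
+ //   executableCopy(buffer);
+ //   copyJumpRelocationTable(buffer + size());
+ //   copyDataRelocationTable(buffer + size() + jumpRelocationTableBytes());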
+
+ public:
+ void haltingAlign(int alignment) {
+ MOZ_ASSERT(hasCreator());
+ masm.haltingAlign(alignment);
+ }
+ void nopAlign(int alignment) {
+ MOZ_ASSERT(hasCreator());
+ masm.nopAlign(alignment);
+ }
+ void writeCodePointer(CodeLabel* label) {
+ MOZ_ASSERT(hasCreator());
+ // Use -1 as a dummy value; it will be patched after codegen.
+ masm.jumpTablePointer(-1);
+ label->patchAt()->bind(masm.size());
+ }
+ void cmovCCl(Condition cond, const Operand& src, Register dest) {
+ X86Encoding::Condition cc = static_cast<X86Encoding::Condition>(cond);
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.cmovCCl_rr(cc, src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmovCCl_mr(cc, src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmovCCl_mr(cc, src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmovCCl(Condition cond, Register src, Register dest) {
+ X86Encoding::Condition cc = static_cast<X86Encoding::Condition>(cond);
+ masm.cmovCCl_rr(cc, src.encoding(), dest.encoding());
+ }
+ void cmovzl(const Operand& src, Register dest) {
+ cmovCCl(Condition::Zero, src, dest);
+ }
+ void cmovnzl(const Operand& src, Register dest) {
+ cmovCCl(Condition::NonZero, src, dest);
+ }
+ void movl(Imm32 imm32, Register dest) {
+ MOZ_ASSERT(hasCreator());
+ masm.movl_i32r(imm32.value, dest.encoding());
+ }
+ void movl(Register src, Register dest) {
+ MOZ_ASSERT(hasCreator());
+ masm.movl_rr(src.encoding(), dest.encoding());
+ }
+ void movl(const Operand& src, Register dest) {
+ MOZ_ASSERT(hasCreator());
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movl(Register src, const Operand& dest) {
+ MOZ_ASSERT(hasCreator());
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movl(Imm32 imm32, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_i32r(imm32.value, dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_i32m(imm32.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_i32m(imm32.value, dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_i32m(imm32.value, dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void xchgl(Register src, Register dest) {
+ masm.xchgl_rr(src.encoding(), dest.encoding());
+ }
+
+ void vmovapd(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovapd_rr(src.encoding(), dest.encoding());
+ }
+ // Eventually vmovapd should be overloaded to support loads and
+ // stores too.
+ void vmovapd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vmovapd_rr(src.fpu(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vmovaps(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_rr(src.encoding(), dest.encoding());
+ }
+ void vmovaps(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovaps_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovaps_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ case Operand::FPREG:
+ masm.vmovaps_rr(src.fpu(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovaps(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovaps_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovaps_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
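+ // Unaligned counterparts of the vmovaps forms above: vmovups accepts any
+ // memory alignment, whereas vmovaps faults on a misaligned 16-byte operand.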
+ void vmovups(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovups_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovups(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovups_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vmovsd(const Address& src, FloatRegister dest) {
+ masm.vmovsd_mr(src.offset, src.base.encoding(), dest.encoding());
+ }
+ void vmovsd(const BaseIndex& src, FloatRegister dest) {
+ masm.vmovsd_mr(src.offset, src.base.encoding(), src.index.encoding(),
+ src.scale, dest.encoding());
+ }
+ void vmovsd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(hasCreator());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ vmovsd(src.toAddress(), dest);
+ break;
+ case Operand::MEM_SCALE:
+ vmovsd(src.toBaseIndex(), dest);
+ break;
+ default:
+ MOZ_CRASH("Unknown operand for vmovsd");
+ }
+ }
+ void vmovsd(FloatRegister src, const Address& dest) {
+ masm.vmovsd_rm(src.encoding(), dest.offset, dest.base.encoding());
+ }
+ void vmovsd(FloatRegister src, const BaseIndex& dest) {
+ masm.vmovsd_rm(src.encoding(), dest.offset, dest.base.encoding(),
+ dest.index.encoding(), dest.scale);
+ }
+ // Note the special semantics of this form: it does not clobber the high bits
+ // of the destination (in the VEX encoding they are taken from src0).
+ void vmovsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ masm.vmovsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmovss(const Address& src, FloatRegister dest) {
+ masm.vmovss_mr(src.offset, src.base.encoding(), dest.encoding());
+ }
+ void vmovss(const BaseIndex& src, FloatRegister dest) {
+ masm.vmovss_mr(src.offset, src.base.encoding(), src.index.encoding(),
+ src.scale, dest.encoding());
+ }
+ void vmovss(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(hasCreator());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ vmovss(src.toAddress(), dest);
+ break;
+ case Operand::MEM_SCALE:
+ vmovss(src.toBaseIndex(), dest);
+ break;
+ default:
+ MOZ_CRASH("Unknown operand for vmovss");
+ }
+ }
+ void vmovss(FloatRegister src, const Address& dest) {
+ masm.vmovss_rm(src.encoding(), dest.offset, dest.base.encoding());
+ }
+ void vmovss(FloatRegister src, const BaseIndex& dest) {
+ masm.vmovss_rm(src.encoding(), dest.offset, dest.base.encoding(),
+ dest.index.encoding(), dest.scale);
+ }
+ void vmovss(FloatRegister src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ vmovss(src, dest.toAddress());
+ break;
+ case Operand::MEM_SCALE:
+ vmovss(src, dest.toBaseIndex());
+ break;
+ default:
+ MOZ_CRASH("Unknown operand for vmovss");
+ }
+ }
+ // Note the special semantics of this form: it does not clobber the high bits
+ // of the destination (in the VEX encoding they are taken from src0).
+ void vmovss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ masm.vmovss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmovdqu(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(hasCreator());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovdqu_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovdqu(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(hasCreator());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovdqu_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovdqa(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vmovdqa_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqa_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovdqa_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovdqa(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqa_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovdqa_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovdqa(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtss2sd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtss2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vcvtsd2ss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtsd2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void movzbl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzbl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movzbl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movsbl(Register src, Register dest) {
+ masm.movsbl_rr(src.encoding(), dest.encoding());
+ }
+ void movsbl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movsbl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movsbl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movb(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movb_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movb(Imm32 src, Register dest) {
+ masm.movb_ir(src.value & 255, dest.encoding());
+ }
+ void movb(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movb_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movb(Imm32 src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_im(src.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movb_im(src.value, dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movzwl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movzwl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movzwl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movzwl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movzwl(Register src, Register dest) {
+ masm.movzwl_rr(src.encoding(), dest.encoding());
+ }
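+ // The first two movw overloads below reuse the 32-bit movl encodings behind
+ // an operand-size override prefix (prefix_16_for_32), which narrows the
+ // operation to 16 bits.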
+ void movw(const Operand& src, Register dest) {
+ masm.prefix_16_for_32();
+ movl(src, dest);
+ }
+ void movw(Imm32 src, Register dest) {
+ masm.prefix_16_for_32();
+ movl(src, dest);
+ }
+ void movw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movw(Imm32 src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movw_im(src.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movw_im(src.value, dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movswl(Register src, Register dest) {
+ masm.movswl_rr(src.encoding(), dest.encoding());
+ }
+ void movswl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movswl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movswl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void leal(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.leal_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.leal_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ protected:
+ void jSrc(Condition cond, Label* label) {
+ if (label->bound()) {
+ // The jump can be immediately encoded to the correct destination.
+ masm.jCC_i(static_cast<X86Encoding::Condition>(cond),
+ JmpDst(label->offset()));
+ } else {
+ // Thread the jump list through the unpatched jump targets.
+ JmpSrc j = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ JmpSrc prev;
+ if (label->used()) {
+ prev = JmpSrc(label->offset());
+ }
+ label->use(j.offset());
+ masm.setNextJump(j, prev);
+ }
+ }
+ void jmpSrc(Label* label) {
+ if (label->bound()) {
+ // The jump can be immediately encoded to the correct destination.
+ masm.jmp_i(JmpDst(label->offset()));
+ } else {
+ // Thread the jump list through the unpatched jump targets.
+ JmpSrc j = masm.jmp();
+ JmpSrc prev;
+ if (label->used()) {
+ prev = JmpSrc(label->offset());
+ }
+ label->use(j.offset());
+ masm.setNextJump(j, prev);
+ }
+ }
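+ // Sketch of the threaded jump list for an unbound label (offsets are made
+ // up): label->offset() -> jmp@0x40 -> jmp@0x25 -> jmp@0x10 -> end. Each
+ // unpatched jump's operand field records the offset of the previous unpatched
+ // jump via setNextJump(); bind() and retarget() below walk the chain with
+ // nextJump() and patch every entry.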
+
+ // Comparison of EAX against the address given by a Label.
+ JmpSrc cmpSrc(Label* label) {
+ JmpSrc j = masm.cmp_eax();
+ if (label->bound()) {
+ // The jump can be immediately patched to the correct destination.
+ masm.linkJump(j, JmpDst(label->offset()));
+ } else {
+ // Thread the jump list through the unpatched jump targets.
+ JmpSrc prev;
+ if (label->used()) {
+ prev = JmpSrc(label->offset());
+ }
+ label->use(j.offset());
+ masm.setNextJump(j, prev);
+ }
+ return j;
+ }
+
+ public:
+ void nop() {
+ MOZ_ASSERT(hasCreator());
+ masm.nop();
+ }
+ void nop(size_t n) {
+ MOZ_ASSERT(hasCreator());
+ masm.insert_nop(n);
+ }
+ void j(Condition cond, Label* label) {
+ MOZ_ASSERT(hasCreator());
+ jSrc(cond, label);
+ }
+ void jmp(Label* label) {
+ MOZ_ASSERT(hasCreator());
+ jmpSrc(label);
+ }
+
+ void jmp(const Operand& op) {
+ MOZ_ASSERT(hasCreator());
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.jmp_m(op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.jmp_m(op.disp(), op.base(), op.index(), op.scale());
+ break;
+ case Operand::REG:
+ masm.jmp_r(op.reg());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpEAX(Label* label) { cmpSrc(label); }
+ void bind(Label* label) {
+ JmpDst dst(masm.label());
+ if (label->used()) {
+ bool more;
+ JmpSrc jmp(label->offset());
+ do {
+ JmpSrc next;
+ more = masm.nextJump(jmp, &next);
+ masm.linkJump(jmp, dst);
+ jmp = next;
+ } while (more);
+ }
+ label->bind(dst.offset());
+ }
+ void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
+ uint32_t currentOffset() { return masm.label().offset(); }
+
+ // Re-routes all pending (unbound) jumps on |label| to |target|.
+ void retarget(Label* label, Label* target) {
+ if (!label->used()) {
+ return;
+ }
+ bool more;
+ JmpSrc jmp(label->offset());
+ do {
+ JmpSrc next;
+ more = masm.nextJump(jmp, &next);
+ if (target->bound()) {
+ // The jump can be immediately patched to the correct destination.
+ masm.linkJump(jmp, JmpDst(target->offset()));
+ } else {
+ // Thread the jump list through the unpatched jump targets.
+ JmpSrc prev;
+ if (target->used()) {
+ prev = JmpSrc(target->offset());
+ }
+ target->use(jmp.offset());
+ masm.setNextJump(jmp, prev);
+ }
+ jmp = JmpSrc(next.offset());
+ } while (more);
+ label->reset();
+ }
+
+ static void Bind(uint8_t* raw, const CodeLabel& label) {
+ if (label.patchAt().bound()) {
+ intptr_t offset = label.patchAt().offset();
+ intptr_t target = label.target().offset();
+ X86Encoding::SetPointer(raw + offset, raw + target);
+ }
+ }
+
+ void ret() {
+ MOZ_ASSERT(hasCreator());
+ masm.ret();
+ }
+ void retn(Imm32 n) {
+ MOZ_ASSERT(hasCreator());
+ // Remove the size of the return address, which is included in the frame size.
+ masm.ret_i(n.value - sizeof(void*));
+ }
+ CodeOffset call(Label* label) {
+ JmpSrc j = masm.call();
+ if (label->bound()) {
+ masm.linkJump(j, JmpDst(label->offset()));
+ } else {
+ JmpSrc prev;
+ if (label->used()) {
+ prev = JmpSrc(label->offset());
+ }
+ label->use(j.offset());
+ masm.setNextJump(j, prev);
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset call(Register reg) {
+ masm.call_r(reg.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ void call(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.call_r(op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.call_m(op.disp(), op.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ CodeOffset callWithPatch() { return CodeOffset(masm.call().offset()); }
+
+ void patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ unsigned char* code = masm.data();
+ X86Encoding::SetRel32(code + callerOffset, code + calleeOffset);
+ }
+ CodeOffset farJumpWithPatch() { return CodeOffset(masm.jmp().offset()); }
+ void patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ unsigned char* code = masm.data();
+ X86Encoding::SetRel32(code + farJump.offset(), code + targetOffset);
+ }
+
+ // This is for patching during code generation, not after.
+ void patchAddl(CodeOffset offset, int32_t n) {
+ unsigned char* code = masm.data();
+ X86Encoding::SetInt32(code + offset.offset(), n);
+ }
+
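+ // The two helpers below flip a reserved five-byte nop at a call site into a
+ // call rel32 and back, so a patchable call site can be switched on and off
+ // after the code is already live (e.g. for toggled call sites).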
+ static void patchFiveByteNopToCall(uint8_t* callsite, uint8_t* target) {
+ X86Encoding::BaseAssembler::patchFiveByteNopToCall(callsite, target);
+ }
+ static void patchCallToFiveByteNop(uint8_t* callsite) {
+ X86Encoding::BaseAssembler::patchCallToFiveByteNop(callsite);
+ }
+
+ void breakpoint() { masm.int3(); }
+ CodeOffset ud2() {
+ MOZ_ASSERT(hasCreator());
+ CodeOffset off(masm.currentOffset());
+ masm.ud2();
+ return off;
+ }
+
+ static bool HasSSE2() { return CPUInfo::IsSSE2Present(); }
+ static bool HasSSE3() { return CPUInfo::IsSSE3Present(); }
+ static bool HasSSSE3() { return CPUInfo::IsSSSE3Present(); }
+ static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
+ static bool HasSSE42() { return CPUInfo::IsSSE42Present(); }
+ static bool HasPOPCNT() { return CPUInfo::IsPOPCNTPresent(); }
+ static bool HasBMI1() { return CPUInfo::IsBMI1Present(); }
+ static bool HasBMI2() { return CPUInfo::IsBMI2Present(); }
+ static bool HasLZCNT() { return CPUInfo::IsLZCNTPresent(); }
+ static bool SupportsFloatingPoint() { return CPUInfo::IsSSE2Present(); }
+ static bool SupportsUnalignedAccesses() { return true; }
+ static bool SupportsFastUnalignedFPAccesses() { return true; }
+ static bool SupportsWasmSimd() { return CPUInfo::IsSSE41Present(); }
+ static bool HasAVX() { return CPUInfo::IsAVXPresent(); }
+ static bool HasAVX2() { return CPUInfo::IsAVX2Present(); }
+ static bool HasFMA() { return CPUInfo::IsFMAPresent(); }
+
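+ // All four rounding modes map onto the SSE4.1 roundsd/roundss immediate
+ // encodings, so a single SSE4.1 check covers them.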
+ static bool HasRoundInstruction(RoundingMode mode) {
+ switch (mode) {
+ case RoundingMode::Up:
+ case RoundingMode::Down:
+ case RoundingMode::NearestTiesToEven:
+ case RoundingMode::TowardsZero:
+ return CPUInfo::IsSSE41Present();
+ }
+ MOZ_CRASH("unexpected mode");
+ }
+
+ void cmpl(Register rhs, Register lhs) {
+ masm.cmpl_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpl(const Operand& rhs, Register lhs) {
+ switch (rhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_rr(rhs.reg(), lhs.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_mr(rhs.disp(), rhs.base(), lhs.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_mr(rhs.address(), lhs.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpl(Register rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_rr(rhs.encoding(), lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_rm(rhs.encoding(), lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpl_rm(rhs.encoding(), lhs.disp(), lhs.base(), lhs.index(),
+ lhs.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_rm(rhs.encoding(), lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpl(Imm32 rhs, Register lhs) {
+ masm.cmpl_ir(rhs.value, lhs.encoding());
+ }
+ void cmpl(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_im(rhs.value, lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpl_im(rhs.value, lhs.disp(), lhs.base(), lhs.index(),
+ lhs.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_im(rhs.value, lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpw(Register rhs, Register lhs) {
+ masm.cmpw_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpw(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpw_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpw_im(rhs.value, lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpw_im(rhs.value, lhs.disp(), lhs.base(), lhs.index(),
+ lhs.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpw_im(rhs.value, lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpb(Register rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpb_rr(rhs.encoding(), lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpb_rm(rhs.encoding(), lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpb_rm(rhs.encoding(), lhs.disp(), lhs.base(), lhs.index(),
+ lhs.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpb_rm(rhs.encoding(), lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpb(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpb_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpb_im(rhs.value, lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpb_im(rhs.value, lhs.disp(), lhs.base(), lhs.index(),
+ lhs.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpb_im(rhs.value, lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void setCC(Condition cond, Register r) {
+ masm.setCC_r(static_cast<X86Encoding::Condition>(cond), r.encoding());
+ }
+ void testb(Register rhs, Register lhs) {
+ MOZ_ASSERT(
+ AllocatableGeneralRegisterSet(Registers::SingleByteRegs).has(rhs));
+ MOZ_ASSERT(
+ AllocatableGeneralRegisterSet(Registers::SingleByteRegs).has(lhs));
+ masm.testb_rr(rhs.encoding(), lhs.encoding());
+ }
+ void testw(Register rhs, Register lhs) {
+ masm.testw_rr(lhs.encoding(), rhs.encoding());
+ }
+ void testl(Register rhs, Register lhs) {
+ masm.testl_rr(lhs.encoding(), rhs.encoding());
+ }
+ void testl(Imm32 rhs, Register lhs) {
+ masm.testl_ir(rhs.value, lhs.encoding());
+ }
+ void testl(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.testl_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.testl_i32m(rhs.value, lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.testl_i32m(rhs.value, lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void addl(Imm32 imm, Register dest) {
+ masm.addl_ir(imm.value, dest.encoding());
+ }
+ CodeOffset addlWithPatch(Imm32 imm, Register dest) {
+ masm.addl_i32r(imm.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ void addl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.addl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addl_im(imm.value, op.address());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.addw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addw_im(imm.value, op.address());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subl(Imm32 imm, Register dest) {
+ masm.subl_ir(imm.value, dest.encoding());
+ }
+ void subl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.subl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.subw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addl(Register src, Register dest) {
+ masm.addl_rr(src.encoding(), dest.encoding());
+ }
+ void addl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.addl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.addw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void sbbl(Register src, Register dest) {
+ masm.sbbl_rr(src.encoding(), dest.encoding());
+ }
+ void subl(Register src, Register dest) {
+ masm.subl_rr(src.encoding(), dest.encoding());
+ }
+ void subl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.subl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.subl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.subw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orl(Register reg, Register dest) {
+ masm.orl_rr(reg.encoding(), dest.encoding());
+ }
+ void orl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.orl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.orw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orl(Imm32 imm, Register reg) { masm.orl_ir(imm.value, reg.encoding()); }
+ void orl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.orl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.orw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorl(Register src, Register dest) {
+ masm.xorl_rr(src.encoding(), dest.encoding());
+ }
+ void xorl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.xorl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.xorw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorl(Imm32 imm, Register reg) {
+ masm.xorl_ir(imm.value, reg.encoding());
+ }
+ void xorl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.xorl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.xorw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andl(Register src, Register dest) {
+ masm.andl_rr(src.encoding(), dest.encoding());
+ }
+ void andl(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.andl_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andl_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andl_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andw(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.andw_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andw_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andw_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andl(Imm32 imm, Register dest) {
+ masm.andl_ir(imm.value, dest.encoding());
+ }
+ void andl(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.andl_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andl_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andl_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andw(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::REG:
+ masm.andw_ir(imm.value, op.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andw_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andw_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.addl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void orl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.orl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.xorl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void andl(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.andl_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void bsrl(const Register& src, const Register& dest) {
+ masm.bsrl_rr(src.encoding(), dest.encoding());
+ }
+ void bsfl(const Register& src, const Register& dest) {
+ masm.bsfl_rr(src.encoding(), dest.encoding());
+ }
+ void bswapl(Register reg) { masm.bswapl_r(reg.encoding()); }
+ void lzcntl(const Register& src, const Register& dest) {
+ masm.lzcntl_rr(src.encoding(), dest.encoding());
+ }
+ void tzcntl(const Register& src, const Register& dest) {
+ masm.tzcntl_rr(src.encoding(), dest.encoding());
+ }
+ void popcntl(const Register& src, const Register& dest) {
+ masm.popcntl_rr(src.encoding(), dest.encoding());
+ }
+ void imull(Register multiplier) {
+ // Consumes eax as the implicit second operand and clobbers edx; the 64-bit
+ // result is left in edx:eax.
+ masm.imull_r(multiplier.encoding());
+ }
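+ // Like imull(Register) above, this consumes eax as the implicit second
+ // operand and clobbers edx: the unsigned 64-bit product ends up in edx:eax.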
+ void umull(Register multiplier) { masm.mull_r(multiplier.encoding()); }
+ void imull(Imm32 imm, Register dest) {
+ masm.imull_ir(imm.value, dest.encoding(), dest.encoding());
+ }
+ void imull(Register src, Register dest) {
+ masm.imull_rr(src.encoding(), dest.encoding());
+ }
+ void imull(Imm32 imm, Register src, Register dest) {
+ masm.imull_ir(imm.value, src.encoding(), dest.encoding());
+ }
+ void imull(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.imull_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.imull_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void negl(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.negl_r(src.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.negl_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void negl(Register reg) { masm.negl_r(reg.encoding()); }
+ void notl(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.notl_r(src.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.notl_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void notl(Register reg) { masm.notl_r(reg.encoding()); }
+ void shrl(const Imm32 imm, Register dest) {
+ masm.shrl_ir(imm.value, dest.encoding());
+ }
+ void shll(const Imm32 imm, Register dest) {
+ masm.shll_ir(imm.value, dest.encoding());
+ }
+ void sarl(const Imm32 imm, Register dest) {
+ masm.sarl_ir(imm.value, dest.encoding());
+ }
+ void shrl_cl(Register dest) { masm.shrl_CLr(dest.encoding()); }
+ void shll_cl(Register dest) { masm.shll_CLr(dest.encoding()); }
+ void sarl_cl(Register dest) { masm.sarl_CLr(dest.encoding()); }
+ void shrdl_cl(Register src, Register dest) {
+ masm.shrdl_CLr(src.encoding(), dest.encoding());
+ }
+ void shldl_cl(Register src, Register dest) {
+ masm.shldl_CLr(src.encoding(), dest.encoding());
+ }
+
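+ // The BMI2 shift variants below take the shift count in an arbitrary register
+ // (not just cl) and leave the flags untouched.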
+ void sarxl(Register src, Register shift, Register dest) {
+ MOZ_ASSERT(HasBMI2());
+ masm.sarxl_rrr(src.encoding(), shift.encoding(), dest.encoding());
+ }
+ void shlxl(Register src, Register shift, Register dest) {
+ MOZ_ASSERT(HasBMI2());
+ masm.shlxl_rrr(src.encoding(), shift.encoding(), dest.encoding());
+ }
+ void shrxl(Register src, Register shift, Register dest) {
+ MOZ_ASSERT(HasBMI2());
+ masm.shrxl_rrr(src.encoding(), shift.encoding(), dest.encoding());
+ }
+
+ void roll(const Imm32 imm, Register dest) {
+ masm.roll_ir(imm.value, dest.encoding());
+ }
+ void roll_cl(Register dest) { masm.roll_CLr(dest.encoding()); }
+ void rolw(const Imm32 imm, Register dest) {
+ masm.rolw_ir(imm.value, dest.encoding());
+ }
+ void rorl(const Imm32 imm, Register dest) {
+ masm.rorl_ir(imm.value, dest.encoding());
+ }
+ void rorl_cl(Register dest) { masm.rorl_CLr(dest.encoding()); }
+
+ void incl(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.incl_m32(op.disp(), op.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_incl(const Operand& op) {
+ masm.prefix_lock();
+ incl(op);
+ }
+
+ void decl(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.decl_m32(op.disp(), op.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_decl(const Operand& op) {
+ masm.prefix_lock();
+ decl(op);
+ }
+
+ void addb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.addb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void addb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.addb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.addb_rm(src.encoding(), op.disp(), op.base(), op.index(),
+ op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void subb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.subb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void subb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.subb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.subb_rm(src.encoding(), op.disp(), op.base(), op.index(),
+ op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void andb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.andb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void andb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.andb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andb_rm(src.encoding(), op.disp(), op.base(), op.index(),
+ op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void orb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.orb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void orb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.orb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.orb_rm(src.encoding(), op.disp(), op.base(), op.index(),
+ op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void xorb(Imm32 imm, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xorb_im(imm.value, op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorb_im(imm.value, op.disp(), op.base(), op.index(), op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+ void xorb(Register src, const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xorb_rm(src.encoding(), op.disp(), op.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xorb_rm(src.encoding(), op.disp(), op.base(), op.index(),
+ op.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ template <typename T>
+ void lock_addb(T src, const Operand& op) {
+ masm.prefix_lock();
+ addb(src, op);
+ }
+ template <typename T>
+ void lock_subb(T src, const Operand& op) {
+ masm.prefix_lock();
+ subb(src, op);
+ }
+ template <typename T>
+ void lock_andb(T src, const Operand& op) {
+ masm.prefix_lock();
+ andb(src, op);
+ }
+ template <typename T>
+ void lock_orb(T src, const Operand& op) {
+ masm.prefix_lock();
+ orb(src, op);
+ }
+ template <typename T>
+ void lock_xorb(T src, const Operand& op) {
+ masm.prefix_lock();
+ xorb(src, op);
+ }
+
+ template <typename T>
+ void lock_addw(T src, const Operand& op) {
+ masm.prefix_lock();
+ addw(src, op);
+ }
+ template <typename T>
+ void lock_subw(T src, const Operand& op) {
+ masm.prefix_lock();
+ subw(src, op);
+ }
+ template <typename T>
+ void lock_andw(T src, const Operand& op) {
+ masm.prefix_lock();
+ andw(src, op);
+ }
+ template <typename T>
+ void lock_orw(T src, const Operand& op) {
+ masm.prefix_lock();
+ orw(src, op);
+ }
+ template <typename T>
+ void lock_xorw(T src, const Operand& op) {
+ masm.prefix_lock();
+ xorw(src, op);
+ }
+
+ // Note: lock_addl(imm, op) is used for a memory barrier on non-SSE2 systems,
+ // among other things. Do not optimize it away or replace it with XADDL or
+ // similar.
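+ // For example, a full memory barrier on pre-SSE2 hardware can be emitted as a
+ // locked no-op add to the slot at the stack pointer (a sketch of the intended
+ // use, not a new interface):
+ //
+ //   lock_addl(Imm32(0), Operand(Address(StackPointer, 0)));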
+ template <typename T>
+ void lock_addl(T src, const Operand& op) {
+ masm.prefix_lock();
+ addl(src, op);
+ }
+ template <typename T>
+ void lock_subl(T src, const Operand& op) {
+ masm.prefix_lock();
+ subl(src, op);
+ }
+ template <typename T>
+ void lock_andl(T src, const Operand& op) {
+ masm.prefix_lock();
+ andl(src, op);
+ }
+ template <typename T>
+ void lock_orl(T src, const Operand& op) {
+ masm.prefix_lock();
+ orl(src, op);
+ }
+ template <typename T>
+ void lock_xorl(T src, const Operand& op) {
+ masm.prefix_lock();
+ xorl(src, op);
+ }
+
+ void lock_cmpxchgb(Register src, const Operand& mem) {
+ masm.prefix_lock();
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.cmpxchgb(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpxchgb(src.encoding(), mem.disp(), mem.base(), mem.index(),
+ mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_cmpxchgw(Register src, const Operand& mem) {
+ masm.prefix_lock();
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.cmpxchgw(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpxchgw(src.encoding(), mem.disp(), mem.base(), mem.index(),
+ mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_cmpxchgl(Register src, const Operand& mem) {
+ masm.prefix_lock();
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.cmpxchgl(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpxchgl(src.encoding(), mem.disp(), mem.base(), mem.index(),
+ mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
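+ // The underlying cmpxchg8b instruction has fixed register operands: it
+ // compares edx:eax against the memory operand, stores ecx:ebx on success
+ // (ZF set), and loads the current memory value back into edx:eax on failure;
+ // the srcHi/srcLo and newHi/newLo arguments correspond to those pairs.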
+ void lock_cmpxchg8b(Register srcHi, Register srcLo, Register newHi,
+ Register newLo, const Operand& mem) {
+ masm.prefix_lock();
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.cmpxchg8b(srcHi.encoding(), srcLo.encoding(), newHi.encoding(),
+ newLo.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmpxchg8b(srcHi.encoding(), srcLo.encoding(), newHi.encoding(),
+ newLo.encoding(), mem.disp(), mem.base(), mem.index(),
+ mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
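+ // xchg with a memory operand is implicitly locked on x86, so the exchanges
+ // below need no explicit lock prefix to be atomic.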
+ void xchgb(Register src, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xchgb_rm(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xchgb_rm(src.encoding(), mem.disp(), mem.base(), mem.index(),
+ mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xchgw(Register src, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xchgw_rm(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xchgw_rm(src.encoding(), mem.disp(), mem.base(), mem.index(),
+ mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xchgl(Register src, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.xchgl_rm(src.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.xchgl_rm(src.encoding(), mem.disp(), mem.base(), mem.index(),
+ mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
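+ // lock xadd atomically adds srcdest to the memory operand and returns the
+ // memory operand's previous value in srcdest; this is the building block for
+ // atomic fetch-and-add.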
+ void lock_xaddb(Register srcdest, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.lock_xaddb_rm(srcdest.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.lock_xaddb_rm(srcdest.encoding(), mem.disp(), mem.base(),
+ mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void lock_xaddw(Register srcdest, const Operand& mem) {
+ masm.prefix_16_for_32();
+ lock_xaddl(srcdest, mem);
+ }
+ void lock_xaddl(Register srcdest, const Operand& mem) {
+ switch (mem.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.lock_xaddl_rm(srcdest.encoding(), mem.disp(), mem.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.lock_xaddl_rm(srcdest.encoding(), mem.disp(), mem.base(),
+ mem.index(), mem.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void push(const Imm32 imm) { masm.push_i(imm.value); }
+
+ void push(const Operand& src) {
+ MOZ_ASSERT(hasCreator());
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.push_r(src.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.push_m(src.disp(), src.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.push_m(src.disp(), src.base(), src.index(), src.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void push(Register src) {
+ MOZ_ASSERT(hasCreator());
+ masm.push_r(src.encoding());
+ }
+ void push(const Address& src) {
+ masm.push_m(src.offset, src.base.encoding());
+ }
+
+ void pop(const Operand& src) {
+ MOZ_ASSERT(hasCreator());
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.pop_r(src.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.pop_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void pop(Register src) {
+ MOZ_ASSERT(hasCreator());
+ masm.pop_r(src.encoding());
+ }
+ void pop(const Address& src) { masm.pop_m(src.offset, src.base.encoding()); }
+
+ void pushFlags() { masm.push_flags(); }
+ void popFlags() { masm.pop_flags(); }
+
+#ifdef JS_CODEGEN_X86
+ void pushAllRegs() { masm.pusha(); }
+ void popAllRegs() { masm.popa(); }
+#endif
+
+ // Zero-extend byte to 32-bit integer.
+ void movzbl(Register src, Register dest) {
+ masm.movzbl_rr(src.encoding(), dest.encoding());
+ }
+
+ void cdq() { masm.cdq(); }
+ void idiv(Register divisor) { masm.idivl_r(divisor.encoding()); }
+ void udiv(Register divisor) { masm.divl_r(divisor.encoding()); }
+
+ void vpblendw(uint32_t mask, FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpblendw_irr(mask, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+
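+ // In the legacy (non-VEX) encoding, pblendvb takes its mask implicitly in
+ // xmm0, so when AVX is unavailable the |mask| register passed here is
+ // effectively constrained to xmm0.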
+ void vpblendvb(FloatRegister mask, FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpblendvb_rr(mask.encoding(), src1.encoding(), src0.encoding(),
+ dest.encoding());
+ }
+
+ void vpinsrb(unsigned lane, const Operand& src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::REG:
+ masm.vpinsrb_irr(lane, src1.reg(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpinsrb_imr(lane, src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpinsrb_imr(lane, src1.disp(), src1.base(), src1.index(),
+ src1.scale(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
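+ // Unlike vpinsrb above, pinsrw dates back to SSE2, so no SSE4.1 check is
+ // needed here.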
+ void vpinsrw(unsigned lane, const Operand& src1, FloatRegister src0,
+ FloatRegister dest) {
+ switch (src1.kind()) {
+ case Operand::REG:
+ masm.vpinsrw_irr(lane, src1.reg(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpinsrw_imr(lane, src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpinsrw_imr(lane, src1.disp(), src1.base(), src1.index(),
+ src1.scale(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpinsrd(unsigned lane, Register src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpinsrd_irr(lane, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+
+ void vpextrb(unsigned lane, FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.vpextrb_irr(lane, src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpextrb_irm(lane, src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpextrb_irm(lane, src.encoding(), dest.disp(), dest.base(),
+ dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpextrw(unsigned lane, FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.vpextrw_irr(lane, src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpextrw_irm(lane, src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpextrw_irm(lane, src.encoding(), dest.disp(), dest.base(),
+ dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpextrd(unsigned lane, FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpextrd_irr(lane, src.encoding(), dest.encoding());
+ }
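+
+  // Packed shifts. vpslldq/vpsrldq shift the whole 128-bit register by bytes;
+  // the others shift each lane, by an immediate or by a count register.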
+ void vpsrldq(Imm32 shift, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrldq_ir(shift.value, src0.encoding(), dest.encoding());
+ }
+ void vpslldq(Imm32 shift, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpslldq_ir(shift.value, src.encoding(), dest.encoding());
+ }
+ void vpsllq(Imm32 shift, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsllq_ir(shift.value, src0.encoding(), dest.encoding());
+ }
+ void vpsllq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsllq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsrlq(Imm32 shift, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrlq_ir(shift.value, src0.encoding(), dest.encoding());
+ }
+ void vpsrlq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrlq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpslld(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpslld_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpslld(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpslld_ir(count.value, src0.encoding(), dest.encoding());
+ }
+ void vpsrad(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrad_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsrad(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrad_ir(count.value, src0.encoding(), dest.encoding());
+ }
+ void vpsrld(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrld_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsrld(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrld_ir(count.value, src0.encoding(), dest.encoding());
+ }
+
+ void vpsllw(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsllw_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsllw(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsllw_ir(count.value, src0.encoding(), dest.encoding());
+ }
+ void vpsraw(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsraw_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsraw(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsraw_ir(count.value, src0.encoding(), dest.encoding());
+ }
+ void vpsrlw(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrlw_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpsrlw(Imm32 count, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpsrlw_ir(count.value, src0.encoding(), dest.encoding());
+ }
+
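+  // Conversions between integer and floating-point values, scalar and packed.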
+ void vcvtsi2sd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::REG:
+ masm.vcvtsi2sd_rr(src1.reg(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vcvtsi2sd_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vcvtsi2sd_mr(src1.disp(), src1.base(), src1.index(), src1.scale(),
+ src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vcvttsd2si(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvttsd2si_rr(src.encoding(), dest.encoding());
+ }
+ void vcvttss2si(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvttss2si_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtsi2ss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::REG:
+ masm.vcvtsi2ss_rr(src1.reg(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vcvtsi2ss_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vcvtsi2ss_mr(src1.disp(), src1.base(), src1.index(), src1.scale(),
+ src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vcvtsi2ss(Register src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtsi2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vcvtsi2sd(Register src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtsi2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vcvttps2dq(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvttps2dq_rr(src.encoding(), dest.encoding());
+ }
+ void vcvttpd2dq(FloatRegister src, FloatRegister dest) {
+ masm.vcvttpd2dq_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtdq2ps(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vcvtdq2ps_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtdq2pd(FloatRegister src, FloatRegister dest) {
+ masm.vcvtdq2pd_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtps2pd(FloatRegister src, FloatRegister dest) {
+ masm.vcvtps2pd_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtpd2ps(FloatRegister src, FloatRegister dest) {
+ masm.vcvtpd2ps_rr(src.encoding(), dest.encoding());
+ }
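+
+  // Extract lane sign bits into a general-purpose register (movmsk), and set
+  // EFLAGS from vector tests (ptest) and scalar compares (ucomis).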
+ void vmovmskpd(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovmskpd_rr(src.encoding(), dest.encoding());
+ }
+ void vmovmskps(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovmskps_rr(src.encoding(), dest.encoding());
+ }
+ void vpmovmskb(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpmovmskb_rr(src.encoding(), dest.encoding());
+ }
+ void vptest(FloatRegister rhs, FloatRegister lhs) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vptest_rr(rhs.encoding(), lhs.encoding());
+ }
+ void vucomisd(FloatRegister rhs, FloatRegister lhs) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vucomisd_rr(rhs.encoding(), lhs.encoding());
+ }
+ void vucomiss(FloatRegister rhs, FloatRegister lhs) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vucomiss_rr(rhs.encoding(), lhs.encoding());
+ }
+
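+  // Packed integer comparisons: each lane of dest becomes all ones when the
+  // predicate holds and all zeroes otherwise.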
+ void vpcmpeqb(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpeqb_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpeqb_mr(rhs.disp(), rhs.base(), lhs.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpeqb_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpcmpgtb(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpgtb_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpgtb_mr(rhs.disp(), rhs.base(), lhs.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpgtb_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpcmpeqw(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpeqw_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpeqw_mr(rhs.disp(), rhs.base(), lhs.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpeqw_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpcmpgtw(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpgtw_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpgtw_mr(rhs.disp(), rhs.base(), lhs.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpgtw_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpcmpeqd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpeqd_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpeqd_mr(rhs.disp(), rhs.base(), lhs.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpeqd_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpcmpgtd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpgtd_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpgtd_mr(rhs.disp(), rhs.base(), lhs.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpgtd_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpcmpgtq(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE42());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpgtq_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpcmpeqq(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vpcmpeqq_rr(rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpcmpeqq_mr(rhs.disp(), rhs.base(), lhs.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpcmpeqq_mr(rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
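+  // Packed float comparisons. The first argument is the imm8 predicate; the
+  // named wrappers below supply it. Each lane becomes an all-ones or
+  // all-zeroes mask.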
+ void vcmpps(uint8_t order, Operand rhs, FloatRegister lhs,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vcmpps_rr(order, rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vcmpps_mr(order, rhs.disp(), rhs.base(), lhs.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vcmpps_mr(order, rhs.address(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vcmpeqps(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_EQ, rhs, lhs, dest);
+ }
+ void vcmpltps(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_LT, rhs, lhs, dest);
+ }
+ void vcmpleps(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_LE, rhs, lhs, dest);
+ }
+ void vcmpunordps(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_UNORD, rhs, lhs, dest);
+ }
+ void vcmpneqps(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_NEQ, rhs, lhs, dest);
+ }
+ void vcmpordps(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmpps(X86Encoding::ConditionCmp_ORD, rhs, lhs, dest);
+ }
+ void vcmppd(uint8_t order, Operand rhs, FloatRegister lhs,
+ FloatRegister dest) {
+ switch (rhs.kind()) {
+ case Operand::FPREG:
+ masm.vcmppd_rr(order, rhs.fpu(), lhs.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+ }
+ void vcmpeqpd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmppd(X86Encoding::ConditionCmp_EQ, rhs, lhs, dest);
+ }
+ void vcmpltpd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmppd(X86Encoding::ConditionCmp_LT, rhs, lhs, dest);
+ }
+ void vcmplepd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmppd(X86Encoding::ConditionCmp_LE, rhs, lhs, dest);
+ }
+ void vcmpneqpd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmppd(X86Encoding::ConditionCmp_NEQ, rhs, lhs, dest);
+ }
+ void vcmpordpd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmppd(X86Encoding::ConditionCmp_ORD, rhs, lhs, dest);
+ }
+ void vcmpunordpd(const Operand& rhs, FloatRegister lhs, FloatRegister dest) {
+ vcmppd(X86Encoding::ConditionCmp_UNORD, rhs, lhs, dest);
+ }
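+
+  // Packed square root, plus the approximate reciprocal and approximate
+  // reciprocal square root.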
+ void vrcpps(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vrcpps_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vrcpps_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vrcpps_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsqrtps(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vsqrtps_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vsqrtps_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vsqrtps_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vrsqrtps(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vrsqrtps_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vrsqrtps_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vrsqrtps_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsqrtpd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vsqrtpd_rr(src.fpu(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
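+
+  // Moves of the low 32/64 bits between general-purpose registers, memory,
+  // and XMM registers.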
+ void vmovd(Register src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_rr(src.encoding(), dest.encoding());
+ }
+ void vmovd(FloatRegister src, Register dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_rr(src.encoding(), dest.encoding());
+ }
+ void vmovd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovd_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovd(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovd_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+        masm.vmovd_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovq(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovq(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmaddubsw(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSSE3());
+ masm.vpmaddubsw_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
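+
+  // Packed integer add/subtract, including signed-saturating (s) and
+  // unsigned-saturating (us) forms.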
+ void vpaddb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddb_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubb_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddsb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddsb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddsb_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddsb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddusb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddusb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddusb_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddusb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubsb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubsb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubsb_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubsb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubusb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubusb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubusb_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubusb_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddsw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddsw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddsw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddsw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddusw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddusw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddusw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddusw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubsw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubsw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubsw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubsw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubusw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubusw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubusw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubusw_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpaddd_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpaddd_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpsubd_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpsubd_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
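+
+  // Packed integer multiplies, multiply-add, and 64-bit lane add/subtract.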
+ void vpmuldq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vpmuldq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpmuludq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpmuludq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpmuludq(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmuludq_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmuludq_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmullw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmullw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmullw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmulhw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmulhw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmulhw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmulhuw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmulhuw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmulhuw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmulhrsw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+    MOZ_ASSERT(HasSSSE3());  // pmulhrsw is an SSSE3 instruction.
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmulhrsw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmulhrsw_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmulld(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmulld_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmulld_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpmulld_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmaddwd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmaddwd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpaddq(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpaddq_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpsubq(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpsubq_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
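+
+  // Packed floating-point arithmetic and min/max.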
+ void vaddps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vaddps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vaddps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vaddps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsubps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vsubps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vsubps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vsubps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmulps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmulps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmulps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmulps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vdivps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vdivps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vdivps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vdivps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmaxps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmaxps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmaxps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmaxps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vminps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vminps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vminps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vminps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vminpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vminpd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmaxpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmaxpd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vaddpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vaddpd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsubpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vsubpd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmulpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmulpd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vdivpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vdivpd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
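+
+  // Packed integer averages and min/max.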
+ void vpavgb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpavgb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpavgw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpavgw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpminsb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpminsb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpminub(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpminub_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmaxsb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmaxsb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmaxub(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmaxub_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpminsw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpminsw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpminuw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpminuw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmaxsw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmaxsw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmaxuw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmaxuw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpminsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpminsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpminud(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpminud_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmaxsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmaxsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmaxud(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpmaxud_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
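+
+  // Narrowing packs with signed or unsigned saturation.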
+ void vpacksswb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpacksswb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpackuswb(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpackuswb_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpackssdw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpackssdw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpackusdw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpackusdw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
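+
+  // Packed absolute value and sign/zero extension of the low lanes.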
+ void vpabsb(const Operand& src, FloatRegister dest) {
+    MOZ_ASSERT(HasSSSE3());  // pabsb is SSSE3, not SSE3.
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vpabsb_rr(src.fpu(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpabsw(const Operand& src, FloatRegister dest) {
+    MOZ_ASSERT(HasSSSE3());  // pabsw is SSSE3, not SSE3.
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vpabsw_rr(src.fpu(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpabsd(const Operand& src, FloatRegister dest) {
+    MOZ_ASSERT(HasSSSE3());  // pabsd is SSSE3, not SSE3.
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vpabsd_rr(src.fpu(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmovsxbw(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vpmovsxbw_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmovsxbw_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpmovsxbw_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmovzxbw(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vpmovzxbw_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmovzxbw_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpmovzxbw_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmovsxwd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vpmovsxwd_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmovsxwd_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpmovsxwd_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmovzxwd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vpmovzxwd_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmovzxwd_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpmovzxwd_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmovsxdq(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vpmovsxdq_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmovsxdq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpmovsxdq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpmovzxdq(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vpmovzxdq_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpmovzxdq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vpmovzxdq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vphaddd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vphaddd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpalignr(const Operand& src1, FloatRegister src0, FloatRegister dest,
+ uint8_t shift) {
+    MOZ_ASSERT(HasSSSE3());  // palignr is SSSE3, not SSE3.
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpalignr_irr(shift, src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
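+
+  // Interleave (unpack) the low or high elements of the two sources.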
+ void vpunpcklbw(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpcklbw_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpunpckhbw(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpckhbw_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpunpcklbw(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpunpcklbw_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpunpckldq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpckldq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpunpckldq(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ switch (src1.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vpunpckldq_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpunpckldq_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpunpcklqdq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpcklqdq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpunpcklqdq(const Operand& src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ switch (src1.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vpunpcklqdq_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpunpcklqdq_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpunpckhdq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpckhdq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpunpckhqdq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpckhqdq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpunpcklwd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpcklwd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpunpckhwd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpckhwd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+
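+  // Bitwise AND, AND-NOT, OR, and XOR on vector registers.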
+ void vandps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vandps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vandps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vandps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vandnps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+    // Computes dest = ~src0 & src1: the bits of the first source are
+    // inverted before the AND.
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vandnps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vandnps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vandnps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vorps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vorps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vorps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vorps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vxorps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vxorps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vxorps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vxorps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vandpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vandpd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpand(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpand_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpand(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpand_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpand_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpand_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpor(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpor_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpor(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpor_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpor_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpor_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpxor(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpxor_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpxor(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpxor_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpxor_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpxor_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vpandn(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpandn_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpandn(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpandn_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpandn_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpandn_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
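+  // Shuffles, duplications, and lane moves.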
+ void vpshufd(uint32_t mask, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpshufd_irr(mask, src.encoding(), dest.encoding());
+ }
+ void vpshufd(uint32_t mask, const Operand& src1, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vpshufd_irr(mask, src1.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vpshufd_imr(mask, src1.disp(), src1.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpshufd_imr(mask, src1.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpshuflw(uint32_t mask, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpshuflw_irr(mask, src.encoding(), dest.encoding());
+ }
+ void vpshufhw(uint32_t mask, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vpshufhw_irr(mask, src.encoding(), dest.encoding());
+ }
+ void vpshufb(FloatRegister mask, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSSE3());
+ masm.vpshufb_rr(mask.encoding(), src.encoding(), dest.encoding());
+ }
+ void vmovddup(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vmovddup_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmovddup_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovddup_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovhlps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovhlps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmovlhps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovlhps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vunpcklps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vunpcklps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vunpcklps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vunpcklps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vunpcklps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vunpcklps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vunpckhps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vunpckhps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vunpckhps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vunpckhps_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vunpckhps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vunpckhps_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vshufps(uint32_t mask, FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vshufps_irr(mask, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vshufps(uint32_t mask, const Operand& src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vshufps_irr(mask, src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vshufps_imr(mask, src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vshufps_imr(mask, src1.address(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vshufpd(uint32_t mask, FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vshufpd_irr(mask, src1.encoding(), src0.encoding(), dest.encoding());
+ }
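+
+  // Scalar floating-point arithmetic on the low lane.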
+ void vaddsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vaddsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vaddss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vaddss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vaddsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vaddsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vaddsd_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vaddsd_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vaddss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vaddss_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vaddss_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vaddss_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsubsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vsubsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vsubss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vsubss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vsubsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vsubsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vsubsd_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vsubss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vsubss_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vsubss_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmulsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmulsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmulsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmulsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmulsd_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmulss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmulss_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmulss_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmulss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmulss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vdivsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vdivsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vdivss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vdivss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vdivsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vdivsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vdivsd_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vdivss(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vdivss_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vdivss_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vxorpd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vxorpd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vxorps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vxorps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vorpd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vorpd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vorps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vorps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vandpd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vandpd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vandps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vandps_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vsqrtsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vsqrtsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vsqrtss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vsqrtss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vroundps(SSERoundingMode mode, const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vroundps_irr((X86Encoding::SSERoundingMode)mode, src.fpu(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vroundpd(SSERoundingMode mode, const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vroundpd_irr((X86Encoding::SSERoundingMode)mode, src.fpu(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ static X86Encoding::RoundingMode ToX86RoundingMode(RoundingMode mode) {
+ switch (mode) {
+ case RoundingMode::Up:
+ return X86Encoding::RoundUp;
+ case RoundingMode::Down:
+ return X86Encoding::RoundDown;
+ case RoundingMode::NearestTiesToEven:
+ return X86Encoding::RoundToNearest;
+ case RoundingMode::TowardsZero:
+ return X86Encoding::RoundToZero;
+ }
+ MOZ_CRASH("unexpected mode");
+ }
+ void vroundsd(X86Encoding::RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vroundsd_irr(mode, src.encoding(), dest.encoding());
+ }
+ void vroundss(X86Encoding::RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vroundss_irr(mode, src.encoding(), dest.encoding());
+ }
+
+ unsigned vinsertpsMask(unsigned sourceLane, unsigned destLane,
+ unsigned zeroMask = 0) {
+ // Note that the sourceLane bits are ignored when the source is a memory
+ // operand; in that case the source is the given 32-bit memory location.
+ MOZ_ASSERT(zeroMask < 16);
+ unsigned ret = zeroMask;
+ ret |= destLane << 4;
+ ret |= sourceLane << 6;
+ MOZ_ASSERT(ret < 256);
+ return ret;
+ }
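+ // For example, vinsertpsMask(2, 1) == 0x90: sourceLane 2 goes in bits 7:6,
+ // destLane 1 in bits 5:4, and the zero mask stays 0 in bits 3:0.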
+ void vinsertps(uint32_t mask, FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vinsertps_irr(mask, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vinsertps(uint32_t mask, const Operand& src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vinsertps_irr(mask, src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vinsertps_imr(mask, src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vinsertps_imr(mask, src1.disp(), src1.base(), src1.index(),
+ src1.scale(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovlps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ switch (src1.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovlps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovlps_mr(src1.disp(), src1.base(), src1.index(), src1.scale(),
+ src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovlps(FloatRegister src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovlps_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovlps_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovhps(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ switch (src1.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovhps_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovhps_mr(src1.disp(), src1.base(), src1.index(), src1.scale(),
+ src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovhps(FloatRegister src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovhps_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovhps_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vextractps(unsigned lane, FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE41());
+ MOZ_ASSERT(lane < 4);
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vextractps_rm(lane, src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vextractps_rm(lane, src.encoding(), dest.disp(), dest.base(),
+ dest.index(), dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ unsigned blendpsMask(bool x, bool y, bool z, bool w) {
+ return (x << 0) | (y << 1) | (z << 2) | (w << 3);
+ }
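+ // For example, blendpsMask(true, false, true, false) == 0b0101 == 5, i.e.
+ // bits 0 and 2 of the vblendps immediate are set.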
+ void vblendps(unsigned mask, FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vblendps_irr(mask, src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vblendps(unsigned mask, const Operand& src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vblendps_irr(mask, src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vblendps_imr(mask, src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vblendvps(FloatRegister mask, FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vblendvps_rr(mask.encoding(), src1.encoding(), src0.encoding(),
+ dest.encoding());
+ }
+ void vblendvps(FloatRegister mask, const Operand& src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vblendvps_rr(mask.encoding(), src1.fpu(), src0.encoding(),
+ dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vblendvps_mr(mask.encoding(), src1.disp(), src1.base(),
+ src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vblendvpd(FloatRegister mask, FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasSSE41());
+ masm.vblendvpd_rr(mask.encoding(), src1.encoding(), src0.encoding(),
+ dest.encoding());
+ }
+ void vmovsldup(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ masm.vmovsldup_rr(src.encoding(), dest.encoding());
+ }
+ void vmovsldup(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vmovsldup_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmovsldup_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovshdup(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ masm.vmovshdup_rr(src.encoding(), dest.encoding());
+ }
+ void vmovshdup(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vmovshdup_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmovshdup_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vminsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vminsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vminsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vminsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vminsd_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vminss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vminss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmaxsd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmaxsd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vmaxsd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src1.kind()) {
+ case Operand::FPREG:
+ masm.vmaxsd_rr(src1.fpu(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vmaxsd_mr(src1.disp(), src1.base(), src0.encoding(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmaxss(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmaxss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void fisttp(const Operand& dest) {
+ MOZ_ASSERT(HasSSE3());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fisttp_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fistp(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fistp_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fnstcw(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fnstcw_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fldcw(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fldcw_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fnstsw(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fnstsw_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fld(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fld_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fld32(const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fld32_m(dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fstp(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fstp_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void fstp32(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fstp32_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vbroadcastb(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasAVX2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vbroadcastb_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vbroadcastb_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vbroadcastb_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vbroadcastw(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasAVX2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vbroadcastw_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vbroadcastw_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vbroadcastw_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vbroadcastd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasAVX2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vbroadcastd_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vbroadcastd_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vbroadcastd_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vbroadcastq(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasAVX2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vbroadcastq_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vbroadcastq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vbroadcastq_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vbroadcastss(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasAVX2());
+ switch (src.kind()) {
+ case Operand::FPREG:
+ masm.vbroadcastss_rr(src.fpu(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.vbroadcastss_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vbroadcastss_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vfmadd231ps(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasFMA());
+ masm.vfmadd231ps_rrr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vfnmadd231ps(FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasFMA());
+ masm.vfnmadd231ps_rrr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vfmadd231pd(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasFMA());
+ masm.vfmadd231pd_rrr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vfnmadd231pd(FloatRegister src1, FloatRegister src0,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasFMA());
+ masm.vfnmadd231pd_rrr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+
+ void flushBuffer() {}
+
+ // Patching.
+
+ static size_t PatchWrite_NearCallSize() { return 5; }
+ static uintptr_t GetPointer(uint8_t* instPtr) {
+ uint8_t* ptr = instPtr - sizeof(uintptr_t);
+ return mozilla::LittleEndian::readUintptr(ptr);
+ }
+ // Write a relative call at the start location |startLabel|.
+ // Note that this DOES NOT patch data that comes before |startLabel|.
+ static void PatchWrite_NearCall(CodeLocationLabel startLabel,
+ CodeLocationLabel target) {
+ uint8_t* start = startLabel.raw();
+ *start = 0xE8; // <CALL> rel32
+ ptrdiff_t offset = target - startLabel - PatchWrite_NearCallSize();
+ MOZ_ASSERT(int32_t(offset) == offset);
+ mozilla::LittleEndian::writeInt32(start + 1, offset); // CALL <rel32>
+ }
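+ // Worked example with hypothetical addresses: patching a site at
+ // start = 0x1000 to call target = 0x2000 writes E8 FB 0F 00 00, since
+ // rel32 = 0x2000 - 0x1000 - 5 = 0xFFB, stored little-endian after the
+ // E8 opcode byte.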
+
+ static void PatchWrite_Imm32(CodeLocationLabel dataLabel, Imm32 toWrite) {
+ // dataLabel is a code location which targets the end of an instruction
+ // that has a 32-bit immediate. Thus writing a value requires stepping
+ // back to the address of the 32-bit immediate within the instruction.
+ uint8_t* ptr = dataLabel.raw();
+ mozilla::LittleEndian::writeInt32(ptr - sizeof(int32_t), toWrite.value);
+ }
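+ // For example, if the preceding instruction is "cmp eax, imm32" and
+ // |dataLabel| points just past it, the four bytes at dataLabel - 4 are the
+ // immediate, so the writeInt32 above rewrites that imm32 in place.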
+
+ static void PatchDataWithValueCheck(CodeLocationLabel data,
+ PatchedImmPtr newData,
+ PatchedImmPtr expectedData) {
+ // The pointer given is a pointer to *after* the data.
+ uint8_t* ptr = data.raw() - sizeof(uintptr_t);
+ MOZ_ASSERT(mozilla::LittleEndian::readUintptr(ptr) ==
+ uintptr_t(expectedData.value));
+ mozilla::LittleEndian::writeUintptr(ptr, uintptr_t(newData.value));
+ }
+ static void PatchDataWithValueCheck(CodeLocationLabel data, ImmPtr newData,
+ ImmPtr expectedData) {
+ PatchDataWithValueCheck(data, PatchedImmPtr(newData.value),
+ PatchedImmPtr(expectedData.value));
+ }
+
+ static uint32_t NopSize() { return 1; }
+ static uint8_t* NextInstruction(uint8_t* cur, uint32_t* count) {
+ MOZ_CRASH("nextInstruction NYI on x86");
+ }
+
+ // Toggle a jmp or cmp emitted by toggledJump().
+ static void ToggleToJmp(CodeLocationLabel inst) {
+ uint8_t* ptr = (uint8_t*)inst.raw();
+ MOZ_ASSERT(*ptr == 0x3D); // <CMP> eax, imm32
+ *ptr = 0xE9; // <JMP> rel32
+ }
+ static void ToggleToCmp(CodeLocationLabel inst) {
+ uint8_t* ptr = (uint8_t*)inst.raw();
+ MOZ_ASSERT(*ptr == 0xE9); // <JMP> rel32
+ *ptr = 0x3D; // <CMP> eax, imm32
+ }
+ static void ToggleCall(CodeLocationLabel inst, bool enabled) {
+ uint8_t* ptr = (uint8_t*)inst.raw();
+ MOZ_ASSERT(*ptr == 0x3D || // <CMP> eax, imm32
+ *ptr == 0xE8); // <CALL> rel32
+ *ptr = enabled ? 0xE8 : 0x3D;
+ }
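+ // These one-byte rewrites work because the toggled forms have the same
+ // size: "cmp eax, imm32" (3D id), "jmp rel32" (E9 cd) and "call rel32"
+ // (E8 cd) are each five bytes, so swapping the opcode byte never changes
+ // the layout of the surrounding code.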
+
+ MOZ_COLD void verifyHeapAccessDisassembly(
+ uint32_t begin, uint32_t end, const Disassembler::HeapAccess& heapAccess);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Assembler_x86_shared_h */
diff --git a/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.cpp b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.cpp
new file mode 100644
index 0000000000..835d7755c9
--- /dev/null
+++ b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.cpp
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/AssemblerBuffer-x86-shared.h"
+
+#include "mozilla/Sprintf.h"
+
+using namespace js;
+using namespace jit;
+
+bool AssemblerBuffer::swap(Vector<uint8_t, 0, SystemAllocPolicy>& bytes) {
+ // For now, specialize to the one use case.
+ MOZ_ASSERT(bytes.empty());
+
+ if (m_buffer.empty()) {
+ if (bytes.capacity() > m_buffer.capacity()) {
+ size_t newCapacity = bytes.capacity();
+ uint8_t* newBuffer = bytes.extractRawBuffer();
+ MOZ_ASSERT(newBuffer);
+ m_buffer.replaceRawBuffer((unsigned char*)newBuffer, 0, newCapacity);
+ }
+ return true;
+ }
+
+ size_t newLength = m_buffer.length();
+ size_t newCapacity = m_buffer.capacity();
+ unsigned char* newBuffer = m_buffer.extractRawBuffer();
+
+ // NB: extractRawBuffer() only returns null if the Vector is using
+ // inline storage and thus a malloc would be needed to extract a heap
+ // buffer. In that case, just make a simple copy.
+ if (!newBuffer) {
+ return bytes.append(m_buffer.begin(), m_buffer.end());
+ }
+
+ bytes.replaceRawBuffer((uint8_t*)newBuffer, newLength, newCapacity);
+ return true;
+}
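+
+// A minimal caller-side sketch of the ownership transfer above; the variable
+// names are illustrative only:
+//
+//   AssemblerBuffer buf;
+//   // ... emit code via putByte()/append() ...
+//   Vector<uint8_t, 0, SystemAllocPolicy> bytes;
+//   if (!buf.oom() && buf.swap(bytes)) {
+//     // |bytes| now owns the assembled code; |buf| is left empty unless the
+//     // inline-storage fallback above made a copy instead.
+//   }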
+
+#ifdef JS_JITSPEW
+void js::jit::GenericAssembler::spew(const char* fmt, va_list va) {
+ // Buffer to hold the formatted string. Note that this may contain
+ // '%' characters, so do not pass it directly to printf functions.
+ char buf[200];
+
+ int i = VsprintfLiteral(buf, fmt, va);
+ if (i > -1) {
+ if (printer) {
+ printer->printf("%s\n", buf);
+ }
+ js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
+ }
+}
+#endif
diff --git a/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
new file mode 100644
index 0000000000..e12efb600e
--- /dev/null
+++ b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
@@ -0,0 +1,256 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jit_x86_shared_AssemblerBuffer_x86_shared_h
+#define jit_x86_shared_AssemblerBuffer_x86_shared_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Likely.h"
+#include "mozilla/Vector.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/JitContext.h"
+#include "jit/JitSpewer.h"
+#include "jit/ProcessExecutableMemory.h"
+#include "js/AllocPolicy.h"
+#include "js/Vector.h"
+
+// Spew formatting helpers.
+#define PRETTYHEX(x) \
+ (((x) < 0) ? "-" : ""), \
+ ((unsigned)((x) ^ ((x) >> 31)) + ((unsigned)(x) >> 31))
+
+#define MEM_o "%s0x%x"
+#define MEM_os MEM_o "(,%s,%d)"
+#define MEM_ob MEM_o "(%s)"
+#define MEM_obs MEM_o "(%s,%s,%d)"
+
+#define MEM_o32 "%s0x%04x"
+#define MEM_o32s MEM_o32 "(,%s,%d)"
+#define MEM_o32b MEM_o32 "(%s)"
+#define MEM_o32bs MEM_o32 "(%s,%s,%d)"
+#define MEM_o32r ".Lfrom%d(%%rip)"
+
+#define ADDR_o(offset) PRETTYHEX(offset)
+#define ADDR_os(offset, index, scale) \
+ ADDR_o(offset), GPRegName((index)), (1 << (scale))
+#define ADDR_ob(offset, base) ADDR_o(offset), GPRegName((base))
+#define ADDR_obs(offset, base, index, scale) \
+ ADDR_ob(offset, base), GPRegName((index)), (1 << (scale))
+
+#define ADDR_o32(offset) ADDR_o(offset)
+#define ADDR_o32s(offset, index, scale) ADDR_os(offset, index, scale)
+#define ADDR_o32b(offset, base) ADDR_ob(offset, base)
+#define ADDR_o32bs(offset, base, index, scale) \
+ ADDR_obs(offset, base, index, scale)
+#define ADDR_o32r(offset) (offset)
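+
+// For illustration (register names and exact spacing assumed): an instruction
+// spew such as
+//   spew("addl " MEM_ob ", %s", ADDR_ob(8, base), GPReg32Name(dst));
+// prints something like "addl 0x8(%ebx), %eax". PRETTYHEX splits a possibly
+// negative offset into a sign string plus its magnitude, computed in unsigned
+// arithmetic so that INT32_MIN does not overflow.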
+
+namespace js {
+
+class JS_PUBLIC_API Sprinter;
+
+namespace jit {
+
+// AllocPolicy for AssemblerBuffer. OOMs when trying to allocate more than
+// MaxCodeBytesPerProcess bytes. Use private inheritance to make sure we
+// explicitly have to expose SystemAllocPolicy methods.
+class AssemblerBufferAllocPolicy : private SystemAllocPolicy {
+ public:
+ using SystemAllocPolicy::checkSimulatedOOM;
+ using SystemAllocPolicy::free_;
+ using SystemAllocPolicy::reportAllocOverflow;
+
+ template <typename T>
+ T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
+ static_assert(
+ sizeof(T) == 1,
+ "AssemblerBufferAllocPolicy should only be used with byte vectors");
+ MOZ_ASSERT(oldSize <= MaxCodeBytesPerProcess);
+ if (MOZ_UNLIKELY(newSize > MaxCodeBytesPerProcess)) {
+ return nullptr;
+ }
+ return SystemAllocPolicy::pod_realloc<T>(p, oldSize, newSize);
+ }
+ template <typename T>
+ T* pod_malloc(size_t numElems) {
+ static_assert(
+ sizeof(T) == 1,
+ "AssemblerBufferAllocPolicy should only be used with byte vectors");
+ if (MOZ_UNLIKELY(numElems > MaxCodeBytesPerProcess)) {
+ return nullptr;
+ }
+ return SystemAllocPolicy::pod_malloc<T>(numElems);
+ }
+};
+
+class AssemblerBuffer {
+ template <size_t size, typename T>
+ MOZ_ALWAYS_INLINE void sizedAppendUnchecked(T value) {
+ m_buffer.infallibleAppend(reinterpret_cast<unsigned char*>(&value), size);
+ }
+
+ template <size_t size, typename T>
+ MOZ_ALWAYS_INLINE void sizedAppend(T value) {
+ if (MOZ_UNLIKELY(
+ !m_buffer.append(reinterpret_cast<unsigned char*>(&value), size))) {
+ oomDetected();
+ }
+ }
+
+ public:
+ AssemblerBuffer() : m_oom(false) {}
+
+ void ensureSpace(size_t space) {
+ // This should only be called with small |space| values to ensure
+ // we don't overflow below.
+ MOZ_ASSERT(space <= 16);
+ if (MOZ_UNLIKELY(!m_buffer.reserve(m_buffer.length() + space))) {
+ oomDetected();
+ }
+ }
+
+ bool isAligned(size_t alignment) const {
+ return !(m_buffer.length() & (alignment - 1));
+ }
+
+ MOZ_ALWAYS_INLINE void putByteUnchecked(int value) {
+ sizedAppendUnchecked<1>(value);
+ }
+ MOZ_ALWAYS_INLINE void putShortUnchecked(int value) {
+ sizedAppendUnchecked<2>(value);
+ }
+ MOZ_ALWAYS_INLINE void putIntUnchecked(int value) {
+ sizedAppendUnchecked<4>(value);
+ }
+ MOZ_ALWAYS_INLINE void putInt64Unchecked(int64_t value) {
+ sizedAppendUnchecked<8>(value);
+ }
+
+ MOZ_ALWAYS_INLINE void putByte(int value) { sizedAppend<1>(value); }
+ MOZ_ALWAYS_INLINE void putShort(int value) { sizedAppend<2>(value); }
+ MOZ_ALWAYS_INLINE void putInt(int value) { sizedAppend<4>(value); }
+ MOZ_ALWAYS_INLINE void putInt64(int64_t value) { sizedAppend<8>(value); }
+
+ [[nodiscard]] bool append(const unsigned char* values, size_t size) {
+ if (MOZ_UNLIKELY(!m_buffer.append(values, size))) {
+ oomDetected();
+ return false;
+ }
+ return true;
+ }
+
+ size_t size() const { return m_buffer.length(); }
+
+ bool oom() const { return m_oom; }
+
+ bool reserve(size_t size) { return !m_oom && m_buffer.reserve(size); }
+
+ bool swap(Vector<uint8_t, 0, SystemAllocPolicy>& bytes);
+
+ const unsigned char* buffer() const {
+ MOZ_RELEASE_ASSERT(!m_oom);
+ return m_buffer.begin();
+ }
+
+ unsigned char* data() { return m_buffer.begin(); }
+
+ protected:
+ /*
+ * OOM handling: This class can OOM in the ensureSpace() method trying
+ * to allocate a new buffer. In response to an OOM, we need to avoid
+ * crashing and report the error. We also want to make it so that
+ * users of this class need to check for OOM only at certain points
+ * and not after every operation.
+ *
+ * Our strategy for handling an OOM is to set m_oom and then clear (but
+ * not free) m_buffer, keeping its existing allocation. This way, the user
+ * can continue assembling into the buffer, deferring the OOM check
+ * until the user wants to read code out of the buffer.
+ *
+ * See also the |buffer| method.
+ */
+ void oomDetected() {
+ m_oom = true;
+ m_buffer.clear();
+#ifdef DEBUG
+ JitContext* context = MaybeGetJitContext();
+ if (context) {
+ context->setOOM();
+ }
+#endif
+ }
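+
+ // Caller-side sketch of the deferred OOM check described above; the names
+ // used are illustrative only:
+ //
+ //   AssemblerBuffer buf;
+ //   buf.putByte(...);            // keep emitting even if an append OOMs
+ //   if (buf.oom()) {
+ //     return false;              // single check before reading the code
+ //   }
+ //   memcpy(code, buf.buffer(), buf.size());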
+
+ mozilla::Vector<unsigned char, 256, AssemblerBufferAllocPolicy> m_buffer;
+ bool m_oom;
+};
+
+class GenericAssembler {
+#ifdef JS_JITSPEW
+ Sprinter* printer;
+#endif
+ public:
+ GenericAssembler()
+#ifdef JS_JITSPEW
+ : printer(nullptr)
+#endif
+ {
+ }
+
+ void setPrinter(Sprinter* sp) {
+#ifdef JS_JITSPEW
+ printer = sp;
+#endif
+ }
+
+#ifdef JS_JITSPEW
+ inline void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {
+ if (MOZ_UNLIKELY(printer || JitSpewEnabled(JitSpew_Codegen))) {
+ va_list va;
+ va_start(va, fmt);
+ spew(fmt, va);
+ va_end(va);
+ }
+ }
+#else
+ MOZ_ALWAYS_INLINE void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {}
+#endif
+
+#ifdef JS_JITSPEW
+ MOZ_COLD void spew(const char* fmt, va_list va) MOZ_FORMAT_PRINTF(2, 0);
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_AssemblerBuffer_x86_shared_h */
diff --git a/js/src/jit/x86-shared/BaseAssembler-x86-shared.h b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
new file mode 100644
index 0000000000..b7a5031757
--- /dev/null
+++ b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
@@ -0,0 +1,6460 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jit_x86_shared_BaseAssembler_x86_shared_h
+#define jit_x86_shared_BaseAssembler_x86_shared_h
+
+#include "mozilla/IntegerPrintfMacros.h"
+
+#include "jit/x86-shared/AssemblerBuffer-x86-shared.h"
+#include "jit/x86-shared/Encoding-x86-shared.h"
+#include "jit/x86-shared/Patching-x86-shared.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+class BaseAssembler;
+
+class BaseAssembler : public GenericAssembler {
+ public:
+ BaseAssembler() : useVEX_(true) {}
+
+ void disableVEX() { useVEX_ = false; }
+
+ size_t size() const { return m_formatter.size(); }
+ const unsigned char* buffer() const { return m_formatter.buffer(); }
+ unsigned char* data() { return m_formatter.data(); }
+ bool oom() const { return m_formatter.oom(); }
+ bool reserve(size_t size) { return m_formatter.reserve(size); }
+ bool swapBuffer(wasm::Bytes& other) { return m_formatter.swapBuffer(other); }
+
+ void nop() {
+ spew("nop");
+ m_formatter.oneByteOp(OP_NOP);
+ }
+
+ void comment(const char* msg) { spew("; %s", msg); }
+
+ static void patchFiveByteNopToCall(uint8_t* callsite, uint8_t* target) {
+ // Note: the offset is relative to the address of the instruction after
+ // the call, which is five bytes long.
+ uint8_t* inst = callsite - sizeof(int32_t) - 1;
+ // The nop may already have been patched into a call, in which case this
+ // simply overwrites the existing call. See also nop_five.
+ MOZ_ASSERT(inst[0] == OP_NOP_0F || inst[0] == OP_CALL_rel32);
+ MOZ_ASSERT_IF(inst[0] == OP_NOP_0F,
+ inst[1] == OP_NOP_1F || inst[2] == OP_NOP_44 ||
+ inst[3] == OP_NOP_00 || inst[4] == OP_NOP_00);
+ inst[0] = OP_CALL_rel32;
+ SetRel32(callsite, target);
+ }
+
+ static void patchCallToFiveByteNop(uint8_t* callsite) {
+ // See also patchFiveByteNopToCall and nop_five.
+ uint8_t* inst = callsite - sizeof(int32_t) - 1;
+ // The call may already have been patched back to a nop.
+ if (inst[0] == OP_NOP_0F) {
+ MOZ_ASSERT(inst[1] == OP_NOP_1F || inst[2] == OP_NOP_44 ||
+ inst[3] == OP_NOP_00 || inst[4] == OP_NOP_00);
+ return;
+ }
+ MOZ_ASSERT(inst[0] == OP_CALL_rel32);
+ inst[0] = OP_NOP_0F;
+ inst[1] = OP_NOP_1F;
+ inst[2] = OP_NOP_44;
+ inst[3] = OP_NOP_00;
+ inst[4] = OP_NOP_00;
+ }
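+ // In byte terms, these two helpers toggle the five bytes just before
+ // |callsite| between the nop_five() pattern 0F 1F 44 00 00 and a call
+ // E8 <rel32>; |callsite| points just past those five bytes, hence
+ // inst = callsite - 5.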
+
+ /*
+ * The multi-byte nop sequences are taken directly from Intel's
+ * architecture software developer's manual.
+ * They are defined for sizes from 1 to 9 bytes inclusive.
+ */
+ void nop_one() { m_formatter.oneByteOp(OP_NOP); }
+
+ void nop_two() {
+ m_formatter.oneByteOp(OP_NOP_66);
+ m_formatter.oneByteOp(OP_NOP);
+ }
+
+ void nop_three() {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+
+ void nop_four() {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_40);
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+
+ void nop_five() {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_44);
+ m_formatter.oneByteOp(OP_NOP_00);
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+
+ void nop_six() {
+ m_formatter.oneByteOp(OP_NOP_66);
+ nop_five();
+ }
+
+ void nop_seven() {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_80);
+ for (int i = 0; i < 4; ++i) {
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+ }
+
+ void nop_eight() {
+ m_formatter.oneByteOp(OP_NOP_0F);
+ m_formatter.oneByteOp(OP_NOP_1F);
+ m_formatter.oneByteOp(OP_NOP_84);
+ for (int i = 0; i < 5; ++i) {
+ m_formatter.oneByteOp(OP_NOP_00);
+ }
+ }
+
+ void nop_nine() {
+ m_formatter.oneByteOp(OP_NOP_66);
+ nop_eight();
+ }
+
+ void insert_nop(int size) {
+ switch (size) {
+ case 1:
+ nop_one();
+ break;
+ case 2:
+ nop_two();
+ break;
+ case 3:
+ nop_three();
+ break;
+ case 4:
+ nop_four();
+ break;
+ case 5:
+ nop_five();
+ break;
+ case 6:
+ nop_six();
+ break;
+ case 7:
+ nop_seven();
+ break;
+ case 8:
+ nop_eight();
+ break;
+ case 9:
+ nop_nine();
+ break;
+ case 10:
+ nop_three();
+ nop_seven();
+ break;
+ case 11:
+ nop_four();
+ nop_seven();
+ break;
+ case 12:
+ nop_six();
+ nop_six();
+ break;
+ case 13:
+ nop_six();
+ nop_seven();
+ break;
+ case 14:
+ nop_seven();
+ nop_seven();
+ break;
+ case 15:
+ nop_one();
+ nop_seven();
+ nop_seven();
+ break;
+ default:
+ MOZ_CRASH("Unhandled alignment");
+ }
+ }
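+ // For example, insert_nop(12) emits nop_six() twice, i.e. two copies of the
+ // six-byte sequence 66 0F 1F 44 00 00.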
+
+ // Stack operations:
+
+ void push_r(RegisterID reg) {
+ spew("push %s", GPRegName(reg));
+ m_formatter.oneByteOp(OP_PUSH_EAX, reg);
+ }
+
+ void pop_r(RegisterID reg) {
+ spew("pop %s", GPRegName(reg));
+ m_formatter.oneByteOp(OP_POP_EAX, reg);
+ }
+
+ void push_i(int32_t imm) {
+ spew("push $%s0x%x", PRETTYHEX(imm));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_PUSH_Ib);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_PUSH_Iz);
+ m_formatter.immediate32(imm);
+ }
+ }
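+ // For example, push_i(0x7f) takes the two-byte form 6A 7F, while
+ // push_i(0x80) needs the five-byte form 68 80 00 00 00 because 0x80 does
+ // not sign-extend from 8 bits.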
+
+ void push_i32(int32_t imm) {
+ spew("push $%s0x%04x", PRETTYHEX(imm));
+ m_formatter.oneByteOp(OP_PUSH_Iz);
+ m_formatter.immediate32(imm);
+ }
+
+ void push_m(int32_t offset, RegisterID base) {
+ spew("push " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_PUSH);
+ }
+ void push_m(int32_t offset, RegisterID base, RegisterID index, int scale) {
+ spew("push " MEM_obs, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, index, scale,
+ GROUP5_OP_PUSH);
+ }
+
+ void pop_m(int32_t offset, RegisterID base) {
+ spew("pop " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1A_Ev, offset, base, GROUP1A_OP_POP);
+ }
+
+ void push_flags() {
+ spew("pushf");
+ m_formatter.oneByteOp(OP_PUSHFLAGS);
+ }
+
+ void pop_flags() {
+ spew("popf");
+ m_formatter.oneByteOp(OP_POPFLAGS);
+ }
+
+ // Arithmetic operations:
+
+ void addl_rr(RegisterID src, RegisterID dst) {
+ spew("addl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADD_GvEv, src, dst);
+ }
+
+ void addw_rr(RegisterID src, RegisterID dst) {
+ spew("addw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_ADD_GvEv, src, dst);
+ }
+
+ void addl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("addl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADD_GvEv, offset, base, dst);
+ }
+
+ void addl_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("addl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_ADD_EvGv, offset, base, src);
+ }
+
+ void addl_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("addl %s, " MEM_obs, GPReg32Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_ADD_EvGv, offset, base, index, scale, src);
+ }
+
+ void addl_ir(int32_t imm, RegisterID dst) {
+ spew("addl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_ADD_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
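+ // For example, addl_ir(1000, rax) takes the short accumulator form
+ // 05 E8 03 00 00; any other destination with a 32-bit immediate uses the
+ // generic 81 /0 encoding instead.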
+
+ void addw_ir(int32_t imm, RegisterID dst) {
+ spew("addw $%d, %s", int16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ m_formatter.immediate16(imm);
+ }
+
+ void addl_i32r(int32_t imm, RegisterID dst) {
+ // 32-bit immediate always, for patching.
+ spew("addl $0x%04x, %s", uint32_t(imm), GPReg32Name(dst));
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_ADD_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ }
+ m_formatter.immediate32(imm);
+ }
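+ // Keeping the full 32-bit immediate even for small values presumably keeps
+ // the immediate at a fixed offset from the end of the instruction, so that
+ // patching code such as PatchWrite_Imm32 can rewrite it in place later.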
+
+ void addl_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("addl $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("addl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addl_im(int32_t imm, const void* addr) {
+ spew("addl $%d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+ void addw_im(int32_t imm, const void* addr) {
+ spew("addw $%d, %p", int16_t(imm), addr);
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void addw_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("addw $%d, " MEM_ob, int16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate16(imm);
+ }
+
+ void addw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("addw $%d, " MEM_obs, int16_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_ADD);
+ m_formatter.immediate16(imm);
+ }
+
+ void addw_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("addw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_ADD_EvGv, offset, base, src);
+ }
+
+ void addw_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("addw %s, " MEM_obs, GPReg16Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_ADD_EvGv, offset, base, index, scale, src);
+ }
+
+ void addb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("addb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate8(imm);
+ }
+
+ void addb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("addb $%d, " MEM_obs, int8_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale,
+ GROUP1_OP_ADD);
+ m_formatter.immediate8(imm);
+ }
+
+ void addb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("addb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_ADD_EbGb, offset, base, src);
+ }
+
+ void addb_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("addb %s, " MEM_obs, GPReg8Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_ADD_EbGb, offset, base, index, scale, src);
+ }
+
+ void subb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("subb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate8(imm);
+ }
+
+ void subb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("subb $%d, " MEM_obs, int8_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale,
+ GROUP1_OP_SUB);
+ m_formatter.immediate8(imm);
+ }
+
+ void subb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("subb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_SUB_EbGb, offset, base, src);
+ }
+
+ void subb_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("subb %s, " MEM_obs, GPReg8Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_SUB_EbGb, offset, base, index, scale, src);
+ }
+
+ void andb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("andb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate8(imm);
+ }
+
+ void andb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("andb $%d, " MEM_obs, int8_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale,
+ GROUP1_OP_AND);
+ m_formatter.immediate8(imm);
+ }
+
+ void andb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("andb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_AND_EbGb, offset, base, src);
+ }
+
+ void andb_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("andb %s, " MEM_obs, GPReg8Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_AND_EbGb, offset, base, index, scale, src);
+ }
+
+ void orb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("orb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate8(imm);
+ }
+
+ void orb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("orb $%d, " MEM_obs, int8_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale,
+ GROUP1_OP_OR);
+ m_formatter.immediate8(imm);
+ }
+
+ void orb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("orb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_OR_EbGb, offset, base, src);
+ }
+
+ void orb_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("orb %s, " MEM_obs, GPReg8Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_OR_EbGb, offset, base, index, scale, src);
+ }
+
+ void xorb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("xorb $%d, " MEM_ob, int8_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate8(imm);
+ }
+
+ void xorb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("xorb $%d, " MEM_obs, int8_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale,
+ GROUP1_OP_XOR);
+ m_formatter.immediate8(imm);
+ }
+
+ void xorb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("xorb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_XOR_EbGb, offset, base, src);
+ }
+
+ void xorb_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("xorb %s, " MEM_obs, GPReg8Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_XOR_EbGb, offset, base, index, scale, src);
+ }
+
+ void lock_xaddb_rm(RegisterID srcdest, int32_t offset, RegisterID base) {
+ spew("lock xaddb %s, " MEM_ob, GPReg8Name(srcdest), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp8(OP2_XADD_EbGb, offset, base, srcdest);
+ }
+
+ void lock_xaddb_rm(RegisterID srcdest, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("lock xaddb %s, " MEM_obs, GPReg8Name(srcdest),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp8(OP2_XADD_EbGb, offset, base, index, scale, srcdest);
+ }
+
+ void lock_xaddl_rm(RegisterID srcdest, int32_t offset, RegisterID base) {
+ spew("lock xaddl %s, " MEM_ob, GPReg32Name(srcdest), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp(OP2_XADD_EvGv, offset, base, srcdest);
+ }
+
+ void lock_xaddl_rm(RegisterID srcdest, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("lock xaddl %s, " MEM_obs, GPReg32Name(srcdest),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(PRE_LOCK);
+ m_formatter.twoByteOp(OP2_XADD_EvGv, offset, base, index, scale, srcdest);
+ }
+
+ void vpmaddubsw_rr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ threeByteOpSimd("vpmaddubsw", VEX_PD, OP3_PMADDUBSW_VdqWdq, ESCAPE_38, src1,
+ src0, dst);
+ }
+ void vpmaddubsw_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ threeByteOpSimd("vpmaddubsw", VEX_PD, OP3_PMADDUBSW_VdqWdq, ESCAPE_38,
+ address, src0, dst);
+ }
+
+ void vpaddb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddb", VEX_PD, OP2_PADDB_VdqWdq, src1, src0, dst);
+ }
+ void vpaddb_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpaddb", VEX_PD, OP2_PADDB_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddb", VEX_PD, OP2_PADDB_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddsb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddsb", VEX_PD, OP2_PADDSB_VdqWdq, src1, src0, dst);
+ }
+ void vpaddsb_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpaddsb", VEX_PD, OP2_PADDSB_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpaddsb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddsb", VEX_PD, OP2_PADDSB_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddusb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddusb", VEX_PD, OP2_PADDUSB_VdqWdq, src1, src0, dst);
+ }
+ void vpaddusb_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpaddusb", VEX_PD, OP2_PADDUSB_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpaddusb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddusb", VEX_PD, OP2_PADDUSB_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddw", VEX_PD, OP2_PADDW_VdqWdq, src1, src0, dst);
+ }
+ void vpaddw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpaddw", VEX_PD, OP2_PADDW_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddw", VEX_PD, OP2_PADDW_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddsw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddsw", VEX_PD, OP2_PADDSW_VdqWdq, src1, src0, dst);
+ }
+ void vpaddsw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpaddsw", VEX_PD, OP2_PADDSW_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpaddsw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddsw", VEX_PD, OP2_PADDSW_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddusw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddusw", VEX_PD, OP2_PADDUSW_VdqWdq, src1, src0, dst);
+ }
+ void vpaddusw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpaddusw", VEX_PD, OP2_PADDUSW_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpaddusw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddusw", VEX_PD, OP2_PADDUSW_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddd", VEX_PD, OP2_PADDD_VdqWdq, src1, src0, dst);
+ }
+ void vpaddd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpaddd", VEX_PD, OP2_PADDD_VdqWdq, offset, base, src0, dst);
+ }
+ void vpaddd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddd", VEX_PD, OP2_PADDD_VdqWdq, address, src0, dst);
+ }
+
+ void vpaddq_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddq", VEX_PD, OP2_PADDQ_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubb", VEX_PD, OP2_PSUBB_VdqWdq, src1, src0, dst);
+ }
+ void vpsubb_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpsubb", VEX_PD, OP2_PSUBB_VdqWdq, offset, base, src0, dst);
+ }
+ void vpsubb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubb", VEX_PD, OP2_PSUBB_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubsb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubsb", VEX_PD, OP2_PSUBSB_VdqWdq, src1, src0, dst);
+ }
+ void vpsubsb_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpsubsb", VEX_PD, OP2_PSUBSB_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpsubsb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubsb", VEX_PD, OP2_PSUBSB_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubusb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubusb", VEX_PD, OP2_PSUBUSB_VdqWdq, src1, src0, dst);
+ }
+ void vpsubusb_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpsubusb", VEX_PD, OP2_PSUBUSB_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpsubusb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubusb", VEX_PD, OP2_PSUBUSB_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubw", VEX_PD, OP2_PSUBW_VdqWdq, src1, src0, dst);
+ }
+ void vpsubw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpsubw", VEX_PD, OP2_PSUBW_VdqWdq, offset, base, src0, dst);
+ }
+ void vpsubw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubw", VEX_PD, OP2_PSUBW_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubsw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubsw", VEX_PD, OP2_PSUBSW_VdqWdq, src1, src0, dst);
+ }
+ void vpsubsw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpsubsw", VEX_PD, OP2_PSUBSW_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpsubsw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubsw", VEX_PD, OP2_PSUBSW_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubusw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubusw", VEX_PD, OP2_PSUBUSW_VdqWdq, src1, src0, dst);
+ }
+ void vpsubusw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpsubusw", VEX_PD, OP2_PSUBUSW_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpsubusw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubusw", VEX_PD, OP2_PSUBUSW_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubd", VEX_PD, OP2_PSUBD_VdqWdq, src1, src0, dst);
+ }
+ void vpsubd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpsubd", VEX_PD, OP2_PSUBD_VdqWdq, offset, base, src0, dst);
+ }
+ void vpsubd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubd", VEX_PD, OP2_PSUBD_VdqWdq, address, src0, dst);
+ }
+
+ void vpsubq_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubq", VEX_PD, OP2_PSUBQ_VdqWdq, address, src0, dst);
+ }
+
+ void vpmuldq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmuldq", VEX_PD, OP3_PMULDQ_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+
+ void vpmuludq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmuludq", VEX_PD, OP2_PMULUDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpmuludq_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpmuludq", VEX_PD, OP2_PMULUDQ_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpmuludq_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmuludq", VEX_PD, OP2_PMULUDQ_VdqWdq, address, src0, dst);
+ }
+
+ void vpmaddwd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmaddwd", VEX_PD, OP2_PMADDWD_VdqWdq, src1, src0, dst);
+ }
+ void vpmaddwd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmaddwd", VEX_PD, OP2_PMADDWD_VdqWdq, address, src0, dst);
+ }
+
+ void vpmullw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmullw", VEX_PD, OP2_PMULLW_VdqWdq, src1, src0, dst);
+ }
+ void vpmulhw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmulhw", VEX_PD, OP2_PMULHW_VdqWdq, src1, src0, dst);
+ }
+ void vpmulhuw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmulhuw", VEX_PD, OP2_PMULHUW_VdqWdq, src1, src0, dst);
+ }
+ void vpmullw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpmullw", VEX_PD, OP2_PMULLW_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpmulhw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpmulhw", VEX_PD, OP2_PMULHW_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpmulhuw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpmulhuw", VEX_PD, OP2_PMULHUW_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpmullw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmullw", VEX_PD, OP2_PMULLW_VdqWdq, address, src0, dst);
+ }
+
+ void vpmulld_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmulld", VEX_PD, OP3_PMULLD_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpmulld_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ threeByteOpSimd("vpmulld", VEX_PD, OP3_PMULLD_VdqWdq, ESCAPE_38, offset,
+ base, src0, dst);
+ }
+ void vpmulld_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmulld", VEX_PD, OP3_PMULLD_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+ void vpmulhrsw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmulhrsw", VEX_PD, OP3_PMULHRSW_VdqWdq, ESCAPE_38, src1,
+ src0, dst);
+ }
+ void vpmulhrsw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ threeByteOpSimd("vpmulhrsw", VEX_PD, OP3_PMULHRSW_VdqWdq, ESCAPE_38, offset,
+ base, src0, dst);
+ }
+
+ void vaddps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vaddps", VEX_PS, OP2_ADDPS_VpsWps, src1, src0, dst);
+ }
+ void vaddps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vaddps", VEX_PS, OP2_ADDPS_VpsWps, offset, base, src0, dst);
+ }
+ void vaddps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vaddps", VEX_PS, OP2_ADDPS_VpsWps, address, src0, dst);
+ }
+
+ void vsubps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsubps", VEX_PS, OP2_SUBPS_VpsWps, src1, src0, dst);
+ }
+ void vsubps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vsubps", VEX_PS, OP2_SUBPS_VpsWps, offset, base, src0, dst);
+ }
+ void vsubps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsubps", VEX_PS, OP2_SUBPS_VpsWps, address, src0, dst);
+ }
+
+ void vmulps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmulps", VEX_PS, OP2_MULPS_VpsWps, src1, src0, dst);
+ }
+ void vmulps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmulps", VEX_PS, OP2_MULPS_VpsWps, offset, base, src0, dst);
+ }
+ void vmulps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmulps", VEX_PS, OP2_MULPS_VpsWps, address, src0, dst);
+ }
+
+ void vdivps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vdivps", VEX_PS, OP2_DIVPS_VpsWps, src1, src0, dst);
+ }
+ void vdivps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vdivps", VEX_PS, OP2_DIVPS_VpsWps, offset, base, src0, dst);
+ }
+ void vdivps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vdivps", VEX_PS, OP2_DIVPS_VpsWps, address, src0, dst);
+ }
+
+ void vmaxps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmaxps", VEX_PS, OP2_MAXPS_VpsWps, src1, src0, dst);
+ }
+ void vmaxps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmaxps", VEX_PS, OP2_MAXPS_VpsWps, offset, base, src0, dst);
+ }
+ void vmaxps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmaxps", VEX_PS, OP2_MAXPS_VpsWps, address, src0, dst);
+ }
+
+ void vmaxpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmaxpd", VEX_PD, OP2_MAXPD_VpdWpd, src1, src0, dst);
+ }
+
+ void vminps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vminps", VEX_PS, OP2_MINPS_VpsWps, src1, src0, dst);
+ }
+ void vminps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vminps", VEX_PS, OP2_MINPS_VpsWps, offset, base, src0, dst);
+ }
+ void vminps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vminps", VEX_PS, OP2_MINPS_VpsWps, address, src0, dst);
+ }
+
+ void vminpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vminpd", VEX_PD, OP2_MINPD_VpdWpd, src1, src0, dst);
+ }
+ void vminpd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vminpd", VEX_PD, OP2_MINPD_VpdWpd, address, src0, dst);
+ }
+
+ void vaddpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vaddpd", VEX_PD, OP2_ADDPD_VpdWpd, src1, src0, dst);
+ }
+ void vaddpd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vaddpd", VEX_PD, OP2_ADDPD_VpdWpd, address, src0, dst);
+ }
+
+ void vsubpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPD_VpdWpd, src1, src0, dst);
+ }
+ void vsubpd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPD_VpdWpd, address, src0, dst);
+ }
+
+ void vmulpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmulpd", VEX_PD, OP2_MULPD_VpdWpd, src1, src0, dst);
+ }
+ void vmulpd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmulpd", VEX_PD, OP2_MULPD_VpdWpd, address, src0, dst);
+ }
+
+ void vdivpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vdivpd", VEX_PD, OP2_DIVPD_VpdWpd, src1, src0, dst);
+ }
+ void vdivpd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vdivpd", VEX_PD, OP2_DIVPD_VpdWpd, address, src0, dst);
+ }
+
+ void andl_rr(RegisterID src, RegisterID dst) {
+ spew("andl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_AND_GvEv, src, dst);
+ }
+
+ void andw_rr(RegisterID src, RegisterID dst) {
+ spew("andw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_AND_GvEv, src, dst);
+ }
+
+ void andl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("andl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_AND_GvEv, offset, base, dst);
+ }
+
+ void andl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("andl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_AND_GvEv, offset, base, index, scale, dst);
+ }
+
+ void andl_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("andl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_AND_EvGv, offset, base, src);
+ }
+
+ void andw_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("andw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_AND_EvGv, offset, base, src);
+ }
+
+ void andl_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("andl %s, " MEM_obs, GPReg32Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_AND_EvGv, offset, base, index, scale, src);
+ }
+
+ void andw_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("andw %s, " MEM_obs, GPReg16Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_AND_EvGv, offset, base, index, scale, src);
+ }
+
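+ // The group-1 ALU immediates below pick the shortest encoding available:
+ // the sign-extending imm8 form (0x83 /r ib) when the value fits, the
+ // ModRM-free accumulator form when the destination is %eax/%ax, and the
+ // full imm32/imm16 form (0x81 /r) otherwise.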
+ void andl_ir(int32_t imm, RegisterID dst) {
+ spew("andl $0x%x, %s", uint32_t(imm), GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_AND_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andw_ir(int32_t imm, RegisterID dst) {
+ spew("andw $0x%x, %s", uint16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_AND_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
+ }
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void andl_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("andl $0x%x, " MEM_ob, uint32_t(imm), ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andw_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("andw $0x%x, " MEM_ob, uint16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_AND);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void andl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("andl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("andw $%d, " MEM_obs, int16_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_AND);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void fld_m(int32_t offset, RegisterID base) {
+ spew("fld " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6, offset, base, FPU6_OP_FLD);
+ }
+ void fld32_m(int32_t offset, RegisterID base) {
+ spew("fld " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6_F32, offset, base, FPU6_OP_FLD);
+ }
+ void faddp() {
+ spew("addp ");
+ m_formatter.oneByteOp(OP_FPU6_ADDP);
+ m_formatter.oneByteOp(OP_ADDP_ST0_ST1);
+ }
+ void fisttp_m(int32_t offset, RegisterID base) {
+ spew("fisttp " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6, offset, base, FPU6_OP_FISTTP);
+ }
+ void fistp_m(int32_t offset, RegisterID base) {
+ spew("fistp " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FILD, offset, base, FPU6_OP_FISTP);
+ }
+ void fstp_m(int32_t offset, RegisterID base) {
+ spew("fstp " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6, offset, base, FPU6_OP_FSTP);
+ }
+ void fstp32_m(int32_t offset, RegisterID base) {
+ spew("fstp32 " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6_F32, offset, base, FPU6_OP_FSTP);
+ }
+ void fnstcw_m(int32_t offset, RegisterID base) {
+ spew("fnstcw " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6_F32, offset, base, FPU6_OP_FISTP);
+ }
+ void fldcw_m(int32_t offset, RegisterID base) {
+ spew("fldcw " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6_F32, offset, base, FPU6_OP_FLDCW);
+ }
+ void fnstsw_m(int32_t offset, RegisterID base) {
+ spew("fnstsw " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_FPU6, offset, base, FPU6_OP_FISTP);
+ }
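+ // fnstcw/fnstsw above reuse FPU6_OP_FISTP only because those instructions
+ // share the same /7 ModRM reg-field extension on their respective opcode
+ // bytes; the enum name is incidental.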
+
+ void negl_r(RegisterID dst) {
+ spew("negl %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, dst, GROUP3_OP_NEG);
+ }
+
+ void negl_m(int32_t offset, RegisterID base) {
+ spew("negl " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, offset, base, GROUP3_OP_NEG);
+ }
+
+ void notl_r(RegisterID dst) {
+ spew("notl %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, dst, GROUP3_OP_NOT);
+ }
+
+ void notl_m(int32_t offset, RegisterID base) {
+ spew("notl " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, offset, base, GROUP3_OP_NOT);
+ }
+
+ void orl_rr(RegisterID src, RegisterID dst) {
+ spew("orl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_OR_GvEv, src, dst);
+ }
+
+ void orw_rr(RegisterID src, RegisterID dst) {
+ spew("orw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_OR_GvEv, src, dst);
+ }
+
+ void orl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("orl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_OR_GvEv, offset, base, dst);
+ }
+
+ void orl_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("orl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_OR_EvGv, offset, base, src);
+ }
+
+ void orw_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("orw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_OR_EvGv, offset, base, src);
+ }
+
+ void orl_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("orl %s, " MEM_obs, GPReg32Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_OR_EvGv, offset, base, index, scale, src);
+ }
+
+ void orw_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("orw %s, " MEM_obs, GPReg16Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_OR_EvGv, offset, base, index, scale, src);
+ }
+
+ void orl_ir(int32_t imm, RegisterID dst) {
+ spew("orl $0x%x, %s", uint32_t(imm), GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_OR_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orw_ir(int32_t imm, RegisterID dst) {
+ spew("orw $0x%x, %s", uint16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_OR_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
+ }
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void orl_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("orl $0x%x, " MEM_ob, uint32_t(imm), ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orw_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("orw $0x%x, " MEM_ob, uint16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_OR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void orl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("orl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("orw $%d, " MEM_obs, int16_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_OR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void sbbl_rr(RegisterID src, RegisterID dst) {
+ spew("sbbl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SBB_GvEv, src, dst);
+ }
+
+ void subl_rr(RegisterID src, RegisterID dst) {
+ spew("subl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SUB_GvEv, src, dst);
+ }
+
+ void subw_rr(RegisterID src, RegisterID dst) {
+ spew("subw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_SUB_GvEv, src, dst);
+ }
+
+ void subl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("subl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SUB_GvEv, offset, base, dst);
+ }
+
+ void subl_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("subl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_SUB_EvGv, offset, base, src);
+ }
+
+ void subw_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("subw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_SUB_EvGv, offset, base, src);
+ }
+
+ void subl_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("subl %s, " MEM_obs, GPReg32Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_SUB_EvGv, offset, base, index, scale, src);
+ }
+
+ void subw_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("subw %s, " MEM_obs, GPReg16Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_SUB_EvGv, offset, base, index, scale, src);
+ }
+
+ void subl_ir(int32_t imm, RegisterID dst) {
+ spew("subl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_SUB_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subw_ir(int32_t imm, RegisterID dst) {
+ spew("subw $%d, %s", int16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_SUB_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
+ }
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void subl_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("subl $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subw_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("subw $%d, " MEM_ob, int16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_SUB);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void subl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("subl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("subw $%d, " MEM_obs, int16_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_SUB);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void xorl_rr(RegisterID src, RegisterID dst) {
+ spew("xorl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_XOR_GvEv, src, dst);
+ }
+
+ void xorw_rr(RegisterID src, RegisterID dst) {
+ spew("xorw %s, %s", GPReg16Name(src), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XOR_GvEv, src, dst);
+ }
+
+ void xorl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("xorl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_XOR_GvEv, offset, base, dst);
+ }
+
+ void xorl_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("xorl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_XOR_EvGv, offset, base, src);
+ }
+
+ void xorw_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("xorw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XOR_EvGv, offset, base, src);
+ }
+
+ void xorl_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("xorl %s, " MEM_obs, GPReg32Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_XOR_EvGv, offset, base, index, scale, src);
+ }
+
+ void xorw_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("xorw %s, " MEM_obs, GPReg16Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XOR_EvGv, offset, base, index, scale, src);
+ }
+
+ void xorl_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("xorl $0x%x, " MEM_ob, uint32_t(imm), ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorw_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("xorw $0x%x, " MEM_ob, uint16_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_XOR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void xorl_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("xorl $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_XOR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("xorw $%d, " MEM_obs, int16_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_XOR);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void xorl_ir(int32_t imm, RegisterID dst) {
+ spew("xorl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_XOR_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
+ }
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorw_ir(int32_t imm, RegisterID dst) {
+ spew("xorw $%d, %s", int16_t(imm), GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax) {
+ m_formatter.oneByteOp(OP_XOR_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
+ }
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void bswapl_r(RegisterID dst) {
+ spew("bswap %s", GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_BSWAP, dst);
+ }
+
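+ // The shift and rotate helpers below use the 0xD1 "by one" form when
+ // imm == 1, which drops the immediate byte of the 0xC1 ib encoding.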
+ void sarl_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("sarl $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_SAR);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_SAR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void sarl_CLr(RegisterID dst) {
+ spew("sarl %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_SAR);
+ }
+
+ void shrl_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("shrl $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_SHR);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_SHR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shrl_CLr(RegisterID dst) {
+ spew("shrl %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_SHR);
+ }
+
+ void shrdl_CLr(RegisterID src, RegisterID dst) {
+ spew("shrdl %%cl, %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_SHRD_GvEv, dst, src);
+ }
+
+ void shldl_CLr(RegisterID src, RegisterID dst) {
+ spew("shldl %%cl, %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_SHLD_GvEv, dst, src);
+ }
+
+ void shll_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("shll $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_SHL);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_SHL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shll_CLr(RegisterID dst) {
+ spew("shll %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_SHL);
+ }
+
+ void roll_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("roll $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void rolw_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("roll $%d, %s", imm, GPReg16Name(dst));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ if (imm == 1) {
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void roll_CLr(RegisterID dst) {
+ spew("roll %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_ROL);
+ }
+
+ void rorl_ir(int32_t imm, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("rorl $%d, %s", imm, GPReg32Name(dst));
+ if (imm == 1) {
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, dst, GROUP2_OP_ROR);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, dst, GROUP2_OP_ROR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void rorl_CLr(RegisterID dst) {
+ spew("rorl %%cl, %s", GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, dst, GROUP2_OP_ROR);
+ }
+
+ void bsrl_rr(RegisterID src, RegisterID dst) {
+ spew("bsrl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_BSR_GvEv, src, dst);
+ }
+
+ void bsfl_rr(RegisterID src, RegisterID dst) {
+ spew("bsfl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_BSF_GvEv, src, dst);
+ }
+
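+ // lzcnt, tzcnt, and popcnt take a mandatory F3 prefix, expressed here via
+ // legacySSEPrefix(VEX_SS).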
+ void lzcntl_rr(RegisterID src, RegisterID dst) {
+ spew("lzcntl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(VEX_SS);
+ m_formatter.twoByteOp(OP2_LZCNT_GvEv, src, dst);
+ }
+
+ void tzcntl_rr(RegisterID src, RegisterID dst) {
+ spew("tzcntl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(VEX_SS);
+ m_formatter.twoByteOp(OP2_TZCNT_GvEv, src, dst);
+ }
+
+ void popcntl_rr(RegisterID src, RegisterID dst) {
+ spew("popcntl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(VEX_SS);
+ m_formatter.twoByteOp(OP2_POPCNT_GvEv, src, dst);
+ }
+
+ void imull_rr(RegisterID src, RegisterID dst) {
+ spew("imull %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, src, dst);
+ }
+
+ void imull_r(RegisterID multiplier) {
+ spew("imull %s", GPReg32Name(multiplier));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, multiplier, GROUP3_OP_IMUL);
+ }
+
+ void imull_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("imull " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, offset, base, dst);
+ }
+
+ void imull_ir(int32_t value, RegisterID src, RegisterID dst) {
+ spew("imull $%d, %s, %s", value, GPReg32Name(src), GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(value)) {
+ m_formatter.oneByteOp(OP_IMUL_GvEvIb, src, dst);
+ m_formatter.immediate8s(value);
+ } else {
+ m_formatter.oneByteOp(OP_IMUL_GvEvIz, src, dst);
+ m_formatter.immediate32(value);
+ }
+ }
+
+ void mull_r(RegisterID multiplier) {
+ spew("mull %s", GPReg32Name(multiplier));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, multiplier, GROUP3_OP_MUL);
+ }
+
+ void idivl_r(RegisterID divisor) {
+ spew("idivl %s", GPReg32Name(divisor));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, divisor, GROUP3_OP_IDIV);
+ }
+
+ void divl_r(RegisterID divisor) {
+ spew("div %s", GPReg32Name(divisor));
+ m_formatter.oneByteOp(OP_GROUP3_Ev, divisor, GROUP3_OP_DIV);
+ }
+
+ void prefix_lock() {
+ spew("lock");
+ m_formatter.oneByteOp(PRE_LOCK);
+ }
+
+ void prefix_16_for_32() {
+ spew("[16-bit operands next]");
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ }
+
+ void incl_m32(int32_t offset, RegisterID base) {
+ spew("incl " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_INC);
+ }
+
+ void decl_m32(int32_t offset, RegisterID base) {
+ spew("decl " MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_DEC);
+ }
+
+ // Note that CMPXCHG performs comparison against REG = %al/%ax/%eax/%rax.
+ // If %REG == [%base+offset], then %src -> [%base+offset].
+ // Otherwise, [%base+offset] -> %REG.
+ // For the 8-bit operations src must also be an 8-bit register.
+
+ void cmpxchgb(RegisterID src, int32_t offset, RegisterID base) {
+ spew("cmpxchgb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.twoByteOp8(OP2_CMPXCHG_GvEb, offset, base, src);
+ }
+ void cmpxchgb(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("cmpxchgb %s, " MEM_obs, GPReg8Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.twoByteOp8(OP2_CMPXCHG_GvEb, offset, base, index, scale, src);
+ }
+ void cmpxchgw(RegisterID src, int32_t offset, RegisterID base) {
+ spew("cmpxchgw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, offset, base, src);
+ }
+ void cmpxchgw(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("cmpxchgw %s, " MEM_obs, GPReg16Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, offset, base, index, scale, src);
+ }
+ void cmpxchgl(RegisterID src, int32_t offset, RegisterID base) {
+ spew("cmpxchgl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, offset, base, src);
+ }
+ void cmpxchgl(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("cmpxchgl %s, " MEM_obs, GPReg32Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, offset, base, index, scale, src);
+ }
+
+ void cmpxchg8b(RegisterID srcHi, RegisterID srcLo, RegisterID newHi,
+ RegisterID newLo, int32_t offset, RegisterID base) {
+ MOZ_ASSERT(srcHi == edx.code() && srcLo == eax.code());
+ MOZ_ASSERT(newHi == ecx.code() && newLo == ebx.code());
+ spew("cmpxchg8b %s, " MEM_ob, "edx:eax", ADDR_ob(offset, base));
+ m_formatter.twoByteOp(OP2_CMPXCHGNB, offset, base, 1);
+ }
+ void cmpxchg8b(RegisterID srcHi, RegisterID srcLo, RegisterID newHi,
+ RegisterID newLo, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ MOZ_ASSERT(srcHi == edx.code() && srcLo == eax.code());
+ MOZ_ASSERT(newHi == ecx.code() && newLo == ebx.code());
+ spew("cmpxchg8b %s, " MEM_obs, "edx:eax",
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.twoByteOp(OP2_CMPXCHGNB, offset, base, index, scale, 1);
+ }
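+ // Rough sketch of the compare-and-swap pattern these helpers are meant
+ // for (register choices are illustrative only):
+ //
+ //   movl_mr(offset, base, eax);    // eax <- current value
+ //   ...compute the desired new value in edx...
+ //   prefix_lock();
+ //   cmpxchgl(edx, offset, base);   // store edx iff [base+offset] == eax
+ //   // ZF is set on success; on failure eax holds the value observed.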
+
+ // Comparisons:
+
+ void cmpl_rr(RegisterID rhs, RegisterID lhs) {
+ spew("cmpl %s, %s", GPReg32Name(rhs), GPReg32Name(lhs));
+ m_formatter.oneByteOp(OP_CMP_GvEv, rhs, lhs);
+ }
+
+ void cmpl_rm(RegisterID rhs, int32_t offset, RegisterID base) {
+ spew("cmpl %s, " MEM_ob, GPReg32Name(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_CMP_EvGv, offset, base, rhs);
+ }
+
+ void cmpl_rm(RegisterID rhs, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("cmpl %s, " MEM_obs, GPReg32Name(rhs),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_CMP_EvGv, offset, base, index, scale, rhs);
+ }
+
+ void cmpl_mr(int32_t offset, RegisterID base, RegisterID lhs) {
+ spew("cmpl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(lhs));
+ m_formatter.oneByteOp(OP_CMP_GvEv, offset, base, lhs);
+ }
+
+ void cmpl_mr(const void* address, RegisterID lhs) {
+ spew("cmpl %p, %s", address, GPReg32Name(lhs));
+ m_formatter.oneByteOp(OP_CMP_GvEv, address, lhs);
+ }
+
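+ // Comparing against an immediate zero is lowered to "test reg, reg"
+ // below: it is shorter, and CF, OF, ZF, SF and PF match the cmp-with-zero
+ // result (only AF, which no jcc consumes, differs).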
+ void cmpl_ir(int32_t rhs, RegisterID lhs) {
+ if (rhs == 0) {
+ testl_rr(lhs, lhs);
+ return;
+ }
+
+ spew("cmpl $0x%x, %s", uint32_t(rhs), GPReg32Name(lhs));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ if (lhs == rax) {
+ m_formatter.oneByteOp(OP_CMP_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
+ }
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpl_i32r(int32_t rhs, RegisterID lhs) {
+ spew("cmpl $0x%04x, %s", uint32_t(rhs), GPReg32Name(lhs));
+ if (lhs == rax) {
+ m_formatter.oneByteOp(OP_CMP_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
+ }
+ m_formatter.immediate32(rhs);
+ }
+
+ void cmpl_im(int32_t rhs, int32_t offset, RegisterID base) {
+ spew("cmpl $0x%x, " MEM_ob, uint32_t(rhs), ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpb_rr(RegisterID rhs, RegisterID lhs) {
+ spew("cmpb %s, %s", GPReg8Name(rhs), GPReg8Name(lhs));
+ m_formatter.oneByteOp(OP_CMP_GbEb, rhs, lhs);
+ }
+
+ void cmpb_rm(RegisterID rhs, int32_t offset, RegisterID base) {
+ spew("cmpb %s, " MEM_ob, GPReg8Name(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_CMP_EbGb, offset, base, rhs);
+ }
+
+ void cmpb_rm(RegisterID rhs, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("cmpb %s, " MEM_obs, GPReg8Name(rhs),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_CMP_EbGb, offset, base, index, scale, rhs);
+ }
+
+ void cmpb_rm(RegisterID rhs, const void* addr) {
+ spew("cmpb %s, %p", GPReg8Name(rhs), addr);
+ m_formatter.oneByteOp(OP_CMP_EbGb, addr, rhs);
+ }
+
+ void cmpb_ir(int32_t rhs, RegisterID lhs) {
+ if (rhs == 0) {
+ testb_rr(lhs, lhs);
+ return;
+ }
+
+ spew("cmpb $0x%x, %s", uint32_t(rhs), GPReg8Name(lhs));
+ if (lhs == rax) {
+ m_formatter.oneByteOp(OP_CMP_EAXIb);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, lhs, GROUP1_OP_CMP);
+ }
+ m_formatter.immediate8(rhs);
+ }
+
+ void cmpb_im(int32_t rhs, int32_t offset, RegisterID base) {
+ spew("cmpb $0x%x, " MEM_ob, uint32_t(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate8(rhs);
+ }
+
+ void cmpb_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("cmpb $0x%x, " MEM_obs, uint32_t(rhs),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale,
+ GROUP1_OP_CMP);
+ m_formatter.immediate8(rhs);
+ }
+
+ void cmpb_im(int32_t rhs, const void* addr) {
+ spew("cmpb $0x%x, %p", uint32_t(rhs), addr);
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, addr, GROUP1_OP_CMP);
+ m_formatter.immediate8(rhs);
+ }
+
+ void cmpl_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("cmpl $0x%x, " MEM_obs, uint32_t(rhs),
+ ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
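+ // The _disp32 variants below force a 32-bit displacement and return a
+ // JmpSrc recording the buffer offset of the immediate, presumably so the
+ // compared value can be located and patched after assembly.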
+ [[nodiscard]] JmpSrc cmpl_im_disp32(int32_t rhs, int32_t offset,
+ RegisterID base) {
+ spew("cmpl $0x%x, " MEM_o32b, uint32_t(rhs), ADDR_o32b(offset, base));
+ JmpSrc r;
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp_disp32(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
+ r = JmpSrc(m_formatter.size());
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp_disp32(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ r = JmpSrc(m_formatter.size());
+ m_formatter.immediate32(rhs);
+ }
+ return r;
+ }
+
+ [[nodiscard]] JmpSrc cmpl_im_disp32(int32_t rhs, const void* addr) {
+ spew("cmpl $0x%x, %p", uint32_t(rhs), addr);
+ JmpSrc r;
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp_disp32(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
+ r = JmpSrc(m_formatter.size());
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp_disp32(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ r = JmpSrc(m_formatter.size());
+ m_formatter.immediate32(rhs);
+ }
+ return r;
+ }
+
+ void cmpl_i32m(int32_t rhs, int32_t offset, RegisterID base) {
+ spew("cmpl $0x%04x, " MEM_ob, uint32_t(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+
+ void cmpl_i32m(int32_t rhs, const void* addr) {
+ spew("cmpl $0x%04x, %p", uint32_t(rhs), addr);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+
+ void cmpl_rm(RegisterID rhs, const void* addr) {
+ spew("cmpl %s, %p", GPReg32Name(rhs), addr);
+ m_formatter.oneByteOp(OP_CMP_EvGv, addr, rhs);
+ }
+
+ void cmpl_rm_disp32(RegisterID rhs, const void* addr) {
+ spew("cmpl %s, %p", GPReg32Name(rhs), addr);
+ m_formatter.oneByteOp_disp32(OP_CMP_EvGv, addr, rhs);
+ }
+
+ void cmpl_im(int32_t rhs, const void* addr) {
+ spew("cmpl $0x%x, %p", uint32_t(rhs), addr);
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpw_rr(RegisterID rhs, RegisterID lhs) {
+ spew("cmpw %s, %s", GPReg16Name(rhs), GPReg16Name(lhs));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_CMP_GvEv, rhs, lhs);
+ }
+
+ void cmpw_rm(RegisterID rhs, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("cmpw %s, " MEM_obs, GPReg16Name(rhs),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_CMP_EvGv, offset, base, index, scale, rhs);
+ }
+
+ void cmpw_ir(int32_t rhs, RegisterID lhs) {
+ if (rhs == 0) {
+ testw_rr(lhs, lhs);
+ return;
+ }
+
+ spew("cmpw $0x%x, %s", uint32_t(rhs), GPReg16Name(lhs));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate16(rhs);
+ }
+ }
+
+ void cmpw_im(int32_t rhs, int32_t offset, RegisterID base) {
+ spew("cmpw $0x%x, " MEM_ob, uint32_t(rhs), ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate16(rhs);
+ }
+ }
+
+ void cmpw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("cmpw $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale,
+ GROUP1_OP_CMP);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale,
+ GROUP1_OP_CMP);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void cmpw_im(int32_t rhs, const void* addr) {
+ spew("cmpw $0x%x, %p", uint32_t(rhs), addr);
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ m_formatter.immediate16(rhs);
+ }
+ }
+
+ void testl_rr(RegisterID rhs, RegisterID lhs) {
+ spew("testl %s, %s", GPReg32Name(rhs), GPReg32Name(lhs));
+ m_formatter.oneByteOp(OP_TEST_EvGv, lhs, rhs);
+ }
+
+ void testb_rr(RegisterID rhs, RegisterID lhs) {
+ spew("testb %s, %s", GPReg8Name(rhs), GPReg8Name(lhs));
+ m_formatter.oneByteOp(OP_TEST_EbGb, lhs, rhs);
+ }
+
+ void testl_ir(int32_t rhs, RegisterID lhs) {
+ // If the mask fits in an 8-bit immediate, we can use testb with an
+ // 8-bit subreg.
+ if (CAN_ZERO_EXTEND_8_32(rhs) && HasSubregL(lhs)) {
+ testb_ir(rhs, lhs);
+ return;
+ }
+ // If the mask is a subset of 0xff00, we can use testb with an h reg, if
+ // one happens to be available.
+ if (CAN_ZERO_EXTEND_8H_32(rhs) && HasSubregH(lhs)) {
+ testb_ir_norex(rhs >> 8, GetSubregH(lhs));
+ return;
+ }
+ spew("testl $0x%x, %s", uint32_t(rhs), GPReg32Name(lhs));
+ if (lhs == rax) {
+ m_formatter.oneByteOp(OP_TEST_EAXIv);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, lhs, GROUP3_OP_TEST);
+ }
+ m_formatter.immediate32(rhs);
+ }
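+ // For example, testl of $0x40 against %ebx narrows to "testb $0x40, %bl"
+ // (3 bytes instead of 6), and a mask inside 0xff00 such as $0x4000 can use
+ // the high-byte register instead: "testb $0x40, %bh".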
+
+ void testl_i32m(int32_t rhs, int32_t offset, RegisterID base) {
+ spew("testl $0x%x, " MEM_ob, uint32_t(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, offset, base, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testl_i32m(int32_t rhs, const void* addr) {
+ spew("testl $0x%x, %p", uint32_t(rhs), addr);
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, addr, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testb_im(int32_t rhs, int32_t offset, RegisterID base) {
+ spew("testb $0x%x, " MEM_ob, uint32_t(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, offset, base, GROUP3_OP_TEST);
+ m_formatter.immediate8(rhs);
+ }
+
+ void testb_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("testb $0x%x, " MEM_obs, uint32_t(rhs),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, offset, base, index, scale,
+ GROUP3_OP_TEST);
+ m_formatter.immediate8(rhs);
+ }
+
+ void testl_i32m(int32_t rhs, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("testl $0x%4x, " MEM_obs, uint32_t(rhs),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, offset, base, index, scale,
+ GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testw_rr(RegisterID rhs, RegisterID lhs) {
+ spew("testw %s, %s", GPReg16Name(rhs), GPReg16Name(lhs));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_TEST_EvGv, lhs, rhs);
+ }
+
+ void testb_ir(int32_t rhs, RegisterID lhs) {
+ spew("testb $0x%x, %s", uint32_t(rhs), GPReg8Name(lhs));
+ if (lhs == rax) {
+ m_formatter.oneByteOp8(OP_TEST_EAXIb);
+ } else {
+ m_formatter.oneByteOp8(OP_GROUP3_EbIb, lhs, GROUP3_OP_TEST);
+ }
+ m_formatter.immediate8(rhs);
+ }
+
+ // Like testb_ir, but never emits a REX prefix. This may be used to
+ // reference ah..bh.
+ void testb_ir_norex(int32_t rhs, HRegisterID lhs) {
+ spew("testb $0x%x, %s", uint32_t(rhs), HRegName8(lhs));
+ m_formatter.oneByteOp8_norex(OP_GROUP3_EbIb, lhs, GROUP3_OP_TEST);
+ m_formatter.immediate8(rhs);
+ }
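+ // (Once a REX prefix is present, ModRM register codes 4-7 select
+ // %spl/%bpl/%sil/%dil rather than %ah/%ch/%dh/%bh, so the norex form above
+ // must never emit one.)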
+
+ void setCC_r(Condition cond, RegisterID lhs) {
+ spew("set%s %s", CCName(cond), GPReg8Name(lhs));
+ m_formatter.twoByteOp8(setccOpcode(cond), lhs, (GroupOpcodeID)0);
+ }
+
+ void sete_r(RegisterID dst) { setCC_r(ConditionE, dst); }
+
+ void setz_r(RegisterID dst) { sete_r(dst); }
+
+ void setne_r(RegisterID dst) { setCC_r(ConditionNE, dst); }
+
+ void setnz_r(RegisterID dst) { setne_r(dst); }
+
+ // Various move ops:
+
+ void cdq() {
+ spew("cdq ");
+ m_formatter.oneByteOp(OP_CDQ);
+ }
+
+ void xchgb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("xchgb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_XCHG_GbEb, offset, base, src);
+ }
+ void xchgb_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("xchgb %s, " MEM_obs, GPReg8Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_XCHG_GbEb, offset, base, index, scale, src);
+ }
+
+ void xchgw_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("xchgw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, src);
+ }
+ void xchgw_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("xchgw %s, " MEM_obs, GPReg16Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, index, scale, src);
+ }
+
+ void xchgl_rr(RegisterID src, RegisterID dst) {
+ spew("xchgl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_XCHG_GvEv, src, dst);
+ }
+ void xchgl_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("xchgl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, src);
+ }
+ void xchgl_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("xchgl %s, " MEM_obs, GPReg32Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_XCHG_GvEv, offset, base, index, scale, src);
+ }
+
+ void cmovCCl_rr(Condition cond, RegisterID src, RegisterID dst) {
+ spew("cmov%s %s, %s", CCName(cond), GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(cmovccOpcode(cond), src, dst);
+ }
+ void cmovCCl_mr(Condition cond, int32_t offset, RegisterID base,
+ RegisterID dst) {
+ spew("cmov%s " MEM_ob ", %s", CCName(cond), ADDR_ob(offset, base),
+ GPReg32Name(dst));
+ m_formatter.twoByteOp(cmovccOpcode(cond), offset, base, dst);
+ }
+ void cmovCCl_mr(Condition cond, int32_t offset, RegisterID base,
+ RegisterID index, int scale, RegisterID dst) {
+ spew("cmov%s " MEM_obs ", %s", CCName(cond),
+ ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.twoByteOp(cmovccOpcode(cond), offset, base, index, scale, dst);
+ }
+
+ void movl_rr(RegisterID src, RegisterID dst) {
+ spew("movl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEv, src, dst);
+ }
+
+ void movw_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("movw %s, " MEM_ob, GPReg16Name(src), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movw_rm_disp32(RegisterID src, int32_t offset, RegisterID base) {
+ spew("movw %s, " MEM_o32b, GPReg16Name(src), ADDR_o32b(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movw_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("movw %s, " MEM_obs, GPReg16Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_MOV_EvGv, offset, base, index, scale, src);
+ }
+
+ void movw_rm(RegisterID src, const void* addr) {
+ spew("movw %s, %p", GPReg16Name(src), addr);
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, addr, src);
+ }
+
+ void movl_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("movl %s, " MEM_ob, GPReg32Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movl_rm_disp32(RegisterID src, int32_t offset, RegisterID base) {
+ spew("movl %s, " MEM_o32b, GPReg32Name(src), ADDR_o32b(offset, base));
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movl_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("movl %s, " MEM_obs, GPReg32Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_MOV_EvGv, offset, base, index, scale, src);
+ }
+
+ void movl_mEAX(const void* addr) {
+#ifdef JS_CODEGEN_X64
+ if (IsAddressImmediate(addr)) {
+ movl_mr(addr, rax);
+ return;
+ }
+#endif
+
+#ifdef JS_CODEGEN_X64
+ spew("movabs %p, %%eax", addr);
+#else
+ spew("movl %p, %%eax", addr);
+#endif
+ m_formatter.oneByteOp(OP_MOV_EAXOv);
+#ifdef JS_CODEGEN_X64
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int32_t>(addr));
+#endif
+ }
+
+ void movl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movl " MEM_o32b ", %s", ADDR_o32b(offset, base),
+ GPReg32Name(dst));
+ m_formatter.oneByteOp_disp32(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movl_mr(const void* base, RegisterID index, int scale, RegisterID dst) {
+ int32_t disp = AddressImmediate(base);
+
+ spew("movl " MEM_os ", %s", ADDR_os(disp, index, scale),
+ GPReg32Name(dst));
+ m_formatter.oneByteOp_disp32(OP_MOV_GvEv, disp, index, scale, dst);
+ }
+
+ void movl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEv, offset, base, index, scale, dst);
+ }
+
+ void movl_mr(const void* addr, RegisterID dst) {
+ if (dst == rax
+#ifdef JS_CODEGEN_X64
+ && !IsAddressImmediate(addr)
+#endif
+ ) {
+ movl_mEAX(addr);
+ return;
+ }
+
+ spew("movl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEv, addr, dst);
+ }
+
+ void movl_i32r(int32_t imm, RegisterID dst) {
+ spew("movl $0x%x, %s", uint32_t(imm), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void movb_ir(int32_t imm, RegisterID reg) {
+ spew("movb $0x%x, %s", uint32_t(imm), GPReg8Name(reg));
+ m_formatter.oneByteOp8(OP_MOV_EbIb, reg);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("movb $0x%x, " MEM_ob, uint32_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, offset, base, GROUP11_MOV);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("movb $0x%x, " MEM_obs, uint32_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, offset, base, index, scale,
+ GROUP11_MOV);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_im(int32_t imm, const void* addr) {
+ spew("movb $%d, %p", imm, addr);
+ m_formatter.oneByteOp_disp32(OP_GROUP11_EvIb, addr, GROUP11_MOV);
+ m_formatter.immediate8(imm);
+ }
+
+ void movw_im(int32_t imm, int32_t offset, RegisterID base) {
+ spew("movw $0x%x, " MEM_ob, uint32_t(imm), ADDR_ob(offset, base));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, offset, base, GROUP11_MOV);
+ m_formatter.immediate16(imm);
+ }
+
+ void movw_im(int32_t imm, const void* addr) {
+ spew("movw $%d, %p", imm, addr);
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp_disp32(OP_GROUP11_EvIz, addr, GROUP11_MOV);
+ m_formatter.immediate16(imm);
+ }
+
+ void movl_i32m(int32_t imm, int32_t offset, RegisterID base) {
+ spew("movl $0x%x, " MEM_ob, uint32_t(imm), ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, offset, base, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ void movw_im(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("movw $0x%x, " MEM_obs, uint32_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, offset, base, index, scale,
+ GROUP11_MOV);
+ m_formatter.immediate16(imm);
+ }
+
+ void movl_i32m(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
+ int scale) {
+ spew("movl $0x%x, " MEM_obs, uint32_t(imm),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, offset, base, index, scale,
+ GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_EAXm(const void* addr) {
+#ifdef JS_CODEGEN_X64
+ if (IsAddressImmediate(addr)) {
+ movl_rm(rax, addr);
+ return;
+ }
+#endif
+
+ spew("movl %%eax, %p", addr);
+ m_formatter.oneByteOp(OP_MOV_OvEAX);
+#ifdef JS_CODEGEN_X64
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int32_t>(addr));
+#endif
+ }
+
+ void vmovq_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ // vmovq_rm can be encoded either as a true vmovq or as a vmovd with a
+ // REX prefix modifying it to be 64-bit. We choose the vmovq encoding
+ // because it's smaller (when it doesn't need a REX prefix for other
+ // reasons) and because it works on 32-bit x86 too.
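+    // Concretely, this store is 66 0F D6 /r (MOVQ m64, xmm); the
+    // alternative, 66 REX.W 0F 7E /r, is one byte longer and exists only in
+    // 64-bit mode.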
+ twoByteOpSimd("vmovq", VEX_PD, OP2_MOVQ_WdVd, offset, base, invalid_xmm,
+ src);
+ }
+
+ void vmovq_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd_disp32("vmovq", VEX_PD, OP2_MOVQ_WdVd, offset, base,
+ invalid_xmm, src);
+ }
+
+ void vmovq_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovq", VEX_PD, OP2_MOVQ_WdVd, offset, base, index, scale,
+ invalid_xmm, src);
+ }
+
+ void vmovq_rm(XMMRegisterID src, const void* addr) {
+ twoByteOpSimd("vmovq", VEX_PD, OP2_MOVQ_WdVd, addr, invalid_xmm, src);
+ }
+
+ void vmovq_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ // vmovq_mr can be encoded either as a true vmovq or as a vmovd with a
+ // REX prefix modifying it to be 64-bit. We choose the vmovq encoding
+ // because it's smaller (when it doesn't need a REX prefix for other
+ // reasons) and because it works on 32-bit x86 too.
+ twoByteOpSimd("vmovq", VEX_SS, OP2_MOVQ_VdWd, offset, base, invalid_xmm,
+ dst);
+ }
+
+ void vmovq_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd_disp32("vmovq", VEX_SS, OP2_MOVQ_VdWd, offset, base,
+ invalid_xmm, dst);
+ }
+
+ void vmovq_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ twoByteOpSimd("vmovq", VEX_SS, OP2_MOVQ_VdWd, offset, base, index, scale,
+ invalid_xmm, dst);
+ }
+
+ void vmovq_mr(const void* addr, XMMRegisterID dst) {
+ twoByteOpSimd("vmovq", VEX_SS, OP2_MOVQ_VdWd, addr, invalid_xmm, dst);
+ }
+
+ void movl_rm(RegisterID src, const void* addr) {
+ if (src == rax
+#ifdef JS_CODEGEN_X64
+ && !IsAddressImmediate(addr)
+#endif
+ ) {
+ movl_EAXm(addr);
+ return;
+ }
+
+ spew("movl %s, %p", GPReg32Name(src), addr);
+ m_formatter.oneByteOp(OP_MOV_EvGv, addr, src);
+ }
+
+ void movl_i32m(int32_t imm, const void* addr) {
+ spew("movl $%d, %p", imm, addr);
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, addr, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ void movb_rm(RegisterID src, int32_t offset, RegisterID base) {
+ spew("movb %s, " MEM_ob, GPReg8Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp8(OP_MOV_EbGv, offset, base, src);
+ }
+
+ void movb_rm_disp32(RegisterID src, int32_t offset, RegisterID base) {
+ spew("movb %s, " MEM_o32b, GPReg8Name(src), ADDR_o32b(offset, base));
+ m_formatter.oneByteOp8_disp32(OP_MOV_EbGv, offset, base, src);
+ }
+
+ void movb_rm(RegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ spew("movb %s, " MEM_obs, GPReg8Name(src),
+ ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp8(OP_MOV_EbGv, offset, base, index, scale, src);
+ }
+
+ void movb_rm(RegisterID src, const void* addr) {
+ spew("movb %s, %p", GPReg8Name(src), addr);
+ m_formatter.oneByteOp8(OP_MOV_EbGv, addr, src);
+ }
+
+ void movb_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movb " MEM_ob ", %s", ADDR_ob(offset, base), GPReg8Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEb, offset, base, dst);
+ }
+
+ void movb_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movb " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg8Name(dst));
+ m_formatter.oneByteOp(OP_MOV_GvEb, offset, base, index, scale, dst);
+ }
+
+ void movzbl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movzbl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, offset, base, dst);
+ }
+
+ void movzbl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movzbl " MEM_o32b ", %s", ADDR_o32b(offset, base),
+ GPReg32Name(dst));
+ m_formatter.twoByteOp_disp32(OP2_MOVZX_GvEb, offset, base, dst);
+ }
+
+ void movzbl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movzbl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, offset, base, index, scale, dst);
+ }
+
+ void movzbl_mr(const void* addr, RegisterID dst) {
+ spew("movzbl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, addr, dst);
+ }
+
+ void movsbl_rr(RegisterID src, RegisterID dst) {
+ spew("movsbl %s, %s", GPReg8Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8_movx(OP2_MOVSX_GvEb, src, dst);
+ }
+
+ void movsbl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movsbl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, offset, base, dst);
+ }
+
+ void movsbl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movsbl " MEM_o32b ", %s", ADDR_o32b(offset, base),
+ GPReg32Name(dst));
+ m_formatter.twoByteOp_disp32(OP2_MOVSX_GvEb, offset, base, dst);
+ }
+
+ void movsbl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movsbl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, offset, base, index, scale, dst);
+ }
+
+ void movsbl_mr(const void* addr, RegisterID dst) {
+ spew("movsbl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, addr, dst);
+ }
+
+ void movzwl_rr(RegisterID src, RegisterID dst) {
+ spew("movzwl %s, %s", GPReg16Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, src, dst);
+ }
+
+ void movzwl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movzwl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, offset, base, dst);
+ }
+
+ void movzwl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movzwl " MEM_o32b ", %s", ADDR_o32b(offset, base),
+ GPReg32Name(dst));
+ m_formatter.twoByteOp_disp32(OP2_MOVZX_GvEw, offset, base, dst);
+ }
+
+ void movzwl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movzwl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, offset, base, index, scale, dst);
+ }
+
+ void movzwl_mr(const void* addr, RegisterID dst) {
+ spew("movzwl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, addr, dst);
+ }
+
+ void movswl_rr(RegisterID src, RegisterID dst) {
+ spew("movswl %s, %s", GPReg16Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, src, dst);
+ }
+
+ void movswl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movswl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, offset, base, dst);
+ }
+
+ void movswl_mr_disp32(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("movswl " MEM_o32b ", %s", ADDR_o32b(offset, base),
+ GPReg32Name(dst));
+ m_formatter.twoByteOp_disp32(OP2_MOVSX_GvEw, offset, base, dst);
+ }
+
+ void movswl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("movswl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, offset, base, index, scale, dst);
+ }
+
+ void movswl_mr(const void* addr, RegisterID dst) {
+ spew("movswl %p, %s", addr, GPReg32Name(dst));
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, addr, dst);
+ }
+
+ void movzbl_rr(RegisterID src, RegisterID dst) {
+ spew("movzbl %s, %s", GPReg8Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8_movx(OP2_MOVZX_GvEb, src, dst);
+ }
+
+ void leal_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("leal " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_LEA, offset, base, index, scale, dst);
+ }
+
+ void leal_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("leal " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_LEA, offset, base, dst);
+ }
+
+ // Flow control:
+
+ [[nodiscard]] JmpSrc call() {
+ m_formatter.oneByteOp(OP_CALL_rel32);
+ JmpSrc r = m_formatter.immediateRel32();
+ spew("call .Lfrom%d", r.offset());
+ return r;
+ }
+
+ void call_r(RegisterID dst) {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, dst, GROUP5_OP_CALLN);
+ spew("call *%s", GPRegName(dst));
+ }
+
+ void call_m(int32_t offset, RegisterID base) {
+ spew("call *" MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_CALLN);
+ }
+
+ // Comparison of EAX against a 32-bit immediate. The immediate is patched
+ // in as if it were a jump target. The intention is to toggle the first
+ // byte of the instruction between a CMP and a JMP to produce a pseudo-NOP.
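+  // Both encodings are five bytes long: CMP EAX, imm32 is 0x3D imm32 and
+  // JMP rel32 is 0xE9 rel32, so rewriting the single opcode byte is enough
+  // to switch between them.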
+ [[nodiscard]] JmpSrc cmp_eax() {
+ m_formatter.oneByteOp(OP_CMP_EAXIv);
+ JmpSrc r = m_formatter.immediateRel32();
+ spew("cmpl %%eax, .Lfrom%d", r.offset());
+ return r;
+ }
+
+ void jmp_i(JmpDst dst) {
+ int32_t diff = dst.offset() - m_formatter.size();
+ spew("jmp .Llabel%d", dst.offset());
+
+    // The jump immediate is an offset from the end of the jump instruction.
+    // A jump instruction is either a 1-byte opcode with a 1-byte offset, or
+    // a 1-byte opcode with a 4-byte offset.
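+    // For example, a short backward jump with diff = -0x10 encodes as EB EE
+    // (imm8 = diff - 2 = -0x12), while diff = 0x1000 takes the long form
+    // E9 FB 0F 00 00 (imm32 = diff - 5 = 0xFFB).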
+ if (CAN_SIGN_EXTEND_8_32(diff - 2)) {
+ m_formatter.oneByteOp(OP_JMP_rel8);
+ m_formatter.immediate8s(diff - 2);
+ } else {
+ m_formatter.oneByteOp(OP_JMP_rel32);
+ m_formatter.immediate32(diff - 5);
+ }
+ }
+ [[nodiscard]] JmpSrc jmp() {
+ m_formatter.oneByteOp(OP_JMP_rel32);
+ JmpSrc r = m_formatter.immediateRel32();
+ spew("jmp .Lfrom%d", r.offset());
+ return r;
+ }
+
+ void jmp_r(RegisterID dst) {
+ spew("jmp *%s", GPRegName(dst));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, dst, GROUP5_OP_JMPN);
+ }
+
+ void jmp_m(int32_t offset, RegisterID base) {
+ spew("jmp *" MEM_ob, ADDR_ob(offset, base));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, GROUP5_OP_JMPN);
+ }
+
+ void jmp_m(int32_t offset, RegisterID base, RegisterID index, int scale) {
+ spew("jmp *" MEM_obs, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp(OP_GROUP5_Ev, offset, base, index, scale,
+ GROUP5_OP_JMPN);
+ }
+
+ void jCC_i(Condition cond, JmpDst dst) {
+ int32_t diff = dst.offset() - m_formatter.size();
+ spew("j%s .Llabel%d", CCName(cond), dst.offset());
+
+    // The jump immediate is an offset from the end of the jump instruction.
+    // A conditional jump instruction is either a 1-byte opcode with a 1-byte
+    // offset, or a 2-byte opcode with a 4-byte offset.
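+    // The rel8 form is 0x70+cc imm8 and the rel32 form is 0x0F 0x80+cc
+    // imm32, hence the 2- and 6-byte adjustments below.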
+ if (CAN_SIGN_EXTEND_8_32(diff - 2)) {
+ m_formatter.oneByteOp(jccRel8(cond));
+ m_formatter.immediate8s(diff - 2);
+ } else {
+ m_formatter.twoByteOp(jccRel32(cond));
+ m_formatter.immediate32(diff - 6);
+ }
+ }
+
+ [[nodiscard]] JmpSrc jCC(Condition cond) {
+ m_formatter.twoByteOp(jccRel32(cond));
+ JmpSrc r = m_formatter.immediateRel32();
+ spew("j%s .Lfrom%d", CCName(cond), r.offset());
+ return r;
+ }
+
+ // SSE operations:
+
+ void vpcmpeqb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpeqb", VEX_PD, OP2_PCMPEQB_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpeqb_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpeqb", VEX_PD, OP2_PCMPEQB_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpcmpeqb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpeqb", VEX_PD, OP2_PCMPEQB_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpgtb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpgtb", VEX_PD, OP2_PCMPGTB_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpgtb_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpgtb", VEX_PD, OP2_PCMPGTB_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpcmpgtb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpgtb", VEX_PD, OP2_PCMPGTB_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpeqw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpeqw", VEX_PD, OP2_PCMPEQW_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpeqw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpeqw", VEX_PD, OP2_PCMPEQW_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpcmpeqw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpeqw", VEX_PD, OP2_PCMPEQW_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpgtw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpgtw", VEX_PD, OP2_PCMPGTW_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpgtw_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpgtw", VEX_PD, OP2_PCMPGTW_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpcmpgtw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpgtw", VEX_PD, OP2_PCMPGTW_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpeqd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpeqd", VEX_PD, OP2_PCMPEQD_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpeqd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpeqd", VEX_PD, OP2_PCMPEQD_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpcmpeqd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpeqd", VEX_PD, OP2_PCMPEQD_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpgtd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpgtd", VEX_PD, OP2_PCMPGTD_VdqWdq, src1, src0, dst);
+ }
+ void vpcmpgtd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpgtd", VEX_PD, OP2_PCMPGTD_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpcmpgtd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpcmpgtd", VEX_PD, OP2_PCMPGTD_VdqWdq, address, src0, dst);
+ }
+
+ void vpcmpgtq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpcmpgtq", VEX_PD, OP3_PCMPGTQ_VdqWdq, ESCAPE_38, src1,
+ src0, dst);
+ }
+
+ void vpcmpeqq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpcmpeqq", VEX_PD, OP3_PCMPEQQ_VdqWdq, ESCAPE_38, src1,
+ src0, dst);
+ }
+ void vpcmpeqq_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ threeByteOpSimd("vpcmpeqq", VEX_PD, OP3_PCMPEQQ_VdqWdq, ESCAPE_38, offset,
+ base, src0, dst);
+ }
+ void vpcmpeqq_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpcmpeqq", VEX_PD, OP3_PCMPEQQ_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
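+  // Without VEX, cmpps can only encode the eight original comparison
+  // predicates (imm8 values 0..7); the extended predicates starting at
+  // ConditionCmp_AVX_Enabled require the VEX form, hence the assertions
+  // below.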
+ void vcmpps_rr(uint8_t order, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT_IF(!useVEX_,
+ order < uint8_t(X86Encoding::ConditionCmp_AVX_Enabled));
+ twoByteOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, order, src1, src0,
+ dst);
+ }
+ void vcmpps_mr(uint8_t order, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ MOZ_ASSERT_IF(!useVEX_,
+ order < uint8_t(X86Encoding::ConditionCmp_AVX_Enabled));
+ twoByteOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, order, offset, base,
+ src0, dst);
+ }
+ void vcmpps_mr(uint8_t order, const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT_IF(!useVEX_,
+ order < uint8_t(X86Encoding::ConditionCmp_AVX_Enabled));
+ twoByteOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, order, address, src0,
+ dst);
+ }
+
+ static constexpr size_t CMPPS_MR_PATCH_OFFSET = 1;
+
+ size_t vcmpeqps_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vcmpps_mr(X86Encoding::ConditionCmp_EQ, address, src0, dst);
+ return CMPPS_MR_PATCH_OFFSET;
+ }
+ size_t vcmpneqps_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vcmpps_mr(X86Encoding::ConditionCmp_NEQ, address, src0, dst);
+ return CMPPS_MR_PATCH_OFFSET;
+ }
+ size_t vcmpltps_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vcmpps_mr(X86Encoding::ConditionCmp_LT, address, src0, dst);
+ return CMPPS_MR_PATCH_OFFSET;
+ }
+ size_t vcmpleps_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vcmpps_mr(X86Encoding::ConditionCmp_LE, address, src0, dst);
+ return CMPPS_MR_PATCH_OFFSET;
+ }
+ size_t vcmpgeps_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vcmpps_mr(X86Encoding::ConditionCmp_GE, address, src0, dst);
+ return CMPPS_MR_PATCH_OFFSET;
+ }
+
+ void vcmppd_rr(uint8_t order, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd, order, src1, src0,
+ dst);
+ }
+ void vcmppd_mr(uint8_t order, const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd, order, address, src0,
+ dst);
+ }
+
+ static constexpr size_t CMPPD_MR_PATCH_OFFSET = 1;
+
+ size_t vcmpeqpd_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vcmppd_mr(X86Encoding::ConditionCmp_EQ, address, src0, dst);
+ return CMPPD_MR_PATCH_OFFSET;
+ }
+ size_t vcmpneqpd_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vcmppd_mr(X86Encoding::ConditionCmp_NEQ, address, src0, dst);
+ return CMPPD_MR_PATCH_OFFSET;
+ }
+ size_t vcmpltpd_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vcmppd_mr(X86Encoding::ConditionCmp_LT, address, src0, dst);
+ return CMPPD_MR_PATCH_OFFSET;
+ }
+ size_t vcmplepd_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vcmppd_mr(X86Encoding::ConditionCmp_LE, address, src0, dst);
+ return CMPPD_MR_PATCH_OFFSET;
+ }
+
+ void vrcpps_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vrcpps", VEX_PS, OP2_RCPPS_VpsWps, src, invalid_xmm, dst);
+ }
+ void vrcpps_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vrcpps", VEX_PS, OP2_RCPPS_VpsWps, offset, base, invalid_xmm,
+ dst);
+ }
+ void vrcpps_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vrcpps", VEX_PS, OP2_RCPPS_VpsWps, address, invalid_xmm,
+ dst);
+ }
+
+ void vrsqrtps_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vrsqrtps", VEX_PS, OP2_RSQRTPS_VpsWps, src, invalid_xmm,
+ dst);
+ }
+ void vrsqrtps_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vrsqrtps", VEX_PS, OP2_RSQRTPS_VpsWps, offset, base,
+ invalid_xmm, dst);
+ }
+ void vrsqrtps_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vrsqrtps", VEX_PS, OP2_RSQRTPS_VpsWps, address, invalid_xmm,
+ dst);
+ }
+
+ void vsqrtps_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vsqrtps", VEX_PS, OP2_SQRTPS_VpsWps, src, invalid_xmm, dst);
+ }
+ void vsqrtps_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vsqrtps", VEX_PS, OP2_SQRTPS_VpsWps, offset, base,
+ invalid_xmm, dst);
+ }
+ void vsqrtps_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vsqrtps", VEX_PS, OP2_SQRTPS_VpsWps, address, invalid_xmm,
+ dst);
+ }
+ void vsqrtpd_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vsqrtpd", VEX_PD, OP2_SQRTPD_VpdWpd, src, invalid_xmm, dst);
+ }
+
+ void vaddsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vaddsd", VEX_SD, OP2_ADDSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vaddss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vaddss", VEX_SS, OP2_ADDSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vaddsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vaddsd", VEX_SD, OP2_ADDSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vaddss_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vaddss", VEX_SS, OP2_ADDSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vaddsd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vaddsd", VEX_SD, OP2_ADDSD_VsdWsd, address, src0, dst);
+ }
+ void vaddss_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vaddss", VEX_SS, OP2_ADDSD_VsdWsd, address, src0, dst);
+ }
+
+ void vcvtss2sd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vcvtss2sd", VEX_SS, OP2_CVTSS2SD_VsdEd, src1, src0, dst);
+ }
+
+ void vcvtsd2ss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vcvtsd2ss", VEX_SD, OP2_CVTSD2SS_VsdEd, src1, src0, dst);
+ }
+
+ void vcvtsi2ss_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpInt32Simd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, src1, src0,
+ dst);
+ }
+
+ void vcvtsi2sd_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpInt32Simd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, src1, src0,
+ dst);
+ }
+
+ void vcvttps2dq_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vcvttps2dq", VEX_SS, OP2_CVTTPS2DQ_VdqWps, src, invalid_xmm,
+ dst);
+ }
+
+ void vcvttpd2dq_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vcvttpd2dq", VEX_PD, OP2_CVTTPD2DQ_VdqWpd, src, invalid_xmm,
+ dst);
+ }
+
+ void vcvtdq2ps_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vcvtdq2ps", VEX_PS, OP2_CVTDQ2PS_VpsWdq, src, invalid_xmm,
+ dst);
+ }
+
+ void vcvtdq2pd_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vcvtdq2pd", VEX_SS, OP2_CVTDQ2PD_VpdWdq, src, invalid_xmm,
+ dst);
+ }
+
+ void vcvtpd2ps_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vcvtpd2ps", VEX_PD, OP2_CVTPD2PS_VpsWpd, src, invalid_xmm,
+ dst);
+ }
+
+ void vcvtps2pd_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vcvtps2pd", VEX_PS, OP2_CVTPS2PD_VpdWps, src, invalid_xmm,
+ dst);
+ }
+
+ void vcvtsi2sd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, offset, base, src0,
+ dst);
+ }
+
+ void vcvtsi2sd_mr(int32_t offset, RegisterID base, RegisterID index,
+ int scale, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, offset, base, index,
+ scale, src0, dst);
+ }
+
+ void vcvtsi2ss_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, offset, base, src0,
+ dst);
+ }
+
+ void vcvtsi2ss_mr(int32_t offset, RegisterID base, RegisterID index,
+ int scale, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, offset, base, index,
+ scale, src0, dst);
+ }
+
+ void vcvttsd2si_rr(XMMRegisterID src, RegisterID dst) {
+ twoByteOpSimdInt32("vcvttsd2si", VEX_SD, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vcvttss2si_rr(XMMRegisterID src, RegisterID dst) {
+ twoByteOpSimdInt32("vcvttss2si", VEX_SS, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vunpcklps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vunpcklps", VEX_PS, OP2_UNPCKLPS_VsdWsd, src1, src0, dst);
+ }
+ void vunpcklps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vunpcklps", VEX_PS, OP2_UNPCKLPS_VsdWsd, offset, base, src0,
+ dst);
+ }
+ void vunpcklps_mr(const void* addr, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vunpcklps", VEX_PS, OP2_UNPCKLPS_VsdWsd, addr, src0, dst);
+ }
+
+ void vunpckhps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vunpckhps", VEX_PS, OP2_UNPCKHPS_VsdWsd, src1, src0, dst);
+ }
+ void vunpckhps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vunpckhps", VEX_PS, OP2_UNPCKHPS_VsdWsd, offset, base, src0,
+ dst);
+ }
+ void vunpckhps_mr(const void* addr, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vunpckhps", VEX_PS, OP2_UNPCKHPS_VsdWsd, addr, src0, dst);
+ }
+
+ void vpand_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpand", VEX_PD, OP2_PANDDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpand_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpand", VEX_PD, OP2_PANDDQ_VdqWdq, offset, base, src0, dst);
+ }
+ void vpand_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpand", VEX_PD, OP2_PANDDQ_VdqWdq, address, src0, dst);
+ }
+ void vpor_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpor", VEX_PD, OP2_PORDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpor_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpor", VEX_PD, OP2_PORDQ_VdqWdq, offset, base, src0, dst);
+ }
+ void vpor_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpor", VEX_PD, OP2_PORDQ_VdqWdq, address, src0, dst);
+ }
+ void vpxor_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpxor", VEX_PD, OP2_PXORDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpxor_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpxor", VEX_PD, OP2_PXORDQ_VdqWdq, offset, base, src0, dst);
+ }
+ void vpxor_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpxor", VEX_PD, OP2_PXORDQ_VdqWdq, address, src0, dst);
+ }
+ void vpandn_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpandn", VEX_PD, OP2_PANDNDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpandn_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpandn", VEX_PD, OP2_PANDNDQ_VdqWdq, offset, base, src0,
+ dst);
+ }
+ void vpandn_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpandn", VEX_PD, OP2_PANDNDQ_VdqWdq, address, src0, dst);
+ }
+ void vptest_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vptest", VEX_PD, OP3_PTEST_VdVd, ESCAPE_38, address, src0,
+ dst);
+ }
+
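+  // pshufd, pshuflw and pshufhw all share opcode 0x70; the mandatory prefix
+  // implied by the VEX_PD / VEX_SD / VEX_SS operand type (66, F2, F3) is
+  // what distinguishes them.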
+ void vpshufd_irr(uint32_t mask, XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpImmSimd("vpshufd", VEX_PD, OP2_PSHUFD_VdqWdqIb, mask, src,
+ invalid_xmm, dst);
+ }
+ void vpshufd_imr(uint32_t mask, int32_t offset, RegisterID base,
+ XMMRegisterID dst) {
+ twoByteOpImmSimd("vpshufd", VEX_PD, OP2_PSHUFD_VdqWdqIb, mask, offset, base,
+ invalid_xmm, dst);
+ }
+ void vpshufd_imr(uint32_t mask, const void* address, XMMRegisterID dst) {
+ twoByteOpImmSimd("vpshufd", VEX_PD, OP2_PSHUFD_VdqWdqIb, mask, address,
+ invalid_xmm, dst);
+ }
+
+ void vpshuflw_irr(uint32_t mask, XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpImmSimd("vpshuflw", VEX_SD, OP2_PSHUFLW_VdqWdqIb, mask, src,
+ invalid_xmm, dst);
+ }
+
+ void vpshufhw_irr(uint32_t mask, XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpImmSimd("vpshufhw", VEX_SS, OP2_PSHUFHW_VdqWdqIb, mask, src,
+ invalid_xmm, dst);
+ }
+
+ void vpshufb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpshufb", VEX_PD, OP3_PSHUFB_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpshufb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpshufb", VEX_PD, OP3_PSHUFB_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
+ void vshufps_irr(uint32_t mask, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpImmSimd("vshufps", VEX_PS, OP2_SHUFPS_VpsWpsIb, mask, src1, src0,
+ dst);
+ }
+ void vshufps_imr(uint32_t mask, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpImmSimd("vshufps", VEX_PS, OP2_SHUFPS_VpsWpsIb, mask, offset, base,
+ src0, dst);
+ }
+ void vshufps_imr(uint32_t mask, const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpImmSimd("vshufps", VEX_PS, OP2_SHUFPS_VpsWpsIb, mask, address,
+ src0, dst);
+ }
+ void vshufpd_irr(uint32_t mask, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpImmSimd("vshufpd", VEX_PD, OP2_SHUFPD_VpdWpdIb, mask, src1, src0,
+ dst);
+ }
+
+ void vmovddup_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vmovddup", VEX_SD, OP2_MOVDDUP_VqWq, src, invalid_xmm, dst);
+ }
+ void vmovddup_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovddup", VEX_SD, OP2_MOVDDUP_VqWq, offset, base,
+ invalid_xmm, dst);
+ }
+ void vmovddup_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ twoByteOpSimd("vmovddup", VEX_SD, OP2_MOVDDUP_VqWq, offset, base, index,
+ scale, invalid_xmm, dst);
+ }
+
+ void vmovhlps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmovhlps", VEX_PS, OP2_MOVHLPS_VqUq, src1, src0, dst);
+ }
+
+ void vmovlhps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmovlhps", VEX_PS, OP2_MOVLHPS_VqUq, src1, src0, dst);
+ }
+
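+  // The immediate forms of vpsrldq, vpslldq, vpsllq and vpsrlq all share
+  // opcode 0x73; the ShiftID argument supplies the ModRM /r extension that
+  // picks psrlq (/2), psrldq (/3), psllq (/6) or pslldq (/7).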
+ void vpsrldq_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 16);
+ shiftOpImmSimd("vpsrldq", OP2_PSRLDQ_Vd, ShiftID::vpsrldq, count, src, dst);
+ }
+
+ void vpslldq_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 16);
+ shiftOpImmSimd("vpslldq", OP2_PSRLDQ_Vd, ShiftID::vpslldq, count, src, dst);
+ }
+
+ void vpsllq_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 64);
+ shiftOpImmSimd("vpsllq", OP2_PSRLDQ_Vd, ShiftID::vpsllx, count, src, dst);
+ }
+
+ void vpsllq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsllq", VEX_PD, OP2_PSLLQ_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsrlq_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 64);
+ shiftOpImmSimd("vpsrlq", OP2_PSRLDQ_Vd, ShiftID::vpsrlx, count, src, dst);
+ }
+
+ void vpsrlq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsrlq", VEX_PD, OP2_PSRLQ_VdqWdq, src1, src0, dst);
+ }
+
+ void vpslld_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpslld", VEX_PD, OP2_PSLLD_VdqWdq, src1, src0, dst);
+ }
+
+ void vpslld_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 32);
+ shiftOpImmSimd("vpslld", OP2_PSLLD_UdqIb, ShiftID::vpsllx, count, src, dst);
+ }
+
+ void vpsrad_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsrad", VEX_PD, OP2_PSRAD_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsrad_ir(int32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 32);
+ shiftOpImmSimd("vpsrad", OP2_PSRAD_UdqIb, ShiftID::vpsrad, count, src, dst);
+ }
+
+ void vpsrld_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsrld", VEX_PD, OP2_PSRLD_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsrld_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 32);
+ shiftOpImmSimd("vpsrld", OP2_PSRLD_UdqIb, ShiftID::vpsrlx, count, src, dst);
+ }
+
+ void vpsllw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsllw", VEX_PD, OP2_PSLLW_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsllw_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 16);
+ shiftOpImmSimd("vpsllw", OP2_PSLLW_UdqIb, ShiftID::vpsllx, count, src, dst);
+ }
+
+ void vpsraw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsraw", VEX_PD, OP2_PSRAW_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsraw_ir(int32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 16);
+ shiftOpImmSimd("vpsraw", OP2_PSRAW_UdqIb, ShiftID::vpsrad, count, src, dst);
+ }
+
+ void vpsrlw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsrlw", VEX_PD, OP2_PSRLW_VdqWdq, src1, src0, dst);
+ }
+
+ void vpsrlw_ir(uint32_t count, XMMRegisterID src, XMMRegisterID dst) {
+ MOZ_ASSERT(count < 16);
+ shiftOpImmSimd("vpsrlw", OP2_PSRLW_UdqIb, ShiftID::vpsrlx, count, src, dst);
+ }
+
+ void vmovmskpd_rr(XMMRegisterID src, RegisterID dst) {
+ twoByteOpSimdInt32("vmovmskpd", VEX_PD, OP2_MOVMSKPD_EdVd, src, dst);
+ }
+
+ void vmovmskps_rr(XMMRegisterID src, RegisterID dst) {
+ twoByteOpSimdInt32("vmovmskps", VEX_PS, OP2_MOVMSKPD_EdVd, src, dst);
+ }
+
+ void vpmovmskb_rr(XMMRegisterID src, RegisterID dst) {
+ twoByteOpSimdInt32("vpmovmskb", VEX_PD, OP2_PMOVMSKB_EdVd, src, dst);
+ }
+
+ void vptest_rr(XMMRegisterID rhs, XMMRegisterID lhs) {
+ threeByteOpSimd("vptest", VEX_PD, OP3_PTEST_VdVd, ESCAPE_38, rhs,
+ invalid_xmm, lhs);
+ }
+
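+  // For OP2_MOVD_EdVd the general-purpose register is the ModRM r/m operand
+  // and the XMM register sits in the reg field, the reverse of the helper's
+  // usual orientation, which is presumably why the arguments below are cast
+  // and swapped.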
+ void vmovd_rr(XMMRegisterID src, RegisterID dst) {
+ twoByteOpSimdInt32("vmovd", VEX_PD, OP2_MOVD_EdVd, (XMMRegisterID)dst,
+ (RegisterID)src);
+ }
+
+ void vmovd_rr(RegisterID src, XMMRegisterID dst) {
+ twoByteOpInt32Simd("vmovd", VEX_PD, OP2_MOVD_VdEd, src, invalid_xmm, dst);
+ }
+
+ void vmovd_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_VdEd, offset, base, invalid_xmm,
+ dst);
+ }
+
+ void vmovd_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_VdEd, offset, base, index, scale,
+ invalid_xmm, dst);
+ }
+
+ void vmovd_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd_disp32("vmovd", VEX_PD, OP2_MOVD_VdEd, offset, base,
+ invalid_xmm, dst);
+ }
+
+ void vmovd_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_VdEd, address, invalid_xmm, dst);
+ }
+
+ void vmovd_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_EdVd, offset, base, invalid_xmm,
+ src);
+ }
+
+ void vmovd_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_EdVd, offset, base, index, scale,
+ invalid_xmm, src);
+ }
+
+ void vmovd_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd_disp32("vmovd", VEX_PD, OP2_MOVD_EdVd, offset, base,
+ invalid_xmm, src);
+ }
+
+ void vmovd_rm(XMMRegisterID src, const void* address) {
+ twoByteOpSimd("vmovd", VEX_PD, OP2_MOVD_EdVd, address, invalid_xmm, src);
+ }
+
+ void vmovsd_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, offset, base, invalid_xmm,
+ src);
+ }
+
+ void vmovsd_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd_disp32("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, offset, base,
+ invalid_xmm, src);
+ }
+
+ void vmovss_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, offset, base, invalid_xmm,
+ src);
+ }
+
+ void vmovss_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd_disp32("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, offset, base,
+ invalid_xmm, src);
+ }
+
+ void vmovss_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, offset, base, invalid_xmm,
+ dst);
+ }
+
+ void vmovss_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd_disp32("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, offset, base,
+ invalid_xmm, dst);
+ }
+
+ void vmovsd_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, offset, base, index,
+ scale, invalid_xmm, src);
+ }
+
+ void vmovss_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, offset, base, index,
+ scale, invalid_xmm, src);
+ }
+
+ void vmovss_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, offset, base, index,
+ scale, invalid_xmm, dst);
+ }
+
+ void vmovsd_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, offset, base, invalid_xmm,
+ dst);
+ }
+
+ void vmovsd_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd_disp32("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, offset, base,
+ invalid_xmm, dst);
+ }
+
+ void vmovsd_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, offset, base, index,
+ scale, invalid_xmm, dst);
+ }
+
+ // Note that the register-to-register form of vmovsd does not write to the
+ // entire output register. For general-purpose register-to-register moves,
+ // use vmovapd instead.
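+  // (The high lanes of the destination are merged from src0, or left
+  // untouched by the legacy encoding, so the result is never fully defined
+  // by this move alone.)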
+ void vmovsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, src1, src0, dst);
+ }
+
+ // The register-to-register form of vmovss has the same problem as vmovsd
+ // above. Prefer vmovaps for register-to-register moves.
+ void vmovss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vmovsd_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, address, invalid_xmm,
+ dst);
+ }
+
+ void vmovss_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, address, invalid_xmm,
+ dst);
+ }
+
+ void vmovups_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_VpsWps, address, invalid_xmm,
+ dst);
+ }
+
+ void vmovdqu_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_VdqWdq, address, invalid_xmm,
+ dst);
+ }
+
+ void vmovsd_rm(XMMRegisterID src, const void* address) {
+ twoByteOpSimd("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, address, invalid_xmm,
+ src);
+ }
+
+ void vmovss_rm(XMMRegisterID src, const void* address) {
+ twoByteOpSimd("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, address, invalid_xmm,
+ src);
+ }
+
+ void vmovdqa_rm(XMMRegisterID src, const void* address) {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, address, invalid_xmm,
+ src);
+ }
+
+ void vmovaps_rm(XMMRegisterID src, const void* address) {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_WsdVsd, address, invalid_xmm,
+ src);
+ }
+
+ void vmovdqu_rm(XMMRegisterID src, const void* address) {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_WdqVdq, address, invalid_xmm,
+ src);
+ }
+
+ void vmovups_rm(XMMRegisterID src, const void* address) {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_WpsVps, address, invalid_xmm,
+ src);
+ }
+
+ void vmovaps_rr(XMMRegisterID src, XMMRegisterID dst) {
+#ifdef JS_CODEGEN_X64
+ // There are two opcodes that can encode this instruction. If we have
+ // one register in [xmm8,xmm15] and one in [xmm0,xmm7], use the
+ // opcode which swaps the operands, as that way we can get a two-byte
+ // VEX in that case.
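+    // (The two-byte VEX prefix only carries VEX.R, which extends ModRM.reg;
+    // extending ModRM.rm needs VEX.B and therefore the three-byte prefix, so
+    // the swapped form keeps the high register in the reg field.)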
+ if (src >= xmm8 && dst < xmm8) {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_WsdVsd, dst, invalid_xmm,
+ src);
+ return;
+ }
+#endif
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, src, invalid_xmm, dst);
+ }
+ void vmovaps_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_WsdVsd, offset, base,
+ invalid_xmm, src);
+ }
+ void vmovaps_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_WsdVsd, offset, base, index,
+ scale, invalid_xmm, src);
+ }
+ void vmovaps_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, offset, base,
+ invalid_xmm, dst);
+ }
+ void vmovaps_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, offset, base, index,
+ scale, invalid_xmm, dst);
+ }
+
+ void vmovups_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_WpsVps, offset, base,
+ invalid_xmm, src);
+ }
+ void vmovups_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd_disp32("vmovups", VEX_PS, OP2_MOVPS_WpsVps, offset, base,
+ invalid_xmm, src);
+ }
+ void vmovups_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_WpsVps, offset, base, index,
+ scale, invalid_xmm, src);
+ }
+ void vmovups_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_VpsWps, offset, base,
+ invalid_xmm, dst);
+ }
+ void vmovups_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd_disp32("vmovups", VEX_PS, OP2_MOVPS_VpsWps, offset, base,
+ invalid_xmm, dst);
+ }
+ void vmovups_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmovups", VEX_PS, OP2_MOVPS_VpsWps, offset, base, index,
+ scale, invalid_xmm, dst);
+ }
+
+ void vmovapd_rr(XMMRegisterID src, XMMRegisterID dst) {
+#ifdef JS_CODEGEN_X64
+ // There are two opcodes that can encode this instruction. If we have
+ // one register in [xmm8,xmm15] and one in [xmm0,xmm7], use the
+ // opcode which swaps the operands, as that way we can get a two-byte
+ // VEX in that case.
+ if (src >= xmm8 && dst < xmm8) {
+ twoByteOpSimd("vmovapd", VEX_PD, OP2_MOVAPS_WsdVsd, dst, invalid_xmm,
+ src);
+ return;
+ }
+#endif
+ twoByteOpSimd("vmovapd", VEX_PD, OP2_MOVAPD_VsdWsd, src, invalid_xmm, dst);
+ }
+
+ void vmovdqu_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_WdqVdq, offset, base,
+ invalid_xmm, src);
+ }
+
+ void vmovdqu_rm_disp32(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd_disp32("vmovdqu", VEX_SS, OP2_MOVDQ_WdqVdq, offset, base,
+ invalid_xmm, src);
+ }
+
+ void vmovdqu_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_WdqVdq, offset, base, index,
+ scale, invalid_xmm, src);
+ }
+
+ void vmovdqu_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_VdqWdq, offset, base,
+ invalid_xmm, dst);
+ }
+
+ void vmovdqu_mr_disp32(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd_disp32("vmovdqu", VEX_SS, OP2_MOVDQ_VdqWdq, offset, base,
+ invalid_xmm, dst);
+ }
+
+ void vmovdqu_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmovdqu", VEX_SS, OP2_MOVDQ_VdqWdq, offset, base, index,
+ scale, invalid_xmm, dst);
+ }
+
+ void vmovdqa_rr(XMMRegisterID src, XMMRegisterID dst) {
+#ifdef JS_CODEGEN_X64
+ // There are two opcodes that can encode this instruction. If we have
+ // one register in [xmm8,xmm15] and one in [xmm0,xmm7], use the
+ // opcode which swaps the operands, as that way we can get a two-byte
+ // VEX in that case.
+ if (src >= xmm8 && dst < xmm8) {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, dst, invalid_xmm, src);
+ return;
+ }
+#endif
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, src, invalid_xmm, dst);
+ }
+
+ void vmovdqa_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, offset, base,
+ invalid_xmm, src);
+ }
+
+ void vmovdqa_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, offset, base, index,
+ scale, invalid_xmm, src);
+ }
+
+ void vmovdqa_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, offset, base,
+ invalid_xmm, dst);
+ }
+
+ void vmovdqa_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, offset, base, index,
+ scale, invalid_xmm, dst);
+ }
+
+ void vmulsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmulsd", VEX_SD, OP2_MULSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vmulss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmulss", VEX_SS, OP2_MULSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vmulsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmulsd", VEX_SD, OP2_MULSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vmulss_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmulss", VEX_SS, OP2_MULSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vpinsrw_irr(uint32_t whichWord, RegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(whichWord < 8);
+ twoByteOpImmInt32Simd("vpinsrw", VEX_PD, OP2_PINSRW, whichWord, src1, src0,
+ dst);
+ }
+ void vpinsrw_imr(unsigned lane, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ MOZ_ASSERT(lane < 16);
+ twoByteOpImmInt32Simd("vpinsrw", VEX_PD, OP2_PINSRW, lane, offset, base,
+ src0, dst);
+ }
+ void vpinsrw_imr(unsigned lane, int32_t offset, RegisterID base,
+ RegisterID index, int32_t scale, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(lane < 16);
+ twoByteOpImmInt32Simd("vpinsrw", VEX_PD, OP2_PINSRW, lane, offset, base,
+ index, scale, src0, dst);
+ }
+
+ void vpextrw_irr(uint32_t whichWord, XMMRegisterID src, RegisterID dst) {
+ MOZ_ASSERT(whichWord < 8);
+ twoByteOpImmSimdInt32("vpextrw", VEX_PD, OP2_PEXTRW_GdUdIb, whichWord, src,
+ dst);
+ }
+
+ void vpextrw_irm(unsigned lane, XMMRegisterID src, int32_t offset,
+ RegisterID base) {
+ MOZ_ASSERT(lane < 8);
+ threeByteOpImmSimdInt32("vpextrw", VEX_PD, OP3_PEXTRW_EwVdqIb, ESCAPE_3A,
+ lane, offset, base, (RegisterID)src);
+ }
+
+ void vpextrw_irm(unsigned lane, XMMRegisterID src, int32_t offset,
+ RegisterID base, RegisterID index, int scale) {
+ MOZ_ASSERT(lane < 8);
+ threeByteOpImmSimdInt32("vpextrw", VEX_PD, OP3_PEXTRW_EwVdqIb, ESCAPE_3A,
+ lane, offset, base, index, scale, (RegisterID)src);
+ }
+
+ void vsubsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsubsd", VEX_SD, OP2_SUBSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vsubss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsubss", VEX_SS, OP2_SUBSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vsubsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vsubsd", VEX_SD, OP2_SUBSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vsubss_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vsubss", VEX_SS, OP2_SUBSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vucomiss_rr(XMMRegisterID rhs, XMMRegisterID lhs) {
+ twoByteOpSimdFlags("vucomiss", VEX_PS, OP2_UCOMISD_VsdWsd, rhs, lhs);
+ }
+
+ void vucomisd_rr(XMMRegisterID rhs, XMMRegisterID lhs) {
+ twoByteOpSimdFlags("vucomisd", VEX_PD, OP2_UCOMISD_VsdWsd, rhs, lhs);
+ }
+
+ void vucomisd_mr(int32_t offset, RegisterID base, XMMRegisterID lhs) {
+ twoByteOpSimdFlags("vucomisd", VEX_PD, OP2_UCOMISD_VsdWsd, offset, base,
+ lhs);
+ }
+
+ void vdivsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vdivsd", VEX_SD, OP2_DIVSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vdivss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vdivss", VEX_SS, OP2_DIVSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vdivsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vdivsd", VEX_SD, OP2_DIVSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vdivss_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vdivss", VEX_SS, OP2_DIVSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vxorpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vxorpd", VEX_PD, OP2_XORPD_VpdWpd, src1, src0, dst);
+ }
+
+ void vorpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vorpd", VEX_PD, OP2_ORPD_VpdWpd, src1, src0, dst);
+ }
+
+ void vandpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vandpd", VEX_PD, OP2_ANDPD_VpdWpd, src1, src0, dst);
+ }
+ void vandpd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vandpd", VEX_PD, OP2_ANDPD_VpdWpd, address, src0, dst);
+ }
+
+ void vandps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vandps", VEX_PS, OP2_ANDPS_VpsWps, src1, src0, dst);
+ }
+
+ void vandps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vandps", VEX_PS, OP2_ANDPS_VpsWps, offset, base, src0, dst);
+ }
+
+ void vandps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vandps", VEX_PS, OP2_ANDPS_VpsWps, address, src0, dst);
+ }
+
+ void vandnps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vandnps", VEX_PS, OP2_ANDNPS_VpsWps, src1, src0, dst);
+ }
+
+ void vandnps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vandnps", VEX_PS, OP2_ANDNPS_VpsWps, offset, base, src0,
+ dst);
+ }
+
+ void vandnps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vandnps", VEX_PS, OP2_ANDNPS_VpsWps, address, src0, dst);
+ }
+
+ void vorps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vorps", VEX_PS, OP2_ORPS_VpsWps, src1, src0, dst);
+ }
+
+ void vorps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vorps", VEX_PS, OP2_ORPS_VpsWps, offset, base, src0, dst);
+ }
+
+ void vorps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vorps", VEX_PS, OP2_ORPS_VpsWps, address, src0, dst);
+ }
+
+ void vxorps_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vxorps", VEX_PS, OP2_XORPS_VpsWps, src1, src0, dst);
+ }
+
+ void vxorps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vxorps", VEX_PS, OP2_XORPS_VpsWps, offset, base, src0, dst);
+ }
+
+ void vxorps_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vxorps", VEX_PS, OP2_XORPS_VpsWps, address, src0, dst);
+ }
+
+ void vsqrtsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsqrtsd", VEX_SD, OP2_SQRTSD_VsdWsd, src1, src0, dst);
+ }
+
+ void vsqrtss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsqrtss", VEX_SS, OP2_SQRTSS_VssWss, src1, src0, dst);
+ }
+
+ void vroundsd_irr(RoundingMode mode, XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpImmSimd("vroundsd", VEX_PD, OP3_ROUNDSD_VsdWsd, ESCAPE_3A, mode,
+ src, invalid_xmm, dst);
+ }
+
+ void vroundss_irr(RoundingMode mode, XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpImmSimd("vroundss", VEX_PD, OP3_ROUNDSS_VsdWsd, ESCAPE_3A, mode,
+ src, invalid_xmm, dst);
+ }
+ void vroundps_irr(SSERoundingMode mode, XMMRegisterID src,
+ XMMRegisterID dst) {
+ threeByteOpImmSimd("vroundps", VEX_PD, OP3_ROUNDPS_VpsWps, ESCAPE_3A,
+ int(mode), src, invalid_xmm, dst);
+ }
+ void vroundpd_irr(SSERoundingMode mode, XMMRegisterID src,
+ XMMRegisterID dst) {
+ threeByteOpImmSimd("vroundpd", VEX_PD, OP3_ROUNDPD_VpdWpd, ESCAPE_3A,
+ int(mode), src, invalid_xmm, dst);
+ }
+
+ void vinsertps_irr(uint32_t mask, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ threeByteOpImmSimd("vinsertps", VEX_PD, OP3_INSERTPS_VpsUps, ESCAPE_3A,
+ mask, src1, src0, dst);
+ }
+ void vinsertps_imr(uint32_t mask, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpImmSimd("vinsertps", VEX_PD, OP3_INSERTPS_VpsUps, ESCAPE_3A,
+ mask, offset, base, src0, dst);
+ }
+ void vinsertps_imr(uint32_t mask, int32_t offset, RegisterID base,
+ RegisterID index, int scale, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ threeByteOpImmSimd("vinsertps", VEX_PD, OP3_INSERTPS_VpsUps, ESCAPE_3A,
+ mask, offset, base, index, scale, src0, dst);
+ }
+
+ void vmovlps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmovlps", VEX_PS, OP2_MOVLPS_VqEq, offset, base, src0, dst);
+ }
+ void vmovlps_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmovlps", VEX_PS, OP2_MOVLPS_VqEq, offset, base, index,
+ scale, src0, dst);
+ }
+ void vmovlps_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd("vmovlps", VEX_PS, OP2_MOVLPS_EqVq, offset, base, invalid_xmm,
+ src);
+ }
+ void vmovlps_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovlps", VEX_PS, OP2_MOVLPS_EqVq, offset, base, index,
+ scale, invalid_xmm, src);
+ }
+
+ void vmovhps_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmovhps", VEX_PS, OP2_MOVHPS_VqEq, offset, base, src0, dst);
+ }
+ void vmovhps_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmovhps", VEX_PS, OP2_MOVHPS_VqEq, offset, base, index,
+ scale, src0, dst);
+ }
+
+ void vmovhps_rm(XMMRegisterID src, int32_t offset, RegisterID base) {
+ twoByteOpSimd("vmovhps", VEX_PS, OP2_MOVHPS_EqVq, offset, base, invalid_xmm,
+ src);
+ }
+ void vmovhps_rm(XMMRegisterID src, int32_t offset, RegisterID base,
+ RegisterID index, int scale) {
+ twoByteOpSimd("vmovhps", VEX_PS, OP2_MOVHPS_EqVq, offset, base, index,
+ scale, invalid_xmm, src);
+ }
+
+ void vextractps_rm(unsigned lane, XMMRegisterID src, int32_t offset,
+ RegisterID base) {
+ threeByteOpImmSimd("vextractps", VEX_PD, OP3_EXTRACTPS_EdVdqIb, ESCAPE_3A,
+ lane, offset, base, invalid_xmm, src);
+ }
+ void vextractps_rm(unsigned lane, XMMRegisterID src, int32_t offset,
+ RegisterID base, RegisterID index, int scale) {
+ threeByteOpImmSimd("vextractps", VEX_PD, OP3_EXTRACTPS_EdVdqIb, ESCAPE_3A,
+ lane, offset, base, index, scale, invalid_xmm, src);
+ }
+
+ void vpblendw_irr(unsigned mask, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(mask < 256);
+ threeByteOpImmSimd("vpblendw", VEX_PD, OP3_PBLENDW_VdqWdqIb, ESCAPE_3A,
+ mask, src1, src0, dst);
+ }
+
+ void vpblendvb_rr(XMMRegisterID mask, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vblendvOpSimd("vpblendvb", OP3_PBLENDVB_VdqWdq, OP3_VPBLENDVB_VdqWdq, mask,
+ src1, src0, dst);
+ }
+
+ void vpinsrb_irr(unsigned lane, RegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(lane < 16);
+ threeByteOpImmInt32Simd("vpinsrb", VEX_PD, OP3_PINSRB_VdqEvIb, ESCAPE_3A,
+ lane, src1, src0, dst);
+ }
+ void vpinsrb_imr(unsigned lane, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ MOZ_ASSERT(lane < 16);
+ threeByteOpImmInt32Simd("vpinsrb", VEX_PD, OP3_PINSRB_VdqEvIb, ESCAPE_3A,
+ lane, offset, base, src0, dst);
+ }
+ void vpinsrb_imr(unsigned lane, int32_t offset, RegisterID base,
+ RegisterID index, int32_t scale, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(lane < 16);
+ threeByteOpImmInt32Simd("vpinsrb", VEX_PD, OP3_PINSRB_VdqEvIb, ESCAPE_3A,
+ lane, offset, base, index, scale, src0, dst);
+ }
+
+ void vpinsrd_irr(unsigned lane, RegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(lane < 4);
+ threeByteOpImmInt32Simd("vpinsrd", VEX_PD, OP3_PINSRD_VdqEvIb, ESCAPE_3A,
+ lane, src1, src0, dst);
+ }
+
+ void vpextrb_irr(unsigned lane, XMMRegisterID src, RegisterID dst) {
+ MOZ_ASSERT(lane < 16);
+ threeByteOpImmSimdInt32("vpextrb", VEX_PD, OP3_PEXTRB_EvVdqIb, ESCAPE_3A,
+ lane, (XMMRegisterID)dst, (RegisterID)src);
+ }
+
+ void vpextrb_irm(unsigned lane, XMMRegisterID src, int32_t offset,
+ RegisterID base) {
+ MOZ_ASSERT(lane < 16);
+ threeByteOpImmSimdInt32("vpextrb", VEX_PD, OP3_PEXTRB_EvVdqIb, ESCAPE_3A,
+ lane, offset, base, (RegisterID)src);
+ }
+
+ void vpextrb_irm(unsigned lane, XMMRegisterID src, int32_t offset,
+ RegisterID base, RegisterID index, int scale) {
+ MOZ_ASSERT(lane < 16);
+ threeByteOpImmSimdInt32("vpextrb", VEX_PD, OP3_PEXTRB_EvVdqIb, ESCAPE_3A,
+ lane, offset, base, index, scale, (RegisterID)src);
+ }
+
+ void vpextrd_irr(unsigned lane, XMMRegisterID src, RegisterID dst) {
+ MOZ_ASSERT(lane < 4);
+ threeByteOpImmSimdInt32("vpextrd", VEX_PD, OP3_PEXTRD_EvVdqIb, ESCAPE_3A,
+ lane, (XMMRegisterID)dst, (RegisterID)src);
+ }
+
+ void vblendps_irr(unsigned imm, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(imm < 16);
+ // Despite being a "ps" instruction, vblendps is encoded with the "pd"
+ // prefix.
+ threeByteOpImmSimd("vblendps", VEX_PD, OP3_BLENDPS_VpsWpsIb, ESCAPE_3A, imm,
+ src1, src0, dst);
+ }
+
+ void vblendps_imr(unsigned imm, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ MOZ_ASSERT(imm < 16);
+ // Despite being a "ps" instruction, vblendps is encoded with the "pd"
+ // prefix.
+ threeByteOpImmSimd("vblendps", VEX_PD, OP3_BLENDPS_VpsWpsIb, ESCAPE_3A, imm,
+ offset, base, src0, dst);
+ }
+
+ void vblendvps_rr(XMMRegisterID mask, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vblendvOpSimd("vblendvps", OP3_BLENDVPS_VdqWdq, OP3_VBLENDVPS_VdqWdq, mask,
+ src1, src0, dst);
+ }
+ void vblendvps_mr(XMMRegisterID mask, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ vblendvOpSimd("vblendvps", OP3_BLENDVPS_VdqWdq, OP3_VBLENDVPS_VdqWdq, mask,
+ offset, base, src0, dst);
+ }
+ void vblendvpd_rr(XMMRegisterID mask, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ vblendvOpSimd("vblendvpd", OP3_BLENDVPD_VdqWdq, OP3_VBLENDVPD_VdqWdq, mask,
+ src1, src0, dst);
+ }
+
+ void vmovsldup_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vmovsldup", VEX_SS, OP2_MOVSLDUP_VpsWps, src, invalid_xmm,
+ dst);
+ }
+ void vmovsldup_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovsldup", VEX_SS, OP2_MOVSLDUP_VpsWps, offset, base,
+ invalid_xmm, dst);
+ }
+
+ void vmovshdup_rr(XMMRegisterID src, XMMRegisterID dst) {
+ twoByteOpSimd("vmovshdup", VEX_SS, OP2_MOVSHDUP_VpsWps, src, invalid_xmm,
+ dst);
+ }
+ void vmovshdup_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ twoByteOpSimd("vmovshdup", VEX_SS, OP2_MOVSHDUP_VpsWps, offset, base,
+ invalid_xmm, dst);
+ }
+
+ void vminsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vminsd", VEX_SD, OP2_MINSD_VsdWsd, src1, src0, dst);
+ }
+ void vminsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vminsd", VEX_SD, OP2_MINSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vminss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vminss", VEX_SS, OP2_MINSS_VssWss, src1, src0, dst);
+ }
+
+ void vmaxsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmaxsd", VEX_SD, OP2_MAXSD_VsdWsd, src1, src0, dst);
+ }
+ void vmaxsd_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vmaxsd", VEX_SD, OP2_MAXSD_VsdWsd, offset, base, src0, dst);
+ }
+
+ void vmaxss_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vmaxss", VEX_SS, OP2_MAXSS_VssWss, src1, src0, dst);
+ }
+
+ void vpavgb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpavgb", VEX_PD, OP2_PAVGB_VdqWdq, src1, src0, dst);
+ }
+
+ void vpavgw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpavgw", VEX_PD, OP2_PAVGW_VdqWdq, src1, src0, dst);
+ }
+
+ void vpminsb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpminsb", VEX_PD, OP3_PMINSB_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpminsb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpminsb", VEX_PD, OP3_PMINSB_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
+ void vpmaxsb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmaxsb", VEX_PD, OP3_PMAXSB_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpmaxsb_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmaxsb", VEX_PD, OP3_PMAXSB_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
+ void vpminub_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpminub", VEX_PD, OP2_PMINUB_VdqWdq, src1, src0, dst);
+ }
+ void vpminub_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpminub", VEX_PD, OP2_PMINUB_VdqWdq, address, src0, dst);
+ }
+
+ void vpmaxub_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmaxub", VEX_PD, OP2_PMAXUB_VdqWdq, src1, src0, dst);
+ }
+ void vpmaxub_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmaxub", VEX_PD, OP2_PMAXUB_VdqWdq, address, src0, dst);
+ }
+
+ void vpminsw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpminsw", VEX_PD, OP2_PMINSW_VdqWdq, src1, src0, dst);
+ }
+ void vpminsw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpminsw", VEX_PD, OP2_PMINSW_VdqWdq, address, src0, dst);
+ }
+
+ void vpmaxsw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmaxsw", VEX_PD, OP2_PMAXSW_VdqWdq, src1, src0, dst);
+ }
+ void vpmaxsw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpmaxsw", VEX_PD, OP2_PMAXSW_VdqWdq, address, src0, dst);
+ }
+
+ void vpminuw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpminuw", VEX_PD, OP3_PMINUW_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpminuw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpminuw", VEX_PD, OP3_PMINUW_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
+ void vpmaxuw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmaxuw", VEX_PD, OP3_PMAXUW_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpmaxuw_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmaxuw", VEX_PD, OP3_PMAXUW_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
+ void vpminsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpminsd", VEX_PD, OP3_PMINSD_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpminsd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpminsd", VEX_PD, OP3_PMINSD_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
+ void vpmaxsd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmaxsd", VEX_PD, OP3_PMAXSD_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpmaxsd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmaxsd", VEX_PD, OP3_PMAXSD_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
+ void vpminud_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpminud", VEX_PD, OP3_PMINUD_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpminud_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpminud", VEX_PD, OP3_PMINUD_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
+ void vpmaxud_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmaxud", VEX_PD, OP3_PMAXUD_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+ void vpmaxud_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpmaxud", VEX_PD, OP3_PMAXUD_VdqWdq, ESCAPE_38, address,
+ src0, dst);
+ }
+
+ void vpacksswb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpacksswb", VEX_PD, OP2_PACKSSWB_VdqWdq, src1, src0, dst);
+ }
+ void vpacksswb_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpacksswb", VEX_PD, OP2_PACKSSWB_VdqWdq, address, src0, dst);
+ }
+
+ void vpackuswb_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpackuswb", VEX_PD, OP2_PACKUSWB_VdqWdq, src1, src0, dst);
+ }
+ void vpackuswb_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpackuswb", VEX_PD, OP2_PACKUSWB_VdqWdq, address, src0, dst);
+ }
+
+ void vpackssdw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpackssdw", VEX_PD, OP2_PACKSSDW_VdqWdq, src1, src0, dst);
+ }
+ void vpackssdw_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpackssdw", VEX_PD, OP2_PACKSSDW_VdqWdq, address, src0, dst);
+ }
+
+ void vpackusdw_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vpackusdw", VEX_PD, OP3_PACKUSDW_VdqWdq, ESCAPE_38, src1,
+ src0, dst);
+ }
+ void vpackusdw_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ threeByteOpSimd("vpackusdw", VEX_PD, OP3_PACKUSDW_VdqWdq, ESCAPE_38,
+ address, src0, dst);
+ }
+
+ void vpabsb_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vpabsb", VEX_PD, OP3_PABSB_VdqWdq, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+
+ void vpabsw_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vpabsw", VEX_PD, OP3_PABSW_VdqWdq, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+
+ void vpabsd_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vpabsd", VEX_PD, OP3_PABSD_VdqWdq, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+
+ void vpmovsxbw_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovsxbw", VEX_PD, OP3_PMOVSXBW_VdqWdq, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vpmovsxbw_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovsxbw", VEX_PD, OP3_PMOVSXBW_VdqWdq, ESCAPE_38, offset,
+ base, invalid_xmm, dst);
+ }
+ void vpmovsxbw_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovsxbw", VEX_PD, OP3_PMOVSXBW_VdqWdq, ESCAPE_38, offset,
+ base, index, scale, invalid_xmm, dst);
+ }
+
+ void vpmovzxbw_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovzxbw", VEX_PD, OP3_PMOVZXBW_VdqWdq, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vpmovzxbw_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovzxbw", VEX_PD, OP3_PMOVZXBW_VdqWdq, ESCAPE_38, offset,
+ base, invalid_xmm, dst);
+ }
+ void vpmovzxbw_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovzxbw", VEX_PD, OP3_PMOVZXBW_VdqWdq, ESCAPE_38, offset,
+ base, index, scale, invalid_xmm, dst);
+ }
+
+ void vpmovsxwd_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovsxwd", VEX_PD, OP3_PMOVSXWD_VdqWdq, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vpmovsxwd_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovsxwd", VEX_PD, OP3_PMOVSXWD_VdqWdq, ESCAPE_38, offset,
+ base, invalid_xmm, dst);
+ }
+ void vpmovsxwd_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovsxwd", VEX_PD, OP3_PMOVSXWD_VdqWdq, ESCAPE_38, offset,
+ base, index, scale, invalid_xmm, dst);
+ }
+
+ void vpmovzxwd_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovzxwd", VEX_PD, OP3_PMOVZXWD_VdqWdq, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vpmovzxwd_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovzxwd", VEX_PD, OP3_PMOVZXWD_VdqWdq, ESCAPE_38, offset,
+ base, invalid_xmm, dst);
+ }
+ void vpmovzxwd_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovzxwd", VEX_PD, OP3_PMOVZXWD_VdqWdq, ESCAPE_38, offset,
+ base, index, scale, invalid_xmm, dst);
+ }
+
+ void vpmovsxdq_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovsxwd", VEX_PD, OP3_PMOVSXDQ_VdqWdq, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vpmovsxdq_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovsxdq", VEX_PD, OP3_PMOVSXDQ_VdqWdq, ESCAPE_38, offset,
+ base, invalid_xmm, dst);
+ }
+ void vpmovsxdq_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovsxdq", VEX_PD, OP3_PMOVSXDQ_VdqWdq, ESCAPE_38, offset,
+ base, index, scale, invalid_xmm, dst);
+ }
+
+ void vpmovzxdq_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovzxwd", VEX_PD, OP3_PMOVZXDQ_VdqWdq, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vpmovzxdq_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovzxdq", VEX_PD, OP3_PMOVZXDQ_VdqWdq, ESCAPE_38, offset,
+ base, invalid_xmm, dst);
+ }
+ void vpmovzxdq_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vpmovzxdq", VEX_PD, OP3_PMOVZXDQ_VdqWdq, ESCAPE_38, offset,
+ base, index, scale, invalid_xmm, dst);
+ }
+
+ void vphaddd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ threeByteOpSimd("vphaddd", VEX_PD, OP3_PHADDD_VdqWdq, ESCAPE_38, src1, src0,
+ dst);
+ }
+
+ void vpalignr_irr(unsigned imm, XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ threeByteOpImmSimd("vpalignr", VEX_PD, OP3_PALIGNR_VdqWdqIb, ESCAPE_3A, imm,
+ src1, src0, dst);
+ }
+
+ void vpunpcklbw_rr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpcklbw", VEX_PD, OP2_PUNPCKLBW_VdqWdq, src1, src0, dst);
+ }
+ void vpunpckhbw_rr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpckhbw", VEX_PD, OP2_PUNPCKHBW_VdqWdq, src1, src0, dst);
+ }
+
+ void vpunpckldq_rr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpunpckldq_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ_VdqWdq, offset, base,
+ src0, dst);
+ }
+ void vpunpckldq_mr(const void* addr, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ_VdqWdq, addr, src0, dst);
+ }
+ void vpunpcklqdq_rr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpcklqdq", VEX_PD, OP2_PUNPCKLQDQ_VdqWdq, src1, src0,
+ dst);
+ }
+ void vpunpcklqdq_mr(int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpcklqdq", VEX_PD, OP2_PUNPCKLQDQ_VdqWdq, offset, base,
+ src0, dst);
+ }
+ void vpunpcklqdq_mr(const void* addr, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpunpcklqdq", VEX_PD, OP2_PUNPCKLQDQ_VdqWdq, addr, src0,
+ dst);
+ }
+ void vpunpckhdq_rr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpckhdq", VEX_PD, OP2_PUNPCKHDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpunpckhqdq_rr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpckhqdq", VEX_PD, OP2_PUNPCKHQDQ_VdqWdq, src1, src0,
+ dst);
+ }
+ void vpunpcklwd_rr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpcklwd", VEX_PD, OP2_PUNPCKLWD_VdqWdq, src1, src0, dst);
+ }
+ void vpunpckhwd_rr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vpunpckhwd", VEX_PD, OP2_PUNPCKHWD_VdqWdq, src1, src0, dst);
+ }
+
+ void vpaddq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpaddq", VEX_PD, OP2_PADDQ_VdqWdq, src1, src0, dst);
+ }
+ void vpsubq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpsubq", VEX_PD, OP2_PSUBQ_VdqWdq, src1, src0, dst);
+ }
+
+ void vbroadcastb_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastb", VEX_PD, OP3_VBROADCASTB_VxWx, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vbroadcastb_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastb", VEX_PD, OP3_VBROADCASTB_VxWx, ESCAPE_38,
+ offset, base, invalid_xmm, dst);
+ }
+ void vbroadcastb_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastb", VEX_PD, OP3_VBROADCASTB_VxWx, ESCAPE_38,
+ offset, base, index, scale, invalid_xmm, dst);
+ }
+ void vbroadcastw_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastw", VEX_PD, OP3_VBROADCASTW_VxWx, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vbroadcastw_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastw", VEX_PD, OP3_VBROADCASTW_VxWx, ESCAPE_38,
+ offset, base, invalid_xmm, dst);
+ }
+ void vbroadcastw_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastw", VEX_PD, OP3_VBROADCASTW_VxWx, ESCAPE_38,
+ offset, base, index, scale, invalid_xmm, dst);
+ }
+ void vbroadcastd_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastd", VEX_PD, OP3_VBROADCASTD_VxWx, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vbroadcastd_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastd", VEX_PD, OP3_VBROADCASTD_VxWx, ESCAPE_38,
+ offset, base, invalid_xmm, dst);
+ }
+ void vbroadcastd_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastd", VEX_PD, OP3_VBROADCASTD_VxWx, ESCAPE_38,
+ offset, base, index, scale, invalid_xmm, dst);
+ }
+ void vbroadcastq_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastq", VEX_PD, OP3_VBROADCASTQ_VxWx, ESCAPE_38, src,
+ invalid_xmm, dst);
+ }
+ void vbroadcastq_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastq", VEX_PD, OP3_VBROADCASTQ_VxWx, ESCAPE_38,
+ offset, base, invalid_xmm, dst);
+ }
+ void vbroadcastq_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastq", VEX_PD, OP3_VBROADCASTQ_VxWx, ESCAPE_38,
+ offset, base, index, scale, invalid_xmm, dst);
+ }
+ void vbroadcastss_rr(XMMRegisterID src, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastss", VEX_PD, OP3_VBROADCASTSS_VxWd, ESCAPE_38,
+ src, invalid_xmm, dst);
+ }
+ void vbroadcastss_mr(int32_t offset, RegisterID base, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastss", VEX_PD, OP3_VBROADCASTSS_VxWd, ESCAPE_38,
+ offset, base, invalid_xmm, dst);
+ }
+ void vbroadcastss_mr(int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID dst) {
+ threeByteOpSimd("vbroadcastss", VEX_PD, OP3_VBROADCASTSS_VxWd, ESCAPE_38,
+ offset, base, index, scale, invalid_xmm, dst);
+ }
+
+ // BMI instructions:
+
+ void sarxl_rrr(RegisterID src, RegisterID shift, RegisterID dst) {
+ spew("sarxl %s, %s, %s", GPReg32Name(src), GPReg32Name(shift),
+ GPReg32Name(dst));
+
+ RegisterID rm = src;
+ XMMRegisterID src0 = static_cast<XMMRegisterID>(shift);
+ int reg = dst;
+ m_formatter.threeByteOpVex(VEX_SS /* = F3 */, OP3_SARX_GyEyBy, ESCAPE_38,
+ rm, src0, reg);
+ }
+
+ void shlxl_rrr(RegisterID src, RegisterID shift, RegisterID dst) {
+ spew("shlxl %s, %s, %s", GPReg32Name(src), GPReg32Name(shift),
+ GPReg32Name(dst));
+
+ RegisterID rm = src;
+ XMMRegisterID src0 = static_cast<XMMRegisterID>(shift);
+ int reg = dst;
+ m_formatter.threeByteOpVex(VEX_PD /* = 66 */, OP3_SHLX_GyEyBy, ESCAPE_38,
+ rm, src0, reg);
+ }
+
+ void shrxl_rrr(RegisterID src, RegisterID shift, RegisterID dst) {
+ spew("shrxl %s, %s, %s", GPReg32Name(src), GPReg32Name(shift),
+ GPReg32Name(dst));
+
+ RegisterID rm = src;
+ XMMRegisterID src0 = static_cast<XMMRegisterID>(shift);
+ int reg = dst;
+ m_formatter.threeByteOpVex(VEX_SD /* = F2 */, OP3_SHRX_GyEyBy, ESCAPE_38,
+ rm, src0, reg);
+ }
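+
+ // In the three helpers above the F3/66/F2 prefix selects SARX/SHLX/SHRX,
+ // all in the VEX 0F38 map; the shift count is carried in VEX.vvvv (passed
+ // as src0), the value to shift is the r/m operand, and the result register
+ // is ModRM.reg.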
+
+ // FMA instructions:
+
+ void vfmadd231ps_rrr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ spew("vfmadd213ps %s, %s, %s", XMMRegName(src1), XMMRegName(src0),
+ XMMRegName(dst));
+
+ m_formatter.threeByteOpVex(VEX_PD, OP3_VFMADD231PS_VxHxWx, ESCAPE_38,
+ (RegisterID)src1, src0, (RegisterID)dst);
+ }
+
+ void vfnmadd231ps_rrr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ spew("vfnmadd213ps %s, %s, %s", XMMRegName(src1), XMMRegName(src0),
+ XMMRegName(dst));
+
+ m_formatter.threeByteOpVex(VEX_PD, OP3_VFNMADD231PS_VxHxWx, ESCAPE_38,
+ (RegisterID)src1, src0, (RegisterID)dst);
+ }
+
+ void vfmadd231pd_rrr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ spew("vfmadd213pd %s, %s, %s", XMMRegName(src1), XMMRegName(src0),
+ XMMRegName(dst));
+
+ m_formatter.threeByteOpVex64(VEX_PD, OP3_VFMADD231PD_VxHxWx, ESCAPE_38,
+ (RegisterID)src1, src0, (RegisterID)dst);
+ }
+
+ void vfnmadd231pd_rrr(XMMRegisterID src1, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ spew("vfnmadd213pd %s, %s, %s", XMMRegName(src1), XMMRegName(src0),
+ XMMRegName(dst));
+
+ m_formatter.threeByteOpVex64(VEX_PD, OP3_VFNMADD231PD_VxHxWx, ESCAPE_38,
+ (RegisterID)src1, src0, (RegisterID)dst);
+ }
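+
+ // The 231 forms compute dst = src0 * src1 + dst (negated product for the
+ // vfnmadd variants): dst travels in ModRM.reg, src0 in VEX.vvvv, and src1
+ // as the r/m operand. FMA3 instructions are VEX-only, so there is no
+ // legacy SSE fallback here.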
+
+ // Misc instructions:
+
+ void int3() {
+ spew("int3");
+ m_formatter.oneByteOp(OP_INT3);
+ }
+
+ void ud2() {
+ spew("ud2");
+ m_formatter.twoByteOp(OP2_UD2);
+ }
+
+ void ret() {
+ spew("ret");
+ m_formatter.oneByteOp(OP_RET);
+ }
+
+ void ret_i(int32_t imm) {
+ spew("ret $%d", imm);
+ m_formatter.oneByteOp(OP_RET_Iz);
+ m_formatter.immediate16u(imm);
+ }
+
+ void lfence() {
+ spew("lfence");
+ m_formatter.twoByteOp(OP_FENCE, (RegisterID)0, 0b101);
+ }
+ void mfence() {
+ spew("mfence");
+ m_formatter.twoByteOp(OP_FENCE, (RegisterID)0, 0b110);
+ }
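+
+ // lfence and mfence share a single two-byte opcode (0F AE); the ModRM reg
+ // field passed above selects the operation, /5 for lfence and /6 for
+ // mfence.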
+
+ // Assembler admin methods:
+
+ JmpDst label() {
+ JmpDst r = JmpDst(m_formatter.size());
+ spew(".set .Llabel%d, .", r.offset());
+ return r;
+ }
+
+ size_t currentOffset() const { return m_formatter.size(); }
+
+ static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0) {
+ return JmpDst(jump.offset() + offset);
+ }
+
+ void haltingAlign(int alignment) {
+ spew(".balign %d, 0x%x # hlt", alignment, unsigned(OP_HLT));
+ while (!m_formatter.isAligned(alignment)) {
+ m_formatter.oneByteOp(OP_HLT);
+ }
+ }
+
+ void nopAlign(int alignment) {
+ spew(".balign %d", alignment);
+
+ int remainder = m_formatter.size() % alignment;
+ if (remainder > 0) {
+ insert_nop(alignment - remainder);
+ }
+ }
+
+ void jumpTablePointer(uintptr_t ptr) {
+#ifdef JS_CODEGEN_X64
+ spew(".quad 0x%" PRIxPTR, ptr);
+#else
+ spew(".int 0x%" PRIxPTR, ptr);
+#endif
+ m_formatter.jumpTablePointer(ptr);
+ }
+
+ void doubleConstant(double d) {
+ spew(".double %.16g", d);
+ m_formatter.doubleConstant(d);
+ }
+ void floatConstant(float f) {
+ spew(".float %.16g", f);
+ m_formatter.floatConstant(f);
+ }
+
+ void simd128Constant(const void* data) {
+ const uint32_t* dw = reinterpret_cast<const uint32_t*>(data);
+ spew(".int 0x%08x,0x%08x,0x%08x,0x%08x", dw[0], dw[1], dw[2], dw[3]);
+ MOZ_ASSERT(m_formatter.isAligned(16));
+ m_formatter.simd128Constant(data);
+ }
+
+ void int32Constant(int32_t i) {
+ spew(".int %d", i);
+ m_formatter.int32Constant(i);
+ }
+ void int64Constant(int64_t i) {
+ spew(".quad %lld", (long long)i);
+ m_formatter.int64Constant(i);
+ }
+
+ // Linking & patching:
+
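+ // Until a jump is bound, the 32-bit field at the end of the jump
+ // instruction (the four bytes ending at the JmpSrc offset) is reused to
+ // hold a link to the next unbound jump in the chain, with -1 terminating
+ // the chain; nextJump()/setNextJump() walk and extend that chain, and
+ // linkJump() finally writes the real rel32 displacement.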
+ void assertValidJmpSrc(JmpSrc src) {
+ // The target offset is stored at offset - 4.
+ MOZ_RELEASE_ASSERT(src.offset() > int32_t(sizeof(int32_t)));
+ MOZ_RELEASE_ASSERT(size_t(src.offset()) <= size());
+ }
+
+ bool nextJump(const JmpSrc& from, JmpSrc* next) {
+ // Sanity check - if the assembler has OOM'd, it will start overwriting
+ // its internal buffer and thus our links could be garbage.
+ if (oom()) {
+ return false;
+ }
+
+ assertValidJmpSrc(from);
+ MOZ_ASSERT(from.trailing() == 0);
+
+ const unsigned char* code = m_formatter.data();
+ int32_t offset = GetInt32(code + from.offset());
+ if (offset == -1) {
+ return false;
+ }
+
+ MOZ_RELEASE_ASSERT(size_t(offset) < size(), "nextJump bogus offset");
+
+ *next = JmpSrc(offset);
+ return true;
+ }
+ void setNextJump(const JmpSrc& from, const JmpSrc& to) {
+ // Sanity check - if the assembler has OOM'd, it will start overwriting
+ // its internal buffer and thus our links could be garbage.
+ if (oom()) {
+ return;
+ }
+
+ assertValidJmpSrc(from);
+ MOZ_ASSERT(from.trailing() == 0);
+ MOZ_RELEASE_ASSERT(to.offset() == -1 || size_t(to.offset()) <= size());
+
+ unsigned char* code = m_formatter.data();
+ SetInt32(code + from.offset(), to.offset());
+ }
+
+ void linkJump(JmpSrc from, JmpDst to) {
+ MOZ_ASSERT(from.offset() != -1);
+ MOZ_ASSERT(to.offset() != -1);
+
+ // Sanity check - if the assembler has OOM'd, it will start overwriting
+ // its internal buffer and thus our links could be garbage.
+ if (oom()) {
+ return;
+ }
+
+ assertValidJmpSrc(from);
+ MOZ_RELEASE_ASSERT(size_t(to.offset()) <= size());
+
+ spew(".set .Lfrom%d, .Llabel%d", from.offset(), to.offset());
+ unsigned char* code = m_formatter.data();
+ SetRel32(code + from.offset(), code + to.offset(), from.trailing());
+ }
+
+ void executableCopy(void* dst) {
+ const unsigned char* src = m_formatter.buffer();
+ memcpy(dst, src, size());
+ }
+ [[nodiscard]] bool appendRawCode(const uint8_t* code, size_t numBytes) {
+ return m_formatter.append(code, numBytes);
+ }
+
+ // `offset` is the instruction offset at the end of the instruction.
+ void addToPCRel4(uint32_t offset, int32_t bias) {
+ unsigned char* code = m_formatter.data();
+ SetInt32(code + offset, GetInt32(code + offset) + bias);
+ }
+
+ protected:
+ static bool CAN_SIGN_EXTEND_8_32(int32_t value) {
+ return value == (int32_t)(int8_t)value;
+ }
+ static bool CAN_SIGN_EXTEND_16_32(int32_t value) {
+ return value == (int32_t)(int16_t)value;
+ }
+ static bool CAN_ZERO_EXTEND_8_32(int32_t value) {
+ return value == (int32_t)(uint8_t)value;
+ }
+ static bool CAN_ZERO_EXTEND_8H_32(int32_t value) {
+ return value == (value & 0xff00);
+ }
+ static bool CAN_ZERO_EXTEND_16_32(int32_t value) {
+ return value == (int32_t)(uint16_t)value;
+ }
+ static bool CAN_ZERO_EXTEND_32_64(int32_t value) { return value >= 0; }
+
+ // Methods for encoding SIMD instructions via either legacy SSE encoding or
+ // VEX encoding.
+
+ bool useLegacySSEEncoding(XMMRegisterID src0, XMMRegisterID dst) {
+ // If we don't have AVX or it's disabled, use the legacy SSE encoding.
+ if (!useVEX_) {
+ MOZ_ASSERT(
+ src0 == invalid_xmm || src0 == dst,
+ "Legacy SSE (pre-AVX) encoding requires the output register to be "
+ "the same as the src0 input register");
+ return true;
+ }
+
+ // If src0 is the same as the output register, we might as well use
+ // the legacy SSE encoding, since it is smaller. However, this is only
+ // beneficial as long as we're not using ymm registers anywhere.
+ return src0 == dst;
+ }
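+
+ // For example, vpaddq_rr(xmm1, xmm0, xmm0) always takes the shorter
+ // two-operand SSE encoding because src0 == dst, while
+ // vpaddq_rr(xmm1, xmm2, xmm0) requires AVX and the three-operand VEX form.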
+
+ bool useLegacySSEEncodingForVblendv(XMMRegisterID mask, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ // Similar to useLegacySSEEncoding, but for vblendv the Legacy SSE
+ // encoding also requires the mask to be in xmm0.
+
+ if (!useVEX_) {
+ MOZ_ASSERT(
+ src0 == dst,
+ "Legacy SSE (pre-AVX) encoding requires the output register to be "
+ "the same as the src0 input register");
+ MOZ_ASSERT(
+ mask == xmm0,
+ "Legacy SSE (pre-AVX) encoding for blendv requires the mask to be "
+ "in xmm0");
+ return true;
+ }
+
+ return src0 == dst && mask == xmm0;
+ }
+
+ bool useLegacySSEEncodingAlways() { return !useVEX_; }
+
+ const char* legacySSEOpName(const char* name) {
+ MOZ_ASSERT(name[0] == 'v');
+ return name + 1;
+ }
+
+ void twoByteOpSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, XMMRegisterID rm,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst),
+ XMMRegName(rm));
+ } else {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm),
+ XMMRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", name, XMMRegName(dst), XMMRegName(rm));
+ } else {
+ spew("%-11s%s, %s", name, XMMRegName(rm), XMMRegName(dst));
+ }
+ } else {
+ spew("%-11s%s, %s, %s", name, XMMRegName(rm), XMMRegName(src0),
+ XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, src0, dst);
+ }
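+
+ // The remaining *Simd helpers below all follow this shape: spew and emit
+ // the legacy prefix + 0F form when useLegacySSEEncoding() permits it,
+ // otherwise the VEX form, with src0 == invalid_xmm marking operations that
+ // have no separate second source operand.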
+
+ void twoByteOpImmSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, uint32_t imm, XMMRegisterID rm,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, XMMRegName(rm),
+ XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ spew("%-11s$0x%x, %s, %s", name, imm, XMMRegName(rm), XMMRegName(dst));
+ } else {
+ spew("%-11s$0x%x, %s, %s, %s", name, imm, XMMRegName(rm),
+ XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, " MEM_ob, legacySSEOpName(name), XMMRegName(dst),
+ ADDR_ob(offset, base));
+ } else {
+ spew("%-11s" MEM_ob ", %s", legacySSEOpName(name),
+ ADDR_ob(offset, base), XMMRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, " MEM_ob, name, XMMRegName(dst), ADDR_ob(offset, base));
+ } else {
+ spew("%-11s" MEM_ob ", %s", name, ADDR_ob(offset, base),
+ XMMRegName(dst));
+ }
+ } else {
+ spew("%-11s" MEM_ob ", %s, %s", name, ADDR_ob(offset, base),
+ XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, src0, dst);
+ }
+
+ void twoByteOpSimd_disp32(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, int32_t offset,
+ RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, " MEM_o32b, legacySSEOpName(name), XMMRegName(dst),
+ ADDR_o32b(offset, base));
+ } else {
+ spew("%-11s" MEM_o32b ", %s", legacySSEOpName(name),
+ ADDR_o32b(offset, base), XMMRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp_disp32(opcode, offset, base, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, " MEM_o32b, name, XMMRegName(dst),
+ ADDR_o32b(offset, base));
+ } else {
+ spew("%-11s" MEM_o32b ", %s", name, ADDR_o32b(offset, base),
+ XMMRegName(dst));
+ }
+ } else {
+ spew("%-11s" MEM_o32b ", %s, %s", name, ADDR_o32b(offset, base),
+ XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex_disp32(ty, opcode, offset, base, src0, dst);
+ }
+
+ void twoByteOpImmSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, uint32_t imm, int32_t offset,
+ RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", legacySSEOpName(name), imm,
+ ADDR_ob(offset, base), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_ob ", %s, %s", name, imm, ADDR_ob(offset, base),
+ XMMRegName(src0), XMMRegName(dst));
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID index, int scale, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, " MEM_obs, legacySSEOpName(name), XMMRegName(dst),
+ ADDR_obs(offset, base, index, scale));
+ } else {
+ spew("%-11s" MEM_obs ", %s", legacySSEOpName(name),
+ ADDR_obs(offset, base, index, scale), XMMRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, index, scale, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, " MEM_obs, name, XMMRegName(dst),
+ ADDR_obs(offset, base, index, scale));
+ } else {
+ spew("%-11s" MEM_obs ", %s", name, ADDR_obs(offset, base, index, scale),
+ XMMRegName(dst));
+ }
+ } else {
+ spew("%-11s" MEM_obs ", %s, %s", name,
+ ADDR_obs(offset, base, index, scale), XMMRegName(src0),
+ XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, index, scale, src0, dst);
+ }
+
+ void twoByteOpSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, const void* address,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %p", legacySSEOpName(name), XMMRegName(dst), address);
+ } else {
+ spew("%-11s%p, %s", legacySSEOpName(name), address, XMMRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, address, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %p", name, XMMRegName(dst), address);
+ } else {
+ spew("%-11s%p, %s", name, address, XMMRegName(dst));
+ }
+ } else {
+ spew("%-11s%p, %s, %s", name, address, XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, address, src0, dst);
+ }
+
+ void twoByteOpImmSimd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, uint32_t imm,
+ const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, %p, %s", legacySSEOpName(name), imm, address,
+ XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, address, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, %p, %s, %s", name, imm, address, XMMRegName(src0),
+ XMMRegName(dst));
+ m_formatter.twoByteOpVex(ty, opcode, address, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpInt32Simd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, RegisterID rm,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst),
+ GPReg32Name(rm));
+ } else {
+ spew("%-11s%s, %s", legacySSEOpName(name), GPReg32Name(rm),
+ XMMRegName(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, rm, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", name, XMMRegName(dst), GPReg32Name(rm));
+ } else {
+ spew("%-11s%s, %s", name, GPReg32Name(rm), XMMRegName(dst));
+ }
+ } else {
+ spew("%-11s%s, %s, %s", name, GPReg32Name(rm), XMMRegName(src0),
+ XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, rm, src0, dst);
+ }
+
+ void twoByteOpSimdInt32(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, XMMRegisterID rm,
+ RegisterID dst) {
+ if (useLegacySSEEncodingAlways()) {
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", legacySSEOpName(name), GPReg32Name(dst),
+ XMMRegName(rm));
+ } else if (opcode == OP2_MOVD_EdVd) {
+ spew("%-11s%s, %s", legacySSEOpName(name),
+ XMMRegName((XMMRegisterID)dst), GPReg32Name((RegisterID)rm));
+ } else {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm),
+ GPReg32Name(dst));
+ }
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, dst);
+ return;
+ }
+
+ if (IsXMMReversedOperands(opcode)) {
+ spew("%-11s%s, %s", name, GPReg32Name(dst), XMMRegName(rm));
+ } else if (opcode == OP2_MOVD_EdVd) {
+ spew("%-11s%s, %s", name, XMMRegName((XMMRegisterID)dst),
+ GPReg32Name((RegisterID)rm));
+ } else {
+ spew("%-11s%s, %s", name, XMMRegName(rm), GPReg32Name(dst));
+ }
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, invalid_xmm, dst);
+ }
+
+ void twoByteOpImmSimdInt32(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, uint32_t imm,
+ XMMRegisterID rm, RegisterID dst) {
+ if (useLegacySSEEncodingAlways()) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, XMMRegName(rm),
+ GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, %s, %s", name, imm, XMMRegName(rm), GPReg32Name(dst));
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, invalid_xmm, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpImmInt32Simd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, uint32_t imm,
+ RegisterID rm, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncodingAlways()) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg32Name(rm),
+ XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, rm, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, %s, %s", name, imm, GPReg32Name(rm), XMMRegName(dst));
+ m_formatter.twoByteOpVex(ty, opcode, rm, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpImmInt32Simd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, uint32_t imm,
+ int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncodingAlways()) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", legacySSEOpName(name), imm,
+ ADDR_ob(offset, base), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_ob ", %s, %s", name, imm, ADDR_ob(offset, base),
+ XMMRegName(src0), XMMRegName(dst));
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpImmInt32Simd(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, uint32_t imm,
+ int32_t offset, RegisterID base, RegisterID index,
+ int scale, XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncodingAlways()) {
+ spew("%-11s$0x%x, " MEM_obs ", %s", legacySSEOpName(name), imm,
+ ADDR_obs(offset, base, index, scale), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, index, scale, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_obs ", %s, %s", name, imm,
+ ADDR_obs(offset, base, index, scale), XMMRegName(src0),
+ XMMRegName(dst));
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, index, scale, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void twoByteOpSimdFlags(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, XMMRegisterID rm,
+ XMMRegisterID reg) {
+ if (useLegacySSEEncodingAlways()) {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm),
+ XMMRegName(reg));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, (RegisterID)rm, reg);
+ return;
+ }
+
+ spew("%-11s%s, %s", name, XMMRegName(rm), XMMRegName(reg));
+ m_formatter.twoByteOpVex(ty, opcode, (RegisterID)rm, invalid_xmm,
+ (XMMRegisterID)reg);
+ }
+
+ void twoByteOpSimdFlags(const char* name, VexOperandType ty,
+ TwoByteOpcodeID opcode, int32_t offset,
+ RegisterID base, XMMRegisterID reg) {
+ if (useLegacySSEEncodingAlways()) {
+ spew("%-11s" MEM_ob ", %s", legacySSEOpName(name), ADDR_ob(offset, base),
+ XMMRegName(reg));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp(opcode, offset, base, reg);
+ return;
+ }
+
+ spew("%-11s" MEM_ob ", %s", name, ADDR_ob(offset, base), XMMRegName(reg));
+ m_formatter.twoByteOpVex(ty, opcode, offset, base, invalid_xmm,
+ (XMMRegisterID)reg);
+ }
+
+ void threeByteOpSimd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ XMMRegisterID rm, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm),
+ XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, (RegisterID)rm, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ spew("%-11s%s, %s", name, XMMRegName(rm), XMMRegName(dst));
+ } else {
+ spew("%-11s%s, %s, %s", name, XMMRegName(rm), XMMRegName(src0),
+ XMMRegName(dst));
+ }
+ m_formatter.threeByteOpVex(ty, opcode, escape, (RegisterID)rm, src0, dst);
+ }
+
+ void threeByteOpImmSimd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, XMMRegisterID rm, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, XMMRegName(rm),
+ XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, (RegisterID)rm, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ spew("%-11s$0x%x, %s, %s", name, imm, XMMRegName(rm), XMMRegName(dst));
+ } else {
+ spew("%-11s$0x%x, %s, %s, %s", name, imm, XMMRegName(rm),
+ XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.threeByteOpVex(ty, opcode, escape, (RegisterID)rm, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpSimd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s" MEM_ob ", %s", legacySSEOpName(name), ADDR_ob(offset, base),
+ XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ spew("%-11s" MEM_ob ", %s", name, ADDR_ob(offset, base), XMMRegName(dst));
+ } else {
+ spew("%-11s" MEM_ob ", %s, %s", name, ADDR_ob(offset, base),
+ XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, src0, dst);
+ }
+
+ void threeByteOpSimd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s" MEM_obs ", %s", legacySSEOpName(name),
+ ADDR_obs(offset, base, index, scale), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, index, scale, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ spew("%-11s" MEM_obs ", %s", name, ADDR_obs(offset, base, index, scale),
+ XMMRegName(dst));
+ } else {
+ spew("%-11s" MEM_obs ", %s, %s", name,
+ ADDR_obs(offset, base, index, scale), XMMRegName(src0),
+ XMMRegName(dst));
+ }
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, index, scale,
+ src0, dst);
+ }
+
+ void threeByteOpImmSimd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", legacySSEOpName(name), imm,
+ ADDR_ob(offset, base), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", name, imm, ADDR_ob(offset, base),
+ XMMRegName(dst));
+ } else {
+ spew("%-11s$0x%x, " MEM_ob ", %s, %s", name, imm, ADDR_ob(offset, base),
+ XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmSimd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, int32_t offset, RegisterID base,
+ RegisterID index, int scale, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, " MEM_obs ", %s", legacySSEOpName(name), imm,
+ ADDR_obs(offset, base, index, scale), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, index, scale, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ spew("%-11s$0x%x, " MEM_obs ", %s", name, imm,
+ ADDR_obs(offset, base, index, scale), XMMRegName(dst));
+ } else {
+ spew("%-11s$0x%x, " MEM_obs ", %s, %s", name, imm,
+ ADDR_obs(offset, base, index, scale), XMMRegName(src0),
+ XMMRegName(dst));
+ }
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, index, scale,
+ src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpSimd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s%p, %s", legacySSEOpName(name), address, XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, address, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ spew("%-11s%p, %s", name, address, XMMRegName(dst));
+ } else {
+ spew("%-11s%p, %s, %s", name, address, XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.threeByteOpVex(ty, opcode, escape, address, src0, dst);
+ }
+
+ void threeByteOpImmInt32Simd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, RegisterID src1,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg32Name(src1),
+ XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, src1, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, %s, %s, %s", name, imm, GPReg32Name(src1),
+ XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, src1, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmInt32Simd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, int32_t offset, RegisterID base,
+ XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", legacySSEOpName(name), imm,
+ ADDR_ob(offset, base), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_ob ", %s, %s", name, imm, ADDR_ob(offset, base),
+ XMMRegName(src0), XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmInt32Simd(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, int32_t offset, RegisterID base,
+ RegisterID index, int scale, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src0, dst)) {
+ spew("%-11s$0x%x, " MEM_obs ", %s", legacySSEOpName(name), imm,
+ ADDR_obs(offset, base, index, scale), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, index, scale, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_obs ", %s, %s", name, imm,
+ ADDR_obs(offset, base, index, scale), XMMRegName(src0),
+ XMMRegName(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, index, scale,
+ src0, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmSimdInt32(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, XMMRegisterID src,
+ RegisterID dst) {
+ if (useLegacySSEEncodingAlways()) {
+ spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, XMMRegName(src),
+ GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, (RegisterID)src, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ if (opcode == OP3_PEXTRD_EvVdqIb) {
+ spew("%-11s$0x%x, %s, %s", name, imm, XMMRegName((XMMRegisterID)dst),
+ GPReg32Name((RegisterID)src));
+ } else {
+ spew("%-11s$0x%x, %s, %s", name, imm, XMMRegName(src), GPReg32Name(dst));
+ }
+ m_formatter.threeByteOpVex(ty, opcode, escape, (RegisterID)src, invalid_xmm,
+ dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmSimdInt32(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, int32_t offset, RegisterID base,
+ RegisterID dst) {
+ if (useLegacySSEEncodingAlways()) {
+ spew("%-11s$0x%x, " MEM_ob ", %s", legacySSEOpName(name), imm,
+ ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_ob ", %s", name, imm, ADDR_ob(offset, base),
+ GPReg32Name(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, invalid_xmm,
+ dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ void threeByteOpImmSimdInt32(const char* name, VexOperandType ty,
+ ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ uint32_t imm, int32_t offset, RegisterID base,
+ RegisterID index, int scale, RegisterID dst) {
+ if (useLegacySSEEncodingAlways()) {
+ spew("%-11s$0x%x, " MEM_obs ", %s", legacySSEOpName(name), imm,
+ ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.threeByteOp(opcode, escape, offset, base, index, scale, dst);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$0x%x, " MEM_obs ", %s", name, imm,
+ ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.threeByteOpVex(ty, opcode, escape, offset, base, index, scale,
+ invalid_xmm, dst);
+ m_formatter.immediate8u(imm);
+ }
+
+ // Blendv is a three-byte op, but the VEX encoding has a different opcode
+ // than the SSE encoding, so we handle it specially.
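+ // (The legacy BLENDVPS/BLENDVPD/PBLENDVB forms take xmm0 as an implicit
+ // mask operand, while the VEX forms name the mask register explicitly in
+ // an extra immediate byte, hence the separate vblendvOpVex() path and the
+ // xmm0 requirement in useLegacySSEEncodingForVblendv().)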
+ void vblendvOpSimd(const char* name, ThreeByteOpcodeID opcode,
+ ThreeByteOpcodeID vexOpcode, XMMRegisterID mask,
+ XMMRegisterID rm, XMMRegisterID src0, XMMRegisterID dst) {
+ if (useLegacySSEEncodingForVblendv(mask, src0, dst)) {
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm),
+ XMMRegName(dst));
+ // Even though a "ps" instruction, vblendv is encoded with the "pd"
+ // prefix.
+ m_formatter.legacySSEPrefix(VEX_PD);
+ m_formatter.threeByteOp(opcode, ESCAPE_38, (RegisterID)rm, dst);
+ return;
+ }
+
+ spew("%-11s%s, %s, %s, %s", name, XMMRegName(mask), XMMRegName(rm),
+ XMMRegName(src0), XMMRegName(dst));
+ // Even though a "ps" instruction, vblendv is encoded with the "pd" prefix.
+ m_formatter.vblendvOpVex(VEX_PD, vexOpcode, ESCAPE_3A, mask, (RegisterID)rm,
+ src0, dst);
+ }
+
+ void vblendvOpSimd(const char* name, ThreeByteOpcodeID opcode,
+ ThreeByteOpcodeID vexOpcode, XMMRegisterID mask,
+ int32_t offset, RegisterID base, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncodingForVblendv(mask, src0, dst)) {
+ spew("%-11s" MEM_ob ", %s", legacySSEOpName(name), ADDR_ob(offset, base),
+ XMMRegName(dst));
+ // Even though a "ps" instruction, vblendv is encoded with the "pd"
+ // prefix.
+ m_formatter.legacySSEPrefix(VEX_PD);
+ m_formatter.threeByteOp(opcode, ESCAPE_38, offset, base, dst);
+ return;
+ }
+
+ spew("%-11s%s, " MEM_ob ", %s, %s", name, XMMRegName(mask),
+ ADDR_ob(offset, base), XMMRegName(src0), XMMRegName(dst));
+ // Even though a "ps" instruction, vblendv is encoded with the "pd" prefix.
+ m_formatter.vblendvOpVex(VEX_PD, vexOpcode, ESCAPE_3A, mask, offset, base,
+ src0, dst);
+ }
+
+ void shiftOpImmSimd(const char* name, TwoByteOpcodeID opcode,
+ ShiftID shiftKind, uint32_t imm, XMMRegisterID src,
+ XMMRegisterID dst) {
+ if (useLegacySSEEncoding(src, dst)) {
+ spew("%-11s$%d, %s", legacySSEOpName(name), int32_t(imm),
+ XMMRegName(dst));
+ m_formatter.legacySSEPrefix(VEX_PD);
+ m_formatter.twoByteOp(opcode, (RegisterID)dst, (int)shiftKind);
+ m_formatter.immediate8u(imm);
+ return;
+ }
+
+ spew("%-11s$%d, %s, %s", name, int32_t(imm), XMMRegName(src),
+ XMMRegName(dst));
+ // For shift instructions, destination is stored in vvvv field.
+ m_formatter.twoByteOpVex(VEX_PD, opcode, (RegisterID)src, dst,
+ (int)shiftKind);
+ m_formatter.immediate8u(imm);
+ }
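+
+ // The immediate-count vector shifts use the ModRM reg field as an opcode
+ // extension (shiftKind above), leaving only r/m and VEX.vvvv for
+ // registers: the legacy form shifts dst in place through r/m, while the
+ // VEX form reads src from r/m and names dst in vvvv.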
+
+ class X86InstructionFormatter {
+ public:
+ // Legacy prefix bytes:
+ //
+ // These are emitted prior to the instruction.
+
+ void prefix(OneByteOpcodeID pre) { m_buffer.putByte(pre); }
+
+ void legacySSEPrefix(VexOperandType ty) {
+ switch (ty) {
+ case VEX_PS:
+ break;
+ case VEX_PD:
+ prefix(PRE_SSE_66);
+ break;
+ case VEX_SS:
+ prefix(PRE_SSE_F3);
+ break;
+ case VEX_SD:
+ prefix(PRE_SSE_F2);
+ break;
+ }
+ }
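+
+ // (These prefixes, none/66/F3/F2, are the same selectors that the VEX
+ // encodings fold into the two-bit VEX.pp field.)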
+
+ /* clang-format off */
+ //
+ // Word-sized operands / no operand instruction formatters.
+ //
+ // In addition to the opcode, the following operand permutations are supported:
+ // * None - instruction takes no operands.
+ // * One register - the low three bits of the RegisterID are added into the opcode.
+ // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
+ // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
+ // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
+ //
+ // For 32-bit x86 targets, the address operand may also be provided as a
+ // void*. On 64-bit targets REX prefixes will be planted as necessary,
+ // where high numbered registers are used.
+ //
+ // The twoByteOp methods plant two-byte Intel instruction sequences
+ // (first opcode byte 0x0F).
+ //
+ /* clang-format on */
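+
+ // For example, oneByteOp(opcode, offset, base, reg) below emits an
+ // optional REX prefix, the opcode byte, a ModRM byte selecting
+ // base-plus-displacement addressing, and then the displacement itself
+ // (memoryModRM() picks the 8-bit or 32-bit form).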
+
+ void oneByteOp(OneByteOpcodeID opcode) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, RegisterID rm, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int32_t offset, RegisterID base,
+ int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, int32_t offset,
+ RegisterID base, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID index, int scale, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, int32_t offset,
+ RegisterID index, int scale, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, index, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, index, scale, reg);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, const void* address, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(address, reg);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, const void* address,
+ int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(address, reg);
+ }
+#ifdef JS_CODEGEN_X64
+ void oneByteRipOp(OneByteOpcodeID opcode, int ripOffset, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+
+ void oneByteRipOp64(OneByteOpcodeID opcode, int ripOffset, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+
+ void twoByteRipOp(TwoByteOpcodeID opcode, int ripOffset, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+
+ void twoByteRipOpVex(VexOperandType ty, TwoByteOpcodeID opcode,
+ int ripOffset, XMMRegisterID src0, XMMRegisterID reg) {
+ int r = (reg >> 3), x = 0, b = 0;
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+#endif
+
+ void twoByteOp(TwoByteOpcodeID opcode) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(0, 0, reg);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, RegisterID rm, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOpVex(VexOperandType ty, TwoByteOpcodeID opcode, RegisterID rm,
+ XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = (rm >> 3);
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int32_t offset, RegisterID base,
+ int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void twoByteOpVex(VexOperandType ty, TwoByteOpcodeID opcode, int32_t offset,
+ RegisterID base, XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = (base >> 3);
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void twoByteOp_disp32(TwoByteOpcodeID opcode, int32_t offset,
+ RegisterID base, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void twoByteOpVex_disp32(VexOperandType ty, TwoByteOpcodeID opcode,
+ int32_t offset, RegisterID base,
+ XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = (base >> 3);
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID index, int scale, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void twoByteOpVex(VexOperandType ty, TwoByteOpcodeID opcode, int32_t offset,
+ RegisterID base, RegisterID index, int scale,
+ XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = (index >> 3), b = (base >> 3);
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, const void* address, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(address, reg);
+ }
+
+ void twoByteOpVex(VexOperandType ty, TwoByteOpcodeID opcode,
+ const void* address, XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = 0;
+ int m = 1; // 0x0F
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(address, reg);
+ }
+
+ void threeByteOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ RegisterID rm, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(escape);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void threeByteOpVex(VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, RegisterID rm,
+ XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = (rm >> 3);
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38:
+ m = 2;
+ break;
+ case ESCAPE_3A:
+ m = 3;
+ break;
+ default:
+ MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ registerModRM(rm, reg);
+ }
+
+ void threeByteOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ int32_t offset, RegisterID base, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(escape);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void threeByteOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ int32_t offset, RegisterID base, RegisterID index,
+ int32_t scale, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(escape);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void threeByteOpVex(VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, int32_t offset, RegisterID base,
+ XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = (base >> 3);
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38:
+ m = 2;
+ break;
+ case ESCAPE_3A:
+ m = 3;
+ break;
+ default:
+ MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void threeByteOpVex(VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, int32_t offset, RegisterID base,
+ RegisterID index, int scale, XMMRegisterID src0,
+ int reg) {
+ int r = (reg >> 3), x = (index >> 3), b = (base >> 3);
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38:
+ m = 2;
+ break;
+ case ESCAPE_3A:
+ m = 3;
+ break;
+ default:
+ MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void threeByteOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ const void* address, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(escape);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(address, reg);
+ }
+
+ void threeByteRipOp(ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ int ripOffset, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIfNeeded(reg, 0, 0);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(escape);
+ m_buffer.putByteUnchecked(opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+
+ void threeByteOpVex(VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, const void* address,
+ XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = 0;
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38:
+ m = 2;
+ break;
+ case ESCAPE_3A:
+ m = 3;
+ break;
+ default:
+ MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(address, reg);
+ }
+
+ void threeByteRipOpVex(VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, int ripOffset,
+ XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = 0;
+ int m = 0;
+ switch (escape) {
+ case ESCAPE_38:
+ m = 2;
+ break;
+ case ESCAPE_3A:
+ m = 3;
+ break;
+ default:
+ MOZ_CRASH("unexpected escape");
+ }
+ int w = 0, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+ m_buffer.putIntUnchecked(ripOffset);
+ }
+
+ void vblendvOpVex(VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, XMMRegisterID mask, RegisterID rm,
+ XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = (rm >> 3);
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38:
+ m = 2;
+ break;
+ case ESCAPE_3A:
+ m = 3;
+ break;
+ default:
+ MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ registerModRM(rm, reg);
+ immediate8u(mask << 4);
+ }
+
+ void vblendvOpVex(VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, XMMRegisterID mask,
+ int32_t offset, RegisterID base, XMMRegisterID src0,
+ int reg) {
+ int r = (reg >> 3), x = 0, b = (base >> 3);
+ int m = 0, w = 0, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38:
+ m = 2;
+ break;
+ case ESCAPE_3A:
+ m = 3;
+ break;
+ default:
+ MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ memoryModRM(offset, base, reg);
+ immediate8u(mask << 4);
+ }
+
+#ifdef JS_CODEGEN_X64
+ // Quad-word-sized operands:
+ //
+ // Used to format 64-bit operations, planting a REX.w prefix. When
+ // planting d64 or f64 instructions, not requiring a REX.w prefix, the
+ // normal (non-'64'-postfixed) formatters should be used.
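+ //
+ // For example, `movq %rax, %rcx` is encoded as a REX.W prefix (0x48)
+ // followed by 0x89 /r, i.e. 48 89 c1, whereas the 32-bit
+ // `movl %eax, %ecx` is just 89 c1 with no prefix.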
+
+ void oneByteOp64(OneByteOpcodeID opcode) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(0, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, RegisterID rm, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int32_t offset, RegisterID base,
+ int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void oneByteOp64_disp32(OneByteOpcodeID opcode, int32_t offset,
+ RegisterID base, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID index, int scale, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, const void* address, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(address, reg);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(0, 0, reg);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, RegisterID rm, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, int offset, RegisterID base,
+ int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, int offset, RegisterID base,
+ RegisterID index, int scale, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, const void* address, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, 0);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(address, reg);
+ }
+
+ void twoByteOpVex64(VexOperandType ty, TwoByteOpcodeID opcode,
+ RegisterID rm, XMMRegisterID src0, XMMRegisterID reg) {
+ int r = (reg >> 3), x = 0, b = (rm >> 3);
+ int m = 1; // 0x0F
+ int w = 1, v = src0, l = 0;
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ registerModRM(rm, reg);
+ }
+
+ void threeByteOp64(ThreeByteOpcodeID opcode, ThreeByteEscape escape,
+ RegisterID rm, int reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(escape);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+#endif // JS_CODEGEN_X64
+
+ void threeByteOpVex64(VexOperandType ty, ThreeByteOpcodeID opcode,
+ ThreeByteEscape escape, RegisterID rm,
+ XMMRegisterID src0, int reg) {
+ int r = (reg >> 3), x = 0, b = (rm >> 3);
+ int m = 0, w = 1, v = src0, l = 0;
+ switch (escape) {
+ case ESCAPE_38:
+ m = 2;
+ break;
+ case ESCAPE_3A:
+ m = 3;
+ break;
+ default:
+ MOZ_CRASH("unexpected escape");
+ }
+ threeOpVex(ty, r, x, b, m, w, v, l, opcode);
+ registerModRM(rm, reg);
+ }
+
+ // Byte-operands:
+ //
+ // These methods format byte operations. Byte operations differ from
+ // the normal formatters in the circumstances under which they will
+ // decide to emit REX prefixes. These should be used where any register
+ // operand signifies a byte register.
+ //
+ // The distinction is due to the handling of register numbers in the
+ // range 4..7 on x86-64. These register numbers may either represent
+ // the second byte of the first four registers (ah..bh) or the first
+ // byte of the second four registers (spl..dil).
+ //
+ // Address operands should still be checked using regRequiresRex(),
+ // while byteRegRequiresRex() is provided to check byte register
+ // operands.
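+ //
+ // For example, in 64-bit mode 40 88 27 encodes `movb %spl, (%rdi)`,
+ // while the same instruction bytes without the REX prefix, 88 27,
+ // encode `movb %ah, (%rdi)`: register number 4 names spl only when a
+ // REX prefix is present.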
+
+ void oneByteOp8(OneByteOpcodeID opcode) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, RegisterID r) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(r), 0, 0, r);
+ m_buffer.putByteUnchecked(opcode + (r & 7));
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, RegisterID rm,
+ GroupOpcodeID groupOp) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, groupOp);
+ }
+
+ // Like oneByteOp8, but never emits a REX prefix.
+ void oneByteOp8_norex(OneByteOpcodeID opcode, HRegisterID rm,
+ GroupOpcodeID groupOp) {
+ MOZ_ASSERT(!regRequiresRex(RegisterID(rm)));
+ m_buffer.ensureSpace(MaxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(RegisterID(rm), groupOp);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg), reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void oneByteOp8_disp32(OneByteOpcodeID opcode, int32_t offset,
+ RegisterID base, RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg), reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(offset, base, reg);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID index, int scale, RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg), reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, const void* address,
+ RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg), reg, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(address, reg);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, RegisterID rm, RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(base), reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, reg);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, int32_t offset, RegisterID base,
+ RegisterID index, int scale, RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(base) ||
+ regRequiresRex(index),
+ reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(offset, base, index, scale, reg);
+ }
+
+ // Like twoByteOp8 but doesn't add a REX prefix if the destination reg
+ // is in esp..edi. This may be used when the destination is not an 8-bit
+ // register (as in a movzbl instruction), so it doesn't need a REX
+ // prefix to disambiguate it from ah..bh.
+ void twoByteOp8_movx(TwoByteOpcodeID opcode, RegisterID rm,
+ RegisterID reg) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(regRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, reg);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, RegisterID rm,
+ GroupOpcodeID groupOp) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(rm, groupOp);
+ }
+
+ // Immediates:
+ //
+ // An immediate should be appended where appropriate after an op has
+ // been emitted. The writes are unchecked since the opcode formatters
+ // above will have ensured space.
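+ //
+ // For example, `addl $1, %ecx` can be encoded with opcode 0x83 /0 and a
+ // sign-extended 8-bit immediate (immediate8s), whereas `addl $1000, %ecx`
+ // needs opcode 0x81 /0 followed by a full 32-bit immediate (immediate32).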
+
+ // A signed 8-bit immediate.
+ MOZ_ALWAYS_INLINE void immediate8s(int32_t imm) {
+ MOZ_ASSERT(CAN_SIGN_EXTEND_8_32(imm));
+ m_buffer.putByteUnchecked(imm);
+ }
+
+ // An unsigned 8-bit immediate.
+ MOZ_ALWAYS_INLINE void immediate8u(uint32_t imm) {
+ MOZ_ASSERT(CAN_ZERO_EXTEND_8_32(imm));
+ m_buffer.putByteUnchecked(int32_t(imm));
+ }
+
+ // An 8-bit immediate which is either signed or unsigned, for use in
+ // instructions which actually only operate on 8 bits.
+ MOZ_ALWAYS_INLINE void immediate8(int32_t imm) {
+ m_buffer.putByteUnchecked(imm);
+ }
+
+ // A signed 16-bit immediate.
+ MOZ_ALWAYS_INLINE void immediate16s(int32_t imm) {
+ MOZ_ASSERT(CAN_SIGN_EXTEND_16_32(imm));
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ // An unsigned 16-bit immediate.
+ MOZ_ALWAYS_INLINE void immediate16u(int32_t imm) {
+ MOZ_ASSERT(CAN_ZERO_EXTEND_16_32(imm));
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ // A 16-bit immediate which is either signed or unsigned, for use in
+ // instructions which actually only operate on 16 bits.
+ MOZ_ALWAYS_INLINE void immediate16(int32_t imm) {
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ MOZ_ALWAYS_INLINE void immediate32(int32_t imm) {
+ m_buffer.putIntUnchecked(imm);
+ }
+
+ MOZ_ALWAYS_INLINE void immediate64(int64_t imm) {
+ m_buffer.putInt64Unchecked(imm);
+ }
+
+ [[nodiscard]] MOZ_ALWAYS_INLINE JmpSrc immediateRel32() {
+ m_buffer.putIntUnchecked(0);
+ return JmpSrc(m_buffer.size());
+ }
+
+ // Data:
+
+ void jumpTablePointer(uintptr_t ptr) {
+ m_buffer.ensureSpace(sizeof(uintptr_t));
+#ifdef JS_CODEGEN_X64
+ m_buffer.putInt64Unchecked(ptr);
+#else
+ m_buffer.putIntUnchecked(ptr);
+#endif
+ }
+
+ void doubleConstant(double d) {
+ m_buffer.ensureSpace(sizeof(double));
+ m_buffer.putInt64Unchecked(mozilla::BitwiseCast<uint64_t>(d));
+ }
+
+ void floatConstant(float f) {
+ m_buffer.ensureSpace(sizeof(float));
+ m_buffer.putIntUnchecked(mozilla::BitwiseCast<uint32_t>(f));
+ }
+
+ void simd128Constant(const void* data) {
+ const uint8_t* bytes = reinterpret_cast<const uint8_t*>(data);
+ m_buffer.ensureSpace(16);
+ for (size_t i = 0; i < 16; ++i) {
+ m_buffer.putByteUnchecked(bytes[i]);
+ }
+ }
+
+ void int64Constant(int64_t i) {
+ m_buffer.ensureSpace(sizeof(int64_t));
+ m_buffer.putInt64Unchecked(i);
+ }
+
+ void int32Constant(int32_t i) {
+ m_buffer.ensureSpace(sizeof(int32_t));
+ m_buffer.putIntUnchecked(i);
+ }
+
+ // Administrative methods:
+
+ size_t size() const { return m_buffer.size(); }
+ const unsigned char* buffer() const { return m_buffer.buffer(); }
+ unsigned char* data() { return m_buffer.data(); }
+ bool oom() const { return m_buffer.oom(); }
+ bool reserve(size_t size) { return m_buffer.reserve(size); }
+ bool swapBuffer(wasm::Bytes& other) { return m_buffer.swap(other); }
+ bool isAligned(int alignment) const {
+ return m_buffer.isAligned(alignment);
+ }
+
+ [[nodiscard]] bool append(const unsigned char* values, size_t size) {
+ return m_buffer.append(values, size);
+ }
+
+ private:
+ // Internals; ModRm and REX formatters.
+
+ // Byte operand registers spl & above require a REX prefix, which
+ // precludes use of the h registers in the same instruction.
+ static bool byteRegRequiresRex(RegisterID reg) {
+#ifdef JS_CODEGEN_X64
+ return reg >= rsp;
+#else
+ return false;
+#endif
+ }
+
+ // For non-byte sizes, registers r8 & above always require a REX prefix.
+ static bool regRequiresRex(RegisterID reg) {
+#ifdef JS_CODEGEN_X64
+ return reg >= r8;
+#else
+ return false;
+#endif
+ }
+
+#ifdef JS_CODEGEN_X64
+ // Format a REX prefix byte.
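+ // The byte has the form 0100WRXB: W selects a 64-bit operand size, while
+ // R, X and B extend the ModRM reg, SIB index, and ModRM rm / SIB base
+ // fields. For example, emitRex(true, 0, 0, 0) emits 0x48 (REX.W) and
+ // emitRex(false, r8, 0, 0) emits 0x44 (REX.R).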
+ void emitRex(bool w, int r, int x, int b) {
+ m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r >> 3) << 2) |
+ ((x >> 3) << 1) | (b >> 3));
+ }
+
+ // Used to plant a REX byte with REX.w set (for 64-bit operations).
+ void emitRexW(int r, int x, int b) { emitRex(true, r, x, b); }
+
+ // Used for operations with byte operands - use byteRegRequiresRex() to
+ // check register operands, regRequiresRex() to check other registers
+ // (i.e. address base & index).
+ //
+ // NB: WebKit's use of emitRexIf() is limited such that the
+ // regRequiresRex() checks are not needed. SpiderMonkey extends
+ // oneByteOp8 and twoByteOp8 functionality such that r, x, and b
+ // can all be used.
+ void emitRexIf(bool condition, int r, int x, int b) {
+ if (condition || regRequiresRex(RegisterID(r)) ||
+ regRequiresRex(RegisterID(x)) || regRequiresRex(RegisterID(b))) {
+ emitRex(false, r, x, b);
+ }
+ }
+
+ // Used for word sized operations, will plant a REX prefix if necessary
+ // (if any register is r8 or above).
+ void emitRexIfNeeded(int r, int x, int b) { emitRexIf(false, r, x, b); }
+#else
+ // No REX prefix bytes on 32-bit x86.
+ void emitRexIf(bool condition, int, int, int) {
+ MOZ_ASSERT(!condition, "32-bit x86 should never use a REX prefix");
+ }
+ void emitRexIfNeeded(int, int, int) {}
+#endif
+
+ void putModRm(ModRmMode mode, RegisterID rm, int reg) {
+ m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
+ }
+
+ void putModRmSib(ModRmMode mode, RegisterID base, RegisterID index,
+ int scale, int reg) {
+ MOZ_ASSERT(mode != ModRmRegister);
+
+ putModRm(mode, hasSib, reg);
+ m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
+ }
+
+ void registerModRM(RegisterID rm, int reg) {
+ putModRm(ModRmRegister, rm, reg);
+ }
+
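+ // For example, `movl %eax, 8(%rbx)` is encoded as 89 43 08 (ModRM with
+ // mod=01 and an 8-bit displacement), whereas `movl %eax, 8(%rsp)` needs
+ // a SIB byte, 89 44 24 08, because rm=100 selects SIB rather than rsp.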
+ void memoryModRM(int32_t offset, RegisterID base, int reg) {
+ // A base of esp or r12 would be interpreted as a sib, so force a
+ // sib with no index & put the base in there.
+#ifdef JS_CODEGEN_X64
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ if (!offset) { // No need to check if the base is noBase, since we know
+ // it is hasSib!
+ putModRmSib(ModRmMemoryNoDisp, base, noIndex, 0, reg);
+ } else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, base, noIndex, 0, reg);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, base, noIndex, 0, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+ } else {
+#ifdef JS_CODEGEN_X64
+ if (!offset && (base != noBase) && (base != noBase2)) {
+#else
+ if (!offset && (base != noBase)) {
+#endif
+ putModRm(ModRmMemoryNoDisp, base, reg);
+ } else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRm(ModRmMemoryDisp8, base, reg);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, base, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+ }
+
+ void memoryModRM_disp32(int32_t offset, RegisterID base, int reg) {
+ // A base of esp or r12 would be interpreted as a sib, so force a
+ // sib with no index & put the base in there.
+#ifdef JS_CODEGEN_X64
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp32, base, noIndex, 0, reg);
+ m_buffer.putIntUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, base, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+ void memoryModRM(int32_t offset, RegisterID base, RegisterID index,
+ int scale, int reg) {
+ MOZ_ASSERT(index != noIndex);
+
+#ifdef JS_CODEGEN_X64
+ if (!offset && (base != noBase) && (base != noBase2)) {
+#else
+ if (!offset && (base != noBase)) {
+#endif
+ putModRmSib(ModRmMemoryNoDisp, base, index, scale, reg);
+ } else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, base, index, scale, reg);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, base, index, scale, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+ void memoryModRM_disp32(int32_t offset, RegisterID index, int scale,
+ int reg) {
+ MOZ_ASSERT(index != noIndex);
+
+ // NB: the base-less memoryModRM overloads generate different code
+ // than the base-full memoryModRM overloads in the base == noBase
+ // case. The base-less overloads assume that the desired effective
+ // address is:
+ //
+ // reg := [scaled index] + disp32
+ //
+ // which means the mod needs to be ModRmMemoryNoDisp. The base-full
+ // overloads pass ModRmMemoryDisp32 in all cases and thus, when
+ // base == noBase (== ebp), the effective address is:
+ //
+ // reg := [scaled index] + disp32 + [ebp]
+ //
+ // See Intel developer manual, Vol 2, 2.1.5, Table 2-3.
+ putModRmSib(ModRmMemoryNoDisp, noBase, index, scale, reg);
+ m_buffer.putIntUnchecked(offset);
+ }
+
+ void memoryModRM_disp32(const void* address, int reg) {
+ int32_t disp = AddressImmediate(address);
+
+#ifdef JS_CODEGEN_X64
+ // On x86-64, non-RIP-relative absolute mode requires a SIB.
+ putModRmSib(ModRmMemoryNoDisp, noBase, noIndex, 0, reg);
+#else
+ // On x86-32, mod=00 with rm=ebp (noBase) is decoded as an absolute
+ // disp32, so noBase + ModRmMemoryNoDisp effectively means
+ // noBase + ModRmMemoryDisp32.
+ putModRm(ModRmMemoryNoDisp, noBase, reg);
+#endif
+ m_buffer.putIntUnchecked(disp);
+ }
+
+ void memoryModRM(const void* address, int reg) {
+ memoryModRM_disp32(address, reg);
+ }
+
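+ // Emit a VEX prefix followed by the opcode byte. The compact two-byte
+ // form (C5) can be used only when the X, B and W bits are clear and the
+ // leading opcode map is 0F (m == 1); otherwise the three-byte form (C4)
+ // is emitted. For example, `vaddps %xmm3, %xmm2, %xmm1` encodes as
+ // C5 E8 58 CB using the two-byte form.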
+ void threeOpVex(VexOperandType p, int r, int x, int b, int m, int w, int v,
+ int l, int opcode) {
+ m_buffer.ensureSpace(MaxInstructionSize);
+
+ if (v == invalid_xmm) {
+ v = XMMRegisterID(0);
+ }
+
+ if (x == 0 && b == 0 && m == 1 && w == 0) {
+ // Two byte VEX.
+ m_buffer.putByteUnchecked(PRE_VEX_C5);
+ m_buffer.putByteUnchecked(((r << 7) | (v << 3) | (l << 2) | p) ^ 0xf8);
+ } else {
+ // Three byte VEX.
+ m_buffer.putByteUnchecked(PRE_VEX_C4);
+ m_buffer.putByteUnchecked(((r << 7) | (x << 6) | (b << 5) | m) ^ 0xe0);
+ m_buffer.putByteUnchecked(((w << 7) | (v << 3) | (l << 2) | p) ^ 0x78);
+ }
+
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+
+ bool useVEX_;
+};
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_BaseAssembler_x86_shared_h */
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
new file mode 100644
index 0000000000..3b1730599f
--- /dev/null
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -0,0 +1,3883 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/CodeGenerator-x86-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/RangeAnalysis.h"
+#include "jit/ReciprocalMulConstants.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "util/DifferentialTesting.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Abs;
+using mozilla::DebugOnly;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+
+using JS::GenericNaN;
+
+namespace js {
+namespace jit {
+
+CodeGeneratorX86Shared::CodeGeneratorX86Shared(MIRGenerator* gen,
+ LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {}
+
+#ifdef JS_PUNBOX64
+Operand CodeGeneratorX86Shared::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToOperand(input.value());
+}
+#else
+Register64 CodeGeneratorX86Shared::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToRegister64(input);
+}
+#endif
+
+void OutOfLineBailout::accept(CodeGeneratorX86Shared* codegen) {
+ codegen->visitOutOfLineBailout(this);
+}
+
+void CodeGeneratorX86Shared::emitBranch(Assembler::Condition cond,
+ MBasicBlock* mirTrue,
+ MBasicBlock* mirFalse,
+ Assembler::NaNCond ifNaN) {
+ if (ifNaN == Assembler::NaN_IsFalse) {
+ jumpToBlock(mirFalse, Assembler::Parity);
+ } else if (ifNaN == Assembler::NaN_IsTrue) {
+ jumpToBlock(mirTrue, Assembler::Parity);
+ }
+
+ if (isNextBlock(mirFalse->lir())) {
+ jumpToBlock(mirTrue, cond);
+ } else {
+ jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+}
+
+void CodeGenerator::visitDouble(LDouble* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitFloat32(LFloat32* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
+ Register input = ToRegister(test->input());
+ masm.test32(input, input);
+ emitBranch(Assembler::NonZero, test->ifTrue(), test->ifFalse());
+}
+
+void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
+ const LAllocation* opd = test->input();
+
+ // vucomisd flags:
+ // Z P C
+ // ---------
+ // NaN 1 1 1
+ // > 0 0 0
+ // < 0 0 1
+ // = 1 0 0
+ //
+ // NaN is falsey, so comparing against 0 and then using the Z flag is
+ // enough to determine which branch to take.
+ ScratchDoubleScope scratch(masm);
+ masm.zeroDouble(scratch);
+ masm.vucomisd(scratch, ToFloatRegister(opd));
+ emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
+}
+
+void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
+ const LAllocation* opd = test->input();
+ // vucomiss flags are the same as doubles; see comment above
+ {
+ ScratchFloat32Scope scratch(masm);
+ masm.zeroFloat32(scratch);
+ masm.vucomiss(scratch, ToFloatRegister(opd));
+ }
+ emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
+}
+
+void CodeGeneratorX86Shared::emitCompare(MCompare::CompareType type,
+ const LAllocation* left,
+ const LAllocation* right) {
+#ifdef JS_CODEGEN_X64
+ if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol ||
+ type == MCompare::Compare_UIntPtr ||
+ type == MCompare::Compare_RefOrNull) {
+ if (right->isConstant()) {
+ MOZ_ASSERT(type == MCompare::Compare_UIntPtr);
+ masm.cmpPtr(ToRegister(left), Imm32(ToInt32(right)));
+ } else {
+ masm.cmpPtr(ToRegister(left), ToOperand(right));
+ }
+ return;
+ }
+#endif
+
+ if (right->isConstant()) {
+ masm.cmp32(ToRegister(left), Imm32(ToInt32(right)));
+ } else {
+ masm.cmp32(ToRegister(left), ToOperand(right));
+ }
+}
+
+void CodeGenerator::visitCompare(LCompare* comp) {
+ MCompare* mir = comp->mir();
+ emitCompare(mir->compareType(), comp->left(), comp->right());
+ masm.emitSet(JSOpToCondition(mir->compareType(), comp->jsop()),
+ ToRegister(comp->output()));
+}
+
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+ MCompare* mir = comp->cmpMir();
+ emitCompare(mir->compareType(), comp->left(), comp->right());
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+ emitBranch(cond, comp->ifTrue(), comp->ifFalse());
+}
+
+void CodeGenerator::visitCompareD(LCompareD* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+
+ Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
+ if (comp->mir()->operandsAreNeverNaN()) {
+ nanCond = Assembler::NaN_HandledByCond;
+ }
+
+ masm.compareDouble(cond, lhs, rhs);
+ masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
+ ToRegister(comp->output()), nanCond);
+}
+
+void CodeGenerator::visitCompareF(LCompareF* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+
+ Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
+ if (comp->mir()->operandsAreNeverNaN()) {
+ nanCond = Assembler::NaN_HandledByCond;
+ }
+
+ masm.compareFloat(cond, lhs, rhs);
+ masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
+ ToRegister(comp->output()), nanCond);
+}
+
+void CodeGenerator::visitNotI(LNotI* ins) {
+ masm.cmp32(ToRegister(ins->input()), Imm32(0));
+ masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitNotD(LNotD* ins) {
+ FloatRegister opd = ToFloatRegister(ins->input());
+
+ // Not returns true if the input is a NaN. We don't have to worry about
+ // it if we know the input is never NaN though.
+ Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
+ if (ins->mir()->operandIsNeverNaN()) {
+ nanCond = Assembler::NaN_HandledByCond;
+ }
+
+ ScratchDoubleScope scratch(masm);
+ masm.zeroDouble(scratch);
+ masm.compareDouble(Assembler::DoubleEqualOrUnordered, opd, scratch);
+ masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
+}
+
+void CodeGenerator::visitNotF(LNotF* ins) {
+ FloatRegister opd = ToFloatRegister(ins->input());
+
+ // Not returns true if the input is a NaN. We don't have to worry about
+ // it if we know the input is never NaN though.
+ Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
+ if (ins->mir()->operandIsNeverNaN()) {
+ nanCond = Assembler::NaN_HandledByCond;
+ }
+
+ ScratchFloat32Scope scratch(masm);
+ masm.zeroFloat32(scratch);
+ masm.compareFloat(Assembler::DoubleEqualOrUnordered, opd, scratch);
+ masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
+}
+
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+
+ Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
+ if (comp->cmpMir()->operandsAreNeverNaN()) {
+ nanCond = Assembler::NaN_HandledByCond;
+ }
+
+ masm.compareDouble(cond, lhs, rhs);
+ emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
+ comp->ifFalse(), nanCond);
+}
+
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+
+ Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
+ if (comp->cmpMir()->operandsAreNeverNaN()) {
+ nanCond = Assembler::NaN_HandledByCond;
+ }
+
+ masm.compareFloat(cond, lhs, rhs);
+ emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
+ comp->ifFalse(), nanCond);
+}
+
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (ins->arg()->isConstant()) {
+ masm.storePtr(ImmWord(ToInt32(ins->arg())), dst);
+ } else if (ins->arg()->isGeneralReg()) {
+ masm.storePtr(ToRegister(ins->arg()), dst);
+ } else {
+ switch (mir->input()->type()) {
+ case MIRType::Double:
+ masm.storeDouble(ToFloatRegister(ins->arg()), dst);
+ return;
+ case MIRType::Float32:
+ masm.storeFloat32(ToFloatRegister(ins->arg()), dst);
+ return;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ masm.storeUnalignedSimd128(ToFloatRegister(ins->arg()), dst);
+ return;
+#endif
+ default:
+ break;
+ }
+ MOZ_CRASH("unexpected mir type in WasmStackArg");
+ }
+}
+
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg())) {
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ } else {
+ masm.store64(ToRegister64(ins->arg()), dst);
+ }
+}
+
+void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ Operand falseExpr = ToOperand(ins->falseExpr());
+
+ masm.test32(cond, cond);
+
+ if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+ if (mirType == MIRType::Int32) {
+ masm.cmovz32(falseExpr, out);
+ } else {
+ masm.cmovzPtr(falseExpr, out);
+ }
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+
+ Label done;
+ masm.j(Assembler::NonZero, &done);
+
+ if (mirType == MIRType::Float32) {
+ if (falseExpr.kind() == Operand::FPREG) {
+ masm.moveFloat32(ToFloatRegister(ins->falseExpr()), out);
+ } else {
+ masm.loadFloat32(falseExpr, out);
+ }
+ } else if (mirType == MIRType::Double) {
+ if (falseExpr.kind() == Operand::FPREG) {
+ masm.moveDouble(ToFloatRegister(ins->falseExpr()), out);
+ } else {
+ masm.loadDouble(falseExpr, out);
+ }
+ } else if (mirType == MIRType::Simd128) {
+ if (falseExpr.kind() == Operand::FPREG) {
+ masm.moveSimd128(ToFloatRegister(ins->falseExpr()), out);
+ } else {
+ masm.loadUnalignedSimd128(falseExpr, out);
+ }
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+#ifdef DEBUG
+ MIRType from = ins->input()->type();
+#endif
+
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.vmovd(ToFloatRegister(lir->input()), ToRegister(lir->output()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.vmovd(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
+ const MAsmJSLoadHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+ AnyRegister out = ToAnyRegister(ins->output());
+
+ Scalar::Type accessType = mir->accessType();
+
+ OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
+ if (mir->needsBoundsCheck()) {
+ ool = new (alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
+ addOutOfLineCode(ool, mir);
+
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ToRegister(ptr),
+ ToRegister(boundsCheckLimit), ool->entry());
+ }
+
+ Operand srcAddr = toMemoryAccessOperand(ins, 0);
+ masm.wasmLoad(mir->access(), srcAddr, out);
+
+ if (ool) {
+ masm.bind(ool->rejoin());
+ }
+}
+
+void CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(
+ OutOfLineLoadTypedArrayOutOfBounds* ool) {
+ switch (ool->viewType()) {
+ case Scalar::Int64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::Simd128:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ case Scalar::Float32:
+ masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
+ break;
+ case Scalar::Float64:
+ masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Uint8Clamped:
+ Register destReg = ool->dest().gpr();
+ masm.mov(ImmWord(0), destReg);
+ break;
+ }
+ masm.jmp(ool->rejoin());
+}
+
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
+ const MAsmJSStoreHeap* mir = ins->mir();
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* value = ins->value();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ Scalar::Type accessType = mir->accessType();
+ canonicalizeIfDeterministic(accessType, value);
+
+ Label rejoin;
+ if (mir->needsBoundsCheck()) {
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ToRegister(ptr),
+ ToRegister(boundsCheckLimit), &rejoin);
+ }
+
+ Operand dstAddr = toMemoryAccessOperand(ins, 0);
+ masm.wasmStore(mir->access(), ToAnyRegister(value), dstAddr);
+
+ if (rejoin.used()) {
+ masm.bind(&rejoin);
+ }
+}
+
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
+ if (base != out) {
+ masm.move32(base, out);
+ }
+ masm.add32(Imm32(mir->offset()), out);
+ OutOfLineAbortingWasmTrap* ool = new (alloc())
+ OutOfLineAbortingWasmTrap(mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
+ addOutOfLineCode(ool, mir);
+ masm.j(Assembler::CarrySet, ool->entry());
+}
+
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register64 base = ToRegister64(lir->base());
+ Register64 out = ToOutRegister64(lir);
+
+ if (base != out) {
+ masm.move64(base, out);
+ }
+ masm.add64(Imm64(mir->offset()), out);
+ OutOfLineAbortingWasmTrap* ool = new (alloc())
+ OutOfLineAbortingWasmTrap(mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
+ addOutOfLineCode(ool, mir);
+ masm.j(Assembler::CarrySet, ool->entry());
+}
+
+void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType inputType = mir->input()->type();
+
+ MOZ_ASSERT(inputType == MIRType::Double || inputType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ if (mir->isUnsigned()) {
+ if (inputType == MIRType::Double) {
+ masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (inputType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ if (mir->isSaturating()) {
+ masm.bind(ool->rejoin());
+ }
+ return;
+ }
+
+ if (inputType == MIRType::Double) {
+ masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (inputType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+bool CodeGeneratorX86Shared::generateOutOfLineCode() {
+ if (!CodeGeneratorShared::generateOutOfLineCode()) {
+ return false;
+ }
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+ masm.push(Imm32(frameSize()));
+
+ TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.jump(handler);
+ }
+
+ return !masm.oom();
+}
+
+class BailoutJump {
+ Assembler::Condition cond_;
+
+ public:
+ explicit BailoutJump(Assembler::Condition cond) : cond_(cond) {}
+#ifdef JS_CODEGEN_X86
+ void operator()(MacroAssembler& masm, uint8_t* code) const {
+ masm.j(cond_, ImmPtr(code), RelocationKind::HARDCODED);
+ }
+#endif
+ void operator()(MacroAssembler& masm, Label* label) const {
+ masm.j(cond_, label);
+ }
+};
+
+class BailoutLabel {
+ Label* label_;
+
+ public:
+ explicit BailoutLabel(Label* label) : label_(label) {}
+#ifdef JS_CODEGEN_X86
+ void operator()(MacroAssembler& masm, uint8_t* code) const {
+ masm.retarget(label_, ImmPtr(code), RelocationKind::HARDCODED);
+ }
+#endif
+ void operator()(MacroAssembler& masm, Label* label) const {
+ masm.retarget(label_, label);
+ }
+};
+
+template <typename T>
+void CodeGeneratorX86Shared::bailout(const T& binder, LSnapshot* snapshot) {
+ encode(snapshot);
+
+ // All bailout code is associated with the bytecodeSite of the block we are
+ // bailing out from.
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ binder(masm, ool->entry());
+}
+
+void CodeGeneratorX86Shared::bailoutIf(Assembler::Condition condition,
+ LSnapshot* snapshot) {
+ bailout(BailoutJump(condition), snapshot);
+}
+
+void CodeGeneratorX86Shared::bailoutIf(Assembler::DoubleCondition condition,
+ LSnapshot* snapshot) {
+ MOZ_ASSERT(Assembler::NaNCondFromDoubleCondition(condition) ==
+ Assembler::NaN_HandledByCond);
+ bailoutIf(Assembler::ConditionFromDoubleCondition(condition), snapshot);
+}
+
+void CodeGeneratorX86Shared::bailoutFrom(Label* label, LSnapshot* snapshot) {
+ MOZ_ASSERT_IF(!masm.oom(), label->used() && !label->bound());
+ bailout(BailoutLabel(label), snapshot);
+}
+
+void CodeGeneratorX86Shared::bailout(LSnapshot* snapshot) {
+ Label label;
+ masm.jump(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+void CodeGeneratorX86Shared::visitOutOfLineBailout(OutOfLineBailout* ool) {
+ masm.push(Imm32(ool->snapshot()->snapshotOffset()));
+ masm.jmp(&deoptLabel_);
+}
+
+void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+#ifdef DEBUG
+ FloatRegister output = ToFloatRegister(ins->output());
+ MOZ_ASSERT(first == output);
+#endif
+
+ bool handleNaN = !ins->mir()->range() || ins->mir()->range()->canBeNaN();
+
+ if (ins->mir()->isMax()) {
+ masm.maxDouble(second, first, handleNaN);
+ } else {
+ masm.minDouble(second, first, handleNaN);
+ }
+}
+
+void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+#ifdef DEBUG
+ FloatRegister output = ToFloatRegister(ins->output());
+ MOZ_ASSERT(first == output);
+#endif
+
+ bool handleNaN = !ins->mir()->range() || ins->mir()->range()->canBeNaN();
+
+ if (ins->mir()->isMax()) {
+ masm.maxFloat32(second, first, handleNaN);
+ } else {
+ masm.minFloat32(second, first, handleNaN);
+ }
+}
+
+void CodeGenerator::visitClzI(LClzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ bool knownNotZero = ins->mir()->operandIsNeverZero();
+
+ masm.clz32(input, output, knownNotZero);
+}
+
+void CodeGenerator::visitCtzI(LCtzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ bool knownNotZero = ins->mir()->operandIsNeverZero();
+
+ masm.ctz32(input, output, knownNotZero);
+}
+
+void CodeGenerator::visitPopcntI(LPopcntI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ Register temp =
+ ins->temp0()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp0());
+
+ masm.popcnt32(input, output, temp);
+}
+
+void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ ScratchDoubleScope scratch(masm);
+
+ Label done, sqrt;
+
+ if (!ins->mir()->operandIsNeverNegativeInfinity()) {
+ // Branch if not -Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
+
+ Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
+ if (ins->mir()->operandIsNeverNaN()) {
+ cond = Assembler::DoubleNotEqual;
+ }
+ masm.branchDouble(cond, input, scratch, &sqrt);
+
+ // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.zeroDouble(output);
+ masm.subDouble(scratch, output);
+ masm.jump(&done);
+
+ masm.bind(&sqrt);
+ }
+
+ if (!ins->mir()->operandIsNeverNegativeZero()) {
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
+ masm.zeroDouble(scratch);
+ masm.addDouble(input, scratch);
+ masm.vsqrtsd(scratch, output, output);
+ } else {
+ masm.vsqrtsd(input, output, output);
+ }
+
+ masm.bind(&done);
+}
+
+class OutOfLineUndoALUOperation
+ : public OutOfLineCodeBase<CodeGeneratorX86Shared> {
+ LInstruction* ins_;
+
+ public:
+ explicit OutOfLineUndoALUOperation(LInstruction* ins) : ins_(ins) {}
+
+ virtual void accept(CodeGeneratorX86Shared* codegen) override {
+ codegen->visitOutOfLineUndoALUOperation(this);
+ }
+ LInstruction* ins() const { return ins_; }
+};
+
+void CodeGenerator::visitAddI(LAddI* ins) {
+ if (ins->rhs()->isConstant()) {
+ masm.addl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
+ } else {
+ masm.addl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));
+ }
+
+ if (ins->snapshot()) {
+ if (ins->recoversInput()) {
+ OutOfLineUndoALUOperation* ool =
+ new (alloc()) OutOfLineUndoALUOperation(ins);
+ addOutOfLineCode(ool, ins->mir());
+ masm.j(Assembler::Overflow, ool->entry());
+ } else {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ }
+ }
+}
+
+void CodeGenerator::visitAddI64(LAddI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitSubI(LSubI* ins) {
+ if (ins->rhs()->isConstant()) {
+ masm.subl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
+ } else {
+ masm.subl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));
+ }
+
+ if (ins->snapshot()) {
+ if (ins->recoversInput()) {
+ OutOfLineUndoALUOperation* ool =
+ new (alloc()) OutOfLineUndoALUOperation(ins);
+ addOutOfLineCode(ool, ins->mir());
+ masm.j(Assembler::Overflow, ool->entry());
+ } else {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ }
+ }
+}
+
+void CodeGenerator::visitSubI64(LSubI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGeneratorX86Shared::visitOutOfLineUndoALUOperation(
+ OutOfLineUndoALUOperation* ool) {
+ LInstruction* ins = ool->ins();
+ Register reg = ToRegister(ins->getDef(0));
+
+ DebugOnly<LAllocation*> lhs = ins->getOperand(0);
+ LAllocation* rhs = ins->getOperand(1);
+
+ MOZ_ASSERT(reg == ToRegister(lhs));
+ MOZ_ASSERT_IF(rhs->isGeneralReg(), reg != ToRegister(rhs));
+
+ // Undo the effect of the ALU operation, which was performed on the output
+ // register and overflowed. Writing to the output register clobbered an
+ // input reg, and the original value of the input needs to be recovered
+ // to satisfy the constraint imposed by any RECOVERED_INPUT operands to
+ // the bailout snapshot.
+
+ if (rhs->isConstant()) {
+ Imm32 constant(ToInt32(rhs));
+ if (ins->isAddI()) {
+ masm.subl(constant, reg);
+ } else {
+ masm.addl(constant, reg);
+ }
+ } else {
+ if (ins->isAddI()) {
+ masm.subl(ToOperand(rhs), reg);
+ } else {
+ masm.addl(ToOperand(rhs), reg);
+ }
+ }
+
+ bailout(ool->ins()->snapshot());
+}
+
+class MulNegativeZeroCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared> {
+ LMulI* ins_;
+
+ public:
+ explicit MulNegativeZeroCheck(LMulI* ins) : ins_(ins) {}
+
+ virtual void accept(CodeGeneratorX86Shared* codegen) override {
+ codegen->visitMulNegativeZeroCheck(this);
+ }
+ LMulI* ins() const { return ins_; }
+};
+
+void CodeGenerator::visitMulI(LMulI* ins) {
+ const LAllocation* lhs = ins->lhs();
+ const LAllocation* rhs = ins->rhs();
+ MMul* mul = ins->mir();
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
+ !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ // Bailout on -0.0
+ int32_t constant = ToInt32(rhs);
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition bailoutCond =
+ (constant == 0) ? Assembler::Signed : Assembler::Equal;
+ masm.test32(ToRegister(lhs), ToRegister(lhs));
+ bailoutIf(bailoutCond, ins->snapshot());
+ }
+
+ switch (constant) {
+ case -1:
+ masm.negl(ToOperand(lhs));
+ break;
+ case 0:
+ masm.xorl(ToOperand(lhs), ToRegister(lhs));
+ return; // escape overflow check;
+ case 1:
+ // nop
+ return; // escape overflow check;
+ case 2:
+ masm.addl(ToOperand(lhs), ToRegister(lhs));
+ break;
+ default:
+ if (!mul->canOverflow() && constant > 0) {
+ // Use shift if cannot overflow and constant is power of 2
+ int32_t shift = FloorLog2(constant);
+ if ((1 << shift) == constant) {
+ masm.shll(Imm32(shift), ToRegister(lhs));
+ return;
+ }
+ }
+ masm.imull(Imm32(ToInt32(rhs)), ToRegister(lhs));
+ }
+
+ // Bailout on overflow
+ if (mul->canOverflow()) {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ }
+ } else {
+ masm.imull(ToOperand(rhs), ToRegister(lhs));
+
+ // Bailout on overflow
+ if (mul->canOverflow()) {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ }
+
+ if (mul->canBeNegativeZero()) {
+ // Jump to an OOL path if the result is 0.
+ MulNegativeZeroCheck* ool = new (alloc()) MulNegativeZeroCheck(ins);
+ addOutOfLineCode(ool, mul);
+
+ masm.test32(ToRegister(lhs), ToRegister(lhs));
+ masm.j(Assembler::Zero, ool->entry());
+ masm.bind(ool->rejoin());
+ }
+ }
+}
+
+void CodeGenerator::visitMulI64(LMulI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+
+ MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ case 2:
+ masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ default:
+ if (constant > 0) {
+ // Use shift if constant is power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
+
+class ReturnZero : public OutOfLineCodeBase<CodeGeneratorX86Shared> {
+ Register reg_;
+
+ public:
+ explicit ReturnZero(Register reg) : reg_(reg) {}
+
+ virtual void accept(CodeGeneratorX86Shared* codegen) override {
+ codegen->visitReturnZero(this);
+ }
+ Register reg() const { return reg_; }
+};
+
+void CodeGeneratorX86Shared::visitReturnZero(ReturnZero* ool) {
+ masm.mov(ImmWord(0), ool->reg());
+ masm.jmp(ool->rejoin());
+}
+
+void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ MOZ_ASSERT_IF(lhs != rhs, rhs != eax);
+ MOZ_ASSERT(rhs != edx);
+ MOZ_ASSERT_IF(output == eax, ToRegister(ins->remainder()) == edx);
+
+ ReturnZero* ool = nullptr;
+
+ // Put the lhs in eax.
+ if (lhs != eax) {
+ masm.mov(lhs, eax);
+ }
+
+ // Prevent divide by zero.
+ if (ins->canBeDivideByZero()) {
+ masm.test32(rhs, rhs);
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError()) {
+ Label nonZero;
+ masm.j(Assembler::NonZero, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ ool = new (alloc()) ReturnZero(output);
+ masm.j(Assembler::Zero, ool->entry());
+ }
+ } else {
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ }
+
+ // Zero extend the lhs into edx to make (edx:eax), since udiv is 64-bit.
+ masm.mov(ImmWord(0), edx);
+ masm.udiv(rhs);
+
+ // If the remainder is > 0, bailout since this must be a double.
+ if (ins->mir()->isDiv() && !ins->mir()->toDiv()->canTruncateRemainder()) {
+ Register remainder = ToRegister(ins->remainder());
+ masm.test32(remainder, remainder);
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ // Unsigned div or mod can return a value that's not a signed int32.
+ // If our users aren't expecting that, bail.
+ if (!ins->mir()->isTruncated()) {
+ masm.test32(output, output);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+
+ if (ool) {
+ addOutOfLineCode(ool, ins->mir());
+ masm.bind(ool->rejoin());
+ }
+}
+
+void CodeGenerator::visitUDivOrModConstant(LUDivOrModConstant* ins) {
+ Register lhs = ToRegister(ins->numerator());
+ Register output = ToRegister(ins->output());
+ uint32_t d = ins->denominator();
+
+ // This emits the division answer into edx or the modulus answer into eax.
+ MOZ_ASSERT(output == eax || output == edx);
+ MOZ_ASSERT(lhs != eax && lhs != edx);
+ bool isDiv = (output == edx);
+
+ if (d == 0) {
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError()) {
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
+ } else {
+ masm.xorl(output, output);
+ }
+ } else {
+ bailout(ins->snapshot());
+ }
+ return;
+ }
+
+ // The denominator isn't a power of 2 (see LDivPowTwoI and LModPowTwoI).
+ MOZ_ASSERT((d & (d - 1)) != 0);
+
+ auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(d);
+
+ // We first compute (M * n) >> 32, where M = rmc.multiplier.
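+  // (The constants satisfy floor(n / d) == (M * n) >> (32 + shift) for every
+  // uint32_t n; the code below evaluates that expression.)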
+ masm.movl(Imm32(rmc.multiplier), eax);
+ masm.umull(lhs);
+ if (rmc.multiplier > UINT32_MAX) {
+ // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
+ // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
+ // contradicting the proof of correctness in computeDivisionConstants.
+ MOZ_ASSERT(rmc.shiftAmount > 0);
+ MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));
+
+ // We actually computed edx = ((uint32_t(M) * n) >> 32) instead. Since
+ // (M * n) >> (32 + shift) is the same as (edx + n) >> shift, we can
+ // correct for the overflow. This case is a bit trickier than the signed
+ // case, though, as the (edx + n) addition itself can overflow; however,
+ // note that (edx + n) >> shift == (((n - edx) >> 1) + edx) >> (shift - 1),
+ // which is overflow-free. See Hacker's Delight, section 10-8 for details.
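+    // Concretely, (edx + n) >> shift == ((edx + n) >> 1) >> (shift - 1), and
+    // because edx <= n, (edx + n) >> 1 can be computed without overflow as
+    // ((n - edx) >> 1) + edx.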
+
+ // Compute (n - edx) >> 1 into eax.
+ masm.movl(lhs, eax);
+ masm.subl(edx, eax);
+ masm.shrl(Imm32(1), eax);
+
+ // Finish the computation.
+ masm.addl(eax, edx);
+ masm.shrl(Imm32(rmc.shiftAmount - 1), edx);
+ } else {
+ masm.shrl(Imm32(rmc.shiftAmount), edx);
+ }
+
+ // We now have the truncated division value in edx. If we're
+ // computing a modulus or checking whether the division resulted
+ // in an integer, we need to multiply the obtained value by d and
+ // finish the computation/check.
+ if (!isDiv) {
+ masm.imull(Imm32(d), edx, edx);
+ masm.movl(lhs, eax);
+ masm.subl(edx, eax);
+
+ // The final result of the modulus op, just computed above by the
+ // sub instruction, can be a number in the range [2^31, 2^32). If
+ // this is the case and the modulus is not truncated, we must bail
+ // out.
+ if (!ins->mir()->isTruncated()) {
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+ } else if (!ins->mir()->isTruncated()) {
+ masm.imull(Imm32(d), edx, eax);
+ masm.cmpl(lhs, eax);
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+ }
+}
+
+void CodeGeneratorX86Shared::visitMulNegativeZeroCheck(
+ MulNegativeZeroCheck* ool) {
+ LMulI* ins = ool->ins();
+ Register result = ToRegister(ins->output());
+ Operand lhsCopy = ToOperand(ins->lhsCopy());
+ Operand rhs = ToOperand(ins->rhs());
+ MOZ_ASSERT_IF(lhsCopy.kind() == Operand::REG, lhsCopy.reg() != result.code());
+
+ // Result is -0 if lhs or rhs is negative.
+ masm.movl(lhsCopy, result);
+ masm.orl(rhs, result);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+
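+  // The true result is +0. The output register was clobbered by the sign
+  // check above, so restore the zero before rejoining.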
+ masm.mov(ImmWord(0), result);
+ masm.jmp(ool->rejoin());
+}
+
+void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
+ Register lhs = ToRegister(ins->numerator());
+ DebugOnly<Register> output = ToRegister(ins->output());
+
+ int32_t shift = ins->shift();
+ bool negativeDivisor = ins->negativeDivisor();
+ MDiv* mir = ins->mir();
+
+ // We use defineReuseInput so these should always be the same, which is
+ // convenient since all of our instructions here are two-address.
+ MOZ_ASSERT(lhs == output);
+
+ if (!mir->isTruncated() && negativeDivisor) {
+ // 0 divided by a negative number must return a double.
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+
+ if (shift) {
+ if (!mir->isTruncated()) {
+ // If the remainder is != 0, bailout since this must be a double.
+ masm.test32(lhs, Imm32(UINT32_MAX >> (32 - shift)));
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ if (mir->isUnsigned()) {
+ masm.shrl(Imm32(shift), lhs);
+ } else {
+ // Adjust the value so that shifting produces a correctly
+ // rounded result when the numerator is negative. See 10-1
+ // "Signed Division by a Known Power of 2" in Henry
+ // S. Warren, Jr.'s Hacker's Delight.
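+      // For example, with shift == 2 a numerator of -5 gets the bias
+      // (2^2 - 1) == 3 added below: (-5 + 3) >> 2 == -2 >> 2 == -1, which is
+      // the truncated result of -5 / 4.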
+ if (mir->canBeNegativeDividend() && mir->isTruncated()) {
+ // Note: There is no need to execute this code, which handles how to
+ // round the signed integer division towards 0, if we previously bailed
+ // due to a non-zero remainder.
+ Register lhsCopy = ToRegister(ins->numeratorCopy());
+ MOZ_ASSERT(lhsCopy != lhs);
+ if (shift > 1) {
+ // Copy the sign bit of the numerator. (= (2^32 - 1) or 0)
+ masm.sarl(Imm32(31), lhs);
+ }
+ // Divide by 2^(32 - shift)
+ // i.e. (= (2^32 - 1) / 2^(32 - shift) or 0)
+ // i.e. (= (2^shift - 1) or 0)
+ masm.shrl(Imm32(32 - shift), lhs);
+        // If the numerator is negative, adding this bias to it makes any 1
+        // bits below the shifted-out range carry upwards, so that the
+        // arithmetic shift below rounds the quotient towards 0.
+ masm.addl(lhsCopy, lhs);
+ }
+ masm.sarl(Imm32(shift), lhs);
+
+ if (negativeDivisor) {
+ masm.negl(lhs);
+ }
+ }
+ return;
+ }
+
+ if (negativeDivisor) {
+ // INT32_MIN / -1 overflows.
+ masm.negl(lhs);
+ if (!mir->isTruncated()) {
+ bailoutIf(Assembler::Overflow, ins->snapshot());
+ } else if (mir->trapOnError()) {
+ Label ok;
+ masm.j(Assembler::NoOverflow, &ok);
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+ masm.bind(&ok);
+ }
+ } else if (mir->isUnsigned() && !mir->isTruncated()) {
+ // Unsigned division by 1 can overflow if output is not
+ // truncated.
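+    // (The quotient is lhs itself; if its sign bit is set, it does not fit in
+    // a signed int32.)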
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+}
+
+void CodeGenerator::visitDivOrModConstantI(LDivOrModConstantI* ins) {
+ Register lhs = ToRegister(ins->numerator());
+ Register output = ToRegister(ins->output());
+ int32_t d = ins->denominator();
+
+ // This emits the division answer into edx or the modulus answer into eax.
+ MOZ_ASSERT(output == eax || output == edx);
+ MOZ_ASSERT(lhs != eax && lhs != edx);
+ bool isDiv = (output == edx);
+
+ // The absolute value of the denominator isn't a power of 2 (see LDivPowTwoI
+ // and LModPowTwoI).
+ MOZ_ASSERT((Abs(d) & (Abs(d) - 1)) != 0);
+
+ // We will first divide by Abs(d), and negate the answer if d is negative.
+ // If desired, this can be avoided by generalizing computeDivisionConstants.
+ auto rmc = ReciprocalMulConstants::computeSignedDivisionConstants(Abs(d));
+
+ // We first compute (M * n) >> 32, where M = rmc.multiplier.
+ masm.movl(Imm32(rmc.multiplier), eax);
+ masm.imull(lhs);
+ if (rmc.multiplier > INT32_MAX) {
+ MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 32));
+
+ // We actually computed edx = ((int32_t(M) * n) >> 32) instead. Since
+ // (M * n) >> 32 is the same as (edx + n), we can correct for the overflow.
+ // (edx + n) can't overflow, as n and edx have opposite signs because
+ // int32_t(M) is negative.
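+    // (int32_t(M) == M - 2^32, so edx == ((M * n) >> 32) - n.)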
+ masm.addl(lhs, edx);
+ }
+ // (M * n) >> (32 + shift) is the truncated division answer if n is
+ // non-negative, as proved in the comments of computeDivisionConstants. We
+ // must add 1 later if n is negative to get the right answer in all cases.
+ masm.sarl(Imm32(rmc.shiftAmount), edx);
+
+ // We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
+ // computed with just a sign-extending shift of 31 bits.
+ if (ins->canBeNegativeDividend()) {
+ masm.movl(lhs, eax);
+ masm.sarl(Imm32(31), eax);
+ masm.subl(eax, edx);
+ }
+
+ // After this, edx contains the correct truncated division result.
+ if (d < 0) {
+ masm.negl(edx);
+ }
+
+ if (!isDiv) {
+ masm.imull(Imm32(-d), edx, eax);
+ masm.addl(lhs, eax);
+ }
+
+ if (!ins->mir()->isTruncated()) {
+ if (isDiv) {
+ // This is a division op. Multiply the obtained value by d to check if
+ // the correct answer is an integer. This cannot overflow, since |d| > 1.
+ masm.imull(Imm32(d), edx, eax);
+ masm.cmp32(lhs, eax);
+ bailoutIf(Assembler::NotEqual, ins->snapshot());
+
+ // If lhs is zero and the divisor is negative, the answer should have
+ // been -0.
+ if (d < 0) {
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ } else if (ins->canBeNegativeDividend()) {
+ // This is a mod op. If the computed value is zero and lhs
+ // is negative, the answer should have been -0.
+ Label done;
+
+ masm.cmp32(lhs, Imm32(0));
+ masm.j(Assembler::GreaterThanOrEqual, &done);
+
+ masm.test32(eax, eax);
+ bailoutIf(Assembler::Zero, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
+
+void CodeGenerator::visitDivI(LDivI* ins) {
+ Register remainder = ToRegister(ins->remainder());
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ MDiv* mir = ins->mir();
+
+ MOZ_ASSERT_IF(lhs != rhs, rhs != eax);
+ MOZ_ASSERT(rhs != edx);
+ MOZ_ASSERT(remainder == edx);
+ MOZ_ASSERT(output == eax);
+
+ Label done;
+ ReturnZero* ool = nullptr;
+
+ // Put the lhs in eax, for either the negative overflow case or the regular
+ // divide case.
+ if (lhs != eax) {
+ masm.mov(lhs, eax);
+ }
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ masm.test32(rhs, rhs);
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.j(Assembler::NonZero, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else if (mir->canTruncateInfinities()) {
+ // Truncated division by zero is zero (Infinity|0 == 0)
+ if (!ool) {
+ ool = new (alloc()) ReturnZero(output);
+ }
+ masm.j(Assembler::Zero, ool->entry());
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ }
+
+ // Handle an integer overflow exception from -2147483648 / -1.
+ if (mir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.cmp32(lhs, Imm32(INT32_MIN));
+ masm.j(Assembler::NotEqual, &notOverflow);
+ masm.cmp32(rhs, Imm32(-1));
+ if (mir->trapOnError()) {
+ masm.j(Assembler::NotEqual, &notOverflow);
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+ } else if (mir->canTruncateOverflow()) {
+ // (-INT32_MIN)|0 == INT32_MIN and INT32_MIN is already in the
+ // output register (lhs == eax).
+ masm.j(Assembler::Equal, &done);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutIf(Assembler::Equal, ins->snapshot());
+ }
+ masm.bind(&notOverflow);
+ }
+
+ // Handle negative 0.
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.test32(lhs, lhs);
+ masm.j(Assembler::NonZero, &nonzero);
+ masm.cmp32(rhs, Imm32(0));
+ bailoutIf(Assembler::LessThan, ins->snapshot());
+ masm.bind(&nonzero);
+ }
+
+ // Sign extend the lhs into edx to make (edx:eax), since idiv is 64-bit.
+ if (lhs != eax) {
+ masm.mov(lhs, eax);
+ }
+ masm.cdq();
+ masm.idiv(rhs);
+
+ if (!mir->canTruncateRemainder()) {
+    // A non-zero remainder means the result must be a double, so bail out.
+ masm.test32(remainder, remainder);
+ bailoutIf(Assembler::NonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+
+ if (ool) {
+ addOutOfLineCode(ool, mir);
+ masm.bind(ool->rejoin());
+ }
+}
+
+void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
+ Register lhs = ToRegister(ins->getOperand(0));
+ int32_t shift = ins->shift();
+
+ Label negative;
+
+ if (!ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend()) {
+    // Switch based on the sign of the lhs. Non-negative numbers only need a
+    // bitmask.
+ masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
+ }
+
+ masm.andl(Imm32((uint32_t(1) << shift) - 1), lhs);
+
+ if (!ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend()) {
+ Label done;
+ masm.jump(&done);
+
+ // Negative numbers need a negate, bitmask, negate
+ masm.bind(&negative);
+
+ // Unlike in the visitModI case, we are not computing the mod by means of a
+ // division. Therefore, the divisor = -1 case isn't problematic (the andl
+ // always returns 0, which is what we expect).
+ //
+ // The negl instruction overflows if lhs == INT32_MIN, but this is also not
+ // a problem: shift is at most 31, and so the andl also always returns 0.
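+    // For example, with shift == 2 and lhs == -5: negl gives 5, the andl
+    // gives 1, and the final negl gives -1, which matches -5 % 4 in JS.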
+ masm.negl(lhs);
+ masm.andl(Imm32((uint32_t(1) << shift) - 1), lhs);
+ masm.negl(lhs);
+
+ // Since a%b has the same sign as b, and a is negative in this branch,
+ // an answer of 0 means the correct result is actually -0. Bail out.
+ if (!ins->mir()->isTruncated()) {
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ masm.bind(&done);
+ }
+}
+
+class ModOverflowCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared> {
+ Label done_;
+ LModI* ins_;
+ Register rhs_;
+
+ public:
+ explicit ModOverflowCheck(LModI* ins, Register rhs) : ins_(ins), rhs_(rhs) {}
+
+ virtual void accept(CodeGeneratorX86Shared* codegen) override {
+ codegen->visitModOverflowCheck(this);
+ }
+ Label* done() { return &done_; }
+ LModI* ins() const { return ins_; }
+ Register rhs() const { return rhs_; }
+};
+
+void CodeGeneratorX86Shared::visitModOverflowCheck(ModOverflowCheck* ool) {
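+  // Reached when lhs == INT32_MIN. If rhs is also -1, the idiv on the main
+  // path would fault, so handle that case here: the remainder is 0, which a
+  // truncated mod can return directly through edx, while a non-truncated mod
+  // must bail out because the JS result would be -0.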
+ masm.cmp32(ool->rhs(), Imm32(-1));
+ if (ool->ins()->mir()->isTruncated()) {
+ masm.j(Assembler::NotEqual, ool->rejoin());
+ masm.mov(ImmWord(0), edx);
+ masm.jmp(ool->done());
+ } else {
+ bailoutIf(Assembler::Equal, ool->ins()->snapshot());
+ masm.jmp(ool->rejoin());
+ }
+}
+
+void CodeGenerator::visitModI(LModI* ins) {
+ Register remainder = ToRegister(ins->remainder());
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+
+ // Required to use idiv.
+ MOZ_ASSERT_IF(lhs != rhs, rhs != eax);
+ MOZ_ASSERT(rhs != edx);
+ MOZ_ASSERT(remainder == edx);
+ MOZ_ASSERT(ToRegister(ins->getTemp(0)) == eax);
+
+ Label done;
+ ReturnZero* ool = nullptr;
+ ModOverflowCheck* overflow = nullptr;
+
+ // Set up eax in preparation for doing a div.
+ if (lhs != eax) {
+ masm.mov(lhs, eax);
+ }
+
+ MMod* mir = ins->mir();
+
+ // Prevent divide by zero.
+ if (mir->canBeDivideByZero()) {
+ masm.test32(rhs, rhs);
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.j(Assembler::NonZero, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ if (!ool) {
+ ool = new (alloc()) ReturnZero(edx);
+ }
+ masm.j(Assembler::Zero, ool->entry());
+ }
+ } else {
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ }
+
+ Label negative;
+
+ // Switch based on sign of the lhs.
+ if (mir->canBeNegativeDividend()) {
+ masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
+ }
+
+  // If lhs >= 0 then remainder = lhs % rhs. The remainder is non-negative.
+ {
+ // Check if rhs is a power-of-two.
+ if (mir->canBePowerOfTwoDivisor()) {
+ MOZ_ASSERT(rhs != remainder);
+
+ // Rhs y is a power-of-two if (y & (y-1)) == 0. Note that if
+ // y is any negative number other than INT32_MIN, both y and
+ // y-1 will have the sign bit set so these are never optimized
+ // as powers-of-two. If y is INT32_MIN, y-1 will be INT32_MAX
+ // and because lhs >= 0 at this point, lhs & INT32_MAX returns
+ // the correct value.
+ Label notPowerOfTwo;
+ masm.mov(rhs, remainder);
+ masm.subl(Imm32(1), remainder);
+ masm.branchTest32(Assembler::NonZero, remainder, rhs, &notPowerOfTwo);
+ {
+ masm.andl(lhs, remainder);
+ masm.jmp(&done);
+ }
+ masm.bind(&notPowerOfTwo);
+ }
+
+ // Since lhs >= 0, the sign-extension will be 0
+ masm.mov(ImmWord(0), edx);
+ masm.idiv(rhs);
+ }
+
+ // Otherwise, we have to beware of two special cases:
+ if (mir->canBeNegativeDividend()) {
+ masm.jump(&done);
+
+ masm.bind(&negative);
+
+ // Prevent an integer overflow exception from -2147483648 % -1
+ masm.cmp32(lhs, Imm32(INT32_MIN));
+ overflow = new (alloc()) ModOverflowCheck(ins, rhs);
+ masm.j(Assembler::Equal, overflow->entry());
+ masm.bind(overflow->rejoin());
+ masm.cdq();
+ masm.idiv(rhs);
+
+ if (!mir->isTruncated()) {
+ // A remainder of 0 means that the rval must be -0, which is a double.
+ masm.test32(remainder, remainder);
+ bailoutIf(Assembler::Zero, ins->snapshot());
+ }
+ }
+
+ masm.bind(&done);
+
+ if (overflow) {
+ addOutOfLineCode(overflow, mir);
+ masm.bind(overflow->done());
+ }
+
+ if (ool) {
+ addOutOfLineCode(ool, mir);
+ masm.bind(ool->rejoin());
+ }
+}
+
+void CodeGenerator::visitBitNotI(LBitNotI* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ MOZ_ASSERT(!input->isConstant());
+
+ masm.notl(ToOperand(input));
+}
+
+void CodeGenerator::visitBitOpI(LBitOpI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+
+ switch (ins->bitop()) {
+ case JSOp::BitOr:
+ if (rhs->isConstant()) {
+ masm.orl(Imm32(ToInt32(rhs)), ToOperand(lhs));
+ } else {
+ masm.orl(ToOperand(rhs), ToRegister(lhs));
+ }
+ break;
+ case JSOp::BitXor:
+ if (rhs->isConstant()) {
+ masm.xorl(Imm32(ToInt32(rhs)), ToOperand(lhs));
+ } else {
+ masm.xorl(ToOperand(rhs), ToRegister(lhs));
+ }
+ break;
+ case JSOp::BitAnd:
+ if (rhs->isConstant()) {
+ masm.andl(Imm32(ToInt32(rhs)), ToOperand(lhs));
+ } else {
+ masm.andl(ToOperand(rhs), ToRegister(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOp::BitOr:
+ if (IsConstant(rhs)) {
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitXor:
+ if (IsConstant(rhs)) {
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitAnd:
+ if (IsConstant(rhs)) {
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitShiftI(LShiftI* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.lshift32(Imm32(shift), lhs);
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.rshift32Arithmetic(Imm32(shift), lhs);
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.rshift32(Imm32(shift), lhs);
+ } else if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+ Register shift = ToRegister(rhs);
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ masm.lshift32(shift, lhs);
+ break;
+ case JSOp::Rsh:
+ masm.rshift32Arithmetic(shift, lhs);
+ break;
+ case JSOp::Ursh:
+ masm.rshift32(shift, lhs);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ masm.test32(lhs, lhs);
+ bailoutIf(Assembler::Signed, ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ Register shift = ToRegister(rhs);
+#ifdef JS_CODEGEN_X86
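+  // On x86, variable shift counts must be in ecx (cl).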
+ MOZ_ASSERT(shift == ecx);
+#endif
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ masm.lshift64(shift, ToRegister64(lhs));
+ break;
+ case JSOp::Rsh:
+ masm.rshift64Arithmetic(shift, ToRegister64(lhs));
+ break;
+ case JSOp::Ursh:
+ masm.rshift64(shift, ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void CodeGenerator::visitUrshD(LUrshD* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ MOZ_ASSERT(ToRegister(ins->temp()) == lhs);
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ if (shift) {
+ masm.shrl(Imm32(shift), lhs);
+ }
+ } else {
+ Register shift = ToRegister(rhs);
+ masm.rshift32(shift, lhs);
+ }
+
+ masm.convertUInt32ToDouble(lhs, out);
+}
+
+Operand CodeGeneratorX86Shared::ToOperand(const LAllocation& a) {
+ if (a.isGeneralReg()) {
+ return Operand(a.toGeneralReg()->reg());
+ }
+ if (a.isFloatReg()) {
+ return Operand(a.toFloatReg()->reg());
+ }
+ return Operand(ToAddress(a));
+}
+
+Operand CodeGeneratorX86Shared::ToOperand(const LAllocation* a) {
+ return ToOperand(*a);
+}
+
+Operand CodeGeneratorX86Shared::ToOperand(const LDefinition* def) {
+ return ToOperand(def->output());
+}
+
+MoveOperand CodeGeneratorX86Shared::toMoveOperand(LAllocation a) const {
+ if (a.isGeneralReg()) {
+ return MoveOperand(ToRegister(a));
+ }
+ if (a.isFloatReg()) {
+ return MoveOperand(ToFloatRegister(a));
+ }
+ MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
+ : MoveOperand::Kind::Memory;
+ return MoveOperand(ToAddress(a), kind);
+}
+
+class OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorX86Shared> {
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorX86Shared* codegen) override {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}
+
+ MTableSwitch* mir() const { return mir_; }
+
+ CodeLabel* jumpLabel() { return &jumpLabel_; }
+};
+
+void CodeGeneratorX86Shared::visitOutOfLineTableSwitch(
+ OutOfLineTableSwitch* ool) {
+ MTableSwitch* mir = ool->mir();
+
+ masm.haltingAlign(sizeof(void*));
+ masm.bind(ool->jumpLabel());
+ masm.addCodeLabel(*ool->jumpLabel());
+
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void CodeGeneratorX86Shared::emitTableSwitchDispatch(MTableSwitch* mir,
+ Register index,
+ Register base) {
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+  // Subtract the low bound so that the case index is zero-based.
+ if (mir->low() != 0) {
+ masm.subl(Imm32(mir->low()), index);
+ }
+
+ // Jump to default case if input is out of range
+ int32_t cases = mir->numCases();
+ masm.cmp32(index, Imm32(cases));
+ masm.j(AssemblerX86Shared::AboveOrEqual, defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first
+ // generate the case entries (we don't yet know their offsets in the
+ // instruction stream).
+ OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+  // Compute the address where the pointer to the right case is stored.
+ masm.mov(ool->jumpLabel(), base);
+ BaseIndex pointer(base, index, ScalePointer);
+
+ // Jump to the right case
+ masm.branchToComputedAddress(pointer);
+}
+
+void CodeGenerator::visitMathD(LMathD* math) {
+ FloatRegister lhs = ToFloatRegister(math->lhs());
+ Operand rhs = ToOperand(math->rhs());
+ FloatRegister output = ToFloatRegister(math->output());
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.vaddsd(rhs, lhs, output);
+ break;
+ case JSOp::Sub:
+ masm.vsubsd(rhs, lhs, output);
+ break;
+ case JSOp::Mul:
+ masm.vmulsd(rhs, lhs, output);
+ break;
+ case JSOp::Div:
+ masm.vdivsd(rhs, lhs, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitMathF(LMathF* math) {
+ FloatRegister lhs = ToFloatRegister(math->lhs());
+ Operand rhs = ToOperand(math->rhs());
+ FloatRegister output = ToFloatRegister(math->output());
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.vaddss(rhs, lhs, output);
+ break;
+ case JSOp::Sub:
+ masm.vsubss(rhs, lhs, output);
+ break;
+ case JSOp::Mul:
+ masm.vmulss(rhs, lhs, output);
+ break;
+ case JSOp::Div:
+ masm.vdivss(rhs, lhs, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitNearbyInt(LNearbyInt* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ RoundingMode roundingMode = lir->mir()->roundingMode();
+ masm.nearbyIntDouble(roundingMode, input, output);
+}
+
+void CodeGenerator::visitNearbyIntF(LNearbyIntF* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ RoundingMode roundingMode = lir->mir()->roundingMode();
+ masm.nearbyIntFloat32(roundingMode, input, output);
+}
+
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+ masm.leal(Operand(base, index, mir->scale(), mir->displacement()), output);
+}
+
+void CodeGeneratorX86Shared::generateInvalidateEpilogue() {
+ // Ensure that there is enough space in the buffer for the OsiPoint
+ // patching to occur. Otherwise, we could overwrite the invalidation
+ // epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
+ masm.nop();
+ }
+
+ masm.bind(&invalidate_);
+
+ // Push the Ion script onto the stack (when we determine what that pointer
+ // is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+ // Jump to the invalidator which will replace the current frame.
+ TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+ masm.jump(thunk);
+}
+
+void CodeGenerator::visitNegI(LNegI* ins) {
+ Register input = ToRegister(ins->input());
+ MOZ_ASSERT(input == ToRegister(ins->output()));
+
+ masm.neg32(input);
+}
+
+void CodeGenerator::visitNegI64(LNegI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ MOZ_ASSERT(input == ToOutRegister64(ins));
+ masm.neg64(input);
+}
+
+void CodeGenerator::visitNegD(LNegD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+
+ masm.negateDouble(input);
+}
+
+void CodeGenerator::visitNegF(LNegF* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+
+ masm.negateFloat(input);
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+ LCompareExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp =
+ lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, temp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, temp, output);
+ }
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+ LAtomicExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp =
+ lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register value = ToRegister(lir->value());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
+ output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
+ output);
+ }
+}
+
+template <typename T>
+static inline void AtomicBinopToTypedArray(MacroAssembler& masm, AtomicOp op,
+ Scalar::Type arrayType,
+ const LAllocation* value,
+ const T& mem, Register temp1,
+ Register temp2, AnyRegister output) {
+ if (value->isConstant()) {
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(), op,
+ Imm32(ToInt32(value)), mem, temp1, temp2, output);
+ } else {
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(), op,
+ ToRegister(value), mem, temp1, temp2, output);
+ }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+ LAtomicTypedArrayElementBinop* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register temp1 =
+ lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
+ Register temp2 =
+ lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+ const LAllocation* value = lir->value();
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value,
+ mem, temp1, temp2, output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value,
+ mem, temp1, temp2, output);
+ }
+}
+
+template <typename T>
+static inline void AtomicBinopToTypedArray(MacroAssembler& masm,
+ Scalar::Type arrayType, AtomicOp op,
+ const LAllocation* value,
+ const T& mem) {
+ if (value->isConstant()) {
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(), op,
+ Imm32(ToInt32(value)), mem, InvalidReg);
+ } else {
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(), op,
+ ToRegister(value), mem, InvalidReg);
+ }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+ LAtomicTypedArrayElementBinopForEffect* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ const LAllocation* value = lir->value();
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ AtomicBinopToTypedArray(masm, arrayType, lir->mir()->operation(), value,
+ mem);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ AtomicBinopToTypedArray(masm, arrayType, lir->mir()->operation(), value,
+ mem);
+ }
+}
+
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+ if (ins->type() & MembarStoreLoad) {
+ masm.storeLoadFence();
+ }
+}
+
+void CodeGeneratorX86Shared::visitOutOfLineWasmTruncateCheck(
+ OutOfLineWasmTruncateCheck* ool) {
+ FloatRegister input = ool->input();
+ Register output = ool->output();
+ Register64 output64 = ool->output64();
+ MIRType fromType = ool->fromType();
+ MIRType toType = ool->toType();
+ Label* oolRejoin = ool->rejoin();
+ TruncFlags flags = ool->flags();
+ wasm::BytecodeOffset off = ool->bytecodeOffset();
+
+ if (fromType == MIRType::Float32) {
+ if (toType == MIRType::Int32) {
+ masm.oolWasmTruncateCheckF32ToI32(input, output, flags, off, oolRejoin);
+ } else if (toType == MIRType::Int64) {
+ masm.oolWasmTruncateCheckF32ToI64(input, output64, flags, off, oolRejoin);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ } else if (fromType == MIRType::Double) {
+ if (toType == MIRType::Int32) {
+ masm.oolWasmTruncateCheckF64ToI32(input, output, flags, off, oolRejoin);
+ } else if (toType == MIRType::Int64) {
+ masm.oolWasmTruncateCheckF64ToI64(input, output64, flags, off, oolRejoin);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+void CodeGeneratorX86Shared::canonicalizeIfDeterministic(
+ Scalar::Type type, const LAllocation* value) {
+#ifdef DEBUG
+ if (!js::SupportDifferentialTesting()) {
+ return;
+ }
+
+ switch (type) {
+ case Scalar::Float32: {
+ FloatRegister in = ToFloatRegister(value);
+ masm.canonicalizeFloatIfDeterministic(in);
+ break;
+ }
+ case Scalar::Float64: {
+ FloatRegister in = ToFloatRegister(value);
+ masm.canonicalizeDoubleIfDeterministic(in);
+ break;
+ }
+ default: {
+ // Other types don't need canonicalization.
+ break;
+ }
+ }
+#endif // DEBUG
+}
+
+template <typename T>
+Operand CodeGeneratorX86Shared::toMemoryAccessOperand(T* lir, int32_t disp) {
+ const LAllocation* ptr = lir->ptr();
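+  // On x86 the memory base comes from an explicit operand; on x64 it is the
+  // pinned HeapReg.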
+#ifdef JS_CODEGEN_X86
+ const LAllocation* memoryBase = lir->memoryBase();
+ Operand destAddr = ptr->isBogus() ? Operand(ToRegister(memoryBase), disp)
+ : Operand(ToRegister(memoryBase),
+ ToRegister(ptr), TimesOne, disp);
+#else
+ Operand destAddr = ptr->isBogus()
+ ? Operand(HeapReg, disp)
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, disp);
+#endif
+ return destAddr;
+}
+
+void CodeGenerator::visitCopySignF(LCopySignF* lir) {
+ FloatRegister lhs = ToFloatRegister(lir->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(lir->getOperand(1));
+
+ FloatRegister out = ToFloatRegister(lir->output());
+
+ if (lhs == rhs) {
+ if (lhs != out) {
+ masm.moveFloat32(lhs, out);
+ }
+ return;
+ }
+
+ masm.copySignFloat32(lhs, rhs, out);
+}
+
+void CodeGenerator::visitCopySignD(LCopySignD* lir) {
+ FloatRegister lhs = ToFloatRegister(lir->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(lir->getOperand(1));
+
+ FloatRegister out = ToFloatRegister(lir->output());
+
+ if (lhs == rhs) {
+ if (lhs != out) {
+ masm.moveDouble(lhs, out);
+ }
+ return;
+ }
+
+ masm.copySignDouble(lhs, rhs, out);
+}
+
+void CodeGenerator::visitRotateI64(LRotateI64* lir) {
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ MOZ_ASSERT(input == output);
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c) {
+ return;
+ }
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ } else {
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ }
+ } else {
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ } else {
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+ }
+}
+
+void CodeGenerator::visitPopcntI64(LPopcntI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ Register temp = InvalidReg;
+ if (!AssemblerX86Shared::HasPOPCNT()) {
+ temp = ToRegister(lir->getTemp(0));
+ }
+
+ masm.popcnt64(input, output, temp);
+}
+
+void CodeGenerator::visitSimd128(LSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantSimd128(ins->simd128(), ToFloatRegister(out));
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128Bitselect: {
+ FloatRegister lhsDest = ToFloatRegister(ins->v0());
+ FloatRegister rhs = ToFloatRegister(ins->v1());
+ FloatRegister control = ToFloatRegister(ins->v2());
+ FloatRegister temp = ToFloatRegister(ins->temp());
+ masm.bitwiseSelectSimd128(control, lhsDest, rhs, lhsDest, temp);
+ break;
+ }
+ case wasm::SimdOp::F32x4RelaxedFma:
+ masm.fmaFloat32x4(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()));
+ break;
+ case wasm::SimdOp::F32x4RelaxedFnma:
+ masm.fnmaFloat32x4(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()));
+ break;
+ case wasm::SimdOp::F64x2RelaxedFma:
+ masm.fmaFloat64x2(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()));
+ break;
+ case wasm::SimdOp::F64x2RelaxedFnma:
+ masm.fnmaFloat64x2(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()));
+ break;
+ case wasm::SimdOp::I8x16RelaxedLaneSelect:
+ case wasm::SimdOp::I16x8RelaxedLaneSelect:
+ case wasm::SimdOp::I32x4RelaxedLaneSelect:
+ case wasm::SimdOp::I64x2RelaxedLaneSelect: {
+ FloatRegister lhs = ToFloatRegister(ins->v0());
+ FloatRegister rhs = ToFloatRegister(ins->v1());
+ FloatRegister mask = ToFloatRegister(ins->v2());
+ FloatRegister dest = ToFloatRegister(ins->output());
+ masm.laneSelectSimd128(mask, lhs, rhs, dest);
+ break;
+ }
+ case wasm::SimdOp::I32x4DotI8x16I7x16AddS:
+ masm.dotInt8x16Int7x16ThenAdd(ToFloatRegister(ins->v0()),
+ ToFloatRegister(ins->v1()),
+ ToFloatRegister(ins->v2()));
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister lhs = ToFloatRegister(ins->lhsDest());
+ FloatRegister rhs = ToFloatRegister(ins->rhs());
+ FloatRegister temp1 = ToTempFloatRegisterOrInvalid(ins->getTemp(0));
+ FloatRegister temp2 = ToTempFloatRegisterOrInvalid(ins->getTemp(1));
+ FloatRegister dest = ToFloatRegister(ins->output());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128And:
+ masm.bitwiseAndSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::V128Or:
+ masm.bitwiseOrSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::V128Xor:
+ masm.bitwiseXorSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::V128AndNot:
+ // x86/x64 specific: The CPU provides ~A & B. The operands were swapped
+ // during lowering, and we'll compute A & ~B here as desired.
+ masm.bitwiseNotAndSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16AvgrU:
+ masm.unsignedAverageInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8AvgrU:
+ masm.unsignedAverageInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Add:
+ masm.addInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16AddSatS:
+ masm.addSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16AddSatU:
+ masm.unsignedAddSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Sub:
+ masm.subInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16SubSatS:
+ masm.subSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16SubSatU:
+ masm.unsignedSubSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MinS:
+ masm.minInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MinU:
+ masm.unsignedMinInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MaxS:
+ masm.maxInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MaxU:
+ masm.unsignedMaxInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Add:
+ masm.addInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8AddSatS:
+ masm.addSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8AddSatU:
+ masm.unsignedAddSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Sub:
+ masm.subInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8SubSatS:
+ masm.subSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8SubSatU:
+ masm.unsignedSubSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Mul:
+ masm.mulInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MinS:
+ masm.minInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MinU:
+ masm.unsignedMinInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MaxS:
+ masm.maxInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MaxU:
+ masm.unsignedMaxInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Add:
+ masm.addInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Sub:
+ masm.subInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Mul:
+ masm.mulInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MinS:
+ masm.minInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MinU:
+ masm.unsignedMinInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MaxS:
+ masm.maxInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MaxU:
+ masm.unsignedMaxInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Add:
+ masm.addInt64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Sub:
+ masm.subInt64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Mul:
+ masm.mulInt64x2(lhs, rhs, dest, temp1);
+ break;
+ case wasm::SimdOp::F32x4Add:
+ masm.addFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Sub:
+ masm.subFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Mul:
+ masm.mulFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Div:
+ masm.divFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Min:
+ masm.minFloat32x4(lhs, rhs, dest, temp1, temp2);
+ break;
+ case wasm::SimdOp::F32x4Max:
+ masm.maxFloat32x4(lhs, rhs, dest, temp1, temp2);
+ break;
+ case wasm::SimdOp::F64x2Add:
+ masm.addFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Sub:
+ masm.subFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Mul:
+ masm.mulFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Div:
+ masm.divFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Min:
+ masm.minFloat64x2(lhs, rhs, dest, temp1, temp2);
+ break;
+ case wasm::SimdOp::F64x2Max:
+ masm.maxFloat64x2(lhs, rhs, dest, temp1, temp2);
+ break;
+ case wasm::SimdOp::I8x16Swizzle:
+ masm.swizzleInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16RelaxedSwizzle:
+ masm.swizzleInt8x16Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16NarrowI16x8S:
+ masm.narrowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16NarrowI16x8U:
+ masm.unsignedNarrowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8NarrowI32x4S:
+ masm.narrowInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8NarrowI32x4U:
+ masm.unsignedNarrowInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Eq:
+ masm.compareInt8x16(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Ne:
+ masm.compareInt8x16(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16LtS:
+ masm.compareInt8x16(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16GtS:
+ masm.compareInt8x16(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16LeS:
+ masm.compareInt8x16(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16GeS:
+ masm.compareInt8x16(Assembler::GreaterThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16LtU:
+ masm.compareInt8x16(Assembler::Below, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16GtU:
+ masm.compareInt8x16(Assembler::Above, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16LeU:
+ masm.compareInt8x16(Assembler::BelowOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16GeU:
+ masm.compareInt8x16(Assembler::AboveOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Eq:
+ masm.compareInt16x8(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Ne:
+ masm.compareInt16x8(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8LtS:
+ masm.compareInt16x8(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8GtS:
+ masm.compareInt16x8(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8LeS:
+ masm.compareInt16x8(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8GeS:
+ masm.compareInt16x8(Assembler::GreaterThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8LtU:
+ masm.compareInt16x8(Assembler::Below, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8GtU:
+ masm.compareInt16x8(Assembler::Above, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8LeU:
+ masm.compareInt16x8(Assembler::BelowOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8GeU:
+ masm.compareInt16x8(Assembler::AboveOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Eq:
+ masm.compareInt32x4(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Ne:
+ masm.compareInt32x4(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4LtS:
+ masm.compareInt32x4(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4GtS:
+ masm.compareInt32x4(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4LeS:
+ masm.compareInt32x4(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4GeS:
+ masm.compareInt32x4(Assembler::GreaterThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4LtU:
+ masm.compareInt32x4(Assembler::Below, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4GtU:
+ masm.compareInt32x4(Assembler::Above, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4LeU:
+ masm.compareInt32x4(Assembler::BelowOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4GeU:
+ masm.compareInt32x4(Assembler::AboveOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Eq:
+ masm.compareForEqualityInt64x2(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Ne:
+ masm.compareForEqualityInt64x2(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2LtS:
+ masm.compareForOrderingInt64x2(Assembler::LessThan, lhs, rhs, dest, temp1,
+ temp2);
+ break;
+ case wasm::SimdOp::I64x2GtS:
+ masm.compareForOrderingInt64x2(Assembler::GreaterThan, lhs, rhs, dest,
+ temp1, temp2);
+ break;
+ case wasm::SimdOp::I64x2LeS:
+ masm.compareForOrderingInt64x2(Assembler::LessThanOrEqual, lhs, rhs, dest,
+ temp1, temp2);
+ break;
+ case wasm::SimdOp::I64x2GeS:
+ masm.compareForOrderingInt64x2(Assembler::GreaterThanOrEqual, lhs, rhs,
+ dest, temp1, temp2);
+ break;
+ case wasm::SimdOp::F32x4Eq:
+ masm.compareFloat32x4(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Ne:
+ masm.compareFloat32x4(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Lt:
+ masm.compareFloat32x4(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Le:
+ masm.compareFloat32x4(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Eq:
+ masm.compareFloat64x2(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Ne:
+ masm.compareFloat64x2(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Lt:
+ masm.compareFloat64x2(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Le:
+ masm.compareFloat64x2(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4PMax:
+      // `lhs` and `rhs` are swapped; on non-VEX platforms the output is rhs.
+ masm.pseudoMaxFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4PMin:
+      // `lhs` and `rhs` are swapped; on non-VEX platforms the output is rhs.
+ masm.pseudoMinFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2PMax:
+      // `lhs` and `rhs` are swapped; on non-VEX platforms the output is rhs.
+ masm.pseudoMaxFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2PMin:
+      // `lhs` and `rhs` are swapped; on non-VEX platforms the output is rhs.
+ masm.pseudoMinFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4DotI16x8S:
+ masm.widenDotInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtmulLowI8x16S:
+ masm.extMulLowInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtmulHighI8x16S:
+ masm.extMulHighInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtmulLowI8x16U:
+ masm.unsignedExtMulLowInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtmulHighI8x16U:
+ masm.unsignedExtMulHighInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtmulLowI16x8S:
+ masm.extMulLowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtmulHighI16x8S:
+ masm.extMulHighInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtmulLowI16x8U:
+ masm.unsignedExtMulLowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtmulHighI16x8U:
+ masm.unsignedExtMulHighInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtmulLowI32x4S:
+ masm.extMulLowInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtmulHighI32x4S:
+ masm.extMulHighInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtmulLowI32x4U:
+ masm.unsignedExtMulLowInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtmulHighI32x4U:
+ masm.unsignedExtMulHighInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Q15MulrSatS:
+ masm.q15MulrSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4RelaxedMin:
+ masm.minFloat32x4Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4RelaxedMax:
+ masm.maxFloat32x4Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2RelaxedMin:
+ masm.minFloat64x2Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2RelaxedMax:
+ masm.maxFloat64x2Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8RelaxedQ15MulrS:
+ masm.q15MulrInt16x8Relaxed(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8DotI8x16I7x16S:
+ masm.dotInt8x16Int7x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::MozPMADDUBSW:
+ masm.vpmaddubsw(rhs, lhs, dest);
+ break;
+ default:
+ MOZ_CRASH("Binary SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+ LWasmBinarySimd128WithConstant* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister lhs = ToFloatRegister(ins->lhsDest());
+ const SimdConstant& rhs = ins->rhs();
+ FloatRegister dest = ToFloatRegister(ins->output());
+ FloatRegister temp = ToTempFloatRegisterOrInvalid(ins->getTemp(0));
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Add:
+ masm.addInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Add:
+ masm.addInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Add:
+ masm.addInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Add:
+ masm.addInt64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Sub:
+ masm.subInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Sub:
+ masm.subInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Sub:
+ masm.subInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Sub:
+ masm.subInt64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Mul:
+ masm.mulInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Mul:
+ masm.mulInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16AddSatS:
+ masm.addSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16AddSatU:
+ masm.unsignedAddSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8AddSatS:
+ masm.addSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8AddSatU:
+ masm.unsignedAddSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16SubSatS:
+ masm.subSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16SubSatU:
+ masm.unsignedSubSatInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8SubSatS:
+ masm.subSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8SubSatU:
+ masm.unsignedSubSatInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MinS:
+ masm.minInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MinU:
+ masm.unsignedMinInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MinS:
+ masm.minInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MinU:
+ masm.unsignedMinInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MinS:
+ masm.minInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MinU:
+ masm.unsignedMinInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MaxS:
+ masm.maxInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16MaxU:
+ masm.unsignedMaxInt8x16(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MaxS:
+ masm.maxInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8MaxU:
+ masm.unsignedMaxInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MaxS:
+ masm.maxInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4MaxU:
+ masm.unsignedMaxInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::V128And:
+ masm.bitwiseAndSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::V128Or:
+ masm.bitwiseOrSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::V128Xor:
+ masm.bitwiseXorSimd128(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Eq:
+ masm.compareInt8x16(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16Ne:
+ masm.compareInt8x16(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16GtS:
+ masm.compareInt8x16(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16LeS:
+ masm.compareInt8x16(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Eq:
+ masm.compareInt16x8(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8Ne:
+ masm.compareInt16x8(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8GtS:
+ masm.compareInt16x8(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8LeS:
+ masm.compareInt16x8(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Eq:
+ masm.compareInt32x4(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4Ne:
+ masm.compareInt32x4(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4GtS:
+ masm.compareInt32x4(Assembler::GreaterThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4LeS:
+ masm.compareInt32x4(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I64x2Mul:
+ masm.mulInt64x2(lhs, rhs, dest, temp);
+ break;
+ case wasm::SimdOp::F32x4Eq:
+ masm.compareFloat32x4(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Ne:
+ masm.compareFloat32x4(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Lt:
+ masm.compareFloat32x4(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Le:
+ masm.compareFloat32x4(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Eq:
+ masm.compareFloat64x2(Assembler::Equal, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Ne:
+ masm.compareFloat64x2(Assembler::NotEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Lt:
+ masm.compareFloat64x2(Assembler::LessThan, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Le:
+ masm.compareFloat64x2(Assembler::LessThanOrEqual, lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I32x4DotI16x8S:
+ masm.widenDotInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Add:
+ masm.addFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Add:
+ masm.addFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Sub:
+ masm.subFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Sub:
+ masm.subFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Div:
+ masm.divFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Div:
+ masm.divFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F32x4Mul:
+ masm.mulFloat32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::F64x2Mul:
+ masm.mulFloat64x2(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16NarrowI16x8S:
+ masm.narrowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I8x16NarrowI16x8U:
+ masm.unsignedNarrowInt16x8(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8NarrowI32x4S:
+ masm.narrowInt32x4(lhs, rhs, dest);
+ break;
+ case wasm::SimdOp::I16x8NarrowI32x4U:
+ masm.unsignedNarrowInt32x4(lhs, rhs, dest);
+ break;
+ default:
+ MOZ_CRASH("Binary SimdOp with constant not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmVariableShiftSimd128(
+ LWasmVariableShiftSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister lhsDest = ToFloatRegister(ins->lhsDest());
+ Register rhs = ToRegister(ins->rhs());
+ FloatRegister temp = ToTempFloatRegisterOrInvalid(ins->getTemp(0));
+
+ MOZ_ASSERT(ToFloatRegister(ins->output()) == lhsDest);
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Shl:
+ masm.leftShiftInt8x16(rhs, lhsDest, temp);
+ break;
+ case wasm::SimdOp::I8x16ShrS:
+ masm.rightShiftInt8x16(rhs, lhsDest, temp);
+ break;
+ case wasm::SimdOp::I8x16ShrU:
+ masm.unsignedRightShiftInt8x16(rhs, lhsDest, temp);
+ break;
+ case wasm::SimdOp::I16x8Shl:
+ masm.leftShiftInt16x8(rhs, lhsDest);
+ break;
+ case wasm::SimdOp::I16x8ShrS:
+ masm.rightShiftInt16x8(rhs, lhsDest);
+ break;
+ case wasm::SimdOp::I16x8ShrU:
+ masm.unsignedRightShiftInt16x8(rhs, lhsDest);
+ break;
+ case wasm::SimdOp::I32x4Shl:
+ masm.leftShiftInt32x4(rhs, lhsDest);
+ break;
+ case wasm::SimdOp::I32x4ShrS:
+ masm.rightShiftInt32x4(rhs, lhsDest);
+ break;
+ case wasm::SimdOp::I32x4ShrU:
+ masm.unsignedRightShiftInt32x4(rhs, lhsDest);
+ break;
+ case wasm::SimdOp::I64x2Shl:
+ masm.leftShiftInt64x2(rhs, lhsDest);
+ break;
+ case wasm::SimdOp::I64x2ShrS:
+ masm.rightShiftInt64x2(rhs, lhsDest, temp);
+ break;
+ case wasm::SimdOp::I64x2ShrU:
+ masm.unsignedRightShiftInt64x2(rhs, lhsDest);
+ break;
+ default:
+ MOZ_CRASH("Shift SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmConstantShiftSimd128(
+ LWasmConstantShiftSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+ int32_t shift = ins->shift();
+
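+  // A shift by zero bits leaves the value unchanged, so it reduces to a move.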
+ if (shift == 0) {
+ masm.moveSimd128(src, dest);
+ return;
+ }
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Shl:
+ masm.leftShiftInt8x16(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I8x16ShrS:
+ masm.rightShiftInt8x16(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I8x16ShrU:
+ masm.unsignedRightShiftInt8x16(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I16x8Shl:
+ masm.leftShiftInt16x8(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I16x8ShrS:
+ masm.rightShiftInt16x8(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I16x8ShrU:
+ masm.unsignedRightShiftInt16x8(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I32x4Shl:
+ masm.leftShiftInt32x4(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I32x4ShrS:
+ masm.rightShiftInt32x4(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I32x4ShrU:
+ masm.unsignedRightShiftInt32x4(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I64x2Shl:
+ masm.leftShiftInt64x2(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I64x2ShrS:
+ masm.rightShiftInt64x2(Imm32(shift), src, dest);
+ break;
+ case wasm::SimdOp::I64x2ShrU:
+ masm.unsignedRightShiftInt64x2(Imm32(shift), src, dest);
+ break;
+ default:
+ MOZ_CRASH("Shift SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmSignReplicationSimd128(
+ LWasmSignReplicationSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+
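+  // Each case is an arithmetic right shift by the lane width minus one, which
+  // replicates the lane's sign bit across the entire lane.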
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16ShrS:
+ masm.signReplicationInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ShrS:
+ masm.signReplicationInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ShrS:
+ masm.signReplicationInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2ShrS:
+ masm.signReplicationInt64x2(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Shift SimdOp unsupported sign replication optimization");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister lhsDest = ToFloatRegister(ins->lhsDest());
+ FloatRegister rhs = ToFloatRegister(ins->rhs());
+ SimdConstant control = ins->control();
+ FloatRegister output = ToFloatRegister(ins->output());
+ switch (ins->op()) {
+ case SimdShuffleOp::BLEND_8x16: {
+ masm.blendInt8x16(reinterpret_cast<const uint8_t*>(control.asInt8x16()),
+ lhsDest, rhs, output, ToFloatRegister(ins->temp()));
+ break;
+ }
+ case SimdShuffleOp::BLEND_16x8: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ masm.blendInt16x8(reinterpret_cast<const uint16_t*>(control.asInt16x8()),
+ lhsDest, rhs, output);
+ break;
+ }
+ case SimdShuffleOp::CONCAT_RIGHT_SHIFT_8x16: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ int8_t count = 16 - control.asInt8x16()[0];
+ MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
+ masm.concatAndRightShiftSimd128(lhsDest, rhs, output, count);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_HIGH_8x16: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ masm.interleaveHighInt8x16(lhsDest, rhs, output);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_HIGH_16x8: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ masm.interleaveHighInt16x8(lhsDest, rhs, output);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_HIGH_32x4: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ masm.interleaveHighInt32x4(lhsDest, rhs, output);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_HIGH_64x2: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ masm.interleaveHighInt64x2(lhsDest, rhs, output);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_LOW_8x16: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ masm.interleaveLowInt8x16(lhsDest, rhs, output);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_LOW_16x8: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ masm.interleaveLowInt16x8(lhsDest, rhs, output);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_LOW_32x4: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ masm.interleaveLowInt32x4(lhsDest, rhs, output);
+ break;
+ }
+ case SimdShuffleOp::INTERLEAVE_LOW_64x2: {
+ MOZ_ASSERT(ins->temp()->isBogusTemp());
+ masm.interleaveLowInt64x2(lhsDest, rhs, output);
+ break;
+ }
+ case SimdShuffleOp::SHUFFLE_BLEND_8x16: {
+ masm.shuffleInt8x16(reinterpret_cast<const uint8_t*>(control.asInt8x16()),
+ lhsDest, rhs, output);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unsupported SIMD shuffle operation");
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+#ifdef ENABLE_WASM_SIMD
+
+enum PermuteX64I16x8Action : uint16_t {
+ UNAVAILABLE = 0,
+ SWAP_QWORDS = 1, // Swap qwords first
+ PERM_LOW = 2, // Permute low qword by control_[0..3]
+ PERM_HIGH = 4 // Permute high qword by control_[4..7]
+};
+
+// Skip lanes that equal v starting at i, returning the index just beyond the
+// last of those. There is no requirement that the initial lanes[i] == v.
+template <typename T>
+static int ScanConstant(const T* lanes, int v, int i) {
+ int len = int(16 / sizeof(T));
+ MOZ_ASSERT(i <= len);
+ while (i < len && lanes[i] == v) {
+ i++;
+ }
+ return i;
+}
+
+// Apply a transformation to each lane value.
+template <typename T>
+static void MapLanes(T* result, const T* input, int (*f)(int)) {
+ int len = int(16 / sizeof(T));
+ for (int i = 0; i < len; i++) {
+ result[i] = f(input[i]);
+ }
+}
+
+// Recognize part of an identity permutation starting at start, with
+// the first value of the permutation expected to be bias.
+template <typename T>
+static bool IsIdentity(const T* lanes, int start, int len, int bias) {
+ if (lanes[start] != bias) {
+ return false;
+ }
+ for (int i = start + 1; i < start + len; i++) {
+ if (lanes[i] != lanes[i - 1] + 1) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// We can permute by words if the mask is reducible to a word mask, but the x64
+// lowering is only efficient if we can permute the high and low quadwords
+// separately, possibly after swapping quadwords.
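+// For example, the word lane mask [4 5 6 7 0 1 2 3] maps to [1 1 1 1 0 0 0 0],
+// so each quadword of the result draws from a single source quadword; the
+// first lane (4) is greater than the fifth (0), so SWAP_QWORDS is set, and
+// after masking each lane with 3 both halves are the identity [0 1 2 3], so
+// neither PERM_LOW nor PERM_HIGH is needed.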
+static PermuteX64I16x8Action CalculateX64Permute16x8(SimdConstant* control) {
+ const SimdConstant::I16x8& lanes = control->asInt16x8();
+ SimdConstant::I16x8 mapped;
+ MapLanes(mapped, lanes, [](int x) -> int { return x < 4 ? 0 : 1; });
+ int i = ScanConstant(mapped, mapped[0], 0);
+ if (i != 4) {
+ return PermuteX64I16x8Action::UNAVAILABLE;
+ }
+ i = ScanConstant(mapped, mapped[4], 4);
+ if (i != 8) {
+ return PermuteX64I16x8Action::UNAVAILABLE;
+ }
+ // Now compute the operation bits. `mapped` holds the adjusted lane mask.
+ memcpy(mapped, lanes, sizeof(mapped));
+ uint16_t op = 0;
+ if (mapped[0] > mapped[4]) {
+ op |= PermuteX64I16x8Action::SWAP_QWORDS;
+ }
+ for (auto& m : mapped) {
+ m &= 3;
+ }
+ if (!IsIdentity(mapped, 0, 4, 0)) {
+ op |= PermuteX64I16x8Action::PERM_LOW;
+ }
+ if (!IsIdentity(mapped, 4, 4, 0)) {
+ op |= PermuteX64I16x8Action::PERM_HIGH;
+ }
+ MOZ_ASSERT(op != PermuteX64I16x8Action::UNAVAILABLE);
+ *control = SimdConstant::CreateX8(mapped);
+ return (PermuteX64I16x8Action)op;
+}
+
+#endif
+
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+ SimdConstant control = ins->control();
+ switch (ins->op()) {
+ // For broadcast, would MOVDDUP be better than PSHUFD for the last step?
+ case SimdPermuteOp::BROADCAST_8x16: {
+ const SimdConstant::I8x16& mask = control.asInt8x16();
+ int8_t source = mask[0];
+ if (source == 0 && Assembler::HasAVX2()) {
+ masm.vbroadcastb(Operand(src), dest);
+ break;
+ }
+ MOZ_ASSERT_IF(!Assembler::HasAVX(), src == dest);
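+      // Interleaving the vector with itself doubles the selected byte into a
+      // 16-bit lane; the word and dword permutes below then broadcast that
+      // lane across the whole vector.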
+ if (source < 8) {
+ masm.interleaveLowInt8x16(src, src, dest);
+ } else {
+ masm.interleaveHighInt8x16(src, src, dest);
+ source -= 8;
+ }
+ uint16_t v = uint16_t(source & 3);
+ uint16_t wordMask[4] = {v, v, v, v};
+ if (source < 4) {
+ masm.permuteLowInt16x8(wordMask, dest, dest);
+ uint32_t dwordMask[4] = {0, 0, 0, 0};
+ masm.permuteInt32x4(dwordMask, dest, dest);
+ } else {
+ masm.permuteHighInt16x8(wordMask, dest, dest);
+ uint32_t dwordMask[4] = {2, 2, 2, 2};
+ masm.permuteInt32x4(dwordMask, dest, dest);
+ }
+ break;
+ }
+ case SimdPermuteOp::BROADCAST_16x8: {
+ const SimdConstant::I16x8& mask = control.asInt16x8();
+ int16_t source = mask[0];
+ if (source == 0 && Assembler::HasAVX2()) {
+ masm.vbroadcastw(Operand(src), dest);
+ break;
+ }
+ uint16_t v = uint16_t(source & 3);
+ uint16_t wordMask[4] = {v, v, v, v};
+ if (source < 4) {
+ masm.permuteLowInt16x8(wordMask, src, dest);
+ uint32_t dwordMask[4] = {0, 0, 0, 0};
+ masm.permuteInt32x4(dwordMask, dest, dest);
+ } else {
+ masm.permuteHighInt16x8(wordMask, src, dest);
+ uint32_t dwordMask[4] = {2, 2, 2, 2};
+ masm.permuteInt32x4(dwordMask, dest, dest);
+ }
+ break;
+ }
+ case SimdPermuteOp::MOVE: {
+ masm.moveSimd128(src, dest);
+ break;
+ }
+ case SimdPermuteOp::PERMUTE_8x16: {
+ const SimdConstant::I8x16& mask = control.asInt8x16();
+# ifdef DEBUG
+ DebugOnly<int> i;
+ for (i = 0; i < 16 && mask[i] == i; i++) {
+ }
+ MOZ_ASSERT(i < 16, "Should have been a MOVE operation");
+# endif
+ masm.permuteInt8x16(reinterpret_cast<const uint8_t*>(mask), src, dest);
+ break;
+ }
+ case SimdPermuteOp::PERMUTE_16x8: {
+# ifdef DEBUG
+ const SimdConstant::I16x8& mask = control.asInt16x8();
+ DebugOnly<int> i;
+ for (i = 0; i < 8 && mask[i] == i; i++) {
+ }
+ MOZ_ASSERT(i < 8, "Should have been a MOVE operation");
+# endif
+ PermuteX64I16x8Action op = CalculateX64Permute16x8(&control);
+ if (op != PermuteX64I16x8Action::UNAVAILABLE) {
+ const SimdConstant::I16x8& mask = control.asInt16x8();
+ if (op & PermuteX64I16x8Action::SWAP_QWORDS) {
+ uint32_t dwordMask[4] = {2, 3, 0, 1};
+ masm.permuteInt32x4(dwordMask, src, dest);
+ src = dest;
+ }
+ if (op & PermuteX64I16x8Action::PERM_LOW) {
+ masm.permuteLowInt16x8(reinterpret_cast<const uint16_t*>(mask) + 0,
+ src, dest);
+ src = dest;
+ }
+ if (op & PermuteX64I16x8Action::PERM_HIGH) {
+ masm.permuteHighInt16x8(reinterpret_cast<const uint16_t*>(mask) + 4,
+ src, dest);
+ src = dest;
+ }
+ } else {
+ const SimdConstant::I16x8& wmask = control.asInt16x8();
+ uint8_t mask[16];
+ for (unsigned i = 0; i < 16; i += 2) {
+ mask[i] = wmask[i / 2] * 2;
+ mask[i + 1] = wmask[i / 2] * 2 + 1;
+ }
+ masm.permuteInt8x16(mask, src, dest);
+ }
+ break;
+ }
+ case SimdPermuteOp::PERMUTE_32x4: {
+ const SimdConstant::I32x4& mask = control.asInt32x4();
+ if (Assembler::HasAVX2() && mask[0] == 0 && mask[1] == 0 &&
+ mask[2] == 0 && mask[3] == 0) {
+ masm.vbroadcastd(Operand(src), dest);
+ break;
+ }
+# ifdef DEBUG
+ DebugOnly<int> i;
+ for (i = 0; i < 4 && mask[i] == i; i++) {
+ }
+ MOZ_ASSERT(i < 4, "Should have been a MOVE operation");
+# endif
+ masm.permuteInt32x4(reinterpret_cast<const uint32_t*>(mask), src, dest);
+ break;
+ }
+ case SimdPermuteOp::ROTATE_RIGHT_8x16: {
+ MOZ_ASSERT_IF(!Assembler::HasAVX(), src == dest);
+ int8_t count = control.asInt8x16()[0];
+ MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
+ masm.concatAndRightShiftSimd128(src, src, dest, count);
+ break;
+ }
+ case SimdPermuteOp::SHIFT_LEFT_8x16: {
+ int8_t count = control.asInt8x16()[0];
+ MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
+ masm.leftShiftSimd128(Imm32(count), src, dest);
+ break;
+ }
+ case SimdPermuteOp::SHIFT_RIGHT_8x16: {
+ int8_t count = control.asInt8x16()[0];
+ MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
+ masm.rightShiftSimd128(Imm32(count), src, dest);
+ break;
+ }
+ case SimdPermuteOp::REVERSE_16x8:
+ masm.reverseInt16x8(src, dest);
+ break;
+ case SimdPermuteOp::REVERSE_32x4:
+ masm.reverseInt32x4(src, dest);
+ break;
+ case SimdPermuteOp::REVERSE_64x2:
+ masm.reverseInt64x2(src, dest);
+ break;
+ default: {
+ MOZ_CRASH("Unsupported SIMD permutation operation");
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister lhs = ToFloatRegister(ins->lhsDest());
+ FloatRegister dest = ToFloatRegister(ins->output());
+ const LAllocation* rhs = ins->rhs();
+ uint32_t laneIndex = ins->laneIndex();
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16ReplaceLane:
+ masm.replaceLaneInt8x16(laneIndex, lhs, ToRegister(rhs), dest);
+ break;
+ case wasm::SimdOp::I16x8ReplaceLane:
+ masm.replaceLaneInt16x8(laneIndex, lhs, ToRegister(rhs), dest);
+ break;
+ case wasm::SimdOp::I32x4ReplaceLane:
+ masm.replaceLaneInt32x4(laneIndex, lhs, ToRegister(rhs), dest);
+ break;
+ case wasm::SimdOp::F32x4ReplaceLane:
+ masm.replaceLaneFloat32x4(laneIndex, lhs, ToFloatRegister(rhs), dest);
+ break;
+ case wasm::SimdOp::F64x2ReplaceLane:
+ masm.replaceLaneFloat64x2(laneIndex, lhs, ToFloatRegister(rhs), dest);
+ break;
+ default:
+ MOZ_CRASH("ReplaceLane SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+ LWasmReplaceInt64LaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_RELEASE_ASSERT(ins->simdOp() == wasm::SimdOp::I64x2ReplaceLane);
+ masm.replaceLaneInt64x2(ins->laneIndex(), ToFloatRegister(ins->lhs()),
+ ToRegister64(ins->rhs()),
+ ToFloatRegister(ins->output()));
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister dest = ToFloatRegister(ins->output());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Splat:
+ masm.splatX16(ToRegister(ins->src()), dest);
+ break;
+ case wasm::SimdOp::I16x8Splat:
+ masm.splatX8(ToRegister(ins->src()), dest);
+ break;
+ case wasm::SimdOp::I32x4Splat:
+ masm.splatX4(ToRegister(ins->src()), dest);
+ break;
+ case wasm::SimdOp::F32x4Splat:
+ masm.splatX4(ToFloatRegister(ins->src()), dest);
+ break;
+ case wasm::SimdOp::F64x2Splat:
+ masm.splatX2(ToFloatRegister(ins->src()), dest);
+ break;
+ default:
+ MOZ_CRASH("ScalarToSimd128 SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ Register64 src = ToRegister64(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I64x2Splat:
+ masm.splatX2(src, dest);
+ break;
+ case wasm::SimdOp::V128Load8x8S:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.widenLowInt8x16(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load8x8U:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.unsignedWidenLowInt8x16(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load16x4S:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.widenLowInt16x8(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load16x4U:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.unsignedWidenLowInt16x8(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load32x2S:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.widenLowInt32x4(dest, dest);
+ break;
+ case wasm::SimdOp::V128Load32x2U:
+ masm.moveGPR64ToDouble(src, dest);
+ masm.unsignedWidenLowInt32x4(dest, dest);
+ break;
+ default:
+ MOZ_CRASH("Int64ToSimd128 SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ FloatRegister dest = ToFloatRegister(ins->output());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Neg:
+ masm.negInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8Neg:
+ masm.negInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtendLowI8x16S:
+ masm.widenLowInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtendHighI8x16S:
+ masm.widenHighInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtendLowI8x16U:
+ masm.unsignedWidenLowInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtendHighI8x16U:
+ masm.unsignedWidenHighInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I32x4Neg:
+ masm.negInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtendLowI16x8S:
+ masm.widenLowInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtendHighI16x8S:
+ masm.widenHighInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtendLowI16x8U:
+ masm.unsignedWidenLowInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtendHighI16x8U:
+ masm.unsignedWidenHighInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4TruncSatF32x4S:
+ masm.truncSatFloat32x4ToInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I32x4TruncSatF32x4U:
+ masm.unsignedTruncSatFloat32x4ToInt32x4(src, dest,
+ ToFloatRegister(ins->temp()));
+ break;
+ case wasm::SimdOp::I64x2Neg:
+ masm.negInt64x2(src, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtendLowI32x4S:
+ masm.widenLowInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtendHighI32x4S:
+ masm.widenHighInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtendLowI32x4U:
+ masm.unsignedWidenLowInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2ExtendHighI32x4U:
+ masm.unsignedWidenHighInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Abs:
+ masm.absFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Neg:
+ masm.negFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Sqrt:
+ masm.sqrtFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4ConvertI32x4S:
+ masm.convertInt32x4ToFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4ConvertI32x4U:
+ masm.unsignedConvertInt32x4ToFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Abs:
+ masm.absFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Neg:
+ masm.negFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Sqrt:
+ masm.sqrtFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::V128Not:
+ masm.bitwiseNotSimd128(src, dest);
+ break;
+ case wasm::SimdOp::I8x16Popcnt:
+ masm.popcntInt8x16(src, dest, ToFloatRegister(ins->temp()));
+ break;
+ case wasm::SimdOp::I8x16Abs:
+ masm.absInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8Abs:
+ masm.absInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4Abs:
+ masm.absInt32x4(src, dest);
+ break;
+ case wasm::SimdOp::I64x2Abs:
+ masm.absInt64x2(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Ceil:
+ masm.ceilFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Floor:
+ masm.floorFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Trunc:
+ masm.truncFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F32x4Nearest:
+ masm.nearestFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Ceil:
+ masm.ceilFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Floor:
+ masm.floorFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Trunc:
+ masm.truncFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2Nearest:
+ masm.nearestFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F32x4DemoteF64x2Zero:
+ masm.convertFloat64x2ToFloat32x4(src, dest);
+ break;
+ case wasm::SimdOp::F64x2PromoteLowF32x4:
+ masm.convertFloat32x4ToFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2ConvertLowI32x4S:
+ masm.convertInt32x4ToFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::F64x2ConvertLowI32x4U:
+ masm.unsignedConvertInt32x4ToFloat64x2(src, dest);
+ break;
+ case wasm::SimdOp::I32x4TruncSatF64x2SZero:
+ masm.truncSatFloat64x2ToInt32x4(src, dest, ToFloatRegister(ins->temp()));
+ break;
+ case wasm::SimdOp::I32x4TruncSatF64x2UZero:
+ masm.unsignedTruncSatFloat64x2ToInt32x4(src, dest,
+ ToFloatRegister(ins->temp()));
+ break;
+ case wasm::SimdOp::I16x8ExtaddPairwiseI8x16S:
+ masm.extAddPairwiseInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I16x8ExtaddPairwiseI8x16U:
+ masm.unsignedExtAddPairwiseInt8x16(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtaddPairwiseI16x8S:
+ masm.extAddPairwiseInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4ExtaddPairwiseI16x8U:
+ masm.unsignedExtAddPairwiseInt16x8(src, dest);
+ break;
+ case wasm::SimdOp::I32x4RelaxedTruncF32x4S:
+ masm.truncFloat32x4ToInt32x4Relaxed(src, dest);
+ break;
+ case wasm::SimdOp::I32x4RelaxedTruncF32x4U:
+ masm.unsignedTruncFloat32x4ToInt32x4Relaxed(src, dest);
+ break;
+ case wasm::SimdOp::I32x4RelaxedTruncF64x2SZero:
+ masm.truncFloat64x2ToInt32x4Relaxed(src, dest);
+ break;
+ case wasm::SimdOp::I32x4RelaxedTruncF64x2UZero:
+ masm.unsignedTruncFloat64x2ToInt32x4Relaxed(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Unary SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ const LDefinition* dest = ins->output();
+ uint32_t imm = ins->imm();
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128AnyTrue:
+ masm.anyTrueSimd128(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I8x16AllTrue:
+ masm.allTrueInt8x16(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I16x8AllTrue:
+ masm.allTrueInt16x8(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I32x4AllTrue:
+ masm.allTrueInt32x4(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I64x2AllTrue:
+ masm.allTrueInt64x2(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I8x16Bitmask:
+ masm.bitmaskInt8x16(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I16x8Bitmask:
+ masm.bitmaskInt16x8(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I32x4Bitmask:
+ masm.bitmaskInt32x4(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I64x2Bitmask:
+ masm.bitmaskInt64x2(src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I8x16ExtractLaneS:
+ masm.extractLaneInt8x16(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I8x16ExtractLaneU:
+ masm.unsignedExtractLaneInt8x16(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I16x8ExtractLaneS:
+ masm.extractLaneInt16x8(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I16x8ExtractLaneU:
+ masm.unsignedExtractLaneInt16x8(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::I32x4ExtractLane:
+ masm.extractLaneInt32x4(imm, src, ToRegister(dest));
+ break;
+ case wasm::SimdOp::F32x4ExtractLane:
+ masm.extractLaneFloat32x4(imm, src, ToFloatRegister(dest));
+ break;
+ case wasm::SimdOp::F64x2ExtractLane:
+ masm.extractLaneFloat64x2(imm, src, ToFloatRegister(dest));
+ break;
+ default:
+ MOZ_CRASH("Reduce SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+ LWasmReduceAndBranchSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128AnyTrue:
+ // Set the zero flag if all of the lanes are zero, and branch on that.
+ masm.vptest(src, src);
+ emitBranch(Assembler::NotEqual, ins->ifTrue(), ins->ifFalse());
+ break;
+ case wasm::SimdOp::I8x16AllTrue:
+ case wasm::SimdOp::I16x8AllTrue:
+ case wasm::SimdOp::I32x4AllTrue:
+ case wasm::SimdOp::I64x2AllTrue: {
+ // Compare all lanes to zero, set the zero flag if none of the lanes are
+ // zero, and branch on that.
+ ScratchSimd128Scope tmp(masm);
+ masm.vpxor(tmp, tmp, tmp);
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16AllTrue:
+ masm.vpcmpeqb(Operand(src), tmp, tmp);
+ break;
+ case wasm::SimdOp::I16x8AllTrue:
+ masm.vpcmpeqw(Operand(src), tmp, tmp);
+ break;
+ case wasm::SimdOp::I32x4AllTrue:
+ masm.vpcmpeqd(Operand(src), tmp, tmp);
+ break;
+ case wasm::SimdOp::I64x2AllTrue:
+ masm.vpcmpeqq(Operand(src), tmp, tmp);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ masm.vptest(tmp, tmp);
+ emitBranch(Assembler::Equal, ins->ifTrue(), ins->ifFalse());
+ break;
+ }
+ case wasm::SimdOp::I16x8Bitmask: {
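+      // The bitmask is nonzero exactly when some lane has its sign bit set,
+      // so test all the 0x8000 bits at once and branch on a nonzero result.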
+ masm.bitwiseTestSimd128(SimdConstant::SplatX8(0x8000), src);
+ emitBranch(Assembler::NotEqual, ins->ifTrue(), ins->ifFalse());
+ break;
+ }
+ default:
+ MOZ_CRASH("Reduce-and-branch SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+ LWasmReduceSimd128ToInt64* ins) {
+#ifdef ENABLE_WASM_SIMD
+ FloatRegister src = ToFloatRegister(ins->src());
+ Register64 dest = ToOutRegister64(ins);
+ uint32_t imm = ins->imm();
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I64x2ExtractLane:
+ masm.extractLaneInt64x2(imm, src, dest);
+ break;
+ default:
+ MOZ_CRASH("Reduce SimdOp not implemented");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ const MWasmLoadLaneSimd128* mir = ins->mir();
+ const wasm::MemoryAccessDesc& access = mir->access();
+
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());
+
+ const LAllocation* value = ins->src();
+ Operand srcAddr = toMemoryAccessOperand(ins, offset);
+
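+  // Record this access at the current code offset so that a fault in the
+  // following instruction can be attributed to the wasm memory access.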
+ masm.append(access, masm.size());
+ switch (ins->laneSize()) {
+ case 1: {
+ masm.vpinsrb(ins->laneIndex(), srcAddr, ToFloatRegister(value),
+ ToFloatRegister(value));
+ break;
+ }
+ case 2: {
+ masm.vpinsrw(ins->laneIndex(), srcAddr, ToFloatRegister(value),
+ ToFloatRegister(value));
+ break;
+ }
+ case 4: {
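+      // INSERTPS encodes the destination lane in bits 4..5 of its immediate.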
+ masm.vinsertps(ins->laneIndex() << 4, srcAddr, ToFloatRegister(value),
+ ToFloatRegister(value));
+ break;
+ }
+ case 8: {
+ if (ins->laneIndex() == 0) {
+ masm.vmovlps(srcAddr, ToFloatRegister(value), ToFloatRegister(value));
+ } else {
+ masm.vmovhps(srcAddr, ToFloatRegister(value), ToFloatRegister(value));
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Unsupported load lane size");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ const MWasmStoreLaneSimd128* mir = ins->mir();
+ const wasm::MemoryAccessDesc& access = mir->access();
+
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());
+
+ const LAllocation* src = ins->src();
+ Operand destAddr = toMemoryAccessOperand(ins, offset);
+
+ masm.append(access, masm.size());
+ switch (ins->laneSize()) {
+ case 1: {
+ masm.vpextrb(ins->laneIndex(), ToFloatRegister(src), destAddr);
+ break;
+ }
+ case 2: {
+ masm.vpextrw(ins->laneIndex(), ToFloatRegister(src), destAddr);
+ break;
+ }
+ case 4: {
+ unsigned lane = ins->laneIndex();
+ if (lane == 0) {
+ masm.vmovss(ToFloatRegister(src), destAddr);
+ } else {
+ masm.vextractps(lane, ToFloatRegister(src), destAddr);
+ }
+ break;
+ }
+ case 8: {
+ if (ins->laneIndex() == 0) {
+ masm.vmovlps(ToFloatRegister(src), destAddr);
+ } else {
+ masm.vmovhps(ToFloatRegister(src), destAddr);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Unsupported store lane size");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
new file mode 100644
index 0000000000..74bcb91149
--- /dev/null
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
@@ -0,0 +1,189 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_CodeGenerator_x86_shared_h
+#define jit_x86_shared_CodeGenerator_x86_shared_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorX86Shared;
+class OutOfLineBailout;
+class OutOfLineUndoALUOperation;
+class OutOfLineLoadTypedArrayOutOfBounds;
+class MulNegativeZeroCheck;
+class ModOverflowCheck;
+class ReturnZero;
+class OutOfLineTableSwitch;
+
+using OutOfLineWasmTruncateCheck =
+ OutOfLineWasmTruncateCheckBase<CodeGeneratorX86Shared>;
+
+class CodeGeneratorX86Shared : public CodeGeneratorShared {
+ friend class MoveResolverX86;
+
+ template <typename T>
+ void bailout(const T& t, LSnapshot* snapshot);
+
+ protected:
+ CodeGeneratorX86Shared(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm);
+
+ // Load a NaN or zero into a register for an out of bounds AsmJS or static
+ // typed array load.
+ class OutOfLineLoadTypedArrayOutOfBounds
+ : public OutOfLineCodeBase<CodeGeneratorX86Shared> {
+ AnyRegister dest_;
+ Scalar::Type viewType_;
+
+ public:
+ OutOfLineLoadTypedArrayOutOfBounds(AnyRegister dest, Scalar::Type viewType)
+ : dest_(dest), viewType_(viewType) {}
+
+ AnyRegister dest() const { return dest_; }
+ Scalar::Type viewType() const { return viewType_; }
+ void accept(CodeGeneratorX86Shared* codegen) override {
+ codegen->visitOutOfLineLoadTypedArrayOutOfBounds(this);
+ }
+ };
+
+ NonAssertingLabel deoptLabel_;
+
+ Operand ToOperand(const LAllocation& a);
+ Operand ToOperand(const LAllocation* a);
+ Operand ToOperand(const LDefinition* def);
+
+#ifdef JS_PUNBOX64
+ Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
+ void bailoutIf(Assembler::DoubleCondition condition, LSnapshot* snapshot);
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ masm.cmpPtr(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs,
+ LSnapshot* snapshot) {
+ masm.testPtr(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ masm.cmp32(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ masm.test32(lhs, rhs);
+ bailoutIf(c, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
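+    // Booleans are stored in the low byte of the register; bail out if that
+    // byte is zero (false).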
+ masm.test32(reg, Imm32(0xFF));
+ bailoutIf(Assembler::Zero, snapshot);
+ }
+ void bailoutCvttsd2si(FloatRegister src, Register dest, LSnapshot* snapshot) {
+ Label bail;
+ masm.truncateDoubleToInt32(src, dest, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutCvttss2si(FloatRegister src, Register dest, LSnapshot* snapshot) {
+ Label bail;
+ masm.truncateFloat32ToInt32(src, dest, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+
+ bool generateOutOfLineCode();
+
+ void emitCompare(MCompare::CompareType type, const LAllocation* left,
+ const LAllocation* right);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ void emitBranch(Assembler::Condition cond, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse,
+ Assembler::NaNCond ifNaN = Assembler::NaN_HandledByCond);
+ void emitBranch(Assembler::DoubleCondition cond, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse);
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ cond = masm.testNull(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ cond = masm.testUndefined(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ cond = masm.testObject(cond, value);
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ masm.cmpPtr(reg, ImmWord(0));
+ emitBranch(cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register base);
+
+ void generateInvalidateEpilogue();
+
+ void canonicalizeIfDeterministic(Scalar::Type type, const LAllocation* value);
+
+ template <typename T>
+ Operand toMemoryAccessOperand(T* lir, int32_t disp);
+
+ public:
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation* ool);
+ void visitMulNegativeZeroCheck(MulNegativeZeroCheck* ool);
+ void visitModOverflowCheck(ModOverflowCheck* ool);
+ void visitReturnZero(ReturnZero* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ void visitOutOfLineLoadTypedArrayOutOfBounds(
+ OutOfLineLoadTypedArrayOutOfBounds* ool);
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+};
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorX86Shared> {
+ LSnapshot* snapshot_;
+
+ public:
+ explicit OutOfLineBailout(LSnapshot* snapshot) : snapshot_(snapshot) {}
+
+ void accept(CodeGeneratorX86Shared* codegen) override;
+
+ LSnapshot* snapshot() const { return snapshot_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_CodeGenerator_x86_shared_h */
diff --git a/js/src/jit/x86-shared/Constants-x86-shared.h b/js/src/jit/x86-shared/Constants-x86-shared.h
new file mode 100644
index 0000000000..6c59515b21
--- /dev/null
+++ b/js/src/jit/x86-shared/Constants-x86-shared.h
@@ -0,0 +1,326 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Constants_x86_shared_h
+#define jit_x86_shared_Constants_x86_shared_h
+
+#include "mozilla/Assertions.h"
+
+#include <iterator>
+#include <stddef.h>
+#include <stdint.h>
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+enum RegisterID : uint8_t {
+ rax,
+ rcx,
+ rdx,
+ rbx,
+ rsp,
+ rbp,
+ rsi,
+ rdi
+#ifdef JS_CODEGEN_X64
+ ,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15
+#endif
+ ,
+ invalid_reg
+};
+
+enum HRegisterID { ah = rsp, ch = rbp, dh = rsi, bh = rdi };
+
+enum XMMRegisterID
+// GCC < 8.0 has a bug with bitfields of enums with an underlying type.
+#if defined(__clang__) || __GNUC__ > 7
+ : uint8_t
+#endif
+{
+ xmm0 = 0,
+ xmm1,
+ xmm2,
+ xmm3,
+ xmm4,
+ xmm5,
+ xmm6,
+ xmm7
+#ifdef JS_CODEGEN_X64
+ ,
+ xmm8,
+ xmm9,
+ xmm10,
+ xmm11,
+ xmm12,
+ xmm13,
+ xmm14,
+ xmm15
+#endif
+ ,
+ invalid_xmm
+};
+
+inline const char* XMMRegName(XMMRegisterID reg) {
+ static const char* const names[] = {"%xmm0",
+ "%xmm1",
+ "%xmm2",
+ "%xmm3",
+ "%xmm4",
+ "%xmm5",
+ "%xmm6",
+ "%xmm7"
+#ifdef JS_CODEGEN_X64
+ ,
+ "%xmm8",
+ "%xmm9",
+ "%xmm10",
+ "%xmm11",
+ "%xmm12",
+ "%xmm13",
+ "%xmm14",
+ "%xmm15"
+#endif
+ };
+ MOZ_ASSERT(size_t(reg) < std::size(names));
+ return names[reg];
+}
+
+#ifdef JS_CODEGEN_X64
+inline const char* GPReg64Name(RegisterID reg) {
+ static const char* const names[] = {"%rax",
+ "%rcx",
+ "%rdx",
+ "%rbx",
+ "%rsp",
+ "%rbp",
+ "%rsi",
+ "%rdi"
+# ifdef JS_CODEGEN_X64
+ ,
+ "%r8",
+ "%r9",
+ "%r10",
+ "%r11",
+ "%r12",
+ "%r13",
+ "%r14",
+ "%r15"
+# endif
+ };
+ MOZ_ASSERT(size_t(reg) < std::size(names));
+ return names[reg];
+}
+#endif
+
+inline const char* GPReg32Name(RegisterID reg) {
+ static const char* const names[] = {"%eax",
+ "%ecx",
+ "%edx",
+ "%ebx",
+ "%esp",
+ "%ebp",
+ "%esi",
+ "%edi"
+#ifdef JS_CODEGEN_X64
+ ,
+ "%r8d",
+ "%r9d",
+ "%r10d",
+ "%r11d",
+ "%r12d",
+ "%r13d",
+ "%r14d",
+ "%r15d"
+#endif
+ };
+ MOZ_ASSERT(size_t(reg) < std::size(names));
+ return names[reg];
+}
+
+inline const char* GPReg16Name(RegisterID reg) {
+ static const char* const names[] = {"%ax",
+ "%cx",
+ "%dx",
+ "%bx",
+ "%sp",
+ "%bp",
+ "%si",
+ "%di"
+#ifdef JS_CODEGEN_X64
+ ,
+ "%r8w",
+ "%r9w",
+ "%r10w",
+ "%r11w",
+ "%r12w",
+ "%r13w",
+ "%r14w",
+ "%r15w"
+#endif
+ };
+ MOZ_ASSERT(size_t(reg) < std::size(names));
+ return names[reg];
+}
+
+inline const char* GPReg8Name(RegisterID reg) {
+ static const char* const names[] = {"%al",
+ "%cl",
+ "%dl",
+ "%bl"
+#ifdef JS_CODEGEN_X64
+ ,
+ "%spl",
+ "%bpl",
+ "%sil",
+ "%dil",
+ "%r8b",
+ "%r9b",
+ "%r10b",
+ "%r11b",
+ "%r12b",
+ "%r13b",
+ "%r14b",
+ "%r15b"
+#endif
+ };
+ MOZ_ASSERT(size_t(reg) < std::size(names));
+ return names[reg];
+}
+
+inline const char* GPRegName(RegisterID reg) {
+#ifdef JS_CODEGEN_X64
+ return GPReg64Name(reg);
+#else
+ return GPReg32Name(reg);
+#endif
+}
+
+inline bool HasSubregL(RegisterID reg) {
+#ifdef JS_CODEGEN_X64
+ // In 64-bit mode, all registers have an 8-bit lo subreg.
+ return true;
+#else
+ // In 32-bit mode, only the first four registers do.
+ return reg <= rbx;
+#endif
+}
+
+inline bool HasSubregH(RegisterID reg) {
+ // The first four registers always have h registers. However, note that
+ // on x64, h registers may not be used in instructions using REX
+ // prefixes. Also note that this may depend on what other registers are
+ // used!
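+  // (For example, encoding 4 in an 8-bit register field names %ah without a
+  // REX prefix but %spl when any REX prefix is present, so an instruction
+  // that needs REX for its other operands cannot address %ah.)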
+ return reg <= rbx;
+}
+
+inline HRegisterID GetSubregH(RegisterID reg) {
+ MOZ_ASSERT(HasSubregH(reg));
+ return HRegisterID(reg + 4);
+}
+
+inline const char* HRegName8(HRegisterID reg) {
+ static const char* const names[] = {"%ah", "%ch", "%dh", "%bh"};
+ size_t index = reg - GetSubregH(rax);
+ MOZ_ASSERT(index < std::size(names));
+ return names[index];
+}
+
+enum Condition {
+ ConditionO,
+ ConditionNO,
+ ConditionB,
+ ConditionAE,
+ ConditionE,
+ ConditionNE,
+ ConditionBE,
+ ConditionA,
+ ConditionS,
+ ConditionNS,
+ ConditionP,
+ ConditionNP,
+ ConditionL,
+ ConditionGE,
+ ConditionLE,
+ ConditionG,
+
+ ConditionC = ConditionB,
+ ConditionNC = ConditionAE
+};
+
+inline const char* CCName(Condition cc) {
+ static const char* const names[] = {"o ", "no", "b ", "ae", "e ", "ne",
+ "be", "a ", "s ", "ns", "p ", "np",
+ "l ", "ge", "le", "g "};
+ MOZ_ASSERT(size_t(cc) < std::size(names));
+ return names[cc];
+}
+
+// Conditions for CMP instructions (CMPSS, CMPSD, CMPPS, CMPPD, etc).
+enum ConditionCmp {
+ ConditionCmp_EQ = 0x0,
+ ConditionCmp_LT = 0x1,
+ ConditionCmp_LE = 0x2,
+ ConditionCmp_UNORD = 0x3,
+ ConditionCmp_NEQ = 0x4,
+ ConditionCmp_NLT = 0x5,
+ ConditionCmp_NLE = 0x6,
+ ConditionCmp_ORD = 0x7,
+ ConditionCmp_AVX_Enabled = 0x8,
+ ConditionCmp_GE = 0xD,
+};
+
+// Rounding modes for ROUNDSS / ROUNDSD.
+enum RoundingMode {
+ RoundToNearest = 0x0,
+ RoundDown = 0x1,
+ RoundUp = 0x2,
+ RoundToZero = 0x3
+};
+
+// Rounding modes for ROUNDPS / ROUNDPD. Note these are the same as for
+// RoundingMode above but incorporate the 'inexact' bit which says not to signal
+// exceptions for lost precision. It's not obvious that this bit is needed; it
+// was however suggested in the wasm SIMD proposal that led to these encodings.
+enum class SSERoundingMode {
+ RoundToNearest = 0x08,
+ RoundDown = 0x09,
+ RoundUp = 0x0A,
+ RoundToZero = 0x0B
+};
+
+// Test whether the given address will fit in an address immediate field.
+// This is always true on x86, but on x64 it's only true for addresses which
+// fit in the 32-bit immediate field.
+inline bool IsAddressImmediate(const void* address) {
+ intptr_t value = reinterpret_cast<intptr_t>(address);
+ int32_t immediate = static_cast<int32_t>(value);
+ return value == immediate;
+}
+
+// Convert the given address to a 32-bit immediate field value. This is a
+// no-op on x86, but on x64 it asserts that the address is actually a valid
+// address immediate.
+inline int32_t AddressImmediate(const void* address) {
+ MOZ_ASSERT(IsAddressImmediate(address));
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(address));
+}
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Constants_x86_shared_h */
diff --git a/js/src/jit/x86-shared/Encoding-x86-shared.h b/js/src/jit/x86-shared/Encoding-x86-shared.h
new file mode 100644
index 0000000000..d3d78f4e5e
--- /dev/null
+++ b/js/src/jit/x86-shared/Encoding-x86-shared.h
@@ -0,0 +1,508 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Encoding_x86_shared_h
+#define jit_x86_shared_Encoding_x86_shared_h
+
+#include <type_traits>
+
+#include "jit/x86-shared/Constants-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+static const size_t MaxInstructionSize = 16;
+
+// These enumerated values follow the Intel documentation Volume 2C [1],
+// Appendix A.2 and Appendix A.3.
+//
+// Operand size/types as listed in the Appendix A.2. Tables of the instructions
+// and their operands can be found in the Appendix A.3.
+//
+// B = reg (VEX.vvvv of VEX prefix)
+// E = reg/mem
+// G = reg (reg field of ModR/M)
+// U = xmm (R/M field of ModR/M)
+// V = xmm (reg field of ModR/M)
+// W = xmm/mem64
+// I = immediate
+// O = offset
+//
+// b = byte (8-bit)
+// w = word (16-bit)
+// v = register size
+// d = double (32-bit)
+// dq = double-quad (128-bit) (xmm)
+// ss = scalar float 32 (xmm)
+// ps = packed float 32 (xmm)
+// sd = scalar double (xmm)
+// pd = packed double (xmm)
+// y = 32/64-bit
+// z = 16/32/64-bit
+// vqp = (*)
+//
+// (*) The website [2] provides a convenient list of all instructions, but be
+// aware that it does not follow the Intel documentation naming, as the
+// following enumeration does. Do not use its names as a reference for adding
+// new instructions.
+//
+// [1]
+// http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-manual-325462.html
+// [2] http://ref.x86asm.net/geek.html
+//
+// OPn_NAME_DstSrc
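+//
+// For example, OP_ADD_EvGv (0x01) is the one-byte ADD whose destination is E
+// (a register or memory operand) of register size (v) and whose source is G
+// (the register named by the reg field of ModR/M), i.e. "add r/m, reg".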
+enum OneByteOpcodeID {
+ OP_NOP_00 = 0x00,
+ OP_ADD_EbGb = 0x00,
+ OP_ADD_EvGv = 0x01,
+ OP_ADD_GvEv = 0x03,
+ OP_ADD_EAXIv = 0x05,
+ OP_OR_EbGb = 0x08,
+ OP_OR_EvGv = 0x09,
+ OP_OR_GvEv = 0x0B,
+ OP_OR_EAXIv = 0x0D,
+ OP_2BYTE_ESCAPE = 0x0F,
+ OP_NOP_0F = 0x0F,
+ OP_ADC_GvEv = 0x13,
+ OP_SBB_GvEv = 0x1B,
+ OP_NOP_1F = 0x1F,
+ OP_AND_EbGb = 0x20,
+ OP_AND_EvGv = 0x21,
+ OP_AND_GvEv = 0x23,
+ OP_AND_EAXIv = 0x25,
+ OP_SUB_EbGb = 0x28,
+ OP_SUB_EvGv = 0x29,
+ OP_SUB_GvEv = 0x2B,
+ OP_SUB_EAXIv = 0x2D,
+ PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
+ OP_XOR_EbGb = 0x30,
+ OP_XOR_EvGv = 0x31,
+ OP_XOR_GvEv = 0x33,
+ OP_XOR_EAXIv = 0x35,
+ OP_CMP_EbGb = 0x38,
+ OP_CMP_EvGv = 0x39,
+ OP_CMP_GbEb = 0x3A,
+ OP_CMP_GvEv = 0x3B,
+ OP_CMP_EAXIb = 0x3C,
+ OP_CMP_EAXIv = 0x3D,
+#ifdef JS_CODEGEN_X64
+ PRE_REX = 0x40,
+#endif
+ OP_NOP_40 = 0x40,
+ OP_NOP_44 = 0x44,
+ OP_PUSH_EAX = 0x50,
+ OP_POP_EAX = 0x58,
+#ifdef JS_CODEGEN_X86
+ OP_PUSHA = 0x60,
+ OP_POPA = 0x61,
+#endif
+#ifdef JS_CODEGEN_X64
+ OP_MOVSXD_GvEv = 0x63,
+#endif
+ PRE_OPERAND_SIZE = 0x66,
+ PRE_SSE_66 = 0x66,
+ OP_NOP_66 = 0x66,
+ OP_PUSH_Iz = 0x68,
+ OP_IMUL_GvEvIz = 0x69,
+ OP_PUSH_Ib = 0x6a,
+ OP_IMUL_GvEvIb = 0x6b,
+ OP_JCC_rel8 = 0x70,
+ OP_GROUP1_EbIb = 0x80,
+ OP_NOP_80 = 0x80,
+ OP_GROUP1_EvIz = 0x81,
+ OP_GROUP1_EvIb = 0x83,
+ OP_TEST_EbGb = 0x84,
+ OP_NOP_84 = 0x84,
+ OP_TEST_EvGv = 0x85,
+ OP_XCHG_GbEb = 0x86,
+ OP_XCHG_GvEv = 0x87,
+ OP_MOV_EbGv = 0x88,
+ OP_MOV_EvGv = 0x89,
+ OP_MOV_GvEb = 0x8A,
+ OP_MOV_GvEv = 0x8B,
+ OP_LEA = 0x8D,
+ OP_GROUP1A_Ev = 0x8F,
+ OP_NOP = 0x90,
+ OP_PUSHFLAGS = 0x9C,
+ OP_POPFLAGS = 0x9D,
+ OP_CDQ = 0x99,
+ OP_MOV_EAXOv = 0xA1,
+ OP_MOV_OvEAX = 0xA3,
+ OP_TEST_EAXIb = 0xA8,
+ OP_TEST_EAXIv = 0xA9,
+ OP_MOV_EbIb = 0xB0,
+ OP_MOV_EAXIv = 0xB8,
+ OP_GROUP2_EvIb = 0xC1,
+ OP_ADDP_ST0_ST1 = 0xC1,
+ OP_RET_Iz = 0xC2,
+ PRE_VEX_C4 = 0xC4,
+ PRE_VEX_C5 = 0xC5,
+ OP_RET = 0xC3,
+ OP_GROUP11_EvIb = 0xC6,
+ OP_GROUP11_EvIz = 0xC7,
+ OP_INT3 = 0xCC,
+ OP_GROUP2_Ev1 = 0xD1,
+ OP_GROUP2_EvCL = 0xD3,
+ OP_FPU6 = 0xDD,
+ OP_FPU6_F32 = 0xD9,
+ OP_FPU6_ADDP = 0xDE,
+ OP_FILD = 0xDF,
+ OP_CALL_rel32 = 0xE8,
+ OP_JMP_rel32 = 0xE9,
+ OP_JMP_rel8 = 0xEB,
+ PRE_LOCK = 0xF0,
+ PRE_SSE_F2 = 0xF2,
+ PRE_SSE_F3 = 0xF3,
+ OP_HLT = 0xF4,
+ OP_GROUP3_EbIb = 0xF6,
+ OP_GROUP3_Ev = 0xF7,
+  OP_GROUP3_EvIz =
+      0xF7,  // OP_GROUP3_Ev has an immediate when the instruction is a test.
+ OP_GROUP5_Ev = 0xFF
+};
+
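+// Values of the ModR/M reg field that select the operation within the
+// immediate-count shift groups (opcodes 0x71, 0x72 and 0x73).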
+enum class ShiftID {
+ vpsrlx = 2,
+ vpsrldq = 3,
+ vpsrad = 4,
+ vpsllx = 6,
+ vpslldq = 7
+};
+
+enum TwoByteOpcodeID {
+ OP2_UD2 = 0x0B,
+ OP2_MOVSD_VsdWsd = 0x10,
+ OP2_MOVPS_VpsWps = 0x10,
+ OP2_MOVSD_WsdVsd = 0x11,
+ OP2_MOVPS_WpsVps = 0x11,
+ OP2_MOVDDUP_VqWq = 0x12,
+ OP2_MOVHLPS_VqUq = 0x12,
+ OP2_MOVSLDUP_VpsWps = 0x12,
+ OP2_MOVLPS_VqEq = 0x12,
+ OP2_MOVLPS_EqVq = 0x13,
+ OP2_UNPCKLPS_VsdWsd = 0x14,
+ OP2_UNPCKHPS_VsdWsd = 0x15,
+ OP2_MOVLHPS_VqUq = 0x16,
+ OP2_MOVSHDUP_VpsWps = 0x16,
+ OP2_MOVHPS_VqEq = 0x16,
+ OP2_MOVHPS_EqVq = 0x17,
+ OP2_MOVAPD_VsdWsd = 0x28,
+ OP2_MOVAPS_VsdWsd = 0x28,
+ OP2_MOVAPS_WsdVsd = 0x29,
+ OP2_CVTSI2SD_VsdEd = 0x2A,
+ OP2_CVTTSD2SI_GdWsd = 0x2C,
+ OP2_UCOMISD_VsdWsd = 0x2E,
+ OP2_CMOVCC_GvEv = 0x40,
+ OP2_MOVMSKPD_EdVd = 0x50,
+ OP2_ANDPS_VpsWps = 0x54,
+ OP2_ANDNPS_VpsWps = 0x55,
+ OP2_ORPS_VpsWps = 0x56,
+ OP2_XORPS_VpsWps = 0x57,
+ OP2_ADDSD_VsdWsd = 0x58,
+ OP2_ADDPS_VpsWps = 0x58,
+ OP2_ADDPD_VpdWpd = 0x58,
+ OP2_MULSD_VsdWsd = 0x59,
+ OP2_MULPD_VpdWpd = 0x59,
+ OP2_MULPS_VpsWps = 0x59,
+ OP2_CVTSS2SD_VsdEd = 0x5A,
+ OP2_CVTSD2SS_VsdEd = 0x5A,
+ OP2_CVTPS2PD_VpdWps = 0x5A,
+ OP2_CVTPD2PS_VpsWpd = 0x5A,
+ OP2_CVTTPS2DQ_VdqWps = 0x5B,
+ OP2_CVTDQ2PS_VpsWdq = 0x5B,
+ OP2_SUBSD_VsdWsd = 0x5C,
+ OP2_SUBPS_VpsWps = 0x5C,
+ OP2_SUBPD_VpdWpd = 0x5C,
+ OP2_MINSD_VsdWsd = 0x5D,
+ OP2_MINSS_VssWss = 0x5D,
+ OP2_MINPS_VpsWps = 0x5D,
+ OP2_MINPD_VpdWpd = 0x5D,
+ OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_DIVPS_VpsWps = 0x5E,
+ OP2_DIVPD_VpdWpd = 0x5E,
+ OP2_MAXSD_VsdWsd = 0x5F,
+ OP2_MAXSS_VssWss = 0x5F,
+ OP2_MAXPS_VpsWps = 0x5F,
+ OP2_MAXPD_VpdWpd = 0x5F,
+ OP2_SQRTSD_VsdWsd = 0x51,
+ OP2_SQRTSS_VssWss = 0x51,
+ OP2_SQRTPS_VpsWps = 0x51,
+ OP2_SQRTPD_VpdWpd = 0x51,
+ OP2_RSQRTPS_VpsWps = 0x52,
+ OP2_RCPPS_VpsWps = 0x53,
+ OP2_ANDPD_VpdWpd = 0x54,
+ OP2_ORPD_VpdWpd = 0x56,
+ OP2_XORPD_VpdWpd = 0x57,
+ OP2_PUNPCKLBW_VdqWdq = 0x60,
+ OP2_PUNPCKLWD_VdqWdq = 0x61,
+ OP2_PUNPCKLDQ_VdqWdq = 0x62,
+ OP2_PACKSSWB_VdqWdq = 0x63,
+ OP2_PCMPGTB_VdqWdq = 0x64,
+ OP2_PCMPGTW_VdqWdq = 0x65,
+ OP2_PCMPGTD_VdqWdq = 0x66,
+ OP2_PACKUSWB_VdqWdq = 0x67,
+ OP2_PUNPCKHBW_VdqWdq = 0x68,
+ OP2_PUNPCKHWD_VdqWdq = 0x69,
+ OP2_PUNPCKHDQ_VdqWdq = 0x6A,
+ OP2_PACKSSDW_VdqWdq = 0x6B,
+ OP2_PUNPCKLQDQ_VdqWdq = 0x6C,
+ OP2_PUNPCKHQDQ_VdqWdq = 0x6D,
+ OP2_MOVD_VdEd = 0x6E,
+ OP2_MOVDQ_VsdWsd = 0x6F,
+ OP2_MOVDQ_VdqWdq = 0x6F,
+ OP2_PSHUFD_VdqWdqIb = 0x70,
+ OP2_PSHUFLW_VdqWdqIb = 0x70,
+ OP2_PSHUFHW_VdqWdqIb = 0x70,
+ OP2_PSLLW_UdqIb = 0x71,
+ OP2_PSRAW_UdqIb = 0x71,
+ OP2_PSRLW_UdqIb = 0x71,
+ OP2_PSLLD_UdqIb = 0x72,
+ OP2_PSRAD_UdqIb = 0x72,
+ OP2_PSRLD_UdqIb = 0x72,
+ OP2_PSRLDQ_Vd = 0x73,
+ OP2_PCMPEQB_VdqWdq = 0x74,
+ OP2_PCMPEQW_VdqWdq = 0x75,
+ OP2_PCMPEQD_VdqWdq = 0x76,
+ OP2_HADDPD = 0x7C,
+ OP2_MOVD_EdVd = 0x7E,
+ OP2_MOVQ_VdWd = 0x7E,
+ OP2_MOVDQ_WdqVdq = 0x7F,
+ OP2_JCC_rel32 = 0x80,
+ OP_SETCC = 0x90,
+ OP2_SHLD = 0xA4,
+ OP2_SHLD_GvEv = 0xA5,
+ OP2_SHRD = 0xAC,
+ OP2_SHRD_GvEv = 0xAD,
+ OP_FENCE = 0xAE,
+ OP2_IMUL_GvEv = 0xAF,
+ OP2_CMPXCHG_GvEb = 0xB0,
+ OP2_CMPXCHG_GvEw = 0xB1,
+ OP2_POPCNT_GvEv = 0xB8,
+ OP2_BSF_GvEv = 0xBC,
+ OP2_TZCNT_GvEv = 0xBC,
+ OP2_BSR_GvEv = 0xBD,
+ OP2_LZCNT_GvEv = 0xBD,
+ OP2_MOVSX_GvEb = 0xBE,
+ OP2_MOVSX_GvEw = 0xBF,
+ OP2_MOVZX_GvEb = 0xB6,
+ OP2_MOVZX_GvEw = 0xB7,
+ OP2_XADD_EbGb = 0xC0,
+ OP2_XADD_EvGv = 0xC1,
+ OP2_CMPPS_VpsWps = 0xC2,
+ OP2_CMPPD_VpdWpd = 0xC2,
+ OP2_PINSRW = 0xC4,
+ OP2_PEXTRW_GdUdIb = 0xC5,
+ OP2_SHUFPS_VpsWpsIb = 0xC6,
+ OP2_SHUFPD_VpdWpdIb = 0xC6,
+ OP2_CMPXCHGNB = 0xC7, // CMPXCHG8B; CMPXCHG16B with REX
+ OP2_BSWAP = 0xC8,
+ OP2_PSRLW_VdqWdq = 0xD1,
+ OP2_PSRLD_VdqWdq = 0xD2,
+ OP2_PSRLQ_VdqWdq = 0xD3,
+ OP2_PADDQ_VdqWdq = 0xD4,
+ OP2_PMULLW_VdqWdq = 0xD5,
+ OP2_MOVQ_WdVd = 0xD6,
+ OP2_PMOVMSKB_EdVd = 0xD7,
+ OP2_PSUBUSB_VdqWdq = 0xD8,
+ OP2_PSUBUSW_VdqWdq = 0xD9,
+ OP2_PMINUB_VdqWdq = 0xDA,
+ OP2_PANDDQ_VdqWdq = 0xDB,
+ OP2_PADDUSB_VdqWdq = 0xDC,
+ OP2_PADDUSW_VdqWdq = 0xDD,
+ OP2_PMAXUB_VdqWdq = 0xDE,
+ OP2_PANDNDQ_VdqWdq = 0xDF,
+ OP2_PAVGB_VdqWdq = 0xE0,
+ OP2_PSRAW_VdqWdq = 0xE1,
+ OP2_PSRAD_VdqWdq = 0xE2,
+ OP2_PAVGW_VdqWdq = 0xE3,
+ OP2_PMULHUW_VdqWdq = 0xE4,
+ OP2_PMULHW_VdqWdq = 0xE5,
+ OP2_CVTDQ2PD_VpdWdq = 0xE6,
+ OP2_CVTTPD2DQ_VdqWpd = 0xE6,
+ OP2_PSUBSB_VdqWdq = 0xE8,
+ OP2_PSUBSW_VdqWdq = 0xE9,
+ OP2_PMINSW_VdqWdq = 0xEA,
+ OP2_PORDQ_VdqWdq = 0xEB,
+ OP2_PADDSB_VdqWdq = 0xEC,
+ OP2_PADDSW_VdqWdq = 0xED,
+ OP2_PMAXSW_VdqWdq = 0xEE,
+ OP2_PXORDQ_VdqWdq = 0xEF,
+ OP2_PSLLW_VdqWdq = 0xF1,
+ OP2_PSLLD_VdqWdq = 0xF2,
+ OP2_PSLLQ_VdqWdq = 0xF3,
+ OP2_PMULUDQ_VdqWdq = 0xF4,
+ OP2_PMADDWD_VdqWdq = 0xF5,
+ OP2_PSUBB_VdqWdq = 0xF8,
+ OP2_PSUBW_VdqWdq = 0xF9,
+ OP2_PSUBD_VdqWdq = 0xFA,
+ OP2_PSUBQ_VdqWdq = 0xFB,
+ OP2_PADDB_VdqWdq = 0xFC,
+ OP2_PADDW_VdqWdq = 0xFD,
+ OP2_PADDD_VdqWdq = 0xFE
+};
+
+enum ThreeByteOpcodeID {
+ OP3_PSHUFB_VdqWdq = 0x00,
+ OP3_PHADDD_VdqWdq = 0x02,
+ OP3_PMADDUBSW_VdqWdq = 0x04,
+ OP3_ROUNDPS_VpsWps = 0x08,
+ OP3_ROUNDPD_VpdWpd = 0x09,
+ OP3_ROUNDSS_VsdWsd = 0x0A,
+ OP3_ROUNDSD_VsdWsd = 0x0B,
+ OP3_PMULHRSW_VdqWdq = 0x0B,
+ OP3_BLENDPS_VpsWpsIb = 0x0C,
+ OP3_PBLENDW_VdqWdqIb = 0x0E,
+ OP3_PALIGNR_VdqWdqIb = 0x0F,
+ OP3_PBLENDVB_VdqWdq = 0x10,
+ OP3_BLENDVPS_VdqWdq = 0x14,
+ OP3_PEXTRB_EvVdqIb = 0x14,
+ OP3_PEXTRW_EwVdqIb = 0x15,
+ OP3_BLENDVPD_VdqWdq = 0x15,
+ OP3_PEXTRD_EvVdqIb = 0x16,
+ OP3_PEXTRQ_EvVdqIb = 0x16,
+ OP3_PTEST_VdVd = 0x17,
+ OP3_EXTRACTPS_EdVdqIb = 0x17,
+ OP3_VBROADCASTSS_VxWd = 0x18,
+ OP3_PABSB_VdqWdq = 0x1C,
+ OP3_PABSW_VdqWdq = 0x1D,
+ OP3_PABSD_VdqWdq = 0x1E,
+ OP3_PINSRB_VdqEvIb = 0x20,
+ OP3_PMOVSXBW_VdqWdq = 0x20,
+ OP3_INSERTPS_VpsUps = 0x21,
+ OP3_PINSRD_VdqEvIb = 0x22,
+ OP3_PINSRQ_VdqEvIb = 0x22,
+ OP3_PMOVSXWD_VdqWdq = 0x23,
+ OP3_PMOVSXDQ_VdqWdq = 0x25,
+ OP3_PMULDQ_VdqWdq = 0x28,
+ OP3_PCMPEQQ_VdqWdq = 0x29,
+ OP3_PACKUSDW_VdqWdq = 0x2B,
+ OP3_PMOVZXBW_VdqWdq = 0x30,
+ OP3_PMOVZXWD_VdqWdq = 0x33,
+ OP3_PMOVZXDQ_VdqWdq = 0x35,
+ OP3_PCMPGTQ_VdqWdq = 0x37,
+ OP3_PMINSB_VdqWdq = 0x38,
+ OP3_PMINSD_VdqWdq = 0x39,
+ OP3_PMINUW_VdqWdq = 0x3A,
+ OP3_PMINUD_VdqWdq = 0x3B,
+ OP3_PMAXSB_VdqWdq = 0x3C,
+ OP3_PMAXSD_VdqWdq = 0x3D,
+ OP3_PMAXUW_VdqWdq = 0x3E,
+ OP3_PMAXUD_VdqWdq = 0x3F,
+ OP3_PMULLD_VdqWdq = 0x40,
+ OP3_VBLENDVPS_VdqWdq = 0x4A,
+ OP3_VBLENDVPD_VdqWdq = 0x4B,
+ OP3_VPBLENDVB_VdqWdq = 0x4C,
+ OP3_VBROADCASTD_VxWx = 0x58,
+ OP3_VBROADCASTQ_VxWx = 0x59,
+ OP3_VBROADCASTB_VxWx = 0x78,
+ OP3_VBROADCASTW_VxWx = 0x79,
+ OP3_VFMADD231PS_VxHxWx = 0xB8,
+ OP3_VFMADD231PD_VxHxWx = 0xB8,
+ OP3_VFNMADD231PS_VxHxWx = 0xBC,
+ OP3_VFNMADD231PD_VxHxWx = 0xBC,
+ OP3_SHLX_GyEyBy = 0xF7,
+ OP3_SARX_GyEyBy = 0xF7,
+ OP3_SHRX_GyEyBy = 0xF7,
+};
+
+// Test whether the given opcode should be printed with its operands reversed.
+inline bool IsXMMReversedOperands(TwoByteOpcodeID opcode) {
+ switch (opcode) {
+ case OP2_MOVSD_WsdVsd: // also OP2_MOVPS_WpsVps
+ case OP2_MOVAPS_WsdVsd:
+ case OP2_MOVDQ_WdqVdq:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+enum ThreeByteEscape { ESCAPE_38 = 0x38, ESCAPE_3A = 0x3A };
+
+enum VexOperandType { VEX_PS = 0, VEX_PD = 1, VEX_SS = 2, VEX_SD = 3 };
+
+inline OneByteOpcodeID jccRel8(Condition cond) {
+ return OneByteOpcodeID(OP_JCC_rel8 + std::underlying_type_t<Condition>(cond));
+}
+inline TwoByteOpcodeID jccRel32(Condition cond) {
+ return TwoByteOpcodeID(OP2_JCC_rel32 +
+ std::underlying_type_t<Condition>(cond));
+}
+inline TwoByteOpcodeID setccOpcode(Condition cond) {
+ return TwoByteOpcodeID(OP_SETCC + std::underlying_type_t<Condition>(cond));
+}
+inline TwoByteOpcodeID cmovccOpcode(Condition cond) {
+ return TwoByteOpcodeID(OP2_CMOVCC_GvEv +
+ std::underlying_type_t<Condition>(cond));
+}
+
+enum GroupOpcodeID {
+ GROUP1_OP_ADD = 0,
+ GROUP1_OP_OR = 1,
+ GROUP1_OP_ADC = 2,
+ GROUP1_OP_SBB = 3,
+ GROUP1_OP_AND = 4,
+ GROUP1_OP_SUB = 5,
+ GROUP1_OP_XOR = 6,
+ GROUP1_OP_CMP = 7,
+
+ GROUP1A_OP_POP = 0,
+
+ GROUP2_OP_ROL = 0,
+ GROUP2_OP_ROR = 1,
+ GROUP2_OP_SHL = 4,
+ GROUP2_OP_SHR = 5,
+ GROUP2_OP_SAR = 7,
+
+ GROUP3_OP_TEST = 0,
+ GROUP3_OP_NOT = 2,
+ GROUP3_OP_NEG = 3,
+ GROUP3_OP_MUL = 4,
+ GROUP3_OP_IMUL = 5,
+ GROUP3_OP_DIV = 6,
+ GROUP3_OP_IDIV = 7,
+
+ GROUP5_OP_INC = 0,
+ GROUP5_OP_DEC = 1,
+ GROUP5_OP_CALLN = 2,
+ GROUP5_OP_JMPN = 4,
+ GROUP5_OP_PUSH = 6,
+
+ FILD_OP_64 = 5,
+
+ FPU6_OP_FLD = 0,
+ FPU6_OP_FISTTP = 1,
+ FPU6_OP_FSTP = 3,
+ FPU6_OP_FLDCW = 5,
+ FPU6_OP_FISTP = 7,
+
+ GROUP11_MOV = 0
+};
+
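+// The rbp and rsp register numbers are overloaded in ModR/M and SIB encoding:
+// a base of rbp with mod 00 means "no base, 32-bit displacement", an r/m of
+// rsp means "a SIB byte follows", and an index of rsp in a SIB byte means "no
+// index". r13 and r12 need the same special handling when REX.B/REX.X extend
+// the corresponding fields.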
+static const RegisterID noBase = rbp;
+static const RegisterID hasSib = rsp;
+static const RegisterID noIndex = rsp;
+#ifdef JS_CODEGEN_X64
+static const RegisterID noBase2 = r13;
+static const RegisterID hasSib2 = r12;
+#endif
+
+enum ModRmMode {
+ ModRmMemoryNoDisp,
+ ModRmMemoryDisp8,
+ ModRmMemoryDisp32,
+ ModRmRegister
+};
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Encoding_x86_shared_h */
diff --git a/js/src/jit/x86-shared/LIR-x86-shared.h b/js/src/jit/x86-shared/LIR-x86-shared.h
new file mode 100644
index 0000000000..27f9f86468
--- /dev/null
+++ b/js/src/jit/x86-shared/LIR-x86-shared.h
@@ -0,0 +1,304 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_LIR_x86_shared_h
+#define jit_x86_shared_LIR_x86_shared_h
+
+namespace js {
+namespace jit {
+
+class LDivI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivI)
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ if (mir()->isTruncated()) {
+ if (mir()->canBeNegativeZero()) {
+ return mir()->canBeNegativeOverflow()
+ ? "Truncate_NegativeZero_NegativeOverflow"
+ : "Truncate_NegativeZero";
+ }
+ return mir()->canBeNegativeOverflow() ? "Truncate_NegativeOverflow"
+ : "Truncate";
+ }
+ if (mir()->canBeNegativeZero()) {
+ return mir()->canBeNegativeOverflow() ? "NegativeZero_NegativeOverflow"
+ : "NegativeZero";
+ }
+ return mir()->canBeNegativeOverflow() ? "NegativeOverflow" : nullptr;
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+// Signed division by a power-of-two constant.
+class LDivPowTwoI : public LBinaryMath<0> {
+ const int32_t shift_;
+ const bool negativeDivisor_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, const LAllocation& lhsCopy, int32_t shift,
+ bool negativeDivisor)
+ : LBinaryMath(classOpcode),
+ shift_(shift),
+ negativeDivisor_(negativeDivisor) {
+ setOperand(0, lhs);
+ setOperand(1, lhsCopy);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ const LAllocation* numeratorCopy() { return getOperand(1); }
+ int32_t shift() const { return shift_; }
+ bool negativeDivisor() const { return negativeDivisor_; }
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LDivOrModConstantI : public LInstructionHelper<1, 1, 1> {
+ const int32_t denominator_;
+
+ public:
+ LIR_HEADER(DivOrModConstantI)
+
+ LDivOrModConstantI(const LAllocation& lhs, int32_t denominator,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode), denominator_(denominator) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ int32_t denominator() const { return denominator_; }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeNegativeDividend() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeDividend();
+ }
+};
+
+class LModI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(ModI)
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ const LDefinition* remainder() { return getDef(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+// This class performs a simple x86 'div', yielding either a quotient or
+// remainder depending on whether this instruction is defined to output eax
+// (quotient) or edx (remainder).
+class LUDivOrMod : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UDivOrMod);
+
+ LUDivOrMod(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ bool trapOnError() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->trapOnError();
+ }
+ return mir_->toDiv()->trapOnError();
+ }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
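(As an aside on the comment above: one hardware division producing both results
has the same shape as std::div in standard C++. The snippet below is purely an
analogy for the eax/edx convention, not SpiderMonkey code.)

    #include <cstdio>
    #include <cstdlib>

    int main() {
      // One division, two results: .quot plays the role of eax, .rem of edx.
      std::div_t r = std::div(17, 5);
      std::printf("quotient=%d remainder=%d\n", r.quot, r.rem);  // 3 and 2
      return 0;
    }
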
+class LUDivOrModConstant : public LInstructionHelper<1, 1, 1> {
+ const uint32_t denominator_;
+
+ public:
+ LIR_HEADER(UDivOrModConstant)
+
+ LUDivOrModConstant(const LAllocation& lhs, uint32_t denominator,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode), denominator_(denominator) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ uint32_t denominator() const { return denominator_; }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeNegativeDividend() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeDividend();
+ }
+ bool trapOnError() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->trapOnError();
+ }
+ return mir_->toDiv()->trapOnError();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI)
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ }
+
+ int32_t shift() const { return shift_; }
+ const LDefinition* remainder() { return getDef(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+// Takes a tableswitch with an integer to decide
+class LTableSwitch : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(TableSwitch)
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ const LAllocation* index() { return getOperand(0); }
+ const LDefinition* tempInt() { return getTemp(0); }
+ const LDefinition* tempPointer() { return getTemp(1); }
+};
+
+// Takes a tableswitch with a value to decide
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(TableSwitchV)
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() { return getTemp(0); }
+ const LDefinition* tempFloat() { return getTemp(1); }
+ const LDefinition* tempPointer() { return getTemp(2); }
+};
+
+class LMulI : public LBinaryMath<0, 1> {
+ public:
+ LIR_HEADER(MulI)
+
+ LMulI(const LAllocation& lhs, const LAllocation& rhs,
+ const LAllocation& lhsCopy)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setOperand(2, lhsCopy);
+ }
+
+ const char* extraName() const {
+ return (mir()->mode() == MMul::Integer)
+ ? "Integer"
+ : (mir()->canBeNegativeZero() ? "CanBeNegativeZero" : nullptr);
+ }
+
+ MMul* mir() const { return mir_->toMul(); }
+ const LAllocation* lhsCopy() { return this->getOperand(2); }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, INT64_PIECES, 1> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ LInt64ToFloatingPoint(const LInt64Allocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ setTemp(0, temp);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_LIR_x86_shared_h */
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.cpp b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
new file mode 100644
index 0000000000..bef178b2f5
--- /dev/null
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -0,0 +1,1863 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/Lowering-x86-shared.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Abs;
+using mozilla::FloorLog2;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+LTableSwitch* LIRGeneratorX86Shared::newLTableSwitch(
+ const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV* LIRGeneratorX86Shared::newLTableSwitchV(
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
+ tempDouble(), temp(), tableswitch);
+}
+
+void LIRGenerator::visitPowHalf(MPowHalf* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
+ define(lir, ins);
+}
+
+void LIRGeneratorX86Shared::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegisterAtStart(lhs));
+
+ // Shift operand should be constant or, unless BMI2 is available, in register
+ // ecx. x86 can't shift by a non-ecx register.
+ if (rhs->isConstant()) {
+ ins->setOperand(1, useOrConstantAtStart(rhs));
+ } else if (Assembler::HasBMI2() && !mir->isRotate()) {
+ ins->setOperand(1, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useRegister(rhs)
+ : useRegisterAtStart(rhs));
+ } else {
+ ins->setOperand(1, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useFixed(rhs, ecx)
+ : useFixedAtStart(rhs, ecx));
+ }
+
+ defineReuseInput(ins, mir, 0);
+}
+
+template <size_t Temps>
+void LIRGeneratorX86Shared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+#if defined(JS_NUNBOX32)
+ if (mir->isRotate()) {
+ ins->setTemp(0, temp());
+ }
+#endif
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES,
+ "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES,
+ "Assume Count is located at INT64_PIECES.");
+
+ // Shift operand should be constant or, unless BMI2 is available, in register
+ // ecx. x86 can't shift by a non-ecx register.
+ if (rhs->isConstant()) {
+ ins->setOperand(INT64_PIECES, useOrConstantAtStart(rhs));
+#ifdef JS_CODEGEN_X64
+ } else if (Assembler::HasBMI2() && !mir->isRotate()) {
+ ins->setOperand(INT64_PIECES, useRegister(rhs));
+#endif
+ } else {
+ // The operands are int64, but we only care about the lower 32 bits of
+ // the RHS. On 32-bit, the code below will load that part in ecx and
+ // will discard the upper half.
+ ensureDefined(rhs);
+ LUse use(ecx);
+ use.setVirtualRegister(rhs->virtualRegister());
+ ins->setOperand(INT64_PIECES, use);
+ }
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
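
(The "lower 32 bits of the RHS" remark above can be checked with a small
stand-alone sketch in plain C++, not part of this patch: wasm masks 64-bit
shift counts modulo 64, and that mask fits entirely in the low half.)

    #include <cassert>
    #include <cstdint>

    int main() {
      // Wasm masks a 64-bit shift count modulo 64, so only the low six bits
      // matter -- and they live in the low 32-bit half that the 32-bit
      // lowering loads into ecx, discarding the upper half.
      uint64_t count = (uint64_t(0xDEADBEEF) << 32) | 3;  // garbage upper half
      assert((count & 63) == 3);
      assert((uint32_t(count) & 63) == 3);  // the low half alone is enough
      return 0;
    }
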
+
+template void LIRGeneratorX86Shared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorX86Shared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+void LIRGeneratorX86Shared::lowerForCompareI64AndBranch(
+ MTest* mir, MCompare* comp, JSOp op, MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ auto* lir = new (alloc())
+ LCompareI64AndBranch(comp, op, useInt64Register(left),
+ useInt64OrConstant(right), ifTrue, ifFalse);
+ add(lir, mir);
+}
+
+void LIRGeneratorX86Shared::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegisterAtStart(input));
+ defineReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorX86Shared::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegisterAtStart(lhs));
+ ins->setOperand(1, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useOrConstant(rhs)
+ : useOrConstantAtStart(rhs));
+ defineReuseInput(ins, mir, 0);
+}
+
+template <size_t Temps>
+void LIRGeneratorX86Shared::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ // Without AVX, we'll need to use the x86 encodings where one of the
+ // inputs must be the same location as the output.
+ if (!Assembler::HasAVX()) {
+ ins->setOperand(0, useRegisterAtStart(lhs));
+ ins->setOperand(
+ 1, willHaveDifferentLIRNodes(lhs, rhs) ? use(rhs) : useAtStart(rhs));
+ defineReuseInput(ins, mir, 0);
+ } else {
+ ins->setOperand(0, useRegisterAtStart(lhs));
+ ins->setOperand(1, useAtStart(rhs));
+ define(ins, mir);
+ }
+}
+
+template void LIRGeneratorX86Shared::lowerForFPU(
+ LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+template void LIRGeneratorX86Shared::lowerForFPU(
+ LInstructionHelper<1, 2, 1>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+void LIRGeneratorX86Shared::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
+ MInstruction* mir,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+void LIRGeneratorX86Shared::lowerNegI(MInstruction* ins, MDefinition* input) {
+ defineReuseInput(new (alloc()) LNegI(useRegisterAtStart(input)), ins, 0);
+}
+
+void LIRGeneratorX86Shared::lowerNegI64(MInstruction* ins, MDefinition* input) {
+ defineInt64ReuseInput(new (alloc()) LNegI64(useInt64RegisterAtStart(input)),
+ ins, 0);
+}
+
+void LIRGenerator::visitAbs(MAbs* ins) {
+ defineReuseInput(allocateAbs(ins, useRegisterAtStart(ins->input())), ins, 0);
+}
+
+void LIRGeneratorX86Shared::lowerMulI(MMul* mul, MDefinition* lhs,
+ MDefinition* rhs) {
+ // Note: If we need a negative zero check, lhs is used twice.
+ LAllocation lhsCopy = mul->canBeNegativeZero() ? use(lhs) : LAllocation();
+ LMulI* lir = new (alloc())
+ LMulI(useRegisterAtStart(lhs),
+ willHaveDifferentLIRNodes(lhs, rhs) ? useOrConstant(rhs)
+ : useOrConstantAtStart(rhs),
+ lhsCopy);
+ if (mul->fallible()) {
+ assignSnapshot(lir, mul->bailoutKind());
+ }
+ defineReuseInput(lir, mul, 0);
+}
+
+void LIRGeneratorX86Shared::lowerDivI(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+
+ // Division by powers of two can be done by shifting, and division by
+ // other numbers can be done by a reciprocal multiplication technique.
+ int32_t shift = FloorLog2(Abs(rhs));
+ if (rhs != 0 && uint32_t(1) << shift == Abs(rhs)) {
+ LAllocation lhs = useRegisterAtStart(div->lhs());
+ LDivPowTwoI* lir;
+ // When the result is truncated and the remainder may be non-zero, we have
+ // to round the result toward 0. This requires an extra register to round
+ // up or down depending on whether the left-hand side is negative.
+ bool needRoundNeg = div->canBeNegativeDividend() && div->isTruncated();
+ if (!needRoundNeg) {
+ // Numerator is unsigned, so does not need adjusting.
+ lir = new (alloc()) LDivPowTwoI(lhs, lhs, shift, rhs < 0);
+ } else {
+ // Numerator might be signed, and needs adjusting, and an extra lhs copy
+ // is needed to round the result of the integer division towards zero.
+ lir = new (alloc())
+ LDivPowTwoI(lhs, useRegister(div->lhs()), shift, rhs < 0);
+ }
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ defineReuseInput(lir, div, 0);
+ return;
+ }
+ if (rhs != 0) {
+ LDivOrModConstantI* lir;
+ lir = new (alloc())
+ LDivOrModConstantI(useRegister(div->lhs()), rhs, tempFixed(eax));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ defineFixed(lir, div, LAllocation(AnyRegister(edx)));
+ return;
+ }
+ }
+
+ LDivI* lir = new (alloc())
+ LDivI(useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(edx));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ defineFixed(lir, div, LAllocation(AnyRegister(eax)));
+}
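
(The power-of-two path above must honor truncation toward zero even for
negative dividends. Below is a minimal scalar sketch of that adjustment in
plain C++ -- the helper name is invented, and it assumes arithmetic right
shift of negative values, which holds on the x86 compilers this backend
targets. A negative divisor additionally negates the result; that part is
omitted.)

    #include <cassert>
    #include <cstdint>

    // A plain arithmetic shift rounds toward negative infinity, so negative
    // dividends are biased by (2^shift - 1) first; this is why LDivPowTwoI
    // takes an extra copy of the lhs when the dividend can be negative.
    static int32_t DivPowTwoTowardZero(int32_t lhs, int32_t shift) {
      int32_t bias = (lhs < 0) ? ((int32_t(1) << shift) - 1) : 0;
      return (lhs + bias) >> shift;
    }

    int main() {
      assert(DivPowTwoTowardZero(7, 1) == 3);    // 7 / 2
      assert(DivPowTwoTowardZero(-7, 1) == -3);  // rounds toward zero, not -4
      assert(DivPowTwoTowardZero(-8, 2) == -2);  // exact, no rounding needed
      return 0;
    }
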
+
+void LIRGeneratorX86Shared::lowerModI(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(Abs(rhs));
+ if (rhs != 0 && uint32_t(1) << shift == Abs(rhs)) {
+ LModPowTwoI* lir =
+ new (alloc()) LModPowTwoI(useRegisterAtStart(mod->lhs()), shift);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ defineReuseInput(lir, mod, 0);
+ return;
+ }
+ if (rhs != 0) {
+ LDivOrModConstantI* lir;
+ lir = new (alloc())
+ LDivOrModConstantI(useRegister(mod->lhs()), rhs, tempFixed(edx));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ defineFixed(lir, mod, LAllocation(AnyRegister(eax)));
+ return;
+ }
+ }
+
+ LModI* lir = new (alloc())
+ LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(eax));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ defineFixed(lir, mod, LAllocation(AnyRegister(edx)));
+}
+
+void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
+ switch (ins->type()) {
+ case MIRType::Int32:
+ defineReuseInput(new (alloc()) LNegI(useRegisterAtStart(ins->input())),
+ ins, 0);
+ break;
+ case MIRType::Float32:
+ defineReuseInput(new (alloc()) LNegF(useRegisterAtStart(ins->input())),
+ ins, 0);
+ break;
+ case MIRType::Double:
+ defineReuseInput(new (alloc()) LNegD(useRegisterAtStart(ins->input())),
+ ins, 0);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void LIRGeneratorX86Shared::lowerWasmSelectI(MWasmSelect* select) {
+ auto* lir = new (alloc())
+ LWasmSelect(useRegisterAtStart(select->trueExpr()),
+ useAny(select->falseExpr()), useRegister(select->condExpr()));
+ defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
+}
+
+void LIRGeneratorX86Shared::lowerWasmSelectI64(MWasmSelect* select) {
+ auto* lir = new (alloc()) LWasmSelectI64(
+ useInt64RegisterAtStart(select->trueExpr()),
+ useInt64(select->falseExpr()), useRegister(select->condExpr()));
+ defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
+}
+
+void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ // For simplicity, require a register if we're going to emit a bounds-check
+ // branch, so that we don't have special cases for constants. This should
+ // only happen in rare constant-folding cases since asm.js sets the minimum
+ // heap size based on the constant indices used to access it.
+ LAllocation baseAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(base)
+ : useRegisterOrZeroAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+ LAllocation memoryBaseAlloc = ins->hasMemoryBase()
+ ? useRegisterAtStart(ins->memoryBase())
+ : LAllocation();
+
+ auto* lir =
+ new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, memoryBaseAlloc);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ // For simplicity, require a register if we're going to emit a bounds-check
+ // branch, so that we don't have special cases for constants. This should
+ // only happen in rare constant-folding cases since asm.js sets the minimum
+ // heap size based on the constant indices used to access it.
+ LAllocation baseAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(base)
+ : useRegisterOrZeroAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+ LAllocation memoryBaseAlloc = ins->hasMemoryBase()
+ ? useRegisterAtStart(ins->memoryBase())
+ : LAllocation();
+
+ LAsmJSStoreHeap* lir = nullptr;
+ switch (ins->access().type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+#ifdef JS_CODEGEN_X86
+ // See comment for LIRGeneratorX86::useByteOpRegister.
+ lir = new (alloc()) LAsmJSStoreHeap(
+ baseAlloc, useFixed(ins->value(), eax), limitAlloc, memoryBaseAlloc);
+ break;
+#endif
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ // For now, don't allow constant values. The immediate operand affects
+ // instruction layout which affects patching.
+ lir = new (alloc())
+ LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+ limitAlloc, memoryBaseAlloc);
+ break;
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ MOZ_CRASH("NYI");
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ add(lir, ins);
+}
+
+void LIRGeneratorX86Shared::lowerUDiv(MDiv* div) {
+ if (div->rhs()->isConstant()) {
+ // NOTE: the result of toInt32 is coerced to uint32_t.
+ uint32_t rhs = div->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+
+ LAllocation lhs = useRegisterAtStart(div->lhs());
+ if (rhs != 0 && uint32_t(1) << shift == rhs) {
+ LDivPowTwoI* lir = new (alloc()) LDivPowTwoI(lhs, lhs, shift, false);
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ defineReuseInput(lir, div, 0);
+ } else {
+ LUDivOrModConstant* lir = new (alloc())
+ LUDivOrModConstant(useRegister(div->lhs()), rhs, tempFixed(eax));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ defineFixed(lir, div, LAllocation(AnyRegister(edx)));
+ }
+ return;
+ }
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod(
+ useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(edx));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ defineFixed(lir, div, LAllocation(AnyRegister(eax)));
+}
+
+void LIRGeneratorX86Shared::lowerUMod(MMod* mod) {
+ if (mod->rhs()->isConstant()) {
+ uint32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+
+ if (rhs != 0 && uint32_t(1) << shift == rhs) {
+ LModPowTwoI* lir =
+ new (alloc()) LModPowTwoI(useRegisterAtStart(mod->lhs()), shift);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ defineReuseInput(lir, mod, 0);
+ } else {
+ LUDivOrModConstant* lir = new (alloc())
+ LUDivOrModConstant(useRegister(mod->lhs()), rhs, tempFixed(edx));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ defineFixed(lir, mod, LAllocation(AnyRegister(eax)));
+ }
+ return;
+ }
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod(
+ useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(eax));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ defineFixed(lir, mod, LAllocation(AnyRegister(edx)));
+}
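
(The LDivOrModConstantI / LUDivOrModConstant paths stand in for the
"reciprocal multiplication technique" mentioned in lowerDivI: division by a
constant becomes a widening multiply by a precomputed magic value plus a
shift. A hedged scalar sketch for one divisor follows; 0xAAAAAAAB is
ceil(2^33 / 3), and the real magic values are computed in the code generator,
not here.)

    #include <cassert>
    #include <cstdint>

    // Unsigned division by 3 via multiply-high: (x * ceil(2^33 / 3)) >> 33
    // equals x / 3 for every uint32_t x.
    static uint32_t DivBy3(uint32_t x) {
      return uint32_t((uint64_t(x) * 0xAAAAAAABu) >> 33);
    }

    int main() {
      for (uint32_t x : {0u, 1u, 2u, 3u, 100u, 0xFFFFFFFFu}) {
        assert(DivBy3(x) == x / 3);
      }
      return 0;
    }
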
+
+void LIRGeneratorX86Shared::lowerUrshD(MUrsh* mir) {
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+ MOZ_ASSERT(mir->type() == MIRType::Double);
+
+#ifdef JS_CODEGEN_X64
+ static_assert(ecx == rcx);
+#endif
+
+ // Without BMI2, x86 can only shift by ecx.
+ LUse lhsUse = useRegisterAtStart(lhs);
+ LAllocation rhsAlloc;
+ if (rhs->isConstant()) {
+ rhsAlloc = useOrConstant(rhs);
+ } else if (Assembler::HasBMI2()) {
+ rhsAlloc = useRegister(rhs);
+ } else {
+ rhsAlloc = useFixed(rhs, ecx);
+ }
+
+ LUrshD* lir = new (alloc()) LUrshD(lhsUse, rhsAlloc, tempCopy(lhs, 0));
+ define(lir, mir);
+}
+
+void LIRGeneratorX86Shared::lowerPowOfTwoI(MPow* mir) {
+ int32_t base = mir->input()->toConstant()->toInt32();
+ MDefinition* power = mir->power();
+
+ // Shift operand should be in register ecx, unless BMI2 is available.
+ // x86 can't shift by a non-ecx register.
+ LAllocation powerAlloc =
+ Assembler::HasBMI2() ? useRegister(power) : useFixed(power, ecx);
+ auto* lir = new (alloc()) LPowOfTwoI(powerAlloc, base);
+ assignSnapshot(lir, mir->bailoutKind());
+ define(lir, mir);
+}
+
+void LIRGeneratorX86Shared::lowerBigIntLsh(MBigIntLsh* ins) {
+ // Shift operand should be in register ecx, unless BMI2 is available.
+ // x86 can't shift by a non-ecx register.
+ LDefinition shiftAlloc = Assembler::HasBMI2() ? temp() : tempFixed(ecx);
+ auto* lir =
+ new (alloc()) LBigIntLsh(useRegister(ins->lhs()), useRegister(ins->rhs()),
+ temp(), shiftAlloc, temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorX86Shared::lowerBigIntRsh(MBigIntRsh* ins) {
+ // Shift operand should be in register ecx, unless BMI2 is available.
+ // x86 can't shift by a non-ecx register.
+ LDefinition shiftAlloc = Assembler::HasBMI2() ? temp() : tempFixed(ecx);
+ auto* lir =
+ new (alloc()) LBigIntRsh(useRegister(ins->lhs()), useRegister(ins->rhs()),
+ temp(), shiftAlloc, temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorX86Shared::lowerWasmBuiltinTruncateToInt32(
+ MWasmBuiltinTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ LDefinition maybeTemp =
+ Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempDouble();
+ if (opd->type() == MIRType::Double) {
+ define(new (alloc()) LWasmBuiltinTruncateDToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ maybeTemp),
+ ins);
+ return;
+ }
+
+ define(
+ new (alloc()) LWasmBuiltinTruncateFToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg), maybeTemp),
+ ins);
+}
+
+void LIRGeneratorX86Shared::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ LDefinition maybeTemp =
+ Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempDouble();
+ define(new (alloc()) LTruncateDToInt32(useRegister(opd), maybeTemp), ins);
+}
+
+void LIRGeneratorX86Shared::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ LDefinition maybeTemp =
+ Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempFloat32();
+ define(new (alloc()) LTruncateFToInt32(useRegister(opd), maybeTemp), ins);
+}
+
+void LIRGeneratorX86Shared::lowerCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins, bool useI386ByteRegisters) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ // If the target is a floating register then we need a temp at the
+ // lower level; that temp must be eax.
+ //
+ // Otherwise the target (if used) is an integer register, which
+ // must be eax. If the target is not used the machine code will
+ // still clobber eax, so just pretend it's used.
+ //
+ // oldval must be in a register.
+ //
+ // newval must be in a register. If the source is a byte array
+ // then newval must be a register that has a byte size: on x86
+ // this must be ebx, ecx, or edx (eax is taken for the output).
+ //
+ // Bug #1077036 describes some further optimization opportunities.
+
+ bool fixedOutput = false;
+ LDefinition tempDef = LDefinition::BogusTemp();
+ LAllocation newval;
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ tempDef = tempFixed(eax);
+ newval = useRegister(ins->newval());
+ } else {
+ fixedOutput = true;
+ if (useI386ByteRegisters && ins->isByteArray()) {
+ newval = useFixed(ins->newval(), ebx);
+ } else {
+ newval = useRegister(ins->newval());
+ }
+ }
+
+ const LAllocation oldval = useRegister(ins->oldval());
+
+ LCompareExchangeTypedArrayElement* lir =
+ new (alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval,
+ newval, tempDef);
+
+ if (fixedOutput) {
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ } else {
+ define(lir, ins);
+ }
+}
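
(For readers less familiar with CMPXCHG: its shape matches C++'s
compare_exchange, where the expected value plays eax's role and, on failure,
comes back replaced by the memory's current value. The snippet below is a
stand-alone illustration, not tied to the lowering above.)

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    int main() {
      std::atomic<uint32_t> cell{5};

      // Like CMPXCHG: on success the cell is updated; on failure 'expected'
      // is overwritten with the cell's current value.
      uint32_t expected = 5;
      bool ok = cell.compare_exchange_strong(expected, 9);
      assert(ok && cell.load() == 9);

      expected = 5;  // stale expectation
      ok = cell.compare_exchange_strong(expected, 1);
      assert(!ok && expected == 9);  // old value reported back
      return 0;
    }
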
+
+void LIRGeneratorX86Shared::lowerAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins, bool useI386ByteRegisters) {
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ const LAllocation value = useRegister(ins->value());
+
+ // The underlying instruction is XCHG, which can operate on any
+ // register.
+ //
+ // If the target is a floating register (for Uint32) then we need
+ // a temp into which to exchange.
+ //
+ // If the source is a byte array then we need a register that has
+ // a byte size; in this case -- on x86 only -- pin the output to
+ // an appropriate register and use that as a temp in the back-end.
+
+ LDefinition tempDef = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ tempDef = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir = new (alloc())
+ LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);
+
+ if (useI386ByteRegisters && ins->isByteArray()) {
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ } else {
+ define(lir, ins);
+ }
+}
+
+void LIRGeneratorX86Shared::lowerAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins, bool useI386ByteRegisters) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
+ // LOCK OR, or LOCK XOR. We can do this even for the Uint32 case.
+
+ if (ins->isForEffect()) {
+ LAllocation value;
+ if (useI386ByteRegisters && ins->isByteArray() &&
+ !ins->value()->isConstant()) {
+ value = useFixed(ins->value(), ebx);
+ } else {
+ value = useRegisterOrConstant(ins->value());
+ }
+
+ LAtomicTypedArrayElementBinopForEffect* lir = new (alloc())
+ LAtomicTypedArrayElementBinopForEffect(elements, index, value);
+
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD:
+ //
+ // movl src, output
+ // lock xaddl output, mem
+ //
+ // For the 8-bit variants XADD needs a byte register for the output.
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop:
+ //
+ // movl *mem, eax
+ // L: mov eax, temp
+ // andl src, temp
+ // lock cmpxchg temp, mem ; reads eax also
+ // jnz L
+ // ; result in eax
+ //
+ // Note the placement of L: cmpxchg will update eax with *mem if
+ // *mem does not have the expected value, so reloading it at the
+ // top of the loop would be redundant.
+ //
+ // If the array is not a uint32 array then:
+ // - eax should be the output (one result of the cmpxchg)
+ // - there is a temp, which must have a byte register if
+ // the array has 1-byte elements
+ //
+ // If the array is a uint32 array then:
+ // - eax is the first temp
+ // - we also need a second temp
+ //
+ // There are optimization opportunities:
+ // - better register allocation in the x86 8-bit case, Bug #1077036.
+
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
+ ins->operation() == AtomicFetchSubOp);
+ bool fixedOutput = true;
+ bool reuseInput = false;
+ LDefinition tempDef1 = LDefinition::BogusTemp();
+ LDefinition tempDef2 = LDefinition::BogusTemp();
+ LAllocation value;
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ value = useRegisterOrConstant(ins->value());
+ fixedOutput = false;
+ if (bitOp) {
+ tempDef1 = tempFixed(eax);
+ tempDef2 = temp();
+ } else {
+ tempDef1 = temp();
+ }
+ } else if (useI386ByteRegisters && ins->isByteArray()) {
+ if (ins->value()->isConstant()) {
+ value = useRegisterOrConstant(ins->value());
+ } else {
+ value = useFixed(ins->value(), ebx);
+ }
+ if (bitOp) {
+ tempDef1 = tempFixed(ecx);
+ }
+ } else if (bitOp) {
+ value = useRegisterOrConstant(ins->value());
+ tempDef1 = temp();
+ } else if (ins->value()->isConstant()) {
+ fixedOutput = false;
+ value = useRegisterOrConstant(ins->value());
+ } else {
+ fixedOutput = false;
+ reuseInput = true;
+ value = useRegisterAtStart(ins->value());
+ }
+
+ LAtomicTypedArrayElementBinop* lir = new (alloc())
+ LAtomicTypedArrayElementBinop(elements, index, value, tempDef1, tempDef2);
+
+ if (fixedOutput) {
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ } else if (reuseInput) {
+ defineReuseInput(lir, ins, LAtomicTypedArrayElementBinop::valueOp);
+ } else {
+ define(lir, ins);
+ }
+}
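
(The CMPXCHG loop sketched in the comment above maps closely onto a
compare_exchange loop in C++. Below is a minimal stand-alone rendering of
the fetch-and case; it is illustrative only, and none of these names appear
in the patch.)

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // The failed compare_exchange refreshes 'old' (the eax role), so the
    // loop does not re-read memory at the top -- mirroring the placement of
    // label L in the comment.
    static uint32_t FetchAnd(std::atomic<uint32_t>& mem, uint32_t src) {
      uint32_t old = mem.load();  // movl *mem, eax
      uint32_t desired;
      do {                        // L:
        desired = old & src;      //   mov eax, temp; andl src, temp
      } while (!mem.compare_exchange_weak(old, desired));  // lock cmpxchg
      return old;                 // result in eax
    }

    int main() {
      std::atomic<uint32_t> cell{0b1100};
      uint32_t previous = FetchAnd(cell, 0b1010);
      assert(previous == 0b1100 && cell.load() == 0b1000);
      return 0;
    }
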
+
+void LIRGenerator::visitCopySign(MCopySign* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double) {
+ lir = new (alloc()) LCopySignD();
+ } else {
+ lir = new (alloc()) LCopySignF();
+ }
+
+ // As lowerForFPU, but we want rhs to be in a FP register too.
+ lir->setOperand(0, useRegisterAtStart(lhs));
+ if (!Assembler::HasAVX()) {
+ lir->setOperand(1, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useRegister(rhs)
+ : useRegisterAtStart(rhs));
+ defineReuseInput(lir, ins, 0);
+ } else {
+ lir->setOperand(1, useRegisterAtStart(rhs));
+ define(lir, ins);
+ }
+}
+
+// These lowerings are really x86-shared but some Masm APIs are not yet
+// available on x86.
+
+// Ternary and binary operators require the dest register to be the same as
+// their first input register, leading to a pattern of useRegisterAtStart +
+// defineReuseInput.
+
+void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->v0()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->v1()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->v2()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::V128Bitselect: {
+ // Enforcing lhs == output avoids one setup move. We would also like to
+ // enforce merging the control with the temp (with
+ // useRegisterAtStart(control) and tempCopy()), but the register allocator
+ // ignores those constraints at present.
+ auto* lir = new (alloc()) LWasmTernarySimd128(
+ ins->simdOp(), useRegisterAtStart(ins->v0()), useRegister(ins->v1()),
+ useRegister(ins->v2()), tempSimd128());
+ defineReuseInput(lir, ins, LWasmTernarySimd128::V0);
+ break;
+ }
+ case wasm::SimdOp::F32x4RelaxedFma:
+ case wasm::SimdOp::F32x4RelaxedFnma:
+ case wasm::SimdOp::F64x2RelaxedFma:
+ case wasm::SimdOp::F64x2RelaxedFnma: {
+ auto* lir = new (alloc()) LWasmTernarySimd128(
+ ins->simdOp(), useRegister(ins->v0()), useRegister(ins->v1()),
+ useRegisterAtStart(ins->v2()));
+ defineReuseInput(lir, ins, LWasmTernarySimd128::V2);
+ break;
+ }
+ case wasm::SimdOp::I32x4DotI8x16I7x16AddS: {
+ auto* lir = new (alloc()) LWasmTernarySimd128(
+ ins->simdOp(), useRegister(ins->v0()), useRegister(ins->v1()),
+ useRegisterAtStart(ins->v2()));
+ defineReuseInput(lir, ins, LWasmTernarySimd128::V2);
+ break;
+ }
+ case wasm::SimdOp::I8x16RelaxedLaneSelect:
+ case wasm::SimdOp::I16x8RelaxedLaneSelect:
+ case wasm::SimdOp::I32x4RelaxedLaneSelect:
+ case wasm::SimdOp::I64x2RelaxedLaneSelect: {
+ if (Assembler::HasAVX()) {
+ auto* lir = new (alloc()) LWasmTernarySimd128(
+ ins->simdOp(), useRegisterAtStart(ins->v0()),
+ useRegisterAtStart(ins->v1()), useRegisterAtStart(ins->v2()));
+ define(lir, ins);
+ } else {
+ auto* lir = new (alloc()) LWasmTernarySimd128(
+ ins->simdOp(), useRegister(ins->v0()),
+ useRegisterAtStart(ins->v1()), useFixed(ins->v2(), vmm0));
+ defineReuseInput(lir, ins, LWasmTernarySimd128::V1);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("NYI");
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+ wasm::SimdOp op = ins->simdOp();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128);
+ MOZ_ASSERT(rhs->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ // Note MWasmBinarySimd128::foldsTo has already specialized operations that
+ // have a constant operand, so this takes care of more general cases of
+ // reordering, see ReorderCommutative.
+ if (ins->isCommutative()) {
+ ReorderCommutative(&lhs, &rhs, ins);
+ }
+
+ // Swap operands and change the operation if necessary; these are all
+ // x86/x64-dependent transformations. Except where noted, this is about
+ // avoiding unnecessary moves and fixups in the code generator macros.
+ bool swap = false;
+ switch (op) {
+ case wasm::SimdOp::V128AndNot: {
+ // Code generation requires the operands to be reversed.
+ swap = true;
+ break;
+ }
+ case wasm::SimdOp::I8x16LtS: {
+ swap = true;
+ op = wasm::SimdOp::I8x16GtS;
+ break;
+ }
+ case wasm::SimdOp::I8x16GeS: {
+ swap = true;
+ op = wasm::SimdOp::I8x16LeS;
+ break;
+ }
+ case wasm::SimdOp::I16x8LtS: {
+ swap = true;
+ op = wasm::SimdOp::I16x8GtS;
+ break;
+ }
+ case wasm::SimdOp::I16x8GeS: {
+ swap = true;
+ op = wasm::SimdOp::I16x8LeS;
+ break;
+ }
+ case wasm::SimdOp::I32x4LtS: {
+ swap = true;
+ op = wasm::SimdOp::I32x4GtS;
+ break;
+ }
+ case wasm::SimdOp::I32x4GeS: {
+ swap = true;
+ op = wasm::SimdOp::I32x4LeS;
+ break;
+ }
+ case wasm::SimdOp::F32x4Gt: {
+ swap = true;
+ op = wasm::SimdOp::F32x4Lt;
+ break;
+ }
+ case wasm::SimdOp::F32x4Ge: {
+ swap = true;
+ op = wasm::SimdOp::F32x4Le;
+ break;
+ }
+ case wasm::SimdOp::F64x2Gt: {
+ swap = true;
+ op = wasm::SimdOp::F64x2Lt;
+ break;
+ }
+ case wasm::SimdOp::F64x2Ge: {
+ swap = true;
+ op = wasm::SimdOp::F64x2Le;
+ break;
+ }
+ case wasm::SimdOp::F32x4PMin:
+ case wasm::SimdOp::F32x4PMax:
+ case wasm::SimdOp::F64x2PMin:
+ case wasm::SimdOp::F64x2PMax: {
+ // Code generation requires the operations to be reversed (the rhs is the
+ // output register).
+ swap = true;
+ break;
+ }
+ default:
+ break;
+ }
+ if (swap) {
+ MDefinition* tmp = lhs;
+ lhs = rhs;
+ rhs = tmp;
+ }
+
+ // Allocate temp registers
+ LDefinition tempReg0 = LDefinition::BogusTemp();
+ LDefinition tempReg1 = LDefinition::BogusTemp();
+ switch (op) {
+ case wasm::SimdOp::I64x2Mul:
+ tempReg0 = tempSimd128();
+ break;
+ case wasm::SimdOp::F32x4Min:
+ case wasm::SimdOp::F32x4Max:
+ case wasm::SimdOp::F64x2Min:
+ case wasm::SimdOp::F64x2Max:
+ tempReg0 = tempSimd128();
+ tempReg1 = tempSimd128();
+ break;
+ case wasm::SimdOp::I64x2LtS:
+ case wasm::SimdOp::I64x2GtS:
+ case wasm::SimdOp::I64x2LeS:
+ case wasm::SimdOp::I64x2GeS:
+ // The compareForOrderingInt64x2AVX implementation does not require
+ // temps but needs SSE4.2 support, so check that both AVX and SSE4.2
+ // are enabled.
+ if (!(Assembler::HasAVX() && Assembler::HasSSE42())) {
+ tempReg0 = tempSimd128();
+ tempReg1 = tempSimd128();
+ }
+ break;
+ default:
+ break;
+ }
+
+ // For binary ops without AVX support, the Masm API is usually
+ // (rhs, lhsDest) and requires AtStart+ReuseInput for the lhs.
+ //
+ // For a few ops, the API is actually (rhsDest, lhs) and the rules are the
+ // same but reversed. We swapped operands above; they will be swapped
+ // again in the code generator to emit the right code.
+ //
+ // If AVX support is enabled, some binary ops can use the output as the
+ // destination: useRegisterAtStart is applied to both operands and
+ // ReuseInput is not needed.
+
+ switch (op) {
+ case wasm::SimdOp::I8x16AvgrU:
+ case wasm::SimdOp::I16x8AvgrU:
+ case wasm::SimdOp::I8x16Add:
+ case wasm::SimdOp::I8x16AddSatS:
+ case wasm::SimdOp::I8x16AddSatU:
+ case wasm::SimdOp::I8x16Sub:
+ case wasm::SimdOp::I8x16SubSatS:
+ case wasm::SimdOp::I8x16SubSatU:
+ case wasm::SimdOp::I16x8Mul:
+ case wasm::SimdOp::I16x8MinS:
+ case wasm::SimdOp::I16x8MinU:
+ case wasm::SimdOp::I16x8MaxS:
+ case wasm::SimdOp::I16x8MaxU:
+ case wasm::SimdOp::I32x4Add:
+ case wasm::SimdOp::I32x4Sub:
+ case wasm::SimdOp::I32x4Mul:
+ case wasm::SimdOp::I32x4MinS:
+ case wasm::SimdOp::I32x4MinU:
+ case wasm::SimdOp::I32x4MaxS:
+ case wasm::SimdOp::I32x4MaxU:
+ case wasm::SimdOp::I64x2Add:
+ case wasm::SimdOp::I64x2Sub:
+ case wasm::SimdOp::I64x2Mul:
+ case wasm::SimdOp::F32x4Add:
+ case wasm::SimdOp::F32x4Sub:
+ case wasm::SimdOp::F32x4Mul:
+ case wasm::SimdOp::F32x4Div:
+ case wasm::SimdOp::F64x2Add:
+ case wasm::SimdOp::F64x2Sub:
+ case wasm::SimdOp::F64x2Mul:
+ case wasm::SimdOp::F64x2Div:
+ case wasm::SimdOp::F32x4Eq:
+ case wasm::SimdOp::F32x4Ne:
+ case wasm::SimdOp::F32x4Lt:
+ case wasm::SimdOp::F32x4Le:
+ case wasm::SimdOp::F64x2Eq:
+ case wasm::SimdOp::F64x2Ne:
+ case wasm::SimdOp::F64x2Lt:
+ case wasm::SimdOp::F64x2Le:
+ case wasm::SimdOp::F32x4PMin:
+ case wasm::SimdOp::F32x4PMax:
+ case wasm::SimdOp::F64x2PMin:
+ case wasm::SimdOp::F64x2PMax:
+ case wasm::SimdOp::I8x16Swizzle:
+ case wasm::SimdOp::I8x16RelaxedSwizzle:
+ case wasm::SimdOp::I8x16Eq:
+ case wasm::SimdOp::I8x16Ne:
+ case wasm::SimdOp::I8x16GtS:
+ case wasm::SimdOp::I8x16LeS:
+ case wasm::SimdOp::I8x16LtU:
+ case wasm::SimdOp::I8x16GtU:
+ case wasm::SimdOp::I8x16LeU:
+ case wasm::SimdOp::I8x16GeU:
+ case wasm::SimdOp::I16x8Eq:
+ case wasm::SimdOp::I16x8Ne:
+ case wasm::SimdOp::I16x8GtS:
+ case wasm::SimdOp::I16x8LeS:
+ case wasm::SimdOp::I16x8LtU:
+ case wasm::SimdOp::I16x8GtU:
+ case wasm::SimdOp::I16x8LeU:
+ case wasm::SimdOp::I16x8GeU:
+ case wasm::SimdOp::I32x4Eq:
+ case wasm::SimdOp::I32x4Ne:
+ case wasm::SimdOp::I32x4GtS:
+ case wasm::SimdOp::I32x4LeS:
+ case wasm::SimdOp::I32x4LtU:
+ case wasm::SimdOp::I32x4GtU:
+ case wasm::SimdOp::I32x4LeU:
+ case wasm::SimdOp::I32x4GeU:
+ case wasm::SimdOp::I64x2Eq:
+ case wasm::SimdOp::I64x2Ne:
+ case wasm::SimdOp::I64x2LtS:
+ case wasm::SimdOp::I64x2GtS:
+ case wasm::SimdOp::I64x2LeS:
+ case wasm::SimdOp::I64x2GeS:
+ case wasm::SimdOp::V128And:
+ case wasm::SimdOp::V128Or:
+ case wasm::SimdOp::V128Xor:
+ case wasm::SimdOp::V128AndNot:
+ case wasm::SimdOp::F32x4Min:
+ case wasm::SimdOp::F32x4Max:
+ case wasm::SimdOp::F64x2Min:
+ case wasm::SimdOp::F64x2Max:
+ case wasm::SimdOp::I8x16NarrowI16x8S:
+ case wasm::SimdOp::I8x16NarrowI16x8U:
+ case wasm::SimdOp::I16x8NarrowI32x4S:
+ case wasm::SimdOp::I16x8NarrowI32x4U:
+ case wasm::SimdOp::I32x4DotI16x8S:
+ case wasm::SimdOp::I16x8ExtmulLowI8x16S:
+ case wasm::SimdOp::I16x8ExtmulHighI8x16S:
+ case wasm::SimdOp::I16x8ExtmulLowI8x16U:
+ case wasm::SimdOp::I16x8ExtmulHighI8x16U:
+ case wasm::SimdOp::I32x4ExtmulLowI16x8S:
+ case wasm::SimdOp::I32x4ExtmulHighI16x8S:
+ case wasm::SimdOp::I32x4ExtmulLowI16x8U:
+ case wasm::SimdOp::I32x4ExtmulHighI16x8U:
+ case wasm::SimdOp::I64x2ExtmulLowI32x4S:
+ case wasm::SimdOp::I64x2ExtmulHighI32x4S:
+ case wasm::SimdOp::I64x2ExtmulLowI32x4U:
+ case wasm::SimdOp::I64x2ExtmulHighI32x4U:
+ case wasm::SimdOp::I16x8Q15MulrSatS:
+ case wasm::SimdOp::F32x4RelaxedMin:
+ case wasm::SimdOp::F32x4RelaxedMax:
+ case wasm::SimdOp::F64x2RelaxedMin:
+ case wasm::SimdOp::F64x2RelaxedMax:
+ case wasm::SimdOp::I16x8RelaxedQ15MulrS:
+ case wasm::SimdOp::I16x8DotI8x16I7x16S:
+ case wasm::SimdOp::MozPMADDUBSW:
+ if (isThreeOpAllowed()) {
+ auto* lir = new (alloc())
+ LWasmBinarySimd128(op, useRegisterAtStart(lhs),
+ useRegisterAtStart(rhs), tempReg0, tempReg1);
+ define(lir, ins);
+ break;
+ }
+ [[fallthrough]];
+ default: {
+ LAllocation lhsDestAlloc = useRegisterAtStart(lhs);
+ LAllocation rhsAlloc = willHaveDifferentLIRNodes(lhs, rhs)
+ ? useRegister(rhs)
+ : useRegisterAtStart(rhs);
+ auto* lir = new (alloc())
+ LWasmBinarySimd128(op, lhsDestAlloc, rhsAlloc, tempReg0, tempReg1);
+ defineReuseInput(lir, ins, LWasmBinarySimd128::LhsDest);
+ break;
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
+ int8_t shuffle[16]) {
+ if (simdOp() != wasm::SimdOp::V128Bitselect) {
+ return false;
+ }
+
+ // Optimization for when each byte of the control vector is 0x00 or 0xFF.
+ // On x86 there is no bitselect instruction, so blend operations are a win,
+ // e.g. via PBLENDVB or PBLENDW.
+ SimdConstant constant = static_cast<MWasmFloatConstant*>(v2())->toSimd128();
+ const SimdConstant::I8x16& bytes = constant.asInt8x16();
+ for (int8_t i = 0; i < 16; i++) {
+ if (bytes[i] == -1) {
+ shuffle[i] = i + 16;
+ } else if (bytes[i] == 0) {
+ shuffle[i] = i;
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
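
(The specialization above rests on a small identity: when a control byte is
entirely 0s or entirely 1s, a bitwise select collapses to a whole-byte
select, which a byte blend or shuffle can express. A tiny self-contained
check follows; operand naming is illustrative, not the JIT's.)

    #include <cassert>
    #include <cstdint>

    int main() {
      // With an all-ones control byte the select keeps x outright; with an
      // all-zeros control byte it keeps y. Mixed control bytes cannot be
      // expressed as a byte shuffle, hence the "return false" above.
      const uint8_t x = 0x5A, y = 0xC3;
      assert(((x & 0xFF) | (y & uint8_t(~0xFF))) == x);
      assert(((x & 0x00) | (y & uint8_t(~0x00))) == y);
      return 0;
    }
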
+bool MWasmTernarySimd128::canRelaxBitselect() {
+ wasm::SimdOp simdOp;
+ if (v2()->isWasmBinarySimd128()) {
+ simdOp = v2()->toWasmBinarySimd128()->simdOp();
+ } else if (v2()->isWasmBinarySimd128WithConstant()) {
+ simdOp = v2()->toWasmBinarySimd128WithConstant()->simdOp();
+ } else {
+ return false;
+ }
+ switch (simdOp) {
+ case wasm::SimdOp::I8x16Eq:
+ case wasm::SimdOp::I8x16Ne:
+ case wasm::SimdOp::I8x16GtS:
+ case wasm::SimdOp::I8x16GeS:
+ case wasm::SimdOp::I8x16LtS:
+ case wasm::SimdOp::I8x16LeS:
+ case wasm::SimdOp::I8x16GtU:
+ case wasm::SimdOp::I8x16GeU:
+ case wasm::SimdOp::I8x16LtU:
+ case wasm::SimdOp::I8x16LeU:
+ case wasm::SimdOp::I16x8Eq:
+ case wasm::SimdOp::I16x8Ne:
+ case wasm::SimdOp::I16x8GtS:
+ case wasm::SimdOp::I16x8GeS:
+ case wasm::SimdOp::I16x8LtS:
+ case wasm::SimdOp::I16x8LeS:
+ case wasm::SimdOp::I16x8GtU:
+ case wasm::SimdOp::I16x8GeU:
+ case wasm::SimdOp::I16x8LtU:
+ case wasm::SimdOp::I16x8LeU:
+ case wasm::SimdOp::I32x4Eq:
+ case wasm::SimdOp::I32x4Ne:
+ case wasm::SimdOp::I32x4GtS:
+ case wasm::SimdOp::I32x4GeS:
+ case wasm::SimdOp::I32x4LtS:
+ case wasm::SimdOp::I32x4LeS:
+ case wasm::SimdOp::I32x4GtU:
+ case wasm::SimdOp::I32x4GeU:
+ case wasm::SimdOp::I32x4LtU:
+ case wasm::SimdOp::I32x4LeU:
+ case wasm::SimdOp::I64x2Eq:
+ case wasm::SimdOp::I64x2Ne:
+ case wasm::SimdOp::I64x2GtS:
+ case wasm::SimdOp::I64x2GeS:
+ case wasm::SimdOp::I64x2LtS:
+ case wasm::SimdOp::I64x2LeS:
+ case wasm::SimdOp::F32x4Eq:
+ case wasm::SimdOp::F32x4Ne:
+ case wasm::SimdOp::F32x4Gt:
+ case wasm::SimdOp::F32x4Ge:
+ case wasm::SimdOp::F32x4Lt:
+ case wasm::SimdOp::F32x4Le:
+ case wasm::SimdOp::F64x2Eq:
+ case wasm::SimdOp::F64x2Ne:
+ case wasm::SimdOp::F64x2Gt:
+ case wasm::SimdOp::F64x2Ge:
+ case wasm::SimdOp::F64x2Lt:
+ case wasm::SimdOp::F64x2Le:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+bool MWasmBinarySimd128::canPmaddubsw() {
+ MOZ_ASSERT(Assembler::HasSSE3());
+ return true;
+}
+#endif
+
+bool MWasmBinarySimd128::specializeForConstantRhs() {
+ // The order follows MacroAssembler.h, generally
+ switch (simdOp()) {
+ // Operations implemented by a single native instruction where it is
+ // plausible that the rhs (after commutation if available) could be a
+ // constant.
+ //
+ // Swizzle is not here because it was handled earlier in the pipeline.
+ //
+ // Integer compares >= and < are not here because they are not supported in
+ // the hardware.
+ //
+ // Floating compares are not here because our patching machinery can't
+ // handle them yet.
+ //
+ // Floating-point min and max (including pmin and pmax) are not here because
+ // they are not straightforward to implement.
+ case wasm::SimdOp::I8x16Add:
+ case wasm::SimdOp::I16x8Add:
+ case wasm::SimdOp::I32x4Add:
+ case wasm::SimdOp::I64x2Add:
+ case wasm::SimdOp::I8x16Sub:
+ case wasm::SimdOp::I16x8Sub:
+ case wasm::SimdOp::I32x4Sub:
+ case wasm::SimdOp::I64x2Sub:
+ case wasm::SimdOp::I16x8Mul:
+ case wasm::SimdOp::I32x4Mul:
+ case wasm::SimdOp::I8x16AddSatS:
+ case wasm::SimdOp::I8x16AddSatU:
+ case wasm::SimdOp::I16x8AddSatS:
+ case wasm::SimdOp::I16x8AddSatU:
+ case wasm::SimdOp::I8x16SubSatS:
+ case wasm::SimdOp::I8x16SubSatU:
+ case wasm::SimdOp::I16x8SubSatS:
+ case wasm::SimdOp::I16x8SubSatU:
+ case wasm::SimdOp::I8x16MinS:
+ case wasm::SimdOp::I8x16MinU:
+ case wasm::SimdOp::I16x8MinS:
+ case wasm::SimdOp::I16x8MinU:
+ case wasm::SimdOp::I32x4MinS:
+ case wasm::SimdOp::I32x4MinU:
+ case wasm::SimdOp::I8x16MaxS:
+ case wasm::SimdOp::I8x16MaxU:
+ case wasm::SimdOp::I16x8MaxS:
+ case wasm::SimdOp::I16x8MaxU:
+ case wasm::SimdOp::I32x4MaxS:
+ case wasm::SimdOp::I32x4MaxU:
+ case wasm::SimdOp::V128And:
+ case wasm::SimdOp::V128Or:
+ case wasm::SimdOp::V128Xor:
+ case wasm::SimdOp::I8x16Eq:
+ case wasm::SimdOp::I8x16Ne:
+ case wasm::SimdOp::I8x16GtS:
+ case wasm::SimdOp::I8x16LeS:
+ case wasm::SimdOp::I16x8Eq:
+ case wasm::SimdOp::I16x8Ne:
+ case wasm::SimdOp::I16x8GtS:
+ case wasm::SimdOp::I16x8LeS:
+ case wasm::SimdOp::I32x4Eq:
+ case wasm::SimdOp::I32x4Ne:
+ case wasm::SimdOp::I32x4GtS:
+ case wasm::SimdOp::I32x4LeS:
+ case wasm::SimdOp::I64x2Mul:
+ case wasm::SimdOp::F32x4Eq:
+ case wasm::SimdOp::F32x4Ne:
+ case wasm::SimdOp::F32x4Lt:
+ case wasm::SimdOp::F32x4Le:
+ case wasm::SimdOp::F64x2Eq:
+ case wasm::SimdOp::F64x2Ne:
+ case wasm::SimdOp::F64x2Lt:
+ case wasm::SimdOp::F64x2Le:
+ case wasm::SimdOp::I32x4DotI16x8S:
+ case wasm::SimdOp::F32x4Add:
+ case wasm::SimdOp::F64x2Add:
+ case wasm::SimdOp::F32x4Sub:
+ case wasm::SimdOp::F64x2Sub:
+ case wasm::SimdOp::F32x4Div:
+ case wasm::SimdOp::F64x2Div:
+ case wasm::SimdOp::F32x4Mul:
+ case wasm::SimdOp::F64x2Mul:
+ case wasm::SimdOp::I8x16NarrowI16x8S:
+ case wasm::SimdOp::I8x16NarrowI16x8U:
+ case wasm::SimdOp::I16x8NarrowI32x4S:
+ case wasm::SimdOp::I16x8NarrowI32x4U:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void LIRGenerator::visitWasmBinarySimd128WithConstant(
+ MWasmBinarySimd128WithConstant* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* lhs = ins->lhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ // Allocate temp registers
+ LDefinition tempReg = LDefinition::BogusTemp();
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I64x2Mul:
+ tempReg = tempSimd128();
+ break;
+ default:
+ break;
+ }
+
+ if (isThreeOpAllowed()) {
+ // The non-destructive versions of instructions will be available
+ // when AVX is enabled.
+ LAllocation lhsAlloc = useRegisterAtStart(lhs);
+ auto* lir = new (alloc())
+ LWasmBinarySimd128WithConstant(lhsAlloc, ins->rhs(), tempReg);
+ define(lir, ins);
+ } else {
+ // Always beneficial to reuse the lhs register here; see discussion in
+ // visitWasmBinarySimd128() and also code in specializeForConstantRhs().
+ LAllocation lhsDestAlloc = useRegisterAtStart(lhs);
+ auto* lir = new (alloc())
+ LWasmBinarySimd128WithConstant(lhsDestAlloc, ins->rhs(), tempReg);
+ defineReuseInput(lir, ins, LWasmBinarySimd128WithConstant::LhsDest);
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ if (rhs->isConstant()) {
+ int32_t shiftCountMask;
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Shl:
+ case wasm::SimdOp::I8x16ShrU:
+ case wasm::SimdOp::I8x16ShrS:
+ shiftCountMask = 7;
+ break;
+ case wasm::SimdOp::I16x8Shl:
+ case wasm::SimdOp::I16x8ShrU:
+ case wasm::SimdOp::I16x8ShrS:
+ shiftCountMask = 15;
+ break;
+ case wasm::SimdOp::I32x4Shl:
+ case wasm::SimdOp::I32x4ShrU:
+ case wasm::SimdOp::I32x4ShrS:
+ shiftCountMask = 31;
+ break;
+ case wasm::SimdOp::I64x2Shl:
+ case wasm::SimdOp::I64x2ShrU:
+ case wasm::SimdOp::I64x2ShrS:
+ shiftCountMask = 63;
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift operation");
+ }
+
+ int32_t shiftCount = rhs->toConstant()->toInt32() & shiftCountMask;
+ if (shiftCount == shiftCountMask) {
+ // Check whether the sign-replication optimization can be applied.
+ // For some ops the input is reused.
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16ShrS: {
+ auto* lir =
+ new (alloc()) LWasmSignReplicationSimd128(useRegister(lhs));
+ define(lir, ins);
+ return;
+ }
+ case wasm::SimdOp::I16x8ShrS:
+ case wasm::SimdOp::I32x4ShrS:
+ case wasm::SimdOp::I64x2ShrS: {
+ auto* lir = new (alloc())
+ LWasmSignReplicationSimd128(useRegisterAtStart(lhs));
+ if (isThreeOpAllowed()) {
+ define(lir, ins);
+ } else {
+ // For non-AVX, it is always beneficial to reuse the input.
+ defineReuseInput(lir, ins, LWasmConstantShiftSimd128::Src);
+ }
+ return;
+ }
+ default:
+ break;
+ }
+ }
+
+# ifdef DEBUG
+ js::wasm::ReportSimdAnalysis("shift -> constant shift");
+# endif
+ auto* lir = new (alloc())
+ LWasmConstantShiftSimd128(useRegisterAtStart(lhs), shiftCount);
+ if (isThreeOpAllowed()) {
+ define(lir, ins);
+ } else {
+ // For non-AVX, it is always beneficial to reuse the input.
+ defineReuseInput(lir, ins, LWasmConstantShiftSimd128::Src);
+ }
+ return;
+ }
+
+# ifdef DEBUG
+ js::wasm::ReportSimdAnalysis("shift -> variable shift");
+# endif
+
+ LDefinition tempReg = LDefinition::BogusTemp();
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Shl:
+ case wasm::SimdOp::I8x16ShrS:
+ case wasm::SimdOp::I8x16ShrU:
+ case wasm::SimdOp::I64x2ShrS:
+ tempReg = tempSimd128();
+ break;
+ default:
+ break;
+ }
+
+ // Reusing the input if possible is never detrimental.
+ LAllocation lhsDestAlloc = useRegisterAtStart(lhs);
+ LAllocation rhsAlloc = useRegisterAtStart(rhs);
+ auto* lir =
+ new (alloc()) LWasmVariableShiftSimd128(lhsDestAlloc, rhsAlloc, tempReg);
+ defineReuseInput(lir, ins, LWasmVariableShiftSimd128::LhsDest);
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
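
(Two scalar facts underpin the constant-shift handling above. The sketch
assumes arithmetic right shift for signed types, as on the x86 compilers
this backend targets.)

    #include <cassert>
    #include <cstdint>

    int main() {
      // 1. Wasm takes shift counts modulo the lane width, hence the
      //    shiftCountMask values of 7/15/31/63.
      const int32_t shiftCountMask = 7;   // 8-bit lanes
      assert((9 & shiftCountMask) == 1);  // shifting by 9 behaves like by 1

      // 2. An arithmetic shift right by (lane width - 1) replicates the sign
      //    bit, turning every lane into 0 or -1 -- the case that
      //    LWasmSignReplicationSimd128 handles with a cheaper sequence.
      int8_t lane = -42;
      assert(int8_t(lane >> 7) == -1);
      lane = 42;
      assert(int8_t(lane >> 7) == 0);
      return 0;
    }
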
+
+void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->rhs()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ SimdShuffle s = ins->shuffle();
+ switch (s.opd) {
+ case SimdShuffle::Operand::LEFT:
+ case SimdShuffle::Operand::RIGHT: {
+ LAllocation src;
+ bool reuse = false;
+ switch (*s.permuteOp) {
+ case SimdPermuteOp::MOVE:
+ reuse = true;
+ break;
+ case SimdPermuteOp::BROADCAST_8x16:
+ case SimdPermuteOp::BROADCAST_16x8:
+ case SimdPermuteOp::PERMUTE_8x16:
+ case SimdPermuteOp::PERMUTE_16x8:
+ case SimdPermuteOp::PERMUTE_32x4:
+ case SimdPermuteOp::ROTATE_RIGHT_8x16:
+ case SimdPermuteOp::SHIFT_LEFT_8x16:
+ case SimdPermuteOp::SHIFT_RIGHT_8x16:
+ case SimdPermuteOp::REVERSE_16x8:
+ case SimdPermuteOp::REVERSE_32x4:
+ case SimdPermuteOp::REVERSE_64x2:
+ // No need to reuse registers when VEX instructions are enabled.
+ reuse = !Assembler::HasAVX();
+ break;
+ default:
+ MOZ_CRASH("Unexpected operator");
+ }
+ if (s.opd == SimdShuffle::Operand::LEFT) {
+ src = useRegisterAtStart(ins->lhs());
+ } else {
+ src = useRegisterAtStart(ins->rhs());
+ }
+ auto* lir =
+ new (alloc()) LWasmPermuteSimd128(src, *s.permuteOp, s.control);
+ if (reuse) {
+ defineReuseInput(lir, ins, LWasmPermuteSimd128::Src);
+ } else {
+ define(lir, ins);
+ }
+ break;
+ }
+ case SimdShuffle::Operand::BOTH:
+ case SimdShuffle::Operand::BOTH_SWAPPED: {
+ LDefinition temp = LDefinition::BogusTemp();
+ switch (*s.shuffleOp) {
+ case SimdShuffleOp::BLEND_8x16:
+ temp = Assembler::HasAVX() ? tempSimd128() : tempFixed(xmm0);
+ break;
+ default:
+ break;
+ }
+ if (isThreeOpAllowed()) {
+ LAllocation lhs;
+ LAllocation rhs;
+ if (s.opd == SimdShuffle::Operand::BOTH) {
+ lhs = useRegisterAtStart(ins->lhs());
+ rhs = useRegisterAtStart(ins->rhs());
+ } else {
+ lhs = useRegisterAtStart(ins->rhs());
+ rhs = useRegisterAtStart(ins->lhs());
+ }
+ auto* lir = new (alloc())
+ LWasmShuffleSimd128(lhs, rhs, temp, *s.shuffleOp, s.control);
+ define(lir, ins);
+ } else {
+ LAllocation lhs;
+ LAllocation rhs;
+ if (s.opd == SimdShuffle::Operand::BOTH) {
+ lhs = useRegisterAtStart(ins->lhs());
+ rhs = useRegister(ins->rhs());
+ } else {
+ lhs = useRegisterAtStart(ins->rhs());
+ rhs = useRegister(ins->lhs());
+ }
+ auto* lir = new (alloc())
+ LWasmShuffleSimd128(lhs, rhs, temp, *s.shuffleOp, s.control);
+ defineReuseInput(lir, ins, LWasmShuffleSimd128::LhsDest);
+ }
+ break;
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->lhs()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ // If AVX support is disabled, the Masm API is (rhs, lhsDest) and requires
+ // AtStart+ReuseInput for the lhs. For type reasons, the rhs will never be
+ // the same as the lhs and is therefore a plain Use.
+ //
+ // If AVX support is enabled, useRegisterAtStart is preferred.
+
+ if (ins->rhs()->type() == MIRType::Int64) {
+ if (isThreeOpAllowed()) {
+ auto* lir = new (alloc()) LWasmReplaceInt64LaneSimd128(
+ useRegisterAtStart(ins->lhs()), useInt64RegisterAtStart(ins->rhs()));
+ define(lir, ins);
+ } else {
+ auto* lir = new (alloc()) LWasmReplaceInt64LaneSimd128(
+ useRegisterAtStart(ins->lhs()), useInt64Register(ins->rhs()));
+ defineReuseInput(lir, ins, LWasmReplaceInt64LaneSimd128::LhsDest);
+ }
+ } else {
+ if (isThreeOpAllowed()) {
+ auto* lir = new (alloc()) LWasmReplaceLaneSimd128(
+ useRegisterAtStart(ins->lhs()), useRegisterAtStart(ins->rhs()));
+ define(lir, ins);
+ } else {
+ auto* lir = new (alloc()) LWasmReplaceLaneSimd128(
+ useRegisterAtStart(ins->lhs()), useRegister(ins->rhs()));
+ defineReuseInput(lir, ins, LWasmReplaceLaneSimd128::LhsDest);
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ switch (ins->input()->type()) {
+ case MIRType::Int64: {
+ // 64-bit integer splats.
+ // Load-and-(sign|zero)extend.
+ auto* lir = new (alloc())
+ LWasmInt64ToSimd128(useInt64RegisterAtStart(ins->input()));
+ define(lir, ins);
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Double: {
+ // Floating-point splats.
+ // Ideally we would save a move on SSE systems by reusing the input
+ // register, but since the input and output register types differ, we can't.
+ auto* lir =
+ new (alloc()) LWasmScalarToSimd128(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+ break;
+ }
+ default: {
+ // 32-bit integer splats.
+ auto* lir =
+ new (alloc()) LWasmScalarToSimd128(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+ break;
+ }
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(ins->input()->type() == MIRType::Simd128);
+ MOZ_ASSERT(ins->type() == MIRType::Simd128);
+
+ bool useAtStart = false;
+ bool reuseInput = false;
+ LDefinition tempReg = LDefinition::BogusTemp();
+ switch (ins->simdOp()) {
+ case wasm::SimdOp::I8x16Neg:
+ case wasm::SimdOp::I16x8Neg:
+ case wasm::SimdOp::I32x4Neg:
+ case wasm::SimdOp::I64x2Neg:
+ case wasm::SimdOp::I16x8ExtaddPairwiseI8x16S:
+ // Prefer src != dest to avoid an unconditional src->temp move.
+ MOZ_ASSERT(!reuseInput);
+ // If AVX is enabled, we prefer useRegisterAtStart.
+ useAtStart = isThreeOpAllowed();
+ break;
+ case wasm::SimdOp::F32x4Neg:
+ case wasm::SimdOp::F64x2Neg:
+ case wasm::SimdOp::F32x4Abs:
+ case wasm::SimdOp::F64x2Abs:
+ case wasm::SimdOp::V128Not:
+ case wasm::SimdOp::F32x4Sqrt:
+ case wasm::SimdOp::F64x2Sqrt:
+ case wasm::SimdOp::I8x16Abs:
+ case wasm::SimdOp::I16x8Abs:
+ case wasm::SimdOp::I32x4Abs:
+ case wasm::SimdOp::I64x2Abs:
+ case wasm::SimdOp::I32x4TruncSatF32x4S:
+ case wasm::SimdOp::F32x4ConvertI32x4U:
+ case wasm::SimdOp::I16x8ExtaddPairwiseI8x16U:
+ case wasm::SimdOp::I32x4ExtaddPairwiseI16x8S:
+ case wasm::SimdOp::I32x4ExtaddPairwiseI16x8U:
+ case wasm::SimdOp::I32x4RelaxedTruncF32x4S:
+ case wasm::SimdOp::I32x4RelaxedTruncF32x4U:
+ case wasm::SimdOp::I32x4RelaxedTruncF64x2SZero:
+ case wasm::SimdOp::I32x4RelaxedTruncF64x2UZero:
+ case wasm::SimdOp::I64x2ExtendHighI32x4S:
+ case wasm::SimdOp::I64x2ExtendHighI32x4U:
+ // Prefer src == dest to avoid an unconditional src->dest move
+ // for better performance in non-AVX mode (e.g. non-PSHUFD use).
+ useAtStart = true;
+ reuseInput = !isThreeOpAllowed();
+ break;
+ case wasm::SimdOp::I32x4TruncSatF32x4U:
+ case wasm::SimdOp::I32x4TruncSatF64x2SZero:
+ case wasm::SimdOp::I32x4TruncSatF64x2UZero:
+ case wasm::SimdOp::I8x16Popcnt:
+ tempReg = tempSimd128();
+ // Prefer src == dest to avoid an unconditional src->dest move
+ // in non-AVX mode.
+ useAtStart = true;
+ reuseInput = !isThreeOpAllowed();
+ break;
+ case wasm::SimdOp::I16x8ExtendLowI8x16S:
+ case wasm::SimdOp::I16x8ExtendHighI8x16S:
+ case wasm::SimdOp::I16x8ExtendLowI8x16U:
+ case wasm::SimdOp::I16x8ExtendHighI8x16U:
+ case wasm::SimdOp::I32x4ExtendLowI16x8S:
+ case wasm::SimdOp::I32x4ExtendHighI16x8S:
+ case wasm::SimdOp::I32x4ExtendLowI16x8U:
+ case wasm::SimdOp::I32x4ExtendHighI16x8U:
+ case wasm::SimdOp::I64x2ExtendLowI32x4S:
+ case wasm::SimdOp::I64x2ExtendLowI32x4U:
+ case wasm::SimdOp::F32x4ConvertI32x4S:
+ case wasm::SimdOp::F32x4Ceil:
+ case wasm::SimdOp::F32x4Floor:
+ case wasm::SimdOp::F32x4Trunc:
+ case wasm::SimdOp::F32x4Nearest:
+ case wasm::SimdOp::F64x2Ceil:
+ case wasm::SimdOp::F64x2Floor:
+ case wasm::SimdOp::F64x2Trunc:
+ case wasm::SimdOp::F64x2Nearest:
+ case wasm::SimdOp::F32x4DemoteF64x2Zero:
+ case wasm::SimdOp::F64x2PromoteLowF32x4:
+ case wasm::SimdOp::F64x2ConvertLowI32x4S:
+ case wasm::SimdOp::F64x2ConvertLowI32x4U:
+ // Prefer src == dest to exert the lowest register pressure on the
+ // surrounding code.
+ useAtStart = true;
+ MOZ_ASSERT(!reuseInput);
+ break;
+ default:
+ MOZ_CRASH("Unary SimdOp not implemented");
+ }
+
+ LUse inputUse =
+ useAtStart ? useRegisterAtStart(ins->input()) : useRegister(ins->input());
+ LWasmUnarySimd128* lir = new (alloc()) LWasmUnarySimd128(inputUse, tempReg);
+ if (reuseInput) {
+ defineReuseInput(lir, ins, LWasmUnarySimd128::Src);
+ } else {
+ define(lir, ins);
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ // A trick: On 32-bit systems, the base pointer is 32 bits (it was bounds
+ // checked and then chopped). On 64-bit systems, it can be 32 bits or 64
+ // bits. Either way, it fits in a GPR so we can ignore the
+ // Register/Register64 distinction here.
+# ifndef JS_64BIT
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+# endif
+ LUse base = useRegisterAtStart(ins->base());
+ LUse inputUse = useRegisterAtStart(ins->value());
+ LAllocation memoryBase = ins->hasMemoryBase()
+ ? useRegisterAtStart(ins->memoryBase())
+ : LAllocation();
+ LWasmLoadLaneSimd128* lir = new (alloc()) LWasmLoadLaneSimd128(
+ base, inputUse, LDefinition::BogusTemp(), memoryBase);
+ defineReuseInput(lir, ins, LWasmLoadLaneSimd128::Src);
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ // See comment above.
+# ifndef JS_64BIT
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+# endif
+ LUse base = useRegisterAtStart(ins->base());
+ LUse input = useRegisterAtStart(ins->value());
+ LAllocation memoryBase = ins->hasMemoryBase()
+ ? useRegisterAtStart(ins->memoryBase())
+ : LAllocation();
+ LWasmStoreLaneSimd128* lir = new (alloc())
+ LWasmStoreLaneSimd128(base, input, LDefinition::BogusTemp(), memoryBase);
+ add(lir, ins);
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
+
+#ifdef ENABLE_WASM_SIMD
+
+bool LIRGeneratorX86Shared::canFoldReduceSimd128AndBranch(wasm::SimdOp op) {
+ switch (op) {
+ case wasm::SimdOp::V128AnyTrue:
+ case wasm::SimdOp::I8x16AllTrue:
+ case wasm::SimdOp::I16x8AllTrue:
+ case wasm::SimdOp::I32x4AllTrue:
+ case wasm::SimdOp::I64x2AllTrue:
+ case wasm::SimdOp::I16x8Bitmask:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool LIRGeneratorX86Shared::canEmitWasmReduceSimd128AtUses(
+ MWasmReduceSimd128* ins) {
+ if (!ins->canEmitAtUses()) {
+ return false;
+ }
+ // Only specific ops generating int32.
+ if (ins->type() != MIRType::Int32) {
+ return false;
+ }
+ if (!canFoldReduceSimd128AndBranch(ins->simdOp())) {
+ return false;
+ }
+ // If never used then defer (it will be removed).
+ MUseIterator iter(ins->usesBegin());
+ if (iter == ins->usesEnd()) {
+ return true;
+ }
+ // We require an MTest consumer.
+ MNode* node = iter->consumer();
+ if (!node->isDefinition() || !node->toDefinition()->isTest()) {
+ return false;
+ }
+ // Defer only if there's only one use.
+ iter++;
+ return iter == ins->usesEnd();
+}
+
+#endif // ENABLE_WASM_SIMD
+
+void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
+#ifdef ENABLE_WASM_SIMD
+ if (canEmitWasmReduceSimd128AtUses(ins)) {
+ emitAtUses(ins);
+ return;
+ }
+
+ // Reductions (any_true, all_true, bitmask, extract_lane) uniformly prefer
+ // useRegisterAtStart:
+ //
+ // - In most cases, the input type differs from the output type, so there's no
+ // conflict and it doesn't really matter.
+ //
+ // - For extract_lane(0) on F32x4 and F64x2, input == output results in zero
+ // code being generated.
+ //
+ // - For extract_lane(k > 0) on F32x4 and F64x2, allowing the input register
+ // to be targeted lowers register pressure if it's the last use of the
+ // input.
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc())
+ LWasmReduceSimd128ToInt64(useRegisterAtStart(ins->input()));
+ defineInt64(lir, ins);
+ } else {
+ // Ideally we would reuse the input register for floating extract_lane if
+ // the lane is zero, but constraints in the register allocator require the
+ // input and output register types to be the same.
+ auto* lir = new (alloc()) LWasmReduceSimd128(
+ useRegisterAtStart(ins->input()), LDefinition::BogusTemp());
+ define(lir, ins);
+ }
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+}
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.h b/js/src/jit/x86-shared/Lowering-x86-shared.h
new file mode 100644
index 0000000000..69d367270a
--- /dev/null
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Lowering_x86_shared_h
+#define jit_x86_shared_Lowering_x86_shared_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorX86Shared : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorX86Shared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {}
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ template <size_t Temps>
+ void lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompareI64AndBranch(MTest* mir, MCompare* comp, JSOp op,
+ MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+ template <size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerNegI(MInstruction* ins, MDefinition* input);
+ void lowerNegI64(MInstruction* ins, MDefinition* input);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerDivI(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerUDiv(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void lowerUrshD(MUrsh* mir);
+ void lowerPowOfTwoI(MPow* mir);
+ void lowerWasmSelectI(MWasmSelect* select);
+ void lowerWasmSelectI64(MWasmSelect* select);
+ void lowerBigIntLsh(MBigIntLsh* ins);
+ void lowerBigIntRsh(MBigIntRsh* ins);
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void lowerCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins, bool useI386ByteRegisters);
+ void lowerAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins, bool useI386ByteRegisters);
+ void lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins,
+ bool useI386ByteRegisters);
+
+#ifdef ENABLE_WASM_SIMD
+ bool isThreeOpAllowed() { return Assembler::HasAVX(); }
+ bool canFoldReduceSimd128AndBranch(wasm::SimdOp op);
+ bool canEmitWasmReduceSimd128AtUses(MWasmReduceSimd128* ins);
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Lowering_x86_shared_h */
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp b/js/src/jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp
new file mode 100644
index 0000000000..51fa65e40d
--- /dev/null
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp
@@ -0,0 +1,1484 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/MacroAssembler.h"
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+using mozilla::Maybe;
+using mozilla::SpecificNaN;
+
+void MacroAssemblerX86Shared::splatX16(Register input, FloatRegister output) {
+ ScratchSimd128Scope scratch(asMasm());
+
+ vmovd(input, output);
+ if (HasAVX2()) {
+ vbroadcastb(Operand(output), output);
+ return;
+ }
+ vpxor(scratch, scratch, scratch);
+ vpshufb(scratch, output, output);
+}
+
+void MacroAssemblerX86Shared::splatX8(Register input, FloatRegister output) {
+ vmovd(input, output);
+ if (HasAVX2()) {
+ vbroadcastw(Operand(output), output);
+ return;
+ }
+ vpshuflw(0, output, output);
+ vpshufd(0, output, output);
+}
+
+void MacroAssemblerX86Shared::splatX4(Register input, FloatRegister output) {
+ vmovd(input, output);
+ if (HasAVX2()) {
+ vbroadcastd(Operand(output), output);
+ return;
+ }
+ vpshufd(0, output, output);
+}
+
+void MacroAssemblerX86Shared::splatX4(FloatRegister input,
+ FloatRegister output) {
+ MOZ_ASSERT(input.isSingle() && output.isSimd128());
+ if (HasAVX2()) {
+ vbroadcastss(Operand(input), output);
+ return;
+ }
+ input = asMasm().moveSimd128FloatIfNotAVX(input.asSimd128(), output);
+ vshufps(0, input, input, output);
+}
+
+void MacroAssemblerX86Shared::splatX2(FloatRegister input,
+ FloatRegister output) {
+ MOZ_ASSERT(input.isDouble() && output.isSimd128());
+ vmovddup(Operand(input.asSimd128()), output);
+}
+
+void MacroAssemblerX86Shared::extractLaneInt32x4(FloatRegister input,
+ Register output,
+ unsigned lane) {
+ if (lane == 0) {
+ // The value we want to extract is in the low double-word
+ moveLowInt32(input, output);
+ } else {
+ vpextrd(lane, input, output);
+ }
+}
+
+void MacroAssemblerX86Shared::extractLaneFloat32x4(FloatRegister input,
+ FloatRegister output,
+ unsigned lane) {
+ MOZ_ASSERT(input.isSimd128() && output.isSingle());
+ if (lane == 0) {
+ // The value we want to extract is in the low double-word
+ if (input.asSingle() != output) {
+ moveFloat32(input, output);
+ }
+ } else if (lane == 2) {
+ moveHighPairToLowPairFloat32(input, output);
+ } else {
+ uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
+ FloatRegister dest = output.asSimd128();
+ input = moveSimd128FloatIfNotAVX(input, dest);
+ vshufps(mask, input, input, dest);
+ }
+}
+
+void MacroAssemblerX86Shared::extractLaneFloat64x2(FloatRegister input,
+ FloatRegister output,
+ unsigned lane) {
+ MOZ_ASSERT(input.isSimd128() && output.isDouble());
+ if (lane == 0) {
+ // The value we want to extract is in the low quadword
+ if (input.asDouble() != output) {
+ moveDouble(input, output);
+ }
+ } else {
+ vpalignr(Operand(input), output, output, 8);
+ }
+}
+
+void MacroAssemblerX86Shared::extractLaneInt16x8(FloatRegister input,
+ Register output, unsigned lane,
+ SimdSign sign) {
+ vpextrw(lane, input, Operand(output));
+ if (sign == SimdSign::Signed) {
+ movswl(output, output);
+ }
+}
+
+void MacroAssemblerX86Shared::extractLaneInt8x16(FloatRegister input,
+ Register output, unsigned lane,
+ SimdSign sign) {
+ vpextrb(lane, input, Operand(output));
+ if (sign == SimdSign::Signed) {
+ if (!AllocatableGeneralRegisterSet(Registers::SingleByteRegs).has(output)) {
+ xchgl(eax, output);
+ movsbl(eax, eax);
+ xchgl(eax, output);
+ } else {
+ movsbl(output, output);
+ }
+ }
+}
+
+void MacroAssemblerX86Shared::replaceLaneFloat32x4(unsigned lane,
+ FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ MOZ_ASSERT(lhs.isSimd128() && rhs.isSingle());
+
+ if (lane == 0) {
+ if (rhs.asSimd128() == lhs) {
+ // no-op, although this should not normally happen for type checking
+ // reasons higher up in the stack.
+ moveSimd128Float(lhs, dest);
+ } else {
+ // move low dword of value into low dword of output
+ vmovss(rhs, lhs, dest);
+ }
+ } else {
+ vinsertps(vinsertpsMask(0, lane), rhs, lhs, dest);
+ }
+}
+
+void MacroAssemblerX86Shared::replaceLaneFloat64x2(unsigned lane,
+ FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ MOZ_ASSERT(lhs.isSimd128() && rhs.isDouble());
+
+ if (lane == 0) {
+ if (rhs.asSimd128() == lhs) {
+ // no-op, although this should not normally happen for type checking
+ // reasons higher up in the stack.
+ moveSimd128Float(lhs, dest);
+ } else {
+ // move low qword of value into low qword of output
+ vmovsd(rhs, lhs, dest);
+ }
+ } else {
+ // move low qword of value into high qword of output
+ vshufpd(0, rhs, lhs, dest);
+ }
+}
+
+void MacroAssemblerX86Shared::blendInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output,
+ FloatRegister temp,
+ const uint8_t lanes[16]) {
+ asMasm().loadConstantSimd128Int(
+ SimdConstant::CreateX16(reinterpret_cast<const int8_t*>(lanes)), temp);
+ vpblendvb(temp, rhs, lhs, output);
+}
+
+void MacroAssemblerX86Shared::blendInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output,
+ const uint16_t lanes[8]) {
+ uint32_t mask = 0;
+ for (unsigned i = 0; i < 8; i++) {
+ if (lanes[i]) {
+ mask |= (1 << i);
+ }
+ }
+ vpblendw(mask, rhs, lhs, output);
+}
+
+void MacroAssemblerX86Shared::laneSelectSimd128(FloatRegister mask,
+ FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister output) {
+ vpblendvb(mask, lhs, rhs, output);
+}
+
+void MacroAssemblerX86Shared::shuffleInt8x16(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister output,
+ const uint8_t lanes[16]) {
+ ScratchSimd128Scope scratch(asMasm());
+
+ // Use pshufb instructions to gather the lanes from each source vector.
+ // A negative index creates a zero lane, so the two vectors can be combined.
+
+ // Set scratch = lanes from rhs.
+ int8_t idx[16];
+ for (unsigned i = 0; i < 16; i++) {
+ idx[i] = lanes[i] >= 16 ? lanes[i] - 16 : -1;
+ }
+ rhs = moveSimd128IntIfNotAVX(rhs, scratch);
+ asMasm().vpshufbSimd128(SimdConstant::CreateX16(idx), rhs, scratch);
+
+ // Set output = lanes from lhs.
+ for (unsigned i = 0; i < 16; i++) {
+ idx[i] = lanes[i] < 16 ? lanes[i] : -1;
+ }
+ lhs = moveSimd128IntIfNotAVX(lhs, output);
+ asMasm().vpshufbSimd128(SimdConstant::CreateX16(idx), lhs, output);
+
+ // Combine.
+ vpor(scratch, output, output);
+}
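+
+// As a reference for the byte shuffle above, the per-lane behavior amounts to
+// the following standalone sketch (not the code that is emitted):
+//
+//   void shuffleInt8x16Ref(const uint8_t lhs[16], const uint8_t rhs[16],
+//                          const uint8_t lanes[16], uint8_t output[16]) {
+//     for (unsigned i = 0; i < 16; i++) {
+//       // Indices 0..15 select from lhs, 16..31 select from rhs.
+//       output[i] = lanes[i] < 16 ? lhs[lanes[i]] : rhs[lanes[i] - 16];
+//     }
+//   }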
+
+static inline FloatRegister ToSimdFloatRegister(const Operand& op) {
+ return FloatRegister(op.fpu(), FloatRegister::Codes::ContentType::Simd128);
+}
+
+void MacroAssemblerX86Shared::compareInt8x16(FloatRegister lhs, Operand rhs,
+ Assembler::Condition cond,
+ FloatRegister output) {
+ switch (cond) {
+ case Assembler::Condition::GreaterThan:
+ vpcmpgtb(rhs, lhs, output);
+ break;
+ case Assembler::Condition::Equal:
+ vpcmpeqb(rhs, lhs, output);
+ break;
+ case Assembler::Condition::LessThan: {
+ ScratchSimd128Scope scratch(asMasm());
+ if (lhs == output) {
+ moveSimd128Int(lhs, scratch);
+ lhs = scratch;
+ }
+ if (rhs.kind() == Operand::FPREG) {
+ moveSimd128Int(ToSimdFloatRegister(rhs), output);
+ } else {
+ loadAlignedSimd128Int(rhs, output);
+ }
+ vpcmpgtb(Operand(lhs), output, output);
+ break;
+ }
+ case Assembler::Condition::NotEqual:
+ vpcmpeqb(rhs, lhs, output);
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::Condition::GreaterThanOrEqual: {
+ ScratchSimd128Scope scratch(asMasm());
+ if (lhs == output) {
+ moveSimd128Int(lhs, scratch);
+ lhs = scratch;
+ }
+ if (rhs.kind() == Operand::FPREG) {
+ moveSimd128Int(ToSimdFloatRegister(rhs), output);
+ } else {
+ loadAlignedSimd128Int(rhs, output);
+ }
+ vpcmpgtb(Operand(lhs), output, output);
+ }
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
+ vpcmpgtb(rhs, lhs, output);
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::Above:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpminub(rhs, lhs, output);
+ vpcmpeqb(Operand(lhs), output, output);
+ } else {
+ vpmaxub(rhs, lhs, output);
+ vpcmpeqb(rhs, output, output);
+ }
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::BelowOrEqual:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpminub(rhs, lhs, output);
+ vpcmpeqb(Operand(lhs), output, output);
+ } else {
+ vpmaxub(rhs, lhs, output);
+ vpcmpeqb(rhs, output, output);
+ }
+ break;
+ case Assembler::Below:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpmaxub(rhs, lhs, output);
+ vpcmpeqb(Operand(lhs), output, output);
+ } else {
+ vpminub(rhs, lhs, output);
+ vpcmpeqb(rhs, output, output);
+ }
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::AboveOrEqual:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpmaxub(rhs, lhs, output);
+ vpcmpeqb(Operand(lhs), output, output);
+ } else {
+ vpminub(rhs, lhs, output);
+ vpcmpeqb(rhs, output, output);
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+void MacroAssemblerX86Shared::compareInt8x16(Assembler::Condition cond,
+ FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ bool complement = false;
+ switch (cond) {
+ case Assembler::Condition::NotEqual:
+ complement = true;
+ [[fallthrough]];
+ case Assembler::Condition::Equal:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpcmpeqb,
+ &MacroAssembler::vpcmpeqbSimd128);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ complement = true;
+ [[fallthrough]];
+ case Assembler::Condition::GreaterThan:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpcmpgtb,
+ &MacroAssembler::vpcmpgtbSimd128);
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+ if (complement) {
+ asMasm().bitwiseXorSimd128(dest, SimdConstant::SplatX16(-1), dest);
+ }
+}
+
+void MacroAssemblerX86Shared::compareInt16x8(FloatRegister lhs, Operand rhs,
+ Assembler::Condition cond,
+ FloatRegister output) {
+ switch (cond) {
+ case Assembler::Condition::GreaterThan:
+ vpcmpgtw(rhs, lhs, output);
+ break;
+ case Assembler::Condition::Equal:
+ vpcmpeqw(rhs, lhs, output);
+ break;
+ case Assembler::Condition::LessThan: {
+ ScratchSimd128Scope scratch(asMasm());
+ if (lhs == output) {
+ moveSimd128Int(lhs, scratch);
+ lhs = scratch;
+ }
+ if (rhs.kind() == Operand::FPREG) {
+ moveSimd128Int(ToSimdFloatRegister(rhs), output);
+ } else {
+ loadAlignedSimd128Int(rhs, output);
+ }
+ vpcmpgtw(Operand(lhs), output, output);
+ break;
+ }
+ case Assembler::Condition::NotEqual:
+ vpcmpeqw(rhs, lhs, output);
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::Condition::GreaterThanOrEqual: {
+ ScratchSimd128Scope scratch(asMasm());
+ if (lhs == output) {
+ moveSimd128Int(lhs, scratch);
+ lhs = scratch;
+ }
+ if (rhs.kind() == Operand::FPREG) {
+ moveSimd128Int(ToSimdFloatRegister(rhs), output);
+ } else {
+ loadAlignedSimd128Int(rhs, output);
+ }
+ vpcmpgtw(Operand(lhs), output, output);
+ }
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
+ vpcmpgtw(rhs, lhs, output);
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::Above:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpminuw(rhs, lhs, output);
+ vpcmpeqw(Operand(lhs), output, output);
+ } else {
+ vpmaxuw(rhs, lhs, output);
+ vpcmpeqw(rhs, output, output);
+ }
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::BelowOrEqual:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpminuw(rhs, lhs, output);
+ vpcmpeqw(Operand(lhs), output, output);
+ } else {
+ vpmaxuw(rhs, lhs, output);
+ vpcmpeqw(rhs, output, output);
+ }
+ break;
+ case Assembler::Below:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpmaxuw(rhs, lhs, output);
+ vpcmpeqw(Operand(lhs), output, output);
+ } else {
+ vpminuw(rhs, lhs, output);
+ vpcmpeqw(rhs, output, output);
+ }
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::AboveOrEqual:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpmaxuw(rhs, lhs, output);
+ vpcmpeqw(Operand(lhs), output, output);
+ } else {
+ vpminuw(rhs, lhs, output);
+ vpcmpeqw(rhs, output, output);
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+void MacroAssemblerX86Shared::compareInt16x8(Assembler::Condition cond,
+ FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ bool complement = false;
+ switch (cond) {
+ case Assembler::Condition::NotEqual:
+ complement = true;
+ [[fallthrough]];
+ case Assembler::Condition::Equal:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpcmpeqw,
+ &MacroAssembler::vpcmpeqwSimd128);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ complement = true;
+ [[fallthrough]];
+ case Assembler::Condition::GreaterThan:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpcmpgtw,
+ &MacroAssembler::vpcmpgtwSimd128);
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+ if (complement) {
+ asMasm().bitwiseXorSimd128(dest, SimdConstant::SplatX16(-1), dest);
+ }
+}
+
+void MacroAssemblerX86Shared::compareInt32x4(FloatRegister lhs, Operand rhs,
+ Assembler::Condition cond,
+ FloatRegister output) {
+ switch (cond) {
+ case Assembler::Condition::GreaterThan:
+ vpcmpgtd(rhs, lhs, output);
+ break;
+ case Assembler::Condition::Equal:
+ vpcmpeqd(rhs, lhs, output);
+ break;
+ case Assembler::Condition::LessThan: {
+ ScratchSimd128Scope scratch(asMasm());
+ if (lhs == output) {
+ moveSimd128Int(lhs, scratch);
+ lhs = scratch;
+ }
+ if (rhs.kind() == Operand::FPREG) {
+ moveSimd128Int(ToSimdFloatRegister(rhs), output);
+ } else {
+ loadAlignedSimd128Int(rhs, output);
+ }
+ vpcmpgtd(Operand(lhs), output, output);
+ break;
+ }
+ case Assembler::Condition::NotEqual:
+ vpcmpeqd(rhs, lhs, output);
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::Condition::GreaterThanOrEqual: {
+ ScratchSimd128Scope scratch(asMasm());
+ if (lhs == output) {
+ moveSimd128Int(lhs, scratch);
+ lhs = scratch;
+ }
+ if (rhs.kind() == Operand::FPREG) {
+ moveSimd128Int(ToSimdFloatRegister(rhs), output);
+ } else {
+ loadAlignedSimd128Int(rhs, output);
+ }
+ vpcmpgtd(Operand(lhs), output, output);
+ }
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
+ vpcmpgtd(rhs, lhs, output);
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::Above:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpminud(rhs, lhs, output);
+ vpcmpeqd(Operand(lhs), output, output);
+ } else {
+ vpmaxud(rhs, lhs, output);
+ vpcmpeqd(rhs, output, output);
+ }
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::BelowOrEqual:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpminud(rhs, lhs, output);
+ vpcmpeqd(Operand(lhs), output, output);
+ } else {
+ vpmaxud(rhs, lhs, output);
+ vpcmpeqd(rhs, output, output);
+ }
+ break;
+ case Assembler::Below:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpmaxud(rhs, lhs, output);
+ vpcmpeqd(Operand(lhs), output, output);
+ } else {
+ vpminud(rhs, lhs, output);
+ vpcmpeqd(rhs, output, output);
+ }
+ asMasm().bitwiseNotSimd128(output, output);
+ break;
+ case Assembler::AboveOrEqual:
+ if (rhs.kind() == Operand::FPREG && ToSimdFloatRegister(rhs) == output) {
+ vpmaxud(rhs, lhs, output);
+ vpcmpeqd(Operand(lhs), output, output);
+ } else {
+ vpminud(rhs, lhs, output);
+ vpcmpeqd(rhs, output, output);
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+void MacroAssemblerX86Shared::compareInt32x4(Assembler::Condition cond,
+ FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ bool complement = false;
+ switch (cond) {
+ case Assembler::Condition::NotEqual:
+ complement = true;
+ [[fallthrough]];
+ case Assembler::Condition::Equal:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpcmpeqd,
+ &MacroAssembler::vpcmpeqdSimd128);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ complement = true;
+ [[fallthrough]];
+ case Assembler::Condition::GreaterThan:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpcmpgtd,
+ &MacroAssembler::vpcmpgtdSimd128);
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+ if (complement) {
+ asMasm().bitwiseXorSimd128(dest, SimdConstant::SplatX16(-1), dest);
+ }
+}
+
+void MacroAssemblerX86Shared::compareForEqualityInt64x2(
+ FloatRegister lhs, Operand rhs, Assembler::Condition cond,
+ FloatRegister output) {
+ static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
+ switch (cond) {
+ case Assembler::Condition::Equal:
+ vpcmpeqq(rhs, lhs, output);
+ break;
+ case Assembler::Condition::NotEqual:
+ vpcmpeqq(rhs, lhs, output);
+ asMasm().bitwiseXorSimd128(output, allOnes, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+void MacroAssemblerX86Shared::compareForOrderingInt64x2(
+ FloatRegister lhs, Operand rhs, Assembler::Condition cond,
+ FloatRegister temp1, FloatRegister temp2, FloatRegister output) {
+ static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
+  // The pseudo code is for the > comparison (as an example):
+ // __m128i pcmpgtq_sse2 (__m128i a, __m128i b) {
+ // __m128i r = _mm_and_si128(_mm_cmpeq_epi32(a, b), _mm_sub_epi64(b, a));
+ // r = _mm_or_si128(r, _mm_cmpgt_epi32(a, b));
+ // return _mm_shuffle_epi32(r, _MM_SHUFFLE(3,3,1,1));
+ // }
+ // Credits to https://stackoverflow.com/a/65175746
+ switch (cond) {
+ case Assembler::Condition::GreaterThan:
+ vmovdqa(rhs, temp1);
+ vmovdqa(Operand(lhs), temp2);
+ vpsubq(Operand(lhs), temp1, temp1);
+ vpcmpeqd(rhs, temp2, temp2);
+ vandpd(temp2, temp1, temp1);
+ lhs = asMasm().moveSimd128IntIfNotAVX(lhs, output);
+ vpcmpgtd(rhs, lhs, output);
+ vpor(Operand(temp1), output, output);
+ vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), output, output);
+ break;
+ case Assembler::Condition::LessThan:
+ vmovdqa(rhs, temp1);
+ vmovdqa(Operand(lhs), temp2);
+ vpcmpgtd(Operand(lhs), temp1, temp1);
+ vpcmpeqd(Operand(rhs), temp2, temp2);
+ lhs = asMasm().moveSimd128IntIfNotAVX(lhs, output);
+ vpsubq(rhs, lhs, output);
+ vandpd(temp2, output, output);
+ vpor(Operand(temp1), output, output);
+ vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), output, output);
+ break;
+ case Assembler::Condition::GreaterThanOrEqual:
+ vmovdqa(rhs, temp1);
+ vmovdqa(Operand(lhs), temp2);
+ vpcmpgtd(Operand(lhs), temp1, temp1);
+ vpcmpeqd(Operand(rhs), temp2, temp2);
+ lhs = asMasm().moveSimd128IntIfNotAVX(lhs, output);
+ vpsubq(rhs, lhs, output);
+ vandpd(temp2, output, output);
+ vpor(Operand(temp1), output, output);
+ vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), output, output);
+ asMasm().bitwiseXorSimd128(output, allOnes, output);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ vmovdqa(rhs, temp1);
+ vmovdqa(Operand(lhs), temp2);
+ vpsubq(Operand(lhs), temp1, temp1);
+ vpcmpeqd(rhs, temp2, temp2);
+ vandpd(temp2, temp1, temp1);
+ lhs = asMasm().moveSimd128IntIfNotAVX(lhs, output);
+ vpcmpgtd(rhs, lhs, output);
+ vpor(Operand(temp1), output, output);
+ vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), output, output);
+ asMasm().bitwiseXorSimd128(output, allOnes, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+void MacroAssemblerX86Shared::compareForOrderingInt64x2AVX(
+ FloatRegister lhs, FloatRegister rhs, Assembler::Condition cond,
+ FloatRegister output) {
+ MOZ_ASSERT(HasSSE42());
+ static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
+ switch (cond) {
+ case Assembler::Condition::GreaterThan:
+ vpcmpgtq(Operand(rhs), lhs, output);
+ break;
+ case Assembler::Condition::LessThan:
+ vpcmpgtq(Operand(lhs), rhs, output);
+ break;
+ case Assembler::Condition::GreaterThanOrEqual:
+ vpcmpgtq(Operand(lhs), rhs, output);
+ asMasm().bitwiseXorSimd128(output, allOnes, output);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ vpcmpgtq(Operand(rhs), lhs, output);
+ asMasm().bitwiseXorSimd128(output, allOnes, output);
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+void MacroAssemblerX86Shared::compareFloat32x4(FloatRegister lhs, Operand rhs,
+ Assembler::Condition cond,
+ FloatRegister output) {
+ // TODO Can do better here with three-address compares
+
+ // Move lhs to output if lhs!=output; move rhs out of the way if rhs==output.
+ // This is bad, but Ion does not need this fixup.
+ ScratchSimd128Scope scratch(asMasm());
+ if (!HasAVX() && !lhs.aliases(output)) {
+ if (rhs.kind() == Operand::FPREG &&
+ output.aliases(FloatRegister::FromCode(rhs.fpu()))) {
+ vmovaps(rhs, scratch);
+ rhs = Operand(scratch);
+ }
+ vmovaps(lhs, output);
+ lhs = output;
+ }
+
+ switch (cond) {
+ case Assembler::Condition::Equal:
+ vcmpeqps(rhs, lhs, output);
+ break;
+ case Assembler::Condition::LessThan:
+ vcmpltps(rhs, lhs, output);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ vcmpleps(rhs, lhs, output);
+ break;
+ case Assembler::Condition::NotEqual:
+ vcmpneqps(rhs, lhs, output);
+ break;
+ case Assembler::Condition::GreaterThanOrEqual:
+ case Assembler::Condition::GreaterThan:
+ // We reverse these operations in the -inl.h file so that we don't have to
+ // copy into and out of temporaries after codegen.
+ MOZ_CRASH("should have reversed this");
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+void MacroAssemblerX86Shared::compareFloat32x4(Assembler::Condition cond,
+ FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ switch (cond) {
+ case Assembler::Condition::Equal:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vcmpeqps,
+ &MacroAssembler::vcmpeqpsSimd128);
+ break;
+ case Assembler::Condition::LessThan:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vcmpltps,
+ &MacroAssembler::vcmpltpsSimd128);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vcmpleps,
+ &MacroAssembler::vcmplepsSimd128);
+ break;
+ case Assembler::Condition::NotEqual:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vcmpneqps,
+ &MacroAssembler::vcmpneqpsSimd128);
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+void MacroAssemblerX86Shared::compareFloat64x2(FloatRegister lhs, Operand rhs,
+ Assembler::Condition cond,
+ FloatRegister output) {
+ // TODO Can do better here with three-address compares
+
+ // Move lhs to output if lhs!=output; move rhs out of the way if rhs==output.
+ // This is bad, but Ion does not need this fixup.
+ ScratchSimd128Scope scratch(asMasm());
+ if (!HasAVX() && !lhs.aliases(output)) {
+ if (rhs.kind() == Operand::FPREG &&
+ output.aliases(FloatRegister::FromCode(rhs.fpu()))) {
+ vmovapd(rhs, scratch);
+ rhs = Operand(scratch);
+ }
+ vmovapd(lhs, output);
+ lhs = output;
+ }
+
+ switch (cond) {
+ case Assembler::Condition::Equal:
+ vcmpeqpd(rhs, lhs, output);
+ break;
+ case Assembler::Condition::LessThan:
+ vcmpltpd(rhs, lhs, output);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ vcmplepd(rhs, lhs, output);
+ break;
+ case Assembler::Condition::NotEqual:
+ vcmpneqpd(rhs, lhs, output);
+ break;
+ case Assembler::Condition::GreaterThanOrEqual:
+ case Assembler::Condition::GreaterThan:
+ // We reverse these operations in the -inl.h file so that we don't have to
+ // copy into and out of temporaries after codegen.
+ MOZ_CRASH("should have reversed this");
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+void MacroAssemblerX86Shared::compareFloat64x2(Assembler::Condition cond,
+ FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ switch (cond) {
+ case Assembler::Condition::Equal:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vcmpeqpd,
+ &MacroAssembler::vcmpeqpdSimd128);
+ break;
+ case Assembler::Condition::LessThan:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vcmpltpd,
+ &MacroAssembler::vcmpltpdSimd128);
+ break;
+ case Assembler::Condition::LessThanOrEqual:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vcmplepd,
+ &MacroAssembler::vcmplepdSimd128);
+ break;
+ case Assembler::Condition::NotEqual:
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vcmpneqpd,
+ &MacroAssembler::vcmpneqpdSimd128);
+ break;
+ default:
+ MOZ_CRASH("unexpected condition op");
+ }
+}
+
+// Semantics of wasm max and min.
+//
+// * -0 < 0
+// * If one input is NaN then that NaN is the output
+// * If both inputs are NaN then the output is selected nondeterministically
+// * Any returned NaN is always made quiet
+// * The MVP spec 2.2.3 says "No distinction is made between signalling and
+// quiet NaNs", suggesting SNaN inputs are allowed and should not fault
+//
+// Semantics of maxps/minps/maxpd/minpd:
+//
+// * If the values are both +/-0 the rhs is returned
+// * If the rhs is SNaN then the rhs is returned
+// * If either value is NaN then the rhs is returned
+// * An SNaN operand does not appear to give rise to an exception, at least
+// not in the JS shell on Linux, though the Intel spec lists Invalid
+// as one of the possible exceptions
+
+// Various unaddressed considerations:
+//
+// It's pretty insane for this to take an Operand rhs - it really needs to be
+// a register, given the number of times we access it.
+//
+// Constant load can be folded into the ANDPS. Do we care? It won't save us
+// any registers, since output/temp1/temp2/scratch are all live at the same time
+// after the first instruction of the slow path.
+//
+// Can we use blend for the NaN extraction/insertion? We'd need xmm0 for the
+// mask, which is no fun. But it would be lhs UNORD lhs -> mask, blend;
+// rhs UNORD rhs -> mask; blend. Better than the mess we have below. But
+// we'd still need to set up the QNaN bits, unless we can blend those too
+// with the lhs UNORD rhs mask?
+//
+// If we could determine that both input lanes are NaN then the result of the
+// fast path should be fine modulo the QNaN bits, but it's not obvious this is
+// much of an advantage.
+
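+// As a concrete reference for the semantics above, one valid per-lane scalar
+// model is the following sketch (the both-NaN case may resolve either way):
+//
+//   double wasm_fmin_lane(double a, double b) {
+//     if (std::isnan(a) || std::isnan(b)) {
+//       return std::numeric_limits<double>::quiet_NaN();  // always quiet
+//     }
+//     if (a == 0.0 && b == 0.0) {
+//       return std::signbit(a) ? a : b;  // -0 is treated as less than +0
+//     }
+//     return a < b ? a : b;
+//   }
+//
+// wasm_fmax_lane is symmetric, using '>' and preferring +0 over -0.
+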
+void MacroAssemblerX86Shared::minMaxFloat32x4(bool isMin, FloatRegister lhs,
+ Operand rhs, FloatRegister temp1,
+ FloatRegister temp2,
+ FloatRegister output) {
+ ScratchSimd128Scope scratch(asMasm());
+ Label l;
+ SimdConstant quietBits(SimdConstant::SplatX4(int32_t(0x00400000)));
+
+ /* clang-format off */ /* leave my comments alone */
+ lhs = moveSimd128FloatIfNotAVXOrOther(lhs, scratch, output);
+ if (isMin) {
+ vmovaps(lhs, output); // compute
+ vminps(rhs, output, output); // min lhs, rhs
+ vmovaps(rhs, temp1); // compute
+ vminps(Operand(lhs), temp1, temp1); // min rhs, lhs
+ vorps(temp1, output, output); // fix min(-0, 0) with OR
+ } else {
+ vmovaps(lhs, output); // compute
+ vmaxps(rhs, output, output); // max lhs, rhs
+ vmovaps(rhs, temp1); // compute
+ vmaxps(Operand(lhs), temp1, temp1); // max rhs, lhs
+ vandps(temp1, output, output); // fix max(-0, 0) with AND
+ }
+ vmovaps(lhs, temp1); // compute
+ vcmpunordps(rhs, temp1, temp1); // lhs UNORD rhs
+ vptest(temp1, temp1); // check if any unordered
+ j(Assembler::Equal, &l); // and exit if not
+
+ // Slow path.
+ // output has result for non-NaN lanes, garbage in NaN lanes.
+ // temp1 has lhs UNORD rhs.
+ // temp2 is dead.
+
+ vmovaps(temp1, temp2); // clear NaN lanes of result
+ vpandn(output, temp2, temp2); // result now in temp2
+ asMasm().vpandSimd128(quietBits, temp1, temp1); // setup QNaN bits in NaN lanes
+ vorps(temp1, temp2, temp2); // and OR into result
+ vmovaps(lhs, temp1); // find NaN lanes
+ vcmpunordps(Operand(temp1), temp1, temp1); // in lhs
+ vmovaps(temp1, output); // (and save them for later)
+ vandps(lhs, temp1, temp1); // and extract the NaNs
+ vorps(temp1, temp2, temp2); // and add to the result
+ vmovaps(rhs, temp1); // find NaN lanes
+ vcmpunordps(Operand(temp1), temp1, temp1); // in rhs
+ vpandn(temp1, output, output); // except if they were in lhs
+ vandps(rhs, output, output); // and extract the NaNs
+ vorps(temp2, output, output); // and add to the result
+
+ bind(&l);
+ /* clang-format on */
+}
+
+void MacroAssemblerX86Shared::minMaxFloat32x4AVX(bool isMin, FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister temp1,
+ FloatRegister temp2,
+ FloatRegister output) {
+ ScratchSimd128Scope scratch(asMasm());
+ Label l;
+ SimdConstant quietBits(SimdConstant::SplatX4(int32_t(0x00400000)));
+
+ /* clang-format off */ /* leave my comments alone */
+ FloatRegister lhsCopy = moveSimd128FloatIfEqual(lhs, scratch, output);
+  // Allow rhs to be assigned to scratch when rhs == lhs and == output --
+  // don't make a special case since the semantics require setting up QNaN bits.
+ FloatRegister rhsCopy = moveSimd128FloatIfEqual(rhs, scratch, output);
+ if (isMin) {
+ vminps(Operand(rhs), lhs, temp2); // min lhs, rhs
+ vminps(Operand(lhs), rhs, temp1); // min rhs, lhs
+ vorps(temp1, temp2, output); // fix min(-0, 0) with OR
+ } else {
+ vmaxps(Operand(rhs), lhs, temp2); // max lhs, rhs
+ vmaxps(Operand(lhs), rhs, temp1); // max rhs, lhs
+ vandps(temp1, temp2, output); // fix max(-0, 0) with AND
+ }
+ vcmpunordps(Operand(rhsCopy), lhsCopy, temp1); // lhs UNORD rhs
+ vptest(temp1, temp1); // check if any unordered
+ j(Assembler::Equal, &l); // and exit if not
+
+ // Slow path.
+ // output has result for non-NaN lanes, garbage in NaN lanes.
+ // temp1 has lhs UNORD rhs.
+ // temp2 is dead.
+ vcmpunordps(Operand(lhsCopy), lhsCopy, temp2); // find NaN lanes in lhs
+  vblendvps(temp2, lhsCopy, rhsCopy, temp2);    // add other lanes from rhs
+ asMasm().vporSimd128(quietBits, temp2, temp2); // setup QNaN bits in NaN lanes
+  vblendvps(temp1, temp2, output, output);      // replace NaN lanes from temp2
+
+ bind(&l);
+ /* clang-format on */
+}
+
+// Exactly as above.
+void MacroAssemblerX86Shared::minMaxFloat64x2(bool isMin, FloatRegister lhs,
+ Operand rhs, FloatRegister temp1,
+ FloatRegister temp2,
+ FloatRegister output) {
+ ScratchSimd128Scope scratch(asMasm());
+ Label l;
+ SimdConstant quietBits(SimdConstant::SplatX2(int64_t(0x0008000000000000ull)));
+
+ /* clang-format off */ /* leave my comments alone */
+ lhs = moveSimd128FloatIfNotAVXOrOther(lhs, scratch, output);
+ if (isMin) {
+ vmovapd(lhs, output); // compute
+ vminpd(rhs, output, output); // min lhs, rhs
+ vmovapd(rhs, temp1); // compute
+ vminpd(Operand(lhs), temp1, temp1); // min rhs, lhs
+ vorpd(temp1, output, output); // fix min(-0, 0) with OR
+ } else {
+ vmovapd(lhs, output); // compute
+ vmaxpd(rhs, output, output); // max lhs, rhs
+ vmovapd(rhs, temp1); // compute
+ vmaxpd(Operand(lhs), temp1, temp1); // max rhs, lhs
+ vandpd(temp1, output, output); // fix max(-0, 0) with AND
+ }
+ vmovapd(lhs, temp1); // compute
+ vcmpunordpd(rhs, temp1, temp1); // lhs UNORD rhs
+ vptest(temp1, temp1); // check if any unordered
+ j(Assembler::Equal, &l); // and exit if not
+
+ // Slow path.
+ // output has result for non-NaN lanes, garbage in NaN lanes.
+ // temp1 has lhs UNORD rhs.
+ // temp2 is dead.
+
+ vmovapd(temp1, temp2); // clear NaN lanes of result
+ vpandn(output, temp2, temp2); // result now in temp2
+ asMasm().vpandSimd128(quietBits, temp1, temp1); // setup QNaN bits in NaN lanes
+ vorpd(temp1, temp2, temp2); // and OR into result
+ vmovapd(lhs, temp1); // find NaN lanes
+ vcmpunordpd(Operand(temp1), temp1, temp1); // in lhs
+ vmovapd(temp1, output); // (and save them for later)
+ vandpd(lhs, temp1, temp1); // and extract the NaNs
+ vorpd(temp1, temp2, temp2); // and add to the result
+ vmovapd(rhs, temp1); // find NaN lanes
+ vcmpunordpd(Operand(temp1), temp1, temp1); // in rhs
+ vpandn(temp1, output, output); // except if they were in lhs
+ vandpd(rhs, output, output); // and extract the NaNs
+ vorpd(temp2, output, output); // and add to the result
+
+ bind(&l);
+ /* clang-format on */
+}
+
+void MacroAssemblerX86Shared::minMaxFloat64x2AVX(bool isMin, FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister temp1,
+ FloatRegister temp2,
+ FloatRegister output) {
+ ScratchSimd128Scope scratch(asMasm());
+ Label l;
+ SimdConstant quietBits(SimdConstant::SplatX2(int64_t(0x0008000000000000ull)));
+
+ /* clang-format off */ /* leave my comments alone */
+ FloatRegister lhsCopy = moveSimd128FloatIfEqual(lhs, scratch, output);
+  // Allow rhs to be assigned to scratch when rhs == lhs and == output --
+  // don't make a special case since the semantics require setting up QNaN bits.
+ FloatRegister rhsCopy = moveSimd128FloatIfEqual(rhs, scratch, output);
+ if (isMin) {
+ vminpd(Operand(rhs), lhs, temp2); // min lhs, rhs
+ vminpd(Operand(lhs), rhs, temp1); // min rhs, lhs
+ vorpd(temp1, temp2, output); // fix min(-0, 0) with OR
+ } else {
+ vmaxpd(Operand(rhs), lhs, temp2); // max lhs, rhs
+ vmaxpd(Operand(lhs), rhs, temp1); // max rhs, lhs
+ vandpd(temp1, temp2, output); // fix max(-0, 0) with AND
+ }
+ vcmpunordpd(Operand(rhsCopy), lhsCopy, temp1); // lhs UNORD rhs
+ vptest(temp1, temp1); // check if any unordered
+ j(Assembler::Equal, &l); // and exit if not
+
+ // Slow path.
+ // output has result for non-NaN lanes, garbage in NaN lanes.
+ // temp1 has lhs UNORD rhs.
+ // temp2 is dead.
+ vcmpunordpd(Operand(lhsCopy), lhsCopy, temp2); // find NaN lanes in lhs
+  vblendvpd(temp2, lhsCopy, rhsCopy, temp2);    // add other lanes from rhs
+ asMasm().vporSimd128(quietBits, temp2, temp2); // setup QNaN bits in NaN lanes
+  vblendvpd(temp1, temp2, output, output);      // replace NaN lanes from temp2
+
+ bind(&l);
+ /* clang-format on */
+}
+
+void MacroAssemblerX86Shared::minFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister temp1,
+ FloatRegister temp2,
+ FloatRegister output) {
+ if (HasAVX()) {
+ minMaxFloat32x4AVX(/*isMin=*/true, lhs, rhs, temp1, temp2, output);
+ return;
+ }
+ minMaxFloat32x4(/*isMin=*/true, lhs, Operand(rhs), temp1, temp2, output);
+}
+
+void MacroAssemblerX86Shared::maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister temp1,
+ FloatRegister temp2,
+ FloatRegister output) {
+ if (HasAVX()) {
+ minMaxFloat32x4AVX(/*isMin=*/false, lhs, rhs, temp1, temp2, output);
+ return;
+ }
+ minMaxFloat32x4(/*isMin=*/false, lhs, Operand(rhs), temp1, temp2, output);
+}
+
+void MacroAssemblerX86Shared::minFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister temp1,
+ FloatRegister temp2,
+ FloatRegister output) {
+ if (HasAVX()) {
+ minMaxFloat64x2AVX(/*isMin=*/true, lhs, rhs, temp1, temp2, output);
+ return;
+ }
+ minMaxFloat64x2(/*isMin=*/true, lhs, Operand(rhs), temp1, temp2, output);
+}
+
+void MacroAssemblerX86Shared::maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister temp1,
+ FloatRegister temp2,
+ FloatRegister output) {
+ if (HasAVX()) {
+ minMaxFloat64x2AVX(/*isMin=*/false, lhs, rhs, temp1, temp2, output);
+ return;
+ }
+ minMaxFloat64x2(/*isMin=*/false, lhs, Operand(rhs), temp1, temp2, output);
+}
+
+void MacroAssemblerX86Shared::packedShiftByScalarInt8x16(
+ FloatRegister in, Register count, FloatRegister xtmp, FloatRegister dest,
+ void (MacroAssemblerX86Shared::*shift)(FloatRegister, FloatRegister,
+ FloatRegister),
+ void (MacroAssemblerX86Shared::*extend)(const Operand&, FloatRegister)) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, scratch);
+
+ // High bytes
+ vpalignr(Operand(in), xtmp, xtmp, 8);
+ (this->*extend)(Operand(xtmp), xtmp);
+ (this->*shift)(scratch, xtmp, xtmp);
+
+ // Low bytes
+ (this->*extend)(Operand(dest), dest);
+ (this->*shift)(scratch, dest, dest);
+
+ // Mask off garbage to avoid saturation during packing
+ asMasm().loadConstantSimd128Int(SimdConstant::SplatX4(int32_t(0x00FF00FF)),
+ scratch);
+ vpand(Operand(scratch), xtmp, xtmp);
+ vpand(Operand(scratch), dest, dest);
+
+ vpackuswb(Operand(xtmp), dest, dest);
+}
+
+void MacroAssemblerX86Shared::packedLeftShiftByScalarInt8x16(
+ FloatRegister in, Register count, FloatRegister xtmp, FloatRegister dest) {
+ packedShiftByScalarInt8x16(in, count, xtmp, dest,
+ &MacroAssemblerX86Shared::vpsllw,
+ &MacroAssemblerX86Shared::vpmovzxbw);
+}
+
+void MacroAssemblerX86Shared::packedLeftShiftByScalarInt8x16(
+ Imm32 count, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(count.value <= 7);
+ if (MOZ_UNLIKELY(count.value == 0)) {
+ moveSimd128Int(src, dest);
+ return;
+ }
+ src = asMasm().moveSimd128IntIfNotAVX(src, dest);
+  // Use the doubling trick for low shift counts; otherwise mask off the bits
+  // that are shifted out of the low byte of each word and use word shifts. The
+  // optimal cutoff remains to be explored.
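+  //
+  // For example, count == 3 takes the doubling path and emits three adds:
+  //   dest = src + src; dest = dest + dest; dest = dest + dest;
+  // which equals (src << 3) in each byte lane; x86 has no packed byte shift.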
+ if (count.value <= 3) {
+ vpaddb(Operand(src), src, dest);
+ for (int32_t shift = count.value - 1; shift > 0; --shift) {
+ vpaddb(Operand(dest), dest, dest);
+ }
+ } else {
+ asMasm().bitwiseAndSimd128(src, SimdConstant::SplatX16(0xFF >> count.value),
+ dest);
+ vpsllw(count, dest, dest);
+ }
+}
+
+void MacroAssemblerX86Shared::packedRightShiftByScalarInt8x16(
+ FloatRegister in, Register count, FloatRegister xtmp, FloatRegister dest) {
+ packedShiftByScalarInt8x16(in, count, xtmp, dest,
+ &MacroAssemblerX86Shared::vpsraw,
+ &MacroAssemblerX86Shared::vpmovsxbw);
+}
+
+void MacroAssemblerX86Shared::packedRightShiftByScalarInt8x16(
+ Imm32 count, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(count.value <= 7);
+ ScratchSimd128Scope scratch(asMasm());
+
+ vpunpckhbw(src, scratch, scratch);
+ vpunpcklbw(src, dest, dest);
+ vpsraw(Imm32(count.value + 8), scratch, scratch);
+ vpsraw(Imm32(count.value + 8), dest, dest);
+ vpacksswb(Operand(scratch), dest, dest);
+}
+
+void MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt8x16(
+ FloatRegister in, Register count, FloatRegister xtmp, FloatRegister dest) {
+ packedShiftByScalarInt8x16(in, count, xtmp, dest,
+ &MacroAssemblerX86Shared::vpsrlw,
+ &MacroAssemblerX86Shared::vpmovzxbw);
+}
+
+void MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt8x16(
+ Imm32 count, FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(count.value <= 7);
+ src = asMasm().moveSimd128IntIfNotAVX(src, dest);
+ asMasm().bitwiseAndSimd128(
+ src, SimdConstant::SplatX16((0xFF << count.value) & 0xFF), dest);
+ vpsrlw(count, dest, dest);
+}
+
+void MacroAssemblerX86Shared::packedLeftShiftByScalarInt16x8(
+ FloatRegister in, Register count, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, scratch);
+ vpsllw(scratch, in, dest);
+}
+
+void MacroAssemblerX86Shared::packedRightShiftByScalarInt16x8(
+ FloatRegister in, Register count, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, scratch);
+ vpsraw(scratch, in, dest);
+}
+
+void MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt16x8(
+ FloatRegister in, Register count, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, scratch);
+ vpsrlw(scratch, in, dest);
+}
+
+void MacroAssemblerX86Shared::packedLeftShiftByScalarInt32x4(
+ FloatRegister in, Register count, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, scratch);
+ vpslld(scratch, in, dest);
+}
+
+void MacroAssemblerX86Shared::packedRightShiftByScalarInt32x4(
+ FloatRegister in, Register count, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, scratch);
+ vpsrad(scratch, in, dest);
+}
+
+void MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt32x4(
+ FloatRegister in, Register count, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, scratch);
+ vpsrld(scratch, in, dest);
+}
+
+void MacroAssemblerX86Shared::packedLeftShiftByScalarInt64x2(
+ FloatRegister in, Register count, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, scratch);
+ vpsllq(scratch, in, dest);
+}
+
+void MacroAssemblerX86Shared::packedRightShiftByScalarInt64x2(
+ FloatRegister in, Register count, FloatRegister temp, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, temp);
+ asMasm().signReplicationInt64x2(in, scratch);
+ in = asMasm().moveSimd128FloatIfNotAVX(in, dest);
+ // Invert if negative, shift all, invert back if negative.
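+  // Per lane, with sign = x >> 63 replicated to all 64 bits, this relies on
+  //   arithmetic shift:  x >>s n  ==  sign ^ ((sign ^ x) >>u n)
+  // since XOR with all-ones is bitwise NOT and ~(~x >>u n) shifts in ones.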
+ vpxor(Operand(scratch), in, dest);
+ vpsrlq(temp, dest, dest);
+ vpxor(Operand(scratch), dest, dest);
+}
+
+void MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt64x2(
+ FloatRegister in, Register count, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ vmovd(count, scratch);
+ vpsrlq(scratch, in, dest);
+}
+
+void MacroAssemblerX86Shared::packedRightShiftByScalarInt64x2(
+ Imm32 count, FloatRegister src, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ asMasm().signReplicationInt64x2(src, scratch);
+ // Invert if negative, shift all, invert back if negative.
+ src = asMasm().moveSimd128FloatIfNotAVX(src, dest);
+ vpxor(Operand(scratch), src, dest);
+ vpsrlq(Imm32(count.value & 63), dest, dest);
+ vpxor(Operand(scratch), dest, dest);
+}
+
+void MacroAssemblerX86Shared::selectSimd128(FloatRegister mask,
+ FloatRegister onTrue,
+ FloatRegister onFalse,
+ FloatRegister temp,
+ FloatRegister output) {
+ // Normally the codegen will attempt to enforce these register assignments so
+ // that the moves are avoided.
+
+ onTrue = asMasm().moveSimd128IntIfNotAVX(onTrue, output);
+ if (MOZ_UNLIKELY(mask == onTrue)) {
+ vpor(Operand(onFalse), onTrue, output);
+ return;
+ }
+
+ mask = asMasm().moveSimd128IntIfNotAVX(mask, temp);
+
+ vpand(Operand(mask), onTrue, output);
+ vpandn(Operand(onFalse), mask, temp);
+ vpor(Operand(temp), output, output);
+}
+
+// Code sequences for int32x4<->float32x4 culled from v8; commentary added.
+
+void MacroAssemblerX86Shared::unsignedConvertInt32x4ToFloat32x4(
+ FloatRegister src, FloatRegister dest) {
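+  // Per-lane idea, as a sketch: split the unsigned value into a low 16-bit
+  // part and a high part, convert each part exactly, then recombine:
+  //
+  //   uint32_t lo = x & 0xFFFF, hi = x - lo;
+  //   result = float(lo) + 2.0f * float(int32_t(hi >> 1));
+  //
+  // Only the final addition can round.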
+ ScratchSimd128Scope scratch(asMasm());
+ src = asMasm().moveSimd128IntIfNotAVX(src, dest);
+ vpxor(Operand(scratch), scratch, scratch); // extract low bits
+ vpblendw(0x55, src, scratch, scratch); // into scratch
+ vpsubd(Operand(scratch), src, dest); // and high bits into dest
+ vcvtdq2ps(scratch, scratch); // convert low bits
+ vpsrld(Imm32(1), dest, dest); // get high into unsigned range
+ vcvtdq2ps(dest, dest); // convert
+ vaddps(Operand(dest), dest, dest); // and back into signed
+ vaddps(Operand(scratch), dest, dest); // combine high+low: may round
+}
+
+void MacroAssemblerX86Shared::truncSatFloat32x4ToInt32x4(FloatRegister src,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+
+ // The cvttps2dq instruction is the workhorse but does not handle NaN or out
+ // of range values as we need it to. We want to saturate too-large positive
+ // values to 7FFFFFFFh and too-large negative values to 80000000h. NaN and -0
+ // become 0.
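+  //
+  // A scalar model of the required per-lane behavior (illustrative only):
+  //
+  //   int32_t satS(float x) {
+  //     if (x != x) return 0;                      // NaN
+  //     if (x >= 2147483648.0f) return INT32_MAX;  // too large positive
+  //     if (x < -2147483648.0f) return INT32_MIN;  // too large negative
+  //     return int32_t(x);                         // truncate toward zero
+  //   }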
+
+  // Convert NaN to 0 by masking away lanes that compare unordered to
+  // themselves.
+ if (HasAVX()) {
+ vcmpeqps(Operand(src), src, scratch);
+ vpand(Operand(scratch), src, dest);
+ } else {
+ vmovaps(src, scratch);
+ vcmpeqps(Operand(scratch), scratch, scratch);
+ moveSimd128Float(src, dest);
+ vpand(Operand(scratch), dest, dest);
+ }
+
+  // Make lanes in scratch == FFFFFFFFh if dest overflows during cvttps2dq,
+ // otherwise 0.
+ static const SimdConstant minOverflowedInt =
+ SimdConstant::SplatX4(2147483648.f);
+ if (HasAVX()) {
+ asMasm().vcmpgepsSimd128(minOverflowedInt, dest, scratch);
+ } else {
+ asMasm().loadConstantSimd128Float(minOverflowedInt, scratch);
+ vcmpleps(Operand(dest), scratch, scratch);
+ }
+
+ // Convert. This will make the output 80000000h if the input is out of range.
+ vcvttps2dq(dest, dest);
+
+ // Convert overflow lanes to 0x7FFFFFFF.
+ vpxor(Operand(scratch), dest, dest);
+}
+
+void MacroAssemblerX86Shared::unsignedTruncSatFloat32x4ToInt32x4(
+ FloatRegister src, FloatRegister temp, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ src = asMasm().moveSimd128FloatIfNotAVX(src, dest);
+
+ // The cvttps2dq instruction is the workhorse but does not handle NaN or out
+ // of range values as we need it to. We want to saturate too-large positive
+ // values to FFFFFFFFh and negative values to zero. NaN and -0 become 0.
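+  //
+  // A scalar model of the required per-lane behavior (illustrative only):
+  //
+  //   uint32_t satU(float x) {
+  //     if (!(x > 0.0f)) return 0;                  // NaN, -0, negatives
+  //     if (x >= 4294967296.0f) return UINT32_MAX;  // too large
+  //     return uint32_t(x);                         // truncate toward zero
+  //   }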
+
+ // Convert NaN and negative values to zeroes in dest.
+ vxorps(Operand(scratch), scratch, scratch);
+ vmaxps(Operand(scratch), src, dest);
+
+  // Place the largest positive signed integer in all lanes of scratch.
+ // We use it to bias the conversion to handle edge cases.
+ asMasm().loadConstantSimd128Float(SimdConstant::SplatX4(2147483647.f),
+ scratch);
+
+  // temp = dest - 7FFFFFFFh (as floating); this brings integers in the unsigned
+  // range but above the signed range into the signed range; 0 => -7FFFFFFFh.
+ vmovaps(dest, temp);
+ vsubps(Operand(scratch), temp, temp);
+
+ // scratch = mask of biased values that are greater than 7FFFFFFFh.
+ vcmpleps(Operand(temp), scratch, scratch);
+
+ // Convert the biased values to integer. Positive values above 7FFFFFFFh will
+ // have been converted to 80000000h, all others become the expected integer.
+ vcvttps2dq(temp, temp);
+
+ // As lanes of scratch are ~0 where the result overflows, this computes
+ // 7FFFFFFF in lanes of temp that are 80000000h, and leaves other lanes
+ // untouched as the biased integer.
+ vpxor(Operand(scratch), temp, temp);
+
+ // Convert negative biased lanes in temp to zero. After this, temp will be
+ // zero where the result should be zero or is less than 80000000h, 7FFFFFFF
+ // where the result overflows, and will have the converted biased result in
+ // other lanes (for input values >= 80000000h).
+ vpxor(Operand(scratch), scratch, scratch);
+ vpmaxsd(Operand(scratch), temp, temp);
+
+ // Convert. Overflow lanes above 7FFFFFFFh will be 80000000h, other lanes will
+ // be what they should be.
+ vcvttps2dq(dest, dest);
+
+  // Add temp to the result. Overflow lanes with 80000000h become FFFFFFFFh,
+  // biased high-value unsigned lanes become unbiased, and everything else is
+  // left unchanged.
+ vpaddd(Operand(temp), dest, dest);
+}
+
+void MacroAssemblerX86Shared::unsignedTruncFloat32x4ToInt32x4Relaxed(
+ FloatRegister src, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+ src = asMasm().moveSimd128FloatIfNotAVX(src, dest);
+
+  // Split the input: lanes below 80000000h go into dest, the remaining lanes
+  // go into scratch; the corresponding lane of the other register stays zero.
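+  // (0x4f000000 is the IEEE-754 bit pattern of 2147483648.0f, i.e. 2^31.)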
+ asMasm().loadConstantSimd128Float(SimdConstant::SplatX4(0x4f000000), scratch);
+ vcmpltps(Operand(src), scratch, scratch);
+ vpand(Operand(src), scratch, scratch);
+ vpxor(Operand(scratch), src, dest);
+
+ // Convert lanes below 80000000h into unsigned int without issues.
+ vcvttps2dq(dest, dest);
+  // For lanes at or above 80000000h, use the IEEE-754 representation: multiply
+  // by 2 (to add 1 to the exponent) and shift the bit pattern left by 8 bits.
+ vaddps(Operand(scratch), scratch, scratch);
+ vpslld(Imm32(8), scratch, scratch);
+
+ // Combine the results.
+ vpaddd(Operand(scratch), dest, dest);
+}
+
+void MacroAssemblerX86Shared::unsignedConvertInt32x4ToFloat64x2(
+ FloatRegister src, FloatRegister dest) {
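+  // Bit trick: pairing each u32 lane with the high word 0x43300000 forms the
+  // double 2^52 + u32 exactly; subtracting 2^52 (4503599627370496.0) then
+  // yields the unsigned value as a double with no rounding.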
+ src = asMasm().moveSimd128FloatIfNotAVX(src, dest);
+ asMasm().vunpcklpsSimd128(SimdConstant::SplatX4(0x43300000), src, dest);
+ asMasm().vsubpdSimd128(SimdConstant::SplatX2(4503599627370496.0), dest, dest);
+}
+
+void MacroAssemblerX86Shared::truncSatFloat64x2ToInt32x4(FloatRegister src,
+ FloatRegister temp,
+ FloatRegister dest) {
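+  // Saturating f64x2 -> i32x4: NaN lanes are driven to zero via the
+  // self-compare mask, positive overflow is clamped to 2147483647.0 before the
+  // convert, and lanes below INT32_MIN come out of vcvttpd2dq as 80000000h,
+  // which is already the desired saturation. The convert zeroes the two high
+  // output lanes.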
+ FloatRegister srcForTemp = asMasm().moveSimd128FloatIfNotAVX(src, temp);
+ vcmpeqpd(Operand(srcForTemp), srcForTemp, temp);
+
+ src = asMasm().moveSimd128FloatIfNotAVX(src, dest);
+ asMasm().vandpdSimd128(SimdConstant::SplatX2(2147483647.0), temp, temp);
+ vminpd(Operand(temp), src, dest);
+ vcvttpd2dq(dest, dest);
+}
+
+void MacroAssemblerX86Shared::unsignedTruncSatFloat64x2ToInt32x4(
+ FloatRegister src, FloatRegister temp, FloatRegister dest) {
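+  // Saturating f64x2 -> u32x4 (low two lanes): NaN and negative lanes become 0
+  // via the max with 0, positive overflow is clamped to 4294967295.0, and the
+  // clamped value is truncated. Adding 2^52 then leaves the integer result in
+  // the low 32 bits of each double; vshufps packs those low halves into the
+  // two low output lanes and fills the high lanes with zeroes from temp.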
+ src = asMasm().moveSimd128FloatIfNotAVX(src, dest);
+
+ vxorpd(temp, temp, temp);
+ vmaxpd(Operand(temp), src, dest);
+
+ asMasm().vminpdSimd128(SimdConstant::SplatX2(4294967295.0), dest, dest);
+ vroundpd(SSERoundingMode::Trunc, Operand(dest), dest);
+ asMasm().vaddpdSimd128(SimdConstant::SplatX2(4503599627370496.0), dest, dest);
+
+ // temp == 0
+ vshufps(0x88, temp, dest, dest);
+}
+
+void MacroAssemblerX86Shared::unsignedTruncFloat64x2ToInt32x4Relaxed(
+ FloatRegister src, FloatRegister dest) {
+ ScratchSimd128Scope scratch(asMasm());
+
+  // The same as unsignedTruncSatFloat64x2ToInt32x4 above, but without the NaN
+  // and out-of-bounds checks.
+ vroundpd(SSERoundingMode::Trunc, Operand(src), dest);
+ asMasm().loadConstantSimd128Float(SimdConstant::SplatX2(4503599627370496.0),
+ scratch);
+ vaddpd(Operand(scratch), dest, dest);
+  // The scratch register has zeros in f32x4 lanes 0 and 2: the in-memory
+  // representation of the splatted double constant contains zero in its low
+  // 32 bits.
+ vshufps(0x88, scratch, dest, dest);
+}
+
+void MacroAssemblerX86Shared::popcntInt8x16(FloatRegister src,
+ FloatRegister temp,
+ FloatRegister output) {
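+  // Per-byte popcount via two 4-bit table lookups: split each byte into its
+  // low and high nibble, look up each nibble's popcount in the 16-entry table
+  // below with vpshufb, and add the two partial counts.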
+ ScratchSimd128Scope scratch(asMasm());
+ asMasm().loadConstantSimd128Int(SimdConstant::SplatX16(0x0f), scratch);
+ FloatRegister srcForTemp = asMasm().moveSimd128IntIfNotAVX(src, temp);
+ vpand(scratch, srcForTemp, temp);
+ vpandn(src, scratch, scratch);
+ int8_t counts[] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
+ asMasm().loadConstantSimd128(SimdConstant::CreateX16(counts), output);
+ vpsrlw(Imm32(4), scratch, scratch);
+ vpshufb(temp, output, output);
+ asMasm().loadConstantSimd128(SimdConstant::CreateX16(counts), temp);
+ vpshufb(scratch, temp, temp);
+ vpaddb(Operand(temp), output, output);
+}
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
new file mode 100644
index 0000000000..4985e072d8
--- /dev/null
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -0,0 +1,3396 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_MacroAssembler_x86_shared_inl_h
+#define jit_x86_shared_MacroAssembler_x86_shared_inl_h
+
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+ vmovd(src, dest);
+}
+
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+ vmovd(src, dest);
+}
+
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+ movsbl(src, dest);
+}
+
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+ movswl(src, dest);
+}
+
+void MacroAssembler::loadAbiReturnAddress(Register dest) {
+ loadPtr(Address(getStackPointer(), 0), dest);
+}
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::not32(Register reg) { notl(reg); }
+
+void MacroAssembler::and32(Register src, Register dest) { andl(src, dest); }
+
+void MacroAssembler::and32(Imm32 imm, Register dest) { andl(imm, dest); }
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) {
+ andl(imm, Operand(dest));
+}
+
+void MacroAssembler::and32(const Address& src, Register dest) {
+ andl(Operand(src), dest);
+}
+
+void MacroAssembler::or32(Register src, Register dest) { orl(src, dest); }
+
+void MacroAssembler::or32(Imm32 imm, Register dest) { orl(imm, dest); }
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) {
+ orl(imm, Operand(dest));
+}
+
+void MacroAssembler::xor32(Register src, Register dest) { xorl(src, dest); }
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) { xorl(imm, dest); }
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
+ xorl(imm, Operand(dest));
+}
+
+void MacroAssembler::xor32(const Address& src, Register dest) {
+ xorl(Operand(src), dest);
+}
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+ if (AssemblerX86Shared::HasLZCNT()) {
+ lzcntl(src, dest);
+ return;
+ }
+
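+  // Fallback: bsrl yields the index of the highest set bit; XORing with 0x1F
+  // computes 31 - index, i.e. the leading-zero count. For a zero input we
+  // first force the "index" to 0x3F so the XOR yields 32.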
+ bsrl(src, dest);
+ if (!knownNotZero) {
+ // If the source is zero then bsrl leaves garbage in the destination.
+ Label nonzero;
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(0x3F), dest);
+ bind(&nonzero);
+ }
+ xorl(Imm32(0x1F), dest);
+}
+
+void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
+ if (AssemblerX86Shared::HasBMI1()) {
+ tzcntl(src, dest);
+ return;
+ }
+
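+  // Fallback: bsfl yields the index of the lowest set bit, which is exactly
+  // the trailing-zero count; a zero input is handled by substituting 32.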
+ bsfl(src, dest);
+ if (!knownNotZero) {
+ Label nonzero;
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(32), dest);
+ bind(&nonzero);
+ }
+}
+
+void MacroAssembler::popcnt32(Register input, Register output, Register tmp) {
+ if (AssemblerX86Shared::HasPOPCNT()) {
+ popcntl(input, output);
+ return;
+ }
+
+ MOZ_ASSERT(tmp != InvalidReg);
+
+ // Equivalent to mozilla::CountPopulation32()
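+  // (roughly the classic SWAR sequence:
+  //    x -= (x >> 1) & 0x55555555;
+  //    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+  //    x = (x + (x >> 4)) & 0x0F0F0F0F;
+  //    return (x * 0x01010101) >> 24; )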
+
+ movl(input, tmp);
+ if (input != output) {
+ movl(input, output);
+ }
+ shrl(Imm32(1), output);
+ andl(Imm32(0x55555555), output);
+ subl(output, tmp);
+ movl(tmp, output);
+ andl(Imm32(0x33333333), output);
+ shrl(Imm32(2), tmp);
+ andl(Imm32(0x33333333), tmp);
+ addl(output, tmp);
+ movl(tmp, output);
+ shrl(Imm32(4), output);
+ addl(tmp, output);
+ andl(Imm32(0xF0F0F0F), output);
+ imull(Imm32(0x1010101), output, output);
+ shrl(Imm32(24), output);
+}
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap16SignExtend(Register reg) {
+ rolw(Imm32(8), reg);
+ movswl(reg, reg);
+}
+
+void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
+ rolw(Imm32(8), reg);
+ movzwl(reg, reg);
+}
+
+void MacroAssembler::byteSwap32(Register reg) { bswapl(reg); }
+
+// ===============================================================
+// Arithmetic instructions
+
+void MacroAssembler::add32(Register src, Register dest) { addl(src, dest); }
+
+void MacroAssembler::add32(Imm32 imm, Register dest) { addl(imm, dest); }
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) {
+ addl(imm, Operand(dest));
+}
+
+void MacroAssembler::add32(Imm32 imm, const AbsoluteAddress& dest) {
+ addl(imm, Operand(dest));
+}
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+ vaddss(src, dest, dest);
+}
+
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+ vaddsd(src, dest, dest);
+}
+
+void MacroAssembler::sub32(Register src, Register dest) { subl(src, dest); }
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) { subl(imm, dest); }
+
+void MacroAssembler::sub32(const Address& src, Register dest) {
+ subl(Operand(src), dest);
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+ vsubsd(src, dest, dest);
+}
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+ vsubss(src, dest, dest);
+}
+
+void MacroAssembler::mul32(Register rhs, Register srcDest) {
+ imull(rhs, srcDest);
+}
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) { imull(imm, srcDest); }
+
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+ vmulss(src, dest, dest);
+}
+
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+ vmulsd(src, dest, dest);
+}
+
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+ Register tempEdx, bool isUnsigned) {
+ MOZ_ASSERT(srcDest == eax && tempEdx == edx);
+
+  // idiv/udiv take a 64-bit dividend in (edx:eax): sign-extend eax into edx
+  // for signed division, zero edx for unsigned.
+ if (isUnsigned) {
+ mov(ImmWord(0), edx);
+ udiv(rhs);
+ } else {
+ cdq();
+ idiv(rhs);
+ }
+}
+
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+ Register tempEdx, bool isUnsigned) {
+ MOZ_ASSERT(srcDest == eax && tempEdx == edx);
+
+  // idiv/udiv take a 64-bit dividend in (edx:eax): sign-extend eax into edx
+  // for signed division, zero edx for unsigned.
+ if (isUnsigned) {
+ mov(ImmWord(0), edx);
+ udiv(rhs);
+ } else {
+ cdq();
+ idiv(rhs);
+ }
+ mov(edx, eax);
+}
+
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+ vdivss(src, dest, dest);
+}
+
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+ vdivsd(src, dest, dest);
+}
+
+void MacroAssembler::neg32(Register reg) { negl(reg); }
+
+void MacroAssembler::negateFloat(FloatRegister reg) {
+ ScratchFloat32Scope scratch(*this);
+ vpcmpeqw(Operand(scratch), scratch, scratch);
+ vpsllq(Imm32(31), scratch, scratch);
+
+ // XOR the float in a float register with -0.0.
+ vxorps(scratch, reg, reg); // s ^ 0x80000000
+}
+
+void MacroAssembler::negateDouble(FloatRegister reg) {
+ // From MacroAssemblerX86Shared::maybeInlineDouble
+ ScratchDoubleScope scratch(*this);
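+  // Build the sign-bit mask 0x8000000000000000 in each quadword without a
+  // memory load: all-ones from vpcmpeqw, shifted left by 63.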
+ vpcmpeqw(Operand(scratch), scratch, scratch);
+ vpsllq(Imm32(63), scratch, scratch);
+
+  // XOR the double in a float register with -0.0.
+  vxorpd(scratch, reg, reg); // s ^ 0x8000000000000000
+}
+
+void MacroAssembler::abs32(Register src, Register dest) {
+ if (src != dest) {
+ move32(src, dest);
+ }
+ Label positive;
+ branchTest32(Assembler::NotSigned, dest, dest, &positive);
+ neg32(dest);
+ bind(&positive);
+}
+
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+ ScratchFloat32Scope scratch(*this);
+ loadConstantFloat32(mozilla::SpecificNaN<float>(
+ 0, mozilla::FloatingPoint<float>::kSignificandBits),
+ scratch);
+ vandps(scratch, src, dest);
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+ ScratchDoubleScope scratch(*this);
+ loadConstantDouble(mozilla::SpecificNaN<double>(
+ 0, mozilla::FloatingPoint<double>::kSignificandBits),
+ scratch);
+ vandpd(scratch, src, dest);
+}
+
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+ vsqrtss(src, dest, dest);
+}
+
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+ vsqrtsd(src, dest, dest);
+}
+
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Rotation instructions
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
+ MOZ_ASSERT(input == dest, "defineReuseInput");
+ count.value &= 0x1f;
+ if (count.value) {
+ roll(count, input);
+ }
+}
+
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
+ MOZ_ASSERT(input == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+ roll_cl(input);
+}
+
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+ MOZ_ASSERT(input == dest, "defineReuseInput");
+ count.value &= 0x1f;
+ if (count.value) {
+ rorl(count, input);
+ }
+}
+
+void MacroAssembler::rotateRight(Register count, Register input,
+ Register dest) {
+ MOZ_ASSERT(input == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+ rorl_cl(input);
+}
+
+// ===============================================================
+// Shift instructions
+
+void MacroAssembler::lshift32(Register shift, Register srcDest) {
+ if (HasBMI2()) {
+ shlxl(srcDest, shift, srcDest);
+ return;
+ }
+ MOZ_ASSERT(shift == ecx);
+ shll_cl(srcDest);
+}
+
+void MacroAssembler::flexibleLshift32(Register shift, Register srcDest) {
+ if (HasBMI2()) {
+ shlxl(srcDest, shift, srcDest);
+ return;
+ }
+ if (shift == ecx) {
+ shll_cl(srcDest);
+ } else {
+ // Shift amount must be in ecx.
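+    // Swap the shift amount into ecx, shift, then swap back. Note that after
+    // the xchg the value being shifted may itself have moved: if srcDest was
+    // ecx its value is now in the shift register, and if srcDest aliases shift
+    // its value is now in ecx; the ternary below picks the right register.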
+ xchg(shift, ecx);
+ shll_cl(shift == srcDest ? ecx : srcDest == ecx ? shift : srcDest);
+ xchg(shift, ecx);
+ }
+}
+
+void MacroAssembler::rshift32(Register shift, Register srcDest) {
+ if (HasBMI2()) {
+ shrxl(srcDest, shift, srcDest);
+ return;
+ }
+ MOZ_ASSERT(shift == ecx);
+ shrl_cl(srcDest);
+}
+
+void MacroAssembler::flexibleRshift32(Register shift, Register srcDest) {
+ if (HasBMI2()) {
+ shrxl(srcDest, shift, srcDest);
+ return;
+ }
+ if (shift == ecx) {
+ shrl_cl(srcDest);
+ } else {
+ // Shift amount must be in ecx.
+ xchg(shift, ecx);
+ shrl_cl(shift == srcDest ? ecx : srcDest == ecx ? shift : srcDest);
+ xchg(shift, ecx);
+ }
+}
+
+void MacroAssembler::rshift32Arithmetic(Register shift, Register srcDest) {
+ if (HasBMI2()) {
+ sarxl(srcDest, shift, srcDest);
+ return;
+ }
+ MOZ_ASSERT(shift == ecx);
+ sarl_cl(srcDest);
+}
+
+void MacroAssembler::flexibleRshift32Arithmetic(Register shift,
+ Register srcDest) {
+ if (HasBMI2()) {
+ sarxl(srcDest, shift, srcDest);
+ return;
+ }
+ if (shift == ecx) {
+ sarl_cl(srcDest);
+ } else {
+ // Shift amount must be in ecx.
+ xchg(shift, ecx);
+ sarl_cl(shift == srcDest ? ecx : srcDest == ecx ? shift : srcDest);
+ xchg(shift, ecx);
+ }
+}
+
+void MacroAssembler::lshift32(Imm32 shift, Register srcDest) {
+ shll(shift, srcDest);
+}
+
+void MacroAssembler::rshift32(Imm32 shift, Register srcDest) {
+ shrl(shift, srcDest);
+}
+
+void MacroAssembler::rshift32Arithmetic(Imm32 shift, Register srcDest) {
+ sarl(shift, srcDest);
+}
+
+// ===============================================================
+// Condition functions
+
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ cmp8(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ cmp16(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ cmp32(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Branch instructions
+
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ cmp8(lhs, rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) {
+ cmp8(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ cmp16(lhs, rhs);
+ j(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ cmp32(lhs, rhs);
+ j(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ cmp32(lhs, rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+ Label* label) {
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+ Label* label) {
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const Operand& lhs, Register rhs,
+ Label* label) {
+ cmp32(lhs, rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const Operand& lhs, Imm32 rhs,
+ Label* label) {
+ cmp32(lhs, rhs);
+ j(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ cmpPtr(lhs, rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+ Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+ Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+ Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+ L label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+ Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+ Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+ Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ ImmWord rhs, Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+template <typename T, typename S, typename L>
+void MacroAssembler::branchPtrImpl(Condition cond, const T& lhs, const S& rhs,
+ L label) {
+ cmpPtr(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ compareFloat(cond, lhs, rhs);
+
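+  // The comparison sets the parity flag when the operands are unordered
+  // (either is NaN), so the equality-style conditions below need explicit
+  // parity handling.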
+ if (cond == DoubleEqual) {
+ Label unordered;
+ j(Parity, &unordered);
+ j(Equal, label);
+ bind(&unordered);
+ return;
+ }
+
+ if (cond == DoubleNotEqualOrUnordered) {
+ j(NotEqual, label);
+ j(Parity, label);
+ return;
+ }
+
+ MOZ_ASSERT(!(cond & DoubleConditionBitSpecial));
+ j(ConditionFromDoubleCondition(cond), label);
+}
+
+void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ compareDouble(cond, lhs, rhs);
+
+ if (cond == DoubleEqual) {
+ Label unordered;
+ j(Parity, &unordered);
+ j(Equal, label);
+ bind(&unordered);
+ return;
+ }
+ if (cond == DoubleNotEqualOrUnordered) {
+ j(NotEqual, label);
+ j(Parity, label);
+ return;
+ }
+
+ MOZ_ASSERT(!(cond & DoubleConditionBitSpecial));
+ j(ConditionFromDoubleCondition(cond), label);
+}
+
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+ Label* label) {
+ addl(src, dest);
+ j(cond, label);
+}
+
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+ Label* label) {
+ subl(src, dest);
+ j(cond, label);
+}
+
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+ Label* label) {
+ mul32(src, dest);
+ j(cond, label);
+}
+
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ rshift32(src, dest);
+ j(cond, label);
+}
+
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+ MOZ_ASSERT(cond == Overflow);
+ neg32(reg);
+ j(cond, label);
+}
+
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ addPtr(src, dest);
+ j(cond, label);
+}
+
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ subPtr(src, dest);
+ j(cond, label);
+}
+
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+ Label* label) {
+ mulPtr(src, dest);
+ j(cond, label);
+}
+
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ subPtr(rhs, lhs);
+ j(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ test32(lhs, rhs);
+ j(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ test32(lhs, rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ test32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ testPtr(lhs, rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ testPtr(lhs, rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+ Imm32 rhs, Label* label) {
+ testPtr(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+ Label* label) {
+ branchTestUndefinedImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+ Label* label) {
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const BaseIndex& address,
+ Label* label) {
+ branchTestUndefinedImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestUndefinedImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testUndefined(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+ Label* label) {
+ branchTestInt32Impl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+ Label* label) {
+ branchTestInt32Impl(cond, address, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestInt32Impl(cond, address, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestInt32Impl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestInt32Impl(Condition cond, const T& t,
+ Label* label) {
+ cond = testInt32(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestInt32Truthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition cond = testInt32Truthy(truthy, value);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ branchTestDoubleImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+ Label* label) {
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestDoubleImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestDoubleImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testDouble(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg,
+ Label* label) {
+ Condition cond = testDoubleTruthy(truthy, reg);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+ Label* label) {
+ branchTestNumberImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestNumberImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestNumberImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testNumber(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+ Label* label) {
+ branchTestBooleanImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+ Label* label) {
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestBooleanImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestBooleanImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestBooleanImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testBoolean(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+ Label* label) {
+ branchTestStringImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+ Label* label) {
+ branchTestStringImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestStringImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestStringImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestStringImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testString(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestStringTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition cond = testStringTruthy(truthy, value);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+ Label* label) {
+ branchTestSymbolImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+ Label* label) {
+ branchTestSymbolImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestSymbolImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestSymbolImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testSymbol(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+ Label* label) {
+ branchTestBigIntImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+ Label* label) {
+ branchTestBigIntImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestBigIntImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestBigIntImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestBigIntImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testBigInt(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestBigIntTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ Condition cond = testBigIntTruthy(truthy, value);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+ Label* label) {
+ branchTestNullImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+ Label* label) {
+ branchTestNullImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestNullImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestNullImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestNullImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testNull(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+ Label* label) {
+ branchTestObjectImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+ Label* label) {
+ branchTestObjectImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestObjectImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ branchTestObjectImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestObjectImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testObject(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestGCThingImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testGCThing(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+ Label* label) {
+ branchTestPrimitiveImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ branchTestPrimitiveImpl(cond, value, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t,
+ Label* label) {
+ cond = testPrimitive(cond, t);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+ Label* label) {
+ branchTestMagicImpl(cond, tag, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+ Label* label) {
+ branchTestMagicImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestMagicImpl(cond, address, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ branchTestMagicImpl(cond, value, label);
+}
+
+template <typename T, class L>
+void MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label) {
+ cond = testMagic(cond, t);
+ j(cond, label);
+}
+
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testNumber(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testBoolean(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testString(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testSymbol(cond, src);
+ emitSet(cond, dest);
+}
+
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+ Register dest) {
+ cond = testBigInt(cond, src);
+ emitSet(cond, dest);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ cmp32(lhs, rhs);
+ cmovCCl(cond, src, dest);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ cmp32(lhs, Operand(rhs));
+ cmovCCl(cond, src, dest);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+ const Address& rhs, const Address& src,
+ Register dest) {
+ cmp32(lhs, Operand(rhs));
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
+ const Address& src, Register dest) {
+ cmp32(lhs, rhs);
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::spectreZeroRegister(Condition cond, Register scratch,
+ Register dest) {
+ // Note: use movl instead of move32/xorl to ensure flags are not clobbered.
+ movl(Imm32(0), scratch);
+ spectreMovePtr(cond, scratch, dest);
+}
+
+// ========================================================================
+// Memory access primitives.
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const Address& dest) {
+ vmovsd(src, dest);
+}
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const BaseIndex& dest) {
+ vmovsd(src, dest);
+}
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+ const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ storeUncanonicalizedDouble(src, dest.toAddress());
+ break;
+ case Operand::MEM_SCALE:
+ storeUncanonicalizedDouble(src, dest.toBaseIndex());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+}
+
+template void MacroAssembler::storeDouble(FloatRegister src,
+ const Operand& dest);
+
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const Address& dest) {
+ vmovss(src, dest);
+}
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const BaseIndex& dest) {
+ vmovss(src, dest);
+}
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+ const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ storeUncanonicalizedFloat32(src, dest.toAddress());
+ break;
+ case Operand::MEM_SCALE:
+ storeUncanonicalizedFloat32(src, dest.toBaseIndex());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+}
+
+template void MacroAssembler::storeFloat32(FloatRegister src,
+ const Operand& dest);
+
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
+ if (barrier & MembarStoreLoad) {
+ storeLoadFence();
+ }
+}
+
+// ========================================================================
+// Wasm SIMD
+//
+// Some parts of the masm API are currently agnostic as to the data's
+// interpretation as int or float, despite the Intel architecture having
+// separate functional units and sometimes penalizing type-specific instructions
+// that operate on data in the "wrong" unit.
+//
+// For the time being, we always choose the integer interpretation when we are
+// forced to choose blind, but whether that is right or wrong depends on the
+// application. This applies to moveSimd128, loadConstantSimd128,
+// loadUnalignedSimd128, and storeUnalignedSimd128, at least.
+//
+// SSE4.1 or better is assumed.
+//
+// The order of operations here follows the header file.
+
+// Moves. See comments above regarding integer operation.
+
+void MacroAssembler::moveSimd128(FloatRegister src, FloatRegister dest) {
+ MacroAssemblerX86Shared::moveSimd128Int(src, dest);
+}
+
+// Constants. See comments above regarding integer operation.
+
+void MacroAssembler::loadConstantSimd128(const SimdConstant& v,
+ FloatRegister dest) {
+ if (v.isFloatingType()) {
+ loadConstantSimd128Float(v, dest);
+ } else {
+ loadConstantSimd128Int(v, dest);
+ }
+}
+
+// Splat
+
+void MacroAssembler::splatX16(Register src, FloatRegister dest) {
+ MacroAssemblerX86Shared::splatX16(src, dest);
+}
+
+void MacroAssembler::splatX8(Register src, FloatRegister dest) {
+ MacroAssemblerX86Shared::splatX8(src, dest);
+}
+
+void MacroAssembler::splatX4(Register src, FloatRegister dest) {
+ MacroAssemblerX86Shared::splatX4(src, dest);
+}
+
+void MacroAssembler::splatX4(FloatRegister src, FloatRegister dest) {
+ MacroAssemblerX86Shared::splatX4(src, dest);
+}
+
+void MacroAssembler::splatX2(FloatRegister src, FloatRegister dest) {
+ MacroAssemblerX86Shared::splatX2(src, dest);
+}
+
+// Extract lane as scalar
+
+void MacroAssembler::extractLaneInt8x16(uint32_t lane, FloatRegister src,
+ Register dest) {
+ MacroAssemblerX86Shared::extractLaneInt8x16(src, dest, lane,
+ SimdSign::Signed);
+}
+
+void MacroAssembler::unsignedExtractLaneInt8x16(uint32_t lane,
+ FloatRegister src,
+ Register dest) {
+ MacroAssemblerX86Shared::extractLaneInt8x16(src, dest, lane,
+ SimdSign::Unsigned);
+}
+
+void MacroAssembler::extractLaneInt16x8(uint32_t lane, FloatRegister src,
+ Register dest) {
+ MacroAssemblerX86Shared::extractLaneInt16x8(src, dest, lane,
+ SimdSign::Signed);
+}
+
+void MacroAssembler::unsignedExtractLaneInt16x8(uint32_t lane,
+ FloatRegister src,
+ Register dest) {
+ MacroAssemblerX86Shared::extractLaneInt16x8(src, dest, lane,
+ SimdSign::Unsigned);
+}
+
+void MacroAssembler::extractLaneInt32x4(uint32_t lane, FloatRegister src,
+ Register dest) {
+ MacroAssemblerX86Shared::extractLaneInt32x4(src, dest, lane);
+}
+
+void MacroAssembler::extractLaneFloat32x4(uint32_t lane, FloatRegister src,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::extractLaneFloat32x4(src, dest, lane);
+}
+
+void MacroAssembler::extractLaneFloat64x2(uint32_t lane, FloatRegister src,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::extractLaneFloat64x2(src, dest, lane);
+}
+
+// Replace lane value
+
+void MacroAssembler::replaceLaneInt8x16(unsigned lane, FloatRegister lhs,
+ Register rhs, FloatRegister dest) {
+ vpinsrb(lane, Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::replaceLaneInt8x16(unsigned lane, Register rhs,
+ FloatRegister lhsDest) {
+ vpinsrb(lane, Operand(rhs), lhsDest, lhsDest);
+}
+
+void MacroAssembler::replaceLaneInt16x8(unsigned lane, FloatRegister lhs,
+ Register rhs, FloatRegister dest) {
+ vpinsrw(lane, Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::replaceLaneInt16x8(unsigned lane, Register rhs,
+ FloatRegister lhsDest) {
+ vpinsrw(lane, Operand(rhs), lhsDest, lhsDest);
+}
+
+void MacroAssembler::replaceLaneInt32x4(unsigned lane, FloatRegister lhs,
+ Register rhs, FloatRegister dest) {
+ vpinsrd(lane, rhs, lhs, dest);
+}
+
+void MacroAssembler::replaceLaneInt32x4(unsigned lane, Register rhs,
+ FloatRegister lhsDest) {
+ vpinsrd(lane, rhs, lhsDest, lhsDest);
+}
+
+void MacroAssembler::replaceLaneFloat32x4(unsigned lane, FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::replaceLaneFloat32x4(lane, lhs, rhs, dest);
+}
+
+void MacroAssembler::replaceLaneFloat32x4(unsigned lane, FloatRegister rhs,
+ FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::replaceLaneFloat32x4(lane, lhsDest, rhs, lhsDest);
+}
+
+void MacroAssembler::replaceLaneFloat64x2(unsigned lane, FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::replaceLaneFloat64x2(lane, lhs, rhs, dest);
+}
+
+void MacroAssembler::replaceLaneFloat64x2(unsigned lane, FloatRegister rhs,
+ FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::replaceLaneFloat64x2(lane, lhsDest, rhs, lhsDest);
+}
+
+// Shuffle - permute with immediate indices
+
+void MacroAssembler::shuffleInt8x16(const uint8_t lanes[16], FloatRegister rhs,
+ FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::shuffleInt8x16(lhsDest, rhs, lhsDest, lanes);
+}
+
+void MacroAssembler::shuffleInt8x16(const uint8_t lanes[16], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest) {
+ MacroAssemblerX86Shared::shuffleInt8x16(lhs, rhs, dest, lanes);
+}
+
+void MacroAssembler::blendInt8x16(const uint8_t lanes[16], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::blendInt8x16(lhs, rhs, dest, temp, lanes);
+}
+
+void MacroAssembler::blendInt16x8(const uint16_t lanes[8], FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest) {
+ MacroAssemblerX86Shared::blendInt16x8(lhs, rhs, dest, lanes);
+}
+
+void MacroAssembler::laneSelectSimd128(FloatRegister mask, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister dest) {
+ MacroAssemblerX86Shared::laneSelectSimd128(mask, lhs, rhs, dest);
+}
+
+void MacroAssembler::interleaveHighInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpunpckhwd(rhs, lhs, dest);
+}
+
+void MacroAssembler::interleaveHighInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpunpckhdq(rhs, lhs, dest);
+}
+
+void MacroAssembler::interleaveHighInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpunpckhqdq(rhs, lhs, dest);
+}
+
+void MacroAssembler::interleaveHighInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpunpckhbw(rhs, lhs, dest);
+}
+
+void MacroAssembler::interleaveLowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpunpcklwd(rhs, lhs, dest);
+}
+
+void MacroAssembler::interleaveLowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpunpckldq(rhs, lhs, dest);
+}
+
+void MacroAssembler::interleaveLowInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpunpcklqdq(rhs, lhs, dest);
+}
+
+void MacroAssembler::interleaveLowInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpunpcklbw(rhs, lhs, dest);
+}
+
+void MacroAssembler::permuteInt8x16(const uint8_t lanes[16], FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpshufbSimd128(SimdConstant::CreateX16((const int8_t*)lanes), src, dest);
+}
+
+void MacroAssembler::permuteLowInt16x8(const uint16_t lanes[4],
+ FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(lanes[0] < 4 && lanes[1] < 4 && lanes[2] < 4 && lanes[3] < 4);
+ vpshuflw(ComputeShuffleMask(lanes[0], lanes[1], lanes[2], lanes[3]), src,
+ dest);
+}
+
+void MacroAssembler::permuteHighInt16x8(const uint16_t lanes[4],
+ FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(lanes[0] < 4 && lanes[1] < 4 && lanes[2] < 4 && lanes[3] < 4);
+ vpshufhw(ComputeShuffleMask(lanes[0], lanes[1], lanes[2], lanes[3]), src,
+ dest);
+}
+
+void MacroAssembler::permuteInt32x4(const uint32_t lanes[4], FloatRegister src,
+ FloatRegister dest) {
+ vpshufd(ComputeShuffleMask(lanes[0], lanes[1], lanes[2], lanes[3]), src,
+ dest);
+}
+
+void MacroAssembler::concatAndRightShiftSimd128(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest,
+ uint32_t shift) {
+ vpalignr(Operand(rhs), lhs, dest, shift);
+}
+
+void MacroAssembler::leftShiftSimd128(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpslldq(count, src, dest);
+}
+
+void MacroAssembler::rightShiftSimd128(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsrldq(count, src, dest);
+}
+
+// Reverse bytes in lanes.
+
+void MacroAssembler::reverseInt16x8(FloatRegister src, FloatRegister dest) {
+ // Byteswap is MOV + PSLLW + PSRLW + POR, a small win over PSHUFB.
+ ScratchSimd128Scope scratch(*this);
+ FloatRegister srcForScratch = moveSimd128IntIfNotAVX(src, scratch);
+ vpsrlw(Imm32(8), srcForScratch, scratch);
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsllw(Imm32(8), src, dest);
+ vpor(scratch, dest, dest);
+}
+
+void MacroAssembler::reverseInt32x4(FloatRegister src, FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ int8_t lanes[] = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12};
+ vpshufbSimd128(SimdConstant::CreateX16((const int8_t*)lanes), src, dest);
+}
+
+void MacroAssembler::reverseInt64x2(FloatRegister src, FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ int8_t lanes[] = {7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8};
+ vpshufbSimd128(SimdConstant::CreateX16((const int8_t*)lanes), src, dest);
+}
+
+// Any lane true, ie any bit set
+
+void MacroAssembler::anyTrueSimd128(FloatRegister src, Register dest) {
+ vptest(src, src);
+ emitSetRegisterIf(Condition::NonZero, dest);
+}
+
+// All lanes true
+
+void MacroAssembler::allTrueInt8x16(FloatRegister src, Register dest) {
+ ScratchSimd128Scope xtmp(*this);
+ // xtmp is all-00h
+ vpxor(xtmp, xtmp, xtmp);
+ // Set FFh if byte==0 otherwise 00h
+ // Operand ordering constraint: lhs==output
+ vpcmpeqb(Operand(src), xtmp, xtmp);
+ // Check if xtmp is 0.
+ vptest(xtmp, xtmp);
+ emitSetRegisterIf(Condition::Zero, dest);
+}
+
+void MacroAssembler::allTrueInt16x8(FloatRegister src, Register dest) {
+ ScratchSimd128Scope xtmp(*this);
+ // xtmp is all-00h
+ vpxor(xtmp, xtmp, xtmp);
+ // Set FFFFh if word==0 otherwise 0000h
+ // Operand ordering constraint: lhs==output
+ vpcmpeqw(Operand(src), xtmp, xtmp);
+ // Check if xtmp is 0.
+ vptest(xtmp, xtmp);
+ emitSetRegisterIf(Condition::Zero, dest);
+}
+
+void MacroAssembler::allTrueInt32x4(FloatRegister src, Register dest) {
+ ScratchSimd128Scope xtmp(*this);
+ // xtmp is all-00h
+ vpxor(xtmp, xtmp, xtmp);
+ // Set FFFFFFFFh if doubleword==0 otherwise 00000000h
+ // Operand ordering constraint: lhs==output
+ vpcmpeqd(Operand(src), xtmp, xtmp);
+ // Check if xtmp is 0.
+ vptest(xtmp, xtmp);
+ emitSetRegisterIf(Condition::Zero, dest);
+}
+
+void MacroAssembler::allTrueInt64x2(FloatRegister src, Register dest) {
+ ScratchSimd128Scope xtmp(*this);
+ // xtmp is all-00h
+ vpxor(xtmp, xtmp, xtmp);
+ // Set FFFFFFFFFFFFFFFFh if quadword==0 otherwise 0000000000000000h
+ // Operand ordering constraint: lhs==output
+ vpcmpeqq(Operand(src), xtmp, xtmp);
+ // Check if xtmp is 0.
+ vptest(xtmp, xtmp);
+ emitSetRegisterIf(Condition::Zero, dest);
+}
+
+// Bitmask
+
+void MacroAssembler::bitmaskInt8x16(FloatRegister src, Register dest) {
+ vpmovmskb(src, dest);
+}
+
+void MacroAssembler::bitmaskInt16x8(FloatRegister src, Register dest) {
+ ScratchSimd128Scope scratch(*this);
+  // A three-instruction sequence is possible by using scratch as a don't-care
+  // input and shifting rather than masking at the end, but that creates a
+  // false dependency on the old value of scratch. The better fix is to allow
+  // src to be clobbered.
+ src = moveSimd128IntIfNotAVX(src, scratch);
+ vpacksswb(Operand(src), src, scratch);
+ vpmovmskb(scratch, dest);
+ andl(Imm32(0xFF), dest);
+}
+
+void MacroAssembler::bitmaskInt32x4(FloatRegister src, Register dest) {
+ vmovmskps(src, dest);
+}
+
+void MacroAssembler::bitmaskInt64x2(FloatRegister src, Register dest) {
+ vmovmskpd(src, dest);
+}
+
+// Swizzle - permute with variable indices
+
+void MacroAssembler::swizzleInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ rhs = moveSimd128IntIfNotAVX(rhs, scratch);
+ // Set high bit to 1 for values > 15 via adding with saturation.
+ vpaddusbSimd128(SimdConstant::SplatX16(0x70), rhs, scratch);
+ vpshufb(scratch, lhs, dest); // permute
+}
+
+void MacroAssembler::swizzleInt8x16Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpshufb(rhs, lhs, dest);
+}
+
+// Integer Add
+
+void MacroAssembler::addInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpaddb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::addInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpaddb,
+ &MacroAssembler::vpaddbSimd128);
+}
+
+void MacroAssembler::addInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpaddw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::addInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpaddw,
+ &MacroAssembler::vpaddwSimd128);
+}
+
+void MacroAssembler::addInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpaddd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::addInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpaddd,
+ &MacroAssembler::vpadddSimd128);
+}
+
+void MacroAssembler::addInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpaddq(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::addInt64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpaddq,
+ &MacroAssembler::vpaddqSimd128);
+}
+
+// Integer subtract
+
+void MacroAssembler::subInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpsubb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::subInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpsubb,
+ &MacroAssembler::vpsubbSimd128);
+}
+
+void MacroAssembler::subInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpsubw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::subInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpsubw,
+ &MacroAssembler::vpsubwSimd128);
+}
+
+void MacroAssembler::subInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpsubd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::subInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpsubd,
+ &MacroAssembler::vpsubdSimd128);
+}
+
+void MacroAssembler::subInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpsubq(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::subInt64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpsubq,
+ &MacroAssembler::vpsubqSimd128);
+}
+
+// Integer multiply
+
+void MacroAssembler::mulInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmullw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::mulInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpmullw,
+ &MacroAssembler::vpmullwSimd128);
+}
+
+void MacroAssembler::mulInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmulld(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::mulInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpmulld,
+ &MacroAssembler::vpmulldSimd128);
+}
+
+void MacroAssembler::mulInt64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp) {
+ ScratchSimd128Scope temp2(*this);
+ // lhs = <D C> <B A>
+ // rhs = <H G> <F E>
+ // result = <(DG+CH)_low+CG_high CG_low> <(BE+AF)_low+AE_high AE_low>
+ FloatRegister lhsForTemp =
+ moveSimd128IntIfNotAVX(lhs, temp); // temp = <D C> <B A>
+ vpsrlq(Imm32(32), lhsForTemp, temp); // temp = <0 D> <0 B>
+ vpmuludq(rhs, temp, temp); // temp = <DG> <BE>
+ FloatRegister rhsForTemp =
+ moveSimd128IntIfNotAVX(rhs, temp2); // temp2 = <H G> <F E>
+ vpsrlq(Imm32(32), rhsForTemp, temp2); // temp2 = <0 H> <0 F>
+ vpmuludq(lhs, temp2, temp2); // temp2 = <CH> <AF>
+ vpaddq(Operand(temp), temp2, temp2); // temp2 = <DG+CH> <BE+AF>
+ vpsllq(Imm32(32), temp2, temp2); // temp2 = <(DG+CH)_low 0>
+ // <(BE+AF)_low 0>
+ vpmuludq(rhs, lhs, dest); // dest = <CG_high CG_low>
+ // <AE_high AE_low>
+ vpaddq(Operand(temp2), dest, dest); // dest =
+ // <(DG+CH)_low+CG_high CG_low>
+ // <(BE+AF)_low+AE_high AE_low>
+}
+
+void MacroAssembler::mulInt64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest, FloatRegister temp) {
+  // Check if we can specialize this to fewer than eight instructions
+  // (compared with the general mulInt64x2 version above).
+ const int64_t* c = static_cast<const int64_t*>(rhs.bytes());
+ const int64_t val = c[0];
+ if (val == c[1]) {
+ switch (mozilla::CountPopulation64(val)) {
+ case 0: // val == 0
+ vpxor(Operand(dest), dest, dest);
+ return;
+ case 64: // val == -1
+ negInt64x2(lhs, dest);
+ return;
+ case 1: // val == power of 2
+ if (val == 1) {
+ moveSimd128Int(lhs, dest);
+ } else {
+ lhs = moveSimd128IntIfNotAVX(lhs, dest);
+ vpsllq(Imm32(mozilla::CountTrailingZeroes64(val)), lhs, dest);
+ }
+ return;
+ case 2: {
+ // Constants with 2 bits set, such as 3, 5, 10, etc.
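+        // x * ((1 << i0) + (1 << i1)) == (x << i0) + (x << i1).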
+ int i0 = mozilla::CountTrailingZeroes64(val);
+ int i1 = mozilla::CountTrailingZeroes64(val & (val - 1));
+ FloatRegister lhsForTemp = moveSimd128IntIfNotAVX(lhs, temp);
+ vpsllq(Imm32(i1), lhsForTemp, temp);
+ lhs = moveSimd128IntIfNotAVX(lhs, dest);
+ if (i0 > 0) {
+ vpsllq(Imm32(i0), lhs, dest);
+ lhs = dest;
+ }
+ vpaddq(Operand(temp), lhs, dest);
+ return;
+ }
+ case 63: {
+ // Some constants with 1 bit unset, such as -2, -3, -5, etc.
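+        // val == ~(1 << k) for some k, so x * val == -x - (x << k).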
+ FloatRegister lhsForTemp = moveSimd128IntIfNotAVX(lhs, temp);
+ vpsllq(Imm32(mozilla::CountTrailingZeroes64(~val)), lhsForTemp, temp);
+ negInt64x2(lhs, dest);
+ vpsubq(Operand(temp), dest, dest);
+ return;
+ }
+ }
+ }
+
+ // lhs = <D C> <B A>
+ // rhs = <H G> <F E>
+ // result = <(DG+CH)_low+CG_high CG_low> <(BE+AF)_low+AE_high AE_low>
+
+ if ((c[0] >> 32) == 0 && (c[1] >> 32) == 0) {
+    // If H and F are both 0, the calculation simplifies to:
+ // result = <DG_low+CG_high CG_low> <BE_low+AE_high AE_low>
+ const int64_t rhsShifted[2] = {c[0] << 32, c[1] << 32};
+ FloatRegister lhsForTemp = moveSimd128IntIfNotAVX(lhs, temp);
+ vpmulldSimd128(SimdConstant::CreateSimd128(rhsShifted), lhsForTemp, temp);
+ vpmuludqSimd128(rhs, lhs, dest);
+ vpaddq(Operand(temp), dest, dest);
+ return;
+ }
+
+ const int64_t rhsSwapped[2] = {
+ static_cast<int64_t>(static_cast<uint64_t>(c[0]) >> 32) | (c[0] << 32),
+ static_cast<int64_t>(static_cast<uint64_t>(c[1]) >> 32) | (c[1] << 32),
+ }; // rhsSwapped = <G H> <E F>
+ FloatRegister lhsForTemp = moveSimd128IntIfNotAVX(lhs, temp);
+ vpmulldSimd128(SimdConstant::CreateSimd128(rhsSwapped), lhsForTemp,
+ temp); // temp = <DG CH> <BE AF>
+ vphaddd(Operand(temp), temp, temp); // temp = <xx xx> <DG+CH BE+AF>
+  vpmovzxdq(Operand(temp), temp); // temp = <0 DG+CH> <0 BE+AF>
+ vpmuludqSimd128(rhs, lhs, dest); // dest = <CG_high CG_low>
+ // <AE_high AE_low>
+ vpsllq(Imm32(32), temp, temp); // temp = <(DG+CH)_low 0>
+ // <(BE+AF)_low 0>
+ vpaddq(Operand(temp), dest, dest);
+}
+
+// Code generation from the PR: https://github.com/WebAssembly/simd/pull/376.
+// The double PSHUFD for the 32->64 case is not great, and there's some
+// discussion on the PR (scroll down far enough) on how to avoid one of them,
+// but we need benchmarking + correctness proofs.
+
+void MacroAssembler::extMulLowInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ widenLowInt8x16(rhs, scratch);
+ widenLowInt8x16(lhs, dest);
+ mulInt16x8(dest, scratch, dest);
+}
+
+void MacroAssembler::extMulHighInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ widenHighInt8x16(rhs, scratch);
+ widenHighInt8x16(lhs, dest);
+ mulInt16x8(dest, scratch, dest);
+}
+
+void MacroAssembler::unsignedExtMulLowInt8x16(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ unsignedWidenLowInt8x16(rhs, scratch);
+ unsignedWidenLowInt8x16(lhs, dest);
+ mulInt16x8(dest, scratch, dest);
+}
+
+void MacroAssembler::unsignedExtMulHighInt8x16(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ unsignedWidenHighInt8x16(rhs, scratch);
+ unsignedWidenHighInt8x16(lhs, dest);
+ mulInt16x8(dest, scratch, dest);
+}
+
+void MacroAssembler::extMulLowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ FloatRegister lhsCopy = moveSimd128IntIfNotAVX(lhs, scratch);
+ vpmulhw(Operand(rhs), lhsCopy, scratch);
+ vpmullw(Operand(rhs), lhs, dest);
+ vpunpcklwd(scratch, dest, dest);
+}
+
+void MacroAssembler::extMulHighInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ FloatRegister lhsCopy = moveSimd128IntIfNotAVX(lhs, scratch);
+ vpmulhw(Operand(rhs), lhsCopy, scratch);
+ vpmullw(Operand(rhs), lhs, dest);
+ vpunpckhwd(scratch, dest, dest);
+}
+
+void MacroAssembler::unsignedExtMulLowInt16x8(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ FloatRegister lhsCopy = moveSimd128IntIfNotAVX(lhs, scratch);
+ vpmulhuw(Operand(rhs), lhsCopy, scratch);
+ vpmullw(Operand(rhs), lhs, dest);
+ vpunpcklwd(scratch, dest, dest);
+}
+
+void MacroAssembler::unsignedExtMulHighInt16x8(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ FloatRegister lhsCopy = moveSimd128IntIfNotAVX(lhs, scratch);
+ vpmulhuw(Operand(rhs), lhsCopy, scratch);
+ vpmullw(Operand(rhs), lhs, dest);
+ vpunpckhwd(scratch, dest, dest);
+}
+
+void MacroAssembler::extMulLowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ vpshufd(ComputeShuffleMask(0, 0, 1, 0), lhs, scratch);
+ vpshufd(ComputeShuffleMask(0, 0, 1, 0), rhs, dest);
+ vpmuldq(scratch, dest, dest);
+}
+
+void MacroAssembler::extMulHighInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ vpshufd(ComputeShuffleMask(2, 0, 3, 0), lhs, scratch);
+ vpshufd(ComputeShuffleMask(2, 0, 3, 0), rhs, dest);
+ vpmuldq(scratch, dest, dest);
+}
+
+void MacroAssembler::unsignedExtMulLowInt32x4(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ vpshufd(ComputeShuffleMask(0, 0, 1, 0), lhs, scratch);
+ vpshufd(ComputeShuffleMask(0, 0, 1, 0), rhs, dest);
+ vpmuludq(Operand(scratch), dest, dest);
+}
+
+void MacroAssembler::unsignedExtMulHighInt32x4(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ vpshufd(ComputeShuffleMask(2, 0, 3, 0), lhs, scratch);
+ vpshufd(ComputeShuffleMask(2, 0, 3, 0), rhs, dest);
+ vpmuludq(Operand(scratch), dest, dest);
+}
+
+void MacroAssembler::q15MulrSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
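+  // vpmulhrsw computes the Q15 rounding multiply (lhs * rhs * 2 + 0x8000) >>
+  // 16. Its only overflow case is 0x8000 * 0x8000, which produces 0x8000 but
+  // must saturate to 0x7FFF; detect that lane value below and flip it via the
+  // all-ones compare mask.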
+ vpmulhrsw(Operand(rhs), lhs, dest);
+ FloatRegister destCopy = moveSimd128IntIfNotAVX(dest, scratch);
+ vpcmpeqwSimd128(SimdConstant::SplatX8(0x8000), destCopy, scratch);
+ vpxor(scratch, dest, dest);
+}
+
+void MacroAssembler::q15MulrInt16x8Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmulhrsw(Operand(rhs), lhs, dest);
+}
+
+// Integer negate
+
+void MacroAssembler::negInt8x16(FloatRegister src, FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ if (src == dest) {
+ moveSimd128Int(src, scratch);
+ src = scratch;
+ }
+ vpxor(Operand(dest), dest, dest);
+ vpsubb(Operand(src), dest, dest);
+}
+
+void MacroAssembler::negInt16x8(FloatRegister src, FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ if (src == dest) {
+ moveSimd128Int(src, scratch);
+ src = scratch;
+ }
+ vpxor(Operand(dest), dest, dest);
+ vpsubw(Operand(src), dest, dest);
+}
+
+void MacroAssembler::negInt32x4(FloatRegister src, FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ if (src == dest) {
+ moveSimd128Int(src, scratch);
+ src = scratch;
+ }
+ vpxor(Operand(dest), dest, dest);
+ vpsubd(Operand(src), dest, dest);
+}
+
+void MacroAssembler::negInt64x2(FloatRegister src, FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ if (src == dest) {
+ moveSimd128Int(src, scratch);
+ src = scratch;
+ }
+ vpxor(Operand(dest), dest, dest);
+ vpsubq(Operand(src), dest, dest);
+}
+
+// Saturating integer add
+
+void MacroAssembler::addSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpaddsb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::addSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpaddsb,
+ &MacroAssembler::vpaddsbSimd128);
+}
+
+void MacroAssembler::unsignedAddSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpaddusb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedAddSatInt8x16(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpaddusb,
+ &MacroAssembler::vpaddusbSimd128);
+}
+
+void MacroAssembler::addSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpaddsw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::addSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpaddsw,
+ &MacroAssembler::vpaddswSimd128);
+}
+
+void MacroAssembler::unsignedAddSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpaddusw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedAddSatInt16x8(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpaddusw,
+ &MacroAssembler::vpadduswSimd128);
+}
+
+// Saturating integer subtract
+
+void MacroAssembler::subSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpsubsb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::subSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpsubsb,
+ &MacroAssembler::vpsubsbSimd128);
+}
+
+void MacroAssembler::unsignedSubSatInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpsubusb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedSubSatInt8x16(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpsubusb,
+ &MacroAssembler::vpsubusbSimd128);
+}
+
+void MacroAssembler::subSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpsubsw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::subSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpsubsw,
+ &MacroAssembler::vpsubswSimd128);
+}
+
+void MacroAssembler::unsignedSubSatInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpsubusw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedSubSatInt16x8(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpsubusw,
+ &MacroAssembler::vpsubuswSimd128);
+}
+
+// Lane-wise integer minimum
+
+void MacroAssembler::minInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpminsb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::minInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpminsb,
+ &MacroAssembler::vpminsbSimd128);
+}
+
+void MacroAssembler::unsignedMinInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpminub(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedMinInt8x16(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpminub,
+ &MacroAssembler::vpminubSimd128);
+}
+
+void MacroAssembler::minInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpminsw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::minInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpminsw,
+ &MacroAssembler::vpminswSimd128);
+}
+
+void MacroAssembler::unsignedMinInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpminuw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedMinInt16x8(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpminuw,
+ &MacroAssembler::vpminuwSimd128);
+}
+
+void MacroAssembler::minInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpminsd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::minInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpminsd,
+ &MacroAssembler::vpminsdSimd128);
+}
+
+void MacroAssembler::unsignedMinInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpminud(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedMinInt32x4(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpminud,
+ &MacroAssembler::vpminudSimd128);
+}
+
+// Lane-wise integer maximum
+
+void MacroAssembler::maxInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmaxsb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::maxInt8x16(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpmaxsb,
+ &MacroAssembler::vpmaxsbSimd128);
+}
+
+void MacroAssembler::unsignedMaxInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmaxub(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedMaxInt8x16(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpmaxub,
+ &MacroAssembler::vpmaxubSimd128);
+}
+
+void MacroAssembler::maxInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmaxsw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::maxInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpmaxsw,
+ &MacroAssembler::vpmaxswSimd128);
+}
+
+void MacroAssembler::unsignedMaxInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmaxuw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedMaxInt16x8(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpmaxuw,
+ &MacroAssembler::vpmaxuwSimd128);
+}
+
+void MacroAssembler::maxInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmaxsd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::maxInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpmaxsd,
+ &MacroAssembler::vpmaxsdSimd128);
+}
+
+void MacroAssembler::unsignedMaxInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmaxud(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedMaxInt32x4(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpmaxud,
+ &MacroAssembler::vpmaxudSimd128);
+}
+
+// Lane-wise integer rounding average
+
+void MacroAssembler::unsignedAverageInt8x16(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ vpavgb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedAverageInt16x8(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ vpavgw(Operand(rhs), lhs, dest);
+}
+
+// Lane-wise integer absolute value
+
+void MacroAssembler::absInt8x16(FloatRegister src, FloatRegister dest) {
+ vpabsb(Operand(src), dest);
+}
+
+void MacroAssembler::absInt16x8(FloatRegister src, FloatRegister dest) {
+ vpabsw(Operand(src), dest);
+}
+
+void MacroAssembler::absInt32x4(FloatRegister src, FloatRegister dest) {
+ vpabsd(Operand(src), dest);
+}
+
+void MacroAssembler::absInt64x2(FloatRegister src, FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
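+  // With mask = src >> 63 replicated per lane (all ones for negative lanes,
+  // zero otherwise), abs(x) = (x ^ mask) - mask.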
+ signReplicationInt64x2(src, scratch);
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpxor(Operand(scratch), src, dest);
+ vpsubq(Operand(scratch), dest, dest);
+}
+
+// Left shift by scalar
+
+void MacroAssembler::leftShiftInt8x16(Register rhs, FloatRegister lhsDest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::packedLeftShiftByScalarInt8x16(lhsDest, rhs, temp,
+ lhsDest);
+}
+
+void MacroAssembler::leftShiftInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::packedLeftShiftByScalarInt8x16(count, src, dest);
+}
+
+void MacroAssembler::leftShiftInt16x8(Register rhs, FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::packedLeftShiftByScalarInt16x8(lhsDest, rhs,
+ lhsDest);
+}
+
+void MacroAssembler::leftShiftInt16x8(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsllw(count, src, dest);
+}
+
+void MacroAssembler::leftShiftInt32x4(Register rhs, FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::packedLeftShiftByScalarInt32x4(lhsDest, rhs,
+ lhsDest);
+}
+
+void MacroAssembler::leftShiftInt32x4(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpslld(count, src, dest);
+}
+
+void MacroAssembler::leftShiftInt64x2(Register rhs, FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::packedLeftShiftByScalarInt64x2(lhsDest, rhs,
+ lhsDest);
+}
+
+void MacroAssembler::leftShiftInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsllq(count, src, dest);
+}
+
+// Right shift by scalar
+
+void MacroAssembler::rightShiftInt8x16(Register rhs, FloatRegister lhsDest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::packedRightShiftByScalarInt8x16(lhsDest, rhs, temp,
+ lhsDest);
+}
+
+void MacroAssembler::rightShiftInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::packedRightShiftByScalarInt8x16(count, src, dest);
+}
+
+void MacroAssembler::unsignedRightShiftInt8x16(Register rhs,
+ FloatRegister lhsDest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt8x16(
+ lhsDest, rhs, temp, lhsDest);
+}
+
+void MacroAssembler::unsignedRightShiftInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt8x16(count, src,
+ dest);
+}
+
+void MacroAssembler::rightShiftInt16x8(Register rhs, FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::packedRightShiftByScalarInt16x8(lhsDest, rhs,
+ lhsDest);
+}
+
+void MacroAssembler::rightShiftInt16x8(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsraw(count, src, dest);
+}
+
+void MacroAssembler::unsignedRightShiftInt16x8(Register rhs,
+ FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt16x8(lhsDest, rhs,
+ lhsDest);
+}
+
+void MacroAssembler::unsignedRightShiftInt16x8(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsrlw(count, src, dest);
+}
+
+void MacroAssembler::rightShiftInt32x4(Register rhs, FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::packedRightShiftByScalarInt32x4(lhsDest, rhs,
+ lhsDest);
+}
+
+void MacroAssembler::rightShiftInt32x4(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsrad(count, src, dest);
+}
+
+void MacroAssembler::unsignedRightShiftInt32x4(Register rhs,
+ FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt32x4(lhsDest, rhs,
+ lhsDest);
+}
+
+void MacroAssembler::unsignedRightShiftInt32x4(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsrld(count, src, dest);
+}
+
+void MacroAssembler::rightShiftInt64x2(Register rhs, FloatRegister lhsDest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::packedRightShiftByScalarInt64x2(lhsDest, rhs, temp,
+ lhsDest);
+}
+
+void MacroAssembler::rightShiftInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::packedRightShiftByScalarInt64x2(count, src, dest);
+}
+
+void MacroAssembler::unsignedRightShiftInt64x2(Register rhs,
+ FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt64x2(lhsDest, rhs,
+ lhsDest);
+}
+
+void MacroAssembler::unsignedRightShiftInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsrlq(count, src, dest);
+}
+
+// Sign replication operation
+
+void MacroAssembler::signReplicationInt8x16(FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(src != dest);
+ vpxor(Operand(dest), dest, dest);
+ vpcmpgtb(Operand(src), dest, dest);
+}
+
+void MacroAssembler::signReplicationInt16x8(FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsraw(Imm32(15), src, dest);
+}
+
+void MacroAssembler::signReplicationInt32x4(FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpsrad(Imm32(31), src, dest);
+}
+
+void MacroAssembler::signReplicationInt64x2(FloatRegister src,
+ FloatRegister dest) {
+ vpshufd(ComputeShuffleMask(1, 1, 3, 3), src, dest);
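+  // The vpshufd above duplicated each lane's high dword; shifting those
+  // copies right by 31 broadcasts the sign bit across the whole lane (there
+  // is no packed 64-bit arithmetic right shift before AVX-512).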
+ vpsrad(Imm32(31), dest, dest);
+}
+
+// Bitwise and, or, xor, not
+
+void MacroAssembler::bitwiseAndSimd128(FloatRegister rhs,
+ FloatRegister lhsDest) {
+ vpand(Operand(rhs), lhsDest, lhsDest);
+}
+
+void MacroAssembler::bitwiseAndSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpand(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::bitwiseAndSimd128(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpand,
+ &MacroAssembler::vpandSimd128);
+}
+
+void MacroAssembler::bitwiseOrSimd128(FloatRegister rhs,
+ FloatRegister lhsDest) {
+ vpor(Operand(rhs), lhsDest, lhsDest);
+}
+
+void MacroAssembler::bitwiseOrSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpor(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::bitwiseOrSimd128(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpor,
+ &MacroAssembler::vporSimd128);
+}
+
+void MacroAssembler::bitwiseXorSimd128(FloatRegister rhs,
+ FloatRegister lhsDest) {
+ vpxor(Operand(rhs), lhsDest, lhsDest);
+}
+
+void MacroAssembler::bitwiseXorSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpxor(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::bitwiseXorSimd128(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpxor,
+ &MacroAssembler::vpxorSimd128);
+}
+
+void MacroAssembler::bitwiseNotSimd128(FloatRegister src, FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ bitwiseXorSimd128(src, SimdConstant::SplatX16(-1), dest);
+}
+
+// Bitwise and-not
+
+void MacroAssembler::bitwiseNotAndSimd128(FloatRegister rhs,
+ FloatRegister lhsDest) {
+ vpandn(Operand(rhs), lhsDest, lhsDest);
+}
+
+void MacroAssembler::bitwiseNotAndSimd128(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpandn(Operand(rhs), lhs, dest);
+}
+
+// Bitwise select
+
+void MacroAssembler::bitwiseSelectSimd128(FloatRegister mask,
+ FloatRegister onTrue,
+ FloatRegister onFalse,
+ FloatRegister dest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::selectSimd128(mask, onTrue, onFalse, temp, dest);
+}
+
+// Population count
+
+void MacroAssembler::popcntInt8x16(FloatRegister src, FloatRegister dest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::popcntInt8x16(src, temp, dest);
+}
+
+// Comparisons (integer and floating-point)
+
+void MacroAssembler::compareInt8x16(Assembler::Condition cond,
+ FloatRegister rhs, FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::compareInt8x16(lhsDest, Operand(rhs), cond, lhsDest);
+}
+
+void MacroAssembler::compareInt8x16(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::compareInt8x16(lhs, Operand(rhs), cond, dest);
+}
+
+void MacroAssembler::compareInt8x16(Assembler::Condition cond,
+ FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ MOZ_ASSERT(cond != Assembler::Condition::LessThan &&
+ cond != Assembler::Condition::GreaterThanOrEqual);
+ MacroAssemblerX86Shared::compareInt8x16(cond, lhs, rhs, dest);
+}
+
+void MacroAssembler::compareInt16x8(Assembler::Condition cond,
+ FloatRegister rhs, FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::compareInt16x8(lhsDest, Operand(rhs), cond, lhsDest);
+}
+
+void MacroAssembler::compareInt16x8(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::compareInt16x8(lhs, Operand(rhs), cond, dest);
+}
+
+void MacroAssembler::compareInt16x8(Assembler::Condition cond,
+ FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ MOZ_ASSERT(cond != Assembler::Condition::LessThan &&
+ cond != Assembler::Condition::GreaterThanOrEqual);
+ MacroAssemblerX86Shared::compareInt16x8(cond, lhs, rhs, dest);
+}
+
+void MacroAssembler::compareInt32x4(Assembler::Condition cond,
+ FloatRegister rhs, FloatRegister lhsDest) {
+ MacroAssemblerX86Shared::compareInt32x4(lhsDest, Operand(rhs), cond, lhsDest);
+}
+
+void MacroAssembler::compareInt32x4(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::compareInt32x4(lhs, Operand(rhs), cond, dest);
+}
+
+void MacroAssembler::compareInt32x4(Assembler::Condition cond,
+ FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ MOZ_ASSERT(cond != Assembler::Condition::LessThan &&
+ cond != Assembler::Condition::GreaterThanOrEqual);
+ MacroAssemblerX86Shared::compareInt32x4(cond, lhs, rhs, dest);
+}
+
+void MacroAssembler::compareForEqualityInt64x2(Assembler::Condition cond,
+ FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::compareForEqualityInt64x2(lhs, Operand(rhs), cond,
+ dest);
+}
+
+void MacroAssembler::compareForOrderingInt64x2(
+ Assembler::Condition cond, FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1, FloatRegister temp2) {
+ if (HasAVX() && HasSSE42()) {
+ MacroAssemblerX86Shared::compareForOrderingInt64x2AVX(lhs, rhs, cond, dest);
+ } else {
+ MacroAssemblerX86Shared::compareForOrderingInt64x2(lhs, Operand(rhs), cond,
+ temp1, temp2, dest);
+ }
+}
+
+void MacroAssembler::compareFloat32x4(Assembler::Condition cond,
+ FloatRegister rhs,
+ FloatRegister lhsDest) {
+  // Code in the SIMD implementation allows operands to be reversed like this,
+  // which benefits the baseline compiler. Ion takes care of the reversing
+  // itself and never generates GT/GE.
+ if (cond == Assembler::GreaterThan) {
+ MacroAssemblerX86Shared::compareFloat32x4(rhs, Operand(lhsDest),
+ Assembler::LessThan, lhsDest);
+ } else if (cond == Assembler::GreaterThanOrEqual) {
+ MacroAssemblerX86Shared::compareFloat32x4(
+ rhs, Operand(lhsDest), Assembler::LessThanOrEqual, lhsDest);
+ } else {
+ MacroAssemblerX86Shared::compareFloat32x4(lhsDest, Operand(rhs), cond,
+ lhsDest);
+ }
+}
+
+void MacroAssembler::compareFloat32x4(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::compareFloat32x4(lhs, Operand(rhs), cond, dest);
+}
+
+void MacroAssembler::compareFloat32x4(Assembler::Condition cond,
+ FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ MOZ_ASSERT(cond != Assembler::Condition::GreaterThan &&
+ cond != Assembler::Condition::GreaterThanOrEqual);
+ MacroAssemblerX86Shared::compareFloat32x4(cond, lhs, rhs, dest);
+}
+
+void MacroAssembler::compareFloat64x2(Assembler::Condition cond,
+ FloatRegister rhs,
+ FloatRegister lhsDest) {
+ compareFloat64x2(cond, lhsDest, rhs, lhsDest);
+}
+
+void MacroAssembler::compareFloat64x2(Assembler::Condition cond,
+ FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+  // Code in the SIMD implementation allows operands to be reversed like this,
+  // which benefits the baseline compiler. Ion takes care of the reversing
+  // itself and never generates GT/GE.
+ if (cond == Assembler::GreaterThan) {
+ MacroAssemblerX86Shared::compareFloat64x2(rhs, Operand(lhs),
+ Assembler::LessThan, dest);
+ } else if (cond == Assembler::GreaterThanOrEqual) {
+ MacroAssemblerX86Shared::compareFloat64x2(rhs, Operand(lhs),
+ Assembler::LessThanOrEqual, dest);
+ } else {
+ MacroAssemblerX86Shared::compareFloat64x2(lhs, Operand(rhs), cond, dest);
+ }
+}
+
+void MacroAssembler::compareFloat64x2(Assembler::Condition cond,
+ FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ MOZ_ASSERT(cond != Assembler::Condition::GreaterThan &&
+ cond != Assembler::Condition::GreaterThanOrEqual);
+ MacroAssemblerX86Shared::compareFloat64x2(cond, lhs, rhs, dest);
+}
+
+// Load. See comments above regarding integer operation.
+
+void MacroAssembler::loadUnalignedSimd128(const Operand& src,
+ FloatRegister dest) {
+ loadUnalignedSimd128Int(src, dest);
+}
+
+void MacroAssembler::loadUnalignedSimd128(const Address& src,
+ FloatRegister dest) {
+ loadUnalignedSimd128Int(src, dest);
+}
+
+void MacroAssembler::loadUnalignedSimd128(const BaseIndex& src,
+ FloatRegister dest) {
+ loadUnalignedSimd128Int(src, dest);
+}
+
+// Store. See comments above regarding integer operation.
+
+void MacroAssembler::storeUnalignedSimd128(FloatRegister src,
+ const Address& dest) {
+ storeUnalignedSimd128Int(src, dest);
+}
+
+void MacroAssembler::storeUnalignedSimd128(FloatRegister src,
+ const BaseIndex& dest) {
+ storeUnalignedSimd128Int(src, dest);
+}
+
+// Floating point negation
+
+void MacroAssembler::negFloat32x4(FloatRegister src, FloatRegister dest) {
+ src = moveSimd128FloatIfNotAVX(src, dest);
+ bitwiseXorSimd128(src, SimdConstant::SplatX4(-0.f), dest);
+}
+
+void MacroAssembler::negFloat64x2(FloatRegister src, FloatRegister dest) {
+ src = moveSimd128FloatIfNotAVX(src, dest);
+ bitwiseXorSimd128(src, SimdConstant::SplatX2(-0.0), dest);
+}
+
+// Floating point absolute value
+
+void MacroAssembler::absFloat32x4(FloatRegister src, FloatRegister dest) {
+ src = moveSimd128FloatIfNotAVX(src, dest);
+ bitwiseAndSimd128(src, SimdConstant::SplatX4(0x7FFFFFFF), dest);
+}
+
+void MacroAssembler::absFloat64x2(FloatRegister src, FloatRegister dest) {
+ src = moveSimd128FloatIfNotAVX(src, dest);
+ bitwiseAndSimd128(src, SimdConstant::SplatX2(int64_t(0x7FFFFFFFFFFFFFFFll)),
+ dest);
+}
+
+// NaN-propagating minimum
+
+void MacroAssembler::minFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) {
+ MacroAssemblerX86Shared::minFloat32x4(lhs, rhs, temp1, temp2, dest);
+}
+
+void MacroAssembler::minFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) {
+ MacroAssemblerX86Shared::minFloat64x2(lhs, rhs, temp1, temp2, dest);
+}
+
+// NaN-propagating maximum
+
+void MacroAssembler::maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) {
+ MacroAssemblerX86Shared::maxFloat32x4(lhs, rhs, temp1, temp2, dest);
+}
+
+void MacroAssembler::maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest, FloatRegister temp1,
+ FloatRegister temp2) {
+ MacroAssemblerX86Shared::maxFloat64x2(lhs, rhs, temp1, temp2, dest);
+}
+
+// Compare-based minimum
+
+void MacroAssembler::pseudoMinFloat32x4(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest) {
+ // Shut up the linter by using the same names as in the declaration, then
+ // aliasing here.
+ FloatRegister rhsDest = rhsOrRhsDest;
+ FloatRegister lhs = lhsOrLhsDest;
+ vminps(Operand(lhs), rhsDest, rhsDest);
+}
+
+void MacroAssembler::pseudoMinFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vminps(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::pseudoMinFloat64x2(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest) {
+ FloatRegister rhsDest = rhsOrRhsDest;
+ FloatRegister lhs = lhsOrLhsDest;
+ vminpd(Operand(lhs), rhsDest, rhsDest);
+}
+
+void MacroAssembler::pseudoMinFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vminpd(Operand(rhs), lhs, dest);
+}
+
+// Compare-based maximum
+
+void MacroAssembler::pseudoMaxFloat32x4(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest) {
+ FloatRegister rhsDest = rhsOrRhsDest;
+ FloatRegister lhs = lhsOrLhsDest;
+ vmaxps(Operand(lhs), rhsDest, rhsDest);
+}
+
+void MacroAssembler::pseudoMaxFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vmaxps(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::pseudoMaxFloat64x2(FloatRegister rhsOrRhsDest,
+ FloatRegister lhsOrLhsDest) {
+ FloatRegister rhsDest = rhsOrRhsDest;
+ FloatRegister lhs = lhsOrLhsDest;
+ vmaxpd(Operand(lhs), rhsDest, rhsDest);
+}
+
+void MacroAssembler::pseudoMaxFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vmaxpd(Operand(rhs), lhs, dest);
+}
+
+// Widening/pairwise integer dot product
+
+void MacroAssembler::widenDotInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpmaddwd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::widenDotInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpmaddwd,
+ &MacroAssembler::vpmaddwdSimd128);
+}
+
+void MacroAssembler::dotInt8x16Int7x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ if (lhs == dest && !HasAVX()) {
+ moveSimd128Int(lhs, scratch);
+ lhs = scratch;
+ }
+ rhs = moveSimd128IntIfNotAVX(rhs, dest);
+ vpmaddubsw(lhs, rhs, dest);
+}
+
+void MacroAssembler::dotInt8x16Int7x16ThenAdd(FloatRegister lhs,
+ FloatRegister rhs,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
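+  // vpmaddubsw multiplies adjacent byte pairs and sums each pair into a
+  // 16-bit lane; the 7-bit operand (per the Int7x16 naming) keeps the pair
+  // sums within int16 range, so the instruction's saturation never triggers.
+  // vpmaddwd against a vector of ones then folds adjacent 16-bit lanes into
+  // 32-bit sums, which are accumulated into dest.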
+ rhs = moveSimd128IntIfNotAVX(rhs, scratch);
+ vpmaddubsw(lhs, rhs, scratch);
+ vpmaddwdSimd128(SimdConstant::SplatX8(1), scratch, scratch);
+ vpaddd(Operand(scratch), dest, dest);
+}
+
+// Rounding
+
+void MacroAssembler::ceilFloat32x4(FloatRegister src, FloatRegister dest) {
+ vroundps(Assembler::SSERoundingMode::Ceil, Operand(src), dest);
+}
+
+void MacroAssembler::ceilFloat64x2(FloatRegister src, FloatRegister dest) {
+ vroundpd(Assembler::SSERoundingMode::Ceil, Operand(src), dest);
+}
+
+void MacroAssembler::floorFloat32x4(FloatRegister src, FloatRegister dest) {
+ vroundps(Assembler::SSERoundingMode::Floor, Operand(src), dest);
+}
+
+void MacroAssembler::floorFloat64x2(FloatRegister src, FloatRegister dest) {
+ vroundpd(Assembler::SSERoundingMode::Floor, Operand(src), dest);
+}
+
+void MacroAssembler::truncFloat32x4(FloatRegister src, FloatRegister dest) {
+ vroundps(Assembler::SSERoundingMode::Trunc, Operand(src), dest);
+}
+
+void MacroAssembler::truncFloat64x2(FloatRegister src, FloatRegister dest) {
+ vroundpd(Assembler::SSERoundingMode::Trunc, Operand(src), dest);
+}
+
+void MacroAssembler::nearestFloat32x4(FloatRegister src, FloatRegister dest) {
+ vroundps(Assembler::SSERoundingMode::Nearest, Operand(src), dest);
+}
+
+void MacroAssembler::nearestFloat64x2(FloatRegister src, FloatRegister dest) {
+ vroundpd(Assembler::SSERoundingMode::Nearest, Operand(src), dest);
+}
+
+// Floating add
+
+void MacroAssembler::addFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vaddps(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::addFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vaddps,
+ &MacroAssembler::vaddpsSimd128);
+}
+
+void MacroAssembler::addFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vaddpd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::addFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vaddpd,
+ &MacroAssembler::vaddpdSimd128);
+}
+
+// Floating subtract
+
+void MacroAssembler::subFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vsubps(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::subFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vsubps,
+ &MacroAssembler::vsubpsSimd128);
+}
+
+void MacroAssembler::subFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ AssemblerX86Shared::vsubpd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::subFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vsubpd,
+ &MacroAssembler::vsubpdSimd128);
+}
+
+// Floating division
+
+void MacroAssembler::divFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vdivps(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::divFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vdivps,
+ &MacroAssembler::vdivpsSimd128);
+}
+
+void MacroAssembler::divFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vdivpd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::divFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vdivpd,
+ &MacroAssembler::vdivpdSimd128);
+}
+
+// Floating Multiply
+
+void MacroAssembler::mulFloat32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vmulps(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::mulFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vmulps,
+ &MacroAssembler::vmulpsSimd128);
+}
+
+void MacroAssembler::mulFloat64x2(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vmulpd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::mulFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vmulpd,
+ &MacroAssembler::vmulpdSimd128);
+}
+
+// Pairwise add
+
+void MacroAssembler::extAddPairwiseInt8x16(FloatRegister src,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
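+  // Pairwise widening add expressed as a multiply-add against a vector of
+  // ones: each adjacent byte pair is multiplied by 1 and summed into a
+  // 16-bit lane, and no saturation can occur.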
+ if (dest == src) {
+ moveSimd128(src, scratch);
+ src = scratch;
+ }
+ loadConstantSimd128Int(SimdConstant::SplatX16(1), dest);
+ vpmaddubsw(src, dest, dest);
+}
+
+void MacroAssembler::unsignedExtAddPairwiseInt8x16(FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpmaddubswSimd128(SimdConstant::SplatX16(1), src, dest);
+}
+
+void MacroAssembler::extAddPairwiseInt16x8(FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpmaddwdSimd128(SimdConstant::SplatX8(1), src, dest);
+}
+
+void MacroAssembler::unsignedExtAddPairwiseInt16x8(FloatRegister src,
+ FloatRegister dest) {
+ src = moveSimd128IntIfNotAVX(src, dest);
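+  // vpmaddwd is a signed multiply-add, so bias the unsigned inputs into
+  // signed range (x ^ 0x8000 == x - 0x8000), pairwise-add against 1, then add
+  // back the two per-pair biases (2 * 0x8000 == 0x00010000) to each result.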
+ vpxorSimd128(SimdConstant::SplatX8(-0x8000), src, dest);
+ vpmaddwdSimd128(SimdConstant::SplatX8(1), dest, dest);
+ vpadddSimd128(SimdConstant::SplatX4(0x00010000), dest, dest);
+}
+
+// Floating square root
+
+void MacroAssembler::sqrtFloat32x4(FloatRegister src, FloatRegister dest) {
+ vsqrtps(Operand(src), dest);
+}
+
+void MacroAssembler::sqrtFloat64x2(FloatRegister src, FloatRegister dest) {
+ vsqrtpd(Operand(src), dest);
+}
+
+// Integer to floating point with rounding
+
+void MacroAssembler::convertInt32x4ToFloat32x4(FloatRegister src,
+ FloatRegister dest) {
+ vcvtdq2ps(src, dest);
+}
+
+void MacroAssembler::unsignedConvertInt32x4ToFloat32x4(FloatRegister src,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::unsignedConvertInt32x4ToFloat32x4(src, dest);
+}
+
+void MacroAssembler::convertInt32x4ToFloat64x2(FloatRegister src,
+ FloatRegister dest) {
+ vcvtdq2pd(src, dest);
+}
+
+void MacroAssembler::unsignedConvertInt32x4ToFloat64x2(FloatRegister src,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::unsignedConvertInt32x4ToFloat64x2(src, dest);
+}
+
+// Floating point to integer with saturation
+
+void MacroAssembler::truncSatFloat32x4ToInt32x4(FloatRegister src,
+ FloatRegister dest) {
+ MacroAssemblerX86Shared::truncSatFloat32x4ToInt32x4(src, dest);
+}
+
+void MacroAssembler::unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
+ FloatRegister dest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::unsignedTruncSatFloat32x4ToInt32x4(src, temp, dest);
+}
+
+void MacroAssembler::truncSatFloat64x2ToInt32x4(FloatRegister src,
+ FloatRegister dest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::truncSatFloat64x2ToInt32x4(src, temp, dest);
+}
+
+void MacroAssembler::unsignedTruncSatFloat64x2ToInt32x4(FloatRegister src,
+ FloatRegister dest,
+ FloatRegister temp) {
+ MacroAssemblerX86Shared::unsignedTruncSatFloat64x2ToInt32x4(src, temp, dest);
+}
+
+void MacroAssembler::truncFloat32x4ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest) {
+ vcvttps2dq(src, dest);
+}
+
+void MacroAssembler::unsignedTruncFloat32x4ToInt32x4Relaxed(
+ FloatRegister src, FloatRegister dest) {
+ MacroAssemblerX86Shared::unsignedTruncFloat32x4ToInt32x4Relaxed(src, dest);
+}
+
+void MacroAssembler::truncFloat64x2ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest) {
+ vcvttpd2dq(src, dest);
+}
+
+void MacroAssembler::unsignedTruncFloat64x2ToInt32x4Relaxed(
+ FloatRegister src, FloatRegister dest) {
+ MacroAssemblerX86Shared::unsignedTruncFloat64x2ToInt32x4Relaxed(src, dest);
+}
+
+// Floating point widening
+
+void MacroAssembler::convertFloat64x2ToFloat32x4(FloatRegister src,
+ FloatRegister dest) {
+ vcvtpd2ps(src, dest);
+}
+
+void MacroAssembler::convertFloat32x4ToFloat64x2(FloatRegister src,
+ FloatRegister dest) {
+ vcvtps2pd(src, dest);
+}
+
+// Integer to integer narrowing
+
+void MacroAssembler::narrowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpacksswb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::narrowInt16x8(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpacksswb,
+ &MacroAssembler::vpacksswbSimd128);
+}
+
+void MacroAssembler::unsignedNarrowInt16x8(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpackuswb(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedNarrowInt16x8(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpackuswb,
+ &MacroAssembler::vpackuswbSimd128);
+}
+
+void MacroAssembler::narrowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpackssdw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::narrowInt32x4(FloatRegister lhs, const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpackssdw,
+ &MacroAssembler::vpackssdwSimd128);
+}
+
+void MacroAssembler::unsignedNarrowInt32x4(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vpackusdw(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::unsignedNarrowInt32x4(FloatRegister lhs,
+ const SimdConstant& rhs,
+ FloatRegister dest) {
+ binarySimd128(lhs, rhs, dest, &MacroAssembler::vpackusdw,
+ &MacroAssembler::vpackusdwSimd128);
+}
+
+// Integer to integer widening
+
+void MacroAssembler::widenLowInt8x16(FloatRegister src, FloatRegister dest) {
+ vpmovsxbw(Operand(src), dest);
+}
+
+void MacroAssembler::widenHighInt8x16(FloatRegister src, FloatRegister dest) {
+ vpalignr(Operand(src), dest, dest, 8);
+ vpmovsxbw(Operand(dest), dest);
+}
+
+void MacroAssembler::unsignedWidenLowInt8x16(FloatRegister src,
+ FloatRegister dest) {
+ vpmovzxbw(Operand(src), dest);
+}
+
+void MacroAssembler::unsignedWidenHighInt8x16(FloatRegister src,
+ FloatRegister dest) {
+ vpalignr(Operand(src), dest, dest, 8);
+ vpmovzxbw(Operand(dest), dest);
+}
+
+void MacroAssembler::widenLowInt16x8(FloatRegister src, FloatRegister dest) {
+ vpmovsxwd(Operand(src), dest);
+}
+
+void MacroAssembler::widenHighInt16x8(FloatRegister src, FloatRegister dest) {
+ vpalignr(Operand(src), dest, dest, 8);
+ vpmovsxwd(Operand(dest), dest);
+}
+
+void MacroAssembler::unsignedWidenLowInt16x8(FloatRegister src,
+ FloatRegister dest) {
+ vpmovzxwd(Operand(src), dest);
+}
+
+void MacroAssembler::unsignedWidenHighInt16x8(FloatRegister src,
+ FloatRegister dest) {
+ vpalignr(Operand(src), dest, dest, 8);
+ vpmovzxwd(Operand(dest), dest);
+}
+
+void MacroAssembler::widenLowInt32x4(FloatRegister src, FloatRegister dest) {
+ vpmovsxdq(Operand(src), dest);
+}
+
+void MacroAssembler::unsignedWidenLowInt32x4(FloatRegister src,
+ FloatRegister dest) {
+ vpmovzxdq(Operand(src), dest);
+}
+
+void MacroAssembler::widenHighInt32x4(FloatRegister src, FloatRegister dest) {
+ if (src == dest || HasAVX()) {
+ vmovhlps(src, src, dest);
+ } else {
+ vpshufd(ComputeShuffleMask(2, 3, 2, 3), src, dest);
+ }
+ vpmovsxdq(Operand(dest), dest);
+}
+
+void MacroAssembler::unsignedWidenHighInt32x4(FloatRegister src,
+ FloatRegister dest) {
+ ScratchSimd128Scope scratch(*this);
+ src = moveSimd128IntIfNotAVX(src, dest);
+ vpxor(scratch, scratch, scratch);
+ vpunpckhdq(scratch, src, dest);
+}
+
+// Floating multiply-accumulate: srcDest [+-]= src1 * src2
+// When FMA is available (HasFMA()) the fused vfmadd231/vfnmadd231 forms are
+// used; otherwise fall back to a separate multiply and add/subtract.
+
+void MacroAssembler::fmaFloat32x4(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) {
+ if (HasFMA()) {
+ vfmadd231ps(src2, src1, srcDest);
+ return;
+ }
+ ScratchSimd128Scope scratch(*this);
+ src1 = moveSimd128FloatIfNotAVX(src1, scratch);
+ mulFloat32x4(src1, src2, scratch);
+ addFloat32x4(srcDest, scratch, srcDest);
+}
+
+void MacroAssembler::fnmaFloat32x4(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) {
+ if (HasFMA()) {
+ vfnmadd231ps(src2, src1, srcDest);
+ return;
+ }
+ ScratchSimd128Scope scratch(*this);
+ src1 = moveSimd128FloatIfNotAVX(src1, scratch);
+ mulFloat32x4(src1, src2, scratch);
+ subFloat32x4(srcDest, scratch, srcDest);
+}
+
+void MacroAssembler::fmaFloat64x2(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) {
+ if (HasFMA()) {
+ vfmadd231pd(src2, src1, srcDest);
+ return;
+ }
+ ScratchSimd128Scope scratch(*this);
+ src1 = moveSimd128FloatIfNotAVX(src1, scratch);
+ mulFloat64x2(src1, src2, scratch);
+ addFloat64x2(srcDest, scratch, srcDest);
+}
+
+void MacroAssembler::fnmaFloat64x2(FloatRegister src1, FloatRegister src2,
+ FloatRegister srcDest) {
+ if (HasFMA()) {
+ vfnmadd231pd(src2, src1, srcDest);
+ return;
+ }
+ ScratchSimd128Scope scratch(*this);
+ src1 = moveSimd128FloatIfNotAVX(src1, scratch);
+ mulFloat64x2(src1, src2, scratch);
+ subFloat64x2(srcDest, scratch, srcDest);
+}
+
+void MacroAssembler::minFloat32x4Relaxed(FloatRegister src,
+ FloatRegister srcDest) {
+ vminps(Operand(src), srcDest, srcDest);
+}
+
+void MacroAssembler::minFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vminps(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::maxFloat32x4Relaxed(FloatRegister src,
+ FloatRegister srcDest) {
+ vmaxps(Operand(src), srcDest, srcDest);
+}
+
+void MacroAssembler::maxFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vmaxps(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::minFloat64x2Relaxed(FloatRegister src,
+ FloatRegister srcDest) {
+ vminpd(Operand(src), srcDest, srcDest);
+}
+
+void MacroAssembler::minFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vminpd(Operand(rhs), lhs, dest);
+}
+
+void MacroAssembler::maxFloat64x2Relaxed(FloatRegister src,
+ FloatRegister srcDest) {
+ vmaxpd(Operand(src), srcDest, srcDest);
+}
+
+void MacroAssembler::maxFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest) {
+ vmaxpd(Operand(rhs), lhs, dest);
+}
+
+// ========================================================================
+// Truncate floating point.
+
+void MacroAssembler::truncateFloat32ToInt64(Address src, Address dest,
+ Register temp) {
+ if (Assembler::HasSSE3()) {
+ fld32(Operand(src));
+ fisttp(Operand(dest));
+ return;
+ }
+
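+  // reserveStack below moves esp down by 2 * sizeof(int32_t), so rebase any
+  // esp-relative src/dest to keep them pointing at the original slots.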
+ if (src.base == esp) {
+ src.offset += 2 * sizeof(int32_t);
+ }
+ if (dest.base == esp) {
+ dest.offset += 2 * sizeof(int32_t);
+ }
+
+ reserveStack(2 * sizeof(int32_t));
+
+ // Set conversion to truncation.
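+  // (0xCFF = mask all x87 exceptions, extended precision, and RC = 11,
+  // i.e. round toward zero.)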
+ fnstcw(Operand(esp, 0));
+ load32(Operand(esp, 0), temp);
+ andl(Imm32(~0xFF00), temp);
+ orl(Imm32(0xCFF), temp);
+ store32(temp, Address(esp, sizeof(int32_t)));
+ fldcw(Operand(esp, sizeof(int32_t)));
+
+ // Load double on fp stack, convert and load regular stack.
+ fld32(Operand(src));
+ fistp(Operand(dest));
+
+ // Reset the conversion flag.
+ fldcw(Operand(esp, 0));
+
+ freeStack(2 * sizeof(int32_t));
+}
+void MacroAssembler::truncateDoubleToInt64(Address src, Address dest,
+ Register temp) {
+ if (Assembler::HasSSE3()) {
+ fld(Operand(src));
+ fisttp(Operand(dest));
+ return;
+ }
+
+ if (src.base == esp) {
+ src.offset += 2 * sizeof(int32_t);
+ }
+ if (dest.base == esp) {
+ dest.offset += 2 * sizeof(int32_t);
+ }
+
+ reserveStack(2 * sizeof(int32_t));
+
+ // Set conversion to truncation.
+ fnstcw(Operand(esp, 0));
+ load32(Operand(esp, 0), temp);
+ andl(Imm32(~0xFF00), temp);
+ orl(Imm32(0xCFF), temp);
+ store32(temp, Address(esp, 1 * sizeof(int32_t)));
+ fldcw(Operand(esp, 1 * sizeof(int32_t)));
+
+ // Load double on fp stack, convert and load regular stack.
+ fld(Operand(src));
+ fistp(Operand(dest));
+
+ // Reset the conversion flag.
+ fldcw(Operand(esp, 0));
+
+ freeStack(2 * sizeof(int32_t));
+}
+
+// ===============================================================
+// Clamping functions.
+
+void MacroAssembler::clampIntToUint8(Register reg) {
+ Label inRange;
+ branchTest32(Assembler::Zero, reg, Imm32(0xffffff00), &inRange);
+ {
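+    // Branchless clamp: the arithmetic shift propagates the sign bit, so the
+    // not/and below yields 0 for negative inputs and 255 for inputs > 255.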
+ sarl(Imm32(31), reg);
+ notl(reg);
+ andl(Imm32(255), reg);
+ }
+ bind(&inRange);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_MacroAssembler_x86_shared_inl_h */
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
new file mode 100644
index 0000000000..185c555be0
--- /dev/null
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -0,0 +1,2132 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+
+#include "mozilla/Casting.h"
+
+#include "jsmath.h"
+
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Note: this function clobbers the input register.
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+ ScratchDoubleScope scratch(*this);
+ MOZ_ASSERT(input != scratch);
+ Label positive, done;
+
+ // <= 0 or NaN --> 0
+ zeroDouble(scratch);
+ branchDouble(DoubleGreaterThan, input, scratch, &positive);
+ {
+ move32(Imm32(0), output);
+ jump(&done);
+ }
+
+ bind(&positive);
+
+ // Add 0.5 and truncate.
+ loadConstantDouble(0.5, scratch);
+ addDouble(scratch, input);
+
+ Label outOfRange;
+
+ // Truncate to int32 and ensure the result <= 255. This relies on the
+ // processor setting output to a value > 255 for doubles outside the int32
+ // range (for instance 0x80000000).
+ vcvttsd2si(input, output);
+ branch32(Assembler::Above, output, Imm32(255), &outOfRange);
+ {
+ // Check if we had a tie.
+ convertInt32ToDouble(output, scratch);
+ branchDouble(DoubleNotEqual, input, scratch, &done);
+
+ // It was a tie. Mask out the ones bit to get an even value.
+ // See also js_TypedArray_uint8_clamp_double.
+ and32(Imm32(~1), output);
+ jump(&done);
+ }
+
+ // > 255 --> 255
+ bind(&outOfRange);
+ { move32(Imm32(255), output); }
+
+ bind(&done);
+}
+
+bool MacroAssemblerX86Shared::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ asMasm().PushFrameDescriptor(FrameType::IonJS);
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+ asMasm().Push(FramePointer);
+ return true;
+}
+
+void MacroAssemblerX86Shared::branchNegativeZero(FloatRegister reg,
+ Register scratch, Label* label,
+ bool maybeNonZero) {
+ // Determines whether the low double contained in the XMM register reg
+ // is equal to -0.0.
+
+#if defined(JS_CODEGEN_X86)
+ Label nonZero;
+
+ // if not already compared to zero
+ if (maybeNonZero) {
+ ScratchDoubleScope scratchDouble(asMasm());
+
+ // Compare to zero. Lets through {0, -0}.
+ zeroDouble(scratchDouble);
+
+ // If reg is non-zero, jump to nonZero.
+ asMasm().branchDouble(DoubleNotEqual, reg, scratchDouble, &nonZero);
+ }
+ // Input register is either zero or negative zero. Retrieve sign of input.
+ vmovmskpd(reg, scratch);
+
+  // If the sign mask in scratch is 1 or 3, the input is negative zero.
+  // If it is 0 or 2, the input is an ordinary zero.
+ asMasm().branchTest32(NonZero, scratch, Imm32(1), label);
+
+ bind(&nonZero);
+#elif defined(JS_CODEGEN_X64)
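+  // The bit pattern of -0.0 is 0x8000000000000000 == INT64_MIN, the only
+  // value for which subtracting 1 sets the signed overflow flag.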
+ vmovq(reg, scratch);
+ cmpq(Imm32(1), scratch);
+ j(Overflow, label);
+#endif
+}
+
+void MacroAssemblerX86Shared::branchNegativeZeroFloat32(FloatRegister reg,
+ Register scratch,
+ Label* label) {
+ vmovd(reg, scratch);
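+  // -0.0f has bit pattern 0x80000000 == INT32_MIN, the only value that
+  // overflows when 1 is subtracted, so the Overflow branch fires exactly for
+  // negative zero.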
+ cmp32(scratch, Imm32(1));
+ j(Overflow, label);
+}
+
+MacroAssembler& MacroAssemblerX86Shared::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerX86Shared::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+template <class T, class Map>
+T* MacroAssemblerX86Shared::getConstant(const typename T::Pod& value, Map& map,
+ Vector<T, 0, SystemAllocPolicy>& vec) {
+ using AddPtr = typename Map::AddPtr;
+ size_t index;
+ if (AddPtr p = map.lookupForAdd(value)) {
+ index = p->value();
+ } else {
+ index = vec.length();
+ enoughMemory_ &= vec.append(T(value));
+ if (!enoughMemory_) {
+ return nullptr;
+ }
+ enoughMemory_ &= map.add(p, value, index);
+ if (!enoughMemory_) {
+ return nullptr;
+ }
+ }
+ return &vec[index];
+}
+
+MacroAssemblerX86Shared::Float* MacroAssemblerX86Shared::getFloat(float f) {
+ return getConstant<Float, FloatMap>(f, floatMap_, floats_);
+}
+
+MacroAssemblerX86Shared::Double* MacroAssemblerX86Shared::getDouble(double d) {
+ return getConstant<Double, DoubleMap>(d, doubleMap_, doubles_);
+}
+
+MacroAssemblerX86Shared::SimdData* MacroAssemblerX86Shared::getSimdData(
+ const SimdConstant& v) {
+ return getConstant<SimdData, SimdMap>(v, simdMap_, simds_);
+}
+
+void MacroAssemblerX86Shared::binarySimd128(
+ const SimdConstant& rhs, FloatRegister lhsDest,
+ void (MacroAssembler::*regOp)(const Operand&, FloatRegister, FloatRegister),
+ void (MacroAssembler::*constOp)(const SimdConstant&, FloatRegister)) {
+ ScratchSimd128Scope scratch(asMasm());
+ if (maybeInlineSimd128Int(rhs, scratch)) {
+ (asMasm().*regOp)(Operand(scratch), lhsDest, lhsDest);
+ } else {
+ (asMasm().*constOp)(rhs, lhsDest);
+ }
+}
+
+void MacroAssemblerX86Shared::binarySimd128(
+ FloatRegister lhs, const SimdConstant& rhs, FloatRegister dest,
+ void (MacroAssembler::*regOp)(const Operand&, FloatRegister, FloatRegister),
+ void (MacroAssembler::*constOp)(const SimdConstant&, FloatRegister,
+ FloatRegister)) {
+ ScratchSimd128Scope scratch(asMasm());
+ if (maybeInlineSimd128Int(rhs, scratch)) {
+ (asMasm().*regOp)(Operand(scratch), lhs, dest);
+ } else {
+ (asMasm().*constOp)(rhs, lhs, dest);
+ }
+}
+
+void MacroAssemblerX86Shared::binarySimd128(
+ const SimdConstant& rhs, FloatRegister lhs,
+ void (MacroAssembler::*regOp)(const Operand&, FloatRegister),
+ void (MacroAssembler::*constOp)(const SimdConstant&, FloatRegister)) {
+ ScratchSimd128Scope scratch(asMasm());
+ if (maybeInlineSimd128Int(rhs, scratch)) {
+ (asMasm().*regOp)(Operand(scratch), lhs);
+ } else {
+ (asMasm().*constOp)(rhs, lhs);
+ }
+}
+
+void MacroAssemblerX86Shared::bitwiseTestSimd128(const SimdConstant& rhs,
+ FloatRegister lhs) {
+ ScratchSimd128Scope scratch(asMasm());
+ if (maybeInlineSimd128Int(rhs, scratch)) {
+ vptest(scratch, lhs);
+ } else {
+ asMasm().vptestSimd128(rhs, lhs);
+ }
+}
+
+void MacroAssemblerX86Shared::minMaxDouble(FloatRegister first,
+ FloatRegister second, bool canBeNaN,
+ bool isMax) {
+ Label done, nan, minMaxInst;
+
+ // Do a vucomisd to catch equality and NaNs, which both require special
+ // handling. If the operands are ordered and inequal, we branch straight to
+ // the min/max instruction. If we wanted, we could also branch for less-than
+ // or greater-than here instead of using min/max, however these conditions
+ // will sometimes be hard on the branch predictor.
+ vucomisd(second, first);
+ j(Assembler::NotEqual, &minMaxInst);
+ if (canBeNaN) {
+ j(Assembler::Parity, &nan);
+ }
+
+ // Ordered and equal. The operands are bit-identical unless they are zero
+ // and negative zero. These instructions merge the sign bits in that
+ // case, and are no-ops otherwise.
+ if (isMax) {
+ vandpd(second, first, first);
+ } else {
+ vorpd(second, first, first);
+ }
+ jump(&done);
+
+ // x86's min/max are not symmetric; if either operand is a NaN, they return
+ // the read-only operand. We need to return a NaN if either operand is a
+ // NaN, so we explicitly check for a NaN in the read-write operand.
+ if (canBeNaN) {
+ bind(&nan);
+ vucomisd(first, first);
+ j(Assembler::Parity, &done);
+ }
+
+ // When the values are inequal, or second is NaN, x86's min and max will
+ // return the value we need.
+ bind(&minMaxInst);
+ if (isMax) {
+ vmaxsd(second, first, first);
+ } else {
+ vminsd(second, first, first);
+ }
+
+ bind(&done);
+}
+
+void MacroAssemblerX86Shared::minMaxFloat32(FloatRegister first,
+ FloatRegister second, bool canBeNaN,
+ bool isMax) {
+ Label done, nan, minMaxInst;
+
+ // Do a vucomiss to catch equality and NaNs, which both require special
+ // handling. If the operands are ordered and inequal, we branch straight to
+ // the min/max instruction. If we wanted, we could also branch for less-than
+ // or greater-than here instead of using min/max, however these conditions
+ // will sometimes be hard on the branch predictor.
+ vucomiss(second, first);
+ j(Assembler::NotEqual, &minMaxInst);
+ if (canBeNaN) {
+ j(Assembler::Parity, &nan);
+ }
+
+ // Ordered and equal. The operands are bit-identical unless they are zero
+ // and negative zero. These instructions merge the sign bits in that
+ // case, and are no-ops otherwise.
+ if (isMax) {
+ vandps(second, first, first);
+ } else {
+ vorps(second, first, first);
+ }
+ jump(&done);
+
+ // x86's min/max are not symmetric; if either operand is a NaN, they return
+ // the read-only operand. We need to return a NaN if either operand is a
+ // NaN, so we explicitly check for a NaN in the read-write operand.
+ if (canBeNaN) {
+ bind(&nan);
+ vucomiss(first, first);
+ j(Assembler::Parity, &done);
+ }
+
+ // When the values are inequal, or second is NaN, x86's min and max will
+ // return the value we need.
+ bind(&minMaxInst);
+ if (isMax) {
+ vmaxss(second, first, first);
+ } else {
+ vminss(second, first, first);
+ }
+
+ bind(&done);
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MacroAssembler::MustMaskShiftCountSimd128(wasm::SimdOp op, int32_t* mask) {
+ switch (op) {
+ case wasm::SimdOp::I8x16Shl:
+ case wasm::SimdOp::I8x16ShrU:
+ case wasm::SimdOp::I8x16ShrS:
+ *mask = 7;
+ break;
+ case wasm::SimdOp::I16x8Shl:
+ case wasm::SimdOp::I16x8ShrU:
+ case wasm::SimdOp::I16x8ShrS:
+ *mask = 15;
+ break;
+ case wasm::SimdOp::I32x4Shl:
+ case wasm::SimdOp::I32x4ShrU:
+ case wasm::SimdOp::I32x4ShrS:
+ *mask = 31;
+ break;
+ case wasm::SimdOp::I64x2Shl:
+ case wasm::SimdOp::I64x2ShrU:
+ case wasm::SimdOp::I64x2ShrS:
+ *mask = 63;
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift operation");
+ }
+ return true;
+}
+#endif
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void MacroAssembler::flush() {}
+
+void MacroAssembler::comment(const char* msg) { masm.comment(msg); }
+
+// This operation consists of five phases, because on x86_shared the division
+// instruction requires its dividend in eax and clobbers edx.
+//
+// Input: { rhs, lhsOutput }
+//
+// [PUSH] Preserve registers
+// [MOVE] Generate moves to specific registers
+//
+// [DIV] Input: { regForRhs, EAX }
+// [DIV] extend EAX into EDX
+// [DIV] x86 Division operator
+// [DIV] Output: { EAX, EDX }
+//
+// [MOVE] Move specific registers to outputs
+// [POP] Restore registers
+//
+// Output: { lhsOutput, remainderOutput }
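+//
+// For example (hypothetical register assignment): with rhs = ecx,
+// lhsOutput = edi and remOutput = esi, the code pushes eax, edx and ecx,
+// moves edi into eax, extends into edx, divides by ecx, moves eax (quotient)
+// into edi and edx (remainder) into esi, and finally restores the pushed
+// registers.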
+void MacroAssembler::flexibleDivMod32(Register rhs, Register lhsOutput,
+ Register remOutput, bool isUnsigned,
+ const LiveRegisterSet&) {
+ // This helper does not currently handle lhsOutput aliasing rhs or
+ // remOutput.
+ MOZ_ASSERT(lhsOutput != rhs);
+ MOZ_ASSERT(lhsOutput != remOutput);
+
+ // Choose a register other than edx or eax to hold the rhs;
+ // ebx is chosen arbitrarily, and will be preserved if necessary.
+ Register regForRhs = (rhs == eax || rhs == edx) ? ebx : rhs;
+
+ // Add the registers we will clobber to the preserve set, but remove the
+ // outputs, which we do not restore.
+ LiveRegisterSet preserve;
+ preserve.add(edx);
+ preserve.add(eax);
+ preserve.add(regForRhs);
+
+ preserve.takeUnchecked(lhsOutput);
+ preserve.takeUnchecked(remOutput);
+
+ PushRegsInMask(preserve);
+
+ // Shuffle input into place.
+ moveRegPair(lhsOutput, rhs, eax, regForRhs);
+ if (oom()) {
+ return;
+ }
+
+ // Sign extend eax into edx to make (edx:eax): idiv/udiv are 64-bit.
+ if (isUnsigned) {
+ mov(ImmWord(0), edx);
+ udiv(regForRhs);
+ } else {
+ cdq();
+ idiv(regForRhs);
+ }
+
+ moveRegPair(eax, edx, lhsOutput, remOutput);
+ if (oom()) {
+ return;
+ }
+
+ PopRegsInMask(preserve);
+}
+
+void MacroAssembler::flexibleQuotient32(
+ Register rhs, Register srcDest, bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs) {
+ // Choose an arbitrary register that isn't eax, edx, rhs or srcDest;
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.takeUnchecked(eax);
+ regs.takeUnchecked(edx);
+ regs.takeUnchecked(rhs);
+ regs.takeUnchecked(srcDest);
+
+ Register remOut = regs.takeAny();
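+ // The remainder itself is not needed; preserve whatever value the borrowed
+ // register held across the division.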
+ push(remOut);
+ flexibleDivMod32(rhs, srcDest, remOut, isUnsigned, volatileLiveRegs);
+ pop(remOut);
+}
+
+void MacroAssembler::flexibleRemainder32(
+ Register rhs, Register srcDest, bool isUnsigned,
+ const LiveRegisterSet& volatileLiveRegs) {
+ // Choose an arbitrary register that isn't eax, edx, rhs or srcDest
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.takeUnchecked(eax);
+ regs.takeUnchecked(edx);
+ regs.takeUnchecked(rhs);
+ regs.takeUnchecked(srcDest);
+
+ Register remOut = regs.takeAny();
+ push(remOut);
+ flexibleDivMod32(rhs, srcDest, remOut, isUnsigned, volatileLiveRegs);
+ mov(remOut, srcDest);
+ pop(remOut);
+}
+
+// ===============================================================
+// Stack manipulation functions.
+
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ return set.gprs().size() * sizeof(intptr_t) + fpuSet.getPushSizeInBytes();
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+ mozilla::DebugOnly<size_t> framePushedInitial = framePushed();
+
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ unsigned numFpu = fpuSet.size();
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ // On x86, always use push to push the integer registers, as it's fast
+ // on modern hardware and it's a small instruction.
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ Push(*iter);
+ }
+ MOZ_ASSERT(diffG == 0);
+ (void)diffG;
+
+ reserveStack(diffF);
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ Address spillAddress(StackPointer, diffF);
+ if (reg.isDouble()) {
+ storeDouble(reg, spillAddress);
+ } else if (reg.isSingle()) {
+ storeFloat32(reg, spillAddress);
+ } else if (reg.isSimd128()) {
+ storeUnalignedSimd128(reg, spillAddress);
+ } else {
+ MOZ_CRASH("Unknown register type.");
+ }
+ }
+ MOZ_ASSERT(numFpu == 0);
+ (void)numFpu;
+
+ // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
+ // GetPushSizeInBytes.
+ size_t alignExtra = ((size_t)diffF) % sizeof(uintptr_t);
+ MOZ_ASSERT_IF(sizeof(uintptr_t) == 8, alignExtra == 0 || alignExtra == 4);
+ MOZ_ASSERT_IF(sizeof(uintptr_t) == 4, alignExtra == 0);
+ diffF -= alignExtra;
+ MOZ_ASSERT(diffF == 0);
+
+ // The macroassembler will keep the stack sizeof(uintptr_t)-aligned, so
+ // we don't need to take into account `alignExtra` here.
+ MOZ_ASSERT(framePushed() - framePushedInitial ==
+ PushRegsInMaskSizeInBytes(set));
+}
+
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register) {
+ mozilla::DebugOnly<size_t> offsetInitial = dest.offset;
+
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ unsigned numFpu = fpuSet.size();
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ MOZ_ASSERT(dest.offset >= diffG + diffF);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ dest.offset -= sizeof(intptr_t);
+ storePtr(*iter, dest);
+ }
+ MOZ_ASSERT(diffG == 0);
+ (void)diffG;
+
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ dest.offset -= reg.size();
+ if (reg.isDouble()) {
+ storeDouble(reg, dest);
+ } else if (reg.isSingle()) {
+ storeFloat32(reg, dest);
+ } else if (reg.isSimd128()) {
+ storeUnalignedSimd128(reg, dest);
+ } else {
+ MOZ_CRASH("Unknown register type.");
+ }
+ }
+ MOZ_ASSERT(numFpu == 0);
+ (void)numFpu;
+
+ // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
+ // GetPushSizeInBytes.
+ size_t alignExtra = ((size_t)diffF) % sizeof(uintptr_t);
+ MOZ_ASSERT_IF(sizeof(uintptr_t) == 8, alignExtra == 0 || alignExtra == 4);
+ MOZ_ASSERT_IF(sizeof(uintptr_t) == 4, alignExtra == 0);
+ diffF -= alignExtra;
+ MOZ_ASSERT(diffF == 0);
+
+ // If `alignExtra` is nonzero, the save area actually used is `alignExtra`
+ // bytes smaller than what PushRegsInMaskSizeInBytes claims, so compensate
+ // for that here.
+ MOZ_ASSERT(alignExtra + offsetInitial - dest.offset ==
+ PushRegsInMaskSizeInBytes(set));
+}
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ mozilla::DebugOnly<size_t> framePushedInitial = framePushed();
+
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ unsigned numFpu = fpuSet.size();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ const int32_t reservedG = diffG;
+ const int32_t reservedF = diffF;
+
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ if (ignore.has(reg)) {
+ continue;
+ }
+
+ Address spillAddress(StackPointer, diffF);
+ if (reg.isDouble()) {
+ loadDouble(spillAddress, reg);
+ } else if (reg.isSingle()) {
+ loadFloat32(spillAddress, reg);
+ } else if (reg.isSimd128()) {
+ loadUnalignedSimd128(spillAddress, reg);
+ } else {
+ MOZ_CRASH("Unknown register type.");
+ }
+ }
+ freeStack(reservedF);
+ MOZ_ASSERT(numFpu == 0);
+ (void)numFpu;
+ // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
+ // GetPushSizeInBytes.
+ diffF -= diffF % sizeof(uintptr_t);
+ MOZ_ASSERT(diffF == 0);
+
+ // On x86, use pop to pop the integer registers, if we're not going to
+ // ignore any slots, as it's fast on modern hardware and it's a small
+ // instruction.
+ if (ignore.emptyGeneral()) {
+ for (GeneralRegisterForwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ Pop(*iter);
+ }
+ } else {
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more();
+ ++iter) {
+ diffG -= sizeof(intptr_t);
+ if (!ignore.has(*iter)) {
+ loadPtr(Address(StackPointer, diffG), *iter);
+ }
+ }
+ freeStack(reservedG);
+ }
+ MOZ_ASSERT(diffG == 0);
+
+ MOZ_ASSERT(framePushedInitial - framePushed() ==
+ PushRegsInMaskSizeInBytes(set));
+}
+
+void MacroAssembler::Push(const Operand op) {
+ push(op);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(Register reg) {
+ push(reg);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const Imm32 imm) {
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const ImmWord imm) {
+ push(imm);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(const ImmPtr imm) {
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssembler::Push(const ImmGCPtr ptr) {
+ push(ptr);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Push(FloatRegister t) {
+ push(t);
+ adjustFrame(sizeof(double));
+}
+
+void MacroAssembler::PushFlags() {
+ pushFlags();
+ adjustFrame(sizeof(intptr_t));
+}
+
+void MacroAssembler::Pop(const Operand op) {
+ pop(op);
+ implicitPop(sizeof(intptr_t));
+}
+
+void MacroAssembler::Pop(Register reg) {
+ pop(reg);
+ implicitPop(sizeof(intptr_t));
+}
+
+void MacroAssembler::Pop(FloatRegister reg) {
+ pop(reg);
+ implicitPop(sizeof(double));
+}
+
+void MacroAssembler::Pop(const ValueOperand& val) {
+ popValue(val);
+ implicitPop(sizeof(Value));
+}
+
+void MacroAssembler::PopFlags() {
+ popFlags();
+ implicitPop(sizeof(intptr_t));
+}
+
+void MacroAssembler::PopStackPtr() { Pop(StackPointer); }
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset MacroAssembler::call(Register reg) { return Assembler::call(reg); }
+
+CodeOffset MacroAssembler::call(Label* label) { return Assembler::call(label); }
+
+void MacroAssembler::call(const Address& addr) {
+ Assembler::call(Operand(addr.base, addr.offset));
+}
+
+CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
+ mov(target, eax);
+ return Assembler::call(eax);
+}
+
+void MacroAssembler::call(ImmWord target) { Assembler::call(target); }
+
+void MacroAssembler::call(ImmPtr target) { Assembler::call(target); }
+
+void MacroAssembler::call(JitCode* target) { Assembler::call(target); }
+
+CodeOffset MacroAssembler::callWithPatch() {
+ return Assembler::callWithPatch();
+}
+void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ Assembler::patchCall(callerOffset, calleeOffset);
+}
+
+void MacroAssembler::callAndPushReturnAddress(Register reg) { call(reg); }
+
+void MacroAssembler::callAndPushReturnAddress(Label* label) { call(label); }
+
+// ===============================================================
+// Patchable near/far jumps.
+
+CodeOffset MacroAssembler::farJumpWithPatch() {
+ return Assembler::farJumpWithPatch();
+}
+
+void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ Assembler::patchFarJump(farJump, targetOffset);
+}
+
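+// The five-byte nop occupies exactly the space of a rel32 call, so the site
+// can later be patched to a call (and back) in place.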
+CodeOffset MacroAssembler::nopPatchableToCall() {
+ masm.nop_five();
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchNopToCall(uint8_t* callsite, uint8_t* target) {
+ Assembler::patchFiveByteNopToCall(callsite, target);
+}
+
+void MacroAssembler::patchCallToNop(uint8_t* callsite) {
+ Assembler::patchCallToFiveByteNop(callsite);
+}
+
+// ===============================================================
+// Jit Frames.
+
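+// Materialize the address of the point just after the push, push it, and
+// bind the label there, so the pushed word looks like the return address of
+// a call returning to the offset this function reports.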
+uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
+ CodeLabel cl;
+
+ mov(&cl, scratch);
+ Push(scratch);
+ bind(&cl);
+ uint32_t retAddr = currentOffset();
+
+ addCodeLabel(cl);
+ return retAddr;
+}
+
+// ===============================================================
+// WebAssembly
+
+CodeOffset MacroAssembler::wasmTrapInstruction() { return ud2(); }
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ cmp32(index, boundsCheckLimit);
+ j(cond, ok);
+ if (JitOptions.spectreIndexMasking) {
+ cmovCCl(cond, Operand(boundsCheckLimit), index);
+ }
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ cmp32(index, Operand(boundsCheckLimit));
+ j(cond, ok);
+ if (JitOptions.spectreIndexMasking) {
+ cmovCCl(cond, Operand(boundsCheckLimit), index);
+ }
+}
+
+// RAII class that generates the jumps to traps when it's destructed, to
+// prevent some code duplication in the outOfLineWasmTruncateXtoY methods.
+struct MOZ_RAII AutoHandleWasmTruncateToIntErrors {
+ MacroAssembler& masm;
+ Label inputIsNaN;
+ Label intOverflow;
+ wasm::BytecodeOffset off;
+
+ explicit AutoHandleWasmTruncateToIntErrors(MacroAssembler& masm,
+ wasm::BytecodeOffset off)
+ : masm(masm), off(off) {}
+
+ ~AutoHandleWasmTruncateToIntErrors() {
+ // Handle errors. These cases are not in arbitrary order: code will
+ // fall through to intOverflow.
+ masm.bind(&intOverflow);
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, off);
+
+ masm.bind(&inputIsNaN);
+ masm.wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ }
+};
+
+void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
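+ // vcvttsd2si returns the sentinel INT32_MIN on overflow or NaN, and
+ // INT32_MIN is the only value for which subtracting 1 sets the overflow
+ // flag, so cmp/j(Overflow) routes those cases (including a genuine
+ // INT32_MIN result, which the OOL path re-validates) to oolEntry.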
+ vcvttsd2si(input, output);
+ cmp32(output, Imm32(1));
+ j(Assembler::Overflow, oolEntry);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ vcvttss2si(input, output);
+ cmp32(output, Imm32(1));
+ j(Assembler::Overflow, oolEntry);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+ if (isUnsigned) {
+ // Negative overflow and NaN both are converted to 0, and the only
+ // other case is positive overflow which is converted to
+ // UINT32_MAX.
+ Label nonNegative;
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(0.0, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &nonNegative);
+ move32(Imm32(0), output);
+ jump(rejoin);
+
+ bind(&nonNegative);
+ move32(Imm32(UINT32_MAX), output);
+ } else {
+ // Negative overflow is already saturated to INT32_MIN, so we only
+ // have to handle NaN and positive overflow here.
+ Label notNaN;
+ branchDouble(Assembler::DoubleOrdered, input, input, &notNaN);
+ move32(Imm32(0), output);
+ jump(rejoin);
+
+ bind(&notNaN);
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(0.0, fpscratch);
+ branchDouble(Assembler::DoubleLessThan, input, fpscratch, rejoin);
+ sub32(Imm32(1), output);
+ }
+ jump(rejoin);
+ return;
+ }
+
+ AutoHandleWasmTruncateToIntErrors traps(*this, off);
+
+ // Eagerly take care of NaNs.
+ branchDouble(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);
+
+ // For unsigned, fall through to intOverflow failure case.
+ if (isUnsigned) {
+ return;
+ }
+
+ // Handle special values.
+
+ // We've used vcvttsd2si. The only valid double values that can
+ // truncate to INT32_MIN are in ]INT32_MIN - 1; INT32_MIN].
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(double(INT32_MIN) - 1.0, fpscratch);
+ branchDouble(Assembler::DoubleLessThanOrEqual, input, fpscratch,
+ &traps.intOverflow);
+
+ loadConstantDouble(0.0, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThan, input, fpscratch,
+ &traps.intOverflow);
+ jump(rejoin);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+ if (isUnsigned) {
+ // Negative overflow and NaN both are converted to 0, and the only
+ // other case is positive overflow which is converted to
+ // UINT32_MAX.
+ Label nonNegative;
+ ScratchFloat32Scope fpscratch(*this);
+ loadConstantFloat32(0.0f, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, input, fpscratch,
+ &nonNegative);
+ move32(Imm32(0), output);
+ jump(rejoin);
+
+ bind(&nonNegative);
+ move32(Imm32(UINT32_MAX), output);
+ } else {
+ // Negative overflow is already saturated to INT32_MIN, so we only
+ // have to handle NaN and positive overflow here.
+ Label notNaN;
+ branchFloat(Assembler::DoubleOrdered, input, input, &notNaN);
+ move32(Imm32(0), output);
+ jump(rejoin);
+
+ bind(&notNaN);
+ ScratchFloat32Scope fpscratch(*this);
+ loadConstantFloat32(0.0f, fpscratch);
+ branchFloat(Assembler::DoubleLessThan, input, fpscratch, rejoin);
+ sub32(Imm32(1), output);
+ }
+ jump(rejoin);
+ return;
+ }
+
+ AutoHandleWasmTruncateToIntErrors traps(*this, off);
+
+ // Eagerly take care of NaNs.
+ branchFloat(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);
+
+ // For unsigned, fall through to intOverflow failure case.
+ if (isUnsigned) {
+ return;
+ }
+
+ // Handle special values.
+
+ // We've used vcvttss2si. Check that the input wasn't
+ // float(INT32_MIN), which is the only legitimate input that
+ // would truncate to INT32_MIN.
+ ScratchFloat32Scope fpscratch(*this);
+ loadConstantFloat32(float(INT32_MIN), fpscratch);
+ branchFloat(Assembler::DoubleNotEqual, input, fpscratch, &traps.intOverflow);
+ jump(rejoin);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+ if (isUnsigned) {
+ // Negative overflow and NaN both are converted to 0, and the only
+ // other case is positive overflow which is converted to
+ // UINT64_MAX.
+ Label positive;
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(0.0, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThan, input, fpscratch, &positive);
+ move64(Imm64(0), output);
+ jump(rejoin);
+
+ bind(&positive);
+ move64(Imm64(UINT64_MAX), output);
+ } else {
+ // Negative overflow is already saturated to INT64_MIN, so we only
+ // have to handle NaN and positive overflow here.
+ Label notNaN;
+ branchDouble(Assembler::DoubleOrdered, input, input, &notNaN);
+ move64(Imm64(0), output);
+ jump(rejoin);
+
+ bind(&notNaN);
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(0.0, fpscratch);
+ branchDouble(Assembler::DoubleLessThan, input, fpscratch, rejoin);
+ sub64(Imm64(1), output);
+ }
+ jump(rejoin);
+ return;
+ }
+
+ AutoHandleWasmTruncateToIntErrors traps(*this, off);
+
+ // Eagerly take care of NaNs.
+ branchDouble(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);
+
+ // Handle special values.
+ if (isUnsigned) {
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(0.0, fpscratch);
+ branchDouble(Assembler::DoubleGreaterThan, input, fpscratch,
+ &traps.intOverflow);
+ loadConstantDouble(-1.0, fpscratch);
+ branchDouble(Assembler::DoubleLessThanOrEqual, input, fpscratch,
+ &traps.intOverflow);
+ jump(rejoin);
+ return;
+ }
+
+ // We've used vcvttsd2sq. The only legitimate value whose i64 truncation
+ // is INT64_MIN is double(INT64_MIN): its exponent is so large that the
+ // spacing between adjacent representable doubles is much greater than 1.
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(double(int64_t(INT64_MIN)), fpscratch);
+ branchDouble(Assembler::DoubleNotEqual, input, fpscratch, &traps.intOverflow);
+ jump(rejoin);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+ if (isUnsigned) {
+ // Negative overflow and NaN both are converted to 0, and the only
+ // other case is positive overflow which is converted to
+ // UINT64_MAX.
+ Label positive;
+ ScratchFloat32Scope fpscratch(*this);
+ loadConstantFloat32(0.0f, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThan, input, fpscratch, &positive);
+ move64(Imm64(0), output);
+ jump(rejoin);
+
+ bind(&positive);
+ move64(Imm64(UINT64_MAX), output);
+ } else {
+ // Negative overflow is already saturated to INT64_MIN, so we only
+ // have to handle NaN and positive overflow here.
+ Label notNaN;
+ branchFloat(Assembler::DoubleOrdered, input, input, &notNaN);
+ move64(Imm64(0), output);
+ jump(rejoin);
+
+ bind(&notNaN);
+ ScratchFloat32Scope fpscratch(*this);
+ loadConstantFloat32(0.0f, fpscratch);
+ branchFloat(Assembler::DoubleLessThan, input, fpscratch, rejoin);
+ sub64(Imm64(1), output);
+ }
+ jump(rejoin);
+ return;
+ }
+
+ AutoHandleWasmTruncateToIntErrors traps(*this, off);
+
+ // Eagerly take care of NaNs.
+ branchFloat(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);
+
+ // Handle special values.
+ if (isUnsigned) {
+ ScratchFloat32Scope fpscratch(*this);
+ loadConstantFloat32(0.0f, fpscratch);
+ branchFloat(Assembler::DoubleGreaterThan, input, fpscratch,
+ &traps.intOverflow);
+ loadConstantFloat32(-1.0f, fpscratch);
+ branchFloat(Assembler::DoubleLessThanOrEqual, input, fpscratch,
+ &traps.intOverflow);
+ jump(rejoin);
+ return;
+ }
+
+ // We've used vcvttss2sq. See the comment in oolWasmTruncateCheckF64ToI64.
+ ScratchFloat32Scope fpscratch(*this);
+ loadConstantFloat32(float(int64_t(INT64_MIN)), fpscratch);
+ branchFloat(Assembler::DoubleNotEqual, input, fpscratch, &traps.intOverflow);
+ jump(rejoin);
+}
+
+void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ enterFakeExitFrame(cxreg, scratch, type);
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
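+// Byte and halfword atomic operations only define the low bits of the result
+// register; widen to a full 32-bit value according to the element type's
+// signedness.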
+static void ExtendTo32(MacroAssembler& masm, Scalar::Type type, Register r) {
+ switch (Scalar::byteSize(type)) {
+ case 1:
+ if (Scalar::isSignedIntType(type)) {
+ masm.movsbl(r, r);
+ } else {
+ masm.movzbl(r, r);
+ }
+ break;
+ case 2:
+ if (Scalar::isSignedIntType(type)) {
+ masm.movswl(r, r);
+ } else {
+ masm.movzwl(r, r);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
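+// Byte-sized accesses need a register with an addressable low byte; on
+// x86-32 only eax, ebx, ecx and edx qualify.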
+static inline void CheckBytereg(Register r) {
+#ifdef DEBUG
+ AllocatableGeneralRegisterSet byteRegs(Registers::SingleByteRegs);
+ MOZ_ASSERT(byteRegs.has(r));
+#endif
+}
+
+static inline void CheckBytereg(Imm32 r) {
+ // Nothing
+}
+
+template <typename T>
+static void CompareExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const T& mem, Register oldval,
+ Register newval, Register output) {
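+ // cmpxchg compares memory against eax and leaves the old memory value in
+ // eax, which is why the output is pinned to eax.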
+ MOZ_ASSERT(output == eax);
+
+ if (oldval != output) {
+ masm.movl(oldval, output);
+ }
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ // NOTE: the generated code must match the assembly code in gen_cmpxchg in
+ // GenerateAtomicOperations.py
+ switch (Scalar::byteSize(type)) {
+ case 1:
+ CheckBytereg(newval);
+ masm.lock_cmpxchgb(newval, Operand(mem));
+ break;
+ case 2:
+ masm.lock_cmpxchgw(newval, Operand(mem));
+ break;
+ case 4:
+ masm.lock_cmpxchgl(newval, Operand(mem));
+ break;
+ }
+
+ ExtendTo32(masm, type, output);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&,
+ const Address& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, &access, access.type(), mem, oldval, newval, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register output) {
+ CompareExchange(*this, &access, access.type(), mem, oldval, newval, output);
+}
+
+template <typename T>
+static void AtomicExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const T& mem, Register value,
+ Register output)
+// NOTE: the generated code must match the assembly code in gen_exchange in
+// GenerateAtomicOperations.py
+{
+ if (value != output) {
+ masm.movl(value, output);
+ }
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
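+ // xchg with a memory operand is implicitly locked, so no lock prefix is
+ // needed.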
+ switch (Scalar::byteSize(type)) {
+ case 1:
+ CheckBytereg(output);
+ masm.xchgb(output, Operand(mem));
+ break;
+ case 2:
+ masm.xchgw(output, Operand(mem));
+ break;
+ case 4:
+ masm.xchgl(output, Operand(mem));
+ break;
+ default:
+ MOZ_CRASH("Invalid");
+ }
+ ExtendTo32(masm, type, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&,
+ const Address& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, nullptr, type, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&,
+ const BaseIndex& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, nullptr, type, mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, &access, access.type(), mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register value,
+ Register output) {
+ AtomicExchange(*this, &access, access.type(), mem, value, output);
+}
+
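+// There is no fetch-and-subtract instruction; subtraction is performed as an
+// xadd of the negated value.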
+static void SetupValue(MacroAssembler& masm, AtomicOp op, Imm32 src,
+ Register output) {
+ if (op == AtomicFetchSubOp) {
+ masm.movl(Imm32(-src.value), output);
+ } else {
+ masm.movl(src, output);
+ }
+}
+
+static void SetupValue(MacroAssembler& masm, AtomicOp op, Register src,
+ Register output) {
+ if (src != output) {
+ masm.movl(src, output);
+ }
+ if (op == AtomicFetchSubOp) {
+ masm.negl(output);
+ }
+}
+
+template <typename T, typename V>
+static void AtomicFetchOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type arrayType, AtomicOp op, V value,
+ const T& mem, Register temp, Register output) {
+ // Note value can be an Imm or a Register.
+
+ // NOTE: the generated code must match the assembly code in gen_fetchop in
+ // GenerateAtomicOperations.py
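+ //
+ // The bitwise ops have no single fetch-op instruction, so they use a CAS
+ // loop: load the old value into eax, apply OP into temp, then lock cmpxchg
+ // temp back; if memory changed concurrently the cmpxchg fails (ZF clear)
+ // and we retry.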
+#define ATOMIC_BITOP_BODY(LOAD, OP, LOCK_CMPXCHG) \
+ do { \
+ MOZ_ASSERT(output != temp); \
+ MOZ_ASSERT(output == eax); \
+ if (access) masm.append(*access, masm.size()); \
+ masm.LOAD(Operand(mem), eax); \
+ Label again; \
+ masm.bind(&again); \
+ masm.movl(eax, temp); \
+ masm.OP(value, temp); \
+ masm.LOCK_CMPXCHG(temp, Operand(mem)); \
+ masm.j(MacroAssembler::NonZero, &again); \
+ } while (0)
+
+ MOZ_ASSERT_IF(op == AtomicFetchAddOp || op == AtomicFetchSubOp,
+ temp == InvalidReg);
+
+ switch (Scalar::byteSize(arrayType)) {
+ case 1:
+ CheckBytereg(output);
+ switch (op) {
+ case AtomicFetchAddOp:
+ case AtomicFetchSubOp:
+ CheckBytereg(value); // But not for the bitwise ops
+ SetupValue(masm, op, value, output);
+ if (access) masm.append(*access, masm.size());
+ masm.lock_xaddb(output, Operand(mem));
+ break;
+ case AtomicFetchAndOp:
+ CheckBytereg(temp);
+ ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchgb);
+ break;
+ case AtomicFetchOrOp:
+ CheckBytereg(temp);
+ ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchgb);
+ break;
+ case AtomicFetchXorOp:
+ CheckBytereg(temp);
+ ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchgb);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case 2:
+ switch (op) {
+ case AtomicFetchAddOp:
+ case AtomicFetchSubOp:
+ SetupValue(masm, op, value, output);
+ if (access) masm.append(*access, masm.size());
+ masm.lock_xaddw(output, Operand(mem));
+ break;
+ case AtomicFetchAndOp:
+ ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchgw);
+ break;
+ case AtomicFetchOrOp:
+ ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchgw);
+ break;
+ case AtomicFetchXorOp:
+ ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchgw);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case 4:
+ switch (op) {
+ case AtomicFetchAddOp:
+ case AtomicFetchSubOp:
+ SetupValue(masm, op, value, output);
+ if (access) masm.append(*access, masm.size());
+ masm.lock_xaddl(output, Operand(mem));
+ break;
+ case AtomicFetchAndOp:
+ ATOMIC_BITOP_BODY(movl, andl, lock_cmpxchgl);
+ break;
+ case AtomicFetchOrOp:
+ ATOMIC_BITOP_BODY(movl, orl, lock_cmpxchgl);
+ break;
+ case AtomicFetchXorOp:
+ ATOMIC_BITOP_BODY(movl, xorl, lock_cmpxchgl);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ }
+ ExtendTo32(masm, arrayType, output);
+
+#undef ATOMIC_BITOP_BODY
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
+ const Synchronization&, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register temp, Register output) {
+ AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
+ const Synchronization&, AtomicOp op,
+ Register value, const Address& mem,
+ Register temp, Register output) {
+ AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
+ const Synchronization&, AtomicOp op,
+ Imm32 value, const BaseIndex& mem,
+ Register temp, Register output) {
+ AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
+ const Synchronization&, AtomicOp op,
+ Imm32 value, const Address& mem,
+ Register temp, Register output) {
+ AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Imm32 value,
+ const Address& mem, Register temp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Imm32 value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), op, value, mem, temp, output);
+}
+
+template <typename T, typename V>
+static void AtomicEffectOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type arrayType, AtomicOp op, V value,
+ const T& mem) {
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ switch (Scalar::byteSize(arrayType)) {
+ case 1:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.lock_addb(value, Operand(mem));
+ break;
+ case AtomicFetchSubOp:
+ masm.lock_subb(value, Operand(mem));
+ break;
+ case AtomicFetchAndOp:
+ masm.lock_andb(value, Operand(mem));
+ break;
+ case AtomicFetchOrOp:
+ masm.lock_orb(value, Operand(mem));
+ break;
+ case AtomicFetchXorOp:
+ masm.lock_xorb(value, Operand(mem));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case 2:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.lock_addw(value, Operand(mem));
+ break;
+ case AtomicFetchSubOp:
+ masm.lock_subw(value, Operand(mem));
+ break;
+ case AtomicFetchAndOp:
+ masm.lock_andw(value, Operand(mem));
+ break;
+ case AtomicFetchOrOp:
+ masm.lock_orw(value, Operand(mem));
+ break;
+ case AtomicFetchXorOp:
+ masm.lock_xorw(value, Operand(mem));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case 4:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.lock_addl(value, Operand(mem));
+ break;
+ case AtomicFetchSubOp:
+ masm.lock_subl(value, Operand(mem));
+ break;
+ case AtomicFetchAndOp:
+ masm.lock_andl(value, Operand(mem));
+ break;
+ case AtomicFetchOrOp:
+ masm.lock_orl(value, Operand(mem));
+ break;
+ case AtomicFetchXorOp:
+ masm.lock_xorl(value, Operand(mem));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ AtomicEffectOp(*this, &access, access.type(), op, value, mem);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Imm32 value,
+ const Address& mem, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ AtomicEffectOp(*this, &access, access.type(), op, value, mem);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ AtomicEffectOp(*this, &access, access.type(), op, value, mem);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Imm32 value,
+ const BaseIndex& mem, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ AtomicEffectOp(*this, &access, access.type(), op, value, mem);
+}
+
+// ========================================================================
+// JS atomic operations.
+
+template <typename T>
+static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register oldval, Register newval, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
+ }
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register temp,
+ AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register temp,
+ AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+template <typename T>
+static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register value, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicExchange(arrayType, sync, mem, value, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register temp, AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register temp, AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const T& mem, Register temp1,
+ Register temp2, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
+ masm.convertUInt32ToDouble(temp1, output.fpu());
+ } else {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register temp1, Register temp2,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register temp1, Register temp2,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization&, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization&, AtomicOp op,
+ Register value, const Address& mem,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization&, AtomicOp op,
+ Imm32 value, const Address& mem,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Imm32 value, const BaseIndex& mem,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
+}
+
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Imm32 value, const T& mem, Register temp1,
+ Register temp2, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
+ masm.convertUInt32ToDouble(temp1, output.fpu());
+ } else {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
+ }
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Imm32 value, const Address& mem,
+ Register temp1, Register temp2,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Imm32 value, const BaseIndex& mem,
+ Register temp1, Register temp2,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+// ========================================================================
+// Spectre Mitigations.
+
+void MacroAssembler::speculationBarrier() {
+ // The Spectre mitigation recommended by Intel and AMD is to use lfence as a
+ // barrier that forces all speculative execution of later instructions to end.
+ MOZ_ASSERT(HasSSE2());
+ masm.lfence();
+}
+
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ if (HasSSE41()) {
+ // Fail on negative-zero.
+ branchNegativeZeroFloat32(src, dest, fail);
+
+ // Round toward -Infinity.
+ {
+ ScratchFloat32Scope scratch(*this);
+ vroundss(X86Encoding::RoundDown, src, scratch);
+ truncateFloat32ToInt32(scratch, dest, fail);
+ }
+ } else {
+ Label negative, end;
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ {
+ ScratchFloat32Scope scratch(*this);
+ zeroFloat32(scratch);
+ branchFloat(Assembler::DoubleLessThan, src, scratch, &negative);
+ }
+
+ // Fail on negative-zero.
+ branchNegativeZeroFloat32(src, dest, fail);
+
+ // Input is non-negative, so truncation correctly rounds.
+ truncateFloat32ToInt32(src, dest, fail);
+ jump(&end);
+
+ // Input is negative, but isn't -0.
+ // Negative values go on a comparatively expensive path, since no
+ // native rounding mode matches JS semantics. Still better than callVM.
+ bind(&negative);
+ {
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ truncateFloat32ToInt32(src, dest, fail);
+
+ // Test whether the input double was integer-valued.
+ {
+ ScratchFloat32Scope scratch(*this);
+ convertInt32ToFloat32(dest, scratch);
+ branchFloat(Assembler::DoubleEqualOrUnordered, src, scratch, &end);
+ }
+
+ // Input is not integer-valued, so we rounded off-by-one in the
+ // wrong direction. Correct by subtraction.
+ subl(Imm32(1), dest);
+ // Cannot overflow: output was already checked against INT_MIN.
+ }
+
+ bind(&end);
+ }
+}
+
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ if (HasSSE41()) {
+ // Fail on negative-zero.
+ branchNegativeZero(src, dest, fail);
+
+ // Round toward -Infinity.
+ {
+ ScratchDoubleScope scratch(*this);
+ vroundsd(X86Encoding::RoundDown, src, scratch);
+ truncateDoubleToInt32(scratch, dest, fail);
+ }
+ } else {
+ Label negative, end;
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ {
+ ScratchDoubleScope scratch(*this);
+ zeroDouble(scratch);
+ branchDouble(Assembler::DoubleLessThan, src, scratch, &negative);
+ }
+
+ // Fail on negative-zero.
+ branchNegativeZero(src, dest, fail);
+
+ // Input is non-negative, so truncation correctly rounds.
+ truncateDoubleToInt32(src, dest, fail);
+ jump(&end);
+
+ // Input is negative, but isn't -0.
+ // Negative values go on a comparatively expensive path, since no
+ // native rounding mode matches JS semantics. Still better than callVM.
+ bind(&negative);
+ {
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ truncateDoubleToInt32(src, dest, fail);
+
+ // Test whether the input double was integer-valued.
+ {
+ ScratchDoubleScope scratch(*this);
+ convertInt32ToDouble(dest, scratch);
+ branchDouble(Assembler::DoubleEqualOrUnordered, src, scratch, &end);
+ }
+
+ // Input is not integer-valued, so we rounded off-by-one in the
+ // wrong direction. Correct by subtraction.
+ subl(Imm32(1), dest);
+ // Cannot overflow: output was already checked against INT_MIN.
+ }
+
+ bind(&end);
+ }
+}
+
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchFloat32Scope scratch(*this);
+
+ Label lessThanOrEqualMinusOne;
+
+ // If x is in ]-1,0], ceil(x) is -0, which cannot be represented as an int32.
+ // Fail if x > -1 and the sign bit is set.
+ loadConstantFloat32(-1.f, scratch);
+ branchFloat(Assembler::DoubleLessThanOrEqualOrUnordered, src, scratch,
+ &lessThanOrEqualMinusOne);
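+ // vmovmskps copies the lanes' sign bits into the destination; bit 0 is the
+ // sign of the scalar lane, so this rejects -0 and the remaining negative
+ // inputs.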
+ vmovmskps(src, dest);
+ branchTest32(Assembler::NonZero, dest, Imm32(1), fail);
+
+ if (HasSSE41()) {
+ // x <= -1 or x > -0
+ bind(&lessThanOrEqualMinusOne);
+ // Round toward +Infinity.
+ vroundss(X86Encoding::RoundUp, src, scratch);
+ truncateFloat32ToInt32(scratch, dest, fail);
+ return;
+ }
+
+ // No SSE4.1
+ Label end;
+
+ // x >= 0 and x is not -0.0. We can truncate integer values, and truncate and
+ // add 1 to non-integer values. This will also work for values >= INT_MAX + 1,
+ // as the truncate operation will return INT_MIN and we'll fail.
+ truncateFloat32ToInt32(src, dest, fail);
+ convertInt32ToFloat32(dest, scratch);
+ branchFloat(Assembler::DoubleEqualOrUnordered, src, scratch, &end);
+
+ // Input is not integer-valued, add 1 to obtain the ceiling value.
+ // If input > INT_MAX, output == INT_MAX so adding 1 will overflow.
+ branchAdd32(Assembler::Overflow, Imm32(1), dest, fail);
+ jump(&end);
+
+ // x <= -1, truncation is the way to go.
+ bind(&lessThanOrEqualMinusOne);
+ truncateFloat32ToInt32(src, dest, fail);
+
+ bind(&end);
+}
+
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchDoubleScope scratch(*this);
+
+ Label lessThanOrEqualMinusOne;
+
+ // If x is in ]-1,0], ceil(x) is -0, which cannot be represented as an int32.
+ // Fail if x > -1 and the sign bit is set.
+ loadConstantDouble(-1.0, scratch);
+ branchDouble(Assembler::DoubleLessThanOrEqualOrUnordered, src, scratch,
+ &lessThanOrEqualMinusOne);
+ vmovmskpd(src, dest);
+ branchTest32(Assembler::NonZero, dest, Imm32(1), fail);
+
+ if (HasSSE41()) {
+ // x <= -1 or x > -0
+ bind(&lessThanOrEqualMinusOne);
+ // Round toward +Infinity.
+ vroundsd(X86Encoding::RoundUp, src, scratch);
+ truncateDoubleToInt32(scratch, dest, fail);
+ return;
+ }
+
+ // No SSE4.1
+ Label end;
+
+ // x >= 0 and x is not -0.0. We can truncate integer values, and truncate and
+ // add 1 to non-integer values. This will also work for values >= INT_MAX + 1,
+ // as the truncate operation will return INT_MIN and we'll fail.
+ truncateDoubleToInt32(src, dest, fail);
+ convertInt32ToDouble(dest, scratch);
+ branchDouble(Assembler::DoubleEqualOrUnordered, src, scratch, &end);
+
+ // Input is not integer-valued, add 1 to obtain the ceiling value.
+ // If input > INT_MAX, output == INT_MAX so adding 1 will overflow.
+ branchAdd32(Assembler::Overflow, Imm32(1), dest, fail);
+ jump(&end);
+
+ // x <= -1, truncation is the way to go.
+ bind(&lessThanOrEqualMinusOne);
+ truncateDoubleToInt32(src, dest, fail);
+
+ bind(&end);
+}
+
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ Label lessThanOrEqualMinusOne;
+
+ // Bail on ]-1; -0] range
+ {
+ ScratchDoubleScope scratch(*this);
+ loadConstantDouble(-1, scratch);
+ branchDouble(Assembler::DoubleLessThanOrEqualOrUnordered, src, scratch,
+ &lessThanOrEqualMinusOne);
+ }
+
+ // Test for remaining values with the sign bit set, i.e. ]-1; -0]
+ vmovmskpd(src, dest);
+ branchTest32(Assembler::NonZero, dest, Imm32(1), fail);
+
+ // x <= -1 or x >= +0, truncation is the way to go.
+ bind(&lessThanOrEqualMinusOne);
+ truncateDoubleToInt32(src, dest, fail);
+}
+
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ Label lessThanOrEqualMinusOne;
+
+ // Bail on ]-1; -0] range
+ {
+ ScratchFloat32Scope scratch(*this);
+ loadConstantFloat32(-1.f, scratch);
+ branchFloat(Assembler::DoubleLessThanOrEqualOrUnordered, src, scratch,
+ &lessThanOrEqualMinusOne);
+ }
+
+ // Test for remaining values with the sign bit set, i.e. ]-1; -0]
+ vmovmskps(src, dest);
+ branchTest32(Assembler::NonZero, dest, Imm32(1), fail);
+
+ // x <= -1 or x >= +0, truncation is the way to go.
+ bind(&lessThanOrEqualMinusOne);
+ truncateFloat32ToInt32(src, dest, fail);
+}
+
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ScratchFloat32Scope scratch(*this);
+
+ Label negativeOrZero, negative, end;
+
+ // Branch to a slow path for non-positive inputs. Doesn't catch NaN.
+ zeroFloat32(scratch);
+ loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
+ branchFloat(Assembler::DoubleLessThanOrEqual, src, scratch, &negativeOrZero);
+ {
+ // Input is strictly positive or NaN. Add the biggest float less than 0.5
+ // and truncate, rounding down (because if the input is the biggest float
+ // less than 0.5, adding 0.5 would undesirably round up to 1). Note that we
+ // have to add the input to the temp register because we're not allowed to
+ // modify the input register.
+ addFloat32(src, temp);
+ truncateFloat32ToInt32(temp, dest, fail);
+ jump(&end);
+ }
+
+ // Input is negative, +0 or -0.
+ bind(&negativeOrZero);
+ {
+ // Branch on negative input.
+ j(Assembler::NotEqual, &negative);
+
+ // Fail on negative-zero.
+ branchNegativeZeroFloat32(src, dest, fail);
+
+ // Input is +0.
+ xor32(dest, dest);
+ jump(&end);
+ }
+
+ // Input is negative.
+ bind(&negative);
+ {
+ // Inputs in [-0.5, 0) are rounded to -0. Fail.
+ loadConstantFloat32(-0.5f, scratch);
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, src, scratch, fail);
+
+ // Other negative inputs need the biggest float less than 0.5 added.
+ //
+ // The result is stored in the temp register (which currently contains the
+ // biggest float less than 0.5).
+ addFloat32(src, temp);
+
+ if (HasSSE41()) {
+ // Round toward -Infinity.
+ vroundss(X86Encoding::RoundDown, temp, scratch);
+
+ // Truncate.
+ truncateFloat32ToInt32(scratch, dest, fail);
+ } else {
+ // Round toward -Infinity without the benefit of ROUNDSS.
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ truncateFloat32ToInt32(temp, dest, fail);
+
+ // Test whether the truncated float was integer-valued.
+ convertInt32ToFloat32(dest, scratch);
+ branchFloat(Assembler::DoubleEqualOrUnordered, temp, scratch, &end);
+
+ // Input is not integer-valued, so we rounded off-by-one in the
+ // wrong direction. Correct by subtraction.
+ subl(Imm32(1), dest);
+ // Cannot overflow: output was already checked against INT_MIN.
+ }
+ }
+
+ bind(&end);
+}
+
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ScratchDoubleScope scratch(*this);
+
+ Label negativeOrZero, negative, end;
+
+ // Branch to a slow path for non-positive inputs. Doesn't catch NaN.
+ zeroDouble(scratch);
+ loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+ branchDouble(Assembler::DoubleLessThanOrEqual, src, scratch, &negativeOrZero);
+ {
+ // Input is strictly positive or NaN. Add the biggest double less than 0.5
+ // and truncate, rounding down (because if the input is the biggest double
+ // less than 0.5, adding 0.5 would undesirably round up to 1). Note that we
+ // have to add the input to the temp register because we're not allowed to
+ // modify the input register.
+ addDouble(src, temp);
+ truncateDoubleToInt32(temp, dest, fail);
+ jump(&end);
+ }
+
+ // Input is negative, +0 or -0.
+ bind(&negativeOrZero);
+ {
+ // Branch on negative input.
+ j(Assembler::NotEqual, &negative);
+
+ // Fail on negative-zero.
+ branchNegativeZero(src, dest, fail, /* maybeNonZero = */ false);
+
+ // Input is +0
+ xor32(dest, dest);
+ jump(&end);
+ }
+
+ // Input is negative.
+ bind(&negative);
+ {
+ // Inputs in [-0.5, 0) are rounded to -0. Fail.
+ loadConstantDouble(-0.5, scratch);
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, src, scratch, fail);
+
+ // Other negative inputs need the biggest double less than 0.5 added.
+ //
+ // The result is stored in the temp register (which currently contains the
+ // biggest double less than 0.5).
+ addDouble(src, temp);
+
+ if (HasSSE41()) {
+ // Round toward -Infinity.
+ vroundsd(X86Encoding::RoundDown, temp, scratch);
+
+ // Truncate.
+ truncateDoubleToInt32(scratch, dest, fail);
+ } else {
+ // Round toward -Infinity without the benefit of ROUNDSD.
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ truncateDoubleToInt32(temp, dest, fail);
+
+ // Test whether the truncated double was integer-valued.
+ convertInt32ToDouble(dest, scratch);
+ branchDouble(Assembler::DoubleEqualOrUnordered, temp, scratch, &end);
+
+ // Input is not integer-valued, so we rounded off-by-one in the
+ // wrong direction. Correct by subtraction.
+ subl(Imm32(1), dest);
+ // Cannot overflow: output was already checked against INT_MIN.
+ }
+ }
+
+ bind(&end);
+}
+
+void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasRoundInstruction(mode));
+ vroundsd(Assembler::ToX86RoundingMode(mode), src, dest);
+}
+
+void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(HasRoundInstruction(mode));
+ vroundss(Assembler::ToX86RoundingMode(mode), src, dest);
+}
+
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ ScratchDoubleScope scratch(*this);
+
+ // TODO Support AVX2
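+ // output = (lhs with the sign bit cleared) | (rhs reduced to its sign bit),
+ // computed on the raw bit patterns with and/or.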
+ if (rhs == output) {
+ MOZ_ASSERT(lhs != rhs);
+ double keepSignMask = mozilla::BitwiseCast<double>(INT64_MIN);
+ loadConstantDouble(keepSignMask, scratch);
+ vandpd(scratch, rhs, output);
+
+ double clearSignMask = mozilla::BitwiseCast<double>(INT64_MAX);
+ loadConstantDouble(clearSignMask, scratch);
+ vandpd(lhs, scratch, scratch);
+ } else {
+ double clearSignMask = mozilla::BitwiseCast<double>(INT64_MAX);
+ loadConstantDouble(clearSignMask, scratch);
+ vandpd(scratch, lhs, output);
+
+ double keepSignMask = mozilla::BitwiseCast<double>(INT64_MIN);
+ loadConstantDouble(keepSignMask, scratch);
+ vandpd(rhs, scratch, scratch);
+ }
+
+ vorpd(scratch, output, output);
+}
+
+void MacroAssembler::copySignFloat32(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ ScratchFloat32Scope scratch(*this);
+
+ // TODO Support AVX2
+ if (rhs == output) {
+ MOZ_ASSERT(lhs != rhs);
+ float keepSignMask = mozilla::BitwiseCast<float>(INT32_MIN);
+ loadConstantFloat32(keepSignMask, scratch);
+ vandps(scratch, output, output);
+
+ float clearSignMask = mozilla::BitwiseCast<float>(INT32_MAX);
+ loadConstantFloat32(clearSignMask, scratch);
+ vandps(lhs, scratch, scratch);
+ } else {
+ float clearSignMask = mozilla::BitwiseCast<float>(INT32_MAX);
+ loadConstantFloat32(clearSignMask, scratch);
+ vandps(scratch, lhs, output);
+
+ float keepSignMask = mozilla::BitwiseCast<float>(INT32_MIN);
+ loadConstantFloat32(keepSignMask, scratch);
+ vandps(rhs, scratch, scratch);
+ }
+
+ vorps(scratch, output, output);
+}
+
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+ Register pointer) {
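+ // If the shift amount maps onto an x86 address scale, fold the shift and
+ // the add into a single address computation; otherwise shift explicitly
+ // and then add.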
+ if (IsShiftInScaleRange(shift)) {
+ computeEffectiveAddress(
+ BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
+ return;
+ }
+ lshift32(Imm32(shift), indexTemp32);
+ addPtr(indexTemp32, pointer);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
new file mode 100644
index 0000000000..9185abd647
--- /dev/null
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -0,0 +1,998 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_MacroAssembler_x86_shared_h
+#define jit_x86_shared_MacroAssembler_x86_shared_h
+
+#if defined(JS_CODEGEN_X86)
+# include "jit/x86/Assembler-x86.h"
+#elif defined(JS_CODEGEN_X64)
+# include "jit/x64/Assembler-x64.h"
+#endif
+
+namespace js {
+namespace jit {
+
+class MacroAssembler;
+
+class MacroAssemblerX86Shared : public Assembler {
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ public:
+#ifdef JS_CODEGEN_X64
+ typedef X86Encoding::JmpSrc UsesItem;
+#else
+ typedef CodeOffset UsesItem;
+#endif
+
+ typedef Vector<UsesItem, 0, SystemAllocPolicy> UsesVector;
+ static_assert(sizeof(UsesItem) == 4);
+
+ protected:
+ // For Double, Float and SimdData, make the move ctors explicit so that MSVC
+ // knows what to use instead of copying these data structures.
+ template <class T>
+ struct Constant {
+ using Pod = T;
+
+ T value;
+ UsesVector uses;
+
+ explicit Constant(const T& value) : value(value) {}
+ Constant(Constant<T>&& other)
+ : value(other.value), uses(std::move(other.uses)) {}
+ explicit Constant(const Constant<T>&) = delete;
+ };
+
+ // Containers use SystemAllocPolicy since wasm releases memory after each
+ // function is compiled, and these need to live until after all functions
+ // are compiled.
+ using Double = Constant<double>;
+ Vector<Double, 0, SystemAllocPolicy> doubles_;
+ typedef HashMap<double, size_t, DefaultHasher<double>, SystemAllocPolicy>
+ DoubleMap;
+ DoubleMap doubleMap_;
+
+ using Float = Constant<float>;
+ Vector<Float, 0, SystemAllocPolicy> floats_;
+ typedef HashMap<float, size_t, DefaultHasher<float>, SystemAllocPolicy>
+ FloatMap;
+ FloatMap floatMap_;
+
+ struct SimdData : public Constant<SimdConstant> {
+ explicit SimdData(SimdConstant d) : Constant<SimdConstant>(d) {}
+ SimdData(SimdData&& d) : Constant<SimdConstant>(std::move(d)) {}
+ explicit SimdData(const SimdData&) = delete;
+ SimdConstant::Type type() const { return value.type(); }
+ };
+
+ Vector<SimdData, 0, SystemAllocPolicy> simds_;
+ typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy>
+ SimdMap;
+ SimdMap simdMap_;
+
+ template <class T, class Map>
+ T* getConstant(const typename T::Pod& value, Map& map,
+ Vector<T, 0, SystemAllocPolicy>& vec);
+
+ Float* getFloat(float f);
+ Double* getDouble(double d);
+ SimdData* getSimdData(const SimdConstant& v);
+
+ public:
+ using Assembler::call;
+
+ MacroAssemblerX86Shared() = default;
+
+ bool appendRawCode(const uint8_t* code, size_t numBytes) {
+ return masm.appendRawCode(code, numBytes);
+ }
+
+ void addToPCRel4(uint32_t offset, int32_t bias) {
+ return masm.addToPCRel4(offset, bias);
+ }
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, second).
+ // Checks for NaN if canBeNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister second, bool canBeNaN,
+ bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister second, bool canBeNaN,
+ bool isMax);
+
+ void compareDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs) {
+ if (cond & DoubleConditionBitInvert) {
+ vucomisd(lhs, rhs);
+ } else {
+ vucomisd(rhs, lhs);
+ }
+ }
+
+ void compareFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs) {
+ if (cond & DoubleConditionBitInvert) {
+ vucomiss(lhs, rhs);
+ } else {
+ vucomiss(rhs, lhs);
+ }
+ }
+
+ void branchNegativeZero(FloatRegister reg, Register scratch, Label* label,
+ bool maybeNonZero = true);
+ void branchNegativeZeroFloat32(FloatRegister reg, Register scratch,
+ Label* label);
+
+ void move32(Imm32 imm, Register dest) {
+ // Use the ImmWord version of mov to register, which has special
+ // optimizations. Casting to uint32_t here ensures that the value
+ // is zero-extended.
+ mov(ImmWord(uint32_t(imm.value)), dest);
+ }
+ void move32(Imm32 imm, const Operand& dest) { movl(imm, dest); }
+ void move32(Register src, Register dest) { movl(src, dest); }
+ void move32(Register src, const Operand& dest) { movl(src, dest); }
+ void test32(Register lhs, Register rhs) { testl(rhs, lhs); }
+ void test32(const Address& addr, Imm32 imm) { testl(imm, Operand(addr)); }
+ void test32(const Operand lhs, Imm32 imm) { testl(imm, lhs); }
+ void test32(Register lhs, Imm32 rhs) { testl(rhs, lhs); }
+ void cmp32(Register lhs, Imm32 rhs) { cmpl(rhs, lhs); }
+ void cmp32(Register lhs, Register rhs) { cmpl(rhs, lhs); }
+ void cmp32(const Address& lhs, Register rhs) { cmp32(Operand(lhs), rhs); }
+ void cmp32(const Address& lhs, Imm32 rhs) { cmp32(Operand(lhs), rhs); }
+ void cmp32(const Operand& lhs, Imm32 rhs) { cmpl(rhs, lhs); }
+ void cmp32(const Operand& lhs, Register rhs) { cmpl(rhs, lhs); }
+ void cmp32(Register lhs, const Operand& rhs) { cmpl(rhs, lhs); }
+
+ void cmp16(const Address& lhs, Imm32 rhs) { cmp16(Operand(lhs), rhs); }
+ void cmp16(const Operand& lhs, Imm32 rhs) { cmpw(rhs, lhs); }
+
+ void cmp8(const Address& lhs, Imm32 rhs) { cmp8(Operand(lhs), rhs); }
+ void cmp8(const Operand& lhs, Imm32 rhs) { cmpb(rhs, lhs); }
+ void cmp8(const Operand& lhs, Register rhs) { cmpb(rhs, lhs); }
+
+ void atomic_inc32(const Operand& addr) { lock_incl(addr); }
+ void atomic_dec32(const Operand& addr) { lock_decl(addr); }
+
+ void storeLoadFence() {
+ // This implementation follows Linux.
+ if (HasSSE2()) {
+ masm.mfence();
+ } else {
+ lock_addl(Imm32(0), Operand(Address(esp, 0)));
+ }
+ }
+
+ void branch16(Condition cond, Register lhs, Register rhs, Label* label) {
+ cmpw(rhs, lhs);
+ j(cond, label);
+ }
+ void branchTest16(Condition cond, Register lhs, Register rhs, Label* label) {
+ testw(rhs, lhs);
+ j(cond, label);
+ }
+
+ void jump(Label* label) { jmp(label); }
+ void jump(JitCode* code) { jmp(code); }
+ void jump(TrampolinePtr code) { jmp(ImmPtr(code.value)); }
+ void jump(ImmPtr ptr) { jmp(ptr); }
+ void jump(Register reg) { jmp(Operand(reg)); }
+ void jump(const Address& addr) { jmp(Operand(addr)); }
+
+ void convertInt32ToDouble(Register src, FloatRegister dest) {
+ // vcvtsi2sd and friends write only part of their output register, which
+ // causes slowdowns on out-of-order processors. Explicitly break
+ // dependencies with vxorpd (and vxorps elsewhere), which are handled
+ // specially in modern CPUs, for this purpose. See sections 8.14, 9.8,
+ // 10.8, 12.9, 13.16, 14.14, and 15.8 of Agner's Microarchitecture
+ // document.
+ zeroDouble(dest);
+ vcvtsi2sd(src, dest, dest);
+ }
+ void convertInt32ToDouble(const Address& src, FloatRegister dest) {
+ convertInt32ToDouble(Operand(src), dest);
+ }
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
+ convertInt32ToDouble(Operand(src), dest);
+ }
+ void convertInt32ToDouble(const Operand& src, FloatRegister dest) {
+    // Clear the output register first to break dependencies; see above.
+ zeroDouble(dest);
+ vcvtsi2sd(Operand(src), dest, dest);
+ }
+ void convertInt32ToFloat32(Register src, FloatRegister dest) {
+    // Clear the output register first to break dependencies; see above.
+ zeroFloat32(dest);
+ vcvtsi2ss(src, dest, dest);
+ }
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest) {
+ convertInt32ToFloat32(Operand(src), dest);
+ }
+ void convertInt32ToFloat32(const Operand& src, FloatRegister dest) {
+    // Clear the output register first to break dependencies; see above.
+ zeroFloat32(dest);
+ vcvtsi2ss(src, dest, dest);
+ }
+ Condition testDoubleTruthy(bool truthy, FloatRegister reg) {
+ ScratchDoubleScope scratch(asMasm());
+ zeroDouble(scratch);
+ vucomisd(reg, scratch);
+ return truthy ? NonZero : Zero;
+ }
+
+ // Class which ensures that registers used in byte ops are compatible with
+ // such instructions, even if the original register passed in wasn't. This
+ // only applies to x86, as on x64 all registers are valid single byte regs.
+ // This doesn't lead to great code but helps to simplify code generation.
+ //
+ // Note that this can currently only be used in cases where the register is
+ // read from by the guarded instruction, not written to.
+ class AutoEnsureByteRegister {
+ MacroAssemblerX86Shared* masm;
+ Register original_;
+ Register substitute_;
+
+ public:
+ template <typename T>
+ AutoEnsureByteRegister(MacroAssemblerX86Shared* masm, T address,
+ Register reg)
+ : masm(masm), original_(reg) {
+ AllocatableGeneralRegisterSet singleByteRegs(Registers::SingleByteRegs);
+ if (singleByteRegs.has(reg)) {
+ substitute_ = reg;
+ } else {
+ MOZ_ASSERT(address.base != StackPointer);
+ do {
+ substitute_ = singleByteRegs.takeAny();
+ } while (Operand(address).containsReg(substitute_));
+
+ masm->push(substitute_);
+ masm->mov(reg, substitute_);
+ }
+ }
+
+ ~AutoEnsureByteRegister() {
+ if (original_ != substitute_) {
+ masm->pop(substitute_);
+ }
+ }
+
+ Register reg() { return substitute_; }
+ };
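+
+  // Rough illustration, assuming the allocator hands out eax first: on x86
+  // only eax, ebx, ecx and edx have byte forms, so store8(esi, dest) below
+  // cannot encode a byte move directly and instead emits roughly
+  //   push(eax); mov(esi, eax); movb(al, dest); pop(eax)
+  // where the substitute register is chosen so it is not used by the
+  // destination operand.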
+
+ void load8ZeroExtend(const Operand& src, Register dest) { movzbl(src, dest); }
+ void load8ZeroExtend(const Address& src, Register dest) {
+ movzbl(Operand(src), dest);
+ }
+ void load8ZeroExtend(const BaseIndex& src, Register dest) {
+ movzbl(Operand(src), dest);
+ }
+ void load8SignExtend(const Operand& src, Register dest) { movsbl(src, dest); }
+ void load8SignExtend(const Address& src, Register dest) {
+ movsbl(Operand(src), dest);
+ }
+ void load8SignExtend(const BaseIndex& src, Register dest) {
+ movsbl(Operand(src), dest);
+ }
+ template <typename T>
+ void store8(Imm32 src, const T& dest) {
+ movb(src, Operand(dest));
+ }
+ template <typename T>
+ void store8(Register src, const T& dest) {
+ AutoEnsureByteRegister ensure(this, dest, src);
+ movb(ensure.reg(), Operand(dest));
+ }
+ void load16ZeroExtend(const Operand& src, Register dest) {
+ movzwl(src, dest);
+ }
+ void load16ZeroExtend(const Address& src, Register dest) {
+ movzwl(Operand(src), dest);
+ }
+ void load16ZeroExtend(const BaseIndex& src, Register dest) {
+ movzwl(Operand(src), dest);
+ }
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ load16ZeroExtend(src, dest);
+ }
+ template <typename S, typename T>
+ void store16(const S& src, const T& dest) {
+ movw(src, Operand(dest));
+ }
+ template <typename S, typename T>
+ void store16Unaligned(const S& src, const T& dest) {
+ store16(src, dest);
+ }
+ void load16SignExtend(const Operand& src, Register dest) {
+ movswl(src, dest);
+ }
+ void load16SignExtend(const Address& src, Register dest) {
+ movswl(Operand(src), dest);
+ }
+ void load16SignExtend(const BaseIndex& src, Register dest) {
+ movswl(Operand(src), dest);
+ }
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ load16SignExtend(src, dest);
+ }
+ void load32(const Address& address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void load32(const BaseIndex& src, Register dest) { movl(Operand(src), dest); }
+ void load32(const Operand& src, Register dest) { movl(src, dest); }
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ load32(src, dest);
+ }
+ template <typename S, typename T>
+ void store32(const S& src, const T& dest) {
+ movl(src, Operand(dest));
+ }
+ template <typename S, typename T>
+ void store32Unaligned(const S& src, const T& dest) {
+ store32(src, dest);
+ }
+ void loadDouble(const Address& src, FloatRegister dest) { vmovsd(src, dest); }
+ void loadDouble(const BaseIndex& src, FloatRegister dest) {
+ vmovsd(src, dest);
+ }
+ void loadDouble(const Operand& src, FloatRegister dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ loadDouble(src.toAddress(), dest);
+ break;
+ case Operand::MEM_SCALE:
+ loadDouble(src.toBaseIndex(), dest);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ // Use vmovapd instead of vmovsd to avoid dependencies.
+ vmovapd(src, dest);
+ }
+ void zeroDouble(FloatRegister reg) { vxorpd(reg, reg, reg); }
+ void zeroFloat32(FloatRegister reg) { vxorps(reg, reg, reg); }
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
+ vcvtss2sd(src, dest, dest);
+ }
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) {
+ vcvtsd2ss(src, dest, dest);
+ }
+
+ void loadInt32x4(const Address& addr, FloatRegister dest) {
+ vmovdqa(Operand(addr), dest);
+ }
+ void loadFloat32x4(const Address& addr, FloatRegister dest) {
+ vmovaps(Operand(addr), dest);
+ }
+ void storeInt32x4(FloatRegister src, const Address& addr) {
+ vmovdqa(src, Operand(addr));
+ }
+ void storeFloat32x4(FloatRegister src, const Address& addr) {
+ vmovaps(src, Operand(addr));
+ }
+
+ void convertFloat32x4ToInt32x4(FloatRegister src, FloatRegister dest) {
+ // Note that if the conversion failed (because the converted
+ // result is larger than the maximum signed int32, or less than the
+    // least signed int32, or NaN), this will return the integer indefinite
+    // value (0x80000000).
+ vcvttps2dq(src, dest);
+ }
+ void convertInt32x4ToFloat32x4(FloatRegister src, FloatRegister dest) {
+ vcvtdq2ps(src, dest);
+ }
+
+ void binarySimd128(const SimdConstant& rhs, FloatRegister lhsDest,
+ void (MacroAssembler::*regOp)(const Operand&,
+ FloatRegister,
+ FloatRegister),
+ void (MacroAssembler::*constOp)(const SimdConstant&,
+ FloatRegister));
+ void binarySimd128(
+ FloatRegister lhs, const SimdConstant& rhs, FloatRegister dest,
+ void (MacroAssembler::*regOp)(const Operand&, FloatRegister,
+ FloatRegister),
+ void (MacroAssembler::*constOp)(const SimdConstant&, FloatRegister,
+ FloatRegister));
+ void binarySimd128(const SimdConstant& rhs, FloatRegister lhsDest,
+ void (MacroAssembler::*regOp)(const Operand&,
+ FloatRegister),
+ void (MacroAssembler::*constOp)(const SimdConstant&,
+ FloatRegister));
+
+ // SIMD methods, defined in MacroAssembler-x86-shared-SIMD.cpp.
+
+ void unsignedConvertInt32x4ToFloat32x4(FloatRegister src, FloatRegister dest);
+ void unsignedConvertInt32x4ToFloat64x2(FloatRegister src, FloatRegister dest);
+ void bitwiseTestSimd128(const SimdConstant& rhs, FloatRegister lhs);
+
+ void truncSatFloat32x4ToInt32x4(FloatRegister src, FloatRegister dest);
+ void unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src, FloatRegister temp,
+ FloatRegister dest);
+ void unsignedTruncFloat32x4ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest);
+ void truncSatFloat64x2ToInt32x4(FloatRegister src, FloatRegister temp,
+ FloatRegister dest);
+ void unsignedTruncSatFloat64x2ToInt32x4(FloatRegister src, FloatRegister temp,
+ FloatRegister dest);
+ void unsignedTruncFloat64x2ToInt32x4Relaxed(FloatRegister src,
+ FloatRegister dest);
+
+ void splatX16(Register input, FloatRegister output);
+ void splatX8(Register input, FloatRegister output);
+ void splatX4(Register input, FloatRegister output);
+ void splatX4(FloatRegister input, FloatRegister output);
+ void splatX2(FloatRegister input, FloatRegister output);
+
+ void extractLaneInt32x4(FloatRegister input, Register output, unsigned lane);
+ void extractLaneFloat32x4(FloatRegister input, FloatRegister output,
+ unsigned lane);
+ void extractLaneFloat64x2(FloatRegister input, FloatRegister output,
+ unsigned lane);
+ void extractLaneInt16x8(FloatRegister input, Register output, unsigned lane,
+ SimdSign sign);
+ void extractLaneInt8x16(FloatRegister input, Register output, unsigned lane,
+ SimdSign sign);
+
+ void replaceLaneFloat32x4(unsigned lane, FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest);
+ void replaceLaneFloat64x2(unsigned lane, FloatRegister lhs, FloatRegister rhs,
+ FloatRegister dest);
+
+ void shuffleInt8x16(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output, const uint8_t lanes[16]);
+ void blendInt8x16(FloatRegister lhs, FloatRegister rhs, FloatRegister output,
+ FloatRegister temp, const uint8_t lanes[16]);
+ void blendInt16x8(FloatRegister lhs, FloatRegister rhs, FloatRegister output,
+ const uint16_t lanes[8]);
+ void laneSelectSimd128(FloatRegister mask, FloatRegister lhs,
+ FloatRegister rhs, FloatRegister output);
+
+ void compareInt8x16(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
+ FloatRegister output);
+ void compareInt8x16(Assembler::Condition cond, FloatRegister lhs,
+ const SimdConstant& rhs, FloatRegister dest);
+ void compareInt16x8(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
+ FloatRegister output);
+ void compareInt16x8(Assembler::Condition cond, FloatRegister lhs,
+ const SimdConstant& rhs, FloatRegister dest);
+ void compareInt32x4(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
+ FloatRegister output);
+ void compareInt32x4(Assembler::Condition cond, FloatRegister lhs,
+ const SimdConstant& rhs, FloatRegister dest);
+ void compareForEqualityInt64x2(FloatRegister lhs, Operand rhs,
+ Assembler::Condition cond,
+ FloatRegister output);
+ void compareForOrderingInt64x2(FloatRegister lhs, Operand rhs,
+ Assembler::Condition cond, FloatRegister temp1,
+ FloatRegister temp2, FloatRegister output);
+ void compareForOrderingInt64x2AVX(FloatRegister lhs, FloatRegister rhs,
+ Assembler::Condition cond,
+ FloatRegister output);
+ void compareFloat32x4(FloatRegister lhs, Operand rhs,
+ Assembler::Condition cond, FloatRegister output);
+ void compareFloat32x4(Assembler::Condition cond, FloatRegister lhs,
+ const SimdConstant& rhs, FloatRegister dest);
+ void compareFloat64x2(FloatRegister lhs, Operand rhs,
+ Assembler::Condition cond, FloatRegister output);
+ void compareFloat64x2(Assembler::Condition cond, FloatRegister lhs,
+ const SimdConstant& rhs, FloatRegister dest);
+
+ void minMaxFloat32x4(bool isMin, FloatRegister lhs, Operand rhs,
+ FloatRegister temp1, FloatRegister temp2,
+ FloatRegister output);
+ void minMaxFloat32x4AVX(bool isMin, FloatRegister lhs, FloatRegister rhs,
+ FloatRegister temp1, FloatRegister temp2,
+ FloatRegister output);
+ void minMaxFloat64x2(bool isMin, FloatRegister lhs, Operand rhs,
+ FloatRegister temp1, FloatRegister temp2,
+ FloatRegister output);
+ void minMaxFloat64x2AVX(bool isMin, FloatRegister lhs, FloatRegister rhs,
+ FloatRegister temp1, FloatRegister temp2,
+ FloatRegister output);
+ void minFloat32x4(FloatRegister lhs, FloatRegister rhs, FloatRegister temp1,
+ FloatRegister temp2, FloatRegister output);
+ void maxFloat32x4(FloatRegister lhs, FloatRegister rhs, FloatRegister temp1,
+ FloatRegister temp2, FloatRegister output);
+
+ void minFloat64x2(FloatRegister lhs, FloatRegister rhs, FloatRegister temp1,
+ FloatRegister temp2, FloatRegister output);
+ void maxFloat64x2(FloatRegister lhs, FloatRegister rhs, FloatRegister temp1,
+ FloatRegister temp2, FloatRegister output);
+
+ void packedShiftByScalarInt8x16(
+ FloatRegister in, Register count, FloatRegister xtmp, FloatRegister dest,
+ void (MacroAssemblerX86Shared::*shift)(FloatRegister, FloatRegister,
+ FloatRegister),
+ void (MacroAssemblerX86Shared::*extend)(const Operand&, FloatRegister));
+
+ void packedLeftShiftByScalarInt8x16(FloatRegister in, Register count,
+ FloatRegister xtmp, FloatRegister dest);
+ void packedLeftShiftByScalarInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest);
+ void packedRightShiftByScalarInt8x16(FloatRegister in, Register count,
+ FloatRegister xtmp, FloatRegister dest);
+ void packedRightShiftByScalarInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest);
+ void packedUnsignedRightShiftByScalarInt8x16(FloatRegister in, Register count,
+ FloatRegister xtmp,
+ FloatRegister dest);
+ void packedUnsignedRightShiftByScalarInt8x16(Imm32 count, FloatRegister src,
+ FloatRegister dest);
+
+ void packedLeftShiftByScalarInt16x8(FloatRegister in, Register count,
+ FloatRegister dest);
+ void packedRightShiftByScalarInt16x8(FloatRegister in, Register count,
+ FloatRegister dest);
+ void packedUnsignedRightShiftByScalarInt16x8(FloatRegister in, Register count,
+ FloatRegister dest);
+
+ void packedLeftShiftByScalarInt32x4(FloatRegister in, Register count,
+ FloatRegister dest);
+ void packedRightShiftByScalarInt32x4(FloatRegister in, Register count,
+ FloatRegister dest);
+ void packedUnsignedRightShiftByScalarInt32x4(FloatRegister in, Register count,
+ FloatRegister dest);
+ void packedLeftShiftByScalarInt64x2(FloatRegister in, Register count,
+ FloatRegister dest);
+ void packedRightShiftByScalarInt64x2(FloatRegister in, Register count,
+ FloatRegister temp, FloatRegister dest);
+ void packedRightShiftByScalarInt64x2(Imm32 count, FloatRegister src,
+ FloatRegister dest);
+ void packedUnsignedRightShiftByScalarInt64x2(FloatRegister in, Register count,
+ FloatRegister dest);
+ void selectSimd128(FloatRegister mask, FloatRegister onTrue,
+ FloatRegister onFalse, FloatRegister temp,
+ FloatRegister output);
+ void popcntInt8x16(FloatRegister src, FloatRegister temp,
+ FloatRegister output);
+
+  // SIMD inline methods private to the implementation that appear to be used.
+
+ template <class T, class Reg>
+ inline void loadScalar(const Operand& src, Reg dest);
+ template <class T, class Reg>
+ inline void storeScalar(Reg src, const Address& dest);
+ template <class T>
+ inline void loadAlignedVector(const Address& src, FloatRegister dest);
+ template <class T>
+ inline void storeAlignedVector(FloatRegister src, const Address& dest);
+
+ void loadAlignedSimd128Int(const Address& src, FloatRegister dest) {
+ vmovdqa(Operand(src), dest);
+ }
+ void loadAlignedSimd128Int(const Operand& src, FloatRegister dest) {
+ vmovdqa(src, dest);
+ }
+ void storeAlignedSimd128Int(FloatRegister src, const Address& dest) {
+ vmovdqa(src, Operand(dest));
+ }
+ void moveSimd128Int(FloatRegister src, FloatRegister dest) {
+ if (src != dest) {
+ vmovdqa(src, dest);
+ }
+ }
+ FloatRegister moveSimd128IntIfNotAVX(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(src.isSimd128() && dest.isSimd128());
+ if (HasAVX()) {
+ return src;
+ }
+ moveSimd128Int(src, dest);
+ return dest;
+ }
+ FloatRegister selectDestIfAVX(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(src.isSimd128() && dest.isSimd128());
+ return HasAVX() ? dest : src;
+ }
+ void loadUnalignedSimd128Int(const Address& src, FloatRegister dest) {
+ vmovdqu(Operand(src), dest);
+ }
+ void loadUnalignedSimd128Int(const BaseIndex& src, FloatRegister dest) {
+ vmovdqu(Operand(src), dest);
+ }
+ void loadUnalignedSimd128Int(const Operand& src, FloatRegister dest) {
+ vmovdqu(src, dest);
+ }
+ void storeUnalignedSimd128Int(FloatRegister src, const Address& dest) {
+ vmovdqu(src, Operand(dest));
+ }
+ void storeUnalignedSimd128Int(FloatRegister src, const BaseIndex& dest) {
+ vmovdqu(src, Operand(dest));
+ }
+ void storeUnalignedSimd128Int(FloatRegister src, const Operand& dest) {
+ vmovdqu(src, dest);
+ }
+ void packedLeftShiftByScalarInt16x8(Imm32 count, FloatRegister dest) {
+ count.value &= 15;
+ vpsllw(count, dest, dest);
+ }
+ void packedRightShiftByScalarInt16x8(Imm32 count, FloatRegister dest) {
+ count.value &= 15;
+ vpsraw(count, dest, dest);
+ }
+ void packedUnsignedRightShiftByScalarInt16x8(Imm32 count,
+ FloatRegister dest) {
+ count.value &= 15;
+ vpsrlw(count, dest, dest);
+ }
+ void packedLeftShiftByScalarInt32x4(Imm32 count, FloatRegister dest) {
+ count.value &= 31;
+ vpslld(count, dest, dest);
+ }
+ void packedRightShiftByScalarInt32x4(Imm32 count, FloatRegister dest) {
+ count.value &= 31;
+ vpsrad(count, dest, dest);
+ }
+ void packedUnsignedRightShiftByScalarInt32x4(Imm32 count,
+ FloatRegister dest) {
+ count.value &= 31;
+ vpsrld(count, dest, dest);
+ }
+ void loadAlignedSimd128Float(const Address& src, FloatRegister dest) {
+ vmovaps(Operand(src), dest);
+ }
+ void loadAlignedSimd128Float(const Operand& src, FloatRegister dest) {
+ vmovaps(src, dest);
+ }
+ void storeAlignedSimd128Float(FloatRegister src, const Address& dest) {
+ vmovaps(src, Operand(dest));
+ }
+ void moveSimd128Float(FloatRegister src, FloatRegister dest) {
+ if (src != dest) {
+ vmovaps(src, dest);
+ }
+ }
+ FloatRegister moveSimd128FloatIfNotAVX(FloatRegister src,
+ FloatRegister dest) {
+ MOZ_ASSERT(src.isSimd128() && dest.isSimd128());
+ if (HasAVX()) {
+ return src;
+ }
+ moveSimd128Float(src, dest);
+ return dest;
+ }
+ FloatRegister moveSimd128FloatIfEqual(FloatRegister src, FloatRegister dest,
+ FloatRegister other) {
+ MOZ_ASSERT(src.isSimd128() && dest.isSimd128());
+ if (src != other) {
+ return src;
+ }
+ moveSimd128Float(src, dest);
+ return dest;
+ }
+ FloatRegister moveSimd128FloatIfNotAVXOrOther(FloatRegister src,
+ FloatRegister dest,
+ FloatRegister other) {
+ MOZ_ASSERT(src.isSimd128() && dest.isSimd128());
+ if (HasAVX() && src != other) {
+ return src;
+ }
+ moveSimd128Float(src, dest);
+ return dest;
+ }
+
+ void loadUnalignedSimd128(const Operand& src, FloatRegister dest) {
+ vmovups(src, dest);
+ }
+ void storeUnalignedSimd128(FloatRegister src, const Operand& dest) {
+ vmovups(src, dest);
+ }
+
+ static uint32_t ComputeShuffleMask(uint32_t x = 0, uint32_t y = 1,
+ uint32_t z = 2, uint32_t w = 3) {
+ MOZ_ASSERT(x < 4 && y < 4 && z < 4 && w < 4);
+ uint32_t r = (w << 6) | (z << 4) | (y << 2) | (x << 0);
+ MOZ_ASSERT(r < 256);
+ return r;
+ }
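+
+  // Worked example: ComputeShuffleMask(2, 3, 0, 1) yields
+  // (1 << 6) | (0 << 4) | (3 << 2) | 2 == 0x4E, the pshufd immediate that
+  // swaps the low and high 64-bit halves of a vector (destination lane i is
+  // taken from the source lane selected by bits [2*i+1 : 2*i] of the mask).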
+
+ void shuffleInt32(uint32_t mask, FloatRegister src, FloatRegister dest) {
+ vpshufd(mask, src, dest);
+ }
+ void moveLowInt32(FloatRegister src, Register dest) { vmovd(src, dest); }
+
+ void moveHighPairToLowPairFloat32(FloatRegister src, FloatRegister dest) {
+ vmovhlps(src, dest, dest);
+ }
+ void moveFloatAsDouble(Register src, FloatRegister dest) {
+ vmovd(src, dest);
+ vcvtss2sd(dest, dest, dest);
+ }
+ void loadFloatAsDouble(const Address& src, FloatRegister dest) {
+ vmovss(src, dest);
+ vcvtss2sd(dest, dest, dest);
+ }
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) {
+ vmovss(src, dest);
+ vcvtss2sd(dest, dest, dest);
+ }
+ void loadFloatAsDouble(const Operand& src, FloatRegister dest) {
+ loadFloat32(src, dest);
+ vcvtss2sd(dest, dest, dest);
+ }
+ void loadFloat32(const Address& src, FloatRegister dest) {
+ vmovss(src, dest);
+ }
+ void loadFloat32(const BaseIndex& src, FloatRegister dest) {
+ vmovss(src, dest);
+ }
+ void loadFloat32(const Operand& src, FloatRegister dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ loadFloat32(src.toAddress(), dest);
+ break;
+ case Operand::MEM_SCALE:
+ loadFloat32(src.toBaseIndex(), dest);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ // Use vmovaps instead of vmovss to avoid dependencies.
+ vmovaps(src, dest);
+ }
+
+  // Checks whether a double is representable as a 32-bit integer. If so, the
+  // integer is written to the output register. Otherwise, control jumps to
+  // the given failure label. This function overwrites the scratch double
+  // register.
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true) {
+ // Check for -0.0
+ if (negativeZeroCheck) {
+ branchNegativeZero(src, dest, fail);
+ }
+
+ ScratchDoubleScope scratch(asMasm());
+ vcvttsd2si(src, dest);
+ convertInt32ToDouble(dest, scratch);
+ vucomisd(scratch, src);
+ j(Assembler::Parity, fail);
+ j(Assembler::NotEqual, fail);
+ }
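+
+  // For intuition: 3.5 truncates to 3, and 3 converted back is 3.0 != 3.5,
+  // so NotEqual branches to fail; a NaN input makes vucomisd report
+  // unordered (PF set), so Parity branches to fail; only a double that holds
+  // an exact int32 value falls through with that integer in dest.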
+
+  // Checks whether a float32 is representable as a 32-bit integer. If so, the
+  // integer is written to the output register. Otherwise, control jumps to
+  // the given failure label. This function overwrites the scratch float32
+  // register.
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true) {
+ // Check for -0.0
+ if (negativeZeroCheck) {
+ branchNegativeZeroFloat32(src, dest, fail);
+ }
+
+ ScratchFloat32Scope scratch(asMasm());
+ vcvttss2si(src, dest);
+ convertInt32ToFloat32(dest, scratch);
+ vucomiss(scratch, src);
+ j(Assembler::Parity, fail);
+ j(Assembler::NotEqual, fail);
+ }
+
+ void truncateDoubleToInt32(FloatRegister src, Register dest, Label* fail) {
+ // vcvttsd2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow. The other possibility is to test
+ // equality for INT_MIN after a comparison, but 1 costs fewer bytes to
+ // materialize.
+ vcvttsd2si(src, dest);
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+ }
+ void truncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail) {
+ // Same trick as explained in the above comment.
+ vcvttss2si(src, dest);
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+ }
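+
+  // For intuition: "value - 1" overflows signed 32-bit arithmetic only for
+  // value == INT32_MIN (0x80000000), which is exactly the integer-indefinite
+  // result that vcvttsd2si/vcvttss2si produce on failure, so the
+  // cmp32/j(Overflow) pair catches the failure case. Note that a genuine
+  // INT32_MIN result also takes the fail path.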
+
+ inline void clampIntToUint8(Register reg);
+
+ bool maybeInlineDouble(double d, FloatRegister dest) {
+ // Loading zero with xor is specially optimized in hardware.
+ if (mozilla::IsPositiveZero(d)) {
+ zeroDouble(dest);
+ return true;
+ }
+
+ // It is also possible to load several common constants using vpcmpeqw
+ // to get all ones and then vpsllq and vpsrlq to get zeros at the ends,
+ // as described in "13.4 Generating constants" of
+ // "2. Optimizing subroutines in assembly language" by Agner Fog, and as
+ // previously implemented here. However, with x86 and x64 both using
+ // constant pool loads for double constants, this is probably only
+ // worthwhile in cases where a load is likely to be delayed.
+
+ return false;
+ }
+
+ bool maybeInlineFloat(float f, FloatRegister dest) {
+ // See comment above
+ if (mozilla::IsPositiveZero(f)) {
+ zeroFloat32(dest);
+ return true;
+ }
+ return false;
+ }
+
+ bool maybeInlineSimd128Int(const SimdConstant& v, const FloatRegister& dest) {
+ if (v.isZeroBits()) {
+ vpxor(dest, dest, dest);
+ return true;
+ }
+ if (v.isOneBits()) {
+ vpcmpeqw(Operand(dest), dest, dest);
+ return true;
+ }
+ return false;
+ }
+ bool maybeInlineSimd128Float(const SimdConstant& v,
+ const FloatRegister& dest) {
+ if (v.isZeroBits()) {
+ vxorps(dest, dest, dest);
+ return true;
+ }
+ return false;
+ }
+
+ void convertBoolToInt32(Register source, Register dest) {
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ movzbl(source, dest);
+ }
+
+ void emitSet(Assembler::Condition cond, Register dest,
+ Assembler::NaNCond ifNaN = Assembler::NaN_HandledByCond) {
+ if (AllocatableGeneralRegisterSet(Registers::SingleByteRegs).has(dest)) {
+ // If the register we're defining is a single byte register,
+ // take advantage of the setCC instruction
+ setCC(cond, dest);
+ movzbl(dest, dest);
+
+ if (ifNaN != Assembler::NaN_HandledByCond) {
+ Label noNaN;
+ j(Assembler::NoParity, &noNaN);
+ mov(ImmWord(ifNaN == Assembler::NaN_IsTrue), dest);
+ bind(&noNaN);
+ }
+ } else {
+ Label end;
+ Label ifFalse;
+
+ if (ifNaN == Assembler::NaN_IsFalse) {
+ j(Assembler::Parity, &ifFalse);
+ }
+      // Note a subtlety here: FLAGS is live at this point, and the mov
+      // interface doesn't guarantee to preserve FLAGS (it may optimize the
+      // immediate load into an instruction that clobbers them). Use movl
+      // instead: it always emits a plain mov, which leaves FLAGS untouched.
+ movl(Imm32(1), dest);
+ j(cond, &end);
+ if (ifNaN == Assembler::NaN_IsTrue) {
+ j(Assembler::Parity, &end);
+ }
+ bind(&ifFalse);
+ mov(ImmWord(0), dest);
+
+ bind(&end);
+ }
+ }
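+
+  // For intuition: after vucomisd/vucomiss, an unordered (NaN) comparison
+  // sets PF, so NaN_IsFalse forces the result to 0 on the Parity branch and
+  // NaN_IsTrue forces it to 1, while NaN_HandledByCond assumes the condition
+  // itself already yields the desired answer for NaN inputs.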
+
+ void emitSetRegisterIf(AssemblerX86Shared::Condition cond, Register dest) {
+ if (AllocatableGeneralRegisterSet(Registers::SingleByteRegs).has(dest)) {
+ // If the register we're defining is a single byte register,
+ // take advantage of the setCC instruction
+ setCC(cond, dest);
+ movzbl(dest, dest);
+ } else {
+ Label end;
+ movl(Imm32(1), dest);
+ j(cond, &end);
+ mov(ImmWord(0), dest);
+ bind(&end);
+ }
+ }
+
+ // Emit a JMP that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label) {
+ CodeOffset offset(size());
+ jump(label);
+ return offset;
+ }
+
+ template <typename T>
+ void computeEffectiveAddress(const T& address, Register dest) {
+ lea(Operand(address), dest);
+ }
+
+ void checkStackAlignment() {
+ // Exists for ARM compatibility.
+ }
+
+ void abiret() { ret(); }
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+};
+
+// Specialize for float to use movaps. Use movdqa for everything else.
+template <>
+inline void MacroAssemblerX86Shared::loadAlignedVector<float>(
+ const Address& src, FloatRegister dest) {
+ loadAlignedSimd128Float(src, dest);
+}
+
+template <typename T>
+inline void MacroAssemblerX86Shared::loadAlignedVector(const Address& src,
+ FloatRegister dest) {
+ loadAlignedSimd128Int(src, dest);
+}
+
+// Specialize for float to use movaps. Use movdqa for everything else.
+template <>
+inline void MacroAssemblerX86Shared::storeAlignedVector<float>(
+ FloatRegister src, const Address& dest) {
+ storeAlignedSimd128Float(src, dest);
+}
+
+template <typename T>
+inline void MacroAssemblerX86Shared::storeAlignedVector(FloatRegister src,
+ const Address& dest) {
+ storeAlignedSimd128Int(src, dest);
+}
+
+template <>
+inline void MacroAssemblerX86Shared::loadScalar<int8_t>(const Operand& src,
+ Register dest) {
+ load8ZeroExtend(src, dest);
+}
+template <>
+inline void MacroAssemblerX86Shared::loadScalar<int16_t>(const Operand& src,
+ Register dest) {
+ load16ZeroExtend(src, dest);
+}
+template <>
+inline void MacroAssemblerX86Shared::loadScalar<int32_t>(const Operand& src,
+ Register dest) {
+ load32(src, dest);
+}
+template <>
+inline void MacroAssemblerX86Shared::loadScalar<float>(const Operand& src,
+ FloatRegister dest) {
+ loadFloat32(src, dest);
+}
+
+template <>
+inline void MacroAssemblerX86Shared::storeScalar<int8_t>(Register src,
+ const Address& dest) {
+ store8(src, dest);
+}
+template <>
+inline void MacroAssemblerX86Shared::storeScalar<int16_t>(Register src,
+ const Address& dest) {
+ store16(src, dest);
+}
+template <>
+inline void MacroAssemblerX86Shared::storeScalar<int32_t>(Register src,
+ const Address& dest) {
+ store32(src, dest);
+}
+template <>
+inline void MacroAssemblerX86Shared::storeScalar<float>(FloatRegister src,
+ const Address& dest) {
+ vmovss(src, dest);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_MacroAssembler_x86_shared_h */
diff --git a/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp b/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
new file mode 100644
index 0000000000..590bd90a37
--- /dev/null
+++ b/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
@@ -0,0 +1,528 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86-shared/MoveEmitter-x86-shared.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::Maybe;
+
+MoveEmitterX86::MoveEmitterX86(MacroAssembler& masm)
+ : inCycle_(false), masm(masm), pushedAtCycle_(-1) {
+ pushedAtStart_ = masm.framePushed();
+}
+
+// Examine the cycle in moves starting at position i. Determine if it's a
+// simple cycle consisting of all register-to-register moves in a single class,
+// and whether it can be implemented entirely by swaps.
+size_t MoveEmitterX86::characterizeCycle(const MoveResolver& moves, size_t i,
+ bool* allGeneralRegs,
+ bool* allFloatRegs) {
+ size_t swapCount = 0;
+
+ for (size_t j = i;; j++) {
+ const MoveOp& move = moves.getMove(j);
+
+ // If it isn't a cycle of registers of the same kind, we won't be able
+ // to optimize it.
+ if (!move.to().isGeneralReg()) {
+ *allGeneralRegs = false;
+ }
+ if (!move.to().isFloatReg()) {
+ *allFloatRegs = false;
+ }
+ if (!*allGeneralRegs && !*allFloatRegs) {
+ return -1;
+ }
+
+ // Stop iterating when we see the last one.
+ if (j != i && move.isCycleEnd()) {
+ break;
+ }
+
+ // Check that this move is actually part of the cycle. This is
+ // over-conservative when there are multiple reads from the same source,
+ // but that's expected to be rare.
+ if (move.from() != moves.getMove(j + 1).to()) {
+ *allGeneralRegs = false;
+ *allFloatRegs = false;
+ return -1;
+ }
+
+ swapCount++;
+ }
+
+ // Check that the last move cycles back to the first move.
+ const MoveOp& move = moves.getMove(i + swapCount);
+ if (move.from() != moves.getMove(i).to()) {
+ *allGeneralRegs = false;
+ *allFloatRegs = false;
+ return -1;
+ }
+
+ return swapCount;
+}
+
+// If we can emit optimized code for the cycle in moves starting at position i,
+// do so, and return true.
+bool MoveEmitterX86::maybeEmitOptimizedCycle(const MoveResolver& moves,
+ size_t i, bool allGeneralRegs,
+ bool allFloatRegs,
+ size_t swapCount) {
+ if (allGeneralRegs && swapCount <= 2) {
+ // Use x86's swap-integer-registers instruction if we only have a few
+ // swaps. (x86 also has a swap between registers and memory but it's
+ // slow.)
+ for (size_t k = 0; k < swapCount; k++) {
+ masm.xchg(moves.getMove(i + k).to().reg(),
+ moves.getMove(i + k + 1).to().reg());
+ }
+ return true;
+ }
+
+ if (allFloatRegs && swapCount == 1) {
+ // There's no xchg for xmm registers, but if we only need a single swap,
+ // it's cheap to do an XOR swap.
+ FloatRegister a = moves.getMove(i).to().floatReg();
+ FloatRegister b = moves.getMove(i + 1).to().floatReg();
+ masm.vxorpd(a, b, b);
+ masm.vxorpd(b, a, a);
+ masm.vxorpd(a, b, b);
+ return true;
+ }
+
+ return false;
+}
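+
+// As a sketch: if the resolver has scheduled a register cycle as the moves
+// (ebx -> eax), (ecx -> ebx), (eax -> ecx), with the last marked as the
+// cycle end, characterizeCycle reports swapCount == 2 with all general
+// registers, and the loop above then emits
+//   xchg(eax, ebx); xchg(ebx, ecx)
+// which rotates the three values without touching memory.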
+
+void MoveEmitterX86::emit(const MoveResolver& moves) {
+#if defined(JS_CODEGEN_X86) && defined(DEBUG)
+ // Clobber any scratch register we have, to make regalloc bugs more visible.
+ if (scratchRegister_.isSome()) {
+ masm.mov(ImmWord(0xdeadbeef), scratchRegister_.value());
+ }
+#endif
+
+ for (size_t i = 0; i < moves.numMoves(); i++) {
+#if defined(JS_CODEGEN_X86) && defined(DEBUG)
+ if (!scratchRegister_.isSome()) {
+ Maybe<Register> reg = findScratchRegister(moves, i);
+ if (reg.isSome()) {
+ masm.mov(ImmWord(0xdeadbeef), reg.value());
+ }
+ }
+#endif
+
+ const MoveOp& move = moves.getMove(i);
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(to, move.type());
+ inCycle_ = false;
+ continue;
+ }
+
+ if (move.isCycleBegin()) {
+ MOZ_ASSERT(!inCycle_);
+
+ // Characterize the cycle.
+ bool allGeneralRegs = true, allFloatRegs = true;
+ size_t swapCount =
+ characterizeCycle(moves, i, &allGeneralRegs, &allFloatRegs);
+
+ // Attempt to optimize it to avoid using the stack.
+ if (maybeEmitOptimizedCycle(moves, i, allGeneralRegs, allFloatRegs,
+ swapCount)) {
+ i += swapCount;
+ continue;
+ }
+
+ // Otherwise use the stack.
+ breakCycle(to, move.endCycleType());
+ inCycle_ = true;
+ }
+
+ // A normal move which is not part of a cycle.
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to, moves, i);
+ break;
+ case MoveOp::GENERAL:
+ emitGeneralMove(from, to, moves, i);
+ break;
+ case MoveOp::SIMD128:
+ emitSimd128Move(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+ }
+}
+
+MoveEmitterX86::~MoveEmitterX86() { assertDone(); }
+
+Address MoveEmitterX86::cycleSlot() {
+ if (pushedAtCycle_ == -1) {
+ // Reserve stack for cycle resolution
+ static_assert(SpillSlotSize == 16);
+ masm.reserveStack(SpillSlotSize);
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ return Address(StackPointer, masm.framePushed() - pushedAtCycle_);
+}
+
+Address MoveEmitterX86::toAddress(const MoveOperand& operand) const {
+ if (operand.base() != StackPointer) {
+ return Address(operand.base(), operand.disp());
+ }
+
+ MOZ_ASSERT(operand.disp() >= 0);
+
+ // Otherwise, the stack offset may need to be adjusted.
+ return Address(StackPointer,
+ operand.disp() + (masm.framePushed() - pushedAtStart_));
+}
+
+// Warning, do not use the resulting operand with pop instructions, since they
+// compute the effective destination address after altering the stack pointer.
+// Use toPopOperand if an Operand is needed for a pop.
+Operand MoveEmitterX86::toOperand(const MoveOperand& operand) const {
+ if (operand.isMemoryOrEffectiveAddress()) {
+ return Operand(toAddress(operand));
+ }
+ if (operand.isGeneralReg()) {
+ return Operand(operand.reg());
+ }
+
+ MOZ_ASSERT(operand.isFloatReg());
+ return Operand(operand.floatReg());
+}
+
+// This is the same as toOperand except that it computes an Operand suitable for
+// use in a pop.
+Operand MoveEmitterX86::toPopOperand(const MoveOperand& operand) const {
+ if (operand.isMemory()) {
+ if (operand.base() != StackPointer) {
+ return Operand(operand.base(), operand.disp());
+ }
+
+ MOZ_ASSERT(operand.disp() >= 0);
+
+ // Otherwise, the stack offset may need to be adjusted.
+ // Note the adjustment by the stack slot here, to offset for the fact that
+ // pop computes its effective address after incrementing the stack pointer.
+ return Operand(
+ StackPointer,
+ operand.disp() + (masm.framePushed() - sizeof(void*) - pushedAtStart_));
+ }
+ if (operand.isGeneralReg()) {
+ return Operand(operand.reg());
+ }
+
+ MOZ_ASSERT(operand.isFloatReg());
+ return Operand(operand.floatReg());
+}
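+
+// For intuition: on x86, a stack slot addressed as [esp + 12] by toOperand
+// must be addressed as [esp + 8] in a pop, because pop increments esp by
+// sizeof(void*) == 4 before computing the destination address; hence the
+// extra sizeof(void*) subtracted above.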
+
+void MoveEmitterX86::breakCycle(const MoveOperand& to, MoveOp::Type type) {
+  // A cycle has the pattern:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::SIMD128:
+ if (to.isMemory()) {
+ ScratchSimd128Scope scratch(masm);
+ masm.loadUnalignedSimd128(toAddress(to), scratch);
+ masm.storeUnalignedSimd128(scratch, cycleSlot());
+ } else {
+ masm.storeUnalignedSimd128(to.floatReg(), cycleSlot());
+ }
+ break;
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope scratch(masm);
+ masm.loadFloat32(toAddress(to), scratch);
+ masm.storeFloat32(scratch, cycleSlot());
+ } else {
+ masm.storeFloat32(to.floatReg(), cycleSlot());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope scratch(masm);
+ masm.loadDouble(toAddress(to), scratch);
+ masm.storeDouble(scratch, cycleSlot());
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot());
+ }
+ break;
+ case MoveOp::INT32:
+#ifdef JS_CODEGEN_X64
+ // x64 can't pop to a 32-bit destination, so don't push.
+ if (to.isMemory()) {
+ masm.load32(toAddress(to), ScratchReg);
+ masm.store32(ScratchReg, cycleSlot());
+ } else {
+ masm.store32(to.reg(), cycleSlot());
+ }
+ break;
+#endif
+ case MoveOp::GENERAL:
+ masm.Push(toOperand(to));
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterX86::completeCycle(const MoveOperand& to, MoveOp::Type type) {
+  // A cycle has the pattern:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+ switch (type) {
+ case MoveOp::SIMD128:
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+ MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
+ if (to.isMemory()) {
+ ScratchSimd128Scope scratch(masm);
+ masm.loadUnalignedSimd128(cycleSlot(), scratch);
+ masm.storeUnalignedSimd128(scratch, toAddress(to));
+ } else {
+ masm.loadUnalignedSimd128(cycleSlot(), to.floatReg());
+ }
+ break;
+ case MoveOp::FLOAT32:
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+ MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(float));
+ if (to.isMemory()) {
+ ScratchFloat32Scope scratch(masm);
+ masm.loadFloat32(cycleSlot(), scratch);
+ masm.storeFloat32(scratch, toAddress(to));
+ } else {
+ masm.loadFloat32(cycleSlot(), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+ MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(double));
+ if (to.isMemory()) {
+ ScratchDoubleScope scratch(masm);
+ masm.loadDouble(cycleSlot(), scratch);
+ masm.storeDouble(scratch, toAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+#ifdef JS_CODEGEN_X64
+ MOZ_ASSERT(pushedAtCycle_ != -1);
+ MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(int32_t));
+ // x64 can't pop to a 32-bit destination.
+ if (to.isMemory()) {
+ masm.load32(cycleSlot(), ScratchReg);
+ masm.store32(ScratchReg, toAddress(to));
+ } else {
+ masm.load32(cycleSlot(), to.reg());
+ }
+ break;
+#endif
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(masm.framePushed() - pushedAtStart_ >= sizeof(intptr_t));
+ masm.Pop(toPopOperand(to));
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterX86::emitInt32Move(const MoveOperand& from,
+ const MoveOperand& to,
+ const MoveResolver& moves, size_t i) {
+ if (from.isGeneralReg()) {
+ masm.move32(from.reg(), toOperand(to));
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.load32(toAddress(from), to.reg());
+ } else {
+ // Memory to memory gpr move.
+ MOZ_ASSERT(from.isMemory());
+ Maybe<Register> reg = findScratchRegister(moves, i);
+ if (reg.isSome()) {
+ masm.load32(toAddress(from), reg.value());
+ masm.move32(reg.value(), toOperand(to));
+ } else {
+ // No scratch register available; bounce it off the stack.
+ masm.Push(toOperand(from));
+ masm.Pop(toPopOperand(to));
+ }
+ }
+}
+
+void MoveEmitterX86::emitGeneralMove(const MoveOperand& from,
+ const MoveOperand& to,
+ const MoveResolver& moves, size_t i) {
+ if (from.isGeneralReg()) {
+ masm.mov(from.reg(), toOperand(to));
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
+ if (from.isMemory()) {
+ masm.loadPtr(toAddress(from), to.reg());
+ } else {
+ masm.lea(toOperand(from), to.reg());
+ }
+ } else if (from.isMemory()) {
+ // Memory to memory gpr move.
+ Maybe<Register> reg = findScratchRegister(moves, i);
+ if (reg.isSome()) {
+ masm.loadPtr(toAddress(from), reg.value());
+ masm.mov(reg.value(), toOperand(to));
+ } else {
+ // No scratch register available; bounce it off the stack.
+ masm.Push(toOperand(from));
+ masm.Pop(toPopOperand(to));
+ }
+ } else {
+ // Effective address to memory move.
+ MOZ_ASSERT(from.isEffectiveAddress());
+ Maybe<Register> reg = findScratchRegister(moves, i);
+ if (reg.isSome()) {
+ masm.lea(toOperand(from), reg.value());
+ masm.mov(reg.value(), toOperand(to));
+ } else {
+ // This is tricky without a scratch reg. We can't do an lea. Bounce the
+ // base register off the stack, then add the offset in place. Note that
+ // this clobbers FLAGS!
+ masm.Push(from.base());
+ masm.Pop(toPopOperand(to));
+ MOZ_ASSERT(to.isMemoryOrEffectiveAddress());
+ masm.addPtr(Imm32(from.disp()), toAddress(to));
+ }
+ }
+}
+
+void MoveEmitterX86::emitFloat32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSingle());
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSingle());
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveFloat32(from.floatReg(), to.floatReg());
+ } else {
+ masm.storeFloat32(from.floatReg(), toAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ masm.loadFloat32(toAddress(from), to.floatReg());
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchFloat32Scope scratch(masm);
+ masm.loadFloat32(toAddress(from), scratch);
+ masm.storeFloat32(scratch, toAddress(to));
+ }
+}
+
+void MoveEmitterX86::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isDouble());
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isDouble());
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else {
+ masm.storeDouble(from.floatReg(), toAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ masm.loadDouble(toAddress(from), to.floatReg());
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchDoubleScope scratch(masm);
+ masm.loadDouble(toAddress(from), scratch);
+ masm.storeDouble(scratch, toAddress(to));
+ }
+}
+
+void MoveEmitterX86::emitSimd128Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveSimd128(from.floatReg(), to.floatReg());
+ } else {
+ masm.storeUnalignedSimd128(from.floatReg(), toAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ masm.loadUnalignedSimd128(toAddress(from), to.floatReg());
+ } else {
+ // Memory to memory move.
+ MOZ_ASSERT(from.isMemory());
+ ScratchSimd128Scope scratch(masm);
+ masm.loadUnalignedSimd128(toAddress(from), scratch);
+ masm.storeUnalignedSimd128(scratch, toAddress(to));
+ }
+}
+
+void MoveEmitterX86::assertDone() { MOZ_ASSERT(!inCycle_); }
+
+void MoveEmitterX86::finish() {
+ assertDone();
+
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
+
+Maybe<Register> MoveEmitterX86::findScratchRegister(const MoveResolver& moves,
+ size_t initial) {
+#ifdef JS_CODEGEN_X86
+ if (scratchRegister_.isSome()) {
+ return scratchRegister_;
+ }
+
+ // All registers are either in use by this move group or are live
+ // afterwards. Look through the remaining moves for a register which is
+ // clobbered before it is used, and is thus dead at this point.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ for (size_t i = initial; i < moves.numMoves(); i++) {
+ const MoveOp& move = moves.getMove(i);
+ if (move.from().isGeneralReg()) {
+ regs.takeUnchecked(move.from().reg());
+ } else if (move.from().isMemoryOrEffectiveAddress()) {
+ regs.takeUnchecked(move.from().base());
+ }
+ if (move.to().isGeneralReg()) {
+ if (i != initial && !move.isCycleBegin() && regs.has(move.to().reg())) {
+ return mozilla::Some(move.to().reg());
+ }
+ regs.takeUnchecked(move.to().reg());
+ } else if (move.to().isMemoryOrEffectiveAddress()) {
+ regs.takeUnchecked(move.to().base());
+ }
+ }
+
+ return mozilla::Nothing();
+#else
+ return mozilla::Some(ScratchReg);
+#endif
+}
diff --git a/js/src/jit/x86-shared/MoveEmitter-x86-shared.h b/js/src/jit/x86-shared/MoveEmitter-x86-shared.h
new file mode 100644
index 0000000000..15a1680c9a
--- /dev/null
+++ b/js/src/jit/x86-shared/MoveEmitter-x86-shared.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MoveEmitter_x86_shared_h
+#define jit_MoveEmitter_x86_shared_h
+
+#include "mozilla/Maybe.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jit/MoveResolver.h"
+#include "jit/Registers.h"
+
+namespace js {
+namespace jit {
+
+struct Address;
+class MacroAssembler;
+class Operand;
+
+class MoveEmitterX86 {
+ bool inCycle_;
+ MacroAssembler& masm;
+
+ // Original stack push value.
+ uint32_t pushedAtStart_;
+
+ // This is a store stack offset for the cycle-break spill slot, snapshotting
+ // codegen->framePushed_ at the time it is allocated. -1 if not allocated.
+ int32_t pushedAtCycle_;
+
+#ifdef JS_CODEGEN_X86
+ // Optional scratch register for performing moves.
+ mozilla::Maybe<Register> scratchRegister_;
+#endif
+
+ void assertDone();
+ Address cycleSlot();
+ Address toAddress(const MoveOperand& operand) const;
+ Operand toOperand(const MoveOperand& operand) const;
+ Operand toPopOperand(const MoveOperand& operand) const;
+
+ size_t characterizeCycle(const MoveResolver& moves, size_t i,
+ bool* allGeneralRegs, bool* allFloatRegs);
+ bool maybeEmitOptimizedCycle(const MoveResolver& moves, size_t i,
+ bool allGeneralRegs, bool allFloatRegs,
+ size_t swapCount);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to,
+ const MoveResolver& moves, size_t i);
+ void emitGeneralMove(const MoveOperand& from, const MoveOperand& to,
+ const MoveResolver& moves, size_t i);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void emitSimd128Move(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& to, MoveOp::Type type);
+ void completeCycle(const MoveOperand& to, MoveOp::Type type);
+
+ public:
+ explicit MoveEmitterX86(MacroAssembler& masm);
+ ~MoveEmitterX86();
+ void emit(const MoveResolver& moves);
+ void finish();
+
+ void setScratchRegister(Register reg) {
+#ifdef JS_CODEGEN_X86
+ scratchRegister_.emplace(reg);
+#endif
+ }
+
+ mozilla::Maybe<Register> findScratchRegister(const MoveResolver& moves,
+ size_t i);
+};
+
+using MoveEmitter = MoveEmitterX86;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_MoveEmitter_x86_shared_h */
diff --git a/js/src/jit/x86-shared/Patching-x86-shared.h b/js/src/jit/x86-shared/Patching-x86-shared.h
new file mode 100644
index 0000000000..85c523cd15
--- /dev/null
+++ b/js/src/jit/x86-shared/Patching-x86-shared.h
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_shared_Patching_x86_shared_h
+#define jit_x86_shared_Patching_x86_shared_h
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+inline void* GetPointer(const void* where) {
+ void* res;
+ memcpy(&res, (const char*)where - sizeof(void*), sizeof(void*));
+ return res;
+}
+
+inline void SetPointer(void* where, const void* value) {
+ memcpy((char*)where - sizeof(void*), &value, sizeof(void*));
+}
+
+inline int32_t GetInt32(const void* where) {
+ int32_t res;
+ memcpy(&res, (const char*)where - sizeof(int32_t), sizeof(int32_t));
+ return res;
+}
+
+inline void SetInt32(void* where, int32_t value, uint32_t trailing = 0) {
+ memcpy((char*)where - trailing - sizeof(int32_t), &value, sizeof(int32_t));
+}
+
+inline void SetRel32(void* from, void* to, uint32_t trailing = 0) {
+ intptr_t offset =
+ reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ MOZ_ASSERT(offset == static_cast<int32_t>(offset),
+ "offset is too great for a 32-bit relocation");
+ if (offset != static_cast<int32_t>(offset)) {
+ MOZ_CRASH("offset is too great for a 32-bit relocation");
+ }
+
+ SetInt32(from, offset, trailing);
+}
+
+inline void* GetRel32Target(void* where) {
+ int32_t rel = GetInt32(where);
+ return (char*)where + rel;
+}
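+
+// Sketch of the intended round trip: if `end` points just past a jump's
+// rel32 field and `target` is the jump destination, then
+//   SetRel32(end, target);          // stores int32_t(target - end)
+//   void* t = GetRel32Target(end);  // reads it back: end + (target - end)
+// recovers `target`, matching the CPU rule that a rel32 is relative to the
+// end of the instruction.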
+
+// JmpSrc represents a positive offset within a code buffer, or an uninitialized
+// value. Lots of code depends on uninitialized JmpSrc holding the value -1, on
+// -1 being a legal value of JmpSrc, and on being able to initialize a JmpSrc
+// with the value -1.
+//
+// The value of the `offset` is always positive and <= MaxCodeBytesPerProcess,
+// see ProcessExecutableMemory.h. The latter quantity in turn must fit in an
+// i32. But we further require that the value is not precisely INT32_MAX, so as
+// to allow the JmpSrc value -1 to mean "uninitialized" without ambiguity.
+//
+// The quantity `trailing` denotes the number of bytes of data that follow the
+// patch field in the instruction. The offset points to the end of the
+// instruction as per normal. The information about trailing bytes is needed
+// separately from the offset to correctly patch instructions that have
+// immediates trailing the patch field (eg CMPSS and CMPSD). Currently the only
+// allowed values for `trailing` are 0 and 1.
+
+static_assert(MaxCodeBytesPerProcess < size_t(INT32_MAX), "Invariant");
+
+class JmpSrc {
+ public:
+ JmpSrc() : offset_(INT32_MAX), trailing_(0) {}
+ explicit JmpSrc(int32_t offset) : offset_(offset), trailing_(0) {
+ // offset -1 is stored as INT32_MAX
+ MOZ_ASSERT(offset == -1 || (offset >= 0 && offset < INT32_MAX));
+ }
+ JmpSrc(int32_t offset, uint32_t trailing)
+ : offset_(offset), trailing_(trailing) {
+ // Disallow offset -1 in this situation, it does not apply.
+ MOZ_ASSERT(offset >= 0 && offset < INT32_MAX);
+ MOZ_ASSERT(trailing <= 1);
+ }
+ int32_t offset() const {
+ return offset_ == INT32_MAX ? -1 : int32_t(offset_);
+ }
+ uint32_t trailing() const { return trailing_; }
+
+ private:
+ uint32_t offset_ : 31;
+ uint32_t trailing_ : 1;
+};
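+
+// For example, a default-constructed JmpSrc stores INT32_MAX internally and
+// reports offset() == -1 ("uninitialized"), while JmpSrc(0x10, 1) describes
+// an instruction ending 0x10 bytes into the buffer whose 4-byte patch field
+// is followed by one trailing immediate byte, as for CMPSS/CMPSD.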
+
+class JmpDst {
+ public:
+ explicit JmpDst(int32_t offset) : offset_(offset) {}
+ int32_t offset() const { return offset_; }
+
+ private:
+ int32_t offset_;
+};
+
+inline bool CanRelinkJump(void* from, void* to) {
+ intptr_t offset = static_cast<char*>(to) - static_cast<char*>(from);
+ return (offset == static_cast<int32_t>(offset));
+}
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_shared_Patching_x86_shared_h */
diff --git a/js/src/jit/x86/Assembler-x86.cpp b/js/src/jit/x86/Assembler-x86.cpp
new file mode 100644
index 0000000000..7296f77291
--- /dev/null
+++ b/js/src/jit/x86/Assembler-x86.cpp
@@ -0,0 +1,85 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/Assembler-x86.h"
+
+#include "gc/Marking.h"
+#include "util/Memory.h"
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator() : stackOffset_(0), current_() {}
+
+ABIArg ABIArgGenerator::next(MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ case MIRType::Simd128:
+ // On Win64, >64 bit args need to be passed by reference. However, wasm
+ // doesn't allow passing SIMD values to JS, so the only way to reach this
+ // is wasm to wasm calls. Ergo we can break the native ABI here and use
+ // the Wasm ABI instead.
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ AssemblerX86Shared::executableCopy(buffer);
+ for (RelativePatch& rp : jumps_) {
+ X86Encoding::SetRel32(buffer + rp.offset, rp.target);
+ }
+}
+
+class RelocationIterator {
+ CompactBufferReader reader_;
+ uint32_t offset_;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}
+
+ bool read() {
+ if (!reader_.more()) {
+ return false;
+ }
+ offset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const { return offset_; }
+};
+
+static inline JitCode* CodeFromJump(uint8_t* jump) {
+ uint8_t* target = (uint8_t*)X86Encoding::GetRel32Target(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode* child = CodeFromJump(code->raw() + iter.offset());
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ MOZ_ASSERT(child == CodeFromJump(code->raw() + iter.offset()));
+ }
+}
diff --git a/js/src/jit/x86/Assembler-x86.h b/js/src/jit/x86/Assembler-x86.h
new file mode 100644
index 0000000000..12d790740e
--- /dev/null
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -0,0 +1,1079 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_Assembler_x86_h
+#define jit_x86_Assembler_x86_h
+
+#include <iterator>
+
+#include "jit/CompactBuffer.h"
+#include "jit/JitCode.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/x86-shared/Constants-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register eax{X86Encoding::rax};
+static constexpr Register ecx{X86Encoding::rcx};
+static constexpr Register edx{X86Encoding::rdx};
+static constexpr Register ebx{X86Encoding::rbx};
+static constexpr Register esp{X86Encoding::rsp};
+static constexpr Register ebp{X86Encoding::rbp};
+static constexpr Register esi{X86Encoding::rsi};
+static constexpr Register edi{X86Encoding::rdi};
+
+static constexpr FloatRegister xmm0 =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister xmm1 =
+ FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
+static constexpr FloatRegister xmm2 =
+ FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
+static constexpr FloatRegister xmm3 =
+ FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
+static constexpr FloatRegister xmm4 =
+ FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
+static constexpr FloatRegister xmm5 =
+ FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
+static constexpr FloatRegister xmm6 =
+ FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
+static constexpr FloatRegister xmm7 =
+ FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+
+// Vector registers fixed for use with some instructions, e.g. PBLENDVB.
+static constexpr FloatRegister vmm0 =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+
+static constexpr Register InvalidReg{X86Encoding::invalid_reg};
+static constexpr FloatRegister InvalidFloatReg = FloatRegister();
+
+static constexpr Register JSReturnReg_Type = ecx;
+static constexpr Register JSReturnReg_Data = edx;
+static constexpr Register StackPointer = esp;
+static constexpr Register FramePointer = ebp;
+static constexpr Register ReturnReg = eax;
+static constexpr FloatRegister ReturnFloat32Reg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
+static constexpr FloatRegister ReturnDoubleReg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister ReturnSimd128Reg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+static constexpr FloatRegister ScratchFloat32Reg_ =
+ FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
+static constexpr FloatRegister ScratchDoubleReg_ =
+ FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+static constexpr FloatRegister ScratchSimd128Reg =
+ FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);
+
+// Note, EDX:EAX is the system ABI 64-bit return register, and it is to our
+// advantage to keep the SpiderMonkey ABI in sync with the system ABI.
+//
+// However, using EDX here means that we have to use a register that does not
+// have a word or byte part (eg DX/DH/DL) in some other places; notably,
+// ABINonArgReturnReg1 is EDI. If this becomes a problem and ReturnReg64 has to
+// be something other than EDX:EAX, then jitted code that calls directly to C++
+// will need to shuffle the return value from EDX:EAX into ReturnReg64 directly
+// after the call. See bug 1730161 for discussion and a patch that does that.
+static constexpr Register64 ReturnReg64(edx, eax);
+
+// Avoid ebp, which is the FramePointer; it is unavailable in some modes.
+static constexpr Register CallTempReg0 = edi;
+static constexpr Register CallTempReg1 = eax;
+static constexpr Register CallTempReg2 = ebx;
+static constexpr Register CallTempReg3 = ecx;
+static constexpr Register CallTempReg4 = esi;
+static constexpr Register CallTempReg5 = edx;
+
+// We have no arg regs, so our NonArgRegs are just our CallTempReg*.
+static constexpr Register CallTempNonArgRegs[] = {edi, eax, ebx, ecx, esi, edx};
+static constexpr uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+class ABIArgGenerator {
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+};
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = eax;
+static constexpr Register ABINonArgReg1 = ebx;
+static constexpr Register ABINonArgReg2 = ecx;
+static constexpr Register ABINonArgReg3 = edx;
+
+// This register may be volatile or nonvolatile. Avoid xmm7 which is the
+// ScratchDoubleReg_.
+static constexpr FloatRegister ABINonArgDoubleReg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different.
+static constexpr Register ABINonArgReturnReg0 = ecx;
+static constexpr Register ABINonArgReturnReg1 = edi;
+static constexpr Register ABINonVolatileReg = ebx;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call which must preserve the ABI argument, return,
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = ecx;
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions.
+static constexpr Register InstanceReg = esi;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = ebx;
+
+static constexpr Register OsrFrameReg = edx;
+static constexpr Register PreBarrierReg = edx;
+
+// Not enough registers for a PC register (R0-R2 use 2 registers each).
+static constexpr Register InterpreterPCReg = InvalidReg;
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg2;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg2;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg3;
+
+// GCC stack is aligned on 16 bytes. Ion does not maintain this for internal
+// calls. wasm code does.
+#if defined(__GNUC__) && !defined(__MINGW32__)
+static constexpr uint32_t ABIStackAlignment = 16;
+#else
+static constexpr uint32_t ABIStackAlignment = 4;
+#endif
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
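// [Editor's note, not part of this patch: with the 8-byte JS::Value used on
//  all platforms, JitStackValueAlignment works out to 16 / 8 = 2 Values per
//  alignment unit.]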
+
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments "
+ "which are used for "
+ "the constant sections of the code buffer. Thus it should be "
+ "larger than the "
+ "alignment for SIMD constants.");
+
+static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+ "Stack alignment should be larger than any of the alignments "
+ "which are used for "
+ "spilled values. Thus it should be larger than the alignment "
+ "for SIMD accesses.");
+
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static constexpr uint32_t WasmTrapInstructionLength = 2;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform. (We could make
+// the tail offset 3, but I have opted for 4 as that results in a better-aligned
+// branch target.)
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+struct ImmTag : public Imm32 {
+ explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+struct ImmType : public ImmTag {
+ explicit ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
+};
+
+static constexpr Scale ScalePointer = TimesFour;
+
+} // namespace jit
+} // namespace js
+
+#include "jit/x86-shared/Assembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+static inline Operand LowWord(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(LowWord(op.toAddress()));
+ case Operand::MEM_SCALE:
+ return Operand(LowWord(op.toBaseIndex()));
+ default:
+ MOZ_CRASH("Invalid operand type");
+ }
+}
+
+static inline Operand HighWord(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(HighWord(op.toAddress()));
+ case Operand::MEM_SCALE:
+ return Operand(HighWord(op.toBaseIndex()));
+ default:
+ MOZ_CRASH("Invalid operand type");
+ }
+}
+
+// Return operand from a JS -> JS call.
+static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type,
+ JSReturnReg_Data};
+
+class Assembler : public AssemblerX86Shared {
+ Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+
+ void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind kind) {
+ enoughMemory_ &=
+ jumps_.append(RelativePatch(src.offset(), target.value, kind));
+ if (kind == RelocationKind::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.offset());
+ }
+ }
+
+ public:
+ using AssemblerX86Shared::call;
+ using AssemblerX86Shared::cmpl;
+ using AssemblerX86Shared::j;
+ using AssemblerX86Shared::jmp;
+ using AssemblerX86Shared::movl;
+ using AssemblerX86Shared::pop;
+ using AssemblerX86Shared::push;
+ using AssemblerX86Shared::retarget;
+ using AssemblerX86Shared::vmovsd;
+ using AssemblerX86Shared::vmovss;
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ void assertNoGCThings() const {
+#ifdef DEBUG
+ MOZ_ASSERT(dataRelocations_.length() == 0);
+ for (auto& j : jumps_) {
+ MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+ }
+#endif
+ }
+
+ // Actual assembly emitting functions.
+
+ void push(ImmGCPtr ptr) {
+ masm.push_i32(int32_t(ptr.value));
+ writeDataRelocation(ptr);
+ }
+ void push(const ImmWord imm) { push(Imm32(imm.value)); }
+ void push(const ImmPtr imm) { push(ImmWord(uintptr_t(imm.value))); }
+ void push(FloatRegister src) {
+ subl(Imm32(sizeof(double)), StackPointer);
+ vmovsd(src, Address(StackPointer, 0));
+ }
+
+ CodeOffset pushWithPatch(ImmWord word) {
+ masm.push_i32(int32_t(word.value));
+ return CodeOffset(masm.currentOffset());
+ }
+
+ void pop(FloatRegister src) {
+ vmovsd(Address(StackPointer, 0), src);
+ addl(Imm32(sizeof(double)), StackPointer);
+ }
+
+ CodeOffset movWithPatch(ImmWord word, Register dest) {
+ movl(Imm32(word.value), dest);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void movl(ImmGCPtr ptr, Register dest) {
+ masm.movl_i32r(uintptr_t(ptr.value), dest.encoding());
+ writeDataRelocation(ptr);
+ }
+ void movl(ImmGCPtr ptr, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_i32r(uintptr_t(ptr.value), dest.reg());
+ writeDataRelocation(ptr);
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base());
+ writeDataRelocation(ptr);
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base(),
+ dest.index(), dest.scale());
+ writeDataRelocation(ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movl(ImmWord imm, Register dest) {
+ masm.movl_i32r(imm.value, dest.encoding());
+ }
+ void movl(ImmPtr imm, Register dest) {
+ movl(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(ImmWord imm, Register dest) {
+ // Use xor for setting registers to zero, as it is specially optimized
+ // for this purpose on modern hardware. Note that it does clobber FLAGS
+ // though.
+ if (imm.value == 0) {
+ xorl(dest, dest);
+ } else {
+ movl(imm, dest);
+ }
+ }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(wasm::SymbolicAddress imm, Register dest) {
+ masm.movl_i32r(-1, dest.encoding());
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
+ }
+ void mov(const Operand& src, Register dest) { movl(src, dest); }
+ void mov(Register src, const Operand& dest) { movl(src, dest); }
+ void mov(Imm32 imm, const Operand& dest) { movl(imm, dest); }
+ void mov(CodeLabel* label, Register dest) {
+ // Put a placeholder value in the instruction stream.
+ masm.movl_i32r(0, dest.encoding());
+ label->patchAt()->bind(masm.size());
+ }
+ void mov(Register src, Register dest) { movl(src, dest); }
+ void xchg(Register src, Register dest) { xchgl(src, dest); }
+ void lea(const Operand& src, Register dest) { return leal(src, dest); }
+ void cmovz32(const Operand& src, Register dest) { return cmovzl(src, dest); }
+ void cmovzPtr(const Operand& src, Register dest) { return cmovzl(src, dest); }
+
+ void fstp32(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fstp32_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void faddp() { masm.faddp(); }
+
+ void cmpl(ImmWord rhs, Register lhs) {
+ masm.cmpl_ir(rhs.value, lhs.encoding());
+ }
+ void cmpl(ImmPtr rhs, Register lhs) {
+ cmpl(ImmWord(uintptr_t(rhs.value)), lhs);
+ }
+ void cmpl(ImmGCPtr rhs, Register lhs) {
+ masm.cmpl_i32r(uintptr_t(rhs.value), lhs.encoding());
+ writeDataRelocation(rhs);
+ }
+ void cmpl(Register rhs, Register lhs) {
+ masm.cmpl_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpl(ImmGCPtr rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_i32r(uintptr_t(rhs.value), lhs.reg());
+ writeDataRelocation(rhs);
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_i32m(uintptr_t(rhs.value), lhs.disp(), lhs.base());
+ writeDataRelocation(rhs);
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_i32m(uintptr_t(rhs.value), lhs.address());
+ writeDataRelocation(rhs);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
+ masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), lhs));
+ }
+ void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
+ JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
+ append(wasm::SymbolicAccess(CodeOffset(src.offset()), lhs));
+ }
+
+ void adcl(Imm32 imm, Register dest) {
+ masm.adcl_ir(imm.value, dest.encoding());
+ }
+ void adcl(Register src, Register dest) {
+ masm.adcl_rr(src.encoding(), dest.encoding());
+ }
+ void adcl(Operand src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.adcl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.adcl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void sbbl(Imm32 imm, Register dest) {
+ masm.sbbl_ir(imm.value, dest.encoding());
+ }
+ void sbbl(Register src, Register dest) {
+ masm.sbbl_rr(src.encoding(), dest.encoding());
+ }
+ void sbbl(Operand src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.sbbl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.sbbl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void mull(Register multiplier) { masm.mull_r(multiplier.encoding()); }
+
+ void shldl(const Imm32 imm, Register src, Register dest) {
+ masm.shldl_irr(imm.value, src.encoding(), dest.encoding());
+ }
+ void shrdl(const Imm32 imm, Register src, Register dest) {
+ masm.shrdl_irr(imm.value, src.encoding(), dest.encoding());
+ }
+
+ void vhaddpd(FloatRegister rhs, FloatRegister lhsDest) {
+ MOZ_ASSERT(HasSSE3());
+ MOZ_ASSERT(rhs.size() == 16);
+ MOZ_ASSERT(lhsDest.size() == 16);
+ masm.vhaddpd_rr(rhs.encoding(), lhsDest.encoding(), lhsDest.encoding());
+ }
+
+ void fild(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fild_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
+ JmpSrc src = masm.jmp();
+ addPendingJump(src, target, reloc);
+ }
+ void j(Condition cond, ImmPtr target,
+ RelocationKind reloc = RelocationKind::HARDCODED) {
+ JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ addPendingJump(src, target, reloc);
+ }
+
+ void jmp(JitCode* target) {
+ jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+ void j(Condition cond, JitCode* target) {
+ j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+ void call(JitCode* target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+ void call(ImmWord target) { call(ImmPtr((void*)target.value)); }
+ void call(ImmPtr target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, target, RelocationKind::HARDCODED);
+ }
+
+ // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled) {
+ CodeOffset offset(size());
+ JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
+ addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+ }
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Size of a call instruction.
+ return 5;
+ }
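  // [Editor's note, not part of this patch: the assertion in toggledCall()
  //  relies on both encodings being 5 bytes on x86: CALL rel32 is E8 plus a
  //  4-byte displacement, and CMP eax, imm32 is 3D plus a 4-byte immediate,
  //  so toggling one into the other never changes the code layout.]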
+
+ // Re-routes pending jumps to an external target, flushing the label in the
+ // process.
+ void retarget(Label* label, ImmPtr target, RelocationKind reloc) {
+ if (label->used()) {
+ bool more;
+ X86Encoding::JmpSrc jmp(label->offset());
+ do {
+ X86Encoding::JmpSrc next;
+ more = masm.nextJump(jmp, &next);
+ addPendingJump(jmp, target, reloc);
+ jmp = next;
+ } while (more);
+ }
+ label->reset();
+ }
+
+ // Move a 32-bit immediate into a register where the immediate can be
+ // patched.
+ CodeOffset movlWithPatch(Imm32 imm, Register dest) {
+ masm.movl_i32r(imm.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *(base + disp32) where disp32 can be patched.
+ CodeOffset movsblWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movsbl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movsbl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzblWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzbl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movzbl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movswlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movswl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movswl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzwlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzwl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movzwl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ void vmovss(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_mr(src.address(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovss_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovdWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovd_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ void vmovsd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_mr(src.address(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovsd_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovupsWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovups_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovdqu_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Store to *(base + disp32) where disp32 can be patched.
+ CodeOffset movbWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movb_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movwWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movw_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movw_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movl_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatchLow(Register regLow, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP: {
+ return movlWithPatch(regLow, LowWord(dest));
+ }
+ case Operand::MEM_ADDRESS32: {
+ Operand low(
+ PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64LOW_OFFSET));
+ return movlWithPatch(regLow, low);
+ }
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset movlWithPatchHigh(Register regHigh, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP: {
+ return movlWithPatch(regHigh, HighWord(dest));
+ }
+ case Operand::MEM_ADDRESS32: {
+ Operand high(PatchedAbsoluteAddress(uint32_t(dest.address()) +
+ INT64HIGH_OFFSET));
+ return movlWithPatch(regHigh, high);
+ }
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovdWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovd_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ void vmovss(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_rm(src.encoding(), dest.address());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovss_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovsdWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ void vmovsd(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_rm(src.encoding(), dest.address());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovsd_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovupsWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovups_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovdqu_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *(addr + index*scale) where addr can be patched.
+ CodeOffset movlWithPatch(PatchedAbsoluteAddress addr, Register index,
+ Scale scale, Register dest) {
+ masm.movl_mr(addr.addr, index.encoding(), scale, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *src where src can be patched.
+ CodeOffset movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movsbl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movzbl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movswl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movzwl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovss_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovq_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovsd_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqu_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovups_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Store to *dest where dest can be patched.
+ CodeOffset movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movb_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movw_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movl_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovss_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovq_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovsd_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqu_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovups_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+};
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_Assembler_x86_h */
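Editor's sketch (not part of the patch above; the caller-side names are
hypothetical) showing how GetTempRegForIntArg is meant to be used:

    uint32_t usedIntArgs = 0;
    uint32_t usedFloatArgs = 0;
    Register scratch;
    if (!GetTempRegForIntArg(usedIntArgs, usedFloatArgs, &scratch)) {
      // All six CallTempNonArgRegs are taken; the value goes on the stack.
    }
    usedIntArgs++;

Because x86 passes no integer arguments in registers, the helper simply walks
CallTempNonArgRegs until its six entries are exhausted.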
diff --git a/js/src/jit/x86/BaseAssembler-x86.h b/js/src/jit/x86/BaseAssembler-x86.h
new file mode 100644
index 0000000000..a5a5f67bf2
--- /dev/null
+++ b/js/src/jit/x86/BaseAssembler-x86.h
@@ -0,0 +1,190 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_BaseAssembler_x86_h
+#define jit_x86_BaseAssembler_x86_h
+
+#include "jit/x86-shared/BaseAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+class BaseAssemblerX86 : public BaseAssembler {
+ public:
+ // Arithmetic operations:
+
+ void adcl_ir(int32_t imm, RegisterID dst) {
+ spew("adcl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_ADC);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADC);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void adcl_im(int32_t imm, const void* addr) {
+ spew("adcl %d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_ADC);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_ADC);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void adcl_rr(RegisterID src, RegisterID dst) {
+ spew("adcl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADC_GvEv, src, dst);
+ }
+
+ void adcl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("adcl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADC_GvEv, offset, base, dst);
+ }
+
+ void adcl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("adcl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADC_GvEv, offset, base, index, scale, dst);
+ }
+
+ void sbbl_ir(int32_t imm, RegisterID dst) {
+ spew("sbbl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_SBB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_SBB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void sbbl_rr(RegisterID src, RegisterID dst) {
+ spew("sbbl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SBB_GvEv, src, dst);
+ }
+
+ void sbbl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("sbbl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SBB_GvEv, offset, base, dst);
+ }
+
+ void sbbl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("sbbl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SBB_GvEv, offset, base, index, scale, dst);
+ }
+
+ using BaseAssembler::andl_im;
+ void andl_im(int32_t imm, const void* addr) {
+ spew("andl $0x%x, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ using BaseAssembler::orl_im;
+ void orl_im(int32_t imm, const void* addr) {
+ spew("orl $0x%x, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ using BaseAssembler::subl_im;
+ void subl_im(int32_t imm, const void* addr) {
+ spew("subl $%d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void shldl_irr(int32_t imm, RegisterID src, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("shldl $%d, %s, %s", imm, GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8(OP2_SHLD, dst, src);
+ m_formatter.immediate8u(imm);
+ }
+
+ void shrdl_irr(int32_t imm, RegisterID src, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("shrdl $%d, %s, %s", imm, GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8(OP2_SHRD, dst, src);
+ m_formatter.immediate8u(imm);
+ }
+
+ // SSE operations:
+
+ using BaseAssembler::vcvtsi2sd_mr;
+ void vcvtsi2sd_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, address, src0, dst);
+ }
+
+ using BaseAssembler::vmovaps_mr;
+ void vmovaps_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, address, invalid_xmm,
+ dst);
+ }
+
+ using BaseAssembler::vmovdqa_mr;
+ void vmovdqa_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, address, invalid_xmm,
+ dst);
+ }
+
+ void vhaddpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vhaddpd", VEX_PD, OP2_HADDPD, src1, src0, dst);
+ }
+
+ void vsubpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPS_VpsWps, src1, src0, dst);
+ }
+
+ void fild_m(int32_t offset, RegisterID base) {
+ m_formatter.oneByteOp(OP_FILD, offset, base, FILD_OP_64);
+ }
+
+ // Misc instructions:
+
+ void pusha() {
+ spew("pusha");
+ m_formatter.oneByteOp(OP_PUSHA);
+ }
+
+ void popa() {
+ spew("popa");
+ m_formatter.oneByteOp(OP_POPA);
+ }
+};
+
+typedef BaseAssemblerX86 BaseAssemblerSpecific;
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_BaseAssembler_x86_h */
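Editor's sketch (not part of the patch above): the arithmetic helpers in this
file all choose between a sign-extended 8-bit immediate and a full 32-bit one.
A hypothetical helper equivalent to the CAN_SIGN_EXTEND_8_32 test they use:

    // Assumption: mirrors CAN_SIGN_EXTEND_8_32 from the shared assembler.
    static bool CanSignExtend8To32(int32_t imm) {
      return imm == static_cast<int32_t>(static_cast<int8_t>(imm));
    }
    // adcl_ir(16, dst)  -> OP_GROUP1_EvIb path, 8-bit immediate
    // adcl_ir(300, dst) -> OP_GROUP1_EvIz path, full 32-bit immediate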
diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
new file mode 100644
index 0000000000..ce0fb01d9e
--- /dev/null
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -0,0 +1,1509 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/CodeGenerator-x86.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+
+#include <iterator>
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmInstanceData.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+
+CodeGeneratorX86::CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorX86Shared(gen, graph, masm) {}
+
+ValueOperand CodeGeneratorX86::ToValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand CodeGeneratorX86::ToTempValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ const ValueOperand out = ToOutValue(value);
+ masm.moveValue(value->value(), out);
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ DebugOnly<const LAllocation*> a = box->getOperand(0);
+ MOZ_ASSERT(!a->isConstant());
+
+ // On x86, the input operand and the output payload have the same
+ // virtual register. All that needs to be written is the type tag for
+ // the type definition.
+ masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
+}
+
+void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
+ const AnyRegister in = ToAnyRegister(box->getOperand(0));
+ const ValueOperand out = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), in), out);
+
+ if (JitOptions.spectreValueMasking) {
+ Register scratch = ToRegister(box->spectreTemp());
+ masm.move32(Imm32(JSVAL_TAG_CLEAR), scratch);
+ masm.cmp32Move32(Assembler::Below, scratch, out.typeReg(), scratch,
+ out.typeReg());
+ }
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ Operand type = ToOperand(unbox->type());
+ Operand payload = ToOperand(unbox->payload());
+ Register output = ToRegister(unbox->output());
+ MUnbox* mir = unbox->mir();
+
+ JSValueTag tag = MIRTypeToTag(mir->type());
+ if (mir->fallible()) {
+ masm.cmp32(type, Imm32(tag));
+ bailoutIf(Assembler::NotEqual, unbox->snapshot());
+ } else {
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::Equal, type, Imm32(tag), &ok);
+ masm.assumeUnreachable("Infallible unbox type mismatch");
+ masm.bind(&ok);
+#endif
+ }
+
+ // Note: If spectreValueMasking is disabled, then this instruction will
+  // default to a no-op as long as the lowering allocates the same register
+  // for the output and the payload.
+ masm.unboxNonDouble(type, payload, output, ValueTypeFromMIRType(mir->type()));
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+
+ MOZ_ASSERT(out == ecx);
+ MOZ_ASSERT(temp == ebx);
+ MOZ_ASSERT(temp64 == Register64(edx, eax));
+
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.atomicLoad64(Synchronization::Load(), source, Register64(ecx, ebx),
+ Register64(edx, eax));
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.atomicLoad64(Synchronization::Load(), source, Register64(ecx, ebx),
+ Register64(edx, eax));
+ }
+
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = Register64(value, ToRegister(lir->tempLow()));
+
+ MOZ_ASSERT(temp1 == Register64(ecx, ebx));
+ MOZ_ASSERT(temp2 == Register64(edx, eax));
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+
+ masm.push(value);
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
+ }
+ masm.pop(value);
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register oldval = ToRegister(lir->oldval());
+ DebugOnly<Register> newval = ToRegister(lir->newval());
+ DebugOnly<Register> temp = ToRegister(lir->tempLow());
+ Register out = ToRegister(lir->output());
+
+ MOZ_ASSERT(elements == esi);
+ MOZ_ASSERT(oldval == eax);
+ MOZ_ASSERT(newval.inspect() == edx);
+ MOZ_ASSERT(temp.inspect() == ebx);
+ MOZ_ASSERT(out == ecx);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ DebugOnly<uint32_t> framePushed = masm.framePushed();
+
+ // Save eax and edx before they're clobbered below.
+ masm.push(eax);
+ masm.push(edx);
+
+ auto restoreSavedRegisters = [&]() {
+ masm.pop(edx);
+ masm.pop(eax);
+ };
+
+ Register64 expected = Register64(edx, eax);
+ Register64 replacement = Register64(ecx, ebx);
+
+  // Load |oldval| and |newval| into |expected| and |replacement|,
+  // respectively.
+ {
+ // Use `esi` as a temp register.
+ Register bigInt = esi;
+ masm.push(bigInt);
+
+ masm.mov(oldval, bigInt);
+ masm.loadBigInt64(bigInt, expected);
+
+ // |newval| is stored in `edx`, which is already pushed onto the stack.
+ masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), bigInt);
+ masm.loadBigInt64(bigInt, replacement);
+
+ masm.pop(bigInt);
+ }
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchange64(Synchronization::Full(), dest, expected, replacement,
+ expected);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchange64(Synchronization::Full(), dest, expected, replacement,
+ expected);
+ }
+
+ // Move the result from `edx:eax` to `ecx:ebx`.
+ masm.move64(expected, replacement);
+
+ // OutOfLineCallVM tracks the currently pushed stack entries as reported by
+ // |masm.framePushed()|. We mustn't have any additional entries on the stack
+ // which weren't previously recorded by the safepoint, otherwise the GC
+ // complains when tracing the Ion frames, because the stack frames don't
+ // have their expected layout.
+ MOZ_ASSERT(framePushed == masm.framePushed());
+
+ OutOfLineCode* ool = createBigIntOutOfLine(lir, arrayType, replacement, out);
+
+ // Use `edx:eax`, which are both already on the stack, as temp registers.
+ Register bigInt = eax;
+ Register temp2 = edx;
+
+ Label fail;
+ masm.newGCBigInt(bigInt, temp2, initialBigIntHeap(), &fail);
+ masm.initializeBigInt64(arrayType, bigInt, replacement);
+ masm.mov(bigInt, out);
+ restoreSavedRegisters();
+ masm.jump(ool->rejoin());
+
+ // Couldn't create the BigInt. Restore `edx:eax` and call into the VM.
+ masm.bind(&fail);
+ restoreSavedRegisters();
+ masm.jump(ool->entry());
+
+ // At this point `edx:eax` must have been restored to their original values.
+ masm.bind(ool->rejoin());
+}
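// [Editor's note, not part of this patch: the fixed register pairs asserted
//  above mirror the x86 CMPXCHG8B instruction, which compares EDX:EAX with
//  the memory operand and, on success, stores ECX:EBX into it; hence
//  |expected| must live in edx:eax and |replacement| in ecx:ebx.]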
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register out = ToRegister(lir->output());
+ Register64 temp2 = Register64(value, out);
+
+ MOZ_ASSERT(value == edx);
+ MOZ_ASSERT(temp1 == Register64(ecx, ebx));
+ MOZ_ASSERT(temp2 == Register64(edx, eax));
+ MOZ_ASSERT(out == eax);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ DebugOnly<uint32_t> framePushed = masm.framePushed();
+
+ // Save edx before it's clobbered below.
+ masm.push(edx);
+
+ auto restoreSavedRegisters = [&]() { masm.pop(edx); };
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ }
+
+ // Move the result from `edx:eax` to `ecx:ebx`.
+ masm.move64(temp2, temp1);
+
+ // OutOfLineCallVM tracks the currently pushed stack entries as reported by
+ // |masm.framePushed()|. We mustn't have any additional entries on the stack
+ // which weren't previously recorded by the safepoint, otherwise the GC
+ // complains when tracing the Ion frames, because the stack frames don't
+ // have their expected layout.
+ MOZ_ASSERT(framePushed == masm.framePushed());
+
+ OutOfLineCode* ool = createBigIntOutOfLine(lir, arrayType, temp1, out);
+
+ // Use `edx`, which is already on the stack, as a temp register.
+ Register temp = edx;
+
+ Label fail;
+ masm.newGCBigInt(out, temp, initialBigIntHeap(), &fail);
+ masm.initializeBigInt64(arrayType, out, temp1);
+ restoreSavedRegisters();
+ masm.jump(ool->rejoin());
+
+ // Couldn't create the BigInt. Restore `edx` and call into the VM.
+ masm.bind(&fail);
+ restoreSavedRegisters();
+ masm.jump(ool->entry());
+
+ // At this point `edx` must have been restored to its original value.
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register out = ToRegister(lir->output());
+ Register64 temp2 = Register64(value, out);
+
+ MOZ_ASSERT(value == edx);
+ MOZ_ASSERT(temp1 == Register64(ecx, ebx));
+ MOZ_ASSERT(temp2 == Register64(edx, eax));
+ MOZ_ASSERT(out == eax);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ DebugOnly<uint32_t> framePushed = masm.framePushed();
+
+ // Save edx before it's clobbered below.
+ masm.push(edx);
+
+ auto restoreSavedRegisters = [&]() { masm.pop(edx); };
+
+ masm.loadBigInt64(value, temp1);
+
+ masm.Push(temp1);
+
+ Address addr(masm.getStackPointer(), 0);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
+ temp2);
+ }
+
+ masm.freeStack(sizeof(uint64_t));
+
+ // Move the result from `edx:eax` to `ecx:ebx`.
+ masm.move64(temp2, temp1);
+
+ // OutOfLineCallVM tracks the currently pushed stack entries as reported by
+ // |masm.framePushed()|. We mustn't have any additional entries on the stack
+ // which weren't previously recorded by the safepoint, otherwise the GC
+ // complains when tracing the Ion frames, because the stack frames don't
+ // have their expected layout.
+ MOZ_ASSERT(framePushed == masm.framePushed());
+
+ OutOfLineCode* ool = createBigIntOutOfLine(lir, arrayType, temp1, out);
+
+ // Use `edx`, which is already on the stack, as a temp register.
+ Register temp = edx;
+
+ Label fail;
+ masm.newGCBigInt(out, temp, initialBigIntHeap(), &fail);
+ masm.initializeBigInt64(arrayType, out, temp1);
+ restoreSavedRegisters();
+ masm.jump(ool->rejoin());
+
+ // Couldn't create the BigInt. Restore `edx` and call into the VM.
+ masm.bind(&fail);
+ restoreSavedRegisters();
+ masm.jump(ool->entry());
+
+ // At this point `edx` must have been restored to its original value.
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register tempLow = ToRegister(lir->tempLow());
+ Register64 temp2 = Register64(value, tempLow);
+
+ MOZ_ASSERT(value == edx);
+ MOZ_ASSERT(temp1 == Register64(ecx, ebx));
+ MOZ_ASSERT(temp2 == Register64(edx, eax));
+ MOZ_ASSERT(tempLow == eax);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ // Save edx before it's clobbered below.
+ masm.push(edx);
+
+ masm.loadBigInt64(value, temp1);
+
+ masm.Push(temp1);
+
+ Address addr(masm.getStackPointer(), 0);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
+ temp2);
+ }
+
+ masm.freeStack(sizeof(uint64_t));
+
+ masm.pop(edx);
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ Register input = ToRegister(lir->input());
+ Register temp = ToRegister(lir->temp());
+
+ if (input != temp) {
+ masm.mov(input, temp);
+ }
+
+ // Beware: convertUInt32ToDouble clobbers input.
+ masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ Register input = ToRegister(lir->input());
+ Register temp = ToRegister(lir->temp());
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ if (input != temp) {
+ masm.mov(input, temp);
+ }
+
+ // Beware: convertUInt32ToFloat32 clobbers input.
+ masm.convertUInt32ToFloat32(temp, output);
+}
+
+void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
+ masm.loadPtr(Address(ToRegister(ins->instance()),
+ wasm::Instance::offsetOfMemoryBase()),
+ ToRegister(ins->output()));
+}
+
+template <typename T>
+void CodeGeneratorX86::emitWasmLoad(T* ins) {
+ const MWasmLoad* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* memoryBase = ins->memoryBase();
+
+  // Lowering has set things up so that a constant pointer is folded into the
+  // displacement when the pointer or the offset is zero; in all other cases
+  // the pointer is in a register and we use a base+index form.
+
+ Operand srcAddr =
+ ptr->isBogus()
+ ? Operand(ToRegister(memoryBase),
+ offset ? offset : mir->base()->toConstant()->toInt32())
+ : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ if (mir->type() == MIRType::Int64) {
+ MOZ_ASSERT_IF(mir->access().isAtomic(),
+ mir->access().type() != Scalar::Int64);
+ masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
+ } else {
+ masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
+ }
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); }
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* ins) { emitWasmLoad(ins); }
+
+template <typename T>
+void CodeGeneratorX86::emitWasmStore(T* ins) {
+ const MWasmStore* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* memoryBase = ins->memoryBase();
+
+  // Lowering has set things up so that a constant pointer is folded into the
+  // displacement when the pointer or the offset is zero; in all other cases
+  // the pointer is in a register and we use a base+index form.
+
+ Operand dstAddr =
+ ptr->isBogus()
+ ? Operand(ToRegister(memoryBase),
+ offset ? offset : mir->base()->toConstant()->toInt32())
+ : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ if (mir->access().type() == Scalar::Int64) {
+ Register64 value =
+ ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex));
+ masm.wasmStoreI64(mir->access(), value, dstAddr);
+ } else {
+ AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex));
+ masm.wasmStore(mir->access(), value, dstAddr);
+ }
+}
+
+void CodeGenerator::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); }
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* ins) {
+ emitWasmStore(ins);
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MWasmCompareExchangeHeap* mir = ins->mir();
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ Register memoryBase = ToRegister(ins->memoryBase());
+ Register output = ToRegister(ins->output());
+
+ masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
+ addrTemp);
+
+ Address memAddr(addrTemp, 0);
+ masm.wasmCompareExchange(mir->access(), memAddr, oldval, newval, output);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MWasmAtomicExchangeHeap* mir = ins->mir();
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ Register memoryBase = ToRegister(ins->memoryBase());
+ Register output = ToRegister(ins->output());
+
+ masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
+ addrTemp);
+
+ Address memAddr(addrTemp, 0);
+ masm.wasmAtomicExchange(mir->access(), memAddr, value, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register temp =
+ ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ Register out = ToRegister(ins->output());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+ Register memoryBase = ToRegister(ins->memoryBase());
+
+ masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
+ addrTemp);
+
+ Address memAddr(addrTemp, 0);
+ if (value->isConstant()) {
+ masm.wasmAtomicFetchOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
+ temp, out);
+ } else {
+ masm.wasmAtomicFetchOp(mir->access(), op, ToRegister(value), memAddr, temp,
+ out);
+ }
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasUses());
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+ Register memoryBase = ToRegister(ins->memoryBase());
+
+ masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
+ addrTemp);
+
+ Address memAddr(addrTemp, 0);
+ if (value->isConstant()) {
+ masm.wasmAtomicEffectOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
+ InvalidReg);
+ } else {
+ masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), memAddr,
+ InvalidReg);
+ }
+}
+
+void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* ins) {
+ uint32_t offset = ins->mir()->access().offset();
+ MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());
+
+ const LAllocation* memoryBase = ins->memoryBase();
+ const LAllocation* ptr = ins->ptr();
+ BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ MOZ_ASSERT(ToRegister(ins->t1()) == ecx);
+ MOZ_ASSERT(ToRegister(ins->t2()) == ebx);
+ MOZ_ASSERT(ToOutRegister64(ins).high == edx);
+ MOZ_ASSERT(ToOutRegister64(ins).low == eax);
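+  // A 64-bit atomic load on x86-32 is typically implemented with lock
+  // cmpxchg8b (replacing the value with itself), which is why ecx, ebx and
+  // the edx:eax output are all claimed here.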
+
+ masm.wasmAtomicLoad64(ins->mir()->access(), srcAddr, Register64(ecx, ebx),
+ Register64(edx, eax));
+}
+
+void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* ins) {
+ uint32_t offset = ins->mir()->access().offset();
+ MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());
+
+ const LAllocation* memoryBase = ins->memoryBase();
+ const LAllocation* ptr = ins->ptr();
+ Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ MOZ_ASSERT(ToRegister64(ins->expected()).low == eax);
+ MOZ_ASSERT(ToRegister64(ins->expected()).high == edx);
+ MOZ_ASSERT(ToRegister64(ins->replacement()).low == ebx);
+ MOZ_ASSERT(ToRegister64(ins->replacement()).high == ecx);
+ MOZ_ASSERT(ToOutRegister64(ins).low == eax);
+ MOZ_ASSERT(ToOutRegister64(ins).high == edx);
+
+ masm.append(ins->mir()->access(), masm.size());
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
+}
+
+template <typename T>
+void CodeGeneratorX86::emitWasmStoreOrExchangeAtomicI64(
+ T* ins, const wasm::MemoryAccessDesc& access) {
+ MOZ_ASSERT(access.offset() < masm.wasmMaxOffsetGuardLimit());
+
+ const LAllocation* memoryBase = ins->memoryBase();
+ const LAllocation* ptr = ins->ptr();
+ Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne,
+ access.offset());
+
+ DebugOnly<const LInt64Allocation> value = ins->value();
+ MOZ_ASSERT(ToRegister64(value).low == ebx);
+ MOZ_ASSERT(ToRegister64(value).high == ecx);
+
+ // eax and edx will be overwritten every time through the loop but
+ // memoryBase and ptr must remain live for a possible second iteration.
+
+ MOZ_ASSERT(ToRegister(memoryBase) != edx && ToRegister(memoryBase) != eax);
+ MOZ_ASSERT(ToRegister(ptr) != edx && ToRegister(ptr) != eax);
+
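+  // lock cmpxchg8b compares edx:eax with the memory operand; if they match it
+  // stores ecx:ebx and sets ZF, otherwise it loads the current memory value
+  // into edx:eax and clears ZF. Looping until ZF is set therefore performs an
+  // unconditional 64-bit store/exchange, leaving the previous value in
+  // edx:eax.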
+ Label again;
+ masm.bind(&again);
+ masm.append(access, masm.size());
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
+ masm.j(Assembler::Condition::NonZero, &again);
+}
+
+void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* ins) {
+ MOZ_ASSERT(ToRegister(ins->t1()) == edx);
+ MOZ_ASSERT(ToRegister(ins->t2()) == eax);
+
+ emitWasmStoreOrExchangeAtomicI64(ins, ins->mir()->access());
+}
+
+void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* ins) {
+ MOZ_ASSERT(ToOutRegister64(ins).high == edx);
+ MOZ_ASSERT(ToOutRegister64(ins).low == eax);
+
+ emitWasmStoreOrExchangeAtomicI64(ins, ins->access());
+}
+
+void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* ins) {
+ uint32_t offset = ins->access().offset();
+ MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());
+
+ const LAllocation* memoryBase = ins->memoryBase();
+ const LAllocation* ptr = ins->ptr();
+
+ BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ MOZ_ASSERT(ToRegister(memoryBase) == esi || ToRegister(memoryBase) == edi);
+ MOZ_ASSERT(ToRegister(ptr) == esi || ToRegister(ptr) == edi);
+
+ Register64 value = ToRegister64(ins->value());
+
+ MOZ_ASSERT(value.low == ebx);
+ MOZ_ASSERT(value.high == ecx);
+
+ Register64 output = ToOutRegister64(ins);
+
+ MOZ_ASSERT(output.low == eax);
+ MOZ_ASSERT(output.high == edx);
+
+ masm.Push(ecx);
+ masm.Push(ebx);
+
+ Address valueAddr(esp, 0);
+
+ // Here the `value` register acts as a temp, we'll restore it below.
+ masm.wasmAtomicFetchOp64(ins->access(), ins->operation(), valueAddr, srcAddr,
+ value, output);
+
+ masm.Pop(ebx);
+ masm.Pop(ecx);
+}
+
+namespace js {
+namespace jit {
+
+class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86> {
+ LInstruction* ins_;
+
+ public:
+ explicit OutOfLineTruncate(LInstruction* ins) : ins_(ins) {
+ MOZ_ASSERT(ins_->isTruncateDToInt32() ||
+ ins_->isWasmBuiltinTruncateDToInt32());
+ }
+
+ void accept(CodeGeneratorX86* codegen) override {
+ codegen->visitOutOfLineTruncate(this);
+ }
+
+ LAllocation* input() { return ins_->getOperand(0); }
+ LDefinition* output() { return ins_->getDef(0); }
+ LDefinition* tempFloat() { return ins_->getTemp(0); }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ if (ins_->isTruncateDToInt32()) {
+ return ins_->toTruncateDToInt32()->mir()->bytecodeOffset();
+ }
+
+ return ins_->toWasmBuiltinTruncateDToInt32()->mir()->bytecodeOffset();
+ }
+};
+
+class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86> {
+ LInstruction* ins_;
+
+ public:
+ explicit OutOfLineTruncateFloat32(LInstruction* ins) : ins_(ins) {
+ MOZ_ASSERT(ins_->isTruncateFToInt32() ||
+ ins_->isWasmBuiltinTruncateFToInt32());
+ }
+
+ void accept(CodeGeneratorX86* codegen) override {
+ codegen->visitOutOfLineTruncateFloat32(this);
+ }
+
+ LAllocation* input() { return ins_->getOperand(0); }
+ LDefinition* output() { return ins_->getDef(0); }
+ LDefinition* tempFloat() { return ins_->getTemp(0); }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ if (ins_->isTruncateFToInt32()) {
+      return ins_->toTruncateFToInt32()->mir()->bytecodeOffset();
+ }
+
+ return ins_->toWasmBuiltinTruncateFToInt32()->mir()->bytecodeOffset();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* lir) {
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register output = ToRegister(lir->getDef(0));
+
+ OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* lir) {
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register output = ToRegister(lir->getDef(0));
+
+ OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate* ool) {
+ FloatRegister input = ToFloatRegister(ool->input());
+ Register output = ToRegister(ool->output());
+
+ Label fail;
+
+ if (Assembler::HasSSE3()) {
+ Label failPopDouble;
+ // Push double.
+ masm.subl(Imm32(sizeof(double)), esp);
+ masm.storeDouble(input, Operand(esp, 0));
+
+ // Check exponent to avoid fp exceptions.
+ masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopDouble);
+
+ // Load double, perform 64-bit truncation.
+ masm.truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), output);
+
+ // Load low word, pop double and jump back.
+ masm.load32(Address(esp, 0), output);
+ masm.addl(Imm32(sizeof(double)), esp);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&failPopDouble);
+ masm.addl(Imm32(sizeof(double)), esp);
+ masm.jump(&fail);
+ } else {
+ FloatRegister temp = ToFloatRegister(ool->tempFloat());
+
+ // Try to convert doubles representing integers within 2^32 of a signed
+ // integer, by adding/subtracting 2^32 and then trying to convert to int32.
+ // This has to be an exact conversion, as otherwise the truncation works
+ // incorrectly on the modified value.
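+    // For example, 4294967295.0 is above INT32_MAX, but subtracting 2^32
+    // yields -1.0, which converts exactly to -1, the correct modulo-2^32
+    // truncation of the original value.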
+ {
+ ScratchDoubleScope fpscratch(masm);
+ masm.zeroDouble(fpscratch);
+ masm.vucomisd(fpscratch, input);
+ masm.j(Assembler::Parity, &fail);
+ }
+
+ {
+ Label positive;
+ masm.j(Assembler::Above, &positive);
+
+ masm.loadConstantDouble(4294967296.0, temp);
+ Label skip;
+ masm.jmp(&skip);
+
+ masm.bind(&positive);
+ masm.loadConstantDouble(-4294967296.0, temp);
+ masm.bind(&skip);
+ }
+
+ masm.addDouble(input, temp);
+ masm.vcvttsd2si(temp, output);
+ ScratchDoubleScope fpscratch(masm);
+ masm.vcvtsi2sd(output, fpscratch, fpscratch);
+
+ masm.vucomisd(fpscratch, temp);
+ masm.j(Assembler::Parity, &fail);
+ masm.j(Assembler::Equal, ool->rejoin());
+ }
+
+ masm.bind(&fail);
+ {
+ if (gen->compilingWasm()) {
+ masm.Push(InstanceReg);
+ }
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ saveVolatile(output);
+
+ if (gen->compilingWasm()) {
+ masm.setupWasmABICall();
+ masm.passABIArg(input, MoveOp::DOUBLE);
+
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(ool->bytecodeOffset(), wasm::SymbolicAddress::ToInt32,
+ mozilla::Some(instanceOffset));
+ } else {
+ using Fn = int32_t (*)(double);
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(input, MoveOp::DOUBLE);
+ masm.callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ masm.storeCallInt32Result(output);
+
+ restoreVolatile(output);
+
+ if (gen->compilingWasm()) {
+ masm.Pop(InstanceReg);
+ }
+ }
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGeneratorX86::visitOutOfLineTruncateFloat32(
+ OutOfLineTruncateFloat32* ool) {
+ FloatRegister input = ToFloatRegister(ool->input());
+ Register output = ToRegister(ool->output());
+
+ Label fail;
+
+ if (Assembler::HasSSE3()) {
+ Label failPopFloat;
+
+    // Push the float32, but reserve 64 bits of stack so that the 64-bit
+    // value written by fisttp fits.
+ masm.subl(Imm32(sizeof(uint64_t)), esp);
+ masm.storeFloat32(input, Operand(esp, 0));
+
+ // Check exponent to avoid fp exceptions.
+ masm.branchFloat32NotInInt64Range(Address(esp, 0), output, &failPopFloat);
+
+    // Load the float, perform a 64-bit truncation.
+ masm.truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), output);
+
+    // Load the low word, pop the 64 bits, and jump back.
+ masm.load32(Address(esp, 0), output);
+ masm.addl(Imm32(sizeof(uint64_t)), esp);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&failPopFloat);
+ masm.addl(Imm32(sizeof(uint64_t)), esp);
+ masm.jump(&fail);
+ } else {
+ FloatRegister temp = ToFloatRegister(ool->tempFloat());
+
+ // Try to convert float32 representing integers within 2^32 of a signed
+ // integer, by adding/subtracting 2^32 and then trying to convert to int32.
+ // This has to be an exact conversion, as otherwise the truncation works
+ // incorrectly on the modified value.
+ {
+ ScratchFloat32Scope fpscratch(masm);
+ masm.zeroFloat32(fpscratch);
+ masm.vucomiss(fpscratch, input);
+ masm.j(Assembler::Parity, &fail);
+ }
+
+ {
+ Label positive;
+ masm.j(Assembler::Above, &positive);
+
+ masm.loadConstantFloat32(4294967296.f, temp);
+ Label skip;
+ masm.jmp(&skip);
+
+ masm.bind(&positive);
+ masm.loadConstantFloat32(-4294967296.f, temp);
+ masm.bind(&skip);
+ }
+
+ masm.addFloat32(input, temp);
+ masm.vcvttss2si(temp, output);
+ ScratchFloat32Scope fpscratch(masm);
+ masm.vcvtsi2ss(output, fpscratch, fpscratch);
+
+ masm.vucomiss(fpscratch, temp);
+ masm.j(Assembler::Parity, &fail);
+ masm.j(Assembler::Equal, ool->rejoin());
+ }
+
+ masm.bind(&fail);
+ {
+ if (gen->compilingWasm()) {
+ masm.Push(InstanceReg);
+ }
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ saveVolatile(output);
+
+ masm.Push(input);
+
+ if (gen->compilingWasm()) {
+ masm.setupWasmABICall();
+ } else {
+ masm.setupUnalignedABICall(output);
+ }
+
+ masm.vcvtss2sd(input, input, input);
+ masm.passABIArg(input.asDouble(), MoveOp::DOUBLE);
+
+ if (gen->compilingWasm()) {
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(ool->bytecodeOffset(), wasm::SymbolicAddress::ToInt32,
+ mozilla::Some(instanceOffset));
+ } else {
+ using Fn = int32_t (*)(double);
+ masm.callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+
+ masm.storeCallInt32Result(output);
+ masm.Pop(input);
+
+ restoreVolatile(output);
+
+ if (gen->compilingWasm()) {
+ masm.Pop(InstanceReg);
+ }
+ }
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+ Label done;
+
+ masm.move32(Imm32(1), output);
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, &done);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, &done);
+ }
+
+ masm.xorl(output, output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Instance)) ==
+ InstanceReg);
+
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+    // We can use InstanceReg as a temp register because we preserved it
+    // earlier.
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ MDefinition* mir = lir->mir();
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
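+  // wasm defines INT64_MIN % -1 to be 0, while INT64_MIN / -1 traps because
+  // the quotient 2^63 is not representable as an int64.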
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notOverflow);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notOverflow);
+ if (mir->isWasmBuiltinModI64()) {
+ masm.xor64(output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ if (mir->isWasmBuiltinModI64()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64,
+ mozilla::Some(instanceOffset));
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64,
+ mozilla::Some(instanceOffset));
+ }
+
+ // output in edx:eax, move to output register.
+ masm.movl(edx, output.high);
+ MOZ_ASSERT(eax == output.low);
+
+ masm.bind(&done);
+ masm.Pop(InstanceReg);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Instance)) ==
+ InstanceReg);
+
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+    // We can use InstanceReg as a temp register because we preserved it
+    // earlier.
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MDefinition* mir = lir->mir();
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ if (mir->isWasmBuiltinModI64()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64,
+ mozilla::Some(instanceOffset));
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64,
+ mozilla::Some(instanceOffset));
+ }
+
+ // output in edx:eax, move to output register.
+ masm.movl(edx, output.high);
+ MOZ_ASSERT(eax == output.low);
+
+ masm.Pop(InstanceReg);
+}
+
+void CodeGeneratorX86::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ MOZ_ASSERT(dividend == eax);
+ MOZ_ASSERT(output == edx);
+
+  // Sign extend the lhs into edx to make edx:eax.
+ masm.cdq();
+
+ masm.idiv(divisor);
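+  // idiv leaves the quotient in eax (the dividend register) and the
+  // remainder in edx, so the new BigInt below is initialized from |dividend|.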
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorX86::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ MOZ_ASSERT(dividend == eax);
+ MOZ_ASSERT(output == edx);
+
+  // Sign extend the lhs into edx to make edx:eax.
+ masm.cdq();
+
+ masm.idiv(divisor);
+
+  // Move the remainder from edx into the dividend register (eax).
+ masm.movl(output, dividend);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ Register64 falseExpr = ToRegister64(lir->falseExpr());
+ Register64 out = ToOutRegister64(lir);
+
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+ "true expr is reused for input");
+
+ Label done;
+ masm.branchTest32(Assembler::NonZero, cond, cond, &done);
+ masm.movl(falseExpr.low, out.low);
+ masm.movl(falseExpr.high, out.high);
+ masm.bind(&done);
+}
+
+// We expect to handle only the case where compare is {U,}Int32 and select is
+// {U,}Int32. Some values may be stack allocated, and the "true" input is
+// reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+ ins->compareType() == MCompare::Compare_UInt32;
+ bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+ MOZ_RELEASE_ASSERT(
+ cmpIs32bit && selIs32bit,
+ "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+ Register trueExprAndDest = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+ "true expr input is reused for output");
+
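+  // The condition is inverted because the true value already sits in the
+  // output register: the conditional move/load below overwrites it with the
+  // false value only when the original comparison fails.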
+ Assembler::Condition cond = Assembler::InvertCondition(
+ JSOpToCondition(ins->compareType(), ins->jsop()));
+ const LAllocation* rhs = ins->rightExpr();
+ const LAllocation* falseExpr = ins->ifFalseExpr();
+ Register lhs = ToRegister(ins->leftExpr());
+
+ if (rhs->isRegister()) {
+ if (falseExpr->isRegister()) {
+ masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+ } else {
+ masm.cmp32Load32(cond, lhs, ToRegister(rhs), ToAddress(falseExpr),
+ trueExprAndDest);
+ }
+ } else {
+ if (falseExpr->isRegister()) {
+ masm.cmp32Move32(cond, lhs, ToAddress(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+ } else {
+ masm.cmp32Load32(cond, lhs, ToAddress(rhs), ToAddress(falseExpr),
+ trueExprAndDest);
+ }
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
+ masm.Push(input.high);
+ masm.Push(input.low);
+ masm.vmovq(Operand(esp, 0), ToFloatRegister(lir->output()));
+ masm.freeStack(sizeof(uint64_t));
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ Register64 output = ToOutRegister64(lir);
+
+ masm.reserveStack(sizeof(uint64_t));
+ masm.vmovq(ToFloatRegister(lir->input()), Operand(esp, 0));
+ masm.Pop(output.low);
+ masm.Pop(output.high);
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ Register64 output = ToOutRegister64(lir);
+ Register input = ToRegister(lir->input());
+
+ if (lir->mir()->isUnsigned()) {
+ if (output.low != input) {
+ masm.movl(input, output.low);
+ }
+ masm.xorl(output.high, output.high);
+ } else {
+ MOZ_ASSERT(output.low == input);
+ MOZ_ASSERT(output.low == eax);
+ MOZ_ASSERT(output.high == edx);
+ masm.cdq();
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+#ifdef DEBUG
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ MOZ_ASSERT(input.low == eax);
+ MOZ_ASSERT(output.low == eax);
+ MOZ_ASSERT(input.high == edx);
+ MOZ_ASSERT(output.high == edx);
+#endif
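+  // E.g. in Byte mode, move8SignExtend sign-extends al into eax and the final
+  // cdq then copies eax's sign bit into edx, leaving the sign-extended result
+  // in edx:eax.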
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move8SignExtend(eax, eax);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move16SignExtend(eax, eax);
+ break;
+ case MSignExtendInt64::Word:
+ break;
+ }
+ masm.cdq();
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ masm.movl(ToRegister(input.low()), output);
+ } else {
+ masm.movl(ToRegister(input.high()), output);
+ }
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index*) {
+ MOZ_CRASH("64-bit only");
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ // Generates no code on this platform because we just return the low part of
+ // the input register pair.
+ MOZ_ASSERT(ToRegister(lir->input()) == ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.clz64(input, output.low);
+ masm.xorl(output.high, output.high);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.ctz64(input, output.low);
+ masm.xorl(output.high, output.high);
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
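+  // A 64-bit value is zero exactly when the OR of its two halves is zero, so
+  // OR the halves together and set the output on Equal.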
+ if (input.high == output) {
+ masm.orl(input.low, output);
+ } else if (input.low == output) {
+ masm.orl(input.high, output);
+ } else {
+ masm.movl(input.high, output);
+ masm.orl(input.low, output);
+ }
+
+ masm.cmpl(Imm32(0), output);
+ masm.emitSet(Assembler::Equal, output);
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ FloatRegister floatTemp = ToFloatRegister(lir->temp());
+
+ Label fail, convert;
+
+ MOZ_ASSERT(mir->input()->type() == MIRType::Double ||
+ mir->input()->type() == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ bool isSaturating = mir->isSaturating();
+ if (mir->input()->type() == MIRType::Float32) {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating,
+ ool->entry(), ool->rejoin(), floatTemp);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, ool->entry(),
+ ool->rejoin(), floatTemp);
+ }
+ } else {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, ool->entry(),
+ ool->rejoin(), floatTemp);
+ } else {
+ masm.wasmTruncateDoubleToInt64(input, output, isSaturating, ool->entry(),
+ ool->rejoin(), floatTemp);
+ }
+ }
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+ Register temp =
+ lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToDouble(input, output, temp);
+ } else {
+ masm.convertInt64ToDouble(input, output);
+ }
+ } else {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToFloat32(input, output, temp);
+ } else {
+ masm.convertInt64ToFloat32(input, output);
+ }
+ }
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
+ const LInt64Allocation input = ins->getInt64Operand(0);
+ Register64 inputR = ToRegister64(input);
+ MOZ_ASSERT(inputR == ToOutRegister64(ins));
+ masm.notl(inputR.high);
+ masm.notl(inputR.low);
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
+ masm.testl(input.high, input.high);
+ jumpToBlock(lir->ifTrue(), Assembler::NonZero);
+ masm.testl(input.low, input.low);
+ emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
+ // LBitAndAndBranch only represents single-word ANDs, hence it can't be
+ // 64-bit here.
+ MOZ_ASSERT(!baab->is64());
+ Register regL = ToRegister(baab->left());
+ if (baab->right()->isConstant()) {
+ masm.test32(regL, Imm32(ToInt32(baab->right())));
+ } else {
+ masm.test32(regL, ToRegister(baab->right()));
+ }
+ emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
+}
diff --git a/js/src/jit/x86/CodeGenerator-x86.h b/js/src/jit/x86/CodeGenerator-x86.h
new file mode 100644
index 0000000000..4f92bf615f
--- /dev/null
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -0,0 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_CodeGenerator_x86_h
+#define jit_x86_CodeGenerator_x86_h
+
+#include "jit/x86-shared/CodeGenerator-x86-shared.h"
+#include "jit/x86/Assembler-x86.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineTruncate;
+class OutOfLineTruncateFloat32;
+
+class CodeGeneratorX86 : public CodeGeneratorX86Shared {
+ protected:
+ CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+ template <typename T>
+ void emitWasmStoreOrExchangeAtomicI64(T* ins,
+ const wasm::MemoryAccessDesc& access);
+
+ public:
+ void visitOutOfLineTruncate(OutOfLineTruncate* ool);
+ void visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32* ool);
+};
+
+typedef CodeGeneratorX86 CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_CodeGenerator_x86_h */
diff --git a/js/src/jit/x86/LIR-x86.h b/js/src/jit/x86/LIR-x86.h
new file mode 100644
index 0000000000..c7c9587e20
--- /dev/null
+++ b/js/src/jit/x86/LIR-x86.h
@@ -0,0 +1,308 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_LIR_x86_h
+#define jit_x86_LIR_x86_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 2> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp,
+ const LDefinition& spectreTemp, MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ MOZ_ASSERT(IsFloatingPointType(type));
+ setOperand(0, in);
+ setTemp(0, temp);
+ setTemp(1, spectreTemp);
+ }
+
+ const LDefinition* spectreTemp() { return getTemp(1); }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(Unbox);
+
+ LUnbox() : LInstructionHelper(classOpcode) {}
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const LAllocation* payload() { return getOperand(0); }
+ const LAllocation* type() { return getOperand(1); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ LWasmUint32ToDouble(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ LWasmUint32ToFloat32(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2 + 1, 0> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+ static const size_t Instance = 2 * INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs,
+ const LAllocation& instance)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ setOperand(Instance, instance);
+ }
+
+ MDefinition* mir() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ return mir_;
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeDivideByZero();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeNegativeDividend();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->bytecodeOffset();
+ }
+ return mir_->toWasmBuiltinDivI64()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2 + 1, 0> {
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+ static const size_t Instance = 2 * INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs,
+ const LAllocation& instance)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ setOperand(Instance, instance);
+ }
+
+ MDefinition* mir() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ return mir_;
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeDivideByZero();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeNegativeDividend();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->bytecodeOffset();
+ }
+ return mir_->toWasmBuiltinDivI64()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<INT64_PIECES, 1, 1> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ LWasmTruncateToInt64(const LAllocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LWasmAtomicLoadI64 : public LInstructionHelper<INT64_PIECES, 2, 2> {
+ public:
+ LIR_HEADER(WasmAtomicLoadI64);
+
+ LWasmAtomicLoadI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LDefinition& t1, const LDefinition& t2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setTemp(0, t1);
+ setTemp(1, t2);
+ }
+
+ MWasmLoad* mir() const { return mir_->toWasmLoad(); }
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LDefinition* t1() { return getTemp(0); }
+ const LDefinition* t2() { return getTemp(1); }
+};
+
+class LWasmAtomicStoreI64 : public LInstructionHelper<0, 2 + INT64_PIECES, 2> {
+ public:
+ LIR_HEADER(WasmAtomicStoreI64);
+
+ LWasmAtomicStoreI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LInt64Allocation& value, const LDefinition& t1,
+ const LDefinition& t2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setInt64Operand(2, value);
+ setTemp(0, t1);
+ setTemp(1, t2);
+ }
+
+ MWasmStore* mir() const { return mir_->toWasmStore(); }
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LInt64Allocation value() { return getInt64Operand(2); }
+ const LDefinition* t1() { return getTemp(0); }
+ const LDefinition* t2() { return getTemp(1); }
+};
+
+class LWasmCompareExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 2 + 2 * INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmCompareExchangeI64);
+
+ LWasmCompareExchangeI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LInt64Allocation& expected,
+ const LInt64Allocation& replacement)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setInt64Operand(2, expected);
+ setInt64Operand(2 + INT64_PIECES, replacement);
+ }
+
+ MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LInt64Allocation expected() { return getInt64Operand(2); }
+ const LInt64Allocation replacement() {
+ return getInt64Operand(2 + INT64_PIECES);
+ }
+};
+
+class LWasmAtomicExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES, 0> {
+ const wasm::MemoryAccessDesc& access_;
+
+ public:
+ LIR_HEADER(WasmAtomicExchangeI64);
+
+ LWasmAtomicExchangeI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LInt64Allocation& value,
+ const wasm::MemoryAccessDesc& access)
+ : LInstructionHelper(classOpcode), access_(access) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setInt64Operand(2, value);
+ }
+
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LInt64Allocation value() { return getInt64Operand(2); }
+ const wasm::MemoryAccessDesc& access() { return access_; }
+};
+
+class LWasmAtomicBinopI64
+ : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES, 0> {
+ const wasm::MemoryAccessDesc& access_;
+ AtomicOp op_;
+
+ public:
+ LIR_HEADER(WasmAtomicBinopI64);
+
+ LWasmAtomicBinopI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LInt64Allocation& value,
+ const wasm::MemoryAccessDesc& access, AtomicOp op)
+ : LInstructionHelper(classOpcode), access_(access), op_(op) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setInt64Operand(2, value);
+ }
+
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LInt64Allocation value() { return getInt64Operand(2); }
+ const wasm::MemoryAccessDesc& access() { return access_; }
+ AtomicOp operation() const { return op_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_LIR_x86_h */
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
new file mode 100644
index 0000000000..968b5baf14
--- /dev/null
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -0,0 +1,840 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/Lowering-x86.h"
+
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/x86/Assembler-x86.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation LIRGeneratorX86::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2, bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+ LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
+LAllocation LIRGeneratorX86::useByteOpRegister(MDefinition* mir) {
+ return useFixed(mir, eax);
+}
+
+LAllocation LIRGeneratorX86::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useFixedAtStart(mir, eax);
+}
+
+LAllocation LIRGeneratorX86::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useFixed(mir, eax);
+}
+
+LDefinition LIRGeneratorX86::tempByteOpRegister() { return tempFixed(eax); }
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* inner = box->getOperand(0);
+
+ // If the box wrapped a double, it needs a new register.
+ if (IsFloatingPointType(inner->type())) {
+ LDefinition spectreTemp =
+ JitOptions.spectreValueMasking ? temp() : LDefinition::BogusTemp();
+ defineBox(new (alloc()) LBoxFloatingPoint(useRegisterAtStart(inner),
+ tempCopy(inner, 0), spectreTemp,
+ inner->type()),
+ box);
+ return;
+ }
+
+ if (box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (inner->isConstant()) {
+ defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
+ return;
+ }
+
+ LBox* lir = new (alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+
+ // Note that because we're using BogusTemp(), we do not change the type of
+ // the definition. We also do not define the first output as "TYPE",
+ // because it has no corresponding payload at (vreg + 1). Also note that
+ // although we copy the input's original type for the payload half of the
+ // definition, this is only for clarity. BogusTemp() definitions are
+ // ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition::BogusTemp());
+ box->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* inner = unbox->getOperand(0);
+
+ // An unbox on x86 reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MOZ_ASSERT(inner->type() == MIRType::Value);
+
+ ensureDefined(inner);
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint* lir =
+ new (alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+ define(lir, unbox);
+ return;
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload register.
+ LUnbox* lir = new (alloc()) LUnbox;
+ bool reusePayloadReg = !JitOptions.spectreValueMasking ||
+ unbox->type() == MIRType::Int32 ||
+ unbox->type() == MIRType::Boolean;
+ if (reusePayloadReg) {
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::ANY));
+ } else {
+ lir->setOperand(0, usePayload(inner, LUse::REGISTER));
+ lir->setOperand(1, useType(inner, LUse::ANY));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ // Types and payloads form two separate intervals. If the type becomes dead
+ // before the payload, it could be used as a Value without the type being
+ // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+ // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+ // Instead, we create a new virtual register.
+ if (reusePayloadReg) {
+ defineReuseInput(lir, unbox, 0);
+ } else {
+ define(lir, unbox);
+ }
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ fillBoxUses(ins, 0, opd);
+ add(ins);
+}
+
+void LIRGeneratorX86::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(
+ inputPosition,
+ LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
+ payload->setOperand(inputPosition,
+ LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+void LIRGeneratorX86::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+ uint32_t lowVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(lowVreg);
+
+ uint32_t highVreg = getVirtualRegister();
+ MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+ low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+ high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+ annotate(high);
+ annotate(low);
+}
+
+void LIRGeneratorX86::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+ low->setOperand(inputPosition,
+ LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+ high->setOperand(
+ inputPosition,
+ LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+void LIRGeneratorX86::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorX86::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorX86::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ bool needsTemp = true;
+
+ if (rhs->isConstant()) {
+ int64_t constant = rhs->toConstant()->toInt64();
+ int32_t shift = mozilla::FloorLog2(constant);
+ // See special cases in CodeGeneratorX86Shared::visitMulI64.
+ if (constant >= -1 && constant <= 2) {
+ needsTemp = false;
+ }
+ if (constant > 0 && int64_t(1) << shift == constant) {
+ needsTemp = false;
+ }
+ }
+
+  // MulI64 on x86 needs the output to be in edx:eax.
+ ins->setInt64Operand(
+ 0, useInt64Fixed(lhs, Register64(edx, eax), /*useAtStart = */ true));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ if (needsTemp) {
+ ins->setTemp(0, temp());
+ }
+
+ defineInt64Fixed(ins, mir,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LUse elements = useFixed(ins->elements(), esi);
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ LUse oldval = useFixed(ins->oldval(), eax);
+ LUse newval = useFixed(ins->newval(), edx);
+ LDefinition temp = tempFixed(ebx);
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp);
+ defineFixed(lir, ins, LAllocation(AnyRegister(ecx)));
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ LAllocation value = useFixed(ins->value(), edx);
+ LInt64Definition temp = tempInt64Fixed(Register64(ecx, ebx));
+
+ auto* lir = new (alloc())
+ LAtomicExchangeTypedArrayElement64(elements, index, value, temp);
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ lowerAtomicExchangeTypedArrayElement(ins, /*useI386ByteRegisters=*/true);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ LAllocation value = useFixed(ins->value(), edx);
+ LInt64Definition temp = tempInt64Fixed(Register64(ecx, ebx));
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We can omit allocating the result BigInt.
+
+ if (ins->isForEffect()) {
+ LDefinition tempLow = tempFixed(eax);
+
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp, tempLow);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinop64(elements, index, value, temp);
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ true);
+}
+
+void LIRGeneratorX86::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, tempFixed(ebx),
+ tempInt64Fixed(Register64(edx, eax)));
+ defineFixed(lir, ins, LAllocation(AnyRegister(ecx)));
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorX86::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useFixed(ins->value(), edx);
+ LInt64Definition temp1 = tempInt64Fixed(Register64(ecx, ebx));
+ LDefinition temp2 = tempFixed(eax);
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, temp1, temp2), ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir = new (alloc())
+ LWasmUint32ToDouble(useRegisterAtStart(ins->input()), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir = new (alloc())
+ LWasmUint32ToFloat32(useRegisterAtStart(ins->input()), temp());
+ define(lir, ins);
+}
+
+// If the base is a constant, and it is zero or its offset is zero, then
+// code generation will fold the values into the access. Allocate the
+// pointer to a register only if that can't happen.
+
+static bool OptimizableConstantAccess(MDefinition* base,
+ const wasm::MemoryAccessDesc& access) {
+ MOZ_ASSERT(base->isConstant());
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (!(base->toConstant()->isInt32(0) || access.offset() == 0)) {
+ return false;
+ }
+ if (access.type() == Scalar::Int64) {
+    // For int64 accesses on 32-bit systems we will need to add another offset
+    // of 4 to access the high part of the value; make sure the combined
+    // offset still fits in an int32.
+ int32_t v;
+ if (base->toConstant()->isInt32(0)) {
+ v = access.offset();
+ } else {
+ v = base->toConstant()->toInt32();
+ }
+ return v <= int32_t(INT32_MAX - INT64HIGH_OFFSET);
+ }
+ return true;
+}
+
+void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
+ auto* lir = new (alloc()) LWasmHeapBase(useRegisterAtStart(ins->instance()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
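+    // The fixed registers below match the operand constraints of LOCK
+    // CMPXCHG8B, which 64-bit atomic accesses compile to on x86: the old
+    // value lives in edx:eax and the new value in ecx:ebx.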
+ auto* lir = new (alloc())
+ LWasmAtomicLoadI64(useRegister(memoryBase), useRegister(base),
+ tempFixed(ecx), tempFixed(ebx));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ LAllocation baseAlloc;
+ if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
+ baseAlloc = ins->type() == MIRType::Int64 ? useRegister(base)
+ : useRegisterAtStart(base);
+ }
+
+ if (ins->type() != MIRType::Int64) {
+ auto* lir =
+ new (alloc()) LWasmLoad(baseAlloc, useRegisterAtStart(memoryBase));
+ define(lir, ins);
+ return;
+ }
+
+ // "AtStart" register usage does not work for the 64-bit case because we
+ // clobber two registers for the result and may need two registers for a
+ // scaled address; we can't guarantee non-interference.
+
+ auto* lir = new (alloc()) LWasmLoadI64(baseAlloc, useRegister(memoryBase));
+
+ Scalar::Type accessType = ins->access().type();
+ if (accessType == Scalar::Int8 || accessType == Scalar::Int16 ||
+ accessType == Scalar::Int32) {
+ // We use cdq to sign-extend the result and cdq demands these registers.
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ defineInt64(lir, ins);
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
+ auto* lir = new (alloc())
+ LWasmAtomicStoreI64(useRegister(memoryBase), useRegister(base),
+ useInt64Fixed(ins->value(), Register64(ecx, ebx)),
+ tempFixed(edx), tempFixed(eax));
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation baseAlloc;
+ if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
+ baseAlloc = useRegisterAtStart(base);
+ }
+
+ LAllocation valueAlloc;
+ switch (ins->access().type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ // See comment for LIRGeneratorX86::useByteOpRegister.
+ valueAlloc = useFixed(ins->value(), eax);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ // For now, don't allow constant values. The immediate operand affects
+ // instruction layout which affects patching.
+ valueAlloc = useRegisterAtStart(ins->value());
+ break;
+ case Scalar::Simd128:
+#ifdef ENABLE_WASM_SIMD
+ valueAlloc = useRegisterAtStart(ins->value());
+ break;
+#else
+ MOZ_CRASH("unexpected array type");
+#endif
+ case Scalar::Int64: {
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(ins->value());
+ auto* lir = new (alloc())
+ LWasmStoreI64(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
+ add(lir, ins);
+ return;
+ }
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ auto* lir = new (alloc())
+ LWasmStore(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmCompareExchangeI64(
+ useRegisterAtStart(memoryBase), useRegisterAtStart(base),
+ useInt64FixedAtStart(ins->oldValue(), Register64(edx, eax)),
+ useInt64FixedAtStart(ins->newValue(), Register64(ecx, ebx)));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+
+ bool byteArray = byteSize(ins->access().type()) == 1;
+
+ // Register allocation:
+ //
+ // The output may not be used, but eax will be clobbered regardless
+ // so pin the output to eax.
+ //
+ // oldval must be in a register.
+ //
+ // newval must be in a register. If the source is a byte array
+ // then newval must be a register that has a byte size: this must
+ // be ebx, ecx, or edx (eax is taken).
+ //
+ // Bug #1077036 describes some optimization opportunities.
+
+ const LAllocation oldval = useRegister(ins->oldValue());
+ const LAllocation newval =
+ byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());
+
+ LWasmCompareExchangeHeap* lir = new (alloc()) LWasmCompareExchangeHeap(
+ useRegister(base), oldval, newval, useRegister(memoryBase));
+
+ lir->setAddrTemp(temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64) {
+ MDefinition* base = ins->base();
+ auto* lir = new (alloc()) LWasmAtomicExchangeI64(
+ useRegister(memoryBase), useRegister(base),
+ useInt64Fixed(ins->value(), Register64(ecx, ebx)), ins->access());
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+
+ LWasmAtomicExchangeHeap* lir = new (alloc())
+ LWasmAtomicExchangeHeap(base, value, useRegister(memoryBase));
+
+ lir->setAddrTemp(temp());
+ if (byteSize(ins->access().type()) == 1) {
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ } else {
+ define(lir, ins);
+ }
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc())
+ LWasmAtomicBinopI64(useRegister(memoryBase), useRegister(base),
+ useInt64Fixed(ins->value(), Register64(ecx, ebx)),
+ ins->access(), ins->operation());
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+
+ bool byteArray = byteSize(ins->access().type()) == 1;
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
+ // LOCK OR, or LOCK XOR. These can all take an immediate.
+
+ if (!ins->hasUses()) {
+ LAllocation value;
+ if (byteArray && !ins->value()->isConstant()) {
+ value = useFixed(ins->value(), ebx);
+ } else {
+ value = useRegisterOrConstant(ins->value());
+ }
+ LWasmAtomicBinopHeapForEffect* lir =
+ new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base), value,
+ LDefinition::BogusTemp(),
+ useRegister(memoryBase));
+ lir->setAddrTemp(temp());
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD:
+ //
+ // movl value, output
+ // lock xaddl output, mem
+ //
+  // For the 8-bit variants XADD needs a byte register for the output only;
+  // we can still set up with movl, so just pin the output to eax (or
+  // ebx / ecx / edx).
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop:
+ //
+ // movl *mem, eax
+ // L: mov eax, temp
+ // andl value, temp
+ // lock cmpxchg temp, mem ; reads eax also
+ // jnz L
+ // ; result in eax
+ //
+ // Note the placement of L, cmpxchg will update eax with *mem if
+ // *mem does not have the expected value, so reloading it at the
+ // top of the loop would be redundant.
+ //
+ // We want to fix eax as the output. We also need a temp for
+ // the intermediate value.
+ //
+ // For the 8-bit variants the temp must have a byte register.
+ //
+ // There are optimization opportunities:
+ // - better 8-bit register allocation and instruction selection, Bug
+ // #1077036.
+
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
+ ins->operation() == AtomicFetchSubOp);
+ LDefinition tempDef = LDefinition::BogusTemp();
+ LAllocation value;
+
+ if (byteArray) {
+ value = useFixed(ins->value(), ebx);
+ if (bitOp) {
+ tempDef = tempFixed(ecx);
+ }
+ } else if (bitOp || ins->value()->isConstant()) {
+ value = useRegisterOrConstant(ins->value());
+ if (bitOp) {
+ tempDef = temp();
+ }
+ } else {
+ value = useRegisterAtStart(ins->value());
+ }
+
+ LWasmAtomicBinopHeap* lir = new (alloc())
+ LWasmAtomicBinopHeap(useRegister(base), value, tempDef,
+ LDefinition::BogusTemp(), useRegister(memoryBase));
+
+ lir->setAddrTemp(temp());
+ if (byteArray || bitOp) {
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ } else if (ins->value()->isConstant()) {
+ define(lir, ins);
+ } else {
+ defineReuseInput(lir, ins, LWasmAtomicBinopHeap::valueOp);
+ }
+}
+
+void LIRGeneratorX86::lowerDivI64(MDiv* div) {
+ MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
+
+void LIRGeneratorX86::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_ASSERT(div->lhs()->type() == div->rhs()->type());
+ MOZ_ASSERT(IsNumberType(div->type()));
+
+ MOZ_ASSERT(div->type() == MIRType::Int64);
+
+ if (div->isUnsigned()) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
+ useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
+ useFixedAtStart(div->instance(), InstanceReg));
+ defineReturn(lir, div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
+ useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
+ useFixedAtStart(div->instance(), InstanceReg));
+ defineReturn(lir, div);
+}
+
+void LIRGeneratorX86::lowerModI64(MMod* mod) {
+ MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
+
+void LIRGeneratorX86::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MDefinition* lhs = mod->lhs();
+ MDefinition* rhs = mod->rhs();
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(IsNumberType(mod->type()));
+
+  MOZ_ASSERT(mod->type() == MIRType::Int64);
+
+ if (mod->isUnsigned()) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
+ useInt64FixedAtStart(rhs, Register64(ecx, edx)),
+ useFixedAtStart(mod->instance(), InstanceReg));
+ defineReturn(lir, mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
+ useInt64FixedAtStart(rhs, Register64(ecx, edx)),
+ useFixedAtStart(mod->instance(), InstanceReg));
+ defineReturn(lir, mod);
+}
+
+void LIRGeneratorX86::lowerUDivI64(MDiv* div) {
+ MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
+}
+
+void LIRGeneratorX86::lowerUModI64(MMod* mod) {
+ MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
+
+void LIRGeneratorX86::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(eax), temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(edx)));
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorX86::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(eax), temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(edx)));
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+ // Due to lack of registers on x86, we reuse the string register as
+ // temporary. As a result we only need two temporary registers and take a
+ // bogus temporary as fifth argument.
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), LDefinition::BogusTemp(),
+ tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ LDefinition temp = tempDouble();
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd), temp), ins);
+}
+
+void LIRGeneratorX86::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ LDefinition maybeTemp =
+ (ins->isUnsigned() &&
+ ((ins->type() == MIRType::Double && AssemblerX86Shared::HasSSE3()) ||
+ ins->type() == MIRType::Float32))
+ ? temp()
+ : LDefinition::BogusTemp();
+
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp),
+ ins);
+}
+
+void LIRGeneratorX86::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ if (ins->isUnsigned()) {
+ defineInt64(new (alloc())
+ LExtendInt32ToInt64(useRegisterAtStart(ins->input())),
+ ins);
+ } else {
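+    // The signed case uses cdq, which sign-extends eax into edx, so the
+    // input is pinned to eax and the result to the edx:eax pair.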
+ LExtendInt32ToInt64* lir =
+ new (alloc()) LExtendInt32ToInt64(useFixedAtStart(ins->input(), eax));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ }
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ // Here we'll end up using cdq which requires input and output in (edx,eax).
+ LSignExtendInt64* lir = new (alloc()) LSignExtendInt64(
+ useInt64FixedAtStart(ins->input(), Register64(edx, eax)));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+}
+
+// On x86 we specialize the only cases where compare is {U,}Int32 and select
+// is {U,}Int32.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32);
+}
+
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ auto* lir = new (alloc()) LWasmCompareAndSelect(
+ useRegister(lhs), useAny(rhs), compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()), useAny(ins->falseExpr()));
+ defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
diff --git a/js/src/jit/x86/Lowering-x86.h b/js/src/jit/x86/Lowering-x86.h
new file mode 100644
index 0000000000..b82109981e
--- /dev/null
+++ b/js/src/jit/x86/Lowering-x86.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_Lowering_x86_h
+#define jit_x86_Lowering_x86_h
+
+#include "jit/x86-shared/Lowering-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorX86 : public LIRGeneratorX86Shared {
+ protected:
+ LIRGeneratorX86(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorX86Shared(gen, graph, lirGraph) {}
+
+ // Returns a box allocation with type set to reg1 and payload set to reg2.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ // It's a trap! On x86, the 1-byte store can only use one of
+ // {al,bl,cl,dl,ah,bh,ch,dh}. That means if the register allocator
+ // gives us one of {edi,esi,ebp,esp}, we're out of luck. (The formatter
+ // will assert on us.) Ideally, we'd just ask the register allocator to
+ // give us one of {al,bl,cl,dl}. For now, just useFixed(al).
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ inline LDefinition tempToUnbox() { return LDefinition::BogusTemp(); }
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerDivI64(MDiv* div);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerModI64(MMod* mod);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+
+ void lowerPhi(MPhi* phi);
+
+ public:
+ static bool allowTypedElementHoleCheck() { return true; }
+};
+
+typedef LIRGeneratorX86 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_Lowering_x86_h */
diff --git a/js/src/jit/x86/MacroAssembler-x86-inl.h b/js/src/jit/x86/MacroAssembler-x86-inl.h
new file mode 100644
index 0000000000..66050cc1b5
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86-inl.h
@@ -0,0 +1,1386 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_MacroAssembler_x86_inl_h
+#define jit_x86_MacroAssembler_x86_inl_h
+
+#include "jit/x86/MacroAssembler-x86.h"
+
+#include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+ move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ movl(src.low, dest.low);
+ movl(src.high, dest.high);
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ ScratchDoubleScope scratch(*this);
+
+ if (Assembler::HasSSE41()) {
+ vmovd(src, dest.low);
+ vpextrd(1, src, dest.high);
+ } else {
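+    // Without SSE4.1 there is no pextrd; copy the value, shift the copy
+    // right by four bytes so the high 32 bits land in the low lane, and
+    // extract each half with movd.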
+ vmovd(src, dest.low);
+ moveDouble(src, scratch);
+ vpsrldq(Imm32(4), scratch, scratch);
+ vmovd(scratch, dest.high);
+ }
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ if (Assembler::HasSSE41()) {
+ vmovd(src.low, dest);
+ vpinsrd(1, src.high, dest, dest);
+ } else {
+ ScratchDoubleScope fpscratch(*this);
+ vmovd(src.low, dest);
+ vmovd(src.high, fpscratch);
+ vunpcklps(fpscratch, dest, dest);
+ }
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+ if (src.low != dest) {
+ movl(src.low, dest);
+ }
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ if (src != dest.low) {
+ movl(src, dest.low);
+ }
+ movl(Imm32(0), dest.high);
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ move8SignExtend(src, dest.low);
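+  // When the destination pair is (edx, eax) we can use cdq, which
+  // sign-extends eax into edx in a single instruction; otherwise copy the
+  // low word and shift in the sign bits arithmetically.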
+ if (dest.low == eax && dest.high == edx) {
+ masm.cdq();
+ } else {
+ movl(dest.low, dest.high);
+ sarl(Imm32(31), dest.high);
+ }
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ move16SignExtend(src, dest.low);
+ if (dest.low == eax && dest.high == edx) {
+ masm.cdq();
+ } else {
+ movl(dest.low, dest.high);
+ sarl(Imm32(31), dest.high);
+ }
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ if (src != dest.low) {
+ movl(src, dest.low);
+ }
+ if (dest.low == eax && dest.high == edx) {
+ masm.cdq();
+ } else {
+ movl(dest.low, dest.high);
+ sarl(Imm32(31), dest.high);
+ }
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ movl(src, dest);
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ movl(src, dest);
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ load32(src, dest);
+}
+
+// ===============================================================
+// Logical functions
+
+void MacroAssembler::notPtr(Register reg) { notl(reg); }
+
+void MacroAssembler::andPtr(Register src, Register dest) { andl(src, dest); }
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) { andl(imm, dest); }
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ if (imm.low().value != int32_t(0xFFFFFFFF)) {
+ andl(imm.low(), dest.low);
+ }
+ if (imm.hi().value != int32_t(0xFFFFFFFF)) {
+ andl(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ if (imm.low().value != 0) {
+ orl(imm.low(), dest.low);
+ }
+ if (imm.hi().value != 0) {
+ orl(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ if (imm.low().value != 0) {
+ xorl(imm.low(), dest.low);
+ }
+ if (imm.hi().value != 0) {
+ xorl(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) { orl(src, dest); }
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { orl(imm, dest); }
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ andl(src.low, dest.low);
+ andl(src.high, dest.high);
+}
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ orl(src.low, dest.low);
+ orl(src.high, dest.high);
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ xorl(src.low, dest.low);
+ xorl(src.high, dest.high);
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) { xorl(src, dest); }
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) { xorl(imm, dest); }
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap64(Register64 reg) {
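+  // Reverse the bytes within each 32-bit half, then exchange the halves so
+  // the full 64-bit value ends up byte-reversed.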
+ bswapl(reg.low);
+ bswapl(reg.high);
+ xchgl(reg.low, reg.high);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(Register src, Register dest) { addl(src, dest); }
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) { addl(imm, dest); }
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ addl(Imm32(imm.value), dest);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+ addl(imm, Operand(dest));
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest) {
+ addl(imm, Operand(dest));
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+ addl(Operand(src), dest);
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ addl(src.low, dest.low);
+ adcl(src.high, dest.high);
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ addl(imm, dest.low);
+ adcl(Imm32(0), dest.high);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ if (imm.low().value == 0) {
+ addl(imm.hi(), dest.high);
+ return;
+ }
+ addl(imm.low(), dest.low);
+ adcl(imm.hi(), dest.high);
+}
+
+void MacroAssembler::addConstantDouble(double d, FloatRegister dest) {
+ Double* dbl = getDouble(d);
+ if (!dbl) {
+ return;
+ }
+ masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ moveStackPtrTo(dest);
+ addlWithPatch(Imm32(0), dest);
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ patchAddl(offset, -imm.value);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) { subl(src, dest); }
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ subl(src, Operand(dest));
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) { subl(imm, dest); }
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ subl(Operand(addr), dest);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ subl(src.low, dest.low);
+ sbbl(src.high, dest.high);
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ if (imm.low().value == 0) {
+ subl(imm.hi(), dest.high);
+ return;
+ }
+ subl(imm.low(), dest.low);
+ sbbl(imm.hi(), dest.high);
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ // Preserve edx:eax, unless they're the destination register.
+ if (edx != dest) {
+ push(edx);
+ }
+ if (eax != dest) {
+ push(eax);
+ }
+
+ if (src != eax) {
+ // Compute edx:eax := eax ∗ src
+ movl(imm, eax);
+ mull(src);
+ } else {
+ // Compute edx:eax := eax ∗ edx
+ movl(imm, edx);
+ mull(edx);
+ }
+
+ // Move the high word from edx into |dest|.
+ if (edx != dest) {
+ movl(edx, dest);
+ }
+
+ // Restore edx:eax.
+ if (eax != dest) {
+ pop(eax);
+ }
+ if (edx != dest) {
+ pop(edx);
+ }
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+ imull(rhs, srcDest);
+}
+
+// Note: this function clobbers eax and edx.
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+ MOZ_ASSERT(dest.low != eax && dest.low != edx);
+ MOZ_ASSERT(dest.high != eax && dest.high != edx);
+
+ // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+ movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
+ imull(edx, dest.high);
+
+ // edx:eax = LOW(dest) * LOW(imm);
+ movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
+ movl(dest.low, eax);
+ mull(edx);
+
+ // HIGH(dest) += edx;
+ addl(edx, dest.high);
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
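+  // Only a high immediate word of 5 is needed here; dest.low * 5 is formed
+  // with a single lea as dest.low + 4 * dest.low.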
+ if (((imm.value >> 32) & 0xFFFFFFFFL) == 5) {
+ leal(Operand(dest.low, dest.low, TimesFour), edx);
+ } else {
+ MOZ_CRASH("Unsupported imm");
+ }
+ addl(edx, dest.high);
+
+ // LOW(dest) = eax;
+ movl(eax, dest.low);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest == Register64(edx, eax));
+ MOZ_ASSERT(temp != edx && temp != eax);
+
+ movl(dest.low, temp);
+
+ // Compute mul64
+ imull(imm.low(), dest.high); // (2)
+ imull(imm.hi(), temp); // (3)
+ addl(dest.high, temp);
+ movl(imm.low(), dest.high);
+ mull(dest.high /*, dest.low*/); // (4) + (1) output in edx:eax
+ // (dest_hi:dest_lo)
+ addl(temp, dest.high);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest == Register64(edx, eax));
+ MOZ_ASSERT(src != Register64(edx, eax) && src != Register64(eax, edx));
+
+ // Make sure the rhs.high isn't the dest.high register anymore.
+ // This saves us from doing other register moves.
+ movl(dest.low, temp);
+
+ // Compute mul64
+ imull(src.low, dest.high); // (2)
+ imull(src.high, temp); // (3)
+ addl(dest.high, temp);
+ movl(src.low, dest.high);
+ mull(dest.high /*, dest.low*/); // (4) + (1) output in edx:eax
+ // (dest_hi:dest_lo)
+ addl(temp, dest.high);
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ lea(Operand(src, src, TimesTwo), dest);
+}
+
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ movl(imm, temp);
+ vmulsd(Operand(temp, 0), dest, dest);
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
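+  // Add 1 to the low word; only if it wrapped around to zero does the carry
+  // need to be propagated into the high word.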
+ addl(Imm32(1), Operand(dest));
+ Label noOverflow;
+ j(NonZero, &noOverflow);
+ addl(Imm32(1), Operand(dest.offset(4)));
+ bind(&noOverflow);
+}
+
+void MacroAssembler::neg64(Register64 reg) {
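+  // Two's-complement negation of a register pair: negl sets the carry flag
+  // when the low word is non-zero, adcl folds that borrow into the high
+  // word, and the final negl completes the negation.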
+ negl(reg.low);
+ adcl(Imm32(0), reg.high);
+ negl(reg.high);
+}
+
+void MacroAssembler::negPtr(Register reg) { negl(reg); }
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ shll(imm, dest);
+}
+
+void MacroAssembler::lshiftPtr(Register shift, Register srcDest) {
+ if (HasBMI2()) {
+ shlxl(srcDest, shift, srcDest);
+ return;
+ }
+ MOZ_ASSERT(shift == ecx);
+ shll_cl(srcDest);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shldl(imm, dest.low, dest.high);
+ shll(imm, dest.low);
+ return;
+ }
+
+ mov(dest.low, dest.high);
+ shll(Imm32(imm.value & 0x1f), dest.high);
+ xorl(dest.low, dest.low);
+}
+
+void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shldl_cl(srcDest.low, srcDest.high);
+ shll_cl(srcDest.low);
+
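+  // shldl_cl/shll_cl only use the low five bits of the count, so the code
+  // above handles shifts of 0-31. For shifts of 32-63 (bit 5 of ecx set),
+  // the low word must additionally be moved into the high word and cleared.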
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.low, srcDest.high);
+ xorl(srcDest.low, srcDest.low);
+
+ bind(&done);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ shrl(imm, dest);
+}
+
+void MacroAssembler::rshiftPtr(Register shift, Register srcDest) {
+ if (HasBMI2()) {
+ shrxl(srcDest, shift, srcDest);
+ return;
+ }
+ MOZ_ASSERT(shift == ecx);
+ shrl_cl(srcDest);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shrdl(imm, dest.high, dest.low);
+ shrl(imm, dest.high);
+ return;
+ }
+
+ movl(dest.high, dest.low);
+ shrl(Imm32(imm.value & 0x1f), dest.low);
+ xorl(dest.high, dest.high);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shrdl_cl(srcDest.high, srcDest.low);
+ shrl_cl(srcDest.high);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.high, srcDest.low);
+ xorl(srcDest.high, srcDest.high);
+
+ bind(&done);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ sarl(imm, dest);
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shrdl(imm, dest.high, dest.low);
+ sarl(imm, dest.high);
+ return;
+ }
+
+ movl(dest.high, dest.low);
+ sarl(Imm32(imm.value & 0x1f), dest.low);
+ sarl(Imm32(0x1f), dest.high);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shrdl_cl(srcDest.high, srcDest.low);
+ sarl_cl(srcDest.high);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.high, srcDest.low);
+ sarl(Imm32(0x1f), srcDest.high);
+
+ bind(&done);
+}
+
+// ===============================================================
+// Rotation functions
+
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ Label done;
+
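+  // Save the original high word, then rotate by count mod 32 using two
+  // double-precision shifts: the high word takes bits from the low word and
+  // the low word takes bits from the saved high word. Rotations of 32-63
+  // additionally swap the halves via the xchgl below.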
+ movl(dest.high, temp);
+ shldl_cl(dest.low, dest.high);
+ shldl_cl(temp, dest.low);
+
+ testl(Imm32(0x20), count);
+ j(Condition::Equal, &done);
+ xchgl(dest.high, dest.low);
+
+ bind(&done);
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ Label done;
+
+ movl(dest.high, temp);
+ shrdl_cl(dest.low, dest.high);
+ shrdl_cl(temp, dest.low);
+
+ testl(Imm32(0x20), count);
+ j(Condition::Equal, &done);
+ xchgl(dest.high, dest.low);
+
+ bind(&done);
+}
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+
+ int32_t amount = count.value & 0x3f;
+ if ((amount & 0x1f) != 0) {
+ movl(dest.high, temp);
+ shldl(Imm32(amount & 0x1f), dest.low, dest.high);
+ shldl(Imm32(amount & 0x1f), temp, dest.low);
+ }
+
+ if (!!(amount & 0x20)) {
+ xchgl(dest.high, dest.low);
+ }
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+
+ int32_t amount = count.value & 0x3f;
+ if ((amount & 0x1f) != 0) {
+ movl(dest.high, temp);
+ shrdl(Imm32(amount & 0x1f), dest.low, dest.high);
+ shrdl(Imm32(amount & 0x1f), temp, dest.low);
+ }
+
+ if (!!(amount & 0x20)) {
+ xchgl(dest.high, dest.low);
+ }
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ if (AssemblerX86Shared::HasLZCNT()) {
+ Label nonzero, zero;
+
+ testl(src.high, src.high);
+ j(Assembler::Zero, &zero);
+
+ lzcntl(src.high, dest);
+ jump(&nonzero);
+
+ bind(&zero);
+ lzcntl(src.low, dest);
+ addl(Imm32(32), dest);
+
+ bind(&nonzero);
+ return;
+ }
+
+ // Because |dest| may be equal to |src.low|, we rely on BSR not modifying its
+ // output when the input is zero. AMD ISA documents BSR not modifying the
+ // output and current Intel CPUs follow AMD.
+
+ Label nonzero, zero;
+
+ bsrl(src.high, dest);
+ j(Assembler::Zero, &zero);
+ orl(Imm32(32), dest);
+ jump(&nonzero);
+
+ bind(&zero);
+ bsrl(src.low, dest);
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(0x7F), dest);
+
+ bind(&nonzero);
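+  // dest now holds the bit index of the highest set bit (0-63), or 0x7F for
+  // a zero input; xor with 0x3F turns the index into 63 - index, the number
+  // of leading zeroes, and maps the zero case to 64.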
+ xorl(Imm32(0x3F), dest);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ if (AssemblerX86Shared::HasBMI1()) {
+ Label nonzero, zero;
+
+ testl(src.low, src.low);
+ j(Assembler::Zero, &zero);
+
+ tzcntl(src.low, dest);
+ jump(&nonzero);
+
+ bind(&zero);
+ tzcntl(src.high, dest);
+ addl(Imm32(32), dest);
+
+ bind(&nonzero);
+ return;
+ }
+
+ // Because |dest| may be equal to |src.low|, we rely on BSF not modifying its
+ // output when the input is zero. AMD ISA documents BSF not modifying the
+ // output and current Intel CPUs follow AMD.
+
+ Label done, nonzero;
+
+ bsfl(src.low, dest);
+ j(Assembler::NonZero, &done);
+ bsfl(src.high, dest);
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(64), dest);
+ jump(&done);
+
+ bind(&nonzero);
+ orl(Imm32(32), dest);
+
+ bind(&done);
+}
+
+void MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp) {
+ // The tmp register is only needed if there is no native POPCNT.
+
+ MOZ_ASSERT(src.low != tmp && src.high != tmp);
+ MOZ_ASSERT(dest.low != tmp && dest.high != tmp);
+
+ if (dest.low != src.high) {
+ popcnt32(src.low, dest.low, tmp);
+ popcnt32(src.high, dest.high, tmp);
+ } else {
+ MOZ_ASSERT(dest.high != src.high);
+ popcnt32(src.low, dest.high, tmp);
+ popcnt32(src.high, dest.low, tmp);
+ }
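+  // The two 32-bit counts sum to at most 64, so the total fits in the low
+  // word and the high word is simply zeroed.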
+ addl(dest.high, dest.low);
+ xorl(dest.high, dest.high);
+}
+
+// ===============================================================
+// Condition functions
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ Label success, done;
+
+ branch64(cond, lhs, rhs, &success);
+ move32(Imm32(0), dest);
+ jump(&done);
+ bind(&success);
+ move32(Imm32(1), dest);
+ bind(&done);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
+ Imm32 rhs, Label* label) {
+ cmpl(rhs, lhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch (cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), fail);
+ branch32(Assembler::Equal, lhs.high, val.hi(), success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), success);
+ branch32(Assembler::NotEqual, lhs.high, val.hi(), success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
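+      // Compare the high words first: a strict inequality in either
+      // direction settles the result. Only when the high words are equal do
+      // the low words decide, and they are always compared unsigned.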
+ cmp32(lhs.high, val.hi());
+ j(cond1, success);
+ j(cond2, fail);
+ cmp32(lhs.low, val.low());
+ j(cond3, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough) {
+ bind(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch (cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, fail);
+ branch32(Assembler::Equal, lhs.high, rhs.high, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, success);
+ branch32(Assembler::NotEqual, lhs.high, rhs.high, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+ cmp32(lhs.high, rhs.high);
+ j(cond1, success);
+ j(cond2, fail);
+ cmp32(lhs.low, rhs.low);
+ j(cond3, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough) {
+ bind(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), label);
+ }
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)),
+ val.secondHalf(), label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, rhs.low, &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, rhs.low, label);
+ }
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), rhs.high,
+ label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ Label done;
+
+ load32(rhs, scratch);
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, scratch, &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, scratch, label);
+ }
+
+ load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch,
+ label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ cmpl(rhs, lhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src,
+ Register dest, Label* fail) {
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ vcvttss2si(src, dest);
+
+ // vcvttss2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this permits the use of a
+ // smaller immediate field).
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest,
+ Label* fail) {
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+  // TODO: X64 supports integers up to 64 bits; here we only support 32 bits
+  // before failing. Implementing this for x86 might give an x86 Kraken win.
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ vcvttsd2si(src, dest);
+
+ // vcvttsd2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this permits the use of a
+ // smaller immediate field).
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void MacroAssembler::branchAdd64(Condition cond, Imm64 imm, Register64 dest,
+ Label* label) {
+ add64(imm, dest);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ test32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ if (cond == Assembler::Zero || cond == Assembler::NonZero) {
+ MOZ_ASSERT(lhs.low == rhs.low);
+ MOZ_ASSERT(lhs.high == rhs.high);
+ movl(lhs.low, temp);
+ orl(lhs.high, temp);
+ branchTestPtr(cond, temp, temp, label);
+ } else if (cond == Assembler::Signed || cond == Assembler::NotSigned) {
+ branchTest32(cond, lhs.high, rhs.high, label);
+ } else {
+ MOZ_CRASH("Unsupported condition");
+ }
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ test32(value.payloadReg(), value.payloadReg());
+ j(truthy ? NonZero : Zero, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label notMagic;
+ if (cond == Assembler::Equal) {
+ branchTestMagic(Assembler::NotEqual, valaddr, &notMagic);
+ } else {
+ branchTestMagic(Assembler::NotEqual, valaddr, label);
+ }
+
+ branch32(cond, ToPayload(valaddr), Imm32(why), label);
+ bind(&notMagic);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label notSameValue;
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), &notSameValue);
+ } else {
+ branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), label);
+ }
+
+ branch32(cond, ToPayload(lhs), rhs.payloadReg(), label);
+ bind(&notSameValue);
+}
+
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+ jmp(Operand(addr));
+}
+
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ cmp32(lhs, rhs);
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ cmp32(lhs, rhs);
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ cmp32Move32(cond, lhs, rhs, src, dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ cmp32Move32(cond, lhs, rhs, src, dest);
+}
+
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+ test32(addr, mask);
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+ Imm32 mask, Register src, Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+ test32(addr, mask);
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+ Register dest) {
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Operand& length,
+ Register maybeScratch,
+ Label* failure) {
+ Label failurePopValue;
+ bool pushedValue = false;
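+  // When Spectre index masking is enabled, keep a zero in a scratch register
+  // (or in a stack slot if no scratch is available) and conditionally move
+  // it over the index when the bounds check fails, so speculative execution
+  // past the branch cannot use an out-of-range index.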
+ if (JitOptions.spectreIndexMasking) {
+ if (maybeScratch == InvalidReg) {
+ push(Imm32(0));
+ pushedValue = true;
+ } else {
+ move32(Imm32(0), maybeScratch);
+ }
+ }
+
+ cmp32(index, length);
+ j(Assembler::AboveOrEqual, pushedValue ? &failurePopValue : failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ if (maybeScratch == InvalidReg) {
+ Label done;
+ cmovCCl(Assembler::AboveOrEqual, Operand(StackPointer, 0), index);
+ lea(Operand(StackPointer, sizeof(void*)), StackPointer);
+ jump(&done);
+
+ bind(&failurePopValue);
+ lea(Operand(StackPointer, sizeof(void*)), StackPointer);
+ jump(failure);
+
+ bind(&done);
+ } else {
+ cmovCCl(Assembler::AboveOrEqual, maybeScratch, index);
+ }
+ }
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(length != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ spectreBoundsCheck32(index, Operand(length), maybeScratch, failure);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(index != length.base);
+ MOZ_ASSERT(length.base != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ spectreBoundsCheck32(index, Operand(length), maybeScratch, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ spectreBoundsCheck32(index, length, maybeScratch, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ spectreBoundsCheck32(index, length, maybeScratch, failure);
+}
+
+// ========================================================================
+// SIMD
+
+void MacroAssembler::extractLaneInt64x2(uint32_t lane, FloatRegister src,
+ Register64 dest) {
+ if (lane == 0) {
+ vmovd(src, dest.low);
+ } else {
+ vpextrd(2 * lane, src, dest.low);
+ }
+ vpextrd(2 * lane + 1, src, dest.high);
+}
+
+void MacroAssembler::replaceLaneInt64x2(unsigned lane, Register64 rhs,
+ FloatRegister lhsDest) {
+ vpinsrd(2 * lane, rhs.low, lhsDest, lhsDest);
+ vpinsrd(2 * lane + 1, rhs.high, lhsDest, lhsDest);
+}
+
+void MacroAssembler::replaceLaneInt64x2(unsigned lane, FloatRegister lhs,
+ Register64 rhs, FloatRegister dest) {
+ vpinsrd(2 * lane, rhs.low, lhs, dest);
+ vpinsrd(2 * lane + 1, rhs.high, dest, dest);
+}
+
+void MacroAssembler::splatX2(Register64 src, FloatRegister dest) {
+ vmovd(src.low, dest);
+ vpinsrd(1, src.high, dest, dest);
+ vpunpcklqdq(dest, dest, dest);
+}
+
+// ========================================================================
+// Truncate floating point.
+
+void MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest,
+ Register temp,
+ FloatRegister floatTemp) {
+ Label done;
+
+ loadFloat32(src, floatTemp);
+
+ truncateFloat32ToInt64(src, dest, temp);
+
+  // For unsigned conversion, values outside the signed INT64 range (at or
+  // above 2^63) need to be handled separately.
+ load32(HighWord(dest), temp);
+ branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
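+  // Subtract 2^63 so the value fits in the signed INT64 range, truncate
+  // that, and then set bit 63 of the result to add 2^63 back.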
+ storeFloat32(floatTemp, dest);
+ loadConstantFloat32(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddss(Operand(dest), floatTemp, floatTemp);
+ storeFloat32(floatTemp, dest);
+ truncateFloat32ToInt64(dest, dest, temp);
+
+ load32(HighWord(dest), temp);
+ orl(Imm32(0x80000000), temp);
+ store32(temp, HighWord(dest));
+
+ bind(&done);
+}
+
+void MacroAssembler::truncateDoubleToUInt64(Address src, Address dest,
+ Register temp,
+ FloatRegister floatTemp) {
+ Label done;
+
+ loadDouble(src, floatTemp);
+
+ truncateDoubleToInt64(src, dest, temp);
+
+  // For unsigned conversion, values outside the signed INT64 range (at or
+  // above 2^63) need to be handled separately.
+ load32(HighWord(dest), temp);
+ branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
+ storeDouble(floatTemp, dest);
+ loadConstantDouble(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddsd(Operand(dest), floatTemp, floatTemp);
+ storeDouble(floatTemp, dest);
+ truncateDoubleToInt64(dest, dest, temp);
+
+ load32(HighWord(dest), temp);
+ orl(Imm32(0x80000000), temp);
+ store32(temp, HighWord(dest));
+
+ bind(&done);
+}
+
+template <typename T>
+void MacroAssemblerX86::fallibleUnboxPtrImpl(const T& src, Register dest,
+ JSValueType type, Label* fail) {
+ switch (type) {
+ case JSVAL_TYPE_OBJECT:
+ asMasm().branchTestObject(Assembler::NotEqual, src, fail);
+ break;
+ case JSVAL_TYPE_STRING:
+ asMasm().branchTestString(Assembler::NotEqual, src, fail);
+ break;
+ case JSVAL_TYPE_SYMBOL:
+ asMasm().branchTestSymbol(Assembler::NotEqual, src, fail);
+ break;
+ case JSVAL_TYPE_BIGINT:
+ asMasm().branchTestBigInt(Assembler::NotEqual, src, fail);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ unboxNonDouble(src, dest, type);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(src, dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(src, dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(src, dest, type, fail);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// Note: this function clobbers the source register.
+void MacroAssemblerX86::convertUInt32ToDouble(Register src,
+ FloatRegister dest) {
+ // src is [0, 2^32-1]
+ subl(Imm32(0x80000000), src);
+
+  // Now src is in [-2^31, 2^31-1], the int32 range, but not the same value.
+ convertInt32ToDouble(src, dest);
+
+  // dest is now a double in the int32 range; correct it by adding back
+  // 2^31 (0x80000000) as a double.
+ asMasm().addConstantDouble(2147483648.0, dest);
+}
+
+// Note: this function clobbers the source register.
+void MacroAssemblerX86::convertUInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ convertUInt32ToDouble(src, dest);
+ convertDoubleToFloat32(dest, dest);
+}
+
+void MacroAssemblerX86::unboxValue(const ValueOperand& src, AnyRegister dest,
+ JSValueType) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.payloadReg(), dest.fpu());
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ if (src.payloadReg() != dest.gpr()) {
+ movl(src.payloadReg(), dest.gpr());
+ }
+ }
+}
+
+template <typename T>
+void MacroAssemblerX86::loadInt32OrDouble(const T& src, FloatRegister dest) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(ToPayload(src), dest);
+ jump(&end);
+ bind(&notInt32);
+ loadDouble(src, dest);
+ bind(&end);
+}
+
+template <typename T>
+void MacroAssemblerX86::loadUnboxedValue(const T& src, MIRType type,
+ AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(src, dest.fpu());
+ } else {
+ movl(Operand(src), dest.gpr());
+ }
+}
+
+// If source is a double, load it into dest. If source is int32,
+// convert it to double. Else, branch to failure.
+void MacroAssemblerX86::ensureDouble(const ValueOperand& source,
+ FloatRegister dest, Label* failure) {
+ Label isDouble, done;
+ asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_MacroAssembler_x86_inl_h */
diff --git a/js/src/jit/x86/MacroAssembler-x86.cpp b/js/src/jit/x86/MacroAssembler-x86.cpp
new file mode 100644
index 0000000000..c9d8acbd4d
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -0,0 +1,1829 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/MacroAssembler-x86.h"
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Casting.h"
+
+#include "jit/AtomicOp.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+#include "util/Memory.h"
+#include "vm/BigIntType.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/StringType.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest) {
+ if (maybeInlineDouble(d, dest)) {
+ return;
+ }
+ Double* dbl = getDouble(d);
+ if (!dbl) {
+ return;
+ }
+ masm.vmovsd_mr(nullptr, dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
+}
+
+void MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest) {
+ if (maybeInlineFloat(f, dest)) {
+ return;
+ }
+ Float* flt = getFloat(f);
+ if (!flt) {
+ return;
+ }
+ masm.vmovss_mr(nullptr, dest.encoding());
+ propagateOOM(flt->uses.append(CodeOffset(masm.size())));
+}
+
+void MacroAssemblerX86::loadConstantSimd128Int(const SimdConstant& v,
+ FloatRegister dest) {
+ if (maybeInlineSimd128Int(v, dest)) {
+ return;
+ }
+ SimdData* i4 = getSimdData(v);
+ if (!i4) {
+ return;
+ }
+ masm.vmovdqa_mr(nullptr, dest.encoding());
+ propagateOOM(i4->uses.append(CodeOffset(masm.size())));
+}
+
+void MacroAssemblerX86::loadConstantSimd128Float(const SimdConstant& v,
+ FloatRegister dest) {
+ if (maybeInlineSimd128Float(v, dest)) {
+ return;
+ }
+ SimdData* f4 = getSimdData(v);
+ if (!f4) {
+ return;
+ }
+ masm.vmovaps_mr(nullptr, dest.encoding());
+ propagateOOM(f4->uses.append(CodeOffset(masm.size())));
+}
+
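+// The vpPatchOpSimd128 overloads below emit their instruction against a null
+// memory address as a placeholder and record the resulting code offset in the
+// constant's use list; finish() later emits the 128-bit constant after the
+// code and each recorded use is patched to point at it via a CodeLabel.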
+void MacroAssemblerX86::vpPatchOpSimd128(
+ const SimdConstant& v, FloatRegister src, FloatRegister dest,
+ void (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address, X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId)) {
+ SimdData* val = getSimdData(v);
+ if (!val) {
+ return;
+ }
+ (masm.*op)(nullptr, src.encoding(), dest.encoding());
+ propagateOOM(val->uses.append(CodeOffset(masm.size())));
+}
+
+void MacroAssemblerX86::vpPatchOpSimd128(
+ const SimdConstant& v, FloatRegister src, FloatRegister dest,
+ size_t (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address, X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId)) {
+ SimdData* val = getSimdData(v);
+ if (!val) {
+ return;
+ }
+ size_t patchOffsetFromEnd =
+ (masm.*op)(nullptr, src.encoding(), dest.encoding());
+ propagateOOM(val->uses.append(CodeOffset(masm.size() - patchOffsetFromEnd)));
+}
+
+void MacroAssemblerX86::vpaddbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddb_mr);
+}
+
+void MacroAssemblerX86::vpaddwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddw_mr);
+}
+
+void MacroAssemblerX86::vpadddSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddd_mr);
+}
+
+void MacroAssemblerX86::vpaddqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddq_mr);
+}
+
+void MacroAssemblerX86::vpsubbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubb_mr);
+}
+
+void MacroAssemblerX86::vpsubwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubw_mr);
+}
+
+void MacroAssemblerX86::vpsubdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubd_mr);
+}
+
+void MacroAssemblerX86::vpsubqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubq_mr);
+}
+
+void MacroAssemblerX86::vpmullwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmullw_mr);
+}
+
+void MacroAssemblerX86::vpmulldSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmulld_mr);
+}
+
+void MacroAssemblerX86::vpaddsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddsb_mr);
+}
+
+void MacroAssemblerX86::vpaddusbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddusb_mr);
+}
+
+void MacroAssemblerX86::vpaddswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddsw_mr);
+}
+
+void MacroAssemblerX86::vpadduswSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddusw_mr);
+}
+
+void MacroAssemblerX86::vpsubsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubsb_mr);
+}
+
+void MacroAssemblerX86::vpsubusbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubusb_mr);
+}
+
+void MacroAssemblerX86::vpsubswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubsw_mr);
+}
+
+void MacroAssemblerX86::vpsubuswSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubusw_mr);
+}
+
+void MacroAssemblerX86::vpminsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminsb_mr);
+}
+
+void MacroAssemblerX86::vpminubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminub_mr);
+}
+
+void MacroAssemblerX86::vpminswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminsw_mr);
+}
+
+void MacroAssemblerX86::vpminuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminuw_mr);
+}
+
+void MacroAssemblerX86::vpminsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminsd_mr);
+}
+
+void MacroAssemblerX86::vpminudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminud_mr);
+}
+
+void MacroAssemblerX86::vpmaxsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxsb_mr);
+}
+
+void MacroAssemblerX86::vpmaxubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxub_mr);
+}
+
+void MacroAssemblerX86::vpmaxswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxsw_mr);
+}
+
+void MacroAssemblerX86::vpmaxuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxuw_mr);
+}
+
+void MacroAssemblerX86::vpmaxsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxsd_mr);
+}
+
+void MacroAssemblerX86::vpmaxudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxud_mr);
+}
+
+void MacroAssemblerX86::vpandSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpand_mr);
+}
+
+void MacroAssemblerX86::vpxorSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpxor_mr);
+}
+
+void MacroAssemblerX86::vporSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpor_mr);
+}
+
+void MacroAssemblerX86::vaddpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vaddps_mr);
+}
+
+void MacroAssemblerX86::vaddpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vaddpd_mr);
+}
+
+void MacroAssemblerX86::vsubpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vsubps_mr);
+}
+
+void MacroAssemblerX86::vsubpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vsubpd_mr);
+}
+
+void MacroAssemblerX86::vdivpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vdivps_mr);
+}
+
+void MacroAssemblerX86::vdivpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vdivpd_mr);
+}
+
+void MacroAssemblerX86::vmulpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vmulps_mr);
+}
+
+void MacroAssemblerX86::vmulpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vmulpd_mr);
+}
+
+void MacroAssemblerX86::vandpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vandpd_mr);
+}
+
+void MacroAssemblerX86::vminpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vminpd_mr);
+}
+
+void MacroAssemblerX86::vpacksswbSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpacksswb_mr);
+}
+
+void MacroAssemblerX86::vpackuswbSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpackuswb_mr);
+}
+
+void MacroAssemblerX86::vpackssdwSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpackssdw_mr);
+}
+
+void MacroAssemblerX86::vpackusdwSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpackusdw_mr);
+}
+
+void MacroAssemblerX86::vpunpckldqSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpunpckldq_mr);
+}
+
+void MacroAssemblerX86::vunpcklpsSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vunpcklps_mr);
+}
+
+void MacroAssemblerX86::vpshufbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpshufb_mr);
+}
+
+void MacroAssemblerX86::vptestSimd128(const SimdConstant& v,
+ FloatRegister lhs) {
+ vpPatchOpSimd128(v, lhs, &X86Encoding::BaseAssemblerX86::vptest_mr);
+}
+
+void MacroAssemblerX86::vpmaddwdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaddwd_mr);
+}
+
+void MacroAssemblerX86::vpcmpeqbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpeqb_mr);
+}
+
+void MacroAssemblerX86::vpcmpgtbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpgtb_mr);
+}
+
+void MacroAssemblerX86::vpcmpeqwSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpeqw_mr);
+}
+
+void MacroAssemblerX86::vpcmpgtwSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpgtw_mr);
+}
+
+void MacroAssemblerX86::vpcmpeqdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpeqd_mr);
+}
+
+void MacroAssemblerX86::vpcmpgtdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpgtd_mr);
+}
+
+void MacroAssemblerX86::vcmpeqpsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpeqps_mr);
+}
+
+void MacroAssemblerX86::vcmpneqpsSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpneqps_mr);
+}
+
+void MacroAssemblerX86::vcmpltpsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpltps_mr);
+}
+
+void MacroAssemblerX86::vcmplepsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpleps_mr);
+}
+
+void MacroAssemblerX86::vcmpgepsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpgeps_mr);
+}
+
+void MacroAssemblerX86::vcmpeqpdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpeqpd_mr);
+}
+
+void MacroAssemblerX86::vcmpneqpdSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpneqpd_mr);
+}
+
+void MacroAssemblerX86::vcmpltpdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpltpd_mr);
+}
+
+void MacroAssemblerX86::vcmplepdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmplepd_mr);
+}
+
+void MacroAssemblerX86::vpmaddubswSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaddubsw_mr);
+}
+
+void MacroAssemblerX86::vpmuludqSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmuludq_mr);
+}
+
+void MacroAssemblerX86::finish() {
+  // The last instruction may be an indirect jump, so eagerly insert an
+  // undefined instruction byte to prevent processors from decoding data
+  // values into their pipelines. See Intel performance guides.
+ masm.ud2();
+
+ if (!doubles_.empty()) {
+ masm.haltingAlign(sizeof(double));
+ }
+ for (const Double& d : doubles_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : d.uses) {
+ addCodeLabel(CodeLabel(use, cst));
+ }
+ masm.doubleConstant(d.value);
+ if (!enoughMemory_) {
+ return;
+ }
+ }
+
+ if (!floats_.empty()) {
+ masm.haltingAlign(sizeof(float));
+ }
+ for (const Float& f : floats_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : f.uses) {
+ addCodeLabel(CodeLabel(use, cst));
+ }
+ masm.floatConstant(f.value);
+ if (!enoughMemory_) {
+ return;
+ }
+ }
+
+ // SIMD memory values must be suitably aligned.
+ if (!simds_.empty()) {
+ masm.haltingAlign(SimdMemoryAlignment);
+ }
+ for (const SimdData& v : simds_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : v.uses) {
+ addCodeLabel(CodeLabel(use, cst));
+ }
+ masm.simd128Constant(v.value.bytes());
+ if (!enoughMemory_) {
+ return;
+ }
+ }
+}
+
+void MacroAssemblerX86::handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail) {
+ // Reserve space for exception information.
+ subl(Imm32(sizeof(ResumeFromException)), esp);
+ movl(esp, eax);
+
+ // Call the handler.
+  using Fn = void (*)(ResumeFromException* rfe);
+ asMasm().setupUnalignedABICall(ecx);
+ asMasm().passABIArg(eax);
+ asMasm().callWithABI<Fn, HandleException>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ loadPtr(Address(esp, ResumeFromException::offsetOfKind()), eax);
+ asMasm().branch32(Assembler::Equal, eax,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, eax,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, eax,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, eax,
+ Imm32(ExceptionResumeKind::WasmCatch), &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ ret();
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(esp, ResumeFromException::offsetOfTarget()), eax);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ jmp(Operand(eax));
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by the finally block: the exception and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(ecx, edx);
+ loadValue(Address(esp, ResumeFromException::offsetOfException()), exception);
+
+ loadPtr(Address(esp, ResumeFromException::offsetOfTarget()), eax);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+
+ pushValue(exception);
+ pushValue(BooleanValue(true));
+ jmp(Operand(eax));
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ loadValue(Address(ebp, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(esp, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+
+  // If profiling is enabled, then update the lastProfilingFrame to refer to
+  // the caller frame before returning. This code is shared by
+  // ForcedReturnIon and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ movl(ebp, esp);
+ pop(ebp);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to the
+ // bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ loadPtr(Address(esp, ResumeFromException::offsetOfBailoutInfo()), ecx);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ move32(Imm32(1), ReturnReg);
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ movePtr(ImmPtr((const void*)wasm::FailInstanceReg), InstanceReg);
+ masm.ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(esp, ResumeFromException::offsetOfTarget()), eax);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ jmp(Operand(eax));
+}
+
+void MacroAssemblerX86::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerX86::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+Assembler::Condition MacroAssemblerX86::testStringTruthy(
+ bool truthy, const ValueOperand& value) {
+ Register string = value.payloadReg();
+ cmp32(Operand(string, JSString::offsetOfLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+}
+
+Assembler::Condition MacroAssemblerX86::testBigIntTruthy(
+ bool truthy, const ValueOperand& value) {
+ Register bi = value.payloadReg();
+ cmp32(Operand(bi, JS::BigInt::offsetOfDigitLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+}
+
+MacroAssembler& MacroAssemblerX86::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerX86::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+    // On Windows, we cannot skip very far down the stack without touching the
+    // memory pages in between. This is corner-case code for situations where
+    // the Ion frame data for a piece of code is very large. To handle this
+    // special case, for frames over 4 KiB in size we allocate stack memory
+    // incrementally, touching it as we go.
+    //
+    // When the amount is large, which it can be, we emit an actual loop in
+    // order to keep the function prologue compact. Compactness is a
+    // requirement for e.g. Wasm's CodeRange data structure, which can encode
+    // only 8-bit offsets.
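+    //
+    // Illustrative example: a 20 KiB frame (amountLeft = 20480) is handled
+    // inline, touching four intermediate pages before the final subtraction,
+    // whereas a 64 KiB frame (fullPages = 16) takes the counted loop below.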
+ uint32_t amountLeft = imm32.value;
+ uint32_t fullPages = amountLeft / 4096;
+ if (fullPages <= 8) {
+ while (amountLeft > 4096) {
+ subl(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ amountLeft -= 4096;
+ }
+ subl(Imm32(amountLeft), StackPointer);
+ } else {
+ // Save scratch register.
+ push(eax);
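+      // The pushed scratch register occupies 4 bytes of the frame, so reduce
+      // the remaining amount and recompute the page count. It is restored at
+      // the end by loading it from its slot, imm32.value - 4 bytes above the
+      // final stack pointer.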
+ amountLeft -= 4;
+ fullPages = amountLeft / 4096;
+
+ Label top;
+ move32(Imm32(fullPages), eax);
+ bind(&top);
+ subl(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ subl(Imm32(1), eax);
+ j(Assembler::NonZero, &top);
+ amountLeft -= fullPages * 4096;
+ if (amountLeft) {
+ subl(Imm32(amountLeft), StackPointer);
+ }
+
+ // Restore scratch register.
+ movl(Operand(StackPointer, uint32_t(imm32.value) - 4), eax);
+ }
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ movl(esp, scratch);
+ andl(Imm32(~(ABIStackAlignment - 1)), esp);
+ push(scratch);
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ if (dynamicAlignment_) {
+ // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+ // setupUnalignedABICall.
+ stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
+ ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ freeStack(stackAdjust);
+
+ // Calls to native functions in wasm pass through a thunk which already
+ // fixes up the return value for us.
+ if (!callFromWasm) {
+ if (result == MoveOp::DOUBLE) {
+ reserveStack(sizeof(double));
+ fstp(Operand(esp, 0));
+ loadDouble(Operand(esp, 0), ReturnDoubleReg);
+ freeStack(sizeof(double));
+ } else if (result == MoveOp::FLOAT32) {
+ reserveStack(sizeof(float));
+ fstp32(Operand(esp, 0));
+ loadFloat32(Operand(esp, 0), ReturnFloat32Reg);
+ freeStack(sizeof(float));
+ }
+ }
+
+ if (dynamicAlignment_) {
+ pop(esp);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ if (reg.gpr() != dest.payloadReg()) {
+ movl(reg.gpr(), dest.payloadReg());
+ }
+ mov(ImmWord(MIRTypeToTag(type)), dest.typeReg());
+ return;
+ }
+
+ ScratchDoubleScope scratch(*this);
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ Register s0 = src.typeReg();
+ Register s1 = src.payloadReg();
+ Register d0 = dest.typeReg();
+ Register d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ xchgl(d0, d1);
+ return;
+ }
+ // If only one is, copy that source first.
+ std::swap(s0, s1);
+ std::swap(d0, d1);
+ }
+
+ if (s0 != d0) {
+ movl(s0, d0);
+ }
+ if (s1 != d1) {
+ movl(s1, d1);
+ }
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ movl(Imm32(src.toNunboxTag()), dest.typeReg());
+ if (src.isGCThing()) {
+ movl(ImmGCPtr(src.toGCThing()), dest.payloadReg());
+ } else {
+ movl(Imm32(src.toNunboxPayload()), dest.payloadReg());
+ }
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ if (ptr != buffer) {
+ movePtr(ptr, buffer);
+ }
+ andPtr(Imm32(~gc::ChunkMask), buffer);
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_ASSERT(temp != InvalidReg); // A temp register is required for x86.
+ MOZ_ASSERT(ptr != temp);
+ movePtr(ptr, temp);
+ branchPtrInNurseryChunkImpl(cond, temp, label);
+}
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ MOZ_ASSERT(temp != InvalidReg); // A temp register is required for x86.
+ loadPtr(address, temp);
+ branchPtrInNurseryChunkImpl(cond, temp, label);
+}
+
+void MacroAssembler::branchPtrInNurseryChunkImpl(Condition cond, Register ptr,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ andPtr(Imm32(~gc::ChunkMask), ptr);
+ branchPtr(InvertCondition(cond), Address(ptr, gc::ChunkStoreBufferOffset),
+ ImmWord(0), label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestGCThing(Assembler::NotEqual, address,
+ cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, ToPayload(address), temp, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ if (rhs.isGCThing()) {
+ cmpPtr(lhs.payloadReg(), ImmGCPtr(rhs.toGCThing()));
+ } else {
+ cmpPtr(lhs.payloadReg(), ImmWord(rhs.toNunboxPayload()));
+ }
+
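+  // A Value occupies two registers on x86, so equality requires both the
+  // payload and the tag to match, while inequality can branch as soon as
+  // either word differs.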
+ if (cond == Equal) {
+ Label done;
+ j(NotEqual, &done);
+ {
+ cmp32(lhs.typeReg(), Imm32(rhs.toNunboxTag()));
+ j(Equal, label);
+ }
+ bind(&done);
+ } else {
+ j(NotEqual, label);
+
+ cmp32(lhs.typeReg(), Imm32(rhs.toNunboxTag()));
+ j(NotEqual, label);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // Store the type tag.
+ storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Operand(dest));
+
+ // Store the payload.
+ if (value.constant()) {
+ storePayload(value.value(), Operand(dest));
+ } else {
+ storePayload(value.reg().typedReg().gpr(), Operand(dest));
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+// wasm-specific methods, used in both the wasm baseline compiler and Ion.
+
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+ Operand srcAddr, AnyRegister out) {
+ MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP ||
+ srcAddr.kind() == Operand::MEM_SCALE);
+
+ MOZ_ASSERT_IF(
+ access.isZeroExtendSimd128Load(),
+ access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
+ MOZ_ASSERT_IF(
+ access.isSplatSimd128Load(),
+ access.type() == Scalar::Uint8 || access.type() == Scalar::Uint16 ||
+ access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
+ MOZ_ASSERT_IF(access.isWidenSimd128Load(), access.type() == Scalar::Float64);
+
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ memoryBarrierBefore(access.sync());
+
+ append(access, size());
+ switch (access.type()) {
+ case Scalar::Int8:
+ movsbl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint8:
+ if (access.isSplatSimd128Load()) {
+ vbroadcastb(srcAddr, out.fpu());
+ } else {
+ movzbl(srcAddr, out.gpr());
+ }
+ break;
+ case Scalar::Int16:
+ movswl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint16:
+ if (access.isSplatSimd128Load()) {
+ vbroadcastw(srcAddr, out.fpu());
+ } else {
+ movzwl(srcAddr, out.gpr());
+ }
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movl(srcAddr, out.gpr());
+ break;
+ case Scalar::Float32:
+ if (access.isSplatSimd128Load()) {
+ vbroadcastss(srcAddr, out.fpu());
+ } else {
+        // vmovss also does the right thing for access.isZeroExtendSimd128Load()
+ vmovss(srcAddr, out.fpu());
+ }
+ break;
+ case Scalar::Float64:
+ if (access.isSplatSimd128Load()) {
+ vmovddup(srcAddr, out.fpu());
+ } else if (access.isWidenSimd128Load()) {
+ switch (access.widenSimdOp()) {
+ case wasm::SimdOp::V128Load8x8S:
+ vpmovsxbw(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load8x8U:
+ vpmovzxbw(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load16x4S:
+ vpmovsxwd(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load16x4U:
+ vpmovzxwd(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load32x2S:
+ vpmovsxdq(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load32x2U:
+ vpmovzxdq(srcAddr, out.fpu());
+ break;
+ default:
+ MOZ_CRASH("Unexpected widening op for wasmLoad");
+ }
+ } else {
+        // vmovsd also does the right thing for access.isZeroExtendSimd128Load()
+ vmovsd(srcAddr, out.fpu());
+ }
+ break;
+ case Scalar::Simd128:
+ vmovups(srcAddr, out.fpu());
+ break;
+ case Scalar::Int64:
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected type");
+ }
+
+ memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Operand srcAddr, Register64 out) {
+ // Atomic i64 load must use lock_cmpxchg8b.
+ MOZ_ASSERT_IF(access.isAtomic(), access.byteSize() <= 4);
+ MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP ||
+ srcAddr.kind() == Operand::MEM_SCALE);
+ MOZ_ASSERT(!access.isZeroExtendSimd128Load()); // Use wasmLoad()
+ MOZ_ASSERT(!access.isSplatSimd128Load()); // Use wasmLoad()
+ MOZ_ASSERT(!access.isWidenSimd128Load()); // Use wasmLoad()
+
+ memoryBarrierBefore(access.sync());
+
+ append(access, size());
+ switch (access.type()) {
+ case Scalar::Int8:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ movsbl(srcAddr, out.low);
+
+ cdq();
+ break;
+ case Scalar::Uint8:
+ movzbl(srcAddr, out.low);
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int16:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ movswl(srcAddr, out.low);
+
+ cdq();
+ break;
+ case Scalar::Uint16:
+ movzwl(srcAddr, out.low);
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int32:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ movl(srcAddr, out.low);
+
+ cdq();
+ break;
+ case Scalar::Uint32:
+ movl(srcAddr, out.low);
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int64: {
+ if (srcAddr.kind() == Operand::MEM_SCALE) {
+ MOZ_RELEASE_ASSERT(srcAddr.toBaseIndex().base != out.low &&
+ srcAddr.toBaseIndex().index != out.low);
+ }
+ if (srcAddr.kind() == Operand::MEM_REG_DISP) {
+ MOZ_RELEASE_ASSERT(srcAddr.toAddress().base != out.low);
+ }
+
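+      // The low word is loaded first, so out.low must not alias the base or
+      // index register; otherwise the high-word load below would use a
+      // clobbered address. The assertions above guarantee this.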
+ movl(LowWord(srcAddr), out.low);
+
+ append(access, size());
+ movl(HighWord(srcAddr), out.high);
+
+ break;
+ }
+ case Scalar::Float32:
+ case Scalar::Float64:
+ MOZ_CRASH("non-int64 loads should use load()");
+ case Scalar::Simd128:
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Operand dstAddr) {
+ MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP ||
+ dstAddr.kind() == Operand::MEM_SCALE);
+
+ // NOTE: the generated code must match the assembly code in gen_store in
+ // GenerateAtomicOperations.py
+ memoryBarrierBefore(access.sync());
+
+ append(access, size());
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8:
+ movb(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ movw(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movl(value.gpr(), dstAddr);
+ break;
+ case Scalar::Float32:
+ vmovss(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float64:
+ vmovsd(value.fpu(), dstAddr);
+ break;
+ case Scalar::Simd128:
+ vmovups(value.fpu(), dstAddr);
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("Should be handled in storeI64.");
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ MOZ_CRASH("unexpected type");
+ }
+
+ memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Operand dstAddr) {
+ // Atomic i64 store must use lock_cmpxchg8b.
+ MOZ_ASSERT(!access.isAtomic());
+ MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP ||
+ dstAddr.kind() == Operand::MEM_SCALE);
+
+ // Store the high word first so as to hit guard-page-based OOB checks without
+ // writing partial data.
+ append(access, size());
+ movl(value.high, HighWord(dstAddr));
+
+ append(access, size());
+ movl(value.low, LowWord(dstAddr));
+}
+
+template <typename T>
+static void AtomicLoad64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access, const T& address,
+ Register64 temp, Register64 output) {
+ MOZ_ASSERT(temp.low == ebx);
+ MOZ_ASSERT(temp.high == ecx);
+ MOZ_ASSERT(output.high == edx);
+ MOZ_ASSERT(output.low == eax);
+
+ // In the event edx:eax matches what's in memory, ecx:ebx will be
+ // stored. The two pairs must therefore have the same values.
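+  // Either way LOCK CMPXCHG8B leaves the 64-bit memory value in edx:eax: on a
+  // match it writes back the identical ecx:ebx pair, on a mismatch it only
+  // loads the current value. The operation therefore acts as an atomic 64-bit
+  // load, although the memory still has to be writable.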
+ masm.movl(edx, ecx);
+ masm.movl(eax, ebx);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(address));
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicLoad64(*this, &access, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicLoad64(*this, &access, mem, temp, output);
+}
+
+template <typename T>
+static void CompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const T& mem, Register64 expected,
+ Register64 replacement, Register64 output) {
+ MOZ_ASSERT(expected == output);
+ MOZ_ASSERT(expected.high == edx);
+ MOZ_ASSERT(expected.low == eax);
+ MOZ_ASSERT(replacement.high == ecx);
+ MOZ_ASSERT(replacement.low == ebx);
+
+ // NOTE: the generated code must match the assembly code in gen_cmpxchg in
+ // GenerateAtomicOperations.py
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(mem));
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ CompareExchange64(*this, &access, mem, expected, replacement, output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ CompareExchange64(*this, &access, mem, expected, replacement, output);
+}
+
+template <typename T>
+static void AtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access, const T& mem,
+ Register64 value, Register64 output) {
+ MOZ_ASSERT(value.low == ebx);
+ MOZ_ASSERT(value.high == ecx);
+ MOZ_ASSERT(output.high == edx);
+ MOZ_ASSERT(output.low == eax);
+
+ // edx:eax has garbage initially, and that is the best we can do unless
+ // we can guess with high probability what's in memory.
+
+ MOZ_ASSERT(mem.base != edx && mem.base != eax);
+ if constexpr (std::is_same_v<T, BaseIndex>) {
+ MOZ_ASSERT(mem.index != edx && mem.index != eax);
+ } else {
+ static_assert(std::is_same_v<T, Address>);
+ }
+
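+  // Retry LOCK CMPXCHG8B until the guess in edx:eax matches the memory value;
+  // at that point ecx:ebx (the new value) has been stored and edx:eax holds
+  // the previous contents, i.e. the exchange result.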
+ Label again;
+ masm.bind(&again);
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(mem));
+ masm.j(MacroAssembler::NonZero, &again);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, &access, mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 value, Register64 output) {
+ AtomicExchange64(*this, &access, mem, value, output);
+}
+
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access, AtomicOp op,
+ const Address& value, const T& mem, Register64 temp,
+ Register64 output) {
+ // We don't have enough registers for all the operands on x86, so the rhs
+ // operand is in memory.
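+  //
+  // The loop below loads the current 64-bit value into edx:eax, computes
+  // old OP rhs into ecx:ebx, and retries LOCK CMPXCHG8B until no other thread
+  // has raced the update; edx:eax then holds the old value as the result.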
+
+#define ATOMIC_OP_BODY(OPERATE) \
+ do { \
+ MOZ_ASSERT(output.low == eax); \
+ MOZ_ASSERT(output.high == edx); \
+ MOZ_ASSERT(temp.low == ebx); \
+ MOZ_ASSERT(temp.high == ecx); \
+ if (access) { \
+ masm.append(*access, masm.size()); \
+ } \
+ masm.load64(mem, output); \
+ Label again; \
+ masm.bind(&again); \
+ masm.move64(output, temp); \
+ masm.OPERATE(Operand(value), temp); \
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(mem)); \
+ masm.j(MacroAssembler::NonZero, &again); \
+ } while (0)
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ ATOMIC_OP_BODY(add64FromMemory);
+ break;
+ case AtomicFetchSubOp:
+ ATOMIC_OP_BODY(sub64FromMemory);
+ break;
+ case AtomicFetchAndOp:
+ ATOMIC_OP_BODY(and64FromMemory);
+ break;
+ case AtomicFetchOrOp:
+ ATOMIC_OP_BODY(or64FromMemory);
+ break;
+ case AtomicFetchXorOp:
+ ATOMIC_OP_BODY(xor64FromMemory);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+#undef ATOMIC_OP_BODY
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, const Address& value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, const Address& value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ Label done;
+ vcvttsd2si(input, output);
+ branch32(Assembler::Condition::NotSigned, output, Imm32(0), &done);
+
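+  // vcvttsd2si only covers the signed int32 range. If the result came back
+  // negative, the input may still fit in uint32: bias it down by 2^31,
+  // truncate again, and OR the top bit back in. Inputs that are genuinely out
+  // of range (or NaN) still produce a negative result and take the ool path.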
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(double(int32_t(0x80000000)), fpscratch);
+ addDouble(input, fpscratch);
+ vcvttsd2si(fpscratch, output);
+
+ branch32(Assembler::Condition::Signed, output, Imm32(0), oolEntry);
+ or32(Imm32(0x80000000), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ Label done;
+ vcvttss2si(input, output);
+ branch32(Assembler::Condition::NotSigned, output, Imm32(0), &done);
+
+ ScratchFloat32Scope fpscratch(*this);
+ loadConstantFloat32(float(int32_t(0x80000000)), fpscratch);
+ addFloat32(input, fpscratch);
+ vcvttss2si(fpscratch, output);
+
+ branch32(Assembler::Condition::Signed, output, Imm32(0), oolEntry);
+ or32(Imm32(0x80000000), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ Label ok;
+ Register temp = output.high;
+
+ reserveStack(2 * sizeof(int32_t));
+ storeDouble(input, Operand(esp, 0));
+
+ truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), temp);
+ load64(Address(esp, 0), output);
+
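+  // On overflow or NaN the x87 truncation stores the integer-indefinite value
+  // 0x8000000000000000; detect it here. A nonzero low word rules it out, and
+  // for a zero low word comparing the high word against 1 sets the overflow
+  // flag only when the high word is 0x80000000.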
+ cmpl(Imm32(0), Operand(esp, 0));
+ j(Assembler::NotEqual, &ok);
+
+ cmpl(Imm32(1), Operand(esp, 4));
+ j(Assembler::Overflow, oolEntry);
+
+ bind(&ok);
+ bind(oolRejoin);
+
+ freeStack(2 * sizeof(int32_t));
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ Label ok;
+ Register temp = output.high;
+
+ reserveStack(2 * sizeof(int32_t));
+ storeFloat32(input, Operand(esp, 0));
+
+ truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), temp);
+ load64(Address(esp, 0), output);
+
+ cmpl(Imm32(0), Operand(esp, 0));
+ j(Assembler::NotEqual, &ok);
+
+ cmpl(Imm32(1), Operand(esp, 4));
+ j(Assembler::Overflow, oolEntry);
+
+ bind(&ok);
+ bind(oolRejoin);
+
+ freeStack(2 * sizeof(int32_t));
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in uint64.
+ reserveStack(2 * sizeof(int32_t));
+ storeDouble(input, Operand(esp, 0));
+ branchDoubleNotInUInt64Range(Address(esp, 0), temp, &fail);
+ size_t stackBeforeBranch = framePushed();
+ jump(&convert);
+
+ bind(&fail);
+ freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ if (isSaturating) {
+ // The OOL path computes the right values.
+ setFramePushed(stackBeforeBranch);
+ } else {
+ // The OOL path just checks the input values.
+ bind(oolRejoin);
+ reserveStack(2 * sizeof(int32_t));
+ storeDouble(input, Operand(esp, 0));
+ }
+
+  // Convert the double to uint64.
+ bind(&convert);
+ truncateDoubleToUInt64(Address(esp, 0), Address(esp, 0), temp, tempReg);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ freeStack(2 * sizeof(int32_t));
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in uint64.
+ reserveStack(2 * sizeof(int32_t));
+ storeFloat32(input, Operand(esp, 0));
+ branchFloat32NotInUInt64Range(Address(esp, 0), temp, &fail);
+ size_t stackBeforeBranch = framePushed();
+ jump(&convert);
+
+ bind(&fail);
+ freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ if (isSaturating) {
+ // The OOL path computes the right values.
+ setFramePushed(stackBeforeBranch);
+ } else {
+ // The OOL path just checks the input values.
+ bind(oolRejoin);
+ reserveStack(2 * sizeof(int32_t));
+ storeFloat32(input, Operand(esp, 0));
+ }
+
+ // Convert the float to uint64.
+ bind(&convert);
+ truncateFloat32ToUInt64(Address(esp, 0), Address(esp, 0), temp, tempReg);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ freeStack(2 * sizeof(int32_t));
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
+void MacroAssembler::atomicLoad64(const Synchronization&, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicLoad64(*this, nullptr, mem, temp, output);
+}
+
+void MacroAssembler::atomicLoad64(const Synchronization&, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicLoad64(*this, nullptr, mem, temp, output);
+}
+
+void MacroAssembler::atomicStore64(const Synchronization&, const Address& mem,
+ Register64 value, Register64 temp) {
+ AtomicExchange64(*this, nullptr, mem, value, temp);
+}
+
+void MacroAssembler::atomicStore64(const Synchronization&, const BaseIndex& mem,
+ Register64 value, Register64 temp) {
+ AtomicExchange64(*this, nullptr, mem, value, temp);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization&,
+ const Address& mem, Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ CompareExchange64(*this, nullptr, mem, expected, replacement, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization&,
+ const BaseIndex& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ CompareExchange64(*this, nullptr, mem, expected, replacement, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization&,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization&,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, mem, value, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
+ const Address& value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
+ const Address& value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, op, value, mem, temp, output);
+}
+
+// ========================================================================
+// Convert floating point.
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return HasSSE3(); }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ // SUBPD needs SSE2, HADDPD needs SSE3.
+ if (!HasSSE3()) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ // Zero the dest register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
+ Push(src.high);
+ Push(src.low);
+ fild(Operand(esp, 0));
+
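+    // fild interprets the 64-bit value as signed, so when the top bit of src
+    // is set the loaded value is src - 2^64; the branch below adds 2^64 back
+    // to compensate.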
+ Label notNegative;
+ branch32(Assembler::NotSigned, src.high, Imm32(0), &notNegative);
+ double add_constant = 18446744073709551616.0; // 2^64
+ store64(Imm64(mozilla::BitwiseCast<uint64_t>(add_constant)),
+ Address(esp, 0));
+ fld(Operand(esp, 0));
+ faddp();
+ bind(&notNegative);
+
+ fstp(Operand(esp, 0));
+ vmovsd(Address(esp, 0), dest);
+ freeStack(2 * sizeof(intptr_t));
+ return;
+ }
+
+  // The following operation uses the entire 128 bits of the dest XMM
+  // register. The upper 64 bits are currently free whenever we have access to
+  // the lower 64 bits.
+ MOZ_ASSERT(dest.size() == 8);
+ FloatRegister dest128 =
+ FloatRegister(dest.encoding(), FloatRegisters::Simd128);
+
+  // Assume that src is represented as follows:
+ // src = 0x HHHHHHHH LLLLLLLL
+
+ {
+ // Move src to dest (=dest128) and ScratchInt32x4Reg (=scratch):
+ // dest = 0x 00000000 00000000 00000000 LLLLLLLL
+ // scratch = 0x 00000000 00000000 00000000 HHHHHHHH
+ ScratchSimd128Scope scratch(*this);
+ vmovd(src.low, dest128);
+ vmovd(src.high, scratch);
+
+ // Unpack and interleave dest and scratch to dest:
+ // dest = 0x 00000000 00000000 HHHHHHHH LLLLLLLL
+ vpunpckldq(scratch, dest128, dest128);
+ }
+
+ // Unpack and interleave dest and a constant C1 to dest:
+ // C1 = 0x 00000000 00000000 45300000 43300000
+ // dest = 0x 45300000 HHHHHHHH 43300000 LLLLLLLL
+  // here, each 64-bit part of dest represents the following double:
+ // HI(dest) = 0x 1.00000HHHHHHHH * 2**84 == 2**84 + 0x HHHHHHHH 00000000
+ // LO(dest) = 0x 1.00000LLLLLLLL * 2**52 == 2**52 + 0x 00000000 LLLLLLLL
+ // See convertUInt64ToDouble for the details.
+ static const int32_t CST1[4] = {
+ 0x43300000,
+ 0x45300000,
+ 0x0,
+ 0x0,
+ };
+
+ vpunpckldqSimd128(SimdConstant::CreateX4(CST1), dest128, dest128);
+
+ // Subtract a constant C2 from dest, for each 64-bit part:
+ // C2 = 0x 45300000 00000000 43300000 00000000
+  // here, each 64-bit part of C2 represents the following double:
+ // HI(C2) = 0x 1.0000000000000 * 2**84 == 2**84
+ // LO(C2) = 0x 1.0000000000000 * 2**52 == 2**52
+  // after the operation, each 64-bit part of dest represents the following:
+ // HI(dest) = double(0x HHHHHHHH 00000000)
+ // LO(dest) = double(0x 00000000 LLLLLLLL)
+ static const int32_t CST2[4] = {
+ 0x0,
+ 0x43300000,
+ 0x0,
+ 0x45300000,
+ };
+
+ vsubpdSimd128(SimdConstant::CreateX4(CST2), dest128, dest128);
+
+ // Add HI(dest) and LO(dest) in double and store it into LO(dest),
+ // LO(dest) = double(0x HHHHHHHH 00000000) + double(0x 00000000 LLLLLLLL)
+ // = double(0x HHHHHHHH LLLLLLLL)
+ // = double(src)
+ vhaddpd(dest128, dest128);
+}
+
+void MacroAssembler::convertInt64ToDouble(Register64 input,
+ FloatRegister output) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ Push(input.high);
+ Push(input.low);
+ fild(Operand(esp, 0));
+
+ fstp(Operand(esp, 0));
+ vmovsd(Address(esp, 0), output);
+ freeStack(2 * sizeof(intptr_t));
+}
+
+void MacroAssembler::convertUInt64ToFloat32(Register64 input,
+ FloatRegister output,
+ Register temp) {
+ // Zero the dest register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ // Set the FPU precision to 80 bits.
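+  // Extended precision keeps the 64-bit integer load exact (the default
+  // control word may only provide 53-bit precision), avoiding a
+  // double-rounding error in the final conversion to float32.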
+ reserveStack(2 * sizeof(intptr_t));
+ fnstcw(Operand(esp, 0));
+ load32(Operand(esp, 0), temp);
+ orl(Imm32(0x300), temp);
+ store32(temp, Operand(esp, sizeof(intptr_t)));
+ fldcw(Operand(esp, sizeof(intptr_t)));
+
+ Push(input.high);
+ Push(input.low);
+ fild(Operand(esp, 0));
+
+ Label notNegative;
+ branch32(Assembler::NotSigned, input.high, Imm32(0), &notNegative);
+ double add_constant = 18446744073709551616.0; // 2^64
+ uint64_t add_constant_u64 = mozilla::BitwiseCast<uint64_t>(add_constant);
+ store64(Imm64(add_constant_u64), Address(esp, 0));
+
+ fld(Operand(esp, 0));
+ faddp();
+ bind(&notNegative);
+
+ fstp32(Operand(esp, 0));
+ vmovss(Address(esp, 0), output);
+ freeStack(2 * sizeof(intptr_t));
+
+ // Restore FPU precision to the initial value.
+ fldcw(Operand(esp, 0));
+ freeStack(2 * sizeof(intptr_t));
+}
+
+void MacroAssembler::convertInt64ToFloat32(Register64 input,
+ FloatRegister output) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ Push(input.high);
+ Push(input.low);
+ fild(Operand(esp, 0));
+
+ fstp32(Operand(esp, 0));
+ vmovss(Address(esp, 0), output);
+ freeStack(2 * sizeof(intptr_t));
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt32ToDouble(src, dest);
+}
+
+void MacroAssembler::PushBoxed(FloatRegister reg) { Push(reg); }
+
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ return movWithPatch(ImmPtr(nullptr), dest);
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
+}
+
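+// The 32-bit check below is only taken when the high word of the index is
+// zero; since the bounds-check limit fits in 32 bits, an index with a nonzero
+// high word can never branch to |ok|.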
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ Label notOk;
+ cmp32(index.high, Imm32(0));
+ j(Assembler::NonZero, &notOk);
+ wasmBoundsCheck32(cond, index.low, boundsCheckLimit.low, ok);
+ bind(&notOk);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ Label notOk;
+ cmp32(index.high, Imm32(0));
+ j(Assembler::NonZero, &notOk);
+ wasmBoundsCheck32(cond, index.low, boundsCheckLimit, ok);
+ bind(&notOk);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/x86/MacroAssembler-x86.h b/js/src/jit/x86/MacroAssembler-x86.h
new file mode 100644
index 0000000000..6a99a44d04
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -0,0 +1,1149 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_MacroAssembler_x86_h
+#define jit_x86_MacroAssembler_x86_h
+
+#include "jit/JitOptions.h"
+#include "jit/MoveResolver.h"
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+#include "js/HeapAPI.h"
+#include "wasm/WasmBuiltins.h"
+
+namespace js {
+namespace jit {
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
+
+class ScratchTagScope {
+ const ValueOperand& v_;
+
+ public:
+ ScratchTagScope(MacroAssembler&, const ValueOperand& v) : v_(v) {}
+ operator Register() { return v_.typeReg(); }
+ void release() {}
+ void reacquire() {}
+};
+
+class ScratchTagScopeRelease {
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope*) {}
+};
+
+class MacroAssemblerX86 : public MacroAssemblerX86Shared {
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ protected:
+ MoveResolver moveResolver_;
+
+ private:
+ Operand payloadOfAfterStackPush(const Address& address) {
+ // If we are basing off %esp, the address will be invalid after the
+ // first push.
+ if (address.base == StackPointer) {
+ return Operand(address.base, address.offset + 4);
+ }
+ return payloadOf(address);
+ }
+ Operand payloadOfAfterStackPush(const BaseIndex& address) {
+ // If we are basing off %esp, the address will be invalid after the
+ // first push.
+ if (address.base == StackPointer) {
+ return Operand(address.base, address.index, address.scale,
+ address.offset + 4);
+ }
+ return payloadOf(address);
+ }
+ Operand payloadOf(const Address& address) {
+ return Operand(address.base, address.offset);
+ }
+ Operand payloadOf(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale, address.offset);
+ }
+ Operand tagOf(const Address& address) {
+ return Operand(address.base, address.offset + 4);
+ }
+ Operand tagOf(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale,
+ address.offset + 4);
+ }
+
+ void setupABICall(uint32_t args);
+
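+  // Helpers for emitting a SIMD operation whose second operand is the 128-bit
+  // constant |v|; the instruction's memory operand is patched to refer to the
+  // constant's address (see the out-of-line definitions in
+  // MacroAssembler-x86.cpp).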
+ void vpPatchOpSimd128(const SimdConstant& v, FloatRegister reg,
+ void (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address,
+ X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId)) {
+ vpPatchOpSimd128(v, reg, reg, op);
+ }
+
+ void vpPatchOpSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest,
+ void (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address,
+ X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId));
+
+ void vpPatchOpSimd128(const SimdConstant& v, FloatRegister reg,
+ size_t (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address,
+ X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId)) {
+ vpPatchOpSimd128(v, reg, reg, op);
+ }
+
+ void vpPatchOpSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest,
+ size_t (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address,
+ X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId));
+
+ public:
+ using MacroAssemblerX86Shared::call;
+ using MacroAssemblerX86Shared::load32;
+ using MacroAssemblerX86Shared::store16;
+ using MacroAssemblerX86Shared::store32;
+
+ MacroAssemblerX86() {}
+
+  // The buffer is about to be linked. Make sure any constant pools or excess
+  // bookkeeping have been flushed to the instruction stream.
+ void finish();
+
+ /////////////////////////////////////////////////////////////////
+ // X86-specific interface.
+ /////////////////////////////////////////////////////////////////
+
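+  // On x86 the nunboxed payload is stored at the Value's base address and the
+  // 32-bit tag immediately after it, so ToPayload is the identity and ToType
+  // offsets the operand by sizeof(void*).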
+ Operand ToPayload(Operand base) { return base; }
+ Address ToPayload(Address base) { return base; }
+ BaseIndex ToPayload(BaseIndex base) { return base; }
+ Operand ToType(Operand base) {
+ switch (base.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(Register::FromCode(base.base()),
+ base.disp() + sizeof(void*));
+
+ case Operand::MEM_SCALE:
+ return Operand(Register::FromCode(base.base()),
+ Register::FromCode(base.index()), base.scale(),
+ base.disp() + sizeof(void*));
+
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ Address ToType(Address base) { return ToType(Operand(base)).toAddress(); }
+ BaseIndex ToType(BaseIndex base) {
+ return ToType(Operand(base)).toBaseIndex();
+ }
+
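+  // 64-bit arithmetic against a memory operand, performed as two 32-bit
+  // operations; add and subtract propagate the carry/borrow from the low word
+  // into the high word.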
+ template <typename T>
+ void add64FromMemory(const T& address, Register64 dest) {
+ addl(Operand(LowWord(address)), dest.low);
+ adcl(Operand(HighWord(address)), dest.high);
+ }
+ template <typename T>
+ void sub64FromMemory(const T& address, Register64 dest) {
+ subl(Operand(LowWord(address)), dest.low);
+ sbbl(Operand(HighWord(address)), dest.high);
+ }
+ template <typename T>
+ void and64FromMemory(const T& address, Register64 dest) {
+ andl(Operand(LowWord(address)), dest.low);
+ andl(Operand(HighWord(address)), dest.high);
+ }
+ template <typename T>
+ void or64FromMemory(const T& address, Register64 dest) {
+ orl(Operand(LowWord(address)), dest.low);
+ orl(Operand(HighWord(address)), dest.high);
+ }
+ template <typename T>
+ void xor64FromMemory(const T& address, Register64 dest) {
+ xorl(Operand(LowWord(address)), dest.low);
+ xorl(Operand(HighWord(address)), dest.high);
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // X86/X64-common interface.
+ /////////////////////////////////////////////////////////////////
+ void storeValue(ValueOperand val, Operand dest) {
+ movl(val.payloadReg(), ToPayload(dest));
+ movl(val.typeReg(), ToType(dest));
+ }
+ void storeValue(ValueOperand val, const Address& dest) {
+ storeValue(val, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(JSValueType type, Register reg, const T& dest) {
+ storeTypeTag(ImmTag(JSVAL_TYPE_TO_TAG(type)), Operand(dest));
+ storePayload(reg, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(const Value& val, const T& dest) {
+ storeTypeTag(ImmTag(val.toNunboxTag()), Operand(dest));
+ storePayload(val, Operand(dest));
+ }
+ void storeValue(ValueOperand val, BaseIndex dest) {
+ storeValue(val, Operand(dest));
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ MOZ_ASSERT(src.base != temp);
+ MOZ_ASSERT(dest.base != temp);
+
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
+ void storePrivateValue(Register src, const Address& dest) {
+ store32(Imm32(0), ToType(dest));
+ store32(src, ToPayload(dest));
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ store32(Imm32(0), ToType(dest));
+ movl(imm, Operand(ToPayload(dest)));
+ }
+ void loadValue(Operand src, ValueOperand val) {
+ Operand payload = ToPayload(src);
+ Operand type = ToType(src);
+
+ // Ensure that loading the payload does not erase the pointer to the
+ // Value in memory or the index.
+ Register baseReg = Register::FromCode(src.base());
+ Register indexReg = (src.kind() == Operand::MEM_SCALE)
+ ? Register::FromCode(src.index())
+ : InvalidReg;
+
+ // If we have a BaseIndex that uses both result registers, first compute
+ // the address and then load the Value from there.
+ if ((baseReg == val.payloadReg() && indexReg == val.typeReg()) ||
+ (baseReg == val.typeReg() && indexReg == val.payloadReg())) {
+ computeEffectiveAddress(src, val.scratchReg());
+ loadValue(Address(val.scratchReg(), 0), val);
+ return;
+ }
+
+ if (baseReg == val.payloadReg() || indexReg == val.payloadReg()) {
+ MOZ_ASSERT(baseReg != val.typeReg());
+ MOZ_ASSERT(indexReg != val.typeReg());
+
+ movl(type, val.typeReg());
+ movl(payload, val.payloadReg());
+ } else {
+ MOZ_ASSERT(baseReg != val.payloadReg());
+ MOZ_ASSERT(indexReg != val.payloadReg());
+
+ movl(payload, val.payloadReg());
+ movl(type, val.typeReg());
+ }
+ }
+ void loadValue(Address src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadValue(const BaseIndex& src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+ void tagValue(JSValueType type, Register payload, ValueOperand dest) {
+ MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
+ if (payload != dest.payloadReg()) {
+ movl(payload, dest.payloadReg());
+ }
+ movl(ImmType(type), dest.typeReg());
+ }
+ void pushValue(ValueOperand val) {
+ push(val.typeReg());
+ push(val.payloadReg());
+ }
+ void popValue(ValueOperand val) {
+ pop(val.payloadReg());
+ pop(val.typeReg());
+ }
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isGCThing()) {
+ push(ImmGCPtr(val.toGCThing()));
+ } else {
+ push(Imm32(val.toNunboxPayload()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ push(reg);
+ }
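+  // Push the tag first; if the address is esp-relative, the payload operand
+  // must account for the push that just happened (payloadOfAfterStackPush).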
+ void pushValue(const Address& addr) {
+ push(tagOf(addr));
+ push(payloadOfAfterStackPush(addr));
+ }
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ push(tagOf(addr));
+ push(payloadOfAfterStackPush(addr));
+ }
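+  // Push the high word first so the low word ends up at the lower address,
+  // matching the little-endian layout of a 64-bit value in memory.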
+ void push64(Register64 src) {
+ push(src.high);
+ push(src.low);
+ }
+ void pop64(Register64 dest) {
+ pop(dest.low);
+ pop(dest.high);
+ }
+ void storePayload(const Value& val, Operand dest) {
+ if (val.isGCThing()) {
+ movl(ImmGCPtr(val.toGCThing()), ToPayload(dest));
+ } else {
+ movl(Imm32(val.toNunboxPayload()), ToPayload(dest));
+ }
+ }
+ void storePayload(Register src, Operand dest) { movl(src, ToPayload(dest)); }
+ void storeTypeTag(ImmTag tag, Operand dest) { movl(tag, ToType(dest)); }
+
+ void movePtr(Register src, Register dest) { movl(src, dest); }
+ void movePtr(Register src, const Operand& dest) { movl(src, dest); }
+
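+  // With nunboxing the tag already lives in the Value's type register, so
+  // there is nothing to extract here.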
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ MOZ_ASSERT(value.typeReg() == tag);
+ }
+
+ Condition testUndefined(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testInt32(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
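+  // Doubles are encoded with a tag word below JSVAL_TAG_CLEAR, so testing for
+  // a double is an unsigned range check rather than an equality comparison.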
+ Condition testDouble(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(tag, ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testNull(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testString(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testBigInt(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+ }
+ Condition testObject(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
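+  // Numbers (doubles and int32) occupy the lowest tag values, so the test is
+  // an unsigned comparison against ValueUpperInclNumberTag.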
+ Condition testNumber(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JS::detail::ValueUpperInclNumberTag));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
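+  // GC things (strings, symbols, BigInts, objects) occupy the highest tag
+  // values, at or above ValueLowerInclGCThingTag.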
+ Condition testGCThing(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+ Condition testGCThing(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+ Condition testMagic(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testMagic(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testMagic(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testPrimitive(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag));
+ return cond == Equal ? Below : AboveOrEqual;
+ }
+ Condition testError(Condition cond, Register tag) {
+ return testMagic(cond, tag);
+ }
+ Condition testBoolean(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(Operand(ToType(address)), ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testInt32(cond, Operand(address));
+ }
+ Condition testObject(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testObject(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testObject(cond, Operand(address));
+ }
+ Condition testDouble(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testDouble(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testDouble(cond, Operand(address));
+ }
+
+ Condition testUndefined(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testUndefined(Condition cond, const Address& addr) {
+ return testUndefined(cond, Operand(addr));
+ }
+ Condition testNull(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testNull(Condition cond, const Address& addr) {
+ return testNull(cond, Operand(addr));
+ }
+
+ Condition testUndefined(Condition cond, const ValueOperand& value) {
+ return testUndefined(cond, value.typeReg());
+ }
+ Condition testBoolean(Condition cond, const ValueOperand& value) {
+ return testBoolean(cond, value.typeReg());
+ }
+ Condition testInt32(Condition cond, const ValueOperand& value) {
+ return testInt32(cond, value.typeReg());
+ }
+ Condition testDouble(Condition cond, const ValueOperand& value) {
+ return testDouble(cond, value.typeReg());
+ }
+ Condition testNull(Condition cond, const ValueOperand& value) {
+ return testNull(cond, value.typeReg());
+ }
+ Condition testString(Condition cond, const ValueOperand& value) {
+ return testString(cond, value.typeReg());
+ }
+ Condition testSymbol(Condition cond, const ValueOperand& value) {
+ return testSymbol(cond, value.typeReg());
+ }
+ Condition testBigInt(Condition cond, const ValueOperand& value) {
+ return testBigInt(cond, value.typeReg());
+ }
+ Condition testObject(Condition cond, const ValueOperand& value) {
+ return testObject(cond, value.typeReg());
+ }
+ Condition testMagic(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value.typeReg());
+ }
+ Condition testError(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value);
+ }
+ Condition testNumber(Condition cond, const ValueOperand& value) {
+ return testNumber(cond, value.typeReg());
+ }
+ Condition testGCThing(Condition cond, const ValueOperand& value) {
+ return testGCThing(cond, value.typeReg());
+ }
+ Condition testPrimitive(Condition cond, const ValueOperand& value) {
+ return testPrimitive(cond, value.typeReg());
+ }
+
+ Condition testUndefined(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testNull(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testString(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testString(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testBigInt(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+ }
+ Condition testBigInt(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testObject(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testDouble(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testMagic(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testGCThing(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void cmpPtr(Register lhs, const ImmWord rhs) { cmpl(Imm32(rhs.value), lhs); }
+ void cmpPtr(Register lhs, const ImmPtr imm) {
+ cmpPtr(lhs, ImmWord(uintptr_t(imm.value)));
+ }
+ void cmpPtr(Register lhs, const ImmGCPtr rhs) { cmpl(rhs, lhs); }
+ void cmpPtr(const Operand& lhs, Imm32 rhs) { cmp32(lhs, rhs); }
+ void cmpPtr(const Operand& lhs, const ImmWord rhs) {
+ cmp32(lhs, Imm32(rhs.value));
+ }
+ void cmpPtr(const Operand& lhs, const ImmPtr imm) {
+ cmpPtr(lhs, ImmWord(uintptr_t(imm.value)));
+ }
+ void cmpPtr(const Operand& lhs, const ImmGCPtr rhs) { cmpl(rhs, lhs); }
+ void cmpPtr(const Address& lhs, Register rhs) { cmpPtr(Operand(lhs), rhs); }
+ void cmpPtr(const Operand& lhs, Register rhs) { cmp32(lhs, rhs); }
+ void cmpPtr(const Address& lhs, const ImmWord rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Address& lhs, const ImmGCPtr rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(Register lhs, Register rhs) { cmp32(lhs, rhs); }
+ void testPtr(Register lhs, Register rhs) { test32(lhs, rhs); }
+ void testPtr(Register lhs, Imm32 rhs) { test32(lhs, rhs); }
+ void testPtr(Register lhs, ImmWord rhs) { test32(lhs, Imm32(rhs.value)); }
+ void testPtr(const Operand& lhs, Imm32 rhs) { test32(lhs, rhs); }
+ void testPtr(const Operand& lhs, ImmWord rhs) {
+ test32(lhs, Imm32(rhs.value));
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+
+ void movePtr(ImmWord imm, Register dest) { movl(Imm32(imm.value), dest); }
+ void movePtr(ImmPtr imm, Register dest) { movl(imm, dest); }
+ void movePtr(wasm::SymbolicAddress imm, Register dest) { mov(imm, dest); }
+ void movePtr(ImmGCPtr imm, Register dest) { movl(imm, dest); }
+ void loadPtr(const Address& address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void loadPtr(const Operand& src, Register dest) { movl(src, dest); }
+ void loadPtr(const BaseIndex& src, Register dest) {
+ movl(Operand(src), dest);
+ }
+ void loadPtr(AbsoluteAddress address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void loadPrivate(const Address& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+ void load32(AbsoluteAddress address, Register dest) {
+ movl(Operand(address), dest);
+ }
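+  // Load the two halves in an order that does not clobber the base (or index)
+  // register before the second load has used it.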
+ void load64(const Address& address, Register64 dest) {
+ bool highBeforeLow = address.base == dest.low;
+ if (highBeforeLow) {
+ movl(Operand(HighWord(address)), dest.high);
+ movl(Operand(LowWord(address)), dest.low);
+ } else {
+ movl(Operand(LowWord(address)), dest.low);
+ movl(Operand(HighWord(address)), dest.high);
+ }
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ // If you run into this, relax your register allocation constraints.
+ MOZ_RELEASE_ASSERT(
+ !((address.base == dest.low || address.base == dest.high) &&
+ (address.index == dest.low || address.index == dest.high)));
+ bool highBeforeLow = address.base == dest.low || address.index == dest.low;
+ if (highBeforeLow) {
+ movl(Operand(HighWord(address)), dest.high);
+ movl(Operand(LowWord(address)), dest.low);
+ } else {
+ movl(Operand(LowWord(address)), dest.low);
+ movl(Operand(HighWord(address)), dest.high);
+ }
+ }
+ template <typename T>
+ void load64Unaligned(const T& address, Register64 dest) {
+ load64(address, dest);
+ }
+ template <typename T>
+ void storePtr(ImmWord imm, T address) {
+ movl(Imm32(imm.value), Operand(address));
+ }
+ template <typename T>
+ void storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+ }
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address) {
+ movl(imm, Operand(address));
+ }
+ void storePtr(Register src, const Address& address) {
+ movl(src, Operand(address));
+ }
+ void storePtr(Register src, const BaseIndex& address) {
+ movl(src, Operand(address));
+ }
+ void storePtr(Register src, const Operand& dest) { movl(src, dest); }
+ void storePtr(Register src, AbsoluteAddress address) {
+ movl(src, Operand(address));
+ }
+ void store32(Register src, AbsoluteAddress address) {
+ movl(src, Operand(address));
+ }
+ void store16(Register src, AbsoluteAddress address) {
+ movw(src, Operand(address));
+ }
+ template <typename T>
+ void store64(Register64 src, const T& address) {
+ movl(src.low, Operand(LowWord(address)));
+ movl(src.high, Operand(HighWord(address)));
+ }
+ void store64(Imm64 imm, Address address) {
+ movl(imm.low(), Operand(LowWord(address)));
+ movl(imm.hi(), Operand(HighWord(address)));
+ }
+ template <typename S, typename T>
+ void store64Unaligned(const S& src, const T& dest) {
+ store64(src, dest);
+ }
+
+ void setStackArg(Register reg, uint32_t arg) {
+ movl(reg, Operand(esp, arg * sizeof(intptr_t)));
+ }
+
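+  // Box a double by splitting it into two 32-bit halves: the low half is the
+  // payload, the high half the tag. With SSE4.1 the high half is extracted
+  // directly; otherwise shift a copy right by four bytes first.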
+ void boxDouble(FloatRegister src, const ValueOperand& dest,
+ FloatRegister temp) {
+ if (Assembler::HasSSE41()) {
+ vmovd(src, dest.payloadReg());
+ vpextrd(1, src, dest.typeReg());
+ } else {
+ vmovd(src, dest.payloadReg());
+ if (src != temp) {
+ moveDouble(src, temp);
+ }
+ vpsrldq(Imm32(4), temp, temp);
+ vmovd(temp, dest.typeReg());
+ }
+ }
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ if (src != dest.payloadReg()) {
+ movl(src, dest.payloadReg());
+ }
+ movl(ImmType(type), dest.typeReg());
+ }
+
+ void unboxNonDouble(const ValueOperand& src, Register dest, JSValueType type,
+ Register scratch = InvalidReg) {
+ unboxNonDouble(Operand(src.typeReg()), Operand(src.payloadReg()), dest,
+ type, scratch);
+ }
+ void unboxNonDouble(const Operand& tag, const Operand& payload, Register dest,
+ JSValueType type, Register scratch = InvalidReg) {
+ auto movPayloadToDest = [&]() {
+ if (payload.kind() != Operand::REG || !payload.containsReg(dest)) {
+ movl(payload, dest);
+ }
+ };
+ if (!JitOptions.spectreValueMasking) {
+ movPayloadToDest();
+ return;
+ }
+
+ // Spectre mitigation: We zero the payload if the tag does not match the
+ // expected type and if this is a pointer type.
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ movPayloadToDest();
+ return;
+ }
+
+ if (!tag.containsReg(dest) && !payload.containsReg(dest)) {
+ // We zero the destination register and move the payload into it if
+ // the tag corresponds to the given type.
+ xorl(dest, dest);
+ cmpl(Imm32(JSVAL_TYPE_TO_TAG(type)), tag);
+ cmovCCl(Condition::Equal, payload, dest);
+ return;
+ }
+
+ if (scratch == InvalidReg || scratch == dest || tag.containsReg(scratch) ||
+ payload.containsReg(scratch)) {
+      // UnboxedLayout::makeConstructorCode calls extractObject with a scratch
+      // register that aliases the tag register, so we cannot assert the
+      // condition above.
+ scratch = InvalidReg;
+ }
+
+ // The destination register aliases one of the operands. We create a
+ // zero value either in a scratch register or on the stack and use it
+ // to reset the destination register after reading both the tag and the
+ // payload.
+ Operand zero(Address(esp, 0));
+ if (scratch == InvalidReg) {
+ push(Imm32(0));
+ } else {
+ xorl(scratch, scratch);
+ zero = Operand(scratch);
+ }
+ cmpl(Imm32(JSVAL_TYPE_TO_TAG(type)), tag);
+ movPayloadToDest();
+ cmovCCl(Condition::NotEqual, zero, dest);
+ if (scratch == InvalidReg) {
+ addl(Imm32(sizeof(void*)), esp);
+ }
+ }
+ void unboxNonDouble(const Address& src, Register dest, JSValueType type) {
+ unboxNonDouble(tagOf(src), payloadOf(src), dest, type);
+ }
+ void unboxNonDouble(const BaseIndex& src, Register dest, JSValueType type) {
+ unboxNonDouble(tagOf(src), payloadOf(src), dest, type);
+ }
+ void unboxInt32(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
+ }
+ void unboxInt32(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
+ }
+ void unboxInt32(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
+ }
+ void unboxBoolean(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
+ }
+ void unboxBoolean(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
+ }
+ void unboxBoolean(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
+ }
+ void unboxString(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxString(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxSymbol(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+ }
+ void unboxSymbol(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+ }
+ void unboxBigInt(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+ void unboxBigInt(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+ void unboxObject(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ // Due to Spectre mitigation logic (see Value.h), if the value is an Object
+ // then this yields the object; otherwise it yields zero (null), as desired.
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ template <typename T>
+ void unboxDouble(const T& src, FloatRegister dest) {
+ loadDouble(Operand(src), dest);
+ }
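+  // Reassemble the double from the payload register (low 32 bits) and the
+  // type register (high 32 bits).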
+ void unboxDouble(const ValueOperand& src, FloatRegister dest) {
+ if (Assembler::HasSSE41()) {
+ vmovd(src.payloadReg(), dest);
+ vpinsrd(1, src.typeReg(), dest, dest);
+ } else {
+ ScratchDoubleScope fpscratch(asMasm());
+ vmovd(src.payloadReg(), dest);
+ vmovd(src.typeReg(), fpscratch);
+ vunpcklps(fpscratch, dest, dest);
+ }
+ }
+ void unboxDouble(const Operand& payload, const Operand& type,
+ Register scratch, FloatRegister dest) {
+ if (Assembler::HasSSE41()) {
+ movl(payload, scratch);
+ vmovd(scratch, dest);
+ movl(type, scratch);
+ vpinsrd(1, scratch, dest, dest);
+ } else {
+ ScratchDoubleScope fpscratch(asMasm());
+ movl(payload, scratch);
+ vmovd(scratch, dest);
+ movl(type, scratch);
+ vmovd(scratch, fpscratch);
+ vunpcklps(fpscratch, dest, dest);
+ }
+ }
+ inline void unboxValue(const ValueOperand& src, AnyRegister dest,
+ JSValueType type);
+
+ // See comment in MacroAssembler-x64.h.
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+
+ void notBoolean(const ValueOperand& val) { xorl(Imm32(1), val.payloadReg()); }
+
+ template <typename T>
+ void fallibleUnboxPtrImpl(const T& src, Register dest, JSValueType type,
+ Label* fail);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address, Register dest) {
+ unboxObject(address, dest);
+ return dest;
+ }
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxNonDouble(value, value.payloadReg(), JSVAL_TYPE_OBJECT, scratch);
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxNonDouble(value, value.payloadReg(), JSVAL_TYPE_SYMBOL, scratch);
+ return value.payloadReg();
+ }
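+  // Int32 and boolean payloads need no unboxing on x86; the payload register
+  // already holds the value.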
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch) {
+ movl(tagOf(address), scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ return value.typeReg();
+ }
+
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true) {
+ convertDoubleToInt32(src, dest, fail, negativeZeroCheck);
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.payloadReg(), dest);
+ }
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+ }
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.payloadReg(), dest);
+ }
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+ }
+
+ void loadConstantDouble(double d, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest);
+ void loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest);
+ void vpaddbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpadddSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmullwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmulldSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddusbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpadduswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubusbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubuswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpandSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpxorSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vporSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vaddpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vaddpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vsubpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vsubpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vdivpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vdivpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vmulpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vmulpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vandpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vminpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpacksswbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpackuswbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpackssdwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpackusdwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpunpckldqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vunpcklpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpshufbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vptestSimd128(const SimdConstant& v, FloatRegister lhs);
+ void vpmaddwdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpeqbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpgtbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpeqwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpgtwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpeqdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpgtdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpeqpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpneqpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpltpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmplepsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpgepsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpeqpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpneqpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpltpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmplepdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaddubswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmuludqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
+ test32(operand.payloadReg(), operand.payloadReg());
+ return truthy ? NonZero : Zero;
+ }
+ Condition testStringTruthy(bool truthy, const ValueOperand& value);
+ Condition testBigIntTruthy(bool truthy, const ValueOperand& value);
+
+ template <typename T>
+ inline void loadInt32OrDouble(const T& src, FloatRegister dest);
+
+ template <typename T>
+ inline void loadUnboxedValue(const T& src, MIRType type, AnyRegister dest);
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes,
+ JSValueType) {
+ switch (nbytes) {
+ case 4:
+ storePtr(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ // Note: this function clobbers the source register.
+ inline void convertUInt32ToDouble(Register src, FloatRegister dest);
+
+ // Note: this function clobbers the source register.
+ inline void convertUInt32ToFloat32(Register src, FloatRegister dest);
+
+ void incrementInt32Value(const Address& addr) {
+ addl(Imm32(1), payloadOf(addr));
+ }
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ public:
+ // Used from within an Exit frame to handle a pending exception.
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerX86 MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_MacroAssembler_x86_h */
diff --git a/js/src/jit/x86/SharedICHelpers-x86-inl.h b/js/src/jit/x86/SharedICHelpers-x86-inl.h
new file mode 100644
index 0000000000..2ab41c287a
--- /dev/null
+++ b/js/src/jit/x86/SharedICHelpers-x86-inl.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICHelpers_x86_inl_h
+#define jit_x86_SharedICHelpers_x86_inl_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ // We assume during this that R0 and R1 have been pushed.
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.movl(FramePointer, eax);
+ masm.subl(StackPointer, eax);
+ masm.subl(Imm32(argSize), eax);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(eax, frameSizeAddr);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(ICTailCallReg);
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+#ifdef DEBUG
+ // Compute frame size. Because the return address is still on the stack,
+ // this is:
+ //
+ // FramePointer
+ // - StackPointer
+ // - sizeof(return address)
+
+ masm.movl(FramePointer, scratch);
+ masm.subl(StackPointer, scratch);
+ masm.subl(Imm32(sizeof(void*)), scratch); // Return address.
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Push the return address that's currently on top of the stack.
+ masm.Push(Operand(StackPointer, 0));
+
+ // Replace the original return address with the frame descriptor.
+ masm.storePtr(ImmWord(MakeFrameDescriptor(FrameType::BaselineJS)),
+ Address(StackPointer, sizeof(uintptr_t)));
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ masm.Push(ICStubReg);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICHelpers_x86_inl_h */
diff --git a/js/src/jit/x86/SharedICHelpers-x86.h b/js/src/jit/x86/SharedICHelpers-x86.h
new file mode 100644
index 0000000000..07d2ae8a6f
--- /dev/null
+++ b/js/src/jit/x86/SharedICHelpers-x86.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICHelpers_x86_h
+#define jit_x86_SharedICHelpers_x86_h
+
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from stack top to the top Value inside an IC stub (this is the
+// return address).
+static const size_t ICStackValueOffset = sizeof(void*);
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+ masm.Pop(ICTailCallReg);
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+ masm.Push(ICTailCallReg);
+}
+
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Call the stubcode.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.ret(); }
+
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ Address stubAddr(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP);
+ masm.loadPtr(stubAddr, ICStubReg);
+
+ masm.mov(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // The return address is on top of the stack, followed by the frame
+ // descriptor. Use a pop instruction to overwrite the frame descriptor
+ // with the return address. Note that pop increments the stack pointer
+ // before computing the address.
+ masm.Pop(Operand(StackPointer, 0));
+}
+
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ masm.guardedCallPreBarrier(addr, type);
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+  // The return address is already loaded; just jump to the next stubcode.
+ masm.jmp(Operand(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICHelpers_x86_h */
diff --git a/js/src/jit/x86/SharedICRegisters-x86.h b/js/src/jit/x86/SharedICRegisters-x86.h
new file mode 100644
index 0000000000..44edb5288f
--- /dev/null
+++ b/js/src/jit/x86/SharedICRegisters-x86.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICRegisters_x86_h
+#define jit_x86_SharedICRegisters_x86_h
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/x86/Assembler-x86.h"
+
+namespace js {
+namespace jit {
+
+// ValueOperands R0, R1, and R2
+static constexpr ValueOperand R0(ecx, edx);
+static constexpr ValueOperand R1(eax, ebx);
+static constexpr ValueOperand R2(esi, edi);
+
+// ICTailCallReg and ICStubReg reuse registers from R2.
+static constexpr Register ICTailCallReg = esi;
+static constexpr Register ICStubReg = edi;
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = xmm0;
+static constexpr FloatRegister FloatReg1 = xmm1;
+static constexpr FloatRegister FloatReg2 = xmm2;
+static constexpr FloatRegister FloatReg3 = xmm3;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICRegisters_x86_h */
diff --git a/js/src/jit/x86/Trampoline-x86.cpp b/js/src/jit/x86/Trampoline-x86.cpp
new file mode 100644
index 0000000000..da459cdd3a
--- /dev/null
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -0,0 +1,796 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineJIT.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "jit/x86/SharedICHelpers-x86.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/JSScript-inl.h"
+
+using mozilla::IsPowerOfTwo;
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
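+// Offsets of the EnterJitCode arguments relative to %ebp. The first two slots
+// hold the saved frame pointer and the return address, so the first argument
+// lives at 2 * sizeof(void*).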
+enum EnterJitEbpArgumentOffset {
+ ARG_JITCODE = 2 * sizeof(void*),
+ ARG_ARGC = 3 * sizeof(void*),
+ ARG_ARGV = 4 * sizeof(void*),
+ ARG_STACKFRAME = 5 * sizeof(void*),
+ ARG_CALLEETOKEN = 6 * sizeof(void*),
+ ARG_SCOPECHAIN = 7 * sizeof(void*),
+ ARG_STACKVALUES = 8 * sizeof(void*),
+ ARG_RESULT = 9 * sizeof(void*)
+};
+
+// Generates a trampoline for calling JIT-compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard cdecl
+// calling convention.
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ masm.assertStackAlignment(ABIStackAlignment,
+ -int32_t(sizeof(uintptr_t)) /* return address */);
+
+ // Save old stack frame pointer, set new stack frame pointer.
+ masm.push(ebp);
+ masm.movl(esp, ebp);
+
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.push(ebx);
+ masm.push(esi);
+ masm.push(edi);
+
+ // Load the number of values to be copied (argc) into eax
+ masm.loadPtr(Address(ebp, ARG_ARGC), eax);
+
+ // If we are constructing, that also needs to include newTarget
+ {
+ Label noNewTarget;
+ masm.loadPtr(Address(ebp, ARG_CALLEETOKEN), edx);
+ masm.branchTest32(Assembler::Zero, edx,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.addl(Imm32(1), eax);
+
+ masm.bind(&noNewTarget);
+ }
+
+  // eax <- 8*numValues; eax is now the offset between argv and the last value.
+ masm.shll(Imm32(3), eax);
+
+ // Guarantee stack alignment of Jit frames.
+ //
+ // This code compensates for the offset created by the copy of the vector of
+ // arguments, such that the jit frame will be aligned once the return
+ // address is pushed on the stack.
+ //
+ // In the computation of the offset, we omit the size of the JitFrameLayout
+ // which is pushed on the stack, as the JitFrameLayout size is a multiple of
+ // the JitStackAlignment.
+ masm.movl(esp, ecx);
+ masm.subl(eax, ecx);
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+
+ // ecx = ecx & 15, holds alignment.
+ masm.andl(Imm32(JitStackAlignment - 1), ecx);
+ masm.subl(ecx, esp);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // ebx = argv --argv pointer is in ebp + 16
+ masm.loadPtr(Address(ebp, ARG_ARGV), ebx);
+
+ // eax = argv[8(argc)] --eax now points one value past the last argument
+ masm.addl(ebx, eax);
+
+ // while (eax > ebx) --while still looping through arguments
+ {
+ Label header, footer;
+ masm.bind(&header);
+
+ masm.cmp32(eax, ebx);
+ masm.j(Assembler::BelowOrEqual, &footer);
+
+ // eax -= 8 --move to previous argument
+ masm.subl(Imm32(8), eax);
+
+ // Push what eax points to on stack, a Value is 2 words
+ masm.push(Operand(eax, 4));
+ masm.push(Operand(eax, 0));
+
+ masm.jmp(&header);
+ masm.bind(&footer);
+ }
+
+ // Load the number of actual arguments. |result| is used to store the
+ // actual number of arguments without adding an extra argument to the enter
+ // JIT.
+ masm.mov(Operand(ebp, ARG_RESULT), eax);
+ masm.unboxInt32(Address(eax, 0x0), eax);
+
+ // Push the callee token.
+ masm.push(Operand(ebp, ARG_CALLEETOKEN));
+
+ // Load the InterpreterFrame address into the OsrFrameReg.
+ // This address is also used for setting the constructing bit on all paths.
+ masm.loadPtr(Address(ebp, ARG_STACKFRAME), OsrFrameReg);
+
+ // Push the descriptor.
+ masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, eax, eax);
+
+ CodeLabel returnLabel;
+ Label oomReturnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(ebp));
+ regs.take(OsrFrameReg);
+ regs.take(ReturnReg);
+
+ Register scratch = regs.takeAny();
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register numStackValues = regs.takeAny();
+ masm.loadPtr(Address(ebp, ARG_STACKVALUES), numStackValues);
+
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(ebp, ARG_JITCODE), jitcode);
+
+ // Push return address.
+ masm.mov(&returnLabel, scratch);
+ masm.push(scratch);
+
+ // Frame prologue.
+ masm.push(ebp);
+ masm.mov(esp, ebp);
+
+ // Reserve frame.
+ masm.subPtr(Imm32(BaselineFrame::Size()), esp);
+
+ Register framePtrScratch = regs.takeAny();
+ masm.touchFrameValues(numStackValues, scratch, framePtrScratch);
+ masm.mov(esp, framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ masm.mov(numStackValues, scratch);
+ masm.shll(Imm32(3), scratch);
+ masm.subPtr(scratch, esp);
+
+ // Enter exit frame.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(Imm32(0)); // Fake return address.
+ masm.push(FramePointer);
+ // No GC things to mark on the stack, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.push(jitcode);
+
+ using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtrScratch); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ masm.pop(jitcode);
+
+ MOZ_ASSERT(jitcode != ReturnReg);
+
+ Label error;
+ masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), esp);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(ebp, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: frame epilogue, load error value, discard return address and return.
+ masm.bind(&error);
+ masm.mov(ebp, esp);
+ masm.pop(ebp);
+ masm.addPtr(Imm32(sizeof(uintptr_t)), esp); // Return address.
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&oomReturnLabel);
+
+ masm.bind(&notOsr);
+ masm.loadPtr(Address(ebp, ARG_SCOPECHAIN), R1.scratchReg());
+ }
+
+ // The call will push the return address and frame pointer on the stack, thus
+ // we check that the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+ /***************************************************************
+ Call passed-in code, get return value and fill in the
+ passed in return value pointer
+ ***************************************************************/
+ masm.call(Address(ebp, ARG_JITCODE));
+
+ {
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ masm.addCodeLabel(returnLabel);
+ masm.bind(&oomReturnLabel);
+ }
+
+ // Restore the stack pointer so the stack looks like this:
+ // +20 ... arguments ...
+ // +16 <return>
+ // +12 ebp <- %ebp pointing here.
+ // +8 ebx
+ // +4 esi
+ // +0 edi <- %esp pointing here.
+ masm.lea(Operand(ebp, -int32_t(3 * sizeof(void*))), esp);
+
+ // Store the return value.
+ masm.loadPtr(Address(ebp, ARG_RESULT), eax);
+ masm.storeValue(JSReturnOperand, Operand(eax, 0));
+
+ /**************************************************************
+ Return stack and registers to correct state
+ **************************************************************/
+
+ // Restore non-volatile registers
+ masm.pop(edi);
+ masm.pop(esi);
+ masm.pop(ebx);
+
+ // Restore old stack frame pointer
+ masm.pop(ebp);
+ masm.ret();
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ // Not supported, or not implemented yet.
+ // TODO: Implement along with the corresponding stack-walker changes, in
+ // coordination with the Gecko Profiler, see bug 1635987 and follow-ups.
+ return mozilla::Nothing{};
+}
+
+// Push AllRegs in a way that is compatible with RegisterDump, regardless of
+// what PushRegsInMask might do to reduce the set size.
+static void DumpAllRegs(MacroAssembler& masm) {
+#ifdef ENABLE_WASM_SIMD
+ masm.PushRegsInMask(AllRegs);
+#else
+ // When SIMD isn't supported, PushRegsInMask reduces the set of float
+ // registers to be double-sized, while the RegisterDump expects each of
+ // the float registers to have the maximal possible size
+ // (Simd128DataSize). To work around this, we just spill the double
+ // registers by hand here, using the register dump offset directly.
+ for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more();
+ ++iter) {
+ masm.Push(*iter);
+ }
+
+ masm.reserveStack(sizeof(RegisterDump::FPUArray));
+ for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more();
+ ++iter) {
+ FloatRegister reg = *iter;
+ Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
+ masm.storeDouble(reg, spillAddress);
+ }
+#endif
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+ // We do the minimum amount of work in assembly and shunt the rest
+ // off to InvalidationBailout. Assembly does:
+ //
+ // - Push the machine state onto the stack.
+ // - Call the InvalidationBailout routine with the stack pointer.
+ // - Now that the frame has been bailed out, convert the invalidated
+ // frame into an exit frame.
+ // - Do the normal check-return-code-and-thunk-to-the-interpreter dance.
+
+ // Push registers such that we can access them from [base + code].
+ DumpAllRegs(masm);
+
+ masm.movl(esp, eax); // Argument to jit::InvalidationBailout.
+
+ // Make space for InvalidationBailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movl(esp, ebx);
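+ // ebx now points at the reserved slot and is passed below as the |info|
+ // outparam.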
+
+ using Fn =
+ bool (*)(InvalidationBailoutStack* sp, BaselineBailoutInfo** info);
+ masm.setupUnalignedABICall(edx);
+ masm.passABIArg(eax);
+ masm.passABIArg(ebx);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(ecx); // Get bailoutInfo outparam.
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in ecx.
+ masm.jmp(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ] <- esp
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.movl(esp, FramePointer); // Save %esp.
+
+ // Load argc.
+ masm.loadNumActualArgs(FramePointer, esi);
+
+ // Load the callee's formal argument count into %ecx; the number of
+ // |undefined|s to push is derived from it below.
+ masm.loadPtr(Address(ebp, RectifierFrameLayout::offsetOfCalleeToken()), eax);
+ masm.mov(eax, ecx);
+ masm.andl(Imm32(CalleeTokenMask), ecx);
+ masm.loadFunctionArgCount(ecx, ecx);
+
+ // The frame pointer and its padding are pushed on the stack.
+ // Including |this|, there are (|nformals| + 1) arguments to push to the
+ // stack. Then we push a JitFrameLayout. We compute the padding, expressed
+ // as the number of extra |undefined| values to push on the stack.
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(
+ JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+ static_assert(IsPowerOfTwo(JitStackValueAlignment),
+ "must have power of two for masm.andl to do its job");
+
+ masm.addl(
+ Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */),
+ ecx);
+
+ // Account for newTarget, if necessary.
+ static_assert(
+ CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count an extra push");
+ masm.mov(eax, edx);
+ masm.andl(Imm32(CalleeToken_FunctionConstructing), edx);
+ masm.addl(edx, ecx);
+
+ masm.andl(Imm32(~(JitStackValueAlignment - 1)), ecx);
+ masm.subl(esi, ecx);
+ masm.subl(Imm32(1), ecx); // For |this|.
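+ // Illustrative example (values assumed, not taken from this code): with
+ // JitStackValueAlignment == 2, |nformals| == 3, |argc| == 1 and a
+ // non-constructing call, %ecx becomes 3 + (2 - 1) + 1 = 5, the andl rounds
+ // it down to 4, and 4 - 1 (argc) - 1 (|this|) = 2 |undefined| values are
+ // pushed, so the 4 values pushed in total (2 undefined, 1 argument, |this|)
+ // keep the stack Value-aligned.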
+
+ // Copy the number of actual arguments into edx.
+ masm.mov(esi, edx);
+
+ masm.moveValue(UndefinedValue(), ValueOperand(ebx, edi));
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ]
+ // '-- #esi ---'
+ //
+ // Rectifier frame:
+ // [ebp'] <- ebp [padding] <- esp [undef] [undef] [arg2] [arg1] [this]
+ // '--- #ecx ----' '-- #esi ---'
+ //
+ // [ [argc] [callee] [descr] [raddr] ]
+
+ // Push undefined.
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.push(ebx); // type(undefined);
+ masm.push(edi); // payload(undefined);
+ masm.subl(Imm32(1), ecx);
+ masm.j(Assembler::NonZero, &undefLoopTop);
+ }
+
+ // Get the topmost argument.
+ BaseIndex b(FramePointer, esi, TimesEight, sizeof(RectifierFrameLayout));
+ masm.lea(Operand(b), ecx);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ masm.addl(Imm32(1), esi);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
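+ // Each 64-bit Value is copied as two 32-bit pushes: the high word first,
+ // then the low word, preserving the Value's layout on the new stack.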
+ masm.push(Operand(ecx, sizeof(Value) / 2));
+ masm.push(Operand(ecx, 0x0));
+ masm.subl(Imm32(sizeof(Value)), ecx);
+ masm.subl(Imm32(1), esi);
+ masm.j(Assembler::NonZero, &copyLoopTop);
+ }
+
+ {
+ Label notConstructing;
+
+ masm.mov(eax, ebx);
+ masm.branchTest32(Assembler::Zero, ebx,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
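+ // |newTarget| sits in the caller's frame just above |this| and the actual
+ // arguments; copy it into the slot just above the formals in the new frame
+ // (one of the |undefined| slots pushed above).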
+ BaseValueIndex src(FramePointer, edx,
+ sizeof(RectifierFrameLayout) + sizeof(Value));
+
+ masm.andl(Imm32(CalleeTokenMask), ebx);
+ masm.loadFunctionArgCount(ebx, ebx);
+
+ BaseValueIndex dst(esp, ebx, sizeof(Value));
+
+ ValueOperand newTarget(ecx, edi);
+
+ masm.loadValue(src, newTarget);
+ masm.storeValue(newTarget, dst);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Construct JitFrameLayout.
+ masm.push(eax); // callee token
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, edx, edx);
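+ // The descriptor encodes FrameType::Rectifier and the actual argument
+ // count, still held in edx.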
+
+ // Call the target function.
+ masm.andl(Imm32(CalleeTokenMask), eax);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(eax, eax);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(eax);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(eax, ebx, &noBaselineScript);
+ masm.callJitNoProfiler(ebx);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(eax, eax);
+ masm.callJitNoProfiler(eax);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // Push registers such that we can access them from [base + code].
+ DumpAllRegs(masm);
+
+ // The current stack pointer is the first argument to jit::Bailout.
+ masm.movl(esp, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, eax);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movl(esp, ebx);
+
+ // Call the bailout function.
+ using Fn = bool (*)(BailoutStack* sp, BaselineBailoutInfo** info);
+ masm.setupUnalignedABICall(ecx);
+ masm.passABIArg(eax);
+ masm.passABIArg(ebx);
+ masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(ecx); // Get the bailoutInfo outparam.
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in ecx.
+ masm.jmp(bailoutTail);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set must be a superset of Volatile register set.");
+
+ // The context is the first argument.
+ Register cxreg = regs.takeAny();
+
+ // Stack is:
+ // ... frame ...
+ // +8 [args]
+ // +4 descriptor
+ // +0 returnAddress
+ //
+ // Push the frame pointer to finish the exit frame, then link it up.
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+ // Save the current stack pointer as the base for copying arguments.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = regs.takeAny();
+ masm.lea(Operand(esp, ExitFrameLayout::SizeWithFooter()), argsBase);
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.Push(UndefinedValue());
+ masm.movl(esp, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movl(esp, outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ case Type_Bool:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(int32_t));
+ masm.movl(esp, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movl(esp, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
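+ // argDisp is the byte offset of the next explicit argument relative to
+ // argsBase (the first word above the exit frame).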
+
+ // Copy arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ // We don't pass doubles in float registers on x86, so no need
+ // to check for argPassedInFloatReg.
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += 2 * sizeof(void*);
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (outReg != InvalidReg) {
+ masm.passABIArg(outReg);
+ }
+
+ masm.callWithABI(nativeFun, MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, eax, eax, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.testb(eax, eax);
+ masm.j(Assembler::Zero, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.Pop(JSReturnOperand);
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ masm.Pop(ReturnReg);
+ break;
+
+ case Type_Bool:
+ masm.Pop(ReturnReg);
+ masm.movzbl(ReturnReg, ReturnReg);  // Zero-extend the returned bool.
+ break;
+
+ case Type_Double:
+ masm.Pop(ReturnDoubleReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ // Until C++ code is instrumented against Spectre, prevent speculative
+ // execution from returning any private data.
+ if (f.returnsData() && JitOptions.spectreJitToCxxCalls) {
+ masm.speculationBarrier();
+ }
+
+ // Pop ExitFooterFrame and the frame pointer.
+ masm.leaveExitFrame(0);
+ masm.pop(FramePointer);
+
+ // Return. Subtract sizeof(void*) for the frame pointer, which was popped above.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ static_assert(PreBarrierReg == edx);
+ Register temp1 = eax;
+ Register temp2 = ebx;
+ Register temp3 = ecx;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
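+ // The barrier is reached from arbitrary jitcode, so the volatile registers
+ // (which the C++ call may clobber) are saved and restored around it.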
+ LiveRegisterSet save;
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ masm.PushRegsInMask(save);
+
+ masm.movl(ImmPtr(cx->runtime()), ecx);
+
+ masm.setupUnalignedABICall(eax);
+ masm.passABIArg(ecx);
+ masm.passABIArg(edx);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.ret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(edx, ecx);
+}